Skip to content

Commit 0e0c7c6

Browse files
committed
Added a VTR assert to ensure that the compiler warning about potentially dereferencing a null pointer doesn't appear
1 parent 116da4b commit 0e0c7c6

File tree

1 file changed

+121
-122
lines changed

1 file changed

+121
-122
lines changed

vpr/src/pack/cluster.cpp

Lines changed: 121 additions & 122 deletions
Original file line numberDiff line numberDiff line change
@@ -388,7 +388,7 @@ static bool pb_used_for_blif_model(const t_pb* pb, std::string blif_model_name);
388388

389389
static void print_le_count(std::vector<int>& le_count, const t_pb_type* le_pb_type);
390390

391-
static t_pb* get_parent_pb(t_pb* pb);
391+
static t_pb* get_top_level_pb(t_pb* pb);
392392

393393
/*****************************************/
394394
/*globally accessible function*/
@@ -1980,80 +1980,77 @@ static void mark_and_update_partial_gain(const AtomNetId net_id, enum e_gain_upd
19801980

19811981
auto& atom_ctx = g_vpr_ctx.atom();
19821982
t_pb* cur_pb = atom_ctx.lookup.atom_pb(clustered_blk_id)->parent_pb;
1983-
cur_pb = get_parent_pb(cur_pb);
1984-
1985-
if (cur_pb) {
1986-
if (int(atom_ctx.nlist.net_sinks(net_id).size()) > high_fanout_net_threshold) {
1987-
/* Optimization: It can be too runtime costly for marking all sinks for
1988-
* a high fanout-net that probably has no hope of ever getting packed,
1989-
* thus ignore those high fanout nets */
1990-
if (!is_global.count(net_id)) {
1991-
/* If no low/medium fanout nets, we may need to consider
1992-
* high fan-out nets for packing, so select one and store it */
1993-
while (cur_pb->parent_pb != nullptr) {
1994-
cur_pb = cur_pb->parent_pb;
1995-
}
1996-
AtomNetId stored_net = cur_pb->pb_stats->tie_break_high_fanout_net;
1997-
if (!stored_net || atom_ctx.nlist.net_sinks(net_id).size() < atom_ctx.nlist.net_sinks(stored_net).size()) {
1998-
cur_pb->pb_stats->tie_break_high_fanout_net = net_id;
1999-
}
2000-
}
2001-
return;
2002-
}
2003-
}
1983+
cur_pb = get_top_level_pb(cur_pb);
1984+
1985+
if (int(atom_ctx.nlist.net_sinks(net_id).size()) > high_fanout_net_threshold) {
1986+
/* Optimization: It can be too runtime costly for marking all sinks for
1987+
* a high fanout-net that probably has no hope of ever getting packed,
1988+
* thus ignore those high fanout nets */
1989+
if (!is_global.count(net_id)) {
1990+
/* If no low/medium fanout nets, we may need to consider
1991+
* high fan-out nets for packing, so select one and store it */
1992+
while (cur_pb->parent_pb != nullptr) {
1993+
cur_pb = cur_pb->parent_pb;
1994+
}
1995+
AtomNetId stored_net = cur_pb->pb_stats->tie_break_high_fanout_net;
1996+
if (!stored_net || atom_ctx.nlist.net_sinks(net_id).size() < atom_ctx.nlist.net_sinks(stored_net).size()) {
1997+
cur_pb->pb_stats->tie_break_high_fanout_net = net_id;
1998+
}
1999+
}
2000+
return;
2001+
}
20042002

20052003
/* Mark atom net as being visited, if necessary. */
20062004

2007-
if (cur_pb) {
2008-
if (cur_pb->pb_stats->num_pins_of_net_in_pb.count(net_id) == 0) {
2009-
cur_pb->pb_stats->marked_nets.push_back(net_id);
2010-
}
2011-
2012-
/* Update gains of affected blocks. */
2013-
2014-
if (gain_flag == GAIN) {
2015-
/* Check if this net is connected to it's driver block multiple times (i.e. as both an output and input)
2016-
* If so, avoid double counting by skipping the first (driving) pin. */
2017-
2018-
auto pins = atom_ctx.nlist.net_pins(net_id);
2019-
if (net_output_feeds_driving_block_input[net_id] != 0)
2020-
//We implicitly assume here that net_output_feeds_driver_block_input[net_id] is 2
2021-
//(i.e. the net loops back to the block only once)
2022-
pins = atom_ctx.nlist.net_sinks(net_id);
2023-
2024-
if (cur_pb->pb_stats->num_pins_of_net_in_pb.count(net_id) == 0) {
2025-
for (auto pin_id : pins) {
2026-
auto blk_id = atom_ctx.nlist.pin_block(pin_id);
2027-
if (atom_ctx.lookup.atom_clb(blk_id) == ClusterBlockId::INVALID()) {
2028-
if (cur_pb->pb_stats->sharinggain.count(blk_id) == 0) {
2029-
cur_pb->pb_stats->marked_blocks.push_back(blk_id);
2030-
cur_pb->pb_stats->sharinggain[blk_id] = 1;
2031-
cur_pb->pb_stats->hillgain[blk_id] = 1 - num_ext_inputs_atom_block(blk_id);
2032-
} else {
2033-
cur_pb->pb_stats->sharinggain[blk_id]++;
2034-
cur_pb->pb_stats->hillgain[blk_id]++;
2035-
}
2036-
}
2037-
}
2038-
}
2005+
if (cur_pb->pb_stats->num_pins_of_net_in_pb.count(net_id) == 0) {
2006+
cur_pb->pb_stats->marked_nets.push_back(net_id);
2007+
}
2008+
2009+
/* Update gains of affected blocks. */
2010+
2011+
if (gain_flag == GAIN) {
2012+
/* Check if this net is connected to it's driver block multiple times (i.e. as both an output and input)
2013+
* If so, avoid double counting by skipping the first (driving) pin. */
2014+
2015+
auto pins = atom_ctx.nlist.net_pins(net_id);
2016+
if (net_output_feeds_driving_block_input[net_id] != 0)
2017+
//We implicitly assume here that net_output_feeds_driver_block_input[net_id] is 2
2018+
//(i.e. the net loops back to the block only once)
2019+
pins = atom_ctx.nlist.net_sinks(net_id);
2020+
2021+
if (cur_pb->pb_stats->num_pins_of_net_in_pb.count(net_id) == 0) {
2022+
for (auto pin_id : pins) {
2023+
auto blk_id = atom_ctx.nlist.pin_block(pin_id);
2024+
if (atom_ctx.lookup.atom_clb(blk_id) == ClusterBlockId::INVALID()) {
2025+
if (cur_pb->pb_stats->sharinggain.count(blk_id) == 0) {
2026+
cur_pb->pb_stats->marked_blocks.push_back(blk_id);
2027+
cur_pb->pb_stats->sharinggain[blk_id] = 1;
2028+
cur_pb->pb_stats->hillgain[blk_id] = 1 - num_ext_inputs_atom_block(blk_id);
2029+
} else {
2030+
cur_pb->pb_stats->sharinggain[blk_id]++;
2031+
cur_pb->pb_stats->hillgain[blk_id]++;
2032+
}
2033+
}
2034+
}
2035+
}
2036+
2037+
if (connection_driven) {
2038+
update_connection_gain_values(net_id, clustered_blk_id, cur_pb,
2039+
net_relation_to_clustered_block);
2040+
}
2041+
2042+
if (timing_driven) {
2043+
update_timing_gain_values(net_id, cur_pb,
2044+
net_relation_to_clustered_block,
2045+
timing_info,
2046+
is_global);
2047+
}
2048+
}
2049+
if (cur_pb->pb_stats->num_pins_of_net_in_pb.count(net_id) == 0) {
2050+
cur_pb->pb_stats->num_pins_of_net_in_pb[net_id] = 0;
2051+
}
2052+
cur_pb->pb_stats->num_pins_of_net_in_pb[net_id]++;
20392053

2040-
if (connection_driven) {
2041-
update_connection_gain_values(net_id, clustered_blk_id, cur_pb,
2042-
net_relation_to_clustered_block);
2043-
}
2044-
2045-
if (timing_driven) {
2046-
update_timing_gain_values(net_id, cur_pb,
2047-
net_relation_to_clustered_block,
2048-
timing_info,
2049-
is_global);
2050-
}
2051-
}
2052-
if (cur_pb->pb_stats->num_pins_of_net_in_pb.count(net_id) == 0) {
2053-
cur_pb->pb_stats->num_pins_of_net_in_pb[net_id] = 0;
2054-
}
2055-
cur_pb->pb_stats->num_pins_of_net_in_pb[net_id]++;
2056-
}
20572054
}
20582055

20592056
/*****************************************/
@@ -2064,59 +2061,59 @@ static void update_total_gain(float alpha, float beta, bool timing_driven, bool
20642061
auto& atom_ctx = g_vpr_ctx.atom();
20652062
t_pb* cur_pb = pb;
20662063

2067-
cur_pb = get_parent_pb(cur_pb);
2064+
cur_pb = get_top_level_pb(cur_pb);
20682065
AttractGroupId cluster_att_grp_id;
2069-
if (cur_pb) {
2070-
cluster_att_grp_id = cur_pb->pb_stats->attraction_grp_id;
20712066

2072-
for (AtomBlockId blk_id : cur_pb->pb_stats->marked_blocks) {
2073-
//Initialize connectiongain and sharinggain if
2074-
//they have not previously been updated for the block
2075-
if (cur_pb->pb_stats->connectiongain.count(blk_id) == 0) {
2076-
cur_pb->pb_stats->connectiongain[blk_id] = 0;
2077-
}
2078-
if (cur_pb->pb_stats->sharinggain.count(blk_id) == 0) {
2079-
cur_pb->pb_stats->sharinggain[blk_id] = 0;
2080-
}
2081-
2082-
/* Todo: Right now we update the gain multiple times for each block.
2083-
* Eventually want to move this out of the while loop and only update it
2084-
* for the top-level block in each cluster.*/
2085-
AttractGroupId atom_grp_id = attraction_groups.get_atom_attraction_group(blk_id);
2086-
if (atom_grp_id != AttractGroupId::INVALID() && atom_grp_id == cluster_att_grp_id) {
2087-
//increase gain of atom based on attraction group gain
2088-
float att_grp_gain = attraction_groups.get_attraction_group_gain(atom_grp_id);
2089-
cur_pb->pb_stats->gain[blk_id] += att_grp_gain;
2090-
}
2091-
2092-
/* Todo: This was used to explore different normalization options, can
2093-
* be made more efficient once we decide on which one to use*/
2094-
int num_used_input_pins = atom_ctx.nlist.block_input_pins(blk_id).size();
2095-
int num_used_output_pins = atom_ctx.nlist.block_output_pins(blk_id).size();
2096-
/* end todo */
2097-
2098-
/* Calculate area-only cost function */
2099-
int num_used_pins = num_used_input_pins + num_used_output_pins;
2100-
VTR_ASSERT(num_used_pins > 0);
2101-
if (connection_driven) {
2102-
/*try to absorb as many connections as possible*/
2103-
cur_pb->pb_stats->gain[blk_id] = ((1 - beta)
2104-
* (float)cur_pb->pb_stats->sharinggain[blk_id]
2105-
+ beta * (float)cur_pb->pb_stats->connectiongain[blk_id])
2106-
/ (num_used_pins);
2107-
} else {
2108-
cur_pb->pb_stats->gain[blk_id] = ((float)cur_pb->pb_stats->sharinggain[blk_id])
2109-
/ (num_used_pins);
2110-
}
2067+
cluster_att_grp_id = cur_pb->pb_stats->attraction_grp_id;
2068+
2069+
for (AtomBlockId blk_id : cur_pb->pb_stats->marked_blocks) {
2070+
//Initialize connectiongain and sharinggain if
2071+
//they have not previously been updated for the block
2072+
if (cur_pb->pb_stats->connectiongain.count(blk_id) == 0) {
2073+
cur_pb->pb_stats->connectiongain[blk_id] = 0;
2074+
}
2075+
if (cur_pb->pb_stats->sharinggain.count(blk_id) == 0) {
2076+
cur_pb->pb_stats->sharinggain[blk_id] = 0;
2077+
}
2078+
2079+
/* Todo: Right now we update the gain multiple times for each block.
2080+
* Eventually want to move this out of the while loop and only update it
2081+
* for the top-level block in each cluster.*/
2082+
AttractGroupId atom_grp_id = attraction_groups.get_atom_attraction_group(blk_id);
2083+
if (atom_grp_id != AttractGroupId::INVALID() && atom_grp_id == cluster_att_grp_id) {
2084+
//increase gain of atom based on attraction group gain
2085+
float att_grp_gain = attraction_groups.get_attraction_group_gain(atom_grp_id);
2086+
cur_pb->pb_stats->gain[blk_id] += att_grp_gain;
2087+
}
2088+
2089+
/* Todo: This was used to explore different normalization options, can
2090+
* be made more efficient once we decide on which one to use*/
2091+
int num_used_input_pins = atom_ctx.nlist.block_input_pins(blk_id).size();
2092+
int num_used_output_pins = atom_ctx.nlist.block_output_pins(blk_id).size();
2093+
/* end todo */
2094+
2095+
/* Calculate area-only cost function */
2096+
int num_used_pins = num_used_input_pins + num_used_output_pins;
2097+
VTR_ASSERT(num_used_pins > 0);
2098+
if (connection_driven) {
2099+
/*try to absorb as many connections as possible*/
2100+
cur_pb->pb_stats->gain[blk_id] = ((1 - beta)
2101+
* (float)cur_pb->pb_stats->sharinggain[blk_id]
2102+
+ beta * (float)cur_pb->pb_stats->connectiongain[blk_id])
2103+
/ (num_used_pins);
2104+
} else {
2105+
cur_pb->pb_stats->gain[blk_id] = ((float)cur_pb->pb_stats->sharinggain[blk_id])
2106+
/ (num_used_pins);
2107+
}
2108+
2109+
/* Add in timing driven cost into cost function */
2110+
if (timing_driven) {
2111+
cur_pb->pb_stats->gain[blk_id] = alpha
2112+
* cur_pb->pb_stats->timinggain[blk_id]
2113+
+ (1.0 - alpha) * (float)cur_pb->pb_stats->gain[blk_id];
2114+
}
2115+
}
21112116

2112-
/* Add in timing driven cost into cost function */
2113-
if (timing_driven) {
2114-
cur_pb->pb_stats->gain[blk_id] = alpha
2115-
* cur_pb->pb_stats->timinggain[blk_id]
2116-
+ (1.0 - alpha) * (float)cur_pb->pb_stats->gain[blk_id];
2117-
}
2118-
}
2119-
}
21202117
}
21212118

21222119
/*****************************************/
@@ -3963,13 +3960,15 @@ static void print_le_count(std::vector<int>& le_count, const t_pb_type* le_pb_ty
39633960
VTR_LOG(" LEs used for registers only : %d\n\n", le_count[2]);
39643961
}
39653962

3966-
static t_pb* get_parent_pb(t_pb* pb) {
3963+
static t_pb* get_top_level_pb(t_pb* pb) {
39673964
t_pb* top_level_pb = pb;
39683965

39693966
while (pb) {
39703967
top_level_pb = pb;
39713968
pb = pb->parent_pb;
39723969
}
39733970

3971+
VTR_ASSERT(top_level_pb != nullptr);
3972+
39743973
return top_level_pb;
39753974
}

0 commit comments

Comments
 (0)