Skip to content

Commit 74ae385

Browse files
committed
more fixes for bitstream generation with flat router
1 parent 848d1e7 commit 74ae385

6 files changed

+76
-46
lines changed

vpr/src/base/netlist_writer.cpp

Lines changed: 3 additions & 1 deletion
Original file line number | Diff line number | Diff line change
@@ -1245,7 +1245,9 @@ class NetlistWriterVisitor : public NetlistVisitor {
12451245

12461246
//Add the single output connection
12471247
{
1248-
auto atom_net_id = top_pb_route[sink_cluster_pin_idx].atom_net_id; //Connected net in atom netlist
1248+
AtomNetId atom_net_id = AtomNetId::INVALID();
1249+
if (top_pb_route.count(sink_cluster_pin_idx))
1250+
atom_net_id = top_pb_route[sink_cluster_pin_idx].atom_net_id; //Connected net in atom netlist
12491251

12501252
std::string net;
12511253
if (!atom_net_id) {

vpr/src/base/vpr_api.cpp

Lines changed: 25 additions & 0 deletions
Original file line number | Diff line number | Diff line change
@@ -17,6 +17,7 @@
1717

1818
#include "FlatPlacementInfo.h"
1919
#include "cluster_util.h"
20+
#include "physical_types.h"
2021
#include "verify_placement.h"
2122
#include "vpr_context.h"
2223
#include "vtr_assert.h"
@@ -115,6 +116,9 @@ static void get_intercluster_switch_fanin_estimates(const t_vpr_setup& vpr_setup
115116
int* opin_switch_fanin,
116117
int* wire_switch_fanin,
117118
int* ipin_switch_fanin);
119+
120+
static void unset_port_equivalences(DeviceContext& device_ctx);
121+
118122
/* Local subroutines end */
119123

120124
///@brief Display general VPR information
@@ -369,6 +373,25 @@ void vpr_init_with_options(const t_options* options, t_vpr_setup* vpr_setup, t_a
369373
device_ctx.pad_loc_type = vpr_setup->PlacerOpts.pad_loc_type;
370374
}
371375

376+
/** Port equivalence does not make sense during flat routing.
377+
* Remove port equivalence from all ports in the architecture */
378+
static void unset_port_equivalences(DeviceContext& device_ctx){
379+
for(auto& physical_type: device_ctx.physical_tile_types){
380+
for(auto& sub_tile: physical_type.sub_tiles){
381+
for(auto& port: sub_tile.ports){
382+
port.equivalent = PortEquivalence::NONE;
383+
}
384+
}
385+
}
386+
for(auto& logical_type: device_ctx.logical_block_types){
387+
if(!logical_type.pb_type)
388+
continue;
389+
for(int i=0; i<logical_type.pb_type->num_ports; i++){
390+
logical_type.pb_type->ports[i].equivalent = PortEquivalence::NONE;
391+
}
392+
}
393+
}
394+
372395
bool vpr_flow(t_vpr_setup& vpr_setup, t_arch& arch) {
373396
if (vpr_setup.exit_before_pack) {
374397
VTR_LOG_WARN("Exiting before packing as requested.\n");
@@ -425,6 +448,8 @@ bool vpr_flow(t_vpr_setup& vpr_setup, t_arch& arch) {
425448

426449
bool is_flat = vpr_setup.RouterOpts.flat_routing;
427450
const Netlist<>& router_net_list = is_flat ? (const Netlist<>&)g_vpr_ctx.atom().nlist : (const Netlist<>&)g_vpr_ctx.clustering().clb_nlist;
451+
if (is_flat)
452+
unset_port_equivalences(g_vpr_ctx.mutable_device());
428453
RouteStatus route_status;
429454
{ //Route
430455
route_status = vpr_route_flow(router_net_list, vpr_setup, arch, is_flat);

vpr/src/pack/post_routing_pb_pin_fixup.cpp

Lines changed: 1 addition & 0 deletions
Original file line number | Diff line number | Diff line change
@@ -1034,6 +1034,7 @@ void sync_netlists_to_routing(const Netlist<>& net_list,
10341034
/* Create net-to-rr_node mapping */
10351035
vtr::vector<RRNodeId, ClusterNetId> rr_node_nets = annotate_rr_node_nets(clustering_ctx,
10361036
device_ctx,
1037+
atom_ctx,
10371038
verbose);
10381039

10391040
IntraLbPbPinLookup intra_lb_pb_pin_lookup(device_ctx.logical_block_types);

vpr/src/pack/sync_netlists_to_routing_flat.cpp

Lines changed: 35 additions & 43 deletions
Original file line number | Diff line number | Diff line change
@@ -47,23 +47,6 @@ static void fixup_atom_pb_graph_pin_mapping(void);
4747

4848
/* Function definitions */
4949

50-
/** Is the clock net found in the routing results?
51-
* (If not, clock_modeling is probably ideal and we should preserve clock routing while rebuilding.) */
52-
inline bool is_clock_net_routed(void){
53-
auto& atom_ctx = g_vpr_ctx.atom();
54-
auto& route_ctx = g_vpr_ctx.routing();
55-
56-
for(auto net_id: atom_ctx.nlist.nets()){
57-
auto& tree = route_ctx.route_trees[net_id];
58-
if(!tree)
59-
continue;
60-
if(route_ctx.is_clock_net[net_id]) /* Clock net has routing */
61-
return true;
62-
}
63-
64-
return false;
65-
}
66-
6750
/** Get the ClusterBlockId for a given RRNodeId. */
6851
inline ClusterBlockId get_cluster_block_from_rr_node(RRNodeId inode){
6952
auto& device_ctx = g_vpr_ctx.device();
@@ -190,21 +173,19 @@ static void sync_pb_routes_to_routing(void){
190173
auto& device_ctx = g_vpr_ctx.device();
191174
auto& atom_ctx = g_vpr_ctx.atom();
192175
auto& cluster_ctx = g_vpr_ctx.mutable_clustering();
176+
auto& place_ctx = g_vpr_ctx.placement();
193177
auto& route_ctx = g_vpr_ctx.routing();
194178
auto& rr_graph = device_ctx.rr_graph;
195179

196-
/* Was the clock net routed? */
197-
bool clock_net_is_routed = is_clock_net_routed();
198-
199180
/* Clear out existing pb_routes: they were made by the intra cluster router and are invalid now */
200181
for (ClusterBlockId clb_blk_id : cluster_ctx.clb_nlist.blocks()) {
201-
/* If we don't have routing for the clock net, don't erase entries associated with a clock net.
202-
* Otherwise we won't have data to rebuild them */
182+
/* Don't erase entries for nets without routing in place (clocks, globals...) */
203183
std::vector<int> pins_to_erase;
204184
auto& pb_routes = cluster_ctx.clb_nlist.block_pb(clb_blk_id)->pb_route;
205185
for(auto& [pin, pb_route]: pb_routes){
206-
if(clock_net_is_routed || !route_ctx.is_clock_net[pb_route.atom_net_id])
207-
pins_to_erase.push_back(pin);
186+
if(!route_ctx.route_trees[ParentNetId(int(pb_route.atom_net_id))])
187+
continue;
188+
pins_to_erase.push_back(pin);
208189
}
209190

210191
for(int pin: pins_to_erase){
@@ -286,8 +267,6 @@ static void sync_clustered_netlist_to_routing(void){
286267
auto& atom_ctx = g_vpr_ctx.mutable_atom();
287268
auto& atom_lookup = atom_ctx.lookup;
288269

289-
bool clock_net_is_routed = is_clock_net_routed();
290-
291270
/* 1. Remove all nets, pins and ports from the clustered netlist.
292271
* If the clock net is not routed, don't remove entries for the clock net
293272
* otherwise we won't have data to rebuild them. */
@@ -297,26 +276,26 @@ static void sync_clustered_netlist_to_routing(void){
297276

298277
for(auto net_id: clb_netlist.nets()){
299278
auto atom_net_id = atom_lookup.atom_net(net_id);
300-
if(!clock_net_is_routed && route_ctx.is_clock_net[atom_net_id])
279+
if(!route_ctx.route_trees[ParentNetId(int(atom_net_id))])
301280
continue;
302281

303282
nets_to_remove.push_back(net_id);
304283
}
305-
for(auto pin_id: clb_netlist.pins()){
306-
ClusterNetId clb_net_id = clb_netlist.pin_net(pin_id);
307-
auto atom_net_id = atom_lookup.atom_net(clb_net_id);
308-
if(!clock_net_is_routed && atom_net_id && route_ctx.is_clock_net[atom_net_id])
309-
continue;
310-
311-
pins_to_remove.push_back(pin_id);
312-
}
313284
for(auto port_id: clb_netlist.ports()){
314-
ClusterNetId clb_net_id = clb_netlist.port_net(port_id, 0);
315-
auto atom_net_id = atom_lookup.atom_net(clb_net_id);
316-
if(!clock_net_is_routed && atom_net_id && route_ctx.is_clock_net[atom_net_id])
317-
continue;
285+
size_t skipped_pins = 0;
286+
287+
for(auto pin_id: clb_netlist.port_pins(port_id)){
288+
ClusterNetId clb_net_id = clb_netlist.pin_net(pin_id);
289+
auto atom_net_id = atom_lookup.atom_net(clb_net_id);
290+
if(atom_net_id && !route_ctx.route_trees[ParentNetId(int(atom_net_id))]){
291+
skipped_pins++;
292+
}else{
293+
pins_to_remove.push_back(pin_id);
294+
}
295+
}
318296

319-
ports_to_remove.push_back(port_id);
297+
if(!skipped_pins) // All pins have been removed, remove port
298+
ports_to_remove.push_back(port_id);
320299
}
321300

322301
/* ClusteredNetlist's iterators rely on internal lookups, so we mark for removal
@@ -357,7 +336,14 @@ static void sync_clustered_netlist_to_routing(void){
357336

358337
int pin_index = rr_graph.node_pin_num(rt_node.inode);
359338

360-
ClusterBlockId clb = get_cluster_block_from_rr_node(rt_node.inode);
339+
auto [_, subtile] = get_sub_tile_from_pin_physical_num(physical_tile, pin_index);
340+
341+
ClusterBlockId clb = place_ctx.grid_blocks().block_at_location({
342+
rr_graph.node_xlow(rt_node.inode),
343+
rr_graph.node_ylow(rt_node.inode),
344+
subtile,
345+
rr_graph.node_layer(rt_node.inode)
346+
});
361347

362348
if(!is_pin_on_tile(physical_tile, pin_index))
363349
continue;
@@ -366,15 +352,15 @@ static void sync_clustered_netlist_to_routing(void){
366352
* Due to how the route tree is traversed, all nodes until the next OPIN on the tile will
367353
* be under this OPIN, so this is valid (we don't need to get the branch explicitly) */
368354
if(node_type == OPIN){
369-
std::string net_name;
370-
net_name = atom_ctx.nlist.net_name(parent_net_id) + "_" + std::to_string(clb_nets_so_far);
355+
std::string net_name = atom_ctx.nlist.net_name(parent_net_id) + "_" + std::to_string(clb_nets_so_far);
371356
clb_net_id = clb_netlist.create_net(net_name);
372357
atom_lookup.add_atom_clb_net(atom_net_id, clb_net_id);
373358
clb_nets_so_far++;
374359
}
375360

376361
t_pb_graph_pin* pb_graph_pin = get_pb_graph_node_pin_from_block_pin(clb, pin_index);
377362

363+
/* Get or create port */
378364
ClusterPortId port_id = clb_netlist.find_port(clb, pb_graph_pin->port->name);
379365
if(!port_id){
380366
PortType port_type;
@@ -431,6 +417,12 @@ static void fixup_atom_pb_graph_pin_mapping(void){
431417

432418
/* Find atom port from pbg pin's model port */
433419
AtomPortId atom_port = atom_ctx.nlist.find_atom_port(atb, atom_pbg_pin->port->model_port);
420+
421+
/* Not an equivalent port, so no need to do fixup */
422+
if (atom_pbg_pin->port->equivalent != PortEquivalence::FULL) {
423+
continue;
424+
}
425+
434426
for(AtomPinId atom_pin: atom_ctx.nlist.port_pins(atom_port)){
435427
/* Match net IDs from pb_route and atom netlist and connect in lookup */
436428
if(pb_route.atom_net_id == atom_ctx.nlist.pin_net(atom_pin)){

vpr/src/route/annotate_routing.cpp

Lines changed: 11 additions & 2 deletions
Original file line number | Diff line number | Diff line change
@@ -15,11 +15,13 @@
1515

1616
vtr::vector<RRNodeId, ClusterNetId> annotate_rr_node_nets(const ClusteringContext& cluster_ctx,
1717
const DeviceContext& device_ctx,
18+
const AtomContext& atom_ctx,
1819
const bool& verbose) {
1920
size_t counter = 0;
2021
vtr::ScopedStartFinishTimer timer("Annotating rr_node with routed nets");
2122

2223
const auto& rr_graph = device_ctx.rr_graph;
24+
auto& atom_lookup = atom_ctx.lookup;
2325

2426
auto& netlist = cluster_ctx.clb_nlist;
2527
vtr::vector<RRNodeId, ClusterNetId> rr_node_nets;
@@ -47,11 +49,18 @@ vtr::vector<RRNodeId, ClusterNetId> annotate_rr_node_nets(const ClusteringContex
4749
* In some routing architectures, node capacity is more than 1
4850
* which allows a node to be mapped by multiple nets
4951
* Therefore, the sanity check should focus on the nodes
50-
* whose capacity is 1
52+
* whose capacity is 1.
53+
* Flat routing may create two clustered nets from a single
54+
* atom net, which will point to the same atom net routing.
55+
* Ignore clashes if the clustered nets are mapped to the
56+
* same atom net
5157
*/
58+
AtomNetId my_atom = atom_lookup.atom_net(net_id);
59+
AtomNetId existing_atom = atom_lookup.atom_net(rr_node_nets[rr_node]);
5260
if ((rr_node_nets[rr_node])
5361
&& (1 == rr_graph.node_capacity(rr_node))
54-
&& (net_id != rr_node_nets[rr_node])) {
62+
&& (net_id != rr_node_nets[rr_node])
63+
&& (my_atom != existing_atom)) {
5564
VPR_FATAL_ERROR(VPR_ERROR_ANALYSIS,
5665
"Detect two nets '%s' and '%s' that are mapped to the same rr_node '%ld'!\n%s\n",
5766
netlist.net_name(net_id).c_str(),

vpr/src/route/annotate_routing.h

Lines changed: 1 addition & 0 deletions
Original file line number | Diff line number | Diff line change
@@ -12,6 +12,7 @@
1212
*******************************************************************/
1313
vtr::vector<RRNodeId, ClusterNetId> annotate_rr_node_nets(const ClusteringContext& cluster_ctx,
1414
const DeviceContext& device_ctx,
15+
const AtomContext& atom_ctx,
1516
const bool& verbose);
1617

1718
#endif

0 commit comments

Comments (0)