@@ -80,7 +80,7 @@ bool alloc_route_tree_timing_structs(bool exists_ok) {
80
80
/* Allocates any structures needed to build the routing trees. */
81
81
82
82
auto & device_ctx = g_vpr_ctx.device ();
83
-
83
+ // ESR API Access (Get total number of nodes)
84
84
bool route_tree_structs_are_allocated = (rr_node_to_rt_node.size () == size_t (device_ctx.rr_nodes .size ())
85
85
|| rt_node_free_list != nullptr );
86
86
if (route_tree_structs_are_allocated) {
@@ -196,6 +196,7 @@ t_rt_node* init_route_tree_to_source(ClusterNetId inet) {
196
196
197
197
rt_root->inode = inode;
198
198
rt_root->net_pin_index = OPEN;
199
+ // ESR API Access (get Capacitance and Resistance [4 times])
199
200
rt_root->C_downstream = device_ctx.rr_nodes [inode].C ();
200
201
rt_root->R_upstream = device_ctx.rr_nodes [inode].R ();
201
202
rt_root->Tdel = 0.5 * device_ctx.rr_nodes [inode].R () * device_ctx.rr_nodes [inode].C ();
@@ -264,7 +265,8 @@ t_rt_node* update_route_tree(t_heap* hptr, int target_net_pin_index, SpatialRout
264
265
void add_route_tree_to_rr_node_lookup (t_rt_node* node) {
265
266
if (node) {
266
267
auto & device_ctx = g_vpr_ctx.device ();
267
- if (device_ctx.rr_nodes [node->inode ].type () == SINK) {
268
+ // ESR API Access (get type of node)
269
+ if (device_ctx.rr_nodes [node->inode ].type () == SINK) { // ESR keep iterating until you get to a sink?
268
270
VTR_ASSERT (rr_node_to_rt_node[node->inode ] == nullptr || rr_node_to_rt_node[node->inode ]->inode == node->inode );
269
271
} else {
270
272
VTR_ASSERT (rr_node_to_rt_node[node->inode ] == nullptr || rr_node_to_rt_node[node->inode ] == node);
@@ -318,6 +320,7 @@ add_subtree_to_route_tree(t_heap* hptr, int target_net_pin_index, t_rt_node** si
318
320
std::unordered_set<int > all_visited; // does not include sink
319
321
inode = hptr->prev_node ();
320
322
RREdgeId edge = hptr->prev_edge ();
323
+ // ESR API Access (get the switch for the edge)
321
324
short iswitch = device_ctx.rr_nodes .edge_switch (edge);
322
325
323
326
/* For all "new" nodes in the main path */
@@ -348,6 +351,7 @@ add_subtree_to_route_tree(t_heap* hptr, int target_net_pin_index, t_rt_node** si
348
351
349
352
rr_node_to_rt_node[inode] = rt_node;
350
353
354
+ // ESR API Access (get type of node)
351
355
if (device_ctx.rr_nodes [inode].type () == IPIN) {
352
356
rt_node->re_expand = false ;
353
357
} else {
@@ -357,6 +361,7 @@ add_subtree_to_route_tree(t_heap* hptr, int target_net_pin_index, t_rt_node** si
357
361
downstream_rt_node = rt_node;
358
362
edge = route_ctx.rr_node_route_inf [inode].prev_edge ;
359
363
inode = route_ctx.rr_node_route_inf [inode].prev_node ;
364
+ // ESR API Access (get the switch for the edge)
360
365
iswitch = device_ctx.rr_nodes .edge_switch (edge);
361
366
}
362
367
@@ -405,7 +410,7 @@ static t_rt_node* add_non_configurable_to_route_tree(const int rr_node, const bo
405
410
rt_node->u .child_list = nullptr ;
406
411
rt_node->inode = rr_node;
407
412
rt_node->net_pin_index = OPEN;
408
-
413
+ // ESR API Access (get type of node)
409
414
if (device_ctx.rr_nodes [rr_node].type () == IPIN) {
410
415
rt_node->re_expand = false ;
411
416
} else {
@@ -415,7 +420,7 @@ static t_rt_node* add_non_configurable_to_route_tree(const int rr_node, const bo
415
420
VTR_ASSERT (rt_node->inode == rr_node);
416
421
}
417
422
}
418
-
423
+ // ESR API Access (get non configurable edges, configurable edges, and edge_sink_node)
419
424
for (int iedge : device_ctx.rr_nodes [rr_node].non_configurable_edges ()) {
420
425
// Recursive case: expand children
421
426
VTR_ASSERT (!device_ctx.rr_nodes [rr_node].edge_is_configurable (iedge));
@@ -426,7 +431,7 @@ static t_rt_node* add_non_configurable_to_route_tree(const int rr_node, const bo
426
431
t_rt_node* child_rt_node = add_non_configurable_to_route_tree (to_rr_node, true , visited);
427
432
428
433
if (!child_rt_node) continue ;
429
-
434
+ // ESR API Access (get type of switch)
430
435
int iswitch = device_ctx.rr_nodes [rr_node].edge_switch (iedge);
431
436
432
437
// Create the edge
@@ -472,6 +477,7 @@ void load_new_subtree_R_upstream(t_rt_node* rt_node) {
472
477
}
473
478
R_upstream += device_ctx.rr_switch_inf [iswitch].R ; // Parent switch R
474
479
}
480
+ // ESR API Access (get current node Resistance)
475
481
R_upstream += device_ctx.rr_nodes [inode].R (); // Current node R
476
482
477
483
rt_node->R_upstream = R_upstream;
@@ -487,7 +493,7 @@ float load_new_subtree_C_downstream(t_rt_node* rt_node) {
487
493
488
494
if (rt_node) {
489
495
auto & device_ctx = g_vpr_ctx.device ();
490
-
496
+ // ESR API Access (get node's Capacitance)
491
497
C_downstream += device_ctx.rr_nodes [rt_node->inode ].C ();
492
498
for (t_linked_rt_edge* edge = rt_node->u .child_list ; edge != nullptr ; edge = edge->next ) {
493
499
/* Similar to net_delay.cpp, this for loop traverses a rc subtree, whose edges represent enabled switches.
@@ -580,7 +586,7 @@ void load_route_tree_Tdel(t_rt_node* subtree_rt_root, float Tarrival) {
580
586
/* Assuming the downstream connections are, on average, connected halfway
581
587
* along a wire segment's length. See discussion in net_delay.c if you want
582
588
* to change this. */
583
-
589
+ // ESR API Access (get node's Resistance)
584
590
Tdel = Tarrival + 0.5 * subtree_rt_root->C_downstream * device_ctx.rr_nodes [inode].R ();
585
591
subtree_rt_root->Tdel = Tdel;
586
592
@@ -688,6 +694,7 @@ void print_route_tree(const t_rt_node* rt_node, int depth) {
688
694
}
689
695
690
696
auto & device_ctx = g_vpr_ctx.device ();
697
+ // ESR API Access (get node's type string)
691
698
VTR_LOG (" %srt_node: %d (%s) \t ipin: %d \t R: %g \t C: %g \t delay: %g" ,
692
699
indent.c_str (), rt_node->inode , device_ctx.rr_nodes [rt_node->inode ].type_string (), rt_node->net_pin_index , rt_node->R_upstream , rt_node->C_downstream , rt_node->Tdel );
693
700
@@ -699,7 +706,7 @@ void print_route_tree(const t_rt_node* rt_node, int depth) {
699
706
}
700
707
701
708
auto & route_ctx = g_vpr_ctx.routing ();
702
-
709
+ // ESR API Access (Get node's capacity)
703
710
if (route_ctx.rr_node_route_inf [rt_node->inode ].occ () > device_ctx.rr_nodes [rt_node->inode ].capacity ()) {
704
711
VTR_LOG (" x" );
705
712
}
@@ -801,6 +808,7 @@ static t_trace* traceback_to_route_tree_branch(t_trace* trace,
801
808
// In some cases, the same sink node is put into the tree multiple times in a single route.
802
809
// So it is possible to hit the same node index multiple times during traceback. Create a
803
810
// separate rt_node for each sink with the same node index.
811
+ // ESR API Access (Get node's type)
804
812
if (itr == rr_node_to_rt.end () || device_ctx.rr_nodes [inode].type () == SINK) {
805
813
// Create
806
814
@@ -813,7 +821,7 @@ static t_trace* traceback_to_route_tree_branch(t_trace* trace,
813
821
node->R_upstream = std::numeric_limits<float >::quiet_NaN ();
814
822
node->C_downstream = std::numeric_limits<float >::quiet_NaN ();
815
823
node->Tdel = std::numeric_limits<float >::quiet_NaN ();
816
-
824
+ // ESR API Access (Get node's type)
817
825
auto node_type = device_ctx.rr_nodes [inode].type ();
818
826
if (node_type == IPIN || node_type == SINK)
819
827
node->re_expand = false ;
@@ -954,6 +962,7 @@ t_trace* traceback_from_route_tree(ClusterNetId inet, const t_rt_node* root, int
954
962
nodes.insert (trace->index );
955
963
956
964
// Sanity check that number of sinks match expected
965
+ // ESR API Access (Get node's type)
957
966
if (device_ctx.rr_nodes [trace->index ].type () == SINK) {
958
967
num_trace_sinks += 1 ;
959
968
}
@@ -978,7 +987,7 @@ static t_rt_node* prune_route_tree_recurr(t_rt_node* node, CBRR& connections_inf
978
987
979
988
auto & device_ctx = g_vpr_ctx.device ();
980
989
auto & route_ctx = g_vpr_ctx.routing ();
981
-
990
+ // ESR API Access (Get node's capacity)
982
991
bool congested = (route_ctx.rr_node_route_inf [node->inode ].occ () > device_ctx.rr_nodes [node->inode ].capacity ());
983
992
int node_set = -1 ;
984
993
auto itr = device_ctx.rr_node_to_non_config_node_set .find (node->inode );
@@ -1036,7 +1045,7 @@ static t_rt_node* prune_route_tree_recurr(t_rt_node* node, CBRR& connections_inf
1036
1045
edge = edge->next ;
1037
1046
}
1038
1047
}
1039
-
1048
+ // ESR API Access (Get node's type)
1040
1049
if (device_ctx.rr_nodes [node->inode ].type () == SINK) {
1041
1050
if (!force_prune) {
1042
1051
// Valid path to sink
@@ -1172,9 +1181,9 @@ t_rt_node* prune_route_tree(t_rt_node* rt_root, CBRR& connections_inf, std::vect
1172
1181
1173
1182
auto & device_ctx = g_vpr_ctx.device ();
1174
1183
auto & route_ctx = g_vpr_ctx.routing ();
1175
-
1184
+ // ESR API Access (Get node's type)
1176
1185
VTR_ASSERT_MSG (device_ctx.rr_nodes [rt_root->inode ].type () == SOURCE, " Root of route tree must be SOURCE" );
1177
-
1186
+ // ESR API Access (Get node's capacity)
1178
1187
VTR_ASSERT_MSG (route_ctx.rr_node_route_inf [rt_root->inode ].occ () <= device_ctx.rr_nodes [rt_root->inode ].capacity (),
1179
1188
" Route tree root/SOURCE should never be congested" );
1180
1189
@@ -1258,6 +1267,7 @@ static void print_node(const t_rt_node* rt_node) {
1258
1267
auto & device_ctx = g_vpr_ctx.device ();
1259
1268
1260
1269
int inode = rt_node->inode ;
1270
+ // ESR API Access (Get node's type)
1261
1271
t_rr_type node_type = device_ctx.rr_nodes [inode].type ();
1262
1272
VTR_LOG (" %5.1e %5.1e %2d%6s|%-6d-> " , rt_node->C_downstream , rt_node->R_upstream ,
1263
1273
rt_node->re_expand , rr_node_typename[node_type], inode);
@@ -1278,6 +1288,7 @@ static void print_node_congestion(const t_rt_node* rt_node) {
1278
1288
1279
1289
int inode = rt_node->inode ;
1280
1290
const auto & node_inf = route_ctx.rr_node_route_inf [inode];
1291
+ // ESR API Access (Get node)
1281
1292
const auto & node = device_ctx.rr_nodes [inode];
1282
1293
VTR_LOG (" %2d %2d|%-6d-> " , node_inf.acc_cost , rt_node->Tdel ,
1283
1294
node_inf.occ (), node.capacity (), inode);
@@ -1369,18 +1380,21 @@ bool is_valid_route_tree(const t_rt_node* root) {
1369
1380
short iswitch = root->parent_switch ;
1370
1381
if (root->parent_node ) {
1371
1382
if (device_ctx.rr_switch_inf [iswitch].buffered ()) {
1383
+ // ESR API Access (Get node's Resistance)
1372
1384
float R_upstream_check = device_ctx.rr_nodes [inode].R () + device_ctx.rr_switch_inf [iswitch].R ;
1373
1385
if (!vtr::isclose (root->R_upstream , R_upstream_check, RES_REL_TOL, RES_ABS_TOL)) {
1374
1386
VTR_LOG (" %d mismatch R upstream %e supposed %e\n " , inode, root->R_upstream , R_upstream_check);
1375
1387
return false ;
1376
1388
}
1377
1389
} else {
1390
+ // ESR API Access (Get node's Resistance)
1378
1391
float R_upstream_check = device_ctx.rr_nodes [inode].R () + root->parent_node ->R_upstream + device_ctx.rr_switch_inf [iswitch].R ;
1379
1392
if (!vtr::isclose (root->R_upstream , R_upstream_check, RES_REL_TOL, RES_ABS_TOL)) {
1380
1393
VTR_LOG (" %d mismatch R upstream %e supposed %e\n " , inode, root->R_upstream , R_upstream_check);
1381
1394
return false ;
1382
1395
}
1383
1396
}
1397
+ // ESR API Access (Get node's Resistance [two spots])
1384
1398
} else if (root->R_upstream != device_ctx.rr_nodes [inode].R ()) {
1385
1399
VTR_LOG (" %d mismatch R upstream %e supposed %e\n " , inode, root->R_upstream , device_ctx.rr_nodes [inode].R ());
1386
1400
return false ;
@@ -1392,6 +1406,7 @@ bool is_valid_route_tree(const t_rt_node* root) {
1392
1406
// sink, must not be congested
1393
1407
if (!edge) {
1394
1408
int occ = route_ctx.rr_node_route_inf [inode].occ ();
1409
+ // ESR API Access (Get node's capacity)
1395
1410
int capacity = device_ctx.rr_nodes [inode].capacity ();
1396
1411
if (occ > capacity) {
1397
1412
VTR_LOG (" SINK %d occ %d > cap %d\n " , inode, occ, capacity);
@@ -1423,7 +1438,7 @@ bool is_valid_route_tree(const t_rt_node* root) {
1423
1438
}
1424
1439
edge = edge->next ;
1425
1440
}
1426
-
1441
+ // ESR API Access (Get node's Capacitance)
1427
1442
float C_downstream_check = C_downstream_children + device_ctx.rr_nodes [inode].C ();
1428
1443
if (!vtr::isclose (root->C_downstream , C_downstream_check, CAP_REL_TOL, CAP_ABS_TOL)) {
1429
1444
VTR_LOG (" %d mismatch C downstream %e supposed %e\n " , inode, root->C_downstream , C_downstream_check);
@@ -1439,6 +1454,7 @@ bool is_uncongested_route_tree(const t_rt_node* root) {
1439
1454
auto & device_ctx = g_vpr_ctx.device ();
1440
1455
1441
1456
int inode = root->inode ;
1457
+ // ESR API Access (Get node's capacity)
1442
1458
if (route_ctx.rr_node_route_inf [inode].occ () > device_ctx.rr_nodes [inode].capacity ()) {
1443
1459
// This node is congested
1444
1460
return false ;
@@ -1471,6 +1487,7 @@ init_route_tree_to_source_no_net(int inode) {
1471
1487
rt_root->re_expand = true ;
1472
1488
rt_root->inode = inode;
1473
1489
rt_root->net_pin_index = OPEN;
1490
+ // ESR API Access (Get node's Capacitance and Resistance [4 times])
1474
1491
rt_root->C_downstream = device_ctx.rr_nodes [inode].C ();
1475
1492
rt_root->R_upstream = device_ctx.rr_nodes [inode].R ();
1476
1493
rt_root->Tdel = 0.5 * device_ctx.rr_nodes [inode].R () * device_ctx.rr_nodes [inode].C ();
0 commit comments