@@ -589,7 +589,7 @@ void try_place(const t_placer_opts& placer_opts,
589
589
num_swap_aborted = 0 ;
590
590
num_ts_called = 0 ;
591
591
592
- if (placer_opts.place_algorithm == CRITICALITY_TIMING_PLACE ) {
592
+ if (placer_opts.place_algorithm . is_timing_driven () ) {
593
593
/* do this before the initial placement to avoid messing up the initial placement */
594
594
place_delay_model = alloc_lookups_and_criticalities (chan_width_dist, placer_opts, router_opts, det_routing_arch, segment_inf, directs, num_directs);
595
595
@@ -620,7 +620,7 @@ void try_place(const t_placer_opts& placer_opts,
620
620
621
621
/* Gets initial cost and loads bounding boxes. */
622
622
623
- if (placer_opts.place_algorithm == CRITICALITY_TIMING_PLACE ) {
623
+ if (placer_opts.place_algorithm . is_timing_driven () ) {
624
624
costs.bb_cost = comp_bb_cost (NORMAL);
625
625
626
626
first_crit_exponent = placer_opts.td_place_exp_first ; /* this will be modified when rlim starts to change */
@@ -696,7 +696,7 @@ void try_place(const t_placer_opts& placer_opts,
696
696
// Initial pacement statistics
697
697
VTR_LOG (" Initial placement cost: %g bb_cost: %g td_cost: %g\n " ,
698
698
costs.cost , costs.bb_cost , costs.timing_cost );
699
- if (placer_opts.place_algorithm == CRITICALITY_TIMING_PLACE ) {
699
+ if (placer_opts.place_algorithm . is_timing_driven () ) {
700
700
VTR_LOG (" Initial placement estimated Critical Path Delay (CPD): %g ns\n " ,
701
701
1e9 * critical_path.delay ());
702
702
VTR_LOG (" Initial placement estimated setup Total Negative Slack (sTNS): %g ns\n " ,
@@ -802,7 +802,7 @@ void try_place(const t_placer_opts& placer_opts,
802
802
/* Outer loop of the simulated annealing begins */
803
803
do {
804
804
vtr::Timer temperature_timer;
805
- if (placer_opts.place_algorithm == CRITICALITY_TIMING_PLACE ) {
805
+ if (placer_opts.place_algorithm . is_timing_driven () ) {
806
806
costs.cost = 1 ;
807
807
}
808
808
@@ -836,7 +836,7 @@ void try_place(const t_placer_opts& placer_opts,
836
836
837
837
++num_temps;
838
838
839
- if (placer_opts.place_algorithm == CRITICALITY_TIMING_PLACE ) {
839
+ if (placer_opts.place_algorithm . is_timing_driven () ) {
840
840
critical_path = timing_info->least_slack_critical_path ();
841
841
sTNS = timing_info->setup_total_negative_slack ();
842
842
sWNS = timing_info->setup_worst_negative_slack ();
@@ -898,7 +898,7 @@ void try_place(const t_placer_opts& placer_opts,
898
898
899
899
calc_placer_stats (stats, success_rat, std_dev, costs, move_lim);
900
900
901
- if (placer_opts.place_quench_algorithm == CRITICALITY_TIMING_PLACE ) {
901
+ if (placer_opts.place_quench_algorithm . is_timing_driven () ) {
902
902
critical_path = timing_info->least_slack_critical_path ();
903
903
sTNS = timing_info->setup_total_negative_slack ();
904
904
sWNS = timing_info->setup_worst_negative_slack ();
@@ -935,7 +935,7 @@ void try_place(const t_placer_opts& placer_opts,
935
935
VTR_LOG (" Swaps called: %d\n " , num_ts_called);
936
936
report_aborted_moves ();
937
937
938
- if (placer_opts.place_algorithm == CRITICALITY_TIMING_PLACE ) {
938
+ if (placer_opts.place_algorithm . is_timing_driven () ) {
939
939
// Final timing estimate
940
940
VTR_ASSERT (timing_info);
941
941
perform_full_timing_update (state.crit_exponent ,
@@ -1009,7 +1009,7 @@ static void outer_loop_update_timing_info(const t_placer_opts& placer_opts,
1009
1009
PlacerSetupSlacks* setup_slacks,
1010
1010
ClusteredPinTimingInvalidator* pin_timing_invalidator,
1011
1011
SetupTimingInfo* timing_info) {
1012
- if (placer_opts.place_algorithm != CRITICALITY_TIMING_PLACE ) {
1012
+ if (! placer_opts.place_algorithm . is_timing_driven () ) {
1013
1013
return ;
1014
1014
}
1015
1015
@@ -1238,7 +1238,7 @@ static void placement_inner_loop(float t,
1238
1238
num_swap_rejected++;
1239
1239
}
1240
1240
1241
- if (placer_opts.place_algorithm == CRITICALITY_TIMING_PLACE ) {
1241
+ if (placer_opts.place_algorithm . is_timing_driven () ) {
1242
1242
/* Do we want to re-timing analyze the circuit to get updated slack and criticality values?
1243
1243
* We do this only once in a while, since it is expensive.
1244
1244
*/
@@ -1303,7 +1303,7 @@ static void recompute_costs_from_scratch(const t_placer_opts& placer_opts,
1303
1303
}
1304
1304
costs->bb_cost = new_bb_cost;
1305
1305
1306
- if (placer_opts.place_algorithm == CRITICALITY_TIMING_PLACE ) {
1306
+ if (placer_opts.place_algorithm . is_timing_driven () ) {
1307
1307
double new_timing_cost = 0 .;
1308
1308
comp_td_costs (delay_model, *criticalities, &new_timing_cost);
1309
1309
if (fabs (new_timing_cost - costs->timing_cost ) > costs->timing_cost * ERROR_TOL) {
@@ -1424,7 +1424,7 @@ static bool update_annealing_state(t_annealing_state* state,
1424
1424
// The idea is that as the range limit shrinks (indicating we are fine-tuning a more optimized placement) we can focus more on a smaller number of critical connections, which a higher crit_exponent achieves.
1425
1425
update_rlim (&state->rlim , success_rat, device_ctx.grid );
1426
1426
1427
- if (placer_opts.place_algorithm == CRITICALITY_TIMING_PLACE ) {
1427
+ if (placer_opts.place_algorithm . is_timing_driven () ) {
1428
1428
state->crit_exponent = (1 - (state->rlim - FINAL_RLIM) * state->inverse_delta_rlim )
1429
1429
* (placer_opts.td_place_exp_last - placer_opts.td_place_exp_first )
1430
1430
+ placer_opts.td_place_exp_first ;
@@ -1817,7 +1817,7 @@ static int find_affected_nets_and_update_costs(const t_place_algorithm& place_al
1817
1817
// once per net, not once per pin.
1818
1818
update_net_bb (net_id, blocks_affected, iblk, blk, blk_pin);
1819
1819
1820
- if (place_algorithm == CRITICALITY_TIMING_PLACE || place_algorithm == SLACK_TIMING_PLACE ) {
1820
+ if (place_algorithm. is_timing_driven () ) {
1821
1821
// Determine the change in timing costs if required
1822
1822
update_td_delta_costs (delay_model, *criticalities, net_id, blk_pin, blocks_affected, timing_delta_c);
1823
1823
}
@@ -2440,7 +2440,7 @@ static void alloc_and_load_placement_structs(float place_cost_exp,
2440
2440
max_pins_per_clb = max (max_pins_per_clb, type.num_pins );
2441
2441
}
2442
2442
2443
- if (placer_opts.place_algorithm == CRITICALITY_TIMING_PLACE ) {
2443
+ if (placer_opts.place_algorithm . is_timing_driven () ) {
2444
2444
/* Allocate structures associated with timing driven placement */
2445
2445
/* [0..cluster_ctx.clb_nlist.nets().size()-1][1..num_pins-1] */
2446
2446
connection_delay = make_net_pins_matrix<float >(cluster_ctx.clb_nlist , 0 .f );
@@ -2486,7 +2486,7 @@ static void alloc_and_load_placement_structs(float place_cost_exp,
2486
2486
/* Frees the major structures needed by the placer (and not needed *
2487
2487
* elsewhere). */
2488
2488
static void free_placement_structs (const t_placer_opts& placer_opts) {
2489
- if (placer_opts.place_algorithm == CRITICALITY_TIMING_PLACE ) {
2489
+ if (placer_opts.place_algorithm . is_timing_driven () ) {
2490
2490
vtr::release_memory (connection_timing_cost);
2491
2491
vtr::release_memory (connection_delay);
2492
2492
vtr::release_memory (connection_setup_slack);
@@ -3085,7 +3085,7 @@ static int check_placement_costs(const t_placer_costs& costs,
3085
3085
error++;
3086
3086
}
3087
3087
3088
- if (place_algorithm == CRITICALITY_TIMING_PLACE ) {
3088
+ if (place_algorithm. is_timing_driven () ) {
3089
3089
comp_td_costs (delay_model, *criticalities, &timing_cost_check);
3090
3090
// VTR_LOG("timing_cost recomputed from scratch: %g\n", timing_cost_check);
3091
3091
if (fabs (timing_cost_check - costs.timing_cost ) > costs.timing_cost * ERROR_TOL) {
@@ -3355,5 +3355,5 @@ static void init_annealing_state(t_annealing_state* state,
3355
3355
}
3356
3356
3357
3357
bool placer_needs_lookahead (const t_vpr_setup& vpr_setup) {
3358
- return (vpr_setup.PlacerOpts .place_algorithm == CRITICALITY_TIMING_PLACE );
3358
+ return (vpr_setup.PlacerOpts .place_algorithm . is_timing_driven () );
3359
3359
}