
Commit beb1a11

remove dusty_sa_options and fix a few typos
Parent: 51c57d9

2 files changed: +12, -55 lines


doc/src/vpr/command_line_usage.rst

Lines changed: 7 additions & 43 deletions
@@ -830,47 +830,9 @@ If any of init_t, exit_t or alpha_t is specified, the user schedule, with a fixe
 
     **Default:** ``0.0``
 
-.. _dusty_sa_options:
-Setting any of the following 5 options selects :ref:`Dusty's annealing schedule <dusty_sa>` .
-
-.. option:: --alpha_min <float>
-
-    The minimum (starting) update factor (alpha) used.
-    Ranges between 0 and alpha_max.
-
-    **Default:** ``0.2``
-
-.. option:: --alpha_max <float>
-
-    The maximum (stopping) update factor (alpha) used after which simulated annealing will complete.
-    Ranges between alpha_min and 1.
-
-    **Default:** ``0.9``
-
-.. option:: --alpha_decay <float>
-
-    The rate at which alpha will approach 1: alpha(n) = 1 - (1 - alpha(n-1)) * alpha_decay
-    Ranges between 0 and 1.
-
-    **Default:** ``0.7``
-
-.. option:: --anneal_success_min <float>
-
-    The minimum success ratio after which the temperature will reset to maintain the target success ratio.
-    Ranges between 0 and anneal_success_target.
-
-    **Default:** ``0.1``
-
-.. option:: --anneal_success_target <float>
-
-    The temperature after each reset is selected to keep this target success ratio.
-    Ranges between anneal_success_target and 1.
-
-    **Default:** ``0.25``
-
 .. option:: --RL_agent_placement {on | off}
 
-    Uses a Reinforcement Learning (RL) agent in choosing the appropiate move type in placement.
+    Uses a Reinforcement Learning (RL) agent in choosing the appropriate move type in placement.
     It activates the RL agent placement instead of using a fixed probability for each move type.
 
     **Default:** ``on``
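
For context, the ``--alpha_decay`` update removed above is a simple geometric recurrence toward 1. A minimal standalone sketch (not VPR code; the start, stop, and decay values are the defaults from the removed documentation):

    #include <cstdio>

    // Sketch of the removed Dusty's-schedule alpha update:
    //   alpha(n) = 1 - (1 - alpha(n-1)) * alpha_decay
    // starting at alpha_min and stopping once alpha exceeds alpha_max,
    // with the removed defaults (0.2, 0.9, 0.7).
    int main() {
        const double alpha_min = 0.2, alpha_max = 0.9, alpha_decay = 0.7;
        double alpha = alpha_min;
        for (int n = 0; alpha <= alpha_max; ++n) {
            std::printf("step %d: alpha = %.4f\n", n, alpha);
            alpha = 1.0 - (1.0 - alpha) * alpha_decay; // geometric approach toward 1
        }
        return 0;
    }

With those defaults the sequence runs 0.2, 0.44, 0.608, 0.7256, ... and passes alpha_max after six steps, at which point annealing completes.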
@@ -899,7 +861,7 @@ Setting any of the following 5 options selects :ref:`Dusty's annealing schedule
 
     Controls how quickly the agent's memory decays. Values between [0., 1.] specify
     the fraction of weight in the exponentially weighted reward average applied to moves
-    which occured greater than moves_per_temp moves ago. Values < 0 cause the
+    which occurred greater than moves_per_temp moves ago. Values < 0 cause the
     unweighted reward sample average to be used (all samples are weighted equally)
 
     **Default:** ``0.05``
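
To make that help text concrete, here is one way such a decay parameter can be realized. This is an illustrative sketch, not VPR's implementation; in particular, mapping gamma to a per-sample retention factor r = gamma^(1/moves_per_temp) and the moves_per_temp value are assumptions based on the wording above:

    #include <cmath>
    #include <cstdio>

    // Illustrative exponentially weighted reward average (not VPR internals).
    // Assumption: gamma is the total weight left on rewards older than
    // moves_per_temp samples, so r^moves_per_temp = gamma.
    // gamma < 0 selects the plain (unweighted) sample mean instead.
    int main() {
        const double gamma = 0.05;       // --place_agent_gamma default
        const int moves_per_temp = 100;  // hypothetical window size
        const double r = std::pow(gamma, 1.0 / moves_per_temp);

        double avg = 0.0;
        long n = 0;
        for (int i = 0; i < 1000; ++i) {
            double reward = (i % 3 == 0) ? 1.0 : 0.0; // synthetic reward stream
            ++n;
            if (gamma >= 0.0)
                avg = r * avg + (1.0 - r) * reward;   // recent moves dominate
            else
                avg += (reward - avg) / n;            // all samples weighted equally
        }
        std::printf("reward average = %.4f\n", avg);
        return 0;
    }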
@@ -918,6 +880,8 @@ Setting any of the following 5 options selects :ref:`Dusty's annealing schedule
 
     **Default:** ``move_block_type``
 
+
+
 .. option:: --placer_debug_block <int>
 
     .. note:: This option is likely only of interest to developers debugging the placement algorithm
@@ -1015,7 +979,7 @@ The following options are only valid when the placement engine is in timing-driv
 
 .. option:: --place_delay_model_reducer {min, max, median, arithmean, geomean}
 
-    When calculating delta delays for the placment delay model how are multiple values combined?
+    When calculating delta delays for the placement delay model how are multiple values combined?
 
     **Default:** ``min``
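
The five reducer choices are ordinary statistics over the sampled delta delays. A small sketch with made-up delay values:

    #include <algorithm>
    #include <cmath>
    #include <cstdio>
    #include <vector>

    // The five reducers named by --place_delay_model_reducer, applied to a
    // set of sampled delta delays (the values here are illustrative only).
    int main() {
        std::vector<double> d = {1.0e-10, 2.0e-10, 4.0e-10, 8.0e-10};

        double mn = *std::min_element(d.begin(), d.end());
        double mx = *std::max_element(d.begin(), d.end());

        std::vector<double> s = d;
        std::sort(s.begin(), s.end());
        double median = (s[s.size() / 2] + s[(s.size() - 1) / 2]) / 2.0;

        double arithmean = 0.0, log_sum = 0.0;
        for (double x : d) { arithmean += x; log_sum += std::log(x); }
        arithmean /= d.size();
        double geomean = std::exp(log_sum / d.size());

        std::printf("min=%g max=%g median=%g arithmean=%g geomean=%g\n",
                    mn, mx, median, arithmean, geomean);
        return 0;
    }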

@@ -1048,15 +1012,15 @@ The following options are only valid when the placement engine is in timing-driv
 
 .. option:: --place_tsu_abs_margin <float>
 
-    Specifies an absolute offest added to cell setup times used by the placer.
+    Specifies an absolute offset added to cell setup times used by the placer.
     This effectively controls whether the placer should try to achieve extra margin on setup paths.
     For example a value of 500e-12 corresponds to requesting an extra 500ps of setup margin.
 
     **Default:** ``0.0``
 
 .. option:: --post_place_timing_report <file>
 
-    Name of the post-placement timing report file to generate (not generated if unspecfied).
+    Name of the post-placement timing report file to generate (not generated if unspecified).
 
 
 .. _noc_placement_options:
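
The setup margin is a plain additive offset, as a quick numeric sketch shows (the 120 ps cell setup time is a made-up example; the 500e-12 figure comes from the option's documentation above):

    #include <cstdio>

    // What an absolute setup-time margin means numerically: the placer
    // would treat each cell's setup time as tsu + margin.
    int main() {
        const double tsu = 120e-12;     // hypothetical cell setup time: 120 ps
        const double margin = 500e-12;  // --place_tsu_abs_margin 500e-12
        std::printf("effective tsu = %g ps\n", (tsu + margin) * 1e12); // 620 ps
        return 0;
    }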

vpr/src/base/read_options.cpp

Lines changed: 5 additions & 12 deletions
@@ -2067,7 +2067,7 @@ argparse::ArgumentParser create_arg_parser(const std::string& prog_name, t_optio
 
     place_grp.add_argument<bool, ParseOnOff>(args.RL_agent_placement, "--RL_agent_placement")
         .help(
-            "Uses a Reinforcement Learning (RL) agent in choosing the appropiate move type in placement."
+            "Uses a Reinforcement Learning (RL) agent in choosing the appropriate move type in placement."
             "It activates the RL agent placement instead of using fixed probability for each move type.")
         .default_value("on")
         .show_in(argparse::ShowIn::HELP_ONLY);
@@ -2082,7 +2082,7 @@ argparse::ArgumentParser create_arg_parser(const std::string& prog_name, t_optio
     place_grp.add_argument<bool, ParseOnOff>(args.place_checkpointing, "--place_checkpointing")
         .help(
             "Enable Placement checkpoints. This means saving the placement and restore it if it's better than later placements."
-            "Only effective if agnet's 2nd state is activated.")
+            "Only effective if agent's 2nd state is activated.")
         .default_value("on")
         .show_in(argparse::ShowIn::HELP_ONLY);

@@ -2096,7 +2096,7 @@ argparse::ArgumentParser create_arg_parser(const std::string& prog_name, t_optio
     place_grp.add_argument(args.place_agent_gamma, "--place_agent_gamma")
         .help(
             "Controls how quickly the agent's memory decays. "
-            "Values between [0., 1.] specify the fraction of weight in the exponentially weighted reward average applied to moves which occured greater than moves_per_temp moves ago."
+            "Values between [0., 1.] specify the fraction of weight in the exponentially weighted reward average applied to moves which occurred greater than moves_per_temp moves ago."
             "Values < 0 cause the unweighted reward sample average to be used (all samples are weighted equally)")
         .default_value("0.05")
         .show_in(argparse::ShowIn::HELP_ONLY);
@@ -2159,13 +2159,6 @@ argparse::ArgumentParser create_arg_parser(const std::string& prog_name, t_optio
         .default_value("0")
         .show_in(argparse::ShowIn::HELP_ONLY);
 
-    /*
-     * place_grp.add_argument(args.place_timing_cost_func, "--place_timing_cost_func")
-     *     .help(
-     *         "which timing cost function to use")
-     *     .default_value("0")
-     *     .show_in(argparse::ShowIn::HELP_ONLY);
-     */
     place_grp.add_argument<e_agent_algorithm, ParsePlaceAgentAlgorithm>(args.place_agent_algorithm, "--place_agent_algorithm")
         .help("Controls which placement RL agent is used")
         .default_value("softmax")
@@ -2219,13 +2212,13 @@ argparse::ArgumentParser create_arg_parser(const std::string& prog_name, t_optio
         .show_in(argparse::ShowIn::HELP_ONLY);
 
     place_timing_grp.add_argument(args.inner_loop_recompute_divider, "--inner_loop_recompute_divider")
-        .help("Controls how many timing analysies are perform per temperature during placement")
+        .help("Controls how many timing analyses are performed per temperature during placement")
         .default_value("0")
         .show_in(argparse::ShowIn::HELP_ONLY);
 
     place_timing_grp.add_argument(args.quench_recompute_divider, "--quench_recompute_divider")
         .help(
-            "Controls how many timing analysies are perform during the final placement quench (t=0)."
+            "Controls how many timing analyses are performed during the final placement quench (t=0)."
             " If unspecified, uses the value from --inner_loop_recompute_divider")
         .default_value("0")
         .show_in(argparse::ShowIn::HELP_ONLY);
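
All the registrations touched by this commit follow the same fluent builder pattern, so a hypothetical new option would be declared like this. This is a fragment that would sit inside create_arg_parser(); the option name and args field are placeholders, while add_argument, .help, .default_value, and .show_in are the calls visible above:

    // Hypothetical option, shown only to illustrate the builder pattern used
    // throughout create_arg_parser(); "--my_debug_knob" is not a real VPR flag.
    place_timing_grp.add_argument(args.my_debug_knob, "--my_debug_knob")
        .help("One-line description printed by --help")
        .default_value("0")                    // string default, parsed like the options above
        .show_in(argparse::ShowIn::HELP_ONLY); // hidden from the short usage line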
