diff --git a/.travis.yml b/.travis.yml index 01aa50f21ec..e9f1fb1a8f5 100644 --- a/.travis.yml +++ b/.travis.yml @@ -44,8 +44,6 @@ addons: - libxft-dev - libxml++2.6-dev - perl - - python - - python3.6 - python-lxml - texinfo - time @@ -238,9 +236,13 @@ jobs: - ./.github/travis/cron_build.sh before_script: + # Switch to python 3.6.3 + - pyenv install -f 3.6.3 + - pyenv global 3.6.3 + - source .github/travis/common.sh - ./.github/travis/setup.sh - + after_script: - ./.github/travis/setup.sh diff --git a/dev/tutorial/NewDeveloperTutorial.txt b/dev/tutorial/NewDeveloperTutorial.txt index babf29e79dd..79ba611d3c6 100644 --- a/dev/tutorial/NewDeveloperTutorial.txt +++ b/dev/tutorial/NewDeveloperTutorial.txt @@ -80,14 +80,14 @@ Task #5 - Open the Black Box 1. Using the custom Verilog circuit and architecture created in Task #4, directly run Odin II on it to generate a blif netlist. You may need to skim the Odin II - readme file and the vtr_flow/scripts/run_vtr_flow.pl. + readme file and the vtr_flow/scripts/run_vtr_flow.py. 2. Using the output netlist of Odin II, run ABC to generate a technology-mapped blif file. - You may need to skim vtr_flow/scripts/run_vtr_flow.pl. + You may need to skim vtr_flow/scripts/run_vtr_flow.py. 3. Using the output of ABC, run VPR to complete the mapping of a user circuit to a target architecture. You may need to consult the VPR User Manual and skim - You may need to skim vtr_flow/scripts/run_vtr_flow.pl. + You may need to skim vtr_flow/scripts/run_vtr_flow.py. 4. Read the VPR section of the online documentation. diff --git a/doc/src/conf.py b/doc/src/conf.py index b3c3dd67e3e..7213ab31e4f 100644 --- a/doc/src/conf.py +++ b/doc/src/conf.py @@ -22,6 +22,7 @@ import recommonmark sys.path.append(".") +sys.path.insert(0, os.path.abspath('../../vtr_flow/scripts/python_libs')) from markdown_code_symlinks import LinkParser, MarkdownSymlinksDomain # Cool looking ReadTheDocs theme @@ -58,13 +59,16 @@ 'sphinx.ext.todo', 'sphinx.ext.mathjax', 'sphinx.ext.imgmath', + 'sphinx.ext.napoleon', + 'sphinx.ext.coverage', 'breathe', 'notfound.extension', 'sphinx_markdown_tables', 'sdcdomain', 'archdomain', 'rrgraphdomain', - 'recommonmark' + 'recommonmark', + 'sphinx.ext.autodoc' ] if have_sphinxcontrib_bibtex: diff --git a/doc/src/dev/tutorials/new_developer_tutorial.rst b/doc/src/dev/tutorials/new_developer_tutorial.rst index b7c05ec6bb6..4aa694a3c70 100644 --- a/doc/src/dev/tutorials/new_developer_tutorial.rst +++ b/doc/src/dev/tutorials/new_developer_tutorial.rst @@ -57,7 +57,7 @@ Open the Black Box At this stage, you have gotten a taste of how an FPGA architect would go about using VTR. As a developer though, you need a much deeper understanding of how this tool works. The purpose of this section is to have you to learn the details of the VTR CAD flow by having you manually do what the scripts do. -Using the custom Verilog circuit and architecture created in the previous step, directly run Odin II on it to generate a blif netlist. You may need to skim the ``ODIN_II/README.rst`` and the ``vtr_flow/scripts/run_vtr_flow.pl``. +Using the custom Verilog circuit and architecture created in the previous step, directly run Odin II on it to generate a blif netlist. You may need to skim the ``ODIN_II/README.rst`` and the ``vtr_flow/scripts/run_vtr_flow.py``. Using the output netlist of Odin II, run ABC to generate a technology-mapped blif file. You may need to skim the ABC homepage (http://www.eecs.berkeley.edu/~alanmi/abc/). 
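The stages the tutorial asks the reader to run by hand are the same ones the new ``vtr`` Python library (added under ``vtr_flow/scripts/python_libs`` in this change) drives programmatically. A minimal sketch, assuming ``vtr_flow/scripts/python_libs`` is on ``PYTHONPATH`` and using placeholder circuit and architecture file names:

.. code-block:: python

    import sys
    sys.path.insert(0, "vtr_flow/scripts/python_libs")  # assumed checkout-relative path

    import vtr

    # Run only the Odin II and ABC stages of the flow; VPR can then be invoked
    # by hand, mirroring the manual steps described in the tutorial above.
    vtr.flow.run(
        "my_arch.xml",    # hypothetical architecture file
        "my_circuit.v",   # hypothetical Verilog circuit
        start_stage=vtr.VtrStage.odin,
        end_stage=vtr.VtrStage.abc,
        temp_dir="./temp",
    )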
diff --git a/doc/src/tutorials/flow/basic_flow.rst b/doc/src/tutorials/flow/basic_flow.rst index 2552958beb3..bb0fb77319c 100644 --- a/doc/src/tutorials/flow/basic_flow.rst +++ b/doc/src/tutorials/flow/basic_flow.rst @@ -46,7 +46,7 @@ The following steps show you to run the VTR design flow to map a sample circuit Some also contain a ``golden_results.txt`` file that is used by the scripts to check for correctness. - The ``vtr_release/vtr_flow/scripts/run_vtr_flow.pl`` script describes the CAD flow employed in the test. + The ``vtr_release/vtr_flow/scripts/run_vtr_flow.py`` script describes the CAD flow employed in the test. You can modify the flow by editing this script. At this point, feel free to run any of the tasks pre-pended with "regression". diff --git a/doc/src/vtr/index.rst b/doc/src/vtr/index.rst index fda8c3a95cb..be581eec596 100644 --- a/doc/src/vtr/index.rst +++ b/doc/src/vtr/index.rst @@ -49,6 +49,7 @@ VTR also includes a set of benchmark designs known to work with the design flow. parse_vtr_task parse_config pass_requirements + python_libs/vtr diff --git a/doc/src/vtr/power_estimation/index.rst b/doc/src/vtr/power_estimation/index.rst index c5f45f8df2f..e971e9978d3 100644 --- a/doc/src/vtr/power_estimation/index.rst +++ b/doc/src/vtr/power_estimation/index.rst @@ -33,8 +33,8 @@ The easiest way to run the VTR flow is to use the :ref:`run_vtr_flow` script. In order to perform power estimation, you must add the following options: - * :option:`run_vtr_flow.pl -power` - * :option:`run_vtr_flow.pl -cmos_tech` ```` + * :option:`run_vtr_flow.py -power` + * :option:`run_vtr_flow.py -cmos_tech` ```` The CMOS technology properties file is an XML file that contains relevant process-dependent information needed for power estimation. XML files for 22nm, 45nm, and 130nm PTM models can be found here:: diff --git a/doc/src/vtr/python_libs/vtr.rst b/doc/src/vtr/python_libs/vtr.rst new file mode 100755 index 00000000000..9dfff1dcc13 --- /dev/null +++ b/doc/src/vtr/python_libs/vtr.rst @@ -0,0 +1,38 @@ +.. _python_libs/vtr: + +VTR Flow Python library +----------------------- +The VTR flow can be imported and implemented as a python library. Below are the descriptions of the useful functions. + +VTR flow +======== +.. automodule:: vtr.flow + :members: run + +ODIN II +======= + +.. automodule:: vtr.odin.odin + :members: + +ABC +=== + +.. automodule:: vtr.abc.abc + :members: run, run_lec + +ACE +=== + +.. automodule:: vtr.ace.ace + :members: + +VPR +=== + +.. automodule:: vtr.vpr.vpr + :members: + +.. toctree:: + :maxdepth: 2 + :caption: Contents: \ No newline at end of file diff --git a/doc/src/vtr/run_vtr_flow.rst b/doc/src/vtr/run_vtr_flow.rst index 68e5346ddd0..6ce79a8e0ca 100644 --- a/doc/src/vtr/run_vtr_flow.rst +++ b/doc/src/vtr/run_vtr_flow.rst @@ -7,16 +7,16 @@ This script runs the VTR flow for a single benchmark circuit and architecture fi The script is located at:: - $VTR_ROOT/vtr_flow/scripts/run_vtr_flow.pl + $VTR_ROOT/vtr_flow/scripts/run_vtr_flow.py -.. program:: run_vtr_flow.pl +.. 
program:: run_vtr_flow.py Basic Usage ~~~~~~~~~~~ -At a minimum ``run_vtr_flow.pl`` requires two command-line arguments:: +At a minimum ``run_vtr_flow.py`` requires two command-line arguments:: - run_vtr_flow.pl + run_vtr_flow.py where: @@ -44,20 +44,20 @@ The script will also produce an output files (\*.out) for each stage, containing Advanced Usage ~~~~~~~~~~~~~~ -Additional *optional* command arguments can also be passed to ``run_vtr_flow.pl``:: +Additional *optional* command arguments can also be passed to ``run_vtr_flow.py``:: - run_vtr_flow.pl [] [] + run_vtr_flow.py [] [] where: - * ```` are additional arguments passed to ``run_vtr_flow.pl`` (described below), - * ```` are any arguments not recognized by ``run_vtr_flow.pl``. These will be forwarded to VPR. + * ```` are additional arguments passed to ``run_vtr_flow.py`` (described below), + * ```` are any arguments not recognized by ``run_vtr_flow.py``. These will be forwarded to VPR. For example:: - run_vtr_flow.pl my_circuit.v my_arch.xml -track_memory_usage --pack --place + run_vtr_flow.py my_circuit.v my_arch.xml -track_memory_usage --pack --place -will run the VTR flow to map the circuit ``my_circuit.v`` onto the architecture ``my_arch.xml``; the arguments ``--pack`` and ``--place`` will be passed to VPR (since they are unrecognized arguments to ``run_vtr_flow.pl``). +will run the VTR flow to map the circuit ``my_circuit.v`` onto the architecture ``my_arch.xml``; the arguments ``--pack`` and ``--place`` will be passed to VPR (since they are unrecognized arguments to ``run_vtr_flow.py``). They will cause VPR to perform only :ref:`packing and placement `. Detailed Command-line Options diff --git a/doc/src/vtr/running_vtr.rst b/doc/src/vtr/running_vtr.rst index 1eb62707969..922d1fa5298 100644 --- a/doc/src/vtr/running_vtr.rst +++ b/doc/src/vtr/running_vtr.rst @@ -24,7 +24,7 @@ The :ref:`run_vtr_flow` script is provided to execute the VTR flow for a single .. code-block:: none - $VTR_ROOT/vtr_flow/scripts/run_vtr_flow.pl + $VTR_ROOT/vtr_flow/scripts/run_vtr_flow.py It requires two arguments: @@ -41,7 +41,7 @@ Architecture files can be found under:: The script can also be used to run parts of the VTR flow. -.. seealso:: :ref:`run_vtr_flow` for the detailed command line options of ``run_vtr_flow.pl``. +.. seealso:: :ref:`run_vtr_flow` for the detailed command line options of ``run_vtr_flow.py``. 
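The option-forwarding behaviour documented above has a direct analogue in the Python library: unrecognized options become entries of the ``vpr_args`` dictionary passed to ``vtr.flow.run``. A sketch of the programmatic equivalent, assuming boolean entries are turned into bare ``--`` flags the same way the ODIN II and ABC wrappers handle their ``*_args`` dictionaries (file names are placeholders):

.. code-block:: python

    import vtr

    # Roughly equivalent to:
    #   run_vtr_flow.py my_circuit.v my_arch.xml --pack --place
    # (note the library takes the architecture first, then the circuit)
    vtr.flow.run(
        "my_arch.xml",
        "my_circuit.v",
        vpr_args={"pack": True, "place": True},
    )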
Running Multiple Benchmarks & Architectures with Tasks diff --git a/run_reg_test.pl b/run_reg_test.pl index 64486d296f9..f04a22ee224 100755 --- a/run_reg_test.pl +++ b/run_reg_test.pl @@ -65,6 +65,7 @@ my $skip_qor = 0; my $show_failures = 0; my $num_cpu = 1; +my $script = "run_vtr_flow.py"; my $long_task_names = 0; # Parse Input Arguments @@ -87,6 +88,8 @@ $long_task_names = 1; } elsif ( $token eq "-j" ) { #-j N $num_cpu = int(shift(@ARGV)); + } elsif ( $token eq "-script" ) { + $script = shift(@ARGV); } elsif ( $token =~ /^-j(\d+)$/ ) { #-jN $num_cpu = int($1); } elsif ($token eq "quick_test") { @@ -337,10 +340,10 @@ sub run_single_test { print "\nRunning $test\n"; print "-------------------------------------------------------------------------------\n"; - print "scripts/run_vtr_task.pl $run_params \n"; + print "scripts/run_vtr_task.pl $run_params -script $script \n"; print "\n"; chdir("$vtr_flow_path"); - my $test_status = system("scripts/run_vtr_task.pl $run_params \n"); + my $test_status = system("scripts/run_vtr_task.pl $run_params -script $script \n"); chdir(".."); #Perl is obtuse, and requires you to manually shift the return value by 8 bits diff --git a/vtr_flow/scripts/python_libs/vtr/__init__.py b/vtr_flow/scripts/python_libs/vtr/__init__.py new file mode 100644 index 00000000000..2a701260451 --- /dev/null +++ b/vtr_flow/scripts/python_libs/vtr/__init__.py @@ -0,0 +1,30 @@ +""" + __init__ for the VTR python module +""" +from .util import ( + load_config_lines, + find_vtr_file, + CommandRunner, + print_verbose, + relax_w, + file_replace, + RawDefaultHelpFormatter, + VERBOSITY_CHOICES, + format_elapsed_time, + write_tab_delimitted_csv, + load_list_file, + find_vtr_root, + argparse_str2bool, + get_next_run_dir, + get_latest_run_dir, + verify_file, +) +from .inspect import determine_lut_size, determine_min_w, determine_memory_addr_width +#pylint: disable=reimported +from .abc import run, run_lec +from .vpr import run, run_relax_w, cmp_full_vs_incr_sta, run_second_time +from .odin import run +from .ace import run +from .error import * +from .flow import run, VtrStage +#pylint: enable=reimported \ No newline at end of file diff --git a/vtr_flow/scripts/python_libs/vtr/abc/__init__.py b/vtr_flow/scripts/python_libs/vtr/abc/__init__.py new file mode 100644 index 00000000000..26d376b3420 --- /dev/null +++ b/vtr_flow/scripts/python_libs/vtr/abc/__init__.py @@ -0,0 +1,4 @@ +""" + __init__ for abc module +""" +from .abc import run, run_lec diff --git a/vtr_flow/scripts/python_libs/vtr/abc/abc.py b/vtr_flow/scripts/python_libs/vtr/abc/abc.py new file mode 100644 index 00000000000..f08338e70d2 --- /dev/null +++ b/vtr_flow/scripts/python_libs/vtr/abc/abc.py @@ -0,0 +1,443 @@ +""" + Module to run ABC with its various options +""" +import shutil +from collections import OrderedDict +from pathlib import Path +from vtr import find_vtr_file, determine_lut_size, verify_file, CommandRunner +from vtr.error import InspectError + +#pylint: disable=too-many-arguments, too-many-locals +def run( + architecture_file, + circuit_file, + output_netlist, + command_runner=CommandRunner(), + temp_dir=Path("."), + log_filename="abc.out", + abc_exec=None, + abc_script=None, + abc_rc=None, + use_old_abc_script=False, + abc_args=None, + keep_intermediate_files=True, +): + """ + Runs ABC to optimize specified file. + + .. 
note :: Usage: vtr.abc.run(,,,[OPTIONS]) + + Arguments + ========= + architecture_file : + Architecture file to target + + circuit_file : + Circuit file to optimize + + output_netlist : + File name to output the resulting circuit to + + Other Parameters + ---------------- + command_runner : + A CommandRunner object used to run system commands + + temp_dir : + Directory to run in (created if non-existent) + + log_filename : + File to log result to + + abc_exec : + ABC executable to be run + + abc_script : + The script to be run on abc + + abc_rc : + The ABC rc file + + use_old_abc_script : + Enables the use of the old ABC script + + abc_args : + A dictionary of keyword arguments to pass on to ABC + + keep_intermediate_files : + Determines if intermediate files are kept or deleted + + """ + temp_dir = Path(temp_dir) if not isinstance(temp_dir, Path) else temp_dir + temp_dir.mkdir(parents=True, exist_ok=True) + + abc_args = OrderedDict() if abc_args is None else abc_args + + # Verify that files are Paths or convert them to Paths and check that they exist + architecture_file = verify_file(architecture_file, "Architecture") + circuit_file = verify_file(circuit_file, "Circuit") + output_netlist = verify_file(output_netlist, "Output netlist", should_exist=False) + + blackbox_latches_script = find_vtr_file("blackbox_latches.pl") + clk_list = [] + # + # Parse arguments + # + (abc_args, + abc_flow_type, + lut_size, + abc_run_args, + use_old_latches_restoration_script) = parse_abc_args(abc_args) + + + lut_size = determine_lut_size(str(architecture_file)) if lut_size is None else lut_size + + populate_clock_list( + circuit_file, + blackbox_latches_script, + clk_list, + command_runner, + temp_dir + ) + + abc_exec = find_vtr_file("abc", is_executable=True) if abc_exec is None else abc_exec + abc_rc = Path(abc_exec).parent / "abc.rc" if abc_rc is None else abc_rc + + shutil.copyfile(str(abc_rc), str(temp_dir / "abc.rc")) + + iterations = len(clk_list) + iterations = 1 if iterations == 0 or abc_flow_type != "iterative_bb" else iterations + + original_script = abc_script + input_file = circuit_file.name + for i in range(0, iterations): + pre_abc_blif = temp_dir / (str(i) + "_" + circuit_file.name) + post_abc_blif = temp_dir / (str(i) + "_" + output_netlist.name) + post_abc_raw_blif = temp_dir / ( + str(i) + "_" + output_netlist.with_suffix("").stem + ".raw.abc.blif" + ) + if abc_flow_type == "blanket_bb": + command_runner.run_system_command( + [ + blackbox_latches_script, + "--input", + input_file, + "--output", + pre_abc_blif.name, + ], + temp_dir=temp_dir, + log_filename=str(i) + "_blackboxing_latch.out", + indent_depth=1, + ) + + elif len(clk_list) > i: + command_runner.run_system_command( + [ + blackbox_latches_script, + "--clk_list", + clk_list[i], + "--input", + input_file, + "--output", + pre_abc_blif.name, + ], + temp_dir=temp_dir, + log_filename=str(i) + "_blackboxing_latch.out", + indent_depth=1, + ) + else: + pre_abc_blif = input_file + + abc_script = ( + "; ".join([ + 'echo ""', + 'echo "Load Netlist"', + 'echo "============"', + "read {pre_abc_blif}".format(pre_abc_blif=pre_abc_blif.name), + "time", + 'echo ""', + 'echo "Circuit Info"', + 'echo "=========="', + "print_stats", + "print_latch", + "time", + 'echo ""', + 'echo "LUT Costs"', + 'echo "========="', + "print_lut", + "time", + 'echo ""', + 'echo "Logic Opt + Techmap"', + 'echo "==================="', + "strash", + "ifraig -v", + "scorr -v", + "dc2 -v", + "dch -f", + "if -K {lut_size} -v".format(lut_size=lut_size), + "mfs2 -v", + 
"print_stats", + "time", + 'echo ""', + 'echo "Output Netlist"', + 'echo "=============="', + "write_hie {pre_abc_blif} {post_abc_raw_blif}".format( + pre_abc_blif=pre_abc_blif.name, + post_abc_raw_blif=post_abc_raw_blif.name, + ), + "time;" + ]) + if abc_script is None + else + "; ".join([ + "read {pre_abc_blif}".format(pre_abc_blif=pre_abc_blif.name), + "time", + "resyn", + "resyn2", + "if -K {lut_size}".format(lut_size=lut_size), + "time", + "scleanup", + "write_hie {pre_abc_blif} {post_abc_raw_blif}".format( + pre_abc_blif=pre_abc_blif.name, + post_abc_raw_blif=post_abc_raw_blif.name, + ), + "print_stats" + ]) + if use_old_abc_script + else abc_script + ) + + cmd = [abc_exec, '-c', abc_script] + if abc_run_args: + cmd.append(abc_run_args) + + command_runner.run_system_command( + cmd, + temp_dir=temp_dir, + log_filename=Path(log_filename).stem + str(i) + Path(log_filename).suffix, + indent_depth=1, + ) + + if abc_flow_type != "blanket_bb" and len(clk_list) > i: + command_runner.run_system_command( + [ + blackbox_latches_script, + "--restore", + clk_list[i], + "--input", + post_abc_raw_blif.name, + "--output", + post_abc_blif.name, + ], + temp_dir=temp_dir, + log_filename="restore_latch" + str(i) + ".out", + indent_depth=1, + ) + else: + restore_multiclock_info_script = ( + find_vtr_file( + "restore_multiclock_latch_information.pl" + ) + if use_old_latches_restoration_script + else + find_vtr_file( + "restore_multiclock_latch.pl" + ) + ) + command_runner.run_system_command( + [ + restore_multiclock_info_script, + pre_abc_blif.name, + post_abc_raw_blif.name, + post_abc_blif.name, + ], + temp_dir=temp_dir, + log_filename="restore_latch" + str(i) + ".out", + indent_depth=1, + ) + if abc_flow_type != "iterative_bb": + break + + abc_script = original_script + input_file = post_abc_blif.name + + command_runner.run_system_command( + [ + blackbox_latches_script, + "--input", + post_abc_blif.name, + "--output", + output_netlist.name, + "--vanilla", + ], + temp_dir=temp_dir, + log_filename="restore_latch" + str(i) + ".out", + indent_depth=1, + ) + if not keep_intermediate_files: + for file in temp_dir.iterdir(): + if file.suffix in (".dot", ".v", ".rc"): + file.unlink() +#pylint: enable=too-many-arguments, too-many-locals +def parse_abc_args(abc_args): + """ + function to parse abc_args + """ + abc_flow_type = "iterative_bb" + abc_run_args = "" + lut_size = None + use_old_latches_restoration_script = False + if "iterative_bb" in abc_args: + abc_flow_type = "iterative_bb" + del abc_args["iterative_bb"] + if "blanket_bb" in abc_args: + abc_flow_type = "blanket_bb" + del abc_args["blanket_bb"] + if "once_bb" in abc_args: + abc_flow_type = "once_bb" + del abc_args["once_bb"] + if "use_old_latches_restoration_script" in abc_args: + use_old_latches_restoration_script = True + del abc_args["use_old_latches_restoration_script"] + if "lut_size" in abc_args: + lut_size = abc_args["lut_size"] + del abc_args["lut_size"] + + for arg, value in abc_args.items(): + if isinstance(value, bool) and value: + abc_run_args += ["--" + arg] + elif isinstance(value, (str, int, float)): + abc_run_args += ["--" + arg, str(value)] + else: + pass + return abc_args, abc_flow_type, lut_size, abc_run_args, use_old_latches_restoration_script + +def populate_clock_list( + circuit_file, + blackbox_latches_script, + clk_list, + command_runner, + temp_dir +): + """ + function to populate the clock list + """ + clk_list_path = temp_dir / "report_clk.out" + cmd = [ + blackbox_latches_script, + "--input", + circuit_file.name, + 
"--output_list", + clk_list_path.name, + ] + command_runner.run_system_command( + cmd, temp_dir=temp_dir, log_filename="report_clocks.abc.out", indent_depth=1 + ) + with clk_list_path.open("r") as file: + for line in file.readlines(): + clk_list.append(line.strip("\n")) + +#pylint: disable=too-many-arguments +def run_lec( + reference_netlist, + implementation_netlist, + command_runner=CommandRunner(), + temp_dir=Path("."), + log_filename="abc.lec.out", + abc_exec=None, +): + """ + Run Logical Equivalence Checking (LEC) between two netlists using ABC + + .. note :: Usage: vtr.abc.run_lec(,,[OPTIONS]) + + Arguments + ========= + reference_netlist : + The reference netlist to be commpared to + + implementation_netlist : + The implemeted netlist to compare to the reference netlist + + + Other Parameters + ---------------- + command_runner : + A CommandRunner object used to run system commands + + temp_dir : + Directory to run in (created if non-existent) + + log_filename : + File to log result to + + abc_exec : + ABC executable to be run + + """ + temp_dir = Path(temp_dir) if not isinstance(temp_dir, Path) else temp_dir + temp_dir.mkdir(parents=True, exist_ok=True) + + if abc_exec is None: + abc_exec = find_vtr_file("abc", is_executable=True) + + abc_script = ( + "dsec {ref} {imp}".format( + ref=reference_netlist, imp=implementation_netlist + ), + ) + abc_script = "; ".join(abc_script) + + cmd = [abc_exec, "-c", abc_script] + + output, _ = command_runner.run_system_command( + cmd, temp_dir=temp_dir, log_filename=log_filename, indent_depth=1 + ) + + # Check if ABC's LEC engine passed + lec_passed, errored = check_abc_lec_status(output) + if errored: + abc_script = ( + "cec {ref} {imp}".format(ref=reference_netlist, imp=implementation_netlist), + ) + abc_script = "; ".join(abc_script) + cmd = [abc_exec, "-c", abc_script] + output, _ = command_runner.run_system_command( + cmd, temp_dir=temp_dir, log_filename="abc.cec.out", indent_depth=1 + ) + lec_passed, errored = check_abc_lec_status(output) + if lec_passed is None: + raise InspectError( + "Couldn't determine Logical Equivalence status between {input} <-> {output}".format( + input=reference_netlist, output=implementation_netlist + ), + filename=log_filename, + ) + if lec_passed is False: + raise InspectError( + "Logical Equivalence Check failed between {input} <-> {output}".format( + input=reference_netlist, output=implementation_netlist + ), + filename=log_filename, + ) + + assert lec_passed +#pylint: enable=too-many-arguments + +def check_abc_lec_status(output): + """ + Reads abc_lec output and determines if the files were equivelent and + if there were errors when preforming lec. + """ + equivalent = None + errored = False + for line in output: + if "Error: The network has no latches." 
in line: + errored = True + if line.startswith("Networks are NOT EQUIVALENT"): + equivalent = False + elif line.startswith("Networks are equivalent"): + equivalent = True + + # Returns None if could not determine LEC status + return equivalent, errored diff --git a/vtr_flow/scripts/python_libs/vtr/ace/__init__.py b/vtr_flow/scripts/python_libs/vtr/ace/__init__.py new file mode 100644 index 00000000000..9666f7710ac --- /dev/null +++ b/vtr_flow/scripts/python_libs/vtr/ace/__init__.py @@ -0,0 +1,4 @@ +""" + init for the ACE module +""" +from .ace import run diff --git a/vtr_flow/scripts/python_libs/vtr/ace/ace.py b/vtr_flow/scripts/python_libs/vtr/ace/ace.py new file mode 100644 index 00000000000..441dd892e7b --- /dev/null +++ b/vtr_flow/scripts/python_libs/vtr/ace/ace.py @@ -0,0 +1,99 @@ +""" + Module to run ACE with its various options +""" +from pathlib import Path +from vtr import find_vtr_file, verify_file, CommandRunner +#pylint: disable=too-many-arguments +def run( + circuit_file, + old_netlist, + output_netlist, + output_activity_file, + command_runner=CommandRunner(), + temp_dir=Path("."), + log_filename="ace.out", + ace_exec=None, + ace_seed=1, +): + """ + Runs ACE for activity estimation + + .. note :: Usage: vtr.ace.run(,,,[OPTIONS]) + + Arguments + ========= + circuit_file : + Circuit file to optimize + + old_netlist : + netlist to be anylized + + output_netlist : + File name to output the resulting circuit to + + output_activity_file : + The output activity file + + Other Parameters + ---------------- + command_runner : + A CommandRunner object used to run system commands + + temp_dir : + Directory to run in (created if non-existent) + + log_filename : + File to log result to + + ace_exec : + ACE executable to be run + + ace_seed : + The ACE seed + """ + temp_dir = Path(temp_dir) if not isinstance(temp_dir, Path) else temp_dir + # Verify that files are Paths or convert them to Paths and check that they exist + circuit_file = verify_file(circuit_file, "Circuit") + old_netlist = verify_file(old_netlist, "Previous netlist") + output_netlist = verify_file(output_netlist, "Output netlist", should_exist=False) + output_activity_file = verify_file( + output_activity_file, "Output activity", should_exist=False + ) + + ace_clk_file = temp_dir / "ace_clk.txt" + ace_raw = temp_dir / (circuit_file.with_suffix("").stem + ".raw.ace.blif") + if ace_exec is None: + ace_exec = find_vtr_file("ace") + + cmd = [find_vtr_file("extract_clk_from_blif.py"), ace_clk_file.name, circuit_file.name] + command_runner.run_system_command( + cmd, temp_dir=temp_dir, log_filename="ace_clk_extraction.out", indent_depth=1 + ) + ace_clk = "" + with ace_clk_file.open("r") as file: + ace_clk = file.readline().strip("\n") + cmd = [ + ace_exec, + "-b", + circuit_file.name, + "-c", + ace_clk, + "-n", + ace_raw.name, + "-o", + output_activity_file.name, + "-s", + str(ace_seed), + ] + + command_runner.run_system_command( + cmd, temp_dir=temp_dir, log_filename=log_filename, indent_depth=1 + ) + + clock_script = find_vtr_file("restore_multiclock_latch.pl") + + cmd = [clock_script, old_netlist.name, ace_raw.name, output_netlist.name] + command_runner.run_system_command( + cmd, temp_dir=temp_dir, log_filename="ace_clk_restore.out", indent_depth=1 + ) + #pylint: enable=too-many-arguments diff --git a/vtr_flow/scripts/python_libs/vtr/error.py b/vtr_flow/scripts/python_libs/vtr/error.py new file mode 100644 index 00000000000..ca915e3f933 --- /dev/null +++ b/vtr_flow/scripts/python_libs/vtr/error.py @@ -0,0 +1,42 @@ +""" + Module 
to handle VTR flow errors. +""" + + +class VtrError(Exception): + """ + Base class for VTR related exceptions + + Attributes: + msg -- An explanation of the error + """ + + def __init__(self, *msg): + self.msg = " ".join(str(item) for item in msg) + super().__init__() + + +class CommandError(VtrError): + """ + Raised when an external command failed. + + Attributes: + returncode -- The return code from the command + cmd -- The command run + """ + + def __init__(self, *msg, cmd, returncode, log=None): + super().__init__(*msg) + self.returncode = returncode + self.cmd = cmd + self.log = log + + +class InspectError(VtrError): + """ + Raised when some query (inspection) result is not found. + """ + + def __init__(self, *msg, filename=None): + super().__init__(*msg) + self.filename = filename diff --git a/vtr_flow/scripts/python_libs/vtr/flow.py b/vtr_flow/scripts/python_libs/vtr/flow.py new file mode 100644 index 00000000000..32cdf27f10e --- /dev/null +++ b/vtr_flow/scripts/python_libs/vtr/flow.py @@ -0,0 +1,377 @@ +""" + Module to run the VTR flow. This module calls other modules that then access the tools like VPR. +""" +import shutil +from pathlib import Path +from collections import OrderedDict +from enum import Enum +import vtr + + +class VtrStage(Enum): + """ + Enum class for the VTR stages + """ + + odin = 1 + abc = 2 + ace = 3 + vpr = 4 + + def __le__(self, other): + if self.__class__ is other.__class__: + return int(self.value) <= other.value + return NotImplemented + + def __ge__(self, other): + if self.__class__ is other.__class__: + return int(self.value) >= other.value + return NotImplemented + + +# pylint: disable=too-many-arguments, too-many-locals, too-many-branches, too-many-statements +def run( + architecture_file, + circuit_file, + power_tech_file=None, + start_stage=VtrStage.odin, + end_stage=VtrStage.vpr, + command_runner=vtr.CommandRunner(), + temp_dir=Path("./temp"), + odin_args=None, + abc_args=None, + vpr_args=None, + keep_intermediate_files=True, + keep_result_files=True, + min_hard_mult_size=3, + min_hard_adder_size=1, + check_equivalent=False, + check_incremental_sta_consistency=False, + use_old_abc_script=False, + relax_w_factor=1.3, +): + """ + Runs the VTR CAD flow to map the specified circuit_file onto the target architecture_file + + .. note :: Usage: vtr.run(<architecture_file>, <circuit_file>, [OPTIONS]) + + Arguments + ========= + architecture_file : + Architecture file to target + + circuit_file : + Circuit to implement + + Other Parameters + ---------------- + power_tech_file : + Technology power file. Enables power analysis and runs ace + + start_stage : + Stage of the flow to start at + + end_stage : + Stage of the flow to finish at + + temp_dir : + Directory to run in (created if non-existent) + + command_runner : + A CommandRunner object used to run system commands + + verbosity : + whether to output error description or not + + odin_args : + A dictionary of keyword arguments to pass on to ODIN II + + abc_args : + A dictionary of keyword arguments to pass on to ABC + + vpr_args : + A dictionary of keyword arguments to pass on to VPR + + keep_intermediate_files : + Determines if intermediate files are kept or deleted + + keep_result_files : + Determines if the result files are kept or deleted + + min_hard_mult_size : + Tells ODIN II the minimum multiplier size that should + be implemented using hard multiplier (if available) + + min_hard_adder_size : + Tells ODIN II the minimum adder size that should be implemented + using hard adder (if available).
+ + check_equivalent : + Enables Logical Equivalence Checks + + use_old_abc_script : + Enables the use of the old ABC script + + relax_w_factor : + Factor by which to relax minimum channel width for critical path delay routing + + check_incremental_sta_consistency : + Do a second-run of the incremental analysis to compare the result files + + """ + + # + # Initial setup + # + vpr_args = OrderedDict() if not vpr_args else vpr_args + odin_args = OrderedDict() if not odin_args else odin_args + abc_args = OrderedDict() if not abc_args else abc_args + + # Verify that files are Paths or convert them to Paths and check that they exist + architecture_file = vtr.util.verify_file(architecture_file, "Architecture") + circuit_file = vtr.util.verify_file(circuit_file, "Circuit") + if architecture_file.suffix != ".xml": + raise vtr.VtrError( + "Expected Architecture file as second argument (was {})".format(architecture_file.name) + ) + power_tech_file = ( + vtr.util.verify_file(power_tech_file, "Power") if power_tech_file else None + ) + temp_dir = Path(temp_dir) + temp_dir.mkdir(parents=True, exist_ok=True) + netlist_ext = ".blif" if ".eblif" not in circuit_file.suffixes else ".eblif" + + # Define useful filenames + post_odin_netlist = temp_dir / (circuit_file.stem + ".odin" + netlist_ext) + post_abc_netlist = temp_dir / (circuit_file.stem + ".abc" + netlist_ext) + post_ace_netlist = temp_dir / (circuit_file.stem + ".ace" + netlist_ext) + post_ace_activity_file = temp_dir / (circuit_file.stem + ".act") + pre_vpr_netlist = temp_dir / (circuit_file.stem + ".pre-vpr" + netlist_ext) + + # If the user provided a .blif or .eblif netlist, we use that as the baseline for LEC + # (ABC can't LEC behavioural verilog) + lec_base_netlist = circuit_file.name if "blif" in circuit_file.suffixes else None + # Reference netlist for LEC + + gen_postsynthesis_netlist = temp_dir / ( + circuit_file.stem + "_post_synthesis" + netlist_ext + ) + + # Copy the circuit and architecture + circuit_copy = temp_dir / circuit_file.name + architecture_copy = temp_dir / architecture_file.name + shutil.copy(str(circuit_file), str(circuit_copy)) + shutil.copy(str(architecture_file), str(architecture_copy)) + + # There are multiple potential paths for the netlist to reach a tool + # We initialize it here to the user specified circuit and let downstream + # stages update it + next_stage_netlist = circuit_copy + + # + # RTL Elaboration & Synthesis + # + if ( + should_run_stage(VtrStage.odin, start_stage, end_stage) + and circuit_file.suffixes != ".blif" + ): + vtr.odin.run( + architecture_copy, + next_stage_netlist, + output_netlist=post_odin_netlist, + command_runner=command_runner, + temp_dir=temp_dir, + odin_args=odin_args, + min_hard_mult_size=min_hard_mult_size, + min_hard_adder_size=min_hard_adder_size, + ) + + next_stage_netlist = post_odin_netlist + + lec_base_netlist = ( + post_odin_netlist if not lec_base_netlist else lec_base_netlist + ) + + # + # Logic Optimization & Technology Mapping + # + if should_run_stage(VtrStage.abc, start_stage, end_stage): + vtr.abc.run( + architecture_copy, + next_stage_netlist, + output_netlist=post_abc_netlist, + command_runner=command_runner, + temp_dir=temp_dir, + abc_args=abc_args, + keep_intermediate_files=keep_intermediate_files, + use_old_abc_script=use_old_abc_script, + ) + + next_stage_netlist = post_abc_netlist + lec_base_netlist = ( + post_abc_netlist if not lec_base_netlist else lec_base_netlist + ) + + # + # Power Activity Estimation + # + if power_tech_file: + # The user provided a tech 
file, so do power analysis + + if should_run_stage(VtrStage.ace, start_stage, end_stage): + vtr.ace.run( + next_stage_netlist, + old_netlist=post_odin_netlist, + output_netlist=post_ace_netlist, + output_activity_file=post_ace_activity_file, + command_runner=command_runner, + temp_dir=temp_dir, + ) + + if not keep_intermediate_files: + next_stage_netlist.unlink() + post_odin_netlist.unlink() + + # Use ACE's output netlist + next_stage_netlist = post_ace_netlist + lec_base_netlist = ( + post_ace_netlist if not lec_base_netlist else lec_base_netlist + ) + + # Enable power analysis in VPR + vpr_args["power"] = True + vpr_args["tech_properties"] = str(power_tech_file.resolve()) + + # + # Pack/Place/Route + # + if should_run_stage(VtrStage.vpr, start_stage, end_stage): + # Copy the input netlist for input to vpr + shutil.copyfile(str(next_stage_netlist), str(pre_vpr_netlist)) + route_fixed_w = "route_chan_width" in vpr_args + if ("route" in vpr_args or "place" in vpr_args) and not route_fixed_w: + vpr_args["route_chan_width"] = 300 + route_fixed_w = True + + if route_fixed_w: + # The User specified a fixed channel width + do_second_run = False + second_run_args = vpr_args + + if "write_rr_graph" in vpr_args: + do_second_run = True + + if "analysis" in vpr_args: + do_second_run = True + del vpr_args["analysis"] + + if "route" in vpr_args: + do_second_run = True + del vpr_args["route"] + + vtr.vpr.run( + architecture_copy, + pre_vpr_netlist, + circuit_copy.stem, + command_runner=command_runner, + temp_dir=temp_dir, + vpr_args=vpr_args, + ) + if do_second_run: + #Run vpr again with additional parameters. + #This is used to ensure that files generated by VPR can be re-loaded by it + rr_graph_ext = ( + Path(second_run_args["write_rr_graph"]).suffix + if "write_rr_graph" in second_run_args + else ".xml" + ) + vtr.vpr.run_second_time( + architecture_copy, + pre_vpr_netlist, + circuit_copy.stem, + command_runner=command_runner, + temp_dir=temp_dir, + second_run_args=second_run_args, + rr_graph_ext=rr_graph_ext + ) + else: + # First find minW and then re-route at a relaxed W + vtr.vpr.run_relax_w( + architecture_copy, + pre_vpr_netlist, + circuit_copy.stem, + command_runner=command_runner, + relax_w_factor=relax_w_factor, + temp_dir=temp_dir, + vpr_args=vpr_args, + ) + lec_base_netlist = pre_vpr_netlist if not lec_base_netlist else lec_base_netlist + + # + # Logical Equivalence Checks (LEC) + # + if check_equivalent: + for file in Path(temp_dir).iterdir(): + if "post_synthesis.blif" in str(file): + gen_postsynthesis_netlist = file.name + break + vtr.abc.run_lec( + lec_base_netlist, + gen_postsynthesis_netlist, + command_runner=command_runner, + temp_dir=temp_dir, + ) + + # Do a second-run of the incremental analysis to compare the result files + if check_incremental_sta_consistency: + vtr.vpr.cmp_full_vs_incr_sta( + architecture_copy, + pre_vpr_netlist, + circuit_copy.stem, + command_runner=command_runner, + vpr_args=vpr_args, + temp_dir=temp_dir, + ) + + if not keep_intermediate_files: + delete_intermediate_files( + next_stage_netlist, + post_ace_activity_file, + keep_result_files, + temp_dir, + power_tech_file, + ) + + +# pylint: enable=too-many-arguments, too-many-locals, too-many-branches, too-many-statements + + +def delete_intermediate_files( + next_stage_netlist, + post_ace_activity_file, + keep_result_files, + temp_dir, + power_tech_file, +): + """ + delete intermediate files + """ + next_stage_netlist.unlink() + exts = (".xml", ".sdf", ".v") + exts += (".net", ".place", ".route") if not 
keep_result_files else () + + for file in temp_dir.iterdir(): + if file.suffix in exts: + file.unlink() + + if power_tech_file: + post_ace_activity_file.unlink() + + +def should_run_stage(stage, flow_start_stage, flow_end_stage): + """ + Returns True if stage falls between flow_start_stage and flow_end_stage + """ + if flow_start_stage <= stage <= flow_end_stage: + return True + return False diff --git a/vtr_flow/scripts/python_libs/vtr/inspect.py b/vtr_flow/scripts/python_libs/vtr/inspect.py new file mode 100644 index 00000000000..265c70998c5 --- /dev/null +++ b/vtr_flow/scripts/python_libs/vtr/inspect.py @@ -0,0 +1,90 @@ +""" + module that contains functions to inspect various files to determine important values +""" +import re + +try: + # Try for the fast c-based version first + import xml.etree.cElementTree as ET +except ImportError: + # Fall back on python implementation + import xml.etree.ElementTree as ET + +from vtr.error import InspectError + + +def determine_lut_size(architecture_file): + """ + Determines the maximum LUT size (K) in an architecture file. + + Assumes LUTs are represented as BLIF '.names' + """ + arch_xml = ET.parse(architecture_file).getroot() + + lut_size = 0 + saw_blif_names = False + for elem in arch_xml.findall(".//pb_type"): # XPath recursive search for 'pb_type' + blif_model = elem.get("blif_model") + if blif_model and blif_model == ".names": + saw_blif_names = True + input_port = elem.find("input") + + input_width = int(input_port.get("num_pins")) + assert input_width > 0 + + # Keep the maximum lut size found (i.e. fracturable architectures) + lut_size = max(lut_size, input_width) + + if saw_blif_names and lut_size == 0: + raise InspectError( + "Could not identify valid LUT size (K)", filename=architecture_file + ) + + return lut_size + + +def determine_memory_addr_width(architecture_file): + """ + Determines the maximum RAM block address width in an architecture file + + Assumes RAMS are represented using the standard VTR primitives + (.subckt single_port_ram, .subckt dual_port_ram etc.) + """ + arch_xml = ET.parse(architecture_file).getroot() + + mem_addr_width = 0 + saw_ram = False + for elem in arch_xml.findall(".//pb_type"): # XPATH for recursive search + blif_model = elem.get("blif_model") + if blif_model and "port_ram" in blif_model: + saw_ram = True + for input_port in elem.findall("input"): + port_name = input_port.get("name") + if "addr" in port_name: + input_width = int(input_port.get("num_pins")) + mem_addr_width = max(mem_addr_width, input_width) + + if saw_ram and mem_addr_width == 0: + raise InspectError( + "Could not identify RAM block address width", filename=architecture_file + ) + + return mem_addr_width + + +def determine_min_w(log_filename): + """ + Determines the minimum channel width from a VPR log file. + """ + min_w_regex = re.compile( + r"\s*Best routing used a channel width factor of (?P<min_w>\d+)."
+ ) + with open(log_filename) as file: + for line in file: + match = min_w_regex.match(line) + if match: + return int(match.group("min_w")) + + raise InspectError( + "Failed to find minimum channel width.", filename=log_filename + ) diff --git a/vtr_flow/scripts/python_libs/vtr/odin/__init__.py b/vtr_flow/scripts/python_libs/vtr/odin/__init__.py new file mode 100644 index 00000000000..b55d5c160cd --- /dev/null +++ b/vtr_flow/scripts/python_libs/vtr/odin/__init__.py @@ -0,0 +1,4 @@ +""" + init for the ODIN module +""" +from .odin import run diff --git a/vtr_flow/scripts/python_libs/vtr/odin/odin.py b/vtr_flow/scripts/python_libs/vtr/odin/odin.py new file mode 100644 index 00000000000..b224c37e4e6 --- /dev/null +++ b/vtr_flow/scripts/python_libs/vtr/odin/odin.py @@ -0,0 +1,162 @@ +""" + Module to run ODIN II with its various arguments +""" +import shutil +from collections import OrderedDict +from pathlib import Path +from vtr import ( + find_vtr_file, + file_replace, + determine_memory_addr_width, + verify_file, + CommandRunner, +) + +#pylint: disable=too-many-arguments, too-many-locals +def run( + architecture_file, + circuit_file, + output_netlist, + command_runner=CommandRunner(), + temp_dir=Path("."), + odin_args="--adder_type default", + log_filename="odin.out", + odin_exec=None, + odin_config=None, + min_hard_mult_size=3, + min_hard_adder_size=1 +): + """ + Runs ODIN II on the specified architecture file and circuit file + + .. note :: Usage: vtr.odin.run(,,,[OPTIONS]) + + Arguments + ========= + architecture_file : + Architecture file to target + + circuit_file : + Circuit file to optimize + + output_netlist : + File name to output the resulting circuit to + + Other Parameters + ---------------- + command_runner : + A CommandRunner object used to run system commands + + temp_dir : + Directory to run in (created if non-existent) + + odin_args: + A dictionary of keyword arguments to pass on to ODIN II + + log_filename : + File to log result to + + odin_exec: + ODIN II executable to be run + + odin_config: + The ODIN II configuration file + + min_hard_mult_size : + Tells ODIN II the minimum multiplier size that should be implemented using + hard multiplier (if available) + + min_hard_adder_size : + Tells ODIN II the minimum adder size that should be implemented + using hard adder (if available). 
+ + """ + temp_dir = Path(temp_dir) if not isinstance(temp_dir, Path) else temp_dir + temp_dir.mkdir(parents=True, exist_ok=True) + + if odin_args is None: + odin_args = OrderedDict() + + # Verify that files are Paths or convert them to Paths and check that they exist + architecture_file = verify_file(architecture_file, "Architecture") + circuit_file = verify_file(circuit_file, "Circuit") + output_netlist = verify_file(output_netlist, "Output netlist", False) + + if odin_exec is None: + odin_exec = find_vtr_file("odin_II", is_executable=True) + + if odin_config is None: + odin_base_config = find_vtr_file("basic_odin_config_split.xml") + + # Copy the config file + odin_config = "odin_config.xml" + odin_config_full_path = str(temp_dir / odin_config) + shutil.copyfile(odin_base_config, odin_config_full_path) + + # Update the config file + file_replace( + odin_config_full_path, + { + "XXX": circuit_file.name, + "YYY": architecture_file.name, + "ZZZ": output_netlist.name, + "PPP": determine_memory_addr_width(str(architecture_file)), + "MMM": min_hard_mult_size, + "AAA": min_hard_adder_size, + }, + ) + disable_odin_xml = False + if "disable_odin_xml" in odin_args: + disable_odin_xml = True + del odin_args["disable_odin_xml"] + use_odin_simulation = False + if "use_odin_simulation" in odin_args: + use_odin_simulation = True + del odin_args["use_odin_simulation"] + + cmd = [odin_exec, "-c", odin_config] + for arg, value in odin_args.items(): + if isinstance(value, bool) and value: + cmd += ["--" + arg] + elif isinstance(value, (str, int, float)): + cmd += ["--" + arg, str(value)] + else: + pass + cmd += ["-U0"] + if disable_odin_xml: + cmd += [ + "-a", + architecture_file.name, + "-V", + circuit_file.name, + "-o", + output_netlist.name, + ] + + command_runner.run_system_command( + cmd, temp_dir=temp_dir, log_filename=log_filename, indent_depth=1 + ) + + if use_odin_simulation: + sim_dir = temp_dir / "simulation_init" + sim_dir.mkdir() + cmd = [ + odin_exec, + "-b", + output_netlist.name, + "-a", + architecture_file.name, + "-sim_dir", + str(sim_dir), + "-g", + "100", + "--best_coverage", + "-U0", + ] + command_runner.run_system_command( + cmd, + temp_dir=temp_dir, + log_filename="sim_produce_vector.out", + indent_depth=1, + ) +#pylint: enable=too-many-arguments, too-many-locals \ No newline at end of file diff --git a/vtr_flow/scripts/python_libs/vtr/util.py b/vtr_flow/scripts/python_libs/vtr/util.py new file mode 100644 index 00000000000..c7cd39681a1 --- /dev/null +++ b/vtr_flow/scripts/python_libs/vtr/util.py @@ -0,0 +1,582 @@ +""" + Module to utilize many of the tools needed for VTR. +""" +import os +from pathlib import PurePath +from pathlib import Path +import sys +import re +import time +import subprocess +import distutils.spawn as distutils_spawn +import argparse +import csv +from collections import OrderedDict + +from vtr.error import VtrError, InspectError, CommandError + +VERBOSITY_CHOICES = range(5) + + +class RawDefaultHelpFormatter( + argparse.ArgumentDefaultsHelpFormatter, argparse.RawDescriptionHelpFormatter +): + """ + An argparse formatter which supports both default arguments and raw + formatting of description/epilog + """ + + +# pylint: disable=too-many-arguments, too-many-instance-attributes, too-few-public-methods, too-many-locals +class CommandRunner: + """ + An object for running system commands with timeouts, memory limits and varying verbose-ness + + Arguments + ========= + timeout_sec: maximum walk-clock-time of the command in seconds. 
Default: None + max_memory_mb: maximum memory usage of the command in megabytes (if supported). + Default: None + track_memory: Whether to track usage of the command (disabled if not supported). + Default: True + verbose_error: Produce more verbose output if the command fails. + Default: Equal to verbose + verbose: Produce more verbose output. Default: False + echo_cmd: Echo the command before running. Default: Equal to verbose + indent: The string specifying a single indent (used in verbose mode) + valgrind: Indicates if commands should be run with valgrind + """ + + def __init__( + self, + timeout_sec=None, + max_memory_mb=None, + track_memory=True, + verbose_error=None, + verbose=False, + echo_cmd=None, + indent="\t", + show_failures=False, + valgrind=False, + ): + + if verbose_error is None: + verbose_error = verbose + if echo_cmd is None: + echo_cmd = verbose + + self._timeout_sec = timeout_sec + self._max_memory_mb = max_memory_mb + self._track_memory = track_memory + self._verbose_error = verbose_error + self._verbose = verbose + self._echo_cmd = echo_cmd + self._indent = indent + self._show_failures = show_failures + self._valgrind = valgrind + + def run_system_command( + self, cmd, temp_dir, log_filename=None, expected_return_code=0, indent_depth=0 + ): + """ + Runs the specified command in the system shell. + + Returns + ======= + A tuple of the command output (list of lines) and the command's return code. + + Arguments + ========= + cmd: list of tokens that form the command to be run + log_filename: the log fiel name for the command's output. Default: derived from command + temp_dir: The directory to run the command in. Default: None (uses object default). + expected_return_code: The expected return code from the command. + If the actula return code does not match, will generate an exception. Default: 0 + indent_depth: How deep to indent the tool output in verbose mode. Default 0 + """ + # Save the original command + orig_cmd = cmd + temp_dir = Path(temp_dir) if not isinstance(temp_dir, Path) else temp_dir + + # If no log file is specified the name is based on the executed command + log_filename = ( + PurePath(orig_cmd[0]).name + ".out" + if log_filename is None + else log_filename + ) + + # Limit memory usage? + memory_limit = ["ulimit", "-Sv", "{val};".format(val=self._max_memory_mb)] + cmd = ( + memory_limit + cmd + if self._max_memory_mb and check_cmd(memory_limit[0]) + else cmd + ) + + # Enable memory tracking? + memory_tracking = ["/usr/bin/env", "time", "-v"] + if self._track_memory and check_cmd(memory_tracking[0]): + cmd = ( + memory_tracking + + [ + "valgrind", + "--leak-check=full", + "--suppressions={}".format(find_vtr_file("valgrind.supp")), + "--error-exitcode=1", + "--errors-for-leak-kinds=none", + "--track-origins=yes", + "--log-file=valgrind.log", + "--error-limit=no", + ] + + cmd + if self._valgrind + else memory_tracking + cmd + ) + + # Flush before calling subprocess to ensure output is ordered + # correctly if stdout is buffered + sys.stdout.flush() + + # Echo the command? 
+ if self._echo_cmd: + print(cmd) + + # Begin timing + start_time = time.time() + + cmd_output = [] + cmd_returncode = None + proc = None + try: + # Call the command + stderr = None if self._valgrind else subprocess.STDOUT + proc = subprocess.Popen( + cmd, + stdout=subprocess.PIPE, # We grab stdout + stderr=stderr, # stderr redirected to stderr + universal_newlines=True, # Lines always end in \n + cwd=str(temp_dir), # Where to run the command + ) + + # Read the output line-by-line and log it + # to a file. + # + # We do this rather than use proc.communicate() + # to get interactive output + with (temp_dir / log_filename).open("w") as log_f: + # Print the command at the top of the log + log_f.write(" ".join(cmd)) + log_f.write("\n") + + # Read from subprocess output + for line in proc.stdout: + + # Send to log file + log_f.write(line) + + # Save the output + cmd_output.append(line) + + # Abort if over time limit + elapsed_time = time.time() - start_time + if self._timeout_sec and elapsed_time > self._timeout_sec: + proc.terminate() + + # Should now be finished (since we stopped reading from proc.stdout), + # sets the return code + proc.wait() + + finally: + # Clean-up if we did not exit cleanly + if proc: + if proc.returncode is None: + # Still running, stop it + proc.terminate() + + cmd_returncode = proc.returncode + + cmd_errored = cmd_returncode != expected_return_code + + # Send to stdout + if self._show_failures and ( + self._verbose or (cmd_errored and self._verbose_error) + ): + for line in cmd_output: + print(indent_depth * self._indent + line,) + + if self._show_failures and cmd_errored: + raise CommandError( + "Executable {exec_name} failed".format( + exec_name=PurePath(orig_cmd[0]).name + ), + cmd=cmd, + log=str(temp_dir / log_filename), + returncode=cmd_returncode, + ) + if cmd_errored: + raise VtrError("{}".format(PurePath(orig_cmd[0]).name)) + return cmd_output, cmd_returncode + + # pylint: enable=too-many-arguments, too-many-instance-attributes, too-few-public-methods, too-many-locals + + +def check_cmd(command): + """ + Return True if command can be run, False otherwise. 
+ """ + + return Path(command).exists() + + +def write_tab_delimitted_csv(filepath, rows): + """ + Write out the data provied in a tab-delimited CSV format + + filepath: The filepath to write the data to + rows: An iterable of dictionary-like elements; each element + provides a set key-value pairs corresponding to a row + in the output file + """ + # Calculate the max width of each column + columns = OrderedDict() + for row in rows: + for key, value in row.items(): + + if key not in columns: + columns[key] = max(len(key), len(str(value))) + else: + columns[key] = max(columns[key], len(str(value))) + + # Write the elements + with open(filepath, "w+") as file: + writer = csv.writer(file, delimiter="\t") + + # Write out the header + header = [] + for col_name, col_width in columns.items(): + header.append("{:{width}}".format(col_name, width=col_width)) + writer.writerow(header) + + # Write rows + for row in rows: + values = [] + for col_name, col_width in columns.items(): + values.append("{:{width}}".format(row[col_name], width=col_width)) + writer.writerow(values) + + +def load_tab_delimited_csv(filepath): + """ + loads a tab delimted csv as a list of ordered dictionaries + """ + data = [] + with open(filepath) as file: + reader = csv.reader(file, delimiter="\t") + + header = [] + for csv_row in reader: + if len(header) == 0: + header = [val.strip() for val in csv_row] + else: + data_row = OrderedDict() + + for i, value in enumerate(csv_row): + data_row[header[i]] = value.strip() + + data.append(data_row) + + return data + + +def print_verbose(min_verbosity, curr_verbosity, string, endl=True): + """ + Print string if curr_verbosity is gteq min_verbosity + """ + if curr_verbosity >= min_verbosity: + if endl: + print(string) + else: + print(string,) + + +def find_vtr_file(filename, is_executable=False): + """ + Attempts to find a VTR related file by searching the environment. + + Checking the following places: + 1) System path (if is_executable=True) + 2) The inferred vtr root from environment variables or the script file location + + """ + # We assume exectuables are specified in the unix style (no .exe), + # if it was specified with .exe, strip it off + file_path = PurePath(filename) + if file_path.suffix == ".exe": + filename = file_path.name + + # + # Check if it is on the path (provided it is executable) + # + if is_executable: + # Search first for the non-exe version + result = distutils_spawn.find_executable(filename) + if result: + return result + + # If not found try the .exe version + result = distutils_spawn.find_executable(filename + ".exe") + if result: + return result + + vtr_root = find_vtr_root() + + # Check the inferred VTR root + result = find_file_from_vtr_root(filename, vtr_root, is_executable=is_executable) + if result: + return result + + # Since we stripped off the .exe, try looking for the .exe version + # as a last resort (i.e. 
on Windows/cygwin) + if is_executable: + result = find_file_from_vtr_root( + filename + ".exe", vtr_root, is_executable=is_executable + ) + if result: + return result + + raise ValueError( + "Could not find {type} {file}".format( + type="executable" if is_executable else "file", file=filename + ) + ) + + +def find_file_from_vtr_root(filename, vtr_root, is_executable=False): + """ + Given a vtr_root and a filename searches for the file recursively + under some common VTR directories + """ + for subdir in ["vpr", "abc", "abc_with_bb_support", "ODIN_II", "vtr_flow", "ace2"]: + directory_path = Path(vtr_root) / subdir + for file_path in directory_path.glob("**/*"): + if file_path.name == filename: + if file_path.is_file(): + if is_executable and os.access(str(file_path), os.X_OK): + # Found an executable file as required + return str(file_path) + # Found a file as required + return str(file_path) + return None + + +def find_vtr_root(): + """ + finds the root directory of VTR + """ + for env_var in ["VTR_ROOT"]: + if env_var in os.environ: + return os.environ[env_var] + + # We assume that this file is in /vtr_flow/python_libs/verilogtorouting + inferred_vtr_root = Path(__file__).parent.parent.parent.parent.parent + + if inferred_vtr_root.is_dir(): + return str(inferred_vtr_root) + raise VtrError( + "Could not find VTR root directory. Try setting VTR_ROOT environment variable." + ) + + +def file_replace(filename, search_replace_dict): + """ + searches file for specified values and replaces them with specified values. + """ + lines = [] + with open(filename, "r") as file: + lines = file.readlines() + + with open(filename, "w") as file: + for line in lines: + for search, replace in search_replace_dict.items(): + line = line.replace(search, str(replace)) + print(line, file=file) + + +def relax_w(min_w, relax_factor, base=2): + """ + Scale min_w by relax_factor and round to the nearest multiple of base. + """ + relaxed_w = int(base * round(min_w * relax_factor / base)) + return relaxed_w + + +def load_list_file(list_file): + """ + Loads a file containing a single value-per-line, + potentially with '#' comments + """ + values = [] + with open(list_file) as file: + for line in file: + line = line.strip() + # Handle comments + if "#" in line: + line = line.split("#")[0] + if line == "": + continue + values.append(line) + return values + + +def load_config_lines(filepath, allow_includes=True): + """ + Loads the lines of a file, stripping blank lines and '#' comments. + + If allow_includes is set then lines of the form: + + @include "another_file.txt" + + will cause the specified file to be included in-line. + The @included filename is interpretted as relative to the directory + containing filepath. 
+ + Returns a list of lines + """ + config_lines = [] + + blank_regex = re.compile(r"^\s*$") + try: + with open(filepath) as file: + for line in file: + # Trim '\n' + line = line.strip() + + # Trim comments + if "#" in line: + line = line.split("#")[0] + + # Skip blanks + if blank_regex.match(line): + continue + + if line.startswith("%include"): + if allow_includes: + components = line.split() + assert len(components) == 2 + + include_file = components[1].strip('"') # Strip quotes + include_file_abs = str(Path(filepath).parent / include_file) + + # Recursively load the config + config_lines += load_config_lines( + include_file_abs, allow_includes=allow_includes + ) + else: + raise InspectError( + "@include not allowed in this file", filepath + ) + else: + config_lines.append(line) + except IOError as error: + raise InspectError("Error opening config file ({})".format(error)) + + return config_lines + + +def verify_file(file, file_type, should_exist=True): + """ + Verifies that the file is a Pathlib object and if not makes it one. + Ensures that the file exists by default. + This makes it possible to pass files into the various files as strings or as pathlib objects + """ + if not isinstance(file, Path): + file = Path(file) + if should_exist and not file.is_file(): + raise Exception( + "{file_type} file does not exist: {file} ".format( + file_type=file_type, file=file + ) + ) + + return file + + +def format_elapsed_time(time_delta): + """ + formats a time into desired string format + """ + return "%.2f seconds" % time_delta.total_seconds() + + +def argparse_str2bool(str_val): + """ + parses a string boolean to a boolean + """ + str_val = str_val.lower() + if str_val in ["yes", "on", "true", "1"]: + return True + if str_val in ["no", "off", "false", "0"]: + return False + raise argparse.ArgumentTypeError("Boolean value expected.") + + +def get_next_run_dir(base_dir): + """ + Returns the next unused run directory within base_dir. + + Does not create the directory + """ + return str(PurePath(base_dir) / run_dir_name(get_next_run_number(base_dir))) + + +def get_latest_run_dir(base_dir): + """ + Returns the run directory with the highest run number in base_dir + """ + latest_run_number = get_latest_run_number(base_dir) + + if latest_run_number is None: + return None + + return str(PurePath(base_dir) / run_dir_name(latest_run_number)) + + +def get_next_run_number(base_dir): + """ + Returns the next available (i.e. 
non-existing) run number in base_dir + """ + latest_run_number = get_latest_run_number(base_dir) + + if latest_run_number is None: + next_run_number = 0 + else: + next_run_number = latest_run_number + 1 + + return next_run_number + + +def get_latest_run_number(base_dir): + """ + Returns the highest run number of all run directories with in base_dir + """ + run_number = 0 + run_dir = Path(base_dir) / run_dir_name(run_number) + + if not run_dir.exists: + # No existing run directories + return None + + while run_dir.exists: + run_number += 1 + run_dir = Path(base_dir) / run_dir_name(run_number) + + # Currently one-past the last existing run dir, + # to get latest existing, subtract one + return run_number - 1 + + +def run_dir_name(run_num): + """ + Returns the formatted directory name for a specific run number + """ + return "run{:03d}".format(run_num) diff --git a/vtr_flow/scripts/python_libs/vtr/vpr/__init__.py b/vtr_flow/scripts/python_libs/vtr/vpr/__init__.py new file mode 100644 index 00000000000..895950332ef --- /dev/null +++ b/vtr_flow/scripts/python_libs/vtr/vpr/__init__.py @@ -0,0 +1,4 @@ +""" + __init__ for the VPR Module +""" +from .vpr import run, run_relax_w, cmp_full_vs_incr_sta, run_second_time diff --git a/vtr_flow/scripts/python_libs/vtr/vpr/vpr.py b/vtr_flow/scripts/python_libs/vtr/vpr/vpr.py new file mode 100644 index 00000000000..2a17f7bfda7 --- /dev/null +++ b/vtr_flow/scripts/python_libs/vtr/vpr/vpr.py @@ -0,0 +1,406 @@ +""" + Module to interact with VPR and its various options +""" +from collections import OrderedDict +from pathlib import Path +from vtr import ( + find_vtr_file, + CommandRunner, + relax_w, + determine_min_w, + verify_file +) +from vtr.error import InspectError + +#pylint: disable=too-many-arguments +def run_relax_w( + architecture, + circuit, + circuit_name=None, + command_runner=CommandRunner(), + temp_dir=Path("."), + relax_w_factor=1.3, + vpr_exec=None, + logfile_base="vpr", + vpr_args=None, +): + """ + Runs VPR twice: + + 1st: To find the minimum channel width + + 2nd: At relaxed channel width (e.g. for critical path delay) + + .. note :: Use: vtr.vpr.run_relax_w(,,[OPTIONS]) + + Arguments + ========= + architecture: + Architecture file + + circuit: + Input circuit netlist + + Other Parameters + ---------------- + circuit_name: + Name of the circuit file + + command_runner: + CommandRunner object + + temp_dir: + Directory to run in + + relax_w_factor: + Factor by which to relax minimum channel width for critical path delay routing + + vpr_exec: + Path to the VPR executable + + logfile_base: + Base name for log files (e.g. 
"vpr" produces vpr.min_w.out, vpr.relaxed_w.out) + + vpr_args: + Extra arguments for VPR + """ + if vpr_args is None: + vpr_args = OrderedDict() + temp_dir = Path(temp_dir) if not isinstance(temp_dir, Path) else temp_dir + temp_dir.mkdir(parents=True, exist_ok=True) + + # Verify that files are Paths or convert them to Paths and check that they exist + architecture = verify_file(architecture, "Architecture") + circuit = verify_file(circuit, "Circuit") + + vpr_min_w_log = ".".join([logfile_base, "out"]) + vpr_relaxed_w_log = ".".join([logfile_base, "crit_path", "out"]) + max_router_iterations = None + + if "max_router_iterations" in vpr_args: + max_router_iterations = vpr_args["max_router_iterations"] + del vpr_args["max_router_iterations"] + + if "write_rr_graph" in vpr_args: + del vpr_args["write_rr_graph"] + + if "analysis" in vpr_args: + del vpr_args["analysis"] + + if "route" in vpr_args: + del vpr_args["route"] + + if vpr_exec is None: + vpr_exec = find_vtr_file("vpr", is_executable=True) + + run( + architecture, + circuit, + circuit_name, + command_runner, + temp_dir, + log_filename=vpr_min_w_log, + vpr_exec=vpr_exec, + vpr_args=vpr_args, + ) + + if ("pack" in vpr_args or "place" in vpr_args) and "route" not in vpr_args: + # Don't look for min W if routing was not run + return + if max_router_iterations: + vpr_args["max_router_iterations"] = max_router_iterations + min_w = determine_min_w(str(temp_dir / vpr_min_w_log)) + + relaxed_w = relax_w(min_w, relax_w_factor) + + vpr_args["route"] = True # Re-route only + vpr_args["route_chan_width"] = relaxed_w # At a fixed channel width + + # VPR does not support performing routing when fixed pins + # are specified, and placement is not run; so remove the option + + run( + architecture, + circuit, + circuit_name, + command_runner, + temp_dir, + log_filename=vpr_relaxed_w_log, + vpr_exec=vpr_exec, + vpr_args=vpr_args, + ) + + +def run( + architecture, + circuit, + circuit_name=None, + command_runner=CommandRunner(), + temp_dir=Path("."), + log_filename="vpr.out", + vpr_exec=None, + vpr_args=None, +): + """ + Runs VPR with the specified configuration + + .. 
note :: Usage: vtr.vpr.run(,,[OPTIONS]) + + Arguments + ========= + architecture: + Architecture file + + circuit: + Input circuit file + + Other Parameters + ---------------- + circuit_name: + Name of the circuit file + + command_runner: + CommandRunner object + + temp_dir: + Directory to run in + + log_filename : + File to log result to + + vpr_exec: + Path to the VPR executable + + vpr_args: + Extra arguments for VPR + + + """ + + if vpr_args is None: + vpr_args = OrderedDict() + temp_dir = Path(temp_dir) if not isinstance(temp_dir, Path) else temp_dir + temp_dir.mkdir(parents=True, exist_ok=True) + + if vpr_exec is None: + vpr_exec = find_vtr_file("vpr", is_executable=True) + + # Verify that files are Paths or convert them to Paths and check that they exist + architecture = verify_file(architecture, "Architecture") + circuit = verify_file(circuit, "Circuit") + cmd = [] + if circuit_name: + cmd = [ + vpr_exec, + architecture.name, + circuit_name, + "--circuit_file", + circuit.name, + ] + else: + cmd = [vpr_exec, architecture.name, circuit.name] + + # Translate arbitrary keyword arguments into options for VPR + + for arg, value in vpr_args.items(): + if isinstance(value, bool): + if not value: + pass + cmd += ["--" + arg] + else: + if isinstance(value, list): + cmd += ["--" + arg] + for item in value: + cmd += [str(item)] + else: + cmd += ["--" + arg, str(value)] + + command_runner.run_system_command( + cmd, temp_dir=temp_dir, log_filename=log_filename, indent_depth=1 + ) + +def run_second_time( + architecture, + circuit, + circuit_name=None, + command_runner=CommandRunner(), + temp_dir=Path("."), + vpr_exec=None, + second_run_args=None, + rr_graph_ext=".xml", +): + """ + Run vpr again with additional parameters. + This is used to ensure that files generated by VPR can be re-loaded by it + + .. 
note :: Usage: vtr.vpr.run_second_time(,,[OPTIONS]) + + Arguments + ========= + architecture: + Architecture file + + circuit: + Input circuit file + + Other Parameters + ---------------- + circuit_name: + Name of the circuit file + + command_runner: + CommandRunner object + + temp_dir: + Directory to run in + + log_filename : + File to log result to + + vpr_exec: + Path to the VPR executable + + second_run_args: + Extra arguments for VPR + + """ + temp_dir = Path(temp_dir) if not isinstance(temp_dir, Path) else temp_dir + temp_dir.mkdir(parents=True, exist_ok=True) + + rr_graph_out_file = "" + if "write_rr_graph" in second_run_args: + rr_graph_out_file = second_run_args["write_rr_graph"] + rr_graph_ext = Path(rr_graph_out_file).suffix + + rr_graph_out_file2 = "rr_graph2" + rr_graph_ext + if "write_rr_graph" in second_run_args: + second_run_args["read_rr_graph"] = rr_graph_out_file + second_run_args["write_rr_graph"] = rr_graph_out_file2 + + #run VPR + run( + architecture, + circuit, + circuit_name, + command_runner, + temp_dir, + log_filename="vpr_second_run.out", + vpr_exec=vpr_exec, + vpr_args=second_run_args, + ) + + if "write_rr_graph" in second_run_args: + cmd = ["diff", rr_graph_out_file, rr_graph_out_file2] + _, diff_result = command_runner.run_system_command( + cmd, temp_dir, log_filename="diff.rr_graph.out", indent_depth=1 + ) + if diff_result: + raise InspectError( + "failed: vpr (RR Graph XML output not consistent when reloaded)" + ) + +def cmp_full_vs_incr_sta( + architecture, + circuit, + circuit_name=None, + command_runner=CommandRunner(), + vpr_args=None, + temp_dir=Path("."), + vpr_exec=None, +): + """ + Sanity check that full STA and the incremental STA produce the same *.net, *.place, *.route + files as well as identical timing report files + + .. 
note :: Use: vtr.vpr.cmp_full_vs_incr_sta(,,,[OPTIONS]) + + Arguments + ========= + architecture: + Architecture file + + circuit: + Input circuit file + + Other Parameters + ---------------- + circuit_name: + Name of the circuit file + + command_runner: + CommandRunner object + + temp_dir: + Directory to run in + + vpr_exec: + Path to the VPR executable + + vpr_args: + Extra arguments for VPR + """ + + # Verify that files are Paths or convert them to Paths and check that they exist + architecture = verify_file(architecture, "Architecture") + circuit = verify_file(circuit, "Circuit") + if not circuit_name: + circuit_name = circuit.stem + default_output_filenames = [ + "{}.net".format(circuit_name), + "{}.place".format(circuit_name), + "{}.route".format(circuit_name), + "report_timing.setup.rpt", + "report_timing.hold.rpt", + "report_unconstrained_timing.setup.rpt", + "report_unconstrained_timing.hold.rpt", + ] + + # The full STA flow should have already been run + # directly rename the output files + for filename in default_output_filenames: + cmd = ["mv", filename, "full_sta_{}".format(filename)] + command_runner.run_system_command( + cmd, temp_dir=temp_dir, log_filename="move.out", indent_depth=1 + ) + + # run incremental STA flow + incremental_vpr_args = vpr_args + incremental_vpr_args["timing_update_type"] = "incremental" + + run( + architecture, + circuit, + circuit_name, + command_runner, + temp_dir, + log_filename="vpr.incr_sta.out", + vpr_exec=vpr_exec, + vpr_args=incremental_vpr_args, + ) + + # Rename the incremental STA output files + for filename in default_output_filenames: + cmd = ["mv", filename, "incremental_sta_{}".format(filename)] + command_runner.run_system_command( + cmd, temp_dir=temp_dir, log_filename="move.out", indent_depth=1 + ) + + failed_msg = "Failed with these files (not identical):" + identical = True + + for filename in default_output_filenames: + cmd = [ + "diff", + "full_sta_{}".format(filename), + "incremental_sta_{}".format(filename), + ] + _, cmd_return_code = command_runner.run_system_command( + cmd, temp_dir=temp_dir, log_filename="diff.out", indent_depth=1 + ) + if cmd_return_code: + identical = False + failed_msg += " {}".format(filename) + + if not identical: + raise InspectError(failed_msg) +#pylint: disable=too-many-arguments diff --git a/vtr_flow/scripts/run_vtr_flow.py b/vtr_flow/scripts/run_vtr_flow.py new file mode 100755 index 00000000000..6ed8300f08c --- /dev/null +++ b/vtr_flow/scripts/run_vtr_flow.py @@ -0,0 +1,659 @@ +#!/usr/bin/env python3 +""" + Module to run the VTR Flow +""" +import sys +from pathlib import Path +import argparse +import shutil +import textwrap +import socket +from datetime import datetime +from collections import OrderedDict +#pylint: disable=wrong-import-position, import-error +sys.path.insert(0, str(Path(__file__).resolve().parent / "python_libs")) +import vtr +#pylint: enable=wrong-import-position, import-error + +BASIC_VERBOSITY = 1 + +#pylint: disable=too-few-public-methods +class VtrStageArgparseAction(argparse.Action): + """ + Class to parse the VTR stages to begin and end at. 
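+
+    For example (an illustrative sketch): with this action registered on the
+    ``-start``/``-end`` options below, parsing ``-start abc`` stores
+    ``vtr.VtrStage.abc`` in the destination attribute, while an unrecognized
+    stage name raises an argparse error.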
+ """ + + def __call__(self, parser, namespace, value, option_string=None): + if value == "odin": + setattr(namespace, self.dest, vtr.VtrStage.odin) + elif value == "abc": + setattr(namespace, self.dest, vtr.VtrStage.abc) + elif value == "vpr": + setattr(namespace, self.dest, vtr.VtrStage.vpr) + elif value == "lec": + setattr(namespace, self.dest, vtr.VtrStage.lec) + else: + raise argparse.ArgumentError(self, "Invalid VTR stage '" + value + "'") +#pylint: enable=too-few-public-methods + +def vtr_command_argparser(prog=None): + """ + The VTR command arg parser + """ + usage = "%(prog)s circuit_file architecture_file [options]" + description = textwrap.dedent( + """ + Runs a single architecture and circuit through the VTR CAD flow - mapping + the circuit onto the target archietcture. + + Any unrecognzied arguments are passed to VPR. + """ + ) + + epilog = textwrap.dedent( + """ + Examples + -------- + + Produce more output (e.g. individual tool outputs): + + %(prog)s arch.xml circuit.v -v 2 + + + Route at a fixed channel width: + + %(prog)s arch.xml circuit.v --route_chan_width 100 + + Passing '--route_chan_width 100' will force VPR to route at a fixed channel + width, rather then attempting to find the minimum channel width + (VPR's default behaviour). + + + Run in the directory 'test' (rather than in a temporary directory): + + %(prog)s arch.xml circuit.v -temp_dir test + + + Enable power analysis: + + %(prog)s arch.xml circuit.v -power_tech power_tech.xml + + + Only run specific stage (assumes required results have already been generated): + + %(prog)s arch.xml circuit.blif -start vpr -end vpr + + """ + ) + + parser = argparse.ArgumentParser( + prog=prog, + usage=usage, + description=description, + epilog=epilog, + formatter_class=vtr.RawDefaultHelpFormatter, + ) + + # + # Major arguments + # + parser.add_argument( + "circuit_file", help="The circuit to map to the target architecture." + ) + parser.add_argument("architecture_file", help="The FPGA architecture to target.") + parser.add_argument( + "-start", + "-starting_stage", + choices=str(list(vtr.VtrStage)), + default=vtr.VtrStage.odin, + action=VtrStageArgparseAction, + help="Starting stage of the VTR flow.", + ) + parser.add_argument( + "-delete_intermediate_files", + default=True, + action="store_false", + dest="keep_intermediate_files", + help="Determines if intermediate files are deleted.", + ) + parser.add_argument( + "-delete_result_files", + default=True, + action="store_false", + dest="keep_result_files", + help="Determines if result files are deleted.", + ) + parser.add_argument( + "-end", + "-ending_stage", + choices=str(list(vtr.VtrStage)), + default=vtr.VtrStage.vpr, + action=VtrStageArgparseAction, + help="Ending stage of the VTR flow.", + ) + + parser.add_argument( + "-verbose", + default=False, + action="store_true", + dest="verbose", + help="Verbosity of the script.", + ) + + # + # Power arguments + # + power = parser.add_argument_group( + "Power", description="Power Analysis Related Options" + ) + power.add_argument( + "-power", + default=False, + action="store_true", + dest="do_power", + help="Track the memory usage for each stage. Requires /usr/bin/time -v," + + " disabled if not available.", + ) + power.add_argument( + "-cmos_tech", + default=None, + dest="power_tech", + metavar="POWER_TECH_FILE", + help="Enables power analysis using the specified technology file." 
+ + " Power analysis is disabled if not specified.", + ) + + # + # House keeping arguments + # + house_keeping = parser.add_argument_group( + "House Keeping", + description="Configuration related to how files/time/memory are used by the script.", + ) + + house_keeping.add_argument( + "-temp_dir", + default=None, + help="Directory to run the flow in (will be created if non-existant).", + ) + + house_keeping.add_argument( + "-name", default=None, help="Name for this run to be output." + ) + + house_keeping.add_argument( + "-track_memory_usage", + default=False, + action="store_true", + dest="track_memory_usage", + help="Track the memory usage for each stage." + + " Requires /usr/bin/time -v, disabled if not available.", + ) + + house_keeping.add_argument( + "-show_failures", + default=False, + action="store_true", + dest="show_failures", + help="Tells the flow to display failures in the console.", + ) + + house_keeping.add_argument( + "-limit_memory_usage", + default=None, + metavar="MAX_MEMORY_MB", + help="Specifies the maximum memory usable by any stage. " + "Not supported on some platforms (requires ulimit).", + ) + + house_keeping.add_argument( + "-timeout", + default=14 * 24 * 60 * 60, # 14 days + type=float, + metavar="TIMEOUT_SECONDS", + help="Maximum time in seconds to spend on a single stage.", + ) + house_keeping.add_argument( + "-expect_fail", + default=None, + type=str, + help="Informs VTR that this run is expected to fail with this message.", + ) + house_keeping.add_argument( + "-valgrind", + default=False, + action="store_true", + help="Runs the flow with valgrind with the following options (--leak-check=full," + + " --errors-for-leak-kinds=none, --error-exitcode=1, --track-origins=yes)", + ) + # + # ABC arguments + # + abc = parser.add_argument_group("abc", description="Arguments used by ABC") + abc.add_argument( + "-iterative_bb", + default=False, + action="store_true", + dest="iterative_bb", + help="Use iterative black-boxing flow for multi-clock circuits", + ) + abc.add_argument( + "-once_bb", + default=False, + action="store_true", + dest="once_bb", + help="Use the black-boxing flow a single time", + ) + abc.add_argument( + "-blanket_bb", + default=False, + action="store_true", + dest="blanket_bb", + help="Use iterative black-boxing on all the clocks", + ) + abc.add_argument( + "-use_old_latches_restoration_script", + default=False, + action="store_true", + dest="use_old_latches_restoration_script", + help="Use the old latches restoration script", + ) + abc.add_argument( + "-check_equivalent", + default=False, + action="store_true", + help="Enables Logical Equivalence Checks", + ) + abc.add_argument( + "-use_old_abc_script", + default=False, + action="store_true", + help="Enables use of legacy ABC script adapted for new ABC", + ) + abc.add_argument( + "-lut_size", type=int, help="Tells ABC the LUT size of the FPGA architecture" + ) + # + # ODIN II arguments + # + odin = parser.add_argument_group( + "Odin", description="Arguments to be passed to ODIN II" + ) + odin.add_argument( + "-adder_type", + default="default", + help="Tells ODIN II the adder type used in this configuration", + ) + odin.add_argument( + "-adder_cin_global", + default=False, + action="store_true", + dest="adder_cin_global", + help="Tells ODIN II to connect the first cin in an adder/subtractor" + + "chain to a global gnd/vdd net.", + ) + odin.add_argument( + "-disable_odin_xml", + default=False, + action="store_true", + dest="disable_odin_xml", + help="Disables the ODIN xml", + ) + odin.add_argument( + 
"-use_odin_simulation", + default=False, + action="store_true", + dest="use_odin_simulation", + help="Tells odin to run simulation.", + ) + odin.add_argument( + "-min_hard_mult_size", + default=3, + type=int, + metavar="min_hard_mult_size", + help="Tells ODIN II the minimum multiplier size to be implemented using hard multiplier.", + ) + odin.add_argument( + "-min_hard_adder_size", + default=1, + type=int, + help="Tells ODIN II the minimum adder size that should be implemented using hard adder.", + ) + # + # VPR arguments + # + vpr = parser.add_argument_group( + "Vpr", description="Arguments to be parsed and then passed to VPR" + ) + vpr.add_argument( + "-crit_path_router_iterations", + type=int, + default=150, + help="Tells VPR the amount of iterations allowed to obtain the critical path.", + ) + vpr.add_argument( + "-fix_pins", + type=str, + help="Controls how the placer handles I/O pads during placement.", + ) + vpr.add_argument( + "-relax_w_factor", + type=float, + default=1.3, + help="Factor by which to relax minimum channel width for critical path delay routing", + ) + vpr.add_argument( + "-verify_rr_graph", + default=False, + action="store_true", + help="Tells VPR to verify the routing resource graph.", + ) + vpr.add_argument( + "-rr_graph_ext", + default=".xml", + type=str, + help="Determines the output rr_graph files' extention.", + ) + vpr.add_argument( + "-check_route", + default=False, + action="store_true", + help="Tells VPR to run final analysis stage.", + ) + vpr.add_argument( + "-check_place", + default=False, + action="store_true", + help="Tells VPR to run routing stage", + ) + vpr.add_argument( + "-sdc_file", default=None, type=str, help="Path to SDC timing constraints file." + ) + vpr.add_argument( + "-check_incremental_sta_consistency", + default=False, + action="store_true", + help="Do a second-run of the incremental analysis to compare the result files", + ) + + return parser + + +def main(): + """ + Main function to call vtr_command_main to run VTR + """ + return vtr_command_main(sys.argv[1:], prog=sys.argv[0]) + + +def vtr_command_main(arg_list, prog=None): + """ + Running VTR with the specified arguemnts. 
+ """ + start = datetime.now() + # Load the arguments + args, unknown_args = vtr_command_argparser(prog).parse_known_args(arg_list) + error_status = "Error" + if args.temp_dir is None: + temp_dir = Path("./temp") + else: + temp_dir = Path(args.temp_dir) + # Specify how command should be run + command_runner = vtr.CommandRunner( + track_memory=True, + max_memory_mb=args.limit_memory_usage, + timeout_sec=args.timeout, + verbose=args.verbose, + show_failures=args.show_failures, + valgrind=args.valgrind, + ) + exit_status = 0 + return_status = 0 + try: + vpr_args = process_unknown_args(unknown_args) + vpr_args.update(process_vpr_args(args, prog, temp_dir)) + if args.sdc_file: + vpr_args["sdc_file"] = get_sdc_file(args.sdc_file, prog) + + print(args.name if args.name else Path(args.architecture_file).stem + + "/" + + Path(args.circuit_file).stem, end="\t\t") + # Run the flow + vtr.run( + Path(args.architecture_file), + Path(args.circuit_file), + power_tech_file=args.power_tech, + temp_dir=temp_dir, + start_stage=args.start, + end_stage=args.end, + command_runner=command_runner, + vpr_args=vpr_args, + abc_args=process_abc_args(args), + odin_args=process_odin_args(args), + keep_intermediate_files=args.keep_intermediate_files, + keep_result_files=args.keep_result_files, + min_hard_mult_size=args.min_hard_mult_size, + min_hard_adder_size=args.min_hard_adder_size, + check_equivalent=args.check_equivalent, + check_incremental_sta_consistency=args.check_incremental_sta_consistency, + use_old_abc_script=args.use_old_abc_script, + relax_w_factor=args.relax_w_factor, + ) + error_status = "OK" + except vtr.VtrError as error: + error_status, return_status, exit_status = except_vtr_error( + error, + args.expect_fail, + args.verbose + ) + + except KeyboardInterrupt as error: + print("{} recieved keyboard interrupt".format(prog)) + exit_status = 4 + return_status = exit_status + + finally: + seconds = datetime.now() - start + print( + "{status} (took {time})".format( + status=error_status, time=vtr.format_elapsed_time(seconds) + ) + ) + temp_dir.mkdir(parents=True, exist_ok=True) + out = temp_dir / "output.txt" + out.touch() + with out.open("w") as file: + file.write("vpr_status=") + if exit_status == 0: + file.write("success\n") + else: + file.write("exited with return code {}\n".format(exit_status)) + file.write( + "vpr_seconds=%d\nrundir=%s\nhostname=%s\nerror=" + % (seconds.total_seconds(), str(Path.cwd()), socket.gethostname()) + ) + file.write("\n") + + sys.exit(return_status) + + +def process_unknown_args(unknown_args): + """ + We convert the unknown_args into a dictionary, which is eventually + used to generate arguments for VPR + """ + vpr_args = OrderedDict() + while len(unknown_args) > 0: + # Get the first argument + arg = unknown_args.pop(0) + + if arg == "": + continue + + if not arg.startswith("-"): + raise vtr.VtrError( + "Extra argument '{}' intended for VPR does not start with '-'".format( + arg + ) + ) + + # To make it a valid kwargs dictionary we trim the initial '-' or '--' from the + # argument name + assert len(arg) >= 2 + if arg[1] == "-": + # Double-dash prefix + arg = arg[2:] + else: + # Single-dash prefix + arg = arg[1:] + + # Determine if there is a value associated with this argument + if len(unknown_args) == 0 or ( + unknown_args[0].startswith("-") and arg != "target_ext_pin_util" + ): + # Single value argument, we place these with value 'True' + # in vpr_args + vpr_args[arg] = True + else: + # Multivalue argument + val = unknown_args.pop(0) + if len(unknown_args) != 0 and not 
unknown_args[0].startswith("-"): + temp = val + val = [] + val.append(temp) + while len(unknown_args) != 0 and not unknown_args[0].startswith("-"): + val.append(unknown_args.pop(0)) + vpr_args[arg] = val + + return vpr_args + + +def process_abc_args(args): + """ + Finds arguments needed in the ABC stage of the flow + """ + abc_args = OrderedDict() + if args.iterative_bb: + abc_args["iterative_bb"] = True + + if args.once_bb: + abc_args["once_bb"] = True + + if args.blanket_bb: + abc_args["blanket_bb"] = True + + if args.use_old_latches_restoration_script: + abc_args["use_old_latches_restoration_script"] = True + + if args.lut_size: + abc_args["lut_size"] = args.lut_size + return abc_args + + +def process_odin_args(args): + """ + Finds arguments needed in the ODIN stage of the flow + """ + odin_args = OrderedDict() + odin_args["adder_type"] = args.adder_type + + if args.adder_cin_global: + odin_args["adder_cin_global"] = True + + if args.disable_odin_xml: + odin_args["disable_odin_xml"] = True + + if args.use_odin_simulation: + odin_args["use_odin_simulation"] = True + + return odin_args + + +def process_vpr_args(args, prog, temp_dir): + """ + Finds arguments needed in the VPR stage of the flow + """ + vpr_args = OrderedDict() + if args.crit_path_router_iterations: + vpr_args["max_router_iterations"] = args.crit_path_router_iterations + if args.fix_pins: + new_file = str(temp_dir / Path(args.fix_pins).name) + shutil.copyfile(str((Path(prog).parent.parent / args.fix_pins)), new_file) + vpr_args["fix_pins"] = new_file + if args.verify_rr_graph: + rr_graph_out_file = "rr_graph" + args.rr_graph_ext + vpr_args["write_rr_graph"] = rr_graph_out_file + if args.check_place: + vpr_args["route"] = True + if args.check_route: + vpr_args["analysis"] = True + + return vpr_args + +def get_sdc_file(sdc_file, prog): + """ + takes in the sdc_file and returns a path to that file if it exists. 
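+
+    For example (an illustrative sketch with a hypothetical path): if
+    ``sdc_file`` is ``"samples.sdc"`` and no such file exists relative to the
+    current directory, the path is instead resolved relative to the directory
+    two levels above ``prog`` (i.e. vtr_flow/ when invoked as
+    vtr_flow/scripts/run_vtr_flow.py) before being verified and returned.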
+ """ + if not Path(sdc_file).exists(): + if sdc_file.startswith("/"): + sdc_file = Path(str(Path(prog).parent.parent) + sdc_file) + else: + sdc_file = Path(prog).parent.parent / sdc_file + + return str(vtr.verify_file(sdc_file, "sdc file")) + +def except_vtr_error(error, expect_fail, verbose): + """ + Handle vtr exceptions + """ + error_status = None + actual_error = None + exit_status = None + if isinstance(error, vtr.CommandError): + # An external command failed + return_status = 1 + if expect_fail: + expect_string = expect_fail + actual_error = None + if "exited with return code" in expect_string: + actual_error = "exited with return code {}".format(error.returncode) + else: + actual_error = error.msg + if expect_string != actual_error: + error_status = "failed: expected '{expected}' but was '{actual}'".format( + expected=expect_string, actual=actual_error + ) + exit_status = 1 + else: + error_status = "OK" + return_status = 0 + if verbose: + error_status += " (as expected {})".format(expect_string) + else: + error_status += "*" + else: + error_status = "failed: {}".format(error.msg) + if not expect_fail or exit_status: + print("Error: {msg}".format(msg=error.msg)) + print("\tfull command: ", " ".join(error.cmd)) + print("\treturncode : ", error.returncode) + print("\tlog file : ", error.log) + exit_status = 1 + elif isinstance(error, vtr.InspectError): + # Something went wrong gathering information + print("\tfile : ", error.filename) + exit_status = 2 + return_status = exit_status + error_status = "failed: {}".format(error.msg) + + elif isinstance(error, vtr.VtrError): + # Generic VTR errors + exit_status = 3 + return_status = exit_status + error_status = "failed: {}".format(error.msg) + return error_status, return_status, exit_status + + +if __name__ == "__main__": + retval = main() + sys.exit(retval) diff --git a/vtr_flow/scripts/run_vtr_task.pl b/vtr_flow/scripts/run_vtr_task.pl index 97409c1c276..101d546a4b0 100755 --- a/vtr_flow/scripts/run_vtr_task.pl +++ b/vtr_flow/scripts/run_vtr_task.pl @@ -68,6 +68,7 @@ my $run_prefix = "run"; my $show_runtime_estimates = 1; my $system_type = "local"; +my $script = "run_vtr_flow.py"; my $shared_script_params = ""; my $verbosity = 0; my $short_task_names = 0; @@ -96,6 +97,10 @@ $verbosity = int( shift(@ARGV) ); } + elsif ( $token eq "-script") { + $script = shift(@ARGV); + } + # Treat the remainder of the command line options as script parameters shared by all tasks elsif ( $token eq "-s" ) { $shared_script_params = join(' ', @ARGV); @@ -105,6 +110,7 @@ print "shared script params: $shared_script_params\n" } + elsif ( $token eq "-system" ) { $system_type = shift(@ARGV); } @@ -191,7 +197,7 @@ my @all_task_actions; foreach my $task (@tasks) { chomp($task); - my $task_actions = generate_single_task_actions($task, $common_task_prefix); + my $task_actions = generate_single_task_actions($task, $common_task_prefix, $script); push(@all_task_actions, @$task_actions); } @@ -210,8 +216,7 @@ sub generate_single_task_actions { my $circuits_dir; my $archs_dir; my $sdc_dir = undef; - my $script_default = "run_vtr_flow.pl"; - my $script = $script_default; + my $script_default = "run_vtr_flow"; my $script_path; my $script_params_common = $shared_script_params; # start with the shared ones then build unique ones my @circuits_list; @@ -219,7 +224,7 @@ sub generate_single_task_actions { my @script_params_list; my $cmos_tech_path = ""; - my ($task, $common_prefix) = @_; + my ($task, $common_prefix,$script) = @_; (my $task_dir = "$vtr_flow_path/tasks/$task") =~ 
s/\s+$//; # trim right white spaces for chdir to work on Windows chdir($task_dir) or die "Task directory does not exist ($task_dir): $!\n"; @@ -283,7 +288,7 @@ sub generate_single_task_actions { close(CONFIG_FH); # Using default script - if ( $script eq $script_default ) { + if (index($script, $script_default) != -1 ) { # This is hack to automatically add the option '-temp_dir .' if using the run_vtr_flow.pl script # This ensures that a 'temp' folder is not created in each circuit directory