diff --git a/.github/kokoro/continuous/nightly.cfg b/.github/kokoro/continuous/nightly.cfg index 09702c6b915..f844c82c05d 100644 --- a/.github/kokoro/continuous/nightly.cfg +++ b/.github/kokoro/continuous/nightly.cfg @@ -46,7 +46,7 @@ env_vars { value: "vtr_reg_nightly" } -#Options for run_reg_test.pl +#Options for run_reg_test.py # -show_failures: show tool failures in main log output env_vars { key: "VTR_TEST_OPTIONS" diff --git a/.github/kokoro/continuous/strong.cfg b/.github/kokoro/continuous/strong.cfg index 6c797d8f458..feb971afe14 100644 --- a/.github/kokoro/continuous/strong.cfg +++ b/.github/kokoro/continuous/strong.cfg @@ -46,7 +46,7 @@ env_vars { value: "vtr_reg_strong" } -#Options for run_reg_test.pl +#Options for run_reg_test.py # -show_failures: show tool failures in main log output env_vars { key: "VTR_TEST_OPTIONS" diff --git a/.github/kokoro/continuous/strong_sanitized.cfg b/.github/kokoro/continuous/strong_sanitized.cfg index 7f9f0c0f47f..52226fb550b 100644 --- a/.github/kokoro/continuous/strong_sanitized.cfg +++ b/.github/kokoro/continuous/strong_sanitized.cfg @@ -48,7 +48,7 @@ env_vars { value: "vtr_reg_strong" } -#Options for run_reg_test.pl +#Options for run_reg_test.py # -show_failures: show tool failures in main log output # -skip_qor: Skip QoR checks (since we expect run-time failures due to sanitizers) env_vars { diff --git a/.github/kokoro/continuous/weekly.cfg b/.github/kokoro/continuous/weekly.cfg index b2490dc65f6..c3a79d4b82d 100644 --- a/.github/kokoro/continuous/weekly.cfg +++ b/.github/kokoro/continuous/weekly.cfg @@ -46,7 +46,7 @@ env_vars { value: "vtr_reg_weekly" } -#Options for run_reg_test.pl +#Options for run_reg_test.py # -show_failures: show tool failures in main log output env_vars { key: "VTR_TEST_OPTIONS" diff --git a/.github/kokoro/presubmit/nightly.cfg b/.github/kokoro/presubmit/nightly.cfg index e6d33b4c27a..57f2f978499 100644 --- a/.github/kokoro/presubmit/nightly.cfg +++ b/.github/kokoro/presubmit/nightly.cfg @@ -46,7 +46,7 @@ env_vars { value: "vtr_reg_nightly" } -#Options for run_reg_test.pl +#Options for run_reg_test.py # -show_failures: show tool failures in main log output env_vars { key: "VTR_TEST_OPTIONS" diff --git a/.github/kokoro/presubmit/strong.cfg b/.github/kokoro/presubmit/strong.cfg index f1c0c7393b8..c61cb39d781 100644 --- a/.github/kokoro/presubmit/strong.cfg +++ b/.github/kokoro/presubmit/strong.cfg @@ -46,7 +46,7 @@ env_vars { value: "vtr_reg_strong" } -#Options for run_reg_test.pl +#Options for run_reg_test.py # -show_failures: show tool failures in main log output env_vars { key: "VTR_TEST_OPTIONS" diff --git a/.github/kokoro/presubmit/strong_sanitized.cfg b/.github/kokoro/presubmit/strong_sanitized.cfg index 1ad3d7e0c83..5600e5e2de8 100644 --- a/.github/kokoro/presubmit/strong_sanitized.cfg +++ b/.github/kokoro/presubmit/strong_sanitized.cfg @@ -48,7 +48,7 @@ env_vars { value: "vtr_reg_strong" } -#Options for run_reg_test.pl +#Options for run_reg_test.py # -show_failures: show tool failures in main log output # -skip_qor: Skip QoR checks (since we expect run-time failures due to sanitizers) env_vars { diff --git a/.github/kokoro/steps/vtr-test.sh b/.github/kokoro/steps/vtr-test.sh index 06a537ac89b..459c4257d01 100644 --- a/.github/kokoro/steps/vtr-test.sh +++ b/.github/kokoro/steps/vtr-test.sh @@ -45,5 +45,5 @@ echo "========================================" echo "Running Tests" echo "========================================" export VPR_NUM_WORKERS=1 -./run_reg_test.pl $VTR_TEST $VTR_TEST_OPTIONS -j$NUM_CORES 
+./run_reg_test.py $VTR_TEST $VTR_TEST_OPTIONS -j$NUM_CORES kill $MONITOR diff --git a/.travis.yml b/.travis.yml index 69c4c3eb522..f9b9c63874f 100644 --- a/.travis.yml +++ b/.travis.yml @@ -86,7 +86,7 @@ jobs: - ./dev/check-format-py.sh - stage: Test name: "Python Lint" - script: + script: - python3 ./dev/pylint_check.py - stage: Test name: "C++ Unit Tests" @@ -112,7 +112,7 @@ jobs: - MATRIX_EVAL="CC=gcc-5 && CXX=g++-5" script: - ./.github/travis/build.sh - - travis_wait 30 ./run_reg_test.pl vtr_reg_basic -show_failures -j2 + - travis_wait 30 ./run_reg_test.py vtr_reg_basic -show_failures -j2 - stage: Test name: "Basic Regression Tests with NO_GRAPHICS" env: @@ -136,7 +136,7 @@ jobs: - MATRIX_EVAL="CC=gcc-5 && CXX=g++-5" script: - ./.github/travis/build.sh - - travis_wait 30 ./run_reg_test.pl vtr_reg_strong -show_failures -j2 + - travis_wait 30 ./run_reg_test.py vtr_reg_strong -show_failures -j2 - stage: Test name: "Basic Valgrind Memory Tests" env: @@ -144,7 +144,7 @@ jobs: - MATRIX_EVAL="CC=gcc-5 && CXX=g++-5" script: - ./.github/travis/build.sh - - travis_wait 30 ./run_reg_test.pl vtr_reg_valgrind_small -show_failures -j2 + - travis_wait 30 ./run_reg_test.py vtr_reg_valgrind_small -show_failures -j2 - stage: Test name: "Sanitized Basic Regression Tests" env: @@ -155,7 +155,7 @@ jobs: script: - ./.github/travis/build.sh #We skip QoR since we are only checking for errors in sanitizer runs - - travis_wait 50 ./run_reg_test.pl vtr_reg_basic -show_failures -skip_qor -j2 + - travis_wait 50 ./run_reg_test.py vtr_reg_basic -show_failures -skip_qor -j2 #Currently strong regression with sanitizers is disabled as it exceeds the maximum travis job run-time #- stage: Test #name: "Sanitized Strong Regression Tests" #env: @@ -167,7 +167,7 @@ #script: #- ./.github/travis/build.sh ##We skip QoR since we are only checking for errors in sanitizer runs #- travis_wait 60 ./run_reg_test.pl vtr_reg_strong -show_failures -skip_qor -j2 + #- travis_wait 60 ./run_reg_test.py vtr_reg_strong -show_failures -skip_qor -j2 - stage: Test name: "ODIN-II Micro Tests" env: @@ -176,7 +176,7 @@ - BUILD_TYPE=debug script: - ./.github/travis/build.sh - - ./run_reg_test.pl odin_reg_micro -show_failures -j2 + - ./run_reg_test.py odin_reg_micro -show_failures -j2 - stage: Test name: "Build Compatibility: GCC 5 (Ubuntu Xenial - 16.04)" diff --git a/Makefile b/Makefile index c64277fc757..48d41c267c5 100644 --- a/Makefile +++ b/Makefile @@ -87,7 +87,7 @@ ifneq (,$(findstring pgo,$(BUILD_TYPE))) #Need titan benchmarks for pgo_profile task @+$(MAKE) -C $(BUILD_DIR) get_titan_benchmarks #Note profiling must be done serially to avoid corrupting the generated profiles - ./run_reg_test.pl pgo_profile + ./run_reg_test.py pgo_profile # #Configure 2nd-stage build to use profiling data to guide compiler optimization # diff --git a/ODIN_II/verify_odin.sh b/ODIN_II/verify_odin.sh index cda231ee36e..93bb5090f62 100755 --- a/ODIN_II/verify_odin.sh +++ b/ODIN_II/verify_odin.sh @@ -1255,7 +1255,7 @@ FILTERED_VTR_TASK_PATH="${NEW_RUN_DIR}/vtr/task_list.txt" function run_vtr_reg() { pushd "${VTR_DIR}" &> /dev/null RELATIVE_PATH_TO_TEST=$(realapath_from "${FILTERED_VTR_TASK_PATH}" "${VTR_REG_DIR}") - /usr/bin/env perl run_reg_test.pl -j "${_NUMBER_OF_PROCESS}" "${RELATIVE_PATH_TO_TEST}" + /usr/bin/env python3 run_reg_test.py -j "${_NUMBER_OF_PROCESS}" "${RELATIVE_PATH_TO_TEST}" popd &> /dev/null } diff --git a/README.developers.md b/README.developers.md index 45642c45246..aeb1d27d3f5 100644 --- a/README.developers.md +++ b/README.developers.md
@@ -45,7 +45,7 @@ The overall approach is similar, but we call out the differences below. At the bare minimum it is recommended to run: ``` make #Rebuild the code - ./run_reg_test.pl vtr_reg_basic vtr_reg_strong #Run tests + ./run_reg_test.py vtr_reg_basic vtr_reg_strong #Run tests ``` See [Running Tests](#running-tests) for more details. @@ -250,11 +250,11 @@ There are 4 main regression tests: QoR checks in this regression are aimed at evaluating quality and run-time of the VTR flow. As a result any QoR failures are a concern and should be investigated and understood. -These can be run with `run_reg_test.pl`: +These can be run with `run_reg_test.py`: ```shell #From the VTR root directory -$ ./run_reg_test.pl vtr_reg_basic -$ ./run_reg_test.pl vtr_reg_strong +$ ./run_reg_test.py vtr_reg_basic +$ ./run_reg_test.py vtr_reg_strong ``` The *nightly* and *weekly* regressions require the Titan and ISPD benchmarks @@ -263,22 +263,22 @@ which can be integrated into your VTR tree with: make get_titan_benchmarks make get_ispd_benchmarks ``` -They can then be run using `run_reg_test.pl`: +They can then be run using `run_reg_test.py`: ```shell -$ ./run_reg_test.pl vtr_reg_nightly -$ ./run_reg_test.pl vtr_reg_weekly +$ ./run_reg_test.py vtr_reg_nightly +$ ./run_reg_test.py vtr_reg_weekly ``` To speed things up, individual sub-tests can be run in parallel using the `-j` option: ```shell #Run up to 4 tests in parallel -$ ./run_reg_test.pl vtr_reg_strong -j4 +$ ./run_reg_test.py vtr_reg_strong -j4 ``` You can also run multiple regression tests together: ```shell #Run both the basic and strong regression, with up to 4 tests in parallel -$ ./run_reg_test.pl vtr_reg_basic vtr_reg_strong -j4 +$ ./run_reg_test.py vtr_reg_basic vtr_reg_strong -j4 ``` ## Odin Functionality Tests @@ -291,8 +291,8 @@ Odin has its own set of tests to verify the correctness of its synthesis results These can be run with: ```shell #From the VTR root directory -$ ./run_reg_test.pl odin_reg_micro -$ ./run_reg_test.pl odin_reg_full +$ ./run_reg_test.py odin_reg_micro +$ ./run_reg_test.py odin_reg_full ``` and should be used when making changes to Odin. @@ -391,7 +391,7 @@ Let's assume we have a failure in `vtr_reg_basic`: ```shell #In the VTR root directory -$ ./run_reg_test.pl vtr_reg_strong +$ ./run_reg_test.py vtr_reg_basic #Output trimmed... regression_tests/vtr_reg_basic/basic_no_timing ----------------------------------------- @@ -421,7 +421,7 @@ latest run002 run004 run005 There we see there is a `config` directory (which defines the test), and a set of run-directories. Each time a test is run it creates a new `runXXX` directory (where `XXX` is an incrementing number). From the above we can tell that our last run was `run005` (the symbolic link `latest` also points to the most recent run directory). -From the output of `run_reg_test.pl` we know that one of the failing architecture/circuit/parameters combinations was `k4_N10_memSize16384_memData64/ch_intrinsics/common`. +From the output of `run_reg_test.py` we know that one of the failing architecture/circuit/parameter combinations was `k4_N10_memSize16384_memData64/ch_intrinsics/common`. Each architecture/circuit/parameter combination is run in its own sub-folder.
Let's move to that directory: ```shell @@ -561,12 +561,12 @@ A typical approach to evaluating an algorithm change would be to run `vtr_reg_qo $ cd vtr_flow/tasks #Run the VTR benchmarks -$ ../scripts/run_vtr_task.pl regression_tests/vtr_reg_nightly/vtr_reg_qor_chain +$ ../scripts/run_vtr_task.py regression_tests/vtr_reg_nightly/vtr_reg_qor_chain #Several hours later... they complete #Parse the results -$ ../scripts/parse_vtr_task.pl regression_tests/vtr_reg_nightly/vtr_reg_qor_chain +$ ../scripts/python_libs/vtr/parse_vtr_task.py regression_tests/vtr_reg_nightly/vtr_reg_qor_chain #The run directory should now contain a summary parse_results.txt file $ head -5 vtr_reg_nightly/vtr_reg_qor_chain/latest/parse_results.txt @@ -596,12 +596,12 @@ $ make get_titan_benchmarks $ cd vtr_flow/tasks #Run the VTR benchmarks -$ ../scripts/run_vtr_task.pl regression_tests/vtr_reg_weekly/vtr_reg_titan +$ ../scripts/run_vtr_task.py regression_tests/vtr_reg_weekly/vtr_reg_titan #Several days later... they complete #Parse the results -$ ../scripts/parse_vtr_task.pl regression_tests/vtr_reg_weekly/vtr_reg_titan +$ ../scripts/python_libs/vtr/parse_vtr_task.py regression_tests/vtr_reg_weekly/vtr_reg_titan #The run directory should now contain a summary parse_results.txt file $ head -5 vtr_reg_weekly/vtr_reg_titan/latest/parse_results.txt @@ -793,7 +793,7 @@ This describes adding a test to `vtr_reg_strong`, but the process is similar for ```shell #From the VTR root $ cd vtr_flow/tasks - $ ../scripts/run_vtr_task.pl regression_tests/vtr_reg_strong/strong_mytest + $ ../scripts/run_vtr_task.py regression_tests/vtr_reg_strong/strong_mytest regression_tests/vtr_reg_strong/strong_mytest ----------------------------------------- k6_frac_N10_mem32K_40nm/ch_intrinsics...OK ``` - Next we can generate the golden reference results using `parse_vtr_task.pl` with the `-create_golden` option: + Next we can generate the golden reference results using `parse_vtr_task.py` with the `-create_golden` option: ```shell - $ ../scripts/parse_vtr_task.pl regression_tests/vtr_reg_strong/strong_mytest -create_golden + $ ../scripts/python_libs/vtr/parse_vtr_task.py regression_tests/vtr_reg_strong/strong_mytest -create_golden ``` And check that everything matches with `-check_golden`: ```shell - $ ../scripts/parse_vtr_task.pl regression_tests/vtr_reg_strong/strong_mytest -check_golden + $ ../scripts/python_libs/vtr/parse_vtr_task.py regression_tests/vtr_reg_strong/strong_mytest -check_golden regression_tests/vtr_reg_strong/strong_mytest...[Pass] ``` @@ -825,7 +825,7 @@ This describes adding a test to `vtr_reg_strong`, but the process is similar for Now, when we run `vtr_reg_strong`: ```shell #From the VTR root directory - $ ./run_reg_test.pl vtr_reg_strong + $ ./run_reg_test.py vtr_reg_strong #Output trimmed... regression_tests/vtr_reg_strong/strong_mytest ----------------------------------------- diff --git a/dev/DOCKER_DEPLOY.md b/dev/DOCKER_DEPLOY.md index 80ecb796247..b3c80fbe741 100644 --- a/dev/DOCKER_DEPLOY.md +++ b/dev/DOCKER_DEPLOY.md @@ -36,7 +36,7 @@ First, use one of the terminals and compile VTR: make && make installation/ Second, ensure that a basic regression test passes: -./run_reg_test.pl vtr_reg_basic +./run_reg_test.py vtr_reg_basic Third, run and/or modify VTR in the usual way.
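The docs above all shift to invoking `./run_reg_test.py` directly. Below is a minimal sketch of driving it from another script; the wrapper itself is hypothetical, and the one behaviour it relies on, that the driver's exit code equals the total number of functional plus QoR failures, comes from `run_reg_test.py` as introduced later in this diff:

```python
#!/usr/bin/env python3
"""Hypothetical wrapper: run VTR regression suites via the new Python driver."""
import subprocess
import sys


def run_regression(suites, jobs=2):
    """Invoke ./run_reg_test.py from the VTR root directory.

    run_reg_test.py exits with the total failure count (functional + QoR),
    so the return code doubles as a failure tally.
    """
    cmd = ["./run_reg_test.py", *suites, "-show_failures", "-j{}".format(jobs)]
    return subprocess.run(cmd).returncode  # output streams straight to the console


if __name__ == "__main__":
    failures = run_regression(["vtr_reg_basic"])
    sys.exit(1 if failures else 0)
```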
diff --git a/dev/pylint_check.py b/dev/pylint_check.py index 73ed6cbb71d..0c85b85e749 100755 --- a/dev/pylint_check.py +++ b/dev/pylint_check.py @@ -176,7 +176,7 @@ def main(): ignore_list.append("C0330") # Build pylint command - cmd = ["pylint", path, "-s", "n", "--disable=C0330"] + cmd = ["pylint", path, "-s", "n"] if ignore_list: cmd.append("--disable=" + ",".join(ignore_list)) diff --git a/doc/src/quickstart/index.rst b/doc/src/quickstart/index.rst index 96d43a1ad09..3679d5b718a 100644 --- a/doc/src/quickstart/index.rst +++ b/doc/src/quickstart/index.rst @@ -375,7 +375,7 @@ Let's make a new directory to work in named ``blink_run_flow``: > mkdir -p ~/vtr_work/quickstart/blink_run_flow > cd ~/vtr_work/quickstart/blink_run_flow -Now lets run the script (``$VTR_ROOT/vtr_flow/scripts/run_vtr_flow.pl``) passing in: +Now let's run the script (``$VTR_ROOT/vtr_flow/scripts/run_vtr_flow.py``) passing in: * The circuit verilog file (``$VTR_ROOT/doc/src/quickstart/blink.v``) * The FPGA architecture file (``$VTR_ROOT/vtr_flow/arch/timing/EArch.xml``) @@ -390,7 +390,7 @@ The resulting command is: .. code-block:: bash - > $VTR_ROOT/vtr_flow/scripts/run_vtr_flow.pl \ + > $VTR_ROOT/vtr_flow/scripts/run_vtr_flow.py \ $VTR_ROOT/doc/src/quickstart/blink.v \ $VTR_ROOT/vtr_flow/arch/timing/EArch.xml \ -temp_dir . \ diff --git a/doc/src/tutorials/flow/basic_flow.rst b/doc/src/tutorials/flow/basic_flow.rst index bb0fb77319c..3da97ae441c 100644 --- a/doc/src/tutorials/flow/basic_flow.rst +++ b/doc/src/tutorials/flow/basic_flow.rst @@ -9,7 +9,7 @@ The following steps show you how to run the VTR design flow to map a sample circuit .. code-block:: shell - ../scripts/run_vtr_task.pl basic_flow + ../scripts/run_vtr_task.py basic_flow This command will run the VTR flow on a single circuit and a single architecture. The files generated from the run are stored in ``basic_flow/run[#]`` where ``[#]`` is the number of runs you have done. @@ -18,7 +18,7 @@ The following steps show you how to run the VTR design flow to map a sample circuit .. code-block:: shell - ../scripts/parse_vtr_task.pl basic_flow/ + ../scripts/python_libs/vtr/parse_vtr_task.py basic_flow/ This parses out the information of the VTR run and outputs the results in a text file called ``run[#]/parse_results.txt``. @@ -30,7 +30,7 @@ The following steps show you how to run the VTR design flow to map a sample circuit .. code-block:: shell - ../scripts/parse_vtr_task.pl -check_golden basic_flow + ../scripts/python_libs/vtr/parse_vtr_task.py -check_golden basic_flow It should return: ``basic_flow...[Pass]`` diff --git a/doc/src/vtr/install_vtr.rst b/doc/src/vtr/install_vtr.rst index 63bdcaabce0..d5fa4a6a310 100644 --- a/doc/src/vtr/install_vtr.rst +++ b/doc/src/vtr/install_vtr.rst @@ -25,7 +25,7 @@ Verifying Installation ~~~~~~~~~~~~~~~~~~~~~~ To verify that VTR has been installed correctly run:: - $VTR_ROOT/vtr_flow/scripts/run_vtr_task.pl basic_flow + $VTR_ROOT/vtr_flow/scripts/run_vtr_task.py basic_flow The expected output is:: diff --git a/doc/src/vtr/parse_vtr_flow.rst b/doc/src/vtr/parse_vtr_flow.rst index 7a491ece9c1..7b1aabe02a9 100644 --- a/doc/src/vtr/parse_vtr_flow.rst +++ b/doc/src/vtr/parse_vtr_flow.rst @@ -9,15 +9,15 @@ This script parses statistics generated by a single execution of the VTR flow. The script is located at:: - $VTR_ROOT/vtr_flow/scripts/parse_vtr_flow.pl + $VTR_ROOT/vtr_flow/scripts/python_libs/vtr/parse_vtr_flow.py -.. program:: parse_vtr_flow.pl +..
program:: parse_vtr_flow.py Usage ~~~~~ Typical usage is:: - parse_vtr_flow.pl + parse_vtr_flow.py where: diff --git a/doc/src/vtr/parse_vtr_task.rst b/doc/src/vtr/parse_vtr_task.rst index 9c3cd5d12b2..6915968fe12 100644 --- a/doc/src/vtr/parse_vtr_task.rst +++ b/doc/src/vtr/parse_vtr_task.rst @@ -10,16 +10,16 @@ The script will always parse the results of the latest execution of the task. The script is located at:: - $VTR_ROOT/vtr_flow/scripts/parse_vtr_task.pl + $VTR_ROOT/vtr_flow/scripts/python_libs/vtr/parse_vtr_task.py -.. program:: parse_vtr_task.pl +.. program:: parse_vtr_task.py Usage ~~~~~ Typical usage is:: - parse_vtr_task.pl ... + parse_vtr_task.py ... .. note:: At least one task must be specified, either directly as a parameter or through the :option:`-l` option. diff --git a/doc/src/vtr/pass_requirements.rst b/doc/src/vtr/pass_requirements.rst index cfd80798e60..8c1f48aeb30 100644 --- a/doc/src/vtr/pass_requirements.rst +++ b/doc/src/vtr/pass_requirements.rst @@ -4,7 +4,7 @@ Pass Requirements ----------------- The :ref:`parse_vtr_task` scripts allow you to compare an executed task to a *golden* reference result. -The comparison, which is performed when using the :option:`parse_vtr_task.pl -check_golden` option, which reports either ``Pass`` or ``Fail``. +The comparison, which is performed when using the :option:`parse_vtr_task.py -check_golden` option, reports either ``Pass`` or ``Fail``. The requirements that must be met to qualify as a ``Pass`` are specified in the pass requirements file. Task Configuration diff --git a/doc/src/vtr/run_vtr_task.rst b/doc/src/vtr/run_vtr_task.rst index 2aec679059d..a8458266656 100644 --- a/doc/src/vtr/run_vtr_task.rst +++ b/doc/src/vtr/run_vtr_task.rst @@ -10,16 +10,16 @@ This script runs the VTR flow for a single benchmark circuit and architecture fi The script is located at:: - $VTR_ROOT/vtr_flow/scripts/run_vtr_task.pl + $VTR_ROOT/vtr_flow/scripts/run_vtr_task.py -.. program:: run_vtr_task.pl +.. program:: run_vtr_task.py Basic Usage ~~~~~~~~~~~ Typical usage is:: - run_vtr_task.pl ... + run_vtr_task.py ... .. note:: At least one task must be specified, either directly as a parameter or via the :option:`-l` option.
@@ -74,7 +74,7 @@ Detailed Command-line Options #From $VTR_ROOT/vtr_flow/tasks - $ ../scripts/run_vtr_task.pl regression_tests/vtr_reg_basic/basic_timing + $ ../scripts/run_vtr_task.py regression_tests/vtr_reg_basic/basic_timing regression_tests/vtr_reg_basic/basic_timing: k6_N10_mem32K_40nm.xml/ch_intrinsics.v/common OK (took 2.24 seconds) regression_tests/vtr_reg_basic/basic_timing: k6_N10_mem32K_40nm.xml/diffeq1.v/common OK (took 10.94 seconds) @@ -86,7 +86,7 @@ Detailed Command-line Options #From $VTR_ROOT/vtr_flow/tasks - $ ../scripts/run_vtr_task.pl regression_tests/vtr_reg_basic/basic_timing -system scripts + $ ../scripts/run_vtr_task.py regression_tests/vtr_reg_basic/basic_timing -system scripts /project/trees/vtr/vtr_flow/tasks/regression_tests/vtr_reg_basic/basic_timing/run001/k6_N10_mem32K_40nm.xml/ch_intrinsics.v/common/vtr_flow.sh /project/trees/vtr/vtr_flow/tasks/regression_tests/vtr_reg_basic/basic_timing/run001/k6_N10_mem32K_40nm.xml/diffeq1.v/common/vtr_flow.sh @@ -102,12 +102,12 @@ Detailed Command-line Options #From $VTR_ROOT/vtr_flow/tasks - $ ../scripts/run_vtr_task.pl regression_tests/vtr_reg_basic/basic_timing -system scripts | parallel -j4 'cd $(dirname {}) && {}' + $ ../scripts/run_vtr_task.py regression_tests/vtr_reg_basic/basic_timing -system scripts | parallel -j4 'cd $(dirname {}) && {}' regression_tests/vtr_reg_basic/basic_timing: k6_N10_mem32K_40nm.xml/ch_intrinsics.v/common OK (took 2.11 seconds) regression_tests/vtr_reg_basic/basic_timing: k6_N10_mem32K_40nm.xml/diffeq1.v/common OK (took 10.94 seconds) where ``{}`` is a special variable interpreted by the ``parallel`` command to represent the input line (i.e. a script, see ``parallel``'s documentation for details). - This will run the scripts generated by run_vtr_task.pl in parallel (up to 4 at-a-time due to ``-j4``). + This will run the scripts generated by run_vtr_task.py in parallel (up to 4 at-a-time due to ``-j4``). Each script is invoked in the script's containing directory (``cd $(dirname {})``), which mimics the behaviour of ``-system local -j4``. .. note:: diff --git a/doc/src/vtr/running_vtr.rst b/doc/src/vtr/running_vtr.rst index 922d1fa5298..1c217c734aa 100644 --- a/doc/src/vtr/running_vtr.rst +++ b/doc/src/vtr/running_vtr.rst @@ -56,9 +56,9 @@ VTR provides a variety of standard tasks which can be found under:: Tasks can be executed using :ref:`run_vtr_task`:: - $VTR_ROOT/vtr_flow/scripts/run_vtr_task.pl + $VTR_ROOT/vtr_flow/scripts/run_vtr_task.py -.. seealso:: :ref:`run_vtr_task` for the detailed command line options of ``run_vtr_task.pl``. +.. seealso:: :ref:`run_vtr_task` for the detailed command line options of ``run_vtr_task.py``. .. seealso:: :ref:`vtr_tasks` for more information on creating, modifying and running tasks.
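For readers who prefer Python to GNU ``parallel``, here is a rough equivalent of the ``-system scripts | parallel -j4`` recipe documented above. It is only a sketch, assuming what the doc output shows: ``-system scripts`` prints one ``vtr_flow.sh`` path per line, and each script should run from its containing directory; the task name is illustrative:

```python
#!/usr/bin/env python3
"""Sketch: run the scripts emitted by 'run_vtr_task.py ... -system scripts'
four at a time, mimicking '| parallel -j4'."""
import subprocess
from concurrent.futures import ThreadPoolExecutor
from pathlib import Path


def emit_scripts(task):
    """Ask run_vtr_task.py to print the per-job scripts instead of running them."""
    out = subprocess.run(
        ["../scripts/run_vtr_task.py", task, "-system", "scripts"],
        capture_output=True, text=True, check=True,
    )
    return [Path(line.strip()) for line in out.stdout.splitlines() if line.strip()]


def run_script(script):
    """Run one generated script from its containing directory (cd $(dirname {}))."""
    return subprocess.run([str(script)], cwd=script.parent).returncode


if __name__ == "__main__":
    # Run from $VTR_ROOT/vtr_flow/tasks, like the examples above
    scripts = emit_scripts("regression_tests/vtr_reg_basic/basic_timing")
    with ThreadPoolExecutor(max_workers=4) as pool:
        codes = list(pool.map(run_script, scripts))
    print("{} of {} jobs failed".format(sum(1 for c in codes if c != 0), len(codes)))
```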
diff --git a/run_quick_test.pl b/run_quick_test.pl index 989e2c2c45b..d1c1e8c64c7 100755 --- a/run_quick_test.pl +++ b/run_quick_test.pl @@ -18,5 +18,5 @@ use strict; use Cwd; -system("./run_reg_test.pl vtr_reg_basic"); +system("./run_reg_test.py vtr_reg_basic"); diff --git a/run_reg_test.py b/run_reg_test.py new file mode 100755 index 00000000000..a4746accc2a --- /dev/null +++ b/run_reg_test.py @@ -0,0 +1,328 @@ +#!/usr/bin/env python3 +""" + Module for running regression tests +""" +from pathlib import Path +import sys +import argparse +import textwrap +import subprocess +from collections import OrderedDict +from prettytable import PrettyTable + + +# pylint: disable=wrong-import-position, import-error +sys.path.insert(0, str(Path(__file__).resolve().parent / "vtr_flow/scripts/python_libs")) +sys.path.insert(0, str(Path(__file__).resolve().parent / "vtr_flow/scripts")) +from run_vtr_task import vtr_command_main as run_vtr_task +from vtr import RawDefaultHelpFormatter, paths + +# pylint: enable=wrong-import-position, import-error +BASIC_VERBOSITY = 1 + + +def vtr_command_argparser(prog=None): +    """ Parses the arguments of run_reg_test """ + + description = textwrap.dedent( + """ + Runs one or more VTR regression tests. + """ + ) + epilog = textwrap.dedent( + """ + Examples + -------- + + Run the regression test 'vtr_reg_strong': + + %(prog)s vtr_reg_strong + + Run the regression tests 'vtr_reg_basic' and 'vtr_reg_strong': + + %(prog)s vtr_reg_basic vtr_reg_strong + + Run regression tests 'vtr_reg_basic' and 'vtr_reg_strong' + with 8 parallel workers: + + %(prog)s vtr_reg_basic vtr_reg_strong -j8 + """ + ) + + parser = argparse.ArgumentParser( + prog=prog, description=description, epilog=epilog, formatter_class=RawDefaultHelpFormatter, + ) + + # + # Major arguments + # + parser.add_argument( + "reg_test", nargs="+", help="Regression tests to be run", + ) + + parser.add_argument( + "-create_golden", + default=False, + action="store_true", + help="Create golden reference results for the associated tasks", + ) + + parser.add_argument( + "-check_golden", + default=False, + action="store_true", + help="Check golden reference results for the associated tasks", + ) + + parser.add_argument( + "-parse", default=False, action="store_true", help="Only run the parse tests.", + ) + + parser.add_argument( + "-display_qor", + default=False, + action="store_true", + help="Displays the previous QoR test results", + ) + + parser.add_argument( + "-script", + default="run_vtr_flow.py", + help="Determines what flow script is used for the tests", + ) + + parser.add_argument( + "-skip_qor", default=False, action="store_true", help="Skips running the QoR tests", + ) + + parser.add_argument( + "-j", + default=1, + type=int, + metavar="NUM_PROC", + help="How many processors to use for execution.", + ) + + parser.add_argument( + "-calc_geomean", + default=False, + action="store_true", + help="Enable the calculation of the task geomeans.", + ) + + parser.add_argument( + "-show_failures", + default=False, + action="store_true", + help="Produce additional debug output", + ) + + parser.add_argument( + "-long_task_names", default=False, action="store_true", help="Display long task names", + ) + + return parser + + +def vtr_command_main(arg_list, prog=None): + """ + Run the given regression tests + """ + # Load the arguments + args = vtr_command_argparser(prog).parse_args(arg_list) + + total_num_func_failures = 0 + total_num_qor_failures = 0 + tests_run = False + for reg_test in args.reg_test: + num_func_failures = 0 +
num_qor_failures = 0 + if args.parse: + tests_run = True + num_qor_failures = parse_single_test(collect_task_list(reg_test)) + elif args.check_golden: + tests_run = True + num_qor_failures = parse_single_test(collect_task_list(reg_test), check=True) + elif args.create_golden: + # Create golden results + num_qor_failures = 0 + parse_single_test(collect_task_list(reg_test), create=True) + elif args.calc_geomean: + # Calculate geo mean values + num_qor_failures = 0 + parse_single_test(collect_task_list(reg_test), calculate=True) + elif args.display_qor: + num_qor_failures = display_qor(reg_test) + elif reg_test.startswith("odin"): + total_num_func_failures += run_odin_test(args, reg_test) + else: + # Run the VTR regression tests + print_header("Verilog-to-Routing Regression Testing") + + tests_run = True + + # Collect the task lists + vtr_task_list_files = collect_task_list(reg_test) + + # Run the actual tasks, recording functionality failures + if vtr_task_list_files: + num_func_failures += run_tasks(args, vtr_task_list_files) + + # Check against golden results + if not args.skip_qor and vtr_task_list_files: + num_qor_failures += parse_single_test( + vtr_task_list_files, check=True, calculate=True + ) + print("\nTest '{}' had {} qor test failures".format(reg_test, num_qor_failures)) + print("\nTest '{}' had {} run failures\n".format(reg_test, num_func_failures)) + total_num_func_failures += num_func_failures + total_num_qor_failures += num_qor_failures + # Final summary + if ( + tests_run + and total_num_func_failures == 0 + and (total_num_qor_failures == 0 or args.skip_qor) + ): + print("All tests passed") + elif tests_run and (total_num_func_failures != 0 or total_num_qor_failures != 0): + print("Error: {} tests failed".format(total_num_func_failures + total_num_qor_failures)) + + sys.exit(total_num_func_failures + total_num_qor_failures) + + +def display_qor(reg_test): + """ Display the previous QoR test results """ + test_dir = paths.regression_tests_path / reg_test + if not (test_dir / "qor_geomean.txt").is_file(): + print("QoR results do not exist ({}/qor_geomean.txt)".format(str(test_dir))) + return 1 + print_header("{} QoR Results".format(reg_test)) + with (test_dir / "qor_geomean.txt").open("r") as results: + + # create a list of desired values, their units, and how to display them.
+ data = OrderedDict() + data["revision"] = ["", "{}"] + data["date"] = ["", "{}"] + data["total_runtime"] = [" s", "%.3f"] + data["total_wirelength"] = [" units", "%.0f"] + data["num_clb"] = [" blocks", "%.2f"] + data["min_chan_width"] = [" tracks", "%.3f"] + data["crit_path_delay"] = [" ns", "%.3f"] + + positions = OrderedDict() + position = 0 + # get positions of headers that we want in the qor results + for header in results.readline().split(): + if header in data: + positions[header] = position + position += 1 + + table = PrettyTable() + table.field_names = list(data.keys()) + for line in results.readlines(): + info = line.split() + row = [] + for key, values in data.items(): + if len(info) - 1 < positions[key]: + row += [""] + else: + if values[1] == "{}" or not info[positions[key]].isnumeric(): + row += [("{}".format(info[positions[key]])) + values[0]] + else: + row += [(values[1] % float(info[positions[key]])) + values[0]] + table.add_row(row) + print(table) + return 0 + + +def run_odin_test(args, test_name): + """ Run ODIN II test with given test name """ + odin_reg_script = [ + str(paths.odin_verify_path), + "--clean", + "-C", + str(paths.odin_output_on_error_path), + "--nb_of_process", + str(args.j), + "--test", + "{}/regression_test/benchmark/".format(str(paths.odin_path)), + ] + if test_name == "odin_reg_full": + odin_reg_script[-1] += "suite/full_suite" + elif test_name == "odin_reg_syntax": + odin_reg_script[-1] += "task/syntax" + elif test_name == "odin_reg_arch": + odin_reg_script[-1] += "task/arch_sweep" + elif test_name == "odin_reg_operators": + odin_reg_script[-1] += "task/operators" + elif test_name == "odin_reg_large": + odin_reg_script[-1] += "task/large" + elif test_name == "odin_reg": + odin_reg_script[-1] += "task/full" + elif test_name == "odin_reg_micro": + odin_reg_script[-1] += "suite/light_suite" + else: + raise IOError("Test does not exist: {}".format(test_name)) + + odin_root = str(Path(odin_reg_script[0]).resolve().parent) + + result = subprocess.call(odin_reg_script, cwd=odin_root) + + assert result is not None + if result != 0: + # Error + print("FAILED test '{}'".format(test_name)) + return 1 + + # Pass + print("PASSED test '{}'".format(test_name)) + return 0 + + +def collect_task_list(reg_test): + """ Return the path to the task list file for the given regression test """ + task_list_filepath = paths.tasks_path / "regression_tests" / reg_test / "task_list.txt" + if not task_list_filepath.is_file(): + raise IOError("Test does not exist: {}".format(reg_test)) + return str(task_list_filepath) + + +def run_tasks(args, task_lists): + """Call 'run_vtr_task' with all the required arguments in the command""" + print_header("Running {}".format(args.reg_test[0]), "-", False) + vtr_task_cmd = ["-l"] + [task_lists] + vtr_task_cmd += ["-j", str(args.j), "-script", args.script] + if args.show_failures: + vtr_task_cmd += ["-show_failures"] + if not args.long_task_names: + vtr_task_cmd += ["-short_task_names"] + # Exit code is number of failures + print("scripts/run_vtr_task.py {} \n".format(" ".join(map(str, vtr_task_cmd)))) + return run_vtr_task(vtr_task_cmd) + + +def parse_single_test(task_lists, check=True, calculate=True, create=False): + """ parse the test results """ + vtr_task_cmd = ["-l"] + [task_lists] + if check: + vtr_task_cmd += ["-check_golden"] + if calculate: + vtr_task_cmd += ["-calc_geomean"] + if create: + vtr_task_cmd += ["-create_golden"] + + # Exit code is number of failures + return run_vtr_task(vtr_task_cmd) + + +def print_header(heading, divider="=", print_first_line=True): + """
Print heading formatted in the center of two lines """ + if print_first_line: + print(divider * len(heading) * 2) + print(" " * int((len(heading) / 2)), end="") + print(heading) + print(divider * len(heading) * 2) + + +if __name__ == "__main__": + vtr_command_main(sys.argv[1:]) diff --git a/vtr_flow/benchmarks/arithmetic/task_and_parser.bash b/vtr_flow/benchmarks/arithmetic/task_and_parser.bash index df85f9c50cf..3deed211079 100644 --- a/vtr_flow/benchmarks/arithmetic/task_and_parser.bash +++ b/vtr_flow/benchmarks/arithmetic/task_and_parser.bash @@ -16,7 +16,7 @@ popd if [ -z "$1" ] || [ "$1" == "task" ]; then pushd . - "$VTR_SCRIPTS_DIR/run_vtr_task.pl" "$TASK_PATH" -p 4 || exit 1 + "$VTR_SCRIPTS_DIR/run_vtr_task.py" "$TASK_PATH" -p 4 || exit 1 pushd . cd "$ABS_TASK_PATH" @@ -38,7 +38,7 @@ fi if [ -z "$1" ] || [ "$1" == "parse" ]; then pushd . echo "running parser" - "$VTR_SCRIPTS_DIR/parse_vtr_task.pl" "$TASK_PATH" || exit 1 + "$VTR_SCRIPTS_DIR/parse_vtr_task.py" "$TASK_PATH" || exit 1 echo "parser done" popd fi diff --git a/vtr_flow/parse/parse_config/common/abc.txt b/vtr_flow/parse/parse_config/common/abc.txt index 30b61d54cdb..d4410e1dbfd 100644 --- a/vtr_flow/parse/parse_config/common/abc.txt +++ b/vtr_flow/parse/parse_config/common/abc.txt @@ -1,10 +1,10 @@ #ABC Metrics -abc_depth;abc0.out;lev\s*=\s*(\d+) +abc_depth;abc0.out;.* lev\s*=\s*(\d+) #ABC Run-time Metrics abc_synth_time;abc0.out;elapse: .* seconds, total: (.*) seconds abc_cec_time;abc.cec.out;elapse: .* seconds, total: (.*) seconds -abc_sec_time;abc.sec.out;elapse: .* seconds, total: (.*) seconds +abc_sec_time;abc.lec.out;elapse: .* seconds, total: (.*) seconds #Memory usage -max_abc_mem;abc0.out;Maximum resident set size \(kbytes\): (\d+) +max_abc_mem;abc0.out;\s*Maximum resident set size \(kbytes\): (\d+) diff --git a/vtr_flow/parse/parse_config/common/odin.txt b/vtr_flow/parse/parse_config/common/odin.txt index 9fc10de3797..06b1bd4a212 100644 --- a/vtr_flow/parse/parse_config/common/odin.txt +++ b/vtr_flow/parse/parse_config/common/odin.txt @@ -1,4 +1,4 @@ #Odin Run-time Metrics odin_synth_time;odin.out;Odin II took (.*) seconds -max_odin_mem;odin.out;Maximum resident set size \(kbytes\): (\d+) +max_odin_mem;odin.out;\s*Maximum resident set size \(kbytes\): (\d+) diff --git a/vtr_flow/parse/parse_config/common/vpr.common.txt b/vtr_flow/parse/parse_config/common/vpr.common.txt index 09caf9ff23a..3923501c503 100644 --- a/vtr_flow/parse/parse_config/common/vpr.common.txt +++ b/vtr_flow/parse/parse_config/common/vpr.common.txt @@ -8,4 +8,4 @@ hostname;output.txt;hostname=(.*) rundir;output.txt;rundir=(.*) #Run-time Metrics -max_vpr_mem;vpr.out;Maximum resident set size \(kbytes\): (\d+) +max_vpr_mem;vpr.out;\s*Maximum resident set size \(kbytes\): (\d+) diff --git a/vtr_flow/parse/parse_config/common/vpr.pack.txt b/vtr_flow/parse/parse_config/common/vpr.pack.txt index 37d0f3c72bf..6f82fef4712 100644 --- a/vtr_flow/parse/parse_config/common/vpr.pack.txt +++ b/vtr_flow/parse/parse_config/common/vpr.pack.txt @@ -17,4 +17,4 @@ device_limiting_resources;vpr.out;FPGA size limited by block type\(s\): (.*) device_name;vpr.out;FPGA sized to \d+ x \d+: \d+ grid tiles \((\S+)\) #VPR Run-time Metrics -pack_time;vpr.out;^#*\s*Packing took (.*) seconds +pack_time;vpr.out;\s*Packing took (.*) seconds diff --git a/vtr_flow/parse/parse_config/common/vpr.place.txt b/vtr_flow/parse/parse_config/common/vpr.place.txt index e517abd467d..37eb5dd6b0c 100644 ---  a/vtr_flow/parse/parse_config/common/vpr.place.txt +++
b/vtr_flow/parse/parse_config/common/vpr.place.txt @@ -2,5 +2,5 @@ placed_wirelength_est;vpr.out;BB estimate of min-dist \(placement\) wire length: (\d+) #VPR Run-time Metrics -place_time;vpr.out;^#*\s*Placement took (.*) seconds -place_quench_time;vpr.out;^#*\s*Placement Quench took (.*) seconds +place_time;vpr.out;\s*Placement took (.*) seconds +place_quench_time;vpr.out;\s*Placement Quench took (.*) seconds diff --git a/vtr_flow/parse/parse_config/common/vpr.route_fixed_chan_width.txt b/vtr_flow/parse/parse_config/common/vpr.route_fixed_chan_width.txt index 74cb06a9c04..d8219dbdb85 100644 --- a/vtr_flow/parse/parse_config/common/vpr.route_fixed_chan_width.txt +++ b/vtr_flow/parse/parse_config/common/vpr.route_fixed_chan_width.txt @@ -1,5 +1,5 @@ #VPR Routing Metrics -routed_wirelength;vpr.out;Total wirelength: (.*), average +routed_wirelength;vpr.out;\s*Total wirelength: (.*), average .* total_nets_routed;vpr.out;total_nets_routed: (\d+) total_connections_routed;vpr.out;total_connections_routed: (\d+) @@ -7,7 +7,7 @@ total_heap_pushes;vpr.out;total_heap_pushes: (\d+) total_heap_pops;vpr.out;total_heap_pops: (\d+) #Area Metrics -logic_block_area_total;vpr.out;Total logic block area .*: (.*) -logic_block_area_used;vpr.out;Total used logic block area: (.*) -routing_area_total;vpr.out;Total routing area: (.*), per logic tile: .* -routing_area_per_tile;vpr.out;Total routing area: .*, per logic tile: (.*) +logic_block_area_total;vpr.out;\s*Total logic block area .*: (.*) +logic_block_area_used;vpr.out;\s*Total used logic block area: (.*) +routing_area_total;vpr.out;\s*Total routing area: (.*), per logic tile: .* +routing_area_per_tile;vpr.out;\s*Total routing area: .*, per logic tile: (.*) diff --git a/vtr_flow/parse/parse_config/common/vpr.route_min_chan_width.txt b/vtr_flow/parse/parse_config/common/vpr.route_min_chan_width.txt index 39a949df2bb..bd65a800f0c 100644 --- a/vtr_flow/parse/parse_config/common/vpr.route_min_chan_width.txt +++ b/vtr_flow/parse/parse_config/common/vpr.route_min_chan_width.txt @@ -1,12 +1,12 @@ #VPR Min W Routing Metrics min_chan_width;vpr.out;Best routing used a channel width factor of (\d+) -routed_wirelength;vpr.out;Total wirelength: (.*), average +routed_wirelength;vpr.out;\s*Total wirelength: (.*), average .* min_chan_width_route_success_iteration;vpr.out;Successfully routed after (\d+) routing iterations -logic_block_area_total;vpr.out;Total logic block area .*: (.*) -logic_block_area_used;vpr.out;Total used logic block area: (.*) -min_chan_width_routing_area_total;vpr.out;Total routing area: (.*), per logic tile: .* -min_chan_width_routing_area_per_tile;vpr.out;Total routing area: .*, per logic tile: (.*) +logic_block_area_total;vpr.out;\s*Total logic block area .*: (.*) +logic_block_area_used;vpr.out;\s*Total used logic block area: (.*) +min_chan_width_routing_area_total;vpr.out;\s*Total routing area: (.*), per logic tile: .* +min_chan_width_routing_area_per_tile;vpr.out;\s*Total routing area: .*, per logic tile: (.*) #VPR Run-time Metrics -min_chan_width_route_time;vpr.out;^#*\s*Routing took (.*) seconds +min_chan_width_route_time;vpr.out;\s*Routing took (.*) seconds .* diff --git a/vtr_flow/parse/parse_config/timing/vpr.route_fixed_chan_width.txt b/vtr_flow/parse/parse_config/timing/vpr.route_fixed_chan_width.txt index 576923a6eff..a9c5d2af567 100644 --- a/vtr_flow/parse/parse_config/timing/vpr.route_fixed_chan_width.txt +++ b/vtr_flow/parse/parse_config/timing/vpr.route_fixed_chan_width.txt @@ -11,7 +11,7 @@ setup_WNS;vpr.out;Final setup Worst Negative 
Slack \(sWNS\): (.*) ns hold_TNS;vpr.out;Final hold Total Negative Slack \(hTNS\): (.*) ns hold_WNS;vpr.out;Final hold Worst Negative Slack \(hWNS\): (.*) ns -crit_path_route_time;vpr.out;^#*\s*Routing took (.*) seconds +crit_path_route_time;vpr.out;\s*Routing took (.*) seconds crit_path_total_timing_analysis_time;vpr.out;Flow timing analysis took (.*) seconds crit_path_total_sta_time;vpr.out;Flow timing analysis took .* seconds \((.*) STA diff --git a/vtr_flow/parse/parse_config/timing/vpr.route_relaxed_chan_width.txt b/vtr_flow/parse/parse_config/timing/vpr.route_relaxed_chan_width.txt index 574f03870f3..565c6e096cc 100644 --- a/vtr_flow/parse/parse_config/timing/vpr.route_relaxed_chan_width.txt +++ b/vtr_flow/parse/parse_config/timing/vpr.route_relaxed_chan_width.txt @@ -1,11 +1,11 @@ #VPR Critical Path Routing Metrics -crit_path_routed_wirelength;vpr.crit_path.out;Total wirelength: (.*), average +crit_path_routed_wirelength;vpr.crit_path.out;\s*Total wirelength: (.*), average crit_path_route_success_iteration;vpr.crit_path.out;Successfully routed after (\d+) routing iterations -crit_path_total_nets_routed;vpr.crit_path.out;total_nets_routed: (\d+) -crit_path_total_connections_routed;vpr.crit_path.out;total_connections_routed: (\d+) -crit_path_total_heap_pushes;vpr.crit_path.out;total_heap_pushes: (\d+) -crit_path_total_heap_pops;vpr.crit_path.out;total_heap_pops: (\d+) +crit_path_total_nets_routed;vpr.crit_path.out;.* total_nets_routed: (\d+) +crit_path_total_connections_routed;vpr.crit_path.out;.* total_connections_routed: (\d+) +crit_path_total_heap_pushes;vpr.crit_path.out;.* total_heap_pushes: (\d+) +crit_path_total_heap_pops;vpr.crit_path.out;.* total_heap_pops: (\d+) #VPR Analysis (final implementation) Metrics critical_path_delay;vpr.crit_path.out;Final critical path delay \(least slack\): (.*) ns @@ -16,10 +16,10 @@ hold_TNS;vpr.crit_path.out;Final hold Total Negative Slack \(hTNS\): (.*) ns hold_WNS;vpr.crit_path.out;Final hold Worst Negative Slack \(hWNS\): (.*) ns #Area Metrics -crit_path_routing_area_total;vpr.crit_path.out;Total routing area: (.*), per logic tile: .* -crit_path_routing_area_per_tile;vpr.crit_path.out;Total routing area: .*, per logic tile: (.*) +crit_path_routing_area_total;vpr.crit_path.out;\s*Total routing area: (.*), per logic tile: .* +crit_path_routing_area_per_tile;vpr.crit_path.out;\s*Total routing area: .*, per logic tile: (.*) #Run-time Metrics -crit_path_route_time;vpr.crit_path.out;^#*\s*Routing took (.*) seconds +crit_path_route_time;vpr.crit_path.out;\s*Routing took (.*) seconds crit_path_total_timing_analysis_time;vpr.crit_path.out;Flow timing analysis took (.*) seconds crit_path_total_sta_time;vpr.crit_path.out;Flow timing analysis took .* seconds \((.*) STA diff --git a/vtr_flow/parse/parse_config/vpr_chain.txt b/vtr_flow/parse/parse_config/vpr_chain.txt index e7728e5ce30..48315bd83ae 100644 --- a/vtr_flow/parse/parse_config/vpr_chain.txt +++ b/vtr_flow/parse/parse_config/vpr_chain.txt @@ -1,7 +1,7 @@ %include "vpr_standard.txt" num_le;vpr.out;\s*Total number of Logic Elements used\s*:\s*(\d+) -num_luts;vpr.out;6-LUT\s*:\s*(\d+) +num_luts;vpr.out;\s*6-LUT\s*:\s*(\d+) num_add_blocks;odin.out;The Total Number of Hard Block adders: (\d+) max_add_chain_length;odin.out;The Number of Hard Block adders in the Longest Chain: (\d+) num_sub_blocks;odin.out;The Total Number of Hard Block subs: (\d+) diff --git a/vtr_flow/parse/qor_config/qor_fixed_chan_width.txt b/vtr_flow/parse/qor_config/qor_fixed_chan_width.txt index 902c65cddf9..72c6708c322 100644 
--- a/vtr_flow/parse/qor_config/qor_fixed_chan_width.txt +++ b/vtr_flow/parse/qor_config/qor_fixed_chan_width.txt @@ -1,5 +1,5 @@ vpr_status;output.txt;vpr_status=(.*) -total_wirelength;vpr.out;Total wirelength:\s*(\d+) +total_wirelength;vpr.out;\s*Total wirelength: (\d+) #crit_path_delay_(mcw);vpr.out;Final critical path: (.*) ns #crit_path_delay_(1.3mcw);vpr.crit_path.out;Final critical path: (.*) ns #total_wirelength_(mcw);vpr.out;Total wirelength:\s*(\d+) diff --git a/vtr_flow/parse/qor_config/qor_no_timing.txt b/vtr_flow/parse/qor_config/qor_no_timing.txt index e8ab411df4b..91a1bcad862 100644 --- a/vtr_flow/parse/qor_config/qor_no_timing.txt +++ b/vtr_flow/parse/qor_config/qor_no_timing.txt @@ -1,5 +1,5 @@ vpr_status;output.txt;vpr_status=(.*) -total_wirelength;vpr.out;Total wirelength:\s*(\d+) +total_wirelength;vpr.out;\s*Total wirelength:\s*(\d+) #min_chan_width;vpr.out;Best routing used a channel width factor of (\d+) #crit_path_delay_(mcw);vpr.out;Final critical path: (.*) ns #crit_path_delay_(1.3mcw);vpr.crit_path.out;Final critical path: (.*) ns diff --git a/vtr_flow/parse/qor_config/qor_rr_graph.txt b/vtr_flow/parse/qor_config/qor_rr_graph.txt index 7483fab8bf7..7f80fb3986d 100644 --- a/vtr_flow/parse/qor_config/qor_rr_graph.txt +++ b/vtr_flow/parse/qor_config/qor_rr_graph.txt @@ -1,5 +1,5 @@ vpr_status;output.txt;vpr_status=(.*) -total_wirelength;vpr_write_rr_graph.out;Total wirelength:\s*(\d+) +total_wirelength;vpr_write_rr_graph.out;\s*Total wirelength:\s*(\d+) #crit_path_delay_(mcw);vpr_write_rr_graph.out;Final critical path: (.*) ns #crit_path_delay_(1.3mcw);vpr.crit_path.out;Final critical path: (.*) ns #total_wirelength_(mcw);vpr_write_rr_graph.out;Total wirelength:\s*(\d+) diff --git a/vtr_flow/parse/qor_config/qor_standard.txt b/vtr_flow/parse/qor_config/qor_standard.txt index 902c65cddf9..7aa3c13b1fc 100644 --- a/vtr_flow/parse/qor_config/qor_standard.txt +++ b/vtr_flow/parse/qor_config/qor_standard.txt @@ -1,5 +1,5 @@ vpr_status;output.txt;vpr_status=(.*) -total_wirelength;vpr.out;Total wirelength:\s*(\d+) +total_wirelength;vpr.out;\s*Total wirelength:\s*(\d+) #crit_path_delay_(mcw);vpr.out;Final critical path: (.*) ns #crit_path_delay_(1.3mcw);vpr.crit_path.out;Final critical path: (.*) ns #total_wirelength_(mcw);vpr.out;Total wirelength:\s*(\d+) diff --git a/vtr_flow/parse/qor_config/qor_vpr_ispd.txt b/vtr_flow/parse/qor_config/qor_vpr_ispd.txt index 902c65cddf9..7aa3c13b1fc 100644 --- a/vtr_flow/parse/qor_config/qor_vpr_ispd.txt +++ b/vtr_flow/parse/qor_config/qor_vpr_ispd.txt @@ -1,5 +1,5 @@ vpr_status;output.txt;vpr_status=(.*) -total_wirelength;vpr.out;Total wirelength:\s*(\d+) +total_wirelength;vpr.out;\s*Total wirelength:\s*(\d+) #crit_path_delay_(mcw);vpr.out;Final critical path: (.*) ns #crit_path_delay_(1.3mcw);vpr.crit_path.out;Final critical path: (.*) ns #total_wirelength_(mcw);vpr.out;Total wirelength:\s*(\d+) diff --git a/vtr_flow/parse/qor_config/qor_vpr_titan.txt b/vtr_flow/parse/qor_config/qor_vpr_titan.txt index 902c65cddf9..7aa3c13b1fc 100644 --- a/vtr_flow/parse/qor_config/qor_vpr_titan.txt +++ b/vtr_flow/parse/qor_config/qor_vpr_titan.txt @@ -1,5 +1,5 @@ vpr_status;output.txt;vpr_status=(.*) -total_wirelength;vpr.out;Total wirelength:\s*(\d+) +total_wirelength;vpr.out;\s*Total wirelength:\s*(\d+) #crit_path_delay_(mcw);vpr.out;Final critical path: (.*) ns #crit_path_delay_(1.3mcw);vpr.crit_path.out;Final critical path: (.*) ns #total_wirelength_(mcw);vpr.out;Total wirelength:\s*(\d+) diff --git a/vtr_flow/scripts/flow_script_template.txt 
b/vtr_flow/scripts/flow_script_template.txt new file mode 100644 index 00000000000..1cc16fc374f --- /dev/null +++ b/vtr_flow/scripts/flow_script_template.txt @@ -0,0 +1,27 @@ +#!/usr/bin/env bash + +VTR_RUNTIME_ESTIMATE_SECONDS={estimated_time} +VTR_MEMORY_ESTIMATE_BYTES={estimated_memory} + +VTR_RUNTIME_ESTIMATE_HUMAN_READABLE="{human_readable_time}" +VTR_MEMORY_ESTIMATE_HUMAN_READABLE="{human_readable_memory}" + +#We redirect all command output to both stdout and the log file with 'tee'. + +#Begin I/O redirection +{{ + + {script} {command} + + #The IO redirection occurs in a sub-shell, + #so we need to exit it with the correct code + exit \$? + +}} |& tee vtr_flow.out +#End I/O redirection + +#We used a pipe to redirect IO. +#To get the correct exit status we need to exit with the +#status of the first element in the pipeline (i.e. the real +#command run above) +exit \${{PIPESTATUS[0]}} diff --git a/vtr_flow/scripts/populate_after_running_task.sh b/vtr_flow/scripts/populate_after_running_task.sh index 6039d2e30eb..70bdbc05902 100755 --- a/vtr_flow/scripts/populate_after_running_task.sh +++ b/vtr_flow/scripts/populate_after_running_task.sh @@ -3,12 +3,12 @@ task=$1 if [[ "$2" == "-norun" ]]; then echo skip to populate elif [[ "$2" == "-l" ]]; then - ~/vtr/vtr_flow/scripts/run_vtr_task.pl -l $task + ~/vtr/vtr_flow/scripts/run_vtr_task.py -l $task else - ~/vtr/vtr_flow/scripts/run_vtr_task.pl $task + ~/vtr/vtr_flow/scripts/run_vtr_task.py $task fi ~/benchtracker/populate_db.py $task --root_directory ~/vtr/vtr_flow/tasks/ \ --database ~/benchtracker/results.db -s \ -"~/vtr/vtr_flow/scripts/parse_vtr_task.pl {task_dir} -run {run_number}" \ +"~/vtr/vtr_flow/scripts/parse_vtr_task.py {task_dir} -run {run_number}" \ -k arch circuit diff --git a/vtr_flow/scripts/python_libs/vtr/__init__.py b/vtr_flow/scripts/python_libs/vtr/__init__.py index e83c8966f08..566b9359e4d 100644 --- a/vtr_flow/scripts/python_libs/vtr/__init__.py +++ b/vtr_flow/scripts/python_libs/vtr/__init__.py @@ -8,16 +8,26 @@ relax_w, file_replace, RawDefaultHelpFormatter, - VERBOSITY_CHOICES, format_elapsed_time, write_tab_delimitted_csv, load_list_file, argparse_str2bool, get_next_run_dir, get_latest_run_dir, + get_latest_run_number, verify_file, + pretty_print_table, + find_task_dir, +) +from .inspect import ( + determine_lut_size, + determine_min_w, + determine_memory_addr_width, + load_parse_patterns, + load_pass_requirements, + load_parse_results, + load_script_param, ) -from .inspect import determine_lut_size, determine_min_w, determine_memory_addr_width # pylint: disable=reimported from .abc import run, run_lec @@ -26,5 +36,22 @@ from .ace import run from .error import * from .flow import run, VtrStage +from .task import ( + load_task_config, + TaskConfig, + find_task_config_file, + shorten_task_names, + find_longest_task_description, + create_jobs, +) +from .parse_vtr_flow import parse_vtr_flow +from .parse_vtr_task import ( + parse_tasks, + find_latest_run_dir, + check_golden_results_for_tasks, + create_golden_results_for_tasks, + calc_geomean, + summarize_qor, +) # pylint: enable=reimported diff --git a/vtr_flow/scripts/python_libs/vtr/inspect.py b/vtr_flow/scripts/python_libs/vtr/inspect.py index 4db47af9970..b75d7c9cacf 100644 --- a/vtr_flow/scripts/python_libs/vtr/inspect.py +++ b/vtr_flow/scripts/python_libs/vtr/inspect.py @@ -2,6 +2,9 @@ module that contains functions to inspect various files to determine important values """ import re +from collections import OrderedDict +from pathlib import Path +import abc try: # 
Try for the fast c-based version first @@ -11,6 +14,448 @@ import xml.etree.ElementTree as ET from vtr.error import InspectError +from vtr import load_config_lines + + +class ParsePattern: + """ Pattern for parsing log files """ + + def __init__(self, name, filename, regex_str, default_value=None): + self._name = name + self._filename = filename + self._regex = re.compile(regex_str) + self._default_value = default_value + + def name(self): + """ Return name of what is being parsed for """ + return self._name + + def filename(self): + """ Log filename to parse """ + return self._filename + + def regex(self): + """ Regex expression to use for parsing """ + return self._regex + + def default_value(self): + """ Return the default parse value """ + return self._default_value + + +class PassRequirement(abc.ABC): + """ Used to check if a parsed value passes an expression """ + + def __init__(self, metric): + self._metric = metric + self._type = type + + def metric(self): + """ Return pass metric """ + return self._metric + + @abc.abstractmethod + def type(self): + """ Return the type of requirement checking """ + + @abc.abstractmethod + def check_passed(self, golden_value, check_value, check_string="golden value"): + """ Return whether the check passed """ + + +class EqualPassRequirement(PassRequirement): + """ Used to check if parsed value is equal to golden value """ + + def type(self): + return "Equal" + + def check_passed(self, golden_value, check_value, check_string="golden value"): + if golden_value == check_value: + return True, "" + + return ( + False, + "Task value '{}' does not match {} '{}'".format( + check_value, check_string, golden_value + ), + ) + + +class RangePassRequirement(PassRequirement): + """ Used to check if parsed value is within a range """ + + def __init__(self, metric, min_value=None, max_value=None): + super().__init__(metric) + + if max_value < min_value: + raise InspectError("Invalid range specification (min value larger than max value)") + + self._min_value = min_value + self._max_value = max_value + + def type(self): + return "Range" + + def min_value(self): + """ Get min value of golden range """ + return self._min_value + + def max_value(self): + """ Get max value of golden range """ + return self._max_value + + def check_passed(self, golden_value, check_value, check_string="golden value"): + """ Check if parsed value is within a range or equal to golden value """ + + if golden_value is None or check_value is None: + if golden_value is None and check_value is None: + ret_value = True + ret_str = "both golden and check are None" + elif check_value is None: + ret_value = False + ret_str = "{} is {}, but check value is None".format(check_string, golden_value) + else: + ret_value = False + ret_str = "{} is None, but check value is {}".format(check_string, check_value) + return (ret_value, ret_str) + + assert golden_value is not None + assert check_value is not None + + original_golden_value = golden_value + try: + golden_value = float(golden_value) + except ValueError: + raise InspectError( + "Failed to convert {} '{}' to float".format(check_string, golden_value) + ) from ValueError + original_check_value = check_value + try: + check_value = float(check_value) + except ValueError: + raise InspectError( + "Failed to convert check value '{}' to float".format(check_value) + ) from ValueError + + norm_check_value = None + if golden_value == 0.0: # Avoid division by zero + if golden_value == check_value: + return True, "{} and check both equal 0".format(check_string)
+ norm_check_value = float("inf") + else: + norm_check_value = check_value / golden_value + + if original_check_value == original_golden_value: + return True, "Check value equal to {}".format(check_string) + + if self.min_value() <= norm_check_value <= self.max_value(): + return True, "relative value within range" + + return ( + False, + "relative value {} outside of range [{},{}] " + "and not equal to {} value: {}".format( + norm_check_value, self.min_value(), self.max_value(), check_string, golden_value, + ), + ) + + +class RangeAbsPassRequirement(PassRequirement): + """ Check if value is in some relative ratio range, or below some absolute value """ + + def __init__(self, metric, min_value=None, max_value=None, abs_threshold=None): + super().__init__(metric) + + if max_value < min_value: + raise InspectError("Invalid range specification (min value larger than max value)") + + self._min_value = min_value + self._max_value = max_value + self._abs_threshold = abs_threshold + + def type(self): + """ Return requirement type """ + return "Range" + + def min_value(self): + """ Return min value of ratio range """ + return self._min_value + + def max_value(self): + """ Return max value of ratio range """ + return self._max_value + + def abs_threshold(self): + """ Get absolute threshold """ + return self._abs_threshold + + def check_passed(self, golden_value, check_value, check_string="golden value"): + """ + Check if parsed value is within acceptable range, + absolute value or equal to golden value + """ + + # Check for nulls + if golden_value is None or check_value is None: + if golden_value is None and check_value is None: + ret_value = True + ret_str = "both {} and check are None".format(check_string) + elif golden_value is None: + ret_value = False + ret_str = "{} is None, but check value is {}".format(check_string, check_value) + else: + ret_value = False + ret_str = "{} is {}, but check value is None".format(check_string, golden_value) + + return (ret_value, ret_str) + + assert golden_value is not None + assert check_value is not None + + # Convert values to float + original_golden_value = golden_value + try: + golden_value = float(golden_value) + except ValueError: + raise InspectError( + "Failed to convert {} '{}' to float".format(check_string, golden_value) + ) from ValueError + + original_check_value = check_value + try: + check_value = float(check_value) + except ValueError: + raise InspectError( + "Failed to convert check value '{}' to float".format(check_value) + ) from ValueError + + # Get relative ratio + norm_check_value = None + if golden_value == 0.0: # Avoid division by zero + if golden_value == check_value: + return True, "{} and check both equal 0".format(check_string) + norm_check_value = float("inf") + else: + norm_check_value = check_value / golden_value + + # Check if the original values match + if original_check_value == original_golden_value: + return True, "Check value equal to {}".format(check_string) + + # Check if value within range + if (self.min_value() <= norm_check_value <= self.max_value()) or abs( + check_value - golden_value + ) <= self.abs_threshold(): + return True, "relative value within range" + + return ( + False, + "relative value {} outside of range [{},{}], " + "above absolute threshold {} and not equal to {} value: {}".format( + norm_check_value, + self.min_value(), + self.max_value(), + self.abs_threshold(), + check_string, + golden_value, + ), + ) + + +class ParseResults: + """ This class contains all parse results and metrics """ + + PRIMARY_KEYS
= ("architecture", "circuit", "script_params") + + def __init__(self): + self._metrics = OrderedDict() + + def add_result(self, arch, circuit, parse_result, script_param=None): + """ Add new parse result for given arch/circuit pair """ + script_param = load_script_param(script_param) + self._metrics[(arch, circuit, script_param)] = parse_result + + def metrics(self, arch, circuit, script_param=None): + """ Return individual metric based on the architechure, circuit and script""" + script_param = load_script_param(script_param) + if (arch, circuit, script_param) in self._metrics: + return self._metrics[(arch, circuit, script_param)] + return None + + def all_metrics(self): + """ Return all metric results """ + return self._metrics + + +def load_script_param(script_param): + """ + Create script parameter string to be used in task names and output. + """ + if script_param and "common" not in script_param: + script_param = "common_" + script_param + if script_param: + script_param = script_param.replace(" ", "_") + else: + script_param = "common" + return script_param + + +def load_parse_patterns(parse_config_filepath): + """ + Loads the parse patterns from the desired file. + These parse patterns are later used to load in the results file + The lines of this file should be formated in either of the following ways: + name;path;regex;[default value] + name;path;regex + """ + parse_patterns = OrderedDict() + + for line in load_config_lines(parse_config_filepath): + + components = line.split(";") + + if len(components) == 3 or len(components) == 4: + + name = components[0] + filepath = components[1] + regex_str = components[2] + + default_value = None + if len(components) == 4: + default_value = components[3] + + if name not in parse_patterns: + parse_patterns[name] = ParsePattern(name, filepath, regex_str, default_value) + else: + raise InspectError( + "Duplicate parse pattern name '{}'".format(name), parse_config_filepath, + ) + + else: + raise InspectError( + "Invalid parse format line: '{}'".format(line), parse_config_filepath + ) + + return parse_patterns + + +def load_pass_requirements(pass_requirements_filepath): + """ + Load the pass requirements from particular file + The lines of the pass requiremtents file should follow one of the following format: + name;Range(min,max) + name;RangeAbs(min,max,absolute_value) + name;Equal() + """ + parse_patterns = OrderedDict() + + for line in load_config_lines(pass_requirements_filepath): + components = line.split(";") + + if len(components) != 2: + raise InspectError( + "Invalid pass requirement format line: '{}'".format(line), + pass_requirements_filepath, + ) + + metric = components[0] + expr = components[1] + + if metric in parse_patterns: + raise InspectError( + "Duplicate pass requirement for '{}'".format(metric), pass_requirements_filepath, + ) + + func, params_str = expr.split("(") + params_str = params_str.rstrip(")") + params = [] + + if params_str != "": + params = params_str.split(",") + + if func == "Range": + if len(params) != 2: + raise InspectError( + "Range() pass requirement function requires two arguments", + pass_requirements_filepath, + ) + + parse_patterns[metric] = RangePassRequirement( + metric, float(params[0]), float(params[1]) + ) + elif func == "RangeAbs": + if len(params) != 3: + raise InspectError( + "RangeAbs() pass requirement function requires two arguments", + pass_requirements_filepath, + ) + + parse_patterns[metric] = RangeAbsPassRequirement( + metric, float(params[0]), float(params[1]), float(params[2]) + ) + elif 
func == "Equal": + if len(params) != 0: + raise InspectError( + "Equal() pass requirement function requires no arguments", + pass_requirements_filepath, + ) + parse_patterns[metric] = EqualPassRequirement(metric) + else: + raise InspectError( + "Unexpected pass requirement function '{}' for metric '{}'".format(func, metric), + pass_requirements_filepath, + ) + + return parse_patterns + + +def load_parse_results(parse_results_filepath): + """ + Load the results from the parsed result file. + """ + header = [] + + parse_results = ParseResults() + if not Path(parse_results_filepath).exists(): + return parse_results + + with open(parse_results_filepath) as file: + for lineno, row in enumerate(file): + if row[0] == "+": + row = row[1:] + elements = [elem.strip() for elem in row.split("\t")] + if lineno == 0: + header = elements + else: + result = OrderedDict() + + arch = None + circuit = None + script_param = None + for i, elem in enumerate(elements): + metric = header[i] + + if elem == "": + elem = None + + if metric == "arch": + metric = "architecture" + + if metric == "architecture": + arch = elem + elif metric == "circuit": + circuit = elem + elif metric == "script_params": + script_param = elem + + result[metric] = elem + + if not (arch and circuit): + print(parse_results_filepath) + + parse_results.add_result(arch, circuit, result, script_param) + + return parse_results def determine_lut_size(architecture_file): diff --git a/vtr_flow/scripts/python_libs/vtr/parse_vtr_flow.py b/vtr_flow/scripts/python_libs/vtr/parse_vtr_flow.py new file mode 100755 index 00000000000..605024094db --- /dev/null +++ b/vtr_flow/scripts/python_libs/vtr/parse_vtr_flow.py @@ -0,0 +1,90 @@ +#!/usr/bin/env python3 +""" +Module to parse the vtr flow results. +""" +import sys +from pathlib import Path +import glob +from collections import OrderedDict + +# pylint: disable=wrong-import-position +sys.path.insert(0, str(Path(__file__).resolve().parent.parent)) +import vtr +from vtr import paths + +# pylint: enable=wrong-import-position + + +def parse_vtr_flow(arg_list): + """ + parse vtr flow output + """ + parse_path = arg_list[0] + parse_config_file = arg_list[1] + parse_config_file = vtr.util.verify_file(parse_config_file, "parse config") + + extra_params = arg_list[2:] + if parse_config_file is None: + parse_config_file = str(paths.vtr_benchmarks_parse_path) + + parse_patterns = vtr.load_parse_patterns(str(parse_config_file)) + + metrics = OrderedDict() + + extra_params_parsed = OrderedDict() + + for param in extra_params: + key, value = param.split("=", 1) + extra_params_parsed[key] = value + print(key, end="\t") + + # Set defaults + for parse_pattern in parse_patterns.values(): + metrics[parse_pattern.name()] = ( + parse_pattern.default_value() if parse_pattern.default_value() is not None else "" + ) + print(parse_pattern.name(), end="\t") + print("") + + for key, value in extra_params_parsed.items(): + print(value, end="\t") + + # Process each pattern + for parse_pattern in parse_patterns.values(): + + # We interpret the parse pattern's filename as a glob pattern + filepaths = glob.glob(str(Path(parse_path) / parse_pattern.filename())) + + if len(filepaths) > 1: + raise vtr.InspectError( + "File pattern '{}' is ambiguous ({} files matched)".format( + parse_pattern.filename(), len(filepaths) + ), + len(filepaths), + filepaths, + ) + + if len(filepaths) == 1: + + assert Path(filepaths[0]).exists + metrics[parse_pattern.name()] = "-1" + with open(filepaths[0], "r") as file: + for line in file: + while line[0] == "#": 
+def load_parse_results(parse_results_filepath):
+    """
+    Load the results from the parsed result file.
+    """
+    header = []
+
+    parse_results = ParseResults()
+    if not Path(parse_results_filepath).exists():
+        return parse_results
+
+    with open(parse_results_filepath) as file:
+        for lineno, row in enumerate(file):
+            if row[0] == "+":
+                row = row[1:]
+            elements = [elem.strip() for elem in row.split("\t")]
+            if lineno == 0:
+                header = elements
+            else:
+                result = OrderedDict()
+
+                arch = None
+                circuit = None
+                script_param = None
+                for i, elem in enumerate(elements):
+                    metric = header[i]
+
+                    if elem == "":
+                        elem = None
+
+                    if metric == "arch":
+                        metric = "architecture"
+
+                    if metric == "architecture":
+                        arch = elem
+                    elif metric == "circuit":
+                        circuit = elem
+                    elif metric == "script_params":
+                        script_param = elem
+
+                    result[metric] = elem
+
+                if not (arch and circuit):
+                    print(parse_results_filepath)
+
+                parse_results.add_result(arch, circuit, result, script_param)
+
+    return parse_results
 
 
 def determine_lut_size(architecture_file):
diff --git a/vtr_flow/scripts/python_libs/vtr/parse_vtr_flow.py b/vtr_flow/scripts/python_libs/vtr/parse_vtr_flow.py
new file mode 100755
index 00000000000..605024094db
--- /dev/null
+++ b/vtr_flow/scripts/python_libs/vtr/parse_vtr_flow.py
@@ -0,0 +1,90 @@
+#!/usr/bin/env python3
+"""
+Module to parse the VTR flow results.
+"""
+import sys
+from pathlib import Path
+import glob
+from collections import OrderedDict
+
+# pylint: disable=wrong-import-position
+sys.path.insert(0, str(Path(__file__).resolve().parent.parent))
+import vtr
+from vtr import paths
+
+# pylint: enable=wrong-import-position
+
+
+def parse_vtr_flow(arg_list):
+    """
+    Parse the VTR flow output.
+    """
+    parse_path = arg_list[0]
+    parse_config_file = arg_list[1]
+    parse_config_file = vtr.util.verify_file(parse_config_file, "parse config")
+
+    extra_params = arg_list[2:]
+    if parse_config_file is None:
+        parse_config_file = str(paths.vtr_benchmarks_parse_path)
+
+    parse_patterns = vtr.load_parse_patterns(str(parse_config_file))
+
+    metrics = OrderedDict()
+
+    extra_params_parsed = OrderedDict()
+
+    for param in extra_params:
+        key, value = param.split("=", 1)
+        extra_params_parsed[key] = value
+        print(key, end="\t")
+
+    # Set defaults
+    for parse_pattern in parse_patterns.values():
+        metrics[parse_pattern.name()] = (
+            parse_pattern.default_value() if parse_pattern.default_value() is not None else ""
+        )
+        print(parse_pattern.name(), end="\t")
+    print("")
+
+    for key, value in extra_params_parsed.items():
+        print(value, end="\t")
+
+    # Process each pattern
+    for parse_pattern in parse_patterns.values():
+
+        # We interpret the parse pattern's filename as a glob pattern
+        filepaths = glob.glob(str(Path(parse_path) / parse_pattern.filename()))
+
+        if len(filepaths) > 1:
+            raise vtr.InspectError(
+                "File pattern '{}' is ambiguous ({} files matched)".format(
+                    parse_pattern.filename(), len(filepaths)
+                ),
+                len(filepaths),
+                filepaths,
+            )
+
+        if len(filepaths) == 1:
+
+            assert Path(filepaths[0]).exists()
+            metrics[parse_pattern.name()] = "-1"
+            with open(filepaths[0], "r") as file:
+                for line in file:
+                    while line[0] == "#":
+                        line = line[1:]
+                    match = parse_pattern.regex().match(line)
+                    if match and match.groups():
+                        # Extract the first group value
+                        metrics[parse_pattern.name()] = match.groups()[0]
+            print(metrics[parse_pattern.name()], end="\t")
+        else:
+            # No matching file, skip
+            print("-1", end="\t")
+            assert len(filepaths) == 0
+    print("")
+
+    return 0
+
+
+if __name__ == "__main__":
+    parse_vtr_flow(sys.argv[1:])
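+
+# A hypothetical invocation (all paths are illustrative): parse an existing
+# flow run directory with a parse config, tagging the output with extra
+# key=value columns:
+#
+#   python3 parse_vtr_flow.py run001/arch1/circuit1/common \
+#       <vtr_root>/vtr_flow/parse/parse_config/common/vtr_benchmarks.txt \
+#       arch=arch1 circuit=circuit1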
+ """ + ) + + parser = argparse.ArgumentParser( + prog=prog, description=description, epilog=epilog, formatter_class=RawDefaultHelpFormatter, + ) + + # + # Major arguments + # + parser.add_argument("task", nargs="*", help="Tasks to be run") + + parser.add_argument( + "-l", + nargs="*", + default=[], + metavar="TASK_LIST_FILE", + dest="list_file", + help="A file listing tasks to be run", + ) + + parser.add_argument( + "-parse_qor", + default=False, + action="store_true", + help="Perform only parsing on the latest task run", + ) + + parser.add_argument( + "-create_golden", + default=False, + action="store_true", + help="Update or create golden results for the specified task", + ) + + parser.add_argument( + "-check_golden", + default=False, + action="store_true", + help="Check the latest task run against golden results", + ) + + parser.add_argument( + "-calc_geomean", + default=False, + action="store_true", + help="QoR geomeans are not computed by default", + ) + + parser.add_argument("-run", default=None, type=str, help="") + + parser.add_argument("-revision", default="", help="Revision number") + + return parser + + +def vtr_command_main(arg_list, prog=None): + """ + Main function for parse_vtr_task + Parses in the results from run_vtr_task.py + """ + # Load the arguments + args = vtr_command_argparser(prog).parse_args(arg_list) + try: + task_names = args.task + + for list_file in args.list_file: + task_names += load_list_file(list_file) + + config_files = [find_task_config_file(task_name) for task_name in task_names] + + configs = [load_task_config(config_file) for config_file in config_files] + num_failed = 0 + + jobs = create_jobs(args, configs, after_run=True) + parse_tasks(configs, jobs) + + if args.create_golden: + create_golden_results_for_tasks(configs) + + if args.check_golden: + num_failed += check_golden_results_for_tasks(configs) + + if args.calc_geomean: + summarize_qor(configs) + calc_geomean(args, configs) + + except CommandError as error: + print("Error: {msg}".format(msg=error.msg)) + print("\tfull command: ", error.cmd) + print("\treturncode : ", error.returncode) + print("\tlog file : ", error.log) + num_failed += 1 + except InspectError as error: + print("Error: {msg}".format(msg=error.msg)) + if error.filename: + print("\tfile: ", error.filename) + num_failed += 1 + except VtrError as error: + print("Error:", error.msg) + num_failed += 1 + + return num_failed + + +def parse_tasks(configs, jobs): + """ + Parse the selection of tasks specified in configs and associated jobs + """ + for config in configs: + config_jobs = [job for job in jobs if job.task_name() == config.task_name] + parse_task(config, config_jobs) + + +def parse_task(config, config_jobs, flow_metrics_basename=FIRST_PARSE_FILE): + """ + Parse a single task run. 
+def parse_task(config, config_jobs, flow_metrics_basename=FIRST_PARSE_FILE):
+    """
+    Parse a single task run.
+
+    This generates a file parse_results.txt in the task's working directory,
+    which is an amalgam of the parse_results.txt files produced by each job (flow invocation).
+    """
+    run_dir = find_latest_run_dir(config)
+
+    # Record max widths for pretty printing
+    max_arch_len = len("architecture")
+    max_circuit_len = len("circuit")
+    for job in config_jobs:
+        work_dir = job.work_dir(get_latest_run_dir(find_task_dir(config)))
+        if job.parse_command():
+            parse_filepath = str(PurePath(work_dir) / flow_metrics_basename)
+            with open(parse_filepath, "w+") as parse_file:
+                with redirect_stdout(parse_file):
+                    parse_vtr_flow(job.parse_command())
+        if job.second_parse_command():
+            parse_filepath = str(PurePath(work_dir) / SECOND_PARSE_FILE)
+            with open(parse_filepath, "w+") as parse_file:
+                with redirect_stdout(parse_file):
+                    parse_vtr_flow(job.second_parse_command())
+        if job.qor_parse_command():
+            parse_filepath = str(PurePath(work_dir) / QOR_PARSE_FILE)
+            with open(parse_filepath, "w+") as parse_file:
+                with redirect_stdout(parse_file):
+                    parse_vtr_flow(job.qor_parse_command())
+        max_arch_len = max(max_arch_len, len(job.arch()))
+        max_circuit_len = max(max_circuit_len, len(job.circuit()))
+    parse_files(config_jobs, run_dir, flow_metrics_basename)
+
+    if config.second_parse_file:
+        parse_files(config_jobs, run_dir, SECOND_PARSE_FILE)
+
+    if config.qor_parse_file:
+        parse_files(config_jobs, run_dir, QOR_PARSE_FILE)
+
+
+def parse_files(config_jobs, run_dir, flow_metrics_basename=FIRST_PARSE_FILE):
+    """ Parse the result files from the given jobs """
+    task_parse_results_filepath = str(PurePath(run_dir) / flow_metrics_basename)
+    with open(task_parse_results_filepath, "w") as out_f:
+
+        # Start the header
+        header = True
+        for job in config_jobs:
+            # Open the job results file
+            #
+            # The job results file is basically the same format,
+            # but excludes the architecture and circuit fields,
+            # which we prefix to each line of the task result file
+            job_parse_results_filepath = Path(job.work_dir(run_dir)) / flow_metrics_basename
+            if job_parse_results_filepath.exists():
+                with open(job_parse_results_filepath) as in_f:
+                    lines = in_f.readlines()
+                    assert len(lines) == 2
+                    if header:
+                        # First line is the header
+                        print(lines[0], file=out_f, end="")
+                        header = False
+                    # Second line is the data
+                    print(lines[1], file=out_f, end="")
+                pretty_print_table(job_parse_results_filepath)
+            else:
+                print(
+                    "Warning: Flow result file not found (task QoR will be incomplete): {}".format(
+                        str(job_parse_results_filepath)
+                    )
+                )
+
+
+def create_golden_results_for_tasks(configs):
+    """ Runs create_golden_results_for_task on all of the given configurations """
+
+    for config in configs:
+        create_golden_results_for_task(config)
+
+
+def create_golden_results_for_task(config):
+    """
+    Copies the latest task run's parse_results.txt into the config directory as golden_results.txt
+    """
+    run_dir = find_latest_run_dir(config)
+
+    task_results = str(PurePath(run_dir).joinpath(FIRST_PARSE_FILE))
+    golden_results_filepath = str(PurePath(config.config_dir).joinpath("golden_results.txt"))
+
+    shutil.copy(task_results, golden_results_filepath)
+
+
+def check_golden_results_for_tasks(configs):
+    """ Runs check_golden_results_for_task on all the input configurations """
+    num_qor_failures = 0
+
+    print("\nCalculating QoR results...")
+    for config in configs:
+        num_qor_failures += check_golden_results_for_task(config)
+
+    return num_qor_failures
+def check_golden_results_for_task(config):
+    """
+    Checks the latest task run's parse_results.txt against the golden_results.txt
+    in the config directory
+    """
+    num_qor_failures = 0
+    run_dir = find_latest_run_dir(config)
+
+    if not config.pass_requirements_file:
+        print(
+            "Warning: no pass requirements file for task {}, QoR will not be checked".format(
+                config.task_name
+            )
+        )
+    else:
+
+        # Load the task's parse results
+        task_results_filepath = str(PurePath(run_dir).joinpath(FIRST_PARSE_FILE))
+
+        # Load the golden reference
+        if config.second_parse_file:
+            second_results_filepath = str(PurePath(run_dir).joinpath(SECOND_PARSE_FILE))
+            num_qor_failures = check_two_files(
+                config,
+                task_results_filepath,
+                second_results_filepath,
+                second_name="second parse file",
+            )
+            pretty_print_table(second_results_filepath)
+
+        else:
+            golden_results_filepath = str(
+                PurePath(config.config_dir).joinpath("golden_results.txt")
+            )
+            num_qor_failures = check_two_files(
+                config, task_results_filepath, golden_results_filepath,
+            )
+            pretty_print_table(task_results_filepath)
+
+    if num_qor_failures == 0:
+        print("{}...[Pass]".format("/".join(str((Path(config.config_dir).parent)).split("/")[-3:])))
+
+    return num_qor_failures
+
+
+# pylint: disable=too-many-branches,too-many-locals
+def check_two_files(
+    config,
+    first_results_filepath,
+    second_results_filepath,
+    first_name="task",
+    second_name="golden",
+):
+    """ Compare the results of the two given files """
+    first_results = load_parse_results(first_results_filepath)
+    second_results = load_parse_results(second_results_filepath)
+    # Verify that the architecture and circuit are specified
+    for param in ["architecture", "circuit", "script_params"]:
+        if param not in first_results.PRIMARY_KEYS:
+            raise InspectError(
+                "Required param '{}' missing from {} results: {}".format(
+                    param, first_name, first_results_filepath
+                ),
+                first_results_filepath,
+            )
+
+        if param not in second_results.PRIMARY_KEYS:
+            raise InspectError(
+                "Required param '{}' missing from {} results: {}".format(
+                    param, second_name, second_results_filepath
+                ),
+                second_results_filepath,
+            )
+
+    # Verify that all params and pass requirement metrics are included in both result files.
+    # We do not worry about non-pass_requirements elements being different or missing.
+    pass_req_filepath = str(paths.pass_requirements_path / config.pass_requirements_file)
+    pass_requirements = load_pass_requirements(pass_req_filepath)
+
+    for metric in pass_requirements.keys():
+        for ((arch, circuit, script_params), result,) in first_results.all_metrics().items():
+            if metric not in result:
+                raise InspectError(
+                    "Required metric '{}' missing from {} results".format(metric, first_name),
+                    first_results_filepath,
+                )
+
+        for ((arch, circuit, script_params), result,) in second_results.all_metrics().items():
+            if metric not in result:
+                raise InspectError(
+                    "Required metric '{}' missing from {} results".format(metric, second_name),
+                    second_results_filepath,
+                )
+
+    # Load the primary keys for the result files
+    second_primary_keys = []
+    for (arch, circuit, script_params), _ in second_results.all_metrics().items():
+        second_primary_keys.append((arch, circuit, script_params))
+
+    first_primary_keys = []
+    for (arch, circuit, script_params), _ in first_results.all_metrics().items():
+        first_primary_keys.append((arch, circuit, script_params))
+
+    # Ensure that the first result file has all of the second result file's cases
+    for arch, circuit, script_params in second_primary_keys:
+        if first_results.metrics(arch, circuit, script_params) is None:
+            raise InspectError(
+                "Required case {}/{} missing from {} results: {}".format(
+                    arch, circuit, first_name, first_results_filepath
+                )
+            )
+
+    # Warn about any elements in the first result file that are not found in the second result file
+    for arch, circuit, script_params in first_primary_keys:
+        if second_results.metrics(arch, circuit, script_params) is None:
+            print(
+                "Warning: {} includes result for {}/{} missing in {} results".format(
                    first_name, arch, circuit, second_name
+                )
+            )
+    num_qor_failures = 0
+    # Verify that the first results pass each metric for all cases in the second results
+    for (arch, circuit, script_params) in second_primary_keys:
+        second_metrics = second_results.metrics(arch, circuit, script_params)
+        first_metrics = first_results.metrics(arch, circuit, script_params)
+        first_fail = True
+        for metric in pass_requirements.keys():
+
+            if metric not in second_metrics:
+                print("Warning: Metric {} missing from {} results".format(metric, second_name))
+                continue
+
+            if metric not in first_metrics:
+                print("Warning: Metric {} missing from {} results".format(metric, first_name))
+                continue
+
+            try:
+                metric_passed, reason = pass_requirements[metric].check_passed(
+                    second_metrics[metric], first_metrics[metric], second_name
+                )
+            except InspectError as error:
+                metric_passed = False
+                reason = error.msg
+
+            if not metric_passed:
+                if first_fail:
+                    print(
+                        "\n{}...[Fail]".format(
+                            "/".join(str((Path(config.config_dir).parent)).split("/")[-3:])
+                        )
+                    )
+                    first_fail = False
+                print("[Fail]\n{}/{}/{} {} {}".format(arch, circuit, script_params, metric, reason))
+                num_qor_failures += 1
+    return num_qor_failures
+
+
+# pylint: enable=too-many-branches,too-many-locals
"Required case {}/{} missing from {} results: {}".format( + arch, circuit, first_name, first_results_filepath + ) + ) + + # Warn about any elements in first result file that are not found in second result file + for arch, circuit, script_params in first_primary_keys: + if second_results.metrics(arch, circuit, script_params) is None: + print( + "Warning: {} includes result for {}/{} missing in {} results".format( + first_name, arch, circuit, second_name + ) + ) + num_qor_failures = 0 + # Verify that the first results pass each metric for all cases in the second results + for (arch, circuit, script_params) in second_primary_keys: + second_metrics = second_results.metrics(arch, circuit, script_params) + first_metrics = first_results.metrics(arch, circuit, script_params) + first_fail = True + for metric in pass_requirements.keys(): + + if not metric in second_metrics: + print("Warning: Metric {} missing from {} results".format(metric, second_name)) + continue + + if not metric in first_metrics: + print("Warning: Metric {} missing from {} results".format(metric, first_name)) + continue + + try: + metric_passed, reason = pass_requirements[metric].check_passed( + second_metrics[metric], first_metrics[metric], second_name + ) + except InspectError as error: + metric_passed = False + reason = error.msg + + if not metric_passed: + if first_fail: + print( + "\n{}...[Fail]".format( + "/".join(str((Path(config.config_dir).parent)).split("/")[-3:]) + ) + ) + first_fail = False + print("[Fail]\n{}/{}/{} {} {}".format(arch, circuit, script_params, metric, reason)) + num_qor_failures += 1 + return num_qor_failures + + +# pylint: enable=too-many-branches,too-many-locals + + +def summarize_qor(configs): + """ Summarize the Qor results """ + + first = True + task_path = Path(configs[0].config_dir).parent + if len(configs) > 1 or (task_path.parent / "task_list.txt").is_file(): + task_path = task_path.parent + task_path = task_path / "task_summary" + task_path.mkdir(exist_ok=True) + out_file = task_path / (str(Path(find_latest_run_dir(configs[0])).stem) + "_summary.txt") + with out_file.open("w+") as out: + for config in configs: + with (Path(find_latest_run_dir(config)) / QOR_PARSE_FILE).open("r") as in_file: + headers = in_file.readline() + if first: + print("task_name \t{}".format(headers), file=out, end="") + first = False + for line in in_file: + print("{}\t{}".format(config.task_name, line), file=out, end="") + pretty_print_table(str(Path(find_latest_run_dir(config)) / QOR_PARSE_FILE)) + + +def calc_geomean(args, configs): + """ caclulate and ouput the geomean values to the geomean file """ + first = False + task_path = Path(configs[0].config_dir).parent + if len(configs) > 1 or (task_path.parent / "task_list.txt").is_file(): + task_path = task_path.parent + out_file = task_path / "qor_geomean.txt" + if not out_file.is_file(): + first = True + summary_file = ( + task_path + / "task_summary" + / (str(Path(find_latest_run_dir(configs[0])).stem) + "_summary.txt") + ) + + with out_file.open("w" if first else "a") as out: + with summary_file.open("r") as summary: + params = summary.readline().strip().split("\t")[4:] + if first: + print("run", file=out, end="\t") + for param in params: + print(param, file=out, end="\t") + print("date\trevision", file=out) + first = False + lines = summary.readlines() + print( + get_latest_run_number(str(Path(configs[0].config_dir).parent)), file=out, end="\t", + ) + for index in range(len(params)): + geo_mean = 1 + num = 0 + previous_value = None + geo_mean, num, previous_value 
+def calculate_individual_geo_mean(lines, index, geo_mean, num):
+    """ Calculate the geo_mean of an individual column of the parse results """
+    previous_value = None
+    for line in lines:
+        line = line.split("\t")[4:]
+        current_value = line[index]
+        try:
+            if float(current_value) > 0:
+                geo_mean *= float(current_value)
+                num += 1
+        except ValueError:
+            if not previous_value:
+                previous_value = current_value
+            elif current_value != previous_value:
+                previous_value = "-1"
+    return geo_mean, num, previous_value
+
+
+def find_latest_run_dir(config):
+    """ Find the latest run directory for the given configuration """
+    task_dir = find_task_dir(config)
+
+    run_dir = get_latest_run_dir(task_dir)
+
+    if not run_dir:
+        raise InspectError(
+            "Failed to find run directory for task '{}' in '{}'".format(config.task_name, task_dir)
+        )
+
+    assert Path(run_dir).is_dir()
+
+    return run_dir
+
+
+if __name__ == "__main__":
+    retval = vtr_command_main(sys.argv[1:])
+    sys.exit(retval)
diff --git a/vtr_flow/scripts/python_libs/vtr/paths.py b/vtr_flow/scripts/python_libs/vtr/paths.py
index 77cf979f103..048aeb83a43 100644
--- a/vtr_flow/scripts/python_libs/vtr/paths.py
+++ b/vtr_flow/scripts/python_libs/vtr/paths.py
@@ -7,13 +7,13 @@

 # VTR Paths
 vtr_flow_path = root_path / "vtr_flow"
-scripts_path = vtr_flow_path / "scripts"
-build_path = root_path / "build"

 # ODIN paths
 odin_path = root_path / "ODIN_II"
 odin_exe_path = odin_path / "odin_II"
 odin_cfg_path = vtr_flow_path / "misc" / "basic_odin_config_split.xml"
+odin_verify_path = odin_path / "verify_odin.sh"
+odin_output_on_error_path = odin_path / "regression_test" / ".library" / "output_on_error.conf"

 # ABC paths
 abc_path = root_path / "abc"
@@ -29,6 +29,20 @@
 vpr_path = root_path / "vpr"
 vpr_exe_path = vpr_path / "vpr"

+# Flow scripts
+scripts_path = vtr_flow_path / "scripts"
+run_vtr_flow_path = scripts_path / "run_vtr_flow.py"
+flow_template_path = scripts_path / "flow_script_template.txt"
+
+# Task files
+tasks_path = vtr_flow_path / "tasks"
+regression_tests_path = tasks_path / "regression_tests"
+
+# Parsing files
+parse_path = vtr_flow_path / "parse"
+vtr_benchmarks_parse_path = parse_path / "parse_config" / "common" / "vtr_benchmarks.txt"
+pass_requirements_path = parse_path / "pass_requirements"
+
 # Other scripts
 blackbox_latches_script_path = scripts_path / "blackbox_latches.pl"
 restore_multiclock_latch_old_script_path = scripts_path / "restore_multiclock_latch_information.pl"
diff --git a/vtr_flow/scripts/python_libs/vtr/task.py b/vtr_flow/scripts/python_libs/vtr/task.py
new file mode 100644
index 00000000000..c60197f1dfb
--- /dev/null
+++ b/vtr_flow/scripts/python_libs/vtr/task.py
@@ -0,0 +1,614 @@
+"""
+Module that contains the task functions
+"""
+from pathlib import Path
+from pathlib import PurePath
+from shlex import split
+import itertools
+
+from vtr import (
+    VtrError,
+    InspectError,
+    load_list_file,
+    load_parse_results,
+    get_next_run_dir,
+    find_task_dir,
+    load_script_param,
+    get_latest_run_dir,
+    paths,
+)
+
+# pylint: disable=too-many-instance-attributes, too-many-arguments, too-many-locals,too-few-public-methods
+class TaskConfig:
+    """
+    An object representing a task config file
+    """
+
+    def __init__(
+        self,
+        task_name,
+        config_dir,
+        circuits_dir,
+        archs_dir,
+        circuit_list_add,
+        arch_list_add,
+        parse_file,
+        second_parse_file=None,
+        script_path=None,
+        script_params=None,
+        script_params_common=None,
+        script_params_list_add=None,
+        pass_requirements_file=None,
+        sdc_dir=None,
+        qor_parse_file=None,
+        cmos_tech_behavior=None,
+        pad_file=None,
+    ):
+        self.task_name = task_name
+        self.config_dir = config_dir
+        self.circuit_dir = circuits_dir
+        self.arch_dir = archs_dir
+        self.circuits = circuit_list_add
+        self.archs = arch_list_add
+        self.parse_file = parse_file
+        self.second_parse_file = second_parse_file
+        self.script_path = script_path
+        self.script_params = script_params
+        self.script_params_common = script_params_common
+        self.script_params_list_add = script_params_list_add
+        self.pass_requirements_file = pass_requirements_file
+        self.sdc_dir = sdc_dir
+        self.qor_parse_file = qor_parse_file
+        self.cmos_tech_behavior = cmos_tech_behavior
+        self.pad_file = pad_file
+
+
+# pylint: enable=too-few-public-methods
+
+
+class Job:
+    """
+    A class to store the necessary information for a job that needs to be run.
+    """
+
+    def __init__(
+        self,
+        task_name,
+        arch,
+        circuit,
+        script_params,
+        work_dir,
+        run_command,
+        parse_command,
+        second_parse_command,
+        qor_parse_command,
+    ):
+        self._task_name = task_name
+        self._arch = arch
+        self._circuit = circuit
+        self._script_params = script_params
+        self._run_command = run_command
+        self._parse_command = parse_command
+        self._second_parse_command = second_parse_command
+        self._qor_parse_command = qor_parse_command
+        self._work_dir = work_dir
+
+    def task_name(self):
+        """ Return the task name of the job """
+        return self._task_name
+
+    def arch(self):
+        """ Return the architecture file name of the job """
+        return self._arch
+
+    def circuit(self):
+        """ Return the circuit file name of the job """
+        return self._circuit
+
+    def script_params(self):
+        """ Return the script parameters of the job """
+        return self._script_params
+
+    def job_name(self):
+        """ Return the name of the job """
+        return str(PurePath(self.arch()) / self.circuit() / self.script_params())
+
+    def run_command(self):
+        """ Return the run command of the job """
+        return self._run_command
+
+    def parse_command(self):
+        """ Return the parse command of the job """
+        return self._parse_command
+
+    def second_parse_command(self):
+        """ Return the second parse command of the job """
+        return self._second_parse_command
+
+    def qor_parse_command(self):
+        """ Return the qor parse command of the job """
+        return self._qor_parse_command
+
+    def work_dir(self, run_dir):
+        """ Return the work directory of the job """
+        return str(PurePath(run_dir).joinpath(self._work_dir))
+
+
+# pylint: enable=too-many-instance-attributes
line.split("=") + + # Trim whitespace + key = key.strip() + value = value.strip() + + if key in unique_keys: + if key not in key_values: + key_values[key] = value + elif key == "parse_file": + key_values["second_parse_file"] = value + else: + raise VtrError( + "Duplicate {key} in config file {file}".format(key=key, file=config_file) + ) + + elif repeated_key_pattern in key: + if key not in key_values: + key_values[key] = [] + if key == "script_params_list_add": + key_values[key] += [value] + else: + key_values[key].append(value) + + else: + # All valid keys should have been collected by now + raise VtrError( + "Unrecognzied key '{key}' in config file {file}".format(key=key, file=config_file) + ) + + # We split the script params into a list + if "script_params" in key_values: + key_values["script_params"] = split(key_values["script_params"]) + + if "script_params_common" in key_values: + key_values["script_params_common"] = split(key_values["script_params_common"]) + + check_required_feilds(config_file, required_keys, key_values) + + # Useful meta-data about the config + config_dir = str(Path(config_file).parent) + key_values["config_dir"] = config_dir + key_values["task_name"] = Path(config_dir).parent.name + + # Create the task config object + return TaskConfig(**key_values) + + +def check_required_feilds(config_file, required_keys, key_values): + """ + Check that all required fields were specified + """ + for required_key in required_keys: + if required_key not in key_values: + raise VtrError( + "Missing required key '{key}' in config file {file}".format( + key=required_key, file=config_file + ) + ) + + +def shorten_task_names(configs, common_task_prefix): + """ + Shorten the task names of the configs by remove the common task prefix. + """ + new_configs = [] + for config in configs: + config.task_name = config.task_name.replace(common_task_prefix, "") + new_configs += [config] + return new_configs + + +def find_longest_task_description(configs): + """ + Finds the longest task description in the list of configurations. + This is used for output spacing. + """ + longest = 0 + for config in configs: + for arch, circuit in itertools.product(config.archs, config.circuits): + if config.script_params_list_add: + for param in config.script_params_list_add: + arch_circuit_len = len(str(PurePath(arch) / circuit / "common_" / param)) + if arch_circuit_len > longest: + longest = arch_circuit_len + else: + arch_circuit_len = len(str(PurePath(arch) / circuit / "common")) + if arch_circuit_len > longest: + longest = arch_circuit_len + return longest + + +def create_jobs(args, configs, longest_name=0, longest_arch_circuit=0, after_run=False): + """ + Create the jobs to be executed depending on the configs. 
+ """ + jobs = [] + for config in configs: + for arch, circuit in itertools.product(config.archs, config.circuits): + golden_results = load_parse_results( + str(PurePath(config.config_dir).joinpath("golden_results.txt")) + ) + abs_arch_filepath = resolve_vtr_source_file(config, arch, config.arch_dir) + abs_circuit_filepath = resolve_vtr_source_file(config, circuit, config.circuit_dir) + work_dir = str(PurePath(arch).joinpath(circuit)) + + run_dir = ( + str(Path(get_latest_run_dir(find_task_dir(config))) / work_dir) + if after_run + else str(Path(get_next_run_dir(find_task_dir(config))) / work_dir) + ) + + # Collect any extra script params from the config file + cmd = [abs_circuit_filepath, abs_arch_filepath] + + if hasattr(args, "show_failures") and args.show_failures: + cmd += ["-show_failures"] + cmd += config.script_params if config.script_params else [] + cmd += config.script_params_common if config.script_params_common else [] + cmd += ( + args.shared_script_params + if hasattr(args, "shared_script_params") and args.shared_script_params + else [] + ) + + # Apply any special config based parameters + if config.cmos_tech_behavior: + cmd += [ + "-cmos_tech", + resolve_vtr_source_file(config, config.cmos_tech_behavior, "tech"), + ] + + cmd += ( + ["--fix_pins", resolve_vtr_source_file(config, config.pad_file)] + if config.pad_file + else [] + ) + + if config.sdc_dir: + cmd += [ + "-sdc_file", + "{}/{}.sdc".format(config.sdc_dir, Path(circuit).stem), + ] + + parse_cmd = None + second_parse_cmd = None + qor_parse_command = None + if config.parse_file: + parse_cmd = [ + resolve_vtr_source_file( + config, config.parse_file, str(PurePath("parse").joinpath("parse_config")), + ) + ] + + if config.second_parse_file: + second_parse_cmd = [ + resolve_vtr_source_file( + config, + config.second_parse_file, + str(PurePath("parse").joinpath("parse_config")), + ) + ] + + if config.qor_parse_file: + qor_parse_command = [ + resolve_vtr_source_file( + config, + config.qor_parse_file, + str(PurePath("parse").joinpath("qor_config")), + ) + ] + # We specify less verbosity to the sub-script + # This keeps the amount of output reasonable + if hasattr(args, "verbosity") and max(0, args.verbosity - 1): + cmd += ["-verbose"] + if config.script_params_list_add: + for value in config.script_params_list_add: + jobs.append( + create_job( + args, + config, + circuit, + arch, + value, + cmd, + parse_cmd, + second_parse_cmd, + qor_parse_command, + work_dir, + run_dir, + longest_name, + longest_arch_circuit, + golden_results, + ) + ) + else: + jobs.append( + create_job( + args, + config, + circuit, + arch, + None, + cmd, + parse_cmd, + second_parse_cmd, + qor_parse_command, + work_dir, + run_dir, + longest_name, + longest_arch_circuit, + golden_results, + ) + ) + + return jobs + + +def create_job( + args, + config, + circuit, + arch, + param, + cmd, + parse_cmd, + second_parse_cmd, + qor_parse_command, + work_dir, + run_dir, + longest_name, + longest_arch_circuit, + golden_results, +): + """ + Create an individual job with the specified parameters + """ + param_string = "common" + (("_" + param.replace(" ", "_")) if param else "") + if not param: + param = "common" + # determine spacing for nice output + num_spaces_before = int((longest_name - len(config.task_name))) + 8 + num_spaces_after = int((longest_arch_circuit - len(work_dir + "/{}".format(param_string)))) + cmd += [ + "-name", + "{}:{}{}/{}{}".format( + config.task_name, + " " * num_spaces_before, + work_dir, + param_string, + " " * num_spaces_after, + ), + ] + + 
cmd += ["-temp_dir", run_dir + "/{}".format(param_string)] + expected_min_w = ret_expected_min_w(circuit, arch, golden_results, param) + expected_min_w = ( + int(expected_min_w * args.minw_hint_factor) + if hasattr(args, "minw_hint_factor") + else expected_min_w + ) + expected_min_w += expected_min_w % 2 + if expected_min_w > 0: + cmd += ["--min_route_chan_width_hint", str(expected_min_w)] + expected_vpr_status = ret_expected_vpr_status(arch, circuit, golden_results, param) + if expected_vpr_status not in ("success", "Unknown"): + cmd += ["-expect_fail", expected_vpr_status] + current_parse_cmd = parse_cmd.copy() + + if config.parse_file: + current_parse_cmd += [ + "arch={}".format(arch), + "circuit={}".format(circuit), + "script_params={}".format(load_script_param(param)), + ] + current_parse_cmd.insert(0, run_dir + "/{}".format(load_script_param(param))) + current_second_parse_cmd = second_parse_cmd.copy() if second_parse_cmd else None + + if config.second_parse_file: + current_second_parse_cmd += [ + "arch={}".format(arch), + "circuit={}".format(circuit), + "script_params={}".format(load_script_param(param)), + ] + current_second_parse_cmd.insert(0, run_dir + "/{}".format(load_script_param(param))) + current_qor_parse_command = qor_parse_command.copy() if qor_parse_command else None + + if config.qor_parse_file: + current_qor_parse_command += [ + "arch={}".format(arch), + "circuit={}".format(circuit), + "script_params={}".format("common"), + ] + current_qor_parse_command.insert(0, run_dir + "/{}".format(load_script_param(param))) + current_cmd = cmd.copy() + if param_string != "common": + current_cmd += param.split(" ") + return Job( + config.task_name, + arch, + circuit, + param_string, + work_dir + "/" + param_string, + current_cmd, + current_parse_cmd, + current_second_parse_cmd, + current_qor_parse_command, + ) + + +# pylint: enable=too-many-arguments,too-many-locals + + +def ret_expected_min_w(circuit, arch, golden_results, script_params=None): + """ + Retrive the expected minimum channel width from the golden results. + """ + script_params = load_script_param(script_params) + golden_metrics = golden_results.metrics(arch, circuit, script_params) + if golden_metrics and "min_chan_width" in golden_metrics: + return int(golden_metrics["min_chan_width"]) + return -1 + + +def ret_expected_vpr_status(arch, circuit, golden_results, script_params=None): + """ + Retrive the expected VPR status from the golden_results. 
+ """ + script_params = load_script_param(script_params) + golden_metrics = golden_results.metrics(arch, circuit, script_params) + if not golden_metrics or "vpr_status" not in golden_metrics: + return "Unknown" + + return golden_metrics["vpr_status"] + + +def resolve_vtr_source_file(config, filename, base_dir=""): + """ + Resolves an filename with a base_dir + + Checks the following in order: + 1) filename as absolute path + 2) filename under config directory + 3) base_dir as absolute path (join filename with base_dir) + 4) filename and base_dir are relative paths (join under vtr_root) + """ + + # Absolute path + if PurePath(filename).is_absolute(): + return filename + + # Under config + config_path = Path(config.config_dir) + assert config_path.is_absolute() + joined_path = config_path / filename + if joined_path.exists(): + return str(joined_path) + + # Under base dir + base_path = Path(base_dir) + if base_path.is_absolute(): + # Absolute base + joined_path = base_path / filename + if joined_path.exists(): + return str(joined_path) + else: + # Relative base under the VTR flow directory + joined_path = paths.vtr_flow_path / base_dir / filename + if joined_path.exists(): + return str(joined_path) + + # Not found + raise InspectError("Failed to resolve VTR source file {}".format(filename)) + + +def find_task_config_file(task_name): + """ + See if we can find the config.txt assuming the task name is an + absolute/relative path + """ + + base_dirs = [] + if PurePath(task_name).is_absolute(): + # Only check the root path since the path is aboslute + base_dirs.append("/") + else: + # Not absolute path, so check from the current directory first + base_dirs.append(".") + + vtr_flow_tasks_dir = str(paths.vtr_flow_path / "tasks") + + # Then the VTR tasks directory + base_dirs.append(vtr_flow_tasks_dir) + + # Generate potential config files (from most to least specific) + potential_config_file_paths = [] + for base_dir in base_dirs: + # Assume points directly to a config.txt + assume_config_path = str(PurePath(base_dir) / task_name) + potential_config_file_paths.append(assume_config_path) + + # Assume points to a config dir (containing config.txt) + assume_config_dir_path = str(PurePath(base_dir) / task_name / "config.txt") + potential_config_file_paths.append(assume_config_dir_path) + + # Assume points to a task dir (containing config/config.txt) + assume_task_dir_path = str(PurePath(base_dir) / task_name / "config" / "config.txt") + potential_config_file_paths.append(assume_task_dir_path) + + # Find the first potential file that is valid + for config_file in potential_config_file_paths: + config_path = Path(config_file) + is_file = config_path.is_file() + is_named_config = config_path.name == "config.txt" + is_in_config_dir = config_path.parent.name == "config" + + if is_file and is_named_config and is_in_config_dir: + return config_path.resolve() + + raise VtrError("Could not find config/config.txt for task {name}".format(name=task_name)) diff --git a/vtr_flow/scripts/python_libs/vtr/util.py b/vtr_flow/scripts/python_libs/vtr/util.py index 67f884db994..3db3f7907dc 100644 --- a/vtr_flow/scripts/python_libs/vtr/util.py +++ b/vtr_flow/scripts/python_libs/vtr/util.py @@ -10,12 +10,11 @@ import argparse import csv from collections import OrderedDict +from prettytable import PrettyTable import vtr.error from vtr.error import CommandError from vtr import paths -VERBOSITY_CHOICES = range(5) - class RawDefaultHelpFormatter( argparse.ArgumentDefaultsHelpFormatter, argparse.RawDescriptionHelpFormatter @@ 
diff --git a/vtr_flow/scripts/python_libs/vtr/util.py b/vtr_flow/scripts/python_libs/vtr/util.py
index 67f884db994..3db3f7907dc 100644
--- a/vtr_flow/scripts/python_libs/vtr/util.py
+++ b/vtr_flow/scripts/python_libs/vtr/util.py
@@ -10,12 +10,11 @@
 import argparse
 import csv
 from collections import OrderedDict
+from prettytable import PrettyTable
 import vtr.error
 from vtr.error import CommandError
 from vtr import paths

-VERBOSITY_CHOICES = range(5)
-

 class RawDefaultHelpFormatter(
     argparse.ArgumentDefaultsHelpFormatter, argparse.RawDescriptionHelpFormatter
@@ -56,6 +55,7 @@ def __init__(
         indent="\t",
         show_failures=False,
         valgrind=False,
+        expect_fail=None,
     ):
         if echo_cmd is None:
             echo_cmd = verbose
@@ -68,6 +68,7 @@ def __init__(
         self._indent = indent
         self._show_failures = show_failures
         self._valgrind = valgrind
+        self._expect_fail = expect_fail

     def run_system_command(
         self, cmd, temp_dir, log_filename=None, expected_return_code=0, indent_depth=0
@@ -191,19 +192,13 @@ def run_system_command(
             for line in cmd_output:
                 print(indent_depth * self._indent + line, end="")

-        if self._show_failures and cmd_errored:
+        if self._show_failures and cmd_errored and not self._expect_fail:
             print("\nFailed log file follows({}):".format(str((temp_dir / log_filename).resolve())))
             for line in cmd_output:
                 print(indent_depth * self._indent + "<" + line, end="")
-            raise CommandError(
-                "Executable {exec_name} failed".format(exec_name=PurePath(orig_cmd[0]).name),
-                cmd=cmd,
-                log=str(temp_dir / log_filename),
-                returncode=cmd_returncode,
-            )
         if cmd_errored:
             raise CommandError(
-                "{}".format(PurePath(orig_cmd[0]).name),
+                "Executable {} failed".format(PurePath(orig_cmd[0]).name),
                 cmd=cmd,
                 log=str(temp_dir / log_filename),
                 returncode=cmd_returncode,
@@ -221,6 +216,29 @@ def check_cmd(command):
     return Path(command).exists()


+def pretty_print_table(file, border=False):
+    """ Convert a file to a pretty, easily read table """
+    table = PrettyTable()
+    table.border = border
+    reader = None
+    with open(file, "r") as csv_file:
+        reader = csv.reader(csv_file, delimiter="\t")
+        first = True
+        for row in reader:
+            row = [row_item.strip() + "\t" for row_item in row]
+            while row[-1] == "\t":
+                row = row[:-1]
+            if first:
+                table.field_names = list(row)
+                for head in list(row):
+                    table.align[head] = "l"
+                first = False
+            else:
+                table.add_row(row)
+    with open(file, "w+") as out_file:
+        print(table, file=out_file)
+
+
 def write_tab_delimitted_csv(filepath, rows):
     """
     Write out the data provied in a tab-delimited CSV format
@@ -430,6 +448,18 @@ def get_next_run_dir(base_dir):
     return str(PurePath(base_dir) / run_dir_name(get_next_run_number(base_dir)))


+def find_task_dir(config):
+    """
+    Find the task directory
+    """
+    # The task dir is just above the config directory
+    task_dir = Path(config.config_dir).parent
+    assert task_dir.is_dir()
+
+    return str(task_dir)
+
+
 def get_latest_run_dir(base_dir):
     """
     Returns the run directory with the highest run number in base_dir
@@ -449,7 +479,7 @@ def get_next_run_number(base_dir):
     latest_run_number = get_latest_run_number(base_dir)

     if latest_run_number is None:
-        next_run_number = 0
+        next_run_number = 1
     else:
         next_run_number = latest_run_number + 1

@@ -460,14 +490,14 @@ def get_latest_run_number(base_dir):
     """
     Returns the highest run number of all run directories with in base_dir
     """
-    run_number = 0
+    run_number = 1
     run_dir = Path(base_dir) / run_dir_name(run_number)

-    if not run_dir.exists:
+    if not run_dir.exists():
         # No existing run directories
         return None

-    while run_dir.exists:
+    while run_dir.exists():
         run_number += 1
         run_dir = Path(base_dir) / run_dir_name(run_number)
diff --git a/vtr_flow/scripts/run_vtr_flow.py b/vtr_flow/scripts/run_vtr_flow.py
index d8265967060..f4361d4da5b 100755
--- a/vtr_flow/scripts/run_vtr_flow.py
+++ b/vtr_flow/scripts/run_vtr_flow.py
@@ -369,13 +369,6 @@ def vtr_command_argparser(prog=None):
     return parser


-def main():
-    """
-    Main function to call vtr_command_main to run VTR
-    """
-    return vtr_command_main(sys.argv[1:], prog=sys.argv[0])
-
-
 def vtr_command_main(arg_list, prog=None):
     """
     Running VTR with the specified arguments.
@@ -396,6 +389,7 @@ def vtr_command_main(arg_list, prog=None):
         verbose=args.verbose,
         show_failures=args.show_failures,
         valgrind=args.valgrind,
+        expect_fail=args.expect_fail,
     )
     exit_status = 0
     return_status = 0
@@ -466,8 +460,9 @@ def vtr_command_main(arg_list, prog=None):
             % (seconds.total_seconds(), str(Path.cwd()), socket.gethostname())
         )
         file.write("\n")
-
-    sys.exit(return_status)
+    if __name__ == "__main__":
+        sys.exit(return_status)
+    return return_status


 def process_unknown_args(unknown_args):
@@ -644,5 +639,4 @@ def except_vtr_error(error, expect_fail, verbose):


 if __name__ == "__main__":
-    retval = main()
-    sys.exit(retval)
+    sys.exit(vtr_command_main(sys.argv[1:], prog=sys.argv[0]))
diff --git a/vtr_flow/scripts/run_vtr_task.py b/vtr_flow/scripts/run_vtr_task.py
new file mode 100755
index 00000000000..ef77d207e2b
--- /dev/null
+++ b/vtr_flow/scripts/run_vtr_task.py
@@ -0,0 +1,457 @@
+#!/usr/bin/env python3
+
+""" This module is a wrapper around the scripts/python_libs/vtr library,
+allowing the user to run one or more VTR tasks. """
+
+
+from pathlib import Path
+from pathlib import PurePath
+import sys
+import argparse
+import textwrap
+import subprocess
+from datetime import datetime
+from contextlib import redirect_stdout
+from multiprocessing import Pool, Manager
+from difflib import SequenceMatcher
+
+from run_vtr_flow import vtr_command_main as run_vtr_flow
+
+# pylint: disable=wrong-import-position, import-error
+sys.path.insert(0, str(Path(__file__).resolve().parent / "python_libs"))
+
+from vtr import (
+    load_list_file,
+    format_elapsed_time,
+    RawDefaultHelpFormatter,
+    argparse_str2bool,
+    get_next_run_dir,
+    load_task_config,
+    find_task_config_file,
+    load_parse_results,
+    parse_tasks,
+    find_task_dir,
+    shorten_task_names,
+    find_longest_task_description,
+    check_golden_results_for_tasks,
+    create_golden_results_for_tasks,
+    create_jobs,
+    calc_geomean,
+    summarize_qor,
+    paths,
+)
+from vtr.error import VtrError, InspectError, CommandError
+
+# pylint: enable=wrong-import-position, import-error
+ """ + ) + + parser = argparse.ArgumentParser( + prog=prog, description=description, epilog=epilog, formatter_class=RawDefaultHelpFormatter, + ) + + # + # Major arguments + # + parser.add_argument("task", nargs="*", help="Tasks to be run") + + parser.add_argument( + "-l", + nargs="*", + default=[], + metavar="TASK_LIST_FILE", + dest="list_file", + help="A file listing tasks to be run", + ) + + parser.add_argument( + "-parse", + default=False, + action="store_true", + help="Perform only parsing on the latest task run", + ) + + parser.add_argument( + "-create_golden", + default=False, + action="store_true", + help="Update or create golden results for the specified task", + ) + + parser.add_argument( + "-check_golden", + default=False, + action="store_true", + help="Check the latest task run against golden results", + ) + + parser.add_argument( + "-system", + choices=["local", "scripts"], + default="local", + help="What system to run the tasks on.", + ) + + parser.add_argument( + "-script", + default="run_vtr_flow.py", + help="Determines what flow script is used for the tasks", + ) + + parser.add_argument( + "-short_task_names", default=False, action="store_true", help="Output shorter task names.", + ) + + parser.add_argument( + "-show_failures", + default=False, + action="store_true", + help="Produce additional debug output", + ) + + parser.add_argument( + "-j", + "-p", + default=1, + type=int, + metavar="NUM_PROC", + help="How many processors to use for execution.", + ) + + parser.add_argument( + "-timeout", + default=30 * 24 * 60 * 60, # 30 days + metavar="TIMEOUT_SECONDS", + help="Time limit for this script.", + ) + + parser.add_argument( + "-verbosity", + default=0, + type=int, + help="Sets the verbosity of the script. Higher values produce more output.", + ) + + parser.add_argument( + "-minw_hint_factor", + default=1, + type=float, + help="Minimum width hint factor to multiplied by the minimum width hint", + ) + + parser.add_argument("-revision", default="", help="Revision number") + + parser.add_argument( + "-calc_geomean", + default=False, + action="store_true", + help="QoR geomeans are not computed by default", + ) + + parser.add_argument( + "-print_metadata", + default=True, + type=argparse_str2bool, + help="Print meta-data like command-line arguments and run-time", + ) + + parser.add_argument( + "-s", + nargs=argparse.REMAINDER, + default=[], + dest="shared_script_params", + help="Treat the remainder of the command line options as script parameters " + "shared by all tasks", + ) + + return parser + + +def vtr_command_main(arg_list, prog=None): + """ Run the vtr tasks given and the tasks in the lists given """ + # Load the arguments + args = vtr_command_argparser(prog).parse_args(arg_list) + + # Don't run if parsing or handling golden results + args.run = not (args.parse or args.create_golden or args.check_golden or args.calc_geomean) + + # Always parse if running + if args.run: + args.parse = True + + num_failed = -1 + try: + task_names = args.task + + for list_file in args.list_file: + task_names += load_list_file(list_file) + + config_files = [find_task_config_file(task_name) for task_name in task_names] + configs = [] + longest_name = 0 # longest task name for use in creating prettier output + common_task_prefix = None # common task prefix to shorten task names + for config_file in config_files: + config = load_task_config(config_file) + configs += [config] + if common_task_prefix is None: + common_task_prefix = config.task_name + else: + match = SequenceMatcher( + None, 
+def vtr_command_main(arg_list, prog=None):
+    """ Run the given VTR tasks and the tasks named in the given list files """
+    # Load the arguments
+    args = vtr_command_argparser(prog).parse_args(arg_list)
+
+    # Don't run if parsing or handling golden results
+    args.run = not (args.parse or args.create_golden or args.check_golden or args.calc_geomean)
+
+    # Always parse if running
+    if args.run:
+        args.parse = True
+
+    num_failed = -1
+    try:
+        task_names = args.task
+
+        for list_file in args.list_file:
+            task_names += load_list_file(list_file)
+
+        config_files = [find_task_config_file(task_name) for task_name in task_names]
+        configs = []
+        longest_name = 0  # longest task name, for use in creating prettier output
+        common_task_prefix = None  # common task prefix, used to shorten task names
+        for config_file in config_files:
+            config = load_task_config(config_file)
+            configs += [config]
+            if common_task_prefix is None:
+                common_task_prefix = config.task_name
+            else:
+                match = SequenceMatcher(
+                    None, common_task_prefix, config.task_name
+                ).find_longest_match(0, len(common_task_prefix), 0, len(config.task_name))
+                common_task_prefix = common_task_prefix[match.a : match.a + match.size]
+            if len(config.task_name) > longest_name:
+                longest_name = len(config.task_name)
+        if args.short_task_names:
+            configs = shorten_task_names(configs, common_task_prefix)
+        longest_arch_circuit = find_longest_task_description(
+            configs
+        )  # longest task description, for use in creating prettier output
+        num_failed = run_tasks(args, configs, longest_name, longest_arch_circuit)
+
+    except CommandError as exception:
+        print("Error: {msg}".format(msg=exception.msg))
+        print("\tfull command: ", exception.cmd)
+        print("\treturncode  : ", exception.returncode)
+        print("\tlog file    : ", exception.log)
+    except InspectError as exception:
+        print("Error: {msg}".format(msg=exception.msg))
+        if exception.filename:
+            print("\tfile: ", exception.filename)
+    except VtrError as exception:
+        print("Error:", exception.msg)
+    if __name__ == "__main__":
+        sys.exit(num_failed)
+    return num_failed
+
+
+def run_tasks(
+    args, configs, longest_name, longest_arch_circuit,
+):
+    """
+    Runs the specified set of tasks (configs)
+    """
+    start = datetime.now()
+    num_failed = 0
+
+    jobs = create_jobs(args, configs, longest_name, longest_arch_circuit)
+
+    # Determine the run dir for each config
+    run_dirs = {}
+    for config in configs:
+        task_dir = find_task_dir(config)
+        task_run_dir = get_next_run_dir(task_dir)
+        run_dirs[config.task_name] = task_run_dir
+
+    # We could potentially support other 'run' systems (e.g. a cluster),
+    # rather than just the local machine
+    if args.system == "local":
+        assert args.j > 0, "Invalid number of processors"
+
+        if args.run:
+            num_failed = run_parallel(args, jobs, run_dirs)
+            print("Elapsed time: {}".format(format_elapsed_time(datetime.now() - start)))
+
+        if args.parse:
+            print("\nParsing test results...")
+            print("scripts/parse_vtr_task.py -l {}".format(args.list_file[0]))
+            parse_tasks(configs, jobs)
+
+        if args.create_golden:
+            create_golden_results_for_tasks(configs)
+
+        if args.check_golden:
+            num_failed += check_golden_results_for_tasks(configs)
+
+        if args.calc_geomean:
+            summarize_qor(configs)
+            calc_geomean(args, configs)
+    elif args.system == "scripts":
+        for _, value in run_dirs.items():
+            Path(value).mkdir(parents=True)
+        run_scripts = create_run_scripts(args, jobs, run_dirs)
+        for script in run_scripts:
+            print(script)
+    else:
+        raise VtrError("Unrecognized run system {system}".format(system=args.system))
+    return num_failed
+def run_parallel(args, queued_jobs, run_dirs):
+    """
+    Run each external command in commands with at most args.j commands running in parallel
+    """
+    # We pop off the jobs of queued_jobs, which python does from the end,
+    # so reverse the list now so we get the expected order. This also ensures
+    # we are working with a copy of the jobs
+    queued_jobs = list(reversed(queued_jobs))
+
+    queued_procs = []
+    queue = Manager().Queue()
+    for job in queued_jobs:
+        queued_procs += [(queue, run_dirs, job, args.script)]
+
+    num_failed = 0
+    with Pool(processes=args.j) as pool:
+        for proc in queued_procs:
+            pool.apply_async(run_vtr_flow_process, proc)
+        pool.close()
+        pool.join()
+    for _ in queued_procs:
+        num_failed += queue.get()
+
+    return num_failed
+
+
+def create_run_scripts(args, jobs, run_dirs):
+    """ Create the bash script files for each job run """
+    run_script_files = []
+    for job in jobs:
+        run_script_files += [create_run_script(args, job, job.work_dir(run_dirs[job.task_name()]))]
+    return run_script_files
+
+
+def create_run_script(args, job, work_dir):
+    """ Create the bash run script for a particular job """
+
+    runtime_estimate = ret_expected_runtime(job, work_dir)
+    memory_estimate = ret_expected_memory(job, work_dir)
+    if runtime_estimate < 0:
+        runtime_estimate = 0
+
+    if memory_estimate < 0:
+        memory_estimate = 0
+
+    human_readable_runtime_est = format_human_readable_time(runtime_estimate)
+    human_readable_memory_est = format_human_readable_memory(memory_estimate)
+    Path(work_dir).mkdir(parents=True)
+    run_script_file = Path(work_dir) / "vtr_flow.sh"
+    template = str(paths.flow_template_path)
+    with open(template, "r") as in_file:
+        template_string = in_file.readlines()
+    template_string = "".join(template_string)
+    with open(run_script_file, "w+") as out_file:
+        print(
+            template_string.format(
+                estimated_time=runtime_estimate,
+                estimated_memory=memory_estimate,
+                human_readable_time=human_readable_runtime_est,
+                human_readable_memory=human_readable_memory_est,
+                script=args.script,
+                command=job.run_command(),
+            ),
+            file=out_file,
+            end="",
+        )
+    return str(run_script_file)
+
+
+def ret_expected_runtime(job, work_dir):
+    """ Returns the expected run-time (in seconds) of the specified run, or -1 if unknown """
+    seconds = -1
+    golden_results = load_parse_results(
+        str(Path(work_dir).parent.parent.parent.parent / "config/golden_results.txt")
+    )
+    metrics = golden_results.metrics(job.arch(), job.circuit(), job.script_params())
+    if metrics and "vtr_flow_elapsed_time" in metrics:
+        seconds = float(metrics["vtr_flow_elapsed_time"])
+    return seconds
+
+
+def ret_expected_memory(job, work_dir):
+    """ Returns the expected memory usage (in bytes) of the specified run, or -1 if unknown """
+    memory_kib = -1
+    golden_results = load_parse_results(
+        str(Path(work_dir).parent.parent.parent.parent / "config/golden_results.txt")
+    )
+    metrics = golden_results.metrics(job.arch(), job.circuit(), job.script_params())
+    for metric in ["max_odin_mem", "max_abc_mem", "max_ace_mem", "max_vpr_mem"]:
+        if metrics and metric in metrics and int(metrics[metric]) > memory_kib:
+            memory_kib = int(metrics[metric])
+    return memory_kib
+
+
+def format_human_readable_time(seconds):
+    """ Format the given number of seconds as a human readable value """
+    if seconds < 60:
+        return "%.0f seconds" % seconds
+    if seconds < 60 * 60:
+        minutes = seconds / 60
+        return "%.0f minutes" % minutes
+    hour = seconds / 60 / 60
+    return "%.0f hours" % hour
+
+
+def format_human_readable_memory(num_bytes):
+    """ Format the given number of bytes as a human readable value """
+    if num_bytes < 1024 ** 3:
+        return "%.2f MiB" % (num_bytes / (1024 ** 2))
+    return "%.2f GiB" % (num_bytes / (1024 ** 3))
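+
+# For example, format_human_readable_time(90) returns "2 minutes" (1.5 minutes
+# rounded by "%.0f"), and format_human_readable_memory(3 * 1024 ** 3) returns
+# "3.00 GiB".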
+def run_vtr_flow_process(queue, run_dirs, job, script):
+    """
+    This is the function that the multiprocessing pool calls.
+    It runs the VTR flow and alerts the pool through the queue if the flow failed.
+    """
+    work_dir = job.work_dir(run_dirs[job.task_name()])
+    Path(work_dir).mkdir(parents=True, exist_ok=True)
+    out = None
+    vtr_flow_out = str(PurePath(work_dir) / "vtr_flow.out")
+    with open(vtr_flow_out, "w+") as out_file:
+        with redirect_stdout(out_file):
+            if script == "run_vtr_flow.py":
+                out = run_vtr_flow(job.run_command(), str(paths.run_vtr_flow_path))
+            else:
+                out = subprocess.call(
+                    [str(paths.scripts_path / script)] + job.run_command(),
+                    cwd=str(paths.root_path),
+                    stdout=out_file,
+                )
+
+    with open(vtr_flow_out, "r") as out_file:
+        for line in out_file.readlines():
+            print(line, end="")
+
+    # Report a flow failure to the queue
+    if out:
+        queue.put(1)
+    else:
+        queue.put(0)
+
+
+if __name__ == "__main__":
+    vtr_command_main(sys.argv[1:])
diff --git a/vtr_flow/tasks/regression_tests/README.md b/vtr_flow/tasks/regression_tests/README.md
index 32f96c810fa..7b29e764f3b 100644
--- a/vtr_flow/tasks/regression_tests/README.md
+++ b/vtr_flow/tasks/regression_tests/README.md
@@ -11,31 +11,31 @@ to [`regression_tests.ods`](./regression_tests.ods) for details on architecture
 DO-IT-ALL COMMAND - This command will execute, parse, and check results.
 ```
-./run_reg_test.pl vtr_reg_basic
+./run_reg_test.py vtr_reg_basic
 ```
 
 To create golden results, use:
 ```
-./run_reg_test.pl -create_golden vtr_reg_basic
+./run_reg_test.py -create_golden vtr_reg_basic
 ```
 
 Execute with:
 ```
-/run_vtr_task.pl -l /regression_tests/vtr_reg_basic/task_list.txt
+/run_vtr_task.py -l /regression_tests/vtr_reg_basic/task_list.txt
 ```
 
 Parse results with:
 ```
-/parse_vtr_task.pl -l /regression_tests/vtr_reg_basic/task_list.txt
+/parse_vtr_task.py -l /regression_tests/vtr_reg_basic/task_list.txt
 ```
 
 Check results with:
 ```
-/parse_vtr_task.pl -check_golden -l /regression_tests/vtr_reg_basic/task_list.txt
+/parse_vtr_task.py -check_golden -l /regression_tests/vtr_reg_basic/task_list.txt
 ```
 
 Create golden results with:
 ```
-/parse_vtr_task.pl -create_golden -l /regression_tests/vtr_reg_basic/task_list.txt
+/parse_vtr_task.py -create_golden -l /regression_tests/vtr_reg_basic/task_list.txt
 ```
 
 ## LEVEL TWO - Strong VTR Regression - `vtr_reg_strong`
 
@@ -45,32 +45,32 @@ Create golden results with:
 
 DO-IT-ALL COMMAND - This command will execute, parse, and check results.
 ```
-./run_reg_test.pl vtr_reg_strong
-./run_reg_test.pl vtr_reg_valgrind_small
+./run_reg_test.py vtr_reg_strong
+./run_reg_test.py vtr_reg_valgrind_small
 ```
 
 To create golden results, use:
 ```
-./run_reg_test.pl -create_golden vtr_reg_strong
+./run_reg_test.py -create_golden vtr_reg_strong
 ```
 
 Execute with:
 ```
-/run_vtr_task.pl -l /regression_tests/vtr_reg_strong/task_list.txt
+/run_vtr_task.py -l /regression_tests/vtr_reg_strong/task_list.txt
 ```
 
 Parse results with:
 ```
-/parse_vtr_task.pl -l /regression_tests/vtr_reg_strong/task_list.txt
+/parse_vtr_task.py -l /regression_tests/vtr_reg_strong/task_list.txt
 ```
 
 Check results with:
 ```
-/parse_vtr_task.pl -check_golden -l /regression_tests/vtr_reg_strong/task_list.txt
+/parse_vtr_task.py -check_golden -l /regression_tests/vtr_reg_strong/task_list.txt
 ```
 
 Create golden results with:
 ```
-/parse_vtr_task.pl -create_golden -l /regression_tests/vtr_reg_strong/task_list.txt
+/parse_vtr_task.py -create_golden -l /regression_tests/vtr_reg_strong/task_list.txt
 ```
 
 ## LEVEL THREE - Nightly VTR Regression - `vtr_reg_nightly`
 
@@ -80,33 +80,33 @@ Create golden results with:
 
 DO-IT-ALL COMMAND - This command will execute, parse, and check results.
 ```
-./run_reg_test.pl vtr_reg_nightly
-./run_reg_test.pl vtr_reg_valgrind
+./run_reg_test.py vtr_reg_nightly
+./run_reg_test.py vtr_reg_valgrind
 ```
 
 To create golden results, use:
 ```
-./run_reg_test.pl -create_golden vtr_reg_nightly
+./run_reg_test.py -create_golden vtr_reg_nightly
 ```
 
 Execute with:
 ```
-/run_vtr_task.pl -l /regression_tests/vtr_reg_nightly/task_list.txt
+/run_vtr_task.py -l /regression_tests/vtr_reg_nightly/task_list.txt
 ```
 
 Parse results with:
 ```
-/parse_vtr_task.pl -l /regression_tests/vtr_reg_nightly/task_list.txt
+/parse_vtr_task.py -l /regression_tests/vtr_reg_nightly/task_list.txt
 ```
 
 Check results with:
 ```
-/parse_vtr_task.pl -check_golden -l /regression_tests/vtr_reg_nightly/task_list.txt
+/parse_vtr_task.py -check_golden -l /regression_tests/vtr_reg_nightly/task_list.txt
 ```
 
 Create golden results with:
 ```
-/parse_vtr_task.pl -create_golden -l /regression_tests/vtr_reg_nightly/task_list.txt
+/parse_vtr_task.py -create_golden -l /regression_tests/vtr_reg_nightly/task_list.txt
 ```
 
 
@@ -117,30 +117,30 @@ Create golden results with:
 
 DO-IT-ALL COMMAND - This command will execute, parse, and check results.
 ```
-./run_reg_test.pl vtr_reg_weekly
+./run_reg_test.py vtr_reg_weekly
 ```
 
 To create golden results, use:
 ```
-./run_reg_test.pl -create_golden vtr_reg_weekly
+./run_reg_test.py -create_golden vtr_reg_weekly
 ```
 
 Execute with:
 ```
-/run_vtr_task.pl -l /regression_tests/vtr_reg_weekly/task_list.txt
+/run_vtr_task.py -l /regression_tests/vtr_reg_weekly/task_list.txt
 ```
 
 Parse results with:
 ```
-/parse_vtr_task.pl -l /regression_tests/vtr_reg_weekly/task_list.txt
+/parse_vtr_task.py -l /regression_tests/vtr_reg_weekly/task_list.txt
 ```
 
 Check results with:
 ```
-/parse_vtr_task.pl -check_golden -l /regression_tests/vtr_reg_weekly/task_list.txt
+/parse_vtr_task.py -check_golden -l /regression_tests/vtr_reg_weekly/task_list.txt
 ```
 
 Create golden results with:
 ```
-/parse_vtr_task.pl -create_golden -l /regression_tests/vtr_reg_weekly/task_list.txt
+/parse_vtr_task.py -create_golden -l /regression_tests/vtr_reg_weekly/task_list.txt
 ```
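The `-check_golden` steps above compare freshly parsed task results against the stored golden file. A toy sketch of that kind of comparison; the key layout, metric names, and tolerance policy here are assumptions, not VTR's exact rules:

```python
# Toy sketch of a -check_golden style comparison. Both tables map
# (arch, circuit, script_params) keys to {metric: value} dicts; rows whose
# numeric metrics drift beyond rel_tol are flagged.
def check_golden(parsed, golden, rel_tol=0.05):
    failures = []
    for key, gold_row in golden.items():
        new_row = parsed.get(key)
        if new_row is None:
            failures.append((key, "missing result"))
            continue
        for metric, gold_val in gold_row.items():
            try:
                gold_num = float(gold_val)
            except ValueError:
                continue  # non-numeric metrics are not range-checked here
            try:
                new_num = float(new_row[metric])
            except (KeyError, ValueError):
                failures.append((key, metric))
                continue
            if gold_num != 0 and abs(new_num - gold_num) / abs(gold_num) > rel_tol:
                failures.append((key, metric))
    return failures


golden = {("arch.xml", "ex1.blif", ""): {"vtr_flow_elapsed_time": "10.0"}}
parsed = {("arch.xml", "ex1.blif", ""): {"vtr_flow_elapsed_time": "10.3"}}
assert check_golden(parsed, golden) == []  # 3% drift is within the 5% tolerance
```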
diff --git a/vtr_flow/tasks/run_all.sh b/vtr_flow/tasks/run_all.sh
index 99941cb8fca..d6e0bbf3d44 100755
--- a/vtr_flow/tasks/run_all.sh
+++ b/vtr_flow/tasks/run_all.sh
@@ -5,5 +5,5 @@ for task in $tasks
 do
 	striped="${task%/config}"
 	striped="${striped#./}"
-	~/vtr/vtr_flow/scripts/run_vtr_task.pl $striped
+	~/vtr/vtr_flow/scripts/run_vtr_task.py $striped
 done
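For comparison, a Python sketch equivalent to the `run_all.sh` loop above; the hard-coded `~/vtr` paths mirror the shell script's own assumptions about the `vtr_flow/tasks` layout:

```python
# Python equivalent of run_all.sh: find every task that has a config/
# directory and hand its task name to run_vtr_task.py, one at a time.
import subprocess
from pathlib import Path

tasks_dir = Path.home() / "vtr/vtr_flow/tasks"
for config in sorted(tasks_dir.glob("**/config")):
    task = config.parent.relative_to(tasks_dir)  # strip the tasks_dir prefix
    subprocess.run(
        [str(Path.home() / "vtr/vtr_flow/scripts/run_vtr_task.py"), str(task)],
        check=False,  # keep going even if one task fails, like the shell loop
    )
```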