diff --git a/.github/kokoro/continuous/nightly.cfg b/.github/kokoro/continuous/nightly.cfg index f844c82c05d..66b12188de8 100644 --- a/.github/kokoro/continuous/nightly.cfg +++ b/.github/kokoro/continuous/nightly.cfg @@ -55,5 +55,5 @@ env_vars { env_vars { key: "NUM_CORES" - value: "5" + value: "3" } diff --git a/.github/kokoro/presubmit/nightly.cfg b/.github/kokoro/presubmit/nightly.cfg index 57f2f978499..a0c52122887 100644 --- a/.github/kokoro/presubmit/nightly.cfg +++ b/.github/kokoro/presubmit/nightly.cfg @@ -55,5 +55,5 @@ env_vars { env_vars { key: "NUM_CORES" - value: "5" + value: "3" } diff --git a/.github/kokoro/steps/vtr-full-setup.sh b/.github/kokoro/steps/vtr-full-setup.sh index f15d56e074e..bfaea2975dc 100755 --- a/.github/kokoro/steps/vtr-full-setup.sh +++ b/.github/kokoro/steps/vtr-full-setup.sh @@ -2,5 +2,6 @@ make get_titan_benchmarks make get_ispd_benchmarks +make get_symbiflow_benchmarks dev/upgrade_vtr_archs.sh diff --git a/.github/kokoro/steps/vtr-test.sh b/.github/kokoro/steps/vtr-test.sh index 459c4257d01..f624ad2b6b6 100644 --- a/.github/kokoro/steps/vtr-test.sh +++ b/.github/kokoro/steps/vtr-test.sh @@ -47,3 +47,26 @@ echo "========================================" export VPR_NUM_WORKERS=1 ./run_reg_test.py $VTR_TEST $VTR_TEST_OPTIONS -j$NUM_CORES kill $MONITOR + +echo "========================================" +echo "Cleaning benchmarks files" +echo "========================================" +# Removing Symbiflow archs and benchmarks +find vtr_flow/arch/symbiflow/ -type f -not -name 'README.*' -delete +find vtr_flow/benchmarks/symbiflow/ -type f -not -name 'README.*' -delete + +# Removing ISPD benchmarks +find vtr_flow/benchmarks/ispd_blif/ -type f -not -name 'README.*' -delete + +# Removing Titan benchmarks +find vtr_flow/benchmarks/titan_blif/ -type f -not -name 'README.*' -delete + +# Removing ISPD, Titan and Symbiflow tarballs +find . 
-type f -regex ".*\.tar\.\(gz\|xz\)" -delete + +# Make sure working directory doesn't exceed disk space limit! +echo "Working directory size: $(du -sh)" +if [[ $(du -s | cut -d $'\t' -f 1) -gt $(expr 1024 \* 1024 \* 90) ]]; then + echo "Working directory too large!" + exit 1 +fi diff --git a/.gitignore b/.gitignore index 1279c4a11e0..ef99b5bc203 100644 --- a/.gitignore +++ b/.gitignore @@ -46,6 +46,17 @@ vtr_flow/benchmarks/titan_other_blif/*.sdc ispd_benchmarks_vtr*.tar.gz vtr_flow/benchmarks/ispd_blif/*.blif +# +# SymbiFlow benchmarks +# +# We ignore the SymbiFlow netlists and architectures because of their large size. +# +*symbiflow*.tar.xz +vtr_flow/arch/symbiflow/*.bin +vtr_flow/arch/symbiflow/*.xml +vtr_flow/benchmarks/symbiflow/*.eblif +vtr_flow/benchmarks/symbiflow/sdc/*.sdc + +# +# Cloud9 Directory +# diff --git a/CMakeLists.txt b/CMakeLists.txt index c5e8423bfba..eaa4baf3494 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -314,6 +314,14 @@ add_custom_target(get_ispd_benchmarks WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR} COMMENT "Downloading (~50MB) and extracting Titan benchmarks (~0.5GB) into VTR source tree.") +# +# SymbiFlow Benchmarks +# +add_custom_target(get_symbiflow_benchmarks + COMMAND ./vtr_flow/scripts/download_symbiflow.py --vtr_flow_dir ./vtr_flow + WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR} + COMMENT "Downloading (~100MB) and extracting SymbiFlow architectures (~2.7GB) into VTR source tree.") + # # Unit Testing # diff --git a/vtr_flow/arch/symbiflow/README.md b/vtr_flow/arch/symbiflow/README.md new file mode 100644 index 00000000000..1dfce3a1eb5 --- /dev/null +++ b/vtr_flow/arch/symbiflow/README.md @@ -0,0 +1,12 @@ +SymbiFlow architectures +======================= + +This directory holds all the SymbiFlow architectures that are generated in the [SymbiFlow-arch-defs](https://github.com/SymbiFlow/symbiflow-arch-defs) repository. 
+ +The data files needed to successfully run VPR are: +- Architecture XML definition +- RR Graph +- Router Lookahead +- Place delay matrix lookup + +All the data files can be downloaded with the `make get_symbiflow_benchmarks` target in the root directory. diff --git a/vtr_flow/benchmarks/symbiflow/README.md b/vtr_flow/benchmarks/symbiflow/README.md new file mode 100644 index 00000000000..24beb11a9b1 --- /dev/null +++ b/vtr_flow/benchmarks/symbiflow/README.md @@ -0,0 +1,12 @@ +SymbiFlow benchmarks +==================== + +This directory holds all the SymbiFlow benchmarks that are generated in the [SymbiFlow-arch-defs](https://github.com/SymbiFlow/symbiflow-arch-defs) repository. + +The circuits come along with the SDC constraints file, if present, and have been produced with Yosys. +They are compatible with the SymbiFlow architectures produced in the same SymbiFlow-arch-defs build. + +Some of the circuits also require place constraint files to correctly place some IOs and clock tiles +in the correct location, so as not to incur routability issues. + +All the data files can be downloaded with the `make get_symbiflow_benchmarks` target in the root directory. diff --git a/vtr_flow/benchmarks/symbiflow/place_constr/README.md b/vtr_flow/benchmarks/symbiflow/place_constr/README.md new file mode 100644 index 00000000000..a0e6293406e --- /dev/null +++ b/vtr_flow/benchmarks/symbiflow/place_constr/README.md @@ -0,0 +1,4 @@ +Place constraints +================= + +This directory contains the place constraint files, each corresponding to the circuit file of the same name. diff --git a/vtr_flow/benchmarks/symbiflow/sdc/README.md b/vtr_flow/benchmarks/symbiflow/sdc/README.md new file mode 100644 index 00000000000..009c7040580 --- /dev/null +++ b/vtr_flow/benchmarks/symbiflow/sdc/README.md @@ -0,0 +1,4 @@ +SDC constraints +=============== + +This directory contains the SDC constraint files, each corresponding to the circuit file of the same name. 
diff --git a/vtr_flow/scripts/download_symbiflow.py b/vtr_flow/scripts/download_symbiflow.py new file mode 100755 index 00000000000..1e8be82f20c --- /dev/null +++ b/vtr_flow/scripts/download_symbiflow.py @@ -0,0 +1,195 @@ +#!/usr/bin/env python3 +""" + Script to download the SymbiFlow Series-7 architectures +""" + +import sys +import os +import argparse +import math +import textwrap +import fnmatch +import tempfile +import shutil +import subprocess +from urllib import request + +GCS_URL = { + "architectures": + "https://storage.googleapis.com/symbiflow-arch-defs-gha/symbiflow-xc7a50t_test-latest", + "benchmarks": + "https://storage.googleapis.com/symbiflow-arch-defs-gha/symbiflow-benchmarks-latest" +} + +SYMBIFLOW_URL_MIRRORS = {"google": GCS_URL} + + +class ExtractionError(Exception): + """ + Extraction error exception class + """ + + +def parse_args(): + """ + Parses and returns script's arguments + """ + + description = textwrap.dedent( + """ + Download and extract a symbiflow benchmark release into a + VTR-style directory structure. + + If a previous matching symbiflow release tar.gz file is found + does nothing (unless --force is specified). + """ + ) + parser = argparse.ArgumentParser( + formatter_class=argparse.ArgumentDefaultsHelpFormatter, description=description + ) + + parser.add_argument( + "--vtr_flow_dir", + required=True, + help="The 'vtr_flow' directory under the VTR tree. " + "If specified this will extract the symbiflow release, " + "placing benchmarks under vtr_flow/benchmarks/symbiflow ", + ) + parser.add_argument( + "--force", + default=False, + action="store_true", + help="Run extraction step even if directores etc. 
already exist", + ) + + parser.add_argument("--mirror", default="google", choices=["google"], help="Download mirror") + + parser.add_argument( + "--upgrade_archs", + action="store_true", + default=True, + help="Try to upgrade included architecture files (using the upgrade_archs.py)", + ) + + return parser.parse_args() + + +def main(): + """ + Main function + """ + + args = parse_args() + + try: + urls = SYMBIFLOW_URL_MIRRORS[args.mirror] + archs_tar_xz_url = urls["architectures"] + benchmarks_tar_xz_url = urls["benchmarks"] + + archs_tar_xz_filename = "archs_symbiflow.tar.xz" + benchmarks_tar_xz_filename = "benchmarks_symbiflow.tar.xz" + + print("Downloading architectures {}".format(archs_tar_xz_url)) + download_url(archs_tar_xz_filename, archs_tar_xz_url) + + print("Extracting architectures {}".format(archs_tar_xz_filename)) + symbiflow_data_dir = "share/symbiflow/arch/xc7a50t_test" + extract_to_vtr_flow_dir(args, archs_tar_xz_filename, "arch", symbiflow_data_dir) + + print("Downloading benchmarks {}".format(benchmarks_tar_xz_url)) + download_url(benchmarks_tar_xz_filename, benchmarks_tar_xz_url) + + print("Extracting benchmarks {}".format(benchmarks_tar_xz_filename)) + extract_to_vtr_flow_dir(args, benchmarks_tar_xz_filename, "benchmarks") + + except ExtractionError as error: + print("Failed to extract data: ", error) + sys.exit(1) + + sys.exit(0) + + +def download_url(filename, url): + """ + Downloads the symbiflow release + """ + latest_package_url = request.urlopen(url).read().decode("utf-8") + print("Downloading latest package:\n{}".format(latest_package_url)) + request.urlretrieve(latest_package_url, filename, reporthook=download_progress_callback) + + +def download_progress_callback(block_num, block_size, expected_size): + """ + Callback for urllib.urlretrieve which prints a dot for every percent of a file downloaded + """ + total_blocks = int(math.ceil(expected_size / block_size)) + progress_increment = int(math.ceil(total_blocks / 100)) + + if block_num % 
progress_increment == 0: + sys.stdout.write(".") + sys.stdout.flush() + if block_num * block_size >= expected_size: + print("") + + +def extract_to_vtr_flow_dir(args, tar_xz_filename, destination, extract_path=""): + """ + Extracts the 'benchmarks' directory of the symbiflow release + into its corresponding vtr directory + """ + + # Reference directories + dest_dir = os.path.join(args.vtr_flow_dir, destination) + symbiflow_extract_dir = os.path.join(dest_dir, "symbiflow") + + if not args.force: + # Check that all expected directories exist + expected_dirs = [ + args.vtr_flow_dir, + symbiflow_extract_dir, + ] + for directory in expected_dirs: + if not os.path.isdir(directory): + raise ExtractionError("{} should be a directory".format(directory)) + + # Create a temporary working directory + tmpdir = tempfile.mkdtemp(suffix="download_symbiflow", dir=".") + + # Extract matching files into the temporary directory + subprocess.call( + "tar -C {} -xf {} {}".format(tmpdir, tar_xz_filename, extract_path), + shell=True, + ) + + # Move the extracted files to the relevant directories, SDC files first (since we + # need to look up the BLIF name to make it match) + for dirpath, _, filenames in os.walk(tmpdir): + for filename in filenames: + src_file_path = os.path.join(dirpath, filename) + dst_file_path = None + + if fnmatch.fnmatch(src_file_path, "*/xc7a50t_test/arch.timing.xml"): + dst_file_path = os.path.join(symbiflow_extract_dir, "arch.timing.xml") + + elif fnmatch.fnmatch(src_file_path, "*/xc7a50t_test/*.bin"): + dst_file_path = os.path.join(symbiflow_extract_dir, filename) + + elif fnmatch.fnmatch(src_file_path, "**/*.eblif"): + dst_file_path = os.path.join(symbiflow_extract_dir, filename) + + elif fnmatch.fnmatch(src_file_path, "**/*.sdc"): + dst_file_path = os.path.join(symbiflow_extract_dir, "sdc", filename) + + elif fnmatch.fnmatch(src_file_path, "**/*.place"): + dst_file_path = os.path.join(symbiflow_extract_dir, "place_constr", filename) + + if dst_file_path: + 
shutil.move(src_file_path, dst_file_path) + + shutil.rmtree(tmpdir) + + print("Done") + + +if __name__ == "__main__": + main() diff --git a/vtr_flow/scripts/python_libs/vtr/task.py b/vtr_flow/scripts/python_libs/vtr/task.py index c60197f1dfb..eda41b6b914 100644 --- a/vtr_flow/scripts/python_libs/vtr/task.py +++ b/vtr_flow/scripts/python_libs/vtr/task.py @@ -18,7 +18,7 @@ paths, ) -# pylint: disable=too-many-instance-attributes, too-many-arguments, too-many-locals,too-few-public-methods +# pylint: disable=too-many-instance-attributes, too-many-arguments, too-many-locals, too-few-public-methods class TaskConfig: """ An object representing a task config file @@ -40,9 +40,12 @@ def __init__( script_params_list_add=None, pass_requirements_file=None, sdc_dir=None, + place_constr_dir=None, qor_parse_file=None, cmos_tech_behavior=None, pad_file=None, + additional_files=None, + additional_files_list_add=None ): self.task_name = task_name self.config_dir = config_dir @@ -58,9 +61,12 @@ def __init__( self.script_params_list_add = script_params_list_add self.pass_requirements_file = pass_requirements_file self.sdc_dir = sdc_dir + self.place_constr_dir = place_constr_dir self.qor_parse_file = qor_parse_file self.cmos_tech_behavior = cmos_tech_behavior self.pad_file = pad_file + self.additional_files = additional_files + self.additional_files_list_add = additional_files_list_add # pylint: enable=too-few-public-methods @@ -169,12 +175,14 @@ def load_task_config(config_file): [ "circuits_dir", "archs_dir", + "additional_files", "parse_file", "script_path", "script_params", "script_params_common", "pass_requirements_file", "sdc_dir", + "place_constr_dir", "qor_parse_file", "cmos_tech_behavior", "pad_file", @@ -229,7 +237,7 @@ def load_task_config(config_file): if "script_params_common" in key_values: key_values["script_params_common"] = split(key_values["script_params_common"]) - check_required_feilds(config_file, required_keys, key_values) + check_required_fields(config_file, 
required_keys, key_values) # Useful meta-data about the config config_dir = str(Path(config_file).parent) @@ -240,7 +248,7 @@ def load_task_config(config_file): return TaskConfig(**key_values) -def check_required_feilds(config_file, required_keys, key_values): +def check_required_fields(config_file, required_keys, key_values): """ Check that all required fields were specified """ @@ -283,7 +291,7 @@ def find_longest_task_description(configs): longest = arch_circuit_len return longest - +# pylint: disable=too-many-branches def create_jobs(args, configs, longest_name=0, longest_arch_circuit=0, after_run=False): """ Create the jobs to be executed depending on the configs. @@ -307,6 +315,14 @@ def create_jobs(args, configs, longest_name=0, longest_arch_circuit=0, after_run # Collect any extra script params from the config file cmd = [abs_circuit_filepath, abs_arch_filepath] + # Check if additional architectural data files are present + if config.additional_files_list_add: + for additional_file in config.additional_files_list_add: + flag, file_name = additional_file.split(',') + + cmd += [flag] + cmd += [resolve_vtr_source_file(config, file_name, config.arch_dir)] + if hasattr(args, "show_failures") and args.show_failures: cmd += ["-show_failures"] cmd += config.script_params if config.script_params else [] @@ -331,9 +347,22 @@ def create_jobs(args, configs, longest_name=0, longest_arch_circuit=0, after_run ) if config.sdc_dir: + sdc_name = "{}.sdc".format(Path(circuit).stem) + sdc_file = resolve_vtr_source_file(config, sdc_name, config.sdc_dir) + cmd += [ "-sdc_file", - "{}/{}.sdc".format(config.sdc_dir, Path(circuit).stem), + "{}".format(sdc_file) + ] + + if config.place_constr_dir: + place_constr_name = "{}.place".format(Path(circuit).stem) + place_constr_file = resolve_vtr_source_file( + config, place_constr_name, config.place_constr_dir) + + cmd += [ + "--fix_clusters", + "{}".format(place_constr_file) ] parse_cmd = None diff --git 
a/vtr_flow/tasks/regression_tests/vtr_reg_nightly/symbiflow/config/config.txt b/vtr_flow/tasks/regression_tests/vtr_reg_nightly/symbiflow/config/config.txt new file mode 100644 index 00000000000..45108602633 --- /dev/null +++ b/vtr_flow/tasks/regression_tests/vtr_reg_nightly/symbiflow/config/config.txt @@ -0,0 +1,42 @@ +############################################ +# Configuration file for running experiments +############################################## + +# Path to directory of circuits to use +circuits_dir=benchmarks/symbiflow + +# SDC directory +sdc_dir=benchmarks/symbiflow/sdc + +# Place constraints directory +place_constr_dir=benchmarks/symbiflow/place_constr + +# Path to directory of architectures to use +archs_dir=arch/symbiflow + +#Large benchmarks +circuit_list_add=picosoc_basys3_full_100.eblif +circuit_list_add=picosoc_basys3_full_50.eblif +circuit_list_add=linux_arty.eblif +circuit_list_add=minilitex_arty.eblif +circuit_list_add=minilitex_ddr_arty.eblif +circuit_list_add=minilitex_ddr_eth_arty.eblif + +# Add architectures to list to sweep +arch_list_add=arch.timing.xml + +# Add additional required architectural data files +additional_files_list_add=--read_rr_graph,rr_graph_xc7a50t_test.rr_graph.real.bin +additional_files_list_add=--read_router_lookahead,rr_graph_xc7a50t_test.lookahead.bin +additional_files_list_add=--read_placement_delay_lookup,rr_graph_xc7a50t_test.place_delay.bin + +# Parse info and how to parse +parse_file=vpr_standard.txt + +# How to parse QoR info +qor_parse_file=qor_standard.txt + +# Pass requirements +pass_requirements_file=pass_requirements.txt + +script_params=-starting_stage vpr --max_router_iterations 500 --routing_failure_predictor off --router_high_fanout_threshold 1000 --constant_net_method route --route_chan_width 500 --router_heap bucket --clock_modeling route --place_delta_delay_matrix_calculation_method dijkstra --place_delay_model delta_override --router_lookahead extended_map --check_route quick --strict_checks off 
--allow_dangling_combinational_nodes on --disable_errors check_unbuffered_edges:check_route --congested_routing_iteration_threshold 0.8 --incremental_reroute_delay_ripup off --base_cost_type delay_normalized_length_bounded --bb_factor 10 --initial_pres_fac 4.0 --check_rr_graph off diff --git a/vtr_flow/tasks/regression_tests/vtr_reg_nightly/task_list.txt b/vtr_flow/tasks/regression_tests/vtr_reg_nightly/task_list.txt index d8770bc8c70..8bf2bc3db9e 100644 --- a/vtr_flow/tasks/regression_tests/vtr_reg_nightly/task_list.txt +++ b/vtr_flow/tasks/regression_tests/vtr_reg_nightly/task_list.txt @@ -14,3 +14,4 @@ regression_tests/vtr_reg_nightly/vpr_verify_rr_graph_titan regression_tests/vtr_reg_nightly/vpr_verify_rr_graph_error_check regression_tests/vtr_reg_nightly/vtr_timing_update_diff regression_tests/vtr_reg_nightly/vtr_timing_update_diff_titan +regression_tests/vtr_reg_nightly/symbiflow