diff --git a/dev/pylint_check.py b/dev/pylint_check.py
index 6f97859e060..62dd9901e60 100755
--- a/dev/pylint_check.py
+++ b/dev/pylint_check.py
@@ -90,7 +90,7 @@
 class TermColor:
-    """ Terminal codes for printing in color """
+    """Terminal codes for printing in color"""
 
     # pylint: disable=too-few-public-methods
@@ -105,14 +105,14 @@ class TermColor:
 def error(*msg, returncode=-1):
-    """ Print an error message and exit program """
+    """Print an error message and exit program"""
     print(TermColor.RED + "ERROR:", " ".join(str(item) for item in msg), TermColor.END)
     sys.exit(returncode)
 
 
 def expand_paths():
-    """ Build a list of all python files to process by going through 'paths_to_lint' """
+    """Build a list of all python files to process by going through 'paths_to_lint'"""
     paths = []
     for (path, is_recursive) in paths_to_lint:
diff --git a/run_reg_test.py b/run_reg_test.py
index eb98e275df9..7f82e636039 100755
--- a/run_reg_test.py
+++ b/run_reg_test.py
@@ -22,7 +22,7 @@
 def vtr_command_argparser(prog=None):
-    """ Parses the arguments of run_reg_test """
+    """Parses the arguments of run_reg_test"""
     description = textwrap.dedent(
         """
@@ -205,7 +205,7 @@ def vtr_command_main(arg_list, prog=None):
 def display_qor(reg_test):
-    """ Display the qor tests script files to be run outside of this script """
+    """Display the qor tests script files to be run outside of this script"""
     test_dir = paths.regression_tests_path / reg_test
     if not (test_dir / "qor_geomean.txt").is_file():
         print("QoR results do not exist ({}/qor_geomean.txt)".format(str(test_dir)))
@@ -250,7 +250,7 @@ def display_qor(reg_test):
 def run_odin_test(args, test_name):
-    """ Run ODIN II test with given test name """
+    """Run ODIN II test with given test name"""
     odin_reg_script = [
         str(paths.odin_verify_path),
         "--clean",
@@ -294,7 +294,7 @@ def run_odin_test(args, test_name):
 def collect_task_list(reg_test):
-    """ create a list of task files """
+    """create a list of task files"""
     task_list_filepath = paths.tasks_path / "regression_tests" / reg_test / "task_list.txt"
     if not task_list_filepath.is_file():
         raise IOError("Test does not exist: {}".format(reg_test))
@@ -316,7 +316,7 @@ def run_tasks(args, task_lists):
 def parse_single_test(task_lists, check=True, calculate=True, create=False):
-    """ parse the test results """
+    """parse the test results"""
     vtr_task_cmd = ["-l"] + [task_lists]
     if check:
         vtr_task_cmd += ["-check_golden"]
@@ -330,7 +330,7 @@ def parse_single_test(task_lists, check=True, calculate=True, create=False):
 def print_header(heading, divider="=", print_first_line=True):
-    """ Print heading formated in the center of two lines """
+    """Print heading formated in the center of two lines"""
     if print_first_line:
         print(divider * len(heading) * 2)
     print(" " * int((len(heading) / 2)), end="")
diff --git a/vtr_flow/scripts/flow_script_template.txt b/vtr_flow/scripts/flow_script_template.txt
index 1cc16fc374f..1b4e779c9e5 100644
--- a/vtr_flow/scripts/flow_script_template.txt
+++ b/vtr_flow/scripts/flow_script_template.txt
@@ -15,7 +15,7 @@ VTR_MEMORY_ESTIMATE_HUMAN_READABLE="{human_readable_memory}"
     #The IO redirection occurs in a sub-shell,
     #so we need to exit it with the correct code
-    exit \$?
+    exit $?
 }} |& tee vtr_flow.out
 #End I/O redirection
@@ -24,4 +24,4 @@ VTR_MEMORY_ESTIMATE_HUMAN_READABLE="{human_readable_memory}"
 #To get the correct exit status we need to exit with the
 #status of the first element in the pipeline (i.e. the real
 #command run above)
-exit \${{PIPESTATUS[0]}}
+exit ${{PIPESTATUS[0]}}
diff --git a/vtr_flow/scripts/python_libs/vtr/log_parse.py b/vtr_flow/scripts/python_libs/vtr/log_parse.py
index 77d1c277e9f..9e812c33562 100644
--- a/vtr_flow/scripts/python_libs/vtr/log_parse.py
+++ b/vtr_flow/scripts/python_libs/vtr/log_parse.py
@@ -18,7 +18,7 @@
 class ParsePattern:
-    """ Pattern for parsing log files """
+    """Pattern for parsing log files"""
 
     def __init__(self, name, filename, regex_str, default_value=None):
         self._name = name
@@ -27,44 +27,44 @@ def __init__(self, name, filename, regex_str, default_value=None):
         self._default_value = default_value
 
     def name(self):
-        """ Return name of what is being parsed for """
+        """Return name of what is being parsed for"""
         return self._name
 
     def filename(self):
-        """ Log filename to parse """
+        """Log filename to parse"""
         return self._filename
 
     def regex(self):
-        """ Regex expression to use for parsing """
+        """Regex expression to use for parsing"""
         return self._regex
 
     def default_value(self):
-        """ Return the default parse value """
+        """Return the default parse value"""
         return self._default_value
 
 
 class PassRequirement(abc.ABC):
-    """ Used to check if a parsed value passes an expression """
+    """Used to check if a parsed value passes an expression"""
 
     def __init__(self, metric):
         self._metric = metric
         self._type = type
 
     def metric(self):
-        """ Return pass matric """
+        """Return pass matric"""
         return self._metric
 
     @abc.abstractmethod
     def type(self):
-        """ Return the type of requirement checking """
+        """Return the type of requirement checking"""
 
     @abc.abstractmethod
     def check_passed(self, golden_value, check_value, check_string="golden value"):
-        """ Return whether the check passed """
+        """Return whether the check passed"""
 
 
 class EqualPassRequirement(PassRequirement):
-    """ Used to check if parsed value is equal to golden value """
+    """Used to check if parsed value is equal to golden value"""
 
     def type(self):
         return "Equal"
@@ -82,7 +82,7 @@ def check_passed(self, golden_value, check_value, check_string="golden value"):
 
 
 class RangePassRequirement(PassRequirement):
-    """ Used to check if parsed value is within a range """
+    """Used to check if parsed value is within a range"""
 
     def __init__(self, metric, min_value=None, max_value=None):
         super().__init__(metric)
@@ -97,15 +97,15 @@ def type(self):
         return "Range"
 
     def min_value(self):
-        """ Get min value of golden range """
+        """Get min value of golden range"""
         return self._min_value
 
     def max_value(self):
-        """ Get max value of golden range """
+        """Get max value of golden range"""
         return self._max_value
 
     def check_passed(self, golden_value, check_value, check_string="golden value"):
-        """ Check if parsed value is within a range or equal to golden value """
+        """Check if parsed value is within a range or equal to golden value"""
 
         if golden_value is None or check_value is None:
             if golden_value is None and check_value is None:
@@ -165,7 +165,7 @@ def check_passed(self, golden_value, check_value, check_string="golden value"):
 
 
 class RangeAbsPassRequirement(PassRequirement):
-    """ Check if value is in some relative ratio range, or below some absolute value """
+    """Check if value is in some relative ratio range, or below some absolute value"""
 
     def __init__(self, metric, min_value=None, max_value=None, abs_threshold=None):
         super().__init__(metric)
@@ -178,19 +178,19 @@ def __init__(self, metric, min_value=None, max_value=None, abs_threshold=None):
         self._abs_threshold = abs_threshold
 
     def type(self):
-        """ Return requirement type """
+        """Return requirement type"""
         return "Range"
 
     def min_value(self):
-        """ Return min value of ratio range """
+        """Return min value of ratio range"""
         return self._min_value
 
     def max_value(self):
-        """ Return max value of ratio range """
+        """Return max value of ratio range"""
         return self._max_value
 
     def abs_threshold(self):
-        """ Get absolute threshold """
+        """Get absolute threshold"""
         return self._abs_threshold
 
     def check_passed(self, golden_value, check_value, check_string="golden value"):
@@ -267,7 +267,7 @@ def check_passed(self, golden_value, check_value, check_string="golden value"):
 
 
 class ParseResults:
-    """ This class contains all parse results and metrics """
+    """This class contains all parse results and metrics"""
 
     PRIMARY_KEYS = ("architecture", "circuit", "script_params")
@@ -275,19 +275,19 @@ def __init__(self):
         self._metrics = OrderedDict()
 
     def add_result(self, arch, circuit, parse_result, script_param=None):
-        """ Add new parse result for given arch/circuit pair """
+        """Add new parse result for given arch/circuit pair"""
         script_param = load_script_param(script_param)
         self._metrics[(arch, circuit, script_param)] = parse_result
 
     def metrics(self, arch, circuit, script_param=None):
-        """ Return individual metric based on the architechure, circuit and script"""
+        """Return individual metric based on the architechure, circuit and script"""
         script_param = load_script_param(script_param)
         if (arch, circuit, script_param) in self._metrics:
             return self._metrics[(arch, circuit, script_param)]
         return None
 
     def all_metrics(self):
-        """ Return all metric results """
+        """Return all metric results"""
         return self._metrics
diff --git a/vtr_flow/scripts/python_libs/vtr/parse_vtr_task.py b/vtr_flow/scripts/python_libs/vtr/parse_vtr_task.py
index c4c1fde28c4..106be75c6c1 100755
--- a/vtr_flow/scripts/python_libs/vtr/parse_vtr_task.py
+++ b/vtr_flow/scripts/python_libs/vtr/parse_vtr_task.py
@@ -228,7 +228,7 @@ def parse_task(config, config_jobs, flow_metrics_basename=FIRST_PARSE_FILE):
 def parse_files(config_jobs, run_dir, flow_metrics_basename=FIRST_PARSE_FILE):
-    """ Parse the result files from the give jobs """
+    """Parse the result files from the give jobs"""
     task_parse_results_filepath = str(PurePath(run_dir) / flow_metrics_basename)
     with open(task_parse_results_filepath, "w") as out_f:
@@ -262,7 +262,7 @@ def parse_files(config_jobs, run_dir, flow_metrics_basename=FIRST_PARSE_FILE):
 def create_golden_results_for_tasks(configs):
-    """ Runs create_golden_results_for_task on all of the give configuration """
+    """Runs create_golden_results_for_task on all of the give configuration"""
     for config in configs:
         create_golden_results_for_task(config)
@@ -281,7 +281,7 @@ def create_golden_results_for_task(config):
 def check_golden_results_for_tasks(configs):
-    """ runs check_golden_results_for_task on all the input configurations """
+    """runs check_golden_results_for_task on all the input configurations"""
     num_qor_failures = 0
 
     print("\nCalculating QoR results...")
@@ -347,7 +347,7 @@ def check_two_files(
     first_name="task",
     second_name="golden",
 ):
-    """ Compare two files results """
+    """Compare two files results"""
     first_results = load_parse_results(first_results_filepath)
     second_results = load_parse_results(second_results_filepath)
     # Verify that the architecture and circuit are specified
@@ -461,7 +461,7 @@ def check_two_files(
 def summarize_qor(configs):
-    """ Summarize the Qor results """
+    """Summarize the Qor results"""
     first = True
     task_path = Path(configs[0].config_dir).parent
@@ -483,7 +483,7 @@ def summarize_qor(configs):
 def calc_geomean(args, configs):
-    """ caclulate and ouput the geomean values to the geomean file """
+    """caclulate and ouput the geomean values to the geomean file"""
     first = False
     task_path = Path(configs[0].config_dir).parent
     if len(configs) > 1 or (task_path.parent / "task_list.txt").is_file():
@@ -533,7 +533,7 @@ def calc_geomean(args, configs):
 def calculate_individual_geo_mean(lines, index, geo_mean, num):
-    """ Calculate an individual line of parse results goe_mean """
+    """Calculate an individual line of parse results goe_mean"""
     previous_value = None
     for line in lines:
         line = line.split("\t")[4:]
@@ -551,7 +551,7 @@ def calculate_individual_geo_mean(lines, index, geo_mean, num):
 def find_latest_run_dir(config):
-    """ Find the latest run directory for given configuration """
+    """Find the latest run directory for given configuration"""
     task_dir = find_task_dir(config)
     run_dir = get_latest_run_dir(task_dir)
diff --git a/vtr_flow/scripts/python_libs/vtr/task.py b/vtr_flow/scripts/python_libs/vtr/task.py
index d99248af5c6..33c9d4ab41a 100644
--- a/vtr_flow/scripts/python_libs/vtr/task.py
+++ b/vtr_flow/scripts/python_libs/vtr/task.py
@@ -293,7 +293,7 @@ def find_longest_task_description(configs):
 # pylint: disable=too-many-branches
-def create_jobs(args, configs, longest_name=0, longest_arch_circuit=0, after_run=False):
+def create_jobs(args, configs, after_run=False):
     """
     Create the jobs to be executed depending on the configs.
     """
@@ -409,8 +409,6 @@ def create_jobs(args, configs, longest_name=0, longest_arch_circuit=0, after_run
                         qor_parse_command,
                         work_dir,
                         run_dir,
-                        longest_name,
-                        longest_arch_circuit,
                         golden_results,
                     )
                 )
@@ -428,8 +426,6 @@ def create_jobs(args, configs, longest_name=0, longest_arch_circuit=0, after_run
                         qor_parse_command,
                         work_dir,
                         run_dir,
-                        longest_name,
-                        longest_arch_circuit,
                         golden_results,
                     )
                 )
@@ -449,8 +445,6 @@ def create_job(
     qor_parse_command,
     work_dir,
     run_dir,
-    longest_name,
-    longest_arch_circuit,
     golden_results,
 ):
     """
@@ -459,21 +453,7 @@ def create_job(
     param_string = "common" + (("_" + param.replace(" ", "_")) if param else "")
     if not param:
         param = "common"
-    # determine spacing for nice output
-    num_spaces_before = int((longest_name - len(config.task_name))) + 8
-    num_spaces_after = int((longest_arch_circuit - len(work_dir + "/{}".format(param_string))))
-    cmd += [
-        "-name",
-        "{}:{}{}/{}{}".format(
-            config.task_name,
-            " " * num_spaces_before,
-            work_dir,
-            param_string,
-            " " * num_spaces_after,
-        ),
-    ]
-
-    cmd += ["-temp_dir", run_dir + "/{}".format(param_string)]
+
     expected_min_w = ret_expected_min_w(circuit, arch, golden_results, param)
     expected_min_w = (
         int(expected_min_w * args.minw_hint_factor)
@@ -514,6 +494,7 @@ def create_job(
         ]
         current_qor_parse_command.insert(0, run_dir + "/{}".format(load_script_param(param)))
         current_cmd = cmd.copy()
+        current_cmd += ["-temp_dir", run_dir + "/{}".format(param_string)]
         if param_string != "common":
             current_cmd += param.split(" ")
     return Job(
diff --git a/vtr_flow/scripts/python_libs/vtr/util.py b/vtr_flow/scripts/python_libs/vtr/util.py
index f23f14613e4..f53b5f07bf8 100644
--- a/vtr_flow/scripts/python_libs/vtr/util.py
+++ b/vtr_flow/scripts/python_libs/vtr/util.py
@@ -217,7 +217,7 @@ def check_cmd(command):
 def pretty_print_table(file, border=False):
-    """ Convert file to a pretty, easily read table """
+    """Convert file to a pretty, easily read table"""
     table = PrettyTable()
     table.border = border
     reader = None
diff --git a/vtr_flow/scripts/run_vtr_task.py b/vtr_flow/scripts/run_vtr_task.py
index daead96e326..54a6a97792d 100755
--- a/vtr_flow/scripts/run_vtr_task.py
+++ b/vtr_flow/scripts/run_vtr_task.py
@@ -7,6 +7,7 @@
 from pathlib import Path
 from pathlib import PurePath
 import sys
+import os
 import argparse
 import textwrap
 import subprocess
@@ -32,7 +33,6 @@
     parse_tasks,
     find_task_dir,
     shorten_task_names,
-    find_longest_task_description,
    check_golden_results_for_tasks,
     create_golden_results_for_tasks,
     create_jobs,
@@ -46,7 +46,7 @@
 def vtr_command_argparser(prog=None):
-    """ Argument parse for run_vtr_task """
+    """Argument parse for run_vtr_task"""
     description = textwrap.dedent(
         """
@@ -122,7 +122,10 @@ def vtr_command_argparser(prog=None):
         "-system",
         choices=["local", "scripts"],
         default="local",
-        help="What system to run the tasks on.",
+        help="""What system to run the tasks on:
+        (local) runs the flow invocations on the local machine,
+        (scripts) Prints out all the generated script files
+        (instead of calling them to run all the flow invocations).""",
     )
 
     parser.add_argument(
@@ -204,7 +207,7 @@ def vtr_command_argparser(prog=None):
 def vtr_command_main(arg_list, prog=None):
-    """ Run the vtr tasks given and the tasks in the lists given """
+    """Run the vtr tasks given and the tasks in the lists given"""
     # Load the arguments
     args = vtr_command_argparser(prog).parse_args(arg_list)
@@ -224,7 +227,6 @@ def vtr_command_main(arg_list, prog=None):
         config_files = [find_task_config_file(task_name) for task_name in task_names]
 
         configs = []
-        longest_name = 0  # longest task name for use in creating prettier output
         common_task_prefix = None  # common task prefix to shorten task names
         for config_file in config_files:
             config = load_task_config(config_file)
@@ -236,14 +238,9 @@ def vtr_command_main(arg_list, prog=None):
                     None, common_task_prefix, config.task_name
                 ).find_longest_match(0, len(common_task_prefix), 0, len(config.task_name))
                 common_task_prefix = common_task_prefix[match.a : match.a + match.size]
-            if len(config.task_name) > longest_name:
-                longest_name = len(config.task_name)
         if args.short_task_names:
             configs = shorten_task_names(configs, common_task_prefix)
-        longest_arch_circuit = find_longest_task_description(
-            configs
-        )  # find longest task description for use in creating prettier output
-        num_failed = run_tasks(args, configs, longest_name, longest_arch_circuit)
+        num_failed = run_tasks(args, configs)
     except CommandError as exception:
         print("Error: {msg}".format(msg=exception.msg))
@@ -264,8 +261,6 @@ def vtr_command_main(arg_list, prog=None):
 def run_tasks(
     args,
     configs,
-    longest_name,
-    longest_arch_circuit,
 ):
     """
     Runs the specified set of tasks (configs)
@@ -273,7 +268,7 @@
     start = datetime.now()
     num_failed = 0
 
-    jobs = create_jobs(args, configs, longest_name, longest_arch_circuit)
+    jobs = create_jobs(args, configs)
 
     run_dirs = {}
     for config in configs:
@@ -307,10 +302,13 @@
             if args.calc_geomean:
                 summarize_qor(configs)
                 calc_geomean(args, configs)
+    # This option generates a shell script (vtr_flow.sh) for each architecture,
+    # circuit, script_params
+    # The generated can be used to be submitted on a large cluster
     elif args.system == "scripts":
         for _, value in run_dirs.items():
             Path(value).mkdir(parents=True)
-        run_scripts = create_run_scripts(args, jobs, run_dirs)
+        run_scripts = create_run_scripts(jobs, run_dirs)
         for script in run_scripts:
             print(script)
     else:
@@ -348,24 +346,26 @@ def run_parallel(args, queued_jobs, run_dirs):
     return num_failed
 
 
-def create_run_scripts(args, jobs, run_dirs):
-    """ Create the bash script files for each job run """
+def create_run_scripts(jobs, run_dirs):
+    """Create the bash script files for each job run"""
     run_script_files = []
     for job in jobs:
-        run_script_files += [create_run_script(args, job, job.work_dir(run_dirs[job.task_name()]))]
+        run_script_files += [create_run_script(job, job.work_dir(run_dirs[job.task_name()]))]
     return run_script_files
 
 
-def create_run_script(args, job, work_dir):
-    """ Create the bash run script for a particular job """
+def create_run_script(job, work_dir):
+    """Create the bash run script for a particular job"""
     runtime_estimate = ret_expected_runtime(job, work_dir)
     memory_estimate = ret_expected_memory(job, work_dir)
-    if runtime_estimate < 0:
-        runtime_estimate = 0
-    if memory_estimate < 0:
-        memory_estimate = 0
+    runtime_estimate = max(runtime_estimate, 0)
+    memory_estimate = max(memory_estimate, 0)
+
+    separator = " "
+    command_options_list = job.run_command()
+    command_options = separator.join(command_options_list)
 
     human_readable_runtime_est = format_human_readable_time(runtime_estimate)
     human_readable_memory_est = format_human_readable_memory(memory_estimate)
@@ -382,34 +382,43 @@ def create_run_script(args, job, work_dir):
             estimated_memory=memory_estimate,
             human_readable_time=human_readable_runtime_est,
             human_readable_memory=human_readable_memory_est,
-            script=args.script,
-            command=job.run_command(),
+            script=str(paths.run_vtr_flow_path),
+            command=command_options,
         ),
         file=out_file,
         end="",
     )
+
+    os.system("chmod +x " + str(run_script_file))
 
     return str(run_script_file)
 
 
 def ret_expected_runtime(job, work_dir):
-    """ Returns the expected run-time (in seconds) of the specified run, or -1 if unkown """
+    """Returns the expected run-time (in seconds) of the specified run, or -1 if unkown"""
     seconds = -1
     golden_results = load_parse_results(
         str(Path(work_dir).parent.parent.parent.parent / "config/golden_results.txt")
     )
+    metrics = golden_results.metrics(job.arch(), job.circuit(), job.script_params())
+    if metrics is None:
+        metrics = golden_results.metrics(job.arch(), job.circuit(), "common")
+
     if "vtr_flow_elapsed_time" in metrics:
         seconds = float(metrics["vtr_flow_elapsed_time"])
     return seconds
 
 
 def ret_expected_memory(job, work_dir):
-    """ Returns the expected memory usage (in bytes) of the specified run, or -1 if unkown """
+    """Returns the expected memory usage (in bytes) of the specified run, or -1 if unkown"""
     memory_kib = -1
     golden_results = load_parse_results(
         str(Path(work_dir).parent.parent.parent.parent / "config/golden_results.txt")
     )
     metrics = golden_results.metrics(job.arch(), job.circuit(), job.script_params())
+    if metrics is None:
+        metrics = golden_results.metrics(job.arch(), job.circuit(), "common")
+
     for metric in ["max_odin_mem", "max_abc_mem", "max_ace_mem", "max_vpr_mem"]:
         if metric in metrics and int(metrics[metric]) > memory_kib:
             memory_kib = int(metrics[metric])
@@ -417,7 +426,7 @@
 def format_human_readable_time(seconds):
-    """ format the number of seconds given as a human readable value """
+    """format the number of seconds given as a human readable value"""
     if seconds < 60:
         return "%.0f seconds" % seconds
     if seconds < 60 * 60:
@@ -428,7 +437,7 @@
 def format_human_readable_memory(num_bytes):
-    """ format the number of bytes given as a human readable value """
+    """format the number of bytes given as a human readable value"""
     if num_bytes < 1024 ** 3:
         return "%.2f MiB" % (num_bytes / (1024 ** 2))
     return "%.2f GiB" % (num_bytes / (1024 ** 3))