From 6776566ebbfb02bb7e51406ac1b7b3d94216c96f Mon Sep 17 00:00:00 2001 From: Alessandro Comodi Date: Tue, 1 Oct 2019 16:41:32 +0200 Subject: [PATCH 01/58] place: WIP added equivalent sites strong regression test Signed-off-by: Alessandro Comodi --- vtr_flow/arch/equivalent_sites/slice.xml | 1831 +++++++++++++++++ .../microbenchmarks/carry_chain.blif | 94 +- vtr_flow/scripts/upgrade_arch.py | 42 +- .../strong_equivalent_sites/config/config.txt | 28 + 4 files changed, 1902 insertions(+), 93 deletions(-) create mode 100644 vtr_flow/arch/equivalent_sites/slice.xml create mode 100644 vtr_flow/tasks/regression_tests/vtr_reg_strong/strong_equivalent_sites/config/config.txt diff --git a/vtr_flow/arch/equivalent_sites/slice.xml b/vtr_flow/arch/equivalent_sites/slice.xml new file mode 100644 index 00000000000..43726175057 --- /dev/null +++ b/vtr_flow/arch/equivalent_sites/slice.xml @@ -0,0 +1,1831 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + io_tile.in io_tile.out + io_tile.in io_tile.out + io_tile.in io_tile.out + io_tile.in io_tile.out + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 0.068e-9 + 0.068e-9 + 0.068e-9 + 0.068e-9 + 0.068e-9 + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 0.068e-9 + 0.068e-9 + 0.068e-9 + 0.068e-9 + 0.068e-9 + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 0.068e-9 + 0.068e-9 + 0.068e-9 + 0.068e-9 + 0.068e-9 + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 0.068e-9 + 0.068e-9 + 0.068e-9 + 0.068e-9 + 0.068e-9 + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 0.068e-9 + 0.068e-9 + 0.068e-9 + 0.068e-9 + 0.068e-9 + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 0.068e-9 + 0.068e-9 + 0.068e-9 + 0.068e-9 + 0.068e-9 + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 0.068e-9 + 0.068e-9 + 0.068e-9 + 0.068e-9 + 0.068e-9 + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 
+ + 0.068e-9 + 0.068e-9 + 0.068e-9 + 0.068e-9 + 0.068e-9 + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 0.068e-9 + 0.068e-9 + 0.068e-9 + 0.068e-9 + 0.068e-9 + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 0.068e-9 + 0.068e-9 + 0.068e-9 + 0.068e-9 + 0.068e-9 + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 0.068e-9 + 0.068e-9 + 0.068e-9 + 0.068e-9 + 0.068e-9 + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 1 1 1 1 1 1 1 1 1 1 1 1 1 + 1 1 1 1 1 1 1 1 1 1 1 1 + + + diff --git a/vtr_flow/benchmarks/microbenchmarks/carry_chain.blif b/vtr_flow/benchmarks/microbenchmarks/carry_chain.blif index 7adc6d29f1f..0918a3e0110 100644 --- a/vtr_flow/benchmarks/microbenchmarks/carry_chain.blif +++ b/vtr_flow/benchmarks/microbenchmarks/carry_chain.blif @@ -2,14 +2,7 @@ .inputs \ clk .outputs \ - out[0] \ - out[1] \ - out[2] \ - out[3] \ - out[4] \ - out[5] \ - out[6] \ - out[7] + out .names $false @@ -126,52 +119,11 @@ .subckt CARRY \ CI=$auto$alumacc.cc:474:replace_alu$26.C[15] \ - S=counter0[15] \ + S=out \ CO_CHAIN=$auto$alumacc.cc:474:replace_alu$26.C[16] \ CO_FABRIC=__vpr__unconn6 \ O=$0\counter0[21:0][15] -.subckt CARRY0 \ - CI=$auto$alumacc.cc:474:replace_alu$26.C[16] \ - S=counter0[16] \ - CO_CHAIN=$auto$alumacc.cc:474:replace_alu$26.C[17] \ - 
CO_FABRIC=__vpr__unconn7 \ - O=$0\counter0[21:0][16] - -.subckt CARRY \ - CI=$auto$alumacc.cc:474:replace_alu$26.C[17] \ - S=counter0[17] \ - CO_CHAIN=$auto$alumacc.cc:474:replace_alu$26.C[18] \ - CO_FABRIC=__vpr__unconn8 \ - O=$0\counter0[21:0][17] - -.subckt CARRY \ - CI=$auto$alumacc.cc:474:replace_alu$26.C[18] \ - S=counter0[18] \ - CO_CHAIN=$auto$alumacc.cc:474:replace_alu$26.C[19] \ - CO_FABRIC=__vpr__unconn9 \ - O=$0\counter0[21:0][18] - -.subckt CARRY \ - CI=$auto$alumacc.cc:474:replace_alu$26.C[19] \ - S=counter0[19] \ - CO_CHAIN=$auto$alumacc.cc:474:replace_alu$26.C[20] \ - CO_FABRIC=__vpr__unconn10 \ - O=$0\counter0[21:0][19] - -.subckt CARRY0 \ - CI=$auto$alumacc.cc:474:replace_alu$26.C[20] \ - S=counter0[20] \ - CO_CHAIN=$auto$alumacc.cc:474:replace_alu$26.C[21] \ - CO_FABRIC=__vpr__unconn12 \ - O=$0\counter0[21:0][20] - -.subckt CARRY \ - CI=$auto$alumacc.cc:474:replace_alu$26.C[21] \ - S=out[0] \ - CO_CHAIN=__vpr__unconn13 \ - O=$0\counter0[21:0][21] - .subckt FDRE \ CE=$true \ D=$0\counter0[21:0][0] \ @@ -283,45 +235,3 @@ R=$false \ Q=counter0[15] \ C=clk - -.subckt FDRE \ - CE=$true \ - D=$0\counter0[21:0][16] \ - R=$false \ - Q=counter0[16] \ - C=clk - -.subckt FDRE \ - CE=$true \ - D=$0\counter0[21:0][17] \ - R=$false \ - Q=counter0[17] \ - C=clk - -.subckt FDRE \ - CE=$true \ - D=$0\counter0[21:0][18] \ - R=$false \ - Q=counter0[18] \ - C=clk - -.subckt FDRE \ - CE=$true \ - D=$0\counter0[21:0][19] \ - R=$false \ - Q=counter0[19] \ - C=clk - -.subckt FDRE \ - CE=$true \ - D=$0\counter0[21:0][20] \ - R=$false \ - Q=counter0[20] \ - C=clk - -.subckt FDRE \ - CE=$true \ - D=$0\counter0[21:0][21] \ - R=$false \ - Q=out[0] \ - C=clk diff --git a/vtr_flow/scripts/upgrade_arch.py b/vtr_flow/scripts/upgrade_arch.py index 2ea9f6510aa..336125d3c84 100755 --- a/vtr_flow/scripts/upgrade_arch.py +++ b/vtr_flow/scripts/upgrade_arch.py @@ -41,6 +41,7 @@ def __init__(self): "upgrade_complex_sb_num_conns", "add_missing_comb_model_internal_timing_edges", 
"add_tile_tags", + "add_site_directs", ] def parse_args(): @@ -144,6 +145,11 @@ def main(): if result: modified = True + if "add_site_directs" in args.features: + result = add_site_directs(arch) + if result: + modified = True + if modified: if args.debug: root.write(sys.stdout, pretty_print=args.pretty) @@ -932,7 +938,7 @@ def swap_tags(tile, pb_type): if arch.findall('./tiles'): - return False + return False models = arch.find('./models') @@ -966,6 +972,40 @@ def swap_tags(tile, pb_type): return True +def add_site_directs(arch): + TAGS_TO_COPY = ['input', 'output', 'clock'] + + def add_directs(equivalent_site, pb_type): + for child in pb_type: + if child.tag in TAGS_TO_COPY: + tile_name = equivalent_site.attrib['pb_type'] + port = child.attrib['name'] + + from_to = "%s.%s" % (tile_name, port) + + direct = ET.Element("direct") + direct.set("from", from_to) + direct.set("to", from_to) + equivalent_site.append(direct) + + if arch.findall('./tiles/tile/equivalent_sites/site/direct'): + return False + + top_pb_types = [] + for pb_type in arch.iter('pb_type'): + if pb_type.getparent().tag == 'complexblocklist': + top_pb_types.append(pb_type) + + sites = [] + for pb_type in arch.iter('site'): + sites.append(pb_type) + + for pb_type in top_pb_types: + for site in sites: + if pb_type.attrib['name'] == site.attrib['pb_type']: + add_directs(site, pb_type) + + return True if __name__ == "__main__": main() diff --git a/vtr_flow/tasks/regression_tests/vtr_reg_strong/strong_equivalent_sites/config/config.txt b/vtr_flow/tasks/regression_tests/vtr_reg_strong/strong_equivalent_sites/config/config.txt new file mode 100644 index 00000000000..ce9abe5c381 --- /dev/null +++ b/vtr_flow/tasks/regression_tests/vtr_reg_strong/strong_equivalent_sites/config/config.txt @@ -0,0 +1,28 @@ +############################################## +# Configuration file for running experiments +############################################## + +# Path to directory of circuits to use 
+circuits_dir=benchmarks/microbenchmarks + +# Path to directory of architectures to use +archs_dir=arch/equivalent_sites + +# Add circuits to list to sweep +circuit_list_add=carry_chain.blif + +# Add architectures to list to sweep +arch_list_add=slice.xml + +# Parse info and how to parse +parse_file=vpr_standard.txt + +# How to parse QoR info +qor_parse_file=qor_standard.txt + +# Pass requirements +pass_requirements_file=pass_requirements.txt + +# Script parameters +#script_params="" +script_params = -track_memory_usage -lut_size 1 -starting_stage vpr From 8d463e3d31fb8e06d771fea7715549c6b2c7799f Mon Sep 17 00:00:00 2001 From: Alessandro Comodi Date: Tue, 1 Oct 2019 16:41:51 +0200 Subject: [PATCH 02/58] place: added equivalent sites placement capability Signed-off-by: Alessandro Comodi --- libs/libarchfpga/src/arch_util.cpp | 4 - libs/libarchfpga/src/physical_types.h | 61 +++-- libs/libarchfpga/src/read_xml_arch_file.cpp | 165 ++++++++++---- utils/fasm/src/fasm.cpp | 10 +- utils/fasm/src/fasm.h | 3 +- utils/route_diag/src/main.cpp | 4 +- vpr/src/base/SetupGrid.cpp | 13 +- vpr/src/base/SetupVPR.cpp | 32 ++- vpr/src/base/ShowSetup.cpp | 3 +- vpr/src/base/check_netlist.cpp | 7 +- vpr/src/base/clock_modeling.cpp | 2 +- vpr/src/base/clustered_netlist.cpp | 14 +- vpr/src/base/clustered_netlist.h | 14 +- vpr/src/base/clustered_netlist_utils.cpp | 4 +- vpr/src/base/device_grid.cpp | 4 +- vpr/src/base/device_grid.h | 2 +- vpr/src/base/read_netlist.cpp | 7 +- vpr/src/base/read_place.cpp | 6 +- vpr/src/base/stats.cpp | 2 +- vpr/src/base/vpr_api.cpp | 18 +- vpr/src/base/vpr_context.h | 5 +- vpr/src/draw/draw.cpp | 22 +- vpr/src/draw/intra_logic_block.cpp | 6 +- vpr/src/pack/cluster.cpp | 23 +- vpr/src/pack/cluster_placement.cpp | 2 +- vpr/src/pack/lb_type_rr_graph.cpp | 6 +- vpr/src/pack/output_clustering.cpp | 3 +- vpr/src/pack/pack.cpp | 19 +- vpr/src/pack/pack_report.cpp | 18 +- vpr/src/pack/pb_type_graph.cpp | 2 +- vpr/src/place/move_utils.cpp | 41 ++-- 
vpr/src/place/place.cpp | 24 +- vpr/src/place/place_macro.cpp | 2 +- vpr/src/place/timing_place_lookup.cpp | 18 +- vpr/src/place/uniform_move_generator.cpp | 7 +- vpr/src/power/power.cpp | 2 +- vpr/src/route/route_common.cpp | 12 +- vpr/src/route/rr_graph2.cpp | 16 +- vpr/src/util/vpr_utils.cpp | 240 ++++++++++++++------ vpr/src/util/vpr_utils.h | 21 +- 40 files changed, 570 insertions(+), 294 deletions(-) diff --git a/libs/libarchfpga/src/arch_util.cpp b/libs/libarchfpga/src/arch_util.cpp index b16b5f9de83..27780c42ab4 100644 --- a/libs/libarchfpga/src/arch_util.cpp +++ b/libs/libarchfpga/src/arch_util.cpp @@ -252,10 +252,6 @@ void free_type_descriptors(std::vector& type_descriptors) vtr::free(type.is_pin_global); vtr::free(type.pin_class); - for (auto equivalent_site : type.equivalent_sites) { - vtr::free(equivalent_site.pb_type_name); - } - for (auto port : type.ports) { vtr::free(port.name); } diff --git a/libs/libarchfpga/src/physical_types.h b/libs/libarchfpga/src/physical_types.h index 7743a395220..1424eb646f3 100644 --- a/libs/libarchfpga/src/physical_types.h +++ b/libs/libarchfpga/src/physical_types.h @@ -55,7 +55,9 @@ struct t_port_power; struct t_physical_tile_port; struct t_equivalent_site; struct t_physical_tile_type; +typedef const t_physical_tile_type* t_physical_tile_type_ptr; struct t_logical_block_type; +typedef const t_logical_block_type* t_logical_block_type_ptr; struct t_pb_type; struct t_pb_graph_pin_power; struct t_mode; @@ -521,27 +523,6 @@ enum class e_sb_type { constexpr int NO_SWITCH = -1; constexpr int DEFAULT_SWITCH = -2; -/* Describes the type for a logical block - * name: unique identifier for type - * pb_type: Internal subblocks and routing information for this physical block - * pb_graph_head: Head of DAG of pb_types_nodes and their edges - * - * index: Keep track of type in array for easy access - * physical_tile_index: index of the corresponding physical tile type - */ -struct t_logical_block_type { - char* name = nullptr; - - /* 
Clustering info */ - t_pb_type* pb_type = nullptr; - t_pb_graph_node* pb_graph_head = nullptr; - - int index = -1; /* index of type descriptor in array (allows for index referencing) */ - - int physical_tile_index = -1; /* index of the corresponding physical tile type */ -}; -typedef const t_logical_block_type* t_logical_block_type_ptr; - /* Describes the type for a physical tile * name: unique identifier for type * num_pins: Number of pins for the block @@ -626,14 +607,13 @@ struct t_physical_tile_type { int index = -1; /* index of type descriptor in array (allows for index referencing) */ - int logical_block_index = -1; /* index of the corresponding logical block type */ - - std::vector equivalent_sites; + std::vector equivalent_sites_names; + std::vector equivalent_sites; + std::unordered_map> tile_block_pin_directs_map; /* Returns the indices of pins that contain a clock for this physical logic block */ std::vector get_clock_pins_indices() const; }; -typedef const t_physical_tile_type* t_physical_tile_type_ptr; /** Describes I/O and clock ports of a physical tile type * @@ -668,17 +648,24 @@ struct t_physical_tile_port { int tile_type_index; }; -/** Describes the equivalent sites related to a specific tile type - * - * It corresponds to the tags in the FPGA architecture description +/* Describes the type for a logical block + * name: unique identifier for type + * pb_type: Internal subblocks and routing information for this physical block + * pb_graph_head: Head of DAG of pb_types_nodes and their edges * + * index: Keep track of type in array for easy access + * physical_tile_index: index of the corresponding physical tile type */ -struct t_equivalent_site { - char* pb_type_name; +struct t_logical_block_type { + char* name = nullptr; + + /* Clustering info */ + t_pb_type* pb_type = nullptr; + t_pb_graph_node* pb_graph_head = nullptr; - // XXX Variables to hold information on mapping between site and tile - // XXX as well as references to the belonging pb_type 
and tile_type - //t_logical_block_type* block_type; + int index = -1; /* index of type descriptor in array (allows for index referencing) */ + + std::vector equivalent_tiles; }; /************************************************************************************************* @@ -726,8 +713,9 @@ struct t_equivalent_site { * modes: Different modes accepted * ports: I/O and clock ports * num_clock_pins: A count of the total number of clock pins - * int num_input_pins: A count of the total number of input pins - * int num_output_pins: A count of the total number of output pins + * num_input_pins: A count of the total number of input pins + * num_output_pins: A count of the total number of output pins + * num_pins: A count of the total number of pins * timing: Timing matrix of block [0..num_inputs-1][0..num_outputs-1] * parent_mode: mode of the parent block * t_mode_power: ??? @@ -749,6 +737,8 @@ struct t_pb_type { int num_input_pins = 0; /* inputs not including clock pins */ int num_output_pins = 0; + int num_pins = 0; + t_mode* parent_mode = nullptr; int depth = 0; /* depth of pb_type */ @@ -861,6 +851,7 @@ struct t_port { int index; int port_index_by_type; + int absolute_first_pin_index; t_port_power* port_power; }; diff --git a/libs/libarchfpga/src/read_xml_arch_file.cpp b/libs/libarchfpga/src/read_xml_arch_file.cpp index c060945ecc0..20280bed7fd 100644 --- a/libs/libarchfpga/src/read_xml_arch_file.cpp +++ b/libs/libarchfpga/src/read_xml_arch_file.cpp @@ -84,14 +84,16 @@ static void SetupPinLocationsAndPinClasses(pugi::xml_node Locations, static void LoadPinLoc(pugi::xml_node Locations, t_physical_tile_type* type, const pugiutil::loc_data& loc_data); -static std::pair ProcessCustomPinLoc(pugi::xml_node Locations, - t_physical_tile_type_ptr type, - const char* pin_loc_string, - const pugiutil::loc_data& loc_data); +template +static std::pair ProcessPinString(pugi::xml_node Locations, + T type, + const char* pin_loc_string, + const pugiutil::loc_data& loc_data); /* 
Process XML hierarchy */ static void ProcessTiles(pugi::xml_node Node, std::vector& PhysicalTileTypes, + std::vector& LogicalBlockTypes, const t_default_fc_spec& arch_def_fc, t_arch& arch, const pugiutil::loc_data& loc_data); @@ -106,7 +108,13 @@ static void ProcessTilePort(pugi::xml_node Node, const pugiutil::loc_data& loc_data); static void ProcessTileEquivalentSites(pugi::xml_node Parent, t_physical_tile_type* PhysicalTileType, + std::vector& LogicalBlockTypes, const pugiutil::loc_data& loc_data); +static void ProcessEquivalentSiteDirects(pugi::xml_node Parent, + t_physical_tile_type* PhysicalTileType, + t_logical_block_type* LogicalBlockType, + std::string site_name, + const pugiutil::loc_data& loc_data); static void ProcessPb_Type(pugi::xml_node Parent, t_pb_type* pb_type, t_mode* mode, @@ -215,6 +223,10 @@ static void link_physical_logical_types(std::vector& Physi static void check_port_equivalence(t_physical_tile_type& physical_tile, t_logical_block_type& logical_block); static const t_physical_tile_port* get_port_by_name(t_physical_tile_type_ptr type, const char* port_name); +static const t_port* get_port_by_name(t_logical_block_type_ptr type, const char* port_name); + +template +static T get_type_by_name(const char* type_name, std::vector& types); /* * @@ -298,14 +310,14 @@ void XmlReadArch(const char* ArchFile, ProcessSwitchblocks(Next, arch, loc_data); } - /* Process logical block types */ - Next = get_single_child(architecture, "tiles", loc_data); - ProcessTiles(Next, PhysicalTileTypes, arch_def_fc, *arch, loc_data); - /* Process logical block types */ Next = get_single_child(architecture, "complexblocklist", loc_data); ProcessComplexBlocks(Next, LogicalBlockTypes, *arch, timing_enabled, loc_data); + /* Process logical block types */ + Next = get_single_child(architecture, "tiles", loc_data); + ProcessTiles(Next, PhysicalTileTypes, LogicalBlockTypes, arch_def_fc, *arch, loc_data); + /* Link Physical Tiles with Logical Blocks */ 
link_physical_logical_types(PhysicalTileTypes, LogicalBlockTypes); @@ -796,10 +808,10 @@ static void LoadPinLoc(pugi::xml_node Locations, for (int height = 0; height < type->height; ++height) { for (e_side side : {TOP, RIGHT, BOTTOM, LEFT}) { for (int pin = 0; pin < type->num_pin_loc_assignments[width][height][side]; ++pin) { - auto pin_range = ProcessCustomPinLoc(Locations, - type, - type->pin_loc_assignments[width][height][side][pin], - loc_data); + auto pin_range = ProcessPinString(Locations, + type, + type->pin_loc_assignments[width][height][side][pin], + loc_data); for (int pin_num = pin_range.first; pin_num < pin_range.second; ++pin_num) { VTR_ASSERT(pin_num < type->num_pins / type->capacity); @@ -827,10 +839,11 @@ static void LoadPinLoc(pugi::xml_node Locations, } } -static std::pair ProcessCustomPinLoc(pugi::xml_node Locations, - t_physical_tile_type_ptr type, - const char* pin_loc_string, - const pugiutil::loc_data& loc_data) { +template +static std::pair ProcessPinString(pugi::xml_node Locations, + T type, + const char* pin_loc_string, + const pugiutil::loc_data& loc_data) { int num_tokens; auto tokens = GetTokensFromString(pin_loc_string, &num_tokens); @@ -1393,6 +1406,8 @@ static void ProcessPb_Type(pugi::xml_node Parent, t_pb_type* pb_type, t_mode* mo /* process ports */ j = 0; + int absolute_port_first_pin_index = 0; + for (i = 0; i < 3; i++) { if (i == 0) { k = 0; @@ -1411,6 +1426,9 @@ static void ProcessPb_Type(pugi::xml_node Parent, t_pb_type* pb_type, t_mode* mo ProcessPb_TypePort(Cur, &pb_type->ports[j], pb_type->pb_type_power->estimation_method, is_root_pb_type, loc_data); + pb_type->ports[j].absolute_first_pin_index = absolute_port_first_pin_index; + absolute_port_first_pin_index += pb_type->ports[j].num_pins; + //Check port name duplicates ret_pb_ports = pb_port_names.insert(std::pair(pb_type->ports[j].name, 0)); if (!ret_pb_ports.second) { @@ -1443,6 +1461,8 @@ static void ProcessPb_Type(pugi::xml_node Parent, t_pb_type* pb_type, t_mode* mo } 
} + pb_type->num_pins = pb_type->num_input_pins + pb_type->num_output_pins + pb_type->num_clock_pins; + //Warn that max_internal_delay is no longer supported //TODO: eventually remove try { @@ -2929,6 +2949,7 @@ static void ProcessChanWidthDistrDir(pugi::xml_node Node, t_chan* chan, const pu static void ProcessTiles(pugi::xml_node Node, std::vector& PhysicalTileTypes, + std::vector& LogicalBlockTypes, const t_default_fc_spec& arch_def_fc, t_arch& arch, const pugiutil::loc_data& loc_data) { @@ -2941,7 +2962,6 @@ static void ProcessTiles(pugi::xml_node Node, */ t_physical_tile_type EMPTY_PHYSICAL_TILE_TYPE = SetupEmptyPhysicalType(); EMPTY_PHYSICAL_TILE_TYPE.index = 0; - EMPTY_PHYSICAL_TILE_TYPE.logical_block_index = 0; PhysicalTileTypes.push_back(EMPTY_PHYSICAL_TILE_TYPE); /* Process the types */ @@ -3000,7 +3020,7 @@ static void ProcessTiles(pugi::xml_node Node, //Load equivalent sites infromation Cur = get_single_child(CurTileType, "equivalent_sites", loc_data, ReqOpt::REQUIRED); - ProcessTileEquivalentSites(Cur, &PhysicalTileType, loc_data); + ProcessTileEquivalentSites(Cur, &PhysicalTileType, LogicalBlockTypes, loc_data); PhysicalTileType.index = index; @@ -3179,33 +3199,78 @@ static void ProcessTilePort(pugi::xml_node Node, static void ProcessTileEquivalentSites(pugi::xml_node Parent, t_physical_tile_type* PhysicalTileType, + std::vector& LogicalBlockTypes, const pugiutil::loc_data& loc_data) { pugi::xml_node CurSite; expect_only_children(Parent, {"site"}, loc_data); - if (count_children(Parent, "site", loc_data) != 1) { + if (count_children(Parent, "site", loc_data) < 1) { archfpga_throw(loc_data.filename_c_str(), loc_data.line(Parent), - "Zero or more than one sites corresponding to a tile.\n"); + "There are no sites corresponding to this tile: %s.\n", PhysicalTileType->name); } CurSite = Parent.first_child(); while (CurSite) { check_node(CurSite, "site", loc_data); - t_equivalent_site equivalent_site; - expect_only_attributes(CurSite, {"pb_type"}, loc_data); 
/* Load equivalent site name */ - auto Prop = get_attribute(CurSite, "pb_type", loc_data).value(); - equivalent_site.pb_type_name = vtr::strdup(Prop); + auto Prop = std::string(get_attribute(CurSite, "pb_type", loc_data).value()); + PhysicalTileType->equivalent_sites_names.push_back(Prop); + + auto LogicalBlockType = get_type_by_name(Prop.c_str(), LogicalBlockTypes); - PhysicalTileType->equivalent_sites.push_back(equivalent_site); + ProcessEquivalentSiteDirects(CurSite, PhysicalTileType, &LogicalBlockType, Prop, loc_data); CurSite = CurSite.next_sibling(CurSite.name()); } } +static void ProcessEquivalentSiteDirects(pugi::xml_node Parent, + t_physical_tile_type* PhysicalTileType, + t_logical_block_type* LogicalBlockType, + std::string site_name, + const pugiutil::loc_data& loc_data) { + pugi::xml_node CurDirect; + + expect_only_children(Parent, {"direct"}, loc_data); + + if (count_children(Parent, "direct", loc_data) < 1) { + archfpga_throw(loc_data.filename_c_str(), loc_data.line(Parent), + "There are no direct pin mappings between site %s and tile %s.\n", site_name, PhysicalTileType->name); + } + + std::unordered_map directs_map; + + CurDirect = Parent.first_child(); + while (CurDirect) { + check_node(CurDirect, "direct", loc_data); + + expect_only_attributes(CurDirect, {"from", "to"}, loc_data); + + std::string from, to; + from = std::string(get_attribute(CurDirect, "from", loc_data).value()); + to = std::string(get_attribute(CurDirect, "to", loc_data).value()); + + // XXX + auto from_pins = ProcessPinString(CurDirect, PhysicalTileType, from.c_str(), loc_data); + auto to_pins = ProcessPinString(CurDirect, LogicalBlockType, to.c_str(), loc_data); + + // Checking that the number of pins is exactly the same + VTR_ASSERT(from_pins.second - from_pins.first == to_pins.second - to_pins.first); + + int num_pins = from_pins.second - from_pins.first; + for (int i = 0; i < num_pins; i++) { + directs_map[to_pins.first + i] = from_pins.first + i; + } + + CurDirect = 
CurDirect.next_sibling(CurDirect.name()); + } + + PhysicalTileType->tile_block_pin_directs_map[LogicalBlockType->index] = directs_map; +} + /* Takes in node pointing to and loads all the * child type objects. */ static void ProcessComplexBlocks(pugi::xml_node Node, @@ -3222,7 +3287,6 @@ static void ProcessComplexBlocks(pugi::xml_node Node, */ t_logical_block_type EMPTY_LOGICAL_BLOCK_TYPE = SetupEmptyLogicalType(); EMPTY_LOGICAL_BLOCK_TYPE.index = 0; - EMPTY_LOGICAL_BLOCK_TYPE.physical_tile_index = 0; LogicalBlockTypes.push_back(EMPTY_LOGICAL_BLOCK_TYPE); /* Process the types */ @@ -4676,32 +4740,32 @@ e_side string_to_side(std::string side_str) { static void link_physical_logical_types(std::vector& PhysicalTileTypes, std::vector& LogicalBlockTypes) { - std::map check_equivalence; for (auto& physical_tile : PhysicalTileTypes) { if (physical_tile.index == EMPTY_TYPE_INDEX) continue; - for (auto& equivalent_site : physical_tile.equivalent_sites) { + unsigned int logical_block_added = 0; + for (auto& equivalent_site_name : physical_tile.equivalent_sites_names) { for (auto& logical_block : LogicalBlockTypes) { if (logical_block.index == EMPTY_TYPE_INDEX) continue; // Check the corresponding Logical Block - if (0 == strcmp(logical_block.pb_type->name, equivalent_site.pb_type_name)) { - physical_tile.logical_block_index = logical_block.index; - logical_block.physical_tile_index = physical_tile.index; - - auto result = check_equivalence.emplace(&physical_tile, &logical_block); - if (!result.second) { - archfpga_throw(__FILE__, __LINE__, - "Logical and Physical types do not have a one to one mapping\n"); - } + if (0 == strcmp(logical_block.pb_type->name, equivalent_site_name.c_str())) { + physical_tile.equivalent_sites.push_back(&logical_block); + logical_block.equivalent_tiles.push_back(&physical_tile); - check_port_equivalence(physical_tile, logical_block); + // TODO: Add check direct interconnect between site and tile add also pin mapping of integers + 
logical_block_added++; break; } } } + + if (logical_block_added != physical_tile.equivalent_sites.size()) { + archfpga_throw(__FILE__, __LINE__, + "Could not create link between the %s and all its equivalent sites.\n", physical_tile.name); + } } } @@ -4738,3 +4802,28 @@ static const t_physical_tile_port* get_port_by_name(t_physical_tile_type_ptr typ return nullptr; } + +static const t_port* get_port_by_name(t_logical_block_type_ptr type, const char* port_name) { + auto pb_type = type->pb_type; + + for (int i = 0; i < pb_type->num_ports; i++) { + auto port = pb_type->ports[i]; + if (0 == strcmp(port.name, port_name)) { + return &pb_type->ports[port.index]; + } + } + + return nullptr; +} + +template +static T get_type_by_name(const char* type_name, std::vector& types) { + for (auto type : types) { + if (0 == strcmp(type.name, type_name)) { + return type; + } + } + + archfpga_throw(__FILE__, __LINE__, + "Could not find type: %s\n", type_name); +} diff --git a/utils/fasm/src/fasm.cpp b/utils/fasm/src/fasm.cpp index 361abdb1a4b..64607f6a6b8 100644 --- a/utils/fasm/src/fasm.cpp +++ b/utils/fasm/src/fasm.cpp @@ -42,6 +42,7 @@ void FasmWriterVisitor::visit_top_impl(const char* top_level_name) { void FasmWriterVisitor::visit_clb_impl(ClusterBlockId blk_id, const t_pb* clb) { auto& place_ctx = g_vpr_ctx.placement(); auto& device_ctx = g_vpr_ctx.device(); + auto& cluster_ctx = g_vpr_ctx.clustering(); current_blk_id_ = blk_id; @@ -54,7 +55,8 @@ void FasmWriterVisitor::visit_clb_impl(ClusterBlockId blk_id, const t_pb* clb) { int y = place_ctx.block_locs[blk_id].loc.y; int z = place_ctx.block_locs[blk_id].loc.z; auto &grid_loc = device_ctx.grid[x][y]; - blk_type_ = grid_loc.type; + physical_tile_ = grid_loc.type; + logical_block_ = cluster_ctx.clb_nlist.block_type(blk_id); blk_prefix_ = ""; clb_prefix_ = ""; @@ -94,11 +96,11 @@ void FasmWriterVisitor::visit_clb_impl(ClusterBlockId blk_id, const t_pb* clb) { VTR_ASSERT(value != nullptr); std::string prefix_unsplit = 
value->front().as_string(); std::vector fasm_prefixes = vtr::split(prefix_unsplit, " \t\n"); - if(fasm_prefixes.size() != static_cast(blk_type_->capacity)) { + if(fasm_prefixes.size() != static_cast(physical_tile_->capacity)) { vpr_throw(VPR_ERROR_OTHER, __FILE__, __LINE__, "number of fasm_prefix (%s) options (%d) for block (%s) must match capacity(%d)", - prefix_unsplit.c_str(), fasm_prefixes.size(), blk_type_->name, blk_type_->capacity); + prefix_unsplit.c_str(), fasm_prefixes.size(), physical_tile_->name, physical_tile_->capacity); } grid_prefix = fasm_prefixes[z]; blk_prefix_ = grid_prefix + "."; @@ -122,7 +124,7 @@ void FasmWriterVisitor::check_interconnect(const t_pb_routes &pb_routes, int ino return; } - t_pb_graph_pin *prev_pin = pb_graph_pin_lookup_from_index_by_type_.at(blk_type_->index)[prev_node]; + t_pb_graph_pin *prev_pin = pb_graph_pin_lookup_from_index_by_type_.at(logical_block_->index)[prev_node]; int prev_edge; for(prev_edge = 0; prev_edge < prev_pin->num_output_edges; prev_edge++) { diff --git a/utils/fasm/src/fasm.h b/utils/fasm/src/fasm.h index 28ab1c79d7f..892dc6a83d7 100644 --- a/utils/fasm/src/fasm.h +++ b/utils/fasm/src/fasm.h @@ -86,7 +86,8 @@ class FasmWriterVisitor : public NetlistVisitor { t_pb_graph_node *root_clb_; bool current_blk_has_prefix_; - t_physical_tile_type_ptr blk_type_; + t_physical_tile_type_ptr physical_tile_; + t_logical_block_type_ptr logical_block_; std::string blk_prefix_; std::string clb_prefix_; std::map clb_prefix_map_; diff --git a/utils/route_diag/src/main.cpp b/utils/route_diag/src/main.cpp index 0c8e095cd52..ee1c467d17c 100644 --- a/utils/route_diag/src/main.cpp +++ b/utils/route_diag/src/main.cpp @@ -154,7 +154,7 @@ static void profile_source(int source_rr_node, for (int sink_x = start_x; sink_x <= end_x; sink_x++) { for (int sink_y = start_y; sink_y <= end_y; sink_y++) { - if(device_ctx.grid[sink_x][sink_y].type == device_ctx.EMPTY_TYPE) { + if(device_ctx.grid[sink_x][sink_y].type == 
device_ctx.EMPTY_PHYSICAL_TILE_TYPE) { continue; } @@ -220,7 +220,7 @@ static t_chan_width setup_chan_width(t_router_opts router_opts, if (router_opts.fixed_channel_width == NO_FIXED_CHANNEL_WIDTH) { auto& device_ctx = g_vpr_ctx.device(); - auto type = physical_tile_type(find_most_common_block_type(device_ctx.grid)); + auto type = find_most_common_tile_type(device_ctx.grid); width_fac = 4 * type->num_pins; /*this is 2x the value that binary search starts */ diff --git a/vpr/src/base/SetupGrid.cpp b/vpr/src/base/SetupGrid.cpp index 6712196800b..64115810a44 100644 --- a/vpr/src/base/SetupGrid.cpp +++ b/vpr/src/base/SetupGrid.cpp @@ -227,7 +227,7 @@ static std::vector grid_overused_resources(const Devic for (auto kv : instance_counts) { t_physical_tile_type_ptr type; size_t min_count; - std::tie(type, min_count) = std::make_pair(physical_tile_type(kv.first), kv.second); + std::tie(type, min_count) = std::make_pair(kv.first->equivalent_tiles[0], kv.second); size_t inst_cnt = grid.num_instances(type); @@ -531,7 +531,7 @@ static void set_grid_block_type(int priority, const t_physical_tile_type* type, VTR_ASSERT(grid_priorities[x][y] <= priority); if (grid_tile.type != nullptr - && grid_tile.type != device_ctx.EMPTY_TYPE) { + && grid_tile.type != device_ctx.EMPTY_PHYSICAL_TILE_TYPE) { //We are overriding a non-empty block, we need to be careful //to ensure we remove any blocks which will be invalidated when we //overwrite part of their locations @@ -566,8 +566,8 @@ static void set_grid_block_type(int priority, const t_physical_tile_type* type, // Note: that we explicitly check the type and offsets, since the original block // may have been completely overwritten, and we don't want to change anything // in that case - VTR_ASSERT(device_ctx.EMPTY_TYPE->width == 1); - VTR_ASSERT(device_ctx.EMPTY_TYPE->height == 1); + VTR_ASSERT(device_ctx.EMPTY_PHYSICAL_TILE_TYPE->width == 1); + VTR_ASSERT(device_ctx.EMPTY_PHYSICAL_TILE_TYPE->height == 1); #ifdef VERBOSE VTR_LOG("Ripping up 
block '%s' at (%d,%d) offset (%d,%d). Overlapped by '%s' at (%d,%d)\n", @@ -576,7 +576,7 @@ static void set_grid_block_type(int priority, const t_physical_tile_type* type, type->name, x_root, y_root); #endif - grid[x][y].type = device_ctx.EMPTY_TYPE; + grid[x][y].type = device_ctx.EMPTY_PHYSICAL_TILE_TYPE; grid[x][y].width_offset = 0; grid[x][y].height_offset = 0; @@ -664,7 +664,8 @@ float calculate_device_utilization(const DeviceGrid& grid, std::mapequivalent_tiles[0]; size_t count = kv.second; float type_area = type->width * type->height; diff --git a/vpr/src/base/SetupVPR.cpp b/vpr/src/base/SetupVPR.cpp index 12d6638ce0b..6b3a25c243d 100644 --- a/vpr/src/base/SetupVPR.cpp +++ b/vpr/src/base/SetupVPR.cpp @@ -111,22 +111,38 @@ void SetupVPR(const t_options* Options, *library_models = Arch->model_library; /* TODO: this is inelegant, I should be populating this information in XmlReadArch */ - device_ctx.EMPTY_TYPE = nullptr; + device_ctx.EMPTY_PHYSICAL_TILE_TYPE = nullptr; for (const auto& type : device_ctx.physical_tile_types) { if (strcmp(type.name, EMPTY_BLOCK_NAME) == 0) { - VTR_ASSERT(device_ctx.EMPTY_TYPE == nullptr); - device_ctx.EMPTY_TYPE = &type; + VTR_ASSERT(device_ctx.EMPTY_PHYSICAL_TILE_TYPE == nullptr); + device_ctx.EMPTY_PHYSICAL_TILE_TYPE = &type; } else { - if (block_type_contains_blif_model(logical_block_type(&type), MODEL_INPUT)) { - device_ctx.input_types.insert(&type); + for (const auto& equivalent_site : type.equivalent_sites) { + if (block_type_contains_blif_model(equivalent_site, MODEL_INPUT)) { + device_ctx.input_types.insert(&type); + break; + } } - if (block_type_contains_blif_model(logical_block_type(&type), MODEL_OUTPUT)) { - device_ctx.output_types.insert(&type); + + for (const auto& equivalent_site : type.equivalent_sites) { + if (block_type_contains_blif_model(equivalent_site, MODEL_OUTPUT)) { + device_ctx.output_types.insert(&type); + break; + } } } } - VTR_ASSERT(device_ctx.EMPTY_TYPE != nullptr); + 
device_ctx.EMPTY_LOGICAL_BLOCK_TYPE = nullptr; + for (const auto& type : device_ctx.logical_block_types) { + if (0 == strcmp(type.name, EMPTY_BLOCK_NAME)) { + device_ctx.EMPTY_LOGICAL_BLOCK_TYPE = &type; + break; + } + } + + VTR_ASSERT(device_ctx.EMPTY_PHYSICAL_TILE_TYPE != nullptr); + VTR_ASSERT(device_ctx.EMPTY_LOGICAL_BLOCK_TYPE != nullptr); if (device_ctx.input_types.empty()) { VPR_ERROR(VPR_ERROR_ARCH, diff --git a/vpr/src/base/ShowSetup.cpp b/vpr/src/base/ShowSetup.cpp index 3b6c374af76..b8a66d3345a 100644 --- a/vpr/src/base/ShowSetup.cpp +++ b/vpr/src/base/ShowSetup.cpp @@ -75,7 +75,8 @@ void printClusteredNetlistStats() { for (auto blk_id : cluster_ctx.clb_nlist.blocks()) { num_blocks_type[cluster_ctx.clb_nlist.block_type(blk_id)->index]++; - auto type = physical_tile_type(blk_id); + // XXX mapping here + auto type = cluster_ctx.clb_nlist.block_type(blk_id)->equivalent_tiles[0]; if (is_io_type(type)) { for (j = 0; j < type->num_pins; j++) { if (cluster_ctx.clb_nlist.block_net(blk_id, j) != ClusterNetId::INVALID()) { diff --git a/vpr/src/base/check_netlist.cpp b/vpr/src/base/check_netlist.cpp index 63fa3cd78f2..89e62394955 100644 --- a/vpr/src/base/check_netlist.cpp +++ b/vpr/src/base/check_netlist.cpp @@ -93,8 +93,9 @@ static int check_connections_to_global_clb_pins(ClusterNetId net_id, int verbosi ClusterBlockId blk_id = cluster_ctx.clb_nlist.pin_block(pin_id); int pin_index = cluster_ctx.clb_nlist.pin_physical_index(pin_id); - if (physical_tile_type(blk_id)->is_ignored_pin[pin_index] != net_is_ignored - && !is_io_type(physical_tile_type(blk_id))) { + auto logical_type = cluster_ctx.clb_nlist.block_type(blk_id); + if (physical_tile_type(logical_type)->is_ignored_pin[pin_index] != net_is_ignored + && !is_io_type(physical_tile_type(logical_type))) { VTR_LOGV_WARN(verbosity > 2, "Global net '%s' connects to non-global architecture pin '%s' (netlist pin '%s')\n", cluster_ctx.clb_nlist.net_name(net_id).c_str(), @@ -144,7 +145,7 @@ static int 
check_clb_conn(ClusterBlockId iblk, int num_conn) { /* This case should already have been flagged as an error -- this is * * just a redundant double check. */ - if (num_conn > physical_tile_type(type)->num_pins) { + if (num_conn > type->pb_type->num_pins) { VTR_LOG_ERROR("logic block #%d with output %s has %d pins.\n", iblk, cluster_ctx.clb_nlist.block_name(iblk).c_str(), num_conn); error++; diff --git a/vpr/src/base/clock_modeling.cpp b/vpr/src/base/clock_modeling.cpp index 0e09f4092db..623eb3d7d6c 100644 --- a/vpr/src/base/clock_modeling.cpp +++ b/vpr/src/base/clock_modeling.cpp @@ -6,7 +6,7 @@ void ClockModeling::treat_clock_pins_as_non_globals() { auto& device_ctx = g_vpr_ctx.mutable_device(); for (const auto& type : device_ctx.physical_tile_types) { - if (logical_block_type(&type)->pb_type) { + if (!is_empty_type(&type)) { for (auto clock_pin_idx : type.get_clock_pins_indices()) { // clock pins should be originally considered as global when reading the architecture VTR_ASSERT(type.is_ignored_pin[clock_pin_idx]); diff --git a/vpr/src/base/clustered_netlist.cpp b/vpr/src/base/clustered_netlist.cpp index a2fc8c31daf..03bbdce99a1 100644 --- a/vpr/src/base/clustered_netlist.cpp +++ b/vpr/src/base/clustered_netlist.cpp @@ -52,7 +52,7 @@ int ClusteredNetlist::block_pin_net_index(const ClusterBlockId blk_id, const int ClusterPinId ClusteredNetlist::block_pin(const ClusterBlockId blk, const int phys_pin_index) const { VTR_ASSERT_SAFE(valid_block_id(blk)); - VTR_ASSERT_SAFE_MSG(phys_pin_index >= 0 && phys_pin_index < physical_tile_type(block_type(blk))->num_pins, "Physical pin index must be in range"); + VTR_ASSERT_SAFE_MSG(phys_pin_index >= 0 && phys_pin_index < block_type(blk)->pb_type->num_pins, "Physical pin index must be in range"); return block_logical_pins_[blk][phys_pin_index]; } @@ -81,6 +81,12 @@ int ClusteredNetlist::pin_physical_index(const ClusterPinId id) const { return pin_physical_index_[id]; } +int ClusteredNetlist::pin_logical_index(const ClusterPinId 
pin_id) const { + VTR_ASSERT_SAFE(valid_pin_id(pin_id)); + + return pin_logical_index_[pin_id]; +} + int ClusteredNetlist::net_pin_physical_index(const ClusterNetId net_id, int net_pin_index) const { auto pin_id = net_pin(net_id, net_pin_index); @@ -122,7 +128,7 @@ ClusterBlockId ClusteredNetlist::create_block(const char* name, t_pb* pb, t_logi block_types_.insert(blk_id, type); //Allocate and initialize every potential pin of the block - block_logical_pins_.insert(blk_id, std::vector(physical_tile_type(type)->num_pins, ClusterPinId::INVALID())); + block_logical_pins_.insert(blk_id, std::vector(get_max_num_pins(type), ClusterPinId::INVALID())); } //Check post-conditions: size @@ -170,6 +176,7 @@ ClusterPinId ClusteredNetlist::create_pin(const ClusterPortId port_id, BitIndex ClusterPinId pin_id = Netlist::create_pin(port_id, port_bit, net_id, pin_type_, is_const); pin_physical_index_.push_back(pin_index); + pin_logical_index_.push_back(pin_index); ClusterBlockId block_id = port_block(port_id); block_logical_pins_[block_id][pin_index] = pin_id; @@ -254,7 +261,7 @@ void ClusteredNetlist::clean_nets_impl(const vtr::vector_map& /*pin_id_map*/, const vtr::vector_map& /*port_id_map*/) { for (auto blk : blocks()) { - block_logical_pins_[blk] = std::vector(physical_tile_type(blk)->num_pins, ClusterPinId::INVALID()); //Reset + block_logical_pins_[blk] = std::vector(get_max_num_pins(block_type(blk)), ClusterPinId::INVALID()); //Reset for (auto pin : block_pins(blk)) { int phys_pin_index = pin_physical_index(pin); block_logical_pins_[blk][phys_pin_index] = pin; @@ -284,6 +291,7 @@ void ClusteredNetlist::shrink_to_fit_impl() { //Pin data pin_physical_index_.shrink_to_fit(); + pin_logical_index_.shrink_to_fit(); //Net data net_is_ignored_.shrink_to_fit(); diff --git a/vpr/src/base/clustered_netlist.h b/vpr/src/base/clustered_netlist.h index 2147c8557ed..62a4da70517 100644 --- a/vpr/src/base/clustered_netlist.h +++ b/vpr/src/base/clustered_netlist.h @@ -148,9 +148,13 @@ class 
ClusteredNetlist : public Netlist block_pbs_; //Physical block representing the clustering & internal hierarchy of each CLB - vtr::vector_map block_types_; //The type of physical block this user circuit block is mapped to - vtr::vector_map> block_logical_pins_; //The logical pin associated with each physical block pin + vtr::vector_map block_types_; //The type of logical block this user circuit block is mapped to + vtr::vector_map> block_logical_pins_; //The logical pin associated with each physical tile pin //Pins vtr::vector_map pin_physical_index_; //The physical pin index (i.e. pin index - //in t_logical_block_type) of logical pins + //in t_physical_tile_type) corresponding + //to the logical pin + vtr::vector_map pin_logical_index_; //The logical pin index of this block //Nets vtr::vector_map net_is_ignored_; //Boolean mapping indicating if the net is ignored diff --git a/vpr/src/base/clustered_netlist_utils.cpp b/vpr/src/base/clustered_netlist_utils.cpp index a642405e82c..2ccdb7bc6ff 100644 --- a/vpr/src/base/clustered_netlist_utils.cpp +++ b/vpr/src/base/clustered_netlist_utils.cpp @@ -17,7 +17,7 @@ void ClusteredPinAtomPinsLookup::init_lookup(const ClusteredNetlist& clustered_n clustered_pin_connected_atom_pins_.resize(clustered_pins.size()); for (ClusterPinId clustered_pin : clustered_pins) { auto clustered_block = clustered_netlist.pin_block(clustered_pin); - int phys_pin_index = clustered_netlist.pin_physical_index(clustered_pin); - clustered_pin_connected_atom_pins_[clustered_pin] = find_clb_pin_connected_atom_pins(clustered_block, phys_pin_index, pb_gpin_lookup); + int log_pin_index = clustered_netlist.pin_logical_index(clustered_pin); + clustered_pin_connected_atom_pins_[clustered_pin] = find_clb_pin_connected_atom_pins(clustered_block, log_pin_index, pb_gpin_lookup); } } diff --git a/vpr/src/base/device_grid.cpp b/vpr/src/base/device_grid.cpp index c37f3eed4a9..3be488d26dd 100644 --- a/vpr/src/base/device_grid.cpp +++ b/vpr/src/base/device_grid.cpp 
@@ -13,7 +13,7 @@ DeviceGrid::DeviceGrid(std::string grid_name, vtr::Matrix grid, std } size_t DeviceGrid::num_instances(t_physical_tile_type_ptr type) const { - auto iter = instance_counts_.find(logical_block_type(type)); + auto iter = instance_counts_.find(type); if (iter != instance_counts_.end()) { //Return count return iter->second; @@ -36,7 +36,7 @@ void DeviceGrid::count_instances() { if (grid_[x][y].width_offset == 0 && grid_[x][y].height_offset == 0) { //Add capacity only if this is the root location - instance_counts_[logical_block_type(type)] += type->capacity; + instance_counts_[type] += type->capacity; } } } diff --git a/vpr/src/base/device_grid.h b/vpr/src/base/device_grid.h index 9247aac1e2d..6f0584c94db 100644 --- a/vpr/src/base/device_grid.h +++ b/vpr/src/base/device_grid.h @@ -37,7 +37,7 @@ class DeviceGrid { //traditional 2-d indexing to be used vtr::Matrix grid_; - std::map instance_counts_; + std::map instance_counts_; std::vector limiting_resources_; }; diff --git a/vpr/src/base/read_netlist.cpp b/vpr/src/base/read_netlist.cpp index cfbb78c384b..f8c6c79130d 100644 --- a/vpr/src/base/read_netlist.cpp +++ b/vpr/src/base/read_netlist.cpp @@ -857,6 +857,7 @@ static void load_external_nets_and_cb(ClusteredNetlist& clb_nlist) { ClusterNetId clb_net_id; auto& atom_ctx = g_vpr_ctx.atom(); + auto& device_ctx = g_vpr_ctx.device(); ext_nhash = alloc_hash_table(); @@ -868,14 +869,13 @@ static void load_external_nets_and_cb(ClusteredNetlist& clb_nlist) { /* Determine the external nets of complex block */ for (auto blk_id : clb_nlist.blocks()) { block_type = clb_nlist.block_type(blk_id); - tile_type = physical_tile_type(block_type); const t_pb* pb = clb_nlist.block_pb(blk_id); ipin = 0; VTR_ASSERT(block_type->pb_type->num_input_pins + block_type->pb_type->num_output_pins + block_type->pb_type->num_clock_pins - == tile_type->num_pins / tile_type->capacity); + == block_type->pb_type->num_pins); int num_input_ports = pb->pb_graph_node->num_input_ports; int 
num_output_ports = pb->pb_graph_node->num_output_ports; @@ -951,7 +951,8 @@ static void load_external_nets_and_cb(ClusteredNetlist& clb_nlist) { * and blocks point back to net pins */ for (auto blk_id : clb_nlist.blocks()) { block_type = clb_nlist.block_type(blk_id); - tile_type = physical_tile_type(block_type); + // XXX Use pin mapping here! To check that all the possible pins can be used in the correct tile! + tile_type = find_block_type_by_name(block_type->name, device_ctx.physical_tile_types); for (j = 0; j < tile_type->num_pins; j++) { //Iterate through each pin of the block, and see if there is a net allocated/used for it clb_net_id = clb_nlist.block_net(blk_id, j); diff --git a/vpr/src/base/read_place.cpp b/vpr/src/base/read_place.cpp index 7c81a3158f3..8b6afb65d9b 100644 --- a/vpr/src/base/read_place.cpp +++ b/vpr/src/base/read_place.cpp @@ -160,7 +160,8 @@ void read_user_pad_loc(const char* pad_loc_file) { hash_table = alloc_hash_table(); for (auto blk_id : cluster_ctx.clb_nlist.blocks()) { - if (is_io_type(physical_tile_type(blk_id))) { + auto logical_block = cluster_ctx.clb_nlist.block_type(blk_id); + if (is_io_type(physical_tile_type(logical_block))) { insert_in_hash_table(hash_table, cluster_ctx.clb_nlist.block_name(blk_id).c_str(), size_t(blk_id)); place_ctx.block_locs[blk_id].loc.x = OPEN; /* Mark as not seen yet. 
*/ } @@ -266,7 +267,8 @@ void read_user_pad_loc(const char* pad_loc_file) { } for (auto blk_id : cluster_ctx.clb_nlist.blocks()) { - auto type = physical_tile_type(blk_id); + auto logical_block = cluster_ctx.clb_nlist.block_type(blk_id); + auto type = physical_tile_type(logical_block); if (is_io_type(type) && place_ctx.block_locs[blk_id].loc.x == OPEN) { vpr_throw(VPR_ERROR_PLACE_F, pad_loc_file, 0, "IO block %s location was not specified in the pad file.\n", cluster_ctx.clb_nlist.block_name(blk_id).c_str()); diff --git a/vpr/src/base/stats.cpp b/vpr/src/base/stats.cpp index 28eb70c45db..846185ee215 100644 --- a/vpr/src/base/stats.cpp +++ b/vpr/src/base/stats.cpp @@ -61,7 +61,7 @@ void routing_stats(bool full_stats, enum e_route_type route_type, std::vectorarea == UNDEFINED) { area += grid_logic_tile_area * type->width * type->height; } else { diff --git a/vpr/src/base/vpr_api.cpp b/vpr/src/base/vpr_api.cpp index 0854b644f8b..b8f8b7a0710 100644 --- a/vpr/src/base/vpr_api.cpp +++ b/vpr/src/base/vpr_api.cpp @@ -418,11 +418,17 @@ void vpr_create_device_grid(const t_vpr_setup& vpr_setup, const t_arch& Arch) { VTR_LOG("\n"); VTR_LOG("Resource usage...\n"); - for (const auto& type : device_ctx.physical_tile_types) { - VTR_LOG("\tNetlist %d\tblocks of type: %s\n", - num_type_instances[logical_block_type(&type)], type.name); - VTR_LOG("\tArchitecture %d\tblocks of type: %s\n", - device_ctx.grid.num_instances(&type), type.name); + for (const auto& type : device_ctx.logical_block_types) { + if (is_empty_type(&type)) continue; + + VTR_LOG("\tNetlist\n\t\t%d\tblocks of type: %s\n", + num_type_instances[&type], type.name); + + VTR_LOG("\tArchitecture\n"); + for(const auto equivalent_tile : type.equivalent_tiles) { + VTR_LOG("\t\t%d\tblocks of type: %s\n", + device_ctx.grid.num_instances(equivalent_tile), equivalent_tile->name); + } } VTR_LOG("\n"); @@ -880,7 +886,7 @@ static void get_intercluster_switch_fanin_estimates(const t_vpr_setup& vpr_setup //Build a dummy 10x10 device 
to determine the 'best' block type to use auto grid = create_device_grid(vpr_setup.device_layout, arch.grid_layouts, 10, 10); - auto type = physical_tile_type(find_most_common_block_type(grid)); + auto type = find_most_common_tile_type(grid); /* get Fc_in/out for most common block (e.g. logic blocks) */ VTR_ASSERT(type->fc_specs.size() > 0); diff --git a/vpr/src/base/vpr_context.h b/vpr/src/base/vpr_context.h index 7a72c7b2c97..8a275b53c0c 100644 --- a/vpr/src/base/vpr_context.h +++ b/vpr/src/base/vpr_context.h @@ -120,7 +120,10 @@ struct DeviceContext : public Context { /* Special pointers to identify special blocks on an FPGA: I/Os, unused, and default */ std::set input_types; std::set output_types; - t_physical_tile_type_ptr EMPTY_TYPE; + + /* Empty types */ + t_physical_tile_type_ptr EMPTY_PHYSICAL_TILE_TYPE; + t_logical_block_type_ptr EMPTY_LOGICAL_BLOCK_TYPE; /* block_types are blocks that can be moved by the placer * such as: I/Os, CLBs, memories, multipliers, etc diff --git a/vpr/src/draw/draw.cpp b/vpr/src/draw/draw.cpp index 1e099500fc1..680c6053c6d 100644 --- a/vpr/src/draw/draw.cpp +++ b/vpr/src/draw/draw.cpp @@ -2654,15 +2654,16 @@ void draw_highlight_blocks_color(t_logical_block_type_ptr type, ClusterBlockId b t_draw_state* draw_state = get_draw_state_vars(); auto& cluster_ctx = g_vpr_ctx.clustering(); - for (k = 0; k < physical_tile_type(type)->num_pins; k++) { /* Each pin on a CLB */ + for (k = 0; k < type->pb_type->num_pins; k++) { /* Each pin on a CLB */ ClusterNetId net_id = cluster_ctx.clb_nlist.block_net(blk_id, k); if (net_id == ClusterNetId::INVALID()) continue; - iclass = physical_tile_type(type)->pin_class[k]; + // XXX Logical Physical Mapping to be used here + iclass = physical_tile_type(blk_id)->pin_class[k]; - if (physical_tile_type(type)->class_inf[iclass].type == DRIVER) { /* Fanout */ + if (physical_tile_type(blk_id)->class_inf[iclass].type == DRIVER) { /* Fanout */ if (draw_state->block_color[blk_id] == SELECTED_COLOR) { /* If block 
already highlighted, de-highlight the fanout. (the deselect case)*/ draw_state->net_color[net_id] = ezgl::BLACK; @@ -2712,7 +2713,8 @@ void deselect_all() { /* Create some colour highlighting */ for (auto blk_id : cluster_ctx.clb_nlist.blocks()) { - draw_reset_blk_color(blk_id); + if (blk_id != ClusterBlockId::INVALID()) + draw_reset_blk_color(blk_id); } for (auto net_id : cluster_ctx.clb_nlist.nets()) @@ -2726,9 +2728,13 @@ void deselect_all() { } static void draw_reset_blk_color(ClusterBlockId blk_id) { + auto& cluster_ctx = g_vpr_ctx.clustering(); + + auto logical_block = cluster_ctx.clb_nlist.block_type(blk_id); + t_draw_state* draw_state = get_draw_state_vars(); - draw_state->block_color[blk_id] = get_block_type_color(physical_tile_type(blk_id)); + draw_state->block_color[blk_id] = get_block_type_color(physical_tile_type(logical_block)); } /** @@ -3305,10 +3311,8 @@ static void draw_block_pin_util() { continue; } - t_pb_type* pb_type = logical_block_type(&type)->pb_type; - - total_input_pins[&type] = pb_type->num_input_pins + pb_type->num_clock_pins; - total_output_pins[&type] = pb_type->num_output_pins; + total_input_pins[&type] = type.num_input_pins + type.num_clock_pins; + total_output_pins[&type] = type.num_output_pins; } auto blks = cluster_ctx.clb_nlist.blocks(); diff --git a/vpr/src/draw/intra_logic_block.cpp b/vpr/src/draw/intra_logic_block.cpp index 40eb8aa0ad9..89740e3ad33 100644 --- a/vpr/src/draw/intra_logic_block.cpp +++ b/vpr/src/draw/intra_logic_block.cpp @@ -68,7 +68,7 @@ void draw_internal_alloc_blk() { draw_coords->blk_info.resize(device_ctx.logical_block_types.size()); for (const auto& type : device_ctx.logical_block_types) { - if (physical_tile_type(&type) == device_ctx.EMPTY_TYPE) { + if (&type == device_ctx.EMPTY_LOGICAL_BLOCK_TYPE) { continue; } @@ -92,7 +92,7 @@ void draw_internal_init_blk() { auto& device_ctx = g_vpr_ctx.device(); for (const auto& type : device_ctx.physical_tile_types) { /* Empty block has no sub_blocks */ - if (&type 
== device_ctx.EMPTY_TYPE) + if (&type == device_ctx.EMPTY_PHYSICAL_TILE_TYPE) continue; pb_graph_head_node = logical_block_type(&type)->pb_graph_head; @@ -151,7 +151,7 @@ void draw_internal_draw_subblk(ezgl::renderer* g) { continue; /* Don't draw if tile is empty. This includes corners. */ - if (device_ctx.grid[i][j].type == device_ctx.EMPTY_TYPE) + if (device_ctx.grid[i][j].type == device_ctx.EMPTY_PHYSICAL_TILE_TYPE) continue; int num_sub_tiles = device_ctx.grid[i][j].type->capacity; diff --git a/vpr/src/pack/cluster.cpp b/vpr/src/pack/cluster.cpp index 20086355b95..6bb85180995 100644 --- a/vpr/src/pack/cluster.cpp +++ b/vpr/src/pack/cluster.cpp @@ -451,7 +451,7 @@ std::map do_clustering(const t_packer_opts& pa num_molecules = count_molecules(molecule_head); for (const auto& type : device_ctx.logical_block_types) { - if (device_ctx.EMPTY_TYPE == physical_tile_type(&type)) + if (is_empty_type(&type)) continue; cur_cluster_size = get_max_primitives_in_pb_type(type.pb_type); @@ -1963,8 +1963,14 @@ static void start_new_cluster(t_cluster_placement_stats* cluster_placement_stats //support the same primitive(s). 
std::stable_sort(candidate_types.begin(), candidate_types.end(), [&](t_logical_block_type_ptr lhs, t_logical_block_type_ptr rhs) { - float lhs_util = vtr::safe_ratio(num_used_type_instances[lhs], device_ctx.grid.num_instances(physical_tile_type(lhs))); - float rhs_util = vtr::safe_ratio(num_used_type_instances[rhs], device_ctx.grid.num_instances(physical_tile_type(rhs))); + int lhs_num_instances = 0; + int rhs_num_instances = 0; + // Count number of instances for each type + for (auto type : lhs->equivalent_tiles) lhs_num_instances += device_ctx.grid.num_instances(type); + for (auto type : rhs->equivalent_tiles) rhs_num_instances += device_ctx.grid.num_instances(type); + + float lhs_util = vtr::safe_ratio(num_used_type_instances[lhs], lhs_num_instances); + float rhs_util = vtr::safe_ratio(num_used_type_instances[rhs], rhs_num_instances); //Lower util first return lhs_util < rhs_util; }); @@ -2053,10 +2059,17 @@ static void start_new_cluster(t_cluster_placement_stats* cluster_placement_stats VTR_ASSERT(success); //Successfully create cluster - num_used_type_instances[clb_nlist->block_type(clb_index)]++; + auto block_type = clb_nlist->block_type(clb_index); + num_used_type_instances[block_type]++; /* Expand FPGA size if needed */ - if (num_used_type_instances[clb_nlist->block_type(clb_index)] > device_ctx.grid.num_instances(physical_tile_type(clb_index))) { + // Check used type instances against the possible equivalent physical locations + unsigned int num_instances = 0; + for (auto equivalent_tile : block_type->equivalent_tiles) { + num_instances += device_ctx.grid.num_instances(equivalent_tile); + } + + if (num_used_type_instances[block_type] > num_instances) { device_ctx.grid = create_device_grid(device_layout_name, arch->grid_layouts, num_used_type_instances, target_device_utilization); VTR_LOGV(verbosity > 0, "Not enough resources expand FPGA size to (%d x %d)\n", device_ctx.grid.width(), device_ctx.grid.height()); diff --git a/vpr/src/pack/cluster_placement.cpp 
b/vpr/src/pack/cluster_placement.cpp index 807908f3c57..36a78bec6e8 100644 --- a/vpr/src/pack/cluster_placement.cpp +++ b/vpr/src/pack/cluster_placement.cpp @@ -63,7 +63,7 @@ t_cluster_placement_stats* alloc_and_load_cluster_placement_stats() { cluster_placement_stats_list = (t_cluster_placement_stats*)vtr::calloc(device_ctx.logical_block_types.size(), sizeof(t_cluster_placement_stats)); for (const auto& type : device_ctx.logical_block_types) { - if (device_ctx.EMPTY_TYPE != physical_tile_type(&type)) { + if (!is_empty_type(&type)) { cluster_placement_stats_list[type.index].valid_primitives = (t_cluster_placement_primitive**)vtr::calloc( get_max_primitives_in_pb_type(type.pb_type) + 1, sizeof(t_cluster_placement_primitive*)); /* too much memory allocated but shouldn't be a problem */ diff --git a/vpr/src/pack/lb_type_rr_graph.cpp b/vpr/src/pack/lb_type_rr_graph.cpp index 8f2763d53df..cc600ca47f7 100644 --- a/vpr/src/pack/lb_type_rr_graph.cpp +++ b/vpr/src/pack/lb_type_rr_graph.cpp @@ -58,7 +58,7 @@ std::vector* alloc_and_load_all_lb_type_rr_graph() { for (const auto& type : device_ctx.logical_block_types) { int itype = type.index; - if (physical_tile_type(&type) != device_ctx.EMPTY_TYPE) { + if (&type != device_ctx.EMPTY_LOGICAL_BLOCK_TYPE) { alloc_and_load_lb_type_rr_graph_for_type(&type, lb_type_rr_graphs[itype]); /* Now that the data is loaded, reallocate to the precise amount of memory needed to prevent insidious bugs */ @@ -75,7 +75,7 @@ void free_all_lb_type_rr_graph(std::vector* lb_type_rr_graphs for (const auto& type : device_ctx.logical_block_types) { int itype = type.index; - if (physical_tile_type(&type) != device_ctx.EMPTY_TYPE) { + if (!is_empty_type(&type)) { int graph_size = lb_type_rr_graphs[itype].size(); for (int inode = 0; inode < graph_size; inode++) { t_lb_type_rr_node* node = &lb_type_rr_graphs[itype][inode]; @@ -133,7 +133,7 @@ void echo_lb_type_rr_graphs(char* filename, std::vector* lb_t auto& device_ctx = g_vpr_ctx.device(); for (const 
auto& type : device_ctx.logical_block_types) { - if (physical_tile_type(&type) != device_ctx.EMPTY_TYPE) { + if (!is_empty_type(&type)) { fprintf(fp, "--------------------------------------------------------------\n"); fprintf(fp, "Intra-Logic Block Routing Resource For Type %s\n", type.name); fprintf(fp, "--------------------------------------------------------------\n"); diff --git a/vpr/src/pack/output_clustering.cpp b/vpr/src/pack/output_clustering.cpp index 9987be6a32c..e51c3e706dd 100644 --- a/vpr/src/pack/output_clustering.cpp +++ b/vpr/src/pack/output_clustering.cpp @@ -63,7 +63,8 @@ static void print_stats() { /* Counters used only for statistics purposes. */ for (auto blk_id : cluster_ctx.clb_nlist.blocks()) { - auto type = physical_tile_type(blk_id); + // XXX Use mapping here + auto type = cluster_ctx.clb_nlist.block_type(blk_id)->equivalent_tiles[0]; for (ipin = 0; ipin < type->num_pins; ipin++) { if (cluster_ctx.clb_nlist.block_pb(blk_id)->pb_route.empty()) { ClusterNetId clb_net_id = cluster_ctx.clb_nlist.block_net(blk_id, ipin); diff --git a/vpr/src/pack/pack.cpp b/vpr/src/pack/pack.cpp index 0dfd4349126..0a263458036 100644 --- a/vpr/src/pack/pack.cpp +++ b/vpr/src/pack/pack.cpp @@ -169,7 +169,11 @@ bool try_pack(t_packer_opts* packer_opts, } resource_reqs += std::string(iter->first->name) + ": " + std::to_string(iter->second); - resource_avail += std::string(iter->first->name) + ": " + std::to_string(grid.num_instances(physical_tile_type(iter->first))); + + int num_instances = 0; + for (auto type : iter->first->equivalent_tiles) num_instances += grid.num_instances(type); + + resource_avail += std::string(iter->first->name) + ": " + std::to_string(num_instances); } VPR_FATAL_ERROR(VPR_ERROR_OTHER, "Failed to find device which satisifies resource requirements required: %s (available %s)", resource_reqs.c_str(), resource_avail.c_str()); @@ -274,14 +278,21 @@ static bool try_size_device_grid(const t_arch& arch, const std::map type_util; for (const auto& 
type : device_ctx.logical_block_types) { - auto physical_type = physical_tile_type(&type); + if(is_empty_type(&type)) continue; + auto itr = num_type_instances.find(&type); if (itr == num_type_instances.end()) continue; float num_instances = itr->second; float util = 0.; - if (device_ctx.grid.num_instances(physical_type) != 0) { - util = num_instances / device_ctx.grid.num_instances(physical_type); + + float num_total_instances = 0.; + for (const auto& equivalent_tile : type.equivalent_tiles) { + num_total_instances += device_ctx.grid.num_instances(equivalent_tile); + } + + if (num_total_instances != 0) { + util = num_instances / num_total_instances; } type_util[&type] = util; diff --git a/vpr/src/pack/pack_report.cpp b/vpr/src/pack/pack_report.cpp index a0f920b6df1..c571a2737d9 100644 --- a/vpr/src/pack/pack_report.cpp +++ b/vpr/src/pack/pack_report.cpp @@ -15,22 +15,22 @@ void report_packing_pin_usage(std::ostream& os, const VprContext& ctx) { auto& cluster_ctx = ctx.clustering(); auto& device_ctx = ctx.device(); - std::map total_input_pins; - std::map total_output_pins; - for (auto const& type : device_ctx.physical_tile_types) { + std::map total_input_pins; + std::map total_output_pins; + for (auto const& type : device_ctx.logical_block_types) { if (is_empty_type(&type)) continue; - t_pb_type* pb_type = logical_block_type(&type)->pb_type; + t_pb_type* pb_type = type.pb_type; total_input_pins[&type] = pb_type->num_input_pins + pb_type->num_clock_pins; total_output_pins[&type] = pb_type->num_output_pins; } - std::map> inputs_used; - std::map> outputs_used; + std::map> inputs_used; + std::map> outputs_used; for (auto blk : cluster_ctx.clb_nlist.blocks()) { - t_physical_tile_type_ptr type = physical_tile_type(blk); + t_logical_block_type_ptr type = cluster_ctx.clb_nlist.block_type(blk); inputs_used[type].push_back(cluster_ctx.clb_nlist.block_input_pins(blk).size() + cluster_ctx.clb_nlist.block_clock_pins(blk).size()); 
outputs_used[type].push_back(cluster_ctx.clb_nlist.block_output_pins(blk).size()); @@ -40,8 +40,8 @@ void report_packing_pin_usage(std::ostream& os, const VprContext& ctx) { os << std::fixed << std::setprecision(2); - for (auto const& physical_type : device_ctx.physical_tile_types) { - auto type = &physical_type; + for (auto const& logical_type : device_ctx.logical_block_types) { + auto type = &logical_type; if (is_empty_type(type)) continue; if (!inputs_used.count(type)) continue; diff --git a/vpr/src/pack/pb_type_graph.cpp b/vpr/src/pack/pb_type_graph.cpp index d5c7324a60d..8d60c3072a5 100644 --- a/vpr/src/pack/pb_type_graph.cpp +++ b/vpr/src/pack/pb_type_graph.cpp @@ -133,7 +133,7 @@ void alloc_and_load_all_pb_graphs(bool load_power_structures) { load_pin_classes_in_pb_graph_head(type.pb_graph_head); } else { type.pb_graph_head = nullptr; - VTR_ASSERT(physical_tile_type(&type) == device_ctx.EMPTY_TYPE); + VTR_ASSERT(&type == device_ctx.EMPTY_LOGICAL_BLOCK_TYPE); } } diff --git a/vpr/src/place/move_utils.cpp b/vpr/src/place/move_utils.cpp index e9751f684d4..94b6422a3f2 100644 --- a/vpr/src/place/move_utils.cpp +++ b/vpr/src/place/move_utils.cpp @@ -497,27 +497,27 @@ bool find_to_loc_uniform(t_physical_tile_type_ptr type, //case with a physical distance rlim) auto& grid = g_vpr_ctx.device().grid; - auto grid_type = grid[from.x][from.y].type; - VTR_ASSERT(type == grid_type); + auto from_type = grid[from.x][from.y].type; //Retrieve the compressed block grid for this block type - const auto& compressed_block_grid = g_vpr_ctx.placement().compressed_block_grids[type->index]; + const auto& to_compressed_block_grid = g_vpr_ctx.placement().compressed_block_grids[type->index]; + const auto& from_compressed_block_grid = g_vpr_ctx.placement().compressed_block_grids[from_type->index]; //Determine the rlim in each dimension - int rlim_x = std::min(compressed_block_grid.compressed_to_grid_x.size(), rlim); - int rlim_y = 
std::min(compressed_block_grid.compressed_to_grid_y.size(), rlim); /* for aspect_ratio != 1 case. */ + int rlim_x = std::min(to_compressed_block_grid.compressed_to_grid_x.size(), rlim); + int rlim_y = std::min(to_compressed_block_grid.compressed_to_grid_y.size(), rlim); /* for aspect_ratio != 1 case. */ //Determine the coordinates in the compressed grid space of the current block - int cx_from = grid_to_compressed(compressed_block_grid.compressed_to_grid_x, from.x); - int cy_from = grid_to_compressed(compressed_block_grid.compressed_to_grid_y, from.y); + int cx_from = grid_to_compressed(from_compressed_block_grid.compressed_to_grid_x, from.x); + int cy_from = grid_to_compressed(from_compressed_block_grid.compressed_to_grid_y, from.y); - //Determin the valid compressed grid location ranges + //Determine the valid compressed grid location ranges int min_cx = std::max(0, cx_from - rlim_x); - int max_cx = std::min(compressed_block_grid.compressed_to_grid_x.size() - 1, cx_from + rlim_x); + int max_cx = std::min(to_compressed_block_grid.compressed_to_grid_x.size() - 1, cx_from + rlim_x); int delta_cx = max_cx - min_cx; int min_cy = std::max(0, cy_from - rlim_y); - int max_cy = std::min(compressed_block_grid.compressed_to_grid_y.size() - 1, cy_from + rlim_y); + int max_cy = std::min(to_compressed_block_grid.compressed_to_grid_y.size() - 1, cy_from + rlim_y); int cx_to = OPEN; int cy_to = OPEN; @@ -544,19 +544,19 @@ bool find_to_loc_uniform(t_physical_tile_type_ptr type, // //The candidates are stored in a flat_map so we can efficiently find the set of valid //candidates with upper/lower bound. 
- auto y_lower_iter = compressed_block_grid.grid[cx_to].lower_bound(min_cy); - if (y_lower_iter == compressed_block_grid.grid[cx_to].end()) { + auto y_lower_iter = to_compressed_block_grid.grid[cx_to].lower_bound(min_cy); + if (y_lower_iter == to_compressed_block_grid.grid[cx_to].end()) { continue; } - auto y_upper_iter = compressed_block_grid.grid[cx_to].upper_bound(max_cy); + auto y_upper_iter = to_compressed_block_grid.grid[cx_to].upper_bound(max_cy); if (y_lower_iter->first > min_cy) { //No valid blocks at this x location which are within rlim_y // //Fall back to allow the whole y range - y_lower_iter = compressed_block_grid.grid[cx_to].begin(); - y_upper_iter = compressed_block_grid.grid[cx_to].end(); + y_lower_iter = to_compressed_block_grid.grid[cx_to].begin(); + y_upper_iter = to_compressed_block_grid.grid[cx_to].end(); min_cy = y_lower_iter->first; max_cy = (y_upper_iter - 1)->first; @@ -602,17 +602,16 @@ bool find_to_loc_uniform(t_physical_tile_type_ptr type, VTR_ASSERT(cy_to != OPEN); //Convert to true (uncompressed) grid locations - to.x = compressed_block_grid.compressed_to_grid_x[cx_to]; - to.y = compressed_block_grid.compressed_to_grid_y[cy_to]; + to.x = to_compressed_block_grid.compressed_to_grid_x[cx_to]; + to.y = to_compressed_block_grid.compressed_to_grid_y[cy_to]; //Each x/y location contains only a single type, so we can pick a random //z (capcity) location to.z = vtr::irand(type->capacity - 1); - auto& device_ctx = g_vpr_ctx.device(); - VTR_ASSERT_MSG(device_ctx.grid[to.x][to.y].type == type, "Type must match"); - VTR_ASSERT_MSG(device_ctx.grid[to.x][to.y].width_offset == 0, "Should be at block base location"); - VTR_ASSERT_MSG(device_ctx.grid[to.x][to.y].height_offset == 0, "Should be at block base location"); + VTR_ASSERT_MSG(grid[to.x][to.y].type == type, "Type must match"); + VTR_ASSERT_MSG(grid[to.x][to.y].width_offset == 0, "Should be at block base location"); + VTR_ASSERT_MSG(grid[to.x][to.y].height_offset == 0, "Should be at block base 
location"); return true; } diff --git a/vpr/src/place/place.cpp b/vpr/src/place/place.cpp index 15b922ebb70..1744ea66fee 100644 --- a/vpr/src/place/place.cpp +++ b/vpr/src/place/place.cpp @@ -297,7 +297,7 @@ static int try_place_macro(int itype, int ipos, int imacro); static void initial_placement_pl_macros(int macros_max_num_tries, int* free_locations); static void initial_placement_blocks(int* free_locations, enum e_pad_loc_type pad_loc_type); -static void initial_placement_location(const int* free_locations, ClusterBlockId blk_id, int& pipos, t_pl_loc& to); +static void initial_placement_location(const int* free_locations, int& pipos, int itype, t_pl_loc& to); static void initial_placement(enum e_pad_loc_type pad_loc_type, const char* pad_loc_file); @@ -509,6 +509,7 @@ void try_place(const t_placer_opts& placer_opts, directs, num_directs); initial_placement(placer_opts.pad_loc_type, placer_opts.pad_loc_file.c_str()); + init_draw_coords((float)width_fac); //Enables fast look-up of atom pins connect to CLB pins ClusteredPinAtomPinsLookup netlist_pin_lookup(cluster_ctx.clb_nlist, pb_gpin_lookup); @@ -2406,7 +2407,8 @@ static void initial_placement_pl_macros(int macros_max_num_tries, int* free_loca // Assume that all the blocks in the macro are of the same type blk_id = pl_macros[imacro].members[0].blk_index; - auto type = physical_tile_type(blk_id); + auto logical_block = cluster_ctx.clb_nlist.block_type(blk_id); + auto type = pick_random_placement_type(logical_block); itype = type->index; if (free_locations[itype] < int(pl_macros[imacro].members.size())) { VPR_FATAL_ERROR(VPR_ERROR_PLACE, @@ -2471,15 +2473,19 @@ static void initial_placement_blocks(int* free_locations, enum e_pad_loc_type pa continue; } + auto logical_block = cluster_ctx.clb_nlist.block_type(blk_id); + /* Don't do IOs if the user specifies IOs; we'll read those locations later. 
*/ - if (!(is_io_type(physical_tile_type(blk_id)) && pad_loc_type == USER)) { + if (!(is_io_type(physical_tile_type(logical_block)) && pad_loc_type == USER)) { /* Randomly select a free location of the appropriate type for blk_id. * We have a linearized list of all the free locations that can * accommodate a block of that type in free_locations[itype]. * Choose one randomly and put blk_id there. Then we don't want to pick * that location again, so remove it from the free_locations array. */ - itype = cluster_ctx.clb_nlist.block_type(blk_id)->index; + + itype = pick_random_placement_type(logical_block)->index; + if (free_locations[itype] <= 0) { VPR_FATAL_ERROR(VPR_ERROR_PLACE, "Initial placement failed.\n" @@ -2488,7 +2494,7 @@ static void initial_placement_blocks(int* free_locations, enum e_pad_loc_type pa } t_pl_loc to; - initial_placement_location(free_locations, blk_id, ipos, to); + initial_placement_location(free_locations, ipos, itype, to); // Make sure that the position is EMPTY_BLOCK before placing the block down VTR_ASSERT(place_ctx.grid_blocks[to.x][to.y].blocks[to.z] == EMPTY_BLOCK_ID); @@ -2499,7 +2505,7 @@ static void initial_placement_blocks(int* free_locations, enum e_pad_loc_type pa place_ctx.block_locs[blk_id].loc = to; //Mark IOs as fixed if specifying a (fixed) random placement - if (is_io_type(physical_tile_type(blk_id)) && pad_loc_type == RANDOM) { + if (is_io_type(physical_tile_type(logical_block)) && pad_loc_type == RANDOM) { place_ctx.block_locs[blk_id].is_fixed = true; } @@ -2513,11 +2519,7 @@ static void initial_placement_blocks(int* free_locations, enum e_pad_loc_type pa } } -static void initial_placement_location(const int* free_locations, ClusterBlockId blk_id, int& ipos, t_pl_loc& to) { - auto& cluster_ctx = g_vpr_ctx.clustering(); - - int itype = cluster_ctx.clb_nlist.block_type(blk_id)->index; - +static void initial_placement_location(const int* free_locations, int& ipos, int itype, t_pl_loc& to) { ipos = 
vtr::irand(free_locations[itype] - 1); to = legal_pos[itype][ipos]; } diff --git a/vpr/src/place/place_macro.cpp b/vpr/src/place/place_macro.cpp index 4faeb1d9deb..79f20225dc3 100644 --- a/vpr/src/place/place_macro.cpp +++ b/vpr/src/place/place_macro.cpp @@ -77,7 +77,7 @@ static void find_all_the_macro(int* num_of_macro, std::vector& p num_macro = 0; for (auto blk_id : cluster_ctx.clb_nlist.blocks()) { - num_blk_pins = physical_tile_type(blk_id)->num_pins; + num_blk_pins = cluster_ctx.clb_nlist.block_type(blk_id)->pb_type->num_pins; for (to_iblk_pin = 0; to_iblk_pin < num_blk_pins; to_iblk_pin++) { to_net_id = cluster_ctx.clb_nlist.block_net(blk_id, to_iblk_pin); to_idirect = f_idirect_from_blk_pin[cluster_ctx.clb_nlist.block_type(blk_id)->index][to_iblk_pin]; diff --git a/vpr/src/place/timing_place_lookup.cpp b/vpr/src/place/timing_place_lookup.cpp index 48dee3549ff..4ea36626665 100644 --- a/vpr/src/place/timing_place_lookup.cpp +++ b/vpr/src/place/timing_place_lookup.cpp @@ -268,7 +268,7 @@ static t_chan_width setup_chan_width(const t_router_opts& router_opts, if (router_opts.fixed_channel_width == NO_FIXED_CHANNEL_WIDTH) { auto& device_ctx = g_vpr_ctx.device(); - auto type = physical_tile_type(find_most_common_block_type(device_ctx.grid)); + auto type = find_most_common_tile_type(device_ctx.grid); width_fac = 4 * type->num_pins; /*this is 2x the value that binary search starts */ @@ -365,8 +365,8 @@ static void generic_compute_matrix( t_physical_tile_type_ptr src_type = device_ctx.grid[source_x][source_y].type; t_physical_tile_type_ptr sink_type = device_ctx.grid[sink_x][sink_y].type; - bool src_or_target_empty = (src_type == device_ctx.EMPTY_TYPE - || sink_type == device_ctx.EMPTY_TYPE); + bool src_or_target_empty = (src_type == device_ctx.EMPTY_PHYSICAL_TILE_TYPE + || sink_type == device_ctx.EMPTY_PHYSICAL_TILE_TYPE); bool is_allowed_type = allowed_types.empty() || allowed_types.find(src_type->name) != allowed_types.end(); @@ -471,7 +471,7 @@ static 
vtr::Matrix compute_delta_delays( for (y = 0; y < grid.height(); ++y) { auto type = grid[x][y].type; - if (type != device_ctx.EMPTY_TYPE) { + if (type != device_ctx.EMPTY_PHYSICAL_TILE_TYPE) { if (!allowed_types.empty() && allowed_types.find(std::string(type->name)) == allowed_types.end()) { continue; } @@ -501,7 +501,7 @@ static vtr::Matrix compute_delta_delays( for (x = 0; x < grid.width(); ++x) { auto type = grid[x][y].type; - if (type != device_ctx.EMPTY_TYPE) { + if (type != device_ctx.EMPTY_PHYSICAL_TILE_TYPE) { if (!allowed_types.empty() && allowed_types.find(std::string(type->name)) == allowed_types.end()) { continue; } @@ -887,16 +887,16 @@ void OverrideDelayModel::compute_override_delay_model( std::set> sampled_rr_pairs; for (int iconn = 0; iconn < num_conns; ++iconn) { //Find the associated pins - int from_pin = find_pin(logical_block_type(from_type), from_port.port_name(), from_port.port_low_index() + iconn); - int to_pin = find_pin(logical_block_type(to_type), to_port.port_name(), to_port.port_low_index() + iconn); + int from_pin = find_pin(from_type, from_port.port_name(), from_port.port_low_index() + iconn); + int to_pin = find_pin(to_type, to_port.port_name(), to_port.port_low_index() + iconn); VTR_ASSERT(from_pin != OPEN); VTR_ASSERT(to_pin != OPEN); - int from_pin_class = find_pin_class(logical_block_type(from_type), from_port.port_name(), from_port.port_low_index() + iconn, DRIVER); + int from_pin_class = find_pin_class(from_type, from_port.port_name(), from_port.port_low_index() + iconn, DRIVER); VTR_ASSERT(from_pin_class != OPEN); - int to_pin_class = find_pin_class(logical_block_type(to_type), to_port.port_name(), to_port.port_low_index() + iconn, RECEIVER); + int to_pin_class = find_pin_class(to_type, to_port.port_name(), to_port.port_low_index() + iconn, RECEIVER); VTR_ASSERT(to_pin_class != OPEN); int src_rr = OPEN; diff --git a/vpr/src/place/uniform_move_generator.cpp b/vpr/src/place/uniform_move_generator.cpp index 
b29604188bf..93261b7469b 100644 --- a/vpr/src/place/uniform_move_generator.cpp +++ b/vpr/src/place/uniform_move_generator.cpp @@ -14,10 +14,13 @@ e_create_move UniformMoveGenerator::propose_move(t_pl_blocks_to_be_moved& blocks t_pl_loc from = place_ctx.block_locs[b_from].loc; auto cluster_from_type = cluster_ctx.clb_nlist.block_type(b_from); auto grid_from_type = g_vpr_ctx.device().grid[from.x][from.y].type; - VTR_ASSERT(physical_tile_type(cluster_from_type) == grid_from_type); + VTR_ASSERT(is_tile_compatible(grid_from_type, cluster_from_type)); t_pl_loc to; - if (!find_to_loc_uniform(physical_tile_type(b_from), rlim, from, to)) { + + auto type = pick_random_placement_type(cluster_from_type); + + if (!find_to_loc_uniform(type, rlim, from, to)) { return e_create_move::ABORT; } diff --git a/vpr/src/power/power.cpp b/vpr/src/power/power.cpp index 94294cf55c2..7350eae9907 100644 --- a/vpr/src/power/power.cpp +++ b/vpr/src/power/power.cpp @@ -606,7 +606,7 @@ static void power_usage_blocks(t_power_usage* power_usage) { for (size_t y = 0; y < device_ctx.grid.height(); y++) { if ((device_ctx.grid[x][y].width_offset != 0) || (device_ctx.grid[x][y].height_offset != 0) - || (device_ctx.grid[x][y].type == device_ctx.EMPTY_TYPE)) { + || (device_ctx.grid[x][y].type == device_ctx.EMPTY_PHYSICAL_TILE_TYPE)) { continue; } diff --git a/vpr/src/route/route_common.cpp b/vpr/src/route/route_common.cpp index 396517ed1ba..761aa886c78 100644 --- a/vpr/src/route/route_common.cpp +++ b/vpr/src/route/route_common.cpp @@ -1052,13 +1052,21 @@ static vtr::vector> load_net_rr_terminals(const t i = place_ctx.block_locs[block_id].loc.x; j = place_ctx.block_locs[block_id].loc.y; auto type = physical_tile_type(block_id); + auto logical_block = cluster_ctx.clb_nlist.block_type(block_id); /* In the routing graph, each (x, y) location has unique pins on it * so when there is capacity, blocks are packed and their pin numbers * are offset to get their actual rr_node */ - node_block_pin = 
cluster_ctx.clb_nlist.pin_physical_index(pin_id); + node_block_pin = cluster_ctx.clb_nlist.pin_logical_index(pin_id); - iclass = type->pin_class[node_block_pin]; + auto pin_directs_map = type->tile_block_pin_directs_map; + auto map_result = pin_directs_map.find(logical_block->index); + std::unordered_map map = map_result->second; + + auto pin_result = map.find(node_block_pin); + auto phys_pin = pin_result->second; + + iclass = type->pin_class[phys_pin]; inode = get_rr_node_index(L_rr_node_indices, i, j, (pin_count == 0 ? SOURCE : SINK), /* First pin is driver */ iclass); diff --git a/vpr/src/route/rr_graph2.cpp b/vpr/src/route/rr_graph2.cpp index 72bbe09a555..556a879229f 100644 --- a/vpr/src/route/rr_graph2.cpp +++ b/vpr/src/route/rr_graph2.cpp @@ -452,7 +452,7 @@ void obstruct_chan_details(const DeviceGrid& grid, if (!trim_obs_channels) continue; - if (grid[x][y].type == device_ctx.EMPTY_TYPE) + if (grid[x][y].type == device_ctx.EMPTY_PHYSICAL_TILE_TYPE) continue; if (grid[x][y].width_offset > 0 || grid[x][y].height_offset > 0) continue; @@ -491,22 +491,22 @@ void obstruct_chan_details(const DeviceGrid& grid, if ((x == 0) || (y == 0)) continue; } - if (grid[x][y].type == device_ctx.EMPTY_TYPE) { + if (grid[x][y].type == device_ctx.EMPTY_PHYSICAL_TILE_TYPE) { if ((x == grid.width() - 2) && is_io_type(grid[x + 1][y].type)) //-2 for no perim channels continue; if ((y == grid.height() - 2) && is_io_type(grid[x][y + 1].type)) //-2 for no perim channels continue; } - if (is_io_type(grid[x][y].type) || (grid[x][y].type == device_ctx.EMPTY_TYPE)) { - if (is_io_type(grid[x][y + 1].type) || (grid[x][y + 1].type == device_ctx.EMPTY_TYPE)) { + if (is_io_type(grid[x][y].type) || (grid[x][y].type == device_ctx.EMPTY_PHYSICAL_TILE_TYPE)) { + if (is_io_type(grid[x][y + 1].type) || (grid[x][y + 1].type == device_ctx.EMPTY_PHYSICAL_TILE_TYPE)) { for (int track = 0; track < nodes_per_chan->max; ++track) { chan_details_x[x][y][track].set_length(0); } } } - if 
(is_io_type(grid[x][y].type) || (grid[x][y].type == device_ctx.EMPTY_TYPE)) { - if (is_io_type(grid[x + 1][y].type) || (grid[x + 1][y].type == device_ctx.EMPTY_TYPE)) { + if (is_io_type(grid[x][y].type) || (grid[x][y].type == device_ctx.EMPTY_PHYSICAL_TILE_TYPE)) { + if (is_io_type(grid[x + 1][y].type) || (grid[x + 1][y].type == device_ctx.EMPTY_PHYSICAL_TILE_TYPE)) { for (int track = 0; track < nodes_per_chan->max; ++track) { chan_details_y[x][y][track].set_length(0); } @@ -1370,7 +1370,7 @@ int find_average_rr_node_index(int device_width, for (int x = 0; x < device_width; ++x) { for (int y = 0; y < device_height; ++y) { - if (device_ctx.grid[x][y].type == device_ctx.EMPTY_TYPE) + if (device_ctx.grid[x][y].type == device_ctx.EMPTY_PHYSICAL_TILE_TYPE) continue; if (is_io_type(device_ctx.grid[x][y].type)) continue; @@ -1429,7 +1429,7 @@ int get_track_to_pins(int seg, } /* PAJ - if the pointed to is an EMPTY then shouldn't look for ipins */ - if (device_ctx.grid[x][y].type == device_ctx.EMPTY_TYPE) + if (device_ctx.grid[x][y].type == device_ctx.EMPTY_PHYSICAL_TILE_TYPE) continue; /* Move from logical (straight) to physical (twisted) track index diff --git a/vpr/src/util/vpr_utils.cpp b/vpr/src/util/vpr_utils.cpp index 463a7273802..12086083b18 100644 --- a/vpr/src/util/vpr_utils.cpp +++ b/vpr/src/util/vpr_utils.cpp @@ -6,6 +6,7 @@ #include "vtr_assert.h" #include "vtr_log.h" #include "vtr_memory.h" +#include "vtr_random.h" #include "vpr_types.h" #include "vpr_error.h" @@ -161,6 +162,9 @@ void sync_grid_to_blocks() { auto type = physical_tile_type(blk_id); + auto logical_type = cluster_ctx.clb_nlist.block_type(blk_id); + VTR_LOG("PHYSICAL TILE: %s\tLOGICAL_BLOCK: %s\n", type->name, logical_type->name); + /* Check range of block coords */ if (blk_x < 0 || blk_y < 0 || (blk_x + type->width - 1) > int(device_ctx.grid.width() - 1) @@ -217,20 +221,18 @@ std::string block_type_pin_index_to_name(t_physical_tile_type_ptr type, int pin_ pin_name += "."; - t_pb_type* pb_type = 
logical_block_type(type)->pb_type; int curr_index = 0; - for (int iport = 0; iport < pb_type->num_ports; ++iport) { - t_port* port = &pb_type->ports[iport]; + for (auto const port : type->ports) { - if (curr_index + port->num_pins > pin_index) { + if (curr_index + port.num_pins > pin_index) { //This port contains the desired pin index int index_in_port = pin_index - curr_index; - pin_name += port->name; + pin_name += port.name; pin_name += "[" + std::to_string(index_in_port) + "]"; return pin_name; } - curr_index += port->num_pins; + curr_index += port.num_pins; } return ""; @@ -330,36 +332,39 @@ void swap(IntraLbPbPinLookup& lhs, IntraLbPbPinLookup& rhs) { //Returns the set of pins which are connected to the top level clb pin // The pin(s) may be input(s) or and output (returning the connected sinks or drivers respectively) -std::vector find_clb_pin_connected_atom_pins(ClusterBlockId clb, int clb_pin, const IntraLbPbPinLookup& pb_gpin_lookup) { +std::vector find_clb_pin_connected_atom_pins(ClusterBlockId clb, int log_pin, const IntraLbPbPinLookup& pb_gpin_lookup) { std::vector atom_pins; + auto& clb_nlist = g_vpr_ctx.clustering().clb_nlist; + + auto logical_block = clb_nlist.block_type(clb); - if (is_opin(clb_pin, physical_tile_type(clb))) { + if (is_opin(log_pin, physical_tile_type(logical_block))) { //output - AtomPinId driver = find_clb_pin_driver_atom_pin(clb, clb_pin, pb_gpin_lookup); + AtomPinId driver = find_clb_pin_driver_atom_pin(clb, log_pin, pb_gpin_lookup); if (driver) { atom_pins.push_back(driver); } } else { //input - atom_pins = find_clb_pin_sink_atom_pins(clb, clb_pin, pb_gpin_lookup); + atom_pins = find_clb_pin_sink_atom_pins(clb, log_pin, pb_gpin_lookup); } return atom_pins; } //Returns the atom pin which drives the top level clb output pin -AtomPinId find_clb_pin_driver_atom_pin(ClusterBlockId clb, int clb_pin, const IntraLbPbPinLookup& pb_gpin_lookup) { +AtomPinId find_clb_pin_driver_atom_pin(ClusterBlockId clb, int log_pin, const 
IntraLbPbPinLookup& pb_gpin_lookup) { auto& cluster_ctx = g_vpr_ctx.clustering(); auto& atom_ctx = g_vpr_ctx.atom(); - int pb_pin_id = find_clb_pb_pin(clb, clb_pin); - if (pb_pin_id < 0) { + if (log_pin < 0) { //CLB output pin has no internal driver return AtomPinId::INVALID(); } const t_pb_routes& pb_routes = cluster_ctx.clb_nlist.block_pb(clb)->pb_route; - AtomNetId atom_net = pb_routes[pb_pin_id].atom_net_id; + AtomNetId atom_net = pb_routes[log_pin].atom_net_id; + int pb_pin_id = log_pin; //Trace back until the driver is reached while (pb_routes[pb_pin_id].driver_pb_pin_id >= 0) { pb_pin_id = pb_routes[pb_pin_id].driver_pb_pin_id; @@ -378,29 +383,27 @@ AtomPinId find_clb_pin_driver_atom_pin(ClusterBlockId clb, int clb_pin, const In } //Returns the set of atom sink pins associated with the top level clb input pin -std::vector find_clb_pin_sink_atom_pins(ClusterBlockId clb, int clb_pin, const IntraLbPbPinLookup& pb_gpin_lookup) { +std::vector find_clb_pin_sink_atom_pins(ClusterBlockId clb, int log_pin, const IntraLbPbPinLookup& pb_gpin_lookup) { auto& cluster_ctx = g_vpr_ctx.clustering(); auto& atom_ctx = g_vpr_ctx.atom(); const t_pb_routes& pb_routes = cluster_ctx.clb_nlist.block_pb(clb)->pb_route; - VTR_ASSERT_MSG(clb_pin < physical_tile_type(clb)->num_pins, "Must be a valid top-level pin"); - - int pb_pin = find_clb_pb_pin(clb, clb_pin); + VTR_ASSERT_MSG(log_pin < cluster_ctx.clb_nlist.block_type(clb)->pb_type->num_pins, "Must be a valid tile pin"); VTR_ASSERT(cluster_ctx.clb_nlist.block_pb(clb)); - VTR_ASSERT_MSG(pb_pin < cluster_ctx.clb_nlist.block_pb(clb)->pb_graph_node->num_pins(), "Pin must map to a top-level pb pin"); + VTR_ASSERT_MSG(log_pin < cluster_ctx.clb_nlist.block_pb(clb)->pb_graph_node->num_pins(), "Pin must map to a top-level pb pin"); - VTR_ASSERT_MSG(pb_routes[pb_pin].driver_pb_pin_id < 0, "CLB input pin should have no internal drivers"); + VTR_ASSERT_MSG(pb_routes[log_pin].driver_pb_pin_id < 0, "CLB input pin should have no internal 
drivers"); - AtomNetId atom_net = pb_routes[pb_pin].atom_net_id; + AtomNetId atom_net = pb_routes[log_pin].atom_net_id; VTR_ASSERT(atom_net); - std::vector connected_sink_pb_pins = find_connected_internal_clb_sink_pins(clb, pb_pin); + std::vector connected_sink_pb_pins = find_connected_internal_clb_sink_pins(clb, log_pin); std::vector sink_atom_pins; for (int sink_pb_pin : connected_sink_pb_pins) { - //Map the pb_pin_id to AtomPinId + //Map the log_pin_id to AtomPinId AtomPinId atom_pin = find_atom_pin_for_pb_route_id(clb, sink_pb_pin, pb_gpin_lookup); VTR_ASSERT(atom_pin); @@ -551,7 +554,6 @@ int find_clb_pb_pin(ClusterBlockId clb, int clb_pin) { pb_pin = clb_pin - place_ctx.block_locs[clb].loc.z * num_basic_block_pins; } else { - //No offset pb_pin = clb_pin; } @@ -633,33 +635,48 @@ bool is_io_type(t_physical_tile_type_ptr type) { bool is_empty_type(t_physical_tile_type_ptr type) { auto& device_ctx = g_vpr_ctx.device(); - return type == device_ctx.EMPTY_TYPE; + return type == device_ctx.EMPTY_PHYSICAL_TILE_TYPE; +} + +bool is_empty_type(t_logical_block_type_ptr type) { + auto& device_ctx = g_vpr_ctx.device(); + + return type == device_ctx.EMPTY_LOGICAL_BLOCK_TYPE; } t_physical_tile_type_ptr physical_tile_type(t_logical_block_type_ptr logical_block_type) { auto& device_ctx = g_vpr_ctx.device(); - /* It is assumed that there is a 1:1 mapping between logical and physical types - * making it possible to use the same index to access the corresponding type - */ - return &device_ctx.physical_tile_types[logical_block_type->index]; + if (0 == logical_block_type->equivalent_tiles.size()) + return device_ctx.EMPTY_PHYSICAL_TILE_TYPE; + + for (auto type : logical_block_type->equivalent_tiles) { + if (0 == strcmp(type->name, logical_block_type->name)) return type; + } + + return nullptr; } t_physical_tile_type_ptr physical_tile_type(ClusterBlockId blk) { - auto& cluster_ctx = g_vpr_ctx.clustering(); + auto& place_ctx = g_vpr_ctx.placement(); + auto& device_ctx = 
g_vpr_ctx.device(); - auto blk_type = cluster_ctx.clb_nlist.block_type(blk); + auto block_loc = place_ctx.block_locs[blk]; + auto loc = block_loc.loc; - return physical_tile_type(blk_type); + return device_ctx.grid[loc.x][loc.y].type; } t_logical_block_type_ptr logical_block_type(t_physical_tile_type_ptr physical_tile_type) { auto& device_ctx = g_vpr_ctx.device(); - /* It is assumed that there is a 1:1 mapping between logical and physical types - * making it possible to use the same index to access the corresponding type - */ - return &device_ctx.logical_block_types[physical_tile_type->index]; + if (0 == physical_tile_type->equivalent_sites.size()) + return device_ctx.EMPTY_LOGICAL_BLOCK_TYPE; + + for (auto type : physical_tile_type->equivalent_sites) { + if (0 == strcmp(type->name, physical_tile_type->name)) return type; + } + return nullptr; } /* Each node in the pb_graph for a top-level pb_type can be uniquely identified @@ -746,7 +763,12 @@ t_logical_block_type_ptr infer_logic_block_type(const DeviceGrid& grid) { //Sort the candidates by the most common block type auto by_desc_grid_count = [&](t_logical_block_type_ptr lhs, t_logical_block_type_ptr rhs) { - return grid.num_instances(physical_tile_type(lhs)) > grid.num_instances(physical_tile_type(rhs)); + int lhs_num_instances = 0; + int rhs_num_instances = 0; + // Count number of instances for each type + for (auto type : lhs->equivalent_tiles) lhs_num_instances += grid.num_instances(type); + for (auto type : rhs->equivalent_tiles) rhs_num_instances += grid.num_instances(type); + return lhs_num_instances > rhs_num_instances; }; std::stable_sort(logic_block_candidates.begin(), logic_block_candidates.end(), by_desc_grid_count); @@ -765,21 +787,46 @@ t_logical_block_type_ptr infer_logic_block_type(const DeviceGrid& grid) { t_logical_block_type_ptr find_most_common_block_type(const DeviceGrid& grid) { auto& device_ctx = g_vpr_ctx.device(); + t_logical_block_type_ptr max_type = nullptr; + size_t max_count = 0; + for 
(const auto& logical_block : device_ctx.logical_block_types) { + size_t inst_cnt = 0; + for (const auto& equivalent_tile : logical_block.equivalent_tiles) { + inst_cnt += grid.num_instances(equivalent_tile); + } + + if (max_count < inst_cnt) { + max_count = inst_cnt; + max_type = &logical_block; + } + } + + if (max_type == nullptr) { + VTR_LOG_WARN("Unable to determine most common block type (perhaps the device grid was empty?)\n"); + } + + return max_type; +} + +t_physical_tile_type_ptr find_most_common_tile_type(const DeviceGrid& grid) { + auto& device_ctx = g_vpr_ctx.device(); + t_physical_tile_type_ptr max_type = nullptr; size_t max_count = 0; - for (const auto& type : device_ctx.physical_tile_types) { - size_t inst_cnt = grid.num_instances(&type); + for (const auto& physical_tile : device_ctx.physical_tile_types) { + size_t inst_cnt = grid.num_instances(&physical_tile); + if (max_count < inst_cnt) { max_count = inst_cnt; - max_type = &type; + max_type = &physical_tile; } } if (max_type == nullptr) { VTR_LOG_WARN("Unable to determine most common block type (perhaps the device grid was empty?)\n"); - return nullptr; } - return logical_block_type(max_type); + + return max_type; } InstPort parse_inst_port(std::string str) { @@ -787,12 +834,19 @@ InstPort parse_inst_port(std::string str) { auto& device_ctx = g_vpr_ctx.device(); auto blk_type = find_block_type_by_name(inst_port.instance_name(), device_ctx.physical_tile_types); - if (!blk_type) { + if (blk_type == nullptr) { VPR_FATAL_ERROR(VPR_ERROR_ARCH, "Failed to find block type named %s", inst_port.instance_name().c_str()); } - const t_port* port = find_pb_graph_port(logical_block_type(blk_type)->pb_graph_head, inst_port.port_name()); - if (!port) { + int num_pins = OPEN; + for (auto physical_port : blk_type->ports) { + if (0 == strcmp(inst_port.port_name().c_str(), physical_port.name)) { + num_pins = physical_port.num_pins; + break; + } + } + + if (num_pins == OPEN) { VPR_FATAL_ERROR(VPR_ERROR_ARCH, "Failed to 
find port %s on block type %s", inst_port.port_name().c_str(), inst_port.instance_name().c_str()); } @@ -800,55 +854,52 @@ InstPort parse_inst_port(std::string str) { VTR_ASSERT(inst_port.port_high_index() == InstPort::UNSPECIFIED); inst_port.set_port_low_index(0); - inst_port.set_port_high_index(port->num_pins - 1); + inst_port.set_port_high_index(num_pins - 1); } else { - if (inst_port.port_low_index() < 0 || inst_port.port_low_index() >= port->num_pins - || inst_port.port_high_index() < 0 || inst_port.port_high_index() >= port->num_pins) { + if (inst_port.port_low_index() < 0 || inst_port.port_low_index() >= num_pins + || inst_port.port_high_index() < 0 || inst_port.port_high_index() >= num_pins) { VPR_FATAL_ERROR(VPR_ERROR_ARCH, "Pin indices [%d:%d] on port %s of block type %s out of expected range [%d:%d]", inst_port.port_low_index(), inst_port.port_high_index(), inst_port.port_name().c_str(), inst_port.instance_name().c_str(), - 0, port->num_pins - 1); + 0, num_pins - 1); } } return inst_port; } //Returns the pin class associated with the specified pin_index_in_port within the port port_name on type -int find_pin_class(t_logical_block_type_ptr type, std::string port_name, int pin_index_in_port, e_pin_type pin_type) { +int find_pin_class(t_physical_tile_type_ptr type, std::string port_name, int pin_index_in_port, e_pin_type pin_type) { int iclass = OPEN; int ipin = find_pin(type, port_name, pin_index_in_port); if (ipin != OPEN) { - iclass = physical_tile_type(type)->pin_class[ipin]; + iclass = type->pin_class[ipin]; if (iclass != OPEN) { - VTR_ASSERT(physical_tile_type(type)->class_inf[iclass].type == pin_type); + VTR_ASSERT(type->class_inf[iclass].type == pin_type); } } return iclass; } -int find_pin(t_logical_block_type_ptr type, std::string port_name, int pin_index_in_port) { +int find_pin(t_physical_tile_type_ptr type, std::string port_name, int pin_index_in_port) { int ipin = OPEN; - - t_pb_type* pb_type = type->pb_type; - t_port* matched_port = nullptr; 
int port_base_ipin = 0; - for (int iport = 0; iport < pb_type->num_ports; ++iport) { - t_port* port = &pb_type->ports[iport]; + int num_pins = OPEN; + + for (auto port : type->ports) { - if (port->name == port_name) { - matched_port = port; + if (port.name == port_name) { + num_pins = port.num_pins; break; } - port_base_ipin += port->num_pins; + port_base_ipin += port.num_pins; } - if (matched_port) { - VTR_ASSERT(matched_port->name == port_name); - VTR_ASSERT(pin_index_in_port < matched_port->num_pins); + if (num_pins != OPEN) { + VTR_ASSERT(pin_index_in_port < num_pins); ipin = port_base_ipin + pin_index_in_port; } @@ -1652,7 +1703,7 @@ static void alloc_and_load_port_pin_from_blk_pin() { temp_port_pin_from_blk_pin = (int**)vtr::malloc(device_ctx.logical_block_types.size() * sizeof(int*)); for (const auto& type : device_ctx.logical_block_types) { itype = type.index; - blk_pin_count = physical_tile_type(&type)->num_pins; + blk_pin_count = type.pb_type->num_pins; temp_port_from_blk_pin[itype] = (int*)vtr::malloc(blk_pin_count * sizeof(int)); temp_port_pin_from_blk_pin[itype] = (int*)vtr::malloc(blk_pin_count * sizeof(int)); @@ -1667,7 +1718,7 @@ static void alloc_and_load_port_pin_from_blk_pin() { for (const auto& type : device_ctx.logical_block_types) { itype = type.index; - /* itype starts from 1 since device_ctx.logical_block_types[0] is the EMPTY_TYPE. */ + /* itype starts from 1 since device_ctx.logical_block_types[0] is the EMPTY_LOGICAL_BLOCK_TYPE. */ if (itype == 0) { continue; } @@ -1723,7 +1774,7 @@ void free_blk_pin_from_port_pin() { for (const auto& type : device_ctx.logical_block_types) { int itype = type.index; - // Avoid EMPTY_TYPE + // Avoid EMPTY_LOGICAL_BLOCK_TYPE if (itype == 0) { continue; } @@ -1767,7 +1818,7 @@ static void alloc_and_load_blk_pin_from_port_pin() { } /* Load the values */ - /* itype starts from 1 since device_ctx.block_types[0] is the EMPTY_TYPE. 
*/ + /* itype starts from 1 since device_ctx.block_types[0] is the EMPTY_LOGICAL_BLOCK_TYPE. */ for (itype = 1; itype < device_ctx.logical_block_types.size(); itype++) { blk_pin_count = 0; num_ports = device_ctx.logical_block_types[itype].pb_type->num_ports; @@ -2000,8 +2051,10 @@ void alloc_and_load_idirect_from_blk_pin(t_direct_inf* directs, int num_directs, temp_idirect_from_blk_pin = (int**)vtr::malloc(device_ctx.logical_block_types.size() * sizeof(int*)); temp_direct_type_from_blk_pin = (int**)vtr::malloc(device_ctx.logical_block_types.size() * sizeof(int*)); for (const auto& type : device_ctx.logical_block_types) { + if (is_empty_type(&type)) continue; + int itype = type.index; - num_type_pins = physical_tile_type(&type)->num_pins; + num_type_pins = type.pb_type->num_pins; temp_idirect_from_blk_pin[itype] = (int*)vtr::malloc(num_type_pins * sizeof(int)); temp_direct_type_from_blk_pin[itype] = (int*)vtr::malloc(num_type_pins * sizeof(int)); @@ -2230,6 +2283,44 @@ void place_sync_external_block_connections(ClusterBlockId iblk) { place_ctx.block_locs[iblk].nets_and_pins_synced_to_z_coordinate = true; } +void update_physical_pin_indices() { + auto& cluster_ctx = g_vpr_ctx.mutable_clustering(); + auto& clb_nlist = cluster_ctx.clb_nlist; + + for (auto blk : clb_nlist.blocks()) { + auto logical_block = clb_nlist.block_type(blk); + auto physical_tile = physical_tile_type(blk); + + // Physical tile and logical block are already compatible + if (physical_tile == physical_tile_type(logical_block)) { + continue; + } + + for (auto pin : clb_nlist.block_pins(blk)) { + int block_pin = clb_nlist.pin_logical_index(pin); + + auto pin_directs_map = physical_tile->tile_block_pin_directs_map; + auto map_result = pin_directs_map.find(logical_block->index); + std::unordered_map map = map_result->second; + + auto pin_result = map.find(block_pin); + auto phys_pin = pin_result->second; + + clb_nlist.set_pin_physical_index(pin, phys_pin); + } + } +} + +int 
get_max_num_pins(t_logical_block_type_ptr logical_block) { + int max_num_pins = 0; + + for (auto physical_tile : logical_block->equivalent_tiles) { + max_num_pins = std::max(max_num_pins, physical_tile->num_pins); + } + + return max_num_pins; +} + int max_pins_per_grid_tile() { auto& device_ctx = g_vpr_ctx.device(); int max_pins = 0; @@ -2241,6 +2332,17 @@ int max_pins_per_grid_tile() { return max_pins; } +bool is_tile_compatible(t_physical_tile_type_ptr physical_tile, t_logical_block_type_ptr logical_block) { + auto equivalent_tiles = logical_block->equivalent_tiles; + return std::find(equivalent_tiles.begin(), equivalent_tiles.end(), physical_tile) != equivalent_tiles.end(); +} + +t_physical_tile_type_ptr pick_random_placement_type(t_logical_block_type_ptr logical_block) { + auto equivalent_tiles = logical_block->equivalent_tiles; + + return equivalent_tiles[vtr::irand((int)equivalent_tiles.size() - 1)]; +} + void pretty_print_uint(const char* prefix, size_t value, int num_digits, int scientific_precision) { //Print as integer if it will fit in the width, other wise scientific if (value <= std::pow(10, num_digits) - 1) { diff --git a/vpr/src/util/vpr_utils.h b/vpr/src/util/vpr_utils.h index 5209db5aa00..33d5df2eec8 100644 --- a/vpr/src/util/vpr_utils.h +++ b/vpr/src/util/vpr_utils.h @@ -26,6 +26,7 @@ bool is_input_type(t_physical_tile_type_ptr type); bool is_output_type(t_physical_tile_type_ptr type); bool is_io_type(t_physical_tile_type_ptr type); bool is_empty_type(t_physical_tile_type_ptr type); +bool is_empty_type(t_logical_block_type_ptr type); //Returns the corresponding physical/logical type given the logical/physical type as parameter t_physical_tile_type_ptr physical_tile_type(t_logical_block_type_ptr logical_block_type); @@ -76,13 +77,13 @@ class IntraLbPbPinLookup { }; //Find the atom pins (driver or sinks) connected to the specified top-level CLB pin -std::vector find_clb_pin_connected_atom_pins(ClusterBlockId clb, int clb_pin, const 
IntraLbPbPinLookup& pb_gpin_lookup); +std::vector find_clb_pin_connected_atom_pins(ClusterBlockId clb, int log_pin, const IntraLbPbPinLookup& pb_gpin_lookup); //Find the atom pin driving to the specified top-level CLB pin -AtomPinId find_clb_pin_driver_atom_pin(ClusterBlockId clb, int clb_pin, const IntraLbPbPinLookup& pb_gpin_lookup); +AtomPinId find_clb_pin_driver_atom_pin(ClusterBlockId clb, int log_pin, const IntraLbPbPinLookup& pb_gpin_lookup); //Find the atom pins driven by the specified top-level CLB pin -std::vector find_clb_pin_sink_atom_pins(ClusterBlockId clb, int clb_pin, const IntraLbPbPinLookup& pb_gpin_lookup); +std::vector find_clb_pin_sink_atom_pins(ClusterBlockId clb, int log_pin, const IntraLbPbPinLookup& pb_gpin_lookup); std::tuple find_pb_route_clb_input_net_pin(ClusterBlockId clb, int sink_pb_route_id); @@ -113,16 +114,19 @@ AtomPinId find_atom_pin(ClusterBlockId blk_id, const t_pb_graph_pin* pb_gpin); //Returns the block type matching name, or nullptr (if not found) t_physical_tile_type_ptr find_block_type_by_name(std::string name, const std::vector& types); -//Returns the block type which is most common in the device grid +//Returns the logical block type which is most common in the device grid t_logical_block_type_ptr find_most_common_block_type(const DeviceGrid& grid); +//Returns the physical tile type which is most common in the device grid +t_physical_tile_type_ptr find_most_common_tile_type(const DeviceGrid& grid); + //Parses a block_name.port[x:y] (e.g. 
LAB.data_in[3:10]) pin range specification, if no pin range is specified //looks-up the block port and fills in the full range InstPort parse_inst_port(std::string str); -int find_pin_class(t_logical_block_type_ptr type, std::string port_name, int pin_index_in_port, e_pin_type pin_type); +int find_pin_class(t_physical_tile_type_ptr type, std::string port_name, int pin_index_in_port, e_pin_type pin_type); -int find_pin(t_logical_block_type_ptr type, std::string port_name, int pin_index_in_port); +int find_pin(t_physical_tile_type_ptr type, std::string port_name, int pin_index_in_port); //Returns the block type which is most likely the logic block t_logical_block_type_ptr infer_logic_block_type(const DeviceGrid& grid); @@ -168,6 +172,11 @@ void print_usage_by_wire_length(); AtomBlockId find_memory_sibling(const t_pb* pb); void place_sync_external_block_connections(ClusterBlockId iblk); +void update_physical_pin_indices(); +int get_max_num_pins(t_logical_block_type_ptr logical_block); + +bool is_tile_compatible(t_physical_tile_type_ptr physical_tile, t_logical_block_type_ptr logical_block); +t_physical_tile_type_ptr pick_random_placement_type(t_logical_block_type_ptr logical_block); int max_pins_per_grid_tile(); From 26b10daf01f0d448c51f667663a4a078de3f581c Mon Sep 17 00:00:00 2001 From: Alessandro Comodi Date: Wed, 2 Oct 2019 13:22:03 +0200 Subject: [PATCH 03/58] place: added placement priority and check direct pin mappings Signed-off-by: Alessandro Comodi --- libs/libarchfpga/src/physical_types.h | 4 + libs/libarchfpga/src/read_xml_arch_file.cpp | 84 +++++++++++++++------ vpr/src/base/clustered_netlist.h | 6 +- vpr/src/base/read_place.cpp | 4 +- vpr/src/base/vpr_api.cpp | 2 +- vpr/src/pack/cluster.cpp | 6 +- vpr/src/pack/pack.cpp | 5 +- vpr/src/place/place.cpp | 29 +++++-- vpr/src/util/vpr_utils.cpp | 38 ++-------- vpr/src/util/vpr_utils.h | 1 - 10 files changed, 106 insertions(+), 73 deletions(-) diff --git a/libs/libarchfpga/src/physical_types.h 
b/libs/libarchfpga/src/physical_types.h index 1424eb646f3..46411a02f48 100644 --- a/libs/libarchfpga/src/physical_types.h +++ b/libs/libarchfpga/src/physical_types.h @@ -609,6 +609,9 @@ struct t_physical_tile_type { std::vector equivalent_sites_names; std::vector equivalent_sites; + + /* Unordered map indexed by the logical block index. + * tile_block_pin_directs_map[logical block index][logical block pin] -> physical tile pin */ std::unordered_map> tile_block_pin_directs_map; /* Returns the indices of pins that contain a clock for this physical logic block */ @@ -666,6 +669,7 @@ struct t_logical_block_type { int index = -1; /* index of type descriptor in array (allows for index referencing) */ std::vector equivalent_tiles; + std::map> placement_priority; }; /************************************************************************************************* diff --git a/libs/libarchfpga/src/read_xml_arch_file.cpp b/libs/libarchfpga/src/read_xml_arch_file.cpp index 20280bed7fd..87e293dcd36 100644 --- a/libs/libarchfpga/src/read_xml_arch_file.cpp +++ b/libs/libarchfpga/src/read_xml_arch_file.cpp @@ -220,13 +220,16 @@ e_side string_to_side(std::string side_str); static void link_physical_logical_types(std::vector& PhysicalTileTypes, std::vector& LogicalBlockTypes); -static void check_port_equivalence(t_physical_tile_type& physical_tile, t_logical_block_type& logical_block); +static void check_port_direct_mappings(t_physical_tile_type_ptr physical_tile, t_logical_block_type_ptr logical_block); static const t_physical_tile_port* get_port_by_name(t_physical_tile_type_ptr type, const char* port_name); static const t_port* get_port_by_name(t_logical_block_type_ptr type, const char* port_name); +static const t_physical_tile_port* get_port_by_pin(t_physical_tile_type_ptr type, int pin); +static const t_port* get_port_by_pin(t_logical_block_type_ptr type, int pin); + template -static T get_type_by_name(const char* type_name, std::vector& types); +static T* 
get_type_by_name(const char* type_name, std::vector& types); /* * @@ -809,9 +812,9 @@ static void LoadPinLoc(pugi::xml_node Locations, for (e_side side : {TOP, RIGHT, BOTTOM, LEFT}) { for (int pin = 0; pin < type->num_pin_loc_assignments[width][height][side]; ++pin) { auto pin_range = ProcessPinString(Locations, - type, - type->pin_loc_assignments[width][height][side][pin], - loc_data); + type, + type->pin_loc_assignments[width][height][side][pin], + loc_data); for (int pin_num = pin_range.first; pin_num < pin_range.second; ++pin_num) { VTR_ASSERT(pin_num < type->num_pins / type->capacity); @@ -3221,7 +3224,10 @@ static void ProcessTileEquivalentSites(pugi::xml_node Parent, auto LogicalBlockType = get_type_by_name(Prop.c_str(), LogicalBlockTypes); - ProcessEquivalentSiteDirects(CurSite, PhysicalTileType, &LogicalBlockType, Prop, loc_data); + auto priority = get_attribute(CurSite, "priority", loc_data, ReqOpt::OPTIONAL).as_int(0); + LogicalBlockType->placement_priority[priority].push_back(PhysicalTileType); + + ProcessEquivalentSiteDirects(CurSite, PhysicalTileType, LogicalBlockType, Prop, loc_data); CurSite = CurSite.next_sibling(CurSite.name()); } @@ -3238,7 +3244,7 @@ static void ProcessEquivalentSiteDirects(pugi::xml_node Parent, if (count_children(Parent, "direct", loc_data) < 1) { archfpga_throw(loc_data.filename_c_str(), loc_data.line(Parent), - "There are no direct pin mappings between site %s and tile %s.\n", site_name, PhysicalTileType->name); + "There are no direct pin mappings between site %s and tile %s.\n", site_name.c_str(), PhysicalTileType->name); } std::unordered_map directs_map; @@ -4740,7 +4746,6 @@ e_side string_to_side(std::string side_str) { static void link_physical_logical_types(std::vector& PhysicalTileTypes, std::vector& LogicalBlockTypes) { - for (auto& physical_tile : PhysicalTileTypes) { if (physical_tile.index == EMPTY_TYPE_INDEX) continue; @@ -4754,7 +4759,7 @@ static void link_physical_logical_types(std::vector& Physi 
physical_tile.equivalent_sites.push_back(&logical_block); logical_block.equivalent_tiles.push_back(&physical_tile); - // TODO: Add check direct interconnect between site and tile add also pin mapping of integers + check_port_direct_mappings(&physical_tile, &logical_block); logical_block_added++; break; @@ -4769,26 +4774,34 @@ static void link_physical_logical_types(std::vector& Physi } } -static void check_port_equivalence(t_physical_tile_type& physical_tile, t_logical_block_type& logical_block) { - auto pb_type = logical_block.pb_type; - auto pb_type_ports = pb_type->ports; +static void check_port_direct_mappings(t_physical_tile_type_ptr physical_tile, t_logical_block_type_ptr logical_block) { + auto pb_type = logical_block->pb_type; + + if (pb_type->num_pins > physical_tile->num_pins) { + archfpga_throw(__FILE__, __LINE__, + "Logical Block (%s) has more pins than the Physical Tile (%s).\n", + logical_block->name, physical_tile->name); + } + + auto& pin_direct_mapping = physical_tile->tile_block_pin_directs_map.at(logical_block->index); - if (pb_type->num_ports != (int)physical_tile.ports.size()) { + if (pb_type->num_pins != (int)pin_direct_mapping.size()) { archfpga_throw(__FILE__, __LINE__, "Logical block (%s) and Physical tile (%s) have a different number of ports.\n", - logical_block.name, physical_tile.name); + logical_block->name, physical_tile->name); } - for (auto& tile_port : physical_tile.ports) { - auto block_port = pb_type_ports[tile_port.index]; + for (auto pin_map : pin_direct_mapping) { + auto block_port = get_port_by_pin(logical_block, pin_map.first); + auto tile_port = get_port_by_pin(physical_tile, pin_map.second); - if (0 != strcmp(tile_port.name, block_port.name) - || tile_port.type != block_port.type - || tile_port.num_pins != block_port.num_pins - || tile_port.equivalent != block_port.equivalent) { + if (0 != strcmp(tile_port->name, block_port->name) + || tile_port->type != block_port->type + || tile_port->num_pins != block_port->num_pins + 
|| tile_port->equivalent != block_port->equivalent) { archfpga_throw(__FILE__, __LINE__, "Logical block (%s) and Physical tile (%s) do not have equivalent port specifications.\n", - logical_block.name, physical_tile.name); + logical_block->name, physical_tile->name); } } } @@ -4816,11 +4829,34 @@ static const t_port* get_port_by_name(t_logical_block_type_ptr type, const char* return nullptr; } +static const t_physical_tile_port* get_port_by_pin(t_physical_tile_type_ptr type, int pin) { + for (auto port : type->ports) { + if (pin >= port.absolute_first_pin_index && pin < port.num_pins) { + return &type->ports[port.index]; + } + } + + return nullptr; +} + +static const t_port* get_port_by_pin(t_logical_block_type_ptr type, int pin) { + auto pb_type = type->pb_type; + + for (int i = 0; i < pb_type->num_ports; i++) { + auto port = pb_type->ports[i]; + if (pin >= port.absolute_first_pin_index && pin < port.num_pins) { + return &pb_type->ports[port.index]; + } + } + + return nullptr; +} + template -static T get_type_by_name(const char* type_name, std::vector& types) { - for (auto type : types) { +static T* get_type_by_name(const char* type_name, std::vector& types) { + for (auto& type : types) { if (0 == strcmp(type.name, type_name)) { - return type; + return &type; } } diff --git a/vpr/src/base/clustered_netlist.h b/vpr/src/base/clustered_netlist.h index 62a4da70517..41d72963e01 100644 --- a/vpr/src/base/clustered_netlist.h +++ b/vpr/src/base/clustered_netlist.h @@ -254,9 +254,9 @@ class ClusteredNetlist : public Netlist pin_physical_index_; //The physical pin index (i.e. 
pin index - //in t_physical_tile_type) corresponding - //to the logical pin - vtr::vector_map pin_logical_index_; //The logical pin index of this block + //in t_physical_tile_type) corresponding + //to the logical pin + vtr::vector_map pin_logical_index_; //The logical pin index of this block //Nets vtr::vector_map net_is_ignored_; //Boolean mapping indicating if the net is ignored diff --git a/vpr/src/base/read_place.cpp b/vpr/src/base/read_place.cpp index 8b6afb65d9b..1b0e363b2f3 100644 --- a/vpr/src/base/read_place.cpp +++ b/vpr/src/base/read_place.cpp @@ -160,7 +160,7 @@ void read_user_pad_loc(const char* pad_loc_file) { hash_table = alloc_hash_table(); for (auto blk_id : cluster_ctx.clb_nlist.blocks()) { - auto logical_block = cluster_ctx.clb_nlist.block_type(blk_id); + auto logical_block = cluster_ctx.clb_nlist.block_type(blk_id); if (is_io_type(physical_tile_type(logical_block))) { insert_in_hash_table(hash_table, cluster_ctx.clb_nlist.block_name(blk_id).c_str(), size_t(blk_id)); place_ctx.block_locs[blk_id].loc.x = OPEN; /* Mark as not seen yet. 
*/ @@ -267,7 +267,7 @@ void read_user_pad_loc(const char* pad_loc_file) { } for (auto blk_id : cluster_ctx.clb_nlist.blocks()) { - auto logical_block = cluster_ctx.clb_nlist.block_type(blk_id); + auto logical_block = cluster_ctx.clb_nlist.block_type(blk_id); auto type = physical_tile_type(logical_block); if (is_io_type(type) && place_ctx.block_locs[blk_id].loc.x == OPEN) { vpr_throw(VPR_ERROR_PLACE_F, pad_loc_file, 0, diff --git a/vpr/src/base/vpr_api.cpp b/vpr/src/base/vpr_api.cpp index b8f8b7a0710..735d080a59e 100644 --- a/vpr/src/base/vpr_api.cpp +++ b/vpr/src/base/vpr_api.cpp @@ -425,7 +425,7 @@ void vpr_create_device_grid(const t_vpr_setup& vpr_setup, const t_arch& Arch) { num_type_instances[&type], type.name); VTR_LOG("\tArchitecture\n"); - for(const auto equivalent_tile : type.equivalent_tiles) { + for (const auto equivalent_tile : type.equivalent_tiles) { VTR_LOG("\t\t%d\tblocks of type: %s\n", device_ctx.grid.num_instances(equivalent_tile), equivalent_tile->name); } diff --git a/vpr/src/pack/cluster.cpp b/vpr/src/pack/cluster.cpp index 6bb85180995..fa9d4d8f832 100644 --- a/vpr/src/pack/cluster.cpp +++ b/vpr/src/pack/cluster.cpp @@ -1966,8 +1966,10 @@ static void start_new_cluster(t_cluster_placement_stats* cluster_placement_stats int lhs_num_instances = 0; int rhs_num_instances = 0; // Count number of instances for each type - for (auto type : lhs->equivalent_tiles) lhs_num_instances += device_ctx.grid.num_instances(type); - for (auto type : rhs->equivalent_tiles) rhs_num_instances += device_ctx.grid.num_instances(type); + for (auto type : lhs->equivalent_tiles) + lhs_num_instances += device_ctx.grid.num_instances(type); + for (auto type : rhs->equivalent_tiles) + rhs_num_instances += device_ctx.grid.num_instances(type); float lhs_util = vtr::safe_ratio(num_used_type_instances[lhs], lhs_num_instances); float rhs_util = vtr::safe_ratio(num_used_type_instances[rhs], rhs_num_instances); diff --git a/vpr/src/pack/pack.cpp b/vpr/src/pack/pack.cpp index 
0a263458036..612d0df24d9 100644 --- a/vpr/src/pack/pack.cpp +++ b/vpr/src/pack/pack.cpp @@ -171,7 +171,8 @@ bool try_pack(t_packer_opts* packer_opts, resource_reqs += std::string(iter->first->name) + ": " + std::to_string(iter->second); int num_instances = 0; - for (auto type : iter->first->equivalent_tiles) num_instances += grid.num_instances(type); + for (auto type : iter->first->equivalent_tiles) + num_instances += grid.num_instances(type); resource_avail += std::string(iter->first->name) + ": " + std::to_string(num_instances); } @@ -278,7 +279,7 @@ static bool try_size_device_grid(const t_arch& arch, const std::map type_util; for (const auto& type : device_ctx.logical_block_types) { - if(is_empty_type(&type)) continue; + if (is_empty_type(&type)) continue; auto itr = num_type_instances.find(&type); if (itr == num_type_instances.end()) continue; diff --git a/vpr/src/place/place.cpp b/vpr/src/place/place.cpp index 1744ea66fee..c9eccd8e170 100644 --- a/vpr/src/place/place.cpp +++ b/vpr/src/place/place.cpp @@ -418,6 +418,8 @@ static void placement_inner_loop(float t, static void recompute_costs_from_scratch(const t_placer_opts& placer_opts, const PlaceDelayModel* delay_model, t_placer_costs* costs); +static t_physical_tile_type_ptr pick_highest_priority_type(t_logical_block_type_ptr logical_block, int num_needed_types, int* free_locations); + static void calc_placer_stats(t_placer_statistics& stats, float& success_rat, double& std_dev, const t_placer_costs& costs, const int move_lim); static void generate_post_place_timing_reports(const t_placer_opts& placer_opts, @@ -2408,9 +2410,9 @@ static void initial_placement_pl_macros(int macros_max_num_tries, int* free_loca // Assume that all the blocks in the macro are of the same type blk_id = pl_macros[imacro].members[0].blk_index; auto logical_block = cluster_ctx.clb_nlist.block_type(blk_id); - auto type = pick_random_placement_type(logical_block); - itype = type->index; - if (free_locations[itype] < 
int(pl_macros[imacro].members.size())) { + auto type = pick_highest_priority_type(logical_block, int(pl_macros[imacro].members.size()), free_locations); + + if (type == nullptr) { VPR_FATAL_ERROR(VPR_ERROR_PLACE, "Initial placement failed.\n" "Could not place macro length %zu with head block %s (#%zu); not enough free locations of type %s (#%d).\n" @@ -2418,6 +2420,8 @@ static void initial_placement_pl_macros(int macros_max_num_tries, int* free_loca pl_macros[imacro].members.size(), cluster_ctx.clb_nlist.block_name(blk_id).c_str(), size_t(blk_id), type->name, itype); } + itype = type->index; + // Try to place the macro first, if can be placed - place them, otherwise try again for (itry = 0; itry < macros_max_num_tries && macro_placed == false; itry++) { // Choose a random position for the head @@ -2484,15 +2488,17 @@ static void initial_placement_blocks(int* free_locations, enum e_pad_loc_type pa * that location again, so remove it from the free_locations array. */ - itype = pick_random_placement_type(logical_block)->index; + auto type = pick_highest_priority_type(logical_block, 1, free_locations); - if (free_locations[itype] <= 0) { + if (type == nullptr) { VPR_FATAL_ERROR(VPR_ERROR_PLACE, "Initial placement failed.\n" "Could not place block %s (#%zu); no free locations of type %s (#%d).\n", cluster_ctx.clb_nlist.block_name(blk_id).c_str(), size_t(blk_id), device_ctx.physical_tile_types[itype].name, itype); } + itype = type->index; + t_pl_loc to; initial_placement_location(free_locations, ipos, itype, to); @@ -2844,6 +2850,19 @@ int check_macro_placement_consistency() { return error; } +static t_physical_tile_type_ptr pick_highest_priority_type(t_logical_block_type_ptr logical_block, int num_needed_types, int* free_locations) { + // Loop through the ordered map to get tiles in a decreasing priority order + for (auto& physical_tiles : logical_block->placement_priority) { + for (auto tile : physical_tiles.second) { + if (free_locations[tile->index] >= 
num_needed_types) { + return tile; + } + } + } + + return nullptr; +} + #ifdef VERBOSE static void print_clb_placement(const char* fname) { /* Prints out the clb placements to a file. */ diff --git a/vpr/src/util/vpr_utils.cpp b/vpr/src/util/vpr_utils.cpp index 12086083b18..7fb47b72809 100644 --- a/vpr/src/util/vpr_utils.cpp +++ b/vpr/src/util/vpr_utils.cpp @@ -223,7 +223,6 @@ std::string block_type_pin_index_to_name(t_physical_tile_type_ptr type, int pin_ int curr_index = 0; for (auto const port : type->ports) { - if (curr_index + port.num_pins > pin_index) { //This port contains the desired pin index int index_in_port = pin_index - curr_index; @@ -766,8 +765,10 @@ t_logical_block_type_ptr infer_logic_block_type(const DeviceGrid& grid) { int lhs_num_instances = 0; int rhs_num_instances = 0; // Count number of instances for each type - for (auto type : lhs->equivalent_tiles) lhs_num_instances += grid.num_instances(type); - for (auto type : rhs->equivalent_tiles) rhs_num_instances += grid.num_instances(type); + for (auto type : lhs->equivalent_tiles) + lhs_num_instances += grid.num_instances(type); + for (auto type : rhs->equivalent_tiles) + rhs_num_instances += grid.num_instances(type); return lhs_num_instances > rhs_num_instances; }; std::stable_sort(logic_block_candidates.begin(), logic_block_candidates.end(), by_desc_grid_count); @@ -841,7 +842,7 @@ InstPort parse_inst_port(std::string str) { int num_pins = OPEN; for (auto physical_port : blk_type->ports) { if (0 == strcmp(inst_port.port_name().c_str(), physical_port.name)) { - num_pins = physical_port.num_pins; + num_pins = physical_port.num_pins; break; } } @@ -890,7 +891,6 @@ int find_pin(t_physical_tile_type_ptr type, std::string port_name, int pin_index int num_pins = OPEN; for (auto port : type->ports) { - if (port.name == port_name) { num_pins = port.num_pins; break; @@ -2283,34 +2283,6 @@ void place_sync_external_block_connections(ClusterBlockId iblk) { 
place_ctx.block_locs[iblk].nets_and_pins_synced_to_z_coordinate = true; } -void update_physical_pin_indices() { - auto& cluster_ctx = g_vpr_ctx.mutable_clustering(); - auto& clb_nlist = cluster_ctx.clb_nlist; - - for (auto blk : clb_nlist.blocks()) { - auto logical_block = clb_nlist.block_type(blk); - auto physical_tile = physical_tile_type(blk); - - // Physical tile and logical block are already compatible - if (physical_tile == physical_tile_type(logical_block)) { - continue; - } - - for (auto pin : clb_nlist.block_pins(blk)) { - int block_pin = clb_nlist.pin_logical_index(pin); - - auto pin_directs_map = physical_tile->tile_block_pin_directs_map; - auto map_result = pin_directs_map.find(logical_block->index); - std::unordered_map map = map_result->second; - - auto pin_result = map.find(block_pin); - auto phys_pin = pin_result->second; - - clb_nlist.set_pin_physical_index(pin, phys_pin); - } - } -} - int get_max_num_pins(t_logical_block_type_ptr logical_block) { int max_num_pins = 0; diff --git a/vpr/src/util/vpr_utils.h b/vpr/src/util/vpr_utils.h index 33d5df2eec8..ef8c76265ac 100644 --- a/vpr/src/util/vpr_utils.h +++ b/vpr/src/util/vpr_utils.h @@ -172,7 +172,6 @@ void print_usage_by_wire_length(); AtomBlockId find_memory_sibling(const t_pb* pb); void place_sync_external_block_connections(ClusterBlockId iblk); -void update_physical_pin_indices(); int get_max_num_pins(t_logical_block_type_ptr logical_block); bool is_tile_compatible(t_physical_tile_type_ptr physical_tile, t_logical_block_type_ptr logical_block); From f59b18c58818d0a6665b7ce3c4a5b466547731ec Mon Sep 17 00:00:00 2001 From: Alessandro Comodi Date: Wed, 2 Oct 2019 16:52:20 +0200 Subject: [PATCH 04/58] place: fix equivalent placement issues In addition now physical_tile_type() and logical_block_type() returns the highest priority corresponding type Signed-off-by: Alessandro Comodi --- libs/libarchfpga/src/physical_types.h | 11 +++++-- libs/libarchfpga/src/read_xml_arch_file.cpp | 16 +++++----- 
vpr/src/place/move_utils.cpp | 35 ++++++++++++++++----- vpr/src/place/place.cpp | 19 ++++++----- vpr/src/util/vpr_utils.cpp | 23 ++++++++------ 5 files changed, 68 insertions(+), 36 deletions(-) diff --git a/libs/libarchfpga/src/physical_types.h b/libs/libarchfpga/src/physical_types.h index 46411a02f48..97c69dedd03 100644 --- a/libs/libarchfpga/src/physical_types.h +++ b/libs/libarchfpga/src/physical_types.h @@ -610,7 +610,11 @@ struct t_physical_tile_type { std::vector equivalent_sites_names; std::vector equivalent_sites; - /* Unordered map indexed by the logical block index. + /* Map holding the priority for which this logical block needs to be placed. + * logical_blocks_priority[priority] -> vector holding the logical_block indices */ + std::map> logical_blocks_priority; + + /* Unordered map indexed by the logical block index. * tile_block_pin_directs_map[logical block index][logical block pin] -> physical tile pin */ std::unordered_map> tile_block_pin_directs_map; @@ -669,7 +673,10 @@ struct t_logical_block_type { int index = -1; /* index of type descriptor in array (allows for index referencing) */ std::vector equivalent_tiles; - std::map> placement_priority; + + /* Map holding the priority for which this logical block needs to be placed. 
+ * physical_tiles_priority[priority] -> vector holding the physical tile indices */ + std::map> physical_tiles_priority; }; /************************************************************************************************* diff --git a/libs/libarchfpga/src/read_xml_arch_file.cpp b/libs/libarchfpga/src/read_xml_arch_file.cpp index 87e293dcd36..09788a91d40 100644 --- a/libs/libarchfpga/src/read_xml_arch_file.cpp +++ b/libs/libarchfpga/src/read_xml_arch_file.cpp @@ -2976,6 +2976,8 @@ static void ProcessTiles(pugi::xml_node Node, t_physical_tile_type PhysicalTileType; + PhysicalTileType.index = index; + /* Parses the properties fields of the type */ ProcessTileProps(CurTileType, &PhysicalTileType, loc_data); @@ -3025,8 +3027,6 @@ static void ProcessTiles(pugi::xml_node Node, Cur = get_single_child(CurTileType, "equivalent_sites", loc_data, ReqOpt::REQUIRED); ProcessTileEquivalentSites(Cur, &PhysicalTileType, LogicalBlockTypes, loc_data); - PhysicalTileType.index = index; - /* Type fully read */ ++index; @@ -3217,7 +3217,7 @@ static void ProcessTileEquivalentSites(pugi::xml_node Parent, while (CurSite) { check_node(CurSite, "site", loc_data); - expect_only_attributes(CurSite, {"pb_type"}, loc_data); + expect_only_attributes(CurSite, {"pb_type", "priority"}, loc_data); /* Load equivalent site name */ auto Prop = std::string(get_attribute(CurSite, "pb_type", loc_data).value()); PhysicalTileType->equivalent_sites_names.push_back(Prop); @@ -3225,7 +3225,8 @@ static void ProcessTileEquivalentSites(pugi::xml_node Parent, auto LogicalBlockType = get_type_by_name(Prop.c_str(), LogicalBlockTypes); auto priority = get_attribute(CurSite, "priority", loc_data, ReqOpt::OPTIONAL).as_int(0); - LogicalBlockType->placement_priority[priority].push_back(PhysicalTileType); + LogicalBlockType->physical_tiles_priority[priority].push_back(PhysicalTileType->index); + PhysicalTileType->logical_blocks_priority[priority].push_back(LogicalBlockType->index); ProcessEquivalentSiteDirects(CurSite, 
PhysicalTileType, LogicalBlockType, Prop, loc_data); @@ -4795,8 +4796,7 @@ static void check_port_direct_mappings(t_physical_tile_type_ptr physical_tile, t auto block_port = get_port_by_pin(logical_block, pin_map.first); auto tile_port = get_port_by_pin(physical_tile, pin_map.second); - if (0 != strcmp(tile_port->name, block_port->name) - || tile_port->type != block_port->type + if (tile_port->type != block_port->type || tile_port->num_pins != block_port->num_pins || tile_port->equivalent != block_port->equivalent) { archfpga_throw(__FILE__, __LINE__, @@ -4831,7 +4831,7 @@ static const t_port* get_port_by_name(t_logical_block_type_ptr type, const char* static const t_physical_tile_port* get_port_by_pin(t_physical_tile_type_ptr type, int pin) { for (auto port : type->ports) { - if (pin >= port.absolute_first_pin_index && pin < port.num_pins) { + if (pin >= port.absolute_first_pin_index && pin < port.absolute_first_pin_index + port.num_pins) { return &type->ports[port.index]; } } @@ -4844,7 +4844,7 @@ static const t_port* get_port_by_pin(t_logical_block_type_ptr type, int pin) { for (int i = 0; i < pb_type->num_ports; i++) { auto port = pb_type->ports[i]; - if (pin >= port.absolute_first_pin_index && pin < port.num_pins) { + if (pin >= port.absolute_first_pin_index && pin < port.absolute_first_pin_index + port.num_pins) { return &pb_type->ports[port.index]; } } diff --git a/vpr/src/place/move_utils.cpp b/vpr/src/place/move_utils.cpp index 94b6422a3f2..9528b47aa82 100644 --- a/vpr/src/place/move_utils.cpp +++ b/vpr/src/place/move_utils.cpp @@ -588,7 +588,33 @@ bool find_to_loc_uniform(t_physical_tile_type_ptr type, if (cx_from == cx_to && cy_from == cy_to) { continue; //Same from/to location -- try again for new y-position } else { - legal = true; + VTR_ASSERT(cx_to != OPEN); + VTR_ASSERT(cy_to != OPEN); + + //Convert to true (uncompressed) grid locations + to.x = to_compressed_block_grid.compressed_to_grid_x[cx_to]; + to.y = 
to_compressed_block_grid.compressed_to_grid_y[cy_to]; + + auto& place_ctx = g_vpr_ctx.placement(); + auto& cluster_ctx = g_vpr_ctx.clustering(); + + auto blocks = place_ctx.grid_blocks[to.x][to.y].blocks; + bool impossible_swap = false; + for (auto blk : blocks) { + if (blk == ClusterBlockId::INVALID()) { + continue; + } + + auto block_type = cluster_ctx.clb_nlist.block_type(blk); + if (!is_tile_compatible(from_type, block_type)) { + impossible_swap = true; + break; + } + } + + if (!impossible_swap) { + legal = true; + } } } } @@ -598,13 +624,6 @@ bool find_to_loc_uniform(t_physical_tile_type_ptr type, return false; } - VTR_ASSERT(cx_to != OPEN); - VTR_ASSERT(cy_to != OPEN); - - //Convert to true (uncompressed) grid locations - to.x = to_compressed_block_grid.compressed_to_grid_x[cx_to]; - to.y = to_compressed_block_grid.compressed_to_grid_y[cy_to]; - //Each x/y location contains only a single type, so we can pick a random //z (capcity) location to.z = vtr::irand(type->capacity - 1); diff --git a/vpr/src/place/place.cpp b/vpr/src/place/place.cpp index c9eccd8e170..cc98b8d3168 100644 --- a/vpr/src/place/place.cpp +++ b/vpr/src/place/place.cpp @@ -418,7 +418,7 @@ static void placement_inner_loop(float t, static void recompute_costs_from_scratch(const t_placer_opts& placer_opts, const PlaceDelayModel* delay_model, t_placer_costs* costs); -static t_physical_tile_type_ptr pick_highest_priority_type(t_logical_block_type_ptr logical_block, int num_needed_types, int* free_locations); +static t_physical_tile_type_ptr pick_highest_placement_priority_type(t_logical_block_type_ptr logical_block, int num_needed_types, int* free_locations); static void calc_placer_stats(t_placer_statistics& stats, float& success_rat, double& std_dev, const t_placer_costs& costs, const int move_lim); @@ -2410,7 +2410,7 @@ static void initial_placement_pl_macros(int macros_max_num_tries, int* free_loca // Assume that all the blocks in the macro are of the same type blk_id = 
pl_macros[imacro].members[0].blk_index; auto logical_block = cluster_ctx.clb_nlist.block_type(blk_id); - auto type = pick_highest_priority_type(logical_block, int(pl_macros[imacro].members.size()), free_locations); + auto type = pick_highest_placement_priority_type(logical_block, int(pl_macros[imacro].members.size()), free_locations); if (type == nullptr) { VPR_FATAL_ERROR(VPR_ERROR_PLACE, @@ -2488,7 +2488,7 @@ static void initial_placement_blocks(int* free_locations, enum e_pad_loc_type pa * that location again, so remove it from the free_locations array. */ - auto type = pick_highest_priority_type(logical_block, 1, free_locations); + auto type = pick_highest_placement_priority_type(logical_block, 1, free_locations); if (type == nullptr) { VPR_FATAL_ERROR(VPR_ERROR_PLACE, @@ -2850,12 +2850,15 @@ int check_macro_placement_consistency() { return error; } -static t_physical_tile_type_ptr pick_highest_priority_type(t_logical_block_type_ptr logical_block, int num_needed_types, int* free_locations) { +static t_physical_tile_type_ptr pick_highest_placement_priority_type(t_logical_block_type_ptr logical_block, int num_needed_types, int* free_locations) { + auto& device_ctx = g_vpr_ctx.device(); + auto physical_tiles = device_ctx.physical_tile_types; + // Loop through the ordered map to get tiles in a decreasing priority order - for (auto& physical_tiles : logical_block->placement_priority) { - for (auto tile : physical_tiles.second) { - if (free_locations[tile->index] >= num_needed_types) { - return tile; + for (auto& physical_tiles_ids : logical_block->physical_tiles_priority) { + for (auto tile_id : physical_tiles_ids.second) { + if (free_locations[tile_id] >= num_needed_types) { + return &physical_tiles[tile_id]; } } } diff --git a/vpr/src/util/vpr_utils.cpp b/vpr/src/util/vpr_utils.cpp index 7fb47b72809..bf7646ab7ea 100644 --- a/vpr/src/util/vpr_utils.cpp +++ b/vpr/src/util/vpr_utils.cpp @@ -645,12 +645,13 @@ bool is_empty_type(t_logical_block_type_ptr type) { 
t_physical_tile_type_ptr physical_tile_type(t_logical_block_type_ptr logical_block_type) { auto& device_ctx = g_vpr_ctx.device(); + auto& physical_tiles = device_ctx.physical_tile_types; - if (0 == logical_block_type->equivalent_tiles.size()) - return device_ctx.EMPTY_PHYSICAL_TILE_TYPE; - - for (auto type : logical_block_type->equivalent_tiles) { - if (0 == strcmp(type->name, logical_block_type->name)) return type; + // Loop through the ordered map to get tiles in a decreasing priority order + for (auto& physical_tiles_ids : logical_block_type->physical_tiles_priority) { + for (auto tile_id : physical_tiles_ids.second) { + return &physical_tiles[tile_id]; + } } return nullptr; @@ -668,13 +669,15 @@ t_physical_tile_type_ptr physical_tile_type(ClusterBlockId blk) { t_logical_block_type_ptr logical_block_type(t_physical_tile_type_ptr physical_tile_type) { auto& device_ctx = g_vpr_ctx.device(); + auto& logical_blocks = device_ctx.logical_block_types; - if (0 == physical_tile_type->equivalent_sites.size()) - return device_ctx.EMPTY_LOGICAL_BLOCK_TYPE; - - for (auto type : physical_tile_type->equivalent_sites) { - if (0 == strcmp(type->name, physical_tile_type->name)) return type; + // Loop through the ordered map to get tiles in a decreasing priority order + for (auto& logical_blocks_ids : physical_tile_type->logical_blocks_priority) { + for (auto block_id : logical_blocks_ids.second) { + return &logical_blocks[block_id]; + } } + return nullptr; } From 279630c3b2732a835153ce3fad1bd8118a76f089 Mon Sep 17 00:00:00 2001 From: Alessandro Comodi Date: Wed, 2 Oct 2019 17:40:52 +0200 Subject: [PATCH 05/58] place: added post-placement resources utilization log Signed-off-by: Alessandro Comodi --- vpr/src/base/SetupGrid.cpp | 3 +-- vpr/src/place/place.cpp | 33 +++++++++++++++++++++++++++++++++ vpr/src/util/vpr_utils.cpp | 3 --- 3 files changed, 34 insertions(+), 5 deletions(-) diff --git a/vpr/src/base/SetupGrid.cpp b/vpr/src/base/SetupGrid.cpp index 64115810a44..b025e34da21 
100644 --- a/vpr/src/base/SetupGrid.cpp +++ b/vpr/src/base/SetupGrid.cpp @@ -664,8 +664,7 @@ float calculate_device_utilization(const DeviceGrid& grid, std::mapequivalent_tiles[0]; + t_physical_tile_type_ptr type = physical_tile_type(kv.first); size_t count = kv.second; float type_area = type->width * type->height; diff --git a/vpr/src/place/place.cpp b/vpr/src/place/place.cpp index cc98b8d3168..7528c54214f 100644 --- a/vpr/src/place/place.cpp +++ b/vpr/src/place/place.cpp @@ -439,6 +439,7 @@ static void print_place_status(const float t, const float rlim, const float crit_exponent, size_t tot_moves); +static void print_resources_utilization(); /*****************************************************************************/ void try_place(const t_placer_opts& placer_opts, @@ -832,6 +833,8 @@ void try_place(const t_placer_opts& placer_opts, report_aborted_moves(); + print_resources_utilization(); + free_placement_structs(placer_opts); if (placer_opts.place_algorithm == PATH_TIMING_DRIVEN_PLACE || placer_opts.enable_timing_computations) { @@ -2962,3 +2965,33 @@ static void print_place_status(const float t, VTR_LOG(" %6.3f\n", t / oldt); fflush(stdout); } + +static void print_resources_utilization() { + auto& place_ctx = g_vpr_ctx.placement(); + auto& cluster_ctx = g_vpr_ctx.clustering(); + auto& device_ctx = g_vpr_ctx.device(); + + //Record the resource requirement + std::map num_type_instances; + std::map> num_placed_instances; + for (auto blk_id : cluster_ctx.clb_nlist.blocks()) { + auto block_loc = place_ctx.block_locs[blk_id]; + auto loc = block_loc.loc; + + auto physical_tile = device_ctx.grid[loc.x][loc.y].type; + auto logical_block = cluster_ctx.clb_nlist.block_type(blk_id); + + num_type_instances[logical_block]++; + num_placed_instances[logical_block][physical_tile]++; + } + + for (auto logical_block : num_type_instances) { + VTR_LOG("Logical Block: %s\n", logical_block.first->name); + VTR_LOG("\tInstances -> %d\n", logical_block.second); + + 
VTR_LOG("\tPhysical Tiles used:\n"); + for (auto physical_tile : num_placed_instances[logical_block.first]) { + VTR_LOG("\t\t%s: %d\n", physical_tile.first->name, physical_tile.second); + } + } +} diff --git a/vpr/src/util/vpr_utils.cpp b/vpr/src/util/vpr_utils.cpp index bf7646ab7ea..ea49ca67be8 100644 --- a/vpr/src/util/vpr_utils.cpp +++ b/vpr/src/util/vpr_utils.cpp @@ -162,9 +162,6 @@ void sync_grid_to_blocks() { auto type = physical_tile_type(blk_id); - auto logical_type = cluster_ctx.clb_nlist.block_type(blk_id); - VTR_LOG("PHYSICAL TILE: %s\tLOGICAL_BLOCK: %s\n", type->name, logical_type->name); - /* Check range of block coords */ if (blk_x < 0 || blk_y < 0 || (blk_x + type->width - 1) > int(device_ctx.grid.width() - 1) From 2cb76bbdda1c0d1831d365f2829dd93c1193f724 Mon Sep 17 00:00:00 2001 From: Alessandro Comodi Date: Thu, 3 Oct 2019 14:49:59 +0200 Subject: [PATCH 06/58] place: fixed issue with place_macro output echo file Signed-off-by: Alessandro Comodi --- vpr/src/place/place_macro.cpp | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/vpr/src/place/place_macro.cpp b/vpr/src/place/place_macro.cpp index 79f20225dc3..fb6e7deaed7 100644 --- a/vpr/src/place/place_macro.cpp +++ b/vpr/src/place/place_macro.cpp @@ -454,9 +454,13 @@ static void write_place_macros(std::string filename, const std::vectornum_pins; ++ipin) { if (f_idirect_from_blk_pin[itype][ipin] != OPEN) { if (f_direct_type_from_blk_pin[itype][ipin] == SOURCE) { fprintf(f, "%-9s %-9d true SOURCE \n", type.name, ipin); From 7427f5b7c07f946e791909fd42acfdc329bbab83 Mon Sep 17 00:00:00 2001 From: Alessandro Comodi Date: Thu, 3 Oct 2019 14:50:11 +0200 Subject: [PATCH 07/58] place: fixed warnings for potentially uninitialized variable Signed-off-by: Alessandro Comodi --- vpr/src/place/place.cpp | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/vpr/src/place/place.cpp b/vpr/src/place/place.cpp index 7528c54214f..718408cdc38 100644 --- a/vpr/src/place/place.cpp 
+++ b/vpr/src/place/place.cpp @@ -2420,7 +2420,7 @@ static void initial_placement_pl_macros(int macros_max_num_tries, int* free_loca "Initial placement failed.\n" "Could not place macro length %zu with head block %s (#%zu); not enough free locations of type %s (#%d).\n" "VPR cannot auto-size for your circuit, please resize the FPGA manually.\n", - pl_macros[imacro].members.size(), cluster_ctx.clb_nlist.block_name(blk_id).c_str(), size_t(blk_id), type->name, itype); + pl_macros[imacro].members.size(), cluster_ctx.clb_nlist.block_name(blk_id).c_str(), size_t(blk_id), logical_block->name, logical_block->index); } itype = type->index; @@ -2472,7 +2472,6 @@ static void initial_placement_blocks(int* free_locations, enum e_pad_loc_type pa int itype, ipos; auto& cluster_ctx = g_vpr_ctx.clustering(); auto& place_ctx = g_vpr_ctx.mutable_placement(); - auto& device_ctx = g_vpr_ctx.device(); for (auto blk_id : cluster_ctx.clb_nlist.blocks()) { if (place_ctx.block_locs[blk_id].loc.x != -1) { // -1 is a sentinel for an empty block @@ -2497,7 +2496,7 @@ static void initial_placement_blocks(int* free_locations, enum e_pad_loc_type pa VPR_FATAL_ERROR(VPR_ERROR_PLACE, "Initial placement failed.\n" "Could not place block %s (#%zu); no free locations of type %s (#%d).\n", - cluster_ctx.clb_nlist.block_name(blk_id).c_str(), size_t(blk_id), device_ctx.physical_tile_types[itype].name, itype); + cluster_ctx.clb_nlist.block_name(blk_id).c_str(), size_t(blk_id), logical_block->name, logical_block->index); } itype = type->index; From d0f8c8d29c64f76397f86419bf2d4555cc212534 Mon Sep 17 00:00:00 2001 From: Alessandro Comodi Date: Thu, 3 Oct 2019 17:19:12 +0200 Subject: [PATCH 08/58] place: solved capacity issue in route_common Signed-off-by: Alessandro Comodi --- vpr/src/route/route_common.cpp | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/vpr/src/route/route_common.cpp b/vpr/src/route/route_common.cpp index 761aa886c78..8770c9de2c2 100644 --- 
a/vpr/src/route/route_common.cpp +++ b/vpr/src/route/route_common.cpp @@ -1064,7 +1064,12 @@ static vtr::vector> load_net_rr_terminals(const t std::unordered_map map = map_result->second; auto pin_result = map.find(node_block_pin); - auto phys_pin = pin_result->second; + auto orig_phys_pin = pin_result->second; + + VTR_ASSERT(type->num_pins % type->capacity == 0); + int max_num_block_pins = type->num_pins / type->capacity; + + int phys_pin = orig_phys_pin + place_ctx.block_locs[block_id].loc.z * max_num_block_pins; iclass = type->pin_class[phys_pin]; From 38c1a2432d9ff10ef1c3bc036ed692b3bf2ba16f Mon Sep 17 00:00:00 2001 From: Alessandro Comodi Date: Thu, 3 Oct 2019 17:30:13 +0200 Subject: [PATCH 09/58] place: fix empty type seg fault Signed-off-by: Alessandro Comodi --- vpr/src/route/clock_connection_builders.cpp | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/vpr/src/route/clock_connection_builders.cpp b/vpr/src/route/clock_connection_builders.cpp index 68adb5a4f62..b517d5fc460 100644 --- a/vpr/src/route/clock_connection_builders.cpp +++ b/vpr/src/route/clock_connection_builders.cpp @@ -225,6 +225,12 @@ void ClockToPinsConnection::create_switches(const ClockRRGraphBuilder& clock_gra } auto type = grid[x][y].type; + + // Skip EMPTY type + if (is_empty_type(type)) { + continue; + } + auto width_offset = grid[x][y].width_offset; auto height_offset = grid[x][y].height_offset; From bbb2cb79cd96afe6b2a2939fac216d702badbb1f Mon Sep 17 00:00:00 2001 From: Alessandro Comodi Date: Thu, 3 Oct 2019 18:37:16 +0200 Subject: [PATCH 10/58] place: corrected num_pins assertion in clustered netlist Signed-off-by: Alessandro Comodi --- vpr/src/base/clustered_netlist.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/vpr/src/base/clustered_netlist.cpp b/vpr/src/base/clustered_netlist.cpp index 03bbdce99a1..9e4eaba5d32 100644 --- a/vpr/src/base/clustered_netlist.cpp +++ b/vpr/src/base/clustered_netlist.cpp @@ -52,7 +52,7 @@ int 
ClusteredNetlist::block_pin_net_index(const ClusterBlockId blk_id, const int ClusterPinId ClusteredNetlist::block_pin(const ClusterBlockId blk, const int phys_pin_index) const { VTR_ASSERT_SAFE(valid_block_id(blk)); - VTR_ASSERT_SAFE_MSG(phys_pin_index >= 0 && phys_pin_index < block_type(blk)->pb_type->num_pins, "Physical pin index must be in range"); + VTR_ASSERT_SAFE_MSG(phys_pin_index >= 0 && phys_pin_index < physical_tile_type(block_type(blk))->num_pins, "Physical pin index must be in range"); return block_logical_pins_[blk][phys_pin_index]; } From 68e6ae2070969cffb0a9686b40f93896440db6ed Mon Sep 17 00:00:00 2001 From: Alessandro Comodi Date: Fri, 4 Oct 2019 11:42:27 +0200 Subject: [PATCH 11/58] place: added few comments and removed compile warning Signed-off-by: Alessandro Comodi --- libs/libarchfpga/src/physical_types.h | 1 - libs/libarchfpga/src/read_xml_arch_file.cpp | 6 +++--- vpr/src/base/clustered_netlist.h | 6 ++++-- 3 files changed, 7 insertions(+), 6 deletions(-) diff --git a/libs/libarchfpga/src/physical_types.h b/libs/libarchfpga/src/physical_types.h index 97c69dedd03..68bb9351b4c 100644 --- a/libs/libarchfpga/src/physical_types.h +++ b/libs/libarchfpga/src/physical_types.h @@ -652,7 +652,6 @@ struct t_physical_tile_port { int index; int absolute_first_pin_index; int port_index_by_type; - int tile_type_index; }; /* Describes the type for a logical block diff --git a/libs/libarchfpga/src/read_xml_arch_file.cpp b/libs/libarchfpga/src/read_xml_arch_file.cpp index 09788a91d40..50b8251389e 100644 --- a/libs/libarchfpga/src/read_xml_arch_file.cpp +++ b/libs/libarchfpga/src/read_xml_arch_file.cpp @@ -878,8 +878,6 @@ static std::pair ProcessPinString(pugi::xml_node Locations, VTR_ASSERT(port != nullptr); int abs_first_pin_idx = port->absolute_first_pin_index; - std::pair pins; - token_index++; // All the pins of the port are taken or the port has a single pin @@ -3257,10 +3255,12 @@ static void ProcessEquivalentSiteDirects(pugi::xml_node Parent, 
expect_only_attributes(CurDirect, {"from", "to"}, loc_data); std::string from, to; + // `from` attribute is relative to the physical tile pins from = std::string(get_attribute(CurDirect, "from", loc_data).value()); + + // `to` attribute is relative to the logical block pins to = std::string(get_attribute(CurDirect, "to", loc_data).value()); - // XXX auto from_pins = ProcessPinString(CurDirect, PhysicalTileType, from.c_str(), loc_data); auto to_pins = ProcessPinString(CurDirect, LogicalBlockType, to.c_str(), loc_data); diff --git a/vpr/src/base/clustered_netlist.h b/vpr/src/base/clustered_netlist.h index 41d72963e01..849c6174ab1 100644 --- a/vpr/src/base/clustered_netlist.h +++ b/vpr/src/base/clustered_netlist.h @@ -255,8 +255,10 @@ class ClusteredNetlist : public Netlist pin_physical_index_; //The physical pin index (i.e. pin index //in t_physical_tile_type) corresponding - //to the logical pin - vtr::vector_map pin_logical_index_; //The logical pin index of this block + //to the clustered pin + vtr::vector_map pin_logical_index_; //The logical pin index of this block (i.e. 
pin index + //in t_logical_block_type) corresponding + //to the clustered pin //Nets vtr::vector_map net_is_ignored_; //Boolean mapping indicating if the net is ignored From f065724997a58f47e73f0d9001a2bb00552a2b5a Mon Sep 17 00:00:00 2001 From: Alessandro Comodi Date: Fri, 4 Oct 2019 12:40:25 +0200 Subject: [PATCH 12/58] place: solved valgrind memory leak Signed-off-by: Alessandro Comodi --- vpr/src/place/place.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/vpr/src/place/place.cpp b/vpr/src/place/place.cpp index 718408cdc38..e31abf8e6b9 100644 --- a/vpr/src/place/place.cpp +++ b/vpr/src/place/place.cpp @@ -2854,7 +2854,7 @@ int check_macro_placement_consistency() { static t_physical_tile_type_ptr pick_highest_placement_priority_type(t_logical_block_type_ptr logical_block, int num_needed_types, int* free_locations) { auto& device_ctx = g_vpr_ctx.device(); - auto physical_tiles = device_ctx.physical_tile_types; + auto& physical_tiles = device_ctx.physical_tile_types; // Loop through the ordered map to get tiles in a decreasing priority order for (auto& physical_tiles_ids : logical_block->physical_tiles_priority) { From 358cdb5e415a7008f10adbe094d332cf6f15fe47 Mon Sep 17 00:00:00 2001 From: Alessandro Comodi Date: Fri, 4 Oct 2019 13:23:36 +0200 Subject: [PATCH 13/58] place: use vtr::rand only when needed Signed-off-by: Alessandro Comodi --- vpr/src/util/vpr_utils.cpp | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/vpr/src/util/vpr_utils.cpp b/vpr/src/util/vpr_utils.cpp index ea49ca67be8..e2f7e83618f 100644 --- a/vpr/src/util/vpr_utils.cpp +++ b/vpr/src/util/vpr_utils.cpp @@ -2312,7 +2312,14 @@ bool is_tile_compatible(t_physical_tile_type_ptr physical_tile, t_logical_block_ t_physical_tile_type_ptr pick_random_placement_type(t_logical_block_type_ptr logical_block) { auto equivalent_tiles = logical_block->equivalent_tiles; - return equivalent_tiles[vtr::irand((int)equivalent_tiles.size() - 1)]; + size_t 
num_equivalent_tiles = equivalent_tiles.size(); + int index = 0; + + if (num_equivalent_tiles > 1) { + index = vtr::irand((int)equivalent_tiles.size() - 1); + } + + return equivalent_tiles[index]; } void pretty_print_uint(const char* prefix, size_t value, int num_digits, int scientific_precision) { From 671fff2cc21010d594969dcf976777a57947b35c Mon Sep 17 00:00:00 2001 From: Alessandro Comodi Date: Fri, 4 Oct 2019 14:15:36 +0200 Subject: [PATCH 14/58] arch: updated architecture files to have direct pin mapping Signed-off-by: Alessandro Comodi --- libs/libarchfpga/arch/mult_luts_arch.xml | 28 ++- libs/libarchfpga/arch/sample_arch.xml | 28 ++- utils/fasm/test/test_fasm_arch.xml | 12 +- vpr/test/test_read_arch_metadata.xml | 12 +- vtr_flow/arch/bidir/k4_n4_v7_bidir.xml | 12 +- .../arch/bidir/k4_n4_v7_bidir_pass_gate.xml | 12 +- vtr_flow/arch/bidir/k4_n4_v7_l1_bidir.xml | 12 +- .../arch/bidir/k4_n4_v7_longline_bidir.xml | 12 +- vtr_flow/arch/common/arch.xml | 12 +- ...0.85sL2-0.15gL4-on-cb-off-sb_22nm_22nm.xml | 35 +++- ...2-0.15gL4-on-cb-off-sb_22nm_22nm_error.xml | 35 +++- .../custom_grid/buffered_flyover_wires.xml | 30 +++- vtr_flow/arch/custom_grid/column_io.xml | 30 +++- vtr_flow/arch/custom_grid/custom_sbloc.xml | 30 +++- vtr_flow/arch/custom_grid/fixed_grid.xml | 30 +++- .../arch/custom_grid/multiple_io_types.xml | 48 +++++- .../arch/custom_grid/multiwidth_blocks.xml | 30 +++- vtr_flow/arch/custom_grid/non_column.xml | 36 +++- .../non_column_tall_aspect_ratio.xml | 36 +++- .../non_column_wide_aspect_ratio.xml | 36 +++- .../custom_grid/shorted_flyover_wires.xml | 30 +++- .../k6_frac_N10_mem32K_40nm_custom_pins.xml | 28 ++- vtr_flow/arch/equivalent_sites/slice.xml | 17 +- vtr_flow/arch/ispd/ultrascale_ispd.xml | 39 ++++- .../k6_N8_I27_fleI6_fleO1_ff1_nmodes_1.xml | 28 ++- .../k6_N8_I48_fleI5_fleO2_ff1_nmodes_2.xml | 28 ++- .../k6_N8_I48_fleI5_fleO2_ff2_nmodes_2.xml | 28 ++- .../k6_N8_I48_fleI6_fleO1_ff1_nmodes_1.xml | 28 ++- 
.../k6_N8_I48_fleI6_fleO2_ff1_nmodes_2.xml | 28 ++- .../k6_N8_I48_fleI6_fleO2_ff2_nmodes_2.xml | 28 ++- .../k6_N8_I56_fleI7_fleO2_ff1_nmodes_2.xml | 28 ++- .../k6_N8_I56_fleI7_fleO2_ff2_nmodes_2.xml | 28 ++- .../k6_N8_I64_fleI8_fleO2_ff1_nmodes_2.xml | 28 ++- .../k6_N8_I64_fleI8_fleO2_ff2_nmodes_2.xml | 28 ++- .../k6_N8_I72_fleI9_fleO2_ff1_nmodes_2.xml | 28 ++- .../k6_N8_I72_fleI9_fleO2_ff2_nmodes_2.xml | 28 ++- .../k6_N8_I80_fleI10_fleO2_ff1_nmodes_2.xml | 28 ++- .../k6_N8_I80_fleI10_fleO2_ff2_nmodes_2.xml | 28 ++- .../k4_N10_memSize1024_memData16.xml | 28 ++- .../k4_N10_memSize1024_memData2.xml | 28 ++- .../k4_N10_memSize1024_memData32.xml | 28 ++- .../k4_N10_memSize1024_memData4.xml | 28 ++- .../k4_N10_memSize1024_memData64.xml | 28 ++- .../k4_N10_memSize1024_memData8.xml | 28 ++- .../k4_N10_memSize131072_memData16.xml | 28 ++- .../k4_N10_memSize131072_memData2.xml | 28 ++- .../k4_N10_memSize131072_memData32.xml | 28 ++- .../k4_N10_memSize131072_memData4.xml | 28 ++- .../k4_N10_memSize131072_memData64.xml | 28 ++- .../k4_N10_memSize131072_memData8.xml | 28 ++- .../k4_N10_memSize16384_memData16.xml | 28 ++- .../k4_N10_memSize16384_memData2.xml | 28 ++- .../k4_N10_memSize16384_memData32.xml | 28 ++- .../k4_N10_memSize16384_memData4.xml | 28 ++- .../k4_N10_memSize16384_memData64.xml | 28 ++- .../k4_N10_memSize16384_memData8.xml | 28 ++- .../k4_N10_memSize2048_memData16.xml | 28 ++- .../k4_N10_memSize2048_memData2.xml | 28 ++- .../k4_N10_memSize2048_memData32.xml | 28 ++- .../k4_N10_memSize2048_memData4.xml | 28 ++- .../k4_N10_memSize2048_memData64.xml | 28 ++- .../k4_N10_memSize2048_memData8.xml | 28 ++- .../k4_N10_memSize262144_memData16.xml | 28 ++- .../k4_N10_memSize262144_memData2.xml | 28 ++- .../k4_N10_memSize262144_memData32.xml | 28 ++- .../k4_N10_memSize262144_memData4.xml | 28 ++- .../k4_N10_memSize262144_memData64.xml | 28 ++- .../k4_N10_memSize262144_memData8.xml | 28 ++- .../k4_N10_memSize32768_memData16.xml | 28 ++- 
.../k4_N10_memSize32768_memData2.xml | 28 ++- .../k4_N10_memSize32768_memData32.xml | 28 ++- .../k4_N10_memSize32768_memData4.xml | 28 ++- .../k4_N10_memSize32768_memData64.xml | 28 ++- .../k4_N10_memSize32768_memData8.xml | 28 ++- .../k4_N10_memSize4096_memData16.xml | 28 ++- .../k4_N10_memSize4096_memData2.xml | 28 ++- .../k4_N10_memSize4096_memData32.xml | 28 ++- .../k4_N10_memSize4096_memData4.xml | 28 ++- .../k4_N10_memSize4096_memData64.xml | 28 ++- .../k4_N10_memSize4096_memData8.xml | 28 ++- .../k4_N10_memSize512_memData16.xml | 28 ++- .../k4_N10_memSize512_memData2.xml | 28 ++- .../k4_N10_memSize512_memData32.xml | 28 ++- .../k4_N10_memSize512_memData4.xml | 28 ++- .../k4_N10_memSize512_memData64.xml | 28 ++- .../k4_N10_memSize512_memData8.xml | 28 ++- .../k4_N10_memSize524288_memData16.xml | 28 ++- .../k4_N10_memSize524288_memData2.xml | 28 ++- .../k4_N10_memSize524288_memData32.xml | 28 ++- .../k4_N10_memSize524288_memData4.xml | 28 ++- .../k4_N10_memSize524288_memData64.xml | 28 ++- .../k4_N10_memSize524288_memData8.xml | 28 ++- .../k4_N10_memSize65536_memData16.xml | 28 ++- .../k4_N10_memSize65536_memData2.xml | 28 ++- .../k4_N10_memSize65536_memData32.xml | 28 ++- .../k4_N10_memSize65536_memData4.xml | 28 ++- .../k4_N10_memSize65536_memData64.xml | 28 ++- .../k4_N10_memSize65536_memData8.xml | 28 ++- .../k4_N10_memSize8192_memData16.xml | 28 ++- .../k4_N10_memSize8192_memData2.xml | 28 ++- .../k4_N10_memSize8192_memData32.xml | 28 ++- .../k4_N10_memSize8192_memData4.xml | 28 ++- .../k4_N10_memSize8192_memData64.xml | 28 ++- .../k4_N10_memSize8192_memData8.xml | 28 ++- .../k6_N10_mem32K_40nm_nonuniform.xml | 28 ++- .../k6_N10_mem32K_40nm_pulse.xml | 28 ++- .../k6_N10_I40_Fi6_L1_frac0_ff1_45nm.xml | 28 ++- .../k6_N10_I40_Fi6_L2_frac0_ff1_45nm.xml | 28 ++- .../k6_N10_I40_Fi6_L3_frac0_ff1_45nm.xml | 28 ++- .../k6_N10_I40_Fi6_L4_frac0_ff1_130nm.xml | 28 ++- .../k6_N10_I40_Fi6_L4_frac0_ff1_22nm.xml | 28 ++- .../k6_N10_I40_Fi6_L4_frac0_ff1_45nm.xml | 28 ++- 
.../k6_N10_I40_Fi6_L4_frac0_ff1_C10_45nm.xml | 28 ++- .../k6_N10_I40_Fi6_L4_frac0_ff1_C15_45nm.xml | 28 ++- .../k6_N10_I40_Fi6_L4_frac0_ff1_C20_45nm.xml | 28 ++- .../k6_N10_I40_Fi6_L4_frac0_ff1_C25_45nm.xml | 28 ++- .../k6_N10_I40_Fi6_L4_frac0_ff1_C30_45nm.xml | 28 ++- .../k6_N10_I40_Fi6_L4_frac0_ff1_C35_45nm.xml | 28 ++- .../k6_N10_I40_Fi6_L4_frac0_ff1_C40_45nm.xml | 28 ++- .../k6_N10_I40_Fi6_L4_frac0_ff1_C45_45nm.xml | 28 ++- .../k6_N10_I40_Fi6_L4_frac0_ff1_C50_45nm.xml | 28 ++- .../k6_N10_I40_Fi6_L4_frac0_ff1_C5_45nm.xml | 28 ++- .../k6_N10_I40_Fi6_L4_frac1_ff1_45nm.xml | 28 ++- .../k6_N10_I40_Fi6_L4_frac1_ff2_45nm.xml | 28 ++- .../k6_N10_I40_Fi6_L4_frac1_ff2_C10_45nm.xml | 28 ++- .../k6_N10_I40_Fi6_L4_frac1_ff2_C15_45nm.xml | 28 ++- .../k6_N10_I40_Fi6_L4_frac1_ff2_C20_45nm.xml | 28 ++- .../k6_N10_I40_Fi6_L4_frac1_ff2_C25_45nm.xml | 28 ++- .../k6_N10_I40_Fi6_L4_frac1_ff2_C30_45nm.xml | 28 ++- .../k6_N10_I40_Fi6_L4_frac1_ff2_C35_45nm.xml | 28 ++- .../k6_N10_I40_Fi6_L4_frac1_ff2_C40_45nm.xml | 28 ++- .../k6_N10_I40_Fi6_L4_frac1_ff2_C45_45nm.xml | 28 ++- .../k6_N10_I40_Fi6_L4_frac1_ff2_C50_45nm.xml | 28 ++- .../k6_N10_I40_Fi6_L4_frac1_ff2_C55_45nm.xml | 28 ++- .../k6_N10_I40_Fi6_L4_frac1_ff2_C5_45nm.xml | 28 ++- .../k6_N10_I40_Fi6_L4_frac1_ff2_C60_45nm.xml | 28 ++- .../k6_N10_I40_Fi6_L5_frac0_ff1_45nm.xml | 28 ++- .../k6_N10_I40_Fi6_L6_frac0_ff1_45nm.xml | 28 ++- .../k6_N10_I40_Fi7_L4_frac1_ff1_45nm.xml | 28 ++- .../k6_N10_I40_Fi7_L4_frac1_ff2_45nm.xml | 28 ++- .../k6_N10_I40_Fi8_L4_frac1_ff1_45nm.xml | 28 ++- .../k6_N10_I40_Fi8_L4_frac1_ff2_45nm.xml | 28 ++- .../k6_N10_I47_Fi7_L4_frac1_ff1_45nm.xml | 28 ++- .../k6_N10_I47_Fi7_L4_frac1_ff2_45nm.xml | 28 ++- .../k6_N10_I53_Fi8_L4_frac1_ff1_45nm.xml | 28 ++- .../k6_N10_I53_Fi8_L4_frac1_ff2_45nm.xml | 28 ++- vtr_flow/arch/routing_mode/arch.xml | 12 +- vtr_flow/arch/routing_mode/slicem.xml | 58 ++++++- vtr_flow/arch/timing/EArch.xml | 33 +++- .../fixed_k6_N8_gate_boost_0.2V_22nm.xml | 31 +++- 
...8_lookahead_chain_gate_boost_0.2V_22nm.xml | 33 +++- ..._unbalanced_chain_gate_boost_0.2V_22nm.xml | 33 +++- ...6_N8_ripple_chain_gate_boost_0.2V_22nm.xml | 33 +++- ...nced_ripple_chain_gate_boost_0.2V_22nm.xml | 33 +++- .../fixed_k6_frac_2ripple_N8_22nm.xml | 33 +++- .../fixed_k6_frac_2uripple_N8_22nm.xml | 33 +++- .../fixed_size/fixed_k6_frac_N8_22nm.xml | 31 +++- .../fixed_k6_frac_ripple_N8_22nm.xml | 33 +++- .../fixed_k6_frac_uripple_N8_22nm.xml | 33 +++- ...8_lookahead_chain_gate_boost_0.2V_22nm.xml | 33 +++- ..._unbalanced_chain_gate_boost_0.2V_22nm.xml | 33 +++- ...6_N8_ripple_chain_gate_boost_0.2V_22nm.xml | 33 +++- ...nced_ripple_chain_gate_boost_0.2V_22nm.xml | 33 +++- .../k6_frac_2ripple_N8_22nm.xml | 33 +++- .../k6_frac_2uripple_N8_22nm.xml | 33 +++- .../fraclut_carrychain/k6_frac_N8_22nm.xml | 31 +++- .../k6_frac_ripple_N8_22nm.xml | 33 +++- .../k6_frac_uripple_N8_22nm.xml | 33 +++- .../global_nonuniform/x_delta_y_delta.xml | 28 ++- .../global_nonuniform/x_delta_y_uniform.xml | 28 ++- .../x_gaussian_y_gaussian.xml | 28 ++- .../x_gaussian_y_uniform.xml | 28 ++- .../global_nonuniform/x_uniform_y_delta.xml | 28 ++- .../x_uniform_y_gaussian.xml | 28 ++- vtr_flow/arch/timing/hard_fpu_arch_timing.xml | 24 ++- vtr_flow/arch/timing/k4_N4_90nm.xml | 12 +- .../timing/k4_N4_90nm_default_fc_pinloc.xml | 12 +- vtr_flow/arch/timing/k4_N8_legacy_45nm.xml | 12 +- vtr_flow/arch/timing/k6_N10_40nm.xml | 12 +- .../timing/k6_N10_gate_boost_0.2V_22nm.xml | 31 +++- vtr_flow/arch/timing/k6_N10_legacy_45nm.xml | 12 +- vtr_flow/arch/timing/k6_N10_mem32K_40nm.xml | 28 ++- .../arch/timing/k6_N10_mem32K_40nm_fc_abs.xml | 28 ++- ..._N10_ripple_chain_gate_boost_0.2V_22nm.xml | 33 +++- ...nced_ripple_chain_gate_boost_0.2V_22nm.xml | 33 +++- .../timing/k6_N8_gate_boost_0.2V_22nm.xml | 31 +++- ...8_lookahead_chain_gate_boost_0.2V_22nm.xml | 33 +++- ..._unbalanced_chain_gate_boost_0.2V_22nm.xml | 33 +++- ...6_N8_ripple_chain_gate_boost_0.2V_22nm.xml | 33 +++- 
...nced_ripple_chain_gate_boost_0.2V_22nm.xml | 33 +++- vtr_flow/arch/timing/k6_frac_N10_40nm.xml | 12 +- ...c_N10_4add_2chains_depop50_mem20K_22nm.xml | 32 +++- ...dd_2chains_tie_off_depop50_mem20K_22nm.xml | 32 +++- ...rac_N10_frac_chain_depop50_mem32K_40nm.xml | 33 +++- .../k6_frac_N10_frac_chain_mem32K_40nm.xml | 30 +++- ...frac_N10_frac_chain_mem32K_htree0_40nm.xml | 30 +++- ...rac_chain_mem32K_htree0_routedCLK_40nm.xml | 30 +++- ...N10_frac_chain_mem32K_htree0short_40nm.xml | 30 +++- .../arch/timing/k6_frac_N10_mem32K_40nm.xml | 28 ++- vtr_flow/arch/timing/soft_fpu_arch_timing.xml | 12 +- .../timing/soft_fpu_arch_timing_chain.xml | 14 +- vtr_flow/arch/timing/xc6vlx240tff1156.xml | 163 +++++++++++++++++- vtr_flow/arch/titan/stratixiv_arch.timing.xml | 51 +++++- 203 files changed, 5043 insertions(+), 792 deletions(-) diff --git a/libs/libarchfpga/arch/mult_luts_arch.xml b/libs/libarchfpga/arch/mult_luts_arch.xml index e1e4d7a3573..29a0887fa30 100644 --- a/libs/libarchfpga/arch/mult_luts_arch.xml +++ b/libs/libarchfpga/arch/mult_luts_arch.xml @@ -54,7 +54,11 @@ - + + + + + @@ -69,7 +73,11 @@ - + + + + + @@ -79,7 +87,15 @@ - + + + + + + + + + @@ -93,7 +109,11 @@ - + + + + + diff --git a/libs/libarchfpga/arch/sample_arch.xml b/libs/libarchfpga/arch/sample_arch.xml index 345b346dd28..96958667c67 100755 --- a/libs/libarchfpga/arch/sample_arch.xml +++ b/libs/libarchfpga/arch/sample_arch.xml @@ -135,7 +135,11 @@ - + + + + + @@ -150,7 +154,11 @@ - + + + + + @@ -160,7 +168,11 @@ - + + + + + @@ -170,7 +182,15 @@ - + + + + + + + + + diff --git a/utils/fasm/test/test_fasm_arch.xml b/utils/fasm/test/test_fasm_arch.xml index 14bbe144e43..af5724f5133 100644 --- a/utils/fasm/test/test_fasm_arch.xml +++ b/utils/fasm/test/test_fasm_arch.xml @@ -3,7 +3,11 @@ - + + + + + @@ -18,7 +22,11 @@ - + + + + + diff --git a/vpr/test/test_read_arch_metadata.xml b/vpr/test/test_read_arch_metadata.xml index 1068b139531..d2df1d08ec3 100644 --- a/vpr/test/test_read_arch_metadata.xml +++ 
b/vpr/test/test_read_arch_metadata.xml @@ -3,7 +3,11 @@ - + + + + + @@ -18,7 +22,11 @@ - + + + + + diff --git a/vtr_flow/arch/bidir/k4_n4_v7_bidir.xml b/vtr_flow/arch/bidir/k4_n4_v7_bidir.xml index 009fef90cf6..8a387aee81e 100644 --- a/vtr_flow/arch/bidir/k4_n4_v7_bidir.xml +++ b/vtr_flow/arch/bidir/k4_n4_v7_bidir.xml @@ -26,7 +26,11 @@ Architecture based off Stratix IV - + + + + + @@ -41,7 +45,11 @@ Architecture based off Stratix IV - + + + + + diff --git a/vtr_flow/arch/bidir/k4_n4_v7_bidir_pass_gate.xml b/vtr_flow/arch/bidir/k4_n4_v7_bidir_pass_gate.xml index e6d3fb6eca3..c4359c4d314 100644 --- a/vtr_flow/arch/bidir/k4_n4_v7_bidir_pass_gate.xml +++ b/vtr_flow/arch/bidir/k4_n4_v7_bidir_pass_gate.xml @@ -26,7 +26,11 @@ Architecture based off Stratix IV - + + + + + @@ -41,7 +45,11 @@ Architecture based off Stratix IV - + + + + + diff --git a/vtr_flow/arch/bidir/k4_n4_v7_l1_bidir.xml b/vtr_flow/arch/bidir/k4_n4_v7_l1_bidir.xml index 933ab020ca3..f6e8d532818 100644 --- a/vtr_flow/arch/bidir/k4_n4_v7_l1_bidir.xml +++ b/vtr_flow/arch/bidir/k4_n4_v7_l1_bidir.xml @@ -26,7 +26,11 @@ Architecture based off Stratix IV - + + + + + @@ -41,7 +45,11 @@ Architecture based off Stratix IV - + + + + + diff --git a/vtr_flow/arch/bidir/k4_n4_v7_longline_bidir.xml b/vtr_flow/arch/bidir/k4_n4_v7_longline_bidir.xml index 087558b8143..6eb20c66c66 100644 --- a/vtr_flow/arch/bidir/k4_n4_v7_longline_bidir.xml +++ b/vtr_flow/arch/bidir/k4_n4_v7_longline_bidir.xml @@ -26,7 +26,11 @@ Architecture based off Stratix IV - + + + + + @@ -41,7 +45,11 @@ Architecture based off Stratix IV - + + + + + diff --git a/vtr_flow/arch/common/arch.xml b/vtr_flow/arch/common/arch.xml index 5fdb82c166b..d388bfd1f4d 100644 --- a/vtr_flow/arch/common/arch.xml +++ b/vtr_flow/arch/common/arch.xml @@ -14,7 +14,12 @@ - + + + + + + @@ -30,7 +35,10 @@ - + + + + diff --git a/vtr_flow/arch/complex_switch/k4_N8_topology-0.85sL2-0.15gL4-on-cb-off-sb_22nm_22nm.xml 
b/vtr_flow/arch/complex_switch/k4_N8_topology-0.85sL2-0.15gL4-on-cb-off-sb_22nm_22nm.xml index 5b2a4a00959..a9e0e89cc3e 100644 --- a/vtr_flow/arch/complex_switch/k4_N8_topology-0.85sL2-0.15gL4-on-cb-off-sb_22nm_22nm.xml +++ b/vtr_flow/arch/complex_switch/k4_N8_topology-0.85sL2-0.15gL4-on-cb-off-sb_22nm_22nm.xml @@ -69,7 +69,11 @@ - + + + + + @@ -88,7 +92,18 @@ - + + + + + + + + + + + + @@ -116,7 +131,11 @@ - + + + + + @@ -130,7 +149,15 @@ - + + + + + + + + + diff --git a/vtr_flow/arch/complex_switch/k4_N8_topology-0.85sL2-0.15gL4-on-cb-off-sb_22nm_22nm_error.xml b/vtr_flow/arch/complex_switch/k4_N8_topology-0.85sL2-0.15gL4-on-cb-off-sb_22nm_22nm_error.xml index 5b34bb01bd9..272943c8ecb 100644 --- a/vtr_flow/arch/complex_switch/k4_N8_topology-0.85sL2-0.15gL4-on-cb-off-sb_22nm_22nm_error.xml +++ b/vtr_flow/arch/complex_switch/k4_N8_topology-0.85sL2-0.15gL4-on-cb-off-sb_22nm_22nm_error.xml @@ -69,7 +69,11 @@ - + + + + + @@ -88,7 +92,18 @@ - + + + + + + + + + + + + @@ -116,7 +131,11 @@ - + + + + + @@ -130,7 +149,15 @@ - + + + + + + + + + diff --git a/vtr_flow/arch/custom_grid/buffered_flyover_wires.xml b/vtr_flow/arch/custom_grid/buffered_flyover_wires.xml index b1932706a79..fc7e61a5cad 100644 --- a/vtr_flow/arch/custom_grid/buffered_flyover_wires.xml +++ b/vtr_flow/arch/custom_grid/buffered_flyover_wires.xml @@ -160,7 +160,11 @@ - + + + + + @@ -175,7 +179,13 @@ - + + + + + + + @@ -190,7 +200,11 @@ - + + + + + @@ -213,7 +227,15 @@ - + + + + + + + + + diff --git a/vtr_flow/arch/custom_grid/column_io.xml b/vtr_flow/arch/custom_grid/column_io.xml index d7af343f10d..240d306f655 100644 --- a/vtr_flow/arch/custom_grid/column_io.xml +++ b/vtr_flow/arch/custom_grid/column_io.xml @@ -160,7 +160,11 @@ - + + + + + @@ -170,7 +174,13 @@ - + + + + + + + @@ -185,7 +195,11 @@ - + + + + + @@ -195,7 +209,15 @@ - + + + + + + + + + diff --git a/vtr_flow/arch/custom_grid/custom_sbloc.xml b/vtr_flow/arch/custom_grid/custom_sbloc.xml index a17df3083ce..9a1ed7ddc37 100644 --- 
a/vtr_flow/arch/custom_grid/custom_sbloc.xml +++ b/vtr_flow/arch/custom_grid/custom_sbloc.xml @@ -160,7 +160,11 @@ - + + + + + @@ -175,7 +179,13 @@ - + + + + + + + @@ -190,7 +200,11 @@ - + + + + + @@ -213,7 +227,15 @@ - + + + + + + + + + diff --git a/vtr_flow/arch/custom_grid/fixed_grid.xml b/vtr_flow/arch/custom_grid/fixed_grid.xml index a625dfb2178..37af3bbae72 100644 --- a/vtr_flow/arch/custom_grid/fixed_grid.xml +++ b/vtr_flow/arch/custom_grid/fixed_grid.xml @@ -160,7 +160,11 @@ - + + + + + @@ -175,7 +179,13 @@ - + + + + + + + @@ -190,7 +200,11 @@ - + + + + + @@ -200,7 +214,15 @@ - + + + + + + + + + diff --git a/vtr_flow/arch/custom_grid/multiple_io_types.xml b/vtr_flow/arch/custom_grid/multiple_io_types.xml index 44e1fd64338..88440e8b591 100644 --- a/vtr_flow/arch/custom_grid/multiple_io_types.xml +++ b/vtr_flow/arch/custom_grid/multiple_io_types.xml @@ -160,7 +160,11 @@ - + + + + + @@ -172,7 +176,11 @@ - + + + + + @@ -184,7 +192,11 @@ - + + + + + @@ -196,7 +208,11 @@ - + + + + + @@ -208,7 +224,13 @@ - + + + + + + + @@ -223,7 +245,11 @@ - + + + + + @@ -233,7 +259,15 @@ - + + + + + + + + + diff --git a/vtr_flow/arch/custom_grid/multiwidth_blocks.xml b/vtr_flow/arch/custom_grid/multiwidth_blocks.xml index 648a7462865..00b27e73594 100644 --- a/vtr_flow/arch/custom_grid/multiwidth_blocks.xml +++ b/vtr_flow/arch/custom_grid/multiwidth_blocks.xml @@ -160,7 +160,11 @@ - + + + + + @@ -175,7 +179,13 @@ - + + + + + + + @@ -190,7 +200,11 @@ - + + + + + @@ -200,7 +214,15 @@ - + + + + + + + + + diff --git a/vtr_flow/arch/custom_grid/non_column.xml b/vtr_flow/arch/custom_grid/non_column.xml index 3d8ce76d129..9517de5c6de 100644 --- a/vtr_flow/arch/custom_grid/non_column.xml +++ b/vtr_flow/arch/custom_grid/non_column.xml @@ -169,7 +169,11 @@ - + + + + + @@ -184,7 +188,13 @@ - + + + + + + + @@ -199,7 +209,11 @@ - + + + + + @@ -209,7 +223,15 @@ - + + + + + + + + + @@ -223,7 +245,11 @@ - + + + + + diff --git a/vtr_flow/arch/custom_grid/non_column_tall_aspect_ratio.xml 
b/vtr_flow/arch/custom_grid/non_column_tall_aspect_ratio.xml index 35b9cc986f5..2a234551415 100644 --- a/vtr_flow/arch/custom_grid/non_column_tall_aspect_ratio.xml +++ b/vtr_flow/arch/custom_grid/non_column_tall_aspect_ratio.xml @@ -169,7 +169,11 @@ - + + + + + @@ -184,7 +188,13 @@ - + + + + + + + @@ -199,7 +209,11 @@ - + + + + + @@ -209,7 +223,15 @@ - + + + + + + + + + @@ -223,7 +245,11 @@ - + + + + + diff --git a/vtr_flow/arch/custom_grid/non_column_wide_aspect_ratio.xml b/vtr_flow/arch/custom_grid/non_column_wide_aspect_ratio.xml index 53998ad7b0c..c0660792c1a 100644 --- a/vtr_flow/arch/custom_grid/non_column_wide_aspect_ratio.xml +++ b/vtr_flow/arch/custom_grid/non_column_wide_aspect_ratio.xml @@ -169,7 +169,11 @@ - + + + + + @@ -184,7 +188,13 @@ - + + + + + + + @@ -199,7 +209,11 @@ - + + + + + @@ -209,7 +223,15 @@ - + + + + + + + + + @@ -223,7 +245,11 @@ - + + + + + diff --git a/vtr_flow/arch/custom_grid/shorted_flyover_wires.xml b/vtr_flow/arch/custom_grid/shorted_flyover_wires.xml index babc5933049..9b7ed82e617 100644 --- a/vtr_flow/arch/custom_grid/shorted_flyover_wires.xml +++ b/vtr_flow/arch/custom_grid/shorted_flyover_wires.xml @@ -160,7 +160,11 @@ - + + + + + @@ -175,7 +179,13 @@ - + + + + + + + @@ -190,7 +200,11 @@ - + + + + + @@ -213,7 +227,15 @@ - + + + + + + + + + diff --git a/vtr_flow/arch/custom_pins/k6_frac_N10_mem32K_40nm_custom_pins.xml b/vtr_flow/arch/custom_pins/k6_frac_N10_mem32K_40nm_custom_pins.xml index 2156d0c9240..c1ae10b27f4 100644 --- a/vtr_flow/arch/custom_pins/k6_frac_N10_mem32K_40nm_custom_pins.xml +++ b/vtr_flow/arch/custom_pins/k6_frac_N10_mem32K_40nm_custom_pins.xml @@ -138,7 +138,11 @@ - + + + + + @@ -153,7 +157,11 @@ - + + + + + @@ -163,7 +171,11 @@ - + + + + + @@ -210,7 +222,15 @@ - + + + + + + + + + diff --git a/vtr_flow/arch/equivalent_sites/slice.xml b/vtr_flow/arch/equivalent_sites/slice.xml index 43726175057..b182649f552 100644 --- a/vtr_flow/arch/equivalent_sites/slice.xml +++ b/vtr_flow/arch/equivalent_sites/slice.xml 
@@ -1,12 +1,11 @@ - - - - + + + @@ -16,10 +15,10 @@ - - - - + + + + @@ -533,7 +532,6 @@ - @@ -1017,7 +1015,6 @@ - diff --git a/vtr_flow/arch/ispd/ultrascale_ispd.xml b/vtr_flow/arch/ispd/ultrascale_ispd.xml index e8d42d3608b..5d6a5a3c55b 100644 --- a/vtr_flow/arch/ispd/ultrascale_ispd.xml +++ b/vtr_flow/arch/ispd/ultrascale_ispd.xml @@ -254,7 +254,27 @@ - + + + + + + + + + + + + + + + + + + + + + @@ -280,7 +300,11 @@ - + + + + + @@ -290,7 +314,11 @@ - + + + + + @@ -300,7 +328,10 @@ - + + + + diff --git a/vtr_flow/arch/no_timing/fracturable_lut_sweep/k6_N8_I27_fleI6_fleO1_ff1_nmodes_1.xml b/vtr_flow/arch/no_timing/fracturable_lut_sweep/k6_N8_I27_fleI6_fleO1_ff1_nmodes_1.xml index 59fe9770579..0e7d490d497 100755 --- a/vtr_flow/arch/no_timing/fracturable_lut_sweep/k6_N8_I27_fleI6_fleO1_ff1_nmodes_1.xml +++ b/vtr_flow/arch/no_timing/fracturable_lut_sweep/k6_N8_I27_fleI6_fleO1_ff1_nmodes_1.xml @@ -54,7 +54,11 @@ - + + + + + @@ -69,7 +73,11 @@ - + + + + + @@ -79,7 +87,15 @@ - + + + + + + + + + @@ -93,7 +109,11 @@ - + + + + + diff --git a/vtr_flow/arch/no_timing/fracturable_lut_sweep/k6_N8_I48_fleI5_fleO2_ff1_nmodes_2.xml b/vtr_flow/arch/no_timing/fracturable_lut_sweep/k6_N8_I48_fleI5_fleO2_ff1_nmodes_2.xml index 099efce007d..7693f784e50 100755 --- a/vtr_flow/arch/no_timing/fracturable_lut_sweep/k6_N8_I48_fleI5_fleO2_ff1_nmodes_2.xml +++ b/vtr_flow/arch/no_timing/fracturable_lut_sweep/k6_N8_I48_fleI5_fleO2_ff1_nmodes_2.xml @@ -54,7 +54,11 @@ - + + + + + @@ -69,7 +73,11 @@ - + + + + + @@ -79,7 +87,15 @@ - + + + + + + + + + @@ -93,7 +109,11 @@ - + + + + + diff --git a/vtr_flow/arch/no_timing/fracturable_lut_sweep/k6_N8_I48_fleI5_fleO2_ff2_nmodes_2.xml b/vtr_flow/arch/no_timing/fracturable_lut_sweep/k6_N8_I48_fleI5_fleO2_ff2_nmodes_2.xml index 21abda7ff3a..0104da8443c 100755 --- a/vtr_flow/arch/no_timing/fracturable_lut_sweep/k6_N8_I48_fleI5_fleO2_ff2_nmodes_2.xml +++ b/vtr_flow/arch/no_timing/fracturable_lut_sweep/k6_N8_I48_fleI5_fleO2_ff2_nmodes_2.xml @@ -54,7 +54,11 @@ - 
+ + + + + @@ -69,7 +73,11 @@ - + + + + + @@ -79,7 +87,15 @@ - + + + + + + + + + @@ -93,7 +109,11 @@ - + + + + + diff --git a/vtr_flow/arch/no_timing/fracturable_lut_sweep/k6_N8_I48_fleI6_fleO1_ff1_nmodes_1.xml b/vtr_flow/arch/no_timing/fracturable_lut_sweep/k6_N8_I48_fleI6_fleO1_ff1_nmodes_1.xml index 85ad6c4092d..5189f643925 100755 --- a/vtr_flow/arch/no_timing/fracturable_lut_sweep/k6_N8_I48_fleI6_fleO1_ff1_nmodes_1.xml +++ b/vtr_flow/arch/no_timing/fracturable_lut_sweep/k6_N8_I48_fleI6_fleO1_ff1_nmodes_1.xml @@ -54,7 +54,11 @@ - + + + + + @@ -69,7 +73,11 @@ - + + + + + @@ -79,7 +87,15 @@ - + + + + + + + + + @@ -93,7 +109,11 @@ - + + + + + diff --git a/vtr_flow/arch/no_timing/fracturable_lut_sweep/k6_N8_I48_fleI6_fleO2_ff1_nmodes_2.xml b/vtr_flow/arch/no_timing/fracturable_lut_sweep/k6_N8_I48_fleI6_fleO2_ff1_nmodes_2.xml index 03f756e7d85..20131d0001e 100755 --- a/vtr_flow/arch/no_timing/fracturable_lut_sweep/k6_N8_I48_fleI6_fleO2_ff1_nmodes_2.xml +++ b/vtr_flow/arch/no_timing/fracturable_lut_sweep/k6_N8_I48_fleI6_fleO2_ff1_nmodes_2.xml @@ -54,7 +54,11 @@ - + + + + + @@ -69,7 +73,11 @@ - + + + + + @@ -79,7 +87,15 @@ - + + + + + + + + + @@ -93,7 +109,11 @@ - + + + + + diff --git a/vtr_flow/arch/no_timing/fracturable_lut_sweep/k6_N8_I48_fleI6_fleO2_ff2_nmodes_2.xml b/vtr_flow/arch/no_timing/fracturable_lut_sweep/k6_N8_I48_fleI6_fleO2_ff2_nmodes_2.xml index 24c083ddb6a..69c6b1caec1 100755 --- a/vtr_flow/arch/no_timing/fracturable_lut_sweep/k6_N8_I48_fleI6_fleO2_ff2_nmodes_2.xml +++ b/vtr_flow/arch/no_timing/fracturable_lut_sweep/k6_N8_I48_fleI6_fleO2_ff2_nmodes_2.xml @@ -54,7 +54,11 @@ - + + + + + @@ -69,7 +73,11 @@ - + + + + + @@ -79,7 +87,15 @@ - + + + + + + + + + @@ -93,7 +109,11 @@ - + + + + + diff --git a/vtr_flow/arch/no_timing/fracturable_lut_sweep/k6_N8_I56_fleI7_fleO2_ff1_nmodes_2.xml b/vtr_flow/arch/no_timing/fracturable_lut_sweep/k6_N8_I56_fleI7_fleO2_ff1_nmodes_2.xml index 4121489c726..d2e4e35b553 100755 --- 
a/vtr_flow/arch/no_timing/fracturable_lut_sweep/k6_N8_I56_fleI7_fleO2_ff1_nmodes_2.xml +++ b/vtr_flow/arch/no_timing/fracturable_lut_sweep/k6_N8_I56_fleI7_fleO2_ff1_nmodes_2.xml @@ -54,7 +54,11 @@ - + + + + + @@ -69,7 +73,11 @@ - + + + + + @@ -79,7 +87,15 @@ - + + + + + + + + + @@ -93,7 +109,11 @@ - + + + + + diff --git a/vtr_flow/arch/no_timing/fracturable_lut_sweep/k6_N8_I56_fleI7_fleO2_ff2_nmodes_2.xml b/vtr_flow/arch/no_timing/fracturable_lut_sweep/k6_N8_I56_fleI7_fleO2_ff2_nmodes_2.xml index dc0f5aaa65a..9649004effe 100755 --- a/vtr_flow/arch/no_timing/fracturable_lut_sweep/k6_N8_I56_fleI7_fleO2_ff2_nmodes_2.xml +++ b/vtr_flow/arch/no_timing/fracturable_lut_sweep/k6_N8_I56_fleI7_fleO2_ff2_nmodes_2.xml @@ -54,7 +54,11 @@ - + + + + + @@ -69,7 +73,11 @@ - + + + + + @@ -79,7 +87,15 @@ - + + + + + + + + + @@ -93,7 +109,11 @@ - + + + + + diff --git a/vtr_flow/arch/no_timing/fracturable_lut_sweep/k6_N8_I64_fleI8_fleO2_ff1_nmodes_2.xml b/vtr_flow/arch/no_timing/fracturable_lut_sweep/k6_N8_I64_fleI8_fleO2_ff1_nmodes_2.xml index 8bdb792db6c..077cdb4b94e 100755 --- a/vtr_flow/arch/no_timing/fracturable_lut_sweep/k6_N8_I64_fleI8_fleO2_ff1_nmodes_2.xml +++ b/vtr_flow/arch/no_timing/fracturable_lut_sweep/k6_N8_I64_fleI8_fleO2_ff1_nmodes_2.xml @@ -54,7 +54,11 @@ - + + + + + @@ -69,7 +73,11 @@ - + + + + + @@ -79,7 +87,15 @@ - + + + + + + + + + @@ -93,7 +109,11 @@ - + + + + + diff --git a/vtr_flow/arch/no_timing/fracturable_lut_sweep/k6_N8_I64_fleI8_fleO2_ff2_nmodes_2.xml b/vtr_flow/arch/no_timing/fracturable_lut_sweep/k6_N8_I64_fleI8_fleO2_ff2_nmodes_2.xml index cd5b6834574..34481ea1c8b 100755 --- a/vtr_flow/arch/no_timing/fracturable_lut_sweep/k6_N8_I64_fleI8_fleO2_ff2_nmodes_2.xml +++ b/vtr_flow/arch/no_timing/fracturable_lut_sweep/k6_N8_I64_fleI8_fleO2_ff2_nmodes_2.xml @@ -54,7 +54,11 @@ - + + + + + @@ -69,7 +73,11 @@ - + + + + + @@ -79,7 +87,15 @@ - + + + + + + + + + @@ -93,7 +109,11 @@ - + + + + + diff --git 
a/vtr_flow/arch/no_timing/fracturable_lut_sweep/k6_N8_I72_fleI9_fleO2_ff1_nmodes_2.xml b/vtr_flow/arch/no_timing/fracturable_lut_sweep/k6_N8_I72_fleI9_fleO2_ff1_nmodes_2.xml index 14bfda4542a..c81f86ec75c 100755 --- a/vtr_flow/arch/no_timing/fracturable_lut_sweep/k6_N8_I72_fleI9_fleO2_ff1_nmodes_2.xml +++ b/vtr_flow/arch/no_timing/fracturable_lut_sweep/k6_N8_I72_fleI9_fleO2_ff1_nmodes_2.xml @@ -54,7 +54,11 @@ - + + + + + @@ -69,7 +73,11 @@ - + + + + + @@ -79,7 +87,15 @@ - + + + + + + + + + @@ -93,7 +109,11 @@ - + + + + + diff --git a/vtr_flow/arch/no_timing/fracturable_lut_sweep/k6_N8_I72_fleI9_fleO2_ff2_nmodes_2.xml b/vtr_flow/arch/no_timing/fracturable_lut_sweep/k6_N8_I72_fleI9_fleO2_ff2_nmodes_2.xml index 4d93eb12952..10ed6407b2d 100755 --- a/vtr_flow/arch/no_timing/fracturable_lut_sweep/k6_N8_I72_fleI9_fleO2_ff2_nmodes_2.xml +++ b/vtr_flow/arch/no_timing/fracturable_lut_sweep/k6_N8_I72_fleI9_fleO2_ff2_nmodes_2.xml @@ -54,7 +54,11 @@ - + + + + + @@ -69,7 +73,11 @@ - + + + + + @@ -79,7 +87,15 @@ - + + + + + + + + + @@ -93,7 +109,11 @@ - + + + + + diff --git a/vtr_flow/arch/no_timing/fracturable_lut_sweep/k6_N8_I80_fleI10_fleO2_ff1_nmodes_2.xml b/vtr_flow/arch/no_timing/fracturable_lut_sweep/k6_N8_I80_fleI10_fleO2_ff1_nmodes_2.xml index 12a18ce614e..b8b3c5b7c36 100755 --- a/vtr_flow/arch/no_timing/fracturable_lut_sweep/k6_N8_I80_fleI10_fleO2_ff1_nmodes_2.xml +++ b/vtr_flow/arch/no_timing/fracturable_lut_sweep/k6_N8_I80_fleI10_fleO2_ff1_nmodes_2.xml @@ -54,7 +54,11 @@ - + + + + + @@ -69,7 +73,11 @@ - + + + + + @@ -79,7 +87,15 @@ - + + + + + + + + + @@ -93,7 +109,11 @@ - + + + + + diff --git a/vtr_flow/arch/no_timing/fracturable_lut_sweep/k6_N8_I80_fleI10_fleO2_ff2_nmodes_2.xml b/vtr_flow/arch/no_timing/fracturable_lut_sweep/k6_N8_I80_fleI10_fleO2_ff2_nmodes_2.xml index 98630d2609e..91a8f86b132 100755 --- a/vtr_flow/arch/no_timing/fracturable_lut_sweep/k6_N8_I80_fleI10_fleO2_ff2_nmodes_2.xml +++ 
b/vtr_flow/arch/no_timing/fracturable_lut_sweep/k6_N8_I80_fleI10_fleO2_ff2_nmodes_2.xml @@ -54,7 +54,11 @@ - + + + + + @@ -69,7 +73,11 @@ - + + + + + @@ -79,7 +87,15 @@ - + + + + + + + + + @@ -93,7 +109,11 @@ - + + + + + diff --git a/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize1024_memData16.xml b/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize1024_memData16.xml index 2c662901725..44476873a49 100755 --- a/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize1024_memData16.xml +++ b/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize1024_memData16.xml @@ -54,7 +54,11 @@ - + + + + + @@ -69,7 +73,11 @@ - + + + + + @@ -79,7 +87,15 @@ - + + + + + + + + + @@ -93,7 +109,11 @@ - + + + + + diff --git a/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize1024_memData2.xml b/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize1024_memData2.xml index d0514b0901c..b592f17f3c6 100755 --- a/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize1024_memData2.xml +++ b/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize1024_memData2.xml @@ -54,7 +54,11 @@ - + + + + + @@ -69,7 +73,11 @@ - + + + + + @@ -79,7 +87,15 @@ - + + + + + + + + + @@ -93,7 +109,11 @@ - + + + + + diff --git a/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize1024_memData32.xml b/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize1024_memData32.xml index 5c483aa930e..4a6512ac0c2 100755 --- a/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize1024_memData32.xml +++ b/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize1024_memData32.xml @@ -54,7 +54,11 @@ - + + + + + @@ -69,7 +73,11 @@ - + + + + + @@ -79,7 +87,15 @@ - + + + + + + + + + @@ -93,7 +109,11 @@ - + + + + + diff --git a/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize1024_memData4.xml b/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize1024_memData4.xml index 63db148c142..f0d8d007753 100755 --- a/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize1024_memData4.xml +++ b/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize1024_memData4.xml @@ 
-54,7 +54,11 @@ - + + + + + @@ -69,7 +73,11 @@ - + + + + + @@ -79,7 +87,15 @@ - + + + + + + + + + @@ -93,7 +109,11 @@ - + + + + + diff --git a/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize1024_memData64.xml b/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize1024_memData64.xml index a358e4eb4b0..5625599f8e8 100755 --- a/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize1024_memData64.xml +++ b/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize1024_memData64.xml @@ -54,7 +54,11 @@ - + + + + + @@ -69,7 +73,11 @@ - + + + + + @@ -79,7 +87,15 @@ - + + + + + + + + + @@ -93,7 +109,11 @@ - + + + + + diff --git a/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize1024_memData8.xml b/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize1024_memData8.xml index 179c4f6ccfa..dcb88e61bf6 100755 --- a/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize1024_memData8.xml +++ b/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize1024_memData8.xml @@ -54,7 +54,11 @@ - + + + + + @@ -69,7 +73,11 @@ - + + + + + @@ -79,7 +87,15 @@ - + + + + + + + + + @@ -93,7 +109,11 @@ - + + + + + diff --git a/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize131072_memData16.xml b/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize131072_memData16.xml index 404df585cea..be627142da2 100755 --- a/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize131072_memData16.xml +++ b/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize131072_memData16.xml @@ -54,7 +54,11 @@ - + + + + + @@ -69,7 +73,11 @@ - + + + + + @@ -79,7 +87,15 @@ - + + + + + + + + + @@ -93,7 +109,11 @@ - + + + + + diff --git a/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize131072_memData2.xml b/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize131072_memData2.xml index b9114aba6cf..5497f8595b1 100755 --- a/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize131072_memData2.xml +++ b/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize131072_memData2.xml @@ -54,7 +54,11 @@ - + + + + + @@ -69,7 +73,11 @@ - + + + + + @@ -79,7 +87,15 @@ - 
+ + + + + + + + + @@ -93,7 +109,11 @@ - + + + + + diff --git a/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize131072_memData32.xml b/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize131072_memData32.xml index 061eb6f8d6a..ac93c0520ae 100755 --- a/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize131072_memData32.xml +++ b/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize131072_memData32.xml @@ -54,7 +54,11 @@ - + + + + + @@ -69,7 +73,11 @@ - + + + + + @@ -79,7 +87,15 @@ - + + + + + + + + + @@ -93,7 +109,11 @@ - + + + + + diff --git a/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize131072_memData4.xml b/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize131072_memData4.xml index ac827bcdb58..75ce9bfa952 100755 --- a/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize131072_memData4.xml +++ b/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize131072_memData4.xml @@ -54,7 +54,11 @@ - + + + + + @@ -69,7 +73,11 @@ - + + + + + @@ -79,7 +87,15 @@ - + + + + + + + + + @@ -93,7 +109,11 @@ - + + + + + diff --git a/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize131072_memData64.xml b/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize131072_memData64.xml index 7c673eb4ee5..5684dc65deb 100755 --- a/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize131072_memData64.xml +++ b/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize131072_memData64.xml @@ -54,7 +54,11 @@ - + + + + + @@ -69,7 +73,11 @@ - + + + + + @@ -79,7 +87,15 @@ - + + + + + + + + + @@ -93,7 +109,11 @@ - + + + + + diff --git a/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize131072_memData8.xml b/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize131072_memData8.xml index ae83fb8714c..649a8fa20ea 100755 --- a/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize131072_memData8.xml +++ b/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize131072_memData8.xml @@ -54,7 +54,11 @@ - + + + + + @@ -69,7 +73,11 @@ - + + + + + @@ -79,7 +87,15 @@ - + + + + + + + + + @@ -93,7 +109,11 @@ - + + + + + diff --git 
a/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize16384_memData16.xml b/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize16384_memData16.xml index b7316290906..315e4cdfb90 100755 --- a/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize16384_memData16.xml +++ b/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize16384_memData16.xml @@ -54,7 +54,11 @@ - + + + + + @@ -69,7 +73,11 @@ - + + + + + @@ -79,7 +87,15 @@ - + + + + + + + + + @@ -93,7 +109,11 @@ - + + + + + diff --git a/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize16384_memData2.xml b/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize16384_memData2.xml index 242f7f12f9b..4318cc167c7 100755 --- a/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize16384_memData2.xml +++ b/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize16384_memData2.xml @@ -54,7 +54,11 @@ - + + + + + @@ -69,7 +73,11 @@ - + + + + + @@ -79,7 +87,15 @@ - + + + + + + + + + @@ -93,7 +109,11 @@ - + + + + + diff --git a/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize16384_memData32.xml b/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize16384_memData32.xml index 080d6c4f0eb..29e94e0194c 100755 --- a/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize16384_memData32.xml +++ b/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize16384_memData32.xml @@ -54,7 +54,11 @@ - + + + + + @@ -69,7 +73,11 @@ - + + + + + @@ -79,7 +87,15 @@ - + + + + + + + + + @@ -93,7 +109,11 @@ - + + + + + diff --git a/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize16384_memData4.xml b/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize16384_memData4.xml index dffd90760f7..01e31d251e5 100755 --- a/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize16384_memData4.xml +++ b/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize16384_memData4.xml @@ -54,7 +54,11 @@ - + + + + + @@ -69,7 +73,11 @@ - + + + + + @@ -79,7 +87,15 @@ - + + + + + + + + + @@ -93,7 +109,11 @@ - + + + + + diff --git a/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize16384_memData64.xml 
b/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize16384_memData64.xml index b7b8ce4e0bf..981b84f295a 100755 --- a/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize16384_memData64.xml +++ b/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize16384_memData64.xml @@ -54,7 +54,11 @@ - + + + + + @@ -69,7 +73,11 @@ - + + + + + @@ -79,7 +87,15 @@ - + + + + + + + + + @@ -93,7 +109,11 @@ - + + + + + diff --git a/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize16384_memData8.xml b/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize16384_memData8.xml index 7a7db575f34..86740110a43 100755 --- a/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize16384_memData8.xml +++ b/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize16384_memData8.xml @@ -54,7 +54,11 @@ - + + + + + @@ -69,7 +73,11 @@ - + + + + + @@ -79,7 +87,15 @@ - + + + + + + + + + @@ -93,7 +109,11 @@ - + + + + + diff --git a/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize2048_memData16.xml b/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize2048_memData16.xml index 5410bd729d6..f1fe5eb3bc6 100755 --- a/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize2048_memData16.xml +++ b/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize2048_memData16.xml @@ -54,7 +54,11 @@ - + + + + + @@ -69,7 +73,11 @@ - + + + + + @@ -79,7 +87,15 @@ - + + + + + + + + + @@ -93,7 +109,11 @@ - + + + + + diff --git a/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize2048_memData2.xml b/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize2048_memData2.xml index 6e34b4212f8..8cd8c1f0dc0 100755 --- a/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize2048_memData2.xml +++ b/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize2048_memData2.xml @@ -54,7 +54,11 @@ - + + + + + @@ -69,7 +73,11 @@ - + + + + + @@ -79,7 +87,15 @@ - + + + + + + + + + @@ -93,7 +109,11 @@ - + + + + + diff --git a/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize2048_memData32.xml b/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize2048_memData32.xml index 
ef58d19de9c..ab07482fef7 100755 --- a/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize2048_memData32.xml +++ b/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize2048_memData32.xml @@ -54,7 +54,11 @@ - + + + + + @@ -69,7 +73,11 @@ - + + + + + @@ -79,7 +87,15 @@ - + + + + + + + + + @@ -93,7 +109,11 @@ - + + + + + diff --git a/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize2048_memData4.xml b/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize2048_memData4.xml index f887cd1f9de..568856b4b16 100755 --- a/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize2048_memData4.xml +++ b/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize2048_memData4.xml @@ -54,7 +54,11 @@ - + + + + + @@ -69,7 +73,11 @@ - + + + + + @@ -79,7 +87,15 @@ - + + + + + + + + + @@ -93,7 +109,11 @@ - + + + + + diff --git a/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize2048_memData64.xml b/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize2048_memData64.xml index 4c00d96294a..f8c1ace98a0 100755 --- a/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize2048_memData64.xml +++ b/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize2048_memData64.xml @@ -54,7 +54,11 @@ - + + + + + @@ -69,7 +73,11 @@ - + + + + + @@ -79,7 +87,15 @@ - + + + + + + + + + @@ -93,7 +109,11 @@ - + + + + + diff --git a/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize2048_memData8.xml b/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize2048_memData8.xml index 602bd294750..2df633f9462 100755 --- a/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize2048_memData8.xml +++ b/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize2048_memData8.xml @@ -54,7 +54,11 @@ - + + + + + @@ -69,7 +73,11 @@ - + + + + + @@ -79,7 +87,15 @@ - + + + + + + + + + @@ -93,7 +109,11 @@ - + + + + + diff --git a/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize262144_memData16.xml b/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize262144_memData16.xml index 17dcdefb8a4..01e55ac2b06 100755 --- 
a/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize262144_memData16.xml +++ b/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize262144_memData16.xml @@ -54,7 +54,11 @@ - + + + + + @@ -69,7 +73,11 @@ - + + + + + @@ -79,7 +87,15 @@ - + + + + + + + + + @@ -93,7 +109,11 @@ - + + + + + diff --git a/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize262144_memData2.xml b/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize262144_memData2.xml index 71a21b7c184..baf2a068f46 100755 --- a/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize262144_memData2.xml +++ b/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize262144_memData2.xml @@ -54,7 +54,11 @@ - + + + + + @@ -69,7 +73,11 @@ - + + + + + @@ -79,7 +87,15 @@ - + + + + + + + + + @@ -93,7 +109,11 @@ - + + + + + diff --git a/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize262144_memData32.xml b/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize262144_memData32.xml index 4dd19e9df07..a6090842c4e 100755 --- a/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize262144_memData32.xml +++ b/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize262144_memData32.xml @@ -54,7 +54,11 @@ - + + + + + @@ -69,7 +73,11 @@ - + + + + + @@ -79,7 +87,15 @@ - + + + + + + + + + @@ -93,7 +109,11 @@ - + + + + + diff --git a/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize262144_memData4.xml b/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize262144_memData4.xml index 7657b059224..9420b936ce2 100755 --- a/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize262144_memData4.xml +++ b/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize262144_memData4.xml @@ -54,7 +54,11 @@ - + + + + + @@ -69,7 +73,11 @@ - + + + + + @@ -79,7 +87,15 @@ - + + + + + + + + + @@ -93,7 +109,11 @@ - + + + + + diff --git a/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize262144_memData64.xml b/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize262144_memData64.xml index 5b4faf666fa..b7bc9796832 100755 --- 
a/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize262144_memData64.xml +++ b/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize262144_memData64.xml @@ -54,7 +54,11 @@ - + + + + + @@ -69,7 +73,11 @@ - + + + + + @@ -79,7 +87,15 @@ - + + + + + + + + + @@ -93,7 +109,11 @@ - + + + + + diff --git a/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize262144_memData8.xml b/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize262144_memData8.xml index e4d33b755e7..aec2a896633 100755 --- a/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize262144_memData8.xml +++ b/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize262144_memData8.xml @@ -54,7 +54,11 @@ - + + + + + @@ -69,7 +73,11 @@ - + + + + + @@ -79,7 +87,15 @@ - + + + + + + + + + @@ -93,7 +109,11 @@ - + + + + + diff --git a/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize32768_memData16.xml b/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize32768_memData16.xml index 55541cd3135..44ad66182f8 100755 --- a/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize32768_memData16.xml +++ b/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize32768_memData16.xml @@ -54,7 +54,11 @@ - + + + + + @@ -69,7 +73,11 @@ - + + + + + @@ -79,7 +87,15 @@ - + + + + + + + + + @@ -93,7 +109,11 @@ - + + + + + diff --git a/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize32768_memData2.xml b/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize32768_memData2.xml index d4aab26bb9d..825bba56a18 100755 --- a/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize32768_memData2.xml +++ b/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize32768_memData2.xml @@ -54,7 +54,11 @@ - + + + + + @@ -69,7 +73,11 @@ - + + + + + @@ -79,7 +87,15 @@ - + + + + + + + + + @@ -93,7 +109,11 @@ - + + + + + diff --git a/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize32768_memData32.xml b/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize32768_memData32.xml index 0aac9b0cf83..f2a50c39723 100755 --- a/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize32768_memData32.xml 
+++ b/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize32768_memData32.xml @@ -54,7 +54,11 @@ - + + + + + @@ -69,7 +73,11 @@ - + + + + + @@ -79,7 +87,15 @@ - + + + + + + + + + @@ -93,7 +109,11 @@ - + + + + + diff --git a/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize32768_memData4.xml b/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize32768_memData4.xml index 589f5ad3d27..715097a8d6a 100755 --- a/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize32768_memData4.xml +++ b/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize32768_memData4.xml @@ -54,7 +54,11 @@ - + + + + + @@ -69,7 +73,11 @@ - + + + + + @@ -79,7 +87,15 @@ - + + + + + + + + + @@ -93,7 +109,11 @@ - + + + + + diff --git a/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize32768_memData64.xml b/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize32768_memData64.xml index 59fa1d78f09..a94ad0612a3 100755 --- a/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize32768_memData64.xml +++ b/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize32768_memData64.xml @@ -54,7 +54,11 @@ - + + + + + @@ -69,7 +73,11 @@ - + + + + + @@ -79,7 +87,15 @@ - + + + + + + + + + @@ -93,7 +109,11 @@ - + + + + + diff --git a/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize32768_memData8.xml b/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize32768_memData8.xml index 23d67220b5e..c801c7f6fd2 100755 --- a/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize32768_memData8.xml +++ b/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize32768_memData8.xml @@ -54,7 +54,11 @@ - + + + + + @@ -69,7 +73,11 @@ - + + + + + @@ -79,7 +87,15 @@ - + + + + + + + + + @@ -93,7 +109,11 @@ - + + + + + diff --git a/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize4096_memData16.xml b/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize4096_memData16.xml index a2d2e4df793..e582bb4fe85 100755 --- a/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize4096_memData16.xml +++ b/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize4096_memData16.xml @@ 
-54,7 +54,11 @@ - + + + + + @@ -69,7 +73,11 @@ - + + + + + @@ -79,7 +87,15 @@ - + + + + + + + + + @@ -93,7 +109,11 @@ - + + + + + diff --git a/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize4096_memData2.xml b/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize4096_memData2.xml index 53370394877..93eea7cdfb1 100755 --- a/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize4096_memData2.xml +++ b/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize4096_memData2.xml @@ -54,7 +54,11 @@ - + + + + + @@ -69,7 +73,11 @@ - + + + + + @@ -79,7 +87,15 @@ - + + + + + + + + + @@ -93,7 +109,11 @@ - + + + + + diff --git a/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize4096_memData32.xml b/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize4096_memData32.xml index 9729f315d3f..860e751ab1c 100755 --- a/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize4096_memData32.xml +++ b/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize4096_memData32.xml @@ -54,7 +54,11 @@ - + + + + + @@ -69,7 +73,11 @@ - + + + + + @@ -79,7 +87,15 @@ - + + + + + + + + + @@ -93,7 +109,11 @@ - + + + + + diff --git a/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize4096_memData4.xml b/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize4096_memData4.xml index 28588d50a4a..38624cb1352 100755 --- a/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize4096_memData4.xml +++ b/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize4096_memData4.xml @@ -54,7 +54,11 @@ - + + + + + @@ -69,7 +73,11 @@ - + + + + + @@ -79,7 +87,15 @@ - + + + + + + + + + @@ -93,7 +109,11 @@ - + + + + + diff --git a/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize4096_memData64.xml b/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize4096_memData64.xml index b9eedba6199..13b1f26e4ec 100755 --- a/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize4096_memData64.xml +++ b/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize4096_memData64.xml @@ -54,7 +54,11 @@ - + + + + + @@ -69,7 +73,11 @@ - + + + + + @@ -79,7 +87,15 @@ - + + + + + + + + 
+ @@ -93,7 +109,11 @@ - + + + + + diff --git a/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize4096_memData8.xml b/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize4096_memData8.xml index c48b8bec86e..04351132895 100755 --- a/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize4096_memData8.xml +++ b/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize4096_memData8.xml @@ -54,7 +54,11 @@ - + + + + + @@ -69,7 +73,11 @@ - + + + + + @@ -79,7 +87,15 @@ - + + + + + + + + + @@ -93,7 +109,11 @@ - + + + + + diff --git a/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize512_memData16.xml b/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize512_memData16.xml index 151e7698e85..a2d2a0b975f 100755 --- a/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize512_memData16.xml +++ b/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize512_memData16.xml @@ -54,7 +54,11 @@ - + + + + + @@ -69,7 +73,11 @@ - + + + + + @@ -79,7 +87,15 @@ - + + + + + + + + + @@ -93,7 +109,11 @@ - + + + + + diff --git a/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize512_memData2.xml b/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize512_memData2.xml index 002b45c5bb0..d114fb2fd7e 100755 --- a/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize512_memData2.xml +++ b/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize512_memData2.xml @@ -54,7 +54,11 @@ - + + + + + @@ -69,7 +73,11 @@ - + + + + + @@ -79,7 +87,15 @@ - + + + + + + + + + @@ -93,7 +109,11 @@ - + + + + + diff --git a/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize512_memData32.xml b/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize512_memData32.xml index 0d9ef1531a8..6213c9d7d34 100755 --- a/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize512_memData32.xml +++ b/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize512_memData32.xml @@ -54,7 +54,11 @@ - + + + + + @@ -69,7 +73,11 @@ - + + + + + @@ -79,7 +87,15 @@ - + + + + + + + + + @@ -93,7 +109,11 @@ - + + + + + diff --git 
a/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize512_memData4.xml b/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize512_memData4.xml index 19a06a24ee1..c5ae11af773 100755 --- a/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize512_memData4.xml +++ b/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize512_memData4.xml @@ -54,7 +54,11 @@ - + + + + + @@ -69,7 +73,11 @@ - + + + + + @@ -79,7 +87,15 @@ - + + + + + + + + + @@ -93,7 +109,11 @@ - + + + + + diff --git a/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize512_memData64.xml b/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize512_memData64.xml index 3fcf05f37a6..e80dae9c4b3 100755 --- a/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize512_memData64.xml +++ b/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize512_memData64.xml @@ -54,7 +54,11 @@ - + + + + + @@ -69,7 +73,11 @@ - + + + + + @@ -79,7 +87,15 @@ - + + + + + + + + + @@ -93,7 +109,11 @@ - + + + + + diff --git a/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize512_memData8.xml b/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize512_memData8.xml index ac73e606ae9..fd7b57d03a0 100755 --- a/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize512_memData8.xml +++ b/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize512_memData8.xml @@ -54,7 +54,11 @@ - + + + + + @@ -69,7 +73,11 @@ - + + + + + @@ -79,7 +87,15 @@ - + + + + + + + + + @@ -93,7 +109,11 @@ - + + + + + diff --git a/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize524288_memData16.xml b/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize524288_memData16.xml index 06e4a4504de..a6a57799dd6 100755 --- a/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize524288_memData16.xml +++ b/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize524288_memData16.xml @@ -54,7 +54,11 @@ - + + + + + @@ -69,7 +73,11 @@ - + + + + + @@ -79,7 +87,15 @@ - + + + + + + + + + @@ -93,7 +109,11 @@ - + + + + + diff --git a/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize524288_memData2.xml 
b/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize524288_memData2.xml index b23cee5a648..baa7cc0cc88 100755 --- a/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize524288_memData2.xml +++ b/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize524288_memData2.xml @@ -54,7 +54,11 @@ - + + + + + @@ -69,7 +73,11 @@ - + + + + + @@ -79,7 +87,15 @@ - + + + + + + + + + @@ -93,7 +109,11 @@ - + + + + + diff --git a/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize524288_memData32.xml b/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize524288_memData32.xml index 1b6325e812d..b38dddd0a89 100755 --- a/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize524288_memData32.xml +++ b/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize524288_memData32.xml @@ -54,7 +54,11 @@ - + + + + + @@ -69,7 +73,11 @@ - + + + + + @@ -79,7 +87,15 @@ - + + + + + + + + + @@ -93,7 +109,11 @@ - + + + + + diff --git a/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize524288_memData4.xml b/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize524288_memData4.xml index 167252a292a..fc07fe97fff 100755 --- a/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize524288_memData4.xml +++ b/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize524288_memData4.xml @@ -54,7 +54,11 @@ - + + + + + @@ -69,7 +73,11 @@ - + + + + + @@ -79,7 +87,15 @@ - + + + + + + + + + @@ -93,7 +109,11 @@ - + + + + + diff --git a/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize524288_memData64.xml b/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize524288_memData64.xml index 7746aeb75e7..29e73e01c9d 100755 --- a/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize524288_memData64.xml +++ b/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize524288_memData64.xml @@ -54,7 +54,11 @@ - + + + + + @@ -69,7 +73,11 @@ - + + + + + @@ -79,7 +87,15 @@ - + + + + + + + + + @@ -93,7 +109,11 @@ - + + + + + diff --git a/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize524288_memData8.xml 
b/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize524288_memData8.xml index d7ae45457e2..a78968bbf00 100755 --- a/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize524288_memData8.xml +++ b/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize524288_memData8.xml @@ -54,7 +54,11 @@ - + + + + + @@ -69,7 +73,11 @@ - + + + + + @@ -79,7 +87,15 @@ - + + + + + + + + + @@ -93,7 +109,11 @@ - + + + + + diff --git a/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize65536_memData16.xml b/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize65536_memData16.xml index 947512b1310..fd1de7f5658 100755 --- a/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize65536_memData16.xml +++ b/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize65536_memData16.xml @@ -54,7 +54,11 @@ - + + + + + @@ -69,7 +73,11 @@ - + + + + + @@ -79,7 +87,15 @@ - + + + + + + + + + @@ -93,7 +109,11 @@ - + + + + + diff --git a/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize65536_memData2.xml b/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize65536_memData2.xml index 04e6f2648e0..35dffcb173f 100755 --- a/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize65536_memData2.xml +++ b/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize65536_memData2.xml @@ -54,7 +54,11 @@ - + + + + + @@ -69,7 +73,11 @@ - + + + + + @@ -79,7 +87,15 @@ - + + + + + + + + + @@ -93,7 +109,11 @@ - + + + + + diff --git a/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize65536_memData32.xml b/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize65536_memData32.xml index b094da1ae80..b6f08c861e8 100755 --- a/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize65536_memData32.xml +++ b/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize65536_memData32.xml @@ -54,7 +54,11 @@ - + + + + + @@ -69,7 +73,11 @@ - + + + + + @@ -79,7 +87,15 @@ - + + + + + + + + + @@ -93,7 +109,11 @@ - + + + + + diff --git a/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize65536_memData4.xml b/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize65536_memData4.xml 
index 97355cb1f8f..4f0e87b250f 100755 --- a/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize65536_memData4.xml +++ b/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize65536_memData4.xml @@ -54,7 +54,11 @@ - + + + + + @@ -69,7 +73,11 @@ - + + + + + @@ -79,7 +87,15 @@ - + + + + + + + + + @@ -93,7 +109,11 @@ - + + + + + diff --git a/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize65536_memData64.xml b/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize65536_memData64.xml index 28eb6addc2f..a8d004faa91 100755 --- a/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize65536_memData64.xml +++ b/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize65536_memData64.xml @@ -54,7 +54,11 @@ - + + + + + @@ -69,7 +73,11 @@ - + + + + + @@ -79,7 +87,15 @@ - + + + + + + + + + @@ -93,7 +109,11 @@ - + + + + + diff --git a/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize65536_memData8.xml b/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize65536_memData8.xml index 4d704467857..16eefd1c2b3 100755 --- a/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize65536_memData8.xml +++ b/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize65536_memData8.xml @@ -54,7 +54,11 @@ - + + + + + @@ -69,7 +73,11 @@ - + + + + + @@ -79,7 +87,15 @@ - + + + + + + + + + @@ -93,7 +109,11 @@ - + + + + + diff --git a/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize8192_memData16.xml b/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize8192_memData16.xml index 8d294a0c2f7..19fa775c701 100755 --- a/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize8192_memData16.xml +++ b/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize8192_memData16.xml @@ -54,7 +54,11 @@ - + + + + + @@ -69,7 +73,11 @@ - + + + + + @@ -79,7 +87,15 @@ - + + + + + + + + + @@ -93,7 +109,11 @@ - + + + + + diff --git a/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize8192_memData2.xml b/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize8192_memData2.xml index 409909d75d2..5c447a82083 100755 --- 
a/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize8192_memData2.xml +++ b/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize8192_memData2.xml @@ -54,7 +54,11 @@ - + + + + + @@ -69,7 +73,11 @@ - + + + + + @@ -79,7 +87,15 @@ - + + + + + + + + + @@ -93,7 +109,11 @@ - + + + + + diff --git a/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize8192_memData32.xml b/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize8192_memData32.xml index ac3c4b679d8..7fe72a762bd 100755 --- a/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize8192_memData32.xml +++ b/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize8192_memData32.xml @@ -54,7 +54,11 @@ - + + + + + @@ -69,7 +73,11 @@ - + + + + + @@ -79,7 +87,15 @@ - + + + + + + + + + @@ -93,7 +109,11 @@ - + + + + + diff --git a/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize8192_memData4.xml b/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize8192_memData4.xml index 6b591134bec..3505c8baf73 100755 --- a/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize8192_memData4.xml +++ b/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize8192_memData4.xml @@ -54,7 +54,11 @@ - + + + + + @@ -69,7 +73,11 @@ - + + + + + @@ -79,7 +87,15 @@ - + + + + + + + + + @@ -93,7 +109,11 @@ - + + + + + diff --git a/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize8192_memData64.xml b/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize8192_memData64.xml index c080f301e6d..e8d64502a8e 100755 --- a/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize8192_memData64.xml +++ b/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize8192_memData64.xml @@ -54,7 +54,11 @@ - + + + + + @@ -69,7 +73,11 @@ - + + + + + @@ -79,7 +87,15 @@ - + + + + + + + + + @@ -93,7 +109,11 @@ - + + + + + diff --git a/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize8192_memData8.xml b/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize8192_memData8.xml index 7deeb2c24d6..55fc60138b3 100755 --- a/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize8192_memData8.xml +++ 
b/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize8192_memData8.xml @@ -54,7 +54,11 @@ - + + + + + @@ -69,7 +73,11 @@ - + + + + + @@ -79,7 +87,15 @@ - + + + + + + + + + @@ -93,7 +109,11 @@ - + + + + + diff --git a/vtr_flow/arch/nonuniform_chan_width/k6_N10_mem32K_40nm_nonuniform.xml b/vtr_flow/arch/nonuniform_chan_width/k6_N10_mem32K_40nm_nonuniform.xml index bc26c6a42b7..ea38df7f1a2 100644 --- a/vtr_flow/arch/nonuniform_chan_width/k6_N10_mem32K_40nm_nonuniform.xml +++ b/vtr_flow/arch/nonuniform_chan_width/k6_N10_mem32K_40nm_nonuniform.xml @@ -81,7 +81,11 @@ - + + + + + @@ -96,7 +100,11 @@ - + + + + + @@ -106,7 +114,11 @@ - + + + + + @@ -116,7 +128,15 @@ - + + + + + + + + + diff --git a/vtr_flow/arch/nonuniform_chan_width/k6_N10_mem32K_40nm_pulse.xml b/vtr_flow/arch/nonuniform_chan_width/k6_N10_mem32K_40nm_pulse.xml index 797d1d2f6e7..687d482236d 100644 --- a/vtr_flow/arch/nonuniform_chan_width/k6_N10_mem32K_40nm_pulse.xml +++ b/vtr_flow/arch/nonuniform_chan_width/k6_N10_mem32K_40nm_pulse.xml @@ -81,7 +81,11 @@ - + + + + + @@ -96,7 +100,11 @@ - + + + + + @@ -106,7 +114,11 @@ - + + + + + @@ -116,7 +128,15 @@ - + + + + + + + + + diff --git a/vtr_flow/arch/power/k6_N10_I40_Fi6_L1_frac0_ff1_45nm.xml b/vtr_flow/arch/power/k6_N10_I40_Fi6_L1_frac0_ff1_45nm.xml index c49ae41112b..b718251f668 100644 --- a/vtr_flow/arch/power/k6_N10_I40_Fi6_L1_frac0_ff1_45nm.xml +++ b/vtr_flow/arch/power/k6_N10_I40_Fi6_L1_frac0_ff1_45nm.xml @@ -59,7 +59,11 @@ - + + + + + @@ -74,7 +78,11 @@ - + + + + + @@ -84,7 +92,11 @@ - + + + + + @@ -94,7 +106,15 @@ - + + + + + + + + + diff --git a/vtr_flow/arch/power/k6_N10_I40_Fi6_L2_frac0_ff1_45nm.xml b/vtr_flow/arch/power/k6_N10_I40_Fi6_L2_frac0_ff1_45nm.xml index 382a585889f..dfeefd9b9e6 100644 --- a/vtr_flow/arch/power/k6_N10_I40_Fi6_L2_frac0_ff1_45nm.xml +++ b/vtr_flow/arch/power/k6_N10_I40_Fi6_L2_frac0_ff1_45nm.xml @@ -59,7 +59,11 @@ - + + + + + @@ -74,7 +78,11 @@ - + + + + + @@ -84,7 +92,11 @@ - + + + + + @@ -94,7 +106,15 @@ - + + + + + + + + + 
diff --git a/vtr_flow/arch/power/k6_N10_I40_Fi6_L3_frac0_ff1_45nm.xml b/vtr_flow/arch/power/k6_N10_I40_Fi6_L3_frac0_ff1_45nm.xml index b3ca770d0d5..3d5749fb886 100644 --- a/vtr_flow/arch/power/k6_N10_I40_Fi6_L3_frac0_ff1_45nm.xml +++ b/vtr_flow/arch/power/k6_N10_I40_Fi6_L3_frac0_ff1_45nm.xml @@ -59,7 +59,11 @@ - + + + + + @@ -74,7 +78,11 @@ - + + + + + @@ -84,7 +92,11 @@ - + + + + + @@ -94,7 +106,15 @@ - + + + + + + + + + diff --git a/vtr_flow/arch/power/k6_N10_I40_Fi6_L4_frac0_ff1_130nm.xml b/vtr_flow/arch/power/k6_N10_I40_Fi6_L4_frac0_ff1_130nm.xml index afe4cb6c6a8..2bc0bb89115 100644 --- a/vtr_flow/arch/power/k6_N10_I40_Fi6_L4_frac0_ff1_130nm.xml +++ b/vtr_flow/arch/power/k6_N10_I40_Fi6_L4_frac0_ff1_130nm.xml @@ -59,7 +59,11 @@ - + + + + + @@ -74,7 +78,11 @@ - + + + + + @@ -84,7 +92,11 @@ - + + + + + @@ -94,7 +106,15 @@ - + + + + + + + + + diff --git a/vtr_flow/arch/power/k6_N10_I40_Fi6_L4_frac0_ff1_22nm.xml b/vtr_flow/arch/power/k6_N10_I40_Fi6_L4_frac0_ff1_22nm.xml index fda0f59f5bc..6d18f270a6b 100644 --- a/vtr_flow/arch/power/k6_N10_I40_Fi6_L4_frac0_ff1_22nm.xml +++ b/vtr_flow/arch/power/k6_N10_I40_Fi6_L4_frac0_ff1_22nm.xml @@ -59,7 +59,11 @@ - + + + + + @@ -74,7 +78,11 @@ - + + + + + @@ -84,7 +92,11 @@ - + + + + + @@ -94,7 +106,15 @@ - + + + + + + + + + diff --git a/vtr_flow/arch/power/k6_N10_I40_Fi6_L4_frac0_ff1_45nm.xml b/vtr_flow/arch/power/k6_N10_I40_Fi6_L4_frac0_ff1_45nm.xml index a8630c4c7c8..7de1dadad70 100644 --- a/vtr_flow/arch/power/k6_N10_I40_Fi6_L4_frac0_ff1_45nm.xml +++ b/vtr_flow/arch/power/k6_N10_I40_Fi6_L4_frac0_ff1_45nm.xml @@ -59,7 +59,11 @@ - + + + + + @@ -74,7 +78,11 @@ - + + + + + @@ -84,7 +92,11 @@ - + + + + + @@ -94,7 +106,15 @@ - + + + + + + + + + diff --git a/vtr_flow/arch/power/k6_N10_I40_Fi6_L4_frac0_ff1_C10_45nm.xml b/vtr_flow/arch/power/k6_N10_I40_Fi6_L4_frac0_ff1_C10_45nm.xml index 7d9ab53c13a..c3365a077c0 100644 --- a/vtr_flow/arch/power/k6_N10_I40_Fi6_L4_frac0_ff1_C10_45nm.xml +++ 
b/vtr_flow/arch/power/k6_N10_I40_Fi6_L4_frac0_ff1_C10_45nm.xml @@ -59,7 +59,11 @@ - + + + + + @@ -74,7 +78,11 @@ - + + + + + @@ -84,7 +92,11 @@ - + + + + + @@ -94,7 +106,15 @@ - + + + + + + + + + diff --git a/vtr_flow/arch/power/k6_N10_I40_Fi6_L4_frac0_ff1_C15_45nm.xml b/vtr_flow/arch/power/k6_N10_I40_Fi6_L4_frac0_ff1_C15_45nm.xml index fe1ee636f1e..29bb788c127 100644 --- a/vtr_flow/arch/power/k6_N10_I40_Fi6_L4_frac0_ff1_C15_45nm.xml +++ b/vtr_flow/arch/power/k6_N10_I40_Fi6_L4_frac0_ff1_C15_45nm.xml @@ -59,7 +59,11 @@ - + + + + + @@ -74,7 +78,11 @@ - + + + + + @@ -84,7 +92,11 @@ - + + + + + @@ -94,7 +106,15 @@ - + + + + + + + + + diff --git a/vtr_flow/arch/power/k6_N10_I40_Fi6_L4_frac0_ff1_C20_45nm.xml b/vtr_flow/arch/power/k6_N10_I40_Fi6_L4_frac0_ff1_C20_45nm.xml index e9600fedda3..9beb66b499d 100644 --- a/vtr_flow/arch/power/k6_N10_I40_Fi6_L4_frac0_ff1_C20_45nm.xml +++ b/vtr_flow/arch/power/k6_N10_I40_Fi6_L4_frac0_ff1_C20_45nm.xml @@ -59,7 +59,11 @@ - + + + + + @@ -74,7 +78,11 @@ - + + + + + @@ -84,7 +92,11 @@ - + + + + + @@ -94,7 +106,15 @@ - + + + + + + + + + diff --git a/vtr_flow/arch/power/k6_N10_I40_Fi6_L4_frac0_ff1_C25_45nm.xml b/vtr_flow/arch/power/k6_N10_I40_Fi6_L4_frac0_ff1_C25_45nm.xml index cbd5af7fbd9..d45c7e92b68 100644 --- a/vtr_flow/arch/power/k6_N10_I40_Fi6_L4_frac0_ff1_C25_45nm.xml +++ b/vtr_flow/arch/power/k6_N10_I40_Fi6_L4_frac0_ff1_C25_45nm.xml @@ -59,7 +59,11 @@ - + + + + + @@ -74,7 +78,11 @@ - + + + + + @@ -84,7 +92,11 @@ - + + + + + @@ -94,7 +106,15 @@ - + + + + + + + + + diff --git a/vtr_flow/arch/power/k6_N10_I40_Fi6_L4_frac0_ff1_C30_45nm.xml b/vtr_flow/arch/power/k6_N10_I40_Fi6_L4_frac0_ff1_C30_45nm.xml index 11a90988ac2..ae9fa49a16e 100644 --- a/vtr_flow/arch/power/k6_N10_I40_Fi6_L4_frac0_ff1_C30_45nm.xml +++ b/vtr_flow/arch/power/k6_N10_I40_Fi6_L4_frac0_ff1_C30_45nm.xml @@ -59,7 +59,11 @@ - + + + + + @@ -74,7 +78,11 @@ - + + + + + @@ -84,7 +92,11 @@ - + + + + + @@ -94,7 +106,15 @@ - + + + + + + + + + diff --git 
a/vtr_flow/arch/power/k6_N10_I40_Fi6_L4_frac0_ff1_C35_45nm.xml b/vtr_flow/arch/power/k6_N10_I40_Fi6_L4_frac0_ff1_C35_45nm.xml index 92c4f741736..4374fdb86eb 100644 --- a/vtr_flow/arch/power/k6_N10_I40_Fi6_L4_frac0_ff1_C35_45nm.xml +++ b/vtr_flow/arch/power/k6_N10_I40_Fi6_L4_frac0_ff1_C35_45nm.xml @@ -59,7 +59,11 @@ - + + + + + @@ -74,7 +78,11 @@ - + + + + + @@ -84,7 +92,11 @@ - + + + + + @@ -94,7 +106,15 @@ - + + + + + + + + + diff --git a/vtr_flow/arch/power/k6_N10_I40_Fi6_L4_frac0_ff1_C40_45nm.xml b/vtr_flow/arch/power/k6_N10_I40_Fi6_L4_frac0_ff1_C40_45nm.xml index ae845b39dfe..6bf67bc963f 100644 --- a/vtr_flow/arch/power/k6_N10_I40_Fi6_L4_frac0_ff1_C40_45nm.xml +++ b/vtr_flow/arch/power/k6_N10_I40_Fi6_L4_frac0_ff1_C40_45nm.xml @@ -59,7 +59,11 @@ - + + + + + @@ -74,7 +78,11 @@ - + + + + + @@ -84,7 +92,11 @@ - + + + + + @@ -94,7 +106,15 @@ - + + + + + + + + + diff --git a/vtr_flow/arch/power/k6_N10_I40_Fi6_L4_frac0_ff1_C45_45nm.xml b/vtr_flow/arch/power/k6_N10_I40_Fi6_L4_frac0_ff1_C45_45nm.xml index da2a01116ff..bc2383df137 100644 --- a/vtr_flow/arch/power/k6_N10_I40_Fi6_L4_frac0_ff1_C45_45nm.xml +++ b/vtr_flow/arch/power/k6_N10_I40_Fi6_L4_frac0_ff1_C45_45nm.xml @@ -59,7 +59,11 @@ - + + + + + @@ -74,7 +78,11 @@ - + + + + + @@ -84,7 +92,11 @@ - + + + + + @@ -94,7 +106,15 @@ - + + + + + + + + + diff --git a/vtr_flow/arch/power/k6_N10_I40_Fi6_L4_frac0_ff1_C50_45nm.xml b/vtr_flow/arch/power/k6_N10_I40_Fi6_L4_frac0_ff1_C50_45nm.xml index 0b66cfdea55..209bf155396 100644 --- a/vtr_flow/arch/power/k6_N10_I40_Fi6_L4_frac0_ff1_C50_45nm.xml +++ b/vtr_flow/arch/power/k6_N10_I40_Fi6_L4_frac0_ff1_C50_45nm.xml @@ -59,7 +59,11 @@ - + + + + + @@ -74,7 +78,11 @@ - + + + + + @@ -84,7 +92,11 @@ - + + + + + @@ -94,7 +106,15 @@ - + + + + + + + + + diff --git a/vtr_flow/arch/power/k6_N10_I40_Fi6_L4_frac0_ff1_C5_45nm.xml b/vtr_flow/arch/power/k6_N10_I40_Fi6_L4_frac0_ff1_C5_45nm.xml index 0e8e20faa7e..988b203ceac 100644 --- a/vtr_flow/arch/power/k6_N10_I40_Fi6_L4_frac0_ff1_C5_45nm.xml +++ 
b/vtr_flow/arch/power/k6_N10_I40_Fi6_L4_frac0_ff1_C5_45nm.xml @@ -59,7 +59,11 @@ - + + + + + @@ -74,7 +78,11 @@ - + + + + + @@ -84,7 +92,11 @@ - + + + + + @@ -94,7 +106,15 @@ - + + + + + + + + + diff --git a/vtr_flow/arch/power/k6_N10_I40_Fi6_L4_frac1_ff1_45nm.xml b/vtr_flow/arch/power/k6_N10_I40_Fi6_L4_frac1_ff1_45nm.xml index 614133454ff..727b27ce2c6 100644 --- a/vtr_flow/arch/power/k6_N10_I40_Fi6_L4_frac1_ff1_45nm.xml +++ b/vtr_flow/arch/power/k6_N10_I40_Fi6_L4_frac1_ff1_45nm.xml @@ -59,7 +59,11 @@ - + + + + + @@ -74,7 +78,11 @@ - + + + + + @@ -84,7 +92,11 @@ - + + + + + @@ -94,7 +106,15 @@ - + + + + + + + + + diff --git a/vtr_flow/arch/power/k6_N10_I40_Fi6_L4_frac1_ff2_45nm.xml b/vtr_flow/arch/power/k6_N10_I40_Fi6_L4_frac1_ff2_45nm.xml index 490c73f15e7..1b4c88da4e8 100644 --- a/vtr_flow/arch/power/k6_N10_I40_Fi6_L4_frac1_ff2_45nm.xml +++ b/vtr_flow/arch/power/k6_N10_I40_Fi6_L4_frac1_ff2_45nm.xml @@ -59,7 +59,11 @@ - + + + + + @@ -74,7 +78,11 @@ - + + + + + @@ -84,7 +92,11 @@ - + + + + + @@ -94,7 +106,15 @@ - + + + + + + + + + diff --git a/vtr_flow/arch/power/k6_N10_I40_Fi6_L4_frac1_ff2_C10_45nm.xml b/vtr_flow/arch/power/k6_N10_I40_Fi6_L4_frac1_ff2_C10_45nm.xml index 5dfe501e5c1..5f08e4b7c1e 100644 --- a/vtr_flow/arch/power/k6_N10_I40_Fi6_L4_frac1_ff2_C10_45nm.xml +++ b/vtr_flow/arch/power/k6_N10_I40_Fi6_L4_frac1_ff2_C10_45nm.xml @@ -59,7 +59,11 @@ - + + + + + @@ -74,7 +78,11 @@ - + + + + + @@ -84,7 +92,11 @@ - + + + + + @@ -94,7 +106,15 @@ - + + + + + + + + + diff --git a/vtr_flow/arch/power/k6_N10_I40_Fi6_L4_frac1_ff2_C15_45nm.xml b/vtr_flow/arch/power/k6_N10_I40_Fi6_L4_frac1_ff2_C15_45nm.xml index a81672146f4..1bba09b65a0 100644 --- a/vtr_flow/arch/power/k6_N10_I40_Fi6_L4_frac1_ff2_C15_45nm.xml +++ b/vtr_flow/arch/power/k6_N10_I40_Fi6_L4_frac1_ff2_C15_45nm.xml @@ -59,7 +59,11 @@ - + + + + + @@ -74,7 +78,11 @@ - + + + + + @@ -84,7 +92,11 @@ - + + + + + @@ -94,7 +106,15 @@ - + + + + + + + + + diff --git 
a/vtr_flow/arch/power/k6_N10_I40_Fi6_L4_frac1_ff2_C20_45nm.xml b/vtr_flow/arch/power/k6_N10_I40_Fi6_L4_frac1_ff2_C20_45nm.xml index 3e03809a74f..e5b69cfe269 100644 --- a/vtr_flow/arch/power/k6_N10_I40_Fi6_L4_frac1_ff2_C20_45nm.xml +++ b/vtr_flow/arch/power/k6_N10_I40_Fi6_L4_frac1_ff2_C20_45nm.xml @@ -59,7 +59,11 @@ - + + + + + @@ -74,7 +78,11 @@ - + + + + + @@ -84,7 +92,11 @@ - + + + + + @@ -94,7 +106,15 @@ - + + + + + + + + + diff --git a/vtr_flow/arch/power/k6_N10_I40_Fi6_L4_frac1_ff2_C25_45nm.xml b/vtr_flow/arch/power/k6_N10_I40_Fi6_L4_frac1_ff2_C25_45nm.xml index 5d7d7869beb..4e1f262a479 100644 --- a/vtr_flow/arch/power/k6_N10_I40_Fi6_L4_frac1_ff2_C25_45nm.xml +++ b/vtr_flow/arch/power/k6_N10_I40_Fi6_L4_frac1_ff2_C25_45nm.xml @@ -59,7 +59,11 @@ - + + + + + @@ -74,7 +78,11 @@ - + + + + + @@ -84,7 +92,11 @@ - + + + + + @@ -94,7 +106,15 @@ - + + + + + + + + + diff --git a/vtr_flow/arch/power/k6_N10_I40_Fi6_L4_frac1_ff2_C30_45nm.xml b/vtr_flow/arch/power/k6_N10_I40_Fi6_L4_frac1_ff2_C30_45nm.xml index 21381ff943f..47dae6ce90e 100644 --- a/vtr_flow/arch/power/k6_N10_I40_Fi6_L4_frac1_ff2_C30_45nm.xml +++ b/vtr_flow/arch/power/k6_N10_I40_Fi6_L4_frac1_ff2_C30_45nm.xml @@ -59,7 +59,11 @@ - + + + + + @@ -74,7 +78,11 @@ - + + + + + @@ -84,7 +92,11 @@ - + + + + + @@ -94,7 +106,15 @@ - + + + + + + + + + diff --git a/vtr_flow/arch/power/k6_N10_I40_Fi6_L4_frac1_ff2_C35_45nm.xml b/vtr_flow/arch/power/k6_N10_I40_Fi6_L4_frac1_ff2_C35_45nm.xml index f4720dc52a0..ce07bd01eef 100644 --- a/vtr_flow/arch/power/k6_N10_I40_Fi6_L4_frac1_ff2_C35_45nm.xml +++ b/vtr_flow/arch/power/k6_N10_I40_Fi6_L4_frac1_ff2_C35_45nm.xml @@ -59,7 +59,11 @@ - + + + + + @@ -74,7 +78,11 @@ - + + + + + @@ -84,7 +92,11 @@ - + + + + + @@ -94,7 +106,15 @@ - + + + + + + + + + diff --git a/vtr_flow/arch/power/k6_N10_I40_Fi6_L4_frac1_ff2_C40_45nm.xml b/vtr_flow/arch/power/k6_N10_I40_Fi6_L4_frac1_ff2_C40_45nm.xml index 8615138ab29..f4adc47f9e0 100644 --- a/vtr_flow/arch/power/k6_N10_I40_Fi6_L4_frac1_ff2_C40_45nm.xml 
+++ b/vtr_flow/arch/power/k6_N10_I40_Fi6_L4_frac1_ff2_C40_45nm.xml @@ -59,7 +59,11 @@ - + + + + + @@ -74,7 +78,11 @@ - + + + + + @@ -84,7 +92,11 @@ - + + + + + @@ -94,7 +106,15 @@ - + + + + + + + + + diff --git a/vtr_flow/arch/power/k6_N10_I40_Fi6_L4_frac1_ff2_C45_45nm.xml b/vtr_flow/arch/power/k6_N10_I40_Fi6_L4_frac1_ff2_C45_45nm.xml index a3d164b9e90..187207d2e62 100644 --- a/vtr_flow/arch/power/k6_N10_I40_Fi6_L4_frac1_ff2_C45_45nm.xml +++ b/vtr_flow/arch/power/k6_N10_I40_Fi6_L4_frac1_ff2_C45_45nm.xml @@ -59,7 +59,11 @@ - + + + + + @@ -74,7 +78,11 @@ - + + + + + @@ -84,7 +92,11 @@ - + + + + + @@ -94,7 +106,15 @@ - + + + + + + + + + diff --git a/vtr_flow/arch/power/k6_N10_I40_Fi6_L4_frac1_ff2_C50_45nm.xml b/vtr_flow/arch/power/k6_N10_I40_Fi6_L4_frac1_ff2_C50_45nm.xml index 7723512c110..4b3b7a51c03 100644 --- a/vtr_flow/arch/power/k6_N10_I40_Fi6_L4_frac1_ff2_C50_45nm.xml +++ b/vtr_flow/arch/power/k6_N10_I40_Fi6_L4_frac1_ff2_C50_45nm.xml @@ -59,7 +59,11 @@ - + + + + + @@ -74,7 +78,11 @@ - + + + + + @@ -84,7 +92,11 @@ - + + + + + @@ -94,7 +106,15 @@ - + + + + + + + + + diff --git a/vtr_flow/arch/power/k6_N10_I40_Fi6_L4_frac1_ff2_C55_45nm.xml b/vtr_flow/arch/power/k6_N10_I40_Fi6_L4_frac1_ff2_C55_45nm.xml index 0c93813eeba..31fd17f2396 100644 --- a/vtr_flow/arch/power/k6_N10_I40_Fi6_L4_frac1_ff2_C55_45nm.xml +++ b/vtr_flow/arch/power/k6_N10_I40_Fi6_L4_frac1_ff2_C55_45nm.xml @@ -59,7 +59,11 @@ - + + + + + @@ -74,7 +78,11 @@ - + + + + + @@ -84,7 +92,11 @@ - + + + + + @@ -94,7 +106,15 @@ - + + + + + + + + + diff --git a/vtr_flow/arch/power/k6_N10_I40_Fi6_L4_frac1_ff2_C5_45nm.xml b/vtr_flow/arch/power/k6_N10_I40_Fi6_L4_frac1_ff2_C5_45nm.xml index 9e872b92471..2aafc02e36d 100644 --- a/vtr_flow/arch/power/k6_N10_I40_Fi6_L4_frac1_ff2_C5_45nm.xml +++ b/vtr_flow/arch/power/k6_N10_I40_Fi6_L4_frac1_ff2_C5_45nm.xml @@ -59,7 +59,11 @@ - + + + + + @@ -74,7 +78,11 @@ - + + + + + @@ -84,7 +92,11 @@ - + + + + + @@ -94,7 +106,15 @@ - + + + + + + + + + diff --git 
a/vtr_flow/arch/power/k6_N10_I40_Fi6_L4_frac1_ff2_C60_45nm.xml b/vtr_flow/arch/power/k6_N10_I40_Fi6_L4_frac1_ff2_C60_45nm.xml index 008e80d4829..f527c15e67c 100644 --- a/vtr_flow/arch/power/k6_N10_I40_Fi6_L4_frac1_ff2_C60_45nm.xml +++ b/vtr_flow/arch/power/k6_N10_I40_Fi6_L4_frac1_ff2_C60_45nm.xml @@ -59,7 +59,11 @@ - + + + + + @@ -74,7 +78,11 @@ - + + + + + @@ -84,7 +92,11 @@ - + + + + + @@ -94,7 +106,15 @@ - + + + + + + + + + diff --git a/vtr_flow/arch/power/k6_N10_I40_Fi6_L5_frac0_ff1_45nm.xml b/vtr_flow/arch/power/k6_N10_I40_Fi6_L5_frac0_ff1_45nm.xml index 3e8be2dc5f0..25548bee024 100644 --- a/vtr_flow/arch/power/k6_N10_I40_Fi6_L5_frac0_ff1_45nm.xml +++ b/vtr_flow/arch/power/k6_N10_I40_Fi6_L5_frac0_ff1_45nm.xml @@ -59,7 +59,11 @@ - + + + + + @@ -74,7 +78,11 @@ - + + + + + @@ -84,7 +92,11 @@ - + + + + + @@ -94,7 +106,15 @@ - + + + + + + + + + diff --git a/vtr_flow/arch/power/k6_N10_I40_Fi6_L6_frac0_ff1_45nm.xml b/vtr_flow/arch/power/k6_N10_I40_Fi6_L6_frac0_ff1_45nm.xml index 4fa56a0f98e..ccd31ec4ef2 100644 --- a/vtr_flow/arch/power/k6_N10_I40_Fi6_L6_frac0_ff1_45nm.xml +++ b/vtr_flow/arch/power/k6_N10_I40_Fi6_L6_frac0_ff1_45nm.xml @@ -59,7 +59,11 @@ - + + + + + @@ -74,7 +78,11 @@ - + + + + + @@ -84,7 +92,11 @@ - + + + + + @@ -94,7 +106,15 @@ - + + + + + + + + + diff --git a/vtr_flow/arch/power/k6_N10_I40_Fi7_L4_frac1_ff1_45nm.xml b/vtr_flow/arch/power/k6_N10_I40_Fi7_L4_frac1_ff1_45nm.xml index 73d0cba12a9..558f11a29b4 100644 --- a/vtr_flow/arch/power/k6_N10_I40_Fi7_L4_frac1_ff1_45nm.xml +++ b/vtr_flow/arch/power/k6_N10_I40_Fi7_L4_frac1_ff1_45nm.xml @@ -59,7 +59,11 @@ - + + + + + @@ -74,7 +78,11 @@ - + + + + + @@ -84,7 +92,11 @@ - + + + + + @@ -94,7 +106,15 @@ - + + + + + + + + + diff --git a/vtr_flow/arch/power/k6_N10_I40_Fi7_L4_frac1_ff2_45nm.xml b/vtr_flow/arch/power/k6_N10_I40_Fi7_L4_frac1_ff2_45nm.xml index e74e92cb560..f959da6cf5e 100644 --- a/vtr_flow/arch/power/k6_N10_I40_Fi7_L4_frac1_ff2_45nm.xml +++ 
b/vtr_flow/arch/power/k6_N10_I40_Fi7_L4_frac1_ff2_45nm.xml @@ -59,7 +59,11 @@ - + + + + + @@ -74,7 +78,11 @@ - + + + + + @@ -84,7 +92,11 @@ - + + + + + @@ -94,7 +106,15 @@ - + + + + + + + + + diff --git a/vtr_flow/arch/power/k6_N10_I40_Fi8_L4_frac1_ff1_45nm.xml b/vtr_flow/arch/power/k6_N10_I40_Fi8_L4_frac1_ff1_45nm.xml index a374dd063b4..63c6481765a 100644 --- a/vtr_flow/arch/power/k6_N10_I40_Fi8_L4_frac1_ff1_45nm.xml +++ b/vtr_flow/arch/power/k6_N10_I40_Fi8_L4_frac1_ff1_45nm.xml @@ -59,7 +59,11 @@ - + + + + + @@ -74,7 +78,11 @@ - + + + + + @@ -84,7 +92,11 @@ - + + + + + @@ -94,7 +106,15 @@ - + + + + + + + + + diff --git a/vtr_flow/arch/power/k6_N10_I40_Fi8_L4_frac1_ff2_45nm.xml b/vtr_flow/arch/power/k6_N10_I40_Fi8_L4_frac1_ff2_45nm.xml index de05dbdf150..46df17691fa 100644 --- a/vtr_flow/arch/power/k6_N10_I40_Fi8_L4_frac1_ff2_45nm.xml +++ b/vtr_flow/arch/power/k6_N10_I40_Fi8_L4_frac1_ff2_45nm.xml @@ -59,7 +59,11 @@ - + + + + + @@ -74,7 +78,11 @@ - + + + + + @@ -84,7 +92,11 @@ - + + + + + @@ -94,7 +106,15 @@ - + + + + + + + + + diff --git a/vtr_flow/arch/power/k6_N10_I47_Fi7_L4_frac1_ff1_45nm.xml b/vtr_flow/arch/power/k6_N10_I47_Fi7_L4_frac1_ff1_45nm.xml index bf800f56827..a777b1bfca2 100644 --- a/vtr_flow/arch/power/k6_N10_I47_Fi7_L4_frac1_ff1_45nm.xml +++ b/vtr_flow/arch/power/k6_N10_I47_Fi7_L4_frac1_ff1_45nm.xml @@ -59,7 +59,11 @@ - + + + + + @@ -74,7 +78,11 @@ - + + + + + @@ -84,7 +92,11 @@ - + + + + + @@ -94,7 +106,15 @@ - + + + + + + + + + diff --git a/vtr_flow/arch/power/k6_N10_I47_Fi7_L4_frac1_ff2_45nm.xml b/vtr_flow/arch/power/k6_N10_I47_Fi7_L4_frac1_ff2_45nm.xml index c02fbf7661f..69ed65b4fad 100644 --- a/vtr_flow/arch/power/k6_N10_I47_Fi7_L4_frac1_ff2_45nm.xml +++ b/vtr_flow/arch/power/k6_N10_I47_Fi7_L4_frac1_ff2_45nm.xml @@ -59,7 +59,11 @@ - + + + + + @@ -74,7 +78,11 @@ - + + + + + @@ -84,7 +92,11 @@ - + + + + + @@ -94,7 +106,15 @@ - + + + + + + + + + diff --git a/vtr_flow/arch/power/k6_N10_I53_Fi8_L4_frac1_ff1_45nm.xml 
b/vtr_flow/arch/power/k6_N10_I53_Fi8_L4_frac1_ff1_45nm.xml index 7352f987d95..d6046cdb54f 100644 --- a/vtr_flow/arch/power/k6_N10_I53_Fi8_L4_frac1_ff1_45nm.xml +++ b/vtr_flow/arch/power/k6_N10_I53_Fi8_L4_frac1_ff1_45nm.xml @@ -59,7 +59,11 @@ - + + + + + @@ -74,7 +78,11 @@ - + + + + + @@ -84,7 +92,11 @@ - + + + + + @@ -94,7 +106,15 @@ - + + + + + + + + + diff --git a/vtr_flow/arch/power/k6_N10_I53_Fi8_L4_frac1_ff2_45nm.xml b/vtr_flow/arch/power/k6_N10_I53_Fi8_L4_frac1_ff2_45nm.xml index 7e989afeb06..8084556e5a2 100644 --- a/vtr_flow/arch/power/k6_N10_I53_Fi8_L4_frac1_ff2_45nm.xml +++ b/vtr_flow/arch/power/k6_N10_I53_Fi8_L4_frac1_ff2_45nm.xml @@ -59,7 +59,11 @@ - + + + + + @@ -74,7 +78,11 @@ - + + + + + @@ -84,7 +92,11 @@ - + + + + + @@ -94,7 +106,15 @@ - + + + + + + + + + diff --git a/vtr_flow/arch/routing_mode/arch.xml b/vtr_flow/arch/routing_mode/arch.xml index e8e2ce9a7ea..0531516d3cf 100644 --- a/vtr_flow/arch/routing_mode/arch.xml +++ b/vtr_flow/arch/routing_mode/arch.xml @@ -43,7 +43,12 @@ - + + + + + + @@ -59,7 +64,10 @@ - + + + + diff --git a/vtr_flow/arch/routing_mode/slicem.xml b/vtr_flow/arch/routing_mode/slicem.xml index 4215c3ace38..b666c24a1f7 100644 --- a/vtr_flow/arch/routing_mode/slicem.xml +++ b/vtr_flow/arch/routing_mode/slicem.xml @@ -63,7 +63,10 @@ - + + + + @@ -77,7 +80,58 @@ - + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/vtr_flow/arch/timing/EArch.xml b/vtr_flow/arch/timing/EArch.xml index 6f83218cd67..3664908ff0f 100644 --- a/vtr_flow/arch/timing/EArch.xml +++ b/vtr_flow/arch/timing/EArch.xml @@ -151,7 +151,11 @@ - + + + + + @@ -166,7 +170,16 @@ - + + + + + + + + + + @@ -184,7 +197,11 @@ - + + + + + @@ -194,7 +211,15 @@ - + + + + + + + + + diff --git a/vtr_flow/arch/timing/fixed_size/fixed_k6_N8_gate_boost_0.2V_22nm.xml b/vtr_flow/arch/timing/fixed_size/fixed_k6_N8_gate_boost_0.2V_22nm.xml index 98d311e41ca..9effe996a87 100755 --- 
a/vtr_flow/arch/timing/fixed_size/fixed_k6_N8_gate_boost_0.2V_22nm.xml +++ b/vtr_flow/arch/timing/fixed_size/fixed_k6_N8_gate_boost_0.2V_22nm.xml @@ -67,7 +67,11 @@ Scaling assumptions from 40nm to 22nm: delay constant area drop (22/40)^2 but si - + + + + + @@ -82,7 +86,14 @@ Scaling assumptions from 40nm to 22nm: delay constant area drop (22/40)^2 but si - + + + + + + + + @@ -98,7 +109,11 @@ Scaling assumptions from 40nm to 22nm: delay constant area drop (22/40)^2 but si - + + + + + @@ -108,7 +123,15 @@ Scaling assumptions from 40nm to 22nm: delay constant area drop (22/40)^2 but si - + + + + + + + + + diff --git a/vtr_flow/arch/timing/fixed_size/fixed_k6_N8_lookahead_chain_gate_boost_0.2V_22nm.xml b/vtr_flow/arch/timing/fixed_size/fixed_k6_N8_lookahead_chain_gate_boost_0.2V_22nm.xml index e2fad47db2a..46fcb62e772 100755 --- a/vtr_flow/arch/timing/fixed_size/fixed_k6_N8_lookahead_chain_gate_boost_0.2V_22nm.xml +++ b/vtr_flow/arch/timing/fixed_size/fixed_k6_N8_lookahead_chain_gate_boost_0.2V_22nm.xml @@ -85,7 +85,11 @@ Each 2-to-1 mux adds 6 MWTAs for SRAM cell. - + + + + + @@ -100,7 +104,16 @@ Each 2-to-1 mux adds 6 MWTAs for SRAM cell. - + + + + + + + + + + @@ -122,7 +135,11 @@ Each 2-to-1 mux adds 6 MWTAs for SRAM cell. - + + + + + @@ -132,7 +149,15 @@ Each 2-to-1 mux adds 6 MWTAs for SRAM cell. - + + + + + + + + + diff --git a/vtr_flow/arch/timing/fixed_size/fixed_k6_N8_lookahead_unbalanced_chain_gate_boost_0.2V_22nm.xml b/vtr_flow/arch/timing/fixed_size/fixed_k6_N8_lookahead_unbalanced_chain_gate_boost_0.2V_22nm.xml index fc9bbce42d7..4e5b94b1370 100755 --- a/vtr_flow/arch/timing/fixed_size/fixed_k6_N8_lookahead_unbalanced_chain_gate_boost_0.2V_22nm.xml +++ b/vtr_flow/arch/timing/fixed_size/fixed_k6_N8_lookahead_unbalanced_chain_gate_boost_0.2V_22nm.xml @@ -88,7 +88,11 @@ Each 2-to-1 mux adds 6 MWTAs for SRAM cell. - + + + + + @@ -103,7 +107,16 @@ Each 2-to-1 mux adds 6 MWTAs for SRAM cell. 
- + + + + + + + + + + @@ -125,7 +138,11 @@ Each 2-to-1 mux adds 6 MWTAs for SRAM cell. - + + + + + @@ -135,7 +152,15 @@ Each 2-to-1 mux adds 6 MWTAs for SRAM cell. - + + + + + + + + + diff --git a/vtr_flow/arch/timing/fixed_size/fixed_k6_N8_ripple_chain_gate_boost_0.2V_22nm.xml b/vtr_flow/arch/timing/fixed_size/fixed_k6_N8_ripple_chain_gate_boost_0.2V_22nm.xml index 22307383828..4af6242e8a1 100755 --- a/vtr_flow/arch/timing/fixed_size/fixed_k6_N8_ripple_chain_gate_boost_0.2V_22nm.xml +++ b/vtr_flow/arch/timing/fixed_size/fixed_k6_N8_ripple_chain_gate_boost_0.2V_22nm.xml @@ -85,7 +85,11 @@ Each 2-to-1 mux adds 6 MWTAs for SRAM cell. - + + + + + @@ -100,7 +104,16 @@ Each 2-to-1 mux adds 6 MWTAs for SRAM cell. - + + + + + + + + + + @@ -122,7 +135,11 @@ Each 2-to-1 mux adds 6 MWTAs for SRAM cell. - + + + + + @@ -132,7 +149,15 @@ Each 2-to-1 mux adds 6 MWTAs for SRAM cell. - + + + + + + + + + diff --git a/vtr_flow/arch/timing/fixed_size/fixed_k6_N8_unbalanced_ripple_chain_gate_boost_0.2V_22nm.xml b/vtr_flow/arch/timing/fixed_size/fixed_k6_N8_unbalanced_ripple_chain_gate_boost_0.2V_22nm.xml index 22b6ac44f6d..96c63f0b9b0 100755 --- a/vtr_flow/arch/timing/fixed_size/fixed_k6_N8_unbalanced_ripple_chain_gate_boost_0.2V_22nm.xml +++ b/vtr_flow/arch/timing/fixed_size/fixed_k6_N8_unbalanced_ripple_chain_gate_boost_0.2V_22nm.xml @@ -87,7 +87,11 @@ Each 2-to-1 mux adds 6 MWTAs for SRAM cell. - + + + + + @@ -102,7 +106,16 @@ Each 2-to-1 mux adds 6 MWTAs for SRAM cell. - + + + + + + + + + + @@ -124,7 +137,11 @@ Each 2-to-1 mux adds 6 MWTAs for SRAM cell. - + + + + + @@ -134,7 +151,15 @@ Each 2-to-1 mux adds 6 MWTAs for SRAM cell. 
- + + + + + + + + + diff --git a/vtr_flow/arch/timing/fixed_size/fixed_k6_frac_2ripple_N8_22nm.xml b/vtr_flow/arch/timing/fixed_size/fixed_k6_frac_2ripple_N8_22nm.xml index cf6f3ca18e6..3b5901d7d2f 100644 --- a/vtr_flow/arch/timing/fixed_size/fixed_k6_frac_2ripple_N8_22nm.xml +++ b/vtr_flow/arch/timing/fixed_size/fixed_k6_frac_2ripple_N8_22nm.xml @@ -83,7 +83,11 @@ Each 2-to-1 mux adds 6 MWTAs for SRAM cell. - + + + + + @@ -98,7 +102,16 @@ Each 2-to-1 mux adds 6 MWTAs for SRAM cell. - + + + + + + + + + + @@ -119,7 +132,11 @@ Each 2-to-1 mux adds 6 MWTAs for SRAM cell. - + + + + + @@ -129,7 +146,15 @@ Each 2-to-1 mux adds 6 MWTAs for SRAM cell. - + + + + + + + + + diff --git a/vtr_flow/arch/timing/fixed_size/fixed_k6_frac_2uripple_N8_22nm.xml b/vtr_flow/arch/timing/fixed_size/fixed_k6_frac_2uripple_N8_22nm.xml index b2676a9fb76..e20915cc323 100644 --- a/vtr_flow/arch/timing/fixed_size/fixed_k6_frac_2uripple_N8_22nm.xml +++ b/vtr_flow/arch/timing/fixed_size/fixed_k6_frac_2uripple_N8_22nm.xml @@ -78,7 +78,11 @@ Scaling assumptions from 40nm to 22nm: delay constant area drop (22/40)^2 but si - + + + + + @@ -93,7 +97,16 @@ Scaling assumptions from 40nm to 22nm: delay constant area drop (22/40)^2 but si - + + + + + + + + + + @@ -114,7 +127,11 @@ Scaling assumptions from 40nm to 22nm: delay constant area drop (22/40)^2 but si - + + + + + @@ -124,7 +141,15 @@ Scaling assumptions from 40nm to 22nm: delay constant area drop (22/40)^2 but si - + + + + + + + + + diff --git a/vtr_flow/arch/timing/fixed_size/fixed_k6_frac_N8_22nm.xml b/vtr_flow/arch/timing/fixed_size/fixed_k6_frac_N8_22nm.xml index b68d6759635..2abbf03f48f 100644 --- a/vtr_flow/arch/timing/fixed_size/fixed_k6_frac_N8_22nm.xml +++ b/vtr_flow/arch/timing/fixed_size/fixed_k6_frac_N8_22nm.xml @@ -67,7 +67,11 @@ Scaling assumptions from 40nm to 22nm: delay constant area drop (22/40)^2 but si - + + + + + @@ -82,7 +86,14 @@ Scaling assumptions from 40nm to 22nm: delay constant area drop (22/40)^2 but si - + + + + + + + 
+ @@ -98,7 +109,11 @@ Scaling assumptions from 40nm to 22nm: delay constant area drop (22/40)^2 but si - + + + + + @@ -108,7 +123,15 @@ Scaling assumptions from 40nm to 22nm: delay constant area drop (22/40)^2 but si - + + + + + + + + + diff --git a/vtr_flow/arch/timing/fixed_size/fixed_k6_frac_ripple_N8_22nm.xml b/vtr_flow/arch/timing/fixed_size/fixed_k6_frac_ripple_N8_22nm.xml index bef5c735918..8e251a66c0c 100644 --- a/vtr_flow/arch/timing/fixed_size/fixed_k6_frac_ripple_N8_22nm.xml +++ b/vtr_flow/arch/timing/fixed_size/fixed_k6_frac_ripple_N8_22nm.xml @@ -78,7 +78,11 @@ Scaling assumptions from 40nm to 22nm: delay constant area drop (22/40)^2 but si - + + + + + @@ -93,7 +97,16 @@ Scaling assumptions from 40nm to 22nm: delay constant area drop (22/40)^2 but si - + + + + + + + + + + @@ -114,7 +127,11 @@ Scaling assumptions from 40nm to 22nm: delay constant area drop (22/40)^2 but si - + + + + + @@ -124,7 +141,15 @@ Scaling assumptions from 40nm to 22nm: delay constant area drop (22/40)^2 but si - + + + + + + + + + diff --git a/vtr_flow/arch/timing/fixed_size/fixed_k6_frac_uripple_N8_22nm.xml b/vtr_flow/arch/timing/fixed_size/fixed_k6_frac_uripple_N8_22nm.xml index f1eb631ad4b..9402c319a53 100644 --- a/vtr_flow/arch/timing/fixed_size/fixed_k6_frac_uripple_N8_22nm.xml +++ b/vtr_flow/arch/timing/fixed_size/fixed_k6_frac_uripple_N8_22nm.xml @@ -78,7 +78,11 @@ Scaling assumptions from 40nm to 22nm: delay constant area drop (22/40)^2 but si - + + + + + @@ -93,7 +97,16 @@ Scaling assumptions from 40nm to 22nm: delay constant area drop (22/40)^2 but si - + + + + + + + + + + @@ -114,7 +127,11 @@ Scaling assumptions from 40nm to 22nm: delay constant area drop (22/40)^2 but si - + + + + + @@ -124,7 +141,15 @@ Scaling assumptions from 40nm to 22nm: delay constant area drop (22/40)^2 but si - + + + + + + + + + diff --git a/vtr_flow/arch/timing/fixed_size/fixed_nointerclb_k6_N8_lookahead_chain_gate_boost_0.2V_22nm.xml 
b/vtr_flow/arch/timing/fixed_size/fixed_nointerclb_k6_N8_lookahead_chain_gate_boost_0.2V_22nm.xml index e06480a1666..87d9737fe20 100755 --- a/vtr_flow/arch/timing/fixed_size/fixed_nointerclb_k6_N8_lookahead_chain_gate_boost_0.2V_22nm.xml +++ b/vtr_flow/arch/timing/fixed_size/fixed_nointerclb_k6_N8_lookahead_chain_gate_boost_0.2V_22nm.xml @@ -85,7 +85,11 @@ Each 2-to-1 mux adds 6 MWTAs for SRAM cell. - + + + + + @@ -100,7 +104,16 @@ Each 2-to-1 mux adds 6 MWTAs for SRAM cell. - + + + + + + + + + + @@ -120,7 +133,11 @@ Each 2-to-1 mux adds 6 MWTAs for SRAM cell. - + + + + + @@ -130,7 +147,15 @@ Each 2-to-1 mux adds 6 MWTAs for SRAM cell. - + + + + + + + + + diff --git a/vtr_flow/arch/timing/fixed_size/fixed_nointerclb_k6_N8_lookahead_unbalanced_chain_gate_boost_0.2V_22nm.xml b/vtr_flow/arch/timing/fixed_size/fixed_nointerclb_k6_N8_lookahead_unbalanced_chain_gate_boost_0.2V_22nm.xml index a044031777d..9f19410402a 100755 --- a/vtr_flow/arch/timing/fixed_size/fixed_nointerclb_k6_N8_lookahead_unbalanced_chain_gate_boost_0.2V_22nm.xml +++ b/vtr_flow/arch/timing/fixed_size/fixed_nointerclb_k6_N8_lookahead_unbalanced_chain_gate_boost_0.2V_22nm.xml @@ -88,7 +88,11 @@ Each 2-to-1 mux adds 6 MWTAs for SRAM cell. - + + + + + @@ -103,7 +107,16 @@ Each 2-to-1 mux adds 6 MWTAs for SRAM cell. - + + + + + + + + + + @@ -123,7 +136,11 @@ Each 2-to-1 mux adds 6 MWTAs for SRAM cell. - + + + + + @@ -133,7 +150,15 @@ Each 2-to-1 mux adds 6 MWTAs for SRAM cell. - + + + + + + + + + diff --git a/vtr_flow/arch/timing/fixed_size/fixed_nointerclb_k6_N8_ripple_chain_gate_boost_0.2V_22nm.xml b/vtr_flow/arch/timing/fixed_size/fixed_nointerclb_k6_N8_ripple_chain_gate_boost_0.2V_22nm.xml index 2f2559707ea..c07b8215744 100755 --- a/vtr_flow/arch/timing/fixed_size/fixed_nointerclb_k6_N8_ripple_chain_gate_boost_0.2V_22nm.xml +++ b/vtr_flow/arch/timing/fixed_size/fixed_nointerclb_k6_N8_ripple_chain_gate_boost_0.2V_22nm.xml @@ -85,7 +85,11 @@ Each 2-to-1 mux adds 6 MWTAs for SRAM cell. 
- + + + + + @@ -100,7 +104,16 @@ Each 2-to-1 mux adds 6 MWTAs for SRAM cell. - + + + + + + + + + + @@ -120,7 +133,11 @@ Each 2-to-1 mux adds 6 MWTAs for SRAM cell. - + + + + + @@ -130,7 +147,15 @@ Each 2-to-1 mux adds 6 MWTAs for SRAM cell. - + + + + + + + + + diff --git a/vtr_flow/arch/timing/fixed_size/fixed_nointerclb_k6_N8_unbalanced_ripple_chain_gate_boost_0.2V_22nm.xml b/vtr_flow/arch/timing/fixed_size/fixed_nointerclb_k6_N8_unbalanced_ripple_chain_gate_boost_0.2V_22nm.xml index ff01a7cb038..ce2e6202a2a 100755 --- a/vtr_flow/arch/timing/fixed_size/fixed_nointerclb_k6_N8_unbalanced_ripple_chain_gate_boost_0.2V_22nm.xml +++ b/vtr_flow/arch/timing/fixed_size/fixed_nointerclb_k6_N8_unbalanced_ripple_chain_gate_boost_0.2V_22nm.xml @@ -87,7 +87,11 @@ Each 2-to-1 mux adds 6 MWTAs for SRAM cell. - + + + + + @@ -102,7 +106,16 @@ Each 2-to-1 mux adds 6 MWTAs for SRAM cell. - + + + + + + + + + + @@ -122,7 +135,11 @@ Each 2-to-1 mux adds 6 MWTAs for SRAM cell. - + + + + + @@ -132,7 +149,15 @@ Each 2-to-1 mux adds 6 MWTAs for SRAM cell. - + + + + + + + + + diff --git a/vtr_flow/arch/timing/fraclut_carrychain/k6_frac_2ripple_N8_22nm.xml b/vtr_flow/arch/timing/fraclut_carrychain/k6_frac_2ripple_N8_22nm.xml index 43e9e951b55..9acffac40d3 100644 --- a/vtr_flow/arch/timing/fraclut_carrychain/k6_frac_2ripple_N8_22nm.xml +++ b/vtr_flow/arch/timing/fraclut_carrychain/k6_frac_2ripple_N8_22nm.xml @@ -83,7 +83,11 @@ Each 2-to-1 mux adds 6 MWTAs for SRAM cell. - + + + + + @@ -98,7 +102,16 @@ Each 2-to-1 mux adds 6 MWTAs for SRAM cell. - + + + + + + + + + + @@ -119,7 +132,11 @@ Each 2-to-1 mux adds 6 MWTAs for SRAM cell. - + + + + + @@ -129,7 +146,15 @@ Each 2-to-1 mux adds 6 MWTAs for SRAM cell. 
- + + + + + + + + + diff --git a/vtr_flow/arch/timing/fraclut_carrychain/k6_frac_2uripple_N8_22nm.xml b/vtr_flow/arch/timing/fraclut_carrychain/k6_frac_2uripple_N8_22nm.xml index cd276646043..036ed2d3b91 100644 --- a/vtr_flow/arch/timing/fraclut_carrychain/k6_frac_2uripple_N8_22nm.xml +++ b/vtr_flow/arch/timing/fraclut_carrychain/k6_frac_2uripple_N8_22nm.xml @@ -78,7 +78,11 @@ Scaling assumptions from 40nm to 22nm: delay constant area drop (22/40)^2 but si - + + + + + @@ -93,7 +97,16 @@ Scaling assumptions from 40nm to 22nm: delay constant area drop (22/40)^2 but si - + + + + + + + + + + @@ -114,7 +127,11 @@ Scaling assumptions from 40nm to 22nm: delay constant area drop (22/40)^2 but si - + + + + + @@ -124,7 +141,15 @@ Scaling assumptions from 40nm to 22nm: delay constant area drop (22/40)^2 but si - + + + + + + + + + diff --git a/vtr_flow/arch/timing/fraclut_carrychain/k6_frac_N8_22nm.xml b/vtr_flow/arch/timing/fraclut_carrychain/k6_frac_N8_22nm.xml index 22419ea1597..fb389dd6583 100644 --- a/vtr_flow/arch/timing/fraclut_carrychain/k6_frac_N8_22nm.xml +++ b/vtr_flow/arch/timing/fraclut_carrychain/k6_frac_N8_22nm.xml @@ -67,7 +67,11 @@ Scaling assumptions from 40nm to 22nm: delay constant area drop (22/40)^2 but si - + + + + + @@ -82,7 +86,14 @@ Scaling assumptions from 40nm to 22nm: delay constant area drop (22/40)^2 but si - + + + + + + + + @@ -98,7 +109,11 @@ Scaling assumptions from 40nm to 22nm: delay constant area drop (22/40)^2 but si - + + + + + @@ -108,7 +123,15 @@ Scaling assumptions from 40nm to 22nm: delay constant area drop (22/40)^2 but si - + + + + + + + + + diff --git a/vtr_flow/arch/timing/fraclut_carrychain/k6_frac_ripple_N8_22nm.xml b/vtr_flow/arch/timing/fraclut_carrychain/k6_frac_ripple_N8_22nm.xml index 25aa0c15803..05092ea78ff 100644 --- a/vtr_flow/arch/timing/fraclut_carrychain/k6_frac_ripple_N8_22nm.xml +++ b/vtr_flow/arch/timing/fraclut_carrychain/k6_frac_ripple_N8_22nm.xml @@ -78,7 +78,11 @@ Scaling assumptions from 40nm to 22nm: delay 
constant area drop (22/40)^2 but si - + + + + + @@ -93,7 +97,16 @@ Scaling assumptions from 40nm to 22nm: delay constant area drop (22/40)^2 but si - + + + + + + + + + + @@ -114,7 +127,11 @@ Scaling assumptions from 40nm to 22nm: delay constant area drop (22/40)^2 but si - + + + + + @@ -124,7 +141,15 @@ Scaling assumptions from 40nm to 22nm: delay constant area drop (22/40)^2 but si - + + + + + + + + + diff --git a/vtr_flow/arch/timing/fraclut_carrychain/k6_frac_uripple_N8_22nm.xml b/vtr_flow/arch/timing/fraclut_carrychain/k6_frac_uripple_N8_22nm.xml index 4c60ffb98ac..975e7265d5a 100644 --- a/vtr_flow/arch/timing/fraclut_carrychain/k6_frac_uripple_N8_22nm.xml +++ b/vtr_flow/arch/timing/fraclut_carrychain/k6_frac_uripple_N8_22nm.xml @@ -78,7 +78,11 @@ Scaling assumptions from 40nm to 22nm: delay constant area drop (22/40)^2 but si - + + + + + @@ -93,7 +97,16 @@ Scaling assumptions from 40nm to 22nm: delay constant area drop (22/40)^2 but si - + + + + + + + + + + @@ -114,7 +127,11 @@ Scaling assumptions from 40nm to 22nm: delay constant area drop (22/40)^2 but si - + + + + + @@ -124,7 +141,15 @@ Scaling assumptions from 40nm to 22nm: delay constant area drop (22/40)^2 but si - + + + + + + + + + diff --git a/vtr_flow/arch/timing/global_nonuniform/x_delta_y_delta.xml b/vtr_flow/arch/timing/global_nonuniform/x_delta_y_delta.xml index 036597930b3..3a7edcd7c6d 100644 --- a/vtr_flow/arch/timing/global_nonuniform/x_delta_y_delta.xml +++ b/vtr_flow/arch/timing/global_nonuniform/x_delta_y_delta.xml @@ -135,7 +135,11 @@ - + + + + + @@ -150,7 +154,11 @@ - + + + + + @@ -160,7 +168,11 @@ - + + + + + @@ -170,7 +182,15 @@ - + + + + + + + + + diff --git a/vtr_flow/arch/timing/global_nonuniform/x_delta_y_uniform.xml b/vtr_flow/arch/timing/global_nonuniform/x_delta_y_uniform.xml index 9a4dd6904f8..7436261debd 100644 --- a/vtr_flow/arch/timing/global_nonuniform/x_delta_y_uniform.xml +++ b/vtr_flow/arch/timing/global_nonuniform/x_delta_y_uniform.xml @@ -135,7 +135,11 @@ - + + + + + @@ 
-150,7 +154,11 @@ - + + + + + @@ -160,7 +168,11 @@ - + + + + + @@ -170,7 +182,15 @@ - + + + + + + + + + diff --git a/vtr_flow/arch/timing/global_nonuniform/x_gaussian_y_gaussian.xml b/vtr_flow/arch/timing/global_nonuniform/x_gaussian_y_gaussian.xml index c5c31e0b0b3..91e1d3232b3 100644 --- a/vtr_flow/arch/timing/global_nonuniform/x_gaussian_y_gaussian.xml +++ b/vtr_flow/arch/timing/global_nonuniform/x_gaussian_y_gaussian.xml @@ -135,7 +135,11 @@ - + + + + + @@ -150,7 +154,11 @@ - + + + + + @@ -160,7 +168,11 @@ - + + + + + @@ -170,7 +182,15 @@ - + + + + + + + + + diff --git a/vtr_flow/arch/timing/global_nonuniform/x_gaussian_y_uniform.xml b/vtr_flow/arch/timing/global_nonuniform/x_gaussian_y_uniform.xml index 2815c136dd3..84021d5b3fd 100644 --- a/vtr_flow/arch/timing/global_nonuniform/x_gaussian_y_uniform.xml +++ b/vtr_flow/arch/timing/global_nonuniform/x_gaussian_y_uniform.xml @@ -135,7 +135,11 @@ - + + + + + @@ -150,7 +154,11 @@ - + + + + + @@ -160,7 +168,11 @@ - + + + + + @@ -170,7 +182,15 @@ - + + + + + + + + + diff --git a/vtr_flow/arch/timing/global_nonuniform/x_uniform_y_delta.xml b/vtr_flow/arch/timing/global_nonuniform/x_uniform_y_delta.xml index 396eb59cedf..da194c29dc9 100644 --- a/vtr_flow/arch/timing/global_nonuniform/x_uniform_y_delta.xml +++ b/vtr_flow/arch/timing/global_nonuniform/x_uniform_y_delta.xml @@ -135,7 +135,11 @@ - + + + + + @@ -150,7 +154,11 @@ - + + + + + @@ -160,7 +168,11 @@ - + + + + + @@ -170,7 +182,15 @@ - + + + + + + + + + diff --git a/vtr_flow/arch/timing/global_nonuniform/x_uniform_y_gaussian.xml b/vtr_flow/arch/timing/global_nonuniform/x_uniform_y_gaussian.xml index b30b0fcabd0..4c592e34eca 100644 --- a/vtr_flow/arch/timing/global_nonuniform/x_uniform_y_gaussian.xml +++ b/vtr_flow/arch/timing/global_nonuniform/x_uniform_y_gaussian.xml @@ -135,7 +135,11 @@ - + + + + + @@ -150,7 +154,11 @@ - + + + + + @@ -160,7 +168,11 @@ - + + + + + @@ -170,7 +182,15 @@ - + + + + + + + + + diff --git a/vtr_flow/arch/timing/hard_fpu_arch_timing.xml 
b/vtr_flow/arch/timing/hard_fpu_arch_timing.xml index e34a38b0851..2ceeebbea8a 100755 --- a/vtr_flow/arch/timing/hard_fpu_arch_timing.xml +++ b/vtr_flow/arch/timing/hard_fpu_arch_timing.xml @@ -27,7 +27,11 @@ - + + + + + @@ -42,7 +46,11 @@ - + + + + + @@ -52,7 +60,17 @@ - + + + + + + + + + + + diff --git a/vtr_flow/arch/timing/k4_N4_90nm.xml b/vtr_flow/arch/timing/k4_N4_90nm.xml index 54378686b64..4dca4766b55 100644 --- a/vtr_flow/arch/timing/k4_N4_90nm.xml +++ b/vtr_flow/arch/timing/k4_N4_90nm.xml @@ -18,7 +18,11 @@ - + + + + + @@ -33,7 +37,11 @@ - + + + + + diff --git a/vtr_flow/arch/timing/k4_N4_90nm_default_fc_pinloc.xml b/vtr_flow/arch/timing/k4_N4_90nm_default_fc_pinloc.xml index 05d42742562..c7c3c153208 100644 --- a/vtr_flow/arch/timing/k4_N4_90nm_default_fc_pinloc.xml +++ b/vtr_flow/arch/timing/k4_N4_90nm_default_fc_pinloc.xml @@ -18,7 +18,11 @@ - + + + + + @@ -33,7 +37,11 @@ - + + + + + diff --git a/vtr_flow/arch/timing/k4_N8_legacy_45nm.xml b/vtr_flow/arch/timing/k4_N8_legacy_45nm.xml index 8ebe5c09e4c..ffac26c78f1 100644 --- a/vtr_flow/arch/timing/k4_N8_legacy_45nm.xml +++ b/vtr_flow/arch/timing/k4_N8_legacy_45nm.xml @@ -18,7 +18,11 @@ - + + + + + @@ -33,7 +37,11 @@ - + + + + + diff --git a/vtr_flow/arch/timing/k6_N10_40nm.xml b/vtr_flow/arch/timing/k6_N10_40nm.xml index 3f52c56efa1..e880678accd 100644 --- a/vtr_flow/arch/timing/k6_N10_40nm.xml +++ b/vtr_flow/arch/timing/k6_N10_40nm.xml @@ -28,7 +28,11 @@ - + + + + + @@ -43,7 +47,11 @@ - + + + + + diff --git a/vtr_flow/arch/timing/k6_N10_gate_boost_0.2V_22nm.xml b/vtr_flow/arch/timing/k6_N10_gate_boost_0.2V_22nm.xml index c7574d8e9a0..2d974b642c9 100644 --- a/vtr_flow/arch/timing/k6_N10_gate_boost_0.2V_22nm.xml +++ b/vtr_flow/arch/timing/k6_N10_gate_boost_0.2V_22nm.xml @@ -67,7 +67,11 @@ Scaling assumptions from 40nm to 22nm: delay constant area drop (22/40)^2 but si - + + + + + @@ -82,7 +86,14 @@ Scaling assumptions from 40nm to 22nm: delay constant area drop (22/40)^2 but si - + + + + + + + + @@ -98,7 
+109,11 @@ Scaling assumptions from 40nm to 22nm: delay constant area drop (22/40)^2 but si - + + + + + @@ -108,7 +123,15 @@ Scaling assumptions from 40nm to 22nm: delay constant area drop (22/40)^2 but si - + + + + + + + + + diff --git a/vtr_flow/arch/timing/k6_N10_legacy_45nm.xml b/vtr_flow/arch/timing/k6_N10_legacy_45nm.xml index f242c90968e..11d203f5036 100644 --- a/vtr_flow/arch/timing/k6_N10_legacy_45nm.xml +++ b/vtr_flow/arch/timing/k6_N10_legacy_45nm.xml @@ -18,7 +18,11 @@ - + + + + + @@ -33,7 +37,11 @@ - + + + + + diff --git a/vtr_flow/arch/timing/k6_N10_mem32K_40nm.xml b/vtr_flow/arch/timing/k6_N10_mem32K_40nm.xml index c6922800705..c1bc2f468ba 100644 --- a/vtr_flow/arch/timing/k6_N10_mem32K_40nm.xml +++ b/vtr_flow/arch/timing/k6_N10_mem32K_40nm.xml @@ -81,7 +81,11 @@ - + + + + + @@ -96,7 +100,11 @@ - + + + + + @@ -106,7 +114,11 @@ - + + + + + @@ -116,7 +128,15 @@ - + + + + + + + + + diff --git a/vtr_flow/arch/timing/k6_N10_mem32K_40nm_fc_abs.xml b/vtr_flow/arch/timing/k6_N10_mem32K_40nm_fc_abs.xml index 1d17aa21021..ac34af75dc2 100644 --- a/vtr_flow/arch/timing/k6_N10_mem32K_40nm_fc_abs.xml +++ b/vtr_flow/arch/timing/k6_N10_mem32K_40nm_fc_abs.xml @@ -81,7 +81,11 @@ - + + + + + @@ -96,7 +100,11 @@ - + + + + + @@ -106,7 +114,11 @@ - + + + + + @@ -116,7 +128,15 @@ - + + + + + + + + + diff --git a/vtr_flow/arch/timing/k6_N10_ripple_chain_gate_boost_0.2V_22nm.xml b/vtr_flow/arch/timing/k6_N10_ripple_chain_gate_boost_0.2V_22nm.xml index 9098d3e59e0..dd91f2255e1 100644 --- a/vtr_flow/arch/timing/k6_N10_ripple_chain_gate_boost_0.2V_22nm.xml +++ b/vtr_flow/arch/timing/k6_N10_ripple_chain_gate_boost_0.2V_22nm.xml @@ -82,7 +82,11 @@ to isolate one chain from the next - + + + + + @@ -97,7 +101,16 @@ to isolate one chain from the next - + + + + + + + + + + @@ -119,7 +132,11 @@ to isolate one chain from the next - + + + + + @@ -129,7 +146,15 @@ to isolate one chain from the next - + + + + + + + + + diff --git 
a/vtr_flow/arch/timing/k6_N10_unbalanced_ripple_chain_gate_boost_0.2V_22nm.xml b/vtr_flow/arch/timing/k6_N10_unbalanced_ripple_chain_gate_boost_0.2V_22nm.xml index a0750e38154..f67a56044c3 100644 --- a/vtr_flow/arch/timing/k6_N10_unbalanced_ripple_chain_gate_boost_0.2V_22nm.xml +++ b/vtr_flow/arch/timing/k6_N10_unbalanced_ripple_chain_gate_boost_0.2V_22nm.xml @@ -80,7 +80,11 @@ carry chain from Safeen's CMOS ripple carry adder not gate boosted - + + + + + @@ -95,7 +99,16 @@ carry chain from Safeen's CMOS ripple carry adder not gate boosted - + + + + + + + + + + @@ -117,7 +130,11 @@ carry chain from Safeen's CMOS ripple carry adder not gate boosted - + + + + + @@ -127,7 +144,15 @@ carry chain from Safeen's CMOS ripple carry adder not gate boosted - + + + + + + + + + diff --git a/vtr_flow/arch/timing/k6_N8_gate_boost_0.2V_22nm.xml b/vtr_flow/arch/timing/k6_N8_gate_boost_0.2V_22nm.xml index 5c0a12ea2d0..4888855c7bc 100644 --- a/vtr_flow/arch/timing/k6_N8_gate_boost_0.2V_22nm.xml +++ b/vtr_flow/arch/timing/k6_N8_gate_boost_0.2V_22nm.xml @@ -67,7 +67,11 @@ Scaling assumptions from 40nm to 22nm: delay constant area drop (22/40)^2 but si - + + + + + @@ -82,7 +86,14 @@ Scaling assumptions from 40nm to 22nm: delay constant area drop (22/40)^2 but si - + + + + + + + + @@ -98,7 +109,11 @@ Scaling assumptions from 40nm to 22nm: delay constant area drop (22/40)^2 but si - + + + + + @@ -108,7 +123,15 @@ Scaling assumptions from 40nm to 22nm: delay constant area drop (22/40)^2 but si - + + + + + + + + + diff --git a/vtr_flow/arch/timing/k6_N8_lookahead_chain_gate_boost_0.2V_22nm.xml b/vtr_flow/arch/timing/k6_N8_lookahead_chain_gate_boost_0.2V_22nm.xml index 4ac176270d8..7c14b360365 100644 --- a/vtr_flow/arch/timing/k6_N8_lookahead_chain_gate_boost_0.2V_22nm.xml +++ b/vtr_flow/arch/timing/k6_N8_lookahead_chain_gate_boost_0.2V_22nm.xml @@ -85,7 +85,11 @@ Each 2-to-1 mux adds 6 MWTAs for SRAM cell. - + + + + + @@ -100,7 +104,16 @@ Each 2-to-1 mux adds 6 MWTAs for SRAM cell. 
- + + + + + + + + + + @@ -122,7 +135,11 @@ Each 2-to-1 mux adds 6 MWTAs for SRAM cell. - + + + + + @@ -132,7 +149,15 @@ Each 2-to-1 mux adds 6 MWTAs for SRAM cell. - + + + + + + + + + diff --git a/vtr_flow/arch/timing/k6_N8_lookahead_unbalanced_chain_gate_boost_0.2V_22nm.xml b/vtr_flow/arch/timing/k6_N8_lookahead_unbalanced_chain_gate_boost_0.2V_22nm.xml index aed96671546..fdc7f0db5db 100644 --- a/vtr_flow/arch/timing/k6_N8_lookahead_unbalanced_chain_gate_boost_0.2V_22nm.xml +++ b/vtr_flow/arch/timing/k6_N8_lookahead_unbalanced_chain_gate_boost_0.2V_22nm.xml @@ -88,7 +88,11 @@ Each 2-to-1 mux adds 6 MWTAs for SRAM cell. - + + + + + @@ -103,7 +107,16 @@ Each 2-to-1 mux adds 6 MWTAs for SRAM cell. - + + + + + + + + + + @@ -125,7 +138,11 @@ Each 2-to-1 mux adds 6 MWTAs for SRAM cell. - + + + + + @@ -135,7 +152,15 @@ Each 2-to-1 mux adds 6 MWTAs for SRAM cell. - + + + + + + + + + diff --git a/vtr_flow/arch/timing/k6_N8_ripple_chain_gate_boost_0.2V_22nm.xml b/vtr_flow/arch/timing/k6_N8_ripple_chain_gate_boost_0.2V_22nm.xml index c22b29c4e3a..a8c2bd43e25 100644 --- a/vtr_flow/arch/timing/k6_N8_ripple_chain_gate_boost_0.2V_22nm.xml +++ b/vtr_flow/arch/timing/k6_N8_ripple_chain_gate_boost_0.2V_22nm.xml @@ -85,7 +85,11 @@ Each 2-to-1 mux adds 6 MWTAs for SRAM cell. - + + + + + @@ -100,7 +104,16 @@ Each 2-to-1 mux adds 6 MWTAs for SRAM cell. - + + + + + + + + + + @@ -122,7 +135,11 @@ Each 2-to-1 mux adds 6 MWTAs for SRAM cell. - + + + + + @@ -132,7 +149,15 @@ Each 2-to-1 mux adds 6 MWTAs for SRAM cell. - + + + + + + + + + diff --git a/vtr_flow/arch/timing/k6_N8_unbalanced_ripple_chain_gate_boost_0.2V_22nm.xml b/vtr_flow/arch/timing/k6_N8_unbalanced_ripple_chain_gate_boost_0.2V_22nm.xml index 268dcf4b638..38afc7d572d 100644 --- a/vtr_flow/arch/timing/k6_N8_unbalanced_ripple_chain_gate_boost_0.2V_22nm.xml +++ b/vtr_flow/arch/timing/k6_N8_unbalanced_ripple_chain_gate_boost_0.2V_22nm.xml @@ -87,7 +87,11 @@ Each 2-to-1 mux adds 6 MWTAs for SRAM cell. 
- + + + + + @@ -102,7 +106,16 @@ Each 2-to-1 mux adds 6 MWTAs for SRAM cell. - + + + + + + + + + + @@ -124,7 +137,11 @@ Each 2-to-1 mux adds 6 MWTAs for SRAM cell. - + + + + + @@ -134,7 +151,15 @@ Each 2-to-1 mux adds 6 MWTAs for SRAM cell. - + + + + + + + + + diff --git a/vtr_flow/arch/timing/k6_frac_N10_40nm.xml b/vtr_flow/arch/timing/k6_frac_N10_40nm.xml index 4da00d67a14..b41af897794 100644 --- a/vtr_flow/arch/timing/k6_frac_N10_40nm.xml +++ b/vtr_flow/arch/timing/k6_frac_N10_40nm.xml @@ -28,7 +28,11 @@ - + + + + + @@ -43,7 +47,11 @@ - + + + + + diff --git a/vtr_flow/arch/timing/k6_frac_N10_4add_2chains_depop50_mem20K_22nm.xml b/vtr_flow/arch/timing/k6_frac_N10_4add_2chains_depop50_mem20K_22nm.xml index 0ef615a3aa4..f449b5d926e 100644 --- a/vtr_flow/arch/timing/k6_frac_N10_4add_2chains_depop50_mem20K_22nm.xml +++ b/vtr_flow/arch/timing/k6_frac_N10_4add_2chains_depop50_mem20K_22nm.xml @@ -98,7 +98,11 @@ - + + + + + @@ -113,7 +117,16 @@ - + + + + + + + + + + @@ -131,7 +144,10 @@ - + + + + @@ -140,7 +156,15 @@ - + + + + + + + + + diff --git a/vtr_flow/arch/timing/k6_frac_N10_4add_2chains_tie_off_depop50_mem20K_22nm.xml b/vtr_flow/arch/timing/k6_frac_N10_4add_2chains_tie_off_depop50_mem20K_22nm.xml index bd2264a60ac..0659b45da63 100644 --- a/vtr_flow/arch/timing/k6_frac_N10_4add_2chains_tie_off_depop50_mem20K_22nm.xml +++ b/vtr_flow/arch/timing/k6_frac_N10_4add_2chains_tie_off_depop50_mem20K_22nm.xml @@ -98,7 +98,11 @@ - + + + + + @@ -113,7 +117,16 @@ - + + + + + + + + + + @@ -131,7 +144,10 @@ - + + + + @@ -140,7 +156,15 @@ - + + + + + + + + + diff --git a/vtr_flow/arch/timing/k6_frac_N10_frac_chain_depop50_mem32K_40nm.xml b/vtr_flow/arch/timing/k6_frac_N10_frac_chain_depop50_mem32K_40nm.xml index 4594c649609..29fb7f958d0 100644 --- a/vtr_flow/arch/timing/k6_frac_N10_frac_chain_depop50_mem32K_40nm.xml +++ b/vtr_flow/arch/timing/k6_frac_N10_frac_chain_depop50_mem32K_40nm.xml @@ -151,7 +151,11 @@ - + + + + + @@ -166,7 +170,16 @@ - + + + + + + + + + + @@ -184,7 
+197,11 @@ - + + + + + @@ -194,7 +211,15 @@ - + + + + + + + + + diff --git a/vtr_flow/arch/timing/k6_frac_N10_frac_chain_mem32K_40nm.xml b/vtr_flow/arch/timing/k6_frac_N10_frac_chain_mem32K_40nm.xml index cead76b744f..39f0e98a58a 100644 --- a/vtr_flow/arch/timing/k6_frac_N10_frac_chain_mem32K_40nm.xml +++ b/vtr_flow/arch/timing/k6_frac_N10_frac_chain_mem32K_40nm.xml @@ -160,7 +160,11 @@ - + + + + + @@ -175,7 +179,13 @@ - + + + + + + + @@ -190,7 +200,11 @@ - + + + + + @@ -200,7 +214,15 @@ - + + + + + + + + + diff --git a/vtr_flow/arch/timing/k6_frac_N10_frac_chain_mem32K_htree0_40nm.xml b/vtr_flow/arch/timing/k6_frac_N10_frac_chain_mem32K_htree0_40nm.xml index f7adb5ed34d..fc944c68d63 100644 --- a/vtr_flow/arch/timing/k6_frac_N10_frac_chain_mem32K_htree0_40nm.xml +++ b/vtr_flow/arch/timing/k6_frac_N10_frac_chain_mem32K_htree0_40nm.xml @@ -160,7 +160,11 @@ - + + + + + @@ -178,7 +182,13 @@ - + + + + + + + @@ -195,7 +205,11 @@ - + + + + + @@ -205,7 +219,15 @@ - + + + + + + + + + diff --git a/vtr_flow/arch/timing/k6_frac_N10_frac_chain_mem32K_htree0_routedCLK_40nm.xml b/vtr_flow/arch/timing/k6_frac_N10_frac_chain_mem32K_htree0_routedCLK_40nm.xml index b4cc4086b65..5d427db0e7e 100644 --- a/vtr_flow/arch/timing/k6_frac_N10_frac_chain_mem32K_htree0_routedCLK_40nm.xml +++ b/vtr_flow/arch/timing/k6_frac_N10_frac_chain_mem32K_htree0_routedCLK_40nm.xml @@ -160,7 +160,11 @@ - + + + + + @@ -175,7 +179,13 @@ - + + + + + + + @@ -190,7 +200,11 @@ - + + + + + @@ -200,7 +214,15 @@ - + + + + + + + + + diff --git a/vtr_flow/arch/timing/k6_frac_N10_frac_chain_mem32K_htree0short_40nm.xml b/vtr_flow/arch/timing/k6_frac_N10_frac_chain_mem32K_htree0short_40nm.xml index fa729857e30..4fb3d4bd08a 100644 --- a/vtr_flow/arch/timing/k6_frac_N10_frac_chain_mem32K_htree0short_40nm.xml +++ b/vtr_flow/arch/timing/k6_frac_N10_frac_chain_mem32K_htree0short_40nm.xml @@ -160,7 +160,11 @@ - + + + + + @@ -178,7 +182,13 @@ - + + + + + + + @@ -195,7 +205,11 @@ - + + + + + @@ -205,7 +219,15 @@ - + + + + + + + 
+ + diff --git a/vtr_flow/arch/timing/k6_frac_N10_mem32K_40nm.xml b/vtr_flow/arch/timing/k6_frac_N10_mem32K_40nm.xml index aaab5da733f..7704b83e183 100644 --- a/vtr_flow/arch/timing/k6_frac_N10_mem32K_40nm.xml +++ b/vtr_flow/arch/timing/k6_frac_N10_mem32K_40nm.xml @@ -135,7 +135,11 @@ - + + + + + @@ -150,7 +154,11 @@ - + + + + + @@ -160,7 +168,11 @@ - + + + + + @@ -170,7 +182,15 @@ - + + + + + + + + + diff --git a/vtr_flow/arch/timing/soft_fpu_arch_timing.xml b/vtr_flow/arch/timing/soft_fpu_arch_timing.xml index 42e5b5e8db5..889249cbc2e 100755 --- a/vtr_flow/arch/timing/soft_fpu_arch_timing.xml +++ b/vtr_flow/arch/timing/soft_fpu_arch_timing.xml @@ -9,7 +9,11 @@ - + + + + + @@ -24,7 +28,11 @@ - + + + + + diff --git a/vtr_flow/arch/timing/soft_fpu_arch_timing_chain.xml b/vtr_flow/arch/timing/soft_fpu_arch_timing_chain.xml index 8854fc2242a..6780a5779ef 100644 --- a/vtr_flow/arch/timing/soft_fpu_arch_timing_chain.xml +++ b/vtr_flow/arch/timing/soft_fpu_arch_timing_chain.xml @@ -20,7 +20,11 @@ - + + + + + @@ -35,7 +39,13 @@ - + + + + + + + diff --git a/vtr_flow/arch/timing/xc6vlx240tff1156.xml b/vtr_flow/arch/timing/xc6vlx240tff1156.xml index d47950af431..33c7d5ce252 100644 --- a/vtr_flow/arch/timing/xc6vlx240tff1156.xml +++ b/vtr_flow/arch/timing/xc6vlx240tff1156.xml @@ -79,7 +79,12 @@ - + + + + + + @@ -90,7 +95,37 @@ - + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + @@ -130,7 +165,22 @@ - + + + + + + + + + + + + + + + + @@ -151,7 +201,102 @@ - + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + @@ -252,7 +397,15 @@ - + + + + + + + + + diff --git a/vtr_flow/arch/titan/stratixiv_arch.timing.xml b/vtr_flow/arch/titan/stratixiv_arch.timing.xml index dc322dcf818..af1fc734a17 100644 --- a/vtr_flow/arch/titan/stratixiv_arch.timing.xml +++ b/vtr_flow/arch/titan/stratixiv_arch.timing.xml @@ -4412,7 +4412,11 @@ - + + + + + 
@@ -4435,7 +4439,12 @@ - + + + + + + @@ -4451,7 +4460,16 @@ - + + + + + + + + + + @@ -4505,7 +4523,18 @@ - + + + + + + + + + + + + @@ -4596,7 +4625,12 @@ - + + + + + + @@ -4634,7 +4668,12 @@ - + + + + + + From 25754020d758834545a9abdb0527480f7e3718ce Mon Sep 17 00:00:00 2001 From: Keith Rothman <537074+litghost@users.noreply.github.com> Date: Fri, 11 Oct 2019 16:31:50 -0700 Subject: [PATCH 15/58] Improve error message when port cannot be found. Signed-off-by: Keith Rothman <537074+litghost@users.noreply.github.com> --- libs/libarchfpga/src/read_xml_arch_file.cpp | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/libs/libarchfpga/src/read_xml_arch_file.cpp b/libs/libarchfpga/src/read_xml_arch_file.cpp index 50b8251389e..a8ac4e106d6 100644 --- a/libs/libarchfpga/src/read_xml_arch_file.cpp +++ b/libs/libarchfpga/src/read_xml_arch_file.cpp @@ -875,7 +875,12 @@ static std::pair ProcessPinString(pugi::xml_node Locations, } auto port = get_port_by_name(type, token.data); - VTR_ASSERT(port != nullptr); + if (port == nullptr) { + archfpga_throw(loc_data.filename_c_str(), loc_data.line(Locations), + "Port %s for %s could not be found: %s\n", + type->name, token.data, + pin_loc_string); + } int abs_first_pin_idx = port->absolute_first_pin_index; token_index++; From 03e18f647d35ec188c0c80421c286e85356accdb Mon Sep 17 00:00:00 2001 From: Keith Rothman <537074+litghost@users.noreply.github.com> Date: Fri, 11 Oct 2019 16:32:08 -0700 Subject: [PATCH 16/58] Avoid segfault when a tile is not present in grid. 
Signed-off-by: Keith Rothman <537074+litghost@users.noreply.github.com> --- vpr/src/base/SetupGrid.cpp | 3 +++ 1 file changed, 3 insertions(+) diff --git a/vpr/src/base/SetupGrid.cpp b/vpr/src/base/SetupGrid.cpp index b025e34da21..bee64af60e7 100644 --- a/vpr/src/base/SetupGrid.cpp +++ b/vpr/src/base/SetupGrid.cpp @@ -665,6 +665,9 @@ float calculate_device_utilization(const DeviceGrid& grid, std::mapwidth * type->height; From ded6441bc938adcfbefd2a2dac7ad9ccaaa28d76 Mon Sep 17 00:00:00 2001 From: Alessandro Comodi Date: Mon, 14 Oct 2019 11:24:16 +0200 Subject: [PATCH 17/58] equivalent: added checks for equivalent sites pins Signed-off-by: Alessandro Comodi --- libs/libarchfpga/src/read_xml_arch_file.cpp | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/libs/libarchfpga/src/read_xml_arch_file.cpp b/libs/libarchfpga/src/read_xml_arch_file.cpp index a8ac4e106d6..8b6e8e6eca1 100644 --- a/libs/libarchfpga/src/read_xml_arch_file.cpp +++ b/libs/libarchfpga/src/read_xml_arch_file.cpp @@ -3270,7 +3270,12 @@ static void ProcessEquivalentSiteDirects(pugi::xml_node Parent, auto to_pins = ProcessPinString(CurDirect, LogicalBlockType, to.c_str(), loc_data); // Checking that the number of pins is exactly the same - VTR_ASSERT(from_pins.second - from_pins.first == to_pins.second - to_pins.first); + if (from_pins.second - from_pins.first != to_pins.second - to_pins.first) { + archfpga_throw(loc_data.filename_c_str(), loc_data.line(Parent), + "The number of pins specified in the direct pin mapping is " + "not equivalent for Physical Tile %s and Logical Block %s.\n", + PhysicalTileType->name, LogicalBlockType->name); + } int num_pins = from_pins.second - from_pins.first; for (int i = 0; i < num_pins; i++) { @@ -4801,6 +4806,9 @@ static void check_port_direct_mappings(t_physical_tile_type_ptr physical_tile, t auto block_port = get_port_by_pin(logical_block, pin_map.first); auto tile_port = get_port_by_pin(physical_tile, pin_map.second); + 
VTR_ASSERT(block_port != nullptr); + VTR_ASSERT(tile_port != nullptr); + if (tile_port->type != block_port->type || tile_port->num_pins != block_port->num_pins || tile_port->equivalent != block_port->equivalent) { From a6740fa19d1ff1a7d6639614bed9129c464b079d Mon Sep 17 00:00:00 2001 From: Alessandro Comodi Date: Mon, 14 Oct 2019 12:23:53 +0200 Subject: [PATCH 18/58] equivalent: patch to have different tile - block names Signed-off-by: Alessandro Comodi --- vpr/src/base/SetupGrid.cpp | 4 ++-- vpr/src/base/read_netlist.cpp | 3 +-- vpr/src/place/timing_place_lookup.cpp | 4 ++-- vpr/src/util/vpr_utils.cpp | 13 +++++++++++-- vpr/src/util/vpr_utils.h | 7 +++++-- 5 files changed, 21 insertions(+), 10 deletions(-) diff --git a/vpr/src/base/SetupGrid.cpp b/vpr/src/base/SetupGrid.cpp index bee64af60e7..74652a04c0f 100644 --- a/vpr/src/base/SetupGrid.cpp +++ b/vpr/src/base/SetupGrid.cpp @@ -277,7 +277,7 @@ static DeviceGrid build_device_grid(const t_grid_def& grid_def, size_t grid_widt auto grid = vtr::Matrix({grid_width, grid_height}); //Initialize the device to all empty blocks - auto empty_type = find_block_type_by_name(EMPTY_BLOCK_NAME, device_ctx.physical_tile_types); + auto empty_type = device_ctx.EMPTY_PHYSICAL_TILE_TYPE; VTR_ASSERT(empty_type != nullptr); for (size_t x = 0; x < grid_width; ++x) { for (size_t y = 0; y < grid_height; ++y) { @@ -290,7 +290,7 @@ static DeviceGrid build_device_grid(const t_grid_def& grid_def, size_t grid_widt for (const auto& grid_loc_def : grid_def.loc_defs) { //Fill in the block types according to the specification - auto type = find_block_type_by_name(grid_loc_def.block_type, device_ctx.physical_tile_types); + auto type = find_tile_type_by_name(grid_loc_def.block_type, device_ctx.physical_tile_types); if (!type) { VPR_FATAL_ERROR(VPR_ERROR_ARCH, diff --git a/vpr/src/base/read_netlist.cpp b/vpr/src/base/read_netlist.cpp index f8c6c79130d..8937e4a6868 100644 --- a/vpr/src/base/read_netlist.cpp +++ b/vpr/src/base/read_netlist.cpp @@ 
-857,7 +857,6 @@ static void load_external_nets_and_cb(ClusteredNetlist& clb_nlist) { ClusterNetId clb_net_id; auto& atom_ctx = g_vpr_ctx.atom(); - auto& device_ctx = g_vpr_ctx.device(); ext_nhash = alloc_hash_table(); @@ -952,7 +951,7 @@ static void load_external_nets_and_cb(ClusteredNetlist& clb_nlist) { for (auto blk_id : clb_nlist.blocks()) { block_type = clb_nlist.block_type(blk_id); // XXX Use pin mapping here! To check that all the possible pins can be used in the correct tile! - tile_type = find_block_type_by_name(block_type->name, device_ctx.physical_tile_types); + tile_type = physical_tile_type(block_type); for (j = 0; j < tile_type->num_pins; j++) { //Iterate through each pin of the block, and see if there is a net allocated/used for it clb_net_id = clb_nlist.block_net(blk_id, j); diff --git a/vpr/src/place/timing_place_lookup.cpp b/vpr/src/place/timing_place_lookup.cpp index 4ea36626665..1e3e7a3c08e 100644 --- a/vpr/src/place/timing_place_lookup.cpp +++ b/vpr/src/place/timing_place_lookup.cpp @@ -867,8 +867,8 @@ void OverrideDelayModel::compute_override_delay_model( InstPort from_port = parse_inst_port(direct->from_pin); InstPort to_port = parse_inst_port(direct->to_pin); - t_physical_tile_type_ptr from_type = find_block_type_by_name(from_port.instance_name(), device_ctx.physical_tile_types); - t_physical_tile_type_ptr to_type = find_block_type_by_name(to_port.instance_name(), device_ctx.physical_tile_types); + t_physical_tile_type_ptr from_type = find_block_type_by_name(from_port.instance_name(), device_ctx.logical_block_types); + t_physical_tile_type_ptr to_type = find_block_type_by_name(to_port.instance_name(), device_ctx.logical_block_types); int num_conns = from_port.port_high_index() - from_port.port_low_index() + 1; VTR_ASSERT_MSG(num_conns == to_port.port_high_index() - to_port.port_low_index() + 1, "Directs must have the same size to/from"); diff --git a/vpr/src/util/vpr_utils.cpp b/vpr/src/util/vpr_utils.cpp index e2f7e83618f..a2adbaecc0b 
100644 --- a/vpr/src/util/vpr_utils.cpp +++ b/vpr/src/util/vpr_utils.cpp @@ -731,7 +731,16 @@ void get_pin_range_for_block(const ClusterBlockId blk_id, *pin_high = (place_ctx.block_locs[blk_id].loc.z + 1) * (type->num_pins / type->capacity) - 1; } -t_physical_tile_type_ptr find_block_type_by_name(std::string name, const std::vector& types) { +t_physical_tile_type_ptr find_block_type_by_name(std::string name, const std::vector& types) { + for (auto const& type : types) { + if (type.name == name) { + return physical_tile_type(&type); + } + } + return nullptr; //Not found +} + +t_physical_tile_type_ptr find_tile_type_by_name(std::string name, const std::vector& types) { for (auto const& type : types) { if (type.name == name) { return &type; @@ -834,7 +843,7 @@ InstPort parse_inst_port(std::string str) { InstPort inst_port(str); auto& device_ctx = g_vpr_ctx.device(); - auto blk_type = find_block_type_by_name(inst_port.instance_name(), device_ctx.physical_tile_types); + auto blk_type = find_block_type_by_name(inst_port.instance_name(), device_ctx.logical_block_types); if (blk_type == nullptr) { VPR_FATAL_ERROR(VPR_ERROR_ARCH, "Failed to find block type named %s", inst_port.instance_name().c_str()); } diff --git a/vpr/src/util/vpr_utils.h b/vpr/src/util/vpr_utils.h index ef8c76265ac..f38257f93cd 100644 --- a/vpr/src/util/vpr_utils.h +++ b/vpr/src/util/vpr_utils.h @@ -111,8 +111,11 @@ const t_pb_graph_pin* find_pb_graph_pin(const t_pb_graph_node* pb_gnode, std::st AtomPinId find_atom_pin(ClusterBlockId blk_id, const t_pb_graph_pin* pb_gpin); -//Returns the block type matching name, or nullptr (if not found) -t_physical_tile_type_ptr find_block_type_by_name(std::string name, const std::vector& types); +//Returns the physical tile type matching a given logical block type name, or nullptr (if not found) +t_physical_tile_type_ptr find_block_type_by_name(std::string name, const std::vector& types); + +//Returns the physical tile type matching a given physical tile type name, 
or nullptr (if not found) +t_physical_tile_type_ptr find_tile_type_by_name(std::string name, const std::vector& types); //Returns the logical block type which is most common in the device grid t_logical_block_type_ptr find_most_common_block_type(const DeviceGrid& grid); From 0b33d8f2421ddfead8dd9d18a560b3d9db478d2b Mon Sep 17 00:00:00 2001 From: Alessandro Comodi Date: Mon, 14 Oct 2019 12:35:04 +0200 Subject: [PATCH 19/58] equivalent: added VPR_THROW when no corresponding type is found Signed-off-by: Alessandro Comodi --- vpr/src/base/check_netlist.cpp | 5 +++-- vpr/src/util/vpr_utils.cpp | 4 ++-- 2 files changed, 5 insertions(+), 4 deletions(-) diff --git a/vpr/src/base/check_netlist.cpp b/vpr/src/base/check_netlist.cpp index 89e62394955..4881ee6c7d2 100644 --- a/vpr/src/base/check_netlist.cpp +++ b/vpr/src/base/check_netlist.cpp @@ -94,12 +94,13 @@ static int check_connections_to_global_clb_pins(ClusterNetId net_id, int verbosi int pin_index = cluster_ctx.clb_nlist.pin_physical_index(pin_id); auto logical_type = cluster_ctx.clb_nlist.block_type(blk_id); - if (physical_tile_type(logical_type)->is_ignored_pin[pin_index] != net_is_ignored + auto physical_type = physical_tile_type(logical_type); + if (physical_type->is_ignored_pin[pin_index] != net_is_ignored && !is_io_type(physical_tile_type(logical_type))) { VTR_LOGV_WARN(verbosity > 2, "Global net '%s' connects to non-global architecture pin '%s' (netlist pin '%s')\n", cluster_ctx.clb_nlist.net_name(net_id).c_str(), - block_type_pin_index_to_name(physical_tile_type(blk_id), pin_index).c_str(), + block_type_pin_index_to_name(physical_type, pin_index).c_str(), cluster_ctx.clb_nlist.pin_name(pin_id).c_str()); ++global_to_non_global_connection_count; diff --git a/vpr/src/util/vpr_utils.cpp b/vpr/src/util/vpr_utils.cpp index a2adbaecc0b..36f7cd27c70 100644 --- a/vpr/src/util/vpr_utils.cpp +++ b/vpr/src/util/vpr_utils.cpp @@ -651,7 +651,7 @@ t_physical_tile_type_ptr physical_tile_type(t_logical_block_type_ptr 
logical_blo } } - return nullptr; + VPR_THROW(VPR_ERROR_OTHER, "No corresponding physical tile type found for logical block type %s\n", logical_block_type->name); } t_physical_tile_type_ptr physical_tile_type(ClusterBlockId blk) { @@ -675,7 +675,7 @@ t_logical_block_type_ptr logical_block_type(t_physical_tile_type_ptr physical_ti } } - return nullptr; + VPR_THROW(VPR_ERROR_OTHER, "No corresponding logical block type found for physical tile type %s\n", physical_tile_type->name); } /* Each node in the pb_graph for a top-level pb_type can be uniquely identified From 7b009886265fd23549bb5c79d40f80813c1751f6 Mon Sep 17 00:00:00 2001 From: Alessandro Comodi Date: Mon, 14 Oct 2019 13:03:32 +0200 Subject: [PATCH 20/58] equivalent: added check that each pb_type has at least one equivalent tile Signed-off-by: Alessandro Comodi --- libs/libarchfpga/src/read_xml_arch_file.cpp | 9 +++++++++ vpr/src/base/SetupGrid.cpp | 6 ++++-- 2 files changed, 13 insertions(+), 2 deletions(-) diff --git a/libs/libarchfpga/src/read_xml_arch_file.cpp b/libs/libarchfpga/src/read_xml_arch_file.cpp index 8b6e8e6eca1..c65b9c2d618 100644 --- a/libs/libarchfpga/src/read_xml_arch_file.cpp +++ b/libs/libarchfpga/src/read_xml_arch_file.cpp @@ -4783,6 +4783,15 @@ static void link_physical_logical_types(std::vector& Physi "Could not create link between the %s and all its equivalent sites.\n", physical_tile.name); } } + + for (auto& logical_block : LogicalBlockTypes) { + if (logical_block.index == EMPTY_TYPE_INDEX) continue; + + if ((int)logical_block.equivalent_tiles.size() <= 0) { + archfpga_throw(__FILE__, __LINE__, + "Logical Block %s does not have any equivalent tiles.\n", logical_block.name); + } + } } static void check_port_direct_mappings(t_physical_tile_type_ptr physical_tile, t_logical_block_type_ptr logical_block) { diff --git a/vpr/src/base/SetupGrid.cpp b/vpr/src/base/SetupGrid.cpp index 74652a04c0f..dad86352cdb 100644 --- a/vpr/src/base/SetupGrid.cpp +++ b/vpr/src/base/SetupGrid.cpp @@ -664,10 
+664,12 @@ float calculate_device_utilization(const DeviceGrid& grid, std::mapwidth * type->height; From 19856bab27da541ac1326e0b610e5a8ce5b9d43a Mon Sep 17 00:00:00 2001 From: Alessandro Comodi Date: Mon, 14 Oct 2019 13:16:03 +0200 Subject: [PATCH 21/58] equivalent: add referenced veriable in for loop Signed-off-by: Alessandro Comodi --- vpr/src/util/vpr_utils.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/vpr/src/util/vpr_utils.cpp b/vpr/src/util/vpr_utils.cpp index 36f7cd27c70..b37cfbbc38f 100644 --- a/vpr/src/util/vpr_utils.cpp +++ b/vpr/src/util/vpr_utils.cpp @@ -219,7 +219,7 @@ std::string block_type_pin_index_to_name(t_physical_tile_type_ptr type, int pin_ pin_name += "."; int curr_index = 0; - for (auto const port : type->ports) { + for (auto const& port : type->ports) { if (curr_index + port.num_pins > pin_index) { //This port contains the desired pin index int index_in_port = pin_index - curr_index; From 2c72d2b9d7d04b67ed394e382d2bcc1e61ed4731 Mon Sep 17 00:00:00 2001 From: Alessandro Comodi Date: Mon, 14 Oct 2019 16:32:47 +0200 Subject: [PATCH 22/58] equivalent: clb_directs should affect phy tiles and not log blocks Signed-off-by: Alessandro Comodi --- vpr/src/route/rr_graph.cpp | 87 +++++++++++++++++++++----------------- 1 file changed, 48 insertions(+), 39 deletions(-) diff --git a/vpr/src/route/rr_graph.cpp b/vpr/src/route/rr_graph.cpp index 3aa6afd94a9..a9d0b51d468 100644 --- a/vpr/src/route/rr_graph.cpp +++ b/vpr/src/route/rr_graph.cpp @@ -51,10 +51,10 @@ struct t_mux_size_distribution { }; struct t_clb_to_clb_directs { - t_logical_block_type_ptr from_clb_type; + t_physical_tile_type_ptr from_clb_type; int from_clb_pin_start_index; int from_clb_pin_end_index; - t_logical_block_type_ptr to_clb_type; + t_physical_tile_type_ptr to_clb_type; int to_clb_pin_start_index; int to_clb_pin_end_index; int switch_index; //The switch type used by this direct connection @@ -2654,94 +2654,103 @@ static void build_unidir_rr_opins(const int 
i, const int j, const e_side side, c * TODO: The function that does this parsing in placement is poorly done because it lacks generality on heterogeniety, should replace with this one */ static t_clb_to_clb_directs* alloc_and_load_clb_to_clb_directs(const t_direct_inf* directs, const int num_directs, int delayless_switch) { - int i, j; - unsigned int itype; + int i; t_clb_to_clb_directs* clb_to_clb_directs; - char *pb_type_name, *port_name; + char *tile_name, *port_name; int start_pin_index, end_pin_index; - t_pb_type* pb_type; + t_physical_tile_type_ptr physical_tile = nullptr; + t_physical_tile_port tile_port; auto& device_ctx = g_vpr_ctx.device(); clb_to_clb_directs = (t_clb_to_clb_directs*)vtr::calloc(num_directs, sizeof(t_clb_to_clb_directs)); - pb_type_name = nullptr; + tile_name = nullptr; port_name = nullptr; for (i = 0; i < num_directs; i++) { - pb_type_name = (char*)vtr::malloc((strlen(directs[i].from_pin) + strlen(directs[i].to_pin)) * sizeof(char)); + tile_name = (char*)vtr::malloc((strlen(directs[i].from_pin) + strlen(directs[i].to_pin)) * sizeof(char)); port_name = (char*)vtr::malloc((strlen(directs[i].from_pin) + strlen(directs[i].to_pin)) * sizeof(char)); // Load from pins // Parse out the pb_type name, port name, and pin range - parse_direct_pin_name(directs[i].from_pin, directs[i].line, &start_pin_index, &end_pin_index, pb_type_name, port_name); + parse_direct_pin_name(directs[i].from_pin, directs[i].line, &start_pin_index, &end_pin_index, tile_name, port_name); // Figure out which type, port, and pin is used - for (itype = 0; itype < device_ctx.logical_block_types.size(); ++itype) { - if (strcmp(device_ctx.logical_block_types[itype].name, pb_type_name) == 0) { + for (auto& type : device_ctx.physical_tile_types) { + if (strcmp(type.name, tile_name) == 0) { + physical_tile = &type; break; } } - if (itype >= device_ctx.logical_block_types.size()) { - vpr_throw(VPR_ERROR_ARCH, get_arch_file_name(), directs[i].line, "Unable to find block %s.\n", 
pb_type_name); + if (physical_tile == nullptr) { + VPR_THROW(VPR_ERROR_ARCH, "Unable to find block %s.\n", tile_name); } - clb_to_clb_directs[i].from_clb_type = &device_ctx.logical_block_types[itype]; - pb_type = clb_to_clb_directs[i].from_clb_type->pb_type; + clb_to_clb_directs[i].from_clb_type = physical_tile; - for (j = 0; j < pb_type->num_ports; j++) { - if (strcmp(pb_type->ports[j].name, port_name) == 0) { + bool port_found = false; + for (auto port : physical_tile->ports) { + if (0 == strcmp(port.name, port_name)) { + tile_port = port; + port_found = true; break; } } - if (j >= pb_type->num_ports) { - vpr_throw(VPR_ERROR_ARCH, get_arch_file_name(), directs[i].line, "Unable to find port %s (on block %s).\n", port_name, pb_type_name); + + if (!port_found) { + VPR_THROW(VPR_ERROR_ARCH, "Unable to find port %s (on block %s).\n", port_name, tile_name); } if (start_pin_index == OPEN) { VTR_ASSERT(start_pin_index == end_pin_index); start_pin_index = 0; - end_pin_index = pb_type->ports[j].num_pins - 1; + end_pin_index = tile_port.num_pins - 1; } - get_blk_pin_from_port_pin(clb_to_clb_directs[i].from_clb_type->index, j, start_pin_index, &clb_to_clb_directs[i].from_clb_pin_start_index); - get_blk_pin_from_port_pin(clb_to_clb_directs[i].from_clb_type->index, j, end_pin_index, &clb_to_clb_directs[i].from_clb_pin_end_index); + + clb_to_clb_directs[i].from_clb_pin_start_index = tile_port.absolute_first_pin_index + start_pin_index; + clb_to_clb_directs[i].from_clb_pin_end_index = tile_port.absolute_first_pin_index + end_pin_index; // Load to pins // Parse out the pb_type name, port name, and pin range - parse_direct_pin_name(directs[i].to_pin, directs[i].line, &start_pin_index, &end_pin_index, pb_type_name, port_name); + parse_direct_pin_name(directs[i].to_pin, directs[i].line, &start_pin_index, &end_pin_index, tile_name, port_name); // Figure out which type, port, and pin is used - for (itype = 0; itype < device_ctx.logical_block_types.size(); ++itype) { - if 
(strcmp(device_ctx.logical_block_types[itype].name, pb_type_name) == 0) { + for (auto& type : device_ctx.physical_tile_types) { + if (strcmp(type.name, tile_name) == 0) { + physical_tile = &type; break; } } - if (itype >= device_ctx.logical_block_types.size()) { - vpr_throw(VPR_ERROR_ARCH, get_arch_file_name(), directs[i].line, "Unable to find block %s.\n", pb_type_name); + if (physical_tile == nullptr) { + VPR_THROW(VPR_ERROR_ARCH, "Unable to find block %s.\n", tile_name); } - clb_to_clb_directs[i].to_clb_type = &device_ctx.logical_block_types[itype]; - pb_type = clb_to_clb_directs[i].to_clb_type->pb_type; + clb_to_clb_directs[i].from_clb_type = physical_tile; - for (j = 0; j < pb_type->num_ports; j++) { - if (strcmp(pb_type->ports[j].name, port_name) == 0) { + port_found = false; + for (auto port : physical_tile->ports) { + if (0 == strcmp(port.name, port_name)) { + tile_port = port; + port_found = true; break; } } - if (j >= pb_type->num_ports) { - vpr_throw(VPR_ERROR_ARCH, get_arch_file_name(), directs[i].line, "Unable to find port %s (on block %s).\n", port_name, pb_type_name); + + if (!port_found) { + VPR_THROW(VPR_ERROR_ARCH, "Unable to find port %s (on block %s).\n", port_name, tile_name); } if (start_pin_index == OPEN) { VTR_ASSERT(start_pin_index == end_pin_index); start_pin_index = 0; - end_pin_index = pb_type->ports[j].num_pins - 1; + end_pin_index = tile_port.num_pins - 1; } - get_blk_pin_from_port_pin(clb_to_clb_directs[i].to_clb_type->index, j, start_pin_index, &clb_to_clb_directs[i].to_clb_pin_start_index); - get_blk_pin_from_port_pin(clb_to_clb_directs[i].to_clb_type->index, j, end_pin_index, &clb_to_clb_directs[i].to_clb_pin_end_index); + clb_to_clb_directs[i].to_clb_pin_start_index = tile_port.absolute_first_pin_index + start_pin_index; + clb_to_clb_directs[i].to_clb_pin_end_index = tile_port.absolute_first_pin_index + end_pin_index; if (abs(clb_to_clb_directs[i].from_clb_pin_start_index - clb_to_clb_directs[i].from_clb_pin_end_index) != 
abs(clb_to_clb_directs[i].to_clb_pin_start_index - clb_to_clb_directs[i].to_clb_pin_end_index)) { vpr_throw(VPR_ERROR_ARCH, get_arch_file_name(), directs[i].line, @@ -2756,7 +2765,7 @@ static t_clb_to_clb_directs* alloc_and_load_clb_to_clb_directs(const t_direct_in //Use the delayless switch by default clb_to_clb_directs[i].switch_index = delayless_switch; } - free(pb_type_name); + free(tile_name); free(port_name); //We must be careful to clean-up anything that we may have incidentally allocated. @@ -2804,7 +2813,7 @@ static int get_opin_direct_connecions(int x, /* Iterate through all direct connections */ for (int i = 0; i < num_directs; i++) { /* Find matching direct clb-to-clb connections with the same type as current grid location */ - if (clb_to_clb_directs[i].from_clb_type == logical_block_type(curr_type)) { //We are at a valid starting point + if (clb_to_clb_directs[i].from_clb_type == curr_type) { //We are at a valid starting point if (directs[i].from_side != NUM_SIDES && directs[i].from_side != side) continue; @@ -2815,7 +2824,7 @@ static int get_opin_direct_connecions(int x, && y + directs[i].y_offset > 0) { //Only add connections if the target clb type matches the type in the direct specification t_physical_tile_type_ptr target_type = device_ctx.grid[x + directs[i].x_offset][y + directs[i].y_offset].type; - if (clb_to_clb_directs[i].to_clb_type == logical_block_type(target_type) + if (clb_to_clb_directs[i].to_clb_type == target_type && z + directs[i].z_offset < int(target_type->capacity) && z + directs[i].z_offset >= 0) { /* Compute index of opin with regards to given pins */ From eeead6946c361e27fd2fbad422040a2198811f12 Mon Sep 17 00:00:00 2001 From: Alessandro Comodi Date: Mon, 14 Oct 2019 16:40:49 +0200 Subject: [PATCH 23/58] equivalent: skip empty tile Signed-off-by: Alessandro Comodi --- vpr/src/base/vpr_api.cpp | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/vpr/src/base/vpr_api.cpp b/vpr/src/base/vpr_api.cpp index 735d080a59e..a5cb7d66fd1 
100644 --- a/vpr/src/base/vpr_api.cpp +++ b/vpr/src/base/vpr_api.cpp @@ -435,6 +435,10 @@ void vpr_create_device_grid(const t_vpr_setup& vpr_setup, const t_arch& Arch) { float device_utilization = calculate_device_utilization(device_ctx.grid, num_type_instances); VTR_LOG("Device Utilization: %.2f (target %.2f)\n", device_utilization, target_device_utilization); for (const auto& type : device_ctx.physical_tile_types) { + if (is_empty_type(&type)) { + continue; + } + float util = 0.; if (device_ctx.grid.num_instances(&type) != 0) { util = float(num_type_instances[logical_block_type(&type)]) / device_ctx.grid.num_instances(&type); From 0680e5e5e7a4038781cbf4c0c3f4850835329c38 Mon Sep 17 00:00:00 2001 From: Alessandro Comodi Date: Wed, 16 Oct 2019 11:28:43 +0200 Subject: [PATCH 24/58] equivalent: avoid segfaults Signed-off-by: Alessandro Comodi --- libs/libarchfpga/src/echo_arch.cpp | 16 +++++++++++-- vpr/src/place/place_macro.cpp | 4 ++-- vpr/src/place/timing_place_lookup.cpp | 4 ++-- vpr/src/util/vpr_utils.cpp | 34 ++++++++++----------------- vpr/src/util/vpr_utils.h | 3 --- 5 files changed, 31 insertions(+), 30 deletions(-) diff --git a/libs/libarchfpga/src/echo_arch.cpp b/libs/libarchfpga/src/echo_arch.cpp index 292d53907df..8d1ea79971a 100644 --- a/libs/libarchfpga/src/echo_arch.cpp +++ b/libs/libarchfpga/src/echo_arch.cpp @@ -102,11 +102,23 @@ void EchoArch(const char* EchoFile, int index = Type.index; fprintf(Echo, "\tindex: %d\n", index); - if (LogicalBlockTypes[Type.index].pb_type) { - PrintPb_types_rec(Echo, LogicalBlockTypes[Type.index].pb_type, 2); + + for (auto LogicalBlock : Type.equivalent_sites) { + fprintf(Echo, "\nEquivalent Site: %s\n", LogicalBlock->name); } fprintf(Echo, "\n"); } + + fprintf(Echo, "*************************************************\n\n"); + fprintf(Echo, "*************************************************\n"); + + for (auto& LogicalBlock : LogicalBlockTypes) { + if (LogicalBlock.pb_type) { + PrintPb_types_rec(Echo, 
LogicalBlock.pb_type, 2); + } + fprintf(Echo, "\n"); + } + fclose(Echo); } diff --git a/vpr/src/place/place_macro.cpp b/vpr/src/place/place_macro.cpp index fb6e7deaed7..4b84afad3d4 100644 --- a/vpr/src/place/place_macro.cpp +++ b/vpr/src/place/place_macro.cpp @@ -454,13 +454,13 @@ static void write_place_macros(std::string filename, const std::vectornum_pins; ++ipin) { + for (int ipin = 0; ipin < type.num_pins; ++ipin) { if (f_idirect_from_blk_pin[itype][ipin] != OPEN) { if (f_direct_type_from_blk_pin[itype][ipin] == SOURCE) { fprintf(f, "%-9s %-9d true SOURCE \n", type.name, ipin); diff --git a/vpr/src/place/timing_place_lookup.cpp b/vpr/src/place/timing_place_lookup.cpp index 1e3e7a3c08e..4aa439aab16 100644 --- a/vpr/src/place/timing_place_lookup.cpp +++ b/vpr/src/place/timing_place_lookup.cpp @@ -867,8 +867,8 @@ void OverrideDelayModel::compute_override_delay_model( InstPort from_port = parse_inst_port(direct->from_pin); InstPort to_port = parse_inst_port(direct->to_pin); - t_physical_tile_type_ptr from_type = find_block_type_by_name(from_port.instance_name(), device_ctx.logical_block_types); - t_physical_tile_type_ptr to_type = find_block_type_by_name(to_port.instance_name(), device_ctx.logical_block_types); + t_physical_tile_type_ptr from_type = find_tile_type_by_name(from_port.instance_name(), device_ctx.physical_tile_types); + t_physical_tile_type_ptr to_type = find_tile_type_by_name(to_port.instance_name(), device_ctx.physical_tile_types); int num_conns = from_port.port_high_index() - from_port.port_low_index() + 1; VTR_ASSERT_MSG(num_conns == to_port.port_high_index() - to_port.port_low_index() + 1, "Directs must have the same size to/from"); diff --git a/vpr/src/util/vpr_utils.cpp b/vpr/src/util/vpr_utils.cpp index b37cfbbc38f..19839b30403 100644 --- a/vpr/src/util/vpr_utils.cpp +++ b/vpr/src/util/vpr_utils.cpp @@ -42,12 +42,12 @@ static int** f_port_from_blk_pin = nullptr; /* f_port_pin_from_blk_pin array allow us to quickly find what port pin a* * block 
pin corresponds to. * - * [0...device_ctx.logical_block_types.size()-1][0...blk_pin_count-1] */ + * [0...device_ctx.physical_tile_types.size()-1][0...blk_pin_count-1] */ static int** f_port_pin_from_blk_pin = nullptr; /* f_port_pin_to_block_pin array allows us to quickly find what block * * pin a port pin corresponds to. * - * [0...device_ctx.logical_block_types.size()-1][0...num_ports-1][0...num_port_pins-1] */ + * [0...device_ctx.physical_tile_types.size()-1][0...num_ports-1][0...num_port_pins-1] */ static int*** f_blk_pin_from_port_pin = nullptr; //Regular expressions used to determine register and logic primitives @@ -731,15 +731,6 @@ void get_pin_range_for_block(const ClusterBlockId blk_id, *pin_high = (place_ctx.block_locs[blk_id].loc.z + 1) * (type->num_pins / type->capacity) - 1; } -t_physical_tile_type_ptr find_block_type_by_name(std::string name, const std::vector& types) { - for (auto const& type : types) { - if (type.name == name) { - return physical_tile_type(&type); - } - } - return nullptr; //Not found -} - t_physical_tile_type_ptr find_tile_type_by_name(std::string name, const std::vector& types) { for (auto const& type : types) { if (type.name == name) { @@ -843,7 +834,7 @@ InstPort parse_inst_port(std::string str) { InstPort inst_port(str); auto& device_ctx = g_vpr_ctx.device(); - auto blk_type = find_block_type_by_name(inst_port.instance_name(), device_ctx.logical_block_types); + auto blk_type = find_tile_type_by_name(inst_port.instance_name(), device_ctx.physical_tile_types); if (blk_type == nullptr) { VPR_FATAL_ERROR(VPR_ERROR_ARCH, "Failed to find block type named %s", inst_port.instance_name().c_str()); } @@ -1993,14 +1984,15 @@ static void mark_direct_of_ports(int idirect, int direct_type, char* pb_type_nam auto& device_ctx = g_vpr_ctx.device(); // Go through all the block types - for (itype = 1; itype < device_ctx.logical_block_types.size(); itype++) { + for (itype = 1; itype < device_ctx.physical_tile_types.size(); itype++) { + auto& 
physical_tile = device_ctx.physical_tile_types[itype]; // Find blocks with the same pb_type_name - if (strcmp(device_ctx.logical_block_types[itype].pb_type->name, pb_type_name) == 0) { - num_ports = device_ctx.logical_block_types[itype].pb_type->num_ports; + if (strcmp(physical_tile.name, pb_type_name) == 0) { + num_ports = physical_tile.ports.size(); for (iport = 0; iport < num_ports; iport++) { // Find ports with the same port_name - if (strcmp(device_ctx.logical_block_types[itype].pb_type->ports[iport].name, port_name) == 0) { - num_port_pins = device_ctx.logical_block_types[itype].pb_type->ports[iport].num_pins; + if (strcmp(physical_tile.ports[iport].name, port_name) == 0) { + num_port_pins = physical_tile.ports[iport].num_pins; // Check whether the end_pin_index is valid if (end_pin_index > num_port_pins) { @@ -2057,13 +2049,13 @@ void alloc_and_load_idirect_from_blk_pin(t_direct_inf* directs, int num_directs, auto& device_ctx = g_vpr_ctx.device(); /* Allocate and initialize the values to OPEN (-1). 
*/ - temp_idirect_from_blk_pin = (int**)vtr::malloc(device_ctx.logical_block_types.size() * sizeof(int*)); - temp_direct_type_from_blk_pin = (int**)vtr::malloc(device_ctx.logical_block_types.size() * sizeof(int*)); - for (const auto& type : device_ctx.logical_block_types) { + temp_idirect_from_blk_pin = (int**)vtr::malloc(device_ctx.physical_tile_types.size() * sizeof(int*)); + temp_direct_type_from_blk_pin = (int**)vtr::malloc(device_ctx.physical_tile_types.size() * sizeof(int*)); + for (const auto& type : device_ctx.physical_tile_types) { if (is_empty_type(&type)) continue; int itype = type.index; - num_type_pins = type.pb_type->num_pins; + num_type_pins = type.num_pins; temp_idirect_from_blk_pin[itype] = (int*)vtr::malloc(num_type_pins * sizeof(int)); temp_direct_type_from_blk_pin[itype] = (int*)vtr::malloc(num_type_pins * sizeof(int)); diff --git a/vpr/src/util/vpr_utils.h b/vpr/src/util/vpr_utils.h index f38257f93cd..ae7a3240a77 100644 --- a/vpr/src/util/vpr_utils.h +++ b/vpr/src/util/vpr_utils.h @@ -111,9 +111,6 @@ const t_pb_graph_pin* find_pb_graph_pin(const t_pb_graph_node* pb_gnode, std::st AtomPinId find_atom_pin(ClusterBlockId blk_id, const t_pb_graph_pin* pb_gpin); -//Returns the physical tile type matching a given logical block type name, or nullptr (if not found) -t_physical_tile_type_ptr find_block_type_by_name(std::string name, const std::vector& types); - //Returns the physical tile type matching a given physical tile type name, or nullptr (if not found) t_physical_tile_type_ptr find_tile_type_by_name(std::string name, const std::vector& types); From ce2b4aecc499090daa2c6a73aa145de55f6cfe39 Mon Sep 17 00:00:00 2001 From: Alessandro Comodi Date: Wed, 16 Oct 2019 11:32:21 +0200 Subject: [PATCH 25/58] equivalent: added bimap to store pin mappings between tile and block Signed-off-by: Alessandro Comodi --- libs/libarchfpga/src/physical_types.h | 37 +++++++++++++- libs/libarchfpga/src/read_xml_arch_file.cpp | 54 +++++++++++++++++++-- 
vpr/src/route/route_common.cpp | 6 +-- 3 files changed, 88 insertions(+), 9 deletions(-) diff --git a/libs/libarchfpga/src/physical_types.h b/libs/libarchfpga/src/physical_types.h index 68bb9351b4c..e34b740031c 100644 --- a/libs/libarchfpga/src/physical_types.h +++ b/libs/libarchfpga/src/physical_types.h @@ -38,6 +38,7 @@ #include "vtr_ndmatrix.h" #include "vtr_hash.h" +#include "vtr_bimap.h" #include "logic_types.h" #include "clock_types.h" @@ -58,6 +59,8 @@ struct t_physical_tile_type; typedef const t_physical_tile_type* t_physical_tile_type_ptr; struct t_logical_block_type; typedef const t_logical_block_type* t_logical_block_type_ptr; +struct t_logical_pin; +struct t_physical_pin; struct t_pb_type; struct t_pb_graph_pin_power; struct t_mode; @@ -616,12 +619,44 @@ struct t_physical_tile_type { /* Unordered map indexed by the logical block index. * tile_block_pin_directs_map[logical block index][logical block pin] -> physical tile pin */ - std::unordered_map> tile_block_pin_directs_map; + std::unordered_map> tile_block_pin_directs_map; /* Returns the indices of pins that contain a clock for this physical logic block */ std::vector get_clock_pins_indices() const; }; +struct t_logical_pin { + int pin = -1; + + t_logical_pin(int value) { + pin = value; + } + + bool operator==(const t_logical_pin o) const { + return pin == o.pin; + } + + bool operator<(const t_logical_pin o) const { + return pin < o.pin; + } +}; + +struct t_physical_pin { + int pin = -1; + + t_physical_pin(int value) { + pin = value; + } + + bool operator==(const t_physical_pin o) const { + return pin == o.pin; + } + + bool operator<(const t_physical_pin o) const { + return pin < o.pin; + } +}; + /** Describes I/O and clock ports of a physical tile type * * It corresponds to tags in the FPGA architecture description diff --git a/libs/libarchfpga/src/read_xml_arch_file.cpp b/libs/libarchfpga/src/read_xml_arch_file.cpp index c65b9c2d618..a7a0583445c 100644 --- 
a/libs/libarchfpga/src/read_xml_arch_file.cpp +++ b/libs/libarchfpga/src/read_xml_arch_file.cpp @@ -52,6 +52,7 @@ #include "vtr_memory.h" #include "vtr_digest.h" #include "vtr_token.h" +#include "vtr_bimap.h" #include "arch_types.h" #include "arch_util.h" @@ -3251,7 +3252,7 @@ static void ProcessEquivalentSiteDirects(pugi::xml_node Parent, "There are no direct pin mappings between site %s and tile %s.\n", site_name.c_str(), PhysicalTileType->name); } - std::unordered_map directs_map; + vtr::bimap directs_map; CurDirect = Parent.first_child(); while (CurDirect) { @@ -3279,7 +3280,16 @@ static void ProcessEquivalentSiteDirects(pugi::xml_node Parent, int num_pins = from_pins.second - from_pins.first; for (int i = 0; i < num_pins; i++) { - directs_map[to_pins.first + i] = from_pins.first + i; + t_physical_pin phy_pin(from_pins.first + i); + t_logical_pin log_pin(to_pins.first + i); + + auto result = directs_map.insert(log_pin, phy_pin); + if (!result.second) { + archfpga_throw(loc_data.filename_c_str(), loc_data.line(Parent), + "Duplicate logical pin (%d) to physical pin (%d) mappings found for " + "Physical Tile %s and Logical Block %s.\n", + log_pin.pin, phy_pin.pin, PhysicalTileType->name, LogicalBlockType->name); + } } CurDirect = CurDirect.next_sibling(CurDirect.name()); @@ -4791,6 +4801,42 @@ static void link_physical_logical_types(std::vector& Physi archfpga_throw(__FILE__, __LINE__, "Logical Block %s does not have any equivalent tiles.\n", logical_block.name); } + + std::unordered_map ignored_pins_check_map; + std::unordered_map global_pins_check_map; + + for (int pin = 0; pin < logical_block.pb_type->num_pins; pin++) { + for (auto& tile : logical_block.equivalent_tiles) { + auto direct_map = tile->tile_block_pin_directs_map.at(logical_block.index); + auto result = direct_map.find(t_logical_pin(pin)); + if (result == direct_map.end()) { + archfpga_throw(__FILE__, __LINE__, + "Logical pin %d not present in pin mapping between Tile %s and Block %s.\n", + pin, 
tile->name, logical_block.name); + } + + int phy_index = result->second.pin; + + bool is_ignored = tile->is_ignored_pin[phy_index]; + bool is_global = tile->is_pin_global[phy_index]; + + auto ignored_result = ignored_pins_check_map.insert(std::pair(pin, is_ignored)); + if (!ignored_result.second && ignored_result.first->second != is_ignored) { + archfpga_throw(__FILE__, __LINE__, + "Physical Tile %s has a different value for the ignored pin (physical pin: %d, logical pin: %d) " + "different from the corresponding pins of the other equivalent sites\n.", + tile->name, phy_index, pin); + } + + auto global_result = global_pins_check_map.insert(std::pair(pin, is_global)); + if (!global_result.second && global_result.first->second != is_global) { + archfpga_throw(__FILE__, __LINE__, + "Physical Tile %s has a different value for the global pin (physical pin: %d, logical pin: %d) " + "different from the corresponding pins of the other equivalent sites\n.", + tile->name, phy_index, pin); + } + } + } } } @@ -4812,8 +4858,8 @@ static void check_port_direct_mappings(t_physical_tile_type_ptr physical_tile, t } for (auto pin_map : pin_direct_mapping) { - auto block_port = get_port_by_pin(logical_block, pin_map.first); - auto tile_port = get_port_by_pin(physical_tile, pin_map.second); + auto block_port = get_port_by_pin(logical_block, pin_map.first.pin); + auto tile_port = get_port_by_pin(physical_tile, pin_map.second.pin); VTR_ASSERT(block_port != nullptr); VTR_ASSERT(tile_port != nullptr); diff --git a/vpr/src/route/route_common.cpp b/vpr/src/route/route_common.cpp index 8770c9de2c2..10ffec2ff23 100644 --- a/vpr/src/route/route_common.cpp +++ b/vpr/src/route/route_common.cpp @@ -1060,11 +1060,9 @@ static vtr::vector> load_net_rr_terminals(const t node_block_pin = cluster_ctx.clb_nlist.pin_logical_index(pin_id); auto pin_directs_map = type->tile_block_pin_directs_map; - auto map_result = pin_directs_map.find(logical_block->index); - std::unordered_map map = map_result->second; + 
auto map = pin_directs_map[logical_block->index]; - auto pin_result = map.find(node_block_pin); - auto orig_phys_pin = pin_result->second; + auto orig_phys_pin = map[t_logical_pin(node_block_pin)].pin; VTR_ASSERT(type->num_pins % type->capacity == 0); int max_num_block_pins = type->num_pins / type->capacity; From f679556081c20f1f12cfdc0eb2899e2dab2b2b33 Mon Sep 17 00:00:00 2001 From: Alessandro Comodi Date: Wed, 16 Oct 2019 12:01:39 +0200 Subject: [PATCH 26/58] equivalent: fixed bug in clb_directs Signed-off-by: Alessandro Comodi --- vpr/src/route/rr_graph.cpp | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/vpr/src/route/rr_graph.cpp b/vpr/src/route/rr_graph.cpp index a9d0b51d468..2fb98991d14 100644 --- a/vpr/src/route/rr_graph.cpp +++ b/vpr/src/route/rr_graph.cpp @@ -2677,7 +2677,7 @@ static t_clb_to_clb_directs* alloc_and_load_clb_to_clb_directs(const t_direct_in parse_direct_pin_name(directs[i].from_pin, directs[i].line, &start_pin_index, &end_pin_index, tile_name, port_name); // Figure out which type, port, and pin is used - for (auto& type : device_ctx.physical_tile_types) { + for (const auto& type : device_ctx.physical_tile_types) { if (strcmp(type.name, tile_name) == 0) { physical_tile = &type; break; @@ -2691,7 +2691,7 @@ static t_clb_to_clb_directs* alloc_and_load_clb_to_clb_directs(const t_direct_in clb_to_clb_directs[i].from_clb_type = physical_tile; bool port_found = false; - for (auto port : physical_tile->ports) { + for (const auto& port : physical_tile->ports) { if (0 == strcmp(port.name, port_name)) { tile_port = port; port_found = true; @@ -2717,7 +2717,7 @@ static t_clb_to_clb_directs* alloc_and_load_clb_to_clb_directs(const t_direct_in parse_direct_pin_name(directs[i].to_pin, directs[i].line, &start_pin_index, &end_pin_index, tile_name, port_name); // Figure out which type, port, and pin is used - for (auto& type : device_ctx.physical_tile_types) { + for (const auto& type : device_ctx.physical_tile_types) { if 
(strcmp(type.name, tile_name) == 0) { physical_tile = &type; break; @@ -2728,10 +2728,10 @@ static t_clb_to_clb_directs* alloc_and_load_clb_to_clb_directs(const t_direct_in VPR_THROW(VPR_ERROR_ARCH, "Unable to find block %s.\n", tile_name); } - clb_to_clb_directs[i].from_clb_type = physical_tile; + clb_to_clb_directs[i].to_clb_type = physical_tile; port_found = false; - for (auto port : physical_tile->ports) { + for (const auto& port : physical_tile->ports) { if (0 == strcmp(port.name, port_name)) { tile_port = port; port_found = true; From 6751d562cba4ece34a7d20a2dd68f2e142b18e97 Mon Sep 17 00:00:00 2001 From: Alessandro Comodi Date: Fri, 18 Oct 2019 11:16:00 +0200 Subject: [PATCH 27/58] equivalent: using physical_tiles instead of logical_block Signed-off-by: Alessandro Comodi --- vpr/src/place/place.cpp | 1 - vpr/src/util/vpr_utils.cpp | 152 +++---------------------------------- vpr/src/util/vpr_utils.h | 3 - 3 files changed, 9 insertions(+), 147 deletions(-) diff --git a/vpr/src/place/place.cpp b/vpr/src/place/place.cpp index e31abf8e6b9..e8808a7254c 100644 --- a/vpr/src/place/place.cpp +++ b/vpr/src/place/place.cpp @@ -1722,7 +1722,6 @@ static void free_placement_structs(const t_placer_opts& placer_opts) { free_placement_macros_structs(); /* Frees up all the data structure used in vpr_utils. */ - free_port_pin_from_blk_pin(); free_blk_pin_from_port_pin(); } diff --git a/vpr/src/util/vpr_utils.cpp b/vpr/src/util/vpr_utils.cpp index 19839b30403..462b36eb522 100644 --- a/vpr/src/util/vpr_utils.cpp +++ b/vpr/src/util/vpr_utils.cpp @@ -34,17 +34,6 @@ * while in the post-pack level, block pins are used. The reason block * * type is used instead of blocks is to save memories. */ -/* f_port_from_blk_pin array allow us to quickly find what port a block * - * pin corresponds to. 
* - * [0...device_ctx.logical_block_type.size()-1][0...blk_pin_count-1] * - * */ -static int** f_port_from_blk_pin = nullptr; - -/* f_port_pin_from_blk_pin array allow us to quickly find what port pin a* - * block pin corresponds to. * - * [0...device_ctx.physical_tile_types.size()-1][0...blk_pin_count-1] */ -static int** f_port_pin_from_blk_pin = nullptr; - /* f_port_pin_to_block_pin array allows us to quickly find what block * * pin a port pin corresponds to. * * [0...device_ctx.physical_tile_types.size()-1][0...num_ports-1][0...num_port_pins-1] */ @@ -57,11 +46,6 @@ const std::regex LOGIC_MODEL_REGEX("(.subckt\\s+)?.*(lut|names|lcell).*", std::r /******************** Subroutine declarations ********************************/ -/* Allocates and loads f_port_from_blk_pin and f_port_pin_from_blk_pin * - * arrays. * - * The arrays are freed in free_placement_structs() */ -static void alloc_and_load_port_pin_from_blk_pin(); - /* Allocates and loads blk_pin_from_port_pin array. * * The arrays are freed in free_placement_structs() */ static void alloc_and_load_blk_pin_from_port_pin(); @@ -1624,124 +1608,6 @@ void free_pb_stats(t_pb* pb) { * * ***************************************************************************************/ -void get_port_pin_from_blk_pin(int blk_type_index, int blk_pin, int* port, int* port_pin) { - /* These two mappings are needed since there are two different netlist * - * conventions - in the cluster level, ports and port pins are used * - * while in the post-pack level, block pins are used. The reason block * - * type is used instead of blocks is that the mapping is the same for * - * blocks belonging to the same block type. * - * * - * f_port_from_blk_pin array allow us to quickly find what port a * - * block pin corresponds to. * - * [0...device_ctx.logical_block_types.size()-1][0...blk_pin_count-1] * - * * - * f_port_pin_from_blk_pin array allow us to quickly find what port * - * pin a block pin corresponds to. 
* - * [0...device_ctx.logical_block_types.size()-1][0...blk_pin_count-1] */ - - /* If either one of the arrays is not allocated and loaded, it is * - * corrupted, so free both of them. */ - if ((f_port_from_blk_pin == nullptr && f_port_pin_from_blk_pin != nullptr) - || (f_port_from_blk_pin != nullptr && f_port_pin_from_blk_pin == nullptr)) { - free_port_pin_from_blk_pin(); - } - - /* If the arrays are not allocated and loaded, allocate it. */ - if (f_port_from_blk_pin == nullptr && f_port_pin_from_blk_pin == nullptr) { - alloc_and_load_port_pin_from_blk_pin(); - } - - /* Return the port and port_pin for the pin. */ - *port = f_port_from_blk_pin[blk_type_index][blk_pin]; - *port_pin = f_port_pin_from_blk_pin[blk_type_index][blk_pin]; -} - -void free_port_pin_from_blk_pin() { - /* Frees the f_port_from_blk_pin and f_port_pin_from_blk_pin arrays. * - * * - * This function is called when the file-scope arrays are corrupted. * - * Otherwise, the arrays are freed in free_placement_structs() */ - - unsigned int itype; - - auto& device_ctx = g_vpr_ctx.device(); - - if (f_port_from_blk_pin != nullptr) { - for (itype = 1; itype < device_ctx.logical_block_types.size(); itype++) { - free(f_port_from_blk_pin[itype]); - } - free(f_port_from_blk_pin); - - f_port_from_blk_pin = nullptr; - } - - if (f_port_pin_from_blk_pin != nullptr) { - for (itype = 1; itype < device_ctx.logical_block_types.size(); itype++) { - free(f_port_pin_from_blk_pin[itype]); - } - free(f_port_pin_from_blk_pin); - - f_port_pin_from_blk_pin = nullptr; - } -} - -static void alloc_and_load_port_pin_from_blk_pin() { - /* Allocates and loads f_port_from_blk_pin and f_port_pin_from_blk_pin * - * arrays. 
* - * * - * The arrays are freed in free_placement_structs() */ - - int** temp_port_from_blk_pin = nullptr; - int** temp_port_pin_from_blk_pin = nullptr; - unsigned int itype; - int iblk_pin, iport, iport_pin; - int blk_pin_count, num_port_pins, num_ports; - auto& device_ctx = g_vpr_ctx.device(); - - /* Allocate and initialize the values to OPEN (-1). */ - temp_port_from_blk_pin = (int**)vtr::malloc(device_ctx.logical_block_types.size() * sizeof(int*)); - temp_port_pin_from_blk_pin = (int**)vtr::malloc(device_ctx.logical_block_types.size() * sizeof(int*)); - for (const auto& type : device_ctx.logical_block_types) { - itype = type.index; - blk_pin_count = type.pb_type->num_pins; - - temp_port_from_blk_pin[itype] = (int*)vtr::malloc(blk_pin_count * sizeof(int)); - temp_port_pin_from_blk_pin[itype] = (int*)vtr::malloc(blk_pin_count * sizeof(int)); - - for (iblk_pin = 0; iblk_pin < blk_pin_count; iblk_pin++) { - temp_port_from_blk_pin[itype][iblk_pin] = OPEN; - temp_port_pin_from_blk_pin[itype][iblk_pin] = OPEN; - } - } - - /* Load the values */ - for (const auto& type : device_ctx.logical_block_types) { - itype = type.index; - - /* itype starts from 1 since device_ctx.logical_block_types[0] is the EMPTY_PHYSICAL_TILE_TYPE. */ - if (itype == 0) { - continue; - } - - blk_pin_count = 0; - num_ports = type.pb_type->num_ports; - - for (iport = 0; iport < num_ports; iport++) { - num_port_pins = type.pb_type->ports[iport].num_pins; - - for (iport_pin = 0; iport_pin < num_port_pins; iport_pin++) { - temp_port_from_blk_pin[itype][blk_pin_count] = iport; - temp_port_pin_from_blk_pin[itype][blk_pin_count] = iport_pin; - blk_pin_count++; - } - } - } - - /* Sets the file_scope variables to point at the arrays. 
*/ - f_port_from_blk_pin = temp_port_from_blk_pin; - f_port_pin_from_blk_pin = temp_port_pin_from_blk_pin; -} - void get_blk_pin_from_port_pin(int blk_type_index, int port, int port_pin, int* blk_pin) { /* This mapping is needed since there are two different netlist * * conventions - in the cluster level, ports and port pins are used * @@ -1771,7 +1637,7 @@ void free_blk_pin_from_port_pin() { auto& device_ctx = g_vpr_ctx.device(); if (f_blk_pin_from_port_pin != nullptr) { - for (const auto& type : device_ctx.logical_block_types) { + for (const auto& type : device_ctx.physical_tile_types) { int itype = type.index; // Avoid EMPTY_PHYSICAL_TILE_TYPE @@ -1779,7 +1645,7 @@ void free_blk_pin_from_port_pin() { continue; } - num_ports = type.pb_type->num_ports; + num_ports = type.ports.size(); for (iport = 0; iport < num_ports; iport++) { free(f_blk_pin_from_port_pin[itype][iport]); } @@ -1803,12 +1669,12 @@ static void alloc_and_load_blk_pin_from_port_pin() { auto& device_ctx = g_vpr_ctx.device(); /* Allocate and initialize the values to OPEN (-1). 
*/ - temp_blk_pin_from_port_pin = (int***)vtr::malloc(device_ctx.logical_block_types.size() * sizeof(int**)); - for (itype = 1; itype < device_ctx.logical_block_types.size(); itype++) { - num_ports = device_ctx.logical_block_types[itype].pb_type->num_ports; + temp_blk_pin_from_port_pin = (int***)vtr::malloc(device_ctx.physical_tile_types.size() * sizeof(int**)); + for (itype = 1; itype < device_ctx.physical_tile_types.size(); itype++) { + num_ports = device_ctx.physical_tile_types[itype].ports.size(); temp_blk_pin_from_port_pin[itype] = (int**)vtr::malloc(num_ports * sizeof(int*)); for (iport = 0; iport < num_ports; iport++) { - num_port_pins = device_ctx.logical_block_types[itype].pb_type->ports[iport].num_pins; + num_port_pins = device_ctx.physical_tile_types[itype].ports[iport].num_pins; temp_blk_pin_from_port_pin[itype][iport] = (int*)vtr::malloc(num_port_pins * sizeof(int)); for (iport_pin = 0; iport_pin < num_port_pins; iport_pin++) { @@ -1819,11 +1685,11 @@ static void alloc_and_load_blk_pin_from_port_pin() { /* Load the values */ /* itype starts from 1 since device_ctx.block_types[0] is the EMPTY_PHYSICAL_TILE_TYPE. 
*/ - for (itype = 1; itype < device_ctx.logical_block_types.size(); itype++) { + for (itype = 1; itype < device_ctx.physical_tile_types.size(); itype++) { blk_pin_count = 0; - num_ports = device_ctx.logical_block_types[itype].pb_type->num_ports; + num_ports = device_ctx.physical_tile_types[itype].ports.size(); for (iport = 0; iport < num_ports; iport++) { - num_port_pins = device_ctx.logical_block_types[itype].pb_type->ports[iport].num_pins; + num_port_pins = device_ctx.physical_tile_types[itype].ports[iport].num_pins; for (iport_pin = 0; iport_pin < num_port_pins; iport_pin++) { temp_blk_pin_from_port_pin[itype][iport][iport_pin] = blk_pin_count; blk_pin_count++; diff --git a/vpr/src/util/vpr_utils.h b/vpr/src/util/vpr_utils.h index ae7a3240a77..36533a4ce1f 100644 --- a/vpr/src/util/vpr_utils.h +++ b/vpr/src/util/vpr_utils.h @@ -152,9 +152,6 @@ void free_pin_id_to_pb_mapping(vtr::vector& pin_id_to_pb float compute_primitive_base_cost(const t_pb_graph_node* primitive); int num_ext_inputs_atom_block(AtomBlockId blk_id); -void get_port_pin_from_blk_pin(int blk_type_index, int blk_pin, int* port, int* port_pin); -void free_port_pin_from_blk_pin(); - void get_blk_pin_from_port_pin(int blk_type_index, int port, int port_pin, int* blk_pin); void free_blk_pin_from_port_pin(); From 84a2c16637524ad40a19db40918b50cf0690707e Mon Sep 17 00:00:00 2001 From: Alessandro Comodi Date: Thu, 17 Oct 2019 14:59:02 +0200 Subject: [PATCH 28/58] equivalent: added Documentation Signed-off-by: Alessandro Comodi --- doc/src/arch/example_arch.xml | 28 ++++++++++++++++++---- doc/src/arch/reference.rst | 16 ++++++++++++- vtr_flow/scripts/upgrade_arch.py | 41 ++++++++++++++++++++++++++++++++ 3 files changed, 80 insertions(+), 5 deletions(-) diff --git a/doc/src/arch/example_arch.xml b/doc/src/arch/example_arch.xml index b11e950b1fd..f4aaa606e6d 100644 --- a/doc/src/arch/example_arch.xml +++ b/doc/src/arch/example_arch.xml @@ -80,7 +80,11 @@ - + + + + + @@ -95,7 +99,11 @@ - + + + + + @@ -105,7 
+113,11 @@ - + + + + + @@ -114,7 +126,15 @@ - + + + + + + + + + diff --git a/doc/src/arch/reference.rst b/doc/src/arch/reference.rst index f98fee87b6b..1cd93f56889 100644 --- a/doc/src/arch/reference.rst +++ b/doc/src/arch/reference.rst @@ -1183,6 +1183,15 @@ The following tags are common to all ```` tags: .. arch:tag:: + Each instance of site must also specify the direct connections between the physical + tile pins and the logical block pins. + + .. arch:tag:: + + Attributes: + - ``from`` is relative to the physical tile pins + - ``to`` is relative to the logical block pins + :req_param pb_type: Name of the corresponding pb_type. **Example: Equivalent Sites** @@ -1190,7 +1199,12 @@ The following tags are common to all ```` tags: .. code-block:: xml - + + + + + ... + .. _arch_complex_blocks: diff --git a/vtr_flow/scripts/upgrade_arch.py b/vtr_flow/scripts/upgrade_arch.py index 336125d3c84..ef764798358 100755 --- a/vtr_flow/scripts/upgrade_arch.py +++ b/vtr_flow/scripts/upgrade_arch.py @@ -973,6 +973,47 @@ def swap_tags(tile, pb_type): return True def add_site_directs(arch): + """ + This function adds the direct pin mappings between a physical + tile and a corresponding logical block. + + Note: the example below is only for explanatory reasons, the signal names are invented + + BEFORE: + + + + + + + + + + + + + + AFTER: + + + + + + + + + + + + + + ... 
+ + + + + """ + TAGS_TO_COPY = ['input', 'output', 'clock'] def add_directs(equivalent_site, pb_type): From d6a4d2d8192a637ba0d888bbf3194ab0e032a92d Mon Sep 17 00:00:00 2001 From: Alessandro Comodi Date: Mon, 7 Oct 2019 13:13:08 +0200 Subject: [PATCH 29/58] place: move initial placement in separate file Signed-off-by: Alessandro Comodi --- vpr/src/place/initial_placement.cpp | 426 ++++++++++++++++++++++++++++ vpr/src/place/initial_placement.h | 9 + vpr/src/place/place.cpp | 404 +------------------------- 3 files changed, 436 insertions(+), 403 deletions(-) create mode 100644 vpr/src/place/initial_placement.cpp create mode 100644 vpr/src/place/initial_placement.h diff --git a/vpr/src/place/initial_placement.cpp b/vpr/src/place/initial_placement.cpp new file mode 100644 index 00000000000..ff0c82b9e55 --- /dev/null +++ b/vpr/src/place/initial_placement.cpp @@ -0,0 +1,426 @@ +#include "vtr_memory.h" +#include "vtr_random.h" + +#include "globals.h" +#include "read_place.h" +#include "initial_placement.h" + +/* The maximum number of tries when trying to place a carry chain at a * + * random location before trying exhaustive placement - find the fist * + * legal position and place it during initial placement. 
*/ +#define MAX_NUM_TRIES_TO_PLACE_MACROS_RANDOMLY 4 + +static t_pl_loc** legal_pos = nullptr; /* [0..device_ctx.num_block_types-1][0..type_tsize - 1] */ +static int* num_legal_pos = nullptr; /* [0..num_legal_pos-1] */ + +static void alloc_legal_placements(); +static void load_legal_placements(); + +static void free_legal_placements(); + +static int check_macro_can_be_placed(t_pl_macro pl_macro, int itype, t_pl_loc head_pos); +static int try_place_macro(int itype, int ipos, t_pl_macro pl_macro); +static void initial_placement_pl_macros(int macros_max_num_tries, int* free_locations); + +static void initial_placement_blocks(int* free_locations, enum e_pad_loc_type pad_loc_type); +static void initial_placement_location(const int* free_locations, int& pipos, int itype, t_pl_loc& to); + +static t_physical_tile_type_ptr pick_placement_type(t_logical_block_type_ptr logical_block, + int num_needed_types, + int* free_locations); + +static void alloc_legal_placements() { + auto& device_ctx = g_vpr_ctx.device(); + auto& place_ctx = g_vpr_ctx.mutable_placement(); + + legal_pos = new t_pl_loc*[device_ctx.physical_tile_types.size()]; + num_legal_pos = (int*)vtr::calloc(device_ctx.physical_tile_types.size(), sizeof(int)); + + /* Initialize all occupancy to zero. 
*/ + + for (size_t i = 0; i < device_ctx.grid.width(); i++) { + for (size_t j = 0; j < device_ctx.grid.height(); j++) { + place_ctx.grid_blocks[i][j].usage = 0; + + for (int k = 0; k < device_ctx.grid[i][j].type->capacity; k++) { + if (place_ctx.grid_blocks[i][j].blocks[k] != INVALID_BLOCK_ID) { + place_ctx.grid_blocks[i][j].blocks[k] = EMPTY_BLOCK_ID; + if (device_ctx.grid[i][j].width_offset == 0 && device_ctx.grid[i][j].height_offset == 0) { + num_legal_pos[device_ctx.grid[i][j].type->index]++; + } + } + } + } + } + + for (const auto& type : device_ctx.physical_tile_types) { + legal_pos[type.index] = new t_pl_loc[num_legal_pos[type.index]]; + } +} + +static void load_legal_placements() { + auto& device_ctx = g_vpr_ctx.device(); + auto& place_ctx = g_vpr_ctx.placement(); + + int* index = (int*)vtr::calloc(device_ctx.physical_tile_types.size(), sizeof(int)); + + for (size_t i = 0; i < device_ctx.grid.width(); i++) { + for (size_t j = 0; j < device_ctx.grid.height(); j++) { + for (int k = 0; k < device_ctx.grid[i][j].type->capacity; k++) { + if (place_ctx.grid_blocks[i][j].blocks[k] == INVALID_BLOCK_ID) { + continue; + } + if (device_ctx.grid[i][j].width_offset == 0 && device_ctx.grid[i][j].height_offset == 0) { + int itype = device_ctx.grid[i][j].type->index; + legal_pos[itype][index[itype]].x = i; + legal_pos[itype][index[itype]].y = j; + legal_pos[itype][index[itype]].z = k; + index[itype]++; + } + } + } + } + free(index); +} + +static void free_legal_placements() { + auto& device_ctx = g_vpr_ctx.device(); + + for (unsigned int i = 0; i < device_ctx.physical_tile_types.size(); i++) { + delete[] legal_pos[i]; + } + delete[] legal_pos; /* Free the mapping list */ + free(num_legal_pos); +} + +static int check_macro_can_be_placed(t_pl_macro pl_macro, int itype, t_pl_loc head_pos) { + auto& device_ctx = g_vpr_ctx.device(); + auto& place_ctx = g_vpr_ctx.placement(); + + // Every macro can be placed until proven otherwise + int macro_can_be_placed = true; + + // Check 
whether all the members can be placed + for (size_t imember = 0; imember < pl_macro.members.size(); imember++) { + t_pl_loc member_pos = head_pos + pl_macro.members[imember].offset; + + // Check whether the location could accept block of this type + // Then check whether the location could still accommodate more blocks + // Also check whether the member position is valid, that is the member's location + // still within the chip's dimemsion and the member_z is allowed at that location on the grid + if (member_pos.x < int(device_ctx.grid.width()) && member_pos.y < int(device_ctx.grid.height()) + && device_ctx.grid[member_pos.x][member_pos.y].type->index == itype + && place_ctx.grid_blocks[member_pos.x][member_pos.y].blocks[member_pos.z] == EMPTY_BLOCK_ID) { + // Can still accommodate blocks here, check the next position + continue; + } else { + // Cant be placed here - skip to the next try + macro_can_be_placed = false; + break; + } + } + + return (macro_can_be_placed); +} + +static int try_place_macro(int itype, int ipos, t_pl_macro pl_macro) { + auto& place_ctx = g_vpr_ctx.mutable_placement(); + + int macro_placed = false; + + // Choose a random position for the head + t_pl_loc head_pos = legal_pos[itype][ipos]; + + // If that location is occupied, do nothing. 
+ if (place_ctx.grid_blocks[head_pos.x][head_pos.y].blocks[head_pos.z] != EMPTY_BLOCK_ID) { + return (macro_placed); + } + + int macro_can_be_placed = check_macro_can_be_placed(pl_macro, itype, head_pos); + + if (macro_can_be_placed) { + // Place down the macro + macro_placed = true; + for (size_t imember = 0; imember < pl_macro.members.size(); imember++) { + t_pl_loc member_pos = head_pos + pl_macro.members[imember].offset; + + ClusterBlockId iblk = pl_macro.members[imember].blk_index; + place_ctx.block_locs[iblk].loc = member_pos; + + place_ctx.grid_blocks[member_pos.x][member_pos.y].blocks[member_pos.z] = pl_macro.members[imember].blk_index; + place_ctx.grid_blocks[member_pos.x][member_pos.y].usage++; + + // Could not ensure that the randomiser would not pick this location again + // So, would have to do a lazy removal - whenever I come across a block that could not be placed, + // go ahead and remove it from the legal_pos[][] array + + } // Finish placing all the members in the macro + + } // End of this choice of legal_pos + + return (macro_placed); +} + +static void initial_placement_pl_macros(int macros_max_num_tries, int* free_locations) { + int macro_placed; + int itype, itry, ipos; + ClusterBlockId blk_id; + + auto& cluster_ctx = g_vpr_ctx.clustering(); + auto& device_ctx = g_vpr_ctx.device(); + auto& place_ctx = g_vpr_ctx.placement(); + + auto& pl_macros = place_ctx.pl_macros; + + // The map serves to place first the most constrained block ids + std::map> sorted_pl_macros_map; + + for (size_t imacro = 0; imacro < pl_macros.size(); imacro++) { + blk_id = pl_macros[imacro].members[0].blk_index; + auto logical_block = cluster_ctx.clb_nlist.block_type(blk_id); + + size_t num_equivalent_tiles = logical_block->equivalent_tiles.size(); + sorted_pl_macros_map[num_equivalent_tiles].push_back(pl_macros[imacro]); + } + + /* Macros are harder to place. 
Do them first */ + for (auto& sorted_pl_macros : sorted_pl_macros_map) { + for (auto& pl_macro : sorted_pl_macros.second) { + // Every macro are not placed in the beginnning + macro_placed = false; + + // Assume that all the blocks in the macro are of the same type + blk_id = pl_macro.members[0].blk_index; + auto logical_block = cluster_ctx.clb_nlist.block_type(blk_id); + auto type = pick_placement_type(logical_block, int(pl_macro.members.size()), free_locations); + + if (type == nullptr) { + VPR_FATAL_ERROR(VPR_ERROR_PLACE, + "Initial placement failed.\n" + "Could not place macro length %zu with head block %s (#%zu); not enough free locations of type %s (#%d).\n" + "VPR cannot auto-size for your circuit, please resize the FPGA manually.\n", + pl_macro.members.size(), cluster_ctx.clb_nlist.block_name(blk_id).c_str(), size_t(blk_id), logical_block->name, logical_block->index); + } + + itype = type->index; + + // Try to place the macro first, if can be placed - place them, otherwise try again + for (itry = 0; itry < macros_max_num_tries && macro_placed == false; itry++) { + // Choose a random position for the head + ipos = vtr::irand(free_locations[itype] - 1); + + // Try to place the macro + macro_placed = try_place_macro(itype, ipos, pl_macro); + + } // Finished all tries + + if (macro_placed == false) { + // if a macro still could not be placed after macros_max_num_tries times, + // go through the chip exhaustively to find a legal placement for the macro + // place the macro on the first location that is legal + // then set macro_placed = true; + // if there are no legal positions, error out + + // Exhaustive placement of carry macros + for (ipos = 0; ipos < free_locations[itype] && macro_placed == false; ipos++) { + // Try to place the macro + macro_placed = try_place_macro(itype, ipos, pl_macro); + + } // Exhausted all the legal placement position for this macro + + // If macro could not be placed after exhaustive placement, error out + if (macro_placed == 
false) { + // Error out + VPR_FATAL_ERROR(VPR_ERROR_PLACE, + "Initial placement failed.\n" + "Could not place macro length %zu with head block %s (#%zu); not enough free locations of type %s (#%d).\n" + "Please manually size the FPGA because VPR can't do this yet.\n", + pl_macro.members.size(), cluster_ctx.clb_nlist.block_name(blk_id).c_str(), size_t(blk_id), device_ctx.physical_tile_types[itype].name, itype); + } + + } else { + // This macro has been placed successfully, proceed to place the next macro + continue; + } + } + } // Finish placing all the pl_macros successfully +} + +/* Place blocks that are NOT a part of any macro. + * We'll randomly place each block in the clustered netlist, one by one. */ +static void initial_placement_blocks(int* free_locations, enum e_pad_loc_type pad_loc_type) { + int itype, ipos; + auto& cluster_ctx = g_vpr_ctx.clustering(); + auto& place_ctx = g_vpr_ctx.mutable_placement(); + + // The map serves to place first the most constrained block ids + std::map> sorted_block_map; + + for (auto blk_id : cluster_ctx.clb_nlist.blocks()) { + auto logical_block = cluster_ctx.clb_nlist.block_type(blk_id); + + size_t num_equivalent_tiles = logical_block->equivalent_tiles.size(); + sorted_block_map[num_equivalent_tiles].push_back(blk_id); + } + + for (auto& sorted_blocks : sorted_block_map) { + for (auto blk_id : sorted_blocks.second) { + if (place_ctx.block_locs[blk_id].loc.x != -1) { // -1 is a sentinel for an empty block + // block placed. + continue; + } + + auto logical_block = cluster_ctx.clb_nlist.block_type(blk_id); + + /* Don't do IOs if the user specifies IOs; we'll read those locations later. */ + if (!(is_io_type(physical_tile_type(logical_block)) && pad_loc_type == USER)) { + /* Randomly select a free location of the appropriate type for blk_id. + * We have a linearized list of all the free locations that can + * accommodate a block of that type in free_locations[itype]. + * Choose one randomly and put blk_id there. 
Then we don't want to pick + * that location again, so remove it from the free_locations array. + */ + + auto type = pick_placement_type(logical_block, 1, free_locations); + + if (type == nullptr) { + VPR_FATAL_ERROR(VPR_ERROR_PLACE, + "Initial placement failed.\n" + "Could not place block %s (#%zu); no free locations of type %s (#%d).\n", + cluster_ctx.clb_nlist.block_name(blk_id).c_str(), size_t(blk_id), logical_block->name, logical_block->index); + } + + itype = type->index; + + t_pl_loc to; + initial_placement_location(free_locations, ipos, itype, to); + + // Make sure that the position is EMPTY_BLOCK before placing the block down + VTR_ASSERT(place_ctx.grid_blocks[to.x][to.y].blocks[to.z] == EMPTY_BLOCK_ID); + + place_ctx.grid_blocks[to.x][to.y].blocks[to.z] = blk_id; + place_ctx.grid_blocks[to.x][to.y].usage++; + + place_ctx.block_locs[blk_id].loc = to; + + //Mark IOs as fixed if specifying a (fixed) random placement + if (is_io_type(physical_tile_type(logical_block)) && pad_loc_type == RANDOM) { + place_ctx.block_locs[blk_id].is_fixed = true; + } + + /* Ensure randomizer doesn't pick this location again, since it's occupied. Could shift all the + * legal positions in legal_pos to remove the entry (choice) we just used, but faster to + * just move the last entry in legal_pos to the spot we just used and decrement the + * count of free_locations. 
*/ + legal_pos[itype][ipos] = legal_pos[itype][free_locations[itype] - 1]; /* overwrite used block position */ + free_locations[itype]--; + } + } + } +} + +static void initial_placement_location(const int* free_locations, int& ipos, int itype, t_pl_loc& to) { + ipos = vtr::irand(free_locations[itype] - 1); + to = legal_pos[itype][ipos]; +} + +static t_physical_tile_type_ptr pick_placement_type(t_logical_block_type_ptr logical_block, + int num_needed_types, + int* free_locations) { + // Loop through the ordered map to get tiles in a decreasing priority order + for (auto& tile : logical_block->equivalent_tiles) { + if (free_locations[tile->index] >= num_needed_types) { + return tile; + } + } + + return nullptr; +} + +void initial_placement(enum e_pad_loc_type pad_loc_type, + const char* pad_loc_file) { + /* Randomly places the blocks to create an initial placement. We rely on + * the legal_pos array already being loaded. That legal_pos[itype] is an + * array that gives every legal value of (x,y,z) that can accommodate a block. + * The number of such locations is given by num_legal_pos[itype]. + */ + + // Loading legal placement locations + alloc_legal_placements(); + load_legal_placements(); + + int itype, ipos; + int* free_locations; /* [0..device_ctx.num_block_types-1]. + * Stores how many locations there are for this type that *might* still be free. + * That is, this stores the number of entries in legal_pos[itype] that are worth considering + * as you look for a free location. + */ + auto& device_ctx = g_vpr_ctx.device(); + auto& cluster_ctx = g_vpr_ctx.clustering(); + auto& place_ctx = g_vpr_ctx.mutable_placement(); + + free_locations = (int*)vtr::malloc(device_ctx.physical_tile_types.size() * sizeof(int)); + for (const auto& type : device_ctx.physical_tile_types) { + itype = type.index; + free_locations[itype] = num_legal_pos[itype]; + } + + /* We'll use the grid to record where everything goes. Initialize to the grid has no + * blocks placed anywhere. 
+ */ + for (size_t i = 0; i < device_ctx.grid.width(); i++) { + for (size_t j = 0; j < device_ctx.grid.height(); j++) { + place_ctx.grid_blocks[i][j].usage = 0; + itype = device_ctx.grid[i][j].type->index; + for (int k = 0; k < device_ctx.physical_tile_types[itype].capacity; k++) { + if (place_ctx.grid_blocks[i][j].blocks[k] != INVALID_BLOCK_ID) { + place_ctx.grid_blocks[i][j].blocks[k] = EMPTY_BLOCK_ID; + } + } + } + } + + /* Similarly, mark all blocks as not being placed yet. */ + for (auto blk_id : cluster_ctx.clb_nlist.blocks()) { + place_ctx.block_locs[blk_id].loc = t_pl_loc(); + } + + initial_placement_pl_macros(MAX_NUM_TRIES_TO_PLACE_MACROS_RANDOMLY, free_locations); + + // All the macros are placed, update the legal_pos[][] array + for (const auto& type : device_ctx.physical_tile_types) { + itype = type.index; + VTR_ASSERT(free_locations[itype] >= 0); + for (ipos = 0; ipos < free_locations[itype]; ipos++) { + t_pl_loc pos = legal_pos[itype][ipos]; + + // Check if that location is occupied. 
If it is, remove from legal_pos + if (place_ctx.grid_blocks[pos.x][pos.y].blocks[pos.z] != EMPTY_BLOCK_ID && place_ctx.grid_blocks[pos.x][pos.y].blocks[pos.z] != INVALID_BLOCK_ID) { + legal_pos[itype][ipos] = legal_pos[itype][free_locations[itype] - 1]; + free_locations[itype]--; + + // After the move, I need to check this particular entry again + ipos--; + continue; + } + } + } // Finish updating the legal_pos[][] and free_locations[] array + + initial_placement_blocks(free_locations, pad_loc_type); + + if (pad_loc_type == USER) { + read_user_pad_loc(pad_loc_file); + } + + /* Restore legal_pos */ + load_legal_placements(); + +#ifdef VERBOSE + VTR_LOG("At end of initial_placement.\n"); + if (getEchoEnabled() && isEchoFileEnabled(E_ECHO_INITIAL_CLB_PLACEMENT)) { + print_clb_placement(getEchoFileName(E_ECHO_INITIAL_CLB_PLACEMENT)); + } +#endif + free(free_locations); + free_legal_placements(); +} diff --git a/vpr/src/place/initial_placement.h b/vpr/src/place/initial_placement.h new file mode 100644 index 00000000000..ec2ad38f326 --- /dev/null +++ b/vpr/src/place/initial_placement.h @@ -0,0 +1,9 @@ +#ifndef VPR_INITIAL_PLACEMENT_H +#define VPR_INITIAL_PLACEMENT_H + +#include "vpr_types.h" + +void initial_placement(enum e_pad_loc_type pad_loc_type, + const char* pad_loc_file); + +#endif diff --git a/vpr/src/place/place.cpp b/vpr/src/place/place.cpp index e8808a7254c..3f1451be77c 100644 --- a/vpr/src/place/place.cpp +++ b/vpr/src/place/place.cpp @@ -27,6 +27,7 @@ #include "place_macro.h" #include "histogram.h" #include "place_util.h" +#include "initial_placement.h" #include "place_delay_model.h" #include "move_transactions.h" #include "move_utils.h" @@ -59,11 +60,6 @@ using std::min; * variables round-offs check. */ #define MAX_MOVES_BEFORE_RECOMPUTE 500000 -/* The maximum number of tries when trying to place a carry chain at a * - * random location before trying exhaustive placement - find the fist * - * legal position and place it during initial placement. 
*/ -#define MAX_NUM_TRIES_TO_PLACE_MACROS_RANDOMLY 4 - /* Flags for the states of the bounding box. * * Stored as char for memory efficiency. */ #define NOT_UPDATED_YET 'N' @@ -113,9 +109,6 @@ constexpr double MAX_INV_TIMING_COST = 1.e9; /* Cost of a net, and a temporary cost of a net used during move assessment. */ static vtr::vector net_cost, temp_net_cost; -static t_pl_loc** legal_pos = nullptr; /* [0..device_ctx.num_block_types-1][0..type_tsize - 1] */ -static int* num_legal_pos = nullptr; /* [0..num_legal_pos-1] */ - /* [0...cluster_ctx.clb_nlist.nets().size()-1] * * A flag array to indicate whether the specific bounding box has been updated * * in this particular swap or not. If it has been updated before, the code * @@ -285,23 +278,6 @@ static void alloc_and_load_for_fast_cost_update(float place_cost_exp); static void free_fast_cost_update(); -static void alloc_legal_placements(); -static void load_legal_placements(); - -static void free_legal_placements(); - -static int check_macro_can_be_placed(int imacro, int itype, t_pl_loc head_pos); - -static int try_place_macro(int itype, int ipos, int imacro); - -static void initial_placement_pl_macros(int macros_max_num_tries, int* free_locations); - -static void initial_placement_blocks(int* free_locations, enum e_pad_loc_type pad_loc_type); -static void initial_placement_location(const int* free_locations, int& pipos, int itype, t_pl_loc& to); - -static void initial_placement(enum e_pad_loc_type pad_loc_type, - const char* pad_loc_file); - static double comp_bb_cost(e_cost_methods method); static void update_move_nets(int num_nets_affected); @@ -418,8 +394,6 @@ static void placement_inner_loop(float t, static void recompute_costs_from_scratch(const t_placer_opts& placer_opts, const PlaceDelayModel* delay_model, t_placer_costs* costs); -static t_physical_tile_type_ptr pick_highest_placement_priority_type(t_logical_block_type_ptr logical_block, int num_needed_types, int* free_locations); - static void 
calc_placer_stats(t_placer_statistics& stats, float& success_rat, double& std_dev, const t_placer_costs& costs, const int move_lim); static void generate_post_place_timing_reports(const t_placer_opts& placer_opts, @@ -1691,7 +1665,6 @@ static double comp_bb_cost(e_cost_methods method) { static void free_placement_structs(const t_placer_opts& placer_opts) { auto& cluster_ctx = g_vpr_ctx.clustering(); - free_legal_placements(); free_fast_cost_update(); if (placer_opts.place_algorithm == PATH_TIMING_DRIVEN_PLACE @@ -1742,9 +1715,6 @@ static void alloc_and_load_placement_structs(float place_cost_exp, init_placement_context(); - alloc_legal_placements(); - load_legal_placements(); - max_pins_per_clb = 0; for (const auto& type : device_ctx.physical_tile_types) { max_pins_per_clb = max(max_pins_per_clb, type.num_pins); @@ -2257,362 +2227,6 @@ static void update_bb(ClusterNetId net_id, t_bb* bb_coord_new, t_bb* bb_edge_new } } -static void alloc_legal_placements() { - auto& device_ctx = g_vpr_ctx.device(); - auto& place_ctx = g_vpr_ctx.mutable_placement(); - - legal_pos = new t_pl_loc*[device_ctx.physical_tile_types.size()]; - num_legal_pos = (int*)vtr::calloc(device_ctx.physical_tile_types.size(), sizeof(int)); - - /* Initialize all occupancy to zero. 
*/ - - for (size_t i = 0; i < device_ctx.grid.width(); i++) { - for (size_t j = 0; j < device_ctx.grid.height(); j++) { - place_ctx.grid_blocks[i][j].usage = 0; - - for (int k = 0; k < device_ctx.grid[i][j].type->capacity; k++) { - if (place_ctx.grid_blocks[i][j].blocks[k] != INVALID_BLOCK_ID) { - place_ctx.grid_blocks[i][j].blocks[k] = EMPTY_BLOCK_ID; - if (device_ctx.grid[i][j].width_offset == 0 && device_ctx.grid[i][j].height_offset == 0) { - num_legal_pos[device_ctx.grid[i][j].type->index]++; - } - } - } - } - } - - for (const auto& type : device_ctx.physical_tile_types) { - legal_pos[type.index] = new t_pl_loc[num_legal_pos[type.index]]; - } -} - -static void load_legal_placements() { - auto& device_ctx = g_vpr_ctx.device(); - auto& place_ctx = g_vpr_ctx.placement(); - - int* index = (int*)vtr::calloc(device_ctx.physical_tile_types.size(), sizeof(int)); - - for (size_t i = 0; i < device_ctx.grid.width(); i++) { - for (size_t j = 0; j < device_ctx.grid.height(); j++) { - for (int k = 0; k < device_ctx.grid[i][j].type->capacity; k++) { - if (place_ctx.grid_blocks[i][j].blocks[k] == INVALID_BLOCK_ID) { - continue; - } - if (device_ctx.grid[i][j].width_offset == 0 && device_ctx.grid[i][j].height_offset == 0) { - int itype = device_ctx.grid[i][j].type->index; - legal_pos[itype][index[itype]].x = i; - legal_pos[itype][index[itype]].y = j; - legal_pos[itype][index[itype]].z = k; - index[itype]++; - } - } - } - } - free(index); -} - -static void free_legal_placements() { - auto& device_ctx = g_vpr_ctx.device(); - - for (unsigned int i = 0; i < device_ctx.physical_tile_types.size(); i++) { - delete[] legal_pos[i]; - } - delete[] legal_pos; /* Free the mapping list */ - free(num_legal_pos); -} - -static int check_macro_can_be_placed(int imacro, int itype, t_pl_loc head_pos) { - auto& device_ctx = g_vpr_ctx.device(); - auto& place_ctx = g_vpr_ctx.placement(); - - // Every macro can be placed until proven otherwise - int macro_can_be_placed = true; - - auto& pl_macros = 
place_ctx.pl_macros; - - // Check whether all the members can be placed - for (size_t imember = 0; imember < pl_macros[imacro].members.size(); imember++) { - t_pl_loc member_pos = head_pos + pl_macros[imacro].members[imember].offset; - - // Check whether the location could accept block of this type - // Then check whether the location could still accommodate more blocks - // Also check whether the member position is valid, that is the member's location - // still within the chip's dimemsion and the member_z is allowed at that location on the grid - if (member_pos.x < int(device_ctx.grid.width()) && member_pos.y < int(device_ctx.grid.height()) - && device_ctx.grid[member_pos.x][member_pos.y].type->index == itype - && place_ctx.grid_blocks[member_pos.x][member_pos.y].blocks[member_pos.z] == EMPTY_BLOCK_ID) { - // Can still accommodate blocks here, check the next position - continue; - } else { - // Cant be placed here - skip to the next try - macro_can_be_placed = false; - break; - } - } - - return (macro_can_be_placed); -} - -static int try_place_macro(int itype, int ipos, int imacro) { - auto& place_ctx = g_vpr_ctx.mutable_placement(); - - int macro_placed = false; - - // Choose a random position for the head - t_pl_loc head_pos = legal_pos[itype][ipos]; - - // If that location is occupied, do nothing. 
- if (place_ctx.grid_blocks[head_pos.x][head_pos.y].blocks[head_pos.z] != EMPTY_BLOCK_ID) { - return (macro_placed); - } - - int macro_can_be_placed = check_macro_can_be_placed(imacro, itype, head_pos); - - if (macro_can_be_placed) { - auto& pl_macros = place_ctx.pl_macros; - - // Place down the macro - macro_placed = true; - for (size_t imember = 0; imember < pl_macros[imacro].members.size(); imember++) { - t_pl_loc member_pos = head_pos + pl_macros[imacro].members[imember].offset; - - ClusterBlockId iblk = pl_macros[imacro].members[imember].blk_index; - place_ctx.block_locs[iblk].loc = member_pos; - - place_ctx.grid_blocks[member_pos.x][member_pos.y].blocks[member_pos.z] = pl_macros[imacro].members[imember].blk_index; - place_ctx.grid_blocks[member_pos.x][member_pos.y].usage++; - - // Could not ensure that the randomiser would not pick this location again - // So, would have to do a lazy removal - whenever I come across a block that could not be placed, - // go ahead and remove it from the legal_pos[][] array - - } // Finish placing all the members in the macro - - } // End of this choice of legal_pos - - return (macro_placed); -} - -static void initial_placement_pl_macros(int macros_max_num_tries, int* free_locations) { - int macro_placed; - int itype, itry, ipos; - ClusterBlockId blk_id; - - auto& cluster_ctx = g_vpr_ctx.clustering(); - auto& device_ctx = g_vpr_ctx.device(); - auto& place_ctx = g_vpr_ctx.placement(); - - auto& pl_macros = place_ctx.pl_macros; - - /* Macros are harder to place. 
Do them first */ - for (size_t imacro = 0; imacro < place_ctx.pl_macros.size(); imacro++) { - // Every macro are not placed in the beginnning - macro_placed = false; - - // Assume that all the blocks in the macro are of the same type - blk_id = pl_macros[imacro].members[0].blk_index; - auto logical_block = cluster_ctx.clb_nlist.block_type(blk_id); - auto type = pick_highest_placement_priority_type(logical_block, int(pl_macros[imacro].members.size()), free_locations); - - if (type == nullptr) { - VPR_FATAL_ERROR(VPR_ERROR_PLACE, - "Initial placement failed.\n" - "Could not place macro length %zu with head block %s (#%zu); not enough free locations of type %s (#%d).\n" - "VPR cannot auto-size for your circuit, please resize the FPGA manually.\n", - pl_macros[imacro].members.size(), cluster_ctx.clb_nlist.block_name(blk_id).c_str(), size_t(blk_id), logical_block->name, logical_block->index); - } - - itype = type->index; - - // Try to place the macro first, if can be placed - place them, otherwise try again - for (itry = 0; itry < macros_max_num_tries && macro_placed == false; itry++) { - // Choose a random position for the head - ipos = vtr::irand(free_locations[itype] - 1); - - // Try to place the macro - macro_placed = try_place_macro(itype, ipos, imacro); - - } // Finished all tries - - if (macro_placed == false) { - // if a macro still could not be placed after macros_max_num_tries times, - // go through the chip exhaustively to find a legal placement for the macro - // place the macro on the first location that is legal - // then set macro_placed = true; - // if there are no legal positions, error out - - // Exhaustive placement of carry macros - for (ipos = 0; ipos < free_locations[itype] && macro_placed == false; ipos++) { - // Try to place the macro - macro_placed = try_place_macro(itype, ipos, imacro); - - } // Exhausted all the legal placement position for this macro - - // If macro could not be placed after exhaustive placement, error out - if (macro_placed 
== false) { - // Error out - VPR_FATAL_ERROR(VPR_ERROR_PLACE, - "Initial placement failed.\n" - "Could not place macro length %zu with head block %s (#%zu); not enough free locations of type %s (#%d).\n" - "Please manually size the FPGA because VPR can't do this yet.\n", - pl_macros[imacro].members.size(), cluster_ctx.clb_nlist.block_name(blk_id).c_str(), size_t(blk_id), device_ctx.physical_tile_types[itype].name, itype); - } - - } else { - // This macro has been placed successfully, proceed to place the next macro - continue; - } - } // Finish placing all the pl_macros successfully -} - -/* Place blocks that are NOT a part of any macro. - * We'll randomly place each block in the clustered netlist, one by one. */ -static void initial_placement_blocks(int* free_locations, enum e_pad_loc_type pad_loc_type) { - int itype, ipos; - auto& cluster_ctx = g_vpr_ctx.clustering(); - auto& place_ctx = g_vpr_ctx.mutable_placement(); - - for (auto blk_id : cluster_ctx.clb_nlist.blocks()) { - if (place_ctx.block_locs[blk_id].loc.x != -1) { // -1 is a sentinel for an empty block - // block placed. - continue; - } - - auto logical_block = cluster_ctx.clb_nlist.block_type(blk_id); - - /* Don't do IOs if the user specifies IOs; we'll read those locations later. */ - if (!(is_io_type(physical_tile_type(logical_block)) && pad_loc_type == USER)) { - /* Randomly select a free location of the appropriate type for blk_id. - * We have a linearized list of all the free locations that can - * accommodate a block of that type in free_locations[itype]. - * Choose one randomly and put blk_id there. Then we don't want to pick - * that location again, so remove it from the free_locations array. 
- */ - - auto type = pick_highest_placement_priority_type(logical_block, 1, free_locations); - - if (type == nullptr) { - VPR_FATAL_ERROR(VPR_ERROR_PLACE, - "Initial placement failed.\n" - "Could not place block %s (#%zu); no free locations of type %s (#%d).\n", - cluster_ctx.clb_nlist.block_name(blk_id).c_str(), size_t(blk_id), logical_block->name, logical_block->index); - } - - itype = type->index; - - t_pl_loc to; - initial_placement_location(free_locations, ipos, itype, to); - - // Make sure that the position is EMPTY_BLOCK before placing the block down - VTR_ASSERT(place_ctx.grid_blocks[to.x][to.y].blocks[to.z] == EMPTY_BLOCK_ID); - - place_ctx.grid_blocks[to.x][to.y].blocks[to.z] = blk_id; - place_ctx.grid_blocks[to.x][to.y].usage++; - - place_ctx.block_locs[blk_id].loc = to; - - //Mark IOs as fixed if specifying a (fixed) random placement - if (is_io_type(physical_tile_type(logical_block)) && pad_loc_type == RANDOM) { - place_ctx.block_locs[blk_id].is_fixed = true; - } - - /* Ensure randomizer doesn't pick this location again, since it's occupied. Could shift all the - * legal positions in legal_pos to remove the entry (choice) we just used, but faster to - * just move the last entry in legal_pos to the spot we just used and decrement the - * count of free_locations. */ - legal_pos[itype][ipos] = legal_pos[itype][free_locations[itype] - 1]; /* overwrite used block position */ - free_locations[itype]--; - } - } -} - -static void initial_placement_location(const int* free_locations, int& ipos, int itype, t_pl_loc& to) { - ipos = vtr::irand(free_locations[itype] - 1); - to = legal_pos[itype][ipos]; -} - -static void initial_placement(enum e_pad_loc_type pad_loc_type, - const char* pad_loc_file) { - /* Randomly places the blocks to create an initial placement. We rely on - * the legal_pos array already being loaded. That legal_pos[itype] is an - * array that gives every legal value of (x,y,z) that can accommodate a block. 
- * The number of such locations is given by num_legal_pos[itype]. - */ - int itype, ipos; - int* free_locations; /* [0..device_ctx.num_block_types-1]. - * Stores how many locations there are for this type that *might* still be free. - * That is, this stores the number of entries in legal_pos[itype] that are worth considering - * as you look for a free location. - */ - auto& device_ctx = g_vpr_ctx.device(); - auto& cluster_ctx = g_vpr_ctx.clustering(); - auto& place_ctx = g_vpr_ctx.mutable_placement(); - - free_locations = (int*)vtr::malloc(device_ctx.physical_tile_types.size() * sizeof(int)); - for (const auto& type : device_ctx.physical_tile_types) { - itype = type.index; - free_locations[itype] = num_legal_pos[itype]; - } - - /* We'll use the grid to record where everything goes. Initialize to the grid has no - * blocks placed anywhere. - */ - for (size_t i = 0; i < device_ctx.grid.width(); i++) { - for (size_t j = 0; j < device_ctx.grid.height(); j++) { - place_ctx.grid_blocks[i][j].usage = 0; - itype = device_ctx.grid[i][j].type->index; - for (int k = 0; k < device_ctx.physical_tile_types[itype].capacity; k++) { - if (place_ctx.grid_blocks[i][j].blocks[k] != INVALID_BLOCK_ID) { - place_ctx.grid_blocks[i][j].blocks[k] = EMPTY_BLOCK_ID; - } - } - } - } - - /* Similarly, mark all blocks as not being placed yet. */ - for (auto blk_id : cluster_ctx.clb_nlist.blocks()) { - place_ctx.block_locs[blk_id].loc = t_pl_loc(); - } - - initial_placement_pl_macros(MAX_NUM_TRIES_TO_PLACE_MACROS_RANDOMLY, free_locations); - - // All the macros are placed, update the legal_pos[][] array - for (const auto& type : device_ctx.physical_tile_types) { - itype = type.index; - VTR_ASSERT(free_locations[itype] >= 0); - for (ipos = 0; ipos < free_locations[itype]; ipos++) { - t_pl_loc pos = legal_pos[itype][ipos]; - - // Check if that location is occupied. 
If it is, remove from legal_pos - if (place_ctx.grid_blocks[pos.x][pos.y].blocks[pos.z] != EMPTY_BLOCK_ID && place_ctx.grid_blocks[pos.x][pos.y].blocks[pos.z] != INVALID_BLOCK_ID) { - legal_pos[itype][ipos] = legal_pos[itype][free_locations[itype] - 1]; - free_locations[itype]--; - - // After the move, I need to check this particular entry again - ipos--; - continue; - } - } - } // Finish updating the legal_pos[][] and free_locations[] array - - initial_placement_blocks(free_locations, pad_loc_type); - - if (pad_loc_type == USER) { - read_user_pad_loc(pad_loc_file); - } - - /* Restore legal_pos */ - load_legal_placements(); - -#ifdef VERBOSE - VTR_LOG("At end of initial_placement.\n"); - if (getEchoEnabled() && isEchoFileEnabled(E_ECHO_INITIAL_CLB_PLACEMENT)) { - print_clb_placement(getEchoFileName(E_ECHO_INITIAL_CLB_PLACEMENT)); - } -#endif - free(free_locations); -} - static void free_fast_cost_update() { auto& device_ctx = g_vpr_ctx.device(); @@ -2851,22 +2465,6 @@ int check_macro_placement_consistency() { return error; } -static t_physical_tile_type_ptr pick_highest_placement_priority_type(t_logical_block_type_ptr logical_block, int num_needed_types, int* free_locations) { - auto& device_ctx = g_vpr_ctx.device(); - auto& physical_tiles = device_ctx.physical_tile_types; - - // Loop through the ordered map to get tiles in a decreasing priority order - for (auto& physical_tiles_ids : logical_block->physical_tiles_priority) { - for (auto tile_id : physical_tiles_ids.second) { - if (free_locations[tile_id] >= num_needed_types) { - return &physical_tiles[tile_id]; - } - } - } - - return nullptr; -} - #ifdef VERBOSE static void print_clb_placement(const char* fname) { /* Prints out the clb placements to a file. 
*/ From b0139930a181a61f01ee7cb0accf988b28dd44d6 Mon Sep 17 00:00:00 2001 From: Alessandro Comodi Date: Thu, 17 Oct 2019 17:58:02 +0200 Subject: [PATCH 30/58] equivalent: delete physical_tile_type(t_logical_block_type) function Signed-off-by: Alessandro Comodi --- vpr/src/base/SetupGrid.cpp | 2 +- vpr/src/base/check_netlist.cpp | 10 ++-- vpr/src/base/clustered_netlist.cpp | 2 +- vpr/src/base/read_netlist.cpp | 75 ++++++++++++------------ vpr/src/base/read_place.cpp | 4 +- vpr/src/draw/draw.cpp | 4 +- vpr/src/place/initial_placement.cpp | 4 +- vpr/src/place/uniform_move_generator.cpp | 2 +- vpr/src/route/route_common.cpp | 5 +- vpr/src/util/vpr_utils.cpp | 56 +++++++++++++----- vpr/src/util/vpr_utils.h | 10 +++- 11 files changed, 102 insertions(+), 72 deletions(-) diff --git a/vpr/src/base/SetupGrid.cpp b/vpr/src/base/SetupGrid.cpp index dad86352cdb..deda993b3b3 100644 --- a/vpr/src/base/SetupGrid.cpp +++ b/vpr/src/base/SetupGrid.cpp @@ -668,7 +668,7 @@ float calculate_device_utilization(const DeviceGrid& grid, std::mapis_ignored_pin[pin_index] != net_is_ignored - && !is_io_type(physical_tile_type(logical_type))) { + && !is_io_type(physical_type)) { VTR_LOGV_WARN(verbosity > 2, "Global net '%s' connects to non-global architecture pin '%s' (netlist pin '%s')\n", cluster_ctx.clb_nlist.net_name(net_id).c_str(), diff --git a/vpr/src/base/clustered_netlist.cpp b/vpr/src/base/clustered_netlist.cpp index 9e4eaba5d32..eb9b1cabba5 100644 --- a/vpr/src/base/clustered_netlist.cpp +++ b/vpr/src/base/clustered_netlist.cpp @@ -52,7 +52,7 @@ int ClusteredNetlist::block_pin_net_index(const ClusterBlockId blk_id, const int ClusterPinId ClusteredNetlist::block_pin(const ClusterBlockId blk, const int phys_pin_index) const { VTR_ASSERT_SAFE(valid_block_id(blk)); - VTR_ASSERT_SAFE_MSG(phys_pin_index >= 0 && phys_pin_index < physical_tile_type(block_type(blk))->num_pins, "Physical pin index must be in range"); + VTR_ASSERT_SAFE_MSG(phys_pin_index >= 0 && phys_pin_index < 
pick_random_physical_type(block_type(blk))->num_pins, "Physical pin index must be in range"); return block_logical_pins_[blk][phys_pin_index]; } diff --git a/vpr/src/base/read_netlist.cpp b/vpr/src/base/read_netlist.cpp index 8937e4a6868..d310d2b1668 100644 --- a/vpr/src/base/read_netlist.cpp +++ b/vpr/src/base/read_netlist.cpp @@ -860,7 +860,6 @@ static void load_external_nets_and_cb(ClusteredNetlist& clb_nlist) { ext_nhash = alloc_hash_table(); - t_physical_tile_type_ptr tile_type; t_logical_block_type_ptr block_type; /* Assumes that complex block pins are ordered inputs, outputs, globals */ @@ -951,43 +950,44 @@ static void load_external_nets_and_cb(ClusteredNetlist& clb_nlist) { for (auto blk_id : clb_nlist.blocks()) { block_type = clb_nlist.block_type(blk_id); // XXX Use pin mapping here! To check that all the possible pins can be used in the correct tile! - tile_type = physical_tile_type(block_type); - for (j = 0; j < tile_type->num_pins; j++) { - //Iterate through each pin of the block, and see if there is a net allocated/used for it - clb_net_id = clb_nlist.block_net(blk_id, j); - - if (clb_net_id != ClusterNetId::INVALID()) { - //Verify old and new CLB netlists have the same # of pins per net - if (RECEIVER == tile_type->class_inf[tile_type->pin_class[j]].type) { - count[clb_net_id]++; - - if (count[clb_net_id] > (int)clb_nlist.net_sinks(clb_net_id).size()) { - VPR_FATAL_ERROR(VPR_ERROR_NET_F, - "net %s #%d inconsistency, expected %d terminals but encountered %d terminals, it is likely net terminal is disconnected in netlist file.\n", - clb_nlist.net_name(clb_net_id).c_str(), size_t(clb_net_id), count[clb_net_id], - clb_nlist.net_sinks(clb_net_id).size()); - } + for (const auto& tile_type : block_type->equivalent_tiles) { + for (j = 0; j < tile_type->num_pins; j++) { + //Iterate through each pin of the block, and see if there is a net allocated/used for it + clb_net_id = clb_nlist.block_net(blk_id, j); - //Asserts the ClusterBlockId is the same when 
ClusterNetId & pin BitIndex is provided - VTR_ASSERT(blk_id == clb_nlist.pin_block(*(clb_nlist.net_pins(clb_net_id).begin() + count[clb_net_id]))); - //Asserts the block's pin index is the same - VTR_ASSERT(j == clb_nlist.pin_physical_index(*(clb_nlist.net_pins(clb_net_id).begin() + count[clb_net_id]))); - VTR_ASSERT(j == clb_nlist.net_pin_physical_index(clb_net_id, count[clb_net_id])); + if (clb_net_id != ClusterNetId::INVALID()) { + //Verify old and new CLB netlists have the same # of pins per net + if (RECEIVER == tile_type->class_inf[tile_type->pin_class[j]].type) { + count[clb_net_id]++; - // nets connecting to global pins are marked as global nets - if (tile_type->is_pin_global[j]) { - clb_nlist.set_net_is_global(clb_net_id, true); - } + if (count[clb_net_id] > (int)clb_nlist.net_sinks(clb_net_id).size()) { + VPR_FATAL_ERROR(VPR_ERROR_NET_F, + "net %s #%d inconsistency, expected %d terminals but encountered %d terminals, it is likely net terminal is disconnected in netlist file.\n", + clb_nlist.net_name(clb_net_id).c_str(), size_t(clb_net_id), count[clb_net_id], + clb_nlist.net_sinks(clb_net_id).size()); + } - if (tile_type->is_ignored_pin[j]) { - clb_nlist.set_net_is_ignored(clb_net_id, true); - } - /* Error check performed later to ensure no mixing of ignored and non ignored signals */ + //Asserts the ClusterBlockId is the same when ClusterNetId & pin BitIndex is provided + VTR_ASSERT(blk_id == clb_nlist.pin_block(*(clb_nlist.net_pins(clb_net_id).begin() + count[clb_net_id]))); + //Asserts the block's pin index is the same + VTR_ASSERT(j == clb_nlist.pin_physical_index(*(clb_nlist.net_pins(clb_net_id).begin() + count[clb_net_id]))); + VTR_ASSERT(j == clb_nlist.net_pin_physical_index(clb_net_id, count[clb_net_id])); - } else { - VTR_ASSERT(DRIVER == tile_type->class_inf[tile_type->pin_class[j]].type); - VTR_ASSERT(j == clb_nlist.pin_physical_index(*(clb_nlist.net_pins(clb_net_id).begin()))); - VTR_ASSERT(j == clb_nlist.net_pin_physical_index(clb_net_id, 0)); 
+ // nets connecting to global pins are marked as global nets + if (tile_type->is_pin_global[j]) { + clb_nlist.set_net_is_global(clb_net_id, true); + } + + if (tile_type->is_ignored_pin[j]) { + clb_nlist.set_net_is_ignored(clb_net_id, true); + } + /* Error check performed later to ensure no mixing of ignored and non ignored signals */ + + } else { + VTR_ASSERT(DRIVER == tile_type->class_inf[tile_type->pin_class[j]].type); + VTR_ASSERT(j == clb_nlist.pin_physical_index(*(clb_nlist.net_pins(clb_net_id).begin()))); + VTR_ASSERT(j == clb_nlist.net_pin_physical_index(clb_net_id, 0)); + } } } } @@ -999,8 +999,11 @@ static void load_external_nets_and_cb(ClusteredNetlist& clb_nlist) { for (auto pin_id : clb_nlist.net_sinks(net_id)) { bool is_ignored_net = clb_nlist.net_is_ignored(net_id); block_type = clb_nlist.block_type(clb_nlist.pin_block(pin_id)); - tile_type = physical_tile_type(block_type); - if (tile_type->is_ignored_pin[clb_nlist.pin_physical_index(pin_id)] != is_ignored_net) { + auto tile_type = pick_random_physical_type(block_type); + int log_pin = clb_nlist.pin_logical_index(pin_id); + int phy_pin = get_physical_pin(tile_type, block_type, log_pin); + + if (tile_type->is_ignored_pin[phy_pin] != is_ignored_net) { VTR_LOG_WARN( "Netlist connects net %s to both global and non-global pins.\n", clb_nlist.net_name(net_id).c_str()); diff --git a/vpr/src/base/read_place.cpp b/vpr/src/base/read_place.cpp index 1b0e363b2f3..95640c7fbc0 100644 --- a/vpr/src/base/read_place.cpp +++ b/vpr/src/base/read_place.cpp @@ -161,7 +161,7 @@ void read_user_pad_loc(const char* pad_loc_file) { hash_table = alloc_hash_table(); for (auto blk_id : cluster_ctx.clb_nlist.blocks()) { auto logical_block = cluster_ctx.clb_nlist.block_type(blk_id); - if (is_io_type(physical_tile_type(logical_block))) { + if (is_io_type(pick_random_physical_type(logical_block))) { insert_in_hash_table(hash_table, cluster_ctx.clb_nlist.block_name(blk_id).c_str(), size_t(blk_id)); place_ctx.block_locs[blk_id].loc.x 
= OPEN; /* Mark as not seen yet. */ } @@ -268,7 +268,7 @@ void read_user_pad_loc(const char* pad_loc_file) { for (auto blk_id : cluster_ctx.clb_nlist.blocks()) { auto logical_block = cluster_ctx.clb_nlist.block_type(blk_id); - auto type = physical_tile_type(logical_block); + auto type = pick_random_physical_type(logical_block); if (is_io_type(type) && place_ctx.block_locs[blk_id].loc.x == OPEN) { vpr_throw(VPR_ERROR_PLACE_F, pad_loc_file, 0, "IO block %s location was not specified in the pad file.\n", cluster_ctx.clb_nlist.block_name(blk_id).c_str()); diff --git a/vpr/src/draw/draw.cpp b/vpr/src/draw/draw.cpp index 680c6053c6d..c9304fb8ade 100644 --- a/vpr/src/draw/draw.cpp +++ b/vpr/src/draw/draw.cpp @@ -2730,11 +2730,9 @@ void deselect_all() { static void draw_reset_blk_color(ClusterBlockId blk_id) { auto& cluster_ctx = g_vpr_ctx.clustering(); - auto logical_block = cluster_ctx.clb_nlist.block_type(blk_id); - t_draw_state* draw_state = get_draw_state_vars(); - draw_state->block_color[blk_id] = get_block_type_color(physical_tile_type(logical_block)); + draw_state->block_color[blk_id] = get_block_type_color(physical_tile_type(blk_id)); } /** diff --git a/vpr/src/place/initial_placement.cpp b/vpr/src/place/initial_placement.cpp index ff0c82b9e55..80952af3a74 100644 --- a/vpr/src/place/initial_placement.cpp +++ b/vpr/src/place/initial_placement.cpp @@ -273,7 +273,7 @@ static void initial_placement_blocks(int* free_locations, enum e_pad_loc_type pa auto logical_block = cluster_ctx.clb_nlist.block_type(blk_id); /* Don't do IOs if the user specifies IOs; we'll read those locations later. */ - if (!(is_io_type(physical_tile_type(logical_block)) && pad_loc_type == USER)) { + if (!(is_io_type(pick_random_physical_type(logical_block)) && pad_loc_type == USER)) { /* Randomly select a free location of the appropriate type for blk_id. * We have a linearized list of all the free locations that can * accommodate a block of that type in free_locations[itype]. 
@@ -304,7 +304,7 @@ static void initial_placement_blocks(int* free_locations, enum e_pad_loc_type pa place_ctx.block_locs[blk_id].loc = to; //Mark IOs as fixed if specifying a (fixed) random placement - if (is_io_type(physical_tile_type(logical_block)) && pad_loc_type == RANDOM) { + if (is_io_type(pick_random_physical_type(logical_block)) && pad_loc_type == RANDOM) { place_ctx.block_locs[blk_id].is_fixed = true; } diff --git a/vpr/src/place/uniform_move_generator.cpp b/vpr/src/place/uniform_move_generator.cpp index 93261b7469b..82cd632802e 100644 --- a/vpr/src/place/uniform_move_generator.cpp +++ b/vpr/src/place/uniform_move_generator.cpp @@ -18,7 +18,7 @@ e_create_move UniformMoveGenerator::propose_move(t_pl_blocks_to_be_moved& blocks t_pl_loc to; - auto type = pick_random_placement_type(cluster_from_type); + auto type = pick_random_physical_type(cluster_from_type); if (!find_to_loc_uniform(type, rlim, from, to)) { return e_create_move::ABORT; diff --git a/vpr/src/route/route_common.cpp b/vpr/src/route/route_common.cpp index 10ffec2ff23..a8a72f12977 100644 --- a/vpr/src/route/route_common.cpp +++ b/vpr/src/route/route_common.cpp @@ -1059,10 +1059,7 @@ static vtr::vector> load_net_rr_terminals(const t * are offset to get their actual rr_node */ node_block_pin = cluster_ctx.clb_nlist.pin_logical_index(pin_id); - auto pin_directs_map = type->tile_block_pin_directs_map; - auto map = pin_directs_map[logical_block->index]; - - auto orig_phys_pin = map[t_logical_pin(node_block_pin)].pin; + int orig_phys_pin = get_physical_pin(type, logical_block, node_block_pin); VTR_ASSERT(type->num_pins % type->capacity == 0); int max_num_block_pins = type->num_pins / type->capacity; diff --git a/vpr/src/util/vpr_utils.cpp b/vpr/src/util/vpr_utils.cpp index 462b36eb522..29a03b84cb8 100644 --- a/vpr/src/util/vpr_utils.cpp +++ b/vpr/src/util/vpr_utils.cpp @@ -318,7 +318,7 @@ std::vector find_clb_pin_connected_atom_pins(ClusterBlockId clb, int auto logical_block = 
clb_nlist.block_type(clb); - if (is_opin(log_pin, physical_tile_type(logical_block))) { + if (is_opin(log_pin, pick_random_physical_type(logical_block))) { //output AtomPinId driver = find_clb_pin_driver_atom_pin(clb, log_pin, pb_gpin_lookup); if (driver) { @@ -624,20 +624,6 @@ bool is_empty_type(t_logical_block_type_ptr type) { return type == device_ctx.EMPTY_LOGICAL_BLOCK_TYPE; } -t_physical_tile_type_ptr physical_tile_type(t_logical_block_type_ptr logical_block_type) { - auto& device_ctx = g_vpr_ctx.device(); - auto& physical_tiles = device_ctx.physical_tile_types; - - // Loop through the ordered map to get tiles in a decreasing priority order - for (auto& physical_tiles_ids : logical_block_type->physical_tiles_priority) { - for (auto tile_id : physical_tiles_ids.second) { - return &physical_tiles[tile_id]; - } - } - - VPR_THROW(VPR_ERROR_OTHER, "No corresponding physical tile type found for logical block type %s\n", logical_block_type->name); -} - t_physical_tile_type_ptr physical_tile_type(ClusterBlockId blk) { auto& place_ctx = g_vpr_ctx.placement(); auto& device_ctx = g_vpr_ctx.device(); @@ -2176,7 +2162,7 @@ bool is_tile_compatible(t_physical_tile_type_ptr physical_tile, t_logical_block_ return std::find(equivalent_tiles.begin(), equivalent_tiles.end(), physical_tile) != equivalent_tiles.end(); } -t_physical_tile_type_ptr pick_random_placement_type(t_logical_block_type_ptr logical_block) { +t_physical_tile_type_ptr pick_random_physical_type(t_logical_block_type_ptr logical_block) { auto equivalent_tiles = logical_block->equivalent_tiles; size_t num_equivalent_tiles = equivalent_tiles.size(); @@ -2189,6 +2175,44 @@ t_physical_tile_type_ptr pick_random_placement_type(t_logical_block_type_ptr log return equivalent_tiles[index]; } +int get_logical_pin(t_physical_tile_type_ptr physical_tile, + t_logical_block_type_ptr logical_block, + int pin) { + t_physical_pin physical_pin(pin); + + auto direct_map = 
physical_tile->tile_block_pin_directs_map.at(logical_block->index); + auto result = direct_map.find(physical_pin); + + if (result == direct_map.inverse_end()) { + VTR_LOG_WARN( + "Couldn't find the corresponding logical pin of the physical pin %d." + "Physical Tile: %s, Logical Block: %s.\n", + pin, physical_tile->name, logical_block->name); + return OPEN; + } + + return result->second.pin; +} + +int get_physical_pin(t_physical_tile_type_ptr physical_tile, + t_logical_block_type_ptr logical_block, + int pin) { + t_logical_pin logical_pin(pin); + + auto direct_map = physical_tile->tile_block_pin_directs_map.at(logical_block->index); + auto result = direct_map.find(logical_pin); + + if (result == direct_map.end()) { + VTR_LOG_WARN( + "Couldn't find the corresponding physical pin of the logical pin %d." + "Physical Tile: %s, Logical Block: %s.\n", + pin, physical_tile->name, logical_block->name); + return OPEN; + } + + return result->second.pin; +} + void pretty_print_uint(const char* prefix, size_t value, int num_digits, int scientific_precision) { //Print as integer if it will fit in the width, other wise scientific if (value <= std::pow(10, num_digits) - 1) { diff --git a/vpr/src/util/vpr_utils.h b/vpr/src/util/vpr_utils.h index 36533a4ce1f..99842bb4bf7 100644 --- a/vpr/src/util/vpr_utils.h +++ b/vpr/src/util/vpr_utils.h @@ -29,7 +29,6 @@ bool is_empty_type(t_physical_tile_type_ptr type); bool is_empty_type(t_logical_block_type_ptr type); //Returns the corresponding physical/logical type given the logical/physical type as parameter -t_physical_tile_type_ptr physical_tile_type(t_logical_block_type_ptr logical_block_type); t_physical_tile_type_ptr physical_tile_type(ClusterBlockId blk); t_logical_block_type_ptr logical_block_type(t_physical_tile_type_ptr physical_tile_type); @@ -172,7 +171,14 @@ void place_sync_external_block_connections(ClusterBlockId iblk); int get_max_num_pins(t_logical_block_type_ptr logical_block); bool 
is_tile_compatible(t_physical_tile_type_ptr physical_tile, t_logical_block_type_ptr logical_block); -t_physical_tile_type_ptr pick_random_placement_type(t_logical_block_type_ptr logical_block); +t_physical_tile_type_ptr pick_random_physical_type(t_logical_block_type_ptr logical_block); + +int get_logical_pin(t_physical_tile_type_ptr physical_tile, + t_logical_block_type_ptr logical_block, + int pin); +int get_physical_pin(t_physical_tile_type_ptr physical_tile, + t_logical_block_type_ptr logical_block, + int pin); int max_pins_per_grid_tile(); From 7bd7f03e3b45d7fc8d49f86cf01d7f405bfe0a58 Mon Sep 17 00:00:00 2001 From: Alessandro Comodi Date: Fri, 18 Oct 2019 16:52:38 +0200 Subject: [PATCH 31/58] equivalent: updated regression test (WIP) Signed-off-by: Alessandro Comodi --- vtr_flow/arch/equivalent_sites/equivalent.xml | 236 +++ vtr_flow/arch/equivalent_sites/slice.xml | 1828 ----------------- .../microbenchmarks/equivalent.blif | 10 + .../strong_equivalent_sites/config/config.txt | 4 +- 4 files changed, 248 insertions(+), 1830 deletions(-) create mode 100644 vtr_flow/arch/equivalent_sites/equivalent.xml delete mode 100644 vtr_flow/arch/equivalent_sites/slice.xml create mode 100644 vtr_flow/benchmarks/microbenchmarks/equivalent.blif diff --git a/vtr_flow/arch/equivalent_sites/equivalent.xml b/vtr_flow/arch/equivalent_sites/equivalent.xml new file mode 100644 index 00000000000..2180d833025 --- /dev/null +++ b/vtr_flow/arch/equivalent_sites/equivalent.xml @@ -0,0 +1,236 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + io_tile.I io_tile.O + io_tile.I io_tile.O + io_tile.I io_tile.O + io_tile.I io_tile.O + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 
+ + + + + + + + + + + + + + + + + + + + 1 1 1 1 1 1 1 1 1 1 1 1 1 + 1 1 1 1 1 1 1 1 1 1 1 1 + + + diff --git a/vtr_flow/arch/equivalent_sites/slice.xml b/vtr_flow/arch/equivalent_sites/slice.xml deleted file mode 100644 index b182649f552..00000000000 --- a/vtr_flow/arch/equivalent_sites/slice.xml +++ /dev/null @@ -1,1828 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - io_tile.in io_tile.out - io_tile.in io_tile.out - io_tile.in io_tile.out - io_tile.in io_tile.out - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - 0.068e-9 - 0.068e-9 - 0.068e-9 - 0.068e-9 - 0.068e-9 - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - 0.068e-9 - 0.068e-9 - 0.068e-9 - 0.068e-9 - 0.068e-9 - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - 
- - - 0.068e-9 - 0.068e-9 - 0.068e-9 - 0.068e-9 - 0.068e-9 - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - 0.068e-9 - 0.068e-9 - 0.068e-9 - 0.068e-9 - 0.068e-9 - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - 0.068e-9 - 0.068e-9 - 0.068e-9 - 0.068e-9 - 0.068e-9 - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - 0.068e-9 - 0.068e-9 - 0.068e-9 - 0.068e-9 - 0.068e-9 - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - 0.068e-9 - 0.068e-9 - 0.068e-9 - 0.068e-9 - 0.068e-9 - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - 0.068e-9 - 0.068e-9 - 0.068e-9 - 0.068e-9 - 0.068e-9 - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - 0.068e-9 - 0.068e-9 - 0.068e-9 - 0.068e-9 - 0.068e-9 - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - 0.068e-9 - 0.068e-9 - 0.068e-9 - 0.068e-9 - 0.068e-9 - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - 0.068e-9 - 0.068e-9 - 0.068e-9 - 0.068e-9 - 0.068e-9 - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - 
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - 1 1 1 1 1 1 1 1 1 1 1 1 1 - 1 1 1 1 1 1 1 1 1 1 1 1 - - - diff --git a/vtr_flow/benchmarks/microbenchmarks/equivalent.blif b/vtr_flow/benchmarks/microbenchmarks/equivalent.blif new file mode 100644 index 00000000000..2ead16d0908 --- /dev/null +++ b/vtr_flow/benchmarks/microbenchmarks/equivalent.blif @@ -0,0 +1,10 @@ +.model top +.inputs in +.outputs out +.names $false +.names $true +1 +.subckt IO_0 I=in O=out_1 +.subckt IO_1 I=out_1 O=out_2 +.subckt IO_2 I=out_2 O=out +.end diff --git a/vtr_flow/tasks/regression_tests/vtr_reg_strong/strong_equivalent_sites/config/config.txt b/vtr_flow/tasks/regression_tests/vtr_reg_strong/strong_equivalent_sites/config/config.txt index ce9abe5c381..c028818fe53 100644 --- a/vtr_flow/tasks/regression_tests/vtr_reg_strong/strong_equivalent_sites/config/config.txt +++ b/vtr_flow/tasks/regression_tests/vtr_reg_strong/strong_equivalent_sites/config/config.txt @@ -9,10 +9,10 @@ circuits_dir=benchmarks/microbenchmarks archs_dir=arch/equivalent_sites # Add circuits to list to sweep -circuit_list_add=carry_chain.blif +circuit_list_add=equivalent.blif # Add architectures to list to sweep -arch_list_add=slice.xml +arch_list_add=equivalent.xml # Parse info and how to parse parse_file=vpr_standard.txt From 19f1e962ca1db56f6c42b6cf60b645cf63f77560 Mon Sep 17 00:00:00 2001 From: Alessandro Comodi Date: Fri, 18 Oct 2019 16:53:37 +0200 Subject: [PATCH 32/58] equivalent: fix potential segfaults 
Signed-off-by: Alessandro Comodi --- vpr/src/pack/output_clustering.cpp | 29 +++++++++++++++++------------ vpr/src/place/place_macro.cpp | 19 +++++++++++++------ 2 files changed, 30 insertions(+), 18 deletions(-) diff --git a/vpr/src/pack/output_clustering.cpp b/vpr/src/pack/output_clustering.cpp index e51c3e706dd..d881b9eb510 100644 --- a/vpr/src/pack/output_clustering.cpp +++ b/vpr/src/pack/output_clustering.cpp @@ -63,19 +63,24 @@ static void print_stats() { /* Counters used only for statistics purposes. */ for (auto blk_id : cluster_ctx.clb_nlist.blocks()) { - // XXX Use mapping here - auto type = cluster_ctx.clb_nlist.block_type(blk_id)->equivalent_tiles[0]; - for (ipin = 0; ipin < type->num_pins; ipin++) { + auto logical_block = cluster_ctx.clb_nlist.block_type(blk_id); + auto physical_tile = pick_random_physical_type(logical_block); + for (ipin = 0; ipin < logical_block->pb_type->num_pins; ipin++) { + int phy_pin = get_physical_pin(physical_tile, logical_block, ipin); + auto pin_class = physical_tile->pin_class[phy_pin]; + auto pin_class_inf = physical_tile->class_inf[pin_class]; + if (cluster_ctx.clb_nlist.block_pb(blk_id)->pb_route.empty()) { ClusterNetId clb_net_id = cluster_ctx.clb_nlist.block_net(blk_id, ipin); if (clb_net_id != ClusterNetId::INVALID()) { auto net_id = atom_ctx.lookup.atom_net(clb_net_id); VTR_ASSERT(net_id); nets_absorbed[net_id] = false; - if (type->class_inf[type->pin_class[ipin]].type == RECEIVER) { - num_clb_inputs_used[type->index]++; - } else if (type->class_inf[type->pin_class[ipin]].type == DRIVER) { - num_clb_outputs_used[type->index]++; + + if (pin_class_inf.type == RECEIVER) { + num_clb_inputs_used[logical_block->index]++; + } else if (pin_class_inf.type == DRIVER) { + num_clb_outputs_used[logical_block->index]++; } } } else { @@ -87,16 +92,16 @@ static void print_stats() { auto atom_net_id = pb->pb_route[pb_graph_pin_id].atom_net_id; if (atom_net_id) { nets_absorbed[atom_net_id] = false; - if 
(type->class_inf[type->pin_class[ipin]].type == RECEIVER) { - num_clb_inputs_used[type->index]++; - } else if (type->class_inf[type->pin_class[ipin]].type == DRIVER) { - num_clb_outputs_used[type->index]++; + if (pin_class_inf.type == RECEIVER) { + num_clb_inputs_used[logical_block->index]++; + } else if (pin_class_inf.type == DRIVER) { + num_clb_outputs_used[logical_block->index]++; } } } } } - num_clb_types[type->index]++; + num_clb_types[logical_block->index]++; } for (itype = 0; itype < device_ctx.logical_block_types.size(); itype++) { diff --git a/vpr/src/place/place_macro.cpp b/vpr/src/place/place_macro.cpp index 4b84afad3d4..db8cf827dfa 100644 --- a/vpr/src/place/place_macro.cpp +++ b/vpr/src/place/place_macro.cpp @@ -77,11 +77,16 @@ static void find_all_the_macro(int* num_of_macro, std::vector& p num_macro = 0; for (auto blk_id : cluster_ctx.clb_nlist.blocks()) { + auto logical_block = cluster_ctx.clb_nlist.block_type(blk_id); + auto physical_tile = pick_random_physical_type(logical_block); + num_blk_pins = cluster_ctx.clb_nlist.block_type(blk_id)->pb_type->num_pins; for (to_iblk_pin = 0; to_iblk_pin < num_blk_pins; to_iblk_pin++) { + int to_phy_pin = get_physical_pin(physical_tile, logical_block, to_iblk_pin); + to_net_id = cluster_ctx.clb_nlist.block_net(blk_id, to_iblk_pin); - to_idirect = f_idirect_from_blk_pin[cluster_ctx.clb_nlist.block_type(blk_id)->index][to_iblk_pin]; - to_src_or_sink = f_direct_type_from_blk_pin[cluster_ctx.clb_nlist.block_type(blk_id)->index][to_iblk_pin]; + to_idirect = f_idirect_from_blk_pin[physical_tile->index][to_phy_pin]; + to_src_or_sink = f_direct_type_from_blk_pin[physical_tile->index][to_phy_pin]; // Identify potential macro head blocks (i.e. 
start of a macro) // @@ -97,9 +102,11 @@ static void find_all_the_macro(int* num_of_macro, std::vector& p || (is_constant_clb_net(to_net_id) && !net_is_driven_by_direct(to_net_id)))) { for (from_iblk_pin = 0; from_iblk_pin < num_blk_pins; from_iblk_pin++) { + int from_phy_pin = get_physical_pin(physical_tile, logical_block, from_iblk_pin); + from_net_id = cluster_ctx.clb_nlist.block_net(blk_id, from_iblk_pin); - from_idirect = f_idirect_from_blk_pin[cluster_ctx.clb_nlist.block_type(blk_id)->index][from_iblk_pin]; - from_src_or_sink = f_direct_type_from_blk_pin[cluster_ctx.clb_nlist.block_type(blk_id)->index][from_iblk_pin]; + from_idirect = f_idirect_from_blk_pin[physical_tile->index][from_phy_pin]; + from_src_or_sink = f_direct_type_from_blk_pin[physical_tile->index][from_phy_pin]; // Confirm whether this is a head macro // @@ -129,8 +136,8 @@ static void find_all_the_macro(int* num_of_macro, std::vector& p next_blk_id = cluster_ctx.clb_nlist.net_pin_block(curr_net_id, 1); // Assume that the from_iblk_pin index is the same for the next block - VTR_ASSERT(f_idirect_from_blk_pin[cluster_ctx.clb_nlist.block_type(next_blk_id)->index][from_iblk_pin] == from_idirect - && f_direct_type_from_blk_pin[cluster_ctx.clb_nlist.block_type(next_blk_id)->index][from_iblk_pin] == SOURCE); + VTR_ASSERT(f_idirect_from_blk_pin[physical_tile->index][from_phy_pin] == from_idirect + && f_direct_type_from_blk_pin[physical_tile->index][from_phy_pin] == SOURCE); next_net_id = cluster_ctx.clb_nlist.block_net(next_blk_id, from_iblk_pin); // Mark down this block as a member of the macro From b1534f84741286f31b131eeb4ea2afac3167d7b0 Mon Sep 17 00:00:00 2001 From: Alessandro Comodi Date: Fri, 18 Oct 2019 19:29:11 +0200 Subject: [PATCH 33/58] equivalent: fixed physical-logical pin bug Signed-off-by: Alessandro Comodi --- vpr/src/base/read_netlist.cpp | 70 +++++++++++++++++------------------ vpr/src/util/vpr_utils.cpp | 5 ++- 2 files changed, 39 insertions(+), 36 deletions(-) diff --git 
a/vpr/src/base/read_netlist.cpp b/vpr/src/base/read_netlist.cpp index d310d2b1668..fb461bb3f29 100644 --- a/vpr/src/base/read_netlist.cpp +++ b/vpr/src/base/read_netlist.cpp @@ -949,45 +949,45 @@ static void load_external_nets_and_cb(ClusteredNetlist& clb_nlist) { * and blocks point back to net pins */ for (auto blk_id : clb_nlist.blocks()) { block_type = clb_nlist.block_type(blk_id); - // XXX Use pin mapping here! To check that all the possible pins can be used in the correct tile! - for (const auto& tile_type : block_type->equivalent_tiles) { - for (j = 0; j < tile_type->num_pins; j++) { - //Iterate through each pin of the block, and see if there is a net allocated/used for it - clb_net_id = clb_nlist.block_net(blk_id, j); - - if (clb_net_id != ClusterNetId::INVALID()) { - //Verify old and new CLB netlists have the same # of pins per net - if (RECEIVER == tile_type->class_inf[tile_type->pin_class[j]].type) { - count[clb_net_id]++; - - if (count[clb_net_id] > (int)clb_nlist.net_sinks(clb_net_id).size()) { - VPR_FATAL_ERROR(VPR_ERROR_NET_F, - "net %s #%d inconsistency, expected %d terminals but encountered %d terminals, it is likely net terminal is disconnected in netlist file.\n", - clb_nlist.net_name(clb_net_id).c_str(), size_t(clb_net_id), count[clb_net_id], - clb_nlist.net_sinks(clb_net_id).size()); - } - - //Asserts the ClusterBlockId is the same when ClusterNetId & pin BitIndex is provided - VTR_ASSERT(blk_id == clb_nlist.pin_block(*(clb_nlist.net_pins(clb_net_id).begin() + count[clb_net_id]))); - //Asserts the block's pin index is the same - VTR_ASSERT(j == clb_nlist.pin_physical_index(*(clb_nlist.net_pins(clb_net_id).begin() + count[clb_net_id]))); - VTR_ASSERT(j == clb_nlist.net_pin_physical_index(clb_net_id, count[clb_net_id])); + auto tile_type = pick_random_physical_type(block_type); + for (j = 0; j < block_type->pb_type->num_pins; j++) { + int phy_pin = get_physical_pin(tile_type, block_type, j); + + //Iterate through each pin of the block, and see if 
there is a net allocated/used for it + clb_net_id = clb_nlist.block_net(blk_id, j); + + if (clb_net_id != ClusterNetId::INVALID()) { + //Verify old and new CLB netlists have the same # of pins per net + if (RECEIVER == tile_type->class_inf[tile_type->pin_class[phy_pin]].type) { + count[clb_net_id]++; + + if (count[clb_net_id] > (int)clb_nlist.net_sinks(clb_net_id).size()) { + VPR_FATAL_ERROR(VPR_ERROR_NET_F, + "net %s #%d inconsistency, expected %d terminals but encountered %d terminals, it is likely net terminal is disconnected in netlist file.\n", + clb_nlist.net_name(clb_net_id).c_str(), size_t(clb_net_id), count[clb_net_id], + clb_nlist.net_sinks(clb_net_id).size()); + } - // nets connecting to global pins are marked as global nets - if (tile_type->is_pin_global[j]) { - clb_nlist.set_net_is_global(clb_net_id, true); - } + //Asserts the ClusterBlockId is the same when ClusterNetId & pin BitIndex is provided + VTR_ASSERT(blk_id == clb_nlist.pin_block(*(clb_nlist.net_pins(clb_net_id).begin() + count[clb_net_id]))); + //Asserts the block's pin index is the same + VTR_ASSERT(j == clb_nlist.pin_physical_index(*(clb_nlist.net_pins(clb_net_id).begin() + count[clb_net_id]))); + VTR_ASSERT(j == clb_nlist.net_pin_physical_index(clb_net_id, count[clb_net_id])); - if (tile_type->is_ignored_pin[j]) { - clb_nlist.set_net_is_ignored(clb_net_id, true); - } - /* Error check performed later to ensure no mixing of ignored and non ignored signals */ + // nets connecting to global pins are marked as global nets + if (tile_type->is_pin_global[phy_pin]) { + clb_nlist.set_net_is_global(clb_net_id, true); + } - } else { - VTR_ASSERT(DRIVER == tile_type->class_inf[tile_type->pin_class[j]].type); - VTR_ASSERT(j == clb_nlist.pin_physical_index(*(clb_nlist.net_pins(clb_net_id).begin()))); - VTR_ASSERT(j == clb_nlist.net_pin_physical_index(clb_net_id, 0)); + if (tile_type->is_ignored_pin[phy_pin]) { + clb_nlist.set_net_is_ignored(clb_net_id, true); } + /* Error check performed later to 
ensure no mixing of ignored and non ignored signals */ + + } else { + VTR_ASSERT(DRIVER == tile_type->class_inf[tile_type->pin_class[phy_pin]].type); + VTR_ASSERT(j == clb_nlist.pin_physical_index(*(clb_nlist.net_pins(clb_net_id).begin()))); + VTR_ASSERT(j == clb_nlist.net_pin_physical_index(clb_net_id, 0)); } } } diff --git a/vpr/src/util/vpr_utils.cpp b/vpr/src/util/vpr_utils.cpp index 29a03b84cb8..31220cd55bd 100644 --- a/vpr/src/util/vpr_utils.cpp +++ b/vpr/src/util/vpr_utils.cpp @@ -317,8 +317,11 @@ std::vector find_clb_pin_connected_atom_pins(ClusterBlockId clb, int auto& clb_nlist = g_vpr_ctx.clustering().clb_nlist; auto logical_block = clb_nlist.block_type(clb); + auto physical_tile = pick_random_physical_type(logical_block); - if (is_opin(log_pin, pick_random_physical_type(logical_block))) { + int phy_pin = get_physical_pin(physical_tile, logical_block, log_pin); + + if (is_opin(phy_pin, physical_tile)) { //output AtomPinId driver = find_clb_pin_driver_atom_pin(clb, log_pin, pb_gpin_lookup); if (driver) { From 92a0288fae0c1164c2ea1500c4cb3a43d2f3c7a3 Mon Sep 17 00:00:00 2001 From: Alessandro Comodi Date: Mon, 21 Oct 2019 12:46:51 +0200 Subject: [PATCH 34/58] equivalent: use logical-physical pin mappings Signed-off-by: Alessandro Comodi --- vpr/src/base/ShowSetup.cpp | 18 +++++++++++------- vpr/src/draw/draw.cpp | 8 +++++--- 2 files changed, 16 insertions(+), 10 deletions(-) diff --git a/vpr/src/base/ShowSetup.cpp b/vpr/src/base/ShowSetup.cpp index b8a66d3345a..3a96ab9851b 100644 --- a/vpr/src/base/ShowSetup.cpp +++ b/vpr/src/base/ShowSetup.cpp @@ -74,16 +74,20 @@ void printClusteredNetlistStats() { L_num_p_outputs = 0; for (auto blk_id : cluster_ctx.clb_nlist.blocks()) { - num_blocks_type[cluster_ctx.clb_nlist.block_type(blk_id)->index]++; - // XXX mapping here - auto type = cluster_ctx.clb_nlist.block_type(blk_id)->equivalent_tiles[0]; - if (is_io_type(type)) { - for (j = 0; j < type->num_pins; j++) { + auto logical_block = 
cluster_ctx.clb_nlist.block_type(blk_id); + auto physical_tile = pick_random_physical_type(logical_block); + num_blocks_type[logical_block->index]++; + if (is_io_type(physical_tile)) { + for (j = 0; j < logical_block->pb_type->num_pins; j++) { + int phy_pin = get_physical_pin(physical_tile, logical_block, j); + auto pin_class = physical_tile->pin_class[phy_pin]; + auto class_inf = physical_tile->class_inf[pin_class]; + if (cluster_ctx.clb_nlist.block_net(blk_id, j) != ClusterNetId::INVALID()) { - if (type->class_inf[type->pin_class[j]].type == DRIVER) { + if (class_inf.type == DRIVER) { L_num_p_inputs++; } else { - VTR_ASSERT(type->class_inf[type->pin_class[j]].type == RECEIVER); + VTR_ASSERT(class_inf.type == RECEIVER); L_num_p_outputs++; } } diff --git a/vpr/src/draw/draw.cpp b/vpr/src/draw/draw.cpp index c9304fb8ade..e279b0c1f3d 100644 --- a/vpr/src/draw/draw.cpp +++ b/vpr/src/draw/draw.cpp @@ -2660,10 +2660,12 @@ void draw_highlight_blocks_color(t_logical_block_type_ptr type, ClusterBlockId b if (net_id == ClusterNetId::INVALID()) continue; - // XXX Logical Physical Mapping to be used here - iclass = physical_tile_type(blk_id)->pin_class[k]; + auto physical_tile = physical_tile_type(blk_id); + int phy_pin = get_physical_pin(physical_tile, type, k); - if (physical_tile_type(blk_id)->class_inf[iclass].type == DRIVER) { /* Fanout */ + iclass = physical_tile->pin_class[phy_pin]; + + if (physical_tile->class_inf[iclass].type == DRIVER) { /* Fanout */ if (draw_state->block_color[blk_id] == SELECTED_COLOR) { /* If block already highlighted, de-highlight the fanout. (the deselect case)*/ draw_state->net_color[net_id] = ezgl::BLACK; From aefafd5ccc22b48fe7a47e9060c106c17f726e8f Mon Sep 17 00:00:00 2001 From: Keith Rothman <537074+litghost@users.noreply.github.com> Date: Thu, 7 Nov 2019 11:49:43 -0800 Subject: [PATCH 35/58] Fix failing assertion. 
Signed-off-by: Keith Rothman <537074+litghost@users.noreply.github.com> --- vpr/src/base/clustered_netlist.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/vpr/src/base/clustered_netlist.cpp b/vpr/src/base/clustered_netlist.cpp index eb9b1cabba5..fc37cc0d249 100644 --- a/vpr/src/base/clustered_netlist.cpp +++ b/vpr/src/base/clustered_netlist.cpp @@ -52,7 +52,7 @@ int ClusteredNetlist::block_pin_net_index(const ClusterBlockId blk_id, const int ClusterPinId ClusteredNetlist::block_pin(const ClusterBlockId blk, const int phys_pin_index) const { VTR_ASSERT_SAFE(valid_block_id(blk)); - VTR_ASSERT_SAFE_MSG(phys_pin_index >= 0 && phys_pin_index < pick_random_physical_type(block_type(blk))->num_pins, "Physical pin index must be in range"); + VTR_ASSERT_SAFE_MSG(phys_pin_index >= 0 && phys_pin_index < static_cast(block_logical_pins_[blk].size()), "Physical pin index must be in range"); return block_logical_pins_[blk][phys_pin_index]; } From e176779c2abfddd2ff923456a372a4fb015c4a95 Mon Sep 17 00:00:00 2001 From: Alessandro Comodi Date: Fri, 18 Oct 2019 17:03:32 +0200 Subject: [PATCH 36/58] equivalent: fix warning in draw.cpp Signed-off-by: Alessandro Comodi --- vpr/src/draw/draw.cpp | 2 -- 1 file changed, 2 deletions(-) diff --git a/vpr/src/draw/draw.cpp b/vpr/src/draw/draw.cpp index e279b0c1f3d..13ecb026494 100644 --- a/vpr/src/draw/draw.cpp +++ b/vpr/src/draw/draw.cpp @@ -2730,8 +2730,6 @@ void deselect_all() { } static void draw_reset_blk_color(ClusterBlockId blk_id) { - auto& cluster_ctx = g_vpr_ctx.clustering(); - t_draw_state* draw_state = get_draw_state_vars(); draw_state->block_color[blk_id] = get_block_type_color(physical_tile_type(blk_id)); From 319a64516d9c53023e3ab263d503940d1b55f4b0 Mon Sep 17 00:00:00 2001 From: Alessandro Comodi Date: Mon, 21 Oct 2019 10:24:43 +0200 Subject: [PATCH 37/58] WIP: testbench Signed-off-by: Alessandro Comodi --- vtr_flow/arch/equivalent_sites/equivalent.xml | 199 ++++++++---------- 
.../microbenchmarks/equivalent.blif | 5 +- 2 files changed, 85 insertions(+), 119 deletions(-) diff --git a/vtr_flow/arch/equivalent_sites/equivalent.xml b/vtr_flow/arch/equivalent_sites/equivalent.xml index 2180d833025..7caf8788455 100644 --- a/vtr_flow/arch/equivalent_sites/equivalent.xml +++ b/vtr_flow/arch/equivalent_sites/equivalent.xml @@ -3,89 +3,85 @@ - + - + - + - + - + - + - + - + - - + + + + + + + + + + + + + + - - + + - - + + - - + + - - + + - - io_tile.I io_tile.O - io_tile.I io_tile.O - io_tile.I io_tile.O - io_tile.I io_tile.O - + - - - - - - - - - - - - - - + + + - + @@ -93,118 +89,85 @@ - + - - - + + + - - - - - - - - - - - - - + + + + - + + - - - + + + + + + + + + - + + - - + + - - - - - - - - - - - - - - - - - - - - + + + - + + - - + + - - - - - - - - - - - - - - - - - - - - + + + - + + - + - - - - - - - - + + + + + + + + @@ -233,4 +196,8 @@ 1 1 1 1 1 1 1 1 1 1 1 1 + + + + diff --git a/vtr_flow/benchmarks/microbenchmarks/equivalent.blif b/vtr_flow/benchmarks/microbenchmarks/equivalent.blif index 2ead16d0908..6292a60d433 100644 --- a/vtr_flow/benchmarks/microbenchmarks/equivalent.blif +++ b/vtr_flow/benchmarks/microbenchmarks/equivalent.blif @@ -4,7 +4,6 @@ .names $false .names $true 1 -.subckt IO_0 I=in O=out_1 -.subckt IO_1 I=out_1 O=out_2 -.subckt IO_2 I=out_2 O=out +.subckt IO_0 in=in out=out_1 +.subckt IO_1 in=out_1 out=out .end From 8a2824bb2137dffae2f5e37ebbfdff73d4443250 Mon Sep 17 00:00:00 2001 From: Alessandro Comodi Date: Mon, 21 Oct 2019 17:29:15 +0200 Subject: [PATCH 38/58] equivalent: avoid using logical_block_type() func Also solved draw.cpp issue Signed-off-by: Alessandro Comodi --- libs/libarchfpga/src/read_xml_arch_file.cpp | 2 +- vpr/src/base/vpr_api.cpp | 9 ++++--- vpr/src/draw/draw.cpp | 8 ++++-- vpr/src/draw/draw_types.cpp | 2 +- vpr/src/draw/intra_logic_block.cpp | 8 +++--- vpr/src/power/power.cpp | 16 +++++++++--- vpr/src/route/clock_connection_builders.cpp | 12 +++++++-- vpr/src/util/vpr_utils.cpp | 27 
++++++++++----------- vpr/src/util/vpr_utils.h | 4 +-- 9 files changed, 56 insertions(+), 32 deletions(-) diff --git a/libs/libarchfpga/src/read_xml_arch_file.cpp b/libs/libarchfpga/src/read_xml_arch_file.cpp index a7a0583445c..08b3732fb60 100644 --- a/libs/libarchfpga/src/read_xml_arch_file.cpp +++ b/libs/libarchfpga/src/read_xml_arch_file.cpp @@ -642,7 +642,7 @@ static void SetupPinLocationsAndPinClasses(pugi::xml_node Locations, if (port.equivalent != PortEquivalence::NONE) { PhysicalTileType->class_inf[num_class].num_pins = port.num_pins; PhysicalTileType->class_inf[num_class].pinlist = (int*)vtr::malloc(sizeof(int) * port.num_pins); - PhysicalTileType->class_inf[num_class].equivalence = PhysicalTileType->ports[i].equivalent; + PhysicalTileType->class_inf[num_class].equivalence = port.equivalent; } for (k = 0; k < port.num_pins; ++k) { diff --git a/vpr/src/base/vpr_api.cpp b/vpr/src/base/vpr_api.cpp index a5cb7d66fd1..f2900d0d299 100644 --- a/vpr/src/base/vpr_api.cpp +++ b/vpr/src/base/vpr_api.cpp @@ -439,11 +439,14 @@ void vpr_create_device_grid(const t_vpr_setup& vpr_setup, const t_arch& Arch) { continue; } - float util = 0.; if (device_ctx.grid.num_instances(&type) != 0) { - util = float(num_type_instances[logical_block_type(&type)]) / device_ctx.grid.num_instances(&type); + float util = 0.; + VTR_LOG("\tPhysical Tile %s:\n", type.name); + for (auto logical_block : type.equivalent_sites) { + util = float(num_type_instances[logical_block]) / device_ctx.grid.num_instances(&type); + VTR_LOG("\tBlock Utilization: %.2f Logical Block: %s\n", util, logical_block->name); + } } - VTR_LOG("\tBlock Utilization: %.2f Type: %s\n", util, type.name); } VTR_LOG("\n"); diff --git a/vpr/src/draw/draw.cpp b/vpr/src/draw/draw.cpp index 13ecb026494..a04755be146 100644 --- a/vpr/src/draw/draw.cpp +++ b/vpr/src/draw/draw.cpp @@ -2730,9 +2730,13 @@ void deselect_all() { } static void draw_reset_blk_color(ClusterBlockId blk_id) { + auto& clb_nlist = g_vpr_ctx.clustering().clb_nlist; 
+ + auto logical_block = clb_nlist.block_type(blk_id); + t_draw_state* draw_state = get_draw_state_vars(); - draw_state->block_color[blk_id] = get_block_type_color(physical_tile_type(blk_id)); + draw_state->block_color[blk_id] = get_block_type_color(pick_random_physical_type(logical_block)); } /** @@ -3690,7 +3694,7 @@ static void highlight_blocks(double x, double y) { } } - if (clb_index == EMPTY_BLOCK_ID) { + if (clb_index == EMPTY_BLOCK_ID || clb_index == ClusterBlockId::INVALID()) { //Nothing found return; } diff --git a/vpr/src/draw/draw_types.cpp b/vpr/src/draw/draw_types.cpp index 7282aef73f9..14b0b45ca26 100644 --- a/vpr/src/draw/draw_types.cpp +++ b/vpr/src/draw/draw_types.cpp @@ -95,7 +95,7 @@ ezgl::rectangle t_draw_coords::get_absolute_clb_bbox(const ClusterBlockId clb_in ezgl::rectangle t_draw_coords::get_absolute_clb_bbox(int grid_x, int grid_y, int sub_block_index) { auto& device_ctx = g_vpr_ctx.device(); - return get_pb_bbox(grid_x, grid_y, sub_block_index, *logical_block_type(device_ctx.grid[grid_x][grid_y].type)->pb_graph_head); + return get_pb_bbox(grid_x, grid_y, sub_block_index, *pick_random_logical_type(device_ctx.grid[grid_x][grid_y].type)->pb_graph_head); } #endif // NO_GRAPHICS diff --git a/vpr/src/draw/intra_logic_block.cpp b/vpr/src/draw/intra_logic_block.cpp index 89740e3ad33..ac465f15266 100644 --- a/vpr/src/draw/intra_logic_block.cpp +++ b/vpr/src/draw/intra_logic_block.cpp @@ -92,10 +92,12 @@ void draw_internal_init_blk() { auto& device_ctx = g_vpr_ctx.device(); for (const auto& type : device_ctx.physical_tile_types) { /* Empty block has no sub_blocks */ - if (&type == device_ctx.EMPTY_PHYSICAL_TILE_TYPE) + if (is_empty_type(&type)) { continue; + } - pb_graph_head_node = logical_block_type(&type)->pb_graph_head; + auto logical_block = pick_random_logical_type(&type); + pb_graph_head_node = logical_block->pb_graph_head; int type_descriptor_index = type.index; int num_sub_tiles = type.capacity; @@ -129,7 +131,7 @@ void 
draw_internal_init_blk() { clb_bbox.width(), clb_bbox.height()); /* Determine the max number of sub_block levels in the FPGA */ - draw_state->max_sub_blk_lvl = std::max(draw_internal_find_max_lvl(*logical_block_type(&type)->pb_type), + draw_state->max_sub_blk_lvl = std::max(draw_internal_find_max_lvl(*logical_block->pb_type), draw_state->max_sub_blk_lvl); } } diff --git a/vpr/src/power/power.cpp b/vpr/src/power/power.cpp index 7350eae9907..758167d6b54 100644 --- a/vpr/src/power/power.cpp +++ b/vpr/src/power/power.cpp @@ -601,26 +601,34 @@ static void power_usage_blocks(t_power_usage* power_usage) { power_reset_tile_usage(); + t_logical_block_type_ptr logical_block; + /* Loop through all grid locations */ for (size_t x = 0; x < device_ctx.grid.width(); x++) { for (size_t y = 0; y < device_ctx.grid.height(); y++) { + auto physical_tile = device_ctx.grid[x][y].type; + if ((device_ctx.grid[x][y].width_offset != 0) || (device_ctx.grid[x][y].height_offset != 0) - || (device_ctx.grid[x][y].type == device_ctx.EMPTY_PHYSICAL_TILE_TYPE)) { + || is_empty_type(physical_tile)) { continue; } - for (int z = 0; z < device_ctx.grid[x][y].type->capacity; z++) { + for (int z = 0; z < physical_tile->capacity; z++) { t_pb* pb = nullptr; t_power_usage pb_power; ClusterBlockId iblk = place_ctx.grid_blocks[x][y].blocks[z]; - if (iblk != EMPTY_BLOCK_ID && iblk != INVALID_BLOCK_ID) + if (iblk != EMPTY_BLOCK_ID && iblk != INVALID_BLOCK_ID) { pb = cluster_ctx.clb_nlist.block_pb(iblk); + logical_block = cluster_ctx.clb_nlist.block_type(iblk); + } else { + logical_block = pick_random_logical_type(physical_tile); + } /* Calculate power of this CLB */ - power_usage_pb(&pb_power, pb, logical_block_type(device_ctx.grid[x][y].type)->pb_graph_head, iblk); + power_usage_pb(&pb_power, pb, logical_block->pb_graph_head, iblk); power_add_usage(power_usage, &pb_power); } } diff --git a/vpr/src/route/clock_connection_builders.cpp b/vpr/src/route/clock_connection_builders.cpp index b517d5fc460..e8fca69771b 
100644 --- a/vpr/src/route/clock_connection_builders.cpp +++ b/vpr/src/route/clock_connection_builders.cpp @@ -234,8 +234,16 @@ void ClockToPinsConnection::create_switches(const ClockRRGraphBuilder& clock_gra auto width_offset = grid[x][y].width_offset; auto height_offset = grid[x][y].height_offset; - // Ignore gird locations that do not have blocks - if (!logical_block_type(type)->pb_type) { + // Ignore grid locations that do not have blocks + bool has_pb_type = false; + for (auto logical_block : type->equivalent_sites) { + if (logical_block->pb_type) { + has_pb_type = true; + break; + } + } + + if (!has_pb_type) { continue; } diff --git a/vpr/src/util/vpr_utils.cpp b/vpr/src/util/vpr_utils.cpp index 31220cd55bd..376b208b02d 100644 --- a/vpr/src/util/vpr_utils.cpp +++ b/vpr/src/util/vpr_utils.cpp @@ -637,20 +637,6 @@ t_physical_tile_type_ptr physical_tile_type(ClusterBlockId blk) { return device_ctx.grid[loc.x][loc.y].type; } -t_logical_block_type_ptr logical_block_type(t_physical_tile_type_ptr physical_tile_type) { - auto& device_ctx = g_vpr_ctx.device(); - auto& logical_blocks = device_ctx.logical_block_types; - - // Loop through the ordered map to get tiles in a decreasing priority order - for (auto& logical_blocks_ids : physical_tile_type->logical_blocks_priority) { - for (auto block_id : logical_blocks_ids.second) { - return &logical_blocks[block_id]; - } - } - - VPR_THROW(VPR_ERROR_OTHER, "No corresponding logical block type found for physical tile type %s\n", physical_tile_type->name); -} - /* Each node in the pb_graph for a top-level pb_type can be uniquely identified * by its pins. 
Since the pins in a cluster of a certain type are densely indexed, * this function will find the pin index (int pin_count_in_cluster) of the first @@ -2178,6 +2164,19 @@ t_physical_tile_type_ptr pick_random_physical_type(t_logical_block_type_ptr logi return equivalent_tiles[index]; } +t_logical_block_type_ptr pick_random_logical_type(t_physical_tile_type_ptr physical_tile) { + auto equivalent_sites = physical_tile->equivalent_sites; + + size_t num_equivalent_sites = equivalent_sites.size(); + int index = 0; + + if (num_equivalent_sites > 1) { + index = vtr::irand((int)equivalent_sites.size() - 1); + } + + return equivalent_sites[index]; +} + int get_logical_pin(t_physical_tile_type_ptr physical_tile, t_logical_block_type_ptr logical_block, int pin) { diff --git a/vpr/src/util/vpr_utils.h b/vpr/src/util/vpr_utils.h index 99842bb4bf7..5b9368a7d17 100644 --- a/vpr/src/util/vpr_utils.h +++ b/vpr/src/util/vpr_utils.h @@ -28,9 +28,8 @@ bool is_io_type(t_physical_tile_type_ptr type); bool is_empty_type(t_physical_tile_type_ptr type); bool is_empty_type(t_logical_block_type_ptr type); -//Returns the corresponding physical/logical type given the logical/physical type as parameter +//Returns the corresponding physical type given the logical type as parameter t_physical_tile_type_ptr physical_tile_type(ClusterBlockId blk); -t_logical_block_type_ptr logical_block_type(t_physical_tile_type_ptr physical_tile_type); int get_unique_pb_graph_node_id(const t_pb_graph_node* pb_graph_node); @@ -172,6 +171,7 @@ int get_max_num_pins(t_logical_block_type_ptr logical_block); bool is_tile_compatible(t_physical_tile_type_ptr physical_tile, t_logical_block_type_ptr logical_block); t_physical_tile_type_ptr pick_random_physical_type(t_logical_block_type_ptr logical_block); +t_logical_block_type_ptr pick_random_logical_type(t_physical_tile_type_ptr physical_tile); int get_logical_pin(t_physical_tile_type_ptr physical_tile, t_logical_block_type_ptr logical_block, From 
84e06560149d0f6c4d4b91fc8b4c7a61002a1008 Mon Sep 17 00:00:00 2001 From: Alessandro Comodi Date: Wed, 23 Oct 2019 13:15:44 +0200 Subject: [PATCH 39/58] equivalent: fixing auto-sizing of FPGA device with equivalent tiles Signed-off-by: Alessandro Comodi --- vpr/src/base/SetupGrid.cpp | 26 ++++++++++++++++++++++---- 1 file changed, 22 insertions(+), 4 deletions(-) diff --git a/vpr/src/base/SetupGrid.cpp b/vpr/src/base/SetupGrid.cpp index deda993b3b3..1cf36956f6a 100644 --- a/vpr/src/base/SetupGrid.cpp +++ b/vpr/src/base/SetupGrid.cpp @@ -221,15 +221,33 @@ static DeviceGrid auto_size_device_grid(const std::vector& grid_layo } static std::vector grid_overused_resources(const DeviceGrid& grid, std::map instance_counts) { + auto& device_ctx = g_vpr_ctx.device(); + std::vector overused_resources; + std::unordered_map min_count_map; + // Initialize min_count_map + for (const auto& physical_tile : device_ctx.physical_tile_types) { + min_count_map.insert(std::make_pair(&physical_tile, size_t(0))); + } + //Are the resources satisified? 
for (auto kv : instance_counts) { - t_physical_tile_type_ptr type; - size_t min_count; - std::tie(type, min_count) = std::make_pair(kv.first->equivalent_tiles[0], kv.second); + t_physical_tile_type_ptr type = nullptr; + + size_t inst_cnt = 0; + for (auto& physical_tile : kv.first->equivalent_tiles) { + size_t tmp_inst_cnt = grid.num_instances(physical_tile); + + if (inst_cnt <= tmp_inst_cnt) { + type = physical_tile; + inst_cnt = tmp_inst_cnt; + } + } - size_t inst_cnt = grid.num_instances(type); + VTR_ASSERT(type); + size_t min_count = min_count_map.at(type) + kv.second; + min_count_map.at(type) = min_count; if (inst_cnt < min_count) { overused_resources.push_back(type); From 21fd7421dcc345d83a4e9c0bf4a40b837f6c3884 Mon Sep 17 00:00:00 2001 From: Alessandro Comodi Date: Wed, 23 Oct 2019 13:16:14 +0200 Subject: [PATCH 40/58] equivalent: fixed initial regression test Signed-off-by: Alessandro Comodi --- vtr_flow/arch/equivalent_sites/equivalent.xml | 58 ++++++++----------- .../vtr_reg_strong/task_list.txt | 1 + 2 files changed, 26 insertions(+), 33 deletions(-) diff --git a/vtr_flow/arch/equivalent_sites/equivalent.xml b/vtr_flow/arch/equivalent_sites/equivalent.xml index 7caf8788455..7252b4f81e5 100644 --- a/vtr_flow/arch/equivalent_sites/equivalent.xml +++ b/vtr_flow/arch/equivalent_sites/equivalent.xml @@ -44,7 +44,12 @@ - + + io_tile.in io_tile.out + io_tile.in io_tile.out + io_tile.in io_tile.out + io_tile.in io_tile.out + @@ -68,7 +73,12 @@ - + + pass_through_tile.in pass_through_tile.out + pass_through_tile.in pass_through_tile.out + pass_through_tile.in pass_through_tile.out + pass_through_tile.in pass_through_tile.out + @@ -155,49 +165,31 @@ - - - - - - - - - - - - - - - - - - + + + + + - - + + - - + - - - - 1 1 1 1 1 1 1 1 1 1 1 1 1 - 1 1 1 1 1 1 1 1 1 1 1 1 + + + + 1 1 + 1 - - - - diff --git a/vtr_flow/tasks/regression_tests/vtr_reg_strong/task_list.txt b/vtr_flow/tasks/regression_tests/vtr_reg_strong/task_list.txt index 6f7c5af06ec..f1e9286c4d3 100644 
--- a/vtr_flow/tasks/regression_tests/vtr_reg_strong/task_list.txt +++ b/vtr_flow/tasks/regression_tests/vtr_reg_strong/task_list.txt @@ -53,3 +53,4 @@ regression_tests/vtr_reg_strong/strong_sdc regression_tests/vtr_reg_strong/strong_timing_report_detail regression_tests/vtr_reg_strong/strong_route_reconverge regression_tests/vtr_reg_strong/strong_clock_buf +regression_tests/vtr_reg_strong/strong_equivalent_sites From 3ea2011d4f3c4d4d64a69cc6832043a4426ae5cc Mon Sep 17 00:00:00 2001 From: Alessandro Comodi Date: Tue, 12 Nov 2019 14:31:27 +0100 Subject: [PATCH 41/58] equivalent: adopting direct/custom pin_mapping Signed-off-by: Alessandro Comodi --- libs/libarchfpga/src/read_xml_arch_file.cpp | 58 ++++++++++++++++----- 1 file changed, 46 insertions(+), 12 deletions(-) diff --git a/libs/libarchfpga/src/read_xml_arch_file.cpp b/libs/libarchfpga/src/read_xml_arch_file.cpp index 08b3732fb60..b93cd4396f6 100644 --- a/libs/libarchfpga/src/read_xml_arch_file.cpp +++ b/libs/libarchfpga/src/read_xml_arch_file.cpp @@ -111,11 +111,15 @@ static void ProcessTileEquivalentSites(pugi::xml_node Parent, t_physical_tile_type* PhysicalTileType, std::vector& LogicalBlockTypes, const pugiutil::loc_data& loc_data); -static void ProcessEquivalentSiteDirects(pugi::xml_node Parent, - t_physical_tile_type* PhysicalTileType, - t_logical_block_type* LogicalBlockType, - std::string site_name, - const pugiutil::loc_data& loc_data); +static void ProcessEquivalentSiteDirectConnection(pugi::xml_node Parent, + t_physical_tile_type* PhysicalTileType, + t_logical_block_type* LogicalBlockType, + const pugiutil::loc_data& loc_data); +static void ProcessEquivalentSiteCustomConnection(pugi::xml_node Parent, + t_physical_tile_type* PhysicalTileType, + t_logical_block_type* LogicalBlockType, + std::string site_name, + const pugiutil::loc_data& loc_data); static void ProcessPb_Type(pugi::xml_node Parent, t_pb_type* pb_type, t_mode* mode, @@ -3221,7 +3225,7 @@ static void 
ProcessTileEquivalentSites(pugi::xml_node Parent, while (CurSite) { check_node(CurSite, "site", loc_data); - expect_only_attributes(CurSite, {"pb_type", "priority"}, loc_data); + expect_only_attributes(CurSite, {"pb_type", "priority", "pin_mapping"}, loc_data); /* Load equivalent site name */ auto Prop = std::string(get_attribute(CurSite, "pb_type", loc_data).value()); PhysicalTileType->equivalent_sites_names.push_back(Prop); @@ -3232,17 +3236,47 @@ static void ProcessTileEquivalentSites(pugi::xml_node Parent, LogicalBlockType->physical_tiles_priority[priority].push_back(PhysicalTileType->index); PhysicalTileType->logical_blocks_priority[priority].push_back(LogicalBlockType->index); - ProcessEquivalentSiteDirects(CurSite, PhysicalTileType, LogicalBlockType, Prop, loc_data); + auto pin_mapping = get_attribute(CurSite, "pin_mapping", loc_data, ReqOpt::OPTIONAL).as_string("direct"); + + if (0 == strcmp(pin_mapping, "custom")) { + // Pin mapping between Tile and Pb Type is user-defined + ProcessEquivalentSiteCustomConnection(CurSite, PhysicalTileType, LogicalBlockType, Prop, loc_data); + } else if (0 == strcmp(pin_mapping, "direct")) { + ProcessEquivalentSiteDirectConnection(CurSite, PhysicalTileType, LogicalBlockType, loc_data); + } CurSite = CurSite.next_sibling(CurSite.name()); } } -static void ProcessEquivalentSiteDirects(pugi::xml_node Parent, - t_physical_tile_type* PhysicalTileType, - t_logical_block_type* LogicalBlockType, - std::string site_name, - const pugiutil::loc_data& loc_data) { +static void ProcessEquivalentSiteDirectConnection(pugi::xml_node Parent, + t_physical_tile_type* PhysicalTileType, + t_logical_block_type* LogicalBlockType, + const pugiutil::loc_data& loc_data) { + int num_pins = PhysicalTileType->num_pins / PhysicalTileType->capacity; + + if (num_pins != LogicalBlockType->pb_type->num_pins) { + archfpga_throw(loc_data.filename_c_str(), loc_data.line(Parent), + "Pin definition differ between site %s and tile %s. 
User-defined pin mapping is required.\n", LogicalBlockType->pb_type->name, PhysicalTileType->name); + } + + vtr::bimap directs_map; + + for (int npin = 0; npin < num_pins; npin++) { + t_physical_pin phy_pin(npin); + t_logical_pin log_pin(npin); + + directs_map.insert(log_pin, phy_pin); + } + + PhysicalTileType->tile_block_pin_directs_map[LogicalBlockType->index] = directs_map; +} + +static void ProcessEquivalentSiteCustomConnection(pugi::xml_node Parent, + t_physical_tile_type* PhysicalTileType, + t_logical_block_type* LogicalBlockType, + std::string site_name, + const pugiutil::loc_data& loc_data) { pugi::xml_node CurDirect; expect_only_children(Parent, {"direct"}, loc_data); From 927470985b3e7cdf3d9bc886157c1f1aa1da2cdf Mon Sep 17 00:00:00 2001 From: Alessandro Comodi Date: Tue, 12 Nov 2019 14:31:54 +0100 Subject: [PATCH 42/58] Revert "arch: updated architecture files to have direct pin mapping" This reverts commit a70d414217d6711e4ab3254eac50677c5c76c513. Signed-off-by: Alessandro Comodi --- libs/libarchfpga/arch/mult_luts_arch.xml | 28 +-- libs/libarchfpga/arch/sample_arch.xml | 28 +-- utils/fasm/test/test_fasm_arch.xml | 12 +- vpr/test/test_read_arch_metadata.xml | 12 +- vtr_flow/arch/bidir/k4_n4_v7_bidir.xml | 12 +- .../arch/bidir/k4_n4_v7_bidir_pass_gate.xml | 12 +- vtr_flow/arch/bidir/k4_n4_v7_l1_bidir.xml | 12 +- .../arch/bidir/k4_n4_v7_longline_bidir.xml | 12 +- vtr_flow/arch/common/arch.xml | 12 +- ...0.85sL2-0.15gL4-on-cb-off-sb_22nm_22nm.xml | 35 +--- ...2-0.15gL4-on-cb-off-sb_22nm_22nm_error.xml | 35 +--- .../custom_grid/buffered_flyover_wires.xml | 30 +--- vtr_flow/arch/custom_grid/column_io.xml | 30 +--- vtr_flow/arch/custom_grid/custom_sbloc.xml | 30 +--- vtr_flow/arch/custom_grid/fixed_grid.xml | 30 +--- .../arch/custom_grid/multiple_io_types.xml | 48 +----- .../arch/custom_grid/multiwidth_blocks.xml | 30 +--- vtr_flow/arch/custom_grid/non_column.xml | 36 +--- .../non_column_tall_aspect_ratio.xml | 36 +--- .../non_column_wide_aspect_ratio.xml | 36 
+--- .../custom_grid/shorted_flyover_wires.xml | 30 +--- .../k6_frac_N10_mem32K_40nm_custom_pins.xml | 28 +-- vtr_flow/arch/ispd/ultrascale_ispd.xml | 39 +---- .../k6_N8_I27_fleI6_fleO1_ff1_nmodes_1.xml | 28 +-- .../k6_N8_I48_fleI5_fleO2_ff1_nmodes_2.xml | 28 +-- .../k6_N8_I48_fleI5_fleO2_ff2_nmodes_2.xml | 28 +-- .../k6_N8_I48_fleI6_fleO1_ff1_nmodes_1.xml | 28 +-- .../k6_N8_I48_fleI6_fleO2_ff1_nmodes_2.xml | 28 +-- .../k6_N8_I48_fleI6_fleO2_ff2_nmodes_2.xml | 28 +-- .../k6_N8_I56_fleI7_fleO2_ff1_nmodes_2.xml | 28 +-- .../k6_N8_I56_fleI7_fleO2_ff2_nmodes_2.xml | 28 +-- .../k6_N8_I64_fleI8_fleO2_ff1_nmodes_2.xml | 28 +-- .../k6_N8_I64_fleI8_fleO2_ff2_nmodes_2.xml | 28 +-- .../k6_N8_I72_fleI9_fleO2_ff1_nmodes_2.xml | 28 +-- .../k6_N8_I72_fleI9_fleO2_ff2_nmodes_2.xml | 28 +-- .../k6_N8_I80_fleI10_fleO2_ff1_nmodes_2.xml | 28 +-- .../k6_N8_I80_fleI10_fleO2_ff2_nmodes_2.xml | 28 +-- .../k4_N10_memSize1024_memData16.xml | 28 +-- .../k4_N10_memSize1024_memData2.xml | 28 +-- .../k4_N10_memSize1024_memData32.xml | 28 +-- .../k4_N10_memSize1024_memData4.xml | 28 +-- .../k4_N10_memSize1024_memData64.xml | 28 +-- .../k4_N10_memSize1024_memData8.xml | 28 +-- .../k4_N10_memSize131072_memData16.xml | 28 +-- .../k4_N10_memSize131072_memData2.xml | 28 +-- .../k4_N10_memSize131072_memData32.xml | 28 +-- .../k4_N10_memSize131072_memData4.xml | 28 +-- .../k4_N10_memSize131072_memData64.xml | 28 +-- .../k4_N10_memSize131072_memData8.xml | 28 +-- .../k4_N10_memSize16384_memData16.xml | 28 +-- .../k4_N10_memSize16384_memData2.xml | 28 +-- .../k4_N10_memSize16384_memData32.xml | 28 +-- .../k4_N10_memSize16384_memData4.xml | 28 +-- .../k4_N10_memSize16384_memData64.xml | 28 +-- .../k4_N10_memSize16384_memData8.xml | 28 +-- .../k4_N10_memSize2048_memData16.xml | 28 +-- .../k4_N10_memSize2048_memData2.xml | 28 +-- .../k4_N10_memSize2048_memData32.xml | 28 +-- .../k4_N10_memSize2048_memData4.xml | 28 +-- .../k4_N10_memSize2048_memData64.xml | 28 +-- .../k4_N10_memSize2048_memData8.xml | 28 +-- 
.../k4_N10_memSize262144_memData16.xml | 28 +-- .../k4_N10_memSize262144_memData2.xml | 28 +-- .../k4_N10_memSize262144_memData32.xml | 28 +-- .../k4_N10_memSize262144_memData4.xml | 28 +-- .../k4_N10_memSize262144_memData64.xml | 28 +-- .../k4_N10_memSize262144_memData8.xml | 28 +-- .../k4_N10_memSize32768_memData16.xml | 28 +-- .../k4_N10_memSize32768_memData2.xml | 28 +-- .../k4_N10_memSize32768_memData32.xml | 28 +-- .../k4_N10_memSize32768_memData4.xml | 28 +-- .../k4_N10_memSize32768_memData64.xml | 28 +-- .../k4_N10_memSize32768_memData8.xml | 28 +-- .../k4_N10_memSize4096_memData16.xml | 28 +-- .../k4_N10_memSize4096_memData2.xml | 28 +-- .../k4_N10_memSize4096_memData32.xml | 28 +-- .../k4_N10_memSize4096_memData4.xml | 28 +-- .../k4_N10_memSize4096_memData64.xml | 28 +-- .../k4_N10_memSize4096_memData8.xml | 28 +-- .../k4_N10_memSize512_memData16.xml | 28 +-- .../k4_N10_memSize512_memData2.xml | 28 +-- .../k4_N10_memSize512_memData32.xml | 28 +-- .../k4_N10_memSize512_memData4.xml | 28 +-- .../k4_N10_memSize512_memData64.xml | 28 +-- .../k4_N10_memSize512_memData8.xml | 28 +-- .../k4_N10_memSize524288_memData16.xml | 28 +-- .../k4_N10_memSize524288_memData2.xml | 28 +-- .../k4_N10_memSize524288_memData32.xml | 28 +-- .../k4_N10_memSize524288_memData4.xml | 28 +-- .../k4_N10_memSize524288_memData64.xml | 28 +-- .../k4_N10_memSize524288_memData8.xml | 28 +-- .../k4_N10_memSize65536_memData16.xml | 28 +-- .../k4_N10_memSize65536_memData2.xml | 28 +-- .../k4_N10_memSize65536_memData32.xml | 28 +-- .../k4_N10_memSize65536_memData4.xml | 28 +-- .../k4_N10_memSize65536_memData64.xml | 28 +-- .../k4_N10_memSize65536_memData8.xml | 28 +-- .../k4_N10_memSize8192_memData16.xml | 28 +-- .../k4_N10_memSize8192_memData2.xml | 28 +-- .../k4_N10_memSize8192_memData32.xml | 28 +-- .../k4_N10_memSize8192_memData4.xml | 28 +-- .../k4_N10_memSize8192_memData64.xml | 28 +-- .../k4_N10_memSize8192_memData8.xml | 28 +-- .../k6_N10_mem32K_40nm_nonuniform.xml | 28 +-- 
.../k6_N10_mem32K_40nm_pulse.xml | 28 +-- .../k6_N10_I40_Fi6_L1_frac0_ff1_45nm.xml | 28 +-- .../k6_N10_I40_Fi6_L2_frac0_ff1_45nm.xml | 28 +-- .../k6_N10_I40_Fi6_L3_frac0_ff1_45nm.xml | 28 +-- .../k6_N10_I40_Fi6_L4_frac0_ff1_130nm.xml | 28 +-- .../k6_N10_I40_Fi6_L4_frac0_ff1_22nm.xml | 28 +-- .../k6_N10_I40_Fi6_L4_frac0_ff1_45nm.xml | 28 +-- .../k6_N10_I40_Fi6_L4_frac0_ff1_C10_45nm.xml | 28 +-- .../k6_N10_I40_Fi6_L4_frac0_ff1_C15_45nm.xml | 28 +-- .../k6_N10_I40_Fi6_L4_frac0_ff1_C20_45nm.xml | 28 +-- .../k6_N10_I40_Fi6_L4_frac0_ff1_C25_45nm.xml | 28 +-- .../k6_N10_I40_Fi6_L4_frac0_ff1_C30_45nm.xml | 28 +-- .../k6_N10_I40_Fi6_L4_frac0_ff1_C35_45nm.xml | 28 +-- .../k6_N10_I40_Fi6_L4_frac0_ff1_C40_45nm.xml | 28 +-- .../k6_N10_I40_Fi6_L4_frac0_ff1_C45_45nm.xml | 28 +-- .../k6_N10_I40_Fi6_L4_frac0_ff1_C50_45nm.xml | 28 +-- .../k6_N10_I40_Fi6_L4_frac0_ff1_C5_45nm.xml | 28 +-- .../k6_N10_I40_Fi6_L4_frac1_ff1_45nm.xml | 28 +-- .../k6_N10_I40_Fi6_L4_frac1_ff2_45nm.xml | 28 +-- .../k6_N10_I40_Fi6_L4_frac1_ff2_C10_45nm.xml | 28 +-- .../k6_N10_I40_Fi6_L4_frac1_ff2_C15_45nm.xml | 28 +-- .../k6_N10_I40_Fi6_L4_frac1_ff2_C20_45nm.xml | 28 +-- .../k6_N10_I40_Fi6_L4_frac1_ff2_C25_45nm.xml | 28 +-- .../k6_N10_I40_Fi6_L4_frac1_ff2_C30_45nm.xml | 28 +-- .../k6_N10_I40_Fi6_L4_frac1_ff2_C35_45nm.xml | 28 +-- .../k6_N10_I40_Fi6_L4_frac1_ff2_C40_45nm.xml | 28 +-- .../k6_N10_I40_Fi6_L4_frac1_ff2_C45_45nm.xml | 28 +-- .../k6_N10_I40_Fi6_L4_frac1_ff2_C50_45nm.xml | 28 +-- .../k6_N10_I40_Fi6_L4_frac1_ff2_C55_45nm.xml | 28 +-- .../k6_N10_I40_Fi6_L4_frac1_ff2_C5_45nm.xml | 28 +-- .../k6_N10_I40_Fi6_L4_frac1_ff2_C60_45nm.xml | 28 +-- .../k6_N10_I40_Fi6_L5_frac0_ff1_45nm.xml | 28 +-- .../k6_N10_I40_Fi6_L6_frac0_ff1_45nm.xml | 28 +-- .../k6_N10_I40_Fi7_L4_frac1_ff1_45nm.xml | 28 +-- .../k6_N10_I40_Fi7_L4_frac1_ff2_45nm.xml | 28 +-- .../k6_N10_I40_Fi8_L4_frac1_ff1_45nm.xml | 28 +-- .../k6_N10_I40_Fi8_L4_frac1_ff2_45nm.xml | 28 +-- .../k6_N10_I47_Fi7_L4_frac1_ff1_45nm.xml | 28 +-- 
.../k6_N10_I47_Fi7_L4_frac1_ff2_45nm.xml | 28 +-- .../k6_N10_I53_Fi8_L4_frac1_ff1_45nm.xml | 28 +-- .../k6_N10_I53_Fi8_L4_frac1_ff2_45nm.xml | 28 +-- vtr_flow/arch/routing_mode/arch.xml | 12 +- vtr_flow/arch/routing_mode/slicem.xml | 58 +------ vtr_flow/arch/timing/EArch.xml | 33 +--- .../fixed_k6_N8_gate_boost_0.2V_22nm.xml | 31 +--- ...8_lookahead_chain_gate_boost_0.2V_22nm.xml | 33 +--- ..._unbalanced_chain_gate_boost_0.2V_22nm.xml | 33 +--- ...6_N8_ripple_chain_gate_boost_0.2V_22nm.xml | 33 +--- ...nced_ripple_chain_gate_boost_0.2V_22nm.xml | 33 +--- .../fixed_k6_frac_2ripple_N8_22nm.xml | 33 +--- .../fixed_k6_frac_2uripple_N8_22nm.xml | 33 +--- .../fixed_size/fixed_k6_frac_N8_22nm.xml | 31 +--- .../fixed_k6_frac_ripple_N8_22nm.xml | 33 +--- .../fixed_k6_frac_uripple_N8_22nm.xml | 33 +--- ...8_lookahead_chain_gate_boost_0.2V_22nm.xml | 33 +--- ..._unbalanced_chain_gate_boost_0.2V_22nm.xml | 33 +--- ...6_N8_ripple_chain_gate_boost_0.2V_22nm.xml | 33 +--- ...nced_ripple_chain_gate_boost_0.2V_22nm.xml | 33 +--- .../k6_frac_2ripple_N8_22nm.xml | 33 +--- .../k6_frac_2uripple_N8_22nm.xml | 33 +--- .../fraclut_carrychain/k6_frac_N8_22nm.xml | 31 +--- .../k6_frac_ripple_N8_22nm.xml | 33 +--- .../k6_frac_uripple_N8_22nm.xml | 33 +--- .../global_nonuniform/x_delta_y_delta.xml | 28 +-- .../global_nonuniform/x_delta_y_uniform.xml | 28 +-- .../x_gaussian_y_gaussian.xml | 28 +-- .../x_gaussian_y_uniform.xml | 28 +-- .../global_nonuniform/x_uniform_y_delta.xml | 28 +-- .../x_uniform_y_gaussian.xml | 28 +-- vtr_flow/arch/timing/hard_fpu_arch_timing.xml | 24 +-- vtr_flow/arch/timing/k4_N4_90nm.xml | 12 +- .../timing/k4_N4_90nm_default_fc_pinloc.xml | 12 +- vtr_flow/arch/timing/k4_N8_legacy_45nm.xml | 12 +- vtr_flow/arch/timing/k6_N10_40nm.xml | 12 +- .../timing/k6_N10_gate_boost_0.2V_22nm.xml | 31 +--- vtr_flow/arch/timing/k6_N10_legacy_45nm.xml | 12 +- vtr_flow/arch/timing/k6_N10_mem32K_40nm.xml | 28 +-- .../arch/timing/k6_N10_mem32K_40nm_fc_abs.xml | 28 +-- 
..._N10_ripple_chain_gate_boost_0.2V_22nm.xml | 33 +--- ...nced_ripple_chain_gate_boost_0.2V_22nm.xml | 33 +--- .../timing/k6_N8_gate_boost_0.2V_22nm.xml | 31 +--- ...8_lookahead_chain_gate_boost_0.2V_22nm.xml | 33 +--- ..._unbalanced_chain_gate_boost_0.2V_22nm.xml | 33 +--- ...6_N8_ripple_chain_gate_boost_0.2V_22nm.xml | 33 +--- ...nced_ripple_chain_gate_boost_0.2V_22nm.xml | 33 +--- vtr_flow/arch/timing/k6_frac_N10_40nm.xml | 12 +- ...c_N10_4add_2chains_depop50_mem20K_22nm.xml | 32 +--- ...dd_2chains_tie_off_depop50_mem20K_22nm.xml | 32 +--- ...rac_N10_frac_chain_depop50_mem32K_40nm.xml | 33 +--- .../k6_frac_N10_frac_chain_mem32K_40nm.xml | 30 +--- ...frac_N10_frac_chain_mem32K_htree0_40nm.xml | 30 +--- ...rac_chain_mem32K_htree0_routedCLK_40nm.xml | 30 +--- ...N10_frac_chain_mem32K_htree0short_40nm.xml | 30 +--- .../arch/timing/k6_frac_N10_mem32K_40nm.xml | 28 +-- vtr_flow/arch/timing/soft_fpu_arch_timing.xml | 12 +- .../timing/soft_fpu_arch_timing_chain.xml | 14 +- vtr_flow/arch/timing/xc6vlx240tff1156.xml | 163 +----------------- vtr_flow/arch/titan/stratixiv_arch.timing.xml | 51 +----- 202 files changed, 782 insertions(+), 5036 deletions(-) diff --git a/libs/libarchfpga/arch/mult_luts_arch.xml b/libs/libarchfpga/arch/mult_luts_arch.xml index 29a0887fa30..e1e4d7a3573 100644 --- a/libs/libarchfpga/arch/mult_luts_arch.xml +++ b/libs/libarchfpga/arch/mult_luts_arch.xml @@ -54,11 +54,7 @@ - - - - - + @@ -73,11 +69,7 @@ - - - - - + @@ -87,15 +79,7 @@ - - - - - - - - - + @@ -109,11 +93,7 @@ - - - - - + diff --git a/libs/libarchfpga/arch/sample_arch.xml b/libs/libarchfpga/arch/sample_arch.xml index 96958667c67..345b346dd28 100755 --- a/libs/libarchfpga/arch/sample_arch.xml +++ b/libs/libarchfpga/arch/sample_arch.xml @@ -135,11 +135,7 @@ - - - - - + @@ -154,11 +150,7 @@ - - - - - + @@ -168,11 +160,7 @@ - - - - - + @@ -182,15 +170,7 @@ - - - - - - - - - + diff --git a/utils/fasm/test/test_fasm_arch.xml b/utils/fasm/test/test_fasm_arch.xml index af5724f5133..14bbe144e43 
100644 --- a/utils/fasm/test/test_fasm_arch.xml +++ b/utils/fasm/test/test_fasm_arch.xml @@ -3,11 +3,7 @@ - - - - - + @@ -22,11 +18,7 @@ - - - - - + diff --git a/vpr/test/test_read_arch_metadata.xml b/vpr/test/test_read_arch_metadata.xml index d2df1d08ec3..1068b139531 100644 --- a/vpr/test/test_read_arch_metadata.xml +++ b/vpr/test/test_read_arch_metadata.xml @@ -3,11 +3,7 @@ - - - - - + @@ -22,11 +18,7 @@ - - - - - + diff --git a/vtr_flow/arch/bidir/k4_n4_v7_bidir.xml b/vtr_flow/arch/bidir/k4_n4_v7_bidir.xml index 8a387aee81e..009fef90cf6 100644 --- a/vtr_flow/arch/bidir/k4_n4_v7_bidir.xml +++ b/vtr_flow/arch/bidir/k4_n4_v7_bidir.xml @@ -26,11 +26,7 @@ Architecture based off Stratix IV - - - - - + @@ -45,11 +41,7 @@ Architecture based off Stratix IV - - - - - + diff --git a/vtr_flow/arch/bidir/k4_n4_v7_bidir_pass_gate.xml b/vtr_flow/arch/bidir/k4_n4_v7_bidir_pass_gate.xml index c4359c4d314..e6d3fb6eca3 100644 --- a/vtr_flow/arch/bidir/k4_n4_v7_bidir_pass_gate.xml +++ b/vtr_flow/arch/bidir/k4_n4_v7_bidir_pass_gate.xml @@ -26,11 +26,7 @@ Architecture based off Stratix IV - - - - - + @@ -45,11 +41,7 @@ Architecture based off Stratix IV - - - - - + diff --git a/vtr_flow/arch/bidir/k4_n4_v7_l1_bidir.xml b/vtr_flow/arch/bidir/k4_n4_v7_l1_bidir.xml index f6e8d532818..933ab020ca3 100644 --- a/vtr_flow/arch/bidir/k4_n4_v7_l1_bidir.xml +++ b/vtr_flow/arch/bidir/k4_n4_v7_l1_bidir.xml @@ -26,11 +26,7 @@ Architecture based off Stratix IV - - - - - + @@ -45,11 +41,7 @@ Architecture based off Stratix IV - - - - - + diff --git a/vtr_flow/arch/bidir/k4_n4_v7_longline_bidir.xml b/vtr_flow/arch/bidir/k4_n4_v7_longline_bidir.xml index 6eb20c66c66..087558b8143 100644 --- a/vtr_flow/arch/bidir/k4_n4_v7_longline_bidir.xml +++ b/vtr_flow/arch/bidir/k4_n4_v7_longline_bidir.xml @@ -26,11 +26,7 @@ Architecture based off Stratix IV - - - - - + @@ -45,11 +41,7 @@ Architecture based off Stratix IV - - - - - + diff --git a/vtr_flow/arch/common/arch.xml b/vtr_flow/arch/common/arch.xml index 
d388bfd1f4d..5fdb82c166b 100644 --- a/vtr_flow/arch/common/arch.xml +++ b/vtr_flow/arch/common/arch.xml @@ -14,12 +14,7 @@ - - - - - - + @@ -35,10 +30,7 @@ - - - - + diff --git a/vtr_flow/arch/complex_switch/k4_N8_topology-0.85sL2-0.15gL4-on-cb-off-sb_22nm_22nm.xml b/vtr_flow/arch/complex_switch/k4_N8_topology-0.85sL2-0.15gL4-on-cb-off-sb_22nm_22nm.xml index a9e0e89cc3e..5b2a4a00959 100644 --- a/vtr_flow/arch/complex_switch/k4_N8_topology-0.85sL2-0.15gL4-on-cb-off-sb_22nm_22nm.xml +++ b/vtr_flow/arch/complex_switch/k4_N8_topology-0.85sL2-0.15gL4-on-cb-off-sb_22nm_22nm.xml @@ -69,11 +69,7 @@ - - - - - + @@ -92,18 +88,7 @@ - - - - - - - - - - - - + @@ -131,11 +116,7 @@ - - - - - + @@ -149,15 +130,7 @@ - - - - - - - - - + diff --git a/vtr_flow/arch/complex_switch/k4_N8_topology-0.85sL2-0.15gL4-on-cb-off-sb_22nm_22nm_error.xml b/vtr_flow/arch/complex_switch/k4_N8_topology-0.85sL2-0.15gL4-on-cb-off-sb_22nm_22nm_error.xml index 272943c8ecb..5b34bb01bd9 100644 --- a/vtr_flow/arch/complex_switch/k4_N8_topology-0.85sL2-0.15gL4-on-cb-off-sb_22nm_22nm_error.xml +++ b/vtr_flow/arch/complex_switch/k4_N8_topology-0.85sL2-0.15gL4-on-cb-off-sb_22nm_22nm_error.xml @@ -69,11 +69,7 @@ - - - - - + @@ -92,18 +88,7 @@ - - - - - - - - - - - - + @@ -131,11 +116,7 @@ - - - - - + @@ -149,15 +130,7 @@ - - - - - - - - - + diff --git a/vtr_flow/arch/custom_grid/buffered_flyover_wires.xml b/vtr_flow/arch/custom_grid/buffered_flyover_wires.xml index fc7e61a5cad..b1932706a79 100644 --- a/vtr_flow/arch/custom_grid/buffered_flyover_wires.xml +++ b/vtr_flow/arch/custom_grid/buffered_flyover_wires.xml @@ -160,11 +160,7 @@ - - - - - + @@ -179,13 +175,7 @@ - - - - - - - + @@ -200,11 +190,7 @@ - - - - - + @@ -227,15 +213,7 @@ - - - - - - - - - + diff --git a/vtr_flow/arch/custom_grid/column_io.xml b/vtr_flow/arch/custom_grid/column_io.xml index 240d306f655..d7af343f10d 100644 --- a/vtr_flow/arch/custom_grid/column_io.xml +++ b/vtr_flow/arch/custom_grid/column_io.xml @@ -160,11 +160,7 @@ - - - - - + @@ 
-174,13 +170,7 @@ - - - - - - - + @@ -195,11 +185,7 @@ - - - - - + @@ -209,15 +195,7 @@ - - - - - - - - - + diff --git a/vtr_flow/arch/custom_grid/custom_sbloc.xml b/vtr_flow/arch/custom_grid/custom_sbloc.xml index 9a1ed7ddc37..a17df3083ce 100644 --- a/vtr_flow/arch/custom_grid/custom_sbloc.xml +++ b/vtr_flow/arch/custom_grid/custom_sbloc.xml @@ -160,11 +160,7 @@ - - - - - + @@ -179,13 +175,7 @@ - - - - - - - + @@ -200,11 +190,7 @@ - - - - - + @@ -227,15 +213,7 @@ - - - - - - - - - + diff --git a/vtr_flow/arch/custom_grid/fixed_grid.xml b/vtr_flow/arch/custom_grid/fixed_grid.xml index 37af3bbae72..a625dfb2178 100644 --- a/vtr_flow/arch/custom_grid/fixed_grid.xml +++ b/vtr_flow/arch/custom_grid/fixed_grid.xml @@ -160,11 +160,7 @@ - - - - - + @@ -179,13 +175,7 @@ - - - - - - - + @@ -200,11 +190,7 @@ - - - - - + @@ -214,15 +200,7 @@ - - - - - - - - - + diff --git a/vtr_flow/arch/custom_grid/multiple_io_types.xml b/vtr_flow/arch/custom_grid/multiple_io_types.xml index 88440e8b591..44e1fd64338 100644 --- a/vtr_flow/arch/custom_grid/multiple_io_types.xml +++ b/vtr_flow/arch/custom_grid/multiple_io_types.xml @@ -160,11 +160,7 @@ - - - - - + @@ -176,11 +172,7 @@ - - - - - + @@ -192,11 +184,7 @@ - - - - - + @@ -208,11 +196,7 @@ - - - - - + @@ -224,13 +208,7 @@ - - - - - - - + @@ -245,11 +223,7 @@ - - - - - + @@ -259,15 +233,7 @@ - - - - - - - - - + diff --git a/vtr_flow/arch/custom_grid/multiwidth_blocks.xml b/vtr_flow/arch/custom_grid/multiwidth_blocks.xml index 00b27e73594..648a7462865 100644 --- a/vtr_flow/arch/custom_grid/multiwidth_blocks.xml +++ b/vtr_flow/arch/custom_grid/multiwidth_blocks.xml @@ -160,11 +160,7 @@ - - - - - + @@ -179,13 +175,7 @@ - - - - - - - + @@ -200,11 +190,7 @@ - - - - - + @@ -214,15 +200,7 @@ - - - - - - - - - + diff --git a/vtr_flow/arch/custom_grid/non_column.xml b/vtr_flow/arch/custom_grid/non_column.xml index 9517de5c6de..3d8ce76d129 100644 --- a/vtr_flow/arch/custom_grid/non_column.xml +++ b/vtr_flow/arch/custom_grid/non_column.xml @@ 
-169,11 +169,7 @@ - - - - - + @@ -188,13 +184,7 @@ - - - - - - - + @@ -209,11 +199,7 @@ - - - - - + @@ -223,15 +209,7 @@ - - - - - - - - - + @@ -245,11 +223,7 @@ - - - - - + diff --git a/vtr_flow/arch/custom_grid/non_column_tall_aspect_ratio.xml b/vtr_flow/arch/custom_grid/non_column_tall_aspect_ratio.xml index 2a234551415..35b9cc986f5 100644 --- a/vtr_flow/arch/custom_grid/non_column_tall_aspect_ratio.xml +++ b/vtr_flow/arch/custom_grid/non_column_tall_aspect_ratio.xml @@ -169,11 +169,7 @@ - - - - - + @@ -188,13 +184,7 @@ - - - - - - - + @@ -209,11 +199,7 @@ - - - - - + @@ -223,15 +209,7 @@ - - - - - - - - - + @@ -245,11 +223,7 @@ - - - - - + diff --git a/vtr_flow/arch/custom_grid/non_column_wide_aspect_ratio.xml b/vtr_flow/arch/custom_grid/non_column_wide_aspect_ratio.xml index c0660792c1a..53998ad7b0c 100644 --- a/vtr_flow/arch/custom_grid/non_column_wide_aspect_ratio.xml +++ b/vtr_flow/arch/custom_grid/non_column_wide_aspect_ratio.xml @@ -169,11 +169,7 @@ - - - - - + @@ -188,13 +184,7 @@ - - - - - - - + @@ -209,11 +199,7 @@ - - - - - + @@ -223,15 +209,7 @@ - - - - - - - - - + @@ -245,11 +223,7 @@ - - - - - + diff --git a/vtr_flow/arch/custom_grid/shorted_flyover_wires.xml b/vtr_flow/arch/custom_grid/shorted_flyover_wires.xml index 9b7ed82e617..babc5933049 100644 --- a/vtr_flow/arch/custom_grid/shorted_flyover_wires.xml +++ b/vtr_flow/arch/custom_grid/shorted_flyover_wires.xml @@ -160,11 +160,7 @@ - - - - - + @@ -179,13 +175,7 @@ - - - - - - - + @@ -200,11 +190,7 @@ - - - - - + @@ -227,15 +213,7 @@ - - - - - - - - - + diff --git a/vtr_flow/arch/custom_pins/k6_frac_N10_mem32K_40nm_custom_pins.xml b/vtr_flow/arch/custom_pins/k6_frac_N10_mem32K_40nm_custom_pins.xml index c1ae10b27f4..2156d0c9240 100644 --- a/vtr_flow/arch/custom_pins/k6_frac_N10_mem32K_40nm_custom_pins.xml +++ b/vtr_flow/arch/custom_pins/k6_frac_N10_mem32K_40nm_custom_pins.xml @@ -138,11 +138,7 @@ - - - - - + @@ -157,11 +153,7 @@ - - - - - + @@ -171,11 +163,7 @@ - - - - - + @@ -222,15 +210,7 @@ - - 
- - - - - - - + diff --git a/vtr_flow/arch/ispd/ultrascale_ispd.xml b/vtr_flow/arch/ispd/ultrascale_ispd.xml index 5d6a5a3c55b..e8d42d3608b 100644 --- a/vtr_flow/arch/ispd/ultrascale_ispd.xml +++ b/vtr_flow/arch/ispd/ultrascale_ispd.xml @@ -254,27 +254,7 @@ - - - - - - - - - - - - - - - - - - - - - + @@ -300,11 +280,7 @@ - - - - - + @@ -314,11 +290,7 @@ - - - - - + @@ -328,10 +300,7 @@ - - - - + diff --git a/vtr_flow/arch/no_timing/fracturable_lut_sweep/k6_N8_I27_fleI6_fleO1_ff1_nmodes_1.xml b/vtr_flow/arch/no_timing/fracturable_lut_sweep/k6_N8_I27_fleI6_fleO1_ff1_nmodes_1.xml index 0e7d490d497..59fe9770579 100755 --- a/vtr_flow/arch/no_timing/fracturable_lut_sweep/k6_N8_I27_fleI6_fleO1_ff1_nmodes_1.xml +++ b/vtr_flow/arch/no_timing/fracturable_lut_sweep/k6_N8_I27_fleI6_fleO1_ff1_nmodes_1.xml @@ -54,11 +54,7 @@ - - - - - + @@ -73,11 +69,7 @@ - - - - - + @@ -87,15 +79,7 @@ - - - - - - - - - + @@ -109,11 +93,7 @@ - - - - - + diff --git a/vtr_flow/arch/no_timing/fracturable_lut_sweep/k6_N8_I48_fleI5_fleO2_ff1_nmodes_2.xml b/vtr_flow/arch/no_timing/fracturable_lut_sweep/k6_N8_I48_fleI5_fleO2_ff1_nmodes_2.xml index 7693f784e50..099efce007d 100755 --- a/vtr_flow/arch/no_timing/fracturable_lut_sweep/k6_N8_I48_fleI5_fleO2_ff1_nmodes_2.xml +++ b/vtr_flow/arch/no_timing/fracturable_lut_sweep/k6_N8_I48_fleI5_fleO2_ff1_nmodes_2.xml @@ -54,11 +54,7 @@ - - - - - + @@ -73,11 +69,7 @@ - - - - - + @@ -87,15 +79,7 @@ - - - - - - - - - + @@ -109,11 +93,7 @@ - - - - - + diff --git a/vtr_flow/arch/no_timing/fracturable_lut_sweep/k6_N8_I48_fleI5_fleO2_ff2_nmodes_2.xml b/vtr_flow/arch/no_timing/fracturable_lut_sweep/k6_N8_I48_fleI5_fleO2_ff2_nmodes_2.xml index 0104da8443c..21abda7ff3a 100755 --- a/vtr_flow/arch/no_timing/fracturable_lut_sweep/k6_N8_I48_fleI5_fleO2_ff2_nmodes_2.xml +++ b/vtr_flow/arch/no_timing/fracturable_lut_sweep/k6_N8_I48_fleI5_fleO2_ff2_nmodes_2.xml @@ -54,11 +54,7 @@ - - - - - + @@ -73,11 +69,7 @@ - - - - - + @@ -87,15 +79,7 @@ - - - - - - - - - + @@ -109,11 +93,7 
@@ - - - - - + diff --git a/vtr_flow/arch/no_timing/fracturable_lut_sweep/k6_N8_I48_fleI6_fleO1_ff1_nmodes_1.xml b/vtr_flow/arch/no_timing/fracturable_lut_sweep/k6_N8_I48_fleI6_fleO1_ff1_nmodes_1.xml index 5189f643925..85ad6c4092d 100755 --- a/vtr_flow/arch/no_timing/fracturable_lut_sweep/k6_N8_I48_fleI6_fleO1_ff1_nmodes_1.xml +++ b/vtr_flow/arch/no_timing/fracturable_lut_sweep/k6_N8_I48_fleI6_fleO1_ff1_nmodes_1.xml @@ -54,11 +54,7 @@ - - - - - + @@ -73,11 +69,7 @@ - - - - - + @@ -87,15 +79,7 @@ - - - - - - - - - + @@ -109,11 +93,7 @@ - - - - - + diff --git a/vtr_flow/arch/no_timing/fracturable_lut_sweep/k6_N8_I48_fleI6_fleO2_ff1_nmodes_2.xml b/vtr_flow/arch/no_timing/fracturable_lut_sweep/k6_N8_I48_fleI6_fleO2_ff1_nmodes_2.xml index 20131d0001e..03f756e7d85 100755 --- a/vtr_flow/arch/no_timing/fracturable_lut_sweep/k6_N8_I48_fleI6_fleO2_ff1_nmodes_2.xml +++ b/vtr_flow/arch/no_timing/fracturable_lut_sweep/k6_N8_I48_fleI6_fleO2_ff1_nmodes_2.xml @@ -54,11 +54,7 @@ - - - - - + @@ -73,11 +69,7 @@ - - - - - + @@ -87,15 +79,7 @@ - - - - - - - - - + @@ -109,11 +93,7 @@ - - - - - + diff --git a/vtr_flow/arch/no_timing/fracturable_lut_sweep/k6_N8_I48_fleI6_fleO2_ff2_nmodes_2.xml b/vtr_flow/arch/no_timing/fracturable_lut_sweep/k6_N8_I48_fleI6_fleO2_ff2_nmodes_2.xml index 69c6b1caec1..24c083ddb6a 100755 --- a/vtr_flow/arch/no_timing/fracturable_lut_sweep/k6_N8_I48_fleI6_fleO2_ff2_nmodes_2.xml +++ b/vtr_flow/arch/no_timing/fracturable_lut_sweep/k6_N8_I48_fleI6_fleO2_ff2_nmodes_2.xml @@ -54,11 +54,7 @@ - - - - - + @@ -73,11 +69,7 @@ - - - - - + @@ -87,15 +79,7 @@ - - - - - - - - - + @@ -109,11 +93,7 @@ - - - - - + diff --git a/vtr_flow/arch/no_timing/fracturable_lut_sweep/k6_N8_I56_fleI7_fleO2_ff1_nmodes_2.xml b/vtr_flow/arch/no_timing/fracturable_lut_sweep/k6_N8_I56_fleI7_fleO2_ff1_nmodes_2.xml index d2e4e35b553..4121489c726 100755 --- a/vtr_flow/arch/no_timing/fracturable_lut_sweep/k6_N8_I56_fleI7_fleO2_ff1_nmodes_2.xml +++ 
b/vtr_flow/arch/no_timing/fracturable_lut_sweep/k6_N8_I56_fleI7_fleO2_ff1_nmodes_2.xml @@ -54,11 +54,7 @@ - - - - - + @@ -73,11 +69,7 @@ - - - - - + @@ -87,15 +79,7 @@ - - - - - - - - - + @@ -109,11 +93,7 @@ - - - - - + diff --git a/vtr_flow/arch/no_timing/fracturable_lut_sweep/k6_N8_I56_fleI7_fleO2_ff2_nmodes_2.xml b/vtr_flow/arch/no_timing/fracturable_lut_sweep/k6_N8_I56_fleI7_fleO2_ff2_nmodes_2.xml index 9649004effe..dc0f5aaa65a 100755 --- a/vtr_flow/arch/no_timing/fracturable_lut_sweep/k6_N8_I56_fleI7_fleO2_ff2_nmodes_2.xml +++ b/vtr_flow/arch/no_timing/fracturable_lut_sweep/k6_N8_I56_fleI7_fleO2_ff2_nmodes_2.xml @@ -54,11 +54,7 @@ - - - - - + @@ -73,11 +69,7 @@ - - - - - + @@ -87,15 +79,7 @@ - - - - - - - - - + @@ -109,11 +93,7 @@ - - - - - + diff --git a/vtr_flow/arch/no_timing/fracturable_lut_sweep/k6_N8_I64_fleI8_fleO2_ff1_nmodes_2.xml b/vtr_flow/arch/no_timing/fracturable_lut_sweep/k6_N8_I64_fleI8_fleO2_ff1_nmodes_2.xml index 077cdb4b94e..8bdb792db6c 100755 --- a/vtr_flow/arch/no_timing/fracturable_lut_sweep/k6_N8_I64_fleI8_fleO2_ff1_nmodes_2.xml +++ b/vtr_flow/arch/no_timing/fracturable_lut_sweep/k6_N8_I64_fleI8_fleO2_ff1_nmodes_2.xml @@ -54,11 +54,7 @@ - - - - - + @@ -73,11 +69,7 @@ - - - - - + @@ -87,15 +79,7 @@ - - - - - - - - - + @@ -109,11 +93,7 @@ - - - - - + diff --git a/vtr_flow/arch/no_timing/fracturable_lut_sweep/k6_N8_I64_fleI8_fleO2_ff2_nmodes_2.xml b/vtr_flow/arch/no_timing/fracturable_lut_sweep/k6_N8_I64_fleI8_fleO2_ff2_nmodes_2.xml index 34481ea1c8b..cd5b6834574 100755 --- a/vtr_flow/arch/no_timing/fracturable_lut_sweep/k6_N8_I64_fleI8_fleO2_ff2_nmodes_2.xml +++ b/vtr_flow/arch/no_timing/fracturable_lut_sweep/k6_N8_I64_fleI8_fleO2_ff2_nmodes_2.xml @@ -54,11 +54,7 @@ - - - - - + @@ -73,11 +69,7 @@ - - - - - + @@ -87,15 +79,7 @@ - - - - - - - - - + @@ -109,11 +93,7 @@ - - - - - + diff --git a/vtr_flow/arch/no_timing/fracturable_lut_sweep/k6_N8_I72_fleI9_fleO2_ff1_nmodes_2.xml 
b/vtr_flow/arch/no_timing/fracturable_lut_sweep/k6_N8_I72_fleI9_fleO2_ff1_nmodes_2.xml index c81f86ec75c..14bfda4542a 100755 --- a/vtr_flow/arch/no_timing/fracturable_lut_sweep/k6_N8_I72_fleI9_fleO2_ff1_nmodes_2.xml +++ b/vtr_flow/arch/no_timing/fracturable_lut_sweep/k6_N8_I72_fleI9_fleO2_ff1_nmodes_2.xml @@ -54,11 +54,7 @@ - - - - - + @@ -73,11 +69,7 @@ - - - - - + @@ -87,15 +79,7 @@ - - - - - - - - - + @@ -109,11 +93,7 @@ - - - - - + diff --git a/vtr_flow/arch/no_timing/fracturable_lut_sweep/k6_N8_I72_fleI9_fleO2_ff2_nmodes_2.xml b/vtr_flow/arch/no_timing/fracturable_lut_sweep/k6_N8_I72_fleI9_fleO2_ff2_nmodes_2.xml index 10ed6407b2d..4d93eb12952 100755 --- a/vtr_flow/arch/no_timing/fracturable_lut_sweep/k6_N8_I72_fleI9_fleO2_ff2_nmodes_2.xml +++ b/vtr_flow/arch/no_timing/fracturable_lut_sweep/k6_N8_I72_fleI9_fleO2_ff2_nmodes_2.xml @@ -54,11 +54,7 @@ - - - - - + @@ -73,11 +69,7 @@ - - - - - + @@ -87,15 +79,7 @@ - - - - - - - - - + @@ -109,11 +93,7 @@ - - - - - + diff --git a/vtr_flow/arch/no_timing/fracturable_lut_sweep/k6_N8_I80_fleI10_fleO2_ff1_nmodes_2.xml b/vtr_flow/arch/no_timing/fracturable_lut_sweep/k6_N8_I80_fleI10_fleO2_ff1_nmodes_2.xml index b8b3c5b7c36..12a18ce614e 100755 --- a/vtr_flow/arch/no_timing/fracturable_lut_sweep/k6_N8_I80_fleI10_fleO2_ff1_nmodes_2.xml +++ b/vtr_flow/arch/no_timing/fracturable_lut_sweep/k6_N8_I80_fleI10_fleO2_ff1_nmodes_2.xml @@ -54,11 +54,7 @@ - - - - - + @@ -73,11 +69,7 @@ - - - - - + @@ -87,15 +79,7 @@ - - - - - - - - - + @@ -109,11 +93,7 @@ - - - - - + diff --git a/vtr_flow/arch/no_timing/fracturable_lut_sweep/k6_N8_I80_fleI10_fleO2_ff2_nmodes_2.xml b/vtr_flow/arch/no_timing/fracturable_lut_sweep/k6_N8_I80_fleI10_fleO2_ff2_nmodes_2.xml index 91a8f86b132..98630d2609e 100755 --- a/vtr_flow/arch/no_timing/fracturable_lut_sweep/k6_N8_I80_fleI10_fleO2_ff2_nmodes_2.xml +++ b/vtr_flow/arch/no_timing/fracturable_lut_sweep/k6_N8_I80_fleI10_fleO2_ff2_nmodes_2.xml @@ -54,11 +54,7 @@ - - - - - + @@ -73,11 +69,7 @@ - - - - - + @@ 
-87,15 +79,7 @@ - - - - - - - - - + @@ -109,11 +93,7 @@ - - - - - + diff --git a/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize1024_memData16.xml b/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize1024_memData16.xml index 44476873a49..2c662901725 100755 --- a/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize1024_memData16.xml +++ b/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize1024_memData16.xml @@ -54,11 +54,7 @@ - - - - - + @@ -73,11 +69,7 @@ - - - - - + @@ -87,15 +79,7 @@ - - - - - - - - - + @@ -109,11 +93,7 @@ - - - - - + diff --git a/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize1024_memData2.xml b/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize1024_memData2.xml index b592f17f3c6..d0514b0901c 100755 --- a/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize1024_memData2.xml +++ b/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize1024_memData2.xml @@ -54,11 +54,7 @@ - - - - - + @@ -73,11 +69,7 @@ - - - - - + @@ -87,15 +79,7 @@ - - - - - - - - - + @@ -109,11 +93,7 @@ - - - - - + diff --git a/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize1024_memData32.xml b/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize1024_memData32.xml index 4a6512ac0c2..5c483aa930e 100755 --- a/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize1024_memData32.xml +++ b/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize1024_memData32.xml @@ -54,11 +54,7 @@ - - - - - + @@ -73,11 +69,7 @@ - - - - - + @@ -87,15 +79,7 @@ - - - - - - - - - + @@ -109,11 +93,7 @@ - - - - - + diff --git a/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize1024_memData4.xml b/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize1024_memData4.xml index f0d8d007753..63db148c142 100755 --- a/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize1024_memData4.xml +++ b/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize1024_memData4.xml @@ -54,11 +54,7 @@ - - - - - + @@ -73,11 +69,7 @@ - - - - - + @@ -87,15 +79,7 @@ - - - - - - - - - + @@ -109,11 +93,7 @@ - - - - - + diff --git 
a/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize1024_memData64.xml b/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize1024_memData64.xml index 5625599f8e8..a358e4eb4b0 100755 --- a/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize1024_memData64.xml +++ b/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize1024_memData64.xml @@ -54,11 +54,7 @@ - - - - - + @@ -73,11 +69,7 @@ - - - - - + @@ -87,15 +79,7 @@ - - - - - - - - - + @@ -109,11 +93,7 @@ - - - - - + diff --git a/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize1024_memData8.xml b/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize1024_memData8.xml index dcb88e61bf6..179c4f6ccfa 100755 --- a/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize1024_memData8.xml +++ b/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize1024_memData8.xml @@ -54,11 +54,7 @@ - - - - - + @@ -73,11 +69,7 @@ - - - - - + @@ -87,15 +79,7 @@ - - - - - - - - - + @@ -109,11 +93,7 @@ - - - - - + diff --git a/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize131072_memData16.xml b/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize131072_memData16.xml index be627142da2..404df585cea 100755 --- a/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize131072_memData16.xml +++ b/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize131072_memData16.xml @@ -54,11 +54,7 @@ - - - - - + @@ -73,11 +69,7 @@ - - - - - + @@ -87,15 +79,7 @@ - - - - - - - - - + @@ -109,11 +93,7 @@ - - - - - + diff --git a/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize131072_memData2.xml b/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize131072_memData2.xml index 5497f8595b1..b9114aba6cf 100755 --- a/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize131072_memData2.xml +++ b/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize131072_memData2.xml @@ -54,11 +54,7 @@ - - - - - + @@ -73,11 +69,7 @@ - - - - - + @@ -87,15 +79,7 @@ - - - - - - - - - + @@ -109,11 +93,7 @@ - - - - - + diff --git a/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize131072_memData32.xml 
b/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize131072_memData32.xml index ac93c0520ae..061eb6f8d6a 100755 --- a/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize131072_memData32.xml +++ b/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize131072_memData32.xml @@ -54,11 +54,7 @@ - - - - - + @@ -73,11 +69,7 @@ - - - - - + @@ -87,15 +79,7 @@ - - - - - - - - - + @@ -109,11 +93,7 @@ - - - - - + diff --git a/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize131072_memData4.xml b/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize131072_memData4.xml index 75ce9bfa952..ac827bcdb58 100755 --- a/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize131072_memData4.xml +++ b/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize131072_memData4.xml @@ -54,11 +54,7 @@ - - - - - + @@ -73,11 +69,7 @@ - - - - - + @@ -87,15 +79,7 @@ - - - - - - - - - + @@ -109,11 +93,7 @@ - - - - - + diff --git a/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize131072_memData64.xml b/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize131072_memData64.xml index 5684dc65deb..7c673eb4ee5 100755 --- a/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize131072_memData64.xml +++ b/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize131072_memData64.xml @@ -54,11 +54,7 @@ - - - - - + @@ -73,11 +69,7 @@ - - - - - + @@ -87,15 +79,7 @@ - - - - - - - - - + @@ -109,11 +93,7 @@ - - - - - + diff --git a/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize131072_memData8.xml b/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize131072_memData8.xml index 649a8fa20ea..ae83fb8714c 100755 --- a/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize131072_memData8.xml +++ b/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize131072_memData8.xml @@ -54,11 +54,7 @@ - - - - - + @@ -73,11 +69,7 @@ - - - - - + @@ -87,15 +79,7 @@ - - - - - - - - - + @@ -109,11 +93,7 @@ - - - - - + diff --git a/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize16384_memData16.xml 
b/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize16384_memData16.xml index 315e4cdfb90..b7316290906 100755 --- a/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize16384_memData16.xml +++ b/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize16384_memData16.xml @@ -54,11 +54,7 @@ - - - - - + @@ -73,11 +69,7 @@ - - - - - + @@ -87,15 +79,7 @@ - - - - - - - - - + @@ -109,11 +93,7 @@ - - - - - + diff --git a/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize16384_memData2.xml b/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize16384_memData2.xml index 4318cc167c7..242f7f12f9b 100755 --- a/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize16384_memData2.xml +++ b/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize16384_memData2.xml @@ -54,11 +54,7 @@ - - - - - + @@ -73,11 +69,7 @@ - - - - - + @@ -87,15 +79,7 @@ - - - - - - - - - + @@ -109,11 +93,7 @@ - - - - - + diff --git a/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize16384_memData32.xml b/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize16384_memData32.xml index 29e94e0194c..080d6c4f0eb 100755 --- a/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize16384_memData32.xml +++ b/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize16384_memData32.xml @@ -54,11 +54,7 @@ - - - - - + @@ -73,11 +69,7 @@ - - - - - + @@ -87,15 +79,7 @@ - - - - - - - - - + @@ -109,11 +93,7 @@ - - - - - + diff --git a/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize16384_memData4.xml b/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize16384_memData4.xml index 01e31d251e5..dffd90760f7 100755 --- a/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize16384_memData4.xml +++ b/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize16384_memData4.xml @@ -54,11 +54,7 @@ - - - - - + @@ -73,11 +69,7 @@ - - - - - + @@ -87,15 +79,7 @@ - - - - - - - - - + @@ -109,11 +93,7 @@ - - - - - + diff --git a/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize16384_memData64.xml b/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize16384_memData64.xml index 
981b84f295a..b7b8ce4e0bf 100755 --- a/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize16384_memData64.xml +++ b/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize16384_memData64.xml @@ -54,11 +54,7 @@ - - - - - + @@ -73,11 +69,7 @@ - - - - - + @@ -87,15 +79,7 @@ - - - - - - - - - + @@ -109,11 +93,7 @@ - - - - - + diff --git a/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize16384_memData8.xml b/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize16384_memData8.xml index 86740110a43..7a7db575f34 100755 --- a/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize16384_memData8.xml +++ b/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize16384_memData8.xml @@ -54,11 +54,7 @@ - - - - - + @@ -73,11 +69,7 @@ - - - - - + @@ -87,15 +79,7 @@ - - - - - - - - - + @@ -109,11 +93,7 @@ - - - - - + diff --git a/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize2048_memData16.xml b/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize2048_memData16.xml index f1fe5eb3bc6..5410bd729d6 100755 --- a/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize2048_memData16.xml +++ b/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize2048_memData16.xml @@ -54,11 +54,7 @@ - - - - - + @@ -73,11 +69,7 @@ - - - - - + @@ -87,15 +79,7 @@ - - - - - - - - - + @@ -109,11 +93,7 @@ - - - - - + diff --git a/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize2048_memData2.xml b/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize2048_memData2.xml index 8cd8c1f0dc0..6e34b4212f8 100755 --- a/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize2048_memData2.xml +++ b/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize2048_memData2.xml @@ -54,11 +54,7 @@ - - - - - + @@ -73,11 +69,7 @@ - - - - - + @@ -87,15 +79,7 @@ - - - - - - - - - + @@ -109,11 +93,7 @@ - - - - - + diff --git a/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize2048_memData32.xml b/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize2048_memData32.xml index ab07482fef7..ef58d19de9c 100755 --- 
a/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize2048_memData32.xml +++ b/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize2048_memData32.xml @@ -54,11 +54,7 @@ - - - - - + @@ -73,11 +69,7 @@ - - - - - + @@ -87,15 +79,7 @@ - - - - - - - - - + @@ -109,11 +93,7 @@ - - - - - + diff --git a/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize2048_memData4.xml b/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize2048_memData4.xml index 568856b4b16..f887cd1f9de 100755 --- a/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize2048_memData4.xml +++ b/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize2048_memData4.xml @@ -54,11 +54,7 @@ - - - - - + @@ -73,11 +69,7 @@ - - - - - + @@ -87,15 +79,7 @@ - - - - - - - - - + @@ -109,11 +93,7 @@ - - - - - + diff --git a/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize2048_memData64.xml b/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize2048_memData64.xml index f8c1ace98a0..4c00d96294a 100755 --- a/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize2048_memData64.xml +++ b/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize2048_memData64.xml @@ -54,11 +54,7 @@ - - - - - + @@ -73,11 +69,7 @@ - - - - - + @@ -87,15 +79,7 @@ - - - - - - - - - + @@ -109,11 +93,7 @@ - - - - - + diff --git a/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize2048_memData8.xml b/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize2048_memData8.xml index 2df633f9462..602bd294750 100755 --- a/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize2048_memData8.xml +++ b/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize2048_memData8.xml @@ -54,11 +54,7 @@ - - - - - + @@ -73,11 +69,7 @@ - - - - - + @@ -87,15 +79,7 @@ - - - - - - - - - + @@ -109,11 +93,7 @@ - - - - - + diff --git a/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize262144_memData16.xml b/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize262144_memData16.xml index 01e55ac2b06..17dcdefb8a4 100755 --- a/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize262144_memData16.xml +++ 
b/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize262144_memData16.xml @@ -54,11 +54,7 @@ - - - - - + @@ -73,11 +69,7 @@ - - - - - + @@ -87,15 +79,7 @@ - - - - - - - - - + @@ -109,11 +93,7 @@ - - - - - + diff --git a/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize262144_memData2.xml b/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize262144_memData2.xml index baf2a068f46..71a21b7c184 100755 --- a/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize262144_memData2.xml +++ b/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize262144_memData2.xml @@ -54,11 +54,7 @@ - - - - - + @@ -73,11 +69,7 @@ - - - - - + @@ -87,15 +79,7 @@ - - - - - - - - - + @@ -109,11 +93,7 @@ - - - - - + diff --git a/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize262144_memData32.xml b/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize262144_memData32.xml index a6090842c4e..4dd19e9df07 100755 --- a/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize262144_memData32.xml +++ b/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize262144_memData32.xml @@ -54,11 +54,7 @@ - - - - - + @@ -73,11 +69,7 @@ - - - - - + @@ -87,15 +79,7 @@ - - - - - - - - - + @@ -109,11 +93,7 @@ - - - - - + diff --git a/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize262144_memData4.xml b/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize262144_memData4.xml index 9420b936ce2..7657b059224 100755 --- a/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize262144_memData4.xml +++ b/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize262144_memData4.xml @@ -54,11 +54,7 @@ - - - - - + @@ -73,11 +69,7 @@ - - - - - + @@ -87,15 +79,7 @@ - - - - - - - - - + @@ -109,11 +93,7 @@ - - - - - + diff --git a/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize262144_memData64.xml b/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize262144_memData64.xml index b7bc9796832..5b4faf666fa 100755 --- a/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize262144_memData64.xml +++ 
b/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize262144_memData64.xml @@ -54,11 +54,7 @@ - - - - - + @@ -73,11 +69,7 @@ - - - - - + @@ -87,15 +79,7 @@ - - - - - - - - - + @@ -109,11 +93,7 @@ - - - - - + diff --git a/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize262144_memData8.xml b/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize262144_memData8.xml index aec2a896633..e4d33b755e7 100755 --- a/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize262144_memData8.xml +++ b/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize262144_memData8.xml @@ -54,11 +54,7 @@ - - - - - + @@ -73,11 +69,7 @@ - - - - - + @@ -87,15 +79,7 @@ - - - - - - - - - + @@ -109,11 +93,7 @@ - - - - - + diff --git a/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize32768_memData16.xml b/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize32768_memData16.xml index 44ad66182f8..55541cd3135 100755 --- a/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize32768_memData16.xml +++ b/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize32768_memData16.xml @@ -54,11 +54,7 @@ - - - - - + @@ -73,11 +69,7 @@ - - - - - + @@ -87,15 +79,7 @@ - - - - - - - - - + @@ -109,11 +93,7 @@ - - - - - + diff --git a/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize32768_memData2.xml b/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize32768_memData2.xml index 825bba56a18..d4aab26bb9d 100755 --- a/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize32768_memData2.xml +++ b/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize32768_memData2.xml @@ -54,11 +54,7 @@ - - - - - + @@ -73,11 +69,7 @@ - - - - - + @@ -87,15 +79,7 @@ - - - - - - - - - + @@ -109,11 +93,7 @@ - - - - - + diff --git a/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize32768_memData32.xml b/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize32768_memData32.xml index f2a50c39723..0aac9b0cf83 100755 --- a/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize32768_memData32.xml +++ b/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize32768_memData32.xml 
@@ -54,11 +54,7 @@ - - - - - + @@ -73,11 +69,7 @@ - - - - - + @@ -87,15 +79,7 @@ - - - - - - - - - + @@ -109,11 +93,7 @@ - - - - - + diff --git a/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize32768_memData4.xml b/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize32768_memData4.xml index 715097a8d6a..589f5ad3d27 100755 --- a/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize32768_memData4.xml +++ b/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize32768_memData4.xml @@ -54,11 +54,7 @@ - - - - - + @@ -73,11 +69,7 @@ - - - - - + @@ -87,15 +79,7 @@ - - - - - - - - - + @@ -109,11 +93,7 @@ - - - - - + diff --git a/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize32768_memData64.xml b/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize32768_memData64.xml index a94ad0612a3..59fa1d78f09 100755 --- a/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize32768_memData64.xml +++ b/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize32768_memData64.xml @@ -54,11 +54,7 @@ - - - - - + @@ -73,11 +69,7 @@ - - - - - + @@ -87,15 +79,7 @@ - - - - - - - - - + @@ -109,11 +93,7 @@ - - - - - + diff --git a/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize32768_memData8.xml b/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize32768_memData8.xml index c801c7f6fd2..23d67220b5e 100755 --- a/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize32768_memData8.xml +++ b/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize32768_memData8.xml @@ -54,11 +54,7 @@ - - - - - + @@ -73,11 +69,7 @@ - - - - - + @@ -87,15 +79,7 @@ - - - - - - - - - + @@ -109,11 +93,7 @@ - - - - - + diff --git a/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize4096_memData16.xml b/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize4096_memData16.xml index e582bb4fe85..a2d2e4df793 100755 --- a/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize4096_memData16.xml +++ b/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize4096_memData16.xml @@ -54,11 +54,7 @@ - - - - - + @@ -73,11 +69,7 @@ - - - - - + @@ -87,15 +79,7 @@ - 
- - - - - - - - + @@ -109,11 +93,7 @@ - - - - - + diff --git a/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize4096_memData2.xml b/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize4096_memData2.xml index 93eea7cdfb1..53370394877 100755 --- a/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize4096_memData2.xml +++ b/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize4096_memData2.xml @@ -54,11 +54,7 @@ - - - - - + @@ -73,11 +69,7 @@ - - - - - + @@ -87,15 +79,7 @@ - - - - - - - - - + @@ -109,11 +93,7 @@ - - - - - + diff --git a/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize4096_memData32.xml b/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize4096_memData32.xml index 860e751ab1c..9729f315d3f 100755 --- a/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize4096_memData32.xml +++ b/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize4096_memData32.xml @@ -54,11 +54,7 @@ - - - - - + @@ -73,11 +69,7 @@ - - - - - + @@ -87,15 +79,7 @@ - - - - - - - - - + @@ -109,11 +93,7 @@ - - - - - + diff --git a/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize4096_memData4.xml b/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize4096_memData4.xml index 38624cb1352..28588d50a4a 100755 --- a/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize4096_memData4.xml +++ b/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize4096_memData4.xml @@ -54,11 +54,7 @@ - - - - - + @@ -73,11 +69,7 @@ - - - - - + @@ -87,15 +79,7 @@ - - - - - - - - - + @@ -109,11 +93,7 @@ - - - - - + diff --git a/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize4096_memData64.xml b/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize4096_memData64.xml index 13b1f26e4ec..b9eedba6199 100755 --- a/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize4096_memData64.xml +++ b/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize4096_memData64.xml @@ -54,11 +54,7 @@ - - - - - + @@ -73,11 +69,7 @@ - - - - - + @@ -87,15 +79,7 @@ - - - - - - - - - + @@ -109,11 +93,7 @@ - - - - - + diff --git 
a/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize4096_memData8.xml b/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize4096_memData8.xml index 04351132895..c48b8bec86e 100755 --- a/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize4096_memData8.xml +++ b/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize4096_memData8.xml @@ -54,11 +54,7 @@ - - - - - + @@ -73,11 +69,7 @@ - - - - - + @@ -87,15 +79,7 @@ - - - - - - - - - + @@ -109,11 +93,7 @@ - - - - - + diff --git a/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize512_memData16.xml b/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize512_memData16.xml index a2d2a0b975f..151e7698e85 100755 --- a/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize512_memData16.xml +++ b/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize512_memData16.xml @@ -54,11 +54,7 @@ - - - - - + @@ -73,11 +69,7 @@ - - - - - + @@ -87,15 +79,7 @@ - - - - - - - - - + @@ -109,11 +93,7 @@ - - - - - + diff --git a/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize512_memData2.xml b/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize512_memData2.xml index d114fb2fd7e..002b45c5bb0 100755 --- a/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize512_memData2.xml +++ b/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize512_memData2.xml @@ -54,11 +54,7 @@ - - - - - + @@ -73,11 +69,7 @@ - - - - - + @@ -87,15 +79,7 @@ - - - - - - - - - + @@ -109,11 +93,7 @@ - - - - - + diff --git a/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize512_memData32.xml b/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize512_memData32.xml index 6213c9d7d34..0d9ef1531a8 100755 --- a/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize512_memData32.xml +++ b/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize512_memData32.xml @@ -54,11 +54,7 @@ - - - - - + @@ -73,11 +69,7 @@ - - - - - + @@ -87,15 +79,7 @@ - - - - - - - - - + @@ -109,11 +93,7 @@ - - - - - + diff --git a/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize512_memData4.xml 
b/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize512_memData4.xml index c5ae11af773..19a06a24ee1 100755 --- a/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize512_memData4.xml +++ b/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize512_memData4.xml @@ -54,11 +54,7 @@ - - - - - + @@ -73,11 +69,7 @@ - - - - - + @@ -87,15 +79,7 @@ - - - - - - - - - + @@ -109,11 +93,7 @@ - - - - - + diff --git a/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize512_memData64.xml b/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize512_memData64.xml index e80dae9c4b3..3fcf05f37a6 100755 --- a/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize512_memData64.xml +++ b/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize512_memData64.xml @@ -54,11 +54,7 @@ - - - - - + @@ -73,11 +69,7 @@ - - - - - + @@ -87,15 +79,7 @@ - - - - - - - - - + @@ -109,11 +93,7 @@ - - - - - + diff --git a/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize512_memData8.xml b/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize512_memData8.xml index fd7b57d03a0..ac73e606ae9 100755 --- a/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize512_memData8.xml +++ b/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize512_memData8.xml @@ -54,11 +54,7 @@ - - - - - + @@ -73,11 +69,7 @@ - - - - - + @@ -87,15 +79,7 @@ - - - - - - - - - + @@ -109,11 +93,7 @@ - - - - - + diff --git a/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize524288_memData16.xml b/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize524288_memData16.xml index a6a57799dd6..06e4a4504de 100755 --- a/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize524288_memData16.xml +++ b/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize524288_memData16.xml @@ -54,11 +54,7 @@ - - - - - + @@ -73,11 +69,7 @@ - - - - - + @@ -87,15 +79,7 @@ - - - - - - - - - + @@ -109,11 +93,7 @@ - - - - - + diff --git a/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize524288_memData2.xml b/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize524288_memData2.xml index 
baa7cc0cc88..b23cee5a648 100755 --- a/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize524288_memData2.xml +++ b/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize524288_memData2.xml @@ -54,11 +54,7 @@ - - - - - + @@ -73,11 +69,7 @@ - - - - - + @@ -87,15 +79,7 @@ - - - - - - - - - + @@ -109,11 +93,7 @@ - - - - - + diff --git a/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize524288_memData32.xml b/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize524288_memData32.xml index b38dddd0a89..1b6325e812d 100755 --- a/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize524288_memData32.xml +++ b/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize524288_memData32.xml @@ -54,11 +54,7 @@ - - - - - + @@ -73,11 +69,7 @@ - - - - - + @@ -87,15 +79,7 @@ - - - - - - - - - + @@ -109,11 +93,7 @@ - - - - - + diff --git a/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize524288_memData4.xml b/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize524288_memData4.xml index fc07fe97fff..167252a292a 100755 --- a/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize524288_memData4.xml +++ b/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize524288_memData4.xml @@ -54,11 +54,7 @@ - - - - - + @@ -73,11 +69,7 @@ - - - - - + @@ -87,15 +79,7 @@ - - - - - - - - - + @@ -109,11 +93,7 @@ - - - - - + diff --git a/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize524288_memData64.xml b/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize524288_memData64.xml index 29e73e01c9d..7746aeb75e7 100755 --- a/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize524288_memData64.xml +++ b/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize524288_memData64.xml @@ -54,11 +54,7 @@ - - - - - + @@ -73,11 +69,7 @@ - - - - - + @@ -87,15 +79,7 @@ - - - - - - - - - + @@ -109,11 +93,7 @@ - - - - - + diff --git a/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize524288_memData8.xml b/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize524288_memData8.xml index a78968bbf00..d7ae45457e2 100755 --- 
a/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize524288_memData8.xml +++ b/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize524288_memData8.xml @@ -54,11 +54,7 @@ - - - - - + @@ -73,11 +69,7 @@ - - - - - + @@ -87,15 +79,7 @@ - - - - - - - - - + @@ -109,11 +93,7 @@ - - - - - + diff --git a/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize65536_memData16.xml b/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize65536_memData16.xml index fd1de7f5658..947512b1310 100755 --- a/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize65536_memData16.xml +++ b/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize65536_memData16.xml @@ -54,11 +54,7 @@ - - - - - + @@ -73,11 +69,7 @@ - - - - - + @@ -87,15 +79,7 @@ - - - - - - - - - + @@ -109,11 +93,7 @@ - - - - - + diff --git a/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize65536_memData2.xml b/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize65536_memData2.xml index 35dffcb173f..04e6f2648e0 100755 --- a/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize65536_memData2.xml +++ b/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize65536_memData2.xml @@ -54,11 +54,7 @@ - - - - - + @@ -73,11 +69,7 @@ - - - - - + @@ -87,15 +79,7 @@ - - - - - - - - - + @@ -109,11 +93,7 @@ - - - - - + diff --git a/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize65536_memData32.xml b/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize65536_memData32.xml index b6f08c861e8..b094da1ae80 100755 --- a/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize65536_memData32.xml +++ b/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize65536_memData32.xml @@ -54,11 +54,7 @@ - - - - - + @@ -73,11 +69,7 @@ - - - - - + @@ -87,15 +79,7 @@ - - - - - - - - - + @@ -109,11 +93,7 @@ - - - - - + diff --git a/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize65536_memData4.xml b/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize65536_memData4.xml index 4f0e87b250f..97355cb1f8f 100755 --- a/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize65536_memData4.xml +++ 
b/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize65536_memData4.xml @@ -54,11 +54,7 @@ - - - - - + @@ -73,11 +69,7 @@ - - - - - + @@ -87,15 +79,7 @@ - - - - - - - - - + @@ -109,11 +93,7 @@ - - - - - + diff --git a/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize65536_memData64.xml b/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize65536_memData64.xml index a8d004faa91..28eb6addc2f 100755 --- a/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize65536_memData64.xml +++ b/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize65536_memData64.xml @@ -54,11 +54,7 @@ - - - - - + @@ -73,11 +69,7 @@ - - - - - + @@ -87,15 +79,7 @@ - - - - - - - - - + @@ -109,11 +93,7 @@ - - - - - + diff --git a/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize65536_memData8.xml b/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize65536_memData8.xml index 16eefd1c2b3..4d704467857 100755 --- a/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize65536_memData8.xml +++ b/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize65536_memData8.xml @@ -54,11 +54,7 @@ - - - - - + @@ -73,11 +69,7 @@ - - - - - + @@ -87,15 +79,7 @@ - - - - - - - - - + @@ -109,11 +93,7 @@ - - - - - + diff --git a/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize8192_memData16.xml b/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize8192_memData16.xml index 19fa775c701..8d294a0c2f7 100755 --- a/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize8192_memData16.xml +++ b/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize8192_memData16.xml @@ -54,11 +54,7 @@ - - - - - + @@ -73,11 +69,7 @@ - - - - - + @@ -87,15 +79,7 @@ - - - - - - - - - + @@ -109,11 +93,7 @@ - - - - - + diff --git a/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize8192_memData2.xml b/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize8192_memData2.xml index 5c447a82083..409909d75d2 100755 --- a/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize8192_memData2.xml +++ b/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize8192_memData2.xml @@ -54,11 +54,7 
@@ - - - - - + @@ -73,11 +69,7 @@ - - - - - + @@ -87,15 +79,7 @@ - - - - - - - - - + @@ -109,11 +93,7 @@ - - - - - + diff --git a/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize8192_memData32.xml b/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize8192_memData32.xml index 7fe72a762bd..ac3c4b679d8 100755 --- a/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize8192_memData32.xml +++ b/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize8192_memData32.xml @@ -54,11 +54,7 @@ - - - - - + @@ -73,11 +69,7 @@ - - - - - + @@ -87,15 +79,7 @@ - - - - - - - - - + @@ -109,11 +93,7 @@ - - - - - + diff --git a/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize8192_memData4.xml b/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize8192_memData4.xml index 3505c8baf73..6b591134bec 100755 --- a/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize8192_memData4.xml +++ b/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize8192_memData4.xml @@ -54,11 +54,7 @@ - - - - - + @@ -73,11 +69,7 @@ - - - - - + @@ -87,15 +79,7 @@ - - - - - - - - - + @@ -109,11 +93,7 @@ - - - - - + diff --git a/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize8192_memData64.xml b/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize8192_memData64.xml index e8d64502a8e..c080f301e6d 100755 --- a/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize8192_memData64.xml +++ b/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize8192_memData64.xml @@ -54,11 +54,7 @@ - - - - - + @@ -73,11 +69,7 @@ - - - - - + @@ -87,15 +79,7 @@ - - - - - - - - - + @@ -109,11 +93,7 @@ - - - - - + diff --git a/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize8192_memData8.xml b/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize8192_memData8.xml index 55fc60138b3..7deeb2c24d6 100755 --- a/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize8192_memData8.xml +++ b/vtr_flow/arch/no_timing/memory_sweep/k4_N10_memSize8192_memData8.xml @@ -54,11 +54,7 @@ - - - - - + @@ -73,11 +69,7 @@ - - - - - + @@ -87,15 +79,7 @@ - - - - - - - - - + @@ -109,11 
+93,7 @@ - - - - - + diff --git a/vtr_flow/arch/nonuniform_chan_width/k6_N10_mem32K_40nm_nonuniform.xml b/vtr_flow/arch/nonuniform_chan_width/k6_N10_mem32K_40nm_nonuniform.xml index ea38df7f1a2..bc26c6a42b7 100644 --- a/vtr_flow/arch/nonuniform_chan_width/k6_N10_mem32K_40nm_nonuniform.xml +++ b/vtr_flow/arch/nonuniform_chan_width/k6_N10_mem32K_40nm_nonuniform.xml @@ -81,11 +81,7 @@ - - - - - + @@ -100,11 +96,7 @@ - - - - - + @@ -114,11 +106,7 @@ - - - - - + @@ -128,15 +116,7 @@ - - - - - - - - - + diff --git a/vtr_flow/arch/nonuniform_chan_width/k6_N10_mem32K_40nm_pulse.xml b/vtr_flow/arch/nonuniform_chan_width/k6_N10_mem32K_40nm_pulse.xml index 687d482236d..797d1d2f6e7 100644 --- a/vtr_flow/arch/nonuniform_chan_width/k6_N10_mem32K_40nm_pulse.xml +++ b/vtr_flow/arch/nonuniform_chan_width/k6_N10_mem32K_40nm_pulse.xml @@ -81,11 +81,7 @@ - - - - - + @@ -100,11 +96,7 @@ - - - - - + @@ -114,11 +106,7 @@ - - - - - + @@ -128,15 +116,7 @@ - - - - - - - - - + diff --git a/vtr_flow/arch/power/k6_N10_I40_Fi6_L1_frac0_ff1_45nm.xml b/vtr_flow/arch/power/k6_N10_I40_Fi6_L1_frac0_ff1_45nm.xml index b718251f668..c49ae41112b 100644 --- a/vtr_flow/arch/power/k6_N10_I40_Fi6_L1_frac0_ff1_45nm.xml +++ b/vtr_flow/arch/power/k6_N10_I40_Fi6_L1_frac0_ff1_45nm.xml @@ -59,11 +59,7 @@ - - - - - + @@ -78,11 +74,7 @@ - - - - - + @@ -92,11 +84,7 @@ - - - - - + @@ -106,15 +94,7 @@ - - - - - - - - - + diff --git a/vtr_flow/arch/power/k6_N10_I40_Fi6_L2_frac0_ff1_45nm.xml b/vtr_flow/arch/power/k6_N10_I40_Fi6_L2_frac0_ff1_45nm.xml index dfeefd9b9e6..382a585889f 100644 --- a/vtr_flow/arch/power/k6_N10_I40_Fi6_L2_frac0_ff1_45nm.xml +++ b/vtr_flow/arch/power/k6_N10_I40_Fi6_L2_frac0_ff1_45nm.xml @@ -59,11 +59,7 @@ - - - - - + @@ -78,11 +74,7 @@ - - - - - + @@ -92,11 +84,7 @@ - - - - - + @@ -106,15 +94,7 @@ - - - - - - - - - + diff --git a/vtr_flow/arch/power/k6_N10_I40_Fi6_L3_frac0_ff1_45nm.xml b/vtr_flow/arch/power/k6_N10_I40_Fi6_L3_frac0_ff1_45nm.xml index 3d5749fb886..b3ca770d0d5 100644 --- 
a/vtr_flow/arch/power/k6_N10_I40_Fi6_L3_frac0_ff1_45nm.xml +++ b/vtr_flow/arch/power/k6_N10_I40_Fi6_L3_frac0_ff1_45nm.xml @@ -59,11 +59,7 @@ - - - - - + @@ -78,11 +74,7 @@ - - - - - + @@ -92,11 +84,7 @@ - - - - - + @@ -106,15 +94,7 @@ - - - - - - - - - + diff --git a/vtr_flow/arch/power/k6_N10_I40_Fi6_L4_frac0_ff1_130nm.xml b/vtr_flow/arch/power/k6_N10_I40_Fi6_L4_frac0_ff1_130nm.xml index 2bc0bb89115..afe4cb6c6a8 100644 --- a/vtr_flow/arch/power/k6_N10_I40_Fi6_L4_frac0_ff1_130nm.xml +++ b/vtr_flow/arch/power/k6_N10_I40_Fi6_L4_frac0_ff1_130nm.xml @@ -59,11 +59,7 @@ - - - - - + @@ -78,11 +74,7 @@ - - - - - + @@ -92,11 +84,7 @@ - - - - - + @@ -106,15 +94,7 @@ - - - - - - - - - + diff --git a/vtr_flow/arch/power/k6_N10_I40_Fi6_L4_frac0_ff1_22nm.xml b/vtr_flow/arch/power/k6_N10_I40_Fi6_L4_frac0_ff1_22nm.xml index 6d18f270a6b..fda0f59f5bc 100644 --- a/vtr_flow/arch/power/k6_N10_I40_Fi6_L4_frac0_ff1_22nm.xml +++ b/vtr_flow/arch/power/k6_N10_I40_Fi6_L4_frac0_ff1_22nm.xml @@ -59,11 +59,7 @@ - - - - - + @@ -78,11 +74,7 @@ - - - - - + @@ -92,11 +84,7 @@ - - - - - + @@ -106,15 +94,7 @@ - - - - - - - - - + diff --git a/vtr_flow/arch/power/k6_N10_I40_Fi6_L4_frac0_ff1_45nm.xml b/vtr_flow/arch/power/k6_N10_I40_Fi6_L4_frac0_ff1_45nm.xml index 7de1dadad70..a8630c4c7c8 100644 --- a/vtr_flow/arch/power/k6_N10_I40_Fi6_L4_frac0_ff1_45nm.xml +++ b/vtr_flow/arch/power/k6_N10_I40_Fi6_L4_frac0_ff1_45nm.xml @@ -59,11 +59,7 @@ - - - - - + @@ -78,11 +74,7 @@ - - - - - + @@ -92,11 +84,7 @@ - - - - - + @@ -106,15 +94,7 @@ - - - - - - - - - + diff --git a/vtr_flow/arch/power/k6_N10_I40_Fi6_L4_frac0_ff1_C10_45nm.xml b/vtr_flow/arch/power/k6_N10_I40_Fi6_L4_frac0_ff1_C10_45nm.xml index c3365a077c0..7d9ab53c13a 100644 --- a/vtr_flow/arch/power/k6_N10_I40_Fi6_L4_frac0_ff1_C10_45nm.xml +++ b/vtr_flow/arch/power/k6_N10_I40_Fi6_L4_frac0_ff1_C10_45nm.xml @@ -59,11 +59,7 @@ - - - - - + @@ -78,11 +74,7 @@ - - - - - + @@ -92,11 +84,7 @@ - - - - - + @@ -106,15 +94,7 @@ - - - - - - - - - + diff --git 
a/vtr_flow/arch/power/k6_N10_I40_Fi6_L4_frac0_ff1_C15_45nm.xml b/vtr_flow/arch/power/k6_N10_I40_Fi6_L4_frac0_ff1_C15_45nm.xml index 29bb788c127..fe1ee636f1e 100644 --- a/vtr_flow/arch/power/k6_N10_I40_Fi6_L4_frac0_ff1_C15_45nm.xml +++ b/vtr_flow/arch/power/k6_N10_I40_Fi6_L4_frac0_ff1_C15_45nm.xml @@ -59,11 +59,7 @@ - - - - - + @@ -78,11 +74,7 @@ - - - - - + @@ -92,11 +84,7 @@ - - - - - + @@ -106,15 +94,7 @@ - - - - - - - - - + diff --git a/vtr_flow/arch/power/k6_N10_I40_Fi6_L4_frac0_ff1_C20_45nm.xml b/vtr_flow/arch/power/k6_N10_I40_Fi6_L4_frac0_ff1_C20_45nm.xml index 9beb66b499d..e9600fedda3 100644 --- a/vtr_flow/arch/power/k6_N10_I40_Fi6_L4_frac0_ff1_C20_45nm.xml +++ b/vtr_flow/arch/power/k6_N10_I40_Fi6_L4_frac0_ff1_C20_45nm.xml @@ -59,11 +59,7 @@ - - - - - + @@ -78,11 +74,7 @@ - - - - - + @@ -92,11 +84,7 @@ - - - - - + @@ -106,15 +94,7 @@ - - - - - - - - - + diff --git a/vtr_flow/arch/power/k6_N10_I40_Fi6_L4_frac0_ff1_C25_45nm.xml b/vtr_flow/arch/power/k6_N10_I40_Fi6_L4_frac0_ff1_C25_45nm.xml index d45c7e92b68..cbd5af7fbd9 100644 --- a/vtr_flow/arch/power/k6_N10_I40_Fi6_L4_frac0_ff1_C25_45nm.xml +++ b/vtr_flow/arch/power/k6_N10_I40_Fi6_L4_frac0_ff1_C25_45nm.xml @@ -59,11 +59,7 @@ - - - - - + @@ -78,11 +74,7 @@ - - - - - + @@ -92,11 +84,7 @@ - - - - - + @@ -106,15 +94,7 @@ - - - - - - - - - + diff --git a/vtr_flow/arch/power/k6_N10_I40_Fi6_L4_frac0_ff1_C30_45nm.xml b/vtr_flow/arch/power/k6_N10_I40_Fi6_L4_frac0_ff1_C30_45nm.xml index ae9fa49a16e..11a90988ac2 100644 --- a/vtr_flow/arch/power/k6_N10_I40_Fi6_L4_frac0_ff1_C30_45nm.xml +++ b/vtr_flow/arch/power/k6_N10_I40_Fi6_L4_frac0_ff1_C30_45nm.xml @@ -59,11 +59,7 @@ - - - - - + @@ -78,11 +74,7 @@ - - - - - + @@ -92,11 +84,7 @@ - - - - - + @@ -106,15 +94,7 @@ - - - - - - - - - + diff --git a/vtr_flow/arch/power/k6_N10_I40_Fi6_L4_frac0_ff1_C35_45nm.xml b/vtr_flow/arch/power/k6_N10_I40_Fi6_L4_frac0_ff1_C35_45nm.xml index 4374fdb86eb..92c4f741736 100644 --- a/vtr_flow/arch/power/k6_N10_I40_Fi6_L4_frac0_ff1_C35_45nm.xml 
+++ b/vtr_flow/arch/power/k6_N10_I40_Fi6_L4_frac0_ff1_C35_45nm.xml @@ -59,11 +59,7 @@ - - - - - + @@ -78,11 +74,7 @@ - - - - - + @@ -92,11 +84,7 @@ - - - - - + @@ -106,15 +94,7 @@ - - - - - - - - - + diff --git a/vtr_flow/arch/power/k6_N10_I40_Fi6_L4_frac0_ff1_C40_45nm.xml b/vtr_flow/arch/power/k6_N10_I40_Fi6_L4_frac0_ff1_C40_45nm.xml index 6bf67bc963f..ae845b39dfe 100644 --- a/vtr_flow/arch/power/k6_N10_I40_Fi6_L4_frac0_ff1_C40_45nm.xml +++ b/vtr_flow/arch/power/k6_N10_I40_Fi6_L4_frac0_ff1_C40_45nm.xml @@ -59,11 +59,7 @@ - - - - - + @@ -78,11 +74,7 @@ - - - - - + @@ -92,11 +84,7 @@ - - - - - + @@ -106,15 +94,7 @@ - - - - - - - - - + diff --git a/vtr_flow/arch/power/k6_N10_I40_Fi6_L4_frac0_ff1_C45_45nm.xml b/vtr_flow/arch/power/k6_N10_I40_Fi6_L4_frac0_ff1_C45_45nm.xml index bc2383df137..da2a01116ff 100644 --- a/vtr_flow/arch/power/k6_N10_I40_Fi6_L4_frac0_ff1_C45_45nm.xml +++ b/vtr_flow/arch/power/k6_N10_I40_Fi6_L4_frac0_ff1_C45_45nm.xml @@ -59,11 +59,7 @@ - - - - - + @@ -78,11 +74,7 @@ - - - - - + @@ -92,11 +84,7 @@ - - - - - + @@ -106,15 +94,7 @@ - - - - - - - - - + diff --git a/vtr_flow/arch/power/k6_N10_I40_Fi6_L4_frac0_ff1_C50_45nm.xml b/vtr_flow/arch/power/k6_N10_I40_Fi6_L4_frac0_ff1_C50_45nm.xml index 209bf155396..0b66cfdea55 100644 --- a/vtr_flow/arch/power/k6_N10_I40_Fi6_L4_frac0_ff1_C50_45nm.xml +++ b/vtr_flow/arch/power/k6_N10_I40_Fi6_L4_frac0_ff1_C50_45nm.xml @@ -59,11 +59,7 @@ - - - - - + @@ -78,11 +74,7 @@ - - - - - + @@ -92,11 +84,7 @@ - - - - - + @@ -106,15 +94,7 @@ - - - - - - - - - + diff --git a/vtr_flow/arch/power/k6_N10_I40_Fi6_L4_frac0_ff1_C5_45nm.xml b/vtr_flow/arch/power/k6_N10_I40_Fi6_L4_frac0_ff1_C5_45nm.xml index 988b203ceac..0e8e20faa7e 100644 --- a/vtr_flow/arch/power/k6_N10_I40_Fi6_L4_frac0_ff1_C5_45nm.xml +++ b/vtr_flow/arch/power/k6_N10_I40_Fi6_L4_frac0_ff1_C5_45nm.xml @@ -59,11 +59,7 @@ - - - - - + @@ -78,11 +74,7 @@ - - - - - + @@ -92,11 +84,7 @@ - - - - - + @@ -106,15 +94,7 @@ - - - - - - - - - + diff --git 
a/vtr_flow/arch/power/k6_N10_I40_Fi6_L4_frac1_ff1_45nm.xml b/vtr_flow/arch/power/k6_N10_I40_Fi6_L4_frac1_ff1_45nm.xml index 727b27ce2c6..614133454ff 100644 --- a/vtr_flow/arch/power/k6_N10_I40_Fi6_L4_frac1_ff1_45nm.xml +++ b/vtr_flow/arch/power/k6_N10_I40_Fi6_L4_frac1_ff1_45nm.xml @@ -59,11 +59,7 @@ - - - - - + @@ -78,11 +74,7 @@ - - - - - + @@ -92,11 +84,7 @@ - - - - - + @@ -106,15 +94,7 @@ - - - - - - - - - + diff --git a/vtr_flow/arch/power/k6_N10_I40_Fi6_L4_frac1_ff2_45nm.xml b/vtr_flow/arch/power/k6_N10_I40_Fi6_L4_frac1_ff2_45nm.xml index 1b4c88da4e8..490c73f15e7 100644 --- a/vtr_flow/arch/power/k6_N10_I40_Fi6_L4_frac1_ff2_45nm.xml +++ b/vtr_flow/arch/power/k6_N10_I40_Fi6_L4_frac1_ff2_45nm.xml @@ -59,11 +59,7 @@ - - - - - + @@ -78,11 +74,7 @@ - - - - - + @@ -92,11 +84,7 @@ - - - - - + @@ -106,15 +94,7 @@ - - - - - - - - - + diff --git a/vtr_flow/arch/power/k6_N10_I40_Fi6_L4_frac1_ff2_C10_45nm.xml b/vtr_flow/arch/power/k6_N10_I40_Fi6_L4_frac1_ff2_C10_45nm.xml index 5f08e4b7c1e..5dfe501e5c1 100644 --- a/vtr_flow/arch/power/k6_N10_I40_Fi6_L4_frac1_ff2_C10_45nm.xml +++ b/vtr_flow/arch/power/k6_N10_I40_Fi6_L4_frac1_ff2_C10_45nm.xml @@ -59,11 +59,7 @@ - - - - - + @@ -78,11 +74,7 @@ - - - - - + @@ -92,11 +84,7 @@ - - - - - + @@ -106,15 +94,7 @@ - - - - - - - - - + diff --git a/vtr_flow/arch/power/k6_N10_I40_Fi6_L4_frac1_ff2_C15_45nm.xml b/vtr_flow/arch/power/k6_N10_I40_Fi6_L4_frac1_ff2_C15_45nm.xml index 1bba09b65a0..a81672146f4 100644 --- a/vtr_flow/arch/power/k6_N10_I40_Fi6_L4_frac1_ff2_C15_45nm.xml +++ b/vtr_flow/arch/power/k6_N10_I40_Fi6_L4_frac1_ff2_C15_45nm.xml @@ -59,11 +59,7 @@ - - - - - + @@ -78,11 +74,7 @@ - - - - - + @@ -92,11 +84,7 @@ - - - - - + @@ -106,15 +94,7 @@ - - - - - - - - - + diff --git a/vtr_flow/arch/power/k6_N10_I40_Fi6_L4_frac1_ff2_C20_45nm.xml b/vtr_flow/arch/power/k6_N10_I40_Fi6_L4_frac1_ff2_C20_45nm.xml index e5b69cfe269..3e03809a74f 100644 --- a/vtr_flow/arch/power/k6_N10_I40_Fi6_L4_frac1_ff2_C20_45nm.xml +++ 
b/vtr_flow/arch/power/k6_N10_I40_Fi6_L4_frac1_ff2_C20_45nm.xml @@ -59,11 +59,7 @@ - - - - - + @@ -78,11 +74,7 @@ - - - - - + @@ -92,11 +84,7 @@ - - - - - + @@ -106,15 +94,7 @@ - - - - - - - - - + diff --git a/vtr_flow/arch/power/k6_N10_I40_Fi6_L4_frac1_ff2_C25_45nm.xml b/vtr_flow/arch/power/k6_N10_I40_Fi6_L4_frac1_ff2_C25_45nm.xml index 4e1f262a479..5d7d7869beb 100644 --- a/vtr_flow/arch/power/k6_N10_I40_Fi6_L4_frac1_ff2_C25_45nm.xml +++ b/vtr_flow/arch/power/k6_N10_I40_Fi6_L4_frac1_ff2_C25_45nm.xml @@ -59,11 +59,7 @@ - - - - - + @@ -78,11 +74,7 @@ - - - - - + @@ -92,11 +84,7 @@ - - - - - + @@ -106,15 +94,7 @@ - - - - - - - - - + diff --git a/vtr_flow/arch/power/k6_N10_I40_Fi6_L4_frac1_ff2_C30_45nm.xml b/vtr_flow/arch/power/k6_N10_I40_Fi6_L4_frac1_ff2_C30_45nm.xml index 47dae6ce90e..21381ff943f 100644 --- a/vtr_flow/arch/power/k6_N10_I40_Fi6_L4_frac1_ff2_C30_45nm.xml +++ b/vtr_flow/arch/power/k6_N10_I40_Fi6_L4_frac1_ff2_C30_45nm.xml @@ -59,11 +59,7 @@ - - - - - + @@ -78,11 +74,7 @@ - - - - - + @@ -92,11 +84,7 @@ - - - - - + @@ -106,15 +94,7 @@ - - - - - - - - - + diff --git a/vtr_flow/arch/power/k6_N10_I40_Fi6_L4_frac1_ff2_C35_45nm.xml b/vtr_flow/arch/power/k6_N10_I40_Fi6_L4_frac1_ff2_C35_45nm.xml index ce07bd01eef..f4720dc52a0 100644 --- a/vtr_flow/arch/power/k6_N10_I40_Fi6_L4_frac1_ff2_C35_45nm.xml +++ b/vtr_flow/arch/power/k6_N10_I40_Fi6_L4_frac1_ff2_C35_45nm.xml @@ -59,11 +59,7 @@ - - - - - + @@ -78,11 +74,7 @@ - - - - - + @@ -92,11 +84,7 @@ - - - - - + @@ -106,15 +94,7 @@ - - - - - - - - - + diff --git a/vtr_flow/arch/power/k6_N10_I40_Fi6_L4_frac1_ff2_C40_45nm.xml b/vtr_flow/arch/power/k6_N10_I40_Fi6_L4_frac1_ff2_C40_45nm.xml index f4adc47f9e0..8615138ab29 100644 --- a/vtr_flow/arch/power/k6_N10_I40_Fi6_L4_frac1_ff2_C40_45nm.xml +++ b/vtr_flow/arch/power/k6_N10_I40_Fi6_L4_frac1_ff2_C40_45nm.xml @@ -59,11 +59,7 @@ - - - - - + @@ -78,11 +74,7 @@ - - - - - + @@ -92,11 +84,7 @@ - - - - - + @@ -106,15 +94,7 @@ - - - - - - - - - + diff --git 
a/vtr_flow/arch/power/k6_N10_I40_Fi6_L4_frac1_ff2_C45_45nm.xml b/vtr_flow/arch/power/k6_N10_I40_Fi6_L4_frac1_ff2_C45_45nm.xml index 187207d2e62..a3d164b9e90 100644 --- a/vtr_flow/arch/power/k6_N10_I40_Fi6_L4_frac1_ff2_C45_45nm.xml +++ b/vtr_flow/arch/power/k6_N10_I40_Fi6_L4_frac1_ff2_C45_45nm.xml @@ -59,11 +59,7 @@ - - - - - + @@ -78,11 +74,7 @@ - - - - - + @@ -92,11 +84,7 @@ - - - - - + @@ -106,15 +94,7 @@ - - - - - - - - - + diff --git a/vtr_flow/arch/power/k6_N10_I40_Fi6_L4_frac1_ff2_C50_45nm.xml b/vtr_flow/arch/power/k6_N10_I40_Fi6_L4_frac1_ff2_C50_45nm.xml index 4b3b7a51c03..7723512c110 100644 --- a/vtr_flow/arch/power/k6_N10_I40_Fi6_L4_frac1_ff2_C50_45nm.xml +++ b/vtr_flow/arch/power/k6_N10_I40_Fi6_L4_frac1_ff2_C50_45nm.xml @@ -59,11 +59,7 @@ - - - - - + @@ -78,11 +74,7 @@ - - - - - + @@ -92,11 +84,7 @@ - - - - - + @@ -106,15 +94,7 @@ - - - - - - - - - + diff --git a/vtr_flow/arch/power/k6_N10_I40_Fi6_L4_frac1_ff2_C55_45nm.xml b/vtr_flow/arch/power/k6_N10_I40_Fi6_L4_frac1_ff2_C55_45nm.xml index 31fd17f2396..0c93813eeba 100644 --- a/vtr_flow/arch/power/k6_N10_I40_Fi6_L4_frac1_ff2_C55_45nm.xml +++ b/vtr_flow/arch/power/k6_N10_I40_Fi6_L4_frac1_ff2_C55_45nm.xml @@ -59,11 +59,7 @@ - - - - - + @@ -78,11 +74,7 @@ - - - - - + @@ -92,11 +84,7 @@ - - - - - + @@ -106,15 +94,7 @@ - - - - - - - - - + diff --git a/vtr_flow/arch/power/k6_N10_I40_Fi6_L4_frac1_ff2_C5_45nm.xml b/vtr_flow/arch/power/k6_N10_I40_Fi6_L4_frac1_ff2_C5_45nm.xml index 2aafc02e36d..9e872b92471 100644 --- a/vtr_flow/arch/power/k6_N10_I40_Fi6_L4_frac1_ff2_C5_45nm.xml +++ b/vtr_flow/arch/power/k6_N10_I40_Fi6_L4_frac1_ff2_C5_45nm.xml @@ -59,11 +59,7 @@ - - - - - + @@ -78,11 +74,7 @@ - - - - - + @@ -92,11 +84,7 @@ - - - - - + @@ -106,15 +94,7 @@ - - - - - - - - - + diff --git a/vtr_flow/arch/power/k6_N10_I40_Fi6_L4_frac1_ff2_C60_45nm.xml b/vtr_flow/arch/power/k6_N10_I40_Fi6_L4_frac1_ff2_C60_45nm.xml index f527c15e67c..008e80d4829 100644 --- a/vtr_flow/arch/power/k6_N10_I40_Fi6_L4_frac1_ff2_C60_45nm.xml +++ 
b/vtr_flow/arch/power/k6_N10_I40_Fi6_L4_frac1_ff2_C60_45nm.xml @@ -59,11 +59,7 @@ - - - - - + @@ -78,11 +74,7 @@ - - - - - + @@ -92,11 +84,7 @@ - - - - - + @@ -106,15 +94,7 @@ - - - - - - - - - + diff --git a/vtr_flow/arch/power/k6_N10_I40_Fi6_L5_frac0_ff1_45nm.xml b/vtr_flow/arch/power/k6_N10_I40_Fi6_L5_frac0_ff1_45nm.xml index 25548bee024..3e8be2dc5f0 100644 --- a/vtr_flow/arch/power/k6_N10_I40_Fi6_L5_frac0_ff1_45nm.xml +++ b/vtr_flow/arch/power/k6_N10_I40_Fi6_L5_frac0_ff1_45nm.xml @@ -59,11 +59,7 @@ - - - - - + @@ -78,11 +74,7 @@ - - - - - + @@ -92,11 +84,7 @@ - - - - - + @@ -106,15 +94,7 @@ - - - - - - - - - + diff --git a/vtr_flow/arch/power/k6_N10_I40_Fi6_L6_frac0_ff1_45nm.xml b/vtr_flow/arch/power/k6_N10_I40_Fi6_L6_frac0_ff1_45nm.xml index ccd31ec4ef2..4fa56a0f98e 100644 --- a/vtr_flow/arch/power/k6_N10_I40_Fi6_L6_frac0_ff1_45nm.xml +++ b/vtr_flow/arch/power/k6_N10_I40_Fi6_L6_frac0_ff1_45nm.xml @@ -59,11 +59,7 @@ - - - - - + @@ -78,11 +74,7 @@ - - - - - + @@ -92,11 +84,7 @@ - - - - - + @@ -106,15 +94,7 @@ - - - - - - - - - + diff --git a/vtr_flow/arch/power/k6_N10_I40_Fi7_L4_frac1_ff1_45nm.xml b/vtr_flow/arch/power/k6_N10_I40_Fi7_L4_frac1_ff1_45nm.xml index 558f11a29b4..73d0cba12a9 100644 --- a/vtr_flow/arch/power/k6_N10_I40_Fi7_L4_frac1_ff1_45nm.xml +++ b/vtr_flow/arch/power/k6_N10_I40_Fi7_L4_frac1_ff1_45nm.xml @@ -59,11 +59,7 @@ - - - - - + @@ -78,11 +74,7 @@ - - - - - + @@ -92,11 +84,7 @@ - - - - - + @@ -106,15 +94,7 @@ - - - - - - - - - + diff --git a/vtr_flow/arch/power/k6_N10_I40_Fi7_L4_frac1_ff2_45nm.xml b/vtr_flow/arch/power/k6_N10_I40_Fi7_L4_frac1_ff2_45nm.xml index f959da6cf5e..e74e92cb560 100644 --- a/vtr_flow/arch/power/k6_N10_I40_Fi7_L4_frac1_ff2_45nm.xml +++ b/vtr_flow/arch/power/k6_N10_I40_Fi7_L4_frac1_ff2_45nm.xml @@ -59,11 +59,7 @@ - - - - - + @@ -78,11 +74,7 @@ - - - - - + @@ -92,11 +84,7 @@ - - - - - + @@ -106,15 +94,7 @@ - - - - - - - - - + diff --git a/vtr_flow/arch/power/k6_N10_I40_Fi8_L4_frac1_ff1_45nm.xml 
b/vtr_flow/arch/power/k6_N10_I40_Fi8_L4_frac1_ff1_45nm.xml index 63c6481765a..a374dd063b4 100644 --- a/vtr_flow/arch/power/k6_N10_I40_Fi8_L4_frac1_ff1_45nm.xml +++ b/vtr_flow/arch/power/k6_N10_I40_Fi8_L4_frac1_ff1_45nm.xml @@ -59,11 +59,7 @@ - - - - - + @@ -78,11 +74,7 @@ - - - - - + @@ -92,11 +84,7 @@ - - - - - + @@ -106,15 +94,7 @@ - - - - - - - - - + diff --git a/vtr_flow/arch/power/k6_N10_I40_Fi8_L4_frac1_ff2_45nm.xml b/vtr_flow/arch/power/k6_N10_I40_Fi8_L4_frac1_ff2_45nm.xml index 46df17691fa..de05dbdf150 100644 --- a/vtr_flow/arch/power/k6_N10_I40_Fi8_L4_frac1_ff2_45nm.xml +++ b/vtr_flow/arch/power/k6_N10_I40_Fi8_L4_frac1_ff2_45nm.xml @@ -59,11 +59,7 @@ - - - - - + @@ -78,11 +74,7 @@ - - - - - + @@ -92,11 +84,7 @@ - - - - - + @@ -106,15 +94,7 @@ - - - - - - - - - + diff --git a/vtr_flow/arch/power/k6_N10_I47_Fi7_L4_frac1_ff1_45nm.xml b/vtr_flow/arch/power/k6_N10_I47_Fi7_L4_frac1_ff1_45nm.xml index a777b1bfca2..bf800f56827 100644 --- a/vtr_flow/arch/power/k6_N10_I47_Fi7_L4_frac1_ff1_45nm.xml +++ b/vtr_flow/arch/power/k6_N10_I47_Fi7_L4_frac1_ff1_45nm.xml @@ -59,11 +59,7 @@ - - - - - + @@ -78,11 +74,7 @@ - - - - - + @@ -92,11 +84,7 @@ - - - - - + @@ -106,15 +94,7 @@ - - - - - - - - - + diff --git a/vtr_flow/arch/power/k6_N10_I47_Fi7_L4_frac1_ff2_45nm.xml b/vtr_flow/arch/power/k6_N10_I47_Fi7_L4_frac1_ff2_45nm.xml index 69ed65b4fad..c02fbf7661f 100644 --- a/vtr_flow/arch/power/k6_N10_I47_Fi7_L4_frac1_ff2_45nm.xml +++ b/vtr_flow/arch/power/k6_N10_I47_Fi7_L4_frac1_ff2_45nm.xml @@ -59,11 +59,7 @@ - - - - - + @@ -78,11 +74,7 @@ - - - - - + @@ -92,11 +84,7 @@ - - - - - + @@ -106,15 +94,7 @@ - - - - - - - - - + diff --git a/vtr_flow/arch/power/k6_N10_I53_Fi8_L4_frac1_ff1_45nm.xml b/vtr_flow/arch/power/k6_N10_I53_Fi8_L4_frac1_ff1_45nm.xml index d6046cdb54f..7352f987d95 100644 --- a/vtr_flow/arch/power/k6_N10_I53_Fi8_L4_frac1_ff1_45nm.xml +++ b/vtr_flow/arch/power/k6_N10_I53_Fi8_L4_frac1_ff1_45nm.xml @@ -59,11 +59,7 @@ - - - - - + @@ -78,11 +74,7 @@ - - - - - + @@ -92,11 
+84,7 @@ - - - - - + @@ -106,15 +94,7 @@ - - - - - - - - - + diff --git a/vtr_flow/arch/power/k6_N10_I53_Fi8_L4_frac1_ff2_45nm.xml b/vtr_flow/arch/power/k6_N10_I53_Fi8_L4_frac1_ff2_45nm.xml index 8084556e5a2..7e989afeb06 100644 --- a/vtr_flow/arch/power/k6_N10_I53_Fi8_L4_frac1_ff2_45nm.xml +++ b/vtr_flow/arch/power/k6_N10_I53_Fi8_L4_frac1_ff2_45nm.xml @@ -59,11 +59,7 @@ - - - - - + @@ -78,11 +74,7 @@ - - - - - + @@ -92,11 +84,7 @@ - - - - - + @@ -106,15 +94,7 @@ - - - - - - - - - + diff --git a/vtr_flow/arch/routing_mode/arch.xml b/vtr_flow/arch/routing_mode/arch.xml index 0531516d3cf..e8e2ce9a7ea 100644 --- a/vtr_flow/arch/routing_mode/arch.xml +++ b/vtr_flow/arch/routing_mode/arch.xml @@ -43,12 +43,7 @@ - - - - - - + @@ -64,10 +59,7 @@ - - - - + diff --git a/vtr_flow/arch/routing_mode/slicem.xml b/vtr_flow/arch/routing_mode/slicem.xml index b666c24a1f7..4215c3ace38 100644 --- a/vtr_flow/arch/routing_mode/slicem.xml +++ b/vtr_flow/arch/routing_mode/slicem.xml @@ -63,10 +63,7 @@ - - - - + @@ -80,58 +77,7 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + diff --git a/vtr_flow/arch/timing/EArch.xml b/vtr_flow/arch/timing/EArch.xml index 3664908ff0f..6f83218cd67 100644 --- a/vtr_flow/arch/timing/EArch.xml +++ b/vtr_flow/arch/timing/EArch.xml @@ -151,11 +151,7 @@ - - - - - + @@ -170,16 +166,7 @@ - - - - - - - - - - + @@ -197,11 +184,7 @@ - - - - - + @@ -211,15 +194,7 @@ - - - - - - - - - + diff --git a/vtr_flow/arch/timing/fixed_size/fixed_k6_N8_gate_boost_0.2V_22nm.xml b/vtr_flow/arch/timing/fixed_size/fixed_k6_N8_gate_boost_0.2V_22nm.xml index 9effe996a87..98d311e41ca 100755 --- a/vtr_flow/arch/timing/fixed_size/fixed_k6_N8_gate_boost_0.2V_22nm.xml +++ b/vtr_flow/arch/timing/fixed_size/fixed_k6_N8_gate_boost_0.2V_22nm.xml @@ -67,11 +67,7 @@ Scaling assumptions from 40nm to 22nm: delay constant area drop (22/40)^2 but si - - - - - + @@ -86,14 +82,7 @@ Scaling assumptions from 40nm to 22nm: delay constant area 
drop (22/40)^2 but si - - - - - - - - + @@ -109,11 +98,7 @@ Scaling assumptions from 40nm to 22nm: delay constant area drop (22/40)^2 but si - - - - - + @@ -123,15 +108,7 @@ Scaling assumptions from 40nm to 22nm: delay constant area drop (22/40)^2 but si - - - - - - - - - + diff --git a/vtr_flow/arch/timing/fixed_size/fixed_k6_N8_lookahead_chain_gate_boost_0.2V_22nm.xml b/vtr_flow/arch/timing/fixed_size/fixed_k6_N8_lookahead_chain_gate_boost_0.2V_22nm.xml index 46fcb62e772..e2fad47db2a 100755 --- a/vtr_flow/arch/timing/fixed_size/fixed_k6_N8_lookahead_chain_gate_boost_0.2V_22nm.xml +++ b/vtr_flow/arch/timing/fixed_size/fixed_k6_N8_lookahead_chain_gate_boost_0.2V_22nm.xml @@ -85,11 +85,7 @@ Each 2-to-1 mux adds 6 MWTAs for SRAM cell. - - - - - + @@ -104,16 +100,7 @@ Each 2-to-1 mux adds 6 MWTAs for SRAM cell. - - - - - - - - - - + @@ -135,11 +122,7 @@ Each 2-to-1 mux adds 6 MWTAs for SRAM cell. - - - - - + @@ -149,15 +132,7 @@ Each 2-to-1 mux adds 6 MWTAs for SRAM cell. - - - - - - - - - + diff --git a/vtr_flow/arch/timing/fixed_size/fixed_k6_N8_lookahead_unbalanced_chain_gate_boost_0.2V_22nm.xml b/vtr_flow/arch/timing/fixed_size/fixed_k6_N8_lookahead_unbalanced_chain_gate_boost_0.2V_22nm.xml index 4e5b94b1370..fc9bbce42d7 100755 --- a/vtr_flow/arch/timing/fixed_size/fixed_k6_N8_lookahead_unbalanced_chain_gate_boost_0.2V_22nm.xml +++ b/vtr_flow/arch/timing/fixed_size/fixed_k6_N8_lookahead_unbalanced_chain_gate_boost_0.2V_22nm.xml @@ -88,11 +88,7 @@ Each 2-to-1 mux adds 6 MWTAs for SRAM cell. - - - - - + @@ -107,16 +103,7 @@ Each 2-to-1 mux adds 6 MWTAs for SRAM cell. - - - - - - - - - - + @@ -138,11 +125,7 @@ Each 2-to-1 mux adds 6 MWTAs for SRAM cell. - - - - - + @@ -152,15 +135,7 @@ Each 2-to-1 mux adds 6 MWTAs for SRAM cell. 
- - - - - - - - - + diff --git a/vtr_flow/arch/timing/fixed_size/fixed_k6_N8_ripple_chain_gate_boost_0.2V_22nm.xml b/vtr_flow/arch/timing/fixed_size/fixed_k6_N8_ripple_chain_gate_boost_0.2V_22nm.xml index 4af6242e8a1..22307383828 100755 --- a/vtr_flow/arch/timing/fixed_size/fixed_k6_N8_ripple_chain_gate_boost_0.2V_22nm.xml +++ b/vtr_flow/arch/timing/fixed_size/fixed_k6_N8_ripple_chain_gate_boost_0.2V_22nm.xml @@ -85,11 +85,7 @@ Each 2-to-1 mux adds 6 MWTAs for SRAM cell. - - - - - + @@ -104,16 +100,7 @@ Each 2-to-1 mux adds 6 MWTAs for SRAM cell. - - - - - - - - - - + @@ -135,11 +122,7 @@ Each 2-to-1 mux adds 6 MWTAs for SRAM cell. - - - - - + @@ -149,15 +132,7 @@ Each 2-to-1 mux adds 6 MWTAs for SRAM cell. - - - - - - - - - + diff --git a/vtr_flow/arch/timing/fixed_size/fixed_k6_N8_unbalanced_ripple_chain_gate_boost_0.2V_22nm.xml b/vtr_flow/arch/timing/fixed_size/fixed_k6_N8_unbalanced_ripple_chain_gate_boost_0.2V_22nm.xml index 96c63f0b9b0..22b6ac44f6d 100755 --- a/vtr_flow/arch/timing/fixed_size/fixed_k6_N8_unbalanced_ripple_chain_gate_boost_0.2V_22nm.xml +++ b/vtr_flow/arch/timing/fixed_size/fixed_k6_N8_unbalanced_ripple_chain_gate_boost_0.2V_22nm.xml @@ -87,11 +87,7 @@ Each 2-to-1 mux adds 6 MWTAs for SRAM cell. - - - - - + @@ -106,16 +102,7 @@ Each 2-to-1 mux adds 6 MWTAs for SRAM cell. - - - - - - - - - - + @@ -137,11 +124,7 @@ Each 2-to-1 mux adds 6 MWTAs for SRAM cell. - - - - - + @@ -151,15 +134,7 @@ Each 2-to-1 mux adds 6 MWTAs for SRAM cell. - - - - - - - - - + diff --git a/vtr_flow/arch/timing/fixed_size/fixed_k6_frac_2ripple_N8_22nm.xml b/vtr_flow/arch/timing/fixed_size/fixed_k6_frac_2ripple_N8_22nm.xml index 3b5901d7d2f..cf6f3ca18e6 100644 --- a/vtr_flow/arch/timing/fixed_size/fixed_k6_frac_2ripple_N8_22nm.xml +++ b/vtr_flow/arch/timing/fixed_size/fixed_k6_frac_2ripple_N8_22nm.xml @@ -83,11 +83,7 @@ Each 2-to-1 mux adds 6 MWTAs for SRAM cell. - - - - - + @@ -102,16 +98,7 @@ Each 2-to-1 mux adds 6 MWTAs for SRAM cell. 
- - - - - - - - - - + @@ -132,11 +119,7 @@ Each 2-to-1 mux adds 6 MWTAs for SRAM cell. - - - - - + @@ -146,15 +129,7 @@ Each 2-to-1 mux adds 6 MWTAs for SRAM cell. - - - - - - - - - + diff --git a/vtr_flow/arch/timing/fixed_size/fixed_k6_frac_2uripple_N8_22nm.xml b/vtr_flow/arch/timing/fixed_size/fixed_k6_frac_2uripple_N8_22nm.xml index e20915cc323..b2676a9fb76 100644 --- a/vtr_flow/arch/timing/fixed_size/fixed_k6_frac_2uripple_N8_22nm.xml +++ b/vtr_flow/arch/timing/fixed_size/fixed_k6_frac_2uripple_N8_22nm.xml @@ -78,11 +78,7 @@ Scaling assumptions from 40nm to 22nm: delay constant area drop (22/40)^2 but si - - - - - + @@ -97,16 +93,7 @@ Scaling assumptions from 40nm to 22nm: delay constant area drop (22/40)^2 but si - - - - - - - - - - + @@ -127,11 +114,7 @@ Scaling assumptions from 40nm to 22nm: delay constant area drop (22/40)^2 but si - - - - - + @@ -141,15 +124,7 @@ Scaling assumptions from 40nm to 22nm: delay constant area drop (22/40)^2 but si - - - - - - - - - + diff --git a/vtr_flow/arch/timing/fixed_size/fixed_k6_frac_N8_22nm.xml b/vtr_flow/arch/timing/fixed_size/fixed_k6_frac_N8_22nm.xml index 2abbf03f48f..b68d6759635 100644 --- a/vtr_flow/arch/timing/fixed_size/fixed_k6_frac_N8_22nm.xml +++ b/vtr_flow/arch/timing/fixed_size/fixed_k6_frac_N8_22nm.xml @@ -67,11 +67,7 @@ Scaling assumptions from 40nm to 22nm: delay constant area drop (22/40)^2 but si - - - - - + @@ -86,14 +82,7 @@ Scaling assumptions from 40nm to 22nm: delay constant area drop (22/40)^2 but si - - - - - - - - + @@ -109,11 +98,7 @@ Scaling assumptions from 40nm to 22nm: delay constant area drop (22/40)^2 but si - - - - - + @@ -123,15 +108,7 @@ Scaling assumptions from 40nm to 22nm: delay constant area drop (22/40)^2 but si - - - - - - - - - + diff --git a/vtr_flow/arch/timing/fixed_size/fixed_k6_frac_ripple_N8_22nm.xml b/vtr_flow/arch/timing/fixed_size/fixed_k6_frac_ripple_N8_22nm.xml index 8e251a66c0c..bef5c735918 100644 --- 
a/vtr_flow/arch/timing/fixed_size/fixed_k6_frac_ripple_N8_22nm.xml +++ b/vtr_flow/arch/timing/fixed_size/fixed_k6_frac_ripple_N8_22nm.xml @@ -78,11 +78,7 @@ Scaling assumptions from 40nm to 22nm: delay constant area drop (22/40)^2 but si - - - - - + @@ -97,16 +93,7 @@ Scaling assumptions from 40nm to 22nm: delay constant area drop (22/40)^2 but si - - - - - - - - - - + @@ -127,11 +114,7 @@ Scaling assumptions from 40nm to 22nm: delay constant area drop (22/40)^2 but si - - - - - + @@ -141,15 +124,7 @@ Scaling assumptions from 40nm to 22nm: delay constant area drop (22/40)^2 but si - - - - - - - - - + diff --git a/vtr_flow/arch/timing/fixed_size/fixed_k6_frac_uripple_N8_22nm.xml b/vtr_flow/arch/timing/fixed_size/fixed_k6_frac_uripple_N8_22nm.xml index 9402c319a53..f1eb631ad4b 100644 --- a/vtr_flow/arch/timing/fixed_size/fixed_k6_frac_uripple_N8_22nm.xml +++ b/vtr_flow/arch/timing/fixed_size/fixed_k6_frac_uripple_N8_22nm.xml @@ -78,11 +78,7 @@ Scaling assumptions from 40nm to 22nm: delay constant area drop (22/40)^2 but si - - - - - + @@ -97,16 +93,7 @@ Scaling assumptions from 40nm to 22nm: delay constant area drop (22/40)^2 but si - - - - - - - - - - + @@ -127,11 +114,7 @@ Scaling assumptions from 40nm to 22nm: delay constant area drop (22/40)^2 but si - - - - - + @@ -141,15 +124,7 @@ Scaling assumptions from 40nm to 22nm: delay constant area drop (22/40)^2 but si - - - - - - - - - + diff --git a/vtr_flow/arch/timing/fixed_size/fixed_nointerclb_k6_N8_lookahead_chain_gate_boost_0.2V_22nm.xml b/vtr_flow/arch/timing/fixed_size/fixed_nointerclb_k6_N8_lookahead_chain_gate_boost_0.2V_22nm.xml index 87d9737fe20..e06480a1666 100755 --- a/vtr_flow/arch/timing/fixed_size/fixed_nointerclb_k6_N8_lookahead_chain_gate_boost_0.2V_22nm.xml +++ b/vtr_flow/arch/timing/fixed_size/fixed_nointerclb_k6_N8_lookahead_chain_gate_boost_0.2V_22nm.xml @@ -85,11 +85,7 @@ Each 2-to-1 mux adds 6 MWTAs for SRAM cell. - - - - - + @@ -104,16 +100,7 @@ Each 2-to-1 mux adds 6 MWTAs for SRAM cell. 
- - - - - - - - - - + @@ -133,11 +120,7 @@ Each 2-to-1 mux adds 6 MWTAs for SRAM cell. - - - - - + @@ -147,15 +130,7 @@ Each 2-to-1 mux adds 6 MWTAs for SRAM cell. - - - - - - - - - + diff --git a/vtr_flow/arch/timing/fixed_size/fixed_nointerclb_k6_N8_lookahead_unbalanced_chain_gate_boost_0.2V_22nm.xml b/vtr_flow/arch/timing/fixed_size/fixed_nointerclb_k6_N8_lookahead_unbalanced_chain_gate_boost_0.2V_22nm.xml index 9f19410402a..a044031777d 100755 --- a/vtr_flow/arch/timing/fixed_size/fixed_nointerclb_k6_N8_lookahead_unbalanced_chain_gate_boost_0.2V_22nm.xml +++ b/vtr_flow/arch/timing/fixed_size/fixed_nointerclb_k6_N8_lookahead_unbalanced_chain_gate_boost_0.2V_22nm.xml @@ -88,11 +88,7 @@ Each 2-to-1 mux adds 6 MWTAs for SRAM cell. - - - - - + @@ -107,16 +103,7 @@ Each 2-to-1 mux adds 6 MWTAs for SRAM cell. - - - - - - - - - - + @@ -136,11 +123,7 @@ Each 2-to-1 mux adds 6 MWTAs for SRAM cell. - - - - - + @@ -150,15 +133,7 @@ Each 2-to-1 mux adds 6 MWTAs for SRAM cell. - - - - - - - - - + diff --git a/vtr_flow/arch/timing/fixed_size/fixed_nointerclb_k6_N8_ripple_chain_gate_boost_0.2V_22nm.xml b/vtr_flow/arch/timing/fixed_size/fixed_nointerclb_k6_N8_ripple_chain_gate_boost_0.2V_22nm.xml index c07b8215744..2f2559707ea 100755 --- a/vtr_flow/arch/timing/fixed_size/fixed_nointerclb_k6_N8_ripple_chain_gate_boost_0.2V_22nm.xml +++ b/vtr_flow/arch/timing/fixed_size/fixed_nointerclb_k6_N8_ripple_chain_gate_boost_0.2V_22nm.xml @@ -85,11 +85,7 @@ Each 2-to-1 mux adds 6 MWTAs for SRAM cell. - - - - - + @@ -104,16 +100,7 @@ Each 2-to-1 mux adds 6 MWTAs for SRAM cell. - - - - - - - - - - + @@ -133,11 +120,7 @@ Each 2-to-1 mux adds 6 MWTAs for SRAM cell. - - - - - + @@ -147,15 +130,7 @@ Each 2-to-1 mux adds 6 MWTAs for SRAM cell. 
- - - - - - - - - + diff --git a/vtr_flow/arch/timing/fixed_size/fixed_nointerclb_k6_N8_unbalanced_ripple_chain_gate_boost_0.2V_22nm.xml b/vtr_flow/arch/timing/fixed_size/fixed_nointerclb_k6_N8_unbalanced_ripple_chain_gate_boost_0.2V_22nm.xml index ce2e6202a2a..ff01a7cb038 100755 --- a/vtr_flow/arch/timing/fixed_size/fixed_nointerclb_k6_N8_unbalanced_ripple_chain_gate_boost_0.2V_22nm.xml +++ b/vtr_flow/arch/timing/fixed_size/fixed_nointerclb_k6_N8_unbalanced_ripple_chain_gate_boost_0.2V_22nm.xml @@ -87,11 +87,7 @@ Each 2-to-1 mux adds 6 MWTAs for SRAM cell. - - - - - + @@ -106,16 +102,7 @@ Each 2-to-1 mux adds 6 MWTAs for SRAM cell. - - - - - - - - - - + @@ -135,11 +122,7 @@ Each 2-to-1 mux adds 6 MWTAs for SRAM cell. - - - - - + @@ -149,15 +132,7 @@ Each 2-to-1 mux adds 6 MWTAs for SRAM cell. - - - - - - - - - + diff --git a/vtr_flow/arch/timing/fraclut_carrychain/k6_frac_2ripple_N8_22nm.xml b/vtr_flow/arch/timing/fraclut_carrychain/k6_frac_2ripple_N8_22nm.xml index 9acffac40d3..43e9e951b55 100644 --- a/vtr_flow/arch/timing/fraclut_carrychain/k6_frac_2ripple_N8_22nm.xml +++ b/vtr_flow/arch/timing/fraclut_carrychain/k6_frac_2ripple_N8_22nm.xml @@ -83,11 +83,7 @@ Each 2-to-1 mux adds 6 MWTAs for SRAM cell. - - - - - + @@ -102,16 +98,7 @@ Each 2-to-1 mux adds 6 MWTAs for SRAM cell. - - - - - - - - - - + @@ -132,11 +119,7 @@ Each 2-to-1 mux adds 6 MWTAs for SRAM cell. - - - - - + @@ -146,15 +129,7 @@ Each 2-to-1 mux adds 6 MWTAs for SRAM cell. 
- - - - - - - - - + diff --git a/vtr_flow/arch/timing/fraclut_carrychain/k6_frac_2uripple_N8_22nm.xml b/vtr_flow/arch/timing/fraclut_carrychain/k6_frac_2uripple_N8_22nm.xml index 036ed2d3b91..cd276646043 100644 --- a/vtr_flow/arch/timing/fraclut_carrychain/k6_frac_2uripple_N8_22nm.xml +++ b/vtr_flow/arch/timing/fraclut_carrychain/k6_frac_2uripple_N8_22nm.xml @@ -78,11 +78,7 @@ Scaling assumptions from 40nm to 22nm: delay constant area drop (22/40)^2 but si - - - - - + @@ -97,16 +93,7 @@ Scaling assumptions from 40nm to 22nm: delay constant area drop (22/40)^2 but si - - - - - - - - - - + @@ -127,11 +114,7 @@ Scaling assumptions from 40nm to 22nm: delay constant area drop (22/40)^2 but si - - - - - + @@ -141,15 +124,7 @@ Scaling assumptions from 40nm to 22nm: delay constant area drop (22/40)^2 but si - - - - - - - - - + diff --git a/vtr_flow/arch/timing/fraclut_carrychain/k6_frac_N8_22nm.xml b/vtr_flow/arch/timing/fraclut_carrychain/k6_frac_N8_22nm.xml index fb389dd6583..22419ea1597 100644 --- a/vtr_flow/arch/timing/fraclut_carrychain/k6_frac_N8_22nm.xml +++ b/vtr_flow/arch/timing/fraclut_carrychain/k6_frac_N8_22nm.xml @@ -67,11 +67,7 @@ Scaling assumptions from 40nm to 22nm: delay constant area drop (22/40)^2 but si - - - - - + @@ -86,14 +82,7 @@ Scaling assumptions from 40nm to 22nm: delay constant area drop (22/40)^2 but si - - - - - - - - + @@ -109,11 +98,7 @@ Scaling assumptions from 40nm to 22nm: delay constant area drop (22/40)^2 but si - - - - - + @@ -123,15 +108,7 @@ Scaling assumptions from 40nm to 22nm: delay constant area drop (22/40)^2 but si - - - - - - - - - + diff --git a/vtr_flow/arch/timing/fraclut_carrychain/k6_frac_ripple_N8_22nm.xml b/vtr_flow/arch/timing/fraclut_carrychain/k6_frac_ripple_N8_22nm.xml index 05092ea78ff..25aa0c15803 100644 --- a/vtr_flow/arch/timing/fraclut_carrychain/k6_frac_ripple_N8_22nm.xml +++ b/vtr_flow/arch/timing/fraclut_carrychain/k6_frac_ripple_N8_22nm.xml @@ -78,11 +78,7 @@ Scaling assumptions from 40nm to 22nm: delay 
constant area drop (22/40)^2 but si - - - - - + @@ -97,16 +93,7 @@ Scaling assumptions from 40nm to 22nm: delay constant area drop (22/40)^2 but si - - - - - - - - - - + @@ -127,11 +114,7 @@ Scaling assumptions from 40nm to 22nm: delay constant area drop (22/40)^2 but si - - - - - + @@ -141,15 +124,7 @@ Scaling assumptions from 40nm to 22nm: delay constant area drop (22/40)^2 but si - - - - - - - - - + diff --git a/vtr_flow/arch/timing/fraclut_carrychain/k6_frac_uripple_N8_22nm.xml b/vtr_flow/arch/timing/fraclut_carrychain/k6_frac_uripple_N8_22nm.xml index 975e7265d5a..4c60ffb98ac 100644 --- a/vtr_flow/arch/timing/fraclut_carrychain/k6_frac_uripple_N8_22nm.xml +++ b/vtr_flow/arch/timing/fraclut_carrychain/k6_frac_uripple_N8_22nm.xml @@ -78,11 +78,7 @@ Scaling assumptions from 40nm to 22nm: delay constant area drop (22/40)^2 but si - - - - - + @@ -97,16 +93,7 @@ Scaling assumptions from 40nm to 22nm: delay constant area drop (22/40)^2 but si - - - - - - - - - - + @@ -127,11 +114,7 @@ Scaling assumptions from 40nm to 22nm: delay constant area drop (22/40)^2 but si - - - - - + @@ -141,15 +124,7 @@ Scaling assumptions from 40nm to 22nm: delay constant area drop (22/40)^2 but si - - - - - - - - - + diff --git a/vtr_flow/arch/timing/global_nonuniform/x_delta_y_delta.xml b/vtr_flow/arch/timing/global_nonuniform/x_delta_y_delta.xml index 3a7edcd7c6d..036597930b3 100644 --- a/vtr_flow/arch/timing/global_nonuniform/x_delta_y_delta.xml +++ b/vtr_flow/arch/timing/global_nonuniform/x_delta_y_delta.xml @@ -135,11 +135,7 @@ - - - - - + @@ -154,11 +150,7 @@ - - - - - + @@ -168,11 +160,7 @@ - - - - - + @@ -182,15 +170,7 @@ - - - - - - - - - + diff --git a/vtr_flow/arch/timing/global_nonuniform/x_delta_y_uniform.xml b/vtr_flow/arch/timing/global_nonuniform/x_delta_y_uniform.xml index 7436261debd..9a4dd6904f8 100644 --- a/vtr_flow/arch/timing/global_nonuniform/x_delta_y_uniform.xml +++ b/vtr_flow/arch/timing/global_nonuniform/x_delta_y_uniform.xml @@ -135,11 +135,7 @@ - - - - - + @@ 
-154,11 +150,7 @@ - - - - - + @@ -168,11 +160,7 @@ - - - - - + @@ -182,15 +170,7 @@ - - - - - - - - - + diff --git a/vtr_flow/arch/timing/global_nonuniform/x_gaussian_y_gaussian.xml b/vtr_flow/arch/timing/global_nonuniform/x_gaussian_y_gaussian.xml index 91e1d3232b3..c5c31e0b0b3 100644 --- a/vtr_flow/arch/timing/global_nonuniform/x_gaussian_y_gaussian.xml +++ b/vtr_flow/arch/timing/global_nonuniform/x_gaussian_y_gaussian.xml @@ -135,11 +135,7 @@ - - - - - + @@ -154,11 +150,7 @@ - - - - - + @@ -168,11 +160,7 @@ - - - - - + @@ -182,15 +170,7 @@ - - - - - - - - - + diff --git a/vtr_flow/arch/timing/global_nonuniform/x_gaussian_y_uniform.xml b/vtr_flow/arch/timing/global_nonuniform/x_gaussian_y_uniform.xml index 84021d5b3fd..2815c136dd3 100644 --- a/vtr_flow/arch/timing/global_nonuniform/x_gaussian_y_uniform.xml +++ b/vtr_flow/arch/timing/global_nonuniform/x_gaussian_y_uniform.xml @@ -135,11 +135,7 @@ - - - - - + @@ -154,11 +150,7 @@ - - - - - + @@ -168,11 +160,7 @@ - - - - - + @@ -182,15 +170,7 @@ - - - - - - - - - + diff --git a/vtr_flow/arch/timing/global_nonuniform/x_uniform_y_delta.xml b/vtr_flow/arch/timing/global_nonuniform/x_uniform_y_delta.xml index da194c29dc9..396eb59cedf 100644 --- a/vtr_flow/arch/timing/global_nonuniform/x_uniform_y_delta.xml +++ b/vtr_flow/arch/timing/global_nonuniform/x_uniform_y_delta.xml @@ -135,11 +135,7 @@ - - - - - + @@ -154,11 +150,7 @@ - - - - - + @@ -168,11 +160,7 @@ - - - - - + @@ -182,15 +170,7 @@ - - - - - - - - - + diff --git a/vtr_flow/arch/timing/global_nonuniform/x_uniform_y_gaussian.xml b/vtr_flow/arch/timing/global_nonuniform/x_uniform_y_gaussian.xml index 4c592e34eca..b30b0fcabd0 100644 --- a/vtr_flow/arch/timing/global_nonuniform/x_uniform_y_gaussian.xml +++ b/vtr_flow/arch/timing/global_nonuniform/x_uniform_y_gaussian.xml @@ -135,11 +135,7 @@ - - - - - + @@ -154,11 +150,7 @@ - - - - - + @@ -168,11 +160,7 @@ - - - - - + @@ -182,15 +170,7 @@ - - - - - - - - - + diff --git a/vtr_flow/arch/timing/hard_fpu_arch_timing.xml 
b/vtr_flow/arch/timing/hard_fpu_arch_timing.xml index 2ceeebbea8a..e34a38b0851 100755 --- a/vtr_flow/arch/timing/hard_fpu_arch_timing.xml +++ b/vtr_flow/arch/timing/hard_fpu_arch_timing.xml @@ -27,11 +27,7 @@ - - - - - + @@ -46,11 +42,7 @@ - - - - - + @@ -60,17 +52,7 @@ - - - - - - - - - - - + diff --git a/vtr_flow/arch/timing/k4_N4_90nm.xml b/vtr_flow/arch/timing/k4_N4_90nm.xml index 4dca4766b55..54378686b64 100644 --- a/vtr_flow/arch/timing/k4_N4_90nm.xml +++ b/vtr_flow/arch/timing/k4_N4_90nm.xml @@ -18,11 +18,7 @@ - - - - - + @@ -37,11 +33,7 @@ - - - - - + diff --git a/vtr_flow/arch/timing/k4_N4_90nm_default_fc_pinloc.xml b/vtr_flow/arch/timing/k4_N4_90nm_default_fc_pinloc.xml index c7c3c153208..05d42742562 100644 --- a/vtr_flow/arch/timing/k4_N4_90nm_default_fc_pinloc.xml +++ b/vtr_flow/arch/timing/k4_N4_90nm_default_fc_pinloc.xml @@ -18,11 +18,7 @@ - - - - - + @@ -37,11 +33,7 @@ - - - - - + diff --git a/vtr_flow/arch/timing/k4_N8_legacy_45nm.xml b/vtr_flow/arch/timing/k4_N8_legacy_45nm.xml index ffac26c78f1..8ebe5c09e4c 100644 --- a/vtr_flow/arch/timing/k4_N8_legacy_45nm.xml +++ b/vtr_flow/arch/timing/k4_N8_legacy_45nm.xml @@ -18,11 +18,7 @@ - - - - - + @@ -37,11 +33,7 @@ - - - - - + diff --git a/vtr_flow/arch/timing/k6_N10_40nm.xml b/vtr_flow/arch/timing/k6_N10_40nm.xml index e880678accd..3f52c56efa1 100644 --- a/vtr_flow/arch/timing/k6_N10_40nm.xml +++ b/vtr_flow/arch/timing/k6_N10_40nm.xml @@ -28,11 +28,7 @@ - - - - - + @@ -47,11 +43,7 @@ - - - - - + diff --git a/vtr_flow/arch/timing/k6_N10_gate_boost_0.2V_22nm.xml b/vtr_flow/arch/timing/k6_N10_gate_boost_0.2V_22nm.xml index 2d974b642c9..c7574d8e9a0 100644 --- a/vtr_flow/arch/timing/k6_N10_gate_boost_0.2V_22nm.xml +++ b/vtr_flow/arch/timing/k6_N10_gate_boost_0.2V_22nm.xml @@ -67,11 +67,7 @@ Scaling assumptions from 40nm to 22nm: delay constant area drop (22/40)^2 but si - - - - - + @@ -86,14 +82,7 @@ Scaling assumptions from 40nm to 22nm: delay constant area drop (22/40)^2 but si - - - - - - - - + @@ 
-109,11 +98,7 @@ Scaling assumptions from 40nm to 22nm: delay constant area drop (22/40)^2 but si - - - - - + @@ -123,15 +108,7 @@ Scaling assumptions from 40nm to 22nm: delay constant area drop (22/40)^2 but si - - - - - - - - - + diff --git a/vtr_flow/arch/timing/k6_N10_legacy_45nm.xml b/vtr_flow/arch/timing/k6_N10_legacy_45nm.xml index 11d203f5036..f242c90968e 100644 --- a/vtr_flow/arch/timing/k6_N10_legacy_45nm.xml +++ b/vtr_flow/arch/timing/k6_N10_legacy_45nm.xml @@ -18,11 +18,7 @@ - - - - - + @@ -37,11 +33,7 @@ - - - - - + diff --git a/vtr_flow/arch/timing/k6_N10_mem32K_40nm.xml b/vtr_flow/arch/timing/k6_N10_mem32K_40nm.xml index c1bc2f468ba..c6922800705 100644 --- a/vtr_flow/arch/timing/k6_N10_mem32K_40nm.xml +++ b/vtr_flow/arch/timing/k6_N10_mem32K_40nm.xml @@ -81,11 +81,7 @@ - - - - - + @@ -100,11 +96,7 @@ - - - - - + @@ -114,11 +106,7 @@ - - - - - + @@ -128,15 +116,7 @@ - - - - - - - - - + diff --git a/vtr_flow/arch/timing/k6_N10_mem32K_40nm_fc_abs.xml b/vtr_flow/arch/timing/k6_N10_mem32K_40nm_fc_abs.xml index ac34af75dc2..1d17aa21021 100644 --- a/vtr_flow/arch/timing/k6_N10_mem32K_40nm_fc_abs.xml +++ b/vtr_flow/arch/timing/k6_N10_mem32K_40nm_fc_abs.xml @@ -81,11 +81,7 @@ - - - - - + @@ -100,11 +96,7 @@ - - - - - + @@ -114,11 +106,7 @@ - - - - - + @@ -128,15 +116,7 @@ - - - - - - - - - + diff --git a/vtr_flow/arch/timing/k6_N10_ripple_chain_gate_boost_0.2V_22nm.xml b/vtr_flow/arch/timing/k6_N10_ripple_chain_gate_boost_0.2V_22nm.xml index dd91f2255e1..9098d3e59e0 100644 --- a/vtr_flow/arch/timing/k6_N10_ripple_chain_gate_boost_0.2V_22nm.xml +++ b/vtr_flow/arch/timing/k6_N10_ripple_chain_gate_boost_0.2V_22nm.xml @@ -82,11 +82,7 @@ to isolate one chain from the next - - - - - + @@ -101,16 +97,7 @@ to isolate one chain from the next - - - - - - - - - - + @@ -132,11 +119,7 @@ to isolate one chain from the next - - - - - + @@ -146,15 +129,7 @@ to isolate one chain from the next - - - - - - - - - + diff --git 
a/vtr_flow/arch/timing/k6_N10_unbalanced_ripple_chain_gate_boost_0.2V_22nm.xml b/vtr_flow/arch/timing/k6_N10_unbalanced_ripple_chain_gate_boost_0.2V_22nm.xml index f67a56044c3..a0750e38154 100644 --- a/vtr_flow/arch/timing/k6_N10_unbalanced_ripple_chain_gate_boost_0.2V_22nm.xml +++ b/vtr_flow/arch/timing/k6_N10_unbalanced_ripple_chain_gate_boost_0.2V_22nm.xml @@ -80,11 +80,7 @@ carry chain from Safeen's CMOS ripple carry adder not gate boosted - - - - - + @@ -99,16 +95,7 @@ carry chain from Safeen's CMOS ripple carry adder not gate boosted - - - - - - - - - - + @@ -130,11 +117,7 @@ carry chain from Safeen's CMOS ripple carry adder not gate boosted - - - - - + @@ -144,15 +127,7 @@ carry chain from Safeen's CMOS ripple carry adder not gate boosted - - - - - - - - - + diff --git a/vtr_flow/arch/timing/k6_N8_gate_boost_0.2V_22nm.xml b/vtr_flow/arch/timing/k6_N8_gate_boost_0.2V_22nm.xml index 4888855c7bc..5c0a12ea2d0 100644 --- a/vtr_flow/arch/timing/k6_N8_gate_boost_0.2V_22nm.xml +++ b/vtr_flow/arch/timing/k6_N8_gate_boost_0.2V_22nm.xml @@ -67,11 +67,7 @@ Scaling assumptions from 40nm to 22nm: delay constant area drop (22/40)^2 but si - - - - - + @@ -86,14 +82,7 @@ Scaling assumptions from 40nm to 22nm: delay constant area drop (22/40)^2 but si - - - - - - - - + @@ -109,11 +98,7 @@ Scaling assumptions from 40nm to 22nm: delay constant area drop (22/40)^2 but si - - - - - + @@ -123,15 +108,7 @@ Scaling assumptions from 40nm to 22nm: delay constant area drop (22/40)^2 but si - - - - - - - - - + diff --git a/vtr_flow/arch/timing/k6_N8_lookahead_chain_gate_boost_0.2V_22nm.xml b/vtr_flow/arch/timing/k6_N8_lookahead_chain_gate_boost_0.2V_22nm.xml index 7c14b360365..4ac176270d8 100644 --- a/vtr_flow/arch/timing/k6_N8_lookahead_chain_gate_boost_0.2V_22nm.xml +++ b/vtr_flow/arch/timing/k6_N8_lookahead_chain_gate_boost_0.2V_22nm.xml @@ -85,11 +85,7 @@ Each 2-to-1 mux adds 6 MWTAs for SRAM cell. - - - - - + @@ -104,16 +100,7 @@ Each 2-to-1 mux adds 6 MWTAs for SRAM cell. 
- - - - - - - - - - + @@ -135,11 +122,7 @@ Each 2-to-1 mux adds 6 MWTAs for SRAM cell. - - - - - + @@ -149,15 +132,7 @@ Each 2-to-1 mux adds 6 MWTAs for SRAM cell. - - - - - - - - - + diff --git a/vtr_flow/arch/timing/k6_N8_lookahead_unbalanced_chain_gate_boost_0.2V_22nm.xml b/vtr_flow/arch/timing/k6_N8_lookahead_unbalanced_chain_gate_boost_0.2V_22nm.xml index fdc7f0db5db..aed96671546 100644 --- a/vtr_flow/arch/timing/k6_N8_lookahead_unbalanced_chain_gate_boost_0.2V_22nm.xml +++ b/vtr_flow/arch/timing/k6_N8_lookahead_unbalanced_chain_gate_boost_0.2V_22nm.xml @@ -88,11 +88,7 @@ Each 2-to-1 mux adds 6 MWTAs for SRAM cell. - - - - - + @@ -107,16 +103,7 @@ Each 2-to-1 mux adds 6 MWTAs for SRAM cell. - - - - - - - - - - + @@ -138,11 +125,7 @@ Each 2-to-1 mux adds 6 MWTAs for SRAM cell. - - - - - + @@ -152,15 +135,7 @@ Each 2-to-1 mux adds 6 MWTAs for SRAM cell. - - - - - - - - - + diff --git a/vtr_flow/arch/timing/k6_N8_ripple_chain_gate_boost_0.2V_22nm.xml b/vtr_flow/arch/timing/k6_N8_ripple_chain_gate_boost_0.2V_22nm.xml index a8c2bd43e25..c22b29c4e3a 100644 --- a/vtr_flow/arch/timing/k6_N8_ripple_chain_gate_boost_0.2V_22nm.xml +++ b/vtr_flow/arch/timing/k6_N8_ripple_chain_gate_boost_0.2V_22nm.xml @@ -85,11 +85,7 @@ Each 2-to-1 mux adds 6 MWTAs for SRAM cell. - - - - - + @@ -104,16 +100,7 @@ Each 2-to-1 mux adds 6 MWTAs for SRAM cell. - - - - - - - - - - + @@ -135,11 +122,7 @@ Each 2-to-1 mux adds 6 MWTAs for SRAM cell. - - - - - + @@ -149,15 +132,7 @@ Each 2-to-1 mux adds 6 MWTAs for SRAM cell. - - - - - - - - - + diff --git a/vtr_flow/arch/timing/k6_N8_unbalanced_ripple_chain_gate_boost_0.2V_22nm.xml b/vtr_flow/arch/timing/k6_N8_unbalanced_ripple_chain_gate_boost_0.2V_22nm.xml index 38afc7d572d..268dcf4b638 100644 --- a/vtr_flow/arch/timing/k6_N8_unbalanced_ripple_chain_gate_boost_0.2V_22nm.xml +++ b/vtr_flow/arch/timing/k6_N8_unbalanced_ripple_chain_gate_boost_0.2V_22nm.xml @@ -87,11 +87,7 @@ Each 2-to-1 mux adds 6 MWTAs for SRAM cell. 
- - - - - + @@ -106,16 +102,7 @@ Each 2-to-1 mux adds 6 MWTAs for SRAM cell. - - - - - - - - - - + @@ -137,11 +124,7 @@ Each 2-to-1 mux adds 6 MWTAs for SRAM cell. - - - - - + @@ -151,15 +134,7 @@ Each 2-to-1 mux adds 6 MWTAs for SRAM cell. - - - - - - - - - + diff --git a/vtr_flow/arch/timing/k6_frac_N10_40nm.xml b/vtr_flow/arch/timing/k6_frac_N10_40nm.xml index b41af897794..4da00d67a14 100644 --- a/vtr_flow/arch/timing/k6_frac_N10_40nm.xml +++ b/vtr_flow/arch/timing/k6_frac_N10_40nm.xml @@ -28,11 +28,7 @@ - - - - - + @@ -47,11 +43,7 @@ - - - - - + diff --git a/vtr_flow/arch/timing/k6_frac_N10_4add_2chains_depop50_mem20K_22nm.xml b/vtr_flow/arch/timing/k6_frac_N10_4add_2chains_depop50_mem20K_22nm.xml index f449b5d926e..0ef615a3aa4 100644 --- a/vtr_flow/arch/timing/k6_frac_N10_4add_2chains_depop50_mem20K_22nm.xml +++ b/vtr_flow/arch/timing/k6_frac_N10_4add_2chains_depop50_mem20K_22nm.xml @@ -98,11 +98,7 @@ - - - - - + @@ -117,16 +113,7 @@ - - - - - - - - - - + @@ -144,10 +131,7 @@ - - - - + @@ -156,15 +140,7 @@ - - - - - - - - - + diff --git a/vtr_flow/arch/timing/k6_frac_N10_4add_2chains_tie_off_depop50_mem20K_22nm.xml b/vtr_flow/arch/timing/k6_frac_N10_4add_2chains_tie_off_depop50_mem20K_22nm.xml index 0659b45da63..bd2264a60ac 100644 --- a/vtr_flow/arch/timing/k6_frac_N10_4add_2chains_tie_off_depop50_mem20K_22nm.xml +++ b/vtr_flow/arch/timing/k6_frac_N10_4add_2chains_tie_off_depop50_mem20K_22nm.xml @@ -98,11 +98,7 @@ - - - - - + @@ -117,16 +113,7 @@ - - - - - - - - - - + @@ -144,10 +131,7 @@ - - - - + @@ -156,15 +140,7 @@ - - - - - - - - - + diff --git a/vtr_flow/arch/timing/k6_frac_N10_frac_chain_depop50_mem32K_40nm.xml b/vtr_flow/arch/timing/k6_frac_N10_frac_chain_depop50_mem32K_40nm.xml index 29fb7f958d0..4594c649609 100644 --- a/vtr_flow/arch/timing/k6_frac_N10_frac_chain_depop50_mem32K_40nm.xml +++ b/vtr_flow/arch/timing/k6_frac_N10_frac_chain_depop50_mem32K_40nm.xml @@ -151,11 +151,7 @@ - - - - - + @@ -170,16 +166,7 @@ - - - - - - - - - - + @@ -197,11 
+184,7 @@ - - - - - + @@ -211,15 +194,7 @@ - - - - - - - - - + diff --git a/vtr_flow/arch/timing/k6_frac_N10_frac_chain_mem32K_40nm.xml b/vtr_flow/arch/timing/k6_frac_N10_frac_chain_mem32K_40nm.xml index 39f0e98a58a..cead76b744f 100644 --- a/vtr_flow/arch/timing/k6_frac_N10_frac_chain_mem32K_40nm.xml +++ b/vtr_flow/arch/timing/k6_frac_N10_frac_chain_mem32K_40nm.xml @@ -160,11 +160,7 @@ - - - - - + @@ -179,13 +175,7 @@ - - - - - - - + @@ -200,11 +190,7 @@ - - - - - + @@ -214,15 +200,7 @@ - - - - - - - - - + diff --git a/vtr_flow/arch/timing/k6_frac_N10_frac_chain_mem32K_htree0_40nm.xml b/vtr_flow/arch/timing/k6_frac_N10_frac_chain_mem32K_htree0_40nm.xml index fc944c68d63..f7adb5ed34d 100644 --- a/vtr_flow/arch/timing/k6_frac_N10_frac_chain_mem32K_htree0_40nm.xml +++ b/vtr_flow/arch/timing/k6_frac_N10_frac_chain_mem32K_htree0_40nm.xml @@ -160,11 +160,7 @@ - - - - - + @@ -182,13 +178,7 @@ - - - - - - - + @@ -205,11 +195,7 @@ - - - - - + @@ -219,15 +205,7 @@ - - - - - - - - - + diff --git a/vtr_flow/arch/timing/k6_frac_N10_frac_chain_mem32K_htree0_routedCLK_40nm.xml b/vtr_flow/arch/timing/k6_frac_N10_frac_chain_mem32K_htree0_routedCLK_40nm.xml index 5d427db0e7e..b4cc4086b65 100644 --- a/vtr_flow/arch/timing/k6_frac_N10_frac_chain_mem32K_htree0_routedCLK_40nm.xml +++ b/vtr_flow/arch/timing/k6_frac_N10_frac_chain_mem32K_htree0_routedCLK_40nm.xml @@ -160,11 +160,7 @@ - - - - - + @@ -179,13 +175,7 @@ - - - - - - - + @@ -200,11 +190,7 @@ - - - - - + @@ -214,15 +200,7 @@ - - - - - - - - - + diff --git a/vtr_flow/arch/timing/k6_frac_N10_frac_chain_mem32K_htree0short_40nm.xml b/vtr_flow/arch/timing/k6_frac_N10_frac_chain_mem32K_htree0short_40nm.xml index 4fb3d4bd08a..fa729857e30 100644 --- a/vtr_flow/arch/timing/k6_frac_N10_frac_chain_mem32K_htree0short_40nm.xml +++ b/vtr_flow/arch/timing/k6_frac_N10_frac_chain_mem32K_htree0short_40nm.xml @@ -160,11 +160,7 @@ - - - - - + @@ -182,13 +178,7 @@ - - - - - - - + @@ -205,11 +195,7 @@ - - - - - + @@ -219,15 +205,7 @@ - - - - - - - - 
- + diff --git a/vtr_flow/arch/timing/k6_frac_N10_mem32K_40nm.xml b/vtr_flow/arch/timing/k6_frac_N10_mem32K_40nm.xml index 7704b83e183..aaab5da733f 100644 --- a/vtr_flow/arch/timing/k6_frac_N10_mem32K_40nm.xml +++ b/vtr_flow/arch/timing/k6_frac_N10_mem32K_40nm.xml @@ -135,11 +135,7 @@ - - - - - + @@ -154,11 +150,7 @@ - - - - - + @@ -168,11 +160,7 @@ - - - - - + @@ -182,15 +170,7 @@ - - - - - - - - - + diff --git a/vtr_flow/arch/timing/soft_fpu_arch_timing.xml b/vtr_flow/arch/timing/soft_fpu_arch_timing.xml index 889249cbc2e..42e5b5e8db5 100755 --- a/vtr_flow/arch/timing/soft_fpu_arch_timing.xml +++ b/vtr_flow/arch/timing/soft_fpu_arch_timing.xml @@ -9,11 +9,7 @@ - - - - - + @@ -28,11 +24,7 @@ - - - - - + diff --git a/vtr_flow/arch/timing/soft_fpu_arch_timing_chain.xml b/vtr_flow/arch/timing/soft_fpu_arch_timing_chain.xml index 6780a5779ef..8854fc2242a 100644 --- a/vtr_flow/arch/timing/soft_fpu_arch_timing_chain.xml +++ b/vtr_flow/arch/timing/soft_fpu_arch_timing_chain.xml @@ -20,11 +20,7 @@ - - - - - + @@ -39,13 +35,7 @@ - - - - - - - + diff --git a/vtr_flow/arch/timing/xc6vlx240tff1156.xml b/vtr_flow/arch/timing/xc6vlx240tff1156.xml index 33c7d5ce252..d47950af431 100644 --- a/vtr_flow/arch/timing/xc6vlx240tff1156.xml +++ b/vtr_flow/arch/timing/xc6vlx240tff1156.xml @@ -79,12 +79,7 @@ - - - - - - + @@ -95,37 +90,7 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + @@ -165,22 +130,7 @@ - - - - - - - - - - - - - - - - + @@ -201,102 +151,7 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + @@ -397,15 +252,7 @@ - - - - - - - - - + diff --git a/vtr_flow/arch/titan/stratixiv_arch.timing.xml b/vtr_flow/arch/titan/stratixiv_arch.timing.xml index af1fc734a17..dc322dcf818 100644 --- a/vtr_flow/arch/titan/stratixiv_arch.timing.xml +++ b/vtr_flow/arch/titan/stratixiv_arch.timing.xml @@ -4412,11 +4412,7 @@ - - - - - + 
@@ -4439,12 +4435,7 @@ - - - - - - + @@ -4460,16 +4451,7 @@ - - - - - - - - - - + @@ -4523,18 +4505,7 @@ - - - - - - - - - - - - + @@ -4625,12 +4596,7 @@ - - - - - - + @@ -4668,12 +4634,7 @@ - - - - - - + From 5c5709bfabf5b9cacffbc3dfe44f52f53fe283c7 Mon Sep 17 00:00:00 2001 From: Alessandro Comodi Date: Tue, 12 Nov 2019 14:56:51 +0100 Subject: [PATCH 43/58] upgrade_arch: add pin_mapping attribute to equivalent_sites Signed-off-by: Alessandro Comodi --- vtr_flow/scripts/upgrade_arch.py | 30 +++--------------------------- 1 file changed, 3 insertions(+), 27 deletions(-) diff --git a/vtr_flow/scripts/upgrade_arch.py b/vtr_flow/scripts/upgrade_arch.py index ef764798358..88956eded55 100755 --- a/vtr_flow/scripts/upgrade_arch.py +++ b/vtr_flow/scripts/upgrade_arch.py @@ -1002,36 +1002,12 @@ def add_site_directs(arch): - - - - - - ... - + """ - TAGS_TO_COPY = ['input', 'output', 'clock'] - - def add_directs(equivalent_site, pb_type): - for child in pb_type: - if child.tag in TAGS_TO_COPY: - tile_name = equivalent_site.attrib['pb_type'] - port = child.attrib['name'] - - from_to = "%s.%s" % (tile_name, port) - - direct = ET.Element("direct") - direct.set("from", from_to) - direct.set("to", from_to) - equivalent_site.append(direct) - - if arch.findall('./tiles/tile/equivalent_sites/site/direct'): - return False - top_pb_types = [] for pb_type in arch.iter('pb_type'): if pb_type.getparent().tag == 'complexblocklist': @@ -1043,8 +1019,8 @@ def add_directs(equivalent_site, pb_type): for pb_type in top_pb_types: for site in sites: - if pb_type.attrib['name'] == site.attrib['pb_type']: - add_directs(site, pb_type) + if 'pin_mapping' not in site.attrib: + site.attrib['pin_mapping'] = "direct" return True From 3d94cd57759d536b1b9d14c7da5b30528a0734c9 Mon Sep 17 00:00:00 2001 From: Alessandro Comodi Date: Tue, 12 Nov 2019 15:04:44 +0100 Subject: [PATCH 44/58] equivalent: equivalent regression test with custom pin_mapping Signed-off-by: Alessandro Comodi --- 
vtr_flow/arch/equivalent_sites/equivalent.xml | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/vtr_flow/arch/equivalent_sites/equivalent.xml b/vtr_flow/arch/equivalent_sites/equivalent.xml index 7252b4f81e5..5e8fb3b55d8 100644 --- a/vtr_flow/arch/equivalent_sites/equivalent.xml +++ b/vtr_flow/arch/equivalent_sites/equivalent.xml @@ -39,7 +39,7 @@ - + @@ -56,19 +56,19 @@ - + - + - + - + From f521b3a3e95009ed838e8ca3ad1cb9f135dee8be Mon Sep 17 00:00:00 2001 From: Alessandro Comodi Date: Wed, 20 Nov 2019 10:54:25 +0100 Subject: [PATCH 45/58] equivalent: remove placement priority attribute from architectures Signed-off-by: Alessandro Comodi --- libs/libarchfpga/src/physical_types.h | 8 -------- libs/libarchfpga/src/read_xml_arch_file.cpp | 6 +----- 2 files changed, 1 insertion(+), 13 deletions(-) diff --git a/libs/libarchfpga/src/physical_types.h b/libs/libarchfpga/src/physical_types.h index e34b740031c..023120965b0 100644 --- a/libs/libarchfpga/src/physical_types.h +++ b/libs/libarchfpga/src/physical_types.h @@ -613,10 +613,6 @@ struct t_physical_tile_type { std::vector equivalent_sites_names; std::vector equivalent_sites; - /* Map holding the priority for which this logical block needs to be placed. - * logical_blocks_priority[priority] -> vector holding the logical_block indices */ - std::map> logical_blocks_priority; - /* Unordered map indexed by the logical block index. * tile_block_pin_directs_map[logical block index][logical block pin] -> physical tile pin */ std::unordered_map> tile_block_pin_directs_map; @@ -707,10 +703,6 @@ struct t_logical_block_type { int index = -1; /* index of type descriptor in array (allows for index referencing) */ std::vector equivalent_tiles; - - /* Map holding the priority for which this logical block needs to be placed. 
- * physical_tiles_priority[priority] -> vector holding the physical tile indices */ - std::map> physical_tiles_priority; }; /************************************************************************************************* diff --git a/libs/libarchfpga/src/read_xml_arch_file.cpp b/libs/libarchfpga/src/read_xml_arch_file.cpp index b93cd4396f6..55aa02b7fbb 100644 --- a/libs/libarchfpga/src/read_xml_arch_file.cpp +++ b/libs/libarchfpga/src/read_xml_arch_file.cpp @@ -3225,17 +3225,13 @@ static void ProcessTileEquivalentSites(pugi::xml_node Parent, while (CurSite) { check_node(CurSite, "site", loc_data); - expect_only_attributes(CurSite, {"pb_type", "priority", "pin_mapping"}, loc_data); + expect_only_attributes(CurSite, {"pb_type", "pin_mapping"}, loc_data); /* Load equivalent site name */ auto Prop = std::string(get_attribute(CurSite, "pb_type", loc_data).value()); PhysicalTileType->equivalent_sites_names.push_back(Prop); auto LogicalBlockType = get_type_by_name(Prop.c_str(), LogicalBlockTypes); - auto priority = get_attribute(CurSite, "priority", loc_data, ReqOpt::OPTIONAL).as_int(0); - LogicalBlockType->physical_tiles_priority[priority].push_back(PhysicalTileType->index); - PhysicalTileType->logical_blocks_priority[priority].push_back(LogicalBlockType->index); - auto pin_mapping = get_attribute(CurSite, "pin_mapping", loc_data, ReqOpt::OPTIONAL).as_string("direct"); if (0 == strcmp(pin_mapping, "custom")) { From a004e014c0f88c84d313ce1f1e6afe3f90c50102 Mon Sep 17 00:00:00 2001 From: Alessandro Comodi Date: Wed, 20 Nov 2019 12:09:42 +0100 Subject: [PATCH 46/58] equivalent: sorting blocks and pl_macros in initial_placement Signed-off-by: Alessandro Comodi --- vpr/src/place/initial_placement.cpp | 211 ++++++++++++++-------------- 1 file changed, 109 insertions(+), 102 deletions(-) diff --git a/vpr/src/place/initial_placement.cpp b/vpr/src/place/initial_placement.cpp index 80952af3a74..f3bcf114939 100644 --- a/vpr/src/place/initial_placement.cpp +++ 
b/vpr/src/place/initial_placement.cpp @@ -172,76 +172,78 @@ static void initial_placement_pl_macros(int macros_max_num_tries, int* free_loca auto& pl_macros = place_ctx.pl_macros; - // The map serves to place first the most constrained block ids - std::map> sorted_pl_macros_map; + // Sorting blocks to place to have most constricted ones to be placed first + std::vector sorted_pl_macros(pl_macros.begin(), pl_macros.end()); - for (size_t imacro = 0; imacro < pl_macros.size(); imacro++) { - blk_id = pl_macros[imacro].members[0].blk_index; - auto logical_block = cluster_ctx.clb_nlist.block_type(blk_id); + auto criteria = [&cluster_ctx](const t_pl_macro lhs, t_pl_macro rhs) { + auto lhs_logical_block = cluster_ctx.clb_nlist.block_type(lhs.members[0].blk_index); + auto rhs_logical_block = cluster_ctx.clb_nlist.block_type(rhs.members[0].blk_index); - size_t num_equivalent_tiles = logical_block->equivalent_tiles.size(); - sorted_pl_macros_map[num_equivalent_tiles].push_back(pl_macros[imacro]); - } + auto lhs_num_tiles = lhs_logical_block->equivalent_tiles.size(); + auto rhs_num_tiles = rhs_logical_block->equivalent_tiles.size(); + + return lhs_num_tiles < rhs_num_tiles; + }; + + std::sort(sorted_pl_macros.begin(), sorted_pl_macros.end(), criteria); /* Macros are harder to place. 
Do them first */ - for (auto& sorted_pl_macros : sorted_pl_macros_map) { - for (auto& pl_macro : sorted_pl_macros.second) { - // Every macro are not placed in the beginnning - macro_placed = false; + for (auto pl_macro : sorted_pl_macros) { + // Every macro are not placed in the beginnning + macro_placed = false; - // Assume that all the blocks in the macro are of the same type - blk_id = pl_macro.members[0].blk_index; - auto logical_block = cluster_ctx.clb_nlist.block_type(blk_id); - auto type = pick_placement_type(logical_block, int(pl_macro.members.size()), free_locations); + // Assume that all the blocks in the macro are of the same type + blk_id = pl_macro.members[0].blk_index; + auto logical_block = cluster_ctx.clb_nlist.block_type(blk_id); + auto type = pick_placement_type(logical_block, int(pl_macro.members.size()), free_locations); + + if (type == nullptr) { + VPR_FATAL_ERROR(VPR_ERROR_PLACE, + "Initial placement failed.\n" + "Could not place macro length %zu with head block %s (#%zu); not enough free locations of type %s (#%d).\n" + "VPR cannot auto-size for your circuit, please resize the FPGA manually.\n", + pl_macro.members.size(), cluster_ctx.clb_nlist.block_name(blk_id).c_str(), size_t(blk_id), logical_block->name, logical_block->index); + } - if (type == nullptr) { - VPR_FATAL_ERROR(VPR_ERROR_PLACE, - "Initial placement failed.\n" - "Could not place macro length %zu with head block %s (#%zu); not enough free locations of type %s (#%d).\n" - "VPR cannot auto-size for your circuit, please resize the FPGA manually.\n", - pl_macro.members.size(), cluster_ctx.clb_nlist.block_name(blk_id).c_str(), size_t(blk_id), logical_block->name, logical_block->index); - } + itype = type->index; - itype = type->index; + // Try to place the macro first, if can be placed - place them, otherwise try again + for (itry = 0; itry < macros_max_num_tries && macro_placed == false; itry++) { + // Choose a random position for the head + ipos = vtr::irand(free_locations[itype] - 
1); + + // Try to place the macro + macro_placed = try_place_macro(itype, ipos, pl_macro); + + } // Finished all tries - // Try to place the macro first, if can be placed - place them, otherwise try again - for (itry = 0; itry < macros_max_num_tries && macro_placed == false; itry++) { - // Choose a random position for the head - ipos = vtr::irand(free_locations[itype] - 1); + if (macro_placed == false) { + // if a macro still could not be placed after macros_max_num_tries times, + // go through the chip exhaustively to find a legal placement for the macro + // place the macro on the first location that is legal + // then set macro_placed = true; + // if there are no legal positions, error out + // Exhaustive placement of carry macros + for (ipos = 0; ipos < free_locations[itype] && macro_placed == false; ipos++) { // Try to place the macro macro_placed = try_place_macro(itype, ipos, pl_macro); - } // Finished all tries + } // Exhausted all the legal placement position for this macro + // If macro could not be placed after exhaustive placement, error out if (macro_placed == false) { - // if a macro still could not be placed after macros_max_num_tries times, - // go through the chip exhaustively to find a legal placement for the macro - // place the macro on the first location that is legal - // then set macro_placed = true; - // if there are no legal positions, error out - - // Exhaustive placement of carry macros - for (ipos = 0; ipos < free_locations[itype] && macro_placed == false; ipos++) { - // Try to place the macro - macro_placed = try_place_macro(itype, ipos, pl_macro); - - } // Exhausted all the legal placement position for this macro - - // If macro could not be placed after exhaustive placement, error out - if (macro_placed == false) { - // Error out - VPR_FATAL_ERROR(VPR_ERROR_PLACE, - "Initial placement failed.\n" - "Could not place macro length %zu with head block %s (#%zu); not enough free locations of type %s (#%d).\n" - "Please manually size the 
FPGA because VPR can't do this yet.\n", - pl_macro.members.size(), cluster_ctx.clb_nlist.block_name(blk_id).c_str(), size_t(blk_id), device_ctx.physical_tile_types[itype].name, itype); - } - - } else { - // This macro has been placed successfully, proceed to place the next macro - continue; + // Error out + VPR_FATAL_ERROR(VPR_ERROR_PLACE, + "Initial placement failed.\n" + "Could not place macro length %zu with head block %s (#%zu); not enough free locations of type %s (#%d).\n" + "Please manually size the FPGA because VPR can't do this yet.\n", + pl_macro.members.size(), cluster_ctx.clb_nlist.block_name(blk_id).c_str(), size_t(blk_id), device_ctx.physical_tile_types[itype].name, itype); } + + } else { + // This macro has been placed successfully, proceed to place the next macro + continue; } } // Finish placing all the pl_macros successfully } @@ -253,68 +255,73 @@ static void initial_placement_blocks(int* free_locations, enum e_pad_loc_type pa auto& cluster_ctx = g_vpr_ctx.clustering(); auto& place_ctx = g_vpr_ctx.mutable_placement(); - // The map serves to place first the most constrained block ids - std::map> sorted_block_map; + auto blocks = cluster_ctx.clb_nlist.blocks(); - for (auto blk_id : cluster_ctx.clb_nlist.blocks()) { - auto logical_block = cluster_ctx.clb_nlist.block_type(blk_id); + // Sorting blocks to place to have most constricted ones to be placed first + std::vector sorted_blocks(blocks.begin(), blocks.end()); - size_t num_equivalent_tiles = logical_block->equivalent_tiles.size(); - sorted_block_map[num_equivalent_tiles].push_back(blk_id); - } + auto criteria = [&cluster_ctx](const ClusterBlockId lhs, ClusterBlockId rhs) { + auto lhs_logical_block = cluster_ctx.clb_nlist.block_type(lhs); + auto rhs_logical_block = cluster_ctx.clb_nlist.block_type(rhs); - for (auto& sorted_blocks : sorted_block_map) { - for (auto blk_id : sorted_blocks.second) { - if (place_ctx.block_locs[blk_id].loc.x != -1) { // -1 is a sentinel for an empty block - // block 
placed. - continue; - } + auto lhs_num_tiles = lhs_logical_block->equivalent_tiles.size(); + auto rhs_num_tiles = rhs_logical_block->equivalent_tiles.size(); - auto logical_block = cluster_ctx.clb_nlist.block_type(blk_id); + return lhs_num_tiles < rhs_num_tiles; + }; - /* Don't do IOs if the user specifies IOs; we'll read those locations later. */ - if (!(is_io_type(pick_random_physical_type(logical_block)) && pad_loc_type == USER)) { - /* Randomly select a free location of the appropriate type for blk_id. - * We have a linearized list of all the free locations that can - * accommodate a block of that type in free_locations[itype]. - * Choose one randomly and put blk_id there. Then we don't want to pick - * that location again, so remove it from the free_locations array. - */ + std::sort(sorted_blocks.begin(), sorted_blocks.end(), criteria); - auto type = pick_placement_type(logical_block, 1, free_locations); + for (auto blk_id : sorted_blocks) { + if (place_ctx.block_locs[blk_id].loc.x != -1) { // -1 is a sentinel for an empty block + // block placed. + continue; + } - if (type == nullptr) { - VPR_FATAL_ERROR(VPR_ERROR_PLACE, - "Initial placement failed.\n" - "Could not place block %s (#%zu); no free locations of type %s (#%d).\n", - cluster_ctx.clb_nlist.block_name(blk_id).c_str(), size_t(blk_id), logical_block->name, logical_block->index); - } + auto logical_block = cluster_ctx.clb_nlist.block_type(blk_id); - itype = type->index; + /* Don't do IOs if the user specifies IOs; we'll read those locations later. */ + if (!(is_io_type(pick_random_physical_type(logical_block)) && pad_loc_type == USER)) { + /* Randomly select a free location of the appropriate type for blk_id. + * We have a linearized list of all the free locations that can + * accommodate a block of that type in free_locations[itype]. + * Choose one randomly and put blk_id there. Then we don't want to pick + * that location again, so remove it from the free_locations array. 
+ */ - t_pl_loc to; - initial_placement_location(free_locations, ipos, itype, to); + auto type = pick_placement_type(logical_block, 1, free_locations); - // Make sure that the position is EMPTY_BLOCK before placing the block down - VTR_ASSERT(place_ctx.grid_blocks[to.x][to.y].blocks[to.z] == EMPTY_BLOCK_ID); + if (type == nullptr) { + VPR_FATAL_ERROR(VPR_ERROR_PLACE, + "Initial placement failed.\n" + "Could not place block %s (#%zu); no free locations of type %s (#%d).\n", + cluster_ctx.clb_nlist.block_name(blk_id).c_str(), size_t(blk_id), logical_block->name, logical_block->index); + } + + itype = type->index; - place_ctx.grid_blocks[to.x][to.y].blocks[to.z] = blk_id; - place_ctx.grid_blocks[to.x][to.y].usage++; + t_pl_loc to; + initial_placement_location(free_locations, ipos, itype, to); - place_ctx.block_locs[blk_id].loc = to; + // Make sure that the position is EMPTY_BLOCK before placing the block down + VTR_ASSERT(place_ctx.grid_blocks[to.x][to.y].blocks[to.z] == EMPTY_BLOCK_ID); - //Mark IOs as fixed if specifying a (fixed) random placement - if (is_io_type(pick_random_physical_type(logical_block)) && pad_loc_type == RANDOM) { - place_ctx.block_locs[blk_id].is_fixed = true; - } + place_ctx.grid_blocks[to.x][to.y].blocks[to.z] = blk_id; + place_ctx.grid_blocks[to.x][to.y].usage++; - /* Ensure randomizer doesn't pick this location again, since it's occupied. Could shift all the - * legal positions in legal_pos to remove the entry (choice) we just used, but faster to - * just move the last entry in legal_pos to the spot we just used and decrement the - * count of free_locations. 
*/ - legal_pos[itype][ipos] = legal_pos[itype][free_locations[itype] - 1]; /* overwrite used block position */ - free_locations[itype]--; + place_ctx.block_locs[blk_id].loc = to; + + //Mark IOs as fixed if specifying a (fixed) random placement + if (is_io_type(pick_random_physical_type(logical_block)) && pad_loc_type == RANDOM) { + place_ctx.block_locs[blk_id].is_fixed = true; } + + /* Ensure randomizer doesn't pick this location again, since it's occupied. Could shift all the + * legal positions in legal_pos to remove the entry (choice) we just used, but faster to + * just move the last entry in legal_pos to the spot we just used and decrement the + * count of free_locations. */ + legal_pos[itype][ipos] = legal_pos[itype][free_locations[itype] - 1]; /* overwrite used block position */ + free_locations[itype]--; } } } From 2c582c89f3e6fbef4ae9b6b0b6f8cef5820aa664 Mon Sep 17 00:00:00 2001 From: Alessandro Comodi Date: Wed, 20 Nov 2019 17:04:17 +0100 Subject: [PATCH 47/58] equivalent: initial placement sorts only if equivalent tiles are present Signed-off-by: Alessandro Comodi --- vpr/src/base/SetupVPR.cpp | 7 ++++++- vpr/src/base/vpr_context.h | 4 ++++ vpr/src/place/initial_placement.cpp | 9 +++++++-- 3 files changed, 17 insertions(+), 3 deletions(-) diff --git a/vpr/src/base/SetupVPR.cpp b/vpr/src/base/SetupVPR.cpp index 6b3a25c243d..4fd21004eb5 100644 --- a/vpr/src/base/SetupVPR.cpp +++ b/vpr/src/base/SetupVPR.cpp @@ -134,13 +134,18 @@ void SetupVPR(const t_options* Options, } device_ctx.EMPTY_LOGICAL_BLOCK_TYPE = nullptr; + int max_equivalent_tiles = 0; for (const auto& type : device_ctx.logical_block_types) { if (0 == strcmp(type.name, EMPTY_BLOCK_NAME)) { device_ctx.EMPTY_LOGICAL_BLOCK_TYPE = &type; - break; } + + max_equivalent_tiles = std::max(max_equivalent_tiles, (int)type.equivalent_tiles.size()); } + VTR_ASSERT(max_equivalent_tiles > 0); + device_ctx.has_multiple_equivalent_tiles = max_equivalent_tiles > 1; + VTR_ASSERT(device_ctx.EMPTY_PHYSICAL_TILE_TYPE 
!= nullptr); VTR_ASSERT(device_ctx.EMPTY_LOGICAL_BLOCK_TYPE != nullptr); diff --git a/vpr/src/base/vpr_context.h b/vpr/src/base/vpr_context.h index 8a275b53c0c..dc1c07650f9 100644 --- a/vpr/src/base/vpr_context.h +++ b/vpr/src/base/vpr_context.h @@ -132,6 +132,10 @@ struct DeviceContext : public Context { std::vector physical_tile_types; std::vector logical_block_types; + /* Boolean that indicates whether the architecture implements an N:M + * physical tiles to logical blocks mapping */ + bool has_multiple_equivalent_tiles; + /******************************************************************* * Routing related ********************************************************************/ diff --git a/vpr/src/place/initial_placement.cpp b/vpr/src/place/initial_placement.cpp index f3bcf114939..0570213128e 100644 --- a/vpr/src/place/initial_placement.cpp +++ b/vpr/src/place/initial_placement.cpp @@ -185,7 +185,9 @@ static void initial_placement_pl_macros(int macros_max_num_tries, int* free_loca return lhs_num_tiles < rhs_num_tiles; }; - std::sort(sorted_pl_macros.begin(), sorted_pl_macros.end(), criteria); + if (device_ctx.has_multiple_equivalent_tiles) { + std::sort(sorted_pl_macros.begin(), sorted_pl_macros.end(), criteria); + } /* Macros are harder to place. 
Do them first */ for (auto pl_macro : sorted_pl_macros) { @@ -253,6 +255,7 @@ static void initial_placement_pl_macros(int macros_max_num_tries, int* free_loca static void initial_placement_blocks(int* free_locations, enum e_pad_loc_type pad_loc_type) { int itype, ipos; auto& cluster_ctx = g_vpr_ctx.clustering(); + auto& device_ctx = g_vpr_ctx.device(); auto& place_ctx = g_vpr_ctx.mutable_placement(); auto blocks = cluster_ctx.clb_nlist.blocks(); @@ -270,7 +273,9 @@ static void initial_placement_blocks(int* free_locations, enum e_pad_loc_type pa return lhs_num_tiles < rhs_num_tiles; }; - std::sort(sorted_blocks.begin(), sorted_blocks.end(), criteria); + if (device_ctx.has_multiple_equivalent_tiles) { + std::sort(sorted_blocks.begin(), sorted_blocks.end(), criteria); + } for (auto blk_id : sorted_blocks) { if (place_ctx.block_locs[blk_id].loc.x != -1) { // -1 is a sentinel for an empty block From 175fe4dcf107ecc1279f02171eb8ce09e3b76385 Mon Sep 17 00:00:00 2001 From: Alessandro Comodi Date: Wed, 20 Nov 2019 12:13:29 +0100 Subject: [PATCH 48/58] place: rename locations alloc/load functions during initial placement Signed-off-by: Alessandro Comodi --- vpr/src/place/initial_placement.cpp | 20 ++++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/vpr/src/place/initial_placement.cpp b/vpr/src/place/initial_placement.cpp index 0570213128e..86d9645b709 100644 --- a/vpr/src/place/initial_placement.cpp +++ b/vpr/src/place/initial_placement.cpp @@ -13,10 +13,10 @@ static t_pl_loc** legal_pos = nullptr; /* [0..device_ctx.num_block_types-1][0..type_tsize - 1] */ static int* num_legal_pos = nullptr; /* [0..num_legal_pos-1] */ -static void alloc_legal_placements(); -static void load_legal_placements(); +static void alloc_legal_placement_locations(); +static void load_legal_placement_locations(); -static void free_legal_placements(); +static void free_legal_placement_locations(); static int check_macro_can_be_placed(t_pl_macro pl_macro, int itype, 
t_pl_loc head_pos); static int try_place_macro(int itype, int ipos, t_pl_macro pl_macro); @@ -29,7 +29,7 @@ static t_physical_tile_type_ptr pick_placement_type(t_logical_block_type_ptr log int num_needed_types, int* free_locations); -static void alloc_legal_placements() { +static void alloc_legal_placement_locations() { auto& device_ctx = g_vpr_ctx.device(); auto& place_ctx = g_vpr_ctx.mutable_placement(); @@ -58,7 +58,7 @@ static void alloc_legal_placements() { } } -static void load_legal_placements() { +static void load_legal_placement_locations() { auto& device_ctx = g_vpr_ctx.device(); auto& place_ctx = g_vpr_ctx.placement(); @@ -83,7 +83,7 @@ static void load_legal_placements() { free(index); } -static void free_legal_placements() { +static void free_legal_placement_locations() { auto& device_ctx = g_vpr_ctx.device(); for (unsigned int i = 0; i < device_ctx.physical_tile_types.size(); i++) { @@ -358,8 +358,8 @@ void initial_placement(enum e_pad_loc_type pad_loc_type, */ // Loading legal placement locations - alloc_legal_placements(); - load_legal_placements(); + alloc_legal_placement_locations(); + load_legal_placement_locations(); int itype, ipos; int* free_locations; /* [0..device_ctx.num_block_types-1]. 
@@ -425,7 +425,7 @@ void initial_placement(enum e_pad_loc_type pad_loc_type, } /* Restore legal_pos */ - load_legal_placements(); + load_legal_placement_locations(); #ifdef VERBOSE VTR_LOG("At end of initial_placement.\n"); @@ -434,5 +434,5 @@ void initial_placement(enum e_pad_loc_type pad_loc_type, } #endif free(free_locations); - free_legal_placements(); + free_legal_placement_locations(); } From 4b914d0843284e8c59106d79cad46d7e885a2b75 Mon Sep 17 00:00:00 2001 From: Alessandro Comodi Date: Wed, 20 Nov 2019 13:27:49 +0100 Subject: [PATCH 49/58] equivalent: change phy/log_pin to physical/logical_pin Signed-off-by: Alessandro Comodi --- libs/libarchfpga/src/read_xml_arch_file.cpp | 14 ++++----- vpr/src/base/ShowSetup.cpp | 4 +-- vpr/src/base/clustered_netlist_utils.cpp | 4 +-- vpr/src/base/read_netlist.cpp | 16 +++++------ vpr/src/draw/draw.cpp | 4 +-- vpr/src/pack/output_clustering.cpp | 4 +-- vpr/src/place/place_macro.cpp | 16 +++++------ vpr/src/util/vpr_utils.cpp | 32 ++++++++++----------- vpr/src/util/vpr_utils.h | 6 ++-- 9 files changed, 50 insertions(+), 50 deletions(-) diff --git a/libs/libarchfpga/src/read_xml_arch_file.cpp b/libs/libarchfpga/src/read_xml_arch_file.cpp index 55aa02b7fbb..2430b61fb00 100644 --- a/libs/libarchfpga/src/read_xml_arch_file.cpp +++ b/libs/libarchfpga/src/read_xml_arch_file.cpp @@ -3259,10 +3259,10 @@ static void ProcessEquivalentSiteDirectConnection(pugi::xml_node Parent, vtr::bimap directs_map; for (int npin = 0; npin < num_pins; npin++) { - t_physical_pin phy_pin(npin); - t_logical_pin log_pin(npin); + t_physical_pin physical_pin(npin); + t_logical_pin logical_pin(npin); - directs_map.insert(log_pin, phy_pin); + directs_map.insert(logical_pin, physical_pin); } PhysicalTileType->tile_block_pin_directs_map[LogicalBlockType->index] = directs_map; @@ -3310,15 +3310,15 @@ static void ProcessEquivalentSiteCustomConnection(pugi::xml_node Parent, int num_pins = from_pins.second - from_pins.first; for (int i = 0; i < num_pins; i++) { - 
t_physical_pin phy_pin(from_pins.first + i); - t_logical_pin log_pin(to_pins.first + i); + t_physical_pin physical_pin(from_pins.first + i); + t_logical_pin logical_pin(to_pins.first + i); - auto result = directs_map.insert(log_pin, phy_pin); + auto result = directs_map.insert(logical_pin, physical_pin); if (!result.second) { archfpga_throw(loc_data.filename_c_str(), loc_data.line(Parent), "Duplicate logical pin (%d) to physical pin (%d) mappings found for " "Physical Tile %s and Logical Block %s.\n", - log_pin.pin, phy_pin.pin, PhysicalTileType->name, LogicalBlockType->name); + logical_pin.pin, physical_pin.pin, PhysicalTileType->name, LogicalBlockType->name); } } diff --git a/vpr/src/base/ShowSetup.cpp b/vpr/src/base/ShowSetup.cpp index 3a96ab9851b..4a64bea6938 100644 --- a/vpr/src/base/ShowSetup.cpp +++ b/vpr/src/base/ShowSetup.cpp @@ -79,8 +79,8 @@ void printClusteredNetlistStats() { num_blocks_type[logical_block->index]++; if (is_io_type(physical_tile)) { for (j = 0; j < logical_block->pb_type->num_pins; j++) { - int phy_pin = get_physical_pin(physical_tile, logical_block, j); - auto pin_class = physical_tile->pin_class[phy_pin]; + int physical_pin = get_physical_pin(physical_tile, logical_block, j); + auto pin_class = physical_tile->pin_class[physical_pin]; auto class_inf = physical_tile->class_inf[pin_class]; if (cluster_ctx.clb_nlist.block_net(blk_id, j) != ClusterNetId::INVALID()) { diff --git a/vpr/src/base/clustered_netlist_utils.cpp b/vpr/src/base/clustered_netlist_utils.cpp index 2ccdb7bc6ff..0e7f09a6fe8 100644 --- a/vpr/src/base/clustered_netlist_utils.cpp +++ b/vpr/src/base/clustered_netlist_utils.cpp @@ -17,7 +17,7 @@ void ClusteredPinAtomPinsLookup::init_lookup(const ClusteredNetlist& clustered_n clustered_pin_connected_atom_pins_.resize(clustered_pins.size()); for (ClusterPinId clustered_pin : clustered_pins) { auto clustered_block = clustered_netlist.pin_block(clustered_pin); - int log_pin_index = 
clustered_netlist.pin_logical_index(clustered_pin); - clustered_pin_connected_atom_pins_[clustered_pin] = find_clb_pin_connected_atom_pins(clustered_block, log_pin_index, pb_gpin_lookup); + int logical_pin_index = clustered_netlist.pin_logical_index(clustered_pin); + clustered_pin_connected_atom_pins_[clustered_pin] = find_clb_pin_connected_atom_pins(clustered_block, logical_pin_index, pb_gpin_lookup); } } diff --git a/vpr/src/base/read_netlist.cpp b/vpr/src/base/read_netlist.cpp index fb461bb3f29..e368035ff0b 100644 --- a/vpr/src/base/read_netlist.cpp +++ b/vpr/src/base/read_netlist.cpp @@ -951,14 +951,14 @@ static void load_external_nets_and_cb(ClusteredNetlist& clb_nlist) { block_type = clb_nlist.block_type(blk_id); auto tile_type = pick_random_physical_type(block_type); for (j = 0; j < block_type->pb_type->num_pins; j++) { - int phy_pin = get_physical_pin(tile_type, block_type, j); + int physical_pin = get_physical_pin(tile_type, block_type, j); //Iterate through each pin of the block, and see if there is a net allocated/used for it clb_net_id = clb_nlist.block_net(blk_id, j); if (clb_net_id != ClusterNetId::INVALID()) { //Verify old and new CLB netlists have the same # of pins per net - if (RECEIVER == tile_type->class_inf[tile_type->pin_class[phy_pin]].type) { + if (RECEIVER == tile_type->class_inf[tile_type->pin_class[physical_pin]].type) { count[clb_net_id]++; if (count[clb_net_id] > (int)clb_nlist.net_sinks(clb_net_id).size()) { @@ -975,17 +975,17 @@ static void load_external_nets_and_cb(ClusteredNetlist& clb_nlist) { VTR_ASSERT(j == clb_nlist.net_pin_physical_index(clb_net_id, count[clb_net_id])); // nets connecting to global pins are marked as global nets - if (tile_type->is_pin_global[phy_pin]) { + if (tile_type->is_pin_global[physical_pin]) { clb_nlist.set_net_is_global(clb_net_id, true); } - if (tile_type->is_ignored_pin[phy_pin]) { + if (tile_type->is_ignored_pin[physical_pin]) { clb_nlist.set_net_is_ignored(clb_net_id, true); } /* Error check 
performed later to ensure no mixing of ignored and non ignored signals */ } else { - VTR_ASSERT(DRIVER == tile_type->class_inf[tile_type->pin_class[phy_pin]].type); + VTR_ASSERT(DRIVER == tile_type->class_inf[tile_type->pin_class[physical_pin]].type); VTR_ASSERT(j == clb_nlist.pin_physical_index(*(clb_nlist.net_pins(clb_net_id).begin()))); VTR_ASSERT(j == clb_nlist.net_pin_physical_index(clb_net_id, 0)); } @@ -1000,10 +1000,10 @@ static void load_external_nets_and_cb(ClusteredNetlist& clb_nlist) { bool is_ignored_net = clb_nlist.net_is_ignored(net_id); block_type = clb_nlist.block_type(clb_nlist.pin_block(pin_id)); auto tile_type = pick_random_physical_type(block_type); - int log_pin = clb_nlist.pin_logical_index(pin_id); - int phy_pin = get_physical_pin(tile_type, block_type, log_pin); + int logical_pin = clb_nlist.pin_logical_index(pin_id); + int physical_pin = get_physical_pin(tile_type, block_type, logical_pin); - if (tile_type->is_ignored_pin[phy_pin] != is_ignored_net) { + if (tile_type->is_ignored_pin[physical_pin] != is_ignored_net) { VTR_LOG_WARN( "Netlist connects net %s to both global and non-global pins.\n", clb_nlist.net_name(net_id).c_str()); diff --git a/vpr/src/draw/draw.cpp b/vpr/src/draw/draw.cpp index a04755be146..465304e44fc 100644 --- a/vpr/src/draw/draw.cpp +++ b/vpr/src/draw/draw.cpp @@ -2661,9 +2661,9 @@ void draw_highlight_blocks_color(t_logical_block_type_ptr type, ClusterBlockId b continue; auto physical_tile = physical_tile_type(blk_id); - int phy_pin = get_physical_pin(physical_tile, type, k); + int physical_pin = get_physical_pin(physical_tile, type, k); - iclass = physical_tile->pin_class[phy_pin]; + iclass = physical_tile->pin_class[physical_pin]; if (physical_tile->class_inf[iclass].type == DRIVER) { /* Fanout */ if (draw_state->block_color[blk_id] == SELECTED_COLOR) { diff --git a/vpr/src/pack/output_clustering.cpp b/vpr/src/pack/output_clustering.cpp index d881b9eb510..b2cb8a9192d 100644 --- a/vpr/src/pack/output_clustering.cpp 
+++ b/vpr/src/pack/output_clustering.cpp @@ -66,8 +66,8 @@ static void print_stats() { auto logical_block = cluster_ctx.clb_nlist.block_type(blk_id); auto physical_tile = pick_random_physical_type(logical_block); for (ipin = 0; ipin < logical_block->pb_type->num_pins; ipin++) { - int phy_pin = get_physical_pin(physical_tile, logical_block, ipin); - auto pin_class = physical_tile->pin_class[phy_pin]; + int physical_pin = get_physical_pin(physical_tile, logical_block, ipin); + auto pin_class = physical_tile->pin_class[physical_pin]; auto pin_class_inf = physical_tile->class_inf[pin_class]; if (cluster_ctx.clb_nlist.block_pb(blk_id)->pb_route.empty()) { diff --git a/vpr/src/place/place_macro.cpp b/vpr/src/place/place_macro.cpp index db8cf827dfa..e1f39273e9d 100644 --- a/vpr/src/place/place_macro.cpp +++ b/vpr/src/place/place_macro.cpp @@ -82,11 +82,11 @@ static void find_all_the_macro(int* num_of_macro, std::vector& p num_blk_pins = cluster_ctx.clb_nlist.block_type(blk_id)->pb_type->num_pins; for (to_iblk_pin = 0; to_iblk_pin < num_blk_pins; to_iblk_pin++) { - int to_phy_pin = get_physical_pin(physical_tile, logical_block, to_iblk_pin); + int to_physical_pin = get_physical_pin(physical_tile, logical_block, to_iblk_pin); to_net_id = cluster_ctx.clb_nlist.block_net(blk_id, to_iblk_pin); - to_idirect = f_idirect_from_blk_pin[physical_tile->index][to_phy_pin]; - to_src_or_sink = f_direct_type_from_blk_pin[physical_tile->index][to_phy_pin]; + to_idirect = f_idirect_from_blk_pin[physical_tile->index][to_physical_pin]; + to_src_or_sink = f_direct_type_from_blk_pin[physical_tile->index][to_physical_pin]; // Identify potential macro head blocks (i.e. 
start of a macro) // @@ -102,11 +102,11 @@ static void find_all_the_macro(int* num_of_macro, std::vector& p || (is_constant_clb_net(to_net_id) && !net_is_driven_by_direct(to_net_id)))) { for (from_iblk_pin = 0; from_iblk_pin < num_blk_pins; from_iblk_pin++) { - int from_phy_pin = get_physical_pin(physical_tile, logical_block, from_iblk_pin); + int from_physical_pin = get_physical_pin(physical_tile, logical_block, from_iblk_pin); from_net_id = cluster_ctx.clb_nlist.block_net(blk_id, from_iblk_pin); - from_idirect = f_idirect_from_blk_pin[physical_tile->index][from_phy_pin]; - from_src_or_sink = f_direct_type_from_blk_pin[physical_tile->index][from_phy_pin]; + from_idirect = f_idirect_from_blk_pin[physical_tile->index][from_physical_pin]; + from_src_or_sink = f_direct_type_from_blk_pin[physical_tile->index][from_physical_pin]; // Confirm whether this is a head macro // @@ -136,8 +136,8 @@ static void find_all_the_macro(int* num_of_macro, std::vector& p next_blk_id = cluster_ctx.clb_nlist.net_pin_block(curr_net_id, 1); // Assume that the from_iblk_pin index is the same for the next block - VTR_ASSERT(f_idirect_from_blk_pin[physical_tile->index][from_phy_pin] == from_idirect - && f_direct_type_from_blk_pin[physical_tile->index][from_phy_pin] == SOURCE); + VTR_ASSERT(f_idirect_from_blk_pin[physical_tile->index][from_physical_pin] == from_idirect + && f_direct_type_from_blk_pin[physical_tile->index][from_physical_pin] == SOURCE); next_net_id = cluster_ctx.clb_nlist.block_net(next_blk_id, from_iblk_pin); // Mark down this block as a member of the macro diff --git a/vpr/src/util/vpr_utils.cpp b/vpr/src/util/vpr_utils.cpp index 376b208b02d..8027c40df16 100644 --- a/vpr/src/util/vpr_utils.cpp +++ b/vpr/src/util/vpr_utils.cpp @@ -312,42 +312,42 @@ void swap(IntraLbPbPinLookup& lhs, IntraLbPbPinLookup& rhs) { //Returns the set of pins which are connected to the top level clb pin // The pin(s) may be input(s) or and output (returning the connected sinks or drivers respectively) 
-std::vector find_clb_pin_connected_atom_pins(ClusterBlockId clb, int log_pin, const IntraLbPbPinLookup& pb_gpin_lookup) { +std::vector find_clb_pin_connected_atom_pins(ClusterBlockId clb, int logical_pin, const IntraLbPbPinLookup& pb_gpin_lookup) { std::vector atom_pins; auto& clb_nlist = g_vpr_ctx.clustering().clb_nlist; auto logical_block = clb_nlist.block_type(clb); auto physical_tile = pick_random_physical_type(logical_block); - int phy_pin = get_physical_pin(physical_tile, logical_block, log_pin); + int physical_pin = get_physical_pin(physical_tile, logical_block, logical_pin); - if (is_opin(phy_pin, physical_tile)) { + if (is_opin(physical_pin, physical_tile)) { //output - AtomPinId driver = find_clb_pin_driver_atom_pin(clb, log_pin, pb_gpin_lookup); + AtomPinId driver = find_clb_pin_driver_atom_pin(clb, logical_pin, pb_gpin_lookup); if (driver) { atom_pins.push_back(driver); } } else { //input - atom_pins = find_clb_pin_sink_atom_pins(clb, log_pin, pb_gpin_lookup); + atom_pins = find_clb_pin_sink_atom_pins(clb, logical_pin, pb_gpin_lookup); } return atom_pins; } //Returns the atom pin which drives the top level clb output pin -AtomPinId find_clb_pin_driver_atom_pin(ClusterBlockId clb, int log_pin, const IntraLbPbPinLookup& pb_gpin_lookup) { +AtomPinId find_clb_pin_driver_atom_pin(ClusterBlockId clb, int logical_pin, const IntraLbPbPinLookup& pb_gpin_lookup) { auto& cluster_ctx = g_vpr_ctx.clustering(); auto& atom_ctx = g_vpr_ctx.atom(); - if (log_pin < 0) { + if (logical_pin < 0) { //CLB output pin has no internal driver return AtomPinId::INVALID(); } const t_pb_routes& pb_routes = cluster_ctx.clb_nlist.block_pb(clb)->pb_route; - AtomNetId atom_net = pb_routes[log_pin].atom_net_id; + AtomNetId atom_net = pb_routes[logical_pin].atom_net_id; - int pb_pin_id = log_pin; + int pb_pin_id = logical_pin; //Trace back until the driver is reached while (pb_routes[pb_pin_id].driver_pb_pin_id >= 0) { pb_pin_id = pb_routes[pb_pin_id].driver_pb_pin_id; @@ -366,27 +366,27 
@@ AtomPinId find_clb_pin_driver_atom_pin(ClusterBlockId clb, int log_pin, const In } //Returns the set of atom sink pins associated with the top level clb input pin -std::vector find_clb_pin_sink_atom_pins(ClusterBlockId clb, int log_pin, const IntraLbPbPinLookup& pb_gpin_lookup) { +std::vector find_clb_pin_sink_atom_pins(ClusterBlockId clb, int logical_pin, const IntraLbPbPinLookup& pb_gpin_lookup) { auto& cluster_ctx = g_vpr_ctx.clustering(); auto& atom_ctx = g_vpr_ctx.atom(); const t_pb_routes& pb_routes = cluster_ctx.clb_nlist.block_pb(clb)->pb_route; - VTR_ASSERT_MSG(log_pin < cluster_ctx.clb_nlist.block_type(clb)->pb_type->num_pins, "Must be a valid tile pin"); + VTR_ASSERT_MSG(logical_pin < cluster_ctx.clb_nlist.block_type(clb)->pb_type->num_pins, "Must be a valid tile pin"); VTR_ASSERT(cluster_ctx.clb_nlist.block_pb(clb)); - VTR_ASSERT_MSG(log_pin < cluster_ctx.clb_nlist.block_pb(clb)->pb_graph_node->num_pins(), "Pin must map to a top-level pb pin"); + VTR_ASSERT_MSG(logical_pin < cluster_ctx.clb_nlist.block_pb(clb)->pb_graph_node->num_pins(), "Pin must map to a top-level pb pin"); - VTR_ASSERT_MSG(pb_routes[log_pin].driver_pb_pin_id < 0, "CLB input pin should have no internal drivers"); + VTR_ASSERT_MSG(pb_routes[logical_pin].driver_pb_pin_id < 0, "CLB input pin should have no internal drivers"); - AtomNetId atom_net = pb_routes[log_pin].atom_net_id; + AtomNetId atom_net = pb_routes[logical_pin].atom_net_id; VTR_ASSERT(atom_net); - std::vector connected_sink_pb_pins = find_connected_internal_clb_sink_pins(clb, log_pin); + std::vector connected_sink_pb_pins = find_connected_internal_clb_sink_pins(clb, logical_pin); std::vector sink_atom_pins; for (int sink_pb_pin : connected_sink_pb_pins) { - //Map the log_pin_id to AtomPinId + //Map the logical_pin_id to AtomPinId AtomPinId atom_pin = find_atom_pin_for_pb_route_id(clb, sink_pb_pin, pb_gpin_lookup); VTR_ASSERT(atom_pin); diff --git a/vpr/src/util/vpr_utils.h b/vpr/src/util/vpr_utils.h index 
5b9368a7d17..f0b6dc07c33 100644 --- a/vpr/src/util/vpr_utils.h +++ b/vpr/src/util/vpr_utils.h @@ -75,13 +75,13 @@ class IntraLbPbPinLookup { }; //Find the atom pins (driver or sinks) connected to the specified top-level CLB pin -std::vector find_clb_pin_connected_atom_pins(ClusterBlockId clb, int log_pin, const IntraLbPbPinLookup& pb_gpin_lookup); +std::vector find_clb_pin_connected_atom_pins(ClusterBlockId clb, int logical_pin, const IntraLbPbPinLookup& pb_gpin_lookup); //Find the atom pin driving to the specified top-level CLB pin -AtomPinId find_clb_pin_driver_atom_pin(ClusterBlockId clb, int log_pin, const IntraLbPbPinLookup& pb_gpin_lookup); +AtomPinId find_clb_pin_driver_atom_pin(ClusterBlockId clb, int logical_pin, const IntraLbPbPinLookup& pb_gpin_lookup); //Find the atom pins driven by the specified top-level CLB pin -std::vector find_clb_pin_sink_atom_pins(ClusterBlockId clb, int log_pin, const IntraLbPbPinLookup& pb_gpin_lookup); +std::vector find_clb_pin_sink_atom_pins(ClusterBlockId clb, int logical_pin, const IntraLbPbPinLookup& pb_gpin_lookup); std::tuple find_pb_route_clb_input_net_pin(ClusterBlockId clb, int sink_pb_route_id); From 5e0da0c2dde7d661600f4de570ec42415bb5395d Mon Sep 17 00:00:00 2001 From: Alessandro Comodi Date: Wed, 20 Nov 2019 13:33:22 +0100 Subject: [PATCH 50/58] equivalent: added in-code comments Signed-off-by: Alessandro Comodi --- libs/libarchfpga/src/physical_types.h | 8 ++++++++ vpr/src/route/rr_graph.cpp | 4 ++++ 2 files changed, 12 insertions(+) diff --git a/libs/libarchfpga/src/physical_types.h b/libs/libarchfpga/src/physical_types.h index 023120965b0..eeb05d8a22f 100644 --- a/libs/libarchfpga/src/physical_types.h +++ b/libs/libarchfpga/src/physical_types.h @@ -621,6 +621,10 @@ struct t_physical_tile_type { std::vector get_clock_pins_indices() const; }; +/** A logical pin defines the pin index of a logical block type (i.e. 
a top level PB type) + * This structure wraps the int value of the logical pin to allow its storage in the + * vtr::bimap container. + */ struct t_logical_pin { int pin = -1; @@ -637,6 +641,10 @@ struct t_logical_pin { } }; +/** A physical pin defines the pin index of a physical tile type (i.e. a grid tile type) + * This structure wraps the int value of the physical pin to allow its storage in the + * vtr::bimap container. + */ struct t_physical_pin { int pin = -1; diff --git a/vpr/src/route/rr_graph.cpp b/vpr/src/route/rr_graph.cpp index 2fb98991d14..df1bb8b0967 100644 --- a/vpr/src/route/rr_graph.cpp +++ b/vpr/src/route/rr_graph.cpp @@ -2709,6 +2709,8 @@ static t_clb_to_clb_directs* alloc_and_load_clb_to_clb_directs(const t_direct_in end_pin_index = tile_port.num_pins - 1; } + // Add clb directs start/end pin indices based on the absolute pin position + // of the port defined in the direct connection. The CLB is the source one. clb_to_clb_directs[i].from_clb_pin_start_index = tile_port.absolute_first_pin_index + start_pin_index; clb_to_clb_directs[i].from_clb_pin_end_index = tile_port.absolute_first_pin_index + end_pin_index; @@ -2749,6 +2751,8 @@ static t_clb_to_clb_directs* alloc_and_load_clb_to_clb_directs(const t_direct_in end_pin_index = tile_port.num_pins - 1; } + // Add clb directs start/end pin indices based on the absolute pin position + // of the port defined in the direct connection. The CLB is the destination one. 
clb_to_clb_directs[i].to_clb_pin_start_index = tile_port.absolute_first_pin_index + start_pin_index; clb_to_clb_directs[i].to_clb_pin_end_index = tile_port.absolute_first_pin_index + end_pin_index; From 76e5c11eadeea2e338b8aa7bd27ddc60af3afc1e Mon Sep 17 00:00:00 2001 From: Alessandro Comodi Date: Wed, 20 Nov 2019 16:08:59 +0100 Subject: [PATCH 51/58] equivalent: remove need of equivalent_sites_names Signed-off-by: Alessandro Comodi --- libs/libarchfpga/src/physical_types.h | 1 - libs/libarchfpga/src/read_xml_arch_file.cpp | 26 +++++++-------------- 2 files changed, 9 insertions(+), 18 deletions(-) diff --git a/libs/libarchfpga/src/physical_types.h b/libs/libarchfpga/src/physical_types.h index eeb05d8a22f..36901e6f0ed 100644 --- a/libs/libarchfpga/src/physical_types.h +++ b/libs/libarchfpga/src/physical_types.h @@ -610,7 +610,6 @@ struct t_physical_tile_type { int index = -1; /* index of type descriptor in array (allows for index referencing) */ - std::vector equivalent_sites_names; std::vector equivalent_sites; /* Unordered map indexed by the logical block index. 
diff --git a/libs/libarchfpga/src/read_xml_arch_file.cpp b/libs/libarchfpga/src/read_xml_arch_file.cpp index 2430b61fb00..7f004f315bb 100644 --- a/libs/libarchfpga/src/read_xml_arch_file.cpp +++ b/libs/libarchfpga/src/read_xml_arch_file.cpp @@ -3228,7 +3228,6 @@ static void ProcessTileEquivalentSites(pugi::xml_node Parent, expect_only_attributes(CurSite, {"pb_type", "pin_mapping"}, loc_data); /* Load equivalent site name */ auto Prop = std::string(get_attribute(CurSite, "pb_type", loc_data).value()); - PhysicalTileType->equivalent_sites_names.push_back(Prop); auto LogicalBlockType = get_type_by_name(Prop.c_str(), LogicalBlockTypes); @@ -3241,6 +3240,12 @@ static void ProcessTileEquivalentSites(pugi::xml_node Parent, ProcessEquivalentSiteDirectConnection(CurSite, PhysicalTileType, LogicalBlockType, loc_data); } + if (0 == strcmp(LogicalBlockType->pb_type->name, Prop.c_str())) { + PhysicalTileType->equivalent_sites.push_back(LogicalBlockType); + + check_port_direct_mappings(PhysicalTileType, LogicalBlockType); + } + CurSite = CurSite.next_sibling(CurSite.name()); } } @@ -4800,28 +4805,15 @@ static void link_physical_logical_types(std::vector& Physi for (auto& physical_tile : PhysicalTileTypes) { if (physical_tile.index == EMPTY_TYPE_INDEX) continue; - unsigned int logical_block_added = 0; - for (auto& equivalent_site_name : physical_tile.equivalent_sites_names) { - for (auto& logical_block : LogicalBlockTypes) { - if (logical_block.index == EMPTY_TYPE_INDEX) continue; - - // Check the corresponding Logical Block - if (0 == strcmp(logical_block.pb_type->name, equivalent_site_name.c_str())) { - physical_tile.equivalent_sites.push_back(&logical_block); + for (auto& logical_block : LogicalBlockTypes) { + for (auto site : physical_tile.equivalent_sites) { + if (0 == strcmp(logical_block.name, site->pb_type->name)) { logical_block.equivalent_tiles.push_back(&physical_tile); - check_port_direct_mappings(&physical_tile, &logical_block); - - logical_block_added++; break; } } 
} - - if (logical_block_added != physical_tile.equivalent_sites.size()) { - archfpga_throw(__FILE__, __LINE__, - "Could not create link between the %s and all its equivalent sites.\n", physical_tile.name); - } } for (auto& logical_block : LogicalBlockTypes) { From 856ab70a8a9e78321f7c043b1e96f3df846c90cf Mon Sep 17 00:00:00 2001 From: Alessandro Comodi Date: Wed, 20 Nov 2019 16:27:44 +0100 Subject: [PATCH 52/58] equivalent: fixed example architecture.xml in docs Signed-off-by: Alessandro Comodi --- doc/src/arch/example_arch.xml | 28 ++++------------------------ 1 file changed, 4 insertions(+), 24 deletions(-) diff --git a/doc/src/arch/example_arch.xml b/doc/src/arch/example_arch.xml index f4aaa606e6d..8a319b2d0e4 100644 --- a/doc/src/arch/example_arch.xml +++ b/doc/src/arch/example_arch.xml @@ -80,11 +80,7 @@ - - - - - + @@ -99,11 +95,7 @@ - - - - - + @@ -113,11 +105,7 @@ - - - - - + @@ -126,15 +114,7 @@ - - - - - - - - - + From 820d4d2fc1366954d17137914bb8d64d6ac6d951 Mon Sep 17 00:00:00 2001 From: Alessandro Comodi Date: Wed, 20 Nov 2019 18:04:28 +0100 Subject: [PATCH 53/58] equivalent: reorder physical/logical equivalent tiles based on pin differences This change enables a sorting of the logical block and physical tiles equivalent counterpart lists based on the differences in the number of pins. This ordering allows to select the "best" corresponding candidate of a logical/physical type when there is the need to access its data. 
This also allows to not rely on the `pick_random_type` function Signed-off-by: Alessandro Comodi --- libs/libarchfpga/src/read_xml_arch_file.cpp | 36 +++++++++++++++++++-- vpr/src/base/SetupGrid.cpp | 2 +- vpr/src/base/ShowSetup.cpp | 2 +- vpr/src/base/check_netlist.cpp | 2 +- vpr/src/base/read_netlist.cpp | 4 +-- vpr/src/base/read_place.cpp | 4 +-- vpr/src/draw/draw.cpp | 2 +- vpr/src/draw/draw_types.cpp | 2 +- vpr/src/draw/intra_logic_block.cpp | 2 +- vpr/src/pack/output_clustering.cpp | 2 +- vpr/src/place/initial_placement.cpp | 4 +-- vpr/src/place/place_macro.cpp | 2 +- vpr/src/place/uniform_move_generator.cpp | 2 +- vpr/src/power/power.cpp | 2 +- vpr/src/util/vpr_utils.cpp | 31 +++++------------- vpr/src/util/vpr_utils.h | 4 +-- 16 files changed, 60 insertions(+), 43 deletions(-) diff --git a/libs/libarchfpga/src/read_xml_arch_file.cpp b/libs/libarchfpga/src/read_xml_arch_file.cpp index 7f004f315bb..32381facab6 100644 --- a/libs/libarchfpga/src/read_xml_arch_file.cpp +++ b/libs/libarchfpga/src/read_xml_arch_file.cpp @@ -4805,8 +4805,24 @@ static void link_physical_logical_types(std::vector& Physi for (auto& physical_tile : PhysicalTileTypes) { if (physical_tile.index == EMPTY_TYPE_INDEX) continue; + auto& equivalent_sites = physical_tile.equivalent_sites; + + auto criteria = [physical_tile](const t_logical_block_type* lhs, const t_logical_block_type* rhs) { + int num_physical_pins = physical_tile.num_pins / physical_tile.capacity; + + int lhs_num_logical_pins = lhs->pb_type->num_pins; + int rhs_num_logical_pins = rhs->pb_type->num_pins; + + int lhs_diff_num_pins = num_physical_pins - lhs_num_logical_pins; + int rhs_diff_num_pins = num_physical_pins - rhs_num_logical_pins; + + return lhs_diff_num_pins < rhs_diff_num_pins; + }; + + std::sort(equivalent_sites.begin(), equivalent_sites.end(), criteria); + for (auto& logical_block : LogicalBlockTypes) { - for (auto site : physical_tile.equivalent_sites) { + for (auto site : equivalent_sites) { if (0 == 
strcmp(logical_block.name, site->pb_type->name)) { logical_block.equivalent_tiles.push_back(&physical_tile); @@ -4819,7 +4835,9 @@ static void link_physical_logical_types(std::vector& Physi for (auto& logical_block : LogicalBlockTypes) { if (logical_block.index == EMPTY_TYPE_INDEX) continue; - if ((int)logical_block.equivalent_tiles.size() <= 0) { + auto& equivalent_tiles = logical_block.equivalent_tiles; + + if ((int)equivalent_tiles.size() <= 0) { archfpga_throw(__FILE__, __LINE__, "Logical Block %s does not have any equivalent tiles.\n", logical_block.name); } @@ -4827,6 +4845,20 @@ static void link_physical_logical_types(std::vector& Physi std::unordered_map ignored_pins_check_map; std::unordered_map global_pins_check_map; + auto criteria = [logical_block](const t_physical_tile_type* lhs, const t_physical_tile_type* rhs) { + int num_logical_pins = logical_block.pb_type->num_pins; + + int lhs_num_physical_pins = lhs->num_pins / lhs->capacity; + int rhs_num_physical_pins = rhs->num_pins / rhs->capacity; + + int lhs_diff_num_pins = lhs_num_physical_pins - num_logical_pins; + int rhs_diff_num_pins = rhs_num_physical_pins - num_logical_pins; + + return lhs_diff_num_pins < rhs_diff_num_pins; + }; + + std::sort(equivalent_tiles.begin(), equivalent_tiles.end(), criteria); + for (int pin = 0; pin < logical_block.pb_type->num_pins; pin++) { for (auto& tile : logical_block.equivalent_tiles) { auto direct_map = tile->tile_block_pin_directs_map.at(logical_block.index); diff --git a/vpr/src/base/SetupGrid.cpp b/vpr/src/base/SetupGrid.cpp index 1cf36956f6a..982f313cafc 100644 --- a/vpr/src/base/SetupGrid.cpp +++ b/vpr/src/base/SetupGrid.cpp @@ -686,7 +686,7 @@ float calculate_device_utilization(const DeviceGrid& grid, std::mapindex]++; if (is_io_type(physical_tile)) { for (j = 0; j < logical_block->pb_type->num_pins; j++) { diff --git a/vpr/src/base/check_netlist.cpp b/vpr/src/base/check_netlist.cpp index 1164638fa5f..5f0b0136776 100644 --- a/vpr/src/base/check_netlist.cpp 
+++ b/vpr/src/base/check_netlist.cpp @@ -92,7 +92,7 @@ static int check_connections_to_global_clb_pins(ClusterNetId net_id, int verbosi for (auto pin_id : cluster_ctx.clb_nlist.net_pins(net_id)) { ClusterBlockId blk_id = cluster_ctx.clb_nlist.pin_block(pin_id); auto logical_type = cluster_ctx.clb_nlist.block_type(blk_id); - auto physical_type = pick_random_physical_type(logical_type); + auto physical_type = pick_best_physical_type(logical_type); int log_index = cluster_ctx.clb_nlist.pin_physical_index(pin_id); int pin_index = get_physical_pin(physical_type, logical_type, log_index); diff --git a/vpr/src/base/read_netlist.cpp b/vpr/src/base/read_netlist.cpp index e368035ff0b..b326eb4ab12 100644 --- a/vpr/src/base/read_netlist.cpp +++ b/vpr/src/base/read_netlist.cpp @@ -949,7 +949,7 @@ static void load_external_nets_and_cb(ClusteredNetlist& clb_nlist) { * and blocks point back to net pins */ for (auto blk_id : clb_nlist.blocks()) { block_type = clb_nlist.block_type(blk_id); - auto tile_type = pick_random_physical_type(block_type); + auto tile_type = pick_best_physical_type(block_type); for (j = 0; j < block_type->pb_type->num_pins; j++) { int physical_pin = get_physical_pin(tile_type, block_type, j); @@ -999,7 +999,7 @@ static void load_external_nets_and_cb(ClusteredNetlist& clb_nlist) { for (auto pin_id : clb_nlist.net_sinks(net_id)) { bool is_ignored_net = clb_nlist.net_is_ignored(net_id); block_type = clb_nlist.block_type(clb_nlist.pin_block(pin_id)); - auto tile_type = pick_random_physical_type(block_type); + auto tile_type = pick_best_physical_type(block_type); int logical_pin = clb_nlist.pin_logical_index(pin_id); int physical_pin = get_physical_pin(tile_type, block_type, logical_pin); diff --git a/vpr/src/base/read_place.cpp b/vpr/src/base/read_place.cpp index 95640c7fbc0..929192ec34c 100644 --- a/vpr/src/base/read_place.cpp +++ b/vpr/src/base/read_place.cpp @@ -161,7 +161,7 @@ void read_user_pad_loc(const char* pad_loc_file) { hash_table = alloc_hash_table(); 
for (auto blk_id : cluster_ctx.clb_nlist.blocks()) { auto logical_block = cluster_ctx.clb_nlist.block_type(blk_id); - if (is_io_type(pick_random_physical_type(logical_block))) { + if (is_io_type(pick_best_physical_type(logical_block))) { insert_in_hash_table(hash_table, cluster_ctx.clb_nlist.block_name(blk_id).c_str(), size_t(blk_id)); place_ctx.block_locs[blk_id].loc.x = OPEN; /* Mark as not seen yet. */ } @@ -268,7 +268,7 @@ void read_user_pad_loc(const char* pad_loc_file) { for (auto blk_id : cluster_ctx.clb_nlist.blocks()) { auto logical_block = cluster_ctx.clb_nlist.block_type(blk_id); - auto type = pick_random_physical_type(logical_block); + auto type = pick_best_physical_type(logical_block); if (is_io_type(type) && place_ctx.block_locs[blk_id].loc.x == OPEN) { vpr_throw(VPR_ERROR_PLACE_F, pad_loc_file, 0, "IO block %s location was not specified in the pad file.\n", cluster_ctx.clb_nlist.block_name(blk_id).c_str()); diff --git a/vpr/src/draw/draw.cpp b/vpr/src/draw/draw.cpp index 465304e44fc..d29ede76133 100644 --- a/vpr/src/draw/draw.cpp +++ b/vpr/src/draw/draw.cpp @@ -2736,7 +2736,7 @@ static void draw_reset_blk_color(ClusterBlockId blk_id) { t_draw_state* draw_state = get_draw_state_vars(); - draw_state->block_color[blk_id] = get_block_type_color(pick_random_physical_type(logical_block)); + draw_state->block_color[blk_id] = get_block_type_color(pick_best_physical_type(logical_block)); } /** diff --git a/vpr/src/draw/draw_types.cpp b/vpr/src/draw/draw_types.cpp index 14b0b45ca26..5d6e4ca7a83 100644 --- a/vpr/src/draw/draw_types.cpp +++ b/vpr/src/draw/draw_types.cpp @@ -95,7 +95,7 @@ ezgl::rectangle t_draw_coords::get_absolute_clb_bbox(const ClusterBlockId clb_in ezgl::rectangle t_draw_coords::get_absolute_clb_bbox(int grid_x, int grid_y, int sub_block_index) { auto& device_ctx = g_vpr_ctx.device(); - return get_pb_bbox(grid_x, grid_y, sub_block_index, *pick_random_logical_type(device_ctx.grid[grid_x][grid_y].type)->pb_graph_head); + return 
get_pb_bbox(grid_x, grid_y, sub_block_index, *pick_best_logical_type(device_ctx.grid[grid_x][grid_y].type)->pb_graph_head); } #endif // NO_GRAPHICS diff --git a/vpr/src/draw/intra_logic_block.cpp b/vpr/src/draw/intra_logic_block.cpp index ac465f15266..a3771f7fed5 100644 --- a/vpr/src/draw/intra_logic_block.cpp +++ b/vpr/src/draw/intra_logic_block.cpp @@ -96,7 +96,7 @@ void draw_internal_init_blk() { continue; } - auto logical_block = pick_random_logical_type(&type); + auto logical_block = pick_best_logical_type(&type); pb_graph_head_node = logical_block->pb_graph_head; int type_descriptor_index = type.index; diff --git a/vpr/src/pack/output_clustering.cpp b/vpr/src/pack/output_clustering.cpp index b2cb8a9192d..bb8a98ae5d0 100644 --- a/vpr/src/pack/output_clustering.cpp +++ b/vpr/src/pack/output_clustering.cpp @@ -64,7 +64,7 @@ static void print_stats() { for (auto blk_id : cluster_ctx.clb_nlist.blocks()) { auto logical_block = cluster_ctx.clb_nlist.block_type(blk_id); - auto physical_tile = pick_random_physical_type(logical_block); + auto physical_tile = pick_best_physical_type(logical_block); for (ipin = 0; ipin < logical_block->pb_type->num_pins; ipin++) { int physical_pin = get_physical_pin(physical_tile, logical_block, ipin); auto pin_class = physical_tile->pin_class[physical_pin]; diff --git a/vpr/src/place/initial_placement.cpp b/vpr/src/place/initial_placement.cpp index 86d9645b709..8a1f0e12e94 100644 --- a/vpr/src/place/initial_placement.cpp +++ b/vpr/src/place/initial_placement.cpp @@ -286,7 +286,7 @@ static void initial_placement_blocks(int* free_locations, enum e_pad_loc_type pa auto logical_block = cluster_ctx.clb_nlist.block_type(blk_id); /* Don't do IOs if the user specifies IOs; we'll read those locations later. 
*/ - if (!(is_io_type(pick_random_physical_type(logical_block)) && pad_loc_type == USER)) { + if (!(is_io_type(pick_best_physical_type(logical_block)) && pad_loc_type == USER)) { /* Randomly select a free location of the appropriate type for blk_id. * We have a linearized list of all the free locations that can * accommodate a block of that type in free_locations[itype]. @@ -317,7 +317,7 @@ static void initial_placement_blocks(int* free_locations, enum e_pad_loc_type pa place_ctx.block_locs[blk_id].loc = to; //Mark IOs as fixed if specifying a (fixed) random placement - if (is_io_type(pick_random_physical_type(logical_block)) && pad_loc_type == RANDOM) { + if (is_io_type(pick_best_physical_type(logical_block)) && pad_loc_type == RANDOM) { place_ctx.block_locs[blk_id].is_fixed = true; } diff --git a/vpr/src/place/place_macro.cpp b/vpr/src/place/place_macro.cpp index e1f39273e9d..527c2c9773a 100644 --- a/vpr/src/place/place_macro.cpp +++ b/vpr/src/place/place_macro.cpp @@ -78,7 +78,7 @@ static void find_all_the_macro(int* num_of_macro, std::vector& p num_macro = 0; for (auto blk_id : cluster_ctx.clb_nlist.blocks()) { auto logical_block = cluster_ctx.clb_nlist.block_type(blk_id); - auto physical_tile = pick_random_physical_type(logical_block); + auto physical_tile = pick_best_physical_type(logical_block); num_blk_pins = cluster_ctx.clb_nlist.block_type(blk_id)->pb_type->num_pins; for (to_iblk_pin = 0; to_iblk_pin < num_blk_pins; to_iblk_pin++) { diff --git a/vpr/src/place/uniform_move_generator.cpp b/vpr/src/place/uniform_move_generator.cpp index 82cd632802e..7d4bf1c439b 100644 --- a/vpr/src/place/uniform_move_generator.cpp +++ b/vpr/src/place/uniform_move_generator.cpp @@ -18,7 +18,7 @@ e_create_move UniformMoveGenerator::propose_move(t_pl_blocks_to_be_moved& blocks t_pl_loc to; - auto type = pick_random_physical_type(cluster_from_type); + auto type = pick_best_physical_type(cluster_from_type); if (!find_to_loc_uniform(type, rlim, from, to)) { return 
e_create_move::ABORT; diff --git a/vpr/src/power/power.cpp b/vpr/src/power/power.cpp index 758167d6b54..d4e17c0f852 100644 --- a/vpr/src/power/power.cpp +++ b/vpr/src/power/power.cpp @@ -624,7 +624,7 @@ static void power_usage_blocks(t_power_usage* power_usage) { pb = cluster_ctx.clb_nlist.block_pb(iblk); logical_block = cluster_ctx.clb_nlist.block_type(iblk); } else { - logical_block = pick_random_logical_type(physical_tile); + logical_block = pick_best_logical_type(physical_tile); } /* Calculate power of this CLB */ diff --git a/vpr/src/util/vpr_utils.cpp b/vpr/src/util/vpr_utils.cpp index 8027c40df16..09e39cebf76 100644 --- a/vpr/src/util/vpr_utils.cpp +++ b/vpr/src/util/vpr_utils.cpp @@ -317,7 +317,7 @@ std::vector find_clb_pin_connected_atom_pins(ClusterBlockId clb, int auto& clb_nlist = g_vpr_ctx.clustering().clb_nlist; auto logical_block = clb_nlist.block_type(clb); - auto physical_tile = pick_random_physical_type(logical_block); + auto physical_tile = pick_best_physical_type(logical_block); int physical_pin = get_physical_pin(physical_tile, logical_block, logical_pin); @@ -2151,30 +2151,15 @@ bool is_tile_compatible(t_physical_tile_type_ptr physical_tile, t_logical_block_ return std::find(equivalent_tiles.begin(), equivalent_tiles.end(), physical_tile) != equivalent_tiles.end(); } -t_physical_tile_type_ptr pick_random_physical_type(t_logical_block_type_ptr logical_block) { - auto equivalent_tiles = logical_block->equivalent_tiles; - - size_t num_equivalent_tiles = equivalent_tiles.size(); - int index = 0; - - if (num_equivalent_tiles > 1) { - index = vtr::irand((int)equivalent_tiles.size() - 1); - } - - return equivalent_tiles[index]; +/** + * This function returns the most common physical tile type given a logical block + */ +t_physical_tile_type_ptr pick_best_physical_type(t_logical_block_type_ptr logical_block) { + return logical_block->equivalent_tiles[0]; } -t_logical_block_type_ptr pick_random_logical_type(t_physical_tile_type_ptr physical_tile) { - 
auto equivalent_sites = physical_tile->equivalent_sites; - - size_t num_equivalent_sites = equivalent_sites.size(); - int index = 0; - - if (num_equivalent_sites > 1) { - index = vtr::irand((int)equivalent_sites.size() - 1); - } - - return equivalent_sites[index]; +t_logical_block_type_ptr pick_best_logical_type(t_physical_tile_type_ptr physical_tile) { + return physical_tile->equivalent_sites[0]; } int get_logical_pin(t_physical_tile_type_ptr physical_tile, diff --git a/vpr/src/util/vpr_utils.h b/vpr/src/util/vpr_utils.h index f0b6dc07c33..a2f8ce1ae4f 100644 --- a/vpr/src/util/vpr_utils.h +++ b/vpr/src/util/vpr_utils.h @@ -170,8 +170,8 @@ void place_sync_external_block_connections(ClusterBlockId iblk); int get_max_num_pins(t_logical_block_type_ptr logical_block); bool is_tile_compatible(t_physical_tile_type_ptr physical_tile, t_logical_block_type_ptr logical_block); -t_physical_tile_type_ptr pick_random_physical_type(t_logical_block_type_ptr logical_block); -t_logical_block_type_ptr pick_random_logical_type(t_physical_tile_type_ptr physical_tile); +t_physical_tile_type_ptr pick_best_physical_type(t_logical_block_type_ptr logical_block); +t_logical_block_type_ptr pick_best_logical_type(t_physical_tile_type_ptr physical_tile); int get_logical_pin(t_physical_tile_type_ptr physical_tile, t_logical_block_type_ptr logical_block, From 0bf4db656551f777905add207aabc4b8856477f4 Mon Sep 17 00:00:00 2001 From: Alessandro Comodi Date: Thu, 21 Nov 2019 13:07:25 +0100 Subject: [PATCH 54/58] equivalent: use logical_block_types in compressed grid locations equivalent: add compatibility check in is_legal_swap Signed-off-by: Alessandro Comodi --- vpr/src/place/compressed_grid.cpp | 12 ++-- vpr/src/place/move_utils.cpp | 76 +++++++++--------------- vpr/src/place/move_utils.h | 2 +- vpr/src/place/place.cpp | 14 ++--- vpr/src/place/uniform_move_generator.cpp | 4 +- 5 files changed, 45 insertions(+), 63 deletions(-) diff --git a/vpr/src/place/compressed_grid.cpp 
b/vpr/src/place/compressed_grid.cpp index b0f5ba39d13..9f3a6219eb4 100644 --- a/vpr/src/place/compressed_grid.cpp +++ b/vpr/src/place/compressed_grid.cpp @@ -6,19 +6,21 @@ std::vector create_compressed_block_grids() { auto& grid = device_ctx.grid; //Collect the set of x/y locations for each instace of a block type - std::vector>> block_locations(device_ctx.physical_tile_types.size()); + std::vector>> block_locations(device_ctx.logical_block_types.size()); for (size_t x = 0; x < grid.width(); ++x) { for (size_t y = 0; y < grid.height(); ++y) { const t_grid_tile& tile = grid[x][y]; if (tile.width_offset == 0 && tile.height_offset == 0) { - //Only record at block root location - block_locations[tile.type->index].emplace_back(x, y); + for (auto& block : tile.type->equivalent_sites) { + //Only record at block root location + block_locations[block->index].emplace_back(x, y); + } } } } - std::vector compressed_type_grids(device_ctx.physical_tile_types.size()); - for (const auto& type : device_ctx.physical_tile_types) { + std::vector compressed_type_grids(device_ctx.logical_block_types.size()); + for (const auto& type : device_ctx.logical_block_types) { compressed_type_grids[type.index] = create_compressed_block_grid(block_locations[type.index]); } diff --git a/vpr/src/place/move_utils.cpp b/vpr/src/place/move_utils.cpp index 9528b47aa82..c10155f5bfe 100644 --- a/vpr/src/place/move_utils.cpp +++ b/vpr/src/place/move_utils.cpp @@ -417,11 +417,12 @@ bool is_legal_swap_to_location(ClusterBlockId blk, t_pl_loc to) { //(neccessarily) translationally invariant for an arbitrary macro auto& device_ctx = g_vpr_ctx.device(); + auto& cluster_ctx = g_vpr_ctx.clustering(); if (to.x < 0 || to.x >= int(device_ctx.grid.width()) || to.y < 0 || to.y >= int(device_ctx.grid.height()) || to.z < 0 || to.z >= device_ctx.grid[to.x][to.y].type->capacity - || (device_ctx.grid[to.x][to.y].type != physical_tile_type(blk))) { + || !is_tile_compatible(device_ctx.grid[to.x][to.y].type, 
cluster_ctx.clb_nlist.block_type(blk))) { return false; } return true; @@ -482,7 +483,7 @@ ClusterBlockId pick_from_block() { return ClusterBlockId::INVALID(); } -bool find_to_loc_uniform(t_physical_tile_type_ptr type, +bool find_to_loc_uniform(t_logical_block_type_ptr type, float rlim, const t_pl_loc from, t_pl_loc& to) { @@ -495,29 +496,25 @@ bool find_to_loc_uniform(t_physical_tile_type_ptr type, // //This ensures that such blocks don't get locked down too early during placement (as would be the //case with a physical distance rlim) - auto& grid = g_vpr_ctx.device().grid; - - auto from_type = grid[from.x][from.y].type; //Retrieve the compressed block grid for this block type - const auto& to_compressed_block_grid = g_vpr_ctx.placement().compressed_block_grids[type->index]; - const auto& from_compressed_block_grid = g_vpr_ctx.placement().compressed_block_grids[from_type->index]; + const auto& compressed_block_grid = g_vpr_ctx.placement().compressed_block_grids[type->index]; //Determine the rlim in each dimension - int rlim_x = std::min(to_compressed_block_grid.compressed_to_grid_x.size(), rlim); - int rlim_y = std::min(to_compressed_block_grid.compressed_to_grid_y.size(), rlim); /* for aspect_ratio != 1 case. */ + int rlim_x = std::min(compressed_block_grid.compressed_to_grid_x.size(), rlim); + int rlim_y = std::min(compressed_block_grid.compressed_to_grid_y.size(), rlim); /* for aspect_ratio != 1 case. 
*/ //Determine the coordinates in the compressed grid space of the current block - int cx_from = grid_to_compressed(from_compressed_block_grid.compressed_to_grid_x, from.x); - int cy_from = grid_to_compressed(from_compressed_block_grid.compressed_to_grid_y, from.y); + int cx_from = grid_to_compressed(compressed_block_grid.compressed_to_grid_x, from.x); + int cy_from = grid_to_compressed(compressed_block_grid.compressed_to_grid_y, from.y); //Determine the valid compressed grid location ranges int min_cx = std::max(0, cx_from - rlim_x); - int max_cx = std::min(to_compressed_block_grid.compressed_to_grid_x.size() - 1, cx_from + rlim_x); + int max_cx = std::min(compressed_block_grid.compressed_to_grid_x.size() - 1, cx_from + rlim_x); int delta_cx = max_cx - min_cx; int min_cy = std::max(0, cy_from - rlim_y); - int max_cy = std::min(to_compressed_block_grid.compressed_to_grid_y.size() - 1, cy_from + rlim_y); + int max_cy = std::min(compressed_block_grid.compressed_to_grid_y.size() - 1, cy_from + rlim_y); int cx_to = OPEN; int cy_to = OPEN; @@ -544,19 +541,19 @@ bool find_to_loc_uniform(t_physical_tile_type_ptr type, // //The candidates are stored in a flat_map so we can efficiently find the set of valid //candidates with upper/lower bound. 
- auto y_lower_iter = to_compressed_block_grid.grid[cx_to].lower_bound(min_cy); - if (y_lower_iter == to_compressed_block_grid.grid[cx_to].end()) { + auto y_lower_iter = compressed_block_grid.grid[cx_to].lower_bound(min_cy); + if (y_lower_iter == compressed_block_grid.grid[cx_to].end()) { continue; } - auto y_upper_iter = to_compressed_block_grid.grid[cx_to].upper_bound(max_cy); + auto y_upper_iter = compressed_block_grid.grid[cx_to].upper_bound(max_cy); if (y_lower_iter->first > min_cy) { //No valid blocks at this x location which are within rlim_y // //Fall back to allow the whole y range - y_lower_iter = to_compressed_block_grid.grid[cx_to].begin(); - y_upper_iter = to_compressed_block_grid.grid[cx_to].end(); + y_lower_iter = compressed_block_grid.grid[cx_to].begin(); + y_upper_iter = compressed_block_grid.grid[cx_to].end(); min_cy = y_lower_iter->first; max_cy = (y_upper_iter - 1)->first; @@ -588,33 +585,7 @@ bool find_to_loc_uniform(t_physical_tile_type_ptr type, if (cx_from == cx_to && cy_from == cy_to) { continue; //Same from/to location -- try again for new y-position } else { - VTR_ASSERT(cx_to != OPEN); - VTR_ASSERT(cy_to != OPEN); - - //Convert to true (uncompressed) grid locations - to.x = to_compressed_block_grid.compressed_to_grid_x[cx_to]; - to.y = to_compressed_block_grid.compressed_to_grid_y[cy_to]; - - auto& place_ctx = g_vpr_ctx.placement(); - auto& cluster_ctx = g_vpr_ctx.clustering(); - - auto blocks = place_ctx.grid_blocks[to.x][to.y].blocks; - bool impossible_swap = false; - for (auto blk : blocks) { - if (blk == ClusterBlockId::INVALID()) { - continue; - } - - auto block_type = cluster_ctx.clb_nlist.block_type(blk); - if (!is_tile_compatible(from_type, block_type)) { - impossible_swap = true; - break; - } - } - - if (!impossible_swap) { - legal = true; - } + legal = true; } } } @@ -624,11 +595,22 @@ bool find_to_loc_uniform(t_physical_tile_type_ptr type, return false; } + VTR_ASSERT(cx_to != OPEN); + VTR_ASSERT(cy_to != OPEN); + + //Convert 
to true (uncompressed) grid locations + to.x = compressed_block_grid.compressed_to_grid_x[cx_to]; + to.y = compressed_block_grid.compressed_to_grid_y[cy_to]; + + auto& grid = g_vpr_ctx.device().grid; + + auto to_type = grid[to.x][to.y].type; + //Each x/y location contains only a single type, so we can pick a random //z (capcity) location - to.z = vtr::irand(type->capacity - 1); + to.z = vtr::irand(to_type->capacity - 1); - VTR_ASSERT_MSG(grid[to.x][to.y].type == type, "Type must match"); + VTR_ASSERT_MSG(is_tile_compatible(to_type, type), "Type must be compatible"); VTR_ASSERT_MSG(grid[to.x][to.y].width_offset == 0, "Should be at block base location"); VTR_ASSERT_MSG(grid[to.x][to.y].height_offset == 0, "Should be at block base location"); diff --git a/vpr/src/place/move_utils.h b/vpr/src/place/move_utils.h index ddf7e17c891..7f6b438509b 100644 --- a/vpr/src/place/move_utils.h +++ b/vpr/src/place/move_utils.h @@ -46,7 +46,7 @@ std::set determine_locations_emptied_by_move(t_pl_blocks_to_be_moved& ClusterBlockId pick_from_block(); -bool find_to_loc_uniform(t_physical_tile_type_ptr type, +bool find_to_loc_uniform(t_logical_block_type_ptr type, float rlim, const t_pl_loc from, t_pl_loc& to); diff --git a/vpr/src/place/place.cpp b/vpr/src/place/place.cpp index 3f1451be77c..de81a28b99d 100644 --- a/vpr/src/place/place.cpp +++ b/vpr/src/place/place.cpp @@ -940,8 +940,8 @@ static void placement_inner_loop(float t, /* Lines below prevent too much round-off error from accumulating * in the cost over many iterations (due to incremental updates). - * This round-off can lead to error checks failing because the cost - * is different from what you get when you recompute from scratch. + * This round-off can lead to error checks failing because the cost + * is different from what you get when you recompute from scratch. 
*/ ++(*moves_since_cost_recompute); if (*moves_since_cost_recompute > MAX_MOVES_BEFORE_RECOMPUTE) { @@ -1219,15 +1219,15 @@ static e_move_result try_swap(float t, VTR_ASSERT(create_move_outcome == e_create_move::VALID); /* - * To make evaluating the move simpler (e.g. calculating changed bounding box), - * we first move the blocks to thier new locations (apply the move to - * place_ctx.block_locs) and then computed the change in cost. If the move is + * To make evaluating the move simpler (e.g. calculating changed bounding box), + * we first move the blocks to thier new locations (apply the move to + * place_ctx.block_locs) and then computed the change in cost. If the move is * accepted, the inverse look-up in place_ctx.grid_blocks is updated (committing - * the move). If the move is rejected the blocks are returned to their original + * the move). If the move is rejected the blocks are returned to their original * positions (reverting place_ctx.block_locs to its original state). * * Note that the inverse look-up place_ctx.grid_blocks is only updated - * after move acceptance is determined, and so should not be used when + * after move acceptance is determined, and so should not be used when * evaluating a move. 
*/ diff --git a/vpr/src/place/uniform_move_generator.cpp b/vpr/src/place/uniform_move_generator.cpp index 7d4bf1c439b..6d39f3f3eaf 100644 --- a/vpr/src/place/uniform_move_generator.cpp +++ b/vpr/src/place/uniform_move_generator.cpp @@ -18,9 +18,7 @@ e_create_move UniformMoveGenerator::propose_move(t_pl_blocks_to_be_moved& blocks t_pl_loc to; - auto type = pick_best_physical_type(cluster_from_type); - - if (!find_to_loc_uniform(type, rlim, from, to)) { + if (!find_to_loc_uniform(cluster_from_type, rlim, from, to)) { return e_create_move::ABORT; } From 8d69193637ed2451d678b54b592e01ee64cd2d9c Mon Sep 17 00:00:00 2001 From: Alessandro Comodi Date: Thu, 21 Nov 2019 15:33:38 +0100 Subject: [PATCH 55/58] equivalent: check mutual phy/log type compatibility when swapping Signed-off-by: Alessandro Comodi --- vpr/src/place/move_utils.cpp | 17 +++++++++++++++++ 1 file changed, 17 insertions(+) diff --git a/vpr/src/place/move_utils.cpp b/vpr/src/place/move_utils.cpp index c10155f5bfe..ef00c65cbbe 100644 --- a/vpr/src/place/move_utils.cpp +++ b/vpr/src/place/move_utils.cpp @@ -113,6 +113,8 @@ e_block_move_result record_single_block_swap(t_pl_blocks_to_be_moved& blocks_aff ClusterBlockId b_to = place_ctx.grid_blocks[to.x][to.y].blocks[to.z]; + t_pl_loc curr_from = place_ctx.block_locs[b_from].loc; + e_block_move_result outcome = e_block_move_result::VALID; // Check whether the to_location is empty @@ -121,6 +123,13 @@ e_block_move_result record_single_block_swap(t_pl_blocks_to_be_moved& blocks_aff outcome = record_block_move(blocks_affected, b_from, to); } else if (b_to != INVALID_BLOCK_ID) { + // Check whether block to is compatible with from location + if (b_to != EMPTY_BLOCK_ID && b_to != INVALID_BLOCK_ID) { + if (!(is_legal_swap_to_location(b_to, curr_from))) { + return e_block_move_result::ABORT; + } + } + // Sets up the blocks moved outcome = record_block_move(blocks_affected, b_from, to); @@ -253,10 +262,18 @@ e_block_move_result 
record_macro_macro_swaps(t_pl_blocks_to_be_moved& blocks_aff ClusterBlockId b_from = place_ctx.pl_macros[imacro_from].members[imember_from].blk_index; t_pl_loc curr_to = place_ctx.block_locs[b_from].loc + swap_offset; + t_pl_loc curr_from = place_ctx.block_locs[b_from].loc; ClusterBlockId b_to = place_ctx.pl_macros[imacro_to].members[imember_to].blk_index; VTR_ASSERT_SAFE(curr_to == place_ctx.block_locs[b_to].loc); + // Check whether block to is compatible with from location + if (b_to != EMPTY_BLOCK_ID && b_to != INVALID_BLOCK_ID) { + if (!(is_legal_swap_to_location(b_to, curr_from))) { + return e_block_move_result::ABORT; + } + } + if (!is_legal_swap_to_location(b_from, curr_to)) { log_move_abort("macro_from swap to location illegal"); return e_block_move_result::ABORT; From 6f3d2ddbdc96d5231f844a73d8b237f11a2e3d96 Mon Sep 17 00:00:00 2001 From: Alessandro Comodi Date: Fri, 22 Nov 2019 13:00:44 +0100 Subject: [PATCH 56/58] equivalent: delete pin_physical_index from clustered netlist Signed-off-by: Alessandro Comodi --- vpr/src/base/check_netlist.cpp | 2 +- vpr/src/base/clustered_netlist.cpp | 44 +++++------------- vpr/src/base/clustered_netlist.h | 36 +++++---------- vpr/src/base/read_netlist.cpp | 8 ++-- vpr/src/base/read_route.cpp | 2 +- vpr/src/base/vpr_context.h | 3 ++ vpr/src/place/place.cpp | 21 +++++---- vpr/src/place/place_macro.cpp | 2 +- vpr/src/route/check_route.cpp | 45 ++++++++----------- vpr/src/route/route_common.cpp | 24 +++------- vpr/src/timing/PostClusterDelayCalculator.tpp | 2 +- vpr/src/util/vpr_utils.cpp | 43 ++++++++++++++---- vpr/src/util/vpr_utils.h | 3 ++ 13 files changed, 110 insertions(+), 125 deletions(-) diff --git a/vpr/src/base/check_netlist.cpp b/vpr/src/base/check_netlist.cpp index 5f0b0136776..ee77a8b8fff 100644 --- a/vpr/src/base/check_netlist.cpp +++ b/vpr/src/base/check_netlist.cpp @@ -94,7 +94,7 @@ static int check_connections_to_global_clb_pins(ClusterNetId net_id, int verbosi auto logical_type = 
cluster_ctx.clb_nlist.block_type(blk_id); auto physical_type = pick_best_physical_type(logical_type); - int log_index = cluster_ctx.clb_nlist.pin_physical_index(pin_id); + int log_index = cluster_ctx.clb_nlist.pin_logical_index(pin_id); int pin_index = get_physical_pin(physical_type, logical_type, log_index); if (physical_type->is_ignored_pin[pin_index] != net_is_ignored diff --git a/vpr/src/base/clustered_netlist.cpp b/vpr/src/base/clustered_netlist.cpp index fc37cc0d249..f0e031dd413 100644 --- a/vpr/src/base/clustered_netlist.cpp +++ b/vpr/src/base/clustered_netlist.cpp @@ -30,8 +30,8 @@ t_logical_block_type_ptr ClusteredNetlist::block_type(const ClusterBlockId id) c return block_types_[id]; } -ClusterNetId ClusteredNetlist::block_net(const ClusterBlockId blk_id, const int phys_pin_index) const { - auto pin_id = block_pin(blk_id, phys_pin_index); +ClusterNetId ClusteredNetlist::block_net(const ClusterBlockId blk_id, const int logical_pin_index) const { + auto pin_id = block_pin(blk_id, logical_pin_index); if (pin_id) { return pin_net(pin_id); @@ -50,11 +50,11 @@ int ClusteredNetlist::block_pin_net_index(const ClusterBlockId blk_id, const int return OPEN; } -ClusterPinId ClusteredNetlist::block_pin(const ClusterBlockId blk, const int phys_pin_index) const { +ClusterPinId ClusteredNetlist::block_pin(const ClusterBlockId blk, const int logical_pin_index) const { VTR_ASSERT_SAFE(valid_block_id(blk)); - VTR_ASSERT_SAFE_MSG(phys_pin_index >= 0 && phys_pin_index < static_cast(block_logical_pins_[blk].size()), "Physical pin index must be in range"); + VTR_ASSERT_SAFE_MSG(logical_pin_index >= 0 && logical_pin_index < static_cast(block_logical_pins_[blk].size()), "Logical pin index must be in range"); - return block_logical_pins_[blk][phys_pin_index]; + return block_logical_pins_[blk][logical_pin_index]; } bool ClusteredNetlist::block_contains_primary_input(const ClusterBlockId blk) const { @@ -75,23 +75,17 @@ bool ClusteredNetlist::block_contains_primary_output(const 
ClusterBlockId blk) c * Pins * */ -int ClusteredNetlist::pin_physical_index(const ClusterPinId id) const { - VTR_ASSERT_SAFE(valid_pin_id(id)); - - return pin_physical_index_[id]; -} - int ClusteredNetlist::pin_logical_index(const ClusterPinId pin_id) const { VTR_ASSERT_SAFE(valid_pin_id(pin_id)); return pin_logical_index_[pin_id]; } -int ClusteredNetlist::net_pin_physical_index(const ClusterNetId net_id, int net_pin_index) const { +int ClusteredNetlist::net_pin_logical_index(const ClusterNetId net_id, int net_pin_index) const { auto pin_id = net_pin(net_id, net_pin_index); if (pin_id) { - return pin_physical_index(pin_id); + return pin_logical_index(pin_id); } return OPEN; //No valid pin found @@ -141,20 +135,6 @@ ClusterBlockId ClusteredNetlist::create_block(const char* name, t_pb* pb, t_logi return blk_id; } -void ClusteredNetlist::set_pin_physical_index(const ClusterPinId pin, const int phys_pin_index) { - VTR_ASSERT_SAFE(valid_pin_id(pin)); - auto blk = pin_block(pin); - - int old_phys_pin_index = pin_physical_index(pin); - - //Invalidate old mapping - block_logical_pins_[blk][old_phys_pin_index] = ClusterPinId::INVALID(); - - //Update mappings - pin_physical_index_[pin] = phys_pin_index; - block_logical_pins_[blk][phys_pin_index] = pin; -} - ClusterPortId ClusteredNetlist::create_port(const ClusterBlockId blk_id, const std::string name, BitIndex width, PortType type) { ClusterPortId port_id = find_port(blk_id, name); if (!port_id) { @@ -175,7 +155,6 @@ ClusterPortId ClusteredNetlist::create_port(const ClusterBlockId blk_id, const s ClusterPinId ClusteredNetlist::create_pin(const ClusterPortId port_id, BitIndex port_bit, const ClusterNetId net_id, const PinType pin_type_, int pin_index, bool is_const) { ClusterPinId pin_id = Netlist::create_pin(port_id, port_bit, net_id, pin_type_, is_const); - pin_physical_index_.push_back(pin_index); pin_logical_index_.push_back(pin_index); ClusterBlockId block_id = port_block(port_id); @@ -249,7 +228,7 @@ void 
ClusteredNetlist::clean_ports_impl(const vtr::vector_map& pin_id_map) { //Update all the pin values - pin_physical_index_ = clean_and_reorder_values(pin_physical_index_, pin_id_map); + pin_logical_index_ = clean_and_reorder_values(pin_logical_index_, pin_id_map); } void ClusteredNetlist::clean_nets_impl(const vtr::vector_map& net_id_map) { @@ -263,8 +242,8 @@ void ClusteredNetlist::rebuild_block_refs_impl(const vtr::vector_map(get_max_num_pins(block_type(blk)), ClusterPinId::INVALID()); //Reset for (auto pin : block_pins(blk)) { - int phys_pin_index = pin_physical_index(pin); - block_logical_pins_[blk][phys_pin_index] = pin; + int logical_pin_index = pin_logical_index(pin); + block_logical_pins_[blk][logical_pin_index] = pin; } } } @@ -290,7 +269,6 @@ void ClusteredNetlist::shrink_to_fit_impl() { block_logical_pins_.shrink_to_fit(); //Pin data - pin_physical_index_.shrink_to_fit(); pin_logical_index_.shrink_to_fit(); //Net data @@ -318,7 +296,7 @@ bool ClusteredNetlist::validate_port_sizes_impl(size_t /*num_ports*/) const { } bool ClusteredNetlist::validate_pin_sizes_impl(size_t num_pins) const { - if (pin_physical_index_.size() != num_pins) { + if (pin_logical_index_.size() != num_pins) { return false; } return true; diff --git a/vpr/src/base/clustered_netlist.h b/vpr/src/base/clustered_netlist.h index 849c6174ab1..53039bfdb15 100644 --- a/vpr/src/base/clustered_netlist.h +++ b/vpr/src/base/clustered_netlist.h @@ -68,12 +68,12 @@ * Pins * ---- * The only piece of unique pin information is: - * physical_pin_index_ + * logical_pin_index_ * - * Example of physical_pin_index_ + * Example of logical_pin_index_ * --------------------- - * Given a ClusterPinId, physical_pin_index_ will return the index of the pin within its block - * relative to the t_logical_block_type (physical description of the block). 
+ * Given a ClusterPinId, logical_pin_index_ will return the index of the pin within its block + * relative to the t_logical_block_type (logical description of the block). * * +-----------+ * 0-->|O X|-->3 @@ -83,7 +83,7 @@ * * The index skips over unused pins, e.g. CLB has 6 pins (3 in, 3 out, numbered [0...5]), where * the first two ins, and last two outs are used. Indices [0,1] represent the ins, and [4,5] - * represent the outs. Indices [2,3] are unused. Therefore, physical_pin_index_[92] = 5. + * represent the outs. Indices [2,3] are unused. Therefore, logical_pin_index_[92] = 5. * * Nets * ---- @@ -134,8 +134,8 @@ class ClusteredNetlist : public Netlist> block_logical_pins_; //The logical pin associated with each physical tile pin //Pins - vtr::vector_map pin_physical_index_; //The physical pin index (i.e. pin index - //in t_physical_tile_type) corresponding - //to the clustered pin - vtr::vector_map pin_logical_index_; //The logical pin index of this block (i.e. pin index - //in t_logical_block_type) corresponding - //to the clustered pin + vtr::vector_map pin_logical_index_; //The logical pin index of this block (i.e. 
pin index + //in t_logical_block_type) corresponding + //to the clustered pin //Nets vtr::vector_map net_is_ignored_; //Boolean mapping indicating if the net is ignored diff --git a/vpr/src/base/read_netlist.cpp b/vpr/src/base/read_netlist.cpp index b326eb4ab12..a44ad17e6b3 100644 --- a/vpr/src/base/read_netlist.cpp +++ b/vpr/src/base/read_netlist.cpp @@ -971,8 +971,8 @@ static void load_external_nets_and_cb(ClusteredNetlist& clb_nlist) { //Asserts the ClusterBlockId is the same when ClusterNetId & pin BitIndex is provided VTR_ASSERT(blk_id == clb_nlist.pin_block(*(clb_nlist.net_pins(clb_net_id).begin() + count[clb_net_id]))); //Asserts the block's pin index is the same - VTR_ASSERT(j == clb_nlist.pin_physical_index(*(clb_nlist.net_pins(clb_net_id).begin() + count[clb_net_id]))); - VTR_ASSERT(j == clb_nlist.net_pin_physical_index(clb_net_id, count[clb_net_id])); + VTR_ASSERT(j == clb_nlist.pin_logical_index(*(clb_nlist.net_pins(clb_net_id).begin() + count[clb_net_id]))); + VTR_ASSERT(j == clb_nlist.net_pin_logical_index(clb_net_id, count[clb_net_id])); // nets connecting to global pins are marked as global nets if (tile_type->is_pin_global[physical_pin]) { @@ -986,8 +986,8 @@ static void load_external_nets_and_cb(ClusteredNetlist& clb_nlist) { } else { VTR_ASSERT(DRIVER == tile_type->class_inf[tile_type->pin_class[physical_pin]].type); - VTR_ASSERT(j == clb_nlist.pin_physical_index(*(clb_nlist.net_pins(clb_net_id).begin()))); - VTR_ASSERT(j == clb_nlist.net_pin_physical_index(clb_net_id, 0)); + VTR_ASSERT(j == clb_nlist.pin_logical_index(*(clb_nlist.net_pins(clb_net_id).begin()))); + VTR_ASSERT(j == clb_nlist.net_pin_logical_index(clb_net_id, 0)); } } } diff --git a/vpr/src/base/read_route.cpp b/vpr/src/base/read_route.cpp index 6bdfeb2b1b3..b030a751f5a 100644 --- a/vpr/src/base/read_route.cpp +++ b/vpr/src/base/read_route.cpp @@ -378,7 +378,7 @@ static void process_global_blocks(std::ifstream& fp, ClusterNetId inet, const ch x, y, place_ctx.block_locs[bnum].loc.x, 
place_ctx.block_locs[bnum].loc.y); } - int pin_index = cluster_ctx.clb_nlist.net_pin_physical_index(inet, pin_counter); + int pin_index = net_pin_tile_index(inet, pin_counter); if (physical_tile_type(bnum)->pin_class[pin_index] != atoi(tokens[7].c_str())) { vpr_throw(VPR_ERROR_ROUTE, filename, lineno, "The pin class %d of %lu net does not match given ", diff --git a/vpr/src/base/vpr_context.h b/vpr/src/base/vpr_context.h index dc1c07650f9..b6a1f9859da 100644 --- a/vpr/src/base/vpr_context.h +++ b/vpr/src/base/vpr_context.h @@ -254,6 +254,9 @@ struct PlacementContext : public Context { //Clustered block placement locations vtr::vector_map block_locs; + //Clustered pin placement mapping with physical pin + vtr::vector_map physical_pins; + //Clustered block associated with each grid location (i.e. inverse of block_locs) vtr::Matrix grid_blocks; //[0..device_ctx.grid.width()-1][0..device_ctx.grid.width()-1] diff --git a/vpr/src/place/place.cpp b/vpr/src/place/place.cpp index de81a28b99d..bb1ef486927 100644 --- a/vpr/src/place/place.cpp +++ b/vpr/src/place/place.cpp @@ -487,6 +487,11 @@ void try_place(const t_placer_opts& placer_opts, initial_placement(placer_opts.pad_loc_type, placer_opts.pad_loc_file.c_str()); + // Update physical pin values + for (auto block_id : cluster_ctx.clb_nlist.blocks()) { + place_sync_external_block_connections(block_id); + } + init_draw_coords((float)width_fac); //Enables fast look-up of atom pins connect to CLB pins ClusteredPinAtomPinsLookup netlist_pin_lookup(cluster_ctx.clb_nlist, pb_gpin_lookup); @@ -1385,7 +1390,7 @@ static void update_net_bb(const ClusterNetId net, } } else { //For large nets, update bounding box incrementally - int iblk_pin = cluster_ctx.clb_nlist.pin_physical_index(blk_pin); + int iblk_pin = pin_tile_index(blk_pin); t_physical_tile_type_ptr blk_type = physical_tile_type(blk); int pin_width_offset = blk_type->pin_width_offset[iblk_pin]; @@ -1491,8 +1496,8 @@ static float comp_td_point_to_point_delay(const 
PlaceDelayModel* delay_model, Cl ClusterBlockId source_block = cluster_ctx.clb_nlist.pin_block(source_pin); ClusterBlockId sink_block = cluster_ctx.clb_nlist.pin_block(sink_pin); - int source_block_ipin = cluster_ctx.clb_nlist.pin_physical_index(source_pin); - int sink_block_ipin = cluster_ctx.clb_nlist.pin_physical_index(sink_pin); + int source_block_ipin = cluster_ctx.clb_nlist.pin_logical_index(source_pin); + int sink_block_ipin = cluster_ctx.clb_nlist.pin_logical_index(sink_pin); int source_x = place_ctx.block_locs[source_block].loc.x; int source_y = place_ctx.block_locs[source_block].loc.y; @@ -1799,7 +1804,7 @@ static void alloc_and_load_net_pin_indices() { continue; netpin = 0; for (auto pin_id : cluster_ctx.clb_nlist.net_pins(net_id)) { - int pin_index = cluster_ctx.clb_nlist.pin_physical_index(pin_id); + int pin_index = cluster_ctx.clb_nlist.pin_logical_index(pin_id); ClusterBlockId block_id = cluster_ctx.clb_nlist.pin_block(pin_id); net_pin_indices[block_id][pin_index] = netpin; netpin++; @@ -1836,7 +1841,7 @@ static void get_bb_from_scratch(ClusterNetId net_id, t_bb* coords, t_bb* num_on_ auto& grid = device_ctx.grid; ClusterBlockId bnum = cluster_ctx.clb_nlist.net_driver_block(net_id); - pnum = cluster_ctx.clb_nlist.net_pin_physical_index(net_id, 0); + pnum = net_pin_tile_index(net_id, 0); VTR_ASSERT(pnum >= 0); x = place_ctx.block_locs[bnum].loc.x + physical_tile_type(bnum)->pin_width_offset[pnum]; y = place_ctx.block_locs[bnum].loc.y + physical_tile_type(bnum)->pin_height_offset[pnum]; @@ -1855,7 +1860,7 @@ static void get_bb_from_scratch(ClusterNetId net_id, t_bb* coords, t_bb* num_on_ for (auto pin_id : cluster_ctx.clb_nlist.net_sinks(net_id)) { bnum = cluster_ctx.clb_nlist.pin_block(pin_id); - pnum = cluster_ctx.clb_nlist.pin_physical_index(pin_id); + pnum = pin_tile_index(pin_id); x = place_ctx.block_locs[bnum].loc.x + physical_tile_type(bnum)->pin_width_offset[pnum]; y = place_ctx.block_locs[bnum].loc.y + 
physical_tile_type(bnum)->pin_height_offset[pnum]; @@ -1995,7 +2000,7 @@ static void get_non_updateable_bb(ClusterNetId net_id, t_bb* bb_coord_new) { auto& device_ctx = g_vpr_ctx.device(); ClusterBlockId bnum = cluster_ctx.clb_nlist.net_driver_block(net_id); - pnum = cluster_ctx.clb_nlist.net_pin_physical_index(net_id, 0); + pnum = net_pin_tile_index(net_id, 0); x = place_ctx.block_locs[bnum].loc.x + physical_tile_type(bnum)->pin_width_offset[pnum]; y = place_ctx.block_locs[bnum].loc.y + physical_tile_type(bnum)->pin_height_offset[pnum]; @@ -2006,7 +2011,7 @@ static void get_non_updateable_bb(ClusterNetId net_id, t_bb* bb_coord_new) { for (auto pin_id : cluster_ctx.clb_nlist.net_sinks(net_id)) { bnum = cluster_ctx.clb_nlist.pin_block(pin_id); - pnum = cluster_ctx.clb_nlist.pin_physical_index(pin_id); + pnum = pin_tile_index(pin_id); x = place_ctx.block_locs[bnum].loc.x + physical_tile_type(bnum)->pin_width_offset[pnum]; y = place_ctx.block_locs[bnum].loc.y + physical_tile_type(bnum)->pin_height_offset[pnum]; diff --git a/vpr/src/place/place_macro.cpp b/vpr/src/place/place_macro.cpp index 527c2c9773a..5411e3223f8 100644 --- a/vpr/src/place/place_macro.cpp +++ b/vpr/src/place/place_macro.cpp @@ -495,7 +495,7 @@ static bool net_is_driven_by_direct(ClusterNetId clb_net) { auto& cluster_ctx = g_vpr_ctx.clustering(); ClusterBlockId block_id = cluster_ctx.clb_nlist.net_driver_block(clb_net); - int pin_index = cluster_ctx.clb_nlist.net_pin_physical_index(clb_net, 0); + int pin_index = cluster_ctx.clb_nlist.net_pin_logical_index(clb_net, 0); auto direct = f_idirect_from_blk_pin[cluster_ctx.clb_nlist.block_type(block_id)->index][pin_index]; diff --git a/vpr/src/route/check_route.cpp b/vpr/src/route/check_route.cpp index 67a6782d83f..6a848b3ca35 100644 --- a/vpr/src/route/check_route.cpp +++ b/vpr/src/route/check_route.cpp @@ -173,29 +173,25 @@ void check_route(enum e_route_type route_type) { /* Checks that this SINK node is one of the terminals of inet, and marks * * the 
appropriate pin as being reached. */ static void check_sink(int inode, ClusterNetId net_id, bool* pin_done) { - int i, j, ifound, ptc_num, iclass, iblk, pin_index; - ClusterBlockId bnum; - unsigned int ipin; - t_physical_tile_type_ptr type; auto& device_ctx = g_vpr_ctx.device(); auto& cluster_ctx = g_vpr_ctx.clustering(); auto& place_ctx = g_vpr_ctx.placement(); VTR_ASSERT(device_ctx.rr_nodes[inode].type() == SINK); - i = device_ctx.rr_nodes[inode].xlow(); - j = device_ctx.rr_nodes[inode].ylow(); - type = device_ctx.grid[i][j].type; + int i = device_ctx.rr_nodes[inode].xlow(); + int j = device_ctx.rr_nodes[inode].ylow(); + auto type = device_ctx.grid[i][j].type; /* For sinks, ptc_num is the class */ - ptc_num = device_ctx.rr_nodes[inode].ptc_num(); - ifound = 0; + int ptc_num = device_ctx.rr_nodes[inode].ptc_num(); + int ifound = 0; - for (iblk = 0; iblk < type->capacity; iblk++) { - bnum = place_ctx.grid_blocks[i][j].blocks[iblk]; /* Hardcoded to one cluster_ctx block*/ - ipin = 1; + for (int iblk = 0; iblk < type->capacity; iblk++) { + ClusterBlockId bnum = place_ctx.grid_blocks[i][j].blocks[iblk]; /* Hardcoded to one cluster_ctx block*/ + unsigned int ipin = 1; for (auto pin_id : cluster_ctx.clb_nlist.net_sinks(net_id)) { if (cluster_ctx.clb_nlist.pin_block(pin_id) == bnum) { - pin_index = cluster_ctx.clb_nlist.pin_physical_index(pin_id); - iclass = type->pin_class[pin_index]; + int pin_index = pin_tile_index(pin_id); + int iclass = type->pin_class[pin_index]; if (iclass == ptc_num) { /* Could connect to same pin class on the same clb more than once. Only * * update pin_done for a pin that hasn't been reached yet. */ @@ -225,27 +221,23 @@ static void check_sink(int inode, ClusterNetId net_id, bool* pin_done) { /* Checks that the node passed in is a valid source for this net. 
*/ static void check_source(int inode, ClusterNetId net_id) { - t_rr_type rr_type; - t_physical_tile_type_ptr type; - ClusterBlockId blk_id; - int i, j, ptc_num, node_block_pin, iclass; auto& device_ctx = g_vpr_ctx.device(); auto& cluster_ctx = g_vpr_ctx.clustering(); auto& place_ctx = g_vpr_ctx.placement(); - rr_type = device_ctx.rr_nodes[inode].type(); + t_rr_type rr_type = device_ctx.rr_nodes[inode].type(); if (rr_type != SOURCE) { VPR_FATAL_ERROR(VPR_ERROR_ROUTE, "in check_source: net %d begins with a node of type %d.\n", size_t(net_id), rr_type); } - i = device_ctx.rr_nodes[inode].xlow(); - j = device_ctx.rr_nodes[inode].ylow(); + int i = device_ctx.rr_nodes[inode].xlow(); + int j = device_ctx.rr_nodes[inode].ylow(); /* for sinks and sources, ptc_num is class */ - ptc_num = device_ctx.rr_nodes[inode].ptc_num(); + int ptc_num = device_ctx.rr_nodes[inode].ptc_num(); /* First node_block for net is the source */ - blk_id = cluster_ctx.clb_nlist.net_driver_block(net_id); - type = device_ctx.grid[i][j].type; + ClusterBlockId blk_id = cluster_ctx.clb_nlist.net_driver_block(net_id); + auto type = device_ctx.grid[i][j].type; if (place_ctx.block_locs[blk_id].loc.x != i || place_ctx.block_locs[blk_id].loc.y != j) { VPR_FATAL_ERROR(VPR_ERROR_ROUTE, @@ -253,8 +245,9 @@ static void check_source(int inode, ClusterNetId net_id) { } //Get the driver pin's index in the block - node_block_pin = cluster_ctx.clb_nlist.net_pin_physical_index(net_id, 0); - iclass = type->pin_class[node_block_pin]; + auto physical_pin = net_pin_tile_index(net_id, 0); + + int iclass = type->pin_class[physical_pin]; if (ptc_num != iclass) { VPR_FATAL_ERROR(VPR_ERROR_ROUTE, diff --git a/vpr/src/route/route_common.cpp b/vpr/src/route/route_common.cpp index a8a72f12977..8132a8f15fc 100644 --- a/vpr/src/route/route_common.cpp +++ b/vpr/src/route/route_common.cpp @@ -1034,8 +1034,6 @@ void reset_rr_node_route_structs() { static vtr::vector> load_net_rr_terminals(const t_rr_node_indices& L_rr_node_indices) { 
vtr::vector> net_rr_terminals; - int inode, i, j, node_block_pin, iclass; - auto& cluster_ctx = g_vpr_ctx.clustering(); auto& place_ctx = g_vpr_ctx.placement(); @@ -1049,27 +1047,19 @@ static vtr::vector> load_net_rr_terminals(const t int pin_count = 0; for (auto pin_id : cluster_ctx.clb_nlist.net_pins(net_id)) { auto block_id = cluster_ctx.clb_nlist.pin_block(pin_id); - i = place_ctx.block_locs[block_id].loc.x; - j = place_ctx.block_locs[block_id].loc.y; + int i = place_ctx.block_locs[block_id].loc.x; + int j = place_ctx.block_locs[block_id].loc.y; auto type = physical_tile_type(block_id); - auto logical_block = cluster_ctx.clb_nlist.block_type(block_id); /* In the routing graph, each (x, y) location has unique pins on it * so when there is capacity, blocks are packed and their pin numbers * are offset to get their actual rr_node */ - node_block_pin = cluster_ctx.clb_nlist.pin_logical_index(pin_id); - - int orig_phys_pin = get_physical_pin(type, logical_block, node_block_pin); - - VTR_ASSERT(type->num_pins % type->capacity == 0); - int max_num_block_pins = type->num_pins / type->capacity; - - int phys_pin = orig_phys_pin + place_ctx.block_locs[block_id].loc.z * max_num_block_pins; + int phys_pin = pin_tile_index(pin_id); - iclass = type->pin_class[phys_pin]; + int iclass = type->pin_class[phys_pin]; - inode = get_rr_node_index(L_rr_node_indices, i, j, (pin_count == 0 ? SOURCE : SINK), /* First pin is driver */ - iclass); + int inode = get_rr_node_index(L_rr_node_indices, i, j, (pin_count == 0 ? 
SOURCE : SINK), /* First pin is driver */ + iclass); net_rr_terminals[net_id][pin_count] = inode; pin_count++; } @@ -1548,7 +1538,7 @@ void print_route(FILE* fp, const vtr::vector& traceba for (auto pin_id : cluster_ctx.clb_nlist.net_pins(net_id)) { ClusterBlockId block_id = cluster_ctx.clb_nlist.pin_block(pin_id); - int pin_index = cluster_ctx.clb_nlist.pin_physical_index(pin_id); + int pin_index = pin_tile_index(pin_id); int iclass = physical_tile_type(block_id)->pin_class[pin_index]; fprintf(fp, "Block %s (#%zu) at (%d,%d), Pin class %d.\n", diff --git a/vpr/src/timing/PostClusterDelayCalculator.tpp b/vpr/src/timing/PostClusterDelayCalculator.tpp index 6d3325af8ef..f1daacd1dfb 100644 --- a/vpr/src/timing/PostClusterDelayCalculator.tpp +++ b/vpr/src/timing/PostClusterDelayCalculator.tpp @@ -263,7 +263,7 @@ inline tatum::Time PostClusterDelayCalculator::atom_net_delay(const tatum::Timin ClusterBlockId driver_block_id = cluster_ctx.clb_nlist.net_driver_block(net_id); VTR_ASSERT(driver_block_id == clb_src_block); - src_block_pin_index = cluster_ctx.clb_nlist.net_pin_physical_index(net_id, 0); + src_block_pin_index = cluster_ctx.clb_nlist.net_pin_logical_index(net_id, 0); tatum::Time driver_clb_delay = tatum::Time(clb_delay_calc_.internal_src_to_clb_output_delay(driver_block_id, src_block_pin_index, diff --git a/vpr/src/util/vpr_utils.cpp b/vpr/src/util/vpr_utils.cpp index 09e39cebf76..07fa3623a3e 100644 --- a/vpr/src/util/vpr_utils.cpp +++ b/vpr/src/util/vpr_utils.cpp @@ -2105,24 +2105,34 @@ void print_switch_usage() { */ void place_sync_external_block_connections(ClusterBlockId iblk) { - auto& cluster_ctx = g_vpr_ctx.mutable_clustering(); + auto& cluster_ctx = g_vpr_ctx.clustering(); + auto& clb_nlist = cluster_ctx.clb_nlist; auto& place_ctx = g_vpr_ctx.mutable_placement(); VTR_ASSERT_MSG(place_ctx.block_locs[iblk].nets_and_pins_synced_to_z_coordinate == false, "Block net and pins must not be already synced"); - auto type = physical_tile_type(iblk); - 
VTR_ASSERT(type->num_pins % type->capacity == 0); - int max_num_block_pins = type->num_pins / type->capacity; + auto physical_tile = physical_tile_type(iblk); + auto logical_block = clb_nlist.block_type(iblk); + + VTR_ASSERT(physical_tile->num_pins % physical_tile->capacity == 0); + int max_num_block_pins = physical_tile->num_pins / physical_tile->capacity; /* Logical location and physical location is offset by z * max_num_block_pins */ - auto& clb_nlist = cluster_ctx.clb_nlist; for (auto pin : clb_nlist.block_pins(iblk)) { - int orig_phys_pin_index = clb_nlist.pin_physical_index(pin); - int new_phys_pin_index = orig_phys_pin_index + place_ctx.block_locs[iblk].loc.z * max_num_block_pins; - clb_nlist.set_pin_physical_index(pin, new_phys_pin_index); + int logical_pin_index = clb_nlist.pin_logical_index(pin); + int physical_pin_index = get_physical_pin(physical_tile, logical_block, logical_pin_index); + + int new_physical_pin_index = physical_pin_index + place_ctx.block_locs[iblk].loc.z * max_num_block_pins; + + auto result = place_ctx.physical_pins.find(pin); + if (result != place_ctx.physical_pins.end()) { + place_ctx.physical_pins[pin] = new_physical_pin_index; + } else { + place_ctx.physical_pins.insert(pin, new_physical_pin_index); + } } //Mark the block as synced - place_ctx.block_locs[iblk].nets_and_pins_synced_to_z_coordinate = true; + //place_ctx.block_locs[iblk].nets_and_pins_synced_to_z_coordinate = true; } int get_max_num_pins(t_logical_block_type_ptr logical_block) { @@ -2200,6 +2210,21 @@ int get_physical_pin(t_physical_tile_type_ptr physical_tile, return result->second.pin; } +int net_pin_tile_index(const ClusterNetId net_id, int net_pin_index) { + auto& cluster_ctx = g_vpr_ctx.clustering(); + + // Get the logical pin index of pin within it's logical block type + auto pin_id = cluster_ctx.clb_nlist.net_pin(net_id, net_pin_index); + + return pin_tile_index(pin_id); +} + +int pin_tile_index(const ClusterPinId pin) { + auto& place_ctx = 
g_vpr_ctx.placement(); + + return place_ctx.physical_pins[pin]; +} + void pretty_print_uint(const char* prefix, size_t value, int num_digits, int scientific_precision) { //Print as integer if it will fit in the width, other wise scientific if (value <= std::pow(10, num_digits) - 1) { diff --git a/vpr/src/util/vpr_utils.h b/vpr/src/util/vpr_utils.h index a2f8ce1ae4f..1bb3604e418 100644 --- a/vpr/src/util/vpr_utils.h +++ b/vpr/src/util/vpr_utils.h @@ -180,6 +180,9 @@ int get_physical_pin(t_physical_tile_type_ptr physical_tile, t_logical_block_type_ptr logical_block, int pin); +int net_pin_tile_index(const ClusterNetId net_id, int net_pin_index); +int pin_tile_index(const ClusterPinId pin); + int max_pins_per_grid_tile(); void pretty_print_uint(const char* prefix, size_t value, int num_digits, int scientific_precision); From 5ff0d5d7fb2b15a9100a4edf3d69fbadf583b80c Mon Sep 17 00:00:00 2001 From: Alessandro Comodi Date: Mon, 25 Nov 2019 15:50:06 +0100 Subject: [PATCH 57/58] docs: added/modified equivalent sites documentation added tutorial for using equivalent sites. Signed-off-by: Alessandro Comodi --- doc/src/arch/reference.rst | 53 +++-- doc/src/tutorials/arch/equivalent_sites.rst | 234 ++++++++++++++++++++ doc/src/tutorials/arch/index.rst | 1 + 3 files changed, 268 insertions(+), 20 deletions(-) create mode 100644 doc/src/tutorials/arch/equivalent_sites.rst diff --git a/doc/src/arch/reference.rst b/doc/src/arch/reference.rst index 1cd93f56889..aa9475721e8 100644 --- a/doc/src/arch/reference.rst +++ b/doc/src/arch/reference.rst @@ -799,7 +799,7 @@ Tile ~~~~ .. arch:tag:: - A tile refers to a placeable element within an FPGA architecture. + A tile refers to a placeable element within an FPGA architecture and describes its physical compositions on the grid. The following attributes are applicable to each tile. The only required one is the name of the tile. @@ -1179,33 +1179,46 @@ The following tags are common to all ```` tags: .. 
arch:tag:: - Describes the Complex Blocks that can be placed within this tile. + .. seealso:: For a step-by-step walkthrough on describing equivalent sites see :ref:`equivalent_sites_tutorial`. - .. arch:tag:: + Describes the Complex Blocks that can be placed within a tile. + Each physical tile can comprehend a number from 1 to N of possible Complex Blocks, or ``sites``. + A ``site`` corresponds to a top-level Complex Block that must be placeable in at least 1 physical tile locations. - Each instance of site must also specify the direct connections between the physical - tile pins and the logical block pins. + .. arch:tag:: - .. arch:tag:: + :req_param pb_type: Name of the corresponding pb_type. - Attributes: - - ``from`` is relative to the physical tile pins - - ``to`` is relative to the logical block pins + :opt_param pin_mapping: Specifies whether the pin mapping between physical tile and logical pb_type: - :req_param pb_type: Name of the corresponding pb_type. + * ``direct``: the pin mapping does not need to be specified as the tile pin definition is equal to the corresponding pb_type one; + * ``custom``: the pin mapping is user-defined. - **Example: Equivalent Sites** - .. code-block:: xml + **Default:** ``direct`` - - - - - - ... - - + **Example: Equivalent Sites** + + .. code-block:: xml + + + + + + .. arch:tag:: + + Desctibes the mapping of a physical tile's port on the logical block's (pb_type) port. + ``direct`` is an option sub-tag of ``site``. + + .. note:: This tag is need only if the pin_mapping of the ``site`` is defined as ``custom`` + + Attributes: + - ``from`` is relative to the physical tile pins + - ``to`` is relative to the logical block pins + + .. code-block:: xml + + .. _arch_complex_blocks: diff --git a/doc/src/tutorials/arch/equivalent_sites.rst b/doc/src/tutorials/arch/equivalent_sites.rst new file mode 100644 index 00000000000..6c505773fda --- /dev/null +++ b/doc/src/tutorials/arch/equivalent_sites.rst @@ -0,0 +1,234 @@ +.. 
_equivalent_sites_tutorial: + +Equivalent Sites tutorial +========================= + +This tutorial aims at providing information to the user on how to model the equivalent sites to enable ``equivalent placement`` in VPR. + +Equivalent site placement allows the user to define complex logical blocks (top-level pb_types) that can be used in multiple physical location types of the FPGA device grid. +In the same way, the user can define many physical tiles that have different physical attributes that can implement the same logical block. + +The first case (multiple physical grid location types for one complex logical block) is explained below. +The device has at disposal two different Configurable Logic Blocks (CLB), SLICEL and SLICEM. +In this case, the SLICEM CLB is a superset that implements additional features w.r.t. the SLICEL CLB. +Therefore, the user can decide to model the architecture to be able to place the SLICEL Complex Block in a SLICEM physical tile, being it a valid grid location. +This behavior can lead to the generation of more accurate and better placement results, given that a Complex Logic Block is not bound to only one physical location type. + +Below the user can find the implementation of this situation starting from an example that does not make use of the equivalent site placement: + +.. code-block:: xml + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + ... + + + + + + + + + + + + + + ... + + + +As the user can see, ``SLICEL`` and ``SLICEM`` are treated as two different entities, even though they seem to be similar one to another. +To have the possibility to make VPR choose a ``SLICEM`` location when placing a ``SLICEL_SITE`` pb_type, the user needs to change the ``SLICEM`` tile accordingly, as shown below: + +.. 
code-block:: xml + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +With the above description of the ``SLICEM`` tile, the user can now have the ``SLICEL`` sites to be placed in ``SLICEM`` physical locations. +One thing to notice is that not all the pins have been mapped for the ``SLICEL_SITE``. For instance, the ``WE`` and ``AI`` port are absent from the ``SLICEL_SITE`` definition, hence they cannot appear in the pin mapping between physical tile and logical block. + +The second case described in this tutorial refers to the situation for which there are multiple different physical location types in the device grid that are used by one complex logical blocks. +Imagine the situation for which the device has left and right I/O tile types which have different pinlocations, hence they need to be defined in two different ways. +With equivalent site placement, the user doesn't need to define multiple different pb_types that implement the same functionality. + +Below the user can find the implementation of this situation starting from an example that does not make use of the equivalent site placement: + +.. code-block:: xml + + + + + + + + + + + + + LEFT_IOPAD_TILE.INPUT + LEFT_IOPAD_TILE.OUTPUT + + + + + + + + + + + + + RIGHT_IOPAD_TILE.INPUT + RIGHT_IOPAD_TILE.OUTPUT + + + + + + + + + + ... + + + + + + ... + + + +To avoid duplicating the complex logic blocks in ``LEFT`` and ``RIGHT IOPADS``, the user can describe the pb_type only once and add it to the equivalent sites tag of the two different tiles, as follows: + +.. code-block:: xml + + + + + + + + + + + + + LEFT_IOPAD_TILE.INPUT + LEFT_IOPAD_TILE.OUTPUT + + + + + + + + + + + + + RIGHT_IOPAD_TILE.INPUT + RIGHT_IOPAD_TILE.OUTPUT + + + + + + + + + + ... + + + +With this implementation, the ``IOPAD_SITE`` can be placed both in the ``LEFT`` and ``RIGHT`` physical location types. +Note that the pin_mapping is set as ``direct``, given that the physical tile and the logical block share the same IO pins. 
+ +The two different cases can be mixed to have a N to M mapping of physical tiles/logical blocks. diff --git a/doc/src/tutorials/arch/index.rst b/doc/src/tutorials/arch/index.rst index f901d325b84..8a3a8d22482 100644 --- a/doc/src/tutorials/arch/index.rst +++ b/doc/src/tutorials/arch/index.rst @@ -30,6 +30,7 @@ Multiple examples of how this language can be used to describe different types o fracturable_multiplier configurable_memory xilinx_virtex_6_like + equivalent_sites **Modeling Guides:** From ced5d94839871acdeec9e4355563f6157863c862 Mon Sep 17 00:00:00 2001 From: Alessandro Comodi Date: Tue, 10 Dec 2019 10:43:41 +0100 Subject: [PATCH 58/58] equivalent: addressed review comments Deleted usage of the following utility functions as their functionality was not required anymore: - `int find_clb_pb_pin(ClusterBlockId clb, int clb_pin);` - `int find_pb_pin_clb_pin(ClusterBlockId clb, int pb_pin);` In fact they were returning different values depending on the `nets_and_pins_synced_to_z_coordinate` boolean which is currently unused. 
Added comments to newly introduced utility functions and changed name to have higher understendability Signed-off-by: Alessandro Comodi --- vpr/src/base/read_route.cpp | 2 +- vpr/src/base/vpr_types.h | 4 +- vpr/src/place/place.cpp | 10 ++--- vpr/src/route/check_route.cpp | 4 +- vpr/src/route/route_common.cpp | 4 +- vpr/src/timing/clb_delay_calc.inl | 6 +-- vpr/src/util/vpr_utils.cpp | 72 +++---------------------------- vpr/src/util/vpr_utils.h | 23 +++------- 8 files changed, 25 insertions(+), 100 deletions(-) diff --git a/vpr/src/base/read_route.cpp b/vpr/src/base/read_route.cpp index b030a751f5a..9ec4069fe2c 100644 --- a/vpr/src/base/read_route.cpp +++ b/vpr/src/base/read_route.cpp @@ -378,7 +378,7 @@ static void process_global_blocks(std::ifstream& fp, ClusterNetId inet, const ch x, y, place_ctx.block_locs[bnum].loc.x, place_ctx.block_locs[bnum].loc.y); } - int pin_index = net_pin_tile_index(inet, pin_counter); + int pin_index = net_pin_to_tile_pin_index(inet, pin_counter); if (physical_tile_type(bnum)->pin_class[pin_index] != atoi(tokens[7].c_str())) { vpr_throw(VPR_ERROR_ROUTE, filename, lineno, "The pin class %d of %lu net does not match given ", diff --git a/vpr/src/base/vpr_types.h b/vpr/src/base/vpr_types.h index f1c74839f64..e6c010f7398 100644 --- a/vpr/src/base/vpr_types.h +++ b/vpr/src/base/vpr_types.h @@ -625,13 +625,11 @@ struct t_place_region { * x: x-coordinate * y: y-coordinate * z: occupancy coordinate - * is_fixed: true if this block's position is fixed by the user and shouldn't be moved during annealing - * nets_and_pins_synced_to_z_coordinate: true if the associated clb's pins have been synced to the z location (i.e. 
after placement) */ + * is_fixed: true if this block's position is fixed by the user and shouldn't be moved during annealing */ struct t_block_loc { t_pl_loc loc; bool is_fixed = false; - bool nets_and_pins_synced_to_z_coordinate = false; }; /* Stores the clustered blocks placed at a particular grid location */ diff --git a/vpr/src/place/place.cpp b/vpr/src/place/place.cpp index bb1ef486927..8a2b32fd962 100644 --- a/vpr/src/place/place.cpp +++ b/vpr/src/place/place.cpp @@ -1390,7 +1390,7 @@ static void update_net_bb(const ClusterNetId net, } } else { //For large nets, update bounding box incrementally - int iblk_pin = pin_tile_index(blk_pin); + int iblk_pin = tile_pin_index(blk_pin); t_physical_tile_type_ptr blk_type = physical_tile_type(blk); int pin_width_offset = blk_type->pin_width_offset[iblk_pin]; @@ -1841,7 +1841,7 @@ static void get_bb_from_scratch(ClusterNetId net_id, t_bb* coords, t_bb* num_on_ auto& grid = device_ctx.grid; ClusterBlockId bnum = cluster_ctx.clb_nlist.net_driver_block(net_id); - pnum = net_pin_tile_index(net_id, 0); + pnum = net_pin_to_tile_pin_index(net_id, 0); VTR_ASSERT(pnum >= 0); x = place_ctx.block_locs[bnum].loc.x + physical_tile_type(bnum)->pin_width_offset[pnum]; y = place_ctx.block_locs[bnum].loc.y + physical_tile_type(bnum)->pin_height_offset[pnum]; @@ -1860,7 +1860,7 @@ static void get_bb_from_scratch(ClusterNetId net_id, t_bb* coords, t_bb* num_on_ for (auto pin_id : cluster_ctx.clb_nlist.net_sinks(net_id)) { bnum = cluster_ctx.clb_nlist.pin_block(pin_id); - pnum = pin_tile_index(pin_id); + pnum = tile_pin_index(pin_id); x = place_ctx.block_locs[bnum].loc.x + physical_tile_type(bnum)->pin_width_offset[pnum]; y = place_ctx.block_locs[bnum].loc.y + physical_tile_type(bnum)->pin_height_offset[pnum]; @@ -2000,7 +2000,7 @@ static void get_non_updateable_bb(ClusterNetId net_id, t_bb* bb_coord_new) { auto& device_ctx = g_vpr_ctx.device(); ClusterBlockId bnum = cluster_ctx.clb_nlist.net_driver_block(net_id); - pnum = 
net_pin_tile_index(net_id, 0); + pnum = net_pin_to_tile_pin_index(net_id, 0); x = place_ctx.block_locs[bnum].loc.x + physical_tile_type(bnum)->pin_width_offset[pnum]; y = place_ctx.block_locs[bnum].loc.y + physical_tile_type(bnum)->pin_height_offset[pnum]; @@ -2011,7 +2011,7 @@ static void get_non_updateable_bb(ClusterNetId net_id, t_bb* bb_coord_new) { for (auto pin_id : cluster_ctx.clb_nlist.net_sinks(net_id)) { bnum = cluster_ctx.clb_nlist.pin_block(pin_id); - pnum = pin_tile_index(pin_id); + pnum = tile_pin_index(pin_id); x = place_ctx.block_locs[bnum].loc.x + physical_tile_type(bnum)->pin_width_offset[pnum]; y = place_ctx.block_locs[bnum].loc.y + physical_tile_type(bnum)->pin_height_offset[pnum]; diff --git a/vpr/src/route/check_route.cpp b/vpr/src/route/check_route.cpp index 6a848b3ca35..e9fa206736d 100644 --- a/vpr/src/route/check_route.cpp +++ b/vpr/src/route/check_route.cpp @@ -190,7 +190,7 @@ static void check_sink(int inode, ClusterNetId net_id, bool* pin_done) { unsigned int ipin = 1; for (auto pin_id : cluster_ctx.clb_nlist.net_sinks(net_id)) { if (cluster_ctx.clb_nlist.pin_block(pin_id) == bnum) { - int pin_index = pin_tile_index(pin_id); + int pin_index = tile_pin_index(pin_id); int iclass = type->pin_class[pin_index]; if (iclass == ptc_num) { /* Could connect to same pin class on the same clb more than once. 
Only * @@ -245,7 +245,7 @@ static void check_source(int inode, ClusterNetId net_id) { } //Get the driver pin's index in the block - auto physical_pin = net_pin_tile_index(net_id, 0); + auto physical_pin = net_pin_to_tile_pin_index(net_id, 0); int iclass = type->pin_class[physical_pin]; diff --git a/vpr/src/route/route_common.cpp b/vpr/src/route/route_common.cpp index 8132a8f15fc..6204dad984b 100644 --- a/vpr/src/route/route_common.cpp +++ b/vpr/src/route/route_common.cpp @@ -1054,7 +1054,7 @@ static vtr::vector> load_net_rr_terminals(const t /* In the routing graph, each (x, y) location has unique pins on it * so when there is capacity, blocks are packed and their pin numbers * are offset to get their actual rr_node */ - int phys_pin = pin_tile_index(pin_id); + int phys_pin = tile_pin_index(pin_id); int iclass = type->pin_class[phys_pin]; @@ -1538,7 +1538,7 @@ void print_route(FILE* fp, const vtr::vector& traceba for (auto pin_id : cluster_ctx.clb_nlist.net_pins(net_id)) { ClusterBlockId block_id = cluster_ctx.clb_nlist.pin_block(pin_id); - int pin_index = pin_tile_index(pin_id); + int pin_index = tile_pin_index(pin_id); int iclass = physical_tile_type(block_id)->pin_class[pin_index]; fprintf(fp, "Block %s (#%zu) at (%d,%d), Pin class %d.\n", diff --git a/vpr/src/timing/clb_delay_calc.inl b/vpr/src/timing/clb_delay_calc.inl index e0ee78dd940..cabf04b620b 100644 --- a/vpr/src/timing/clb_delay_calc.inl +++ b/vpr/src/timing/clb_delay_calc.inl @@ -10,13 +10,11 @@ inline ClbDelayCalc::ClbDelayCalc() : intra_lb_pb_pin_lookup_(g_vpr_ctx.device().logical_block_types) {} inline float ClbDelayCalc::clb_input_to_internal_sink_delay(const ClusterBlockId block_id, const int pin_index, int internal_sink_pin, DelayType delay_type) const { - int pb_ipin = find_clb_pb_pin(block_id, pin_index); - return trace_delay(block_id, pb_ipin, internal_sink_pin, delay_type); + return trace_delay(block_id, pin_index, internal_sink_pin, delay_type); } inline float 
ClbDelayCalc::internal_src_to_clb_output_delay(const ClusterBlockId block_id, const int pin_index, int internal_src_pin, DelayType delay_type) const { - int pb_opin = find_clb_pb_pin(block_id, pin_index); - return trace_delay(block_id, internal_src_pin, pb_opin, delay_type); + return trace_delay(block_id, internal_src_pin, pin_index, delay_type); } inline float ClbDelayCalc::internal_src_to_internal_sink_delay(const ClusterBlockId clb, int internal_src_pin, int internal_sink_pin, DelayType delay_type) const { diff --git a/vpr/src/util/vpr_utils.cpp b/vpr/src/util/vpr_utils.cpp index 07fa3623a3e..69e712d4999 100644 --- a/vpr/src/util/vpr_utils.cpp +++ b/vpr/src/util/vpr_utils.cpp @@ -505,67 +505,13 @@ std::tuple find_pb_route_clb_input_net_pin(ClusterBlockI return std::make_tuple(ClusterNetId::INVALID(), -1, -1); } - //To account for capacity > 1 blocks we need to convert the pb_pin to the clb pin - int clb_pin = find_pb_pin_clb_pin(clb, curr_pb_pin_id); - VTR_ASSERT(clb_pin >= 0); - - //clb_pin should be a top-level CLB input - ClusterNetId clb_net_idx = cluster_ctx.clb_nlist.block_net(clb, clb_pin); - int clb_net_pin_idx = cluster_ctx.clb_nlist.block_pin_net_index(clb, clb_pin); + //curr_pb_pin should be a top-level CLB input + ClusterNetId clb_net_idx = cluster_ctx.clb_nlist.block_net(clb, curr_pb_pin_id); + int clb_net_pin_idx = cluster_ctx.clb_nlist.block_pin_net_index(clb, curr_pb_pin_id); VTR_ASSERT(clb_net_idx != ClusterNetId::INVALID()); VTR_ASSERT(clb_net_pin_idx >= 0); - return std::tuple(clb_net_idx, clb_pin, clb_net_pin_idx); -} - -//Return the pb pin index corresponding to the pin clb_pin on block clb -// Given a clb_pin index on a this function will return the corresponding -// pin index on the pb_type (accounting for the possible z-coordinate offset). 
-int find_clb_pb_pin(ClusterBlockId clb, int clb_pin) { - auto& place_ctx = g_vpr_ctx.placement(); - - auto type = physical_tile_type(clb); - VTR_ASSERT_MSG(clb_pin < type->num_pins, "Must be a valid top-level pin"); - - int pb_pin = -1; - if (place_ctx.block_locs[clb].nets_and_pins_synced_to_z_coordinate) { - //Pins have been offset by z-coordinate, need to remove offset - - VTR_ASSERT(type->num_pins % type->capacity == 0); - int num_basic_block_pins = type->num_pins / type->capacity; - /* Logical location and physical location is offset by z * max_num_block_pins */ - - pb_pin = clb_pin - place_ctx.block_locs[clb].loc.z * num_basic_block_pins; - } else { - pb_pin = clb_pin; - } - - VTR_ASSERT(pb_pin >= 0); - - return pb_pin; -} - -//Inverse of find_clb_pb_pin() -int find_pb_pin_clb_pin(ClusterBlockId clb, int pb_pin) { - auto& place_ctx = g_vpr_ctx.placement(); - - auto type = physical_tile_type(clb); - - int clb_pin = -1; - if (place_ctx.block_locs[clb].nets_and_pins_synced_to_z_coordinate) { - //Pins have been offset by z-coordinate, need to remove offset - VTR_ASSERT(type->num_pins % type->capacity == 0); - int num_basic_block_pins = type->num_pins / type->capacity; - /* Logical location and physical location is offset by z * max_num_block_pins */ - - clb_pin = pb_pin + place_ctx.block_locs[clb].loc.z * num_basic_block_pins; - } else { - //No offset - clb_pin = pb_pin; - } - VTR_ASSERT(clb_pin >= 0); - - return clb_pin; + return std::tuple(clb_net_idx, curr_pb_pin_id, clb_net_pin_idx); } bool is_clb_external_pin(ClusterBlockId blk_id, int pb_pin_id) { @@ -2108,7 +2054,6 @@ void place_sync_external_block_connections(ClusterBlockId iblk) { auto& cluster_ctx = g_vpr_ctx.clustering(); auto& clb_nlist = cluster_ctx.clb_nlist; auto& place_ctx = g_vpr_ctx.mutable_placement(); - VTR_ASSERT_MSG(place_ctx.block_locs[iblk].nets_and_pins_synced_to_z_coordinate == false, "Block net and pins must not be already synced"); auto physical_tile = physical_tile_type(iblk); auto 
logical_block = clb_nlist.block_type(iblk); @@ -2130,9 +2075,6 @@ void place_sync_external_block_connections(ClusterBlockId iblk) { place_ctx.physical_pins.insert(pin, new_physical_pin_index); } } - - //Mark the block as synced - //place_ctx.block_locs[iblk].nets_and_pins_synced_to_z_coordinate = true; } int get_max_num_pins(t_logical_block_type_ptr logical_block) { @@ -2210,16 +2152,16 @@ int get_physical_pin(t_physical_tile_type_ptr physical_tile, return result->second.pin; } -int net_pin_tile_index(const ClusterNetId net_id, int net_pin_index) { +int net_pin_to_tile_pin_index(const ClusterNetId net_id, int net_pin_index) { auto& cluster_ctx = g_vpr_ctx.clustering(); // Get the logical pin index of pin within it's logical block type auto pin_id = cluster_ctx.clb_nlist.net_pin(net_id, net_pin_index); - return pin_tile_index(pin_id); + return tile_pin_index(pin_id); } -int pin_tile_index(const ClusterPinId pin) { +int tile_pin_index(const ClusterPinId pin) { auto& place_ctx = g_vpr_ctx.placement(); return place_ctx.physical_pins[pin]; diff --git a/vpr/src/util/vpr_utils.h b/vpr/src/util/vpr_utils.h index 1bb3604e418..d05dc88173e 100644 --- a/vpr/src/util/vpr_utils.h +++ b/vpr/src/util/vpr_utils.h @@ -85,22 +85,6 @@ std::vector find_clb_pin_sink_atom_pins(ClusterBlockId clb, int logic std::tuple find_pb_route_clb_input_net_pin(ClusterBlockId clb, int sink_pb_route_id); -//Return the pb pin index corresponding to the pin clb_pin on block clb, -//acounting for the effect of 'z' position > 0. -// -// Note that a CLB pin index does not (neccessarily) map directly to the pb_route index representing the first stage -// of internal routing in the block, since a block may have capacity > 1 (e.g. IOs) -// -// In the clustered netlist blocks with capacity > 1 may have their 'z' position > 0, and their clb pin indicies offset -// by the number of pins on the type (c.f. post_place_sync()). 
-// -// This offset is not mirrored in the t_pb or pb graph, so we need to recover the basic pin index before processing -// further -- which is what this function does. -int find_clb_pb_pin(ClusterBlockId clb, int clb_pin); - -//Return the clb_pin corresponding to the pb_pin on the specified block -int find_pb_pin_clb_pin(ClusterBlockId clb, int pb_pin); - //Returns the port matching name within pb_gnode const t_port* find_pb_graph_port(const t_pb_graph_node* pb_gnode, std::string port_name); @@ -180,8 +164,11 @@ int get_physical_pin(t_physical_tile_type_ptr physical_tile, t_logical_block_type_ptr logical_block, int pin); -int net_pin_tile_index(const ClusterNetId net_id, int net_pin_index); -int pin_tile_index(const ClusterPinId pin); +//Returns the physical pin of the tile, related to the given ClusterNedId, and the net pin index +int net_pin_to_tile_pin_index(const ClusterNetId net_id, int net_pin_index); + +//Returns the physical pin of the tile, related to the given ClusterPinId +int tile_pin_index(const ClusterPinId pin); int max_pins_per_grid_tile();