diff --git a/vpr/src/place/compressed_grid.h b/vpr/src/place/compressed_grid.h
index c9030a39376..72ebff4dbc8 100644
--- a/vpr/src/place/compressed_grid.h
+++ b/vpr/src/place/compressed_grid.h
@@ -1,6 +1,7 @@
 #ifndef VPR_COMPRESSED_GRID_H
 #define VPR_COMPRESSED_GRID_H
 
+#include <algorithm>
 #include "physical_types.h"
 #include "vtr_geometry.h"
 
@@ -60,18 +61,22 @@ struct t_compressed_block_grid {
      *
      * This function takes a physical tile location in the grid and converts it to the corresponding
      * compressed location. The conversion approximates by rounding up to the nearest valid compressed location.
+     * If all the compressed locations are less than the grid location, the function will return the last compressed location.
      *
      * @param grid_loc The physical tile location in the grid.
      * @return The corresponding compressed location with the same layer number.
      */
     inline t_physical_tile_loc grid_loc_to_compressed_loc_approx_round_up(t_physical_tile_loc grid_loc) const {
         auto find_compressed_index = [](const std::vector<int>& compressed, int value) -> int {
-            auto itr = std::upper_bound(compressed.begin(), compressed.end(), value);
-            if (itr == compressed.begin())
-                return 0;
-            if (itr == compressed.end() || *(itr - 1) == value)
-                return (int)std::distance(compressed.begin(), itr - 1);
-            return (int)std::distance(compressed.begin(), itr);
+            // Get the first element that is not less than the value
+            auto itr = std::lower_bound(compressed.begin(), compressed.end(), value);
+            if (itr == compressed.end()) {
+                // If all the compressed locations are less than the grid location, return the last compressed location
+                return compressed.size() - 1;
+            } else {
+                // Return the index of the first element that is not less than the value
+                return std::distance(compressed.begin(), itr);
+            }
         };
 
         int layer_num = grid_loc.layer_num;
@@ -86,17 +91,22 @@ struct t_compressed_block_grid {
      *
      * This function takes a physical tile location in the grid and converts it to the corresponding
      * compressed location. The conversion approximates by rounding down to the nearest valid compressed location.
+     * If all the compressed locations are bigger than the grid location, the function will return the first compressed location.
      *
      * @param grid_loc The physical tile location in the grid.
      * @return The corresponding compressed location with the same layer number.
      */
     inline t_physical_tile_loc grid_loc_to_compressed_loc_approx_round_down(t_physical_tile_loc grid_loc) const {
         auto find_compressed_index = [](const std::vector<int>& compressed, int value) -> int {
-            auto itr = std::lower_bound(compressed.begin(), compressed.end(), value);
-            if (itr == compressed.end()) {
-                return (int)std::distance(compressed.begin(), itr - 1);
+            // Get the first element that is strictly bigger than the value
+            auto itr = std::upper_bound(compressed.begin(), compressed.end(), value);
+            if (itr == compressed.begin()) {
+                // If all the compressed locations are bigger than the grid location, return the first compressed location
+                return 0;
+            } else {
+                // Return the index of the last element that is less than or equal to the value
+                return std::distance(compressed.begin(), itr - 1);
             }
-            return (int)std::distance(compressed.begin(), itr);
         };
 
         int layer_num = grid_loc.layer_num;
@@ -111,31 +121,30 @@ struct t_compressed_block_grid {
      *
      * Useful when the point is of a different block type from coords.
      *
-     * @param point represents a coordinate in one dimension of the point
-     * @param coords represents vector of coordinate values of a single type only
-     *
-     * Hence, the exact point coordinate will not be found in coords if they are of different block types. In this case the function will return
-     * the nearest compressed location to point by rounding it down
+     * @param grid_loc non-compressed physical tile location in the grid
+     * @return Nearest x and y compressed locations in the grid (in the same layer)
      */
     inline t_physical_tile_loc grid_loc_to_compressed_loc_approx(t_physical_tile_loc grid_loc) const {
         auto find_closest_compressed_point = [](int loc, const std::vector<int>& compressed_grid_dim) -> int {
+            VTR_ASSERT(compressed_grid_dim.size() > 0);
+
+            // Find the first element not less than loc
             auto itr = std::lower_bound(compressed_grid_dim.begin(), compressed_grid_dim.end(), loc);
-            int cx;
-            if (itr < compressed_grid_dim.end() - 1) {
-                int dist_prev = abs(loc - *itr);
-                int dist_next = abs(loc - *(itr+1));
-                if (dist_prev < dist_next) {
-                    cx = std::distance(compressed_grid_dim.begin(), itr);
-                } else {
-                    cx = std::distance(compressed_grid_dim.begin(), itr + 1);
-                }
-            } else if (itr == compressed_grid_dim.end()) {
-                cx = std::distance(compressed_grid_dim.begin(), itr - 1);
+
+            if (itr == compressed_grid_dim.end()) {
+                // If all the compressed locations are less than the grid location, return the last compressed location
+                return compressed_grid_dim.size() - 1;
+            } else if (itr == compressed_grid_dim.begin() || *itr == loc) {
+                // Exact match, or loc is smaller than every compressed location: return this element's index
+                return std::distance(compressed_grid_dim.begin(), itr);
+            } else {
+                // Otherwise, return the index of the closest compressed location
+                int dist_prev = loc - *(itr - 1);
+                int dist_next = *itr - loc;
+                VTR_ASSERT_DEBUG(dist_prev > 0 && dist_next > 0);
+                return (dist_prev <= dist_next) ? (std::distance(compressed_grid_dim.begin(), itr - 1)) :
+                                                  (std::distance(compressed_grid_dim.begin(), itr));
             }
-
-            return cx;
         };
 
         const int layer_num = grid_loc.layer_num;
diff --git a/vpr/src/route/overuse_report.cpp b/vpr/src/route/overuse_report.cpp
index 38c11908543..06c27580994 100644
--- a/vpr/src/route/overuse_report.cpp
+++ b/vpr/src/route/overuse_report.cpp
@@ -30,6 +30,13 @@ static void report_congested_nets(const Netlist<>& net_list,
 
 static void log_overused_nodes_header();
 static void log_single_overused_node_status(int overuse_index, RRNodeId inode);
+
+/**
+ * @brief When reporting overused IPIN/OPIN nodes, we also print the nets
+ * connected to other pins of the same block. This information may help
+ * the user understand why the node is overused or why other pins are not
+ * being utilized for routing the net.
+ */
 void print_block_pins_nets(std::ostream& os,
                            t_physical_tile_type_ptr physical_type,
                            int layer,
@@ -448,7 +455,8 @@ void print_block_pins_nets(std::ostream& os,
     const auto& rr_graph = g_vpr_ctx.device().rr_graph;
 
     t_pin_range pin_num_range;
-    if (is_pin_on_tile(physical_type, pin_physical_num)) {
+    bool pin_on_tile = is_pin_on_tile(physical_type, pin_physical_num);
+    if (pin_on_tile) {
         pin_num_range.low = 0;
         pin_num_range.high = physical_type->num_pins - 1;
     } else {
@@ -470,7 +478,12 @@ void print_block_pins_nets(std::ostream& os,
     for (int pin = pin_num_range.low; pin <= pin_num_range.high; pin++) {
         t_rr_type rr_type = (get_pin_type_from_pin_physical_num(physical_type, pin) == DRIVER) ? t_rr_type::OPIN : t_rr_type::IPIN;
         RRNodeId node_id = get_pin_rr_node_id(rr_graph.node_lookup(), physical_type, layer, root_x, root_y, pin);
-        VTR_ASSERT(node_id != RRNodeId::INVALID());
+        // When the flat router is enabled, RR node chains are collapsed into a single node. Thus, when
+        // looking up the RR node ID, the lookup may return an invalid node ID. In this case, we skip
+        // this pin.
+        if (!pin_on_tile && node_id == RRNodeId::INVALID()) {
+            continue;
+        }
         auto search_result = rr_node_to_net_map.find(node_id);
         if (rr_type == t_rr_type::OPIN) {
             os << " OPIN - ";
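
Note for reviewers: the sketch below is not part of the patch. It is a minimal, self-contained illustration of the rounding behaviour introduced above, assuming a toy compressed axis; the helper names round_up_index and round_down_index are hypothetical stand-ins for the find_compressed_index lambdas in grid_loc_to_compressed_loc_approx_round_up and _round_down.

// Standalone sketch (not part of the patch): mirrors the lower_bound/upper_bound
// index math of the new rounding helpers on a toy compressed axis.
#include <algorithm>
#include <cstdio>
#include <vector>

// Round up: index of the first compressed column >= value,
// clamped to the last column when every column is smaller.
static int round_up_index(const std::vector<int>& compressed, int value) {
    auto itr = std::lower_bound(compressed.begin(), compressed.end(), value);
    if (itr == compressed.end())
        return (int)compressed.size() - 1;
    return (int)std::distance(compressed.begin(), itr);
}

// Round down: index of the last compressed column <= value,
// clamped to the first column when every column is bigger.
static int round_down_index(const std::vector<int>& compressed, int value) {
    auto itr = std::upper_bound(compressed.begin(), compressed.end(), value);
    if (itr == compressed.begin())
        return 0;
    return (int)std::distance(compressed.begin(), itr - 1);
}

int main() {
    // Hypothetical compressed axis: the block type only exists at x = 2, 5 and 9.
    std::vector<int> compressed = {2, 5, 9};
    for (int value : {0, 2, 4, 5, 7, 12}) {
        std::printf("value=%2d  round_up=%d  round_down=%d\n",
                    value,
                    round_up_index(compressed, value),
                    round_down_index(compressed, value));
    }
    return 0;
}

For the axis {2, 5, 9}, a value of 7 rounds up to index 2 (column 9) and down to index 1 (column 5); values below the first column clamp both lookups to index 0, and values above the last column clamp both to index 2, matching the clamping described in the updated doc comments.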