Commit 69a3594

Store node_intern_event_id in CurrentDepGraph.
1 parent bd5c107 commit 69a3594

File tree

1 file changed: +24 −22 lines changed
  • compiler/rustc_query_system/src/dep_graph


compiler/rustc_query_system/src/dep_graph/graph.rs

+24 −22

@@ -33,12 +33,6 @@ pub struct DepGraph<K: DepKind> {
     /// each task has a `DepNodeIndex` that uniquely identifies it. This unique
     /// ID is used for self-profiling.
     virtual_dep_node_index: Lrc<AtomicU32>,
-
-    /// The cached event id for profiling node interning. This saves us
-    /// from having to look up the event id every time we intern a node
-    /// which may incur too much overhead.
-    /// This will be None if self-profiling is disabled.
-    node_intern_event_id: Option<EventId>,
 }
 
 rustc_index::newtype_index! {
@@ -116,8 +110,13 @@ impl<K: DepKind> DepGraph<K> {
     ) -> DepGraph<K> {
         let prev_graph_node_count = prev_graph.node_count();
 
-        let current =
-            CurrentDepGraph::new(prev_graph_node_count, encoder, record_graph, record_stats);
+        let current = CurrentDepGraph::new(
+            profiler,
+            prev_graph_node_count,
+            encoder,
+            record_graph,
+            record_stats,
+        );
 
         // Instantiate a dependy-less node only once for anonymous queries.
         let _green_node_index = current.intern_new_node(
@@ -128,10 +127,6 @@ impl<K: DepKind> DepGraph<K> {
         );
         debug_assert_eq!(_green_node_index, DepNodeIndex::SINGLETON_DEPENDENCYLESS_ANON_NODE);
 
-        let node_intern_event_id = profiler
-            .get_or_alloc_cached_string("incr_comp_intern_dep_graph_node")
-            .map(EventId::from_label);
-
         DepGraph {
             data: Some(Lrc::new(DepGraphData {
                 previous_work_products: prev_work_products,
@@ -142,16 +137,11 @@ impl<K: DepKind> DepGraph<K> {
                 colors: DepNodeColorMap::new(prev_graph_node_count),
             })),
             virtual_dep_node_index: Lrc::new(AtomicU32::new(0)),
-            node_intern_event_id,
         }
     }
 
     pub fn new_disabled() -> DepGraph<K> {
-        DepGraph {
-            data: None,
-            virtual_dep_node_index: Lrc::new(AtomicU32::new(0)),
-            node_intern_event_id: None,
-        }
+        DepGraph { data: None, virtual_dep_node_index: Lrc::new(AtomicU32::new(0)) }
     }
 
     /// Returns `true` if we are actually building the full dep-graph, and `false` otherwise.
@@ -275,9 +265,6 @@ impl<K: DepKind> DepGraph<K> {
 
         let print_status = cfg!(debug_assertions) && dcx.sess().opts.debugging_opts.dep_tasks;
 
-        // Get timer for profiling `DepNode` interning
-        let node_intern_timer =
-            self.node_intern_event_id.map(|eid| dcx.profiler().generic_activity_with_event_id(eid));
         // Intern the new `DepNode`.
         let (dep_node_index, prev_and_color) = data.current.intern_node(
             dcx.profiler(),
@@ -287,7 +274,6 @@ impl<K: DepKind> DepGraph<K> {
             current_fingerprint,
             print_status,
         );
-        drop(node_intern_timer);
 
         hashing_timer.finish_with_query_invocation_id(dep_node_index.into());
 
@@ -876,10 +862,17 @@ pub(super) struct CurrentDepGraph<K: DepKind> {
     /// debugging and only active with `debug_assertions`.
     total_read_count: AtomicU64,
     total_duplicate_read_count: AtomicU64,
+
+    /// The cached event id for profiling node interning. This saves us
+    /// from having to look up the event id every time we intern a node
+    /// which may incur too much overhead.
+    /// This will be None if self-profiling is disabled.
+    node_intern_event_id: Option<EventId>,
 }
 
 impl<K: DepKind> CurrentDepGraph<K> {
     fn new(
+        profiler: &SelfProfilerRef,
         prev_graph_node_count: usize,
         encoder: FileEncoder,
         record_graph: bool,
@@ -908,6 +901,10 @@ impl<K: DepKind> CurrentDepGraph<K> {
 
         let new_node_count_estimate = 102 * prev_graph_node_count / 100 + 200;
 
+        let node_intern_event_id = profiler
+            .get_or_alloc_cached_string("incr_comp_intern_dep_graph_node")
+            .map(EventId::from_label);
+
         CurrentDepGraph {
             encoder: Steal::new(GraphEncoder::new(
                 encoder,
@@ -927,6 +924,7 @@ impl<K: DepKind> CurrentDepGraph<K> {
             forbidden_edge,
             total_read_count: AtomicU64::new(0),
             total_duplicate_read_count: AtomicU64::new(0),
+            node_intern_event_id,
         }
     }
 
@@ -970,6 +968,10 @@ impl<K: DepKind> CurrentDepGraph<K> {
     ) -> (DepNodeIndex, Option<(SerializedDepNodeIndex, DepNodeColor)>) {
         let print_status = cfg!(debug_assertions) && print_status;
 
+        // Get timer for profiling `DepNode` interning
+        let _node_intern_timer =
+            self.node_intern_event_id.map(|eid| profiler.generic_activity_with_event_id(eid));
+
         if let Some(prev_index) = prev_graph.node_to_index_opt(&key) {
             // Determine the color and index of the new `DepNode`.
             if let Some(fingerprint) = fingerprint {
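To make the intent of the change easier to see outside the diff: the profiling event id is now resolved once, when `CurrentDepGraph` is constructed, and the cached `Option<EventId>` is reused for every interned node, with the timer started inside `intern_node` itself rather than around the call site in `DepGraph`. Below is a minimal, self-contained sketch of that pattern; the `Profiler`, `EventId`, and `TimerGuard` types are simplified stand-ins, not the real `SelfProfilerRef`/measureme API.

```rust
// Hypothetical sketch of the caching pattern this commit settles on:
// look up the event id once at construction, reuse it on every interning.

#[derive(Clone, Copy, Debug)]
struct EventId(u32);

struct Profiler {
    enabled: bool,
}

impl Profiler {
    /// Returns None when self-profiling is disabled, mirroring
    /// `get_or_alloc_cached_string(..).map(EventId::from_label)`.
    fn cached_event_id(&self, _label: &str) -> Option<EventId> {
        if self.enabled { Some(EventId(1)) } else { None }
    }

    /// Starts a timed activity; the guard records it when dropped.
    fn generic_activity_with_event_id(&self, id: EventId) -> TimerGuard {
        TimerGuard { id }
    }
}

struct TimerGuard {
    id: EventId,
}

impl Drop for TimerGuard {
    fn drop(&mut self) {
        // A real guard would record the elapsed time for this event id.
        println!("finished activity {:?}", self.id);
    }
}

struct CurrentDepGraph {
    /// Cached once so interning never repeats the string lookup.
    node_intern_event_id: Option<EventId>,
}

impl CurrentDepGraph {
    fn new(profiler: &Profiler) -> Self {
        CurrentDepGraph {
            node_intern_event_id: profiler.cached_event_id("incr_comp_intern_dep_graph_node"),
        }
    }

    fn intern_node(&self, profiler: &Profiler, key: &str) {
        // The timer covers the whole interning step and stops at scope exit.
        let _node_intern_timer =
            self.node_intern_event_id.map(|eid| profiler.generic_activity_with_event_id(eid));
        println!("interning {}", key);
    }
}

fn main() {
    let profiler = Profiler { enabled: true };
    let current = CurrentDepGraph::new(&profiler);
    current.intern_node(&profiler, "example_dep_node");
}
```

With this shape, every path through `intern_node` is measured consistently, and the disabled-profiler case costs only an `Option::map` over `None`.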
