@@ -33,12 +33,6 @@ pub struct DepGraph<K: DepKind> {
     /// each task has a `DepNodeIndex` that uniquely identifies it. This unique
     /// ID is used for self-profiling.
     virtual_dep_node_index: Lrc<AtomicU32>,
-
-    /// The cached event id for profiling node interning. This saves us
-    /// from having to look up the event id every time we intern a node
-    /// which may incur too much overhead.
-    /// This will be None if self-profiling is disabled.
-    node_intern_event_id: Option<EventId>,
 }
 
 rustc_index::newtype_index! {
@@ -116,8 +110,13 @@ impl<K: DepKind> DepGraph<K> {
     ) -> DepGraph<K> {
         let prev_graph_node_count = prev_graph.node_count();
 
-        let current =
-            CurrentDepGraph::new(prev_graph_node_count, encoder, record_graph, record_stats);
+        let current = CurrentDepGraph::new(
+            profiler,
+            prev_graph_node_count,
+            encoder,
+            record_graph,
+            record_stats,
+        );
 
         // Instantiate a dependency-less node only once for anonymous queries.
         let _green_node_index = current.intern_new_node(
@@ -128,10 +127,6 @@ impl<K: DepKind> DepGraph<K> {
         );
         debug_assert_eq!(_green_node_index, DepNodeIndex::SINGLETON_DEPENDENCYLESS_ANON_NODE);
 
-        let node_intern_event_id = profiler
-            .get_or_alloc_cached_string("incr_comp_intern_dep_graph_node")
-            .map(EventId::from_label);
-
         DepGraph {
             data: Some(Lrc::new(DepGraphData {
                 previous_work_products: prev_work_products,
@@ -142,16 +137,11 @@ impl<K: DepKind> DepGraph<K> {
                 colors: DepNodeColorMap::new(prev_graph_node_count),
             })),
             virtual_dep_node_index: Lrc::new(AtomicU32::new(0)),
-            node_intern_event_id,
         }
     }
 
     pub fn new_disabled() -> DepGraph<K> {
-        DepGraph {
-            data: None,
-            virtual_dep_node_index: Lrc::new(AtomicU32::new(0)),
-            node_intern_event_id: None,
-        }
+        DepGraph { data: None, virtual_dep_node_index: Lrc::new(AtomicU32::new(0)) }
     }
 
     /// Returns `true` if we are actually building the full dep-graph, and `false` otherwise.
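Note: `new_disabled` can collapse to a single line because, with `node_intern_event_id` gone, a disabled graph is nothing but `data: None`. A minimal standalone sketch of that `Option`-based enabled/disabled split follows; the empty `DepGraphData` placeholder and the `is_fully_enabled` body are assumptions for illustration, not quoted from the diff:

// Minimal sketch of the Option-based enabled/disabled split; DepGraphData
// is reduced to an empty placeholder here.
struct DepGraphData;

struct DepGraph {
    data: Option<DepGraphData>,
}

impl DepGraph {
    fn new_disabled() -> DepGraph {
        DepGraph { data: None }
    }

    // Assumed body: the graph is "fully enabled" exactly when data is present.
    fn is_fully_enabled(&self) -> bool {
        self.data.is_some()
    }
}

fn main() {
    assert!(!DepGraph::new_disabled().is_fully_enabled());
}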
@@ -275,9 +265,6 @@ impl<K: DepKind> DepGraph<K> {
 
         let print_status = cfg!(debug_assertions) && dcx.sess().opts.debugging_opts.dep_tasks;
 
-        // Get timer for profiling `DepNode` interning
-        let node_intern_timer =
-            self.node_intern_event_id.map(|eid| dcx.profiler().generic_activity_with_event_id(eid));
         // Intern the new `DepNode`.
         let (dep_node_index, prev_and_color) = data.current.intern_node(
             dcx.profiler(),
@@ -287,7 +274,6 @@ impl<K: DepKind> DepGraph<K> {
             current_fingerprint,
             print_status,
         );
-        drop(node_intern_timer);
 
         hashing_timer.finish_with_query_invocation_id(dep_node_index.into());
 
@@ -876,10 +862,17 @@ pub(super) struct CurrentDepGraph<K: DepKind> {
     /// debugging and only active with `debug_assertions`.
     total_read_count: AtomicU64,
     total_duplicate_read_count: AtomicU64,
+
+    /// The cached event id for profiling node interning. This saves us
+    /// from having to look up the event id every time we intern a node
+    /// which may incur too much overhead.
+    /// This will be None if self-profiling is disabled.
+    node_intern_event_id: Option<EventId>,
 }
 
 impl<K: DepKind> CurrentDepGraph<K> {
     fn new(
+        profiler: &SelfProfilerRef,
         prev_graph_node_count: usize,
         encoder: FileEncoder,
         record_graph: bool,
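Note: the doc comment above describes an event-id caching pattern: look the id up once when the graph is built, then reuse it on every intern. A minimal standalone sketch of that pattern follows; `Profiler` and `EventId` are hypothetical stand-ins for the `rustc_data_structures::profiling` types, not their real signatures:

// Hypothetical stand-ins for SelfProfilerRef / EventId; only the caching
// shape is the point, not the real rustc signatures.
#[derive(Clone, Copy)]
struct EventId(u32);

struct Profiler {
    enabled: bool,
}

impl Profiler {
    // Returns None when self-profiling is disabled, so the cached field
    // below is also None and interning pays no profiling cost.
    fn get_or_alloc_cached_string(&self, _label: &str) -> Option<EventId> {
        self.enabled.then(|| EventId(42))
    }
}

struct CurrentDepGraph {
    // Looked up once at construction instead of on every intern_node call.
    node_intern_event_id: Option<EventId>,
}

impl CurrentDepGraph {
    fn new(profiler: &Profiler) -> CurrentDepGraph {
        let node_intern_event_id =
            profiler.get_or_alloc_cached_string("incr_comp_intern_dep_graph_node");
        CurrentDepGraph { node_intern_event_id }
    }
}

fn main() {
    let graph = CurrentDepGraph::new(&Profiler { enabled: true });
    assert!(graph.node_intern_event_id.is_some());
}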
@@ -908,6 +901,10 @@ impl<K: DepKind> CurrentDepGraph<K> {
 
         let new_node_count_estimate = 102 * prev_graph_node_count / 100 + 200;
 
+        let node_intern_event_id = profiler
+            .get_or_alloc_cached_string("incr_comp_intern_dep_graph_node")
+            .map(EventId::from_label);
+
         CurrentDepGraph {
             encoder: Steal::new(GraphEncoder::new(
                 encoder,
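Note: the `new_node_count_estimate` line above reserves roughly 2% headroom over the previous graph's node count, plus a flat 200 nodes so that small graphs still get some slack. A worked example of that arithmetic:

// Same formula as the line above, extracted for illustration.
fn new_node_count_estimate(prev_graph_node_count: usize) -> usize {
    102 * prev_graph_node_count / 100 + 200
}

fn main() {
    // Empty previous graph: just the flat 200.
    assert_eq!(new_node_count_estimate(0), 200);
    // 10_000 previous nodes: 10_200 (+2%) plus 200 = 10_400.
    assert_eq!(new_node_count_estimate(10_000), 10_400);
}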
@@ -927,6 +924,7 @@ impl<K: DepKind> CurrentDepGraph<K> {
             forbidden_edge,
             total_read_count: AtomicU64::new(0),
             total_duplicate_read_count: AtomicU64::new(0),
+            node_intern_event_id,
         }
     }
 
@@ -970,6 +968,10 @@ impl<K: DepKind> CurrentDepGraph<K> {
     ) -> (DepNodeIndex, Option<(SerializedDepNodeIndex, DepNodeColor)>) {
         let print_status = cfg!(debug_assertions) && print_status;
 
+        // Get timer for profiling `DepNode` interning
+        let _node_intern_timer =
+            self.node_intern_event_id.map(|eid| profiler.generic_activity_with_event_id(eid));
+
         if let Some(prev_index) = prev_graph.node_to_index_opt(&key) {
             // Determine the color and index of the new `DepNode`.
             if let Some(fingerprint) = fingerprint {
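Note: the timer moved into `intern_node` is now a plain RAII guard: it is bound to `_node_intern_timer` instead of being dropped explicitly (the old `drop(node_intern_timer)` call), so it stops when the function returns. The leading underscore matters here: a bare `_` pattern would drop the guard, and stop the timer, immediately. A minimal sketch of the pattern with a hypothetical `TimingGuard` (the real self-profiler guard type is not shown in this diff):

use std::time::Instant;

// Hypothetical guard; the drop-at-end-of-scope behavior is the same idea
// as the self-profiler activity guard used above.
struct TimingGuard {
    label: &'static str,
    start: Instant,
}

impl Drop for TimingGuard {
    // Runs when the guard goes out of scope, i.e. when intern_node returns.
    fn drop(&mut self) {
        println!("{}: {:?}", self.label, self.start.elapsed());
    }
}

fn generic_activity(label: &'static str) -> TimingGuard {
    TimingGuard { label, start: Instant::now() }
}

fn intern_node() {
    // Bound to a named `_`-prefixed variable: alive (and timing) until the
    // end of the function. A bare `let _ = ...` would drop it immediately.
    let _node_intern_timer = generic_activity("incr_comp_intern_dep_graph_node");
    // ... interning work happens while the guard is alive ...
}

fn main() {
    intern_node();
}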