@@ -7,6 +7,7 @@ use rustc_data_structures::sync::{Lrc, Lock, AtomicU32, Ordering};
 use std::env;
 use std::hash::Hash;
 use std::collections::hash_map::Entry;
+use std::mem;
 use crate::ty::{self, TyCtxt};
 use crate::util::common::{ProfileQueriesMsg, profq_msg};
 use parking_lot::{Mutex, Condvar};
@@ -61,11 +62,11 @@ struct DepGraphData {

     colors: DepNodeColorMap,

-    /// A set of loaded diagnostics that have been emitted.
-    emitted_diagnostics: Mutex<FxHashSet<DepNodeIndex>>,
+    /// A set of loaded diagnostics that are in the process of being emitted.
+    emitting_diagnostics: Mutex<FxHashSet<DepNodeIndex>>,

     /// Used to wait for diagnostics to be emitted.
-    emitted_diagnostics_cond_var: Condvar,
+    emitting_diagnostics_cond_var: Condvar,

     /// When we load, there may be `.o` files, cached MIR, or other such
     /// things available to us. If we find that they are not dirty, we
@@ -99,8 +100,8 @@ impl DepGraph {
                 previous_work_products: prev_work_products,
                 dep_node_debug: Default::default(),
                 current: Lock::new(CurrentDepGraph::new(prev_graph_node_count)),
-                emitted_diagnostics: Default::default(),
-                emitted_diagnostics_cond_var: Condvar::new(),
+                emitting_diagnostics: Default::default(),
+                emitting_diagnostics_cond_var: Condvar::new(),
                 previous: prev_graph,
                 colors: DepNodeColorMap::new(prev_graph_node_count),
                 loaded_from_cache: Default::default(),
@@ -744,7 +745,7 @@ impl DepGraph {

         // There may be multiple threads trying to mark the same dep node green concurrently

-        let (dep_node_index, did_allocation) = {
+        let dep_node_index = {
             let mut current = data.current.borrow_mut();

             // Copy the fingerprint from the previous graph,
@@ -758,17 +759,22 @@ impl DepGraph {

         // ... emitting any stored diagnostic ...

+        // FIXME: Store the fact that a node has diagnostics in a bit in the dep graph somewhere
+        // Maybe store a list on disk and encode this fact in the DepNodeState
         let diagnostics = tcx.queries.on_disk_cache
-            .load_diagnostics(tcx, prev_dep_node_index);
+                              .load_diagnostics(tcx, prev_dep_node_index);

         if unlikely!(diagnostics.len() > 0) {
             self.emit_diagnostics(
                 tcx,
                 data,
                 dep_node_index,
-                did_allocation,
+                prev_dep_node_index,
                 diagnostics
             );
+        } else {
+            // Avoid calling the destructor, since LLVM fails to optimize it away
+            mem::forget(diagnostics);
         }

         // ... and finally storing a "Green" entry in the color map.
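The new `else` arm exists purely to skip `Vec`'s drop glue on the hot path. A minimal standalone sketch of why that is sound, under the assumption that the empty `diagnostics` vector returned here never allocated (the value below is hypothetical):

```rust
use std::mem;

fn main() {
    // An empty Vec that has never been pushed to owns no heap buffer, ...
    let diagnostics: Vec<String> = Vec::new();
    assert_eq!(diagnostics.capacity(), 0);
    // ... so forgetting it (a safe operation) leaks nothing; it merely
    // skips the destructor call that LLVM reportedly fails to elide.
    mem::forget(diagnostics);
}
```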
@@ -784,45 +790,58 @@ impl DepGraph {
         Some(dep_node_index)
     }

-    /// Atomically emits some loaded diagnotics, assuming that this only gets called with
-    /// `did_allocation` set to `true` on a single thread.
+    /// Atomically emits some loaded diagnostics.
+    /// This may be called concurrently on multiple threads for the same dep node.
     #[cold]
     #[inline(never)]
     fn emit_diagnostics<'tcx>(
         &self,
         tcx: TyCtxt<'tcx>,
         data: &DepGraphData,
         dep_node_index: DepNodeIndex,
-        did_allocation: bool,
+        prev_dep_node_index: SerializedDepNodeIndex,
         diagnostics: Vec<Diagnostic>,
     ) {
-        if did_allocation || !cfg!(parallel_compiler) {
-            // Only the thread which did the allocation emits the error messages
-            let handle = tcx.sess.diagnostic();
+        let mut emitting = data.emitting_diagnostics.lock();
+
+        if data.colors.get(prev_dep_node_index) == Some(DepNodeColor::Green(dep_node_index)) {
+            // The node is already green so diagnostics must have been emitted already
+            return;
+        }
+
+        if emitting.insert(dep_node_index) {
+            // We were the first to insert the node in the set so this thread
+            // must emit the diagnostics and signal other potentially waiting
+            // threads after.
+            mem::drop(emitting);

             // Promote the previous diagnostics to the current session.
             tcx.queries.on_disk_cache
-                .store_diagnostics(dep_node_index, diagnostics.clone().into());
+               .store_diagnostics(dep_node_index, diagnostics.clone().into());
+
+            let handle = tcx.sess.diagnostic();

             for diagnostic in diagnostics {
                 DiagnosticBuilder::new_diagnostic(handle, diagnostic).emit();
             }

-            #[cfg(parallel_compiler)]
-            {
-                // Mark the diagnostics and emitted and wake up waiters
-                data.emitted_diagnostics.lock().insert(dep_node_index);
-                data.emitted_diagnostics_cond_var.notify_all();
-            }
+            // Mark the node as green now that diagnostics are emitted
+            data.colors.insert(prev_dep_node_index, DepNodeColor::Green(dep_node_index));
+
+            // Remove the node from the set
+            data.emitting_diagnostics.lock().remove(&dep_node_index);
+
+            // Wake up waiters
+            data.emitting_diagnostics_cond_var.notify_all();
         } else {
-            // The other threads will wait for the diagnostics to be emitted
+            // We must wait for the other thread to finish emitting the diagnostics

-            let mut emitted_diagnostics = data.emitted_diagnostics.lock();
             loop {
-                if emitted_diagnostics.contains(&dep_node_index) {
+                data.emitting_diagnostics_cond_var.wait(&mut emitting);
+                if data.colors
+                       .get(prev_dep_node_index) == Some(DepNodeColor::Green(dep_node_index)) {
                     break;
                 }
-                data.emitted_diagnostics_cond_var.wait(&mut emitted_diagnostics);
             }
         }
     }
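A self-contained sketch of the protocol `emit_diagnostics` now follows, with hypothetical names and `std::sync` standing in for `parking_lot` and the color map: the first thread to insert the node into the set emits and then signals; every other caller blocks on the condvar until the node is marked finished.

```rust
use std::collections::HashSet;
use std::sync::{Arc, Condvar, Mutex};
use std::thread;

#[derive(Default)]
struct EmitOnce {
    emitting: Mutex<HashSet<u32>>, // nodes some thread is currently emitting
    finished: Mutex<HashSet<u32>>, // stand-in for "node is green" in the color map
    cond: Condvar,                 // signaled when an emission completes
}

impl EmitOnce {
    fn emit_diagnostics(&self, node: u32) {
        let mut emitting = self.emitting.lock().unwrap();
        if self.finished.lock().unwrap().contains(&node) {
            return; // emission already completed; nothing to do
        }
        if emitting.insert(node) {
            // We inserted first: emit without holding the lock, ...
            drop(emitting);
            println!("{:?} emits diagnostics for node {}", thread::current().id(), node);
            // ... then mark the node finished, leave the set, and wake waiters.
            self.finished.lock().unwrap().insert(node);
            self.emitting.lock().unwrap().remove(&node);
            self.cond.notify_all();
        } else {
            // Another thread is emitting; block until the node is finished.
            loop {
                emitting = self.cond.wait(emitting).unwrap();
                if self.finished.lock().unwrap().contains(&node) {
                    break;
                }
            }
        }
    }
}

fn main() {
    let shared = Arc::new(EmitOnce::default());
    let handles: Vec<_> = (0..4).map(|_| {
        let shared = Arc::clone(&shared);
        thread::spawn(move || shared.emit_diagnostics(42))
    }).collect();
    for h in handles {
        h.join().unwrap();
    }
}
```

Waiters re-check the finished set after every wakeup, mirroring how the patch re-reads `data.colors` after `wait`, so spurious wakeups are harmless; and because waiters hold the `emitting` lock from the insert check until they wait, the emitter cannot remove the node and notify before they are queued on the condvar.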
@@ -1038,7 +1057,7 @@ impl CurrentDepGraph {
             hash: self.anon_id_seed.combine(hasher.finish()),
         };

-        self.intern_node(target_dep_node, task_deps.reads, Fingerprint::ZERO).0
+        self.intern_node(target_dep_node, task_deps.reads, Fingerprint::ZERO)
     }

     fn alloc_node(
@@ -1048,19 +1067,19 @@ impl CurrentDepGraph {
         fingerprint: Fingerprint
     ) -> DepNodeIndex {
         debug_assert!(!self.node_to_node_index.contains_key(&dep_node));
-        self.intern_node(dep_node, edges, fingerprint).0
+        self.intern_node(dep_node, edges, fingerprint)
     }

     fn intern_node(
         &mut self,
         dep_node: DepNode,
         edges: SmallVec<[DepNodeIndex; 8]>,
         fingerprint: Fingerprint
-    ) -> (DepNodeIndex, bool) {
+    ) -> DepNodeIndex {
         debug_assert_eq!(self.node_to_node_index.len(), self.data.len());

         match self.node_to_node_index.entry(dep_node) {
-            Entry::Occupied(entry) => (*entry.get(), false),
+            Entry::Occupied(entry) => *entry.get(),
             Entry::Vacant(entry) => {
                 let dep_node_index = DepNodeIndex::new(self.data.len());
                 self.data.push(DepNodeData {
@@ -1069,7 +1088,7 @@ impl CurrentDepGraph {
                     fingerprint
                 });
                 entry.insert(dep_node_index);
-                (dep_node_index, true)
+                dep_node_index
             }
         }
     }
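With the `did_allocation` flag gone, `intern_node` reduces to the plain entry-or-append interning pattern: either reuse the index already recorded for the node, or append the node's data and record the new index. A minimal sketch under assumed simplifications (a `String` key and `usize` index standing in for `DepNode` and `DepNodeIndex`):

```rust
use std::collections::hash_map::{Entry, HashMap};

struct Interner {
    node_to_index: HashMap<String, usize>,
    data: Vec<String>,
}

impl Interner {
    fn intern(&mut self, node: String) -> usize {
        // The map and the data vector grow in lockstep.
        debug_assert_eq!(self.node_to_index.len(), self.data.len());
        match self.node_to_index.entry(node.clone()) {
            // Already interned: hand back the existing index.
            Entry::Occupied(entry) => *entry.get(),
            // New node: append its data and record the fresh index.
            Entry::Vacant(entry) => {
                let index = self.data.len();
                self.data.push(node);
                entry.insert(index);
                index
            }
        }
    }
}

fn main() {
    let mut interner = Interner { node_to_index: HashMap::new(), data: Vec::new() };
    let a = interner.intern("foo".to_string());
    let b = interner.intern("foo".to_string());
    assert_eq!(a, b); // the second call reuses the existing index
}
```

Returning only the index is enough for both callers shown above, which is why the `.0` projections could be dropped.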