Skip to content

Commit 241aad3

Browse files
committed
Refactor diagnostic emission for green nodes
1 parent 136783d commit 241aad3

File tree

1 file changed

+49
-30
lines changed

1 file changed

+49
-30
lines changed

src/librustc/dep_graph/graph.rs

Lines changed: 49 additions & 30 deletions
Original file line numberDiff line numberDiff line change
@@ -7,6 +7,7 @@ use rustc_data_structures::sync::{Lrc, Lock, AtomicU32, Ordering};
77
use std::env;
88
use std::hash::Hash;
99
use std::collections::hash_map::Entry;
10+
use std::mem;
1011
use crate::ty::{self, TyCtxt};
1112
use crate::util::common::{ProfileQueriesMsg, profq_msg};
1213
use parking_lot::{Mutex, Condvar};
@@ -61,11 +62,11 @@ struct DepGraphData {
6162

6263
colors: DepNodeColorMap,
6364

64-
/// A set of loaded diagnostics that have been emitted.
65-
emitted_diagnostics: Mutex<FxHashSet<DepNodeIndex>>,
65+
/// A set of loaded diagnostics that is in the process of being emitted.
66+
emitting_diagnostics: Mutex<FxHashSet<DepNodeIndex>>,
6667

6768
/// Used to wait for diagnostics to be emitted.
68-
emitted_diagnostics_cond_var: Condvar,
69+
emitting_diagnostics_cond_var: Condvar,
6970

7071
/// When we load, there may be `.o` files, cached MIR, or other such
7172
/// things available to us. If we find that they are not dirty, we
@@ -99,8 +100,8 @@ impl DepGraph {
99100
previous_work_products: prev_work_products,
100101
dep_node_debug: Default::default(),
101102
current: Lock::new(CurrentDepGraph::new(prev_graph_node_count)),
102-
emitted_diagnostics: Default::default(),
103-
emitted_diagnostics_cond_var: Condvar::new(),
103+
emitting_diagnostics: Default::default(),
104+
emitting_diagnostics_cond_var: Condvar::new(),
104105
previous: prev_graph,
105106
colors: DepNodeColorMap::new(prev_graph_node_count),
106107
loaded_from_cache: Default::default(),
@@ -744,7 +745,7 @@ impl DepGraph {
744745

745746
// There may be multiple threads trying to mark the same dep node green concurrently
746747

747-
let (dep_node_index, did_allocation) = {
748+
let dep_node_index = {
748749
let mut current = data.current.borrow_mut();
749750

750751
// Copy the fingerprint from the previous graph,
@@ -758,17 +759,22 @@ impl DepGraph {
758759

759760
// ... emitting any stored diagnostic ...
760761

762+
// FIXME: Store the fact that a node has diagnostics in a bit in the dep graph somewhere
763+
// Maybe store a list on disk and encode this fact in the DepNodeState
761764
let diagnostics = tcx.queries.on_disk_cache
762-
.load_diagnostics(tcx, prev_dep_node_index);
765+
.load_diagnostics(tcx, prev_dep_node_index);
763766

764767
if unlikely!(diagnostics.len() > 0) {
765768
self.emit_diagnostics(
766769
tcx,
767770
data,
768771
dep_node_index,
769-
did_allocation,
772+
prev_dep_node_index,
770773
diagnostics
771774
);
775+
} else {
776+
// Avoid calling the destructor, since LLVM fails to optimize it away
777+
mem::forget(diagnostics);
772778
}
773779

774780
// ... and finally storing a "Green" entry in the color map.
@@ -784,45 +790,58 @@ impl DepGraph {
784790
Some(dep_node_index)
785791
}
786792

787-
/// Atomically emits some loaded diagnostics, assuming that this only gets called with
788-
/// `did_allocation` set to `true` on a single thread.
793+
/// Atomically emits some loaded diagnostics.
794+
/// This may be called concurrently on multiple threads for the same dep node.
789795
#[cold]
790796
#[inline(never)]
791797
fn emit_diagnostics<'tcx>(
792798
&self,
793799
tcx: TyCtxt<'tcx>,
794800
data: &DepGraphData,
795801
dep_node_index: DepNodeIndex,
796-
did_allocation: bool,
802+
prev_dep_node_index: SerializedDepNodeIndex,
797803
diagnostics: Vec<Diagnostic>,
798804
) {
799-
if did_allocation || !cfg!(parallel_compiler) {
800-
// Only the thread which did the allocation emits the error messages
801-
let handle = tcx.sess.diagnostic();
805+
let mut emitting = data.emitting_diagnostics.lock();
806+
807+
if data.colors.get(prev_dep_node_index) == Some(DepNodeColor::Green(dep_node_index)) {
808+
// The node is already green so diagnostics must have been emitted already
809+
return;
810+
}
811+
812+
if emitting.insert(dep_node_index) {
813+
// We were the first to insert the node in the set so this thread
814+
// must emit the diagnostics and signal other potentially waiting
815+
// threads after.
816+
mem::drop(emitting);
802817

803818
// Promote the previous diagnostics to the current session.
804819
tcx.queries.on_disk_cache
805-
.store_diagnostics(dep_node_index, diagnostics.clone().into());
820+
.store_diagnostics(dep_node_index, diagnostics.clone().into());
821+
822+
let handle = tcx.sess.diagnostic();
806823

807824
for diagnostic in diagnostics {
808825
DiagnosticBuilder::new_diagnostic(handle, diagnostic).emit();
809826
}
810827

811-
#[cfg(parallel_compiler)]
812-
{
813-
// Mark the diagnostics as emitted and wake up waiters
814-
data.emitted_diagnostics.lock().insert(dep_node_index);
815-
data.emitted_diagnostics_cond_var.notify_all();
816-
}
828+
// Mark the node as green now that diagnostics are emitted
829+
data.colors.insert(prev_dep_node_index, DepNodeColor::Green(dep_node_index));
830+
831+
// Remove the node from the set
832+
data.emitting_diagnostics.lock().remove(&dep_node_index);
833+
834+
// Wake up waiters
835+
data.emitting_diagnostics_cond_var.notify_all();
817836
} else {
818-
// The other threads will wait for the diagnostics to be emitted
837+
// We must wait for the other thread to finish emitting the diagnostics
819838

820-
let mut emitted_diagnostics = data.emitted_diagnostics.lock();
821839
loop {
822-
if emitted_diagnostics.contains(&dep_node_index) {
840+
data.emitting_diagnostics_cond_var.wait(&mut emitting);
841+
if data.colors
842+
.get(prev_dep_node_index) == Some(DepNodeColor::Green(dep_node_index)) {
823843
break;
824844
}
825-
data.emitted_diagnostics_cond_var.wait(&mut emitted_diagnostics);
826845
}
827846
}
828847
}
@@ -1038,7 +1057,7 @@ impl CurrentDepGraph {
10381057
hash: self.anon_id_seed.combine(hasher.finish()),
10391058
};
10401059

1041-
self.intern_node(target_dep_node, task_deps.reads, Fingerprint::ZERO).0
1060+
self.intern_node(target_dep_node, task_deps.reads, Fingerprint::ZERO)
10421061
}
10431062

10441063
fn alloc_node(
@@ -1048,19 +1067,19 @@ impl CurrentDepGraph {
10481067
fingerprint: Fingerprint
10491068
) -> DepNodeIndex {
10501069
debug_assert!(!self.node_to_node_index.contains_key(&dep_node));
1051-
self.intern_node(dep_node, edges, fingerprint).0
1070+
self.intern_node(dep_node, edges, fingerprint)
10521071
}
10531072

10541073
fn intern_node(
10551074
&mut self,
10561075
dep_node: DepNode,
10571076
edges: SmallVec<[DepNodeIndex; 8]>,
10581077
fingerprint: Fingerprint
1059-
) -> (DepNodeIndex, bool) {
1078+
) -> DepNodeIndex {
10601079
debug_assert_eq!(self.node_to_node_index.len(), self.data.len());
10611080

10621081
match self.node_to_node_index.entry(dep_node) {
1063-
Entry::Occupied(entry) => (*entry.get(), false),
1082+
Entry::Occupied(entry) => *entry.get(),
10641083
Entry::Vacant(entry) => {
10651084
let dep_node_index = DepNodeIndex::new(self.data.len());
10661085
self.data.push(DepNodeData {
@@ -1069,7 +1088,7 @@ impl CurrentDepGraph {
10691088
fingerprint
10701089
});
10711090
entry.insert(dep_node_index);
1072-
(dep_node_index, true)
1091+
dep_node_index
10731092
}
10741093
}
10751094
}

0 commit comments

Comments
 (0)