Rollup merge of #61923 - Zoxc:dep-stream-prefix-2, r=pnkfelix

Prerequisites from dep graph refactoring #2

Split out from https://github.com/rust-lang/rust/pull/60035 and overlaps with https://github.com/rust-lang/rust/pull/60559.
This commit is contained in:
Mazdak Farrokhzad
2019-07-11 04:33:12 +02:00
committed by GitHub
+52 -36
View File
@@ -7,6 +7,7 @@
use std::env;
use std::hash::Hash;
use std::collections::hash_map::Entry;
use std::mem;
use crate::ty::{self, TyCtxt};
use crate::util::common::{ProfileQueriesMsg, profq_msg};
use parking_lot::{Mutex, Condvar};
@@ -61,11 +62,11 @@ struct DepGraphData {
colors: DepNodeColorMap,
/// A set of loaded diagnostics that have been emitted.
emitted_diagnostics: Mutex<FxHashSet<DepNodeIndex>>,
    /// A set of loaded diagnostics that is in the process of being emitted.
emitting_diagnostics: Mutex<FxHashSet<DepNodeIndex>>,
/// Used to wait for diagnostics to be emitted.
emitted_diagnostics_cond_var: Condvar,
emitting_diagnostics_cond_var: Condvar,
/// When we load, there may be `.o` files, cached MIR, or other such
/// things available to us. If we find that they are not dirty, we
@@ -99,8 +100,8 @@ pub fn new(prev_graph: PreviousDepGraph,
previous_work_products: prev_work_products,
dep_node_debug: Default::default(),
current: Lock::new(CurrentDepGraph::new(prev_graph_node_count)),
emitted_diagnostics: Default::default(),
emitted_diagnostics_cond_var: Condvar::new(),
emitting_diagnostics: Default::default(),
emitting_diagnostics_cond_var: Condvar::new(),
previous: prev_graph,
colors: DepNodeColorMap::new(prev_graph_node_count),
loaded_from_cache: Default::default(),
@@ -744,7 +745,7 @@ fn try_mark_previous_green<'tcx>(
// There may be multiple threads trying to mark the same dep node green concurrently
let (dep_node_index, did_allocation) = {
let dep_node_index = {
let mut current = data.current.borrow_mut();
// Copy the fingerprint from the previous graph,
@@ -758,34 +759,36 @@ fn try_mark_previous_green<'tcx>(
// ... emitting any stored diagnostic ...
// FIXME: Store the fact that a node has diagnostics in a bit in the dep graph somewhere
// Maybe store a list on disk and encode this fact in the DepNodeState
let diagnostics = tcx.queries.on_disk_cache
.load_diagnostics(tcx, prev_dep_node_index);
.load_diagnostics(tcx, prev_dep_node_index);
#[cfg(not(parallel_compiler))]
debug_assert!(data.colors.get(prev_dep_node_index).is_none(),
"DepGraph::try_mark_previous_green() - Duplicate DepNodeColor \
insertion for {:?}", dep_node);
if unlikely!(diagnostics.len() > 0) {
self.emit_diagnostics(
tcx,
data,
dep_node_index,
did_allocation,
prev_dep_node_index,
diagnostics
);
}
// ... and finally storing a "Green" entry in the color map.
// Multiple threads can all write the same color here
#[cfg(not(parallel_compiler))]
debug_assert!(data.colors.get(prev_dep_node_index).is_none(),
"DepGraph::try_mark_previous_green() - Duplicate DepNodeColor \
insertion for {:?}", dep_node);
data.colors.insert(prev_dep_node_index, DepNodeColor::Green(dep_node_index));
debug!("try_mark_previous_green({:?}) - END - successfully marked as green", dep_node);
Some(dep_node_index)
}
/// Atomically emits some loaded diagnotics, assuming that this only gets called with
/// `did_allocation` set to `true` on a single thread.
/// Atomically emits some loaded diagnostics.
/// This may be called concurrently on multiple threads for the same dep node.
#[cold]
#[inline(never)]
fn emit_diagnostics<'tcx>(
@@ -793,36 +796,49 @@ fn emit_diagnostics<'tcx>(
tcx: TyCtxt<'tcx>,
data: &DepGraphData,
dep_node_index: DepNodeIndex,
did_allocation: bool,
prev_dep_node_index: SerializedDepNodeIndex,
diagnostics: Vec<Diagnostic>,
) {
if did_allocation || !cfg!(parallel_compiler) {
// Only the thread which did the allocation emits the error messages
let handle = tcx.sess.diagnostic();
let mut emitting = data.emitting_diagnostics.lock();
if data.colors.get(prev_dep_node_index) == Some(DepNodeColor::Green(dep_node_index)) {
// The node is already green so diagnostics must have been emitted already
return;
}
if emitting.insert(dep_node_index) {
// We were the first to insert the node in the set so this thread
// must emit the diagnostics and signal other potentially waiting
// threads after.
mem::drop(emitting);
// Promote the previous diagnostics to the current session.
tcx.queries.on_disk_cache
.store_diagnostics(dep_node_index, diagnostics.clone().into());
.store_diagnostics(dep_node_index, diagnostics.clone().into());
let handle = tcx.sess.diagnostic();
for diagnostic in diagnostics {
DiagnosticBuilder::new_diagnostic(handle, diagnostic).emit();
}
#[cfg(parallel_compiler)]
{
// Mark the diagnostics and emitted and wake up waiters
data.emitted_diagnostics.lock().insert(dep_node_index);
data.emitted_diagnostics_cond_var.notify_all();
}
} else {
// The other threads will wait for the diagnostics to be emitted
// Mark the node as green now that diagnostics are emitted
data.colors.insert(prev_dep_node_index, DepNodeColor::Green(dep_node_index));
// Remove the node from the set
data.emitting_diagnostics.lock().remove(&dep_node_index);
// Wake up waiters
data.emitting_diagnostics_cond_var.notify_all();
} else {
// We must wait for the other thread to finish emitting the diagnostic
let mut emitted_diagnostics = data.emitted_diagnostics.lock();
loop {
if emitted_diagnostics.contains(&dep_node_index) {
data.emitting_diagnostics_cond_var.wait(&mut emitting);
if data.colors
.get(prev_dep_node_index) == Some(DepNodeColor::Green(dep_node_index)) {
break;
}
data.emitted_diagnostics_cond_var.wait(&mut emitted_diagnostics);
}
}
}
@@ -1027,7 +1043,7 @@ fn complete_anon_task(&mut self, kind: DepKind, task_deps: TaskDeps) -> DepNodeI
hash: self.anon_id_seed.combine(hasher.finish()),
};
self.intern_node(target_dep_node, task_deps.reads, Fingerprint::ZERO).0
self.intern_node(target_dep_node, task_deps.reads, Fingerprint::ZERO)
}
fn alloc_node(
@@ -1037,7 +1053,7 @@ fn alloc_node(
fingerprint: Fingerprint
) -> DepNodeIndex {
debug_assert!(!self.node_to_node_index.contains_key(&dep_node));
self.intern_node(dep_node, edges, fingerprint).0
self.intern_node(dep_node, edges, fingerprint)
}
fn intern_node(
@@ -1045,11 +1061,11 @@ fn intern_node(
dep_node: DepNode,
edges: SmallVec<[DepNodeIndex; 8]>,
fingerprint: Fingerprint
) -> (DepNodeIndex, bool) {
) -> DepNodeIndex {
debug_assert_eq!(self.node_to_node_index.len(), self.data.len());
match self.node_to_node_index.entry(dep_node) {
Entry::Occupied(entry) => (*entry.get(), false),
Entry::Occupied(entry) => *entry.get(),
Entry::Vacant(entry) => {
let dep_node_index = DepNodeIndex::new(self.data.len());
self.data.push(DepNodeData {
@@ -1058,7 +1074,7 @@ fn intern_node(
fingerprint
});
entry.insert(dep_node_index);
(dep_node_index, true)
dep_node_index
}
}
}