Streamline ActiveJobGuard completion.

`ActiveJobGuard::complete` and `ActiveJobGuard::drop` have very similar
code, though non-essential structural differences obscure this a little.

This commit factors out the common code into a new method
`ActiveJobGuard::complete_inner`. It also inlines and removes
`expect_job`, which ends up having a single call site.
This commit is contained in:
Nicholas Nethercote
2026-03-06 17:02:31 +11:00
parent e41bf75d1e
commit 18ade8478e
+32 -45
View File
@@ -1,5 +1,5 @@
use std::hash::Hash;
use std::mem;
use std::mem::ManuallyDrop;
use rustc_data_structures::hash_table::{Entry, HashTable};
use rustc_data_structures::stack::ensure_sufficient_stack;
@@ -26,17 +26,6 @@ fn equivalent_key<K: Eq, V>(k: K) -> impl Fn(&(K, V)) -> bool {
move |x| x.0 == k
}
/// Obtains the enclosed [`QueryJob`], or panics if this query evaluation
/// was poisoned by a panic.
fn expect_job<'tcx>(status: ActiveKeyStatus<'tcx>) -> QueryJob<'tcx> {
    match status {
        ActiveKeyStatus::Started(job) => job,
        // A `Poisoned` entry means a previous attempt to run this query
        // panicked; callers must not reach this state with a live job.
        ActiveKeyStatus::Poisoned => {
            panic!("job for query failed to start and was poisoned")
        }
    }
}
/// Returns `true` if no query keys are currently active in `state`,
/// i.e. every shard of the active-query map is empty.
pub(crate) fn all_inactive<'tcx, K>(state: &QueryState<'tcx, K>) -> bool {
    state.active.lock_shards().all(|shard| shard.is_empty())
}
@@ -154,33 +143,45 @@ impl<'tcx, K> ActiveJobGuard<'tcx, K>
{
/// Completes the query by updating the query cache with the `result`,
/// signals the waiter, and forgets the guard so it won't poison the query.
fn complete<C>(self, cache: &C, result: C::Value, dep_node_index: DepNodeIndex)
fn complete<C>(self, cache: &C, value: C::Value, dep_node_index: DepNodeIndex)
where
C: QueryCache<Key = K>,
{
// Forget ourself so our destructor won't poison the query.
// (Extract fields by value first to make sure we don't leak anything.)
let Self { state, key, key_hash }: Self = self;
mem::forget(self);
// Mark as complete before we remove the job from the active state
// so no other thread can re-execute this query.
cache.complete(key, result, dep_node_index);
cache.complete(self.key, value, dep_node_index);
let job = {
// don't keep the lock during the `unwrap()` of the retrieved value, or we taint the
// underlying shard.
// since unwinding also wants to look at this map, this can also prevent a double
// panic.
let mut shard = state.active.lock_shard_by_hash(key_hash);
match shard.find_entry(key_hash, equivalent_key(key)) {
Err(_) => None,
Ok(occupied) => Some(occupied.remove().0.1),
let mut this = ManuallyDrop::new(self);
// Drop everything without poisoning the query.
this.drop_and_maybe_poison(/* poison */ false);
}
fn drop_and_maybe_poison(&mut self, poison: bool) {
let status = {
let mut shard = self.state.active.lock_shard_by_hash(self.key_hash);
match shard.find_entry(self.key_hash, equivalent_key(self.key)) {
Err(_) => {
// Note: we must not panic while holding the lock, because unwinding also looks
// at this map, which can result in a double panic. So drop it first.
drop(shard);
panic!();
}
Ok(occupied) => {
let ((key, status), vacant) = occupied.remove();
if poison {
vacant.insert((key, ActiveKeyStatus::Poisoned));
}
status
}
}
};
let job = expect_job(job.expect("active query job entry"));
job.signal_complete();
// Also signal the completion of the job, so waiters will continue execution.
match status {
ActiveKeyStatus::Started(job) => job.signal_complete(),
ActiveKeyStatus::Poisoned => panic!(),
}
}
}
@@ -192,21 +193,7 @@ impl<'tcx, K> Drop for ActiveJobGuard<'tcx, K>
#[cold]
fn drop(&mut self) {
// Poison the query so jobs waiting on it panic.
let Self { state, key, key_hash } = *self;
let job = {
let mut shard = state.active.lock_shard_by_hash(key_hash);
match shard.find_entry(key_hash, equivalent_key(key)) {
Err(_) => panic!(),
Ok(occupied) => {
let ((key, value), vacant) = occupied.remove();
vacant.insert((key, ActiveKeyStatus::Poisoned));
expect_job(value)
}
}
};
// Also signal the completion of the job, so waiters
// will continue execution.
job.signal_complete();
self.drop_and_maybe_poison(/* poison */ true);
}
}