significantly improve provisional cache rebasing

lcnr
2025-06-26 14:51:16 +02:00
parent 577166503a
commit 733aea5e0a
3 changed files with 101 additions and 92 deletions
@@ -48,7 +48,9 @@ fn initial_provisional_result(
) -> QueryResult<I> {
match kind {
PathKind::Coinductive => response_no_constraints(cx, input, Certainty::Yes),
-        PathKind::Unknown => response_no_constraints(cx, input, Certainty::overflow(false)),
+        PathKind::Unknown | PathKind::ForcedAmbiguity => {
+            response_no_constraints(cx, input, Certainty::overflow(false))
+        }
// Even though we know these cycles to be unproductive, we still return
// overflow during coherence. This is both as we are not 100% confident in
// the implementation yet and any incorrect errors would be unsound there.
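To make the mapping from cycle kind to starting value concrete, here is a minimal standalone sketch. `SketchResult` is an invented stand-in for the solver's real `QueryResult`/`Certainty` types, and the coherence special case from the comment above is elided.

```rust
// Sketch: the seed value fixpoint iteration starts from, per cycle kind.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
enum PathKind {
    Inductive,
    Unknown,
    Coinductive,
    ForcedAmbiguity,
}

#[derive(Debug, Clone, Copy, PartialEq, Eq)]
enum SketchResult {
    Holds,       // coinductive cycles start out as success
    Ambiguous,   // unknown and forced-ambiguity cycles start out as overflow
    DoesNotHold, // inductive cycles start out as an error
}

fn initial_provisional_result(kind: PathKind) -> SketchResult {
    match kind {
        PathKind::Coinductive => SketchResult::Holds,
        PathKind::Unknown | PathKind::ForcedAmbiguity => SketchResult::Ambiguous,
        PathKind::Inductive => SketchResult::DoesNotHold,
    }
}

fn main() {
    // `ForcedAmbiguity` is simply folded into the ambiguous case.
    assert_eq!(initial_provisional_result(PathKind::Coinductive), SketchResult::Holds);
    assert_eq!(initial_provisional_result(PathKind::Unknown), SketchResult::Ambiguous);
    assert_eq!(initial_provisional_result(PathKind::ForcedAmbiguity), SketchResult::Ambiguous);
    assert_eq!(initial_provisional_result(PathKind::Inductive), SketchResult::DoesNotHold);
}
```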
@@ -21,10 +21,9 @@
use derive_where::derive_where;
#[cfg(feature = "nightly")]
use rustc_macros::{Decodable_NoContext, Encodable_NoContext, HashStable_NoContext};
- use rustc_type_ir::data_structures::HashMap;
use tracing::{debug, instrument};
+ use crate::data_structures::HashMap;
mod stack;
use stack::{Stack, StackDepth, StackEntry};
mod global_cache;
@@ -137,6 +136,12 @@ pub enum PathKind {
Unknown,
/// A path with at least one coinductive step. Such cycles hold.
Coinductive,
+     /// A path which is treated as ambiguous. Once a path has this path kind,
+     /// any other segment does not change its kind.
+     ///
+     /// This is currently only used when fuzzing to support negative reasoning.
+     /// For more details, see #143054.
+     ForcedAmbiguity,
}
impl PathKind {
@@ -149,6 +154,9 @@ impl PathKind {
/// to `max(self, rest)`.
fn extend(self, rest: PathKind) -> PathKind {
match (self, rest) {
+            (PathKind::ForcedAmbiguity, _) | (_, PathKind::ForcedAmbiguity) => {
+                PathKind::ForcedAmbiguity
+            }
(PathKind::Coinductive, _) | (_, PathKind::Coinductive) => PathKind::Coinductive,
(PathKind::Unknown, _) | (_, PathKind::Unknown) => PathKind::Unknown,
(PathKind::Inductive, PathKind::Inductive) => PathKind::Inductive,
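As the doc comment says, `extend` is equivalent to `max(self, rest)`. A minimal sketch demonstrating that equivalence, assuming the variants are declared in the order `Inductive < Unknown < Coinductive < ForcedAmbiguity` so that a derived `Ord` agrees with the precedence of the match arms above:

```rust
// Sketch: `extend` as `max` over the path-kind ordering.
#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord)]
enum PathKind {
    Inductive,
    Unknown,
    Coinductive,
    ForcedAmbiguity,
}

impl PathKind {
    fn extend(self, rest: PathKind) -> PathKind {
        self.max(rest)
    }
}

fn main() {
    use PathKind::*;
    assert_eq!(Inductive.extend(Inductive), Inductive);
    assert_eq!(Inductive.extend(Unknown), Unknown);
    assert_eq!(Unknown.extend(Coinductive), Coinductive);
    // `ForcedAmbiguity` absorbs every other kind, matching the first arm.
    assert_eq!(Coinductive.extend(ForcedAmbiguity), ForcedAmbiguity);
}
```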
@@ -187,41 +195,6 @@ fn merge(self, other: impl Into<Self>) -> Self {
}
}
- /// For each goal we track whether the paths from this goal
- /// to its cycle heads are coinductive.
- ///
- /// This is a necessary condition to rebase provisional cache
- /// entries.
- #[derive(Debug, Clone, Copy, PartialEq, Eq)]
- pub enum AllPathsToHeadCoinductive {
-     Yes,
-     No,
- }
- impl From<PathKind> for AllPathsToHeadCoinductive {
-     fn from(path: PathKind) -> AllPathsToHeadCoinductive {
-         match path {
-             PathKind::Coinductive => AllPathsToHeadCoinductive::Yes,
-             _ => AllPathsToHeadCoinductive::No,
-         }
-     }
- }
- impl AllPathsToHeadCoinductive {
-     #[must_use]
-     fn merge(self, other: impl Into<Self>) -> Self {
-         match (self, other.into()) {
-             (AllPathsToHeadCoinductive::Yes, AllPathsToHeadCoinductive::Yes) => {
-                 AllPathsToHeadCoinductive::Yes
-             }
-             (AllPathsToHeadCoinductive::No, _) | (_, AllPathsToHeadCoinductive::No) => {
-                 AllPathsToHeadCoinductive::No
-             }
-         }
-     }
-     fn and_merge(&mut self, other: impl Into<Self>) {
-         *self = self.merge(other);
-     }
- }
#[derive(Debug, Clone, Copy)]
struct AvailableDepth(usize);
impl AvailableDepth {
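For contrast, the removed type above collapses all path information into a single bit, and its `merge` is effectively a boolean AND. A small sketch of the information loss that motivates replacing it with the `PathsToNested` bitflags:

```rust
// Sketch of the removed representation: after one non-coinductive path is
// seen, the entry can no longer distinguish *which* path kinds occurred.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
enum AllPathsToHeadCoinductive {
    Yes,
    No,
}

impl AllPathsToHeadCoinductive {
    fn merge(self, other: Self) -> Self {
        match (self, other) {
            (Self::Yes, Self::Yes) => Self::Yes,
            // Any non-coinductive path poisons the whole entry.
            _ => Self::No,
        }
    }
}

fn main() {
    use AllPathsToHeadCoinductive::*;
    // An unknown path and an inductive path both collapse to the same `No`,
    // which is why the old rebasing logic had to bail out so often.
    assert_eq!(Yes.merge(No), No);
    assert_eq!(No.merge(No), No);
}
```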
@@ -261,9 +234,9 @@ fn cache_entry_is_applicable(self, additional_depth: usize) -> bool {
///
/// We also track all paths from this goal to that head. This is necessary
/// when rebasing provisional cache results.
- #[derive(Clone, Debug, PartialEq, Eq, Default)]
+ #[derive(Clone, Debug, Default)]
  struct CycleHeads {
-     heads: BTreeMap<StackDepth, AllPathsToHeadCoinductive>,
+     heads: BTreeMap<StackDepth, PathsToNested>,
}
impl CycleHeads {
@@ -283,27 +256,16 @@ fn opt_lowest_cycle_head(&self) -> Option<StackDepth> {
self.heads.first_key_value().map(|(k, _)| *k)
}
- fn remove_highest_cycle_head(&mut self) {
+ fn remove_highest_cycle_head(&mut self) -> PathsToNested {
      let last = self.heads.pop_last();
      debug_assert_ne!(last, None);
+     last.unwrap().1
  }
- fn insert(
-     &mut self,
-     head: StackDepth,
-     path_from_entry: impl Into<AllPathsToHeadCoinductive> + Copy,
- ) {
-     self.heads.entry(head).or_insert(path_from_entry.into()).and_merge(path_from_entry);
+ fn insert(&mut self, head: StackDepth, path_from_entry: impl Into<PathsToNested> + Copy) {
+     *self.heads.entry(head).or_insert(path_from_entry.into()) |= path_from_entry.into();
  }
- fn merge(&mut self, heads: &CycleHeads) {
-     for (&head, &path_from_entry) in heads.heads.iter() {
-         self.insert(head, path_from_entry);
-         debug_assert!(matches!(self.heads[&head], AllPathsToHeadCoinductive::Yes));
-     }
- }
- fn iter(&self) -> impl Iterator<Item = (StackDepth, AllPathsToHeadCoinductive)> + '_ {
+ fn iter(&self) -> impl Iterator<Item = (StackDepth, PathsToNested)> + '_ {
self.heads.iter().map(|(k, v)| (*k, *v))
}
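`CycleHeads::insert` now unions bitsets with `|=` instead of AND-ing a boolean. A self-contained sketch with hand-rolled stand-ins: `StackDepth` as a plain `usize` and `PathsToNested` as a `u8` newtype rather than the `bitflags!`-generated type.

```rust
use std::collections::BTreeMap;
use std::ops::BitOrAssign;

type StackDepth = usize;

#[derive(Debug, Clone, Copy, PartialEq, Eq)]
struct PathsToNested(u8);

impl BitOrAssign for PathsToNested {
    fn bitor_assign(&mut self, rhs: Self) {
        self.0 |= rhs.0;
    }
}

#[derive(Default)]
struct CycleHeads {
    heads: BTreeMap<StackDepth, PathsToNested>,
}

impl CycleHeads {
    // Inserting the same head twice accumulates path kinds instead of
    // overwriting, mirroring the `or_insert` + `|=` pattern in the diff.
    fn insert(&mut self, head: StackDepth, path_from_entry: PathsToNested) {
        *self.heads.entry(head).or_insert(path_from_entry) |= path_from_entry;
    }
}

fn main() {
    const UNKNOWN: PathsToNested = PathsToNested(1 << 2);
    const COINDUCTIVE: PathsToNested = PathsToNested(1 << 3);

    let mut heads = CycleHeads::default();
    heads.insert(3, UNKNOWN);
    heads.insert(3, COINDUCTIVE);
    // Both path kinds to head 3 are now remembered.
    assert_eq!(heads.heads[&3], PathsToNested((1 << 2) | (1 << 3)));
}
```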
@@ -317,13 +279,7 @@ fn extend_from_child(&mut self, this: StackDepth, step_kind: PathKind, child: &C
Ordering::Equal => continue,
Ordering::Greater => unreachable!(),
}
-             let path_from_entry = match step_kind {
-                 PathKind::Coinductive => AllPathsToHeadCoinductive::Yes,
-                 PathKind::Unknown | PathKind::Inductive => path_from_entry,
-             };
-             self.insert(head, path_from_entry);
+             self.insert(head, path_from_entry.extend_with(step_kind));
}
}
}
@@ -332,13 +288,14 @@ fn extend_from_child(&mut self, this: StackDepth, step_kind: PathKind, child: &C
/// Tracks how nested goals have been accessed. This is necessary to disable
/// global cache entries if computing them would otherwise result in a cycle or
/// access a provisional cache entry.
- #[derive(Debug, Clone, Copy)]
+ #[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub struct PathsToNested: u8 {
/// The initial value when adding a goal to its own nested goals.
const EMPTY = 1 << 0;
const INDUCTIVE = 1 << 1;
const UNKNOWN = 1 << 2;
const COINDUCTIVE = 1 << 3;
+         const FORCED_AMBIGUITY = 1 << 4;
}
}
impl From<PathKind> for PathsToNested {
@@ -347,6 +304,7 @@ fn from(path: PathKind) -> PathsToNested {
PathKind::Inductive => PathsToNested::INDUCTIVE,
PathKind::Unknown => PathsToNested::UNKNOWN,
PathKind::Coinductive => PathsToNested::COINDUCTIVE,
+             PathKind::ForcedAmbiguity => PathsToNested::FORCED_AMBIGUITY,
}
}
}
@@ -379,10 +337,45 @@ fn extend_with(mut self, path: PathKind) -> Self {
self.insert(PathsToNested::COINDUCTIVE);
}
}
+             PathKind::ForcedAmbiguity => {
+                 if self.intersects(
+                     PathsToNested::EMPTY
+                         | PathsToNested::INDUCTIVE
+                         | PathsToNested::UNKNOWN
+                         | PathsToNested::COINDUCTIVE,
+                 ) {
+                     self.remove(
+                         PathsToNested::EMPTY
+                             | PathsToNested::INDUCTIVE
+                             | PathsToNested::UNKNOWN
+                             | PathsToNested::COINDUCTIVE,
+                     );
+                     self.insert(PathsToNested::FORCED_AMBIGUITY);
+                 }
+             }
}
self
}
+     #[must_use]
+     fn extend_with_paths(self, path: PathsToNested) -> Self {
+         let mut new = PathsToNested::empty();
+         for p in path.iter_paths() {
+             new |= self.extend_with(p);
+         }
+         new
+     }
+     fn iter_paths(self) -> impl Iterator<Item = PathKind> {
+         // Exhaustiveness guard: this irrefutable `let` fails to compile if
+         // a new `PathKind` variant is added without updating the list below.
+         let (PathKind::Inductive
+             | PathKind::Unknown
+             | PathKind::Coinductive
+             | PathKind::ForcedAmbiguity);
+         [PathKind::Inductive, PathKind::Unknown, PathKind::Coinductive, PathKind::ForcedAmbiguity]
+             .into_iter()
+             .filter(move |&p| self.contains(p.into()))
+     }
}
/// The nested goals of each stack entry and the path from the
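The new `extend_with_paths`/`iter_paths` helpers lift the single-step `extend_with` rule to whole bitsets. A runnable sketch using plain bit masks in place of the `bitflags!` type, assuming `extend_with` maps every recorded path kind `p` to `max(p, step)` and treats `EMPTY` as "no steps yet", which is what the match arms above spell out case by case:

```rust
#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord)]
enum PathKind {
    Inductive,
    Unknown,
    Coinductive,
    ForcedAmbiguity,
}

const EMPTY: u8 = 1 << 0;
const INDUCTIVE: u8 = 1 << 1;
const UNKNOWN: u8 = 1 << 2;
const COINDUCTIVE: u8 = 1 << 3;
const FORCED_AMBIGUITY: u8 = 1 << 4;

fn bit(p: PathKind) -> u8 {
    match p {
        PathKind::Inductive => INDUCTIVE,
        PathKind::Unknown => UNKNOWN,
        PathKind::Coinductive => COINDUCTIVE,
        PathKind::ForcedAmbiguity => FORCED_AMBIGUITY,
    }
}

// The `iter_paths` idea: yield every path kind present in the set.
fn kinds(set: u8) -> impl Iterator<Item = PathKind> {
    [PathKind::Inductive, PathKind::Unknown, PathKind::Coinductive, PathKind::ForcedAmbiguity]
        .into_iter()
        .filter(move |&p| set & bit(p) != 0)
}

fn extend_with(set: u8, step: PathKind) -> u8 {
    // EMPTY means "no steps taken yet"; one step of kind `step` turns it
    // into exactly that path kind.
    let mut new = if set & EMPTY != 0 { bit(step) } else { 0 };
    // Every recorded path kind is extended via `max`, i.e. the
    // `PathKind::extend` rule from earlier in the diff.
    for p in kinds(set) {
        new |= bit(p.max(step));
    }
    new
}

fn main() {
    // Matching the coinductive arm above: EMPTY, INDUCTIVE and UNKNOWN all
    // collapse into COINDUCTIVE after one coinductive step.
    assert_eq!(extend_with(EMPTY | INDUCTIVE | UNKNOWN, PathKind::Coinductive), COINDUCTIVE);
    // A forced-ambiguity step absorbs everything.
    assert_eq!(extend_with(EMPTY | COINDUCTIVE, PathKind::ForcedAmbiguity), FORCED_AMBIGUITY);
}
```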
@@ -693,7 +686,7 @@ pub fn evaluate_goal(
if let Some((_scope, expected)) = validate_cache {
// Do not try to move a goal into the cache again if we're testing
// the global cache.
-             assert_eq!(evaluation_result.result, expected, "input={input:?}");
+             assert_eq!(expected, evaluation_result.result, "input={input:?}");
} else if D::inspect_is_noop(inspect) {
self.insert_global_cache(cx, input, evaluation_result, dep_node)
}
@@ -763,14 +756,11 @@ fn clear_dependent_provisional_results(&mut self) {
/// provisional cache entry is still applicable. We need to keep the cache entries to
/// prevent hangs.
///
- /// What we therefore do is check whether the cycle kind of all cycles the goal of a
- /// provisional cache entry is involved in would stay the same when computing the
- /// goal without its cycle head on the stack. For more details, see the relevant
+ /// This can be thought of as pretending to reevaluate the popped head as nested goals
+ /// of this provisional result. For this to be correct, all cycles encountered while
+ /// we'd reevaluate the cycle head as a nested goal must keep the same cycle kind.
  /// [rustc-dev-guide chapter](https://rustc-dev-guide.rust-lang.org/solve/caching.html).
  ///
- /// This can be thought of rotating the sub-tree of this provisional result and changing
- /// its entry point while making sure that all paths through this sub-tree stay the same.
- ///
/// In case the popped cycle head failed to reach a fixpoint anything which depends on
/// its provisional result is invalid. Actually discarding provisional cache entries in
/// this case would cause hangs, so we instead change the result of dependent provisional
@@ -782,7 +772,7 @@ fn rebase_provisional_cache_entries(
stack_entry: &StackEntry<X>,
mut mutate_result: impl FnMut(X::Input, X::Result) -> X::Result,
) {
-         let head = self.stack.next_index();
+         let popped_head = self.stack.next_index();
#[allow(rustc::potential_query_instability)]
self.provisional_cache.retain(|&input, entries| {
entries.retain_mut(|entry| {
@@ -792,30 +782,44 @@ fn rebase_provisional_cache_entries(
path_from_head,
result,
} = entry;
-                 if heads.highest_cycle_head() == head {
+                 let ep = if heads.highest_cycle_head() == popped_head {
+                     heads.remove_highest_cycle_head()
+                 } else {
+                     return true;
+                 };
+                 // We're rebasing an entry `e` over a head `p`. This head
+                 // has a number of own heads `h` it depends on. We need to
+                 // make sure that the path kind of all paths `hph` remains the
+                 // same after rebasing.
+                 //
+                 // After rebasing, the cycles `hph` will go through `e`. We need to make
+                 // sure that for all possible paths `hep`, `heph` is equal to `hph`.
+                 for (h, ph) in stack_entry.heads.iter() {
+                     let hp =
+                         Self::cycle_path_kind(&self.stack, stack_entry.step_kind_from_parent, h);
+                     // We first validate that all cycles while computing `p` would stay
+                     // the same if we were to recompute it as a nested goal of `e`.
+                     let he = hp.extend(*path_from_head);
+                     for ph in ph.iter_paths() {
+                         let hph = hp.extend(ph);
+                         for ep in ep.iter_paths() {
+                             let hep = ep.extend(he);
+                             let heph = hep.extend(ph);
+                             if hph != heph {
+                                 return false;
+                             }
+                         }
+                     }
+                     // If so, all paths reached while computing `p` have to get added
+                     // to the heads of `e` to make sure that rebasing `e` again also
+                     // considers them.
+                     let eph = ep.extend_with_paths(ph);
+                     heads.insert(h, eph);
+                 }
-                 // We only try to rebase if all paths from the cache entry
-                 // to its heads are coinductive. In this case these cycle
-                 // kinds won't change, no matter the goals between these
-                 // heads and the provisional cache entry.
-                 if heads.iter().any(|(_, p)| matches!(p, AllPathsToHeadCoinductive::No)) {
-                     return false;
-                 }
-                 // The same for nested goals of the cycle head.
-                 if stack_entry.heads.iter().any(|(_, p)| matches!(p, AllPathsToHeadCoinductive::No))
-                 {
-                     return false;
-                 }
-                 // Merge the cycle heads of the provisional cache entry and the
-                 // popped head. If the popped cycle head was a root, discard all
-                 // provisional cache entries which depend on it.
-                 heads.merge(&stack_entry.heads);
let Some(head) = heads.opt_highest_cycle_head() else {
return false;
};
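A concrete instance of the validation loop above, with invented path kinds and `extend` written as `max` (the `PathKind::extend` rule): `hp` is the path from a head `h` of the popped goal `p` down to `p`, `path_from_head` the path from `p` to the cached entry `e`, `ph` one recorded path from `p` back up to `h`, and `ep` one recorded path from `e` to `p`.

```rust
#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord)]
enum PathKind {
    Inductive,
    Unknown,
    Coinductive,
    ForcedAmbiguity,
}

fn extend(a: PathKind, b: PathKind) -> PathKind {
    a.max(b)
}

fn main() {
    let hp = PathKind::Inductive;           // path h -> p
    let path_from_head = PathKind::Unknown; // path p -> e
    let ph = PathKind::Coinductive;         // path p -> h
    let ep = PathKind::Inductive;           // recorded path e -> p

    let he = extend(hp, path_from_head);
    let hph = extend(hp, ph); // kind of the original cycle h -> p -> h

    // Kind of the same cycle rerouted through `e`: h -> e -> p -> h.
    let hep = extend(ep, he);
    let heph = extend(hep, ph);

    // Both are coinductive, so this combination passes and rebasing proceeds.
    assert_eq!(hph, heph);

    // A forced-ambiguity path from `e` to `p` would change the cycle kind,
    // so the provisional cache entry would be discarded instead.
    let bad_heph = extend(extend(PathKind::ForcedAmbiguity, he), ph);
    assert_ne!(hph, bad_heph);
}
```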
@@ -3,7 +3,7 @@
use derive_where::derive_where;
use rustc_index::IndexVec;
- use super::{AvailableDepth, Cx, CycleHeads, NestedGoals, PathKind, UsageKind};
+ use crate::search_graph::{AvailableDepth, Cx, CycleHeads, NestedGoals, PathKind, UsageKind};
rustc_index::newtype_index! {
#[orderable]
@@ -79,6 +79,9 @@ pub(super) fn next_index(&self) -> StackDepth {
}
pub(super) fn push(&mut self, entry: StackEntry<X>) -> StackDepth {
+         if cfg!(debug_assertions) && self.entries.iter().any(|e| e.input == entry.input) {
+             panic!("pushing duplicate entry on stack: {entry:?} {:?}", self.entries);
+         }
self.entries.push(entry)
}