Use &C::Key less in queries.

Currently we use a mix of `C::Key` and `&C::Key` parameters. The former
is more common and a bit nicer, so convert some of the latter. This
results in fewer conversions between the two types, and fewer sigils.
This commit is contained in:
Nicholas Nethercote
2026-03-06 10:03:42 +11:00
parent c7b206bba4
commit face186874
6 changed files with 28 additions and 29 deletions
+2 -3
View File
@@ -355,11 +355,10 @@ fn make_helpers_for_query(query: &Query, streams: &mut HelperTokenStreams) {
// Generate a function to check whether we should cache the query to disk, for some key.
if let Some(CacheOnDiskIf { block, .. }) = modifiers.cache_on_disk_if.as_ref() {
// `disallowed_pass_by_ref` is needed because some keys are `rustc_pass_by_value`.
streams.cache_on_disk_if_fns_stream.extend(quote! {
#[allow(unused_variables, rustc::disallowed_pass_by_ref)]
#[allow(unused_variables)]
#[inline]
pub fn #erased_name<'tcx>(tcx: TyCtxt<'tcx>, #key_pat: &#key_ty) -> bool
pub fn #erased_name<'tcx>(tcx: TyCtxt<'tcx>, #key_pat: #key_ty) -> bool
#block
});
}
+3 -3
View File
@@ -2391,7 +2391,7 @@
/// sets of different crates do not intersect.
query exported_non_generic_symbols(cnum: CrateNum) -> &'tcx [(ExportedSymbol<'tcx>, SymbolExportInfo)] {
desc { "collecting exported non-generic symbols for crate `{}`", cnum}
cache_on_disk_if { *cnum == LOCAL_CRATE }
cache_on_disk_if { cnum == LOCAL_CRATE }
separate_provide_extern
}
@@ -2404,7 +2404,7 @@
/// sets of different crates do not intersect.
query exported_generic_symbols(cnum: CrateNum) -> &'tcx [(ExportedSymbol<'tcx>, SymbolExportInfo)] {
desc { "collecting exported generic symbols for crate `{}`", cnum}
cache_on_disk_if { *cnum == LOCAL_CRATE }
cache_on_disk_if { cnum == LOCAL_CRATE }
separate_provide_extern
}
@@ -2771,7 +2771,7 @@
query externally_implementable_items(cnum: CrateNum) -> &'tcx FxIndexMap<DefId, (EiiDecl, FxIndexMap<DefId, EiiImpl>)> {
arena_cache
desc { "looking up the externally implementable items of a crate" }
cache_on_disk_if { *cnum == LOCAL_CRATE }
cache_on_disk_if { cnum == LOCAL_CRATE }
separate_provide_extern
}
+6 -6
View File
@@ -15,11 +15,11 @@
///
/// (Also performs some associated bookkeeping, if a value was found.)
#[inline(always)]
fn try_get_cached<'tcx, C>(tcx: TyCtxt<'tcx>, cache: &C, key: &C::Key) -> Option<C::Value>
fn try_get_cached<'tcx, C>(tcx: TyCtxt<'tcx>, cache: &C, key: C::Key) -> Option<C::Value>
where
C: QueryCache,
{
match cache.lookup(key) {
match cache.lookup(&key) {
Some((value, index)) => {
tcx.prof.query_cache_hit(index.into());
tcx.dep_graph.read_index(index);
@@ -41,7 +41,7 @@ pub(crate) fn query_get_at<'tcx, C>(
where
C: QueryCache,
{
match try_get_cached(tcx, &query.cache, &key) {
match try_get_cached(tcx, &query.cache, key) {
Some(value) => value,
None => (query.execute_query_fn)(tcx, span, key, QueryMode::Get).unwrap(),
}
@@ -58,7 +58,7 @@ pub(crate) fn query_ensure_ok_or_done<'tcx, C>(
) where
C: QueryCache,
{
match try_get_cached(tcx, &query.cache, &key) {
match try_get_cached(tcx, &query.cache, key) {
Some(_value) => {}
None => {
(query.execute_query_fn)(tcx, DUMMY_SP, key, QueryMode::Ensure { ensure_mode });
@@ -78,7 +78,7 @@ pub(crate) fn query_ensure_result<'tcx, C, T>(
C: QueryCache<Value = Erased<Result<T, ErrorGuaranteed>>>,
Result<T, ErrorGuaranteed>: Erasable,
{
match try_get_cached(tcx, &query.cache, &key) {
match try_get_cached(tcx, &query.cache, key) {
Some(value) => erase::restore_val(value).map(drop),
None => (query.execute_query_fn)(
tcx,
@@ -112,7 +112,7 @@ pub(crate) fn query_feed<'tcx, C>(
let format_value = query_vtable.format_value;
// Check whether the in-memory cache already has a value for this key.
match try_get_cached(tcx, &query_vtable.cache, &key) {
match try_get_cached(tcx, &query_vtable.cache, key) {
Some(old) => {
// The query already has a cached value for this key.
// That's OK if both values are the same, i.e. they have the same hash,
+3 -3
View File
@@ -121,17 +121,17 @@ pub struct QueryVTable<'tcx, C: QueryCache> {
/// This should be the only code that calls the provider function.
pub invoke_provider_fn: fn(tcx: TyCtxt<'tcx>, key: C::Key) -> C::Value,
pub will_cache_on_disk_for_key_fn: fn(tcx: TyCtxt<'tcx>, key: &C::Key) -> bool,
pub will_cache_on_disk_for_key_fn: fn(tcx: TyCtxt<'tcx>, key: C::Key) -> bool,
pub try_load_from_disk_fn: fn(
tcx: TyCtxt<'tcx>,
key: &C::Key,
key: C::Key,
prev_index: SerializedDepNodeIndex,
index: DepNodeIndex,
) -> Option<C::Value>,
pub is_loadable_from_disk_fn:
fn(tcx: TyCtxt<'tcx>, key: &C::Key, index: SerializedDepNodeIndex) -> bool,
fn(tcx: TyCtxt<'tcx>, key: C::Key, index: SerializedDepNodeIndex) -> bool,
/// Function pointer that hashes this query's result values.
///
+12 -12
View File
@@ -22,8 +22,8 @@
use crate::plumbing::{current_query_job, next_job_id, start_query};
#[inline]
fn equivalent_key<K: Eq, V>(k: &K) -> impl Fn(&(K, V)) -> bool + '_ {
move |x| x.0 == *k
fn equivalent_key<K: Eq, V>(k: K) -> impl Fn(&(K, V)) -> bool {
move |x| x.0 == k
}
/// Obtains the enclosed [`QueryJob`], or panics if this query evaluation
@@ -173,7 +173,7 @@ fn complete<C>(self, cache: &C, result: C::Value, dep_node_index: DepNodeIndex)
// since unwinding also wants to look at this map, this can also prevent a double
// panic.
let mut shard = state.active.lock_shard_by_hash(key_hash);
match shard.find_entry(key_hash, equivalent_key(&key)) {
match shard.find_entry(key_hash, equivalent_key(key)) {
Err(_) => None,
Ok(occupied) => Some(occupied.remove().0.1),
}
@@ -195,7 +195,7 @@ fn drop(&mut self) {
let Self { state, key, key_hash } = *self;
let job = {
let mut shard = state.active.lock_shard_by_hash(key_hash);
match shard.find_entry(key_hash, equivalent_key(&key)) {
match shard.find_entry(key_hash, equivalent_key(key)) {
Err(_) => panic!(),
Ok(occupied) => {
let ((key, value), vacant) = occupied.remove();
@@ -254,7 +254,7 @@ fn wait_for_query<'tcx, C: QueryCache>(
// poisoned due to a panic instead.
let key_hash = sharded::make_hash(&key);
let shard = query.state.active.lock_shard_by_hash(key_hash);
match shard.find(key_hash, equivalent_key(&key)) {
match shard.find(key_hash, equivalent_key(key)) {
// The query we waited on panicked. Continue unwinding here.
Some((_, ActiveKeyStatus::Poisoned)) => FatalError.raise(),
_ => panic!(
@@ -303,7 +303,7 @@ fn try_execute_query<'tcx, C: QueryCache, const INCR: bool>(
let current_job_id = current_query_job();
match state_lock.entry(key_hash, equivalent_key(&key), |(k, _)| sharded::make_hash(k)) {
match state_lock.entry(key_hash, equivalent_key(key), |(k, _)| sharded::make_hash(k)) {
Entry::Vacant(entry) => {
// Nothing has computed or is computing the query, so we start a new job and insert it in the
// state map.
@@ -459,7 +459,7 @@ fn execute_job_incr<'tcx, C: QueryCache>(
tcx,
dep_graph_data,
query,
&key,
key,
dep_node,
prev_index,
dep_node_index,
@@ -507,7 +507,7 @@ fn load_from_disk_or_invoke_provider_green<'tcx, C: QueryCache>(
tcx: TyCtxt<'tcx>,
dep_graph_data: &DepGraphData,
query: &'tcx QueryVTable<'tcx, C>,
key: &C::Key,
key: C::Key,
dep_node: &DepNode,
prev_index: SerializedDepNodeIndex,
dep_node_index: DepNodeIndex,
@@ -570,7 +570,7 @@ fn load_from_disk_or_invoke_provider_green<'tcx, C: QueryCache>(
// The dep-graph for this computation is already in-place.
// Call the query provider.
let value = tcx.dep_graph.with_ignore(|| (query.invoke_provider_fn)(tcx, *key));
let value = tcx.dep_graph.with_ignore(|| (query.invoke_provider_fn)(tcx, key));
prof_timer.finish_with_query_invocation_id(dep_node_index.into());
@@ -615,7 +615,7 @@ struct EnsureCanSkip {
fn check_if_ensure_can_skip_execution<'tcx, C: QueryCache>(
query: &'tcx QueryVTable<'tcx, C>,
tcx: TyCtxt<'tcx>,
key: &C::Key,
key: C::Key,
ensure_mode: EnsureMode,
) -> EnsureCanSkip {
// Queries with `eval_always` should never skip execution.
@@ -626,7 +626,7 @@ fn check_if_ensure_can_skip_execution<'tcx, C: QueryCache>(
// Ensuring an anonymous query makes no sense
assert!(!query.anon);
let dep_node = DepNode::construct(tcx, query.dep_kind, key);
let dep_node = DepNode::construct(tcx, query.dep_kind, &key);
let dep_graph = &tcx.dep_graph;
let serialized_dep_node_index = match dep_graph.try_mark_green(tcx, &dep_node) {
@@ -695,7 +695,7 @@ pub(super) fn execute_query_incr_inner<'tcx, C: QueryCache>(
let dep_node: Option<DepNode> = match mode {
QueryMode::Ensure { ensure_mode } => {
let EnsureCanSkip { skip_execution, dep_node } =
check_if_ensure_can_skip_execution(query, tcx, &key, ensure_mode);
check_if_ensure_can_skip_execution(query, tcx, key, ensure_mode);
if skip_execution {
// Return early to skip execution.
return None;
+2 -2
View File
@@ -163,7 +163,7 @@ pub(crate) fn encode_query_results<'a, 'tcx, C, V>(
assert!(all_inactive(&query.state));
query.cache.for_each(&mut |key, value, dep_node| {
if (query.will_cache_on_disk_for_key_fn)(tcx, key) {
if (query.will_cache_on_disk_for_key_fn)(tcx, *key) {
let dep_node = SerializedDepNodeIndex::new(dep_node.index());
// Record position of the cache entry.
@@ -219,7 +219,7 @@ pub(crate) fn promote_from_disk_inner<'tcx, Q: GetQueryVTable<'tcx>>(
// If the recovered key isn't eligible for cache-on-disk, then there's no
// value on disk to promote.
if !(query.will_cache_on_disk_for_key_fn)(tcx, &key) {
if !(query.will_cache_on_disk_for_key_fn)(tcx, key) {
return;
}