Rename all-query functions.

There are four functions that use `for_each_query_vtable!` to call an "inner"
function. They are:

- collect_active_jobs_from_all_queries -> gather_active_jobs
- alloc_self_profile_query_strings -> alloc_self_profile_query_strings_for_query_cache
- encode_all_query_results -> encode_query_results
- query_key_hash_verify_all -> query_key_hash_verify

These names are all over the place. This commit renames them as follows:

- collect_active_query_jobs{,_inner}
- alloc_self_profile_query_strings{,_inner}
- encode_query_values{,_inner}
- verify_query_key_hashes{,_inner}

This:
- puts the verb at the start
- uses "inner" for all the inners (which makes sense now that the inners are
  all next to their callers)
- uses `_query_` consistently
- avoids `all`, because the plurals are enough
- uses `values` instead of `results`
- removes the `collect`/`gather` distinction, which is no longer
  important

This commit is contained in:
Nicholas Nethercote
2026-03-12 17:39:01 +11:00
parent d27207d3d6
commit b41b0c494c
9 changed files with 33 additions and 40 deletions
+2 -2
View File
@@ -18,7 +18,7 @@
use rustc_metadata::{DylibError, EncodedMetadata, load_symbol_from_dylib};
use rustc_middle::dep_graph::{WorkProduct, WorkProductId};
use rustc_middle::ty::{CurrentGcx, TyCtxt};
use rustc_query_impl::{CollectActiveJobsKind, collect_active_jobs_from_all_queries};
use rustc_query_impl::{CollectActiveJobsKind, collect_active_query_jobs};
use rustc_session::config::{
Cfg, CrateType, OutFileName, OutputFilenames, OutputTypes, Sysroot, host_tuple,
};
@@ -255,7 +255,7 @@ pub(crate) fn run_in_thread_pool_with_globals<
// Ensure there were no errors collecting all active jobs.
// We need the complete map to ensure we find a cycle to
// break.
collect_active_jobs_from_all_queries(
collect_active_query_jobs(
tcx,
CollectActiveJobsKind::FullNoContention,
)
+2 -2
View File
@@ -98,7 +98,7 @@ fn clone(&self) -> Self { *self }
/// Trying to execute a query afterwards would attempt to read the result cache we just dropped.
hook save_dep_graph() -> ();
hook query_key_hash_verify_all() -> ();
hook verify_query_key_hashes() -> ();
/// Ensure the given scalar is valid for the given type.
/// This checks non-recursive runtime validity.
@@ -109,7 +109,7 @@ fn clone(&self) -> Self { *self }
/// Creates the MIR for a given `DefId`, including unreachable code.
hook build_mir_inner_impl(def: LocalDefId) -> mir::Body<'tcx>;
hook encode_all_query_results(
hook encode_query_values(
encoder: &mut CacheEncoder<'_, 'tcx>,
query_result_index: &mut EncodedDepNodeIndex
) -> ();
@@ -246,10 +246,10 @@ pub fn serialize(&self, tcx: TyCtxt<'_>, encoder: FileEncoder) -> FileEncodeResu
// Encode query results.
let mut query_result_index = EncodedDepNodeIndex::new();
tcx.sess.time("encode_query_results", || {
tcx.sess.time("encode_query_values", || {
let enc = &mut encoder;
let qri = &mut query_result_index;
tcx.encode_all_query_results(enc, qri);
tcx.encode_query_values(enc, qri);
});
// Encode side effects.
+1 -1
View File
@@ -1678,7 +1678,7 @@ pub fn finish(self) {
self.alloc_self_profile_query_strings();
self.save_dep_graph();
self.query_key_hash_verify_all();
self.verify_query_key_hashes();
if let Err((path, error)) = self.dep_graph.finish_encoding() {
self.sess.dcx().emit_fatal(crate::error::FailedWritingFile { path: &path, error });
+8 -15
View File
@@ -56,14 +56,14 @@ pub enum CollectActiveJobsKind {
/// Prefer passing `false` to `require_complete` to avoid potential deadlocks,
/// especially when called from within a deadlock handler, unless a
/// complete map is needed and no deadlock is possible at this call site.
pub fn collect_active_jobs_from_all_queries<'tcx>(
pub fn collect_active_query_jobs<'tcx>(
tcx: TyCtxt<'tcx>,
collect_kind: CollectActiveJobsKind,
) -> QueryJobMap<'tcx> {
let mut job_map = QueryJobMap::default();
for_each_query_vtable!(ALL, tcx, |query| {
gather_active_jobs(query, collect_kind, &mut job_map);
collect_active_query_jobs_inner(query, collect_kind, &mut job_map);
});
job_map
@@ -71,14 +71,8 @@ pub fn collect_active_jobs_from_all_queries<'tcx>(
/// Internal plumbing for collecting the set of active jobs for this query.
///
/// Should only be called from `collect_active_query_jobs`.
///
/// (We arbitrarily use the word "gather" when collecting the jobs for
/// each individual query, so that we have distinct function names to
/// grep for.)
///
/// Aborts if jobs can't be collected as specified by `collect_kind`.
fn gather_active_jobs<'tcx, C>(
fn collect_active_query_jobs_inner<'tcx, C>(
query: &'tcx QueryVTable<'tcx, C>,
collect_kind: CollectActiveJobsKind,
job_map: &mut QueryJobMap<'tcx>,
@@ -86,7 +80,7 @@ fn gather_active_jobs<'tcx, C>(
C: QueryCache<Key: QueryKey + DynSend + DynSync>,
QueryVTable<'tcx, C>: DynSync,
{
let mut gather_shard_jobs = |shard: &HashTable<(C::Key, ActiveKeyStatus<'tcx>)>| {
let mut collect_shard_jobs = |shard: &HashTable<(C::Key, ActiveKeyStatus<'tcx>)>| {
for (key, status) in shard.iter() {
if let ActiveKeyStatus::Started(job) = status {
// This function is safe to call with the shard locked because it is very simple.
@@ -99,13 +93,13 @@ fn gather_active_jobs<'tcx, C>(
match collect_kind {
CollectActiveJobsKind::Full => {
for shard in query.state.active.lock_shards() {
gather_shard_jobs(&shard);
collect_shard_jobs(&shard);
}
}
CollectActiveJobsKind::FullNoContention => {
for shard in query.state.active.try_lock_shards() {
match shard {
Some(shard) => gather_shard_jobs(&shard),
Some(shard) => collect_shard_jobs(&shard),
None => panic!("Failed to collect active jobs for query `{}`!", query.name),
}
}
@@ -113,7 +107,7 @@ fn gather_active_jobs<'tcx, C>(
CollectActiveJobsKind::PartialAllowed => {
for shard in query.state.active.try_lock_shards() {
match shard {
Some(shard) => gather_shard_jobs(&shard),
Some(shard) => collect_shard_jobs(&shard),
None => warn!("Failed to collect active jobs for query `{}`!", query.name),
}
}
@@ -218,8 +212,7 @@ fn cycle_error<'tcx, C: QueryCache>(
) -> (C::Value, Option<DepNodeIndex>) {
// Ensure there were no errors collecting all active jobs.
// We need the complete map to ensure we find a cycle to break.
let job_map =
collect_active_jobs_from_all_queries(tcx, CollectActiveJobsKind::FullNoContention);
let job_map = collect_active_query_jobs(tcx, CollectActiveJobsKind::FullNoContention);
let error = find_cycle_in_stack(try_execute, job_map, &current_query_job(), span);
(mk_cycle(query, tcx, key, error), None)
+4 -4
View File
@@ -12,10 +12,10 @@
use rustc_middle::ty::TyCtxt;
use rustc_span::{DUMMY_SP, Span};
use crate::{CollectActiveJobsKind, collect_active_jobs_from_all_queries};
use crate::{CollectActiveJobsKind, collect_active_query_jobs};
/// Map from query job IDs to job information collected by
/// `collect_active_jobs_from_all_queries`.
/// `collect_active_query_jobs`.
#[derive(Debug, Default)]
pub struct QueryJobMap<'tcx> {
map: FxHashMap<QueryJobId, QueryJobInfo<'tcx>>,
@@ -24,7 +24,7 @@ pub struct QueryJobMap<'tcx> {
impl<'tcx> QueryJobMap<'tcx> {
/// Adds information about a job ID to the job map.
///
/// Should only be called by `gather_active_jobs`.
/// Should only be called by `collect_active_query_jobs_inner`.
pub(crate) fn insert(&mut self, id: QueryJobId, info: QueryJobInfo<'tcx>) {
self.map.insert(id, info);
}
@@ -407,7 +407,7 @@ pub fn print_query_stack<'tcx>(
let mut count_total = 0;
// Make use of a partial query job map if we fail to take locks collecting active queries.
let job_map = collect_active_jobs_from_all_queries(tcx, CollectActiveJobsKind::PartialAllowed);
let job_map = collect_active_query_jobs(tcx, CollectActiveJobsKind::PartialAllowed);
if let Some(ref mut file) = file {
let _ = writeln!(file, "\n\nquery stack during panic:");
+3 -3
View File
@@ -17,7 +17,7 @@
use rustc_middle::ty::TyCtxt;
pub use crate::dep_kind_vtables::make_dep_kind_vtables;
pub use crate::execution::{CollectActiveJobsKind, collect_active_jobs_from_all_queries};
pub use crate::execution::{CollectActiveJobsKind, collect_active_query_jobs};
pub use crate::job::{QueryJobMap, break_query_cycles, print_query_stack};
mod dep_kind_vtables;
@@ -64,6 +64,6 @@ pub fn query_system<'tcx>(
pub fn provide(providers: &mut rustc_middle::util::Providers) {
providers.hooks.alloc_self_profile_query_strings =
profiling_support::alloc_self_profile_query_strings;
providers.hooks.query_key_hash_verify_all = plumbing::query_key_hash_verify_all;
providers.hooks.encode_all_query_results = plumbing::encode_all_query_results;
providers.hooks.verify_query_key_hashes = plumbing::verify_query_key_hashes;
providers.hooks.encode_query_values = plumbing::encode_query_values;
}
+9 -9
View File
@@ -26,10 +26,10 @@
use crate::execution::{all_inactive, force_query};
use crate::job::find_dep_kind_root;
use crate::query_impl::for_each_query_vtable;
use crate::{CollectActiveJobsKind, GetQueryVTable, collect_active_jobs_from_all_queries};
use crate::{CollectActiveJobsKind, GetQueryVTable, collect_active_query_jobs};
fn depth_limit_error<'tcx>(tcx: TyCtxt<'tcx>, job: QueryJobId) {
let job_map = collect_active_jobs_from_all_queries(tcx, CollectActiveJobsKind::Full);
let job_map = collect_active_query_jobs(tcx, CollectActiveJobsKind::Full);
let (span, desc, depth) = find_dep_kind_root(tcx, job, job_map);
let suggested_limit = match tcx.recursion_limit() {
@@ -99,17 +99,17 @@ pub(crate) fn create_query_stack_frame<'tcx, C>(
}
}
pub(crate) fn encode_all_query_results<'tcx>(
pub(crate) fn encode_query_values<'tcx>(
tcx: TyCtxt<'tcx>,
encoder: &mut CacheEncoder<'_, 'tcx>,
query_result_index: &mut EncodedDepNodeIndex,
) {
for_each_query_vtable!(CACHE_ON_DISK, tcx, |query| {
encode_query_results(tcx, query, encoder, query_result_index)
encode_query_values_inner(tcx, query, encoder, query_result_index)
});
}
fn encode_query_results<'a, 'tcx, C, V>(
fn encode_query_values_inner<'a, 'tcx, C, V>(
tcx: TyCtxt<'tcx>,
query: &'tcx QueryVTable<'tcx, C>,
encoder: &mut CacheEncoder<'a, 'tcx>,
@@ -135,17 +135,17 @@ fn encode_query_results<'a, 'tcx, C, V>(
});
}
pub(crate) fn query_key_hash_verify_all<'tcx>(tcx: TyCtxt<'tcx>) {
pub(crate) fn verify_query_key_hashes<'tcx>(tcx: TyCtxt<'tcx>) {
if tcx.sess.opts.unstable_opts.incremental_verify_ich || cfg!(debug_assertions) {
tcx.sess.time("query_key_hash_verify_all", || {
tcx.sess.time("verify_query_key_hashes", || {
for_each_query_vtable!(ALL, tcx, |query| {
query_key_hash_verify(query, tcx);
verify_query_key_hashes_inner(query, tcx);
});
});
}
}
fn query_key_hash_verify<'tcx, C: QueryCache>(
fn verify_query_key_hashes_inner<'tcx, C: QueryCache>(
query: &'tcx QueryVTable<'tcx, C>,
tcx: TyCtxt<'tcx>,
) {
@@ -192,7 +192,7 @@ pub(crate) fn alloc_self_profile_query_strings(tcx: TyCtxt<'_>) {
let mut string_cache = QueryKeyStringCache::new();
for_each_query_vtable!(ALL, tcx, |query| {
alloc_self_profile_query_strings_for_query_cache(tcx, query, &mut string_cache);
alloc_self_profile_query_strings_inner(tcx, query, &mut string_cache);
});
tcx.sess.prof.store_query_cache_hits();
@@ -201,7 +201,7 @@ pub(crate) fn alloc_self_profile_query_strings(tcx: TyCtxt<'_>) {
/// Allocate the self-profiling query strings for a single query cache. This
/// method is called from `alloc_self_profile_query_strings` which knows all
/// the queries via macro magic.
fn alloc_self_profile_query_strings_for_query_cache<'tcx, C>(
fn alloc_self_profile_query_strings_inner<'tcx, C>(
tcx: TyCtxt<'tcx>,
query: &'tcx QueryVTable<'tcx, C>,
string_cache: &mut QueryKeyStringCache,