Merge pull request #21331 from rust-lang/rustc-pull

minor: Rustc pull update
This commit is contained in:
Laurențiu Nicola
2025-12-25 06:00:15 +00:00
committed by GitHub
406 changed files with 5307 additions and 2548 deletions
+4 -3
View File
@@ -15,6 +15,7 @@ on:
- try
- try-perf
- automation/bors/try
- automation/bors/auto
pull_request:
branches:
- "**"
@@ -56,7 +57,7 @@ jobs:
- name: Test citool
# Only test citool on the auto branch, to reduce latency of the calculate matrix job
# on PR/try builds.
if: ${{ github.ref == 'refs/heads/auto' }}
if: ${{ github.ref == 'refs/heads/auto' || github.ref == 'refs/heads/automation/bors/auto' }}
run: |
cd src/ci/citool
CARGO_INCREMENTAL=0 cargo test
@@ -79,7 +80,7 @@ jobs:
# access the environment.
#
# We only enable the environment for the rust-lang/rust repository, so that CI works on forks.
environment: ${{ ((github.repository == 'rust-lang/rust' && (github.ref == 'refs/heads/try' || github.ref == 'refs/heads/try-perf' || github.ref == 'refs/heads/automation/bors/try' || github.ref == 'refs/heads/auto')) && 'bors') || '' }}
environment: ${{ ((github.repository == 'rust-lang/rust' && (github.ref == 'refs/heads/try' || github.ref == 'refs/heads/try-perf' || github.ref == 'refs/heads/automation/bors/try' || github.ref == 'refs/heads/auto' || github.ref == 'refs/heads/automation/bors/auto')) && 'bors') || '' }}
env:
CI_JOB_NAME: ${{ matrix.name }}
CI_JOB_DOC_URL: ${{ matrix.doc_url }}
@@ -313,7 +314,7 @@ jobs:
needs: [ calculate_matrix, job ]
# !cancelled() executes the job regardless of whether the previous jobs passed or failed
if: ${{ !cancelled() && contains(fromJSON('["auto", "try"]'), needs.calculate_matrix.outputs.run_type) }}
environment: ${{ ((github.repository == 'rust-lang/rust' && (github.ref == 'refs/heads/try' || github.ref == 'refs/heads/try-perf' || github.ref == 'refs/heads/automation/bors/try' || github.ref == 'refs/heads/auto')) && 'bors') || '' }}
environment: ${{ ((github.repository == 'rust-lang/rust' && (github.ref == 'refs/heads/try' || github.ref == 'refs/heads/try-perf' || github.ref == 'refs/heads/automation/bors/try' || github.ref == 'refs/heads/auto' || github.ref == 'refs/heads/automation/bors/auto')) && 'bors') || '' }}
steps:
- name: checkout the source code
uses: actions/checkout@v5
+2 -2
View File
@@ -5357,9 +5357,9 @@ dependencies = [
[[package]]
name = "stringdex"
version = "0.0.3"
version = "0.0.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "556a6126952cb2f5150057c98a77cc6c771027dea2825bf7fa03d3d638b0a4f8"
checksum = "c6204af9e1e433f1ef9b6d44475c7089be33c91111d896463b9dfa20464b87f1"
dependencies = [
"stacker",
]
+1 -1
View File
@@ -1810,7 +1810,7 @@ pub enum ExprKind {
/// or a `gen` block (`gen move { ... }`).
///
/// The span is the "decl", which is the header before the body `{ }`
/// including the `asyng`/`gen` keywords and possibly `move`.
/// including the `async`/`gen` keywords and possibly `move`.
Gen(CaptureBy, Box<Block>, GenBlockKind, Span),
/// An await expression (`my_future.await`). Span is of await keyword.
Await(Box<Expr>, Span),
+2 -1
View File
@@ -114,7 +114,8 @@ pub(super) fn lower_expr_mut(&mut self, e: &Expr) -> hir::Expr<'hir> {
}
ExprKind::Tup(elts) => hir::ExprKind::Tup(self.lower_exprs(elts)),
ExprKind::Call(f, args) => {
if let Some(legacy_args) = self.resolver.legacy_const_generic_args(f) {
if let Some(legacy_args) = self.resolver.legacy_const_generic_args(f, self.tcx)
{
self.lower_legacy_const_generics((**f).clone(), args.clone(), &legacy_args)
} else {
let f = self.lower_expr(f);
+7
View File
@@ -281,6 +281,13 @@ fn visit_expr_field(&mut self, field: &'hir ExprField<'hir>) {
});
}
// Records a `ConstArgExprField` node in the HIR map: first index it by its
// span and `HirId`, then visit its children with this field set as their
// parent so nested nodes get the correct parent link.
fn visit_const_arg_expr_field(&mut self, field: &'hir ConstArgExprField<'hir>) {
self.insert(field.span, field.hir_id, Node::ConstArgExprField(field));
// Walk the field's contents (e.g. its expression) under this parent.
self.with_parent(field.hir_id, |this| {
intravisit::walk_const_arg_expr_field(this, field);
})
}
fn visit_stmt(&mut self, stmt: &'hir Stmt<'hir>) {
self.insert(stmt.span, stmt.hir_id, Node::Stmt(stmt));
+50 -7
View File
@@ -47,13 +47,14 @@
use rustc_data_structures::sync::spawn;
use rustc_data_structures::tagged_ptr::TaggedRef;
use rustc_errors::{DiagArgFromDisplay, DiagCtxtHandle};
use rustc_hir::attrs::AttributeKind;
use rustc_hir::def::{DefKind, LifetimeRes, Namespace, PartialRes, PerNS, Res};
use rustc_hir::def_id::{CRATE_DEF_ID, LOCAL_CRATE, LocalDefId};
use rustc_hir::definitions::{DefPathData, DisambiguatorState};
use rustc_hir::lints::DelayedLint;
use rustc_hir::{
self as hir, AngleBrackets, ConstArg, GenericArg, HirId, ItemLocalMap, LifetimeSource,
LifetimeSyntax, ParamName, Target, TraitCandidate,
LifetimeSyntax, ParamName, Target, TraitCandidate, find_attr,
};
use rustc_index::{Idx, IndexSlice, IndexVec};
use rustc_macros::extension;
@@ -236,7 +237,7 @@ fn lower(&self, span: Span) -> Span {
#[extension(trait ResolverAstLoweringExt)]
impl ResolverAstLowering {
fn legacy_const_generic_args(&self, expr: &Expr) -> Option<Vec<usize>> {
fn legacy_const_generic_args(&self, expr: &Expr, tcx: TyCtxt<'_>) -> Option<Vec<usize>> {
let ExprKind::Path(None, path) = &expr.kind else {
return None;
};
@@ -256,11 +257,12 @@ fn legacy_const_generic_args(&self, expr: &Expr) -> Option<Vec<usize>> {
return None;
}
if let Some(v) = self.legacy_const_generic_args.get(&def_id) {
return v.clone();
}
None
find_attr!(
// we can use parsed attrs here since for other crates they're already available
tcx.get_all_attrs(def_id),
AttributeKind::RustcLegacyConstGenerics{fn_indexes,..} => fn_indexes
)
.map(|fn_indexes| fn_indexes.iter().map(|(num, _)| *num).collect())
}
fn get_partial_res(&self, id: NodeId) -> Option<PartialRes> {
@@ -2408,6 +2410,47 @@ fn lower_expr_to_const_arg_direct(&mut self, expr: &Expr) -> hir::ConstArg<'hir>
ConstArg { hir_id: self.next_id(), kind: hir::ConstArgKind::Path(qpath) }
}
ExprKind::Struct(se) => {
let path = self.lower_qpath(
expr.id,
&se.qself,
&se.path,
// FIXME(mgca): we may want this to be `Optional` instead, but
// we would also need to make sure that HIR ty lowering errors
// when these paths wind up in signatures.
ParamMode::Explicit,
AllowReturnTypeNotation::No,
ImplTraitContext::Disallowed(ImplTraitPosition::Path),
None,
);
let fields = self.arena.alloc_from_iter(se.fields.iter().map(|f| {
let hir_id = self.lower_node_id(f.id);
// FIXME(mgca): This might result in lowering attributes that
// then go unused as the `Target::ExprField` is not actually
// corresponding to `Node::ExprField`.
self.lower_attrs(hir_id, &f.attrs, f.span, Target::ExprField);
let expr = if let ExprKind::ConstBlock(anon_const) = &f.expr.kind {
let def_id = self.local_def_id(anon_const.id);
let def_kind = self.tcx.def_kind(def_id);
assert_eq!(DefKind::AnonConst, def_kind);
self.lower_anon_const_to_const_arg_direct(anon_const)
} else {
self.lower_expr_to_const_arg_direct(&f.expr)
};
&*self.arena.alloc(hir::ConstArgExprField {
hir_id,
field: self.lower_ident(f.ident),
expr: self.arena.alloc(expr),
span: self.lower_span(f.span),
})
}));
ConstArg { hir_id: self.next_id(), kind: hir::ConstArgKind::Struct(path, fields) }
}
ExprKind::Underscore => ConstArg {
hir_id: self.lower_node_id(expr.id),
kind: hir::ConstArgKind::Infer(expr.span, ()),
@@ -33,6 +33,9 @@ pub fn where_bound_predicate_to_string(where_bound_predicate: &ast::WhereBoundPr
State::new().where_bound_predicate_to_string(where_bound_predicate)
}
/// # Panics
///
/// Panics if `pat.kind` is `PatKind::Missing`.
pub fn pat_to_string(pat: &ast::Pat) -> String {
State::new().pat_to_string(pat)
}
@@ -0,0 +1,33 @@
use super::prelude::*;
/// Parser for the `#[cfi_encoding = "encoding"]` attribute.
///
/// Accepts a single name-value string argument and produces
/// `AttributeKind::CfiEncoding`. Presumably this overrides the CFI
/// (control-flow integrity) type encoding for the annotated type —
/// NOTE(review): semantics inferred from the attribute name; confirm
/// against the consumer of `AttributeKind::CfiEncoding`.
pub(crate) struct CfiEncodingParser;
impl<S: Stage> SingleAttributeParser<S> for CfiEncodingParser {
const PATH: &[Symbol] = &[sym::cfi_encoding];
// Allowed on type definitions only; other targets get a warning rather
// than a hard error (`AllowListWarnRest`).
const ALLOWED_TARGETS: AllowedTargets = AllowedTargets::AllowListWarnRest(&[
Allow(Target::Struct),
Allow(Target::ForeignTy),
Allow(Target::Enum),
Allow(Target::Union),
]);
// On repeated use, keep the innermost attribute and report duplicates
// as errors.
const ATTRIBUTE_ORDER: AttributeOrder = AttributeOrder::KeepInnermost;
const ON_DUPLICATE: OnDuplicate<S> = OnDuplicate::Error;
const TEMPLATE: AttributeTemplate = template!(NameValueStr: "encoding");
// Validates the attribute's argument shape and extracts the encoding
// string; returns `None` (after emitting a diagnostic) on malformed input.
fn convert(cx: &mut AcceptContext<'_, '_, S>, args: &ArgParser) -> Option<AttributeKind> {
// Must be the `name = value` form, e.g. `#[cfi_encoding = "..."]`.
let Some(name_value) = args.name_value() else {
cx.expected_name_value(cx.attr_span, Some(sym::cfi_encoding));
return None;
};
// The value must be a string literal.
let Some(value_str) = name_value.value_as_str() else {
cx.expected_string_literal(name_value.value_span, None);
return None;
};
// Reject empty (or whitespace-only) encodings.
if value_str.as_str().trim().is_empty() {
cx.expected_non_empty_string_literal(name_value.value_span);
return None;
}
Some(AttributeKind::CfiEncoding { encoding: value_str })
}
}
@@ -33,6 +33,7 @@
pub(crate) mod body;
pub(crate) mod cfg;
pub(crate) mod cfg_select;
pub(crate) mod cfi_encoding;
pub(crate) mod codegen_attrs;
pub(crate) mod confusables;
pub(crate) mod crate_level;
@@ -19,6 +19,7 @@
AllowConstFnUnstableParser, AllowInternalUnstableParser, UnstableFeatureBoundParser,
};
use crate::attributes::body::CoroutineParser;
use crate::attributes::cfi_encoding::CfiEncodingParser;
use crate::attributes::codegen_attrs::{
ColdParser, CoverageParser, EiiExternItemParser, ExportNameParser, ForceTargetFeatureParser,
NakedParser, NoMangleParser, ObjcClassParser, ObjcSelectorParser, OptimizeParser,
@@ -187,6 +188,7 @@ mod late {
// tidy-alphabetical-end
// tidy-alphabetical-start
Single<CfiEncodingParser>,
Single<CoverageParser>,
Single<CrateNameParser>,
Single<CustomMirParser>,
@@ -498,6 +500,10 @@ pub(crate) fn expected_nv_or_no_args(&self, span: Span) -> ErrorGuaranteed {
self.emit_parse_error(span, AttributeParseErrorReason::ExpectedNameValueOrNoArgs)
}
pub(crate) fn expected_non_empty_string_literal(&self, span: Span) -> ErrorGuaranteed {
self.emit_parse_error(span, AttributeParseErrorReason::ExpectedNonEmptyStringLiteral)
}
pub(crate) fn expected_no_args(&self, span: Span) -> ErrorGuaranteed {
self.emit_parse_error(span, AttributeParseErrorReason::ExpectedNoArgs)
}
@@ -521,6 +521,7 @@ pub(crate) enum AttributeParseErrorReason<'a> {
ExpectedList,
ExpectedListOrNoArgs,
ExpectedNameValueOrNoArgs,
ExpectedNonEmptyStringLiteral,
UnexpectedLiteral,
ExpectedNameValue(Option<Symbol>),
DuplicateKey(Symbol),
@@ -599,6 +600,9 @@ fn into_diag(self, dcx: DiagCtxtHandle<'a>, level: Level) -> Diag<'a, G> {
AttributeParseErrorReason::ExpectedNameValueOrNoArgs => {
diag.span_label(self.span, "didn't expect a list here");
}
AttributeParseErrorReason::ExpectedNonEmptyStringLiteral => {
diag.span_label(self.span, "string is not allowed to be empty");
}
AttributeParseErrorReason::DuplicateKey(key) => {
diag.span_label(self.span, format!("found `{key}` used as a key more than once"));
diag.code(E0538);
@@ -764,6 +764,7 @@ fn later_use_kind(
{
// Just point to the function, to reduce the chance of overlapping spans.
let function_span = match func {
Operand::RuntimeChecks(_) => span,
Operand::Constant(c) => c.span,
Operand::Copy(place) | Operand::Move(place) => {
if let Some(l) = place.as_local() {
@@ -809,6 +810,7 @@ fn later_use_kind(
{
// Just point to the function, to reduce the chance of overlapping spans.
let function_span = match func {
Operand::RuntimeChecks(_) => span,
Operand::Constant(c) => c.span,
Operand::Copy(place) | Operand::Move(place) => {
if let Some(l) = place.as_local() {
+6 -10
View File
@@ -1,4 +1,4 @@
//! This query borrow-checks the MIR to (further) ensure it is not broken.
//! This crate implements MIR typeck and MIR borrowck.
// tidy-alphabetical-start
#![allow(internal_features)]
@@ -111,9 +111,9 @@ pub fn provide(providers: &mut Providers) {
*providers = Providers { mir_borrowck, ..*providers };
}
/// Provider for `query mir_borrowck`. Similar to `typeck`, this must
/// only be called for typeck roots which will then borrowck all
/// nested bodies as well.
/// Provider for `query mir_borrowck`. Unlike `typeck`, this must
/// only be called for typeck roots which *similar* to `typeck` will
/// then borrowck all nested bodies as well.
fn mir_borrowck(
tcx: TyCtxt<'_>,
def: LocalDefId,
@@ -1559,10 +1559,6 @@ fn consume_rvalue(
self.consume_operand(location, (operand2, span), state);
}
Rvalue::NullaryOp(_op) => {
// nullary ops take no dynamic input; no borrowck effect.
}
Rvalue::Aggregate(aggregate_kind, operands) => {
// We need to report back the list of mutable upvars that were
// moved into the closure and subsequently used by the closure,
@@ -1699,7 +1695,7 @@ fn propagate_closure_used_mut_upvar(&mut self, operand: &Operand<'tcx>) {
_ => propagate_closure_used_mut_place(self, place),
}
}
Operand::Constant(..) => {}
Operand::Constant(..) | Operand::RuntimeChecks(_) => {}
}
}
@@ -1750,7 +1746,7 @@ fn consume_operand(
state,
);
}
Operand::Constant(_) => {}
Operand::Constant(_) | Operand::RuntimeChecks(_) => {}
}
}
@@ -247,7 +247,7 @@ fn consume_operand(&mut self, location: Location, operand: &Operand<'tcx>) {
LocalMutationIsAllowed::Yes,
);
}
Operand::Constant(_) => {}
Operand::Constant(_) | Operand::RuntimeChecks(_) => {}
}
}
@@ -314,8 +314,6 @@ fn consume_rvalue(&mut self, location: Location, rvalue: &Rvalue<'tcx>) {
self.consume_operand(location, operand2);
}
Rvalue::NullaryOp(_op) => {}
Rvalue::Aggregate(_, operands) => {
for operand in operands {
self.consume_operand(location, operand);
+1 -1
View File
@@ -255,7 +255,7 @@ pub(super) fn do_mir_borrowck(&mut self) {
}
// We now apply the closure requirements of nested bodies modulo
// regions. In case a body does not depend on opaque types, we
// opaques. In case a body does not depend on opaque types, we
// eagerly check its region constraints and use the final closure
// requirements.
//
@@ -1023,7 +1023,7 @@ fn visit_rvalue(&mut self, rvalue: &Rvalue<'tcx>, location: Location) {
// element, so we require the `Copy` trait.
if len.try_to_target_usize(tcx).is_none_or(|len| len > 1) {
match operand {
Operand::Copy(..) | Operand::Constant(..) => {
Operand::Copy(..) | Operand::Constant(..) | Operand::RuntimeChecks(_) => {
// These are always okay: direct use of a const, or a value that can
// evidently be copied.
}
@@ -1046,8 +1046,6 @@ fn visit_rvalue(&mut self, rvalue: &Rvalue<'tcx>, location: Location) {
}
}
&Rvalue::NullaryOp(NullOp::RuntimeChecks(_)) => {}
Rvalue::ShallowInitBox(_operand, ty) => {
let trait_ref =
ty::TraitRef::new(tcx, tcx.require_lang_item(LangItem::Sized, span), [*ty]);
@@ -2276,7 +2274,6 @@ fn rvalue_user_ty(&self, rvalue: &Rvalue<'tcx>) -> Option<UserTypeAnnotationInde
| Rvalue::Cast(..)
| Rvalue::ShallowInitBox(..)
| Rvalue::BinaryOp(..)
| Rvalue::NullaryOp(..)
| Rvalue::CopyForDeref(..)
| Rvalue::UnaryOp(..)
| Rvalue::Discriminant(..)
@@ -1,13 +1,12 @@
//! Code to extract the universally quantified regions declared on a
//! function and the relationships between them. For example:
//! function. For example:
//!
//! ```
//! fn foo<'a, 'b, 'c: 'b>() { }
//! ```
//!
//! here we would return a map assigning each of `{'a, 'b, 'c}`
//! to an index, as well as the `FreeRegionMap` which can compute
//! relationships between them.
//! to an index.
//!
//! The code in this file doesn't *do anything* with those results; it
//! just returns them for other code to use.
@@ -271,8 +270,7 @@ impl<'tcx> UniversalRegions<'tcx> {
/// Creates a new and fully initialized `UniversalRegions` that
/// contains indices for all the free regions found in the given
/// MIR -- that is, all the regions that appear in the function's
/// signature. This will also compute the relationships that are
/// known between those regions.
/// signature.
pub(crate) fn new(infcx: &BorrowckInferCtxt<'tcx>, mir_def: LocalDefId) -> Self {
UniversalRegionsBuilder { infcx, mir_def }.build()
}
@@ -648,17 +646,14 @@ fn defining_ty(&self) -> DefiningTy<'tcx> {
BodyOwnerKind::Const { .. } | BodyOwnerKind::Static(..) => {
let identity_args = GenericArgs::identity_for_item(tcx, typeck_root_def_id);
if self.mir_def.to_def_id() == typeck_root_def_id
// Do not ICE when checking default_field_values consts with lifetimes (#135649)
&& DefKind::Field != tcx.def_kind(tcx.parent(typeck_root_def_id))
{
if self.mir_def.to_def_id() == typeck_root_def_id {
let args = self.infcx.replace_free_regions_with_nll_infer_vars(
NllRegionVariableOrigin::FreeRegion,
identity_args,
);
DefiningTy::Const(self.mir_def.to_def_id(), args)
} else {
// FIXME this line creates a dependency between borrowck and typeck.
// FIXME: this line creates a query dependency between borrowck and typeck.
//
// This is required for `AscribeUserType` canonical query, which will call
// `type_of(inline_const_def_id)`. That `type_of` would inject erased lifetimes
@@ -699,30 +694,14 @@ fn compute_indices(
let tcx = self.infcx.tcx;
let typeck_root_def_id = tcx.typeck_root_def_id(self.mir_def.to_def_id());
let identity_args = GenericArgs::identity_for_item(tcx, typeck_root_def_id);
let fr_args = match defining_ty {
DefiningTy::Closure(_, args)
| DefiningTy::CoroutineClosure(_, args)
| DefiningTy::Coroutine(_, args)
| DefiningTy::InlineConst(_, args) => {
// In the case of closures, we rely on the fact that
// the first N elements in the ClosureArgs are
// inherited from the `typeck_root_def_id`.
// Therefore, when we zip together (below) with
// `identity_args`, we will get only those regions
// that correspond to early-bound regions declared on
// the `typeck_root_def_id`.
assert!(args.len() >= identity_args.len());
assert_eq!(args.regions().count(), identity_args.regions().count());
args
}
DefiningTy::FnDef(_, args) | DefiningTy::Const(_, args) => args,
DefiningTy::GlobalAsm(_) => ty::List::empty(),
};
let renumbered_args = defining_ty.args();
let global_mapping = iter::once((tcx.lifetimes.re_static, fr_static));
let arg_mapping = iter::zip(identity_args.regions(), fr_args.regions().map(|r| r.as_var()));
// This relies on typeck roots being generics_of parents with their
// parameters at the start of nested bodies' generics.
assert!(renumbered_args.len() >= identity_args.len());
let arg_mapping =
iter::zip(identity_args.regions(), renumbered_args.regions().map(|r| r.as_var()));
UniversalRegionIndices {
indices: global_mapping.chain(arg_mapping).collect(),
@@ -862,8 +841,8 @@ fn compute_inputs_and_output(
};
// FIXME(#129952): We probably want a more principled approach here.
if let Err(terr) = inputs_and_output.skip_binder().error_reported() {
self.infcx.set_tainted_by_errors(terr);
if let Err(e) = inputs_and_output.error_reported() {
self.infcx.set_tainted_by_errors(e);
}
inputs_and_output
@@ -55,7 +55,6 @@ builtin_macros_assert_requires_expression = macro requires an expression as an a
builtin_macros_autodiff = autodiff must be applied to function
builtin_macros_autodiff_missing_config = autodiff requires at least a name and mode
builtin_macros_autodiff_mode_activity = {$act} can not be used in {$mode} Mode
builtin_macros_autodiff_not_build = this rustc version does not support autodiff
builtin_macros_autodiff_number_activities = expected {$expected} activities, but found {$found}
builtin_macros_autodiff_ret_activity = invalid return activity {$act} in {$mode} Mode
builtin_macros_autodiff_ty_activity = {$act} can not be used for this type
@@ -209,11 +209,6 @@ pub(crate) fn expand_with_mode(
mut item: Annotatable,
mode: DiffMode,
) -> Vec<Annotatable> {
// FIXME(bjorn3) maybe have the backend directly tell if autodiff is supported?
if cfg!(not(feature = "llvm_enzyme")) {
ecx.sess.dcx().emit_err(errors::AutoDiffSupportNotBuild { span: meta_item.span });
return vec![item];
}
let dcx = ecx.sess.dcx();
// first get information about the annotable item: visibility, signature, name and generic
+20 -25
View File
@@ -60,12 +60,18 @@ fn eii_(
) -> Vec<Annotatable> {
let eii_attr_span = ecx.with_def_site_ctxt(eii_attr_span);
let (item, stmt) = if let Annotatable::Item(item) = item {
(item, false)
let (item, wrap_item): (_, &dyn Fn(_) -> _) = if let Annotatable::Item(item) = item {
(item, &Annotatable::Item)
} else if let Annotatable::Stmt(ref stmt) = item
&& let StmtKind::Item(ref item) = stmt.kind
{
(item.clone(), true)
(item.clone(), &|item| {
Annotatable::Stmt(Box::new(Stmt {
id: DUMMY_NODE_ID,
kind: StmtKind::Item(item),
span: eii_attr_span,
}))
})
} else {
ecx.dcx().emit_err(EiiSharedMacroExpectedFunction {
span: eii_attr_span,
@@ -74,23 +80,25 @@ fn eii_(
return vec![item];
};
let orig_item = item.clone();
let item = *item;
let ast::Item { attrs, id: _, span: _, vis, kind: ItemKind::Fn(func), tokens: _ } = item else {
let ast::Item { attrs, id: _, span: _, vis, kind: ItemKind::Fn(func), tokens: _ } =
item.as_ref()
else {
ecx.dcx().emit_err(EiiSharedMacroExpectedFunction {
span: eii_attr_span,
name: path_to_string(&meta_item.path),
});
return vec![Annotatable::Item(Box::new(item))];
return vec![wrap_item(item)];
};
// only clone what we need
let attrs = attrs.clone();
let func = (**func).clone();
let vis = vis.clone();
let attrs_from_decl =
filter_attrs_for_multiple_eii_attr(ecx, attrs, eii_attr_span, &meta_item.path);
let Ok(macro_name) = name_for_impl_macro(ecx, &func, &meta_item) else {
return vec![Annotatable::Item(orig_item)];
return vec![wrap_item(item)];
};
// span of the declaring item without attributes
@@ -115,7 +123,7 @@ fn eii_(
ecx,
eii_attr_span,
item_span,
*func,
func,
vis,
&attrs_from_decl,
)));
@@ -128,20 +136,7 @@ fn eii_(
decl_span,
)));
if stmt {
return_items
.into_iter()
.map(|i| {
Annotatable::Stmt(Box::new(Stmt {
id: DUMMY_NODE_ID,
kind: StmtKind::Item(i),
span: eii_attr_span,
}))
})
.collect()
} else {
return_items.into_iter().map(|i| Annotatable::Item(i)).collect()
}
return_items.into_iter().map(wrap_item).collect()
}
/// Decide on the name of the macro that can be used to implement the EII.
@@ -216,17 +216,6 @@ pub(crate) struct AutoDiffInvalidApplication {
}
}
pub(crate) use ad_fallback::*;
mod ad_fallback {
use super::*;
#[derive(Diagnostic)]
#[diag(builtin_macros_autodiff_not_build)]
pub(crate) struct AutoDiffSupportNotBuild {
#[primary_span]
pub(crate) span: Span,
}
}
#[derive(Diagnostic)]
#[diag(builtin_macros_concat_bytes_invalid)]
pub(crate) struct ConcatBytesInvalid {
@@ -144,9 +144,7 @@ fn make_expr(self: Box<ExpandInclude<'a>>) -> Option<Box<ast::Expr>> {
let mut p = unwrap_or_emit_fatal(new_parser_from_file(
self.psess,
&self.path,
// Don't strip frontmatter for backward compatibility, `---` may be the start of a
// manifold negation. FIXME: Ideally, we wouldn't strip shebangs here either.
StripTokens::Shebang,
StripTokens::Nothing,
Some(self.span),
));
let expr = parse_expr(&mut p).ok()?;
@@ -12,7 +12,7 @@ jobs:
steps:
- uses: actions/checkout@v4
- run: |
sed -i 's/components.*/components = []/' rust-toolchain
sed -i 's/components.*/components = []/' rust-toolchain.toml
- uses: rustsec/audit-check@v1.4.1
with:
token: ${{ secrets.GITHUB_TOKEN }}
+51 -122
View File
@@ -43,42 +43,42 @@ checksum = "9330f8b2ff13f34540b44e946ef35111825727b38d33286ef986142615121801"
[[package]]
name = "cranelift-assembler-x64"
version = "0.126.0"
version = "0.127.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "bf7631e609c97f063f9777aae405e8492abf9bf92336d7aa3f875403dd4ffd7d"
checksum = "8bd963a645179fa33834ba61fa63353998543b07f877e208da9eb47d4a70d1e7"
dependencies = [
"cranelift-assembler-x64-meta",
]
[[package]]
name = "cranelift-assembler-x64-meta"
version = "0.126.0"
version = "0.127.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9c030edccdc4a5bbf28fbfe7701b5cd1f9854b4445184dd34af2a7e8f8db6f45"
checksum = "3f6d5739c9dc6b5553ca758d78d87d127dd19f397f776efecf817b8ba8d0bb01"
dependencies = [
"cranelift-srcgen",
]
[[package]]
name = "cranelift-bforest"
version = "0.126.0"
version = "0.127.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "bb544c1242d0ca98baf01873ebba96c79d5df155d5108d9bb699aefc741f5e6d"
checksum = "ff402c11bb1c9652b67a3e885e84b1b8d00c13472c8fd85211e06a41a63c3e03"
dependencies = [
"cranelift-entity",
]
[[package]]
name = "cranelift-bitset"
version = "0.126.0"
version = "0.127.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f0325aecbafec053d3d3f082edfdca7937e2945e7f09c5ff9672e05198312282"
checksum = "769a0d88c2f5539e9c5536a93a7bf164b0dc68d91e3d00723e5b4ffc1440afdc"
[[package]]
name = "cranelift-codegen"
version = "0.126.0"
version = "0.127.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "abb3236fd319ae897ba00c8a25105081de5c1348576def0e96c062ad259f87a7"
checksum = "d4351f721fb3b26add1c180f0a75c7474bab2f903c8b777c6ca65238ded59a78"
dependencies = [
"bumpalo",
"cranelift-assembler-x64",
@@ -102,9 +102,9 @@ dependencies = [
[[package]]
name = "cranelift-codegen-meta"
version = "0.126.0"
version = "0.127.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7b8791c911a361c539130ace34fb726b16aca4216470ec75d75264b1495c8a3a"
checksum = "61f86c0ba5b96713643f4dd0de0df12844de9c7bb137d6829b174b706939aa74"
dependencies = [
"cranelift-assembler-x64-meta",
"cranelift-codegen-shared",
@@ -114,33 +114,33 @@ dependencies = [
[[package]]
name = "cranelift-codegen-shared"
version = "0.126.0"
version = "0.127.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "12ead718c2a10990870c19b2497b5a04b8aae6024485e33da25b5d02e35819e0"
checksum = "f08605eee8d51fd976a970bd5b16c9529b51b624f8af68f80649ffb172eb85a4"
[[package]]
name = "cranelift-control"
version = "0.126.0"
version = "0.127.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c0a57fc972b5651047efddccb99440d103d9d8c13393ccebde15ddd5b6a1181b"
checksum = "623aab0a09e40f0cf0b5d35eb7832bae4c4f13e3768228e051a6c1a60e88ef5f"
dependencies = [
"arbitrary",
]
[[package]]
name = "cranelift-entity"
version = "0.126.0"
version = "0.127.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5aae980b4a1678b601eab2f52e372ed0b3c9565a31c17f380008cb97b3a699c5"
checksum = "ea0f066e07e3bcbe38884cc5c94c32c7a90267d69df80f187d9dfe421adaa7c4"
dependencies = [
"cranelift-bitset",
]
[[package]]
name = "cranelift-frontend"
version = "0.126.0"
version = "0.127.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a78877016b607982ca1708c0dd4ce23bde04581a39854c9b43a1dca43625b54c"
checksum = "40865b02a0e52ca8e580ad64feef530cb1d05f6bb4972b4eef05e3eaeae81701"
dependencies = [
"cranelift-codegen",
"log",
@@ -150,15 +150,15 @@ dependencies = [
[[package]]
name = "cranelift-isle"
version = "0.126.0"
version = "0.127.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5dc46a68b46d4f53f9f2f02ab8d3a34b00f03a21c124a7a965b8cbf5fdb6773b"
checksum = "104b3c117ae513e9af1d90679842101193a5ccb96ac9f997966d85ea25be2852"
[[package]]
name = "cranelift-jit"
version = "0.126.0"
version = "0.127.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7df920009af919ad9df52eb7b47b1895145822e0c29da9b715a876fc8ecc6d82"
checksum = "3aa5f855cfb8e4253ed2d0dfc1a0b6ebe4912e67aa8b7ee14026ff55ca17f1fe"
dependencies = [
"anyhow",
"cranelift-codegen",
@@ -171,14 +171,14 @@ dependencies = [
"region",
"target-lexicon",
"wasmtime-internal-jit-icache-coherence",
"windows-sys 0.60.2",
"windows-sys 0.61.2",
]
[[package]]
name = "cranelift-module"
version = "0.126.0"
version = "0.127.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ddcf313629071ce74de8e59f02092f5453d1a01047607fc4ad36886b8bd1486c"
checksum = "b1d01806b191b59f4fc4680293dd5f554caf2de5b62f95eff5beef7acb46c29c"
dependencies = [
"anyhow",
"cranelift-codegen",
@@ -187,9 +187,9 @@ dependencies = [
[[package]]
name = "cranelift-native"
version = "0.126.0"
version = "0.127.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "03faa07ec8cf373250a8252eb773d098ff88259fa1c19ee1ecde8012839f4097"
checksum = "e5c54e0a358bc05b48f2032e1c320e7f468da068604f2869b77052eab68eb0fe"
dependencies = [
"cranelift-codegen",
"libc",
@@ -198,9 +198,9 @@ dependencies = [
[[package]]
name = "cranelift-object"
version = "0.126.0"
version = "0.127.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7cca62c14f3c2e4f438192562bbf82d1a98a59543cc66ba04fb658ba99f515a6"
checksum = "3d17e0216be5daabab616647c1918e06dae0708474ba5f7b7762ac24ea5eb126"
dependencies = [
"anyhow",
"cranelift-codegen",
@@ -213,9 +213,9 @@ dependencies = [
[[package]]
name = "cranelift-srcgen"
version = "0.126.0"
version = "0.127.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "0484cb32c527a742e1bba09ef174acac0afb1dcf623ef1adda42849200edcd2e"
checksum = "cc6f4b039f453b66c75e9f7886e5a2af96276e151f44dc19b24b58f9a0c98009"
[[package]]
name = "crc32fast"
@@ -293,7 +293,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "754ca22de805bb5744484a5b151a9e1a8e837d5dc232c2d7d8c2e3492edc8b60"
dependencies = [
"cfg-if",
"windows-link 0.2.1",
"windows-link",
]
[[package]]
@@ -469,31 +469,25 @@ checksum = "9312f7c4f6ff9069b165498234ce8be658059c6728633667c526e27dc2cf1df5"
[[package]]
name = "wasmtime-internal-jit-icache-coherence"
version = "39.0.0"
version = "40.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "3f67986f5c499274ae5b2ba5b173bba0b95d1381f5ca70d8eec657f2392117d8"
checksum = "0858b470463f3e7c73acd6049046049e64be17b98901c2db5047450cf83df1fe"
dependencies = [
"anyhow",
"cfg-if",
"libc",
"windows-sys 0.60.2",
"windows-sys 0.61.2",
]
[[package]]
name = "wasmtime-internal-math"
version = "39.0.0"
version = "40.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a681733e9b5d5d8804ee6cacd59f92c0d87ba2274f42ee1d4e5a943828d0075d"
checksum = "222e1a590ece4e898f20af1e541b61d2cb803f2557e7eaff23e6c1db5434454a"
dependencies = [
"libm",
]
[[package]]
name = "windows-link"
version = "0.1.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5e6ad25900d524eaabdbbb96d20b4311e1e7ae1699af4fb28c17ae66c80d798a"
[[package]]
name = "windows-link"
version = "0.2.1"
@@ -506,16 +500,16 @@ version = "0.52.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "282be5f36a8ce781fad8c8ae18fa3f9beff57ec1b52cb3de0789201425d9a33d"
dependencies = [
"windows-targets 0.52.6",
"windows-targets",
]
[[package]]
name = "windows-sys"
version = "0.60.2"
version = "0.61.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f2f500e4d28234f72040990ec9d39e3a6b950f9f22d3dba18416c35882612bcb"
checksum = "ae137229bcbd6cdf0f7b80a31df61766145077ddf49416a728b02cb3921ff3fc"
dependencies = [
"windows-targets 0.53.3",
"windows-link",
]
[[package]]
@@ -524,31 +518,14 @@ version = "0.52.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9b724f72796e036ab90c1021d4780d4d3d648aca59e491e6b98e725b84e99973"
dependencies = [
"windows_aarch64_gnullvm 0.52.6",
"windows_aarch64_msvc 0.52.6",
"windows_i686_gnu 0.52.6",
"windows_i686_gnullvm 0.52.6",
"windows_i686_msvc 0.52.6",
"windows_x86_64_gnu 0.52.6",
"windows_x86_64_gnullvm 0.52.6",
"windows_x86_64_msvc 0.52.6",
]
[[package]]
name = "windows-targets"
version = "0.53.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d5fe6031c4041849d7c496a8ded650796e7b6ecc19df1a431c1a363342e5dc91"
dependencies = [
"windows-link 0.1.3",
"windows_aarch64_gnullvm 0.53.0",
"windows_aarch64_msvc 0.53.0",
"windows_i686_gnu 0.53.0",
"windows_i686_gnullvm 0.53.0",
"windows_i686_msvc 0.53.0",
"windows_x86_64_gnu 0.53.0",
"windows_x86_64_gnullvm 0.53.0",
"windows_x86_64_msvc 0.53.0",
"windows_aarch64_gnullvm",
"windows_aarch64_msvc",
"windows_i686_gnu",
"windows_i686_gnullvm",
"windows_i686_msvc",
"windows_x86_64_gnu",
"windows_x86_64_gnullvm",
"windows_x86_64_msvc",
]
[[package]]
@@ -557,92 +534,44 @@ version = "0.52.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "32a4622180e7a0ec044bb555404c800bc9fd9ec262ec147edd5989ccd0c02cd3"
[[package]]
name = "windows_aarch64_gnullvm"
version = "0.53.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "86b8d5f90ddd19cb4a147a5fa63ca848db3df085e25fee3cc10b39b6eebae764"
[[package]]
name = "windows_aarch64_msvc"
version = "0.52.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "09ec2a7bb152e2252b53fa7803150007879548bc709c039df7627cabbd05d469"
[[package]]
name = "windows_aarch64_msvc"
version = "0.53.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c7651a1f62a11b8cbd5e0d42526e55f2c99886c77e007179efff86c2b137e66c"
[[package]]
name = "windows_i686_gnu"
version = "0.52.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8e9b5ad5ab802e97eb8e295ac6720e509ee4c243f69d781394014ebfe8bbfa0b"
[[package]]
name = "windows_i686_gnu"
version = "0.53.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c1dc67659d35f387f5f6c479dc4e28f1d4bb90ddd1a5d3da2e5d97b42d6272c3"
[[package]]
name = "windows_i686_gnullvm"
version = "0.52.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "0eee52d38c090b3caa76c563b86c3a4bd71ef1a819287c19d586d7334ae8ed66"
[[package]]
name = "windows_i686_gnullvm"
version = "0.53.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9ce6ccbdedbf6d6354471319e781c0dfef054c81fbc7cf83f338a4296c0cae11"
[[package]]
name = "windows_i686_msvc"
version = "0.52.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "240948bc05c5e7c6dabba28bf89d89ffce3e303022809e73deaefe4f6ec56c66"
[[package]]
name = "windows_i686_msvc"
version = "0.53.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "581fee95406bb13382d2f65cd4a908ca7b1e4c2f1917f143ba16efe98a589b5d"
[[package]]
name = "windows_x86_64_gnu"
version = "0.52.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "147a5c80aabfbf0c7d901cb5895d1de30ef2907eb21fbbab29ca94c5b08b1a78"
[[package]]
name = "windows_x86_64_gnu"
version = "0.53.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "2e55b5ac9ea33f2fc1716d1742db15574fd6fc8dadc51caab1c16a3d3b4190ba"
[[package]]
name = "windows_x86_64_gnullvm"
version = "0.52.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "24d5b23dc417412679681396f2b49f3de8c1473deb516bd34410872eff51ed0d"
[[package]]
name = "windows_x86_64_gnullvm"
version = "0.53.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "0a6e035dd0599267ce1ee132e51c27dd29437f63325753051e71dd9e42406c57"
[[package]]
name = "windows_x86_64_msvc"
version = "0.52.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "589f6da84c646204747d1270a2a5661ea66ed1cced2631d546fdfb155959f9ec"
[[package]]
name = "windows_x86_64_msvc"
version = "0.53.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "271414315aff87387382ec3d271b52d7ae78726f5d44ac98b4f4030c91880486"
+12 -12
View File
@@ -8,12 +8,12 @@ crate-type = ["dylib"]
[dependencies]
# These have to be in sync with each other
cranelift-codegen = { version = "0.126.0", default-features = false, features = ["std", "timing", "unwind", "all-native-arch"] }
cranelift-frontend = { version = "0.126.0" }
cranelift-module = { version = "0.126.0" }
cranelift-native = { version = "0.126.0" }
cranelift-jit = { version = "0.126.0", optional = true }
cranelift-object = { version = "0.126.0" }
cranelift-codegen = { version = "0.127.0", default-features = false, features = ["std", "timing", "unwind", "all-native-arch"] }
cranelift-frontend = { version = "0.127.0" }
cranelift-module = { version = "0.127.0" }
cranelift-native = { version = "0.127.0" }
cranelift-jit = { version = "0.127.0", optional = true }
cranelift-object = { version = "0.127.0" }
target-lexicon = "0.13"
gimli = { version = "0.32", default-features = false, features = ["write"] }
object = { version = "0.37.3", default-features = false, features = ["std", "read_core", "write", "archive", "coff", "elf", "macho", "pe"] }
@@ -24,12 +24,12 @@ smallvec = "1.8.1"
[patch.crates-io]
# Uncomment to use an unreleased version of cranelift
#cranelift-codegen = { git = "https://github.com/bytecodealliance/wasmtime.git", branch = "release-39.0.0" }
#cranelift-frontend = { git = "https://github.com/bytecodealliance/wasmtime.git", branch = "release-39.0.0" }
#cranelift-module = { git = "https://github.com/bytecodealliance/wasmtime.git", branch = "release-39.0.0" }
#cranelift-native = { git = "https://github.com/bytecodealliance/wasmtime.git", branch = "release-39.0.0" }
#cranelift-jit = { git = "https://github.com/bytecodealliance/wasmtime.git", branch = "release-39.0.0" }
#cranelift-object = { git = "https://github.com/bytecodealliance/wasmtime.git", branch = "release-39.0.0" }
#cranelift-codegen = { git = "https://github.com/bytecodealliance/wasmtime.git", branch = "release-40.0.0" }
#cranelift-frontend = { git = "https://github.com/bytecodealliance/wasmtime.git", branch = "release-40.0.0" }
#cranelift-module = { git = "https://github.com/bytecodealliance/wasmtime.git", branch = "release-40.0.0" }
#cranelift-native = { git = "https://github.com/bytecodealliance/wasmtime.git", branch = "release-40.0.0" }
#cranelift-jit = { git = "https://github.com/bytecodealliance/wasmtime.git", branch = "release-40.0.0" }
#cranelift-object = { git = "https://github.com/bytecodealliance/wasmtime.git", branch = "release-40.0.0" }
# Uncomment to use local checkout of cranelift
#cranelift-codegen = { path = "../wasmtime/cranelift/codegen" }
@@ -1,4 +1,4 @@
[toolchain]
channel = "nightly-2025-12-18"
channel = "nightly-2025-12-23"
components = ["rust-src", "rustc-dev", "llvm-tools", "rustfmt"]
profile = "minimal"
+6 -12
View File
@@ -10,7 +10,7 @@
use rustc_index::IndexVec;
use rustc_middle::ty::TypeVisitableExt;
use rustc_middle::ty::adjustment::PointerCoercion;
use rustc_middle::ty::layout::{FnAbiOf, HasTypingEnv};
use rustc_middle::ty::layout::FnAbiOf;
use rustc_middle::ty::print::with_no_trimmed_paths;
use rustc_session::config::OutputFilenames;
use rustc_span::Symbol;
@@ -853,17 +853,6 @@ fn is_wide_ptr<'tcx>(fx: &FunctionCx<'_, '_, 'tcx>, ty: Ty<'tcx>) -> bool {
fx.bcx.ins().nop();
}
}
Rvalue::NullaryOp(ref null_op) => {
assert!(lval.layout().ty.is_sized(fx.tcx, fx.typing_env()));
let val = match null_op {
NullOp::RuntimeChecks(kind) => kind.value(fx.tcx.sess),
};
let val = CValue::by_val(
fx.bcx.ins().iconst(types::I8, i64::from(val)),
fx.layout_of(fx.tcx.types.bool),
);
lval.write_cvalue(fx, val);
}
Rvalue::Aggregate(ref kind, ref operands)
if matches!(**kind, AggregateKind::RawPtr(..)) =>
{
@@ -1050,6 +1039,11 @@ pub(crate) fn codegen_operand<'tcx>(
cplace.to_cvalue(fx)
}
Operand::Constant(const_) => crate::constant::codegen_constant_operand(fx, const_),
Operand::RuntimeChecks(checks) => {
let val = checks.value(fx.tcx.sess);
let layout = fx.layout_of(fx.tcx.types.bool);
return CValue::const_val(fx, layout, val.into());
}
}
}
@@ -540,6 +540,7 @@ pub(crate) fn mir_operand_get_const_val<'tcx>(
operand: &Operand<'tcx>,
) -> Option<ScalarInt> {
match operand {
Operand::RuntimeChecks(checks) => Some(checks.value(fx.tcx.sess).into()),
Operand::Constant(const_) => eval_mir_constant(fx, const_).0.try_to_scalar_int(),
// FIXME(rust-lang/rust#85105): Casts like `IMM8 as u32` result in the const being stored
// inside a temporary before being passed to the intrinsic requiring the const argument.
@@ -130,7 +130,7 @@ pub(super) fn codegen_simd_intrinsic_call<'tcx>(
return;
}
let idx = generic_args[2].expect_const().to_value().valtree.unwrap_branch();
let idx = generic_args[2].expect_const().to_branch();
assert_eq!(x.layout(), y.layout());
let layout = x.layout();
@@ -143,7 +143,7 @@ pub(super) fn codegen_simd_intrinsic_call<'tcx>(
let total_len = lane_count * 2;
let indexes = idx.iter().map(|idx| idx.unwrap_leaf().to_u32()).collect::<Vec<u32>>();
let indexes = idx.iter().map(|idx| idx.to_leaf().to_u32()).collect::<Vec<u32>>();
for &idx in &indexes {
assert!(u64::from(idx) < total_len, "idx {} out of range 0..{}", idx, total_len);
@@ -961,9 +961,8 @@ pub(super) fn codegen_simd_intrinsic_call<'tcx>(
let lane_clif_ty = fx.clif_type(val_lane_ty).unwrap();
let ptr_val = ptr.load_scalar(fx);
let alignment = generic_args[3].expect_const().to_value().valtree.unwrap_branch()[0]
.unwrap_leaf()
.to_simd_alignment();
let alignment =
generic_args[3].expect_const().to_branch()[0].to_leaf().to_simd_alignment();
let memflags = match alignment {
SimdAlign::Unaligned => MemFlags::new().with_notrap(),
@@ -1006,15 +1005,6 @@ pub(super) fn codegen_simd_intrinsic_call<'tcx>(
let lane_clif_ty = fx.clif_type(val_lane_ty).unwrap();
let ret_lane_layout = fx.layout_of(ret_lane_ty);
let alignment = generic_args[3].expect_const().to_value().valtree.unwrap_branch()[0]
.unwrap_leaf()
.to_simd_alignment();
let memflags = match alignment {
SimdAlign::Unaligned => MemFlags::new().with_notrap(),
_ => MemFlags::trusted(),
};
for lane_idx in 0..ptr_lane_count {
let val_lane = val.value_lane(fx, lane_idx).load_scalar(fx);
let ptr_lane = ptr.value_lane(fx, lane_idx).load_scalar(fx);
@@ -1030,7 +1020,7 @@ pub(super) fn codegen_simd_intrinsic_call<'tcx>(
fx.bcx.seal_block(if_disabled);
fx.bcx.switch_to_block(if_enabled);
let res = fx.bcx.ins().load(lane_clif_ty, memflags, ptr_lane, 0);
let res = fx.bcx.ins().load(lane_clif_ty, MemFlags::trusted(), ptr_lane, 0);
fx.bcx.ins().jump(next, &[res.into()]);
fx.bcx.switch_to_block(if_disabled);
@@ -1059,9 +1049,8 @@ pub(super) fn codegen_simd_intrinsic_call<'tcx>(
let ret_lane_layout = fx.layout_of(ret_lane_ty);
let ptr_val = ptr.load_scalar(fx);
let alignment = generic_args[3].expect_const().to_value().valtree.unwrap_branch()[0]
.unwrap_leaf()
.to_simd_alignment();
let alignment =
generic_args[3].expect_const().to_branch()[0].to_leaf().to_simd_alignment();
let memflags = match alignment {
SimdAlign::Unaligned => MemFlags::new().with_notrap(),
+2 -2
View File
@@ -14,7 +14,7 @@ bitflags = "2.4.1"
gimli = "0.31"
itertools = "0.12"
libc = "0.2"
libloading = { version = "0.9.0", optional = true }
libloading = { version = "0.9.0" }
measureme = "12.0.1"
object = { version = "0.37.0", default-features = false, features = ["std", "read"] }
rustc-demangle = "0.1.21"
@@ -47,7 +47,7 @@ tracing = "0.1"
[features]
# tidy-alphabetical-start
check_only = ["rustc_llvm/check_only"]
llvm_enzyme = ["dep:libloading"]
llvm_enzyme = []
llvm_offload = []
# tidy-alphabetical-end
+6 -1
View File
@@ -19,7 +19,12 @@ codegen_llvm_lto_bitcode_from_rlib = failed to get bitcode from object file for
codegen_llvm_mismatch_data_layout =
data-layout for target `{$rustc_target}`, `{$rustc_layout}`, differs from LLVM target's `{$llvm_target}` default layout, `{$llvm_layout}`
codegen_llvm_offload_without_enable = using the offload feature requires -Z offload=Enable
codegen_llvm_offload_bundleimages_failed = call to BundleImages failed, `host.out` was not created
codegen_llvm_offload_embed_failed = call to EmbedBufferInModule failed, `host.o` was not created
codegen_llvm_offload_no_abs_path = using the `-Z offload=Host=/absolute/path/to/host.out` flag requires an absolute path
codegen_llvm_offload_no_host_out = using the `-Z offload=Host=/absolute/path/to/host.out` flag must point to a `host.out` file
codegen_llvm_offload_nonexisting = the given path/file to `host.out` does not exist. Did you forget to run the device compilation first?
codegen_llvm_offload_without_enable = using the offload feature requires -Z offload=<Device or Host=/absolute/path/to/host.out>
codegen_llvm_offload_without_fat_lto = using the offload feature requires -C lto=fat
codegen_llvm_parse_bitcode = failed to parse bitcode for LTO module
@@ -528,7 +528,6 @@ fn thin_lto(
}
}
#[cfg(feature = "llvm_enzyme")]
pub(crate) fn enable_autodiff_settings(ad: &[config::AutoDiff]) {
let mut enzyme = llvm::EnzymeWrapper::get_instance();
+70 -8
View File
@@ -568,8 +568,7 @@ pub(crate) unsafe fn llvm_optimize(
// FIXME(ZuseZ4): In a future update we could figure out how to only optimize individual functions getting
// differentiated.
let consider_ad =
cfg!(feature = "llvm_enzyme") && config.autodiff.contains(&config::AutoDiff::Enable);
let consider_ad = config.autodiff.contains(&config::AutoDiff::Enable);
let run_enzyme = autodiff_stage == AutodiffStage::DuringAD;
let print_before_enzyme = config.autodiff.contains(&config::AutoDiff::PrintModBefore);
let print_after_enzyme = config.autodiff.contains(&config::AutoDiff::PrintModAfter);
@@ -704,10 +703,9 @@ fn handle_offload<'ll>(cx: &'ll SimpleCx<'_>, old_fn: &llvm::Value) {
llvm::set_value_name(new_fn, &name);
}
if cgcx.target_is_like_gpu && config.offload.contains(&config::Offload::Enable) {
if cgcx.target_is_like_gpu && config.offload.contains(&config::Offload::Device) {
let cx =
SimpleCx::new(module.module_llvm.llmod(), module.module_llvm.llcx, cgcx.pointer_size);
for func in cx.get_functions() {
let offload_kernel = "offload-kernel";
if attributes::has_string_attr(func, offload_kernel) {
@@ -776,12 +774,77 @@ fn handle_offload<'ll>(cx: &'ll SimpleCx<'_>, old_fn: &llvm::Value) {
)
};
if cgcx.target_is_like_gpu && config.offload.contains(&config::Offload::Enable) {
if cgcx.target_is_like_gpu && config.offload.contains(&config::Offload::Device) {
let device_path = cgcx.output_filenames.path(OutputType::Object);
let device_dir = device_path.parent().unwrap();
let device_out = device_dir.join("host.out");
let device_out_c = path_to_c_string(device_out.as_path());
unsafe {
llvm::LLVMRustBundleImages(module.module_llvm.llmod(), module.module_llvm.tm.raw());
// 1) Bundle device module into offload image host.out (device TM)
let ok = llvm::LLVMRustBundleImages(
module.module_llvm.llmod(),
module.module_llvm.tm.raw(),
device_out_c.as_ptr(),
);
if !ok || !device_out.exists() {
dcx.emit_err(crate::errors::OffloadBundleImagesFailed);
}
}
}
// This assumes that we previously compiled our kernels for a gpu target, which created a
// `host.out` artifact. The user is supposed to provide us with a path to this artifact, we
// don't need any other artifacts from the previous run. We will embed this artifact into our
// LLVM-IR host module, to create a `host.o` ObjectFile, which we will write to disk.
// The last, not yet automated steps uses the `clang-linker-wrapper` to process `host.o`.
if !cgcx.target_is_like_gpu {
if let Some(device_path) = config
.offload
.iter()
.find_map(|o| if let config::Offload::Host(path) = o { Some(path) } else { None })
{
let device_pathbuf = PathBuf::from(device_path);
if device_pathbuf.is_relative() {
dcx.emit_err(crate::errors::OffloadWithoutAbsPath);
} else if device_pathbuf
.file_name()
.and_then(|n| n.to_str())
.is_some_and(|n| n != "host.out")
{
dcx.emit_err(crate::errors::OffloadWrongFileName);
} else if !device_pathbuf.exists() {
dcx.emit_err(crate::errors::OffloadNonexistingPath);
}
let host_path = cgcx.output_filenames.path(OutputType::Object);
let host_dir = host_path.parent().unwrap();
let out_obj = host_dir.join("host.o");
let host_out_c = path_to_c_string(device_pathbuf.as_path());
// 2) Finalize host: lib.bc + host.out -> host.o (host TM)
// We create a full clone of our LLVM host module, since we will embed the device IR
// into it, and this might break caching or incremental compilation otherwise.
let llmod2 = llvm::LLVMCloneModule(module.module_llvm.llmod());
let ok =
unsafe { llvm::LLVMRustOffloadEmbedBufferInModule(llmod2, host_out_c.as_ptr()) };
if !ok {
dcx.emit_err(crate::errors::OffloadEmbedFailed);
}
write_output_file(
dcx,
module.module_llvm.tm.raw(),
config.no_builtins,
llmod2,
&out_obj,
None,
llvm::FileType::ObjectFile,
&cgcx.prof,
true,
);
// We ignore cgcx.save_temps here and unconditionally always keep our `host.out` artifact.
// Otherwise, recompiling the host code would fail since we deleted that device artifact
// in the previous host compilation, which would be confusing at best.
}
}
result.into_result().unwrap_or_else(|()| llvm_err(dcx, LlvmError::RunLlvmPasses))
}
@@ -818,8 +881,7 @@ pub(crate) fn optimize(
// If we know that we will later run AD, then we disable vectorization and loop unrolling.
// Otherwise we pretend AD is already done and run the normal opt pipeline (=PostAD).
let consider_ad =
cfg!(feature = "llvm_enzyme") && config.autodiff.contains(&config::AutoDiff::Enable);
let consider_ad = config.autodiff.contains(&config::AutoDiff::Enable);
let autodiff_stage = if consider_ad { AutodiffStage::PreAD } else { AutodiffStage::PostAD };
// The embedded bitcode is used to run LTO/ThinLTO.
// The bitcode obtained during the `codegen` phase is no longer suitable for performing LTO.
+3 -3
View File
@@ -93,9 +93,9 @@ fn module_codegen(tcx: TyCtxt<'_>, cgu_name: Symbol) -> ModuleCodegen<ModuleLlvm
// They are necessary for correct offload execution. We do this here to simplify the
// `offload` intrinsic, avoiding the need for tracking whether it's the first
// intrinsic call or not.
if cx.sess().opts.unstable_opts.offload.contains(&Offload::Enable)
&& !cx.sess().target.is_like_gpu
{
let has_host_offload =
cx.sess().opts.unstable_opts.offload.iter().any(|o| matches!(o, Offload::Host(_)));
if has_host_offload && !cx.sess().target.is_like_gpu {
cx.offload_globals.replace(Some(OffloadGlobals::declare(&cx)));
}
+20 -1
View File
@@ -32,7 +32,6 @@ fn into_diag(self, dcx: DiagCtxtHandle<'_>, level: Level) -> Diag<'_, G> {
}
}
#[cfg(feature = "llvm_enzyme")]
#[derive(Diagnostic)]
#[diag(codegen_llvm_autodiff_component_unavailable)]
pub(crate) struct AutoDiffComponentUnavailable;
@@ -53,6 +52,26 @@ fn into_diag(self, dcx: DiagCtxtHandle<'_>, level: Level) -> Diag<'_, G> {
#[diag(codegen_llvm_offload_without_fat_lto)]
pub(crate) struct OffloadWithoutFatLTO;
#[derive(Diagnostic)]
#[diag(codegen_llvm_offload_no_abs_path)]
pub(crate) struct OffloadWithoutAbsPath;
#[derive(Diagnostic)]
#[diag(codegen_llvm_offload_no_host_out)]
pub(crate) struct OffloadWrongFileName;
#[derive(Diagnostic)]
#[diag(codegen_llvm_offload_nonexisting)]
pub(crate) struct OffloadNonexistingPath;
#[derive(Diagnostic)]
#[diag(codegen_llvm_offload_bundleimages_failed)]
pub(crate) struct OffloadBundleImagesFailed;
#[derive(Diagnostic)]
#[diag(codegen_llvm_offload_embed_failed)]
pub(crate) struct OffloadEmbedFailed;
#[derive(Diagnostic)]
#[diag(codegen_llvm_lto_bitcode_from_rlib)]
pub(crate) struct LtoBitcodeFromRlib {
+6 -16
View File
@@ -202,13 +202,7 @@ fn codegen_intrinsic_call(
return Ok(());
}
sym::offload => {
if !tcx
.sess
.opts
.unstable_opts
.offload
.contains(&rustc_session::config::Offload::Enable)
{
if tcx.sess.opts.unstable_opts.offload.is_empty() {
let _ = tcx.dcx().emit_almost_fatal(OffloadWithoutEnable);
}
@@ -351,7 +345,7 @@ fn codegen_intrinsic_call(
_ => bug!(),
};
let ptr = args[0].immediate();
let locality = fn_args.const_at(1).to_value().valtree.unwrap_leaf().to_i32();
let locality = fn_args.const_at(1).to_leaf().to_i32();
self.call_intrinsic(
"llvm.prefetch",
&[self.val_ty(ptr)],
@@ -1533,7 +1527,7 @@ fn vector_mask_to_bitmask<'a, 'll, 'tcx>(
}
if name == sym::simd_shuffle_const_generic {
let idx = fn_args[2].expect_const().to_value().valtree.unwrap_branch();
let idx = fn_args[2].expect_const().to_branch();
let n = idx.len() as u64;
let (out_len, out_ty) = require_simd!(ret_ty, SimdReturn);
@@ -1552,7 +1546,7 @@ fn vector_mask_to_bitmask<'a, 'll, 'tcx>(
.iter()
.enumerate()
.map(|(arg_idx, val)| {
let idx = val.unwrap_leaf().to_i32();
let idx = val.to_leaf().to_i32();
if idx >= i32::try_from(total_len).unwrap() {
bx.sess().dcx().emit_err(InvalidMonomorphization::SimdIndexOutOfBounds {
span,
@@ -1964,9 +1958,7 @@ fn llvm_alignment<'ll, 'tcx>(
// those lanes whose `mask` bit is enabled.
// The memory addresses corresponding to the “off” lanes are not accessed.
let alignment = fn_args[3].expect_const().to_value().valtree.unwrap_branch()[0]
.unwrap_leaf()
.to_simd_alignment();
let alignment = fn_args[3].expect_const().to_branch()[0].to_leaf().to_simd_alignment();
// The element type of the "mask" argument must be a signed integer type of any width
let mask_ty = in_ty;
@@ -2059,9 +2051,7 @@ fn llvm_alignment<'ll, 'tcx>(
// those lanes whose `mask` bit is enabled.
// The memory addresses corresponding to the “off” lanes are not accessed.
let alignment = fn_args[3].expect_const().to_value().valtree.unwrap_branch()[0]
.unwrap_leaf()
.to_simd_alignment();
let alignment = fn_args[3].expect_const().to_branch()[0].to_leaf().to_simd_alignment();
// The element type of the "mask" argument must be a signed integer type of any width
let mask_ty = in_ty;
+3 -1
View File
@@ -241,7 +241,9 @@ fn name(&self) -> &'static str {
fn init(&self, sess: &Session) {
llvm_util::init(sess); // Make sure llvm is inited
#[cfg(feature = "llvm_enzyme")]
// autodiff is based on Enzyme, a library which we might not have available, when it was
// neither build, nor downloaded via rustup. If autodiff is used, but not available we emit
// an early error here and abort compilation.
{
use rustc_session::config::AutoDiff;
@@ -86,10 +86,8 @@ pub(crate) enum LLVMRustVerifierFailureAction {
LLVMReturnStatusAction = 2,
}
#[cfg(feature = "llvm_enzyme")]
pub(crate) use self::Enzyme_AD::*;
#[cfg(feature = "llvm_enzyme")]
pub(crate) mod Enzyme_AD {
use std::ffi::{c_char, c_void};
use std::sync::{Mutex, MutexGuard, OnceLock};
@@ -450,147 +448,6 @@ fn get_enzyme_path(sysroot: &Sysroot) -> Result<String, String> {
}
}
#[cfg(not(feature = "llvm_enzyme"))]
pub(crate) use self::Fallback_AD::*;
#[cfg(not(feature = "llvm_enzyme"))]
pub(crate) mod Fallback_AD {
#![allow(unused_variables)]
use std::ffi::c_void;
use std::sync::{Mutex, MutexGuard};
use libc::c_char;
use rustc_codegen_ssa::back::write::CodegenContext;
use rustc_codegen_ssa::traits::WriteBackendMethods;
use super::{CConcreteType, CTypeTreeRef, Context, EnzymeTypeTree};
pub(crate) struct EnzymeWrapper {
pub registerEnzymeAndPassPipeline: *const c_void,
}
impl EnzymeWrapper {
pub(crate) fn get_or_init(
_sysroot: &rustc_session::config::Sysroot,
) -> Result<MutexGuard<'static, Self>, Box<dyn std::error::Error>> {
unimplemented!("Enzyme not available: build with llvm_enzyme feature")
}
pub(crate) fn init<'a, B: WriteBackendMethods>(
_cgcx: &'a CodegenContext<B>,
) -> &'static Mutex<Self> {
unimplemented!("Enzyme not available: build with llvm_enzyme feature")
}
pub(crate) fn get_instance() -> MutexGuard<'static, Self> {
unimplemented!("Enzyme not available: build with llvm_enzyme feature")
}
pub(crate) fn new_type_tree(&self) -> CTypeTreeRef {
unimplemented!()
}
pub(crate) fn new_type_tree_ct(
&self,
t: CConcreteType,
ctx: &Context,
) -> *mut EnzymeTypeTree {
unimplemented!()
}
pub(crate) fn new_type_tree_tr(&self, tree: CTypeTreeRef) -> CTypeTreeRef {
unimplemented!()
}
pub(crate) fn free_type_tree(&self, tree: CTypeTreeRef) {
unimplemented!()
}
pub(crate) fn merge_type_tree(&self, tree1: CTypeTreeRef, tree2: CTypeTreeRef) -> bool {
unimplemented!()
}
pub(crate) fn tree_only_eq(&self, tree: CTypeTreeRef, num: i64) {
unimplemented!()
}
pub(crate) fn tree_data0_eq(&self, tree: CTypeTreeRef) {
unimplemented!()
}
pub(crate) fn shift_indicies_eq(
&self,
tree: CTypeTreeRef,
data_layout: *const c_char,
offset: i64,
max_size: i64,
add_offset: u64,
) {
unimplemented!()
}
pub(crate) fn tree_insert_eq(
&self,
tree: CTypeTreeRef,
indices: *const i64,
len: usize,
ct: CConcreteType,
ctx: &Context,
) {
unimplemented!()
}
pub(crate) fn tree_to_string(&self, tree: *mut EnzymeTypeTree) -> *const c_char {
unimplemented!()
}
pub(crate) fn tree_to_string_free(&self, ch: *const c_char) {
unimplemented!()
}
pub(crate) fn get_max_type_depth(&self) -> usize {
unimplemented!()
}
pub(crate) fn set_inline(&mut self, val: bool) {
unimplemented!()
}
pub(crate) fn set_print_perf(&mut self, print: bool) {
unimplemented!()
}
pub(crate) fn set_print_activity(&mut self, print: bool) {
unimplemented!()
}
pub(crate) fn set_print_type(&mut self, print: bool) {
unimplemented!()
}
pub(crate) fn set_print_type_fun(&mut self, fun_name: &str) {
unimplemented!()
}
pub(crate) fn set_print(&mut self, print: bool) {
unimplemented!()
}
pub(crate) fn set_strict_aliasing(&mut self, strict: bool) {
unimplemented!()
}
pub(crate) fn set_loose_types(&mut self, loose: bool) {
unimplemented!()
}
pub(crate) fn set_rust_rules(&mut self, val: bool) {
unimplemented!()
}
}
}
impl TypeTree {
pub(crate) fn new() -> TypeTree {
let wrapper = EnzymeWrapper::get_instance();
+20 -2
View File
@@ -1666,7 +1666,15 @@ mod Offload {
use super::*;
unsafe extern "C" {
/// Processes the module and writes it in an offload compatible way into a "host.out" file.
pub(crate) fn LLVMRustBundleImages<'a>(M: &'a Module, TM: &'a TargetMachine) -> bool;
pub(crate) fn LLVMRustBundleImages<'a>(
M: &'a Module,
TM: &'a TargetMachine,
host_out: *const c_char,
) -> bool;
pub(crate) unsafe fn LLVMRustOffloadEmbedBufferInModule<'a>(
_M: &'a Module,
_host_out: *const c_char,
) -> bool;
pub(crate) fn LLVMRustOffloadMapper<'a>(OldFn: &'a Value, NewFn: &'a Value);
}
}
@@ -1680,7 +1688,17 @@ mod Offload_fallback {
/// Processes the module and writes it in an offload compatible way into a "host.out" file.
/// Marked as unsafe to match the real offload wrapper which is unsafe due to FFI.
#[allow(unused_unsafe)]
pub(crate) unsafe fn LLVMRustBundleImages<'a>(_M: &'a Module, _TM: &'a TargetMachine) -> bool {
pub(crate) unsafe fn LLVMRustBundleImages<'a>(
_M: &'a Module,
_TM: &'a TargetMachine,
_host_out: *const c_char,
) -> bool {
unimplemented!("This rustc version was not built with LLVM Offload support!");
}
pub(crate) unsafe fn LLVMRustOffloadEmbedBufferInModule<'a>(
_M: &'a Module,
_host_out: *const c_char,
) -> bool {
unimplemented!("This rustc version was not built with LLVM Offload support!");
}
#[allow(unused_unsafe)]
@@ -383,6 +383,7 @@ fn update_target_reliable_float_cfg(sess: &Session, cfg: &mut TargetConfig) {
// Infinite recursion <https://github.com/llvm/llvm-project/issues/97981>
(Arch::CSky, _) => false,
(Arch::Hexagon, _) if major < 21 => false, // (fixed in llvm21)
(Arch::LoongArch32 | Arch::LoongArch64, _) if major < 21 => false, // (fixed in llvm21)
(Arch::PowerPC | Arch::PowerPC64, _) => false,
(Arch::Sparc | Arch::Sparc64, _) => false,
(Arch::Wasm32 | Arch::Wasm64, _) => false,
+14 -22
View File
@@ -1,15 +1,10 @@
use rustc_ast::expand::typetree::FncTree;
#[cfg(feature = "llvm_enzyme")]
use {
crate::attributes,
crate::llvm::EnzymeWrapper,
rustc_ast::expand::typetree::TypeTree as RustTypeTree,
std::ffi::{CString, c_char, c_uint},
};
use std::ffi::{CString, c_char, c_uint};
use crate::llvm::{self, Value};
use rustc_ast::expand::typetree::{FncTree, TypeTree as RustTypeTree};
use crate::attributes;
use crate::llvm::{self, EnzymeWrapper, Value};
#[cfg(feature = "llvm_enzyme")]
fn to_enzyme_typetree(
rust_typetree: RustTypeTree,
_data_layout: &str,
@@ -19,7 +14,6 @@ fn to_enzyme_typetree(
process_typetree_recursive(&mut enzyme_tt, &rust_typetree, &[], llcx);
enzyme_tt
}
#[cfg(feature = "llvm_enzyme")]
fn process_typetree_recursive(
enzyme_tt: &mut llvm::TypeTree,
rust_typetree: &RustTypeTree,
@@ -57,13 +51,21 @@ fn process_typetree_recursive(
}
}
#[cfg(feature = "llvm_enzyme")]
#[cfg_attr(not(feature = "llvm_enzyme"), allow(unused))]
pub(crate) fn add_tt<'ll>(
llmod: &'ll llvm::Module,
llcx: &'ll llvm::Context,
fn_def: &'ll Value,
tt: FncTree,
) {
// TypeTree processing uses functions from Enzyme, which we might not have available if we did
// not build this compiler with `llvm_enzyme`. This feature is not strictly necessary, but
// skipping this function increases the chance that Enzyme fails to compile some code.
// FIXME(autodiff): In the future we should conditionally run this function even without the
// `llvm_enzyme` feature, in case that libEnzyme was provided via rustup.
#[cfg(not(feature = "llvm_enzyme"))]
return;
let inputs = tt.args;
let ret_tt: RustTypeTree = tt.ret;
@@ -113,13 +115,3 @@ pub(crate) fn add_tt<'ll>(
enzyme_wrapper.tree_to_string_free(c_str.as_ptr());
}
}
#[cfg(not(feature = "llvm_enzyme"))]
pub(crate) fn add_tt<'ll>(
_llmod: &'ll llvm::Module,
_llcx: &'ll llvm::Context,
_fn_def: &'ll Value,
_tt: FncTree,
) {
unimplemented!()
}
+6 -5
View File
@@ -103,17 +103,18 @@ pub fn link_binary(
});
if outputs.outputs.should_link() {
let tmpdir = TempDirBuilder::new()
.prefix("rustc")
.tempdir()
.unwrap_or_else(|error| sess.dcx().emit_fatal(errors::CreateTempDir { error }));
let path = MaybeTempDir::new(tmpdir, sess.opts.cg.save_temps);
let output = out_filename(
sess,
crate_type,
outputs,
codegen_results.crate_info.local_crate_name,
);
let tmpdir = TempDirBuilder::new()
.prefix("rustc")
.tempdir_in(output.parent().unwrap_or_else(|| Path::new(".")))
.unwrap_or_else(|error| sess.dcx().emit_fatal(errors::CreateTempDir { error }));
let path = MaybeTempDir::new(tmpdir, sess.opts.cg.save_temps);
let crate_name = format!("{}", codegen_results.crate_info.local_crate_name);
let out_filename = output.file_for_writing(
outputs,
+9 -10
View File
@@ -77,22 +77,21 @@ pub fn immediate_const_vector(
.flatten()
.map(|val| {
// A SIMD type has a single field, which is an array.
let fields = val.unwrap_branch();
let fields = val.to_branch();
assert_eq!(fields.len(), 1);
let array = fields[0].unwrap_branch();
let array = fields[0].to_branch();
// Iterate over the array elements to obtain the values in the vector.
let values: Vec<_> = array
.iter()
.map(|field| {
if let Some(prim) = field.try_to_scalar() {
let layout = bx.layout_of(field_ty);
let BackendRepr::Scalar(scalar) = layout.backend_repr else {
bug!("from_const: invalid ByVal layout: {:#?}", layout);
};
bx.scalar_to_backend(prim, scalar, bx.immediate_backend_type(layout))
} else {
let Some(prim) = field.try_to_scalar() else {
bug!("field is not a scalar {:?}", field)
}
};
let layout = bx.layout_of(field_ty);
let BackendRepr::Scalar(scalar) = layout.backend_repr else {
bug!("from_const: invalid ByVal layout: {:#?}", layout);
};
bx.scalar_to_backend(prim, scalar, bx.immediate_backend_type(layout))
})
.collect();
bx.const_vector(&values)
@@ -102,7 +102,7 @@ pub fn codegen_intrinsic_call(
};
let parse_atomic_ordering = |ord: ty::Value<'tcx>| {
let discr = ord.valtree.unwrap_branch()[0].unwrap_leaf();
let discr = ord.to_branch()[0].to_leaf();
discr.to_atomic_ordering()
};
@@ -1056,6 +1056,17 @@ pub fn codegen_operand(
OperandRef { move_annotation, ..self.codegen_consume(bx, place.as_ref()) }
}
mir::Operand::RuntimeChecks(checks) => {
let layout = bx.layout_of(bx.tcx().types.bool);
let BackendRepr::Scalar(scalar) = layout.backend_repr else {
bug!("from_const: invalid ByVal layout: {:#?}", layout);
};
let x = Scalar::from_bool(checks.value(bx.tcx().sess));
let llval = bx.scalar_to_backend(x, scalar, bx.immediate_backend_type(layout));
let val = OperandValue::Immediate(llval);
OperandRef { val, layout, move_annotation: None }
}
mir::Operand::Constant(ref constant) => {
let constant_ty = self.monomorphize(constant.ty());
// Most SIMD vector constants should be passed as immediates.
@@ -619,21 +619,6 @@ pub(crate) fn codegen_rvalue_operand(
}
}
mir::Rvalue::NullaryOp(ref null_op) => {
let val = match null_op {
mir::NullOp::RuntimeChecks(kind) => {
let val = kind.value(bx.tcx().sess);
bx.cx().const_bool(val)
}
};
let tcx = self.cx.tcx();
OperandRef {
val: OperandValue::Immediate(val),
layout: self.cx.layout_of(null_op.ty(tcx)),
move_annotation: None,
}
}
mir::Rvalue::ThreadLocalRef(def_id) => {
assert!(bx.cx().tcx().is_static(def_id));
let layout = bx.layout_of(bx.cx().tcx().static_ptr_ty(def_id, bx.typing_env()));
@@ -645,7 +645,6 @@ fn visit_rvalue(&mut self, rvalue: &Rvalue<'tcx>, location: Location) {
Rvalue::Cast(_, _, _) => {}
Rvalue::NullaryOp(NullOp::RuntimeChecks(_)) => {}
Rvalue::ShallowInitBox(_, _) => {}
Rvalue::UnaryOp(op, operand) => {
@@ -230,9 +230,7 @@ pub fn in_rvalue<'tcx, Q, F>(
F: FnMut(Local) -> bool,
{
match rvalue {
Rvalue::ThreadLocalRef(_) | Rvalue::NullaryOp(..) => {
Q::in_any_value_of_ty(cx, rvalue.ty(cx.body, cx.tcx))
}
Rvalue::ThreadLocalRef(_) => Q::in_any_value_of_ty(cx, rvalue.ty(cx.body, cx.tcx)),
Rvalue::Discriminant(place) => in_place::<Q, _>(cx, in_local, place.as_ref()),
@@ -340,6 +338,7 @@ pub fn in_operand<'tcx, Q, F>(
Operand::Copy(place) | Operand::Move(place) => {
return in_place::<Q, _>(cx, in_local, place.as_ref());
}
Operand::RuntimeChecks(_) => return Q::in_any_value_of_ty(cx, cx.tcx.types.bool),
Operand::Constant(c) => c,
};
@@ -198,7 +198,6 @@ fn visit_rvalue(&mut self, rvalue: &mir::Rvalue<'tcx>, location: Location) {
| mir::Rvalue::ThreadLocalRef(..)
| mir::Rvalue::Repeat(..)
| mir::Rvalue::BinaryOp(..)
| mir::Rvalue::NullaryOp(..)
| mir::Rvalue::UnaryOp(..)
| mir::Rvalue::Discriminant(..)
| mir::Rvalue::Aggregate(..)
@@ -122,6 +122,13 @@ fn assert_panic(
unimplemented!()
}
#[inline(always)]
fn runtime_checks(_ecx: &InterpCx<'tcx, Self>, r: RuntimeChecks) -> InterpResult<'tcx, bool> {
// Runtime checks have different value depending on the crate they are codegenned in.
// Verify we aren't trying to evaluate them in mir-optimizations.
panic!("compiletime machine evaluated {r:?}")
}
fn binary_ptr_op(
ecx: &InterpCx<'tcx, Self>,
bin_op: BinOp,
@@ -637,6 +637,16 @@ fn assert_panic(
Err(ConstEvalErrKind::AssertFailure(err)).into()
}
#[inline(always)]
fn runtime_checks(
_ecx: &InterpCx<'tcx, Self>,
_r: mir::RuntimeChecks,
) -> InterpResult<'tcx, bool> {
// We can't look at `tcx.sess` here as that can differ across crates, which can lead to
// unsound differences in evaluating the same constant at different instantiation sites.
interp_ok(true)
}
fn binary_ptr_op(
_ecx: &InterpCx<'tcx, Self>,
_bin_op: mir::BinOp,
@@ -36,13 +36,17 @@ fn branches<'tcx>(
// For enums, we prepend their variant index before the variant's fields so we can figure out
// the variant again when just seeing a valtree.
if let Some(variant) = variant {
branches.push(ty::ValTree::from_scalar_int(*ecx.tcx, variant.as_u32().into()));
branches.push(ty::Const::new_value(
*ecx.tcx,
ty::ValTree::from_scalar_int(*ecx.tcx, variant.as_u32().into()),
ecx.tcx.types.u32,
));
}
for i in 0..field_count {
let field = ecx.project_field(&place, FieldIdx::from_usize(i)).unwrap();
let valtree = const_to_valtree_inner(ecx, &field, num_nodes)?;
branches.push(valtree);
branches.push(ty::Const::new_value(*ecx.tcx, valtree, field.layout.ty));
}
// Have to account for ZSTs here
@@ -65,7 +69,7 @@ fn slice_branches<'tcx>(
for i in 0..n {
let place_elem = ecx.project_index(place, i).unwrap();
let valtree = const_to_valtree_inner(ecx, &place_elem, num_nodes)?;
elems.push(valtree);
elems.push(ty::Const::new_value(*ecx.tcx, valtree, place_elem.layout.ty));
}
Ok(ty::ValTree::from_branches(*ecx.tcx, elems))
@@ -200,8 +204,8 @@ fn reconstruct_place_meta<'tcx>(
&ObligationCause::dummy(),
|ty| ty,
|| {
let branches = last_valtree.unwrap_branch();
last_valtree = *branches.last().unwrap();
let branches = last_valtree.to_branch();
last_valtree = branches.last().unwrap().to_value().valtree;
debug!(?branches, ?last_valtree);
},
);
@@ -212,7 +216,7 @@ fn reconstruct_place_meta<'tcx>(
};
// Get the number of elements in the unsized field.
let num_elems = last_valtree.unwrap_branch().len();
let num_elems = last_valtree.to_branch().len();
MemPlaceMeta::Meta(Scalar::from_target_usize(num_elems as u64, &tcx))
}
@@ -274,7 +278,7 @@ pub fn valtree_to_const_value<'tcx>(
mir::ConstValue::ZeroSized
}
ty::Bool | ty::Int(_) | ty::Uint(_) | ty::Float(_) | ty::Char | ty::RawPtr(_, _) => {
mir::ConstValue::Scalar(Scalar::Int(cv.valtree.unwrap_leaf()))
mir::ConstValue::Scalar(Scalar::Int(cv.to_leaf()))
}
ty::Pat(ty, _) => {
let cv = ty::Value { valtree: cv.valtree, ty };
@@ -301,12 +305,13 @@ pub fn valtree_to_const_value<'tcx>(
|| matches!(cv.ty.kind(), ty::Adt(def, _) if def.is_struct()))
{
// A Scalar tuple/struct; we can avoid creating an allocation.
let branches = cv.valtree.unwrap_branch();
let branches = cv.to_branch();
// Find the non-ZST field. (There can be aligned ZST!)
for (i, &inner_valtree) in branches.iter().enumerate() {
let field = layout.field(&LayoutCx::new(tcx, typing_env), i);
if !field.is_zst() {
let cv = ty::Value { valtree: inner_valtree, ty: field.ty };
let cv =
ty::Value { valtree: inner_valtree.to_value().valtree, ty: field.ty };
return valtree_to_const_value(tcx, typing_env, cv);
}
}
@@ -381,7 +386,7 @@ fn valtree_into_mplace<'tcx>(
// Zero-sized type, nothing to do.
}
ty::Bool | ty::Int(_) | ty::Uint(_) | ty::Float(_) | ty::Char | ty::RawPtr(..) => {
let scalar_int = valtree.unwrap_leaf();
let scalar_int = valtree.to_leaf();
debug!("writing trivial valtree {:?} to place {:?}", scalar_int, place);
ecx.write_immediate(Immediate::Scalar(scalar_int.into()), place).unwrap();
}
@@ -391,13 +396,13 @@ fn valtree_into_mplace<'tcx>(
ecx.write_immediate(imm, place).unwrap();
}
ty::Adt(_, _) | ty::Tuple(_) | ty::Array(_, _) | ty::Str | ty::Slice(_) => {
let branches = valtree.unwrap_branch();
let branches = valtree.to_branch();
// Need to downcast place for enums
let (place_adjusted, branches, variant_idx) = match ty.kind() {
ty::Adt(def, _) if def.is_enum() => {
// First element of valtree corresponds to variant
let scalar_int = branches[0].unwrap_leaf();
let scalar_int = branches[0].to_leaf();
let variant_idx = VariantIdx::from_u32(scalar_int.to_u32());
let variant = def.variant(variant_idx);
debug!(?variant);
@@ -425,7 +430,7 @@ fn valtree_into_mplace<'tcx>(
};
debug!(?place_inner);
valtree_into_mplace(ecx, &place_inner, *inner_valtree);
valtree_into_mplace(ecx, &place_inner, inner_valtree.to_value().valtree);
dump_place(ecx, &place_inner);
}
@@ -545,7 +545,7 @@ enum Op {
let (right, right_len) = self.project_to_simd(&args[1])?;
let (dest, dest_len) = self.project_to_simd(&dest)?;
let index = generic_args[2].expect_const().to_value().valtree.unwrap_branch();
let index = generic_args[2].expect_const().to_branch();
let index_len = index.len();
assert_eq!(left_len, right_len);
@@ -553,7 +553,7 @@ enum Op {
for i in 0..dest_len {
let src_index: u64 =
index[usize::try_from(i).unwrap()].unwrap_leaf().to_u32().into();
index[usize::try_from(i).unwrap()].to_leaf().to_u32().into();
let dest = self.project_index(&dest, i)?;
let val = if src_index < left_len {
@@ -657,9 +657,7 @@ enum Op {
self.check_simd_ptr_alignment(
ptr,
dest_layout,
generic_args[3].expect_const().to_value().valtree.unwrap_branch()[0]
.unwrap_leaf()
.to_simd_alignment(),
generic_args[3].expect_const().to_branch()[0].to_leaf().to_simd_alignment(),
)?;
for i in 0..dest_len {
@@ -689,9 +687,7 @@ enum Op {
self.check_simd_ptr_alignment(
ptr,
args[2].layout,
generic_args[3].expect_const().to_value().valtree.unwrap_branch()[0]
.unwrap_leaf()
.to_simd_alignment(),
generic_args[3].expect_const().to_branch()[0].to_leaf().to_simd_alignment(),
)?;
for i in 0..vals_len {
@@ -298,7 +298,7 @@ fn before_terminator(_ecx: &mut InterpCx<'tcx, Self>) -> InterpResult<'tcx> {
interp_ok(())
}
/// Determines the result of a `NullaryOp::RuntimeChecks` invocation.
/// Determines the result of a `Operand::RuntimeChecks` invocation.
fn runtime_checks(
_ecx: &InterpCx<'tcx, Self>,
r: mir::RuntimeChecks,
@@ -680,16 +680,6 @@ fn float_fuse_mul_add(_ecx: &InterpCx<$tcx, Self>) -> bool {
true
}
#[inline(always)]
fn runtime_checks(
_ecx: &InterpCx<$tcx, Self>,
_r: mir::RuntimeChecks,
) -> InterpResult<$tcx, bool> {
// We can't look at `tcx.sess` here as that can differ across crates, which can lead to
// unsound differences in evaluating the same constant at different instantiation sites.
interp_ok(true)
}
#[inline(always)]
fn adjust_global_allocation<'b>(
_ecx: &InterpCx<$tcx, Self>,
@@ -845,6 +845,11 @@ pub fn eval_operand(
// FIXME: do some more logic on `move` to invalidate the old location
&Copy(place) | &Move(place) => self.eval_place_to_op(place, layout)?,
&RuntimeChecks(checks) => {
let val = M::runtime_checks(self, checks)?;
ImmTy::from_bool(val, self.tcx()).into()
}
Constant(constant) => {
let c = self.instantiate_from_current_frame_and_normalize_erasing_regions(
constant.const_,
@@ -1,7 +1,6 @@
use either::Either;
use rustc_abi::Size;
use rustc_apfloat::{Float, FloatConvert};
use rustc_middle::mir::NullOp;
use rustc_middle::mir::interpret::{InterpResult, PointerArithmetic, Scalar};
use rustc_middle::ty::layout::TyAndLayout;
use rustc_middle::ty::{self, FloatTy, ScalarInt};
@@ -505,11 +504,4 @@ pub fn unary_op(
}
}
}
pub fn nullary_op(&self, null_op: NullOp) -> InterpResult<'tcx, ImmTy<'tcx, M::Provenance>> {
use rustc_middle::mir::NullOp::*;
interp_ok(match null_op {
RuntimeChecks(r) => ImmTy::from_bool(M::runtime_checks(self, r)?, *self.tcx),
})
}
}
@@ -203,11 +203,6 @@ pub fn eval_rvalue_into_place(
self.write_immediate(*result, &dest)?;
}
NullaryOp(null_op) => {
let val = self.nullary_op(null_op)?;
self.write_immediate(*val, &dest)?;
}
Aggregate(box ref kind, ref operands) => {
self.write_aggregate(kind, operands, &dest)?;
}
@@ -392,7 +387,7 @@ fn eval_fn_call_argument(
move_definitely_disjoint: bool,
) -> InterpResult<'tcx, FnArg<'tcx, M::Provenance>> {
interp_ok(match op {
mir::Operand::Copy(_) | mir::Operand::Constant(_) => {
mir::Operand::Copy(_) | mir::Operand::Constant(_) | mir::Operand::RuntimeChecks(_) => {
// Make a regular copy.
let op = self.eval_operand(op, None)?;
FnArg::Copy(op)
@@ -4,7 +4,6 @@
use std::num::NonZero;
use rustc_abi::{FieldIdx, FieldsShape, VariantIdx, Variants};
use rustc_index::{Idx as _, IndexVec};
use rustc_middle::mir::interpret::InterpResult;
use rustc_middle::ty::{self, Ty};
use tracing::trace;
@@ -24,20 +23,6 @@ fn read_discriminant(&mut self, v: &Self::V) -> InterpResult<'tcx, VariantIdx> {
self.ecx().read_discriminant(&v.to_op(self.ecx())?)
}
/// This function provides the chance to reorder the order in which fields are visited for
/// `FieldsShape::Aggregate`.
///
/// The default means we iterate in source declaration order; alternatively this can use
/// `in_memory_order` to iterate in memory order.
#[inline(always)]
fn aggregate_field_iter(
in_memory_order: &IndexVec<u32, FieldIdx>,
) -> impl Iterator<Item = FieldIdx> {
// Allow the optimizer to elide the bounds checking when creating each index.
let _ = FieldIdx::new(in_memory_order.len());
(0..in_memory_order.len()).map(FieldIdx::new)
}
// Recursive actions, ready to be overloaded.
/// Visits the given value, dispatching as appropriate to more specialized visitors.
#[inline(always)]
@@ -171,7 +156,7 @@ fn walk_value(&mut self, v: &Self::V) -> InterpResult<'tcx> {
self.visit_union(v, fields)?;
}
FieldsShape::Arbitrary { in_memory_order, .. } => {
for idx in Self::aggregate_field_iter(in_memory_order) {
for idx in in_memory_order.iter().copied() {
let field = self.ecx().project_field(v, idx)?;
self.visit_field(v, idx.as_usize(), &field)?;
}
+2 -2
View File
@@ -1531,15 +1531,15 @@ fn report_ice(
.map(PathBuf::from)
.map(|env_var| session_diagnostics::IcePathErrorEnv { env_var }),
});
dcx.emit_note(session_diagnostics::IceVersion { version, triple: tuple });
None
}
}
} else {
dcx.emit_note(session_diagnostics::IceVersion { version, triple: tuple });
None
};
dcx.emit_note(session_diagnostics::IceVersion { version, triple: tuple });
if let Some((flags, excluded_cargo_defaults)) = rustc_session::utils::extra_compiler_flags() {
dcx.emit_note(session_diagnostics::IceFlags { flags: flags.join(" ") });
if excluded_cargo_defaults {
+1 -5
View File
@@ -1,6 +1,6 @@
use std::ffi::{CString, OsStr};
use std::path::{Path, PathBuf, absolute};
use std::{env, fs, io};
use std::{fs, io};
use tempfile::TempDir;
@@ -139,8 +139,4 @@ pub fn tempdir_in<P: AsRef<Path>>(&self, dir: P) -> io::Result<TempDir> {
}
self.builder.tempdir_in(dir)
}
pub fn tempdir(&self) -> io::Result<TempDir> {
self.tempdir_in(env::temp_dir())
}
}
+1 -1
View File
@@ -416,7 +416,7 @@ pub fn as_slice(&'a self) -> &'a str {
/// it in the generated .dot file. They can also provide more
/// elaborate (and non-unique) label text that is used in the graphviz
/// rendered output.
///
/// The graph instance is responsible for providing the DOT compatible
/// identifiers for the nodes and (optionally) rendered labels for the nodes and
/// edges, as well as an identifier for the graph itself.
@@ -703,6 +703,9 @@ pub enum AttributeKind {
span: Span,
},
/// Represents `#[cfi_encoding]`
CfiEncoding { encoding: Symbol },
/// Represents `#[rustc_coinductive]`.
Coinductive(Span),
@@ -26,6 +26,7 @@ pub fn encode_cross_crate(&self) -> EncodeCrossCrate {
AsPtr(..) => Yes,
AutomaticallyDerived(..) => Yes,
BodyStability { .. } => No,
CfiEncoding { .. } => Yes,
Coinductive(..) => No,
Cold(..) => No,
Confusables { .. } => Yes,
+13
View File
@@ -494,6 +494,7 @@ pub fn anon_const_hir_id(&self) -> Option<HirId> {
pub fn span(&self) -> Span {
match self.kind {
ConstArgKind::Struct(path, _) => path.span(),
ConstArgKind::Path(path) => path.span(),
ConstArgKind::Anon(anon) => anon.span,
ConstArgKind::Error(span, _) => span,
@@ -513,6 +514,8 @@ pub enum ConstArgKind<'hir, Unambig = ()> {
/// However, in the future, we'll be using it for all of those.
Path(QPath<'hir>),
Anon(&'hir AnonConst),
/// Represents construction of struct/struct variants
Struct(QPath<'hir>, &'hir [&'hir ConstArgExprField<'hir>]),
/// Error const
Error(Span, ErrorGuaranteed),
/// This variant is not always used to represent inference consts, sometimes
@@ -520,6 +523,14 @@ pub enum ConstArgKind<'hir, Unambig = ()> {
Infer(Span, Unambig),
}
#[derive(Clone, Copy, Debug, HashStable_Generic)]
pub struct ConstArgExprField<'hir> {
pub hir_id: HirId,
pub span: Span,
pub field: Ident,
pub expr: &'hir ConstArg<'hir>,
}
#[derive(Clone, Copy, Debug, HashStable_Generic)]
pub struct InferArg {
#[stable_hasher(ignore)]
@@ -4714,6 +4725,7 @@ pub enum Node<'hir> {
ConstArg(&'hir ConstArg<'hir>),
Expr(&'hir Expr<'hir>),
ExprField(&'hir ExprField<'hir>),
ConstArgExprField(&'hir ConstArgExprField<'hir>),
Stmt(&'hir Stmt<'hir>),
PathSegment(&'hir PathSegment<'hir>),
Ty(&'hir Ty<'hir>),
@@ -4773,6 +4785,7 @@ pub fn ident(&self) -> Option<Ident> {
Node::AssocItemConstraint(c) => Some(c.ident),
Node::PatField(f) => Some(f.ident),
Node::ExprField(f) => Some(f.ident),
Node::ConstArgExprField(f) => Some(f.field),
Node::PreciseCapturingNonLifetimeArg(a) => Some(a.ident),
Node::Param(..)
| Node::AnonConst(..)
+23
View File
@@ -396,6 +396,9 @@ fn visit_expr(&mut self, ex: &'v Expr<'v>) -> Self::Result {
fn visit_expr_field(&mut self, field: &'v ExprField<'v>) -> Self::Result {
walk_expr_field(self, field)
}
fn visit_const_arg_expr_field(&mut self, field: &'v ConstArgExprField<'v>) -> Self::Result {
walk_const_arg_expr_field(self, field)
}
fn visit_pattern_type_pattern(&mut self, p: &'v TyPat<'v>) -> Self::Result {
walk_ty_pat(self, p)
}
@@ -954,6 +957,17 @@ pub fn walk_expr_field<'v, V: Visitor<'v>>(visitor: &mut V, field: &'v ExprField
try_visit!(visitor.visit_ident(*ident));
visitor.visit_expr(*expr)
}
pub fn walk_const_arg_expr_field<'v, V: Visitor<'v>>(
visitor: &mut V,
field: &'v ConstArgExprField<'v>,
) -> V::Result {
let ConstArgExprField { hir_id, field, expr, span: _ } = field;
try_visit!(visitor.visit_id(*hir_id));
try_visit!(visitor.visit_ident(*field));
visitor.visit_const_arg_unambig(*expr)
}
/// We track whether an infer var is from a [`Ty`], [`ConstArg`], or [`GenericArg`] so that
/// HIR visitors overriding [`Visitor::visit_infer`] can determine what kind of infer is being visited
pub enum InferKind<'hir> {
@@ -1068,6 +1082,15 @@ pub fn walk_const_arg<'v, V: Visitor<'v>>(
let ConstArg { hir_id, kind } = const_arg;
try_visit!(visitor.visit_id(*hir_id));
match kind {
ConstArgKind::Struct(qpath, field_exprs) => {
try_visit!(visitor.visit_qpath(qpath, *hir_id, qpath.span()));
for field_expr in *field_exprs {
try_visit!(visitor.visit_const_arg_expr_field(field_expr));
}
V::Result::output()
}
ConstArgKind::Path(qpath) => visitor.visit_qpath(qpath, *hir_id, qpath.span()),
ConstArgKind::Anon(anon) => visitor.visit_anon_const(*anon),
ConstArgKind::Error(_, _) => V::Result::output(), // errors and spans are not important
@@ -10,6 +10,7 @@
use rustc_middle::ty::{self, DefiningScopeKind, IsSuggestable, Ty, TyCtxt, TypeVisitableExt};
use rustc_middle::{bug, span_bug};
use rustc_span::{DUMMY_SP, Ident, Span};
use tracing::instrument;
use super::{HirPlaceholderCollector, ItemCtxt, bad_placeholder};
use crate::check::wfcheck::check_static_item;
@@ -17,85 +18,7 @@
mod opaque;
fn anon_const_type_of<'tcx>(icx: &ItemCtxt<'tcx>, def_id: LocalDefId) -> Ty<'tcx> {
use hir::*;
use rustc_middle::ty::Ty;
let tcx = icx.tcx;
let hir_id = tcx.local_def_id_to_hir_id(def_id);
let node = tcx.hir_node(hir_id);
let Node::AnonConst(&AnonConst { span, .. }) = node else {
span_bug!(
tcx.def_span(def_id),
"expected anon const in `anon_const_type_of`, got {node:?}"
);
};
let parent_node_id = tcx.parent_hir_id(hir_id);
let parent_node = tcx.hir_node(parent_node_id);
match parent_node {
// Anon consts "inside" the type system.
Node::ConstArg(&ConstArg {
hir_id: arg_hir_id,
kind: ConstArgKind::Anon(&AnonConst { hir_id: anon_hir_id, .. }),
..
}) if anon_hir_id == hir_id => const_arg_anon_type_of(icx, arg_hir_id, span),
Node::Variant(Variant { disr_expr: Some(e), .. }) if e.hir_id == hir_id => {
tcx.adt_def(tcx.hir_get_parent_item(hir_id)).repr().discr_type().to_ty(tcx)
}
Node::Field(&hir::FieldDef { default: Some(c), def_id: field_def_id, .. })
if c.hir_id == hir_id =>
{
tcx.type_of(field_def_id).instantiate_identity()
}
_ => Ty::new_error_with_message(
tcx,
span,
format!("unexpected anon const parent in type_of(): {parent_node:?}"),
),
}
}
fn const_arg_anon_type_of<'tcx>(icx: &ItemCtxt<'tcx>, arg_hir_id: HirId, span: Span) -> Ty<'tcx> {
use hir::*;
use rustc_middle::ty::Ty;
let tcx = icx.tcx;
match tcx.parent_hir_node(arg_hir_id) {
// Array length const arguments do not have `type_of` fed as there is never a corresponding
// generic parameter definition.
Node::Ty(&hir::Ty { kind: TyKind::Array(_, ref constant), .. })
| Node::Expr(&Expr { kind: ExprKind::Repeat(_, ref constant), .. })
if constant.hir_id == arg_hir_id =>
{
tcx.types.usize
}
Node::TyPat(pat) => {
let node = match tcx.parent_hir_node(pat.hir_id) {
// Or patterns can be nested one level deep
Node::TyPat(p) => tcx.parent_hir_node(p.hir_id),
other => other,
};
let hir::TyKind::Pat(ty, _) = node.expect_ty().kind else { bug!() };
icx.lower_ty(ty)
}
// This is not a `bug!` as const arguments in path segments that did not resolve to anything
// will result in `type_of` never being fed.
_ => Ty::new_error_with_message(
tcx,
span,
"`type_of` called on const argument's anon const before the const argument was lowered",
),
}
}
#[instrument(level = "debug", skip(tcx), ret)]
pub(super) fn type_of(tcx: TyCtxt<'_>, def_id: LocalDefId) -> ty::EarlyBinder<'_, Ty<'_>> {
use rustc_hir::*;
use rustc_middle::ty::Ty;
@@ -408,6 +331,85 @@ pub(super) fn type_of_opaque_hir_typeck(
}
}
fn anon_const_type_of<'tcx>(icx: &ItemCtxt<'tcx>, def_id: LocalDefId) -> Ty<'tcx> {
use hir::*;
use rustc_middle::ty::Ty;
let tcx = icx.tcx;
let hir_id = tcx.local_def_id_to_hir_id(def_id);
let node = tcx.hir_node(hir_id);
let Node::AnonConst(&AnonConst { span, .. }) = node else {
span_bug!(
tcx.def_span(def_id),
"expected anon const in `anon_const_type_of`, got {node:?}"
);
};
let parent_node_id = tcx.parent_hir_id(hir_id);
let parent_node = tcx.hir_node(parent_node_id);
match parent_node {
// Anon consts "inside" the type system.
Node::ConstArg(&ConstArg {
hir_id: arg_hir_id,
kind: ConstArgKind::Anon(&AnonConst { hir_id: anon_hir_id, .. }),
..
}) if anon_hir_id == hir_id => const_arg_anon_type_of(icx, arg_hir_id, span),
Node::Variant(Variant { disr_expr: Some(e), .. }) if e.hir_id == hir_id => {
tcx.adt_def(tcx.hir_get_parent_item(hir_id)).repr().discr_type().to_ty(tcx)
}
Node::Field(&hir::FieldDef { default: Some(c), def_id: field_def_id, .. })
if c.hir_id == hir_id =>
{
tcx.type_of(field_def_id).instantiate_identity()
}
_ => Ty::new_error_with_message(
tcx,
span,
format!("unexpected anon const parent in type_of(): {parent_node:?}"),
),
}
}
fn const_arg_anon_type_of<'tcx>(icx: &ItemCtxt<'tcx>, arg_hir_id: HirId, span: Span) -> Ty<'tcx> {
use hir::*;
use rustc_middle::ty::Ty;
let tcx = icx.tcx;
match tcx.parent_hir_node(arg_hir_id) {
// Array length const arguments do not have `type_of` fed as there is never a corresponding
// generic parameter definition.
Node::Ty(&hir::Ty { kind: TyKind::Array(_, ref constant), .. })
| Node::Expr(&Expr { kind: ExprKind::Repeat(_, ref constant), .. })
if constant.hir_id == arg_hir_id =>
{
tcx.types.usize
}
Node::TyPat(pat) => {
let node = match tcx.parent_hir_node(pat.hir_id) {
// Or patterns can be nested one level deep
Node::TyPat(p) => tcx.parent_hir_node(p.hir_id),
other => other,
};
let hir::TyKind::Pat(ty, _) = node.expect_ty().kind else { bug!() };
icx.lower_ty(ty)
}
// This is not a `bug!` as const arguments in path segments that did not resolve to anything
// will result in `type_of` never being fed.
_ => Ty::new_error_with_message(
tcx,
span,
"`type_of` called on const argument's anon const before the const argument was lowered",
),
}
}
fn infer_placeholder_type<'tcx>(
cx: &dyn HirTyLowerer<'tcx>,
def_id: LocalDefId,
@@ -2263,12 +2263,120 @@ pub fn lower_const_arg(
)
.unwrap_or_else(|guar| Const::new_error(tcx, guar))
}
hir::ConstArgKind::Anon(anon) => self.lower_anon_const(anon),
hir::ConstArgKind::Struct(qpath, inits) => {
self.lower_const_arg_struct(hir_id, qpath, inits, const_arg.span())
}
hir::ConstArgKind::Anon(anon) => self.lower_const_arg_anon(anon),
hir::ConstArgKind::Infer(span, ()) => self.ct_infer(None, span),
hir::ConstArgKind::Error(_, e) => ty::Const::new_error(tcx, e),
}
}
fn lower_const_arg_struct(
&self,
hir_id: HirId,
qpath: hir::QPath<'tcx>,
inits: &'tcx [&'tcx hir::ConstArgExprField<'tcx>],
span: Span,
) -> Const<'tcx> {
// FIXME(mgca): try to deduplicate this function with
// the equivalent HIR typeck logic.
let tcx = self.tcx();
let non_adt_or_variant_res = || {
let e = tcx.dcx().span_err(span, "struct expression with invalid base path");
ty::Const::new_error(tcx, e)
};
let (ty, variant_did) = match qpath {
hir::QPath::Resolved(maybe_qself, path) => {
debug!(?maybe_qself, ?path);
let opt_self_ty = maybe_qself.as_ref().map(|qself| self.lower_ty(qself));
let ty =
self.lower_resolved_ty_path(opt_self_ty, path, hir_id, PermitVariants::Yes);
let variant_did = match path.res {
Res::Def(DefKind::Variant | DefKind::Struct, did) => did,
_ => return non_adt_or_variant_res(),
};
(ty, variant_did)
}
hir::QPath::TypeRelative(hir_self_ty, segment) => {
debug!(?hir_self_ty, ?segment);
let self_ty = self.lower_ty(hir_self_ty);
let opt_res = self.lower_type_relative_ty_path(
self_ty,
hir_self_ty,
segment,
hir_id,
span,
PermitVariants::Yes,
);
let (ty, _, res_def_id) = match opt_res {
Ok(r @ (_, DefKind::Variant | DefKind::Struct, _)) => r,
Ok(_) => return non_adt_or_variant_res(),
Err(e) => return ty::Const::new_error(tcx, e),
};
(ty, res_def_id)
}
};
let ty::Adt(adt_def, adt_args) = ty.kind() else { unreachable!() };
let variant_def = adt_def.variant_with_id(variant_did);
let variant_idx = adt_def.variant_index_with_id(variant_did).as_u32();
let fields = variant_def
.fields
.iter()
.map(|field_def| {
// FIXME(mgca): we aren't really handling privacy, stability,
// or macro hygeniene but we should.
let mut init_expr =
inits.iter().filter(|init_expr| init_expr.field.name == field_def.name);
match init_expr.next() {
Some(expr) => {
if let Some(expr) = init_expr.next() {
let e = tcx.dcx().span_err(
expr.span,
format!(
"struct expression with multiple initialisers for `{}`",
field_def.name,
),
);
return ty::Const::new_error(tcx, e);
}
self.lower_const_arg(expr.expr, FeedConstTy::Param(field_def.did, adt_args))
}
None => {
let e = tcx.dcx().span_err(
span,
format!(
"struct expression with missing field initialiser for `{}`",
field_def.name
),
);
ty::Const::new_error(tcx, e)
}
}
})
.collect::<Vec<_>>();
let opt_discr_const = if adt_def.is_enum() {
let valtree = ty::ValTree::from_scalar_int(tcx, variant_idx.into());
Some(ty::Const::new_value(tcx, valtree, tcx.types.u32))
} else {
None
};
let valtree = ty::ValTree::from_branches(tcx, opt_discr_const.into_iter().chain(fields));
ty::Const::new_value(tcx, valtree, ty)
}
/// Lower a [resolved][hir::QPath::Resolved] path to a (type-level) constant.
fn lower_resolved_const_path(
&self,
@@ -2372,7 +2480,7 @@ fn lower_resolved_const_path(
/// Literals are eagerly converted to a constant, everything else becomes `Unevaluated`.
#[instrument(skip(self), level = "debug")]
fn lower_anon_const(&self, anon: &AnonConst) -> Const<'tcx> {
fn lower_const_arg_anon(&self, anon: &AnonConst) -> Const<'tcx> {
let tcx = self.tcx();
let expr = &tcx.hir_body(anon.body).value;
@@ -2403,8 +2511,8 @@ fn try_lower_anon_const_lit(
) -> Option<Const<'tcx>> {
let tcx = self.tcx();
// Unwrap a block, so that e.g. `{ P }` is recognised as a parameter. Const arguments
// currently have to be wrapped in curly brackets, so it's necessary to special-case.
// Unwrap a block, so that e.g. `{ 1 }` is recognised as a literal. This makes the
// performance optimisation of directly lowering anon consts occur more often.
let expr = match &expr.kind {
hir::ExprKind::Block(block, _) if block.stmts.is_empty() && block.expr.is_some() => {
block.expr.as_ref().unwrap()
@@ -2412,15 +2520,18 @@ fn try_lower_anon_const_lit(
_ => expr,
};
// FIXME(mgca): remove this delayed bug once we start checking this
// when lowering `Ty/ConstKind::Param`s more generally.
if let hir::ExprKind::Path(hir::QPath::Resolved(
_,
&hir::Path { res: Res::Def(DefKind::ConstParam, _), .. },
)) = expr.kind
{
span_bug!(
let e = tcx.dcx().span_delayed_bug(
expr.span,
"try_lower_anon_const_lit: received const param which shouldn't be possible"
"try_lower_anon_const_lit: received const param which shouldn't be possible",
);
return Some(ty::Const::new_error(tcx, e));
};
let lit_input = match expr.kind {
+4
View File
@@ -180,6 +180,8 @@ fn print_node(&mut self, node: Node<'_>) {
Node::ConstArg(a) => self.print_const_arg(a),
Node::Expr(a) => self.print_expr(a),
Node::ExprField(a) => self.print_expr_field(a),
// FIXME(mgca): proper printing for struct exprs
Node::ConstArgExprField(_) => self.word("/* STRUCT EXPR */"),
Node::Stmt(a) => self.print_stmt(a),
Node::PathSegment(a) => self.print_path_segment(a),
Node::Ty(a) => self.print_type(a),
@@ -1135,6 +1137,8 @@ fn print_const_item_rhs(&mut self, ct_rhs: hir::ConstItemRhs<'_>) {
fn print_const_arg(&mut self, const_arg: &hir::ConstArg<'_>) {
match &const_arg.kind {
// FIXME(mgca): proper printing for struct exprs
ConstArgKind::Struct(..) => self.word("/* STRUCT EXPR */"),
ConstArgKind::Path(qpath) => self.print_qpath(qpath, true),
ConstArgKind::Anon(anon) => self.print_anon_const(anon),
ConstArgKind::Error(_, _) => self.word("/*ERROR*/"),
+14 -1
View File
@@ -1838,7 +1838,20 @@ fn report_return_mismatched_types<'infcx>(
hir::ExprKind::Match(.., hir::MatchSource::TryDesugar(_))
)
{
err.span_label(cond_expr.span, "expected this to be `()`");
if let ObligationCauseCode::BlockTailExpression(hir_id, hir::MatchSource::Normal) =
cause.code()
&& let hir::Node::Block(block) = fcx.tcx.hir_node(*hir_id)
&& let hir::Node::Expr(expr) = fcx.tcx.parent_hir_node(block.hir_id)
&& let hir::Node::Expr(if_expr) = fcx.tcx.parent_hir_node(expr.hir_id)
&& let hir::ExprKind::If(_cond, _then, None) = if_expr.kind
{
err.span_label(
cond_expr.span,
"`if` expressions without `else` arms expect their inner expression to be `()`",
);
} else {
err.span_label(cond_expr.span, "expected this to be `()`");
}
if expr.can_have_side_effects() {
fcx.suggest_semicolon_at_end(cond_expr.span, &mut err);
}
+12
View File
@@ -115,6 +115,18 @@ fn typeck_with_inspect<'tcx>(
return tcx.typeck(typeck_root_def_id);
}
// We can't handle bodies containing generic parameters even though
// these generic parameters aren't part of its `generics_of` right now.
//
// See the FIXME on `check_anon_const_invalid_param_uses`.
if tcx.features().min_generic_const_args()
&& let DefKind::AnonConst = tcx.def_kind(def_id)
&& let ty::AnonConstKind::MCG = tcx.anon_const_kind(def_id)
&& let Err(e) = tcx.check_anon_const_invalid_param_uses(def_id)
{
e.raise_fatal();
}
let id = tcx.local_def_id_to_hir_id(def_id);
let node = tcx.hir_node(id);
let span = tcx.def_span(def_id);
+1 -1
View File
@@ -837,7 +837,7 @@ macro_rules! tracked {
tracked!(no_profiler_runtime, true);
tracked!(no_trait_vptr, true);
tracked!(no_unique_section_names, true);
tracked!(offload, vec![Offload::Enable]);
tracked!(offload, vec![Offload::Device]);
tracked!(on_broken_pipe, OnBrokenPipe::Kill);
tracked!(osx_rpath_install_name, true);
tracked!(packed_bundled_libs, true);
@@ -1,6 +1,7 @@
use rustc_hir::attrs::AttributeKind;
use rustc_hir::def::{DefKind, Res};
use rustc_hir::{Expr, ExprKind, ItemKind, Node, find_attr};
use rustc_middle::ty::adjustment::Adjust;
use rustc_session::{declare_lint, declare_lint_pass};
use crate::lints::{ConstItemInteriorMutationsDiag, ConstItemInteriorMutationsSuggestionStatic};
@@ -77,6 +78,13 @@ fn check_expr(&mut self, cx: &LateContext<'tcx>, expr: &'tcx Expr<'tcx>) {
if let ExprKind::Path(qpath) = &receiver.kind
&& let Res::Def(DefKind::Const | DefKind::AssocConst, const_did) =
typeck.qpath_res(qpath, receiver.hir_id)
// Don't consider derefs as those can do arbitrary things
// like using thread local (see rust-lang/rust#150157)
&& !cx
.typeck_results()
.expr_adjustments(receiver)
.into_iter()
.any(|adj| matches!(adj.kind, Adjust::Deref(_)))
// Let's do the attribute check after the other checks for perf reasons
&& find_attr!(
cx.tcx.get_all_attrs(method_did),
@@ -43,8 +43,10 @@
// available. As such, we only try to build it in the first place, if
// llvm.offload is enabled.
#ifdef OFFLOAD
#include "llvm/Bitcode/BitcodeReader.h"
#include "llvm/Object/OffloadBinary.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Transforms/Utils/ModuleUtils.h"
#endif
// for raw `write` in the bad-alloc handler
@@ -174,12 +176,13 @@ static Error writeFile(StringRef Filename, StringRef Data) {
// --image=file=device.bc,triple=amdgcn-amd-amdhsa,arch=gfx90a,kind=openmp
// The input module is the rust code compiled for a gpu target like amdgpu.
// Based on clang/tools/clang-offload-packager/ClangOffloadPackager.cpp
extern "C" bool LLVMRustBundleImages(LLVMModuleRef M, TargetMachine &TM) {
extern "C" bool LLVMRustBundleImages(LLVMModuleRef M, TargetMachine &TM,
const char *HostOutPath) {
std::string Storage;
llvm::raw_string_ostream OS1(Storage);
llvm::WriteBitcodeToFile(*unwrap(M), OS1);
OS1.flush();
auto MB = llvm::MemoryBuffer::getMemBufferCopy(Storage, "module.bc");
auto MB = llvm::MemoryBuffer::getMemBufferCopy(Storage, "device.bc");
SmallVector<char, 1024> BinaryData;
raw_svector_ostream OS2(BinaryData);
@@ -188,19 +191,38 @@ extern "C" bool LLVMRustBundleImages(LLVMModuleRef M, TargetMachine &TM) {
ImageBinary.TheImageKind = object::IMG_Bitcode;
ImageBinary.Image = std::move(MB);
ImageBinary.TheOffloadKind = object::OFK_OpenMP;
ImageBinary.StringData["triple"] = TM.getTargetTriple().str();
ImageBinary.StringData["arch"] = TM.getTargetCPU();
std::string TripleStr = TM.getTargetTriple().str();
llvm::StringRef CPURef = TM.getTargetCPU();
ImageBinary.StringData["triple"] = TripleStr;
ImageBinary.StringData["arch"] = CPURef;
llvm::SmallString<0> Buffer = OffloadBinary::write(ImageBinary);
if (Buffer.size() % OffloadBinary::getAlignment() != 0)
// Offload binary has invalid size alignment
return false;
OS2 << Buffer;
if (Error E = writeFile("host.out",
if (Error E = writeFile(HostOutPath,
StringRef(BinaryData.begin(), BinaryData.size())))
return false;
return true;
}
extern "C" bool LLVMRustOffloadEmbedBufferInModule(LLVMModuleRef HostM,
const char *HostOutPath) {
auto MBOrErr = MemoryBuffer::getFile(HostOutPath);
if (!MBOrErr) {
auto E = MBOrErr.getError();
auto _B = errorCodeToError(E);
return false;
}
MemoryBufferRef Buf = (*MBOrErr)->getMemBufferRef();
Module *M = unwrap(HostM);
StringRef SectionName = ".llvm.offloading";
Align Alignment = Align(8);
llvm::embedBufferInModule(*M, Buf, SectionName, Alignment);
return true;
}
extern "C" void LLVMRustOffloadMapper(LLVMValueRef OldFn, LLVMValueRef NewFn) {
llvm::Function *oldFn = llvm::unwrap<llvm::Function>(OldFn);
llvm::Function *newFn = llvm::unwrap<llvm::Function>(NewFn);
@@ -1440,6 +1440,7 @@ fn encode_def_ids(&mut self) {
hir::Node::ConstArg(hir::ConstArg { kind, .. }) => match kind {
// Skip encoding defs for these as they should not have had a `DefId` created
hir::ConstArgKind::Error(..)
| hir::ConstArgKind::Struct(..)
| hir::ConstArgKind::Path(..)
| hir::ConstArgKind::Infer(..) => true,
hir::ConstArgKind::Anon(..) => false,
+1 -1
View File
@@ -92,7 +92,7 @@ macro_rules! arena_types {
[] name_set: rustc_data_structures::unord::UnordSet<rustc_span::Symbol>,
[] autodiff_item: rustc_ast::expand::autodiff_attrs::AutoDiffItem,
[] ordered_name_set: rustc_data_structures::fx::FxIndexSet<rustc_span::Symbol>,
[] valtree: rustc_middle::ty::ValTreeKind<'tcx>,
[] valtree: rustc_middle::ty::ValTreeKind<rustc_middle::ty::TyCtxt<'tcx>>,
[] stable_order_of_exportable_impls:
rustc_data_structures::fx::FxIndexMap<rustc_hir::def_id::DefId, usize>,
+50
View File
@@ -2,6 +2,8 @@
//! eliminated, and all its methods are now on `TyCtxt`. But the module name
//! stays as `map` because there isn't an obviously better name for it.
use std::ops::ControlFlow;
use rustc_abi::ExternAbi;
use rustc_ast::visit::{VisitorResult, walk_list};
use rustc_data_structures::fingerprint::Fingerprint;
@@ -737,6 +739,7 @@ pub fn hir_id_to_string(self, id: HirId) -> String {
Node::ConstArg(_) => node_str("const"),
Node::Expr(_) => node_str("expr"),
Node::ExprField(_) => node_str("expr field"),
Node::ConstArgExprField(_) => node_str("const arg expr field"),
Node::Stmt(_) => node_str("stmt"),
Node::PathSegment(_) => node_str("path segment"),
Node::Ty(_) => node_str("type"),
@@ -1005,6 +1008,7 @@ pub fn hir_span_with_body(self, hir_id: HirId) -> Span {
Node::ConstArg(const_arg) => const_arg.span(),
Node::Expr(expr) => expr.span,
Node::ExprField(field) => field.span,
Node::ConstArgExprField(field) => field.span,
Node::Stmt(stmt) => stmt.span,
Node::PathSegment(seg) => {
let ident_span = seg.ident.span;
@@ -1086,6 +1090,52 @@ pub fn hir_maybe_get_struct_pattern_shorthand_field(self, expr: &Expr<'_>) -> Op
None
}
// FIXME(mgca): this is pretty iffy. In the long term we should make
// HIR ty lowering able to return `Error` versions of types/consts when
// lowering them in contexts that aren't supposed to use generic parameters.
//
// This current impl strategy is incomplete and doesn't handle `Self` ty aliases.
pub fn check_anon_const_invalid_param_uses(
self,
anon: LocalDefId,
) -> Result<(), ErrorGuaranteed> {
struct GenericParamVisitor<'tcx>(TyCtxt<'tcx>);
impl<'tcx> Visitor<'tcx> for GenericParamVisitor<'tcx> {
type NestedFilter = nested_filter::OnlyBodies;
type Result = ControlFlow<ErrorGuaranteed>;
fn maybe_tcx(&mut self) -> TyCtxt<'tcx> {
self.0
}
fn visit_path(
&mut self,
path: &crate::hir::Path<'tcx>,
_id: HirId,
) -> ControlFlow<ErrorGuaranteed> {
if let Res::Def(
DefKind::TyParam | DefKind::ConstParam | DefKind::LifetimeParam,
_,
) = path.res
{
let e = self.0.dcx().struct_span_err(
path.span,
"generic parameters may not be used in const operations",
);
return ControlFlow::Break(e.emit());
}
intravisit::walk_path(self, path)
}
}
let body = self.hir_maybe_body_owned_by(anon).unwrap();
match GenericParamVisitor(self).visit_expr(&body.value) {
ControlFlow::Break(e) => Err(e),
ControlFlow::Continue(()) => Ok(()),
}
}
}
impl<'tcx> intravisit::HirTyCtxt<'tcx> for TyCtxt<'tcx> {
+1
View File
@@ -359,6 +359,7 @@ pub fn expr_guaranteed_to_constitute_read_for_never(self, expr: &Expr<'_>) -> bo
| Node::Infer(_)
| Node::WherePredicate(_)
| Node::PreciseCapturingNonLifetimeArg(_)
| Node::ConstArgExprField(_)
| Node::OpaqueTy(_) => {
unreachable!("no sub-expr expected for {parent_node:?}")
}
+6 -21
View File
@@ -302,15 +302,7 @@ pub fn is_required_const(&self) -> bool {
#[inline]
pub fn try_to_scalar(self) -> Option<Scalar> {
match self {
Const::Ty(_, c) => match c.kind() {
ty::ConstKind::Value(cv) if cv.ty.is_primitive() => {
// A valtree of a type where leaves directly represent the scalar const value.
// Just checking whether it is a leaf is insufficient as e.g. references are leafs
// but the leaf value is the value they point to, not the reference itself!
Some(cv.valtree.unwrap_leaf().into())
}
_ => None,
},
Const::Ty(_, c) => c.try_to_scalar(),
Const::Val(val, _) => val.try_to_scalar(),
Const::Unevaluated(..) => None,
}
@@ -321,10 +313,7 @@ pub fn try_to_scalar_int(self) -> Option<ScalarInt> {
// This is equivalent to `self.try_to_scalar()?.try_to_int().ok()`, but measurably faster.
match self {
Const::Val(ConstValue::Scalar(Scalar::Int(x)), _) => Some(x),
Const::Ty(_, c) => match c.kind() {
ty::ConstKind::Value(cv) if cv.ty.is_primitive() => Some(cv.valtree.unwrap_leaf()),
_ => None,
},
Const::Ty(_, c) => c.try_to_leaf(),
_ => None,
}
}
@@ -377,14 +366,10 @@ pub fn try_eval_scalar(
tcx: TyCtxt<'tcx>,
typing_env: ty::TypingEnv<'tcx>,
) -> Option<Scalar> {
if let Const::Ty(_, c) = self
&& let ty::ConstKind::Value(cv) = c.kind()
&& cv.ty.is_primitive()
{
// Avoid the `valtree_to_const_val` query. Can only be done on primitive types that
// are valtree leaves, and *not* on references. (References should return the
// pointer here, which valtrees don't represent.)
Some(cv.valtree.unwrap_leaf().into())
if let Const::Ty(_, c) = self {
// We don't evaluate anything for type system constants as normalizing
// the MIR will handle this for us
c.try_to_scalar()
} else {
self.eval(tcx, typing_env, DUMMY_SP).ok()?.try_to_scalar()
}
+4 -3
View File
@@ -620,6 +620,10 @@ fn try_const_mono_switchint<'a>(
let bits = eval_mono_const(constant)?;
return Some((bits, targets));
}
Operand::RuntimeChecks(check) => {
let bits = check.value(tcx.sess) as u128;
return Some((bits, targets));
}
Operand::Move(place) | Operand::Copy(place) => place,
};
@@ -649,9 +653,6 @@ fn try_const_mono_switchint<'a>(
}
match rvalue {
Rvalue::NullaryOp(NullOp::RuntimeChecks(kind)) => {
Some((kind.value(tcx.sess) as u128, targets))
}
Rvalue::Use(Operand::Constant(constant)) => {
let bits = eval_mono_const(constant)?;
Some((bits, targets))
+1 -9
View File
@@ -1097,15 +1097,6 @@ fn fmt(&self, fmt: &mut Formatter<'_>) -> fmt::Result {
BinaryOp(ref op, box (ref a, ref b)) => write!(fmt, "{op:?}({a:?}, {b:?})"),
UnaryOp(ref op, ref a) => write!(fmt, "{op:?}({a:?})"),
Discriminant(ref place) => write!(fmt, "discriminant({place:?})"),
NullaryOp(ref op) => match op {
NullOp::RuntimeChecks(RuntimeChecks::UbChecks) => write!(fmt, "UbChecks()"),
NullOp::RuntimeChecks(RuntimeChecks::ContractChecks) => {
write!(fmt, "ContractChecks()")
}
NullOp::RuntimeChecks(RuntimeChecks::OverflowChecks) => {
write!(fmt, "OverflowChecks()")
}
},
ThreadLocalRef(did) => ty::tls::with(|tcx| {
let muta = tcx.static_mutability(did).unwrap().prefix_str();
write!(fmt, "&/*tls*/ {}{}", muta, tcx.def_path_str(did))
@@ -1264,6 +1255,7 @@ fn fmt(&self, fmt: &mut Formatter<'_>) -> fmt::Result {
Constant(ref a) => write!(fmt, "{a:?}"),
Copy(ref place) => write!(fmt, "copy {place:?}"),
Move(ref place) => write!(fmt, "move {place:?}"),
RuntimeChecks(checks) => write!(fmt, "{checks:?}"),
}
}
}
+6 -13
View File
@@ -642,7 +642,7 @@ pub fn const_from_scalar(
pub fn to_copy(&self) -> Self {
match *self {
Operand::Copy(_) | Operand::Constant(_) => self.clone(),
Operand::Copy(_) | Operand::Constant(_) | Operand::RuntimeChecks(_) => self.clone(),
Operand::Move(place) => Operand::Copy(place),
}
}
@@ -652,7 +652,7 @@ pub fn to_copy(&self) -> Self {
pub fn place(&self) -> Option<Place<'tcx>> {
match self {
Operand::Copy(place) | Operand::Move(place) => Some(*place),
Operand::Constant(_) => None,
Operand::Constant(_) | Operand::RuntimeChecks(_) => None,
}
}
@@ -661,7 +661,7 @@ pub fn place(&self) -> Option<Place<'tcx>> {
pub fn constant(&self) -> Option<&ConstOperand<'tcx>> {
match self {
Operand::Constant(x) => Some(&**x),
Operand::Copy(_) | Operand::Move(_) => None,
Operand::Copy(_) | Operand::Move(_) | Operand::RuntimeChecks(_) => None,
}
}
@@ -681,6 +681,7 @@ pub fn ty<D>(&self, local_decls: &D, tcx: TyCtxt<'tcx>) -> Ty<'tcx>
match self {
&Operand::Copy(ref l) | &Operand::Move(ref l) => l.ty(local_decls, tcx).ty,
Operand::Constant(c) => c.const_.ty(),
Operand::RuntimeChecks(_) => tcx.types.bool,
}
}
@@ -693,6 +694,8 @@ pub fn span<D>(&self, local_decls: &D) -> Span
local_decls.local_decls()[l.local].source_info.span
}
Operand::Constant(c) => c.span,
// User code should not contain this operand, so we should not need this span.
Operand::RuntimeChecks(_) => DUMMY_SP,
}
}
}
@@ -756,7 +759,6 @@ pub fn is_safe_to_remove(&self) -> bool {
_,
)
| Rvalue::BinaryOp(_, _)
| Rvalue::NullaryOp(_)
| Rvalue::UnaryOp(_, _)
| Rvalue::Discriminant(_)
| Rvalue::Aggregate(_, _)
@@ -794,7 +796,6 @@ pub fn ty<D>(&self, local_decls: &D, tcx: TyCtxt<'tcx>) -> Ty<'tcx>
op.ty(tcx, arg_ty)
}
Rvalue::Discriminant(ref place) => place.ty(local_decls, tcx).ty.discriminant_ty(tcx),
Rvalue::NullaryOp(NullOp::RuntimeChecks(_)) => tcx.types.bool,
Rvalue::Aggregate(ref ak, ref ops) => match **ak {
AggregateKind::Array(ty) => Ty::new_array(tcx, ty, ops.len() as u64),
AggregateKind::Tuple => {
@@ -858,14 +859,6 @@ pub fn to_mutbl_lossy(self) -> hir::Mutability {
}
}
impl NullOp {
pub fn ty<'tcx>(&self, tcx: TyCtxt<'tcx>) -> Ty<'tcx> {
match self {
NullOp::RuntimeChecks(_) => tcx.types.bool,
}
}
}
impl<'tcx> UnOp {
pub fn ty(&self, tcx: TyCtxt<'tcx>, arg_ty: Ty<'tcx>) -> Ty<'tcx> {
match self {
+4 -9
View File
@@ -1327,6 +1327,10 @@ pub enum Operand<'tcx> {
/// Constants are already semantically values, and remain unchanged.
Constant(Box<ConstOperand<'tcx>>),
/// Query the compilation session of the current crate for a particular flag. This is not quite
/// a const since its value can differ across crates within a single crate graph.
RuntimeChecks(RuntimeChecks),
}
#[derive(Clone, Copy, PartialEq, TyEncodable, TyDecodable, Hash, HashStable)]
@@ -1418,9 +1422,6 @@ pub enum Rvalue<'tcx> {
/// matching types and return a value of that type.
BinaryOp(BinOp, Box<(Operand<'tcx>, Operand<'tcx>)>),
/// Computes a value as described by the operation.
NullaryOp(NullOp),
/// Exactly like `BinaryOp`, but less operands.
///
/// Also does two's-complement arithmetic. Negation requires a signed integer or a float;
@@ -1561,12 +1562,6 @@ pub enum AggregateKind<'tcx> {
RawPtr(Ty<'tcx>, Mutability),
}
#[derive(Copy, Clone, Debug, PartialEq, Eq, TyEncodable, TyDecodable, Hash, HashStable)]
pub enum NullOp {
/// Returns whether we should perform some checking at runtime.
RuntimeChecks(RuntimeChecks),
}
#[derive(Copy, Clone, Debug, PartialEq, Eq, TyEncodable, TyDecodable, Hash, HashStable)]
pub enum RuntimeChecks {
/// Returns whether we should perform some UB-checking at runtime.
+2 -2
View File
@@ -293,9 +293,9 @@ pub fn reverse_postorder<'a, 'tcx>(
/// reachable.
///
/// Such a traversal is mostly useful because it lets us skip lowering the `false` side
/// of `if <T as Trait>::CONST`, as well as [`NullOp::RuntimeChecks`].
/// of `if <T as Trait>::CONST`, as well as [`Operand::RuntimeChecks`].
///
/// [`NullOp::RuntimeChecks`]: rustc_middle::mir::NullOp::RuntimeChecks
/// [`Operand::RuntimeChecks`]: rustc_middle::mir::Operand::RuntimeChecks
pub fn mono_reachable<'a, 'tcx>(
body: &'a Body<'tcx>,
tcx: TyCtxt<'tcx>,
+2 -6
View File
@@ -775,8 +775,6 @@ fn super_rvalue(
);
}
Rvalue::NullaryOp(_op) => {}
Rvalue::Aggregate(kind, operands) => {
let kind = &$($mutability)? **kind;
match kind {
@@ -847,6 +845,7 @@ fn super_operand(
Operand::Constant(constant) => {
self.visit_const_operand(constant, location);
}
Operand::RuntimeChecks(_) => {}
}
}
@@ -972,10 +971,7 @@ fn super_const_operand(
self.visit_span($(& $mutability)? *span);
match const_ {
Const::Ty(_, ct) => self.visit_ty_const($(&$mutability)? *ct, location),
Const::Val(_, ty) => {
self.visit_ty($(& $mutability)? *ty, TyContext::Location(location));
}
Const::Unevaluated(_, ty) => {
Const::Val(_, ty) | Const::Unevaluated(_, ty) => {
self.visit_ty($(& $mutability)? *ty, TyContext::Location(location));
}
}
+4 -4
View File
@@ -928,7 +928,7 @@ pub fn is_full_range(&self, tcx: TyCtxt<'tcx>) -> Option<bool> {
let lo_is_min = match self.lo {
PatRangeBoundary::NegInfinity => true,
PatRangeBoundary::Finite(value) => {
let lo = value.try_to_scalar_int().unwrap().to_bits(size) ^ bias;
let lo = value.to_leaf().to_bits(size) ^ bias;
lo <= min
}
PatRangeBoundary::PosInfinity => false,
@@ -937,7 +937,7 @@ pub fn is_full_range(&self, tcx: TyCtxt<'tcx>) -> Option<bool> {
let hi_is_max = match self.hi {
PatRangeBoundary::NegInfinity => false,
PatRangeBoundary::Finite(value) => {
let hi = value.try_to_scalar_int().unwrap().to_bits(size) ^ bias;
let hi = value.to_leaf().to_bits(size) ^ bias;
hi > max || hi == max && self.end == RangeEnd::Included
}
PatRangeBoundary::PosInfinity => true,
@@ -1029,7 +1029,7 @@ pub fn as_finite(self) -> Option<ty::ValTree<'tcx>> {
}
pub fn to_bits(self, ty: Ty<'tcx>, tcx: TyCtxt<'tcx>) -> u128 {
match self {
Self::Finite(value) => value.try_to_scalar_int().unwrap().to_bits_unchecked(),
Self::Finite(value) => value.to_leaf().to_bits_unchecked(),
Self::NegInfinity => {
// Unwrap is ok because the type is known to be numeric.
ty.numeric_min_and_max_as_bits(tcx).unwrap().0
@@ -1057,7 +1057,7 @@ pub fn compare_with(self, other: Self, ty: Ty<'tcx>, tcx: TyCtxt<'tcx>) -> Optio
// many ranges such as '\u{037A}'..='\u{037F}', and chars can be compared
// in this way.
(Finite(a), Finite(b)) if matches!(ty.kind(), ty::Int(_) | ty::Uint(_) | ty::Char) => {
if let (Some(a), Some(b)) = (a.try_to_scalar_int(), b.try_to_scalar_int()) {
if let (Some(a), Some(b)) = (a.try_to_leaf(), b.try_to_leaf()) {
let sz = ty.primitive_size(tcx);
let cmp = match ty.kind() {
ty::Uint(_) | ty::Char => a.to_uint(sz).cmp(&b.to_uint(sz)),
+41 -1
View File
@@ -6,6 +6,7 @@
use rustc_type_ir::walk::TypeWalker;
use rustc_type_ir::{self as ir, TypeFlags, WithCachedTypeInfo};
use crate::mir::interpret::Scalar;
use crate::ty::{self, Ty, TyCtxt};
mod int;
@@ -260,7 +261,7 @@ pub fn to_value(self) -> ty::Value<'tcx> {
/// Attempts to convert to a value.
///
/// Note that this does not evaluate the constant.
/// Note that this does not normalize the constant.
pub fn try_to_value(self) -> Option<ty::Value<'tcx>> {
match self.kind() {
ty::ConstKind::Value(cv) => Some(cv),
@@ -268,6 +269,45 @@ pub fn try_to_value(self) -> Option<ty::Value<'tcx>> {
}
}
/// Converts to a `ValTreeKind::Leaf` value, `panic`'ing
/// if this constant is some other kind.
///
/// Note that this does not normalize the constant.
#[inline]
pub fn to_leaf(self) -> ScalarInt {
self.to_value().to_leaf()
}
/// Converts to a `ValTreeKind::Branch` value, `panic`'ing
/// if this constant is some other kind.
///
/// Note that this does not normalize the constant.
#[inline]
pub fn to_branch(self) -> &'tcx [ty::Const<'tcx>] {
self.to_value().to_branch()
}
/// Attempts to convert to a `ValTreeKind::Leaf` value.
///
/// Note that this does not normalize the constant.
pub fn try_to_leaf(self) -> Option<ScalarInt> {
self.try_to_value()?.try_to_leaf()
}
/// Attempts to convert to a `ValTreeKind::Leaf` value.
///
/// Note that this does not normalize the constant.
pub fn try_to_scalar(self) -> Option<Scalar> {
self.try_to_leaf().map(Scalar::Int)
}
/// Attempts to convert to a `ValTreeKind::Branch` value.
///
/// Note that this does not normalize the constant.
pub fn try_to_branch(self) -> Option<&'tcx [ty::Const<'tcx>]> {
self.try_to_value()?.try_to_branch()
}
/// Convenience method to extract the value of a usize constant,
/// useful to get the length of an array type.
///
+75 -82
View File
@@ -3,89 +3,38 @@
use rustc_data_structures::intern::Interned;
use rustc_hir::def::Namespace;
use rustc_macros::{HashStable, Lift, TyDecodable, TyEncodable, TypeFoldable, TypeVisitable};
use rustc_macros::{
HashStable, Lift, TyDecodable, TyEncodable, TypeFoldable, TypeVisitable, extension,
};
use super::ScalarInt;
use crate::mir::interpret::{ErrorHandled, Scalar};
use crate::ty::print::{FmtPrinter, PrettyPrinter};
use crate::ty::{self, Ty, TyCtxt};
use crate::ty::{self, Ty, TyCtxt, ValTreeKind};
/// This datastructure is used to represent the value of constants used in the type system.
///
/// We explicitly choose a different datastructure from the way values are processed within
/// CTFE, as in the type system equal values (according to their `PartialEq`) must also have
/// equal representation (`==` on the rustc data structure, e.g. `ValTree`) and vice versa.
/// Since CTFE uses `AllocId` to represent pointers, it often happens that two different
/// `AllocId`s point to equal values. So we may end up with different representations for
/// two constants whose value is `&42`. Furthermore any kind of struct that has padding will
/// have arbitrary values within that padding, even if the values of the struct are the same.
///
/// `ValTree` does not have this problem with representation, as it only contains integers or
/// lists of (nested) `ValTree`.
#[derive(Clone, Debug, Hash, Eq, PartialEq)]
#[derive(HashStable, TyEncodable, TyDecodable)]
pub enum ValTreeKind<'tcx> {
/// integers, `bool`, `char` are represented as scalars.
/// See the `ScalarInt` documentation for how `ScalarInt` guarantees that equal values
/// of these types have the same representation.
Leaf(ScalarInt),
//SliceOrStr(ValSlice<'tcx>),
// don't use SliceOrStr for now
/// The fields of any kind of aggregate. Structs, tuples and arrays are represented by
/// listing their fields' values in order.
///
/// Enums are represented by storing their variant index as a u32 field, followed by all
/// the fields of the variant.
///
/// ZST types are represented as an empty slice.
Branch(Box<[ValTree<'tcx>]>),
}
impl<'tcx> ValTreeKind<'tcx> {
#[inline]
pub fn unwrap_leaf(&self) -> ScalarInt {
match self {
Self::Leaf(s) => *s,
_ => bug!("expected leaf, got {:?}", self),
}
}
#[inline]
pub fn unwrap_branch(&self) -> &[ValTree<'tcx>] {
match self {
Self::Branch(branch) => &**branch,
_ => bug!("expected branch, got {:?}", self),
}
}
pub fn try_to_scalar(&self) -> Option<Scalar> {
self.try_to_scalar_int().map(Scalar::Int)
}
pub fn try_to_scalar_int(&self) -> Option<ScalarInt> {
match self {
Self::Leaf(s) => Some(*s),
Self::Branch(_) => None,
}
}
pub fn try_to_branch(&self) -> Option<&[ValTree<'tcx>]> {
match self {
Self::Branch(branch) => Some(&**branch),
Self::Leaf(_) => None,
}
#[extension(pub trait ValTreeKindExt<'tcx>)]
impl<'tcx> ty::ValTreeKind<TyCtxt<'tcx>> {
fn try_to_scalar(&self) -> Option<Scalar> {
self.try_to_leaf().map(Scalar::Int)
}
}
/// An interned valtree. Use this rather than `ValTreeKind`, whenever possible.
///
/// See the docs of [`ValTreeKind`] or the [dev guide] for an explanation of this type.
/// See the docs of [`ty::ValTreeKind`] or the [dev guide] for an explanation of this type.
///
/// [dev guide]: https://rustc-dev-guide.rust-lang.org/mir/index.html#valtrees
#[derive(Copy, Clone, Hash, Eq, PartialEq)]
#[derive(HashStable)]
pub struct ValTree<'tcx>(pub(crate) Interned<'tcx, ValTreeKind<'tcx>>);
// FIXME(mgca): Try not interning here. We already intern `ty::Const` which `ValTreeKind`
// recurses through
pub struct ValTree<'tcx>(pub(crate) Interned<'tcx, ty::ValTreeKind<TyCtxt<'tcx>>>);
impl<'tcx> rustc_type_ir::inherent::ValTree<TyCtxt<'tcx>> for ValTree<'tcx> {
fn kind(&self) -> &ty::ValTreeKind<TyCtxt<'tcx>> {
&self
}
}
impl<'tcx> ValTree<'tcx> {
/// Returns the zero-sized valtree: `Branch([])`.
@@ -94,28 +43,33 @@ pub fn zst(tcx: TyCtxt<'tcx>) -> Self {
}
pub fn is_zst(self) -> bool {
matches!(*self, ValTreeKind::Branch(box []))
matches!(*self, ty::ValTreeKind::Branch(box []))
}
pub fn from_raw_bytes(tcx: TyCtxt<'tcx>, bytes: &[u8]) -> Self {
let branches = bytes.iter().map(|&b| Self::from_scalar_int(tcx, b.into()));
let branches = bytes.iter().map(|&b| {
ty::Const::new_value(tcx, Self::from_scalar_int(tcx, b.into()), tcx.types.u8)
});
Self::from_branches(tcx, branches)
}
pub fn from_branches(tcx: TyCtxt<'tcx>, branches: impl IntoIterator<Item = Self>) -> Self {
tcx.intern_valtree(ValTreeKind::Branch(branches.into_iter().collect()))
pub fn from_branches(
tcx: TyCtxt<'tcx>,
branches: impl IntoIterator<Item = ty::Const<'tcx>>,
) -> Self {
tcx.intern_valtree(ty::ValTreeKind::Branch(branches.into_iter().collect()))
}
pub fn from_scalar_int(tcx: TyCtxt<'tcx>, i: ScalarInt) -> Self {
tcx.intern_valtree(ValTreeKind::Leaf(i))
tcx.intern_valtree(ty::ValTreeKind::Leaf(i))
}
}
impl<'tcx> Deref for ValTree<'tcx> {
type Target = &'tcx ValTreeKind<'tcx>;
type Target = &'tcx ty::ValTreeKind<TyCtxt<'tcx>>;
#[inline]
fn deref(&self) -> &&'tcx ValTreeKind<'tcx> {
fn deref(&self) -> &&'tcx ty::ValTreeKind<TyCtxt<'tcx>> {
&self.0.0
}
}
@@ -154,7 +108,7 @@ pub fn try_to_bits(self, tcx: TyCtxt<'tcx>, typing_env: ty::TypingEnv<'tcx>) ->
let (ty::Bool | ty::Char | ty::Uint(_) | ty::Int(_) | ty::Float(_)) = self.ty.kind() else {
return None;
};
let scalar = self.valtree.try_to_scalar_int()?;
let scalar = self.try_to_leaf()?;
let input = typing_env.with_post_analysis_normalized(tcx).as_query_input(self.ty);
let size = tcx.layout_of(input).ok()?.size;
Some(scalar.to_bits(size))
@@ -164,14 +118,14 @@ pub fn try_to_bool(self) -> Option<bool> {
if !self.ty.is_bool() {
return None;
}
self.valtree.try_to_scalar_int()?.try_to_bool().ok()
self.try_to_leaf()?.try_to_bool().ok()
}
pub fn try_to_target_usize(self, tcx: TyCtxt<'tcx>) -> Option<u64> {
if !self.ty.is_usize() {
return None;
}
self.valtree.try_to_scalar_int().map(|s| s.to_target_usize(tcx))
self.try_to_leaf().map(|s| s.to_target_usize(tcx))
}
/// Get the values inside the ValTree as a slice of bytes. This only works for
@@ -192,9 +146,48 @@ pub fn try_to_raw_bytes(self, tcx: TyCtxt<'tcx>) -> Option<&'tcx [u8]> {
_ => return None,
}
Some(tcx.arena.alloc_from_iter(
self.valtree.unwrap_branch().into_iter().map(|v| v.unwrap_leaf().to_u8()),
))
Some(tcx.arena.alloc_from_iter(self.to_branch().into_iter().map(|ct| ct.to_leaf().to_u8())))
}
/// Converts to a `ValTreeKind::Leaf` value, `panic`'ing
/// if this constant is some other kind.
#[inline]
pub fn to_leaf(self) -> ScalarInt {
match &**self.valtree {
ValTreeKind::Leaf(s) => *s,
ValTreeKind::Branch(..) => bug!("expected leaf, got {:?}", self),
}
}
/// Converts to a `ValTreeKind::Branch` value, `panic`'ing
/// if this constant is some other kind.
#[inline]
pub fn to_branch(self) -> &'tcx [ty::Const<'tcx>] {
match &**self.valtree {
ValTreeKind::Branch(branch) => &**branch,
ValTreeKind::Leaf(..) => bug!("expected branch, got {:?}", self),
}
}
/// Attempts to convert to a `ValTreeKind::Leaf` value.
pub fn try_to_leaf(self) -> Option<ScalarInt> {
match &**self.valtree {
ValTreeKind::Leaf(s) => Some(*s),
ValTreeKind::Branch(_) => None,
}
}
/// Attempts to convert to a `ValTreeKind::Leaf` value.
pub fn try_to_scalar(&self) -> Option<Scalar> {
self.try_to_leaf().map(Scalar::Int)
}
/// Attempts to convert to a `ValTreeKind::Branch` value.
pub fn try_to_branch(self) -> Option<&'tcx [ty::Const<'tcx>]> {
match &**self.valtree {
ValTreeKind::Branch(branch) => Some(&**branch),
ValTreeKind::Leaf(_) => None,
}
}
}
+10 -3
View File
@@ -165,6 +165,7 @@ fn with_cached_task<T>(self, task: impl FnOnce() -> T) -> (T, DepNodeIndex) {
type ValueConst = ty::Value<'tcx>;
type ExprConst = ty::Expr<'tcx>;
type ValTree = ty::ValTree<'tcx>;
type ScalarInt = ty::ScalarInt;
type Region = Region<'tcx>;
type EarlyParamRegion = ty::EarlyParamRegion;
@@ -954,7 +955,7 @@ pub struct CtxtInterners<'tcx> {
fields: InternedSet<'tcx, List<FieldIdx>>,
local_def_ids: InternedSet<'tcx, List<LocalDefId>>,
captures: InternedSet<'tcx, List<&'tcx ty::CapturedPlace<'tcx>>>,
valtree: InternedSet<'tcx, ty::ValTreeKind<'tcx>>,
valtree: InternedSet<'tcx, ty::ValTreeKind<TyCtxt<'tcx>>>,
patterns: InternedSet<'tcx, List<ty::Pattern<'tcx>>>,
outlives: InternedSet<'tcx, List<ty::ArgOutlivesPredicate<'tcx>>>,
}
@@ -2272,6 +2273,12 @@ pub fn serialize_query_result_cache(self, encoder: FileEncoder) -> FileEncodeRes
#[inline]
pub fn local_crate_exports_generics(self) -> bool {
// compiler-builtins has some special treatment in codegen, which can result in confusing
// behavior if another crate ends up calling into its monomorphizations.
// https://github.com/rust-lang/rust/issues/150173
if self.is_compiler_builtins(LOCAL_CRATE) {
return false;
}
self.crate_types().iter().any(|crate_type| {
match crate_type {
CrateType::Executable
@@ -2654,7 +2661,7 @@ pub fn debug_stats(self) -> impl fmt::Debug {
impl<'tcx, T: 'tcx + ?Sized + PointeeSized> Clone for InternedInSet<'tcx, T> {
fn clone(&self) -> Self {
InternedInSet(self.0)
*self
}
}
@@ -2777,7 +2784,7 @@ impl<'tcx> TyCtxt<'tcx> {
// crate only, and have a corresponding `mk_` function.
direct_interners! {
region: pub(crate) intern_region(RegionKind<'tcx>): Region -> Region<'tcx>,
valtree: pub(crate) intern_valtree(ValTreeKind<'tcx>): ValTree -> ValTree<'tcx>,
valtree: pub(crate) intern_valtree(ValTreeKind<TyCtxt<'tcx>>): ValTree -> ValTree<'tcx>,
pat: pub mk_pat(PatternKind<'tcx>): Pattern -> Pattern<'tcx>,
const_allocation: pub mk_const_alloc(Allocation): ConstAllocation -> ConstAllocation<'tcx>,
layout: pub mk_layout(LayoutData<FieldIdx, VariantIdx>): Layout -> Layout<'tcx>,
+2 -3
View File
@@ -31,7 +31,7 @@
use rustc_ast::expand::typetree::{FncTree, Kind, Type, TypeTree};
use rustc_ast::node_id::NodeMap;
pub use rustc_ast_ir::{Movability, Mutability, try_visit};
use rustc_data_structures::fx::{FxHashMap, FxHashSet, FxIndexMap, FxIndexSet};
use rustc_data_structures::fx::{FxHashSet, FxIndexMap, FxIndexSet};
use rustc_data_structures::intern::Interned;
use rustc_data_structures::stable_hasher::{HashStable, StableHasher};
use rustc_data_structures::steal::Steal;
@@ -77,7 +77,7 @@
};
pub use self::consts::{
AnonConstKind, AtomicOrdering, Const, ConstInt, ConstKind, ConstToValTreeResult, Expr,
ExprKind, ScalarInt, SimdAlign, UnevaluatedConst, ValTree, ValTreeKind, Value,
ExprKind, ScalarInt, SimdAlign, UnevaluatedConst, ValTree, ValTreeKindExt, Value,
};
pub use self::context::{
CtxtInterners, CurrentGcx, Feed, FreeRegionInfo, GlobalCtxt, Lift, TyCtxt, TyCtxtFeed, tls,
@@ -196,7 +196,6 @@ pub struct ResolverGlobalCtxt {
/// This struct is meant to be consumed by lowering.
#[derive(Debug)]
pub struct ResolverAstLowering {
pub legacy_const_generic_args: FxHashMap<DefId, Option<Vec<usize>>>,
/// Resolutions for nodes that have a single resolution.
pub partial_res_map: NodeMap<hir::def::PartialRes>,
/// Resolutions for import nodes, which have multiple resolutions in different namespaces.
+1 -1
View File
@@ -72,7 +72,7 @@ fn print(t: &PatternKind<'tcx>, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(f, "{start}")?;
if let Some(c) = end.try_to_value() {
let end = c.valtree.unwrap_leaf();
let end = c.to_leaf();
let size = end.size();
let max = match c.ty.kind() {
ty::Int(_) => {
@@ -243,7 +243,6 @@ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
crate::mir::FakeReadCause,
crate::mir::Local,
crate::mir::MirPhase,
crate::mir::NullOp,
crate::mir::Promoted,
crate::mir::RawPtrKind,
crate::mir::RetagKind,
@@ -257,8 +256,8 @@ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
crate::ty::AssocItem,
crate::ty::AssocKind,
crate::ty::BoundRegion,
crate::ty::ScalarInt,
crate::ty::UserTypeAnnotationIndex,
crate::ty::ValTree<'tcx>,
crate::ty::abstract_const::NotConstEvaluatable,
crate::ty::adjustment::AutoBorrowMutability,
crate::ty::adjustment::PointerCoercion,
@@ -284,6 +283,7 @@ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
// interners).
TrivialTypeTraversalAndLiftImpls! {
// tidy-alphabetical-start
crate::mir::RuntimeChecks,
crate::ty::BoundTy,
crate::ty::ParamTy,
crate::ty::instance::ReifyReason,
@@ -697,6 +697,37 @@ fn super_visit_with<V: TypeVisitor<TyCtxt<'tcx>>>(&self, visitor: &mut V) -> V::
}
}
impl<'tcx> TypeVisitable<TyCtxt<'tcx>> for ty::ValTree<'tcx> {
fn visit_with<V: TypeVisitor<TyCtxt<'tcx>>>(&self, visitor: &mut V) -> V::Result {
let inner: &ty::ValTreeKind<TyCtxt<'tcx>> = &*self;
inner.visit_with(visitor)
}
}
impl<'tcx> TypeFoldable<TyCtxt<'tcx>> for ty::ValTree<'tcx> {
fn try_fold_with<F: FallibleTypeFolder<TyCtxt<'tcx>>>(
self,
folder: &mut F,
) -> Result<Self, F::Error> {
let inner: &ty::ValTreeKind<TyCtxt<'tcx>> = &*self;
let new_inner = inner.clone().try_fold_with(folder)?;
if inner == &new_inner {
Ok(self)
} else {
let valtree = folder.cx().intern_valtree(new_inner);
Ok(valtree)
}
}
fn fold_with<F: TypeFolder<TyCtxt<'tcx>>>(self, folder: &mut F) -> Self {
let inner: &ty::ValTreeKind<TyCtxt<'tcx>> = &*self;
let new_inner = inner.clone().fold_with(folder);
if inner == &new_inner { self } else { folder.cx().intern_valtree(new_inner) }
}
}
impl<'tcx> TypeVisitable<TyCtxt<'tcx>> for rustc_span::ErrorGuaranteed {
fn visit_with<V: TypeVisitor<TyCtxt<'tcx>>>(&self, visitor: &mut V) -> V::Result {
visitor.visit_error(*self)
+4 -2
View File
@@ -1394,8 +1394,10 @@ pub fn has_significant_drop(self, tcx: TyCtxt<'tcx>, typing_env: ty::TypingEnv<'
// This doesn't depend on regions, so try to minimize distinct
// query keys used.
let erased = tcx.normalize_erasing_regions(typing_env, query_ty);
tcx.has_significant_drop_raw(typing_env.as_query_input(erased))
// FIX: Use try_normalize to avoid crashing. If it fails, return true.
tcx.try_normalize_erasing_regions(typing_env, query_ty)
.map(|erased| tcx.has_significant_drop_raw(typing_env.as_query_input(erased)))
.unwrap_or(true)
}
}
}
@@ -261,6 +261,7 @@ fn parse_debuginfo(&mut self, stmts: impl Iterator<Item = StmtId>) -> PResult<()
let value = match operand {
Operand::Constant(c) => VarDebugInfoContents::Const(*c),
Operand::Copy(p) | Operand::Move(p) => VarDebugInfoContents::Place(p),
Operand::RuntimeChecks(_) => unreachable!(),
};
let dbginfo = VarDebugInfo {
name,
@@ -157,7 +157,7 @@ fn parse_match(&self, arms: &[ArmId], span: Span) -> PResult<SwitchTargets> {
});
}
};
values.push(value.valtree.unwrap_leaf().to_bits_unchecked());
values.push(value.to_leaf().to_bits_unchecked());
targets.push(self.parse_block(arm.body)?);
}
@@ -6,8 +6,7 @@
use tracing::debug;
use crate::builder::Builder;
use crate::builder::matches::test::is_switch_ty;
use crate::builder::matches::{Candidate, Test, TestBranch, TestKind, TestableCase};
use crate::builder::matches::{Candidate, PatConstKind, Test, TestBranch, TestKind, TestableCase};
/// Output of [`Builder::partition_candidates_into_buckets`].
pub(crate) struct PartitionedCandidates<'tcx, 'b, 'c> {
@@ -157,11 +156,10 @@ fn choose_bucket_for_candidate(
//
// FIXME(#29623) we could use PatKind::Range to rule
// things out here, in some cases.
//
// FIXME(Zalathar): Is the `is_switch_ty` test unnecessary?
(TestKind::SwitchInt, &TestableCase::Constant { value })
if is_switch_ty(match_pair.pattern_ty) =>
{
(
TestKind::SwitchInt,
&TestableCase::Constant { value, kind: PatConstKind::IntOrChar },
) => {
// An important invariant of candidate bucketing is that a candidate
// must not match in multiple branches. For `SwitchInt` tests, adding
// a new value might invalidate that property for range patterns that
@@ -206,7 +204,7 @@ fn choose_bucket_for_candidate(
})
}
(TestKind::If, TestableCase::Constant { value }) => {
(TestKind::If, TestableCase::Constant { value, kind: PatConstKind::Bool }) => {
fully_matched = true;
let value = value.try_to_bool().unwrap_or_else(|| {
span_bug!(test.span, "expected boolean value but got {value:?}")
@@ -218,7 +216,7 @@ fn choose_bucket_for_candidate(
&TestKind::Len { len: test_len, op: BinOp::Eq },
&TestableCase::Slice { len, variable_length },
) => {
match (test_len.cmp(&(len as u64)), variable_length) {
match (test_len.cmp(&len), variable_length) {
(Ordering::Equal, false) => {
// on true, min_len = len = $actual_length,
// on false, len != $actual_length
@@ -251,7 +249,7 @@ fn choose_bucket_for_candidate(
&TestableCase::Slice { len, variable_length },
) => {
// the test is `$actual_len >= test_len`
match (test_len.cmp(&(len as u64)), variable_length) {
match (test_len.cmp(&len), variable_length) {
(Ordering::Equal, true) => {
// $actual_len >= test_len = pat_len,
// so we can match.
@@ -291,7 +289,13 @@ fn choose_bucket_for_candidate(
if !test.overlaps(pat, self.tcx)? { Some(TestBranch::Failure) } else { None }
}
}
(TestKind::Range(range), &TestableCase::Constant { value }) => {
(
TestKind::Range(range),
&TestableCase::Constant {
value,
kind: PatConstKind::Bool | PatConstKind::IntOrChar | PatConstKind::Float,
},
) => {
fully_matched = false;
if !range.contains(value, self.tcx)? {
// `value` is not contained in the testing range,
@@ -302,7 +306,13 @@ fn choose_bucket_for_candidate(
}
}
(TestKind::Eq { value: test_val, .. }, TestableCase::Constant { value: case_val }) => {
(
TestKind::Eq { value: test_val, .. },
TestableCase::Constant {
value: case_val,
kind: PatConstKind::Float | PatConstKind::Other,
},
) => {
if test_val == case_val {
fully_matched = true;
Some(TestBranch::Success)
@@ -7,7 +7,9 @@
use crate::builder::Builder;
use crate::builder::expr::as_place::{PlaceBase, PlaceBuilder};
use crate::builder::matches::{FlatPat, MatchPairTree, PatternExtraData, TestableCase};
use crate::builder::matches::{
FlatPat, MatchPairTree, PatConstKind, PatternExtraData, TestableCase,
};
impl<'a, 'tcx> Builder<'a, 'tcx> {
/// Builds and pushes [`MatchPairTree`] subtrees, one for each pattern in
@@ -156,7 +158,29 @@ pub(super) fn for_pattern(
}
}
PatKind::Constant { value } => Some(TestableCase::Constant { value }),
PatKind::Constant { value } => {
// CAUTION: The type of the pattern node (`pattern.ty`) is
// _often_ the same as the type of the const value (`value.ty`),
// but there are some cases where those types differ
// (e.g. when `deref!(..)` patterns interact with `String`).
// Classify the constant-pattern into further kinds, to
// reduce the number of ad-hoc type tests needed later on.
let pat_ty = pattern.ty;
let const_kind = if pat_ty.is_bool() {
PatConstKind::Bool
} else if pat_ty.is_integral() || pat_ty.is_char() {
PatConstKind::IntOrChar
} else if pat_ty.is_floating_point() {
PatConstKind::Float
} else {
// FIXME(Zalathar): This still covers several different
// categories (e.g. raw pointer, string, pattern-type)
// which could be split out into their own kinds.
PatConstKind::Other
};
Some(TestableCase::Constant { value, kind: const_kind })
}
PatKind::AscribeUserType {
ascription: Ascription { ref annotation, variance },
@@ -256,7 +280,7 @@ pub(super) fn for_pattern(
None
} else {
Some(TestableCase::Slice {
len: prefix.len() + suffix.len(),
len: u64::try_from(prefix.len() + suffix.len()).unwrap(),
variable_length: slice.is_some(),
})
}
@@ -1262,9 +1262,9 @@ struct Ascription<'tcx> {
#[derive(Debug, Clone)]
enum TestableCase<'tcx> {
Variant { adt_def: ty::AdtDef<'tcx>, variant_index: VariantIdx },
Constant { value: ty::Value<'tcx> },
Constant { value: ty::Value<'tcx>, kind: PatConstKind },
Range(Arc<PatRange<'tcx>>),
Slice { len: usize, variable_length: bool },
Slice { len: u64, variable_length: bool },
Deref { temp: Place<'tcx>, mutability: Mutability },
Never,
Or { pats: Box<[FlatPat<'tcx>]> },
@@ -1276,6 +1276,28 @@ fn as_range(&self) -> Option<&PatRange<'tcx>> {
}
}
/// Sub-classification of [`TestableCase::Constant`], which helps to avoid
/// some redundant ad-hoc checks when preparing and lowering tests.
#[derive(Debug, Clone)]
enum PatConstKind {
/// The primitive `bool` type, which is like an integer but simpler,
/// having only two values.
Bool,
/// Primitive unsigned/signed integer types, plus `char`.
/// These types interact nicely with `SwitchInt`.
IntOrChar,
/// Floating-point primitives, e.g. `f32`, `f64`.
/// These types don't support `SwitchInt` and require an equality test,
/// but can also interact with range pattern tests.
Float,
/// Any other constant-pattern is usually tested via some kind of equality
/// check. Types that might be encountered here include:
/// - `&str`
/// - raw pointers derived from integer values
/// - pattern types, e.g. `pattern_type!(u32 is 1..)`
Other,
}
/// Node in a tree of "match pairs", where each pair consists of a place to be
/// tested, and a test to perform on that place.
///
@@ -2935,7 +2957,8 @@ fn static_pattern_match_inner(
bug!("malformed valtree for an enum")
};
let ValTreeKind::Leaf(actual_variant_idx) = ***actual_variant_idx else {
let ValTreeKind::Leaf(actual_variant_idx) = *actual_variant_idx.to_value().valtree
else {
bug!("malformed valtree for an enum")
};
@@ -2943,7 +2966,7 @@ fn static_pattern_match_inner(
}
Constructor::IntRange(int_range) => {
let size = pat.ty().primitive_size(self.tcx);
let actual_int = valtree.unwrap_leaf().to_bits(size);
let actual_int = valtree.to_leaf().to_bits(size);
let actual_int = if pat.ty().is_signed() {
MaybeInfiniteInt::new_finite_int(actual_int, size.bits())
} else {
@@ -2951,33 +2974,33 @@ fn static_pattern_match_inner(
};
IntRange::from_singleton(actual_int).is_subrange(int_range)
}
Constructor::Bool(pattern_value) => match valtree.unwrap_leaf().try_to_bool() {
Constructor::Bool(pattern_value) => match valtree.to_leaf().try_to_bool() {
Ok(actual_value) => *pattern_value == actual_value,
Err(()) => bug!("bool value with invalid bits"),
},
Constructor::F16Range(l, h, end) => {
let actual = valtree.unwrap_leaf().to_f16();
let actual = valtree.to_leaf().to_f16();
match end {
RangeEnd::Included => (*l..=*h).contains(&actual),
RangeEnd::Excluded => (*l..*h).contains(&actual),
}
}
Constructor::F32Range(l, h, end) => {
let actual = valtree.unwrap_leaf().to_f32();
let actual = valtree.to_leaf().to_f32();
match end {
RangeEnd::Included => (*l..=*h).contains(&actual),
RangeEnd::Excluded => (*l..*h).contains(&actual),
}
}
Constructor::F64Range(l, h, end) => {
let actual = valtree.unwrap_leaf().to_f64();
let actual = valtree.to_leaf().to_f64();
match end {
RangeEnd::Included => (*l..=*h).contains(&actual),
RangeEnd::Excluded => (*l..*h).contains(&actual),
}
}
Constructor::F128Range(l, h, end) => {
let actual = valtree.unwrap_leaf().to_f128();
let actual = valtree.to_leaf().to_f128();
match end {
RangeEnd::Included => (*l..=*h).contains(&actual),
RangeEnd::Excluded => (*l..*h).contains(&actual),
@@ -19,7 +19,9 @@
use tracing::{debug, instrument};
use crate::builder::Builder;
use crate::builder::matches::{MatchPairTree, Test, TestBranch, TestKind, TestableCase};
use crate::builder::matches::{
MatchPairTree, PatConstKind, Test, TestBranch, TestKind, TestableCase,
};
impl<'a, 'tcx> Builder<'a, 'tcx> {
/// Identifies what test is needed to decide if `match_pair` is applicable.
@@ -32,11 +34,14 @@ pub(super) fn pick_test_for_match_pair(
let kind = match match_pair.testable_case {
TestableCase::Variant { adt_def, variant_index: _ } => TestKind::Switch { adt_def },
TestableCase::Constant { .. } if match_pair.pattern_ty.is_bool() => TestKind::If,
TestableCase::Constant { .. } if is_switch_ty(match_pair.pattern_ty) => {
TestableCase::Constant { value: _, kind: PatConstKind::Bool } => TestKind::If,
TestableCase::Constant { value: _, kind: PatConstKind::IntOrChar } => {
TestKind::SwitchInt
}
TestableCase::Constant { value } => {
TestableCase::Constant { value, kind: PatConstKind::Float } => {
TestKind::Eq { value, cast_ty: match_pair.pattern_ty }
}
TestableCase::Constant { value, kind: PatConstKind::Other } => {
TestKind::Eq { value, cast_ty: match_pair.pattern_ty }
}
@@ -47,7 +52,7 @@ pub(super) fn pick_test_for_match_pair(
TestableCase::Slice { len, variable_length } => {
let op = if variable_length { BinOp::Ge } else { BinOp::Eq };
TestKind::Len { len: len as u64, op }
TestKind::Len { len, op }
}
TestableCase::Deref { temp, mutability } => TestKind::Deref { temp, mutability },
@@ -116,7 +121,7 @@ pub(super) fn perform_test(
let switch_targets = SwitchTargets::new(
target_blocks.iter().filter_map(|(&branch, &block)| {
if let TestBranch::Constant(value) = branch {
let bits = value.valtree.unwrap_leaf().to_bits_unchecked();
let bits = value.to_leaf().to_bits_unchecked();
Some((bits, block))
} else {
None
@@ -491,11 +496,6 @@ fn string_compare(
}
}
/// Returns true if this type be used with [`TestKind::SwitchInt`].
pub(crate) fn is_switch_ty(ty: Ty<'_>) -> bool {
ty.is_integral() || ty.is_char()
}
fn trait_method<'tcx>(
tcx: TyCtxt<'tcx>,
trait_def_id: DefId,
+10 -3
View File
@@ -897,7 +897,14 @@ pub(crate) fn break_const_continuable_scope(
self.tcx,
ValTree::from_branches(
self.tcx,
[ValTree::from_scalar_int(self.tcx, variant_index.as_u32().into())],
[ty::Const::new_value(
self.tcx,
ValTree::from_scalar_int(
self.tcx,
variant_index.as_u32().into(),
),
self.tcx.types.u32,
)],
),
self.thir[value].ty,
),
@@ -1099,7 +1106,7 @@ pub(crate) fn break_for_tail_call(
Some(DropData { source_info, local, kind: DropKind::Value })
}
Operand::Constant(_) => None,
Operand::Constant(_) | Operand::RuntimeChecks(_) => None,
})
.collect();
@@ -1563,7 +1570,7 @@ pub(crate) fn record_operands_moved(&mut self, operands: &[Spanned<Operand<'tcx>
// look for moves of a local variable, like `MOVE(_X)`
let locals_moved = operands.iter().flat_map(|operand| match operand.node {
Operand::Copy(_) | Operand::Constant(_) => None,
Operand::Copy(_) | Operand::Constant(_) | Operand::RuntimeChecks(_) => None,
Operand::Move(place) => place.as_local(),
});
@@ -63,7 +63,7 @@ pub(crate) fn lit_to_const<'tcx>(
// A CStr is a newtype around a byte slice, so we create the inner slice here.
// We need a branch for each "level" of the data structure.
let bytes = ty::ValTree::from_raw_bytes(tcx, byte_sym.as_byte_str());
ty::ValTree::from_branches(tcx, [bytes])
ty::ValTree::from_branches(tcx, [ty::Const::new_value(tcx, bytes, *inner_ty)])
}
(ast::LitKind::Int(n, _), ty::Uint(ui)) if !neg => {
let scalar_int = trunc(n.get(), *ui);

Some files were not shown because too many files have changed in this diff Show More