Merge pull request #20321 from rust-lang/rustc-pull

Rustc pull update
This commit is contained in:
Laurențiu Nicola
2025-07-28 17:42:46 +00:00
committed by GitHub
2119 changed files with 25486 additions and 17464 deletions
-23
View File
@@ -1,23 +0,0 @@
# This workflow runs spellcheck job
name: Spellcheck
on:
pull_request:
branches:
- "**"
jobs:
spellcheck:
name: run spellchecker
runs-on: ubuntu-latest
steps:
- name: Checkout the source code
uses: actions/checkout@v4
- name: check typos
# sync version with src/tools/tidy/src/ext_tool_checks.rs in spellcheck_runner
uses: crate-ci/typos@v1.34.0
with:
# sync target files with src/tools/tidy/src/ext_tool_checks.rs in check_impl
files: ./compiler ./library ./src/bootstrap ./src/librustdoc
config: ./typos.toml
-2
View File
@@ -85,8 +85,6 @@ __pycache__/
## Node
node_modules
package-lock.json
package.json
/src/doc/rustc-dev-guide/mermaid.min.js
## Rustdoc GUI tests
+112 -55
View File
@@ -384,7 +384,7 @@ dependencies = [
name = "cargo-miri"
version = "0.1.0"
dependencies = [
"cargo_metadata 0.19.2",
"cargo_metadata 0.21.0",
"directories",
"rustc-build-sysroot",
"rustc_tools_util 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)",
@@ -402,6 +402,31 @@ dependencies = [
"serde",
]
[[package]]
name = "cargo-platform"
version = "0.2.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "84982c6c0ae343635a3a4ee6dedef965513735c8b183caa7289fa6e27399ebd4"
dependencies = [
"serde",
]
[[package]]
name = "cargo-util-schemas"
version = "0.8.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7dc1a6f7b5651af85774ae5a34b4e8be397d9cf4bc063b7e6dbd99a841837830"
dependencies = [
"semver",
"serde",
"serde-untagged",
"serde-value",
"thiserror 2.0.12",
"toml 0.8.23",
"unicode-xid",
"url",
]
[[package]]
name = "cargo_metadata"
version = "0.18.1"
@@ -409,7 +434,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "2d886547e41f740c616ae73108f6eb70afe6d940c7bc697cb30f13daec073037"
dependencies = [
"camino",
"cargo-platform",
"cargo-platform 0.1.9",
"semver",
"serde",
"serde_json",
@@ -418,12 +443,13 @@ dependencies = [
[[package]]
name = "cargo_metadata"
version = "0.19.2"
version = "0.21.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "dd5eb614ed4c27c5d706420e4320fbe3216ab31fa1c33cd8246ac36dae4479ba"
checksum = "5cfca2aaa699835ba88faf58a06342a314a950d2b9686165e038286c30316868"
dependencies = [
"camino",
"cargo-platform",
"cargo-platform 0.2.0",
"cargo-util-schemas",
"semver",
"serde",
"serde_json",
@@ -561,7 +587,7 @@ dependencies = [
"tempfile",
"termize",
"toml 0.7.8",
"ui_test 0.30.2",
"ui_test",
"walkdir",
]
@@ -703,6 +729,15 @@ dependencies = [
"windows-sys 0.59.0",
]
[[package]]
name = "colored"
version = "3.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "fde0e0ec90c9dfb3b4b1a0891a7dcd0e2bffde2f7efed5fe7c9bb00e5bfb915e"
dependencies = [
"windows-sys 0.59.0",
]
[[package]]
name = "comma"
version = "1.0.0"
@@ -716,7 +751,7 @@ dependencies = [
"anstyle-svg",
"build_helper",
"camino",
"colored",
"colored 2.2.0",
"diff",
"getopts",
"glob",
@@ -1143,6 +1178,16 @@ version = "1.0.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "877a4ace8713b0bcf2a4e7eec82529c029f1d0619886d18145fea96c3ffe5c0f"
[[package]]
name = "erased-serde"
version = "0.4.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e004d887f51fcb9fef17317a2f3525c887d8aa3f4f50fed920816a688284a5b7"
dependencies = [
"serde",
"typeid",
]
[[package]]
name = "errno"
version = "0.3.13"
@@ -1305,7 +1350,7 @@ version = "0.1.0"
dependencies = [
"anyhow",
"askama",
"cargo_metadata 0.18.1",
"cargo_metadata 0.21.0",
"serde",
"serde_json",
"thiserror 1.0.69",
@@ -1831,17 +1876,16 @@ dependencies = [
[[package]]
name = "ipc-channel"
version = "0.19.0"
version = "0.20.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "6fb8251fb7bcd9ccd3725ed8deae9fe7db8e586495c9eb5b0c52e6233e5e75ea"
checksum = "5b1c98b70019c830a1fc39cecfe1f60ff99c4122f0a189697c810c90ec545c14"
dependencies = [
"bincode",
"crossbeam-channel",
"fnv",
"lazy_static",
"libc",
"mio",
"rand 0.8.5",
"rand 0.9.1",
"serde",
"tempfile",
"uuid",
@@ -2240,7 +2284,6 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "78bed444cc8a2160f01cbcf811ef18cac863ad68ae8ca62092e8db51d51c761c"
dependencies = [
"libc",
"log",
"wasi 0.11.1+wasi-snapshot-preview1",
"windows-sys 0.59.0",
]
@@ -2263,7 +2306,7 @@ dependencies = [
"capstone",
"chrono",
"chrono-tz",
"colored",
"colored 3.0.0",
"directories",
"getrandom 0.3.3",
"ipc-channel",
@@ -2280,7 +2323,7 @@ dependencies = [
"smallvec",
"tempfile",
"tikv-jemalloc-sys",
"ui_test 0.29.2",
"ui_test",
]
[[package]]
@@ -2560,6 +2603,15 @@ version = "0.2.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "04744f49eae99ab78e0d5c0b603ab218f515ea8cfe5a456d7629ad883a3b6e7d"
[[package]]
name = "ordered-float"
version = "2.10.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "68f19d67e5a2795c94e73e0bb1cc1a7edeb2e28efd39e2e1c9b7a40c1108b11c"
dependencies = [
"num-traits",
]
[[package]]
name = "overload"
version = "0.1.1"
@@ -4189,6 +4241,7 @@ dependencies = [
"rustc-literal-escaper",
"rustc_ast",
"rustc_ast_pretty",
"rustc_attr_parsing",
"rustc_data_structures",
"rustc_errors",
"rustc_feature",
@@ -4497,7 +4550,10 @@ dependencies = [
"rustc_macros",
"rustc_serialize",
"rustc_span",
"serde",
"serde_derive",
"serde_json",
"serde_path_to_error",
"tracing",
]
@@ -4845,6 +4901,27 @@ dependencies = [
"serde_derive",
]
[[package]]
name = "serde-untagged"
version = "0.1.7"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "299d9c19d7d466db4ab10addd5703e4c615dec2a5a16dbbafe191045e87ee66e"
dependencies = [
"erased-serde",
"serde",
"typeid",
]
[[package]]
name = "serde-value"
version = "0.7.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f3a1a3341211875ef120e117ea7fd5228530ae7e7036a779fdc9117be6b3282c"
dependencies = [
"ordered-float",
"serde",
]
[[package]]
name = "serde_derive"
version = "1.0.219"
@@ -4868,6 +4945,16 @@ dependencies = [
"serde",
]
[[package]]
name = "serde_path_to_error"
version = "0.1.17"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "59fab13f937fa393d08645bf3a84bdfe86e296747b506ada67bb15f10f218b2a"
dependencies = [
"itoa",
"serde",
]
[[package]]
name = "serde_spanned"
version = "0.6.9"
@@ -4942,16 +5029,6 @@ dependencies = [
"windows-sys 0.52.0",
]
[[package]]
name = "spanned"
version = "0.3.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "86af297923fbcfd107c20a189a6e9c872160df71a7190ae4a7a6c5dce4b2feb6"
dependencies = [
"bstr",
"color-eyre",
]
[[package]]
name = "spanned"
version = "0.4.1"
@@ -5279,7 +5356,7 @@ name = "tidy"
version = "0.1.0"
dependencies = [
"build_helper",
"cargo_metadata 0.19.2",
"cargo_metadata 0.21.0",
"fluent-syntax",
"ignore",
"miropt-test-tools",
@@ -5529,6 +5606,12 @@ dependencies = [
"rustc-hash 2.1.1",
]
[[package]]
name = "typeid"
version = "1.0.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "bc7d623258602320d5c55d1bc22793b57daff0ec7efc270ea7d55ce1d5f5471c"
[[package]]
name = "typenum"
version = "1.18.0"
@@ -5550,32 +5633,6 @@ version = "0.1.7"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "2896d95c02a80c6d6a5d6e953d479f5ddf2dfdb6a244441010e373ac0fb88971"
[[package]]
name = "ui_test"
version = "0.29.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1211b1111c752c73b33073d2958072be08825fd97c9ab4d83444da361a06634b"
dependencies = [
"annotate-snippets 0.11.5",
"anyhow",
"bstr",
"cargo-platform",
"cargo_metadata 0.18.1",
"color-eyre",
"colored",
"comma",
"crossbeam-channel",
"indicatif",
"levenshtein",
"prettydiff",
"regex",
"rustc_version",
"rustfix",
"serde",
"serde_json",
"spanned 0.3.0",
]
[[package]]
name = "ui_test"
version = "0.30.2"
@@ -5585,10 +5642,10 @@ dependencies = [
"annotate-snippets 0.11.5",
"anyhow",
"bstr",
"cargo-platform",
"cargo-platform 0.1.9",
"cargo_metadata 0.18.1",
"color-eyre",
"colored",
"colored 2.2.0",
"comma",
"crossbeam-channel",
"indicatif",
@@ -5599,7 +5656,7 @@ dependencies = [
"rustfix",
"serde",
"serde_json",
"spanned 0.4.1",
"spanned",
]
[[package]]
+2
View File
@@ -37,6 +37,8 @@ path = [
"rust-bors.toml",
"triagebot.toml",
"typos.toml",
"package.json",
"package-lock.json",
"x",
"x.ps1",
"x.py",
+2 -10
View File
@@ -313,7 +313,6 @@ pub fn layout_of_struct_or_enum<
scalar_valid_range: (Bound<u128>, Bound<u128>),
discr_range_of_repr: impl Fn(i128, i128) -> (Integer, bool),
discriminants: impl Iterator<Item = (VariantIdx, i128)>,
dont_niche_optimize_enum: bool,
always_sized: bool,
) -> LayoutCalculatorResult<FieldIdx, VariantIdx, F> {
let (present_first, present_second) = {
@@ -352,13 +351,7 @@ pub fn layout_of_struct_or_enum<
// structs. (We have also handled univariant enums
// that allow representation optimization.)
assert!(is_enum);
self.layout_of_enum(
repr,
variants,
discr_range_of_repr,
discriminants,
dont_niche_optimize_enum,
)
self.layout_of_enum(repr, variants, discr_range_of_repr, discriminants)
}
}
@@ -599,7 +592,6 @@ fn layout_of_enum<
variants: &IndexSlice<VariantIdx, IndexVec<FieldIdx, F>>,
discr_range_of_repr: impl Fn(i128, i128) -> (Integer, bool),
discriminants: impl Iterator<Item = (VariantIdx, i128)>,
dont_niche_optimize_enum: bool,
) -> LayoutCalculatorResult<FieldIdx, VariantIdx, F> {
// Until we've decided whether to use the tagged or
// niche filling LayoutData, we don't want to intern the
@@ -618,7 +610,7 @@ struct TmpLayout<FieldIdx: Idx, VariantIdx: Idx> {
}
let calculate_niche_filling_layout = || -> Option<TmpLayout<FieldIdx, VariantIdx>> {
if dont_niche_optimize_enum {
if repr.inhibit_enum_layout_opt() {
return None;
}
+22
View File
@@ -1376,6 +1376,28 @@ pub fn contains(&self, v: u128) -> bool {
}
}
/// Returns `true` if all the values in `other` are contained in this range,
/// when the values are considered as having width `size`.
#[inline(always)]
pub fn contains_range(&self, other: Self, size: Size) -> bool {
if self.is_full_for(size) {
true
} else {
let trunc = |x| size.truncate(x);
let delta = self.start;
let max = trunc(self.end.wrapping_sub(delta));
let other_start = trunc(other.start.wrapping_sub(delta));
let other_end = trunc(other.end.wrapping_sub(delta));
// Having shifted both input ranges by `delta`, now we only need to check
// whether `0..=max` contains `other_start..=other_end`, which can only
// happen if the other doesn't wrap since `self` isn't everything.
(other_start <= other_end) && (other_end <= max)
}
}
/// Returns `self` with replaced `start`
#[inline(always)]
fn with_start(mut self, start: u128) -> Self {
+63
View File
@@ -5,3 +5,66 @@ fn align_constants() {
assert_eq!(Align::ONE, Align::from_bytes(1).unwrap());
assert_eq!(Align::EIGHT, Align::from_bytes(8).unwrap());
}
#[test]
fn wrapping_range_contains_range() {
let size16 = Size::from_bytes(16);
let a = WrappingRange { start: 10, end: 20 };
assert!(a.contains_range(a, size16));
assert!(a.contains_range(WrappingRange { start: 11, end: 19 }, size16));
assert!(a.contains_range(WrappingRange { start: 10, end: 10 }, size16));
assert!(a.contains_range(WrappingRange { start: 20, end: 20 }, size16));
assert!(!a.contains_range(WrappingRange { start: 10, end: 21 }, size16));
assert!(!a.contains_range(WrappingRange { start: 9, end: 20 }, size16));
assert!(!a.contains_range(WrappingRange { start: 4, end: 6 }, size16));
assert!(!a.contains_range(WrappingRange { start: 24, end: 26 }, size16));
assert!(!a.contains_range(WrappingRange { start: 16, end: 14 }, size16));
let b = WrappingRange { start: 20, end: 10 };
assert!(b.contains_range(b, size16));
assert!(b.contains_range(WrappingRange { start: 20, end: 20 }, size16));
assert!(b.contains_range(WrappingRange { start: 10, end: 10 }, size16));
assert!(b.contains_range(WrappingRange { start: 0, end: 10 }, size16));
assert!(b.contains_range(WrappingRange { start: 20, end: 30 }, size16));
assert!(b.contains_range(WrappingRange { start: 20, end: 9 }, size16));
assert!(b.contains_range(WrappingRange { start: 21, end: 10 }, size16));
assert!(b.contains_range(WrappingRange { start: 999, end: 9999 }, size16));
assert!(b.contains_range(WrappingRange { start: 999, end: 9 }, size16));
assert!(!b.contains_range(WrappingRange { start: 19, end: 19 }, size16));
assert!(!b.contains_range(WrappingRange { start: 11, end: 11 }, size16));
assert!(!b.contains_range(WrappingRange { start: 19, end: 11 }, size16));
assert!(!b.contains_range(WrappingRange { start: 11, end: 19 }, size16));
let f = WrappingRange { start: 0, end: u128::MAX };
assert!(f.contains_range(WrappingRange { start: 10, end: 20 }, size16));
assert!(f.contains_range(WrappingRange { start: 20, end: 10 }, size16));
let g = WrappingRange { start: 2, end: 1 };
assert!(g.contains_range(WrappingRange { start: 10, end: 20 }, size16));
assert!(g.contains_range(WrappingRange { start: 20, end: 10 }, size16));
let size1 = Size::from_bytes(1);
let u8r = WrappingRange { start: 0, end: 255 };
let i8r = WrappingRange { start: 128, end: 127 };
assert!(u8r.contains_range(i8r, size1));
assert!(i8r.contains_range(u8r, size1));
assert!(!u8r.contains_range(i8r, size16));
assert!(i8r.contains_range(u8r, size16));
let boolr = WrappingRange { start: 0, end: 1 };
assert!(u8r.contains_range(boolr, size1));
assert!(i8r.contains_range(boolr, size1));
assert!(!boolr.contains_range(u8r, size1));
assert!(!boolr.contains_range(i8r, size1));
let cmpr = WrappingRange { start: 255, end: 1 };
assert!(u8r.contains_range(cmpr, size1));
assert!(i8r.contains_range(cmpr, size1));
assert!(!cmpr.contains_range(u8r, size1));
assert!(!cmpr.contains_range(i8r, size1));
assert!(!boolr.contains_range(cmpr, size1));
assert!(cmpr.contains_range(boolr, size1));
}
+149 -136
View File
@@ -28,7 +28,7 @@
use rustc_data_structures::stable_hasher::{HashStable, StableHasher};
use rustc_data_structures::stack::ensure_sufficient_stack;
use rustc_data_structures::tagged_ptr::Tag;
use rustc_macros::{Decodable, Encodable, HashStable_Generic};
use rustc_macros::{Decodable, Encodable, HashStable_Generic, Walkable};
pub use rustc_span::AttrId;
use rustc_span::source_map::{Spanned, respan};
use rustc_span::{ByteSymbol, DUMMY_SP, ErrorGuaranteed, Ident, Span, Symbol, kw, sym};
@@ -39,6 +39,7 @@
use crate::token::{self, CommentKind, Delimiter};
use crate::tokenstream::{DelimSpan, LazyAttrTokenStream, TokenStream};
use crate::util::parser::{ExprPrecedence, Fixity};
use crate::visit::{AssocCtxt, BoundKind, LifetimeCtxt};
/// A "Label" is an identifier of some point in sources,
/// e.g. in the following code:
@@ -50,7 +51,7 @@
/// ```
///
/// `'outer` is a label.
#[derive(Clone, Encodable, Decodable, Copy, HashStable_Generic, Eq, PartialEq)]
#[derive(Clone, Encodable, Decodable, Copy, HashStable_Generic, Eq, PartialEq, Walkable)]
pub struct Label {
pub ident: Ident,
}
@@ -63,7 +64,7 @@ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
/// A "Lifetime" is an annotation of the scope in which variable
/// can be used, e.g. `'a` in `&'a i32`.
#[derive(Clone, Encodable, Decodable, Copy, PartialEq, Eq, Hash)]
#[derive(Clone, Encodable, Decodable, Copy, PartialEq, Eq, Hash, Walkable)]
pub struct Lifetime {
pub id: NodeId,
pub ident: Ident,
@@ -87,7 +88,7 @@ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
/// along with a bunch of supporting information.
///
/// E.g., `std::cmp::PartialEq`.
#[derive(Clone, Encodable, Decodable, Debug)]
#[derive(Clone, Encodable, Decodable, Debug, Walkable)]
pub struct Path {
pub span: Span,
/// The segments in the path: the things separated by `::`.
@@ -211,7 +212,7 @@ pub fn join_path_idents(path: impl IntoIterator<Item = impl Borrow<Ident>>) -> S
/// A segment of a path: an identifier, an optional lifetime, and a set of types.
///
/// E.g., `std`, `String` or `Box<T>`.
#[derive(Clone, Encodable, Decodable, Debug)]
#[derive(Clone, Encodable, Decodable, Debug, Walkable)]
pub struct PathSegment {
/// The identifier portion of this path segment.
pub ident: Ident,
@@ -255,7 +256,7 @@ pub fn span(&self) -> Span {
/// The generic arguments and associated item constraints of a path segment.
///
/// E.g., `<A, B>` as in `Foo<A, B>` or `(A, B)` as in `Foo(A, B)`.
#[derive(Clone, Encodable, Decodable, Debug)]
#[derive(Clone, Encodable, Decodable, Debug, Walkable)]
pub enum GenericArgs {
/// The `<'a, A, B, C>` in `foo::bar::baz::<'a, A, B, C>`.
AngleBracketed(AngleBracketedArgs),
@@ -280,10 +281,10 @@ pub fn span(&self) -> Span {
}
/// Concrete argument in the sequence of generic args.
#[derive(Clone, Encodable, Decodable, Debug)]
#[derive(Clone, Encodable, Decodable, Debug, Walkable)]
pub enum GenericArg {
/// `'a` in `Foo<'a>`.
Lifetime(Lifetime),
Lifetime(#[visitable(extra = LifetimeCtxt::GenericArg)] Lifetime),
/// `Bar` in `Foo<Bar>`.
Type(P<Ty>),
/// `1` in `Foo<1>`.
@@ -301,7 +302,7 @@ pub fn span(&self) -> Span {
}
/// A path like `Foo<'a, T>`.
#[derive(Clone, Encodable, Decodable, Debug, Default)]
#[derive(Clone, Encodable, Decodable, Debug, Default, Walkable)]
pub struct AngleBracketedArgs {
/// The overall span.
pub span: Span,
@@ -310,7 +311,7 @@ pub struct AngleBracketedArgs {
}
/// Either an argument for a generic parameter or a constraint on an associated item.
#[derive(Clone, Encodable, Decodable, Debug)]
#[derive(Clone, Encodable, Decodable, Debug, Walkable)]
pub enum AngleBracketedArg {
/// A generic argument for a generic parameter.
Arg(GenericArg),
@@ -340,7 +341,7 @@ fn from(val: ParenthesizedArgs) -> Self {
}
/// A path like `Foo(A, B) -> C`.
#[derive(Clone, Encodable, Decodable, Debug)]
#[derive(Clone, Encodable, Decodable, Debug, Walkable)]
pub struct ParenthesizedArgs {
/// ```text
/// Foo(A, B) -> C
@@ -376,7 +377,7 @@ pub fn as_angle_bracketed_args(&self) -> AngleBracketedArgs {
pub use crate::node_id::{CRATE_NODE_ID, DUMMY_NODE_ID, NodeId};
/// Modifiers on a trait bound like `[const]`, `?` and `!`.
#[derive(Copy, Clone, PartialEq, Eq, Encodable, Decodable, Debug)]
#[derive(Copy, Clone, PartialEq, Eq, Encodable, Decodable, Debug, Walkable)]
pub struct TraitBoundModifiers {
pub constness: BoundConstness,
pub asyncness: BoundAsyncness,
@@ -391,10 +392,10 @@ impl TraitBoundModifiers {
};
}
#[derive(Clone, Encodable, Decodable, Debug)]
#[derive(Clone, Encodable, Decodable, Debug, Walkable)]
pub enum GenericBound {
Trait(PolyTraitRef),
Outlives(Lifetime),
Outlives(#[visitable(extra = LifetimeCtxt::Bound)] Lifetime),
/// Precise capturing syntax: `impl Sized + use<'a>`
Use(ThinVec<PreciseCapturingArg>, Span),
}
@@ -429,7 +430,7 @@ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
}
}
#[derive(Clone, Encodable, Decodable, Debug)]
#[derive(Clone, Encodable, Decodable, Debug, Walkable)]
pub enum GenericParamKind {
/// A lifetime definition (e.g., `'a: 'b + 'c + 'd`).
Lifetime,
@@ -445,11 +446,12 @@ pub enum GenericParamKind {
},
}
#[derive(Clone, Encodable, Decodable, Debug)]
#[derive(Clone, Encodable, Decodable, Debug, Walkable)]
pub struct GenericParam {
pub id: NodeId,
pub ident: Ident,
pub attrs: AttrVec,
#[visitable(extra = BoundKind::Bound)]
pub bounds: GenericBounds,
pub is_placeholder: bool,
pub kind: GenericParamKind,
@@ -470,7 +472,7 @@ pub fn span(&self) -> Span {
/// Represents lifetime, type and const parameters attached to a declaration of
/// a function, enum, trait, etc.
#[derive(Clone, Encodable, Decodable, Debug, Default)]
#[derive(Clone, Encodable, Decodable, Debug, Default, Walkable)]
pub struct Generics {
pub params: ThinVec<GenericParam>,
pub where_clause: WhereClause,
@@ -478,7 +480,7 @@ pub struct Generics {
}
/// A where-clause in a definition.
#[derive(Clone, Encodable, Decodable, Debug, Default)]
#[derive(Clone, Encodable, Decodable, Debug, Default, Walkable)]
pub struct WhereClause {
/// `true` if we ate a `where` token.
///
@@ -496,7 +498,7 @@ pub fn is_empty(&self) -> bool {
}
/// A single predicate in a where-clause.
#[derive(Clone, Encodable, Decodable, Debug)]
#[derive(Clone, Encodable, Decodable, Debug, Walkable)]
pub struct WherePredicate {
pub attrs: AttrVec,
pub kind: WherePredicateKind,
@@ -506,7 +508,7 @@ pub struct WherePredicate {
}
/// Predicate kind in where-clause.
#[derive(Clone, Encodable, Decodable, Debug)]
#[derive(Clone, Encodable, Decodable, Debug, Walkable)]
pub enum WherePredicateKind {
/// A type bound (e.g., `for<'c> Foo: Send + Clone + 'c`).
BoundPredicate(WhereBoundPredicate),
@@ -519,42 +521,45 @@ pub enum WherePredicateKind {
/// A type bound.
///
/// E.g., `for<'c> Foo: Send + Clone + 'c`.
#[derive(Clone, Encodable, Decodable, Debug)]
#[derive(Clone, Encodable, Decodable, Debug, Walkable)]
pub struct WhereBoundPredicate {
/// Any generics from a `for` binding.
pub bound_generic_params: ThinVec<GenericParam>,
/// The type being bounded.
pub bounded_ty: P<Ty>,
/// Trait and lifetime bounds (`Clone + Send + 'static`).
#[visitable(extra = BoundKind::Bound)]
pub bounds: GenericBounds,
}
/// A lifetime predicate.
///
/// E.g., `'a: 'b + 'c`.
#[derive(Clone, Encodable, Decodable, Debug)]
#[derive(Clone, Encodable, Decodable, Debug, Walkable)]
pub struct WhereRegionPredicate {
#[visitable(extra = LifetimeCtxt::Bound)]
pub lifetime: Lifetime,
#[visitable(extra = BoundKind::Bound)]
pub bounds: GenericBounds,
}
/// An equality predicate (unsupported).
///
/// E.g., `T = int`.
#[derive(Clone, Encodable, Decodable, Debug)]
#[derive(Clone, Encodable, Decodable, Debug, Walkable)]
pub struct WhereEqPredicate {
pub lhs_ty: P<Ty>,
pub rhs_ty: P<Ty>,
}
#[derive(Clone, Encodable, Decodable, Debug)]
#[derive(Clone, Encodable, Decodable, Debug, Walkable)]
pub struct Crate {
pub attrs: AttrVec,
pub items: ThinVec<P<Item>>,
pub spans: ModSpans,
/// Must be equal to `CRATE_NODE_ID` after the crate root is expanded, but may hold
/// expansion placeholders or an unassigned value (`DUMMY_NODE_ID`) before that.
pub id: NodeId,
pub attrs: AttrVec,
pub items: ThinVec<P<Item>>,
pub spans: ModSpans,
pub is_placeholder: bool,
}
@@ -608,7 +613,7 @@ pub enum MetaItemInner {
/// A block (`{ .. }`).
///
/// E.g., `{ .. }` as in `fn foo() { .. }`.
#[derive(Clone, Encodable, Decodable, Debug)]
#[derive(Clone, Encodable, Decodable, Debug, Walkable)]
pub struct Block {
/// The statements in the block.
pub stmts: ThinVec<Stmt>,
@@ -622,7 +627,7 @@ pub struct Block {
/// A match pattern.
///
/// Patterns appear in match statements and some other contexts, such as `let` and `if let`.
#[derive(Clone, Encodable, Decodable, Debug)]
#[derive(Clone, Encodable, Decodable, Debug, Walkable)]
pub struct Pat {
pub id: NodeId,
pub kind: PatKind,
@@ -770,7 +775,7 @@ fn from(value: P<Pat>) -> Self {
/// Patterns like the fields of `Foo { x, ref y, ref mut z }`
/// are treated the same as `x: x, y: ref y, z: ref mut z`,
/// except when `is_shorthand` is true.
#[derive(Clone, Encodable, Decodable, Debug)]
#[derive(Clone, Encodable, Decodable, Debug, Walkable)]
pub struct PatField {
/// The identifier for the field.
pub ident: Ident,
@@ -784,7 +789,7 @@ pub struct PatField {
}
#[derive(Clone, Copy, Debug, Eq, PartialEq)]
#[derive(Encodable, Decodable, HashStable_Generic)]
#[derive(Encodable, Decodable, HashStable_Generic, Walkable)]
pub enum ByRef {
Yes(Mutability),
No,
@@ -806,7 +811,7 @@ pub fn cap_ref_mutability(mut self, mutbl: Mutability) -> Self {
/// `.0` is the by-reference mode (`ref`, `ref mut`, or by value),
/// `.1` is the mutability of the binding.
#[derive(Clone, Copy, Debug, Eq, PartialEq)]
#[derive(Encodable, Decodable, HashStable_Generic)]
#[derive(Encodable, Decodable, HashStable_Generic, Walkable)]
pub struct BindingMode(pub ByRef, pub Mutability);
impl BindingMode {
@@ -829,7 +834,7 @@ pub fn prefix_str(self) -> &'static str {
}
}
#[derive(Clone, Encodable, Decodable, Debug)]
#[derive(Clone, Encodable, Decodable, Debug, Walkable)]
pub enum RangeEnd {
/// `..=` or `...`
Included(RangeSyntax),
@@ -837,7 +842,7 @@ pub enum RangeEnd {
Excluded,
}
#[derive(Clone, Encodable, Decodable, Debug)]
#[derive(Clone, Encodable, Decodable, Debug, Walkable)]
pub enum RangeSyntax {
/// `...`
DotDotDot,
@@ -848,7 +853,7 @@ pub enum RangeSyntax {
/// All the different flavors of pattern that Rust recognizes.
//
// Adding a new variant? Please update `test_pat` in `tests/ui/macros/stringify.rs`.
#[derive(Clone, Encodable, Decodable, Debug)]
#[derive(Clone, Encodable, Decodable, Debug, Walkable)]
pub enum PatKind {
/// A missing pattern, e.g. for an anonymous param in a bare fn like `fn f(u32)`.
Missing,
@@ -930,7 +935,7 @@ pub enum PatKind {
}
/// Whether the `..` is present in a struct fields pattern.
#[derive(Clone, Copy, Encodable, Decodable, Debug, PartialEq)]
#[derive(Clone, Copy, Encodable, Decodable, Debug, PartialEq, Walkable)]
pub enum PatFieldsRest {
/// `module::StructName { field, ..}`
Rest,
@@ -943,7 +948,7 @@ pub enum PatFieldsRest {
/// The kind of borrow in an `AddrOf` expression,
/// e.g., `&place` or `&raw const place`.
#[derive(Clone, Copy, PartialEq, Eq, Debug)]
#[derive(Encodable, Decodable, HashStable_Generic)]
#[derive(Encodable, Decodable, HashStable_Generic, Walkable)]
pub enum BorrowKind {
/// A normal borrow, `&$expr` or `&mut $expr`.
/// The resulting type is either `&'a T` or `&'a mut T`
@@ -959,7 +964,7 @@ pub enum BorrowKind {
Pin,
}
#[derive(Clone, Copy, Debug, PartialEq, Encodable, Decodable, HashStable_Generic)]
#[derive(Clone, Copy, Debug, PartialEq, Encodable, Decodable, HashStable_Generic, Walkable)]
pub enum BinOpKind {
/// The `+` operator (addition)
Add,
@@ -1089,7 +1094,7 @@ fn from(op: AssignOpKind) -> BinOpKind {
}
}
#[derive(Clone, Copy, Debug, PartialEq, Encodable, Decodable, HashStable_Generic)]
#[derive(Clone, Copy, Debug, PartialEq, Encodable, Decodable, HashStable_Generic, Walkable)]
pub enum AssignOpKind {
/// The `+=` operator (addition)
AddAssign,
@@ -1141,7 +1146,7 @@ pub fn is_by_value(self) -> bool {
/// Unary operator.
///
/// Note that `&data` is not an operator, it's an `AddrOf` expression.
#[derive(Clone, Copy, Debug, PartialEq, Encodable, Decodable, HashStable_Generic)]
#[derive(Clone, Copy, Debug, PartialEq, Encodable, Decodable, HashStable_Generic, Walkable)]
pub enum UnOp {
/// The `*` operator for dereferencing
Deref,
@@ -1215,7 +1220,7 @@ pub fn is_expr(&self) -> bool {
}
// Adding a new variant? Please update `test_stmt` in `tests/ui/macros/stringify.rs`.
#[derive(Clone, Encodable, Decodable, Debug)]
#[derive(Clone, Encodable, Decodable, Debug, Walkable)]
pub enum StmtKind {
/// A local (let) binding.
Let(P<Local>),
@@ -1231,7 +1236,7 @@ pub enum StmtKind {
MacCall(P<MacCallStmt>),
}
#[derive(Clone, Encodable, Decodable, Debug)]
#[derive(Clone, Encodable, Decodable, Debug, Walkable)]
pub struct MacCallStmt {
pub mac: P<MacCall>,
pub style: MacStmtStyle,
@@ -1239,7 +1244,7 @@ pub struct MacCallStmt {
pub tokens: Option<LazyAttrTokenStream>,
}
#[derive(Clone, Copy, PartialEq, Encodable, Decodable, Debug)]
#[derive(Clone, Copy, PartialEq, Encodable, Decodable, Debug, Walkable)]
pub enum MacStmtStyle {
/// The macro statement had a trailing semicolon (e.g., `foo! { ... };`
/// `foo!(...);`, `foo![...];`).
@@ -1253,7 +1258,7 @@ pub enum MacStmtStyle {
}
/// Local represents a `let` statement, e.g., `let <pat>:<ty> = <expr>;`.
#[derive(Clone, Encodable, Decodable, Debug)]
#[derive(Clone, Encodable, Decodable, Debug, Walkable)]
pub struct Local {
pub id: NodeId,
pub super_: Option<Span>,
@@ -1266,7 +1271,7 @@ pub struct Local {
pub tokens: Option<LazyAttrTokenStream>,
}
#[derive(Clone, Encodable, Decodable, Debug)]
#[derive(Clone, Encodable, Decodable, Debug, Walkable)]
pub enum LocalKind {
/// Local declaration.
/// Example: `let x;`
@@ -1306,7 +1311,7 @@ pub fn init_else_opt(&self) -> Option<(&Expr, Option<&Block>)> {
/// _ => { println!("no match!") },
/// }
/// ```
#[derive(Clone, Encodable, Decodable, Debug)]
#[derive(Clone, Encodable, Decodable, Debug, Walkable)]
pub struct Arm {
pub attrs: AttrVec,
/// Match arm pattern, e.g. `10` in `match foo { 10 => {}, _ => {} }`.
@@ -1321,7 +1326,7 @@ pub struct Arm {
}
/// A single field in a struct expression, e.g. `x: value` and `y` in `Foo { x: value, y }`.
#[derive(Clone, Encodable, Decodable, Debug)]
#[derive(Clone, Encodable, Decodable, Debug, Walkable)]
pub struct ExprField {
pub attrs: AttrVec,
pub id: NodeId,
@@ -1332,13 +1337,13 @@ pub struct ExprField {
pub is_placeholder: bool,
}
#[derive(Clone, PartialEq, Encodable, Decodable, Debug, Copy)]
#[derive(Clone, PartialEq, Encodable, Decodable, Debug, Copy, Walkable)]
pub enum BlockCheckMode {
Default,
Unsafe(UnsafeSource),
}
#[derive(Clone, PartialEq, Encodable, Decodable, Debug, Copy)]
#[derive(Clone, PartialEq, Encodable, Decodable, Debug, Copy, Walkable)]
pub enum UnsafeSource {
CompilerGenerated,
UserProvided,
@@ -1349,7 +1354,7 @@ pub enum UnsafeSource {
/// These are usually found nested inside types (e.g., array lengths)
/// or expressions (e.g., repeat counts), and also used to define
/// explicit discriminant values for enum variants.
#[derive(Clone, Encodable, Decodable, Debug)]
#[derive(Clone, Encodable, Decodable, Debug, Walkable)]
pub struct AnonConst {
pub id: NodeId,
pub value: P<Expr>,
@@ -1633,7 +1638,7 @@ fn from(value: P<Expr>) -> Self {
}
}
#[derive(Clone, Encodable, Decodable, Debug)]
#[derive(Clone, Encodable, Decodable, Debug, Walkable)]
pub struct Closure {
pub binder: ClosureBinder,
pub capture_clause: CaptureBy,
@@ -1649,7 +1654,7 @@ pub struct Closure {
}
/// Limit types of a range (inclusive or exclusive).
#[derive(Copy, Clone, PartialEq, Encodable, Decodable, Debug)]
#[derive(Copy, Clone, PartialEq, Encodable, Decodable, Debug, Walkable)]
pub enum RangeLimits {
/// Inclusive at the beginning, exclusive at the end.
HalfOpen,
@@ -1680,7 +1685,7 @@ pub struct MethodCall {
pub span: Span,
}
#[derive(Clone, Encodable, Decodable, Debug)]
#[derive(Clone, Encodable, Decodable, Debug, Walkable)]
pub enum StructRest {
/// `..x`.
Base(P<Expr>),
@@ -1690,7 +1695,7 @@ pub enum StructRest {
None,
}
#[derive(Clone, Encodable, Decodable, Debug)]
#[derive(Clone, Encodable, Decodable, Debug, Walkable)]
pub struct StructExpr {
pub qself: Option<P<QSelf>>,
pub path: Path,
@@ -1880,14 +1885,14 @@ pub enum ExprKind {
}
/// Used to differentiate between `for` loops and `for await` loops.
#[derive(Clone, Copy, Encodable, Decodable, Debug, PartialEq, Eq)]
#[derive(Clone, Copy, Encodable, Decodable, Debug, PartialEq, Eq, Walkable)]
pub enum ForLoopKind {
For,
ForAwait,
}
/// Used to differentiate between `async {}` blocks and `gen {}` blocks.
#[derive(Clone, Encodable, Decodable, Debug, PartialEq, Eq)]
#[derive(Clone, Encodable, Decodable, Debug, PartialEq, Eq, Walkable)]
pub enum GenBlockKind {
Async,
Gen,
@@ -1912,7 +1917,7 @@ pub fn modifier(&self) -> &'static str {
/// Whether we're unwrapping or wrapping an unsafe binder
#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)]
#[derive(Encodable, Decodable, HashStable_Generic)]
#[derive(Encodable, Decodable, HashStable_Generic, Walkable)]
pub enum UnsafeBinderCastKind {
// e.g. `&i32` -> `unsafe<'a> &'a i32`
Wrap,
@@ -1934,7 +1939,7 @@ pub enum UnsafeBinderCastKind {
/// ^~~~~ ^
/// ty position = 0
/// ```
#[derive(Clone, Encodable, Decodable, Debug)]
#[derive(Clone, Encodable, Decodable, Debug, Walkable)]
pub struct QSelf {
pub ty: P<Ty>,
@@ -1946,7 +1951,7 @@ pub struct QSelf {
}
/// A capture clause used in closures and `async` blocks.
#[derive(Clone, Copy, PartialEq, Encodable, Decodable, Debug, HashStable_Generic)]
#[derive(Clone, Copy, PartialEq, Encodable, Decodable, Debug, HashStable_Generic, Walkable)]
pub enum CaptureBy {
/// `move |x| y + x`.
Value {
@@ -1967,7 +1972,7 @@ pub enum CaptureBy {
}
/// Closure lifetime binder, `for<'a, 'b>` in `for<'a, 'b> |_: &'a (), _: &'b ()|`.
#[derive(Clone, Encodable, Decodable, Debug)]
#[derive(Clone, Encodable, Decodable, Debug, Walkable)]
pub enum ClosureBinder {
/// The binder is not present, all closure lifetimes are inferred.
NotPresent,
@@ -1993,7 +1998,7 @@ pub enum ClosureBinder {
/// Represents a macro invocation. The `path` indicates which macro
/// is being invoked, and the `args` are arguments passed to it.
#[derive(Clone, Encodable, Decodable, Debug)]
#[derive(Clone, Encodable, Decodable, Debug, Walkable)]
pub struct MacCall {
pub path: Path,
pub args: P<DelimArgs>,
@@ -2006,7 +2011,7 @@ pub fn span(&self) -> Span {
}
/// Arguments passed to an attribute macro.
#[derive(Clone, Encodable, Decodable, Debug)]
#[derive(Clone, Encodable, Decodable, Debug, Walkable)]
pub enum AttrArgs {
/// No arguments: `#[attr]`.
Empty,
@@ -2041,7 +2046,7 @@ pub fn inner_tokens(&self) -> TokenStream {
}
/// Delimited arguments, as used in `#[attr()/[]/{}]` or `mac!()/[]/{}`.
#[derive(Clone, Encodable, Decodable, Debug, HashStable_Generic)]
#[derive(Clone, Encodable, Decodable, Debug, HashStable_Generic, Walkable)]
pub struct DelimArgs {
pub dspan: DelimSpan,
pub delim: Delimiter, // Note: `Delimiter::Invisible` never occurs
@@ -2057,7 +2062,7 @@ pub fn need_semicolon(&self) -> bool {
}
/// Represents a macro definition.
#[derive(Clone, Encodable, Decodable, Debug, HashStable_Generic)]
#[derive(Clone, Encodable, Decodable, Debug, HashStable_Generic, Walkable)]
pub struct MacroDef {
pub body: P<DelimArgs>,
/// `true` if macro was defined with `macro_rules`.
@@ -2065,7 +2070,7 @@ pub struct MacroDef {
}
#[derive(Clone, Encodable, Decodable, Debug, Copy, Hash, Eq, PartialEq)]
#[derive(HashStable_Generic)]
#[derive(HashStable_Generic, Walkable)]
pub enum StrStyle {
/// A regular string, like `"foo"`.
Cooked,
@@ -2076,7 +2081,7 @@ pub enum StrStyle {
}
/// The kind of match expression
#[derive(Clone, Copy, Encodable, Decodable, Debug, PartialEq)]
#[derive(Clone, Copy, Encodable, Decodable, Debug, PartialEq, Walkable)]
pub enum MatchKind {
/// match expr { ... }
Prefix,
@@ -2085,7 +2090,7 @@ pub enum MatchKind {
}
/// The kind of yield expression
#[derive(Clone, Encodable, Decodable, Debug)]
#[derive(Clone, Encodable, Decodable, Debug, Walkable)]
pub enum YieldKind {
/// yield expr { ... }
Prefix(Option<P<Expr>>),
@@ -2136,7 +2141,7 @@ pub struct MetaItemLit {
}
/// Similar to `MetaItemLit`, but restricted to string literals.
#[derive(Clone, Copy, Encodable, Decodable, Debug)]
#[derive(Clone, Copy, Encodable, Decodable, Debug, Walkable)]
pub struct StrLit {
/// The original literal as written in source code.
pub symbol: Symbol,
@@ -2265,7 +2270,7 @@ pub fn is_suffixed(&self) -> bool {
// N.B., If you change this, you'll probably want to change the corresponding
// type structure in `middle/ty.rs` as well.
#[derive(Clone, Encodable, Decodable, Debug)]
#[derive(Clone, Encodable, Decodable, Debug, Walkable)]
pub struct MutTy {
pub ty: P<Ty>,
pub mutbl: Mutability,
@@ -2389,7 +2394,7 @@ pub fn name(&self) -> Symbol {
/// * the `RetTy` in `Trait(ArgTy, ArgTy) -> RetTy`
/// * the `C = { Ct }` in `Trait<C = { Ct }>` (feature `associated_const_equality`)
/// * the `f(..): Bound` in `Trait<f(..): Bound>` (feature `return_type_notation`)
#[derive(Clone, Encodable, Decodable, Debug)]
#[derive(Clone, Encodable, Decodable, Debug, Walkable)]
pub struct AssocItemConstraint {
pub id: NodeId,
pub ident: Ident,
@@ -2398,7 +2403,7 @@ pub struct AssocItemConstraint {
pub span: Span,
}
#[derive(Clone, Encodable, Decodable, Debug)]
#[derive(Clone, Encodable, Decodable, Debug, Walkable)]
pub enum Term {
Ty(P<Ty>),
Const(AnonConst),
@@ -2417,7 +2422,7 @@ fn from(v: AnonConst) -> Self {
}
/// The kind of [associated item constraint][AssocItemConstraint].
#[derive(Clone, Encodable, Decodable, Debug)]
#[derive(Clone, Encodable, Decodable, Debug, Walkable)]
pub enum AssocItemConstraintKind {
/// An equality constraint for an associated item (e.g., `AssocTy = Ty` in `Trait<AssocTy = Ty>`).
///
@@ -2427,10 +2432,13 @@ pub enum AssocItemConstraintKind {
/// bindings*. Similarly with associated const equality constraints and *associated const bindings*.
Equality { term: Term },
/// A bound on an associated type (e.g., `AssocTy: Bound` in `Trait<AssocTy: Bound>`).
Bound { bounds: GenericBounds },
Bound {
#[visitable(extra = BoundKind::Bound)]
bounds: GenericBounds,
},
}
#[derive(Encodable, Decodable, Debug)]
#[derive(Encodable, Decodable, Debug, Walkable)]
pub struct Ty {
pub id: NodeId,
pub kind: TyKind,
@@ -2474,7 +2482,7 @@ pub fn is_maybe_parenthesised_infer(&self) -> bool {
}
}
#[derive(Clone, Encodable, Decodable, Debug)]
#[derive(Clone, Encodable, Decodable, Debug, Walkable)]
pub struct FnPtrTy {
pub safety: Safety,
pub ext: Extern,
@@ -2485,7 +2493,7 @@ pub struct FnPtrTy {
pub decl_span: Span,
}
#[derive(Clone, Encodable, Decodable, Debug)]
#[derive(Clone, Encodable, Decodable, Debug, Walkable)]
pub struct UnsafeBinderTy {
pub generic_params: ThinVec<GenericParam>,
pub inner_ty: P<Ty>,
@@ -2494,7 +2502,7 @@ pub struct UnsafeBinderTy {
/// The various kinds of type recognized by the compiler.
//
// Adding a new variant? Please update `test_ty` in `tests/ui/macros/stringify.rs`.
#[derive(Clone, Encodable, Decodable, Debug)]
#[derive(Clone, Encodable, Decodable, Debug, Walkable)]
pub enum TyKind {
/// A variable-length slice (`[T]`).
Slice(P<Ty>),
@@ -2503,11 +2511,11 @@ pub enum TyKind {
/// A raw pointer (`*const T` or `*mut T`).
Ptr(MutTy),
/// A reference (`&'a T` or `&'a mut T`).
Ref(Option<Lifetime>, MutTy),
Ref(#[visitable(extra = LifetimeCtxt::Ref)] Option<Lifetime>, MutTy),
/// A pinned reference (`&'a pin const T` or `&'a pin mut T`).
///
/// Desugars into `Pin<&'a T>` or `Pin<&'a mut T>`.
PinnedRef(Option<Lifetime>, MutTy),
PinnedRef(#[visitable(extra = LifetimeCtxt::Ref)] Option<Lifetime>, MutTy),
/// A function pointer type (e.g., `fn(usize) -> bool`).
FnPtr(P<FnPtrTy>),
/// An unsafe existential lifetime binder (e.g., `unsafe<'a> &'a ()`).
@@ -2523,14 +2531,14 @@ pub enum TyKind {
Path(Option<P<QSelf>>, Path),
/// A trait object type `Bound1 + Bound2 + Bound3`
/// where `Bound` is a trait or a lifetime.
TraitObject(GenericBounds, TraitObjectSyntax),
TraitObject(#[visitable(extra = BoundKind::TraitObject)] GenericBounds, TraitObjectSyntax),
/// An `impl Bound1 + Bound2 + Bound3` type
/// where `Bound` is a trait or a lifetime.
///
/// The `NodeId` exists to prevent lowering from having to
/// generate `NodeId`s on the fly, which would complicate
/// the generation of opaque `type Foo = impl Trait` items significantly.
ImplTrait(NodeId, GenericBounds),
ImplTrait(NodeId, #[visitable(extra = BoundKind::Impl)] GenericBounds),
/// No-op; kept solely so that we can pretty-print faithfully.
Paren(P<Ty>),
/// Unused for now.
@@ -2608,7 +2616,7 @@ pub fn maybe_scalar(&self) -> bool {
}
/// A pattern type pattern.
#[derive(Clone, Encodable, Decodable, Debug)]
#[derive(Clone, Encodable, Decodable, Debug, Walkable)]
pub struct TyPat {
pub id: NodeId,
pub kind: TyPatKind,
@@ -2619,7 +2627,7 @@ pub struct TyPat {
/// All the different flavors of pattern that Rust recognizes.
//
// Adding a new variant? Please update `test_pat` in `tests/ui/macros/stringify.rs`.
#[derive(Clone, Encodable, Decodable, Debug)]
#[derive(Clone, Encodable, Decodable, Debug, Walkable)]
pub enum TyPatKind {
/// A range pattern (e.g., `1...2`, `1..2`, `1..`, `..2`, `1..=2`, `..=2`).
Range(Option<P<AnonConst>>, Option<P<AnonConst>>, Spanned<RangeEnd>),
@@ -2631,7 +2639,7 @@ pub enum TyPatKind {
}
/// Syntax used to declare a trait object.
#[derive(Clone, Copy, PartialEq, Encodable, Decodable, Debug, HashStable_Generic)]
#[derive(Clone, Copy, PartialEq, Encodable, Decodable, Debug, HashStable_Generic, Walkable)]
#[repr(u8)]
pub enum TraitObjectSyntax {
// SAFETY: When adding new variants make sure to update the `Tag` impl.
@@ -2658,10 +2666,10 @@ unsafe fn from_usize(tag: usize) -> Self {
}
}
#[derive(Clone, Encodable, Decodable, Debug)]
#[derive(Clone, Encodable, Decodable, Debug, Walkable)]
pub enum PreciseCapturingArg {
/// Lifetime parameter.
Lifetime(Lifetime),
Lifetime(#[visitable(extra = LifetimeCtxt::GenericArg)] Lifetime),
/// Type or const parameter.
Arg(Path, NodeId),
}
@@ -2669,7 +2677,7 @@ pub enum PreciseCapturingArg {
/// Inline assembly operand explicit register or register class.
///
/// E.g., `"eax"` as in `asm!("mov eax, 2", out("eax") result)`.
#[derive(Clone, Copy, Encodable, Decodable, Debug)]
#[derive(Clone, Copy, Encodable, Decodable, Debug, Walkable)]
pub enum InlineAsmRegOrRegClass {
    /// An explicit register, e.g. `"eax"` in `out("eax") result`.
    Reg(Symbol),
    /// A register class name, leaving the concrete register to the compiler.
    RegClass(Symbol),
}
}
}
#[derive(Clone, PartialEq, Encodable, Decodable, Debug, Hash, HashStable_Generic)]
#[derive(Clone, PartialEq, Encodable, Decodable, Debug, Hash, HashStable_Generic, Walkable)]
pub enum InlineAsmTemplatePiece {
String(Cow<'static, str>),
Placeholder { operand_idx: usize, modifier: Option<char>, span: Span },
@@ -2786,7 +2794,7 @@ pub fn to_string(s: &[Self]) -> String {
/// `DefCollector`. Instead this is deferred until AST lowering where we
/// lower it to an `AnonConst` (for functions) or a `Path` (for statics)
/// depending on what the path resolves to.
#[derive(Clone, Encodable, Decodable, Debug)]
#[derive(Clone, Encodable, Decodable, Debug, Walkable)]
pub struct InlineAsmSym {
pub id: NodeId,
pub qself: Option<P<QSelf>>,
@@ -2796,7 +2804,7 @@ pub struct InlineAsmSym {
/// Inline assembly operand.
///
/// E.g., `out("eax") result` as in `asm!("mov eax, 2", out("eax") result)`.
#[derive(Clone, Encodable, Decodable, Debug)]
#[derive(Clone, Encodable, Decodable, Debug, Walkable)]
pub enum InlineAsmOperand {
In {
reg: InlineAsmRegOrRegClass,
@@ -2841,7 +2849,7 @@ pub fn reg(&self) -> Option<&InlineAsmRegOrRegClass> {
}
}
#[derive(Clone, Copy, Encodable, Decodable, Debug, HashStable_Generic)]
#[derive(Clone, Copy, Encodable, Decodable, Debug, HashStable_Generic, Walkable, PartialEq, Eq)]
pub enum AsmMacro {
/// The `asm!` macro
Asm,
@@ -2880,13 +2888,14 @@ pub const fn diverges(self, options: InlineAsmOptions) -> bool {
/// Inline assembly.
///
/// E.g., `asm!("NOP");`.
#[derive(Clone, Encodable, Decodable, Debug)]
#[derive(Clone, Encodable, Decodable, Debug, Walkable)]
pub struct InlineAsm {
    /// Which macro of the `asm!` family this invocation came from.
    pub asm_macro: AsmMacro,
    /// The parsed template pieces (literal text and placeholders).
    pub template: Vec<InlineAsmTemplatePiece>,
    /// The raw template strings together with their spans.
    pub template_strs: Box<[(Symbol, Option<Symbol>, Span)]>,
    /// The operands, each paired with its span.
    pub operands: Vec<(InlineAsmOperand, Span)>,
    /// Clobbered ABIs, each paired with its span.
    pub clobber_abis: Vec<(Symbol, Span)>,
    /// Assembly options; excluded from visiting via `#[visitable(ignore)]`.
    #[visitable(ignore)]
    pub options: InlineAsmOptions,
    /// Spans of the individual template lines.
    pub line_spans: Vec<Span>,
}
@@ -2894,7 +2903,7 @@ pub struct InlineAsm {
/// A parameter in a function header.
///
/// E.g., `bar: usize` as in `fn foo(bar: usize)`.
#[derive(Clone, Encodable, Decodable, Debug)]
#[derive(Clone, Encodable, Decodable, Debug, Walkable)]
pub struct Param {
pub attrs: AttrVec,
pub ty: P<Ty>,
@@ -3022,7 +3031,7 @@ pub fn from_self(attrs: AttrVec, eself: ExplicitSelf, eself_ident: Ident) -> Par
///
/// Please note that it's different from `FnHeader` structure
/// which contains metadata about function safety, asyncness, constness and ABI.
#[derive(Clone, Encodable, Decodable, Debug)]
#[derive(Clone, Encodable, Decodable, Debug, Walkable)]
pub struct FnDecl {
pub inputs: ThinVec<Param>,
pub output: FnRetTy,
@@ -3038,7 +3047,7 @@ pub fn c_variadic(&self) -> bool {
}
/// Is the trait definition an auto trait?
#[derive(Copy, Clone, PartialEq, Encodable, Decodable, Debug, HashStable_Generic)]
#[derive(Copy, Clone, PartialEq, Encodable, Decodable, Debug, HashStable_Generic, Walkable)]
pub enum IsAuto {
Yes,
No,
@@ -3046,7 +3055,7 @@ pub enum IsAuto {
/// Safety of items.
#[derive(Copy, Clone, PartialEq, Eq, Hash, Encodable, Decodable, Debug)]
#[derive(HashStable_Generic)]
#[derive(HashStable_Generic, Walkable)]
pub enum Safety {
/// `unsafe` an item is explicitly marked as `unsafe`.
Unsafe(Span),
@@ -3062,7 +3071,7 @@ pub enum Safety {
/// Coroutine markers are things that cause the function to generate a coroutine, such as `async`,
/// which makes the function return `impl Future`, or `gen`, which makes the function return `impl
/// Iterator`.
#[derive(Copy, Clone, Encodable, Decodable, Debug)]
#[derive(Copy, Clone, Encodable, Decodable, Debug, Walkable)]
pub enum CoroutineKind {
/// `async`, which returns an `impl Future`.
Async { span: Span, closure_id: NodeId, return_impl_trait_id: NodeId },
@@ -3111,7 +3120,7 @@ pub fn return_id(self) -> (NodeId, Span) {
}
#[derive(Copy, Clone, PartialEq, Eq, Hash, Encodable, Decodable, Debug)]
#[derive(HashStable_Generic)]
#[derive(HashStable_Generic, Walkable)]
pub enum Const {
Yes(Span),
No,
@@ -3119,13 +3128,13 @@ pub enum Const {
/// Item defaultness.
/// For details see the [RFC #2532](https://github.com/rust-lang/rfcs/pull/2532).
#[derive(Copy, Clone, PartialEq, Encodable, Decodable, Debug, HashStable_Generic)]
#[derive(Copy, Clone, PartialEq, Encodable, Decodable, Debug, HashStable_Generic, Walkable)]
pub enum Defaultness {
    /// Marked `default`; carries the span of the keyword.
    Default(Span),
    /// Not marked `default`.
    Final,
}
#[derive(Copy, Clone, PartialEq, Encodable, Decodable, HashStable_Generic)]
#[derive(Copy, Clone, PartialEq, Encodable, Decodable, HashStable_Generic, Walkable)]
pub enum ImplPolarity {
/// `impl Trait for Type`
Positive,
@@ -3144,7 +3153,7 @@ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
/// The polarity of a trait bound.
#[derive(Copy, Clone, PartialEq, Eq, Encodable, Decodable, Debug, Hash)]
#[derive(HashStable_Generic)]
#[derive(HashStable_Generic, Walkable)]
pub enum BoundPolarity {
/// `Type: Trait`
Positive,
@@ -3166,7 +3175,7 @@ pub fn as_str(self) -> &'static str {
/// The constness of a trait bound.
#[derive(Copy, Clone, PartialEq, Eq, Encodable, Decodable, Debug, Hash)]
#[derive(HashStable_Generic)]
#[derive(HashStable_Generic, Walkable)]
pub enum BoundConstness {
/// `Type: Trait`
Never,
@@ -3188,7 +3197,7 @@ pub fn as_str(self) -> &'static str {
/// The asyncness of a trait bound.
#[derive(Copy, Clone, PartialEq, Eq, Encodable, Decodable, Debug)]
#[derive(HashStable_Generic)]
#[derive(HashStable_Generic, Walkable)]
pub enum BoundAsyncness {
/// `Type: Trait`
Normal,
@@ -3205,7 +3214,7 @@ pub fn as_str(self) -> &'static str {
}
}
#[derive(Clone, Encodable, Decodable, Debug)]
#[derive(Clone, Encodable, Decodable, Debug, Walkable)]
pub enum FnRetTy {
/// Returns type is not specified.
///
@@ -3225,14 +3234,14 @@ pub fn span(&self) -> Span {
}
}
#[derive(Clone, Copy, PartialEq, Encodable, Decodable, Debug)]
// NOTE(review): appears to record whether a module body is written inline
// (`mod m { .. }`) as opposed to loaded from a separate file — confirm
// against `ModKind`'s uses of this type.
#[derive(Clone, Copy, PartialEq, Encodable, Decodable, Debug, Walkable)]
pub enum Inline {
    Yes,
    No,
}
/// Module item kind.
#[derive(Clone, Encodable, Decodable, Debug)]
#[derive(Clone, Encodable, Decodable, Debug, Walkable)]
pub enum ModKind {
/// Module with inlined definition `mod foo { ... }`,
/// or with definition outlined to a separate file `mod foo;` and already loaded from it.
@@ -3243,7 +3252,7 @@ pub enum ModKind {
Unloaded,
}
#[derive(Copy, Clone, Encodable, Decodable, Debug, Default)]
#[derive(Copy, Clone, Encodable, Decodable, Debug, Default, Walkable)]
pub struct ModSpans {
/// `inner_span` covers the body of the module; for a file module, its the whole file.
/// For an inline module, its the span inside the `{ ... }`, not including the curly braces.
@@ -3254,7 +3263,7 @@ pub struct ModSpans {
/// Foreign module declaration.
///
/// E.g., `extern { .. }` or `extern "C" { .. }`.
#[derive(Clone, Encodable, Decodable, Debug)]
#[derive(Clone, Encodable, Decodable, Debug, Walkable)]
pub struct ForeignMod {
/// Span of the `extern` keyword.
pub extern_span: Span,
@@ -3265,12 +3274,13 @@ pub struct ForeignMod {
pub items: ThinVec<P<ForeignItem>>,
}
#[derive(Clone, Encodable, Decodable, Debug)]
/// The body of an enum definition: its list of variants.
#[derive(Clone, Encodable, Decodable, Debug, Walkable)]
pub struct EnumDef {
    /// The variants of the enum.
    pub variants: ThinVec<Variant>,
}
/// Enum variant.
#[derive(Clone, Encodable, Decodable, Debug)]
#[derive(Clone, Encodable, Decodable, Debug, Walkable)]
pub struct Variant {
/// Attributes of the variant.
pub attrs: AttrVec,
@@ -3292,7 +3302,7 @@ pub struct Variant {
}
/// Part of `use` item to the right of its prefix.
#[derive(Clone, Encodable, Decodable, Debug)]
#[derive(Clone, Encodable, Decodable, Debug, Walkable)]
pub enum UseTreeKind {
/// `use prefix` or `use prefix as rename`
Simple(Option<Ident>),
@@ -3311,7 +3321,7 @@ pub enum UseTreeKind {
/// A tree of paths sharing common prefixes.
/// Used in `use` items both at top-level and inside of braces in import groups.
#[derive(Clone, Encodable, Decodable, Debug)]
#[derive(Clone, Encodable, Decodable, Debug, Walkable)]
pub struct UseTree {
pub prefix: Path,
pub kind: UseTreeKind,
@@ -3333,7 +3343,7 @@ pub fn ident(&self) -> Ident {
/// Distinguishes between `Attribute`s that decorate items and Attributes that
/// are contained as statements within items. These two cases need to be
/// distinguished for pretty-printing.
#[derive(Clone, PartialEq, Encodable, Decodable, Debug, Copy, HashStable_Generic)]
#[derive(Clone, PartialEq, Encodable, Decodable, Debug, Copy, HashStable_Generic, Walkable)]
pub enum AttrStyle {
Outer,
Inner,
@@ -3343,7 +3353,7 @@ pub enum AttrStyle {
pub type AttrVec = ThinVec<Attribute>;
/// A syntax-level representation of an attribute.
#[derive(Clone, Encodable, Decodable, Debug)]
#[derive(Clone, Encodable, Decodable, Debug, Walkable)]
pub struct Attribute {
pub kind: AttrKind,
pub id: AttrId,
@@ -3353,7 +3363,7 @@ pub struct Attribute {
pub span: Span,
}
#[derive(Clone, Encodable, Decodable, Debug)]
#[derive(Clone, Encodable, Decodable, Debug, Walkable)]
pub enum AttrKind {
/// A normal attribute.
Normal(P<NormalAttr>),
@@ -3364,7 +3374,7 @@ pub enum AttrKind {
DocComment(CommentKind, Symbol),
}
#[derive(Clone, Encodable, Decodable, Debug)]
#[derive(Clone, Encodable, Decodable, Debug, Walkable)]
pub struct NormalAttr {
pub item: AttrItem,
// Tokens for the full attribute, e.g. `#[foo]`, `#![bar]`.
@@ -3385,7 +3395,7 @@ pub fn from_ident(ident: Ident) -> Self {
}
}
#[derive(Clone, Encodable, Decodable, Debug)]
#[derive(Clone, Encodable, Decodable, Debug, Walkable)]
pub struct AttrItem {
pub unsafety: Safety,
pub path: Path,
@@ -3411,20 +3421,20 @@ pub fn is_valid_for_outer_style(&self) -> bool {
/// that the `ref_id` is for. The `impl_id` maps to the "self type" of this impl.
/// If this impl is an `ItemKind::Impl`, the `impl_id` is redundant (it could be the
/// same as the impl's `NodeId`).
#[derive(Clone, Encodable, Decodable, Debug)]
#[derive(Clone, Encodable, Decodable, Debug, Walkable)]
pub struct TraitRef {
    /// The path naming the referenced trait.
    pub path: Path,
    /// The `NodeId` of this trait reference.
    pub ref_id: NodeId,
}
/// Whether enclosing parentheses are present or not.
#[derive(Clone, Encodable, Decodable, Debug)]
#[derive(Clone, Encodable, Decodable, Debug, Walkable)]
pub enum Parens {
    /// Enclosing parentheses are present.
    Yes,
    /// Enclosing parentheses are absent.
    No,
}
#[derive(Clone, Encodable, Decodable, Debug)]
#[derive(Clone, Encodable, Decodable, Debug, Walkable)]
pub struct PolyTraitRef {
/// The `'a` in `for<'a> Foo<&'a T>`.
pub bound_generic_params: ThinVec<GenericParam>,
@@ -3460,14 +3470,14 @@ pub fn new(
}
}
#[derive(Clone, Encodable, Decodable, Debug)]
#[derive(Clone, Encodable, Decodable, Debug, Walkable)]
pub struct Visibility {
    /// The kind of visibility written (public, restricted, …).
    pub kind: VisibilityKind,
    /// The span of the visibility annotation in the source.
    pub span: Span,
    /// Original tokens, when retained.
    /// NOTE(review): presumably kept for token-stream round-tripping — confirm.
    pub tokens: Option<LazyAttrTokenStream>,
}
#[derive(Clone, Encodable, Decodable, Debug)]
#[derive(Clone, Encodable, Decodable, Debug, Walkable)]
pub enum VisibilityKind {
Public,
Restricted { path: P<Path>, id: NodeId, shorthand: bool },
@@ -3483,7 +3493,7 @@ pub fn is_pub(&self) -> bool {
/// Field definition in a struct, variant or union.
///
/// E.g., `bar: usize` as in `struct Foo { bar: usize }`.
#[derive(Clone, Encodable, Decodable, Debug)]
#[derive(Clone, Encodable, Decodable, Debug, Walkable)]
pub struct FieldDef {
pub attrs: AttrVec,
pub id: NodeId,
@@ -3498,14 +3508,14 @@ pub struct FieldDef {
}
/// Was parsing recovery performed?
#[derive(Copy, Clone, Debug, Encodable, Decodable, HashStable_Generic)]
#[derive(Copy, Clone, Debug, Encodable, Decodable, HashStable_Generic, Walkable)]
pub enum Recovered {
    /// No parse recovery was performed.
    No,
    /// Recovery was performed; the token certifies an error was emitted.
    Yes(ErrorGuaranteed),
}
/// Fields and constructor ids of enum variants and structs.
#[derive(Clone, Encodable, Decodable, Debug)]
#[derive(Clone, Encodable, Decodable, Debug, Walkable)]
pub enum VariantData {
/// Struct variant.
///
@@ -3591,7 +3601,7 @@ pub fn opt_generics(&self) -> Option<&Generics> {
}
/// `extern` qualifier on a function item or function type.
#[derive(Clone, Copy, Encodable, Decodable, Debug)]
#[derive(Clone, Copy, Encodable, Decodable, Debug, Walkable)]
pub enum Extern {
/// No explicit extern keyword was used.
///
@@ -3622,7 +3632,7 @@ pub fn from_abi(abi: Option<StrLit>, span: Span) -> Extern {
///
/// All the information between the visibility and the name of the function is
/// included in this struct (e.g., `async unsafe fn` or `const extern "C" fn`).
#[derive(Clone, Copy, Encodable, Decodable, Debug)]
#[derive(Clone, Copy, Encodable, Decodable, Debug, Walkable)]
pub struct FnHeader {
/// Whether this is `unsafe`, or has a default safety.
pub safety: Safety,
@@ -3688,14 +3698,16 @@ fn default() -> FnHeader {
}
}
#[derive(Clone, Encodable, Decodable, Debug)]
/// A trait definition item.
#[derive(Clone, Encodable, Decodable, Debug, Walkable)]
pub struct Trait {
    /// The `const` qualifier, if any.
    pub constness: Const,
    /// The `unsafe` qualifier, if any.
    pub safety: Safety,
    /// Whether this is an auto trait.
    pub is_auto: IsAuto,
    /// The name of the trait.
    pub ident: Ident,
    /// Generic parameters and where-clause.
    pub generics: Generics,
    /// The supertrait bounds; visited with `BoundKind::SuperTraits` context.
    #[visitable(extra = BoundKind::SuperTraits)]
    pub bounds: GenericBounds,
    /// The associated items; visited with `AssocCtxt::Trait` context.
    #[visitable(extra = AssocCtxt::Trait)]
    pub items: ThinVec<P<AssocItem>>,
}
@@ -3717,14 +3729,14 @@ pub struct Trait {
/// ```
///
/// If there is no where clause, then this is `false` with `DUMMY_SP`.
#[derive(Copy, Clone, Encodable, Decodable, Debug, Default)]
#[derive(Copy, Clone, Encodable, Decodable, Debug, Default, Walkable)]
pub struct TyAliasWhereClause {
    /// Whether a `where` token was actually written in the source.
    pub has_where_token: bool,
    /// Span of the where clause; `DUMMY_SP` when there is none.
    pub span: Span,
}
/// The span information for the two where clauses on a `TyAlias`.
#[derive(Copy, Clone, Encodable, Decodable, Debug, Default)]
#[derive(Copy, Clone, Encodable, Decodable, Debug, Default, Walkable)]
pub struct TyAliasWhereClauses {
/// Before the equals sign.
pub before: TyAliasWhereClause,
@@ -3736,12 +3748,13 @@ pub struct TyAliasWhereClauses {
pub split: usize,
}
#[derive(Clone, Encodable, Decodable, Debug)]
/// A type alias item (`type Foo = Bar;`).
#[derive(Clone, Encodable, Decodable, Debug, Walkable)]
pub struct TyAlias {
    pub defaultness: Defaultness,
    pub ident: Ident,
    pub generics: Generics,
    /// Span information for the alias's (up to two) where clauses.
    pub where_clauses: TyAliasWhereClauses,
    /// Bounds on the alias; visited with `BoundKind::Bound` context.
    #[visitable(extra = BoundKind::Bound)]
    pub bounds: GenericBounds,
    /// The aliased type, when a definition is present.
    pub ty: Option<P<Ty>>,
}
@@ -3759,7 +3772,7 @@ pub struct Impl {
pub items: ThinVec<P<AssocItem>>,
}
#[derive(Clone, Encodable, Decodable, Debug, Default)]
#[derive(Clone, Encodable, Decodable, Debug, Default, Walkable)]
pub struct FnContract {
pub requires: Option<P<Expr>>,
pub ensures: Option<P<Expr>>,
@@ -3776,7 +3789,7 @@ pub struct Fn {
pub body: Option<P<Block>>,
}
#[derive(Clone, Encodable, Decodable, Debug)]
#[derive(Clone, Encodable, Decodable, Debug, Walkable)]
pub struct Delegation {
/// Path resolution id.
pub id: NodeId,
@@ -3789,7 +3802,7 @@ pub struct Delegation {
pub from_glob: bool,
}
#[derive(Clone, Encodable, Decodable, Debug)]
#[derive(Clone, Encodable, Decodable, Debug, Walkable)]
pub struct DelegationMac {
pub qself: Option<P<QSelf>>,
pub prefix: Path,
@@ -3798,7 +3811,7 @@ pub struct DelegationMac {
pub body: Option<P<Block>>,
}
#[derive(Clone, Encodable, Decodable, Debug)]
#[derive(Clone, Encodable, Decodable, Debug, Walkable)]
pub struct StaticItem {
pub ident: Ident,
pub ty: P<Ty>,
@@ -3808,7 +3821,7 @@ pub struct StaticItem {
pub define_opaque: Option<ThinVec<(NodeId, Path)>>,
}
#[derive(Clone, Encodable, Decodable, Debug)]
#[derive(Clone, Encodable, Decodable, Debug, Walkable)]
pub struct ConstItem {
pub defaultness: Defaultness,
pub ident: Ident,
+12 -8
View File
@@ -1,5 +1,5 @@
use rustc_data_structures::fx::FxHashMap;
use rustc_macros::{Decodable, Encodable};
use rustc_macros::{Decodable, Encodable, Walkable};
use rustc_span::{Ident, Span, Symbol};
use crate::Expr;
@@ -41,7 +41,7 @@
/// Basically the "AST" for a complete `format_args!()`.
///
/// E.g., `format_args!("hello {name}");`.
#[derive(Clone, Encodable, Decodable, Debug)]
#[derive(Clone, Encodable, Decodable, Debug, Walkable)]
pub struct FormatArgs {
pub span: Span,
pub template: Vec<FormatArgsPiece>,
@@ -63,7 +63,7 @@ pub struct FormatArgs {
/// A piece of a format template string.
///
/// E.g. "hello" or "{name}".
#[derive(Clone, Encodable, Decodable, Debug)]
#[derive(Clone, Encodable, Decodable, Debug, Walkable)]
pub enum FormatArgsPiece {
Literal(Symbol),
Placeholder(FormatPlaceholder),
@@ -73,7 +73,7 @@ pub enum FormatArgsPiece {
///
/// E.g. `1, 2, name="ferris", n=3`,
/// but also implicit captured arguments like `x` in `format_args!("{x}")`.
#[derive(Clone, Encodable, Decodable, Debug)]
#[derive(Clone, Encodable, Decodable, Debug, Walkable)]
pub struct FormatArguments {
arguments: Vec<FormatArgument>,
num_unnamed_args: usize,
@@ -144,13 +144,13 @@ pub fn all_args_mut(&mut self) -> &mut Vec<FormatArgument> {
}
}
#[derive(Clone, Encodable, Decodable, Debug)]
/// One argument of a `format_args!()` invocation.
#[derive(Clone, Encodable, Decodable, Debug, Walkable)]
pub struct FormatArgument {
    /// How this argument was supplied (see `FormatArgumentKind`).
    pub kind: FormatArgumentKind,
    /// The argument expression itself.
    pub expr: P<Expr>,
}
#[derive(Clone, Encodable, Decodable, Debug)]
#[derive(Clone, Encodable, Decodable, Debug, Walkable)]
pub enum FormatArgumentKind {
/// `format_args(…, arg)`
Normal,
@@ -170,24 +170,28 @@ pub fn ident(&self) -> Option<Ident> {
}
}
#[derive(Clone, Encodable, Decodable, Debug, PartialEq, Eq)]
/// A `{…}` placeholder parsed out of a format template string.
#[derive(Clone, Encodable, Decodable, Debug, PartialEq, Eq, Walkable)]
pub struct FormatPlaceholder {
    /// Index into [`FormatArgs::arguments`].
    pub argument: FormatArgPosition,
    /// The span inside the format string for the full `{…}` placeholder.
    pub span: Option<Span>,
    /// `{}`, `{:?}`, or `{:x}`, etc.
    /// Skipped by the derived walkers via `#[visitable(ignore)]`.
    #[visitable(ignore)]
    pub format_trait: FormatTrait,
    /// `{}` or `{:.5}` or `{:-^20}`, etc.
    /// Skipped by the derived walkers via `#[visitable(ignore)]`.
    #[visitable(ignore)]
    pub format_options: FormatOptions,
}
#[derive(Clone, Encodable, Decodable, Debug, PartialEq, Eq)]
#[derive(Clone, Encodable, Decodable, Debug, PartialEq, Eq, Walkable)]
pub struct FormatArgPosition {
/// Which argument this position refers to (Ok),
/// or would've referred to if it existed (Err).
#[visitable(ignore)]
pub index: Result<usize, usize>,
/// What kind of position this is. See [`FormatArgPositionKind`].
#[visitable(ignore)]
pub kind: FormatArgPositionKind,
/// The span of the name or number.
pub span: Option<Span>,
+240 -19
View File
@@ -12,14 +12,14 @@
use rustc_data_structures::flat_map_in_place::FlatMapInPlace;
use rustc_span::source_map::Spanned;
use rustc_span::{Ident, Span};
use rustc_span::{Ident, Span, Symbol};
use smallvec::{SmallVec, smallvec};
use thin_vec::ThinVec;
use crate::ast::*;
use crate::ptr::P;
use crate::tokenstream::*;
use crate::visit::{AssocCtxt, BoundKind, FnCtxt, VisitorResult, try_visit, visit_opt, walk_list};
use crate::visit::{AssocCtxt, BoundKind, FnCtxt, LifetimeCtxt, VisitorResult, try_visit};
mod sealed {
use rustc_ast_ir::visit::VisitorResult;
@@ -36,11 +36,249 @@ impl<T> MutVisitorResult for T {
use sealed::MutVisitorResult;
/// A node that can be visited mutably by a `MutVisitor`.
///
/// `Extra` carries side-channel context threaded through the visit
/// (e.g. a `BoundKind` or `LifetimeCtxt` supplied via
/// `#[visitable(extra = …)]`); it is `()` for most nodes.
pub(crate) trait MutVisitable<V: MutVisitor> {
    /// Extra context for the visit; `Copy` so it can be re-passed to
    /// every element of a collection.
    type Extra: Copy;

    /// Visit `self` with `visitor`, passing `extra` along.
    fn visit_mut(&mut self, visitor: &mut V, extra: Self::Extra);
}
// Visiting an owned pointer `P<T>` simply visits the pointee, with the
// same extra context.
impl<V: MutVisitor, T: ?Sized> MutVisitable<V> for P<T>
where
    T: MutVisitable<V>,
{
    type Extra = T::Extra;
    fn visit_mut(&mut self, visitor: &mut V, extra: Self::Extra) {
        (**self).visit_mut(visitor, extra)
    }
}
// `Option<T>`: visit the contained value when present; `None` is a no-op.
impl<V: MutVisitor, T> MutVisitable<V> for Option<T>
where
    T: MutVisitable<V>,
{
    type Extra = T::Extra;
    fn visit_mut(&mut self, visitor: &mut V, extra: Self::Extra) {
        if let Some(this) = self {
            this.visit_mut(visitor, extra)
        }
    }
}
// `Spanned<T>`: visit the span (which takes no extra context), then the
// wrapped node with the caller-provided extra.
impl<V: MutVisitor, T> MutVisitable<V> for Spanned<T>
where
    T: MutVisitable<V>,
{
    type Extra = T::Extra;
    fn visit_mut(&mut self, visitor: &mut V, extra: Self::Extra) {
        let Spanned { span, node } = self;
        span.visit_mut(visitor, ());
        node.visit_mut(visitor, extra);
    }
}
// Slices: visit each element in order, sharing the same extra context.
impl<V: MutVisitor, T> MutVisitable<V> for [T]
where
    T: MutVisitable<V>,
{
    type Extra = T::Extra;
    fn visit_mut(&mut self, visitor: &mut V, extra: Self::Extra) {
        for item in self {
            item.visit_mut(visitor, extra);
        }
    }
}
// `Vec<T>`: visit each element in order, sharing the same extra context.
// NOTE(review): mirrors the `[T]` impl above, which does not apply here
// automatically in generic position — confirm both are needed.
impl<V: MutVisitor, T> MutVisitable<V> for Vec<T>
where
    T: MutVisitable<V>,
{
    type Extra = T::Extra;
    fn visit_mut(&mut self, visitor: &mut V, extra: Self::Extra) {
        for item in self {
            item.visit_mut(visitor, extra);
        }
    }
}
// A 1-tuple forwards the (possibly non-`()`) extra to its single element.
impl<V: MutVisitor, T> MutVisitable<V> for (T,)
where
    T: MutVisitable<V>,
{
    type Extra = T::Extra;
    fn visit_mut(&mut self, visitor: &mut V, extra: Self::Extra) {
        self.0.visit_mut(visitor, extra);
    }
}
// Tuples of two or more elements require every element to take `Extra = ()`,
// since a single extra value could not meaningfully apply to all of them.
impl<V: MutVisitor, T1, T2> MutVisitable<V> for (T1, T2)
where
    T1: MutVisitable<V, Extra = ()>,
    T2: MutVisitable<V, Extra = ()>,
{
    type Extra = ();
    fn visit_mut(&mut self, visitor: &mut V, extra: Self::Extra) {
        self.0.visit_mut(visitor, extra);
        self.1.visit_mut(visitor, extra);
    }
}
// 3-tuples: visit each element in order; all elements must take `Extra = ()`.
impl<V: MutVisitor, T1, T2, T3> MutVisitable<V> for (T1, T2, T3)
where
    T1: MutVisitable<V, Extra = ()>,
    T2: MutVisitable<V, Extra = ()>,
    T3: MutVisitable<V, Extra = ()>,
{
    type Extra = ();
    fn visit_mut(&mut self, visitor: &mut V, extra: Self::Extra) {
        self.0.visit_mut(visitor, extra);
        self.1.visit_mut(visitor, extra);
        self.2.visit_mut(visitor, extra);
    }
}
// 4-tuples: visit each element in order; all elements must take `Extra = ()`.
impl<V: MutVisitor, T1, T2, T3, T4> MutVisitable<V> for (T1, T2, T3, T4)
where
    T1: MutVisitable<V, Extra = ()>,
    T2: MutVisitable<V, Extra = ()>,
    T3: MutVisitable<V, Extra = ()>,
    T4: MutVisitable<V, Extra = ()>,
{
    type Extra = ();
    fn visit_mut(&mut self, visitor: &mut V, extra: Self::Extra) {
        self.0.visit_mut(visitor, extra);
        self.1.visit_mut(visitor, extra);
        self.2.visit_mut(visitor, extra);
        self.3.visit_mut(visitor, extra);
    }
}
/// A node with a default mutable traversal into its children.
/// NOTE(review): `MutVisitable` appears to dispatch through the visitor's
/// override, with `MutWalkable` providing the fallback walk (see
/// `impl_visitable_calling_walkable!` below) — confirm.
pub trait MutWalkable<V: MutVisitor> {
    /// Descend into `self`'s children with `visitor`.
    fn walk_mut(&mut self, visitor: &mut V);
}
/// Visits each given expression with no extra context (`()`).
macro_rules! visit_visitable {
    (mut $visitor:expr, $($expr:expr),* $(,)?) => {{
        $(MutVisitable::visit_mut($expr, $visitor, ());)*
    }};
}
/// Like `visit_visitable!`, but passes an explicit `extra` value along.
macro_rules! visit_visitable_with {
    (mut $visitor:expr, $expr:expr, $extra:expr $(,)?) => {
        MutVisitable::visit_mut($expr, $visitor, $extra)
    };
}
/// Invokes the default traversal (`MutWalkable::walk_mut`) on a node.
macro_rules! walk_walkable {
    ($visitor:expr, $expr:expr, mut) => {
        MutWalkable::walk_mut($expr, $visitor)
    };
}
/// Implements `MutVisitable` for `$self_ty` with the given closure-shaped
/// body and extra type.
/// NOTE(review): the body returns `V::Result`, presumably `()` for mutable
/// visitors via the sealed `MutVisitorResult` — confirm.
macro_rules! impl_visitable {
    (|&mut $self:ident: $self_ty:ty,
      $vis:ident: &mut $vis_ty:ident,
      $extra:ident: $extra_ty:ty| $block:block) => {
        #[allow(unused_parens, non_local_definitions)]
        impl<$vis_ty: MutVisitor> MutVisitable<$vis_ty> for $self_ty {
            type Extra = $extra_ty;
            fn visit_mut(&mut $self, $vis: &mut $vis_ty, $extra: Self::Extra) -> V::Result {
                $block
            }
        }
    };
}
/// Implements `MutWalkable` for `$self_ty` with the given closure-shaped
/// body, optionally generic over an extra type parameter `$K: $Kb`.
macro_rules! impl_walkable {
    ($(<$K:ident: $Kb:ident>)? |&mut $self:ident: $self_ty:ty,
      $vis:ident: &mut $vis_ty:ident| $block:block) => {
        #[allow(unused_parens, non_local_definitions)]
        impl<$($K: $Kb,)? $vis_ty: MutVisitor> MutWalkable<$vis_ty> for $self_ty {
            fn walk_mut(&mut $self, $vis: &mut $vis_ty) -> V::Result {
                $block
            }
        }
    };
}
/// Implements `MutVisitable` as a no-op for leaf types that contain
/// nothing to visit.
macro_rules! impl_visitable_noop {
    (<mut> $($ty:ty,)*) => {
        $(
            impl_visitable!(|&mut self: $ty, _vis: &mut V, _extra: ()| {});
        )*
    };
}
/// Implements `MutVisitable` for list-like containers by visiting every
/// element with the same extra context. The container only needs to yield
/// `&mut T` via `IntoIterator`.
macro_rules! impl_visitable_list {
    (<mut> $($ty:ty,)*) => {
        $(impl<V: MutVisitor, T> MutVisitable<V> for $ty
        where
            for<'a> &'a mut $ty: IntoIterator<Item = &'a mut T>,
            T: MutVisitable<V>,
        {
            type Extra = <T as MutVisitable<V>>::Extra;
            #[inline]
            fn visit_mut(&mut self, visitor: &mut V, extra: Self::Extra) {
                for i in self {
                    i.visit_mut(visitor, extra);
                }
            }
        })*
    }
}
/// Implements `MutVisitable` by delegating directly to the `MutWalkable`
/// traversal, with no visitor-method override in between.
macro_rules! impl_visitable_direct {
    (<mut> $($ty:ty,)*) => {
        $(impl_visitable!(
            |&mut self: $ty, visitor: &mut V, _extra: ()| {
                MutWalkable::walk_mut(self, visitor)
            }
        );)*
    }
}
/// Generates visitor trait methods whose default body is the node's walk,
/// and wires up `MutVisitable` so visiting the node dispatches through the
/// visitor's (possibly overridden) method.
macro_rules! impl_visitable_calling_walkable {
    (<mut>
        $( fn $method:ident($ty:ty $(, $extra_name:ident: $extra_ty:ty)?); )*
    ) => {
        $(fn $method(&mut self, node: &mut $ty $(, $extra_name:$extra_ty)?) {
            // Dispatch `visit_mut` on `$ty` through the visitor method,
            // threading any extra context along.
            impl_visitable!(|&mut self: $ty, visitor: &mut V, extra: ($($extra_ty)?)| {
                let ($($extra_name)?) = extra;
                visitor.$method(self $(, $extra_name)?);
            });
            walk_walkable!(self, node, mut)
        })*
    }
}
/// Defines free-standing `walk_*` functions that delegate to
/// `MutWalkable::walk_mut` for the listed types.
macro_rules! define_named_walk {
    ((mut) $Visitor:ident
        $( pub fn $method:ident($ty:ty); )*
    ) => {
        $(pub fn $method<V: $Visitor>(visitor: &mut V, node: &mut $ty) {
            walk_walkable!(visitor, node, mut)
        })*
    };
}
super::common_visitor_and_walkers!((mut) MutVisitor);
macro_rules! generate_flat_map_visitor_fns {
($($name:ident, $Ty:ty, $flat_map_fn:ident$(, $param:ident: $ParamTy:ty)*;)+) => {
$(
#[allow(unused_parens)]
impl<V: MutVisitor> MutVisitable<V> for ThinVec<$Ty> {
type Extra = ($($ParamTy),*);
#[inline]
fn visit_mut(
&mut self,
visitor: &mut V,
($($param),*): Self::Extra,
) -> V::Result {
$name(visitor, self $(, $param)*)
}
}
fn $name<V: MutVisitor>(
vis: &mut V,
values: &mut ThinVec<$Ty>,
@@ -78,15 +316,6 @@ pub fn walk_flat_map_pat_field<T: MutVisitor>(
smallvec![fp]
}
/// Visits a `use`-tree that is nested inside another one (e.g. the `b` in
/// `use a::{b, c};`), together with the `NodeId` the parser assigned to it.
/// The id is visited before the tree itself.
fn visit_nested_use_tree<V: MutVisitor>(
    visitor: &mut V,
    nested_tree: &mut UseTree,
    nested_id: &mut NodeId,
) {
    visitor.visit_id(nested_id);
    visitor.visit_use_tree(nested_tree);
}
macro_rules! generate_walk_flat_map_fns {
($($fn_name:ident($Ty:ty$(,$extra_name:ident: $ExtraTy:ty)*) => $visit_fn_name:ident;)+) => {$(
pub fn $fn_name<V: MutVisitor>(vis: &mut V, mut value: $Ty$(,$extra_name: $ExtraTy)*) -> SmallVec<[$Ty; 1]> {
@@ -109,14 +338,6 @@ macro_rules! generate_walk_flat_map_fns {
walk_flat_map_assoc_item(P<AssocItem>, ctxt: AssocCtxt) => visit_assoc_item;
}
/// Visits the spans of both `where`-clause positions of a type alias
/// (`before` the `=` and `after` it), in that order. The `split` index and
/// the `has_where_token` flags carry no spans and are deliberately ignored.
fn walk_ty_alias_where_clauses<T: MutVisitor>(vis: &mut T, tawcs: &mut TyAliasWhereClauses) {
    // Destructure fully so that adding a field to either struct forces this
    // function to be revisited.
    let TyAliasWhereClauses { before, after, split: _ } = tawcs;
    for clause in [before, after] {
        let TyAliasWhereClause { has_where_token: _, span } = clause;
        vis.visit_span(span);
    }
}
pub fn walk_filter_map_expr<T: MutVisitor>(vis: &mut T, mut e: P<Expr>) -> Option<P<Expr>> {
vis.visit_expr(&mut e);
Some(e)
+2 -2
View File
@@ -20,7 +20,7 @@
use rustc_data_structures::stable_hasher::{HashStable, StableHasher};
use rustc_data_structures::sync;
use rustc_macros::{Decodable, Encodable, HashStable_Generic};
use rustc_macros::{Decodable, Encodable, HashStable_Generic, Walkable};
use rustc_serialize::{Decodable, Encodable};
use rustc_span::{DUMMY_SP, Span, SpanDecoder, SpanEncoder, Symbol, sym};
use thin_vec::ThinVec;
@@ -977,7 +977,7 @@ pub fn inlined_next(&mut self) -> (Token, Spacing) {
}
}
#[derive(Debug, Copy, Clone, PartialEq, Encodable, Decodable, HashStable_Generic)]
#[derive(Debug, Copy, Clone, PartialEq, Encodable, Decodable, HashStable_Generic, Walkable)]
pub struct DelimSpan {
pub open: Span,
pub close: Span,
+723 -1510
View File
@@ -16,7 +16,7 @@
pub use rustc_ast_ir::visit::VisitorResult;
pub use rustc_ast_ir::{try_visit, visit_opt, walk_list, walk_visitable_list};
use rustc_span::source_map::Spanned;
use rustc_span::{Ident, Span};
use rustc_span::{Ident, Span, Symbol};
use thin_vec::ThinVec;
use crate::ast::*;
@@ -75,6 +75,241 @@ pub enum LifetimeCtxt {
GenericArg,
}
/// A node that a (read-only) `Visitor` can visit.
///
/// `visit` dispatches to the visitor's dedicated `visit_*` hook when one
/// exists; `Extra` threads side-channel context (such as a `BoundKind` or
/// `AssocCtxt`) through to that hook. `Extra: Copy` lets container impls pass
/// the same value to every element.
pub(crate) trait Visitable<'a, V: Visitor<'a>> {
    type Extra: Copy;
    #[must_use]
    fn visit(&'a self, visitor: &mut V, extra: Self::Extra) -> V::Result;
}
/// Visiting a pointer visits its pointee; the pointer itself adds nothing.
/// `Extra` is forwarded unchanged.
impl<'a, V: Visitor<'a>, T: ?Sized> Visitable<'a, V> for P<T>
where
    T: Visitable<'a, V>,
{
    type Extra = T::Extra;
    fn visit(&'a self, visitor: &mut V, extra: Self::Extra) -> V::Result {
        let pointee: &'a T = &**self;
        pointee.visit(visitor, extra)
    }
}
/// `None` is skipped silently; a present value is visited with the same
/// `Extra` the inner type expects.
impl<'a, V: Visitor<'a>, T> Visitable<'a, V> for Option<T>
where
    T: Visitable<'a, V>,
{
    type Extra = T::Extra;
    fn visit(&'a self, visitor: &mut V, extra: Self::Extra) -> V::Result {
        match self {
            None => {}
            Some(value) => try_visit!(value.visit(visitor, extra)),
        }
        V::Result::output()
    }
}
impl<'a, V: Visitor<'a>, T> Visitable<'a, V> for Spanned<T>
where
T: Visitable<'a, V>,
{
type Extra = T::Extra;
fn visit(&'a self, visitor: &mut V, extra: Self::Extra) -> V::Result {
let Spanned { span: _, node } = self;
node.visit(visitor, extra)
}
}
/// Visits slice elements front to back, propagating an early break from the
/// visitor via `try_visit!`. Every element receives the same `extra`.
impl<'a, V: Visitor<'a>, T> Visitable<'a, V> for [T]
where
    T: Visitable<'a, V>,
{
    type Extra = T::Extra;
    fn visit(&'a self, visitor: &mut V, extra: Self::Extra) -> V::Result {
        for element in self.iter() {
            try_visit!(element.visit(visitor, extra));
        }
        V::Result::output()
    }
}
/// A `Vec` visits exactly like the slice it owns: elements in order, same
/// `extra` for each.
impl<'a, V: Visitor<'a>, T> Visitable<'a, V> for Vec<T>
where
    T: Visitable<'a, V>,
{
    type Extra = T::Extra;
    fn visit(&'a self, visitor: &mut V, extra: Self::Extra) -> V::Result {
        // Defer to the `[T]` impl above, which walks elements front to back.
        self.as_slice().visit(visitor, extra)
    }
}
/// A one-element tuple is transparent: visiting it visits its sole field.
impl<'a, V: Visitor<'a>, T> Visitable<'a, V> for (T,)
where
    T: Visitable<'a, V>,
{
    type Extra = T::Extra;
    fn visit(&'a self, visitor: &mut V, extra: Self::Extra) -> V::Result {
        let (only,) = self;
        only.visit(visitor, extra)
    }
}
/// Visits both tuple fields left to right. Multi-field tuples require
/// `Extra = ()` because a single `extra` value could not serve two
/// differently-typed fields.
impl<'a, V: Visitor<'a>, T1, T2> Visitable<'a, V> for (T1, T2)
where
    T1: Visitable<'a, V, Extra = ()>,
    T2: Visitable<'a, V, Extra = ()>,
{
    type Extra = ();
    fn visit(&'a self, visitor: &mut V, extra: Self::Extra) -> V::Result {
        let (first, second) = self;
        try_visit!(first.visit(visitor, extra));
        try_visit!(second.visit(visitor, extra));
        V::Result::output()
    }
}
/// Visits all three tuple fields left to right; see the two-element impl for
/// why `Extra` must be `()`.
impl<'a, V: Visitor<'a>, T1, T2, T3> Visitable<'a, V> for (T1, T2, T3)
where
    T1: Visitable<'a, V, Extra = ()>,
    T2: Visitable<'a, V, Extra = ()>,
    T3: Visitable<'a, V, Extra = ()>,
{
    type Extra = ();
    fn visit(&'a self, visitor: &mut V, extra: Self::Extra) -> V::Result {
        let (first, second, third) = self;
        try_visit!(first.visit(visitor, extra));
        try_visit!(second.visit(visitor, extra));
        try_visit!(third.visit(visitor, extra));
        V::Result::output()
    }
}
/// Visits all four tuple fields left to right; see the two-element impl for
/// why `Extra` must be `()`.
impl<'a, V: Visitor<'a>, T1, T2, T3, T4> Visitable<'a, V> for (T1, T2, T3, T4)
where
    T1: Visitable<'a, V, Extra = ()>,
    T2: Visitable<'a, V, Extra = ()>,
    T3: Visitable<'a, V, Extra = ()>,
    T4: Visitable<'a, V, Extra = ()>,
{
    type Extra = ();
    fn visit(&'a self, visitor: &mut V, extra: Self::Extra) -> V::Result {
        let (first, second, third, fourth) = self;
        try_visit!(first.visit(visitor, extra));
        try_visit!(second.visit(visitor, extra));
        try_visit!(third.visit(visitor, extra));
        try_visit!(fourth.visit(visitor, extra));
        V::Result::output()
    }
}
/// A node whose substructure can be walked: `walk_ref` recurses into the
/// node's children without invoking the node's own `visit_*` hook first.
/// `Visitable` impls typically call this after dispatching to the visitor.
pub(crate) trait Walkable<'a, V: Visitor<'a>> {
    #[must_use]
    fn walk_ref(&'a self, visitor: &mut V) -> V::Result;
}
// Visits each listed expression in order with `extra = ()`, short-circuiting
// via `try_visit!` if the visitor breaks. Expands to a block (`{{ ... }}`) so
// it can be used in expression position.
macro_rules! visit_visitable {
    ($visitor:expr, $($expr:expr),* $(,)?) => {{
        $(try_visit!(Visitable::visit($expr, $visitor, ()));)*
    }};
}
// Like `visit_visitable!`, but for a single expression that needs a non-unit
// `Extra` value (e.g. a `BoundKind` or `AssocCtxt`).
macro_rules! visit_visitable_with {
    ($visitor:expr, $expr:expr, $extra:expr $(,)?) => {
        try_visit!(Visitable::visit($expr, $visitor, $extra))
    };
}
// Walks a node's substructure via its `Walkable` impl. The trailing empty
// third argument (`, )`) keeps call sites identical between this read-only
// version and the mutable one, which is invoked as `walk_walkable!(v, n, mut)`.
macro_rules! walk_walkable {
    ($visitor:expr, $expr:expr, ) => {
        Walkable::walk_ref($expr, $visitor)
    };
}
// Generates a `Visitable` impl from a closure-like specification:
// `impl_visitable!(|&'a self: Ty, vis: &mut V, extra: ExtraTy| { ... })`.
//
// NOTE(review): the method's return type is written literally as `V::Result`,
// so invocations must name the visitor type parameter exactly `V` — confirm
// against call sites.
macro_rules! impl_visitable {
    (|&$lt:lifetime $self:ident: $self_ty:ty,
        $vis:ident: &mut $vis_ty:ident,
        $extra:ident: $extra_ty:ty| $block:block) => {
        // `non_local_definitions` is allowed because this macro is expanded
        // inside function bodies by other macros.
        #[allow(unused_parens, non_local_definitions)]
        impl<$lt, $vis_ty: Visitor<$lt>> Visitable<$lt, $vis_ty> for $self_ty {
            type Extra = $extra_ty;
            fn visit(&$lt $self, $vis: &mut $vis_ty, $extra: Self::Extra) -> V::Result {
                $block
            }
        }
    };
}
// Generates a `Walkable` impl from a closure-like specification. The optional
// `<$K: $Kb>` prefix adds one extra generic parameter to the generated impl.
// As with `impl_visitable!`, the literal `V::Result` return type requires the
// visitor parameter to be spelled `V` at every invocation.
macro_rules! impl_walkable {
    ($(<$K:ident: $Kb:ident>)? |&$lt:lifetime $self:ident: $self_ty:ty,
        $vis:ident: &mut $vis_ty:ident| $block:block) => {
        #[allow(unused_parens, non_local_definitions)]
        impl<$($K: $Kb,)? $lt, $vis_ty: Visitor<$lt>> Walkable<$lt, $vis_ty> for $self_ty {
            fn walk_ref(&$lt $self, $vis: &mut $vis_ty) -> V::Result {
                $block
            }
        }
    };
}
// Generates no-op `Visitable` impls: each listed type is "visited" by
// immediately producing the visitor's continue value. Unlike the mutable
// flavor, the body must return `V::Result::output()` explicitly because the
// read-only visitor's result may be a `ControlFlow`-style type.
macro_rules! impl_visitable_noop {
    (<$lt:lifetime> $($ty:ty,)*) => {
        $(
            impl_visitable!(|&$lt self: $ty, _vis: &mut V, _extra: ()| {
                V::Result::output()
            });
        )*
    };
}
// Generates `Visitable` impls for list-like types: elements are visited in
// iteration order, each with the same `extra` (element `Extra` is `Copy` per
// the trait), short-circuiting through `try_visit!` if the visitor breaks.
macro_rules! impl_visitable_list {
    (<$lt:lifetime> $($ty:ty,)*) => {
        $(impl<$lt, V: Visitor<$lt>, T> Visitable<$lt, V> for $ty
        where
            // Only by-reference iteration is required of the container.
            &$lt $ty: IntoIterator<Item = &$lt T>,
            T: $lt + Visitable<$lt, V>,
        {
            type Extra = <T as Visitable<$lt, V>>::Extra;
            #[inline]
            fn visit(&$lt self, visitor: &mut V, extra: Self::Extra) -> V::Result {
                for i in self {
                    try_visit!(i.visit(visitor, extra));
                }
                V::Result::output()
            }
        })*
    };
}
// Generates `Visitable` impls that forward straight to `Walkable`: these are
// types without a dedicated `visit_*` hook on the visitor, so visiting them
// is just walking their substructure.
macro_rules! impl_visitable_direct {
    (<$lt:lifetime> $($ty:ty,)*) => {
        $(impl_visitable!(
            |&$lt self: $ty, visitor: &mut V, _extra: ()| {
                Walkable::walk_ref(self, visitor)
            }
        );)*
    };
}
// For each `fn visit_foo(Ty[, extra: ExtraTy]);` line this generates:
//  1. a default trait method `visit_foo(&mut self, node: &'a Ty, ...)` whose
//     body walks the node (`walk_walkable!(self, node, )`), and
//  2. a `Visitable` impl for `Ty` that dispatches back into the visitor's
//     `visit_foo`, unpacking the optional extra with the one-element tuple
//     pattern `let ($($extra_name)?) = extra;`.
// The generated method returns `Self::Result`, so visitor breaks propagate.
macro_rules! impl_visitable_calling_walkable {
    (<$lt:lifetime>
        $( fn $method:ident($ty:ty $(, $extra_name:ident: $extra_ty:ty)?); )*
    ) => {
        $(fn $method(&mut self, node: &$lt $ty $(, $extra_name:$extra_ty)?) -> Self::Result {
            impl_visitable!(|&$lt self: $ty, visitor: &mut V, extra: ($($extra_ty)?)| {
                let ($($extra_name)?) = extra;
                visitor.$method(self $(, $extra_name)?)
            });
            walk_walkable!(self, node, )
        })*
    };
}
// Generates free `pub fn walk_*` helpers (read-only flavor): each forwards to
// the type's `Walkable` impl and returns the visitor's result. These preserve
// the long-standing public `walk_foo(visitor, node)` API on top of the
// trait-based machinery.
macro_rules! define_named_walk {
    ($Visitor:ident<$lt:lifetime>
        $( pub fn $method:ident($ty:ty); )*
    ) => {
        $(pub fn $method<$lt, V: $Visitor<$lt>>(visitor: &mut V, node: &$lt $ty) -> V::Result {
            walk_walkable!(visitor, node,)
        })*
    };
}
#[macro_export]
macro_rules! common_visitor_and_walkers {
($(($mut: ident))? $Visitor:ident$(<$lt:lifetime>)?) => {
@@ -120,6 +355,139 @@ pub fn ctxt(&self) -> Option<FnCtxt> {
}
}
// This macro generates `impl Visitable` and `impl MutVisitable` that do nothing.
impl_visitable_noop!(<$($lt)? $($mut)?>
AttrId,
bool,
rustc_span::ByteSymbol,
char,
crate::token::CommentKind,
crate::token::Delimiter,
crate::token::Lit,
crate::token::LitKind,
crate::tokenstream::LazyAttrTokenStream,
crate::tokenstream::TokenStream,
Movability,
Mutability,
Result<(), rustc_span::ErrorGuaranteed>,
rustc_data_structures::fx::FxHashMap<Symbol, usize>,
rustc_span::ErrorGuaranteed,
std::borrow::Cow<'_, str>,
Symbol,
u8,
usize,
);
// `Span` is only a no-op for the non-mutable visitor.
$(impl_visitable_noop!(<$lt> Span,);)?
// This macro generates `impl Visitable` and `impl MutVisitable` that simply iterate over
// their contents. We do not use a generic impl for `ThinVec` because we want to allow
// custom visits for the `MutVisitor`.
impl_visitable_list!(<$($lt)? $($mut)?>
ThinVec<AngleBracketedArg>,
ThinVec<Attribute>,
ThinVec<(Ident, Option<Ident>)>,
ThinVec<(NodeId, Path)>,
ThinVec<PathSegment>,
ThinVec<PreciseCapturingArg>,
ThinVec<P<Pat>>,
ThinVec<P<Ty>>,
ThinVec<P<TyPat>>,
);
// This macro generates `impl Visitable` and `impl MutVisitable` that forward to `Walkable`
// or `MutWalkable`. By default, all types that do not have a custom visit method in the
// visitor should appear here.
impl_visitable_direct!(<$($lt)? $($mut)?>
AngleBracketedArg,
AngleBracketedArgs,
AsmMacro,
AssignOpKind,
AssocItemConstraintKind,
AttrArgs,
AttrItem,
AttrKind,
AttrStyle,
FnPtrTy,
BindingMode,
GenBlockKind,
RangeLimits,
UnsafeBinderCastKind,
BinOpKind,
BlockCheckMode,
BorrowKind,
BoundAsyncness,
BoundConstness,
BoundPolarity,
ByRef,
Closure,
Const,
ConstItem,
Defaultness,
Delegation,
DelegationMac,
DelimArgs,
DelimSpan,
EnumDef,
Extern,
ForLoopKind,
FormatArgPosition,
FormatArgsPiece,
FormatArgument,
FormatArgumentKind,
FormatArguments,
FormatPlaceholder,
GenericParamKind,
Impl,
ImplPolarity,
Inline,
InlineAsmOperand,
InlineAsmRegOrRegClass,
InlineAsmTemplatePiece,
IsAuto,
LocalKind,
MacCallStmt,
MacStmtStyle,
MatchKind,
MethodCall,
ModKind,
ModSpans,
MutTy,
NormalAttr,
Parens,
ParenthesizedArgs,
PatFieldsRest,
PatKind,
RangeEnd,
RangeSyntax,
Recovered,
Safety,
StaticItem,
StrLit,
StrStyle,
StructExpr,
StructRest,
Term,
Trait,
TraitBoundModifiers,
TraitObjectSyntax,
TyAlias,
TyAliasWhereClause,
TyAliasWhereClauses,
TyKind,
TyPatKind,
UnOp,
UnsafeBinderTy,
UnsafeSource,
UseTreeKind,
VisibilityKind,
WhereBoundPredicate,
WhereClause,
WhereEqPredicate,
WhereRegionPredicate,
YieldKind,
);
/// Each method of this trait is a hook to be potentially
/// overridden. Each method's default implementation recursively visits
/// the substructure of the input via the corresponding `walk` method;
@@ -169,47 +537,82 @@ pub trait $Visitor<$($lt)?> : Sized $(${ignore($mut)} + MutVisitorResult<Result
// field access version will continue working and it would be easy to
// forget to add handling for it.
fn visit_ident(&mut self, Ident { name: _, span }: &$($lt)? $($mut)? Ident) -> Self::Result {
impl_visitable!(|&$($lt)? $($mut)? self: Ident, visitor: &mut V, _extra: ()| {
visitor.visit_ident(self)
});
visit_span(self, span)
}
fn visit_foreign_mod(&mut self, nm: &$($lt)? $($mut)? ForeignMod) -> Self::Result {
walk_foreign_mod(self, nm)
}
// This macro defines a custom visit method for each listed type.
// It implements `impl Visitable` and `impl MutVisitable` to call those methods on the
// visitor.
impl_visitable_calling_walkable!(<$($lt)? $($mut)?>
fn visit_anon_const(AnonConst);
fn visit_arm(Arm);
//fn visit_assoc_item(AssocItem, _ctxt: AssocCtxt);
fn visit_assoc_item_constraint(AssocItemConstraint);
fn visit_attribute(Attribute);
fn visit_block(Block);
//fn visit_nested_use_tree((UseTree, NodeId));
fn visit_capture_by(CaptureBy);
fn visit_closure_binder(ClosureBinder);
fn visit_contract(FnContract);
fn visit_coroutine_kind(CoroutineKind);
fn visit_crate(Crate);
fn visit_expr(Expr);
fn visit_expr_field(ExprField);
fn visit_field_def(FieldDef);
fn visit_fn_decl(FnDecl);
fn visit_fn_header(FnHeader);
fn visit_fn_ret_ty(FnRetTy);
//fn visit_foreign_item(ForeignItem);
fn visit_foreign_mod(ForeignMod);
fn visit_format_args(FormatArgs);
fn visit_generic_arg(GenericArg);
fn visit_generic_args(GenericArgs);
fn visit_generic_param(GenericParam);
fn visit_generics(Generics);
fn visit_inline_asm(InlineAsm);
fn visit_inline_asm_sym(InlineAsmSym);
//fn visit_item(Item);
fn visit_label(Label);
fn visit_lifetime(Lifetime, _ctxt: LifetimeCtxt);
fn visit_local(Local);
fn visit_mac_call(MacCall);
fn visit_macro_def(MacroDef);
fn visit_param_bound(GenericBound, _ctxt: BoundKind);
fn visit_param(Param);
fn visit_pat_field(PatField);
fn visit_path(Path);
fn visit_path_segment(PathSegment);
fn visit_pat(Pat);
fn visit_poly_trait_ref(PolyTraitRef);
fn visit_precise_capturing_arg(PreciseCapturingArg);
fn visit_qself(QSelf);
fn visit_trait_ref(TraitRef);
fn visit_ty_pat(TyPat);
fn visit_ty(Ty);
fn visit_use_tree(UseTree);
fn visit_variant_data(VariantData);
fn visit_variant(Variant);
fn visit_vis(Visibility);
fn visit_where_predicate_kind(WherePredicateKind);
fn visit_where_predicate(WherePredicate);
);
fn visit_foreign_item(&mut self, i: &$($lt)? $($mut)? ForeignItem) -> Self::Result {
walk_item(self, i)
}
fn visit_item(&mut self, i: &$($lt)? $($mut)? Item) -> Self::Result {
walk_item(self, i)
}
fn visit_local(&mut self, l: &$($lt)? $($mut)? Local) -> Self::Result {
walk_local(self, l)
}
fn visit_block(&mut self, b: &$($lt)? $($mut)? Block) -> Self::Result {
walk_block(self, b)
}
fn visit_param(&mut self, param: &$($lt)? $($mut)? Param) -> Self::Result {
walk_param(self, param)
}
fn visit_arm(&mut self, a: &$($lt)? $($mut)? Arm) -> Self::Result {
walk_arm(self, a)
}
fn visit_pat(&mut self, p: &$($lt)? $($mut)? Pat) -> Self::Result {
walk_pat(self, p)
}
fn visit_anon_const(&mut self, c: &$($lt)? $($mut)? AnonConst) -> Self::Result {
walk_anon_const(self, c)
}
fn visit_expr(&mut self, ex: &$($lt)? $($mut)? Expr) -> Self::Result {
walk_expr(self, ex)
// We want `Visitor` to take the `NodeId` by value.
fn visit_id(&mut self, _id: $(&$mut)? NodeId) -> Self::Result {
$(impl_visitable!(
|&$lt self: NodeId, visitor: &mut V, _extra: ()| {
visitor.visit_id(*self)
}
);)?
$(impl_visitable!(
|&$mut self: NodeId, visitor: &mut V, _extra: ()| {
visitor.visit_id(self)
}
);)?
Self::Result::output()
}
/// This method is a hack to workaround unstable of `stmt_expr_attributes`.
@@ -218,34 +621,25 @@ fn visit_method_receiver_expr(&mut self, ex: &$($lt)? $($mut)? Expr) -> Self::Re
self.visit_expr(ex)
}
fn visit_ty(&mut self, t: &$($lt)? $($mut)? Ty) -> Self::Result {
walk_ty(self, t)
fn visit_item(&mut self, item: &$($lt)? $($mut)? Item) -> Self::Result {
impl_visitable!(|&$($lt)? $($mut)? self: Item, vis: &mut V, _extra: ()| {
vis.visit_item(self)
});
walk_item(self, item)
}
fn visit_ty_pat(&mut self, t: &$($lt)? $($mut)? TyPat) -> Self::Result {
walk_ty_pat(self, t)
fn visit_foreign_item(&mut self, item: &$($lt)? $($mut)? ForeignItem) -> Self::Result {
impl_visitable!(|&$($lt)? $($mut)? self: ForeignItem, vis: &mut V, _extra: ()| {
vis.visit_foreign_item(self)
});
walk_item(self, item)
}
fn visit_generic_param(&mut self, param: &$($lt)? $($mut)? GenericParam) -> Self::Result {
walk_generic_param(self, param)
}
fn visit_generics(&mut self, g: &$($lt)? $($mut)? Generics) -> Self::Result {
walk_generics(self, g)
}
fn visit_closure_binder(&mut self, b: &$($lt)? $($mut)? ClosureBinder) -> Self::Result {
walk_closure_binder(self, b)
}
fn visit_contract(&mut self, c: &$($lt)? $($mut)? FnContract) -> Self::Result {
walk_contract(self, c)
}
fn visit_where_predicate(&mut self, p: &$($lt)? $($mut)? WherePredicate) -> Self::Result {
walk_where_predicate(self, p)
}
fn visit_where_predicate_kind(&mut self, k: &$($lt)? $($mut)? WherePredicateKind) -> Self::Result {
walk_where_predicate_kind(self, k)
fn visit_assoc_item(&mut self, item: &$($lt)? $($mut)? AssocItem, ctxt: AssocCtxt) -> Self::Result {
impl_visitable!(|&$($lt)? $($mut)? self: AssocItem, vis: &mut V, ctxt: AssocCtxt| {
vis.visit_assoc_item(self, ctxt)
});
walk_assoc_item(self, item, ctxt)
}
// for `MutVisitor`: `Span` and `NodeId` are mutated at the caller site.
@@ -258,141 +652,6 @@ fn visit_fn(
walk_fn(self, fk)
}
fn visit_assoc_item(&mut self, i: &$($lt)? $($mut)? AssocItem, ctxt: AssocCtxt) -> Self::Result {
walk_assoc_item(self, i, ctxt)
}
fn visit_trait_ref(&mut self, t: &$($lt)? $($mut)? TraitRef) -> Self::Result {
walk_trait_ref(self, t)
}
fn visit_param_bound(&mut self, bounds: &$($lt)? $($mut)? GenericBound, _ctxt: BoundKind) -> Self::Result {
walk_param_bound(self, bounds)
}
fn visit_precise_capturing_arg(&mut self, arg: &$($lt)? $($mut)? PreciseCapturingArg) -> Self::Result {
walk_precise_capturing_arg(self, arg)
}
fn visit_poly_trait_ref(&mut self, t: &$($lt)? $($mut)? PolyTraitRef) -> Self::Result {
walk_poly_trait_ref(self, t)
}
fn visit_variant_data(&mut self, s: &$($lt)? $($mut)? VariantData) -> Self::Result {
walk_variant_data(self, s)
}
fn visit_field_def(&mut self, s: &$($lt)? $($mut)? FieldDef) -> Self::Result {
walk_field_def(self, s)
}
fn visit_variant(&mut self, v: &$($lt)? $($mut)? Variant) -> Self::Result {
walk_variant(self, v)
}
fn visit_label(&mut self, label: &$($lt)? $($mut)? Label) -> Self::Result {
walk_label(self, label)
}
fn visit_lifetime(&mut self, lifetime: &$($lt)? $($mut)? Lifetime, $(${ignore($lt)} _: LifetimeCtxt )?) -> Self::Result {
walk_lifetime(self, lifetime)
}
fn visit_mac_call(&mut self, mac: &$($lt)? $($mut)? MacCall) -> Self::Result {
walk_mac(self, mac)
}
fn visit_id(&mut self, _id: $(&$mut)? NodeId) -> Self::Result {
Self::Result::output()
}
fn visit_macro_def(&mut self, macro_def: &$($lt)? $($mut)? MacroDef) -> Self::Result {
walk_macro_def(self, macro_def)
}
fn visit_path(&mut self, path: &$($lt)? $($mut)? Path) -> Self::Result {
walk_path(self, path)
}
fn visit_use_tree(&mut self, use_tree: &$($lt)? $($mut)? UseTree) -> Self::Result {
walk_use_tree(self, use_tree)
}
fn visit_path_segment(&mut self, path_segment: &$($lt)? $($mut)? PathSegment) -> Self::Result {
walk_path_segment(self, path_segment)
}
fn visit_generic_args(&mut self, generic_args: &$($lt)? $($mut)? GenericArgs) -> Self::Result {
walk_generic_args(self, generic_args)
}
fn visit_generic_arg(&mut self, generic_arg: &$($lt)? $($mut)? GenericArg) -> Self::Result {
walk_generic_arg(self, generic_arg)
}
fn visit_assoc_item_constraint(
&mut self,
constraint: &$($lt)? $($mut)? AssocItemConstraint,
) -> Self::Result {
walk_assoc_item_constraint(self, constraint)
}
fn visit_attribute(&mut self, attr: &$($lt)? $($mut)? Attribute) -> Self::Result {
walk_attribute(self, attr)
}
fn visit_vis(&mut self, vis: &$($lt)? $($mut)? Visibility) -> Self::Result {
walk_vis(self, vis)
}
fn visit_fn_ret_ty(&mut self, ret_ty: &$($lt)? $($mut)? FnRetTy) -> Self::Result {
walk_fn_ret_ty(self, ret_ty)
}
fn visit_fn_header(&mut self, header: &$($lt)? $($mut)? FnHeader) -> Self::Result {
walk_fn_header(self, header)
}
fn visit_expr_field(&mut self, f: &$($lt)? $($mut)? ExprField) -> Self::Result {
walk_expr_field(self, f)
}
fn visit_pat_field(&mut self, fp: &$($lt)? $($mut)? PatField) -> Self::Result {
walk_pat_field(self, fp)
}
fn visit_crate(&mut self, krate: &$($lt)? $($mut)? Crate) -> Self::Result {
walk_crate(self, krate)
}
fn visit_inline_asm(&mut self, asm: &$($lt)? $($mut)? InlineAsm) -> Self::Result {
walk_inline_asm(self, asm)
}
fn visit_format_args(&mut self, fmt: &$($lt)? $($mut)? FormatArgs) -> Self::Result {
walk_format_args(self, fmt)
}
fn visit_inline_asm_sym(&mut self, sym: &$($lt)? $($mut)? InlineAsmSym) -> Self::Result {
walk_inline_asm_sym(self, sym)
}
fn visit_capture_by(&mut self, capture_by: &$($lt)? $($mut)? CaptureBy) -> Self::Result {
walk_capture_by(self, capture_by)
}
fn visit_coroutine_kind(&mut self, coroutine_kind: &$($lt)? $($mut)? CoroutineKind) -> Self::Result {
walk_coroutine_kind(self, coroutine_kind)
}
fn visit_fn_decl(&mut self, fn_decl: &$($lt)? $($mut)? FnDecl) -> Self::Result {
walk_fn_decl(self, fn_decl)
}
fn visit_qself(&mut self, qs: &$($lt)? $($mut)? Option<P<QSelf>>) -> Self::Result {
walk_qself(self, qs)
}
// (non-mut) `Visitor`-only methods
$(
fn visit_stmt(&mut self, s: &$lt Stmt) -> Self::Result {
@@ -407,6 +666,16 @@ fn visit_nested_use_tree(&mut self, use_tree: &$lt UseTree, id: NodeId) -> Self:
// `MutVisitor`-only methods
$(
// Span visiting is no longer used, but we keep it for now,
// in case it's needed for something like #127241.
#[inline]
fn visit_span(&mut self, _sp: &$mut Span) {
impl_visitable!(|&mut self: Span, visitor: &mut V, _extra: ()| {
visitor.visit_span(self)
});
// Do nothing.
}
fn flat_map_foreign_item(&mut self, ni: P<ForeignItem>) -> SmallVec<[P<ForeignItem>; 1]> {
walk_flat_map_foreign_item(self, ni)
}
@@ -462,12 +731,6 @@ fn flat_map_where_predicate(
walk_flat_map_where_predicate(self, where_predicate)
}
// Span visiting is no longer used, but we keep it for now,
// in case it's needed for something like #127241.
fn visit_span(&mut self, _sp: &$mut Span) {
// Do nothing.
}
fn flat_map_pat_field(&mut self, fp: PatField) -> SmallVec<[PatField; 1]> {
walk_flat_map_pat_field(self, fp)
}
@@ -492,148 +755,45 @@ fn walk<$($lt,)? V: $Visitor$(<$lt>)?>(
#[inline]
)?
fn visit_span<$($lt,)? V: $Visitor$(<$lt>)?>(vis: &mut V, span: &$($lt)? $($mut)? Span) -> V::Result {
$(
${ignore($mut)}
vis.visit_span(span);
)?
$(${ignore($mut)} vis.visit_span(span))?;
V::Result::output()
}
/// helper since `Visitor` wants `NodeId` but `MutVisitor` wants `&mut NodeId`
$(${ignore($lt)}
#[expect(rustc::pass_by_value)]
)?
#[inline]
fn visit_id<$($lt,)? V: $Visitor$(<$lt>)?>(vis: &mut V, id: &$($lt)? $($mut)? NodeId) -> V::Result {
// deref `&NodeId` into `NodeId` only for `Visitor`
vis.visit_id( $(${ignore($lt)} * )? id)
}
// this is only used by the MutVisitor. We include this symmetry here to make writing other functions easier
fn visit_safety<$($lt,)? V: $Visitor$(<$lt>)?>(vis: &mut V, safety: &$($lt)? $($mut)? Safety) -> V::Result {
match safety {
Safety::Unsafe(span) => visit_span(vis, span),
Safety::Safe(span) => visit_span(vis, span),
Safety::Default => { V::Result::output() }
}
}
fn visit_constness<$($lt,)? V: $Visitor$(<$lt>)?>(vis: &mut V, constness: &$($lt)? $($mut)? Const) -> V::Result {
match constness {
Const::Yes(span) => visit_span(vis, span),
Const::No => {
V::Result::output()
}
}
}
fn visit_defaultness<$($lt,)? V: $Visitor$(<$lt>)?>(vis: &mut V, defaultness: &$($lt)? $($mut)? Defaultness) -> V::Result {
match defaultness {
Defaultness::Default(span) => visit_span(vis, span),
Defaultness::Final => {
V::Result::output()
}
}
}
fn visit_polarity<$($lt,)? V: $Visitor$(<$lt>)?>(
vis: &mut V,
polarity: &$($lt)? $($mut)? ImplPolarity,
) -> V::Result {
match polarity {
ImplPolarity::Positive => { V::Result::output() }
ImplPolarity::Negative(span) => visit_span(vis, span),
}
}
$(${ignore($lt)}
#[inline]
)?
fn visit_modifiers<$($lt,)? V: $Visitor$(<$lt>)?>(
vis: &mut V,
m: &$($lt)? $($mut)? TraitBoundModifiers
) -> V::Result {
let TraitBoundModifiers { constness, asyncness, polarity } = m;
match constness {
BoundConstness::Never => {}
BoundConstness::Always(span) | BoundConstness::Maybe(span) => try_visit!(visit_span(vis, span)),
}
match asyncness {
BoundAsyncness::Normal => {}
BoundAsyncness::Async(span) => try_visit!(visit_span(vis, span)),
}
match polarity {
BoundPolarity::Positive => {}
BoundPolarity::Negative(span) | BoundPolarity::Maybe(span) => try_visit!(visit_span(vis, span)),
$(impl_visitable!(|&$lt self: ThinVec<(UseTree, NodeId)>, vis: &mut V, _extra: ()| {
for (nested_tree, nested_id) in self {
try_visit!(vis.visit_nested_use_tree(nested_tree, *nested_id));
}
V::Result::output()
}
});)?
$(impl_visitable_list!(<$mut> ThinVec<(UseTree, NodeId)>,);)?
$(${ignore($lt)}
#[inline]
)?
fn walk_capture_by<$($lt,)? V: $Visitor$(<$lt>)?>(
vis: &mut V,
capture_by: &$($lt)? $($mut)? CaptureBy
) -> V::Result {
match capture_by {
CaptureBy::Ref => { V::Result::output() }
CaptureBy::Value { move_kw } => {
visit_span(vis, move_kw)
}
CaptureBy::Use { use_kw } => {
visit_span(vis, use_kw)
}
}
}
fn visit_bounds<$($lt,)? V: $Visitor$(<$lt>)?>(visitor: &mut V, bounds: &$($lt)? $($mut)? GenericBounds, ctxt: BoundKind) -> V::Result {
walk_list!(visitor, visit_param_bound, bounds, ctxt);
V::Result::output()
}
pub fn walk_label<$($lt,)? V: $Visitor$(<$lt>)?>(visitor: &mut V, Label { ident }: &$($lt)? $($mut)? Label) -> V::Result {
visitor.visit_ident(ident)
}
pub fn walk_fn_header<$($lt,)? V: $Visitor$(<$lt>)?>(visitor: &mut V, header: &$($lt)? $($mut)? FnHeader) -> V::Result {
let FnHeader { safety, coroutine_kind, constness, ext: _ } = header;
try_visit!(visit_constness(visitor, constness));
visit_opt!(visitor, visit_coroutine_kind, coroutine_kind);
visit_safety(visitor, safety)
}
pub fn walk_lifetime<$($lt,)? V: $Visitor$(<$lt>)?>(visitor: &mut V, Lifetime { id, ident }: &$($lt)? $($mut)? Lifetime) -> V::Result {
try_visit!(visit_id(visitor, id));
visitor.visit_ident(ident)
}
fn walk_item_ctxt<$($lt,)? V: $Visitor$(<$lt>)?, K: WalkItemKind>(
fn walk_item_inner<$($lt,)? K: WalkItemKind, V: $Visitor$(<$lt>)?>(
visitor: &mut V,
item: &$($mut)? $($lt)? Item<K>,
ctxt: K::Ctxt,
) -> V::Result {
let Item { attrs, id, kind, vis, span, tokens: _ } = item;
try_visit!(visit_id(visitor, id));
walk_list!(visitor, visit_attribute, attrs);
try_visit!(visitor.visit_vis(vis));
visit_visitable!($($mut)? visitor, id, attrs, vis);
try_visit!(kind.walk(*span, *id, vis, ctxt, visitor));
visit_span(visitor, span)
visit_visitable!($($mut)? visitor, span);
V::Result::output()
}
pub fn walk_item<$($lt,)? V: $Visitor$(<$lt>)?, K: WalkItemKind<Ctxt = ()>>(
// Do not implement `Walkable`/`MutWalkable` for *Item to avoid confusion.
pub fn walk_item<$($lt,)? K: WalkItemKind<Ctxt = ()>, V: $Visitor$(<$lt>)?>(
visitor: &mut V,
item: &$($mut)? $($lt)? Item<K>,
) -> V::Result {
walk_item_ctxt(visitor, item, ())
walk_item_inner(visitor, item, ())
}
pub fn walk_assoc_item<$($lt,)? V: $Visitor$(<$lt>)?>(
// Do not implement `Walkable`/`MutWalkable` for *Item to avoid confusion.
pub fn walk_assoc_item<$($lt,)? K: WalkItemKind<Ctxt = AssocCtxt>, V: $Visitor$(<$lt>)?>(
visitor: &mut V,
item: &$($mut)? $($lt)? AssocItem,
item: &$($mut)? $($lt)? Item<K>,
ctxt: AssocCtxt,
) -> V::Result {
walk_item_ctxt(visitor, item, ctxt)
walk_item_inner(visitor, item, ctxt)
}
impl WalkItemKind for ItemKind {
@@ -647,180 +807,52 @@ fn walk<$($lt,)? V: $Visitor$(<$lt>)?>(
vis: &mut V,
) -> V::Result {
match self {
ItemKind::ExternCrate(_orig_name, ident) => vis.visit_ident(ident),
ItemKind::Use(use_tree) => vis.visit_use_tree(use_tree),
ItemKind::Static(box StaticItem {
ident,
ty,
safety: _,
mutability: _,
expr,
define_opaque,
}) => {
try_visit!(vis.visit_ident(ident));
try_visit!(vis.visit_ty(ty));
visit_opt!(vis, visit_expr, expr);
walk_define_opaques(vis, define_opaque)
}
ItemKind::Const(item) => {
walk_const_item(vis, item)
}
ItemKind::Fn(func) => {
let kind = FnKind::Fn(FnCtxt::Free, visibility, &$($mut)? *func);
vis.visit_fn(kind, span, id)
}
ItemKind::Mod(safety, ident, mod_kind) => {
try_visit!(visit_safety(vis, safety));
try_visit!(vis.visit_ident(ident));
match mod_kind {
ModKind::Loaded(
items,
_inline,
ModSpans { inner_span, inject_use_span },
_,
) => {
try_visit!(visit_items(vis, items));
try_visit!(visit_span(vis, inner_span));
try_visit!(visit_span(vis, inject_use_span));
}
ModKind::Unloaded => {}
}
V::Result::output()
}
ItemKind::ForeignMod(nm) => vis.visit_foreign_mod(nm),
ItemKind::GlobalAsm(asm) => vis.visit_inline_asm(asm),
ItemKind::TyAlias(box TyAlias {
defaultness,
ident,
generics,
$(${ignore($lt)} #[expect(unused)])?
where_clauses,
bounds,
ty,
}) => {
try_visit!(visit_defaultness(vis, defaultness));
try_visit!(vis.visit_ident(ident));
try_visit!(vis.visit_generics(generics));
try_visit!(visit_bounds(vis, bounds, BoundKind::Bound));
visit_opt!(vis, visit_ty, ty);
$(${ignore($mut)}
walk_ty_alias_where_clauses(vis, where_clauses);
)?
V::Result::output()
}
ItemKind::Enum(ident, generics, enum_definition) => {
try_visit!(vis.visit_ident(ident));
try_visit!(vis.visit_generics(generics));
visit_variants(vis, &$($mut)? enum_definition.variants)
try_visit!(vis.visit_fn(kind, span, id));
}
ItemKind::ExternCrate(orig_name, ident) =>
visit_visitable!($($mut)? vis, orig_name, ident),
ItemKind::Use(use_tree) =>
visit_visitable!($($mut)? vis, use_tree),
ItemKind::Static(item) =>
visit_visitable!($($mut)? vis, item),
ItemKind::Const(item) =>
visit_visitable!($($mut)? vis, item),
ItemKind::Mod(safety, ident, mod_kind) =>
visit_visitable!($($mut)? vis, safety, ident, mod_kind),
ItemKind::ForeignMod(nm) =>
visit_visitable!($($mut)? vis, nm),
ItemKind::GlobalAsm(asm) =>
visit_visitable!($($mut)? vis, asm),
ItemKind::TyAlias(ty_alias) =>
visit_visitable!($($mut)? vis, ty_alias),
ItemKind::Enum(ident, generics, enum_definition) =>
visit_visitable!($($mut)? vis, ident, generics, enum_definition),
ItemKind::Struct(ident, generics, variant_data)
| ItemKind::Union(ident, generics, variant_data) => {
try_visit!(vis.visit_ident(ident));
try_visit!(vis.visit_generics(generics));
vis.visit_variant_data(variant_data)
}
ItemKind::Impl(box Impl {
defaultness,
safety,
generics,
constness,
polarity,
of_trait,
self_ty,
items,
}) => {
try_visit!(visit_defaultness(vis, defaultness));
try_visit!(visit_safety(vis, safety));
try_visit!(vis.visit_generics(generics));
try_visit!(visit_constness(vis, constness));
try_visit!(visit_polarity(vis, polarity));
visit_opt!(vis, visit_trait_ref, of_trait);
try_visit!(vis.visit_ty(self_ty));
visit_assoc_items(vis, items, AssocCtxt::Impl { of_trait: of_trait.is_some() })
}
ItemKind::Trait(box Trait { constness, safety, is_auto: _, ident, generics, bounds, items }) => {
try_visit!(visit_constness(vis, constness));
try_visit!(visit_safety(vis, safety));
try_visit!(vis.visit_ident(ident));
try_visit!(vis.visit_generics(generics));
try_visit!(visit_bounds(vis, bounds, BoundKind::Bound));
visit_assoc_items(vis, items, AssocCtxt::Trait)
}
| ItemKind::Union(ident, generics, variant_data) =>
visit_visitable!($($mut)? vis, ident, generics, variant_data),
ItemKind::Impl(impl_) =>
visit_visitable!($($mut)? vis, impl_),
ItemKind::Trait(trait_) =>
visit_visitable!($($mut)? vis, trait_),
ItemKind::TraitAlias(ident, generics, bounds) => {
try_visit!(vis.visit_ident(ident));
try_visit!(vis.visit_generics(generics));
visit_bounds(vis, bounds, BoundKind::Bound)
}
ItemKind::MacCall(m) => vis.visit_mac_call(m),
ItemKind::MacroDef(ident, def) => {
try_visit!(vis.visit_ident(ident));
vis.visit_macro_def(def)
}
ItemKind::Delegation(box Delegation {
id,
qself,
path,
ident,
rename,
body,
from_glob: _,
}) => {
try_visit!(visit_id(vis, id));
try_visit!(vis.visit_qself(qself));
try_visit!(vis.visit_path(path));
try_visit!(vis.visit_ident(ident));
visit_opt!(vis, visit_ident, rename);
visit_opt!(vis, visit_block, body);
V::Result::output()
}
ItemKind::DelegationMac(box DelegationMac { qself, prefix, suffixes, body }) => {
try_visit!(vis.visit_qself(qself));
try_visit!(vis.visit_path(prefix));
if let Some(suffixes) = suffixes {
for (ident, rename) in suffixes {
try_visit!(vis.visit_ident(ident));
visit_opt!(vis, visit_ident, rename);
}
}
visit_opt!(vis, visit_block, body);
V::Result::output()
visit_visitable!($($mut)? vis, ident, generics);
visit_visitable_with!($($mut)? vis, bounds, BoundKind::Bound)
}
ItemKind::MacCall(m) =>
visit_visitable!($($mut)? vis, m),
ItemKind::MacroDef(ident, def) =>
visit_visitable!($($mut)? vis, ident, def),
ItemKind::Delegation(delegation) =>
visit_visitable!($($mut)? vis, delegation),
ItemKind::DelegationMac(dm) =>
visit_visitable!($($mut)? vis, dm),
}
V::Result::output()
}
}
fn walk_const_item<$($lt,)? V: $Visitor$(<$lt>)?>(
vis: &mut V,
item: &$($lt)? $($mut)? ConstItem,
) -> V::Result {
let ConstItem { defaultness, ident, generics, ty, expr, define_opaque } = item;
try_visit!(visit_defaultness(vis, defaultness));
try_visit!(vis.visit_ident(ident));
try_visit!(vis.visit_generics(generics));
try_visit!(vis.visit_ty(ty));
visit_opt!(vis, visit_expr, expr);
walk_define_opaques(vis, define_opaque)
}
fn walk_foreign_mod<$($lt,)? V: $Visitor$(<$lt>)?>(vis: &mut V, foreign_mod: &$($lt)? $($mut)? ForeignMod) -> V::Result {
let ForeignMod { extern_span: _, safety, abi: _, items } = foreign_mod;
try_visit!(visit_safety(vis, safety));
visit_foreign_items(vis, items)
}
fn walk_define_opaques<$($lt,)? V: $Visitor$(<$lt>)?>(
visitor: &mut V,
define_opaque: &$($lt)? $($mut)? Option<ThinVec<(NodeId, Path)>>,
) -> V::Result {
if let Some(define_opaque) = define_opaque {
for (id, path) in define_opaque {
try_visit!(visit_id(visitor, id));
try_visit!(visitor.visit_path(path));
}
}
V::Result::output()
}
impl WalkItemKind for AssocItemKind {
type Ctxt = AssocCtxt;
fn walk<$($lt,)? V: $Visitor$(<$lt>)?>(
@@ -832,64 +864,22 @@ fn walk<$($lt,)? V: $Visitor$(<$lt>)?>(
vis: &mut V,
) -> V::Result {
match self {
AssocItemKind::Const(item) => {
walk_const_item(vis, item)
}
AssocItemKind::Const(item) =>
visit_visitable!($($mut)? vis, item),
AssocItemKind::Fn(func) => {
vis.visit_fn(FnKind::Fn(FnCtxt::Assoc(ctxt), visibility, &$($mut)? *func), span, id)
}
AssocItemKind::Type(box TyAlias {
generics,
ident,
bounds,
ty,
defaultness,
$(${ignore($lt)} #[expect(unused)])?
where_clauses,
}) => {
try_visit!(visit_defaultness(vis, defaultness));
try_visit!(vis.visit_ident(ident));
try_visit!(vis.visit_generics(generics));
try_visit!(visit_bounds(vis, bounds, BoundKind::Bound));
visit_opt!(vis, visit_ty, ty);
$(${ignore($mut)}
walk_ty_alias_where_clauses(vis, where_clauses);
)?
V::Result::output()
}
AssocItemKind::MacCall(mac) => {
vis.visit_mac_call(mac)
}
AssocItemKind::Delegation(box Delegation {
id,
qself,
path,
ident,
rename,
body,
from_glob: _,
}) => {
try_visit!(visit_id(vis, id));
try_visit!(vis.visit_qself(qself));
try_visit!(vis.visit_path(path));
try_visit!(vis.visit_ident(ident));
visit_opt!(vis, visit_ident, rename);
visit_opt!(vis, visit_block, body);
V::Result::output()
}
AssocItemKind::DelegationMac(box DelegationMac { qself, prefix, suffixes, body }) => {
try_visit!(vis.visit_qself(qself));
try_visit!(vis.visit_path(prefix));
if let Some(suffixes) = suffixes {
for (ident, rename) in suffixes {
try_visit!(vis.visit_ident(ident));
visit_opt!(vis, visit_ident, rename);
}
}
visit_opt!(vis, visit_block, body);
V::Result::output()
let kind = FnKind::Fn(FnCtxt::Assoc(ctxt), visibility, &$($mut)? *func);
try_visit!(vis.visit_fn(kind, span, id))
}
AssocItemKind::Type(alias) =>
visit_visitable!($($mut)? vis, alias),
AssocItemKind::MacCall(mac) =>
visit_visitable!($($mut)? vis, mac),
AssocItemKind::Delegation(delegation) =>
visit_visitable!($($mut)? vis, delegation),
AssocItemKind::DelegationMac(dm) =>
visit_visitable!($($mut)? vis, dm),
}
V::Result::output()
}
}
@@ -904,545 +894,18 @@ fn walk<$($lt,)? V: $Visitor$(<$lt>)?>(
vis: &mut V,
) -> V::Result {
match self {
ForeignItemKind::Static(box StaticItem {
ident,
ty,
mutability: _,
expr,
safety: _,
define_opaque,
}) => {
try_visit!(vis.visit_ident(ident));
try_visit!(vis.visit_ty(ty));
visit_opt!(vis, visit_expr, expr);
walk_define_opaques(vis, define_opaque)
}
ForeignItemKind::Static(item) =>
visit_visitable!($($mut)? vis, item),
ForeignItemKind::Fn(func) => {
vis.visit_fn(FnKind::Fn(FnCtxt::Foreign, visibility, &$($mut)?*func), span, id)
}
ForeignItemKind::TyAlias(box TyAlias {
defaultness,
ident,
generics,
bounds,
ty,
$(${ignore($lt)} #[expect(unused)])?
where_clauses,
}) => {
try_visit!(visit_defaultness(vis, defaultness));
try_visit!(vis.visit_ident(ident));
try_visit!(vis.visit_generics(generics));
try_visit!(visit_bounds(vis, bounds, BoundKind::Bound));
visit_opt!(vis, visit_ty, ty);
$(${ignore($mut)}
walk_ty_alias_where_clauses(vis, where_clauses);
)?
V::Result::output()
}
ForeignItemKind::MacCall(mac) => {
vis.visit_mac_call(mac)
let kind = FnKind::Fn(FnCtxt::Foreign, visibility, &$($mut)?*func);
try_visit!(vis.visit_fn(kind, span, id))
}
ForeignItemKind::TyAlias(alias) =>
visit_visitable!($($mut)? vis, alias),
ForeignItemKind::MacCall(mac) =>
visit_visitable!($($mut)? vis, mac),
}
}
}
fn walk_coroutine_kind<$($lt,)? V: $Visitor$(<$lt>)?>(
vis: &mut V,
coroutine_kind: &$($lt)? $($mut)? CoroutineKind,
) -> V::Result {
let (CoroutineKind::Async { span, closure_id, return_impl_trait_id }
| CoroutineKind::Gen { span, closure_id, return_impl_trait_id }
| CoroutineKind::AsyncGen { span, closure_id, return_impl_trait_id })
= coroutine_kind;
try_visit!(visit_id(vis, closure_id));
try_visit!(visit_id(vis, return_impl_trait_id));
visit_span(vis, span)
}
pub fn walk_pat<$($lt,)? V: $Visitor$(<$lt>)?>(
vis: &mut V,
pattern: &$($lt)? $($mut)? Pat
) -> V::Result {
let Pat { id, kind, span, tokens: _ } = pattern;
try_visit!(visit_id(vis, id));
match kind {
PatKind::Err(_guar) => {}
PatKind::Missing | PatKind::Wild | PatKind::Rest | PatKind::Never => {}
PatKind::Ident(_bmode, ident, optional_subpattern) => {
try_visit!(vis.visit_ident(ident));
visit_opt!(vis, visit_pat, optional_subpattern);
}
PatKind::Expr(expression) => try_visit!(vis.visit_expr(expression)),
PatKind::TupleStruct(opt_qself, path, elems) => {
try_visit!(vis.visit_qself(opt_qself));
try_visit!(vis.visit_path(path));
walk_list!(vis, visit_pat, elems);
}
PatKind::Path(opt_qself, path) => {
try_visit!(vis.visit_qself(opt_qself));
try_visit!(vis.visit_path(path))
}
PatKind::Struct(opt_qself, path, fields, _rest) => {
try_visit!(vis.visit_qself(opt_qself));
try_visit!(vis.visit_path(path));
try_visit!(visit_pat_fields(vis, fields));
}
PatKind::Box(subpattern) | PatKind::Deref(subpattern) | PatKind::Paren(subpattern) => {
try_visit!(vis.visit_pat(subpattern));
}
PatKind::Ref(subpattern, _ /*mutbl*/) => {
try_visit!(vis.visit_pat(subpattern));
}
PatKind::Range(lower_bound, upper_bound, _end) => {
visit_opt!(vis, visit_expr, lower_bound);
visit_opt!(vis, visit_expr, upper_bound);
try_visit!(visit_span(vis, span));
}
PatKind::Guard(subpattern, guard_condition) => {
try_visit!(vis.visit_pat(subpattern));
try_visit!(vis.visit_expr(guard_condition));
}
PatKind::Tuple(elems) | PatKind::Slice(elems) | PatKind::Or(elems) => {
walk_list!(vis, visit_pat, elems);
}
PatKind::MacCall(mac) => try_visit!(vis.visit_mac_call(mac)),
}
visit_span(vis, span)
}
pub fn walk_anon_const<$($lt,)? V: $Visitor$(<$lt>)?>(
vis: &mut V,
constant: &$($lt)? $($mut)? AnonConst,
) -> V::Result {
let AnonConst { id, value } = constant;
try_visit!(visit_id(vis, id));
vis.visit_expr(value)
}
pub fn walk_path_segment<$($lt,)? V: $Visitor$(<$lt>)?>(
vis: &mut V,
segment: &$($lt)? $($mut)? PathSegment,
) -> V::Result {
let PathSegment { ident, id, args } = segment;
try_visit!(visit_id(vis, id));
try_visit!(vis.visit_ident(ident));
visit_opt!(vis, visit_generic_args, args);
V::Result::output()
}
pub fn walk_block<$($lt,)? V: $Visitor$(<$lt>)?>(
vis: &mut V,
block: &$($lt)? $($mut)? Block
) -> V::Result {
let Block { stmts, id, rules: _, span, tokens: _ } = block;
try_visit!(visit_id(vis, id));
try_visit!(visit_stmts(vis, stmts));
visit_span(vis, span)
}
pub fn walk_ty<$($lt,)? V: $Visitor$(<$lt>)?>(
vis: &mut V, ty: &$($lt)? $($mut)? Ty
) -> V::Result {
let Ty { id, kind, span, tokens: _ } = ty;
try_visit!(visit_id(vis, id));
match kind {
TyKind::Err(_guar) => {}
TyKind::Infer | TyKind::ImplicitSelf | TyKind::Dummy | TyKind::Never | TyKind::CVarArgs => {}
TyKind::Slice(ty) | TyKind::Paren(ty) => try_visit!(vis.visit_ty(ty)),
TyKind::Ptr(MutTy { ty, mutbl: _ }) => try_visit!(vis.visit_ty(ty)),
TyKind::Ref(opt_lifetime, MutTy { ty, mutbl: _ })
| TyKind::PinnedRef(opt_lifetime, MutTy { ty, mutbl: _ }) => {
// FIXME(fee1-dead) asymmetry
visit_opt!(vis, visit_lifetime, opt_lifetime$(${ignore($lt)}, LifetimeCtxt::Ref)?);
try_visit!(vis.visit_ty(ty));
}
TyKind::Tup(tuple_element_types) => {
walk_list!(vis, visit_ty, tuple_element_types);
}
TyKind::FnPtr(function_declaration) => {
let FnPtrTy { safety, ext: _, generic_params, decl, decl_span } =
&$($mut)? **function_declaration;
try_visit!(visit_safety(vis, safety));
try_visit!(visit_generic_params(vis, generic_params));
try_visit!(vis.visit_fn_decl(decl));
try_visit!(visit_span(vis, decl_span));
}
TyKind::UnsafeBinder(binder) => {
try_visit!(visit_generic_params(vis, &$($mut)? binder.generic_params));
try_visit!(vis.visit_ty(&$($mut)? binder.inner_ty));
}
TyKind::Path(maybe_qself, path) => {
try_visit!(vis.visit_qself(maybe_qself));
try_visit!(vis.visit_path(path));
}
TyKind::Pat(ty, pat) => {
try_visit!(vis.visit_ty(ty));
try_visit!(vis.visit_ty_pat(pat));
}
TyKind::Array(ty, length) => {
try_visit!(vis.visit_ty(ty));
try_visit!(vis.visit_anon_const(length));
}
TyKind::TraitObject(bounds, _syntax) => {
walk_list!(vis, visit_param_bound, bounds, BoundKind::TraitObject);
}
TyKind::ImplTrait(id, bounds) => {
try_visit!(visit_id(vis, id));
walk_list!(vis, visit_param_bound, bounds, BoundKind::Impl);
}
TyKind::Typeof(expression) => try_visit!(vis.visit_anon_const(expression)),
TyKind::MacCall(mac) => try_visit!(vis.visit_mac_call(mac)),
}
visit_span(vis, span)
}
pub fn walk_crate<$($lt,)? V: $Visitor$(<$lt>)?>(
vis: &mut V,
krate: &$($lt)? $($mut)? Crate,
) -> V::Result {
let Crate { attrs, items, spans, id, is_placeholder: _ } = krate;
try_visit!(visit_id(vis, id));
walk_list!(vis, visit_attribute, attrs);
try_visit!(visit_items(vis, items));
let ModSpans { inner_span, inject_use_span } = spans;
try_visit!(visit_span(vis, inner_span));
visit_span(vis, inject_use_span)
}
pub fn walk_local<$($lt,)? V: $Visitor$(<$lt>)?>(
vis: &mut V,
local: &$($lt)? $($mut)? Local,
) -> V::Result {
let Local { id, super_, pat, ty, kind, span, colon_sp, attrs, tokens: _ } = local;
if let Some(sp) = super_ {
try_visit!(visit_span(vis, sp));
}
try_visit!(visit_id(vis, id));
walk_list!(vis, visit_attribute, attrs);
try_visit!(vis.visit_pat(pat));
visit_opt!(vis, visit_ty, ty);
match kind {
LocalKind::Decl => {}
LocalKind::Init(init) => {
try_visit!(vis.visit_expr(init))
}
LocalKind::InitElse(init, els) => {
try_visit!(vis.visit_expr(init));
try_visit!(vis.visit_block(els));
}
}
if let Some(sp) = colon_sp {
try_visit!(visit_span(vis, sp));
}
visit_span(vis, span)
}
pub fn walk_poly_trait_ref<$($lt,)? V: $Visitor$(<$lt>)?>(
vis: &mut V,
p: &$($lt)? $($mut)? PolyTraitRef,
) -> V::Result {
let PolyTraitRef { bound_generic_params, modifiers, trait_ref, span, parens: _ } = p;
try_visit!(visit_modifiers(vis, modifiers));
try_visit!(visit_generic_params(vis, bound_generic_params));
try_visit!(vis.visit_trait_ref(trait_ref));
visit_span(vis, span)
}
pub fn walk_trait_ref<$($lt,)? V: $Visitor$(<$lt>)?>(
vis: &mut V,
TraitRef { path, ref_id }: &$($lt)? $($mut)? TraitRef,
) -> V::Result {
try_visit!(vis.visit_path(path));
visit_id(vis, ref_id)
}
pub fn walk_variant<$($lt,)? V: $Visitor$(<$lt>)?>(
vis: &mut V,
variant: &$($lt)? $($mut)? Variant,
) -> V::Result {
let Variant { attrs, id, span, vis: visibility, ident, data, disr_expr, is_placeholder: _ } = variant;
try_visit!(visit_id(vis, id));
walk_list!(vis, visit_attribute, attrs);
try_visit!(vis.visit_vis(visibility));
try_visit!(vis.visit_ident(ident));
try_visit!(vis.visit_variant_data(data));
visit_opt!(vis, visit_anon_const, disr_expr);
visit_span(vis, span)
}
pub fn walk_expr_field<$($lt,)? V: $Visitor$(<$lt>)?>(
vis: &mut V,
f: &$($lt)? $($mut)? ExprField,
) -> V::Result {
let ExprField { attrs, id, span, ident, expr, is_shorthand: _, is_placeholder: _ } = f;
try_visit!(visit_id(vis, id));
walk_list!(vis, visit_attribute, attrs);
try_visit!(vis.visit_ident(ident));
try_visit!(vis.visit_expr(expr));
visit_span(vis, span)
}
pub fn walk_pat_field<$($lt,)? V: $Visitor$(<$lt>)?>(
vis: &mut V,
fp: &$($lt)? $($mut)? PatField,
) -> V::Result {
let PatField { ident, pat, is_shorthand: _, attrs, id, span, is_placeholder: _ } = fp;
try_visit!(visit_id(vis, id));
walk_list!(vis, visit_attribute, attrs);
try_visit!(vis.visit_ident(ident));
try_visit!(vis.visit_pat(pat));
visit_span(vis, span)
}
pub fn walk_ty_pat<$($lt,)? V: $Visitor$(<$lt>)?>(
vis: &mut V,
tp: &$($lt)? $($mut)? TyPat,
) -> V::Result {
let TyPat { id, kind, span, tokens: _ } = tp;
try_visit!(visit_id(vis, id));
match kind {
TyPatKind::Range(start, end, Spanned { span, node: _include_end }) => {
visit_opt!(vis, visit_anon_const, start);
visit_opt!(vis, visit_anon_const, end);
try_visit!(visit_span(vis, span));
}
TyPatKind::Or(variants) => walk_list!(vis, visit_ty_pat, variants),
TyPatKind::Err(_) => {}
}
visit_span(vis, span)
}
fn walk_qself<$($lt,)? V: $Visitor$(<$lt>)?>(
vis: &mut V,
qself: &$($lt)? $($mut)? Option<P<QSelf>>,
) -> V::Result {
if let Some(qself) = qself {
let QSelf { ty, path_span, position: _ } = &$($mut)? **qself;
try_visit!(vis.visit_ty(ty));
try_visit!(visit_span(vis, path_span));
}
V::Result::output()
}
pub fn walk_path<$($lt,)? V: $Visitor$(<$lt>)?>(
vis: &mut V,
path: &$($lt)? $($mut)? Path,
) -> V::Result {
let Path { span, segments, tokens: _ } = path;
walk_list!(vis, visit_path_segment, segments);
visit_span(vis, span)
}
pub fn walk_use_tree<$($lt,)? V: $Visitor$(<$lt>)?>(
vis: &mut V,
use_tree: &$($lt)? $($mut)? UseTree,
) -> V::Result {
let UseTree { prefix, kind, span } = use_tree;
try_visit!(vis.visit_path(prefix));
match kind {
UseTreeKind::Simple(rename) => {
// The extra IDs are handled during AST lowering.
visit_opt!(vis, visit_ident, rename);
}
UseTreeKind::Glob => {}
UseTreeKind::Nested { items, span } => {
for (nested_tree, nested_id) in items {
try_visit!(visit_nested_use_tree(vis, nested_tree, nested_id));
}
try_visit!(visit_span(vis, span));
}
}
visit_span(vis, span)
}
pub fn walk_generic_args<$($lt,)? V: $Visitor$(<$lt>)?>(
vis: &mut V,
generic_args: &$($lt)? $($mut)? GenericArgs
) -> V::Result {
match generic_args {
GenericArgs::AngleBracketed(AngleBracketedArgs { span, args }) => {
for arg in args {
match arg {
AngleBracketedArg::Arg(a) => try_visit!(vis.visit_generic_arg(a)),
AngleBracketedArg::Constraint(c) => {
try_visit!(vis.visit_assoc_item_constraint(c))
}
}
}
visit_span(vis, span)
}
GenericArgs::Parenthesized(data) => {
let ParenthesizedArgs { span, inputs, inputs_span, output } = data;
walk_list!(vis, visit_ty, inputs);
try_visit!(vis.visit_fn_ret_ty(output));
try_visit!(visit_span(vis, span));
visit_span(vis, inputs_span)
}
GenericArgs::ParenthesizedElided(span) => visit_span(vis, span)
}
}
pub fn walk_generic_arg<$($lt,)? V: $Visitor$(<$lt>)?>(
vis: &mut V,
generic_arg: &$($lt)? $($mut)? GenericArg,
) -> V::Result {
match generic_arg {
GenericArg::Lifetime(lt) => vis.visit_lifetime(lt, $(${ignore($lt)} LifetimeCtxt::GenericArg)? ),
GenericArg::Type(ty) => vis.visit_ty(ty),
GenericArg::Const(ct) => vis.visit_anon_const(ct),
}
}
pub fn walk_assoc_item_constraint<$($lt,)? V: $Visitor$(<$lt>)?>(
vis: &mut V,
constraint: &$($lt)? $($mut)? AssocItemConstraint,
) -> V::Result {
let AssocItemConstraint { id, ident, gen_args, kind, span } = constraint;
try_visit!(visit_id(vis, id));
try_visit!(vis.visit_ident(ident));
visit_opt!(vis, visit_generic_args, gen_args);
match kind {
AssocItemConstraintKind::Equality { term } => match term {
Term::Ty(ty) => try_visit!(vis.visit_ty(ty)),
Term::Const(c) => try_visit!(vis.visit_anon_const(c)),
},
AssocItemConstraintKind::Bound { bounds } => {
try_visit!(visit_bounds(vis, bounds, BoundKind::Bound));
}
}
visit_span(vis, span)
}
pub fn walk_param_bound<$($lt,)? V: $Visitor$(<$lt>)?>(vis: &mut V, bound: &$($lt)? $($mut)? GenericBound) -> V::Result {
match bound {
GenericBound::Trait(trait_ref) => vis.visit_poly_trait_ref(trait_ref),
GenericBound::Outlives(lifetime) => vis.visit_lifetime(lifetime, $(${ignore($lt)} LifetimeCtxt::Bound)?),
GenericBound::Use(args, span) => {
walk_list!(vis, visit_precise_capturing_arg, args);
visit_span(vis, span)
}
}
}
pub fn walk_precise_capturing_arg<$($lt,)? V: $Visitor$(<$lt>)?>(
vis: &mut V,
arg: &$($lt)? $($mut)? PreciseCapturingArg,
) -> V::Result {
match arg {
PreciseCapturingArg::Lifetime(lt) => vis.visit_lifetime(lt, $(${ignore($lt)} LifetimeCtxt::GenericArg)?),
PreciseCapturingArg::Arg(path, id) => {
try_visit!(visit_id(vis, id));
vis.visit_path(path)
}
}
}
pub fn walk_generic_param<$($lt,)? V: $Visitor$(<$lt>)?>(
vis: &mut V,
param: &$($lt)? $($mut)? GenericParam,
) -> V::Result {
let GenericParam { id, ident, attrs, bounds, is_placeholder: _, kind, colon_span } =
param;
try_visit!(visit_id(vis, id));
walk_list!(vis, visit_attribute, attrs);
try_visit!(vis.visit_ident(ident));
walk_list!(vis, visit_param_bound, bounds, BoundKind::Bound);
match kind {
GenericParamKind::Lifetime => (),
GenericParamKind::Type { default } => visit_opt!(vis, visit_ty, default),
GenericParamKind::Const { ty, default, span } => {
try_visit!(vis.visit_ty(ty));
visit_opt!(vis, visit_anon_const, default);
try_visit!(visit_span(vis, span));
}
}
if let Some(sp) = colon_span {
try_visit!(visit_span(vis, sp))
}
V::Result::output()
}
pub fn walk_generics<$($lt,)? V: $Visitor$(<$lt>)?>(vis: &mut V, generics: &$($lt)? $($mut)? Generics) -> V::Result {
let Generics { params, where_clause, span } = generics;
let WhereClause { has_where_token: _, predicates, span: where_clause_span } = where_clause;
try_visit!(visit_generic_params(vis, params));
try_visit!(visit_where_predicates(vis, predicates));
try_visit!(visit_span(vis, span));
visit_span(vis, where_clause_span)
}
pub fn walk_contract<$($lt,)? V: $Visitor$(<$lt>)?>(vis: &mut V, c: &$($lt)? $($mut)? FnContract) -> V::Result {
let FnContract { requires, ensures } = c;
visit_opt!(vis, visit_expr, requires);
visit_opt!(vis, visit_expr, ensures);
V::Result::output()
}
pub fn walk_where_predicate<$($lt,)? V: $Visitor$(<$lt>)?>(
vis: &mut V,
predicate: &$($lt)? $($mut)? WherePredicate,
) -> V::Result {
let WherePredicate { attrs, kind, id, span, is_placeholder: _ } = predicate;
try_visit!(visit_id(vis, id));
walk_list!(vis, visit_attribute, attrs);
try_visit!(visit_span(vis, span));
vis.visit_where_predicate_kind(kind)
}
pub fn walk_closure_binder<$($lt,)? V: $Visitor$(<$lt>)?>(
vis: &mut V,
binder: &$($lt)? $($mut)? ClosureBinder,
) -> V::Result {
match binder {
ClosureBinder::NotPresent => {}
ClosureBinder::For { generic_params, span } => {
try_visit!(visit_generic_params(vis, generic_params));
try_visit!(visit_span(vis, span));
}
}
V::Result::output()
}
pub fn walk_where_predicate_kind<$($lt,)? V: $Visitor$(<$lt>)?>(
vis: &mut V,
kind: &$($lt)? $($mut)? WherePredicateKind,
) -> V::Result {
match kind {
WherePredicateKind::BoundPredicate(WhereBoundPredicate {
bounded_ty,
bounds,
bound_generic_params,
}) => {
try_visit!(visit_generic_params(vis, bound_generic_params));
try_visit!(vis.visit_ty(bounded_ty));
walk_list!(vis, visit_param_bound, bounds, BoundKind::Bound);
}
WherePredicateKind::RegionPredicate(WhereRegionPredicate { lifetime, bounds }) => {
try_visit!(vis.visit_lifetime(lifetime, $(${ignore($lt)} LifetimeCtxt::Bound )?));
walk_list!(vis, visit_param_bound, bounds, BoundKind::Bound);
}
WherePredicateKind::EqPredicate(WhereEqPredicate { lhs_ty, rhs_ty }) => {
try_visit!(vis.visit_ty(lhs_ty));
try_visit!(vis.visit_ty(rhs_ty));
}
}
V::Result::output()
}
pub fn walk_fn_decl<$($lt,)? V: $Visitor$(<$lt>)?>(
vis: &mut V,
FnDecl { inputs, output }: &$($lt)? $($mut)? FnDecl,
) -> V::Result {
try_visit!(visit_params(vis, inputs));
vis.visit_fn_ret_ty(output)
}
pub fn walk_fn_ret_ty<$($lt,)? V: $Visitor$(<$lt>)?>(vis: &mut V, ret_ty: &$($lt)? $($mut)? FnRetTy) -> V::Result {
match ret_ty {
FnRetTy::Default(span) => visit_span(vis, span),
FnRetTy::Ty(output_ty) => vis.visit_ty(output_ty),
V::Result::output()
}
}
@@ -1450,455 +913,200 @@ pub fn walk_fn<$($lt,)? V: $Visitor$(<$lt>)?>(vis: &mut V, kind: FnKind<$($lt)?
match kind {
FnKind::Fn(
_ctxt,
_vis,
Fn {
defaultness,
ident,
sig: FnSig { header, decl, span },
generics,
contract,
body,
define_opaque,
},
) => {
// Visibility is visited as a part of the item.
try_visit!(visit_defaultness(vis, defaultness));
try_visit!(vis.visit_ident(ident));
try_visit!(vis.visit_fn_header(header));
try_visit!(vis.visit_generics(generics));
try_visit!(vis.visit_fn_decl(decl));
visit_opt!(vis, visit_contract, contract);
visit_opt!(vis, visit_block, body);
try_visit!(visit_span(vis, span));
walk_define_opaques(vis, define_opaque)
_vis,
Fn { defaultness, ident, sig, generics, contract, body, define_opaque },
) => {
let FnSig { header, decl, span } = sig;
visit_visitable!($($mut)? vis,
defaultness, ident, header, generics, decl,
contract, body, span, define_opaque
)
}
FnKind::Closure(binder, coroutine_kind, decl, body) => {
try_visit!(vis.visit_closure_binder(binder));
visit_opt!(vis, visit_coroutine_kind, coroutine_kind);
try_visit!(vis.visit_fn_decl(decl));
vis.visit_expr(body)
}
}
}
pub fn walk_variant_data<$($lt,)? V: $Visitor$(<$lt>)?>(vis: &mut V, data: &$($lt)? $($mut)? VariantData) -> V::Result {
match data {
VariantData::Struct { fields, recovered: _ } => {
visit_field_defs(vis, fields)
}
VariantData::Tuple(fields, id) => {
try_visit!(visit_id(vis, id));
visit_field_defs(vis, fields)
}
VariantData::Unit(id) => visit_id(vis, id),
}
}
pub fn walk_field_def<$($lt,)? V: $Visitor$(<$lt>)?>(vis: &mut V, field: &$($lt)? $($mut)? FieldDef) -> V::Result {
let FieldDef { attrs, id, span, vis: visibility, ident, ty, is_placeholder: _, safety: _, default } =
field;
try_visit!(visit_id(vis, id));
walk_list!(vis, visit_attribute, attrs);
try_visit!(vis.visit_vis(visibility));
visit_opt!(vis, visit_ident, ident);
try_visit!(vis.visit_ty(ty));
visit_opt!(vis, visit_anon_const, default);
visit_span(vis, span)
}
fn visit_delim_args<$($lt,)? V: $Visitor$(<$lt>)?>(vis: &mut V, args: &$($lt)? $($mut)? DelimArgs) -> V::Result {
let DelimArgs { dspan, delim: _, tokens: _ } = args;
let DelimSpan { open, close } = dspan;
try_visit!(visit_span(vis, open));
visit_span(vis, close)
}
pub fn walk_mac<$($lt,)? V: $Visitor$(<$lt>)?>(vis: &mut V, mac: &$($lt)? $($mut)? MacCall) -> V::Result {
let MacCall { path, args } = mac;
try_visit!(vis.visit_path(path));
visit_delim_args(vis, args)
}
fn walk_macro_def<$($lt,)? V: $Visitor$(<$lt>)?>(vis: &mut V, macro_def: &$($lt)? $($mut)? MacroDef) -> V::Result {
let MacroDef { body, macro_rules: _ } = macro_def;
visit_delim_args(vis, body)
}
pub fn walk_inline_asm<$($lt,)? V: $Visitor$(<$lt>)?>(vis: &mut V, asm: &$($lt)? $($mut)? InlineAsm) -> V::Result {
let InlineAsm {
asm_macro: _,
template,
template_strs,
operands,
clobber_abis,
options: _,
line_spans,
} = asm;
for piece in template {
match piece {
InlineAsmTemplatePiece::String(_str) => {}
InlineAsmTemplatePiece::Placeholder { operand_idx: _, modifier: _, span } => {
try_visit!(visit_span(vis, span));
}
}
}
for (_s1, _s2, span) in template_strs {
try_visit!(visit_span(vis, span));
}
for (op, span) in operands {
match op {
InlineAsmOperand::In { expr, reg: _ }
| InlineAsmOperand::Out { expr: Some(expr), reg: _, late: _ }
| InlineAsmOperand::InOut { expr, reg: _, late: _ } => {
try_visit!(vis.visit_expr(expr))
}
InlineAsmOperand::Out { expr: None, reg: _, late: _ } => {}
InlineAsmOperand::SplitInOut { in_expr, out_expr, reg: _, late: _ } => {
try_visit!(vis.visit_expr(in_expr));
visit_opt!(vis, visit_expr, out_expr);
}
InlineAsmOperand::Const { anon_const } => {
try_visit!(vis.visit_anon_const(anon_const))
}
InlineAsmOperand::Sym { sym } => try_visit!(vis.visit_inline_asm_sym(sym)),
InlineAsmOperand::Label { block } => try_visit!(vis.visit_block(block)),
}
try_visit!(visit_span(vis, span));
}
for (_s1, span) in clobber_abis {
try_visit!(visit_span(vis, span))
}
for span in line_spans {
try_visit!(visit_span(vis, span))
FnKind::Closure(binder, coroutine_kind, decl, body) =>
visit_visitable!($($mut)? vis, binder, coroutine_kind, decl, body),
}
V::Result::output()
}
pub fn walk_inline_asm_sym<$($lt,)? V: $Visitor$(<$lt>)?>(
vis: &mut V,
InlineAsmSym { id, qself, path }: &$($lt)? $($mut)? InlineAsmSym,
) -> V::Result {
try_visit!(visit_id(vis, id));
try_visit!(vis.visit_qself(qself));
vis.visit_path(path)
}
pub fn walk_format_args<$($lt,)? V: $Visitor$(<$lt>)?>(vis: &mut V, fmt: &$($lt)? $($mut)? FormatArgs) -> V::Result {
let FormatArgs { span, template, arguments, uncooked_fmt_str: _, is_source_literal: _ } = fmt;
let args = $(${ignore($mut)} arguments.all_args_mut())? $(${ignore($lt)} arguments.all_args())? ;
for FormatArgument { kind, expr } in args {
match kind {
FormatArgumentKind::Named(ident) | FormatArgumentKind::Captured(ident) => {
try_visit!(vis.visit_ident(ident))
}
FormatArgumentKind::Normal => {}
}
try_visit!(vis.visit_expr(expr));
}
for piece in template {
match piece {
FormatArgsPiece::Literal(_symbol) => {}
FormatArgsPiece::Placeholder(placeholder) => try_visit!(walk_format_placeholder(vis, placeholder)),
}
}
visit_span(vis, span)
}
fn walk_format_placeholder<$($lt,)? V: $Visitor$(<$lt>)?>(
vis: &mut V,
placeholder: &$($lt)? $($mut)? FormatPlaceholder,
) -> V::Result {
let FormatPlaceholder { argument, span, format_options, format_trait: _ } = placeholder;
if let Some(span) = span {
try_visit!(visit_span(vis, span));
}
let FormatArgPosition { span, index: _, kind: _ } = argument;
if let Some(span) = span {
try_visit!(visit_span(vis, span));
}
let FormatOptions {
width,
precision,
alignment: _,
fill: _,
sign: _,
alternate: _,
zero_pad: _,
debug_hex: _,
} = format_options;
match width {
None => {}
Some(FormatCount::Literal(_)) => {}
Some(FormatCount::Argument(FormatArgPosition { span, index: _, kind: _ })) => {
if let Some(span) = span {
try_visit!(visit_span(vis, span));
}
}
}
match precision {
None => {}
Some(FormatCount::Literal(_)) => {}
Some(FormatCount::Argument(FormatArgPosition { span, index: _, kind: _ })) => {
if let Some(span) = span {
try_visit!(visit_span(vis, span));
}
}
}
impl_walkable!(|&$($mut)? $($lt)? self: Impl, vis: &mut V| {
let Impl { defaultness, safety, generics, constness, polarity, of_trait, self_ty, items } = self;
visit_visitable!($($mut)? vis, defaultness, safety, generics, constness, polarity, of_trait, self_ty);
visit_visitable_with!($($mut)? vis, items, AssocCtxt::Impl { of_trait: of_trait.is_some() });
V::Result::output()
}
});
pub fn walk_expr<$($lt,)? V: $Visitor$(<$lt>)?>(vis: &mut V, expression: &$($lt)? $($mut)? Expr) -> V::Result {
let Expr { id, kind, span, attrs, tokens: _ } = expression;
try_visit!(visit_id(vis, id));
walk_list!(vis, visit_attribute, attrs);
// Special case to call `visit_method_receiver_expr`.
impl_walkable!(|&$($mut)? $($lt)? self: MethodCall, vis: &mut V| {
let MethodCall { seg, receiver, args, span } = self;
try_visit!(vis.visit_method_receiver_expr(receiver));
visit_visitable!($($mut)? vis, seg, args, span);
V::Result::output()
});
impl_walkable!(|&$($mut)? $($lt)? self: Expr, vis: &mut V| {
let Expr { id, kind, span, attrs, tokens: _ } = self;
visit_visitable!($($mut)? vis, id, attrs);
match kind {
ExprKind::Array(exprs) => {
try_visit!(visit_exprs(vis, exprs));
}
ExprKind::ConstBlock(anon_const) => try_visit!(vis.visit_anon_const(anon_const)),
ExprKind::Repeat(element, count) => {
try_visit!(vis.visit_expr(element));
try_visit!(vis.visit_anon_const(count));
}
ExprKind::Struct(se) => {
let StructExpr { qself, path, fields, rest } = &$($mut)?**se;
try_visit!(vis.visit_qself(qself));
try_visit!(vis.visit_path(path));
try_visit!(visit_expr_fields(vis, fields));
match rest {
StructRest::Base(expr) => try_visit!(vis.visit_expr(expr)),
StructRest::Rest(span) => try_visit!(visit_span(vis, span)),
StructRest::None => {}
}
}
ExprKind::Tup(exprs) => {
try_visit!(visit_exprs(vis, exprs));
}
ExprKind::Call(callee_expression, arguments) => {
try_visit!(vis.visit_expr(callee_expression));
try_visit!(visit_exprs(vis, arguments));
}
ExprKind::MethodCall(box MethodCall { seg, receiver, args, span }) => {
try_visit!(vis.visit_method_receiver_expr(receiver));
try_visit!(vis.visit_path_segment(seg));
try_visit!(visit_exprs(vis, args));
try_visit!(visit_span(vis, span));
}
ExprKind::Binary(Spanned { span, node: _ }, left_expression, right_expression) => {
try_visit!(vis.visit_expr(left_expression));
try_visit!(vis.visit_expr(right_expression));
try_visit!(visit_span(vis, span))
}
ExprKind::AddrOf(_kind, _mutbl, subexpression) => {
try_visit!(vis.visit_expr(subexpression));
}
ExprKind::Unary(_op, subexpression) => {
try_visit!(vis.visit_expr(subexpression));
}
ExprKind::Cast(subexpression, typ) | ExprKind::Type(subexpression, typ) => {
try_visit!(vis.visit_expr(subexpression));
try_visit!(vis.visit_ty(typ));
}
ExprKind::Let(pat, expr, span, _recovered) => {
try_visit!(vis.visit_pat(pat));
try_visit!(vis.visit_expr(expr));
try_visit!(visit_span(vis, span))
}
ExprKind::If(head_expression, if_block, optional_else) => {
try_visit!(vis.visit_expr(head_expression));
try_visit!(vis.visit_block(if_block));
visit_opt!(vis, visit_expr, optional_else);
}
ExprKind::While(subexpression, block, opt_label) => {
visit_opt!(vis, visit_label, opt_label);
try_visit!(vis.visit_expr(subexpression));
try_visit!(vis.visit_block(block));
}
ExprKind::ForLoop { pat, iter, body, label, kind: _ } => {
visit_opt!(vis, visit_label, label);
try_visit!(vis.visit_pat(pat));
try_visit!(vis.visit_expr(iter));
try_visit!(vis.visit_block(body));
}
ExprKind::Loop(block, opt_label, span) => {
visit_opt!(vis, visit_label, opt_label);
try_visit!(vis.visit_block(block));
try_visit!(visit_span(vis, span))
}
ExprKind::Match(subexpression, arms, _kind) => {
try_visit!(vis.visit_expr(subexpression));
try_visit!(visit_arms(vis, arms));
}
ExprKind::Array(exprs) =>
visit_visitable!($($mut)? vis, exprs),
ExprKind::ConstBlock(anon_const) =>
visit_visitable!($($mut)? vis, anon_const),
ExprKind::Repeat(element, count) =>
visit_visitable!($($mut)? vis, element, count),
ExprKind::Struct(se) =>
visit_visitable!($($mut)? vis, se),
ExprKind::Tup(exprs) =>
visit_visitable!($($mut)? vis, exprs),
ExprKind::Call(callee_expression, arguments) =>
visit_visitable!($($mut)? vis, callee_expression, arguments),
ExprKind::MethodCall(mc) =>
visit_visitable!($($mut)? vis, mc),
ExprKind::Binary(op, lhs, rhs) =>
visit_visitable!($($mut)? vis, op, lhs, rhs),
ExprKind::AddrOf(kind, mutbl, subexpression) =>
visit_visitable!($($mut)? vis, kind, mutbl, subexpression),
ExprKind::Unary(op, subexpression) =>
visit_visitable!($($mut)? vis, op, subexpression),
ExprKind::Cast(subexpression, typ) | ExprKind::Type(subexpression, typ) =>
visit_visitable!($($mut)? vis, subexpression, typ),
ExprKind::Let(pat, expr, span, _recovered) =>
visit_visitable!($($mut)? vis, pat, expr, span),
ExprKind::If(head_expression, if_block, optional_else) =>
visit_visitable!($($mut)? vis, head_expression, if_block, optional_else),
ExprKind::While(subexpression, block, opt_label) =>
visit_visitable!($($mut)? vis, subexpression, block, opt_label),
ExprKind::ForLoop { pat, iter, body, label, kind } =>
visit_visitable!($($mut)? vis, pat, iter, body, label, kind),
ExprKind::Loop(block, opt_label, span) =>
visit_visitable!($($mut)? vis, block, opt_label, span),
ExprKind::Match(subexpression, arms, kind) =>
visit_visitable!($($mut)? vis, subexpression, arms, kind),
ExprKind::Closure(box Closure {
binder,
capture_clause,
coroutine_kind,
constness,
movability: _,
movability,
fn_decl,
body,
fn_decl_span,
fn_arg_span,
}) => {
try_visit!(visit_constness(vis, constness));
try_visit!(vis.visit_capture_by(capture_clause));
try_visit!(vis.visit_fn(
FnKind::Closure(binder, coroutine_kind, fn_decl, body),
*span,
*id
));
try_visit!(visit_span(vis, fn_decl_span));
try_visit!(visit_span(vis, fn_arg_span));
}
ExprKind::Block(block, opt_label) => {
visit_opt!(vis, visit_label, opt_label);
try_visit!(vis.visit_block(block));
}
ExprKind::Gen(capture_clause, body, _kind, decl_span) => {
try_visit!(vis.visit_capture_by(capture_clause));
try_visit!(vis.visit_block(body));
try_visit!(visit_span(vis, decl_span));
}
ExprKind::Await(expr, span) => {
try_visit!(vis.visit_expr(expr));
try_visit!(visit_span(vis, span));
}
ExprKind::Use(expr, span) => {
try_visit!(vis.visit_expr(expr));
try_visit!(visit_span(vis, span));
}
ExprKind::Assign(lhs, rhs, span) => {
try_visit!(vis.visit_expr(lhs));
try_visit!(vis.visit_expr(rhs));
try_visit!(visit_span(vis, span));
}
ExprKind::AssignOp(Spanned { span, node: _ }, left_expression, right_expression) => {
try_visit!(vis.visit_expr(left_expression));
try_visit!(vis.visit_expr(right_expression));
try_visit!(visit_span(vis, span));
}
ExprKind::Field(subexpression, ident) => {
try_visit!(vis.visit_expr(subexpression));
try_visit!(vis.visit_ident(ident));
}
ExprKind::Index(main_expression, index_expression, span) => {
try_visit!(vis.visit_expr(main_expression));
try_visit!(vis.visit_expr(index_expression));
try_visit!(visit_span(vis, span));
}
ExprKind::Range(start, end, _limit) => {
visit_opt!(vis, visit_expr, start);
visit_opt!(vis, visit_expr, end);
visit_visitable!($($mut)? vis, constness, movability, capture_clause);
let kind = FnKind::Closure(binder, coroutine_kind, fn_decl, body);
try_visit!(vis.visit_fn(kind, *span, *id));
visit_visitable!($($mut)? vis, fn_decl_span, fn_arg_span);
}
ExprKind::Block(block, opt_label) =>
visit_visitable!($($mut)? vis, block, opt_label),
ExprKind::Gen(capt, body, kind, decl_span) =>
visit_visitable!($($mut)? vis, capt, body, kind, decl_span),
ExprKind::Await(expr, span) | ExprKind::Use(expr, span) =>
visit_visitable!($($mut)? vis, expr, span),
ExprKind::Assign(lhs, rhs, span) =>
visit_visitable!($($mut)? vis, lhs, rhs, span),
ExprKind::AssignOp(op, lhs, rhs) =>
visit_visitable!($($mut)? vis, op, lhs, rhs),
ExprKind::Field(subexpression, ident) =>
visit_visitable!($($mut)? vis, subexpression, ident),
ExprKind::Index(main_expression, index_expression, span) =>
visit_visitable!($($mut)? vis, main_expression, index_expression, span),
ExprKind::Range(start, end, limit) =>
visit_visitable!($($mut)? vis, start, end, limit),
ExprKind::Underscore => {}
ExprKind::Path(maybe_qself, path) => {
try_visit!(vis.visit_qself(maybe_qself));
try_visit!(vis.visit_path(path));
}
ExprKind::Break(opt_label, opt_expr) => {
visit_opt!(vis, visit_label, opt_label);
visit_opt!(vis, visit_expr, opt_expr);
}
ExprKind::Continue(opt_label) => {
visit_opt!(vis, visit_label, opt_label);
}
ExprKind::Ret(optional_expression) => {
visit_opt!(vis, visit_expr, optional_expression);
}
ExprKind::Yeet(optional_expression) => {
visit_opt!(vis, visit_expr, optional_expression);
}
ExprKind::Become(expr) => try_visit!(vis.visit_expr(expr)),
ExprKind::MacCall(mac) => try_visit!(vis.visit_mac_call(mac)),
ExprKind::Paren(subexpression) => try_visit!(vis.visit_expr(subexpression)),
ExprKind::InlineAsm(asm) => try_visit!(vis.visit_inline_asm(asm)),
ExprKind::FormatArgs(f) => try_visit!(vis.visit_format_args(f)),
ExprKind::OffsetOf(container, fields) => {
try_visit!(vis.visit_ty(container));
walk_list!(vis, visit_ident, fields);
}
ExprKind::Yield(kind) => {
match kind {
YieldKind::Postfix(expr) => {
try_visit!(vis.visit_expr(expr));
}
YieldKind::Prefix(expr) => {
visit_opt!(vis, visit_expr, expr);
}
}
}
ExprKind::Try(subexpression) => try_visit!(vis.visit_expr(subexpression)),
ExprKind::TryBlock(body) => try_visit!(vis.visit_block(body)),
ExprKind::Lit(_token) => {}
ExprKind::IncludedBytes(_bytes) => {}
ExprKind::UnsafeBinderCast(_kind, expr, ty) => {
try_visit!(vis.visit_expr(expr));
visit_opt!(vis, visit_ty, ty);
}
ExprKind::Path(maybe_qself, path) =>
visit_visitable!($($mut)? vis, maybe_qself, path),
ExprKind::Break(opt_label, opt_expr) =>
visit_visitable!($($mut)? vis, opt_label, opt_expr),
ExprKind::Continue(opt_label) =>
visit_visitable!($($mut)? vis, opt_label),
ExprKind::Ret(optional_expression) | ExprKind::Yeet(optional_expression) =>
visit_visitable!($($mut)? vis, optional_expression),
ExprKind::Become(expr) =>
visit_visitable!($($mut)? vis, expr),
ExprKind::MacCall(mac) =>
visit_visitable!($($mut)? vis, mac),
ExprKind::Paren(subexpression) =>
visit_visitable!($($mut)? vis, subexpression),
ExprKind::InlineAsm(asm) =>
visit_visitable!($($mut)? vis, asm),
ExprKind::FormatArgs(f) =>
visit_visitable!($($mut)? vis, f),
ExprKind::OffsetOf(container, fields) =>
visit_visitable!($($mut)? vis, container, fields),
ExprKind::Yield(kind) =>
visit_visitable!($($mut)? vis, kind),
ExprKind::Try(subexpression) =>
visit_visitable!($($mut)? vis, subexpression),
ExprKind::TryBlock(body) =>
visit_visitable!($($mut)? vis, body),
ExprKind::Lit(token) =>
visit_visitable!($($mut)? vis, token),
ExprKind::IncludedBytes(bytes) =>
visit_visitable!($($mut)? vis, bytes),
ExprKind::UnsafeBinderCast(kind, expr, ty) =>
visit_visitable!($($mut)? vis, kind, expr, ty),
ExprKind::Err(_guar) => {}
ExprKind::Dummy => {}
}
visit_span(vis, span)
}
});
/// Walks a function/closure parameter: its node id, attributes, pattern, and
/// ascribed type, finishing with the parameter's span.
/// `is_placeholder` is resolver bookkeeping with no visitable substructure.
pub fn walk_param<$($lt,)? V: $Visitor$(<$lt>)?>(vis: &mut V, param: &$($lt)? $($mut)? Param) -> V::Result {
    let Param { attrs, ty, pat, id, span, is_placeholder: _ } = param;
    try_visit!(visit_id(vis, id));
    walk_list!(vis, visit_attribute, attrs);
    // Pattern before type, matching source order `pat: ty`.
    try_visit!(vis.visit_pat(pat));
    try_visit!(vis.visit_ty(ty));
    visit_span(vis, span)
}
/// Walks one `match` arm: node id, attributes, pattern, optional guard
/// expression, and optional body, finishing with the arm's span.
pub fn walk_arm<$($lt,)? V: $Visitor$(<$lt>)?>(vis: &mut V, arm: &$($lt)? $($mut)? Arm) -> V::Result {
    let Arm { attrs, pat, guard, body, span, id, is_placeholder: _ } = arm;
    try_visit!(visit_id(vis, id));
    walk_list!(vis, visit_attribute, attrs);
    try_visit!(vis.visit_pat(pat));
    // Both the guard and the body are optional in the AST, so each is
    // visited only when present.
    visit_opt!(vis, visit_expr, guard);
    visit_opt!(vis, visit_expr, body);
    visit_span(vis, span)
}
/// Walks a visibility annotation. Only restricted visibilities
/// (`pub(in some::path)`) carry visitable content: a node id and a path.
/// `pub` and inherited (private) visibility have no substructure.
pub fn walk_vis<$($lt,)? V: $Visitor$(<$lt>)?>(vis: &mut V, visibility: &$($lt)? $($mut)? Visibility) -> V::Result {
    let Visibility { kind, span, tokens: _ } = visibility;
    match kind {
        VisibilityKind::Restricted { path, id, shorthand: _ } => {
            try_visit!(visit_id(vis, id));
            try_visit!(vis.visit_path(path));
        }
        VisibilityKind::Public | VisibilityKind::Inherited => {}
    }
    visit_span(vis, span)
}
/// Walks an attribute. A normal attribute (`#[path(args)]`) contributes its
/// path and arguments; doc comments carry only a kind and symbol, neither of
/// which has visitable substructure.
pub fn walk_attribute<$($lt,)? V: $Visitor$(<$lt>)?>(vis: &mut V, attr: &$($lt)? $($mut)? Attribute) -> V::Result {
    let Attribute { kind, id: _, style: _, span } = attr;
    match kind {
        AttrKind::Normal(normal) => {
            // Preserved token streams (`tokens`) are not part of the
            // visitable AST and are deliberately skipped.
            let NormalAttr { item, tokens: _ } = &$($mut)?**normal;
            let AttrItem { unsafety: _, path, args, tokens: _ } = item;
            try_visit!(vis.visit_path(path));
            try_visit!(walk_attr_args(vis, args));
        }
        AttrKind::DocComment(_kind, _sym) => {}
    }
    visit_span(vis, span)
}
/// Walks the arguments of an attribute: nothing for `#[attr]`, the delimited
/// token tree for `#[attr(...)]`, and the value expression plus the span of
/// `=` for `#[attr = expr]`.
pub fn walk_attr_args<$($lt,)? V: $Visitor$(<$lt>)?>(vis: &mut V, args: &$($lt)? $($mut)? AttrArgs) -> V::Result {
    match args {
        AttrArgs::Empty => {}
        AttrArgs::Delimited(args) => try_visit!(visit_delim_args(vis, args)),
        AttrArgs::Eq { eq_span, expr } => {
            try_visit!(vis.visit_expr(expr));
            try_visit!(visit_span(vis, eq_span));
        }
    }
    V::Result::output()
}
// Generate the default `walk_*` entry points (one per AST node type listed
// below) for this visitor flavor. Entries that need extra context or special
// handling are commented out here and defined by hand elsewhere.
define_named_walk!($(($mut))? $Visitor$(<$lt>)?
    pub fn walk_anon_const(AnonConst);
    pub fn walk_arm(Arm);
    //pub fn walk_assoc_item(AssocItem, _ctxt: AssocCtxt);
    pub fn walk_assoc_item_constraint(AssocItemConstraint);
    pub fn walk_attribute(Attribute);
    pub fn walk_block(Block);
    //pub fn walk_nested_use_tree((UseTree, NodeId));
    pub fn walk_capture_by(CaptureBy);
    pub fn walk_closure_binder(ClosureBinder);
    pub fn walk_contract(FnContract);
    pub fn walk_coroutine_kind(CoroutineKind);
    pub fn walk_crate(Crate);
    pub fn walk_expr(Expr);
    pub fn walk_expr_field(ExprField);
    pub fn walk_field_def(FieldDef);
    pub fn walk_fn_decl(FnDecl);
    pub fn walk_fn_header(FnHeader);
    pub fn walk_fn_ret_ty(FnRetTy);
    //pub fn walk_foreign_item(ForeignItem);
    pub fn walk_foreign_mod(ForeignMod);
    pub fn walk_format_args(FormatArgs);
    pub fn walk_generic_arg(GenericArg);
    pub fn walk_generic_args(GenericArgs);
    pub fn walk_generic_param(GenericParam);
    pub fn walk_generics(Generics);
    pub fn walk_inline_asm(InlineAsm);
    pub fn walk_inline_asm_sym(InlineAsmSym);
    //pub fn walk_item(Item);
    pub fn walk_label(Label);
    pub fn walk_lifetime(Lifetime);
    pub fn walk_local(Local);
    pub fn walk_mac(MacCall);
    pub fn walk_macro_def(MacroDef);
    pub fn walk_param_bound(GenericBound);
    pub fn walk_param(Param);
    pub fn walk_pat_field(PatField);
    pub fn walk_path(Path);
    pub fn walk_path_segment(PathSegment);
    pub fn walk_pat(Pat);
    pub fn walk_poly_trait_ref(PolyTraitRef);
    pub fn walk_precise_capturing_arg(PreciseCapturingArg);
    pub fn walk_qself(QSelf);
    pub fn walk_trait_ref(TraitRef);
    pub fn walk_ty_pat(TyPat);
    pub fn walk_ty(Ty);
    pub fn walk_use_tree(UseTree);
    pub fn walk_variant_data(VariantData);
    pub fn walk_variant(Variant);
    pub fn walk_vis(Visibility);
    pub fn walk_where_predicate_kind(WherePredicateKind);
    pub fn walk_where_predicate(WherePredicate);
);
};
}
@@ -1907,6 +1115,20 @@ pub fn walk_attr_args<$($lt,)? V: $Visitor$(<$lt>)?>(vis: &mut V, args: &$($lt)?
macro_rules! generate_list_visit_fns {
($($name:ident, $Ty:ty, $visit_fn:ident$(, $param:ident: $ParamTy:ty)*;)+) => {
$(
#[allow(unused_parens)]
impl<'a, V: Visitor<'a>> Visitable<'a, V> for ThinVec<$Ty> {
type Extra = ($($ParamTy),*);
#[inline]
fn visit(
&'a self,
visitor: &mut V,
($($param),*): Self::Extra,
) -> V::Result {
$name(visitor, self $(, $param)*)
}
}
fn $name<'a, V: Visitor<'a>>(
vis: &mut V,
values: &'a ThinVec<$Ty>,
@@ -1937,18 +1159,9 @@ fn $name<'a, V: Visitor<'a>>(
visit_arms, Arm, visit_arm;
}
/// Adapter used by the list-visit machinery: forwards a nested use-tree
/// (e.g. `b` in `use a::{b, c}`) together with its `NodeId` to the visitor's
/// `visit_nested_use_tree` hook.
#[expect(rustc::pass_by_value)] // needed for symmetry with mut_visit
fn visit_nested_use_tree<'a, V: Visitor<'a>>(
    vis: &mut V,
    nested_tree: &'a UseTree,
    // Destructured by-value: `NodeId` is `Copy`; the reference form matches
    // the calling convention the list machinery expects.
    &nested_id: &NodeId,
) -> V::Result {
    vis.visit_nested_use_tree(nested_tree, nested_id)
}
pub fn walk_stmt<'a, V: Visitor<'a>>(visitor: &mut V, statement: &'a Stmt) -> V::Result {
let Stmt { id, kind, span: _ } = statement;
try_visit!(visit_id(visitor, id));
try_visit!(visitor.visit_id(*id));
match kind {
StmtKind::Let(local) => try_visit!(visitor.visit_local(local)),
StmtKind::Item(item) => try_visit!(visitor.visit_item(item)),
+18 -11
View File
@@ -874,25 +874,32 @@ fn lifetime_res_to_generic_param(
/// name resolver owing to lifetime elision; this also populates the resolver's node-id->def-id
/// map, so that later calls to `opt_node_id_to_def_id` that refer to these extra lifetime
/// parameters will be successful.
#[instrument(level = "debug", skip(self))]
#[instrument(level = "debug", skip(self), ret)]
#[inline]
fn lower_lifetime_binder(
&mut self,
binder: NodeId,
generic_params: &[GenericParam],
) -> &'hir [hir::GenericParam<'hir>] {
let mut generic_params: Vec<_> = self
.lower_generic_params_mut(generic_params, hir::GenericParamSource::Binder)
.collect();
// Start by creating params for extra lifetimes params, as this creates the definitions
// that may be referred to by the AST inside `generic_params`.
let extra_lifetimes = self.resolver.extra_lifetime_params(binder);
debug!(?extra_lifetimes);
generic_params.extend(extra_lifetimes.into_iter().filter_map(|(ident, node_id, res)| {
self.lifetime_res_to_generic_param(ident, node_id, res, hir::GenericParamSource::Binder)
}));
let generic_params = self.arena.alloc_from_iter(generic_params);
debug!(?generic_params);
generic_params
let extra_lifetimes: Vec<_> = extra_lifetimes
.into_iter()
.filter_map(|(ident, node_id, res)| {
self.lifetime_res_to_generic_param(
ident,
node_id,
res,
hir::GenericParamSource::Binder,
)
})
.collect();
let arena = self.arena;
let explicit_generic_params =
self.lower_generic_params_mut(generic_params, hir::GenericParamSource::Binder);
arena.alloc_from_iter(explicit_generic_params.chain(extra_lifetimes.into_iter()))
}
fn with_dyn_type_scope<T>(&mut self, in_scope: bool, f: impl FnOnce(&mut Self) -> T) -> T {
@@ -157,6 +157,19 @@ pub enum UsedBy {
Linker,
}
/// Arguments of a `#[macro_use]` attribute: either import every macro from
/// the annotated crate (bare `#[macro_use]`) or only the named ones
/// (`#[macro_use(a, b)]`).
#[derive(Encodable, Decodable, Clone, Debug, PartialEq, Eq, Hash)]
#[derive(HashStable_Generic, PrintAttribute)]
pub enum MacroUseArgs {
    UseAll,
    UseSpecific(ThinVec<Ident>),
}

impl Default for MacroUseArgs {
    // Start from an empty specific-import list; attribute parsing upgrades
    // this to `UseAll` when a bare `#[macro_use]` is encountered.
    fn default() -> Self {
        Self::UseSpecific(ThinVec::new())
    }
}
#[derive(Debug, Clone, Encodable, Decodable, HashStable_Generic)]
pub struct StrippedCfgItem<ModId = DefId> {
pub parent_module: ModId,
@@ -234,6 +247,7 @@ pub enum CfgEntry {
pub enum AttributeKind {
// tidy-alphabetical-start
/// Represents `#[align(N)]`.
// FIXME(#82232, #143834): temporarily renamed to mitigate `#[align]` nameres ambiguity
Align { align: Align, span: Span },
/// Represents `#[rustc_allow_const_fn_unstable]`.
@@ -350,9 +364,15 @@ pub enum AttributeKind {
/// Represents `#[loop_match]`.
LoopMatch(Span),
/// Represents `#[macro_escape]`.
MacroEscape(Span),
/// Represents `#[rustc_macro_transparency]`.
MacroTransparency(Transparency),
/// Represents `#[macro_use]`.
MacroUse { span: Span, arguments: MacroUseArgs },
/// Represents `#[marker]`.
Marker(Span),
@@ -396,12 +416,24 @@ pub enum AttributeKind {
/// Represents `#[pointee]`
Pointee(Span),
/// Represents `#[proc_macro]`
ProcMacro(Span),
/// Represents `#[proc_macro_attribute]`
ProcMacroAttribute(Span),
/// Represents `#[proc_macro_derive]`
ProcMacroDerive { trait_name: Symbol, helper_attrs: ThinVec<Symbol>, span: Span },
/// Represents `#[rustc_pub_transparent]` (used by the `repr_transparent_external_private_fields` lint).
PubTransparent(Span),
/// Represents [`#[repr]`](https://doc.rust-lang.org/stable/reference/type-layout.html#representations).
Repr { reprs: ThinVec<(ReprAttr, Span)>, first_span: Span },
/// Represents `#[rustc_builtin_macro]`.
RustcBuiltinMacro { builtin_name: Option<Symbol>, helper_attrs: ThinVec<Symbol>, span: Span },
/// Represents `#[rustc_layout_scalar_valid_range_end]`.
RustcLayoutScalarValidRangeEnd(Box<u128>, Span),
@@ -45,7 +45,9 @@ pub fn encode_cross_crate(&self) -> EncodeCrossCrate {
LinkOrdinal { .. } => No,
LinkSection { .. } => Yes, // Needed for rustdoc
LoopMatch(..) => No,
MacroEscape(..) => No,
MacroTransparency(..) => Yes,
MacroUse { .. } => No,
Marker(..) => No,
MayDangle(..) => No,
MustUse { .. } => Yes,
@@ -59,8 +61,12 @@ pub fn encode_cross_crate(&self) -> EncodeCrossCrate {
PassByValue(..) => Yes,
Path(..) => No,
Pointee(..) => No,
ProcMacro(..) => No,
ProcMacroAttribute(..) => No,
ProcMacroDerive { .. } => No,
PubTransparent(..) => Yes,
Repr { .. } => No,
RustcBuiltinMacro { .. } => Yes,
RustcLayoutScalarValidRangeEnd(..) => Yes,
RustcLayoutScalarValidRangeStart(..) => Yes,
RustcObjectLifetimeDefault => No,
@@ -24,7 +24,7 @@
use rustc_ast::{AttrStyle, IntTy, UintTy};
use rustc_ast_pretty::pp::Printer;
use rustc_span::hygiene::Transparency;
use rustc_span::{ErrorGuaranteed, Span, Symbol};
use rustc_span::{ErrorGuaranteed, Ident, Span, Symbol};
pub use stability::*;
use thin_vec::ThinVec;
pub use version::*;
@@ -172,7 +172,7 @@ fn print_attribute(&self, p: &mut Printer) {
print_tup!(A B C D E F G H);
print_skip!(Span, (), ErrorGuaranteed);
print_disp!(u16, bool, NonZero<u32>);
print_debug!(Symbol, UintTy, IntTy, Align, AttrStyle, CommentKind, Transparency);
print_debug!(Symbol, Ident, UintTy, IntTy, Align, AttrStyle, CommentKind, Transparency);
/// Finds attributes in sequences of attributes by pattern matching.
///
@@ -177,7 +177,8 @@ fn finalize(self, cx: &FinalizeContext<'_, '_, S>) -> Option<AttributeKind> {
sym::instruction_set,
sym::repr,
sym::rustc_std_internal_symbol,
sym::align,
// FIXME(#82232, #143834): temporarily renamed to mitigate `#[align]` nameres ambiguity
sym::rustc_align,
// obviously compatible with self
sym::naked,
// documentation
@@ -0,0 +1,115 @@
use rustc_attr_data_structures::{AttributeKind, MacroUseArgs};
use rustc_errors::DiagArgValue;
use rustc_feature::{AttributeTemplate, template};
use rustc_span::{Span, Symbol, sym};
use thin_vec::ThinVec;
use crate::attributes::{AcceptMapping, AttributeParser, NoArgsAttributeParser, OnDuplicate};
use crate::context::{AcceptContext, FinalizeContext, Stage};
use crate::parser::ArgParser;
use crate::session_diagnostics;
/// Parser for `#[macro_escape]`. Accepts no arguments; a duplicate attribute
/// only warns rather than erroring, and each occurrence is recorded as
/// `AttributeKind::MacroEscape` with its span.
pub(crate) struct MacroEscapeParser;
impl<S: Stage> NoArgsAttributeParser<S> for MacroEscapeParser {
    const PATH: &[Symbol] = &[sym::macro_escape];
    const ON_DUPLICATE: OnDuplicate<S> = OnDuplicate::Warn;
    const CREATE: fn(Span) -> AttributeKind = AttributeKind::MacroEscape;
}
/// Parser state for `#[macro_use]`, which can either:
/// - import all macros from a crate, when written without arguments, or
/// - import specific macros, when written as `#[macro_use(macro1, macro2)]`.
///
/// A specific-import attribute combined with a use-all, or a repeated
/// use-all, is redundant and produces an unused-duplicate warning.
#[derive(Default)]
pub(crate) struct MacroUseParser {
    /// Combined interpretation of all `#[macro_use]` attributes seen so far.
    state: MacroUseArgs,
    /// Spans of every `#[macro_use(...)]` attribute (the form *with*
    /// arguments), kept so they can be linted as redundant if a bare
    /// `#[macro_use]` is encountered later.
    uses_attr_spans: ThinVec<Span>,
    /// Span of the attribute that determined the current `state`: the first
    /// attribute seen while `state` is `UseSpecific`, or the first bare
    /// `#[macro_use]` once `state` becomes `UseAll`. `None` until any
    /// `#[macro_use]` attribute has been seen.
    first_span: Option<Span>,
}

// Accepted shapes: a bare word (`#[macro_use]`) or a name list
// (`#[macro_use(name1, name2)]`). `name = value` form is rejected.
const MACRO_USE_TEMPLATE: AttributeTemplate = template!(Word, List: "name1, name2, ...");
impl<S: Stage> AttributeParser<S> for MacroUseParser {
    const ATTRIBUTES: AcceptMapping<Self, S> = &[(
        &[sym::macro_use],
        MACRO_USE_TEMPLATE,
        // Folds each `#[macro_use]` occurrence into the accumulated `state`,
        // warning on redundant combinations (see the struct docs).
        |group: &mut Self, cx: &mut AcceptContext<'_, '_, S>, args| {
            let span = cx.attr_span;
            // Record the first attribute's span; later arms may overwrite it
            // when the state is upgraded to `UseAll`.
            group.first_span.get_or_insert(span);
            match args {
                // Bare `#[macro_use]`: everything is imported.
                ArgParser::NoArgs => {
                    match group.state {
                        MacroUseArgs::UseAll => {
                            // NOTE(review): expect-message wording is garbled
                            // ("State is UseAll is some ...") — candidate for
                            // a follow-up cleanup; left untouched here.
                            let first_span = group.first_span.expect(
                                "State is UseAll is some so this is not the first attribute",
                            );
                            // Since there is a `#[macro_use]` import already, give a warning
                            cx.warn_unused_duplicate(first_span, span);
                        }
                        MacroUseArgs::UseSpecific(_) => {
                            // Upgrade: a bare `#[macro_use]` subsumes any
                            // specific imports seen so far.
                            group.state = MacroUseArgs::UseAll;
                            group.first_span = Some(span);
                            // If there is a `#[macro_use]` attribute, warn on all `#[macro_use(...)]` attributes since everything is already imported
                            for specific_use in group.uses_attr_spans.drain(..) {
                                cx.warn_unused_duplicate(span, specific_use);
                            }
                        }
                    }
                }
                // `#[macro_use(a, b, ...)]`: import only the listed macros.
                ArgParser::List(list) => {
                    if list.is_empty() {
                        cx.warn_empty_attribute(list.span);
                        return;
                    }
                    match &mut group.state {
                        MacroUseArgs::UseAll => {
                            // A use-all already imports everything, so this
                            // specific list is redundant.
                            let first_span = group.first_span.expect(
                                "State is UseAll is some so this is not the first attribute",
                            );
                            cx.warn_unused_duplicate(first_span, span);
                        }
                        MacroUseArgs::UseSpecific(arguments) => {
                            // Store here so if we encounter a `UseAll` later we can still lint this attribute
                            group.uses_attr_spans.push(cx.attr_span);
                            // Each list element must be a bare identifier
                            // with no arguments of its own; malformed items
                            // are diagnosed and skipped, not fatal.
                            for item in list.mixed() {
                                let Some(item) = item.meta_item() else {
                                    cx.expected_identifier(item.span());
                                    continue;
                                };
                                if let Err(err_span) = item.args().no_args() {
                                    cx.expected_no_args(err_span);
                                    continue;
                                }
                                let Some(item) = item.path().word() else {
                                    cx.expected_identifier(item.span());
                                    continue;
                                };
                                arguments.push(item);
                            }
                        }
                    }
                }
                // `#[macro_use = ...]` is never valid; suggest the accepted forms.
                ArgParser::NameValue(_) => {
                    let suggestions = MACRO_USE_TEMPLATE.suggestions(false, sym::macro_use);
                    cx.emit_err(session_diagnostics::IllFormedAttributeInputLint {
                        num_suggestions: suggestions.len(),
                        suggestions: DiagArgValue::StrListSepByAnd(
                            suggestions.into_iter().map(|s| format!("`{s}`").into()).collect(),
                        ),
                        span,
                    });
                }
            }
        },
    )];

    /// Emits the combined attribute; `None` (via `?`) when no `#[macro_use]`
    /// attribute was seen at all, since `first_span` is only set on accept.
    fn finalize(self, _cx: &FinalizeContext<'_, '_, S>) -> Option<AttributeKind> {
        Some(AttributeKind::MacroUse { span: self.first_span?, arguments: self.state })
    }
}
@@ -36,10 +36,12 @@
pub(crate) mod link_attrs;
pub(crate) mod lint_helpers;
pub(crate) mod loop_match;
pub(crate) mod macro_attrs;
pub(crate) mod must_use;
pub(crate) mod no_implicit_prelude;
pub(crate) mod non_exhaustive;
pub(crate) mod path;
pub(crate) mod proc_macro_attrs;
pub(crate) mod repr;
pub(crate) mod rustc_internal;
pub(crate) mod semantics;
@@ -34,7 +34,7 @@ fn convert(cx: &mut AcceptContext<'_, '_, S>, args: &ArgParser<'_>) -> Option<At
ArgParser::List(_) => {
let suggestions =
<Self as SingleAttributeParser<S>>::TEMPLATE.suggestions(false, "must_use");
cx.emit_err(session_diagnostics::MustUseIllFormedAttributeInput {
cx.emit_err(session_diagnostics::IllFormedAttributeInputLint {
num_suggestions: suggestions.len(),
suggestions: DiagArgValue::StrListSepByAnd(
suggestions.into_iter().map(|s| format!("`{s}`").into()).collect(),
@@ -0,0 +1,139 @@
use rustc_attr_data_structures::AttributeKind;
use rustc_feature::{AttributeTemplate, template};
use rustc_span::{Span, Symbol, sym};
use thin_vec::ThinVec;
use crate::attributes::{
AttributeOrder, NoArgsAttributeParser, OnDuplicate, SingleAttributeParser,
};
use crate::context::{AcceptContext, Stage};
use crate::parser::ArgParser;
/// Parser for `#[proc_macro]`. Accepts no arguments; duplicates are a hard
/// error; maps to `AttributeKind::ProcMacro` with the attribute's span.
pub(crate) struct ProcMacroParser;
impl<S: Stage> NoArgsAttributeParser<S> for ProcMacroParser {
    const PATH: &[Symbol] = &[sym::proc_macro];
    const ON_DUPLICATE: OnDuplicate<S> = OnDuplicate::Error;
    const CREATE: fn(Span) -> AttributeKind = AttributeKind::ProcMacro;
}
/// Parser for `#[proc_macro_attribute]`. Accepts no arguments; duplicates are
/// a hard error; maps to `AttributeKind::ProcMacroAttribute`.
pub(crate) struct ProcMacroAttributeParser;
impl<S: Stage> NoArgsAttributeParser<S> for ProcMacroAttributeParser {
    const PATH: &[Symbol] = &[sym::proc_macro_attribute];
    const ON_DUPLICATE: OnDuplicate<S> = OnDuplicate::Error;
    const CREATE: fn(Span) -> AttributeKind = AttributeKind::ProcMacroAttribute;
}
/// Parser for `#[proc_macro_derive(TraitName, /*opt*/ attributes(a, b))]`.
/// Shares its argument grammar with `#[rustc_builtin_macro]` via
/// `parse_derive_like`, but here the trait name is mandatory.
pub(crate) struct ProcMacroDeriveParser;
impl<S: Stage> SingleAttributeParser<S> for ProcMacroDeriveParser {
    const PATH: &[Symbol] = &[sym::proc_macro_derive];
    const ATTRIBUTE_ORDER: AttributeOrder = AttributeOrder::KeepOutermost;
    const ON_DUPLICATE: OnDuplicate<S> = OnDuplicate::Error;
    const TEMPLATE: AttributeTemplate =
        template!(List: "TraitName, /*opt*/ attributes(name1, name2, ...)");

    fn convert(cx: &mut AcceptContext<'_, '_, S>, args: &ArgParser<'_>) -> Option<AttributeKind> {
        // `true`: the trait name is mandatory, so a `None` name is impossible
        // on the success path (hence the `expect` below).
        let (trait_name, helper_attrs) = parse_derive_like(cx, args, true)?;
        Some(AttributeKind::ProcMacroDerive {
            trait_name: trait_name.expect("Trait name is mandatory, so it is present"),
            helper_attrs,
            span: cx.attr_span,
        })
    }
}
/// Parser for `#[rustc_builtin_macro]` / `#[rustc_builtin_macro(Name, ...)]`.
/// Unlike `#[proc_macro_derive]`, the name argument is optional (the third
/// argument to `parse_derive_like` is `false`).
pub(crate) struct RustcBuiltinMacroParser;
impl<S: Stage> SingleAttributeParser<S> for RustcBuiltinMacroParser {
    const PATH: &[Symbol] = &[sym::rustc_builtin_macro];
    const ATTRIBUTE_ORDER: AttributeOrder = AttributeOrder::KeepOutermost;
    const ON_DUPLICATE: OnDuplicate<S> = OnDuplicate::Error;
    // NOTE(review): this template reuses the derive wording ("TraitName"),
    // though the argument here names a builtin macro — confirm whether a
    // dedicated suggestion string is wanted.
    const TEMPLATE: AttributeTemplate =
        template!(List: "TraitName, /*opt*/ attributes(name1, name2, ...)");

    fn convert(cx: &mut AcceptContext<'_, '_, S>, args: &ArgParser<'_>) -> Option<AttributeKind> {
        let (builtin_name, helper_attrs) = parse_derive_like(cx, args, false)?;
        Some(AttributeKind::RustcBuiltinMacro { builtin_name, helper_attrs, span: cx.attr_span })
    }
}
/// Shared argument parser for `#[proc_macro_derive(...)]` and
/// `#[rustc_builtin_macro(...)]`, both of which take the shape
/// `Name, /*opt*/ attributes(helper1, helper2)`.
///
/// Returns `(name, helper_attrs)`. `name` is `None` only when
/// `trait_name_mandatory` is `false` and the attribute had no arguments at
/// all. Any malformed input emits a diagnostic through `cx` and returns
/// `None`; the specific diagnostic (and its span) depends on which guard
/// below fails first.
fn parse_derive_like<S: Stage>(
    cx: &mut AcceptContext<'_, '_, S>,
    args: &ArgParser<'_>,
    trait_name_mandatory: bool,
) -> Option<(Option<Symbol>, ThinVec<Symbol>)> {
    let Some(list) = args.list() else {
        // For #[rustc_builtin_macro], it is permitted to leave out the trait name
        if args.no_args().is_ok() && !trait_name_mandatory {
            return Some((None, ThinVec::new()));
        }
        cx.expected_list(cx.attr_span);
        return None;
    };
    let mut items = list.mixed();

    // Parse the name of the trait that is derived.
    let Some(trait_attr) = items.next() else {
        cx.expected_at_least_one_argument(list.span);
        return None;
    };
    let Some(trait_attr) = trait_attr.meta_item() else {
        cx.unexpected_literal(trait_attr.span());
        return None;
    };
    let Some(trait_ident) = trait_attr.path().word() else {
        cx.expected_identifier(trait_attr.path().span());
        return None;
    };
    // Raw identifiers (`r#...`) of keywords are rejected as trait names.
    if !trait_ident.name.can_be_raw() {
        cx.expected_identifier(trait_ident.span);
        return None;
    }
    // The name itself must not carry arguments, e.g. `Foo(...)` is invalid.
    if let Err(e) = trait_attr.args().no_args() {
        cx.expected_no_args(e);
        return None;
    };

    // Parse optional attributes
    let mut attributes = ThinVec::new();
    if let Some(attrs) = items.next() {
        // The second argument, when present, must be exactly
        // `attributes(...)`.
        let Some(attr_list) = attrs.meta_item() else {
            cx.expected_list(attrs.span());
            return None;
        };
        if !attr_list.path().word_is(sym::attributes) {
            cx.expected_specific_argument(attrs.span(), vec!["attributes"]);
            return None;
        }
        let Some(attr_list) = attr_list.args().list() else {
            cx.expected_list(attrs.span());
            return None;
        };

        // Parse item in `attributes(...)` argument
        for attr in attr_list.mixed() {
            let Some(attr) = attr.meta_item() else {
                cx.expected_identifier(attr.span());
                return None;
            };
            if let Err(e) = attr.args().no_args() {
                cx.expected_no_args(e);
                return None;
            };
            let Some(ident) = attr.path().word() else {
                cx.expected_identifier(attr.path().span());
                return None;
            };
            if !ident.name.can_be_raw() {
                cx.expected_identifier(ident.span);
                return None;
            }
            attributes.push(ident.name);
        }
    }

    // If anything else is specified, we should reject it
    if let Some(next) = items.next() {
        cx.expected_no_args(next.span());
    }

    Some((Some(trait_ident.name), attributes))
}
@@ -274,7 +274,7 @@ fn parse_alignment(node: &LitKind) -> Result<Align, &'static str> {
pub(crate) struct AlignParser(Option<(Align, Span)>);
impl AlignParser {
const PATH: &'static [Symbol] = &[sym::align];
const PATH: &'static [Symbol] = &[sym::rustc_align];
const TEMPLATE: AttributeTemplate = template!(List: "<alignment in bytes>");
fn parse<'c, S: Stage>(
@@ -74,8 +74,15 @@ impl<S: Stage> AttributeParser<S> for StabilityParser {
template!(NameValueStr: "deprecation message"),
|this, cx, args| {
reject_outside_std!(cx);
this.allowed_through_unstable_modules =
args.name_value().and_then(|i| i.value_as_str())
let Some(nv) = args.name_value() else {
cx.expected_name_value(cx.attr_span, None);
return;
};
let Some(value_str) = nv.value_as_str() else {
cx.expected_string_literal(nv.value_span, Some(nv.value_as_lit()));
return;
};
this.allowed_through_unstable_modules = Some(value_str);
},
),
];
@@ -247,7 +254,12 @@ pub(crate) fn parse_stability<S: Stage>(
let mut feature = None;
let mut since = None;
for param in args.list()?.mixed() {
let ArgParser::List(list) = args else {
cx.expected_list(cx.attr_span);
return None;
};
for param in list.mixed() {
let param_span = param.span();
let Some(param) = param.meta_item() else {
cx.emit_err(session_diagnostics::UnsupportedLiteral {
@@ -322,7 +334,13 @@ pub(crate) fn parse_unstability<S: Stage>(
let mut is_soft = false;
let mut implied_by = None;
let mut old_name = None;
for param in args.list()?.mixed() {
let ArgParser::List(list) = args else {
cx.expected_list(cx.attr_span);
return None;
};
for param in list.mixed() {
let Some(param) = param.meta_item() else {
cx.emit_err(session_diagnostics::UnsupportedLiteral {
span: param.span(),
@@ -33,10 +33,14 @@
AsPtrParser, AutomaticallyDerivedParser, PassByValueParser, PubTransparentParser,
};
use crate::attributes::loop_match::{ConstContinueParser, LoopMatchParser};
use crate::attributes::macro_attrs::{MacroEscapeParser, MacroUseParser};
use crate::attributes::must_use::MustUseParser;
use crate::attributes::no_implicit_prelude::NoImplicitPreludeParser;
use crate::attributes::non_exhaustive::NonExhaustiveParser;
use crate::attributes::path::PathParser as PathAttributeParser;
use crate::attributes::proc_macro_attrs::{
ProcMacroAttributeParser, ProcMacroDeriveParser, ProcMacroParser, RustcBuiltinMacroParser,
};
use crate::attributes::repr::{AlignParser, ReprParser};
use crate::attributes::rustc_internal::{
RustcLayoutScalarValidRangeEnd, RustcLayoutScalarValidRangeStart,
@@ -126,6 +130,7 @@ mod late {
BodyStabilityParser,
ConfusablesParser,
ConstStabilityParser,
MacroUseParser,
NakedParser,
StabilityParser,
UsedParser,
@@ -152,6 +157,8 @@ mod late {
Single<MustUseParser>,
Single<OptimizeParser>,
Single<PathAttributeParser>,
Single<ProcMacroDeriveParser>,
Single<RustcBuiltinMacroParser>,
Single<RustcForceInlineParser>,
Single<RustcLayoutScalarValidRangeEnd>,
Single<RustcLayoutScalarValidRangeStart>,
@@ -174,6 +181,7 @@ mod late {
Single<WithoutArgs<FfiPureParser>>,
Single<WithoutArgs<FundamentalParser>>,
Single<WithoutArgs<LoopMatchParser>>,
Single<WithoutArgs<MacroEscapeParser>>,
Single<WithoutArgs<MarkerParser>>,
Single<WithoutArgs<MayDangleParser>>,
Single<WithoutArgs<NoImplicitPreludeParser>>,
@@ -183,6 +191,8 @@ mod late {
Single<WithoutArgs<ParenSugarParser>>,
Single<WithoutArgs<PassByValueParser>>,
Single<WithoutArgs<PointeeParser>>,
Single<WithoutArgs<ProcMacroAttributeParser>>,
Single<WithoutArgs<ProcMacroParser>>,
Single<WithoutArgs<PubTransparentParser>>,
Single<WithoutArgs<SpecializationTraitParser>>,
Single<WithoutArgs<StdInternalSymbolParser>>,
@@ -386,6 +396,17 @@ pub(crate) fn expected_no_args(&self, args_span: Span) -> ErrorGuaranteed {
})
}
/// emit an error that a `name` was expected here
pub(crate) fn expected_identifier(&self, span: Span) -> ErrorGuaranteed {
self.emit_err(AttributeParseError {
span,
attr_span: self.attr_span,
template: self.template.clone(),
attribute: self.attr_path.clone(),
reason: AttributeParseErrorReason::ExpectedIdentifier,
})
}
/// emit an error that a `name = value` pair was expected at this span. The symbol can be given for
/// a nicer error message talking about the specific name that was found lacking a value.
pub(crate) fn expected_name_value(&self, span: Span, name: Option<Symbol>) -> ErrorGuaranteed {
@@ -438,7 +438,7 @@ pub(crate) struct IllFormedAttributeInput {
#[derive(Diagnostic)]
#[diag(attr_parsing_ill_formed_attribute_input)]
pub(crate) struct MustUseIllFormedAttributeInput {
pub(crate) struct IllFormedAttributeInputLint {
#[primary_span]
pub span: Span,
pub num_suggestions: usize,
@@ -549,6 +549,7 @@ pub(crate) enum AttributeParseErrorReason {
/// Should we tell the user to write a list when they didn't?
list: bool,
},
ExpectedIdentifier,
}
pub(crate) struct AttributeParseError {
@@ -600,11 +601,11 @@ fn into_diag(self, dcx: DiagCtxtHandle<'a>, level: Level) -> Diag<'a, G> {
diag.code(E0538);
}
AttributeParseErrorReason::UnexpectedLiteral => {
diag.span_label(self.span, format!("didn't expect a literal here"));
diag.span_label(self.span, "didn't expect a literal here");
diag.code(E0565);
}
AttributeParseErrorReason::ExpectedNoArgs => {
diag.span_label(self.span, format!("didn't expect any arguments here"));
diag.span_label(self.span, "didn't expect any arguments here");
diag.code(E0565);
}
AttributeParseErrorReason::ExpectedNameValue(None) => {
@@ -684,6 +685,9 @@ fn into_diag(self, dcx: DiagCtxtHandle<'a>, level: Level) -> Diag<'a, G> {
}
}
}
AttributeParseErrorReason::ExpectedIdentifier => {
diag.span_label(self.span, "expected a valid identifier here");
}
}
let suggestions = self.template.suggestions(false, &name);
@@ -1290,6 +1290,58 @@ pub(crate) fn suggest_cloning(
span,
format!("if `{ty}` implemented `Clone`, you could clone the value"),
);
} else if let ty::Adt(_, _) = ty.kind()
&& let Some(clone_trait) = self.infcx.tcx.lang_items().clone_trait()
{
// For cases like `Option<NonClone>`, where `Option<T>: Clone` if `T: Clone`, we point
// at the types that should be `Clone`.
let ocx = ObligationCtxt::new_with_diagnostics(self.infcx);
let cause = ObligationCause::misc(expr.span, self.mir_def_id());
ocx.register_bound(cause, self.infcx.param_env, ty, clone_trait);
let errors = ocx.select_all_or_error();
if errors.iter().all(|error| {
match error.obligation.predicate.as_clause().and_then(|c| c.as_trait_clause()) {
Some(clause) => match clause.self_ty().skip_binder().kind() {
ty::Adt(def, _) => def.did().is_local() && clause.def_id() == clone_trait,
_ => false,
},
None => false,
}
}) {
let mut type_spans = vec![];
let mut types = FxIndexSet::default();
for clause in errors
.iter()
.filter_map(|e| e.obligation.predicate.as_clause())
.filter_map(|c| c.as_trait_clause())
{
let ty::Adt(def, _) = clause.self_ty().skip_binder().kind() else { continue };
type_spans.push(self.infcx.tcx.def_span(def.did()));
types.insert(
self.infcx
.tcx
.short_string(clause.self_ty().skip_binder(), &mut err.long_ty_path()),
);
}
let mut span: MultiSpan = type_spans.clone().into();
for sp in type_spans {
span.push_span_label(sp, "consider implementing `Clone` for this type");
}
span.push_span_label(expr.span, "you could clone this value");
let types: Vec<_> = types.into_iter().collect();
let msg = match &types[..] {
[only] => format!("`{only}`"),
[head @ .., last] => format!(
"{} and `{last}`",
head.iter().map(|t| format!("`{t}`")).collect::<Vec<_>>().join(", ")
),
[] => unreachable!(),
};
err.span_note(
span,
format!("if {msg} implemented `Clone`, you could clone the value"),
);
}
}
}
@@ -115,10 +115,8 @@ fn group_move_errors(&mut self) -> Vec<GroupedMoveError<'tcx>> {
fn append_to_grouped_errors(
&self,
grouped_errors: &mut Vec<GroupedMoveError<'tcx>>,
error: MoveError<'tcx>,
MoveError { place: original_path, location, kind }: MoveError<'tcx>,
) {
let MoveError { place: original_path, location, kind } = error;
// Note: that the only time we assign a place isn't a temporary
// to a user variable is when initializing it.
// If that ever stops being the case, then the ever initialized
@@ -251,62 +249,56 @@ fn append_binding_error(
}
fn report(&mut self, error: GroupedMoveError<'tcx>) {
let (mut err, err_span) = {
let (span, use_spans, original_path, kind) = match error {
GroupedMoveError::MovesFromPlace { span, original_path, ref kind, .. }
| GroupedMoveError::MovesFromValue { span, original_path, ref kind, .. } => {
(span, None, original_path, kind)
}
GroupedMoveError::OtherIllegalMove { use_spans, original_path, ref kind } => {
(use_spans.args_or_use(), Some(use_spans), original_path, kind)
}
};
debug!(
"report: original_path={:?} span={:?}, kind={:?} \
original_path.is_upvar_field_projection={:?}",
original_path,
span,
kind,
self.is_upvar_field_projection(original_path.as_ref())
);
if self.has_ambiguous_copy(original_path.ty(self.body, self.infcx.tcx).ty) {
// If the type may implement Copy, skip the error.
// It's an error with the Copy implementation (e.g. duplicate Copy) rather than borrow check
self.dcx().span_delayed_bug(
span,
"Type may implement copy, but there is no other error.",
);
return;
let (span, use_spans, original_path, kind) = match error {
GroupedMoveError::MovesFromPlace { span, original_path, ref kind, .. }
| GroupedMoveError::MovesFromValue { span, original_path, ref kind, .. } => {
(span, None, original_path, kind)
}
GroupedMoveError::OtherIllegalMove { use_spans, original_path, ref kind } => {
(use_spans.args_or_use(), Some(use_spans), original_path, kind)
}
};
debug!(
"report: original_path={:?} span={:?}, kind={:?} \
original_path.is_upvar_field_projection={:?}",
original_path,
span,
kind,
self.is_upvar_field_projection(original_path.as_ref())
);
if self.has_ambiguous_copy(original_path.ty(self.body, self.infcx.tcx).ty) {
// If the type may implement Copy, skip the error.
// It's an error with the Copy implementation (e.g. duplicate Copy) rather than borrow check
self.dcx()
.span_delayed_bug(span, "Type may implement copy, but there is no other error.");
return;
}
let mut err = match kind {
&IllegalMoveOriginKind::BorrowedContent { target_place } => self
.report_cannot_move_from_borrowed_content(
original_path,
target_place,
span,
use_spans,
),
&IllegalMoveOriginKind::InteriorOfTypeWithDestructor { container_ty: ty } => {
self.cannot_move_out_of_interior_of_drop(span, ty)
}
&IllegalMoveOriginKind::InteriorOfSliceOrArray { ty, is_index } => {
self.cannot_move_out_of_interior_noncopy(span, ty, Some(is_index))
}
(
match kind {
&IllegalMoveOriginKind::BorrowedContent { target_place } => self
.report_cannot_move_from_borrowed_content(
original_path,
target_place,
span,
use_spans,
),
&IllegalMoveOriginKind::InteriorOfTypeWithDestructor { container_ty: ty } => {
self.cannot_move_out_of_interior_of_drop(span, ty)
}
&IllegalMoveOriginKind::InteriorOfSliceOrArray { ty, is_index } => {
self.cannot_move_out_of_interior_noncopy(span, ty, Some(is_index))
}
},
span,
)
};
self.add_move_hints(error, &mut err, err_span);
self.add_move_hints(error, &mut err, span);
self.buffer_error(err);
}
fn has_ambiguous_copy(&mut self, ty: Ty<'tcx>) -> bool {
let Some(copy_trait_def) = self.infcx.tcx.lang_items().copy_trait() else { return false };
// This is only going to be ambiguous if there are incoherent impls, because otherwise
// ambiguity should never happen in MIR.
self.infcx.type_implements_trait(copy_trait_def, [ty], self.infcx.param_env).may_apply()
let Some(copy_def_id) = self.infcx.tcx.lang_items().copy_trait() else { return false };
// Avoid bogus move errors because of an incoherent `Copy` impl.
self.infcx.type_implements_trait(copy_def_id, [ty], self.infcx.param_env).may_apply()
&& self.infcx.tcx.coherent_trait(copy_def_id).is_err()
}
fn report_cannot_move_from_static(&mut self, place: Place<'tcx>, span: Span) -> Diag<'infcx> {
@@ -482,7 +474,8 @@ fn report_cannot_move_from_borrowed_content(
self.cannot_move_out_of_interior_noncopy(span, ty, None)
}
ty::Closure(def_id, closure_args)
if def_id.as_local() == Some(self.mir_def_id()) && upvar_field.is_some() =>
if def_id.as_local() == Some(self.mir_def_id())
&& let Some(upvar_field) = upvar_field =>
{
let closure_kind_ty = closure_args.as_closure().kind_ty();
let closure_kind = match closure_kind_ty.to_opt_closure_kind() {
@@ -495,7 +488,7 @@ fn report_cannot_move_from_borrowed_content(
let capture_description =
format!("captured variable in an `{closure_kind}` closure");
let upvar = &self.upvars[upvar_field.unwrap().index()];
let upvar = &self.upvars[upvar_field.index()];
let upvar_hir_id = upvar.get_root_variable();
let upvar_name = upvar.to_string(tcx);
let upvar_span = tcx.hir_span(upvar_hir_id);
@@ -605,7 +598,7 @@ fn add_move_hints(&self, error: GroupedMoveError<'tcx>, err: &mut Diag<'_>, span
}
// No binding. Nothing to suggest.
GroupedMoveError::OtherIllegalMove { ref original_path, use_spans, .. } => {
let use_span = use_spans.var_or_use();
let mut use_span = use_spans.var_or_use();
let place_ty = original_path.ty(self.body, self.infcx.tcx).ty;
let place_desc = match self.describe_place(original_path.as_ref()) {
Some(desc) => format!("`{desc}`"),
@@ -622,6 +615,36 @@ fn add_move_hints(&self, error: GroupedMoveError<'tcx>, err: &mut Diag<'_>, span
);
}
if let Some(upvar_field) = self
.prefixes(original_path.as_ref(), PrefixSet::All)
.find_map(|p| self.is_upvar_field_projection(p))
{
// Look for the introduction of the original binding being moved.
let upvar = &self.upvars[upvar_field.index()];
let upvar_hir_id = upvar.get_root_variable();
use_span = match self.infcx.tcx.parent_hir_node(upvar_hir_id) {
hir::Node::Param(param) => {
// Instead of pointing at the path where we access the value within a
// closure, we point at the type of the outer `fn` argument.
param.ty_span
}
hir::Node::LetStmt(stmt) => match (stmt.ty, stmt.init) {
// We point at the type of the outer let-binding.
(Some(ty), _) => ty.span,
// We point at the initializer of the outer let-binding, but only if it
// isn't something that spans multiple lines, like a closure, as the
// ASCII art gets messy.
(None, Some(init))
if !self.infcx.tcx.sess.source_map().is_multiline(init.span) =>
{
init.span
}
_ => use_span,
},
_ => use_span,
};
}
err.subdiagnostic(crate::session_diagnostics::TypeNoCopy::Label {
is_partial_move: false,
ty: place_ty,
@@ -629,12 +652,22 @@ fn add_move_hints(&self, error: GroupedMoveError<'tcx>, err: &mut Diag<'_>, span
span: use_span,
});
let mut pointed_at_span = false;
use_spans.args_subdiag(err, |args_span| {
if args_span == span || args_span == use_span {
pointed_at_span = true;
}
crate::session_diagnostics::CaptureArgLabel::MoveOutPlace {
place: place_desc,
place: place_desc.clone(),
args_span,
}
});
if !pointed_at_span && use_span != span {
err.subdiagnostic(crate::session_diagnostics::CaptureArgLabel::MoveOutPlace {
place: place_desc,
args_span: span,
});
}
self.add_note_for_packed_struct_derive(err, original_path.local);
}
@@ -1,11 +1,13 @@
use std::mem;
use std::{mem, slice};
use rustc_ast::ptr::P;
use rustc_ast::visit::{self, Visitor};
use rustc_ast::{self as ast, NodeId, attr};
use rustc_ast::{self as ast, HasNodeId, NodeId, attr};
use rustc_ast_pretty::pprust;
use rustc_attr_data_structures::AttributeKind;
use rustc_attr_parsing::AttributeParser;
use rustc_errors::DiagCtxtHandle;
use rustc_expand::base::{ExtCtxt, ResolverExpand, parse_macro_name_and_helper_attrs};
use rustc_expand::base::{ExtCtxt, ResolverExpand};
use rustc_expand::expand::{AstFragment, ExpansionConfig};
use rustc_feature::Features;
use rustc_session::Session;
@@ -22,7 +24,7 @@ struct ProcMacroDerive {
trait_name: Symbol,
function_ident: Ident,
span: Span,
attrs: Vec<Symbol>,
attrs: ThinVec<Symbol>,
}
struct ProcMacroDef {
@@ -41,6 +43,7 @@ struct CollectProcMacros<'a> {
macros: Vec<ProcMacro>,
in_root: bool,
dcx: DiagCtxtHandle<'a>,
session: &'a Session,
source_map: &'a SourceMap,
is_proc_macro_crate: bool,
is_test_crate: bool,
@@ -63,6 +66,7 @@ pub fn inject(
macros: Vec::new(),
in_root: true,
dcx,
session: sess,
source_map: sess.source_map(),
is_proc_macro_crate,
is_test_crate,
@@ -98,8 +102,18 @@ fn collect_custom_derive(
function_ident: Ident,
attr: &'a ast::Attribute,
) {
let Some((trait_name, proc_attrs)) =
parse_macro_name_and_helper_attrs(self.dcx, attr, "derive")
let Some(rustc_hir::Attribute::Parsed(AttributeKind::ProcMacroDerive {
trait_name,
helper_attrs,
..
})) = AttributeParser::parse_limited(
self.session,
slice::from_ref(attr),
sym::proc_macro_derive,
item.span,
item.node_id(),
None,
)
else {
return;
};
@@ -110,7 +124,7 @@ fn collect_custom_derive(
span: item.span,
trait_name,
function_ident,
attrs: proc_attrs,
attrs: helper_attrs,
}));
} else {
let msg = if !self.in_root {
@@ -74,7 +74,7 @@ pub(crate) fn codegen_tls_ref<'tcx>(
pub(crate) fn eval_mir_constant<'tcx>(
fx: &FunctionCx<'_, '_, 'tcx>,
constant: &ConstOperand<'tcx>,
) -> (ConstValue<'tcx>, Ty<'tcx>) {
) -> (ConstValue, Ty<'tcx>) {
let cv = fx.monomorphize(constant.const_);
// This cannot fail because we checked all required_consts in advance.
let val = cv
@@ -93,7 +93,7 @@ pub(crate) fn codegen_constant_operand<'tcx>(
pub(crate) fn codegen_const_value<'tcx>(
fx: &mut FunctionCx<'_, '_, 'tcx>,
const_val: ConstValue<'tcx>,
const_val: ConstValue,
ty: Ty<'tcx>,
) -> CValue<'tcx> {
let layout = fx.layout_of(ty);
@@ -210,8 +210,7 @@ pub(crate) fn codegen_const_value<'tcx>(
.offset_i64(fx, i64::try_from(offset.bytes()).unwrap()),
layout,
),
ConstValue::Slice { data, meta } => {
let alloc_id = fx.tcx.reserve_and_set_memory_alloc(data);
ConstValue::Slice { alloc_id, meta } => {
let ptr = pointer_for_allocation(fx, alloc_id).get_addr(fx);
let len = fx.bcx.ins().iconst(fx.pointer_type, meta as i64);
CValue::by_val_pair(ptr, len, layout)
@@ -588,7 +588,7 @@ fn asm_tests(env: &Env, args: &TestArg) -> Result<(), String> {
&"always",
&"--stage",
&"0",
&"tests/assembly/asm",
&"tests/assembly-llvm/asm",
&"--compiletest-rustc-args",
&rustc_args,
],
-8
View File
@@ -3,12 +3,4 @@ codegen_gcc_unwinding_inline_asm =
codegen_gcc_copy_bitcode = failed to copy bitcode to object file: {$err}
codegen_gcc_dynamic_linking_with_lto =
cannot prefer dynamic linking when performing LTO
.note = only 'staticlib', 'bin', and 'cdylib' outputs are supported with LTO
codegen_gcc_lto_disallowed = lto can only be run for executables, cdylibs and static library outputs
codegen_gcc_lto_dylib = lto cannot be used for `dylib` crate type without `-Zdylib-lto`
codegen_gcc_lto_bitcode_from_rlib = failed to get bitcode from object file for LTO ({$gcc_err})
+8 -80
View File
@@ -25,35 +25,21 @@
use gccjit::{Context, OutputKind};
use object::read::archive::ArchiveFile;
use rustc_codegen_ssa::back::lto::{SerializedModule, ThinModule, ThinShared};
use rustc_codegen_ssa::back::symbol_export;
use rustc_codegen_ssa::back::write::{CodegenContext, FatLtoInput};
use rustc_codegen_ssa::traits::*;
use rustc_codegen_ssa::{ModuleCodegen, ModuleKind, looks_like_rust_object_file};
use rustc_data_structures::memmap::Mmap;
use rustc_errors::{DiagCtxtHandle, FatalError};
use rustc_hir::def_id::LOCAL_CRATE;
use rustc_middle::bug;
use rustc_middle::dep_graph::WorkProduct;
use rustc_middle::middle::exported_symbols::{SymbolExportInfo, SymbolExportLevel};
use rustc_session::config::{CrateType, Lto};
use rustc_session::config::Lto;
use rustc_target::spec::RelocModel;
use tempfile::{TempDir, tempdir};
use crate::back::write::save_temp_bitcode;
use crate::errors::{DynamicLinkingWithLTO, LtoBitcodeFromRlib, LtoDisallowed, LtoDylib};
use crate::errors::LtoBitcodeFromRlib;
use crate::{GccCodegenBackend, GccContext, SyncContext, to_gcc_opt_level};
pub fn crate_type_allows_lto(crate_type: CrateType) -> bool {
match crate_type {
CrateType::Executable
| CrateType::Dylib
| CrateType::Staticlib
| CrateType::Cdylib
| CrateType::Sdylib => true,
CrateType::Rlib | CrateType::ProcMacro => false,
}
}
struct LtoData {
// TODO(antoyo): use symbols_below_threshold.
//symbols_below_threshold: Vec<String>,
@@ -63,18 +49,9 @@ struct LtoData {
fn prepare_lto(
cgcx: &CodegenContext<GccCodegenBackend>,
each_linked_rlib_for_lto: &[PathBuf],
dcx: DiagCtxtHandle<'_>,
) -> Result<LtoData, FatalError> {
let export_threshold = match cgcx.lto {
// We're just doing LTO for our one crate
Lto::ThinLocal => SymbolExportLevel::Rust,
// We're doing LTO for the entire crate graph
Lto::Fat | Lto::Thin => symbol_export::crates_export_threshold(&cgcx.crate_types),
Lto::No => panic!("didn't request LTO but we're doing LTO"),
};
let tmp_path = match tempdir() {
Ok(tmp_path) => tmp_path,
Err(error) => {
@@ -83,20 +60,6 @@ fn prepare_lto(
}
};
let symbol_filter = &|&(ref name, info): &(String, SymbolExportInfo)| {
if info.level.is_below_threshold(export_threshold) || info.used {
Some(name.clone())
} else {
None
}
};
let exported_symbols = cgcx.exported_symbols.as_ref().expect("needs exported symbols for LTO");
let mut symbols_below_threshold = {
let _timer = cgcx.prof.generic_activity("GCC_lto_generate_symbols_below_threshold");
exported_symbols[&LOCAL_CRATE].iter().filter_map(symbol_filter).collect::<Vec<String>>()
};
info!("{} symbols to preserve in this crate", symbols_below_threshold.len());
// If we're performing LTO for the entire crate graph, then for each of our
// upstream dependencies, find the corresponding rlib and load the bitcode
// from the archive.
@@ -105,32 +68,7 @@ fn prepare_lto(
// with either fat or thin LTO
let mut upstream_modules = Vec::new();
if cgcx.lto != Lto::ThinLocal {
// Make sure we actually can run LTO
for crate_type in cgcx.crate_types.iter() {
if !crate_type_allows_lto(*crate_type) {
dcx.emit_err(LtoDisallowed);
return Err(FatalError);
}
if *crate_type == CrateType::Dylib && !cgcx.opts.unstable_opts.dylib_lto {
dcx.emit_err(LtoDylib);
return Err(FatalError);
}
}
if cgcx.opts.cg.prefer_dynamic && !cgcx.opts.unstable_opts.dylib_lto {
dcx.emit_err(DynamicLinkingWithLTO);
return Err(FatalError);
}
for &(cnum, ref path) in cgcx.each_linked_rlib_for_lto.iter() {
let exported_symbols =
cgcx.exported_symbols.as_ref().expect("needs exported symbols for LTO");
{
let _timer = cgcx.prof.generic_activity("GCC_lto_generate_symbols_below_threshold");
symbols_below_threshold
.extend(exported_symbols[&cnum].iter().filter_map(symbol_filter));
}
for path in each_linked_rlib_for_lto {
let archive_data = unsafe {
Mmap::map(File::open(path).expect("couldn't open rlib")).expect("couldn't map rlib")
};
@@ -174,19 +112,18 @@ fn save_as_file(obj: &[u8], path: &Path) -> Result<(), LtoBitcodeFromRlib> {
/// for further optimization.
pub(crate) fn run_fat(
cgcx: &CodegenContext<GccCodegenBackend>,
each_linked_rlib_for_lto: &[PathBuf],
modules: Vec<FatLtoInput<GccCodegenBackend>>,
cached_modules: Vec<(SerializedModule<ModuleBuffer>, WorkProduct)>,
) -> Result<ModuleCodegen<GccContext>, FatalError> {
let dcx = cgcx.create_dcx();
let dcx = dcx.handle();
let lto_data = prepare_lto(cgcx, dcx)?;
let lto_data = prepare_lto(cgcx, each_linked_rlib_for_lto, dcx)?;
/*let symbols_below_threshold =
lto_data.symbols_below_threshold.iter().map(|c| c.as_ptr()).collect::<Vec<_>>();*/
fat_lto(
cgcx,
dcx,
modules,
cached_modules,
lto_data.upstream_modules,
lto_data.tmp_path,
//&lto_data.symbols_below_threshold,
@@ -197,7 +134,6 @@ fn fat_lto(
cgcx: &CodegenContext<GccCodegenBackend>,
_dcx: DiagCtxtHandle<'_>,
modules: Vec<FatLtoInput<GccCodegenBackend>>,
cached_modules: Vec<(SerializedModule<ModuleBuffer>, WorkProduct)>,
mut serialized_modules: Vec<(SerializedModule<ModuleBuffer>, CString)>,
tmp_path: TempDir,
//symbols_below_threshold: &[String],
@@ -211,21 +147,12 @@ fn fat_lto(
// modules that are serialized in-memory.
// * `in_memory` contains modules which are already parsed and in-memory,
// such as from multi-CGU builds.
//
// All of `cached_modules` (cached from previous incremental builds) can
// immediately go onto the `serialized_modules` modules list and then we can
// split the `modules` array into these two lists.
let mut in_memory = Vec::new();
serialized_modules.extend(cached_modules.into_iter().map(|(buffer, wp)| {
info!("pushing cached module {:?}", wp.cgu_name);
(buffer, CString::new(wp.cgu_name).unwrap())
}));
for module in modules {
match module {
FatLtoInput::InMemory(m) => in_memory.push(m),
FatLtoInput::Serialized { name, buffer } => {
info!("pushing serialized module {:?}", name);
let buffer = SerializedModule::Local(buffer);
serialized_modules.push((buffer, CString::new(name).unwrap()));
}
}
@@ -356,12 +283,13 @@ fn data(&self) -> &[u8] {
/// can simply be copied over from the incr. comp. cache.
pub(crate) fn run_thin(
cgcx: &CodegenContext<GccCodegenBackend>,
each_linked_rlib_for_lto: &[PathBuf],
modules: Vec<(String, ThinBuffer)>,
cached_modules: Vec<(SerializedModule<ModuleBuffer>, WorkProduct)>,
) -> Result<(Vec<ThinModule<GccCodegenBackend>>, Vec<WorkProduct>), FatalError> {
let dcx = cgcx.create_dcx();
let dcx = dcx.handle();
let lto_data = prepare_lto(cgcx, dcx)?;
let lto_data = prepare_lto(cgcx, each_linked_rlib_for_lto, dcx)?;
if cgcx.opts.cg.linker_plugin_lto.enabled() {
unreachable!(
"We should never reach this case if the LTO step \
-13
View File
@@ -14,19 +14,6 @@ pub(crate) struct CopyBitcode {
pub err: std::io::Error,
}
#[derive(Diagnostic)]
#[diag(codegen_gcc_dynamic_linking_with_lto)]
#[note]
pub(crate) struct DynamicLinkingWithLTO;
#[derive(Diagnostic)]
#[diag(codegen_gcc_lto_disallowed)]
pub(crate) struct LtoDisallowed;
#[derive(Diagnostic)]
#[diag(codegen_gcc_lto_dylib)]
pub(crate) struct LtoDylib;
#[derive(Diagnostic)]
#[diag(codegen_gcc_lto_bitcode_from_rlib)]
pub(crate) struct LtoBitcodeFromRlib {
+9 -3
View File
@@ -81,6 +81,7 @@
use std::any::Any;
use std::fmt::Debug;
use std::ops::Deref;
use std::path::PathBuf;
#[cfg(not(feature = "master"))]
use std::sync::atomic::AtomicBool;
#[cfg(not(feature = "master"))]
@@ -358,23 +359,28 @@ impl WriteBackendMethods for GccCodegenBackend {
fn run_and_optimize_fat_lto(
cgcx: &CodegenContext<Self>,
// FIXME(bjorn3): Limit LTO exports to these symbols
_exported_symbols_for_lto: &[String],
each_linked_rlib_for_lto: &[PathBuf],
modules: Vec<FatLtoInput<Self>>,
cached_modules: Vec<(SerializedModule<Self::ModuleBuffer>, WorkProduct)>,
diff_fncs: Vec<AutoDiffItem>,
) -> Result<ModuleCodegen<Self::Module>, FatalError> {
if !diff_fncs.is_empty() {
unimplemented!();
}
back::lto::run_fat(cgcx, modules, cached_modules)
back::lto::run_fat(cgcx, each_linked_rlib_for_lto, modules)
}
fn run_thin_lto(
cgcx: &CodegenContext<Self>,
// FIXME(bjorn3): Limit LTO exports to these symbols
_exported_symbols_for_lto: &[String],
each_linked_rlib_for_lto: &[PathBuf],
modules: Vec<(String, Self::ThinBuffer)>,
cached_modules: Vec<(SerializedModule<Self::ModuleBuffer>, WorkProduct)>,
) -> Result<(Vec<ThinModule<Self>>, Vec<WorkProduct>), FatalError> {
back::lto::run_thin(cgcx, modules, cached_modules)
back::lto::run_thin(cgcx, each_linked_rlib_for_lto, modules, cached_modules)
}
fn print_pass_timings(&self) {
-10
View File
@@ -2,10 +2,6 @@ codegen_llvm_autodiff_without_enable = using the autodiff feature requires -Z au
codegen_llvm_copy_bitcode = failed to copy bitcode to object file: {$err}
codegen_llvm_dynamic_linking_with_lto =
cannot prefer dynamic linking when performing LTO
.note = only 'staticlib', 'bin', and 'cdylib' outputs are supported with LTO
codegen_llvm_fixed_x18_invalid_arch = the `-Zfixed-x18` flag is not supported on the `{$arch}` architecture
@@ -18,12 +14,6 @@ codegen_llvm_load_bitcode_with_llvm_err = failed to load bitcode of module "{$na
codegen_llvm_lto_bitcode_from_rlib = failed to get bitcode from object file for LTO ({$llvm_err})
codegen_llvm_lto_disallowed = lto can only be run for executables, cdylibs and static library outputs
codegen_llvm_lto_dylib = lto cannot be used for `dylib` crate type without `-Zdylib-lto`
codegen_llvm_lto_proc_macro = lto cannot be used for `proc-macro` crate type without `-Zdylib-lto`
codegen_llvm_mismatch_data_layout =
data-layout for target `{$rustc_target}`, `{$rustc_layout}`, differs from LLVM target's `{$llvm_target}` default layout, `{$llvm_layout}`
+30 -91
View File
@@ -1,33 +1,28 @@
use std::collections::BTreeMap;
use std::ffi::{CStr, CString};
use std::fs::File;
use std::path::Path;
use std::path::{Path, PathBuf};
use std::ptr::NonNull;
use std::sync::Arc;
use std::{io, iter, slice};
use object::read::archive::ArchiveFile;
use rustc_codegen_ssa::back::lto::{SerializedModule, ThinModule, ThinShared};
use rustc_codegen_ssa::back::symbol_export;
use rustc_codegen_ssa::back::write::{CodegenContext, FatLtoInput};
use rustc_codegen_ssa::traits::*;
use rustc_codegen_ssa::{ModuleCodegen, ModuleKind, looks_like_rust_object_file};
use rustc_data_structures::fx::FxHashMap;
use rustc_data_structures::memmap::Mmap;
use rustc_errors::{DiagCtxtHandle, FatalError};
use rustc_hir::def_id::LOCAL_CRATE;
use rustc_middle::bug;
use rustc_middle::dep_graph::WorkProduct;
use rustc_middle::middle::exported_symbols::{SymbolExportInfo, SymbolExportLevel};
use rustc_session::config::{self, CrateType, Lto};
use rustc_session::config::{self, Lto};
use tracing::{debug, info};
use crate::back::write::{
self, CodegenDiagnosticsStage, DiagnosticHandlers, bitcode_section_name, save_temp_bitcode,
};
use crate::errors::{
DynamicLinkingWithLTO, LlvmError, LtoBitcodeFromRlib, LtoDisallowed, LtoDylib, LtoProcMacro,
};
use crate::errors::{LlvmError, LtoBitcodeFromRlib};
use crate::llvm::AttributePlace::Function;
use crate::llvm::{self, build_string};
use crate::{LlvmCodegenBackend, ModuleLlvm, SimpleCx, attributes};
@@ -36,45 +31,21 @@
/// session to determine which CGUs we can reuse.
const THIN_LTO_KEYS_INCR_COMP_FILE_NAME: &str = "thin-lto-past-keys.bin";
fn crate_type_allows_lto(crate_type: CrateType) -> bool {
match crate_type {
CrateType::Executable
| CrateType::Dylib
| CrateType::Staticlib
| CrateType::Cdylib
| CrateType::ProcMacro
| CrateType::Sdylib => true,
CrateType::Rlib => false,
}
}
fn prepare_lto(
cgcx: &CodegenContext<LlvmCodegenBackend>,
exported_symbols_for_lto: &[String],
each_linked_rlib_for_lto: &[PathBuf],
dcx: DiagCtxtHandle<'_>,
) -> Result<(Vec<CString>, Vec<(SerializedModule<ModuleBuffer>, CString)>), FatalError> {
let export_threshold = match cgcx.lto {
// We're just doing LTO for our one crate
Lto::ThinLocal => SymbolExportLevel::Rust,
let mut symbols_below_threshold = exported_symbols_for_lto
.iter()
.map(|symbol| CString::new(symbol.to_owned()).unwrap())
.collect::<Vec<CString>>();
// We're doing LTO for the entire crate graph
Lto::Fat | Lto::Thin => symbol_export::crates_export_threshold(&cgcx.crate_types),
Lto::No => panic!("didn't request LTO but we're doing LTO"),
};
let symbol_filter = &|&(ref name, info): &(String, SymbolExportInfo)| {
if info.level.is_below_threshold(export_threshold) || info.used {
Some(CString::new(name.as_str()).unwrap())
} else {
None
}
};
let exported_symbols = cgcx.exported_symbols.as_ref().expect("needs exported symbols for LTO");
let mut symbols_below_threshold = {
let _timer = cgcx.prof.generic_activity("LLVM_lto_generate_symbols_below_threshold");
exported_symbols[&LOCAL_CRATE].iter().filter_map(symbol_filter).collect::<Vec<CString>>()
};
info!("{} symbols to preserve in this crate", symbols_below_threshold.len());
// __llvm_profile_counter_bias is pulled in at link time by an undefined reference to
// __llvm_profile_runtime, therefore we won't know until link time if this symbol
// should have default visibility.
symbols_below_threshold.push(c"__llvm_profile_counter_bias".to_owned());
// If we're performing LTO for the entire crate graph, then for each of our
// upstream dependencies, find the corresponding rlib and load the bitcode
@@ -84,37 +55,7 @@ fn prepare_lto(
// with either fat or thin LTO
let mut upstream_modules = Vec::new();
if cgcx.lto != Lto::ThinLocal {
// Make sure we actually can run LTO
for crate_type in cgcx.crate_types.iter() {
if !crate_type_allows_lto(*crate_type) {
dcx.emit_err(LtoDisallowed);
return Err(FatalError);
} else if *crate_type == CrateType::Dylib {
if !cgcx.opts.unstable_opts.dylib_lto {
dcx.emit_err(LtoDylib);
return Err(FatalError);
}
} else if *crate_type == CrateType::ProcMacro && !cgcx.opts.unstable_opts.dylib_lto {
dcx.emit_err(LtoProcMacro);
return Err(FatalError);
}
}
if cgcx.opts.cg.prefer_dynamic && !cgcx.opts.unstable_opts.dylib_lto {
dcx.emit_err(DynamicLinkingWithLTO);
return Err(FatalError);
}
for &(cnum, ref path) in cgcx.each_linked_rlib_for_lto.iter() {
let exported_symbols =
cgcx.exported_symbols.as_ref().expect("needs exported symbols for LTO");
{
let _timer =
cgcx.prof.generic_activity("LLVM_lto_generate_symbols_below_threshold");
symbols_below_threshold
.extend(exported_symbols[&cnum].iter().filter_map(symbol_filter));
}
for path in each_linked_rlib_for_lto {
let archive_data = unsafe {
Mmap::map(std::fs::File::open(&path).expect("couldn't open rlib"))
.expect("couldn't map rlib")
@@ -147,10 +88,6 @@ fn prepare_lto(
}
}
// __llvm_profile_counter_bias is pulled in at link time by an undefined reference to
// __llvm_profile_runtime, therefore we won't know until link time if this symbol
// should have default visibility.
symbols_below_threshold.push(c"__llvm_profile_counter_bias".to_owned());
Ok((symbols_below_threshold, upstream_modules))
}
@@ -199,15 +136,17 @@ fn get_bitcode_slice_from_object_data<'a>(
/// for further optimization.
pub(crate) fn run_fat(
cgcx: &CodegenContext<LlvmCodegenBackend>,
exported_symbols_for_lto: &[String],
each_linked_rlib_for_lto: &[PathBuf],
modules: Vec<FatLtoInput<LlvmCodegenBackend>>,
cached_modules: Vec<(SerializedModule<ModuleBuffer>, WorkProduct)>,
) -> Result<ModuleCodegen<ModuleLlvm>, FatalError> {
let dcx = cgcx.create_dcx();
let dcx = dcx.handle();
let (symbols_below_threshold, upstream_modules) = prepare_lto(cgcx, dcx)?;
let (symbols_below_threshold, upstream_modules) =
prepare_lto(cgcx, exported_symbols_for_lto, each_linked_rlib_for_lto, dcx)?;
let symbols_below_threshold =
symbols_below_threshold.iter().map(|c| c.as_ptr()).collect::<Vec<_>>();
fat_lto(cgcx, dcx, modules, cached_modules, upstream_modules, &symbols_below_threshold)
fat_lto(cgcx, dcx, modules, upstream_modules, &symbols_below_threshold)
}
/// Performs thin LTO by performing necessary global analysis and returning two
@@ -215,12 +154,15 @@ pub(crate) fn run_fat(
/// can simply be copied over from the incr. comp. cache.
pub(crate) fn run_thin(
cgcx: &CodegenContext<LlvmCodegenBackend>,
exported_symbols_for_lto: &[String],
each_linked_rlib_for_lto: &[PathBuf],
modules: Vec<(String, ThinBuffer)>,
cached_modules: Vec<(SerializedModule<ModuleBuffer>, WorkProduct)>,
) -> Result<(Vec<ThinModule<LlvmCodegenBackend>>, Vec<WorkProduct>), FatalError> {
let dcx = cgcx.create_dcx();
let dcx = dcx.handle();
let (symbols_below_threshold, upstream_modules) = prepare_lto(cgcx, dcx)?;
let (symbols_below_threshold, upstream_modules) =
prepare_lto(cgcx, exported_symbols_for_lto, each_linked_rlib_for_lto, dcx)?;
let symbols_below_threshold =
symbols_below_threshold.iter().map(|c| c.as_ptr()).collect::<Vec<_>>();
if cgcx.opts.cg.linker_plugin_lto.enabled() {
@@ -245,7 +187,6 @@ fn fat_lto(
cgcx: &CodegenContext<LlvmCodegenBackend>,
dcx: DiagCtxtHandle<'_>,
modules: Vec<FatLtoInput<LlvmCodegenBackend>>,
cached_modules: Vec<(SerializedModule<ModuleBuffer>, WorkProduct)>,
mut serialized_modules: Vec<(SerializedModule<ModuleBuffer>, CString)>,
symbols_below_threshold: &[*const libc::c_char],
) -> Result<ModuleCodegen<ModuleLlvm>, FatalError> {
@@ -258,21 +199,12 @@ fn fat_lto(
// modules that are serialized in-memory.
// * `in_memory` contains modules which are already parsed and in-memory,
// such as from multi-CGU builds.
//
// All of `cached_modules` (cached from previous incremental builds) can
// immediately go onto the `serialized_modules` modules list and then we can
// split the `modules` array into these two lists.
let mut in_memory = Vec::new();
serialized_modules.extend(cached_modules.into_iter().map(|(buffer, wp)| {
info!("pushing cached module {:?}", wp.cgu_name);
(buffer, CString::new(wp.cgu_name).unwrap())
}));
for module in modules {
match module {
FatLtoInput::InMemory(m) => in_memory.push(m),
FatLtoInput::Serialized { name, buffer } => {
info!("pushing serialized module {:?}", name);
let buffer = SerializedModule::Local(buffer);
serialized_modules.push((buffer, CString::new(name).unwrap()));
}
}
@@ -654,6 +586,7 @@ pub(crate) fn run_pass_manager(
// We then run the llvm_optimize function a second time, to optimize the code which we generated
// in the enzyme differentiation pass.
let enable_ad = config.autodiff.contains(&config::AutoDiff::Enable);
let enable_gpu = config.offload.contains(&config::Offload::Enable);
let stage = if thin {
write::AutodiffStage::PreAD
} else {
@@ -668,6 +601,12 @@ pub(crate) fn run_pass_manager(
write::llvm_optimize(cgcx, dcx, module, None, config, opt_level, opt_stage, stage)?;
}
if enable_gpu && !thin {
let cx =
SimpleCx::new(module.module_llvm.llmod(), &module.module_llvm.llcx, cgcx.pointer_size);
crate::builder::gpu_offload::handle_gpu_code(cgcx, &cx);
}
if cfg!(llvm_enzyme) && enable_ad && !thin {
let cx =
SimpleCx::new(module.module_llvm.llmod(), &module.module_llvm.llcx, cgcx.pointer_size);
@@ -3,6 +3,7 @@
use std::{iter, ptr};
pub(crate) mod autodiff;
pub(crate) mod gpu_offload;
use libc::{c_char, c_uint, size_t};
use rustc_abi as abi;
@@ -117,6 +118,74 @@ pub(crate) fn build(cx: &'a GenericCx<'ll, CX>, llbb: &'ll BasicBlock) -> Self {
}
bx
}
// The generic builder has less functionality and thus (unlike the other alloca) we can not
// easily jump to the beginning of the function to place our allocas there. We trust the user
// to manually do that. FIXME(offload): improve the genericCx and add more llvm wrappers to
// handle this.
pub(crate) fn direct_alloca(&mut self, ty: &'ll Type, align: Align, name: &str) -> &'ll Value {
    // Emit the alloca with the requested alignment, then cast the resulting
    // pointer to the default address space if necessary.
    let ptr = unsafe {
        let slot = llvm::LLVMBuildAlloca(self.llbuilder, ty, UNNAMED);
        llvm::LLVMSetAlignment(slot, align.bytes() as c_uint);
        llvm::LLVMBuildPointerCast(self.llbuilder, slot, self.cx.type_ptr(), UNNAMED)
    };
    // Only attach a value name when the caller supplied a non-empty one.
    if !name.is_empty() {
        let c_name = std::ffi::CString::new(name).unwrap();
        llvm::set_value_name(ptr, c_name.as_bytes());
    }
    ptr
}
/// Emits an inbounds GEP over `ty` rooted at `ptr` with the given `indices`,
/// via `LLVMBuildGEPWithNoWrapFlags`.
pub(crate) fn inbounds_gep(
    &mut self,
    ty: &'ll Type,
    ptr: &'ll Value,
    indices: &[&'ll Value],
) -> &'ll Value {
    let num_indices = indices.len() as c_uint;
    unsafe {
        llvm::LLVMBuildGEPWithNoWrapFlags(
            self.llbuilder,
            ty,
            ptr,
            indices.as_ptr(),
            num_indices,
            UNNAMED,
            GEPNoWrapFlags::InBounds,
        )
    }
}
/// Stores `val` through `ptr` with the given alignment and returns the store
/// instruction. Panics (debug assertion) if `ptr` is not pointer-typed.
pub(crate) fn store(&mut self, val: &'ll Value, ptr: &'ll Value, align: Align) -> &'ll Value {
    debug!("Store {:?} -> {:?}", val, ptr);
    assert_eq!(self.cx.type_kind(self.cx.val_ty(ptr)), TypeKind::Pointer);
    let inst = unsafe { llvm::LLVMBuildStore(self.llbuilder, val, ptr) };
    unsafe { llvm::LLVMSetAlignment(inst, align.bytes() as c_uint) };
    inst
}
/// Loads a value of type `ty` from `ptr` with the given alignment.
pub(crate) fn load(&mut self, ty: &'ll Type, ptr: &'ll Value, align: Align) -> &'ll Value {
    let inst = unsafe { llvm::LLVMBuildLoad2(self.llbuilder, ty, ptr, UNNAMED) };
    unsafe { llvm::LLVMSetAlignment(inst, align.bytes() as c_uint) };
    inst
}
// Fills `size` bytes starting at `ptr` with `fill_byte`, at the given
// alignment. Thin wrapper over `LLVMRustBuildMemSet`; the trailing `false`
// marks the memset as non-volatile.
fn memset(&mut self, ptr: &'ll Value, fill_byte: &'ll Value, size: &'ll Value, align: Align) {
    unsafe {
        llvm::LLVMRustBuildMemSet(
            self.llbuilder,
            ptr,
            align.bytes() as c_uint,
            fill_byte,
            size,
            false,
        );
    }
}
}
/// Empty string, to be used where LLVM expects an instruction name, indicating
@@ -0,0 +1,439 @@
use std::ffi::CString;
use llvm::Linkage::*;
use rustc_abi::Align;
use rustc_codegen_ssa::back::write::CodegenContext;
use rustc_codegen_ssa::traits::BaseTypeCodegenMethods;
use crate::builder::SBuilder;
use crate::common::AsCCharPtr;
use crate::llvm::AttributePlace::Function;
use crate::llvm::{self, Linkage, Type, Value};
use crate::{LlvmCodegenBackend, SimpleCx, attributes};
/// Scans the module for offload kernels (`kernel_0` .. `kernel_8`) and
/// generates the per-kernel offload entries plus the host-side call handling.
pub(crate) fn handle_gpu_code<'ll>(
    _cgcx: &CodegenContext<LlvmCodegenBackend>,
    cx: &'ll SimpleCx<'_>,
) {
    // Entry metadata struct type shared by every kernel registered below.
    let offload_entry_ty = add_tgt_offload_entry(&cx);
    // Pair each present kernel with its offload memory transfer type.
    let (kernels, o_types): (Vec<_>, Vec<_>) = (0..9)
        .filter_map(|num| {
            let kernel = cx.get_function(&format!("kernel_{num}"))?;
            Some((kernel, gen_define_handling(&cx, kernel, offload_entry_ty, num)))
        })
        .unzip();
    gen_call_handling(&cx, &kernels, &o_types);
}
// What is our @1 here? A magic global, used in our data_{begin/update/end}_mapper:
// @0 = private unnamed_addr constant [23 x i8] c";unknown;unknown;0;0;;\00", align 1
// @1 = private unnamed_addr constant %struct.ident_t { i32 0, i32 2, i32 0, i32 22, ptr @0 }, align 8
fn generate_at_one<'ll>(cx: &'ll SimpleCx<'_>) -> &'ll llvm::Value {
    // @0: the NUL-terminated "unknown location" string constant.
    let location_str = CString::new(";unknown;unknown;0;0;;").unwrap();
    let str_init = crate::common::bytes_in_context(cx.llcx, location_str.as_bytes_with_nul());
    let at_zero = add_unnamed_global(&cx, &"", str_init, PrivateLinkage);
    llvm::set_alignment(at_zero, Align::ONE);

    // @1: %struct.ident_t { i32 0, i32 2, i32 0, i32 22, ptr @0 }.
    let ident_ty = cx.type_named_struct("struct.ident_t");
    let fields = vec![
        cx.get_const_i32(0),
        cx.get_const_i32(2),
        cx.get_const_i32(0),
        cx.get_const_i32(22),
        at_zero,
    ];
    let field_tys: Vec<_> = fields.iter().map(|&f| cx.val_ty(f)).collect();
    let ident_init = crate::common::named_struct(ident_ty, &fields);
    cx.set_struct_body(ident_ty, &field_tys, false);
    let at_one = add_unnamed_global(&cx, &"", ident_init, PrivateLinkage);
    llvm::set_alignment(at_one, Align::EIGHT);
    at_one
}
pub(crate) fn add_tgt_offload_entry<'ll>(cx: &'ll SimpleCx<'_>) -> &'ll llvm::Type {
    // For each kernel to run on the gpu, we will later generate one entry of this type.
    // Layout copied from LLVM:
    // typedef struct {
    //   uint64_t Reserved;
    //   uint16_t Version;
    //   uint16_t Kind;
    //   uint32_t Flags;    Flags associated with the entry (see Target Region Entry Flags)
    //   void *Address;     Address of global symbol within device image (function or global)
    //   char *SymbolName;
    //   uint64_t Size;     Size of the entry info (0 if it is a function)
    //   uint64_t Data;
    //   void *AuxAddr;
    // } __tgt_offload_entry;
    let entry_ty = cx.type_named_struct("struct.__tgt_offload_entry");
    let ptr = cx.type_ptr();
    let i64t = cx.type_i64();
    let i32t = cx.type_i32();
    let i16t = cx.type_i16();
    let fields = [i64t, i16t, i16t, i32t, ptr, ptr, i64t, i64t, ptr];
    cx.set_struct_body(entry_ty, &fields, false);
    entry_ty
}
fn gen_tgt_kernel_global<'ll>(cx: &'ll SimpleCx<'_>) {
    let ptr = cx.type_ptr();
    let i64t = cx.type_i64();
    let i32t = cx.type_i32();
    let i32x3 = cx.type_array(i32t, 3);
    // Taken from the LLVM APITypes.h declaration:
    //struct KernelArgsTy {
    //  uint32_t Version = 0; // Version of this struct for ABI compatibility.
    //  uint32_t NumArgs = 0; // Number of arguments in each input pointer.
    //  void **ArgBasePtrs =
    //      nullptr;               // Base pointer of each argument (e.g. a struct).
    //  void **ArgPtrs = nullptr;  // Pointer to the argument data.
    //  int64_t *ArgSizes = nullptr;  // Size of the argument data in bytes.
    //  int64_t *ArgTypes = nullptr;  // Type of the data (e.g. to / from).
    //  void **ArgNames = nullptr;    // Name of the data for debugging, possibly null.
    //  void **ArgMappers = nullptr;  // User-defined mappers, possibly null.
    //  uint64_t Tripcount =
    //      0;  // Tripcount for the teams / distribute loop, 0 otherwise.
    //  struct {
    //    uint64_t NoWait : 1;  // Was this kernel spawned with a `nowait` clause.
    //    uint64_t IsCUDA : 1;  // Was this kernel spawned via CUDA.
    //    uint64_t Unused : 62;
    //  } Flags = {0, 0, 0};
    //  // The number of teams (for x,y,z dimension).
    //  uint32_t NumTeams[3] = {0, 0, 0};
    //  // The number of threads (for x,y,z dimension).
    //  uint32_t ThreadLimit[3] = {0, 0, 0};
    //  uint32_t DynCGroupMem = 0;  // Amount of dynamic cgroup memory requested.
    //};
    let kernel_args_ty = cx.type_named_struct("struct.__tgt_kernel_arguments");
    let fields = [i32t, i32t, ptr, ptr, ptr, ptr, ptr, ptr, i64t, i64t, i32x3, i32x3, i32t];
    cx.set_struct_body(kernel_args_ty, &fields, false);
    // For now we don't handle kernels, so for now we just add a global dummy
    // to make sure that the __tgt_offload_entry is defined and handled correctly.
    cx.declare_global("my_struct_global2", kernel_args_ty);
}
fn gen_tgt_data_mappers<'ll>(
    cx: &'ll SimpleCx<'_>,
) -> (&'ll llvm::Value, &'ll llvm::Value, &'ll llvm::Value, &'ll llvm::Type) {
    // All three data mappers share a single signature:
    //   void(ptr, i64, i32, ptr, ptr, ptr, ptr, ptr, ptr)
    let ptr = cx.type_ptr();
    let arg_tys = [ptr, cx.type_i64(), cx.type_i32(), ptr, ptr, ptr, ptr, ptr, ptr];
    let mapper_fn_ty = cx.type_func(&arg_tys, cx.type_void());

    let begin = declare_offload_fn(cx, "__tgt_target_data_begin_mapper", mapper_fn_ty);
    let update = declare_offload_fn(cx, "__tgt_target_data_update_mapper", mapper_fn_ty);
    let end = declare_offload_fn(cx, "__tgt_target_data_end_mapper", mapper_fn_ty);

    // Mark every mapper entry point as non-unwinding.
    let nounwind = llvm::AttributeKind::NoUnwind.create_attr(cx.llcx);
    for decl in [begin, update, end] {
        attributes::apply_to_llfn(decl, Function, &[nounwind]);
    }

    (begin, update, end, mapper_fn_ty)
}
fn add_priv_unnamed_arr<'ll>(cx: &SimpleCx<'ll>, name: &str, vals: &[u64]) -> &'ll llvm::Value {
    // Lower each u64 to an i64 constant and wrap the lot in a private,
    // unnamed-addr constant array global.
    let elems: Vec<_> = vals.iter().map(|&v| cx.get_const_i64(v)).collect();
    let initializer = cx.const_array(cx.type_i64(), &elems);
    add_unnamed_global(cx, name, initializer, PrivateLinkage)
}
pub(crate) fn add_unnamed_global<'ll>(
    cx: &SimpleCx<'ll>,
    name: &str,
    initializer: &'ll llvm::Value,
    l: Linkage,
) -> &'ll llvm::Value {
    // Same as `add_global`, but additionally marks the symbol's address as
    // insignificant (unnamed_addr), allowing LLVM to merge identical constants.
    let global = add_global(cx, name, initializer, l);
    llvm::LLVMSetUnnamedAddress(global, llvm::UnnamedAddr::Global);
    global
}
pub(crate) fn add_global<'ll>(
    cx: &SimpleCx<'ll>,
    name: &str,
    initializer: &'ll llvm::Value,
    l: Linkage,
) -> &'ll llvm::Value {
    // Create a constant global named `name` with the given linkage; its type
    // is taken from the initializer value.
    let c_name = CString::new(name).unwrap();
    let global: &'ll llvm::Value = llvm::add_global(cx.llmod, cx.val_ty(initializer), &c_name);
    llvm::set_global_constant(global, true);
    llvm::set_linkage(global, l);
    llvm::set_initializer(global, initializer);
    global
}
// Generates the per-kernel offload globals: the `.offload_sizes.N` and
// `.offload_maptypes.N` arrays, the weak `region_id` byte, the kernel-name
// rodata string, and the `__tgt_offload_entry` describing the kernel. Returns
// the maptypes global so the caller can pass it to the data mappers.
fn gen_define_handling<'ll>(
    cx: &'ll SimpleCx<'_>,
    kernel: &'ll llvm::Value,
    offload_entry_ty: &'ll llvm::Type,
    num: i64,
) -> &'ll llvm::Value {
    let types = cx.func_params_types(cx.get_type_of_global(kernel));
    // It seems like non-pointer values are automatically mapped. So here, we focus on pointer (or
    // reference) types.
    // FIX: this previously used `.map(..).count()`, which counts *every*
    // parameter (map preserves length); `.filter(..).count()` restricts the
    // count to pointer-typed parameters as intended.
    let num_ptr_types = types
        .iter()
        .filter(|&&x| matches!(cx.type_kind(x), rustc_codegen_ssa::common::TypeKind::Pointer))
        .count();

    // We do not know their size anymore at this level, so hardcode a placeholder.
    // A follow-up pr will track these from the frontend, where we still have Rust types.
    // Then, we will be able to figure out that e.g. `&[f32;256]` will result in 4*256 bytes.
    // I decided that 1024 bytes is a great placeholder value for now.
    add_priv_unnamed_arr(&cx, &format!(".offload_sizes.{num}"), &vec![1024; num_ptr_types]);
    // Here we figure out whether something needs to be copied to the gpu (=1), from the gpu (=2),
    // or both to and from the gpu (=3). Other values shouldn't affect us for now.
    // A non-mutable reference or pointer will be 1, an array that's not read, but fully overwritten
    // will be 2. For now, everything is 3, until we have our frontend set up.
    let o_types =
        add_priv_unnamed_arr(&cx, &format!(".offload_maptypes.{num}"), &vec![3; num_ptr_types]);

    // Next: For each function, generate these three entries. A weak constant,
    // the llvm.rodata entry name, and the omp_offloading_entries value
    let name = format!(".kernel_{num}.region_id");
    let initializer = cx.get_const_i8(0);
    let region_id = add_unnamed_global(&cx, &name, initializer, WeakAnyLinkage);

    let c_entry_name = CString::new(format!("kernel_{num}")).unwrap();
    let c_val = c_entry_name.as_bytes_with_nul();
    let offload_entry_name = format!(".offloading.entry_name.{num}");
    let initializer = crate::common::bytes_in_context(cx.llcx, c_val);
    let llglobal = add_unnamed_global(&cx, &offload_entry_name, initializer, InternalLinkage);
    llvm::set_alignment(llglobal, Align::ONE);
    llvm::set_section(llglobal, c".llvm.rodata.offloading");

    // Not actively used yet, for calling real kernels
    let name = format!(".offloading.entry.kernel_{num}");

    // See the __tgt_offload_entry documentation above.
    let reserved = cx.get_const_i64(0);
    let version = cx.get_const_i16(1);
    let kind = cx.get_const_i16(1);
    let flags = cx.get_const_i32(0);
    let size = cx.get_const_i64(0);
    let data = cx.get_const_i64(0);
    let aux_addr = cx.const_null(cx.type_ptr());
    let elems = vec![reserved, version, kind, flags, region_id, llglobal, size, data, aux_addr];

    let initializer = crate::common::named_struct(offload_entry_ty, &elems);
    let c_name = CString::new(name).unwrap();
    let llglobal = llvm::add_global(cx.llmod, offload_entry_ty, &c_name);
    llvm::set_global_constant(llglobal, true);
    llvm::set_linkage(llglobal, WeakAnyLinkage);
    llvm::set_initializer(llglobal, initializer);
    llvm::set_alignment(llglobal, Align::ONE);
    let c_section_name = CString::new(".omp_offloading_entries").unwrap();
    llvm::set_section(llglobal, &c_section_name);
    o_types
}
// Declares (or retrieves) a C-ABI function for the offload machinery, e.g. the
// `__tgt_*` mapper entry points used above. The symbol keeps a significant
// address and default visibility.
fn declare_offload_fn<'ll>(
    cx: &'ll SimpleCx<'_>,
    name: &str,
    ty: &'ll llvm::Type,
) -> &'ll llvm::Value {
    crate::declare::declare_simple_fn(
        cx,
        name,
        llvm::CallConv::CCallConv,
        llvm::UnnamedAddr::No,
        llvm::Visibility::Default,
        ty,
    )
}
// For each kernel *call*, we now use some of our previous declared globals to move data to and from
// the gpu. We don't have a proper frontend yet, so we assume that every call to a kernel function
// from main is intended to run on the GPU. For now, we only handle the data transfer part of it.
// If two consecutive kernels use the same memory, we still move it to the host and back to the gpu.
// Since in our frontend users (by default) don't have to specify data transfer, this is something
// we should optimize in the future! We also assume that everything should be copied back and forth,
// but sometimes we can directly zero-allocate on the device and only move back, or if something is
// immutable, we might only copy it to the device, but not back.
//
// Current steps:
// 0. Alloca some variables for the following steps
// 1. set insert point before kernel call.
// 2. generate all the GEPS and stores, to be used in 3)
// 3. generate __tgt_target_data_begin calls to move data to the GPU
//
// unchanged: keep kernel call. Later move the kernel to the GPU
//
// 4. set insert point after kernel call.
// 5. generate all the GEPS and stores, to be used in 6)
// 6. generate __tgt_target_data_end calls to move data from the GPU
fn gen_call_handling<'ll>(
    cx: &'ll SimpleCx<'_>,
    _kernels: &[&'ll llvm::Value],
    o_types: &[&'ll llvm::Value],
) {
    // Declare the binary-descriptor type used by the register/unregister calls.
    // %struct.__tgt_bin_desc = type { i32, ptr, ptr, ptr }
    let tptr = cx.type_ptr();
    let ti32 = cx.type_i32();
    let tgt_bin_desc_ty = vec![ti32, tptr, tptr, tptr];
    let tgt_bin_desc = cx.type_named_struct("struct.__tgt_bin_desc");
    cx.set_struct_body(tgt_bin_desc, &tgt_bin_desc_ty, false);

    gen_tgt_kernel_global(&cx);
    let (begin_mapper_decl, _, end_mapper_decl, fn_ty) = gen_tgt_data_mappers(&cx);

    // Without a `main` there is no call site to instrument; bail out quietly.
    let main_fn = cx.get_function("main");
    let Some(main_fn) = main_fn else { return };
    // NOTE(review): only the first call to `kernel_1` inside `main` is handled
    // here — the `_kernels` slice is currently unused. Presumably a follow-up
    // will iterate over all kernels; confirm before relying on this.
    let kernel_name = "kernel_1";
    let call = unsafe {
        llvm::LLVMRustGetFunctionCall(main_fn, kernel_name.as_c_char_ptr(), kernel_name.len())
    };
    let Some(kernel_call) = call else {
        return;
    };
    let kernel_call_bb = unsafe { llvm::LLVMGetInstructionParent(kernel_call) };
    let called = unsafe { llvm::LLVMGetCalledValue(kernel_call).unwrap() };
    let mut builder = SBuilder::build(cx, kernel_call_bb);

    let types = cx.func_params_types(cx.get_type_of_global(called));
    let num_args = types.len() as u64;

    // Step 0)
    // %struct.__tgt_bin_desc = type { i32, ptr, ptr, ptr }
    // %6 = alloca %struct.__tgt_bin_desc, align 8
    // All allocas go past the existing allocas at the top of `main`.
    unsafe { llvm::LLVMRustPositionBuilderPastAllocas(builder.llbuilder, main_fn) };

    let tgt_bin_desc_alloca = builder.direct_alloca(tgt_bin_desc, Align::EIGHT, "EmptyDesc");

    let ty = cx.type_array(cx.type_ptr(), num_args);
    // Baseptr are just the input pointer to the kernel, stored in a local alloca
    let a1 = builder.direct_alloca(ty, Align::EIGHT, ".offload_baseptrs");
    // Ptrs are the result of a gep into the baseptr, at least for our trivial types.
    let a2 = builder.direct_alloca(ty, Align::EIGHT, ".offload_ptrs");
    // These represent the sizes in bytes, e.g. the entry for `&[f64; 16]` will be 8*16.
    let ty2 = cx.type_array(cx.type_i64(), num_args);
    let a4 = builder.direct_alloca(ty2, Align::EIGHT, ".offload_sizes");
    // Now we allocate once per function param, a copy to be passed to one of our maps.
    let mut vals = vec![];
    let mut geps = vec![];
    let i32_0 = cx.get_const_i32(0);
    for (index, in_ty) in types.iter().enumerate() {
        // get function arg, store it into the alloca, and read it.
        let p = llvm::get_param(called, index as u32);
        let name = llvm::get_value_name(p);
        let name = str::from_utf8(&name).unwrap();
        let arg_name = format!("{name}.addr");
        let alloca = builder.direct_alloca(in_ty, Align::EIGHT, &arg_name);

        builder.store(p, alloca, Align::EIGHT);
        let val = builder.load(in_ty, alloca, Align::EIGHT);
        // NOTE(review): the GEP element type is hardcoded to f32, matching the
        // 1024-byte / 256-f32 placeholder used elsewhere — revisit once real
        // type information reaches this point.
        let gep = builder.inbounds_gep(cx.type_f32(), val, &[i32_0]);
        vals.push(val);
        geps.push(gep);
    }

    // Step 1) — everything from here is inserted just before the kernel call.
    unsafe { llvm::LLVMRustPositionBefore(builder.llbuilder, kernel_call) };

    // Zero the 32-byte descriptor (assumes the 64-bit layout of
    // __tgt_bin_desc — TODO confirm on 32-bit targets).
    builder.memset(tgt_bin_desc_alloca, cx.get_const_i8(0), cx.get_const_i64(32), Align::EIGHT);

    let mapper_fn_ty = cx.type_func(&[cx.type_ptr()], cx.type_void());
    let register_lib_decl = declare_offload_fn(&cx, "__tgt_register_lib", mapper_fn_ty);
    let unregister_lib_decl = declare_offload_fn(&cx, "__tgt_unregister_lib", mapper_fn_ty);
    let init_ty = cx.type_func(&[], cx.type_void());
    let init_rtls_decl = declare_offload_fn(cx, "__tgt_init_all_rtls", init_ty);

    // call void @__tgt_register_lib(ptr noundef %6)
    builder.call(mapper_fn_ty, register_lib_decl, &[tgt_bin_desc_alloca], None);
    // call void @__tgt_init_all_rtls()
    builder.call(init_ty, init_rtls_decl, &[], None);
    // Fill slot i of the baseptr/ptr/size arrays for each kernel argument.
    for i in 0..num_args {
        let idx = cx.get_const_i32(i);
        let gep1 = builder.inbounds_gep(ty, a1, &[i32_0, idx]);
        builder.store(vals[i as usize], gep1, Align::EIGHT);
        let gep2 = builder.inbounds_gep(ty, a2, &[i32_0, idx]);
        builder.store(geps[i as usize], gep2, Align::EIGHT);
        let gep3 = builder.inbounds_gep(ty2, a4, &[i32_0, idx]);
        // As mentioned above, we don't use Rust type information yet. So for now we will just
        // assume that we have 1024 bytes, 256 f32 values.
        // FIXME(offload): write an offload frontend and handle arbitrary types.
        builder.store(cx.get_const_i64(1024), gep3, Align::EIGHT);
    }

    // For now we have a very simplistic indexing scheme into our
    // offload_{baseptrs,ptrs,sizes}. We will probably improve this along with our gpu frontend pr.

    // Returns pointers to the start of the baseptr, ptr and size arrays.
    fn get_geps<'a, 'll>(
        builder: &mut SBuilder<'a, 'll>,
        cx: &'ll SimpleCx<'ll>,
        ty: &'ll Type,
        ty2: &'ll Type,
        a1: &'ll Value,
        a2: &'ll Value,
        a4: &'ll Value,
    ) -> (&'ll Value, &'ll Value, &'ll Value) {
        let i32_0 = cx.get_const_i32(0);

        let gep1 = builder.inbounds_gep(ty, a1, &[i32_0, i32_0]);
        let gep2 = builder.inbounds_gep(ty, a2, &[i32_0, i32_0]);
        let gep3 = builder.inbounds_gep(ty2, a4, &[i32_0, i32_0]);
        (gep1, gep2, gep3)
    }

    // Emits one `__tgt_target_data_{begin,update,end}_mapper` call with the
    // standard argument list (see the sample IR at the bottom of this
    // function). `i64_max` is -1 when read as i64, per those sample calls.
    fn generate_mapper_call<'a, 'll>(
        builder: &mut SBuilder<'a, 'll>,
        cx: &'ll SimpleCx<'ll>,
        geps: (&'ll Value, &'ll Value, &'ll Value),
        o_type: &'ll Value,
        fn_to_call: &'ll Value,
        fn_ty: &'ll Type,
        num_args: u64,
        s_ident_t: &'ll Value,
    ) {
        let nullptr = cx.const_null(cx.type_ptr());
        let i64_max = cx.get_const_i64(u64::MAX);
        let num_args = cx.get_const_i32(num_args);
        let args =
            vec![s_ident_t, i64_max, num_args, geps.0, geps.1, geps.2, o_type, nullptr, nullptr];
        builder.call(fn_ty, fn_to_call, &args, None);
    }

    // Step 2)
    let s_ident_t = generate_at_one(&cx);
    let o = o_types[0];
    let geps = get_geps(&mut builder, &cx, ty, ty2, a1, a2, a4);
    generate_mapper_call(&mut builder, &cx, geps, o, begin_mapper_decl, fn_ty, num_args, s_ident_t);

    // Step 3)
    // Here we will add code for the actual kernel launches in a follow-up PR.
    // FIXME(offload): launch kernels

    // Step 4) — the end mapper and unregister go after the kernel call.
    unsafe { llvm::LLVMRustPositionAfter(builder.llbuilder, kernel_call) };

    let geps = get_geps(&mut builder, &cx, ty, ty2, a1, a2, a4);
    generate_mapper_call(&mut builder, &cx, geps, o, end_mapper_decl, fn_ty, num_args, s_ident_t);

    builder.call(mapper_fn_ty, unregister_lib_decl, &[tgt_bin_desc_alloca], None);

    // With this we generated the following begin and end mappers. We could easily generate the
    // update mapper in a follow-up.
    // call void @__tgt_target_data_begin_mapper(ptr @1, i64 -1, i32 3, ptr %27, ptr %28, ptr %29, ptr @.offload_maptypes, ptr null, ptr null)
    // call void @__tgt_target_data_update_mapper(ptr @1, i64 -1, i32 2, ptr %46, ptr %47, ptr %48, ptr @.offload_maptypes.1, ptr null, ptr null)
    // call void @__tgt_target_data_end_mapper(ptr @1, i64 -1, i32 3, ptr %49, ptr %50, ptr %51, ptr @.offload_maptypes, ptr null, ptr null)
}
@@ -118,6 +118,10 @@ pub(crate) fn const_get_elt(&self, v: &'ll Value, idx: u64) -> &'ll Value {
r
}
}
pub(crate) fn const_null(&self, t: &'ll Type) -> &'ll Value {
    // All-zero constant of type `t` (null pointer for pointer types).
    unsafe { llvm::LLVMConstNull(t) }
}
}
impl<'ll, 'tcx> ConstCodegenMethods for CodegenCx<'ll, 'tcx> {
@@ -377,6 +381,11 @@ pub(crate) fn bytes_in_context<'ll>(llcx: &'ll llvm::Context, bytes: &[u8]) -> &
}
}
// Builds a constant of the named struct type `ty` from the given field values.
pub(crate) fn named_struct<'ll>(ty: &'ll Type, elts: &[&'ll Value]) -> &'ll Value {
    // FIX: the expect message previously named LLVMConstStructInContext
    // (copy-paste from `struct_in_context`), but this function calls
    // LLVMConstNamedStruct.
    let len = c_uint::try_from(elts.len()).expect("LLVMConstNamedStruct elements len overflow");
    unsafe { llvm::LLVMConstNamedStruct(ty, elts.as_ptr(), len) }
}
fn struct_in_context<'ll>(
llcx: &'ll llvm::Context,
elts: &[&'ll Value],
+17 -1
View File
@@ -216,7 +216,7 @@ pub(crate) unsafe fn create_module<'ll>(
// Ensure the data-layout values hardcoded remain the defaults.
{
let tm = crate::back::write::create_informational_target_machine(tcx.sess, false);
let tm = crate::back::write::create_informational_target_machine(sess, false);
unsafe {
llvm::LLVMRustSetDataLayoutFromTargetMachine(llmod, tm.raw());
}
@@ -685,6 +685,22 @@ pub(crate) fn get_const_int(&self, ty: &'ll Type, val: u64) -> &'ll Value {
unsafe { llvm::LLVMConstInt(ty, val, llvm::False) }
}
pub(crate) fn get_const_i64(&self, n: u64) -> &'ll Value {
    // Convenience wrapper: i64 constant with value `n`.
    self.get_const_int(self.type_i64(), n)
}
pub(crate) fn get_const_i32(&self, n: u64) -> &'ll Value {
    // Convenience wrapper: i32 constant with value `n` (truncated by LLVM).
    self.get_const_int(self.type_i32(), n)
}
pub(crate) fn get_const_i16(&self, n: u64) -> &'ll Value {
    // Convenience wrapper: i16 constant with value `n` (truncated by LLVM).
    self.get_const_int(self.type_i16(), n)
}
pub(crate) fn get_const_i8(&self, n: u64) -> &'ll Value {
    // Convenience wrapper: i8 constant with value `n` (truncated by LLVM).
    self.get_const_int(self.type_i8(), n)
}
pub(crate) fn get_function(&self, name: &str) -> Option<&'ll Value> {
let name = SmallCStr::new(name);
unsafe { llvm::LLVMGetNamedFunction((**self).borrow().llmod, name.as_ptr()) }
+4 -2
View File
@@ -215,7 +215,9 @@ pub(crate) fn declare_fn(
llfn
}
}
impl<'ll, CX: Borrow<SCx<'ll>>> GenericCx<'ll, CX> {
/// Declare a global with an intention to define it.
///
/// Use this function when you intend to define a global. This function will
@@ -234,13 +236,13 @@ pub(crate) fn define_global(&self, name: &str, ty: &'ll Type) -> Option<&'ll Val
///
/// Use this function when you intend to define a global without a name.
pub(crate) fn define_private_global(&self, ty: &'ll Type) -> &'ll Value {
unsafe { llvm::LLVMRustInsertPrivateGlobal(self.llmod, ty) }
unsafe { llvm::LLVMRustInsertPrivateGlobal(self.llmod(), ty) }
}
/// Gets declared value by name.
pub(crate) fn get_declared_value(&self, name: &str) -> Option<&'ll Value> {
debug!("get_declared_value(name={:?})", name);
unsafe { llvm::LLVMRustGetNamedValue(self.llmod, name.as_c_char_ptr(), name.len()) }
unsafe { llvm::LLVMRustGetNamedValue(self.llmod(), name.as_c_char_ptr(), name.len()) }
}
/// Gets defined or externally defined (AvailableExternally linkage) value by
-17
View File
@@ -20,11 +20,6 @@ pub(crate) struct SymbolAlreadyDefined<'a> {
#[diag(codegen_llvm_sanitizer_memtag_requires_mte)]
pub(crate) struct SanitizerMemtagRequiresMte;
#[derive(Diagnostic)]
#[diag(codegen_llvm_dynamic_linking_with_lto)]
#[note]
pub(crate) struct DynamicLinkingWithLTO;
pub(crate) struct ParseTargetMachineConfig<'a>(pub LlvmError<'a>);
impl<G: EmissionGuarantee> Diagnostic<'_, G> for ParseTargetMachineConfig<'_> {
@@ -41,18 +36,6 @@ fn into_diag(self, dcx: DiagCtxtHandle<'_>, level: Level) -> Diag<'_, G> {
#[diag(codegen_llvm_autodiff_without_enable)]
pub(crate) struct AutoDiffWithoutEnable;
#[derive(Diagnostic)]
#[diag(codegen_llvm_lto_disallowed)]
pub(crate) struct LtoDisallowed;
#[derive(Diagnostic)]
#[diag(codegen_llvm_lto_dylib)]
pub(crate) struct LtoDylib;
#[derive(Diagnostic)]
#[diag(codegen_llvm_lto_proc_macro)]
pub(crate) struct LtoProcMacro;
#[derive(Diagnostic)]
#[diag(codegen_llvm_lto_bitcode_from_rlib)]
pub(crate) struct LtoBitcodeFromRlib {
+9 -19
View File
@@ -382,26 +382,16 @@ fn codegen_intrinsic_call(
let width = size.bits();
let llty = self.type_ix(width);
match name {
sym::ctlz | sym::cttz => {
let y = self.const_bool(false);
let ret = self.call_intrinsic(
format!("llvm.{name}"),
&[llty],
&[args[0].immediate(), y],
);
self.intcast(ret, result.layout.llvm_type(self), false)
}
sym::ctlz_nonzero => {
let y = self.const_bool(true);
sym::ctlz | sym::ctlz_nonzero | sym::cttz | sym::cttz_nonzero => {
let y =
self.const_bool(name == sym::ctlz_nonzero || name == sym::cttz_nonzero);
let llvm_name = if name == sym::ctlz || name == sym::ctlz_nonzero {
"llvm.ctlz"
} else {
"llvm.cttz"
};
let ret =
self.call_intrinsic("llvm.ctlz", &[llty], &[args[0].immediate(), y]);
self.intcast(ret, result.layout.llvm_type(self), false)
}
sym::cttz_nonzero => {
let y = self.const_bool(true);
let ret =
self.call_intrinsic("llvm.cttz", &[llty], &[args[0].immediate(), y]);
self.call_intrinsic(llvm_name, &[llty], &[args[0].immediate(), y]);
self.intcast(ret, result.layout.llvm_type(self), false)
}
sym::ctpop => {
+29 -10
View File
@@ -22,6 +22,7 @@
use std::any::Any;
use std::ffi::CStr;
use std::mem::ManuallyDrop;
use std::path::PathBuf;
use back::owned_target_machine::OwnedTargetMachine;
use back::write::{create_informational_target_machine, create_target_machine};
@@ -176,11 +177,13 @@ fn run_link(
}
fn run_and_optimize_fat_lto(
cgcx: &CodegenContext<Self>,
exported_symbols_for_lto: &[String],
each_linked_rlib_for_lto: &[PathBuf],
modules: Vec<FatLtoInput<Self>>,
cached_modules: Vec<(SerializedModule<Self::ModuleBuffer>, WorkProduct)>,
diff_fncs: Vec<AutoDiffItem>,
) -> Result<ModuleCodegen<Self::Module>, FatalError> {
let mut module = back::lto::run_fat(cgcx, modules, cached_modules)?;
let mut module =
back::lto::run_fat(cgcx, exported_symbols_for_lto, each_linked_rlib_for_lto, modules)?;
if !diff_fncs.is_empty() {
builder::autodiff::differentiate(&module, cgcx, diff_fncs)?;
@@ -194,10 +197,18 @@ fn run_and_optimize_fat_lto(
}
fn run_thin_lto(
cgcx: &CodegenContext<Self>,
exported_symbols_for_lto: &[String],
each_linked_rlib_for_lto: &[PathBuf],
modules: Vec<(String, Self::ThinBuffer)>,
cached_modules: Vec<(SerializedModule<Self::ModuleBuffer>, WorkProduct)>,
) -> Result<(Vec<ThinModule<Self>>, Vec<WorkProduct>), FatalError> {
back::lto::run_thin(cgcx, modules, cached_modules)
back::lto::run_thin(
cgcx,
exported_symbols_for_lto,
each_linked_rlib_for_lto,
modules,
cached_modules,
)
}
fn optimize(
cgcx: &CodegenContext<Self>,
@@ -412,6 +423,20 @@ fn new_metadata(tcx: TyCtxt<'_>, mod_name: &str) -> Self {
}
}
// Builds a target machine from the codegen context's factory for module
// `name`, converting a factory failure into an almost-fatal diagnostic.
fn tm_from_cgcx(
    cgcx: &CodegenContext<LlvmCodegenBackend>,
    name: &str,
    dcx: DiagCtxtHandle<'_>,
) -> Result<OwnedTargetMachine, FatalError> {
    let tm_factory_config = TargetMachineFactoryConfig::new(cgcx, name);
    // `map_err` replaces the original `match` that re-wrapped `Ok` and used a
    // redundant `return` in the `Err` arm; behavior is unchanged.
    (cgcx.tm_factory)(tm_factory_config)
        .map_err(|e| dcx.emit_almost_fatal(ParseTargetMachineConfig(e)))
}
fn parse(
cgcx: &CodegenContext<LlvmCodegenBackend>,
name: &CStr,
@@ -421,13 +446,7 @@ fn parse(
unsafe {
let llcx = llvm::LLVMRustContextCreate(cgcx.fewer_names);
let llmod_raw = back::lto::parse_module(llcx, name, buffer, dcx)?;
let tm_factory_config = TargetMachineFactoryConfig::new(cgcx, name.to_str().unwrap());
let tm = match (cgcx.tm_factory)(tm_factory_config) {
Ok(m) => m,
Err(e) => {
return Err(dcx.emit_almost_fatal(ParseTargetMachineConfig(e)));
}
};
let tm = ModuleLlvm::tm_from_cgcx(cgcx, name.to_str().unwrap(), dcx)?;
Ok(ModuleLlvm { llmod_raw, llcx, tm: ManuallyDrop::new(tm) })
}
@@ -4,7 +4,7 @@
use super::MetadataKindId;
use super::ffi::{AttributeKind, BasicBlock, Metadata, Module, Type, Value};
use crate::llvm::Bool;
use crate::llvm::{Bool, Builder};
#[link(name = "llvm-wrapper", kind = "static")]
unsafe extern "C" {
@@ -31,6 +31,14 @@ pub(crate) fn LLVMRustRemoveEnumAttributeAtIndex(
index: c_uint,
kind: AttributeKind,
);
pub(crate) fn LLVMRustPositionBefore<'a>(B: &'a Builder<'_>, I: &'a Value);
pub(crate) fn LLVMRustPositionAfter<'a>(B: &'a Builder<'_>, I: &'a Value);
pub(crate) fn LLVMRustGetFunctionCall(
F: &Value,
name: *const c_char,
NameLen: libc::size_t,
) -> Option<&Value>;
}
unsafe extern "C" {
@@ -1138,6 +1138,11 @@ pub(crate) fn LLVMConstStructInContext<'a>(
Count: c_uint,
Packed: Bool,
) -> &'a Value;
pub(crate) fn LLVMConstNamedStruct<'a>(
StructTy: &'a Type,
ConstantVals: *const &'a Value,
Count: c_uint,
) -> &'a Value;
pub(crate) fn LLVMConstVector(ScalarConstantVals: *const &Value, Size: c_uint) -> &Value;
// Constant expressions
@@ -1217,6 +1222,8 @@ pub(crate) fn LLVMAppendBasicBlockInContext<'a>(
) -> &'a BasicBlock;
// Operations on instructions
pub(crate) fn LLVMGetInstructionParent(Inst: &Value) -> &BasicBlock;
pub(crate) fn LLVMGetCalledValue(CallInst: &Value) -> Option<&Value>;
pub(crate) fn LLVMIsAInstruction(Val: &Value) -> Option<&Value>;
pub(crate) fn LLVMGetFirstBasicBlock(Fn: &Value) -> &BasicBlock;
pub(crate) fn LLVMGetOperand(Val: &Value, Index: c_uint) -> Option<&Value>;
@@ -2557,6 +2564,7 @@ pub(crate) fn LLVMRustArchiveMemberNew<'a>(
pub(crate) fn LLVMRustSetDataLayoutFromTargetMachine<'a>(M: &'a Module, TM: &'a TargetMachine);
pub(crate) fn LLVMRustPositionBuilderPastAllocas<'a>(B: &Builder<'a>, Fn: &'a Value);
pub(crate) fn LLVMRustPositionBuilderAtStart<'a>(B: &Builder<'a>, BB: &'a BasicBlock);
pub(crate) fn LLVMRustSetModulePICLevel(M: &Module);
@@ -405,6 +405,8 @@ fn update_target_reliable_float_cfg(sess: &Session, cfg: &mut TargetConfig) {
("mips64" | "mips64r6", _) => false,
// Selection bug <https://github.com/llvm/llvm-project/issues/95471>
("nvptx64", _) => false,
// Unsupported https://github.com/llvm/llvm-project/issues/121122
("amdgpu", _) => false,
// ABI bugs <https://github.com/rust-lang/rust/issues/125109> et al. (full
// list at <https://github.com/rust-lang/rust/issues/116909>)
("powerpc" | "powerpc64", _) => false,
@@ -433,6 +435,9 @@ fn update_target_reliable_float_cfg(sess: &Session, cfg: &mut TargetConfig) {
// This rules out anything that doesn't have `long double` = `binary128`; <= 32 bits
// (ld is `f64`), anything other than Linux (Windows and MacOS use `f64`), and `x86`
// (ld is 80-bit extended precision).
//
// musl does not implement the symbols required for f128 math at all.
_ if target_env == "musl" => false,
("x86_64", _) => false,
(_, "linux") if target_pointer_width == 64 => true,
_ => false,
+10
View File
@@ -35,6 +35,10 @@ codegen_ssa_dlltool_fail_import_library =
{$stdout}
{$stderr}
codegen_ssa_dynamic_linking_with_lto =
cannot prefer dynamic linking when performing LTO
.note = only 'staticlib', 'bin', and 'cdylib' outputs are supported with LTO
codegen_ssa_error_calling_dlltool =
Error calling dlltool '{$dlltool_path}': {$error}
@@ -191,6 +195,12 @@ codegen_ssa_linker_unsupported_modifier = `as-needed` modifier not supported for
codegen_ssa_linking_failed = linking with `{$linker_path}` failed: {$exit_status}
codegen_ssa_lto_disallowed = lto can only be run for executables, cdylibs and static library outputs
codegen_ssa_lto_dylib = lto cannot be used for `dylib` crate type without `-Zdylib-lto`
codegen_ssa_lto_proc_macro = lto cannot be used for `proc-macro` crate type without `-Zdylib-lto`
codegen_ssa_malformed_cgu_name =
found malformed codegen unit name `{$user_path}`. codegen units names must always start with the name of the crate (`{$crate_name}` in this case).
@@ -4,7 +4,7 @@
use rustc_abi::Endian;
use rustc_data_structures::base_n::{CASE_INSENSITIVE, ToBaseN};
use rustc_data_structures::fx::FxIndexMap;
use rustc_data_structures::fx::{FxHashMap, FxIndexMap};
use rustc_data_structures::stable_hasher::StableHasher;
use rustc_hashes::Hash128;
use rustc_session::Session;
@@ -214,7 +214,7 @@ pub(super) fn create_raw_dylib_elf_stub_shared_objects<'a>(
/// It exports all the provided symbols, but is otherwise empty.
fn create_elf_raw_dylib_stub(sess: &Session, soname: &str, symbols: &[DllImport]) -> Vec<u8> {
use object::write::elf as write;
use object::{Architecture, elf};
use object::{AddressSize, Architecture, elf};
let mut stub_buf = Vec::new();
@@ -226,47 +226,6 @@ fn create_elf_raw_dylib_stub(sess: &Session, soname: &str, symbols: &[DllImport]
// It is important that the order of reservation matches the order of writing.
// The object crate contains many debug asserts that fire if you get this wrong.
let endianness = match sess.target.options.endian {
Endian::Little => object::Endianness::Little,
Endian::Big => object::Endianness::Big,
};
let mut stub = write::Writer::new(endianness, true, &mut stub_buf);
// These initial reservations don't reserve any bytes in the binary yet,
// they just allocate in the internal data structures.
// First, we crate the dynamic symbol table. It starts with a null symbol
// and then all the symbols and their dynamic strings.
stub.reserve_null_dynamic_symbol_index();
let dynstrs = symbols
.iter()
.map(|sym| {
stub.reserve_dynamic_symbol_index();
(sym, stub.add_dynamic_string(sym.name.as_str().as_bytes()))
})
.collect::<Vec<_>>();
let soname = stub.add_dynamic_string(soname.as_bytes());
// Reserve the sections.
// We have the minimal sections for a dynamic SO and .text where we point our dummy symbols to.
stub.reserve_shstrtab_section_index();
let text_section_name = stub.add_section_name(".text".as_bytes());
let text_section = stub.reserve_section_index();
stub.reserve_dynstr_section_index();
stub.reserve_dynsym_section_index();
stub.reserve_dynamic_section_index();
// These reservations now determine the actual layout order of the object file.
stub.reserve_file_header();
stub.reserve_shstrtab();
stub.reserve_section_headers();
stub.reserve_dynstr();
stub.reserve_dynsym();
stub.reserve_dynamic(2); // DT_SONAME, DT_NULL
// First write the ELF header with the arch information.
let Some((arch, sub_arch)) = sess.target.object_architecture(&sess.unstable_target_features)
else {
sess.dcx().fatal(format!(
@@ -274,6 +233,87 @@ fn create_elf_raw_dylib_stub(sess: &Session, soname: &str, symbols: &[DllImport]
sess.target.arch
));
};
let endianness = match sess.target.options.endian {
Endian::Little => object::Endianness::Little,
Endian::Big => object::Endianness::Big,
};
let is_64 = match arch.address_size() {
Some(AddressSize::U8 | AddressSize::U16 | AddressSize::U32) => false,
Some(AddressSize::U64) => true,
_ => sess.dcx().fatal(format!(
"raw-dylib is not supported for the architecture `{}`",
sess.target.arch
)),
};
let mut stub = write::Writer::new(endianness, is_64, &mut stub_buf);
let mut vers = Vec::new();
let mut vers_map = FxHashMap::default();
let mut syms = Vec::new();
for symbol in symbols {
let symbol_name = symbol.name.as_str();
if let Some((name, version_name)) = symbol_name.split_once('@') {
assert!(!version_name.contains('@'));
let dynstr = stub.add_dynamic_string(name.as_bytes());
let ver = if let Some(&ver_id) = vers_map.get(version_name) {
ver_id
} else {
let id = vers.len();
vers_map.insert(version_name, id);
let dynstr = stub.add_dynamic_string(version_name.as_bytes());
vers.push((version_name, dynstr));
id
};
syms.push((name, dynstr, Some(ver)));
} else {
let dynstr = stub.add_dynamic_string(symbol_name.as_bytes());
syms.push((symbol_name, dynstr, None));
}
}
let soname = stub.add_dynamic_string(soname.as_bytes());
// These initial reservations don't reserve any bytes in the binary yet,
// they just allocate in the internal data structures.
// First, we create the dynamic symbol table. It starts with a null symbol
// and then all the symbols and their dynamic strings.
stub.reserve_null_dynamic_symbol_index();
for _ in syms.iter() {
stub.reserve_dynamic_symbol_index();
}
// Reserve the sections.
// We have the minimal sections for a dynamic SO and .text where we point our dummy symbols to.
stub.reserve_shstrtab_section_index();
let text_section_name = stub.add_section_name(".text".as_bytes());
let text_section = stub.reserve_section_index();
stub.reserve_dynsym_section_index();
stub.reserve_dynstr_section_index();
if !vers.is_empty() {
stub.reserve_gnu_versym_section_index();
stub.reserve_gnu_verdef_section_index();
}
stub.reserve_dynamic_section_index();
// These reservations now determine the actual layout order of the object file.
stub.reserve_file_header();
stub.reserve_shstrtab();
stub.reserve_section_headers();
stub.reserve_dynsym();
stub.reserve_dynstr();
if !vers.is_empty() {
stub.reserve_gnu_versym();
stub.reserve_gnu_verdef(1 + vers.len(), 1 + vers.len());
}
stub.reserve_dynamic(2); // DT_SONAME, DT_NULL
// First write the ELF header with the arch information.
let e_machine = match (arch, sub_arch) {
(Architecture::Aarch64, None) => elf::EM_AARCH64,
(Architecture::Aarch64_Ilp32, None) => elf::EM_AARCH64,
@@ -342,18 +382,19 @@ fn create_elf_raw_dylib_stub(sess: &Session, soname: &str, symbols: &[DllImport]
sh_addralign: 1,
sh_entsize: 0,
});
stub.write_dynstr_section_header(0);
stub.write_dynsym_section_header(0, 1);
stub.write_dynstr_section_header(0);
if !vers.is_empty() {
stub.write_gnu_versym_section_header(0);
stub.write_gnu_verdef_section_header(0);
}
stub.write_dynamic_section_header(0);
// .dynstr
stub.write_dynstr();
// .dynsym
stub.write_null_dynamic_symbol();
for (_, name) in dynstrs {
for (_name, dynstr, _ver) in syms.iter().copied() {
stub.write_dynamic_symbol(&write::Sym {
name: Some(name),
name: Some(dynstr),
st_info: (elf::STB_GLOBAL << 4) | elf::STT_NOTYPE,
st_other: elf::STV_DEFAULT,
section: Some(text_section),
@@ -363,10 +404,47 @@ fn create_elf_raw_dylib_stub(sess: &Session, soname: &str, symbols: &[DllImport]
});
}
// .dynstr
stub.write_dynstr();
// ld.bfd is unhappy if these sections exist without any symbols, so we only generate them when necessary.
if !vers.is_empty() {
// .gnu_version
stub.write_null_gnu_versym();
for (_name, _dynstr, ver) in syms.iter().copied() {
stub.write_gnu_versym(if let Some(ver) = ver {
assert!((2 + ver as u16) < elf::VERSYM_HIDDEN);
elf::VERSYM_HIDDEN | (2 + ver as u16)
} else {
1
});
}
// .gnu_version_d
stub.write_align_gnu_verdef();
stub.write_gnu_verdef(&write::Verdef {
version: elf::VER_DEF_CURRENT,
flags: elf::VER_FLG_BASE,
index: 1,
aux_count: 1,
name: soname,
});
for (ver, (_name, dynstr)) in vers.into_iter().enumerate() {
stub.write_gnu_verdef(&write::Verdef {
version: elf::VER_DEF_CURRENT,
flags: 0,
index: 2 + ver as u16,
aux_count: 1,
name: dynstr,
});
}
}
// .dynamic
// the DT_SONAME will be used by the linker to populate DT_NEEDED
// which the loader uses to find the library.
// DT_NULL terminates the .dynamic table.
stub.write_align_dynamic();
stub.write_dynamic_string(elf::DT_SONAME, soname);
stub.write_dynamic(elf::DT_NULL, 0);
@@ -2,7 +2,15 @@
use std::sync::Arc;
use rustc_data_structures::memmap::Mmap;
use rustc_hir::def_id::{CrateNum, LOCAL_CRATE};
use rustc_middle::middle::exported_symbols::{ExportedSymbol, SymbolExportInfo, SymbolExportLevel};
use rustc_middle::ty::TyCtxt;
use rustc_session::config::{CrateType, Lto};
use tracing::info;
use crate::back::symbol_export::{self, symbol_name_for_instance_in_crate};
use crate::back::write::CodegenContext;
use crate::errors::{DynamicLinkingWithLTO, LtoDisallowed, LtoDylib, LtoProcMacro};
use crate::traits::*;
pub struct ThinModule<B: WriteBackendMethods> {
@@ -52,3 +60,86 @@ pub fn data(&self) -> &[u8] {
}
}
}
/// Whether a crate of the given type may take part in LTO.
///
/// Rlibs are the only crate type for which LTO is disallowed; every other
/// output kind is eligible. The match is kept exhaustive (no `_` arm) so that
/// adding a new `CrateType` variant forces an explicit decision here.
fn crate_type_allows_lto(crate_type: CrateType) -> bool {
    match crate_type {
        CrateType::Rlib => false,
        CrateType::Executable
        | CrateType::Dylib
        | CrateType::Staticlib
        | CrateType::Cdylib
        | CrateType::ProcMacro
        | CrateType::Sdylib => true,
    }
}
/// Computes the list of symbol names that must be preserved when running LTO.
///
/// Returns an empty list when LTO is disabled (`Lto::No`). For crate-local
/// ThinLTO only the local crate's exported symbols are collected; for full
/// fat/thin LTO the exported symbols of every crate in
/// `each_linked_rlib_for_lto` are included as well.
pub(super) fn exported_symbols_for_lto(
    tcx: TyCtxt<'_>,
    each_linked_rlib_for_lto: &[CrateNum],
) -> Vec<String> {
    // The export threshold decides which `SymbolExportLevel`s survive LTO
    // internalization; it depends on how wide the LTO scope is.
    let export_threshold = match tcx.sess.lto() {
        // We're just doing LTO for our one crate
        Lto::ThinLocal => SymbolExportLevel::Rust,
        // We're doing LTO for the entire crate graph
        Lto::Fat | Lto::Thin => symbol_export::crates_export_threshold(&tcx.crate_types()),
        Lto::No => return vec![],
    };
    // For one crate: collect the linker-level names of all symbols that are at
    // or below the export threshold, plus any symbols flagged as `used`
    // (which must be kept alive regardless of their export level).
    let copy_symbols = |cnum| {
        tcx.exported_non_generic_symbols(cnum)
            .iter()
            .chain(tcx.exported_generic_symbols(cnum))
            .filter_map(|&(s, info): &(ExportedSymbol<'_>, SymbolExportInfo)| {
                if info.level.is_below_threshold(export_threshold) || info.used {
                    Some(symbol_name_for_instance_in_crate(tcx, s, cnum))
                } else {
                    None
                }
            })
            .collect::<Vec<_>>()
    };
    let mut symbols_below_threshold = {
        let _timer = tcx.prof.generic_activity("lto_generate_symbols_below_threshold");
        copy_symbols(LOCAL_CRATE)
    };
    info!("{} symbols to preserve in this crate", symbols_below_threshold.len());
    // If we're performing LTO for the entire crate graph, then for each of our
    // upstream dependencies, include their exported symbols.
    if tcx.sess.lto() != Lto::ThinLocal {
        for &cnum in each_linked_rlib_for_lto {
            let _timer = tcx.prof.generic_activity("lto_generate_symbols_below_threshold");
            symbols_below_threshold.extend(copy_symbols(cnum));
        }
    }
    symbols_below_threshold
}
/// Aborts compilation with a fatal error if the requested LTO mode cannot be
/// used with the crate types being produced.
///
/// Crate-local ThinLTO is always permitted. The wider LTO modes are rejected
/// for any crate type that `crate_type_allows_lto` disallows, and for
/// dylibs / proc-macros (and `-C prefer-dynamic`) unless `-Z dylib-lto` was
/// passed.
pub(super) fn check_lto_allowed<B: WriteBackendMethods>(cgcx: &CodegenContext<B>) {
    // Crate-local LTO never involves upstream crates, so it is always allowed.
    if cgcx.lto == Lto::ThinLocal {
        return;
    }

    let dcx = cgcx.create_dcx();
    let dylib_lto = cgcx.opts.unstable_opts.dylib_lto;

    // Make sure we actually can run LTO for every requested crate type.
    for &crate_type in cgcx.crate_types.iter() {
        if !crate_type_allows_lto(crate_type) {
            dcx.handle().emit_fatal(LtoDisallowed);
        }
        match crate_type {
            CrateType::Dylib if !dylib_lto => dcx.handle().emit_fatal(LtoDylib),
            CrateType::ProcMacro if !dylib_lto => dcx.handle().emit_fatal(LtoProcMacro),
            _ => {}
        }
    }

    if cgcx.opts.cg.prefer_dynamic && !dylib_lto {
        dcx.handle().emit_fatal(DynamicLinkingWithLTO);
    }
}
+81 -105
View File
@@ -9,7 +9,7 @@
use rustc_abi::Size;
use rustc_ast::attr;
use rustc_ast::expand::autodiff_attrs::AutoDiffItem;
use rustc_data_structures::fx::{FxHashMap, FxIndexMap};
use rustc_data_structures::fx::FxIndexMap;
use rustc_data_structures::jobserver::{self, Acquired};
use rustc_data_structures::memmap::Mmap;
use rustc_data_structures::profiling::{SelfProfilerRef, VerboseTimingGuard};
@@ -20,14 +20,12 @@
Suggestions,
};
use rustc_fs_util::link_or_copy;
use rustc_hir::def_id::{CrateNum, LOCAL_CRATE};
use rustc_incremental::{
copy_cgu_workproduct_to_incr_comp_cache_dir, in_incr_comp_dir, in_incr_comp_dir_sess,
};
use rustc_metadata::fs::copy_to_stdout;
use rustc_middle::bug;
use rustc_middle::dep_graph::{WorkProduct, WorkProductId};
use rustc_middle::middle::exported_symbols::SymbolExportInfo;
use rustc_middle::ty::TyCtxt;
use rustc_session::Session;
use rustc_session::config::{
@@ -40,7 +38,7 @@
use super::link::{self, ensure_removed};
use super::lto::{self, SerializedModule};
use super::symbol_export::symbol_name_for_instance_in_crate;
use crate::back::lto::check_lto_allowed;
use crate::errors::{AutodiffWithoutLto, ErrorCreatingRemarkDir};
use crate::traits::*;
use crate::{
@@ -120,6 +118,7 @@ pub struct ModuleConfig {
pub emit_lifetime_markers: bool,
pub llvm_plugins: Vec<String>,
pub autodiff: Vec<config::AutoDiff>,
pub offload: Vec<config::Offload>,
}
impl ModuleConfig {
@@ -268,6 +267,7 @@ macro_rules! if_regular {
emit_lifetime_markers: sess.emit_lifetime_markers(),
llvm_plugins: if_regular!(sess.opts.unstable_opts.llvm_plugins.clone(), vec![]),
autodiff: if_regular!(sess.opts.unstable_opts.autodiff.clone(), vec![]),
offload: if_regular!(sess.opts.unstable_opts.offload.clone(), vec![]),
}
}
@@ -330,8 +330,6 @@ pub fn new(
+ Sync,
>;
type ExportedSymbols = FxHashMap<CrateNum, Arc<Vec<(String, SymbolExportInfo)>>>;
/// Additional resources used by optimize_and_codegen (not module specific)
#[derive(Clone)]
pub struct CodegenContext<B: WriteBackendMethods> {
@@ -341,10 +339,8 @@ pub struct CodegenContext<B: WriteBackendMethods> {
pub save_temps: bool,
pub fewer_names: bool,
pub time_trace: bool,
pub exported_symbols: Option<Arc<ExportedSymbols>>,
pub opts: Arc<config::Options>,
pub crate_types: Vec<CrateType>,
pub each_linked_rlib_for_lto: Vec<(CrateNum, PathBuf)>,
pub output_filenames: Arc<OutputFilenames>,
pub invocation_temp: Option<String>,
pub regular_module_config: Arc<ModuleConfig>,
@@ -399,13 +395,21 @@ pub fn config(&self, kind: ModuleKind) -> &ModuleConfig {
fn generate_thin_lto_work<B: ExtraBackendMethods>(
cgcx: &CodegenContext<B>,
exported_symbols_for_lto: &[String],
each_linked_rlib_for_lto: &[PathBuf],
needs_thin_lto: Vec<(String, B::ThinBuffer)>,
import_only_modules: Vec<(SerializedModule<B::ModuleBuffer>, WorkProduct)>,
) -> Vec<(WorkItem<B>, u64)> {
let _prof_timer = cgcx.prof.generic_activity("codegen_thin_generate_lto_work");
let (lto_modules, copy_jobs) =
B::run_thin_lto(cgcx, needs_thin_lto, import_only_modules).unwrap_or_else(|e| e.raise());
let (lto_modules, copy_jobs) = B::run_thin_lto(
cgcx,
exported_symbols_for_lto,
each_linked_rlib_for_lto,
needs_thin_lto,
import_only_modules,
)
.unwrap_or_else(|e| e.raise());
lto_modules
.into_iter()
.map(|module| {
@@ -721,6 +725,8 @@ pub(crate) enum WorkItem<B: WriteBackendMethods> {
CopyPostLtoArtifacts(CachedModuleCodegen),
/// Performs fat LTO on the given module.
FatLto {
exported_symbols_for_lto: Arc<Vec<String>>,
each_linked_rlib_for_lto: Vec<PathBuf>,
needs_fat_lto: Vec<FatLtoInput<B>>,
import_only_modules: Vec<(SerializedModule<B::ModuleBuffer>, WorkProduct)>,
autodiff: Vec<AutoDiffItem>,
@@ -808,7 +814,7 @@ pub(crate) enum WorkItemResult<B: WriteBackendMethods> {
}
pub enum FatLtoInput<B: WriteBackendMethods> {
Serialized { name: String, buffer: B::ModuleBuffer },
Serialized { name: String, buffer: SerializedModule<B::ModuleBuffer> },
InMemory(ModuleCodegen<B::Module>),
}
@@ -897,7 +903,10 @@ fn execute_optimize_work_item<B: ExtraBackendMethods>(
fs::write(&path, buffer.data()).unwrap_or_else(|e| {
panic!("Error writing pre-lto-bitcode file `{}`: {}", path.display(), e);
});
Ok(WorkItemResult::NeedsFatLto(FatLtoInput::Serialized { name, buffer }))
Ok(WorkItemResult::NeedsFatLto(FatLtoInput::Serialized {
name,
buffer: SerializedModule::Local(buffer),
}))
}
None => Ok(WorkItemResult::NeedsFatLto(FatLtoInput::InMemory(module))),
},
@@ -990,12 +999,24 @@ fn execute_copy_from_cache_work_item<B: ExtraBackendMethods>(
fn execute_fat_lto_work_item<B: ExtraBackendMethods>(
cgcx: &CodegenContext<B>,
needs_fat_lto: Vec<FatLtoInput<B>>,
exported_symbols_for_lto: &[String],
each_linked_rlib_for_lto: &[PathBuf],
mut needs_fat_lto: Vec<FatLtoInput<B>>,
import_only_modules: Vec<(SerializedModule<B::ModuleBuffer>, WorkProduct)>,
autodiff: Vec<AutoDiffItem>,
module_config: &ModuleConfig,
) -> Result<WorkItemResult<B>, FatalError> {
let module = B::run_and_optimize_fat_lto(cgcx, needs_fat_lto, import_only_modules, autodiff)?;
for (module, wp) in import_only_modules {
needs_fat_lto.push(FatLtoInput::Serialized { name: wp.cgu_name, buffer: module })
}
let module = B::run_and_optimize_fat_lto(
cgcx,
exported_symbols_for_lto,
each_linked_rlib_for_lto,
needs_fat_lto,
autodiff,
)?;
let module = B::codegen(cgcx, module, module_config)?;
Ok(WorkItemResult::Finished(module))
}
@@ -1030,7 +1051,7 @@ pub(crate) enum Message<B: WriteBackendMethods> {
/// The backend has finished processing a work item for a codegen unit.
/// Sent from a backend worker thread.
WorkItem { result: Result<WorkItemResult<B>, Option<WorkerFatalError>>, worker_id: usize },
WorkItem { result: Result<WorkItemResult<B>, Option<WorkerFatalError>> },
/// The frontend has finished generating something (backend IR or a
/// post-LTO artifact) for a codegen unit, and it should be passed to the
@@ -1111,42 +1132,18 @@ fn start_executing_work<B: ExtraBackendMethods>(
let autodiff_items = autodiff_items.to_vec();
let mut each_linked_rlib_for_lto = Vec::new();
let mut each_linked_rlib_file_for_lto = Vec::new();
drop(link::each_linked_rlib(crate_info, None, &mut |cnum, path| {
if link::ignored_for_lto(sess, crate_info, cnum) {
return;
}
each_linked_rlib_for_lto.push((cnum, path.to_path_buf()));
each_linked_rlib_for_lto.push(cnum);
each_linked_rlib_file_for_lto.push(path.to_path_buf());
}));
// Compute the set of symbols we need to retain when doing LTO (if we need to)
let exported_symbols = {
let mut exported_symbols = FxHashMap::default();
let copy_symbols = |cnum| {
let symbols = tcx
.exported_non_generic_symbols(cnum)
.iter()
.chain(tcx.exported_generic_symbols(cnum))
.map(|&(s, lvl)| (symbol_name_for_instance_in_crate(tcx, s, cnum), lvl))
.collect();
Arc::new(symbols)
};
match sess.lto() {
Lto::No => None,
Lto::ThinLocal => {
exported_symbols.insert(LOCAL_CRATE, copy_symbols(LOCAL_CRATE));
Some(Arc::new(exported_symbols))
}
Lto::Fat | Lto::Thin => {
exported_symbols.insert(LOCAL_CRATE, copy_symbols(LOCAL_CRATE));
for &(cnum, ref _path) in &each_linked_rlib_for_lto {
exported_symbols.insert(cnum, copy_symbols(cnum));
}
Some(Arc::new(exported_symbols))
}
}
};
let exported_symbols_for_lto =
Arc::new(lto::exported_symbols_for_lto(tcx, &each_linked_rlib_for_lto));
// First up, convert our jobserver into a helper thread so we can use normal
// mpsc channels to manage our messages and such.
@@ -1181,14 +1178,12 @@ fn start_executing_work<B: ExtraBackendMethods>(
let cgcx = CodegenContext::<B> {
crate_types: tcx.crate_types().to_vec(),
each_linked_rlib_for_lto,
lto: sess.lto(),
fewer_names: sess.fewer_names(),
save_temps: sess.opts.cg.save_temps,
time_trace: sess.opts.unstable_opts.llvm_time_trace,
opts: Arc::new(sess.opts.clone()),
prof: sess.prof.clone(),
exported_symbols,
remark: sess.opts.cg.remark.clone(),
remark_dir,
incr_comp_session_dir: sess.incr_comp_session_dir_opt().map(|r| r.clone()),
@@ -1348,18 +1343,6 @@ fn start_executing_work<B: ExtraBackendMethods>(
// necessary. There's already optimizations in place to avoid sending work
// back to the coordinator if LTO isn't requested.
return B::spawn_named_thread(cgcx.time_trace, "coordinator".to_string(), move || {
let mut worker_id_counter = 0;
let mut free_worker_ids = Vec::new();
let mut get_worker_id = |free_worker_ids: &mut Vec<usize>| {
if let Some(id) = free_worker_ids.pop() {
id
} else {
let id = worker_id_counter;
worker_id_counter += 1;
id
}
};
// This is where we collect codegen units that have gone all the way
// through codegen and LLVM.
let mut compiled_modules = vec![];
@@ -1440,12 +1423,7 @@ enum CodegenState {
let (item, _) =
work_items.pop().expect("queue empty - queue_full_enough() broken?");
main_thread_state = MainThreadState::Lending;
spawn_work(
&cgcx,
&mut llvm_start_time,
get_worker_id(&mut free_worker_ids),
item,
);
spawn_work(&cgcx, &mut llvm_start_time, item);
}
}
} else if codegen_state == Completed {
@@ -1472,12 +1450,18 @@ enum CodegenState {
let needs_fat_lto = mem::take(&mut needs_fat_lto);
let needs_thin_lto = mem::take(&mut needs_thin_lto);
let import_only_modules = mem::take(&mut lto_import_only_modules);
let each_linked_rlib_file_for_lto =
mem::take(&mut each_linked_rlib_file_for_lto);
check_lto_allowed(&cgcx);
if !needs_fat_lto.is_empty() {
assert!(needs_thin_lto.is_empty());
work_items.push((
WorkItem::FatLto {
exported_symbols_for_lto: Arc::clone(&exported_symbols_for_lto),
each_linked_rlib_for_lto: each_linked_rlib_file_for_lto,
needs_fat_lto,
import_only_modules,
autodiff: autodiff_items.clone(),
@@ -1493,9 +1477,13 @@ enum CodegenState {
dcx.handle().emit_fatal(AutodiffWithoutLto {});
}
for (work, cost) in
generate_thin_lto_work(&cgcx, needs_thin_lto, import_only_modules)
{
for (work, cost) in generate_thin_lto_work(
&cgcx,
&exported_symbols_for_lto,
&each_linked_rlib_file_for_lto,
needs_thin_lto,
import_only_modules,
) {
let insertion_index = work_items
.binary_search_by_key(&cost, |&(_, cost)| cost)
.unwrap_or_else(|e| e);
@@ -1514,12 +1502,7 @@ enum CodegenState {
MainThreadState::Idle => {
if let Some((item, _)) = work_items.pop() {
main_thread_state = MainThreadState::Lending;
spawn_work(
&cgcx,
&mut llvm_start_time,
get_worker_id(&mut free_worker_ids),
item,
);
spawn_work(&cgcx, &mut llvm_start_time, item);
} else {
// There is no unstarted work, so let the main thread
// take over for a running worker. Otherwise the
@@ -1555,12 +1538,7 @@ enum CodegenState {
while running_with_own_token < tokens.len()
&& let Some((item, _)) = work_items.pop()
{
spawn_work(
&cgcx,
&mut llvm_start_time,
get_worker_id(&mut free_worker_ids),
item,
);
spawn_work(&cgcx, &mut llvm_start_time, item);
running_with_own_token += 1;
}
}
@@ -1568,21 +1546,6 @@ enum CodegenState {
// Relinquish accidentally acquired extra tokens.
tokens.truncate(running_with_own_token);
// If a thread exits successfully then we drop a token associated
// with that worker and update our `running_with_own_token` count.
// We may later re-acquire a token to continue running more work.
// We may also not actually drop a token here if the worker was
// running with an "ephemeral token".
let mut free_worker = |worker_id| {
if main_thread_state == MainThreadState::Lending {
main_thread_state = MainThreadState::Idle;
} else {
running_with_own_token -= 1;
}
free_worker_ids.push(worker_id);
};
let msg = coordinator_receive.recv().unwrap();
match *msg.downcast::<Message<B>>().ok().unwrap() {
// Save the token locally and the next turn of the loop will use
@@ -1651,8 +1614,17 @@ enum CodegenState {
codegen_state = Aborted;
}
Message::WorkItem { result, worker_id } => {
free_worker(worker_id);
Message::WorkItem { result } => {
// If a thread exits successfully then we drop a token associated
// with that worker and update our `running_with_own_token` count.
// We may later re-acquire a token to continue running more work.
// We may also not actually drop a token here if the worker was
// running with an "ephemeral token".
if main_thread_state == MainThreadState::Lending {
main_thread_state = MainThreadState::Idle;
} else {
running_with_own_token -= 1;
}
match result {
Ok(WorkItemResult::Finished(compiled_module)) => {
@@ -1798,7 +1770,6 @@ fn queue_full_enough(items_in_queue: usize, workers_running: usize) -> bool {
fn spawn_work<'a, B: ExtraBackendMethods>(
cgcx: &'a CodegenContext<B>,
llvm_start_time: &mut Option<VerboseTimingGuard<'a>>,
worker_id: usize,
work: WorkItem<B>,
) {
if cgcx.config(work.module_kind()).time_module && llvm_start_time.is_none() {
@@ -1813,24 +1784,21 @@ fn spawn_work<'a, B: ExtraBackendMethods>(
struct Bomb<B: ExtraBackendMethods> {
coordinator_send: Sender<Box<dyn Any + Send>>,
result: Option<Result<WorkItemResult<B>, FatalError>>,
worker_id: usize,
}
impl<B: ExtraBackendMethods> Drop for Bomb<B> {
fn drop(&mut self) {
let worker_id = self.worker_id;
let msg = match self.result.take() {
Some(Ok(result)) => Message::WorkItem::<B> { result: Ok(result), worker_id },
Some(Ok(result)) => Message::WorkItem::<B> { result: Ok(result) },
Some(Err(FatalError)) => {
Message::WorkItem::<B> { result: Err(Some(WorkerFatalError)), worker_id }
Message::WorkItem::<B> { result: Err(Some(WorkerFatalError)) }
}
None => Message::WorkItem::<B> { result: Err(None), worker_id },
None => Message::WorkItem::<B> { result: Err(None) },
};
drop(self.coordinator_send.send(Box::new(msg)));
}
}
let mut bomb =
Bomb::<B> { coordinator_send: cgcx.coordinator_send.clone(), result: None, worker_id };
let mut bomb = Bomb::<B> { coordinator_send: cgcx.coordinator_send.clone(), result: None };
// Execute the work itself, and if it finishes successfully then flag
// ourselves as a success as well.
@@ -1854,12 +1822,20 @@ fn drop(&mut self) {
);
Ok(execute_copy_from_cache_work_item(&cgcx, m, module_config))
}
WorkItem::FatLto { needs_fat_lto, import_only_modules, autodiff } => {
WorkItem::FatLto {
exported_symbols_for_lto,
each_linked_rlib_for_lto,
needs_fat_lto,
import_only_modules,
autodiff,
} => {
let _timer = cgcx
.prof
.generic_activity_with_arg("codegen_module_perform_lto", "everything");
execute_fat_lto_work_item(
&cgcx,
&exported_symbols_for_lto,
&each_linked_rlib_for_lto,
needs_fat_lto,
import_only_modules,
autodiff,
@@ -511,15 +511,6 @@ fn codegen_fn_attrs(tcx: TyCtxt<'_>, did: LocalDefId) -> CodegenFnAttrs {
err.emit();
}
// Any linkage to LLVM intrinsics for now forcibly marks them all as never
// unwinds since LLVM sometimes can't handle codegen which `invoke`s
// intrinsic functions.
if let Some(name) = &codegen_fn_attrs.link_name
&& name.as_str().starts_with("llvm.")
{
codegen_fn_attrs.flags |= CodegenFnAttrFlags::NEVER_UNWIND;
}
if let Some(features) = check_tied_features(
tcx.sess,
&codegen_fn_attrs
+1 -1
View File
@@ -148,7 +148,7 @@ pub(crate) fn shift_mask_val<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
pub fn asm_const_to_str<'tcx>(
tcx: TyCtxt<'tcx>,
sp: Span,
const_value: mir::ConstValue<'tcx>,
const_value: mir::ConstValue,
ty_and_layout: TyAndLayout<'tcx>,
) -> String {
let mir::ConstValue::Scalar(scalar) = const_value else {
+17
View File
@@ -1294,3 +1294,20 @@ pub(crate) struct FeatureNotValid<'a> {
#[help]
pub plus_hint: bool,
}
/// Emitted when LTO is requested but one of the crate types being produced
/// does not support LTO at all.
#[derive(Diagnostic)]
#[diag(codegen_ssa_lto_disallowed)]
pub(crate) struct LtoDisallowed;

/// Emitted when LTO is requested for a dylib without the flag that enables
/// dylib LTO.
#[derive(Diagnostic)]
#[diag(codegen_ssa_lto_dylib)]
pub(crate) struct LtoDylib;

/// Emitted when LTO is requested for a proc-macro crate without the flag that
/// enables dylib LTO.
#[derive(Diagnostic)]
#[diag(codegen_ssa_lto_proc_macro)]
pub(crate) struct LtoProcMacro;

/// Emitted when dynamic linking (`-C prefer-dynamic`) is combined with LTO;
/// carries an extra `#[note]` in the fluent message.
#[derive(Diagnostic)]
#[diag(codegen_ssa_dynamic_linking_with_lto)]
#[note]
pub(crate) struct DynamicLinkingWithLTO;
+64 -56
View File
@@ -1,12 +1,13 @@
//! An analysis to determine which locals require allocas and
//! which do not.
use rustc_abi as abi;
use rustc_data_structures::graph::dominators::Dominators;
use rustc_index::bit_set::DenseBitSet;
use rustc_index::{IndexSlice, IndexVec};
use rustc_middle::mir::visit::{MutatingUseContext, NonMutatingUseContext, PlaceContext, Visitor};
use rustc_middle::mir::{self, DefLocation, Location, TerminatorKind, traversal};
use rustc_middle::ty::layout::{HasTyCtxt, LayoutOf};
use rustc_middle::ty::layout::LayoutOf;
use rustc_middle::{bug, span_bug};
use tracing::debug;
@@ -99,63 +100,75 @@ fn process_place(
context: PlaceContext,
location: Location,
) {
let cx = self.fx.cx;
if !place_ref.projection.is_empty() {
const COPY_CONTEXT: PlaceContext =
PlaceContext::NonMutatingUse(NonMutatingUseContext::Copy);
if let Some((place_base, elem)) = place_ref.last_projection() {
let mut base_context = if context.is_mutating_use() {
PlaceContext::MutatingUse(MutatingUseContext::Projection)
} else {
PlaceContext::NonMutatingUse(NonMutatingUseContext::Projection)
};
// Allow uses of projections that are ZSTs or from scalar fields.
let is_consume = matches!(
context,
PlaceContext::NonMutatingUse(
NonMutatingUseContext::Copy | NonMutatingUseContext::Move,
)
);
if is_consume {
let base_ty = place_base.ty(self.fx.mir, cx.tcx());
let base_ty = self.fx.monomorphize(base_ty);
// ZSTs don't require any actual memory access.
let elem_ty = base_ty.projection_ty(cx.tcx(), self.fx.monomorphize(elem)).ty;
let span = self.fx.mir.local_decls[place_ref.local].source_info.span;
if cx.spanned_layout_of(elem_ty, span).is_zst() {
return;
// `PlaceElem::Index` is the only variant that can mention other `Local`s,
// so check for those up-front before any potential short-circuits.
for elem in place_ref.projection {
if let mir::PlaceElem::Index(index_local) = *elem {
self.visit_local(index_local, COPY_CONTEXT, location);
}
}
if let mir::ProjectionElem::Field(..) = elem {
let layout = cx.spanned_layout_of(base_ty.ty, span);
if cx.is_backend_immediate(layout) || cx.is_backend_scalar_pair(layout) {
// Recurse with the same context, instead of `Projection`,
// potentially stopping at non-operand projections,
// which would trigger `not_ssa` on locals.
base_context = context;
// If our local is already memory, nothing can make it *more* memory
// so we don't need to bother checking the projections further.
if self.locals[place_ref.local] == LocalKind::Memory {
return;
}
if place_ref.is_indirect_first_projection() {
// If this starts with a `Deref`, we only need to record a read of the
// pointer being dereferenced, as all the subsequent projections are
// working on a place which is always supported. (And because we're
// looking at codegen MIR, it can only happen as the first projection.)
self.visit_local(place_ref.local, COPY_CONTEXT, location);
return;
}
if context.is_mutating_use() {
// If it's a mutating use it doesn't matter what the projections are,
// if there are *any* then we need a place to write. (For example,
// `_1 = Foo()` works in SSA but `_2.0 = Foo()` does not.)
let mut_projection = PlaceContext::MutatingUse(MutatingUseContext::Projection);
self.visit_local(place_ref.local, mut_projection, location);
return;
}
// Scan through to ensure the only projections are those which
// `FunctionCx::maybe_codegen_consume_direct` can handle.
let base_ty = self.fx.monomorphized_place_ty(mir::PlaceRef::from(place_ref.local));
let mut layout = self.fx.cx.layout_of(base_ty);
for elem in place_ref.projection {
layout = match *elem {
mir::PlaceElem::Field(fidx, ..) => layout.field(self.fx.cx, fidx.as_usize()),
mir::PlaceElem::Downcast(_, vidx)
if let abi::Variants::Single { index: single_variant } =
layout.variants
&& vidx == single_variant =>
{
layout.for_variant(self.fx.cx, vidx)
}
mir::PlaceElem::Subtype(subtype_ty) => {
let subtype_ty = self.fx.monomorphize(subtype_ty);
self.fx.cx.layout_of(subtype_ty)
}
_ => {
self.locals[place_ref.local] = LocalKind::Memory;
return;
}
}
}
if let mir::ProjectionElem::Deref = elem {
// Deref projections typically only read the pointer.
base_context = PlaceContext::NonMutatingUse(NonMutatingUseContext::Copy);
}
self.process_place(&place_base, base_context, location);
// HACK(eddyb) this emulates the old `visit_projection_elem`, this
// entire `visit_place`-like `process_place` method should be rewritten,
// now that we have moved to the "slice of projections" representation.
if let mir::ProjectionElem::Index(local) = elem {
self.visit_local(
local,
PlaceContext::NonMutatingUse(NonMutatingUseContext::Copy),
location,
);
}
} else {
self.visit_local(place_ref.local, context, location);
debug_assert!(
!self.fx.cx.is_backend_ref(layout),
"Post-projection {place_ref:?} layout should be non-Ref, but it's {layout:?}",
);
}
// Even with supported projections, we still need to have `visit_local`
// check for things that can't be done in SSA (like `SharedBorrow`).
self.visit_local(place_ref.local, context, location);
}
}
@@ -170,11 +183,6 @@ fn visit_assign(
if let Some(local) = place.as_local() {
self.define(local, DefLocation::Assignment(location));
if self.locals[local] != LocalKind::Memory {
if !self.fx.rvalue_creates_operand(rvalue) {
self.locals[local] = LocalKind::Memory;
}
}
} else {
self.visit_place(place, PlaceContext::MutatingUse(MutatingUseContext::Store), location);
}
@@ -20,7 +20,7 @@ pub(crate) fn eval_mir_constant_to_operand(
OperandRef::from_const(bx, val, ty)
}
pub fn eval_mir_constant(&self, constant: &mir::ConstOperand<'tcx>) -> mir::ConstValue<'tcx> {
pub fn eval_mir_constant(&self, constant: &mir::ConstOperand<'tcx>) -> mir::ConstValue {
// `MirUsedCollector` visited all required_consts before codegen began, so if we got here
// there can be no more constants that fail to evaluate.
self.monomorphize(constant.const_)
+19 -27
View File
@@ -140,7 +140,7 @@ pub fn zero_sized(layout: TyAndLayout<'tcx>) -> OperandRef<'tcx, V> {
pub(crate) fn from_const<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
bx: &mut Bx,
val: mir::ConstValue<'tcx>,
val: mir::ConstValue,
ty: Ty<'tcx>,
) -> Self {
let layout = bx.layout_of(ty);
@@ -154,14 +154,11 @@ pub(crate) fn from_const<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
OperandValue::Immediate(llval)
}
ConstValue::ZeroSized => return OperandRef::zero_sized(layout),
ConstValue::Slice { data, meta } => {
ConstValue::Slice { alloc_id, meta } => {
let BackendRepr::ScalarPair(a_scalar, _) = layout.backend_repr else {
bug!("from_const: invalid ScalarPair layout: {:#?}", layout);
};
let a = Scalar::from_pointer(
Pointer::new(bx.tcx().reserve_and_set_memory_alloc(data).into(), Size::ZERO),
&bx.tcx(),
);
let a = Scalar::from_pointer(Pointer::new(alloc_id.into(), Size::ZERO), &bx.tcx());
let a_llval = bx.scalar_to_backend(
a,
a_scalar,
@@ -338,13 +335,6 @@ pub(crate) fn extract_field<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
let val = if field.is_zst() {
OperandValue::ZeroSized
} else if let BackendRepr::SimdVector { .. } = self.layout.backend_repr {
// codegen_transmute_operand doesn't support SIMD, but since the previous
// check handled ZSTs, the only possible field access into something SIMD
// is to the `non_1zst_field` that's the same SIMD. (Other things, even
// just padding, would change the wrapper's representation type.)
assert_eq!(field.size, self.layout.size);
self.val
} else if field.size == self.layout.size {
assert_eq!(offset.bytes(), 0);
fx.codegen_transmute_operand(bx, *self, field)
@@ -931,9 +921,10 @@ fn maybe_codegen_consume_direct(
match self.locals[place_ref.local] {
LocalRef::Operand(mut o) => {
// Moves out of scalar and scalar pair fields are trivial.
for elem in place_ref.projection.iter() {
match elem {
// We only need to handle the projections that
// `LocalAnalyzer::process_place` let make it here.
for elem in place_ref.projection {
match *elem {
mir::ProjectionElem::Field(f, _) => {
assert!(
!o.layout.ty.is_any_ptr(),
@@ -942,17 +933,18 @@ fn maybe_codegen_consume_direct(
);
o = o.extract_field(self, bx, f.index());
}
mir::ProjectionElem::Index(_)
| mir::ProjectionElem::ConstantIndex { .. } => {
// ZSTs don't require any actual memory access.
// FIXME(eddyb) deduplicate this with the identical
// checks in `codegen_consume` and `extract_field`.
let elem = o.layout.field(bx.cx(), 0);
if elem.is_zst() {
o = OperandRef::zero_sized(elem);
} else {
return None;
}
mir::PlaceElem::Downcast(_, vidx) => {
debug_assert_eq!(
o.layout.variants,
abi::Variants::Single { index: vidx },
);
let layout = o.layout.for_variant(bx.cx(), vidx);
o = OperandRef { val: o.val, layout }
}
mir::PlaceElem::Subtype(subtype_ty) => {
let subtype_ty = self.monomorphize(subtype_ty);
let layout = self.cx.layout_of(subtype_ty);
o = OperandRef { val: o.val, layout }
}
_ => return None,
}
+91 -89
View File
@@ -2,12 +2,12 @@
use rustc_middle::ty::adjustment::PointerCoercion;
use rustc_middle::ty::layout::{HasTyCtxt, HasTypingEnv, LayoutOf, TyAndLayout};
use rustc_middle::ty::{self, Instance, Ty, TyCtxt};
use rustc_middle::{bug, mir};
use rustc_middle::{bug, mir, span_bug};
use rustc_session::config::OptLevel;
use tracing::{debug, instrument};
use super::operand::{OperandRef, OperandRefBuilder, OperandValue};
use super::place::{PlaceRef, codegen_tag_value};
use super::place::{PlaceRef, PlaceValue, codegen_tag_value};
use super::{FunctionCx, LocalRef};
use crate::common::{IntPredicate, TypeKind};
use crate::traits::*;
@@ -180,7 +180,6 @@ pub(crate) fn codegen_rvalue(
}
_ => {
assert!(self.rvalue_creates_operand(rvalue));
let temp = self.codegen_rvalue_operand(bx, rvalue);
temp.val.store(bx, dest);
}
@@ -218,17 +217,26 @@ fn codegen_transmute(
/// Transmutes an `OperandValue` to another `OperandValue`.
///
/// This is supported only for cases where [`Self::rvalue_creates_operand`]
/// returns `true`, and will ICE otherwise. (In particular, anything that
/// would need to `alloca` in order to return a `PlaceValue` will ICE,
/// expecting those to go via [`Self::codegen_transmute`] instead where
/// the destination place is already allocated.)
/// This is supported for all cases where the `cast` type is SSA,
/// but for non-ZSTs with [`abi::BackendRepr::Memory`] it ICEs.
pub(crate) fn codegen_transmute_operand(
&mut self,
bx: &mut Bx,
operand: OperandRef<'tcx, Bx::Value>,
cast: TyAndLayout<'tcx>,
) -> OperandValue<Bx::Value> {
if let abi::BackendRepr::Memory { .. } = cast.backend_repr
&& !cast.is_zst()
{
span_bug!(self.mir.span, "Use `codegen_transmute` to transmute to {cast:?}");
}
// `Layout` is interned, so we can do a cheap check for things that are
// exactly the same and thus don't need any handling.
if abi::Layout::eq(&operand.layout.layout, &cast.layout) {
return operand.val;
}
// Check for transmutes that are always UB.
if operand.layout.size != cast.size
|| operand.layout.is_uninhabited()
@@ -241,11 +249,22 @@ pub(crate) fn codegen_transmute_operand(
return OperandValue::poison(bx, cast);
}
// To or from pointers takes different methods, so we use this to restrict
// the SimdVector case to types which can be `bitcast` between each other.
#[inline]
fn vector_can_bitcast(x: abi::Scalar) -> bool {
matches!(
x,
abi::Scalar::Initialized {
value: abi::Primitive::Int(..) | abi::Primitive::Float(..),
..
}
)
}
let cx = bx.cx();
match (operand.val, operand.layout.backend_repr, cast.backend_repr) {
_ if cast.is_zst() => OperandValue::ZeroSized,
(_, _, abi::BackendRepr::Memory { .. }) => {
bug!("Cannot `codegen_transmute_operand` to non-ZST memory-ABI output {cast:?}");
}
(OperandValue::Ref(source_place_val), abi::BackendRepr::Memory { .. }, _) => {
assert_eq!(source_place_val.llextra, None);
// The existing alignment is part of `source_place_val`,
@@ -256,16 +275,46 @@ pub(crate) fn codegen_transmute_operand(
OperandValue::Immediate(imm),
abi::BackendRepr::Scalar(from_scalar),
abi::BackendRepr::Scalar(to_scalar),
) => OperandValue::Immediate(transmute_scalar(bx, imm, from_scalar, to_scalar)),
) if from_scalar.size(cx) == to_scalar.size(cx) => {
OperandValue::Immediate(transmute_scalar(bx, imm, from_scalar, to_scalar))
}
(
OperandValue::Immediate(imm),
abi::BackendRepr::SimdVector { element: from_scalar, .. },
abi::BackendRepr::SimdVector { element: to_scalar, .. },
) if vector_can_bitcast(from_scalar) && vector_can_bitcast(to_scalar) => {
let to_backend_ty = bx.cx().immediate_backend_type(cast);
OperandValue::Immediate(bx.bitcast(imm, to_backend_ty))
}
(
OperandValue::Pair(imm_a, imm_b),
abi::BackendRepr::ScalarPair(in_a, in_b),
abi::BackendRepr::ScalarPair(out_a, out_b),
) => OperandValue::Pair(
transmute_scalar(bx, imm_a, in_a, out_a),
transmute_scalar(bx, imm_b, in_b, out_b),
),
_ => bug!("Cannot `codegen_transmute_operand` {operand:?} to {cast:?}"),
) if in_a.size(cx) == out_a.size(cx) && in_b.size(cx) == out_b.size(cx) => {
OperandValue::Pair(
transmute_scalar(bx, imm_a, in_a, out_a),
transmute_scalar(bx, imm_b, in_b, out_b),
)
}
_ => {
// For any other potentially-tricky cases, make a temporary instead.
// If anything else wants the target local to be in memory this won't
// be hit, as `codegen_transmute` will get called directly. Thus this
// is only for places where everything else wants the operand form,
// and thus it's not worth making those places get it from memory.
//
// Notably, Scalar ⇌ ScalarPair cases go here to avoid padding
// and endianness issues, as do SimdVector ones to avoid worrying
// about things like f32x8 ⇌ ptrx4 that would need multiple steps.
let align = Ord::max(operand.layout.align.abi, cast.align.abi);
let size = Ord::max(operand.layout.size, cast.size);
let temp = PlaceValue::alloca(bx, size, align);
bx.lifetime_start(temp.llval, size);
operand.val.store(bx, temp.with_type(operand.layout));
let val = bx.load_operand(temp.with_type(cast)).val;
bx.lifetime_end(temp.llval, size);
val
}
}
}
@@ -288,7 +337,7 @@ fn cast_immediate(
// valid ranges. For example, `char`s are passed as just `i32`, with no
// way for LLVM to know that they're 0x10FFFF at most. Thus we assume
// the range of the input value too, not just the output range.
assume_scalar_range(bx, imm, from_scalar, from_backend_ty);
assume_scalar_range(bx, imm, from_scalar, from_backend_ty, None);
imm = match (from_scalar.primitive(), to_scalar.primitive()) {
(Int(_, is_signed), Int(..)) => bx.intcast(imm, to_backend_ty, is_signed),
@@ -326,8 +375,6 @@ pub(crate) fn codegen_rvalue_operand(
bx: &mut Bx,
rvalue: &mir::Rvalue<'tcx>,
) -> OperandRef<'tcx, Bx::Value> {
assert!(self.rvalue_creates_operand(rvalue), "cannot codegen {rvalue:?} to operand",);
match *rvalue {
mir::Rvalue::Cast(ref kind, ref source, mir_cast_ty) => {
let operand = self.codegen_operand(bx, source);
@@ -653,8 +700,6 @@ pub(crate) fn codegen_rvalue_operand(
let ty = self.monomorphize(ty);
let layout = self.cx.layout_of(ty);
// `rvalue_creates_operand` has arranged that we only get here if
// we can build the aggregate immediate from the field immediates.
let mut builder = OperandRefBuilder::new(layout);
for (field_idx, field) in fields.iter_enumerated() {
let op = self.codegen_operand(bx, field);
@@ -869,7 +914,7 @@ fn codegen_scalar_binop(
let ltext = bx.zext(is_lt, bx.type_i8());
bx.unchecked_ssub(gtext, ltext)
} else {
// These operations are those expected by `tests/codegen/integer-cmp.rs`,
// These operations are those expected by `tests/codegen-llvm/integer-cmp.rs`,
// from <https://github.com/rust-lang/rust/pull/63767>.
let is_lt = bx.icmp(pred(mir::BinOp::Lt), lhs, rhs);
let is_ne = bx.icmp(pred(mir::BinOp::Ne), lhs, rhs);
@@ -955,69 +1000,6 @@ fn codegen_scalar_checked_binop(
OperandValue::Pair(val, of)
}
/// Returns `true` if the `rvalue` can be computed into an [`OperandRef`],
/// rather than needing a full `PlaceRef` for the assignment destination.
///
/// This is used by the [`super::analyze`] code to decide which MIR locals
/// can stay as SSA values (as opposed to generating `alloca` slots for them).
/// As such, some paths here return `true` even where the specific rvalue
/// will not actually take the operand path because the result type is such
/// that it always gets an `alloca`, but where it's not worth re-checking the
/// layout in this code when the right thing will happen anyway.
pub(crate) fn rvalue_creates_operand(&self, rvalue: &mir::Rvalue<'tcx>) -> bool {
match *rvalue {
mir::Rvalue::Cast(mir::CastKind::Transmute, ref operand, cast_ty) => {
let operand_ty = operand.ty(self.mir, self.cx.tcx());
let cast_layout = self.cx.layout_of(self.monomorphize(cast_ty));
let operand_layout = self.cx.layout_of(self.monomorphize(operand_ty));
match (operand_layout.backend_repr, cast_layout.backend_repr) {
// When the output will be in memory anyway, just use its place
// (instead of the operand path) unless it's the trivial ZST case.
(_, abi::BackendRepr::Memory { .. }) => cast_layout.is_zst(),
// Otherwise (for a non-memory output) if the input is memory
// then we can just read the value from the place.
(abi::BackendRepr::Memory { .. }, _) => true,
// When we have scalar immediates, we can only convert things
// where the sizes match, to avoid endianness questions.
(abi::BackendRepr::Scalar(a), abi::BackendRepr::Scalar(b)) =>
a.size(self.cx) == b.size(self.cx),
(abi::BackendRepr::ScalarPair(a0, a1), abi::BackendRepr::ScalarPair(b0, b1)) =>
a0.size(self.cx) == b0.size(self.cx) && a1.size(self.cx) == b1.size(self.cx),
// Mixing Scalars and ScalarPairs can get quite complicated when
// padding and undef get involved, so leave that to the memory path.
(abi::BackendRepr::Scalar(_), abi::BackendRepr::ScalarPair(_, _)) |
(abi::BackendRepr::ScalarPair(_, _), abi::BackendRepr::Scalar(_)) => false,
// SIMD vectors aren't worth the trouble of dealing with complex
// cases like from vectors of f32 to vectors of pointers or
// from fat pointers to vectors of u16. (See #143194 #110021 ...)
(abi::BackendRepr::SimdVector { .. }, _) | (_, abi::BackendRepr::SimdVector { .. }) => false,
}
}
mir::Rvalue::Ref(..) |
mir::Rvalue::CopyForDeref(..) |
mir::Rvalue::RawPtr(..) |
mir::Rvalue::Len(..) |
mir::Rvalue::Cast(..) | // (*)
mir::Rvalue::ShallowInitBox(..) | // (*)
mir::Rvalue::BinaryOp(..) |
mir::Rvalue::UnaryOp(..) |
mir::Rvalue::Discriminant(..) |
mir::Rvalue::NullaryOp(..) |
mir::Rvalue::ThreadLocalRef(_) |
mir::Rvalue::Use(..) |
mir::Rvalue::Repeat(..) | // (*)
mir::Rvalue::Aggregate(..) | // (*)
mir::Rvalue::WrapUnsafeBinder(..) => // (*)
true,
}
// (*) this is only true if the type is suitable
}
}
/// Transmutes a single scalar value `imm` from `from_scalar` to `to_scalar`.
@@ -1064,7 +1046,7 @@ pub(super) fn transmute_scalar<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
// That said, last time we tried removing this, it didn't actually help
// the rustc-perf results, so might as well keep doing it
// <https://github.com/rust-lang/rust/pull/135610#issuecomment-2599275182>
assume_scalar_range(bx, imm, from_scalar, from_backend_ty);
assume_scalar_range(bx, imm, from_scalar, from_backend_ty, Some(&to_scalar));
imm = match (from_scalar.primitive(), to_scalar.primitive()) {
(Int(..) | Float(_), Int(..) | Float(_)) => bx.bitcast(imm, to_backend_ty),
@@ -1092,22 +1074,42 @@ pub(super) fn transmute_scalar<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
// since it's never passed to something with parameter metadata (especially
// after MIR inlining) so the only way to tell the backend about the
// constraint that the `transmute` introduced is to `assume` it.
assume_scalar_range(bx, imm, to_scalar, to_backend_ty);
assume_scalar_range(bx, imm, to_scalar, to_backend_ty, Some(&from_scalar));
imm = bx.to_immediate_scalar(imm, to_scalar);
imm
}
/// Emits an `assume` call that `imm`'s value is within the known range of `scalar`.
///
/// If `known` is `Some`, only emits the assume if it's more specific than
/// whatever is already known from the range of *that* scalar.
fn assume_scalar_range<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
bx: &mut Bx,
imm: Bx::Value,
scalar: abi::Scalar,
backend_ty: Bx::Type,
known: Option<&abi::Scalar>,
) {
if matches!(bx.cx().sess().opts.optimize, OptLevel::No) || scalar.is_always_valid(bx.cx()) {
if matches!(bx.cx().sess().opts.optimize, OptLevel::No) {
return;
}
match (scalar, known) {
(abi::Scalar::Union { .. }, _) => return,
(_, None) => {
if scalar.is_always_valid(bx.cx()) {
return;
}
}
(abi::Scalar::Initialized { valid_range, .. }, Some(known)) => {
let known_range = known.valid_range(bx.cx());
if valid_range.contains_range(known_range, scalar.size(bx.cx())) {
return;
}
}
}
match scalar.primitive() {
abi::Primitive::Int(..) => {
let range = scalar.valid_range(bx.cx());
@@ -1,3 +1,5 @@
use std::path::PathBuf;
use rustc_ast::expand::autodiff_attrs::AutoDiffItem;
use rustc_errors::{DiagCtxtHandle, FatalError};
use rustc_middle::dep_graph::WorkProduct;
@@ -24,8 +26,9 @@ fn run_link(
/// if necessary and running any further optimizations
fn run_and_optimize_fat_lto(
cgcx: &CodegenContext<Self>,
exported_symbols_for_lto: &[String],
each_linked_rlib_for_lto: &[PathBuf],
modules: Vec<FatLtoInput<Self>>,
cached_modules: Vec<(SerializedModule<Self::ModuleBuffer>, WorkProduct)>,
diff_fncs: Vec<AutoDiffItem>,
) -> Result<ModuleCodegen<Self::Module>, FatalError>;
/// Performs thin LTO by performing necessary global analysis and returning two
@@ -33,6 +36,8 @@ fn run_and_optimize_fat_lto(
/// can simply be copied over from the incr. comp. cache.
fn run_thin_lto(
cgcx: &CodegenContext<Self>,
exported_symbols_for_lto: &[String],
each_linked_rlib_for_lto: &[PathBuf],
modules: Vec<(String, Self::ThinBuffer)>,
cached_modules: Vec<(SerializedModule<Self::ModuleBuffer>, WorkProduct)>,
) -> Result<(Vec<ThinModule<Self>>, Vec<WorkProduct>), FatalError>;
+3 -3
View File
@@ -128,15 +128,15 @@ const_eval_frame_note_inner = inside {$where_ ->
const_eval_frame_note_last = the failure occurred here
const_eval_incompatible_arg_types =
calling a function whose parameter #{$arg_idx} has type {$callee_ty} passing argument of type {$caller_ty}
const_eval_incompatible_calling_conventions =
calling a function with calling convention "{$callee_conv}" using calling convention "{$caller_conv}"
const_eval_incompatible_return_types =
calling a function with return type {$callee_ty} passing return place of type {$caller_ty}
const_eval_incompatible_types =
calling a function with argument of type {$callee_ty} passing data of type {$caller_ty}
const_eval_interior_mutable_borrow_escaping =
interior mutable shared borrows of temporaries that have their lifetime extended until the end of the program are not allowed
.label = this borrow of an interior mutable value refers to such a temporary
@@ -152,7 +152,7 @@ pub(crate) fn mk_eval_cx_to_read_const_val<'tcx>(
pub fn mk_eval_cx_for_const_val<'tcx>(
tcx: TyCtxtAt<'tcx>,
typing_env: ty::TypingEnv<'tcx>,
val: mir::ConstValue<'tcx>,
val: mir::ConstValue,
ty: Ty<'tcx>,
) -> Option<(CompileTimeInterpCx<'tcx>, OpTy<'tcx>)> {
let ecx = mk_eval_cx_to_read_const_val(tcx.tcx, tcx.span, typing_env, CanAccessMutGlobal::No);
@@ -172,7 +172,7 @@ pub(super) fn op_to_const<'tcx>(
ecx: &CompileTimeInterpCx<'tcx>,
op: &OpTy<'tcx>,
for_diagnostics: bool,
) -> ConstValue<'tcx> {
) -> ConstValue {
// Handle ZST consistently and early.
if op.layout.is_zst() {
return ConstValue::ZeroSized;
@@ -241,10 +241,9 @@ pub(super) fn op_to_const<'tcx>(
let (prov, offset) =
ptr.into_pointer_or_addr().expect(msg).prov_and_relative_offset();
let alloc_id = prov.alloc_id();
let data = ecx.tcx.global_alloc(alloc_id).unwrap_memory();
assert!(offset == abi::Size::ZERO, "{}", msg);
let meta = b.to_target_usize(ecx).expect(msg);
ConstValue::Slice { data, meta }
ConstValue::Slice { alloc_id, meta }
}
Immediate::Uninit => bug!("`Uninit` is not a valid value for {}", op.layout.ty),
},
@@ -256,7 +255,7 @@ pub(crate) fn turn_into_const_value<'tcx>(
tcx: TyCtxt<'tcx>,
constant: ConstAlloc<'tcx>,
key: ty::PseudoCanonicalInput<'tcx, GlobalId<'tcx>>,
) -> ConstValue<'tcx> {
) -> ConstValue {
let cid = key.value;
let def_id = cid.instance.def.def_id();
let is_static = tcx.is_static(def_id);
@@ -279,23 +279,15 @@ fn hook_special_const_fn(
fn guaranteed_cmp(&mut self, a: Scalar, b: Scalar) -> InterpResult<'tcx, u8> {
interp_ok(match (a, b) {
// Comparisons between integers are always known.
(Scalar::Int { .. }, Scalar::Int { .. }) => {
if a == b {
1
} else {
0
}
}
// Comparisons of abstract pointers with null pointers are known if the pointer
// is in bounds, because if they are in bounds, the pointer can't be null.
// Inequality with integers other than null can never be known for sure.
(Scalar::Int(int), ptr @ Scalar::Ptr(..))
| (ptr @ Scalar::Ptr(..), Scalar::Int(int))
(Scalar::Int(a), Scalar::Int(b)) => (a == b) as u8,
// Comparisons of null with an arbitrary scalar can be known if `scalar_may_be_null`
// indicates that the scalar can definitely *not* be null.
(Scalar::Int(int), ptr) | (ptr, Scalar::Int(int))
if int.is_null() && !self.scalar_may_be_null(ptr)? =>
{
0
}
// Equality with integers can never be known for sure.
// Other ways of comparing integers and pointers can never be known for sure.
(Scalar::Int { .. }, Scalar::Ptr(..)) | (Scalar::Ptr(..), Scalar::Int { .. }) => 2,
// FIXME: return a `1` for when both sides are the same pointer, *except* that
// some things (like functions and vtables) do not have stable addresses
@@ -28,7 +28,7 @@
#[instrument(skip(tcx), level = "debug")]
pub(crate) fn try_destructure_mir_constant_for_user_output<'tcx>(
tcx: TyCtxt<'tcx>,
val: mir::ConstValue<'tcx>,
val: mir::ConstValue,
ty: Ty<'tcx>,
) -> Option<mir::DestructuredConstant<'tcx>> {
let typing_env = ty::TypingEnv::fully_monomorphized();
@@ -259,7 +259,7 @@ pub fn valtree_to_const_value<'tcx>(
tcx: TyCtxt<'tcx>,
typing_env: ty::TypingEnv<'tcx>,
cv: ty::Value<'tcx>,
) -> mir::ConstValue<'tcx> {
) -> mir::ConstValue {
// Basic idea: We directly construct `Scalar` values from trivial `ValTree`s
// (those for constants with type bool, int, uint, float or char).
// For all other types we create an `MPlace` and fill that by walking
+10 -6
View File
@@ -500,7 +500,7 @@ fn diagnostic_message(&self) -> DiagMessage {
InvalidNichedEnumVariantWritten { .. } => {
const_eval_invalid_niched_enum_variant_written
}
AbiMismatchArgument { .. } => const_eval_incompatible_types,
AbiMismatchArgument { .. } => const_eval_incompatible_arg_types,
AbiMismatchReturn { .. } => const_eval_incompatible_return_types,
}
}
@@ -625,12 +625,16 @@ fn add_args<G: EmissionGuarantee>(self, diag: &mut Diag<'_, G>) {
diag.arg("data_size", info.data_size);
}
InvalidNichedEnumVariantWritten { enum_ty } => {
diag.arg("ty", enum_ty.to_string());
diag.arg("ty", enum_ty);
}
AbiMismatchArgument { caller_ty, callee_ty }
| AbiMismatchReturn { caller_ty, callee_ty } => {
diag.arg("caller_ty", caller_ty.to_string());
diag.arg("callee_ty", callee_ty.to_string());
AbiMismatchArgument { arg_idx, caller_ty, callee_ty } => {
diag.arg("arg_idx", arg_idx + 1); // adjust for 1-indexed lists in output
diag.arg("caller_ty", caller_ty);
diag.arg("callee_ty", callee_ty);
}
AbiMismatchReturn { caller_ty, callee_ty } => {
diag.arg("caller_ty", caller_ty);
diag.arg("callee_ty", callee_ty);
}
}
}
@@ -270,6 +270,7 @@ fn pass_argument<'x, 'y>(
Item = (&'x FnArg<'tcx, M::Provenance>, &'y ArgAbi<'tcx, Ty<'tcx>>),
>,
callee_abi: &ArgAbi<'tcx, Ty<'tcx>>,
callee_arg_idx: usize,
callee_arg: &mir::Place<'tcx>,
callee_ty: Ty<'tcx>,
already_live: bool,
@@ -298,6 +299,7 @@ fn pass_argument<'x, 'y>(
// Check compatibility
if !self.check_argument_compat(caller_abi, callee_abi)? {
throw_ub!(AbiMismatchArgument {
arg_idx: callee_arg_idx,
caller_ty: caller_abi.layout.ty,
callee_ty: callee_abi.layout.ty
});
@@ -424,7 +426,7 @@ pub fn init_stack_frame(
// this is a single iterator (that handles `spread_arg`), then
// `pass_argument` would be the loop body. It takes care to
// not advance `caller_iter` for ignored arguments.
let mut callee_args_abis = callee_fn_abi.args.iter();
let mut callee_args_abis = callee_fn_abi.args.iter().enumerate();
for local in body.args_iter() {
// Construct the destination place for this argument. At this point all
// locals are still dead, so we cannot construct a `PlaceTy`.
@@ -445,10 +447,11 @@ pub fn init_stack_frame(
&[mir::ProjectionElem::Field(FieldIdx::from_usize(i), field_ty)],
*self.tcx,
);
let callee_abi = callee_args_abis.next().unwrap();
let (idx, callee_abi) = callee_args_abis.next().unwrap();
self.pass_argument(
&mut caller_args,
callee_abi,
idx,
&dest,
field_ty,
/* already_live */ true,
@@ -456,10 +459,11 @@ pub fn init_stack_frame(
}
} else {
// Normal argument. Cannot mark it as live yet, it might be unsized!
let callee_abi = callee_args_abis.next().unwrap();
let (idx, callee_abi) = callee_args_abis.next().unwrap();
self.pass_argument(
&mut caller_args,
callee_abi,
idx,
&dest,
ty,
/* already_live */ false,
@@ -582,8 +582,7 @@ pub fn eval_mir_constant(
span: Span,
layout: Option<TyAndLayout<'tcx>>,
) -> InterpResult<'tcx, OpTy<'tcx, M::Provenance>> {
M::eval_mir_constant(self, *val, span, layout, |ecx, val, span, layout| {
let const_val = val.eval(*ecx.tcx, ecx.typing_env, span).map_err(|err| {
let const_val = val.eval(*self.tcx, self.typing_env, span).map_err(|err| {
if M::ALL_CONSTS_ARE_PRECHECKED {
match err {
ErrorHandled::TooGeneric(..) => {},
@@ -599,11 +598,10 @@ pub fn eval_mir_constant(
}
}
}
err.emit_note(*ecx.tcx);
err.emit_note(*self.tcx);
err
})?;
ecx.const_val_to_op(const_val, val.ty(), layout)
})
self.const_val_to_op(const_val, val.ty(), layout)
}
#[must_use]
@@ -6,7 +6,7 @@
use rustc_abi::{FieldIdx, HasDataLayout, Size};
use rustc_apfloat::ieee::{Double, Half, Quad, Single};
use rustc_middle::mir::interpret::{read_target_uint, write_target_uint};
use rustc_middle::mir::interpret::{CTFE_ALLOC_SALT, read_target_uint, write_target_uint};
use rustc_middle::mir::{self, BinOp, ConstValue, NonDivergingIntrinsic};
use rustc_middle::ty::layout::TyAndLayout;
use rustc_middle::ty::{Ty, TyCtxt};
@@ -17,17 +17,18 @@
use super::memory::MemoryKind;
use super::util::ensure_monomorphic_enough;
use super::{
Allocation, CheckInAllocMsg, ConstAllocation, ImmTy, InterpCx, InterpResult, Machine, OpTy,
PlaceTy, Pointer, PointerArithmetic, Provenance, Scalar, err_ub_custom, err_unsup_format,
interp_ok, throw_inval, throw_ub_custom, throw_ub_format,
AllocId, CheckInAllocMsg, ImmTy, InterpCx, InterpResult, Machine, OpTy, PlaceTy, Pointer,
PointerArithmetic, Provenance, Scalar, err_ub_custom, err_unsup_format, interp_ok, throw_inval,
throw_ub_custom, throw_ub_format,
};
use crate::fluent_generated as fluent;
/// Directly returns an `Allocation` containing an absolute path representation of the given type.
pub(crate) fn alloc_type_name<'tcx>(tcx: TyCtxt<'tcx>, ty: Ty<'tcx>) -> ConstAllocation<'tcx> {
pub(crate) fn alloc_type_name<'tcx>(tcx: TyCtxt<'tcx>, ty: Ty<'tcx>) -> (AllocId, u64) {
let path = crate::util::type_name(tcx, ty);
let alloc = Allocation::from_bytes_byte_aligned_immutable(path.into_bytes(), ());
tcx.mk_const_alloc(alloc)
let bytes = path.into_bytes();
let len = bytes.len().try_into().unwrap();
(tcx.allocate_bytes_dedup(bytes, CTFE_ALLOC_SALT), len)
}
impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
/// Generates a value of `TypeId` for `ty` in-place.
@@ -126,8 +127,8 @@ pub fn eval_intrinsic(
sym::type_name => {
let tp_ty = instance.args.type_at(0);
ensure_monomorphic_enough(tcx, tp_ty)?;
let alloc = alloc_type_name(tcx, tp_ty);
let val = ConstValue::Slice { data: alloc, meta: alloc.inner().size().bytes() };
let (alloc_id, meta) = alloc_type_name(tcx, tp_ty);
let val = ConstValue::Slice { alloc_id, meta };
let val = self.const_val_to_op(val, dest.layout.ty, Some(dest.layout))?;
self.copy_op(&val, dest)?;
}
@@ -12,7 +12,6 @@
use rustc_middle::ty::Ty;
use rustc_middle::ty::layout::TyAndLayout;
use rustc_middle::{mir, ty};
use rustc_span::Span;
use rustc_span::def_id::DefId;
use rustc_target::callconv::FnAbi;
@@ -587,27 +586,6 @@ fn after_local_moved_to_memory(
interp_ok(())
}
/// Evaluate the given constant. The `eval` function will do all the required evaluation,
/// but this hook has the chance to do some pre/postprocessing.
#[inline(always)]
fn eval_mir_constant<F>(
ecx: &InterpCx<'tcx, Self>,
val: mir::Const<'tcx>,
span: Span,
layout: Option<TyAndLayout<'tcx>>,
eval: F,
) -> InterpResult<'tcx, OpTy<'tcx, Self::Provenance>>
where
F: Fn(
&InterpCx<'tcx, Self>,
mir::Const<'tcx>,
Span,
Option<TyAndLayout<'tcx>>,
) -> InterpResult<'tcx, OpTy<'tcx, Self::Provenance>>,
{
eval(ecx, val, span, layout)
}
/// Returns the salt to be used for a deduplicated global alloation.
/// If the allocation is for a function, the instance is provided as well
/// (this lets Miri ensure unique addresses for some functions).
@@ -67,8 +67,10 @@ pub enum AllocKind {
LiveData,
/// A function allocation (that fn ptrs point to).
Function,
/// A "virtual" allocation, used for vtables and TypeId.
Virtual,
/// A vtable allocation.
VTable,
/// A TypeId allocation.
TypeId,
/// A dead allocation.
Dead,
}
@@ -952,7 +954,8 @@ pub fn get_alloc_info(&self, id: AllocId) -> AllocInfo {
let kind = match global_alloc {
GlobalAlloc::Static { .. } | GlobalAlloc::Memory { .. } => AllocKind::LiveData,
GlobalAlloc::Function { .. } => bug!("We already checked function pointers above"),
GlobalAlloc::VTable { .. } | GlobalAlloc::TypeId { .. } => AllocKind::Virtual,
GlobalAlloc::VTable { .. } => AllocKind::VTable,
GlobalAlloc::TypeId { .. } => AllocKind::TypeId,
};
return AllocInfo::new(size, align, kind, mutbl);
}
@@ -997,7 +1000,7 @@ pub fn get_ptr_type_id(
ptr: Pointer<Option<M::Provenance>>,
) -> InterpResult<'tcx, (Ty<'tcx>, u64)> {
let (alloc_id, offset, _meta) = self.ptr_get_alloc_id(ptr, 0)?;
let GlobalAlloc::TypeId { ty } = self.tcx.global_alloc(alloc_id) else {
let Some(GlobalAlloc::TypeId { ty }) = self.tcx.try_get_global_alloc(alloc_id) else {
throw_ub_format!("invalid `TypeId` value: not all bytes carry type id metadata")
};
interp_ok((ty, offset.bytes()))
@@ -1617,6 +1620,13 @@ pub fn scalar_may_be_null(&self, scalar: Scalar<M::Provenance>) -> InterpResult<
match self.ptr_try_get_alloc_id(ptr, 0) {
Ok((alloc_id, offset, _)) => {
let info = self.get_alloc_info(alloc_id);
if matches!(info.kind, AllocKind::TypeId) {
// We *could* actually precisely answer this question since here,
// the offset *is* the integer value. But the entire point of making
// this a pointer is not to leak the integer value, so we say everything
// might be null.
return interp_ok(true);
}
// If the pointer is in-bounds (including "at the end"), it is definitely not null.
if offset <= info.size {
return interp_ok(false);
@@ -836,7 +836,7 @@ pub fn eval_operand(
pub(crate) fn const_val_to_op(
&self,
val_val: mir::ConstValue<'tcx>,
val_val: mir::ConstValue,
ty: Ty<'tcx>,
layout: Option<TyAndLayout<'tcx>>,
) -> InterpResult<'tcx, OpTy<'tcx, M::Provenance>> {
@@ -860,9 +860,8 @@ pub(crate) fn const_val_to_op(
}
mir::ConstValue::Scalar(x) => adjust_scalar(x)?.into(),
mir::ConstValue::ZeroSized => Immediate::Uninit,
mir::ConstValue::Slice { data, meta } => {
mir::ConstValue::Slice { alloc_id, meta } => {
// This is const data, no mutation allowed.
let alloc_id = self.tcx.reserve_and_set_memory_alloc(data);
let ptr = Pointer::new(CtfeProvenance::from(alloc_id).as_immutable(), Size::ZERO);
Immediate::new_slice(self.global_root_pointer(ptr)?.into(), meta, self)
}
@@ -57,7 +57,7 @@ pub(crate) fn const_caller_location_provider(
file: Symbol,
line: u32,
col: u32,
) -> mir::ConstValue<'_> {
) -> mir::ConstValue {
trace!("const_caller_location: {}:{}:{}", file, line, col);
let mut ecx = mk_eval_cx_to_read_const_val(
tcx,
@@ -551,6 +551,11 @@ pub fn llvm_recording_enabled(&self) -> bool {
pub fn get_self_profiler(&self) -> Option<Arc<SelfProfiler>> {
self.profiler.clone()
}
/// Is expensive recording of query keys and/or function arguments enabled?
pub fn is_args_recording_enabled(&self) -> bool {
self.enabled() && self.event_filter_mask.intersects(EventFilter::ARGS)
}
}
/// A helper for recording costly arguments to self-profiling events. Used with
@@ -1,8 +1,10 @@
#### Note: this error code is no longer emitted by the compiler.
Macro import declaration was malformed.
Erroneous code examples:
```compile_fail,E0466
```compile_fail
#[macro_use(a_macro(another_macro))] // error: invalid import declaration
extern crate core as some_crate;
+4 -4
View File
@@ -18,7 +18,7 @@
pub(crate) fn entrypoint(stream: &MdStream<'_>, buf: &mut Buffer) -> io::Result<()> {
#[cfg(not(test))]
if let Some((w, _)) = termize::dimensions() {
WIDTH.with(|c| c.set(std::cmp::min(w, DEFAULT_COLUMN_WIDTH)));
WIDTH.set(std::cmp::min(w, DEFAULT_COLUMN_WIDTH));
}
write_stream(stream, buf, None, 0)?;
buf.write_all(b"\n")
@@ -84,7 +84,7 @@ fn write_tt(tt: &MdTree<'_>, buf: &mut Buffer, indent: usize) -> io::Result<()>
reset_cursor();
}
MdTree::HorizontalRule => {
(0..WIDTH.with(Cell::get)).for_each(|_| buf.write_all(b"-").unwrap());
(0..WIDTH.get()).for_each(|_| buf.write_all(b"-").unwrap());
reset_cursor();
}
MdTree::Heading(n, stream) => {
@@ -121,7 +121,7 @@ fn write_tt(tt: &MdTree<'_>, buf: &mut Buffer, indent: usize) -> io::Result<()>
/// End of that block, just wrap the line
fn reset_cursor() {
CURSOR.with(|cur| cur.set(0));
CURSOR.set(0);
}
/// Change to be generic on Write for testing. If we have a link URL, we don't
@@ -144,7 +144,7 @@ fn write_wrapping<B: io::Write>(
buf.write_all(ind_ws)?;
cur.set(indent);
}
let ch_count = WIDTH.with(Cell::get) - cur.get();
let ch_count = WIDTH.get() - cur.get();
let mut iter = to_write.char_indices();
let Some((end_idx, _ch)) = iter.nth(ch_count) else {
// Write entire line
-27
View File
@@ -1,26 +1,8 @@
expand_arg_not_attributes =
second argument must be `attributes`
expand_attr_no_arguments =
attribute must have either one or two arguments
expand_attribute_meta_item =
attribute must be a meta item, not a literal
expand_attribute_single_word =
attribute must only be a single word
expand_attributes_on_expressions_experimental =
attributes on expressions are experimental
.help_outer_doc = `///` is used for outer documentation comments; for a plain comment, use `//`
.help_inner_doc = `//!` is used for inner documentation comments; for a plain comment, use `//` by removing the `!` or inserting a space in between them: `// !`
expand_attributes_wrong_form =
attribute must be of form: `attributes(foo, bar)`
expand_cannot_be_name_of_macro =
`{$trait_ident}` cannot be a name of {$macro_type} macro
expand_collapse_debuginfo_illegal =
illegal value for attribute #[collapse_debuginfo(no|external|yes)]
@@ -71,9 +53,6 @@ expand_glob_delegation_outside_impls =
expand_glob_delegation_traitless_qpath =
qualified path without a trait in glob delegation
expand_helper_attribute_name_invalid =
`{$name}` cannot be a name of derive helper attribute
expand_incomplete_parse =
macro expansion ignores {$descr} and any tokens following
.label = caused by the macro expansion here
@@ -165,12 +144,6 @@ expand_mve_unrecognized_var =
expand_non_inline_modules_in_proc_macro_input_are_unstable =
non-inline modules in proc macro input are unstable
expand_not_a_meta_item =
not a meta item
expand_only_one_word =
must only be one word
expand_proc_macro_back_compat = using an old version of `{$crate_name}`
.note = older versions of the `{$crate_name}` crate no longer compile; please update to `{$crate_name}` v{$fixed_version}, or switch to one of the `{$crate_name}` alternatives
+26 -93
View File
@@ -1,3 +1,4 @@
use std::any::Any;
use std::default::Default;
use std::iter;
use std::path::Component::Prefix;
@@ -361,17 +362,13 @@ fn expand<'cx>(
}
/// Represents a thing that maps token trees to Macro Results
pub trait TTMacroExpander {
pub trait TTMacroExpander: Any {
fn expand<'cx>(
&self,
ecx: &'cx mut ExtCtxt<'_>,
span: Span,
input: TokenStream,
) -> MacroExpanderResult<'cx>;
fn get_unused_rule(&self, _rule_i: usize) -> Option<(&Ident, Span)> {
None
}
}
pub type MacroExpanderResult<'cx> = ExpandResult<Box<dyn MacResult + 'cx>, ()>;
@@ -379,7 +376,7 @@ fn get_unused_rule(&self, _rule_i: usize) -> Option<(&Ident, Span)> {
pub type MacroExpanderFn =
for<'cx> fn(&'cx mut ExtCtxt<'_>, Span, TokenStream) -> MacroExpanderResult<'cx>;
impl<F> TTMacroExpander for F
impl<F: 'static> TTMacroExpander for F
where
F: for<'cx> Fn(&'cx mut ExtCtxt<'_>, Span, TokenStream) -> MacroExpanderResult<'cx>,
{
@@ -864,7 +861,7 @@ fn collapse_debuginfo_by_name(
/// | (unspecified) | no | if-ext | if-ext | yes |
/// | external | no | if-ext | if-ext | yes |
/// | yes | yes | yes | yes | yes |
fn get_collapse_debuginfo(sess: &Session, attrs: &[impl AttributeExt], ext: bool) -> bool {
fn get_collapse_debuginfo(sess: &Session, attrs: &[hir::Attribute], ext: bool) -> bool {
let flag = sess.opts.cg.collapse_macro_debuginfo;
let attr = ast::attr::find_by_name(attrs, sym::collapse_debuginfo)
.and_then(|attr| {
@@ -875,7 +872,7 @@ fn get_collapse_debuginfo(sess: &Session, attrs: &[impl AttributeExt], ext: bool
.ok()
})
.unwrap_or_else(|| {
if ast::attr::contains_name(attrs, sym::rustc_builtin_macro) {
if find_attr!(attrs, AttributeKind::RustcBuiltinMacro { .. }) {
CollapseMacroDebuginfo::Yes
} else {
CollapseMacroDebuginfo::Unspecified
@@ -918,16 +915,18 @@ pub fn new(
let collapse_debuginfo = Self::get_collapse_debuginfo(sess, attrs, !is_local);
tracing::debug!(?name, ?local_inner_macros, ?collapse_debuginfo, ?allow_internal_unsafe);
let (builtin_name, helper_attrs) = ast::attr::find_by_name(attrs, sym::rustc_builtin_macro)
.map(|attr| {
// Override `helper_attrs` passed above if it's a built-in macro,
// marking `proc_macro_derive` macros as built-in is not a realistic use case.
parse_macro_name_and_helper_attrs(sess.dcx(), attr, "built-in").map_or_else(
|| (Some(name), Vec::new()),
|(name, helper_attrs)| (Some(name), helper_attrs),
)
})
.unwrap_or_else(|| (None, helper_attrs));
let (builtin_name, helper_attrs) = match find_attr!(attrs, AttributeKind::RustcBuiltinMacro { builtin_name, helper_attrs, .. } => (builtin_name, helper_attrs))
{
// Override `helper_attrs` passed above if it's a built-in macro,
// marking `proc_macro_derive` macros as built-in is not a realistic use case.
Some((Some(name), helper_attrs)) => {
(Some(*name), helper_attrs.iter().copied().collect())
}
Some((None, _)) => (Some(name), Vec::new()),
// Not a built-in macro
None => (None, helper_attrs),
};
let stability = find_attr!(attrs, AttributeKind::Stability { stability, .. } => *stability);
@@ -1144,7 +1143,7 @@ fn append_stripped_cfg_item(
/// Names of specific methods to which glob delegation expands.
fn glob_delegation_suffixes(
&mut self,
&self,
trait_def_id: DefId,
impl_def_id: LocalDefId,
) -> Result<Vec<(Ident, Option<Ident>)>, Indeterminate>;
@@ -1227,6 +1226,7 @@ pub struct ExtCtxt<'a> {
pub(super) expanded_inert_attrs: MarkedAttrs,
/// `-Zmacro-stats` data.
pub macro_stats: FxHashMap<(Symbol, MacroKind), MacroStat>,
pub nb_macro_errors: usize,
}
impl<'a> ExtCtxt<'a> {
@@ -1257,6 +1257,7 @@ pub fn new(
expanded_inert_attrs: MarkedAttrs::new(),
buffered_early_lint: vec![],
macro_stats: Default::default(),
nb_macro_errors: 0,
}
}
@@ -1318,6 +1319,12 @@ pub fn expansion_cause(&self) -> Option<Span> {
self.current_expansion.id.expansion_cause()
}
/// This method increases the internal macro errors count and then call `trace_macros_diag`.
pub fn macro_error_and_trace_macros_diag(&mut self) {
self.nb_macro_errors += 1;
self.trace_macros_diag();
}
pub fn trace_macros_diag(&mut self) {
for (span, notes) in self.expansions.iter() {
let mut db = self.dcx().create_note(errors::TraceMacro { span: *span });
@@ -1385,80 +1392,6 @@ pub fn resolve_path(sess: &Session, path: impl Into<PathBuf>, span: Span) -> PRe
}
}
pub fn parse_macro_name_and_helper_attrs(
dcx: DiagCtxtHandle<'_>,
attr: &impl AttributeExt,
macro_type: &str,
) -> Option<(Symbol, Vec<Symbol>)> {
// Once we've located the `#[proc_macro_derive]` attribute, verify
// that it's of the form `#[proc_macro_derive(Foo)]` or
// `#[proc_macro_derive(Foo, attributes(A, ..))]`
let list = attr.meta_item_list()?;
let ([trait_attr] | [trait_attr, _]) = list.as_slice() else {
dcx.emit_err(errors::AttrNoArguments { span: attr.span() });
return None;
};
let Some(trait_attr) = trait_attr.meta_item() else {
dcx.emit_err(errors::NotAMetaItem { span: trait_attr.span() });
return None;
};
let trait_ident = match trait_attr.ident() {
Some(trait_ident) if trait_attr.is_word() => trait_ident,
_ => {
dcx.emit_err(errors::OnlyOneWord { span: trait_attr.span });
return None;
}
};
if !trait_ident.name.can_be_raw() {
dcx.emit_err(errors::CannotBeNameOfMacro {
span: trait_attr.span,
trait_ident,
macro_type,
});
}
let attributes_attr = list.get(1);
let proc_attrs: Vec<_> = if let Some(attr) = attributes_attr {
if !attr.has_name(sym::attributes) {
dcx.emit_err(errors::ArgumentNotAttributes { span: attr.span() });
}
attr.meta_item_list()
.unwrap_or_else(|| {
dcx.emit_err(errors::AttributesWrongForm { span: attr.span() });
&[]
})
.iter()
.filter_map(|attr| {
let Some(attr) = attr.meta_item() else {
dcx.emit_err(errors::AttributeMetaItem { span: attr.span() });
return None;
};
let ident = match attr.ident() {
Some(ident) if attr.is_word() => ident,
_ => {
dcx.emit_err(errors::AttributeSingleWord { span: attr.span });
return None;
}
};
if !ident.name.can_be_raw() {
dcx.emit_err(errors::HelperAttributeNameInvalid {
span: attr.span,
name: ident,
});
}
Some(ident.name)
})
.collect()
} else {
Vec::new()
};
Some((trait_ident.name, proc_attrs))
}
/// If this item looks like a specific enums from `rental`, emit a fatal error.
/// See #73345 and #83125 for more details.
/// FIXME(#73933): Remove this eventually.
-66
View File
@@ -78,72 +78,6 @@ pub(crate) struct MacroBodyStability {
pub head_span: Span,
}
#[derive(Diagnostic)]
#[diag(expand_attr_no_arguments)]
pub(crate) struct AttrNoArguments {
#[primary_span]
pub span: Span,
}
#[derive(Diagnostic)]
#[diag(expand_not_a_meta_item)]
pub(crate) struct NotAMetaItem {
#[primary_span]
pub span: Span,
}
#[derive(Diagnostic)]
#[diag(expand_only_one_word)]
pub(crate) struct OnlyOneWord {
#[primary_span]
pub span: Span,
}
#[derive(Diagnostic)]
#[diag(expand_cannot_be_name_of_macro)]
pub(crate) struct CannotBeNameOfMacro<'a> {
#[primary_span]
pub span: Span,
pub trait_ident: Ident,
pub macro_type: &'a str,
}
#[derive(Diagnostic)]
#[diag(expand_arg_not_attributes)]
pub(crate) struct ArgumentNotAttributes {
#[primary_span]
pub span: Span,
}
#[derive(Diagnostic)]
#[diag(expand_attributes_wrong_form)]
pub(crate) struct AttributesWrongForm {
#[primary_span]
pub span: Span,
}
#[derive(Diagnostic)]
#[diag(expand_attribute_meta_item)]
pub(crate) struct AttributeMetaItem {
#[primary_span]
pub span: Span,
}
#[derive(Diagnostic)]
#[diag(expand_attribute_single_word)]
pub(crate) struct AttributeSingleWord {
#[primary_span]
pub span: Span,
}
#[derive(Diagnostic)]
#[diag(expand_helper_attribute_name_invalid)]
pub(crate) struct HelperAttributeNameInvalid {
#[primary_span]
pub span: Span,
pub name: Ident,
}
#[derive(Diagnostic)]
#[diag(expand_feature_removed, code = E0557)]
#[note]
+3 -3
View File
@@ -693,7 +693,7 @@ fn error_recursion_limit_reached(&mut self) -> ErrorGuaranteed {
crate_name: self.cx.ecfg.crate_name,
});
self.cx.trace_macros_diag();
self.cx.macro_error_and_trace_macros_diag();
guar
}
@@ -707,7 +707,7 @@ fn error_wrong_fragment_kind(
) -> ErrorGuaranteed {
let guar =
self.cx.dcx().emit_err(WrongFragmentKind { span, kind: kind.name(), name: &mac.path });
self.cx.trace_macros_diag();
self.cx.macro_error_and_trace_macros_diag();
guar
}
@@ -1048,7 +1048,7 @@ fn parse_ast_fragment(
}
annotate_err_with_kind(&mut err, kind, span);
let guar = err.emit();
self.cx.trace_macros_diag();
self.cx.macro_error_and_trace_macros_diag();
kind.dummy(span, guar)
}
}
+1 -1
View File
@@ -22,7 +22,7 @@
mod proc_macro_server;
mod stats;
pub use mbe::macro_rules::compile_declarative_macro;
pub use mbe::macro_rules::{MacroRulesMacroExpander, compile_declarative_macro};
pub mod base;
pub mod config;
pub mod expand;
@@ -299,6 +299,7 @@ enum EofMatcherPositions {
}
/// Represents the possible results of an attempted parse.
#[derive(Debug)]
pub(crate) enum ParseResult<T, F> {
/// Parsed successfully.
Success(T),
+10 -8
View File
@@ -128,7 +128,7 @@ pub(super) struct MacroRule {
rhs: mbe::TokenTree,
}
struct MacroRulesMacroExpander {
pub struct MacroRulesMacroExpander {
node_id: NodeId,
name: Ident,
span: Span,
@@ -136,6 +136,14 @@ struct MacroRulesMacroExpander {
rules: Vec<MacroRule>,
}
impl MacroRulesMacroExpander {
pub fn get_unused_rule(&self, rule_i: usize) -> Option<(&Ident, Span)> {
// If the rhs contains an invocation like `compile_error!`, don't report it as unused.
let rule = &self.rules[rule_i];
if has_compile_error_macro(&rule.rhs) { None } else { Some((&self.name, rule.lhs_span)) }
}
}
impl TTMacroExpander for MacroRulesMacroExpander {
fn expand<'cx>(
&self,
@@ -154,12 +162,6 @@ fn expand<'cx>(
&self.rules,
))
}
fn get_unused_rule(&self, rule_i: usize) -> Option<(&Ident, Span)> {
// If the rhs contains an invocation like `compile_error!`, don't report it as unused.
let rule = &self.rules[rule_i];
if has_compile_error_macro(&rule.rhs) { None } else { Some((&self.name, rule.lhs_span)) }
}
}
struct DummyExpander(ErrorGuaranteed);
@@ -278,7 +280,7 @@ fn expand_macro<'cx>(
// Retry and emit a better error.
let (span, guar) =
diagnostics::failed_to_match_macro(cx.psess(), sp, def_span, name, arg, rules);
cx.trace_macros_diag();
cx.macro_error_and_trace_macros_diag();
DummyResult::any(span, guar)
}
}
+2 -1
View File
@@ -490,7 +490,8 @@ pub struct BuiltinAttribute {
),
ungated!(no_link, Normal, template!(Word), WarnFollowing, EncodeCrossCrate::No),
ungated!(repr, Normal, template!(List: "C"), DuplicatesOk, EncodeCrossCrate::No),
gated!(align, Normal, template!(List: "alignment"), DuplicatesOk, EncodeCrossCrate::No, fn_align, experimental!(align)),
// FIXME(#82232, #143834): temporarily renamed to mitigate `#[align]` nameres ambiguity
gated!(rustc_align, Normal, template!(List: "alignment"), DuplicatesOk, EncodeCrossCrate::No, fn_align, experimental!(rustc_align)),
ungated!(unsafe(Edition2024) export_name, Normal, template!(NameValueStr: "name"), FutureWarnPreceding, EncodeCrossCrate::No),
ungated!(unsafe(Edition2024) link_section, Normal, template!(NameValueStr: "name"), FutureWarnPreceding, EncodeCrossCrate::No),
ungated!(unsafe(Edition2024) no_mangle, Normal, template!(Word), WarnFollowing, EncodeCrossCrate::No),
+12
View File
@@ -1302,6 +1302,7 @@ fn span(&self) -> Span {
// FIXME: should not be needed anymore when all attrs are parsed
Attribute::Parsed(AttributeKind::Deprecation { span, .. }) => *span,
Attribute::Parsed(AttributeKind::DocComment { span, .. }) => *span,
Attribute::Parsed(AttributeKind::MacroUse { span, .. }) => *span,
Attribute::Parsed(AttributeKind::MayDangle(span)) => *span,
Attribute::Parsed(AttributeKind::Ignore { span, .. }) => *span,
Attribute::Parsed(AttributeKind::AutomaticallyDerived(span)) => *span,
@@ -1362,6 +1363,17 @@ fn doc_resolution_scope(&self) -> Option<AttrStyle> {
_ => None,
}
}
fn is_proc_macro_attr(&self) -> bool {
matches!(
self,
Attribute::Parsed(
AttributeKind::ProcMacro(..)
| AttributeKind::ProcMacroAttribute(..)
| AttributeKind::ProcMacroDerive { .. }
)
)
}
}
// FIXME(fn_delegation): use function delegation instead of manually forwarding
-3
View File
@@ -345,9 +345,6 @@ pub fn extract(attrs: &[impl AttributeExt]) -> Option<(Symbol, Span)> {
OwnedBox, sym::owned_box, owned_box, Target::Struct, GenericRequirement::Minimum(1);
GlobalAlloc, sym::global_alloc_ty, global_alloc_ty, Target::Struct, GenericRequirement::None;
// Experimental lang item for Miri
PtrUnique, sym::ptr_unique, ptr_unique, Target::Struct, GenericRequirement::Exact(1);
PhantomData, sym::phantom_data, phantom_data, Target::Struct, GenericRequirement::Exact(1);
ManuallyDrop, sym::manually_drop, manually_drop, Target::Struct, GenericRequirement::None;
+33 -10
View File
@@ -767,7 +767,10 @@ pub(crate) fn check_item_type(tcx: TyCtxt<'_>, def_id: LocalDefId) -> Result<(),
DefKind::Static { .. } => {
check_static_inhabited(tcx, def_id);
check_static_linkage(tcx, def_id);
res = res.and(wfcheck::check_static_item(tcx, def_id));
let ty = tcx.type_of(def_id).instantiate_identity();
res = res.and(wfcheck::check_static_item(
tcx, def_id, ty, /* should_check_for_sync */ true,
));
}
DefKind::Const => res = res.and(wfcheck::check_const_item(tcx, def_id)),
_ => unreachable!(),
@@ -1642,20 +1645,40 @@ fn check_enum(tcx: TyCtxt<'_>, def_id: LocalDefId) {
if def.repr().int.is_none() {
let is_unit = |var: &ty::VariantDef| matches!(var.ctor_kind(), Some(CtorKind::Const));
let has_disr = |var: &ty::VariantDef| matches!(var.discr, ty::VariantDiscr::Explicit(_));
let get_disr = |var: &ty::VariantDef| match var.discr {
ty::VariantDiscr::Explicit(disr) => Some(disr),
ty::VariantDiscr::Relative(_) => None,
};
let has_non_units = def.variants().iter().any(|var| !is_unit(var));
let disr_units = def.variants().iter().any(|var| is_unit(var) && has_disr(var));
let disr_non_unit = def.variants().iter().any(|var| !is_unit(var) && has_disr(var));
let non_unit = def.variants().iter().find(|var| !is_unit(var));
let disr_unit =
def.variants().iter().filter(|var| is_unit(var)).find_map(|var| get_disr(var));
let disr_non_unit =
def.variants().iter().filter(|var| !is_unit(var)).find_map(|var| get_disr(var));
if disr_non_unit || (disr_units && has_non_units) {
struct_span_code_err!(
if disr_non_unit.is_some() || (disr_unit.is_some() && non_unit.is_some()) {
let mut err = struct_span_code_err!(
tcx.dcx(),
tcx.def_span(def_id),
E0732,
"`#[repr(inttype)]` must be specified"
)
.emit();
"`#[repr(inttype)]` must be specified for enums with explicit discriminants and non-unit variants"
);
if let Some(disr_non_unit) = disr_non_unit {
err.span_label(
tcx.def_span(disr_non_unit),
"explicit discriminant on non-unit variant specified here",
);
} else {
err.span_label(
tcx.def_span(disr_unit.unwrap()),
"explicit discriminant specified here",
);
err.span_label(
tcx.def_span(non_unit.unwrap().def_id),
"non-unit discriminant declared here",
);
}
err.emit();
}
}
@@ -1180,12 +1180,13 @@ fn check_item_fn(
}
#[instrument(level = "debug", skip(tcx))]
pub(super) fn check_static_item(
tcx: TyCtxt<'_>,
pub(crate) fn check_static_item<'tcx>(
tcx: TyCtxt<'tcx>,
item_id: LocalDefId,
ty: Ty<'tcx>,
should_check_for_sync: bool,
) -> Result<(), ErrorGuaranteed> {
enter_wf_checking_ctxt(tcx, item_id, |wfcx| {
let ty = tcx.type_of(item_id).instantiate_identity();
let span = tcx.ty_span(item_id);
let item_ty = wfcx.deeply_normalize(span, Some(WellFormedLoc::Ty(item_id)), ty);
@@ -1212,9 +1213,9 @@ pub(super) fn check_static_item(
}
// Ensure that the end result is `Sync` in a non-thread local `static`.
let should_check_for_sync = tcx.static_mutability(item_id.to_def_id())
== Some(hir::Mutability::Not)
let should_check_for_sync = should_check_for_sync
&& !is_foreign_item
&& tcx.static_mutability(item_id.to_def_id()) == Some(hir::Mutability::Not)
&& !tcx.is_thread_local_static(item_id.to_def_id());
if should_check_for_sync {
@@ -18,7 +18,7 @@ pub(super) fn check_unused_traits(tcx: TyCtxt<'_>, (): ()) {
used_trait_imports.extend_unord(imports.items().copied());
}
for &id in tcx.maybe_unused_trait_imports(()) {
for &id in tcx.resolutions(()).maybe_unused_trait_imports.iter() {
debug_assert_eq!(tcx.def_kind(id), DefKind::Use);
if tcx.visibility(id).is_public() {
continue;
@@ -14,6 +14,7 @@
use rustc_span::{DUMMY_SP, Ident, Span};
use super::{HirPlaceholderCollector, ItemCtxt, bad_placeholder};
use crate::check::wfcheck::check_static_item;
use crate::errors::TypeofReservedKeywordUsed;
use crate::hir_ty_lowering::HirTyLowerer;
@@ -217,7 +218,15 @@ pub(super) fn type_of(tcx: TyCtxt<'_>, def_id: LocalDefId) -> ty::EarlyBinder<'_
"static variable",
)
} else {
icx.lower_ty(ty)
let ty = icx.lower_ty(ty);
// MIR relies on references to statics being scalars.
// Verify that here to avoid ill-formed MIR.
// We skip the `Sync` check to avoid cycles for type-alias-impl-trait,
// relying on the fact that non-Sync statics don't ICE the rest of the compiler.
match check_static_item(tcx, def_id, ty, /* should_check_for_sync */ false) {
Ok(()) => ty,
Err(guar) => Ty::new_error(tcx, guar),
}
}
}
ItemKind::Const(ident, _, ty, body_id) => {
@@ -275,7 +284,17 @@ pub(super) fn type_of(tcx: TyCtxt<'_>, def_id: LocalDefId) -> ty::EarlyBinder<'_
let args = ty::GenericArgs::identity_for_item(tcx, def_id);
Ty::new_fn_def(tcx, def_id.to_def_id(), args)
}
ForeignItemKind::Static(t, _, _) => icx.lower_ty(t),
ForeignItemKind::Static(ty, _, _) => {
let ty = icx.lower_ty(ty);
// MIR relies on references to statics being scalars.
// Verify that here to avoid ill-formed MIR.
// We skip the `Sync` check to avoid cycles for type-alias-impl-trait,
// relying on the fact that non-Sync statics don't ICE the rest of the compiler.
match check_static_item(tcx, def_id, ty, /* should_check_for_sync */ false) {
Ok(()) => ty,
Err(guar) => Ty::new_error(tcx, guar),
}
}
ForeignItemKind::Type => Ty::new_foreign(tcx, def_id.to_def_id()),
},
@@ -447,17 +447,30 @@ fn maybe_suggest_impl_trait(&self, self_ty: &hir::Ty<'_>, diag: &mut Diag<'_>) -
fn maybe_suggest_assoc_ty_bound(&self, self_ty: &hir::Ty<'_>, diag: &mut Diag<'_>) {
let mut parents = self.tcx().hir_parent_iter(self_ty.hir_id);
if let Some((_, hir::Node::AssocItemConstraint(constraint))) = parents.next()
if let Some((c_hir_id, hir::Node::AssocItemConstraint(constraint))) = parents.next()
&& let Some(obj_ty) = constraint.ty()
&& let Some((_, hir::Node::TraitRef(trait_ref))) = parents.next()
{
if let Some((_, hir::Node::TraitRef(..))) = parents.next()
&& let Some((_, hir::Node::Ty(ty))) = parents.next()
if let Some((_, hir::Node::Ty(ty))) = parents.next()
&& let hir::TyKind::TraitObject(..) = ty.kind
{
// Assoc ty bounds aren't permitted inside trait object types.
return;
}
if trait_ref
.path
.segments
.iter()
.find_map(|seg| {
seg.args.filter(|args| args.constraints.iter().any(|c| c.hir_id == c_hir_id))
})
.is_none_or(|args| args.parenthesized != hir::GenericArgsParentheses::No)
{
// Only consider angle-bracketed args (where we have a `=` to replace with `:`).
return;
}
let lo = if constraint.gen_args.span_ext.is_dummy() {
constraint.ident.span
} else {
@@ -1302,8 +1302,10 @@ pub(crate) fn suggest_clone_for_ref(
None => ".clone()".to_string(),
};
let span = expr.span.find_oldest_ancestor_in_same_ctxt().shrink_to_hi();
diag.span_suggestion_verbose(
expr.span.shrink_to_hi(),
span,
"consider using clone here",
suggestion,
Applicability::MachineApplicable,
@@ -264,6 +264,7 @@ pub(crate) fn report_method_error(
err.span_label(within_macro_span, "due to this macro variable");
}
self.suggest_valid_traits(&mut err, item_name, out_of_scope_traits, true);
self.suggest_unwrapping_inner_self(&mut err, source, rcvr_ty, item_name);
err.emit()
}
+1 -1
View File
@@ -1047,7 +1047,7 @@ fn perform_2229_migration_analysis(
}
}
}
lint.note("for more information, see <https://doc.rust-lang.org/nightly/edition-guide/rust-2021/disjoint-capture-in-closures.html>");
lint.note("for more information, see <https://doc.rust-lang.org/edition-guide/rust-2021/disjoint-capture-in-closures.html>");
let diagnostic_msg = format!(
"add a dummy let to cause {migrated_variables_concat} to be fully captured"
+4
View File
@@ -208,6 +208,10 @@ fn configure_and_expand(
// Expand macros now!
let krate = sess.time("expand_crate", || ecx.monotonic_expander().expand_crate(krate));
if ecx.nb_macro_errors > 0 {
sess.dcx().abort_if_errors();
}
// The rest is error reporting and stats
sess.psess.buffered_lints.with_lock(|buffered_lints: &mut Vec<BufferedEarlyLint>| {
+5 -4
View File
@@ -13,10 +13,10 @@
CoverageOptions, DebugInfo, DumpMonoStatsFormat, ErrorOutputType, ExternEntry, ExternLocation,
Externs, FmtDebug, FunctionReturn, InliningThreshold, Input, InstrumentCoverage,
InstrumentXRay, LinkSelfContained, LinkerPluginLto, LocationDetail, LtoCli, MirIncludeSpans,
NextSolverConfig, OomStrategy, Options, OutFileName, OutputType, OutputTypes, PAuthKey, PacRet,
Passes, PatchableFunctionEntry, Polonius, ProcMacroExecutionStrategy, Strip, SwitchWithOptPath,
SymbolManglingVersion, WasiExecModel, build_configuration, build_session_options,
rustc_optgroups,
NextSolverConfig, Offload, OomStrategy, Options, OutFileName, OutputType, OutputTypes,
PAuthKey, PacRet, Passes, PatchableFunctionEntry, Polonius, ProcMacroExecutionStrategy, Strip,
SwitchWithOptPath, SymbolManglingVersion, WasiExecModel, build_configuration,
build_session_options, rustc_optgroups,
};
use rustc_session::lint::Level;
use rustc_session::search_paths::SearchPath;
@@ -833,6 +833,7 @@ macro_rules! tracked {
tracked!(no_profiler_runtime, true);
tracked!(no_trait_vptr, true);
tracked!(no_unique_section_names, true);
tracked!(offload, vec![Offload::Enable]);
tracked!(on_broken_pipe, OnBrokenPipe::Kill);
tracked!(oom, OomStrategy::Panic);
tracked!(osx_rpath_install_name, true);
+1 -1
View File
@@ -593,7 +593,7 @@ lint_non_camel_case_type = {$sort} `{$name}` should have an upper camel case nam
lint_non_fmt_panic = panic message is not a string literal
.note = this usage of `{$name}!()` is deprecated; it will be a hard error in Rust 2021
.more_info_note = for more information, see <https://doc.rust-lang.org/nightly/edition-guide/rust-2021/panic-macro-consistency.html>
.more_info_note = for more information, see <https://doc.rust-lang.org/edition-guide/rust-2021/panic-macro-consistency.html>
.supports_fmt_note = the `{$name}!()` macro supports formatting, so there's no need for the `format!()` macro here
.supports_fmt_suggestion = remove the `format!(..)` macro call
.display_suggestion = add a "{"{"}{"}"}" format string to `Display` the message
+12 -3
View File
@@ -1654,7 +1654,7 @@ fn check_expr(&mut self, cx: &EarlyContext<'_>, expr: &ast::Expr) {
"`...` range patterns are deprecated",
@future_incompatible = FutureIncompatibleInfo {
reason: FutureIncompatibilityReason::EditionError(Edition::Edition2021),
reference: "<https://doc.rust-lang.org/nightly/edition-guide/rust-2021/warnings-promoted-to-error.html>",
reference: "<https://doc.rust-lang.org/edition-guide/rust-2021/warnings-promoted-to-error.html>",
};
}
@@ -1835,7 +1835,7 @@ fn check_pat_post(&mut self, _cx: &EarlyContext<'_>, pat: &ast::Pat) {
"detects edition keywords being used as an identifier",
@future_incompatible = FutureIncompatibleInfo {
reason: FutureIncompatibilityReason::EditionError(Edition::Edition2024),
reference: "<https://doc.rust-lang.org/nightly/edition-guide/rust-2024/gen-keyword.html>",
reference: "<https://doc.rust-lang.org/edition-guide/rust-2024/gen-keyword.html>",
};
}
@@ -2870,7 +2870,7 @@ fn check_expr(&mut self, cx: &LateContext<'tcx>, expr: &'tcx hir::Expr<'tcx>) {
if let hir::Expr {
kind:
hir::ExprKind::InlineAsm(hir::InlineAsm {
asm_macro: AsmMacro::Asm | AsmMacro::NakedAsm,
asm_macro: asm_macro @ (AsmMacro::Asm | AsmMacro::NakedAsm),
template_strs,
options,
..
@@ -2878,6 +2878,15 @@ fn check_expr(&mut self, cx: &LateContext<'tcx>, expr: &'tcx hir::Expr<'tcx>) {
..
} = expr
{
// Non-generic naked functions are allowed to define arbitrary
// labels.
if *asm_macro == AsmMacro::NakedAsm {
let def_id = expr.hir_id.owner.def_id;
if !cx.tcx.generics_of(def_id).requires_monomorphization(cx.tcx) {
return;
}
}
// asm with `options(raw)` does not do replacement with `{` and `}`.
let raw = options.contains(InlineAsmOptions::RAW);
+1 -1
View File
@@ -87,7 +87,7 @@
rewriting in `match` is an option to preserve the semantics up to Edition 2021",
@future_incompatible = FutureIncompatibleInfo {
reason: FutureIncompatibilityReason::EditionSemanticsChange(Edition::Edition2024),
reference: "<https://doc.rust-lang.org/nightly/edition-guide/rust-2024/temporary-if-let-scope.html>",
reference: "<https://doc.rust-lang.org/edition-guide/rust-2024/temporary-if-let-scope.html>",
};
}
@@ -71,7 +71,7 @@
"`impl Trait` will capture more lifetimes than possibly intended in edition 2024",
@future_incompatible = FutureIncompatibleInfo {
reason: FutureIncompatibilityReason::EditionSemanticsChange(Edition::Edition2024),
reference: "<https://doc.rust-lang.org/nightly/edition-guide/rust-2024/rpit-lifetime-capture.html>",
reference: "<https://doc.rust-lang.org/edition-guide/rust-2024/rpit-lifetime-capture.html>",
};
}
+10 -1
View File
@@ -356,7 +356,16 @@ pub fn late_lint_mod<'tcx, T: LateLintPass<'tcx> + 'tcx>(
let store = unerased_lint_store(tcx.sess);
if store.late_module_passes.is_empty() {
late_lint_mod_inner(tcx, module_def_id, context, builtin_lints);
// If all builtin lints can be skipped, there is no point in running `late_lint_mod_inner`
// at all. This happens often for dependencies built with `--cap-lints=allow`.
let dont_need_to_run = tcx.lints_that_dont_need_to_run(());
let can_skip_lints = builtin_lints
.get_lints()
.iter()
.all(|lint| dont_need_to_run.contains(&LintId::of(lint)));
if !can_skip_lints {
late_lint_mod_inner(tcx, module_def_id, context, builtin_lints);
}
} else {
let builtin_lints = Box::new(builtin_lints) as Box<dyn LateLintPass<'tcx>>;
let mut binding = store

Some files were not shown because too many files have changed in this diff Show More