Merge from rustc

This commit is contained in:
The Miri Cronjob Bot
2024-06-27 05:01:59 +00:00
309 changed files with 5872 additions and 2695 deletions
+1
View File
@@ -0,0 +1 @@
BasedOnStyle: LLVM
+1
View File
@@ -29,6 +29,7 @@ Files: compiler/*
x
x.ps1
x.py
.clang-format
.editorconfig
.git-blame-ignore-revs
.gitattributes
+1
View File
@@ -5627,6 +5627,7 @@ dependencies = [
"regex",
"rustc-hash",
"semver",
"similar",
"termcolor",
"walkdir",
]
+2 -1
View File
@@ -2126,7 +2126,8 @@ pub struct BareFnTy {
pub ext: Extern,
pub generic_params: ThinVec<GenericParam>,
pub decl: P<FnDecl>,
/// Span of the `fn(...) -> ...` part.
/// Span of the `[unsafe] [extern] fn(...) -> ...` part, i.e. everything
/// after the generic params (if there are any, e.g. `for<'a>`).
pub decl_span: Span,
}
+25 -28
View File
@@ -233,8 +233,7 @@ pub fn can_continue_expr_unambiguously(&self) -> bool {
pub const PREC_RANGE: i8 = -10;
// The range 2..=14 is reserved for AssocOp binary operator precedences.
pub const PREC_PREFIX: i8 = 50;
pub const PREC_POSTFIX: i8 = 60;
pub const PREC_PAREN: i8 = 99;
pub const PREC_UNAMBIGUOUS: i8 = 60;
pub const PREC_FORCE_PAREN: i8 = 100;
#[derive(Debug, Clone, Copy)]
@@ -325,37 +324,35 @@ pub fn order(self) -> i8 {
| ExprPrecedence::Let
| ExprPrecedence::Unary => PREC_PREFIX,
// Unary, postfix
ExprPrecedence::Await
| ExprPrecedence::Call
| ExprPrecedence::MethodCall
| ExprPrecedence::Field
| ExprPrecedence::Index
| ExprPrecedence::Try
| ExprPrecedence::InlineAsm
| ExprPrecedence::Mac
| ExprPrecedence::FormatArgs
| ExprPrecedence::OffsetOf
| ExprPrecedence::PostfixMatch => PREC_POSTFIX,
// Never need parens
ExprPrecedence::Array
| ExprPrecedence::Repeat
| ExprPrecedence::Tup
| ExprPrecedence::Lit
| ExprPrecedence::Path
| ExprPrecedence::Paren
| ExprPrecedence::If
| ExprPrecedence::While
| ExprPrecedence::ForLoop
| ExprPrecedence::Loop
| ExprPrecedence::Match
| ExprPrecedence::ConstBlock
| ExprPrecedence::Await
| ExprPrecedence::Block
| ExprPrecedence::TryBlock
| ExprPrecedence::Call
| ExprPrecedence::ConstBlock
| ExprPrecedence::Field
| ExprPrecedence::ForLoop
| ExprPrecedence::FormatArgs
| ExprPrecedence::Gen
| ExprPrecedence::If
| ExprPrecedence::Index
| ExprPrecedence::InlineAsm
| ExprPrecedence::Lit
| ExprPrecedence::Loop
| ExprPrecedence::Mac
| ExprPrecedence::Match
| ExprPrecedence::MethodCall
| ExprPrecedence::OffsetOf
| ExprPrecedence::Paren
| ExprPrecedence::Path
| ExprPrecedence::PostfixMatch
| ExprPrecedence::Repeat
| ExprPrecedence::Struct
| ExprPrecedence::Err => PREC_PAREN,
| ExprPrecedence::Try
| ExprPrecedence::TryBlock
| ExprPrecedence::Tup
| ExprPrecedence::While
| ExprPrecedence::Err => PREC_UNAMBIGUOUS,
}
}
}
+3
View File
@@ -130,6 +130,9 @@ ast_lowering_never_pattern_with_guard =
ast_lowering_no_precise_captures_on_apit = `use<...>` precise capturing syntax not allowed in argument-position `impl Trait`
ast_lowering_no_precise_captures_on_rpitit = `use<...>` precise capturing syntax is currently not allowed in return-position `impl Trait` in traits
.note = currently, return-position `impl Trait` in traits and trait implementations capture all lifetimes in scope
ast_lowering_previously_used_here = previously used here
ast_lowering_register1 = register `{$reg1_name}`
+49 -58
View File
@@ -66,14 +66,18 @@ pub(crate) fn delegation_has_self(&self, item_id: NodeId, path_id: NodeId, span:
let Ok(sig_id) = sig_id else {
return false;
};
if let Some(local_sig_id) = sig_id.as_local() {
self.has_self(sig_id, span)
}
fn has_self(&self, def_id: DefId, span: Span) -> bool {
if let Some(local_sig_id) = def_id.as_local() {
// The value may be missing due to recursive delegation.
// Error will be emitted later during HIR ty lowering.
self.resolver.delegation_fn_sigs.get(&local_sig_id).map_or(false, |sig| sig.has_self)
} else {
match self.tcx.def_kind(sig_id) {
match self.tcx.def_kind(def_id) {
DefKind::Fn => false,
DefKind::AssocFn => self.tcx.associated_item(sig_id).fn_has_self_parameter,
DefKind::AssocFn => self.tcx.associated_item(def_id).fn_has_self_parameter,
_ => span_bug!(span, "unexpected DefKind for delegation item"),
}
}
@@ -107,12 +111,17 @@ fn get_delegation_sig_id(
span: Span,
) -> Result<DefId, ErrorGuaranteed> {
let sig_id = if self.is_in_trait_impl { item_id } else { path_id };
let sig_id =
self.resolver.get_partial_res(sig_id).and_then(|r| r.expect_full_res().opt_def_id());
sig_id.ok_or_else(|| {
self.tcx
.dcx()
.span_delayed_bug(span, "LoweringContext: couldn't resolve delegation item")
self.get_resolution_id(sig_id, span)
}
fn get_resolution_id(&self, node_id: NodeId, span: Span) -> Result<DefId, ErrorGuaranteed> {
let def_id =
self.resolver.get_partial_res(node_id).and_then(|r| r.expect_full_res().opt_def_id());
def_id.ok_or_else(|| {
self.tcx.dcx().span_delayed_bug(
span,
format!("LoweringContext: couldn't resolve node {:?} in delegation item", node_id),
)
})
}
@@ -122,7 +131,7 @@ fn lower_delegation_generics(&mut self, span: Span) -> &'hir hir::Generics<'hir>
predicates: &[],
has_where_clause_predicates: false,
where_clause_span: span,
span: span,
span,
})
}
@@ -222,12 +231,7 @@ fn generate_arg(&mut self, param_id: HirId, span: Span) -> hir::Expr<'hir> {
}));
let path = self.arena.alloc(hir::Path { span, res: Res::Local(param_id), segments });
hir::Expr {
hir_id: self.next_id(),
kind: hir::ExprKind::Path(hir::QPath::Resolved(None, path)),
span,
}
self.mk_expr(hir::ExprKind::Path(hir::QPath::Resolved(None, path)), span)
}
fn lower_delegation_body(
@@ -236,19 +240,11 @@ fn lower_delegation_body(
param_count: usize,
span: Span,
) -> BodyId {
let path = self.lower_qpath(
delegation.id,
&delegation.qself,
&delegation.path,
ParamMode::Optional,
ImplTraitContext::Disallowed(ImplTraitPosition::Path),
None,
);
let block = delegation.body.as_deref();
self.lower_body(|this| {
let mut parameters: Vec<hir::Param<'_>> = Vec::new();
let mut args: Vec<hir::Expr<'hir>> = Vec::new();
let mut parameters: Vec<hir::Param<'_>> = Vec::with_capacity(param_count);
let mut args: Vec<hir::Expr<'_>> = Vec::with_capacity(param_count);
for idx in 0..param_count {
let (param, pat_node_id) = this.generate_param(span);
@@ -264,11 +260,7 @@ fn lower_delegation_body(
};
self_resolver.visit_block(block);
let block = this.lower_block(block, false);
hir::Expr {
hir_id: this.next_id(),
kind: hir::ExprKind::Block(block, None),
span: block.span,
}
this.mk_expr(hir::ExprKind::Block(block, None), block.span)
} else {
let pat_hir_id = this.lower_node_id(pat_node_id);
this.generate_arg(pat_hir_id, span)
@@ -276,43 +268,41 @@ fn lower_delegation_body(
args.push(arg);
}
let args = self.arena.alloc_from_iter(args);
let final_expr = this.generate_call(path, args);
let final_expr = this.finalize_body_lowering(delegation, args, span);
(this.arena.alloc_from_iter(parameters), final_expr)
})
}
fn generate_call(
// Generates fully qualified call for the resulting body.
fn finalize_body_lowering(
&mut self,
path: hir::QPath<'hir>,
args: &'hir [hir::Expr<'hir>],
delegation: &Delegation,
args: Vec<hir::Expr<'hir>>,
span: Span,
) -> hir::Expr<'hir> {
let callee = self.arena.alloc(hir::Expr {
hir_id: self.next_id(),
kind: hir::ExprKind::Path(path),
span: path.span(),
});
let path = self.lower_qpath(
delegation.id,
&delegation.qself,
&delegation.path,
ParamMode::Optional,
ImplTraitContext::Disallowed(ImplTraitPosition::Path),
None,
);
let expr = self.arena.alloc(hir::Expr {
hir_id: self.next_id(),
kind: hir::ExprKind::Call(callee, args),
span: path.span(),
});
let args = self.arena.alloc_from_iter(args);
let path_expr = self.arena.alloc(self.mk_expr(hir::ExprKind::Path(path), span));
let call = self.arena.alloc(self.mk_expr(hir::ExprKind::Call(path_expr, args), span));
let block = self.arena.alloc(hir::Block {
stmts: &[],
expr: Some(expr),
expr: Some(call),
hir_id: self.next_id(),
rules: hir::BlockCheckMode::DefaultBlock,
span: path.span(),
span,
targeted_by_break: false,
});
hir::Expr {
hir_id: self.next_id(),
kind: hir::ExprKind::Block(block, None),
span: path.span(),
}
self.mk_expr(hir::ExprKind::Block(block, None), span)
}
fn generate_delegation_error(
@@ -333,11 +323,7 @@ fn generate_delegation_error(
let header = self.generate_header_error();
let sig = hir::FnSig { decl, header, span };
let body_id = self.lower_body(|this| {
let expr =
hir::Expr { hir_id: this.next_id(), kind: hir::ExprKind::Err(err), span: span };
(&[], expr)
});
let body_id = self.lower_body(|this| (&[], this.mk_expr(hir::ExprKind::Err(err), span)));
DelegationResults { generics, body_id, sig }
}
@@ -349,6 +335,11 @@ fn generate_header_error(&self) -> hir::FnHeader {
abi: abi::Abi::Rust,
}
}
#[inline]
fn mk_expr(&mut self, kind: hir::ExprKind<'hir>, span: Span) -> hir::Expr<'hir> {
hir::Expr { hir_id: self.next_id(), kind, span }
}
}
struct SelfResolver<'a> {
@@ -424,6 +424,14 @@ pub(crate) struct NoPreciseCapturesOnApit {
pub span: Span,
}
#[derive(Diagnostic)]
#[diag(ast_lowering_no_precise_captures_on_rpitit)]
#[note]
pub(crate) struct NoPreciseCapturesOnRpitit {
#[primary_span]
pub span: Span,
}
#[derive(Diagnostic)]
#[diag(ast_lowering_yield_in_closure)]
pub(crate) struct YieldInClosure {
+20
View File
@@ -1594,6 +1594,26 @@ fn lower_opaque_impl_trait(
};
debug!(?captured_lifetimes_to_duplicate);
match fn_kind {
// Deny `use<>` on RPITIT in trait/trait-impl for now.
Some(FnDeclKind::Trait | FnDeclKind::Impl) => {
if let Some(span) = bounds.iter().find_map(|bound| match *bound {
ast::GenericBound::Use(_, span) => Some(span),
_ => None,
}) {
self.tcx.dcx().emit_err(errors::NoPreciseCapturesOnRpitit { span });
}
}
None
| Some(
FnDeclKind::Fn
| FnDeclKind::Inherent
| FnDeclKind::ExternFn
| FnDeclKind::Closure
| FnDeclKind::Pointer,
) => {}
}
self.lower_opaque_inner(
opaque_ty_node_id,
origin,
@@ -464,7 +464,7 @@ fn check_item_safety(&self, span: Span, safety: Safety) {
{
self.dcx().emit_err(errors::InvalidSafetyOnExtern {
item_span: span,
block: self.current_extern_span(),
block: self.current_extern_span().shrink_to_lo(),
});
}
}
+1 -1
View File
@@ -221,7 +221,7 @@ pub enum ExternBlockSuggestion {
pub struct InvalidSafetyOnExtern {
#[primary_span]
pub item_span: Span,
#[suggestion(code = "", applicability = "maybe-incorrect")]
#[suggestion(code = "unsafe ", applicability = "machine-applicable", style = "verbose")]
pub block: Span,
}
@@ -217,7 +217,7 @@ fn print_expr_tup(&mut self, exprs: &[P<ast::Expr>]) {
fn print_expr_call(&mut self, func: &ast::Expr, args: &[P<ast::Expr>], fixup: FixupContext) {
let prec = match func.kind {
ast::ExprKind::Field(..) => parser::PREC_FORCE_PAREN,
_ => parser::PREC_POSTFIX,
_ => parser::PREC_UNAMBIGUOUS,
};
// Independent of parenthesization related to precedence, we must
@@ -257,7 +257,7 @@ fn print_expr_method_call(
// boundaries, `$receiver.method()` can be parsed back as a statement
// containing an expression if and only if `$receiver` can be parsed as
// a statement containing an expression.
self.print_expr_maybe_paren(receiver, parser::PREC_POSTFIX, fixup);
self.print_expr_maybe_paren(receiver, parser::PREC_UNAMBIGUOUS, fixup);
self.word(".");
self.print_ident(segment.ident);
@@ -489,7 +489,7 @@ pub(super) fn print_expr_outer_attr_style(
self.space();
}
MatchKind::Postfix => {
self.print_expr_maybe_paren(expr, parser::PREC_POSTFIX, fixup);
self.print_expr_maybe_paren(expr, parser::PREC_UNAMBIGUOUS, fixup);
self.word_nbsp(".match");
}
}
@@ -549,7 +549,7 @@ pub(super) fn print_expr_outer_attr_style(
self.print_block_with_attrs(blk, attrs);
}
ast::ExprKind::Await(expr, _) => {
self.print_expr_maybe_paren(expr, parser::PREC_POSTFIX, fixup);
self.print_expr_maybe_paren(expr, parser::PREC_UNAMBIGUOUS, fixup);
self.word(".await");
}
ast::ExprKind::Assign(lhs, rhs, _) => {
@@ -568,14 +568,14 @@ pub(super) fn print_expr_outer_attr_style(
self.print_expr_maybe_paren(rhs, prec, fixup.subsequent_subexpression());
}
ast::ExprKind::Field(expr, ident) => {
self.print_expr_maybe_paren(expr, parser::PREC_POSTFIX, fixup);
self.print_expr_maybe_paren(expr, parser::PREC_UNAMBIGUOUS, fixup);
self.word(".");
self.print_ident(*ident);
}
ast::ExprKind::Index(expr, index, _) => {
self.print_expr_maybe_paren(
expr,
parser::PREC_POSTFIX,
parser::PREC_UNAMBIGUOUS,
fixup.leftmost_subexpression(),
);
self.word("[");
@@ -713,7 +713,7 @@ pub(super) fn print_expr_outer_attr_style(
}
}
ast::ExprKind::Try(e) => {
self.print_expr_maybe_paren(e, parser::PREC_POSTFIX, fixup);
self.print_expr_maybe_paren(e, parser::PREC_UNAMBIGUOUS, fixup);
self.word("?")
}
ast::ExprKind::TryBlock(blk) => {
@@ -1151,7 +1151,9 @@ fn suggest_deref_closure_return(&self, diag: &mut Diag<'_>) {
// Get the arguments for the found method, only specifying that `Self` is the receiver type.
let Some(possible_rcvr_ty) = typeck_results.node_type_opt(rcvr.hir_id) else { return };
let args = GenericArgs::for_item(tcx, method_def_id, |param, _| {
if param.index == 0 {
if let ty::GenericParamDefKind::Lifetime = param.kind {
tcx.lifetimes.re_erased.into()
} else if param.index == 0 && param.name == kw::SelfUpper {
possible_rcvr_ty.into()
} else if param.index == closure_param.index {
closure_ty.into()
@@ -1168,7 +1170,7 @@ fn suggest_deref_closure_return(&self, diag: &mut Diag<'_>) {
Obligation::misc(tcx, span, self.mir_def_id(), self.param_env, pred)
}));
if ocx.select_all_or_error().is_empty() {
if ocx.select_all_or_error().is_empty() && count > 0 {
diag.span_suggestion_verbose(
tcx.hir().body(*body).value.peel_blocks().span.shrink_to_lo(),
"dereference the return value",
+1 -1
View File
@@ -5,6 +5,7 @@
#![allow(internal_features)]
#![allow(rustc::diagnostic_outside_of_impl)]
#![allow(rustc::untranslatable_diagnostic)]
#![cfg_attr(bootstrap, feature(lint_reasons))]
#![doc(html_root_url = "https://doc.rust-lang.org/nightly/nightly-rustc/")]
#![doc(rust_logo)]
#![feature(assert_matches)]
@@ -12,7 +13,6 @@
#![feature(decl_macro)]
#![feature(if_let_guard)]
#![feature(let_chains)]
#![feature(lint_reasons)]
#![feature(proc_macro_internals)]
#![feature(proc_macro_quote)]
#![feature(rustdoc_internals)]
+2 -5
View File
@@ -1490,11 +1490,6 @@ fn print_native_static_libs(
let mut lib_args: Vec<_> = all_native_libs
.iter()
.filter(|l| relevant_lib(sess, l))
// Deduplication of successive repeated libraries, see rust-lang/rust#113209
//
// note: we don't use PartialEq/Eq because NativeLib transitively depends on local
// elements like spans, which we don't care about and would make the deduplication impossible
.dedup_by(|l1, l2| l1.name == l2.name && l1.kind == l2.kind && l1.verbatim == l2.verbatim)
.filter_map(|lib| {
let name = lib.name;
match lib.kind {
@@ -1521,6 +1516,8 @@ fn print_native_static_libs(
| NativeLibKind::RawDylib => None,
}
})
// deduplication of consecutive repeated libraries, see rust-lang/rust#113209
.dedup()
.collect();
for path in all_rust_dylibs {
// FIXME deduplicate with add_dynamic_crate
+1 -1
View File
@@ -10,6 +10,7 @@
#![allow(internal_features)]
#![allow(rustc::default_hash_types)]
#![allow(rustc::potential_query_instability)]
#![cfg_attr(bootstrap, feature(lint_reasons))]
#![cfg_attr(not(parallel_compiler), feature(cell_leak))]
#![deny(unsafe_op_in_unsafe_fn)]
#![doc(html_root_url = "https://doc.rust-lang.org/nightly/nightly-rustc/")]
@@ -24,7 +25,6 @@
#![feature(extend_one)]
#![feature(hash_raw_entry)]
#![feature(hasher_prefixfree_extras)]
#![feature(lint_reasons)]
#![feature(macro_metavar_expr)]
#![feature(map_try_insert)]
#![feature(min_specialization)]
+6 -1
View File
@@ -135,7 +135,12 @@ fn emit_future_breakage_report(&mut self, diags: Vec<crate::DiagInner>) {
let data: Vec<FutureBreakageItem<'_>> = diags
.into_iter()
.map(|mut diag| {
if diag.level == crate::Level::Allow {
// Allowed or expected lints don't normally (by definition) emit a lint
// but future incompat lints are special and are emitted anyway.
//
// So to avoid ICEs and confused users we "upgrade" the lint level for
// those `FutureBreakageItem` to warn.
if matches!(diag.level, crate::Level::Allow | crate::Level::Expect(..)) {
diag.level = crate::Level::Warning;
}
FutureBreakageItem {
+2
View File
@@ -232,6 +232,8 @@ macro_rules! declare_features {
(accepted, label_break_value, "1.65.0", Some(48594)),
/// Allows `let...else` statements.
(accepted, let_else, "1.65.0", Some(87335)),
/// Allows using `reason` in lint attributes and the `#[expect(lint)]` lint check.
(accepted, lint_reasons, "CURRENT_RUSTC_VERSION", Some(54503)),
/// Allows `break {expr}` with a value inside `loop`s.
(accepted, loop_break_value, "1.19.0", Some(37339)),
/// Allows use of `?` as the Kleene "at most one" operator in macros.
+3 -3
View File
@@ -369,9 +369,9 @@ pub struct BuiltinAttribute {
allow, Normal, template!(List: r#"lint1, lint2, ..., /*opt*/ reason = "...""#),
DuplicatesOk, EncodeCrossCrate::No,
),
gated!(
expect, Normal, template!(List: r#"lint1, lint2, ..., /*opt*/ reason = "...""#), DuplicatesOk,
EncodeCrossCrate::No, lint_reasons, experimental!(expect)
ungated!(
expect, Normal, template!(List: r#"lint1, lint2, ..., /*opt*/ reason = "...""#),
DuplicatesOk, EncodeCrossCrate::No,
),
ungated!(
forbid, Normal, template!(List: r#"lint1, lint2, ..., /*opt*/ reason = "...""#),
-2
View File
@@ -512,8 +512,6 @@ pub fn internal(&self, feature: Symbol) -> bool {
/// Allows using `#[link(kind = "link-arg", name = "...")]`
/// to pass custom arguments to the linker.
(unstable, link_arg_attribute, "1.76.0", Some(99427)),
/// Allows using `reason` in lint attributes and the `#[expect(lint)]` lint check.
(unstable, lint_reasons, "1.31.0", Some(54503)),
/// Give access to additional metadata about declarative macro meta-variables.
(unstable, macro_metavar_expr, "1.61.0", Some(83527)),
/// Provides a way to concatenate identifiers using metavariable expressions.
@@ -171,10 +171,10 @@ pub(super) fn check_refining_return_position_impl_trait_in_trait<'tcx>(
}
// Resolve any lifetime variables that may have been introduced during normalization.
let Ok((trait_bounds, impl_bounds)) = infcx.fully_resolve((trait_bounds, impl_bounds)) else {
// This code path is not reached in any tests, but may be reachable. If
// this is triggered, it should be converted to `delayed_bug` and the
// triggering case turned into a test.
tcx.dcx().bug("encountered errors when checking RPITIT refinement (resolution)");
// If resolution didn't fully complete, we cannot continue checking RPITIT refinement, and
// delay a bug as the original code contains load-bearing errors.
tcx.dcx().delayed_bug("encountered errors when checking RPITIT refinement (resolution)");
return;
};
// For quicker lookup, use an `IndexSet` (we don't use one earlier because
+12 -5
View File
@@ -211,11 +211,18 @@ fn missing_items_err(
.collect::<Vec<_>>()
.join("`, `");
// `Span` before impl block closing brace.
let hi = full_impl_span.hi() - BytePos(1);
// Point at the place right before the closing brace of the relevant `impl` to suggest
// adding the associated item at the end of its body.
let sugg_sp = full_impl_span.with_lo(hi).with_hi(hi);
let sugg_sp = if let Ok(snippet) = tcx.sess.source_map().span_to_snippet(full_impl_span)
&& snippet.ends_with("}")
{
// `Span` before impl block closing brace.
let hi = full_impl_span.hi() - BytePos(1);
// Point at the place right before the closing brace of the relevant `impl` to suggest
// adding the associated item at the end of its body.
full_impl_span.with_lo(hi).with_hi(hi)
} else {
full_impl_span.shrink_to_hi()
};
// Obtain the level of indentation ending in `sugg_sp`.
let padding =
tcx.sess.source_map().indentation_before(sugg_sp).unwrap_or_else(|| String::new());
+4 -4
View File
@@ -1120,7 +1120,7 @@ fn print_expr_tup(&mut self, exprs: &[hir::Expr<'_>]) {
fn print_expr_call(&mut self, func: &hir::Expr<'_>, args: &[hir::Expr<'_>]) {
let prec = match func.kind {
hir::ExprKind::Field(..) => parser::PREC_FORCE_PAREN,
_ => parser::PREC_POSTFIX,
_ => parser::PREC_UNAMBIGUOUS,
};
self.print_expr_maybe_paren(func, prec);
@@ -1134,7 +1134,7 @@ fn print_expr_method_call(
args: &[hir::Expr<'_>],
) {
let base_args = args;
self.print_expr_maybe_paren(receiver, parser::PREC_POSTFIX);
self.print_expr_maybe_paren(receiver, parser::PREC_UNAMBIGUOUS);
self.word(".");
self.print_ident(segment.ident);
@@ -1478,12 +1478,12 @@ fn print_expr(&mut self, expr: &hir::Expr<'_>) {
self.print_expr_maybe_paren(rhs, prec);
}
hir::ExprKind::Field(expr, ident) => {
self.print_expr_maybe_paren(expr, parser::PREC_POSTFIX);
self.print_expr_maybe_paren(expr, parser::PREC_UNAMBIGUOUS);
self.word(".");
self.print_ident(ident);
}
hir::ExprKind::Index(expr, index, _) => {
self.print_expr_maybe_paren(expr, parser::PREC_POSTFIX);
self.print_expr_maybe_paren(expr, parser::PREC_UNAMBIGUOUS);
self.word("[");
self.print_expr(index);
self.word("]");
+2 -2
View File
@@ -3,7 +3,7 @@
use super::{Expectation, FnCtxt, TupleArgumentsFlag};
use crate::errors;
use rustc_ast::util::parser::PREC_POSTFIX;
use rustc_ast::util::parser::PREC_UNAMBIGUOUS;
use rustc_errors::{Applicability, Diag, ErrorGuaranteed, StashKey};
use rustc_hir::def::{self, CtorKind, Namespace, Res};
use rustc_hir::def_id::DefId;
@@ -656,7 +656,7 @@ fn suggest_call_as_method(
};
if let Ok(rest_snippet) = rest_snippet {
let sugg = if callee_expr.precedence().order() >= PREC_POSTFIX {
let sugg = if callee_expr.precedence().order() >= PREC_UNAMBIGUOUS {
vec![
(up_to_rcvr_span, "".to_string()),
(rest_span, format!(".{}({rest_snippet}", segment.ident)),
+1 -1
View File
@@ -946,7 +946,7 @@ fn cenum_impl_drop_lint(&self, fcx: &FnCtxt<'a, 'tcx>) {
fn lossy_provenance_ptr2int_lint(&self, fcx: &FnCtxt<'a, 'tcx>, t_c: ty::cast::IntTy) {
let expr_prec = self.expr.precedence().order();
let needs_parens = expr_prec < rustc_ast::util::parser::PREC_POSTFIX;
let needs_parens = expr_prec < rustc_ast::util::parser::PREC_UNAMBIGUOUS;
let needs_cast = !matches!(t_c, ty::cast::IntTy::U(ty::UintTy::Usize));
let cast_span = self.expr_span.shrink_to_hi().to(self.cast_span);
@@ -9,7 +9,7 @@
use core::cmp::min;
use core::iter;
use hir::def_id::LocalDefId;
use rustc_ast::util::parser::{ExprPrecedence, PREC_POSTFIX};
use rustc_ast::util::parser::{ExprPrecedence, PREC_UNAMBIGUOUS};
use rustc_data_structures::packed::Pu128;
use rustc_errors::{Applicability, Diag, MultiSpan};
use rustc_hir as hir;
@@ -1329,7 +1329,7 @@ pub(crate) fn suggest_into(
{
let span = expr.span.find_oldest_ancestor_in_same_ctxt();
let mut sugg = if expr.precedence().order() >= PREC_POSTFIX {
let mut sugg = if expr.precedence().order() >= PREC_UNAMBIGUOUS {
vec![(span.shrink_to_hi(), ".into()".to_owned())]
} else {
vec![
@@ -2868,7 +2868,7 @@ pub(crate) fn suggest_cast(
"change the type of the numeric literal from `{checked_ty}` to `{expected_ty}`",
);
let close_paren = if expr.precedence().order() < PREC_POSTFIX {
let close_paren = if expr.precedence().order() < PREC_UNAMBIGUOUS {
sugg.push((expr.span.shrink_to_lo(), "(".to_string()));
")"
} else {
@@ -2893,7 +2893,7 @@ pub(crate) fn suggest_cast(
let len = src.trim_end_matches(&checked_ty.to_string()).len();
expr.span.with_lo(expr.span.lo() + BytePos(len as u32))
},
if expr.precedence().order() < PREC_POSTFIX {
if expr.precedence().order() < PREC_UNAMBIGUOUS {
// Readd `)`
format!("{expected_ty})")
} else {
@@ -209,8 +209,10 @@ pub(super) fn suggest_await_on_expect_found(
}
(Some(ty), _) if self.same_type_modulo_infer(ty, exp_found.found) => match cause.code()
{
ObligationCauseCode::Pattern { span: Some(then_span), .. } => {
Some(ConsiderAddingAwait::FutureSugg { span: then_span.shrink_to_hi() })
ObligationCauseCode::Pattern { span: Some(then_span), origin_expr, .. } => {
origin_expr.then_some(ConsiderAddingAwait::FutureSugg {
span: then_span.shrink_to_hi(),
})
}
ObligationCauseCode::IfExpression(box IfExpressionCause { then_id, .. }) => {
let then_span = self.find_block_span_from_hir_id(*then_id);
-5
View File
@@ -3,7 +3,6 @@
use rustc_middle::ty::TyCtxt;
use rustc_session::lint::builtin::UNFULFILLED_LINT_EXPECTATIONS;
use rustc_session::lint::LintExpectationId;
use rustc_span::symbol::sym;
use rustc_span::Symbol;
pub(crate) fn provide(providers: &mut Providers) {
@@ -11,10 +10,6 @@ pub(crate) fn provide(providers: &mut Providers) {
}
fn check_expectations(tcx: TyCtxt<'_>, tool_filter: Option<Symbol>) {
if !tcx.features().active(sym::lint_reasons) {
return;
}
let lint_expectations = tcx.lint_expectations(());
let fulfilled_expectations = tcx.dcx().steal_fulfilled_expectation_ids();
-10
View File
@@ -37,7 +37,6 @@
},
Level, Lint, LintExpectationId, LintId,
};
use rustc_session::parse::feature_err;
use rustc_session::Session;
use rustc_span::symbol::{sym, Symbol};
use rustc_span::{Span, DUMMY_SP};
@@ -788,15 +787,6 @@ fn add(&mut self, attrs: &[ast::Attribute], is_crate_node: bool, source_hir_id:
ast::MetaItemKind::NameValue(ref name_value) => {
if item.path == sym::reason {
if let ast::LitKind::Str(rationale, _) = name_value.kind {
if !self.features.lint_reasons {
feature_err(
&self.sess,
sym::lint_reasons,
item.span,
"lint reasons are experimental",
)
.emit();
}
reason = Some(rationale);
} else {
sess.dcx().emit_err(MalformedAttribute {
+8 -18
View File
@@ -608,13 +608,13 @@
}
declare_lint! {
/// The `unfulfilled_lint_expectations` lint detects lint trigger expectations
/// that have not been fulfilled.
/// The `unfulfilled_lint_expectations` lint detects when a lint expectation is
/// unfulfilled.
///
/// ### Example
///
/// ```rust
/// #![feature(lint_reasons)]
/// #![cfg_attr(bootstrap, feature(lint_reasons))]
///
/// #[expect(unused_variables)]
/// let x = 10;
@@ -625,24 +625,14 @@
///
/// ### Explanation
///
/// It was expected that the marked code would emit a lint. This expectation
/// has not been fulfilled.
/// The `#[expect]` attribute can be used to create a lint expectation. The
/// expectation is fulfilled, if a `#[warn]` attribute at the same location
/// would result in a lint emission. If the expectation is unfulfilled,
/// because no lint was emitted, this lint will be emitted on the attribute.
///
/// The `expect` attribute can be removed if this is intended behavior otherwise
/// it should be investigated why the expected lint is no longer issued.
///
/// In rare cases, the expectation might be emitted at a different location than
/// shown in the shown code snippet. In most cases, the `#[expect]` attribute
/// works when added to the outer scope. A few lints can only be expected
/// on a crate level.
///
/// Part of RFC 2383. The progress is being tracked in [#54503]
///
/// [#54503]: https://github.com/rust-lang/rust/issues/54503
pub UNFULFILLED_LINT_EXPECTATIONS,
Warn,
"unfulfilled lint expectation",
@feature_gate = rustc_span::sym::lint_reasons;
"unfulfilled lint expectation"
}
declare_lint! {
@@ -13,10 +13,7 @@ struct RustArchiveMember {
Archive::Child Child;
RustArchiveMember()
: Filename(nullptr), Name(nullptr),
Child(nullptr, nullptr, nullptr)
{
}
: Filename(nullptr), Name(nullptr), Child(nullptr, nullptr, nullptr) {}
~RustArchiveMember() {}
};
@@ -27,11 +24,8 @@ struct RustArchiveIterator {
std::unique_ptr<Error> Err;
RustArchiveIterator(Archive::child_iterator Cur, Archive::child_iterator End,
std::unique_ptr<Error> Err)
: First(true),
Cur(Cur),
End(End),
Err(std::move(Err)) {}
std::unique_ptr<Error> Err)
: First(true), Cur(Cur), End(End), Err(std::move(Err)) {}
};
enum class LLVMRustArchiveKind {
@@ -66,8 +60,8 @@ typedef Archive::Child const *LLVMRustArchiveChildConstRef;
typedef RustArchiveIterator *LLVMRustArchiveIteratorRef;
extern "C" LLVMRustArchiveRef LLVMRustOpenArchive(char *Path) {
ErrorOr<std::unique_ptr<MemoryBuffer>> BufOr =
MemoryBuffer::getFile(Path, /*IsText*/false, /*RequiresNullTerminator=*/false);
ErrorOr<std::unique_ptr<MemoryBuffer>> BufOr = MemoryBuffer::getFile(
Path, /*IsText*/ false, /*RequiresNullTerminator=*/false);
if (!BufOr) {
LLVMRustSetLastError(BufOr.getError().message().c_str());
return nullptr;
@@ -146,8 +140,8 @@ extern "C" const char *
LLVMRustArchiveChildName(LLVMRustArchiveChildConstRef Child, size_t *Size) {
Expected<StringRef> NameOrErr = Child->getName();
if (!NameOrErr) {
// rustc_codegen_llvm currently doesn't use this error string, but it might be
// useful in the future, and in the meantime this tells LLVM that the
// rustc_codegen_llvm currently doesn't use this error string, but it might
// be useful in the future, and in the meantime this tells LLVM that the
// error was not ignored and that it shouldn't abort the process.
LLVMRustSetLastError(toString(NameOrErr.takeError()).c_str());
return nullptr;
@@ -172,10 +166,9 @@ extern "C" void LLVMRustArchiveMemberFree(LLVMRustArchiveMemberRef Member) {
delete Member;
}
extern "C" LLVMRustResult
LLVMRustWriteArchive(char *Dst, size_t NumMembers,
const LLVMRustArchiveMemberRef *NewMembers,
bool WriteSymbtab, LLVMRustArchiveKind RustKind, bool isEC) {
extern "C" LLVMRustResult LLVMRustWriteArchive(
char *Dst, size_t NumMembers, const LLVMRustArchiveMemberRef *NewMembers,
bool WriteSymbtab, LLVMRustArchiveKind RustKind, bool isEC) {
std::vector<NewArchiveMember> Members;
auto Kind = fromRust(RustKind);
@@ -206,8 +199,10 @@ LLVMRustWriteArchive(char *Dst, size_t NumMembers,
#if LLVM_VERSION_LT(18, 0)
auto Result = writeArchive(Dst, Members, WriteSymbtab, Kind, true, false);
#else
auto SymtabMode = WriteSymbtab ? SymtabWritingMode::NormalSymtab : SymtabWritingMode::NoSymtab;
auto Result = writeArchive(Dst, Members, SymtabMode, Kind, true, false, nullptr, isEC);
auto SymtabMode = WriteSymbtab ? SymtabWritingMode::NormalSymtab
: SymtabWritingMode::NoSymtab;
auto Result =
writeArchive(Dst, Members, SymtabMode, Kind, true, false, nullptr, isEC);
#endif
if (!Result)
return LLVMRustResult::Success;
+5 -13
View File
@@ -1,5 +1,5 @@
#include "SuppressLLVMWarnings.h"
#include "llvm/Linker/Linker.h"
#include "SuppressLLVMWarnings.h"
#include "LLVMWrapper.h"
@@ -9,26 +9,18 @@ struct RustLinker {
Linker L;
LLVMContext &Ctx;
RustLinker(Module &M) :
L(M),
Ctx(M.getContext())
{}
RustLinker(Module &M) : L(M), Ctx(M.getContext()) {}
};
extern "C" RustLinker*
LLVMRustLinkerNew(LLVMModuleRef DstRef) {
extern "C" RustLinker *LLVMRustLinkerNew(LLVMModuleRef DstRef) {
Module *Dst = unwrap(DstRef);
return new RustLinker(*Dst);
}
extern "C" void
LLVMRustLinkerFree(RustLinker *L) {
delete L;
}
extern "C" void LLVMRustLinkerFree(RustLinker *L) { delete L; }
extern "C" bool
LLVMRustLinkerAdd(RustLinker *L, char *BC, size_t Len) {
extern "C" bool LLVMRustLinkerAdd(RustLinker *L, char *BC, size_t Len) {
std::unique_ptr<MemoryBuffer> Buf =
MemoryBuffer::getMemBufferCopy(StringRef(BC, Len));
+228 -244
View File
@@ -2,23 +2,25 @@
#include <cstddef>
#include <iomanip>
#include <vector>
#include <set>
#include <vector>
#include "LLVMWrapper.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/Bitcode/BitcodeWriter.h"
#include "llvm/CodeGen/CommandFlags.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include "llvm/IR/AutoUpgrade.h"
#include "llvm/IR/AssemblyAnnotationWriter.h"
#include "llvm/IR/AutoUpgrade.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Verifier.h"
#include "llvm/LTO/LTO.h"
#include "llvm/MC/TargetRegistry.h"
#include "llvm/Object/ObjectFile.h"
#include "llvm/Object/IRObjectFile.h"
#include "llvm/Object/ObjectFile.h"
#include "llvm/Passes/PassBuilder.h"
#include "llvm/Passes/PassPlugin.h"
#include "llvm/Passes/StandardInstrumentations.h"
@@ -33,26 +35,24 @@
#include "llvm/Transforms/IPO/ThinLTOBitcodeWriter.h"
#include "llvm/Transforms/Utils/AddDiscriminators.h"
#include "llvm/Transforms/Utils/FunctionImportUtils.h"
#include "llvm/LTO/LTO.h"
#include "llvm/Bitcode/BitcodeWriter.h"
#if LLVM_VERSION_GE(18, 0)
#include "llvm/TargetParser/Host.h"
#endif
#include "llvm/Support/TimeProfiler.h"
#include "llvm/Transforms/Instrumentation.h"
#include "llvm/Transforms/Instrumentation/AddressSanitizer.h"
#include "llvm/Transforms/Instrumentation/DataFlowSanitizer.h"
#include "llvm/Support/TimeProfiler.h"
#if LLVM_VERSION_GE(19, 0)
#include "llvm/Support/PGOOptions.h"
#endif
#include "llvm/Transforms/Instrumentation/GCOVProfiler.h"
#include "llvm/Transforms/Instrumentation/InstrProfiling.h"
#include "llvm/Transforms/Instrumentation/ThreadSanitizer.h"
#include "llvm/Transforms/Instrumentation/MemorySanitizer.h"
#include "llvm/Transforms/Instrumentation/HWAddressSanitizer.h"
#include "llvm/Transforms/Instrumentation/InstrProfiling.h"
#include "llvm/Transforms/Instrumentation/MemorySanitizer.h"
#include "llvm/Transforms/Instrumentation/ThreadSanitizer.h"
#include "llvm/Transforms/Utils.h"
#include "llvm/Transforms/Utils/CanonicalizeAliases.h"
#include "llvm/Transforms/Utils/NameAnonGlobals.h"
#include "llvm/Transforms/Utils.h"
using namespace llvm;
@@ -74,7 +74,7 @@ extern "C" void LLVMRustTimeTraceProfilerFinishThread() {
timeTraceProfilerFinishThread();
}
extern "C" void LLVMRustTimeTraceProfilerFinish(const char* FileName) {
extern "C" void LLVMRustTimeTraceProfilerFinish(const char *FileName) {
auto FN = StringRef(FileName);
std::error_code EC;
auto OS = raw_fd_ostream(FN, EC, sys::fs::CD_CreateAlways);
@@ -188,7 +188,7 @@ extern "C" void LLVMRustTimeTraceProfilerFinish(const char* FileName) {
SUBTARGET_HEXAGON \
SUBTARGET_XTENSA \
SUBTARGET_RISCV \
SUBTARGET_LOONGARCH \
SUBTARGET_LOONGARCH
#define SUBTARGET(x) \
namespace llvm { \
@@ -215,8 +215,7 @@ enum class LLVMRustCodeModel {
None,
};
static std::optional<CodeModel::Model>
fromRust(LLVMRustCodeModel Model) {
static std::optional<CodeModel::Model> fromRust(LLVMRustCodeModel Model) {
switch (Model) {
case LLVMRustCodeModel::Tiny:
return CodeModel::Tiny;
@@ -243,9 +242,9 @@ enum class LLVMRustCodeGenOptLevel {
};
#if LLVM_VERSION_GE(18, 0)
using CodeGenOptLevelEnum = llvm::CodeGenOptLevel;
using CodeGenOptLevelEnum = llvm::CodeGenOptLevel;
#else
using CodeGenOptLevelEnum = llvm::CodeGenOpt::Level;
using CodeGenOptLevelEnum = llvm::CodeGenOpt::Level;
#endif
static CodeGenOptLevelEnum fromRust(LLVMRustCodeGenOptLevel Level) {
@@ -319,48 +318,49 @@ static Reloc::Model fromRust(LLVMRustRelocModel RustReloc) {
}
/// getLongestEntryLength - Return the length of the longest entry in the table.
template<typename KV>
static size_t getLongestEntryLength(ArrayRef<KV> Table) {
template <typename KV> static size_t getLongestEntryLength(ArrayRef<KV> Table) {
size_t MaxLen = 0;
for (auto &I : Table)
MaxLen = std::max(MaxLen, std::strlen(I.Key));
return MaxLen;
}
using PrintBackendInfo = void(void*, const char* Data, size_t Len);
using PrintBackendInfo = void(void *, const char *Data, size_t Len);
extern "C" void LLVMRustPrintTargetCPUs(LLVMTargetMachineRef TM,
const char* TargetCPU,
PrintBackendInfo Print,
void* Out) {
const char *TargetCPU,
PrintBackendInfo Print, void *Out) {
const TargetMachine *Target = unwrap(TM);
const Triple::ArchType HostArch = Triple(sys::getDefaultTargetTriple()).getArch();
const Triple::ArchType HostArch =
Triple(sys::getDefaultTargetTriple()).getArch();
const Triple::ArchType TargetArch = Target->getTargetTriple().getArch();
std::ostringstream Buf;
const MCSubtargetInfo *MCInfo = Target->getMCSubtargetInfo();
const ArrayRef<SubtargetSubTypeKV> CPUTable = MCInfo->getAllProcessorDescriptions();
const ArrayRef<SubtargetSubTypeKV> CPUTable =
MCInfo->getAllProcessorDescriptions();
unsigned MaxCPULen = getLongestEntryLength(CPUTable);
Buf << "Available CPUs for this target:\n";
// Don't print the "native" entry when the user specifies --target with a
// different arch since that could be wrong or misleading.
if (HostArch == TargetArch) {
MaxCPULen = std::max(MaxCPULen, (unsigned) std::strlen("native"));
MaxCPULen = std::max(MaxCPULen, (unsigned)std::strlen("native"));
const StringRef HostCPU = sys::getHostCPUName();
Buf << " " << std::left << std::setw(MaxCPULen) << "native"
<< " - Select the CPU of the current host "
"(currently " << HostCPU.str() << ").\n";
"(currently "
<< HostCPU.str() << ").\n";
}
for (auto &CPU : CPUTable) {
// Compare cpu against current target to label the default
if (strcmp(CPU.Key, TargetCPU) == 0) {
Buf << " " << std::left << std::setw(MaxCPULen) << CPU.Key
<< " - This is the default target CPU for the current build target "
"(currently " << Target->getTargetTriple().str() << ").";
}
else {
"(currently "
<< Target->getTargetTriple().str() << ").";
} else {
Buf << " " << CPU.Key;
}
Buf << "\n";
@@ -374,7 +374,8 @@ extern "C" size_t LLVMRustGetTargetFeaturesCount(LLVMTargetMachineRef TM) {
#if LLVM_VERSION_GE(18, 0)
const TargetMachine *Target = unwrap(TM);
const MCSubtargetInfo *MCInfo = Target->getMCSubtargetInfo();
const ArrayRef<SubtargetFeatureKV> FeatTable = MCInfo->getAllProcessorFeatures();
const ArrayRef<SubtargetFeatureKV> FeatTable =
MCInfo->getAllProcessorFeatures();
return FeatTable.size();
#else
return 0;
@@ -382,18 +383,20 @@ extern "C" size_t LLVMRustGetTargetFeaturesCount(LLVMTargetMachineRef TM) {
}
extern "C" void LLVMRustGetTargetFeature(LLVMTargetMachineRef TM, size_t Index,
const char** Feature, const char** Desc) {
const char **Feature,
const char **Desc) {
#if LLVM_VERSION_GE(18, 0)
const TargetMachine *Target = unwrap(TM);
const MCSubtargetInfo *MCInfo = Target->getMCSubtargetInfo();
const ArrayRef<SubtargetFeatureKV> FeatTable = MCInfo->getAllProcessorFeatures();
const ArrayRef<SubtargetFeatureKV> FeatTable =
MCInfo->getAllProcessorFeatures();
const SubtargetFeatureKV Feat = FeatTable[Index];
*Feature = Feat.Key;
*Desc = Feat.Desc;
#endif
}
extern "C" const char* LLVMRustGetHostCPUName(size_t *len) {
extern "C" const char *LLVMRustGetHostCPUName(size_t *len) {
StringRef Name = sys::getHostCPUName();
*len = Name.size();
return Name.data();
@@ -403,19 +406,11 @@ extern "C" LLVMTargetMachineRef LLVMRustCreateTargetMachine(
const char *TripleStr, const char *CPU, const char *Feature,
const char *ABIStr, LLVMRustCodeModel RustCM, LLVMRustRelocModel RustReloc,
LLVMRustCodeGenOptLevel RustOptLevel, bool UseSoftFloat,
bool FunctionSections,
bool DataSections,
bool UniqueSectionNames,
bool TrapUnreachable,
bool Singlethread,
bool AsmComments,
bool EmitStackSizeSection,
bool RelaxELFRelocations,
bool UseInitArray,
const char *SplitDwarfFile,
const char *OutputObjFile,
const char *DebugInfoCompression,
bool UseEmulatedTls,
bool FunctionSections, bool DataSections, bool UniqueSectionNames,
bool TrapUnreachable, bool Singlethread, bool AsmComments,
bool EmitStackSizeSection, bool RelaxELFRelocations, bool UseInitArray,
const char *SplitDwarfFile, const char *OutputObjFile,
const char *DebugInfoCompression, bool UseEmulatedTls,
const char *ArgsCstrBuff, size_t ArgsCstrBuffLen) {
auto OptLevel = fromRust(RustOptLevel);
@@ -444,18 +439,20 @@ extern "C" LLVMTargetMachineRef LLVMRustCreateTargetMachine(
Options.MCOptions.PreserveAsmComments = AsmComments;
Options.MCOptions.ABIName = ABIStr;
if (SplitDwarfFile) {
Options.MCOptions.SplitDwarfFile = SplitDwarfFile;
Options.MCOptions.SplitDwarfFile = SplitDwarfFile;
}
if (OutputObjFile) {
Options.ObjectFilenameForDebug = OutputObjFile;
Options.ObjectFilenameForDebug = OutputObjFile;
}
if (!strcmp("zlib", DebugInfoCompression) && llvm::compression::zlib::isAvailable()) {
if (!strcmp("zlib", DebugInfoCompression) &&
llvm::compression::zlib::isAvailable()) {
#if LLVM_VERSION_GE(19, 0)
Options.MCOptions.CompressDebugSections = DebugCompressionType::Zlib;
#else
Options.CompressDebugSections = DebugCompressionType::Zlib;
#endif
} else if (!strcmp("zstd", DebugInfoCompression) && llvm::compression::zstd::isAvailable()) {
} else if (!strcmp("zstd", DebugInfoCompression) &&
llvm::compression::zstd::isAvailable()) {
#if LLVM_VERSION_GE(19, 0)
Options.MCOptions.CompressDebugSections = DebugCompressionType::Zstd;
#else
@@ -499,24 +496,21 @@ extern "C" LLVMTargetMachineRef LLVMRustCreateTargetMachine(
Options.EmitStackSizeSection = EmitStackSizeSection;
if (ArgsCstrBuff != nullptr)
{
if (ArgsCstrBuff != nullptr) {
int buffer_offset = 0;
assert(ArgsCstrBuff[ArgsCstrBuffLen - 1] == '\0');
const size_t arg0_len = std::strlen(ArgsCstrBuff);
char* arg0 = new char[arg0_len + 1];
char *arg0 = new char[arg0_len + 1];
memcpy(arg0, ArgsCstrBuff, arg0_len);
arg0[arg0_len] = '\0';
buffer_offset += arg0_len + 1;
const int num_cmd_arg_strings =
std::count(&ArgsCstrBuff[buffer_offset], &ArgsCstrBuff[ArgsCstrBuffLen], '\0');
const int num_cmd_arg_strings = std::count(
&ArgsCstrBuff[buffer_offset], &ArgsCstrBuff[ArgsCstrBuffLen], '\0');
std::string* cmd_arg_strings = new std::string[num_cmd_arg_strings];
for (int i = 0; i < num_cmd_arg_strings; ++i)
{
std::string *cmd_arg_strings = new std::string[num_cmd_arg_strings];
for (int i = 0; i < num_cmd_arg_strings; ++i) {
assert(buffer_offset < ArgsCstrBuffLen);
const int len = std::strlen(ArgsCstrBuff + buffer_offset);
cmd_arg_strings[i] = std::string(&ArgsCstrBuff[buffer_offset], len);
@@ -527,7 +521,7 @@ extern "C" LLVMTargetMachineRef LLVMRustCreateTargetMachine(
Options.MCOptions.Argv0 = arg0;
Options.MCOptions.CommandLineArgs =
llvm::ArrayRef<std::string>(cmd_arg_strings, num_cmd_arg_strings);
llvm::ArrayRef<std::string>(cmd_arg_strings, num_cmd_arg_strings);
}
TargetMachine *TM = TheTarget->createTargetMachine(
@@ -537,7 +531,7 @@ extern "C" LLVMTargetMachineRef LLVMRustCreateTargetMachine(
extern "C" void LLVMRustDisposeTargetMachine(LLVMTargetMachineRef TM) {
MCTargetOptions& MCOptions = unwrap(TM)->Options.MCOptions;
MCTargetOptions &MCOptions = unwrap(TM)->Options.MCOptions;
delete[] MCOptions.Argv0;
delete[] MCOptions.CommandLineArgs.data();
@@ -613,7 +607,7 @@ LLVMRustWriteOutputFile(LLVMTargetMachineRef Target, LLVMPassManagerRef PMR,
auto DOS = raw_fd_ostream(DwoPath, EC, sys::fs::OF_None);
EC.clear();
if (EC)
ErrorInfo = EC.message();
ErrorInfo = EC.message();
if (ErrorInfo != "") {
LLVMRustSetLastError(ErrorInfo.c_str());
return LLVMRustResult::Failure;
@@ -633,10 +627,12 @@ LLVMRustWriteOutputFile(LLVMTargetMachineRef Target, LLVMPassManagerRef PMR,
return LLVMRustResult::Success;
}
extern "C" typedef void (*LLVMRustSelfProfileBeforePassCallback)(void*, // LlvmSelfProfiler
const char*, // pass name
const char*); // IR name
extern "C" typedef void (*LLVMRustSelfProfileAfterPassCallback)(void*); // LlvmSelfProfiler
extern "C" typedef void (*LLVMRustSelfProfileBeforePassCallback)(
void *, // LlvmSelfProfiler
const char *, // pass name
const char *); // IR name
extern "C" typedef void (*LLVMRustSelfProfileAfterPassCallback)(
void *); // LlvmSelfProfiler
std::string LLVMRustwrappedIrGetName(const llvm::Any &WrappedIr) {
if (const auto *Cast = any_cast<const Module *>(&WrappedIr))
@@ -650,35 +646,35 @@ std::string LLVMRustwrappedIrGetName(const llvm::Any &WrappedIr) {
return "<UNKNOWN>";
}
void LLVMSelfProfileInitializeCallbacks(
PassInstrumentationCallbacks& PIC, void* LlvmSelfProfiler,
PassInstrumentationCallbacks &PIC, void *LlvmSelfProfiler,
LLVMRustSelfProfileBeforePassCallback BeforePassCallback,
LLVMRustSelfProfileAfterPassCallback AfterPassCallback) {
PIC.registerBeforeNonSkippedPassCallback([LlvmSelfProfiler, BeforePassCallback](
StringRef Pass, llvm::Any Ir) {
std::string PassName = Pass.str();
std::string IrName = LLVMRustwrappedIrGetName(Ir);
BeforePassCallback(LlvmSelfProfiler, PassName.c_str(), IrName.c_str());
});
PIC.registerBeforeNonSkippedPassCallback(
[LlvmSelfProfiler, BeforePassCallback](StringRef Pass, llvm::Any Ir) {
std::string PassName = Pass.str();
std::string IrName = LLVMRustwrappedIrGetName(Ir);
BeforePassCallback(LlvmSelfProfiler, PassName.c_str(), IrName.c_str());
});
PIC.registerAfterPassCallback(
[LlvmSelfProfiler, AfterPassCallback](StringRef Pass, llvm::Any IR,
const PreservedAnalyses &Preserved) {
[LlvmSelfProfiler, AfterPassCallback](
StringRef Pass, llvm::Any IR, const PreservedAnalyses &Preserved) {
AfterPassCallback(LlvmSelfProfiler);
});
PIC.registerAfterPassInvalidatedCallback(
[LlvmSelfProfiler, AfterPassCallback](StringRef Pass, const PreservedAnalyses &Preserved) {
[LlvmSelfProfiler,
AfterPassCallback](StringRef Pass, const PreservedAnalyses &Preserved) {
AfterPassCallback(LlvmSelfProfiler);
});
PIC.registerBeforeAnalysisCallback([LlvmSelfProfiler, BeforePassCallback](
StringRef Pass, llvm::Any Ir) {
std::string PassName = Pass.str();
std::string IrName = LLVMRustwrappedIrGetName(Ir);
BeforePassCallback(LlvmSelfProfiler, PassName.c_str(), IrName.c_str());
});
PIC.registerBeforeAnalysisCallback(
[LlvmSelfProfiler, BeforePassCallback](StringRef Pass, llvm::Any Ir) {
std::string PassName = Pass.str();
std::string IrName = LLVMRustwrappedIrGetName(Ir);
BeforePassCallback(LlvmSelfProfiler, PassName.c_str(), IrName.c_str());
});
PIC.registerAfterAnalysisCallback(
[LlvmSelfProfiler, AfterPassCallback](StringRef Pass, llvm::Any Ir) {
@@ -704,7 +700,7 @@ struct LLVMRustSanitizerOptions {
bool SanitizeKCFI;
bool SanitizeMemory;
bool SanitizeMemoryRecover;
int SanitizeMemoryTrackOrigins;
int SanitizeMemoryTrackOrigins;
bool SanitizeThread;
bool SanitizeHWAddress;
bool SanitizeHWAddressRecover;
@@ -712,31 +708,25 @@ struct LLVMRustSanitizerOptions {
bool SanitizeKernelAddressRecover;
};
extern "C" LLVMRustResult
LLVMRustOptimize(
LLVMModuleRef ModuleRef,
LLVMTargetMachineRef TMRef,
LLVMRustPassBuilderOptLevel OptLevelRust,
LLVMRustOptStage OptStage,
bool IsLinkerPluginLTO,
bool NoPrepopulatePasses, bool VerifyIR, bool UseThinLTOBuffers,
bool MergeFunctions, bool UnrollLoops, bool SLPVectorize, bool LoopVectorize,
bool DisableSimplifyLibCalls, bool EmitLifetimeMarkers,
LLVMRustSanitizerOptions *SanitizerOptions,
const char *PGOGenPath, const char *PGOUsePath,
bool InstrumentCoverage, const char *InstrProfileOutput,
bool InstrumentGCOV,
extern "C" LLVMRustResult LLVMRustOptimize(
LLVMModuleRef ModuleRef, LLVMTargetMachineRef TMRef,
LLVMRustPassBuilderOptLevel OptLevelRust, LLVMRustOptStage OptStage,
bool IsLinkerPluginLTO, bool NoPrepopulatePasses, bool VerifyIR,
bool UseThinLTOBuffers, bool MergeFunctions, bool UnrollLoops,
bool SLPVectorize, bool LoopVectorize, bool DisableSimplifyLibCalls,
bool EmitLifetimeMarkers, LLVMRustSanitizerOptions *SanitizerOptions,
const char *PGOGenPath, const char *PGOUsePath, bool InstrumentCoverage,
const char *InstrProfileOutput, bool InstrumentGCOV,
const char *PGOSampleUsePath, bool DebugInfoForProfiling,
void* LlvmSelfProfiler,
void *LlvmSelfProfiler,
LLVMRustSelfProfileBeforePassCallback BeforePassCallback,
LLVMRustSelfProfileAfterPassCallback AfterPassCallback,
const char *ExtraPasses, size_t ExtraPassesLen,
const char *LLVMPlugins, size_t LLVMPluginsLen) {
const char *ExtraPasses, size_t ExtraPassesLen, const char *LLVMPlugins,
size_t LLVMPluginsLen) {
Module *TheModule = unwrap(ModuleRef);
TargetMachine *TM = unwrap(TMRef);
OptimizationLevel OptLevel = fromRust(OptLevelRust);
PipelineTuningOptions PTO;
PTO.LoopUnrolling = UnrollLoops;
PTO.LoopInterleaving = UnrollLoops;
@@ -751,38 +741,39 @@ LLVMRustOptimize(
StandardInstrumentations SI(TheModule->getContext(), DebugPassManager);
SI.registerCallbacks(PIC);
if (LlvmSelfProfiler){
LLVMSelfProfileInitializeCallbacks(PIC,LlvmSelfProfiler,BeforePassCallback,AfterPassCallback);
if (LlvmSelfProfiler) {
LLVMSelfProfileInitializeCallbacks(PIC, LlvmSelfProfiler,
BeforePassCallback, AfterPassCallback);
}
std::optional<PGOOptions> PGOOpt;
auto FS = vfs::getRealFileSystem();
if (PGOGenPath) {
assert(!PGOUsePath && !PGOSampleUsePath);
PGOOpt = PGOOptions(PGOGenPath, "", "", "", FS,
PGOOptions::IRInstr, PGOOptions::NoCSAction,
PGOOpt = PGOOptions(PGOGenPath, "", "", "", FS, PGOOptions::IRInstr,
PGOOptions::NoCSAction,
#if LLVM_VERSION_GE(19, 0)
PGOOptions::ColdFuncOpt::Default,
#endif
DebugInfoForProfiling);
} else if (PGOUsePath) {
assert(!PGOSampleUsePath);
PGOOpt = PGOOptions(PGOUsePath, "", "", "", FS,
PGOOptions::IRUse, PGOOptions::NoCSAction,
PGOOpt = PGOOptions(PGOUsePath, "", "", "", FS, PGOOptions::IRUse,
PGOOptions::NoCSAction,
#if LLVM_VERSION_GE(19, 0)
PGOOptions::ColdFuncOpt::Default,
#endif
DebugInfoForProfiling);
} else if (PGOSampleUsePath) {
PGOOpt = PGOOptions(PGOSampleUsePath, "", "", "", FS,
PGOOptions::SampleUse, PGOOptions::NoCSAction,
PGOOpt = PGOOptions(PGOSampleUsePath, "", "", "", FS, PGOOptions::SampleUse,
PGOOptions::NoCSAction,
#if LLVM_VERSION_GE(19, 0)
PGOOptions::ColdFuncOpt::Default,
#endif
DebugInfoForProfiling);
} else if (DebugInfoForProfiling) {
PGOOpt = PGOOptions("", "", "", "", FS,
PGOOptions::NoAction, PGOOptions::NoCSAction,
PGOOpt = PGOOptions("", "", "", "", FS, PGOOptions::NoAction,
PGOOptions::NoCSAction,
#if LLVM_VERSION_GE(19, 0)
PGOOptions::ColdFuncOpt::Default,
#endif
@@ -799,7 +790,7 @@ LLVMRustOptimize(
auto PluginsStr = StringRef(LLVMPlugins, LLVMPluginsLen);
SmallVector<StringRef> Plugins;
PluginsStr.split(Plugins, ',', -1, false);
for (auto PluginPath: Plugins) {
for (auto PluginPath : Plugins) {
auto Plugin = PassPlugin::Load(PluginPath.str());
if (!Plugin) {
auto Err = Plugin.takeError();
@@ -814,7 +805,8 @@ LLVMRustOptimize(
FAM.registerPass([&] { return PB.buildDefaultAAPipeline(); });
Triple TargetTriple(TheModule->getTargetTriple());
std::unique_ptr<TargetLibraryInfoImpl> TLII(new TargetLibraryInfoImpl(TargetTriple));
std::unique_ptr<TargetLibraryInfoImpl> TLII(
new TargetLibraryInfoImpl(TargetTriple));
if (DisableSimplifyLibCalls)
TLII->disableAllFunctions();
FAM.registerPass([&] { return TargetLibraryAnalysis(*TLII); });
@@ -825,58 +817,53 @@ LLVMRustOptimize(
PB.registerLoopAnalyses(LAM);
PB.crossRegisterProxies(LAM, FAM, CGAM, MAM);
// We manually collect pipeline callbacks so we can apply them at O0, where the
// PassBuilder does not create a pipeline.
// We manually collect pipeline callbacks so we can apply them at O0, where
// the PassBuilder does not create a pipeline.
std::vector<std::function<void(ModulePassManager &, OptimizationLevel)>>
PipelineStartEPCallbacks;
std::vector<std::function<void(ModulePassManager &, OptimizationLevel)>>
OptimizerLastEPCallbacks;
if (!IsLinkerPluginLTO
&& SanitizerOptions && SanitizerOptions->SanitizeCFI
&& !NoPrepopulatePasses) {
if (!IsLinkerPluginLTO && SanitizerOptions && SanitizerOptions->SanitizeCFI &&
!NoPrepopulatePasses) {
PipelineStartEPCallbacks.push_back(
[](ModulePassManager &MPM, OptimizationLevel Level) {
MPM.addPass(LowerTypeTestsPass(/*ExportSummary=*/nullptr,
/*ImportSummary=*/nullptr,
/*DropTypeTests=*/false));
}
);
[](ModulePassManager &MPM, OptimizationLevel Level) {
MPM.addPass(LowerTypeTestsPass(/*ExportSummary=*/nullptr,
/*ImportSummary=*/nullptr,
/*DropTypeTests=*/false));
});
}
if (VerifyIR) {
PipelineStartEPCallbacks.push_back(
[VerifyIR](ModulePassManager &MPM, OptimizationLevel Level) {
MPM.addPass(VerifierPass());
}
);
[VerifyIR](ModulePassManager &MPM, OptimizationLevel Level) {
MPM.addPass(VerifierPass());
});
}
if (InstrumentGCOV) {
PipelineStartEPCallbacks.push_back(
[](ModulePassManager &MPM, OptimizationLevel Level) {
MPM.addPass(GCOVProfilerPass(GCOVOptions::getDefault()));
}
);
[](ModulePassManager &MPM, OptimizationLevel Level) {
MPM.addPass(GCOVProfilerPass(GCOVOptions::getDefault()));
});
}
if (InstrumentCoverage) {
PipelineStartEPCallbacks.push_back(
[InstrProfileOutput](ModulePassManager &MPM, OptimizationLevel Level) {
InstrProfOptions Options;
if (InstrProfileOutput) {
Options.InstrProfileOutput = InstrProfileOutput;
}
// cargo run tests in multhreading mode by default
// so use atomics for coverage counters
Options.Atomic = true;
[InstrProfileOutput](ModulePassManager &MPM, OptimizationLevel Level) {
InstrProfOptions Options;
if (InstrProfileOutput) {
Options.InstrProfileOutput = InstrProfileOutput;
}
// cargo run tests in multhreading mode by default
// so use atomics for coverage counters
Options.Atomic = true;
#if LLVM_VERSION_GE(18, 0)
MPM.addPass(InstrProfilingLoweringPass(Options, false));
MPM.addPass(InstrProfilingLoweringPass(Options, false));
#else
MPM.addPass(InstrProfiling(Options, false));
MPM.addPass(InstrProfiling(Options, false));
#endif
}
);
});
}
if (SanitizerOptions) {
@@ -886,10 +873,9 @@ LLVMRustOptimize(
SanitizerOptions->SanitizeDataFlowABIList +
SanitizerOptions->SanitizeDataFlowABIListLen);
OptimizerLastEPCallbacks.push_back(
[ABIListFiles](ModulePassManager &MPM, OptimizationLevel Level) {
MPM.addPass(DataFlowSanitizerPass(ABIListFiles));
}
);
[ABIListFiles](ModulePassManager &MPM, OptimizationLevel Level) {
MPM.addPass(DataFlowSanitizerPass(ABIListFiles));
});
}
if (SanitizerOptions->SanitizeMemory) {
@@ -899,54 +885,54 @@ LLVMRustOptimize(
/*CompileKernel=*/false,
/*EagerChecks=*/true);
OptimizerLastEPCallbacks.push_back(
[Options](ModulePassManager &MPM, OptimizationLevel Level) {
MPM.addPass(MemorySanitizerPass(Options));
}
);
[Options](ModulePassManager &MPM, OptimizationLevel Level) {
MPM.addPass(MemorySanitizerPass(Options));
});
}
if (SanitizerOptions->SanitizeThread) {
OptimizerLastEPCallbacks.push_back(
[](ModulePassManager &MPM, OptimizationLevel Level) {
MPM.addPass(ModuleThreadSanitizerPass());
MPM.addPass(createModuleToFunctionPassAdaptor(ThreadSanitizerPass()));
}
);
OptimizerLastEPCallbacks.push_back([](ModulePassManager &MPM,
OptimizationLevel Level) {
MPM.addPass(ModuleThreadSanitizerPass());
MPM.addPass(createModuleToFunctionPassAdaptor(ThreadSanitizerPass()));
});
}
if (SanitizerOptions->SanitizeAddress || SanitizerOptions->SanitizeKernelAddress) {
if (SanitizerOptions->SanitizeAddress ||
SanitizerOptions->SanitizeKernelAddress) {
OptimizerLastEPCallbacks.push_back(
[SanitizerOptions](ModulePassManager &MPM, OptimizationLevel Level) {
auto CompileKernel = SanitizerOptions->SanitizeKernelAddress;
AddressSanitizerOptions opts = AddressSanitizerOptions{
CompileKernel,
SanitizerOptions->SanitizeAddressRecover
|| SanitizerOptions->SanitizeKernelAddressRecover,
/*UseAfterScope=*/true,
AsanDetectStackUseAfterReturnMode::Runtime,
};
MPM.addPass(AddressSanitizerPass(opts));
}
);
[SanitizerOptions](ModulePassManager &MPM, OptimizationLevel Level) {
auto CompileKernel = SanitizerOptions->SanitizeKernelAddress;
AddressSanitizerOptions opts = AddressSanitizerOptions{
CompileKernel,
SanitizerOptions->SanitizeAddressRecover ||
SanitizerOptions->SanitizeKernelAddressRecover,
/*UseAfterScope=*/true,
AsanDetectStackUseAfterReturnMode::Runtime,
};
MPM.addPass(AddressSanitizerPass(opts));
});
}
if (SanitizerOptions->SanitizeHWAddress) {
OptimizerLastEPCallbacks.push_back(
[SanitizerOptions](ModulePassManager &MPM, OptimizationLevel Level) {
HWAddressSanitizerOptions opts(
/*CompileKernel=*/false, SanitizerOptions->SanitizeHWAddressRecover,
/*DisableOptimization=*/false);
MPM.addPass(HWAddressSanitizerPass(opts));
}
);
[SanitizerOptions](ModulePassManager &MPM, OptimizationLevel Level) {
HWAddressSanitizerOptions opts(
/*CompileKernel=*/false,
SanitizerOptions->SanitizeHWAddressRecover,
/*DisableOptimization=*/false);
MPM.addPass(HWAddressSanitizerPass(opts));
});
}
}
ModulePassManager MPM;
bool NeedThinLTOBufferPasses = UseThinLTOBuffers;
if (!NoPrepopulatePasses) {
// The pre-link pipelines don't support O0 and require using buildO0DefaultPipeline() instead.
// At the same time, the LTO pipelines do support O0 and using them is required.
bool IsLTO = OptStage == LLVMRustOptStage::ThinLTO || OptStage == LLVMRustOptStage::FatLTO;
// The pre-link pipelines don't support O0 and require using
// buildO0DefaultPipeline() instead. At the same time, the LTO pipelines do
// support O0 and using them is required.
bool IsLTO = OptStage == LLVMRustOptStage::ThinLTO ||
OptStage == LLVMRustOptStage::FatLTO;
if (OptLevel == OptimizationLevel::O0 && !IsLTO) {
for (const auto &C : PipelineStartEPCallbacks)
PB.registerPipelineStartEPCallback(C);
@@ -993,7 +979,8 @@ LLVMRustOptimize(
}
if (ExtraPassesLen) {
if (auto Err = PB.parsePassPipeline(MPM, StringRef(ExtraPasses, ExtraPassesLen))) {
if (auto Err =
PB.parsePassPipeline(MPM, StringRef(ExtraPasses, ExtraPassesLen))) {
std::string ErrMsg = toString(std::move(Err));
LLVMRustSetLastError(ErrMsg.c_str());
return LLVMRustResult::Failure;
@@ -1020,8 +1007,7 @@ LLVMRustOptimize(
// * output buffer
// * output buffer len
// Returns len of demangled string, or 0 if demangle failed.
typedef size_t (*DemangleFn)(const char*, size_t, char*, size_t);
typedef size_t (*DemangleFn)(const char *, size_t, char *, size_t);
namespace {
@@ -1064,7 +1050,7 @@ public:
formatted_raw_ostream &OS) override {
StringRef Demangled = CallDemangle(F->getName());
if (Demangled.empty()) {
return;
return;
}
OS << "; " << Demangled << "\n";
@@ -1077,7 +1063,7 @@ public:
if (const CallInst *CI = dyn_cast<CallInst>(I)) {
Name = "call";
Value = CI->getCalledOperand();
} else if (const InvokeInst* II = dyn_cast<InvokeInst>(I)) {
} else if (const InvokeInst *II = dyn_cast<InvokeInst>(I)) {
Name = "invoke";
Value = II->getCalledOperand();
} else {
@@ -1101,8 +1087,8 @@ public:
} // namespace
extern "C" LLVMRustResult
LLVMRustPrintModule(LLVMModuleRef M, const char *Path, DemangleFn Demangle) {
extern "C" LLVMRustResult LLVMRustPrintModule(LLVMModuleRef M, const char *Path,
DemangleFn Demangle) {
std::string ErrorInfo;
std::error_code EC;
auto OS = raw_fd_ostream(Path, EC, sys::fs::OF_None);
@@ -1264,11 +1250,9 @@ getFirstDefinitionForLinker(const GlobalValueSummaryList &GVSummaryList) {
// The main entry point for creating the global ThinLTO analysis. The structure
// here is basically the same as before threads are spawned in the `run`
// function of `lib/LTO/ThinLTOCodeGenerator.cpp`.
extern "C" LLVMRustThinLTOData*
LLVMRustCreateThinLTOData(LLVMRustThinLTOModule *modules,
int num_modules,
const char **preserved_symbols,
int num_symbols) {
extern "C" LLVMRustThinLTOData *
LLVMRustCreateThinLTOData(LLVMRustThinLTOModule *modules, int num_modules,
const char **preserved_symbols, int num_symbols) {
auto Ret = std::make_unique<LLVMRustThinLTOData>();
// Load each module's summary and merge it into one combined index
@@ -1290,7 +1274,8 @@ LLVMRustCreateThinLTOData(LLVMRustThinLTOModule *modules,
}
// Collect for each module the list of function it defines (GUID -> Summary)
Ret->Index.collectDefinedGVSummariesPerModule(Ret->ModuleToDefinedGVSummaries);
Ret->Index.collectDefinedGVSummariesPerModule(
Ret->ModuleToDefinedGVSummaries);
// Convert the preserved symbols set from string to GUID, this is then needed
// for internalization.
@@ -1310,7 +1295,8 @@ LLVMRustCreateThinLTOData(LLVMRustThinLTOModule *modules,
// crate, so we need `ImportEnabled = false` to limit internalization.
// Otherwise, we sometimes lose `static` values -- see #60184.
computeDeadSymbolsWithConstProp(Ret->Index, Ret->GUIDPreservedSymbols,
deadIsPrevailing, /* ImportEnabled = */ false);
deadIsPrevailing,
/* ImportEnabled = */ false);
// Resolve LinkOnce/Weak symbols, this has to be computed early be cause it
// impacts the caching.
//
@@ -1319,7 +1305,8 @@ LLVMRustCreateThinLTOData(LLVMRustThinLTOModule *modules,
DenseMap<GlobalValue::GUID, const GlobalValueSummary *> PrevailingCopy;
for (auto &I : Ret->Index) {
if (I.second.SummaryList.size() > 1)
PrevailingCopy[I.first] = getFirstDefinitionForLinker(I.second.SummaryList);
PrevailingCopy[I.first] =
getFirstDefinitionForLinker(I.second.SummaryList);
}
auto isPrevailing = [&](GlobalValue::GUID GUID, const GlobalValueSummary *S) {
const auto &Prevailing = PrevailingCopy.find(GUID);
@@ -1327,13 +1314,8 @@ LLVMRustCreateThinLTOData(LLVMRustThinLTOModule *modules,
return true;
return Prevailing->second == S;
};
ComputeCrossModuleImport(
Ret->Index,
Ret->ModuleToDefinedGVSummaries,
isPrevailing,
Ret->ImportLists,
Ret->ExportLists
);
ComputeCrossModuleImport(Ret->Index, Ret->ModuleToDefinedGVSummaries,
isPrevailing, Ret->ImportLists, Ret->ExportLists);
auto recordNewLinkage = [&](StringRef ModuleIdentifier,
GlobalValue::GUID GUID,
@@ -1345,8 +1327,8 @@ LLVMRustCreateThinLTOData(LLVMRustThinLTOModule *modules,
// formats. We probably could and should use ELF visibility scheme for many of
// our targets, however.
lto::Config conf;
thinLTOResolvePrevailingInIndex(conf, Ret->Index, isPrevailing, recordNewLinkage,
Ret->GUIDPreservedSymbols);
thinLTOResolvePrevailingInIndex(conf, Ret->Index, isPrevailing,
recordNewLinkage, Ret->GUIDPreservedSymbols);
// Here we calculate an `ExportedGUIDs` set for use in the `isExported`
// callback below. This callback below will dictate the linkage for all
@@ -1355,7 +1337,7 @@ LLVMRustCreateThinLTOData(LLVMRustThinLTOModule *modules,
// linkage will stay as external, and internal will stay as internal.
std::set<GlobalValue::GUID> ExportedGUIDs;
for (auto &List : Ret->Index) {
for (auto &GVS: List.second.SummaryList) {
for (auto &GVS : List.second.SummaryList) {
if (GlobalValue::isLocalLinkage(GVS->linkage()))
continue;
auto GUID = GVS->getOriginalName();
@@ -1366,16 +1348,15 @@ LLVMRustCreateThinLTOData(LLVMRustThinLTOModule *modules,
auto isExported = [&](StringRef ModuleIdentifier, ValueInfo VI) {
const auto &ExportList = Ret->ExportLists.find(ModuleIdentifier);
return (ExportList != Ret->ExportLists.end() &&
ExportList->second.count(VI)) ||
ExportedGUIDs.count(VI.getGUID());
ExportList->second.count(VI)) ||
ExportedGUIDs.count(VI.getGUID());
};
thinLTOInternalizeAndPromoteInIndex(Ret->Index, isExported, isPrevailing);
return Ret.release();
}
extern "C" void
LLVMRustFreeThinLTOData(LLVMRustThinLTOData *Data) {
extern "C" void LLVMRustFreeThinLTOData(LLVMRustThinLTOData *Data) {
delete Data;
}
@@ -1387,20 +1368,18 @@ LLVMRustFreeThinLTOData(LLVMRustThinLTOData *Data) {
// `ProcessThinLTOModule` function. Here they're split up into separate steps
// so rustc can save off the intermediate bytecode between each step.
static bool
clearDSOLocalOnDeclarations(Module &Mod, TargetMachine &TM) {
static bool clearDSOLocalOnDeclarations(Module &Mod, TargetMachine &TM) {
// When linking an ELF shared object, dso_local should be dropped. We
// conservatively do this for -fpic.
bool ClearDSOLocalOnDeclarations =
TM.getTargetTriple().isOSBinFormatELF() &&
TM.getRelocationModel() != Reloc::Static &&
Mod.getPIELevel() == PIELevel::Default;
bool ClearDSOLocalOnDeclarations = TM.getTargetTriple().isOSBinFormatELF() &&
TM.getRelocationModel() != Reloc::Static &&
Mod.getPIELevel() == PIELevel::Default;
return ClearDSOLocalOnDeclarations;
}
extern "C" bool
LLVMRustPrepareThinLTORename(const LLVMRustThinLTOData *Data, LLVMModuleRef M,
LLVMTargetMachineRef TM) {
extern "C" bool LLVMRustPrepareThinLTORename(const LLVMRustThinLTOData *Data,
LLVMModuleRef M,
LLVMTargetMachineRef TM) {
Module &Mod = *unwrap(M);
TargetMachine &Target = *unwrap(TM);
@@ -1415,24 +1394,28 @@ LLVMRustPrepareThinLTORename(const LLVMRustThinLTOData *Data, LLVMModuleRef M,
}
extern "C" bool
LLVMRustPrepareThinLTOResolveWeak(const LLVMRustThinLTOData *Data, LLVMModuleRef M) {
LLVMRustPrepareThinLTOResolveWeak(const LLVMRustThinLTOData *Data,
LLVMModuleRef M) {
Module &Mod = *unwrap(M);
const auto &DefinedGlobals = Data->ModuleToDefinedGVSummaries.lookup(Mod.getModuleIdentifier());
const auto &DefinedGlobals =
Data->ModuleToDefinedGVSummaries.lookup(Mod.getModuleIdentifier());
thinLTOFinalizeInModule(Mod, DefinedGlobals, /*PropagateAttrs=*/true);
return true;
}
extern "C" bool
LLVMRustPrepareThinLTOInternalize(const LLVMRustThinLTOData *Data, LLVMModuleRef M) {
LLVMRustPrepareThinLTOInternalize(const LLVMRustThinLTOData *Data,
LLVMModuleRef M) {
Module &Mod = *unwrap(M);
const auto &DefinedGlobals = Data->ModuleToDefinedGVSummaries.lookup(Mod.getModuleIdentifier());
const auto &DefinedGlobals =
Data->ModuleToDefinedGVSummaries.lookup(Mod.getModuleIdentifier());
thinLTOInternalizeModule(Mod, DefinedGlobals);
return true;
}
extern "C" bool
LLVMRustPrepareThinLTOImport(const LLVMRustThinLTOData *Data, LLVMModuleRef M,
LLVMTargetMachineRef TM) {
extern "C" bool LLVMRustPrepareThinLTOImport(const LLVMRustThinLTOData *Data,
LLVMModuleRef M,
LLVMTargetMachineRef TM) {
Module &Mod = *unwrap(M);
TargetMachine &Target = *unwrap(TM);
@@ -1464,7 +1447,8 @@ LLVMRustPrepareThinLTOImport(const LLVMRustThinLTOData *Data, LLVMModuleRef M,
return Ret;
}
auto *WasmCustomSections = (*MOrErr)->getNamedMetadata("wasm.custom_sections");
auto *WasmCustomSections =
(*MOrErr)->getNamedMetadata("wasm.custom_sections");
if (WasmCustomSections)
WasmCustomSections->eraseFromParent();
@@ -1498,7 +1482,7 @@ struct LLVMRustThinLTOBuffer {
std::string thin_link_data;
};
extern "C" LLVMRustThinLTOBuffer*
extern "C" LLVMRustThinLTOBuffer *
LLVMRustThinLTOBufferCreate(LLVMModuleRef M, bool is_thin, bool emit_summary) {
auto Ret = std::make_unique<LLVMRustThinLTOBuffer>();
{
@@ -1520,7 +1504,8 @@ LLVMRustThinLTOBufferCreate(LLVMModuleRef M, bool is_thin, bool emit_summary) {
// We only pass ThinLinkOS to be filled in if we want the summary,
// because otherwise LLVM does extra work and may double-emit some
// errors or warnings.
MPM.addPass(ThinLTOBitcodeWriterPass(OS, emit_summary ? &ThinLinkOS : nullptr));
MPM.addPass(
ThinLTOBitcodeWriterPass(OS, emit_summary ? &ThinLinkOS : nullptr));
MPM.run(*unwrap(M), MAM);
} else {
WriteBitcodeToFile(*unwrap(M), OS);
@@ -1530,12 +1515,11 @@ LLVMRustThinLTOBufferCreate(LLVMModuleRef M, bool is_thin, bool emit_summary) {
return Ret.release();
}
extern "C" void
LLVMRustThinLTOBufferFree(LLVMRustThinLTOBuffer *Buffer) {
extern "C" void LLVMRustThinLTOBufferFree(LLVMRustThinLTOBuffer *Buffer) {
delete Buffer;
}
extern "C" const void*
extern "C" const void *
LLVMRustThinLTOBufferPtr(const LLVMRustThinLTOBuffer *Buffer) {
return Buffer->data.data();
}
@@ -1545,7 +1529,7 @@ LLVMRustThinLTOBufferLen(const LLVMRustThinLTOBuffer *Buffer) {
return Buffer->data.length();
}
extern "C" const void*
extern "C" const void *
LLVMRustThinLTOBufferThinLinkDataPtr(const LLVMRustThinLTOBuffer *Buffer) {
return Buffer->thin_link_data.data();
}
@@ -1558,11 +1542,10 @@ LLVMRustThinLTOBufferThinLinkDataLen(const LLVMRustThinLTOBuffer *Buffer) {
// This is what we used to parse upstream bitcode for actual ThinLTO
// processing. We'll call this once per module optimized through ThinLTO, and
// it'll be called concurrently on many threads.
extern "C" LLVMModuleRef
LLVMRustParseBitcodeForLTO(LLVMContextRef Context,
const char *data,
size_t len,
const char *identifier) {
extern "C" LLVMModuleRef LLVMRustParseBitcodeForLTO(LLVMContextRef Context,
const char *data,
size_t len,
const char *identifier) {
auto Data = StringRef(data, len);
auto Buffer = MemoryBufferRef(Data, identifier);
unwrap(Context)->enableDebugTypeODRUniquing();
@@ -1614,8 +1597,9 @@ extern "C" const char *LLVMRustGetSliceFromObjectDataByName(const char *data,
// of access globals, etc).
// The precise details are determined by LLVM in `computeLTOCacheKey`, which is
// used during the normal linker-plugin incremental thin-LTO process.
extern "C" void
LLVMRustComputeLTOCacheKey(RustStringRef KeyOut, const char *ModId, LLVMRustThinLTOData *Data) {
extern "C" void LLVMRustComputeLTOCacheKey(RustStringRef KeyOut,
const char *ModId,
LLVMRustThinLTOData *Data) {
SmallString<40> Key;
llvm::lto::Config conf;
const auto &ImportList = Data->ImportLists.lookup(ModId);
@@ -1633,9 +1617,9 @@ LLVMRustComputeLTOCacheKey(RustStringRef KeyOut, const char *ModId, LLVMRustThin
CfiFunctionDecls.insert(
GlobalValue::getGUID(GlobalValue::dropLLVMManglingEscape(Name)));
llvm::computeLTOCacheKey(Key, conf, Data->Index, ModId,
ImportList, ExportList, ResolvedODR, DefinedGlobals, CfiFunctionDefs, CfiFunctionDecls
);
llvm::computeLTOCacheKey(Key, conf, Data->Index, ModId, ImportList,
ExportList, ResolvedODR, DefinedGlobals,
CfiFunctionDefs, CfiFunctionDecls);
LLVMRustStringWriteImpl(KeyOut, Key.c_str(), Key.size());
}
+486 -519
View File
@@ -1,5 +1,6 @@
#include "LLVMWrapper.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Bitcode/BitcodeWriter.h"
#include "llvm/IR/DebugInfoMetadata.h"
#include "llvm/IR/DiagnosticHandler.h"
#include "llvm/IR/DiagnosticInfo.h"
@@ -11,17 +12,16 @@
#include "llvm/IR/LLVMRemarkStreamer.h"
#include "llvm/IR/Mangler.h"
#include "llvm/IR/Value.h"
#include "llvm/Remarks/RemarkStreamer.h"
#include "llvm/Remarks/RemarkSerializer.h"
#include "llvm/Remarks/RemarkFormat.h"
#include "llvm/Support/ToolOutputFile.h"
#include "llvm/Support/ModRef.h"
#include "llvm/Object/Archive.h"
#include "llvm/Object/COFFImportFile.h"
#include "llvm/Object/ObjectFile.h"
#include "llvm/Pass.h"
#include "llvm/Bitcode/BitcodeWriter.h"
#include "llvm/Remarks/RemarkFormat.h"
#include "llvm/Remarks/RemarkSerializer.h"
#include "llvm/Remarks/RemarkStreamer.h"
#include "llvm/Support/ModRef.h"
#include "llvm/Support/Signals.h"
#include "llvm/Support/ToolOutputFile.h"
#include <iostream>
@@ -71,12 +71,12 @@ static LLVM_THREAD_LOCAL char *LastError;
// Custom error handler for fatal LLVM errors.
//
// Notably it exits the process with code 101, unlike LLVM's default of 1.
static void FatalErrorHandler(void *UserData,
const char* Reason,
static void FatalErrorHandler(void *UserData, const char *Reason,
bool GenCrashDiag) {
// Once upon a time we emitted "LLVM ERROR:" specifically to mimic LLVM. Then,
// we developed crater and other tools which only expose logs, not error codes.
// Use a more greppable prefix that will still match the "LLVM ERROR:" prefix.
// we developed crater and other tools which only expose logs, not error
// codes. Use a more greppable prefix that will still match the "LLVM ERROR:"
// prefix.
std::cerr << "rustc-LLVM ERROR: " << Reason << std::endl;
// Since this error handler exits the process, we have to run any cleanup that
@@ -99,8 +99,7 @@ static void FatalErrorHandler(void *UserData,
//
// It aborts the process without any further allocations, similar to LLVM's
// default except that may be configured to `throw std::bad_alloc()` instead.
static void BadAllocErrorHandler(void *UserData,
const char* Reason,
static void BadAllocErrorHandler(void *UserData, const char *Reason,
bool GenCrashDiag) {
const char *OOM = "rustc-LLVM ERROR: out of memory\n";
(void)!::write(2, OOM, strlen(OOM));
@@ -190,7 +189,8 @@ static CallInst::TailCallKind fromRust(LLVMRustTailCallKind Kind) {
}
}
extern "C" void LLVMRustSetTailCallKind(LLVMValueRef Call, LLVMRustTailCallKind TCK) {
extern "C" void LLVMRustSetTailCallKind(LLVMValueRef Call,
LLVMRustTailCallKind TCK) {
unwrap<CallInst>(Call)->setTailCallKind(fromRust(TCK));
}
@@ -201,12 +201,13 @@ extern "C" LLVMValueRef LLVMRustGetOrInsertFunction(LLVMModuleRef M,
return wrap(unwrap(M)
->getOrInsertFunction(StringRef(Name, NameLen),
unwrap<FunctionType>(FunctionTy))
.getCallee()
);
.getCallee());
}
extern "C" LLVMValueRef
LLVMRustGetOrInsertGlobal(LLVMModuleRef M, const char *Name, size_t NameLen, LLVMTypeRef Ty) {
extern "C" LLVMValueRef LLVMRustGetOrInsertGlobal(LLVMModuleRef M,
const char *Name,
size_t NameLen,
LLVMTypeRef Ty) {
Module *Mod = unwrap(M);
auto NameRef = StringRef(Name, NameLen);
@@ -221,13 +222,10 @@ LLVMRustGetOrInsertGlobal(LLVMModuleRef M, const char *Name, size_t NameLen, LLV
return wrap(GV);
}
extern "C" LLVMValueRef
LLVMRustInsertPrivateGlobal(LLVMModuleRef M, LLVMTypeRef Ty) {
return wrap(new GlobalVariable(*unwrap(M),
unwrap(Ty),
false,
GlobalValue::PrivateLinkage,
nullptr));
extern "C" LLVMValueRef LLVMRustInsertPrivateGlobal(LLVMModuleRef M,
LLVMTypeRef Ty) {
return wrap(new GlobalVariable(*unwrap(M), unwrap(Ty), false,
GlobalValue::PrivateLinkage, nullptr));
}
static Attribute::AttrKind fromRust(LLVMRustAttribute Kind) {
@@ -326,8 +324,9 @@ static Attribute::AttrKind fromRust(LLVMRustAttribute Kind) {
report_fatal_error("bad AttributeKind");
}
template<typename T> static inline void AddAttributes(T *t, unsigned Index,
LLVMAttributeRef *Attrs, size_t AttrsLen) {
template <typename T>
static inline void AddAttributes(T *t, unsigned Index, LLVMAttributeRef *Attrs,
size_t AttrsLen) {
AttributeList PAL = t->getAttributes();
auto B = AttrBuilder(t->getContext());
for (LLVMAttributeRef Attr : ArrayRef<LLVMAttributeRef>(Attrs, AttrsLen))
@@ -337,19 +336,22 @@ template<typename T> static inline void AddAttributes(T *t, unsigned Index,
}
extern "C" void LLVMRustAddFunctionAttributes(LLVMValueRef Fn, unsigned Index,
LLVMAttributeRef *Attrs, size_t AttrsLen) {
LLVMAttributeRef *Attrs,
size_t AttrsLen) {
Function *F = unwrap<Function>(Fn);
AddAttributes(F, Index, Attrs, AttrsLen);
}
extern "C" void LLVMRustAddCallSiteAttributes(LLVMValueRef Instr, unsigned Index,
LLVMAttributeRef *Attrs, size_t AttrsLen) {
extern "C" void LLVMRustAddCallSiteAttributes(LLVMValueRef Instr,
unsigned Index,
LLVMAttributeRef *Attrs,
size_t AttrsLen) {
CallBase *Call = unwrap<CallBase>(Instr);
AddAttributes(Call, Index, Attrs, AttrsLen);
}
extern "C" LLVMAttributeRef LLVMRustCreateAttrNoValue(LLVMContextRef C,
LLVMRustAttribute RustAttr) {
extern "C" LLVMAttributeRef
LLVMRustCreateAttrNoValue(LLVMContextRef C, LLVMRustAttribute RustAttr) {
return wrap(Attribute::get(*unwrap(C), fromRust(RustAttr)));
}
@@ -363,30 +365,36 @@ extern "C" LLVMAttributeRef LLVMRustCreateDereferenceableAttr(LLVMContextRef C,
return wrap(Attribute::getWithDereferenceableBytes(*unwrap(C), Bytes));
}
extern "C" LLVMAttributeRef LLVMRustCreateDereferenceableOrNullAttr(LLVMContextRef C,
uint64_t Bytes) {
extern "C" LLVMAttributeRef
LLVMRustCreateDereferenceableOrNullAttr(LLVMContextRef C, uint64_t Bytes) {
return wrap(Attribute::getWithDereferenceableOrNullBytes(*unwrap(C), Bytes));
}
extern "C" LLVMAttributeRef LLVMRustCreateByValAttr(LLVMContextRef C, LLVMTypeRef Ty) {
extern "C" LLVMAttributeRef LLVMRustCreateByValAttr(LLVMContextRef C,
LLVMTypeRef Ty) {
return wrap(Attribute::getWithByValType(*unwrap(C), unwrap(Ty)));
}
extern "C" LLVMAttributeRef LLVMRustCreateStructRetAttr(LLVMContextRef C, LLVMTypeRef Ty) {
extern "C" LLVMAttributeRef LLVMRustCreateStructRetAttr(LLVMContextRef C,
LLVMTypeRef Ty) {
return wrap(Attribute::getWithStructRetType(*unwrap(C), unwrap(Ty)));
}
extern "C" LLVMAttributeRef LLVMRustCreateElementTypeAttr(LLVMContextRef C, LLVMTypeRef Ty) {
extern "C" LLVMAttributeRef LLVMRustCreateElementTypeAttr(LLVMContextRef C,
LLVMTypeRef Ty) {
return wrap(Attribute::get(*unwrap(C), Attribute::ElementType, unwrap(Ty)));
}
extern "C" LLVMAttributeRef LLVMRustCreateUWTableAttr(LLVMContextRef C, bool Async) {
extern "C" LLVMAttributeRef LLVMRustCreateUWTableAttr(LLVMContextRef C,
bool Async) {
return wrap(Attribute::getWithUWTableKind(
*unwrap(C), Async ? UWTableKind::Async : UWTableKind::Sync));
}
extern "C" LLVMAttributeRef LLVMRustCreateAllocSizeAttr(LLVMContextRef C, uint32_t ElementSizeArg) {
return wrap(Attribute::getWithAllocSizeArgs(*unwrap(C), ElementSizeArg, std::nullopt));
extern "C" LLVMAttributeRef
LLVMRustCreateAllocSizeAttr(LLVMContextRef C, uint32_t ElementSizeArg) {
return wrap(Attribute::getWithAllocSizeArgs(*unwrap(C), ElementSizeArg,
std::nullopt));
}
// These values **must** match ffi::AllocKindFlags.
@@ -403,12 +411,15 @@ enum class LLVMRustAllocKindFlags : uint64_t {
Aligned = 1 << 5,
};
static LLVMRustAllocKindFlags operator&(LLVMRustAllocKindFlags A, LLVMRustAllocKindFlags B) {
static LLVMRustAllocKindFlags operator&(LLVMRustAllocKindFlags A,
LLVMRustAllocKindFlags B) {
return static_cast<LLVMRustAllocKindFlags>(static_cast<uint64_t>(A) &
static_cast<uint64_t>(B));
static_cast<uint64_t>(B));
}
static bool isSet(LLVMRustAllocKindFlags F) { return F != LLVMRustAllocKindFlags::Unknown; }
static bool isSet(LLVMRustAllocKindFlags F) {
return F != LLVMRustAllocKindFlags::Unknown;
}
static llvm::AllocFnKind allocKindFromRust(LLVMRustAllocKindFlags F) {
llvm::AllocFnKind AFK = llvm::AllocFnKind::Unknown;
@@ -433,40 +444,47 @@ static llvm::AllocFnKind allocKindFromRust(LLVMRustAllocKindFlags F) {
return AFK;
}
extern "C" LLVMAttributeRef LLVMRustCreateAllocKindAttr(LLVMContextRef C, uint64_t AllocKindArg) {
return wrap(Attribute::get(*unwrap(C), Attribute::AllocKind,
static_cast<uint64_t>(allocKindFromRust(static_cast<LLVMRustAllocKindFlags>(AllocKindArg)))));
extern "C" LLVMAttributeRef LLVMRustCreateAllocKindAttr(LLVMContextRef C,
uint64_t AllocKindArg) {
return wrap(
Attribute::get(*unwrap(C), Attribute::AllocKind,
static_cast<uint64_t>(allocKindFromRust(
static_cast<LLVMRustAllocKindFlags>(AllocKindArg)))));
}
// Simplified representation of `MemoryEffects` across the FFI boundary.
//
// Each variant corresponds to one of the static factory methods on `MemoryEffects`.
// Each variant corresponds to one of the static factory methods on
// `MemoryEffects`.
enum class LLVMRustMemoryEffects {
None,
ReadOnly,
InaccessibleMemOnly,
};
extern "C" LLVMAttributeRef LLVMRustCreateMemoryEffectsAttr(LLVMContextRef C,
LLVMRustMemoryEffects Effects) {
extern "C" LLVMAttributeRef
LLVMRustCreateMemoryEffectsAttr(LLVMContextRef C,
LLVMRustMemoryEffects Effects) {
switch (Effects) {
case LLVMRustMemoryEffects::None:
return wrap(Attribute::getWithMemoryEffects(*unwrap(C), MemoryEffects::none()));
case LLVMRustMemoryEffects::ReadOnly:
return wrap(Attribute::getWithMemoryEffects(*unwrap(C), MemoryEffects::readOnly()));
case LLVMRustMemoryEffects::InaccessibleMemOnly:
return wrap(Attribute::getWithMemoryEffects(*unwrap(C),
MemoryEffects::inaccessibleMemOnly()));
default:
report_fatal_error("bad MemoryEffects.");
case LLVMRustMemoryEffects::None:
return wrap(
Attribute::getWithMemoryEffects(*unwrap(C), MemoryEffects::none()));
case LLVMRustMemoryEffects::ReadOnly:
return wrap(
Attribute::getWithMemoryEffects(*unwrap(C), MemoryEffects::readOnly()));
case LLVMRustMemoryEffects::InaccessibleMemOnly:
return wrap(Attribute::getWithMemoryEffects(
*unwrap(C), MemoryEffects::inaccessibleMemOnly()));
default:
report_fatal_error("bad MemoryEffects.");
}
}
// Enable all fast-math flags, including those which will cause floating-point operations
// to return poison for some well-defined inputs. This function can only be used to build
// unsafe Rust intrinsics. That unsafety does permit additional optimizations, but at the
// time of writing, their value is not well-understood relative to those enabled by
// LLVMRustSetAlgebraicMath.
// Enable all fast-math flags, including those which will cause floating-point
// operations to return poison for some well-defined inputs. This function can
// only be used to build unsafe Rust intrinsics. That unsafety does permit
// additional optimizations, but at the time of writing, their value is not
// well-understood relative to those enabled by LLVMRustSetAlgebraicMath.
//
// https://llvm.org/docs/LangRef.html#fast-math-flags
extern "C" void LLVMRustSetFastMath(LLVMValueRef V) {
@@ -475,14 +493,12 @@ extern "C" void LLVMRustSetFastMath(LLVMValueRef V) {
}
}
// Enable fast-math flags which permit algebraic transformations that are not allowed by
// IEEE floating point. For example:
// a + (b + c) = (a + b) + c
// and
// a / b = a * (1 / b)
// Note that this does NOT enable any flags which can cause a floating-point operation on
// well-defined inputs to return poison, and therefore this function can be used to build
// safe Rust intrinsics (such as fadd_algebraic).
// Enable fast-math flags which permit algebraic transformations that are not
// allowed by IEEE floating point. For example: a + (b + c) = (a + b) + c and a
// / b = a * (1 / b) Note that this does NOT enable any flags which can cause a
// floating-point operation on well-defined inputs to return poison, and
// therefore this function can be used to build safe Rust intrinsics (such as
// fadd_algebraic).
//
// https://llvm.org/docs/LangRef.html#fast-math-flags
extern "C" void LLVMRustSetAlgebraicMath(LLVMValueRef V) {
@@ -497,9 +513,9 @@ extern "C" void LLVMRustSetAlgebraicMath(LLVMValueRef V) {
// Enable the reassoc fast-math flag, allowing transformations that pretend
// floating-point addition and multiplication are associative.
//
// Note that this does NOT enable any flags which can cause a floating-point operation on
// well-defined inputs to return poison, and therefore this function can be used to build
// safe Rust intrinsics (such as fadd_algebraic).
// Note that this does NOT enable any flags which can cause a floating-point
// operation on well-defined inputs to return poison, and therefore this
// function can be used to build safe Rust intrinsics (such as fadd_algebraic).
//
// https://llvm.org/docs/LangRef.html#fast-math-flags
extern "C" void LLVMRustSetAllowReassoc(LLVMValueRef V) {
@@ -547,11 +563,10 @@ LLVMRustInlineAsm(LLVMTypeRef Ty, char *AsmString, size_t AsmStringLen,
char *Constraints, size_t ConstraintsLen,
LLVMBool HasSideEffects, LLVMBool IsAlignStack,
LLVMRustAsmDialect Dialect, LLVMBool CanThrow) {
return wrap(InlineAsm::get(unwrap<FunctionType>(Ty),
StringRef(AsmString, AsmStringLen),
StringRef(Constraints, ConstraintsLen),
HasSideEffects, IsAlignStack,
fromRust(Dialect), CanThrow));
return wrap(InlineAsm::get(
unwrap<FunctionType>(Ty), StringRef(AsmString, AsmStringLen),
StringRef(Constraints, ConstraintsLen), HasSideEffects, IsAlignStack,
fromRust(Dialect), CanThrow));
}
extern "C" bool LLVMRustInlineAsmVerify(LLVMTypeRef Ty, char *Constraints,
@@ -705,19 +720,22 @@ enum class LLVMRustDISPFlags : uint32_t {
inline LLVMRustDISPFlags operator&(LLVMRustDISPFlags A, LLVMRustDISPFlags B) {
return static_cast<LLVMRustDISPFlags>(static_cast<uint32_t>(A) &
static_cast<uint32_t>(B));
static_cast<uint32_t>(B));
}
inline LLVMRustDISPFlags operator|(LLVMRustDISPFlags A, LLVMRustDISPFlags B) {
return static_cast<LLVMRustDISPFlags>(static_cast<uint32_t>(A) |
static_cast<uint32_t>(B));
static_cast<uint32_t>(B));
}
inline LLVMRustDISPFlags &operator|=(LLVMRustDISPFlags &A, LLVMRustDISPFlags B) {
inline LLVMRustDISPFlags &operator|=(LLVMRustDISPFlags &A,
LLVMRustDISPFlags B) {
return A = A | B;
}
inline bool isSet(LLVMRustDISPFlags F) { return F != LLVMRustDISPFlags::SPFlagZero; }
inline bool isSet(LLVMRustDISPFlags F) {
return F != LLVMRustDISPFlags::SPFlagZero;
}
inline LLVMRustDISPFlags virtuality(LLVMRustDISPFlags F) {
return static_cast<LLVMRustDISPFlags>(static_cast<uint32_t>(F) & 0x3);
@@ -761,7 +779,8 @@ enum class LLVMRustDebugEmissionKind {
DebugDirectivesOnly,
};
static DICompileUnit::DebugEmissionKind fromRust(LLVMRustDebugEmissionKind Kind) {
static DICompileUnit::DebugEmissionKind
fromRust(LLVMRustDebugEmissionKind Kind) {
switch (Kind) {
case LLVMRustDebugEmissionKind::NoDebug:
return DICompileUnit::DebugEmissionKind::NoDebug;
@@ -777,12 +796,13 @@ static DICompileUnit::DebugEmissionKind fromRust(LLVMRustDebugEmissionKind Kind)
}
enum class LLVMRustDebugNameTableKind {
Default,
GNU,
None,
Default,
GNU,
None,
};
static DICompileUnit::DebugNameTableKind fromRust(LLVMRustDebugNameTableKind Kind) {
static DICompileUnit::DebugNameTableKind
fromRust(LLVMRustDebugNameTableKind Kind) {
switch (Kind) {
case LLVMRustDebugNameTableKind::Default:
return DICompileUnit::DebugNameTableKind::Default;
@@ -827,22 +847,18 @@ extern "C" uint32_t LLVMRustVersionMinor() { return LLVM_VERSION_MINOR; }
extern "C" uint32_t LLVMRustVersionMajor() { return LLVM_VERSION_MAJOR; }
extern "C" void LLVMRustAddModuleFlagU32(
LLVMModuleRef M,
Module::ModFlagBehavior MergeBehavior,
const char *Name,
uint32_t Value) {
extern "C" void LLVMRustAddModuleFlagU32(LLVMModuleRef M,
Module::ModFlagBehavior MergeBehavior,
const char *Name, uint32_t Value) {
unwrap(M)->addModuleFlag(MergeBehavior, Name, Value);
}
extern "C" void LLVMRustAddModuleFlagString(
LLVMModuleRef M,
Module::ModFlagBehavior MergeBehavior,
const char *Name,
const char *Value,
size_t ValueLen) {
unwrap(M)->addModuleFlag(MergeBehavior, Name,
MDString::get(unwrap(M)->getContext(), StringRef(Value, ValueLen)));
LLVMModuleRef M, Module::ModFlagBehavior MergeBehavior, const char *Name,
const char *Value, size_t ValueLen) {
unwrap(M)->addModuleFlag(
MergeBehavior, Name,
MDString::get(unwrap(M)->getContext(), StringRef(Value, ValueLen)));
}
extern "C" bool LLVMRustHasModuleFlag(LLVMModuleRef M, const char *Name,
@@ -850,8 +866,8 @@ extern "C" bool LLVMRustHasModuleFlag(LLVMModuleRef M, const char *Name,
return unwrap(M)->getModuleFlag(StringRef(Name, Len)) != nullptr;
}
extern "C" void LLVMRustGlobalAddMetadata(
LLVMValueRef Global, unsigned Kind, LLVMMetadataRef MD) {
extern "C" void LLVMRustGlobalAddMetadata(LLVMValueRef Global, unsigned Kind,
LLVMMetadataRef MD) {
unwrap<GlobalObject>(Global)->addMetadata(Kind, *unwrap<MDNode>(MD));
}
@@ -870,33 +886,29 @@ extern "C" void LLVMRustDIBuilderFinalize(LLVMRustDIBuilderRef Builder) {
extern "C" LLVMMetadataRef LLVMRustDIBuilderCreateCompileUnit(
LLVMRustDIBuilderRef Builder, unsigned Lang, LLVMMetadataRef FileRef,
const char *Producer, size_t ProducerLen, bool isOptimized,
const char *Flags, unsigned RuntimeVer,
const char *SplitName, size_t SplitNameLen,
LLVMRustDebugEmissionKind Kind,
uint64_t DWOId, bool SplitDebugInlining,
LLVMRustDebugNameTableKind TableKind) {
const char *Flags, unsigned RuntimeVer, const char *SplitName,
size_t SplitNameLen, LLVMRustDebugEmissionKind Kind, uint64_t DWOId,
bool SplitDebugInlining, LLVMRustDebugNameTableKind TableKind) {
auto *File = unwrapDI<DIFile>(FileRef);
return wrap(Builder->createCompileUnit(Lang, File, StringRef(Producer, ProducerLen),
isOptimized, Flags, RuntimeVer,
StringRef(SplitName, SplitNameLen),
fromRust(Kind), DWOId, SplitDebugInlining,
false, fromRust(TableKind)));
return wrap(Builder->createCompileUnit(
Lang, File, StringRef(Producer, ProducerLen), isOptimized, Flags,
RuntimeVer, StringRef(SplitName, SplitNameLen), fromRust(Kind), DWOId,
SplitDebugInlining, false, fromRust(TableKind)));
}
extern "C" LLVMMetadataRef LLVMRustDIBuilderCreateFile(
LLVMRustDIBuilderRef Builder,
const char *Filename, size_t FilenameLen,
const char *Directory, size_t DirectoryLen, LLVMRustChecksumKind CSKind,
const char *Checksum, size_t ChecksumLen) {
extern "C" LLVMMetadataRef
LLVMRustDIBuilderCreateFile(LLVMRustDIBuilderRef Builder, const char *Filename,
size_t FilenameLen, const char *Directory,
size_t DirectoryLen, LLVMRustChecksumKind CSKind,
const char *Checksum, size_t ChecksumLen) {
std::optional<DIFile::ChecksumKind> llvmCSKind = fromRust(CSKind);
std::optional<DIFile::ChecksumInfo<StringRef>> CSInfo{};
if (llvmCSKind)
CSInfo.emplace(*llvmCSKind, StringRef{Checksum, ChecksumLen});
return wrap(Builder->createFile(StringRef(Filename, FilenameLen),
StringRef(Directory, DirectoryLen),
CSInfo));
StringRef(Directory, DirectoryLen), CSInfo));
}
extern "C" LLVMMetadataRef
@@ -907,63 +919,59 @@ LLVMRustDIBuilderCreateSubroutineType(LLVMRustDIBuilderRef Builder,
}
extern "C" LLVMMetadataRef LLVMRustDIBuilderCreateFunction(
LLVMRustDIBuilderRef Builder, LLVMMetadataRef Scope,
const char *Name, size_t NameLen,
const char *LinkageName, size_t LinkageNameLen,
LLVMMetadataRef File, unsigned LineNo,
LLVMMetadataRef Ty, unsigned ScopeLine, LLVMRustDIFlags Flags,
LLVMRustDISPFlags SPFlags, LLVMValueRef MaybeFn, LLVMMetadataRef TParam,
LLVMMetadataRef Decl) {
LLVMRustDIBuilderRef Builder, LLVMMetadataRef Scope, const char *Name,
size_t NameLen, const char *LinkageName, size_t LinkageNameLen,
LLVMMetadataRef File, unsigned LineNo, LLVMMetadataRef Ty,
unsigned ScopeLine, LLVMRustDIFlags Flags, LLVMRustDISPFlags SPFlags,
LLVMValueRef MaybeFn, LLVMMetadataRef TParam, LLVMMetadataRef Decl) {
DITemplateParameterArray TParams =
DITemplateParameterArray(unwrap<MDTuple>(TParam));
DISubprogram::DISPFlags llvmSPFlags = fromRust(SPFlags);
DINode::DIFlags llvmFlags = fromRust(Flags);
DISubprogram *Sub = Builder->createFunction(
unwrapDI<DIScope>(Scope),
StringRef(Name, NameLen),
StringRef(LinkageName, LinkageNameLen),
unwrapDI<DIFile>(File), LineNo,
unwrapDI<DISubroutineType>(Ty), ScopeLine, llvmFlags,
llvmSPFlags, TParams, unwrapDIPtr<DISubprogram>(Decl));
unwrapDI<DIScope>(Scope), StringRef(Name, NameLen),
StringRef(LinkageName, LinkageNameLen), unwrapDI<DIFile>(File), LineNo,
unwrapDI<DISubroutineType>(Ty), ScopeLine, llvmFlags, llvmSPFlags,
TParams, unwrapDIPtr<DISubprogram>(Decl));
if (MaybeFn)
unwrap<Function>(MaybeFn)->setSubprogram(Sub);
return wrap(Sub);
}
extern "C" LLVMMetadataRef LLVMRustDIBuilderCreateMethod(
LLVMRustDIBuilderRef Builder, LLVMMetadataRef Scope,
const char *Name, size_t NameLen,
const char *LinkageName, size_t LinkageNameLen,
LLVMMetadataRef File, unsigned LineNo,
LLVMMetadataRef Ty, LLVMRustDIFlags Flags,
LLVMRustDISPFlags SPFlags, LLVMMetadataRef TParam) {
LLVMRustDIBuilderRef Builder, LLVMMetadataRef Scope, const char *Name,
size_t NameLen, const char *LinkageName, size_t LinkageNameLen,
LLVMMetadataRef File, unsigned LineNo, LLVMMetadataRef Ty,
LLVMRustDIFlags Flags, LLVMRustDISPFlags SPFlags, LLVMMetadataRef TParam) {
DITemplateParameterArray TParams =
DITemplateParameterArray(unwrap<MDTuple>(TParam));
DISubprogram::DISPFlags llvmSPFlags = fromRust(SPFlags);
DINode::DIFlags llvmFlags = fromRust(Flags);
DISubprogram *Sub = Builder->createMethod(
unwrapDI<DIScope>(Scope),
StringRef(Name, NameLen),
StringRef(LinkageName, LinkageNameLen),
unwrapDI<DIFile>(File), LineNo,
unwrapDI<DISubroutineType>(Ty),
0, 0, nullptr, // VTable params aren't used
unwrapDI<DIScope>(Scope), StringRef(Name, NameLen),
StringRef(LinkageName, LinkageNameLen), unwrapDI<DIFile>(File), LineNo,
unwrapDI<DISubroutineType>(Ty), 0, 0,
nullptr, // VTable params aren't used
llvmFlags, llvmSPFlags, TParams);
return wrap(Sub);
}
extern "C" LLVMMetadataRef LLVMRustDIBuilderCreateBasicType(
LLVMRustDIBuilderRef Builder, const char *Name, size_t NameLen,
uint64_t SizeInBits, unsigned Encoding) {
return wrap(Builder->createBasicType(StringRef(Name, NameLen), SizeInBits, Encoding));
extern "C" LLVMMetadataRef
LLVMRustDIBuilderCreateBasicType(LLVMRustDIBuilderRef Builder, const char *Name,
size_t NameLen, uint64_t SizeInBits,
unsigned Encoding) {
return wrap(
Builder->createBasicType(StringRef(Name, NameLen), SizeInBits, Encoding));
}
extern "C" LLVMMetadataRef LLVMRustDIBuilderCreateTypedef(
LLVMRustDIBuilderRef Builder, LLVMMetadataRef Type, const char *Name, size_t NameLen,
LLVMMetadataRef File, unsigned LineNo, LLVMMetadataRef Scope) {
extern "C" LLVMMetadataRef
LLVMRustDIBuilderCreateTypedef(LLVMRustDIBuilderRef Builder,
LLVMMetadataRef Type, const char *Name,
size_t NameLen, LLVMMetadataRef File,
unsigned LineNo, LLVMMetadataRef Scope) {
return wrap(Builder->createTypedef(
unwrap<DIType>(Type), StringRef(Name, NameLen), unwrap<DIFile>(File),
LineNo, unwrapDIPtr<DIScope>(Scope)));
unwrap<DIType>(Type), StringRef(Name, NameLen), unwrap<DIFile>(File),
LineNo, unwrapDIPtr<DIScope>(Scope)));
}
extern "C" LLVMMetadataRef LLVMRustDIBuilderCreatePointerType(
@@ -971,118 +979,98 @@ extern "C" LLVMMetadataRef LLVMRustDIBuilderCreatePointerType(
uint64_t SizeInBits, uint32_t AlignInBits, unsigned AddressSpace,
const char *Name, size_t NameLen) {
return wrap(Builder->createPointerType(unwrapDI<DIType>(PointeeTy),
SizeInBits, AlignInBits,
AddressSpace,
SizeInBits, AlignInBits, AddressSpace,
StringRef(Name, NameLen)));
}
extern "C" LLVMMetadataRef LLVMRustDIBuilderCreateStructType(
LLVMRustDIBuilderRef Builder, LLVMMetadataRef Scope,
const char *Name, size_t NameLen,
LLVMMetadataRef File, unsigned LineNumber, uint64_t SizeInBits,
uint32_t AlignInBits, LLVMRustDIFlags Flags,
LLVMMetadataRef DerivedFrom, LLVMMetadataRef Elements,
unsigned RunTimeLang, LLVMMetadataRef VTableHolder,
const char *UniqueId, size_t UniqueIdLen) {
LLVMRustDIBuilderRef Builder, LLVMMetadataRef Scope, const char *Name,
size_t NameLen, LLVMMetadataRef File, unsigned LineNumber,
uint64_t SizeInBits, uint32_t AlignInBits, LLVMRustDIFlags Flags,
LLVMMetadataRef DerivedFrom, LLVMMetadataRef Elements, unsigned RunTimeLang,
LLVMMetadataRef VTableHolder, const char *UniqueId, size_t UniqueIdLen) {
return wrap(Builder->createStructType(
unwrapDI<DIDescriptor>(Scope), StringRef(Name, NameLen),
unwrapDI<DIFile>(File), LineNumber,
SizeInBits, AlignInBits, fromRust(Flags), unwrapDI<DIType>(DerivedFrom),
unwrapDI<DIFile>(File), LineNumber, SizeInBits, AlignInBits,
fromRust(Flags), unwrapDI<DIType>(DerivedFrom),
DINodeArray(unwrapDI<MDTuple>(Elements)), RunTimeLang,
unwrapDI<DIType>(VTableHolder), StringRef(UniqueId, UniqueIdLen)));
}
extern "C" LLVMMetadataRef LLVMRustDIBuilderCreateVariantPart(
LLVMRustDIBuilderRef Builder, LLVMMetadataRef Scope,
const char *Name, size_t NameLen,
LLVMMetadataRef File, unsigned LineNumber, uint64_t SizeInBits,
uint32_t AlignInBits, LLVMRustDIFlags Flags, LLVMMetadataRef Discriminator,
LLVMMetadataRef Elements, const char *UniqueId, size_t UniqueIdLen) {
LLVMRustDIBuilderRef Builder, LLVMMetadataRef Scope, const char *Name,
size_t NameLen, LLVMMetadataRef File, unsigned LineNumber,
uint64_t SizeInBits, uint32_t AlignInBits, LLVMRustDIFlags Flags,
LLVMMetadataRef Discriminator, LLVMMetadataRef Elements,
const char *UniqueId, size_t UniqueIdLen) {
return wrap(Builder->createVariantPart(
unwrapDI<DIDescriptor>(Scope), StringRef(Name, NameLen),
unwrapDI<DIFile>(File), LineNumber,
SizeInBits, AlignInBits, fromRust(Flags), unwrapDI<DIDerivedType>(Discriminator),
DINodeArray(unwrapDI<MDTuple>(Elements)), StringRef(UniqueId, UniqueIdLen)));
unwrapDI<DIFile>(File), LineNumber, SizeInBits, AlignInBits,
fromRust(Flags), unwrapDI<DIDerivedType>(Discriminator),
DINodeArray(unwrapDI<MDTuple>(Elements)),
StringRef(UniqueId, UniqueIdLen)));
}
extern "C" LLVMMetadataRef LLVMRustDIBuilderCreateMemberType(
LLVMRustDIBuilderRef Builder, LLVMMetadataRef Scope,
const char *Name, size_t NameLen,
LLVMMetadataRef File, unsigned LineNo, uint64_t SizeInBits,
LLVMRustDIBuilderRef Builder, LLVMMetadataRef Scope, const char *Name,
size_t NameLen, LLVMMetadataRef File, unsigned LineNo, uint64_t SizeInBits,
uint32_t AlignInBits, uint64_t OffsetInBits, LLVMRustDIFlags Flags,
LLVMMetadataRef Ty) {
return wrap(Builder->createMemberType(unwrapDI<DIDescriptor>(Scope),
StringRef(Name, NameLen),
unwrapDI<DIFile>(File), LineNo,
SizeInBits, AlignInBits, OffsetInBits,
fromRust(Flags), unwrapDI<DIType>(Ty)));
return wrap(Builder->createMemberType(
unwrapDI<DIDescriptor>(Scope), StringRef(Name, NameLen),
unwrapDI<DIFile>(File), LineNo, SizeInBits, AlignInBits, OffsetInBits,
fromRust(Flags), unwrapDI<DIType>(Ty)));
}
extern "C" LLVMMetadataRef LLVMRustDIBuilderCreateVariantMemberType(
LLVMRustDIBuilderRef Builder, LLVMMetadataRef Scope,
const char *Name, size_t NameLen, LLVMMetadataRef File, unsigned LineNo,
uint64_t SizeInBits, uint32_t AlignInBits, uint64_t OffsetInBits, LLVMValueRef Discriminant,
LLVMRustDIBuilderRef Builder, LLVMMetadataRef Scope, const char *Name,
size_t NameLen, LLVMMetadataRef File, unsigned LineNo, uint64_t SizeInBits,
uint32_t AlignInBits, uint64_t OffsetInBits, LLVMValueRef Discriminant,
LLVMRustDIFlags Flags, LLVMMetadataRef Ty) {
llvm::ConstantInt* D = nullptr;
llvm::ConstantInt *D = nullptr;
if (Discriminant) {
D = unwrap<llvm::ConstantInt>(Discriminant);
}
return wrap(Builder->createVariantMemberType(unwrapDI<DIDescriptor>(Scope),
StringRef(Name, NameLen),
unwrapDI<DIFile>(File), LineNo,
SizeInBits, AlignInBits, OffsetInBits, D,
fromRust(Flags), unwrapDI<DIType>(Ty)));
return wrap(Builder->createVariantMemberType(
unwrapDI<DIDescriptor>(Scope), StringRef(Name, NameLen),
unwrapDI<DIFile>(File), LineNo, SizeInBits, AlignInBits, OffsetInBits, D,
fromRust(Flags), unwrapDI<DIType>(Ty)));
}
extern "C" LLVMMetadataRef LLVMRustDIBuilderCreateStaticMemberType(
LLVMRustDIBuilderRef Builder,
LLVMMetadataRef Scope,
const char *Name,
size_t NameLen,
LLVMMetadataRef File,
unsigned LineNo,
LLVMMetadataRef Ty,
LLVMRustDIFlags Flags,
LLVMValueRef val,
uint32_t AlignInBits
) {
LLVMRustDIBuilderRef Builder, LLVMMetadataRef Scope, const char *Name,
size_t NameLen, LLVMMetadataRef File, unsigned LineNo, LLVMMetadataRef Ty,
LLVMRustDIFlags Flags, LLVMValueRef val, uint32_t AlignInBits) {
return wrap(Builder->createStaticMemberType(
unwrapDI<DIDescriptor>(Scope),
StringRef(Name, NameLen),
unwrapDI<DIFile>(File),
LineNo,
unwrapDI<DIType>(Ty),
fromRust(Flags),
unwrap<llvm::ConstantInt>(val),
unwrapDI<DIDescriptor>(Scope), StringRef(Name, NameLen),
unwrapDI<DIFile>(File), LineNo, unwrapDI<DIType>(Ty), fromRust(Flags),
unwrap<llvm::ConstantInt>(val),
#if LLVM_VERSION_GE(18, 0)
llvm::dwarf::DW_TAG_member,
llvm::dwarf::DW_TAG_member,
#endif
AlignInBits
));
AlignInBits));
}
extern "C" LLVMMetadataRef LLVMRustDIBuilderCreateLexicalBlock(
LLVMRustDIBuilderRef Builder, LLVMMetadataRef Scope,
LLVMMetadataRef File, unsigned Line, unsigned Col) {
extern "C" LLVMMetadataRef
LLVMRustDIBuilderCreateLexicalBlock(LLVMRustDIBuilderRef Builder,
LLVMMetadataRef Scope, LLVMMetadataRef File,
unsigned Line, unsigned Col) {
return wrap(Builder->createLexicalBlock(unwrapDI<DIDescriptor>(Scope),
unwrapDI<DIFile>(File), Line, Col));
}
extern "C" LLVMMetadataRef
LLVMRustDIBuilderCreateLexicalBlockFile(LLVMRustDIBuilderRef Builder,
LLVMMetadataRef Scope,
LLVMMetadataRef File) {
extern "C" LLVMMetadataRef LLVMRustDIBuilderCreateLexicalBlockFile(
LLVMRustDIBuilderRef Builder, LLVMMetadataRef Scope, LLVMMetadataRef File) {
return wrap(Builder->createLexicalBlockFile(unwrapDI<DIDescriptor>(Scope),
unwrapDI<DIFile>(File)));
}
extern "C" LLVMMetadataRef LLVMRustDIBuilderCreateStaticVariable(
LLVMRustDIBuilderRef Builder, LLVMMetadataRef Context,
const char *Name, size_t NameLen,
const char *LinkageName, size_t LinkageNameLen,
LLVMMetadataRef File, unsigned LineNo,
LLVMMetadataRef Ty, bool IsLocalToUnit, LLVMValueRef V,
LLVMMetadataRef Decl = nullptr, uint32_t AlignInBits = 0) {
LLVMRustDIBuilderRef Builder, LLVMMetadataRef Context, const char *Name,
size_t NameLen, const char *LinkageName, size_t LinkageNameLen,
LLVMMetadataRef File, unsigned LineNo, LLVMMetadataRef Ty,
bool IsLocalToUnit, LLVMValueRef V, LLVMMetadataRef Decl = nullptr,
uint32_t AlignInBits = 0) {
llvm::GlobalVariable *InitVal = cast<llvm::GlobalVariable>(unwrap(V));
llvm::DIExpression *InitExpr = nullptr;
@@ -1095,14 +1083,13 @@ extern "C" LLVMMetadataRef LLVMRustDIBuilderCreateStaticVariable(
FPVal->getValueAPF().bitcastToAPInt().getZExtValue());
}
llvm::DIGlobalVariableExpression *VarExpr = Builder->createGlobalVariableExpression(
unwrapDI<DIDescriptor>(Context), StringRef(Name, NameLen),
StringRef(LinkageName, LinkageNameLen),
unwrapDI<DIFile>(File), LineNo, unwrapDI<DIType>(Ty), IsLocalToUnit,
/* isDefined */ true,
InitExpr, unwrapDIPtr<MDNode>(Decl),
/* templateParams */ nullptr,
AlignInBits);
llvm::DIGlobalVariableExpression *VarExpr =
Builder->createGlobalVariableExpression(
unwrapDI<DIDescriptor>(Context), StringRef(Name, NameLen),
StringRef(LinkageName, LinkageNameLen), unwrapDI<DIFile>(File),
LineNo, unwrapDI<DIType>(Ty), IsLocalToUnit,
/* isDefined */ true, InitExpr, unwrapDIPtr<MDNode>(Decl),
/* templateParams */ nullptr, AlignInBits);
InitVal->setMetadata("dbg", VarExpr);
@@ -1111,20 +1098,19 @@ extern "C" LLVMMetadataRef LLVMRustDIBuilderCreateStaticVariable(
extern "C" LLVMMetadataRef LLVMRustDIBuilderCreateVariable(
LLVMRustDIBuilderRef Builder, unsigned Tag, LLVMMetadataRef Scope,
const char *Name, size_t NameLen,
LLVMMetadataRef File, unsigned LineNo,
const char *Name, size_t NameLen, LLVMMetadataRef File, unsigned LineNo,
LLVMMetadataRef Ty, bool AlwaysPreserve, LLVMRustDIFlags Flags,
unsigned ArgNo, uint32_t AlignInBits) {
if (Tag == 0x100) { // DW_TAG_auto_variable
return wrap(Builder->createAutoVariable(
unwrapDI<DIDescriptor>(Scope), StringRef(Name, NameLen),
unwrapDI<DIFile>(File), LineNo,
unwrapDI<DIType>(Ty), AlwaysPreserve, fromRust(Flags), AlignInBits));
unwrapDI<DIFile>(File), LineNo, unwrapDI<DIType>(Ty), AlwaysPreserve,
fromRust(Flags), AlignInBits));
} else {
return wrap(Builder->createParameterVariable(
unwrapDI<DIDescriptor>(Scope), StringRef(Name, NameLen), ArgNo,
unwrapDI<DIFile>(File), LineNo,
unwrapDI<DIType>(Ty), AlwaysPreserve, fromRust(Flags)));
unwrapDI<DIFile>(File), LineNo, unwrapDI<DIType>(Ty), AlwaysPreserve,
fromRust(Flags)));
}
}
@@ -1157,9 +1143,9 @@ extern "C" LLVMValueRef LLVMRustDIBuilderInsertDeclareAtEnd(
LLVMBasicBlockRef InsertAtEnd) {
auto Result = Builder->insertDeclare(
unwrap(V), unwrap<DILocalVariable>(VarInfo),
Builder->createExpression(llvm::ArrayRef<uint64_t>(AddrOps, AddrOpsCount)),
DebugLoc(cast<MDNode>(unwrap(DL))),
unwrap(InsertAtEnd));
Builder->createExpression(
llvm::ArrayRef<uint64_t>(AddrOps, AddrOpsCount)),
DebugLoc(cast<MDNode>(unwrap(DL))), unwrap(InsertAtEnd));
#if LLVM_VERSION_GE(19, 0)
return wrap(Result.get<llvm::Instruction *>());
#else
@@ -1170,21 +1156,20 @@ extern "C" LLVMValueRef LLVMRustDIBuilderInsertDeclareAtEnd(
extern "C" LLVMMetadataRef LLVMRustDIBuilderCreateEnumerator(
LLVMRustDIBuilderRef Builder, const char *Name, size_t NameLen,
const uint64_t Value[2], unsigned SizeInBits, bool IsUnsigned) {
return wrap(Builder->createEnumerator(StringRef(Name, NameLen),
return wrap(Builder->createEnumerator(
StringRef(Name, NameLen),
APSInt(APInt(SizeInBits, ArrayRef<uint64_t>(Value, 2)), IsUnsigned)));
}
extern "C" LLVMMetadataRef LLVMRustDIBuilderCreateEnumerationType(
LLVMRustDIBuilderRef Builder, LLVMMetadataRef Scope,
const char *Name, size_t NameLen,
LLVMMetadataRef File, unsigned LineNumber, uint64_t SizeInBits,
uint32_t AlignInBits, LLVMMetadataRef Elements,
LLVMRustDIBuilderRef Builder, LLVMMetadataRef Scope, const char *Name,
size_t NameLen, LLVMMetadataRef File, unsigned LineNumber,
uint64_t SizeInBits, uint32_t AlignInBits, LLVMMetadataRef Elements,
LLVMMetadataRef ClassTy, bool IsScoped) {
return wrap(Builder->createEnumerationType(
unwrapDI<DIDescriptor>(Scope), StringRef(Name, NameLen),
unwrapDI<DIFile>(File), LineNumber,
SizeInBits, AlignInBits, DINodeArray(unwrapDI<MDTuple>(Elements)),
unwrapDI<DIType>(ClassTy),
unwrapDI<DIFile>(File), LineNumber, SizeInBits, AlignInBits,
DINodeArray(unwrapDI<MDTuple>(Elements)), unwrapDI<DIType>(ClassTy),
#if LLVM_VERSION_GE(18, 0)
/* RunTimeLang */ 0,
#endif
@@ -1192,39 +1177,38 @@ extern "C" LLVMMetadataRef LLVMRustDIBuilderCreateEnumerationType(
}
extern "C" LLVMMetadataRef LLVMRustDIBuilderCreateUnionType(
LLVMRustDIBuilderRef Builder, LLVMMetadataRef Scope,
const char *Name, size_t NameLen,
LLVMMetadataRef File, unsigned LineNumber, uint64_t SizeInBits,
uint32_t AlignInBits, LLVMRustDIFlags Flags, LLVMMetadataRef Elements,
unsigned RunTimeLang, const char *UniqueId, size_t UniqueIdLen) {
LLVMRustDIBuilderRef Builder, LLVMMetadataRef Scope, const char *Name,
size_t NameLen, LLVMMetadataRef File, unsigned LineNumber,
uint64_t SizeInBits, uint32_t AlignInBits, LLVMRustDIFlags Flags,
LLVMMetadataRef Elements, unsigned RunTimeLang, const char *UniqueId,
size_t UniqueIdLen) {
return wrap(Builder->createUnionType(
unwrapDI<DIDescriptor>(Scope), StringRef(Name, NameLen), unwrapDI<DIFile>(File),
LineNumber, SizeInBits, AlignInBits, fromRust(Flags),
DINodeArray(unwrapDI<MDTuple>(Elements)), RunTimeLang,
unwrapDI<DIDescriptor>(Scope), StringRef(Name, NameLen),
unwrapDI<DIFile>(File), LineNumber, SizeInBits, AlignInBits,
fromRust(Flags), DINodeArray(unwrapDI<MDTuple>(Elements)), RunTimeLang,
StringRef(UniqueId, UniqueIdLen)));
}
extern "C" LLVMMetadataRef LLVMRustDIBuilderCreateTemplateTypeParameter(
LLVMRustDIBuilderRef Builder, LLVMMetadataRef Scope,
const char *Name, size_t NameLen, LLVMMetadataRef Ty) {
LLVMRustDIBuilderRef Builder, LLVMMetadataRef Scope, const char *Name,
size_t NameLen, LLVMMetadataRef Ty) {
bool IsDefault = false; // FIXME: should we ever set this true?
return wrap(Builder->createTemplateTypeParameter(
unwrapDI<DIDescriptor>(Scope), StringRef(Name, NameLen), unwrapDI<DIType>(Ty), IsDefault));
unwrapDI<DIDescriptor>(Scope), StringRef(Name, NameLen),
unwrapDI<DIType>(Ty), IsDefault));
}
extern "C" LLVMMetadataRef LLVMRustDIBuilderCreateNameSpace(
LLVMRustDIBuilderRef Builder, LLVMMetadataRef Scope,
const char *Name, size_t NameLen, bool ExportSymbols) {
extern "C" LLVMMetadataRef
LLVMRustDIBuilderCreateNameSpace(LLVMRustDIBuilderRef Builder,
LLVMMetadataRef Scope, const char *Name,
size_t NameLen, bool ExportSymbols) {
return wrap(Builder->createNameSpace(
unwrapDI<DIDescriptor>(Scope), StringRef(Name, NameLen), ExportSymbols
));
unwrapDI<DIDescriptor>(Scope), StringRef(Name, NameLen), ExportSymbols));
}
extern "C" void
LLVMRustDICompositeTypeReplaceArrays(LLVMRustDIBuilderRef Builder,
LLVMMetadataRef CompositeTy,
LLVMMetadataRef Elements,
LLVMMetadataRef Params) {
extern "C" void LLVMRustDICompositeTypeReplaceArrays(
LLVMRustDIBuilderRef Builder, LLVMMetadataRef CompositeTy,
LLVMMetadataRef Elements, LLVMMetadataRef Params) {
DICompositeType *Tmp = unwrapDI<DICompositeType>(CompositeTy);
Builder->replaceArrays(Tmp, DINodeArray(unwrap<MDTuple>(Elements)),
DINodeArray(unwrap<MDTuple>(Params)));
@@ -1235,9 +1219,8 @@ LLVMRustDIBuilderCreateDebugLocation(unsigned Line, unsigned Column,
LLVMMetadataRef ScopeRef,
LLVMMetadataRef InlinedAt) {
MDNode *Scope = unwrapDIPtr<MDNode>(ScopeRef);
DILocation *Loc = DILocation::get(
Scope->getContext(), Line, Column, Scope,
unwrapDIPtr<MDNode>(InlinedAt));
DILocation *Loc = DILocation::get(Scope->getContext(), Line, Column, Scope,
unwrapDIPtr<MDNode>(InlinedAt));
return wrap(Loc);
}
@@ -1258,8 +1241,7 @@ extern "C" void LLVMRustWriteTypeToString(LLVMTypeRef Ty, RustStringRef Str) {
unwrap<llvm::Type>(Ty)->print(OS);
}
extern "C" void LLVMRustWriteValueToString(LLVMValueRef V,
RustStringRef Str) {
extern "C" void LLVMRustWriteValueToString(LLVMValueRef V, RustStringRef Str) {
auto OS = RawRustStringOstream(Str);
if (!V) {
OS << "(null)";
@@ -1281,7 +1263,7 @@ extern "C" void LLVMRustWriteTwineToString(LLVMTwineRef T, RustStringRef Str) {
extern "C" void LLVMRustUnpackOptimizationDiagnostic(
LLVMDiagnosticInfoRef DI, RustStringRef PassNameOut,
LLVMValueRef *FunctionOut, unsigned* Line, unsigned* Column,
LLVMValueRef *FunctionOut, unsigned *Line, unsigned *Column,
RustStringRef FilenameOut, RustStringRef MessageOut) {
// Undefined to call this not on an optimization diagnostic!
llvm::DiagnosticInfoOptimizationBase *Opt =
@@ -1304,17 +1286,15 @@ extern "C" void LLVMRustUnpackOptimizationDiagnostic(
}
enum class LLVMRustDiagnosticLevel {
Error,
Warning,
Note,
Remark,
Error,
Warning,
Note,
Remark,
};
extern "C" void
LLVMRustUnpackInlineAsmDiagnostic(LLVMDiagnosticInfoRef DI,
LLVMRustDiagnosticLevel *LevelOut,
uint64_t *CookieOut,
LLVMTwineRef *MessageOut) {
extern "C" void LLVMRustUnpackInlineAsmDiagnostic(
LLVMDiagnosticInfoRef DI, LLVMRustDiagnosticLevel *LevelOut,
uint64_t *CookieOut, LLVMTwineRef *MessageOut) {
// Undefined to call this not on an inline assembly diagnostic!
llvm::DiagnosticInfoInlineAsm *IA =
static_cast<llvm::DiagnosticInfoInlineAsm *>(unwrap(DI));
@@ -1323,20 +1303,20 @@ LLVMRustUnpackInlineAsmDiagnostic(LLVMDiagnosticInfoRef DI,
*MessageOut = wrap(&IA->getMsgStr());
switch (IA->getSeverity()) {
case DS_Error:
*LevelOut = LLVMRustDiagnosticLevel::Error;
break;
case DS_Warning:
*LevelOut = LLVMRustDiagnosticLevel::Warning;
break;
case DS_Note:
*LevelOut = LLVMRustDiagnosticLevel::Note;
break;
case DS_Remark:
*LevelOut = LLVMRustDiagnosticLevel::Remark;
break;
default:
report_fatal_error("Invalid LLVMRustDiagnosticLevel value!");
case DS_Error:
*LevelOut = LLVMRustDiagnosticLevel::Error;
break;
case DS_Warning:
*LevelOut = LLVMRustDiagnosticLevel::Warning;
break;
case DS_Note:
*LevelOut = LLVMRustDiagnosticLevel::Note;
break;
case DS_Remark:
*LevelOut = LLVMRustDiagnosticLevel::Remark;
break;
default:
report_fatal_error("Invalid LLVMRustDiagnosticLevel value!");
}
}
@@ -1454,61 +1434,61 @@ extern "C" LLVMTypeKind LLVMRustGetTypeKind(LLVMTypeRef Ty) {
return LLVMBFloatTypeKind;
case Type::X86_AMXTyID:
return LLVMX86_AMXTypeKind;
default:
{
std::string error;
auto stream = llvm::raw_string_ostream(error);
stream << "Rust does not support the TypeID: " << unwrap(Ty)->getTypeID()
<< " for the type: " << *unwrap(Ty);
stream.flush();
report_fatal_error(error.c_str());
}
default: {
std::string error;
auto stream = llvm::raw_string_ostream(error);
stream << "Rust does not support the TypeID: " << unwrap(Ty)->getTypeID()
<< " for the type: " << *unwrap(Ty);
stream.flush();
report_fatal_error(error.c_str());
}
}
}
DEFINE_SIMPLE_CONVERSION_FUNCTIONS(SMDiagnostic, LLVMSMDiagnosticRef)
extern "C" LLVMSMDiagnosticRef LLVMRustGetSMDiagnostic(
LLVMDiagnosticInfoRef DI, unsigned *Cookie) {
llvm::DiagnosticInfoSrcMgr *SM = static_cast<llvm::DiagnosticInfoSrcMgr *>(unwrap(DI));
extern "C" LLVMSMDiagnosticRef LLVMRustGetSMDiagnostic(LLVMDiagnosticInfoRef DI,
unsigned *Cookie) {
llvm::DiagnosticInfoSrcMgr *SM =
static_cast<llvm::DiagnosticInfoSrcMgr *>(unwrap(DI));
*Cookie = SM->getLocCookie();
return wrap(&SM->getSMDiag());
}
extern "C" bool LLVMRustUnpackSMDiagnostic(LLVMSMDiagnosticRef DRef,
RustStringRef MessageOut,
RustStringRef BufferOut,
LLVMRustDiagnosticLevel* LevelOut,
unsigned* LocOut,
unsigned* RangesOut,
size_t* NumRanges) {
SMDiagnostic& D = *unwrap(DRef);
extern "C" bool
LLVMRustUnpackSMDiagnostic(LLVMSMDiagnosticRef DRef, RustStringRef MessageOut,
RustStringRef BufferOut,
LLVMRustDiagnosticLevel *LevelOut, unsigned *LocOut,
unsigned *RangesOut, size_t *NumRanges) {
SMDiagnostic &D = *unwrap(DRef);
auto MessageOS = RawRustStringOstream(MessageOut);
MessageOS << D.getMessage();
switch (D.getKind()) {
case SourceMgr::DK_Error:
*LevelOut = LLVMRustDiagnosticLevel::Error;
break;
case SourceMgr::DK_Warning:
*LevelOut = LLVMRustDiagnosticLevel::Warning;
break;
case SourceMgr::DK_Note:
*LevelOut = LLVMRustDiagnosticLevel::Note;
break;
case SourceMgr::DK_Remark:
*LevelOut = LLVMRustDiagnosticLevel::Remark;
break;
default:
report_fatal_error("Invalid LLVMRustDiagnosticLevel value!");
case SourceMgr::DK_Error:
*LevelOut = LLVMRustDiagnosticLevel::Error;
break;
case SourceMgr::DK_Warning:
*LevelOut = LLVMRustDiagnosticLevel::Warning;
break;
case SourceMgr::DK_Note:
*LevelOut = LLVMRustDiagnosticLevel::Note;
break;
case SourceMgr::DK_Remark:
*LevelOut = LLVMRustDiagnosticLevel::Remark;
break;
default:
report_fatal_error("Invalid LLVMRustDiagnosticLevel value!");
}
if (D.getLoc() == SMLoc())
return false;
const SourceMgr &LSM = *D.getSourceMgr();
const MemoryBuffer *LBuf = LSM.getMemoryBuffer(LSM.FindBufferContainingLoc(D.getLoc()));
LLVMRustStringWriteImpl(BufferOut, LBuf->getBufferStart(), LBuf->getBufferSize());
const MemoryBuffer *LBuf =
LSM.getMemoryBuffer(LSM.FindBufferContainingLoc(D.getLoc()));
LLVMRustStringWriteImpl(BufferOut, LBuf->getBufferStart(),
LBuf->getBufferSize());
*LocOut = D.getLoc().getPointer() - LBuf->getBufferStart();
@@ -1525,7 +1505,8 @@ extern "C" bool LLVMRustUnpackSMDiagnostic(LLVMSMDiagnosticRef DRef,
extern "C" OperandBundleDef *LLVMRustBuildOperandBundleDef(const char *Name,
LLVMValueRef *Inputs,
unsigned NumInputs) {
return new OperandBundleDef(Name, ArrayRef<Value*>(unwrap(Inputs), NumInputs));
return new OperandBundleDef(Name,
ArrayRef<Value *>(unwrap(Inputs), NumInputs));
}
extern "C" void LLVMRustFreeOperandBundleDef(OperandBundleDef *Bundle) {
@@ -1533,8 +1514,9 @@ extern "C" void LLVMRustFreeOperandBundleDef(OperandBundleDef *Bundle) {
}
// OpBundlesIndirect is an array of pointers (*not* a pointer to an array).
extern "C" LLVMValueRef LLVMRustBuildCall(LLVMBuilderRef B, LLVMTypeRef Ty, LLVMValueRef Fn,
LLVMValueRef *Args, unsigned NumArgs,
extern "C" LLVMValueRef LLVMRustBuildCall(LLVMBuilderRef B, LLVMTypeRef Ty,
LLVMValueRef Fn, LLVMValueRef *Args,
unsigned NumArgs,
OperandBundleDef **OpBundlesIndirect,
unsigned NumOpBundles) {
Value *Callee = unwrap(Fn);
@@ -1547,17 +1529,19 @@ extern "C" LLVMValueRef LLVMRustBuildCall(LLVMBuilderRef B, LLVMTypeRef Ty, LLVM
OpBundles.push_back(*OpBundlesIndirect[i]);
}
return wrap(unwrap(B)->CreateCall(
FTy, Callee, ArrayRef<Value*>(unwrap(Args), NumArgs),
ArrayRef<OperandBundleDef>(OpBundles)));
return wrap(unwrap(B)->CreateCall(FTy, Callee,
ArrayRef<Value *>(unwrap(Args), NumArgs),
ArrayRef<OperandBundleDef>(OpBundles)));
}
extern "C" LLVMValueRef LLVMRustGetInstrProfIncrementIntrinsic(LLVMModuleRef M) {
extern "C" LLVMValueRef
LLVMRustGetInstrProfIncrementIntrinsic(LLVMModuleRef M) {
return wrap(llvm::Intrinsic::getDeclaration(
unwrap(M), llvm::Intrinsic::instrprof_increment));
}
extern "C" LLVMValueRef LLVMRustGetInstrProfMCDCParametersIntrinsic(LLVMModuleRef M) {
extern "C" LLVMValueRef
LLVMRustGetInstrProfMCDCParametersIntrinsic(LLVMModuleRef M) {
#if LLVM_VERSION_GE(18, 0)
return wrap(llvm::Intrinsic::getDeclaration(
unwrap(M), llvm::Intrinsic::instrprof_mcdc_parameters));
@@ -1566,7 +1550,8 @@ extern "C" LLVMValueRef LLVMRustGetInstrProfMCDCParametersIntrinsic(LLVMModuleRe
#endif
}
extern "C" LLVMValueRef LLVMRustGetInstrProfMCDCTVBitmapUpdateIntrinsic(LLVMModuleRef M) {
extern "C" LLVMValueRef
LLVMRustGetInstrProfMCDCTVBitmapUpdateIntrinsic(LLVMModuleRef M) {
#if LLVM_VERSION_GE(18, 0)
return wrap(llvm::Intrinsic::getDeclaration(
unwrap(M), llvm::Intrinsic::instrprof_mcdc_tvbitmap_update));
@@ -1575,7 +1560,8 @@ extern "C" LLVMValueRef LLVMRustGetInstrProfMCDCTVBitmapUpdateIntrinsic(LLVMModu
#endif
}
extern "C" LLVMValueRef LLVMRustGetInstrProfMCDCCondBitmapIntrinsic(LLVMModuleRef M) {
extern "C" LLVMValueRef
LLVMRustGetInstrProfMCDCCondBitmapIntrinsic(LLVMModuleRef M) {
#if LLVM_VERSION_GE(18, 0)
return wrap(llvm::Intrinsic::getDeclaration(
unwrap(M), llvm::Intrinsic::instrprof_mcdc_condbitmap_update));
@@ -1584,32 +1570,31 @@ extern "C" LLVMValueRef LLVMRustGetInstrProfMCDCCondBitmapIntrinsic(LLVMModuleRe
#endif
}
extern "C" LLVMValueRef LLVMRustBuildMemCpy(LLVMBuilderRef B,
LLVMValueRef Dst, unsigned DstAlign,
LLVMValueRef Src, unsigned SrcAlign,
LLVMValueRef Size, bool IsVolatile) {
return wrap(unwrap(B)->CreateMemCpy(
unwrap(Dst), MaybeAlign(DstAlign),
unwrap(Src), MaybeAlign(SrcAlign),
unwrap(Size), IsVolatile));
extern "C" LLVMValueRef LLVMRustBuildMemCpy(LLVMBuilderRef B, LLVMValueRef Dst,
unsigned DstAlign, LLVMValueRef Src,
unsigned SrcAlign,
LLVMValueRef Size,
bool IsVolatile) {
return wrap(unwrap(B)->CreateMemCpy(unwrap(Dst), MaybeAlign(DstAlign),
unwrap(Src), MaybeAlign(SrcAlign),
unwrap(Size), IsVolatile));
}
extern "C" LLVMValueRef LLVMRustBuildMemMove(LLVMBuilderRef B,
LLVMValueRef Dst, unsigned DstAlign,
LLVMValueRef Src, unsigned SrcAlign,
LLVMValueRef Size, bool IsVolatile) {
return wrap(unwrap(B)->CreateMemMove(
unwrap(Dst), MaybeAlign(DstAlign),
unwrap(Src), MaybeAlign(SrcAlign),
unwrap(Size), IsVolatile));
extern "C" LLVMValueRef
LLVMRustBuildMemMove(LLVMBuilderRef B, LLVMValueRef Dst, unsigned DstAlign,
LLVMValueRef Src, unsigned SrcAlign, LLVMValueRef Size,
bool IsVolatile) {
return wrap(unwrap(B)->CreateMemMove(unwrap(Dst), MaybeAlign(DstAlign),
unwrap(Src), MaybeAlign(SrcAlign),
unwrap(Size), IsVolatile));
}
extern "C" LLVMValueRef LLVMRustBuildMemSet(LLVMBuilderRef B,
LLVMValueRef Dst, unsigned DstAlign,
LLVMValueRef Val,
LLVMValueRef Size, bool IsVolatile) {
return wrap(unwrap(B)->CreateMemSet(
unwrap(Dst), unwrap(Val), unwrap(Size), MaybeAlign(DstAlign), IsVolatile));
extern "C" LLVMValueRef LLVMRustBuildMemSet(LLVMBuilderRef B, LLVMValueRef Dst,
unsigned DstAlign, LLVMValueRef Val,
LLVMValueRef Size,
bool IsVolatile) {
return wrap(unwrap(B)->CreateMemSet(unwrap(Dst), unwrap(Val), unwrap(Size),
MaybeAlign(DstAlign), IsVolatile));
}
// OpBundlesIndirect is an array of pointers (*not* a pointer to an array).
@@ -1630,7 +1615,7 @@ LLVMRustBuildInvoke(LLVMBuilderRef B, LLVMTypeRef Ty, LLVMValueRef Fn,
}
return wrap(unwrap(B)->CreateInvoke(FTy, Callee, unwrap(Then), unwrap(Catch),
ArrayRef<Value*>(unwrap(Args), NumArgs),
ArrayRef<Value *>(unwrap(Args), NumArgs),
ArrayRef<OperandBundleDef>(OpBundles),
Name));
}
@@ -1647,7 +1632,7 @@ LLVMRustBuildCallBr(LLVMBuilderRef B, LLVMTypeRef Ty, LLVMValueRef Fn,
FunctionType *FTy = unwrap<FunctionType>(Ty);
// FIXME: Is there a way around this?
std::vector<BasicBlock*> IndirectDestsUnwrapped;
std::vector<BasicBlock *> IndirectDestsUnwrapped;
IndirectDestsUnwrapped.reserve(NumIndirectDests);
for (unsigned i = 0; i < NumIndirectDests; ++i) {
IndirectDestsUnwrapped.push_back(unwrap(IndirectDests[i]));
@@ -1660,12 +1645,11 @@ LLVMRustBuildCallBr(LLVMBuilderRef B, LLVMTypeRef Ty, LLVMValueRef Fn,
OpBundles.push_back(*OpBundlesIndirect[i]);
}
return wrap(unwrap(B)->CreateCallBr(
FTy, Callee, unwrap(DefaultDest),
ArrayRef<BasicBlock*>(IndirectDestsUnwrapped),
ArrayRef<Value*>(unwrap(Args), NumArgs),
ArrayRef<OperandBundleDef>(OpBundles),
Name));
return wrap(
unwrap(B)->CreateCallBr(FTy, Callee, unwrap(DefaultDest),
ArrayRef<BasicBlock *>(IndirectDestsUnwrapped),
ArrayRef<Value *>(unwrap(Args), NumArgs),
ArrayRef<OperandBundleDef>(OpBundles), Name));
}
extern "C" void LLVMRustPositionBuilderAtStart(LLVMBuilderRef B,
@@ -1765,28 +1749,30 @@ extern "C" void LLVMRustSetLinkage(LLVMValueRef V,
}
extern "C" bool LLVMRustConstIntGetZExtValue(LLVMValueRef CV, uint64_t *value) {
auto C = unwrap<llvm::ConstantInt>(CV);
if (C->getBitWidth() > 64)
return false;
*value = C->getZExtValue();
return true;
auto C = unwrap<llvm::ConstantInt>(CV);
if (C->getBitWidth() > 64)
return false;
*value = C->getZExtValue();
return true;
}
// Returns true if both high and low were successfully set. Fails in case constant wasnt any of
// the common sizes (1, 8, 16, 32, 64, 128 bits)
extern "C" bool LLVMRustConstInt128Get(LLVMValueRef CV, bool sext, uint64_t *high, uint64_t *low)
{
auto C = unwrap<llvm::ConstantInt>(CV);
if (C->getBitWidth() > 128) { return false; }
APInt AP;
if (sext) {
AP = C->getValue().sext(128);
} else {
AP = C->getValue().zext(128);
}
*low = AP.getLoBits(64).getZExtValue();
*high = AP.getHiBits(64).getZExtValue();
return true;
// Returns true if both high and low were successfully set. Fails in case
// constant wasnt any of the common sizes (1, 8, 16, 32, 64, 128 bits)
extern "C" bool LLVMRustConstInt128Get(LLVMValueRef CV, bool sext,
uint64_t *high, uint64_t *low) {
auto C = unwrap<llvm::ConstantInt>(CV);
if (C->getBitWidth() > 128) {
return false;
}
APInt AP;
if (sext) {
AP = C->getValue().sext(128);
} else {
AP = C->getValue().zext(128);
}
*low = AP.getLoBits(64).getZExtValue();
*high = AP.getHiBits(64).getZExtValue();
return true;
}
enum class LLVMRustVisibility {
@@ -1836,8 +1822,7 @@ struct LLVMRustModuleBuffer {
std::string data;
};
extern "C" LLVMRustModuleBuffer*
LLVMRustModuleBufferCreate(LLVMModuleRef M) {
extern "C" LLVMRustModuleBuffer *LLVMRustModuleBufferCreate(LLVMModuleRef M) {
auto Ret = std::make_unique<LLVMRustModuleBuffer>();
{
auto OS = raw_string_ostream(Ret->data);
@@ -1846,30 +1831,26 @@ LLVMRustModuleBufferCreate(LLVMModuleRef M) {
return Ret.release();
}
extern "C" void
LLVMRustModuleBufferFree(LLVMRustModuleBuffer *Buffer) {
extern "C" void LLVMRustModuleBufferFree(LLVMRustModuleBuffer *Buffer) {
delete Buffer;
}
extern "C" const void*
extern "C" const void *
LLVMRustModuleBufferPtr(const LLVMRustModuleBuffer *Buffer) {
return Buffer->data.data();
}
extern "C" size_t
LLVMRustModuleBufferLen(const LLVMRustModuleBuffer *Buffer) {
extern "C" size_t LLVMRustModuleBufferLen(const LLVMRustModuleBuffer *Buffer) {
return Buffer->data.length();
}
extern "C" uint64_t
LLVMRustModuleCost(LLVMModuleRef M) {
extern "C" uint64_t LLVMRustModuleCost(LLVMModuleRef M) {
auto f = unwrap(M)->functions();
return std::distance(std::begin(f), std::end(f));
}
extern "C" void
LLVMRustModuleInstructionStats(LLVMModuleRef M, RustStringRef Str)
{
extern "C" void LLVMRustModuleInstructionStats(LLVMModuleRef M,
RustStringRef Str) {
auto OS = RawRustStringOstream(Str);
auto JOS = llvm::json::OStream(OS);
auto Module = unwrap(M);
@@ -1881,41 +1862,45 @@ LLVMRustModuleInstructionStats(LLVMModuleRef M, RustStringRef Str)
}
// Vector reductions:
extern "C" LLVMValueRef
LLVMRustBuildVectorReduceFAdd(LLVMBuilderRef B, LLVMValueRef Acc, LLVMValueRef Src) {
return wrap(unwrap(B)->CreateFAddReduce(unwrap(Acc),unwrap(Src)));
extern "C" LLVMValueRef LLVMRustBuildVectorReduceFAdd(LLVMBuilderRef B,
LLVMValueRef Acc,
LLVMValueRef Src) {
return wrap(unwrap(B)->CreateFAddReduce(unwrap(Acc), unwrap(Src)));
}
extern "C" LLVMValueRef
LLVMRustBuildVectorReduceFMul(LLVMBuilderRef B, LLVMValueRef Acc, LLVMValueRef Src) {
return wrap(unwrap(B)->CreateFMulReduce(unwrap(Acc),unwrap(Src)));
extern "C" LLVMValueRef LLVMRustBuildVectorReduceFMul(LLVMBuilderRef B,
LLVMValueRef Acc,
LLVMValueRef Src) {
return wrap(unwrap(B)->CreateFMulReduce(unwrap(Acc), unwrap(Src)));
}
extern "C" LLVMValueRef
LLVMRustBuildVectorReduceAdd(LLVMBuilderRef B, LLVMValueRef Src) {
return wrap(unwrap(B)->CreateAddReduce(unwrap(Src)));
extern "C" LLVMValueRef LLVMRustBuildVectorReduceAdd(LLVMBuilderRef B,
LLVMValueRef Src) {
return wrap(unwrap(B)->CreateAddReduce(unwrap(Src)));
}
extern "C" LLVMValueRef
LLVMRustBuildVectorReduceMul(LLVMBuilderRef B, LLVMValueRef Src) {
return wrap(unwrap(B)->CreateMulReduce(unwrap(Src)));
extern "C" LLVMValueRef LLVMRustBuildVectorReduceMul(LLVMBuilderRef B,
LLVMValueRef Src) {
return wrap(unwrap(B)->CreateMulReduce(unwrap(Src)));
}
extern "C" LLVMValueRef
LLVMRustBuildVectorReduceAnd(LLVMBuilderRef B, LLVMValueRef Src) {
return wrap(unwrap(B)->CreateAndReduce(unwrap(Src)));
extern "C" LLVMValueRef LLVMRustBuildVectorReduceAnd(LLVMBuilderRef B,
LLVMValueRef Src) {
return wrap(unwrap(B)->CreateAndReduce(unwrap(Src)));
}
extern "C" LLVMValueRef
LLVMRustBuildVectorReduceOr(LLVMBuilderRef B, LLVMValueRef Src) {
return wrap(unwrap(B)->CreateOrReduce(unwrap(Src)));
extern "C" LLVMValueRef LLVMRustBuildVectorReduceOr(LLVMBuilderRef B,
LLVMValueRef Src) {
return wrap(unwrap(B)->CreateOrReduce(unwrap(Src)));
}
extern "C" LLVMValueRef
LLVMRustBuildVectorReduceXor(LLVMBuilderRef B, LLVMValueRef Src) {
return wrap(unwrap(B)->CreateXorReduce(unwrap(Src)));
extern "C" LLVMValueRef LLVMRustBuildVectorReduceXor(LLVMBuilderRef B,
LLVMValueRef Src) {
return wrap(unwrap(B)->CreateXorReduce(unwrap(Src)));
}
extern "C" LLVMValueRef
LLVMRustBuildVectorReduceMin(LLVMBuilderRef B, LLVMValueRef Src, bool IsSigned) {
return wrap(unwrap(B)->CreateIntMinReduce(unwrap(Src), IsSigned));
extern "C" LLVMValueRef LLVMRustBuildVectorReduceMin(LLVMBuilderRef B,
LLVMValueRef Src,
bool IsSigned) {
return wrap(unwrap(B)->CreateIntMinReduce(unwrap(Src), IsSigned));
}
extern "C" LLVMValueRef
LLVMRustBuildVectorReduceMax(LLVMBuilderRef B, LLVMValueRef Src, bool IsSigned) {
return wrap(unwrap(B)->CreateIntMaxReduce(unwrap(Src), IsSigned));
extern "C" LLVMValueRef LLVMRustBuildVectorReduceMax(LLVMBuilderRef B,
LLVMValueRef Src,
bool IsSigned) {
return wrap(unwrap(B)->CreateIntMaxReduce(unwrap(Src), IsSigned));
}
extern "C" LLVMValueRef
LLVMRustBuildVectorReduceFMin(LLVMBuilderRef B, LLVMValueRef Src, bool NoNaN) {
@@ -1930,32 +1915,28 @@ LLVMRustBuildVectorReduceFMax(LLVMBuilderRef B, LLVMValueRef Src, bool NoNaN) {
return wrap(I);
}
extern "C" LLVMValueRef
LLVMRustBuildMinNum(LLVMBuilderRef B, LLVMValueRef LHS, LLVMValueRef RHS) {
return wrap(unwrap(B)->CreateMinNum(unwrap(LHS),unwrap(RHS)));
extern "C" LLVMValueRef LLVMRustBuildMinNum(LLVMBuilderRef B, LLVMValueRef LHS,
LLVMValueRef RHS) {
return wrap(unwrap(B)->CreateMinNum(unwrap(LHS), unwrap(RHS)));
}
extern "C" LLVMValueRef
LLVMRustBuildMaxNum(LLVMBuilderRef B, LLVMValueRef LHS, LLVMValueRef RHS) {
return wrap(unwrap(B)->CreateMaxNum(unwrap(LHS),unwrap(RHS)));
extern "C" LLVMValueRef LLVMRustBuildMaxNum(LLVMBuilderRef B, LLVMValueRef LHS,
LLVMValueRef RHS) {
return wrap(unwrap(B)->CreateMaxNum(unwrap(LHS), unwrap(RHS)));
}
// This struct contains all necessary info about a symbol exported from a DLL.
struct LLVMRustCOFFShortExport {
const char* name;
const char *name;
bool ordinal_present;
// The value of `ordinal` is only meaningful if `ordinal_present` is true.
uint16_t ordinal;
};
// Machine must be a COFF machine type, as defined in PE specs.
extern "C" LLVMRustResult LLVMRustWriteImportLibrary(
const char* ImportName,
const char* Path,
const LLVMRustCOFFShortExport* Exports,
size_t NumExports,
uint16_t Machine,
bool MinGW)
{
extern "C" LLVMRustResult
LLVMRustWriteImportLibrary(const char *ImportName, const char *Path,
const LLVMRustCOFFShortExport *Exports,
size_t NumExports, uint16_t Machine, bool MinGW) {
std::vector<llvm::object::COFFShortExport> ConvertedExports;
ConvertedExports.reserve(NumExports);
@@ -1963,27 +1944,24 @@ extern "C" LLVMRustResult LLVMRustWriteImportLibrary(
bool ordinal_present = Exports[i].ordinal_present;
uint16_t ordinal = ordinal_present ? Exports[i].ordinal : 0;
ConvertedExports.push_back(llvm::object::COFFShortExport{
Exports[i].name, // Name
std::string{}, // ExtName
std::string{}, // SymbolName
std::string{}, // AliasTarget
Exports[i].name, // Name
std::string{}, // ExtName
std::string{}, // SymbolName
std::string{}, // AliasTarget
#if LLVM_VERSION_GE(19, 0)
std::string{}, // ExportAs
std::string{}, // ExportAs
#endif
ordinal, // Ordinal
ordinal_present, // Noname
false, // Data
false, // Private
false // Constant
ordinal, // Ordinal
ordinal_present, // Noname
false, // Data
false, // Private
false // Constant
});
}
auto Error = llvm::object::writeImportLibrary(
ImportName,
Path,
ConvertedExports,
static_cast<llvm::COFF::MachineTypes>(Machine),
MinGW);
ImportName, Path, ConvertedExports,
static_cast<llvm::COFF::MachineTypes>(Machine), MinGW);
if (Error) {
std::string errorString;
auto stream = llvm::raw_string_ostream(errorString);
@@ -2019,27 +1997,23 @@ using LLVMDiagnosticHandlerTy = DiagnosticHandler::DiagnosticHandlerTy;
// the RemarkPasses array specifies individual passes for which remarks will be
// enabled.
//
// If RemarkFilePath is not NULL, optimization remarks will be streamed directly into this file,
// bypassing the diagnostics handler.
// If RemarkFilePath is not NULL, optimization remarks will be streamed directly
// into this file, bypassing the diagnostics handler.
extern "C" void LLVMRustContextConfigureDiagnosticHandler(
LLVMContextRef C, LLVMDiagnosticHandlerTy DiagnosticHandlerCallback,
void *DiagnosticHandlerContext, bool RemarkAllPasses,
const char * const * RemarkPasses, size_t RemarkPassesLen,
const char * RemarkFilePath,
bool PGOAvailable
) {
const char *const *RemarkPasses, size_t RemarkPassesLen,
const char *RemarkFilePath, bool PGOAvailable) {
class RustDiagnosticHandler final : public DiagnosticHandler {
public:
RustDiagnosticHandler(
LLVMDiagnosticHandlerTy DiagnosticHandlerCallback,
void *DiagnosticHandlerContext,
bool RemarkAllPasses,
std::vector<std::string> RemarkPasses,
std::unique_ptr<ToolOutputFile> RemarksFile,
std::unique_ptr<llvm::remarks::RemarkStreamer> RemarkStreamer,
std::unique_ptr<LLVMRemarkStreamer> LlvmRemarkStreamer
)
LLVMDiagnosticHandlerTy DiagnosticHandlerCallback,
void *DiagnosticHandlerContext, bool RemarkAllPasses,
std::vector<std::string> RemarkPasses,
std::unique_ptr<ToolOutputFile> RemarksFile,
std::unique_ptr<llvm::remarks::RemarkStreamer> RemarkStreamer,
std::unique_ptr<LLVMRemarkStreamer> LlvmRemarkStreamer)
: DiagnosticHandlerCallback(DiagnosticHandlerCallback),
DiagnosticHandlerContext(DiagnosticHandlerContext),
RemarkAllPasses(RemarkAllPasses),
@@ -2049,11 +2023,13 @@ extern "C" void LLVMRustContextConfigureDiagnosticHandler(
LlvmRemarkStreamer(std::move(LlvmRemarkStreamer)) {}
virtual bool handleDiagnostics(const DiagnosticInfo &DI) override {
// If this diagnostic is one of the optimization remark kinds, we can check if it's enabled
// before emitting it. This can avoid many short-lived allocations when unpacking the
// diagnostic and converting its various C++ strings into rust strings.
// FIXME: some diagnostic infos still allocate before we get here, and avoiding that would be
// good in the future. That will require changing a few call sites in LLVM.
// If this diagnostic is one of the optimization remark kinds, we can
// check if it's enabled before emitting it. This can avoid many
// short-lived allocations when unpacking the diagnostic and converting
// its various C++ strings into rust strings.
// FIXME: some diagnostic infos still allocate before we get here, and
// avoiding that would be good in the future. That will require changing a
// few call sites in LLVM.
if (auto *OptDiagBase = dyn_cast<DiagnosticInfoOptimizationBase>(&DI)) {
if (OptDiagBase->isEnabled()) {
if (this->LlvmRemarkStreamer) {
@@ -2109,16 +2085,15 @@ extern "C" void LLVMRustContextConfigureDiagnosticHandler(
bool RemarkAllPasses = false;
std::vector<std::string> RemarkPasses;
// Since LlvmRemarkStreamer contains a pointer to RemarkStreamer, the ordering of the three
// members below is important.
// Since LlvmRemarkStreamer contains a pointer to RemarkStreamer, the
// ordering of the three members below is important.
std::unique_ptr<ToolOutputFile> RemarksFile;
std::unique_ptr<llvm::remarks::RemarkStreamer> RemarkStreamer;
std::unique_ptr<LLVMRemarkStreamer> LlvmRemarkStreamer;
};
std::vector<std::string> Passes;
for (size_t I = 0; I != RemarkPassesLen; ++I)
{
for (size_t I = 0; I != RemarkPassesLen; ++I) {
Passes.push_back(RemarkPasses[I]);
}
@@ -2135,13 +2110,10 @@ extern "C" void LLVMRustContextConfigureDiagnosticHandler(
std::error_code EC;
RemarkFile = std::make_unique<ToolOutputFile>(
RemarkFilePath,
EC,
llvm::sys::fs::OF_TextWithCRLF
);
RemarkFilePath, EC, llvm::sys::fs::OF_TextWithCRLF);
if (EC) {
std::string Error = std::string("Cannot create remark file: ") +
toString(errorCodeToError(EC));
toString(errorCodeToError(EC));
report_fatal_error(Twine(Error));
}
@@ -2149,28 +2121,22 @@ extern "C" void LLVMRustContextConfigureDiagnosticHandler(
RemarkFile->keep();
auto RemarkSerializer = remarks::createRemarkSerializer(
llvm::remarks::Format::YAML,
remarks::SerializerMode::Separate,
RemarkFile->os()
);
if (Error E = RemarkSerializer.takeError())
{
std::string Error = std::string("Cannot create remark serializer: ") + toString(std::move(E));
llvm::remarks::Format::YAML, remarks::SerializerMode::Separate,
RemarkFile->os());
if (Error E = RemarkSerializer.takeError()) {
std::string Error = std::string("Cannot create remark serializer: ") +
toString(std::move(E));
report_fatal_error(Twine(Error));
}
RemarkStreamer = std::make_unique<llvm::remarks::RemarkStreamer>(std::move(*RemarkSerializer));
RemarkStreamer = std::make_unique<llvm::remarks::RemarkStreamer>(
std::move(*RemarkSerializer));
LlvmRemarkStreamer = std::make_unique<LLVMRemarkStreamer>(*RemarkStreamer);
}
unwrap(C)->setDiagnosticHandler(std::make_unique<RustDiagnosticHandler>(
DiagnosticHandlerCallback,
DiagnosticHandlerContext,
RemarkAllPasses,
Passes,
std::move(RemarkFile),
std::move(RemarkStreamer),
std::move(LlvmRemarkStreamer)
));
DiagnosticHandlerCallback, DiagnosticHandlerContext, RemarkAllPasses,
Passes, std::move(RemarkFile), std::move(RemarkStreamer),
std::move(LlvmRemarkStreamer)));
}
extern "C" void LLVMRustGetMangledName(LLVMValueRef V, RustStringRef Str) {
@@ -2180,14 +2146,14 @@ extern "C" void LLVMRustGetMangledName(LLVMValueRef V, RustStringRef Str) {
}
extern "C" int32_t LLVMRustGetElementTypeArgIndex(LLVMValueRef CallSite) {
auto *CB = unwrap<CallBase>(CallSite);
switch (CB->getIntrinsicID()) {
case Intrinsic::arm_ldrex:
return 0;
case Intrinsic::arm_strex:
return 1;
}
return -1;
auto *CB = unwrap<CallBase>(CallSite);
switch (CB->getIntrinsicID()) {
case Intrinsic::arm_ldrex:
return 0;
case Intrinsic::arm_strex:
return 1;
}
return -1;
}
extern "C" bool LLVMRustIsBitcode(char *ptr, size_t len) {
@@ -2214,10 +2180,10 @@ extern "C" bool LLVMRustLLVMHasZstdCompressionForDebugSymbols() {
}
// Operations on composite constants.
// These are clones of LLVM api functions that will become available in future releases.
// They can be removed once Rust's minimum supported LLVM version supports them.
// See https://github.com/rust-lang/rust/issues/121868
// See https://llvm.org/doxygen/group__LLVMCCoreValueConstantComposite.html
// These are clones of LLVM api functions that will become available in future
// releases. They can be removed once Rust's minimum supported LLVM version
// supports them. See https://github.com/rust-lang/rust/issues/121868 See
// https://llvm.org/doxygen/group__LLVMCCoreValueConstantComposite.html
// FIXME: Remove when Rust's minimum supported LLVM version reaches 19.
// https://github.com/llvm/llvm-project/commit/e1405e4f71c899420ebf8262d5e9745598419df8
@@ -2226,6 +2192,7 @@ extern "C" LLVMValueRef LLVMConstStringInContext2(LLVMContextRef C,
const char *Str,
size_t Length,
bool DontNullTerminate) {
return wrap(ConstantDataArray::getString(*unwrap(C), StringRef(Str, Length), !DontNullTerminate));
return wrap(ConstantDataArray::getString(*unwrap(C), StringRef(Str, Length),
!DontNullTerminate));
}
#endif
@@ -1,13 +1,17 @@
#ifndef _rustc_llvm_SuppressLLVMWarnings_h
#define _rustc_llvm_SuppressLLVMWarnings_h
// LLVM currently generates many warnings when compiled using MSVC. These warnings make it difficult
// to diagnose real problems when working on C++ code, so we suppress them.
// LLVM currently generates many warnings when compiled using MSVC. These
// warnings make it difficult to diagnose real problems when working on C++
// code, so we suppress them.
#ifdef _MSC_VER
#pragma warning(disable:4530) // C++ exception handler used, but unwind semantics are not enabled.
#pragma warning(disable:4624) // 'xxx': destructor was implicitly defined as deleted
#pragma warning(disable:4244) // conversion from 'xxx' to 'yyy', possible loss of data
#pragma warning(disable : 4530) // C++ exception handler used, but unwind
// semantics are not enabled.
#pragma warning( \
disable : 4624) // 'xxx': destructor was implicitly defined as deleted
#pragma warning( \
disable : 4244) // conversion from 'xxx' to 'yyy', possible loss of data
#endif
#endif // _rustc_llvm_SuppressLLVMWarnings_h
@@ -34,14 +34,15 @@ static bool isArchiveSymbol(const object::BasicSymbolRef &S) {
typedef void *(*LLVMRustGetSymbolsCallback)(void *, const char *);
typedef void *(*LLVMRustGetSymbolsErrorCallback)(const char *);
// Note: This is implemented in C++ instead of using the C api from Rust as IRObjectFile doesn't
// implement getSymbolName, only printSymbolName, which is inaccessible from the C api.
extern "C" void *LLVMRustGetSymbols(
char *BufPtr, size_t BufLen, void *State, LLVMRustGetSymbolsCallback Callback,
LLVMRustGetSymbolsErrorCallback ErrorCallback) {
std::unique_ptr<MemoryBuffer> Buf =
MemoryBuffer::getMemBuffer(StringRef(BufPtr, BufLen), StringRef("LLVMRustGetSymbolsObject"),
false);
// Note: This is implemented in C++ instead of using the C api from Rust as
// IRObjectFile doesn't implement getSymbolName, only printSymbolName, which is
// inaccessible from the C api.
extern "C" void *
LLVMRustGetSymbols(char *BufPtr, size_t BufLen, void *State,
LLVMRustGetSymbolsCallback Callback,
LLVMRustGetSymbolsErrorCallback ErrorCallback) {
std::unique_ptr<MemoryBuffer> Buf = MemoryBuffer::getMemBuffer(
StringRef(BufPtr, BufLen), StringRef("LLVMRustGetSymbolsObject"), false);
SmallString<0> SymNameBuf;
auto SymName = raw_svector_ostream(SymNameBuf);
@@ -57,7 +58,7 @@ extern "C" void *LLVMRustGetSymbols(
if (Type == file_magic::bitcode) {
auto ObjOrErr = object::SymbolicFile::createSymbolicFile(
Buf->getMemBufferRef(), file_magic::bitcode, &Context);
Buf->getMemBufferRef(), file_magic::bitcode, &Context);
if (!ObjOrErr) {
Error E = ObjOrErr.takeError();
SmallString<0> ErrorBuf;
@@ -67,7 +68,8 @@ extern "C" void *LLVMRustGetSymbols(
}
Obj = std::move(*ObjOrErr);
} else {
auto ObjOrErr = object::SymbolicFile::createSymbolicFile(Buf->getMemBufferRef());
auto ObjOrErr =
object::SymbolicFile::createSymbolicFile(Buf->getMemBufferRef());
if (!ObjOrErr) {
Error E = ObjOrErr.takeError();
SmallString<0> ErrorBuf;
@@ -78,7 +80,6 @@ extern "C" void *LLVMRustGetSymbols(
Obj = std::move(*ObjOrErr);
}
for (const object::BasicSymbolRef &S : Obj->symbols()) {
if (!isArchiveSymbol(S))
continue;
@@ -358,8 +358,11 @@ fn create_match_candidates<'pat>(
where
'a: 'pat,
{
// Assemble a list of candidates: there is one candidate per pattern,
// which means there may be more than one candidate *per arm*.
// Assemble the initial list of candidates. These top-level candidates
// are 1:1 with the original match arms, but other parts of match
// lowering also introduce subcandidates (for subpatterns), and will
// also flatten candidates in some cases. So in general a list of
// candidates does _not_ necessarily correspond to a list of arms.
arms.iter()
.copied()
.map(|arm| {
@@ -1031,6 +1034,12 @@ fn is_empty(&self) -> bool {
}
/// A pattern in a form suitable for generating code.
///
/// Here, "flat" indicates that the pattern's match pairs have been recursively
/// simplified by [`Builder::simplify_match_pairs`]. They are not necessarily
/// flat in an absolute sense.
///
/// Will typically be incorporated into a [`Candidate`].
#[derive(Debug, Clone)]
struct FlatPat<'pat, 'tcx> {
/// To match the pattern, all of these must be satisfied...
@@ -1042,23 +1051,25 @@ struct FlatPat<'pat, 'tcx> {
}
impl<'tcx, 'pat> FlatPat<'pat, 'tcx> {
/// Creates a `FlatPat` containing a simplified [`MatchPair`] list/forest
/// for the given pattern.
fn new(
place: PlaceBuilder<'tcx>,
pattern: &'pat Pat<'tcx>,
cx: &mut Builder<'_, 'tcx>,
) -> Self {
let is_never = pattern.is_never_pattern();
let mut flat_pat = FlatPat {
match_pairs: vec![MatchPair::new(place, pattern, cx)],
extra_data: PatternExtraData {
span: pattern.span,
bindings: Vec::new(),
ascriptions: Vec::new(),
is_never,
},
// First, recursively build a tree of match pairs for the given pattern.
let mut match_pairs = vec![MatchPair::new(place, pattern, cx)];
let mut extra_data = PatternExtraData {
span: pattern.span,
bindings: Vec::new(),
ascriptions: Vec::new(),
is_never: pattern.is_never_pattern(),
};
cx.simplify_match_pairs(&mut flat_pat.match_pairs, &mut flat_pat.extra_data);
flat_pat
// Partly-flatten and sort the match pairs, while recording extra data.
cx.simplify_match_pairs(&mut match_pairs, &mut extra_data);
Self { match_pairs, extra_data }
}
}
@@ -1104,9 +1115,12 @@ fn new(
has_guard: bool,
cx: &mut Builder<'_, 'tcx>,
) -> Self {
// Use `FlatPat` to build simplified match pairs, then immediately
// incorporate them into a new candidate.
Self::from_flat_pat(FlatPat::new(place, pattern, cx), has_guard)
}
/// Incorporates an already-simplified [`FlatPat`] into a new candidate.
fn from_flat_pat(flat_pat: FlatPat<'pat, 'tcx>, has_guard: bool) -> Self {
Candidate {
match_pairs: flat_pat.match_pairs,
@@ -95,6 +95,8 @@ pub(crate) fn false_edges(
}
impl<'pat, 'tcx> MatchPair<'pat, 'tcx> {
/// Recursively builds a `MatchPair` tree for the given pattern and its
/// subpatterns.
pub(in crate::build) fn new(
mut place_builder: PlaceBuilder<'tcx>,
pattern: &'pat Pat<'tcx>,
@@ -32,14 +32,14 @@ pub(super) fn compute_alias_relate_goal(
&mut self,
goal: Goal<I, (I::Term, I::Term, ty::AliasRelationDirection)>,
) -> QueryResult<I> {
let tcx = self.cx();
let cx = self.cx();
let Goal { param_env, predicate: (lhs, rhs, direction) } = goal;
debug_assert!(lhs.to_alias_term().is_some() || rhs.to_alias_term().is_some());
// Structurally normalize the lhs.
let lhs = if let Some(alias) = lhs.to_alias_term() {
let term = self.next_term_infer_of_kind(lhs);
self.add_normalizes_to_goal(goal.with(tcx, ty::NormalizesTo { alias, term }));
self.add_normalizes_to_goal(goal.with(cx, ty::NormalizesTo { alias, term }));
term
} else {
lhs
@@ -48,7 +48,7 @@ pub(super) fn compute_alias_relate_goal(
// Structurally normalize the rhs.
let rhs = if let Some(alias) = rhs.to_alias_term() {
let term = self.next_term_infer_of_kind(rhs);
self.add_normalizes_to_goal(goal.with(tcx, ty::NormalizesTo { alias, term }));
self.add_normalizes_to_goal(goal.with(cx, ty::NormalizesTo { alias, term }));
term
} else {
rhs
@@ -36,11 +36,11 @@ pub(super) trait GoalKind<D, I = <D as SolverDelegate>::Interner>:
{
fn self_ty(self) -> I::Ty;
fn trait_ref(self, tcx: I) -> ty::TraitRef<I>;
fn trait_ref(self, cx: I) -> ty::TraitRef<I>;
fn with_self_ty(self, tcx: I, self_ty: I::Ty) -> Self;
fn with_self_ty(self, cx: I, self_ty: I::Ty) -> Self;
fn trait_def_id(self, tcx: I) -> I::DefId;
fn trait_def_id(self, cx: I) -> I::DefId;
/// Try equating an assumption predicate against a goal's predicate. If it
/// holds, then execute the `then` callback, which should do any additional
@@ -82,7 +82,7 @@ fn probe_and_consider_object_bound_candidate(
assumption: I::Clause,
) -> Result<Candidate<I>, NoSolution> {
Self::probe_and_match_goal_against_assumption(ecx, source, goal, assumption, |ecx| {
let tcx = ecx.cx();
let cx = ecx.cx();
let ty::Dynamic(bounds, _, _) = goal.predicate.self_ty().kind() else {
panic!("expected object type in `probe_and_consider_object_bound_candidate`");
};
@@ -91,7 +91,7 @@ fn probe_and_consider_object_bound_candidate(
structural_traits::predicates_for_object_candidate(
ecx,
goal.param_env,
goal.predicate.trait_ref(tcx),
goal.predicate.trait_ref(cx),
bounds,
),
);
@@ -340,15 +340,15 @@ fn assemble_impl_candidates<G: GoalKind<D>>(
goal: Goal<I, G>,
candidates: &mut Vec<Candidate<I>>,
) {
let tcx = self.cx();
tcx.for_each_relevant_impl(
goal.predicate.trait_def_id(tcx),
let cx = self.cx();
cx.for_each_relevant_impl(
goal.predicate.trait_def_id(cx),
goal.predicate.self_ty(),
|impl_def_id| {
// For every `default impl`, there's always a non-default `impl`
// that will *also* apply. There's no reason to register a candidate
// for this impl, since it is *not* proof that the trait goal holds.
if tcx.impl_is_default(impl_def_id) {
if cx.impl_is_default(impl_def_id) {
return;
}
@@ -366,8 +366,8 @@ fn assemble_builtin_impl_candidates<G: GoalKind<D>>(
goal: Goal<I, G>,
candidates: &mut Vec<Candidate<I>>,
) {
let tcx = self.cx();
let trait_def_id = goal.predicate.trait_def_id(tcx);
let cx = self.cx();
let trait_def_id = goal.predicate.trait_def_id(cx);
// N.B. When assembling built-in candidates for lang items that are also
// `auto` traits, then the auto trait candidate that is assembled in
@@ -378,47 +378,47 @@ fn assemble_builtin_impl_candidates<G: GoalKind<D>>(
// `solve::trait_goals` instead.
let result = if let Err(guar) = goal.predicate.error_reported() {
G::consider_error_guaranteed_candidate(self, guar)
} else if tcx.trait_is_auto(trait_def_id) {
} else if cx.trait_is_auto(trait_def_id) {
G::consider_auto_trait_candidate(self, goal)
} else if tcx.trait_is_alias(trait_def_id) {
} else if cx.trait_is_alias(trait_def_id) {
G::consider_trait_alias_candidate(self, goal)
} else if tcx.is_lang_item(trait_def_id, TraitSolverLangItem::Sized) {
} else if cx.is_lang_item(trait_def_id, TraitSolverLangItem::Sized) {
G::consider_builtin_sized_candidate(self, goal)
} else if tcx.is_lang_item(trait_def_id, TraitSolverLangItem::Copy)
|| tcx.is_lang_item(trait_def_id, TraitSolverLangItem::Clone)
} else if cx.is_lang_item(trait_def_id, TraitSolverLangItem::Copy)
|| cx.is_lang_item(trait_def_id, TraitSolverLangItem::Clone)
{
G::consider_builtin_copy_clone_candidate(self, goal)
} else if tcx.is_lang_item(trait_def_id, TraitSolverLangItem::PointerLike) {
} else if cx.is_lang_item(trait_def_id, TraitSolverLangItem::PointerLike) {
G::consider_builtin_pointer_like_candidate(self, goal)
} else if tcx.is_lang_item(trait_def_id, TraitSolverLangItem::FnPtrTrait) {
} else if cx.is_lang_item(trait_def_id, TraitSolverLangItem::FnPtrTrait) {
G::consider_builtin_fn_ptr_trait_candidate(self, goal)
} else if let Some(kind) = self.cx().fn_trait_kind_from_def_id(trait_def_id) {
G::consider_builtin_fn_trait_candidates(self, goal, kind)
} else if let Some(kind) = self.cx().async_fn_trait_kind_from_def_id(trait_def_id) {
G::consider_builtin_async_fn_trait_candidates(self, goal, kind)
} else if tcx.is_lang_item(trait_def_id, TraitSolverLangItem::AsyncFnKindHelper) {
} else if cx.is_lang_item(trait_def_id, TraitSolverLangItem::AsyncFnKindHelper) {
G::consider_builtin_async_fn_kind_helper_candidate(self, goal)
} else if tcx.is_lang_item(trait_def_id, TraitSolverLangItem::Tuple) {
} else if cx.is_lang_item(trait_def_id, TraitSolverLangItem::Tuple) {
G::consider_builtin_tuple_candidate(self, goal)
} else if tcx.is_lang_item(trait_def_id, TraitSolverLangItem::PointeeTrait) {
} else if cx.is_lang_item(trait_def_id, TraitSolverLangItem::PointeeTrait) {
G::consider_builtin_pointee_candidate(self, goal)
} else if tcx.is_lang_item(trait_def_id, TraitSolverLangItem::Future) {
} else if cx.is_lang_item(trait_def_id, TraitSolverLangItem::Future) {
G::consider_builtin_future_candidate(self, goal)
} else if tcx.is_lang_item(trait_def_id, TraitSolverLangItem::Iterator) {
} else if cx.is_lang_item(trait_def_id, TraitSolverLangItem::Iterator) {
G::consider_builtin_iterator_candidate(self, goal)
} else if tcx.is_lang_item(trait_def_id, TraitSolverLangItem::FusedIterator) {
} else if cx.is_lang_item(trait_def_id, TraitSolverLangItem::FusedIterator) {
G::consider_builtin_fused_iterator_candidate(self, goal)
} else if tcx.is_lang_item(trait_def_id, TraitSolverLangItem::AsyncIterator) {
} else if cx.is_lang_item(trait_def_id, TraitSolverLangItem::AsyncIterator) {
G::consider_builtin_async_iterator_candidate(self, goal)
} else if tcx.is_lang_item(trait_def_id, TraitSolverLangItem::Coroutine) {
} else if cx.is_lang_item(trait_def_id, TraitSolverLangItem::Coroutine) {
G::consider_builtin_coroutine_candidate(self, goal)
} else if tcx.is_lang_item(trait_def_id, TraitSolverLangItem::DiscriminantKind) {
} else if cx.is_lang_item(trait_def_id, TraitSolverLangItem::DiscriminantKind) {
G::consider_builtin_discriminant_kind_candidate(self, goal)
} else if tcx.is_lang_item(trait_def_id, TraitSolverLangItem::AsyncDestruct) {
} else if cx.is_lang_item(trait_def_id, TraitSolverLangItem::AsyncDestruct) {
G::consider_builtin_async_destruct_candidate(self, goal)
} else if tcx.is_lang_item(trait_def_id, TraitSolverLangItem::Destruct) {
} else if cx.is_lang_item(trait_def_id, TraitSolverLangItem::Destruct) {
G::consider_builtin_destruct_candidate(self, goal)
} else if tcx.is_lang_item(trait_def_id, TraitSolverLangItem::TransmuteTrait) {
} else if cx.is_lang_item(trait_def_id, TraitSolverLangItem::TransmuteTrait) {
G::consider_builtin_transmute_candidate(self, goal)
} else {
Err(NoSolution)
@@ -428,7 +428,7 @@ fn assemble_builtin_impl_candidates<G: GoalKind<D>>(
// There may be multiple unsize candidates for a trait with several supertraits:
// `trait Foo: Bar<A> + Bar<B>` and `dyn Foo: Unsize<dyn Bar<_>>`
if tcx.is_lang_item(trait_def_id, TraitSolverLangItem::Unsize) {
if cx.is_lang_item(trait_def_id, TraitSolverLangItem::Unsize) {
candidates.extend(G::consider_structural_builtin_unsize_candidates(self, goal));
}
}
@@ -557,8 +557,8 @@ fn assemble_object_bound_candidates<G: GoalKind<D>>(
goal: Goal<I, G>,
candidates: &mut Vec<Candidate<I>>,
) {
let tcx = self.cx();
if !tcx.trait_may_be_implemented_via_object(goal.predicate.trait_def_id(tcx)) {
let cx = self.cx();
if !cx.trait_may_be_implemented_via_object(goal.predicate.trait_def_id(cx)) {
return;
}
@@ -596,7 +596,7 @@ fn assemble_object_bound_candidates<G: GoalKind<D>>(
};
// Do not consider built-in object impls for non-object-safe types.
if bounds.principal_def_id().is_some_and(|def_id| !tcx.trait_is_object_safe(def_id)) {
if bounds.principal_def_id().is_some_and(|def_id| !cx.trait_is_object_safe(def_id)) {
return;
}
@@ -614,7 +614,7 @@ fn assemble_object_bound_candidates<G: GoalKind<D>>(
self,
CandidateSource::BuiltinImpl(BuiltinImplSource::Misc),
goal,
bound.with_self_ty(tcx, self_ty),
bound.with_self_ty(cx, self_ty),
));
}
}
@@ -624,14 +624,13 @@ fn assemble_object_bound_candidates<G: GoalKind<D>>(
// since we don't need to look at any supertrait or anything if we are doing
// a projection goal.
if let Some(principal) = bounds.principal() {
let principal_trait_ref = principal.with_self_ty(tcx, self_ty);
for (idx, assumption) in D::elaborate_supertraits(tcx, principal_trait_ref).enumerate()
{
let principal_trait_ref = principal.with_self_ty(cx, self_ty);
for (idx, assumption) in D::elaborate_supertraits(cx, principal_trait_ref).enumerate() {
candidates.extend(G::probe_and_consider_object_bound_candidate(
self,
CandidateSource::BuiltinImpl(BuiltinImplSource::Object(idx)),
goal,
assumption.upcast(tcx),
assumption.upcast(cx),
));
}
}
@@ -649,11 +648,11 @@ fn assemble_coherence_unknowable_candidates<G: GoalKind<D>>(
goal: Goal<I, G>,
candidates: &mut Vec<Candidate<I>>,
) {
let tcx = self.cx();
let cx = self.cx();
candidates.extend(self.probe_trait_candidate(CandidateSource::CoherenceUnknowable).enter(
|ecx| {
let trait_ref = goal.predicate.trait_ref(tcx);
let trait_ref = goal.predicate.trait_ref(cx);
if ecx.trait_ref_is_knowable(goal.param_env, trait_ref)? {
Err(NoSolution)
} else {
@@ -678,9 +677,9 @@ fn discard_impls_shadowed_by_env<G: GoalKind<D>>(
goal: Goal<I, G>,
candidates: &mut Vec<Candidate<I>>,
) {
let tcx = self.cx();
let cx = self.cx();
let trait_goal: Goal<I, ty::TraitPredicate<I>> =
goal.with(tcx, goal.predicate.trait_ref(tcx));
goal.with(cx, goal.predicate.trait_ref(cx));
let mut trait_candidates_from_env = vec![];
self.probe(|_| ProbeKind::ShadowedEnvProbing).enter(|ecx| {
@@ -23,7 +23,7 @@ pub(in crate::solve) fn instantiate_constituent_tys_for_auto_trait<D, I>(
D: SolverDelegate<Interner = I>,
I: Interner,
{
let tcx = ecx.cx();
let cx = ecx.cx();
match ty.kind() {
ty::Uint(_)
| ty::Int(_)
@@ -36,7 +36,7 @@ pub(in crate::solve) fn instantiate_constituent_tys_for_auto_trait<D, I>(
| ty::Char => Ok(vec![]),
// Treat `str` like it's defined as `struct str([u8]);`
ty::Str => Ok(vec![ty::Binder::dummy(Ty::new_slice(tcx, Ty::new_u8(tcx)))]),
ty::Str => Ok(vec![ty::Binder::dummy(Ty::new_slice(cx, Ty::new_u8(cx)))]),
ty::Dynamic(..)
| ty::Param(..)
@@ -79,21 +79,21 @@ pub(in crate::solve) fn instantiate_constituent_tys_for_auto_trait<D, I>(
.cx()
.bound_coroutine_hidden_types(def_id)
.into_iter()
.map(|bty| bty.instantiate(tcx, args))
.map(|bty| bty.instantiate(cx, args))
.collect()),
// For `PhantomData<T>`, we pass `T`.
ty::Adt(def, args) if def.is_phantom_data() => Ok(vec![ty::Binder::dummy(args.type_at(0))]),
ty::Adt(def, args) => {
Ok(def.all_field_tys(tcx).iter_instantiated(tcx, args).map(ty::Binder::dummy).collect())
Ok(def.all_field_tys(cx).iter_instantiated(cx, args).map(ty::Binder::dummy).collect())
}
ty::Alias(ty::Opaque, ty::AliasTy { def_id, args, .. }) => {
// We can resolve the `impl Trait` to its concrete type,
// which enforces a DAG between the functions requiring
// the auto trait bounds in question.
Ok(vec![ty::Binder::dummy(tcx.type_of(def_id).instantiate(tcx, args))])
Ok(vec![ty::Binder::dummy(cx.type_of(def_id).instantiate(cx, args))])
}
}
}
@@ -247,18 +247,18 @@ pub(in crate::solve) fn instantiate_constituent_tys_for_copy_clone_trait<D, I>(
// Returns a binder of the tupled inputs types and output type from a builtin callable type.
pub(in crate::solve) fn extract_tupled_inputs_and_output_from_callable<I: Interner>(
tcx: I,
cx: I,
self_ty: I::Ty,
goal_kind: ty::ClosureKind,
) -> Result<Option<ty::Binder<I, (I::Ty, I::Ty)>>, NoSolution> {
match self_ty.kind() {
// keep this in sync with assemble_fn_pointer_candidates until the old solver is removed.
ty::FnDef(def_id, args) => {
let sig = tcx.fn_sig(def_id);
if sig.skip_binder().is_fn_trait_compatible() && !tcx.has_target_features(def_id) {
let sig = cx.fn_sig(def_id);
if sig.skip_binder().is_fn_trait_compatible() && !cx.has_target_features(def_id) {
Ok(Some(
sig.instantiate(tcx, args)
.map_bound(|sig| (Ty::new_tup(tcx, sig.inputs().as_slice()), sig.output())),
sig.instantiate(cx, args)
.map_bound(|sig| (Ty::new_tup(cx, sig.inputs().as_slice()), sig.output())),
))
} else {
Err(NoSolution)
@@ -268,7 +268,7 @@ pub(in crate::solve) fn extract_tupled_inputs_and_output_from_callable<I: Intern
ty::FnPtr(sig) => {
if sig.is_fn_trait_compatible() {
Ok(Some(
sig.map_bound(|sig| (Ty::new_tup(tcx, sig.inputs().as_slice()), sig.output())),
sig.map_bound(|sig| (Ty::new_tup(cx, sig.inputs().as_slice()), sig.output())),
))
} else {
Err(NoSolution)
@@ -323,10 +323,10 @@ pub(in crate::solve) fn extract_tupled_inputs_and_output_from_callable<I: Intern
}
coroutine_closure_to_certain_coroutine(
tcx,
cx,
goal_kind,
// No captures by ref, so this doesn't matter.
Region::new_static(tcx),
Region::new_static(cx),
def_id,
args,
sig,
@@ -339,9 +339,9 @@ pub(in crate::solve) fn extract_tupled_inputs_and_output_from_callable<I: Intern
}
coroutine_closure_to_ambiguous_coroutine(
tcx,
cx,
goal_kind, // No captures by ref, so this doesn't matter.
Region::new_static(tcx),
Region::new_static(cx),
def_id,
args,
sig,
@@ -403,7 +403,7 @@ pub(in crate::solve) struct AsyncCallableRelevantTypes<I: Interner> {
// which enforces the closure is actually callable with the given trait. When we
// know the kind already, we can short-circuit this check.
pub(in crate::solve) fn extract_tupled_inputs_and_output_from_async_callable<I: Interner>(
tcx: I,
cx: I,
self_ty: I::Ty,
goal_kind: ty::ClosureKind,
env_region: I::Region,
@@ -422,9 +422,7 @@ pub(in crate::solve) fn extract_tupled_inputs_and_output_from_async_callable<I:
return Err(NoSolution);
}
coroutine_closure_to_certain_coroutine(
tcx, goal_kind, env_region, def_id, args, sig,
)
coroutine_closure_to_certain_coroutine(cx, goal_kind, env_region, def_id, args, sig)
} else {
// When we don't know the closure kind (and therefore also the closure's upvars,
// which are computed at the same time), we must delay the computation of the
@@ -435,15 +433,15 @@ pub(in crate::solve) fn extract_tupled_inputs_and_output_from_async_callable<I:
// coroutine upvars respecting the closure kind.
nested.push(
ty::TraitRef::new(
tcx,
tcx.require_lang_item(TraitSolverLangItem::AsyncFnKindHelper),
[kind_ty, Ty::from_closure_kind(tcx, goal_kind)],
cx,
cx.require_lang_item(TraitSolverLangItem::AsyncFnKindHelper),
[kind_ty, Ty::from_closure_kind(cx, goal_kind)],
)
.upcast(tcx),
.upcast(cx),
);
coroutine_closure_to_ambiguous_coroutine(
tcx, goal_kind, env_region, def_id, args, sig,
cx, goal_kind, env_region, def_id, args, sig,
)
};
@@ -458,21 +456,21 @@ pub(in crate::solve) fn extract_tupled_inputs_and_output_from_async_callable<I:
}
ty::FnDef(..) | ty::FnPtr(..) => {
let bound_sig = self_ty.fn_sig(tcx);
let bound_sig = self_ty.fn_sig(cx);
let sig = bound_sig.skip_binder();
let future_trait_def_id = tcx.require_lang_item(TraitSolverLangItem::Future);
let future_trait_def_id = cx.require_lang_item(TraitSolverLangItem::Future);
// `FnDef` and `FnPtr` only implement `AsyncFn*` when their
// return type implements `Future`.
let nested = vec![
bound_sig
.rebind(ty::TraitRef::new(tcx, future_trait_def_id, [sig.output()]))
.upcast(tcx),
.rebind(ty::TraitRef::new(cx, future_trait_def_id, [sig.output()]))
.upcast(cx),
];
let future_output_def_id = tcx.require_lang_item(TraitSolverLangItem::FutureOutput);
let future_output_ty = Ty::new_projection(tcx, future_output_def_id, [sig.output()]);
let future_output_def_id = cx.require_lang_item(TraitSolverLangItem::FutureOutput);
let future_output_ty = Ty::new_projection(cx, future_output_def_id, [sig.output()]);
Ok((
bound_sig.rebind(AsyncCallableRelevantTypes {
tupled_inputs_ty: Ty::new_tup(tcx, sig.inputs().as_slice()),
tupled_inputs_ty: Ty::new_tup(cx, sig.inputs().as_slice()),
output_coroutine_ty: sig.output(),
coroutine_return_ty: future_output_ty,
}),
@@ -483,13 +481,13 @@ pub(in crate::solve) fn extract_tupled_inputs_and_output_from_async_callable<I:
let args = args.as_closure();
let bound_sig = args.sig();
let sig = bound_sig.skip_binder();
let future_trait_def_id = tcx.require_lang_item(TraitSolverLangItem::Future);
let future_trait_def_id = cx.require_lang_item(TraitSolverLangItem::Future);
// `Closure`s only implement `AsyncFn*` when their return type
// implements `Future`.
let mut nested = vec![
bound_sig
.rebind(ty::TraitRef::new(tcx, future_trait_def_id, [sig.output()]))
.upcast(tcx),
.rebind(ty::TraitRef::new(cx, future_trait_def_id, [sig.output()]))
.upcast(cx),
];
// Additionally, we need to check that the closure kind
@@ -501,7 +499,7 @@ pub(in crate::solve) fn extract_tupled_inputs_and_output_from_async_callable<I:
}
} else {
let async_fn_kind_trait_def_id =
tcx.require_lang_item(TraitSolverLangItem::AsyncFnKindHelper);
cx.require_lang_item(TraitSolverLangItem::AsyncFnKindHelper);
// When we don't know the closure kind (and therefore also the closure's upvars,
// which are computed at the same time), we must delay the computation of the
// generator's upvars. We do this using the `AsyncFnKindHelper`, which as a trait
@@ -511,16 +509,16 @@ pub(in crate::solve) fn extract_tupled_inputs_and_output_from_async_callable<I:
// coroutine upvars respecting the closure kind.
nested.push(
ty::TraitRef::new(
tcx,
cx,
async_fn_kind_trait_def_id,
[kind_ty, Ty::from_closure_kind(tcx, goal_kind)],
[kind_ty, Ty::from_closure_kind(cx, goal_kind)],
)
.upcast(tcx),
.upcast(cx),
);
}
let future_output_def_id = tcx.require_lang_item(TraitSolverLangItem::FutureOutput);
let future_output_ty = Ty::new_projection(tcx, future_output_def_id, [sig.output()]);
let future_output_def_id = cx.require_lang_item(TraitSolverLangItem::FutureOutput);
let future_output_ty = Ty::new_projection(cx, future_output_def_id, [sig.output()]);
Ok((
bound_sig.rebind(AsyncCallableRelevantTypes {
tupled_inputs_ty: sig.inputs().get(0).unwrap(),
@@ -565,7 +563,7 @@ pub(in crate::solve) fn extract_tupled_inputs_and_output_from_async_callable<I:
/// Given a coroutine-closure, project to its returned coroutine when we are *certain*
/// that the closure's kind is compatible with the goal.
fn coroutine_closure_to_certain_coroutine<I: Interner>(
tcx: I,
cx: I,
goal_kind: ty::ClosureKind,
goal_region: I::Region,
def_id: I::DefId,
@@ -573,9 +571,9 @@ fn coroutine_closure_to_certain_coroutine<I: Interner>(
sig: ty::CoroutineClosureSignature<I>,
) -> I::Ty {
sig.to_coroutine_given_kind_and_upvars(
tcx,
cx,
args.parent_args(),
tcx.coroutine_for_closure(def_id),
cx.coroutine_for_closure(def_id),
goal_kind,
goal_region,
args.tupled_upvars_ty(),
@@ -589,20 +587,20 @@ fn coroutine_closure_to_certain_coroutine<I: Interner>(
///
/// Note that we do not also push a `AsyncFnKindHelper` goal here.
fn coroutine_closure_to_ambiguous_coroutine<I: Interner>(
tcx: I,
cx: I,
goal_kind: ty::ClosureKind,
goal_region: I::Region,
def_id: I::DefId,
args: ty::CoroutineClosureArgs<I>,
sig: ty::CoroutineClosureSignature<I>,
) -> I::Ty {
let upvars_projection_def_id = tcx.require_lang_item(TraitSolverLangItem::AsyncFnKindUpvars);
let upvars_projection_def_id = cx.require_lang_item(TraitSolverLangItem::AsyncFnKindUpvars);
let tupled_upvars_ty = Ty::new_projection(
tcx,
cx,
upvars_projection_def_id,
[
I::GenericArg::from(args.kind_ty()),
Ty::from_closure_kind(tcx, goal_kind).into(),
Ty::from_closure_kind(cx, goal_kind).into(),
goal_region.into(),
sig.tupled_inputs_ty.into(),
args.tupled_upvars_ty().into(),
@@ -610,10 +608,10 @@ fn coroutine_closure_to_ambiguous_coroutine<I: Interner>(
],
);
sig.to_coroutine(
tcx,
cx,
args.parent_args(),
Ty::from_closure_kind(tcx, goal_kind),
tcx.coroutine_for_closure(def_id),
Ty::from_closure_kind(cx, goal_kind),
cx.coroutine_for_closure(def_id),
tupled_upvars_ty,
)
}
@@ -668,28 +666,28 @@ pub(in crate::solve) fn predicates_for_object_candidate<D, I>(
D: SolverDelegate<Interner = I>,
I: Interner,
{
let tcx = ecx.cx();
let cx = ecx.cx();
let mut requirements = vec![];
requirements
.extend(tcx.super_predicates_of(trait_ref.def_id).iter_instantiated(tcx, trait_ref.args));
.extend(cx.super_predicates_of(trait_ref.def_id).iter_instantiated(cx, trait_ref.args));
// FIXME(associated_const_equality): Also add associated consts to
// the requirements here.
for associated_type_def_id in tcx.associated_type_def_ids(trait_ref.def_id) {
for associated_type_def_id in cx.associated_type_def_ids(trait_ref.def_id) {
// associated types that require `Self: Sized` do not show up in the built-in
// implementation of `Trait for dyn Trait`, and can be dropped here.
if tcx.generics_require_sized_self(associated_type_def_id) {
if cx.generics_require_sized_self(associated_type_def_id) {
continue;
}
requirements
.extend(tcx.item_bounds(associated_type_def_id).iter_instantiated(tcx, trait_ref.args));
.extend(cx.item_bounds(associated_type_def_id).iter_instantiated(cx, trait_ref.args));
}
let mut replace_projection_with = HashMap::default();
for bound in object_bounds.iter() {
if let ty::ExistentialPredicate::Projection(proj) = bound.skip_binder() {
let proj = proj.with_self_ty(tcx, trait_ref.self_ty());
let proj = proj.with_self_ty(cx, trait_ref.self_ty());
let old_ty = replace_projection_with.insert(proj.def_id(), bound.rebind(proj));
assert_eq!(
old_ty,
@@ -709,7 +707,7 @@ pub(in crate::solve) fn predicates_for_object_candidate<D, I>(
folder
.nested
.into_iter()
.chain(folded_requirements.into_iter().map(|clause| Goal::new(tcx, param_env, clause)))
.chain(folded_requirements.into_iter().map(|clause| Goal::new(cx, param_env, clause)))
.collect()
}
@@ -239,14 +239,14 @@ pub(super) fn enter_root<R>(
/// This function takes care of setting up the inference context, setting the anchor,
/// and registering opaques from the canonicalized input.
fn enter_canonical<R>(
tcx: I,
cx: I,
search_graph: &'a mut search_graph::SearchGraph<I>,
canonical_input: CanonicalInput<I>,
canonical_goal_evaluation: &mut ProofTreeBuilder<D>,
f: impl FnOnce(&mut EvalCtxt<'_, D>, Goal<I, I::Predicate>) -> R,
) -> R {
let (ref delegate, input, var_values) =
SolverDelegate::build_with_canonical(tcx, search_graph.solver_mode(), &canonical_input);
SolverDelegate::build_with_canonical(cx, search_graph.solver_mode(), &canonical_input);
let mut ecx = EvalCtxt {
delegate,
@@ -292,9 +292,9 @@ fn enter_canonical<R>(
/// Instead of calling this function directly, use either [EvalCtxt::evaluate_goal]
/// if you're inside of the solver or [SolverDelegateEvalExt::evaluate_root_goal] if you're
/// outside of it.
#[instrument(level = "debug", skip(tcx, search_graph, goal_evaluation), ret)]
#[instrument(level = "debug", skip(cx, search_graph, goal_evaluation), ret)]
fn evaluate_canonical_goal(
tcx: I,
cx: I,
search_graph: &'a mut search_graph::SearchGraph<I>,
canonical_input: CanonicalInput<I>,
goal_evaluation: &mut ProofTreeBuilder<D>,
@@ -307,12 +307,12 @@ fn evaluate_canonical_goal(
// The actual solver logic happens in `ecx.compute_goal`.
let result = ensure_sufficient_stack(|| {
search_graph.with_new_goal(
tcx,
cx,
canonical_input,
&mut canonical_goal_evaluation,
|search_graph, canonical_goal_evaluation| {
EvalCtxt::enter_canonical(
tcx,
cx,
search_graph,
canonical_input,
canonical_goal_evaluation,
@@ -506,7 +506,7 @@ pub(super) fn try_evaluate_added_goals(&mut self) -> Result<Certainty, NoSolutio
///
/// Goals for the next step get directly added to the nested goals of the `EvalCtxt`.
fn evaluate_added_goals_step(&mut self) -> Result<Option<Certainty>, NoSolution> {
let tcx = self.cx();
let cx = self.cx();
let mut goals = core::mem::take(&mut self.nested_goals);
// If this loop did not result in any progress, what's our final certainty.
@@ -516,7 +516,7 @@ fn evaluate_added_goals_step(&mut self) -> Result<Option<Certainty>, NoSolution>
// RHS does not affect projection candidate assembly.
let unconstrained_rhs = self.next_term_infer_of_kind(goal.predicate.term);
let unconstrained_goal = goal.with(
tcx,
cx,
ty::NormalizesTo { alias: goal.predicate.alias, term: unconstrained_rhs },
);
@@ -777,7 +777,7 @@ pub(super) fn relate_rigid_alias_non_alias(
// NOTE: this check is purely an optimization, the structural eq would
// always fail if the term is not an inference variable.
if term.is_infer() {
let tcx = self.cx();
let cx = self.cx();
// We need to relate `alias` to `term` treating only the outermost
// constructor as rigid, relating any contained generic arguments as
// normal. We do this by first structurally equating the `term`
@@ -787,8 +787,8 @@ pub(super) fn relate_rigid_alias_non_alias(
// Alternatively we could modify `Equate` for this case by adding another
// variant to `StructurallyRelateAliases`.
let identity_args = self.fresh_args_for_item(alias.def_id);
let rigid_ctor = ty::AliasTerm::new_from_args(tcx, alias.def_id, identity_args);
let ctor_term = rigid_ctor.to_term(tcx);
let rigid_ctor = ty::AliasTerm::new_from_args(cx, alias.def_id, identity_args);
let ctor_term = rigid_ctor.to_term(cx);
let obligations =
self.delegate.eq_structurally_relating_aliases(param_env, term, ctor_term)?;
debug_assert!(obligations.is_empty());
@@ -323,13 +323,13 @@ pub fn new_canonical_goal_evaluation(
pub fn finalize_canonical_goal_evaluation(
&mut self,
tcx: I,
cx: I,
) -> Option<I::CanonicalGoalEvaluationStepRef> {
self.as_mut().map(|this| match this {
DebugSolver::CanonicalGoalEvaluation(evaluation) => {
let final_revision = mem::take(&mut evaluation.final_revision).unwrap();
let final_revision =
tcx.intern_canonical_goal_evaluation_step(final_revision.finalize());
cx.intern_canonical_goal_evaluation_step(final_revision.finalize());
let kind = WipCanonicalGoalEvaluationKind::Interned { final_revision };
assert_eq!(evaluation.kind.replace(kind), None);
final_revision
@@ -34,7 +34,7 @@
/// How many fixpoint iterations we should attempt inside of the solver before bailing
/// with overflow.
///
/// We previously used `tcx.recursion_limit().0.checked_ilog2().unwrap_or(0)` for this.
/// We previously used `cx.recursion_limit().0.checked_ilog2().unwrap_or(0)` for this.
/// However, it feels unlikely that uncreasing the recursion limit by a power of two
/// to get one more itereation is every useful or desirable. We now instead used a constant
/// here. If there ever ends up some use-cases where a bigger number of fixpoint iterations
@@ -285,7 +285,7 @@ fn structurally_normalize_ty(
}
fn response_no_constraints_raw<I: Interner>(
tcx: I,
cx: I,
max_universe: ty::UniverseIndex,
variables: I::CanonicalVars,
certainty: Certainty,
@@ -294,10 +294,10 @@ fn response_no_constraints_raw<I: Interner>(
max_universe,
variables,
value: Response {
var_values: ty::CanonicalVarValues::make_identity(tcx, variables),
// FIXME: maybe we should store the "no response" version in tcx, like
// we do for tcx.types and stuff.
external_constraints: tcx.mk_external_constraints(ExternalConstraintsData::default()),
var_values: ty::CanonicalVarValues::make_identity(cx, variables),
// FIXME: maybe we should store the "no response" version in cx, like
// we do for cx.types and stuff.
external_constraints: cx.mk_external_constraints(ExternalConstraintsData::default()),
certainty,
},
defining_opaque_types: Default::default(),
@@ -19,21 +19,21 @@ pub(super) fn normalize_inherent_associated_type(
&mut self,
goal: Goal<I, ty::NormalizesTo<I>>,
) -> QueryResult<I> {
let tcx = self.cx();
let inherent = goal.predicate.alias.expect_ty(tcx);
let cx = self.cx();
let inherent = goal.predicate.alias.expect_ty(cx);
let impl_def_id = tcx.parent(inherent.def_id);
let impl_def_id = cx.parent(inherent.def_id);
let impl_args = self.fresh_args_for_item(impl_def_id);
// Equate impl header and add impl where clauses
self.eq(
goal.param_env,
inherent.self_ty(),
tcx.type_of(impl_def_id).instantiate(tcx, impl_args),
cx.type_of(impl_def_id).instantiate(cx, impl_args),
)?;
// Equate IAT with the RHS of the project goal
let inherent_args = inherent.rebase_inherent_args_onto_impl(impl_args, tcx);
let inherent_args = inherent.rebase_inherent_args_onto_impl(impl_args, cx);
// Check both where clauses on the impl and IAT
//
@@ -43,12 +43,12 @@ pub(super) fn normalize_inherent_associated_type(
// and I don't think the assoc item where-bounds are allowed to be coinductive.
self.add_goals(
GoalSource::Misc,
tcx.predicates_of(inherent.def_id)
.iter_instantiated(tcx, inherent_args)
.map(|pred| goal.with(tcx, pred)),
cx.predicates_of(inherent.def_id)
.iter_instantiated(cx, inherent_args)
.map(|pred| goal.with(cx, pred)),
);
let normalized = tcx.type_of(inherent.def_id).instantiate(tcx, inherent_args);
let normalized = cx.type_of(inherent.def_id).instantiate(cx, inherent_args);
self.instantiate_normalizes_to_term(goal, normalized.into());
self.evaluate_added_goals_and_make_canonical_response(Certainty::Yes)
}
@@ -84,16 +84,16 @@ fn self_ty(self) -> I::Ty {
self.self_ty()
}
fn trait_ref(self, tcx: I) -> ty::TraitRef<I> {
self.alias.trait_ref(tcx)
fn trait_ref(self, cx: I) -> ty::TraitRef<I> {
self.alias.trait_ref(cx)
}
fn with_self_ty(self, tcx: I, self_ty: I::Ty) -> Self {
self.with_self_ty(tcx, self_ty)
fn with_self_ty(self, cx: I, self_ty: I::Ty) -> Self {
self.with_self_ty(cx, self_ty)
}
fn trait_def_id(self, tcx: I) -> I::DefId {
self.trait_def_id(tcx)
fn trait_def_id(self, cx: I) -> I::DefId {
self.trait_def_id(cx)
}
fn probe_and_match_goal_against_assumption(
@@ -105,7 +105,7 @@ fn probe_and_match_goal_against_assumption(
) -> Result<Candidate<I>, NoSolution> {
if let Some(projection_pred) = assumption.as_projection_clause() {
if projection_pred.projection_def_id() == goal.predicate.def_id() {
let tcx = ecx.cx();
let cx = ecx.cx();
ecx.probe_trait_candidate(source).enter(|ecx| {
let assumption_projection_pred =
ecx.instantiate_binder_with_infer(projection_pred);
@@ -120,9 +120,9 @@ fn probe_and_match_goal_against_assumption(
// Add GAT where clauses from the trait's definition
ecx.add_goals(
GoalSource::Misc,
tcx.own_predicates_of(goal.predicate.def_id())
.iter_instantiated(tcx, goal.predicate.alias.args)
.map(|pred| goal.with(tcx, pred)),
cx.own_predicates_of(goal.predicate.def_id())
.iter_instantiated(cx, goal.predicate.alias.args)
.map(|pred| goal.with(cx, pred)),
);
then(ecx)
@@ -140,19 +140,19 @@ fn consider_impl_candidate(
goal: Goal<I, NormalizesTo<I>>,
impl_def_id: I::DefId,
) -> Result<Candidate<I>, NoSolution> {
let tcx = ecx.cx();
let cx = ecx.cx();
let goal_trait_ref = goal.predicate.alias.trait_ref(tcx);
let impl_trait_ref = tcx.impl_trait_ref(impl_def_id);
let goal_trait_ref = goal.predicate.alias.trait_ref(cx);
let impl_trait_ref = cx.impl_trait_ref(impl_def_id);
if !ecx.cx().args_may_unify_deep(
goal.predicate.alias.trait_ref(tcx).args,
goal.predicate.alias.trait_ref(cx).args,
impl_trait_ref.skip_binder().args,
) {
return Err(NoSolution);
}
// We have to ignore negative impls when projecting.
let impl_polarity = tcx.impl_polarity(impl_def_id);
let impl_polarity = cx.impl_polarity(impl_def_id);
match impl_polarity {
ty::ImplPolarity::Negative => return Err(NoSolution),
ty::ImplPolarity::Reservation => {
@@ -163,22 +163,22 @@ fn consider_impl_candidate(
ecx.probe_trait_candidate(CandidateSource::Impl(impl_def_id)).enter(|ecx| {
let impl_args = ecx.fresh_args_for_item(impl_def_id);
let impl_trait_ref = impl_trait_ref.instantiate(tcx, impl_args);
let impl_trait_ref = impl_trait_ref.instantiate(cx, impl_args);
ecx.eq(goal.param_env, goal_trait_ref, impl_trait_ref)?;
let where_clause_bounds = tcx
let where_clause_bounds = cx
.predicates_of(impl_def_id)
.iter_instantiated(tcx, impl_args)
.map(|pred| goal.with(tcx, pred));
.iter_instantiated(cx, impl_args)
.map(|pred| goal.with(cx, pred));
ecx.add_goals(GoalSource::ImplWhereBound, where_clause_bounds);
// Add GAT where clauses from the trait's definition
ecx.add_goals(
GoalSource::Misc,
tcx.own_predicates_of(goal.predicate.def_id())
.iter_instantiated(tcx, goal.predicate.alias.args)
.map(|pred| goal.with(tcx, pred)),
cx.own_predicates_of(goal.predicate.def_id())
.iter_instantiated(cx, goal.predicate.alias.args)
.map(|pred| goal.with(cx, pred)),
);
// In case the associated item is hidden due to specialization, we have to
@@ -195,21 +195,21 @@ fn consider_impl_candidate(
};
let error_response = |ecx: &mut EvalCtxt<'_, D>, msg: &str| {
let guar = tcx.delay_bug(msg);
let error_term = match goal.predicate.alias.kind(tcx) {
ty::AliasTermKind::ProjectionTy => Ty::new_error(tcx, guar).into(),
ty::AliasTermKind::ProjectionConst => Const::new_error(tcx, guar).into(),
let guar = cx.delay_bug(msg);
let error_term = match goal.predicate.alias.kind(cx) {
ty::AliasTermKind::ProjectionTy => Ty::new_error(cx, guar).into(),
ty::AliasTermKind::ProjectionConst => Const::new_error(cx, guar).into(),
kind => panic!("expected projection, found {kind:?}"),
};
ecx.instantiate_normalizes_to_term(goal, error_term);
ecx.evaluate_added_goals_and_make_canonical_response(Certainty::Yes)
};
if !tcx.has_item_definition(target_item_def_id) {
if !cx.has_item_definition(target_item_def_id) {
return error_response(ecx, "missing item");
}
let target_container_def_id = tcx.parent(target_item_def_id);
let target_container_def_id = cx.parent(target_item_def_id);
// Getting the right args here is complex, e.g. given:
// - a goal `<Vec<u32> as Trait<i32>>::Assoc<u64>`
@@ -229,22 +229,22 @@ fn consider_impl_candidate(
target_container_def_id,
)?;
if !tcx.check_args_compatible(target_item_def_id, target_args) {
if !cx.check_args_compatible(target_item_def_id, target_args) {
return error_response(ecx, "associated item has mismatched arguments");
}
// Finally we construct the actual value of the associated type.
let term = match goal.predicate.alias.kind(tcx) {
let term = match goal.predicate.alias.kind(cx) {
ty::AliasTermKind::ProjectionTy => {
tcx.type_of(target_item_def_id).map_bound(|ty| ty.into())
cx.type_of(target_item_def_id).map_bound(|ty| ty.into())
}
ty::AliasTermKind::ProjectionConst => {
if tcx.features().associated_const_equality() {
if cx.features().associated_const_equality() {
panic!("associated const projection is not supported yet")
} else {
ty::EarlyBinder::bind(
Const::new_error_with_message(
tcx,
cx,
"associated const projection is not supported yet",
)
.into(),
@@ -254,7 +254,7 @@ fn consider_impl_candidate(
kind => panic!("expected projection, found {kind:?}"),
};
ecx.instantiate_normalizes_to_term(goal, term.instantiate(tcx, target_args));
ecx.instantiate_normalizes_to_term(goal, term.instantiate(cx, target_args));
ecx.evaluate_added_goals_and_make_canonical_response(Certainty::Yes)
})
}
@@ -316,10 +316,10 @@ fn consider_builtin_fn_trait_candidates(
goal: Goal<I, Self>,
goal_kind: ty::ClosureKind,
) -> Result<Candidate<I>, NoSolution> {
let tcx = ecx.cx();
let cx = ecx.cx();
let tupled_inputs_and_output =
match structural_traits::extract_tupled_inputs_and_output_from_callable(
tcx,
cx,
goal.predicate.self_ty(),
goal_kind,
)? {
@@ -329,19 +329,19 @@ fn consider_builtin_fn_trait_candidates(
}
};
let output_is_sized_pred = tupled_inputs_and_output.map_bound(|(_, output)| {
ty::TraitRef::new(tcx, tcx.require_lang_item(TraitSolverLangItem::Sized), [output])
ty::TraitRef::new(cx, cx.require_lang_item(TraitSolverLangItem::Sized), [output])
});
let pred = tupled_inputs_and_output
.map_bound(|(inputs, output)| ty::ProjectionPredicate {
projection_term: ty::AliasTerm::new(
tcx,
cx,
goal.predicate.def_id(),
[goal.predicate.self_ty(), inputs],
),
term: output.into(),
})
.upcast(tcx);
.upcast(cx);
// A built-in `Fn` impl only holds if the output is sized.
// (FIXME: technically we only need to check this if the type is a fn ptr...)
@@ -350,7 +350,7 @@ fn consider_builtin_fn_trait_candidates(
CandidateSource::BuiltinImpl(BuiltinImplSource::Misc),
goal,
pred,
[(GoalSource::ImplWhereBound, goal.with(tcx, output_is_sized_pred))],
[(GoalSource::ImplWhereBound, goal.with(cx, output_is_sized_pred))],
)
}
@@ -359,27 +359,23 @@ fn consider_builtin_async_fn_trait_candidates(
goal: Goal<I, Self>,
goal_kind: ty::ClosureKind,
) -> Result<Candidate<I>, NoSolution> {
let tcx = ecx.cx();
let cx = ecx.cx();
let env_region = match goal_kind {
ty::ClosureKind::Fn | ty::ClosureKind::FnMut => goal.predicate.alias.args.region_at(2),
// Doesn't matter what this region is
ty::ClosureKind::FnOnce => Region::new_static(tcx),
ty::ClosureKind::FnOnce => Region::new_static(cx),
};
let (tupled_inputs_and_output_and_coroutine, nested_preds) =
structural_traits::extract_tupled_inputs_and_output_from_async_callable(
tcx,
cx,
goal.predicate.self_ty(),
goal_kind,
env_region,
)?;
let output_is_sized_pred = tupled_inputs_and_output_and_coroutine.map_bound(
|AsyncCallableRelevantTypes { output_coroutine_ty: output_ty, .. }| {
ty::TraitRef::new(
tcx,
tcx.require_lang_item(TraitSolverLangItem::Sized),
[output_ty],
)
ty::TraitRef::new(cx, cx.require_lang_item(TraitSolverLangItem::Sized), [output_ty])
},
);
@@ -390,23 +386,23 @@ fn consider_builtin_async_fn_trait_candidates(
output_coroutine_ty,
coroutine_return_ty,
}| {
let (projection_term, term) = if tcx
let (projection_term, term) = if cx
.is_lang_item(goal.predicate.def_id(), TraitSolverLangItem::CallOnceFuture)
{
(
ty::AliasTerm::new(
tcx,
cx,
goal.predicate.def_id(),
[goal.predicate.self_ty(), tupled_inputs_ty],
),
output_coroutine_ty.into(),
)
} else if tcx
} else if cx
.is_lang_item(goal.predicate.def_id(), TraitSolverLangItem::CallRefFuture)
{
(
ty::AliasTerm::new(
tcx,
cx,
goal.predicate.def_id(),
[
I::GenericArg::from(goal.predicate.self_ty()),
@@ -416,13 +412,13 @@ fn consider_builtin_async_fn_trait_candidates(
),
output_coroutine_ty.into(),
)
} else if tcx.is_lang_item(
} else if cx.is_lang_item(
goal.predicate.def_id(),
TraitSolverLangItem::AsyncFnOnceOutput,
) {
(
ty::AliasTerm::new(
tcx,
cx,
goal.predicate.def_id(),
[
I::GenericArg::from(goal.predicate.self_ty()),
@@ -440,7 +436,7 @@ fn consider_builtin_async_fn_trait_candidates(
ty::ProjectionPredicate { projection_term, term }
},
)
.upcast(tcx);
.upcast(cx);
// A built-in `AsyncFn` impl only holds if the output is sized.
// (FIXME: technically we only need to check this if the type is a fn ptr...)
@@ -449,9 +445,9 @@ fn consider_builtin_async_fn_trait_candidates(
CandidateSource::BuiltinImpl(BuiltinImplSource::Misc),
goal,
pred,
[goal.with(tcx, output_is_sized_pred)]
[goal.with(cx, output_is_sized_pred)]
.into_iter()
.chain(nested_preds.into_iter().map(|pred| goal.with(tcx, pred)))
.chain(nested_preds.into_iter().map(|pred| goal.with(cx, pred)))
.map(|goal| (GoalSource::ImplWhereBound, goal)),
)
}
@@ -514,8 +510,8 @@ fn consider_builtin_pointee_candidate(
ecx: &mut EvalCtxt<'_, D>,
goal: Goal<I, Self>,
) -> Result<Candidate<I>, NoSolution> {
let tcx = ecx.cx();
let metadata_def_id = tcx.require_lang_item(TraitSolverLangItem::Metadata);
let cx = ecx.cx();
let metadata_def_id = cx.require_lang_item(TraitSolverLangItem::Metadata);
assert_eq!(metadata_def_id, goal.predicate.def_id());
ecx.probe_builtin_trait_candidate(BuiltinImplSource::Misc).enter(|ecx| {
let metadata_ty = match goal.predicate.self_ty().kind() {
@@ -537,16 +533,16 @@ fn consider_builtin_pointee_candidate(
| ty::CoroutineWitness(..)
| ty::Never
| ty::Foreign(..)
| ty::Dynamic(_, _, ty::DynStar) => Ty::new_unit(tcx),
| ty::Dynamic(_, _, ty::DynStar) => Ty::new_unit(cx),
ty::Error(e) => Ty::new_error(tcx, e),
ty::Error(e) => Ty::new_error(cx, e),
ty::Str | ty::Slice(_) => Ty::new_usize(tcx),
ty::Str | ty::Slice(_) => Ty::new_usize(cx),
ty::Dynamic(_, _, ty::Dyn) => {
let dyn_metadata = tcx.require_lang_item(TraitSolverLangItem::DynMetadata);
tcx.type_of(dyn_metadata)
.instantiate(tcx, &[I::GenericArg::from(goal.predicate.self_ty())])
let dyn_metadata = cx.require_lang_item(TraitSolverLangItem::DynMetadata);
cx.type_of(dyn_metadata)
.instantiate(cx, &[I::GenericArg::from(goal.predicate.self_ty())])
}
ty::Alias(_, _) | ty::Param(_) | ty::Placeholder(..) => {
@@ -555,26 +551,26 @@ fn consider_builtin_pointee_candidate(
// FIXME(ptr_metadata): This impl overlaps with the other impls and shouldn't
// exist. Instead, `Pointee<Metadata = ()>` should be a supertrait of `Sized`.
let sized_predicate = ty::TraitRef::new(
tcx,
tcx.require_lang_item(TraitSolverLangItem::Sized),
cx,
cx.require_lang_item(TraitSolverLangItem::Sized),
[I::GenericArg::from(goal.predicate.self_ty())],
);
// FIXME(-Znext-solver=coinductive): Should this be `GoalSource::ImplWhereBound`?
ecx.add_goal(GoalSource::Misc, goal.with(tcx, sized_predicate));
Ty::new_unit(tcx)
ecx.add_goal(GoalSource::Misc, goal.with(cx, sized_predicate));
Ty::new_unit(cx)
}
ty::Adt(def, args) if def.is_struct() => match def.struct_tail_ty(tcx) {
None => Ty::new_unit(tcx),
ty::Adt(def, args) if def.is_struct() => match def.struct_tail_ty(cx) {
None => Ty::new_unit(cx),
Some(tail_ty) => {
Ty::new_projection(tcx, metadata_def_id, [tail_ty.instantiate(tcx, args)])
Ty::new_projection(cx, metadata_def_id, [tail_ty.instantiate(cx, args)])
}
},
ty::Adt(_, _) => Ty::new_unit(tcx),
ty::Adt(_, _) => Ty::new_unit(cx),
ty::Tuple(elements) => match elements.last() {
None => Ty::new_unit(tcx),
Some(tail_ty) => Ty::new_projection(tcx, metadata_def_id, [tail_ty]),
None => Ty::new_unit(cx),
Some(tail_ty) => Ty::new_projection(cx, metadata_def_id, [tail_ty]),
},
ty::Infer(
@@ -601,8 +597,8 @@ fn consider_builtin_future_candidate(
};
// Coroutines are not futures unless they come from `async` desugaring
let tcx = ecx.cx();
if !tcx.coroutine_is_async(def_id) {
let cx = ecx.cx();
if !cx.coroutine_is_async(def_id) {
return Err(NoSolution);
}
@@ -616,7 +612,7 @@ fn consider_builtin_future_candidate(
projection_term: ty::AliasTerm::new(ecx.cx(), goal.predicate.def_id(), [self_ty]),
term,
}
.upcast(tcx),
.upcast(cx),
// Technically, we need to check that the future type is Sized,
// but that's already proven by the coroutine being WF.
[],
@@ -633,8 +629,8 @@ fn consider_builtin_iterator_candidate(
};
// Coroutines are not Iterators unless they come from `gen` desugaring
let tcx = ecx.cx();
if !tcx.coroutine_is_gen(def_id) {
let cx = ecx.cx();
if !cx.coroutine_is_gen(def_id) {
return Err(NoSolution);
}
@@ -648,7 +644,7 @@ fn consider_builtin_iterator_candidate(
projection_term: ty::AliasTerm::new(ecx.cx(), goal.predicate.def_id(), [self_ty]),
term,
}
.upcast(tcx),
.upcast(cx),
// Technically, we need to check that the iterator type is Sized,
// but that's already proven by the generator being WF.
[],
@@ -672,8 +668,8 @@ fn consider_builtin_async_iterator_candidate(
};
// Coroutines are not AsyncIterators unless they come from `gen` desugaring
let tcx = ecx.cx();
if !tcx.coroutine_is_async_gen(def_id) {
let cx = ecx.cx();
if !cx.coroutine_is_async_gen(def_id) {
return Err(NoSolution);
}
@@ -682,12 +678,12 @@ fn consider_builtin_async_iterator_candidate(
// Take `AsyncIterator<Item = I>` and turn it into the corresponding
// coroutine yield ty `Poll<Option<I>>`.
let wrapped_expected_ty = Ty::new_adt(
tcx,
tcx.adt_def(tcx.require_lang_item(TraitSolverLangItem::Poll)),
tcx.mk_args(&[Ty::new_adt(
tcx,
tcx.adt_def(tcx.require_lang_item(TraitSolverLangItem::Option)),
tcx.mk_args(&[expected_ty.into()]),
cx,
cx.adt_def(cx.require_lang_item(TraitSolverLangItem::Poll)),
cx.mk_args(&[Ty::new_adt(
cx,
cx.adt_def(cx.require_lang_item(TraitSolverLangItem::Option)),
cx.mk_args(&[expected_ty.into()]),
)
.into()]),
);
@@ -708,18 +704,17 @@ fn consider_builtin_coroutine_candidate(
};
// `async`-desugared coroutines do not implement the coroutine trait
let tcx = ecx.cx();
if !tcx.is_general_coroutine(def_id) {
let cx = ecx.cx();
if !cx.is_general_coroutine(def_id) {
return Err(NoSolution);
}
let coroutine = args.as_coroutine();
let term = if tcx
.is_lang_item(goal.predicate.def_id(), TraitSolverLangItem::CoroutineReturn)
let term = if cx.is_lang_item(goal.predicate.def_id(), TraitSolverLangItem::CoroutineReturn)
{
coroutine.return_ty().into()
} else if tcx.is_lang_item(goal.predicate.def_id(), TraitSolverLangItem::CoroutineYield) {
} else if cx.is_lang_item(goal.predicate.def_id(), TraitSolverLangItem::CoroutineYield) {
coroutine.yield_ty().into()
} else {
panic!("unexpected associated item `{:?}` for `{self_ty:?}`", goal.predicate.def_id())
@@ -737,7 +732,7 @@ fn consider_builtin_coroutine_candidate(
),
term,
}
.upcast(tcx),
.upcast(cx),
// Technically, we need to check that the coroutine type is Sized,
// but that's already proven by the coroutine being WF.
[],
@@ -884,29 +879,29 @@ fn translate_args(
impl_trait_ref: rustc_type_ir::TraitRef<I>,
target_container_def_id: I::DefId,
) -> Result<I::GenericArgs, NoSolution> {
let tcx = self.cx();
let cx = self.cx();
Ok(if target_container_def_id == impl_trait_ref.def_id {
// Default value from the trait definition. No need to rebase.
goal.predicate.alias.args
} else if target_container_def_id == impl_def_id {
// Same impl, no need to fully translate, just a rebase from
// the trait is sufficient.
goal.predicate.alias.args.rebase_onto(tcx, impl_trait_ref.def_id, impl_args)
goal.predicate.alias.args.rebase_onto(cx, impl_trait_ref.def_id, impl_args)
} else {
let target_args = self.fresh_args_for_item(target_container_def_id);
let target_trait_ref =
tcx.impl_trait_ref(target_container_def_id).instantiate(tcx, target_args);
cx.impl_trait_ref(target_container_def_id).instantiate(cx, target_args);
// Relate source impl to target impl by equating trait refs.
self.eq(goal.param_env, impl_trait_ref, target_trait_ref)?;
// Also add predicates since they may be needed to constrain the
// target impl's params.
self.add_goals(
GoalSource::Misc,
tcx.predicates_of(target_container_def_id)
.iter_instantiated(tcx, target_args)
.map(|pred| goal.with(tcx, pred)),
cx.predicates_of(target_container_def_id)
.iter_instantiated(cx, target_args)
.map(|pred| goal.with(cx, pred)),
);
goal.predicate.alias.args.rebase_onto(tcx, impl_trait_ref.def_id, target_args)
goal.predicate.alias.args.rebase_onto(cx, impl_trait_ref.def_id, target_args)
})
}
}
@@ -18,7 +18,7 @@ pub(super) fn normalize_opaque_type(
&mut self,
goal: Goal<I, ty::NormalizesTo<I>>,
) -> QueryResult<I> {
let tcx = self.cx();
let cx = self.cx();
let opaque_ty = goal.predicate.alias;
let expected = goal.predicate.term.as_type().expect("no such thing as an opaque const");
@@ -86,7 +86,7 @@ pub(super) fn normalize_opaque_type(
}
(Reveal::All, _) => {
// FIXME: Add an assertion that opaque type storage is empty.
let actual = tcx.type_of(opaque_ty.def_id).instantiate(tcx, opaque_ty.args);
let actual = cx.type_of(opaque_ty.def_id).instantiate(cx, opaque_ty.args);
self.eq(goal.param_env, expected, actual)?;
self.evaluate_added_goals_and_make_canonical_response(Certainty::Yes)
}
@@ -98,7 +98,7 @@ pub(super) fn normalize_opaque_type(
///
/// FIXME: Interner argument is needed to constrain the `I` parameter.
pub fn uses_unique_placeholders_ignoring_regions<I: Interner>(
_interner: I,
_cx: I,
args: I::GenericArgs,
) -> Result<(), NotUniqueParam<I>> {
let mut seen = GrowableBitSet::default();
@@ -18,18 +18,18 @@ pub(super) fn normalize_weak_type(
&mut self,
goal: Goal<I, ty::NormalizesTo<I>>,
) -> QueryResult<I> {
let tcx = self.cx();
let cx = self.cx();
let weak_ty = goal.predicate.alias;
// Check where clauses
self.add_goals(
GoalSource::Misc,
tcx.predicates_of(weak_ty.def_id)
.iter_instantiated(tcx, weak_ty.args)
.map(|pred| goal.with(tcx, pred)),
cx.predicates_of(weak_ty.def_id)
.iter_instantiated(cx, weak_ty.args)
.map(|pred| goal.with(cx, pred)),
);
let actual = tcx.type_of(weak_ty.def_id).instantiate(tcx, weak_ty.args);
let actual = cx.type_of(weak_ty.def_id).instantiate(cx, weak_ty.args);
self.instantiate_normalizes_to_term(goal, actual.into());
self.evaluate_added_goals_and_make_canonical_response(Certainty::Yes)
@@ -14,10 +14,10 @@ pub(super) fn compute_projection_goal(
&mut self,
goal: Goal<I, ProjectionPredicate<I>>,
) -> QueryResult<I> {
let tcx = self.cx();
let projection_term = goal.predicate.projection_term.to_term(tcx);
let cx = self.cx();
let projection_term = goal.predicate.projection_term.to_term(cx);
let goal = goal.with(
tcx,
cx,
ty::PredicateKind::AliasRelate(
projection_term,
goal.predicate.term,
@@ -164,7 +164,7 @@ pub(super) fn is_empty(&self) -> bool {
/// the remaining depth of all nested goals to prevent hangs
/// in case there is exponential blowup.
fn allowed_depth_for_nested(
tcx: I,
cx: I,
stack: &IndexVec<StackDepth, StackEntry<I>>,
) -> Option<SolverLimit> {
if let Some(last) = stack.raw.last() {
@@ -178,18 +178,18 @@ fn allowed_depth_for_nested(
SolverLimit(last.available_depth.0 - 1)
})
} else {
Some(SolverLimit(tcx.recursion_limit()))
Some(SolverLimit(cx.recursion_limit()))
}
}
fn stack_coinductive_from(
tcx: I,
cx: I,
stack: &IndexVec<StackDepth, StackEntry<I>>,
head: StackDepth,
) -> bool {
stack.raw[head.index()..]
.iter()
.all(|entry| entry.input.value.goal.predicate.is_coinductive(tcx))
.all(|entry| entry.input.value.goal.predicate.is_coinductive(cx))
}
// When encountering a solver cycle, the result of the current goal
@@ -247,8 +247,8 @@ fn clear_dependent_provisional_results(
/// so we use a separate cache. Alternatively we could use
/// a single cache and share it between coherence and ordinary
/// trait solving.
pub(super) fn global_cache(&self, tcx: I) -> I::EvaluationCache {
tcx.evaluation_cache(self.mode)
pub(super) fn global_cache(&self, cx: I) -> I::EvaluationCache {
cx.evaluation_cache(self.mode)
}
/// Probably the most involved method of the whole solver.
@@ -257,24 +257,24 @@ pub(super) fn global_cache(&self, tcx: I) -> I::EvaluationCache {
/// handles caching, overflow, and coinductive cycles.
pub(super) fn with_new_goal<D: SolverDelegate<Interner = I>>(
&mut self,
tcx: I,
cx: I,
input: CanonicalInput<I>,
inspect: &mut ProofTreeBuilder<D>,
mut prove_goal: impl FnMut(&mut Self, &mut ProofTreeBuilder<D>) -> QueryResult<I>,
) -> QueryResult<I> {
self.check_invariants();
// Check for overflow.
let Some(available_depth) = Self::allowed_depth_for_nested(tcx, &self.stack) else {
let Some(available_depth) = Self::allowed_depth_for_nested(cx, &self.stack) else {
if let Some(last) = self.stack.raw.last_mut() {
last.encountered_overflow = true;
}
inspect
.canonical_goal_evaluation_kind(inspect::WipCanonicalGoalEvaluationKind::Overflow);
return Self::response_no_constraints(tcx, input, Certainty::overflow(true));
return Self::response_no_constraints(cx, input, Certainty::overflow(true));
};
if let Some(result) = self.lookup_global_cache(tcx, input, available_depth, inspect) {
if let Some(result) = self.lookup_global_cache(cx, input, available_depth, inspect) {
debug!("global cache hit");
return result;
}
@@ -287,12 +287,12 @@ pub(super) fn with_new_goal<D: SolverDelegate<Interner = I>>(
if let Some(entry) = cache_entry
.with_coinductive_stack
.as_ref()
.filter(|p| Self::stack_coinductive_from(tcx, &self.stack, p.head))
.filter(|p| Self::stack_coinductive_from(cx, &self.stack, p.head))
.or_else(|| {
cache_entry
.with_inductive_stack
.as_ref()
.filter(|p| !Self::stack_coinductive_from(tcx, &self.stack, p.head))
.filter(|p| !Self::stack_coinductive_from(cx, &self.stack, p.head))
})
{
debug!("provisional cache hit");
@@ -315,7 +315,7 @@ pub(super) fn with_new_goal<D: SolverDelegate<Interner = I>>(
inspect.canonical_goal_evaluation_kind(
inspect::WipCanonicalGoalEvaluationKind::CycleInStack,
);
let is_coinductive_cycle = Self::stack_coinductive_from(tcx, &self.stack, stack_depth);
let is_coinductive_cycle = Self::stack_coinductive_from(cx, &self.stack, stack_depth);
let usage_kind = if is_coinductive_cycle {
HasBeenUsed::COINDUCTIVE_CYCLE
} else {
@@ -328,9 +328,9 @@ pub(super) fn with_new_goal<D: SolverDelegate<Interner = I>>(
return if let Some(result) = self.stack[stack_depth].provisional_result {
result
} else if is_coinductive_cycle {
Self::response_no_constraints(tcx, input, Certainty::Yes)
Self::response_no_constraints(cx, input, Certainty::Yes)
} else {
Self::response_no_constraints(tcx, input, Certainty::overflow(false))
Self::response_no_constraints(cx, input, Certainty::overflow(false))
};
} else {
// No entry, we push this goal on the stack and try to prove it.
@@ -355,9 +355,9 @@ pub(super) fn with_new_goal<D: SolverDelegate<Interner = I>>(
// not tracked by the cache key and from outside of this anon task, it
// must not be added to the global cache. Notably, this is the case for
// trait solver cycles participants.
let ((final_entry, result), dep_node) = tcx.with_cached_task(|| {
let ((final_entry, result), dep_node) = cx.with_cached_task(|| {
for _ in 0..FIXPOINT_STEP_LIMIT {
match self.fixpoint_step_in_task(tcx, input, inspect, &mut prove_goal) {
match self.fixpoint_step_in_task(cx, input, inspect, &mut prove_goal) {
StepResult::Done(final_entry, result) => return (final_entry, result),
StepResult::HasChanged => debug!("fixpoint changed provisional results"),
}
@@ -366,17 +366,17 @@ pub(super) fn with_new_goal<D: SolverDelegate<Interner = I>>(
debug!("canonical cycle overflow");
let current_entry = self.pop_stack();
debug_assert!(current_entry.has_been_used.is_empty());
let result = Self::response_no_constraints(tcx, input, Certainty::overflow(false));
let result = Self::response_no_constraints(cx, input, Certainty::overflow(false));
(current_entry, result)
});
let proof_tree = inspect.finalize_canonical_goal_evaluation(tcx);
let proof_tree = inspect.finalize_canonical_goal_evaluation(cx);
// We're now done with this goal. In case this goal is involved in a larger cycle
// do not remove it from the provisional cache and update its provisional result.
// We only add the root of cycles to the global cache.
if let Some(head) = final_entry.non_root_cycle_participant {
let coinductive_stack = Self::stack_coinductive_from(tcx, &self.stack, head);
let coinductive_stack = Self::stack_coinductive_from(cx, &self.stack, head);
let entry = self.provisional_cache.get_mut(&input).unwrap();
entry.stack_depth = None;
@@ -396,8 +396,8 @@ pub(super) fn with_new_goal<D: SolverDelegate<Interner = I>>(
// participant is on the stack. This is necessary to prevent unstable
// results. See the comment of `StackEntry::cycle_participants` for
// more details.
self.global_cache(tcx).insert(
tcx,
self.global_cache(cx).insert(
cx,
input,
proof_tree,
reached_depth,
@@ -418,15 +418,15 @@ pub(super) fn with_new_goal<D: SolverDelegate<Interner = I>>(
/// this goal.
fn lookup_global_cache<D: SolverDelegate<Interner = I>>(
&mut self,
tcx: I,
cx: I,
input: CanonicalInput<I>,
available_depth: SolverLimit,
inspect: &mut ProofTreeBuilder<D>,
) -> Option<QueryResult<I>> {
let CacheData { result, proof_tree, additional_depth, encountered_overflow } = self
.global_cache(tcx)
.global_cache(cx)
// FIXME: Awkward `Limit -> usize -> Limit`.
.get(tcx, input, self.stack.iter().map(|e| e.input), available_depth.0)?;
.get(cx, input, self.stack.iter().map(|e| e.input), available_depth.0)?;
// If we're building a proof tree and the current cache entry does not
// contain a proof tree, we do not use the entry but instead recompute
@@ -467,7 +467,7 @@ impl<I: Interner> SearchGraph<I> {
/// point we are done.
fn fixpoint_step_in_task<D, F>(
&mut self,
tcx: I,
cx: I,
input: CanonicalInput<I>,
inspect: &mut ProofTreeBuilder<D>,
prove_goal: &mut F,
@@ -506,9 +506,9 @@ fn fixpoint_step_in_task<D, F>(
let reached_fixpoint = if let Some(r) = stack_entry.provisional_result {
r == result
} else if stack_entry.has_been_used == HasBeenUsed::COINDUCTIVE_CYCLE {
Self::response_no_constraints(tcx, input, Certainty::Yes) == result
Self::response_no_constraints(cx, input, Certainty::Yes) == result
} else if stack_entry.has_been_used == HasBeenUsed::INDUCTIVE_CYCLE {
Self::response_no_constraints(tcx, input, Certainty::overflow(false)) == result
Self::response_no_constraints(cx, input, Certainty::overflow(false)) == result
} else {
false
};
@@ -528,11 +528,11 @@ fn fixpoint_step_in_task<D, F>(
}
fn response_no_constraints(
tcx: I,
cx: I,
goal: CanonicalInput<I>,
certainty: Certainty,
) -> QueryResult<I> {
Ok(super::response_no_constraints_raw(tcx, goal.max_universe, goal.variables, certainty))
Ok(super::response_no_constraints_raw(cx, goal.max_universe, goal.variables, certainty))
}
#[allow(rustc::potential_query_instability)]
@@ -30,8 +30,8 @@ fn trait_ref(self, _: I) -> ty::TraitRef<I> {
self.trait_ref
}
fn with_self_ty(self, tcx: I, self_ty: I::Ty) -> Self {
self.with_self_ty(tcx, self_ty)
fn with_self_ty(self, cx: I, self_ty: I::Ty) -> Self {
self.with_self_ty(cx, self_ty)
}
fn trait_def_id(self, _: I) -> I::DefId {
@@ -43,18 +43,17 @@ fn consider_impl_candidate(
goal: Goal<I, TraitPredicate<I>>,
impl_def_id: I::DefId,
) -> Result<Candidate<I>, NoSolution> {
let tcx = ecx.cx();
let cx = ecx.cx();
let impl_trait_ref = tcx.impl_trait_ref(impl_def_id);
if !tcx
.args_may_unify_deep(goal.predicate.trait_ref.args, impl_trait_ref.skip_binder().args)
let impl_trait_ref = cx.impl_trait_ref(impl_def_id);
if !cx.args_may_unify_deep(goal.predicate.trait_ref.args, impl_trait_ref.skip_binder().args)
{
return Err(NoSolution);
}
// An upper bound of the certainty of this goal, used to lower the certainty
// of reservation impl to ambiguous during coherence.
let impl_polarity = tcx.impl_polarity(impl_def_id);
let impl_polarity = cx.impl_polarity(impl_def_id);
let maximal_certainty = match (impl_polarity, goal.predicate.polarity) {
// In intercrate mode, this is ambiguous. But outside of intercrate,
// it's not a real impl.
@@ -77,13 +76,13 @@ fn consider_impl_candidate(
ecx.probe_trait_candidate(CandidateSource::Impl(impl_def_id)).enter(|ecx| {
let impl_args = ecx.fresh_args_for_item(impl_def_id);
ecx.record_impl_args(impl_args);
let impl_trait_ref = impl_trait_ref.instantiate(tcx, impl_args);
let impl_trait_ref = impl_trait_ref.instantiate(cx, impl_args);
ecx.eq(goal.param_env, goal.predicate.trait_ref, impl_trait_ref)?;
let where_clause_bounds = tcx
let where_clause_bounds = cx
.predicates_of(impl_def_id)
.iter_instantiated(tcx, impl_args)
.map(|pred| goal.with(tcx, pred));
.iter_instantiated(cx, impl_args)
.map(|pred| goal.with(cx, pred));
ecx.add_goals(GoalSource::ImplWhereBound, where_clause_bounds);
ecx.evaluate_added_goals_and_make_canonical_response(maximal_certainty)
@@ -181,13 +180,13 @@ fn consider_trait_alias_candidate(
return Err(NoSolution);
}
let tcx = ecx.cx();
let cx = ecx.cx();
ecx.probe_builtin_trait_candidate(BuiltinImplSource::Misc).enter(|ecx| {
let nested_obligations = tcx
let nested_obligations = cx
.predicates_of(goal.predicate.def_id())
.iter_instantiated(tcx, goal.predicate.trait_ref.args)
.map(|p| goal.with(tcx, p));
.iter_instantiated(cx, goal.predicate.trait_ref.args)
.map(|p| goal.with(cx, p));
// FIXME(-Znext-solver=coinductive): Should this be `GoalSource::ImplWhereBound`?
ecx.add_goals(GoalSource::Misc, nested_obligations);
ecx.evaluate_added_goals_and_make_canonical_response(Certainty::Yes)
@@ -232,13 +231,13 @@ fn consider_builtin_pointer_like_candidate(
return Err(NoSolution);
}
let tcx = ecx.cx();
let cx = ecx.cx();
// But if there are inference variables, we have to wait until it's resolved.
if (goal.param_env, goal.predicate.self_ty()).has_non_region_infer() {
return ecx.forced_ambiguity(MaybeCause::Ambiguity);
}
if tcx.layout_is_pointer_like(goal.param_env, goal.predicate.self_ty()) {
if cx.layout_is_pointer_like(goal.param_env, goal.predicate.self_ty()) {
ecx.probe_builtin_trait_candidate(BuiltinImplSource::Misc)
.enter(|ecx| ecx.evaluate_added_goals_and_make_canonical_response(Certainty::Yes))
} else {
@@ -286,10 +285,10 @@ fn consider_builtin_fn_trait_candidates(
return Err(NoSolution);
}
let tcx = ecx.cx();
let cx = ecx.cx();
let tupled_inputs_and_output =
match structural_traits::extract_tupled_inputs_and_output_from_callable(
tcx,
cx,
goal.predicate.self_ty(),
goal_kind,
)? {
@@ -299,14 +298,14 @@ fn consider_builtin_fn_trait_candidates(
}
};
let output_is_sized_pred = tupled_inputs_and_output.map_bound(|(_, output)| {
ty::TraitRef::new(tcx, tcx.require_lang_item(TraitSolverLangItem::Sized), [output])
ty::TraitRef::new(cx, cx.require_lang_item(TraitSolverLangItem::Sized), [output])
});
let pred = tupled_inputs_and_output
.map_bound(|(inputs, _)| {
ty::TraitRef::new(tcx, goal.predicate.def_id(), [goal.predicate.self_ty(), inputs])
ty::TraitRef::new(cx, goal.predicate.def_id(), [goal.predicate.self_ty(), inputs])
})
.upcast(tcx);
.upcast(cx);
// A built-in `Fn` impl only holds if the output is sized.
// (FIXME: technically we only need to check this if the type is a fn ptr...)
Self::probe_and_consider_implied_clause(
@@ -314,7 +313,7 @@ fn consider_builtin_fn_trait_candidates(
CandidateSource::BuiltinImpl(BuiltinImplSource::Misc),
goal,
pred,
[(GoalSource::ImplWhereBound, goal.with(tcx, output_is_sized_pred))],
[(GoalSource::ImplWhereBound, goal.with(cx, output_is_sized_pred))],
)
}
@@ -327,20 +326,20 @@ fn consider_builtin_async_fn_trait_candidates(
return Err(NoSolution);
}
let tcx = ecx.cx();
let cx = ecx.cx();
let (tupled_inputs_and_output_and_coroutine, nested_preds) =
structural_traits::extract_tupled_inputs_and_output_from_async_callable(
tcx,
cx,
goal.predicate.self_ty(),
goal_kind,
// This region doesn't matter because we're throwing away the coroutine type
Region::new_static(tcx),
Region::new_static(cx),
)?;
let output_is_sized_pred = tupled_inputs_and_output_and_coroutine.map_bound(
|AsyncCallableRelevantTypes { output_coroutine_ty, .. }| {
ty::TraitRef::new(
tcx,
tcx.require_lang_item(TraitSolverLangItem::Sized),
cx,
cx.require_lang_item(TraitSolverLangItem::Sized),
[output_coroutine_ty],
)
},
@@ -349,12 +348,12 @@ fn consider_builtin_async_fn_trait_candidates(
let pred = tupled_inputs_and_output_and_coroutine
.map_bound(|AsyncCallableRelevantTypes { tupled_inputs_ty, .. }| {
ty::TraitRef::new(
tcx,
cx,
goal.predicate.def_id(),
[goal.predicate.self_ty(), tupled_inputs_ty],
)
})
.upcast(tcx);
.upcast(cx);
// A built-in `AsyncFn` impl only holds if the output is sized.
// (FIXME: technically we only need to check this if the type is a fn ptr...)
Self::probe_and_consider_implied_clause(
@@ -362,9 +361,9 @@ fn consider_builtin_async_fn_trait_candidates(
CandidateSource::BuiltinImpl(BuiltinImplSource::Misc),
goal,
pred,
[goal.with(tcx, output_is_sized_pred)]
[goal.with(cx, output_is_sized_pred)]
.into_iter()
.chain(nested_preds.into_iter().map(|pred| goal.with(tcx, pred)))
.chain(nested_preds.into_iter().map(|pred| goal.with(cx, pred)))
.map(|goal| (GoalSource::ImplWhereBound, goal)),
)
}
@@ -437,8 +436,8 @@ fn consider_builtin_future_candidate(
};
// Coroutines are not futures unless they come from `async` desugaring
let tcx = ecx.cx();
if !tcx.coroutine_is_async(def_id) {
let cx = ecx.cx();
if !cx.coroutine_is_async(def_id) {
return Err(NoSolution);
}
@@ -463,8 +462,8 @@ fn consider_builtin_iterator_candidate(
};
// Coroutines are not iterators unless they come from `gen` desugaring
let tcx = ecx.cx();
if !tcx.coroutine_is_gen(def_id) {
let cx = ecx.cx();
if !cx.coroutine_is_gen(def_id) {
return Err(NoSolution);
}
@@ -489,8 +488,8 @@ fn consider_builtin_fused_iterator_candidate(
};
// Coroutines are not iterators unless they come from `gen` desugaring
let tcx = ecx.cx();
if !tcx.coroutine_is_gen(def_id) {
let cx = ecx.cx();
if !cx.coroutine_is_gen(def_id) {
return Err(NoSolution);
}
@@ -513,8 +512,8 @@ fn consider_builtin_async_iterator_candidate(
};
// Coroutines are not iterators unless they come from `gen` desugaring
let tcx = ecx.cx();
if !tcx.coroutine_is_async_gen(def_id) {
let cx = ecx.cx();
if !cx.coroutine_is_async_gen(def_id) {
return Err(NoSolution);
}
@@ -540,8 +539,8 @@ fn consider_builtin_coroutine_candidate(
};
// `async`-desugared coroutines do not implement the coroutine trait
let tcx = ecx.cx();
if !tcx.is_general_coroutine(def_id) {
let cx = ecx.cx();
if !cx.is_general_coroutine(def_id) {
return Err(NoSolution);
}
@@ -550,8 +549,8 @@ fn consider_builtin_coroutine_candidate(
ecx,
CandidateSource::BuiltinImpl(BuiltinImplSource::Misc),
goal,
ty::TraitRef::new(tcx, goal.predicate.def_id(), [self_ty, coroutine.resume_ty()])
.upcast(tcx),
ty::TraitRef::new(cx, goal.predicate.def_id(), [self_ty, coroutine.resume_ty()])
.upcast(cx),
// Technically, we need to check that the coroutine types are Sized,
// but that's already proven by the coroutine being WF.
[],
@@ -727,7 +726,7 @@ fn consider_builtin_dyn_upcast_candidates(
b_data: I::BoundExistentialPredicates,
b_region: I::Region,
) -> Vec<Candidate<I>> {
let tcx = self.cx();
let cx = self.cx();
let Goal { predicate: (a_ty, _b_ty), .. } = goal;
let mut responses = vec![];
@@ -745,7 +744,7 @@ fn consider_builtin_dyn_upcast_candidates(
));
} else if let Some(a_principal) = a_data.principal() {
for new_a_principal in
D::elaborate_supertraits(self.cx(), a_principal.with_self_ty(tcx, a_ty)).skip(1)
D::elaborate_supertraits(self.cx(), a_principal.with_self_ty(cx, a_ty)).skip(1)
{
responses.extend(self.consider_builtin_upcast_to_principal(
goal,
@@ -755,7 +754,7 @@ fn consider_builtin_dyn_upcast_candidates(
b_data,
b_region,
Some(new_a_principal.map_bound(|trait_ref| {
ty::ExistentialTraitRef::erase_self_ty(tcx, trait_ref)
ty::ExistentialTraitRef::erase_self_ty(cx, trait_ref)
})),
));
}
@@ -770,11 +769,11 @@ fn consider_builtin_unsize_to_dyn_candidate(
b_data: I::BoundExistentialPredicates,
b_region: I::Region,
) -> Result<Candidate<I>, NoSolution> {
let tcx = self.cx();
let cx = self.cx();
let Goal { predicate: (a_ty, _), .. } = goal;
// Can only unsize to an object-safe trait.
if b_data.principal_def_id().is_some_and(|def_id| !tcx.trait_is_object_safe(def_id)) {
if b_data.principal_def_id().is_some_and(|def_id| !cx.trait_is_object_safe(def_id)) {
return Err(NoSolution);
}
@@ -783,24 +782,20 @@ fn consider_builtin_unsize_to_dyn_candidate(
// (i.e. the principal, all of the associated types match, and any auto traits)
ecx.add_goals(
GoalSource::ImplWhereBound,
b_data.iter().map(|pred| goal.with(tcx, pred.with_self_ty(tcx, a_ty))),
b_data.iter().map(|pred| goal.with(cx, pred.with_self_ty(cx, a_ty))),
);
// The type must be `Sized` to be unsized.
ecx.add_goal(
GoalSource::ImplWhereBound,
goal.with(
tcx,
ty::TraitRef::new(
tcx,
tcx.require_lang_item(TraitSolverLangItem::Sized),
[a_ty],
),
cx,
ty::TraitRef::new(cx, cx.require_lang_item(TraitSolverLangItem::Sized), [a_ty]),
),
);
// The type must outlive the lifetime of the `dyn` we're unsizing into.
ecx.add_goal(GoalSource::Misc, goal.with(tcx, ty::OutlivesPredicate(a_ty, b_region)));
ecx.add_goal(GoalSource::Misc, goal.with(cx, ty::OutlivesPredicate(a_ty, b_region)));
ecx.evaluate_added_goals_and_make_canonical_response(Certainty::Yes)
})
}
@@ -941,28 +936,28 @@ fn consider_builtin_struct_unsize(
a_args: I::GenericArgs,
b_args: I::GenericArgs,
) -> Result<Candidate<I>, NoSolution> {
let tcx = self.cx();
let cx = self.cx();
let Goal { predicate: (_a_ty, b_ty), .. } = goal;
let unsizing_params = tcx.unsizing_params_for_adt(def.def_id());
let unsizing_params = cx.unsizing_params_for_adt(def.def_id());
// We must be unsizing some type parameters. This also implies
// that the struct has a tail field.
if unsizing_params.is_empty() {
return Err(NoSolution);
}
let tail_field_ty = def.struct_tail_ty(tcx).unwrap();
let tail_field_ty = def.struct_tail_ty(cx).unwrap();
let a_tail_ty = tail_field_ty.instantiate(tcx, a_args);
let b_tail_ty = tail_field_ty.instantiate(tcx, b_args);
let a_tail_ty = tail_field_ty.instantiate(cx, a_args);
let b_tail_ty = tail_field_ty.instantiate(cx, b_args);
// Instantiate just the unsizing params from B into A. The type after
// this instantiation must be equal to B. This is so we don't unsize
// unrelated type parameters.
let new_a_args = tcx.mk_args_from_iter(a_args.iter().enumerate().map(|(i, a)| {
let new_a_args = cx.mk_args_from_iter(a_args.iter().enumerate().map(|(i, a)| {
if unsizing_params.contains(i as u32) { b_args.get(i).unwrap() } else { a }
}));
let unsized_a_ty = Ty::new_adt(tcx, def, new_a_args);
let unsized_a_ty = Ty::new_adt(cx, def, new_a_args);
// Finally, we require that `TailA: Unsize<TailB>` for the tail field
// types.
@@ -970,10 +965,10 @@ fn consider_builtin_struct_unsize(
self.add_goal(
GoalSource::ImplWhereBound,
goal.with(
tcx,
cx,
ty::TraitRef::new(
tcx,
tcx.require_lang_item(TraitSolverLangItem::Unsize),
cx,
cx.require_lang_item(TraitSolverLangItem::Unsize),
[a_tail_ty, b_tail_ty],
),
),
@@ -998,25 +993,24 @@ fn consider_builtin_tuple_unsize(
a_tys: I::Tys,
b_tys: I::Tys,
) -> Result<Candidate<I>, NoSolution> {
let tcx = self.cx();
let cx = self.cx();
let Goal { predicate: (_a_ty, b_ty), .. } = goal;
let (&a_last_ty, a_rest_tys) = a_tys.split_last().unwrap();
let b_last_ty = b_tys.last().unwrap();
// Instantiate just the tail field of B., and require that they're equal.
let unsized_a_ty =
Ty::new_tup_from_iter(tcx, a_rest_tys.iter().copied().chain([b_last_ty]));
let unsized_a_ty = Ty::new_tup_from_iter(cx, a_rest_tys.iter().copied().chain([b_last_ty]));
self.eq(goal.param_env, unsized_a_ty, b_ty)?;
// Similar to ADTs, require that we can unsize the tail.
self.add_goal(
GoalSource::ImplWhereBound,
goal.with(
tcx,
cx,
ty::TraitRef::new(
tcx,
tcx.require_lang_item(TraitSolverLangItem::Unsize),
cx,
cx.require_lang_item(TraitSolverLangItem::Unsize),
[a_last_ty, b_last_ty],
),
),
+1 -1
View File
@@ -608,7 +608,7 @@ fn parse_ty_bare_fn(
self.dcx().emit_err(FnPointerCannotBeAsync { span: whole_span, qualifier: span });
}
// FIXME(gen_blocks): emit a similar error for `gen fn()`
let decl_span = span_start.to(self.token.span);
let decl_span = span_start.to(self.prev_token.span);
Ok(TyKind::BareFn(P(BareFnTy { ext, safety, generic_params: params, decl, decl_span })))
}
+50 -16
View File
@@ -155,7 +155,10 @@ fn insert_def_id(&mut self, def_id: DefId) {
fn handle_res(&mut self, res: Res) {
match res {
Res::Def(DefKind::Const | DefKind::AssocConst | DefKind::TyAlias, def_id) => {
Res::Def(
DefKind::Const | DefKind::AssocConst | DefKind::AssocTy | DefKind::TyAlias,
def_id,
) => {
self.check_def_id(def_id);
}
_ if self.in_pat => {}
@@ -399,6 +402,31 @@ fn should_ignore_item(&mut self, def_id: DefId) -> bool {
return false;
}
// don't ignore impls for Enums and pub Structs whose methods don't have self receiver,
// cause external crate may call such methods to construct values of these types
if let Some(local_impl_of) = impl_of.as_local()
&& let Some(local_def_id) = def_id.as_local()
&& let Some(fn_sig) =
self.tcx.hir().fn_sig_by_hir_id(self.tcx.local_def_id_to_hir_id(local_def_id))
&& matches!(fn_sig.decl.implicit_self, hir::ImplicitSelfKind::None)
&& let TyKind::Path(hir::QPath::Resolved(_, path)) =
self.tcx.hir().expect_item(local_impl_of).expect_impl().self_ty.kind
&& let Res::Def(def_kind, did) = path.res
{
match def_kind {
// for example, #[derive(Default)] pub struct T(i32);
// external crate can call T::default() to construct T,
// so that don't ignore impl Default for pub Enum and Structs
DefKind::Struct | DefKind::Union if self.tcx.visibility(did).is_public() => {
return false;
}
// don't ignore impl Default for Enums,
// cause we don't know which variant is constructed
DefKind::Enum => return false,
_ => (),
};
}
if let Some(trait_of) = self.tcx.trait_id_of_impl(impl_of)
&& self.tcx.has_attr(trait_of, sym::rustc_trivial_field_reads)
{
@@ -441,7 +469,7 @@ fn visit_node(&mut self, node: Node<'tcx>) {
intravisit::walk_item(self, item)
}
hir::ItemKind::ForeignMod { .. } => {}
hir::ItemKind::Trait(..) => {
hir::ItemKind::Trait(_, _, _, _, trait_item_refs) => {
for impl_def_id in self.tcx.all_impls(item.owner_id.to_def_id()) {
if let Some(local_def_id) = impl_def_id.as_local()
&& let ItemKind::Impl(impl_ref) =
@@ -454,7 +482,12 @@ fn visit_node(&mut self, node: Node<'tcx>) {
intravisit::walk_path(self, impl_ref.of_trait.unwrap().path);
}
}
// mark assoc ty live if the trait is live
for trait_item in trait_item_refs {
if let hir::AssocItemKind::Type = trait_item.kind {
self.check_def_id(trait_item.id.owner_id.to_def_id());
}
}
intravisit::walk_item(self, item)
}
_ => intravisit::walk_item(self, item),
@@ -471,9 +504,8 @@ fn visit_node(&mut self, node: Node<'tcx>) {
&& let ItemKind::Impl(impl_ref) =
self.tcx.hir().expect_item(local_impl_id).kind
{
if !matches!(trait_item.kind, hir::TraitItemKind::Type(..))
&& !ty_ref_to_pub_struct(self.tcx, impl_ref.self_ty)
.ty_and_all_fields_are_public
if !ty_ref_to_pub_struct(self.tcx, impl_ref.self_ty)
.ty_and_all_fields_are_public
{
// skip impl-items of non pure pub ty,
// cause we don't know the ty is constructed or not,
@@ -812,9 +844,8 @@ fn check_item<'tcx>(
// for trait impl blocks,
// mark the method live if the self_ty is public,
// or the method is public and may construct self
if of_trait && matches!(tcx.def_kind(local_def_id), DefKind::AssocTy)
|| tcx.visibility(local_def_id).is_public()
&& (ty_and_all_fields_are_public || may_construct_self)
if tcx.visibility(local_def_id).is_public()
&& (ty_and_all_fields_are_public || may_construct_self)
{
// if the impl item is public,
// and the ty may be constructed or can be constructed in foreign crates,
@@ -851,10 +882,13 @@ fn check_trait_item(
worklist: &mut Vec<(LocalDefId, ComesFromAllowExpect)>,
id: hir::TraitItemId,
) {
use hir::TraitItemKind::{Const, Fn};
if matches!(tcx.def_kind(id.owner_id), DefKind::AssocConst | DefKind::AssocFn) {
use hir::TraitItemKind::{Const, Fn, Type};
if matches!(
tcx.def_kind(id.owner_id),
DefKind::AssocConst | DefKind::AssocTy | DefKind::AssocFn
) {
let trait_item = tcx.hir().trait_item(id);
if matches!(trait_item.kind, Const(_, Some(_)) | Fn(..))
if matches!(trait_item.kind, Const(_, Some(_)) | Type(_, Some(_)) | Fn(..))
&& let Some(comes_from_allow) =
has_allow_dead_code_or_lang_attr(tcx, trait_item.owner_id.def_id)
{
@@ -896,7 +930,7 @@ fn create_and_seed_worklist(
// checks impls, impl-items and pub structs with all public fields later
match tcx.def_kind(id) {
DefKind::Impl { .. } => false,
DefKind::AssocConst | DefKind::AssocFn => !matches!(tcx.associated_item(id).container, AssocItemContainer::ImplContainer),
DefKind::AssocConst | DefKind::AssocTy | DefKind::AssocFn => !matches!(tcx.associated_item(id).container, AssocItemContainer::ImplContainer),
DefKind::Struct => struct_all_fields_are_public(tcx, id.to_def_id()) || has_allow_dead_code_or_lang_attr(tcx, id).is_some(),
_ => true
})
@@ -1183,6 +1217,7 @@ fn check_definition(&mut self, def_id: LocalDefId) {
}
match self.tcx.def_kind(def_id) {
DefKind::AssocConst
| DefKind::AssocTy
| DefKind::AssocFn
| DefKind::Fn
| DefKind::Static { .. }
@@ -1224,15 +1259,14 @@ fn check_mod_deathness(tcx: TyCtxt<'_>, module: LocalModDefId) {
|| (def_kind == DefKind::Trait && live_symbols.contains(&item.owner_id.def_id))
{
for &def_id in tcx.associated_item_def_ids(item.owner_id.def_id) {
// We have diagnosed unused assoc consts and fns in traits
// We have diagnosed unused assocs in traits
if matches!(def_kind, DefKind::Impl { of_trait: true })
&& matches!(tcx.def_kind(def_id), DefKind::AssocConst | DefKind::AssocFn)
&& matches!(tcx.def_kind(def_id), DefKind::AssocConst | DefKind::AssocTy | DefKind::AssocFn)
// skip unused public inherent methods,
// cause we have diagnosed unconstructed struct
|| matches!(def_kind, DefKind::Impl { of_trait: false })
&& tcx.visibility(def_id).is_public()
&& ty_ref_to_pub_struct(tcx, tcx.hir().item(item).expect_impl().self_ty).ty_is_public
|| def_kind == DefKind::Trait && tcx.def_kind(def_id) == DefKind::AssocTy
{
continue;
}
+2 -10
View File
@@ -30,7 +30,7 @@
use rustc_hir::intravisit::{self, Visitor};
use rustc_hir::Node;
use rustc_middle::bug;
use rustc_middle::middle::codegen_fn_attrs::{CodegenFnAttrFlags, CodegenFnAttrs};
use rustc_middle::middle::codegen_fn_attrs::CodegenFnAttrFlags;
use rustc_middle::middle::privacy::{self, Level};
use rustc_middle::mir::interpret::{ConstAllocation, ErrorHandled, GlobalAlloc};
use rustc_middle::query::Providers;
@@ -178,15 +178,7 @@ fn propagate_node(&mut self, node: &Node<'tcx>, search_item: LocalDefId) {
if !self.any_library {
// If we are building an executable, only explicitly extern
// types need to be exported.
let codegen_attrs = if self.tcx.def_kind(search_item).has_codegen_attrs() {
self.tcx.codegen_fn_attrs(search_item)
} else {
CodegenFnAttrs::EMPTY
};
let is_extern = codegen_attrs.contains_extern_indicator();
let std_internal =
codegen_attrs.flags.contains(CodegenFnAttrFlags::RUSTC_STD_INTERNAL_SYMBOL);
if is_extern || std_internal {
if has_custom_linkage(self.tcx, search_item) {
self.reachable_symbols.insert(search_item);
}
} else {
@@ -99,7 +99,7 @@ pub(crate) fn compute_effective_visibilities<'c>(
// is the maximum value among visibilities of bindings corresponding to that def id.
for (binding, eff_vis) in visitor.import_effective_visibilities.iter() {
let NameBindingKind::Import { import, .. } = binding.kind else { unreachable!() };
if !binding.is_ambiguity() {
if !binding.is_ambiguity_recursive() {
if let Some(node_id) = import.id() {
r.effective_visibilities.update_eff_vis(r.local_def_id(node_id), eff_vis, r.tcx)
}
+32 -42
View File
@@ -325,8 +325,8 @@ pub(crate) fn try_define(
}
match (old_binding.is_glob_import(), binding.is_glob_import()) {
(true, true) => {
// FIXME: remove `!binding.is_ambiguity()` after delete the warning ambiguity.
if !binding.is_ambiguity()
// FIXME: remove `!binding.is_ambiguity_recursive()` after delete the warning ambiguity.
if !binding.is_ambiguity_recursive()
&& let NameBindingKind::Import { import: old_import, .. } =
old_binding.kind
&& let NameBindingKind::Import { import, .. } = binding.kind
@@ -337,21 +337,17 @@ pub(crate) fn try_define(
// imported from the same glob-import statement.
resolution.binding = Some(binding);
} else if res != old_binding.res() {
let binding = if warn_ambiguity {
this.warn_ambiguity(AmbiguityKind::GlobVsGlob, old_binding, binding)
} else {
this.ambiguity(AmbiguityKind::GlobVsGlob, old_binding, binding)
};
resolution.binding = Some(binding);
resolution.binding = Some(this.new_ambiguity_binding(
AmbiguityKind::GlobVsGlob,
old_binding,
binding,
warn_ambiguity,
));
} else if !old_binding.vis.is_at_least(binding.vis, this.tcx) {
// We are glob-importing the same item but with greater visibility.
resolution.binding = Some(binding);
} else if binding.is_ambiguity() {
resolution.binding =
Some(self.arenas.alloc_name_binding(NameBindingData {
warn_ambiguity: true,
..(*binding).clone()
}));
} else if binding.is_ambiguity_recursive() {
resolution.binding = Some(this.new_warn_ambiguity_binding(binding));
}
}
(old_glob @ true, false) | (old_glob @ false, true) => {
@@ -361,24 +357,26 @@ pub(crate) fn try_define(
&& nonglob_binding.expansion != LocalExpnId::ROOT
&& glob_binding.res() != nonglob_binding.res()
{
resolution.binding = Some(this.ambiguity(
resolution.binding = Some(this.new_ambiguity_binding(
AmbiguityKind::GlobVsExpanded,
nonglob_binding,
glob_binding,
false,
));
} else {
resolution.binding = Some(nonglob_binding);
}
if let Some(old_binding) = resolution.shadowed_glob {
assert!(old_binding.is_glob_import());
if glob_binding.res() != old_binding.res() {
resolution.shadowed_glob = Some(this.ambiguity(
if let Some(old_shadowed_glob) = resolution.shadowed_glob {
assert!(old_shadowed_glob.is_glob_import());
if glob_binding.res() != old_shadowed_glob.res() {
resolution.shadowed_glob = Some(this.new_ambiguity_binding(
AmbiguityKind::GlobVsGlob,
old_binding,
old_shadowed_glob,
glob_binding,
false,
));
} else if !old_binding.vis.is_at_least(binding.vis, this.tcx) {
} else if !old_shadowed_glob.vis.is_at_least(binding.vis, this.tcx) {
resolution.shadowed_glob = Some(glob_binding);
}
} else {
@@ -397,29 +395,21 @@ pub(crate) fn try_define(
})
}
fn ambiguity(
fn new_ambiguity_binding(
&self,
kind: AmbiguityKind,
ambiguity_kind: AmbiguityKind,
primary_binding: NameBinding<'a>,
secondary_binding: NameBinding<'a>,
warn_ambiguity: bool,
) -> NameBinding<'a> {
self.arenas.alloc_name_binding(NameBindingData {
ambiguity: Some((secondary_binding, kind)),
..(*primary_binding).clone()
})
let ambiguity = Some((secondary_binding, ambiguity_kind));
let data = NameBindingData { ambiguity, warn_ambiguity, ..*primary_binding };
self.arenas.alloc_name_binding(data)
}
fn warn_ambiguity(
&self,
kind: AmbiguityKind,
primary_binding: NameBinding<'a>,
secondary_binding: NameBinding<'a>,
) -> NameBinding<'a> {
self.arenas.alloc_name_binding(NameBindingData {
ambiguity: Some((secondary_binding, kind)),
warn_ambiguity: true,
..(*primary_binding).clone()
})
fn new_warn_ambiguity_binding(&self, binding: NameBinding<'a>) -> NameBinding<'a> {
assert!(binding.is_ambiguity_recursive());
self.arenas.alloc_name_binding(NameBindingData { warn_ambiguity: true, ..*binding })
}
// Use `f` to mutate the resolution of the name in the module.
@@ -1381,8 +1371,8 @@ pub(crate) fn check_for_redundant_imports(&mut self, import: Import<'a>) -> bool
target_bindings[ns].get(),
) {
Ok(other_binding) => {
is_redundant =
binding.res() == other_binding.res() && !other_binding.is_ambiguity();
is_redundant = binding.res() == other_binding.res()
&& !other_binding.is_ambiguity_recursive();
if is_redundant {
redundant_span[ns] =
Some((other_binding.span, other_binding.is_import()));
@@ -1455,7 +1445,7 @@ fn resolve_glob_import(&mut self, import: Import<'a>) {
.resolution(import.parent_scope.module, key)
.borrow()
.binding()
.is_some_and(|binding| binding.is_warn_ambiguity());
.is_some_and(|binding| binding.warn_ambiguity_recursive());
let _ = self.try_define(
import.parent_scope.module,
key,
@@ -1480,7 +1470,7 @@ fn finalize_resolutions_in(&mut self, module: Module<'a>) {
module.for_each_child(self, |this, ident, _, binding| {
let res = binding.res().expect_non_local();
let error_ambiguity = binding.is_ambiguity() && !binding.warn_ambiguity;
let error_ambiguity = binding.is_ambiguity_recursive() && !binding.warn_ambiguity;
if res != def::Res::Err && !error_ambiguity {
let mut reexport_chain = SmallVec::new();
let mut next_binding = binding;
+1 -1
View File
@@ -3730,7 +3730,7 @@ fn try_resolve_as_non_binding(
let ls_binding = self.maybe_resolve_ident_in_lexical_scope(ident, ValueNS)?;
let (res, binding) = match ls_binding {
LexicalScopeBinding::Item(binding)
if is_syntactic_ambiguity && binding.is_ambiguity() =>
if is_syntactic_ambiguity && binding.is_ambiguity_recursive() =>
{
// For ambiguous bindings we don't know all their definitions and cannot check
// whether they can be shadowed by fresh bindings or not, so force an error.
+8 -6
View File
@@ -694,10 +694,12 @@ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
}
/// Records a possibly-private value, type, or module definition.
#[derive(Clone, Debug)]
#[derive(Clone, Copy, Debug)]
struct NameBindingData<'a> {
kind: NameBindingKind<'a>,
ambiguity: Option<(NameBinding<'a>, AmbiguityKind)>,
/// Produce a warning instead of an error when reporting ambiguities inside this binding.
/// May apply to indirect ambiguities under imports, so `ambiguity.is_some()` is not required.
warn_ambiguity: bool,
expansion: LocalExpnId,
span: Span,
@@ -718,7 +720,7 @@ fn to_name_binding(self, _: &'a ResolverArenas<'a>) -> NameBinding<'a> {
}
}
#[derive(Clone, Debug)]
#[derive(Clone, Copy, Debug)]
enum NameBindingKind<'a> {
Res(Res),
Module(Module<'a>),
@@ -830,18 +832,18 @@ fn res(&self) -> Res {
}
}
fn is_ambiguity(&self) -> bool {
fn is_ambiguity_recursive(&self) -> bool {
self.ambiguity.is_some()
|| match self.kind {
NameBindingKind::Import { binding, .. } => binding.is_ambiguity(),
NameBindingKind::Import { binding, .. } => binding.is_ambiguity_recursive(),
_ => false,
}
}
fn is_warn_ambiguity(&self) -> bool {
fn warn_ambiguity_recursive(&self) -> bool {
self.warn_ambiguity
|| match self.kind {
NameBindingKind::Import { binding, .. } => binding.is_warn_ambiguity(),
NameBindingKind::Import { binding, .. } => binding.warn_ambiguity_recursive(),
_ => false,
}
}
+1 -1
View File
@@ -396,7 +396,7 @@ fn show_arc() {
// Make sure deriving works with Arc<T>
#[derive(Eq, Ord, PartialEq, PartialOrd, Clone, Debug, Default)]
struct Foo {
struct _Foo {
inner: Arc<i32>,
}
+1
View File
@@ -103,6 +103,7 @@
/// ```
#[cfg_attr(not(test), rustc_diagnostic_item = "Default")]
#[stable(feature = "rust1", since = "1.0.0")]
#[cfg_attr(not(bootstrap), rustc_trivial_field_reads)]
pub trait Default: Sized {
/// Returns the "default value" for a type.
///
+5 -4
View File
@@ -484,7 +484,7 @@ mod sealed_trait {
all supported platforms",
issue = "44930"
)]
pub trait VaArgSafe {}
pub unsafe trait VaArgSafe {}
}
macro_rules! impl_va_arg_safe {
@@ -494,7 +494,7 @@ macro_rules! impl_va_arg_safe {
reason = "the `c_variadic` feature has not been properly tested on \
all supported platforms",
issue = "44930")]
impl sealed_trait::VaArgSafe for $t {}
unsafe impl sealed_trait::VaArgSafe for $t {}
)+
}
}
@@ -509,14 +509,15 @@ impl sealed_trait::VaArgSafe for $t {}
all supported platforms",
issue = "44930"
)]
impl<T> sealed_trait::VaArgSafe for *mut T {}
unsafe impl<T> sealed_trait::VaArgSafe for *mut T {}
#[unstable(
feature = "c_variadic",
reason = "the `c_variadic` feature has not been properly tested on \
all supported platforms",
issue = "44930"
)]
impl<T> sealed_trait::VaArgSafe for *const T {}
unsafe impl<T> sealed_trait::VaArgSafe for *const T {}
#[unstable(
feature = "c_variadic",
+1 -1
View File
@@ -2579,7 +2579,7 @@ pub const fn ptr_guaranteed_cmp<T>(ptr: *const T, other: *const T) -> u8 {
/// fn runtime() -> i32 { 1 }
/// const fn compiletime() -> i32 { 2 }
///
// // ⚠ This code violates the required equivalence of `compiletime`
/// // ⚠ This code violates the required equivalence of `compiletime`
/// // and `runtime`.
/// const_eval_select((), compiletime, runtime)
/// }
+45 -45
View File
@@ -3,7 +3,7 @@
use crate::num::NonZero;
use crate::ops::Try;
use core::array;
use core::mem::{ManuallyDrop, MaybeUninit};
use core::mem::MaybeUninit;
use core::ops::ControlFlow;
/// An iterator that filters the elements of `iter` with `predicate`.
@@ -27,6 +27,42 @@ pub(in crate::iter) fn new(iter: I, predicate: P) -> Filter<I, P> {
}
}
impl<I, P> Filter<I, P>
where
I: Iterator,
P: FnMut(&I::Item) -> bool,
{
#[inline]
fn next_chunk_dropless<const N: usize>(
&mut self,
) -> Result<[I::Item; N], array::IntoIter<I::Item, N>> {
let mut array: [MaybeUninit<I::Item>; N] = [const { MaybeUninit::uninit() }; N];
let mut initialized = 0;
let result = self.iter.try_for_each(|element| {
let idx = initialized;
// branchless index update combined with unconditionally copying the value even when
// it is filtered reduces branching and dependencies in the loop.
initialized = idx + (self.predicate)(&element) as usize;
// SAFETY: Loop conditions ensure the index is in bounds.
unsafe { array.get_unchecked_mut(idx) }.write(element);
if initialized < N { ControlFlow::Continue(()) } else { ControlFlow::Break(()) }
});
match result {
ControlFlow::Break(()) => {
// SAFETY: The loop above is only explicitly broken when the array has been fully initialized
Ok(unsafe { MaybeUninit::array_assume_init(array) })
}
ControlFlow::Continue(()) => {
// SAFETY: The range is in bounds since the loop breaks when reaching N elements.
Err(unsafe { array::IntoIter::new_unchecked(array, 0..initialized) })
}
}
}
}
#[stable(feature = "core_impl_debug", since = "1.9.0")]
impl<I: fmt::Debug, P> fmt::Debug for Filter<I, P> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
@@ -64,52 +100,16 @@ fn next(&mut self) -> Option<I::Item> {
fn next_chunk<const N: usize>(
&mut self,
) -> Result<[Self::Item; N], array::IntoIter<Self::Item, N>> {
let mut array: [MaybeUninit<Self::Item>; N] = [const { MaybeUninit::uninit() }; N];
struct Guard<'a, T> {
array: &'a mut [MaybeUninit<T>],
initialized: usize,
}
impl<T> Drop for Guard<'_, T> {
#[inline]
fn drop(&mut self) {
if const { crate::mem::needs_drop::<T>() } {
// SAFETY: self.initialized is always <= N, which also is the length of the array.
unsafe {
core::ptr::drop_in_place(MaybeUninit::slice_assume_init_mut(
self.array.get_unchecked_mut(..self.initialized),
));
}
}
// avoid codegen for the dead branch
let fun = const {
if crate::mem::needs_drop::<I::Item>() {
array::iter_next_chunk::<I::Item, N>
} else {
Self::next_chunk_dropless::<N>
}
}
};
let mut guard = Guard { array: &mut array, initialized: 0 };
let result = self.iter.try_for_each(|element| {
let idx = guard.initialized;
guard.initialized = idx + (self.predicate)(&element) as usize;
// SAFETY: Loop conditions ensure the index is in bounds.
unsafe { guard.array.get_unchecked_mut(idx) }.write(element);
if guard.initialized < N { ControlFlow::Continue(()) } else { ControlFlow::Break(()) }
});
let guard = ManuallyDrop::new(guard);
match result {
ControlFlow::Break(()) => {
// SAFETY: The loop above is only explicitly broken when the array has been fully initialized
Ok(unsafe { MaybeUninit::array_assume_init(array) })
}
ControlFlow::Continue(()) => {
let initialized = guard.initialized;
// SAFETY: The range is in bounds since the loop breaks when reaching N elements.
Err(unsafe { array::IntoIter::new_unchecked(array, 0..initialized) })
}
}
fun(self)
}
#[inline]
+672 -2
View File
@@ -11,6 +11,7 @@
#![unstable(feature = "f128", issue = "116909")]
use crate::convert::FloatToInt;
use crate::mem;
/// Basic mathematical constants.
@@ -220,21 +221,145 @@ impl f128 {
#[unstable(feature = "f128", issue = "116909")]
pub const MAX_10_EXP: i32 = 4_932;
/// Not a Number (NaN).
///
/// Note that IEEE 754 doesn't define just a single NaN value;
/// a plethora of bit patterns are considered to be NaN.
/// Furthermore, the standard makes a difference
/// between a "signaling" and a "quiet" NaN,
/// and allows inspecting its "payload" (the unspecified bits in the bit pattern).
/// This constant isn't guaranteed to equal to any specific NaN bitpattern,
/// and the stability of its representation over Rust versions
/// and target platforms isn't guaranteed.
#[cfg(not(bootstrap))]
#[allow(clippy::eq_op)]
#[rustc_diagnostic_item = "f128_nan"]
#[unstable(feature = "f128", issue = "116909")]
pub const NAN: f128 = 0.0_f128 / 0.0_f128;
/// Infinity (∞).
#[cfg(not(bootstrap))]
#[unstable(feature = "f128", issue = "116909")]
pub const INFINITY: f128 = 1.0_f128 / 0.0_f128;
/// Negative infinity (−∞).
#[cfg(not(bootstrap))]
#[unstable(feature = "f128", issue = "116909")]
pub const NEG_INFINITY: f128 = -1.0_f128 / 0.0_f128;
/// Sign bit
#[cfg(not(bootstrap))]
pub(crate) const SIGN_MASK: u128 = 0x8000_0000_0000_0000_0000_0000_0000_0000;
/// Minimum representable positive value (min subnormal)
#[cfg(not(bootstrap))]
const TINY_BITS: u128 = 0x1;
/// Minimum representable negative value (min negative subnormal)
#[cfg(not(bootstrap))]
const NEG_TINY_BITS: u128 = Self::TINY_BITS | Self::SIGN_MASK;
/// Returns `true` if this value is NaN.
///
/// ```
/// #![feature(f128)]
/// # // FIXME(f16_f128): remove when `unordtf2` is available
/// # #[cfg(all(target_arch = "x86_64", target_os = "linux"))] {
///
/// let nan = f128::NAN;
/// let f = 7.0_f128;
///
/// assert!(nan.is_nan());
/// assert!(!f.is_nan());
/// # }
/// ```
#[inline]
#[must_use]
#[cfg(not(bootstrap))]
#[unstable(feature = "f128", issue = "116909")]
#[allow(clippy::eq_op)] // > if you intended to check if the operand is NaN, use `.is_nan()` instead :)
pub const fn is_nan(self) -> bool {
self != self
}
// FIXME(#50145): `abs` is publicly unavailable in core due to
// concerns about portability, so this implementation is for
// private use internally.
#[inline]
#[cfg(not(bootstrap))]
#[rustc_const_unstable(feature = "const_float_classify", issue = "72505")]
pub(crate) const fn abs_private(self) -> f128 {
// SAFETY: This transmutation is fine. Probably. For the reasons std is using it.
unsafe {
mem::transmute::<u128, f128>(mem::transmute::<f128, u128>(self) & !Self::SIGN_MASK)
}
}
/// Returns `true` if this value is positive infinity or negative infinity, and
/// `false` otherwise.
///
/// ```
/// #![feature(f128)]
/// # // FIXME(f16_f128): remove when `eqtf2` is available
/// # #[cfg(all(target_arch = "x86_64", target_os = "linux"))] {
///
/// let f = 7.0f128;
/// let inf = f128::INFINITY;
/// let neg_inf = f128::NEG_INFINITY;
/// let nan = f128::NAN;
///
/// assert!(!f.is_infinite());
/// assert!(!nan.is_infinite());
///
/// assert!(inf.is_infinite());
/// assert!(neg_inf.is_infinite());
/// # }
/// ```
#[inline]
#[must_use]
#[cfg(not(bootstrap))]
#[unstable(feature = "f128", issue = "116909")]
#[rustc_const_unstable(feature = "const_float_classify", issue = "72505")]
pub const fn is_infinite(self) -> bool {
(self == f128::INFINITY) | (self == f128::NEG_INFINITY)
}
/// Returns `true` if this number is neither infinite nor NaN.
///
/// ```
/// #![feature(f128)]
/// # // FIXME(f16_f128): remove when `lttf2` is available
/// # #[cfg(all(target_arch = "x86_64", target_os = "linux"))] {
///
/// let f = 7.0f128;
/// let inf: f128 = f128::INFINITY;
/// let neg_inf: f128 = f128::NEG_INFINITY;
/// let nan: f128 = f128::NAN;
///
/// assert!(f.is_finite());
///
/// assert!(!nan.is_finite());
/// assert!(!inf.is_finite());
/// assert!(!neg_inf.is_finite());
/// # }
/// ```
#[inline]
#[must_use]
#[cfg(not(bootstrap))]
#[unstable(feature = "f128", issue = "116909")]
#[rustc_const_unstable(feature = "const_float_classify", issue = "72505")]
pub const fn is_finite(self) -> bool {
// There's no need to handle NaN separately: if self is NaN,
// the comparison is not true, exactly as desired.
self.abs_private() < Self::INFINITY
}
/// Returns `true` if `self` has a positive sign, including `+0.0`, NaNs with
/// positive sign bit and positive infinity. Note that IEEE 754 doesn't assign any
/// meaning to the sign bit in case of a NaN, and as Rust doesn't guarantee that
/// the bit pattern of NaNs are conserved over arithmetic operations, the result of
/// `is_sign_positive` on a NaN might produce an unexpected result in some cases.
/// See [explanation of NaN as a special value](f32) for more info.
/// See [explanation of NaN as a special value](f128) for more info.
///
/// ```
/// #![feature(f128)]
@@ -257,7 +382,7 @@ pub fn is_sign_positive(self) -> bool {
/// meaning to the sign bit in case of a NaN, and as Rust doesn't guarantee that
/// the bit pattern of NaNs are conserved over arithmetic operations, the result of
/// `is_sign_negative` on a NaN might produce an unexpected result in some cases.
/// See [explanation of NaN as a special value](f32) for more info.
/// See [explanation of NaN as a special value](f128) for more info.
///
/// ```
/// #![feature(f128)]
@@ -278,6 +403,222 @@ pub fn is_sign_negative(self) -> bool {
(self.to_bits() & (1 << 127)) != 0
}
/// Returns the least number greater than `self`.
///
/// Let `TINY` be the smallest representable positive `f128`. Then,
/// - if `self.is_nan()`, this returns `self`;
/// - if `self` is [`NEG_INFINITY`], this returns [`MIN`];
/// - if `self` is `-TINY`, this returns -0.0;
/// - if `self` is -0.0 or +0.0, this returns `TINY`;
/// - if `self` is [`MAX`] or [`INFINITY`], this returns [`INFINITY`];
/// - otherwise the unique least value greater than `self` is returned.
///
/// The identity `x.next_up() == -(-x).next_down()` holds for all non-NaN `x`. When `x`
/// is finite `x == x.next_up().next_down()` also holds.
///
/// ```rust
/// #![feature(f128)]
/// #![feature(float_next_up_down)]
/// # // FIXME(f16_f128): remove when `eqtf2` is available
/// # #[cfg(all(target_arch = "x86_64", target_os = "linux"))] {
///
/// // f128::EPSILON is the difference between 1.0 and the next number up.
/// assert_eq!(1.0f128.next_up(), 1.0 + f128::EPSILON);
/// // But not for most numbers.
/// assert!(0.1f128.next_up() < 0.1 + f128::EPSILON);
/// assert_eq!(4611686018427387904f128.next_up(), 4611686018427387904.000000000000001);
/// # }
/// ```
///
/// [`NEG_INFINITY`]: Self::NEG_INFINITY
/// [`INFINITY`]: Self::INFINITY
/// [`MIN`]: Self::MIN
/// [`MAX`]: Self::MAX
#[inline]
#[cfg(not(bootstrap))]
#[unstable(feature = "f128", issue = "116909")]
// #[unstable(feature = "float_next_up_down", issue = "91399")]
pub fn next_up(self) -> Self {
// Some targets violate Rust's assumption of IEEE semantics, e.g. by flushing
// denormals to zero. This is in general unsound and unsupported, but here
// we do our best to still produce the correct result on such targets.
let bits = self.to_bits();
if self.is_nan() || bits == Self::INFINITY.to_bits() {
return self;
}
let abs = bits & !Self::SIGN_MASK;
let next_bits = if abs == 0 {
Self::TINY_BITS
} else if bits == abs {
bits + 1
} else {
bits - 1
};
Self::from_bits(next_bits)
}
/// Returns the greatest number less than `self`.
///
/// Let `TINY` be the smallest representable positive `f128`. Then,
/// - if `self.is_nan()`, this returns `self`;
/// - if `self` is [`INFINITY`], this returns [`MAX`];
/// - if `self` is `TINY`, this returns 0.0;
/// - if `self` is -0.0 or +0.0, this returns `-TINY`;
/// - if `self` is [`MIN`] or [`NEG_INFINITY`], this returns [`NEG_INFINITY`];
/// - otherwise the unique greatest value less than `self` is returned.
///
/// The identity `x.next_down() == -(-x).next_up()` holds for all non-NaN `x`. When `x`
/// is finite `x == x.next_down().next_up()` also holds.
///
/// ```rust
/// #![feature(f128)]
/// #![feature(float_next_up_down)]
/// # // FIXME(f16_f128): remove when `eqtf2` is available
/// # #[cfg(all(target_arch = "x86_64", target_os = "linux"))] {
///
/// let x = 1.0f128;
/// // Clamp value into range [0, 1).
/// let clamped = x.clamp(0.0, 1.0f128.next_down());
/// assert!(clamped < 1.0);
/// assert_eq!(clamped.next_up(), 1.0);
/// # }
/// ```
///
/// [`NEG_INFINITY`]: Self::NEG_INFINITY
/// [`INFINITY`]: Self::INFINITY
/// [`MIN`]: Self::MIN
/// [`MAX`]: Self::MAX
#[inline]
#[cfg(not(bootstrap))]
#[unstable(feature = "f128", issue = "116909")]
// #[unstable(feature = "float_next_up_down", issue = "91399")]
pub fn next_down(self) -> Self {
// Some targets violate Rust's assumption of IEEE semantics, e.g. by flushing
// denormals to zero. This is in general unsound and unsupported, but here
// we do our best to still produce the correct result on such targets.
let bits = self.to_bits();
if self.is_nan() || bits == Self::NEG_INFINITY.to_bits() {
return self;
}
let abs = bits & !Self::SIGN_MASK;
let next_bits = if abs == 0 {
Self::NEG_TINY_BITS
} else if bits == abs {
bits - 1
} else {
bits + 1
};
Self::from_bits(next_bits)
}
/// Takes the reciprocal (inverse) of a number, `1/x`.
///
/// ```
/// #![feature(f128)]
/// # // FIXME(f16_f128): remove when `eqtf2` is available
/// # #[cfg(all(target_arch = "x86_64", target_os = "linux"))] {
///
/// let x = 2.0_f128;
/// let abs_difference = (x.recip() - (1.0 / x)).abs();
///
/// assert!(abs_difference <= f128::EPSILON);
/// # }
/// ```
#[inline]
#[cfg(not(bootstrap))]
#[unstable(feature = "f128", issue = "116909")]
#[must_use = "this returns the result of the operation, without modifying the original"]
pub fn recip(self) -> Self {
1.0 / self
}
/// Converts radians to degrees.
///
/// ```
/// #![feature(f128)]
/// # // FIXME(f16_f128): remove when `eqtf2` is available
/// # #[cfg(all(target_arch = "x86_64", target_os = "linux"))] {
///
/// let angle = std::f128::consts::PI;
///
/// let abs_difference = (angle.to_degrees() - 180.0).abs();
/// assert!(abs_difference <= f128::EPSILON);
/// # }
/// ```
#[inline]
#[cfg(not(bootstrap))]
#[unstable(feature = "f128", issue = "116909")]
#[must_use = "this returns the result of the operation, without modifying the original"]
pub fn to_degrees(self) -> Self {
// Use a literal for better precision.
const PIS_IN_180: f128 = 57.2957795130823208767981548141051703324054724665643215491602_f128;
self * PIS_IN_180
}
/// Converts degrees to radians.
///
/// ```
/// #![feature(f128)]
/// # // FIXME(f16_f128): remove when `eqtf2` is available
/// # #[cfg(all(target_arch = "x86_64", target_os = "linux"))] {
///
/// let angle = 180.0f128;
///
/// let abs_difference = (angle.to_radians() - std::f128::consts::PI).abs();
///
/// assert!(abs_difference <= 1e-30);
/// # }
/// ```
#[inline]
#[cfg(not(bootstrap))]
#[unstable(feature = "f128", issue = "116909")]
#[must_use = "this returns the result of the operation, without modifying the original"]
pub fn to_radians(self) -> f128 {
// Use a literal for better precision.
const RADS_PER_DEG: f128 =
0.0174532925199432957692369076848861271344287188854172545609719_f128;
self * RADS_PER_DEG
}
/// Rounds toward zero and converts to any primitive integer type,
/// assuming that the value is finite and fits in that type.
///
/// ```
/// #![feature(f128)]
/// # // FIXME(f16_f128): remove when `float*itf` is available
/// # #[cfg(all(target_arch = "x86_64", target_os = "linux"))] {
///
/// let value = 4.6_f128;
/// let rounded = unsafe { value.to_int_unchecked::<u16>() };
/// assert_eq!(rounded, 4);
///
/// let value = -128.9_f128;
/// let rounded = unsafe { value.to_int_unchecked::<i8>() };
/// assert_eq!(rounded, i8::MIN);
/// # }
/// ```
///
/// # Safety
///
/// The value must:
///
/// * Not be `NaN`
/// * Not be infinite
/// * Be representable in the return type `Int`, after truncating off its fractional part
#[inline]
#[unstable(feature = "f128", issue = "116909")]
#[must_use = "this returns the result of the operation, without modifying the original"]
pub unsafe fn to_int_unchecked<Int>(self) -> Int
where
Self: FloatToInt<Int>,
{
// SAFETY: the caller must uphold the safety contract for
// `FloatToInt::to_int_unchecked`.
unsafe { FloatToInt::<Int>::to_int_unchecked(self) }
}
/// Raw transmutation to `u128`.
///
/// This is currently identical to `transmute::<f128, u128>(self)` on all platforms.
@@ -287,6 +628,14 @@ pub fn is_sign_negative(self) -> bool {
///
/// Note that this function is distinct from `as` casting, which attempts to
/// preserve the *numeric* value, and not the bitwise value.
///
/// ```
/// #![feature(f128)]
///
/// # // FIXME(f16_f128): enable this once const casting works
/// # // assert_ne!((1f128).to_bits(), 1f128 as u128); // to_bits() is not casting!
/// assert_eq!((12.5f128).to_bits(), 0x40029000000000000000000000000000);
/// ```
#[inline]
#[unstable(feature = "f128", issue = "116909")]
#[must_use = "this returns the result of the operation, without modifying the original"]
@@ -326,6 +675,16 @@ pub fn to_bits(self) -> u128 {
///
/// Note that this function is distinct from `as` casting, which attempts to
/// preserve the *numeric* value, and not the bitwise value.
///
/// ```
/// #![feature(f128)]
/// # // FIXME(f16_f128): remove when `eqtf2` is available
/// # #[cfg(all(target_arch = "x86_64", target_os = "linux"))] {
///
/// let v = f128::from_bits(0x40029000000000000000000000000000);
/// assert_eq!(v, 12.5);
/// # }
/// ```
#[inline]
#[must_use]
#[unstable(feature = "f128", issue = "116909")]
@@ -335,4 +694,315 @@ pub fn from_bits(v: u128) -> Self {
// Stability concerns.
unsafe { mem::transmute(v) }
}
/// Return the memory representation of this floating point number as a byte array in
/// big-endian (network) byte order.
///
/// See [`from_bits`](Self::from_bits) for some discussion of the
/// portability of this operation (there are almost no issues).
///
/// # Examples
///
/// ```
/// #![feature(f128)]
///
/// let bytes = 12.5f128.to_be_bytes();
/// assert_eq!(
/// bytes,
/// [0x40, 0x02, 0x90, 0x00, 0x00, 0x00, 0x00, 0x00,
/// 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00]
/// );
/// ```
#[inline]
#[unstable(feature = "f128", issue = "116909")]
#[must_use = "this returns the result of the operation, without modifying the original"]
pub fn to_be_bytes(self) -> [u8; 16] {
self.to_bits().to_be_bytes()
}
/// Return the memory representation of this floating point number as a byte array in
/// little-endian byte order.
///
/// See [`from_bits`](Self::from_bits) for some discussion of the
/// portability of this operation (there are almost no issues).
///
/// # Examples
///
/// ```
/// #![feature(f128)]
///
/// let bytes = 12.5f128.to_le_bytes();
/// assert_eq!(
/// bytes,
/// [0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
/// 0x00, 0x00, 0x00, 0x00, 0x00, 0x90, 0x02, 0x40]
/// );
/// ```
#[inline]
#[unstable(feature = "f128", issue = "116909")]
#[must_use = "this returns the result of the operation, without modifying the original"]
pub fn to_le_bytes(self) -> [u8; 16] {
self.to_bits().to_le_bytes()
}
/// Return the memory representation of this floating point number as a byte array in
/// native byte order.
///
/// As the target platform's native endianness is used, portable code
/// should use [`to_be_bytes`] or [`to_le_bytes`], as appropriate, instead.
///
/// [`to_be_bytes`]: f128::to_be_bytes
/// [`to_le_bytes`]: f128::to_le_bytes
///
/// See [`from_bits`](Self::from_bits) for some discussion of the
/// portability of this operation (there are almost no issues).
///
/// # Examples
///
/// ```
/// #![feature(f128)]
///
/// let bytes = 12.5f128.to_ne_bytes();
/// assert_eq!(
/// bytes,
/// if cfg!(target_endian = "big") {
/// [0x40, 0x02, 0x90, 0x00, 0x00, 0x00, 0x00, 0x00,
/// 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00]
/// } else {
/// [0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
/// 0x00, 0x00, 0x00, 0x00, 0x00, 0x90, 0x02, 0x40]
/// }
/// );
/// ```
#[inline]
#[unstable(feature = "f128", issue = "116909")]
#[must_use = "this returns the result of the operation, without modifying the original"]
pub fn to_ne_bytes(self) -> [u8; 16] {
self.to_bits().to_ne_bytes()
}
/// Create a floating point value from its representation as a byte array in big endian.
///
/// See [`from_bits`](Self::from_bits) for some discussion of the
/// portability of this operation (there are almost no issues).
///
/// # Examples
///
/// ```
/// #![feature(f128)]
/// # // FIXME(f16_f128): remove when `eqtf2` is available
/// # #[cfg(all(target_arch = "x86_64", target_os = "linux"))] {
///
/// let value = f128::from_be_bytes(
/// [0x40, 0x02, 0x90, 0x00, 0x00, 0x00, 0x00, 0x00,
/// 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00]
/// );
/// assert_eq!(value, 12.5);
/// # }
/// ```
#[inline]
#[must_use]
#[unstable(feature = "f128", issue = "116909")]
pub fn from_be_bytes(bytes: [u8; 16]) -> Self {
Self::from_bits(u128::from_be_bytes(bytes))
}
/// Create a floating point value from its representation as a byte array in little endian.
///
/// See [`from_bits`](Self::from_bits) for some discussion of the
/// portability of this operation (there are almost no issues).
///
/// # Examples
///
/// ```
/// #![feature(f128)]
/// # // FIXME(f16_f128): remove when `eqtf2` is available
/// # #[cfg(all(target_arch = "x86_64", target_os = "linux"))] {
///
/// let value = f128::from_le_bytes(
/// [0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
/// 0x00, 0x00, 0x00, 0x00, 0x00, 0x90, 0x02, 0x40]
/// );
/// assert_eq!(value, 12.5);
/// # }
/// ```
#[inline]
#[must_use]
#[unstable(feature = "f128", issue = "116909")]
pub fn from_le_bytes(bytes: [u8; 16]) -> Self {
Self::from_bits(u128::from_le_bytes(bytes))
}
/// Create a floating point value from its representation as a byte array in native endian.
///
/// As the target platform's native endianness is used, portable code
/// likely wants to use [`from_be_bytes`] or [`from_le_bytes`], as
/// appropriate instead.
///
/// [`from_be_bytes`]: f128::from_be_bytes
/// [`from_le_bytes`]: f128::from_le_bytes
///
/// See [`from_bits`](Self::from_bits) for some discussion of the
/// portability of this operation (there are almost no issues).
///
/// # Examples
///
/// ```
/// #![feature(f128)]
/// # // FIXME(f16_f128): remove when `eqtf2` is available
/// # #[cfg(all(target_arch = "x86_64", target_os = "linux"))] {
///
/// let value = f128::from_ne_bytes(if cfg!(target_endian = "big") {
/// [0x40, 0x02, 0x90, 0x00, 0x00, 0x00, 0x00, 0x00,
/// 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00]
/// } else {
/// [0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
/// 0x00, 0x00, 0x00, 0x00, 0x00, 0x90, 0x02, 0x40]
/// });
/// assert_eq!(value, 12.5);
/// # }
/// ```
#[inline]
#[must_use]
#[unstable(feature = "f128", issue = "116909")]
pub fn from_ne_bytes(bytes: [u8; 16]) -> Self {
Self::from_bits(u128::from_ne_bytes(bytes))
}
/// Return the ordering between `self` and `other`.
///
/// Unlike the standard partial comparison between floating point numbers,
/// this comparison always produces an ordering in accordance to
/// the `totalOrder` predicate as defined in the IEEE 754 (2008 revision)
/// floating point standard. The values are ordered in the following sequence:
///
/// - negative quiet NaN
/// - negative signaling NaN
/// - negative infinity
/// - negative numbers
/// - negative subnormal numbers
/// - negative zero
/// - positive zero
/// - positive subnormal numbers
/// - positive numbers
/// - positive infinity
/// - positive signaling NaN
/// - positive quiet NaN.
///
/// The ordering established by this function does not always agree with the
/// [`PartialOrd`] and [`PartialEq`] implementations of `f128`. For example,
/// they consider negative and positive zero equal, while `total_cmp`
/// doesn't.
///
/// The interpretation of the signaling NaN bit follows the definition in
/// the IEEE 754 standard, which may not match the interpretation by some of
/// the older, non-conformant (e.g. MIPS) hardware implementations.
///
/// # Example
///
/// ```
/// #![feature(f128)]
///
/// struct GoodBoy {
/// name: &'static str,
/// weight: f128,
/// }
///
/// let mut bois = vec![
/// GoodBoy { name: "Pucci", weight: 0.1 },
/// GoodBoy { name: "Woofer", weight: 99.0 },
/// GoodBoy { name: "Yapper", weight: 10.0 },
/// GoodBoy { name: "Chonk", weight: f128::INFINITY },
/// GoodBoy { name: "Abs. Unit", weight: f128::NAN },
/// GoodBoy { name: "Floaty", weight: -5.0 },
/// ];
///
/// bois.sort_by(|a, b| a.weight.total_cmp(&b.weight));
///
/// // `f128::NAN` could be positive or negative, which will affect the sort order.
/// if f128::NAN.is_sign_negative() {
/// bois.into_iter().map(|b| b.weight)
/// .zip([f128::NAN, -5.0, 0.1, 10.0, 99.0, f128::INFINITY].iter())
/// .for_each(|(a, b)| assert_eq!(a.to_bits(), b.to_bits()))
/// } else {
/// bois.into_iter().map(|b| b.weight)
/// .zip([-5.0, 0.1, 10.0, 99.0, f128::INFINITY, f128::NAN].iter())
/// .for_each(|(a, b)| assert_eq!(a.to_bits(), b.to_bits()))
/// }
/// ```
#[inline]
#[must_use]
#[cfg(not(bootstrap))]
#[unstable(feature = "f128", issue = "116909")]
pub fn total_cmp(&self, other: &Self) -> crate::cmp::Ordering {
let mut left = self.to_bits() as i128;
let mut right = other.to_bits() as i128;
// In case of negatives, flip all the bits except the sign
// to achieve a similar layout as two's complement integers
//
// Why does this work? IEEE 754 floats consist of three fields:
// Sign bit, exponent and mantissa. The set of exponent and mantissa
// fields as a whole have the property that their bitwise order is
// equal to the numeric magnitude where the magnitude is defined.
// The magnitude is not normally defined on NaN values, but
// IEEE 754 totalOrder defines the NaN values also to follow the
// bitwise order. This leads to order explained in the doc comment.
// However, the representation of magnitude is the same for negative
// and positive numbers only the sign bit is different.
// To easily compare the floats as signed integers, we need to
// flip the exponent and mantissa bits in case of negative numbers.
// We effectively convert the numbers to "two's complement" form.
//
// To do the flipping, we construct a mask and XOR against it.
// We branchlessly calculate an "all-ones except for the sign bit"
// mask from negative-signed values: right shifting sign-extends
// the integer, so we "fill" the mask with sign bits, and then
// convert to unsigned to push one more zero bit.
// On positive values, the mask is all zeros, so it's a no-op.
left ^= (((left >> 127) as u128) >> 1) as i128;
right ^= (((right >> 127) as u128) >> 1) as i128;
left.cmp(&right)
}
/// Restrict a value to a certain interval unless it is NaN.
///
/// Returns `max` if `self` is greater than `max`, and `min` if `self` is
/// less than `min`. Otherwise this returns `self`.
///
/// Note that this function returns NaN if the initial value was NaN as
/// well.
///
/// # Panics
///
/// Panics if `min > max`, `min` is NaN, or `max` is NaN.
///
/// # Examples
///
/// ```
/// #![feature(f128)]
/// # // FIXME(f16_f128): remove when `{eq,gt,unord}tf` are available
/// # #[cfg(all(target_arch = "x86_64", target_os = "linux"))] {
///
/// assert!((-3.0f128).clamp(-2.0, 1.0) == -2.0);
/// assert!((0.0f128).clamp(-2.0, 1.0) == 0.0);
/// assert!((2.0f128).clamp(-2.0, 1.0) == 1.0);
/// assert!((f128::NAN).clamp(-2.0, 1.0).is_nan());
/// # }
/// ```
#[inline]
#[cfg(not(bootstrap))]
#[unstable(feature = "f128", issue = "116909")]
#[must_use = "method returns a new number and does not mutate the original value"]
pub fn clamp(mut self, min: f128, max: f128) -> f128 {
assert!(min <= max, "min > max, or either was NaN. min = {min:?}, max = {max:?}");
if self < min {
self = min;
}
if self > max {
self = max;
}
self
}
}
+644 -2
View File
@@ -11,6 +11,7 @@
#![unstable(feature = "f16", issue = "116909")]
use crate::convert::FloatToInt;
use crate::mem;
/// Basic mathematical constants.
@@ -215,21 +216,140 @@ impl f16 {
#[unstable(feature = "f16", issue = "116909")]
pub const MAX_10_EXP: i32 = 4;
/// Not a Number (NaN).
///
/// Note that IEEE 754 doesn't define just a single NaN value;
/// a plethora of bit patterns are considered to be NaN.
/// Furthermore, the standard makes a difference
/// between a "signaling" and a "quiet" NaN,
/// and allows inspecting its "payload" (the unspecified bits in the bit pattern).
/// This constant isn't guaranteed to equal to any specific NaN bitpattern,
/// and the stability of its representation over Rust versions
/// and target platforms isn't guaranteed.
#[cfg(not(bootstrap))]
#[allow(clippy::eq_op)]
#[rustc_diagnostic_item = "f16_nan"]
#[unstable(feature = "f16", issue = "116909")]
pub const NAN: f16 = 0.0_f16 / 0.0_f16;
/// Infinity (∞).
#[cfg(not(bootstrap))]
#[unstable(feature = "f16", issue = "116909")]
pub const INFINITY: f16 = 1.0_f16 / 0.0_f16;
/// Negative infinity (−∞).
#[cfg(not(bootstrap))]
#[unstable(feature = "f16", issue = "116909")]
pub const NEG_INFINITY: f16 = -1.0_f16 / 0.0_f16;
/// Sign bit
#[cfg(not(bootstrap))]
const SIGN_MASK: u16 = 0x8000;
/// Minimum representable positive value (min subnormal)
#[cfg(not(bootstrap))]
const TINY_BITS: u16 = 0x1;
/// Minimum representable negative value (min negative subnormal)
#[cfg(not(bootstrap))]
const NEG_TINY_BITS: u16 = Self::TINY_BITS | Self::SIGN_MASK;
/// Returns `true` if this value is NaN.
///
/// ```
/// #![feature(f16)]
/// # #[cfg(target_arch = "aarch64")] { // FIXME(f16_F128): rust-lang/rust#123885
///
/// let nan = f16::NAN;
/// let f = 7.0_f16;
///
/// assert!(nan.is_nan());
/// assert!(!f.is_nan());
/// # }
/// ```
#[inline]
#[must_use]
#[cfg(not(bootstrap))]
#[unstable(feature = "f16", issue = "116909")]
#[allow(clippy::eq_op)] // > if you intended to check if the operand is NaN, use `.is_nan()` instead :)
pub const fn is_nan(self) -> bool {
self != self
}
// FIXMxE(#50145): `abs` is publicly unavailable in core due to
// concerns about portability, so this implementation is for
// private use internally.
#[inline]
#[cfg(not(bootstrap))]
#[rustc_const_unstable(feature = "const_float_classify", issue = "72505")]
pub(crate) const fn abs_private(self) -> f16 {
// SAFETY: This transmutation is fine. Probably. For the reasons std is using it.
unsafe { mem::transmute::<u16, f16>(mem::transmute::<f16, u16>(self) & !Self::SIGN_MASK) }
}
/// Returns `true` if this value is positive infinity or negative infinity, and
/// `false` otherwise.
///
/// ```
/// #![feature(f16)]
/// # #[cfg(target_arch = "aarch64")] { // FIXME(f16_F128): rust-lang/rust#123885
///
/// let f = 7.0f16;
/// let inf = f16::INFINITY;
/// let neg_inf = f16::NEG_INFINITY;
/// let nan = f16::NAN;
///
/// assert!(!f.is_infinite());
/// assert!(!nan.is_infinite());
///
/// assert!(inf.is_infinite());
/// assert!(neg_inf.is_infinite());
/// # }
/// ```
#[inline]
#[must_use]
#[cfg(not(bootstrap))]
#[unstable(feature = "f16", issue = "116909")]
#[rustc_const_unstable(feature = "const_float_classify", issue = "72505")]
pub const fn is_infinite(self) -> bool {
(self == f16::INFINITY) | (self == f16::NEG_INFINITY)
}
/// Returns `true` if this number is neither infinite nor NaN.
///
/// ```
/// #![feature(f16)]
/// # #[cfg(target_arch = "aarch64")] { // FIXME(f16_F128): rust-lang/rust#123885
///
/// let f = 7.0f16;
/// let inf: f16 = f16::INFINITY;
/// let neg_inf: f16 = f16::NEG_INFINITY;
/// let nan: f16 = f16::NAN;
///
/// assert!(f.is_finite());
///
/// assert!(!nan.is_finite());
/// assert!(!inf.is_finite());
/// assert!(!neg_inf.is_finite());
/// # }
/// ```
#[inline]
#[must_use]
#[cfg(not(bootstrap))]
#[unstable(feature = "f16", issue = "116909")]
#[rustc_const_unstable(feature = "const_float_classify", issue = "72505")]
pub const fn is_finite(self) -> bool {
// There's no need to handle NaN separately: if self is NaN,
// the comparison is not true, exactly as desired.
self.abs_private() < Self::INFINITY
}
/// Returns `true` if `self` has a positive sign, including `+0.0`, NaNs with
/// positive sign bit and positive infinity. Note that IEEE 754 doesn't assign any
/// meaning to the sign bit in case of a NaN, and as Rust doesn't guarantee that
/// the bit pattern of NaNs are conserved over arithmetic operations, the result of
/// `is_sign_positive` on a NaN might produce an unexpected result in some cases.
/// See [explanation of NaN as a special value](f32) for more info.
/// See [explanation of NaN as a special value](f16) for more info.
///
/// ```
/// #![feature(f16)]
@@ -252,7 +372,7 @@ pub fn is_sign_positive(self) -> bool {
/// meaning to the sign bit in case of a NaN, and as Rust doesn't guarantee that
/// the bit pattern of NaNs are conserved over arithmetic operations, the result of
/// `is_sign_negative` on a NaN might produce an unexpected result in some cases.
/// See [explanation of NaN as a special value](f32) for more info.
/// See [explanation of NaN as a special value](f16) for more info.
///
/// ```
/// #![feature(f16)]
@@ -273,6 +393,220 @@ pub fn is_sign_negative(self) -> bool {
(self.to_bits() & (1 << 15)) != 0
}
/// Returns the least number greater than `self`.
///
/// Let `TINY` be the smallest representable positive `f16`. Then,
/// - if `self.is_nan()`, this returns `self`;
/// - if `self` is [`NEG_INFINITY`], this returns [`MIN`];
/// - if `self` is `-TINY`, this returns -0.0;
/// - if `self` is -0.0 or +0.0, this returns `TINY`;
/// - if `self` is [`MAX`] or [`INFINITY`], this returns [`INFINITY`];
/// - otherwise the unique least value greater than `self` is returned.
///
/// The identity `x.next_up() == -(-x).next_down()` holds for all non-NaN `x`. When `x`
/// is finite `x == x.next_up().next_down()` also holds.
///
/// ```rust
/// #![feature(f16)]
/// #![feature(float_next_up_down)]
/// # // FIXME(f16_f128): ABI issues on MSVC
/// # #[cfg(all(target_arch = "x86_64", target_os = "linux"))] {
///
/// // f16::EPSILON is the difference between 1.0 and the next number up.
/// assert_eq!(1.0f16.next_up(), 1.0 + f16::EPSILON);
/// // But not for most numbers.
/// assert!(0.1f16.next_up() < 0.1 + f16::EPSILON);
/// assert_eq!(4356f16.next_up(), 4360.0);
/// # }
/// ```
///
/// [`NEG_INFINITY`]: Self::NEG_INFINITY
/// [`INFINITY`]: Self::INFINITY
/// [`MIN`]: Self::MIN
/// [`MAX`]: Self::MAX
#[inline]
#[cfg(not(bootstrap))]
#[unstable(feature = "f16", issue = "116909")]
// #[unstable(feature = "float_next_up_down", issue = "91399")]
pub fn next_up(self) -> Self {
    // Step on the raw bits rather than doing float arithmetic: some targets
    // flush denormals to zero (in general unsound and unsupported), and
    // integer stepping still yields the correct result there.
    let bits = self.to_bits();
    if self.is_nan() || bits == Self::INFINITY.to_bits() {
        return self;
    }
    Self::from_bits(match bits & !Self::SIGN_MASK {
        // -0.0 and +0.0 both step up to the smallest positive subnormal.
        0 => Self::TINY_BITS,
        // Positive value: incrementing the bits increases the magnitude.
        magnitude if magnitude == bits => bits + 1,
        // Negative value: decrementing the bits decreases the magnitude.
        _ => bits - 1,
    })
}
/// Returns the greatest number less than `self`.
///
/// Let `TINY` be the smallest representable positive `f16`. Then,
/// - if `self.is_nan()`, this returns `self`;
/// - if `self` is [`INFINITY`], this returns [`MAX`];
/// - if `self` is `TINY`, this returns 0.0;
/// - if `self` is -0.0 or +0.0, this returns `-TINY`;
/// - if `self` is [`MIN`] or [`NEG_INFINITY`], this returns [`NEG_INFINITY`];
/// - otherwise the unique greatest value less than `self` is returned.
///
/// The identity `x.next_down() == -(-x).next_up()` holds for all non-NaN `x`. When `x`
/// is finite `x == x.next_down().next_up()` also holds.
///
/// ```rust
/// #![feature(f16)]
/// #![feature(float_next_up_down)]
/// # // FIXME(f16_f128): ABI issues on MSVC
/// # #[cfg(all(target_arch = "x86_64", target_os = "linux"))] {
///
/// let x = 1.0f16;
/// // Clamp value into range [0, 1).
/// let clamped = x.clamp(0.0, 1.0f16.next_down());
/// assert!(clamped < 1.0);
/// assert_eq!(clamped.next_up(), 1.0);
/// # }
/// ```
///
/// [`NEG_INFINITY`]: Self::NEG_INFINITY
/// [`INFINITY`]: Self::INFINITY
/// [`MIN`]: Self::MIN
/// [`MAX`]: Self::MAX
#[inline]
#[cfg(not(bootstrap))]
#[unstable(feature = "f16", issue = "116909")]
// #[unstable(feature = "float_next_up_down", issue = "91399")]
pub fn next_down(self) -> Self {
    // Step on the raw bits rather than doing float arithmetic: some targets
    // flush denormals to zero (in general unsound and unsupported), and
    // integer stepping still yields the correct result there.
    let bits = self.to_bits();
    if self.is_nan() || bits == Self::NEG_INFINITY.to_bits() {
        return self;
    }
    Self::from_bits(match bits & !Self::SIGN_MASK {
        // -0.0 and +0.0 both step down to the smallest negative subnormal.
        0 => Self::NEG_TINY_BITS,
        // Positive value: decrementing the bits decreases the magnitude.
        magnitude if magnitude == bits => bits - 1,
        // Negative value: incrementing the bits increases the magnitude.
        _ => bits + 1,
    })
}
/// Takes the reciprocal (inverse) of a number, `1/x`.
///
/// ```
/// #![feature(f16)]
/// # // FIXME(f16_f128): remove when `extendhfsf2` and `truncsfhf2` are available
/// # #[cfg(target_os = "linux")] {
///
/// let x = 2.0_f16;
/// let abs_difference = (x.recip() - (1.0 / x)).abs();
///
/// assert!(abs_difference <= f16::EPSILON);
/// # }
/// ```
#[inline]
#[cfg(not(bootstrap))]
#[unstable(feature = "f16", issue = "116909")]
#[must_use = "this returns the result of the operation, without modifying the original"]
pub fn recip(self) -> Self {
    // The multiplicative inverse: one divided by the value.
    1.0 / self
}
/// Converts radians to degrees.
///
/// ```
/// #![feature(f16)]
/// # // FIXME(f16_f128): remove when `extendhfsf2` and `truncsfhf2` are available
/// # #[cfg(target_os = "linux")] {
///
/// let angle = std::f16::consts::PI;
///
/// let abs_difference = (angle.to_degrees() - 180.0).abs();
/// assert!(abs_difference <= 0.5);
/// # }
/// ```
#[inline]
#[cfg(not(bootstrap))]
#[unstable(feature = "f16", issue = "116909")]
#[must_use = "this returns the result of the operation, without modifying the original"]
pub fn to_degrees(self) -> Self {
    // 180/PI, spelled out as a literal for the best possible precision.
    const DEGS_PER_RAD: f16 = 57.2957795130823208767981548141051703_f16;
    self * DEGS_PER_RAD
}
/// Converts degrees to radians.
///
/// ```
/// #![feature(f16)]
/// # // FIXME(f16_f128): remove when `extendhfsf2` and `truncsfhf2` are available
/// # #[cfg(target_os = "linux")] {
///
/// let angle = 180.0f16;
///
/// let abs_difference = (angle.to_radians() - std::f16::consts::PI).abs();
///
/// assert!(abs_difference <= 0.01);
/// # }
/// ```
#[inline]
#[cfg(not(bootstrap))]
#[unstable(feature = "f16", issue = "116909")]
#[must_use = "this returns the result of the operation, without modifying the original"]
pub fn to_radians(self) -> f16 {
// Use a literal for better precision.
const RADS_PER_DEG: f16 = 0.017453292519943295769236907684886_f16;
self * RADS_PER_DEG
}
/// Rounds toward zero and converts to any primitive integer type,
/// assuming that the value is finite and fits in that type.
///
/// ```
/// #![feature(f16)]
/// # #[cfg(target_arch = "aarch64")] { // FIXME(f16_F128): rust-lang/rust#123885
///
/// let value = 4.6_f16;
/// let rounded = unsafe { value.to_int_unchecked::<u16>() };
/// assert_eq!(rounded, 4);
///
/// let value = -128.9_f16;
/// let rounded = unsafe { value.to_int_unchecked::<i8>() };
/// assert_eq!(rounded, i8::MIN);
/// # }
/// ```
///
/// # Safety
///
/// The value must:
///
/// * Not be `NaN`
/// * Not be infinite
/// * Be representable in the return type `Int`, after truncating off its fractional part
#[inline]
#[unstable(feature = "f16", issue = "116909")]
#[must_use = "this returns the result of the operation, without modifying the original"]
pub unsafe fn to_int_unchecked<Int>(self) -> Int
where
    Self: FloatToInt<Int>,
{
    // SAFETY: the caller upholds the safety contract of
    // `FloatToInt::to_int_unchecked` (finite, non-NaN, fits in `Int`).
    unsafe { <Self as FloatToInt<Int>>::to_int_unchecked(self) }
}
/// Raw transmutation to `u16`.
///
/// This is currently identical to `transmute::<f16, u16>(self)` on all platforms.
@@ -282,6 +616,16 @@ pub fn is_sign_negative(self) -> bool {
///
/// Note that this function is distinct from `as` casting, which attempts to
/// preserve the *numeric* value, and not the bitwise value.
///
/// ```
/// #![feature(f16)]
/// # #[cfg(target_arch = "aarch64")] { // FIXME(f16_F128): rust-lang/rust#123885
///
/// # // FIXME(f16_f128): enable this once const casting works
/// # // assert_ne!((1f16).to_bits(), 1f16 as u128); // to_bits() is not casting!
/// assert_eq!((12.5f16).to_bits(), 0x4a40);
/// # }
/// ```
#[inline]
#[unstable(feature = "f16", issue = "116909")]
#[must_use = "this returns the result of the operation, without modifying the original"]
@@ -321,6 +665,15 @@ pub fn to_bits(self) -> u16 {
///
/// Note that this function is distinct from `as` casting, which attempts to
/// preserve the *numeric* value, and not the bitwise value.
///
/// ```
/// #![feature(f16)]
/// # #[cfg(target_arch = "aarch64")] { // FIXME(f16_F128): rust-lang/rust#123885
///
/// let v = f16::from_bits(0x4a40);
/// assert_eq!(v, 12.5);
/// # }
/// ```
#[inline]
#[must_use]
#[unstable(feature = "f16", issue = "116909")]
@@ -330,4 +683,293 @@ pub fn from_bits(v: u16) -> Self {
// Stability concerns.
unsafe { mem::transmute(v) }
}
/// Return the memory representation of this floating point number as a byte array in
/// big-endian (network) byte order.
///
/// See [`from_bits`](Self::from_bits) for some discussion of the
/// portability of this operation (there are almost no issues).
///
/// # Examples
///
/// ```
/// #![feature(f16)]
///
/// let bytes = 12.5f16.to_be_bytes();
/// assert_eq!(bytes, [0x4a, 0x40]);
/// ```
#[inline]
#[unstable(feature = "f16", issue = "116909")]
#[must_use = "this returns the result of the operation, without modifying the original"]
pub fn to_be_bytes(self) -> [u8; 2] {
    // Defer to the integer implementation on the raw bit pattern.
    u16::to_be_bytes(self.to_bits())
}
/// Return the memory representation of this floating point number as a byte array in
/// little-endian byte order.
///
/// See [`from_bits`](Self::from_bits) for some discussion of the
/// portability of this operation (there are almost no issues).
///
/// # Examples
///
/// ```
/// #![feature(f16)]
///
/// let bytes = 12.5f16.to_le_bytes();
/// assert_eq!(bytes, [0x40, 0x4a]);
/// ```
#[inline]
#[unstable(feature = "f16", issue = "116909")]
#[must_use = "this returns the result of the operation, without modifying the original"]
pub fn to_le_bytes(self) -> [u8; 2] {
    // Defer to the integer implementation on the raw bit pattern.
    u16::to_le_bytes(self.to_bits())
}
/// Return the memory representation of this floating point number as a byte array in
/// native byte order.
///
/// As the target platform's native endianness is used, portable code
/// should use [`to_be_bytes`] or [`to_le_bytes`], as appropriate, instead.
///
/// [`to_be_bytes`]: f16::to_be_bytes
/// [`to_le_bytes`]: f16::to_le_bytes
///
/// See [`from_bits`](Self::from_bits) for some discussion of the
/// portability of this operation (there are almost no issues).
///
/// # Examples
///
/// ```
/// #![feature(f16)]
///
/// let bytes = 12.5f16.to_ne_bytes();
/// assert_eq!(
///     bytes,
///     if cfg!(target_endian = "big") {
///         [0x4a, 0x40]
///     } else {
///         [0x40, 0x4a]
///     }
/// );
/// ```
#[inline]
#[unstable(feature = "f16", issue = "116909")]
#[must_use = "this returns the result of the operation, without modifying the original"]
pub fn to_ne_bytes(self) -> [u8; 2] {
    // Defer to the integer implementation on the raw bit pattern.
    u16::to_ne_bytes(self.to_bits())
}
/// Create a floating point value from its representation as a byte array in big endian.
///
/// See [`from_bits`](Self::from_bits) for some discussion of the
/// portability of this operation (there are almost no issues).
///
/// # Examples
///
/// ```
/// #![feature(f16)]
/// # #[cfg(target_arch = "aarch64")] { // FIXME(f16_F128): rust-lang/rust#123885
///
/// let value = f16::from_be_bytes([0x4a, 0x40]);
/// assert_eq!(value, 12.5);
/// # }
/// ```
#[inline]
#[must_use]
#[unstable(feature = "f16", issue = "116909")]
pub fn from_be_bytes(bytes: [u8; 2]) -> Self {
    // Decode the big-endian bit pattern, then reinterpret it as a float.
    let bits = u16::from_be_bytes(bytes);
    Self::from_bits(bits)
}
/// Create a floating point value from its representation as a byte array in little endian.
///
/// See [`from_bits`](Self::from_bits) for some discussion of the
/// portability of this operation (there are almost no issues).
///
/// # Examples
///
/// ```
/// #![feature(f16)]
/// # #[cfg(target_arch = "aarch64")] { // FIXME(f16_F128): rust-lang/rust#123885
///
/// let value = f16::from_le_bytes([0x40, 0x4a]);
/// assert_eq!(value, 12.5);
/// # }
/// ```
#[inline]
#[must_use]
#[unstable(feature = "f16", issue = "116909")]
pub fn from_le_bytes(bytes: [u8; 2]) -> Self {
    // Decode the little-endian bit pattern, then reinterpret it as a float.
    let bits = u16::from_le_bytes(bytes);
    Self::from_bits(bits)
}
/// Create a floating point value from its representation as a byte array in native endian.
///
/// As the target platform's native endianness is used, portable code
/// likely wants to use [`from_be_bytes`] or [`from_le_bytes`], as
/// appropriate instead.
///
/// [`from_be_bytes`]: f16::from_be_bytes
/// [`from_le_bytes`]: f16::from_le_bytes
///
/// See [`from_bits`](Self::from_bits) for some discussion of the
/// portability of this operation (there are almost no issues).
///
/// # Examples
///
/// ```
/// #![feature(f16)]
/// # #[cfg(target_arch = "aarch64")] { // FIXME(f16_F128): rust-lang/rust#123885
///
/// let value = f16::from_ne_bytes(if cfg!(target_endian = "big") {
///     [0x4a, 0x40]
/// } else {
///     [0x40, 0x4a]
/// });
/// assert_eq!(value, 12.5);
/// # }
/// ```
#[inline]
#[must_use]
#[unstable(feature = "f16", issue = "116909")]
pub fn from_ne_bytes(bytes: [u8; 2]) -> Self {
    // Decode the native-endian bit pattern, then reinterpret it as a float.
    let bits = u16::from_ne_bytes(bytes);
    Self::from_bits(bits)
}
/// Return the ordering between `self` and `other`.
///
/// Unlike the standard partial comparison between floating point numbers,
/// this comparison always produces an ordering in accordance to
/// the `totalOrder` predicate as defined in the IEEE 754 (2008 revision)
/// floating point standard. The values are ordered in the following sequence:
///
/// - negative quiet NaN
/// - negative signaling NaN
/// - negative infinity
/// - negative numbers
/// - negative subnormal numbers
/// - negative zero
/// - positive zero
/// - positive subnormal numbers
/// - positive numbers
/// - positive infinity
/// - positive signaling NaN
/// - positive quiet NaN.
///
/// The ordering established by this function does not always agree with the
/// [`PartialOrd`] and [`PartialEq`] implementations of `f16`. For example,
/// they consider negative and positive zero equal, while `total_cmp`
/// doesn't.
///
/// The interpretation of the signaling NaN bit follows the definition in
/// the IEEE 754 standard, which may not match the interpretation by some of
/// the older, non-conformant (e.g. MIPS) hardware implementations.
///
/// # Example
///
/// ```
/// #![feature(f16)]
///
/// struct GoodBoy {
///     name: &'static str,
///     weight: f16,
/// }
///
/// let mut bois = vec![
///     GoodBoy { name: "Pucci", weight: 0.1 },
///     GoodBoy { name: "Woofer", weight: 99.0 },
///     GoodBoy { name: "Yapper", weight: 10.0 },
///     GoodBoy { name: "Chonk", weight: f16::INFINITY },
///     GoodBoy { name: "Abs. Unit", weight: f16::NAN },
///     GoodBoy { name: "Floaty", weight: -5.0 },
/// ];
///
/// bois.sort_by(|a, b| a.weight.total_cmp(&b.weight));
///
/// // `f16::NAN` could be positive or negative, which will affect the sort order.
/// if f16::NAN.is_sign_negative() {
///     bois.into_iter().map(|b| b.weight)
///         .zip([f16::NAN, -5.0, 0.1, 10.0, 99.0, f16::INFINITY].iter())
///         .for_each(|(a, b)| assert_eq!(a.to_bits(), b.to_bits()))
/// } else {
///     bois.into_iter().map(|b| b.weight)
///         .zip([-5.0, 0.1, 10.0, 99.0, f16::INFINITY, f16::NAN].iter())
///         .for_each(|(a, b)| assert_eq!(a.to_bits(), b.to_bits()))
/// }
/// ```
#[inline]
#[must_use]
#[cfg(not(bootstrap))]
#[unstable(feature = "f16", issue = "116909")]
pub fn total_cmp(&self, other: &Self) -> crate::cmp::Ordering {
    // Map each bit pattern to a signed integer whose native ordering matches
    // IEEE 754 totalOrder, then compare those integers.
    //
    // Why this works: for non-negative floats, the exponent+mantissa bits
    // taken together already order by magnitude (totalOrder extends this
    // bitwise ordering to NaN payloads as well, giving the sequence in the
    // doc comment above). Negative floats use sign-magnitude, so to make
    // them behave like two's complement integers we must flip every bit
    // except the sign bit.
    //
    // The flip mask is computed branchlessly: an arithmetic right shift by
    // 15 smears the sign bit across the whole word (all ones for negative
    // inputs, all zeros otherwise), and the subsequent unsigned shift by one
    // clears the top bit so the sign bit itself is left untouched. XOR with
    // that mask is then a no-op on non-negative values.
    let flip = |bits: i16| -> i16 { bits ^ ((((bits >> 15) as u16) >> 1) as i16) };
    flip(self.to_bits() as i16).cmp(&flip(other.to_bits() as i16))
}
/// Restrict a value to a certain interval unless it is NaN.
///
/// Returns `max` if `self` is greater than `max`, and `min` if `self` is
/// less than `min`. Otherwise this returns `self`.
///
/// Note that this function returns NaN if the initial value was NaN as
/// well.
///
/// # Panics
///
/// Panics if `min > max`, `min` is NaN, or `max` is NaN.
///
/// # Examples
///
/// ```
/// #![feature(f16)]
/// # #[cfg(target_arch = "aarch64")] { // FIXME(f16_F128): rust-lang/rust#123885
///
/// assert!((-3.0f16).clamp(-2.0, 1.0) == -2.0);
/// assert!((0.0f16).clamp(-2.0, 1.0) == 0.0);
/// assert!((2.0f16).clamp(-2.0, 1.0) == 1.0);
/// assert!((f16::NAN).clamp(-2.0, 1.0).is_nan());
/// # }
/// ```
#[inline]
#[cfg(not(bootstrap))]
#[unstable(feature = "f16", issue = "116909")]
#[must_use = "method returns a new number and does not mutate the original value"]
pub fn clamp(mut self, min: f16, max: f16) -> f16 {
    assert!(min <= max, "min > max, or either was NaN. min = {min:?}, max = {max:?}");
    // The branches are mutually exclusive because the assert guarantees
    // `min <= max`. A NaN `self` fails both comparisons and so passes
    // through unchanged.
    if self < min {
        self = min;
    } else if self > max {
        self = max;
    }
    self
}
}
+30 -25
View File
@@ -490,6 +490,21 @@ impl f32 {
#[stable(feature = "assoc_int_consts", since = "1.43.0")]
pub const NEG_INFINITY: f32 = -1.0_f32 / 0.0_f32;
/// Sign bit
const SIGN_MASK: u32 = 0x8000_0000;
/// Exponent mask
const EXP_MASK: u32 = 0x7f80_0000;
/// Mantissa mask
const MAN_MASK: u32 = 0x007f_ffff;
/// Minimum representable positive value (min subnormal)
const TINY_BITS: u32 = 0x1;
/// Minimum representable negative value (min negative subnormal)
const NEG_TINY_BITS: u32 = Self::TINY_BITS | Self::SIGN_MASK;
/// Returns `true` if this value is NaN.
///
/// ```
@@ -515,7 +530,7 @@ pub const fn is_nan(self) -> bool {
#[rustc_const_unstable(feature = "const_float_classify", issue = "72505")]
pub(crate) const fn abs_private(self) -> f32 {
// SAFETY: This transmutation is fine. Probably. For the reasons std is using it.
unsafe { mem::transmute::<u32, f32>(mem::transmute::<f32, u32>(self) & 0x7fff_ffff) }
unsafe { mem::transmute::<u32, f32>(mem::transmute::<f32, u32>(self) & !Self::SIGN_MASK) }
}
/// Returns `true` if this value is positive infinity or negative infinity, and
@@ -682,12 +697,9 @@ pub const fn classify(self) -> FpCategory {
// runtime-deviating logic which may or may not be acceptable.
#[rustc_const_unstable(feature = "const_float_classify", issue = "72505")]
const unsafe fn partial_classify(self) -> FpCategory {
const EXP_MASK: u32 = 0x7f800000;
const MAN_MASK: u32 = 0x007fffff;
// SAFETY: The caller is not asking questions for which this will tell lies.
let b = unsafe { mem::transmute::<f32, u32>(self) };
match (b & MAN_MASK, b & EXP_MASK) {
match (b & Self::MAN_MASK, b & Self::EXP_MASK) {
(0, 0) => FpCategory::Zero,
(_, 0) => FpCategory::Subnormal,
_ => FpCategory::Normal,
@@ -699,12 +711,9 @@ pub const fn classify(self) -> FpCategory {
// plus a transmute. We do not live in a just world, but we can make it more so.
#[rustc_const_unstable(feature = "const_float_classify", issue = "72505")]
const fn classify_bits(b: u32) -> FpCategory {
const EXP_MASK: u32 = 0x7f800000;
const MAN_MASK: u32 = 0x007fffff;
match (b & MAN_MASK, b & EXP_MASK) {
(0, EXP_MASK) => FpCategory::Infinite,
(_, EXP_MASK) => FpCategory::Nan,
match (b & Self::MAN_MASK, b & Self::EXP_MASK) {
(0, Self::EXP_MASK) => FpCategory::Infinite,
(_, Self::EXP_MASK) => FpCategory::Nan,
(0, 0) => FpCategory::Zero,
(_, 0) => FpCategory::Subnormal,
_ => FpCategory::Normal,
@@ -787,19 +796,17 @@ pub const fn is_sign_negative(self) -> bool {
#[unstable(feature = "float_next_up_down", issue = "91399")]
#[rustc_const_unstable(feature = "float_next_up_down", issue = "91399")]
pub const fn next_up(self) -> Self {
// We must use strictly integer arithmetic to prevent denormals from
// flushing to zero after an arithmetic operation on some platforms.
const TINY_BITS: u32 = 0x1; // Smallest positive f32.
const CLEAR_SIGN_MASK: u32 = 0x7fff_ffff;
// Some targets violate Rust's assumption of IEEE semantics, e.g. by flushing
// denormals to zero. This is in general unsound and unsupported, but here
// we do our best to still produce the correct result on such targets.
let bits = self.to_bits();
if self.is_nan() || bits == Self::INFINITY.to_bits() {
return self;
}
let abs = bits & CLEAR_SIGN_MASK;
let abs = bits & !Self::SIGN_MASK;
let next_bits = if abs == 0 {
TINY_BITS
Self::TINY_BITS
} else if bits == abs {
bits + 1
} else {
@@ -837,19 +844,17 @@ pub const fn next_up(self) -> Self {
#[unstable(feature = "float_next_up_down", issue = "91399")]
#[rustc_const_unstable(feature = "float_next_up_down", issue = "91399")]
pub const fn next_down(self) -> Self {
// We must use strictly integer arithmetic to prevent denormals from
// flushing to zero after an arithmetic operation on some platforms.
const NEG_TINY_BITS: u32 = 0x8000_0001; // Smallest (in magnitude) negative f32.
const CLEAR_SIGN_MASK: u32 = 0x7fff_ffff;
// Some targets violate Rust's assumption of IEEE semantics, e.g. by flushing
// denormals to zero. This is in general unsound and unsupported, but here
// we do our best to still produce the correct result on such targets.
let bits = self.to_bits();
if self.is_nan() || bits == Self::NEG_INFINITY.to_bits() {
return self;
}
let abs = bits & CLEAR_SIGN_MASK;
let abs = bits & !Self::SIGN_MASK;
let next_bits = if abs == 0 {
NEG_TINY_BITS
Self::NEG_TINY_BITS
} else if bits == abs {
bits - 1
} else {
+32 -29
View File
@@ -489,6 +489,21 @@ impl f64 {
#[stable(feature = "assoc_int_consts", since = "1.43.0")]
pub const NEG_INFINITY: f64 = -1.0_f64 / 0.0_f64;
/// Sign bit
const SIGN_MASK: u64 = 0x8000_0000_0000_0000;
/// Exponent mask
const EXP_MASK: u64 = 0x7ff0_0000_0000_0000;
/// Mantissa mask
const MAN_MASK: u64 = 0x000f_ffff_ffff_ffff;
/// Minimum representable positive value (min subnormal)
const TINY_BITS: u64 = 0x1;
/// Minimum representable negative value (min negative subnormal)
const NEG_TINY_BITS: u64 = Self::TINY_BITS | Self::SIGN_MASK;
/// Returns `true` if this value is NaN.
///
/// ```
@@ -514,9 +529,7 @@ pub const fn is_nan(self) -> bool {
#[rustc_const_unstable(feature = "const_float_classify", issue = "72505")]
pub(crate) const fn abs_private(self) -> f64 {
// SAFETY: This transmutation is fine. Probably. For the reasons std is using it.
unsafe {
mem::transmute::<u64, f64>(mem::transmute::<f64, u64>(self) & 0x7fff_ffff_ffff_ffff)
}
unsafe { mem::transmute::<u64, f64>(mem::transmute::<f64, u64>(self) & !Self::SIGN_MASK) }
}
/// Returns `true` if this value is positive infinity or negative infinity, and
@@ -673,13 +686,10 @@ pub const fn classify(self) -> FpCategory {
// and some normal floating point numbers truncated from an x87 FPU.
#[rustc_const_unstable(feature = "const_float_classify", issue = "72505")]
const unsafe fn partial_classify(self) -> FpCategory {
const EXP_MASK: u64 = 0x7ff0000000000000;
const MAN_MASK: u64 = 0x000fffffffffffff;
// SAFETY: The caller is not asking questions for which this will tell lies.
let b = unsafe { mem::transmute::<f64, u64>(self) };
match (b & MAN_MASK, b & EXP_MASK) {
(0, EXP_MASK) => FpCategory::Infinite,
match (b & Self::MAN_MASK, b & Self::EXP_MASK) {
(0, Self::EXP_MASK) => FpCategory::Infinite,
(0, 0) => FpCategory::Zero,
(_, 0) => FpCategory::Subnormal,
_ => FpCategory::Normal,
@@ -691,12 +701,9 @@ pub const fn classify(self) -> FpCategory {
// plus a transmute. We do not live in a just world, but we can make it more so.
#[rustc_const_unstable(feature = "const_float_classify", issue = "72505")]
const fn classify_bits(b: u64) -> FpCategory {
const EXP_MASK: u64 = 0x7ff0000000000000;
const MAN_MASK: u64 = 0x000fffffffffffff;
match (b & MAN_MASK, b & EXP_MASK) {
(0, EXP_MASK) => FpCategory::Infinite,
(_, EXP_MASK) => FpCategory::Nan,
match (b & Self::MAN_MASK, b & Self::EXP_MASK) {
(0, Self::EXP_MASK) => FpCategory::Infinite,
(_, Self::EXP_MASK) => FpCategory::Nan,
(0, 0) => FpCategory::Zero,
(_, 0) => FpCategory::Subnormal,
_ => FpCategory::Normal,
@@ -756,7 +763,7 @@ pub const fn is_sign_negative(self) -> bool {
// IEEE754 says: isSignMinus(x) is true if and only if x has negative sign. isSignMinus
// applies to zeros and NaNs as well.
// SAFETY: This is just transmuting to get the sign bit, it's fine.
unsafe { mem::transmute::<f64, u64>(self) & 0x8000_0000_0000_0000 != 0 }
unsafe { mem::transmute::<f64, u64>(self) & Self::SIGN_MASK != 0 }
}
#[must_use]
@@ -797,19 +804,17 @@ pub fn is_negative(self) -> bool {
#[unstable(feature = "float_next_up_down", issue = "91399")]
#[rustc_const_unstable(feature = "float_next_up_down", issue = "91399")]
pub const fn next_up(self) -> Self {
// We must use strictly integer arithmetic to prevent denormals from
// flushing to zero after an arithmetic operation on some platforms.
const TINY_BITS: u64 = 0x1; // Smallest positive f64.
const CLEAR_SIGN_MASK: u64 = 0x7fff_ffff_ffff_ffff;
// Some targets violate Rust's assumption of IEEE semantics, e.g. by flushing
// denormals to zero. This is in general unsound and unsupported, but here
// we do our best to still produce the correct result on such targets.
let bits = self.to_bits();
if self.is_nan() || bits == Self::INFINITY.to_bits() {
return self;
}
let abs = bits & CLEAR_SIGN_MASK;
let abs = bits & !Self::SIGN_MASK;
let next_bits = if abs == 0 {
TINY_BITS
Self::TINY_BITS
} else if bits == abs {
bits + 1
} else {
@@ -847,19 +852,17 @@ pub const fn next_up(self) -> Self {
#[unstable(feature = "float_next_up_down", issue = "91399")]
#[rustc_const_unstable(feature = "float_next_up_down", issue = "91399")]
pub const fn next_down(self) -> Self {
// We must use strictly integer arithmetic to prevent denormals from
// flushing to zero after an arithmetic operation on some platforms.
const NEG_TINY_BITS: u64 = 0x8000_0000_0000_0001; // Smallest (in magnitude) negative f64.
const CLEAR_SIGN_MASK: u64 = 0x7fff_ffff_ffff_ffff;
// Some targets violate Rust's assumption of IEEE semantics, e.g. by flushing
// denormals to zero. This is in general unsound and unsupported, but here
// we do our best to still produce the correct result on such targets.
let bits = self.to_bits();
if self.is_nan() || bits == Self::NEG_INFINITY.to_bits() {
return self;
}
let abs = bits & CLEAR_SIGN_MASK;
let abs = bits & !Self::SIGN_MASK;
let next_bits = if abs == 0 {
NEG_TINY_BITS
Self::NEG_TINY_BITS
} else if bits == abs {
bits - 1
} else {
@@ -1,4 +1,5 @@
use core::iter::*;
use std::rc::Rc;
#[test]
fn test_iterator_filter_count() {
@@ -50,3 +51,15 @@ fn test_double_ended_filter() {
assert_eq!(it.next().unwrap(), &2);
assert_eq!(it.next_back(), None);
}
#[test]
fn test_next_chunk_does_not_leak() {
let drop_witness: [_; 5] = std::array::from_fn(|_| Rc::new(()));
let v = (0..5).map(|i| drop_witness[i].clone()).collect::<Vec<_>>();
let _ = v.into_iter().filter(|_| false).next_chunk::<1>();
for ref w in drop_witness {
assert_eq!(Rc::strong_count(w), 1);
}
}
+62
View File
@@ -7,6 +7,10 @@ fn main() {
let target_vendor =
env::var("CARGO_CFG_TARGET_VENDOR").expect("CARGO_CFG_TARGET_VENDOR was not set");
let target_env = env::var("CARGO_CFG_TARGET_ENV").expect("CARGO_CFG_TARGET_ENV was not set");
let target_pointer_width: u32 = env::var("CARGO_CFG_TARGET_POINTER_WIDTH")
.expect("CARGO_CFG_TARGET_POINTER_WIDTH was not set")
.parse()
.unwrap();
println!("cargo:rustc-check-cfg=cfg(netbsd10)");
if target_os == "netbsd" && env::var("RUSTC_STD_NETBSD10").is_ok() {
@@ -70,4 +74,62 @@ fn main() {
println!("cargo:rustc-cfg=backtrace_in_libstd");
println!("cargo:rustc-env=STD_ENV_ARCH={}", env::var("CARGO_CFG_TARGET_ARCH").unwrap());
// Emit these on platforms that have no known ABI bugs, LLVM selection bugs, lowering bugs,
// missing symbols, or other problems, to determine when tests get run.
// If more broken platforms are found, please update the tracking issue at
// <https://github.com/rust-lang/rust/issues/116909>
//
// Some of these match arms are redundant; the goal is to separate reasons that the type is
// unreliable, even when multiple reasons might fail the same platform.
println!("cargo:rustc-check-cfg=cfg(reliable_f16)");
println!("cargo:rustc-check-cfg=cfg(reliable_f128)");
let has_reliable_f16 = match (target_arch.as_str(), target_os.as_str()) {
// Selection failure until recent LLVM <https://github.com/llvm/llvm-project/issues/93894>
// FIXME(llvm19): can probably be removed at the version bump
("loongarch64", _) => false,
// Selection failure <https://github.com/llvm/llvm-project/issues/50374>
("s390x", _) => false,
// Unsupported <https://github.com/llvm/llvm-project/issues/94434>
("arm64ec", _) => false,
// MinGW ABI bugs <https://gcc.gnu.org/bugzilla/show_bug.cgi?id=115054>
("x86", "windows") => false,
// x86 has ABI bugs that show up with optimizations. This should be partially fixed with
// the compiler-builtins update. <https://github.com/rust-lang/rust/issues/123885>
("x86" | "x86_64", _) => false,
// Missing `__gnu_h2f_ieee` and `__gnu_f2h_ieee`
("powerpc" | "powerpc64" | "powerpc64le", _) => false,
// Missing `__extendhfsf` and `__truncsfhf`
("riscv32" | "riscv64", _) => false,
// Most OSs are missing `__extendhfsf` and `__truncsfhf`
(_, "linux" | "macos") => true,
// Almost all OSs besides Linux and MacOS are missing symbols until compiler-builtins can
// be updated. <https://github.com/rust-lang/rust/pull/125016> will get some of these, the
// next CB update should get the rest.
_ => false,
};
let has_reliable_f128 = match (target_arch.as_str(), target_os.as_str()) {
// Unsupported <https://github.com/llvm/llvm-project/issues/94434>
("arm64ec", _) => false,
// ABI and precision bugs <https://github.com/rust-lang/rust/issues/125109>
// <https://github.com/rust-lang/rust/issues/125102>
("powerpc" | "powerpc64", _) => false,
// Selection bug <https://github.com/llvm/llvm-project/issues/95471>
("nvptx64", _) => false,
// ABI unsupported <https://github.com/llvm/llvm-project/issues/41838>
("sparc", _) => false,
// 64-bit Linux is about the only platform to have f128 symbols by default
(_, "linux") if target_pointer_width == 64 => true,
// Same as for f16, except MacOS is also missing f128 symbols.
_ => false,
};
if has_reliable_f16 {
println!("cargo:rustc-cfg=reliable_f16");
}
if has_reliable_f128 {
println!("cargo:rustc-cfg=reliable_f128");
}
}
+30
View File
@@ -32,4 +32,34 @@ impl f128 {
pub fn powi(self, n: i32) -> f128 {
unsafe { intrinsics::powif128(self, n) }
}
/// Computes the absolute value of `self`.
///
/// This function always returns the precise result.
///
/// # Examples
///
/// ```
/// #![feature(f128)]
/// # #[cfg(reliable_f128)] { // FIXME(f16_f128): reliable_f128
///
/// let x = 3.5_f128;
/// let y = -3.5_f128;
///
/// assert_eq!(x.abs(), x);
/// assert_eq!(y.abs(), -y);
///
/// assert!(f128::NAN.abs().is_nan());
/// # }
/// ```
#[inline]
#[cfg(not(bootstrap))]
#[rustc_allow_incoherent_impl]
#[unstable(feature = "f128", issue = "116909")]
#[must_use = "method returns a new number and does not mutate the original value"]
pub fn abs(self) -> Self {
// FIXME(f16_f128): replace with `intrinsics::fabsf128` when available
// We don't do this now because LLVM has lowering bugs for f128 math.
Self::from_bits(self.to_bits() & !(1 << 127))
}
}
+511 -16
View File
@@ -1,29 +1,31 @@
#![allow(dead_code)] // FIXME(f16_f128): remove once constants are used
#![cfg(not(bootstrap))]
// FIXME(f16_f128): only tested on platforms that have symbols and aren't buggy
#![cfg(reliable_f128)]
use crate::f128::consts;
use crate::num::*;
/// Smallest number
const TINY_BITS: u128 = 0x1;
/// Next smallest number
const TINY_UP_BITS: u128 = 0x2;
/// Exponent = 0b11...10, Significand 0b1111..10. Min val > 0
const MAX_DOWN_BITS: u128 = 0x7ffeffffffffffffffffffffffffffff;
const MAX_DOWN_BITS: u128 = 0x7ffefffffffffffffffffffffffffffe;
/// Zeroed exponent, full significand
const LARGEST_SUBNORMAL_BITS: u128 = 0x0000ffffffffffffffffffffffffffff;
/// Exponent = 0b1, zeroed significand
const SMALLEST_NORMAL_BITS: u128 = 0x00010000000000000000000000000000;
/// First pattern over the mantissa
const NAN_MASK1: u128 = 0x0000aaaaaaaaaaaaaaaaaaaaaaaaaaaa;
/// Second pattern over the mantissa
const NAN_MASK2: u128 = 0x00005555555555555555555555555555;
/// Compare two `f128` expressions by value, printing both bit patterns on
/// failure.
///
/// The hex width is 34 (`0x` prefix plus 32 nibbles of a `u128`); the
/// original `{:#0130x}` padded the output to 130 columns, which is wider
/// than any `u128` and inconsistent with `assert_f128_biteq!` below.
#[allow(unused_macros)]
macro_rules! assert_f128_eq {
    ($a:expr, $b:expr) => {
        let (l, r): (&f128, &f128) = (&$a, &$b);
        assert_eq!(*l, *r, "\na: {:#034x}\nb: {:#034x}", l.to_bits(), r.to_bits())
    };
}
/// Compare by representation
#[allow(unused_macros)]
macro_rules! assert_f128_biteq {
@@ -31,10 +33,503 @@ macro_rules! assert_f128_biteq {
let (l, r): (&f128, &f128) = (&$a, &$b);
let lb = l.to_bits();
let rb = r.to_bits();
assert_eq!(
lb, rb,
"float {:?} is not bitequal to {:?}.\na: {:#0130x}\nb: {:#0130x}",
*l, *r, lb, rb
);
assert_eq!(lb, rb, "float {l:?} is not bitequal to {r:?}.\na: {lb:#034x}\nb: {rb:#034x}");
};
}
#[test]
fn test_num_f128() {
// Smoke test via the shared `test_num` helper (from `crate::num`, imported
// above); presumably exercises basic arithmetic on the pair (10, 2) —
// see `crate::num` for the exact operations checked.
test_num(10f128, 2f128);
}
// FIXME(f16_f128): add min and max tests when available
#[test]
fn test_nan() {
// `f128::NAN` must be NaN, non-finite, non-infinite, and carry a clear
// (positive) sign bit.
let nan: f128 = f128::NAN;
assert!(nan.is_nan());
assert!(!nan.is_infinite());
assert!(!nan.is_finite());
assert!(nan.is_sign_positive());
assert!(!nan.is_sign_negative());
// FIXME(f16_f128): classify
// assert!(!nan.is_normal());
// assert_eq!(Fp::Nan, nan.classify());
}
#[test]
fn test_infinity() {
// Positive infinity: infinite, non-finite, positive sign, not NaN.
let inf: f128 = f128::INFINITY;
assert!(inf.is_infinite());
assert!(!inf.is_finite());
assert!(inf.is_sign_positive());
assert!(!inf.is_sign_negative());
assert!(!inf.is_nan());
// FIXME(f16_f128): classify
// assert!(!inf.is_normal());
// assert_eq!(Fp::Infinite, inf.classify());
}
#[test]
fn test_neg_infinity() {
// Negative infinity: infinite, non-finite, negative sign, not NaN.
let neg_inf: f128 = f128::NEG_INFINITY;
assert!(neg_inf.is_infinite());
assert!(!neg_inf.is_finite());
assert!(!neg_inf.is_sign_positive());
assert!(neg_inf.is_sign_negative());
assert!(!neg_inf.is_nan());
// FIXME(f16_f128): classify
// assert!(!neg_inf.is_normal());
// assert_eq!(Fp::Infinite, neg_inf.classify());
}
#[test]
fn test_zero() {
// +0.0: finite, compares equal to 0.0, positive sign bit.
let zero: f128 = 0.0f128;
assert_eq!(0.0, zero);
assert!(!zero.is_infinite());
assert!(zero.is_finite());
assert!(zero.is_sign_positive());
assert!(!zero.is_sign_negative());
assert!(!zero.is_nan());
// FIXME(f16_f128): classify
// assert!(!zero.is_normal());
// assert_eq!(Fp::Zero, zero.classify());
}
#[test]
fn test_neg_zero() {
// -0.0: compares equal to 0.0 (IEEE equality) but the sign bit is set.
let neg_zero: f128 = -0.0;
assert_eq!(0.0, neg_zero);
assert!(!neg_zero.is_infinite());
assert!(neg_zero.is_finite());
assert!(!neg_zero.is_sign_positive());
assert!(neg_zero.is_sign_negative());
assert!(!neg_zero.is_nan());
// FIXME(f16_f128): classify
// assert!(!neg_zero.is_normal());
// assert_eq!(Fp::Zero, neg_zero.classify());
}
#[test]
fn test_one() {
// An ordinary finite positive value.
let one: f128 = 1.0f128;
assert_eq!(1.0, one);
assert!(!one.is_infinite());
assert!(one.is_finite());
assert!(one.is_sign_positive());
assert!(!one.is_sign_negative());
assert!(!one.is_nan());
// FIXME(f16_f128): classify
// assert!(one.is_normal());
// assert_eq!(Fp::Normal, one.classify());
}
#[test]
fn test_is_nan() {
// Only NAN is NaN; finite values and infinities are not.
let nan: f128 = f128::NAN;
let inf: f128 = f128::INFINITY;
let neg_inf: f128 = f128::NEG_INFINITY;
assert!(nan.is_nan());
assert!(!0.0f128.is_nan());
assert!(!5.3f128.is_nan());
assert!(!(-10.732f128).is_nan());
assert!(!inf.is_nan());
assert!(!neg_inf.is_nan());
}
#[test]
fn test_is_infinite() {
// Only the two infinities are infinite; NaN and finite values are not.
let nan: f128 = f128::NAN;
let inf: f128 = f128::INFINITY;
let neg_inf: f128 = f128::NEG_INFINITY;
assert!(!nan.is_infinite());
assert!(inf.is_infinite());
assert!(neg_inf.is_infinite());
assert!(!0.0f128.is_infinite());
assert!(!42.8f128.is_infinite());
assert!(!(-109.2f128).is_infinite());
}
#[test]
fn test_is_finite() {
// NaN and infinities are not finite; everything else here is.
let nan: f128 = f128::NAN;
let inf: f128 = f128::INFINITY;
let neg_inf: f128 = f128::NEG_INFINITY;
assert!(!nan.is_finite());
assert!(!inf.is_finite());
assert!(!neg_inf.is_finite());
assert!(0.0f128.is_finite());
assert!(42.8f128.is_finite());
assert!((-109.2f128).is_finite());
}
// FIXME(f16_f128): add `test_is_normal` and `test_classify` when classify is working
// FIXME(f16_f128): add missing math functions when available
#[test]
fn test_abs() {
    // (input, expected) pairs with a well-defined absolute value.
    let cases: [(f128, f128); 7] = [
        (f128::INFINITY, f128::INFINITY),
        (1f128, 1f128),
        (0f128, 0f128),
        (-0f128, 0f128),
        (-1f128, 1f128),
        (f128::NEG_INFINITY, f128::INFINITY),
        (1f128 / f128::NEG_INFINITY, 0f128),
    ];
    for (input, expected) in cases {
        assert_eq!(input.abs(), expected);
    }
    // NaN never compares equal, so check its propagation separately.
    assert!(f128::NAN.abs().is_nan());
}
#[test]
fn test_is_sign_positive() {
// Sign predicates inspect the sign bit only: -0.0 is negative, and the
// default NAN is positive while its negation is negative.
assert!(f128::INFINITY.is_sign_positive());
assert!(1f128.is_sign_positive());
assert!(0f128.is_sign_positive());
assert!(!(-0f128).is_sign_positive());
assert!(!(-1f128).is_sign_positive());
assert!(!f128::NEG_INFINITY.is_sign_positive());
assert!(!(1f128 / f128::NEG_INFINITY).is_sign_positive());
assert!(f128::NAN.is_sign_positive());
assert!(!(-f128::NAN).is_sign_positive());
}
#[test]
fn test_is_sign_negative() {
// Mirror image of `test_is_sign_positive` above.
assert!(!f128::INFINITY.is_sign_negative());
assert!(!1f128.is_sign_negative());
assert!(!0f128.is_sign_negative());
assert!((-0f128).is_sign_negative());
assert!((-1f128).is_sign_negative());
assert!(f128::NEG_INFINITY.is_sign_negative());
assert!((1f128 / f128::NEG_INFINITY).is_sign_negative());
assert!(!f128::NAN.is_sign_negative());
assert!((-f128::NAN).is_sign_negative());
}
#[test]
fn test_next_up() {
// Decode the boundary bit patterns declared at the top of this file.
let tiny = f128::from_bits(TINY_BITS);
let tiny_up = f128::from_bits(TINY_UP_BITS);
let max_down = f128::from_bits(MAX_DOWN_BITS);
let largest_subnormal = f128::from_bits(LARGEST_SUBNORMAL_BITS);
let smallest_normal = f128::from_bits(SMALLEST_NORMAL_BITS);
// Walk upward across every interesting boundary: -inf -> MIN, across
// -1.0, the subnormal/normal border, and zero. Note that next_up of
// -0.0 is `tiny` (the smallest positive subnormal), not +0.0, and that
// next_up of +inf saturates at +inf.
assert_f128_biteq!(f128::NEG_INFINITY.next_up(), f128::MIN);
assert_f128_biteq!(f128::MIN.next_up(), -max_down);
assert_f128_biteq!((-1.0 - f128::EPSILON).next_up(), -1.0);
assert_f128_biteq!((-smallest_normal).next_up(), -largest_subnormal);
assert_f128_biteq!((-tiny_up).next_up(), -tiny);
assert_f128_biteq!((-tiny).next_up(), -0.0f128);
assert_f128_biteq!((-0.0f128).next_up(), tiny);
assert_f128_biteq!(0.0f128.next_up(), tiny);
assert_f128_biteq!(tiny.next_up(), tiny_up);
assert_f128_biteq!(largest_subnormal.next_up(), smallest_normal);
assert_f128_biteq!(1.0f128.next_up(), 1.0 + f128::EPSILON);
assert_f128_biteq!(f128::MAX.next_up(), f128::INFINITY);
assert_f128_biteq!(f128::INFINITY.next_up(), f128::INFINITY);
// Check that NaNs roundtrip.
let nan0 = f128::NAN;
let nan1 = f128::from_bits(f128::NAN.to_bits() ^ 0x002a_aaaa);
let nan2 = f128::from_bits(f128::NAN.to_bits() ^ 0x0055_5555);
assert_f128_biteq!(nan0.next_up(), nan0);
assert_f128_biteq!(nan1.next_up(), nan1);
assert_f128_biteq!(nan2.next_up(), nan2);
}
#[test]
fn test_next_down() {
// Same boundary patterns as `test_next_up`, traversed downward.
let tiny = f128::from_bits(TINY_BITS);
let tiny_up = f128::from_bits(TINY_UP_BITS);
let max_down = f128::from_bits(MAX_DOWN_BITS);
let largest_subnormal = f128::from_bits(LARGEST_SUBNORMAL_BITS);
let smallest_normal = f128::from_bits(SMALLEST_NORMAL_BITS);
// -inf saturates; next_down of +0.0 is `-tiny`, skipping -0.0.
assert_f128_biteq!(f128::NEG_INFINITY.next_down(), f128::NEG_INFINITY);
assert_f128_biteq!(f128::MIN.next_down(), f128::NEG_INFINITY);
assert_f128_biteq!((-max_down).next_down(), f128::MIN);
assert_f128_biteq!((-1.0f128).next_down(), -1.0 - f128::EPSILON);
assert_f128_biteq!((-largest_subnormal).next_down(), -smallest_normal);
assert_f128_biteq!((-tiny).next_down(), -tiny_up);
assert_f128_biteq!((-0.0f128).next_down(), -tiny);
assert_f128_biteq!((0.0f128).next_down(), -tiny);
assert_f128_biteq!(tiny.next_down(), 0.0f128);
assert_f128_biteq!(tiny_up.next_down(), tiny);
assert_f128_biteq!(smallest_normal.next_down(), largest_subnormal);
assert_f128_biteq!((1.0 + f128::EPSILON).next_down(), 1.0f128);
assert_f128_biteq!(f128::MAX.next_down(), max_down);
assert_f128_biteq!(f128::INFINITY.next_down(), f128::MAX);
// Check that NaNs roundtrip.
let nan0 = f128::NAN;
let nan1 = f128::from_bits(f128::NAN.to_bits() ^ 0x002a_aaaa);
let nan2 = f128::from_bits(f128::NAN.to_bits() ^ 0x0055_5555);
assert_f128_biteq!(nan0.next_down(), nan0);
assert_f128_biteq!(nan1.next_down(), nan1);
assert_f128_biteq!(nan2.next_down(), nan2);
}
#[test]
fn test_recip() {
    // Finite reciprocals whose results are exactly representable.
    assert_eq!(1.0f128.recip(), 1.0);
    assert_eq!(2.0f128.recip(), 0.5);
    assert_eq!((-0.4f128).recip(), -2.5);
    // Special values: 1/0 = inf, 1/±inf = 0, and NaN propagates.
    assert_eq!(0.0f128.recip(), f128::INFINITY);
    assert_eq!(f128::INFINITY.recip(), 0.0);
    assert_eq!(f128::NEG_INFINITY.recip(), 0.0);
    assert!(f128::NAN.recip().is_nan());
}
#[test]
fn test_to_degrees() {
// Degrees conversion: exact for 0 and pi, approximate elsewhere, and
// NaN/infinities pass through unchanged.
let pi: f128 = consts::PI;
let nan: f128 = f128::NAN;
let inf: f128 = f128::INFINITY;
let neg_inf: f128 = f128::NEG_INFINITY;
assert_eq!(0.0f128.to_degrees(), 0.0);
assert_approx_eq!((-5.8f128).to_degrees(), -332.315521);
assert_eq!(pi.to_degrees(), 180.0);
assert!(nan.to_degrees().is_nan());
assert_eq!(inf.to_degrees(), inf);
assert_eq!(neg_inf.to_degrees(), neg_inf);
// 1 radian in degrees, checked for exact equality at f128 precision.
assert_eq!(1_f128.to_degrees(), 57.2957795130823208767981548141051703);
}
#[test]
fn test_to_radians() {
// Radians conversion; NaN/infinities again pass through.
let pi: f128 = consts::PI;
let nan: f128 = f128::NAN;
let inf: f128 = f128::INFINITY;
let neg_inf: f128 = f128::NEG_INFINITY;
assert_eq!(0.0f128.to_radians(), 0.0);
assert_approx_eq!(154.6f128.to_radians(), 2.698279);
assert_approx_eq!((-332.31f128).to_radians(), -5.799903);
// check approx rather than exact because round trip for pi doesn't fall on an exactly
// representable value (unlike `f32` and `f64`).
assert_approx_eq!(180.0f128.to_radians(), pi);
assert!(nan.to_radians().is_nan());
assert_eq!(inf.to_radians(), inf);
assert_eq!(neg_inf.to_radians(), neg_inf);
}
#[test]
fn test_real_consts() {
// Cross-check derived constants against the expressions that define
// them; entries needing sqrt/log stay commented until those exist.
// FIXME(f16_f128): add math tests when available
use super::consts;
let pi: f128 = consts::PI;
let frac_pi_2: f128 = consts::FRAC_PI_2;
let frac_pi_3: f128 = consts::FRAC_PI_3;
let frac_pi_4: f128 = consts::FRAC_PI_4;
let frac_pi_6: f128 = consts::FRAC_PI_6;
let frac_pi_8: f128 = consts::FRAC_PI_8;
let frac_1_pi: f128 = consts::FRAC_1_PI;
let frac_2_pi: f128 = consts::FRAC_2_PI;
// let frac_2_sqrtpi: f128 = consts::FRAC_2_SQRT_PI;
// let sqrt2: f128 = consts::SQRT_2;
// let frac_1_sqrt2: f128 = consts::FRAC_1_SQRT_2;
// let e: f128 = consts::E;
// let log2_e: f128 = consts::LOG2_E;
// let log10_e: f128 = consts::LOG10_E;
// let ln_2: f128 = consts::LN_2;
// let ln_10: f128 = consts::LN_10;
assert_approx_eq!(frac_pi_2, pi / 2f128);
assert_approx_eq!(frac_pi_3, pi / 3f128);
assert_approx_eq!(frac_pi_4, pi / 4f128);
assert_approx_eq!(frac_pi_6, pi / 6f128);
assert_approx_eq!(frac_pi_8, pi / 8f128);
assert_approx_eq!(frac_1_pi, 1f128 / pi);
assert_approx_eq!(frac_2_pi, 2f128 / pi);
// assert_approx_eq!(frac_2_sqrtpi, 2f128 / pi.sqrt());
// assert_approx_eq!(sqrt2, 2f128.sqrt());
// assert_approx_eq!(frac_1_sqrt2, 1f128 / 2f128.sqrt());
// assert_approx_eq!(log2_e, e.log2());
// assert_approx_eq!(log10_e, e.log10());
// assert_approx_eq!(ln_2, 2f128.ln());
// assert_approx_eq!(ln_10, 10f128.ln());
}
#[test]
fn test_float_bits_conv() {
// to_bits/from_bits must be exact inverses on these IEEE binary128
// patterns (sign | 15-bit exponent | 112-bit mantissa).
assert_eq!((1f128).to_bits(), 0x3fff0000000000000000000000000000);
assert_eq!((12.5f128).to_bits(), 0x40029000000000000000000000000000);
assert_eq!((1337f128).to_bits(), 0x40094e40000000000000000000000000);
assert_eq!((-14.25f128).to_bits(), 0xc002c800000000000000000000000000);
assert_approx_eq!(f128::from_bits(0x3fff0000000000000000000000000000), 1.0);
assert_approx_eq!(f128::from_bits(0x40029000000000000000000000000000), 12.5);
assert_approx_eq!(f128::from_bits(0x40094e40000000000000000000000000), 1337.0);
assert_approx_eq!(f128::from_bits(0xc002c800000000000000000000000000), -14.25);
// Check that NaNs roundtrip their bits regardless of signaling-ness
// 0xA is 0b1010; 0x5 is 0b0101 -- so these two together clobbers all the mantissa bits
let masked_nan1 = f128::NAN.to_bits() ^ NAN_MASK1;
let masked_nan2 = f128::NAN.to_bits() ^ NAN_MASK2;
assert!(f128::from_bits(masked_nan1).is_nan());
assert!(f128::from_bits(masked_nan2).is_nan());
assert_eq!(f128::from_bits(masked_nan1).to_bits(), masked_nan1);
assert_eq!(f128::from_bits(masked_nan2).to_bits(), masked_nan2);
}
// `clamp` documents three panic conditions; each gets its own
// `#[should_panic]` test.
#[test]
#[should_panic]
fn test_clamp_min_greater_than_max() {
let _ = 1.0f128.clamp(3.0, 1.0);
}
#[test]
#[should_panic]
fn test_clamp_min_is_nan() {
let _ = 1.0f128.clamp(f128::NAN, 1.0);
}
#[test]
#[should_panic]
fn test_clamp_max_is_nan() {
let _ = 1.0f128.clamp(3.0, f128::NAN);
}
#[test]
fn test_total_cmp() {
// Exercises IEEE 754 totalOrder: every value (including NaNs and -0.0)
// falls into one strict order: -qNaN < -sNaN < -inf < ... < -0.0 < 0.0
// < ... < inf < sNaN < qNaN.
use core::cmp::Ordering;
// Mask for the bit that distinguishes quiet from signaling NaNs.
fn quiet_bit_mask() -> u128 {
1 << (f128::MANTISSA_DIGITS - 2)
}
// FIXME(f16_f128): test subnormals when powf is available
// fn min_subnorm() -> f128 {
// f128::MIN_POSITIVE / f128::powf(2.0, f128::MANTISSA_DIGITS as f128 - 1.0)
// }
// fn max_subnorm() -> f128 {
// f128::MIN_POSITIVE - min_subnorm()
// }
fn q_nan() -> f128 {
f128::from_bits(f128::NAN.to_bits() | quiet_bit_mask())
}
fn s_nan() -> f128 {
f128::from_bits((f128::NAN.to_bits() & !quiet_bit_mask()) + 42)
}
// Reflexivity: every value is Equal to itself under total_cmp.
assert_eq!(Ordering::Equal, (-q_nan()).total_cmp(&-q_nan()));
assert_eq!(Ordering::Equal, (-s_nan()).total_cmp(&-s_nan()));
assert_eq!(Ordering::Equal, (-f128::INFINITY).total_cmp(&-f128::INFINITY));
assert_eq!(Ordering::Equal, (-f128::MAX).total_cmp(&-f128::MAX));
assert_eq!(Ordering::Equal, (-2.5_f128).total_cmp(&-2.5));
assert_eq!(Ordering::Equal, (-1.0_f128).total_cmp(&-1.0));
assert_eq!(Ordering::Equal, (-1.5_f128).total_cmp(&-1.5));
assert_eq!(Ordering::Equal, (-0.5_f128).total_cmp(&-0.5));
assert_eq!(Ordering::Equal, (-f128::MIN_POSITIVE).total_cmp(&-f128::MIN_POSITIVE));
// assert_eq!(Ordering::Equal, (-max_subnorm()).total_cmp(&-max_subnorm()));
// assert_eq!(Ordering::Equal, (-min_subnorm()).total_cmp(&-min_subnorm()));
assert_eq!(Ordering::Equal, (-0.0_f128).total_cmp(&-0.0));
assert_eq!(Ordering::Equal, 0.0_f128.total_cmp(&0.0));
// assert_eq!(Ordering::Equal, min_subnorm().total_cmp(&min_subnorm()));
// assert_eq!(Ordering::Equal, max_subnorm().total_cmp(&max_subnorm()));
assert_eq!(Ordering::Equal, f128::MIN_POSITIVE.total_cmp(&f128::MIN_POSITIVE));
assert_eq!(Ordering::Equal, 0.5_f128.total_cmp(&0.5));
assert_eq!(Ordering::Equal, 1.0_f128.total_cmp(&1.0));
assert_eq!(Ordering::Equal, 1.5_f128.total_cmp(&1.5));
assert_eq!(Ordering::Equal, 2.5_f128.total_cmp(&2.5));
assert_eq!(Ordering::Equal, f128::MAX.total_cmp(&f128::MAX));
assert_eq!(Ordering::Equal, f128::INFINITY.total_cmp(&f128::INFINITY));
assert_eq!(Ordering::Equal, s_nan().total_cmp(&s_nan()));
assert_eq!(Ordering::Equal, q_nan().total_cmp(&q_nan()));
// Each adjacent pair of the total order, ascending.
assert_eq!(Ordering::Less, (-q_nan()).total_cmp(&-s_nan()));
assert_eq!(Ordering::Less, (-s_nan()).total_cmp(&-f128::INFINITY));
assert_eq!(Ordering::Less, (-f128::INFINITY).total_cmp(&-f128::MAX));
assert_eq!(Ordering::Less, (-f128::MAX).total_cmp(&-2.5));
assert_eq!(Ordering::Less, (-2.5_f128).total_cmp(&-1.5));
assert_eq!(Ordering::Less, (-1.5_f128).total_cmp(&-1.0));
assert_eq!(Ordering::Less, (-1.0_f128).total_cmp(&-0.5));
assert_eq!(Ordering::Less, (-0.5_f128).total_cmp(&-f128::MIN_POSITIVE));
// assert_eq!(Ordering::Less, (-f128::MIN_POSITIVE).total_cmp(&-max_subnorm()));
// assert_eq!(Ordering::Less, (-max_subnorm()).total_cmp(&-min_subnorm()));
// assert_eq!(Ordering::Less, (-min_subnorm()).total_cmp(&-0.0));
assert_eq!(Ordering::Less, (-0.0_f128).total_cmp(&0.0));
// assert_eq!(Ordering::Less, 0.0_f128.total_cmp(&min_subnorm()));
// assert_eq!(Ordering::Less, min_subnorm().total_cmp(&max_subnorm()));
// assert_eq!(Ordering::Less, max_subnorm().total_cmp(&f128::MIN_POSITIVE));
assert_eq!(Ordering::Less, f128::MIN_POSITIVE.total_cmp(&0.5));
assert_eq!(Ordering::Less, 0.5_f128.total_cmp(&1.0));
assert_eq!(Ordering::Less, 1.0_f128.total_cmp(&1.5));
assert_eq!(Ordering::Less, 1.5_f128.total_cmp(&2.5));
assert_eq!(Ordering::Less, 2.5_f128.total_cmp(&f128::MAX));
assert_eq!(Ordering::Less, f128::MAX.total_cmp(&f128::INFINITY));
assert_eq!(Ordering::Less, f128::INFINITY.total_cmp(&s_nan()));
assert_eq!(Ordering::Less, s_nan().total_cmp(&q_nan()));
// Same adjacent pairs with the arguments swapped.
assert_eq!(Ordering::Greater, (-s_nan()).total_cmp(&-q_nan()));
assert_eq!(Ordering::Greater, (-f128::INFINITY).total_cmp(&-s_nan()));
assert_eq!(Ordering::Greater, (-f128::MAX).total_cmp(&-f128::INFINITY));
assert_eq!(Ordering::Greater, (-2.5_f128).total_cmp(&-f128::MAX));
assert_eq!(Ordering::Greater, (-1.5_f128).total_cmp(&-2.5));
assert_eq!(Ordering::Greater, (-1.0_f128).total_cmp(&-1.5));
assert_eq!(Ordering::Greater, (-0.5_f128).total_cmp(&-1.0));
assert_eq!(Ordering::Greater, (-f128::MIN_POSITIVE).total_cmp(&-0.5));
// assert_eq!(Ordering::Greater, (-max_subnorm()).total_cmp(&-f128::MIN_POSITIVE));
// assert_eq!(Ordering::Greater, (-min_subnorm()).total_cmp(&-max_subnorm()));
// assert_eq!(Ordering::Greater, (-0.0_f128).total_cmp(&-min_subnorm()));
assert_eq!(Ordering::Greater, 0.0_f128.total_cmp(&-0.0));
// assert_eq!(Ordering::Greater, min_subnorm().total_cmp(&0.0));
// assert_eq!(Ordering::Greater, max_subnorm().total_cmp(&min_subnorm()));
// assert_eq!(Ordering::Greater, f128::MIN_POSITIVE.total_cmp(&max_subnorm()));
assert_eq!(Ordering::Greater, 0.5_f128.total_cmp(&f128::MIN_POSITIVE));
assert_eq!(Ordering::Greater, 1.0_f128.total_cmp(&0.5));
assert_eq!(Ordering::Greater, 1.5_f128.total_cmp(&1.0));
assert_eq!(Ordering::Greater, 2.5_f128.total_cmp(&1.5));
assert_eq!(Ordering::Greater, f128::MAX.total_cmp(&2.5));
assert_eq!(Ordering::Greater, f128::INFINITY.total_cmp(&f128::MAX));
assert_eq!(Ordering::Greater, s_nan().total_cmp(&f128::INFINITY));
assert_eq!(Ordering::Greater, q_nan().total_cmp(&s_nan()));
// -qNaN is Less than every other value in the order.
assert_eq!(Ordering::Less, (-q_nan()).total_cmp(&-s_nan()));
assert_eq!(Ordering::Less, (-q_nan()).total_cmp(&-f128::INFINITY));
assert_eq!(Ordering::Less, (-q_nan()).total_cmp(&-f128::MAX));
assert_eq!(Ordering::Less, (-q_nan()).total_cmp(&-2.5));
assert_eq!(Ordering::Less, (-q_nan()).total_cmp(&-1.5));
assert_eq!(Ordering::Less, (-q_nan()).total_cmp(&-1.0));
assert_eq!(Ordering::Less, (-q_nan()).total_cmp(&-0.5));
assert_eq!(Ordering::Less, (-q_nan()).total_cmp(&-f128::MIN_POSITIVE));
// assert_eq!(Ordering::Less, (-q_nan()).total_cmp(&-max_subnorm()));
// assert_eq!(Ordering::Less, (-q_nan()).total_cmp(&-min_subnorm()));
assert_eq!(Ordering::Less, (-q_nan()).total_cmp(&-0.0));
assert_eq!(Ordering::Less, (-q_nan()).total_cmp(&0.0));
// assert_eq!(Ordering::Less, (-q_nan()).total_cmp(&min_subnorm()));
// assert_eq!(Ordering::Less, (-q_nan()).total_cmp(&max_subnorm()));
assert_eq!(Ordering::Less, (-q_nan()).total_cmp(&f128::MIN_POSITIVE));
assert_eq!(Ordering::Less, (-q_nan()).total_cmp(&0.5));
assert_eq!(Ordering::Less, (-q_nan()).total_cmp(&1.0));
assert_eq!(Ordering::Less, (-q_nan()).total_cmp(&1.5));
assert_eq!(Ordering::Less, (-q_nan()).total_cmp(&2.5));
assert_eq!(Ordering::Less, (-q_nan()).total_cmp(&f128::MAX));
assert_eq!(Ordering::Less, (-q_nan()).total_cmp(&f128::INFINITY));
assert_eq!(Ordering::Less, (-q_nan()).total_cmp(&s_nan()));
// -sNaN is Less than everything except -qNaN.
assert_eq!(Ordering::Less, (-s_nan()).total_cmp(&-f128::INFINITY));
assert_eq!(Ordering::Less, (-s_nan()).total_cmp(&-f128::MAX));
assert_eq!(Ordering::Less, (-s_nan()).total_cmp(&-2.5));
assert_eq!(Ordering::Less, (-s_nan()).total_cmp(&-1.5));
assert_eq!(Ordering::Less, (-s_nan()).total_cmp(&-1.0));
assert_eq!(Ordering::Less, (-s_nan()).total_cmp(&-0.5));
assert_eq!(Ordering::Less, (-s_nan()).total_cmp(&-f128::MIN_POSITIVE));
// assert_eq!(Ordering::Less, (-s_nan()).total_cmp(&-max_subnorm()));
// assert_eq!(Ordering::Less, (-s_nan()).total_cmp(&-min_subnorm()));
assert_eq!(Ordering::Less, (-s_nan()).total_cmp(&-0.0));
assert_eq!(Ordering::Less, (-s_nan()).total_cmp(&0.0));
// assert_eq!(Ordering::Less, (-s_nan()).total_cmp(&min_subnorm()));
// assert_eq!(Ordering::Less, (-s_nan()).total_cmp(&max_subnorm()));
assert_eq!(Ordering::Less, (-s_nan()).total_cmp(&f128::MIN_POSITIVE));
assert_eq!(Ordering::Less, (-s_nan()).total_cmp(&0.5));
assert_eq!(Ordering::Less, (-s_nan()).total_cmp(&1.0));
assert_eq!(Ordering::Less, (-s_nan()).total_cmp(&1.5));
assert_eq!(Ordering::Less, (-s_nan()).total_cmp(&2.5));
assert_eq!(Ordering::Less, (-s_nan()).total_cmp(&f128::MAX));
assert_eq!(Ordering::Less, (-s_nan()).total_cmp(&f128::INFINITY));
assert_eq!(Ordering::Less, (-s_nan()).total_cmp(&s_nan()));
}
+29
View File
@@ -32,4 +32,33 @@ impl f16 {
pub fn powi(self, n: i32) -> f16 {
unsafe { intrinsics::powif16(self, n) }
}
/// Computes the absolute value of `self`.
///
/// This function always returns the precise result.
///
/// # Examples
///
/// ```
/// #![feature(f16)]
/// # #[cfg(reliable_f16)] {
///
/// let x = 3.5_f16;
/// let y = -3.5_f16;
///
/// assert_eq!(x.abs(), x);
/// assert_eq!(y.abs(), -y);
///
/// assert!(f16::NAN.abs().is_nan());
/// # }
/// ```
#[inline]
#[cfg(not(bootstrap))]
#[rustc_allow_incoherent_impl]
#[unstable(feature = "f16", issue = "116909")]
#[must_use = "method returns a new number and does not mutate the original value"]
pub fn abs(self) -> Self {
    // FIXME(f16_f128): switch to `intrinsics::fabsf16` once it is available.
    // Clearing the top (sign) bit of the IEEE representation is equivalent.
    const SIGN_BIT: u16 = 1 << 15;
    Self::from_bits(self.to_bits() & !SIGN_BIT)
}
}
+509 -17
View File
@@ -1,35 +1,37 @@
#![allow(dead_code)] // FIXME(f16_f128): remove once constants are used
#![cfg(not(bootstrap))]
// FIXME(f16_f128): only tested on platforms that have symbols and aren't buggy
#![cfg(reliable_f16)]
use crate::f16::consts;
use crate::num::*;
// We run out of precision pretty quickly with f16
const F16_APPROX_L1: f16 = 0.001;
// const F16_APPROX_L1: f16 = 0.001;
const F16_APPROX_L2: f16 = 0.01;
const F16_APPROX_L3: f16 = 0.1;
// const F16_APPROX_L3: f16 = 0.1;
const F16_APPROX_L4: f16 = 0.5;
/// Smallest number
const TINY_BITS: u16 = 0x1;
/// Next smallest number
const TINY_UP_BITS: u16 = 0x2;
/// Exponent = 0b11...10, Significand 0b1111..10. Min val > 0
const MAX_DOWN_BITS: u16 = 0x7bfe;
/// Zeroed exponent, full significand
const LARGEST_SUBNORMAL_BITS: u16 = 0x03ff;
/// Exponent = 0b1, zeroed significand
const SMALLEST_NORMAL_BITS: u16 = 0x0400;
/// First pattern over the mantissa
const NAN_MASK1: u16 = 0x02aa;
/// Second pattern over the mantissa
const NAN_MASK2: u16 = 0x0155;
/// Compare two `f16` expressions by value, printing both bit patterns on
/// failure.
///
/// The hex width is 6 (`0x` prefix plus 4 nibbles of a `u16`); the original
/// `{:#018x}` was the `f64` width and padded a `u16` with 12 spurious zeros.
#[allow(unused_macros)]
macro_rules! assert_f16_eq {
    ($a:expr, $b:expr) => {
        let (l, r): (&f16, &f16) = (&$a, &$b);
        assert_eq!(*l, *r, "\na: {:#06x}\nb: {:#06x}", l.to_bits(), r.to_bits())
    };
}
/// Compare by representation
#[allow(unused_macros)]
macro_rules! assert_f16_biteq {
@@ -37,10 +39,500 @@ macro_rules! assert_f16_biteq {
let (l, r): (&f16, &f16) = (&$a, &$b);
let lb = l.to_bits();
let rb = r.to_bits();
assert_eq!(
lb, rb,
"float {:?} is not bitequal to {:?}.\na: {:#018x}\nb: {:#018x}",
*l, *r, lb, rb
);
assert_eq!(lb, rb, "float {l:?} ({lb:#04x}) is not bitequal to {r:?} ({rb:#04x})");
};
}
#[test]
fn test_num_f16() {
// Smoke test via the shared `test_num` helper (from `crate::num`);
// presumably exercises basic arithmetic on (10, 2) — see `crate::num`.
test_num(10f16, 2f16);
}
// FIXME(f16_f128): add min and max tests when available
#[test]
fn test_nan() {
// `f16::NAN` must be NaN, non-finite, non-infinite, with a clear sign bit.
let nan: f16 = f16::NAN;
assert!(nan.is_nan());
assert!(!nan.is_infinite());
assert!(!nan.is_finite());
assert!(nan.is_sign_positive());
assert!(!nan.is_sign_negative());
// FIXME(f16_f128): classify
// assert!(!nan.is_normal());
// assert_eq!(Fp::Nan, nan.classify());
}
#[test]
fn test_infinity() {
// Positive infinity: infinite, non-finite, positive sign, not NaN.
let inf: f16 = f16::INFINITY;
assert!(inf.is_infinite());
assert!(!inf.is_finite());
assert!(inf.is_sign_positive());
assert!(!inf.is_sign_negative());
assert!(!inf.is_nan());
// FIXME(f16_f128): classify
// assert!(!inf.is_normal());
// assert_eq!(Fp::Infinite, inf.classify());
}
#[test]
fn test_neg_infinity() {
// Negative infinity: infinite, non-finite, negative sign, not NaN.
let neg_inf: f16 = f16::NEG_INFINITY;
assert!(neg_inf.is_infinite());
assert!(!neg_inf.is_finite());
assert!(!neg_inf.is_sign_positive());
assert!(neg_inf.is_sign_negative());
assert!(!neg_inf.is_nan());
// FIXME(f16_f128): classify
// assert!(!neg_inf.is_normal());
// assert_eq!(Fp::Infinite, neg_inf.classify());
}
#[test]
fn test_zero() {
// +0.0: finite, compares equal to 0.0, positive sign bit.
let zero: f16 = 0.0f16;
assert_eq!(0.0, zero);
assert!(!zero.is_infinite());
assert!(zero.is_finite());
assert!(zero.is_sign_positive());
assert!(!zero.is_sign_negative());
assert!(!zero.is_nan());
// FIXME(f16_f128): classify
// assert!(!zero.is_normal());
// assert_eq!(Fp::Zero, zero.classify());
}
#[test]
fn test_neg_zero() {
// -0.0: compares equal to 0.0 (IEEE equality) but the sign bit is set.
let neg_zero: f16 = -0.0;
assert_eq!(0.0, neg_zero);
assert!(!neg_zero.is_infinite());
assert!(neg_zero.is_finite());
assert!(!neg_zero.is_sign_positive());
assert!(neg_zero.is_sign_negative());
assert!(!neg_zero.is_nan());
// FIXME(f16_f128): classify
// assert!(!neg_zero.is_normal());
// assert_eq!(Fp::Zero, neg_zero.classify());
}
#[test]
fn test_one() {
// An ordinary finite positive value.
let one: f16 = 1.0f16;
assert_eq!(1.0, one);
assert!(!one.is_infinite());
assert!(one.is_finite());
assert!(one.is_sign_positive());
assert!(!one.is_sign_negative());
assert!(!one.is_nan());
// FIXME(f16_f128): classify
// assert!(one.is_normal());
// assert_eq!(Fp::Normal, one.classify());
}
#[test]
fn test_is_nan() {
// Only NAN is NaN; finite values and infinities are not.
let nan: f16 = f16::NAN;
let inf: f16 = f16::INFINITY;
let neg_inf: f16 = f16::NEG_INFINITY;
assert!(nan.is_nan());
assert!(!0.0f16.is_nan());
assert!(!5.3f16.is_nan());
assert!(!(-10.732f16).is_nan());
assert!(!inf.is_nan());
assert!(!neg_inf.is_nan());
}
#[test]
fn test_is_infinite() {
// Only the two infinities are infinite.
let nan: f16 = f16::NAN;
let inf: f16 = f16::INFINITY;
let neg_inf: f16 = f16::NEG_INFINITY;
assert!(!nan.is_infinite());
assert!(inf.is_infinite());
assert!(neg_inf.is_infinite());
assert!(!0.0f16.is_infinite());
assert!(!42.8f16.is_infinite());
assert!(!(-109.2f16).is_infinite());
}
#[test]
fn test_is_finite() {
// NaN and infinities are not finite; everything else here is.
let nan: f16 = f16::NAN;
let inf: f16 = f16::INFINITY;
let neg_inf: f16 = f16::NEG_INFINITY;
assert!(!nan.is_finite());
assert!(!inf.is_finite());
assert!(!neg_inf.is_finite());
assert!(0.0f16.is_finite());
assert!(42.8f16.is_finite());
assert!((-109.2f16).is_finite());
}
// FIXME(f16_f128): add `test_is_normal` and `test_classify` when classify is working
// FIXME(f16_f128): add missing math functions when available
#[test]
fn test_abs() {
// abs clears the sign bit on every class of value; NaN propagates.
assert_eq!(f16::INFINITY.abs(), f16::INFINITY);
assert_eq!(1f16.abs(), 1f16);
assert_eq!(0f16.abs(), 0f16);
assert_eq!((-0f16).abs(), 0f16);
assert_eq!((-1f16).abs(), 1f16);
assert_eq!(f16::NEG_INFINITY.abs(), f16::INFINITY);
assert_eq!((1f16 / f16::NEG_INFINITY).abs(), 0f16);
assert!(f16::NAN.abs().is_nan());
}
#[test]
fn test_is_sign_positive() {
// Sign predicates inspect the sign bit only: -0.0 is negative, and the
// default NAN is positive while its negation is negative.
assert!(f16::INFINITY.is_sign_positive());
assert!(1f16.is_sign_positive());
assert!(0f16.is_sign_positive());
assert!(!(-0f16).is_sign_positive());
assert!(!(-1f16).is_sign_positive());
assert!(!f16::NEG_INFINITY.is_sign_positive());
assert!(!(1f16 / f16::NEG_INFINITY).is_sign_positive());
assert!(f16::NAN.is_sign_positive());
assert!(!(-f16::NAN).is_sign_positive());
}
#[test]
fn test_is_sign_negative() {
// Mirror image of `test_is_sign_positive` above.
assert!(!f16::INFINITY.is_sign_negative());
assert!(!1f16.is_sign_negative());
assert!(!0f16.is_sign_negative());
assert!((-0f16).is_sign_negative());
assert!((-1f16).is_sign_negative());
assert!(f16::NEG_INFINITY.is_sign_negative());
assert!((1f16 / f16::NEG_INFINITY).is_sign_negative());
assert!(!f16::NAN.is_sign_negative());
assert!((-f16::NAN).is_sign_negative());
}
#[test]
fn test_next_up() {
// Decode the boundary bit patterns declared at the top of this file.
let tiny = f16::from_bits(TINY_BITS);
let tiny_up = f16::from_bits(TINY_UP_BITS);
let max_down = f16::from_bits(MAX_DOWN_BITS);
let largest_subnormal = f16::from_bits(LARGEST_SUBNORMAL_BITS);
let smallest_normal = f16::from_bits(SMALLEST_NORMAL_BITS);
// Walk upward across every interesting boundary; note that next_up of
// -0.0 is `tiny` (the smallest positive subnormal), not +0.0, and that
// next_up of +inf saturates at +inf.
assert_f16_biteq!(f16::NEG_INFINITY.next_up(), f16::MIN);
assert_f16_biteq!(f16::MIN.next_up(), -max_down);
assert_f16_biteq!((-1.0 - f16::EPSILON).next_up(), -1.0);
assert_f16_biteq!((-smallest_normal).next_up(), -largest_subnormal);
assert_f16_biteq!((-tiny_up).next_up(), -tiny);
assert_f16_biteq!((-tiny).next_up(), -0.0f16);
assert_f16_biteq!((-0.0f16).next_up(), tiny);
assert_f16_biteq!(0.0f16.next_up(), tiny);
assert_f16_biteq!(tiny.next_up(), tiny_up);
assert_f16_biteq!(largest_subnormal.next_up(), smallest_normal);
assert_f16_biteq!(1.0f16.next_up(), 1.0 + f16::EPSILON);
assert_f16_biteq!(f16::MAX.next_up(), f16::INFINITY);
assert_f16_biteq!(f16::INFINITY.next_up(), f16::INFINITY);
// Check that NaNs roundtrip.
let nan0 = f16::NAN;
let nan1 = f16::from_bits(f16::NAN.to_bits() ^ NAN_MASK1);
let nan2 = f16::from_bits(f16::NAN.to_bits() ^ NAN_MASK2);
assert_f16_biteq!(nan0.next_up(), nan0);
assert_f16_biteq!(nan1.next_up(), nan1);
assert_f16_biteq!(nan2.next_up(), nan2);
}
#[test]
fn test_next_down() {
// Same boundary patterns as `test_next_up`, traversed downward.
let tiny = f16::from_bits(TINY_BITS);
let tiny_up = f16::from_bits(TINY_UP_BITS);
let max_down = f16::from_bits(MAX_DOWN_BITS);
let largest_subnormal = f16::from_bits(LARGEST_SUBNORMAL_BITS);
let smallest_normal = f16::from_bits(SMALLEST_NORMAL_BITS);
// -inf saturates; next_down of +0.0 is `-tiny`, skipping -0.0.
assert_f16_biteq!(f16::NEG_INFINITY.next_down(), f16::NEG_INFINITY);
assert_f16_biteq!(f16::MIN.next_down(), f16::NEG_INFINITY);
assert_f16_biteq!((-max_down).next_down(), f16::MIN);
assert_f16_biteq!((-1.0f16).next_down(), -1.0 - f16::EPSILON);
assert_f16_biteq!((-largest_subnormal).next_down(), -smallest_normal);
assert_f16_biteq!((-tiny).next_down(), -tiny_up);
assert_f16_biteq!((-0.0f16).next_down(), -tiny);
assert_f16_biteq!((0.0f16).next_down(), -tiny);
assert_f16_biteq!(tiny.next_down(), 0.0f16);
assert_f16_biteq!(tiny_up.next_down(), tiny);
assert_f16_biteq!(smallest_normal.next_down(), largest_subnormal);
assert_f16_biteq!((1.0 + f16::EPSILON).next_down(), 1.0f16);
assert_f16_biteq!(f16::MAX.next_down(), max_down);
assert_f16_biteq!(f16::INFINITY.next_down(), f16::MAX);
// Check that NaNs roundtrip.
let nan0 = f16::NAN;
let nan1 = f16::from_bits(f16::NAN.to_bits() ^ NAN_MASK1);
let nan2 = f16::from_bits(f16::NAN.to_bits() ^ NAN_MASK2);
assert_f16_biteq!(nan0.next_down(), nan0);
assert_f16_biteq!(nan1.next_down(), nan1);
assert_f16_biteq!(nan2.next_down(), nan2);
}
#[test]
fn test_recip() {
    // Finite reciprocals whose results are exactly representable.
    assert_eq!(1.0f16.recip(), 1.0);
    assert_eq!(2.0f16.recip(), 0.5);
    assert_eq!((-0.4f16).recip(), -2.5);
    // Special values: 1/0 = inf, 1/±inf = 0, and NaN propagates.
    assert_eq!(0.0f16.recip(), f16::INFINITY);
    assert_eq!(f16::INFINITY.recip(), 0.0);
    assert_eq!(f16::NEG_INFINITY.recip(), 0.0);
    assert!(f16::NAN.recip().is_nan());
}
#[test]
fn test_to_degrees() {
let pi: f16 = consts::PI;
let nan: f16 = f16::NAN;
let inf: f16 = f16::INFINITY;
let neg_inf: f16 = f16::NEG_INFINITY;
assert_eq!(0.0f16.to_degrees(), 0.0);
assert_approx_eq!((-5.8f16).to_degrees(), -332.315521);
assert_approx_eq!(pi.to_degrees(), 180.0, F16_APPROX_L4);
assert!(nan.to_degrees().is_nan());
assert_eq!(inf.to_degrees(), inf);
assert_eq!(neg_inf.to_degrees(), neg_inf);
assert_eq!(1_f16.to_degrees(), 57.2957795130823208767981548141051703);
}
#[test]
fn test_to_radians() {
let pi: f16 = consts::PI;
let nan: f16 = f16::NAN;
let inf: f16 = f16::INFINITY;
let neg_inf: f16 = f16::NEG_INFINITY;
assert_eq!(0.0f16.to_radians(), 0.0);
assert_approx_eq!(154.6f16.to_radians(), 2.698279);
assert_approx_eq!((-332.31f16).to_radians(), -5.799903);
assert_approx_eq!(180.0f16.to_radians(), pi, F16_APPROX_L2);
assert!(nan.to_radians().is_nan());
assert_eq!(inf.to_radians(), inf);
assert_eq!(neg_inf.to_radians(), neg_inf);
}
#[test]
fn test_real_consts() {
    // FIXME(f16_f128): add math tests when available
    use super::consts;

    let pi: f16 = consts::PI;

    // Each derived constant should match the arithmetic its name describes.
    assert_approx_eq!(consts::FRAC_PI_2, pi / 2f16);
    assert_approx_eq!(consts::FRAC_PI_3, pi / 3f16);
    assert_approx_eq!(consts::FRAC_PI_4, pi / 4f16);
    assert_approx_eq!(consts::FRAC_PI_6, pi / 6f16);
    assert_approx_eq!(consts::FRAC_PI_8, pi / 8f16);
    assert_approx_eq!(consts::FRAC_1_PI, 1f16 / pi);
    assert_approx_eq!(consts::FRAC_2_PI, 2f16 / pi);

    // The checks below need math operations (sqrt/ln/log) that are not yet
    // available for f16 — see the FIXME above.
    // let frac_2_sqrtpi: f16 = consts::FRAC_2_SQRT_PI;
    // let sqrt2: f16 = consts::SQRT_2;
    // let frac_1_sqrt2: f16 = consts::FRAC_1_SQRT_2;
    // let e: f16 = consts::E;
    // let log2_e: f16 = consts::LOG2_E;
    // let log10_e: f16 = consts::LOG10_E;
    // let ln_2: f16 = consts::LN_2;
    // let ln_10: f16 = consts::LN_10;
    // assert_approx_eq!(frac_2_sqrtpi, 2f16 / pi.sqrt());
    // assert_approx_eq!(sqrt2, 2f16.sqrt());
    // assert_approx_eq!(frac_1_sqrt2, 1f16 / 2f16.sqrt());
    // assert_approx_eq!(log2_e, e.log2());
    // assert_approx_eq!(log10_e, e.log10());
    // assert_approx_eq!(ln_2, 2f16.ln());
    // assert_approx_eq!(ln_10, 10f16.ln());
}
#[test]
fn test_float_bits_conv() {
    // Exactly representable values round-trip through their bit patterns.
    let cases: [(f16, u16); 4] =
        [(1.0, 0x3c00), (12.5, 0x4a40), (1337.0, 0x6539), (-14.25, 0xcb20)];
    for (val, bits) in cases {
        assert_eq!(val.to_bits(), bits);
        assert_approx_eq!(f16::from_bits(bits), val);
    }
    // NaN payload bits survive a from_bits/to_bits round trip regardless of
    // signaling-ness; NAN_MASK1/NAN_MASK2 flip different mantissa bits.
    for mask in [NAN_MASK1, NAN_MASK2] {
        let masked_nan = f16::NAN.to_bits() ^ mask;
        assert!(f16::from_bits(masked_nan).is_nan());
        assert_eq!(f16::from_bits(masked_nan).to_bits(), masked_nan);
    }
}
#[test]
#[should_panic]
fn test_clamp_min_greater_than_max() {
    // `clamp` must panic when the lower bound exceeds the upper bound.
    let (min, max) = (3.0f16, 1.0f16);
    let _ = 1.0f16.clamp(min, max);
}
#[test]
#[should_panic]
fn test_clamp_min_is_nan() {
    // `clamp` must panic when the lower bound is NaN.
    let nan_min = f16::NAN;
    let _ = 1.0f16.clamp(nan_min, 1.0);
}
#[test]
#[should_panic]
fn test_clamp_max_is_nan() {
    // `clamp` must panic when the upper bound is NaN.
    let nan_max = f16::NAN;
    let _ = 1.0f16.clamp(3.0, nan_max);
}
#[test]
fn test_total_cmp() {
    // Checks `total_cmp` across the whole ordering the asserts below spell
    // out: -qNaN < -sNaN < -inf < negative finites < -0 < +0 < positive
    // finites < +inf < +sNaN < +qNaN.
    use core::cmp::Ordering;

    // Mask selecting the highest mantissa bit, used to distinguish quiet
    // from signaling NaNs.
    fn quiet_bit_mask() -> u16 {
        1 << (f16::MANTISSA_DIGITS - 2)
    }

    // FIXME(f16_f128): test subnormals when powf is available
    // fn min_subnorm() -> f16 {
    // f16::MIN_POSITIVE / f16::powf(2.0, f16::MANTISSA_DIGITS as f16 - 1.0)
    // }
    // fn max_subnorm() -> f16 {
    // f16::MIN_POSITIVE - min_subnorm()
    // }

    // A quiet NaN: the default NaN with the quiet bit forced on.
    fn q_nan() -> f16 {
        f16::from_bits(f16::NAN.to_bits() | quiet_bit_mask())
    }

    // A signaling NaN: quiet bit cleared, with a nonzero payload (+42) so the
    // bit pattern still encodes a NaN.
    fn s_nan() -> f16 {
        f16::from_bits((f16::NAN.to_bits() & !quiet_bit_mask()) + 42)
    }

    // Reflexivity: every value, including NaNs and signed zeros, compares
    // Equal to itself.
    assert_eq!(Ordering::Equal, (-q_nan()).total_cmp(&-q_nan()));
    assert_eq!(Ordering::Equal, (-s_nan()).total_cmp(&-s_nan()));
    assert_eq!(Ordering::Equal, (-f16::INFINITY).total_cmp(&-f16::INFINITY));
    assert_eq!(Ordering::Equal, (-f16::MAX).total_cmp(&-f16::MAX));
    assert_eq!(Ordering::Equal, (-2.5_f16).total_cmp(&-2.5));
    assert_eq!(Ordering::Equal, (-1.0_f16).total_cmp(&-1.0));
    assert_eq!(Ordering::Equal, (-1.5_f16).total_cmp(&-1.5));
    assert_eq!(Ordering::Equal, (-0.5_f16).total_cmp(&-0.5));
    assert_eq!(Ordering::Equal, (-f16::MIN_POSITIVE).total_cmp(&-f16::MIN_POSITIVE));
    // assert_eq!(Ordering::Equal, (-max_subnorm()).total_cmp(&-max_subnorm()));
    // assert_eq!(Ordering::Equal, (-min_subnorm()).total_cmp(&-min_subnorm()));
    assert_eq!(Ordering::Equal, (-0.0_f16).total_cmp(&-0.0));
    assert_eq!(Ordering::Equal, 0.0_f16.total_cmp(&0.0));
    // assert_eq!(Ordering::Equal, min_subnorm().total_cmp(&min_subnorm()));
    // assert_eq!(Ordering::Equal, max_subnorm().total_cmp(&max_subnorm()));
    assert_eq!(Ordering::Equal, f16::MIN_POSITIVE.total_cmp(&f16::MIN_POSITIVE));
    assert_eq!(Ordering::Equal, 0.5_f16.total_cmp(&0.5));
    assert_eq!(Ordering::Equal, 1.0_f16.total_cmp(&1.0));
    assert_eq!(Ordering::Equal, 1.5_f16.total_cmp(&1.5));
    assert_eq!(Ordering::Equal, 2.5_f16.total_cmp(&2.5));
    assert_eq!(Ordering::Equal, f16::MAX.total_cmp(&f16::MAX));
    assert_eq!(Ordering::Equal, f16::INFINITY.total_cmp(&f16::INFINITY));
    assert_eq!(Ordering::Equal, s_nan().total_cmp(&s_nan()));
    assert_eq!(Ordering::Equal, q_nan().total_cmp(&q_nan()));

    // Each value compares Less against the next value up in the total order.
    assert_eq!(Ordering::Less, (-q_nan()).total_cmp(&-s_nan()));
    assert_eq!(Ordering::Less, (-s_nan()).total_cmp(&-f16::INFINITY));
    assert_eq!(Ordering::Less, (-f16::INFINITY).total_cmp(&-f16::MAX));
    assert_eq!(Ordering::Less, (-f16::MAX).total_cmp(&-2.5));
    assert_eq!(Ordering::Less, (-2.5_f16).total_cmp(&-1.5));
    assert_eq!(Ordering::Less, (-1.5_f16).total_cmp(&-1.0));
    assert_eq!(Ordering::Less, (-1.0_f16).total_cmp(&-0.5));
    assert_eq!(Ordering::Less, (-0.5_f16).total_cmp(&-f16::MIN_POSITIVE));
    // assert_eq!(Ordering::Less, (-f16::MIN_POSITIVE).total_cmp(&-max_subnorm()));
    // assert_eq!(Ordering::Less, (-max_subnorm()).total_cmp(&-min_subnorm()));
    // assert_eq!(Ordering::Less, (-min_subnorm()).total_cmp(&-0.0));
    assert_eq!(Ordering::Less, (-0.0_f16).total_cmp(&0.0));
    // assert_eq!(Ordering::Less, 0.0_f16.total_cmp(&min_subnorm()));
    // assert_eq!(Ordering::Less, min_subnorm().total_cmp(&max_subnorm()));
    // assert_eq!(Ordering::Less, max_subnorm().total_cmp(&f16::MIN_POSITIVE));
    assert_eq!(Ordering::Less, f16::MIN_POSITIVE.total_cmp(&0.5));
    assert_eq!(Ordering::Less, 0.5_f16.total_cmp(&1.0));
    assert_eq!(Ordering::Less, 1.0_f16.total_cmp(&1.5));
    assert_eq!(Ordering::Less, 1.5_f16.total_cmp(&2.5));
    assert_eq!(Ordering::Less, 2.5_f16.total_cmp(&f16::MAX));
    assert_eq!(Ordering::Less, f16::MAX.total_cmp(&f16::INFINITY));
    assert_eq!(Ordering::Less, f16::INFINITY.total_cmp(&s_nan()));
    assert_eq!(Ordering::Less, s_nan().total_cmp(&q_nan()));

    // And the reversed comparisons all yield Greater.
    assert_eq!(Ordering::Greater, (-s_nan()).total_cmp(&-q_nan()));
    assert_eq!(Ordering::Greater, (-f16::INFINITY).total_cmp(&-s_nan()));
    assert_eq!(Ordering::Greater, (-f16::MAX).total_cmp(&-f16::INFINITY));
    assert_eq!(Ordering::Greater, (-2.5_f16).total_cmp(&-f16::MAX));
    assert_eq!(Ordering::Greater, (-1.5_f16).total_cmp(&-2.5));
    assert_eq!(Ordering::Greater, (-1.0_f16).total_cmp(&-1.5));
    assert_eq!(Ordering::Greater, (-0.5_f16).total_cmp(&-1.0));
    assert_eq!(Ordering::Greater, (-f16::MIN_POSITIVE).total_cmp(&-0.5));
    // assert_eq!(Ordering::Greater, (-max_subnorm()).total_cmp(&-f16::MIN_POSITIVE));
    // assert_eq!(Ordering::Greater, (-min_subnorm()).total_cmp(&-max_subnorm()));
    // assert_eq!(Ordering::Greater, (-0.0_f16).total_cmp(&-min_subnorm()));
    assert_eq!(Ordering::Greater, 0.0_f16.total_cmp(&-0.0));
    // assert_eq!(Ordering::Greater, min_subnorm().total_cmp(&0.0));
    // assert_eq!(Ordering::Greater, max_subnorm().total_cmp(&min_subnorm()));
    // assert_eq!(Ordering::Greater, f16::MIN_POSITIVE.total_cmp(&max_subnorm()));
    assert_eq!(Ordering::Greater, 0.5_f16.total_cmp(&f16::MIN_POSITIVE));
    assert_eq!(Ordering::Greater, 1.0_f16.total_cmp(&0.5));
    assert_eq!(Ordering::Greater, 1.5_f16.total_cmp(&1.0));
    assert_eq!(Ordering::Greater, 2.5_f16.total_cmp(&1.5));
    assert_eq!(Ordering::Greater, f16::MAX.total_cmp(&2.5));
    assert_eq!(Ordering::Greater, f16::INFINITY.total_cmp(&f16::MAX));
    assert_eq!(Ordering::Greater, s_nan().total_cmp(&f16::INFINITY));
    assert_eq!(Ordering::Greater, q_nan().total_cmp(&s_nan()));

    // -qNaN compares Less than every other value in the order.
    assert_eq!(Ordering::Less, (-q_nan()).total_cmp(&-s_nan()));
    assert_eq!(Ordering::Less, (-q_nan()).total_cmp(&-f16::INFINITY));
    assert_eq!(Ordering::Less, (-q_nan()).total_cmp(&-f16::MAX));
    assert_eq!(Ordering::Less, (-q_nan()).total_cmp(&-2.5));
    assert_eq!(Ordering::Less, (-q_nan()).total_cmp(&-1.5));
    assert_eq!(Ordering::Less, (-q_nan()).total_cmp(&-1.0));
    assert_eq!(Ordering::Less, (-q_nan()).total_cmp(&-0.5));
    assert_eq!(Ordering::Less, (-q_nan()).total_cmp(&-f16::MIN_POSITIVE));
    // assert_eq!(Ordering::Less, (-q_nan()).total_cmp(&-max_subnorm()));
    // assert_eq!(Ordering::Less, (-q_nan()).total_cmp(&-min_subnorm()));
    assert_eq!(Ordering::Less, (-q_nan()).total_cmp(&-0.0));
    assert_eq!(Ordering::Less, (-q_nan()).total_cmp(&0.0));
    // assert_eq!(Ordering::Less, (-q_nan()).total_cmp(&min_subnorm()));
    // assert_eq!(Ordering::Less, (-q_nan()).total_cmp(&max_subnorm()));
    assert_eq!(Ordering::Less, (-q_nan()).total_cmp(&f16::MIN_POSITIVE));
    assert_eq!(Ordering::Less, (-q_nan()).total_cmp(&0.5));
    assert_eq!(Ordering::Less, (-q_nan()).total_cmp(&1.0));
    assert_eq!(Ordering::Less, (-q_nan()).total_cmp(&1.5));
    assert_eq!(Ordering::Less, (-q_nan()).total_cmp(&2.5));
    assert_eq!(Ordering::Less, (-q_nan()).total_cmp(&f16::MAX));
    assert_eq!(Ordering::Less, (-q_nan()).total_cmp(&f16::INFINITY));
    assert_eq!(Ordering::Less, (-q_nan()).total_cmp(&s_nan()));

    // -sNaN compares Less than everything except -qNaN.
    assert_eq!(Ordering::Less, (-s_nan()).total_cmp(&-f16::INFINITY));
    assert_eq!(Ordering::Less, (-s_nan()).total_cmp(&-f16::MAX));
    assert_eq!(Ordering::Less, (-s_nan()).total_cmp(&-2.5));
    assert_eq!(Ordering::Less, (-s_nan()).total_cmp(&-1.5));
    assert_eq!(Ordering::Less, (-s_nan()).total_cmp(&-1.0));
    assert_eq!(Ordering::Less, (-s_nan()).total_cmp(&-0.5));
    assert_eq!(Ordering::Less, (-s_nan()).total_cmp(&-f16::MIN_POSITIVE));
    // assert_eq!(Ordering::Less, (-s_nan()).total_cmp(&-max_subnorm()));
    // assert_eq!(Ordering::Less, (-s_nan()).total_cmp(&-min_subnorm()));
    assert_eq!(Ordering::Less, (-s_nan()).total_cmp(&-0.0));
    assert_eq!(Ordering::Less, (-s_nan()).total_cmp(&0.0));
    // assert_eq!(Ordering::Less, (-s_nan()).total_cmp(&min_subnorm()));
    // assert_eq!(Ordering::Less, (-s_nan()).total_cmp(&max_subnorm()));
    assert_eq!(Ordering::Less, (-s_nan()).total_cmp(&f16::MIN_POSITIVE));
    assert_eq!(Ordering::Less, (-s_nan()).total_cmp(&0.5));
    assert_eq!(Ordering::Less, (-s_nan()).total_cmp(&1.0));
    assert_eq!(Ordering::Less, (-s_nan()).total_cmp(&1.5));
    assert_eq!(Ordering::Less, (-s_nan()).total_cmp(&2.5));
    assert_eq!(Ordering::Less, (-s_nan()).total_cmp(&f16::MAX));
    assert_eq!(Ordering::Less, (-s_nan()).total_cmp(&f16::INFINITY));
    assert_eq!(Ordering::Less, (-s_nan()).total_cmp(&s_nan()));
}
+55 -27
View File
@@ -2,6 +2,45 @@
use crate::num::FpCategory as Fp;
use crate::num::*;
/// Smallest number
#[allow(dead_code)] // unused on x86
const TINY_BITS: u32 = 0x1;
/// Next smallest number
#[allow(dead_code)] // unused on x86
const TINY_UP_BITS: u32 = 0x2;
/// Exponent = 0b11...10, Significand 0b1111..10. Min val > 0
#[allow(dead_code)] // unused on x86
const MAX_DOWN_BITS: u32 = 0x7f7f_fffe;
/// Zeroed exponent, full significand
#[allow(dead_code)] // unused on x86
const LARGEST_SUBNORMAL_BITS: u32 = 0x007f_ffff;
/// Exponent = 0b1, zeroed significand
#[allow(dead_code)] // unused on x86
const SMALLEST_NORMAL_BITS: u32 = 0x0080_0000;
/// First pattern over the mantissa
#[allow(dead_code)] // unused on x86
const NAN_MASK1: u32 = 0x002a_aaaa;
/// Second pattern over the mantissa
#[allow(dead_code)] // unused on x86
const NAN_MASK2: u32 = 0x0055_5555;
#[allow(unused_macros)]
macro_rules! assert_f32_biteq {
($left : expr, $right : expr) => {
let l: &f32 = &$left;
let r: &f32 = &$right;
let lb = l.to_bits();
let rb = r.to_bits();
assert_eq!(lb, rb, "float {l} ({lb:#010x}) is not bitequal to {r} ({rb:#010x})");
};
}
#[test]
fn test_num_f32() {
test_num(10f32, 2f32);
@@ -315,27 +354,16 @@ fn test_is_sign_negative() {
assert!((-f32::NAN).is_sign_negative());
}
#[allow(unused_macros)]
macro_rules! assert_f32_biteq {
($left : expr, $right : expr) => {
let l: &f32 = &$left;
let r: &f32 = &$right;
let lb = l.to_bits();
let rb = r.to_bits();
assert_eq!(lb, rb, "float {} ({:#x}) is not equal to {} ({:#x})", *l, lb, *r, rb);
};
}
// Ignore test on x87 floating point, these platforms do not guarantee NaN
// payloads are preserved and flush denormals to zero, failing the tests.
#[cfg(not(target_arch = "x86"))]
#[test]
fn test_next_up() {
let tiny = f32::from_bits(1);
let tiny_up = f32::from_bits(2);
let max_down = f32::from_bits(0x7f7f_fffe);
let largest_subnormal = f32::from_bits(0x007f_ffff);
let smallest_normal = f32::from_bits(0x0080_0000);
let tiny = f32::from_bits(TINY_BITS);
let tiny_up = f32::from_bits(TINY_UP_BITS);
let max_down = f32::from_bits(MAX_DOWN_BITS);
let largest_subnormal = f32::from_bits(LARGEST_SUBNORMAL_BITS);
let smallest_normal = f32::from_bits(SMALLEST_NORMAL_BITS);
assert_f32_biteq!(f32::NEG_INFINITY.next_up(), f32::MIN);
assert_f32_biteq!(f32::MIN.next_up(), -max_down);
assert_f32_biteq!((-1.0 - f32::EPSILON).next_up(), -1.0);
@@ -352,8 +380,8 @@ fn test_next_up() {
// Check that NaNs roundtrip.
let nan0 = f32::NAN;
let nan1 = f32::from_bits(f32::NAN.to_bits() ^ 0x002a_aaaa);
let nan2 = f32::from_bits(f32::NAN.to_bits() ^ 0x0055_5555);
let nan1 = f32::from_bits(f32::NAN.to_bits() ^ NAN_MASK1);
let nan2 = f32::from_bits(f32::NAN.to_bits() ^ NAN_MASK2);
assert_f32_biteq!(nan0.next_up(), nan0);
assert_f32_biteq!(nan1.next_up(), nan1);
assert_f32_biteq!(nan2.next_up(), nan2);
@@ -364,11 +392,11 @@ fn test_next_up() {
#[cfg(not(target_arch = "x86"))]
#[test]
fn test_next_down() {
let tiny = f32::from_bits(1);
let tiny_up = f32::from_bits(2);
let max_down = f32::from_bits(0x7f7f_fffe);
let largest_subnormal = f32::from_bits(0x007f_ffff);
let smallest_normal = f32::from_bits(0x0080_0000);
let tiny = f32::from_bits(TINY_BITS);
let tiny_up = f32::from_bits(TINY_UP_BITS);
let max_down = f32::from_bits(MAX_DOWN_BITS);
let largest_subnormal = f32::from_bits(LARGEST_SUBNORMAL_BITS);
let smallest_normal = f32::from_bits(SMALLEST_NORMAL_BITS);
assert_f32_biteq!(f32::NEG_INFINITY.next_down(), f32::NEG_INFINITY);
assert_f32_biteq!(f32::MIN.next_down(), f32::NEG_INFINITY);
assert_f32_biteq!((-max_down).next_down(), f32::MIN);
@@ -386,8 +414,8 @@ fn test_next_down() {
// Check that NaNs roundtrip.
let nan0 = f32::NAN;
let nan1 = f32::from_bits(f32::NAN.to_bits() ^ 0x002a_aaaa);
let nan2 = f32::from_bits(f32::NAN.to_bits() ^ 0x0055_5555);
let nan1 = f32::from_bits(f32::NAN.to_bits() ^ NAN_MASK1);
let nan2 = f32::from_bits(f32::NAN.to_bits() ^ NAN_MASK2);
assert_f32_biteq!(nan0.next_down(), nan0);
assert_f32_biteq!(nan1.next_down(), nan1);
assert_f32_biteq!(nan2.next_down(), nan2);
@@ -734,8 +762,8 @@ fn test_float_bits_conv() {
// Check that NaNs roundtrip their bits regardless of signaling-ness
// 0xA is 0b1010; 0x5 is 0b0101 -- so these two together clobbers all the mantissa bits
let masked_nan1 = f32::NAN.to_bits() ^ 0x002A_AAAA;
let masked_nan2 = f32::NAN.to_bits() ^ 0x0055_5555;
let masked_nan1 = f32::NAN.to_bits() ^ NAN_MASK1;
let masked_nan2 = f32::NAN.to_bits() ^ NAN_MASK2;
assert!(f32::from_bits(masked_nan1).is_nan());
assert!(f32::from_bits(masked_nan2).is_nan());
+55 -28
View File
@@ -2,6 +2,45 @@
use crate::num::FpCategory as Fp;
use crate::num::*;
/// Smallest number
#[allow(dead_code)] // unused on x86
const TINY_BITS: u64 = 0x1;
/// Next smallest number
#[allow(dead_code)] // unused on x86
const TINY_UP_BITS: u64 = 0x2;
/// Exponent = 0b11...10, Significand 0b1111..10. Min val > 0
#[allow(dead_code)] // unused on x86
const MAX_DOWN_BITS: u64 = 0x7fef_ffff_ffff_fffe;
/// Zeroed exponent, full significand
#[allow(dead_code)] // unused on x86
const LARGEST_SUBNORMAL_BITS: u64 = 0x000f_ffff_ffff_ffff;
/// Exponent = 0b1, zeroed significand
#[allow(dead_code)] // unused on x86
const SMALLEST_NORMAL_BITS: u64 = 0x0010_0000_0000_0000;
/// First pattern over the mantissa
#[allow(dead_code)] // unused on x86
const NAN_MASK1: u64 = 0x000a_aaaa_aaaa_aaaa;
/// Second pattern over the mantissa
#[allow(dead_code)] // unused on x86
const NAN_MASK2: u64 = 0x0005_5555_5555_5555;
#[allow(unused_macros)]
macro_rules! assert_f64_biteq {
($left : expr, $right : expr) => {
let l: &f64 = &$left;
let r: &f64 = &$right;
let lb = l.to_bits();
let rb = r.to_bits();
assert_eq!(lb, rb, "float {l} ({lb:#018x}) is not bitequal to {r} ({rb:#018x})");
};
}
#[test]
fn test_num_f64() {
test_num(10f64, 2f64);
@@ -305,27 +344,16 @@ fn test_is_sign_negative() {
assert!((-f64::NAN).is_sign_negative());
}
#[allow(unused_macros)]
macro_rules! assert_f64_biteq {
($left : expr, $right : expr) => {
let l: &f64 = &$left;
let r: &f64 = &$right;
let lb = l.to_bits();
let rb = r.to_bits();
assert_eq!(lb, rb, "float {} ({:#x}) is not equal to {} ({:#x})", *l, lb, *r, rb);
};
}
// Ignore test on x87 floating point, these platforms do not guarantee NaN
// payloads are preserved and flush denormals to zero, failing the tests.
#[cfg(not(target_arch = "x86"))]
#[test]
fn test_next_up() {
let tiny = f64::from_bits(1);
let tiny_up = f64::from_bits(2);
let max_down = f64::from_bits(0x7fef_ffff_ffff_fffe);
let largest_subnormal = f64::from_bits(0x000f_ffff_ffff_ffff);
let smallest_normal = f64::from_bits(0x0010_0000_0000_0000);
let tiny = f64::from_bits(TINY_BITS);
let tiny_up = f64::from_bits(TINY_UP_BITS);
let max_down = f64::from_bits(MAX_DOWN_BITS);
let largest_subnormal = f64::from_bits(LARGEST_SUBNORMAL_BITS);
let smallest_normal = f64::from_bits(SMALLEST_NORMAL_BITS);
assert_f64_biteq!(f64::NEG_INFINITY.next_up(), f64::MIN);
assert_f64_biteq!(f64::MIN.next_up(), -max_down);
assert_f64_biteq!((-1.0 - f64::EPSILON).next_up(), -1.0);
@@ -341,8 +369,8 @@ fn test_next_up() {
assert_f64_biteq!(f64::INFINITY.next_up(), f64::INFINITY);
let nan0 = f64::NAN;
let nan1 = f64::from_bits(f64::NAN.to_bits() ^ 0x000a_aaaa_aaaa_aaaa);
let nan2 = f64::from_bits(f64::NAN.to_bits() ^ 0x0005_5555_5555_5555);
let nan1 = f64::from_bits(f64::NAN.to_bits() ^ NAN_MASK1);
let nan2 = f64::from_bits(f64::NAN.to_bits() ^ NAN_MASK2);
assert_f64_biteq!(nan0.next_up(), nan0);
assert_f64_biteq!(nan1.next_up(), nan1);
assert_f64_biteq!(nan2.next_up(), nan2);
@@ -353,11 +381,11 @@ fn test_next_up() {
#[cfg(not(target_arch = "x86"))]
#[test]
fn test_next_down() {
let tiny = f64::from_bits(1);
let tiny_up = f64::from_bits(2);
let max_down = f64::from_bits(0x7fef_ffff_ffff_fffe);
let largest_subnormal = f64::from_bits(0x000f_ffff_ffff_ffff);
let smallest_normal = f64::from_bits(0x0010_0000_0000_0000);
let tiny = f64::from_bits(TINY_BITS);
let tiny_up = f64::from_bits(TINY_UP_BITS);
let max_down = f64::from_bits(MAX_DOWN_BITS);
let largest_subnormal = f64::from_bits(LARGEST_SUBNORMAL_BITS);
let smallest_normal = f64::from_bits(SMALLEST_NORMAL_BITS);
assert_f64_biteq!(f64::NEG_INFINITY.next_down(), f64::NEG_INFINITY);
assert_f64_biteq!(f64::MIN.next_down(), f64::NEG_INFINITY);
assert_f64_biteq!((-max_down).next_down(), f64::MIN);
@@ -374,8 +402,8 @@ fn test_next_down() {
assert_f64_biteq!(f64::INFINITY.next_down(), f64::MAX);
let nan0 = f64::NAN;
let nan1 = f64::from_bits(f64::NAN.to_bits() ^ 0x000a_aaaa_aaaa_aaaa);
let nan2 = f64::from_bits(f64::NAN.to_bits() ^ 0x0005_5555_5555_5555);
let nan1 = f64::from_bits(f64::NAN.to_bits() ^ NAN_MASK1);
let nan2 = f64::from_bits(f64::NAN.to_bits() ^ NAN_MASK2);
assert_f64_biteq!(nan0.next_down(), nan0);
assert_f64_biteq!(nan1.next_down(), nan1);
assert_f64_biteq!(nan2.next_down(), nan2);
@@ -715,9 +743,8 @@ fn test_float_bits_conv() {
assert_approx_eq!(f64::from_bits(0xc02c800000000000), -14.25);
// Check that NaNs roundtrip their bits regardless of signaling-ness
// 0xA is 0b1010; 0x5 is 0b0101 -- so these two together clobbers all the mantissa bits
let masked_nan1 = f64::NAN.to_bits() ^ 0x000A_AAAA_AAAA_AAAA;
let masked_nan2 = f64::NAN.to_bits() ^ 0x0005_5555_5555_5555;
let masked_nan1 = f64::NAN.to_bits() ^ NAN_MASK1;
let masked_nan2 = f64::NAN.to_bits() ^ NAN_MASK2;
assert!(f64::from_bits(masked_nan1).is_nan());
assert!(f64::from_bits(masked_nan2).is_nan());
+13 -3
View File
@@ -552,10 +552,20 @@ pub fn leak<'a>(self) -> &'a mut OsStr {
OsStr::from_inner_mut(self.inner.leak())
}
/// Part of a hack to make PathBuf::push/pop more efficient.
/// Provides plumbing to core `Vec::truncate`.
/// More well behaving alternative to allowing outer types
/// full mutable access to the core `Vec`.
#[inline]
pub(crate) fn as_mut_vec_for_path_buf(&mut self) -> &mut Vec<u8> {
self.inner.as_mut_vec_for_path_buf()
pub(crate) fn truncate(&mut self, len: usize) {
self.inner.truncate(len);
}
/// Provides plumbing to core `Vec::extend_from_slice`.
/// More well behaving alternative to allowing outer types
/// full mutable access to the core `Vec`.
#[inline]
pub(crate) fn extend_from_slice(&mut self, other: &[u8]) {
self.inner.extend_from_slice(other);
}
}
+9 -2
View File
@@ -373,10 +373,17 @@ macro_rules! dbg {
};
}
/// Verify that floats are within a tolerance of each other, 1.0e-6 by default.
#[cfg(test)]
macro_rules! assert_approx_eq {
($a:expr, $b:expr) => {{
($a:expr, $b:expr) => {{ assert_approx_eq!($a, $b, 1.0e-6) }};
($a:expr, $b:expr, $lim:expr) => {{
let (a, b) = (&$a, &$b);
assert!((*a - *b).abs() < 1.0e-6, "{} is not approximately equal to {}", *a, *b);
let diff = (*a - *b).abs();
assert!(
diff < $lim,
"{a:?} is not approximately equal to {b:?} (threshold {lim:?}, actual {diff:?})",
lim = $lim
);
}};
}
+13 -18
View File
@@ -1163,11 +1163,6 @@ pub struct PathBuf {
}
impl PathBuf {
#[inline]
fn as_mut_vec(&mut self) -> &mut Vec<u8> {
self.inner.as_mut_vec_for_path_buf()
}
/// Allocates an empty `PathBuf`.
///
/// # Examples
@@ -1290,7 +1285,8 @@ pub fn push<P: AsRef<Path>>(&mut self, path: P) {
fn _push(&mut self, path: &Path) {
// in general, a separator is needed if the rightmost byte is not a separator
let mut need_sep = self.as_mut_vec().last().map(|c| !is_sep_byte(*c)).unwrap_or(false);
let buf = self.inner.as_encoded_bytes();
let mut need_sep = buf.last().map(|c| !is_sep_byte(*c)).unwrap_or(false);
// in the special case of `C:` on Windows, do *not* add a separator
let comps = self.components();
@@ -1304,7 +1300,7 @@ fn _push(&mut self, path: &Path) {
// absolute `path` replaces `self`
if path.is_absolute() || path.prefix().is_some() {
self.as_mut_vec().truncate(0);
self.inner.truncate(0);
// verbatim paths need . and .. removed
} else if comps.prefix_verbatim() && !path.inner.is_empty() {
@@ -1349,7 +1345,7 @@ fn _push(&mut self, path: &Path) {
// `path` has a root but no prefix, e.g., `\windows` (Windows only)
} else if path.has_root() {
let prefix_len = self.components().prefix_remaining();
self.as_mut_vec().truncate(prefix_len);
self.inner.truncate(prefix_len);
// `path` is a pure relative path
} else if need_sep {
@@ -1382,7 +1378,7 @@ fn _push(&mut self, path: &Path) {
pub fn pop(&mut self) -> bool {
match self.parent().map(|p| p.as_u8_slice().len()) {
Some(len) => {
self.as_mut_vec().truncate(len);
self.inner.truncate(len);
true
}
None => false,
@@ -1510,15 +1506,14 @@ fn _set_extension(&mut self, extension: &OsStr) -> bool {
// truncate until right after the file stem
let end_file_stem = file_stem[file_stem.len()..].as_ptr().addr();
let start = self.inner.as_encoded_bytes().as_ptr().addr();
let v = self.as_mut_vec();
v.truncate(end_file_stem.wrapping_sub(start));
self.inner.truncate(end_file_stem.wrapping_sub(start));
// add the new extension, if any
let new = extension.as_encoded_bytes();
let new = extension;
if !new.is_empty() {
v.reserve_exact(new.len() + 1);
v.push(b'.');
v.extend_from_slice(new);
self.inner.reserve_exact(new.len() + 1);
self.inner.push(OsStr::new("."));
self.inner.push(new);
}
true
@@ -2645,18 +2640,18 @@ fn _with_extension(&self, extension: &OsStr) -> PathBuf {
None => {
// Enough capacity for the extension and the dot
let capacity = self_len + extension.len() + 1;
let whole_path = self_bytes.iter();
let whole_path = self_bytes;
(capacity, whole_path)
}
Some(previous_extension) => {
let capacity = self_len + extension.len() - previous_extension.len();
let path_till_dot = self_bytes[..self_len - previous_extension.len()].iter();
let path_till_dot = &self_bytes[..self_len - previous_extension.len()];
(capacity, path_till_dot)
}
};
let mut new_path = PathBuf::with_capacity(new_capacity);
new_path.as_mut_vec().extend(slice_to_copy);
new_path.inner.extend_from_slice(slice_to_copy);
new_path.set_extension(extension);
new_path
}
+13 -3
View File
@@ -202,10 +202,20 @@ pub fn into_rc(&self) -> Rc<Slice> {
self.as_slice().into_rc()
}
/// Part of a hack to make PathBuf::push/pop more efficient.
/// Provides plumbing to core `Vec::truncate`.
/// More well behaving alternative to allowing outer types
/// full mutable access to the core `Vec`.
#[inline]
pub(crate) fn as_mut_vec_for_path_buf(&mut self) -> &mut Vec<u8> {
&mut self.inner
pub(crate) fn truncate(&mut self, len: usize) {
self.inner.truncate(len);
}
/// Provides plumbing to core `Vec::extend_from_slice`.
/// More well behaving alternative to allowing outer types
/// full mutable access to the core `Vec`.
#[inline]
pub(crate) fn extend_from_slice(&mut self, other: &[u8]) {
self.inner.extend_from_slice(other);
}
}
+13 -3
View File
@@ -165,10 +165,20 @@ pub fn into_rc(&self) -> Rc<Slice> {
self.as_slice().into_rc()
}
/// Part of a hack to make PathBuf::push/pop more efficient.
/// Provides plumbing to core `Vec::truncate`.
/// More well behaving alternative to allowing outer types
/// full mutable access to the core `Vec`.
#[inline]
pub(crate) fn as_mut_vec_for_path_buf(&mut self) -> &mut Vec<u8> {
self.inner.as_mut_vec_for_path_buf()
pub(crate) fn truncate(&mut self, len: usize) {
self.inner.truncate(len);
}
/// Provides plumbing to core `Vec::extend_from_slice`.
/// More well behaving alternative to allowing outer types
/// full mutable access to the core `Vec`.
#[inline]
pub(crate) fn extend_from_slice(&mut self, other: &[u8]) {
self.inner.extend_from_slice(other);
}
}
+6 -6
View File
@@ -474,13 +474,13 @@ pub fn from_box(boxed: Box<Wtf8>) -> Wtf8Buf {
Wtf8Buf { bytes: bytes.into_vec(), is_known_utf8: false }
}
/// Part of a hack to make PathBuf::push/pop more efficient.
/// Provides plumbing to core `Vec::extend_from_slice`.
/// More well behaving alternative to allowing outer types
/// full mutable access to the core `Vec`.
#[inline]
pub(crate) fn as_mut_vec_for_path_buf(&mut self) -> &mut Vec<u8> {
// FIXME: this function should not even exist, as it implies violating Wtf8Buf invariants
// For now, simply assume that is about to happen.
self.is_known_utf8 = false;
&mut self.bytes
pub(crate) fn extend_from_slice(&mut self, other: &[u8]) {
self.bytes.extend_from_slice(other);
self.is_known_utf8 = self.is_known_utf8 || self.next_surrogate(0).is_none();
}
}
+9 -6
View File
@@ -211,6 +211,13 @@ pub fn prepare_tool_cargo(
// See https://github.com/rust-lang/rust/issues/116538
cargo.rustflag("-Zunstable-options");
// `-Zon-broken-pipe=kill` breaks cargo tests
if !path.ends_with("cargo") {
// If the output is piped to e.g. `head -n1` we want the process to be killed,
// rather than having an error bubble up and cause a panic.
cargo.rustflag("-Zon-broken-pipe=kill");
}
cargo
}
@@ -575,7 +582,8 @@ fn run(self, builder: &Builder<'_>) -> PathBuf {
features.push("jemalloc".to_string());
}
let mut cargo = prepare_tool_cargo(
// NOTE: Never modify the rustflags here, it breaks the build cache for other tools!
let cargo = prepare_tool_cargo(
builder,
build_compiler,
Mode::ToolRustc,
@@ -586,11 +594,6 @@ fn run(self, builder: &Builder<'_>) -> PathBuf {
features.as_slice(),
);
// If the rustdoc output is piped to e.g. `head -n1` we want the process
// to be killed, rather than having an error bubble up and cause a
// panic.
cargo.rustflag("-Zon-broken-pipe=kill");
let _guard = builder.msg_tool(
Kind::Build,
Mode::ToolRustc,
@@ -35,4 +35,4 @@ COPY host-x86_64/mingw-check/validate-error-codes.sh /scripts/
# NOTE: intentionally uses python2 for x.py so we can test it still works.
# validate-toolstate only runs in our CI, so it's ok for it to only support python3.
ENV SCRIPT TIDY_PRINT_DIFF=1 python2.7 ../x.py test \
--stage 0 src/tools/tidy tidyselftest --extra-checks=py:lint
--stage 0 src/tools/tidy tidyselftest --extra-checks=py:lint,cpp:fmt
+55 -5
View File
@@ -1,12 +1,13 @@
# Lint Levels
In `rustc`, lints are divided into five *levels*:
In `rustc`, lints are divided into six *levels*:
1. allow
2. warn
3. force-warn
4. deny
5. forbid
2. expect
3. warn
4. force-warn
5. deny
6. forbid
Each lint has a default level (explained in the lint listing later in this
chapter), and the compiler has a default warning level. First, let's explain
@@ -33,6 +34,40 @@ But this code violates the `missing_docs` lint.
These lints exist mostly to be manually turned on via configuration, as we'll
talk about later in this section.
## expect
Sometimes, it can be helpful to suppress lints, but at the same time ensure that
the code in question still emits them. The 'expect' level does exactly this. If
the lint in question is not emitted, the `unfulfilled_lint_expectation` lint
triggers on the `expect` attribute, notifying you that the expectation is no
longer fulfilled.
```rust
fn main() {
#[expect(unused_variables)]
let unused = "Everyone ignores me";
#[expect(unused_variables)] // `unused_variables` lint is not emitted
let used = "I'm useful"; // the expectation is therefore unfulfilled
println!("The `used` value is equal to: {:?}", used);
}
```
This will produce the following warning:
```txt
warning: this lint expectation is unfulfilled
--> src/main.rs:7:14
|
7 | #[expect(unused_variables)]
| ^^^^^^^^^^^^^^^^
|
= note: `#[warn(unfulfilled_lint_expectations)]` on by default
```
This level can only be defined via the `#[expect]` attribute, there is no equivalent
flag. Lints with the special 'force-warn' level will still be emitted as usual.
## warn
The 'warn' lint level will produce a warning if you violate the lint. For example,
@@ -240,6 +275,21 @@ And use multiple attributes together:
pub fn foo() {}
```
All lint attributes support an additional `reason` parameter, to give context why
a certain attribute was added. This reason will be displayed as part of the lint
message, if the lint is emitted at the defined level.
```rust
use std::path::PathBuf;
pub fn get_path() -> PathBuf {
#[allow(unused_mut, reason = "this is only modified on some platforms")]
let mut file_name = PathBuf::from("git");
#[cfg(target_os = "windows")]
file_name.set_extension("exe");
file_name
}
```
### Capping lints
`rustc` supports a flag, `--cap-lints LEVEL` that sets the "lint cap level."
+1 -1
View File
@@ -1943,7 +1943,7 @@ Released 2022-05-19
[#8218](https://github.com/rust-lang/rust-clippy/pull/8218)
* [`needless_match`]
[#8471](https://github.com/rust-lang/rust-clippy/pull/8471)
* [`allow_attributes_without_reason`] (Requires `#![feature(lint_reasons)]`)
* [`allow_attributes_without_reason`]
[#8504](https://github.com/rust-lang/rust-clippy/pull/8504)
* [`print_in_format_impl`]
[#8253](https://github.com/rust-lang/rust-clippy/pull/8253)
@@ -669,6 +669,8 @@ The minimum rust version that the project supports. Defaults to the `rust-versio
---
**Affected lints:**
* [`allow_attributes`](https://rust-lang.github.io/rust-clippy/master/index.html#allow_attributes)
* [`allow_attributes_without_reason`](https://rust-lang.github.io/rust-clippy/master/index.html#allow_attributes_without_reason)
* [`almost_complete_range`](https://rust-lang.github.io/rust-clippy/master/index.html#almost_complete_range)
* [`approx_constant`](https://rust-lang.github.io/rust-clippy/master/index.html#approx_constant)
* [`assigning_clones`](https://rust-lang.github.io/rust-clippy/master/index.html#assigning_clones)
+1 -1
View File
@@ -265,7 +265,7 @@ pub fn get_configuration_metadata() -> Vec<ClippyConfiguration> {
///
/// Suppress lints whenever the suggested change would cause breakage for other crates.
(avoid_breaking_exported_api: bool = true),
/// Lint: MANUAL_SPLIT_ONCE, MANUAL_STR_REPEAT, CLONED_INSTEAD_OF_COPIED, REDUNDANT_FIELD_NAMES, OPTION_MAP_UNWRAP_OR, REDUNDANT_STATIC_LIFETIMES, FILTER_MAP_NEXT, CHECKED_CONVERSIONS, MANUAL_RANGE_CONTAINS, USE_SELF, MEM_REPLACE_WITH_DEFAULT, MANUAL_NON_EXHAUSTIVE, OPTION_AS_REF_DEREF, MAP_UNWRAP_OR, MATCH_LIKE_MATCHES_MACRO, MANUAL_STRIP, MISSING_CONST_FOR_FN, UNNESTED_OR_PATTERNS, FROM_OVER_INTO, PTR_AS_PTR, IF_THEN_SOME_ELSE_NONE, APPROX_CONSTANT, DEPRECATED_CFG_ATTR, INDEX_REFUTABLE_SLICE, MAP_CLONE, BORROW_AS_PTR, MANUAL_BITS, ERR_EXPECT, CAST_ABS_TO_UNSIGNED, UNINLINED_FORMAT_ARGS, MANUAL_CLAMP, MANUAL_LET_ELSE, UNCHECKED_DURATION_SUBTRACTION, COLLAPSIBLE_STR_REPLACE, SEEK_FROM_CURRENT, SEEK_REWIND, UNNECESSARY_LAZY_EVALUATIONS, TRANSMUTE_PTR_TO_REF, ALMOST_COMPLETE_RANGE, NEEDLESS_BORROW, DERIVABLE_IMPLS, MANUAL_IS_ASCII_CHECK, MANUAL_REM_EUCLID, MANUAL_RETAIN, TYPE_REPETITION_IN_BOUNDS, TUPLE_ARRAY_CONVERSIONS, MANUAL_TRY_FOLD, MANUAL_HASH_ONE, ITER_KV_MAP, MANUAL_C_STR_LITERALS, ASSIGNING_CLONES, LEGACY_NUMERIC_CONSTANTS.
/// Lint: MANUAL_SPLIT_ONCE, MANUAL_STR_REPEAT, CLONED_INSTEAD_OF_COPIED, REDUNDANT_FIELD_NAMES, OPTION_MAP_UNWRAP_OR, REDUNDANT_STATIC_LIFETIMES, FILTER_MAP_NEXT, CHECKED_CONVERSIONS, MANUAL_RANGE_CONTAINS, USE_SELF, MEM_REPLACE_WITH_DEFAULT, MANUAL_NON_EXHAUSTIVE, OPTION_AS_REF_DEREF, MAP_UNWRAP_OR, MATCH_LIKE_MATCHES_MACRO, MANUAL_STRIP, MISSING_CONST_FOR_FN, UNNESTED_OR_PATTERNS, FROM_OVER_INTO, PTR_AS_PTR, IF_THEN_SOME_ELSE_NONE, APPROX_CONSTANT, DEPRECATED_CFG_ATTR, INDEX_REFUTABLE_SLICE, MAP_CLONE, BORROW_AS_PTR, MANUAL_BITS, ERR_EXPECT, CAST_ABS_TO_UNSIGNED, UNINLINED_FORMAT_ARGS, MANUAL_CLAMP, MANUAL_LET_ELSE, UNCHECKED_DURATION_SUBTRACTION, COLLAPSIBLE_STR_REPLACE, SEEK_FROM_CURRENT, SEEK_REWIND, UNNECESSARY_LAZY_EVALUATIONS, TRANSMUTE_PTR_TO_REF, ALMOST_COMPLETE_RANGE, NEEDLESS_BORROW, DERIVABLE_IMPLS, MANUAL_IS_ASCII_CHECK, MANUAL_REM_EUCLID, MANUAL_RETAIN, TYPE_REPETITION_IN_BOUNDS, TUPLE_ARRAY_CONVERSIONS, MANUAL_TRY_FOLD, MANUAL_HASH_ONE, ITER_KV_MAP, MANUAL_C_STR_LITERALS, ASSIGNING_CLONES, LEGACY_NUMERIC_CONSTANTS, ALLOW_ATTRIBUTES, ALLOW_ATTRIBUTES_WITHOUT_REASON.
///
/// The minimum rust version that the project supports. Defaults to the `rust-version` field in `Cargo.toml`
#[default_text = ""]
@@ -17,6 +17,7 @@ macro_rules! msrv_aliases {
// names may refer to stabilized feature flags or library items
msrv_aliases! {
1,81,0 { LINT_REASONS_STABILIZATION }
1,77,0 { C_STR_LITERALS }
1,76,0 { PTR_FROM_REF }
1,71,0 { TUPLE_ARRAY_CONVERSIONS, BUILD_HASHER_HASH_ONE }
@@ -1,74 +0,0 @@
use ast::{AttrStyle, Attribute};
use clippy_utils::diagnostics::span_lint_and_sugg;
use clippy_utils::is_from_proc_macro;
use rustc_ast as ast;
use rustc_errors::Applicability;
use rustc_lint::{LateContext, LateLintPass, LintContext};
use rustc_middle::lint::in_external_macro;
use rustc_session::declare_lint_pass;
declare_clippy_lint! {
/// ### What it does
/// Checks for usage of the `#[allow]` attribute and suggests replacing it with
/// the `#[expect]` (See [RFC 2383](https://rust-lang.github.io/rfcs/2383-lint-reasons.html))
///
/// The expect attribute is still unstable and requires the `lint_reasons`
/// on nightly. It can be enabled by adding `#![feature(lint_reasons)]` to
/// the crate root.
///
/// This lint only warns outer attributes (`#[allow]`), as inner attributes
/// (`#![allow]`) are usually used to enable or disable lints on a global scale.
///
/// ### Why restrict this?
/// `#[allow]` attributes can linger after their reason for existence is gone.
/// `#[expect]` attributes suppress the lint emission, but emit a warning if
/// the expectation is unfulfilled. This can be useful to be notified when the
/// lint is no longer triggered, which may indicate the attribute can be removed.
///
/// ### Example
/// ```rust,ignore
/// #[allow(unused_mut)]
/// fn foo() -> usize {
/// let mut a = Vec::new();
/// a.len()
/// }
/// ```
/// Use instead:
/// ```rust,ignore
/// #![feature(lint_reasons)]
/// #[expect(unused_mut)]
/// fn foo() -> usize {
/// let mut a = Vec::new();
/// a.len()
/// }
/// ```
#[clippy::version = "1.70.0"]
pub ALLOW_ATTRIBUTES,
restriction,
"`#[allow]` will not trigger if a warning isn't found. `#[expect]` triggers if there are no warnings."
}
declare_lint_pass!(AllowAttribute => [ALLOW_ATTRIBUTES]);
impl LateLintPass<'_> for AllowAttribute {
// Separate each crate's features.
fn check_attribute<'cx>(&mut self, cx: &LateContext<'cx>, attr: &'cx Attribute) {
if !in_external_macro(cx.sess(), attr.span)
&& cx.tcx.features().lint_reasons
&& let AttrStyle::Outer = attr.style
&& let Some(ident) = attr.ident()
&& ident.name == rustc_span::symbol::sym::allow
&& !is_from_proc_macro(cx, &attr)
{
span_lint_and_sugg(
cx,
ALLOW_ATTRIBUTES,
ident.span,
"#[allow] attribute found",
"replace it with",
"expect".into(),
Applicability::MachineApplicable,
);
}
}
}
@@ -0,0 +1,26 @@
use clippy_utils::diagnostics::span_lint_and_sugg;
use clippy_utils::is_from_proc_macro;
use rustc_ast::{AttrStyle, Attribute};
use rustc_errors::Applicability;
use rustc_lint::{LateContext, LintContext};
use rustc_middle::lint::in_external_macro;
use super::ALLOW_ATTRIBUTES;
// Separate each crate's features.
pub fn check<'cx>(cx: &LateContext<'cx>, attr: &'cx Attribute) {
if !in_external_macro(cx.sess(), attr.span)
&& let AttrStyle::Outer = attr.style
&& let Some(ident) = attr.ident()
&& !is_from_proc_macro(cx, &attr)
{
span_lint_and_sugg(
cx,
ALLOW_ATTRIBUTES,
ident.span,
"#[allow] attribute found",
"replace it with",
"expect".into(),
Applicability::MachineApplicable,
);
}
}
@@ -8,11 +8,6 @@
use rustc_span::symbol::Symbol;
pub(super) fn check<'cx>(cx: &LateContext<'cx>, name: Symbol, items: &[NestedMetaItem], attr: &'cx Attribute) {
// Check for the feature
if !cx.tcx.features().lint_reasons {
return;
}
// Check if the reason is present
if let Some(item) = items.last().and_then(NestedMetaItem::meta_item)
&& let MetaItemKind::NameValue(_) = &item.kind
+61 -10
View File
@@ -1,6 +1,7 @@
//! checks for attributes
mod allow_attributes_without_reason;
mod allow_attributes;
mod blanket_clippy_restriction_lints;
mod deprecated_cfg_attr;
mod deprecated_semver;
@@ -14,11 +15,11 @@
mod useless_attribute;
mod utils;
use clippy_config::msrvs::Msrv;
use clippy_config::msrvs::{self, Msrv};
use rustc_ast::{Attribute, MetaItemKind, NestedMetaItem};
use rustc_hir::{ImplItem, Item, ItemKind, TraitItem};
use rustc_lint::{EarlyContext, EarlyLintPass, LateContext, LateLintPass};
use rustc_session::{declare_lint_pass, impl_lint_pass};
use rustc_session::impl_lint_pass;
use rustc_span::sym;
use utils::{is_lint_level, is_relevant_impl, is_relevant_item, is_relevant_trait};
@@ -272,23 +273,17 @@
/// ### What it does
/// Checks for attributes that allow lints without a reason.
///
/// (This requires the `lint_reasons` feature)
///
/// ### Why restrict this?
/// Justifying each `allow` helps readers understand the reasoning,
/// and may allow removing `allow` attributes if their purpose is obsolete.
///
/// ### Example
/// ```no_run
/// #![feature(lint_reasons)]
///
/// #![allow(clippy::some_lint)]
/// ```
///
/// Use instead:
/// ```no_run
/// #![feature(lint_reasons)]
///
/// #![allow(clippy::some_lint, reason = "False positive rust-lang/rust-clippy#1002020")]
/// ```
#[clippy::version = "1.61.0"]
@@ -297,6 +292,41 @@
"ensures that all `allow` and `expect` attributes have a reason"
}
declare_clippy_lint! {
/// ### What it does
/// Checks for usage of the `#[allow]` attribute and suggests replacing it with
/// the `#[expect]` (See [RFC 2383](https://rust-lang.github.io/rfcs/2383-lint-reasons.html))
///
/// This lint only warns outer attributes (`#[allow]`), as inner attributes
/// (`#![allow]`) are usually used to enable or disable lints on a global scale.
///
/// ### Why is this bad?
/// `#[expect]` attributes suppress the lint emission, but emit a warning, if
/// the expectation is unfulfilled. This can be useful to be notified when the
/// lint is no longer triggered.
///
/// ### Example
/// ```rust,ignore
/// #[allow(unused_mut)]
/// fn foo() -> usize {
/// let mut a = Vec::new();
/// a.len()
/// }
/// ```
/// Use instead:
/// ```rust,ignore
/// #[expect(unused_mut)]
/// fn foo() -> usize {
/// let mut a = Vec::new();
/// a.len()
/// }
/// ```
#[clippy::version = "1.70.0"]
pub ALLOW_ATTRIBUTES,
restriction,
"`#[allow]` will not trigger if a warning isn't found. `#[expect]` triggers if there are no warnings."
}
declare_clippy_lint! {
/// ### What it does
/// Checks for `#[should_panic]` attributes without specifying the expected panic message.
@@ -469,7 +499,12 @@
"duplicated attribute"
}
declare_lint_pass!(Attributes => [
#[derive(Clone)]
pub struct Attributes {
msrv: Msrv,
}
impl_lint_pass!(Attributes => [
ALLOW_ATTRIBUTES_WITHOUT_REASON,
INLINE_ALWAYS,
DEPRECATED_SEMVER,
@@ -480,6 +515,13 @@
DUPLICATED_ATTRIBUTES,
]);
impl Attributes {
#[must_use]
pub fn new(msrv: Msrv) -> Self {
Self { msrv }
}
}
impl<'tcx> LateLintPass<'tcx> for Attributes {
fn check_crate(&mut self, cx: &LateContext<'tcx>) {
blanket_clippy_restriction_lints::check_command_line(cx);
@@ -492,8 +534,15 @@ fn check_attribute(&mut self, cx: &LateContext<'tcx>, attr: &'tcx Attribute) {
if is_lint_level(ident.name, attr.id) {
blanket_clippy_restriction_lints::check(cx, ident.name, items);
}
if matches!(ident.name, sym::allow) {
if self.msrv.meets(msrvs::LINT_REASONS_STABILIZATION) {
allow_attributes::check(cx, attr);
}
}
if matches!(ident.name, sym::allow | sym::expect) {
allow_attributes_without_reason::check(cx, ident.name, items, attr);
if self.msrv.meets(msrvs::LINT_REASONS_STABILIZATION) {
allow_attributes_without_reason::check(cx, ident.name, items, attr);
}
}
if items.is_empty() || !attr.has_name(sym::deprecated) {
return;
@@ -537,6 +586,8 @@ fn check_trait_item(&mut self, cx: &LateContext<'tcx>, item: &'tcx TraitItem<'_>
inline_always::check(cx, item.span, item.ident.name, cx.tcx.hir().attrs(item.hir_id()));
}
}
extract_msrv_attr!(LateContext);
}
pub struct EarlyAttributes {
@@ -38,7 +38,6 @@
#[cfg(feature = "internal")]
crate::utils::internal_lints::unsorted_clippy_utils_paths::UNSORTED_CLIPPY_UTILS_PATHS_INFO,
crate::absolute_paths::ABSOLUTE_PATHS_INFO,
crate::allow_attributes::ALLOW_ATTRIBUTES_INFO,
crate::almost_complete_range::ALMOST_COMPLETE_RANGE_INFO,
crate::approx_const::APPROX_CONSTANT_INFO,
crate::arc_with_non_send_sync::ARC_WITH_NON_SEND_SYNC_INFO,
@@ -49,6 +48,7 @@
crate::assertions_on_result_states::ASSERTIONS_ON_RESULT_STATES_INFO,
crate::assigning_clones::ASSIGNING_CLONES_INFO,
crate::async_yields_async::ASYNC_YIELDS_ASYNC_INFO,
crate::attrs::ALLOW_ATTRIBUTES_INFO,
crate::attrs::ALLOW_ATTRIBUTES_WITHOUT_REASON_INFO,
crate::attrs::BLANKET_CLIPPY_RESTRICTION_LINTS_INFO,
crate::attrs::DEPRECATED_CFG_ATTR_INFO,
@@ -6,7 +6,7 @@
expr_use_ctxt, get_parent_expr, is_block_like, is_lint_allowed, path_to_local, DefinedTy, ExprUseNode,
};
use core::mem;
use rustc_ast::util::parser::{PREC_POSTFIX, PREC_PREFIX};
use rustc_ast::util::parser::{PREC_UNAMBIGUOUS, PREC_PREFIX};
use rustc_data_structures::fx::FxIndexMap;
use rustc_errors::Applicability;
use rustc_hir::intravisit::{walk_ty, Visitor};
@@ -1013,7 +1013,7 @@ fn report<'tcx>(
let (precedence, calls_field) = match cx.tcx.parent_hir_node(data.first_expr.hir_id) {
Node::Expr(e) => match e.kind {
ExprKind::Call(callee, _) if callee.hir_id != data.first_expr.hir_id => (0, false),
ExprKind::Call(..) => (PREC_POSTFIX, matches!(expr.kind, ExprKind::Field(..))),
ExprKind::Call(..) => (PREC_UNAMBIGUOUS, matches!(expr.kind, ExprKind::Field(..))),
_ => (e.precedence().order(), false),
},
_ => (0, false),
@@ -1160,7 +1160,7 @@ fn check_local_usage(&mut self, cx: &LateContext<'tcx>, e: &Expr<'tcx>, local: H
},
Some(parent) if !parent.span.from_expansion() => {
// Double reference might be needed at this point.
if parent.precedence().order() == PREC_POSTFIX {
if parent.precedence().order() == PREC_UNAMBIGUOUS {
// Parentheses would be needed here, don't lint.
*outer_pat = None;
} else {
+2 -4
View File
@@ -6,7 +6,7 @@
#![feature(if_let_guard)]
#![feature(iter_intersperse)]
#![feature(let_chains)]
#![feature(lint_reasons)]
#![cfg_attr(bootstrap, feature(lint_reasons))]
#![feature(never_type)]
#![feature(rustc_private)]
#![feature(stmt_expr_attributes)]
@@ -73,7 +73,6 @@
// begin lints modules, do not remove this comment, its used in `update_lints`
mod absolute_paths;
mod allow_attributes;
mod almost_complete_range;
mod approx_const;
mod arc_with_non_send_sync;
@@ -699,7 +698,7 @@ pub fn register_lints(store: &mut rustc_lint::LintStore, conf: &'static Conf) {
store.register_late_pass(|_| Box::new(mut_reference::UnnecessaryMutPassed));
store.register_late_pass(|_| Box::<significant_drop_tightening::SignificantDropTightening<'_>>::default());
store.register_late_pass(|_| Box::new(len_zero::LenZero));
store.register_late_pass(|_| Box::new(attrs::Attributes));
store.register_late_pass(move |_| Box::new(attrs::Attributes::new(msrv())));
store.register_late_pass(|_| Box::new(blocks_in_conditions::BlocksInConditions));
store.register_late_pass(|_| Box::new(unicode::Unicode));
store.register_late_pass(|_| Box::new(uninit_vec::UninitVec));
@@ -1065,7 +1064,6 @@ pub fn register_lints(store: &mut rustc_lint::LintStore, conf: &'static Conf) {
store.register_late_pass(|_| Box::new(needless_maybe_sized::NeedlessMaybeSized));
store.register_late_pass(|_| Box::new(redundant_async_block::RedundantAsyncBlock));
store.register_late_pass(|_| Box::new(let_with_type_underscore::UnderscoreTyped));
store.register_late_pass(|_| Box::new(allow_attributes::AllowAttribute));
store.register_late_pass(move |_| Box::new(manual_main_separator_str::ManualMainSeparatorStr::new(msrv())));
store.register_late_pass(|_| Box::new(unnecessary_struct_initialization::UnnecessaryStruct));
store.register_late_pass(move |_| {
@@ -7,7 +7,7 @@
can_move_expr_to_closure, is_else_clause, is_lint_allowed, is_res_lang_ctor, path_res, path_to_local_id,
peel_blocks, peel_hir_expr_refs, peel_hir_expr_while, CaptureKind,
};
use rustc_ast::util::parser::PREC_POSTFIX;
use rustc_ast::util::parser::PREC_UNAMBIGUOUS;
use rustc_errors::Applicability;
use rustc_hir::def::Res;
use rustc_hir::LangItem::{OptionNone, OptionSome};
@@ -117,7 +117,7 @@ pub(super) fn check_with<'tcx, F>(
// it's being passed by value.
let scrutinee = peel_hir_expr_refs(scrutinee).0;
let (scrutinee_str, _) = snippet_with_context(cx, scrutinee.span, expr_ctxt, "..", &mut app);
let scrutinee_str = if scrutinee.span.eq_ctxt(expr.span) && scrutinee.precedence().order() < PREC_POSTFIX {
let scrutinee_str = if scrutinee.span.eq_ctxt(expr.span) && scrutinee.precedence().order() < PREC_UNAMBIGUOUS {
format!("({scrutinee_str})")
} else {
scrutinee_str.into()
+1 -1
View File
@@ -5,7 +5,7 @@
#![feature(f16)]
#![feature(if_let_guard)]
#![feature(let_chains)]
#![feature(lint_reasons)]
#![cfg_attr(bootstrap, feature(lint_reasons))]
#![feature(never_type)]
#![feature(rustc_private)]
#![feature(assert_matches)]
+1 -1
View File
@@ -2,7 +2,7 @@
#![allow(rustc::untranslatable_diagnostic)]
#![feature(rustc_private)]
#![feature(let_chains)]
#![feature(lint_reasons)]
#![cfg_attr(bootstrap, feature(lint_reasons))]
#![cfg_attr(feature = "deny-warnings", deny(warnings))]
// warn on lints, that are included in `rust-lang/rust`s bootstrap
#![warn(rust_2018_idioms, unused_lifetimes)]
@@ -1,4 +1,3 @@
#![feature(lint_reasons)]
mod a;

Some files were not shown because too many files have changed in this diff Show More