Merge remote-tracking branch 'origin/master' into gen

This commit is contained in:
Alex Crichton
2017-08-07 22:30:39 -07:00
280 changed files with 17756 additions and 2240 deletions
Vendored
-1
View File
@@ -437,7 +437,6 @@ opt local-rust 0 "use an installed rustc rather than downloading a snapshot"
opt local-rebuild 0 "assume local-rust matches the current version, for rebuilds; implies local-rust, and is implied if local-rust already matches the current version"
opt llvm-static-stdcpp 0 "statically link to libstdc++ for LLVM"
opt llvm-link-shared 0 "prefer shared linking to LLVM (llvm-config --link-shared)"
opt llvm-clean-rebuild 0 "delete LLVM build directory on rebuild"
opt rpath 1 "build rpaths into rustc itself"
opt stage0-landing-pads 1 "enable landing pads during bootstrap with stage0"
# This is used by the automation to produce single-target nightlies
+31 -11
View File
@@ -187,7 +187,7 @@ dependencies = [
"curl 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)",
"docopt 0.8.1 (registry+https://github.com/rust-lang/crates.io-index)",
"env_logger 0.4.3 (registry+https://github.com/rust-lang/crates.io-index)",
"error-chain 0.10.0 (registry+https://github.com/rust-lang/crates.io-index)",
"error-chain 0.11.0-rc.2 (registry+https://github.com/rust-lang/crates.io-index)",
"filetime 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)",
"flate2 0.2.19 (registry+https://github.com/rust-lang/crates.io-index)",
"fs2 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)",
@@ -323,7 +323,7 @@ name = "crates-io"
version = "0.11.0"
dependencies = [
"curl 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)",
"error-chain 0.10.0 (registry+https://github.com/rust-lang/crates.io-index)",
"error-chain 0.11.0-rc.2 (registry+https://github.com/rust-lang/crates.io-index)",
"serde 1.0.10 (registry+https://github.com/rust-lang/crates.io-index)",
"serde_derive 1.0.10 (registry+https://github.com/rust-lang/crates.io-index)",
"serde_json 1.0.2 (registry+https://github.com/rust-lang/crates.io-index)",
@@ -436,6 +436,14 @@ dependencies = [
"backtrace 0.3.2 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
name = "error-chain"
version = "0.11.0-rc.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
"backtrace 0.3.2 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
name = "error_index_generator"
version = "0.0.0"
@@ -1132,8 +1140,8 @@ dependencies = [
"lazy_static 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)",
"log 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)",
"racer 2.0.9 (registry+https://github.com/rust-lang/crates.io-index)",
"rls-analysis 0.4.5 (registry+https://github.com/rust-lang/crates.io-index)",
"rls-data 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)",
"rls-analysis 0.6.5 (registry+https://github.com/rust-lang/crates.io-index)",
"rls-data 0.10.0 (registry+https://github.com/rust-lang/crates.io-index)",
"rls-span 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)",
"rls-vfs 0.4.4 (registry+https://github.com/rust-lang/crates.io-index)",
"rustfmt-nightly 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)",
@@ -1147,23 +1155,25 @@ dependencies = [
[[package]]
name = "rls-analysis"
version = "0.4.5"
version = "0.6.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
"derive-new 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)",
"log 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)",
"rls-data 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)",
"rls-data 0.10.0 (registry+https://github.com/rust-lang/crates.io-index)",
"rls-span 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)",
"rustc-serialize 0.3.24 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
name = "rls-data"
version = "0.9.0"
version = "0.10.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
"rls-span 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)",
"rustc-serialize 0.3.24 (registry+https://github.com/rust-lang/crates.io-index)",
"serde 1.0.10 (registry+https://github.com/rust-lang/crates.io-index)",
"serde_derive 1.0.10 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
@@ -1242,6 +1252,13 @@ dependencies = [
"syntax_pos 0.0.0",
]
[[package]]
name = "rustc_apfloat"
version = "0.0.0"
dependencies = [
"rustc_bitflags 0.0.0",
]
[[package]]
name = "rustc_asan"
version = "0.0.0"
@@ -1299,6 +1316,7 @@ dependencies = [
name = "rustc_const_math"
version = "0.0.0"
dependencies = [
"rustc_apfloat 0.0.0",
"serialize 0.0.0",
"syntax 0.0.0",
]
@@ -1496,10 +1514,11 @@ name = "rustc_save_analysis"
version = "0.0.0"
dependencies = [
"log 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)",
"rls-data 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)",
"rls-data 0.10.0 (registry+https://github.com/rust-lang/crates.io-index)",
"rls-span 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)",
"rustc 0.0.0",
"rustc-serialize 0.3.24 (registry+https://github.com/rust-lang/crates.io-index)",
"rustc_data_structures 0.0.0",
"rustc_typeck 0.0.0",
"syntax 0.0.0",
"syntax_pos 0.0.0",
@@ -1509,11 +1528,11 @@ dependencies = [
name = "rustc_trans"
version = "0.0.0"
dependencies = [
"crossbeam 0.2.10 (registry+https://github.com/rust-lang/crates.io-index)",
"flate2 0.2.19 (registry+https://github.com/rust-lang/crates.io-index)",
"gcc 0.3.51 (registry+https://github.com/rust-lang/crates.io-index)",
"jobserver 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)",
"log 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)",
"num_cpus 1.6.2 (registry+https://github.com/rust-lang/crates.io-index)",
"owning_ref 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)",
"rustc 0.0.0",
"rustc-demangle 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)",
@@ -2145,6 +2164,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
"checksum env_logger 0.3.5 (registry+https://github.com/rust-lang/crates.io-index)" = "15abd780e45b3ea4f76b4e9a26ff4843258dd8a3eed2775a0e7368c2e7936c2f"
"checksum env_logger 0.4.3 (registry+https://github.com/rust-lang/crates.io-index)" = "3ddf21e73e016298f5cb37d6ef8e8da8e39f91f9ec8b0df44b7deb16a9f8cd5b"
"checksum error-chain 0.10.0 (registry+https://github.com/rust-lang/crates.io-index)" = "d9435d864e017c3c6afeac1654189b06cdb491cf2ff73dbf0d73b0f292f42ff8"
"checksum error-chain 0.11.0-rc.2 (registry+https://github.com/rust-lang/crates.io-index)" = "38d3a55d9a7a456748f2a3912c0941a5d9a68006eb15b3c3c9836b8420dc102d"
"checksum filetime 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)" = "5363ab8e4139b8568a6237db5248646e5a8a2f89bd5ccb02092182b11fd3e922"
"checksum flate2 0.2.19 (registry+https://github.com/rust-lang/crates.io-index)" = "36df0166e856739905cd3d7e0b210fe818592211a008862599845e012d8d304c"
"checksum fnv 1.0.5 (registry+https://github.com/rust-lang/crates.io-index)" = "6cc484842f1e2884faf56f529f960cc12ad8c71ce96cc7abba0a067c98fee344"
@@ -2209,8 +2229,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
"checksum regex 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)" = "1731164734096285ec2a5ec7fea5248ae2f5485b3feeb0115af4fda2183b2d1b"
"checksum regex-syntax 0.3.9 (registry+https://github.com/rust-lang/crates.io-index)" = "f9ec002c35e86791825ed294b50008eea9ddfc8def4420124fbc6b08db834957"
"checksum regex-syntax 0.4.1 (registry+https://github.com/rust-lang/crates.io-index)" = "ad890a5eef7953f55427c50575c680c42841653abd2b028b68cd223d157f62db"
"checksum rls-analysis 0.4.5 (registry+https://github.com/rust-lang/crates.io-index)" = "0127cfae9c726461facbbbc8327e782adf8afd61f7fcc6adf8ea9ad8fc428ed0"
"checksum rls-data 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)" = "f274ec7f966337dc2601fe9bde060b551d1293c277af782dc65cd7200ca070c0"
"checksum rls-analysis 0.6.5 (registry+https://github.com/rust-lang/crates.io-index)" = "d2cb40c0371765897ae428b5706bb17135705ad4f6d1b8b6afbaabcf8c9b5cff"
"checksum rls-data 0.10.0 (registry+https://github.com/rust-lang/crates.io-index)" = "11d339f1888e33e74d8032de0f83c40b2bdaaaf04a8cfc03b32186c3481fb534"
"checksum rls-span 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)" = "5d7c7046dc6a92f2ae02ed302746db4382e75131b9ce20ce967259f6b5867a6a"
"checksum rls-vfs 0.4.4 (registry+https://github.com/rust-lang/crates.io-index)" = "ffd34691a510938bb67fe0444fb363103c73ffb31c121d1e16bc92d8945ea8ff"
"checksum rustc-demangle 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)" = "3058a43ada2c2d0b92b3ae38007a2d0fa5e9db971be260e0171408a4ff471c95"
+1 -1
View File
@@ -188,7 +188,7 @@ fn main() {
cmd.arg("-Zsave-analysis");
cmd.env("RUST_SAVE_ANALYSIS_CONFIG",
"{\"output_file\": null,\"full_docs\": false,\"pub_only\": true,\
\"signatures\": false,\"borrow_data\": false}");
\"distro_crate\": true,\"signatures\": false,\"borrow_data\": false}");
}
// Dealing with rpath here is a little special, so let's go into some
+3 -1
View File
@@ -28,6 +28,7 @@
use flags::Subcommand;
use doc;
use tool;
use native;
pub use Compiler;
@@ -256,7 +257,8 @@ macro_rules! describe {
compile::StartupObjects, tool::BuildManifest, tool::Rustbook, tool::ErrorIndex,
tool::UnstableBookGen, tool::Tidy, tool::Linkchecker, tool::CargoTest,
tool::Compiletest, tool::RemoteTestServer, tool::RemoteTestClient,
tool::RustInstaller, tool::Cargo, tool::Rls, tool::Rustdoc),
tool::RustInstaller, tool::Cargo, tool::Rls, tool::Rustdoc,
native::Llvm),
Kind::Test => describe!(check::Tidy, check::Bootstrap, check::DefaultCompiletest,
check::HostCompiletest, check::Crate, check::CrateLibrustc, check::Linkcheck,
check::Cargotest, check::Cargo, check::Rls, check::Docs, check::ErrorIndex,
+13 -67
View File
@@ -1050,11 +1050,8 @@ fn run(self, builder: &Builder) {
dylib_path.insert(0, PathBuf::from(&*builder.sysroot_libdir(compiler, target)));
cargo.env(dylib_path_var(), env::join_paths(&dylib_path).unwrap());
if target.contains("emscripten") || build.remote_tested(target) {
cargo.arg("--no-run");
}
cargo.arg("--");
cargo.args(&build.flags.cmd.test_args());
if build.config.quiet_tests {
cargo.arg("--quiet");
@@ -1063,75 +1060,24 @@ fn run(self, builder: &Builder) {
let _time = util::timeit();
if target.contains("emscripten") {
build.run(&mut cargo);
krate_emscripten(build, compiler, target, mode);
cargo.env(format!("CARGO_TARGET_{}_RUNNER", envify(&target)),
build.config.nodejs.as_ref().expect("nodejs not configured"));
} else if build.remote_tested(target) {
build.run(&mut cargo);
krate_remote(builder, compiler, target, mode);
} else {
cargo.args(&build.flags.cmd.test_args());
try_run(build, &mut cargo);
cargo.env(format!("CARGO_TARGET_{}_RUNNER", envify(&target)),
format!("{} run",
builder.tool_exe(Tool::RemoteTestClient).display()));
}
try_run(build, &mut cargo);
}
}
fn krate_emscripten(build: &Build,
compiler: Compiler,
target: Interned<String>,
mode: Mode) {
let out_dir = build.cargo_out(compiler, mode, target);
let tests = find_tests(&out_dir.join("deps"), target);
let nodejs = build.config.nodejs.as_ref().expect("nodejs not configured");
for test in tests {
println!("running {}", test.display());
let mut cmd = Command::new(nodejs);
cmd.arg(&test);
if build.config.quiet_tests {
cmd.arg("--quiet");
fn envify(s: &str) -> String {
s.chars().map(|c| {
match c {
'-' => '_',
c => c,
}
try_run(build, &mut cmd);
}
}
fn krate_remote(builder: &Builder,
compiler: Compiler,
target: Interned<String>,
mode: Mode) {
let build = builder.build;
let out_dir = build.cargo_out(compiler, mode, target);
let tests = find_tests(&out_dir.join("deps"), target);
let tool = builder.tool_exe(Tool::RemoteTestClient);
for test in tests {
let mut cmd = Command::new(&tool);
cmd.arg("run")
.arg(&test);
if build.config.quiet_tests {
cmd.arg("--quiet");
}
cmd.args(&build.flags.cmd.test_args());
try_run(build, &mut cmd);
}
}
fn find_tests(dir: &Path, target: Interned<String>) -> Vec<PathBuf> {
let mut dst = Vec::new();
for e in t!(dir.read_dir()).map(|e| t!(e)) {
let file_type = t!(e.file_type());
if !file_type.is_file() {
continue
}
let filename = e.file_name().into_string().unwrap();
if (target.contains("windows") && filename.ends_with(".exe")) ||
(!target.contains("windows") && !filename.contains(".")) ||
(target.contains("emscripten") &&
filename.ends_with(".js") &&
!filename.ends_with(".asm.js")) {
dst.push(e.path());
}
}
dst
}).flat_map(|c| c.to_uppercase()).collect()
}
/// Some test suites are run inside emulators or on remote devices, and most
-4
View File
@@ -62,7 +62,6 @@ pub struct Config {
pub llvm_targets: Option<String>,
pub llvm_experimental_targets: Option<String>,
pub llvm_link_jobs: Option<u32>,
pub llvm_clean_rebuild: bool,
// rust codegen options
pub rust_optimize: bool,
@@ -203,7 +202,6 @@ struct Llvm {
targets: Option<String>,
experimental_targets: Option<String>,
link_jobs: Option<u32>,
clean_rebuild: Option<bool>,
}
#[derive(Deserialize, Default, Clone)]
@@ -352,7 +350,6 @@ pub fn parse(build: &str, file: Option<PathBuf>) -> Config {
set(&mut config.llvm_release_debuginfo, llvm.release_debuginfo);
set(&mut config.llvm_version_check, llvm.version_check);
set(&mut config.llvm_static_stdcpp, llvm.static_libstdcpp);
set(&mut config.llvm_clean_rebuild, llvm.clean_rebuild);
config.llvm_targets = llvm.targets.clone();
config.llvm_experimental_targets = llvm.experimental_targets.clone();
config.llvm_link_jobs = llvm.link_jobs;
@@ -477,7 +474,6 @@ macro_rules! check {
("LLVM_VERSION_CHECK", self.llvm_version_check),
("LLVM_STATIC_STDCPP", self.llvm_static_stdcpp),
("LLVM_LINK_SHARED", self.llvm_link_shared),
("LLVM_CLEAN_REBUILD", self.llvm_clean_rebuild),
("OPTIMIZE", self.rust_optimize),
("DEBUG_ASSERTIONS", self.rust_debug_assertions),
("DEBUGINFO", self.rust_debuginfo),
+1 -6
View File
@@ -69,11 +69,6 @@
# controlled by rustbuild's -j parameter.
#link-jobs = 0
# Delete LLVM build directory on LLVM rebuild.
# This option defaults to `false` for local development, but CI may want to
# always perform clean full builds (possibly accelerated by (s)ccache).
#clean-rebuild = false
# =============================================================================
# General build configuration options
# =============================================================================
@@ -208,7 +203,7 @@
#codegen-units = 1
# Whether or not debug assertions are enabled for the compiler and standard
# library
# library. Also enables compilation of debug! and trace! logging macros.
#debug-assertions = false
# Whether or not debuginfo is emitted
+21 -3
View File
@@ -48,6 +48,10 @@ fn should_run(run: ShouldRun) -> ShouldRun {
run.path("src/llvm")
}
fn make_run(run: RunConfig) {
run.builder.ensure(Llvm { target: run.target })
}
/// Compile LLVM for `target`.
fn run(self, builder: &Builder) {
let build = builder.build;
@@ -76,9 +80,6 @@ fn run(self, builder: &Builder) {
return
}
}
if build.config.llvm_clean_rebuild {
drop(fs::remove_dir_all(&out_dir));
}
let _folder = build.fold_output(|| "llvm");
println!("Building LLVM for {}", target);
@@ -128,6 +129,15 @@ fn run(self, builder: &Builder) {
.define("LLVM_TARGET_ARCH", target.split('-').next().unwrap())
.define("LLVM_DEFAULT_TARGET_TRIPLE", target);
// This setting makes the LLVM tools link to the dynamic LLVM library,
// which saves both memory during parallel links and overall disk space
// for the tools. We don't distribute any of those tools, so this is
// just a local concern. However, it doesn't work well everywhere.
if target.contains("linux-gnu") || target.contains("apple-darwin") {
cfg.define("LLVM_LINK_LLVM_DYLIB", "ON");
}
if target.contains("msvc") {
cfg.define("LLVM_USE_CRT_DEBUG", "MT");
cfg.define("LLVM_USE_CRT_RELEASE", "MT");
@@ -154,6 +164,14 @@ fn run(self, builder: &Builder) {
let host = build.llvm_out(build.build).join("bin/llvm-tblgen");
cfg.define("CMAKE_CROSSCOMPILING", "True")
.define("LLVM_TABLEGEN", &host);
if target.contains("netbsd") {
cfg.define("CMAKE_SYSTEM_NAME", "NetBSD");
} else if target.contains("freebsd") {
cfg.define("CMAKE_SYSTEM_NAME", "FreeBSD");
}
cfg.define("LLVM_NATIVE_BUILD", build.llvm_out(build.build).join("build"));
}
let sanitize_cc = |cc: &Path| {
+1 -20
View File
@@ -13,7 +13,6 @@
extern crate filetime;
use std::fs::File;
use std::io;
use std::path::{Path, PathBuf};
use std::process::{Command, Stdio};
use std::{fs, env};
@@ -211,7 +210,7 @@ pub fn native_lib_boilerplate(src_name: &str,
let out_dir = env::var_os("RUSTBUILD_NATIVE_DIR").unwrap_or(env::var_os("OUT_DIR").unwrap());
let out_dir = PathBuf::from(out_dir).join(out_name);
t!(create_dir_racy(&out_dir));
t!(fs::create_dir_all(&out_dir));
if link_name.contains('=') {
println!("cargo:rustc-link-lib={}", link_name);
} else {
@@ -260,21 +259,3 @@ fn fail(s: &str) -> ! {
println!("\n\n{}\n\n", s);
std::process::exit(1);
}
fn create_dir_racy(path: &Path) -> io::Result<()> {
match fs::create_dir(path) {
Ok(()) => return Ok(()),
Err(ref e) if e.kind() == io::ErrorKind::AlreadyExists => return Ok(()),
Err(ref e) if e.kind() == io::ErrorKind::NotFound => {}
Err(e) => return Err(e),
}
match path.parent() {
Some(p) => try!(create_dir_racy(p)),
None => return Err(io::Error::new(io::ErrorKind::Other, "failed to create whole tree")),
}
match fs::create_dir(path) {
Ok(()) => Ok(()),
Err(ref e) if e.kind() == io::ErrorKind::AlreadyExists => Ok(()),
Err(e) => Err(e),
}
}
@@ -13,7 +13,7 @@ set -ex
ARCH=$1
BINUTILS=2.25.1
GCC=5.3.0
GCC=6.4.0
hide_output() {
set +x
@@ -86,7 +86,7 @@ rm -rf freebsd
# Finally, download and build gcc to target FreeBSD
mkdir gcc
cd gcc
curl https://ftp.gnu.org/gnu/gcc/gcc-$GCC/gcc-$GCC.tar.bz2 | tar xjf -
curl https://ftp.gnu.org/gnu/gcc/gcc-$GCC/gcc-$GCC.tar.gz | tar xzf -
cd gcc-$GCC
./contrib/download_prerequisites
@@ -13,7 +13,7 @@ set -ex
ARCH=$1
BINUTILS=2.25.1
GCC=5.3.0
GCC=6.4.0
hide_output() {
set +x
@@ -86,7 +86,7 @@ rm -rf freebsd
# Finally, download and build gcc to target FreeBSD
mkdir gcc
cd gcc
curl https://ftp.gnu.org/gnu/gcc/gcc-$GCC/gcc-$GCC.tar.bz2 | tar xjf -
curl https://ftp.gnu.org/gnu/gcc/gcc-$GCC/gcc-$GCC.tar.gz | tar xzf -
cd gcc-$GCC
./contrib/download_prerequisites
+7
View File
@@ -67,6 +67,13 @@ else
args="$args --env SCCACHE_DIR=/sccache --volume $HOME/.cache/sccache:/sccache"
fi
# Run containers as privileged as it should give them access to some more
# syscalls such as ptrace and whatnot. In the upgrade to LLVM 5.0 it was
# discovered that the leak sanitizer apparently needs these syscalls nowadays so
# we'll need `--privileged` for at least the `x86_64-gnu` builder, so this just
# goes ahead and sets it for all builders.
args="$args --privileged"
exec docker \
run \
--volume "$root_dir:/checkout:ro" \
-1
View File
@@ -31,7 +31,6 @@ RUST_CONFIGURE_ARGS="$RUST_CONFIGURE_ARGS --enable-sccache"
RUST_CONFIGURE_ARGS="$RUST_CONFIGURE_ARGS --disable-manage-submodules"
RUST_CONFIGURE_ARGS="$RUST_CONFIGURE_ARGS --enable-locked-deps"
RUST_CONFIGURE_ARGS="$RUST_CONFIGURE_ARGS --enable-cargo-openssl-static"
RUST_CONFIGURE_ARGS="$RUST_CONFIGURE_ARGS --enable-llvm-clean-rebuild"
if [ "$DIST_SRC" = "" ]; then
RUST_CONFIGURE_ARGS="$RUST_CONFIGURE_ARGS --disable-dist-src"
+58 -2
View File
@@ -3,8 +3,15 @@
"intrinsic_prefix": "_vec_",
"llvm_prefix": "llvm.ppc.altivec.",
"number_info": {
"unsigned": {},
"signed": {}
"unsigned": {
"kind" : "u",
"data_type_short": { "8": "b", "16": "h", "32": "w", "64": "d" }
},
"signed": {
"kind" : "s",
"data_type_short": { "8": "b", "16": "h", "32": "w", "64": "d" }
},
"float": {}
},
"width_info": {
"128": { "width": "" }
@@ -16,6 +23,55 @@
"llvm": "vperm",
"ret": "s32",
"args": ["0", "0", "s8"]
},
{
"intrinsic": "mradds",
"width": [128],
"llvm": "vmhraddshs",
"ret": "s16",
"args": ["0", "0", "0"]
},
{
"intrinsic": "cmpb",
"width": [128],
"llvm": "vcmpbfp",
"ret": "s32",
"args": ["f32", "f32"]
},
{
"intrinsic": "cmpeq{0.data_type_short}",
"width": [128],
"llvm": "vcmpequ{0.data_type_short}",
"ret": "s(8-32)",
"args": ["0", "0"]
},
{
"intrinsic": "cmpgt{1.kind}{1.data_type_short}",
"width": [128],
"llvm": "vcmpgt{1.kind}{1.data_type_short}",
"ret": "s(8-32)",
"args": ["0u", "1"]
},
{
"intrinsic": "cmpgt{1.kind}{1.data_type_short}",
"width": [128],
"llvm": "vcmpgt{1.kind}{1.data_type_short}",
"ret": "s(8-32)",
"args": ["0", "1"]
},
{
"intrinsic": "max{0.kind}{0.data_type_short}",
"width": [128],
"llvm": "vmax{0.kind}{0.data_type_short}",
"ret": "i(8-32)",
"args": ["0", "0"]
},
{
"intrinsic": "min{0.kind}{0.data_type_short}",
"width": [128],
"llvm": "vmin{0.kind}{0.data_type_short}",
"ret": "i(8-32)",
"args": ["0", "0"]
}
]
}
+1
View File
@@ -215,6 +215,7 @@ pub fn padding_needed_for(&self, align: usize) -> usize {
/// of each element in the array.
///
/// On arithmetic overflow, returns `None`.
#[inline]
pub fn repeat(&self, n: usize) -> Option<(Self, usize)> {
let padded_size = match self.size.checked_add(self.padding_needed_for(self.align)) {
None => return None,
+32 -11
View File
@@ -273,7 +273,10 @@ pub fn is_char_boundary(&self, index: usize) -> bool {
core_str::StrExt::is_char_boundary(self, index)
}
/// Converts a string slice to a byte slice.
/// Converts a string slice to a byte slice. To convert the byte slice back
/// into a string slice, use the [`str::from_utf8`] function.
///
/// [`str::from_utf8`]: ./str/fn.from_utf8.html
///
/// # Examples
///
@@ -289,7 +292,11 @@ pub fn as_bytes(&self) -> &[u8] {
core_str::StrExt::as_bytes(self)
}
/// Converts a mutable string slice to a mutable byte slice.
/// Converts a mutable string slice to a mutable byte slice. To convert the
/// mutable byte slice back into a mutable string slice, use the
/// [`str::from_utf8_mut`] function.
///
/// [`str::from_utf8_mut`]: ./str/fn.from_utf8_mut.html
#[stable(feature = "str_mut_extras", since = "1.20.0")]
#[inline(always)]
pub unsafe fn as_bytes_mut(&mut self) -> &mut [u8] {
@@ -328,11 +335,16 @@ pub fn as_ptr(&self) -> *const u8 {
/// # Examples
///
/// ```
/// let v = "🗻∈🌏";
/// let mut v = String::from("🗻∈🌏");
///
/// assert_eq!(Some("🗻"), v.get(0..4));
/// assert!(v.get(1..).is_none());
/// assert!(v.get(..8).is_none());
/// assert!(v.get(..42).is_none());
///
/// // indices not on UTF-8 sequence boundaries
/// assert!(v.get_mut(1..).is_none());
/// assert!(v.get_mut(..8).is_none());
///
/// // out of bounds
/// assert!(v.get_mut(..42).is_none());
/// ```
#[stable(feature = "str_checked_slicing", since = "1.20.0")]
#[inline]
@@ -351,9 +363,14 @@ pub fn get<I: SliceIndex<str>>(&self, i: I) -> Option<&I::Output> {
///
/// ```
/// let mut v = String::from("🗻∈🌏");
///
/// assert_eq!(Some("🗻"), v.get_mut(0..4).map(|v| &*v));
///
/// // indices not on UTF-8 sequence boundaries
/// assert!(v.get_mut(1..).is_none());
/// assert!(v.get_mut(..8).is_none());
///
/// // out of bounds
/// assert!(v.get_mut(..42).is_none());
/// ```
#[stable(feature = "str_checked_slicing", since = "1.20.0")]
@@ -563,12 +580,16 @@ pub fn split_at(&self, mid: usize) -> (&str, &str) {
/// Basic usage:
///
/// ```
/// use std::ascii::AsciiExt;
///
/// let mut s = "Per Martin-Löf".to_string();
///
/// let (first, last) = s.split_at_mut(3);
///
/// assert_eq!("Per", first);
/// assert_eq!(" Martin-Löf", last);
/// {
/// let (first, last) = s.split_at_mut(3);
/// first.make_ascii_uppercase();
/// assert_eq!("PER", first);
/// assert_eq!(" Martin-Löf", last);
/// }
/// assert_eq!("PER Martin-Löf", s);
/// ```
#[inline]
#[stable(feature = "str_split_at", since = "1.4.0")]
+28
View File
@@ -188,6 +188,34 @@
/// A mutable memory location.
///
/// # Examples
///
/// Here you can see how using `Cell<T>` allows to use mutable field inside
/// immutable struct (which is also called 'interior mutability').
///
/// ```
/// use std::cell::Cell;
///
/// struct SomeStruct {
/// regular_field: u8,
/// special_field: Cell<u8>,
/// }
///
/// let my_struct = SomeStruct {
/// regular_field: 0,
/// special_field: Cell::new(1),
/// };
///
/// let new_value = 100;
///
/// // ERROR, because my_struct is immutable
/// // my_struct.regular_field = new_value;
///
/// // WORKS, although `my_struct` is immutable, field `special_field` is mutable because it is Cell
/// my_struct.special_field.set(new_value);
/// assert_eq!(my_struct.special_field.get(), new_value);
/// ```
///
/// See the [module-level documentation](index.html) for more.
#[stable(feature = "rust1", since = "1.0.0")]
pub struct Cell<T> {
+1 -1
View File
@@ -1247,7 +1247,7 @@ fn by_ref(&mut self) -> &mut Self where Self: Sized { self }
/// assert_eq!(vec![2, 4, 6], doubled);
/// ```
///
/// Because `collect()` cares about what you're collecting into, you can
/// Because `collect()` only cares about what you're collecting into, you can
/// still use a partial type hint, `_`, with the turbofish:
///
/// ```
+8 -25
View File
@@ -147,22 +147,13 @@ pub trait FromIterator<A>: Sized {
///
/// ```
/// let v = vec![1, 2, 3];
///
/// let mut iter = v.into_iter();
///
/// let n = iter.next();
/// assert_eq!(Some(1), n);
///
/// let n = iter.next();
/// assert_eq!(Some(2), n);
///
/// let n = iter.next();
/// assert_eq!(Some(3), n);
///
/// let n = iter.next();
/// assert_eq!(None, n);
/// assert_eq!(Some(1), iter.next());
/// assert_eq!(Some(2), iter.next());
/// assert_eq!(Some(3), iter.next());
/// assert_eq!(None, iter.next());
/// ```
///
/// Implementing `IntoIterator` for your type:
///
/// ```
@@ -227,20 +218,12 @@ pub trait IntoIterator {
///
/// ```
/// let v = vec![1, 2, 3];
///
/// let mut iter = v.into_iter();
///
/// let n = iter.next();
/// assert_eq!(Some(1), n);
///
/// let n = iter.next();
/// assert_eq!(Some(2), n);
///
/// let n = iter.next();
/// assert_eq!(Some(3), n);
///
/// let n = iter.next();
/// assert_eq!(None, n);
/// assert_eq!(Some(1), iter.next());
/// assert_eq!(Some(2), iter.next());
/// assert_eq!(Some(3), iter.next());
/// assert_eq!(None, iter.next());
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
fn into_iter(self) -> Self::IntoIter;
+4
View File
@@ -131,6 +131,10 @@ pub const fn max_value() -> Self {
///
/// Leading and trailing whitespace represent an error.
///
/// # Panics
///
/// This function panics if `radix` is not in the range from 2 to 36.
///
/// # Examples
///
/// Basic usage:
+71 -8
View File
@@ -509,14 +509,49 @@ fn joint(first: char, rest: Token, is_joint: bool, span: &mut syntax_pos::Span,
Ident(ident) | Lifetime(ident) => TokenNode::Term(Term(ident.name)),
Literal(..) | DocComment(..) => TokenNode::Literal(self::Literal(token)),
Interpolated(ref nt) => __internal::with_sess(|(sess, _)| {
TokenNode::Group(Delimiter::None, TokenStream(nt.1.force(|| {
// FIXME(jseyfried): Avoid this pretty-print + reparse hack
let name = "<macro expansion>".to_owned();
let source = pprust::token_to_string(&token);
parse_stream_from_source_str(name, source, sess, Some(span))
})))
}),
Interpolated(ref nt) => {
// An `Interpolated` token means that we have a `Nonterminal`
// which is often a parsed AST item. At this point we now need
// to convert the parsed AST to an actual token stream, e.g.
// un-parse it basically.
//
// Unfortunately there's not really a great way to do that in a
// guaranteed lossless fashion right now. The fallback here is
// to just stringify the AST node and reparse it, but this loses
// all span information.
//
// As a result, some AST nodes are annotated with the token
// stream they came from. Attempt to extract these lossless
// token streams before we fall back to the stringification.
let mut tokens = None;
match nt.0 {
Nonterminal::NtItem(ref item) => {
tokens = prepend_attrs(&item.attrs, item.tokens.as_ref(), span);
}
Nonterminal::NtTraitItem(ref item) => {
tokens = prepend_attrs(&item.attrs, item.tokens.as_ref(), span);
}
Nonterminal::NtImplItem(ref item) => {
tokens = prepend_attrs(&item.attrs, item.tokens.as_ref(), span);
}
_ => {}
}
tokens.map(|tokens| {
TokenNode::Group(Delimiter::None,
TokenStream(tokens.clone()))
}).unwrap_or_else(|| {
__internal::with_sess(|(sess, _)| {
TokenNode::Group(Delimiter::None, TokenStream(nt.1.force(|| {
// FIXME(jseyfried): Avoid this pretty-print + reparse hack
let name = "<macro expansion>".to_owned();
let source = pprust::token_to_string(&token);
parse_stream_from_source_str(name, source, sess, Some(span))
})))
})
})
}
OpenDelim(..) | CloseDelim(..) => unreachable!(),
Whitespace | Comment | Shebang(..) | Eof => unreachable!(),
@@ -580,6 +615,34 @@ fn to_internal(self) -> tokenstream::TokenStream {
}
}
fn prepend_attrs(attrs: &[ast::Attribute],
tokens: Option<&tokenstream::TokenStream>,
span: syntax_pos::Span)
-> Option<tokenstream::TokenStream>
{
let tokens = match tokens {
Some(tokens) => tokens,
None => return None,
};
if attrs.len() == 0 {
return Some(tokens.clone())
}
let mut builder = tokenstream::TokenStreamBuilder::new();
for attr in attrs {
assert_eq!(attr.style, ast::AttrStyle::Outer,
"inner attributes should prevent cached tokens from existing");
let stream = __internal::with_sess(|(sess, _)| {
// FIXME: Avoid this pretty-print + reparse hack as bove
let name = "<macro expansion>".to_owned();
let source = pprust::attr_to_string(attr);
parse_stream_from_source_str(name, source, sess, Some(span))
});
builder.push(stream);
}
builder.push(tokens.clone());
Some(builder.build())
}
/// Permanently unstable internal implementation details of this crate. This
/// should not be used.
///
+10 -7
View File
@@ -66,7 +66,6 @@
use ich::Fingerprint;
use ty::{TyCtxt, Instance, InstanceDef};
use ty::fast_reject::SimplifiedType;
use ty::subst::Substs;
use rustc_data_structures::stable_hasher::{StableHasher, HashStable};
use ich::StableHashingContext;
use std::fmt;
@@ -104,6 +103,8 @@ pub fn can_reconstruct_query_key<$tcx>(&self) -> bool {
match *self {
$(
DepKind :: $variant => {
$(return !anon_attr_to_bool!($anon);)*
// tuple args
$({
return <( $($tuple_arg,)* ) as DepNodeParams>
@@ -112,6 +113,7 @@ pub fn can_reconstruct_query_key<$tcx>(&self) -> bool {
// struct args
$({
return <( $($struct_arg_ty,)* ) as DepNodeParams>
::CAN_RECONSTRUCT_QUERY_KEY;
})*
@@ -394,6 +396,7 @@ pub fn to_dep_node(self, tcx: TyCtxt, kind: DepKind) -> DepNode {
// Represents different phases in the compiler.
[] RegionMaps(DefId),
[] Coherence,
[] CoherenceInherentImplOverlapCheck,
[] Resolve,
[] CoherenceCheckTrait(DefId),
[] PrivacyAccessLevels(CrateNum),
@@ -444,17 +447,17 @@ pub fn to_dep_node(self, tcx: TyCtxt, kind: DepKind) -> DepNode {
[] TypeckBodiesKrate,
[] TypeckTables(DefId),
[] HasTypeckTables(DefId),
[] ConstEval { def_id: DefId, substs: &'tcx Substs<'tcx> },
[anon] ConstEval,
[] SymbolName(DefId),
[] InstanceSymbolName { instance: Instance<'tcx> },
[] SpecializationGraph(DefId),
[] ObjectSafety(DefId),
[anon] IsCopy(DefId),
[anon] IsSized(DefId),
[anon] IsFreeze(DefId),
[anon] NeedsDrop(DefId),
[anon] Layout(DefId),
[anon] IsCopy,
[anon] IsSized,
[anon] IsFreeze,
[anon] NeedsDrop,
[anon] Layout,
// The set of impls for a given trait.
[] TraitImpls(DefId),
+10
View File
@@ -23,6 +23,11 @@ pub struct DepGraphEdges {
edges: FxHashSet<(DepNodeIndex, DepNodeIndex)>,
task_stack: Vec<OpenTask>,
forbidden_edge: Option<EdgeFilter>,
// A set to help assert that no two tasks use the same DepNode. This is a
// temporary measure. Once we load the previous dep-graph as readonly, this
// check will fall out of the graph implementation naturally.
opened_once: FxHashSet<DepNode>,
}
#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)]
@@ -80,6 +85,7 @@ pub fn new() -> DepGraphEdges {
edges: FxHashSet(),
task_stack: Vec::new(),
forbidden_edge,
opened_once: FxHashSet(),
}
}
@@ -97,6 +103,10 @@ pub fn pop_ignore(&mut self) {
}
pub fn push_task(&mut self, key: DepNode) {
if !self.opened_once.insert(key) {
bug!("Re-opened node {:?}", key)
}
self.task_stack.push(OpenTask::Regular {
node: key,
reads: Vec::new(),
+13 -9
View File
@@ -2222,7 +2222,7 @@ fn lower_expr(&mut self, e: &Expr) -> hir::Expr {
let next_ident = self.str_to_ident("__next");
let next_pat = self.pat_ident_binding_mode(e.span,
next_ident,
hir::BindByValue(hir::MutMutable));
hir::BindingAnnotation::Mutable);
// `::std::option::Option::Some(val) => next = val`
let pat_arm = {
@@ -2246,8 +2246,9 @@ fn lower_expr(&mut self, e: &Expr) -> hir::Expr {
};
// `mut iter`
let iter_pat = self.pat_ident_binding_mode(e.span, iter,
hir::BindByValue(hir::MutMutable));
let iter_pat = self.pat_ident_binding_mode(e.span,
iter,
hir::BindingAnnotation::Mutable);
// `match ::std::iter::Iterator::next(&mut iter) { ... }`
let match_expr = {
@@ -2534,10 +2535,13 @@ fn lower_block_check_mode(&mut self, b: &BlockCheckMode) -> hir::BlockCheckMode
}
}
fn lower_binding_mode(&mut self, b: &BindingMode) -> hir::BindingMode {
fn lower_binding_mode(&mut self, b: &BindingMode) -> hir::BindingAnnotation {
match *b {
BindingMode::ByRef(m) => hir::BindByRef(self.lower_mutability(m)),
BindingMode::ByValue(m) => hir::BindByValue(self.lower_mutability(m)),
BindingMode::ByValue(Mutability::Immutable) =>
hir::BindingAnnotation::Unannotated,
BindingMode::ByRef(Mutability::Immutable) => hir::BindingAnnotation::Ref,
BindingMode::ByValue(Mutability::Mutable) => hir::BindingAnnotation::Mutable,
BindingMode::ByRef(Mutability::Mutable) => hir::BindingAnnotation::RefMut,
}
}
@@ -2678,7 +2682,7 @@ fn stmt_let_pat(&mut self,
fn stmt_let(&mut self, sp: Span, mutbl: bool, ident: Name, ex: P<hir::Expr>)
-> (hir::Stmt, NodeId) {
let pat = if mutbl {
self.pat_ident_binding_mode(sp, ident, hir::BindByValue(hir::MutMutable))
self.pat_ident_binding_mode(sp, ident, hir::BindingAnnotation::Mutable)
} else {
self.pat_ident(sp, ident)
};
@@ -2734,10 +2738,10 @@ fn pat_std_enum(&mut self,
}
fn pat_ident(&mut self, span: Span, name: Name) -> P<hir::Pat> {
self.pat_ident_binding_mode(span, name, hir::BindByValue(hir::MutImmutable))
self.pat_ident_binding_mode(span, name, hir::BindingAnnotation::Unannotated)
}
fn pat_ident_binding_mode(&mut self, span: Span, name: Name, bm: hir::BindingMode)
fn pat_ident_binding_mode(&mut self, span: Span, name: Name, bm: hir::BindingAnnotation)
-> P<hir::Pat> {
let id = self.next_id();
let parent_def = self.parent_def.unwrap();
+12
View File
@@ -192,6 +192,18 @@ pub fn constness(self) -> ast::Constness {
}
}
pub fn unsafety(self) -> ast::Unsafety {
match self.kind() {
FnKind::ItemFn(_, _, unsafety, ..) => {
unsafety
}
FnKind::Method(_, m, ..) => {
m.unsafety
}
_ => ast::Unsafety::Normal
}
}
pub fn kind(self) -> FnKind<'a> {
let item = |p: ItemFnParts<'a>| -> FnKind<'a> {
FnKind::ItemFn(p.name, p.generics, p.unsafety, p.constness, p.abi, p.vis, p.attrs)
+16 -16
View File
@@ -18,7 +18,7 @@
use hir::def_id::{CrateNum, DefId, DefIndex, LOCAL_CRATE, DefIndexAddressSpace,
CRATE_DEF_INDEX};
use ich::Fingerprint;
use rustc_data_structures::fx::{FxHashMap, FxHashSet};
use rustc_data_structures::fx::FxHashMap;
use rustc_data_structures::indexed_vec::IndexVec;
use rustc_data_structures::stable_hasher::StableHasher;
use serialize::{Encodable, Decodable, Encoder, Decoder};
@@ -153,7 +153,7 @@ pub struct Definitions {
pub(super) node_to_hir_id: IndexVec<ast::NodeId, hir::HirId>,
macro_def_scopes: FxHashMap<Mark, DefId>,
expansions: FxHashMap<DefIndex, Mark>,
keys_created: FxHashSet<DefKey>,
next_disambiguator: FxHashMap<(DefIndex, DefPathData), u32>,
}
// Unfortunately we have to provide a manual impl of Clone because of the
@@ -170,7 +170,7 @@ fn clone(&self) -> Self {
node_to_hir_id: self.node_to_hir_id.clone(),
macro_def_scopes: self.macro_def_scopes.clone(),
expansions: self.expansions.clone(),
keys_created: self.keys_created.clone(),
next_disambiguator: self.next_disambiguator.clone(),
}
}
}
@@ -402,7 +402,7 @@ pub fn new() -> Definitions {
node_to_hir_id: IndexVec::new(),
macro_def_scopes: FxHashMap(),
expansions: FxHashMap(),
keys_created: FxHashSet(),
next_disambiguator: FxHashMap(),
}
}
@@ -516,20 +516,20 @@ pub fn create_def_with_parent(&mut self,
// The root node must be created with create_root_def()
assert!(data != DefPathData::CrateRoot);
// Find a unique DefKey. This basically means incrementing the disambiguator
// until we get no match.
let mut key = DefKey {
parent: Some(parent),
disambiguated_data: DisambiguatedDefPathData {
data,
disambiguator: 0
}
// Find the next free disambiguator for this key.
let disambiguator = {
let next_disamb = self.next_disambiguator.entry((parent, data.clone())).or_insert(0);
let disambiguator = *next_disamb;
*next_disamb = next_disamb.checked_add(1).expect("disambiguator overflow");
disambiguator
};
while self.keys_created.contains(&key) {
key.disambiguated_data.disambiguator += 1;
}
self.keys_created.insert(key.clone());
let key = DefKey {
parent: Some(parent),
disambiguated_data: DisambiguatedDefPathData {
data, disambiguator
}
};
let parent_hash = self.table.def_path_hash(parent);
let def_path_hash = key.compute_stable_hash(parent_hash);
+4 -2
View File
@@ -555,7 +555,9 @@ pub fn find(&self, id: NodeId) -> Option<Node<'hir>> {
}
/// Similar to get_parent, returns the parent node id or id if there is no
/// parent.
/// parent. Note that the parent may be CRATE_NODE_ID, which is not itself
/// present in the map -- so passing the return value of get_parent_node to
/// get may actually panic.
/// This function returns the immediate parent in the AST, whereas get_parent
/// returns the enclosing item. Note that this might not be the actual parent
/// node in the AST - some kinds of nodes are not in the map and these will
@@ -631,7 +633,7 @@ fn walk_parent_nodes<F, F2>(&self,
}
/// Retrieve the NodeId for `id`'s enclosing method, unless there's a
/// `while` or `loop` before reacing it, as block tail returns are not
/// `while` or `loop` before reaching it, as block tail returns are not
/// available in them.
///
/// ```
+43 -9
View File
@@ -10,7 +10,6 @@
// The Rust HIR.
pub use self::BindingMode::*;
pub use self::BinOp_::*;
pub use self::BlockCheckMode::*;
pub use self::CaptureClause::*;
@@ -49,7 +48,7 @@
use std::collections::BTreeMap;
use std::fmt;
/// HIR doesn't commit to a concrete storage type and have its own alias for a vector.
/// HIR doesn't commit to a concrete storage type and has its own alias for a vector.
/// It can be `Vec`, `P<[T]>` or potentially `Box<[T]>`, or some other container with similar
/// behavior. Unlike AST, HIR is mostly a static structure, so we can use an owned slice instead
/// of `Vec` to avoid keeping extra capacity.
@@ -76,14 +75,14 @@ macro_rules! hir_vec {
pub mod print;
pub mod svh;
/// A HirId uniquely identifies a node in the HIR of then current crate. It is
/// A HirId uniquely identifies a node in the HIR of the current crate. It is
/// composed of the `owner`, which is the DefIndex of the directly enclosing
/// hir::Item, hir::TraitItem, or hir::ImplItem (i.e. the closest "item-like"),
/// and the `local_id` which is unique within the given owner.
///
/// This two-level structure makes for more stable values: One can move an item
/// around within the source code, or add or remove stuff before it, without
/// the local_id part of the HirId changing, which is a very useful property
/// the local_id part of the HirId changing, which is a very useful property in
/// incremental compilation where we have to persist things through changes to
/// the code base.
#[derive(Copy, Clone, PartialEq, Eq, Hash, PartialOrd, Ord, Debug,
@@ -628,10 +627,28 @@ pub struct FieldPat {
pub is_shorthand: bool,
}
/// Explicit binding annotations given in the HIR for a binding. Note
/// that this is not the final binding *mode* that we infer after type
/// inference.
#[derive(Clone, PartialEq, Eq, RustcEncodable, RustcDecodable, Hash, Debug, Copy)]
pub enum BindingMode {
BindByRef(Mutability),
BindByValue(Mutability),
pub enum BindingAnnotation {
/// No binding annotation given: this means that the final binding mode
/// will depend on whether we have skipped through a `&` reference
/// when matching. For example, the `x` in `Some(x)` will have binding
/// mode `None`; if you do `let Some(x) = &Some(22)`, it will
/// ultimately be inferred to be by-reference.
///
/// Note that implicit reference skipping is not implemented yet (#42640).
Unannotated,
/// Annotated with `mut x` -- could be either ref or not, similar to `None`.
Mutable,
/// Annotated as `ref`, like `ref x`
Ref,
/// Annotated as `ref mut x`.
RefMut,
}
#[derive(Clone, PartialEq, Eq, RustcEncodable, RustcDecodable, Hash, Debug)]
@@ -647,7 +664,7 @@ pub enum PatKind {
/// A fresh binding `ref mut binding @ OPT_SUBPATTERN`.
/// The `DefId` is for the definition of the variable being bound.
Binding(BindingMode, DefId, Spanned<Name>, Option<P<Pat>>),
Binding(BindingAnnotation, DefId, Spanned<Name>, Option<P<Pat>>),
/// A struct or struct variant pattern, e.g. `Variant {x, y, ..}`.
/// The `bool` is `true` in the presence of a `..`.
@@ -684,6 +701,16 @@ pub enum Mutability {
MutImmutable,
}
impl Mutability {
/// Return MutMutable only if both arguments are mutable.
pub fn and(self, other: Self) -> Self {
match self {
MutMutable => other,
MutImmutable => MutImmutable,
}
}
}
#[derive(Clone, PartialEq, Eq, RustcEncodable, RustcDecodable, Hash, Debug, Copy)]
pub enum BinOp_ {
/// The `+` operator (addition)
@@ -892,6 +919,13 @@ pub fn attrs(&self) -> &[Attribute] {
DeclItem(_) => &[]
}
}
pub fn is_local(&self) -> bool {
match *self {
Decl_::DeclLocal(_) => true,
_ => false,
}
}
}
/// represents one arm of a 'match'
@@ -1686,7 +1720,7 @@ pub struct Item {
#[derive(Clone, PartialEq, Eq, RustcEncodable, RustcDecodable, Hash, Debug)]
pub enum Item_ {
/// An`extern crate` item, with optional original crate name,
/// An `extern crate` item, with optional original crate name,
///
/// e.g. `extern crate foo` or `extern crate foo_bar as foo`
ItemExternCrate(Option<Name>),
+23 -17
View File
@@ -87,7 +87,7 @@ pub fn is_const(&self) -> bool {
/// Call `f` on every "binding" in a pattern, e.g., on `a` in
/// `match foo() { Some(a) => (), None => () }`
pub fn each_binding<F>(&self, mut f: F)
where F: FnMut(hir::BindingMode, ast::NodeId, Span, &Spanned<ast::Name>),
where F: FnMut(hir::BindingAnnotation, ast::NodeId, Span, &Spanned<ast::Name>),
{
self.walk(|p| {
if let PatKind::Binding(binding_mode, _, ref pth, _) = p.node {
@@ -130,12 +130,10 @@ pub fn contains_bindings_or_wild(&self) -> bool {
pub fn simple_name(&self) -> Option<ast::Name> {
match self.node {
PatKind::Binding(hir::BindByValue(..), _, ref path1, None) => {
Some(path1.node)
}
_ => {
None
}
PatKind::Binding(hir::BindingAnnotation::Unannotated, _, ref path1, None) |
PatKind::Binding(hir::BindingAnnotation::Mutable, _, ref path1, None) =>
Some(path1.node),
_ => None,
}
}
@@ -163,16 +161,22 @@ pub fn necessary_variants(&self) -> Vec<DefId> {
}
/// Checks if the pattern contains any `ref` or `ref mut` bindings,
/// and if yes whether its containing mutable ones or just immutables ones.
pub fn contains_ref_binding(&self) -> Option<hir::Mutability> {
/// and if yes whether it contains mutable or just immutables ones.
///
/// FIXME(tschottdorf): this is problematic as the HIR is being scraped,
/// but ref bindings may be implicit after #42640.
pub fn contains_explicit_ref_binding(&self) -> Option<hir::Mutability> {
let mut result = None;
self.each_binding(|mode, _, _, _| {
if let hir::BindingMode::BindByRef(m) = mode {
// Pick Mutable as maximum
match result {
None | Some(hir::MutImmutable) => result = Some(m),
_ => (),
self.each_binding(|annotation, _, _, _| {
match annotation {
hir::BindingAnnotation::Ref => {
match result {
None | Some(hir::MutImmutable) => result = Some(hir::MutImmutable),
_ => (),
}
}
hir::BindingAnnotation::RefMut => result = Some(hir::MutMutable),
_ => (),
}
});
result
@@ -182,9 +186,11 @@ pub fn contains_ref_binding(&self) -> Option<hir::Mutability> {
impl hir::Arm {
/// Checks if the patterns for this arm contain any `ref` or `ref mut`
/// bindings, and if yes whether its containing mutable ones or just immutables ones.
pub fn contains_ref_binding(&self) -> Option<hir::Mutability> {
pub fn contains_explicit_ref_binding(&self) -> Option<hir::Mutability> {
// FIXME(tschottdorf): contains_explicit_ref_binding() must be removed
// for #42640.
self.pats.iter()
.filter_map(|pat| pat.contains_ref_binding())
.filter_map(|pat| pat.contains_explicit_ref_binding())
.max_by_key(|m| match *m {
hir::MutMutable => 1,
hir::MutImmutable => 0,
+8 -4
View File
@@ -1655,12 +1655,16 @@ pub fn print_pat(&mut self, pat: &hir::Pat) -> io::Result<()> {
PatKind::Wild => self.s.word("_")?,
PatKind::Binding(binding_mode, _, ref path1, ref sub) => {
match binding_mode {
hir::BindByRef(mutbl) => {
hir::BindingAnnotation::Ref => {
self.word_nbsp("ref")?;
self.print_mutability(mutbl)?;
self.print_mutability(hir::MutImmutable)?;
}
hir::BindByValue(hir::MutImmutable) => {}
hir::BindByValue(hir::MutMutable) => {
hir::BindingAnnotation::RefMut => {
self.word_nbsp("ref")?;
self.print_mutability(hir::MutMutable)?;
}
hir::BindingAnnotation::Unannotated => {}
hir::BindingAnnotation::Mutable => {
self.word_nbsp("mut")?;
}
}
+3 -3
View File
@@ -11,9 +11,9 @@
//! This module contains `HashStable` implementations for various data types
//! from `rustc_const_math` in no particular order.
impl_stable_hash_for!(enum ::rustc_const_math::ConstFloat {
F32(val),
F64(val)
impl_stable_hash_for!(struct ::rustc_const_math::ConstFloat {
ty,
bits
});
impl_stable_hash_for!(enum ::rustc_const_math::ConstInt {
+5 -3
View File
@@ -442,9 +442,11 @@ fn hash_stable<W: StableHasherResult>(&self,
is_shorthand
});
impl_stable_hash_for!(enum hir::BindingMode {
BindByRef(mutability),
BindByValue(mutability)
impl_stable_hash_for!(enum hir::BindingAnnotation {
Unannotated,
Mutable,
Ref,
RefMut
});
impl_stable_hash_for!(enum hir::RangeEnd {
+32 -9
View File
@@ -239,8 +239,12 @@ fn hash_stable<W: StableHasherResult>(&self,
mir::StatementKind::StorageDead(ref lvalue) => {
lvalue.hash_stable(hcx, hasher);
}
mir::StatementKind::EndRegion(ref extents) => {
extents.hash_stable(hcx, hasher);
mir::StatementKind::EndRegion(ref extent) => {
extent.hash_stable(hcx, hasher);
}
mir::StatementKind::Validate(ref op, ref lvalues) => {
op.hash_stable(hcx, hasher);
lvalues.hash_stable(hcx, hasher);
}
mir::StatementKind::Nop => {}
mir::StatementKind::InlineAsm { ref asm, ref outputs, ref inputs } => {
@@ -252,6 +256,23 @@ fn hash_stable<W: StableHasherResult>(&self,
}
}
impl<'a, 'gcx, 'tcx, T> HashStable<StableHashingContext<'a, 'gcx, 'tcx>>
for mir::ValidationOperand<'tcx, T>
where T: HashStable<StableHashingContext<'a, 'gcx, 'tcx>>
{
fn hash_stable<W: StableHasherResult>(&self,
hcx: &mut StableHashingContext<'a, 'gcx, 'tcx>,
hasher: &mut StableHasher<W>)
{
self.lval.hash_stable(hcx, hasher);
self.ty.hash_stable(hcx, hasher);
self.re.hash_stable(hcx, hasher);
self.mutbl.hash_stable(hcx, hasher);
}
}
impl_stable_hash_for!(enum mir::ValidationOp { Acquire, Release, Suspend(extent) });
impl<'a, 'gcx, 'tcx> HashStable<StableHashingContext<'a, 'gcx, 'tcx>> for mir::Lvalue<'tcx> {
fn hash_stable<W: StableHasherResult>(&self,
hcx: &mut StableHashingContext<'a, 'gcx, 'tcx>,
@@ -271,10 +292,11 @@ fn hash_stable<W: StableHasherResult>(&self,
}
}
impl<'a, 'gcx, 'tcx, B, V> HashStable<StableHashingContext<'a, 'gcx, 'tcx>>
for mir::Projection<'tcx, B, V>
impl<'a, 'gcx, 'tcx, B, V, T> HashStable<StableHashingContext<'a, 'gcx, 'tcx>>
for mir::Projection<'tcx, B, V, T>
where B: HashStable<StableHashingContext<'a, 'gcx, 'tcx>>,
V: HashStable<StableHashingContext<'a, 'gcx, 'tcx>>
V: HashStable<StableHashingContext<'a, 'gcx, 'tcx>>,
T: HashStable<StableHashingContext<'a, 'gcx, 'tcx>>
{
fn hash_stable<W: StableHasherResult>(&self,
hcx: &mut StableHashingContext<'a, 'gcx, 'tcx>,
@@ -289,9 +311,10 @@ fn hash_stable<W: StableHasherResult>(&self,
}
}
impl<'a, 'gcx, 'tcx, V> HashStable<StableHashingContext<'a, 'gcx, 'tcx>>
for mir::ProjectionElem<'tcx, V>
where V: HashStable<StableHashingContext<'a, 'gcx, 'tcx>>
impl<'a, 'gcx, 'tcx, V, T> HashStable<StableHashingContext<'a, 'gcx, 'tcx>>
for mir::ProjectionElem<'tcx, V, T>
where V: HashStable<StableHashingContext<'a, 'gcx, 'tcx>>,
T: HashStable<StableHashingContext<'a, 'gcx, 'tcx>>
{
fn hash_stable<W: StableHasherResult>(&self,
hcx: &mut StableHashingContext<'a, 'gcx, 'tcx>,
@@ -299,7 +322,7 @@ fn hash_stable<W: StableHasherResult>(&self,
mem::discriminant(self).hash_stable(hcx, hasher);
match *self {
mir::ProjectionElem::Deref => {}
mir::ProjectionElem::Field(field, ty) => {
mir::ProjectionElem::Field(field, ref ty) => {
field.hash_stable(hcx, hasher);
ty.hash_stable(hcx, hasher);
}
+2 -1
View File
@@ -375,7 +375,6 @@ fn hash_stable<W: StableHasherResult>(&self,
name,
def_id,
index,
issue_32330: _,
pure_wrt_drop
} = *self;
@@ -630,6 +629,7 @@ fn hash_stable<W: StableHasherResult>(&self,
ref node_types,
ref node_substs,
ref adjustments,
ref pat_binding_modes,
ref upvar_capture_map,
ref closure_tys,
ref closure_kinds,
@@ -652,6 +652,7 @@ fn hash_stable<W: StableHasherResult>(&self,
ich::hash_stable_nodemap(hcx, hasher, node_types);
ich::hash_stable_nodemap(hcx, hasher, node_substs);
ich::hash_stable_nodemap(hcx, hasher, adjustments);
ich::hash_stable_nodemap(hcx, hasher, pat_binding_modes);
ich::hash_stable_hashmap(hcx, hasher, upvar_capture_map, |hcx, up_var_id| {
let ty::UpvarId {
var_id,
@@ -77,10 +77,10 @@ pub fn try_report_anon_anon_conflict(&self, error: &RegionResolutionError<'tcx>)
struct_span_err!(self.tcx.sess, span, E0623, "lifetime mismatch")
.span_label(ty1.span,
format!("these references must have the same lifetime"))
format!("these references are not declared with the same lifetime..."))
.span_label(ty2.span, format!(""))
.span_label(span,
format!("data{}flows{}here", span_label_var1, span_label_var2))
format!("...but data{}flows{}here", span_label_var1, span_label_var2))
.emit();
} else {
return false;
+2 -33
View File
@@ -66,8 +66,7 @@
use hir::def_id::DefId;
use middle::region;
use traits::{ObligationCause, ObligationCauseCode};
use ty::{self, TyCtxt, TypeFoldable};
use ty::{Region, Issue32330};
use ty::{self, Region, TyCtxt, TypeFoldable};
use ty::error::TypeError;
use syntax::ast::DUMMY_NODE_ID;
use syntax_pos::{Pos, Span};
@@ -713,35 +712,6 @@ pub fn note_type_err(&self,
self.tcx.note_and_explain_type_err(diag, terr, span);
}
pub fn note_issue_32330(&self,
diag: &mut DiagnosticBuilder<'tcx>,
terr: &TypeError<'tcx>)
{
debug!("note_issue_32330: terr={:?}", terr);
match *terr {
TypeError::RegionsInsufficientlyPolymorphic(_, _, Some(box Issue32330 {
fn_def_id, region_name
})) |
TypeError::RegionsOverlyPolymorphic(_, _, Some(box Issue32330 {
fn_def_id, region_name
})) => {
diag.note(
&format!("lifetime parameter `{0}` declared on fn `{1}` \
appears only in the return type, \
but here is required to be higher-ranked, \
which means that `{0}` must appear in both \
argument and return types",
region_name,
self.tcx.item_path_str(fn_def_id)));
diag.note(
&format!("this error is the result of a recent bug fix; \
for more information, see issue #33685 \
<https://github.com/rust-lang/rust/issues/33685>"));
}
_ => {}
}
}
pub fn report_and_explain_type_error(&self,
trace: TypeTrace<'tcx>,
terr: &TypeError<'tcx>)
@@ -761,7 +731,6 @@ pub fn report_and_explain_type_error(&self,
}
};
self.note_type_err(&mut diag, &trace.cause, None, Some(trace.values), terr);
self.note_issue_32330(&mut diag, terr);
diag
}
@@ -934,7 +903,7 @@ fn report_inference_failure(&self,
format!(" for lifetime parameter {}in trait containing associated type `{}`",
br_string(br), type_name)
}
infer::EarlyBoundRegion(_, name, _) => {
infer::EarlyBoundRegion(_, name) => {
format!(" for lifetime parameter `{}`",
name)
}
+7 -49
View File
@@ -13,9 +13,7 @@
use super::{CombinedSnapshot,
InferCtxt,
LateBoundRegion,
HigherRankedType,
RegionVariableOrigin,
SubregionOrigin,
SkolemizationMap};
use super::combine::CombineFields;
@@ -29,15 +27,6 @@
pub struct HrMatchResult<U> {
pub value: U,
/// Normally, when we do a higher-ranked match operation, we
/// expect all higher-ranked regions to be constrained as part of
/// the match operation. However, in the transition period for
/// #32330, it can happen that we sometimes have unconstrained
/// regions that get instantiated with fresh variables. In that
/// case, we collect the set of unconstrained bound regions here
/// and replace them with fresh variables.
pub unconstrained_regions: Vec<ty::BoundRegion>,
}
impl<'a, 'gcx, 'tcx> CombineFields<'a, 'gcx, 'tcx> {
@@ -108,7 +97,6 @@ pub fn higher_ranked_sub<T>(&mut self, a: &Binder<T>, b: &Binder<T>, a_is_expect
/// that do not appear in `T`. If that happens, those regions are
/// unconstrained, and this routine replaces them with `'static`.
pub fn higher_ranked_match<T, U>(&mut self,
span: Span,
a_pair: &Binder<(T, U)>,
b_match: &T,
a_is_expected: bool)
@@ -158,28 +146,16 @@ pub fn higher_ranked_match<T, U>(&mut self,
// be any region from the sets above, except for other members of
// `skol_map`. There should always be a representative if things
// are properly well-formed.
let mut unconstrained_regions = vec![];
let skol_representatives: FxHashMap<_, _> =
skol_resolution_map
.iter()
.map(|(&skol, &(br, ref regions))| {
.map(|(&skol, &(_, ref regions))| {
let representative =
regions.iter()
.filter(|&&r| !skol_resolution_map.contains_key(r))
.cloned()
.next()
.unwrap_or_else(|| { // [1]
unconstrained_regions.push(br);
self.infcx.next_region_var(
LateBoundRegion(span, br, HigherRankedType))
});
// [1] There should always be a representative,
// unless the higher-ranked region did not appear
// in the values being matched. We should reject
// as ill-formed cases that can lead to this, but
// right now we sometimes issue warnings (see
// #32330).
.expect("no representative region");
(skol, representative)
})
@@ -216,10 +192,7 @@ pub fn higher_ranked_match<T, U>(&mut self,
// We are now done with these skolemized variables.
self.infcx.pop_skolemized(skol_map, snapshot);
Ok(HrMatchResult {
value: a_value,
unconstrained_regions,
})
Ok(HrMatchResult { value: a_value })
});
}
@@ -657,28 +630,13 @@ pub fn leak_check(&self,
skol_br,
tainted_region);
let issue_32330 = if let &ty::ReVar(vid) = tainted_region {
match self.region_vars.var_origin(vid) {
RegionVariableOrigin::EarlyBoundRegion(_, _, issue_32330) => {
issue_32330.map(Box::new)
}
_ => None
}
} else {
None
};
if overly_polymorphic {
return Err(if overly_polymorphic {
debug!("Overly polymorphic!");
return Err(TypeError::RegionsOverlyPolymorphic(skol_br,
tainted_region,
issue_32330));
TypeError::RegionsOverlyPolymorphic(skol_br, tainted_region)
} else {
debug!("Not as polymorphic!");
return Err(TypeError::RegionsInsufficientlyPolymorphic(skol_br,
tainted_region,
issue_32330));
}
TypeError::RegionsInsufficientlyPolymorphic(skol_br, tainted_region)
})
}
}
+3 -4
View File
@@ -299,7 +299,7 @@ pub enum RegionVariableOrigin {
Coercion(Span),
// Region variables created as the values for early-bound regions
EarlyBoundRegion(Span, ast::Name, Option<ty::Issue32330>),
EarlyBoundRegion(Span, ast::Name),
// Region variables created for bound regions
// in a function or method that is called
@@ -989,7 +989,7 @@ pub fn region_var_for_def(&self,
span: Span,
def: &ty::RegionParameterDef)
-> ty::Region<'tcx> {
self.next_region_var(EarlyBoundRegion(span, def.name, def.issue_32330))
self.next_region_var(EarlyBoundRegion(span, def.name))
}
/// Create a type inference variable for the given
@@ -1278,14 +1278,13 @@ pub fn match_poly_projection_predicate(&self,
-> InferResult<'tcx, HrMatchResult<Ty<'tcx>>>
{
let match_pair = match_a.map_bound(|p| (p.projection_ty.trait_ref(self.tcx), p.ty));
let span = cause.span;
let trace = TypeTrace {
cause,
values: TraitRefs(ExpectedFound::new(true, match_pair.skip_binder().0, match_b))
};
let mut combine = self.combine_fields(trace, param_env);
let result = combine.higher_ranked_match(span, &match_pair, &match_b, true)?;
let result = combine.higher_ranked_match(&match_pair, &match_b, true)?;
Ok(InferOk { value: result, obligations: combine.obligations })
}
+17 -15
View File
@@ -18,9 +18,9 @@
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// the rustc crate store interface. This also includes types that
// are *mostly* used as a part of that interface, but these should
// probably get a better home if someone can find one.
//! the rustc crate store interface. This also includes types that
//! are *mostly* used as a part of that interface, but these should
//! probably get a better home if someone can find one.
use hir::def;
use hir::def_id::{CrateNum, DefId, DefIndex};
@@ -50,13 +50,13 @@
// lonely orphan structs and enums looking for a better home
#[derive(Clone, Debug)]
#[derive(Clone, Debug, Copy)]
pub struct LinkMeta {
pub crate_hash: Svh,
}
// Where a crate came from on the local filesystem. One of these three options
// must be non-None.
/// Where a crate came from on the local filesystem. One of these three options
/// must be non-None.
#[derive(PartialEq, Clone, Debug)]
pub struct CrateSource {
pub dylib: Option<(PathBuf, PathKind)>,
@@ -120,10 +120,14 @@ pub enum LinkagePreference {
#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash, RustcEncodable, RustcDecodable)]
pub enum NativeLibraryKind {
NativeStatic, // native static library (.a archive)
NativeStaticNobundle, // native static library, which doesn't get bundled into .rlibs
NativeFramework, // macOS-specific
NativeUnknown, // default way to specify a dynamic library
/// native static library (.a archive)
NativeStatic,
/// native static library, which doesn't get bundled into .rlibs
NativeStaticNobundle,
/// macOS-specific
NativeFramework,
/// default way to specify a dynamic library
NativeUnknown,
}
#[derive(Clone, Hash, RustcEncodable, RustcDecodable)]
@@ -161,15 +165,13 @@ pub struct ExternCrate {
}
pub struct EncodedMetadata {
pub raw_data: Vec<u8>,
pub hashes: EncodedMetadataHashes,
pub raw_data: Vec<u8>
}
impl EncodedMetadata {
pub fn new() -> EncodedMetadata {
EncodedMetadata {
raw_data: Vec::new(),
hashes: EncodedMetadataHashes::new(),
}
}
}
@@ -294,7 +296,7 @@ fn encode_metadata<'a, 'tcx>(&self,
tcx: TyCtxt<'a, 'tcx, 'tcx>,
link_meta: &LinkMeta,
reachable: &NodeSet)
-> EncodedMetadata;
-> (EncodedMetadata, EncodedMetadataHashes);
fn metadata_encoding_version(&self) -> &[u8];
}
@@ -424,7 +426,7 @@ fn encode_metadata<'a, 'tcx>(&self,
tcx: TyCtxt<'a, 'tcx, 'tcx>,
link_meta: &LinkMeta,
reachable: &NodeSet)
-> EncodedMetadata {
-> (EncodedMetadata, EncodedMetadataHashes) {
bug!("encode_metadata")
}
fn metadata_encoding_version(&self) -> &[u8] { bug!("metadata_encoding_version") }
+13 -4
View File
@@ -22,6 +22,9 @@
use std::usize;
use syntax::ast;
use syntax::print::pprust::PrintState;
use rustc_data_structures::graph::OUTGOING;
use util::nodemap::NodeMap;
use hir;
use hir::intravisit::{self, IdRange};
@@ -523,12 +526,16 @@ pub fn propagate(&mut self, cfg: &cfg::CFG, body: &hir::Body) {
changed: true
};
let nodes_po = cfg.graph.nodes_in_postorder(OUTGOING, cfg.entry);
let mut temp = vec![0; words_per_id];
let mut num_passes = 0;
while propcx.changed {
num_passes += 1;
propcx.changed = false;
propcx.reset(&mut temp);
propcx.walk_cfg(cfg, &mut temp);
propcx.walk_cfg(cfg, &nodes_po, &mut temp);
}
debug!("finished in {} iterations", num_passes);
}
debug!("Dataflow result for {}:", self.analysis_name);
@@ -543,12 +550,15 @@ pub fn propagate(&mut self, cfg: &cfg::CFG, body: &hir::Body) {
impl<'a, 'b, 'tcx, O:DataFlowOperator> PropagationContext<'a, 'b, 'tcx, O> {
fn walk_cfg(&mut self,
cfg: &cfg::CFG,
nodes_po: &[CFGIndex],
in_out: &mut [usize]) {
debug!("DataFlowContext::walk_cfg(in_out={}) {}",
bits_to_string(in_out), self.dfcx.analysis_name);
assert!(self.dfcx.bits_per_id > 0);
cfg.graph.each_node(|node_index, node| {
// Iterate over nodes in reverse postorder
for &node_index in nodes_po.iter().rev() {
let node = cfg.graph.node(node_index);
debug!("DataFlowContext::walk_cfg idx={:?} id={} begin in_out={}",
node_index, node.data.id(), bits_to_string(in_out));
@@ -563,8 +573,7 @@ fn walk_cfg(&mut self,
// Propagate state on-exit from node into its successors.
self.propagate_bits_into_graph_successors_of(in_out, cfg, node_index);
true // continue to next node
});
}
}
fn reset(&mut self, bits: &mut [usize]) {
+28 -3
View File
@@ -13,7 +13,7 @@
// from live codes are live, and everything else is dead.
use hir::map as hir_map;
use hir::{self, PatKind};
use hir::{self, Item_, PatKind};
use hir::intravisit::{self, Visitor, NestedVisitorMap};
use hir::itemlikevisit::ItemLikeVisitor;
@@ -189,6 +189,22 @@ fn visit_node(&mut self, node: &hir_map::Node<'tcx>) {
self.struct_has_extern_repr = had_extern_repr;
self.inherited_pub_visibility = had_inherited_pub_visibility;
}
fn mark_as_used_if_union(&mut self, did: DefId, fields: &hir::HirVec<hir::Field>) {
if let Some(node_id) = self.tcx.hir.as_local_node_id(did) {
if let Some(hir_map::NodeItem(item)) = self.tcx.hir.find(node_id) {
if let Item_::ItemUnion(ref variant, _) = item.node {
if variant.fields().len() > 1 {
for field in variant.fields() {
if fields.iter().find(|x| x.name.node == field.name).is_some() {
self.live_symbols.insert(field.id);
}
}
}
}
}
}
}
}
impl<'a, 'tcx> Visitor<'tcx> for MarkSymbolVisitor<'a, 'tcx> {
@@ -231,6 +247,13 @@ fn visit_expr(&mut self, expr: &'tcx hir::Expr) {
hir::ExprTupField(ref lhs, idx) => {
self.handle_tup_field_access(&lhs, idx.node);
}
hir::ExprStruct(_, ref fields, _) => {
if let ty::TypeVariants::TyAdt(ref def, _) = self.tables.expr_ty(expr).sty {
if def.is_union() {
self.mark_as_used_if_union(def.did, fields);
}
}
}
_ => ()
}
@@ -561,7 +584,6 @@ fn visit_struct_field(&mut self, field: &'tcx hir::StructField) {
self.warn_dead_code(field.id, field.span,
field.name, "field");
}
intravisit::walk_struct_field(self, field);
}
@@ -603,6 +625,9 @@ pub fn check_crate<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>) {
let access_levels = &tcx.privacy_access_levels(LOCAL_CRATE);
let krate = tcx.hir.krate();
let live_symbols = find_live(tcx, access_levels, krate);
let mut visitor = DeadVisitor { tcx: tcx, live_symbols: live_symbols };
let mut visitor = DeadVisitor {
tcx: tcx,
live_symbols: live_symbols,
};
intravisit::walk_crate(&mut visitor, krate);
}
+16 -12
View File
@@ -800,16 +800,19 @@ fn determine_pat_move_mode(&mut self,
debug!("determine_pat_move_mode cmt_discr={:?} pat={:?}", cmt_discr,
pat);
return_if_err!(self.mc.cat_pattern(cmt_discr, pat, |cmt_pat, pat| {
match pat.node {
PatKind::Binding(hir::BindByRef(..), ..) =>
mode.lub(BorrowingMatch),
PatKind::Binding(hir::BindByValue(..), ..) => {
match copy_or_move(&self.mc, self.param_env, &cmt_pat, PatBindingMove) {
Copy => mode.lub(CopyingMatch),
Move(..) => mode.lub(MovingMatch),
if let PatKind::Binding(..) = pat.node {
let bm = *self.mc.tables.pat_binding_modes.get(&pat.id)
.expect("missing binding mode");
match bm {
ty::BindByReference(..) =>
mode.lub(BorrowingMatch),
ty::BindByValue(..) => {
match copy_or_move(&self.mc, self.param_env, &cmt_pat, PatBindingMove) {
Copy => mode.lub(CopyingMatch),
Move(..) => mode.lub(MovingMatch),
}
}
}
_ => {}
}
}));
}
@@ -822,8 +825,9 @@ fn walk_pat(&mut self, cmt_discr: mc::cmt<'tcx>, pat: &hir::Pat, match_mode: Mat
let ExprUseVisitor { ref mc, ref mut delegate, param_env } = *self;
return_if_err!(mc.cat_pattern(cmt_discr.clone(), pat, |cmt_pat, pat| {
if let PatKind::Binding(bmode, def_id, ..) = pat.node {
if let PatKind::Binding(_, def_id, ..) = pat.node {
debug!("binding cmt_pat={:?} pat={:?} match_mode={:?}", cmt_pat, pat, match_mode);
let bm = *mc.tables.pat_binding_modes.get(&pat.id).expect("missing binding mode");
// pat_ty: the type of the binding being produced.
let pat_ty = return_if_err!(mc.node_ty(pat.id));
@@ -836,14 +840,14 @@ fn walk_pat(&mut self, cmt_discr: mc::cmt<'tcx>, pat: &hir::Pat, match_mode: Mat
}
// It is also a borrow or copy/move of the value being matched.
match bmode {
hir::BindByRef(m) => {
match bm {
ty::BindByReference(m) => {
if let ty::TyRef(r, _) = pat_ty.sty {
let bk = ty::BorrowKind::from_mutbl(m);
delegate.borrow(pat.id, pat.span, cmt_pat, r, bk, RefBinding);
}
}
hir::BindByValue(..) => {
ty::BindByValue(..) => {
let mode = copy_or_move(mc, param_env, &cmt_pat, PatBindingMove);
debug!("walk_pat binding consuming pat");
delegate.consume_pat(pat, cmt_pat, mode);
+20 -14
View File
@@ -330,11 +330,12 @@ fn from_pointer_kind(base_mutbl: MutabilityCategory,
ret
}
fn from_local(tcx: TyCtxt, id: ast::NodeId) -> MutabilityCategory {
fn from_local(tcx: TyCtxt, tables: &ty::TypeckTables, id: ast::NodeId) -> MutabilityCategory {
let ret = match tcx.hir.get(id) {
hir_map::NodeLocal(p) => match p.node {
PatKind::Binding(bind_mode, ..) => {
if bind_mode == hir::BindByValue(hir::MutMutable) {
PatKind::Binding(..) => {
let bm = *tables.pat_binding_modes.get(&p.id).expect("missing binding mode");
if bm == ty::BindByValue(hir::MutMutable) {
McDeclared
} else {
McImmutable
@@ -475,16 +476,21 @@ fn pat_ty(&self, pat: &hir::Pat) -> McResult<Ty<'tcx>> {
// *being borrowed* is. But ideally we would put in a more
// fundamental fix to this conflated use of the node id.
let ret_ty = match pat.node {
PatKind::Binding(hir::BindByRef(_), ..) => {
// a bind-by-ref means that the base_ty will be the type of the ident itself,
// but what we want here is the type of the underlying value being borrowed.
// So peel off one-level, turning the &T into T.
match base_ty.builtin_deref(false, ty::NoPreference) {
Some(t) => t.ty,
None => {
debug!("By-ref binding of non-derefable type {:?}", base_ty);
return Err(());
PatKind::Binding(..) => {
let bm = *self.tables.pat_binding_modes.get(&pat.id).expect("missing binding mode");
if let ty::BindByReference(_) = bm {
// a bind-by-ref means that the base_ty will be the type of the ident itself,
// but what we want here is the type of the underlying value being borrowed.
// So peel off one-level, turning the &T into T.
match base_ty.builtin_deref(false, ty::NoPreference) {
Some(t) => t.ty,
None => {
debug!("By-ref binding of non-derefable type {:?}", base_ty);
return Err(());
}
}
} else {
base_ty
}
}
_ => base_ty,
@@ -659,7 +665,7 @@ pub fn cat_def(&self,
id,
span,
cat: Categorization::Local(vid),
mutbl: MutabilityCategory::from_local(self.tcx, vid),
mutbl: MutabilityCategory::from_local(self.tcx, self.tables, vid),
ty: expr_ty,
note: NoteNone
}))
@@ -717,7 +723,7 @@ fn cat_upvar(&self,
let var_ty = self.node_ty(var_id)?;
// Mutability of original variable itself
let var_mutbl = MutabilityCategory::from_local(self.tcx, var_id);
let var_mutbl = MutabilityCategory::from_local(self.tcx, self.tables, var_id);
// Construct the upvar. This represents access to the field
// from the environment (perhaps we should eventually desugar
+29 -5
View File
@@ -459,10 +459,10 @@ pub fn nearest_common_ancestor(&self,
-> CodeExtent {
if scope_a == scope_b { return scope_a; }
/// [1] The initial values for `a_buf` and `b_buf` are not used.
/// The `ancestors_of` function will return some prefix that
/// is re-initialized with new values (or else fallback to a
/// heap-allocated vector).
// [1] The initial values for `a_buf` and `b_buf` are not used.
// The `ancestors_of` function will return some prefix that
// is re-initialized with new values (or else fallback to a
// heap-allocated vector).
let mut a_buf: [CodeExtent; 32] = [scope_a /* [1] */; 32];
let mut a_vec: Vec<CodeExtent> = vec![];
let mut b_buf: [CodeExtent; 32] = [scope_b /* [1] */; 32];
@@ -890,8 +890,32 @@ fn resolve_local<'a, 'tcx>(visitor: &mut RegionResolutionVisitor<'a, 'tcx>,
/// | ( ..., P&, ... )
/// | box P&
fn is_binding_pat(pat: &hir::Pat) -> bool {
// Note that the code below looks for *explicit* refs only, that is, it won't
// know about *implicit* refs as introduced in #42640.
//
// This is not a problem. For example, consider
//
// let (ref x, ref y) = (Foo { .. }, Bar { .. });
//
// Due to the explicit refs on the left hand side, the below code would signal
// that the temporary value on the right hand side should live until the end of
// the enclosing block (as opposed to being dropped after the let is complete).
//
// To create an implicit ref, however, you must have a borrowed value on the RHS
// already, as in this example (which won't compile before #42640):
//
// let Foo { x, .. } = &Foo { x: ..., ... };
//
// in place of
//
// let Foo { ref x, .. } = Foo { ... };
//
// In the former case (the implicit ref version), the temporary is created by the
// & expression, and its lifetime would be extended to the end of the block (due
// to a different rule, not the below code).
match pat.node {
PatKind::Binding(hir::BindByRef(_), ..) => true,
PatKind::Binding(hir::BindingAnnotation::Ref, ..) |
PatKind::Binding(hir::BindingAnnotation::RefMut, ..) => true,
PatKind::Struct(_, ref field_pats, _) => {
field_pats.iter().any(|fp| is_binding_pat(&fp.node.pat))
+6 -32
View File
@@ -153,10 +153,6 @@ pub struct NamedRegionMap {
// (b) it DOES appear in the arguments.
pub late_bound: NodeSet,
// Contains the node-ids for lifetimes that were (incorrectly) categorized
// as late-bound, until #32330 was fixed.
pub issue_32330: NodeMap<ty::Issue32330>,
// For each type and trait definition, maps type parameters
// to the trait object lifetime defaults computed from them.
pub object_lifetime_defaults: NodeMap<Vec<ObjectLifetimeDefault>>,
@@ -261,7 +257,6 @@ pub fn krate(sess: &Session,
let mut map = NamedRegionMap {
defs: NodeMap(),
late_bound: NodeSet(),
issue_32330: NodeMap(),
object_lifetime_defaults: compute_object_lifetime_defaults(sess, hir_map),
};
sess.track_errors(|| {
@@ -303,7 +298,7 @@ fn visit_nested_body(&mut self, body: hir::BodyId) {
fn visit_item(&mut self, item: &'tcx hir::Item) {
match item.node {
hir::ItemFn(ref decl, _, _, _, ref generics, _) => {
self.visit_early_late(item.id, None, decl, generics, |this| {
self.visit_early_late(None, decl, generics, |this| {
intravisit::walk_item(this, item);
});
}
@@ -355,7 +350,7 @@ fn visit_item(&mut self, item: &'tcx hir::Item) {
fn visit_foreign_item(&mut self, item: &'tcx hir::ForeignItem) {
match item.node {
hir::ForeignItemFn(ref decl, _, ref generics) => {
self.visit_early_late(item.id, None, decl, generics, |this| {
self.visit_early_late(None, decl, generics, |this| {
intravisit::walk_foreign_item(this, item);
})
}
@@ -406,7 +401,6 @@ fn visit_ty(&mut self, ty: &'tcx hir::Ty) {
fn visit_trait_item(&mut self, trait_item: &'tcx hir::TraitItem) {
if let hir::TraitItemKind::Method(ref sig, _) = trait_item.node {
self.visit_early_late(
trait_item.id,
Some(self.hir_map.get_parent(trait_item.id)),
&sig.decl, &sig.generics,
|this| intravisit::walk_trait_item(this, trait_item))
@@ -418,7 +412,6 @@ fn visit_trait_item(&mut self, trait_item: &'tcx hir::TraitItem) {
fn visit_impl_item(&mut self, impl_item: &'tcx hir::ImplItem) {
if let hir::ImplItemKind::Method(ref sig, _) = impl_item.node {
self.visit_early_late(
impl_item.id,
Some(self.hir_map.get_parent(impl_item.id)),
&sig.decl, &sig.generics,
|this| intravisit::walk_impl_item(this, impl_item))
@@ -811,18 +804,13 @@ fn with<F>(&mut self, wrap_scope: Scope, f: F) where
/// bound lifetimes are resolved by name and associated with a binder id (`binder_id`), so the
/// ordering is not important there.
fn visit_early_late<F>(&mut self,
fn_id: ast::NodeId,
parent_id: Option<ast::NodeId>,
decl: &'tcx hir::FnDecl,
generics: &'tcx hir::Generics,
walk: F) where
F: for<'b, 'c> FnOnce(&'b mut LifetimeContext<'c, 'tcx>),
{
let fn_def_id = self.hir_map.local_def_id(fn_id);
insert_late_bound_lifetimes(self.map,
fn_def_id,
decl,
generics);
insert_late_bound_lifetimes(self.map, decl, generics);
// Find the start of nested early scopes, e.g. in methods.
let mut index = 0;
@@ -1549,7 +1537,6 @@ fn insert_lifetime(&mut self,
/// not amongst the inputs to a projection. In other words, `<&'a
/// T as Trait<''b>>::Foo` does not constrain `'a` or `'b`.
fn insert_late_bound_lifetimes(map: &mut NamedRegionMap,
fn_def_id: DefId,
decl: &hir::FnDecl,
generics: &hir::Generics) {
debug!("insert_late_bound_lifetimes(decl={:?}, generics={:?})", decl, generics);
@@ -1607,22 +1594,9 @@ fn insert_late_bound_lifetimes(map: &mut NamedRegionMap,
// any `impl Trait` in the return type? early-bound.
if appears_in_output.impl_trait { continue; }
// does not appear in the inputs, but appears in the return
// type? eventually this will be early-bound, but for now we
// just mark it so we can issue warnings.
let constrained_by_input = constrained_by_input.regions.contains(&name);
let appears_in_output = appears_in_output.regions.contains(&name);
if !constrained_by_input && appears_in_output {
debug!("inserting issue_32330 entry for {:?}, {:?} on {:?}",
lifetime.lifetime.id,
name,
fn_def_id);
map.issue_32330.insert(
lifetime.lifetime.id,
ty::Issue32330 {
fn_def_id,
region_name: name,
});
// does not appear in the inputs, but appears in the return type? early-bound.
if !constrained_by_input.regions.contains(&name) &&
appears_in_output.regions.contains(&name) {
continue;
}
+91 -11
View File
@@ -25,7 +25,7 @@
use ty::fold::{TypeFoldable, TypeFolder, TypeVisitor};
use util::ppaux;
use rustc_back::slice;
use hir::InlineAsm;
use hir::{self, InlineAsm};
use std::ascii;
use std::borrow::{Cow};
use std::cell::Ref;
@@ -894,12 +894,18 @@ pub enum StatementKind<'tcx> {
/// End the current live range for the storage of the local.
StorageDead(Lvalue<'tcx>),
/// Execute a piece of inline Assembly.
InlineAsm {
asm: Box<InlineAsm>,
outputs: Vec<Lvalue<'tcx>>,
inputs: Vec<Operand<'tcx>>
},
/// Assert the given lvalues to be valid inhabitants of their type. These statements are
/// currently only interpreted by miri and only generated when "-Z mir-emit-validate" is passed.
/// See <https://internals.rust-lang.org/t/types-as-contracts/5562/73> for more details.
Validate(ValidationOp, Vec<ValidationOperand<'tcx, Lvalue<'tcx>>>),
/// Mark one terminating point of an extent (i.e. static region).
/// (The starting point(s) arise implicitly from borrows.)
EndRegion(CodeExtent),
@@ -908,6 +914,57 @@ pub enum StatementKind<'tcx> {
Nop,
}
/// The `ValidationOp` describes what happens with each of the operands of a
/// `Validate` statement.
#[derive(Copy, Clone, RustcEncodable, RustcDecodable, PartialEq, Eq)]
pub enum ValidationOp {
/// Recursively traverse the lvalue following the type and validate that all type
/// invariants are maintained. Furthermore, acquire exclusive/read-only access to the
/// memory reachable from the lvalue.
Acquire,
/// Recursive traverse the *mutable* part of the type and relinquish all exclusive
/// access.
Release,
/// Recursive traverse the *mutable* part of the type and relinquish all exclusive
/// access *until* the given region ends. Then, access will be recovered.
Suspend(CodeExtent),
}
impl Debug for ValidationOp {
fn fmt(&self, fmt: &mut Formatter) -> fmt::Result {
use self::ValidationOp::*;
match *self {
Acquire => write!(fmt, "Acquire"),
Release => write!(fmt, "Release"),
// (reuse lifetime rendering policy from ppaux.)
Suspend(ref ce) => write!(fmt, "Suspend({})", ty::ReScope(*ce)),
}
}
}
// This is generic so that it can be reused by miri
#[derive(Clone, RustcEncodable, RustcDecodable)]
pub struct ValidationOperand<'tcx, T> {
pub lval: T,
pub ty: Ty<'tcx>,
pub re: Option<CodeExtent>,
pub mutbl: hir::Mutability,
}
impl<'tcx, T: Debug> Debug for ValidationOperand<'tcx, T> {
fn fmt(&self, fmt: &mut Formatter) -> fmt::Result {
write!(fmt, "{:?}: {:?}", self.lval, self.ty)?;
if let Some(ce) = self.re {
// (reuse lifetime rendering policy from ppaux.)
write!(fmt, "/{}", ty::ReScope(ce))?;
}
if let hir::MutImmutable = self.mutbl {
write!(fmt, " (imm)")?;
}
Ok(())
}
}
impl<'tcx> Debug for Statement<'tcx> {
fn fmt(&self, fmt: &mut Formatter) -> fmt::Result {
use self::StatementKind::*;
@@ -915,6 +972,7 @@ fn fmt(&self, fmt: &mut Formatter) -> fmt::Result {
Assign(ref lv, ref rv) => write!(fmt, "{:?} = {:?}", lv, rv),
// (reuse lifetime rendering policy from ppaux.)
EndRegion(ref ce) => write!(fmt, "EndRegion({})", ty::ReScope(*ce)),
Validate(ref op, ref lvalues) => write!(fmt, "Validate({:?}, {:?})", op, lvalues),
StorageLive(ref lv) => write!(fmt, "StorageLive({:?})", lv),
StorageDead(ref lv) => write!(fmt, "StorageDead({:?})", lv),
SetDiscriminant{lvalue: ref lv, variant_index: index} => {
@@ -963,15 +1021,15 @@ pub struct Static<'tcx> {
/// shared between `Constant` and `Lvalue`. See the aliases
/// `LvalueProjection` etc below.
#[derive(Clone, Debug, PartialEq, Eq, Hash, RustcEncodable, RustcDecodable)]
pub struct Projection<'tcx, B, V> {
pub struct Projection<'tcx, B, V, T> {
pub base: B,
pub elem: ProjectionElem<'tcx, V>,
pub elem: ProjectionElem<'tcx, V, T>,
}
#[derive(Clone, Debug, PartialEq, Eq, Hash, RustcEncodable, RustcDecodable)]
pub enum ProjectionElem<'tcx, V> {
pub enum ProjectionElem<'tcx, V, T> {
Deref,
Field(Field, Ty<'tcx>),
Field(Field, T),
Index(V),
/// These indices are generated by slice patterns. Easiest to explain
@@ -1008,11 +1066,11 @@ pub enum ProjectionElem<'tcx, V> {
/// Alias for projections as they appear in lvalues, where the base is an lvalue
/// and the index is an operand.
pub type LvalueProjection<'tcx> = Projection<'tcx, Lvalue<'tcx>, Operand<'tcx>>;
pub type LvalueProjection<'tcx> = Projection<'tcx, Lvalue<'tcx>, Operand<'tcx>, Ty<'tcx>>;
/// Alias for projections as they appear in lvalues, where the base is an lvalue
/// and the index is an operand.
pub type LvalueElem<'tcx> = ProjectionElem<'tcx, Operand<'tcx>>;
pub type LvalueElem<'tcx> = ProjectionElem<'tcx, Operand<'tcx>, Ty<'tcx>>;
newtype_index!(Field, "field");
@@ -1606,6 +1664,21 @@ fn super_visit_with<V: TypeVisitor<'tcx>>(&self, visitor: &mut V) -> bool {
}
}
impl<'tcx> TypeFoldable<'tcx> for ValidationOperand<'tcx, Lvalue<'tcx>> {
fn super_fold_with<'gcx: 'tcx, F: TypeFolder<'gcx, 'tcx>>(&self, folder: &mut F) -> Self {
ValidationOperand {
lval: self.lval.fold_with(folder),
ty: self.ty.fold_with(folder),
re: self.re,
mutbl: self.mutbl,
}
}
fn super_visit_with<V: TypeVisitor<'tcx>>(&self, visitor: &mut V) -> bool {
self.lval.visit_with(visitor) || self.ty.visit_with(visitor)
}
}
impl<'tcx> TypeFoldable<'tcx> for Statement<'tcx> {
fn super_fold_with<'gcx: 'tcx, F: TypeFolder<'gcx, 'tcx>>(&self, folder: &mut F) -> Self {
use mir::StatementKind::*;
@@ -1630,6 +1703,10 @@ fn super_fold_with<'gcx: 'tcx, F: TypeFolder<'gcx, 'tcx>>(&self, folder: &mut F)
// trait with a `fn fold_extent`.
EndRegion(ref extent) => EndRegion(extent.clone()),
Validate(ref op, ref lvals) =>
Validate(op.clone(),
lvals.iter().map(|operand| operand.fold_with(folder)).collect()),
Nop => Nop,
};
Statement {
@@ -1655,6 +1732,9 @@ fn super_visit_with<V: TypeVisitor<'tcx>>(&self, visitor: &mut V) -> bool {
// trait with a `fn visit_extent`.
EndRegion(ref _extent) => false,
Validate(ref _op, ref lvalues) =>
lvalues.iter().any(|ty_and_lvalue| ty_and_lvalue.visit_with(visitor)),
Nop => false,
}
}
@@ -1857,8 +1937,8 @@ fn super_visit_with<V: TypeVisitor<'tcx>>(&self, visitor: &mut V) -> bool {
}
}
impl<'tcx, B, V> TypeFoldable<'tcx> for Projection<'tcx, B, V>
where B: TypeFoldable<'tcx>, V: TypeFoldable<'tcx>
impl<'tcx, B, V, T> TypeFoldable<'tcx> for Projection<'tcx, B, V, T>
where B: TypeFoldable<'tcx>, V: TypeFoldable<'tcx>, T: TypeFoldable<'tcx>
{
fn super_fold_with<'gcx: 'tcx, F: TypeFolder<'gcx, 'tcx>>(&self, folder: &mut F) -> Self {
use mir::ProjectionElem::*;
@@ -1866,7 +1946,7 @@ fn super_fold_with<'gcx: 'tcx, F: TypeFolder<'gcx, 'tcx>>(&self, folder: &mut F)
let base = self.base.fold_with(folder);
let elem = match self.elem {
Deref => Deref,
Field(f, ty) => Field(f, ty.fold_with(folder)),
Field(f, ref ty) => Field(f, ty.fold_with(folder)),
Index(ref v) => Index(v.fold_with(folder)),
ref elem => elem.clone()
};
@@ -1882,7 +1962,7 @@ fn super_visit_with<Vs: TypeVisitor<'tcx>>(&self, visitor: &mut Vs) -> bool {
self.base.visit_with(visitor) ||
match self.elem {
Field(_, ty) => ty.visit_with(visitor),
Field(_, ref ty) => ty.visit_with(visitor),
Index(ref v) => v.visit_with(visitor),
_ => false
}
+23 -6
View File
@@ -14,7 +14,6 @@
use ty::{ClosureSubsts, Region, Ty};
use mir::*;
use rustc_const_math::ConstUsize;
use rustc_data_structures::indexed_vec::Idx;
use syntax_pos::Span;
// # The MIR Visitor
@@ -264,9 +263,15 @@ fn visit_visibility_scope(&mut self,
fn super_mir(&mut self,
mir: & $($mutability)* Mir<'tcx>) {
for index in 0..mir.basic_blocks().len() {
let block = BasicBlock::new(index);
self.visit_basic_block_data(block, &$($mutability)* mir[block]);
// for best performance, we want to use an iterator rather
// than a for-loop, to avoid calling Mir::invalidate for
// each basic block.
macro_rules! basic_blocks {
(mut) => (mir.basic_blocks_mut().iter_enumerated_mut());
() => (mir.basic_blocks().iter_enumerated());
};
for (bb, data) in basic_blocks!($($mutability)*) {
self.visit_basic_block_data(bb, data);
}
for scope in &$($mutability)* mir.visibility_scopes {
@@ -337,6 +342,13 @@ fn super_statement(&mut self,
self.visit_assign(block, lvalue, rvalue, location);
}
StatementKind::EndRegion(_) => {}
StatementKind::Validate(_, ref $($mutability)* lvalues) => {
for operand in lvalues {
self.visit_lvalue(& $($mutability)* operand.lval,
LvalueContext::Validate, location);
self.visit_ty(& $($mutability)* operand.ty, Lookup::Loc(location));
}
}
StatementKind::SetDiscriminant{ ref $($mutability)* lvalue, .. } => {
self.visit_lvalue(lvalue, LvalueContext::Store, location);
}
@@ -807,6 +819,9 @@ pub enum LvalueContext<'tcx> {
// Starting and ending a storage live range
StorageLive,
StorageDead,
// Validation command
Validate,
}
impl<'tcx> LvalueContext<'tcx> {
@@ -853,7 +868,8 @@ pub fn is_mutating_use(&self) -> bool {
LvalueContext::Borrow { kind: BorrowKind::Shared, .. } |
LvalueContext::Borrow { kind: BorrowKind::Unique, .. } |
LvalueContext::Projection(Mutability::Not) | LvalueContext::Consume |
LvalueContext::StorageLive | LvalueContext::StorageDead => false,
LvalueContext::StorageLive | LvalueContext::StorageDead |
LvalueContext::Validate => false,
}
}
@@ -865,7 +881,8 @@ pub fn is_nonmutating_use(&self) -> bool {
LvalueContext::Projection(Mutability::Not) | LvalueContext::Consume => true,
LvalueContext::Borrow { kind: BorrowKind::Mut, .. } | LvalueContext::Store |
LvalueContext::Call | LvalueContext::Projection(Mutability::Mut) |
LvalueContext::Drop | LvalueContext::StorageLive | LvalueContext::StorageDead => false,
LvalueContext::Drop | LvalueContext::StorageLive | LvalueContext::StorageDead |
LvalueContext::Validate => false,
}
}
+22
View File
@@ -1025,6 +1025,9 @@ fn parse_optimization_fuel(slot: &mut Option<(String, u64)>, v: Option<&str>) ->
"the directory the MIR is dumped into"),
dump_mir_exclude_pass_number: bool = (false, parse_bool, [UNTRACKED],
"if set, exclude the pass number when dumping MIR (used in tests)"),
mir_emit_validate: usize = (0, parse_uint, [TRACKED],
"emit Validate MIR statements, interpreted e.g. by miri (0: do not emit; 1: if function \
contains unsafe block, only validate arguments; 2: always emit full validation)"),
perf_stats: bool = (false, parse_bool, [UNTRACKED],
"print some performance-related statistics"),
hir_stats: bool = (false, parse_bool, [UNTRACKED],
@@ -1059,6 +1062,8 @@ fn parse_optimization_fuel(slot: &mut Option<(String, u64)>, v: Option<&str>) ->
"choose which RELRO level to use"),
nll: bool = (false, parse_bool, [UNTRACKED],
"run the non-lexical lifetimes MIR pass"),
trans_time_graph: bool = (false, parse_bool, [UNTRACKED],
"generate a graphical HTML report of time spent in trans and LLVM"),
}
pub fn default_lib_output() -> CrateType {
@@ -1498,6 +1503,23 @@ pub fn build_session_options_and_crate_config(matches: &getopts::Matches)
early_error(error_format, "Value for codegen units must be a positive nonzero integer");
}
// It's possible that we have `codegen_units > 1` but only one item in
// `trans.modules`. We could theoretically proceed and do LTO in that
// case, but it would be confusing to have the validity of
// `-Z lto -C codegen-units=2` depend on details of the crate being
// compiled, so we complain regardless.
if cg.lto && cg.codegen_units > 1 {
// This case is impossible to handle because LTO expects to be able
// to combine the entire crate and all its dependencies into a
// single compilation unit, but each codegen unit is in a separate
// LLVM context, so they can't easily be combined.
early_error(error_format, "can't perform LTO when using multiple codegen units");
}
if cg.lto && debugging_opts.incremental.is_some() {
early_error(error_format, "can't perform LTO when compiling incrementally");
}
let mut prints = Vec::<PrintRequest>::new();
if cg.target_cpu.as_ref().map_or(false, |s| s == "help") {
prints.push(PrintRequest::TargetCPUs);
+21 -36
View File
@@ -463,13 +463,19 @@ fn opt_normalize_projection_type<'a, 'b, 'gcx, 'tcx>(
selcx.infcx().report_overflow_error(&obligation, false);
}
Err(ProjectionCacheEntry::NormalizedTy(ty)) => {
// If we find the value in the cache, then the obligations
// have already been returned from the previous entry (and
// should therefore have been honored).
// If we find the value in the cache, then return it along
// with the obligations that went along with it. Note
// that, when using a fulfillment context, these
// obligations could in principle be ignored: they have
// already been registered when the cache entry was
// created (and hence the new ones will quickly be
// discarded as duplicated). But when doing trait
// evaluation this is not the case, and dropping the trait
// evaluations can causes ICEs (e.g. #43132).
debug!("opt_normalize_projection_type: \
found normalized ty `{:?}`",
ty);
return Some(NormalizedTy { value: ty, obligations: vec![] });
return Some(ty);
}
Err(ProjectionCacheEntry::Error) => {
debug!("opt_normalize_projection_type: \
@@ -480,9 +486,7 @@ fn opt_normalize_projection_type<'a, 'b, 'gcx, 'tcx>(
let obligation = Obligation::with_depth(cause.clone(), depth, param_env, projection_ty);
match project_type(selcx, &obligation) {
Ok(ProjectedTy::Progress(Progress { ty: projected_ty,
mut obligations,
cacheable })) => {
Ok(ProjectedTy::Progress(Progress { ty: projected_ty, mut obligations })) => {
// if projection succeeded, then what we get out of this
// is also non-normalized (consider: it was derived from
// an impl, where-clause etc) and hence we must
@@ -491,12 +495,10 @@ fn opt_normalize_projection_type<'a, 'b, 'gcx, 'tcx>(
debug!("opt_normalize_projection_type: \
projected_ty={:?} \
depth={} \
obligations={:?} \
cacheable={:?}",
obligations={:?}",
projected_ty,
depth,
obligations,
cacheable);
obligations);
let result = if projected_ty.has_projection_types() {
let mut normalizer = AssociatedTypeNormalizer::new(selcx,
@@ -521,8 +523,7 @@ fn opt_normalize_projection_type<'a, 'b, 'gcx, 'tcx>(
obligations,
}
};
infcx.projection_cache.borrow_mut()
.complete(projection_ty, &result, cacheable);
infcx.projection_cache.borrow_mut().complete(projection_ty, &result);
Some(result)
}
Ok(ProjectedTy::NoProgress(projected_ty)) => {
@@ -533,8 +534,7 @@ fn opt_normalize_projection_type<'a, 'b, 'gcx, 'tcx>(
value: projected_ty,
obligations: vec![]
};
infcx.projection_cache.borrow_mut()
.complete(projection_ty, &result, true);
infcx.projection_cache.borrow_mut().complete(projection_ty, &result);
Some(result)
}
Err(ProjectionTyError::TooManyCandidates) => {
@@ -607,7 +607,6 @@ enum ProjectedTy<'tcx> {
struct Progress<'tcx> {
ty: Ty<'tcx>,
obligations: Vec<PredicateObligation<'tcx>>,
cacheable: bool,
}
impl<'tcx> Progress<'tcx> {
@@ -615,7 +614,6 @@ fn error<'a,'gcx>(tcx: TyCtxt<'a,'gcx,'tcx>) -> Self {
Progress {
ty: tcx.types.err,
obligations: vec![],
cacheable: true
}
}
@@ -1286,7 +1284,6 @@ fn confirm_param_env_candidate<'cx, 'gcx, 'tcx>(
Progress {
ty: ty_match.value,
obligations,
cacheable: ty_match.unconstrained_regions.is_empty(),
}
}
Err(e) => {
@@ -1330,7 +1327,6 @@ fn confirm_impl_candidate<'cx, 'gcx, 'tcx>(
Progress {
ty: ty.subst(tcx, substs),
obligations: nested,
cacheable: true
}
}
@@ -1394,7 +1390,7 @@ enum ProjectionCacheEntry<'tcx> {
InProgress,
Ambiguous,
Error,
NormalizedTy(Ty<'tcx>),
NormalizedTy(NormalizedTy<'tcx>),
}
// NB: intentionally not Clone
@@ -1438,22 +1434,11 @@ fn try_start(&mut self, key: ty::ProjectionTy<'tcx>)
Ok(())
}
/// Indicates that `key` was normalized to `value`. If `cacheable` is false,
/// then this result is sadly not cacheable.
fn complete(&mut self,
key: ty::ProjectionTy<'tcx>,
value: &NormalizedTy<'tcx>,
cacheable: bool) {
let fresh_key = if cacheable {
debug!("ProjectionCacheEntry::complete: adding cache entry: key={:?}, value={:?}",
key, value);
self.map.insert(key, ProjectionCacheEntry::NormalizedTy(value.value))
} else {
debug!("ProjectionCacheEntry::complete: cannot cache: key={:?}, value={:?}",
key, value);
!self.map.remove(key)
};
/// Indicates that `key` was normalized to `value`.
fn complete(&mut self, key: ty::ProjectionTy<'tcx>, value: &NormalizedTy<'tcx>) {
debug!("ProjectionCacheEntry::complete: adding cache entry: key={:?}, value={:?}",
key, value);
let fresh_key = self.map.insert(key, ProjectionCacheEntry::NormalizedTy(value.clone()));
assert!(!fresh_key, "never started projecting `{:?}`", key);
}
+35
View File
@@ -0,0 +1,35 @@
// Copyright 2017 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use hir::BindingAnnotation::*;
use hir::BindingAnnotation;
use hir::Mutability;
#[derive(Clone, PartialEq, Eq, RustcEncodable, RustcDecodable, Hash, Debug, Copy)]
pub enum BindingMode {
BindByReference(Mutability),
BindByValue(Mutability),
}
impl BindingMode {
pub fn convert(ba: BindingAnnotation) -> BindingMode {
match ba {
Unannotated => BindingMode::BindByValue(Mutability::MutImmutable),
Mutable => BindingMode::BindByValue(Mutability::MutMutable),
Ref => BindingMode::BindByReference(Mutability::MutImmutable),
RefMut => BindingMode::BindByReference(Mutability::MutMutable),
}
}
}
impl_stable_hash_for!(enum self::BindingMode {
BindByReference(mutability),
BindByValue(mutability)
});
+5
View File
@@ -40,6 +40,7 @@
use ty::inhabitedness::DefIdForest;
use ty::maps;
use ty::steal::Steal;
use ty::BindingMode;
use util::nodemap::{NodeMap, NodeSet, DefIdSet};
use util::nodemap::{FxHashMap, FxHashSet};
use rustc_data_structures::accumulate_vec::AccumulateVec;
@@ -223,6 +224,9 @@ pub struct TypeckTables<'tcx> {
pub adjustments: NodeMap<Vec<ty::adjustment::Adjustment<'tcx>>>,
// Stores the actual binding mode for all instances of hir::BindingAnnotation.
pub pat_binding_modes: NodeMap<BindingMode>,
/// Borrows
pub upvar_capture_map: ty::UpvarCaptureMap<'tcx>,
@@ -278,6 +282,7 @@ pub fn empty() -> TypeckTables<'tcx> {
node_types: FxHashMap(),
node_substs: NodeMap(),
adjustments: NodeMap(),
pat_binding_modes: NodeMap(),
upvar_capture_map: FxHashMap(),
generator_sigs: NodeMap(),
generator_interiors: NodeMap(),
+7 -7
View File
@@ -39,8 +39,8 @@ pub enum TypeError<'tcx> {
RegionsDoesNotOutlive(Region<'tcx>, Region<'tcx>),
RegionsNotSame(Region<'tcx>, Region<'tcx>),
RegionsNoOverlap(Region<'tcx>, Region<'tcx>),
RegionsInsufficientlyPolymorphic(BoundRegion, Region<'tcx>, Option<Box<ty::Issue32330>>),
RegionsOverlyPolymorphic(BoundRegion, Region<'tcx>, Option<Box<ty::Issue32330>>),
RegionsInsufficientlyPolymorphic(BoundRegion, Region<'tcx>),
RegionsOverlyPolymorphic(BoundRegion, Region<'tcx>),
Sorts(ExpectedFound<Ty<'tcx>>),
IntMismatch(ExpectedFound<ty::IntVarValue>),
FloatMismatch(ExpectedFound<ast::FloatTy>),
@@ -116,13 +116,13 @@ fn report_maybe_different(f: &mut fmt::Formatter,
RegionsNoOverlap(..) => {
write!(f, "lifetimes do not intersect")
}
RegionsInsufficientlyPolymorphic(br, _, _) => {
RegionsInsufficientlyPolymorphic(br, _) => {
write!(f,
"expected bound lifetime parameter{}{}, found concrete lifetime",
if br.is_named() { " " } else { "" },
br)
}
RegionsOverlyPolymorphic(br, _, _) => {
RegionsOverlyPolymorphic(br, _) => {
write!(f,
"expected concrete lifetime, found bound lifetime parameter{}{}",
if br.is_named() { " " } else { "" },
@@ -258,15 +258,15 @@ pub fn note_and_explain_type_err(self,
self.note_and_explain_region(db, "...does not overlap ",
region2, "");
}
RegionsInsufficientlyPolymorphic(_, conc_region, _) => {
RegionsInsufficientlyPolymorphic(_, conc_region) => {
self.note_and_explain_region(db, "concrete lifetime that was found is ",
conc_region, "");
}
RegionsOverlyPolymorphic(_, &ty::ReVar(_), _) => {
RegionsOverlyPolymorphic(_, &ty::ReVar(_)) => {
// don't bother to print out the message below for
// inference variables, it's not very illuminating.
}
RegionsOverlyPolymorphic(_, conc_region, _) => {
RegionsOverlyPolymorphic(_, conc_region) => {
self.note_and_explain_region(db, "expected concrete lifetime is ",
conc_region, "");
}
+14 -14
View File
@@ -581,14 +581,14 @@ pub struct Struct {
pub min_size: Size,
}
// Info required to optimize struct layout.
/// Info required to optimize struct layout.
#[derive(Copy, Clone, Eq, PartialEq, Ord, PartialOrd, Debug)]
enum StructKind {
// A tuple, closure, or univariant which cannot be coerced to unsized.
/// A tuple, closure, or univariant which cannot be coerced to unsized.
AlwaysSizedUnivariant,
// A univariant, the last field of which may be coerced to unsized.
/// A univariant, the last field of which may be coerced to unsized.
MaybeUnsizedUnivariant,
// A univariant, but part of an enum.
/// A univariant, but part of an enum.
EnumVariant,
}
@@ -1020,7 +1020,7 @@ pub enum Layout {
/// TyRawPtr or TyRef with a !Sized pointee.
FatPointer {
metadata: Primitive,
// If true, the pointer cannot be null.
/// If true, the pointer cannot be null.
non_zero: bool
},
@@ -1031,8 +1031,8 @@ pub enum Layout {
discr: Integer,
signed: bool,
non_zero: bool,
// Inclusive discriminant range.
// If min > max, it represents min...u64::MAX followed by 0...max.
/// Inclusive discriminant range.
/// If min > max, it represents min...u64::MAX followed by 0...max.
// FIXME(eddyb) always use the shortest range, e.g. by finding
// the largest space between two consecutive discriminants and
// taking everything else as the (shortest) discriminant range.
@@ -1043,7 +1043,7 @@ pub enum Layout {
/// Single-case enums, and structs/tuples.
Univariant {
variant: Struct,
// If true, the structure is NonZero.
/// If true, the structure is NonZero.
// FIXME(eddyb) use a newtype Layout kind for this.
non_zero: bool
},
@@ -1084,9 +1084,9 @@ pub enum Layout {
StructWrappedNullablePointer {
nndiscr: u64,
nonnull: Struct,
// N.B. There is a 0 at the start, for LLVM GEP through a pointer.
/// N.B. There is a 0 at the start, for LLVM GEP through a pointer.
discrfield: FieldPath,
// Like discrfield, but in source order. For debuginfo.
/// Like discrfield, but in source order. For debuginfo.
discrfield_source: FieldPath
}
}
@@ -1954,11 +1954,11 @@ pub enum SizeSkeleton<'tcx> {
/// A potentially-fat pointer.
Pointer {
// If true, this pointer is never null.
/// If true, this pointer is never null.
non_zero: bool,
// The type which determines the unsized metadata, if any,
// of this pointer. Either a type parameter or a projection
// depending on one, with regions erased.
/// The type which determines the unsized metadata, if any,
/// of this pointer. Either a type parameter or a projection
/// depending on one, with regions erased.
tail: Ty<'tcx>
}
}
+18 -25
View File
@@ -9,7 +9,7 @@
// except according to those terms.
use dep_graph::{DepConstructor, DepNode, DepNodeIndex};
use hir::def_id::{CrateNum, CRATE_DEF_INDEX, DefId, LOCAL_CRATE};
use hir::def_id::{CrateNum, DefId, LOCAL_CRATE};
use hir::def::Def;
use hir;
use middle::const_val;
@@ -942,7 +942,7 @@ fn default() -> Self {
/// Checks all types in the krate for overlap in their inherent impls. Reports errors.
/// Not meant to be used directly outside of coherence.
/// (Defined only for LOCAL_CRATE)
[] crate_inherent_impls_overlap_check: crate_inherent_impls_dep_node(CrateNum) -> (),
[] crate_inherent_impls_overlap_check: inherent_impls_overlap_check_dep_node(CrateNum) -> (),
/// Results of evaluating const items or constants embedded in
/// other items (such as enum variant explicit discriminants).
@@ -1025,6 +1025,10 @@ fn crate_inherent_impls_dep_node<'tcx>(_: CrateNum) -> DepConstructor<'tcx> {
DepConstructor::Coherence
}
fn inherent_impls_overlap_check_dep_node<'tcx>(_: CrateNum) -> DepConstructor<'tcx> {
DepConstructor::CoherenceInherentImplOverlapCheck
}
fn reachability_dep_node<'tcx>(_: CrateNum) -> DepConstructor<'tcx> {
DepConstructor::Reachability
}
@@ -1043,10 +1047,9 @@ fn typeck_item_bodies_dep_node<'tcx>(_: CrateNum) -> DepConstructor<'tcx> {
DepConstructor::TypeckBodiesKrate
}
fn const_eval_dep_node<'tcx>(key: ty::ParamEnvAnd<'tcx, (DefId, &'tcx Substs<'tcx>)>)
fn const_eval_dep_node<'tcx>(_: ty::ParamEnvAnd<'tcx, (DefId, &'tcx Substs<'tcx>)>)
-> DepConstructor<'tcx> {
let (def_id, substs) = key.value;
DepConstructor::ConstEval { def_id, substs }
DepConstructor::ConstEval
}
fn mir_keys<'tcx>(_: CrateNum) -> DepConstructor<'tcx> {
@@ -1061,32 +1064,22 @@ fn relevant_trait_impls_for<'tcx>((def_id, t): (DefId, SimplifiedType)) -> DepCo
DepConstructor::RelevantTraitImpls(def_id, t)
}
fn is_copy_dep_node<'tcx>(key: ty::ParamEnvAnd<'tcx, Ty<'tcx>>) -> DepConstructor<'tcx> {
let def_id = ty::item_path::characteristic_def_id_of_type(key.value)
.unwrap_or(DefId::local(CRATE_DEF_INDEX));
DepConstructor::IsCopy(def_id)
fn is_copy_dep_node<'tcx>(_: ty::ParamEnvAnd<'tcx, Ty<'tcx>>) -> DepConstructor<'tcx> {
DepConstructor::IsCopy
}
fn is_sized_dep_node<'tcx>(key: ty::ParamEnvAnd<'tcx, Ty<'tcx>>) -> DepConstructor<'tcx> {
let def_id = ty::item_path::characteristic_def_id_of_type(key.value)
.unwrap_or(DefId::local(CRATE_DEF_INDEX));
DepConstructor::IsSized(def_id)
fn is_sized_dep_node<'tcx>(_: ty::ParamEnvAnd<'tcx, Ty<'tcx>>) -> DepConstructor<'tcx> {
DepConstructor::IsSized
}
fn is_freeze_dep_node<'tcx>(key: ty::ParamEnvAnd<'tcx, Ty<'tcx>>) -> DepConstructor<'tcx> {
let def_id = ty::item_path::characteristic_def_id_of_type(key.value)
.unwrap_or(DefId::local(CRATE_DEF_INDEX));
DepConstructor::IsFreeze(def_id)
fn is_freeze_dep_node<'tcx>(_: ty::ParamEnvAnd<'tcx, Ty<'tcx>>) -> DepConstructor<'tcx> {
DepConstructor::IsFreeze
}
fn needs_drop_dep_node<'tcx>(key: ty::ParamEnvAnd<'tcx, Ty<'tcx>>) -> DepConstructor<'tcx> {
let def_id = ty::item_path::characteristic_def_id_of_type(key.value)
.unwrap_or(DefId::local(CRATE_DEF_INDEX));
DepConstructor::NeedsDrop(def_id)
fn needs_drop_dep_node<'tcx>(_: ty::ParamEnvAnd<'tcx, Ty<'tcx>>) -> DepConstructor<'tcx> {
DepConstructor::NeedsDrop
}
fn layout_dep_node<'tcx>(key: ty::ParamEnvAnd<'tcx, Ty<'tcx>>) -> DepConstructor<'tcx> {
let def_id = ty::item_path::characteristic_def_id_of_type(key.value)
.unwrap_or(DefId::local(CRATE_DEF_INDEX));
DepConstructor::Layout(def_id)
fn layout_dep_node<'tcx>(_: ty::ParamEnvAnd<'tcx, Ty<'tcx>>) -> DepConstructor<'tcx> {
DepConstructor::Layout
}
+6 -4
View File
@@ -68,13 +68,15 @@
pub use self::sty::{ExistentialProjection, PolyExistentialProjection};
pub use self::sty::{BoundRegion, EarlyBoundRegion, FreeRegion, Region};
pub use self::sty::RegionKind;
pub use self::sty::Issue32330;
pub use self::sty::{TyVid, IntVid, FloatVid, RegionVid, SkolemizedRegionVid};
pub use self::sty::BoundRegion::*;
pub use self::sty::InferTy::*;
pub use self::sty::RegionKind::*;
pub use self::sty::TypeVariants::*;
pub use self::binding::BindingMode;
pub use self::binding::BindingMode::*;
pub use self::context::{TyCtxt, GlobalArenas, tls};
pub use self::context::{Lift, TypeckTables};
@@ -85,6 +87,7 @@
pub use self::maps::queries;
pub mod adjustment;
pub mod binding;
pub mod cast;
pub mod error;
pub mod fast_reject;
@@ -158,7 +161,7 @@ pub struct ImplHeader<'tcx> {
pub predicates: Vec<Predicate<'tcx>>,
}
#[derive(Copy, Clone, Debug)]
#[derive(Copy, Clone, Debug, PartialEq, Eq)]
pub struct AssociatedItem {
pub def_id: DefId,
pub name: Name,
@@ -172,7 +175,7 @@ pub struct AssociatedItem {
pub method_has_self_argument: bool,
}
#[derive(Copy, Clone, PartialEq, Eq, Debug, RustcEncodable, RustcDecodable)]
#[derive(Copy, Clone, PartialEq, Eq, Debug, Hash, RustcEncodable, RustcDecodable)]
pub enum AssociatedKind {
Const,
Method,
@@ -679,7 +682,6 @@ pub struct RegionParameterDef {
pub name: Name,
pub def_id: DefId,
pub index: u32,
pub issue_32330: Option<ty::Issue32330>,
/// `pure_wrt_drop`, set by the (unsafe) `#[may_dangle]` attribute
/// on generic parameter `'a`, asserts data of lifetime `'a`
+10 -14
View File
@@ -377,13 +377,11 @@ fn lift_to_tcx<'b, 'gcx>(&self, tcx: TyCtxt<'b, 'gcx, 'tcx>) -> Option<Self::Lif
RegionsNoOverlap(a, b) => {
return tcx.lift(&(a, b)).map(|(a, b)| RegionsNoOverlap(a, b))
}
RegionsInsufficientlyPolymorphic(a, b, ref c) => {
let c = c.clone();
return tcx.lift(&b).map(|b| RegionsInsufficientlyPolymorphic(a, b, c))
RegionsInsufficientlyPolymorphic(a, b) => {
return tcx.lift(&b).map(|b| RegionsInsufficientlyPolymorphic(a, b))
}
RegionsOverlyPolymorphic(a, b, ref c) => {
let c = c.clone();
return tcx.lift(&b).map(|b| RegionsOverlyPolymorphic(a, b, c))
RegionsOverlyPolymorphic(a, b) => {
return tcx.lift(&b).map(|b| RegionsOverlyPolymorphic(a, b))
}
IntMismatch(x) => IntMismatch(x),
FloatMismatch(x) => FloatMismatch(x),
@@ -1065,13 +1063,11 @@ fn super_fold_with<'gcx: 'tcx, F: TypeFolder<'gcx, 'tcx>>(&self, folder: &mut F)
RegionsNoOverlap(a, b) => {
RegionsNoOverlap(a.fold_with(folder), b.fold_with(folder))
},
RegionsInsufficientlyPolymorphic(a, b, ref c) => {
let c = c.clone();
RegionsInsufficientlyPolymorphic(a, b.fold_with(folder), c)
RegionsInsufficientlyPolymorphic(a, b) => {
RegionsInsufficientlyPolymorphic(a, b.fold_with(folder))
},
RegionsOverlyPolymorphic(a, b, ref c) => {
let c = c.clone();
RegionsOverlyPolymorphic(a, b.fold_with(folder), c)
RegionsOverlyPolymorphic(a, b) => {
RegionsOverlyPolymorphic(a, b.fold_with(folder))
},
IntMismatch(x) => IntMismatch(x),
FloatMismatch(x) => FloatMismatch(x),
@@ -1097,8 +1093,8 @@ fn super_visit_with<V: TypeVisitor<'tcx>>(&self, visitor: &mut V) -> bool {
RegionsNoOverlap(a, b) => {
a.visit_with(visitor) || b.visit_with(visitor)
},
RegionsInsufficientlyPolymorphic(_, b, _) |
RegionsOverlyPolymorphic(_, b, _) => {
RegionsInsufficientlyPolymorphic(_, b) |
RegionsOverlyPolymorphic(_, b) => {
b.visit_with(visitor)
},
Sorts(x) => x.visit_with(visitor),
-14
View File
@@ -77,20 +77,6 @@ pub fn is_named(&self) -> bool {
}
}
/// When a region changed from late-bound to early-bound when #32330
/// was fixed, its `RegionParameterDef` will have one of these
/// structures that we can use to give nicer errors.
#[derive(Copy, Clone, Debug, PartialEq, PartialOrd, Eq, Ord, Hash,
RustcEncodable, RustcDecodable)]
pub struct Issue32330 {
/// fn where is region declared
pub fn_def_id: DefId,
/// name of region; duplicates the info in BrNamed but convenient
/// to have it here, and this code is only temporary
pub region_name: ast::Name,
}
/// NB: If you change this, you'll probably want to change the corresponding
/// AST structure in libsyntax/ast.rs as well.
#[derive(Clone, PartialEq, Eq, Hash, Debug, RustcEncodable, RustcDecodable)]
+27 -5
View File
@@ -57,6 +57,32 @@ pub fn time<T, F>(do_it: bool, what: &str, f: F) -> T where
let rv = f();
let dur = start.elapsed();
print_time_passes_entry_internal(what, dur);
TIME_DEPTH.with(|slot| slot.set(old));
rv
}
/// Prints a single time-passes entry for a pre-measured duration.
///
/// Bumps the `TIME_DEPTH` counter around the actual printing so that
/// entries emitted from within this one indent correctly, then restores
/// the previous depth. Does nothing unless `do_it` is set.
pub fn print_time_passes_entry(do_it: bool, what: &str, dur: Duration) {
    if !do_it {
        return;
    }

    // Remember the current depth and descend one level for this entry.
    let previous_depth = TIME_DEPTH.with(|depth| {
        let d = depth.get();
        depth.set(d + 1);
        d
    });

    print_time_passes_entry_internal(what, dur);

    // Restore the depth we observed on entry.
    TIME_DEPTH.with(|depth| depth.set(previous_depth));
}
fn print_time_passes_entry_internal(what: &str, dur: Duration) {
let indentation = TIME_DEPTH.with(|slot| slot.get());
let mem_string = match get_resident() {
Some(n) => {
let mb = n as f64 / 1_000_000.0;
@@ -65,14 +91,10 @@ pub fn time<T, F>(do_it: bool, what: &str, f: F) -> T where
None => "".to_owned(),
};
println!("{}time: {}{}\t{}",
repeat(" ").take(old).collect::<String>(),
repeat(" ").take(indentation).collect::<String>(),
duration_to_secs_str(dur),
mem_string,
what);
TIME_DEPTH.with(|slot| slot.set(old));
rv
}
// Hack up our own formatting for the duration to make it easier for scripts
+11
View File
@@ -0,0 +1,11 @@
[package]
authors = ["The Rust Project Developers"]
name = "rustc_apfloat"
version = "0.0.0"
[lib]
name = "rustc_apfloat"
path = "lib.rs"
[dependencies]
rustc_bitflags = { path = "../librustc_bitflags" }
+2733
View File
@@ -0,0 +1,2733 @@
// Copyright 2017 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use {Category, ExpInt, IEK_INF, IEK_NAN, IEK_ZERO};
use {Float, FloatConvert, ParseError, Round, Status, StatusAnd};
use std::cmp::{self, Ordering};
use std::convert::TryFrom;
use std::fmt::{self, Write};
use std::marker::PhantomData;
use std::mem;
use std::ops::Neg;
/// A software floating point value whose format (width, precision,
/// exponent range) is fixed by the `S: Semantics` type parameter.
#[must_use]
pub struct IeeeFloat<S> {
    /// Absolute significand value (including the integer bit).
    sig: [Limb; 1],
    /// The signed unbiased exponent of the value.
    exp: ExpInt,
    /// What kind of floating point number this is.
    category: Category,
    /// Sign bit of the number.
    sign: bool,
    /// Ties the value to its `Semantics` type without storing any data.
    marker: PhantomData<S>,
}
/// Fundamental unit of big integer arithmetic, but also
/// large to store the largest significands by itself.
type Limb = u128;
/// Number of bits in a `Limb`.
const LIMB_BITS: usize = 128;
/// Returns how many limbs are needed to hold `bits` bits,
/// i.e. `ceil(bits / LIMB_BITS)`; zero bits need zero limbs.
fn limbs_for_bits(bits: usize) -> usize {
    // Written as floor-division plus a remainder check rather than the
    // usual `(bits + LIMB_BITS - 1) / LIMB_BITS`, which overflows
    // (panicking in debug, wrapping in release) for `bits` within
    // `LIMB_BITS - 1` of `usize::MAX`.
    bits / LIMB_BITS + (bits % LIMB_BITS != 0) as usize
}
/// Enum that represents what fraction of the least significant bit
/// the truncated bits of an fp number represent.
///
/// This essentially combines the roles of guard and sticky bits,
/// and is what rounding decisions are based on after an operation
/// loses precision.
#[must_use]
#[derive(Copy, Clone, PartialEq, Eq, Debug)]
enum Loss {
    // Example of truncated bits:
    ExactlyZero, // 000000
    LessThanHalf, // 0xxxxx x's not all zero
    ExactlyHalf, // 100000
    MoreThanHalf, // 1xxxxx x's not all zero
}
/// Represents floating point arithmetic semantics.
pub trait Semantics: Sized {
    /// Total number of bits in the in-memory format.
    const BITS: usize;

    /// Number of bits in the significand. This includes the integer bit.
    const PRECISION: usize;

    /// The largest E such that 2^E is representable; this matches the
    /// definition of IEEE 754.
    const MAX_EXP: ExpInt;

    /// The smallest E such that 2^E is a normalized number; this
    /// matches the definition of IEEE 754.
    const MIN_EXP: ExpInt = -Self::MAX_EXP + 1;

    /// The significand bit that marks NaN as quiet.
    const QNAN_BIT: usize = Self::PRECISION - 2;

    /// The significand bitpattern to mark a NaN as quiet.
    /// NOTE: for X87DoubleExtended we need to set two bits instead of one.
    const QNAN_SIGNIFICAND: Limb = 1 << Self::QNAN_BIT;

    /// Decodes the raw in-memory bit pattern `bits` into an `IeeeFloat`,
    /// classifying it as zero / infinity / NaN / normal. Denormals are
    /// folded into `Category::Normal` with the exponent clamped to
    /// `MIN_EXP` and the (implicit) integer bit left clear.
    fn from_bits(bits: u128) -> IeeeFloat<Self> {
        // This default impl assumes an implicit integer bit, so the
        // stored significand is `PRECISION - 1` bits wide.
        assert!(Self::BITS > Self::PRECISION);

        let sign = bits & (1 << (Self::BITS - 1));
        let exponent = (bits & !sign) >> (Self::PRECISION - 1);
        let mut r = IeeeFloat {
            sig: [bits & ((1 << (Self::PRECISION - 1)) - 1)],
            // Convert the exponent from its bias representation to a signed integer.
            exp: (exponent as ExpInt) - Self::MAX_EXP,
            category: Category::Zero,
            sign: sign != 0,
            marker: PhantomData,
        };

        if r.exp == Self::MIN_EXP - 1 && r.sig == [0] {
            // Exponent, significand meaningless.
            r.category = Category::Zero;
        } else if r.exp == Self::MAX_EXP + 1 && r.sig == [0] {
            // Exponent, significand meaningless.
            r.category = Category::Infinity;
        } else if r.exp == Self::MAX_EXP + 1 && r.sig != [0] {
            // Sign, exponent, significand meaningless.
            r.category = Category::NaN;
        } else {
            r.category = Category::Normal;
            if r.exp == Self::MIN_EXP - 1 {
                // Denormal.
                r.exp = Self::MIN_EXP;
            } else {
                // Set integer bit.
                sig::set_bit(&mut r.sig, Self::PRECISION - 1);
            }
        }

        r
    }

    /// Encodes `x` back into the raw in-memory bit pattern; the inverse
    /// of `from_bits`.
    fn to_bits(x: IeeeFloat<Self>) -> u128 {
        assert!(Self::BITS > Self::PRECISION);

        // Split integer bit from significand.
        let integer_bit = sig::get_bit(&x.sig, Self::PRECISION - 1);
        let mut significand = x.sig[0] & ((1 << (Self::PRECISION - 1)) - 1);
        let exponent = match x.category {
            Category::Normal => {
                if x.exp == Self::MIN_EXP && !integer_bit {
                    // Denormal.
                    Self::MIN_EXP - 1
                } else {
                    x.exp
                }
            }
            Category::Zero => {
                // FIXME(eddyb) Maybe we should guarantee an invariant instead?
                significand = 0;
                Self::MIN_EXP - 1
            }
            Category::Infinity => {
                // FIXME(eddyb) Maybe we should guarantee an invariant instead?
                significand = 0;
                Self::MAX_EXP + 1
            }
            Category::NaN => Self::MAX_EXP + 1,
        };

        // Convert the exponent from a signed integer to its bias representation.
        let exponent = (exponent + Self::MAX_EXP) as u128;

        ((x.sign as u128) << (Self::BITS - 1)) | (exponent << (Self::PRECISION - 1)) | significand
    }
}
// Manual impls instead of `#[derive(Copy, Clone)]`: deriving would add
// `S: Copy` / `S: Clone` bounds, but `S` only appears inside `PhantomData`,
// so no bound on it is actually required.
impl<S> Copy for IeeeFloat<S> {}

impl<S> Clone for IeeeFloat<S> {
    fn clone(&self) -> Self {
        *self
    }
}
/// Declares an IEEE binary format: for each `$name = $sem($bits : $exp_bits)`
/// entry this defines the semantics marker type `$sem`, the `IeeeFloat`
/// alias `$name`, and a `Semantics` impl that derives `PRECISION` and
/// `MAX_EXP` from the total width and the exponent width.
macro_rules! ieee_semantics {
    ($($name:ident = $sem:ident($bits:tt : $exp_bits:tt)),*) => {
        $(pub struct $sem;)*
        $(pub type $name = IeeeFloat<$sem>;)*
        $(impl Semantics for $sem {
            const BITS: usize = $bits;
            // Stored significand bits plus the implicit integer bit.
            const PRECISION: usize = ($bits - 1 - $exp_bits) + 1;
            const MAX_EXP: ExpInt = (1 << ($exp_bits - 1)) - 1;
        })*
    }
}

// The four standard IEEE 754 binary interchange formats.
ieee_semantics! {
    Half = HalfS(16:5),
    Single = SingleS(32:8),
    Double = DoubleS(64:11),
    Quad = QuadS(128:15)
}
/// Semantics for the x87 80-bit double extended-precision format.
/// Unlike the interchange formats above, the significand's integer bit
/// is stored explicitly, so the full `PRECISION` (64) significand bits
/// live in the bit pattern.
pub struct X87DoubleExtendedS;
pub type X87DoubleExtended = IeeeFloat<X87DoubleExtendedS>;
impl Semantics for X87DoubleExtendedS {
    const BITS: usize = 80;
    const PRECISION: usize = 64;
    const MAX_EXP: ExpInt = (1 << (15 - 1)) - 1;

    /// For x87 extended precision, we want to make a NaN, not a
    /// pseudo-NaN. Maybe we should expose the ability to make
    /// pseudo-NaNs?
    const QNAN_SIGNIFICAND: Limb = 0b11 << Self::QNAN_BIT;

    /// Integer bit is explicit in this format. Intel hardware (387 and later)
    /// does not support these bit patterns:
    /// exponent = all 1's, integer bit 0, significand 0 ("pseudoinfinity")
    /// exponent = all 1's, integer bit 0, significand nonzero ("pseudoNaN")
    /// exponent = 0, integer bit 1 ("pseudodenormal")
    /// exponent!=0 nor all 1's, integer bit 0 ("unnormal")
    /// At the moment, the first two are treated as NaNs, the second two as Normal.
    fn from_bits(bits: u128) -> IeeeFloat<Self> {
        let sign = bits & (1 << (Self::BITS - 1));
        let exponent = (bits & !sign) >> Self::PRECISION;
        let mut r = IeeeFloat {
            // Keep all PRECISION significand bits: the integer bit is
            // explicit here (masking with `PRECISION - 1` bits would drop
            // it, making the Infinity check below unreachable and breaking
            // the round-trip with `to_bits`, which emits `1 << (PRECISION-1)`
            // for Infinity).
            sig: [bits & ((1 << Self::PRECISION) - 1)],
            // Convert the exponent from its bias representation to a signed integer.
            exp: (exponent as ExpInt) - Self::MAX_EXP,
            category: Category::Zero,
            sign: sign != 0,
            marker: PhantomData,
        };

        if r.exp == Self::MIN_EXP - 1 && r.sig == [0] {
            // Exponent, significand meaningless.
            r.category = Category::Zero;
        } else if r.exp == Self::MAX_EXP + 1 && r.sig == [1 << (Self::PRECISION - 1)] {
            // Exponent, significand meaningless.
            r.category = Category::Infinity;
        } else if r.exp == Self::MAX_EXP + 1 && r.sig != [1 << (Self::PRECISION - 1)] {
            // Sign, exponent, significand meaningless.
            r.category = Category::NaN;
        } else {
            r.category = Category::Normal;
            if r.exp == Self::MIN_EXP - 1 {
                // Denormal.
                r.exp = Self::MIN_EXP;
            }
            // No integer bit to set: it is already part of `sig`.
        }

        r
    }

    /// Encodes `x` back into the raw 80-bit pattern; the inverse of
    /// `from_bits`.
    fn to_bits(x: IeeeFloat<Self>) -> u128 {
        // Get integer bit from significand.
        let integer_bit = sig::get_bit(&x.sig, Self::PRECISION - 1);
        let mut significand = x.sig[0] & ((1 << Self::PRECISION) - 1);
        let exponent = match x.category {
            Category::Normal => {
                if x.exp == Self::MIN_EXP && !integer_bit {
                    // Denormal.
                    Self::MIN_EXP - 1
                } else {
                    x.exp
                }
            }
            Category::Zero => {
                // FIXME(eddyb) Maybe we should guarantee an invariant instead?
                significand = 0;
                Self::MIN_EXP - 1
            }
            Category::Infinity => {
                // FIXME(eddyb) Maybe we should guarantee an invariant instead?
                significand = 1 << (Self::PRECISION - 1);
                Self::MAX_EXP + 1
            }
            Category::NaN => Self::MAX_EXP + 1,
        };

        // Convert the exponent from a signed integer to its bias representation.
        let exponent = (exponent + Self::MAX_EXP) as u128;

        ((x.sign as u128) << (Self::BITS - 1)) | (exponent << Self::PRECISION) | significand
    }
}
float_common_impls!(IeeeFloat<S>);

impl<S: Semantics> PartialEq for IeeeFloat<S> {
    /// Equality delegates to `partial_cmp`, so a NaN (which is
    /// unordered) compares unequal to everything, including itself.
    fn eq(&self, rhs: &Self) -> bool {
        match self.partial_cmp(rhs) {
            Some(Ordering::Equal) => true,
            _ => false,
        }
    }
}
impl<S: Semantics> PartialOrd for IeeeFloat<S> {
    /// Partial ordering with NaN unordered (`None`).
    ///
    /// The recurring `(!sign).cmp(...)` trick relies on `false < true`:
    /// comparing *negated* sign bits orders a negative value (sign = true)
    /// before a positive one.
    fn partial_cmp(&self, rhs: &Self) -> Option<Ordering> {
        match (self.category, rhs.category) {
            (Category::NaN, _) |
            (_, Category::NaN) => None,

            // Two infinities: ordered purely by sign.
            (Category::Infinity, Category::Infinity) => Some((!self.sign).cmp(&(!rhs.sign))),

            // Zeros compare equal regardless of sign.
            (Category::Zero, Category::Zero) => Some(Ordering::Equal),

            // lhs has strictly larger magnitude than rhs: the result is
            // decided by lhs's sign alone (negative => Less).
            (Category::Infinity, _) |
            (Category::Normal, Category::Zero) => Some((!self.sign).cmp(&self.sign)),

            // rhs has strictly larger magnitude than lhs: decided by
            // rhs's sign alone (rhs negative => Greater).
            (_, Category::Infinity) |
            (Category::Zero, Category::Normal) => Some(rhs.sign.cmp(&(!rhs.sign))),

            (Category::Normal, Category::Normal) => {
                // Two normal numbers. Do they have the same sign?
                Some((!self.sign).cmp(&(!rhs.sign)).then_with(|| {
                    // Compare absolute values; invert result if negative.
                    let result = self.cmp_abs_normal(*rhs);

                    if self.sign { result.reverse() } else { result }
                }))
            }
        }
    }
}
impl<S> Neg for IeeeFloat<S> {
type Output = Self;
fn neg(mut self) -> Self {
self.sign = !self.sign;
self
}
}
/// Prints this value as a decimal string.
///
/// \param precision The maximum number of digits of
/// precision to output. If there are fewer digits available,
/// zero padding will not be used unless the value is
/// integral and small enough to be expressed in
/// precision digits. 0 means to use the natural
/// precision of the number.
/// \param width The maximum number of zeros to
/// consider inserting before falling back to scientific
/// notation. 0 means to always use scientific notation.
///
/// \param alternate Indicate whether to remove the trailing zero in
/// fraction part or not. Also setting this parameter to true forces
/// producing of output more similar to default printf behavior.
/// Specifically the lower e is used as exponent delimiter and exponent
/// always contains no less than two digits.
///
/// Number precision width Result
/// ------ --------- ----- ------
/// 1.01E+4 5 2 10100
/// 1.01E+4 4 2 1.01E+4
/// 1.01E+4 5 1 1.01E+4
/// 1.01E-2 5 2 0.0101
/// 1.01E-2 4 2 0.0101
/// 1.01E-2 4 1 1.01E-2
impl<S: Semantics> fmt::Display for IeeeFloat<S> {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        // `width`, `precision` and `alternate` are taken from the format
        // spec (`{:width.precision}` / `{:#}`), mapped onto the roles
        // described in the doc comment above.
        let width = f.width().unwrap_or(3);
        let alternate = f.alternate();

        // Special categories short-circuit before any digit generation.
        match self.category {
            Category::Infinity => {
                if self.sign {
                    return f.write_str("-Inf");
                } else {
                    return f.write_str("+Inf");
                }
            }

            Category::NaN => return f.write_str("NaN"),

            Category::Zero => {
                if self.sign {
                    f.write_char('-')?;
                }

                if width == 0 {
                    if alternate {
                        f.write_str("0.0")?;
                        if let Some(n) = f.precision() {
                            for _ in 1..n {
                                f.write_char('0')?;
                            }
                        }
                        f.write_str("e+00")?;
                    } else {
                        f.write_str("0.0E+0")?;
                    }
                } else {
                    f.write_char('0')?;
                }
                return Ok(());
            }

            Category::Normal => {}
        }

        if self.sign {
            f.write_char('-')?;
        }

        // We use enough digits so the number can be round-tripped back to an
        // APFloat. The formula comes from "How to Print Floating-Point Numbers
        // Accurately" by Steele and White.
        // FIXME: Using a formula based purely on the precision is conservative;
        // we can print fewer digits depending on the actual value being printed.

        // precision = 2 + floor(S::PRECISION / lg_2(10))
        let precision = f.precision().unwrap_or(2 + S::PRECISION * 59 / 196);

        // Decompose the number into an APInt and an exponent.
        let mut exp = self.exp - (S::PRECISION as ExpInt - 1);
        let mut sig = vec![self.sig[0]];

        // Ignore trailing binary zeros.
        let trailing_zeros = sig[0].trailing_zeros();
        let _: Loss = sig::shift_right(&mut sig, &mut exp, trailing_zeros as usize);

        // Change the exponent from 2^e to 10^e.
        if exp == 0 {
            // Nothing to do.
        } else if exp > 0 {
            // Just shift left.
            let shift = exp as usize;
            sig.resize(limbs_for_bits(S::PRECISION + shift), 0);
            sig::shift_left(&mut sig, &mut exp, shift);
        } else {
            // exp < 0
            let mut texp = -exp as usize;

            // We transform this using the identity:
            //   (N)(2^-e) == (N)(5^e)(10^-e)
            // Multiply significand by 5^e.
            //   N * 5^0101 == N * 5^(1*1) * 5^(0*2) * 5^(1*4) * 5^(0*8)
            let mut sig_scratch = vec![];
            let mut p5 = vec![];
            let mut p5_scratch = vec![];
            while texp != 0 {
                if p5.is_empty() {
                    p5.push(5);
                } else {
                    // Square the running power of 5 (square-and-multiply).
                    p5_scratch.resize(p5.len() * 2, 0);
                    let _: Loss =
                        sig::mul(&mut p5_scratch, &mut 0, &p5, &p5, p5.len() * 2 * LIMB_BITS);
                    while p5_scratch.last() == Some(&0) {
                        p5_scratch.pop();
                    }
                    mem::swap(&mut p5, &mut p5_scratch);
                }
                if texp & 1 != 0 {
                    sig_scratch.resize(sig.len() + p5.len(), 0);
                    let _: Loss = sig::mul(
                        &mut sig_scratch,
                        &mut 0,
                        &sig,
                        &p5,
                        (sig.len() + p5.len()) * LIMB_BITS,
                    );
                    while sig_scratch.last() == Some(&0) {
                        sig_scratch.pop();
                    }
                    mem::swap(&mut sig, &mut sig_scratch);
                }
                texp >>= 1;
            }
        }

        // Fill the buffer. Digits are accumulated least-significant first
        // (each loop iteration extracts the current rightmost decimal digit),
        // which is why all the printing below iterates in reverse.
        let mut buffer = vec![];

        // Ignore digits from the significand until it is no more
        // precise than is required for the desired precision.
        // 196/59 is a very slight overestimate of lg_2(10).
        let required = (precision * 196 + 58) / 59;
        let mut discard_digits = sig::omsb(&sig).saturating_sub(required) * 59 / 196;
        let mut in_trail = true;
        while !sig.is_empty() {
            // Perform short division by 10 to extract the rightmost digit.
            // rem <- sig % 10
            // sig <- sig / 10
            let mut rem = 0;
            for limb in sig.iter_mut().rev() {
                // We don't have an integer twice as wide as Limb,
                // so we have to split the divrem on two halves.
                const HALF_BITS: usize = LIMB_BITS / 2;
                let mut halves = [*limb & ((1 << HALF_BITS) - 1), *limb >> HALF_BITS];
                for half in halves.iter_mut().rev() {
                    *half |= rem << HALF_BITS;
                    rem = *half % 10;
                    *half /= 10;
                }
                *limb = halves[0] | (halves[1] << HALF_BITS);
            }
            // Reduce the significand to avoid wasting time dividing 0's.
            while sig.last() == Some(&0) {
                sig.pop();
            }

            let digit = rem;

            // Ignore digits we don't need.
            if discard_digits > 0 {
                discard_digits -= 1;
                exp += 1;
                continue;
            }

            // Drop trailing zeros.
            if in_trail && digit == 0 {
                exp += 1;
            } else {
                in_trail = false;
                buffer.push(b'0' + digit as u8);
            }
        }

        assert!(!buffer.is_empty(), "no characters in buffer!");

        // Drop down to precision.
        // FIXME: don't do more precise calculations above than are required.
        if buffer.len() > precision {
            // The most significant figures are the last ones in the buffer.
            let mut first_sig = buffer.len() - precision;

            // Round.
            // FIXME: this probably shouldn't use 'round half up'.

            // Rounding down is just a truncation, except we also want to drop
            // trailing zeros from the new result.
            if buffer[first_sig - 1] < b'5' {
                while first_sig < buffer.len() && buffer[first_sig] == b'0' {
                    first_sig += 1;
                }
            } else {
                // Rounding up requires a decimal add-with-carry. If we continue
                // the carry, the newly-introduced zeros will just be truncated.
                for x in &mut buffer[first_sig..] {
                    if *x == b'9' {
                        first_sig += 1;
                    } else {
                        *x += 1;
                        break;
                    }
                }
            }

            exp += first_sig as ExpInt;
            buffer.drain(..first_sig);

            // If we carried through, we have exactly one digit of precision.
            if buffer.is_empty() {
                buffer.push(b'1');
            }
        }

        let digits = buffer.len();

        // Check whether we should use scientific notation.
        let scientific = if width == 0 {
            true
        } else {
            if exp >= 0 {
                // 765e3 --> 765000
                //   ^^^
                // But we shouldn't make the number look more precise than it is.
                exp as usize > width || digits + exp as usize > precision
            } else {
                // Power of the most significant digit.
                let msd = exp + (digits - 1) as ExpInt;
                if msd >= 0 {
                    // 765e-2 == 7.65
                    false
                } else {
                    // 765e-5 == 0.00765
                    //           ^ ^^
                    -msd as usize > width
                }
            }
        };

        // Scientific formatting is pretty straightforward.
        if scientific {
            exp += digits as ExpInt - 1;
            f.write_char(buffer[digits - 1] as char)?;
            f.write_char('.')?;
            let truncate_zero = !alternate;
            if digits == 1 && truncate_zero {
                f.write_char('0')?;
            } else {
                for &d in buffer[..digits - 1].iter().rev() {
                    f.write_char(d as char)?;
                }
            }
            // Fill with zeros up to precision.
            if !truncate_zero && precision > digits - 1 {
                for _ in 0..precision - digits + 1 {
                    f.write_char('0')?;
                }
            }
            // For alternate we use lower 'e'.
            f.write_char(if alternate { 'e' } else { 'E' })?;
            // Exponent always at least two digits if we do not truncate zeros.
            if truncate_zero {
                write!(f, "{:+}", exp)?;
            } else {
                write!(f, "{:+03}", exp)?;
            }
            return Ok(());
        }

        // Non-scientific, positive exponents.
        if exp >= 0 {
            for &d in buffer.iter().rev() {
                f.write_char(d as char)?;
            }
            for _ in 0..exp {
                f.write_char('0')?;
            }
            return Ok(());
        }

        // Non-scientific, negative exponents.
        let unit_place = -exp as usize;
        if unit_place < digits {
            // Decimal point falls inside the digit string.
            for &d in buffer[unit_place..].iter().rev() {
                f.write_char(d as char)?;
            }
            f.write_char('.')?;
            for &d in buffer[..unit_place].iter().rev() {
                f.write_char(d as char)?;
            }
        } else {
            // Value is below 1: emit "0." plus leading zero padding.
            f.write_str("0.")?;
            for _ in digits..unit_place {
                f.write_char('0')?;
            }
            for &d in buffer.iter().rev() {
                f.write_char(d as char)?;
            }
        }
        Ok(())
    }
}
impl<S: Semantics> fmt::Debug for IeeeFloat<S> {
    /// Diagnostic form: the `Display` rendering followed by the raw
    /// category, sign, significand limbs, and unbiased exponent.
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        let sign = if self.sign { "-" } else { "+" };
        write!(
            f,
            "{}({:?} | {}{:?} * 2^{})",
            self, self.category, sign, self.sig, self.exp
        )
    }
}
impl<S: Semantics> Float for IeeeFloat<S> {
const BITS: usize = S::BITS;
const PRECISION: usize = S::PRECISION;
const MAX_EXP: ExpInt = S::MAX_EXP;
const MIN_EXP: ExpInt = S::MIN_EXP;
const ZERO: Self = IeeeFloat {
sig: [0],
exp: S::MIN_EXP - 1,
category: Category::Zero,
sign: false,
marker: PhantomData,
};
const INFINITY: Self = IeeeFloat {
sig: [0],
exp: S::MAX_EXP + 1,
category: Category::Infinity,
sign: false,
marker: PhantomData,
};
// FIXME(eddyb) remove when qnan becomes const fn.
const NAN: Self = IeeeFloat {
sig: [S::QNAN_SIGNIFICAND],
exp: S::MAX_EXP + 1,
category: Category::NaN,
sign: false,
marker: PhantomData,
};
fn qnan(payload: Option<u128>) -> Self {
IeeeFloat {
sig: [
S::QNAN_SIGNIFICAND |
payload.map_or(0, |payload| {
// Zero out the excess bits of the significand.
payload & ((1 << S::QNAN_BIT) - 1)
}),
],
exp: S::MAX_EXP + 1,
category: Category::NaN,
sign: false,
marker: PhantomData,
}
}
fn snan(payload: Option<u128>) -> Self {
let mut snan = Self::qnan(payload);
// We always have to clear the QNaN bit to make it an SNaN.
sig::clear_bit(&mut snan.sig, S::QNAN_BIT);
// If there are no bits set in the payload, we have to set
// *something* to make it a NaN instead of an infinity;
// conventionally, this is the next bit down from the QNaN bit.
if snan.sig[0] & !S::QNAN_SIGNIFICAND == 0 {
sig::set_bit(&mut snan.sig, S::QNAN_BIT - 1);
}
snan
}
fn largest() -> Self {
// We want (in interchange format):
// exponent = 1..10
// significand = 1..1
IeeeFloat {
sig: [!0 & ((1 << S::PRECISION) - 1)],
exp: S::MAX_EXP,
category: Category::Normal,
sign: false,
marker: PhantomData,
}
}
// We want (in interchange format):
// exponent = 0..0
// significand = 0..01
const SMALLEST: Self = IeeeFloat {
sig: [1],
exp: S::MIN_EXP,
category: Category::Normal,
sign: false,
marker: PhantomData,
};
fn smallest_normalized() -> Self {
// We want (in interchange format):
// exponent = 0..0
// significand = 10..0
IeeeFloat {
sig: [1 << (S::PRECISION - 1)],
exp: S::MIN_EXP,
category: Category::Normal,
sign: false,
marker: PhantomData,
}
}
fn add_r(mut self, rhs: Self, round: Round) -> StatusAnd<Self> {
let status = match (self.category, rhs.category) {
(Category::Infinity, Category::Infinity) => {
// Differently signed infinities can only be validly
// subtracted.
if self.sign != rhs.sign {
self = Self::NAN;
Status::INVALID_OP
} else {
Status::OK
}
}
// Sign may depend on rounding mode; handled below.
(_, Category::Zero) |
(Category::NaN, _) |
(Category::Infinity, Category::Normal) => Status::OK,
(Category::Zero, _) |
(_, Category::NaN) |
(_, Category::Infinity) => {
self = rhs;
Status::OK
}
// This return code means it was not a simple case.
(Category::Normal, Category::Normal) => {
let loss = sig::add_or_sub(
&mut self.sig,
&mut self.exp,
&mut self.sign,
&mut [rhs.sig[0]],
rhs.exp,
rhs.sign,
);
let status;
self = unpack!(status=, self.normalize(round, loss));
// Can only be zero if we lost no fraction.
assert!(self.category != Category::Zero || loss == Loss::ExactlyZero);
status
}
};
// If two numbers add (exactly) to zero, IEEE 754 decrees it is a
// positive zero unless rounding to minus infinity, except that
// adding two like-signed zeroes gives that zero.
if self.category == Category::Zero &&
(rhs.category != Category::Zero || self.sign != rhs.sign)
{
self.sign = round == Round::TowardNegative;
}
status.and(self)
}
fn mul_r(mut self, rhs: Self, round: Round) -> StatusAnd<Self> {
self.sign ^= rhs.sign;
match (self.category, rhs.category) {
(Category::NaN, _) => {
self.sign = false;
Status::OK.and(self)
}
(_, Category::NaN) => {
self.sign = false;
self.category = Category::NaN;
self.sig = rhs.sig;
Status::OK.and(self)
}
(Category::Zero, Category::Infinity) |
(Category::Infinity, Category::Zero) => Status::INVALID_OP.and(Self::NAN),
(_, Category::Infinity) |
(Category::Infinity, _) => {
self.category = Category::Infinity;
Status::OK.and(self)
}
(Category::Zero, _) |
(_, Category::Zero) => {
self.category = Category::Zero;
Status::OK.and(self)
}
(Category::Normal, Category::Normal) => {
self.exp += rhs.exp;
let mut wide_sig = [0; 2];
let loss = sig::mul(
&mut wide_sig,
&mut self.exp,
&self.sig,
&rhs.sig,
S::PRECISION,
);
self.sig = [wide_sig[0]];
let mut status;
self = unpack!(status=, self.normalize(round, loss));
if loss != Loss::ExactlyZero {
status |= Status::INEXACT;
}
status.and(self)
}
}
}
fn mul_add_r(mut self, multiplicand: Self, addend: Self, round: Round) -> StatusAnd<Self> {
// If and only if all arguments are normal do we need to do an
// extended-precision calculation.
if !self.is_finite_non_zero() || !multiplicand.is_finite_non_zero() || !addend.is_finite() {
let mut status;
self = unpack!(status=, self.mul_r(multiplicand, round));
// FS can only be Status::OK or Status::INVALID_OP. There is no more work
// to do in the latter case. The IEEE-754R standard says it is
// implementation-defined in this case whether, if ADDEND is a
// quiet NaN, we raise invalid op; this implementation does so.
//
// If we need to do the addition we can do so with normal
// precision.
if status == Status::OK {
self = unpack!(status=, self.add_r(addend, round));
}
return status.and(self);
}
// Post-multiplication sign, before addition.
self.sign ^= multiplicand.sign;
// Allocate space for twice as many bits as the original significand, plus one
// extra bit for the addition to overflow into.
assert!(limbs_for_bits(S::PRECISION * 2 + 1) <= 2);
let mut wide_sig = sig::widening_mul(self.sig[0], multiplicand.sig[0]);
let mut loss = Loss::ExactlyZero;
let mut omsb = sig::omsb(&wide_sig);
self.exp += multiplicand.exp;
// Assume the operands involved in the multiplication are single-precision
// FP, and the two multiplicants are:
// lhs = a23 . a22 ... a0 * 2^e1
// rhs = b23 . b22 ... b0 * 2^e2
// the result of multiplication is:
// lhs = c48 c47 c46 . c45 ... c0 * 2^(e1+e2)
// Note that there are three significant bits at the left-hand side of the
// radix point: two for the multiplication, and an overflow bit for the
// addition (that will always be zero at this point). Move the radix point
// toward left by two bits, and adjust exponent accordingly.
self.exp += 2;
if addend.is_non_zero() {
// Normalize our MSB to one below the top bit to allow for overflow.
let ext_precision = 2 * S::PRECISION + 1;
if omsb != ext_precision - 1 {
assert!(ext_precision > omsb);
sig::shift_left(&mut wide_sig, &mut self.exp, (ext_precision - 1) - omsb);
}
// The intermediate result of the multiplication has "2 * S::PRECISION"
// signicant bit; adjust the addend to be consistent with mul result.
let mut ext_addend_sig = [addend.sig[0], 0];
// Extend the addend significand to ext_precision - 1. This guarantees
// that the high bit of the significand is zero (same as wide_sig),
// so the addition will overflow (if it does overflow at all) into the top bit.
sig::shift_left(
&mut ext_addend_sig,
&mut 0,
ext_precision - 1 - S::PRECISION,
);
loss = sig::add_or_sub(
&mut wide_sig,
&mut self.exp,
&mut self.sign,
&mut ext_addend_sig,
addend.exp + 1,
addend.sign,
);
omsb = sig::omsb(&wide_sig);
}
// Convert the result having "2 * S::PRECISION" significant-bits back to the one
// having "S::PRECISION" significant-bits. First, move the radix point from
// poision "2*S::PRECISION - 1" to "S::PRECISION - 1". The exponent need to be
// adjusted by "2*S::PRECISION - 1" - "S::PRECISION - 1" = "S::PRECISION".
self.exp -= S::PRECISION as ExpInt + 1;
// In case MSB resides at the left-hand side of radix point, shift the
// mantissa right by some amount to make sure the MSB reside right before
// the radix point (i.e. "MSB . rest-significant-bits").
if omsb > S::PRECISION {
let bits = omsb - S::PRECISION;
loss = sig::shift_right(&mut wide_sig, &mut self.exp, bits).combine(loss);
}
self.sig[0] = wide_sig[0];
let mut status;
self = unpack!(status=, self.normalize(round, loss));
if loss != Loss::ExactlyZero {
status |= Status::INEXACT;
}
// If two numbers add (exactly) to zero, IEEE 754 decrees it is a
// positive zero unless rounding to minus infinity, except that
// adding two like-signed zeroes gives that zero.
if self.category == Category::Zero && !status.intersects(Status::UNDERFLOW) &&
self.sign != addend.sign
{
self.sign = round == Round::TowardNegative;
}
status.and(self)
}
// Division, rounding per `round`. The quotient's sign is the XOR of the
// operand signs (set up front, before the category dispatch).
fn div_r(mut self, rhs: Self, round: Round) -> StatusAnd<Self> {
    self.sign ^= rhs.sign;
    match (self.category, rhs.category) {
        // NaN / anything = NaN (lhs payload kept, sign cleared).
        (Category::NaN, _) => {
            self.sign = false;
            Status::OK.and(self)
        }
        // anything / NaN = NaN, taking the rhs payload.
        (_, Category::NaN) => {
            self.category = Category::NaN;
            self.sig = rhs.sig;
            self.sign = false;
            Status::OK.and(self)
        }
        // Inf/Inf and 0/0 are invalid operations: quiet NaN.
        (Category::Infinity, Category::Infinity) |
        (Category::Zero, Category::Zero) => Status::INVALID_OP.and(Self::NAN),
        // Inf/x = Inf and 0/x = 0 for any other rhs, exactly.
        (Category::Infinity, _) |
        (Category::Zero, _) => Status::OK.and(self),
        // finite/Inf collapses to (signed) zero, exactly.
        (Category::Normal, Category::Infinity) => {
            self.category = Category::Zero;
            Status::OK.and(self)
        }
        // finite/0 = Inf with the division-by-zero flag raised.
        (Category::Normal, Category::Zero) => {
            self.category = Category::Infinity;
            Status::DIV_BY_ZERO.and(self)
        }
        // finite/finite: subtract exponents, long-divide the
        // significands, then normalize and round.
        (Category::Normal, Category::Normal) => {
            self.exp -= rhs.exp;
            let dividend = self.sig[0];
            let loss = sig::div(
                &mut self.sig,
                &mut self.exp,
                &mut [dividend],
                &mut [rhs.sig[0]],
                S::PRECISION,
            );
            let mut status;
            self = unpack!(status=, self.normalize(round, loss));
            // Any lost fraction from the division makes the result inexact,
            // even if normalization itself reported OK.
            if loss != Loss::ExactlyZero {
                status |= Status::INEXACT;
            }
            status.and(self)
        }
    }
}
// C-style fmod: remainder of self/rhs with the quotient truncated toward
// zero; the result keeps the sign of self.
fn c_fmod(mut self, rhs: Self) -> StatusAnd<Self> {
    match (self.category, rhs.category) {
        // NaN % x, 0 % finite, finite % Inf: the lhs is already the answer.
        (Category::NaN, _) |
        (Category::Zero, Category::Infinity) |
        (Category::Zero, Category::Normal) |
        (Category::Normal, Category::Infinity) => Status::OK.and(self),
        // x % NaN = NaN, taking the rhs payload.
        (_, Category::NaN) => {
            self.sign = false;
            self.category = Category::NaN;
            self.sig = rhs.sig;
            Status::OK.and(self)
        }
        // Inf % x and x % 0 are invalid operations.
        (Category::Infinity, _) |
        (_, Category::Zero) => Status::INVALID_OP.and(Self::NAN),
        (Category::Normal, Category::Normal) => {
            // Repeatedly subtract the largest 2^k * rhs not exceeding
            // |self|; each subtraction is exact, so the loop strictly
            // reduces |self| until |self| < |rhs|.
            while self.is_finite_non_zero() && rhs.is_finite_non_zero() &&
                self.cmp_abs_normal(rhs) != Ordering::Less
            {
                // Scale rhs to the same binade as self, stepping down one
                // if that overshoots.
                let mut v = rhs.scalbn(self.ilogb() - rhs.ilogb());
                if self.cmp_abs_normal(v) == Ordering::Less {
                    v = v.scalbn(-1);
                }
                // Give v the sign of self so `self - v` shrinks magnitude.
                v.sign = self.sign;
                let status;
                self = unpack!(status=, self - v);
                // The aligned same-sign subtraction must be exact.
                assert_eq!(status, Status::OK);
            }
            Status::OK.and(self)
        }
    }
}
// Round to an integral value in the given rounding mode, via the classic
// "add then subtract 2^(p-1)" trick.
fn round_to_integral(self, round: Round) -> StatusAnd<Self> {
    // If the exponent is large enough, we know that this value is already
    // integral, and the arithmetic below would potentially cause it to saturate
    // to +/-Inf. Bail out early instead.
    if self.is_finite_non_zero() && self.exp + 1 >= S::PRECISION as ExpInt {
        return Status::OK.and(self);
    }
    // The algorithm here is quite simple: we add 2^(p-1), where p is the
    // precision of our format, and then subtract it back off again. The choice
    // of rounding modes for the addition/subtraction determines the rounding mode
    // for our integral rounding as well.
    // NOTE: When the input value is negative, we do subtraction followed by
    // addition instead.
    assert!(S::PRECISION <= 128);
    let mut status;
    let magic_const = unpack!(status=, Self::from_u128(1 << (S::PRECISION - 1)));
    // Copying self's sign makes the "addition" below a subtraction for
    // negative inputs, per the NOTE above.
    let magic_const = magic_const.copy_sign(self);
    if status != Status::OK {
        return status.and(self);
    }
    let mut r = self;
    r = unpack!(status=, r.add_r(magic_const, round));
    // INEXACT is expected here — the rounding is the whole point; any other
    // non-OK status aborts with the original value.
    if status != Status::OK && status != Status::INEXACT {
        return status.and(self);
    }
    // Restore the input sign to handle 0.0/-0.0 cases correctly.
    r.sub_r(magic_const, round).map(|r| r.copy_sign(self))
}
// Returns the least representable value that compares greater than self
// (IEEE-754R 2008 nextUp).
fn next_up(mut self) -> StatusAnd<Self> {
    // Compute nextUp(x), handling each float category separately.
    match self.category {
        Category::Infinity => {
            if self.sign {
                // nextUp(-inf) = -largest
                Status::OK.and(-Self::largest())
            } else {
                // nextUp(+inf) = +inf
                Status::OK.and(self)
            }
        }
        Category::NaN => {
            // IEEE-754R 2008 6.2 Par 2: nextUp(sNaN) = qNaN. Set Invalid flag.
            // IEEE-754R 2008 6.2: nextUp(qNaN) = qNaN. Must be identity so we do not
            // change the payload.
            if self.is_signaling() {
                // For consistency, propagate the sign of the sNaN to the qNaN.
                Status::INVALID_OP.and(Self::NAN.copy_sign(self))
            } else {
                Status::OK.and(self)
            }
        }
        Category::Zero => {
            // nextUp(pm 0) = +smallest
            Status::OK.and(Self::SMALLEST)
        }
        Category::Normal => {
            // nextUp(-smallest) = -0
            if self.is_smallest() && self.sign {
                return Status::OK.and(-Self::ZERO);
            }
            // nextUp(largest) == INFINITY
            if self.is_largest() && !self.sign {
                return Status::OK.and(Self::INFINITY);
            }
            // Excluding the integral bit. This allows us to test for binade boundaries.
            let sig_mask = (1 << (S::PRECISION - 1)) - 1;
            // nextUp(normal) == normal + inc.
            if self.sign {
                // If we are negative, we need to decrement the significand.
                // We only cross a binade boundary that requires adjusting the exponent
                // if:
                //   1. exponent != S::MIN_EXP. This implies we are not in the
                //   smallest binade or are dealing with denormals.
                //   2. Our significand excluding the integral bit is all zeros.
                let crossing_binade_boundary = self.exp != S::MIN_EXP &&
                    self.sig[0] & sig_mask == 0;
                // Decrement the significand.
                //
                // We always do this since:
                //   1. If we are dealing with a non-binade decrement, by definition we
                //   just decrement the significand.
                //   2. If we are dealing with a normal -> normal binade decrement, since
                //   we have an explicit integral bit the fact that all bits but the
                //   integral bit are zero implies that subtracting one will yield a
                //   significand with 0 integral bit and 1 in all other spots. Thus we
                //   must just adjust the exponent and set the integral bit to 1.
                //   3. If we are dealing with a normal -> denormal binade decrement,
                //   since we set the integral bit to 0 when we represent denormals, we
                //   just decrement the significand.
                sig::decrement(&mut self.sig);
                if crossing_binade_boundary {
                    // Our result is a normal number. Do the following:
                    // 1. Set the integral bit to 1.
                    // 2. Decrement the exponent.
                    sig::set_bit(&mut self.sig, S::PRECISION - 1);
                    self.exp -= 1;
                }
            } else {
                // If we are positive, we need to increment the significand.
                // We only cross a binade boundary that requires adjusting the exponent if
                // the input is not a denormal and all of said input's significand bits
                // are set. If all of said conditions are true: clear the significand, set
                // the integral bit to 1, and increment the exponent. If we have a
                // denormal always increment since moving denormals and the numbers in the
                // smallest normal binade have the same exponent in our representation.
                let crossing_binade_boundary = !self.is_denormal() &&
                    self.sig[0] & sig_mask == sig_mask;
                if crossing_binade_boundary {
                    self.sig = [0];
                    sig::set_bit(&mut self.sig, S::PRECISION - 1);
                    assert_ne!(
                        self.exp,
                        S::MAX_EXP,
                        "We can not increment an exponent beyond the MAX_EXP \
                         allowed by the given floating point semantics."
                    );
                    self.exp += 1;
                } else {
                    sig::increment(&mut self.sig);
                }
            }
            Status::OK.and(self)
        }
    }
}
/// Reinterpret a raw bit pattern as a float value; the semantics type
/// owns the encoding, so this simply forwards to it.
fn from_bits(input: u128) -> Self {
    S::from_bits(input)
}
/// Convert an unsigned 128-bit integer to a float, rounding per `round`.
fn from_u128_r(input: u128, round: Round) -> StatusAnd<Self> {
    // Treat the integer as a positive significand whose radix point sits
    // just past its top bit, then let normalization shift and round it
    // into canonical form.
    let raw = IeeeFloat {
        sig: [input],
        exp: S::PRECISION as ExpInt - 1,
        category: Category::Normal,
        sign: false,
        marker: PhantomData,
    };
    raw.normalize(round, Loss::ExactlyZero)
}
/// Parse a decimal or hexadecimal (`0x...p...`) float literal with an
/// optional leading sign, rounding per `round`.
fn from_str_r(mut s: &str, mut round: Round) -> Result<StatusAnd<Self>, ParseError> {
    if s.is_empty() {
        return Err(ParseError("Invalid string length"));
    }
    // Recognize the textual special values before general parsing.
    match s {
        "inf" | "INFINITY" => return Ok(Status::OK.and(Self::INFINITY)),
        "-inf" | "-INFINITY" => return Ok(Status::OK.and(-Self::INFINITY)),
        "nan" | "NaN" => return Ok(Status::OK.and(Self::NAN)),
        "-nan" | "-NaN" => return Ok(Status::OK.and(-Self::NAN)),
        _ => {}
    }
    // Strip a single leading sign, remembering whether it was negative.
    let minus = s.starts_with("-");
    if minus || s.starts_with("+") {
        s = &s[1..];
        if s.is_empty() {
            return Err(ParseError("String has no digits"));
        }
    }
    // The body is parsed as an absolute value, so mirror the rounding
    // mode for negative inputs.
    if minus {
        round = -round;
    }
    // Dispatch on the radix prefix.
    let magnitude = if s.starts_with("0x") || s.starts_with("0X") {
        s = &s[2..];
        if s.is_empty() {
            return Err(ParseError("Invalid string"));
        }
        Self::from_hexadecimal_string(s, round)?
    } else {
        Self::from_decimal_string(s, round)?
    };
    // Reapply the sign to the parsed magnitude.
    Ok(magnitude.map(|v| if minus { -v } else { v }))
}
/// Extract the raw bit encoding of this value; the semantics type owns
/// the layout, so this simply forwards to it.
fn to_bits(self) -> u128 {
    S::to_bits(self)
}
// Convert to an unsigned integer of `width` bits, rounding per `round`.
// On invalid conversions the saturated pattern is returned (all-ones, or
// zero for negative inputs); `is_exact` reports whether the value was
// represented without loss.
fn to_u128_r(self, width: usize, round: Round, is_exact: &mut bool) -> StatusAnd<u128> {
    // The result of trying to convert a number too large.
    let overflow = if self.sign {
        // Negative numbers cannot be represented as unsigned.
        0
    } else {
        // Largest unsigned integer of the given width.
        !0 >> (128 - width)
    };
    *is_exact = false;
    match self.category {
        Category::NaN => Status::INVALID_OP.and(0),
        Category::Infinity => Status::INVALID_OP.and(overflow),
        Category::Zero => {
            // Negative zero can't be represented as an int.
            *is_exact = !self.sign;
            Status::OK.and(0)
        }
        Category::Normal => {
            let mut r = 0;
            // Step 1: place our absolute value, with any fraction truncated, in
            // the destination.
            let truncated_bits = if self.exp < 0 {
                // Our absolute value is less than one; truncate everything.
                // For exponent -1 the integer bit represents .5, look at that.
                // For smaller exponents leftmost truncated bit is 0.
                S::PRECISION - 1 + (-self.exp) as usize
            } else {
                // We want the most significant (exponent + 1) bits; the rest are
                // truncated.
                let bits = self.exp as usize + 1;
                // Hopelessly large in magnitude?
                if bits > width {
                    return Status::INVALID_OP.and(overflow);
                }
                if bits < S::PRECISION {
                    // We truncate (S::PRECISION - bits) bits.
                    r = self.sig[0] >> (S::PRECISION - bits);
                    S::PRECISION - bits
                } else {
                    // We want at least as many bits as are available.
                    r = self.sig[0] << (bits - S::PRECISION);
                    0
                }
            };
            // Step 2: work out any lost fraction, and increment the absolute
            // value if we would round away from zero.
            let mut loss = Loss::ExactlyZero;
            if truncated_bits > 0 {
                loss = Loss::through_truncation(&self.sig, truncated_bits);
                if loss != Loss::ExactlyZero &&
                    self.round_away_from_zero(round, loss, truncated_bits)
                {
                    // Rounding up may wrap past the widest value, which is
                    // itself an overflow.
                    r = r.wrapping_add(1);
                    if r == 0 {
                        return Status::INVALID_OP.and(overflow); // Overflow.
                    }
                }
            }
            // Step 3: check if we fit in the destination.
            if r > overflow {
                return Status::INVALID_OP.and(overflow);
            }
            if loss == Loss::ExactlyZero {
                *is_exact = true;
                Status::OK.and(r)
            } else {
                Status::INEXACT.and(r)
            }
        }
    }
}
/// Compare the absolute values of two finite non-zero numbers: order
/// first by exponent, breaking ties with an unsigned comparison of the
/// significands.
fn cmp_abs_normal(self, rhs: Self) -> Ordering {
    assert!(self.is_finite_non_zero());
    assert!(rhs.is_finite_non_zero());
    match self.exp.cmp(&rhs.exp) {
        Ordering::Equal => sig::cmp(&self.sig, &rhs.sig),
        by_exponent => by_exponent,
    }
}
/// Exact representational equality: category and sign must match, and —
/// where they carry information — so must exponent and significand.
/// Unlike arithmetic comparison this distinguishes -0 from +0 and
/// compares NaN payloads.
fn bitwise_eq(self, rhs: Self) -> bool {
    if self.category != rhs.category || self.sign != rhs.sign {
        return false;
    }
    match self.category {
        // Zeros and infinities carry nothing beyond category and sign.
        Category::Zero | Category::Infinity => true,
        _ => {
            // Normals must also agree on the exponent; NaNs skip it.
            if self.is_finite_non_zero() && self.exp != rhs.exp {
                return false;
            }
            self.sig == rhs.sig
        }
    }
}
/// True when the sign bit is set — including -0 and sign-bearing NaNs.
fn is_negative(self) -> bool {
    self.sign
}
/// A denormal is a finite non-zero value at the minimum exponent whose
/// (explicit) integer bit is clear.
fn is_denormal(self) -> bool {
    if !self.is_finite_non_zero() {
        return false;
    }
    self.exp == S::MIN_EXP && !sig::get_bit(&self.sig, S::PRECISION - 1)
}
/// IEEE-754R 2008 6.2.1: a signaling NaN is encoded with the first bit
/// of the trailing significand (the quiet bit) clear.
fn is_signaling(self) -> bool {
    if !self.is_nan() {
        return false;
    }
    !sig::get_bit(&self.sig, S::QNAN_BIT)
}
/// The coarse classification (Zero / Normal / Infinity / NaN) of this value.
fn category(self) -> Category {
    self.category
}
// If 1/self is exactly representable as a normal number (i.e. self is a
// power of two with a non-denormal reciprocal), return it; else None.
fn get_exact_inverse(self) -> Option<Self> {
    // Special floats and denormals have no exact inverse.
    if !self.is_finite_non_zero() {
        return None;
    }
    // Check that the number is a power of two by making sure that only the
    // integer bit is set in the significand.
    if self.sig != [1 << (S::PRECISION - 1)] {
        return None;
    }
    // Get the inverse.
    let mut reciprocal = Self::from_u128(1).value;
    let status;
    reciprocal = unpack!(status=, reciprocal / self);
    // Any rounding during the division means the reciprocal is inexact.
    if status != Status::OK {
        return None;
    }
    // Avoid multiplication with a denormal, it is not safe on all platforms and
    // may be slower than a normal division.
    if reciprocal.is_denormal() {
        return None;
    }
    // The exact reciprocal of a power of two is itself a power of two.
    assert!(reciprocal.is_finite_non_zero());
    assert_eq!(reciprocal.sig, [1 << (S::PRECISION - 1)]);
    Some(reciprocal)
}
/// The unbiased exponent as an integer (like C `ilogb`), using the
/// IEK_NAN / IEK_ZERO / IEK_INF sentinels for non-normal inputs.
fn ilogb(mut self) -> ExpInt {
    if self.is_nan() {
        return IEK_NAN;
    }
    if self.is_zero() {
        return IEK_ZERO;
    }
    if self.is_infinite() {
        return IEK_INF;
    }
    if self.is_denormal() {
        // Denormals store a smaller-than-real exponent: bias it up by the
        // significand width, renormalize, then undo the bias.
        let sig_bits = (S::PRECISION - 1) as ExpInt;
        self.exp += sig_bits;
        self = self.normalize(Round::NearestTiesToEven, Loss::ExactlyZero).value;
        self.exp - sig_bits
    } else {
        self.exp
    }
}
// Multiply by 2^exp (like C `scalbn`), rounding per `round`.
fn scalbn_r(mut self, exp: ExpInt, round: Round) -> Self {
    // If exp is wildly out-of-scale, simply adding it to self.exp will
    // overflow; clamp it to a safe range before adding, but ensure that the range
    // is large enough that the clamp does not change the result. The range we
    // need to support is the difference between the largest possible exponent and
    // the normalized exponent of half the smallest denormal.
    let sig_bits = (S::PRECISION - 1) as i32;
    let max_change = S::MAX_EXP as i32 - (S::MIN_EXP as i32 - sig_bits) + 1;
    // Clamp to one past the range ends to let normalize handle overflow.
    let exp_change = cmp::min(cmp::max(exp as i32, (-max_change - 1)), max_change);
    self.exp = self.exp.saturating_add(exp_change as ExpInt);
    self = self.normalize(round, Loss::ExactlyZero).value;
    // Ensure any NaN result carries the quiet bit.
    if self.is_nan() {
        sig::set_bit(&mut self.sig, S::QNAN_BIT);
    }
    self
}
/// Decompose into fraction and exponent (like C `frexp`): the exponent
/// is stored through `exp` and the returned fraction lies in
/// +/-[0.5, 1.0), with NaN/Inf/zero handled specially.
fn frexp_r(mut self, exp: &mut ExpInt, round: Round) -> Self {
    *exp = self.ilogb();
    // Quiet signalling nans.
    if *exp == IEK_NAN {
        sig::set_bit(&mut self.sig, S::QNAN_BIT);
        return self;
    }
    // Infinity passes through, with the sentinel exponent left in place.
    if *exp == IEK_INF {
        return self;
    }
    // 1 is added because frexp is defined to return a normalized fraction in
    // +/-[0.5, 1.0), rather than the usual +/-[1.0, 2.0).
    *exp = if *exp == IEK_ZERO { 0 } else { *exp + 1 };
    self.scalbn_r(-*exp, round)
}
}
// Conversion between two IEEE semantics: reinterpret the fields, shift
// the significand to the new precision, and renormalize under the new
// exponent range.
impl<S: Semantics, T: Semantics> FloatConvert<IeeeFloat<T>> for IeeeFloat<S> {
    fn convert_r(self, round: Round, loses_info: &mut bool) -> StatusAnd<IeeeFloat<T>> {
        // Start from a field-for-field copy under the target semantics.
        let mut r = IeeeFloat {
            sig: self.sig,
            exp: self.exp,
            category: self.category,
            sign: self.sign,
            marker: PhantomData,
        };
        // x86 has some unusual NaNs which cannot be represented in any other
        // format; note them here.
        fn is_x87_double_extended<S: Semantics>() -> bool {
            S::QNAN_SIGNIFICAND == X87DoubleExtendedS::QNAN_SIGNIFICAND
        }
        let x87_special_nan = is_x87_double_extended::<S>() && !is_x87_double_extended::<T>() &&
            r.category == Category::NaN &&
            (r.sig[0] & S::QNAN_SIGNIFICAND) != S::QNAN_SIGNIFICAND;
        // If this is a truncation of a denormal number, and the target semantics
        // has larger exponent range than the source semantics (this can happen
        // when truncating from PowerPC double-double to double format), the
        // right shift could lose result mantissa bits. Adjust exponent instead
        // of performing excessive shift.
        // `shift` < 0 is a narrowing conversion, > 0 a widening one.
        let mut shift = T::PRECISION as ExpInt - S::PRECISION as ExpInt;
        if shift < 0 && r.is_finite_non_zero() {
            let mut exp_change = sig::omsb(&r.sig) as ExpInt - S::PRECISION as ExpInt;
            if r.exp + exp_change < T::MIN_EXP {
                exp_change = T::MIN_EXP - r.exp;
            }
            if exp_change < shift {
                exp_change = shift;
            }
            if exp_change < 0 {
                shift -= exp_change;
                r.exp += exp_change;
            }
        }
        // If this is a truncation, perform the shift.
        let mut loss = Loss::ExactlyZero;
        if shift < 0 && (r.is_finite_non_zero() || r.category == Category::NaN) {
            loss = sig::shift_right(&mut r.sig, &mut 0, -shift as usize);
        }
        // If this is an extension, perform the shift.
        if shift > 0 && (r.is_finite_non_zero() || r.category == Category::NaN) {
            sig::shift_left(&mut r.sig, &mut 0, shift as usize);
        }
        let status;
        if r.is_finite_non_zero() {
            r = unpack!(status=, r.normalize(round, loss));
            // Any rounding during normalization means information was lost.
            *loses_info = status != Status::OK;
        } else if r.category == Category::NaN {
            *loses_info = loss != Loss::ExactlyZero || x87_special_nan;
            // For x87 extended precision, we want to make a NaN, not a special NaN if
            // the input wasn't special either.
            if !x87_special_nan && is_x87_double_extended::<T>() {
                sig::set_bit(&mut r.sig, T::PRECISION - 1);
            }
            // gcc forces the Quiet bit on, which means (float)(double)(float_sNan)
            // does not give you back the same bits. This is dubious, and we
            // don't currently do it. You're really supposed to get
            // an invalid operation signal at runtime, but nobody does that.
            status = Status::OK;
        } else {
            // Zeros and infinities convert losslessly.
            *loses_info = false;
            status = Status::OK;
        }
        status.and(r)
    }
}
impl<S: Semantics> IeeeFloat<S> {
/// Handle positive overflow: rounding modes that move upward (either
/// nearest mode, or toward positive) produce infinity, while the rest
/// clamp to the largest finite number. For negative overflow,
/// negate the `round` argument before calling.
fn overflow_result(round: Round) -> StatusAnd<Self> {
    let rounds_to_infinity = match round {
        Round::NearestTiesToEven | Round::NearestTiesToAway | Round::TowardPositive => true,
        Round::TowardNegative | Round::TowardZero => false,
    };
    if rounds_to_infinity {
        (Status::OVERFLOW | Status::INEXACT).and(Self::INFINITY)
    } else {
        // Otherwise we become the largest finite number.
        Status::INEXACT.and(Self::largest())
    }
}
/// Returns TRUE if, when truncating the current number, with BIT the
/// new LSB, with the given lost fraction and rounding mode, the result
/// would need to be rounded away from zero (i.e., by increasing the
/// signficand). This routine must work for Category::Zero of both signs, and
/// Category::Normal numbers.
fn round_away_from_zero(&self, round: Round, loss: Loss, bit: usize) -> bool {
    // NaNs and infinities should not have lost fractions.
    assert!(self.is_finite_non_zero() || self.is_zero());
    // Current callers never pass this so we don't handle it.
    assert_ne!(loss, Loss::ExactlyZero);
    match round {
        // Half or more of an ulp lost: round up. (ExactlyZero is
        // excluded by the assertion above.)
        Round::NearestTiesToAway => loss != Loss::LessThanHalf,
        Round::NearestTiesToEven => {
            if loss == Loss::MoreThanHalf {
                return true;
            }
            // Exact tie: round to even, i.e. up iff the would-be LSB is
            // set. Our zeros don't have a significand to test.
            if loss == Loss::ExactlyHalf && self.category != Category::Zero {
                return sig::get_bit(&self.sig, bit);
            }
            false
        }
        // Truncation never rounds away from zero.
        Round::TowardZero => false,
        // Directed rounding moves away from zero only on its own side.
        Round::TowardPositive => !self.sign,
        Round::TowardNegative => self.sign,
    }
}
// Bring the value into canonical form for its semantics, applying
// `round` together with the fraction already lost in `loss`, and report
// overflow / underflow / inexact status.
fn normalize(mut self, round: Round, mut loss: Loss) -> StatusAnd<Self> {
    // Only Category::Normal values need work.
    if !self.is_finite_non_zero() {
        return Status::OK.and(self);
    }
    // Before rounding normalize the exponent of Category::Normal numbers.
    let mut omsb = sig::omsb(&self.sig);
    if omsb > 0 {
        // OMSB is numbered from 1. We want to place it in the integer
        // bit numbered PRECISION if possible, with a compensating change in
        // the exponent.
        let mut final_exp = self.exp.saturating_add(
            omsb as ExpInt - S::PRECISION as ExpInt,
        );
        // If the resulting exponent is too high, overflow according to
        // the rounding mode.
        if final_exp > S::MAX_EXP {
            let round = if self.sign { -round } else { round };
            return Self::overflow_result(round).map(|r| r.copy_sign(self));
        }
        // Subnormal numbers have exponent MIN_EXP, and their MSB
        // is forced based on that.
        if final_exp < S::MIN_EXP {
            final_exp = S::MIN_EXP;
        }
        // Shifting left is easy as we don't lose precision.
        if final_exp < self.exp {
            assert_eq!(loss, Loss::ExactlyZero);
            let exp_change = (self.exp - final_exp) as usize;
            sig::shift_left(&mut self.sig, &mut self.exp, exp_change);
            return Status::OK.and(self);
        }
        // Shift right and capture any new lost fraction.
        if final_exp > self.exp {
            let exp_change = (final_exp - self.exp) as usize;
            loss = sig::shift_right(&mut self.sig, &mut self.exp, exp_change).combine(loss);
            // Keep OMSB up-to-date.
            omsb = omsb.saturating_sub(exp_change);
        }
    }
    // Now round the number according to round given the lost
    // fraction.
    // As specified in IEEE 754, since we do not trap we do not report
    // underflow for exact results.
    if loss == Loss::ExactlyZero {
        // Canonicalize zeros.
        if omsb == 0 {
            self.category = Category::Zero;
        }
        return Status::OK.and(self);
    }
    // Increment the significand if we're rounding away from zero.
    if self.round_away_from_zero(round, loss, 0) {
        if omsb == 0 {
            self.exp = S::MIN_EXP;
        }
        // We should never overflow.
        assert_eq!(sig::increment(&mut self.sig), 0);
        omsb = sig::omsb(&self.sig);
        // Did the significand increment overflow?
        if omsb == S::PRECISION + 1 {
            // Renormalize by incrementing the exponent and shifting our
            // significand right one. However if we already have the
            // maximum exponent we overflow to infinity.
            if self.exp == S::MAX_EXP {
                self.category = Category::Infinity;
                return (Status::OVERFLOW | Status::INEXACT).and(self);
            }
            let _: Loss = sig::shift_right(&mut self.sig, &mut self.exp, 1);
            return Status::INEXACT.and(self);
        }
    }
    // The normal case - we were and are not denormal, and any
    // significand increment above didn't overflow.
    if omsb == S::PRECISION {
        return Status::INEXACT.and(self);
    }
    // We have a non-zero denormal.
    assert!(omsb < S::PRECISION);
    // Canonicalize zeros.
    if omsb == 0 {
        self.category = Category::Zero;
    }
    // The Category::Zero case is a denormal that underflowed to zero.
    (Status::UNDERFLOW | Status::INEXACT).and(self)
}
// Parse the magnitude of a hexadecimal float literal (the text after any
// sign and "0x" prefix), e.g. `1.8p3`: hex digits with an optional dot,
// followed by a mandatory binary exponent introduced by 'p'/'P'.
fn from_hexadecimal_string(s: &str, round: Round) -> Result<StatusAnd<Self>, ParseError> {
    let mut r = IeeeFloat {
        sig: [0],
        exp: 0,
        category: Category::Normal,
        sign: false,
        marker: PhantomData,
    };
    let mut any_digits = false;
    let mut has_exp = false;
    // Next nibble position (from the top) in the significand limb.
    let mut bit_pos = LIMB_BITS as isize;
    // Set once digits no longer fit in the limb; classifies the dropped tail.
    let mut loss = None;
    // Without leading or trailing zeros, irrespective of the dot.
    let mut first_sig_digit = None;
    let mut dot = s.len();
    for (p, c) in s.char_indices() {
        // Skip leading zeros and any (hexa)decimal point.
        if c == '.' {
            if dot != s.len() {
                return Err(ParseError("String contains multiple dots"));
            }
            dot = p;
        } else if let Some(hex_value) = c.to_digit(16) {
            any_digits = true;
            if first_sig_digit.is_none() {
                if hex_value == 0 {
                    continue;
                }
                first_sig_digit = Some(p);
            }
            // Store the number while we have space.
            bit_pos -= 4;
            if bit_pos >= 0 {
                r.sig[0] |= (hex_value as Limb) << bit_pos;
            } else {
                // If zero or one-half (the hexadecimal digit 8) are followed
                // by non-zero, they're a little more than zero or one-half.
                if let Some(ref mut loss) = loss {
                    if hex_value != 0 {
                        if *loss == Loss::ExactlyZero {
                            *loss = Loss::LessThanHalf;
                        }
                        if *loss == Loss::ExactlyHalf {
                            *loss = Loss::MoreThanHalf;
                        }
                    }
                } else {
                    // First dropped nibble decides the initial loss class.
                    loss = Some(match hex_value {
                        0 => Loss::ExactlyZero,
                        1...7 => Loss::LessThanHalf,
                        8 => Loss::ExactlyHalf,
                        9...15 => Loss::MoreThanHalf,
                        _ => unreachable!(),
                    });
                }
            }
        } else if c == 'p' || c == 'P' {
            if !any_digits {
                return Err(ParseError("Significand has no digits"));
            }
            // A missing dot acts as if it sat just before the exponent.
            if dot == s.len() {
                dot = p;
            }
            let mut chars = s[p + 1..].chars().peekable();
            // Adjust for the given exponent.
            let exp_minus = chars.peek() == Some(&'-');
            if exp_minus || chars.peek() == Some(&'+') {
                chars.next();
            }
            for c in chars {
                if let Some(value) = c.to_digit(10) {
                    has_exp = true;
                    r.exp = r.exp.saturating_mul(10).saturating_add(value as ExpInt);
                } else {
                    return Err(ParseError("Invalid character in exponent"));
                }
            }
            if !has_exp {
                return Err(ParseError("Exponent has no digits"));
            }
            if exp_minus {
                r.exp = -r.exp;
            }
            break;
        } else {
            return Err(ParseError("Invalid character in significand"));
        }
    }
    if !any_digits {
        return Err(ParseError("Significand has no digits"));
    }
    // Hex floats require an exponent but not a hexadecimal point.
    if !has_exp {
        return Err(ParseError("Hex strings require an exponent"));
    }
    // Ignore the exponent if we are zero.
    let first_sig_digit = match first_sig_digit {
        Some(p) => p,
        None => return Ok(Status::OK.and(Self::ZERO)),
    };
    // Calculate the exponent adjustment implicit in the number of
    // significant digits and adjust for writing the significand starting
    // at the most significant nibble.
    let exp_adjustment = if dot > first_sig_digit {
        ExpInt::try_from(dot - first_sig_digit).unwrap()
    } else {
        -ExpInt::try_from(first_sig_digit - dot - 1).unwrap()
    };
    let exp_adjustment = exp_adjustment
        .saturating_mul(4)
        .saturating_sub(1)
        .saturating_add(S::PRECISION as ExpInt)
        .saturating_sub(LIMB_BITS as ExpInt);
    r.exp = r.exp.saturating_add(exp_adjustment);
    Ok(r.normalize(round, loss.unwrap_or(Loss::ExactlyZero)))
}
fn from_decimal_string(s: &str, round: Round) -> Result<StatusAnd<Self>, ParseError> {
// Given a normal decimal floating point number of the form
//
// dddd.dddd[eE][+-]ddd
//
// where the decimal point and exponent are optional, fill out the
// variables below. Exponent is appropriate if the significand is
// treated as an integer, and normalized_exp if the significand
// is taken to have the decimal point after a single leading
// non-zero digit.
//
// If the value is zero, first_sig_digit is None.
let mut any_digits = false;
let mut dec_exp = 0i32;
// Without leading or trailing zeros, irrespective of the dot.
let mut first_sig_digit = None;
let mut last_sig_digit = 0;
let mut dot = s.len();
for (p, c) in s.char_indices() {
if c == '.' {
if dot != s.len() {
return Err(ParseError("String contains multiple dots"));
}
dot = p;
} else if let Some(dec_value) = c.to_digit(10) {
any_digits = true;
if dec_value != 0 {
if first_sig_digit.is_none() {
first_sig_digit = Some(p);
}
last_sig_digit = p;
}
} else if c == 'e' || c == 'E' {
if !any_digits {
return Err(ParseError("Significand has no digits"));
}
if dot == s.len() {
dot = p;
}
let mut chars = s[p + 1..].chars().peekable();
// Adjust for the given exponent.
let exp_minus = chars.peek() == Some(&'-');
if exp_minus || chars.peek() == Some(&'+') {
chars.next();
}
any_digits = false;
for c in chars {
if let Some(value) = c.to_digit(10) {
any_digits = true;
dec_exp = dec_exp.saturating_mul(10).saturating_add(value as i32);
} else {
return Err(ParseError("Invalid character in exponent"));
}
}
if !any_digits {
return Err(ParseError("Exponent has no digits"));
}
if exp_minus {
dec_exp = -dec_exp;
}
break;
} else {
return Err(ParseError("Invalid character in significand"));
}
}
if !any_digits {
return Err(ParseError("Significand has no digits"));
}
// Test if we have a zero number allowing for non-zero exponents.
let first_sig_digit = match first_sig_digit {
Some(p) => p,
None => return Ok(Status::OK.and(Self::ZERO)),
};
// Adjust the exponents for any decimal point.
if dot > last_sig_digit {
dec_exp = dec_exp.saturating_add((dot - last_sig_digit - 1) as i32);
} else {
dec_exp = dec_exp.saturating_sub((last_sig_digit - dot) as i32);
}
let significand_digits = last_sig_digit - first_sig_digit + 1 -
(dot > first_sig_digit && dot < last_sig_digit) as usize;
let normalized_exp = dec_exp.saturating_add(significand_digits as i32 - 1);
// Handle the cases where exponents are obviously too large or too
// small. Writing L for log 10 / log 2, a number d.ddddd*10^dec_exp
// definitely overflows if
//
// (dec_exp - 1) * L >= MAX_EXP
//
// and definitely underflows to zero where
//
// (dec_exp + 1) * L <= MIN_EXP - PRECISION
//
// With integer arithmetic the tightest bounds for L are
//
// 93/28 < L < 196/59 [ numerator <= 256 ]
// 42039/12655 < L < 28738/8651 [ numerator <= 65536 ]
// Check for MAX_EXP.
if normalized_exp.saturating_sub(1).saturating_mul(42039) >= 12655 * S::MAX_EXP as i32 {
// Overflow and round.
return Ok(Self::overflow_result(round));
}
// Check for MIN_EXP.
if normalized_exp.saturating_add(1).saturating_mul(28738) <=
8651 * (S::MIN_EXP as i32 - S::PRECISION as i32)
{
// Underflow to zero and round.
let r = if round == Round::TowardPositive {
IeeeFloat::SMALLEST
} else {
IeeeFloat::ZERO
};
return Ok((Status::UNDERFLOW | Status::INEXACT).and(r));
}
// A tight upper bound on number of bits required to hold an
// N-digit decimal integer is N * 196 / 59. Allocate enough space
// to hold the full significand, and an extra limb required by
// tcMultiplyPart.
let max_limbs = limbs_for_bits(1 + 196 * significand_digits / 59);
let mut dec_sig = Vec::with_capacity(max_limbs);
// Convert to binary efficiently - we do almost all multiplication
// in a Limb. When this would overflow do we do a single
// bignum multiplication, and then revert again to multiplication
// in a Limb.
let mut chars = s[first_sig_digit..last_sig_digit + 1].chars();
loop {
let mut val = 0;
let mut multiplier = 1;
loop {
let dec_value = match chars.next() {
Some('.') => continue,
Some(c) => c.to_digit(10).unwrap(),
None => break,
};
multiplier *= 10;
val = val * 10 + dec_value as Limb;
// The maximum number that can be multiplied by ten with any
// digit added without overflowing a Limb.
if multiplier > (!0 - 9) / 10 {
break;
}
}
// If we've consumed no digits, we're done.
if multiplier == 1 {
break;
}
// Multiply out the current limb.
let mut carry = val;
for x in &mut dec_sig {
let [low, mut high] = sig::widening_mul(*x, multiplier);
// Now add carry.
let (low, overflow) = low.overflowing_add(carry);
high += overflow as Limb;
*x = low;
carry = high;
}
// If we had carry, we need another limb (likely but not guaranteed).
if carry > 0 {
dec_sig.push(carry);
}
}
// Calculate pow(5, abs(dec_exp)) into `pow5_full`.
// The *_calc Vec's are reused scratch space, as an optimization.
let (pow5_full, mut pow5_calc, mut sig_calc, mut sig_scratch_calc) = {
let mut power = dec_exp.abs() as usize;
const FIRST_EIGHT_POWERS: [Limb; 8] = [1, 5, 25, 125, 625, 3125, 15625, 78125];
let mut p5_scratch = vec![];
let mut p5 = vec![FIRST_EIGHT_POWERS[4]];
let mut r_scratch = vec![];
let mut r = vec![FIRST_EIGHT_POWERS[power & 7]];
power >>= 3;
while power > 0 {
// Calculate pow(5,pow(2,n+3)).
p5_scratch.resize(p5.len() * 2, 0);
let _: Loss = sig::mul(&mut p5_scratch, &mut 0, &p5, &p5, p5.len() * 2 * LIMB_BITS);
while p5_scratch.last() == Some(&0) {
p5_scratch.pop();
}
mem::swap(&mut p5, &mut p5_scratch);
if power & 1 != 0 {
r_scratch.resize(r.len() + p5.len(), 0);
let _: Loss = sig::mul(
&mut r_scratch,
&mut 0,
&r,
&p5,
(r.len() + p5.len()) * LIMB_BITS,
);
while r_scratch.last() == Some(&0) {
r_scratch.pop();
}
mem::swap(&mut r, &mut r_scratch);
}
power >>= 1;
}
(r, r_scratch, p5, p5_scratch)
};
// Attempt dec_sig * 10^dec_exp with increasing precision.
let mut attempt = 1;
loop {
let calc_precision = (LIMB_BITS << attempt) - 1;
attempt += 1;
let calc_normal_from_limbs = |sig: &mut Vec<Limb>,
limbs: &[Limb]|
-> StatusAnd<ExpInt> {
sig.resize(limbs_for_bits(calc_precision), 0);
let (mut loss, mut exp) = sig::from_limbs(sig, limbs, calc_precision);
// Before rounding normalize the exponent of Category::Normal numbers.
let mut omsb = sig::omsb(sig);
assert_ne!(omsb, 0);
// OMSB is numbered from 1. We want to place it in the integer
// bit numbered PRECISION if possible, with a compensating change in
// the exponent.
let final_exp = exp.saturating_add(omsb as ExpInt - calc_precision as ExpInt);
// Shifting left is easy as we don't lose precision.
if final_exp < exp {
assert_eq!(loss, Loss::ExactlyZero);
let exp_change = (exp - final_exp) as usize;
sig::shift_left(sig, &mut exp, exp_change);
return Status::OK.and(exp);
}
// Shift right and capture any new lost fraction.
if final_exp > exp {
let exp_change = (final_exp - exp) as usize;
loss = sig::shift_right(sig, &mut exp, exp_change).combine(loss);
// Keep OMSB up-to-date.
omsb = omsb.saturating_sub(exp_change);
}
assert_eq!(omsb, calc_precision);
// Now round the number according to round given the lost
// fraction.
// As specified in IEEE 754, since we do not trap we do not report
// underflow for exact results.
if loss == Loss::ExactlyZero {
return Status::OK.and(exp);
}
// Increment the significand if we're rounding away from zero.
if loss == Loss::MoreThanHalf || loss == Loss::ExactlyHalf && sig::get_bit(sig, 0) {
// We should never overflow.
assert_eq!(sig::increment(sig), 0);
omsb = sig::omsb(sig);
// Did the significand increment overflow?
if omsb == calc_precision + 1 {
let _: Loss = sig::shift_right(sig, &mut exp, 1);
return Status::INEXACT.and(exp);
}
}
// The normal case - we were and are not denormal, and any
// significand increment above didn't overflow.
Status::INEXACT.and(exp)
};
let status;
let mut exp = unpack!(status=,
calc_normal_from_limbs(&mut sig_calc, &dec_sig));
let pow5_status;
let pow5_exp = unpack!(pow5_status=,
calc_normal_from_limbs(&mut pow5_calc, &pow5_full));
// Add dec_exp, as 10^n = 5^n * 2^n.
exp += dec_exp as ExpInt;
let mut used_bits = S::PRECISION;
let mut truncated_bits = calc_precision - used_bits;
let half_ulp_err1 = (status != Status::OK) as Limb;
let (calc_loss, half_ulp_err2);
if dec_exp >= 0 {
exp += pow5_exp;
sig_scratch_calc.resize(sig_calc.len() + pow5_calc.len(), 0);
calc_loss = sig::mul(
&mut sig_scratch_calc,
&mut exp,
&sig_calc,
&pow5_calc,
calc_precision,
);
mem::swap(&mut sig_calc, &mut sig_scratch_calc);
half_ulp_err2 = (pow5_status != Status::OK) as Limb;
} else {
exp -= pow5_exp;
sig_scratch_calc.resize(sig_calc.len(), 0);
calc_loss = sig::div(
&mut sig_scratch_calc,
&mut exp,
&mut sig_calc,
&mut pow5_calc,
calc_precision,
);
mem::swap(&mut sig_calc, &mut sig_scratch_calc);
// Denormal numbers have less precision.
if exp < S::MIN_EXP {
truncated_bits += (S::MIN_EXP - exp) as usize;
used_bits = calc_precision.saturating_sub(truncated_bits);
}
// Extra half-ulp lost in reciprocal of exponent.
half_ulp_err2 = 2 *
(pow5_status != Status::OK || calc_loss != Loss::ExactlyZero) as Limb;
}
// Both sig::mul and sig::div return the
// result with the integer bit set.
assert!(sig::get_bit(&sig_calc, calc_precision - 1));
// The error from the true value, in half-ulps, on multiplying two
// floating point numbers, which differ from the value they
// approximate by at most half_ulp_err1 and half_ulp_err2 half-ulps, is strictly less
// than the returned value.
//
// See "How to Read Floating Point Numbers Accurately" by William D Clinger.
assert!(
half_ulp_err1 < 2 || half_ulp_err2 < 2 || (half_ulp_err1 + half_ulp_err2 < 8)
);
let inexact = (calc_loss != Loss::ExactlyZero) as Limb;
let half_ulp_err = if half_ulp_err1 + half_ulp_err2 == 0 {
inexact * 2 // <= inexact half-ulps.
} else {
inexact + 2 * (half_ulp_err1 + half_ulp_err2)
};
let ulps_from_boundary = {
let bits = calc_precision - used_bits - 1;
let i = bits / LIMB_BITS;
let limb = sig_calc[i] & (!0 >> (LIMB_BITS - 1 - bits % LIMB_BITS));
let boundary = match round {
Round::NearestTiesToEven | Round::NearestTiesToAway => 1 << (bits % LIMB_BITS),
_ => 0,
};
if i == 0 {
let delta = limb.wrapping_sub(boundary);
cmp::min(delta, delta.wrapping_neg())
} else if limb == boundary {
if !sig::is_all_zeros(&sig_calc[1..i]) {
!0 // A lot.
} else {
sig_calc[0]
}
} else if limb == boundary.wrapping_sub(1) {
if sig_calc[1..i].iter().any(|&x| x.wrapping_neg() != 1) {
!0 // A lot.
} else {
sig_calc[0].wrapping_neg()
}
} else {
!0 // A lot.
}
};
// Are we guaranteed to round correctly if we truncate?
if ulps_from_boundary.saturating_mul(2) >= half_ulp_err {
let mut r = IeeeFloat {
sig: [0],
exp,
category: Category::Normal,
sign: false,
marker: PhantomData,
};
sig::extract(&mut r.sig, &sig_calc, used_bits, calc_precision - used_bits);
// If we extracted less bits above we must adjust our exponent
// to compensate for the implicit right shift.
r.exp += (S::PRECISION - used_bits) as ExpInt;
let loss = Loss::through_truncation(&sig_calc, truncated_bits);
return Ok(r.normalize(round, loss));
}
}
}
}
impl Loss {
    /// Combine the effect of two lost fractions.
    ///
    /// `self` describes the more significant lost bits; `less_significant`
    /// describes bits lost further to the right. Any non-zero loss on the
    /// right bumps an exact result to "less than half" and an exact half
    /// to "more than half".
    fn combine(self, less_significant: Loss) -> Loss {
        if less_significant == Loss::ExactlyZero {
            return self;
        }
        match self {
            Loss::ExactlyZero => Loss::LessThanHalf,
            Loss::ExactlyHalf => Loss::MoreThanHalf,
            other => other,
        }
    }

    /// Return the fraction lost were a bignum truncated losing the least
    /// significant `bits` bits.
    fn through_truncation(limbs: &[Limb], bits: usize) -> Loss {
        if bits == 0 {
            return Loss::ExactlyZero;
        }

        // The "half" position is the highest bit being dropped.
        let half_bit = bits - 1;
        let half_limb_index = half_bit / LIMB_BITS;
        // Limbs past the end of the slice are implicitly zero.
        let (half_limb, rest) = match limbs.get(half_limb_index) {
            Some(&limb) => (limb, &limbs[..half_limb_index]),
            None => (0, limbs),
        };

        let half = 1 << (half_bit % LIMB_BITS);
        let has_half = half_limb & half != 0;
        let has_rest = half_limb & (half - 1) != 0 || !sig::is_all_zeros(rest);

        match (has_half, has_rest) {
            (false, false) => Loss::ExactlyZero,
            (false, true) => Loss::LessThanHalf,
            (true, false) => Loss::ExactlyHalf,
            (true, true) => Loss::MoreThanHalf,
        }
    }
}
/// Implementation details of IeeeFloat significands, such as big integer arithmetic.
/// As a rule of thumb, no functions in this module should dynamically allocate.
mod sig {
use std::cmp::Ordering;
use std::mem;
use super::{ExpInt, Limb, LIMB_BITS, limbs_for_bits, Loss};
/// Check whether every limb of the significand is zero.
pub(super) fn is_all_zeros(limbs: &[Limb]) -> bool {
    !limbs.iter().any(|&limb| limb != 0)
}
/// One, not zero, based MSB. That is, returns 0 for a zeroed significand.
pub(super) fn omsb(limbs: &[Limb]) -> usize {
    // Locate the highest non-zero limb; the MSB lives inside it.
    limbs
        .iter()
        .enumerate()
        .rfind(|&(_, &limb)| limb != 0)
        .map_or(0, |(i, &limb)| {
            (i + 1) * LIMB_BITS - limb.leading_zeros() as usize
        })
}
/// Comparison (unsigned) of two significands.
pub(super) fn cmp(a: &[Limb], b: &[Limb]) -> Ordering {
    assert_eq!(a.len(), b.len());
    // Compare limb by limb, starting from the most significant end; the
    // first unequal pair decides the ordering.
    a.iter()
        .zip(b)
        .rev()
        .map(|(x, y)| x.cmp(y))
        .find(|&ord| ord != Ordering::Equal)
        .unwrap_or(Ordering::Equal)
}
/// Extract the given bit.
pub(super) fn get_bit(limbs: &[Limb], bit: usize) -> bool {
    let (index, mask) = (bit / LIMB_BITS, 1 << (bit % LIMB_BITS));
    limbs[index] & mask != 0
}
/// Set the given bit.
pub(super) fn set_bit(limbs: &mut [Limb], bit: usize) {
    let (index, mask) = (bit / LIMB_BITS, 1 << (bit % LIMB_BITS));
    limbs[index] |= mask;
}
/// Clear the given bit.
pub(super) fn clear_bit(limbs: &mut [Limb], bit: usize) {
    let (index, mask) = (bit / LIMB_BITS, 1 << (bit % LIMB_BITS));
    limbs[index] &= !mask;
}
/// Shift `dst` left `bits` bits, subtract `bits` from its exponent.
pub(super) fn shift_left(dst: &mut [Limb], exp: &mut ExpInt, bits: usize) {
    if bits > 0 {
        // Our exponent should not underflow.
        *exp = exp.checked_sub(bits as ExpInt).unwrap();
        // Jump is the inter-limb jump; shift is the intra-limb shift.
        let jump = bits / LIMB_BITS;
        let shift = bits % LIMB_BITS;
        // Iterate high-to-low so each source limb is read before any
        // destination write can clobber it (the shift is done in place).
        for i in (0..dst.len()).rev() {
            let mut limb;
            if i < jump {
                // Everything below the jump distance is shifted in as zero.
                limb = 0;
            } else {
                // dst[i] comes from the two limbs src[i - jump] and, if we have
                // an intra-limb shift, src[i - jump - 1].
                limb = dst[i - jump];
                if shift > 0 {
                    limb <<= shift;
                    if i >= jump + 1 {
                        limb |= dst[i - jump - 1] >> (LIMB_BITS - shift);
                    }
                }
            }
            dst[i] = limb;
        }
    }
}
/// Shift `dst` right `bits` bits noting lost fraction.
///
/// The lost fraction is computed from the low `bits` bits *before* the
/// shift destroys them.
pub(super) fn shift_right(dst: &mut [Limb], exp: &mut ExpInt, bits: usize) -> Loss {
    let loss = Loss::through_truncation(dst, bits);
    if bits > 0 {
        // Our exponent should not overflow.
        *exp = exp.checked_add(bits as ExpInt).unwrap();
        // Jump is the inter-limb jump; shift is the intra-limb shift.
        let jump = bits / LIMB_BITS;
        let shift = bits % LIMB_BITS;
        // Perform the shift. This leaves the most significant `bits` bits
        // of the result at zero.
        // Iterate low-to-high so each source limb is read before any
        // destination write can clobber it (the shift is done in place).
        for i in 0..dst.len() {
            let mut limb;
            if i + jump >= dst.len() {
                // Bits shifted in from beyond the top are zero.
                limb = 0;
            } else {
                limb = dst[i + jump];
                if shift > 0 {
                    limb >>= shift;
                    if i + jump + 1 < dst.len() {
                        limb |= dst[i + jump + 1] << (LIMB_BITS - shift);
                    }
                }
            }
            dst[i] = limb;
        }
    }
    loss
}
/// Copy the bit vector of width `src_bits` from `src`, starting at bit SRC_LSB,
/// to `dst`, such that the bit SRC_LSB becomes the least significant bit of `dst`.
/// All high bits above `src_bits` in `dst` are zero-filled.
pub(super) fn extract(dst: &mut [Limb], src: &[Limb], src_bits: usize, src_lsb: usize) {
    if src_bits == 0 {
        return;
    }
    let dst_limbs = limbs_for_bits(src_bits);
    assert!(dst_limbs <= dst.len());
    // Skip the whole limbs below the source LSB, copy a limb-aligned window,
    // then shift out the remaining sub-limb offset.
    let src = &src[src_lsb / LIMB_BITS..];
    dst[..dst_limbs].copy_from_slice(&src[..dst_limbs]);
    let shift = src_lsb % LIMB_BITS;
    let _: Loss = shift_right(&mut dst[..dst_limbs], &mut 0, shift);
    // We now have (dst_limbs * LIMB_BITS - shift) bits from `src`
    // in `dst`. If this is less than src_bits, append the rest, else
    // clear the high bits.
    let n = dst_limbs * LIMB_BITS - shift;
    if n < src_bits {
        // Pull the missing low bits of the next source limb into the top of
        // the last destination limb.
        let mask = (1 << (src_bits - n)) - 1;
        dst[dst_limbs - 1] |= (src[dst_limbs] & mask) << n % LIMB_BITS;
    } else if n > src_bits && src_bits % LIMB_BITS > 0 {
        // Mask off the excess bits above `src_bits`.
        dst[dst_limbs - 1] &= (1 << (src_bits % LIMB_BITS)) - 1;
    }
    // Clear high limbs.
    for x in &mut dst[dst_limbs..] {
        *x = 0;
    }
}
/// We want the most significant PRECISION bits of `src`. There may not
/// be that many; extract what we can.
///
/// Returns the lost fraction and the resulting exponent.
pub(super) fn from_limbs(dst: &mut [Limb], src: &[Limb], precision: usize) -> (Loss, ExpInt) {
    let omsb = omsb(src);
    if precision <= omsb {
        // Truncate: keep only the top `precision` bits, noting what is lost.
        extract(dst, src, precision, omsb - precision);
        (
            Loss::through_truncation(src, omsb - precision),
            omsb as ExpInt - 1,
        )
    } else {
        // Fewer bits available than requested: take them all, losing nothing.
        // NOTE(review): the returned exponent here is `precision - 1`, not
        // `omsb - 1`; the caller's normalization compensates — confirm when
        // changing this path.
        extract(dst, src, omsb, 0);
        (Loss::ExactlyZero, precision as ExpInt - 1)
    }
}
/// Increment in-place, return the carry flag.
pub(super) fn increment(dst: &mut [Limb]) -> Limb {
    for limb in dst {
        // Adding one overflows exactly when the limb was all ones, which
        // is also the only case where the carry must ripple onwards.
        let (incremented, overflowed) = limb.overflowing_add(1);
        *limb = incremented;
        if !overflowed {
            return 0;
        }
    }
    // The carry propagated out of every limb.
    1
}
/// Decrement in-place, return the borrow flag.
pub(super) fn decrement(dst: &mut [Limb]) -> Limb {
    for limb in dst {
        // Subtracting one borrows exactly when the limb was zero, which
        // is also the only case where the borrow must ripple onwards.
        let (decremented, borrowed) = limb.overflowing_sub(1);
        *limb = decremented;
        if !borrowed {
            return 0;
        }
    }
    // The borrow propagated out of every limb.
    1
}
/// `a += b + c` where `c` is zero or one. Returns the carry flag.
pub(super) fn add(a: &mut [Limb], b: &[Limb], mut c: Limb) -> Limb {
    assert!(c <= 1);
    // Ripple the carry limb by limb from least to most significant.
    for (x, &y) in a.iter_mut().zip(b) {
        let (sum, carried1) = x.overflowing_add(y);
        let (sum, carried2) = sum.overflowing_add(c);
        *x = sum;
        // At most one of the two additions can carry.
        c = (carried1 || carried2) as Limb;
    }
    c
}
/// `a -= b + c` where `c` is zero or one. Returns the borrow flag.
pub(super) fn sub(a: &mut [Limb], b: &[Limb], mut c: Limb) -> Limb {
    assert!(c <= 1);
    // Ripple the borrow limb by limb from least to most significant.
    for (x, &y) in a.iter_mut().zip(b) {
        let (diff, borrowed1) = x.overflowing_sub(y);
        let (diff, borrowed2) = diff.overflowing_sub(c);
        *x = diff;
        // At most one of the two subtractions can borrow.
        c = (borrowed1 || borrowed2) as Limb;
    }
    c
}
/// `a += b` or `a -= b`. Does not preserve `b`.
///
/// Whether the magnitudes are added or subtracted is determined by the two
/// signs; the result (including a possibly flipped sign) is written back
/// into the `a_*` arguments.
pub(super) fn add_or_sub(
    a_sig: &mut [Limb],
    a_exp: &mut ExpInt,
    a_sign: &mut bool,
    b_sig: &mut [Limb],
    b_exp: ExpInt,
    b_sign: bool,
) -> Loss {
    // Are we bigger exponent-wise than the RHS?
    let bits = *a_exp - b_exp;
    // Determine if the operation on the absolute values is effectively
    // an addition or subtraction.
    // Subtraction is more subtle than one might naively expect.
    if *a_sign ^ b_sign {
        let (reverse, loss);
        if bits == 0 {
            // Same exponent: just compare magnitudes to decide which side
            // gets subtracted from which; nothing is shifted out.
            reverse = cmp(a_sig, b_sig) == Ordering::Less;
            loss = Loss::ExactlyZero;
        } else if bits > 0 {
            // Align exponents: shift the smaller operand right by one bit
            // less, and the larger left by one, leaving a guard bit so the
            // subtraction below cannot need an extra borrow.
            loss = shift_right(b_sig, &mut 0, (bits - 1) as usize);
            shift_left(a_sig, a_exp, 1);
            reverse = false;
        } else {
            loss = shift_right(a_sig, a_exp, (-bits - 1) as usize);
            shift_left(b_sig, &mut 0, 1);
            reverse = true;
        }
        // A non-zero lost fraction means the shifted-out bits effectively
        // reduce the result by a further unit in the last place.
        let borrow = (loss != Loss::ExactlyZero) as Limb;
        if reverse {
            // The code above is intended to ensure that no borrow is necessary.
            assert_eq!(sub(b_sig, a_sig, borrow), 0);
            a_sig.copy_from_slice(b_sig);
            *a_sign = !*a_sign;
        } else {
            // The code above is intended to ensure that no borrow is necessary.
            assert_eq!(sub(a_sig, b_sig, borrow), 0);
        }
        // Invert the lost fraction - it was on the RHS and subtracted.
        match loss {
            Loss::LessThanHalf => Loss::MoreThanHalf,
            Loss::MoreThanHalf => Loss::LessThanHalf,
            _ => loss,
        }
    } else {
        // Effective addition: align the smaller operand onto the larger
        // operand's exponent, noting the bits shifted out.
        let loss = if bits > 0 {
            shift_right(b_sig, &mut 0, bits as usize)
        } else {
            shift_right(a_sig, a_exp, -bits as usize)
        };
        // We have a guard bit; generating a carry cannot happen.
        assert_eq!(add(a_sig, b_sig, 0), 0);
        loss
    }
}
/// `[low, high] = a * b`.
///
/// This cannot overflow, because
///
/// `(n - 1) * (n - 1) + 2 * (n - 1) == (n - 1) * (n + 1)`
///
/// which is less than n^2.
pub(super) fn widening_mul(a: Limb, b: Limb) -> [Limb; 2] {
    let mut wide = [0, 0];
    if a == 0 || b == 0 {
        return wide;
    }

    // Schoolbook multiplication on half-limb digits: accumulate the four
    // partial products, each shifted into its proper position.
    const HALF_BITS: usize = LIMB_BITS / 2;
    let half = |limb: Limb, i: usize| (limb >> (i * HALF_BITS)) & ((1 << HALF_BITS) - 1);
    for i in 0..2 {
        for j in 0..2 {
            // A product of two half-limbs always fits in a single limb.
            let mut partial = [half(a, i) * half(b, j), 0];
            shift_left(&mut partial, &mut 0, (i + j) * HALF_BITS);
            // Accumulation cannot carry out of the double-wide result.
            assert_eq!(add(&mut wide, &partial, 0), 0);
        }
    }
    wide
}
/// `dst = a * b` (for normal `a` and `b`). Returns the lost fraction.
///
/// `dst` must be able to hold `a.len() + b.len()` limbs of product.
pub(super) fn mul<'a>(
    dst: &mut [Limb],
    exp: &mut ExpInt,
    mut a: &'a [Limb],
    mut b: &'a [Limb],
    precision: usize,
) -> Loss {
    // Put the narrower number on the `a` for less loops below.
    if a.len() > b.len() {
        mem::swap(&mut a, &mut b);
    }
    // Zero the low limbs of the accumulator; higher limbs are written by
    // the carry stores below.
    for x in &mut dst[..b.len()] {
        *x = 0;
    }
    // Schoolbook long multiplication, one `a` limb at a time.
    for i in 0..a.len() {
        let mut carry = 0;
        for j in 0..b.len() {
            let [low, mut high] = widening_mul(a[i], b[j]);
            // Now add carry.
            let (low, overflow) = low.overflowing_add(carry);
            high += overflow as Limb;
            // And now `dst[i + j]`, and store the new low part there.
            let (low, overflow) = low.overflowing_add(dst[i + j]);
            high += overflow as Limb;
            dst[i + j] = low;
            carry = high;
        }
        dst[i + b.len()] = carry;
    }
    // Assume the operands involved in the multiplication are single-precision
    // FP, and the two multiplicands are:
    // a = a23 . a22 ... a0 * 2^e1
    // b = b23 . b22 ... b0 * 2^e2
    // the result of multiplication is:
    // dst = c48 c47 c46 . c45 ... c0 * 2^(e1+e2)
    // Note that there are three significant bits at the left-hand side of the
    // radix point: two for the multiplication, and an overflow bit for the
    // addition (that will always be zero at this point). Move the radix point
    // toward left by two bits, and adjust exponent accordingly.
    *exp += 2;
    // Convert the result having "2 * precision" significant-bits back to the one
    // having "precision" significant-bits. First, move the radix point from
    // position "2*precision - 1" to "precision - 1". The exponent needs to be
    // adjusted by "2*precision - 1" - "precision - 1" = "precision".
    *exp -= precision as ExpInt + 1;
    // In case MSB resides at the left-hand side of radix point, shift the
    // mantissa right by some amount to make sure the MSB reside right before
    // the radix point (i.e. "MSB . rest-significant-bits").
    //
    // Note that the result is not normalized when "omsb < precision". So, the
    // caller needs to call IeeeFloat::normalize() if normalized value is
    // expected.
    let omsb = omsb(dst);
    if omsb <= precision {
        Loss::ExactlyZero
    } else {
        shift_right(dst, exp, omsb - precision)
    }
}
/// `quotient = dividend / divisor`. Returns the lost fraction.
/// Does not preserve `dividend` or `divisor`.
pub(super) fn div(
    quotient: &mut [Limb],
    exp: &mut ExpInt,
    dividend: &mut [Limb],
    divisor: &mut [Limb],
    precision: usize,
) -> Loss {
    // Zero the quotient before setting bits in it.
    for x in &mut quotient[..limbs_for_bits(precision)] {
        *x = 0;
    }
    // Normalize the divisor.
    let bits = precision - omsb(divisor);
    shift_left(divisor, &mut 0, bits);
    *exp += bits as ExpInt;
    // Normalize the dividend.
    let bits = precision - omsb(dividend);
    shift_left(dividend, exp, bits);
    // Ensure the dividend >= divisor initially for the loop below.
    // Incidentally, this means that the division loop below is
    // guaranteed to set the integer bit to one.
    if cmp(dividend, divisor) == Ordering::Less {
        shift_left(dividend, exp, 1);
        assert_ne!(cmp(dividend, divisor), Ordering::Less)
    }
    // Long division.
    // Restoring binary long division: at each step, subtract the divisor if
    // it fits and record a quotient bit, then shift the remainder up.
    for bit in (0..precision).rev() {
        if cmp(dividend, divisor) != Ordering::Less {
            sub(dividend, divisor, 0);
            set_bit(quotient, bit);
        }
        shift_left(dividend, &mut 0, 1);
    }
    // Figure out the lost fraction.
    // The final remainder is left-shifted one position past the last
    // quotient bit, so comparing it against the divisor compares the lost
    // tail against exactly half an ulp.
    match cmp(dividend, divisor) {
        Ordering::Greater => Loss::MoreThanHalf,
        Ordering::Equal => Loss::ExactlyHalf,
        Ordering::Less => {
            if is_all_zeros(dividend) {
                Loss::ExactlyZero
            } else {
                Loss::LessThanHalf
            }
        }
    }
}
}
+693
View File
@@ -0,0 +1,693 @@
// Copyright 2017 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! Port of LLVM's APFloat software floating-point implementation from the
//! following C++ sources (please update commit hash when backporting):
//! https://github.com/llvm-mirror/llvm/tree/23efab2bbd424ed13495a420ad8641cb2c6c28f9
//! * `include/llvm/ADT/APFloat.h` -> `Float` and `FloatConvert` traits
//! * `lib/Support/APFloat.cpp` -> `ieee` and `ppc` modules
//! * `unittests/ADT/APFloatTest.cpp` -> `tests` directory
//!
//! The port contains no unsafe code, global state, or side-effects in general,
//! and the only allocations are in the conversion to/from decimal strings.
//!
//! Most of the API and the testcases are intact in some form or another,
//! with some ergonomic changes, such as idiomatic short names, returning
//! new values instead of mutating the receiver, and having separate method
//! variants that take a non-default rounding mode (with the suffix `_r`).
//! Comments have been preserved where possible, only slightly adapted.
//!
//! Instead of keeping a pointer to a configuration struct and inspecting it
//! dynamically on every operation, types (e.g. `ieee::Double`), traits
//! (e.g. `ieee::Semantics`) and associated constants are employed for
//! increased type safety and performance.
//!
//! On-heap bigints are replaced everywhere (except in decimal conversion),
//! with short arrays of `type Limb = u128` elements (instead of `u64`).
//! This allows fitting the largest supported significands in one integer
//! (`ieee::Quad` and `ppc::Fallback` use slightly less than 128 bits).
//! All of the functions in the `ieee::sig` module operate on slices.
//!
//! # Note
//!
//! This API is completely unstable and subject to change.
#![crate_name = "rustc_apfloat"]
#![doc(html_logo_url = "https://www.rust-lang.org/logos/rust-logo-128x128-blk-v2.png",
html_favicon_url = "https://doc.rust-lang.org/favicon.ico",
html_root_url = "https://doc.rust-lang.org/nightly/")]
#![deny(warnings)]
#![forbid(unsafe_code)]
#![feature(const_fn)]
#![feature(i128_type)]
#![feature(slice_patterns)]
#![feature(try_from)]
#[macro_use]
extern crate rustc_bitflags;
use std::cmp::Ordering;
use std::fmt;
use std::ops::{Neg, Add, Sub, Mul, Div, Rem};
use std::ops::{AddAssign, SubAssign, MulAssign, DivAssign, RemAssign, BitOrAssign};
use std::str::FromStr;
bitflags! {
    /// IEEE-754R 7: Default exception handling.
    ///
    /// UNDERFLOW or OVERFLOW are always returned or-ed with INEXACT.
    // NOTE: this uses the pre-1.0 `bitflags` invocation syntax
    // (`flags` keyword, comma-separated consts).
    #[must_use]
    #[derive(Debug)]
    flags Status: u8 {
        const OK = 0x00,
        const INVALID_OP = 0x01,
        const DIV_BY_ZERO = 0x02,
        const OVERFLOW = 0x04,
        const UNDERFLOW = 0x08,
        const INEXACT = 0x10
    }
}
impl BitOrAssign for Status {
    /// Accumulate additional exception flags: `status |= rhs`.
    fn bitor_assign(&mut self, rhs: Self) {
        let combined = *self | rhs;
        *self = combined;
    }
}
/// A computation result `value` paired with the exception `status`
/// accumulated while producing it.
#[must_use]
#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Debug)]
pub struct StatusAnd<T> {
    /// Exception flags raised by the operation.
    pub status: Status,
    /// The operation's result.
    pub value: T,
}
impl Status {
    /// Attach this status to a computed `value`, producing a `StatusAnd<T>`.
    pub fn and<T>(self, value: T) -> StatusAnd<T> {
        StatusAnd {
            value,
            status: self,
        }
    }
}
impl<T> StatusAnd<T> {
    /// Transform the wrapped value with `f`, carrying the status through
    /// unchanged.
    fn map<F: FnOnce(T) -> U, U>(self, f: F) -> StatusAnd<U> {
        let StatusAnd { status, value } = self;
        StatusAnd {
            status,
            value: f(value),
        }
    }
}
/// Destructure a `StatusAnd { status, value }`: fold the status into the
/// named variable (`|=` form accumulates, `=` form overwrites) and
/// evaluate to the bare value.
#[macro_export]
macro_rules! unpack {
    ($status:ident|=, $e:expr) => {
        match $e {
            $crate::StatusAnd { status, value } => {
                $status |= status;
                value
            }
        }
    };
    ($status:ident=, $e:expr) => {
        match $e {
            $crate::StatusAnd { status, value } => {
                $status = status;
                value
            }
        }
    }
}
/// Category of internally-represented number.
#[derive(Copy, Clone, PartialEq, Eq, Debug)]
pub enum Category {
    /// Positive or negative infinity.
    Infinity,
    /// Quiet or signaling NaN.
    NaN,
    /// Finite non-zero value (including denormals).
    Normal,
    /// Positive or negative zero.
    Zero,
}
/// IEEE-754R 4.3: Rounding-direction attributes.
#[derive(Copy, Clone, PartialEq, Eq, Debug)]
pub enum Round {
    /// Round to nearest, ties to the even digit (the default mode).
    NearestTiesToEven,
    /// Round towards positive infinity.
    TowardPositive,
    /// Round towards negative infinity.
    TowardNegative,
    /// Round towards zero (truncation).
    TowardZero,
    /// Round to nearest, ties away from zero.
    NearestTiesToAway,
}
impl Neg for Round {
    type Output = Round;

    /// The rounding mode to use on the negated operand so the result
    /// matches rounding the original and then negating.
    fn neg(self) -> Round {
        match self {
            // Modes symmetric about zero are unchanged under negation.
            Round::NearestTiesToEven | Round::TowardZero | Round::NearestTiesToAway => self,
            // Directed modes mirror each other.
            Round::TowardPositive => Round::TowardNegative,
            Round::TowardNegative => Round::TowardPositive,
        }
    }
}
/// A signed type to represent a floating point number's unbiased exponent.
pub type ExpInt = i16;

// \c ilogb error results.
/// `ilogb` result for an infinity.
pub const IEK_INF: ExpInt = ExpInt::max_value();
/// `ilogb` result for a NaN.
pub const IEK_NAN: ExpInt = ExpInt::min_value();
/// `ilogb` result for a zero.
pub const IEK_ZERO: ExpInt = ExpInt::min_value() + 1;
/// Error from parsing a floating point literal, wrapping a static
/// description of what was wrong with the input.
#[derive(Copy, Clone, PartialEq, Eq, Debug)]
pub struct ParseError(pub &'static str);
/// A self-contained host- and target-independent arbitrary-precision
/// floating-point software implementation.
///
/// `apfloat` uses significand bignum integer arithmetic as provided by functions
/// in the `ieee::sig`.
///
/// Written for clarity rather than speed, in particular with a view to use in
/// the front-end of a cross compiler so that target arithmetic can be correctly
/// performed on the host. Performance should nonetheless be reasonable,
/// particularly for its intended use. It may be useful as a base
/// implementation for a run-time library during development of a faster
/// target-specific one.
///
/// All 5 rounding modes in the IEEE-754R draft are handled correctly for all
/// implemented operations. Currently implemented operations are add, subtract,
/// multiply, divide, fused-multiply-add, conversion-to-float,
/// conversion-to-integer and conversion-from-integer. New rounding modes
/// (e.g. away from zero) can be added with three or four lines of code.
///
/// Four formats are built-in: IEEE single precision, double precision,
/// quadruple precision, and x87 80-bit extended double (when operating with
/// full extended precision). Adding a new format that obeys IEEE semantics
/// only requires adding two lines of code: a declaration and definition of the
/// format.
///
/// All operations return the status of that operation as an exception bit-mask,
/// so multiple operations can be done consecutively with their results or-ed
/// together. The returned status can be useful for compiler diagnostics; e.g.,
/// inexact, underflow and overflow can be easily diagnosed on constant folding,
/// and compiler optimizers can determine what exceptions would be raised by
/// folding operations and optimize, or perhaps not optimize, accordingly.
///
/// At present, underflow tininess is detected after rounding; it should be
/// straight forward to add support for the before-rounding case too.
///
/// The library reads hexadecimal floating point numbers as per C99, and
/// correctly rounds if necessary according to the specified rounding mode.
/// Syntax is required to have been validated by the caller.
///
/// It also reads decimal floating point numbers and correctly rounds according
/// to the specified rounding mode.
///
/// Non-zero finite numbers are represented internally as a sign bit, a 16-bit
/// signed exponent, and the significand as an array of integer limbs. After
/// normalization of a number of precision P the exponent is within the range of
/// the format, and if the number is not denormal the P-th bit of the
/// significand is set as an explicit integer bit. For denormals the most
/// significant bit is shifted right so that the exponent is maintained at the
/// format's minimum, so that the smallest denormal has just the least
/// significant bit of the significand set. The sign of zeros and infinities
/// is significant; the exponent and significand of such numbers is not stored,
/// but has a known implicit (deterministic) value: 0 for the significands, 0
/// for zero exponent, all 1 bits for infinity exponent. For NaNs the sign and
/// significand are deterministic, although not really meaningful, and preserved
/// in non-conversion operations. The exponent is implicitly all 1 bits.
///
/// `apfloat` does not provide any exception handling beyond default exception
/// handling. We represent Signaling NaNs via IEEE-754R 2008 6.2.1 should clause
/// by encoding Signaling NaNs with the first bit of its trailing significand as
/// 0.
///
/// Future work
/// ===========
///
/// Some features that may or may not be worth adding:
///
/// Optional ability to detect underflow tininess before rounding.
///
/// New formats: x87 in single and double precision mode (IEEE apart from
/// extended exponent range) (hard).
///
/// New operations: sqrt, nexttoward.
///
pub trait Float
: Copy
+ Default
+ FromStr<Err = ParseError>
+ PartialOrd
+ fmt::Display
+ Neg<Output = Self>
+ AddAssign
+ SubAssign
+ MulAssign
+ DivAssign
+ RemAssign
+ Add<Output = StatusAnd<Self>>
+ Sub<Output = StatusAnd<Self>>
+ Mul<Output = StatusAnd<Self>>
+ Div<Output = StatusAnd<Self>>
+ Rem<Output = StatusAnd<Self>> {
/// Total number of bits in the in-memory format.
const BITS: usize;
/// Number of bits in the significand. This includes the integer bit.
const PRECISION: usize;
/// The largest E such that 2^E is representable; this matches the
/// definition of IEEE 754.
const MAX_EXP: ExpInt;
/// The smallest E such that 2^E is a normalized number; this
/// matches the definition of IEEE 754.
const MIN_EXP: ExpInt;
/// Positive Zero.
const ZERO: Self;
/// Positive Infinity.
const INFINITY: Self;
/// NaN (Not a Number).
// FIXME(eddyb) provide a default when qnan becomes const fn.
const NAN: Self;
/// Factory for QNaN values.
// FIXME(eddyb) should be const fn.
fn qnan(payload: Option<u128>) -> Self;
/// Factory for SNaN values.
// FIXME(eddyb) should be const fn.
fn snan(payload: Option<u128>) -> Self;
/// Largest finite number.
// FIXME(eddyb) should be const (but FloatPair::largest is nontrivial).
fn largest() -> Self;
/// Smallest (by magnitude) finite number.
/// Might be denormalized, which implies a relative loss of precision.
const SMALLEST: Self;
/// Smallest (by magnitude) normalized finite number.
// FIXME(eddyb) should be const (but FloatPair::smallest_normalized is nontrivial).
fn smallest_normalized() -> Self;
// Arithmetic
fn add_r(self, rhs: Self, round: Round) -> StatusAnd<Self>;
fn sub_r(self, rhs: Self, round: Round) -> StatusAnd<Self> {
self.add_r(-rhs, round)
}
fn mul_r(self, rhs: Self, round: Round) -> StatusAnd<Self>;
fn mul_add_r(self, multiplicand: Self, addend: Self, round: Round) -> StatusAnd<Self>;
fn mul_add(self, multiplicand: Self, addend: Self) -> StatusAnd<Self> {
self.mul_add_r(multiplicand, addend, Round::NearestTiesToEven)
}
fn div_r(self, rhs: Self, round: Round) -> StatusAnd<Self>;
/// IEEE remainder.
// This is not currently correct in all cases.
fn ieee_rem(self, rhs: Self) -> StatusAnd<Self> {
let mut v = self;
let status;
v = unpack!(status=, v / rhs);
if status == Status::DIV_BY_ZERO {
return status.and(self);
}
assert!(Self::PRECISION < 128);
let status;
let x = unpack!(status=, v.to_i128_r(128, Round::NearestTiesToEven, &mut false));
if status == Status::INVALID_OP {
return status.and(self);
}
let status;
let mut v = unpack!(status=, Self::from_i128(x));
assert_eq!(status, Status::OK); // should always work
let status;
v = unpack!(status=, v * rhs);
assert_eq!(status - Status::INEXACT, Status::OK); // should not overflow or underflow
let status;
v = unpack!(status=, self - v);
assert_eq!(status - Status::INEXACT, Status::OK); // likewise
if v.is_zero() {
status.and(v.copy_sign(self)) // IEEE754 requires this
} else {
status.and(v)
}
}
/// C fmod, or llvm frem.
fn c_fmod(self, rhs: Self) -> StatusAnd<Self>;
fn round_to_integral(self, round: Round) -> StatusAnd<Self>;
/// IEEE-754R 2008 5.3.1: nextUp.
fn next_up(self) -> StatusAnd<Self>;
/// IEEE-754R 2008 5.3.1: nextDown.
///
/// *NOTE* since nextDown(x) = -nextUp(-x), we only implement nextUp with
/// appropriate sign switching before/after the computation.
fn next_down(self) -> StatusAnd<Self> {
(-self).next_up().map(|r| -r)
}
fn abs(self) -> Self {
if self.is_negative() { -self } else { self }
}
fn copy_sign(self, rhs: Self) -> Self {
if self.is_negative() != rhs.is_negative() {
-self
} else {
self
}
}
// Conversions
fn from_bits(input: u128) -> Self;
fn from_i128_r(input: i128, round: Round) -> StatusAnd<Self> {
if input < 0 {
Self::from_u128_r(-input as u128, -round).map(|r| -r)
} else {
Self::from_u128_r(input as u128, round)
}
}
fn from_i128(input: i128) -> StatusAnd<Self> {
Self::from_i128_r(input, Round::NearestTiesToEven)
}
fn from_u128_r(input: u128, round: Round) -> StatusAnd<Self>;
fn from_u128(input: u128) -> StatusAnd<Self> {
Self::from_u128_r(input, Round::NearestTiesToEven)
}
fn from_str_r(s: &str, round: Round) -> Result<StatusAnd<Self>, ParseError>;
fn to_bits(self) -> u128;
/// Convert a floating point number to an integer according to the
/// rounding mode. In case of an invalid operation exception,
/// deterministic values are returned, namely zero for NaNs and the
/// minimal or maximal value respectively for underflow or overflow.
/// If the rounded value is in range but the floating point number is
/// not the exact integer, the C standard doesn't require an inexact
/// exception to be raised. IEEE-854 does require it so we do that.
///
/// Note that for conversions to integer type the C standard requires
/// round-to-zero to always be used.
///
/// The *is_exact output tells whether the result is exact, in the sense
/// that converting it back to the original floating point type produces
/// the original value. This is almost equivalent to result==Status::OK,
/// except for negative zeroes.
fn to_i128_r(self, width: usize, round: Round, is_exact: &mut bool) -> StatusAnd<i128> {
let status;
if self.is_negative() {
if self.is_zero() {
// Negative zero can't be represented as an int.
*is_exact = false;
}
let r = unpack!(status=, (-self).to_u128_r(width, -round, is_exact));
// Check for values that don't fit in the signed integer.
if r > (1 << (width - 1)) {
// Return the most negative integer for the given width.
*is_exact = false;
Status::INVALID_OP.and(-1 << (width - 1))
} else {
status.and(r.wrapping_neg() as i128)
}
} else {
// Positive case is simpler, can pretend it's a smaller unsigned
// integer, and `to_u128` will take care of all the edge cases.
self.to_u128_r(width - 1, round, is_exact).map(
|r| r as i128,
)
}
}
fn to_i128(self, width: usize) -> StatusAnd<i128> {
self.to_i128_r(width, Round::TowardZero, &mut true)
}
fn to_u128_r(self, width: usize, round: Round, is_exact: &mut bool) -> StatusAnd<u128>;
fn to_u128(self, width: usize) -> StatusAnd<u128> {
self.to_u128_r(width, Round::TowardZero, &mut true)
}
fn cmp_abs_normal(self, rhs: Self) -> Ordering;
/// Bitwise comparison for equality (QNaNs compare equal, 0!=-0).
fn bitwise_eq(self, rhs: Self) -> bool;
// IEEE-754R 5.7.2 General operations.
/// Implements IEEE minNum semantics. Returns the smaller of the 2 arguments if
/// both are not NaN. If either argument is a NaN, returns the other argument.
fn min(self, other: Self) -> Self {
if self.is_nan() {
other
} else if other.is_nan() {
self
} else if other.partial_cmp(&self) == Some(Ordering::Less) {
other
} else {
self
}
}
/// Implements IEEE maxNum semantics. Returns the larger of the 2 arguments if
/// both are not NaN. If either argument is a NaN, returns the other argument.
fn max(self, other: Self) -> Self {
if self.is_nan() {
other
} else if other.is_nan() {
self
} else if self.partial_cmp(&other) == Some(Ordering::Less) {
other
} else {
self
}
}
/// IEEE-754R isSignMinus: Returns true if and only if the current value is
/// negative.
///
/// This applies to zeros and NaNs as well.
fn is_negative(self) -> bool;
/// IEEE-754R isNormal: Returns true if and only if the current value is normal.
///
/// This implies that the current value of the float is not zero, subnormal,
/// infinite, or NaN following the definition of normality from IEEE-754R.
fn is_normal(self) -> bool {
!self.is_denormal() && self.is_finite_non_zero()
}
/// Returns true if and only if the current value is zero, subnormal, or
/// normal.
///
/// This means that the value is not infinite or NaN.
fn is_finite(self) -> bool {
!self.is_nan() && !self.is_infinite()
}
/// Returns true if and only if the float is plus or minus zero.
fn is_zero(self) -> bool {
self.category() == Category::Zero
}
/// IEEE-754R isSubnormal(): Returns true if and only if the float is a
/// denormal.
fn is_denormal(self) -> bool;
/// IEEE-754R isInfinite(): Returns true if and only if the float is infinity.
fn is_infinite(self) -> bool {
self.category() == Category::Infinity
}
/// Returns true if and only if the float is a quiet or signaling NaN.
fn is_nan(self) -> bool {
self.category() == Category::NaN
}
/// Returns true if and only if the float is a signaling NaN.
fn is_signaling(self) -> bool;
// Simple Queries
fn category(self) -> Category;
fn is_non_zero(self) -> bool {
!self.is_zero()
}
fn is_finite_non_zero(self) -> bool {
self.is_finite() && !self.is_zero()
}
fn is_pos_zero(self) -> bool {
self.is_zero() && !self.is_negative()
}
fn is_neg_zero(self) -> bool {
self.is_zero() && self.is_negative()
}
/// Returns true if and only if the number has the smallest possible non-zero
/// magnitude in the current semantics.
fn is_smallest(self) -> bool {
Self::SMALLEST.copy_sign(self).bitwise_eq(self)
}
/// Returns true if and only if the number has the largest possible finite
/// magnitude in the current semantics.
fn is_largest(self) -> bool {
Self::largest().copy_sign(self).bitwise_eq(self)
}
/// Returns true if and only if the number is an exact integer.
fn is_integer(self) -> bool {
// This could be made more efficient; I'm going for obviously correct.
if !self.is_finite() {
return false;
}
self.round_to_integral(Round::TowardZero).value.bitwise_eq(
self,
)
}
/// If this value has an exact multiplicative inverse, return it.
fn get_exact_inverse(self) -> Option<Self>;
/// Returns the exponent of the internal representation of the Float.
///
/// Because the radix of Float is 2, this is equivalent to floor(log2(x)).
/// For special Float values, this returns special error codes:
///
/// NaN -> \c IEK_NAN
/// 0 -> \c IEK_ZERO
/// Inf -> \c IEK_INF
///
fn ilogb(self) -> ExpInt;
/// Returns: self * 2^exp for integral exponents.
fn scalbn_r(self, exp: ExpInt, round: Round) -> Self;
fn scalbn(self, exp: ExpInt) -> Self {
self.scalbn_r(exp, Round::NearestTiesToEven)
}
/// Equivalent of C standard library function.
///
/// While the C standard says exp is an unspecified value for infinity and nan,
/// this returns INT_MAX for infinities, and INT_MIN for NaNs (see `ilogb`).
fn frexp_r(self, exp: &mut ExpInt, round: Round) -> Self;
fn frexp(self, exp: &mut ExpInt) -> Self {
self.frexp_r(exp, Round::NearestTiesToEven)
}
}
pub trait FloatConvert<T: Float>: Float {
    /// Convert a value of one floating point type to another.
    /// The return value corresponds to the IEEE754 exceptions. *loses_info
    /// records whether the transformation lost information, i.e. whether
    /// converting the result back to the original type will produce the
    /// original value (this is almost the same as return value==Status::OK,
    /// but there are edge cases where this is not so).
    fn convert_r(self, round: Round, loses_info: &mut bool) -> StatusAnd<T>;
    /// `convert_r` with the default (nearest-even) rounding mode.
    fn convert(self, loses_info: &mut bool) -> StatusAnd<T> {
        let round = Round::NearestTiesToEven;
        self.convert_r(round, loses_info)
    }
}
/// Implements the standard-library operator and conversion traits for a
/// float wrapper type in terms of the `Float` trait's `*_r` methods.
///
/// Each binary operator returns `StatusAnd<Self>` (result plus exception
/// flags); the `*Assign` forms discard the status and keep only the value.
macro_rules! float_common_impls {
    ($ty:ident<$t:tt>) => {
        impl<$t> Default for $ty<$t> where Self: Float {
            // Default value is positive zero.
            fn default() -> Self {
                Self::ZERO
            }
        }
        impl<$t> ::std::str::FromStr for $ty<$t> where Self: Float {
            type Err = ParseError;
            // Parse with nearest-even rounding, discarding the status flags.
            fn from_str(s: &str) -> Result<Self, ParseError> {
                Self::from_str_r(s, Round::NearestTiesToEven).map(|x| x.value)
            }
        }
        // Rounding ties to the nearest even, by default.
        impl<$t> ::std::ops::Add for $ty<$t> where Self: Float {
            type Output = StatusAnd<Self>;
            fn add(self, rhs: Self) -> StatusAnd<Self> {
                self.add_r(rhs, Round::NearestTiesToEven)
            }
        }
        impl<$t> ::std::ops::Sub for $ty<$t> where Self: Float {
            type Output = StatusAnd<Self>;
            fn sub(self, rhs: Self) -> StatusAnd<Self> {
                self.sub_r(rhs, Round::NearestTiesToEven)
            }
        }
        impl<$t> ::std::ops::Mul for $ty<$t> where Self: Float {
            type Output = StatusAnd<Self>;
            fn mul(self, rhs: Self) -> StatusAnd<Self> {
                self.mul_r(rhs, Round::NearestTiesToEven)
            }
        }
        impl<$t> ::std::ops::Div for $ty<$t> where Self: Float {
            type Output = StatusAnd<Self>;
            fn div(self, rhs: Self) -> StatusAnd<Self> {
                self.div_r(rhs, Round::NearestTiesToEven)
            }
        }
        impl<$t> ::std::ops::Rem for $ty<$t> where Self: Float {
            type Output = StatusAnd<Self>;
            // `%` follows C fmod semantics, not IEEE remainder.
            fn rem(self, rhs: Self) -> StatusAnd<Self> {
                self.c_fmod(rhs)
            }
        }
        // The assigning forms drop the Status and keep only the value.
        impl<$t> ::std::ops::AddAssign for $ty<$t> where Self: Float {
            fn add_assign(&mut self, rhs: Self) {
                *self = (*self + rhs).value;
            }
        }
        impl<$t> ::std::ops::SubAssign for $ty<$t> where Self: Float {
            fn sub_assign(&mut self, rhs: Self) {
                *self = (*self - rhs).value;
            }
        }
        impl<$t> ::std::ops::MulAssign for $ty<$t> where Self: Float {
            fn mul_assign(&mut self, rhs: Self) {
                *self = (*self * rhs).value;
            }
        }
        impl<$t> ::std::ops::DivAssign for $ty<$t> where Self: Float {
            fn div_assign(&mut self, rhs: Self) {
                *self = (*self / rhs).value;
            }
        }
        impl<$t> ::std::ops::RemAssign for $ty<$t> where Self: Float {
            fn rem_assign(&mut self, rhs: Self) {
                *self = (*self % rhs).value;
            }
        }
    }
}
pub mod ieee;
pub mod ppc;
+461
View File
@@ -0,0 +1,461 @@
// Copyright 2017 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use {Category, ExpInt, Float, FloatConvert, Round, ParseError, Status, StatusAnd};
use ieee;
use std::cmp::Ordering;
use std::fmt;
use std::ops::Neg;
#[must_use]
#[derive(Copy, Clone, PartialEq, PartialOrd, Debug)]
// A pair of component floats: `.0` is the leading part, `.1` the trailing
// correction term (see the `From` impls below, where the value is `a + b`).
pub struct DoubleFloat<F>(F, F);
/// IBM double-double: two IEEE doubles whose sum represents the value.
pub type DoubleDouble = DoubleFloat<ieee::Double>;
// These are legacy semantics for the Fallback, inaccurate implementation of
// IBM double-double, if the accurate DoubleDouble doesn't handle the
// operation. It's equivalent to having an IEEE number with consecutive 106
// bits of mantissa and 11 bits of exponent.
//
// It's not equivalent to IBM double-double. For example, a legit IBM
// double-double, 1 + epsilon:
//
// 1 + epsilon = 1 + (1 >> 1076)
//
// is not representable by a consecutive 106 bits of mantissa.
//
// Currently, these semantics are used in the following way:
//
// DoubleDouble -> (Double, Double) ->
// DoubleDouble's Fallback -> IEEE operations
//
// FIXME: Implement all operations in DoubleDouble, and delete these
// semantics.
// FIXME(eddyb) This shouldn't need to be `pub`, it's only used in bounds.
// Marker type carrying the IEEE semantics of the wide fallback format.
pub struct FallbackS<F>(F);
// An IEEE float with twice the component precision, used to emulate
// DoubleFloat operations that aren't implemented natively.
type Fallback<F> = ieee::IeeeFloat<FallbackS<F>>;
impl<F: Float> ieee::Semantics for FallbackS<F> {
    // Forbid any conversion to/from bits.
    const BITS: usize = 0;
    // Model 2*P consecutive mantissa bits.
    const PRECISION: usize = F::PRECISION * 2;
    const MAX_EXP: ExpInt = F::MAX_EXP as ExpInt;
    // MIN_EXP is raised by the component precision — NOTE(review):
    // presumably so the trailing component stays representable; confirm.
    const MIN_EXP: ExpInt = F::MIN_EXP as ExpInt + F::PRECISION as ExpInt;
}
// Convert number to F. To avoid spurious underflows, we re-
// normalize against the F exponent range first, and only *then*
// truncate the mantissa. The result of that second conversion
// may be inexact, but should never underflow.
// FIXME(eddyb) This shouldn't need to be `pub`, it's only used in bounds.
// Marker type for an intermediate format with the fallback's precision but
// only the component's exponent range (see the re-normalization comment above).
pub struct FallbackExtendedS<F>(F);
type FallbackExtended<F> = ieee::IeeeFloat<FallbackExtendedS<F>>;
impl<F: Float> ieee::Semantics for FallbackExtendedS<F> {
    // Forbid any conversion to/from bits.
    const BITS: usize = 0;
    // Same precision as the fallback format.
    const PRECISION: usize = Fallback::<F>::PRECISION;
    const MAX_EXP: ExpInt = F::MAX_EXP as ExpInt;
}
// Splits a wide fallback value back into a (leading, trailing) pair.
// The leading part is the fallback rounded to F; the trailing part is the
// residual, which (per the comment inside) converts exactly.
impl<F: Float> From<Fallback<F>> for DoubleFloat<F>
where
    F: FloatConvert<FallbackExtended<F>>,
    FallbackExtended<F>: FloatConvert<F>,
{
    fn from(x: Fallback<F>) -> Self {
        let mut status;
        let mut loses_info = false;
        // Re-normalize against the F exponent range first (exact by
        // construction of FallbackExtended), then truncate the mantissa.
        let extended: FallbackExtended<F> = unpack!(status=, x.convert(&mut loses_info));
        assert_eq!((status, loses_info), (Status::OK, false));
        // This second conversion may be inexact, but must not raise any
        // other exception (underflow in particular).
        let a = unpack!(status=, extended.convert(&mut loses_info));
        assert_eq!(status - Status::INEXACT, Status::OK);
        // If conversion was exact or resulted in a special case, we're done;
        // just set the second double to zero. Otherwise, re-convert back to
        // the extended format and compute the difference. This now should
        // convert exactly to double.
        let b = if a.is_finite_non_zero() && loses_info {
            let u: FallbackExtended<F> = unpack!(status=, a.convert(&mut loses_info));
            assert_eq!((status, loses_info), (Status::OK, false));
            let v = unpack!(status=, extended - u);
            assert_eq!(status, Status::OK);
            let v = unpack!(status=, v.convert(&mut loses_info));
            assert_eq!((status, loses_info), (Status::OK, false));
            v
        } else {
            F::ZERO
        };
        DoubleFloat(a, b)
    }
}
// Combines the (leading, trailing) pair into a single wide fallback value
// by converting each component and summing; both widenings must be exact.
impl<F: FloatConvert<Self>> From<DoubleFloat<F>> for Fallback<F> {
    fn from(DoubleFloat(hi, lo): DoubleFloat<F>) -> Self {
        let mut status;
        let mut loses_info = false;
        // Widen the leading component into the fallback format (exact).
        let hi = unpack!(status=, hi.convert(&mut loses_info));
        assert_eq!((status, loses_info), (Status::OK, false));
        if !hi.is_finite_non_zero() {
            // Zero, infinity or NaN: the trailing component adds nothing.
            return hi;
        }
        // Widen and fold in the trailing component as well (also exact).
        let lo = unpack!(status=, lo.convert(&mut loses_info));
        assert_eq!((status, loses_info), (Status::OK, false));
        (hi + lo).value
    }
}
float_common_impls!(DoubleFloat<F>);
impl<F: Float> Neg for DoubleFloat<F> {
type Output = Self;
fn neg(self) -> Self {
if self.1.is_finite_non_zero() {
DoubleFloat(-self.0, -self.1)
} else {
DoubleFloat(-self.0, self.1)
}
}
}
impl<F: FloatConvert<Fallback<F>>> fmt::Display for DoubleFloat<F> {
    /// Formats the value via its combined wide-fallback representation.
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        let combined = Fallback::from(*self);
        fmt::Display::fmt(&combined, f)
    }
}
impl<F: FloatConvert<Fallback<F>>> Float for DoubleFloat<F>
where
    Self: From<Fallback<F>>,
{
    const BITS: usize = F::BITS * 2;
    // Precision/exponent range are those of the wide fallback format.
    const PRECISION: usize = Fallback::<F>::PRECISION;
    const MAX_EXP: ExpInt = Fallback::<F>::MAX_EXP;
    const MIN_EXP: ExpInt = Fallback::<F>::MIN_EXP;
    // Specials store the special in the leading component, zero trailing.
    const ZERO: Self = DoubleFloat(F::ZERO, F::ZERO);
    const INFINITY: Self = DoubleFloat(F::INFINITY, F::ZERO);
    // FIXME(eddyb) remove when qnan becomes const fn.
    const NAN: Self = DoubleFloat(F::NAN, F::ZERO);
    fn qnan(payload: Option<u128>) -> Self {
        DoubleFloat(F::qnan(payload), F::ZERO)
    }
    fn snan(payload: Option<u128>) -> Self {
        DoubleFloat(F::snan(payload), F::ZERO)
    }
    // Largest finite value: the trailing part is the largest value that,
    // combined with the leading largest, still normalizes (scaled down past
    // the leading mantissa, then stepped down once).
    fn largest() -> Self {
        let status;
        let mut r = DoubleFloat(F::largest(), F::largest());
        r.1 = r.1.scalbn(-(F::PRECISION as ExpInt + 1));
        r.1 = unpack!(status=, r.1.next_down());
        assert_eq!(status, Status::OK);
        r
    }
    const SMALLEST: Self = DoubleFloat(F::SMALLEST, F::ZERO);
    // Smallest normalized value: the component's smallest normalized,
    // scaled up so the full double-double precision is representable.
    fn smallest_normalized() -> Self {
        DoubleFloat(
            F::smallest_normalized().scalbn(F::PRECISION as ExpInt),
            F::ZERO,
        )
    }
    // Implement addition, subtraction, multiplication and division based on:
    // "Software for Doubled-Precision Floating-Point Computations",
    // by Seppo Linnainmaa, ACM TOMS vol 7 no 3, September 1981, pages 272-283.
    fn add_r(mut self, rhs: Self, round: Round) -> StatusAnd<Self> {
        match (self.category(), rhs.category()) {
            // inf + -inf is invalid (NaN); inf + inf keeps self's sign.
            (Category::Infinity, Category::Infinity) => {
                if self.is_negative() != rhs.is_negative() {
                    Status::INVALID_OP.and(Self::NAN.copy_sign(self))
                } else {
                    Status::OK.and(self)
                }
            }
            // These cases are fully decided by `self`...
            (_, Category::Zero) |
            (Category::NaN, _) |
            (Category::Infinity, Category::Normal) => Status::OK.and(self),
            // ...and these by `rhs`.
            (Category::Zero, _) |
            (_, Category::NaN) |
            (_, Category::Infinity) => Status::OK.and(rhs),
            (Category::Normal, Category::Normal) => {
                let mut status = Status::OK;
                // (a, aa) = self, (c, cc) = rhs: leading + trailing parts.
                let (a, aa, c, cc) = (self.0, self.1, rhs.0, rhs.1);
                // z = a + c: the head sum of the leading parts.
                let mut z = a;
                z = unpack!(status|=, z.add_r(c, round));
                if !z.is_finite() {
                    // NaN result: done.
                    if !z.is_infinite() {
                        return status.and(DoubleFloat(z, F::ZERO));
                    }
                    // Head sum overflowed to infinity: redo the whole sum
                    // smallest-terms-first so intermediate rounding cannot
                    // overflow spuriously.
                    status = Status::OK;
                    let a_cmp_c = a.cmp_abs_normal(c);
                    z = cc;
                    z = unpack!(status|=, z.add_r(aa, round));
                    if a_cmp_c == Ordering::Greater {
                        // z = cc + aa + c + a;
                        z = unpack!(status|=, z.add_r(c, round));
                        z = unpack!(status|=, z.add_r(a, round));
                    } else {
                        // z = cc + aa + a + c;
                        z = unpack!(status|=, z.add_r(a, round));
                        z = unpack!(status|=, z.add_r(c, round));
                    }
                    // Still infinite: genuine overflow.
                    if !z.is_finite() {
                        return status.and(DoubleFloat(z, F::ZERO));
                    }
                    self.0 = z;
                    // zz = sum of the trailing parts.
                    let mut zz = aa;
                    zz = unpack!(status|=, zz.add_r(cc, round));
                    // Trailing result: rounding error of the head sum, with
                    // the larger-magnitude operand subtracted first.
                    if a_cmp_c == Ordering::Greater {
                        // self.1 = a - z + c + zz;
                        self.1 = a;
                        self.1 = unpack!(status|=, self.1.sub_r(z, round));
                        self.1 = unpack!(status|=, self.1.add_r(c, round));
                        self.1 = unpack!(status|=, self.1.add_r(zz, round));
                    } else {
                        // self.1 = c - z + a + zz;
                        self.1 = c;
                        self.1 = unpack!(status|=, self.1.sub_r(z, round));
                        self.1 = unpack!(status|=, self.1.add_r(a, round));
                        self.1 = unpack!(status|=, self.1.add_r(zz, round));
                    }
                } else {
                    // Finite head sum: recover its rounding error.
                    // q = a - z;
                    let mut q = a;
                    q = unpack!(status|=, q.sub_r(z, round));
                    // zz = q + c + (a - (q + z)) + aa + cc;
                    // Compute a - (q + z) as -((q + z) - a) to avoid temporary copies.
                    let mut zz = q;
                    zz = unpack!(status|=, zz.add_r(c, round));
                    q = unpack!(status|=, q.add_r(z, round));
                    q = unpack!(status|=, q.sub_r(a, round));
                    q = -q;
                    zz = unpack!(status|=, zz.add_r(q, round));
                    zz = unpack!(status|=, zz.add_r(aa, round));
                    zz = unpack!(status|=, zz.add_r(cc, round));
                    // Error term exactly +0: the head is the full result.
                    if zz.is_zero() && !zz.is_negative() {
                        return Status::OK.and(DoubleFloat(z, F::ZERO));
                    }
                    // Fold the error back in; trailing part is the leftover.
                    self.0 = z;
                    self.0 = unpack!(status|=, self.0.add_r(zz, round));
                    if !self.0.is_finite() {
                        self.1 = F::ZERO;
                        return status.and(self);
                    }
                    self.1 = z;
                    self.1 = unpack!(status|=, self.1.sub_r(self.0, round));
                    self.1 = unpack!(status|=, self.1.add_r(zz, round));
                }
                status.and(self)
            }
        }
    }
    fn mul_r(mut self, rhs: Self, round: Round) -> StatusAnd<Self> {
        // Interesting observation: For special categories, finding the lowest
        // common ancestor of the following layered graph gives the correct
        // return category:
        //
        //        NaN
        //       /   \
        //    Zero    Inf
        //       \   /
        //       Normal
        //
        // e.g. NaN * NaN = NaN
        //      Zero * Inf = NaN
        //      Normal * Zero = Zero
        //      Normal * Inf = Inf
        match (self.category(), rhs.category()) {
            (Category::NaN, _) => Status::OK.and(self),
            (_, Category::NaN) => Status::OK.and(rhs),
            (Category::Zero, Category::Infinity) |
            (Category::Infinity, Category::Zero) => Status::OK.and(Self::NAN),
            (Category::Zero, _) |
            (Category::Infinity, _) => Status::OK.and(self),
            (_, Category::Zero) |
            (_, Category::Infinity) => Status::OK.and(rhs),
            (Category::Normal, Category::Normal) => {
                let mut status = Status::OK;
                // (a + b) * (c + d) ≈ ac + (error of ac) + ad + bc,
                // dropping the b*d term (below the combined precision).
                let (a, b, c, d) = (self.0, self.1, rhs.0, rhs.1);
                // t = a * c
                let mut t = a;
                t = unpack!(status|=, t.mul_r(c, round));
                // Zero/inf/NaN head product decides the whole result.
                if !t.is_finite_non_zero() {
                    return status.and(DoubleFloat(t, F::ZERO));
                }
                // tau = fmsub(a, c, t), that is -fmadd(-a, c, t).
                let mut tau = a;
                tau = unpack!(status|=, tau.mul_add_r(c, -t, round));
                // v = a * d
                let mut v = a;
                v = unpack!(status|=, v.mul_r(d, round));
                // w = b * c
                let mut w = b;
                w = unpack!(status|=, w.mul_r(c, round));
                v = unpack!(status|=, v.add_r(w, round));
                // tau += v + w
                tau = unpack!(status|=, tau.add_r(v, round));
                // u = t + tau
                let mut u = t;
                u = unpack!(status|=, u.add_r(tau, round));
                self.0 = u;
                if !u.is_finite() {
                    self.1 = F::ZERO;
                } else {
                    // self.1 = (t - u) + tau
                    t = unpack!(status|=, t.sub_r(u, round));
                    t = unpack!(status|=, t.add_r(tau, round));
                    self.1 = t;
                }
                status.and(self)
            }
        }
    }
fn mul_add_r(self, multiplicand: Self, addend: Self, round: Round) -> StatusAnd<Self> {
Fallback::from(self)
.mul_add_r(Fallback::from(multiplicand), Fallback::from(addend), round)
.map(Self::from)
}
fn div_r(self, rhs: Self, round: Round) -> StatusAnd<Self> {
Fallback::from(self).div_r(Fallback::from(rhs), round).map(
Self::from,
)
}
fn c_fmod(self, rhs: Self) -> StatusAnd<Self> {
Fallback::from(self).c_fmod(Fallback::from(rhs)).map(
Self::from,
)
}
fn round_to_integral(self, round: Round) -> StatusAnd<Self> {
Fallback::from(self).round_to_integral(round).map(
Self::from,
)
}
fn next_up(self) -> StatusAnd<Self> {
Fallback::from(self).next_up().map(Self::from)
}
fn from_bits(input: u128) -> Self {
let (a, b) = (input, input >> F::BITS);
DoubleFloat(
F::from_bits(a & ((1 << F::BITS) - 1)),
F::from_bits(b & ((1 << F::BITS) - 1)),
)
}
fn from_u128_r(input: u128, round: Round) -> StatusAnd<Self> {
Fallback::from_u128_r(input, round).map(Self::from)
}
fn from_str_r(s: &str, round: Round) -> Result<StatusAnd<Self>, ParseError> {
Fallback::from_str_r(s, round).map(|r| r.map(Self::from))
}
fn to_bits(self) -> u128 {
self.0.to_bits() | (self.1.to_bits() << F::BITS)
}
fn to_u128_r(self, width: usize, round: Round, is_exact: &mut bool) -> StatusAnd<u128> {
Fallback::from(self).to_u128_r(width, round, is_exact)
}
    // Magnitude comparison: decided by the leading components; the trailing
    // components break ties, with their contribution inverted when they pull
    // against the leading component's sign.
    fn cmp_abs_normal(self, rhs: Self) -> Ordering {
        self.0.cmp_abs_normal(rhs.0).then_with(|| {
            let result = self.1.cmp_abs_normal(rhs.1);
            if result != Ordering::Equal {
                // `against` is true when the trailing part reduces the
                // magnitude (its sign differs from the leading part's).
                let against = self.0.is_negative() ^ self.1.is_negative();
                let rhs_against = rhs.0.is_negative() ^ rhs.1.is_negative();
                // An "against" trailing part orders below a "with" one; if
                // both pull the same way, a bigger against-part means a
                // smaller magnitude, hence the reversal.
                (!against).cmp(&!rhs_against).then_with(|| if against {
                    result.reverse()
                } else {
                    result
                })
            } else {
                result
            }
        })
    }
fn bitwise_eq(self, rhs: Self) -> bool {
self.0.bitwise_eq(rhs.0) && self.1.bitwise_eq(rhs.1)
}
fn is_negative(self) -> bool {
self.0.is_negative()
}
fn is_denormal(self) -> bool {
self.category() == Category::Normal &&
(self.0.is_denormal() || self.0.is_denormal() ||
// (double)(Hi + Lo) == Hi defines a normal number.
!(self.0 + self.1).value.bitwise_eq(self.0))
}
fn is_signaling(self) -> bool {
self.0.is_signaling()
}
fn category(self) -> Category {
self.0.category()
}
fn get_exact_inverse(self) -> Option<Self> {
Fallback::from(self).get_exact_inverse().map(Self::from)
}
fn ilogb(self) -> ExpInt {
self.0.ilogb()
}
fn scalbn_r(self, exp: ExpInt, round: Round) -> Self {
DoubleFloat(self.0.scalbn_r(exp, round), self.1.scalbn_r(exp, round))
}
fn frexp_r(self, exp: &mut ExpInt, round: Round) -> Self {
let a = self.0.frexp_r(exp, round);
let mut b = self.1;
if self.category() == Category::Normal {
b = b.scalbn_r(-*exp, round);
}
DoubleFloat(a, b)
}
}
+6891
View File
@@ -0,0 +1,6891 @@
// Copyright 2017 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
#![feature(i128_type)]
#[macro_use]
extern crate rustc_apfloat;
use rustc_apfloat::{Category, ExpInt, IEK_INF, IEK_NAN, IEK_ZERO};
use rustc_apfloat::{Float, FloatConvert, ParseError, Round, Status};
use rustc_apfloat::ieee::{Half, Single, Double, Quad, X87DoubleExtended};
// Helpers to round-trip a `Single` through the native `f32` bit pattern.
trait SingleExt {
    fn from_f32(input: f32) -> Self;
    fn to_f32(self) -> f32;
}
impl SingleExt for Single {
    /// Builds a `Single` from the raw bits of an `f32`.
    fn from_f32(input: f32) -> Self {
        let bits = input.to_bits();
        Self::from_bits(u128::from(bits))
    }
    /// Reinterprets the low 32 bits as an `f32`.
    fn to_f32(self) -> f32 {
        let bits = self.to_bits() as u32;
        f32::from_bits(bits)
    }
}
// Helpers to round-trip a `Double` through the native `f64` bit pattern.
trait DoubleExt {
    fn from_f64(input: f64) -> Self;
    fn to_f64(self) -> f64;
}
impl DoubleExt for Double {
    /// Builds a `Double` from the raw bits of an `f64`.
    fn from_f64(input: f64) -> Self {
        let bits = input.to_bits();
        Self::from_bits(u128::from(bits))
    }
    /// Reinterprets the low 64 bits as an `f64`.
    fn to_f64(self) -> f64 {
        let bits = self.to_bits() as u64;
        f64::from_bits(bits)
    }
}
#[test]
fn is_signaling() {
    // qNaN is never signaling and sNaN always is — with and without a
    // payload, and regardless of sign.
    let payload = 4;
    for &quiet in &[Single::qnan(None), Single::qnan(Some(payload))] {
        assert!(!quiet.is_signaling());
        assert!(!(-quiet).is_signaling());
    }
    for &signaling in &[Single::snan(None), Single::snan(Some(payload))] {
        assert!(signaling.is_signaling());
        assert!((-signaling).is_signaling());
    }
}
#[test]
fn next() {
// 1. Test Special Cases Values.
//
    // Test all special values for nextUp and nextDown prescribed by IEEE-754R
// 2008. These are:
// 1. +inf
// 2. -inf
// 3. largest
// 4. -largest
// 5. smallest
// 6. -smallest
// 7. qNaN
// 8. sNaN
// 9. +0
// 10. -0
let mut status;
// nextUp(+inf) = +inf.
let test = unpack!(status=, Quad::INFINITY.next_up());
let expected = Quad::INFINITY;
assert_eq!(status, Status::OK);
assert!(test.is_infinite());
assert!(!test.is_negative());
assert!(test.bitwise_eq(expected));
// nextDown(+inf) = -nextUp(-inf) = -(-largest) = largest
let test = unpack!(status=, Quad::INFINITY.next_down());
let expected = Quad::largest();
assert_eq!(status, Status::OK);
assert!(!test.is_negative());
assert!(test.bitwise_eq(expected));
// nextUp(-inf) = -largest
let test = unpack!(status=, (-Quad::INFINITY).next_up());
let expected = -Quad::largest();
assert_eq!(status, Status::OK);
assert!(test.is_negative());
assert!(test.bitwise_eq(expected));
// nextDown(-inf) = -nextUp(+inf) = -(+inf) = -inf.
let test = unpack!(status=, (-Quad::INFINITY).next_down());
let expected = -Quad::INFINITY;
assert_eq!(status, Status::OK);
assert!(test.is_infinite() && test.is_negative());
assert!(test.bitwise_eq(expected));
// nextUp(largest) = +inf
let test = unpack!(status=, Quad::largest().next_up());
let expected = Quad::INFINITY;
assert_eq!(status, Status::OK);
assert!(test.is_infinite() && !test.is_negative());
assert!(test.bitwise_eq(expected));
// nextDown(largest) = -nextUp(-largest)
// = -(-largest + inc)
// = largest - inc.
let test = unpack!(status=, Quad::largest().next_down());
let expected = "0x1.fffffffffffffffffffffffffffep+16383"
.parse::<Quad>()
.unwrap();
assert_eq!(status, Status::OK);
assert!(!test.is_infinite() && !test.is_negative());
assert!(test.bitwise_eq(expected));
// nextUp(-largest) = -largest + inc.
let test = unpack!(status=, (-Quad::largest()).next_up());
let expected = "-0x1.fffffffffffffffffffffffffffep+16383"
.parse::<Quad>()
.unwrap();
assert_eq!(status, Status::OK);
assert!(test.bitwise_eq(expected));
// nextDown(-largest) = -nextUp(largest) = -(inf) = -inf.
let test = unpack!(status=, (-Quad::largest()).next_down());
let expected = -Quad::INFINITY;
assert_eq!(status, Status::OK);
assert!(test.is_infinite() && test.is_negative());
assert!(test.bitwise_eq(expected));
// nextUp(smallest) = smallest + inc.
let test = unpack!(status=, "0x0.0000000000000000000000000001p-16382"
.parse::<Quad>()
.unwrap()
.next_up());
let expected = "0x0.0000000000000000000000000002p-16382"
.parse::<Quad>()
.unwrap();
assert_eq!(status, Status::OK);
assert!(test.bitwise_eq(expected));
// nextDown(smallest) = -nextUp(-smallest) = -(-0) = +0.
let test = unpack!(status=, "0x0.0000000000000000000000000001p-16382"
.parse::<Quad>()
.unwrap()
.next_down());
let expected = Quad::ZERO;
assert_eq!(status, Status::OK);
assert!(test.is_pos_zero());
assert!(test.bitwise_eq(expected));
// nextUp(-smallest) = -0.
let test = unpack!(status=, "-0x0.0000000000000000000000000001p-16382"
.parse::<Quad>()
.unwrap()
.next_up());
let expected = -Quad::ZERO;
assert_eq!(status, Status::OK);
assert!(test.is_neg_zero());
assert!(test.bitwise_eq(expected));
// nextDown(-smallest) = -nextUp(smallest) = -smallest - inc.
let test = unpack!(status=, "-0x0.0000000000000000000000000001p-16382"
.parse::<Quad>()
.unwrap()
.next_down());
let expected = "-0x0.0000000000000000000000000002p-16382"
.parse::<Quad>()
.unwrap();
assert_eq!(status, Status::OK);
assert!(test.bitwise_eq(expected));
// nextUp(qNaN) = qNaN
let test = unpack!(status=, Quad::qnan(None).next_up());
let expected = Quad::qnan(None);
assert_eq!(status, Status::OK);
assert!(test.bitwise_eq(expected));
// nextDown(qNaN) = qNaN
let test = unpack!(status=, Quad::qnan(None).next_down());
let expected = Quad::qnan(None);
assert_eq!(status, Status::OK);
assert!(test.bitwise_eq(expected));
// nextUp(sNaN) = qNaN
let test = unpack!(status=, Quad::snan(None).next_up());
let expected = Quad::qnan(None);
assert_eq!(status, Status::INVALID_OP);
assert!(test.bitwise_eq(expected));
// nextDown(sNaN) = qNaN
let test = unpack!(status=, Quad::snan(None).next_down());
let expected = Quad::qnan(None);
assert_eq!(status, Status::INVALID_OP);
assert!(test.bitwise_eq(expected));
// nextUp(+0) = +smallest
let test = unpack!(status=, Quad::ZERO.next_up());
let expected = Quad::SMALLEST;
assert_eq!(status, Status::OK);
assert!(test.bitwise_eq(expected));
// nextDown(+0) = -nextUp(-0) = -smallest
let test = unpack!(status=, Quad::ZERO.next_down());
let expected = -Quad::SMALLEST;
assert_eq!(status, Status::OK);
assert!(test.bitwise_eq(expected));
// nextUp(-0) = +smallest
let test = unpack!(status=, (-Quad::ZERO).next_up());
let expected = Quad::SMALLEST;
assert_eq!(status, Status::OK);
assert!(test.bitwise_eq(expected));
// nextDown(-0) = -nextUp(0) = -smallest
let test = unpack!(status=, (-Quad::ZERO).next_down());
let expected = -Quad::SMALLEST;
assert_eq!(status, Status::OK);
assert!(test.bitwise_eq(expected));
// 2. Binade Boundary Tests.
// 2a. Test denormal <-> normal binade boundaries.
// * nextUp(+Largest Denormal) -> +Smallest Normal.
// * nextDown(-Largest Denormal) -> -Smallest Normal.
// * nextUp(-Smallest Normal) -> -Largest Denormal.
// * nextDown(+Smallest Normal) -> +Largest Denormal.
// nextUp(+Largest Denormal) -> +Smallest Normal.
let test = unpack!(status=, "0x0.ffffffffffffffffffffffffffffp-16382"
.parse::<Quad>()
.unwrap()
.next_up());
let expected = "0x1.0000000000000000000000000000p-16382"
.parse::<Quad>()
.unwrap();
assert_eq!(status, Status::OK);
assert!(!test.is_denormal());
assert!(test.bitwise_eq(expected));
// nextDown(-Largest Denormal) -> -Smallest Normal.
let test = unpack!(status=, "-0x0.ffffffffffffffffffffffffffffp-16382"
.parse::<Quad>()
.unwrap()
.next_down());
let expected = "-0x1.0000000000000000000000000000p-16382"
.parse::<Quad>()
.unwrap();
assert_eq!(status, Status::OK);
assert!(!test.is_denormal());
assert!(test.bitwise_eq(expected));
// nextUp(-Smallest Normal) -> -Largest Denormal.
let test = unpack!(status=, "-0x1.0000000000000000000000000000p-16382"
.parse::<Quad>()
.unwrap()
.next_up());
let expected = "-0x0.ffffffffffffffffffffffffffffp-16382"
.parse::<Quad>()
.unwrap();
assert_eq!(status, Status::OK);
assert!(test.is_denormal());
assert!(test.bitwise_eq(expected));
// nextDown(+Smallest Normal) -> +Largest Denormal.
let test = unpack!(status=, "+0x1.0000000000000000000000000000p-16382"
.parse::<Quad>()
.unwrap()
.next_down());
let expected = "+0x0.ffffffffffffffffffffffffffffp-16382"
.parse::<Quad>()
.unwrap();
assert_eq!(status, Status::OK);
assert!(test.is_denormal());
assert!(test.bitwise_eq(expected));
// 2b. Test normal <-> normal binade boundaries.
// * nextUp(-Normal Binade Boundary) -> -Normal Binade Boundary + 1.
// * nextDown(+Normal Binade Boundary) -> +Normal Binade Boundary - 1.
// * nextUp(+Normal Binade Boundary - 1) -> +Normal Binade Boundary.
// * nextDown(-Normal Binade Boundary + 1) -> -Normal Binade Boundary.
// nextUp(-Normal Binade Boundary) -> -Normal Binade Boundary + 1.
let test = unpack!(status=, "-0x1p+1".parse::<Quad>().unwrap().next_up());
let expected = "-0x1.ffffffffffffffffffffffffffffp+0"
.parse::<Quad>()
.unwrap();
assert_eq!(status, Status::OK);
assert!(test.bitwise_eq(expected));
// nextDown(+Normal Binade Boundary) -> +Normal Binade Boundary - 1.
let test = unpack!(status=, "0x1p+1".parse::<Quad>().unwrap().next_down());
let expected = "0x1.ffffffffffffffffffffffffffffp+0"
.parse::<Quad>()
.unwrap();
assert_eq!(status, Status::OK);
assert!(test.bitwise_eq(expected));
// nextUp(+Normal Binade Boundary - 1) -> +Normal Binade Boundary.
let test = unpack!(status=, "0x1.ffffffffffffffffffffffffffffp+0"
.parse::<Quad>()
.unwrap()
.next_up());
let expected = "0x1p+1".parse::<Quad>().unwrap();
assert_eq!(status, Status::OK);
assert!(test.bitwise_eq(expected));
// nextDown(-Normal Binade Boundary + 1) -> -Normal Binade Boundary.
let test = unpack!(status=, "-0x1.ffffffffffffffffffffffffffffp+0"
.parse::<Quad>()
.unwrap()
.next_down());
let expected = "-0x1p+1".parse::<Quad>().unwrap();
assert_eq!(status, Status::OK);
assert!(test.bitwise_eq(expected));
// 2c. Test using next at binade boundaries with a direction away from the
// binade boundary. Away from denormal <-> normal boundaries.
//
// This is to make sure that even though we are at a binade boundary, since
// we are rounding away, we do not trigger the binade boundary code. Thus we
// test:
// * nextUp(-Largest Denormal) -> -Largest Denormal + inc.
// * nextDown(+Largest Denormal) -> +Largest Denormal - inc.
// * nextUp(+Smallest Normal) -> +Smallest Normal + inc.
// * nextDown(-Smallest Normal) -> -Smallest Normal - inc.
// nextUp(-Largest Denormal) -> -Largest Denormal + inc.
let test = unpack!(status=, "-0x0.ffffffffffffffffffffffffffffp-16382"
.parse::<Quad>()
.unwrap()
.next_up());
let expected = "-0x0.fffffffffffffffffffffffffffep-16382"
.parse::<Quad>()
.unwrap();
assert_eq!(status, Status::OK);
assert!(test.is_denormal());
assert!(test.is_negative());
assert!(test.bitwise_eq(expected));
// nextDown(+Largest Denormal) -> +Largest Denormal - inc.
let test = unpack!(status=, "0x0.ffffffffffffffffffffffffffffp-16382"
.parse::<Quad>()
.unwrap()
.next_down());
let expected = "0x0.fffffffffffffffffffffffffffep-16382"
.parse::<Quad>()
.unwrap();
assert_eq!(status, Status::OK);
assert!(test.is_denormal());
assert!(!test.is_negative());
assert!(test.bitwise_eq(expected));
// nextUp(+Smallest Normal) -> +Smallest Normal + inc.
let test = unpack!(status=, "0x1.0000000000000000000000000000p-16382"
.parse::<Quad>()
.unwrap()
.next_up());
let expected = "0x1.0000000000000000000000000001p-16382"
.parse::<Quad>()
.unwrap();
assert_eq!(status, Status::OK);
assert!(!test.is_denormal());
assert!(!test.is_negative());
assert!(test.bitwise_eq(expected));
// nextDown(-Smallest Normal) -> -Smallest Normal - inc.
let test = unpack!(status=, "-0x1.0000000000000000000000000000p-16382"
.parse::<Quad>()
.unwrap()
.next_down());
let expected = "-0x1.0000000000000000000000000001p-16382"
.parse::<Quad>()
.unwrap();
assert_eq!(status, Status::OK);
assert!(!test.is_denormal());
assert!(test.is_negative());
assert!(test.bitwise_eq(expected));
// 2d. Test values which cause our exponent to go to min exponent. This
// is to ensure that guards in the code to check for min exponent
// trigger properly.
// * nextUp(-0x1p-16381) -> -0x1.ffffffffffffffffffffffffffffp-16382
// * nextDown(-0x1.ffffffffffffffffffffffffffffp-16382) ->
// -0x1p-16381
// * nextUp(0x1.ffffffffffffffffffffffffffffp-16382) -> 0x1p-16382
// * nextDown(0x1p-16382) -> 0x1.ffffffffffffffffffffffffffffp-16382
// nextUp(-0x1p-16381) -> -0x1.ffffffffffffffffffffffffffffp-16382
let test = unpack!(status=, "-0x1p-16381".parse::<Quad>().unwrap().next_up());
let expected = "-0x1.ffffffffffffffffffffffffffffp-16382"
.parse::<Quad>()
.unwrap();
assert_eq!(status, Status::OK);
assert!(test.bitwise_eq(expected));
// nextDown(-0x1.ffffffffffffffffffffffffffffp-16382) ->
// -0x1p-16381
let test = unpack!(status=, "-0x1.ffffffffffffffffffffffffffffp-16382"
.parse::<Quad>()
.unwrap()
.next_down());
let expected = "-0x1p-16381".parse::<Quad>().unwrap();
assert_eq!(status, Status::OK);
assert!(test.bitwise_eq(expected));
// nextUp(0x1.ffffffffffffffffffffffffffffp-16382) -> 0x1p-16381
let test = unpack!(status=, "0x1.ffffffffffffffffffffffffffffp-16382"
.parse::<Quad>()
.unwrap()
.next_up());
let expected = "0x1p-16381".parse::<Quad>().unwrap();
assert_eq!(status, Status::OK);
assert!(test.bitwise_eq(expected));
// nextDown(0x1p-16381) -> 0x1.ffffffffffffffffffffffffffffp-16382
let test = unpack!(status=, "0x1p-16381".parse::<Quad>().unwrap().next_down());
let expected = "0x1.ffffffffffffffffffffffffffffp-16382"
.parse::<Quad>()
.unwrap();
assert_eq!(status, Status::OK);
assert!(test.bitwise_eq(expected));
// 3. Now we test both denormal/normal computation which will not cause us
// to go across binade boundaries. Specifically we test:
// * nextUp(+Denormal) -> +Denormal.
// * nextDown(+Denormal) -> +Denormal.
// * nextUp(-Denormal) -> -Denormal.
// * nextDown(-Denormal) -> -Denormal.
// * nextUp(+Normal) -> +Normal.
// * nextDown(+Normal) -> +Normal.
// * nextUp(-Normal) -> -Normal.
// * nextDown(-Normal) -> -Normal.
// nextUp(+Denormal) -> +Denormal.
let test = unpack!(status=, "0x0.ffffffffffffffffffffffff000cp-16382"
.parse::<Quad>()
.unwrap()
.next_up());
let expected = "0x0.ffffffffffffffffffffffff000dp-16382"
.parse::<Quad>()
.unwrap();
assert_eq!(status, Status::OK);
assert!(test.is_denormal());
assert!(!test.is_negative());
assert!(test.bitwise_eq(expected));
// nextDown(+Denormal) -> +Denormal.
let test = unpack!(status=, "0x0.ffffffffffffffffffffffff000cp-16382"
.parse::<Quad>()
.unwrap()
.next_down());
let expected = "0x0.ffffffffffffffffffffffff000bp-16382"
.parse::<Quad>()
.unwrap();
assert_eq!(status, Status::OK);
assert!(test.is_denormal());
assert!(!test.is_negative());
assert!(test.bitwise_eq(expected));
// nextUp(-Denormal) -> -Denormal.
let test = unpack!(status=, "-0x0.ffffffffffffffffffffffff000cp-16382"
.parse::<Quad>()
.unwrap()
.next_up());
let expected = "-0x0.ffffffffffffffffffffffff000bp-16382"
.parse::<Quad>()
.unwrap();
assert_eq!(status, Status::OK);
assert!(test.is_denormal());
assert!(test.is_negative());
assert!(test.bitwise_eq(expected));
// nextDown(-Denormal) -> -Denormal
let test = unpack!(status=, "-0x0.ffffffffffffffffffffffff000cp-16382"
.parse::<Quad>()
.unwrap()
.next_down());
let expected = "-0x0.ffffffffffffffffffffffff000dp-16382"
.parse::<Quad>()
.unwrap();
assert_eq!(status, Status::OK);
assert!(test.is_denormal());
assert!(test.is_negative());
assert!(test.bitwise_eq(expected));
// nextUp(+Normal) -> +Normal.
let test = unpack!(status=, "0x1.ffffffffffffffffffffffff000cp-16000"
.parse::<Quad>()
.unwrap()
.next_up());
let expected = "0x1.ffffffffffffffffffffffff000dp-16000"
.parse::<Quad>()
.unwrap();
assert_eq!(status, Status::OK);
assert!(!test.is_denormal());
assert!(!test.is_negative());
assert!(test.bitwise_eq(expected));
// nextDown(+Normal) -> +Normal.
let test = unpack!(status=, "0x1.ffffffffffffffffffffffff000cp-16000"
.parse::<Quad>()
.unwrap()
.next_down());
let expected = "0x1.ffffffffffffffffffffffff000bp-16000"
.parse::<Quad>()
.unwrap();
assert_eq!(status, Status::OK);
assert!(!test.is_denormal());
assert!(!test.is_negative());
assert!(test.bitwise_eq(expected));
// nextUp(-Normal) -> -Normal.
let test = unpack!(status=, "-0x1.ffffffffffffffffffffffff000cp-16000"
.parse::<Quad>()
.unwrap()
.next_up());
let expected = "-0x1.ffffffffffffffffffffffff000bp-16000"
.parse::<Quad>()
.unwrap();
assert_eq!(status, Status::OK);
assert!(!test.is_denormal());
assert!(test.is_negative());
assert!(test.bitwise_eq(expected));
// nextDown(-Normal) -> -Normal.
let test = unpack!(status=, "-0x1.ffffffffffffffffffffffff000cp-16000"
.parse::<Quad>()
.unwrap()
.next_down());
let expected = "-0x1.ffffffffffffffffffffffff000dp-16000"
.parse::<Quad>()
.unwrap();
assert_eq!(status, Status::OK);
assert!(!test.is_denormal());
assert!(test.is_negative());
assert!(test.bitwise_eq(expected));
}
#[test]
fn fma() {
    // Plain case: 14.5 * -14.5 + 225.0 == 14.75 in single precision.
    {
        let mut acc = Single::from_f32(14.5);
        let mul = Single::from_f32(-14.5);
        let add = Single::from_f32(225.0);
        acc = acc.mul_add(mul, add).value;
        assert_eq!(14.75, acc.to_f32());
    }
    // Product of two halved values is tiny and absorbed by the 12.0 addend
    // after rounding.
    {
        let two = Single::from_f32(2.0);
        let mut acc = Single::from_f32(1.17549435e-38);
        let mut mul = Single::from_f32(1.17549435e-38);
        acc /= two;
        mul /= two;
        let add = Single::from_f32(12.0);
        acc = acc.mul_add(mul, add).value;
        assert_eq!(12.0, acc.to_f32());
    }
    // Correct zero sign when the answer is exactly zero:
    // fma(1.0, -1.0, 1.0) -> +ve 0.
    {
        let mut acc = Double::from_f64(1.0);
        let mul = Double::from_f64(-1.0);
        let add = Double::from_f64(1.0);
        acc = acc.mul_add(mul, add).value;
        assert!(!acc.is_negative() && acc.is_zero());
    }
    // Correct zero sign when the answer is exactly zero and we round
    // toward negative: fma(1.0, -1.0, 1.0) -> -ve 0.
    {
        let mut acc = Double::from_f64(1.0);
        let mul = Double::from_f64(-1.0);
        let add = Double::from_f64(1.0);
        acc = acc.mul_add_r(mul, add, Round::TowardNegative).value;
        assert!(acc.is_negative() && acc.is_zero());
    }
    // Correct (here negative) sign when adding like-signed zeros:
    // fma(0.0, -0.0, -0.0) -> -ve 0.
    {
        let mut acc = Double::from_f64(0.0);
        let mul = Double::from_f64(-0.0);
        let add = Double::from_f64(-0.0);
        acc = acc.mul_add(mul, add).value;
        assert!(acc.is_negative() && acc.is_zero());
    }
    // Negative sign is preserved when a small negative result underflows.
    {
        let mut acc = "-0x1p-1074".parse::<Double>().unwrap();
        let mul = "+0x1p-1074".parse::<Double>().unwrap();
        let add = Double::from_f64(0.0);
        acc = acc.mul_add(mul, add).value;
        assert!(acc.is_negative() && acc.is_zero());
    }
    // x87 extended precision case from http://llvm.org/PR20728:
    // 1 * 1 + 3 must narrow to Single as exactly 4 without losing info.
    {
        let mut acc = X87DoubleExtended::from_u128(1).value;
        let mul = X87DoubleExtended::from_u128(1).value;
        let add = X87DoubleExtended::from_u128(3).value;
        let mut lost_info = false;
        acc = acc.mul_add(mul, add).value;
        let narrowed: Single = acc.convert(&mut lost_info).value;
        assert!(!lost_info);
        assert_eq!(4.0, narrowed.to_f32());
    }
}
#[test]
fn min_num() {
    let one = Double::from_f64(1.0);
    let two = Double::from_f64(2.0);
    let qnan = Double::NAN;
    // The smaller numeric operand wins, regardless of argument order.
    assert_eq!(1.0, one.min(two).to_f64());
    assert_eq!(1.0, two.min(one).to_f64());
    // A NaN operand is ignored: the numeric operand is returned.
    assert_eq!(1.0, one.min(qnan).to_f64());
    assert_eq!(1.0, qnan.min(one).to_f64());
}
#[test]
fn max_num() {
    let one = Double::from_f64(1.0);
    let two = Double::from_f64(2.0);
    let qnan = Double::NAN;
    // The larger numeric operand wins, regardless of argument order.
    assert_eq!(2.0, one.max(two).to_f64());
    assert_eq!(2.0, two.max(one).to_f64());
    // A NaN operand is ignored: the numeric operand is returned.
    assert_eq!(1.0, one.max(qnan).to_f64());
    assert_eq!(1.0, qnan.max(one).to_f64());
}
#[test]
fn denormal() {
    // For each format: zero is not denormal, the smallest positive normal
    // value is not denormal, and halving that value produces a denormal.
    // Single precision.
    {
        assert!(!Single::from_f32(0.0).is_denormal());
        let mut x = "1.17549435082228750797e-38".parse::<Single>().unwrap();
        assert!(!x.is_denormal());
        x /= Single::from_f32(2.0e0);
        assert!(x.is_denormal());
    }
    // Double precision.
    {
        assert!(!Double::from_f64(0.0).is_denormal());
        let mut x = "2.22507385850720138309e-308".parse::<Double>().unwrap();
        assert!(!x.is_denormal());
        x /= Double::from_f64(2.0e0);
        assert!(x.is_denormal());
    }
    // Intel x87 double-extended.
    {
        assert!(!X87DoubleExtended::from_u128(0).value.is_denormal());
        let mut x = "3.36210314311209350626e-4932"
            .parse::<X87DoubleExtended>()
            .unwrap();
        assert!(!x.is_denormal());
        x /= X87DoubleExtended::from_u128(2).value;
        assert!(x.is_denormal());
    }
    // Quadruple precision.
    {
        assert!(!Quad::from_u128(0).value.is_denormal());
        let mut x = "3.36210314311209350626267781732175260e-4932"
            .parse::<Quad>()
            .unwrap();
        assert!(!x.is_denormal());
        x /= Quad::from_u128(2).value;
        assert!(x.is_denormal());
    }
}
#[test]
fn decimal_strings_without_null_terminators() {
    // Make sure that we can parse strings without null terminators.
    // rdar://14323230.
    // Each case parses only the first `len` bytes of the literal.
    for &(s, len, want) in &[
        ("0.00", 3, 0.0),
        ("0.01", 3, 0.0),
        ("0.09", 3, 0.0),
        ("0.095", 4, 0.09),
        ("0.00e+3", 7, 0.00),
        ("0e+3", 4, 0.00),
    ] {
        let val = s[..len].parse::<Double>().unwrap();
        assert_eq!(val.to_f64(), want);
    }
}
#[test]
fn from_zero_decimal_string() {
    // Every decimal spelling of zero parses to (signed) zero.
    for &(want, input) in &[
        (0.0, "0"),
        (0.0, "+0"),
        (-0.0, "-0"),
        (0.0, "0."),
        (0.0, "+0."),
        (-0.0, "-0."),
        (0.0, ".0"),
        (0.0, "+.0"),
        (-0.0, "-.0"),
        (0.0, "0.0"),
        (0.0, "+0.0"),
        (-0.0, "-0.0"),
        (0.0, "00000."),
        (0.0, "+00000."),
        (-0.0, "-00000."),
        (0.0, ".00000"),
        (0.0, "+.00000"),
        (-0.0, "-.00000"),
        (0.0, "0000.00000"),
        (0.0, "+0000.00000"),
        (-0.0, "-0000.00000"),
    ] {
        assert_eq!(want, input.parse::<Double>().unwrap().to_f64());
    }
}
#[test]
fn from_zero_decimal_single_exponent_string() {
    // Zero with a single-digit exponent, in all sign/dot combinations.
    for &(want, input) in &[
        (0.0, "0e1"),
        (0.0, "+0e1"),
        (-0.0, "-0e1"),
        (0.0, "0e+1"),
        (0.0, "+0e+1"),
        (-0.0, "-0e+1"),
        (0.0, "0e-1"),
        (0.0, "+0e-1"),
        (-0.0, "-0e-1"),
        (0.0, "0.e1"),
        (0.0, "+0.e1"),
        (-0.0, "-0.e1"),
        (0.0, "0.e+1"),
        (0.0, "+0.e+1"),
        (-0.0, "-0.e+1"),
        (0.0, "0.e-1"),
        (0.0, "+0.e-1"),
        (-0.0, "-0.e-1"),
        (0.0, ".0e1"),
        (0.0, "+.0e1"),
        (-0.0, "-.0e1"),
        (0.0, ".0e+1"),
        (0.0, "+.0e+1"),
        (-0.0, "-.0e+1"),
        (0.0, ".0e-1"),
        (0.0, "+.0e-1"),
        (-0.0, "-.0e-1"),
        (0.0, "0.0e1"),
        (0.0, "+0.0e1"),
        (-0.0, "-0.0e1"),
        (0.0, "0.0e+1"),
        (0.0, "+0.0e+1"),
        (-0.0, "-0.0e+1"),
        (0.0, "0.0e-1"),
        (0.0, "+0.0e-1"),
        (-0.0, "-0.0e-1"),
        (0.0, "000.0000e1"),
        (0.0, "+000.0000e+1"),
        (-0.0, "-000.0000e+1"),
    ] {
        assert_eq!(want, input.parse::<Double>().unwrap().to_f64());
    }
}
#[test]
fn from_zero_decimal_large_exponent_string() {
    // Zero stays zero even with very large (or very negative) exponents.
    for &(want, input) in &[
        (0.0, "0e1234"),
        (0.0, "+0e1234"),
        (-0.0, "-0e1234"),
        (0.0, "0e+1234"),
        (0.0, "+0e+1234"),
        (-0.0, "-0e+1234"),
        (0.0, "0e-1234"),
        (0.0, "+0e-1234"),
        (-0.0, "-0e-1234"),
        (0.0, "000.0000e1234"),
        (0.0, "000.0000e-1234"),
    ] {
        assert_eq!(want, input.parse::<Double>().unwrap().to_f64());
    }
}
#[test]
fn from_zero_hexadecimal_string() {
    // Every hexadecimal spelling of zero parses to (signed) zero,
    // whatever the binary exponent.
    for &(want, input) in &[
        (0.0, "0x0p1"),
        (0.0, "+0x0p1"),
        (-0.0, "-0x0p1"),
        (0.0, "0x0p+1"),
        (0.0, "+0x0p+1"),
        (-0.0, "-0x0p+1"),
        (0.0, "0x0p-1"),
        (0.0, "+0x0p-1"),
        (-0.0, "-0x0p-1"),
        (0.0, "0x0.p1"),
        (0.0, "+0x0.p1"),
        (-0.0, "-0x0.p1"),
        (0.0, "0x0.p+1"),
        (0.0, "+0x0.p+1"),
        (-0.0, "-0x0.p+1"),
        (0.0, "0x0.p-1"),
        (0.0, "+0x0.p-1"),
        (-0.0, "-0x0.p-1"),
        (0.0, "0x.0p1"),
        (0.0, "+0x.0p1"),
        (-0.0, "-0x.0p1"),
        (0.0, "0x.0p+1"),
        (0.0, "+0x.0p+1"),
        (-0.0, "-0x.0p+1"),
        (0.0, "0x.0p-1"),
        (0.0, "+0x.0p-1"),
        (-0.0, "-0x.0p-1"),
        (0.0, "0x0.0p1"),
        (0.0, "+0x0.0p1"),
        (-0.0, "-0x0.0p1"),
        (0.0, "0x0.0p+1"),
        (0.0, "+0x0.0p+1"),
        (-0.0, "-0x0.0p+1"),
        (0.0, "0x0.0p-1"),
        (0.0, "+0x0.0p-1"),
        (-0.0, "-0x0.0p-1"),
        (0.0, "0x00000.p1"),
        (0.0, "0x0000.00000p1"),
        (0.0, "0x.00000p1"),
        (0.0, "0x0.p1"),
        (0.0, "0x0p1234"),
        (-0.0, "-0x0p1234"),
        (0.0, "0x00000.p1234"),
        (0.0, "0x0000.00000p1234"),
        (0.0, "0x.00000p1234"),
        (0.0, "0x0.p1234"),
    ] {
        assert_eq!(want, input.parse::<Double>().unwrap().to_f64());
    }
}
#[test]
fn from_decimal_string() {
    // Round-trip a variety of decimal spellings through Double.
    for &(want, input) in &[
        (1.0, "1"),
        (2.0, "2."),
        (0.5, ".5"),
        (1.0, "1.0"),
        (-2.0, "-2"),
        (-4.0, "-4."),
        (-0.5, "-.5"),
        (-1.5, "-1.5"),
        (1.25e12, "1.25e12"),
        (1.25e+12, "1.25e+12"),
        (1.25e-12, "1.25e-12"),
        (1024.0, "1024."),
        (1024.05, "1024.05000"),
        (0.05, ".05000"),
        (2.0, "2."),
        (2.0e2, "2.e2"),
        (2.0e+2, "2.e+2"),
        (2.0e-2, "2.e-2"),
        (2.05e2, "002.05000e2"),
        (2.05e+2, "002.05000e+2"),
        (2.05e-2, "002.05000e-2"),
        (2.05e12, "002.05000e12"),
        (2.05e+12, "002.05000e+12"),
        (2.05e-12, "002.05000e-12"),
    ] {
        assert_eq!(want, input.parse::<Double>().unwrap().to_f64());
    }
    // These are "carefully selected" to overflow the fast log-base
    // calculations in the implementation.
    assert!("99e99999".parse::<Double>().unwrap().is_infinite());
    assert!("-99e99999".parse::<Double>().unwrap().is_infinite());
    assert!("1e-99999".parse::<Double>().unwrap().is_pos_zero());
    assert!("-1e-99999".parse::<Double>().unwrap().is_neg_zero());
    assert_eq!(2.71828, "2.71828".parse::<Double>().unwrap().to_f64());
}
#[test]
fn from_hexadecimal_string() {
    // Round-trip a variety of hexadecimal-float spellings through Double.
    for &(want, input) in &[
        (1.0, "0x1p0"),
        (1.0, "+0x1p0"),
        (-1.0, "-0x1p0"),
        (1.0, "0x1p+0"),
        (1.0, "+0x1p+0"),
        (-1.0, "-0x1p+0"),
        (1.0, "0x1p-0"),
        (1.0, "+0x1p-0"),
        (-1.0, "-0x1p-0"),
        (2.0, "0x1p1"),
        (2.0, "+0x1p1"),
        (-2.0, "-0x1p1"),
        (2.0, "0x1p+1"),
        (2.0, "+0x1p+1"),
        (-2.0, "-0x1p+1"),
        (0.5, "0x1p-1"),
        (0.5, "+0x1p-1"),
        (-0.5, "-0x1p-1"),
        (3.0, "0x1.8p1"),
        (3.0, "+0x1.8p1"),
        (-3.0, "-0x1.8p1"),
        (3.0, "0x1.8p+1"),
        (3.0, "+0x1.8p+1"),
        (-3.0, "-0x1.8p+1"),
        (0.75, "0x1.8p-1"),
        (0.75, "+0x1.8p-1"),
        (-0.75, "-0x1.8p-1"),
        (8192.0, "0x1000.000p1"),
        (8192.0, "+0x1000.000p1"),
        (-8192.0, "-0x1000.000p1"),
        (8192.0, "0x1000.000p+1"),
        (8192.0, "+0x1000.000p+1"),
        (-8192.0, "-0x1000.000p+1"),
        (2048.0, "0x1000.000p-1"),
        (2048.0, "+0x1000.000p-1"),
        (-2048.0, "-0x1000.000p-1"),
        (8192.0, "0x1000p1"),
        (8192.0, "+0x1000p1"),
        (-8192.0, "-0x1000p1"),
        (8192.0, "0x1000p+1"),
        (8192.0, "+0x1000p+1"),
        (-8192.0, "-0x1000p+1"),
        (2048.0, "0x1000p-1"),
        (2048.0, "+0x1000p-1"),
        (-2048.0, "-0x1000p-1"),
        (16384.0, "0x10p10"),
        (16384.0, "+0x10p10"),
        (-16384.0, "-0x10p10"),
        (16384.0, "0x10p+10"),
        (16384.0, "+0x10p+10"),
        (-16384.0, "-0x10p+10"),
        (0.015625, "0x10p-10"),
        (0.015625, "+0x10p-10"),
        (-0.015625, "-0x10p-10"),
        (1.0625, "0x1.1p0"),
        (1.0, "0x1p0"),
    ] {
        assert_eq!(want, input.parse::<Double>().unwrap().to_f64());
    }
    // Two spellings of the same value must parse identically.
    assert_eq!(
        "0x1p-150".parse::<Double>().unwrap().to_f64(),
        "+0x800000000000000001.p-221"
            .parse::<Double>()
            .unwrap()
            .to_f64()
    );
    assert_eq!(
        2251799813685248.5,
        "0x80000000000004000000.010p-28"
            .parse::<Double>()
            .unwrap()
            .to_f64()
    );
}
#[test]
fn to_string() {
    // Render a f64 through Double's `Display` impl, feeding the closure's
    // `precision`/`width` arguments into the format string positionally:
    // precision == 0 means "no precision specified".
    let to_string = |d: f64, precision: usize, width: usize| {
        let x = Double::from_f64(d);
        if precision == 0 {
            // `{:1$}` — only a width (argument 1) is supplied.
            format!("{:1$}", x, width)
        } else {
            // `{:2$.1$}` — width from argument 2, precision from argument 1.
            format!("{:2$.1$}", x, precision, width)
        }
    };
    // Default form: scientific notation is spelled with an upper-case 'E'
    // and trailing zeros are trimmed (see expected strings below).
    assert_eq!("10", to_string(10.0, 6, 3));
    assert_eq!("1.0E+1", to_string(10.0, 6, 0));
    assert_eq!("10100", to_string(1.01E+4, 5, 2));
    assert_eq!("1.01E+4", to_string(1.01E+4, 4, 2));
    assert_eq!("1.01E+4", to_string(1.01E+4, 5, 1));
    assert_eq!("0.0101", to_string(1.01E-2, 5, 2));
    assert_eq!("0.0101", to_string(1.01E-2, 4, 2));
    assert_eq!("1.01E-2", to_string(1.01E-2, 5, 1));
    assert_eq!(
        "0.78539816339744828",
        to_string(0.78539816339744830961, 0, 3)
    );
    assert_eq!(
        "4.9406564584124654E-324",
        to_string(4.9406564584124654e-324, 0, 3)
    );
    assert_eq!("873.18340000000001", to_string(873.1834, 0, 1));
    assert_eq!("8.7318340000000001E+2", to_string(873.1834, 0, 0));
    assert_eq!(
        "1.7976931348623157E+308",
        to_string(1.7976931348623157E+308, 0, 0)
    );
    // Same driver, but with the alternate (`#`) flag: per the expected
    // strings, this form uses a lower-case 'e' and keeps trailing zeros
    // up to the requested precision.
    let to_string = |d: f64, precision: usize, width: usize| {
        let x = Double::from_f64(d);
        if precision == 0 {
            format!("{:#1$}", x, width)
        } else {
            format!("{:#2$.1$}", x, precision, width)
        }
    };
    assert_eq!("10", to_string(10.0, 6, 3));
    assert_eq!("1.000000e+01", to_string(10.0, 6, 0));
    assert_eq!("10100", to_string(1.01E+4, 5, 2));
    assert_eq!("1.0100e+04", to_string(1.01E+4, 4, 2));
    assert_eq!("1.01000e+04", to_string(1.01E+4, 5, 1));
    assert_eq!("0.0101", to_string(1.01E-2, 5, 2));
    assert_eq!("0.0101", to_string(1.01E-2, 4, 2));
    assert_eq!("1.01000e-02", to_string(1.01E-2, 5, 1));
    assert_eq!(
        "0.78539816339744828",
        to_string(0.78539816339744830961, 0, 3)
    );
    assert_eq!(
        "4.94065645841246540e-324",
        to_string(4.9406564584124654e-324, 0, 3)
    );
    assert_eq!("873.18340000000001", to_string(873.1834, 0, 1));
    assert_eq!("8.73183400000000010e+02", to_string(873.1834, 0, 0));
    assert_eq!(
        "1.79769313486231570e+308",
        to_string(1.7976931348623157E+308, 0, 0)
    );
}
#[test]
fn to_integer() {
    // Conversions to 5-bit integers with round-toward-zero. Every
    // `to_u128_r`/`to_i128_r` call writes whether the conversion was exact
    // into `is_exact`, which is asserted right after each call.
    let mut is_exact = false;
    // 10 fits in 5 unsigned bits: OK and exact.
    assert_eq!(
        Status::OK.and(10),
        "10".parse::<Double>().unwrap().to_u128_r(
            5,
            Round::TowardZero,
            &mut is_exact,
        )
    );
    assert!(is_exact);
    // Negative value to unsigned: invalid operation, result pinned to 0.
    assert_eq!(
        Status::INVALID_OP.and(0),
        "-10".parse::<Double>().unwrap().to_u128_r(
            5,
            Round::TowardZero,
            &mut is_exact,
        )
    );
    assert!(!is_exact);
    // 32 exceeds the 5-bit unsigned max (31): invalid, pinned to 31.
    assert_eq!(
        Status::INVALID_OP.and(31),
        "32".parse::<Double>().unwrap().to_u128_r(
            5,
            Round::TowardZero,
            &mut is_exact,
        )
    );
    assert!(!is_exact);
    // 7.9 truncates toward zero to 7: inexact.
    assert_eq!(
        Status::INEXACT.and(7),
        "7.9".parse::<Double>().unwrap().to_u128_r(
            5,
            Round::TowardZero,
            &mut is_exact,
        )
    );
    assert!(!is_exact);
    // -10 fits the 5-bit signed range [-16, 15]: OK and exact.
    assert_eq!(
        Status::OK.and(-10),
        "-10".parse::<Double>().unwrap().to_i128_r(
            5,
            Round::TowardZero,
            &mut is_exact,
        )
    );
    assert!(is_exact);
    // -17 is below the signed minimum (-16): invalid, pinned to -16.
    assert_eq!(
        Status::INVALID_OP.and(-16),
        "-17".parse::<Double>().unwrap().to_i128_r(
            5,
            Round::TowardZero,
            &mut is_exact,
        )
    );
    assert!(!is_exact);
    // 16 is above the signed maximum (15): invalid, pinned to 15.
    assert_eq!(
        Status::INVALID_OP.and(15),
        "16".parse::<Double>().unwrap().to_i128_r(
            5,
            Round::TowardZero,
            &mut is_exact,
        )
    );
    assert!(!is_exact);
}
#[test]
fn nan() {
    // Build a NaN of type T (quiet or signaling, optionally negated, with
    // the given payload bits) and return its raw bit encoding.
    fn nanbits<T: Float>(signaling: bool, negative: bool, fill: u128) -> u128 {
        let mut nan = if signaling {
            T::snan(Some(fill))
        } else {
            T::qnan(Some(fill))
        };
        if negative {
            nan = -nan;
        }
        nan.to_bits()
    }
    // Single precision: (expected bits, signaling, negative, payload).
    for &(bits, signaling, negative, fill) in &[
        (0x7fc00000, false, false, 0),
        (0xffc00000, false, true, 0),
        (0x7fc0ae72, false, false, 0xae72),
        (0x7fffae72, false, false, 0xffffae72),
        (0x7fa00000, true, false, 0),
        (0xffa00000, true, true, 0),
        (0x7f80ae72, true, false, 0xae72),
        (0x7fbfae72, true, false, 0xffffae72),
    ] {
        assert_eq!(bits, nanbits::<Single>(signaling, negative, fill));
    }
    // Double precision: (expected bits, signaling, negative, payload).
    for &(bits, signaling, negative, fill) in &[
        (0x7ff8000000000000, false, false, 0),
        (0xfff8000000000000, false, true, 0),
        (0x7ff800000000ae72, false, false, 0xae72),
        (0x7fffffffffffae72, false, false, 0xffffffffffffae72),
        (0x7ff4000000000000, true, false, 0),
        (0xfff4000000000000, true, true, 0),
        (0x7ff000000000ae72, true, false, 0xae72),
        (0x7ff7ffffffffae72, true, false, 0xffffffffffffae72),
    ] {
        assert_eq!(bits, nanbits::<Double>(signaling, negative, fill));
    }
}
#[test]
fn string_decimal_death() {
    // Malformed decimal strings must be rejected, each with the expected
    // diagnostic message.
    for &(input, msg) in &[
        ("", "Invalid string length"),
        ("+", "String has no digits"),
        ("-", "String has no digits"),
        ("\0", "Invalid character in significand"),
        ("1\0", "Invalid character in significand"),
        ("1\02", "Invalid character in significand"),
        ("1\02e1", "Invalid character in significand"),
        ("1e\0", "Invalid character in exponent"),
        ("1e1\0", "Invalid character in exponent"),
        ("1e1\02", "Invalid character in exponent"),
        ("1.0f", "Invalid character in significand"),
        ("..", "String contains multiple dots"),
        ("..0", "String contains multiple dots"),
        ("1.0.0", "String contains multiple dots"),
    ] {
        assert_eq!(input.parse::<Double>(), Err(ParseError(msg)));
    }
}
#[test]
fn string_decimal_significand_death() {
    // Decimal strings whose significand contains no digits at all.
    for input in &[
        ".", "+.", "-.",
        "e", "+e", "-e",
        "e1", "+e1", "-e1",
        ".e1", "+.e1", "-.e1",
        ".e", "+.e", "-.e",
    ] {
        assert_eq!(
            input.parse::<Double>(),
            Err(ParseError("Significand has no digits"))
        );
    }
}
#[test]
fn string_decimal_exponent_death() {
    // Decimal strings with an 'e' marker but no exponent digits.
    for input in &[
        "1e", "+1e", "-1e",
        "1.e", "+1.e", "-1.e",
        ".1e", "+.1e", "-.1e",
        "1.1e", "+1.1e", "-1.1e",
        "1e+", "1e-",
        ".1e", ".1e+", ".1e-",
        "1.0e", "1.0e+", "1.0e-",
    ] {
        assert_eq!(
            input.parse::<Double>(),
            Err(ParseError("Exponent has no digits"))
        );
    }
}
#[test]
fn string_hexadecimal_death() {
    // Malformed hexadecimal strings must be rejected, each with the
    // expected diagnostic message.
    for &(input, msg) in &[
        ("0x", "Invalid string"),
        ("+0x", "Invalid string"),
        ("-0x", "Invalid string"),
        ("0x0", "Hex strings require an exponent"),
        ("+0x0", "Hex strings require an exponent"),
        ("-0x0", "Hex strings require an exponent"),
        ("0x0.", "Hex strings require an exponent"),
        ("+0x0.", "Hex strings require an exponent"),
        ("-0x0.", "Hex strings require an exponent"),
        ("0x.0", "Hex strings require an exponent"),
        ("+0x.0", "Hex strings require an exponent"),
        ("-0x.0", "Hex strings require an exponent"),
        ("0x0.0", "Hex strings require an exponent"),
        ("+0x0.0", "Hex strings require an exponent"),
        ("-0x0.0", "Hex strings require an exponent"),
        ("0x\0", "Invalid character in significand"),
        ("0x1\0", "Invalid character in significand"),
        ("0x1\02", "Invalid character in significand"),
        ("0x1\02p1", "Invalid character in significand"),
        ("0x1p\0", "Invalid character in exponent"),
        ("0x1p1\0", "Invalid character in exponent"),
        ("0x1p1\02", "Invalid character in exponent"),
        ("0x1p0f", "Invalid character in exponent"),
        ("0x..p1", "String contains multiple dots"),
        ("0x..0p1", "String contains multiple dots"),
        ("0x1.0.0p1", "String contains multiple dots"),
    ] {
        assert_eq!(input.parse::<Double>(), Err(ParseError(msg)));
    }
}
#[test]
fn string_hexadecimal_significand_death() {
    // Hexadecimal strings whose significand contains no digits at all.
    for input in &[
        "0x.", "+0x.", "-0x.",
        "0xp", "+0xp", "-0xp",
        "0xp+", "+0xp+", "-0xp+",
        "0xp-", "+0xp-", "-0xp-",
        "0x.p", "+0x.p", "-0x.p",
        "0x.p+", "+0x.p+", "-0x.p+",
        "0x.p-", "+0x.p-", "-0x.p-",
    ] {
        assert_eq!(
            input.parse::<Double>(),
            Err(ParseError("Significand has no digits"))
        );
    }
}
#[test]
fn string_hexadecimal_exponent_death() {
    // A hex literal with digits in the significand but none after the
    // exponent marker (even with an explicit exponent sign) must fail.
    for sig in &["0x1", "0x1.", "0x.1", "0x1.1"] {
        for exp in &["p", "p+", "p-"] {
            for sign in &["", "+", "-"] {
                let input = format!("{}{}{}", sign, sig, exp);
                assert_eq!(
                    input.parse::<Double>(),
                    Err(ParseError("Exponent has no digits"))
                );
            }
        }
    }
}
#[test]
fn exact_inverse() {
    // Powers of two have exactly representable inverses in every format.
    let half = Double::from_f64(2.0).get_exact_inverse().unwrap();
    assert!(half.bitwise_eq(Double::from_f64(0.5)));
    let half_single = Single::from_f32(2.0).get_exact_inverse().unwrap();
    assert!(half_single.bitwise_eq(Single::from_f32(0.5)));
    let two_quad = "2.0".parse::<Quad>().unwrap();
    let half_quad = two_quad.get_exact_inverse().unwrap();
    assert!(half_quad.bitwise_eq("0.5".parse::<Quad>().unwrap()));
    let two_x87 = "2.0".parse::<X87DoubleExtended>().unwrap();
    let half_x87 = two_x87.get_exact_inverse().unwrap();
    assert!(half_x87.bitwise_eq("0.5".parse::<X87DoubleExtended>().unwrap()));
    // FLT_MIN: its (large) inverse is still exactly representable.
    let flt_min_inv = Single::from_f32(1.17549435e-38).get_exact_inverse().unwrap();
    assert!(flt_min_inv.bitwise_eq(Single::from_f32(8.5070592e+37)));
    // No exact inverse when the inverse would be a denormal...
    assert!(Single::from_f32(1.7014118e38).get_exact_inverse().is_none());
    // ...when the input is zero...
    assert!(Double::from_f64(0.0).get_exact_inverse().is_none());
    // ...or when the input itself is a denormal.
    assert!(Single::from_f32(1.40129846e-45).get_exact_inverse().is_none());
}
#[test]
fn round_to_integral() {
    // Round `x` under `mode` and extract the result as a native f64.
    fn rounded(x: Double, mode: Round) -> f64 {
        x.round_to_integral(mode).value.to_f64()
    }
    // -0.5: every mode except TowardNegative yields zero.
    let minus_half = Double::from_f64(-0.5);
    assert_eq!(-0.0, rounded(minus_half, Round::TowardZero));
    assert_eq!(-1.0, rounded(minus_half, Round::TowardNegative));
    assert_eq!(-0.0, rounded(minus_half, Round::TowardPositive));
    assert_eq!(-0.0, rounded(minus_half, Round::NearestTiesToEven));
    // 3.14: only TowardPositive rounds up.
    let pi_ish = Double::from_f64(3.14);
    assert_eq!(3.0, rounded(pi_ish, Round::TowardZero));
    assert_eq!(3.0, rounded(pi_ish, Round::TowardNegative));
    assert_eq!(4.0, rounded(pi_ish, Round::TowardPositive));
    assert_eq!(3.0, rounded(pi_ish, Round::NearestTiesToEven));
    // The largest finite double is already integral in every mode.
    let max = Double::largest();
    assert_eq!(max.to_f64(), rounded(max, Round::TowardZero));
    assert_eq!(max.to_f64(), rounded(max, Round::TowardNegative));
    assert_eq!(max.to_f64(), rounded(max, Round::TowardPositive));
    assert_eq!(max.to_f64(), rounded(max, Round::NearestTiesToEven));
    // Signed zeros, NaN and the infinities round to themselves.
    assert_eq!(0.0, rounded(Double::ZERO, Round::TowardZero));
    assert_eq!(-0.0, rounded(-Double::ZERO, Round::TowardZero));
    assert!(rounded(Double::NAN, Round::TowardZero).is_nan());
    let plus = rounded(Double::INFINITY, Round::TowardZero);
    assert!(plus.is_infinite() && plus > 0.0);
    let minus = rounded(-Double::INFINITY, Round::TowardZero);
    assert!(minus.is_infinite() && minus < 0.0);
}
#[test]
fn is_integer() {
    // Zeros and finite integral values qualify; fractions, NaNs and
    // infinities do not.
    let check = |x: Double, expect: bool| assert_eq!(expect, x.is_integer());
    check(Double::from_f64(-0.0), true);
    check(Double::from_f64(3.14159), false);
    check(Double::NAN, false);
    check(Double::INFINITY, false);
    check(-Double::INFINITY, false);
    check(Double::largest(), true);
}
#[test]
fn largest() {
    // largest() must convert exactly to the native maximum finite values.
    let max_single = Single::largest().to_f32();
    assert_eq!(3.402823466e+38, max_single);
    let max_double = Double::largest().to_f64();
    assert_eq!(1.7976931348623158e+308, max_double);
}
#[test]
fn smallest() {
    // SMALLEST is the least-magnitude positive denormal; its negation is
    // the matching negative denormal. Each must round-trip through the
    // hex-float string form bit-for-bit.
    fn check<T: Float>(test: T, pattern: &str) {
        let expected = pattern.parse::<T>().unwrap();
        assert_eq!(pattern.starts_with('-'), test.is_negative());
        assert!(test.is_finite_non_zero());
        assert!(test.is_denormal());
        assert!(test.bitwise_eq(expected));
    }
    check(Single::SMALLEST, "0x0.000002p-126");
    check(-Single::SMALLEST, "-0x0.000002p-126");
    check(Quad::SMALLEST, "0x0.0000000000000000000000000001p-16382");
    check(-Quad::SMALLEST, "-0x0.0000000000000000000000000001p-16382");
}
#[test]
fn smallest_normalized() {
    // The smallest normalized value is finite, non-zero and NOT denormal,
    // and must round-trip through its hex-float string form bit-for-bit.
    fn check<T: Float>(test: T, pattern: &str) {
        let expected = pattern.parse::<T>().unwrap();
        assert_eq!(pattern.starts_with('-'), test.is_negative());
        assert!(test.is_finite_non_zero());
        assert!(!test.is_denormal());
        assert!(test.bitwise_eq(expected));
    }
    check(Single::smallest_normalized(), "0x1p-126");
    check(-Single::smallest_normalized(), "-0x1p-126");
    check(Quad::smallest_normalized(), "0x1p-16382");
    check(-Quad::smallest_normalized(), "-0x1p-16382");
}
#[test]
fn zero() {
    // Native round-trips preserve the sign of zero.
    assert_eq!(0.0, Single::from_f32(0.0).to_f32());
    assert_eq!(-0.0, Single::from_f32(-0.0).to_f32());
    assert!(Single::from_f32(-0.0).is_negative());
    assert_eq!(0.0, Double::from_f64(0.0).to_f64());
    assert_eq!(-0.0, Double::from_f64(-0.0).to_f64());
    assert!(Double::from_f64(-0.0).is_negative());
    // For every format: +/-0 parses, classifies and has the expected
    // raw bit pattern (sign bit only, for negative zero).
    fn check<T: Float>(sign: bool, bits: u128) {
        let value = if sign { -T::ZERO } else { T::ZERO };
        let pattern = if sign { "-0x0p+0" } else { "0x0p+0" };
        let expected = pattern.parse::<T>().unwrap();
        assert!(value.is_zero());
        assert_eq!(sign, value.is_negative());
        assert!(value.bitwise_eq(expected));
        assert_eq!(bits, value.to_bits());
    }
    check::<Half>(false, 0);
    check::<Half>(true, 0x8000);
    check::<Single>(false, 0);
    check::<Single>(true, 0x80000000);
    check::<Double>(false, 0);
    check::<Double>(true, 0x8000000000000000);
    check::<Quad>(false, 0);
    check::<Quad>(true, 0x8000000000000000_0000000000000000);
    check::<X87DoubleExtended>(false, 0);
    check::<X87DoubleExtended>(true, 0x8000_0000000000000000);
}
#[test]
fn copy_sign() {
    // copy_sign keeps the magnitude of self and takes the sign of the
    // argument; all four sign combinations are covered.
    for &(magnitude, sign, expected) in &[
        (42.0, -1.0, -42.0),
        (-42.0, 1.0, 42.0),
        (-42.0, -1.0, -42.0),
        (42.0, 1.0, 42.0),
    ] {
        let result = Double::from_f64(magnitude).copy_sign(Double::from_f64(sign));
        assert!(Double::from_f64(expected).bitwise_eq(result));
    }
}
#[test]
fn convert() {
    // Tracks whether the most recent conversion dropped information.
    let mut loses_info = false;
    // Double -> Single: 1.0 is exactly representable, nothing lost.
    let test = "1.0".parse::<Double>().unwrap();
    let test: Single = test.convert(&mut loses_info).value;
    assert_eq!(1.0, test.to_f32());
    assert!(!loses_info);
    // x87 extended -> Double: 1.0 + 2^-53 needs more precision than a
    // Double has, so the conversion rounds back to 1.0 and reports loss.
    let mut test = "0x1p-53".parse::<X87DoubleExtended>().unwrap();
    let one = "1.0".parse::<X87DoubleExtended>().unwrap();
    test += one;
    let test: Double = test.convert(&mut loses_info).value;
    assert_eq!(1.0, test.to_f64());
    assert!(loses_info);
    // Quad -> Double: same rounding scenario as above.
    let mut test = "0x1p-53".parse::<Quad>().unwrap();
    let one = "1.0".parse::<Quad>().unwrap();
    test += one;
    let test: Double = test.convert(&mut loses_info).value;
    assert_eq!(1.0, test.to_f64());
    assert!(loses_info);
    // x87 extended -> Double: 0xf.fffffffp+28 == 2^32 - 1 fits exactly.
    let test = "0xf.fffffffp+28".parse::<X87DoubleExtended>().unwrap();
    let test: Double = test.convert(&mut loses_info).value;
    assert_eq!(4294967295.0, test.to_f64());
    assert!(!loses_info);
    // Widening Single -> x87 extended preserves both signaling and quiet
    // NaNs bit-for-bit (default payload), with no information loss.
    let test = Single::snan(None);
    let x87_snan = X87DoubleExtended::snan(None);
    let test: X87DoubleExtended = test.convert(&mut loses_info).value;
    assert!(test.bitwise_eq(x87_snan));
    assert!(!loses_info);
    let test = Single::qnan(None);
    let x87_qnan = X87DoubleExtended::qnan(None);
    let test: X87DoubleExtended = test.convert(&mut loses_info).value;
    assert!(test.bitwise_eq(x87_qnan));
    assert!(!loses_info);
    // Identity conversions (x87 -> x87) of NaNs are trivially lossless.
    let test = X87DoubleExtended::snan(None);
    let test: X87DoubleExtended = test.convert(&mut loses_info).value;
    assert!(test.bitwise_eq(x87_snan));
    assert!(!loses_info);
    let test = X87DoubleExtended::qnan(None);
    let test: X87DoubleExtended = test.convert(&mut loses_info).value;
    assert!(test.bitwise_eq(x87_qnan));
    assert!(!loses_info);
}
#[test]
fn is_negative() {
    // The sign bit is reported faithfully for every category, including
    // zeros, infinities and (signaling) NaNs.
    let check = |v: Single, negative: bool| assert_eq!(negative, v.is_negative());
    check("0x1p+0".parse::<Single>().unwrap(), false);
    check("-0x1p+0".parse::<Single>().unwrap(), true);
    check(Single::INFINITY, false);
    check(-Single::INFINITY, true);
    check(Single::ZERO, false);
    check(-Single::ZERO, true);
    check(Single::NAN, false);
    check(-Single::NAN, true);
    check(Single::snan(None), false);
    check(-Single::snan(None), true);
}
#[test]
fn is_normal() {
    // Only finite, non-zero, non-denormal values count as "normal".
    let check = |v: Single, normal: bool| assert_eq!(normal, v.is_normal());
    check("0x1p+0".parse::<Single>().unwrap(), true);
    check(Single::INFINITY, false);
    check(Single::ZERO, false);
    check(Single::NAN, false);
    check(Single::snan(None), false);
    check("0x1p-149".parse::<Single>().unwrap(), false); // denormal
}
#[test]
fn is_finite() {
    // Finite covers zeros and denormals but not infinities or NaNs.
    let check = |v: Single, finite: bool| assert_eq!(finite, v.is_finite());
    check("0x1p+0".parse::<Single>().unwrap(), true);
    check(Single::INFINITY, false);
    check(Single::ZERO, true);
    check(Single::NAN, false);
    check(Single::snan(None), false);
    check("0x1p-149".parse::<Single>().unwrap(), true); // denormal
}
#[test]
fn is_infinite() {
    // Only the infinities report true; NaNs are not infinite.
    let check = |v: Single, infinite: bool| assert_eq!(infinite, v.is_infinite());
    check("0x1p+0".parse::<Single>().unwrap(), false);
    check(Single::INFINITY, true);
    check(Single::ZERO, false);
    check(Single::NAN, false);
    check(Single::snan(None), false);
    check("0x1p-149".parse::<Single>().unwrap(), false);
}
#[test]
fn is_nan() {
    // Both quiet and signaling NaNs report true; everything else false.
    let check = |v: Single, nan: bool| assert_eq!(nan, v.is_nan());
    check("0x1p+0".parse::<Single>().unwrap(), false);
    check(Single::INFINITY, false);
    check(Single::ZERO, false);
    check(Single::NAN, true);
    check(Single::snan(None), true);
    check("0x1p-149".parse::<Single>().unwrap(), false);
}
#[test]
fn is_finite_non_zero() {
    let check = |v: Single, expect: bool| assert_eq!(expect, v.is_finite_non_zero());
    // Positive/negative normal values.
    check("0x1p+0".parse::<Single>().unwrap(), true);
    check("-0x1p+0".parse::<Single>().unwrap(), true);
    // Positive/negative denormal values.
    check("0x1p-149".parse::<Single>().unwrap(), true);
    check("-0x1p-149".parse::<Single>().unwrap(), true);
    // +/- Infinity.
    check(Single::INFINITY, false);
    check(-Single::INFINITY, false);
    // +/- Zero.
    check(Single::ZERO, false);
    check(-Single::ZERO, false);
    // +/- qNaN and sNaN: the sign means nothing for NaNs, but checking
    // both signs can't hurt.
    check(Single::NAN, false);
    check(-Single::NAN, false);
    check(Single::snan(None), false);
    check(-Single::snan(None), false);
}
#[test]
fn add() {
    // Add every pair of special values and verify the rendered result,
    // the status flags and the category of the sum.
    // NOTE: only default exception handling is exercised, so the sNaN
    // rows of the original table are omitted: operations on signaling
    // NaNs should yield a quiet NaN, but currently propagate the sNaN.
    let p_inf = Single::INFINITY;
    let m_inf = -Single::INFINITY;
    let p_zero = Single::ZERO;
    let m_zero = -Single::ZERO;
    let qnan = Single::NAN;
    let p_normal_value = "0x1p+0".parse::<Single>().unwrap();
    let m_normal_value = "-0x1p+0".parse::<Single>().unwrap();
    let p_largest_value = Single::largest();
    let m_largest_value = -Single::largest();
    let p_smallest_value = Single::SMALLEST;
    let m_smallest_value = -Single::SMALLEST;
    let p_smallest_normalized = Single::smallest_normalized();
    let m_smallest_normalized = -Single::smallest_normalized();
    // Short aliases keep the truth table readable, one entry per line.
    let ok = Status::OK;
    let inval = Status::INVALID_OP;
    let inexact = Status::INEXACT;
    let overflow_status = Status::OVERFLOW | Status::INEXACT;
    let inf_cat = Category::Infinity;
    let nan_cat = Category::NaN;
    let zero_cat = Category::Zero;
    let norm_cat = Category::Normal;
    // (lhs, rhs, expected result string, expected status, expected category)
    let special_cases = [
        (p_inf, p_inf, "inf", ok, inf_cat),
        (p_inf, m_inf, "nan", inval, nan_cat),
        (p_inf, p_zero, "inf", ok, inf_cat),
        (p_inf, m_zero, "inf", ok, inf_cat),
        (p_inf, qnan, "nan", ok, nan_cat),
        (p_inf, p_normal_value, "inf", ok, inf_cat),
        (p_inf, m_normal_value, "inf", ok, inf_cat),
        (p_inf, p_largest_value, "inf", ok, inf_cat),
        (p_inf, m_largest_value, "inf", ok, inf_cat),
        (p_inf, p_smallest_value, "inf", ok, inf_cat),
        (p_inf, m_smallest_value, "inf", ok, inf_cat),
        (p_inf, p_smallest_normalized, "inf", ok, inf_cat),
        (p_inf, m_smallest_normalized, "inf", ok, inf_cat),
        (m_inf, p_inf, "nan", inval, nan_cat),
        (m_inf, m_inf, "-inf", ok, inf_cat),
        (m_inf, p_zero, "-inf", ok, inf_cat),
        (m_inf, m_zero, "-inf", ok, inf_cat),
        (m_inf, qnan, "nan", ok, nan_cat),
        (m_inf, p_normal_value, "-inf", ok, inf_cat),
        (m_inf, m_normal_value, "-inf", ok, inf_cat),
        (m_inf, p_largest_value, "-inf", ok, inf_cat),
        (m_inf, m_largest_value, "-inf", ok, inf_cat),
        (m_inf, p_smallest_value, "-inf", ok, inf_cat),
        (m_inf, m_smallest_value, "-inf", ok, inf_cat),
        (m_inf, p_smallest_normalized, "-inf", ok, inf_cat),
        (m_inf, m_smallest_normalized, "-inf", ok, inf_cat),
        (p_zero, p_inf, "inf", ok, inf_cat),
        (p_zero, m_inf, "-inf", ok, inf_cat),
        (p_zero, p_zero, "0x0p+0", ok, zero_cat),
        (p_zero, m_zero, "0x0p+0", ok, zero_cat),
        (p_zero, qnan, "nan", ok, nan_cat),
        (p_zero, p_normal_value, "0x1p+0", ok, norm_cat),
        (p_zero, m_normal_value, "-0x1p+0", ok, norm_cat),
        (p_zero, p_largest_value, "0x1.fffffep+127", ok, norm_cat),
        (p_zero, m_largest_value, "-0x1.fffffep+127", ok, norm_cat),
        (p_zero, p_smallest_value, "0x1p-149", ok, norm_cat),
        (p_zero, m_smallest_value, "-0x1p-149", ok, norm_cat),
        (p_zero, p_smallest_normalized, "0x1p-126", ok, norm_cat),
        (p_zero, m_smallest_normalized, "-0x1p-126", ok, norm_cat),
        (m_zero, p_inf, "inf", ok, inf_cat),
        (m_zero, m_inf, "-inf", ok, inf_cat),
        (m_zero, p_zero, "0x0p+0", ok, zero_cat),
        (m_zero, m_zero, "-0x0p+0", ok, zero_cat),
        (m_zero, qnan, "nan", ok, nan_cat),
        (m_zero, p_normal_value, "0x1p+0", ok, norm_cat),
        (m_zero, m_normal_value, "-0x1p+0", ok, norm_cat),
        (m_zero, p_largest_value, "0x1.fffffep+127", ok, norm_cat),
        (m_zero, m_largest_value, "-0x1.fffffep+127", ok, norm_cat),
        (m_zero, p_smallest_value, "0x1p-149", ok, norm_cat),
        (m_zero, m_smallest_value, "-0x1p-149", ok, norm_cat),
        (m_zero, p_smallest_normalized, "0x1p-126", ok, norm_cat),
        (m_zero, m_smallest_normalized, "-0x1p-126", ok, norm_cat),
        (qnan, p_inf, "nan", ok, nan_cat),
        (qnan, m_inf, "nan", ok, nan_cat),
        (qnan, p_zero, "nan", ok, nan_cat),
        (qnan, m_zero, "nan", ok, nan_cat),
        (qnan, qnan, "nan", ok, nan_cat),
        (qnan, p_normal_value, "nan", ok, nan_cat),
        (qnan, m_normal_value, "nan", ok, nan_cat),
        (qnan, p_largest_value, "nan", ok, nan_cat),
        (qnan, m_largest_value, "nan", ok, nan_cat),
        (qnan, p_smallest_value, "nan", ok, nan_cat),
        (qnan, m_smallest_value, "nan", ok, nan_cat),
        (qnan, p_smallest_normalized, "nan", ok, nan_cat),
        (qnan, m_smallest_normalized, "nan", ok, nan_cat),
        (p_normal_value, p_inf, "inf", ok, inf_cat),
        (p_normal_value, m_inf, "-inf", ok, inf_cat),
        (p_normal_value, p_zero, "0x1p+0", ok, norm_cat),
        (p_normal_value, m_zero, "0x1p+0", ok, norm_cat),
        (p_normal_value, qnan, "nan", ok, nan_cat),
        (p_normal_value, p_normal_value, "0x1p+1", ok, norm_cat),
        (p_normal_value, m_normal_value, "0x0p+0", ok, zero_cat),
        (p_normal_value, p_largest_value, "0x1.fffffep+127", inexact, norm_cat),
        (p_normal_value, m_largest_value, "-0x1.fffffep+127", inexact, norm_cat),
        (p_normal_value, p_smallest_value, "0x1p+0", inexact, norm_cat),
        (p_normal_value, m_smallest_value, "0x1p+0", inexact, norm_cat),
        (p_normal_value, p_smallest_normalized, "0x1p+0", inexact, norm_cat),
        (p_normal_value, m_smallest_normalized, "0x1p+0", inexact, norm_cat),
        (m_normal_value, p_inf, "inf", ok, inf_cat),
        (m_normal_value, m_inf, "-inf", ok, inf_cat),
        (m_normal_value, p_zero, "-0x1p+0", ok, norm_cat),
        (m_normal_value, m_zero, "-0x1p+0", ok, norm_cat),
        (m_normal_value, qnan, "nan", ok, nan_cat),
        (m_normal_value, p_normal_value, "0x0p+0", ok, zero_cat),
        (m_normal_value, m_normal_value, "-0x1p+1", ok, norm_cat),
        (m_normal_value, p_largest_value, "0x1.fffffep+127", inexact, norm_cat),
        (m_normal_value, m_largest_value, "-0x1.fffffep+127", inexact, norm_cat),
        (m_normal_value, p_smallest_value, "-0x1p+0", inexact, norm_cat),
        (m_normal_value, m_smallest_value, "-0x1p+0", inexact, norm_cat),
        (m_normal_value, p_smallest_normalized, "-0x1p+0", inexact, norm_cat),
        (m_normal_value, m_smallest_normalized, "-0x1p+0", inexact, norm_cat),
        (p_largest_value, p_inf, "inf", ok, inf_cat),
        (p_largest_value, m_inf, "-inf", ok, inf_cat),
        (p_largest_value, p_zero, "0x1.fffffep+127", ok, norm_cat),
        (p_largest_value, m_zero, "0x1.fffffep+127", ok, norm_cat),
        (p_largest_value, qnan, "nan", ok, nan_cat),
        (p_largest_value, p_normal_value, "0x1.fffffep+127", inexact, norm_cat),
        (p_largest_value, m_normal_value, "0x1.fffffep+127", inexact, norm_cat),
        (p_largest_value, p_largest_value, "inf", overflow_status, inf_cat),
        (p_largest_value, m_largest_value, "0x0p+0", ok, zero_cat),
        (p_largest_value, p_smallest_value, "0x1.fffffep+127", inexact, norm_cat),
        (p_largest_value, m_smallest_value, "0x1.fffffep+127", inexact, norm_cat),
        (p_largest_value, p_smallest_normalized, "0x1.fffffep+127", inexact, norm_cat),
        (p_largest_value, m_smallest_normalized, "0x1.fffffep+127", inexact, norm_cat),
        (m_largest_value, p_inf, "inf", ok, inf_cat),
        (m_largest_value, m_inf, "-inf", ok, inf_cat),
        (m_largest_value, p_zero, "-0x1.fffffep+127", ok, norm_cat),
        (m_largest_value, m_zero, "-0x1.fffffep+127", ok, norm_cat),
        (m_largest_value, qnan, "nan", ok, nan_cat),
        (m_largest_value, p_normal_value, "-0x1.fffffep+127", inexact, norm_cat),
        (m_largest_value, m_normal_value, "-0x1.fffffep+127", inexact, norm_cat),
        (m_largest_value, p_largest_value, "0x0p+0", ok, zero_cat),
        (m_largest_value, m_largest_value, "-inf", overflow_status, inf_cat),
        (m_largest_value, p_smallest_value, "-0x1.fffffep+127", inexact, norm_cat),
        (m_largest_value, m_smallest_value, "-0x1.fffffep+127", inexact, norm_cat),
        (m_largest_value, p_smallest_normalized, "-0x1.fffffep+127", inexact, norm_cat),
        (m_largest_value, m_smallest_normalized, "-0x1.fffffep+127", inexact, norm_cat),
        (p_smallest_value, p_inf, "inf", ok, inf_cat),
        (p_smallest_value, m_inf, "-inf", ok, inf_cat),
        (p_smallest_value, p_zero, "0x1p-149", ok, norm_cat),
        (p_smallest_value, m_zero, "0x1p-149", ok, norm_cat),
        (p_smallest_value, qnan, "nan", ok, nan_cat),
        (p_smallest_value, p_normal_value, "0x1p+0", inexact, norm_cat),
        (p_smallest_value, m_normal_value, "-0x1p+0", inexact, norm_cat),
        (p_smallest_value, p_largest_value, "0x1.fffffep+127", inexact, norm_cat),
        (p_smallest_value, m_largest_value, "-0x1.fffffep+127", inexact, norm_cat),
        (p_smallest_value, p_smallest_value, "0x1p-148", ok, norm_cat),
        (p_smallest_value, m_smallest_value, "0x0p+0", ok, zero_cat),
        (p_smallest_value, p_smallest_normalized, "0x1.000002p-126", ok, norm_cat),
        (p_smallest_value, m_smallest_normalized, "-0x1.fffffcp-127", ok, norm_cat),
        (m_smallest_value, p_inf, "inf", ok, inf_cat),
        (m_smallest_value, m_inf, "-inf", ok, inf_cat),
        (m_smallest_value, p_zero, "-0x1p-149", ok, norm_cat),
        (m_smallest_value, m_zero, "-0x1p-149", ok, norm_cat),
        (m_smallest_value, qnan, "nan", ok, nan_cat),
        (m_smallest_value, p_normal_value, "0x1p+0", inexact, norm_cat),
        (m_smallest_value, m_normal_value, "-0x1p+0", inexact, norm_cat),
        (m_smallest_value, p_largest_value, "0x1.fffffep+127", inexact, norm_cat),
        (m_smallest_value, m_largest_value, "-0x1.fffffep+127", inexact, norm_cat),
        (m_smallest_value, p_smallest_value, "0x0p+0", ok, zero_cat),
        (m_smallest_value, m_smallest_value, "-0x1p-148", ok, norm_cat),
        (m_smallest_value, p_smallest_normalized, "0x1.fffffcp-127", ok, norm_cat),
        (m_smallest_value, m_smallest_normalized, "-0x1.000002p-126", ok, norm_cat),
        (p_smallest_normalized, p_inf, "inf", ok, inf_cat),
        (p_smallest_normalized, m_inf, "-inf", ok, inf_cat),
        (p_smallest_normalized, p_zero, "0x1p-126", ok, norm_cat),
        (p_smallest_normalized, m_zero, "0x1p-126", ok, norm_cat),
        (p_smallest_normalized, qnan, "nan", ok, nan_cat),
        (p_smallest_normalized, p_normal_value, "0x1p+0", inexact, norm_cat),
        (p_smallest_normalized, m_normal_value, "-0x1p+0", inexact, norm_cat),
        (p_smallest_normalized, p_largest_value, "0x1.fffffep+127", inexact, norm_cat),
        (p_smallest_normalized, m_largest_value, "-0x1.fffffep+127", inexact, norm_cat),
        (p_smallest_normalized, p_smallest_value, "0x1.000002p-126", ok, norm_cat),
        (p_smallest_normalized, m_smallest_value, "0x1.fffffcp-127", ok, norm_cat),
        (p_smallest_normalized, p_smallest_normalized, "0x1p-125", ok, norm_cat),
        (p_smallest_normalized, m_smallest_normalized, "0x0p+0", ok, zero_cat),
        (m_smallest_normalized, p_inf, "inf", ok, inf_cat),
        (m_smallest_normalized, m_inf, "-inf", ok, inf_cat),
        (m_smallest_normalized, p_zero, "-0x1p-126", ok, norm_cat),
        (m_smallest_normalized, m_zero, "-0x1p-126", ok, norm_cat),
        (m_smallest_normalized, qnan, "nan", ok, nan_cat),
        (m_smallest_normalized, p_normal_value, "0x1p+0", inexact, norm_cat),
        (m_smallest_normalized, m_normal_value, "-0x1p+0", inexact, norm_cat),
        (m_smallest_normalized, p_largest_value, "0x1.fffffep+127", inexact, norm_cat),
        (m_smallest_normalized, m_largest_value, "-0x1.fffffep+127", inexact, norm_cat),
        (m_smallest_normalized, p_smallest_value, "-0x1.fffffcp-127", ok, norm_cat),
        (m_smallest_normalized, m_smallest_value, "-0x1.000002p-126", ok, norm_cat),
        (m_smallest_normalized, p_smallest_normalized, "0x0p+0", ok, zero_cat),
        (m_smallest_normalized, m_smallest_normalized, "-0x1p-125", ok, norm_cat),
    ];
    for &(lhs, rhs, expected, e_status, e_category) in &special_cases[..] {
        let status;
        let sum = unpack!(status=, lhs + rhs);
        assert_eq!(e_status, status);
        assert_eq!(e_category, sum.category());
        assert!(sum.bitwise_eq(expected.parse::<Single>().unwrap()));
    }
}
#[test]
fn subtract() {
// Test Special Cases against each other and normal values.
// FIXMES/NOTES:
// 1. Since we perform only default exception handling all operations with
// signaling NaNs should have a result that is a quiet NaN. Currently they
// return sNaN.
let p_inf = Single::INFINITY;
let m_inf = -Single::INFINITY;
let p_zero = Single::ZERO;
let m_zero = -Single::ZERO;
let qnan = Single::NAN;
let p_normal_value = "0x1p+0".parse::<Single>().unwrap();
let m_normal_value = "-0x1p+0".parse::<Single>().unwrap();
let p_largest_value = Single::largest();
let m_largest_value = -Single::largest();
let p_smallest_value = Single::SMALLEST;
let m_smallest_value = -Single::SMALLEST;
let p_smallest_normalized = Single::smallest_normalized();
let m_smallest_normalized = -Single::smallest_normalized();
let overflow_status = Status::OVERFLOW | Status::INEXACT;
let special_cases = [
(p_inf, p_inf, "nan", Status::INVALID_OP, Category::NaN),
(p_inf, m_inf, "inf", Status::OK, Category::Infinity),
(p_inf, p_zero, "inf", Status::OK, Category::Infinity),
(p_inf, m_zero, "inf", Status::OK, Category::Infinity),
(p_inf, qnan, "-nan", Status::OK, Category::NaN),
/*
// See Note 1.
(p_inf, snan, "-nan", Status::INVALID_OP, Category::NaN),
*/
(p_inf, p_normal_value, "inf", Status::OK, Category::Infinity),
(p_inf, m_normal_value, "inf", Status::OK, Category::Infinity),
(
p_inf,
p_largest_value,
"inf",
Status::OK,
Category::Infinity,
),
(
p_inf,
m_largest_value,
"inf",
Status::OK,
Category::Infinity,
),
(
p_inf,
p_smallest_value,
"inf",
Status::OK,
Category::Infinity,
),
(
p_inf,
m_smallest_value,
"inf",
Status::OK,
Category::Infinity,
),
(
p_inf,
p_smallest_normalized,
"inf",
Status::OK,
Category::Infinity,
),
(
p_inf,
m_smallest_normalized,
"inf",
Status::OK,
Category::Infinity,
),
(m_inf, p_inf, "-inf", Status::OK, Category::Infinity),
(m_inf, m_inf, "nan", Status::INVALID_OP, Category::NaN),
(m_inf, p_zero, "-inf", Status::OK, Category::Infinity),
(m_inf, m_zero, "-inf", Status::OK, Category::Infinity),
(m_inf, qnan, "-nan", Status::OK, Category::NaN),
/*
// See Note 1.
(m_inf, snan, "-nan", Status::INVALID_OP, Category::NaN),
*/
(
m_inf,
p_normal_value,
"-inf",
Status::OK,
Category::Infinity,
),
(
m_inf,
m_normal_value,
"-inf",
Status::OK,
Category::Infinity,
),
(
m_inf,
p_largest_value,
"-inf",
Status::OK,
Category::Infinity,
),
(
m_inf,
m_largest_value,
"-inf",
Status::OK,
Category::Infinity,
),
(
m_inf,
p_smallest_value,
"-inf",
Status::OK,
Category::Infinity,
),
(
m_inf,
m_smallest_value,
"-inf",
Status::OK,
Category::Infinity,
),
(
m_inf,
p_smallest_normalized,
"-inf",
Status::OK,
Category::Infinity,
),
(
m_inf,
m_smallest_normalized,
"-inf",
Status::OK,
Category::Infinity,
),
(p_zero, p_inf, "-inf", Status::OK, Category::Infinity),
(p_zero, m_inf, "inf", Status::OK, Category::Infinity),
(p_zero, p_zero, "0x0p+0", Status::OK, Category::Zero),
(p_zero, m_zero, "0x0p+0", Status::OK, Category::Zero),
(p_zero, qnan, "-nan", Status::OK, Category::NaN),
/*
// See Note 1.
(p_zero, snan, "-nan", Status::INVALID_OP, Category::NaN),
*/
(
p_zero,
p_normal_value,
"-0x1p+0",
Status::OK,
Category::Normal,
),
(
p_zero,
m_normal_value,
"0x1p+0",
Status::OK,
Category::Normal,
),
(
p_zero,
p_largest_value,
"-0x1.fffffep+127",
Status::OK,
Category::Normal,
),
(
p_zero,
m_largest_value,
"0x1.fffffep+127",
Status::OK,
Category::Normal,
),
(
p_zero,
p_smallest_value,
"-0x1p-149",
Status::OK,
Category::Normal,
),
(
p_zero,
m_smallest_value,
"0x1p-149",
Status::OK,
Category::Normal,
),
(
p_zero,
p_smallest_normalized,
"-0x1p-126",
Status::OK,
Category::Normal,
),
(
p_zero,
m_smallest_normalized,
"0x1p-126",
Status::OK,
Category::Normal,
),
(m_zero, p_inf, "-inf", Status::OK, Category::Infinity),
(m_zero, m_inf, "inf", Status::OK, Category::Infinity),
(m_zero, p_zero, "-0x0p+0", Status::OK, Category::Zero),
(m_zero, m_zero, "0x0p+0", Status::OK, Category::Zero),
(m_zero, qnan, "-nan", Status::OK, Category::NaN),
/*
// See Note 1.
(m_zero, snan, "-nan", Status::INVALID_OP, Category::NaN),
*/
(
m_zero,
p_normal_value,
"-0x1p+0",
Status::OK,
Category::Normal,
),
(
m_zero,
m_normal_value,
"0x1p+0",
Status::OK,
Category::Normal,
),
(
m_zero,
p_largest_value,
"-0x1.fffffep+127",
Status::OK,
Category::Normal,
),
(
m_zero,
m_largest_value,
"0x1.fffffep+127",
Status::OK,
Category::Normal,
),
(
m_zero,
p_smallest_value,
"-0x1p-149",
Status::OK,
Category::Normal,
),
(
m_zero,
m_smallest_value,
"0x1p-149",
Status::OK,
Category::Normal,
),
(
m_zero,
p_smallest_normalized,
"-0x1p-126",
Status::OK,
Category::Normal,
),
(
m_zero,
m_smallest_normalized,
"0x1p-126",
Status::OK,
Category::Normal,
),
(qnan, p_inf, "nan", Status::OK, Category::NaN),
(qnan, m_inf, "nan", Status::OK, Category::NaN),
(qnan, p_zero, "nan", Status::OK, Category::NaN),
(qnan, m_zero, "nan", Status::OK, Category::NaN),
(qnan, qnan, "nan", Status::OK, Category::NaN),
/*
// See Note 1.
(qnan, snan, "nan", Status::INVALID_OP, Category::NaN),
*/
(qnan, p_normal_value, "nan", Status::OK, Category::NaN),
(qnan, m_normal_value, "nan", Status::OK, Category::NaN),
(qnan, p_largest_value, "nan", Status::OK, Category::NaN),
(qnan, m_largest_value, "nan", Status::OK, Category::NaN),
(qnan, p_smallest_value, "nan", Status::OK, Category::NaN),
(qnan, m_smallest_value, "nan", Status::OK, Category::NaN),
(
qnan,
p_smallest_normalized,
"nan",
Status::OK,
Category::NaN,
),
(
qnan,
m_smallest_normalized,
"nan",
Status::OK,
Category::NaN,
),
/*
// See Note 1.
(snan, p_inf, "nan", Status::INVALID_OP, Category::NaN),
(snan, m_inf, "nan", Status::INVALID_OP, Category::NaN),
(snan, p_zero, "nan", Status::INVALID_OP, Category::NaN),
(snan, m_zero, "nan", Status::INVALID_OP, Category::NaN),
(snan, qnan, "nan", Status::INVALID_OP, Category::NaN),
(snan, snan, "nan", Status::INVALID_OP, Category::NaN),
(snan, p_normal_value, "nan", Status::INVALID_OP, Category::NaN),
(snan, m_normal_value, "nan", Status::INVALID_OP, Category::NaN),
(snan, p_largest_value, "nan", Status::INVALID_OP, Category::NaN),
(snan, m_largest_value, "nan", Status::INVALID_OP, Category::NaN),
(snan, p_smallest_value, "nan", Status::INVALID_OP, Category::NaN),
(snan, m_smallest_value, "nan", Status::INVALID_OP, Category::NaN),
(snan, p_smallest_normalized, "nan", Status::INVALID_OP, Category::NaN),
(snan, m_smallest_normalized, "nan", Status::INVALID_OP, Category::NaN),
*/
(
p_normal_value,
p_inf,
"-inf",
Status::OK,
Category::Infinity,
),
(p_normal_value, m_inf, "inf", Status::OK, Category::Infinity),
(
p_normal_value,
p_zero,
"0x1p+0",
Status::OK,
Category::Normal,
),
(
p_normal_value,
m_zero,
"0x1p+0",
Status::OK,
Category::Normal,
),
(p_normal_value, qnan, "-nan", Status::OK, Category::NaN),
/*
// See Note 1.
(p_normal_value, snan, "-nan", Status::INVALID_OP, Category::NaN),
*/
(
p_normal_value,
p_normal_value,
"0x0p+0",
Status::OK,
Category::Zero,
),
(
p_normal_value,
m_normal_value,
"0x1p+1",
Status::OK,
Category::Normal,
),
(
p_normal_value,
p_largest_value,
"-0x1.fffffep+127",
Status::INEXACT,
Category::Normal,
),
(
p_normal_value,
m_largest_value,
"0x1.fffffep+127",
Status::INEXACT,
Category::Normal,
),
(
p_normal_value,
p_smallest_value,
"0x1p+0",
Status::INEXACT,
Category::Normal,
),
(
p_normal_value,
m_smallest_value,
"0x1p+0",
Status::INEXACT,
Category::Normal,
),
(
p_normal_value,
p_smallest_normalized,
"0x1p+0",
Status::INEXACT,
Category::Normal,
),
(
p_normal_value,
m_smallest_normalized,
"0x1p+0",
Status::INEXACT,
Category::Normal,
),
(
m_normal_value,
p_inf,
"-inf",
Status::OK,
Category::Infinity,
),
(m_normal_value, m_inf, "inf", Status::OK, Category::Infinity),
(
m_normal_value,
p_zero,
"-0x1p+0",
Status::OK,
Category::Normal,
),
(
m_normal_value,
m_zero,
"-0x1p+0",
Status::OK,
Category::Normal,
),
(m_normal_value, qnan, "-nan", Status::OK, Category::NaN),
/*
// See Note 1.
(m_normal_value, snan, "-nan", Status::INVALID_OP, Category::NaN),
*/
(
m_normal_value,
p_normal_value,
"-0x1p+1",
Status::OK,
Category::Normal,
),
(
m_normal_value,
m_normal_value,
"0x0p+0",
Status::OK,
Category::Zero,
),
(
m_normal_value,
p_largest_value,
"-0x1.fffffep+127",
Status::INEXACT,
Category::Normal,
),
(
m_normal_value,
m_largest_value,
"0x1.fffffep+127",
Status::INEXACT,
Category::Normal,
),
(
m_normal_value,
p_smallest_value,
"-0x1p+0",
Status::INEXACT,
Category::Normal,
),
(
m_normal_value,
m_smallest_value,
"-0x1p+0",
Status::INEXACT,
Category::Normal,
),
(
m_normal_value,
p_smallest_normalized,
"-0x1p+0",
Status::INEXACT,
Category::Normal,
),
(
m_normal_value,
m_smallest_normalized,
"-0x1p+0",
Status::INEXACT,
Category::Normal,
),
(
p_largest_value,
p_inf,
"-inf",
Status::OK,
Category::Infinity,
),
(
p_largest_value,
m_inf,
"inf",
Status::OK,
Category::Infinity,
),
(
p_largest_value,
p_zero,
"0x1.fffffep+127",
Status::OK,
Category::Normal,
),
(
p_largest_value,
m_zero,
"0x1.fffffep+127",
Status::OK,
Category::Normal,
),
(p_largest_value, qnan, "-nan", Status::OK, Category::NaN),
/*
// See Note 1.
(p_largest_value, snan, "-nan", Status::INVALID_OP, Category::NaN),
*/
(
p_largest_value,
p_normal_value,
"0x1.fffffep+127",
Status::INEXACT,
Category::Normal,
),
(
p_largest_value,
m_normal_value,
"0x1.fffffep+127",
Status::INEXACT,
Category::Normal,
),
(
p_largest_value,
p_largest_value,
"0x0p+0",
Status::OK,
Category::Zero,
),
(
p_largest_value,
m_largest_value,
"inf",
overflow_status,
Category::Infinity,
),
(
p_largest_value,
p_smallest_value,
"0x1.fffffep+127",
Status::INEXACT,
Category::Normal,
),
(
p_largest_value,
m_smallest_value,
"0x1.fffffep+127",
Status::INEXACT,
Category::Normal,
),
(
p_largest_value,
p_smallest_normalized,
"0x1.fffffep+127",
Status::INEXACT,
Category::Normal,
),
(
p_largest_value,
m_smallest_normalized,
"0x1.fffffep+127",
Status::INEXACT,
Category::Normal,
),
(
m_largest_value,
p_inf,
"-inf",
Status::OK,
Category::Infinity,
),
(
m_largest_value,
m_inf,
"inf",
Status::OK,
Category::Infinity,
),
(
m_largest_value,
p_zero,
"-0x1.fffffep+127",
Status::OK,
Category::Normal,
),
(
m_largest_value,
m_zero,
"-0x1.fffffep+127",
Status::OK,
Category::Normal,
),
(m_largest_value, qnan, "-nan", Status::OK, Category::NaN),
/*
// See Note 1.
(m_largest_value, snan, "-nan", Status::INVALID_OP, Category::NaN),
*/
(
m_largest_value,
p_normal_value,
"-0x1.fffffep+127",
Status::INEXACT,
Category::Normal,
),
(
m_largest_value,
m_normal_value,
"-0x1.fffffep+127",
Status::INEXACT,
Category::Normal,
),
(
m_largest_value,
p_largest_value,
"-inf",
overflow_status,
Category::Infinity,
),
(
m_largest_value,
m_largest_value,
"0x0p+0",
Status::OK,
Category::Zero,
),
(
m_largest_value,
p_smallest_value,
"-0x1.fffffep+127",
Status::INEXACT,
Category::Normal,
),
(
m_largest_value,
m_smallest_value,
"-0x1.fffffep+127",
Status::INEXACT,
Category::Normal,
),
(
m_largest_value,
p_smallest_normalized,
"-0x1.fffffep+127",
Status::INEXACT,
Category::Normal,
),
(
m_largest_value,
m_smallest_normalized,
"-0x1.fffffep+127",
Status::INEXACT,
Category::Normal,
),
(
p_smallest_value,
p_inf,
"-inf",
Status::OK,
Category::Infinity,
),
(
p_smallest_value,
m_inf,
"inf",
Status::OK,
Category::Infinity,
),
(
p_smallest_value,
p_zero,
"0x1p-149",
Status::OK,
Category::Normal,
),
(
p_smallest_value,
m_zero,
"0x1p-149",
Status::OK,
Category::Normal,
),
(p_smallest_value, qnan, "-nan", Status::OK, Category::NaN),
/*
// See Note 1.
(p_smallest_value, snan, "-nan", Status::INVALID_OP, Category::NaN),
*/
(
p_smallest_value,
p_normal_value,
"-0x1p+0",
Status::INEXACT,
Category::Normal,
),
(
p_smallest_value,
m_normal_value,
"0x1p+0",
Status::INEXACT,
Category::Normal,
),
(
p_smallest_value,
p_largest_value,
"-0x1.fffffep+127",
Status::INEXACT,
Category::Normal,
),
(
p_smallest_value,
m_largest_value,
"0x1.fffffep+127",
Status::INEXACT,
Category::Normal,
),
(
p_smallest_value,
p_smallest_value,
"0x0p+0",
Status::OK,
Category::Zero,
),
(
p_smallest_value,
m_smallest_value,
"0x1p-148",
Status::OK,
Category::Normal,
),
(
p_smallest_value,
p_smallest_normalized,
"-0x1.fffffcp-127",
Status::OK,
Category::Normal,
),
(
p_smallest_value,
m_smallest_normalized,
"0x1.000002p-126",
Status::OK,
Category::Normal,
),
(
m_smallest_value,
p_inf,
"-inf",
Status::OK,
Category::Infinity,
),
(
m_smallest_value,
m_inf,
"inf",
Status::OK,
Category::Infinity,
),
(
m_smallest_value,
p_zero,
"-0x1p-149",
Status::OK,
Category::Normal,
),
(
m_smallest_value,
m_zero,
"-0x1p-149",
Status::OK,
Category::Normal,
),
(m_smallest_value, qnan, "-nan", Status::OK, Category::NaN),
/*
// See Note 1.
(m_smallest_value, snan, "-nan", Status::INVALID_OP, Category::NaN),
*/
(
m_smallest_value,
p_normal_value,
"-0x1p+0",
Status::INEXACT,
Category::Normal,
),
(
m_smallest_value,
m_normal_value,
"0x1p+0",
Status::INEXACT,
Category::Normal,
),
(
m_smallest_value,
p_largest_value,
"-0x1.fffffep+127",
Status::INEXACT,
Category::Normal,
),
(
m_smallest_value,
m_largest_value,
"0x1.fffffep+127",
Status::INEXACT,
Category::Normal,
),
(
m_smallest_value,
p_smallest_value,
"-0x1p-148",
Status::OK,
Category::Normal,
),
(
m_smallest_value,
m_smallest_value,
"0x0p+0",
Status::OK,
Category::Zero,
),
(
m_smallest_value,
p_smallest_normalized,
"-0x1.000002p-126",
Status::OK,
Category::Normal,
),
(
m_smallest_value,
m_smallest_normalized,
"0x1.fffffcp-127",
Status::OK,
Category::Normal,
),
(
p_smallest_normalized,
p_inf,
"-inf",
Status::OK,
Category::Infinity,
),
(
p_smallest_normalized,
m_inf,
"inf",
Status::OK,
Category::Infinity,
),
(
p_smallest_normalized,
p_zero,
"0x1p-126",
Status::OK,
Category::Normal,
),
(
p_smallest_normalized,
m_zero,
"0x1p-126",
Status::OK,
Category::Normal,
),
(
p_smallest_normalized,
qnan,
"-nan",
Status::OK,
Category::NaN,
),
/*
// See Note 1.
(p_smallest_normalized, snan, "-nan", Status::INVALID_OP, Category::NaN),
*/
(
p_smallest_normalized,
p_normal_value,
"-0x1p+0",
Status::INEXACT,
Category::Normal,
),
(
p_smallest_normalized,
m_normal_value,
"0x1p+0",
Status::INEXACT,
Category::Normal,
),
(
p_smallest_normalized,
p_largest_value,
"-0x1.fffffep+127",
Status::INEXACT,
Category::Normal,
),
(
p_smallest_normalized,
m_largest_value,
"0x1.fffffep+127",
Status::INEXACT,
Category::Normal,
),
(
p_smallest_normalized,
p_smallest_value,
"0x1.fffffcp-127",
Status::OK,
Category::Normal,
),
(
p_smallest_normalized,
m_smallest_value,
"0x1.000002p-126",
Status::OK,
Category::Normal,
),
(
p_smallest_normalized,
p_smallest_normalized,
"0x0p+0",
Status::OK,
Category::Zero,
),
(
p_smallest_normalized,
m_smallest_normalized,
"0x1p-125",
Status::OK,
Category::Normal,
),
(
m_smallest_normalized,
p_inf,
"-inf",
Status::OK,
Category::Infinity,
),
(
m_smallest_normalized,
m_inf,
"inf",
Status::OK,
Category::Infinity,
),
(
m_smallest_normalized,
p_zero,
"-0x1p-126",
Status::OK,
Category::Normal,
),
(
m_smallest_normalized,
m_zero,
"-0x1p-126",
Status::OK,
Category::Normal,
),
(
m_smallest_normalized,
qnan,
"-nan",
Status::OK,
Category::NaN,
),
/*
// See Note 1.
(m_smallest_normalized, snan, "-nan", Status::INVALID_OP, Category::NaN),
*/
(
m_smallest_normalized,
p_normal_value,
"-0x1p+0",
Status::INEXACT,
Category::Normal,
),
(
m_smallest_normalized,
m_normal_value,
"0x1p+0",
Status::INEXACT,
Category::Normal,
),
(
m_smallest_normalized,
p_largest_value,
"-0x1.fffffep+127",
Status::INEXACT,
Category::Normal,
),
(
m_smallest_normalized,
m_largest_value,
"0x1.fffffep+127",
Status::INEXACT,
Category::Normal,
),
(
m_smallest_normalized,
p_smallest_value,
"-0x1.000002p-126",
Status::OK,
Category::Normal,
),
(
m_smallest_normalized,
m_smallest_value,
"-0x1.fffffcp-127",
Status::OK,
Category::Normal,
),
(
m_smallest_normalized,
p_smallest_normalized,
"-0x1p-125",
Status::OK,
Category::Normal,
),
(
m_smallest_normalized,
m_smallest_normalized,
"0x0p+0",
Status::OK,
Category::Zero,
),
];
for &(x, y, e_result, e_status, e_category) in &special_cases[..] {
let status;
let result = unpack!(status=, x - y);
assert_eq!(status, e_status);
assert_eq!(result.category(), e_category);
assert!(result.bitwise_eq(e_result.parse::<Single>().unwrap()));
}
}
#[test]
fn multiply() {
// Test Special Cases against each other and normal values.
// FIXMES/NOTES:
// 1. Since we perform only default exception handling all operations with
// signaling NaNs should have a result that is a quiet NaN. Currently they
// return sNaN.
let p_inf = Single::INFINITY;
let m_inf = -Single::INFINITY;
let p_zero = Single::ZERO;
let m_zero = -Single::ZERO;
let qnan = Single::NAN;
let p_normal_value = "0x1p+0".parse::<Single>().unwrap();
let m_normal_value = "-0x1p+0".parse::<Single>().unwrap();
let p_largest_value = Single::largest();
let m_largest_value = -Single::largest();
let p_smallest_value = Single::SMALLEST;
let m_smallest_value = -Single::SMALLEST;
let p_smallest_normalized = Single::smallest_normalized();
let m_smallest_normalized = -Single::smallest_normalized();
let overflow_status = Status::OVERFLOW | Status::INEXACT;
let underflow_status = Status::UNDERFLOW | Status::INEXACT;
let special_cases = [
(p_inf, p_inf, "inf", Status::OK, Category::Infinity),
(p_inf, m_inf, "-inf", Status::OK, Category::Infinity),
(p_inf, p_zero, "nan", Status::INVALID_OP, Category::NaN),
(p_inf, m_zero, "nan", Status::INVALID_OP, Category::NaN),
(p_inf, qnan, "nan", Status::OK, Category::NaN),
/*
// See Note 1.
(p_inf, snan, "nan", Status::INVALID_OP, Category::NaN),
*/
(p_inf, p_normal_value, "inf", Status::OK, Category::Infinity),
(
p_inf,
m_normal_value,
"-inf",
Status::OK,
Category::Infinity,
),
(
p_inf,
p_largest_value,
"inf",
Status::OK,
Category::Infinity,
),
(
p_inf,
m_largest_value,
"-inf",
Status::OK,
Category::Infinity,
),
(
p_inf,
p_smallest_value,
"inf",
Status::OK,
Category::Infinity,
),
(
p_inf,
m_smallest_value,
"-inf",
Status::OK,
Category::Infinity,
),
(
p_inf,
p_smallest_normalized,
"inf",
Status::OK,
Category::Infinity,
),
(
p_inf,
m_smallest_normalized,
"-inf",
Status::OK,
Category::Infinity,
),
(m_inf, p_inf, "-inf", Status::OK, Category::Infinity),
(m_inf, m_inf, "inf", Status::OK, Category::Infinity),
(m_inf, p_zero, "nan", Status::INVALID_OP, Category::NaN),
(m_inf, m_zero, "nan", Status::INVALID_OP, Category::NaN),
(m_inf, qnan, "nan", Status::OK, Category::NaN),
/*
// See Note 1.
(m_inf, snan, "nan", Status::INVALID_OP, Category::NaN),
*/
(
m_inf,
p_normal_value,
"-inf",
Status::OK,
Category::Infinity,
),
(m_inf, m_normal_value, "inf", Status::OK, Category::Infinity),
(
m_inf,
p_largest_value,
"-inf",
Status::OK,
Category::Infinity,
),
(
m_inf,
m_largest_value,
"inf",
Status::OK,
Category::Infinity,
),
(
m_inf,
p_smallest_value,
"-inf",
Status::OK,
Category::Infinity,
),
(
m_inf,
m_smallest_value,
"inf",
Status::OK,
Category::Infinity,
),
(
m_inf,
p_smallest_normalized,
"-inf",
Status::OK,
Category::Infinity,
),
(
m_inf,
m_smallest_normalized,
"inf",
Status::OK,
Category::Infinity,
),
(p_zero, p_inf, "nan", Status::INVALID_OP, Category::NaN),
(p_zero, m_inf, "nan", Status::INVALID_OP, Category::NaN),
(p_zero, p_zero, "0x0p+0", Status::OK, Category::Zero),
(p_zero, m_zero, "-0x0p+0", Status::OK, Category::Zero),
(p_zero, qnan, "nan", Status::OK, Category::NaN),
/*
// See Note 1.
(p_zero, snan, "nan", Status::INVALID_OP, Category::NaN),
*/
(p_zero, p_normal_value, "0x0p+0", Status::OK, Category::Zero),
(
p_zero,
m_normal_value,
"-0x0p+0",
Status::OK,
Category::Zero,
),
(
p_zero,
p_largest_value,
"0x0p+0",
Status::OK,
Category::Zero,
),
(
p_zero,
m_largest_value,
"-0x0p+0",
Status::OK,
Category::Zero,
),
(
p_zero,
p_smallest_value,
"0x0p+0",
Status::OK,
Category::Zero,
),
(
p_zero,
m_smallest_value,
"-0x0p+0",
Status::OK,
Category::Zero,
),
(
p_zero,
p_smallest_normalized,
"0x0p+0",
Status::OK,
Category::Zero,
),
(
p_zero,
m_smallest_normalized,
"-0x0p+0",
Status::OK,
Category::Zero,
),
(m_zero, p_inf, "nan", Status::INVALID_OP, Category::NaN),
(m_zero, m_inf, "nan", Status::INVALID_OP, Category::NaN),
(m_zero, p_zero, "-0x0p+0", Status::OK, Category::Zero),
(m_zero, m_zero, "0x0p+0", Status::OK, Category::Zero),
(m_zero, qnan, "nan", Status::OK, Category::NaN),
/*
// See Note 1.
(m_zero, snan, "nan", Status::INVALID_OP, Category::NaN),
*/
(
m_zero,
p_normal_value,
"-0x0p+0",
Status::OK,
Category::Zero,
),
(m_zero, m_normal_value, "0x0p+0", Status::OK, Category::Zero),
(
m_zero,
p_largest_value,
"-0x0p+0",
Status::OK,
Category::Zero,
),
(
m_zero,
m_largest_value,
"0x0p+0",
Status::OK,
Category::Zero,
),
(
m_zero,
p_smallest_value,
"-0x0p+0",
Status::OK,
Category::Zero,
),
(
m_zero,
m_smallest_value,
"0x0p+0",
Status::OK,
Category::Zero,
),
(
m_zero,
p_smallest_normalized,
"-0x0p+0",
Status::OK,
Category::Zero,
),
(
m_zero,
m_smallest_normalized,
"0x0p+0",
Status::OK,
Category::Zero,
),
(qnan, p_inf, "nan", Status::OK, Category::NaN),
(qnan, m_inf, "nan", Status::OK, Category::NaN),
(qnan, p_zero, "nan", Status::OK, Category::NaN),
(qnan, m_zero, "nan", Status::OK, Category::NaN),
(qnan, qnan, "nan", Status::OK, Category::NaN),
/*
// See Note 1.
(qnan, snan, "nan", Status::INVALID_OP, Category::NaN),
*/
(qnan, p_normal_value, "nan", Status::OK, Category::NaN),
(qnan, m_normal_value, "nan", Status::OK, Category::NaN),
(qnan, p_largest_value, "nan", Status::OK, Category::NaN),
(qnan, m_largest_value, "nan", Status::OK, Category::NaN),
(qnan, p_smallest_value, "nan", Status::OK, Category::NaN),
(qnan, m_smallest_value, "nan", Status::OK, Category::NaN),
(
qnan,
p_smallest_normalized,
"nan",
Status::OK,
Category::NaN,
),
(
qnan,
m_smallest_normalized,
"nan",
Status::OK,
Category::NaN,
),
/*
// See Note 1.
(snan, p_inf, "nan", Status::INVALID_OP, Category::NaN),
(snan, m_inf, "nan", Status::INVALID_OP, Category::NaN),
(snan, p_zero, "nan", Status::INVALID_OP, Category::NaN),
(snan, m_zero, "nan", Status::INVALID_OP, Category::NaN),
(snan, qnan, "nan", Status::INVALID_OP, Category::NaN),
(snan, snan, "nan", Status::INVALID_OP, Category::NaN),
(snan, p_normal_value, "nan", Status::INVALID_OP, Category::NaN),
(snan, m_normal_value, "nan", Status::INVALID_OP, Category::NaN),
(snan, p_largest_value, "nan", Status::INVALID_OP, Category::NaN),
(snan, m_largest_value, "nan", Status::INVALID_OP, Category::NaN),
(snan, p_smallest_value, "nan", Status::INVALID_OP, Category::NaN),
(snan, m_smallest_value, "nan", Status::INVALID_OP, Category::NaN),
(snan, p_smallest_normalized, "nan", Status::INVALID_OP, Category::NaN),
(snan, m_smallest_normalized, "nan", Status::INVALID_OP, Category::NaN),
*/
(p_normal_value, p_inf, "inf", Status::OK, Category::Infinity),
(
p_normal_value,
m_inf,
"-inf",
Status::OK,
Category::Infinity,
),
(p_normal_value, p_zero, "0x0p+0", Status::OK, Category::Zero),
(
p_normal_value,
m_zero,
"-0x0p+0",
Status::OK,
Category::Zero,
),
(p_normal_value, qnan, "nan", Status::OK, Category::NaN),
/*
// See Note 1.
(p_normal_value, snan, "nan", Status::INVALID_OP, Category::NaN),
*/
(
p_normal_value,
p_normal_value,
"0x1p+0",
Status::OK,
Category::Normal,
),
(
p_normal_value,
m_normal_value,
"-0x1p+0",
Status::OK,
Category::Normal,
),
(
p_normal_value,
p_largest_value,
"0x1.fffffep+127",
Status::OK,
Category::Normal,
),
(
p_normal_value,
m_largest_value,
"-0x1.fffffep+127",
Status::OK,
Category::Normal,
),
(
p_normal_value,
p_smallest_value,
"0x1p-149",
Status::OK,
Category::Normal,
),
(
p_normal_value,
m_smallest_value,
"-0x1p-149",
Status::OK,
Category::Normal,
),
(
p_normal_value,
p_smallest_normalized,
"0x1p-126",
Status::OK,
Category::Normal,
),
(
p_normal_value,
m_smallest_normalized,
"-0x1p-126",
Status::OK,
Category::Normal,
),
(
m_normal_value,
p_inf,
"-inf",
Status::OK,
Category::Infinity,
),
(m_normal_value, m_inf, "inf", Status::OK, Category::Infinity),
(
m_normal_value,
p_zero,
"-0x0p+0",
Status::OK,
Category::Zero,
),
(m_normal_value, m_zero, "0x0p+0", Status::OK, Category::Zero),
(m_normal_value, qnan, "nan", Status::OK, Category::NaN),
/*
// See Note 1.
(m_normal_value, snan, "nan", Status::INVALID_OP, Category::NaN),
*/
(
m_normal_value,
p_normal_value,
"-0x1p+0",
Status::OK,
Category::Normal,
),
(
m_normal_value,
m_normal_value,
"0x1p+0",
Status::OK,
Category::Normal,
),
(
m_normal_value,
p_largest_value,
"-0x1.fffffep+127",
Status::OK,
Category::Normal,
),
(
m_normal_value,
m_largest_value,
"0x1.fffffep+127",
Status::OK,
Category::Normal,
),
(
m_normal_value,
p_smallest_value,
"-0x1p-149",
Status::OK,
Category::Normal,
),
(
m_normal_value,
m_smallest_value,
"0x1p-149",
Status::OK,
Category::Normal,
),
(
m_normal_value,
p_smallest_normalized,
"-0x1p-126",
Status::OK,
Category::Normal,
),
(
m_normal_value,
m_smallest_normalized,
"0x1p-126",
Status::OK,
Category::Normal,
),
(
p_largest_value,
p_inf,
"inf",
Status::OK,
Category::Infinity,
),
(
p_largest_value,
m_inf,
"-inf",
Status::OK,
Category::Infinity,
),
(
p_largest_value,
p_zero,
"0x0p+0",
Status::OK,
Category::Zero,
),
(
p_largest_value,
m_zero,
"-0x0p+0",
Status::OK,
Category::Zero,
),
(p_largest_value, qnan, "nan", Status::OK, Category::NaN),
/*
// See Note 1.
(p_largest_value, snan, "nan", Status::INVALID_OP, Category::NaN),
*/
(
p_largest_value,
p_normal_value,
"0x1.fffffep+127",
Status::OK,
Category::Normal,
),
(
p_largest_value,
m_normal_value,
"-0x1.fffffep+127",
Status::OK,
Category::Normal,
),
(
p_largest_value,
p_largest_value,
"inf",
overflow_status,
Category::Infinity,
),
(
p_largest_value,
m_largest_value,
"-inf",
overflow_status,
Category::Infinity,
),
(
p_largest_value,
p_smallest_value,
"0x1.fffffep-22",
Status::OK,
Category::Normal,
),
(
p_largest_value,
m_smallest_value,
"-0x1.fffffep-22",
Status::OK,
Category::Normal,
),
(
p_largest_value,
p_smallest_normalized,
"0x1.fffffep+1",
Status::OK,
Category::Normal,
),
(
p_largest_value,
m_smallest_normalized,
"-0x1.fffffep+1",
Status::OK,
Category::Normal,
),
(
m_largest_value,
p_inf,
"-inf",
Status::OK,
Category::Infinity,
),
(
m_largest_value,
m_inf,
"inf",
Status::OK,
Category::Infinity,
),
(
m_largest_value,
p_zero,
"-0x0p+0",
Status::OK,
Category::Zero,
),
(
m_largest_value,
m_zero,
"0x0p+0",
Status::OK,
Category::Zero,
),
(m_largest_value, qnan, "nan", Status::OK, Category::NaN),
/*
// See Note 1.
(m_largest_value, snan, "nan", Status::INVALID_OP, Category::NaN),
*/
(
m_largest_value,
p_normal_value,
"-0x1.fffffep+127",
Status::OK,
Category::Normal,
),
(
m_largest_value,
m_normal_value,
"0x1.fffffep+127",
Status::OK,
Category::Normal,
),
(
m_largest_value,
p_largest_value,
"-inf",
overflow_status,
Category::Infinity,
),
(
m_largest_value,
m_largest_value,
"inf",
overflow_status,
Category::Infinity,
),
(
m_largest_value,
p_smallest_value,
"-0x1.fffffep-22",
Status::OK,
Category::Normal,
),
(
m_largest_value,
m_smallest_value,
"0x1.fffffep-22",
Status::OK,
Category::Normal,
),
(
m_largest_value,
p_smallest_normalized,
"-0x1.fffffep+1",
Status::OK,
Category::Normal,
),
(
m_largest_value,
m_smallest_normalized,
"0x1.fffffep+1",
Status::OK,
Category::Normal,
),
(
p_smallest_value,
p_inf,
"inf",
Status::OK,
Category::Infinity,
),
(
p_smallest_value,
m_inf,
"-inf",
Status::OK,
Category::Infinity,
),
(
p_smallest_value,
p_zero,
"0x0p+0",
Status::OK,
Category::Zero,
),
(
p_smallest_value,
m_zero,
"-0x0p+0",
Status::OK,
Category::Zero,
),
(p_smallest_value, qnan, "nan", Status::OK, Category::NaN),
/*
// See Note 1.
(p_smallest_value, snan, "nan", Status::INVALID_OP, Category::NaN),
*/
(
p_smallest_value,
p_normal_value,
"0x1p-149",
Status::OK,
Category::Normal,
),
(
p_smallest_value,
m_normal_value,
"-0x1p-149",
Status::OK,
Category::Normal,
),
(
p_smallest_value,
p_largest_value,
"0x1.fffffep-22",
Status::OK,
Category::Normal,
),
(
p_smallest_value,
m_largest_value,
"-0x1.fffffep-22",
Status::OK,
Category::Normal,
),
(
p_smallest_value,
p_smallest_value,
"0x0p+0",
underflow_status,
Category::Zero,
),
(
p_smallest_value,
m_smallest_value,
"-0x0p+0",
underflow_status,
Category::Zero,
),
(
p_smallest_value,
p_smallest_normalized,
"0x0p+0",
underflow_status,
Category::Zero,
),
(
p_smallest_value,
m_smallest_normalized,
"-0x0p+0",
underflow_status,
Category::Zero,
),
(
m_smallest_value,
p_inf,
"-inf",
Status::OK,
Category::Infinity,
),
(
m_smallest_value,
m_inf,
"inf",
Status::OK,
Category::Infinity,
),
(
m_smallest_value,
p_zero,
"-0x0p+0",
Status::OK,
Category::Zero,
),
(
m_smallest_value,
m_zero,
"0x0p+0",
Status::OK,
Category::Zero,
),
(m_smallest_value, qnan, "nan", Status::OK, Category::NaN),
/*
// See Note 1.
(m_smallest_value, snan, "nan", Status::INVALID_OP, Category::NaN),
*/
(
m_smallest_value,
p_normal_value,
"-0x1p-149",
Status::OK,
Category::Normal,
),
(
m_smallest_value,
m_normal_value,
"0x1p-149",
Status::OK,
Category::Normal,
),
(
m_smallest_value,
p_largest_value,
"-0x1.fffffep-22",
Status::OK,
Category::Normal,
),
(
m_smallest_value,
m_largest_value,
"0x1.fffffep-22",
Status::OK,
Category::Normal,
),
(
m_smallest_value,
p_smallest_value,
"-0x0p+0",
underflow_status,
Category::Zero,
),
(
m_smallest_value,
m_smallest_value,
"0x0p+0",
underflow_status,
Category::Zero,
),
(
m_smallest_value,
p_smallest_normalized,
"-0x0p+0",
underflow_status,
Category::Zero,
),
(
m_smallest_value,
m_smallest_normalized,
"0x0p+0",
underflow_status,
Category::Zero,
),
(
p_smallest_normalized,
p_inf,
"inf",
Status::OK,
Category::Infinity,
),
(
p_smallest_normalized,
m_inf,
"-inf",
Status::OK,
Category::Infinity,
),
(
p_smallest_normalized,
p_zero,
"0x0p+0",
Status::OK,
Category::Zero,
),
(
p_smallest_normalized,
m_zero,
"-0x0p+0",
Status::OK,
Category::Zero,
),
(
p_smallest_normalized,
qnan,
"nan",
Status::OK,
Category::NaN,
),
/*
// See Note 1.
(p_smallest_normalized, snan, "nan", Status::INVALID_OP, Category::NaN),
*/
(
p_smallest_normalized,
p_normal_value,
"0x1p-126",
Status::OK,
Category::Normal,
),
(
p_smallest_normalized,
m_normal_value,
"-0x1p-126",
Status::OK,
Category::Normal,
),
(
p_smallest_normalized,
p_largest_value,
"0x1.fffffep+1",
Status::OK,
Category::Normal,
),
(
p_smallest_normalized,
m_largest_value,
"-0x1.fffffep+1",
Status::OK,
Category::Normal,
),
(
p_smallest_normalized,
p_smallest_value,
"0x0p+0",
underflow_status,
Category::Zero,
),
(
p_smallest_normalized,
m_smallest_value,
"-0x0p+0",
underflow_status,
Category::Zero,
),
(
p_smallest_normalized,
p_smallest_normalized,
"0x0p+0",
underflow_status,
Category::Zero,
),
(
p_smallest_normalized,
m_smallest_normalized,
"-0x0p+0",
underflow_status,
Category::Zero,
),
(
m_smallest_normalized,
p_inf,
"-inf",
Status::OK,
Category::Infinity,
),
(
m_smallest_normalized,
m_inf,
"inf",
Status::OK,
Category::Infinity,
),
(
m_smallest_normalized,
p_zero,
"-0x0p+0",
Status::OK,
Category::Zero,
),
(
m_smallest_normalized,
m_zero,
"0x0p+0",
Status::OK,
Category::Zero,
),
(
m_smallest_normalized,
qnan,
"nan",
Status::OK,
Category::NaN,
),
/*
// See Note 1.
(m_smallest_normalized, snan, "nan", Status::INVALID_OP, Category::NaN),
*/
(
m_smallest_normalized,
p_normal_value,
"-0x1p-126",
Status::OK,
Category::Normal,
),
(
m_smallest_normalized,
m_normal_value,
"0x1p-126",
Status::OK,
Category::Normal,
),
(
m_smallest_normalized,
p_largest_value,
"-0x1.fffffep+1",
Status::OK,
Category::Normal,
),
(
m_smallest_normalized,
m_largest_value,
"0x1.fffffep+1",
Status::OK,
Category::Normal,
),
(
m_smallest_normalized,
p_smallest_value,
"-0x0p+0",
underflow_status,
Category::Zero,
),
(
m_smallest_normalized,
m_smallest_value,
"0x0p+0",
underflow_status,
Category::Zero,
),
(
m_smallest_normalized,
p_smallest_normalized,
"-0x0p+0",
underflow_status,
Category::Zero,
),
(
m_smallest_normalized,
m_smallest_normalized,
"0x0p+0",
underflow_status,
Category::Zero,
),
];
for &(x, y, e_result, e_status, e_category) in &special_cases[..] {
let status;
let result = unpack!(status=, x * y);
assert_eq!(status, e_status);
assert_eq!(result.category(), e_category);
assert!(result.bitwise_eq(e_result.parse::<Single>().unwrap()));
}
}
#[test]
fn divide() {
    // Exercise division special cases (infinities, zeros, NaNs) against each
    // other and against representative normal, largest, smallest-denormal and
    // smallest-normalized f32 values.
    //
    // NOTE: since only default exception handling is performed, operations on
    // signaling NaNs should produce a *quiet* NaN result; they currently
    // return an sNaN instead, so every `snan` row below stays disabled.
    let p_inf = Single::INFINITY;
    let m_inf = -Single::INFINITY;
    let p_zero = Single::ZERO;
    let m_zero = -Single::ZERO;
    let qnan = Single::NAN;
    let p_normal_value = "0x1p+0".parse::<Single>().unwrap();
    let m_normal_value = "-0x1p+0".parse::<Single>().unwrap();
    let p_largest_value = Single::largest();
    let m_largest_value = -Single::largest();
    let p_smallest_value = Single::SMALLEST;
    let m_smallest_value = -Single::SMALLEST;
    let p_smallest_normalized = Single::smallest_normalized();
    let m_smallest_normalized = -Single::smallest_normalized();
    let overflow_status = Status::OVERFLOW | Status::INEXACT;
    let underflow_status = Status::UNDERFLOW | Status::INEXACT;
    // (lhs, rhs, expected result, expected status, expected category)
    let special_cases = [
        (p_inf, p_inf, "nan", Status::INVALID_OP, Category::NaN),
        (p_inf, m_inf, "nan", Status::INVALID_OP, Category::NaN),
        (p_inf, p_zero, "inf", Status::OK, Category::Infinity),
        (p_inf, m_zero, "-inf", Status::OK, Category::Infinity),
        (p_inf, qnan, "nan", Status::OK, Category::NaN),
        // Disabled, see NOTE above:
        // (p_inf, snan, "nan", Status::INVALID_OP, Category::NaN),
        (p_inf, p_normal_value, "inf", Status::OK, Category::Infinity),
        (p_inf, m_normal_value, "-inf", Status::OK, Category::Infinity),
        (p_inf, p_largest_value, "inf", Status::OK, Category::Infinity),
        (p_inf, m_largest_value, "-inf", Status::OK, Category::Infinity),
        (p_inf, p_smallest_value, "inf", Status::OK, Category::Infinity),
        (p_inf, m_smallest_value, "-inf", Status::OK, Category::Infinity),
        (p_inf, p_smallest_normalized, "inf", Status::OK, Category::Infinity),
        (p_inf, m_smallest_normalized, "-inf", Status::OK, Category::Infinity),
        (m_inf, p_inf, "nan", Status::INVALID_OP, Category::NaN),
        (m_inf, m_inf, "nan", Status::INVALID_OP, Category::NaN),
        (m_inf, p_zero, "-inf", Status::OK, Category::Infinity),
        (m_inf, m_zero, "inf", Status::OK, Category::Infinity),
        (m_inf, qnan, "nan", Status::OK, Category::NaN),
        // Disabled, see NOTE above:
        // (m_inf, snan, "nan", Status::INVALID_OP, Category::NaN),
        (m_inf, p_normal_value, "-inf", Status::OK, Category::Infinity),
        (m_inf, m_normal_value, "inf", Status::OK, Category::Infinity),
        (m_inf, p_largest_value, "-inf", Status::OK, Category::Infinity),
        (m_inf, m_largest_value, "inf", Status::OK, Category::Infinity),
        (m_inf, p_smallest_value, "-inf", Status::OK, Category::Infinity),
        (m_inf, m_smallest_value, "inf", Status::OK, Category::Infinity),
        (m_inf, p_smallest_normalized, "-inf", Status::OK, Category::Infinity),
        (m_inf, m_smallest_normalized, "inf", Status::OK, Category::Infinity),
        (p_zero, p_inf, "0x0p+0", Status::OK, Category::Zero),
        (p_zero, m_inf, "-0x0p+0", Status::OK, Category::Zero),
        (p_zero, p_zero, "nan", Status::INVALID_OP, Category::NaN),
        (p_zero, m_zero, "nan", Status::INVALID_OP, Category::NaN),
        (p_zero, qnan, "nan", Status::OK, Category::NaN),
        // Disabled, see NOTE above:
        // (p_zero, snan, "nan", Status::INVALID_OP, Category::NaN),
        (p_zero, p_normal_value, "0x0p+0", Status::OK, Category::Zero),
        (p_zero, m_normal_value, "-0x0p+0", Status::OK, Category::Zero),
        (p_zero, p_largest_value, "0x0p+0", Status::OK, Category::Zero),
        (p_zero, m_largest_value, "-0x0p+0", Status::OK, Category::Zero),
        (p_zero, p_smallest_value, "0x0p+0", Status::OK, Category::Zero),
        (p_zero, m_smallest_value, "-0x0p+0", Status::OK, Category::Zero),
        (p_zero, p_smallest_normalized, "0x0p+0", Status::OK, Category::Zero),
        (p_zero, m_smallest_normalized, "-0x0p+0", Status::OK, Category::Zero),
        (m_zero, p_inf, "-0x0p+0", Status::OK, Category::Zero),
        (m_zero, m_inf, "0x0p+0", Status::OK, Category::Zero),
        (m_zero, p_zero, "nan", Status::INVALID_OP, Category::NaN),
        (m_zero, m_zero, "nan", Status::INVALID_OP, Category::NaN),
        (m_zero, qnan, "nan", Status::OK, Category::NaN),
        // Disabled, see NOTE above:
        // (m_zero, snan, "nan", Status::INVALID_OP, Category::NaN),
        (m_zero, p_normal_value, "-0x0p+0", Status::OK, Category::Zero),
        (m_zero, m_normal_value, "0x0p+0", Status::OK, Category::Zero),
        (m_zero, p_largest_value, "-0x0p+0", Status::OK, Category::Zero),
        (m_zero, m_largest_value, "0x0p+0", Status::OK, Category::Zero),
        (m_zero, p_smallest_value, "-0x0p+0", Status::OK, Category::Zero),
        (m_zero, m_smallest_value, "0x0p+0", Status::OK, Category::Zero),
        (m_zero, p_smallest_normalized, "-0x0p+0", Status::OK, Category::Zero),
        (m_zero, m_smallest_normalized, "0x0p+0", Status::OK, Category::Zero),
        (qnan, p_inf, "nan", Status::OK, Category::NaN),
        (qnan, m_inf, "nan", Status::OK, Category::NaN),
        (qnan, p_zero, "nan", Status::OK, Category::NaN),
        (qnan, m_zero, "nan", Status::OK, Category::NaN),
        (qnan, qnan, "nan", Status::OK, Category::NaN),
        // Disabled, see NOTE above:
        // (qnan, snan, "nan", Status::INVALID_OP, Category::NaN),
        (qnan, p_normal_value, "nan", Status::OK, Category::NaN),
        (qnan, m_normal_value, "nan", Status::OK, Category::NaN),
        (qnan, p_largest_value, "nan", Status::OK, Category::NaN),
        (qnan, m_largest_value, "nan", Status::OK, Category::NaN),
        (qnan, p_smallest_value, "nan", Status::OK, Category::NaN),
        (qnan, m_smallest_value, "nan", Status::OK, Category::NaN),
        (qnan, p_smallest_normalized, "nan", Status::OK, Category::NaN),
        (qnan, m_smallest_normalized, "nan", Status::OK, Category::NaN),
        // Disabled, see NOTE above:
        // (snan, p_inf, "nan", Status::INVALID_OP, Category::NaN),
        // (snan, m_inf, "nan", Status::INVALID_OP, Category::NaN),
        // (snan, p_zero, "nan", Status::INVALID_OP, Category::NaN),
        // (snan, m_zero, "nan", Status::INVALID_OP, Category::NaN),
        // (snan, qnan, "nan", Status::INVALID_OP, Category::NaN),
        // (snan, snan, "nan", Status::INVALID_OP, Category::NaN),
        // (snan, p_normal_value, "nan", Status::INVALID_OP, Category::NaN),
        // (snan, m_normal_value, "nan", Status::INVALID_OP, Category::NaN),
        // (snan, p_largest_value, "nan", Status::INVALID_OP, Category::NaN),
        // (snan, m_largest_value, "nan", Status::INVALID_OP, Category::NaN),
        // (snan, p_smallest_value, "nan", Status::INVALID_OP, Category::NaN),
        // (snan, m_smallest_value, "nan", Status::INVALID_OP, Category::NaN),
        // (snan, p_smallest_normalized, "nan", Status::INVALID_OP, Category::NaN),
        // (snan, m_smallest_normalized, "nan", Status::INVALID_OP, Category::NaN),
        (p_normal_value, p_inf, "0x0p+0", Status::OK, Category::Zero),
        (p_normal_value, m_inf, "-0x0p+0", Status::OK, Category::Zero),
        (p_normal_value, p_zero, "inf", Status::DIV_BY_ZERO, Category::Infinity),
        (p_normal_value, m_zero, "-inf", Status::DIV_BY_ZERO, Category::Infinity),
        (p_normal_value, qnan, "nan", Status::OK, Category::NaN),
        // Disabled, see NOTE above:
        // (p_normal_value, snan, "nan", Status::INVALID_OP, Category::NaN),
        (p_normal_value, p_normal_value, "0x1p+0", Status::OK, Category::Normal),
        (p_normal_value, m_normal_value, "-0x1p+0", Status::OK, Category::Normal),
        (p_normal_value, p_largest_value, "0x1p-128", underflow_status, Category::Normal),
        (p_normal_value, m_largest_value, "-0x1p-128", underflow_status, Category::Normal),
        (p_normal_value, p_smallest_value, "inf", overflow_status, Category::Infinity),
        (p_normal_value, m_smallest_value, "-inf", overflow_status, Category::Infinity),
        (p_normal_value, p_smallest_normalized, "0x1p+126", Status::OK, Category::Normal),
        (p_normal_value, m_smallest_normalized, "-0x1p+126", Status::OK, Category::Normal),
        (m_normal_value, p_inf, "-0x0p+0", Status::OK, Category::Zero),
        (m_normal_value, m_inf, "0x0p+0", Status::OK, Category::Zero),
        (m_normal_value, p_zero, "-inf", Status::DIV_BY_ZERO, Category::Infinity),
        (m_normal_value, m_zero, "inf", Status::DIV_BY_ZERO, Category::Infinity),
        (m_normal_value, qnan, "nan", Status::OK, Category::NaN),
        // Disabled, see NOTE above:
        // (m_normal_value, snan, "nan", Status::INVALID_OP, Category::NaN),
        (m_normal_value, p_normal_value, "-0x1p+0", Status::OK, Category::Normal),
        (m_normal_value, m_normal_value, "0x1p+0", Status::OK, Category::Normal),
        (m_normal_value, p_largest_value, "-0x1p-128", underflow_status, Category::Normal),
        (m_normal_value, m_largest_value, "0x1p-128", underflow_status, Category::Normal),
        (m_normal_value, p_smallest_value, "-inf", overflow_status, Category::Infinity),
        (m_normal_value, m_smallest_value, "inf", overflow_status, Category::Infinity),
        (m_normal_value, p_smallest_normalized, "-0x1p+126", Status::OK, Category::Normal),
        (m_normal_value, m_smallest_normalized, "0x1p+126", Status::OK, Category::Normal),
        (p_largest_value, p_inf, "0x0p+0", Status::OK, Category::Zero),
        (p_largest_value, m_inf, "-0x0p+0", Status::OK, Category::Zero),
        (p_largest_value, p_zero, "inf", Status::DIV_BY_ZERO, Category::Infinity),
        (p_largest_value, m_zero, "-inf", Status::DIV_BY_ZERO, Category::Infinity),
        (p_largest_value, qnan, "nan", Status::OK, Category::NaN),
        // Disabled, see NOTE above:
        // (p_largest_value, snan, "nan", Status::INVALID_OP, Category::NaN),
        (p_largest_value, p_normal_value, "0x1.fffffep+127", Status::OK, Category::Normal),
        (p_largest_value, m_normal_value, "-0x1.fffffep+127", Status::OK, Category::Normal),
        (p_largest_value, p_largest_value, "0x1p+0", Status::OK, Category::Normal),
        (p_largest_value, m_largest_value, "-0x1p+0", Status::OK, Category::Normal),
        (p_largest_value, p_smallest_value, "inf", overflow_status, Category::Infinity),
        (p_largest_value, m_smallest_value, "-inf", overflow_status, Category::Infinity),
        (p_largest_value, p_smallest_normalized, "inf", overflow_status, Category::Infinity),
        (p_largest_value, m_smallest_normalized, "-inf", overflow_status, Category::Infinity),
        (m_largest_value, p_inf, "-0x0p+0", Status::OK, Category::Zero),
        (m_largest_value, m_inf, "0x0p+0", Status::OK, Category::Zero),
        (m_largest_value, p_zero, "-inf", Status::DIV_BY_ZERO, Category::Infinity),
        (m_largest_value, m_zero, "inf", Status::DIV_BY_ZERO, Category::Infinity),
        (m_largest_value, qnan, "nan", Status::OK, Category::NaN),
        // Disabled, see NOTE above:
        // (m_largest_value, snan, "nan", Status::INVALID_OP, Category::NaN),
        (m_largest_value, p_normal_value, "-0x1.fffffep+127", Status::OK, Category::Normal),
        (m_largest_value, m_normal_value, "0x1.fffffep+127", Status::OK, Category::Normal),
        (m_largest_value, p_largest_value, "-0x1p+0", Status::OK, Category::Normal),
        (m_largest_value, m_largest_value, "0x1p+0", Status::OK, Category::Normal),
        (m_largest_value, p_smallest_value, "-inf", overflow_status, Category::Infinity),
        (m_largest_value, m_smallest_value, "inf", overflow_status, Category::Infinity),
        (m_largest_value, p_smallest_normalized, "-inf", overflow_status, Category::Infinity),
        (m_largest_value, m_smallest_normalized, "inf", overflow_status, Category::Infinity),
        (p_smallest_value, p_inf, "0x0p+0", Status::OK, Category::Zero),
        (p_smallest_value, m_inf, "-0x0p+0", Status::OK, Category::Zero),
        (p_smallest_value, p_zero, "inf", Status::DIV_BY_ZERO, Category::Infinity),
        (p_smallest_value, m_zero, "-inf", Status::DIV_BY_ZERO, Category::Infinity),
        (p_smallest_value, qnan, "nan", Status::OK, Category::NaN),
        // Disabled, see NOTE above:
        // (p_smallest_value, snan, "nan", Status::INVALID_OP, Category::NaN),
        (p_smallest_value, p_normal_value, "0x1p-149", Status::OK, Category::Normal),
        (p_smallest_value, m_normal_value, "-0x1p-149", Status::OK, Category::Normal),
        (p_smallest_value, p_largest_value, "0x0p+0", underflow_status, Category::Zero),
        (p_smallest_value, m_largest_value, "-0x0p+0", underflow_status, Category::Zero),
        (p_smallest_value, p_smallest_value, "0x1p+0", Status::OK, Category::Normal),
        (p_smallest_value, m_smallest_value, "-0x1p+0", Status::OK, Category::Normal),
        (p_smallest_value, p_smallest_normalized, "0x1p-23", Status::OK, Category::Normal),
        (p_smallest_value, m_smallest_normalized, "-0x1p-23", Status::OK, Category::Normal),
        (m_smallest_value, p_inf, "-0x0p+0", Status::OK, Category::Zero),
        (m_smallest_value, m_inf, "0x0p+0", Status::OK, Category::Zero),
        (m_smallest_value, p_zero, "-inf", Status::DIV_BY_ZERO, Category::Infinity),
        (m_smallest_value, m_zero, "inf", Status::DIV_BY_ZERO, Category::Infinity),
        (m_smallest_value, qnan, "nan", Status::OK, Category::NaN),
        // Disabled, see NOTE above:
        // (m_smallest_value, snan, "nan", Status::INVALID_OP, Category::NaN),
        (m_smallest_value, p_normal_value, "-0x1p-149", Status::OK, Category::Normal),
        (m_smallest_value, m_normal_value, "0x1p-149", Status::OK, Category::Normal),
        (m_smallest_value, p_largest_value, "-0x0p+0", underflow_status, Category::Zero),
        (m_smallest_value, m_largest_value, "0x0p+0", underflow_status, Category::Zero),
        (m_smallest_value, p_smallest_value, "-0x1p+0", Status::OK, Category::Normal),
        (m_smallest_value, m_smallest_value, "0x1p+0", Status::OK, Category::Normal),
        (m_smallest_value, p_smallest_normalized, "-0x1p-23", Status::OK, Category::Normal),
        (m_smallest_value, m_smallest_normalized, "0x1p-23", Status::OK, Category::Normal),
        (p_smallest_normalized, p_inf, "0x0p+0", Status::OK, Category::Zero),
        (p_smallest_normalized, m_inf, "-0x0p+0", Status::OK, Category::Zero),
        (p_smallest_normalized, p_zero, "inf", Status::DIV_BY_ZERO, Category::Infinity),
        (p_smallest_normalized, m_zero, "-inf", Status::DIV_BY_ZERO, Category::Infinity),
        (p_smallest_normalized, qnan, "nan", Status::OK, Category::NaN),
        // Disabled, see NOTE above:
        // (p_smallest_normalized, snan, "nan", Status::INVALID_OP, Category::NaN),
        (p_smallest_normalized, p_normal_value, "0x1p-126", Status::OK, Category::Normal),
        (p_smallest_normalized, m_normal_value, "-0x1p-126", Status::OK, Category::Normal),
        (p_smallest_normalized, p_largest_value, "0x0p+0", underflow_status, Category::Zero),
        (p_smallest_normalized, m_largest_value, "-0x0p+0", underflow_status, Category::Zero),
        (p_smallest_normalized, p_smallest_value, "0x1p+23", Status::OK, Category::Normal),
        (p_smallest_normalized, m_smallest_value, "-0x1p+23", Status::OK, Category::Normal),
        (p_smallest_normalized, p_smallest_normalized, "0x1p+0", Status::OK, Category::Normal),
        (p_smallest_normalized, m_smallest_normalized, "-0x1p+0", Status::OK, Category::Normal),
        (m_smallest_normalized, p_inf, "-0x0p+0", Status::OK, Category::Zero),
        (m_smallest_normalized, m_inf, "0x0p+0", Status::OK, Category::Zero),
        (m_smallest_normalized, p_zero, "-inf", Status::DIV_BY_ZERO, Category::Infinity),
        (m_smallest_normalized, m_zero, "inf", Status::DIV_BY_ZERO, Category::Infinity),
        (m_smallest_normalized, qnan, "nan", Status::OK, Category::NaN),
        // Disabled, see NOTE above:
        // (m_smallest_normalized, snan, "nan", Status::INVALID_OP, Category::NaN),
        (m_smallest_normalized, p_normal_value, "-0x1p-126", Status::OK, Category::Normal),
        (m_smallest_normalized, m_normal_value, "0x1p-126", Status::OK, Category::Normal),
        (m_smallest_normalized, p_largest_value, "-0x0p+0", underflow_status, Category::Zero),
        (m_smallest_normalized, m_largest_value, "0x0p+0", underflow_status, Category::Zero),
        (m_smallest_normalized, p_smallest_value, "-0x1p+23", Status::OK, Category::Normal),
        (m_smallest_normalized, m_smallest_value, "0x1p+23", Status::OK, Category::Normal),
        (m_smallest_normalized, p_smallest_normalized, "-0x1p+0", Status::OK, Category::Normal),
        (m_smallest_normalized, m_smallest_normalized, "0x1p+0", Status::OK, Category::Normal),
    ];
    for &(lhs, rhs, want, want_status, want_category) in special_cases.iter() {
        let status;
        let result = unpack!(status=, lhs / rhs);
        assert_eq!(status, want_status);
        assert_eq!(result.category(), want_category);
        assert!(result.bitwise_eq(want.parse::<Single>().unwrap()));
    }
}
#[test]
fn operator_overloads() {
    // Primarily a compile-time check that the arithmetic operator overloads
    // exist; the value assertions are a sanity bonus.
    let one: Single = "0x1p+0".parse().unwrap();
    let two: Single = "0x2p+0".parse().unwrap();
    let sum = one + one;
    let difference = two - one;
    let product = one * two;
    let quotient = two / two;
    assert!(two.bitwise_eq(sum.value));
    assert!(one.bitwise_eq(difference.value));
    assert!(two.bitwise_eq(product.value));
    assert!(one.bitwise_eq(quotient.value));
}
#[test]
fn abs() {
    // `abs()` must strip the sign from every category of value — zeros,
    // infinities, NaNs (quiet and signaling), normals and denormals — while
    // leaving the magnitude bits untouched (checked via bitwise_eq).
    let p_inf = Single::INFINITY;
    let m_inf = -Single::INFINITY;
    let p_zero = Single::ZERO;
    let m_zero = -Single::ZERO;
    let p_qnan = Single::NAN;
    let m_qnan = -Single::NAN;
    let p_snan = Single::snan(None);
    let m_snan = -Single::snan(None);
    let p_normal_value = "0x1p+0".parse::<Single>().unwrap();
    let m_normal_value = "-0x1p+0".parse::<Single>().unwrap();
    let p_largest_value = Single::largest();
    let m_largest_value = -Single::largest();
    let p_smallest_value = Single::SMALLEST;
    let m_smallest_value = -Single::SMALLEST;
    let p_smallest_normalized = Single::smallest_normalized();
    let m_smallest_normalized = -Single::smallest_normalized();
    // (expected, input): abs(input) must be bit-identical to expected.
    let cases = [
        (p_inf, p_inf),
        (p_inf, m_inf),
        (p_zero, p_zero),
        (p_zero, m_zero),
        (p_qnan, p_qnan),
        (p_qnan, m_qnan),
        (p_snan, p_snan),
        (p_snan, m_snan),
        (p_normal_value, p_normal_value),
        (p_normal_value, m_normal_value),
        (p_largest_value, p_largest_value),
        (p_largest_value, m_largest_value),
        (p_smallest_value, p_smallest_value),
        (p_smallest_value, m_smallest_value),
        (p_smallest_normalized, p_smallest_normalized),
        (p_smallest_normalized, m_smallest_normalized),
    ];
    for &(expected, input) in &cases {
        assert!(expected.bitwise_eq(input.abs()));
    }
}
#[test]
fn neg() {
    // Negation must flip only the sign bit, for every category of value
    // (finite, zero, infinity and NaN), and must round-trip: -(-x) == x.
    let one = "1.0".parse::<Single>().unwrap();
    let neg_one = "-1.0".parse::<Single>().unwrap();
    let zero = Single::ZERO;
    let neg_zero = -Single::ZERO;
    let inf = Single::INFINITY;
    let neg_inf = -Single::INFINITY;
    let qnan = Single::NAN;
    let neg_qnan = -Single::NAN;
    assert!(neg_one.bitwise_eq(-one));
    assert!(one.bitwise_eq(-neg_one));
    assert!(neg_zero.bitwise_eq(-zero));
    assert!(zero.bitwise_eq(-neg_zero));
    // (The original repeated the two infinity assertions twice — an
    // accidental copy-paste duplicate, removed here.)
    assert!(neg_inf.bitwise_eq(-inf));
    assert!(inf.bitwise_eq(-neg_inf));
    assert!(neg_qnan.bitwise_eq(-qnan));
    assert!(qnan.bitwise_eq(-neg_qnan));
}
#[test]
fn ilogb() {
    // `ilogb()` reports the unbiased exponent; denormals are reported as if
    // normalized first, and zero/infinity/NaN map to the IEK_* sentinels.
    assert_eq!(-1074, Double::SMALLEST.ilogb());
    assert_eq!(-1074, (-Double::SMALLEST).ilogb());
    // Parsed f64 inputs with their expected exponents.
    for &(expected, input) in &[
        (-1023, "0x1.ffffffffffffep-1024"),
        (-1023, "0x1.ffffffffffffep-1023"),
        (-1023, "-0x1.ffffffffffffep-1023"),
        (-51, "0x1p-51"),
        (-1023, "0x1.c60f120d9f87cp-1023"),
        (-2, "0x0.ffffp-1"),
        (-1023, "0x1.fffep-1023"),
    ] {
        assert_eq!(expected, input.parse::<Double>().unwrap().ilogb());
    }
    assert_eq!(1023, Double::largest().ilogb());
    assert_eq!(1023, (-Double::largest()).ilogb());
    // Parsed f32 inputs with their expected exponents.
    for &(expected, input) in &[
        (0, "0x1p+0"),
        (0, "-0x1p+0"),
        (42, "0x1p+42"),
        (-42, "0x1p-42"),
    ] {
        assert_eq!(expected, input.parse::<Single>().unwrap().ilogb());
    }
    // Special categories return their sentinel constants.
    assert_eq!(IEK_INF, Single::INFINITY.ilogb());
    assert_eq!(IEK_INF, (-Single::INFINITY).ilogb());
    assert_eq!(IEK_ZERO, Single::ZERO.ilogb());
    assert_eq!(IEK_ZERO, (-Single::ZERO).ilogb());
    assert_eq!(IEK_NAN, Single::NAN.ilogb());
    assert_eq!(IEK_NAN, Single::snan(None).ilogb());
    assert_eq!(127, Single::largest().ilogb());
    assert_eq!(127, (-Single::largest()).ilogb());
    assert_eq!(-149, Single::SMALLEST.ilogb());
    assert_eq!(-149, (-Single::SMALLEST).ilogb());
    assert_eq!(-126, Single::smallest_normalized().ilogb());
    assert_eq!(-126, (-Single::smallest_normalized()).ilogb());
}
#[test]
fn scalbn() {
    // `scalbn(n)` scales by 2^n with correct overflow-to-infinity and
    // underflow-to-zero behavior; signaling NaNs come back quietened with
    // their payload preserved.
    let single = |s: &str| s.parse::<Single>().unwrap();
    let double = |s: &str| s.parse::<Double>().unwrap();

    assert!(single("0x1p+0").bitwise_eq(single("0x1p+0").scalbn(0)));
    assert!(single("0x1p+42").bitwise_eq(single("0x1p+0").scalbn(42)));
    assert!(single("0x1p-42").bitwise_eq(single("0x1p+0").scalbn(-42)));

    let p_inf = Single::INFINITY;
    let m_inf = -Single::INFINITY;
    let p_zero = Single::ZERO;
    let m_zero = -Single::ZERO;
    let p_qnan = Single::NAN;
    let m_qnan = -Single::NAN;
    let snan = Single::snan(None);

    // Scaling by 2^0 is an identity for every special value...
    assert!(p_inf.bitwise_eq(p_inf.scalbn(0)));
    assert!(m_inf.bitwise_eq(m_inf.scalbn(0)));
    assert!(p_zero.bitwise_eq(p_zero.scalbn(0)));
    assert!(m_zero.bitwise_eq(m_zero.scalbn(0)));
    assert!(p_qnan.bitwise_eq(p_qnan.scalbn(0)));
    assert!(m_qnan.bitwise_eq(m_qnan.scalbn(0)));
    // ...except that an sNaN is quietened rather than returned bit-identical.
    assert!(!snan.scalbn(0).is_signaling());

    let scalbn_snan = snan.scalbn(1);
    assert!(scalbn_snan.is_nan() && !scalbn_snan.is_signaling());

    // The NaN payload (including its highest bit) must survive quietening.
    let payload = (1 << 50) | (1 << 49) | (1234 << 32) | 1;
    let snan_with_payload = Double::snan(Some(payload));
    let quiet_payload = snan_with_payload.scalbn(1);
    assert!(quiet_payload.is_nan() && !quiet_payload.is_signaling());
    assert_eq!(payload, quiet_payload.to_bits() & ((1 << 51) - 1));

    // f32 overflow and underflow edges.
    assert!(p_inf.bitwise_eq(single("0x1p+0").scalbn(128)));
    assert!(m_inf.bitwise_eq(single("-0x1p+0").scalbn(128)));
    assert!(p_inf.bitwise_eq(single("0x1p+127").scalbn(1)));
    assert!(p_zero.bitwise_eq(single("0x1p-127").scalbn(-127)));
    assert!(m_zero.bitwise_eq(single("-0x1p-127").scalbn(-127)));
    assert!(single("-0x1p-149").bitwise_eq(single("-0x1p-127").scalbn(-22)));
    assert!(p_zero.bitwise_eq(single("0x1p-126").scalbn(-24)));

    let smallest_f64 = Double::SMALLEST;
    let neg_smallest_f64 = -Double::SMALLEST;
    let largest_f64 = Double::largest();
    let neg_largest_f64 = -Double::largest();
    let largest_denormal_f64 = double("0x1.ffffffffffffep-1023");
    let neg_largest_denormal_f64 = double("-0x1.ffffffffffffep-1023");

    assert!(smallest_f64.bitwise_eq(double("0x1p-1074").scalbn(0)));
    assert!(neg_smallest_f64.bitwise_eq(double("-0x1p-1074").scalbn(0)));
    assert!(double("0x1p+1023").bitwise_eq(smallest_f64.scalbn(2097)));
    assert!(smallest_f64.scalbn(-2097).is_pos_zero());
    assert!(smallest_f64.scalbn(-2098).is_pos_zero());
    assert!(smallest_f64.scalbn(-2099).is_pos_zero());
    assert!(double("0x1p+1022").bitwise_eq(smallest_f64.scalbn(2096)));
    assert!(double("0x1p+1023").bitwise_eq(smallest_f64.scalbn(2097)));
    assert!(smallest_f64.scalbn(2098).is_infinite());
    assert!(smallest_f64.scalbn(2099).is_infinite());

    // Adding the scale to the exponent must not overflow the exponent type.
    assert!(smallest_f64.scalbn(-ExpInt::max_value()).is_pos_zero());
    assert!(largest_f64.scalbn(ExpInt::max_value()).is_infinite());

    assert!(largest_denormal_f64.bitwise_eq(largest_denormal_f64.scalbn(0)));
    assert!(neg_largest_denormal_f64.bitwise_eq(neg_largest_denormal_f64.scalbn(0)));
    assert!(double("0x1.ffffffffffffep-1022").bitwise_eq(largest_denormal_f64.scalbn(1)));
    assert!(double("-0x1.ffffffffffffep-1021").bitwise_eq(neg_largest_denormal_f64.scalbn(2)));
    assert!(double("0x1.ffffffffffffep+1").bitwise_eq(largest_denormal_f64.scalbn(1024)));
    assert!(largest_denormal_f64.scalbn(-1023).is_pos_zero());
    assert!(largest_denormal_f64.scalbn(-1024).is_pos_zero());
    assert!(largest_denormal_f64.scalbn(-2048).is_pos_zero());
    assert!(largest_denormal_f64.scalbn(2047).is_infinite());
    assert!(largest_denormal_f64.scalbn(2098).is_infinite());
    assert!(largest_denormal_f64.scalbn(2099).is_infinite());
    assert!(double("0x1.ffffffffffffep-2").bitwise_eq(largest_denormal_f64.scalbn(1021)));
    assert!(double("0x1.ffffffffffffep-1").bitwise_eq(largest_denormal_f64.scalbn(1022)));
    assert!(double("0x1.ffffffffffffep+0").bitwise_eq(largest_denormal_f64.scalbn(1023)));
    assert!(double("0x1.ffffffffffffep+1023").bitwise_eq(largest_denormal_f64.scalbn(2046)));
    assert!(double("0x1p+974").bitwise_eq(smallest_f64.scalbn(2048)));

    let random_denormal_f64 = double("0x1.c60f120d9f87cp+51");
    assert!(double("0x1.c60f120d9f87cp-972").bitwise_eq(random_denormal_f64.scalbn(-1023)));
    assert!(double("0x1.c60f120d9f87cp-1").bitwise_eq(random_denormal_f64.scalbn(-52)));
    assert!(double("0x1.c60f120d9f87cp-2").bitwise_eq(random_denormal_f64.scalbn(-53)));
    assert!(double("0x1.c60f120d9f87cp+0").bitwise_eq(random_denormal_f64.scalbn(-51)));
    assert!(random_denormal_f64.scalbn(-2097).is_pos_zero());
    assert!(random_denormal_f64.scalbn(-2090).is_pos_zero());

    // Scaling the largest finite values far down lands in the denormal range
    // (or at zero), preserving sign.
    assert!(double("-0x1p-1073").bitwise_eq(neg_largest_f64.scalbn(-2097)));
    assert!(double("-0x1p-1024").bitwise_eq(neg_largest_f64.scalbn(-2048)));
    assert!(double("0x1p-1073").bitwise_eq(largest_f64.scalbn(-2097)));
    assert!(double("0x1p-1074").bitwise_eq(largest_f64.scalbn(-2098)));
    assert!(double("-0x1p-1074").bitwise_eq(neg_largest_f64.scalbn(-2098)));
    assert!(neg_largest_f64.scalbn(-2099).is_neg_zero());
    assert!(largest_f64.scalbn(1).is_infinite());

    assert!(double("0x1p+0").bitwise_eq(double("0x1p+52").scalbn(-52)));
    assert!(double("0x1p-103").bitwise_eq(double("0x1p-51").scalbn(-52)));
}
#[test]
fn frexp() {
    // `frexp` splits a value into a fraction and a power-of-two exponent
    // (written through `&mut exp`); zeros report exponent 0 and special
    // values report the IEK_* sentinel exponents.
    let double = |s: &str| s.parse::<Double>().unwrap();

    let p_zero = Double::ZERO;
    let m_zero = -Double::ZERO;
    let one = Double::from_f64(1.0);
    let m_one = Double::from_f64(-1.0);
    let largest_denormal = double("0x1.ffffffffffffep-1023");
    let neg_largest_denormal = double("-0x1.ffffffffffffep-1023");
    let smallest = Double::SMALLEST;
    let neg_smallest = -Double::SMALLEST;
    let largest = Double::largest();
    let neg_largest = -Double::largest();
    let p_inf = Double::INFINITY;
    let m_inf = -Double::INFINITY;
    let p_qnan = Double::NAN;
    let m_qnan = -Double::NAN;
    let snan = Double::snan(None);
    // NaN payload (with its highest bit set) that must survive quietening.
    let payload = (1 << 50) | (1 << 49) | (1234 << 32) | 1;
    let snan_with_payload = Double::snan(Some(payload));

    let mut exp = 0;

    let frac = p_zero.frexp(&mut exp);
    assert_eq!(0, exp);
    assert!(frac.is_pos_zero());

    let frac = m_zero.frexp(&mut exp);
    assert_eq!(0, exp);
    assert!(frac.is_neg_zero());

    let frac = one.frexp(&mut exp);
    assert_eq!(1, exp);
    assert!(double("0x1p-1").bitwise_eq(frac));

    let frac = m_one.frexp(&mut exp);
    assert_eq!(1, exp);
    assert!(double("-0x1p-1").bitwise_eq(frac));

    let frac = largest_denormal.frexp(&mut exp);
    assert_eq!(-1022, exp);
    assert!(double("0x1.ffffffffffffep-1").bitwise_eq(frac));

    let frac = neg_largest_denormal.frexp(&mut exp);
    assert_eq!(-1022, exp);
    assert!(double("-0x1.ffffffffffffep-1").bitwise_eq(frac));

    let frac = smallest.frexp(&mut exp);
    assert_eq!(-1073, exp);
    assert!(double("0x1p-1").bitwise_eq(frac));

    let frac = neg_smallest.frexp(&mut exp);
    assert_eq!(-1073, exp);
    assert!(double("-0x1p-1").bitwise_eq(frac));

    let frac = largest.frexp(&mut exp);
    assert_eq!(1024, exp);
    assert!(double("0x1.fffffffffffffp-1").bitwise_eq(frac));

    let frac = neg_largest.frexp(&mut exp);
    assert_eq!(1024, exp);
    assert!(double("-0x1.fffffffffffffp-1").bitwise_eq(frac));

    let frac = p_inf.frexp(&mut exp);
    assert_eq!(IEK_INF, exp);
    assert!(frac.is_infinite() && !frac.is_negative());

    let frac = m_inf.frexp(&mut exp);
    assert_eq!(IEK_INF, exp);
    assert!(frac.is_infinite() && frac.is_negative());

    let frac = p_qnan.frexp(&mut exp);
    assert_eq!(IEK_NAN, exp);
    assert!(frac.is_nan());

    let frac = m_qnan.frexp(&mut exp);
    assert_eq!(IEK_NAN, exp);
    assert!(frac.is_nan());

    // Signaling NaNs come back quietened...
    let frac = snan.frexp(&mut exp);
    assert_eq!(IEK_NAN, exp);
    assert!(frac.is_nan() && !frac.is_signaling());

    // ...with the payload bits intact.
    let frac = snan_with_payload.frexp(&mut exp);
    assert_eq!(IEK_NAN, exp);
    assert!(frac.is_nan() && !frac.is_signaling());
    assert_eq!(payload, frac.to_bits() & ((1 << 51) - 1));

    let frac = "0x0.ffffp-1".parse::<Double>().unwrap().frexp(&mut exp);
    assert_eq!(-1, exp);
    assert!(double("0x1.fffep-1").bitwise_eq(frac));

    let frac = "0x1p-51".parse::<Double>().unwrap().frexp(&mut exp);
    assert_eq!(-50, exp);
    assert!(double("0x1p-1").bitwise_eq(frac));

    let frac = "0x1.c60f120d9f87cp+51".parse::<Double>().unwrap().frexp(&mut exp);
    assert_eq!(52, exp);
    assert!(double("0x1.c60f120d9f87cp-1").bitwise_eq(frac));
}
#[test]
fn modulo() {
    // `%` on Double: finite cases here are exact (Status::OK), while
    // x % 0 and inf % y are invalid operations yielding NaN.
    let mut status;

    // (dividend, divisor, expected remainder) — all parsed as Double.
    let ok_cases = [
        ("1.5", "1.0", "0.5"),
        ("0.5", "1.0", "0.5"),
        // 0.3 % 0.01 == 0.009999999999999983, because neither 0.3 nor 0.01
        // is exactly representable.
        ("0x1.3333333333333p-2", "0x1.47ae147ae147bp-7", "0x1.47ae147ae1471p-7"),
        // 1.8446744073709552e19 % 1.5
        ("0x1p64", "1.5", "1.0"),
        ("0x1p1000", "0x1p-1000", "0.0"),
        ("0.0", "1.0", "0.0"),
    ];
    for &(a, b, want) in &ok_cases {
        let f1 = a.parse::<Double>().unwrap();
        let f2 = b.parse::<Double>().unwrap();
        let want = want.parse::<Double>().unwrap();
        assert!(unpack!(status=, f1 % f2).bitwise_eq(want));
        assert_eq!(status, Status::OK);
    }

    // Division-by-zero-style invalid cases: remainder with a zero divisor.
    for &(a, b) in &[("1.0", "0.0"), ("0.0", "0.0")] {
        let f1 = a.parse::<Double>().unwrap();
        let f2 = b.parse::<Double>().unwrap();
        assert!(unpack!(status=, f1 % f2).is_nan());
        assert_eq!(status, Status::INVALID_OP);
    }

    // An infinite dividend is also invalid.
    let f1 = Double::INFINITY;
    let f2 = "1.0".parse::<Double>().unwrap();
    assert!(unpack!(status=, f1 % f2).is_nan());
    assert_eq!(status, Status::INVALID_OP);
}
+655
View File
@@ -0,0 +1,655 @@
// Copyright 2017 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
extern crate rustc_apfloat;
use rustc_apfloat::{Category, Float, Round};
use rustc_apfloat::ppc::DoubleDouble;
use std::cmp::Ordering;
#[test]
fn ppc_double_double() {
    // Positive zero: all 128 bits clear.
    let value = DoubleDouble::ZERO;
    let want = "0x0p+0".parse::<DoubleDouble>().unwrap();
    assert!(value.is_zero());
    assert!(!value.is_negative());
    assert!(value.bitwise_eq(want));
    assert_eq!(0, value.to_bits());

    // Negative zero: only bit 63 (the low double's sign bit) is set.
    let value = -DoubleDouble::ZERO;
    let want = "-0x0p+0".parse::<DoubleDouble>().unwrap();
    assert!(value.is_zero());
    assert!(value.is_negative());
    assert!(value.bitwise_eq(want));
    assert_eq!(0x8000000000000000, value.to_bits());

    // 1.0 encodes as the f64 bit pattern of 1.0 in the low 64 bits.
    let value = "1.0".parse::<DoubleDouble>().unwrap();
    assert_eq!(0x3ff0000000000000, value.to_bits());

    // LDBL_MAX
    let value = "1.79769313486231580793728971405301e+308"
        .parse::<DoubleDouble>()
        .unwrap();
    assert_eq!(0x7c8ffffffffffffe_7fefffffffffffff, value.to_bits());

    // LDBL_MIN
    let value = "2.00416836000897277799610805135016e-292"
        .parse::<DoubleDouble>()
        .unwrap();
    assert_eq!(0x0000000000000000_0360000000000000, value.to_bits());
}
#[test]
fn ppc_double_double_add_special() {
    // (op1 bits, op2 bits, expected result category, rounding mode)
    let data = [
        // (1 + 0) + (-1 + 0) = Category::Zero
        (0x3ff0000000000000, 0xbff0000000000000, Category::Zero, Round::NearestTiesToEven),
        // LDBL_MAX + (1.1 >> (1023 - 106) + 0)) = Category::Infinity
        (
            0x7c8ffffffffffffe_7fefffffffffffff,
            0x7948000000000000,
            Category::Infinity,
            Round::NearestTiesToEven,
        ),
        // FIXME: change the 4th 0x75effffffffffffe to 0x75efffffffffffff when
        // DoubleDouble's fallback is gone.
        // LDBL_MAX + (1.011111... >> (1023 - 106) + (1.1111111...0 >> (1023 -
        // 160))) = Category::Normal
        (
            0x7c8ffffffffffffe_7fefffffffffffff,
            0x75effffffffffffe_7947ffffffffffff,
            Category::Normal,
            Round::NearestTiesToEven,
        ),
        // LDBL_MAX + (1.1 >> (1023 - 106) + 0)) = Category::Infinity
        (
            0x7c8ffffffffffffe_7fefffffffffffff,
            0x7c8ffffffffffffe_7fefffffffffffff,
            Category::Infinity,
            Round::NearestTiesToEven,
        ),
        // NaN + (1 + 0) = Category::NaN
        (0x7ff8000000000000, 0x3ff0000000000000, Category::NaN, Round::NearestTiesToEven),
    ];
    for &(op1, op2, expected, round) in &data {
        // The result category must be the same for both operand orders.
        {
            let lhs = DoubleDouble::from_bits(op1);
            let rhs = DoubleDouble::from_bits(op2);
            let sum = lhs.add_r(rhs, round).value;
            assert_eq!(expected, sum.category(), "{:#x} + {:#x}", op1, op2);
        }
        {
            let lhs = DoubleDouble::from_bits(op2);
            let rhs = DoubleDouble::from_bits(op1);
            let sum = lhs.add_r(rhs, round).value;
            assert_eq!(expected, sum.category(), "{:#x} + {:#x}", op2, op1);
        }
    }
}
// Exact bit results of `add_r`; each case is checked in both operand
// orders since addition is commutative.
#[test]
fn ppc_double_double_add() {
    let data = [
        // (1 + 0) + (1e-105 + 0) = (1 + 1e-105)
        (
            0x3ff0000000000000,
            0x3960000000000000,
            0x3960000000000000_3ff0000000000000,
            Round::NearestTiesToEven,
        ),
        // (1 + 0) + (1e-106 + 0) = (1 + 1e-106)
        (
            0x3ff0000000000000,
            0x3950000000000000,
            0x3950000000000000_3ff0000000000000,
            Round::NearestTiesToEven,
        ),
        // (1 + 1e-106) + (1e-106 + 0) = (1 + 1e-105)
        (
            0x3950000000000000_3ff0000000000000,
            0x3950000000000000,
            0x3960000000000000_3ff0000000000000,
            Round::NearestTiesToEven,
        ),
        // (1 + 0) + (epsilon + 0) = (1 + epsilon)
        (
            0x3ff0000000000000,
            0x0000000000000001,
            0x0000000000000001_3ff0000000000000,
            Round::NearestTiesToEven,
        ),
        // FIXME: change 0xf950000000000000 to 0xf940000000000000, when
        // DoubleDouble's fallback is gone.
        // (DBL_MAX - 1 << (1023 - 105)) + (1 << (1023 - 53) + 0) = DBL_MAX +
        // 1.11111... << (1023 - 52)
        (
            0xf950000000000000_7fefffffffffffff,
            0x7c90000000000000,
            0x7c8ffffffffffffe_7fefffffffffffff,
            Round::NearestTiesToEven,
        ),
        // FIXME: change 0xf950000000000000 to 0xf940000000000000, when
        // DoubleDouble's fallback is gone.
        // (1 << (1023 - 53) + 0) + (DBL_MAX - 1 << (1023 - 105)) = DBL_MAX +
        // 1.11111... << (1023 - 52)
        (
            0x7c90000000000000,
            0xf950000000000000_7fefffffffffffff,
            0x7c8ffffffffffffe_7fefffffffffffff,
            Round::NearestTiesToEven,
        ),
    ];
    for &(op1, op2, expected, round) in data.iter() {
        // One direction of the (commutative) addition.
        let check = |lhs: u128, rhs: u128| {
            let sum = DoubleDouble::from_bits(lhs)
                .add_r(DoubleDouble::from_bits(rhs), round)
                .value;
            assert_eq!(expected, sum.to_bits(), "{:#x} + {:#x}", lhs, rhs);
        };
        check(op1, op2);
        check(op2, op1);
    }
}
// Exact bit results of `sub_r` for a couple of small-tail cases.
#[test]
fn ppc_double_double_subtract() {
    let data = [
        // (1 + 0) - (-1e-105 + 0) = (1 + 1e-105)
        (
            0x3ff0000000000000,
            0xb960000000000000,
            0x3960000000000000_3ff0000000000000,
            Round::NearestTiesToEven,
        ),
        // (1 + 0) - (-1e-106 + 0) = (1 + 1e-106)
        (
            0x3ff0000000000000,
            0xb950000000000000,
            0x3950000000000000_3ff0000000000000,
            Round::NearestTiesToEven,
        ),
    ];
    for &(lhs_bits, rhs_bits, want, round) in data.iter() {
        let diff = DoubleDouble::from_bits(lhs_bits)
            .sub_r(DoubleDouble::from_bits(rhs_bits), round)
            .value;
        assert_eq!(want, diff.to_bits(), "{:#x} - {:#x}", lhs_bits, rhs_bits);
    }
}
// Category outcomes of `mul_r` on special operands; each case is checked
// in both operand orders since multiplication is commutative.
#[test]
fn ppc_double_double_multiply_special() {
    let data = [
        // Category::NaN * Category::NaN = Category::NaN
        (
            0x7ff8000000000000,
            0x7ff8000000000000,
            Category::NaN,
            Round::NearestTiesToEven,
        ),
        // Category::NaN * Category::Zero = Category::NaN
        (
            0x7ff8000000000000,
            0,
            Category::NaN,
            Round::NearestTiesToEven,
        ),
        // Category::NaN * Category::Infinity = Category::NaN
        (
            0x7ff8000000000000,
            0x7ff0000000000000,
            Category::NaN,
            Round::NearestTiesToEven,
        ),
        // Category::NaN * Category::Normal = Category::NaN
        (
            0x7ff8000000000000,
            0x3ff0000000000000,
            Category::NaN,
            Round::NearestTiesToEven,
        ),
        // Category::Infinity * Category::Infinity = Category::Infinity
        (
            0x7ff0000000000000,
            0x7ff0000000000000,
            Category::Infinity,
            Round::NearestTiesToEven,
        ),
        // Category::Infinity * Category::Zero = Category::NaN
        (
            0x7ff0000000000000,
            0,
            Category::NaN,
            Round::NearestTiesToEven,
        ),
        // Category::Infinity * Category::Normal = Category::Infinity
        (
            0x7ff0000000000000,
            0x3ff0000000000000,
            Category::Infinity,
            Round::NearestTiesToEven,
        ),
        // Category::Zero * Category::Zero = Category::Zero
        (0, 0, Category::Zero, Round::NearestTiesToEven),
        // Category::Zero * Category::Normal = Category::Zero
        (
            0,
            0x3ff0000000000000,
            Category::Zero,
            Round::NearestTiesToEven,
        ),
    ];
    for &(op1, op2, expected, round) in data.iter() {
        // One direction of the (commutative) multiplication.
        let check = |lhs: u128, rhs: u128| {
            let product = DoubleDouble::from_bits(lhs)
                .mul_r(DoubleDouble::from_bits(rhs), round)
                .value;
            assert_eq!(expected, product.category(), "{:#x} * {:#x}", lhs, rhs);
        };
        check(op1, op2);
        check(op2, op1);
    }
}
// Exact bit results of `mul_r`; each case is checked in both operand
// orders since multiplication is commutative.
#[test]
fn ppc_double_double_multiply() {
    let data = [
        // 1/3 * 3 = 1.0
        (
            0x3c75555555555556_3fd5555555555555,
            0x4008000000000000,
            0x3ff0000000000000,
            Round::NearestTiesToEven,
        ),
        // (1 + epsilon) * (1 + 0) = Category::Zero
        (
            0x0000000000000001_3ff0000000000000,
            0x3ff0000000000000,
            0x0000000000000001_3ff0000000000000,
            Round::NearestTiesToEven,
        ),
        // (1 + epsilon) * (1 + epsilon) = 1 + 2 * epsilon
        (
            0x0000000000000001_3ff0000000000000,
            0x0000000000000001_3ff0000000000000,
            0x0000000000000002_3ff0000000000000,
            Round::NearestTiesToEven,
        ),
        // -(1 + epsilon) * (1 + epsilon) = -1
        (
            0x0000000000000001_bff0000000000000,
            0x0000000000000001_3ff0000000000000,
            0xbff0000000000000,
            Round::NearestTiesToEven,
        ),
        // (0.5 + 0) * (1 + 2 * epsilon) = 0.5 + epsilon
        (
            0x3fe0000000000000,
            0x0000000000000002_3ff0000000000000,
            0x0000000000000001_3fe0000000000000,
            Round::NearestTiesToEven,
        ),
        // (0.5 + 0) * (1 + epsilon) = 0.5
        (
            0x3fe0000000000000,
            0x0000000000000001_3ff0000000000000,
            0x3fe0000000000000,
            Round::NearestTiesToEven,
        ),
        // __LDBL_MAX__ * (1 + 1 << 106) = inf
        (
            0x7c8ffffffffffffe_7fefffffffffffff,
            0x3950000000000000_3ff0000000000000,
            0x7ff0000000000000,
            Round::NearestTiesToEven,
        ),
        // __LDBL_MAX__ * (1 + 1 << 107) > __LDBL_MAX__, but not inf, yes =_=|||
        (
            0x7c8ffffffffffffe_7fefffffffffffff,
            0x3940000000000000_3ff0000000000000,
            0x7c8fffffffffffff_7fefffffffffffff,
            Round::NearestTiesToEven,
        ),
        // __LDBL_MAX__ * (1 + 1 << 108) = __LDBL_MAX__
        (
            0x7c8ffffffffffffe_7fefffffffffffff,
            0x3930000000000000_3ff0000000000000,
            0x7c8ffffffffffffe_7fefffffffffffff,
            Round::NearestTiesToEven,
        ),
    ];
    for &(op1, op2, expected, round) in data.iter() {
        // One direction of the (commutative) multiplication.
        let check = |lhs: u128, rhs: u128| {
            let product = DoubleDouble::from_bits(lhs)
                .mul_r(DoubleDouble::from_bits(rhs), round)
                .value;
            assert_eq!(expected, product.to_bits(), "{:#x} * {:#x}", lhs, rhs);
        };
        check(op1, op2);
        check(op2, op1);
    }
}
// FIXME: Only a sanity check for now. Add more edge cases when the
// double-double algorithm is implemented.
#[test]
fn ppc_double_double_divide() {
    let data = [
        // 1 / 3 = 1/3
        (
            0x3ff0000000000000,
            0x4008000000000000,
            0x3c75555555555556_3fd5555555555555,
            Round::NearestTiesToEven,
        ),
    ];
    for &(lhs_bits, rhs_bits, want, round) in data.iter() {
        let quotient = DoubleDouble::from_bits(lhs_bits)
            .div_r(DoubleDouble::from_bits(rhs_bits), round)
            .value;
        assert_eq!(want, quotient.to_bits(), "{:#x} / {:#x}", lhs_bits, rhs_bits);
    }
}
// Exact bit results of IEEE remainder (round-to-nearest quotient).
#[test]
fn ppc_double_double_remainder() {
    let data = [
        // ieee_rem(3.0 + 3.0 << 53, 1.25 + 1.25 << 53) = (0.5 + 0.5 << 53)
        (
            0x3cb8000000000000_4008000000000000,
            0x3ca4000000000000_3ff4000000000000,
            0x3c90000000000000_3fe0000000000000,
        ),
        // ieee_rem(3.0 + 3.0 << 53, 1.75 + 1.75 << 53) = (-0.5 - 0.5 << 53)
        (
            0x3cb8000000000000_4008000000000000,
            0x3cac000000000000_3ffc000000000000,
            0xbc90000000000000_bfe0000000000000,
        ),
    ];
    for &(lhs_bits, rhs_bits, want) in data.iter() {
        let rem = DoubleDouble::from_bits(lhs_bits)
            .ieee_rem(DoubleDouble::from_bits(rhs_bits))
            .value;
        assert_eq!(
            want,
            rem.to_bits(),
            "ieee_rem({:#x}, {:#x})",
            lhs_bits,
            rhs_bits
        );
    }
}
// Exact bit results of the `%` operator (fmod-style remainder).
#[test]
fn ppc_double_double_mod() {
    let data = [
        // mod(3.0 + 3.0 << 53, 1.25 + 1.25 << 53) = (0.5 + 0.5 << 53)
        (
            0x3cb8000000000000_4008000000000000,
            0x3ca4000000000000_3ff4000000000000,
            0x3c90000000000000_3fe0000000000000,
        ),
        // mod(3.0 + 3.0 << 53, 1.75 + 1.75 << 53) = (1.25 + 1.25 << 53)
        // 0xbc98000000000000 doesn't seem right, but it's what we currently have.
        // FIXME: investigate
        (
            0x3cb8000000000000_4008000000000000,
            0x3cac000000000000_3ffc000000000000,
            0xbc98000000000000_3ff4000000000001,
        ),
    ];
    for &(lhs_bits, rhs_bits, want) in data.iter() {
        let lhs = DoubleDouble::from_bits(lhs_bits);
        let rhs = DoubleDouble::from_bits(rhs_bits);
        let rem = (lhs % rhs).value;
        assert_eq!(want, rem.to_bits(), "fmod({:#x}, {:#x})", lhs_bits, rhs_bits);
    }
}
// Sanity check for now: 2 * 3 + 4 compares equal to 10.
#[test]
fn ppc_double_double_fma() {
    let parse = |s: &str| s.parse::<DoubleDouble>().unwrap();
    let result = parse("2").mul_add(parse("3"), parse("4")).value;
    assert_eq!(Some(Ordering::Equal), parse("10").partial_cmp(&result));
}
// Ties-to-even rounding: 1.5 rounds up to 2 and 2.5 rounds down to 2.
#[test]
fn ppc_double_double_round_to_integral() {
    for &input in &["1.5", "2.5"] {
        let rounded = input
            .parse::<DoubleDouble>()
            .unwrap()
            .round_to_integral(Round::NearestTiesToEven)
            .value;
        assert_eq!(
            Some(Ordering::Equal),
            "2".parse::<DoubleDouble>().unwrap().partial_cmp(&rounded),
            "round_to_integral({})",
            input
        );
    }
}
// `partial_cmp` over bit patterns, including NaN (unordered) cases.
#[test]
fn ppc_double_double_compare() {
    let data = [
        // (1 + 0) = (1 + 0)
        (
            0x3ff0000000000000,
            0x3ff0000000000000,
            Some(Ordering::Equal),
        ),
        // (1 + 0) < (1.00...1 + 0)
        (0x3ff0000000000000, 0x3ff0000000000001, Some(Ordering::Less)),
        // (1.00...1 + 0) > (1 + 0)
        (
            0x3ff0000000000001,
            0x3ff0000000000000,
            Some(Ordering::Greater),
        ),
        // (1 + 0) < (1 + epsilon)
        (
            0x3ff0000000000000,
            0x0000000000000001_3ff0000000000001,
            Some(Ordering::Less),
        ),
        // NaN != NaN
        (0x7ff8000000000000, 0x7ff8000000000000, None),
        // (1 + 0) != NaN
        (0x3ff0000000000000, 0x7ff8000000000000, None),
        // Inf = Inf
        (
            0x7ff0000000000000,
            0x7ff0000000000000,
            Some(Ordering::Equal),
        ),
    ];
    for &(lhs_bits, rhs_bits, want) in data.iter() {
        let lhs = DoubleDouble::from_bits(lhs_bits);
        let rhs = DoubleDouble::from_bits(rhs_bits);
        assert_eq!(
            want,
            lhs.partial_cmp(&rhs),
            "compare({:#x}, {:#x})",
            lhs_bits,
            rhs_bits,
        );
    }
}
// `bitwise_eq` compares representations, so identical NaN encodings are
// equal while distinct NaN encodings are not.
#[test]
fn ppc_double_double_bitwise_eq() {
    let data = [
        // (1 + 0) = (1 + 0)
        (0x3ff0000000000000, 0x3ff0000000000000, true),
        // (1 + 0) != (1.00...1 + 0)
        (0x3ff0000000000000, 0x3ff0000000000001, false),
        // NaN = NaN
        (0x7ff8000000000000, 0x7ff8000000000000, true),
        // NaN != NaN with a different bit pattern
        (
            0x7ff8000000000000,
            0x3ff0000000000000_7ff8000000000000,
            false,
        ),
        // Inf = Inf
        (0x7ff0000000000000, 0x7ff0000000000000, true),
    ];
    for &(lhs_bits, rhs_bits, want) in data.iter() {
        let lhs = DoubleDouble::from_bits(lhs_bits);
        let rhs = DoubleDouble::from_bits(rhs_bits);
        assert_eq!(want, lhs.bitwise_eq(rhs), "{:#x} = {:#x}", lhs_bits, rhs_bits);
    }
}
// `copy_sign` takes the sign of its argument and applies it to both
// halves of the double-double representation.
#[test]
fn ppc_double_double_change_sign() {
    let float = DoubleDouble::from_bits(0xbcb0000000000000_400f000000000000);
    let pos_one = "1".parse::<DoubleDouble>().unwrap();
    let neg_one = "-1".parse::<DoubleDouble>().unwrap();
    // Copying a positive sign leaves the value unchanged.
    assert_eq!(
        0xbcb0000000000000_400f000000000000,
        float.copy_sign(pos_one).to_bits()
    );
    // Copying a negative sign flips the sign bit of both halves.
    assert_eq!(
        0x3cb0000000000000_c00f000000000000,
        float.copy_sign(neg_one).to_bits()
    );
}
// Bit patterns of the factory constants, each paired with its negation.
#[test]
fn ppc_double_double_factories() {
    assert_eq!(0, DoubleDouble::ZERO.to_bits());
    assert_eq!(
        0x0000000000000000_8000000000000000,
        (-DoubleDouble::ZERO).to_bits()
    );

    assert_eq!(
        0x7c8ffffffffffffe_7fefffffffffffff,
        DoubleDouble::largest().to_bits()
    );
    assert_eq!(
        0xfc8ffffffffffffe_ffefffffffffffff,
        (-DoubleDouble::largest()).to_bits()
    );

    assert_eq!(0x0000000000000001, DoubleDouble::SMALLEST.to_bits());
    assert_eq!(
        0x0000000000000000_8000000000000001,
        (-DoubleDouble::SMALLEST).to_bits()
    );

    assert_eq!(
        0x0360000000000000,
        DoubleDouble::smallest_normalized().to_bits()
    );
    assert_eq!(
        0x0000000000000000_8360000000000000,
        (-DoubleDouble::smallest_normalized()).to_bits()
    );

    // The factory values satisfy their own predicates.
    assert!(DoubleDouble::SMALLEST.is_smallest());
    assert!(DoubleDouble::largest().is_largest());
}
// `is_denormal` holds for the smallest value and for non-normalized
// encodings, but not for largest/smallest-normalized.
#[test]
fn ppc_double_double_is_denormal() {
    assert!(DoubleDouble::SMALLEST.is_denormal());
    assert!(!DoubleDouble::largest().is_denormal());
    assert!(!DoubleDouble::smallest_normalized().is_denormal());
    // (4 + 3) is not normalized, so it is reported as denormal.
    let unnormalized = DoubleDouble::from_bits(0x4008000000000000_4010000000000000);
    assert!(unnormalized.is_denormal());
}
// 2.0 has an exactly representable reciprocal, 0.5.
#[test]
fn ppc_double_double_exact_inverse() {
    let two = "2.0".parse::<DoubleDouble>().unwrap();
    let half = "0.5".parse::<DoubleDouble>().unwrap();
    let inverse = two.get_exact_inverse().unwrap();
    assert!(inverse.bitwise_eq(half));
}
// scalbn(x, 1) doubles both components:
// (3.0 + 3.0 << 53) becomes (6.0 + 6.0 << 53).
#[test]
fn ppc_double_double_scalbn() {
    let scaled = DoubleDouble::from_bits(0x3cb8000000000000_4008000000000000).scalbn(1);
    assert_eq!(0x3cc8000000000000_4018000000000000, scaled.to_bits());
}
// frexp(3.0 + 3.0 << 53) = (0.75 + 0.75 << 53) * 2^2.
#[test]
fn ppc_double_double_frexp() {
    let mut exp = 0;
    let fraction = DoubleDouble::from_bits(0x3cb8000000000000_4008000000000000)
        .frexp(&mut exp);
    assert_eq!(2, exp);
    assert_eq!(0x3c98000000000000_3fe8000000000000, fraction.to_bits());
}
+32
View File
@@ -0,0 +1,32 @@
// Copyright 2017 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use PanicStrategy;
use LinkerFlavor;
use target::{LinkArgs, TargetOptions};
use std::default::Default;
/// Base `TargetOptions` shared by L4Re targets: executables linked
/// directly with `ld` and `-nostdlib`, no ELF TLS, the system allocator
/// crate, and abort-on-panic.
pub fn opts() -> TargetOptions {
    let mut args = LinkArgs::new();
    args.insert(LinkerFlavor::Ld, vec!["-nostdlib".to_string()]);

    TargetOptions {
        executables: true,
        has_elf_tls: false,
        exe_allocation_crate: Some("alloc_system".to_string()),
        panic_strategy: PanicStrategy::Abort,
        linker: "ld".to_string(),
        pre_link_args: args,
        target_family: Some("unix".to_string()),
        .. Default::default()
    }
}
+3
View File
@@ -69,6 +69,7 @@
mod windows_base;
mod windows_msvc_base;
mod thumb_base;
mod l4re_base;
mod fuchsia_base;
mod redox_base;
@@ -193,6 +194,8 @@ fn $module() {
("aarch64-unknown-fuchsia", aarch64_unknown_fuchsia),
("x86_64-unknown-fuchsia", x86_64_unknown_fuchsia),
("x86_64-unknown-l4re-uclibc", x86_64_unknown_l4re_uclibc),
("x86_64-unknown-redox", x86_64_unknown_redox),
("i386-apple-ios", i386_apple_ios),
@@ -0,0 +1,31 @@
// Copyright 2017 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use LinkerFlavor;
use target::{Target, TargetResult};
/// Target specification for `x86_64-unknown-l4re-uclibc`, built on the
/// shared L4Re base options with an x86-64 CPU and 64-bit atomics.
pub fn target() -> TargetResult {
    let mut options = super::l4re_base::opts();
    options.cpu = "x86-64".to_string();
    options.max_atomic_width = Some(64);

    Ok(Target {
        llvm_target: "x86_64-unknown-l4re-uclibc".to_string(),
        target_endian: "little".to_string(),
        target_pointer_width: "64".to_string(),
        data_layout: "e-m:e-i64:64-f80:128-n8:16:32:64-S128".to_string(),
        arch: "x86_64".to_string(),
        target_os: "l4re".to_string(),
        target_env: "uclibc".to_string(),
        target_vendor: "unknown".to_string(),
        linker_flavor: LinkerFlavor::Ld,
        options: options,
    })
}
+43 -21
View File
@@ -111,19 +111,28 @@ fn borrowck<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, owner_def_id: DefId) {
// is not yet stolen.
tcx.mir_validated(owner_def_id).borrow();
let cfg = cfg::CFG::new(bccx.tcx, &body);
let AnalysisData { all_loans,
loans: loan_dfcx,
move_data: flowed_moves } =
build_borrowck_dataflow_data(bccx, &cfg, body_id);
check_loans::check_loans(bccx, &loan_dfcx, &flowed_moves, &all_loans, body);
// option dance because you can't capture an uninitialized variable
// by mut-ref.
let mut cfg = None;
if let Some(AnalysisData { all_loans,
loans: loan_dfcx,
move_data: flowed_moves }) =
build_borrowck_dataflow_data(bccx, false, body_id,
|bccx| {
cfg = Some(cfg::CFG::new(bccx.tcx, &body));
cfg.as_mut().unwrap()
})
{
check_loans::check_loans(bccx, &loan_dfcx, &flowed_moves, &all_loans, body);
}
}
fn build_borrowck_dataflow_data<'a, 'tcx>(this: &mut BorrowckCtxt<'a, 'tcx>,
cfg: &cfg::CFG,
body_id: hir::BodyId)
-> AnalysisData<'a, 'tcx>
fn build_borrowck_dataflow_data<'a, 'c, 'tcx, F>(this: &mut BorrowckCtxt<'a, 'tcx>,
force_analysis: bool,
body_id: hir::BodyId,
get_cfg: F)
-> Option<AnalysisData<'a, 'tcx>>
where F: FnOnce(&mut BorrowckCtxt<'a, 'tcx>) -> &'c cfg::CFG
{
// Check the body of fn items.
let tcx = this.tcx;
@@ -135,6 +144,18 @@ fn build_borrowck_dataflow_data<'a, 'tcx>(this: &mut BorrowckCtxt<'a, 'tcx>,
let (all_loans, move_data) =
gather_loans::gather_loans_in_fn(this, body_id);
if !force_analysis && move_data.is_empty() && all_loans.is_empty() {
// large arrays of data inserted as constants can take a lot of
// time and memory to borrow-check - see issue #36799. However,
// they don't have lvalues, so no borrow-check is actually needed.
// Recognize that case and skip borrow-checking.
debug!("skipping loan propagation for {:?} because of no loans", body_id);
return None;
} else {
debug!("propagating loans in {:?}", body_id);
}
let cfg = get_cfg(this);
let mut loan_dfcx =
DataFlowContext::new(this.tcx,
"borrowck",
@@ -157,9 +178,9 @@ fn build_borrowck_dataflow_data<'a, 'tcx>(this: &mut BorrowckCtxt<'a, 'tcx>,
id_range,
this.body);
AnalysisData { all_loans: all_loans,
loans: loan_dfcx,
move_data:flowed_moves }
Some(AnalysisData { all_loans: all_loans,
loans: loan_dfcx,
move_data:flowed_moves })
}
/// Accessor for introspective clients inspecting `AnalysisData` and
@@ -177,8 +198,8 @@ pub fn build_borrowck_dataflow_data_for_fn<'a, 'tcx>(
let body = tcx.hir.body(body_id);
let mut bccx = BorrowckCtxt { tcx, tables, region_maps, owner_def_id, body };
let dataflow_data = build_borrowck_dataflow_data(&mut bccx, cfg, body_id);
(bccx, dataflow_data)
let dataflow_data = build_borrowck_dataflow_data(&mut bccx, true, body_id, |_| cfg);
(bccx, dataflow_data.unwrap())
}
// ----------------------------------------------------------------------
@@ -1072,14 +1093,15 @@ fn suggest_mut_for_immutable(&self, pty: &hir::Ty, is_implicit_self: bool) -> Op
}
}
fn local_binding_mode(&self, node_id: ast::NodeId) -> hir::BindingMode {
fn local_binding_mode(&self, node_id: ast::NodeId) -> ty::BindingMode {
let pat = match self.tcx.hir.get(node_id) {
hir_map::Node::NodeLocal(pat) => pat,
node => bug!("bad node for local: {:?}", node)
};
match pat.node {
hir::PatKind::Binding(mode, ..) => mode,
hir::PatKind::Binding(..) =>
*self.tables.pat_binding_modes.get(&pat.id).expect("missing binding mode"),
_ => bug!("local is not a binding: {:?}", pat)
}
}
@@ -1114,7 +1136,7 @@ fn note_immutability_blame(&self,
Some(ImmutabilityBlame::ClosureEnv(_)) => {}
Some(ImmutabilityBlame::ImmLocal(node_id)) => {
let let_span = self.tcx.hir.span(node_id);
if let hir::BindingMode::BindByValue(..) = self.local_binding_mode(node_id) {
if let ty::BindByValue(..) = self.local_binding_mode(node_id) {
if let Ok(snippet) = self.tcx.sess.codemap().span_to_snippet(let_span) {
let (_, is_implicit_self) = self.local_ty(node_id);
if is_implicit_self && snippet != "self" {
@@ -1131,7 +1153,7 @@ fn note_immutability_blame(&self,
Some(ImmutabilityBlame::LocalDeref(node_id)) => {
let let_span = self.tcx.hir.span(node_id);
match self.local_binding_mode(node_id) {
hir::BindingMode::BindByRef(..) => {
ty::BindByReference(..) => {
let snippet = self.tcx.sess.codemap().span_to_snippet(let_span);
if let Ok(snippet) = snippet {
db.span_label(
@@ -1141,7 +1163,7 @@ fn note_immutability_blame(&self,
);
}
}
hir::BindingMode::BindByValue(..) => {
ty::BindByValue(..) => {
if let (Some(local_ty), is_implicit_self) = self.local_ty(node_id) {
if let Some(msg) =
self.suggest_mut_for_immutable(local_ty, is_implicit_self) {
@@ -220,6 +220,15 @@ pub fn new() -> MoveData<'tcx> {
}
}
/// return true if there are no trackable assignments or moves
/// in this move data - that means that there is nothing that
/// could cause a borrow error.
pub fn is_empty(&self) -> bool {
self.moves.borrow().is_empty() &&
self.path_assignments.borrow().is_empty() &&
self.var_assignments.borrow().is_empty()
}
pub fn path_loan_path(&self, index: MovePathIndex) -> Rc<LoanPath<'tcx>> {
(*self.paths.borrow())[index.get()].loan_path.clone()
}
+18 -1
View File
@@ -1132,6 +1132,24 @@ fn main() {
```
"##,
E0595: r##"
Closures cannot mutate immutable captured variables.
Erroneous code example:
```compile_fail,E0595
let x = 3; // error: closure cannot assign to immutable local variable `x`
let mut c = || { x += 1 };
```
Make the variable binding mutable:
```
let mut x = 3; // ok!
let mut c = || { x += 1 };
```
"##,
E0596: r##"
This error occurs because you tried to mutably borrow a non-mutable variable.
@@ -1275,6 +1293,5 @@ struct Foo<'a> {
// E0385, // {} in an aliasable location
E0524, // two closures require unique access to `..` at the same time
E0594, // cannot assign to {}
E0595, // closure cannot assign to {}
E0598, // lifetime of {} is too short to guarantee its contents can be...
}
+19 -7
View File
@@ -268,7 +268,12 @@ fn check_irrefutable(&self, pat: &Pat, origin: &str) {
fn check_for_bindings_named_the_same_as_variants(cx: &MatchVisitor, pat: &Pat) {
pat.walk(|p| {
if let PatKind::Binding(hir::BindByValue(hir::MutImmutable), _, name, None) = p.node {
if let PatKind::Binding(_, _, name, None) = p.node {
let bm = *cx.tables.pat_binding_modes.get(&p.id).expect("missing binding mode");
if bm != ty::BindByValue(hir::MutImmutable) {
// Nothing to check.
return true;
}
let pat_ty = cx.tables.pat_ty(p);
if let ty::TyAdt(edef, _) = pat_ty.sty {
if edef.is_enum() && edef.variants.iter().any(|variant| {
@@ -452,8 +457,9 @@ fn check_legality_of_move_bindings(cx: &MatchVisitor,
pats: &[P<Pat>]) {
let mut by_ref_span = None;
for pat in pats {
pat.each_binding(|bm, _, span, _path| {
if let hir::BindByRef(..) = bm {
pat.each_binding(|_, id, span, _path| {
let bm = *cx.tables.pat_binding_modes.get(&id).expect("missing binding mode");
if let ty::BindByReference(..) = bm {
by_ref_span = Some(span);
}
})
@@ -484,10 +490,16 @@ fn check_legality_of_move_bindings(cx: &MatchVisitor,
for pat in pats {
pat.walk(|p| {
if let PatKind::Binding(hir::BindByValue(..), _, _, ref sub) = p.node {
let pat_ty = cx.tables.node_id_to_type(p.id);
if pat_ty.moves_by_default(cx.tcx, cx.param_env, pat.span) {
check_move(p, sub.as_ref().map(|p| &**p));
if let PatKind::Binding(_, _, _, ref sub) = p.node {
let bm = *cx.tables.pat_binding_modes.get(&p.id).expect("missing binding mode");
match bm {
ty::BindByValue(..) => {
let pat_ty = cx.tables.node_id_to_type(p.id);
if pat_ty.moves_by_default(cx.tcx, cx.param_env, pat.span) {
check_move(p, sub.as_ref().map(|p| &**p));
}
}
_ => {}
}
}
true
+27 -28
View File
@@ -26,6 +26,7 @@
use syntax::abi::Abi;
use syntax::ast;
use syntax::attr;
use rustc::hir::{self, Expr};
use syntax_pos::Span;
@@ -560,8 +561,15 @@ fn cast_const_int<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
ty::TyUint(ast::UintTy::Us) => {
Ok(Integral(Usize(ConstUsize::new_truncating(v, tcx.sess.target.uint_type))))
},
ty::TyFloat(ast::FloatTy::F64) => Ok(Float(F64(val.to_f64()))),
ty::TyFloat(ast::FloatTy::F32) => Ok(Float(F32(val.to_f32()))),
ty::TyFloat(fty) => {
if let Some(i) = val.to_u128() {
Ok(Float(ConstFloat::from_u128(i, fty)))
} else {
// The value must be negative, go through signed integers.
let i = val.to_u128_unchecked() as i128;
Ok(Float(ConstFloat::from_i128(i, fty)))
}
}
ty::TyRawPtr(_) => Err(ErrKind::UnimplementedConstVal("casting an address to a raw ptr")),
ty::TyChar => match val {
U8(u) => Ok(Char(u as char)),
@@ -574,30 +582,25 @@ fn cast_const_int<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
fn cast_const_float<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
val: ConstFloat,
ty: Ty<'tcx>) -> CastResult<'tcx> {
let int_width = |ty| {
ty::layout::Integer::from_attr(tcx, ty).size().bits() as usize
};
match ty.sty {
ty::TyInt(_) | ty::TyUint(_) => {
let i = match val {
F32(f) if f >= 0.0 => U128(f as u128),
F64(f) if f >= 0.0 => U128(f as u128),
F32(f) => I128(f as i128),
F64(f) => I128(f as i128)
};
if let (I128(_), &ty::TyUint(_)) = (i, &ty.sty) {
return Err(CannotCast);
ty::TyInt(ity) => {
if let Some(i) = val.to_i128(int_width(attr::SignedInt(ity))) {
cast_const_int(tcx, I128(i), ty)
} else {
Err(CannotCast)
}
cast_const_int(tcx, i, ty)
}
ty::TyFloat(ast::FloatTy::F64) => Ok(Float(F64(match val {
F32(f) => f as f64,
F64(f) => f
}))),
ty::TyFloat(ast::FloatTy::F32) => Ok(Float(F32(match val {
F64(f) => f as f32,
F32(f) => f
}))),
ty::TyUint(uty) => {
if let Some(i) = val.to_u128(int_width(attr::UnsignedInt(uty))) {
cast_const_int(tcx, U128(i), ty)
} else {
Err(CannotCast)
}
}
ty::TyFloat(fty) => Ok(Float(val.convert(fty))),
_ => Err(CannotCast),
}
}
@@ -691,11 +694,7 @@ fn lit_to_const<'a, 'tcx>(lit: &ast::LitKind,
fn parse_float<'tcx>(num: &str, fty: ast::FloatTy)
-> Result<ConstFloat, ErrKind<'tcx>> {
let val = match fty {
ast::FloatTy::F32 => num.parse::<f32>().map(F32),
ast::FloatTy::F64 => num.parse::<f64>().map(F64)
};
val.map_err(|_| {
ConstFloat::from_str(num, fty).map_err(|_| {
// FIXME(#31407) this is only necessary because float parsing is buggy
UnimplementedConstVal("could not evaluate float literal (see issue #31407)")
})
+12 -8
View File
@@ -374,27 +374,31 @@ pub fn lower_pattern(&mut self, pat: &hir::Pat) -> Pattern<'tcx> {
}
}
PatKind::Binding(bm, def_id, ref ident, ref sub) => {
PatKind::Binding(_, def_id, ref ident, ref sub) => {
let id = self.tcx.hir.as_local_node_id(def_id).unwrap();
let var_ty = self.tables.node_id_to_type(pat.id);
let region = match var_ty.sty {
ty::TyRef(r, _) => Some(r),
_ => None,
};
let bm = *self.tables.pat_binding_modes.get(&pat.id)
.expect("missing binding mode");
let (mutability, mode) = match bm {
hir::BindByValue(hir::MutMutable) =>
ty::BindByValue(hir::MutMutable) =>
(Mutability::Mut, BindingMode::ByValue),
hir::BindByValue(hir::MutImmutable) =>
ty::BindByValue(hir::MutImmutable) =>
(Mutability::Not, BindingMode::ByValue),
hir::BindByRef(hir::MutMutable) =>
(Mutability::Not, BindingMode::ByRef(region.unwrap(), BorrowKind::Mut)),
hir::BindByRef(hir::MutImmutable) =>
(Mutability::Not, BindingMode::ByRef(region.unwrap(), BorrowKind::Shared)),
ty::BindByReference(hir::MutMutable) =>
(Mutability::Not, BindingMode::ByRef(
region.unwrap(), BorrowKind::Mut)),
ty::BindByReference(hir::MutImmutable) =>
(Mutability::Not, BindingMode::ByRef(
region.unwrap(), BorrowKind::Shared)),
};
// A ref x pattern is the same node used for x, and as such it has
// x's type, which is &T, where we want T (the type being matched).
if let hir::BindByRef(_) = bm {
if let ty::BindByReference(_) = bm {
if let ty::TyRef(_, mt) = ty.sty {
ty = mt.ty;
} else {
+1
View File
@@ -9,5 +9,6 @@ path = "lib.rs"
crate-type = ["dylib"]
[dependencies]
rustc_apfloat = { path = "../librustc_apfloat" }
serialize = { path = "../libserialize" }
syntax = { path = "../libsyntax" }
+140 -68
View File
@@ -9,102 +9,164 @@
// except according to those terms.
use std::cmp::Ordering;
use std::hash;
use std::mem::transmute;
use std::num::ParseFloatError;
use syntax::ast;
use rustc_apfloat::{Float, FloatConvert, Status};
use rustc_apfloat::ieee::{Single, Double};
use super::err::*;
#[derive(Copy, Clone, Debug, RustcEncodable, RustcDecodable)]
pub enum ConstFloat {
F32(f32),
F64(f64)
// Note that equality for `ConstFloat` means that the it is the same
// constant, not that the rust values are equal. In particular, `NaN
// == NaN` (at least if it's the same NaN; distinct encodings for NaN
// are considering unequal).
#[derive(Copy, Clone, PartialEq, Eq, Hash, RustcEncodable, RustcDecodable)]
pub struct ConstFloat {
pub ty: ast::FloatTy,
// This is a bit inefficient but it makes conversions below more
// ergonomic, and all of this will go away once `miri` is merged.
pub bits: u128,
}
pub use self::ConstFloat::*;
impl ConstFloat {
/// Description of the type, not the value
pub fn description(&self) -> &'static str {
match *self {
F32(_) => "f32",
F64(_) => "f64",
}
self.ty.ty_to_string()
}
pub fn is_nan(&self) -> bool {
match *self {
F32(f) => f.is_nan(),
F64(f) => f.is_nan(),
match self.ty {
ast::FloatTy::F32 => Single::from_bits(self.bits).is_nan(),
ast::FloatTy::F64 => Double::from_bits(self.bits).is_nan(),
}
}
/// Compares the values if they are of the same type
pub fn try_cmp(self, rhs: Self) -> Result<Ordering, ConstMathErr> {
match (self, rhs) {
(F64(a), F64(b)) => {
match (self.ty, rhs.ty) {
(ast::FloatTy::F64, ast::FloatTy::F64) => {
let a = Double::from_bits(self.bits);
let b = Double::from_bits(rhs.bits);
// This is pretty bad but it is the existing behavior.
Ok(if a == b {
Ordering::Equal
} else if a < b {
Ordering::Less
} else {
Ordering::Greater
})
Ok(a.partial_cmp(&b).unwrap_or(Ordering::Greater))
}
(F32(a), F32(b)) => {
Ok(if a == b {
Ordering::Equal
} else if a < b {
Ordering::Less
} else {
Ordering::Greater
})
(ast::FloatTy::F32, ast::FloatTy::F32) => {
let a = Single::from_bits(self.bits);
let b = Single::from_bits(rhs.bits);
Ok(a.partial_cmp(&b).unwrap_or(Ordering::Greater))
}
_ => Err(CmpBetweenUnequalTypes),
}
}
}
/// Note that equality for `ConstFloat` means that the it is the same
/// constant, not that the rust values are equal. In particular, `NaN
/// == NaN` (at least if it's the same NaN; distinct encodings for NaN
/// are considering unequal).
impl PartialEq for ConstFloat {
fn eq(&self, other: &Self) -> bool {
match (*self, *other) {
(F64(a), F64(b)) => {
unsafe{transmute::<_,u64>(a) == transmute::<_,u64>(b)}
pub fn from_i128(input: i128, ty: ast::FloatTy) -> Self {
let bits = match ty {
ast::FloatTy::F32 => Single::from_i128(input).value.to_bits(),
ast::FloatTy::F64 => Double::from_i128(input).value.to_bits()
};
ConstFloat { bits, ty }
}
pub fn from_u128(input: u128, ty: ast::FloatTy) -> Self {
let bits = match ty {
ast::FloatTy::F32 => Single::from_u128(input).value.to_bits(),
ast::FloatTy::F64 => Double::from_u128(input).value.to_bits()
};
ConstFloat { bits, ty }
}
pub fn from_str(num: &str, ty: ast::FloatTy) -> Result<Self, ParseFloatError> {
let bits = match ty {
ast::FloatTy::F32 => {
let rust_bits = num.parse::<f32>()?.to_bits() as u128;
let apfloat = num.parse::<Single>().unwrap_or_else(|e| {
panic!("apfloat::ieee::Single failed to parse `{}`: {:?}", num, e);
});
let apfloat_bits = apfloat.to_bits();
assert!(rust_bits == apfloat_bits,
"apfloat::ieee::Single gave different result for `{}`: \
{}({:#x}) vs Rust's {}({:#x})",
num, apfloat, apfloat_bits,
Single::from_bits(rust_bits), rust_bits);
apfloat_bits
}
(F32(a), F32(b)) => {
unsafe{transmute::<_,u32>(a) == transmute::<_,u32>(b)}
ast::FloatTy::F64 => {
let rust_bits = num.parse::<f64>()?.to_bits() as u128;
let apfloat = num.parse::<Double>().unwrap_or_else(|e| {
panic!("apfloat::ieee::Double failed to parse `{}`: {:?}", num, e);
});
let apfloat_bits = apfloat.to_bits();
assert!(rust_bits == apfloat_bits,
"apfloat::ieee::Double gave different result for `{}`: \
{}({:#x}) vs Rust's {}({:#x})",
num, apfloat, apfloat_bits,
Double::from_bits(rust_bits), rust_bits);
apfloat_bits
}
_ => false
};
Ok(ConstFloat { bits, ty })
}
/// Converts this float to a signed integer of `width` bits.
///
/// Returns `None` when apfloat reports the conversion as invalid
/// (e.g. NaN, or a value that does not fit in `width` bits).
pub fn to_i128(self, width: usize) -> Option<i128> {
    assert!(width <= 128);
    let converted = match self.ty {
        ast::FloatTy::F32 => Single::from_bits(self.bits).to_i128(width),
        ast::FloatTy::F64 => Double::from_bits(self.bits).to_i128(width),
    };
    // INVALID_OP in the status flags means the result value is garbage.
    match converted.status.intersects(Status::INVALID_OP) {
        true => None,
        false => Some(converted.value),
    }
}
}
// `ConstFloat` equality compares the stored bit patterns (per the
// `PartialEq` docs above, NaN equals itself when the encoding matches),
// so the relation is total and deriving-style `Eq` is sound.
impl Eq for ConstFloat {}
impl hash::Hash for ConstFloat {
fn hash<H: hash::Hasher>(&self, state: &mut H) {
match *self {
F64(a) => {
unsafe { transmute::<_,u64>(a) }.hash(state)
}
F32(a) => {
unsafe { transmute::<_,u32>(a) }.hash(state)
}
/// Converts this float to an unsigned integer of `width` bits.
///
/// Returns `None` when apfloat flags the conversion as invalid
/// (e.g. NaN, negative, or out of range for `width` bits).
pub fn to_u128(self, width: usize) -> Option<u128> {
    assert!(width <= 128);
    let converted = match self.ty {
        ast::FloatTy::F32 => Single::from_bits(self.bits).to_u128(width),
        ast::FloatTy::F64 => Double::from_bits(self.bits).to_u128(width),
    };
    if converted.status.intersects(Status::INVALID_OP) {
        return None;
    }
    Some(converted.value)
}
/// Casts this constant between `f32` and `f64`.
///
/// A same-type cast returns `self` unchanged. Widening and narrowing go
/// through apfloat's `convert`; the `&mut false` argument is the
/// `loses_info` out-flag, which is deliberately discarded here, so any
/// precision loss on the f64 -> f32 direction is silent.
pub fn convert(self, to: ast::FloatTy) -> Self {
    let bits = match (self.ty, to) {
        // No-op casts: keep the existing bit pattern and type.
        (ast::FloatTy::F32, ast::FloatTy::F32) |
        (ast::FloatTy::F64, ast::FloatTy::F64) => return self,
        (ast::FloatTy::F32, ast::FloatTy::F64) => {
            // Widening: every f32 is exactly representable as f64.
            Double::to_bits(Single::from_bits(self.bits).convert(&mut false).value)
        }
        (ast::FloatTy::F64, ast::FloatTy::F32) => {
            // Narrowing: may round (rounding behavior is apfloat's default).
            Single::to_bits(Double::from_bits(self.bits).convert(&mut false).value)
        }
    };
    ConstFloat { bits, ty: to }
}
}
impl ::std::fmt::Display for ConstFloat {
fn fmt(&self, fmt: &mut ::std::fmt::Formatter) -> Result<(), ::std::fmt::Error> {
match *self {
F32(f) => write!(fmt, "{}f32", f),
F64(f) => write!(fmt, "{}f64", f),
match self.ty {
ast::FloatTy::F32 => write!(fmt, "{:#}", Single::from_bits(self.bits))?,
ast::FloatTy::F64 => write!(fmt, "{:#}", Double::from_bits(self.bits))?,
}
write!(fmt, "{}", self.ty)
}
}
impl ::std::fmt::Debug for ConstFloat {
    /// `Debug` output is defined to be identical to `Display` output.
    fn fmt(&self, fmt: &mut ::std::fmt::Formatter) -> Result<(), ::std::fmt::Error> {
        write!(fmt, "{}", self)
    }
}
@@ -113,11 +175,20 @@ macro_rules! derive_binop {
impl ::std::ops::$op for ConstFloat {
type Output = Result<Self, ConstMathErr>;
fn $func(self, rhs: Self) -> Result<Self, ConstMathErr> {
match (self, rhs) {
(F32(a), F32(b)) => Ok(F32(a.$func(b))),
(F64(a), F64(b)) => Ok(F64(a.$func(b))),
_ => Err(UnequalTypes(Op::$op)),
}
let bits = match (self.ty, rhs.ty) {
(ast::FloatTy::F32, ast::FloatTy::F32) =>{
let a = Single::from_bits(self.bits);
let b = Single::from_bits(rhs.bits);
a.$func(b).value.to_bits()
}
(ast::FloatTy::F64, ast::FloatTy::F64) => {
let a = Double::from_bits(self.bits);
let b = Double::from_bits(rhs.bits);
a.$func(b).value.to_bits()
}
_ => return Err(UnequalTypes(Op::$op)),
};
Ok(ConstFloat { bits, ty: self.ty })
}
}
}
@@ -132,9 +203,10 @@ fn $func(self, rhs: Self) -> Result<Self, ConstMathErr> {
impl ::std::ops::Neg for ConstFloat {
type Output = Self;
fn neg(self) -> Self {
match self {
F32(f) => F32(-f),
F64(f) => F64(-f),
}
let bits = match self.ty {
ast::FloatTy::F32 => (-Single::from_bits(self.bits)).to_bits(),
ast::FloatTy::F64 => (-Double::from_bits(self.bits)).to_bits(),
};
ConstFloat { bits, ty: self.ty }
}
}
-42
View File
@@ -211,48 +211,6 @@ pub fn to_u128(&self) -> Option<u128> {
}
}
/// Converts this integer constant to `f32` with a plain `as` cast on the
/// underlying value.
///
/// NOTE(review): `as` casts to `f32` round to nearest, so magnitudes
/// above 2^24 (large 32/64/128-bit values) can lose precision silently.
pub fn to_f32(self) -> f32 {
    match self {
        I8(i) => i as f32,
        I16(i) => i as f32,
        I32(i) => i as f32,
        I64(i) => i as f32,
        I128(i) => i as f32,
        // `isize` constants carry their target pointer width explicitly.
        Isize(Is16(i)) => i as f32,
        Isize(Is32(i)) => i as f32,
        Isize(Is64(i)) => i as f32,
        U8(i) => i as f32,
        U16(i) => i as f32,
        U32(i) => i as f32,
        U64(i) => i as f32,
        U128(i) => i as f32,
        // Same for `usize`.
        Usize(Us16(i)) => i as f32,
        Usize(Us32(i)) => i as f32,
        Usize(Us64(i)) => i as f32,
    }
}
/// Converts this integer constant to `f64` with a plain `as` cast on the
/// underlying value.
///
/// NOTE(review): `as` casts to `f64` round to nearest, so magnitudes
/// above 2^53 (large 64/128-bit values) can lose precision silently.
pub fn to_f64(self) -> f64 {
    match self {
        I8(i) => i as f64,
        I16(i) => i as f64,
        I32(i) => i as f64,
        I64(i) => i as f64,
        I128(i) => i as f64,
        // `isize` constants carry their target pointer width explicitly.
        Isize(Is16(i)) => i as f64,
        Isize(Is32(i)) => i as f64,
        Isize(Is64(i)) => i as f64,
        U8(i) => i as f64,
        U16(i) => i as f64,
        U32(i) => i as f64,
        U64(i) => i as f64,
        U128(i) => i as f64,
        // Same for `usize`.
        Usize(Us16(i)) => i as f64,
        Usize(Us32(i)) => i as f64,
        Usize(Us64(i)) => i as f64,
    }
}
pub fn is_negative(&self) -> bool {
match *self {
I8(v) => v < 0,
+2
View File
@@ -26,6 +26,8 @@
#![feature(i128)]
#![feature(i128_type)]
extern crate rustc_apfloat;
extern crate syntax;
extern crate serialize as rustc_serialize; // used by deriving
+2
View File
@@ -134,9 +134,11 @@ pub trait BitwiseOperator {
/// Marker operator: combines two words with bitwise OR, i.e. the union
/// of the bit sets they represent.
pub struct Union;
impl BitwiseOperator for Union {
    #[inline]
    fn join(&self, a: usize, b: usize) -> usize { a | b }
}
/// Marker operator: `a & !b` keeps the bits set in `a` but not in `b`,
/// i.e. set difference of the bit sets they represent.
pub struct Subtract;
impl BitwiseOperator for Subtract {
    #[inline]
    fn join(&self, a: usize, b: usize) -> usize { a & !b }
}
-66
View File
@@ -1,66 +0,0 @@
// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use std::collections::{HashMap, HashSet};
use std::default::Default;
use std::hash::{Hasher, Hash, BuildHasherDefault};
/// A `HashMap` that uses the fast FNV hasher below instead of SipHash.
pub type FnvHashMap<K, V> = HashMap<K, V, BuildHasherDefault<FnvHasher>>;
/// The `HashSet` counterpart of `FnvHashMap`.
pub type FnvHashSet<V> = HashSet<V, BuildHasherDefault<FnvHasher>>;
/// Constructor-style helper: `FnvHashMap()` builds an empty FNV map.
/// Deliberately shares its name with the type alias, hence the allow.
#[allow(non_snake_case)]
pub fn FnvHashMap<K: Hash + Eq, V>() -> FnvHashMap<K, V> {
    HashMap::default()
}
/// Constructor-style helper: `FnvHashSet()` builds an empty FNV set.
#[allow(non_snake_case)]
pub fn FnvHashSet<V: Hash + Eq>() -> FnvHashSet<V> {
    HashSet::default()
}
/// A fast, non-cryptographic hasher for node ids and def ids.
///
/// The default hashmap hasher (SipHash) is DoS-resistant but slower than
/// needed inside the compiler, where inputs are not attacker-controlled,
/// so we default to 64-bit FNV-1a hashing instead:
/// http://en.wikipedia.org/wiki/Fowler%E2%80%93Noll%E2%80%93Vo_hash_function
pub struct FnvHasher(u64);

impl Default for FnvHasher {
    /// Starts the hasher at the standard 64-bit FNV offset basis.
    #[inline]
    fn default() -> FnvHasher {
        FnvHasher(0xcbf29ce484222325)
    }
}

impl Hasher for FnvHasher {
    #[inline]
    fn write(&mut self, bytes: &[u8]) {
        // FNV-1a step: xor in each byte, then multiply by the FNV prime.
        for &byte in bytes {
            self.0 = (self.0 ^ byte as u64).wrapping_mul(0x100000001b3);
        }
    }

    #[inline]
    fn finish(&self) -> u64 {
        self.0
    }
}
/// Convenience: hashes a single value with `FnvHasher` and returns the
/// 64-bit digest.
pub fn hash<T: Hash>(v: &T) -> u64 {
    let mut state = FnvHasher::default();
    v.hash(&mut state);
    state.finish()
}
+36
View File
@@ -308,6 +308,42 @@ pub fn depth_traverse<'a>(&'a self,
DepthFirstTraversal::with_start_node(self, start, direction)
}
/// Returns every node of the graph in postorder, walking edges in
/// `direction`. The depth-first walk starts at `entry_node`; any nodes
/// not reachable from it are then swept up in enumeration order, so the
/// result contains each node exactly once (asserted at the end).
pub fn nodes_in_postorder<'a>(&'a self,
                              direction: Direction,
                              entry_node: NodeIndex)
                              -> Vec<NodeIndex>
{
    let mut visited = BitVector::new(self.len_nodes());
    // Explicit DFS stack of (node, iterator over its remaining edges).
    let mut stack = vec![];
    let mut result = Vec::with_capacity(self.len_nodes());
    // Pushes `node` onto the stack unless it has already been visited.
    let mut push_node = |stack: &mut Vec<_>, node: NodeIndex| {
        if visited.insert(node.0) {
            stack.push((node, self.adjacent_edges(node, direction)));
        }
    };

    // Seed with the entry node first, then every other node, so the walk
    // also covers components unreachable from `entry_node`.
    for node in Some(entry_node).into_iter()
        .chain(self.enumerated_nodes().map(|(node, _)| node))
    {
        push_node(&mut stack, node);
        while let Some((node, mut iter)) = stack.pop() {
            if let Some((_, child)) = iter.next() {
                let target = child.source_or_target(direction);
                // the current node needs more processing, so
                // add it back to the stack
                stack.push((node, iter));
                // and then push the new node
                push_node(&mut stack, target);
            } else {
                // All successors handled: emit the node (postorder).
                result.push(node);
            }
        }
    }

    assert_eq!(result.len(), self.len_nodes());
    result
}
/// Whether or not a node can be reached from itself.
pub fn is_node_cyclic(&self, starting_node_index: NodeIndex) -> bool {
// This is similar to depth traversal below, but we
@@ -175,3 +175,46 @@ fn is_node_cyclic_b() {
let graph = create_graph_with_cycle();
assert!(graph.is_node_cyclic(NodeIndex(1)));
}
#[test]
fn nodes_in_postorder() {
    // For the acyclic test graph, the postorder is the same no matter
    // which node the traversal starts from — except when starting at
    // "F", which is visited before the sweep reaches "A".
    let expected = vec![
        ("A", vec!["C", "E", "D", "B", "A", "F"]),
        ("B", vec!["C", "E", "D", "B", "A", "F"]),
        ("C", vec!["C", "E", "D", "B", "A", "F"]),
        ("D", vec!["C", "E", "D", "B", "A", "F"]),
        ("E", vec!["C", "E", "D", "B", "A", "F"]),
        ("F", vec!["C", "E", "D", "B", "F", "A"])
    ];
    let graph = create_graph();
    for ((idx, node), &(node_name, ref expected))
        in graph.enumerated_nodes().zip(&expected)
    {
        assert_eq!(node.data, node_name);
        assert_eq!(expected,
                   &graph.nodes_in_postorder(OUTGOING, idx)
                         .into_iter().map(|idx| *graph.node_data(idx))
                         .collect::<Vec<&str>>());
    }

    // With a cycle present, the order depends on the entry node, but
    // every node must still appear exactly once.
    let expected = vec![
        ("A", vec!["D", "C", "B", "A"]),
        ("B", vec!["D", "C", "B", "A"]),
        ("C", vec!["B", "D", "C", "A"]),
        ("D", vec!["C", "B", "D", "A"]),
    ];
    let graph = create_graph_with_cycle();
    for ((idx, node), &(node_name, ref expected))
        in graph.enumerated_nodes().zip(&expected)
    {
        assert_eq!(node.data, node_name);
        assert_eq!(expected,
                   &graph.nodes_in_postorder(OUTGOING, idx)
                         .into_iter().map(|idx| *graph.node_data(idx))
                         .collect::<Vec<&str>>());
    }
}
-1
View File
@@ -65,7 +65,6 @@
pub mod stable_hasher;
pub mod transitive_relation;
pub mod unify;
pub mod fnv;
pub mod fx;
pub mod tuple_slice;
pub mod veccell;
+25 -58
View File
@@ -15,8 +15,7 @@
use rustc_mir as mir;
use rustc::session::{Session, CompileResult};
use rustc::session::CompileIncomplete;
use rustc::session::config::{self, Input, OutputFilenames, OutputType,
OutputTypes};
use rustc::session::config::{self, Input, OutputFilenames, OutputType};
use rustc::session::search_paths::PathKind;
use rustc::lint;
use rustc::middle::{self, dependency_format, stability, reachable};
@@ -26,7 +25,6 @@
use rustc::traits;
use rustc::util::common::{ErrorReported, time};
use rustc::util::nodemap::NodeSet;
use rustc::util::fs::rename_or_copy_remove;
use rustc_allocator as allocator;
use rustc_borrowck as borrowck;
use rustc_incremental::{self, IncrementalHashesMap};
@@ -208,7 +206,7 @@ macro_rules! controller_entry_point {
println!("Pre-trans");
tcx.print_debug_stats();
}
let trans = phase_4_translate_to_llvm(tcx, analysis, &incremental_hashes_map,
let trans = phase_4_translate_to_llvm(tcx, analysis, incremental_hashes_map,
&outputs);
if log_enabled!(::log::LogLevel::Info) {
@@ -231,7 +229,7 @@ macro_rules! controller_entry_point {
sess.code_stats.borrow().print_type_sizes();
}
let phase5_result = phase_5_run_llvm_passes(sess, &trans, &outputs);
let (phase5_result, trans) = phase_5_run_llvm_passes(sess, trans);
controller_entry_point!(after_llvm,
sess,
@@ -239,8 +237,6 @@ macro_rules! controller_entry_point {
phase5_result);
phase5_result?;
write::cleanup_llvm(&trans);
phase_6_link_output(sess, &trans, &outputs);
// Now that we won't touch anything in the incremental compilation directory
@@ -933,6 +929,8 @@ macro_rules! try_with_f {
passes.push_pass(MIR_CONST, mir::transform::type_check::TypeckMir);
passes.push_pass(MIR_CONST, mir::transform::rustc_peek::SanityCheck);
// We compute "constant qualifications" betwen MIR_CONST and MIR_VALIDATED.
// What we need to run borrowck etc.
passes.push_pass(MIR_VALIDATED, mir::transform::qualify_consts::QualifyAndPromoteConstants);
passes.push_pass(MIR_VALIDATED,
@@ -940,18 +938,23 @@ macro_rules! try_with_f {
passes.push_pass(MIR_VALIDATED, mir::transform::simplify::SimplifyCfg::new("qualify-consts"));
passes.push_pass(MIR_VALIDATED, mir::transform::nll::NLL);
// Optimizations begin.
passes.push_pass(MIR_OPTIMIZED, mir::transform::no_landing_pads::NoLandingPads);
passes.push_pass(MIR_OPTIMIZED, mir::transform::simplify::SimplifyCfg::new("no-landing-pads"));
// borrowck runs between MIR_VALIDATED and MIR_OPTIMIZED.
// From here on out, regions are gone.
passes.push_pass(MIR_OPTIMIZED, mir::transform::erase_regions::EraseRegions);
// These next passes must be executed together
passes.push_pass(MIR_OPTIMIZED, mir::transform::no_landing_pads::NoLandingPads);
passes.push_pass(MIR_OPTIMIZED, mir::transform::add_call_guards::AddCallGuards);
passes.push_pass(MIR_OPTIMIZED, mir::transform::elaborate_drops::ElaborateDrops);
passes.push_pass(MIR_OPTIMIZED, mir::transform::no_landing_pads::NoLandingPads);
passes.push_pass(MIR_OPTIMIZED, mir::transform::simplify::SimplifyCfg::new("elaborate-drops"));
// No lifetime analysis based on borrowing can be done from here on out.
// AddValidation needs to run after ElaborateDrops and before EraseRegions.
passes.push_pass(MIR_OPTIMIZED, mir::transform::add_validation::AddValidation);
// From here on out, regions are gone.
passes.push_pass(MIR_OPTIMIZED, mir::transform::erase_regions::EraseRegions);
// Optimizations begin.
passes.push_pass(MIR_OPTIMIZED, mir::transform::inline::Inline);
passes.push_pass(MIR_OPTIMIZED, mir::transform::instcombine::InstCombine);
passes.push_pass(MIR_OPTIMIZED, mir::transform::deaggregator::Deaggregator);
@@ -1059,9 +1062,9 @@ macro_rules! try_with_f {
/// be discarded.
pub fn phase_4_translate_to_llvm<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
analysis: ty::CrateAnalysis,
incremental_hashes_map: &IncrementalHashesMap,
incremental_hashes_map: IncrementalHashesMap,
output_filenames: &OutputFilenames)
-> trans::CrateTranslation {
-> write::OngoingCrateTranslation {
let time_passes = tcx.sess.time_passes();
time(time_passes,
@@ -1071,63 +1074,27 @@ pub fn phase_4_translate_to_llvm<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
let translation =
time(time_passes,
"translation",
move || trans::trans_crate(tcx, analysis, &incremental_hashes_map, output_filenames));
move || trans::trans_crate(tcx, analysis, incremental_hashes_map, output_filenames));
time(time_passes,
"assert dep graph",
|| rustc_incremental::assert_dep_graph(tcx));
time(time_passes,
"serialize dep graph",
|| rustc_incremental::save_dep_graph(tcx,
&incremental_hashes_map,
&translation.metadata.hashes,
translation.link.crate_hash));
translation
}
/// Run LLVM itself, producing a bitcode file, assembly file or object file
/// as a side effect.
pub fn phase_5_run_llvm_passes(sess: &Session,
trans: &trans::CrateTranslation,
outputs: &OutputFilenames) -> CompileResult {
if sess.opts.cg.no_integrated_as ||
(sess.target.target.options.no_integrated_as &&
(outputs.outputs.contains_key(&OutputType::Object) ||
outputs.outputs.contains_key(&OutputType::Exe)))
{
let output_types = OutputTypes::new(&[(OutputType::Assembly, None)]);
time(sess.time_passes(),
"LLVM passes",
|| write::run_passes(sess, trans, &output_types, outputs));
trans: write::OngoingCrateTranslation)
-> (CompileResult, trans::CrateTranslation) {
let trans = trans.join(sess);
write::run_assembler(sess, outputs);
// HACK the linker expects the object file to be named foo.0.o but
// `run_assembler` produces an object named just foo.o. Rename it if we
// are going to build an executable
if sess.opts.output_types.contains_key(&OutputType::Exe) {
let f = outputs.path(OutputType::Object);
rename_or_copy_remove(&f,
f.with_file_name(format!("{}.0.o",
f.file_stem().unwrap().to_string_lossy()))).unwrap();
}
// Remove assembly source, unless --save-temps was specified
if !sess.opts.cg.save_temps {
fs::remove_file(&outputs.temp_path(OutputType::Assembly, None)).unwrap();
}
} else {
time(sess.time_passes(),
"LLVM passes",
|| write::run_passes(sess, trans, &sess.opts.output_types, outputs));
if sess.opts.debugging_opts.incremental_info {
write::dump_incremental_data(&trans);
}
time(sess.time_passes(),
"serialize work products",
move || rustc_incremental::save_work_products(sess));
sess.compile_status()
(sess.compile_status(), trans)
}
/// Run the linker on any artifacts that resulted from the LLVM run.
+13 -5
View File
@@ -795,7 +795,12 @@ fn usage(verbose: bool, include_unstable_options: bool) {
(option.apply)(&mut options);
}
let message = format!("Usage: rustc [OPTIONS] INPUT");
let extra_help = if verbose {
let nightly_help = if nightly_options::is_nightly_build() {
"\n -Z help Print internal options for debugging rustc"
} else {
""
};
let verbose_help = if verbose {
""
} else {
"\n --help -v Print the full set of options rustc accepts"
@@ -803,11 +808,10 @@ fn usage(verbose: bool, include_unstable_options: bool) {
println!("{}\nAdditional help:
-C help Print codegen options
-W help \
Print 'lint' options and default settings
-Z help Print internal \
options for debugging rustc{}\n",
Print 'lint' options and default settings{}{}\n",
options.usage(&message),
extra_help);
nightly_help,
verbose_help);
}
fn describe_lints(lint_store: &lint::LintStore, loaded_plugins: bool) {
@@ -1203,6 +1207,10 @@ pub fn diagnostics_registry() -> errors::registry::Registry {
all_errors.extend_from_slice(&rustc_trans::DIAGNOSTICS);
all_errors.extend_from_slice(&rustc_const_eval::DIAGNOSTICS);
all_errors.extend_from_slice(&rustc_metadata::DIAGNOSTICS);
all_errors.extend_from_slice(&rustc_passes::DIAGNOSTICS);
all_errors.extend_from_slice(&rustc_plugin::DIAGNOSTICS);
all_errors.extend_from_slice(&rustc_mir::DIAGNOSTICS);
all_errors.extend_from_slice(&syntax::DIAGNOSTICS);
Registry::new(&all_errors)
}
+6 -6
View File
@@ -82,26 +82,27 @@ pub fn emit(&mut self) {
return;
}
match self.level {
let is_error = match self.level {
Level::Bug |
Level::Fatal |
Level::PhaseFatal |
Level::Error => {
self.handler.bump_err_count();
true
}
Level::Warning |
Level::Note |
Level::Help |
Level::Cancelled => {
false
}
}
};
self.handler.emitter.borrow_mut().emit(&self);
self.cancel();
if self.level == Level::Error {
self.handler.panic_if_treat_err_as_bug();
if is_error {
self.handler.bump_err_count();
}
// if self.is_fatal() {
@@ -210,4 +211,3 @@ fn drop(&mut self) {
}
}
}
+1 -4
View File
@@ -399,7 +399,6 @@ fn panic_if_treat_err_as_bug(&self) {
pub fn span_fatal<S: Into<MultiSpan>>(&self, sp: S, msg: &str) -> FatalError {
self.emit(&sp.into(), msg, Fatal);
self.panic_if_treat_err_as_bug();
FatalError
}
pub fn span_fatal_with_code<S: Into<MultiSpan>>(&self,
@@ -408,12 +407,10 @@ pub fn span_fatal_with_code<S: Into<MultiSpan>>(&self,
code: &str)
-> FatalError {
self.emit_with_code(&sp.into(), msg, code, Fatal);
self.panic_if_treat_err_as_bug();
FatalError
}
pub fn span_err<S: Into<MultiSpan>>(&self, sp: S, msg: &str) {
self.emit(&sp.into(), msg, Error);
self.panic_if_treat_err_as_bug();
}
pub fn mut_span_err<'a, S: Into<MultiSpan>>(&'a self,
sp: S,
@@ -425,7 +422,6 @@ pub fn mut_span_err<'a, S: Into<MultiSpan>>(&'a self,
}
pub fn span_err_with_code<S: Into<MultiSpan>>(&self, sp: S, msg: &str, code: &str) {
self.emit_with_code(&sp.into(), msg, code, Error);
self.panic_if_treat_err_as_bug();
}
pub fn span_warn<S: Into<MultiSpan>>(&self, sp: S, msg: &str) {
self.emit(&sp.into(), msg, Warning);
@@ -494,6 +490,7 @@ pub fn unimpl(&self, msg: &str) -> ! {
}
pub fn bump_err_count(&self) {
self.panic_if_treat_err_as_bug();
self.err_count.set(self.err_count.get() + 1);
}
+2 -2
View File
@@ -34,7 +34,7 @@
use super::work_product;
pub fn save_dep_graph<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
incremental_hashes_map: &IncrementalHashesMap,
incremental_hashes_map: IncrementalHashesMap,
metadata_hashes: &EncodedMetadataHashes,
svh: Svh) {
debug!("save_dep_graph()");
@@ -51,7 +51,7 @@ pub fn save_dep_graph<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
eprintln!("incremental: {} edges in dep-graph", query.graph.len_edges());
}
let mut hcx = HashContext::new(tcx, incremental_hashes_map);
let mut hcx = HashContext::new(tcx, &incremental_hashes_map);
let preds = Predecessors::new(&query, &mut hcx);
let mut current_metadata_hashes = FxHashMap();
+40
View File
@@ -722,6 +722,46 @@ fn check_pat(&mut self, cx: &EarlyContext, pat: &ast::Pat) {
}
}
declare_lint! {
pub UNUSED_DOC_COMMENT,
Warn,
"detects doc comments that aren't used by rustdoc"
}
/// Early lint pass that warns about doc comments (`///`, `/** */`) in
/// positions rustdoc never renders: local bindings, match arms, and
/// expressions.
#[derive(Copy, Clone)]
pub struct UnusedDocComment;

impl LintPass for UnusedDocComment {
    fn get_lints(&self) -> LintArray {
        lint_array![UNUSED_DOC_COMMENT]
    }
}

impl UnusedDocComment {
    /// Emits the lint on the first `#[doc = "..."]` attribute found in
    /// `attrs` (doc comments desugar to exactly that attribute form).
    fn warn_if_doc<'a, 'tcx,
                   I: Iterator<Item=&'a ast::Attribute>,
                   C: LintContext<'tcx>>(&self, mut attrs: I, cx: &C) {
        if let Some(attr) = attrs.find(|a| a.is_value_str() && a.check_name("doc")) {
            cx.struct_span_lint(UNUSED_DOC_COMMENT, attr.span, "doc comment not used by rustdoc")
              .emit();
        }
    }
}

impl EarlyLintPass for UnusedDocComment {
    // Doc comments on `let` statements are never used by rustdoc.
    fn check_local(&mut self, cx: &EarlyContext, decl: &ast::Local) {
        self.warn_if_doc(decl.attrs.iter(), cx);
    }

    // ...nor on match arms...
    fn check_arm(&mut self, cx: &EarlyContext, arm: &ast::Arm) {
        self.warn_if_doc(arm.attrs.iter(), cx);
    }

    // ...nor on arbitrary expressions.
    fn check_expr(&mut self, cx: &EarlyContext, expr: &ast::Expr) {
        self.warn_if_doc(expr.attrs.iter(), cx);
    }
}
declare_lint! {
pub UNCONDITIONAL_RECURSION,
Warn,
+1
View File
@@ -111,6 +111,7 @@ macro_rules! add_lint_group {
UnusedImportBraces,
AnonymousParameters,
IllegalFloatLiteralPattern,
UnusedDocComment,
);
add_early_builtin_with_new!(sess,
+6 -2
View File
@@ -44,9 +44,13 @@ fn check_unused_mut_pat(&self, cx: &LateContext, pats: &[P<hir::Pat>]) {
let mut mutables = FxHashMap();
for p in pats {
p.each_binding(|mode, id, _, path1| {
p.each_binding(|_, id, span, path1| {
let bm = match cx.tables.pat_binding_modes.get(&id) {
Some(&bm) => bm,
None => span_bug!(span, "missing binding mode"),
};
let name = path1.node;
if let hir::BindByValue(hir::MutMutable) = mode {
if let ty::BindByValue(hir::MutMutable) = bm {
if !name.as_str().starts_with("_") {
match mutables.entry(name) {
Vacant(entry) => {
+3 -3
View File
@@ -39,14 +39,14 @@ impl ArchiveRO {
///
/// If this archive is used with a mutable method, then an error will be
/// raised.
pub fn open(dst: &Path) -> Option<ArchiveRO> {
pub fn open(dst: &Path) -> Result<ArchiveRO, String> {
return unsafe {
let s = path2cstr(dst);
let ar = ::LLVMRustOpenArchive(s.as_ptr());
if ar.is_null() {
None
Err(::last_error().unwrap_or("failed to open archive".to_string()))
} else {
Some(ArchiveRO { ptr: ar })
Ok(ArchiveRO { ptr: ar })
}
};
-1
View File
@@ -598,7 +598,6 @@ pub fn LLVMStructTypeInContext(C: ContextRef,
// Operations on scalar constants
pub fn LLVMConstInt(IntTy: TypeRef, N: c_ulonglong, SignExtend: Bool) -> ValueRef;
pub fn LLVMConstIntOfArbitraryPrecision(IntTy: TypeRef, Wn: c_uint, Ws: *const u64) -> ValueRef;
pub fn LLVMConstReal(RealTy: TypeRef, N: f64) -> ValueRef;
pub fn LLVMConstIntGetZExtValue(ConstantVal: ValueRef) -> c_ulonglong;
pub fn LLVMConstIntGetSExtValue(ConstantVal: ValueRef) -> c_longlong;
pub fn LLVMRustConstInt128Get(ConstantVal: ValueRef, SExt: bool,
+4 -2
View File
@@ -15,7 +15,8 @@
use rustc::ty::maps::QueryConfig;
use rustc::middle::cstore::{CrateStore, CrateSource, LibSource, DepKind,
NativeLibrary, MetadataLoader, LinkMeta,
LinkagePreference, LoadedMacro, EncodedMetadata};
LinkagePreference, LoadedMacro, EncodedMetadata,
EncodedMetadataHashes};
use rustc::hir::def;
use rustc::middle::lang_items;
use rustc::session::Session;
@@ -390,6 +391,7 @@ fn load_macro(&self, id: DefId, sess: &Session) -> LoadedMacro {
legacy: def.legacy,
}),
vis: ast::Visibility::Inherited,
tokens: None,
})
}
@@ -443,7 +445,7 @@ fn encode_metadata<'a, 'tcx>(&self,
tcx: TyCtxt<'a, 'tcx, 'tcx>,
link_meta: &LinkMeta,
reachable: &NodeSet)
-> EncodedMetadata
-> (EncodedMetadata, EncodedMetadataHashes)
{
encoder::encode_metadata(tcx, link_meta, reachable)
}
+2 -5
View File
@@ -1648,7 +1648,7 @@ fn visit_impl_item(&mut self, _impl_item: &'v hir::ImplItem) {
pub fn encode_metadata<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
link_meta: &LinkMeta,
exported_symbols: &NodeSet)
-> EncodedMetadata
-> (EncodedMetadata, EncodedMetadataHashes)
{
let mut cursor = Cursor::new(vec![]);
cursor.write_all(METADATA_HEADER).unwrap();
@@ -1691,10 +1691,7 @@ pub fn encode_metadata<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
result[header + 2] = (pos >> 8) as u8;
result[header + 3] = (pos >> 0) as u8;
EncodedMetadata {
raw_data: result,
hashes: metadata_hashes,
}
(EncodedMetadata { raw_data: result }, metadata_hashes)
}
pub fn get_repr_options<'a, 'tcx, 'gcx>(tcx: &TyCtxt<'a, 'tcx, 'gcx>, did: DefId) -> ReprOptions {
+1 -1
View File
@@ -86,7 +86,7 @@ fn ast_block_stmts(&mut self,
let tcx = this.hir.tcx();
// Enter the remainder scope, i.e. the bindings' destruction scope.
this.push_scope(remainder_scope);
this.push_scope((remainder_scope, source_info));
let_extent_stack.push(remainder_scope);
// Declare the bindings, which may create a visibility scope.
+1 -1
View File
@@ -242,7 +242,7 @@ fn expr_as_rvalue(&mut self,
ExprKind::Yield { value } => {
let value = unpack!(block = this.as_operand(block, scope, value));
let resume = this.cfg.start_new_block();
let cleanup = this.generator_drop_cleanup(expr_span);
let cleanup = this.generator_drop_cleanup();
this.cfg.terminate(block, source_info, TerminatorKind::Yield {
value: value,
resume: resume,
+1 -1
View File
@@ -237,7 +237,7 @@ pub fn into_expr(&mut self,
.collect();
let success = this.cfg.start_new_block();
let cleanup = this.diverge_cleanup(expr_span);
let cleanup = this.diverge_cleanup();
this.cfg.terminate(block, source_info, TerminatorKind::Call {
func: fun,
args: args,

Some files were not shown because too many files have changed in this diff Show More