Change TODO in compiler to FIXME

This commit is contained in:
Redddy
2026-02-27 03:48:05 +00:00
parent 69370dc4a8
commit 50db919f5d
36 changed files with 210 additions and 212 deletions
@@ -175,7 +175,7 @@ rm -r tests/run-make/panic-abort-eh_frame # .eh_frame emitted with panic=abort
# bugs in the test suite
# ======================
rm tests/ui/process/nofile-limit.rs # TODO some AArch64 linking issue
rm tests/ui/process/nofile-limit.rs # FIXME some AArch64 linking issue
rm -r tests/ui/codegen/equal-pointers-unequal # make incorrect assumptions about the location of stack variables
rm -r tests/incremental/extern_static/issue-49153.rs # assumes reference to undefined static gets optimized away
@@ -356,10 +356,10 @@ fn codegen_float_intrinsic_call<'tcx>(
sym::fmaf64 => ("fma", 3, fx.tcx.types.f64, types::F64),
sym::fmaf128 => ("fmaf128", 3, fx.tcx.types.f128, types::F128),
// FIXME: calling `fma` from libc without FMA target feature uses expensive software emulation
sym::fmuladdf16 => ("fmaf16", 3, fx.tcx.types.f16, types::F16), // TODO: use cranelift intrinsic analogous to llvm.fmuladd.f16
sym::fmuladdf32 => ("fmaf", 3, fx.tcx.types.f32, types::F32), // TODO: use cranelift intrinsic analogous to llvm.fmuladd.f32
sym::fmuladdf64 => ("fma", 3, fx.tcx.types.f64, types::F64), // TODO: use cranelift intrinsic analogous to llvm.fmuladd.f64
sym::fmuladdf128 => ("fmaf128", 3, fx.tcx.types.f128, types::F128), // TODO: use cranelift intrinsic analogous to llvm.fmuladd.f128
sym::fmuladdf16 => ("fmaf16", 3, fx.tcx.types.f16, types::F16), // FIXME: use cranelift intrinsic analogous to llvm.fmuladd.f16
sym::fmuladdf32 => ("fmaf", 3, fx.tcx.types.f32, types::F32), // FIXME: use cranelift intrinsic analogous to llvm.fmuladd.f32
sym::fmuladdf64 => ("fma", 3, fx.tcx.types.f64, types::F64), // FIXME: use cranelift intrinsic analogous to llvm.fmuladd.f64
sym::fmuladdf128 => ("fmaf128", 3, fx.tcx.types.f128, types::F128), // FIXME: use cranelift intrinsic analogous to llvm.fmuladd.f128
sym::copysignf16 => ("copysignf16", 2, fx.tcx.types.f16, types::F16),
sym::copysignf32 => ("copysignf", 2, fx.tcx.types.f32, types::F32),
sym::copysignf64 => ("copysign", 2, fx.tcx.types.f64, types::F64),
+3 -3
View File
@@ -1,4 +1,4 @@
# TODO: refactor to avoid duplication with the ci.yml file.
# FIXME: refactor to avoid duplication with the ci.yml file.
name: Failures
on:
@@ -92,7 +92,7 @@ jobs:
run: ./y.sh prepare
- name: Run tests
# TODO: re-enable those tests for libgccjit 12.
# FIXME: re-enable those tests for libgccjit 12.
if: matrix.libgccjit_version.gcc != 'libgccjit12.so'
id: tests
run: |
@@ -100,7 +100,7 @@ jobs:
rg --text "test result" output_log >> $GITHUB_STEP_SUMMARY
- name: Run failing ui pattern tests for ICE
# TODO: re-enable those tests for libgccjit 12.
# FIXME: re-enable those tests for libgccjit 12.
if: matrix.libgccjit_version.gcc != 'libgccjit12.so'
id: ui-tests
run: |
+2 -2
View File
@@ -1,4 +1,4 @@
# TODO: check if qemu-user-static-binfmt is needed (perhaps to run some tests since it probably calls exec).
# FIXME: check if qemu-user-static-binfmt is needed (perhaps to run some tests since it probably calls exec).
name: m68k CI
@@ -24,7 +24,7 @@ jobs:
matrix:
commands: [
"--std-tests",
# TODO(antoyo): fix those on m68k.
# FIXME(antoyo): fix those on m68k.
#"--test-libcore",
#"--extended-rand-tests",
#"--extended-regex-example-tests",
+2 -2
View File
@@ -38,7 +38,7 @@ jobs:
- name: Install packages
run: sudo apt-get install ninja-build ripgrep
# TODO: remove when we have binutils version 2.43 in the repo.
# FIXME: remove when we have binutils version 2.43 in the repo.
- name: Install more recent binutils
run: |
echo "deb http://archive.ubuntu.com/ubuntu plucky main universe" | sudo tee /etc/apt/sources.list.d/plucky-copies.list
@@ -94,7 +94,7 @@ jobs:
if: ${{ matrix.cargo_runner }}
run: |
# FIXME: these tests fail when the sysroot is compiled with LTO because of a missing symbol in proc-macro.
# TODO: remove --skip test_tile_ when it's implemented.
# FIXME: remove --skip test_tile_ when it's implemented.
STDARCH_TEST_SKIP_FUNCTION="xsave,xsaveopt,xsave64,xsaveopt64" STDARCH_TEST_EVERYTHING=1 CHANNEL=release CARGO_TARGET_X86_64_UNKNOWN_LINUX_GNU_RUNNER="${{ matrix.cargo_runner }}" TARGET=x86_64-unknown-linux-gnu CG_RUSTFLAGS="-Ainternal_features" ./y.sh cargo test --manifest-path build/build_sysroot/sysroot_src/library/stdarch/Cargo.toml -- --skip rtm --skip tbm --skip sse4a --skip test_tile_
# Summary job for the merge queue.
@@ -148,7 +148,7 @@ fn prepare_libcore(
Ok(())
}
// TODO: remove when we can ignore warnings in rustdoc tests.
// FIXME: remove when we can ignore warnings in rustdoc tests.
fn prepare_rand() -> Result<(), String> {
// Apply patch for the rand crate.
let file_path = "patches/crates/0001-Remove-deny-warnings.patch";
@@ -285,7 +285,7 @@ fn build_sysroot(env: &Env, args: &TestArg) -> Result<(), String> {
Ok(())
}
// TODO(GuillaumeGomez): when rewriting in Rust, refactor with the code in tests/lang_tests_common.rs if possible.
// FIXME(GuillaumeGomez): when rewriting in Rust, refactor with the code in tests/lang_tests_common.rs if possible.
fn maybe_run_command_in_vm(
command: &[&dyn AsRef<OsStr>],
env: &Env,
@@ -648,16 +648,16 @@ fn test_projects(env: &Env, args: &TestArg) -> Result<(), String> {
"https://github.com/BurntSushi/memchr",
"https://github.com/dtolnay/itoa",
"https://github.com/rust-lang/cfg-if",
//"https://github.com/rust-lang-nursery/lazy-static.rs", // TODO: re-enable when the
//"https://github.com/rust-lang-nursery/lazy-static.rs", // FIXME: re-enable when the
//failing test is fixed upstream.
//"https://github.com/marshallpierce/rust-base64", // FIXME: one test is OOM-killed.
// TODO: ignore the base64 test that is OOM-killed.
// FIXME: ignore the base64 test that is OOM-killed.
//"https://github.com/time-rs/time", // FIXME: one test fails (https://github.com/time-rs/time/issues/719).
"https://github.com/rust-lang/log",
"https://github.com/bitflags/bitflags",
//"https://github.com/serde-rs/serde", // FIXME: one test fails.
//"https://github.com/rayon-rs/rayon", // TODO: very slow, only run on master?
//"https://github.com/rust-lang/cargo", // TODO: very slow, only run on master?
//"https://github.com/rayon-rs/rayon", // FIXME: very slow, only run on master?
//"https://github.com/rust-lang/cargo", // FIXME: very slow, only run on master?
];
let mut env = env.clone();
@@ -699,7 +699,7 @@ fn test_libcore(env: &Env, args: &TestArg) -> Result<(), String> {
println!("[TEST] libcore");
let path = get_sysroot_dir().join("sysroot_src/library/coretests");
let _ = remove_dir_all(path.join("target"));
// TODO(antoyo): run in release mode when we fix the failures.
// FIXME(antoyo): run in release mode when we fix the failures.
run_cargo_command(&[&"test"], Some(&path), env, args)?;
Ok(())
}
@@ -71,4 +71,4 @@ Maybe by calling the following at the beginning of gdb:
set substitute-path /usr/src/debug/gcc /path/to/gcc-repo/gcc
```
TODO(antoyo): but that's not what I remember I was doing.
FIXME(antoyo): but that's not what I remember I was doing.
+1 -1
View File
@@ -47,6 +47,6 @@ git push
PATH="$HOME/bin:$PATH" ~/bin/git-subtree push -P compiler/rustc_codegen_gcc/ ../rustc_codegen_gcc/ sync_branch_name
```
TODO: write a script that does the above.
FIXME: write a script that does the above.
https://rust-lang.zulipchat.com/#narrow/stream/301329-t-devtools/topic/subtree.20madness/near/258877725
@@ -275,7 +275,7 @@ unsafe fn uninitialized<T>() -> T {
}
}
// TODO(antoyo): to make this work, support weak linkage.
// FIXME(antoyo): to make this work, support weak linkage.
//unsafe { assert_eq!(ABC as usize, 0); }
&mut (|| Some(0 as *const ())) as &mut dyn FnMut() -> Option<*const ()>;
+2 -2
View File
@@ -105,7 +105,7 @@ pub struct FnAbiGcc<'gcc> {
}
pub trait FnAbiGccExt<'gcc, 'tcx> {
// TODO(antoyo): return a function pointer type instead?
// FIXME(antoyo): return a function pointer type instead?
fn gcc_type(&self, cx: &CodegenCx<'gcc, 'tcx>) -> FnAbiGcc<'gcc>;
fn ptr_to_gcc_type(&self, cx: &CodegenCx<'gcc, 'tcx>) -> Type<'gcc>;
#[cfg(feature = "master")]
@@ -260,7 +260,7 @@ pub fn conv_to_fn_attribute<'gcc>(conv: CanonAbi, arch: &Arch) -> Option<FnAttri
&Arch::Nvptx64 => FnAttribute::NvptxKernel,
arch => panic!("Arch {arch} does not support GpuKernel calling convention"),
},
// TODO(antoyo): check if those AVR attributes are mapped correctly.
// FIXME(antoyo): check if those AVR attributes are mapped correctly.
CanonAbi::Interrupt(interrupt_kind) => match interrupt_kind {
InterruptKind::Avr => FnAttribute::AvrSignal,
InterruptKind::AvrNonBlocking => FnAttribute::AvrInterrupt,
+2 -2
View File
@@ -98,7 +98,7 @@ fn create_wrapper_function(
)));
if tcx.sess.must_emit_unwind_tables() {
// TODO(antoyo): emit unwind tables.
// FIXME(antoyo): emit unwind tables.
}
let block = func.new_block("entry");
@@ -138,6 +138,6 @@ fn create_wrapper_function(
block.end_with_void_return(None);
}
// TODO(@Commeownist): Check if we need to emit some extra debugging info in certain circumstances
// FIXME(@Commeownist): Check if we need to emit some extra debugging info in certain circumstances
// as described in https://github.com/rust-lang/rust/commit/77a96ed5646f7c3ee8897693decc4626fe380643
}
+12 -12
View File
@@ -308,13 +308,13 @@ fn codegen_inline_asm(
}
InlineAsmOperandRef::SymFn { instance } => {
// TODO(@Amanieu): Additional mangling is needed on
// FIXME(@Amanieu): Additional mangling is needed on
// some targets to add a leading underscore (Mach-O)
// or byte count suffixes (x86 Windows).
constants_len += self.tcx.symbol_name(instance).name.len();
}
InlineAsmOperandRef::SymStatic { def_id } => {
// TODO(@Amanieu): Additional mangling is needed on
// FIXME(@Amanieu): Additional mangling is needed on
// some targets to add a leading underscore (Mach-O).
constants_len +=
self.tcx.symbol_name(Instance::mono(self.tcx, def_id)).name.len();
@@ -440,7 +440,7 @@ fn codegen_inline_asm(
match *piece {
InlineAsmTemplatePiece::String(ref string) => {
for char in string.chars() {
// TODO(antoyo): might also need to escape | if rustc doesn't do it.
// FIXME(antoyo): might also need to escape | if rustc doesn't do it.
let escaped_char = match char {
'%' => "%%",
'{' => "%{",
@@ -496,7 +496,7 @@ fn codegen_inline_asm(
}
InlineAsmOperandRef::SymFn { instance } => {
// TODO(@Amanieu): Additional mangling is needed on
// FIXME(@Amanieu): Additional mangling is needed on
// some targets to add a leading underscore (Mach-O)
// or byte count suffixes (x86 Windows).
let name = self.tcx.symbol_name(instance).name;
@@ -504,7 +504,7 @@ fn codegen_inline_asm(
}
InlineAsmOperandRef::SymStatic { def_id } => {
// TODO(@Amanieu): Additional mangling is needed on
// FIXME(@Amanieu): Additional mangling is needed on
// some targets to add a leading underscore (Mach-O).
let instance = Instance::mono(self.tcx, def_id);
let name = self.tcx.symbol_name(instance).name;
@@ -557,7 +557,7 @@ fn codegen_inline_asm(
InlineAsmArch::PowerPC | InlineAsmArch::PowerPC64 => {
// "cc" is cr0 on powerpc.
}
// TODO(@Commeownist): I'm not 100% sure this one clobber is sufficient
// FIXME(@Commeownist): I'm not 100% sure this one clobber is sufficient
// on all architectures. For instance, what about FP stack?
_ => {
extended_asm.add_clobber("cc");
@@ -571,7 +571,7 @@ fn codegen_inline_asm(
extended_asm.set_volatile_flag(true);
}
if !options.contains(InlineAsmOptions::NOSTACK) {
// TODO(@Commeownist): figure out how to align stack
// FIXME(@Commeownist): figure out how to align stack
}
if dest.is_none() && options.contains(InlineAsmOptions::NORETURN) {
let builtin_unreachable = self.context.get_builtin_function("__builtin_unreachable");
@@ -640,7 +640,7 @@ fn explicit_reg_to_gcc(reg: InlineAsmReg) -> &'static str {
// For explicit registers, we have to create a register variable: https://stackoverflow.com/a/31774784/389119
match reg {
InlineAsmReg::X86(reg) => {
// TODO(antoyo): add support for vector register.
// FIXME(antoyo): add support for vector register.
match reg.reg_class() {
X86InlineAsmRegClass::reg_byte => {
// GCC does not support the `b` suffix, so we just strip it
@@ -901,7 +901,7 @@ fn codegen_global_asm(
GlobalAsmOperandRef::SymFn { instance } => {
let function = get_fn(self, instance);
self.add_used_function(function);
// TODO(@Amanieu): Additional mangling is needed on
// FIXME(@Amanieu): Additional mangling is needed on
// some targets to add a leading underscore (Mach-O)
// or byte count suffixes (x86 Windows).
let name = self.tcx.symbol_name(instance).name;
@@ -909,8 +909,8 @@ fn codegen_global_asm(
}
GlobalAsmOperandRef::SymStatic { def_id } => {
// TODO(antoyo): set the global variable as used.
// TODO(@Amanieu): Additional mangling is needed on
// FIXME(antoyo): set the global variable as used.
// FIXME(@Amanieu): Additional mangling is needed on
// some targets to add a leading underscore (Mach-O).
let instance = Instance::mono(self.tcx, def_id);
let name = self.tcx.symbol_name(instance).name;
@@ -930,7 +930,7 @@ fn codegen_global_asm(
}
fn mangled_name(&self, instance: Instance<'tcx>) -> String {
// TODO(@Amanieu): Additional mangling is needed on
// FIXME(@Amanieu): Additional mangling is needed on
// some targets to add a leading underscore (Mach-O)
// or byte count suffixes (x86 Windows).
self.tcx.symbol_name(instance).name.to_string()
+3 -3
View File
@@ -126,19 +126,19 @@ pub fn from_fn_attrs<'gcc, 'tcx>(
.map(|features| features.name.as_str())
.flat_map(|feat| to_gcc_features(cx.tcx.sess, feat).into_iter())
.chain(codegen_fn_attrs.instruction_set.iter().map(|x| match *x {
InstructionSetAttr::ArmA32 => "-thumb-mode", // TODO(antoyo): support removing feature.
InstructionSetAttr::ArmA32 => "-thumb-mode", // FIXME(antoyo): support removing feature.
InstructionSetAttr::ArmT32 => "thumb-mode",
}))
.collect::<Vec<_>>();
// TODO(antoyo): cg_llvm adds global features to each function so that LTO keep them.
// FIXME(antoyo): cg_llvm adds global features to each function so that LTO keep them.
// Check if GCC requires the same.
let mut global_features = cx.tcx.global_backend_features(()).iter().map(|s| s.as_str());
function_features.extend(&mut global_features);
let target_features = function_features
.iter()
.filter_map(|feature| {
// TODO(antoyo): support soft-float.
// FIXME(antoyo): support soft-float.
if feature.contains("soft-float") {
return None;
}
+3 -3
View File
@@ -10,7 +10,7 @@
// Maybe that's because the combined object files contain the IR (true) and the final link
// does not remove it?
//
// TODO(antoyo): for performance, check which optimizations the C++ frontend enables.
// FIXME(antoyo): for performance, check which optimizations the C++ frontend enables.
// cSpell:disable
// Fix these warnings:
// /usr/bin/ld: warning: type of symbol `_RNvNvNvNtCs5JWOrf9uCus_5rayon11thread_pool19WORKER_THREAD_STATE7___getit5___KEY' changed from 1 to 6 in /tmp/ccKeUSiR.ltrans0.ltrans.o
@@ -39,7 +39,7 @@
use crate::{GccCodegenBackend, GccContext, LtoMode, to_gcc_opt_level};
struct LtoData {
// TODO(antoyo): use symbols_below_threshold.
// FIXME(antoyo): use symbols_below_threshold.
//symbols_below_threshold: Vec<String>,
upstream_modules: Vec<(SerializedModule<ModuleBuffer>, CString)>,
tmp_path: TempDir,
@@ -173,7 +173,7 @@ fn fat_lto(
.filter(|&(_, module)| module.kind == ModuleKind::Regular)
.map(|(i, _module)| {
//let cost = unsafe { llvm::LLVMRustModuleCost(module.module_llvm.llmod()) };
// TODO(antoyo): compute the cost of a module if GCC allows this.
// FIXME(antoyo): compute the cost of a module if GCC allows this.
(0, i)
})
.max();
+4 -4
View File
@@ -44,7 +44,7 @@ pub(crate) fn codegen(
let _timer =
prof.generic_activity_with_arg("GCC_module_codegen_make_bitcode", &*module.name);
// TODO(antoyo)
// FIXME(antoyo)
/*if let Some(bitcode_filename) = bc_out.file_name() {
cgcx.prof.artifact_size(
"llvm_bitcode",
@@ -68,14 +68,14 @@ pub(crate) fn codegen(
let _timer = prof
.generic_activity_with_arg("GCC_module_codegen_embed_bitcode", &*module.name);
if lto_supported {
// TODO(antoyo): maybe we should call embed_bitcode to have the proper iOS fixes?
// FIXME(antoyo): maybe we should call embed_bitcode to have the proper iOS fixes?
//embed_bitcode(cgcx, llcx, llmod, &config.bc_cmdline, data);
context.add_command_line_option("-flto=auto");
context.add_command_line_option("-flto-partition=one");
context.add_command_line_option("-ffat-lto-objects");
}
// TODO(antoyo): Send -plugin/usr/lib/gcc/x86_64-pc-linux-gnu/11.1.0/liblto_plugin.so to linker (this should be done when specifying the appropriate rustc cli argument).
// FIXME(antoyo): Send -plugin/usr/lib/gcc/x86_64-pc-linux-gnu/11.1.0/liblto_plugin.so to linker (this should be done when specifying the appropriate rustc cli argument).
context
.compile_to_file(OutputKind::ObjectFile, bc_out.to_str().expect("path to str"));
}
@@ -135,7 +135,7 @@ pub(crate) fn codegen(
// NOTE: without -fuse-linker-plugin, we get the following error:
// lto1: internal compiler error: decompressed stream: Destination buffer is too small
// TODO(antoyo): since we do not do LTO when the linker is invoked anymore, perhaps
// FIXME(antoyo): since we do not do LTO when the linker is invoked anymore, perhaps
// the following flag is not necessary anymore.
context.add_driver_option("-fuse-linker-plugin");
}
+4 -4
View File
@@ -50,7 +50,7 @@ pub fn global_linkage_to_gcc(linkage: Linkage) -> GlobalKind {
Linkage::WeakAny => unimplemented!(),
Linkage::WeakODR => unimplemented!(),
Linkage::Internal => GlobalKind::Internal,
Linkage::ExternalWeak => GlobalKind::Imported, // TODO(antoyo): should be weak linkage.
Linkage::ExternalWeak => GlobalKind::Imported, // FIXME(antoyo): should be weak linkage.
Linkage::Common => unimplemented!(),
}
}
@@ -58,7 +58,7 @@ pub fn global_linkage_to_gcc(linkage: Linkage) -> GlobalKind {
pub fn linkage_to_gcc(linkage: Linkage) -> FunctionType {
match linkage {
Linkage::External => FunctionType::Exported,
// TODO(antoyo): set the attribute externally_visible.
// FIXME(antoyo): set the attribute externally_visible.
Linkage::AvailableExternally => FunctionType::Extern,
Linkage::LinkOnceAny => unimplemented!(),
Linkage::LinkOnceODR => unimplemented!(),
@@ -198,7 +198,7 @@ fn module_codegen(
context.set_allow_unreachable_blocks(true);
{
// TODO: to make it less error-prone (calling get_target_info() will add the flag
// FIXME: to make it less error-prone (calling get_target_info() will add the flag
// -fsyntax-only), forbid the compilation when get_target_info() is called on a
// context.
let f16_type_supported = target_info.supports_target_dependent_type(CType::Float16);
@@ -206,7 +206,7 @@ fn module_codegen(
let f64_type_supported = target_info.supports_target_dependent_type(CType::Float64);
let f128_type_supported = target_info.supports_target_dependent_type(CType::Float128);
let u128_type_supported = target_info.supports_target_dependent_type(CType::UInt128t);
// TODO: improve this to avoid passing that many arguments.
// FIXME: improve this to avoid passing that many arguments.
let mut cx = CodegenCx::new(
&context,
cgu,
+42 -42
View File
@@ -38,7 +38,7 @@
use crate::intrinsic::llvm;
use crate::type_of::LayoutGccExt;
// TODO(antoyo)
// FIXME(antoyo)
type Funclet = ();
enum ExtremumOperation {
@@ -75,7 +75,7 @@ fn atomic_extremum(
let func = self.current_func();
let load_ordering = match order {
// TODO(antoyo): does this make sense?
// FIXME(antoyo): does this make sense?
AtomicOrdering::AcqRel | AtomicOrdering::Release => AtomicOrdering::Acquire,
_ => order,
};
@@ -284,8 +284,8 @@ fn check_ptr_call<'b>(
func_ptr,
index
);
// TODO(antoyo): perhaps use __builtin_convertvector for vector casting.
// TODO: remove bitcast now that vector types can be compared?
// FIXME(antoyo): perhaps use __builtin_convertvector for vector casting.
// FIXME: remove bitcast now that vector types can be compared?
// ==> We use bitcast to avoid having to do many manual casts from e.g. __m256i to __v32qi (in
// the case of _mm256_aesenc_epi128).
self.bitcast(actual_val, expected_ty)
@@ -430,7 +430,7 @@ pub fn overflow_call(
// That's why we assign the result to a local.
let return_type = self.context.new_type::<bool>();
let current_func = self.block.get_function();
// TODO(antoyo): return the new_call() directly? Since the overflow function has no side-effects.
// FIXME(antoyo): return the new_call() directly? Since the overflow function has no side-effects.
let result = current_func.new_local(
self.location,
return_type,
@@ -610,7 +610,7 @@ fn invoke(
let current_block = self.block;
self.block = try_block;
let call = self.call(typ, fn_attrs, None, func, args, None, instance); // TODO(antoyo): use funclet here?
let call = self.call(typ, fn_attrs, None, func, args, None, instance); // FIXME(antoyo): use funclet here?
self.block = current_block;
let return_value =
@@ -648,7 +648,7 @@ fn invoke(
let condition = self.context.new_rvalue_from_int(self.bool_type, 1);
self.llbb().end_with_conditional(self.location, condition, then, catch);
if let Some(_fn_abi) = fn_abi {
// TODO(bjorn3): Apply function attributes
// FIXME(bjorn3): Apply function attributes
}
call_site
}
@@ -675,7 +675,7 @@ fn fadd(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
self.assign_to_var(a + b)
}
// TODO(antoyo): should we also override the `unchecked_` versions?
// FIXME(antoyo): should we also override the `unchecked_` versions?
fn sub(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
self.assign_to_var(self.gcc_sub(a, b))
}
@@ -703,7 +703,7 @@ fn udiv(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
}
fn exactudiv(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
// TODO(antoyo): poison if not exact.
// FIXME(antoyo): poison if not exact.
let a_type = a.get_type().to_unsigned(self);
let a = self.gcc_int_cast(a, a_type);
let b_type = b.get_type().to_unsigned(self);
@@ -716,7 +716,7 @@ fn sdiv(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
}
fn exactsdiv(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
// TODO(antoyo): poison if not exact.
// FIXME(antoyo): poison if not exact.
// FIXME(antoyo): rustc_codegen_ssa::mir::intrinsic uses different types for a and b but they
// should be the same.
let typ = a.get_type().to_signed(self);
@@ -737,7 +737,7 @@ fn srem(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
}
fn frem(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
// TODO(antoyo): add check in libgccjit since using the binary operator % causes the following error:
// FIXME(antoyo): add check in libgccjit since using the binary operator % causes the following error:
// during RTL pass: expand
// libgccjit.so: error: in expmed_mode_index, at expmed.h:240
// 0x7f0101d58dc6 expmed_mode_index
@@ -789,7 +789,7 @@ fn frem(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
return self.context.new_call(self.location, fmod, &[a, b]);
}
TypeKind::FP128 => {
// TODO(antoyo): use get_simple_function_f128_2args.
// FIXME(antoyo): use get_simple_function_f128_2args.
let f128_type = self.type_f128();
let fmodf128 = self.context.new_function(
None,
@@ -837,7 +837,7 @@ fn lshr(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
}
fn ashr(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
// TODO(antoyo): check whether behavior is an arithmetic shift for >> .
// FIXME(antoyo): check whether behavior is an arithmetic shift for >> .
// It seems to be if the value is signed.
self.gcc_lshr(a, b)
}
@@ -937,7 +937,7 @@ fn checked_binop(
fn alloca(&mut self, size: Size, align: Align) -> RValue<'gcc> {
let ty = self.cx.type_array(self.cx.type_i8(), size.bytes()).get_aligned(align.bytes());
// TODO(antoyo): It might be better to return a LValue, but fixing the rustc API is non-trivial.
// FIXME(antoyo): It might be better to return a LValue, but fixing the rustc API is non-trivial.
self.current_func()
.new_local(self.location, ty, format!("stack_var_{}", self.next_value_counter()))
.get_address(self.location)
@@ -1006,8 +1006,8 @@ fn atomic_load(
order: AtomicOrdering,
size: Size,
) -> RValue<'gcc> {
// TODO(antoyo): use ty.
// TODO(antoyo): handle alignment.
// FIXME(antoyo): use ty.
// FIXME(antoyo): handle alignment.
let atomic_load =
self.context.get_builtin_function(format!("__atomic_load_{}", size.bytes()));
let ordering = self.context.new_rvalue_from_int(self.i32_type, order.to_gcc());
@@ -1128,11 +1128,11 @@ fn write_operand_repeatedly(
}
fn range_metadata(&mut self, _load: RValue<'gcc>, _range: WrappingRange) {
// TODO(antoyo)
// FIXME(antoyo)
}
fn nonnull_metadata(&mut self, _load: RValue<'gcc>) {
// TODO(antoyo)
// FIXME(antoyo)
}
fn store(&mut self, val: RValue<'gcc>, ptr: RValue<'gcc>, align: Align) -> RValue<'gcc> {
@@ -1161,7 +1161,7 @@ fn store_with_flags(
self.cx.context.new_cast(self.location, ptr, modified_destination_type.make_pointer());
let modified_destination = modified_ptr.dereference(self.location);
self.llbb().add_assignment(self.location, modified_destination, val);
// TODO(antoyo): handle `MemFlags::NONTEMPORAL`.
// FIXME(antoyo): handle `MemFlags::NONTEMPORAL`.
// NOTE: dummy value here since it's never used. FIXME(antoyo): API should not return a value here?
// When adding support for NONTEMPORAL, make sure to not just emit MOVNT on x86; see the
// LLVM backend for details.
@@ -1175,7 +1175,7 @@ fn atomic_store(
order: AtomicOrdering,
size: Size,
) {
// TODO(antoyo): handle alignment.
// FIXME(antoyo): handle alignment.
let atomic_store =
self.context.get_builtin_function(format!("__atomic_store_{}", size.bytes()));
let ordering = self.context.new_rvalue_from_int(self.i32_type, order.to_gcc());
@@ -1205,7 +1205,7 @@ fn gep(
let ptr_type = ptr.get_type();
let mut pointee_type = ptr.get_type();
// NOTE: we cannot use array indexing here like in inbounds_gep because array indexing is
// always considered in bounds in GCC (TODO(antoyo): to be verified).
// always considered in bounds in GCC (FIXME(antoyo): to be verified).
// So, we have to cast to a number.
let mut result = self.context.new_bitcast(self.location, ptr, self.sizet_type);
// FIXME(antoyo): if there were more than 1 index, this code is probably wrong and would
@@ -1233,7 +1233,7 @@ fn inbounds_gep(
) -> RValue<'gcc> {
// NOTE: due to opaque pointers now being used, we need to cast here.
let ptr = self.context.new_cast(self.location, ptr, typ.make_pointer());
// NOTE: array indexing is always considered in bounds in GCC (TODO(antoyo): to be verified).
// NOTE: array indexing is always considered in bounds in GCC (FIXME(antoyo): to be verified).
let mut indices = indices.iter();
let index = indices.next().expect("first index in inbounds_gep");
let mut result = self.context.new_array_access(self.location, ptr, *index);
@@ -1245,14 +1245,14 @@ fn inbounds_gep(
/* Casts */
fn trunc(&mut self, value: RValue<'gcc>, dest_ty: Type<'gcc>) -> RValue<'gcc> {
// TODO(antoyo): check that it indeed truncates the value.
// FIXME(antoyo): check that it indeed truncates the value.
self.gcc_int_cast(value, dest_ty)
}
fn sext(&mut self, value: RValue<'gcc>, dest_ty: Type<'gcc>) -> RValue<'gcc> {
// TODO(antoyo): check that it indeed sign extends the value.
// FIXME(antoyo): check that it indeed sign extends the value.
if dest_ty.dyncast_vector().is_some() {
// TODO(antoyo): nothing to do as it is only for LLVM?
// FIXME(antoyo): nothing to do as it is only for LLVM?
return value;
}
self.context.new_cast(self.location, value, dest_ty)
@@ -1275,7 +1275,7 @@ fn sitofp(&mut self, value: RValue<'gcc>, dest_ty: Type<'gcc>) -> RValue<'gcc> {
}
fn fptrunc(&mut self, value: RValue<'gcc>, dest_ty: Type<'gcc>) -> RValue<'gcc> {
// TODO(antoyo): make sure it truncates.
// FIXME(antoyo): make sure it truncates.
set_rvalue_location(self, self.context.new_cast(self.location, value, dest_ty))
}
@@ -1405,7 +1405,7 @@ fn memcpy(
let dst = self.pointercast(dst, self.type_i8p());
let src = self.pointercast(src, self.type_ptr_to(self.type_void()));
let memcpy = self.context.get_builtin_function("memcpy");
// TODO(antoyo): handle aligns and is_volatile.
// FIXME(antoyo): handle aligns and is_volatile.
self.block.add_eval(
self.location,
self.context.new_call(self.location, memcpy, &[dst, src, size]),
@@ -1428,7 +1428,7 @@ fn memmove(
let src = self.pointercast(src, self.type_ptr_to(self.type_void()));
let memmove = self.context.get_builtin_function("memmove");
// TODO(antoyo): handle is_volatile.
// FIXME(antoyo): handle is_volatile.
self.block.add_eval(
self.location,
self.context.new_call(self.location, memmove, &[dst, src, size]),
@@ -1447,7 +1447,7 @@ fn memset(
let _is_volatile = flags.contains(MemFlags::VOLATILE);
let ptr = self.pointercast(ptr, self.type_i8p());
let memset = self.context.get_builtin_function("memset");
// TODO(antoyo): handle align and is_volatile.
// FIXME(antoyo): handle align and is_volatile.
let fill_byte = self.context.new_cast(self.location, fill_byte, self.i32_type);
let size = self.intcast(size, self.type_size_t(), false);
self.block.add_eval(
@@ -1599,7 +1599,7 @@ fn cleanup_landing_pad(&mut self, pers_fn: Function<'gcc>) -> (RValue<'gcc>, RVa
let value1_type = self.u8_type.make_pointer();
let ptr = self.cx.context.new_cast(self.location, ptr, value1_type);
let value1 = ptr;
let value2 = zero; // TODO(antoyo): set the proper value here (the type of exception?).
let value2 = zero; // FIXME(antoyo): set the proper value here (the type of exception?).
(value1, value2)
}
@@ -1616,7 +1616,7 @@ fn cleanup_landing_pad(&mut self, _pers_fn: Function<'gcc>) -> (RValue<'gcc>, RV
}
fn filter_landing_pad(&mut self, pers_fn: Function<'gcc>) {
// TODO(antoyo): generate the correct landing pad
// FIXME(antoyo): generate the correct landing pad
self.cleanup_landing_pad(pers_fn);
}
@@ -1747,15 +1747,15 @@ fn atomic_fence(&mut self, order: AtomicOrdering, scope: SynchronizationScope) {
fn set_invariant_load(&mut self, load: RValue<'gcc>) {
// NOTE: Hack to consider vtable function pointer as non-global-variable function pointer.
self.normal_function_addresses.borrow_mut().insert(load);
// TODO(antoyo)
// FIXME(antoyo)
}
fn lifetime_start(&mut self, _ptr: RValue<'gcc>, _size: Size) {
// TODO(antoyo)
// FIXME(antoyo)
}
fn lifetime_end(&mut self, _ptr: RValue<'gcc>, _size: Size) {
// TODO(antoyo)
// FIXME(antoyo)
}
fn call(
@@ -1771,7 +1771,7 @@ fn call(
// FIXME(antoyo): remove when having a proper API.
let gcc_func = unsafe { std::mem::transmute::<RValue<'gcc>, Function<'gcc>>(func) };
let call = if self.functions.borrow().values().any(|value| *value == gcc_func) {
// TODO(antoyo): remove when the API supports a different type for functions.
// FIXME(antoyo): remove when the API supports a different type for functions.
let func: Function<'gcc> = self.cx.rvalue_as_function(func);
self.function_call(func, args, funclet)
} else {
@@ -1779,7 +1779,7 @@ fn call(
self.function_ptr_call(typ, func, args, funclet)
};
if let Some(_fn_abi) = fn_abi {
// TODO(bjorn3): Apply function attributes
// FIXME(bjorn3): Apply function attributes
}
call
}
@@ -1886,7 +1886,7 @@ fn int_min(signed: bool, int_width: u64) -> i128 {
if signed { i128::MIN >> (128 - int_width) } else { 0 }
}
// TODO: rewrite using a generic function with <F: Float>.
// FIXME: rewrite using a generic function with <F: Float>.
let compute_clamp_bounds_half = |signed: bool, int_width: u64| -> (u128, u128) {
let rounded_min =
ieee::Half::from_i128_r(int_min(signed, int_width), Round::TowardZero);
@@ -2023,7 +2023,7 @@ pub fn shuffle_vector(
block.add_assignment(self.location, mask_var, mask);
let mask = mask_var.to_rvalue();
// TODO(antoyo): use a recursive unqualified() here.
// FIXME(antoyo): use a recursive unqualified() here.
let vector_type = v1.get_type().unqualified().dyncast_vector().expect("vector type");
let element_type = vector_type.get_element_type();
let vec_num_units = vector_type.get_num_units();
@@ -2387,7 +2387,7 @@ pub fn vector_select(
#[cfg(feature = "master")]
let (cond, element_type) = {
// TODO(antoyo): dyncast_vector should not require a call to unqualified.
// FIXME(antoyo): dyncast_vector should not require a call to unqualified.
let then_val_vector_type =
then_val.get_type().unqualified().dyncast_vector().expect("vector type");
let then_val_element_type = then_val_vector_type.get_element_type();
@@ -2426,7 +2426,7 @@ pub fn vector_select(
// NOTE: sometimes, the type of else_val can be different than the type of then_val in
// libgccjit (vector of int vs vector of int32_t), but they should be the same for the AND
// operation to work.
// TODO: remove bitcast now that vector types can be compared?
// FIXME: remove bitcast now that vector types can be compared?
let else_val = self.context.new_bitcast(self.location, else_val, then_val.get_type());
let else_vals = inverted_masks & else_val;
@@ -2508,7 +2508,7 @@ fn to_gcc_comparison(&self) -> ComparisonOp {
impl ToGccComp for RealPredicate {
fn to_gcc_comparison(&self) -> ComparisonOp {
// TODO(antoyo): check that ordered vs non-ordered is respected.
// FIXME(antoyo): check that ordered vs non-ordered is respected.
match *self {
RealPredicate::RealPredicateFalse => unreachable!(),
RealPredicate::RealOEQ => ComparisonOp::Equals,
@@ -2550,7 +2550,7 @@ fn to_gcc(self) -> i32 {
use MemOrdering::*;
let ordering = match self {
AtomicOrdering::Relaxed => __ATOMIC_RELAXED, // TODO(antoyo): check if that's the same.
AtomicOrdering::Relaxed => __ATOMIC_RELAXED, // FIXME(antoyo): check if that's the same.
AtomicOrdering::Acquire => __ATOMIC_ACQUIRE,
AtomicOrdering::Release => __ATOMIC_RELEASE,
AtomicOrdering::AcqRel => __ATOMIC_ACQ_REL,
+2 -2
View File
@@ -60,7 +60,7 @@ pub fn get_fn<'gcc, 'tcx>(cx: &CodegenCx<'gcc, 'tcx>, instance: Instance<'tcx>)
// reference. It also occurs when testing libcore and in some
// other weird situations. Annoying.
if cx.val_ty(func) != ptrtype {
// TODO(antoyo): cast the pointer.
// FIXME(antoyo): cast the pointer.
func
}
else {
@@ -76,7 +76,7 @@ pub fn get_fn<'gcc, 'tcx>(cx: &CodegenCx<'gcc, 'tcx>, instance: Instance<'tcx>)
{
let instance_def_id = instance.def_id();
// TODO(antoyo): set linkage and attributes.
// FIXME(antoyo): set linkage and attributes.
// Apply an appropriate linkage/visibility value to our item that we
// just declared.
+8 -8
View File
@@ -26,13 +26,13 @@ pub fn const_u16(&self, i: u16) -> RValue<'gcc> {
}
fn global_string(&self, string: &str) -> LValue<'gcc> {
// TODO(antoyo): handle non-null-terminated strings.
// FIXME(antoyo): handle non-null-terminated strings.
let string = self.context.new_string_literal(string);
let sym = self.generate_local_symbol_name("str");
let global = self.declare_private_global(&sym, self.val_ty(string));
global.global_set_initializer_rvalue(string);
global
// TODO(antoyo): set linkage.
// FIXME(antoyo): set linkage.
}
pub fn const_bitcast(&self, value: RValue<'gcc>, typ: Type<'gcc>) -> RValue<'gcc> {
@@ -204,7 +204,7 @@ fn const_str(&self, s: &str) -> (RValue<'gcc>, RValue<'gcc>) {
fn const_struct(&self, values: &[RValue<'gcc>], packed: bool) -> RValue<'gcc> {
let fields: Vec<_> = values.iter().map(|value| value.get_type()).collect();
// TODO(antoyo): cache the type? It's anonymous, so probably not.
// FIXME(antoyo): cache the type? It's anonymous, so probably not.
let typ = self.type_struct(&fields, packed);
let struct_type = typ.is_struct().expect("struct type");
self.context.new_struct_constructor(None, struct_type.as_type(), None, values)
@@ -216,12 +216,12 @@ fn const_vector(&self, values: &[RValue<'gcc>]) -> RValue<'gcc> {
}
fn const_to_opt_uint(&self, _v: RValue<'gcc>) -> Option<u64> {
// TODO(antoyo)
// FIXME(antoyo)
None
}
fn const_to_opt_u128(&self, _v: RValue<'gcc>, _sign_ext: bool) -> Option<u128> {
// TODO(antoyo)
// FIXME(antoyo)
None
}
@@ -236,10 +236,10 @@ fn scalar_to_backend(&self, cv: Scalar, layout: abi::Scalar, ty: Type<'gcc>) ->
// NOTE: since the intrinsic _xabort is called with a bitcast, which
// is non-const, but expects a constant, do a normal cast instead of a bitcast.
// FIXME(antoyo): fix bitcast to work in constant contexts.
// TODO(antoyo): perhaps only use bitcast for pointers?
// FIXME(antoyo): perhaps only use bitcast for pointers?
self.context.new_cast(None, value, ty)
} else {
// TODO(bjorn3): assert size is correct
// FIXME(bjorn3): assert size is correct
self.const_bitcast(value, ty)
}
}
@@ -270,7 +270,7 @@ fn scalar_to_backend(&self, cv: Scalar, layout: abi::Scalar, ty: Type<'gcc>) ->
_ => self.static_addr_of(alloc, None),
};
if !self.sess().fewer_names() {
// TODO(antoyo): set value name.
// FIXME(antoyo): set value name.
}
value
}
+7 -7
View File
@@ -166,7 +166,7 @@ fn codegen_static(&mut self, def_id: DefId) {
unimplemented!();
}
} else {
// TODO(antoyo): set link section.
// FIXME(antoyo): set link section.
}
if attrs.flags.contains(CodegenFnAttrFlags::USED_COMPILER)
@@ -180,7 +180,7 @@ fn codegen_static(&mut self, def_id: DefId) {
impl<'gcc, 'tcx> CodegenCx<'gcc, 'tcx> {
/// Add a global value to a list to be stored in the `llvm.used` variable, an array of i8*.
pub fn add_used_global(&mut self, _global: RValue<'gcc>) {
// TODO(antoyo)
// FIXME(antoyo)
}
#[cfg_attr(not(feature = "master"), expect(unused_variables))]
@@ -198,7 +198,7 @@ pub fn static_addr_of_mut(
let global = match kind {
Some(kind) if !self.tcx.sess.fewer_names() => {
let name = self.generate_local_symbol_name(kind);
// TODO(antoyo): check if it's okay that no link_section is set.
// FIXME(antoyo): check if it's okay that no link_section is set.
let typ = self.val_ty(cv).get_aligned(align.bytes());
self.declare_private_global(&name[..], typ)
@@ -209,7 +209,7 @@ pub fn static_addr_of_mut(
}
};
global.global_set_initializer_rvalue(cv);
// TODO(antoyo): set unnamed address.
// FIXME(antoyo): set unnamed address.
let rvalue = global.get_address(None);
self.global_lvalues.borrow_mut().insert(rvalue, global);
rvalue
@@ -276,7 +276,7 @@ pub(crate) fn get_static_inner(&self, def_id: DefId, gcc_type: Type<'gcc>) -> LV
};
if !def_id.is_local() {
let needs_dll_storage_attr = false; // TODO(antoyo)
let needs_dll_storage_attr = false; // FIXME(antoyo)
// If this assertion triggers, there's something wrong with commandline
// argument validation.
@@ -303,7 +303,7 @@ pub(crate) fn get_static_inner(&self, def_id: DefId, gcc_type: Type<'gcc>) -> LV
}
}
// TODO(antoyo): set dll storage class.
// FIXME(antoyo): set dll storage class.
self.instances.borrow_mut().insert(instance, global);
global
@@ -412,7 +412,7 @@ fn check_and_apply_linkage<'gcc, 'tcx>(
let real_name =
format!("_rust_extern_with_linkage_{:016x}_{sym}", cx.tcx.stable_crate_id(LOCAL_CRATE));
let global2 = cx.define_global(&real_name, gcc_type, is_tls, attrs.link_section);
// TODO(antoyo): set linkage.
// FIXME(antoyo): set linkage.
let value = cx.const_ptrcast(global1.get_address(None), gcc_type);
global2.global_set_initializer_rvalue(value);
global2
+13 -13
View File
@@ -37,7 +37,7 @@ pub struct CodegenCx<'gcc, 'tcx> {
pub codegen_unit: &'tcx CodegenUnit<'tcx>,
pub context: &'gcc Context<'gcc>,
// TODO(bjorn3): Can this field be removed?
// FIXME(bjorn3): Can this field be removed?
pub current_func: RefCell<Option<Function<'gcc>>>,
pub normal_function_addresses: RefCell<FxHashSet<RValue<'gcc>>>,
pub function_address_names: RefCell<FxHashMap<RValue<'gcc>, String>>,
@@ -100,7 +100,7 @@ pub struct CodegenCx<'gcc, 'tcx> {
pub vtables:
RefCell<FxHashMap<(Ty<'tcx>, Option<ty::ExistentialTraitRef<'tcx>>), RValue<'gcc>>>,
// TODO(antoyo): improve the SSA API to not require those.
// FIXME(antoyo): improve the SSA API to not require those.
/// Mapping from function pointer type to indexes of on stack parameters.
pub on_stack_params: RefCell<FxHashMap<FunctionPtrType<'gcc>, FxHashSet<usize>>>,
/// Mapping from function to indexes of on stack parameters.
@@ -110,7 +110,7 @@ pub struct CodegenCx<'gcc, 'tcx> {
pub const_globals: RefCell<FxHashMap<RValue<'gcc>, RValue<'gcc>>>,
/// Map from the address of a global variable (rvalue) to the global variable itself (lvalue).
/// TODO(antoyo): remove when the rustc API is fixed.
/// FIXME(antoyo): remove when the rustc API is fixed.
pub global_lvalues: RefCell<FxHashMap<RValue<'gcc>, LValue<'gcc>>>,
/// Cache of constant strings,
@@ -199,7 +199,7 @@ pub fn new(
let layout = tcx.layout_of(ParamEnv::reveal_all().and(tcx.types.u128)).unwrap();
let u128_align = layout.align.bytes();*/
// TODO(antoyo): re-enable the alignment when libgccjit fixed the issue in
// FIXME(antoyo): re-enable the alignment when libgccjit fixed the issue in
// gcc_jit_context_new_array_constructor (it should not use reinterpret_cast).
let i128_type = new_array_type(context, None, i64_type, 2)/*.get_aligned(i128_align)*/;
let u128_type = new_array_type(context, None, u64_type, 2)/*.get_aligned(u128_align)*/;
@@ -208,7 +208,7 @@ pub fn new(
let tls_model = to_gcc_tls_mode(tcx.sess.tls_model());
// TODO(antoyo): set alignment on those types as well.
// FIXME(antoyo): set alignment on those types as well.
let float_type = context.new_type::<f32>();
let double_type = context.new_type::<f64>();
@@ -309,7 +309,7 @@ pub fn new(
#[cfg(feature = "master")]
cleanup_blocks: Default::default(),
};
// TODO(antoyo): instead of doing this, add SsizeT to libgccjit.
// FIXME(antoyo): instead of doing this, add SsizeT to libgccjit.
cx.isize_type = usize_type.to_signed(&cx);
cx
}
@@ -382,15 +382,15 @@ pub fn bitcast_if_needed(
impl<'gcc, 'tcx> BackendTypes for CodegenCx<'gcc, 'tcx> {
type Function = Function<'gcc>;
type BasicBlock = Block<'gcc>;
type Funclet = (); // TODO(antoyo)
type Funclet = (); // FIXME(antoyo)
type Value = RValue<'gcc>;
type Type = Type<'gcc>;
type FunctionSignature = Type<'gcc>;
type DIScope = (); // TODO(antoyo)
type DIScope = (); // FIXME(antoyo)
type DILocation = Location<'gcc>;
type DIVariable = (); // TODO(antoyo)
type DIVariable = (); // FIXME(antoyo)
}
impl<'gcc, 'tcx> MiscCodegenMethods<'tcx> for CodegenCx<'gcc, 'tcx> {
@@ -414,7 +414,7 @@ fn get_fn_addr(&self, instance: Instance<'tcx>) -> RValue<'gcc> {
};
let ptr = func.get_address(None);
// TODO(antoyo): don't do this twice: i.e. in declare_fn and here.
// FIXME(antoyo): don't do this twice: i.e. in declare_fn and here.
// FIXME(antoyo): the rustc API seems to call get_fn_addr() when not needed (e.g. for FFI).
self.normal_function_addresses.borrow_mut().insert(ptr);
@@ -472,7 +472,7 @@ fn eh_personality(&self) -> Function<'gcc> {
self.declare_func(name, self.type_i32(), &[], true)
}
};
// TODO(antoyo): apply target cpu attributes.
// FIXME(antoyo): apply target cpu attributes.
self.eh_personality.set(Some(func));
func
}
@@ -482,11 +482,11 @@ fn sess(&self) -> &Session {
}
fn set_frame_pointer_type(&self, _llfn: Function<'gcc>) {
// TODO(antoyo)
// FIXME(antoyo)
}
fn apply_target_cpu_attr(&self, _llfn: Function<'gcc>) {
// TODO(antoyo)
// FIXME(antoyo)
}
fn declare_c_main(&self, fn_type: Self::Type) -> Option<Self::Function> {
@@ -6,6 +6,6 @@
impl<'a, 'gcc, 'tcx> CoverageInfoBuilderMethods<'tcx> for Builder<'a, 'gcc, 'tcx> {
fn add_coverage(&mut self, _instance: Instance<'tcx>, _kind: &CoverageKind) {
// TODO(antoyo)
// FIXME(antoyo)
}
}
+6 -6
View File
@@ -48,7 +48,7 @@ fn dbg_var_value(
}
fn insert_reference_to_gdb_debug_scripts_section_global(&mut self) {
// TODO(antoyo): insert reference to gdb debug scripts section global.
// FIXME(antoyo): insert reference to gdb debug scripts section global.
}
/// FIXME(tempdragon): Currently, this function is not yet implemented. It seems that the
@@ -153,7 +153,7 @@ fn make_mir_scope<'gcc, 'tcx>(
// FIXME(eddyb) this doesn't account for the macro-related
// `Span` fixups that `rustc_codegen_ssa::mir::debuginfo` does.
// TODO(tempdragon): Add scope support and then revert to cg_llvm version of this closure
// FIXME(tempdragon): Add scope support and then revert to cg_llvm version of this closure
// NOTE: These variables passed () here.
// Changed to comply to clippy.
@@ -162,7 +162,7 @@ fn make_mir_scope<'gcc, 'tcx>(
cx.dbg_loc(/* callsite_scope */ (), parent_scope.inlined_at, callsite_span)
});
let p_inlined_at = parent_scope.inlined_at;
// TODO(tempdragon): dbg_scope: Add support for scope extension here.
// FIXME(tempdragon): dbg_scope: Add support for scope extension here.
inlined_at.or(p_inlined_at);
debug_context.scopes[scope] = DebugScope {
@@ -225,7 +225,7 @@ fn create_vtable_debuginfo(
_trait_ref: Option<ExistentialTraitRef<'tcx>>,
_vtable: Self::Value,
) {
// TODO(antoyo)
// FIXME(antoyo)
}
fn create_function_debug_context(
@@ -262,7 +262,7 @@ fn extend_scope_to_file(
_scope_metadata: Self::DIScope,
_file: &SourceFile,
) -> Self::DIScope {
// TODO(antoyo): implement.
// FIXME(antoyo): implement.
}
fn debuginfo_finalize(&self) {
@@ -285,7 +285,7 @@ fn dbg_scope_fn(
_fn_abi: &FnAbi<'tcx, Ty<'tcx>>,
_maybe_definition_llfn: Option<Function<'gcc>>,
) -> Self::DIScope {
// TODO(antoyo): implement.
// FIXME(antoyo): implement.
}
fn dbg_loc(
+9 -9
View File
@@ -94,7 +94,7 @@ pub fn declare_entry_fn(
#[cfg(feature = "master")] callconv: Option<FnAttribute<'gcc>>,
#[cfg(not(feature = "master"))] callconv: Option<()>,
) -> Function<'gcc> {
// TODO(antoyo): use the fn_type parameter.
// FIXME(antoyo): use the fn_type parameter.
let const_string = self.context.new_type::<u8>().make_pointer().make_pointer();
let return_type = self.type_i32();
let variadic = false;
@@ -142,7 +142,7 @@ pub fn define_global(
}
pub fn get_declared_value(&self, name: &str) -> Option<RValue<'gcc>> {
// TODO(antoyo): use a different field than globals, because this seems to return a function?
// FIXME(antoyo): use a different field than globals, because this seems to return a function?
self.globals.borrow().get(name).cloned()
}
}
@@ -166,7 +166,7 @@ fn declare_raw_fn<'gcc>(
let params: Vec<_> = param_types
.iter()
.enumerate()
.map(|(index, param)| cx.context.new_parameter(None, *param, format!("param{}", index))) // TODO(antoyo): set name.
.map(|(index, param)| cx.context.new_parameter(None, *param, format!("param{}", index))) // FIXME(antoyo): set name.
.collect();
#[cfg(not(feature = "master"))]
let name = &mangle_name(name);
@@ -194,7 +194,7 @@ fn declare_raw_fn<'gcc>(
.enumerate()
.map(|(index, param)| {
cx.context.new_parameter(None, *param, format!("param{}", index))
}) // TODO(antoyo): set name.
}) // FIXME(antoyo): set name.
.collect();
let gcc_func = cx.context.new_function(
None,
@@ -228,11 +228,11 @@ fn declare_raw_fn<'gcc>(
func
};
// TODO(antoyo): set function calling convention.
// TODO(antoyo): set unnamed address.
// TODO(antoyo): set no red zone function attribute.
// TODO(antoyo): set attributes for optimisation.
// TODO(antoyo): set attributes for non lazy bind.
// FIXME(antoyo): set function calling convention.
// FIXME(antoyo): set unnamed address.
// FIXME(antoyo): set no red zone function attribute.
// FIXME(antoyo): set attributes for optimisation.
// FIXME(antoyo): set attributes for non lazy bind.
// FIXME(antoyo): invalid cast.
func
+16 -16
View File
@@ -75,7 +75,7 @@ pub fn gcc_lshr(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
let b_native = self.is_native_int_type(b_type);
if a_native && b_native {
// FIXME(antoyo): remove the casts when libgccjit can shift an unsigned number by a signed number.
// TODO(antoyo): cast to unsigned to do a logical shift if that does not work.
// FIXME(antoyo): cast to unsigned to do a logical shift if that does not work.
if a_type.is_signed(self) != b_type.is_signed(self) {
let b = self.context.new_cast(self.location, b, a_type);
a >> b
@@ -168,7 +168,7 @@ fn additive_operation(
if a_type != b_type {
if a_type.is_vector() {
// Vector types need to be bitcast.
// TODO(antoyo): perhaps use __builtin_convertvector for vector casting.
// FIXME(antoyo): perhaps use __builtin_convertvector for vector casting.
b = self.context.new_bitcast(self.location, b, a_type);
} else {
b = self.context.new_cast(self.location, b, a_type);
@@ -228,7 +228,7 @@ fn multiplicative_operation(
if !a_type.is_compatible_with(b_type) {
if a_type.is_vector() {
// Vector types need to be bitcast.
// TODO(antoyo): perhaps use __builtin_convertvector for vector casting.
// FIXME(antoyo): perhaps use __builtin_convertvector for vector casting.
b = self.context.new_bitcast(self.location, b, a_type);
} else {
b = self.context.new_cast(self.location, b, a_type);
@@ -255,9 +255,9 @@ fn multiplicative_operation(
}
pub fn gcc_sdiv(&self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
// TODO(antoyo): check if the types are signed?
// FIXME(antoyo): check if the types are signed?
// 128-bit, signed: __divti3
// TODO(antoyo): convert the arguments to signed?
// FIXME(antoyo): convert the arguments to signed?
self.multiplicative_operation(BinaryOp::Divide, "div", true, a, b)
}
@@ -284,7 +284,7 @@ pub fn gcc_checked_binop(
_ => panic!("tried to get overflow intrinsic for op applied to non-int type"),
};
// TODO(antoyo): remove duplication with intrinsic?
// FIXME(antoyo): remove duplication with intrinsic?
let name = if self.is_native_int_type(lhs.get_type()) {
match oop {
OverflowOp::Add => "__builtin_add_overflow",
@@ -306,7 +306,7 @@ pub fn gcc_checked_binop(
OverflowOp::Mul => match new_kind {
Int(I32) => ("__mulosi4", 32),
Int(I64) => ("__mulodi4", 64),
Int(I128) => ("__rust_i128_mulo", 128), // TODO(antoyo): use __muloti4d instead?
Int(I128) => ("__rust_i128_mulo", 128), // FIXME(antoyo): use __muloti4d instead?
Uint(U128) => ("__rust_u128_mulo", 128),
_ => unreachable!(),
},
@@ -317,7 +317,7 @@ pub fn gcc_checked_binop(
let intrinsic = self.context.get_builtin_function(name);
let res = self
.current_func()
// TODO(antoyo): is it correct to use rhs type instead of the parameter typ?
// FIXME(antoyo): is it correct to use rhs type instead of the parameter typ?
.new_local(self.location, rhs.get_type(), "binopResult")
.get_address(self.location);
let new_type = type_kind_to_gcc_type(new_kind);
@@ -462,7 +462,7 @@ pub fn gcc_icmp(
lhs_high = self.context.new_cast(self.location, lhs_high, unsigned_type);
rhs_high = self.context.new_cast(self.location, rhs_high, unsigned_type);
}
// TODO(antoyo): we probably need to handle signed comparison for unsigned
// FIXME(antoyo): we probably need to handle signed comparison for unsigned
// integers.
_ => (),
}
@@ -556,7 +556,7 @@ pub fn gcc_icmp(
self.context.new_rvalue_one(self.int_type),
);
}
// TODO(antoyo): cast to u128 for unsigned comparison. See below.
// FIXME(antoyo): cast to u128 for unsigned comparison. See below.
IntPredicate::IntUGT => (ComparisonOp::Equals, 2),
IntPredicate::IntUGE => (ComparisonOp::GreaterThanEquals, 1),
IntPredicate::IntULT => (ComparisonOp::Equals, 0),
@@ -602,7 +602,7 @@ pub fn gcc_icmp(
rhs = self.context.new_cast(self.location, rhs, unsigned_type);
}
}
// TODO(antoyo): we probably need to handle signed comparison for unsigned
// FIXME(antoyo): we probably need to handle signed comparison for unsigned
// integers.
_ => (),
}
@@ -693,7 +693,7 @@ pub fn gcc_shl(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
b0_block.end_with_jump(self.location, after_block);
// NOTE: cast low to its unsigned type in order to perform a logical right shift.
// TODO(antoyo): adjust this ^ comment.
// FIXME(antoyo): adjust this ^ comment.
let unsigned_type = native_int_type.to_unsigned(self.cx);
let casted_low = self.context.new_cast(self.location, self.low(a), unsigned_type);
let shift_value = self.context.new_cast(self.location, sixty_four - b, unsigned_type);
@@ -732,7 +732,7 @@ pub fn gcc_bswap(&mut self, mut arg: RValue<'gcc>, width: u64) -> RValue<'gcc> {
return self.concat_low_high_rvalues(arg_type, swapped_msb, swapped_lsb);
}
// TODO(antoyo): check if it's faster to use string literals and a
// FIXME(antoyo): check if it's faster to use string literals and a
// match instead of format!.
let bswap = self.cx.context.get_builtin_function(format!("__builtin_bswap{}", width));
// FIXME(antoyo): this cast should not be necessary. Remove
@@ -862,12 +862,12 @@ pub fn gcc_or(
self.bitwise_operation(BinaryOp::BitwiseOr, a, b, loc)
}
// TODO(antoyo): can we use https://github.com/rust-lang/compiler-builtins/blob/master/src/int/mod.rs#L379 instead?
// FIXME(antoyo): can we use https://github.com/rust-lang/compiler-builtins/blob/master/src/int/mod.rs#L379 instead?
pub fn gcc_int_cast(&self, value: RValue<'gcc>, dest_typ: Type<'gcc>) -> RValue<'gcc> {
let value_type = value.get_type();
if self.is_native_int_type_or_bool(dest_typ) && self.is_native_int_type_or_bool(value_type)
{
// TODO: use self.location.
// FIXME: use self.location.
self.context.new_cast(None, value, dest_typ)
} else if self.is_native_int_type_or_bool(dest_typ) {
self.context.new_cast(None, self.low(value), dest_typ)
@@ -888,7 +888,7 @@ pub fn gcc_int_cast(&self, value: RValue<'gcc>, dest_typ: Type<'gcc>) -> RValue<
// Since u128 and i128 are the only types that can be unsupported, we know the type of
// value and the destination type have the same size, so a bitcast is fine.
// TODO(antoyo): perhaps use __builtin_convertvector for vector casting.
// FIXME(antoyo): perhaps use __builtin_convertvector for vector casting.
self.context.new_bitcast(None, value, dest_typ)
}
}
@@ -123,7 +123,7 @@ pub fn adjust_intrinsic_arguments<'a, 'b, 'gcc, 'tcx>(
mut args: Cow<'b, [RValue<'gcc>]>,
func_name: &str,
) -> Cow<'b, [RValue<'gcc>]> {
// TODO: this might not be a good way to workaround the missing tile builtins.
// FIXME: this might not be a good way to workaround the missing tile builtins.
if func_name == "__builtin_trap" {
return vec![].into();
}
@@ -1578,7 +1578,7 @@ pub fn intrinsic<'gcc, 'tcx>(name: &str, cx: &CodegenCx<'gcc, 'tcx>) -> Function
"llvm.x86.avx512.uitofp.round.v8f32.v8i64" => "__builtin_ia32_cvtuqq2ps512_mask",
"llvm.x86.avx512.uitofp.round.v4f32.v4i64" => "__builtin_ia32_cvtuqq2ps256_mask",
// TODO: support the tile builtins:
// FIXME: support the tile builtins:
"llvm.x86.ldtilecfg" => "__builtin_trap",
"llvm.x86.sttilecfg" => "__builtin_trap",
"llvm.x86.tileloadd64" => "__builtin_trap",
+13 -13
View File
@@ -68,8 +68,8 @@ fn get_simple_intrinsic<'gcc, 'tcx>(
sym::fmaf32 => "fmaf",
sym::fmaf64 => "fma",
// FIXME: calling `fma` from libc without FMA target feature uses expensive software emulation
sym::fmuladdf32 => "fmaf", // TODO: use gcc intrinsic analogous to llvm.fmuladd.f32
sym::fmuladdf64 => "fma", // TODO: use gcc intrinsic analogous to llvm.fmuladd.f64
sym::fmuladdf32 => "fmaf", // FIXME: use gcc intrinsic analogous to llvm.fmuladd.f32
sym::fmuladdf64 => "fma", // FIXME: use gcc intrinsic analogous to llvm.fmuladd.f64
sym::fabsf32 => "fabsf",
sym::fabsf64 => "fabs",
sym::minnumf32 => "fminf",
@@ -131,7 +131,7 @@ fn get_simple_intrinsic<'gcc, 'tcx>(
Some(cx.context.get_builtin_function(gcc_name))
}
// TODO(antoyo): We can probably remove these and use the fallback intrinsic implementation.
// FIXME(antoyo): We can probably remove these and use the fallback intrinsic implementation.
fn get_simple_function<'gcc, 'tcx>(
cx: &CodegenCx<'gcc, 'tcx>,
name: Symbol,
@@ -305,7 +305,7 @@ fn codegen_intrinsic_call(
let fn_args = instance.args;
let simple = get_simple_intrinsic(self, name);
// TODO(antoyo): Only call get_simple_function_f128 and get_simple_function_f128_2args when
// FIXME(antoyo): Only call get_simple_function_f128 and get_simple_function_f128_2args when
// it is the symbols for the supported f128 builtins.
let simple_func = get_simple_function(self, name)
.or_else(|| get_simple_function_f128(self, name))
@@ -406,7 +406,7 @@ fn codegen_intrinsic_call(
sym::volatile_load | sym::unaligned_volatile_load => {
let ptr = args[0].immediate();
let load = self.volatile_load(result.layout.gcc_type(self), ptr);
// TODO(antoyo): set alignment.
// FIXME(antoyo): set alignment.
if let BackendRepr::Scalar(scalar) = result.layout.backend_repr {
self.to_immediate_scalar(load, scalar)
} else {
@@ -697,14 +697,14 @@ fn abort(&mut self) {
}
fn assume(&mut self, value: Self::Value) {
// TODO(antoyo): switch to assume when it exists.
// FIXME(antoyo): switch to assume when it exists.
// Or use something like this:
// #define __assume(cond) do { if (!(cond)) __builtin_unreachable(); } while (0)
self.expect(value, true);
}
fn expect(&mut self, cond: Self::Value, _expected: bool) -> Self::Value {
// TODO(antoyo)
// FIXME(antoyo)
cond
}
@@ -723,7 +723,7 @@ fn va_start(&mut self, _va_list: RValue<'gcc>) -> RValue<'gcc> {
}
fn va_end(&mut self, _va_list: RValue<'gcc>) -> RValue<'gcc> {
// TODO(antoyo): implement.
// FIXME(antoyo): implement.
self.context.new_rvalue_from_int(self.int_type, 0)
}
}
@@ -945,7 +945,7 @@ fn bit_reverse(&mut self, width: u64, value: RValue<'gcc>) -> RValue<'gcc> {
if width == 8 { step3 } else { self.gcc_bswap(step3, width) }
}
128 => {
// TODO(antoyo): find a more efficient implementation?
// FIXME(antoyo): find a more efficient implementation?
let sixty_four = self.gcc_int(typ, 64);
let right_shift = self.gcc_lshr(value, sixty_four);
let high = self.gcc_int_cast(right_shift, self.u64_type);
@@ -1026,7 +1026,7 @@ fn use_builtin_function<'a, 'gcc, 'tcx>(
builder.context.new_cast(builder.location, res, builder.u32_type)
}
// TODO(antoyo): use width?
// FIXME(antoyo): use width?
let result_type = self.u32_type;
let mut arg_type = arg.get_type();
let arg = if arg_type.is_signed(self.cx) {
@@ -1035,7 +1035,7 @@ fn use_builtin_function<'a, 'gcc, 'tcx>(
} else {
arg
};
// TODO(antoyo): write a new function Type::is_compatible_with(&Type) and use it here
// FIXME(antoyo): write a new function Type::is_compatible_with(&Type) and use it here
// instead of using is_uint().
if arg_type.is_uchar(self.cx) || arg_type.is_ushort(self.cx) || arg_type.is_uint(self.cx) {
let builtin = if count_leading { "__builtin_clz" } else { "__builtin_ctz" };
@@ -1139,7 +1139,7 @@ fn count_trailing_zeroes_nonzero(&mut self, width: u64, arg: RValue<'gcc>) -> RV
}
fn pop_count(&mut self, value: RValue<'gcc>) -> RValue<'gcc> {
// TODO(antoyo): use the optimized version with fewer operations.
// FIXME(antoyo): use the optimized version with fewer operations.
let result_type = self.u32_type;
let arg_type = value.get_type();
let value_type = arg_type.to_unsigned(self.cx);
@@ -1148,7 +1148,7 @@ fn pop_count(&mut self, value: RValue<'gcc>) -> RValue<'gcc> {
if arg_type.is_signed(self.cx) { self.gcc_int_cast(value, value_type) } else { value };
// only break apart 128-bit ints if they're not natively supported
// TODO(antoyo): remove this if/when native 128-bit integers land in libgccjit
// FIXME(antoyo): remove this if/when native 128-bit integers land in libgccjit
if value_type.is_u128(self.cx) && !self.cx.supports_128bit_integers {
let sixty_four = self.gcc_int(value_type, 64);
let right_shift = self.gcc_lshr(value, sixty_four);
@@ -53,7 +53,7 @@ macro_rules! require_simd {
};
}
// TODO(antoyo): refactor with the above require_simd macro that was changed in cg_llvm.
// FIXME(antoyo): refactor with the above require_simd macro that was changed in cg_llvm.
#[cfg(feature = "master")]
macro_rules! require_simd2 {
($ty: expr, $variant:ident) => {{
@@ -473,14 +473,14 @@ macro_rules! require_simd2 {
}
);
// TODO(antoyo): For simd_insert, check if the index is a constant of the correct size.
// FIXME(antoyo): For simd_insert, check if the index is a constant of the correct size.
let vector = args[0].immediate();
let index = args[1].immediate();
let value = args[2].immediate();
let variable = bx.current_func().new_local(None, vector.get_type(), "new_vector");
bx.llbb().add_assignment(None, variable, vector);
let lvalue = bx.context.new_vector_access(None, variable.to_rvalue(), index);
// TODO(antoyo): if simd_insert is constant, use BIT_REF.
// FIXME(antoyo): if simd_insert is constant, use BIT_REF.
bx.llbb().add_assignment(None, lvalue, value);
return Ok(variable.to_rvalue());
}
@@ -491,7 +491,7 @@ macro_rules! require_simd2 {
ret_ty == in_elem,
InvalidMonomorphization::ReturnType { span, name, in_elem, in_ty, ret_ty }
);
// TODO(antoyo): For simd_extract, check if the index is a constant of the correct size.
// FIXME(antoyo): For simd_extract, check if the index is a constant of the correct size.
let vector = args[0].immediate();
let index = args[1].immediate();
return Ok(bx.context.new_vector_access(None, vector, index).to_rvalue());
@@ -737,7 +737,7 @@ macro_rules! arith_binary {
// endian and MSB-first for big endian.
let vector = args[0].immediate();
// TODO(antoyo): dyncast_vector should not require a call to unqualified.
// FIXME(antoyo): dyncast_vector should not require a call to unqualified.
let vector_type = vector.get_type().unqualified().dyncast_vector().expect("vector type");
let elem_type = vector_type.get_element_type();
@@ -832,7 +832,7 @@ macro_rules! return_error {
let intr_name = match name {
sym::simd_ceil => "ceil",
sym::simd_fabs => "fabs", // TODO(antoyo): pand with 170141183420855150465331762880109871103
sym::simd_fabs => "fabs", // FIXME(antoyo): pand with 170141183420855150465331762880109871103
sym::simd_fcos => "cos",
sym::simd_fexp2 => "exp2",
sym::simd_fexp => "exp",
@@ -852,7 +852,7 @@ macro_rules! return_error {
let builtin_name = format!("{}{}", intr_name, elem_ty_str);
let function = bx.context.get_builtin_function(builtin_name);
// TODO(antoyo): add platform-specific behavior here for architectures that have these
// FIXME(antoyo): add platform-specific behavior here for architectures that have these
// intrinsics as instructions (for instance, gpus)
let mut vector_elements = vec![];
for i in 0..in_len {
@@ -1060,7 +1060,7 @@ fn non_ptr(t: Ty<'_>) -> Ty<'_> {
assert_eq!(underlying_ty, non_ptr(element_ty0));
// The element type of the third argument must be an integer type of any width:
// TODO: also support unsigned integers.
// FIXME: also support unsigned integers.
let (_, element_ty2) = args[2].layout.ty.simd_size_and_type(bx.tcx());
match *element_ty2.kind() {
ty::Int(_) => (),
@@ -1175,7 +1175,7 @@ fn non_ptr(t: Ty<'_>) -> Ty<'_> {
assert_eq!(underlying_ty, non_ptr(element_ty0));
// The element type of the third argument must be a signed integer type of any width:
// TODO: also support unsigned integers.
// FIXME: also support unsigned integers.
match *element_ty2.kind() {
ty::Int(_) => (),
_ => {
@@ -1273,10 +1273,10 @@ macro_rules! arith_unary {
}
(true, true) => {
// Algorithm from: https://codereview.stackexchange.com/questions/115869/saturated-signed-addition
// TODO(antoyo): improve using conditional operators if possible.
// TODO(antoyo): dyncast_vector should not require a call to unqualified.
// FIXME(antoyo): improve using conditional operators if possible.
// FIXME(antoyo): dyncast_vector should not require a call to unqualified.
let arg_type = lhs.get_type().unqualified();
// TODO(antoyo): convert lhs and rhs to unsigned.
// FIXME(antoyo): convert lhs and rhs to unsigned.
let sum = lhs + rhs;
let vector_type = arg_type.dyncast_vector().expect("vector type");
let unit = vector_type.get_num_units();
@@ -1308,13 +1308,13 @@ macro_rules! arith_unary {
res & cmp
}
(true, false) => {
// TODO(antoyo): dyncast_vector should not require a call to unqualified.
// FIXME(antoyo): dyncast_vector should not require a call to unqualified.
let arg_type = lhs.get_type().unqualified();
// TODO(antoyo): this uses the same algorithm from saturating add, but add the
// FIXME(antoyo): this uses the same algorithm from saturating add, but add the
// negative of the right operand. Find a proper subtraction algorithm.
let rhs = bx.context.new_unary_op(None, UnaryOp::Minus, arg_type, rhs);
// TODO(antoyo): convert lhs and rhs to unsigned.
// FIXME(antoyo): convert lhs and rhs to unsigned.
let sum = lhs + rhs;
let vector_type = arg_type.dyncast_vector().expect("vector type");
let unit = vector_type.get_num_units();
@@ -1391,7 +1391,7 @@ macro_rules! arith_red {
vector_reduce_fadd_reassoc,
false,
add,
0.0 // TODO: Use this argument.
0.0 // FIXME: Use this argument.
);
arith_red!(
simd_reduce_mul_unordered: BinaryOp::Mult,
@@ -1507,7 +1507,7 @@ macro_rules! bitwise_red {
// those lanes whose `mask` bit is enabled.
// The memory addresses corresponding to the “off” lanes are not accessed.
// TODO: handle the alignment.
// FIXME: handle the alignment.
// The element type of the "mask" argument must be a signed integer type of any width
let mask_ty = in_ty;
@@ -1595,7 +1595,7 @@ macro_rules! bitwise_red {
// those lanes whose `mask` bit is enabled.
// The memory addresses corresponding to the “off” lanes are not accessed.
// TODO: handle the alignment.
// FIXME: handle the alignment.
// The element type of the "mask" argument must be a signed integer type of any width
let mask_ty = in_ty;
+6 -6
View File
@@ -1,5 +1,5 @@
/*
* TODO(antoyo): implement equality in libgccjit based on https://zpz.github.io/blog/overloading-equality-operator-in-cpp-class-hierarchy/ (for type equality?)
* FIXME(antoyo): implement equality in libgccjit based on https://zpz.github.io/blog/overloading-equality-operator-in-cpp-class-hierarchy/ (for type equality?)
* For Thin LTO, this might be helpful:
// cspell:disable-next-line
* In gcc 4.6 -fwhopr was removed and became default with -flto. The non-whopr path can still be executed via -flto-partition=none.
@@ -8,9 +8,9 @@
* Maybe some missing optimizations enabled by rustc's LTO is in there: https://gcc.gnu.org/onlinedocs/gcc/Optimize-Options.html
// cspell:disable-next-line
* Like -fipa-icf (should be already enabled) and maybe -fdevirtualize-at-ltrans.
* TODO: disable debug info always being emitted. Perhaps this slows down things?
* FIXME: disable debug info always being emitted. Perhaps this slows down things?
*
* TODO(antoyo): remove the patches.
* FIXME(antoyo): remove the patches.
*/
#![feature(rustc_private)]
@@ -328,7 +328,7 @@ fn new_context<'gcc, 'tcx>(tcx: TyCtxt<'tcx>) -> Context<'gcc> {
version,
));
}
// TODO(antoyo): check if this should only be added when using -Cforce-unwind-tables=n.
// FIXME(antoyo): check if this should only be added when using -Cforce-unwind-tables=n.
context.add_command_line_option("-fno-asynchronous-unwind-tables");
context
}
@@ -425,7 +425,7 @@ fn target_machine_factory(
_opt_level: OptLevel,
_features: &[String],
) -> TargetMachineFactoryFn<Self> {
// TODO(antoyo): set opt level.
// FIXME(antoyo): set opt level.
Arc::new(|_, _| ())
}
@@ -531,7 +531,7 @@ fn target_config(sess: &Session, target_info: &LockedTargetInfo) -> TargetConfig
sess,
|feature| to_gcc_features(sess, feature),
|feature| {
// TODO: we disable Neon for now since we don't support the LLVM intrinsics for it.
// FIXME: we disable Neon for now since we don't support the LLVM intrinsics for it.
if feature == "neon" {
return false;
}
+4 -4
View File
@@ -37,7 +37,7 @@ fn predefine_static(
#[cfg(feature = "master")]
global.add_attribute(VarAttribute::Visibility(base::visibility_to_gcc(visibility)));
// TODO(antoyo): set linkage.
// FIXME(antoyo): set linkage.
self.instances.borrow_mut().insert(instance, global);
}
@@ -69,9 +69,9 @@ fn predefine_fn(
decl.add_attribute(FnAttribute::Visibility(base::visibility_to_gcc(visibility)));
}
// TODO(antoyo): call set_link_section() to allow initializing argc/argv.
// TODO(antoyo): set unique comdat.
// TODO(antoyo): use inline attribute from there in linkage.set() above.
// FIXME(antoyo): call set_link_section() to allow initializing argc/argv.
// FIXME(antoyo): set unique comdat.
// FIXME(antoyo): use inline attribute from there in linkage.set() above.
self.functions.borrow_mut().insert(symbol_name.to_string(), decl);
self.function_instances.borrow_mut().insert(instance, decl);
+5 -5
View File
@@ -66,7 +66,7 @@ pub fn type_ptr_to(&self, ty: Type<'gcc>) -> Type<'gcc> {
}
pub fn type_ptr_to_ext(&self, ty: Type<'gcc>, _address_space: AddressSpace) -> Type<'gcc> {
// TODO(antoyo): use address_space, perhaps with TYPE_ADDR_SPACE?
// FIXME(antoyo): use address_space, perhaps with TYPE_ADDR_SPACE?
ty.make_pointer()
}
@@ -110,7 +110,7 @@ pub fn type_struct(&self, fields: &[Type<'gcc>], packed: bool) -> Type<'gcc> {
.iter()
.enumerate()
.map(|(index, field)| {
self.context.new_field(None, *field, format!("field{}_TODO", index))
self.context.new_field(None, *field, format!("field{}_FIXME", index))
})
.collect();
let typ = self.context.new_struct_type(None, "struct", &fields).as_type();
@@ -213,7 +213,7 @@ fn type_kind(&self, typ: Type<'gcc>) -> TypeKind {
} else if typ == self.type_void() {
TypeKind::Void
} else {
// TODO(antoyo): support other types.
// FIXME(antoyo): support other types.
unimplemented!();
}
}
@@ -239,7 +239,7 @@ fn type_kind(&self, typ: Type<'gcc>) -> TypeKind {
} else if typ == self.type_void() {
TypeKind::Void
} else {
// TODO(antoyo): support other types.
// FIXME(antoyo): support other types.
unimplemented!();
}
}
@@ -288,7 +288,7 @@ fn float_width(&self, typ: Type<'gcc>) -> usize {
} else {
panic!("Cannot get width of float type {:?}", typ);
}
// TODO(antoyo): support other sizes.
// FIXME(antoyo): support other sizes.
}
fn int_width(&self, typ: Type<'gcc>) -> u64 {
+2 -2
View File
@@ -221,7 +221,7 @@ fn gcc_type<'gcc>(&self, cx: &CodegenCx<'gcc, 'tcx>) -> Type<'gcc> {
let ty = match *self.ty.kind() {
// NOTE: we cannot remove this match like in the LLVM codegen because the call
// to fn_ptr_backend_type handle the on-stack attribute.
// TODO(antoyo): find a less hackish way to handle the on-stack attribute.
// FIXME(antoyo): find a less hackish way to handle the on-stack attribute.
ty::FnPtr(sig_tys, hdr) => cx
.fn_ptr_backend_type(cx.fn_abi_of_fn_ptr(sig_tys.with(hdr), ty::List::empty())),
_ => self.scalar_gcc_type_at(cx, scalar, Size::ZERO),
@@ -320,7 +320,7 @@ fn scalar_pair_element_gcc_type<'gcc>(
// immediate, just like `bool` is typically `i8` in memory and only `i1`
// when immediate. We need to load/store `bool` as `i8` to avoid
// crippling LLVM optimizations or triggering other LLVM bugs with `i1`.
// TODO(antoyo): this bugs certainly don't happen in this case since the bool type is used instead of i1.
// FIXME(antoyo): these bugs certainly don't happen in this case since the bool type is used instead of i1.
if scalar.is_bool() {
return cx.type_i1();
}
@@ -73,14 +73,14 @@ fn filter(filename: &Path) -> bool {
path.to_str().expect("to_str"),
]);
// TODO(antoyo): find a way to send this via a cli argument.
// FIXME(antoyo): find a way to send this via a cli argument.
let test_target = std::env::var("CG_GCC_TEST_TARGET");
if let Ok(ref target) = test_target {
compiler.args(["--target", target]);
let linker = format!("{}-gcc", target);
compiler.args(&[format!("-Clinker={}", linker)]);
let mut env_path = std::env::var("PATH").unwrap_or_default();
// TODO(antoyo): find a better way to add the PATH necessary locally.
// FIXME(antoyo): find a better way to add the PATH necessary locally.
env_path = format!("/opt/m68k-unknown-linux-gnu/bin:{}", env_path);
compiler.env("PATH", env_path);
}
@@ -103,9 +103,7 @@ fn emit_deprecated_safe_fn_call(&self, span: Span, kind: &UnsafeOpKind) -> bool
let guarantee = format!("that {}", suggestion);
let suggestion = sm
.indentation_before(span)
.map(|indent| {
format!("{}// TODO: Audit that {}.\n", indent, suggestion) // ignore-tidy-todo
})
.map(|indent| format!("{}// FIXME: Audit that {}.\n", indent, suggestion))
.unwrap_or_default();
self.tcx.emit_node_span_lint(
+1 -1
View File
@@ -79,7 +79,7 @@ pub fn insert(&self, symbol: Symbol, span: Span) {
}
}
// todo: this function now accepts `Session` instead of `ParseSess` and should be relocated
// FIXME: this function now accepts `Session` instead of `ParseSess` and should be relocated
/// Construct a diagnostic for a language feature error due to the given `span`.
/// The `feature`'s `Symbol` is the one you used in `unstable.rs` and `rustc_span::symbol`.
#[track_caller]