From afae271d5d3719eeb92c18bc004bb6d1965a5f3f Mon Sep 17 00:00:00 2001 From: Antoni Boucher Date: Sun, 10 May 2020 10:54:30 -0400 Subject: [PATCH 01/27] Initial commit --- .github/FUNDING.yml | 2 + .github/workflows/main.yml | 93 + .gitignore | 17 + Cargo.lock | 373 ++++ Cargo.toml | 51 + LICENSE-APACHE | 176 ++ LICENSE-MIT | 23 + Readme.md | 118 ++ build.sh | 26 + build_sysroot/Cargo.toml | 19 + build_sysroot/build_sysroot.sh | 30 + build_sysroot/prepare_sysroot_src.sh | 39 + build_sysroot/src/lib.rs | 1 + cargo.sh | 23 + clean_all.sh | 5 + config.sh | 48 + example/alloc_example.rs | 41 + example/alloc_system.rs | 212 ++ ...itrary_self_types_pointers_and_wrappers.rs | 69 + example/dst-field-align.rs | 67 + example/example.rs | 208 ++ example/mini_core.rs | 585 ++++++ example/mini_core_hello_world.rs | 424 ++++ example/mod_bench.rs | 37 + example/std_example.rs | 279 +++ example/subslice-patterns-const-eval.rs | 97 + example/track-caller-attribute.rs | 40 + gcc_path | 0 ...022-core-Disable-not-compiling-tests.patch | 63 + patches/0023-core-Ignore-failing-tests.patch | 49 + prepare.sh | 22 + prepare_build.sh | 5 + rust-toolchain | 1 + rustup.sh | 29 + src/abi.rs | 286 +++ src/allocator.rs | 115 ++ src/archive.rs | 270 +++ src/asm.rs | 632 ++++++ src/back/mod.rs | 1 + src/back/write.rs | 234 +++ src/base.rs | 173 ++ src/builder.rs | 1812 +++++++++++++++++ src/callee.rs | 99 + src/common.rs | 448 ++++ src/consts.rs | 527 +++++ src/context.rs | 491 +++++ src/coverageinfo.rs | 140 ++ src/debuginfo.rs | 407 ++++ src/declare.rs | 220 ++ src/intrinsic/llvm.rs | 26 + src/intrinsic/mod.rs | 1286 ++++++++++++ src/intrinsic/simd.rs | 1001 +++++++++ src/lib.rs | 342 ++++ src/mangled_std_symbols.rs | 4 + src/mono_item.rs | 59 + src/type_.rs | 355 ++++ src/type_of.rs | 366 ++++ src/va_arg.rs | 179 ++ test.sh | 197 ++ tests/lib.rs | 50 + tests/run/abort1.rs | 51 + tests/run/abort2.rs | 53 + tests/run/array.rs | 229 +++ tests/run/asm.rs | 66 + tests/run/assign.rs | 153 ++ tests/run/closure.rs | 230 +++ tests/run/condition.rs | 320 +++ tests/run/empty_main.rs | 39 + tests/run/exit.rs | 49 + tests/run/exit_code.rs | 39 + tests/run/fun_ptr.rs | 223 ++ tests/run/int_overflow.rs | 129 ++ tests/run/mut_ref.rs | 165 ++ tests/run/operations.rs | 221 ++ tests/run/ptr_cast.rs | 222 ++ tests/run/return-tuple.rs | 72 + tests/run/slice.rs | 128 ++ tests/run/static.rs | 106 + tests/run/structs.rs | 70 + tests/run/tuple.rs | 51 + 80 files changed, 15608 insertions(+) create mode 100644 .github/FUNDING.yml create mode 100644 .github/workflows/main.yml create mode 100644 .gitignore create mode 100644 Cargo.lock create mode 100644 Cargo.toml create mode 100644 LICENSE-APACHE create mode 100644 LICENSE-MIT create mode 100644 Readme.md create mode 100755 build.sh create mode 100644 build_sysroot/Cargo.toml create mode 100755 build_sysroot/build_sysroot.sh create mode 100755 build_sysroot/prepare_sysroot_src.sh create mode 100644 build_sysroot/src/lib.rs create mode 100755 cargo.sh create mode 100755 clean_all.sh create mode 100644 config.sh create mode 100644 example/alloc_example.rs create mode 100644 example/alloc_system.rs create mode 100644 example/arbitrary_self_types_pointers_and_wrappers.rs create mode 100644 example/dst-field-align.rs create mode 100644 example/example.rs create mode 100644 example/mini_core.rs create mode 100644 example/mini_core_hello_world.rs create mode 100644 example/mod_bench.rs create mode 100644 example/std_example.rs create mode 100644 example/subslice-patterns-const-eval.rs create mode 
100644 example/track-caller-attribute.rs create mode 100644 gcc_path create mode 100644 patches/0022-core-Disable-not-compiling-tests.patch create mode 100644 patches/0023-core-Ignore-failing-tests.patch create mode 100755 prepare.sh create mode 100755 prepare_build.sh create mode 100644 rust-toolchain create mode 100755 rustup.sh create mode 100644 src/abi.rs create mode 100644 src/allocator.rs create mode 100644 src/archive.rs create mode 100644 src/asm.rs create mode 100644 src/back/mod.rs create mode 100644 src/back/write.rs create mode 100644 src/base.rs create mode 100644 src/builder.rs create mode 100644 src/callee.rs create mode 100644 src/common.rs create mode 100644 src/consts.rs create mode 100644 src/context.rs create mode 100644 src/coverageinfo.rs create mode 100644 src/debuginfo.rs create mode 100644 src/declare.rs create mode 100644 src/intrinsic/llvm.rs create mode 100644 src/intrinsic/mod.rs create mode 100644 src/intrinsic/simd.rs create mode 100644 src/lib.rs create mode 100644 src/mangled_std_symbols.rs create mode 100644 src/mono_item.rs create mode 100644 src/type_.rs create mode 100644 src/type_of.rs create mode 100644 src/va_arg.rs create mode 100755 test.sh create mode 100644 tests/lib.rs create mode 100644 tests/run/abort1.rs create mode 100644 tests/run/abort2.rs create mode 100644 tests/run/array.rs create mode 100644 tests/run/asm.rs create mode 100644 tests/run/assign.rs create mode 100644 tests/run/closure.rs create mode 100644 tests/run/condition.rs create mode 100644 tests/run/empty_main.rs create mode 100644 tests/run/exit.rs create mode 100644 tests/run/exit_code.rs create mode 100644 tests/run/fun_ptr.rs create mode 100644 tests/run/int_overflow.rs create mode 100644 tests/run/mut_ref.rs create mode 100644 tests/run/operations.rs create mode 100644 tests/run/ptr_cast.rs create mode 100644 tests/run/return-tuple.rs create mode 100644 tests/run/slice.rs create mode 100644 tests/run/static.rs create mode 100644 tests/run/structs.rs create mode 100644 tests/run/tuple.rs diff --git a/.github/FUNDING.yml b/.github/FUNDING.yml new file mode 100644 index 0000000000000..cf2dcd4d099c2 --- /dev/null +++ b/.github/FUNDING.yml @@ -0,0 +1,2 @@ +github: antoyo +patreon: antoyo diff --git a/.github/workflows/main.yml b/.github/workflows/main.yml new file mode 100644 index 0000000000000..774a68aa5e1a4 --- /dev/null +++ b/.github/workflows/main.yml @@ -0,0 +1,93 @@ +name: CI + +on: + - push + - pull_request + +jobs: + build: + runs-on: ubuntu-latest + + strategy: + fail-fast: false + + steps: + - uses: actions/checkout@v2 + + - name: Install packages + run: sudo apt-get install ninja-build ripgrep + + - name: Download artifact + uses: dawidd6/action-download-artifact@v2 + with: + workflow: main.yml + name: libgccjit.so + path: gcc-build + repo: antoyo/gcc + + - name: Setup path to libgccjit + run: | + echo $(readlink -f gcc-build) > gcc_path + ln gcc-build/libgccjit.so gcc-build/libgccjit.so.0 + + - name: Set LIBRARY_PATH + run: echo "LIBRARY_PATH=$(cat gcc_path)" >> $GITHUB_ENV + + # https://github.com/actions/cache/issues/133 + - name: Fixup owner of ~/.cargo/ + # Don't remove the trailing /. It is necessary to follow the symlink. 
+ run: sudo chown -R $(whoami):$(id -ng) ~/.cargo/ + + - name: Cache cargo installed crates + uses: actions/cache@v1.1.2 + with: + path: ~/.cargo/bin + key: cargo-installed-crates2-ubuntu-latest + + - name: Cache cargo registry + uses: actions/cache@v1 + with: + path: ~/.cargo/registry + key: ${{ runner.os }}-cargo-registry2-${{ hashFiles('**/Cargo.lock') }} + + - name: Cache cargo index + uses: actions/cache@v1 + with: + path: ~/.cargo/git + key: ${{ runner.os }}-cargo-index-${{ hashFiles('**/Cargo.lock') }} + + - name: Cache cargo target dir + uses: actions/cache@v1.1.2 + with: + path: target + key: ${{ runner.os }}-cargo-build-target-${{ hashFiles('rust-toolchain') }} + + - name: Build + run: | + ./prepare_build.sh + ./build.sh + ./clean_all.sh + + - name: Prepare dependencies + run: | + git config --global user.email "user@example.com" + git config --global user.name "User" + ./prepare.sh + + # Compile is a separate step, as the actions-rs/cargo action supports error annotations + - name: Compile + uses: actions-rs/cargo@v1.0.3 + with: + command: build + args: --release + + - name: Test + run: | + # Enable backtraces for easier debugging + export RUST_BACKTRACE=1 + + # Reduce amount of benchmark runs as they are slow + export COMPILE_RUNS=2 + export RUN_RUNS=2 + + ./test.sh --release diff --git a/.gitignore b/.gitignore new file mode 100644 index 0000000000000..d9f0ae05ed40d --- /dev/null +++ b/.gitignore @@ -0,0 +1,17 @@ +target +**/*.rs.bk +*.rlib +*.o +perf.data +perf.data.old +*.events +*.string* +/build_sysroot/sysroot +/build_sysroot/Cargo.lock +/build_sysroot/test_target/Cargo.lock +/rust +/regex +gimple* +*asm +res +test-backend diff --git a/Cargo.lock b/Cargo.lock new file mode 100644 index 0000000000000..952c03b05c7f4 --- /dev/null +++ b/Cargo.lock @@ -0,0 +1,373 @@ +# This file is automatically @generated by Cargo. +# It is not intended for manual editing. 
+version = 3 + +[[package]] +name = "aho-corasick" +version = "0.7.18" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1e37cfd5e7657ada45f742d6e99ca5788580b5c529dc78faf11ece6dc702656f" +dependencies = [ + "memchr", +] + +[[package]] +name = "ar" +version = "0.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "450575f58f7bee32816abbff470cbc47797397c2a81e0eaced4b98436daf52e1" + +[[package]] +name = "autocfg" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cdb031dd78e28731d87d56cc8ffef4a8f36ca26c38fe2de700543e627f8a464a" + +[[package]] +name = "bitflags" +version = "1.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cf1de2fe8c75bc145a2f577add951f8134889b4795d47466a54a5c846d691693" + +[[package]] +name = "cfg-if" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd" + +[[package]] +name = "crc32fast" +version = "1.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "81156fece84ab6a9f2afdb109ce3ae577e42b1228441eded99bd77f627953b1a" +dependencies = [ + "cfg-if", +] + +[[package]] +name = "fm" +version = "0.1.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "68fda3cff2cce84c19e5dfa5179a4b35d2c0f18b893f108002b8a6a54984acca" +dependencies = [ + "regex", +] + +[[package]] +name = "gccjit" +version = "1.0.0" +source = "git+https://github.com/antoyo/gccjit.rs#0572117c7ffdfcb0e6c6526d45266c3f34796bea" +dependencies = [ + "gccjit_sys", +] + +[[package]] +name = "gccjit_sys" +version = "0.0.1" +source = "git+https://github.com/antoyo/gccjit.rs#0572117c7ffdfcb0e6c6526d45266c3f34796bea" +dependencies = [ + "libc 0.1.12", +] + +[[package]] +name = "getopts" +version = "0.2.21" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "14dbbfd5c71d70241ecf9e6f13737f7b5ce823821063188d7e46c41d371eebd5" +dependencies = [ + "unicode-width", +] + +[[package]] +name = "getrandom" +version = "0.2.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7fcd999463524c52659517fe2cea98493cfe485d10565e7b0fb07dbba7ad2753" +dependencies = [ + "cfg-if", + "libc 0.2.98", + "wasi", +] + +[[package]] +name = "hashbrown" +version = "0.11.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ab5ef0d4909ef3724cc8cce6ccc8572c5c817592e9285f5464f8e86f8bd3726e" + +[[package]] +name = "hermit-abi" +version = "0.1.19" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "62b467343b94ba476dcb2500d242dadbb39557df889310ac77c5d99100aaac33" +dependencies = [ + "libc 0.2.98", +] + +[[package]] +name = "indexmap" +version = "1.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bc633605454125dec4b66843673f01c7df2b89479b32e0ed634e43a91cff62a5" +dependencies = [ + "autocfg", + "hashbrown", +] + +[[package]] +name = "lang_tester" +version = "0.3.13" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "96bd995a092cac79868250589869b5a5d656b02a02bd74c8ebdc566dc7203090" +dependencies = [ + "fm", + "getopts", + "libc 0.2.98", + "num_cpus", + "termcolor", + "threadpool", + "wait-timeout", + "walkdir", +] + +[[package]] +name = "libc" +version = "0.1.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"e32a70cf75e5846d53a673923498228bbec6a8624708a9ea5645f075d6276122" + +[[package]] +name = "libc" +version = "0.2.98" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "320cfe77175da3a483efed4bc0adc1968ca050b098ce4f2f1c13a56626128790" + +[[package]] +name = "memchr" +version = "2.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b16bd47d9e329435e309c58469fe0791c2d0d1ba96ec0954152a5ae2b04387dc" + +[[package]] +name = "num_cpus" +version = "1.13.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "05499f3756671c15885fee9034446956fff3f243d6077b91e5767df161f766b3" +dependencies = [ + "hermit-abi", + "libc 0.2.98", +] + +[[package]] +name = "object" +version = "0.25.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a38f2be3697a57b4060074ff41b44c16870d916ad7877c17696e063257482bc7" +dependencies = [ + "crc32fast", + "indexmap", + "memchr", +] + +[[package]] +name = "ppv-lite86" +version = "0.2.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ac74c624d6b2d21f425f752262f42188365d7b8ff1aff74c82e45136510a4857" + +[[package]] +name = "rand" +version = "0.8.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2e7573632e6454cf6b99d7aac4ccca54be06da05aca2ef7423d22d27d4d4bcd8" +dependencies = [ + "libc 0.2.98", + "rand_chacha", + "rand_core", + "rand_hc", +] + +[[package]] +name = "rand_chacha" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e6c10a63a0fa32252be49d21e7709d4d4baf8d231c2dbce1eaa8141b9b127d88" +dependencies = [ + "ppv-lite86", + "rand_core", +] + +[[package]] +name = "rand_core" +version = "0.6.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d34f1408f55294453790c48b2f1ebbb1c5b4b7563eb1f418bcfcfdbb06ebb4e7" +dependencies = [ + "getrandom", +] + +[[package]] +name = "rand_hc" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d51e9f596de227fda2ea6c84607f5558e196eeaf43c986b724ba4fb8fdf497e7" +dependencies = [ + "rand_core", +] + +[[package]] +name = "redox_syscall" +version = "0.2.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5ab49abadf3f9e1c4bc499e8845e152ad87d2ad2d30371841171169e9d75feee" +dependencies = [ + "bitflags", +] + +[[package]] +name = "regex" +version = "1.5.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d07a8629359eb56f1e2fb1652bb04212c072a87ba68546a04065d525673ac461" +dependencies = [ + "aho-corasick", + "memchr", + "regex-syntax", +] + +[[package]] +name = "regex-syntax" +version = "0.6.25" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f497285884f3fcff424ffc933e56d7cbca511def0c9831a7f9b5f6153e3cc89b" + +[[package]] +name = "remove_dir_all" +version = "0.5.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3acd125665422973a33ac9d3dd2df85edad0f4ae9b00dafb1a05e43a9f5ef8e7" +dependencies = [ + "winapi", +] + +[[package]] +name = "rustc_codegen_gcc" +version = "0.1.0" +dependencies = [ + "ar", + "gccjit", + "lang_tester", + "object", + "target-lexicon", + "tempfile", +] + +[[package]] +name = "same-file" +version = "1.0.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "93fc1dc3aaa9bfed95e02e6eadabb4baf7e3078b0bd1b4d7b6b0b68378900502" +dependencies = [ + "winapi-util", +] + +[[package]] +name = "target-lexicon" 
+version = "0.10.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ab0e7238dcc7b40a7be719a25365910f6807bd864f4cce6b2e6b873658e2b19d" + +[[package]] +name = "tempfile" +version = "3.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dac1c663cfc93810f88aed9b8941d48cabf856a1b111c29a40439018d870eb22" +dependencies = [ + "cfg-if", + "libc 0.2.98", + "rand", + "redox_syscall", + "remove_dir_all", + "winapi", +] + +[[package]] +name = "termcolor" +version = "1.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2dfed899f0eb03f32ee8c6a0aabdb8a7949659e3466561fc0adf54e26d88c5f4" +dependencies = [ + "winapi-util", +] + +[[package]] +name = "threadpool" +version = "1.8.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d050e60b33d41c19108b32cea32164033a9013fe3b46cbd4457559bfbf77afaa" +dependencies = [ + "num_cpus", +] + +[[package]] +name = "unicode-width" +version = "0.1.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9337591893a19b88d8d87f2cec1e73fad5cdfd10e5a6f349f498ad6ea2ffb1e3" + +[[package]] +name = "wait-timeout" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9f200f5b12eb75f8c1ed65abd4b2db8a6e1b138a20de009dacee265a2498f3f6" +dependencies = [ + "libc 0.2.98", +] + +[[package]] +name = "walkdir" +version = "2.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "808cf2735cd4b6866113f648b791c6adc5714537bc222d9347bb203386ffda56" +dependencies = [ + "same-file", + "winapi", + "winapi-util", +] + +[[package]] +name = "wasi" +version = "0.10.2+wasi-snapshot-preview1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fd6fbd9a79829dd1ad0cc20627bf1ed606756a7f77edff7b66b7064f9cb327c6" + +[[package]] +name = "winapi" +version = "0.3.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5c839a674fcd7a98952e593242ea400abe93992746761e38641405d28b00f419" +dependencies = [ + "winapi-i686-pc-windows-gnu", + "winapi-x86_64-pc-windows-gnu", +] + +[[package]] +name = "winapi-i686-pc-windows-gnu" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6" + +[[package]] +name = "winapi-util" +version = "0.1.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "70ec6ce85bb158151cae5e5c87f95a8e97d2c0c4b001223f33a334e3ce5de178" +dependencies = [ + "winapi", +] + +[[package]] +name = "winapi-x86_64-pc-windows-gnu" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f" diff --git a/Cargo.toml b/Cargo.toml new file mode 100644 index 0000000000000..9e8c195c15f60 --- /dev/null +++ b/Cargo.toml @@ -0,0 +1,51 @@ +[package] +name = "rustc_codegen_gcc" +version = "0.1.0" +authors = ["Antoni Boucher "] +edition = "2018" +license = "MIT OR Apache-2.0" + +[lib] +crate-type = ["dylib"] + +[[test]] +name = "lang_tests" +path = "tests/lib.rs" +harness = false + +[dependencies] +gccjit = { git = "https://github.com/antoyo/gccjit.rs" } + +# Local copy. +#gccjit = { path = "../gccjit.rs" } + +target-lexicon = "0.10.0" + +ar = "0.8.0" + +[dependencies.object] +version = "0.25.0" +default-features = false +features = ["read", "std", "write"] # We don't need WASM support. 
+ +[dev-dependencies] +lang_tester = "0.3.9" +tempfile = "3.1.0" + +[profile.dev] +# By compiling dependencies with optimizations, performing tests gets much faster. +opt-level = 3 + +[profile.dev.package.rustc_codegen_gcc] +# Disabling optimizations for cg_gccjit itself makes compilation after a change faster. +opt-level = 0 + +# Disable optimizations and debuginfo of build scripts and some of the heavy build deps, as the +# execution time of build scripts is so fast that optimizing them slows down the total build time. +[profile.dev.build-override] +opt-level = 0 +debug = false + +[profile.release.build-override] +opt-level = 0 +debug = false diff --git a/LICENSE-APACHE b/LICENSE-APACHE new file mode 100644 index 0000000000000..1b5ec8b78e237 --- /dev/null +++ b/LICENSE-APACHE @@ -0,0 +1,176 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + +TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + +1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + +2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + +3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + +4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + +5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + +6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + +7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + +8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + +9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+
+END OF TERMS AND CONDITIONS
diff --git a/LICENSE-MIT b/LICENSE-MIT
new file mode 100644
index 0000000000000..31aa79387f27e
--- /dev/null
+++ b/LICENSE-MIT
@@ -0,0 +1,23 @@
+Permission is hereby granted, free of charge, to any
+person obtaining a copy of this software and associated
+documentation files (the "Software"), to deal in the
+Software without restriction, including without
+limitation the rights to use, copy, modify, merge,
+publish, distribute, sublicense, and/or sell copies of
+the Software, and to permit persons to whom the Software
+is furnished to do so, subject to the following
+conditions:
+
+The above copyright notice and this permission notice
+shall be included in all copies or substantial portions
+of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF
+ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
+TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
+PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR
+IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+DEALINGS IN THE SOFTWARE.
diff --git a/Readme.md b/Readme.md
new file mode 100644
index 0000000000000..44cb16334742f
--- /dev/null
+++ b/Readme.md
@@ -0,0 +1,118 @@
+# WIP libgccjit codegen backend for rust
+
+This is a GCC codegen for rustc, which means it can be loaded by the existing rustc frontend, but benefits from GCC: more architectures are supported and GCC's optimizations are used.
+
+**Despite its name, libgccjit can be used for ahead-of-time compilation, as is used here.**
+
+## Motivation
+
+The primary goal of this project is to be able to compile Rust code on platforms unsupported by LLVM.
+A secondary goal is to check whether using the GCC backend provides any run-time speed improvement for programs compiled with rustc.
+
+## Building
+
+**This requires a patched libgccjit in order to work.
+The patches in [this repository](https://github.com/antoyo/libgccjit-patches) need to be applied.
+(Those patches should work when applied on master, but in case they don't, they are known to work when applied on 079c23cfe079f203d5df83fea8e92a60c7d7e878.)
+You can also use my [fork of gcc](https://github.com/antoyo/gcc) which already includes these patches.**
+
+**Put the path to your custom build of libgccjit in the file `gcc_path`.**
+
+```bash
+$ git clone https://github.com/antoyo/rustc_codegen_gcc.git
+$ cd rustc_codegen_gcc
+$ ./prepare_build.sh # download and patch sysroot src
+$ ./build.sh --release
+```
+
+To run the tests:
+
+```bash
+$ ./prepare.sh # download and patch sysroot src and install hyperfine for benchmarking
+$ ./test.sh --release
+```
+
+## Usage
+
+In the following instructions, `$cg_gccjit_dir` is the directory you cloned this repo into.
+
+### Cargo
+
+```bash
+$ CHANNEL="release" $cg_gccjit_dir/cargo.sh run
+```
+
+If you compiled cg_gccjit in debug mode (i.e. you didn't pass `--release` to `./test.sh`), use `CHANNEL="debug"` instead or omit `CHANNEL="release"` completely.
+
+### Rustc
+
+> You should prefer using the Cargo method.
+
+```bash
+$ rustc +$(cat $cg_gccjit_dir/rust-toolchain) -Cpanic=abort -Zcodegen-backend=$cg_gccjit_dir/target/release/librustc_codegen_gcc.so --sysroot $cg_gccjit_dir/build_sysroot/sysroot my_crate.rs
+```
+
+## Env vars
+
+<dl>
+    <dt>CG_GCCJIT_INCR_CACHE_DISABLED</dt>
+    <dd>Don't cache object files in the incremental cache. Useful during development of cg_gccjit
+    to make it possible to use incremental mode for all analyses performed by rustc without caching
+    object files when their content should have been changed by a change to cg_gccjit.</dd>
+    <dt>CG_GCCJIT_DISPLAY_CG_TIME</dt>
+    <dd>Display the time it took to perform codegen for a crate (see the example below).</dd>
+</dl>
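+
+For example, to display the codegen time while running the test suite (an illustrative invocation, not a required workflow; the other variables combine with the scripts the same way):
+
+```bash
+$ CG_GCCJIT_DISPLAY_CG_TIME=1 CHANNEL="release" ./test.sh --release
+```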
+
+## Debugging
+
+Sometimes, libgccjit will crash and output an error like this:
+
+```
+during RTL pass: expand
+libgccjit.so: error: in expmed_mode_index, at expmed.h:249
+0x7f0da2e61a35 expmed_mode_index
+	../../../gcc/gcc/expmed.h:249
+0x7f0da2e61aa4 expmed_op_cost_ptr
+	../../../gcc/gcc/expmed.h:271
+0x7f0da2e620dc sdiv_cost_ptr
+	../../../gcc/gcc/expmed.h:540
+0x7f0da2e62129 sdiv_cost
+	../../../gcc/gcc/expmed.h:558
+0x7f0da2e73c12 expand_divmod(int, tree_code, machine_mode, rtx_def*, rtx_def*, rtx_def*, int)
+	../../../gcc/gcc/expmed.c:4335
+0x7f0da2ea1423 expand_expr_real_2(separate_ops*, rtx_def*, machine_mode, expand_modifier)
+	../../../gcc/gcc/expr.c:9240
+0x7f0da2cd1a1e expand_gimple_stmt_1
+	../../../gcc/gcc/cfgexpand.c:3796
+0x7f0da2cd1c30 expand_gimple_stmt
+	../../../gcc/gcc/cfgexpand.c:3857
+0x7f0da2cd90a9 expand_gimple_basic_block
+	../../../gcc/gcc/cfgexpand.c:5898
+0x7f0da2cdade8 execute
+	../../../gcc/gcc/cfgexpand.c:6582
+```
+
+To see the code which causes this error, call the following function:
+
+```c
+gcc_jit_context_dump_to_file(ctxt, "/tmp/output.c", 1 /* update_locations */)
+```
+
+This will create a C-like file and add the locations into the IR pointing to this C file.
+Then, rerun the program and it will output the location on the second line:
+
+```
+libgccjit.so: /tmp/something.c:61322:0: error: in expmed_mode_index, at expmed.h:249
+```
+
+Or add a breakpoint to `add_error` in gdb and print the line number using:
+
+```
+p loc->m_line
+```
+
+### How to use a custom-built rustc
+
+ * Build the stage1 compiler (`rustup toolchain link debug-current build/x86_64-unknown-linux-gnu/stage1`).
+ * Clean and rebuild the codegen with `debug-current` set in the file `rust-toolchain`.
+ * Add `~/.rustup/toolchains/debug-current/lib/rustlib/x86_64-unknown-linux-gnu/lib` to `LD_LIBRARY_PATH` (see the sketch below).
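+
+A minimal sketch of those steps as shell commands, assuming the rust checkout lives in `~/rust` and this repo in `~/rustc_codegen_gcc` (both paths are placeholders for your own setup):
+
+```bash
+cd ~/rust
+./x.py build                # build the stage1 compiler
+rustup toolchain link debug-current build/x86_64-unknown-linux-gnu/stage1
+cd ~/rustc_codegen_gcc
+echo 'debug-current' > rust-toolchain
+./clean_all.sh && ./build.sh
+export LD_LIBRARY_PATH=~/.rustup/toolchains/debug-current/lib/rustlib/x86_64-unknown-linux-gnu/lib:$LD_LIBRARY_PATH
+```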
diff --git a/build.sh b/build.sh
new file mode 100755
index 0000000000000..9f1228687e27d
--- /dev/null
+++ b/build.sh
@@ -0,0 +1,26 @@
+#!/bin/bash
+
+#set -x
+set -e
+
+export GCC_PATH=$(cat gcc_path)
+
+export LD_LIBRARY_PATH="$GCC_PATH"
+export LIBRARY_PATH="$GCC_PATH"
+
+if [[ "$1" == "--release" ]]; then
+    export CHANNEL='release'
+    CARGO_INCREMENTAL=1 cargo rustc --release
+else
+    echo $LD_LIBRARY_PATH
+    export CHANNEL='debug'
+    cargo rustc
+fi
+
+source config.sh
+
+rm -r target/out || true
+mkdir -p target/out/gccjit
+
+echo "[BUILD] sysroot"
+time ./build_sysroot/build_sysroot.sh $CHANNEL
diff --git a/build_sysroot/Cargo.toml b/build_sysroot/Cargo.toml
new file mode 100644
index 0000000000000..cfadf47cc3f86
--- /dev/null
+++ b/build_sysroot/Cargo.toml
@@ -0,0 +1,19 @@
+[package]
+authors = ["bjorn3 <bjorn3@users.noreply.github.com>"]
+name = "sysroot"
+version = "0.0.0"
+
+[dependencies]
+core = { path = "./sysroot_src/library/core" }
+compiler_builtins = "0.1"
+alloc = { path = "./sysroot_src/library/alloc" }
+std = { path = "./sysroot_src/library/std", features = ["panic_unwind", "backtrace"] }
+test = { path = "./sysroot_src/library/test" }
+
+[patch.crates-io]
+rustc-std-workspace-core = { path = "./sysroot_src/library/rustc-std-workspace-core" }
+rustc-std-workspace-alloc = { path = "./sysroot_src/library/rustc-std-workspace-alloc" }
+rustc-std-workspace-std = { path = "./sysroot_src/library/rustc-std-workspace-std" }
+
+[profile.release]
+debug = true
diff --git a/build_sysroot/build_sysroot.sh b/build_sysroot/build_sysroot.sh
new file mode 100755
index 0000000000000..d1dcf495db8a3
--- /dev/null
+++ b/build_sysroot/build_sysroot.sh
@@ -0,0 +1,30 @@
+#!/bin/bash
+
+# Requires the CHANNEL env var to be set to `debug` or `release`.
+
+set -e
+cd $(dirname "$0")
+
+pushd ../ >/dev/null
+source ./config.sh
+popd >/dev/null
+
+# Cleanup for previous run
+#     v Clean target dir except for build scripts and incremental cache
+rm -r target/*/{debug,release}/{build,deps,examples,libsysroot*,native} 2>/dev/null || true
+rm Cargo.lock test_target/Cargo.lock 2>/dev/null || true
+rm -r sysroot/ 2>/dev/null || true
+
+# Build libs
+export RUSTFLAGS="$RUSTFLAGS -Z force-unstable-if-unmarked -Cpanic=abort"
+# build.sh passes the channel name ("release"/"debug"), so accept it as well as the --release flag.
+if [[ "$1" == "--release" || "$1" == "release" ]]; then
+    sysroot_channel='release'
+    RUSTFLAGS="$RUSTFLAGS -Zmir-opt-level=3" cargo build --target $TARGET_TRIPLE --release
+else
+    sysroot_channel='debug'
+    cargo build --target $TARGET_TRIPLE
+fi
+
+# Copy files to sysroot
+mkdir -p sysroot/lib/rustlib/$TARGET_TRIPLE/lib/
+cp -r target/$TARGET_TRIPLE/$sysroot_channel/deps/* sysroot/lib/rustlib/$TARGET_TRIPLE/lib/
diff --git a/build_sysroot/prepare_sysroot_src.sh b/build_sysroot/prepare_sysroot_src.sh
new file mode 100755
index 0000000000000..4d18092419a4b
--- /dev/null
+++ b/build_sysroot/prepare_sysroot_src.sh
@@ -0,0 +1,39 @@
+#!/bin/bash
+set -e
+cd $(dirname "$0")
+
+SRC_DIR=$(dirname $(rustup which rustc))"/../lib/rustlib/src/rust/"
+DST_DIR="sysroot_src"
+
+if [ ! -e $SRC_DIR ]; then
+    echo "Please install the rust-src component"
+    exit 1
+fi
+
+rm -rf $DST_DIR
+mkdir -p $DST_DIR/library
+cp -r $SRC_DIR/library $DST_DIR/
+
+pushd $DST_DIR
+echo "[GIT] init"
+git init
+echo "[GIT] add"
+git add .
+echo "[GIT] commit"
+
+# This is needed on a virgin system where nothing is configured:
+# git really needs something here, or it will fail.
+# Even using --author is not enough.
+git config user.email || git config user.email "none@example.com"
+git config user.name || git config user.name "None"
+
+git commit -m "Initial commit" -q
+for file in $(ls ../../patches/ | grep -v patcha); do
+echo "[GIT] apply" $file
+git apply ../../patches/$file
+git add -A
+git commit --no-gpg-sign -m "Patch $file"
+done
+popd
+
+echo "Successfully prepared libcore for building"
diff --git a/build_sysroot/src/lib.rs b/build_sysroot/src/lib.rs
new file mode 100644
index 0000000000000..0c9ac1ac8e4bd
--- /dev/null
+++ b/build_sysroot/src/lib.rs
@@ -0,0 +1 @@
+#![no_std]
diff --git a/cargo.sh b/cargo.sh
new file mode 100755
index 0000000000000..a85865019c135
--- /dev/null
+++ b/cargo.sh
@@ -0,0 +1,23 @@
+#!/bin/bash
+
+if [ -z "$CHANNEL" ]; then
+export CHANNEL='debug'
+fi
+
+pushd $(dirname "$0") >/dev/null
+source config.sh
+
+# read nightly compiler from rust-toolchain file
+TOOLCHAIN=$(cat rust-toolchain)
+
+popd >/dev/null
+
+if [[ $(rustc -V) != $(rustc +${TOOLCHAIN} -V) ]]; then
+    echo "rustc_codegen_gcc is built for $(rustc +${TOOLCHAIN} -V) but the default rustc version is $(rustc -V)."
+    echo "Using $(rustc +${TOOLCHAIN} -V)."
+fi
+
+cmd=$1
+shift
+
+RUSTDOCFLAGS=$RUSTFLAGS cargo +${TOOLCHAIN} $cmd --target $TARGET_TRIPLE "$@"
diff --git a/clean_all.sh b/clean_all.sh
new file mode 100755
index 0000000000000..a77d1486fe283
--- /dev/null
+++ b/clean_all.sh
@@ -0,0 +1,5 @@
+#!/bin/bash --verbose
+set -e
+
+rm -rf target/ build_sysroot/{sysroot/,sysroot_src/,target/,Cargo.lock} perf.data{,.old}
+rm -rf regex/ simple-raytracer/
diff --git a/config.sh b/config.sh
new file mode 100644
index 0000000000000..2311600d1cf57
--- /dev/null
+++ b/config.sh
@@ -0,0 +1,48 @@
+set -e
+
+export CARGO_INCREMENTAL=0
+
+export GCC_PATH=$(cat gcc_path)
+
+unamestr=`uname`
+if [[ "$unamestr" == 'Linux' ]]; then
+    dylib_ext='so'
+elif [[ "$unamestr" == 'Darwin' ]]; then
+    dylib_ext='dylib'
+else
+    echo "Unsupported OS"
+    exit 1
+fi
+
+HOST_TRIPLE=$(rustc -vV | grep host | cut -d: -f2 | tr -d " ")
+TARGET_TRIPLE=$HOST_TRIPLE
+#TARGET_TRIPLE="aarch64-unknown-linux-gnu"
+
+linker=''
+RUN_WRAPPER=''
+if [[ "$HOST_TRIPLE" != "$TARGET_TRIPLE" ]]; then
+    if [[ "$TARGET_TRIPLE" == "aarch64-unknown-linux-gnu" ]]; then
+        # We are cross-compiling for aarch64. Use the correct linker and run tests in qemu.
+        linker='-Clinker=aarch64-linux-gnu-gcc'
+        RUN_WRAPPER='qemu-aarch64 -L /usr/aarch64-linux-gnu'
+    else
+        echo "Unknown non-native platform"
+    fi
+fi
+
+export RUSTFLAGS=$linker' -Cpanic=abort -Cdebuginfo=2 -Zpanic-abort-tests -Zcodegen-backend='$(pwd)'/target/'$CHANNEL'/librustc_codegen_gcc.'$dylib_ext' --sysroot '$(pwd)'/build_sysroot/sysroot'
+#export RUSTFLAGS=$linker' -Cpanic=abort -Cdebuginfo=2 -Zpanic-abort-tests -Zcodegen-backend='$(pwd)'/target/'$CHANNEL'/librustc_codegen_gcc.'$dylib_ext' --sysroot '$(pwd)'/build_sysroot/sysroot -Clto=fat -Cembed-bitcode=yes'
+
+# FIXME remove once the atomic shim is gone
+if [[ `uname` == 'Darwin' ]]; then
+    export RUSTFLAGS="$RUSTFLAGS -Clink-arg=-undefined -Clink-arg=dynamic_lookup"
+fi
+
+RUSTC="rustc $RUSTFLAGS -L crate=target/out --out-dir target/out"
+export RUSTC_LOG=warn # display metadata load errors
+
+export LD_LIBRARY_PATH="$(pwd)/target/out:$(pwd)/build_sysroot/sysroot/lib/rustlib/$TARGET_TRIPLE/lib:$GCC_PATH"
+export DYLD_LIBRARY_PATH=$LD_LIBRARY_PATH
+
+export CG_CLIF_DISPLAY_CG_TIME=1
+export CG_CLIF_INCR_CACHE_DISABLED=1
diff --git a/example/alloc_example.rs b/example/alloc_example.rs
new file mode 100644
index 0000000000000..bc6dd007ba010
--- /dev/null
+++ b/example/alloc_example.rs
@@ -0,0 +1,41 @@
+#![feature(start, box_syntax, core_intrinsics, alloc_prelude, alloc_error_handler)]
+#![no_std]
+
+extern crate alloc;
+extern crate alloc_system;
+
+use alloc::prelude::v1::*;
+
+use alloc_system::System;
+
+#[global_allocator]
+static ALLOC: System = System;
+
+#[link(name = "c")]
+extern "C" {
+    fn puts(s: *const u8) -> i32;
+}
+
+#[panic_handler]
+fn panic_handler(_: &core::panic::PanicInfo) -> ! {
+    unsafe {
+        core::intrinsics::abort();
+    }
+}
+
+#[alloc_error_handler]
+fn alloc_error_handler(_: alloc::alloc::Layout) -> ! {
+    unsafe {
+        core::intrinsics::abort();
+    }
+}
+
+#[start]
+fn main(_argc: isize, _argv: *const *const u8) -> isize {
+    let world: Box<&str> = box "Hello World!\0";
+    unsafe {
+        puts(*world as *const str as *const u8);
+    }
+
+    0
+}
diff --git a/example/alloc_system.rs b/example/alloc_system.rs
new file mode 100644
index 0000000000000..5f66ca67f2d40
--- /dev/null
+++ b/example/alloc_system.rs
@@ -0,0 +1,212 @@
+// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+#![no_std]
+#![feature(allocator_api, rustc_private)]
+#![cfg_attr(any(unix, target_os = "redox"), feature(libc))]
+
+// The minimum alignment guaranteed by the architecture. This value is used to
+// add fast paths for low alignment values.
+#[cfg(all(any(target_arch = "x86", + target_arch = "arm", + target_arch = "mips", + target_arch = "powerpc", + target_arch = "powerpc64")))] +const MIN_ALIGN: usize = 8; +#[cfg(all(any(target_arch = "x86_64", + target_arch = "aarch64", + target_arch = "mips64", + target_arch = "s390x", + target_arch = "sparc64")))] +const MIN_ALIGN: usize = 16; + +pub struct System; +#[cfg(any(windows, unix, target_os = "redox"))] +mod realloc_fallback { + use core::alloc::{GlobalAlloc, Layout}; + use core::cmp; + use core::ptr; + impl super::System { + pub(crate) unsafe fn realloc_fallback(&self, ptr: *mut u8, old_layout: Layout, + new_size: usize) -> *mut u8 { + // Docs for GlobalAlloc::realloc require this to be valid: + let new_layout = Layout::from_size_align_unchecked(new_size, old_layout.align()); + let new_ptr = GlobalAlloc::alloc(self, new_layout); + if !new_ptr.is_null() { + let size = cmp::min(old_layout.size(), new_size); + ptr::copy_nonoverlapping(ptr, new_ptr, size); + GlobalAlloc::dealloc(self, ptr, old_layout); + } + new_ptr + } + } +} +#[cfg(any(unix, target_os = "redox"))] +mod platform { + extern crate libc; + use core::ptr; + use MIN_ALIGN; + use System; + use core::alloc::{GlobalAlloc, Layout}; + unsafe impl GlobalAlloc for System { + #[inline] + unsafe fn alloc(&self, layout: Layout) -> *mut u8 { + if layout.align() <= MIN_ALIGN && layout.align() <= layout.size() { + libc::malloc(layout.size()) as *mut u8 + } else { + #[cfg(target_os = "macos")] + { + if layout.align() > (1 << 31) { + return ptr::null_mut() + } + } + aligned_malloc(&layout) + } + } + #[inline] + unsafe fn alloc_zeroed(&self, layout: Layout) -> *mut u8 { + if layout.align() <= MIN_ALIGN && layout.align() <= layout.size() { + libc::calloc(layout.size(), 1) as *mut u8 + } else { + let ptr = self.alloc(layout.clone()); + if !ptr.is_null() { + ptr::write_bytes(ptr, 0, layout.size()); + } + ptr + } + } + #[inline] + unsafe fn dealloc(&self, ptr: *mut u8, _layout: Layout) { + libc::free(ptr as *mut libc::c_void) + } + #[inline] + unsafe fn realloc(&self, ptr: *mut u8, layout: Layout, new_size: usize) -> *mut u8 { + if layout.align() <= MIN_ALIGN && layout.align() <= new_size { + libc::realloc(ptr as *mut libc::c_void, new_size) as *mut u8 + } else { + self.realloc_fallback(ptr, layout, new_size) + } + } + } + #[cfg(any(target_os = "android", + target_os = "hermit", + target_os = "redox", + target_os = "solaris"))] + #[inline] + unsafe fn aligned_malloc(layout: &Layout) -> *mut u8 { + // On android we currently target API level 9 which unfortunately + // doesn't have the `posix_memalign` API used below. Instead we use + // `memalign`, but this unfortunately has the property on some systems + // where the memory returned cannot be deallocated by `free`! + // + // Upon closer inspection, however, this appears to work just fine with + // Android, so for this platform we should be fine to call `memalign` + // (which is present in API level 9). Some helpful references could + // possibly be chromium using memalign [1], attempts at documenting that + // memalign + free is ok [2] [3], or the current source of chromium + // which still uses memalign on android [4]. 
+ // + // [1]: https://codereview.chromium.org/10796020/ + // [2]: https://code.google.com/p/android/issues/detail?id=35391 + // [3]: https://bugs.chromium.org/p/chromium/issues/detail?id=138579 + // [4]: https://chromium.googlesource.com/chromium/src/base/+/master/ + // /memory/aligned_memory.cc + libc::memalign(layout.align(), layout.size()) as *mut u8 + } + #[cfg(not(any(target_os = "android", + target_os = "hermit", + target_os = "redox", + target_os = "solaris")))] + #[inline] + unsafe fn aligned_malloc(layout: &Layout) -> *mut u8 { + let mut out = ptr::null_mut(); + let ret = libc::posix_memalign(&mut out, layout.align(), layout.size()); + if ret != 0 { + ptr::null_mut() + } else { + out as *mut u8 + } + } +} +#[cfg(windows)] +#[allow(nonstandard_style)] +mod platform { + use MIN_ALIGN; + use System; + use core::alloc::{GlobalAlloc, Layout}; + type LPVOID = *mut u8; + type HANDLE = LPVOID; + type SIZE_T = usize; + type DWORD = u32; + type BOOL = i32; + extern "system" { + fn GetProcessHeap() -> HANDLE; + fn HeapAlloc(hHeap: HANDLE, dwFlags: DWORD, dwBytes: SIZE_T) -> LPVOID; + fn HeapReAlloc(hHeap: HANDLE, dwFlags: DWORD, lpMem: LPVOID, dwBytes: SIZE_T) -> LPVOID; + fn HeapFree(hHeap: HANDLE, dwFlags: DWORD, lpMem: LPVOID) -> BOOL; + fn GetLastError() -> DWORD; + } + #[repr(C)] + struct Header(*mut u8); + const HEAP_ZERO_MEMORY: DWORD = 0x00000008; + unsafe fn get_header<'a>(ptr: *mut u8) -> &'a mut Header { + &mut *(ptr as *mut Header).offset(-1) + } + unsafe fn align_ptr(ptr: *mut u8, align: usize) -> *mut u8 { + let aligned = ptr.add(align - (ptr as usize & (align - 1))); + *get_header(aligned) = Header(ptr); + aligned + } + #[inline] + unsafe fn allocate_with_flags(layout: Layout, flags: DWORD) -> *mut u8 { + let ptr = if layout.align() <= MIN_ALIGN { + HeapAlloc(GetProcessHeap(), flags, layout.size()) + } else { + let size = layout.size() + layout.align(); + let ptr = HeapAlloc(GetProcessHeap(), flags, size); + if ptr.is_null() { + ptr + } else { + align_ptr(ptr, layout.align()) + } + }; + ptr as *mut u8 + } + unsafe impl GlobalAlloc for System { + #[inline] + unsafe fn alloc(&self, layout: Layout) -> *mut u8 { + allocate_with_flags(layout, 0) + } + #[inline] + unsafe fn alloc_zeroed(&self, layout: Layout) -> *mut u8 { + allocate_with_flags(layout, HEAP_ZERO_MEMORY) + } + #[inline] + unsafe fn dealloc(&self, ptr: *mut u8, layout: Layout) { + if layout.align() <= MIN_ALIGN { + let err = HeapFree(GetProcessHeap(), 0, ptr as LPVOID); + debug_assert!(err != 0, "Failed to free heap memory: {}", + GetLastError()); + } else { + let header = get_header(ptr); + let err = HeapFree(GetProcessHeap(), 0, header.0 as LPVOID); + debug_assert!(err != 0, "Failed to free heap memory: {}", + GetLastError()); + } + } + #[inline] + unsafe fn realloc(&self, ptr: *mut u8, layout: Layout, new_size: usize) -> *mut u8 { + if layout.align() <= MIN_ALIGN { + HeapReAlloc(GetProcessHeap(), 0, ptr as LPVOID, new_size) as *mut u8 + } else { + self.realloc_fallback(ptr, layout, new_size) + } + } + } +} diff --git a/example/arbitrary_self_types_pointers_and_wrappers.rs b/example/arbitrary_self_types_pointers_and_wrappers.rs new file mode 100644 index 0000000000000..ddeb752f93ed7 --- /dev/null +++ b/example/arbitrary_self_types_pointers_and_wrappers.rs @@ -0,0 +1,69 @@ +// Adapted from rustc run-pass test suite + +#![feature(arbitrary_self_types, unsize, coerce_unsized, dispatch_from_dyn)] +#![feature(rustc_attrs)] + +use std::{ + ops::{Deref, CoerceUnsized, DispatchFromDyn}, + marker::Unsize, +}; + +struct 
Ptr<T: ?Sized>(Box<T>);
+
+impl<T: ?Sized> Deref for Ptr<T> {
+    type Target = T;
+
+    fn deref(&self) -> &T {
+        &*self.0
+    }
+}
+
+impl<T: Unsize<U> + ?Sized, U: ?Sized> CoerceUnsized<Ptr<U>> for Ptr<T> {}
+impl<T: Unsize<U> + ?Sized, U: ?Sized> DispatchFromDyn<Ptr<U>> for Ptr<T> {}
+
+struct Wrapper<T: ?Sized>(T);
+
+impl<T: ?Sized> Deref for Wrapper<T> {
+    type Target = T;
+
+    fn deref(&self) -> &T {
+        &self.0
+    }
+}
+
+impl<T: CoerceUnsized<U>, U> CoerceUnsized<Wrapper<U>> for Wrapper<T> {}
+impl<T: DispatchFromDyn<U>, U> DispatchFromDyn<Wrapper<U>> for Wrapper<T> {}
+
+
+trait Trait {
+    // This method isn't object-safe yet. Unsized by-value `self` is object-safe (but not callable
+    // without unsized_locals), but wrappers around `Self` currently are not.
+    // FIXME (mikeyhew) uncomment this when unsized rvalues object-safety is implemented
+    // fn wrapper(self: Wrapper<Self>) -> i32;
+    fn ptr_wrapper(self: Ptr<Wrapper<Self>>) -> i32;
+    fn wrapper_ptr(self: Wrapper<Ptr<Self>>) -> i32;
+    fn wrapper_ptr_wrapper(self: Wrapper<Ptr<Wrapper<Self>>>) -> i32;
+}
+
+impl Trait for i32 {
+    fn ptr_wrapper(self: Ptr<Wrapper<Self>>) -> i32 {
+        **self
+    }
+    fn wrapper_ptr(self: Wrapper<Ptr<Self>>) -> i32 {
+        **self
+    }
+    fn wrapper_ptr_wrapper(self: Wrapper<Ptr<Wrapper<Self>>>) -> i32 {
+        ***self
+    }
+}
+
+fn main() {
+    let pw = Ptr(Box::new(Wrapper(5))) as Ptr<Wrapper<dyn Trait>>;
+    assert_eq!(pw.ptr_wrapper(), 5);
+
+    let wp = Wrapper(Ptr(Box::new(6))) as Wrapper<Ptr<dyn Trait>>;
+    assert_eq!(wp.wrapper_ptr(), 6);
+
+    let wpw = Wrapper(Ptr(Box::new(Wrapper(7)))) as Wrapper<Ptr<Wrapper<dyn Trait>>>;
+    assert_eq!(wpw.wrapper_ptr_wrapper(), 7);
+}
diff --git a/example/dst-field-align.rs b/example/dst-field-align.rs
new file mode 100644
index 0000000000000..6c338e99912ec
--- /dev/null
+++ b/example/dst-field-align.rs
@@ -0,0 +1,67 @@
+// run-pass
+#![allow(dead_code)]
+struct Foo<T: ?Sized> {
+    a: u16,
+    b: T
+}
+
+trait Bar {
+    fn get(&self) -> usize;
+}
+
+impl Bar for usize {
+    fn get(&self) -> usize { *self }
+}
+
+struct Baz<T: ?Sized> {
+    a: T
+}
+
+struct HasDrop<T: ?Sized> {
+    ptr: Box<usize>,
+    data: T
+}
+
+fn main() {
+    // Test that zero-offset works properly
+    let b : Baz<usize> = Baz { a: 7 };
+    assert_eq!(b.a.get(), 7);
+    let b : &Baz<dyn Bar> = &b;
+    assert_eq!(b.a.get(), 7);
+
+    // Test that the field is aligned properly
+    let f : Foo<usize> = Foo { a: 0, b: 11 };
+    assert_eq!(f.b.get(), 11);
+    let ptr1 : *const u8 = &f.b as *const _ as *const u8;
+
+    let f : &Foo<dyn Bar> = &f;
+    let ptr2 : *const u8 = &f.b as *const _ as *const u8;
+    assert_eq!(f.b.get(), 11);
+
+    // The pointers should be the same
+    assert_eq!(ptr1, ptr2);
+
+    // Test that nested DSTs work properly
+    let f : Foo<Foo<usize>> = Foo { a: 0, b: Foo { a: 1, b: 17 }};
+    assert_eq!(f.b.b.get(), 17);
+    let f : &Foo<Foo<dyn Bar>> = &f;
+    assert_eq!(f.b.b.get(), 17);
+
+    // Test that getting the pointer via destructuring works
+
+    let f : Foo<usize> = Foo { a: 0, b: 11 };
+    let f : &Foo<dyn Bar> = &f;
+    let &Foo { a: _, b: ref bar } = f;
+    assert_eq!(bar.get(), 11);
+
+    // Make sure that drop flags don't screw things up
+
+    let d : HasDrop<Baz<[i32; 4]>> = HasDrop {
+        ptr: Box::new(0),
+        data: Baz { a: [1,2,3,4] }
+    };
+    assert_eq!([1,2,3,4], d.data.a);
+
+    let d : &HasDrop<Baz<[i32]>> = &d;
+    assert_eq!(&[1,2,3,4], &d.data.a);
+}
diff --git a/example/example.rs b/example/example.rs
new file mode 100644
index 0000000000000..5878e8548d926
--- /dev/null
+++ b/example/example.rs
@@ -0,0 +1,208 @@
+#![feature(no_core, unboxed_closures)]
+#![no_core]
+#![allow(dead_code)]
+
+extern crate mini_core;
+
+use mini_core::*;
+
+fn abc(a: u8) -> u8 {
+    a * 2
+}
+
+fn bcd(b: bool, a: u8) -> u8 {
+    if b {
+        a * 2
+    } else {
+        a * 3
+    }
+}
+
+fn call() {
+    abc(42);
+}
+
+fn indirect_call() {
+    let f: fn() = call;
+    f();
+}
+
+enum BoolOption {
+    Some(bool),
+    None,
+}
+
+fn option_unwrap_or(o: BoolOption, d: bool) -> bool {
+    match o {
+        BoolOption::Some(b) => b,
+        BoolOption::None => d,
+    }
+}
+
+fn ret_42() -> u8 {
+    42
+}
+
+fn return_str() -> &'static str {
+    "hello world"
+}
+
+fn promoted_val() -> &'static u8 {
+    &(1 * 2)
+}
+
+fn cast_ref_to_raw_ptr(abc: &u8) -> *const u8 {
+    abc as *const u8
+}
+
+fn cmp_raw_ptr(a: *const u8, b: *const u8) -> bool {
+    a == b
+}
+
+fn int_cast(a: u16, b: i16) -> (u8, u16, u32, usize, i8, i16, i32, isize, u8, u32) {
+    (
+        a as u8, a as u16, a as u32, a as usize, a as i8, a as i16, a as i32, a as isize, b as u8,
+        b as u32,
+    )
+}
+
+fn char_cast(c: char) -> u8 {
+    c as u8
+}
+
+pub struct DebugTuple(());
+
+fn debug_tuple() -> DebugTuple {
+    DebugTuple(())
+}
+
+fn size_of<T>() -> usize {
+    intrinsics::size_of::<T>()
+}
+
+fn use_size_of() -> usize {
+    size_of::<u8>()
+}
+
+unsafe fn use_copy_intrinsic(src: *const u8, dst: *mut u8) {
+    intrinsics::copy::<u8>(src, dst, 1);
+}
+
+unsafe fn use_copy_intrinsic_ref(src: *const u8, dst: *mut u8) {
+    let copy2 = &intrinsics::copy::<u8>;
+    copy2(src, dst, 1);
+}
+
+const ABC: u8 = 6 * 7;
+
+fn use_const() -> u8 {
+    ABC
+}
+
+pub fn call_closure_3arg() {
+    (|_, _, _| {})(0u8, 42u16, 0u8)
+}
+
+pub fn call_closure_2arg() {
+    (|_, _| {})(0u8, 42u16)
+}
+
+struct IsNotEmpty;
+
+impl<'a, 'b> FnOnce<(&'a &'b [u16],)> for IsNotEmpty {
+    type Output = (u8, u8);
+
+    #[inline]
+    extern "rust-call" fn call_once(mut self, arg: (&'a &'b [u16],)) -> (u8, u8) {
+        self.call_mut(arg)
+    }
+}
+
+impl<'a, 'b> FnMut<(&'a &'b [u16],)> for IsNotEmpty {
+    #[inline]
+    extern "rust-call" fn call_mut(&mut self, _arg: (&'a &'b [u16],)) -> (u8, u8) {
+        (0, 42)
+    }
+}
+
+pub fn call_is_not_empty() {
+    IsNotEmpty.call_once((&(&[0u16] as &[_]),));
+}
+
+fn eq_char(a: char, b: char) -> bool {
+    a == b
+}
+
+unsafe fn transmute(c: char) -> u32 {
+    intrinsics::transmute(c)
+}
+
+unsafe fn deref_str_ptr(s: *const str) -> &'static str {
+    &*s
+}
+
+fn use_array(arr: [u8; 3]) -> u8 {
+    arr[1]
+}
+
+fn repeat_array() -> [u8; 3] {
+    [0; 3]
+}
+
+fn array_as_slice(arr: &[u8; 3]) -> &[u8] {
+    arr
+}
+
+unsafe fn use_ctlz_nonzero(a: u16) -> u16 {
+    intrinsics::ctlz_nonzero(a)
+}
+
+fn ptr_as_usize(ptr: *const u8) -> usize {
+    ptr as usize
+}
+
+fn float_cast(a: f32, b: f64) -> (f64, f32) {
+    (a as f64, b as f32)
+}
+
+fn int_to_float(a: u8, b: i32) -> (f64, f32) {
+    (a as f64, b as f32)
+}
+
+fn make_array() -> [u8; 3] {
+    [42, 0, 5]
+}
+
+fn some_promoted_tuple() -> &'static (&'static str, &'static str) {
+    &("abc", "some")
+}
+
+fn index_slice(s: &[u8]) -> u8 {
+    s[2]
+}
+
+pub struct StrWrapper {
+    s: str,
+}
+
+fn str_wrapper_get(w: &StrWrapper) -> &str {
+    &w.s
+}
+
+fn i16_as_i8(a: i16) -> i8 {
+    a as i8
+}
+
+struct Unsized(u8, str);
+
+fn get_sized_field_ref_from_unsized_type(u: &Unsized) -> &u8 {
+    &u.0
+}
+
+fn get_unsized_field_ref_from_unsized_type(u: &Unsized) -> &str {
+    &u.1
+}
+
+pub fn reuse_byref_argument_storage(a: (u8, u16, u32)) -> u8 {
+    a.0
+}
diff --git a/example/mini_core.rs b/example/mini_core.rs
new file mode 100644
index 0000000000000..1067cee881487
--- /dev/null
+++ b/example/mini_core.rs
@@ -0,0 +1,585 @@
+#![feature(
+    no_core, lang_items, intrinsics, unboxed_closures, type_ascription, extern_types,
+    untagged_unions, decl_macro, rustc_attrs, transparent_unions, auto_traits,
+    thread_local
+)]
+#![no_core]
+#![allow(dead_code)]
+
+#[no_mangle]
+unsafe extern "C" fn _Unwind_Resume() {
+    intrinsics::unreachable();
+}
+
+#[lang = "sized"]
+pub trait Sized {}
+
+#[lang = "unsize"]
+pub trait Unsize<T: ?Sized> {}
+
+#[lang = "coerce_unsized"]
+pub trait CoerceUnsized<T> {}
+
+impl<'a, 'b: 'a, T: ?Sized + Unsize<U>, U: ?Sized> CoerceUnsized<&'a U>
for &'b T {} +impl<'a, T: ?Sized + Unsize<U>, U: ?Sized> CoerceUnsized<&'a mut U> for &'a mut T {} +impl<T: ?Sized + Unsize<U>, U: ?Sized> CoerceUnsized<*const U> for *const T {} +impl<T: ?Sized + Unsize<U>, U: ?Sized> CoerceUnsized<*mut U> for *mut T {} + +#[lang = "dispatch_from_dyn"] +pub trait DispatchFromDyn<T> {} + +// &T -> &U +impl<'a, T: ?Sized+Unsize<U>, U: ?Sized> DispatchFromDyn<&'a U> for &'a T {} +// &mut T -> &mut U +impl<'a, T: ?Sized+Unsize<U>, U: ?Sized> DispatchFromDyn<&'a mut U> for &'a mut T {} +// *const T -> *const U +impl<T: ?Sized+Unsize<U>, U: ?Sized> DispatchFromDyn<*const U> for *const T {} +// *mut T -> *mut U +impl<T: ?Sized+Unsize<U>, U: ?Sized> DispatchFromDyn<*mut U> for *mut T {} +impl<T: ?Sized+Unsize<U>, U: ?Sized> DispatchFromDyn<Box<U>> for Box<T> {} + +#[lang = "receiver"] +pub trait Receiver {} + +impl<T: ?Sized> Receiver for &T {} +impl<T: ?Sized> Receiver for &mut T {} +impl<T: ?Sized> Receiver for Box<T> {} + +#[lang = "copy"] +pub unsafe trait Copy {} + +unsafe impl Copy for bool {} +unsafe impl Copy for u8 {} +unsafe impl Copy for u16 {} +unsafe impl Copy for u32 {} +unsafe impl Copy for u64 {} +unsafe impl Copy for usize {} +unsafe impl Copy for i8 {} +unsafe impl Copy for i16 {} +unsafe impl Copy for i32 {} +unsafe impl Copy for isize {} +unsafe impl Copy for f32 {} +unsafe impl Copy for char {} +unsafe impl<'a, T: ?Sized> Copy for &'a T {} +unsafe impl<T: ?Sized> Copy for *const T {} +unsafe impl<T: ?Sized> Copy for *mut T {} + +#[lang = "sync"] +pub unsafe trait Sync {} + +unsafe impl Sync for bool {} +unsafe impl Sync for u8 {} +unsafe impl Sync for u16 {} +unsafe impl Sync for u32 {} +unsafe impl Sync for u64 {} +unsafe impl Sync for usize {} +unsafe impl Sync for i8 {} +unsafe impl Sync for i16 {} +unsafe impl Sync for i32 {} +unsafe impl Sync for isize {} +unsafe impl Sync for char {} +unsafe impl<'a, T: ?Sized> Sync for &'a T {} +unsafe impl Sync for [u8; 16] {} + +#[lang = "freeze"] +unsafe auto trait Freeze {} + +unsafe impl<T: ?Sized> Freeze for PhantomData<T> {} +unsafe impl<T: ?Sized> Freeze for *const T {} +unsafe impl<T: ?Sized> Freeze for *mut T {} +unsafe impl<T: ?Sized> Freeze for &T {} +unsafe impl<T: ?Sized> Freeze for &mut T {} + +#[lang = "structural_peq"] +pub trait StructuralPartialEq {} + +#[lang = "structural_teq"] +pub trait StructuralEq {} + +#[lang = "not"] +pub trait Not { + type Output; + + fn not(self) -> Self::Output; +} + +impl Not for bool { + type Output = bool; + + fn not(self) -> bool { + !self + } +} + +#[lang = "mul"] +pub trait Mul<RHS = Self> { + type Output; + + #[must_use] + fn mul(self, rhs: RHS) -> Self::Output; +} + +impl Mul for u8 { + type Output = Self; + + fn mul(self, rhs: Self) -> Self::Output { + self * rhs + } +} + +impl Mul for usize { + type Output = Self; + + fn mul(self, rhs: Self) -> Self::Output { + self * rhs + } +} + +#[lang = "add"] +pub trait Add<RHS = Self> { + type Output; + + fn add(self, rhs: RHS) -> Self::Output; +} + +impl Add for u8 { + type Output = Self; + + fn add(self, rhs: Self) -> Self { + self + rhs + } +} + +impl Add for i8 { + type Output = Self; + + fn add(self, rhs: Self) -> Self { + self + rhs + } +} + +impl Add for usize { + type Output = Self; + + fn add(self, rhs: Self) -> Self { + self + rhs + } +} + +#[lang = "sub"] +pub trait Sub<RHS = Self> { + type Output; + + fn sub(self, rhs: RHS) -> Self::Output; +} + +impl Sub for usize { + type Output = Self; + + fn sub(self, rhs: Self) -> Self { + self - rhs + } +} + +impl Sub for u8 { + type Output = Self; + + fn sub(self, rhs: Self) -> Self { + self - rhs + } +} + +impl Sub for i8 { + type Output = Self; + + fn sub(self, rhs: Self) -> Self { + self - rhs + } +} + +impl Sub for i16 { + type Output = Self; + + fn sub(self, rhs: Self) -> Self { + self - rhs + } +} + +#[lang = "rem"] +pub
trait Rem<RHS = Self> { + type Output; + + fn rem(self, rhs: RHS) -> Self::Output; +} + +impl Rem for usize { + type Output = Self; + + fn rem(self, rhs: Self) -> Self { + self % rhs + } +} + +#[lang = "bitor"] +pub trait BitOr<RHS = Self> { + type Output; + + #[must_use] + fn bitor(self, rhs: RHS) -> Self::Output; +} + +impl BitOr for bool { + type Output = bool; + + fn bitor(self, rhs: bool) -> bool { + self | rhs + } +} + +impl<'a> BitOr<bool> for &'a bool { + type Output = bool; + + fn bitor(self, rhs: bool) -> bool { + *self | rhs + } +} + +#[lang = "eq"] +pub trait PartialEq<Rhs: ?Sized = Self> { + fn eq(&self, other: &Rhs) -> bool; + fn ne(&self, other: &Rhs) -> bool; +} + +impl PartialEq for u8 { + fn eq(&self, other: &u8) -> bool { + (*self) == (*other) + } + fn ne(&self, other: &u8) -> bool { + (*self) != (*other) + } +} + +impl PartialEq for u16 { + fn eq(&self, other: &u16) -> bool { + (*self) == (*other) + } + fn ne(&self, other: &u16) -> bool { + (*self) != (*other) + } +} + +impl PartialEq for u32 { + fn eq(&self, other: &u32) -> bool { + (*self) == (*other) + } + fn ne(&self, other: &u32) -> bool { + (*self) != (*other) + } +} + + +impl PartialEq for u64 { + fn eq(&self, other: &u64) -> bool { + (*self) == (*other) + } + fn ne(&self, other: &u64) -> bool { + (*self) != (*other) + } +} + +impl PartialEq for usize { + fn eq(&self, other: &usize) -> bool { + (*self) == (*other) + } + fn ne(&self, other: &usize) -> bool { + (*self) != (*other) + } +} + +impl PartialEq for i8 { + fn eq(&self, other: &i8) -> bool { + (*self) == (*other) + } + fn ne(&self, other: &i8) -> bool { + (*self) != (*other) + } +} + +impl PartialEq for i32 { + fn eq(&self, other: &i32) -> bool { + (*self) == (*other) + } + fn ne(&self, other: &i32) -> bool { + (*self) != (*other) + } +} + +impl PartialEq for isize { + fn eq(&self, other: &isize) -> bool { + (*self) == (*other) + } + fn ne(&self, other: &isize) -> bool { + (*self) != (*other) + } +} + +impl PartialEq for char { + fn eq(&self, other: &char) -> bool { + (*self) == (*other) + } + fn ne(&self, other: &char) -> bool { + (*self) != (*other) + } +} + +impl<T: ?Sized> PartialEq for *const T { + fn eq(&self, other: &*const T) -> bool { + *self == *other + } + fn ne(&self, other: &*const T) -> bool { + *self != *other + } +} + +#[lang = "neg"] +pub trait Neg { + type Output; + + fn neg(self) -> Self::Output; +} + +impl Neg for i8 { + type Output = i8; + + fn neg(self) -> i8 { + -self + } +} + +impl Neg for i16 { + type Output = i16; + + fn neg(self) -> i16 { + self + } +} + +impl Neg for isize { + type Output = isize; + + fn neg(self) -> isize { + -self + } +} + +impl Neg for f32 { + type Output = f32; + + fn neg(self) -> f32 { + -self + } +} + +pub enum Option<T> { + Some(T), + None, +} + +pub use Option::*; + +#[lang = "phantom_data"] +pub struct PhantomData<T: ?Sized>; + +#[lang = "fn_once"] +#[rustc_paren_sugar] +pub trait FnOnce<Args> { + #[lang = "fn_once_output"] + type Output; + + extern "rust-call" fn call_once(self, args: Args) -> Self::Output; +} + +#[lang = "fn_mut"] +#[rustc_paren_sugar] +pub trait FnMut<Args>: FnOnce<Args> { + extern "rust-call" fn call_mut(&mut self, args: Args) -> Self::Output; +} + +#[lang = "panic"] +#[track_caller] +pub fn panic(_msg: &str) -> ! { + unsafe { + libc::puts("Panicking\n\0" as *const str as *const u8); + intrinsics::abort(); + } +} + +#[lang = "panic_bounds_check"] +#[track_caller] +fn panic_bounds_check(index: usize, len: usize) -> !
{ + unsafe { + libc::printf("index out of bounds: the len is %d but the index is %d\n\0" as *const str as *const i8, len, index); + intrinsics::abort(); + } +} + +#[lang = "eh_personality"] +fn eh_personality() -> ! { + loop {} +} + +#[lang = "drop_in_place"] +#[allow(unconditional_recursion)] +pub unsafe fn drop_in_place<T: ?Sized>(to_drop: *mut T) { + // Code here does not matter - this is replaced by the + // real drop glue by the compiler. + drop_in_place(to_drop); +} + +#[lang = "deref"] +pub trait Deref { + type Target: ?Sized; + + fn deref(&self) -> &Self::Target; +} + +#[lang = "owned_box"] +pub struct Box<T: ?Sized>(*mut T); + +impl<T: ?Sized + Unsize<U>, U: ?Sized> CoerceUnsized<Box<U>> for Box<T> {} + +impl<T: ?Sized> Drop for Box<T> { + fn drop(&mut self) { + // drop is currently performed by compiler. + } +} + +impl<T: ?Sized> Deref for Box<T> { + type Target = T; + + fn deref(&self) -> &Self::Target { + &**self + } +} + +#[lang = "exchange_malloc"] +unsafe fn allocate(size: usize, _align: usize) -> *mut u8 { + libc::malloc(size) +} + +#[lang = "box_free"] +unsafe fn box_free<T: ?Sized>(ptr: *mut T) { + libc::free(ptr as *mut u8); +} + +#[lang = "drop"] +pub trait Drop { + fn drop(&mut self); +} + +#[lang = "manually_drop"] +#[repr(transparent)] +pub struct ManuallyDrop<T: ?Sized> { + pub value: T, +} + +#[lang = "maybe_uninit"] +#[repr(transparent)] +pub union MaybeUninit<T> { + pub uninit: (), + pub value: ManuallyDrop<T>, +} + +pub mod intrinsics { + extern "rust-intrinsic" { + pub fn abort() -> !; + pub fn size_of<T>() -> usize; + pub fn size_of_val<T: ?Sized>(val: *const T) -> usize; + pub fn min_align_of<T>() -> usize; + pub fn min_align_of_val<T: ?Sized>(val: *const T) -> usize; + pub fn copy<T>(src: *const T, dst: *mut T, count: usize); + pub fn transmute<T, U>(e: T) -> U; + pub fn ctlz_nonzero<T>(x: T) -> T; + pub fn needs_drop<T>() -> bool; + pub fn bitreverse<T>(x: T) -> T; + pub fn bswap<T>(x: T) -> T; + pub fn write_bytes<T>(dst: *mut T, val: u8, count: usize); + pub fn unreachable() -> !; + } +} + +pub mod libc { + #[link(name = "c")] + extern "C" { + pub fn puts(s: *const u8) -> i32; + pub fn printf(format: *const i8, ...)
-> i32; + pub fn malloc(size: usize) -> *mut u8; + pub fn free(ptr: *mut u8); + pub fn memcpy(dst: *mut u8, src: *const u8, size: usize); + pub fn memmove(dst: *mut u8, src: *const u8, size: usize); + pub fn strncpy(dst: *mut u8, src: *const u8, size: usize); + } +} + +#[lang = "index"] +pub trait Index<Idx: ?Sized> { + type Output: ?Sized; + fn index(&self, index: Idx) -> &Self::Output; +} + +impl<T> Index<usize> for [T; 3] { + type Output = T; + + fn index(&self, index: usize) -> &Self::Output { + &self[index] + } +} + +impl<T> Index<usize> for [T] { + type Output = T; + + fn index(&self, index: usize) -> &Self::Output { + &self[index] + } +} + +extern { + type VaListImpl; +} + +#[lang = "va_list"] +#[repr(transparent)] +pub struct VaList<'a>(&'a mut VaListImpl); + +#[rustc_builtin_macro] +#[rustc_macro_transparency = "semitransparent"] +pub macro stringify($($t:tt)*) { /* compiler built-in */ } + +#[rustc_builtin_macro] +#[rustc_macro_transparency = "semitransparent"] +pub macro file() { /* compiler built-in */ } + +#[rustc_builtin_macro] +#[rustc_macro_transparency = "semitransparent"] +pub macro line() { /* compiler built-in */ } + +#[rustc_builtin_macro] +#[rustc_macro_transparency = "semitransparent"] +pub macro cfg() { /* compiler built-in */ } + +pub static A_STATIC: u8 = 42; + +#[lang = "panic_location"] +struct PanicLocation { + file: &'static str, + line: u32, + column: u32, +} + +#[no_mangle] +pub fn get_tls() -> u8 { + #[thread_local] + static A: u8 = 42; + + A +} diff --git a/example/mini_core_hello_world.rs b/example/mini_core_hello_world.rs new file mode 100644 index 0000000000000..ccf4b53a702e5 --- /dev/null +++ b/example/mini_core_hello_world.rs @@ -0,0 +1,424 @@ +// Adapted from https://github.com/sunfishcode/mir2cranelift/blob/master/rust-examples/nocore-hello-world.rs + +#![feature( + no_core, unboxed_closures, start, lang_items, box_syntax, never_type, linkage, + extern_types, thread_local +)] +#![no_core] +#![allow(dead_code, non_camel_case_types)] + +extern crate mini_core; + +use mini_core::*; +use mini_core::libc::*; + +unsafe extern "C" fn my_puts(s: *const u8) { + puts(s); +} + +#[lang = "termination"] +trait Termination { + fn report(self) -> i32; +} + +impl Termination for () { + fn report(self) -> i32 { + unsafe { + NUM = 6 * 7 + 1 + (1u8 == 1u8) as u8; // 44 + *NUM_REF as i32 + } + } +} + +trait SomeTrait { + fn object_safe(&self); +} + +impl SomeTrait for &'static str { + fn object_safe(&self) { + unsafe { + puts(*self as *const str as *const u8); + } + } +} + +struct NoisyDrop { + text: &'static str, + inner: NoisyDropInner, +} + +struct NoisyDropInner; + +impl Drop for NoisyDrop { + fn drop(&mut self) { + unsafe { + puts(self.text as *const str as *const u8); + } + } +} + +impl Drop for NoisyDropInner { + fn drop(&mut self) { + unsafe { + puts("Inner got dropped!\0" as *const str as *const u8); + } + } +} + +impl SomeTrait for NoisyDrop { + fn object_safe(&self) {} +} + +enum Ordering { + Less = -1, + Equal = 0, + Greater = 1, +} + +#[lang = "start"] +fn start<T: Termination + 'static>( + main: fn() -> T, + argc: isize, + argv: *const *const u8, +) -> isize { + if argc == 3 { + unsafe { puts(*argv); } + unsafe { puts(*((argv as usize + intrinsics::size_of::<*const u8>()) as *const *const u8)); } + unsafe { puts(*((argv as usize + 2 * intrinsics::size_of::<*const u8>()) as *const *const u8)); } + } + + main().report(); + 0 +} + +static mut NUM: u8 = 6 * 7; +static NUM_REF: &'static u8 = unsafe { &NUM }; + +macro_rules! assert { + ($e:expr) => { + if !$e { + panic(stringify!(! $e)); + } + }; +} + +macro_rules!
assert_eq { + ($l:expr, $r: expr) => { + if $l != $r { + panic(stringify!($l != $r)); + } + } +} + +struct Unique<T: ?Sized> { + pointer: *const T, + _marker: PhantomData<T>, +} + +impl<T: ?Sized, U: ?Sized> CoerceUnsized<Unique<U>> for Unique<T> where T: Unsize<U> {} + +unsafe fn zeroed<T>() -> T { + let mut uninit = MaybeUninit { uninit: () }; + intrinsics::write_bytes(&mut uninit.value.value as *mut T, 0, 1); + uninit.value.value +} + +fn take_f32(_f: f32) {} +fn take_unique(_u: Unique<()>) {} + +fn return_u128_pair() -> (u128, u128) { + (0, 0) +} + +fn call_return_u128_pair() { + return_u128_pair(); +} + +fn main() { + take_unique(Unique { + pointer: 0 as *const (), + _marker: PhantomData, + }); + take_f32(0.1); + + //call_return_u128_pair(); + + let slice = &[0, 1] as &[i32]; + let slice_ptr = slice as *const [i32] as *const i32; + + assert_eq!(slice_ptr as usize % 4, 0); + + //return; + + unsafe { + printf("Hello %s\n\0" as *const str as *const i8, "printf\0" as *const str as *const i8); + + let hello: &[u8] = b"Hello\0" as &[u8; 6]; + let ptr: *const u8 = hello as *const [u8] as *const u8; + puts(ptr); + + let world: Box<&str> = box "World!\0"; + puts(*world as *const str as *const u8); + world as Box<dyn SomeTrait>; + + assert_eq!(intrinsics::bitreverse(0b10101000u8), 0b00010101u8); + + assert_eq!(intrinsics::bswap(0xabu8), 0xabu8); + assert_eq!(intrinsics::bswap(0xddccu16), 0xccddu16); + assert_eq!(intrinsics::bswap(0xffee_ddccu32), 0xccdd_eeffu32); + assert_eq!(intrinsics::bswap(0x1234_5678_ffee_ddccu64), 0xccdd_eeff_7856_3412u64); + + assert_eq!(intrinsics::size_of_val(hello) as u8, 6); + + let chars = &['C', 'h', 'a', 'r', 's']; + let chars = chars as &[char]; + assert_eq!(intrinsics::size_of_val(chars) as u8, 4 * 5); + + let a: &dyn SomeTrait = &"abc\0"; + a.object_safe(); + + assert_eq!(intrinsics::size_of_val(a) as u8, 16); + assert_eq!(intrinsics::size_of_val(&0u32) as u8, 4); + + assert_eq!(intrinsics::min_align_of::<u16>() as u8, 2); + assert_eq!(intrinsics::min_align_of_val(&a) as u8, intrinsics::min_align_of::<&str>() as u8); + + assert!(!intrinsics::needs_drop::<u8>()); + assert!(intrinsics::needs_drop::<NoisyDrop>()); + + Unique { + pointer: 0 as *const &str, + _marker: PhantomData, + } as Unique<dyn SomeTrait>; + + struct MyDst<T: ?Sized>(T); + + intrinsics::size_of_val(&MyDst([0u8; 4]) as &MyDst<[u8]>); + + struct Foo { + x: u8, + y: !, + } + + unsafe fn uninitialized<T>() -> T { + MaybeUninit { uninit: () }.value.value + } + + zeroed::<(u8, u8)>(); + #[allow(unreachable_code)] + { + if false { + zeroed::<!>(); + zeroed::<Foo>(); + uninitialized::<Foo>(); + } + } + } + + let _ = box NoisyDrop { + text: "Boxed outer got dropped!\0", + inner: NoisyDropInner, + } as Box<dyn SomeTrait>; + + const FUNC_REF: Option<fn()> = Some(main); + match FUNC_REF { + Some(_) => {}, + None => assert!(false), + } + + match Ordering::Less { + Ordering::Less => {}, + _ => assert!(false), + } + + [NoisyDropInner, NoisyDropInner]; + + let x = &[0u32, 42u32] as &[u32]; + match x { + [] => assert_eq!(0u32, 1), + [_, ref y @ ..] => assert_eq!(&x[1] as *const u32 as usize, &y[0] as *const u32 as usize), + } + + assert_eq!(((|()| 42u8) as fn(()) -> u8)(()), 42); + + extern { + #[linkage = "weak"] + static ABC: *const u8; + } + + { + extern { + #[linkage = "weak"] + static ABC: *const u8; + } + } + + // TODO: not sure about this assert. ABC is not defined, so should it really be 0?
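+ // (On ELF targets an undefined weak symbol resolves to address 0 at link time, + // so `ABC as usize` should indeed be 0 there; other binary formats may behave + // differently, which is presumably why the assert below stays disabled.)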
+ //unsafe { assert_eq!(ABC as usize, 0); } + + &mut (|| Some(0 as *const ())) as &mut dyn FnMut() -> Option<*const ()>; + + let f = 1000.0; + assert_eq!(f as u8, 255); + let f2 = -1000.0; + assert_eq!(f2 as i8, -128); + assert_eq!(f2 as u8, 0); + + static ANOTHER_STATIC: &u8 = &A_STATIC; + assert_eq!(*ANOTHER_STATIC, 42); + + check_niche_behavior(); + + extern "C" { + type ExternType; + } + + struct ExternTypeWrapper { + _a: ExternType, + } + + let nullptr = 0 as *const (); + let extern_nullptr = nullptr as *const ExternTypeWrapper; + extern_nullptr as *const (); + let slice_ptr = &[] as *const [u8]; + slice_ptr as *const u8; + + #[cfg(not(jit))] + test_tls(); +} + +#[repr(C)] +enum c_void { + _1, + _2, +} + +type c_int = i32; +type c_ulong = u64; + +type pthread_t = c_ulong; + +#[repr(C)] +struct pthread_attr_t { + __size: [u64; 7], +} + +#[link(name = "pthread")] +extern "C" { + fn pthread_attr_init(attr: *mut pthread_attr_t) -> c_int; + + fn pthread_create( + native: *mut pthread_t, + attr: *const pthread_attr_t, + f: extern "C" fn(_: *mut c_void) -> *mut c_void, + value: *mut c_void + ) -> c_int; + + fn pthread_join( + native: pthread_t, + value: *mut *mut c_void + ) -> c_int; +} + +#[thread_local] +#[cfg(not(jit))] +static mut TLS: u8 = 42; + +#[cfg(not(jit))] +extern "C" fn mutate_tls(_: *mut c_void) -> *mut c_void { + unsafe { TLS = 0; } + 0 as *mut c_void +} + +#[cfg(not(jit))] +fn test_tls() { + unsafe { + let mut attr: pthread_attr_t = zeroed(); + let mut thread: pthread_t = 0; + + assert_eq!(TLS, 42); + + if pthread_attr_init(&mut attr) != 0 { + assert!(false); + } + + if pthread_create(&mut thread, &attr, mutate_tls, 0 as *mut c_void) != 0 { + assert!(false); + } + + let mut res = 0 as *mut c_void; + pthread_join(thread, &mut res); + + // TLS of main thread must not have been changed by the other thread. + assert_eq!(TLS, 42); + + puts("TLS works!\n\0" as *const str as *const u8); + } +} + +// Copied ui/issues/issue-61696.rs + +pub enum Infallible {} + +// The check that the `bool` field of `V1` is encoding a "niche variant" +// (i.e. not `V1`, so `V3` or `V4`) used to be mathematically incorrect, +// causing valid `V1` values to be interpreted as other variants. +pub enum E1 { + V1 { f: bool }, + V2 { f: Infallible }, + V3, + V4, +} + +// Computing the discriminant used to be done using the niche type (here `u8`, +// from the `bool` field of `V1`), overflowing for variants with large enough +// indices (`V3` and `V4`), causing them to be interpreted as other variants. 
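+// (With the 255 payload variants below, `V3` and `V4` land at indices 256 and 257, +// past what a `u8` niche can encode; `check_niche_behavior` at the end of this +// file exercises exactly those two problematic variants.)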
+pub enum E2<X> { + V1 { f: bool }, + + /*_00*/ _01(X), _02(X), _03(X), _04(X), _05(X), _06(X), _07(X), + _08(X), _09(X), _0A(X), _0B(X), _0C(X), _0D(X), _0E(X), _0F(X), + _10(X), _11(X), _12(X), _13(X), _14(X), _15(X), _16(X), _17(X), + _18(X), _19(X), _1A(X), _1B(X), _1C(X), _1D(X), _1E(X), _1F(X), + _20(X), _21(X), _22(X), _23(X), _24(X), _25(X), _26(X), _27(X), + _28(X), _29(X), _2A(X), _2B(X), _2C(X), _2D(X), _2E(X), _2F(X), + _30(X), _31(X), _32(X), _33(X), _34(X), _35(X), _36(X), _37(X), + _38(X), _39(X), _3A(X), _3B(X), _3C(X), _3D(X), _3E(X), _3F(X), + _40(X), _41(X), _42(X), _43(X), _44(X), _45(X), _46(X), _47(X), + _48(X), _49(X), _4A(X), _4B(X), _4C(X), _4D(X), _4E(X), _4F(X), + _50(X), _51(X), _52(X), _53(X), _54(X), _55(X), _56(X), _57(X), + _58(X), _59(X), _5A(X), _5B(X), _5C(X), _5D(X), _5E(X), _5F(X), + _60(X), _61(X), _62(X), _63(X), _64(X), _65(X), _66(X), _67(X), + _68(X), _69(X), _6A(X), _6B(X), _6C(X), _6D(X), _6E(X), _6F(X), + _70(X), _71(X), _72(X), _73(X), _74(X), _75(X), _76(X), _77(X), + _78(X), _79(X), _7A(X), _7B(X), _7C(X), _7D(X), _7E(X), _7F(X), + _80(X), _81(X), _82(X), _83(X), _84(X), _85(X), _86(X), _87(X), + _88(X), _89(X), _8A(X), _8B(X), _8C(X), _8D(X), _8E(X), _8F(X), + _90(X), _91(X), _92(X), _93(X), _94(X), _95(X), _96(X), _97(X), + _98(X), _99(X), _9A(X), _9B(X), _9C(X), _9D(X), _9E(X), _9F(X), + _A0(X), _A1(X), _A2(X), _A3(X), _A4(X), _A5(X), _A6(X), _A7(X), + _A8(X), _A9(X), _AA(X), _AB(X), _AC(X), _AD(X), _AE(X), _AF(X), + _B0(X), _B1(X), _B2(X), _B3(X), _B4(X), _B5(X), _B6(X), _B7(X), + _B8(X), _B9(X), _BA(X), _BB(X), _BC(X), _BD(X), _BE(X), _BF(X), + _C0(X), _C1(X), _C2(X), _C3(X), _C4(X), _C5(X), _C6(X), _C7(X), + _C8(X), _C9(X), _CA(X), _CB(X), _CC(X), _CD(X), _CE(X), _CF(X), + _D0(X), _D1(X), _D2(X), _D3(X), _D4(X), _D5(X), _D6(X), _D7(X), + _D8(X), _D9(X), _DA(X), _DB(X), _DC(X), _DD(X), _DE(X), _DF(X), + _E0(X), _E1(X), _E2(X), _E3(X), _E4(X), _E5(X), _E6(X), _E7(X), + _E8(X), _E9(X), _EA(X), _EB(X), _EC(X), _ED(X), _EE(X), _EF(X), + _F0(X), _F1(X), _F2(X), _F3(X), _F4(X), _F5(X), _F6(X), _F7(X), + _F8(X), _F9(X), _FA(X), _FB(X), _FC(X), _FD(X), _FE(X), _FF(X), + + V3, + V4, +} + +fn check_niche_behavior () { + if let E1::V2 { .. } = (E1::V1 { f: true }) { + intrinsics::abort(); + } + + if let E2::V1 { .. } = E2::V3::<Infallible> { + intrinsics::abort(); + } +} diff --git a/example/mod_bench.rs b/example/mod_bench.rs new file mode 100644 index 0000000000000..2e2b0052dee8b --- /dev/null +++ b/example/mod_bench.rs @@ -0,0 +1,37 @@ +#![feature(start, box_syntax, core_intrinsics, lang_items)] +#![no_std] + +#[link(name = "c")] +extern {} + +#[panic_handler] +fn panic_handler(_: &core::panic::PanicInfo) -> ! { + unsafe { + core::intrinsics::abort(); + } +} + +#[lang="eh_personality"] +fn eh_personality(){} + +// Required for rustc_codegen_llvm +#[no_mangle] +unsafe extern "C" fn _Unwind_Resume() { + core::intrinsics::unreachable(); +} + +#[start] +fn main(_argc: isize, _argv: *const *const u8) -> isize { + for i in 2..100_000_000 { + black_box((i + 1) % i); + } + + 0 +} + +#[inline(never)] +fn black_box(i: u32) { + if i != 1 { + unsafe { core::intrinsics::abort(); } + } +} diff --git a/example/std_example.rs b/example/std_example.rs new file mode 100644 index 0000000000000..d99d6bb4b855e --- /dev/null +++ b/example/std_example.rs @@ -0,0 +1,279 @@ +#![feature(core_intrinsics, generators, generator_trait, is_sorted)] + +use std::arch::x86_64::*; +use std::io::Write; +use std::ops::Generator; + +extern { + pub fn printf(format: *const i8, ...)
-> i32; +} + +fn main() { + let mutex = std::sync::Mutex::new(()); + let _guard = mutex.lock().unwrap(); + + let _ = ::std::iter::repeat('a' as u8).take(10).collect::<Vec<_>>(); + let stderr = ::std::io::stderr(); + let mut stderr = stderr.lock(); + + // FIXME: this thread panics. + std::thread::spawn(move || { + println!("Hello from another thread!"); + }); + + writeln!(stderr, "some {} text", "").unwrap(); + + let _ = std::process::Command::new("true").env("c", "d").spawn(); + + println!("cargo:rustc-link-lib=z"); + + static ONCE: std::sync::Once = std::sync::Once::new(); + ONCE.call_once(|| {}); + + let _eq = LoopState::Continue(()) == LoopState::Break(()); + + // Make sure ByValPair values with differently sized components are correctly passed + map(None::<(u8, Box<Instruction>)>); + + println!("{}", 2.3f32.exp()); + println!("{}", 2.3f32.exp2()); + println!("{}", 2.3f32.abs()); + println!("{}", 2.3f32.sqrt()); + println!("{}", 2.3f32.floor()); + println!("{}", 2.3f32.ceil()); + println!("{}", 2.3f32.min(1.0)); + println!("{}", 2.3f32.max(1.0)); + println!("{}", 2.3f32.powi(2)); + println!("{}", 2.3f32.log2()); + assert_eq!(2.3f32.copysign(-1.0), -2.3f32); + println!("{}", 2.3f32.powf(2.0)); + + assert_eq!(-128i8, (-128i8).saturating_sub(1)); + assert_eq!(127i8, 127i8.saturating_sub(-128)); + assert_eq!(-128i8, (-128i8).saturating_add(-128)); + assert_eq!(127i8, 127i8.saturating_add(1)); + + assert_eq!(-32768i16, (-32768i16).saturating_add(-32768)); + assert_eq!(32767i16, 32767i16.saturating_add(1)); + + /*assert_eq!(0b0000000000000000000000000010000010000000000000000000000000000000_0000000000100000000000000000000000001000000000000100000000000000u128.leading_zeros(), 26); + assert_eq!(0b0000000000000000000000000010000000000000000000000000000000000000_0000000000000000000000000000000000001000000000000000000010000000u128.trailing_zeros(), 7); + + let _d = 0i128.checked_div(2i128); + let _d = 0u128.checked_div(2u128); + assert_eq!(1u128 + 2, 3); + + assert_eq!(0b100010000000000000000000000000000u128 >> 10, 0b10001000000000000000000u128); + assert_eq!(0xFEDCBA987654321123456789ABCDEFu128 >> 64, 0xFEDCBA98765432u128); + assert_eq!(0xFEDCBA987654321123456789ABCDEFu128 as i128 >> 64, 0xFEDCBA98765432i128); + + let tmp = 353985398u128; + assert_eq!(tmp * 932490u128, 330087843781020u128); + + let tmp = -0x1234_5678_9ABC_DEF0i64; + assert_eq!(tmp as i128, -0x1234_5678_9ABC_DEF0i128); + + // Check that all u/i128 <-> float casts work correctly.
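+ // (100 is exactly representable in f32 and f64, so each cast below should + // round-trip with no rounding; any mismatch would point at the 128-bit <-> + // float lowering itself.)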
+ let houndred_u128 = 100u128; + let houndred_i128 = 100i128; + let houndred_f32 = 100.0f32; + let houndred_f64 = 100.0f64; + assert_eq!(houndred_u128 as f32, 100.0); + assert_eq!(houndred_u128 as f64, 100.0); + assert_eq!(houndred_f32 as u128, 100); + assert_eq!(houndred_f64 as u128, 100); + assert_eq!(houndred_i128 as f32, 100.0); + assert_eq!(houndred_i128 as f64, 100.0); + assert_eq!(houndred_f32 as i128, 100); + assert_eq!(houndred_f64 as i128, 100);*/ + + let _a = 1u32 << 2u8; + + let empty: [i32; 0] = []; + assert!(empty.is_sorted()); + + println!("{:?}", std::intrinsics::caller_location()); + + /*unsafe { + test_simd(); + }*/ + + Box::pin(move |mut _task_context| { + yield (); + }).as_mut().resume(0); + + println!("End"); +} + +/*#[target_feature(enable = "sse2")] +unsafe fn test_simd() { + let x = _mm_setzero_si128(); + let y = _mm_set1_epi16(7); + let or = _mm_or_si128(x, y); + let cmp_eq = _mm_cmpeq_epi8(y, y); + let cmp_lt = _mm_cmplt_epi8(y, y); + + /*assert_eq!(std::mem::transmute::<_, [u16; 8]>(or), [7, 7, 7, 7, 7, 7, 7, 7]); + assert_eq!(std::mem::transmute::<_, [u16; 8]>(cmp_eq), [0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff]); + assert_eq!(std::mem::transmute::<_, [u16; 8]>(cmp_lt), [0, 0, 0, 0, 0, 0, 0, 0]); + + test_mm_slli_si128(); + test_mm_movemask_epi8(); + test_mm256_movemask_epi8(); + test_mm_add_epi8(); + test_mm_add_pd(); + test_mm_cvtepi8_epi16(); + test_mm_cvtsi128_si64(); + + // FIXME(#666) implement `#[rustc_arg_required_const(..)]` support + //test_mm_extract_epi8(); + + let mask1 = _mm_movemask_epi8(dbg!(_mm_setr_epi8(255u8 as i8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0))); + assert_eq!(mask1, 1);*/ +}*/ + +/*#[target_feature(enable = "sse2")] +unsafe fn test_mm_slli_si128() { + #[rustfmt::skip] + let a = _mm_setr_epi8( + 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, + ); + let r = _mm_slli_si128(a, 1); + let e = _mm_setr_epi8(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15); + assert_eq_m128i(r, e); + + #[rustfmt::skip] + let a = _mm_setr_epi8( + 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, + ); + let r = _mm_slli_si128(a, 15); + let e = _mm_setr_epi8(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1); + assert_eq_m128i(r, e); + + #[rustfmt::skip] + let a = _mm_setr_epi8( + 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, + ); + let r = _mm_slli_si128(a, 16); + assert_eq_m128i(r, _mm_set1_epi8(0)); + + #[rustfmt::skip] + let a = _mm_setr_epi8( + 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, + ); + let r = _mm_slli_si128(a, -1); + assert_eq_m128i(_mm_set1_epi8(0), r); + + #[rustfmt::skip] + let a = _mm_setr_epi8( + 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, + ); + let r = _mm_slli_si128(a, -0x80000000); + assert_eq_m128i(r, _mm_set1_epi8(0)); +} + +#[target_feature(enable = "sse2")] +unsafe fn test_mm_movemask_epi8() { + #[rustfmt::skip] + let a = _mm_setr_epi8( + 0b1000_0000u8 as i8, 0b0, 0b1000_0000u8 as i8, 0b01, + 0b0101, 0b1111_0000u8 as i8, 0, 0, + 0, 0, 0b1111_0000u8 as i8, 0b0101, + 0b01, 0b1000_0000u8 as i8, 0b0, 0b1000_0000u8 as i8, + ); + let r = _mm_movemask_epi8(a); + assert_eq!(r, 0b10100100_00100101); +} + +#[target_feature(enable = "avx2")] +unsafe fn test_mm256_movemask_epi8() { + let a = _mm256_set1_epi8(-1); + let r = _mm256_movemask_epi8(a); + let e = -1; + assert_eq!(r, e); +} + +#[target_feature(enable = "sse2")] +unsafe fn test_mm_add_epi8() { + let a = _mm_setr_epi8(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15); + #[rustfmt::skip] + let b = _mm_setr_epi8( 
+ 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, + ); + let r = _mm_add_epi8(a, b); + #[rustfmt::skip] + let e = _mm_setr_epi8( + 16, 18, 20, 22, 24, 26, 28, 30, 32, 34, 36, 38, 40, 42, 44, 46, + ); + assert_eq_m128i(r, e); +} + +#[target_feature(enable = "sse2")] +unsafe fn test_mm_add_pd() { + let a = _mm_setr_pd(1.0, 2.0); + let b = _mm_setr_pd(5.0, 10.0); + let r = _mm_add_pd(a, b); + assert_eq_m128d(r, _mm_setr_pd(6.0, 12.0)); +} + +fn assert_eq_m128i(x: std::arch::x86_64::__m128i, y: std::arch::x86_64::__m128i) { + unsafe { + assert_eq!(std::mem::transmute::<_, [u8; 16]>(x), std::mem::transmute::<_, [u8; 16]>(y)); + } +} + +#[target_feature(enable = "sse2")] +pub unsafe fn assert_eq_m128d(a: __m128d, b: __m128d) { + if _mm_movemask_pd(_mm_cmpeq_pd(a, b)) != 0b11 { + panic!("{:?} != {:?}", a, b); + } +} + +#[target_feature(enable = "sse2")] +unsafe fn test_mm_cvtsi128_si64() { + let r = _mm_cvtsi128_si64(std::mem::transmute::<[i64; 2], _>([5, 0])); + assert_eq!(r, 5); +} + +#[target_feature(enable = "sse4.1")] +unsafe fn test_mm_cvtepi8_epi16() { + let a = _mm_set1_epi8(10); + let r = _mm_cvtepi8_epi16(a); + let e = _mm_set1_epi16(10); + assert_eq_m128i(r, e); + let a = _mm_set1_epi8(-10); + let r = _mm_cvtepi8_epi16(a); + let e = _mm_set1_epi16(-10); + assert_eq_m128i(r, e); +} + +#[target_feature(enable = "sse4.1")] +unsafe fn test_mm_extract_epi8() { + #[rustfmt::skip] + let a = _mm_setr_epi8( + -1, 1, 2, 3, 4, 5, 6, 7, + 8, 9, 10, 11, 12, 13, 14, 15 + ); + let r1 = _mm_extract_epi8(a, 0); + let r2 = _mm_extract_epi8(a, 19); + assert_eq!(r1, 0xFF); + assert_eq!(r2, 3); +}*/ + +#[derive(PartialEq)] +enum LoopState { + Continue(()), + Break(()) +} + +pub enum Instruction { + Increment, + Loop, +} + +fn map(a: Option<(u8, Box<Instruction>)>) -> Option<Box<Instruction>> { + match a { + None => None, + Some((_, instr)) => Some(instr), + } +} diff --git a/example/subslice-patterns-const-eval.rs b/example/subslice-patterns-const-eval.rs new file mode 100644 index 0000000000000..2cb84786f56d0 --- /dev/null +++ b/example/subslice-patterns-const-eval.rs @@ -0,0 +1,97 @@ +// Based on https://github.com/rust-lang/rust/blob/c5840f9d252c2f5cc16698dbf385a29c5de3ca07/src/test/ui/array-slice-vec/subslice-patterns-const-eval-match.rs + +// Test that array subslice patterns are correctly handled in const evaluation. + +// run-pass + +#[derive(PartialEq, Debug, Clone)] +struct N(u8); + +#[derive(PartialEq, Debug, Clone)] +struct Z; + +macro_rules! n { + ($($e:expr),* $(,)?) => { + [$(N($e)),*] + } +} + +// This macro has an unused variable so that it can be repeated based on the +// number of times a repeated variable (`$e` in `z`) occurs. +macro_rules! zed { + ($e:expr) => { Z } +} + +macro_rules! z { + ($($e:expr),* $(,)?) => { + [$(zed!($e)),*] + } +} + +// Compare constant evaluation and runtime evaluation of a given expression. +macro_rules! compare_evaluation { + ($e:expr, $t:ty $(,)?) => {{ + const CONST_EVAL: $t = $e; + const fn const_eval() -> $t { $e } + static CONST_EVAL2: $t = const_eval(); + let runtime_eval = $e; + assert_eq!(CONST_EVAL, runtime_eval); + assert_eq!(CONST_EVAL2, runtime_eval); + }} +} + +// Repeat `$test`, substituting the given macro variables with the given +// identifiers. +// +// For example: +// +// repeat! { +// ($name); X; Y: +// struct $name; +// } +// +// Expands to: +// +// struct X; struct Y; +// +// This is used to repeat the tests using both the `N` and `Z` +// types. +macro_rules!
repeat { + (($($dollar:tt $placeholder:ident)*); $($($values:ident),+);*: $($test:tt)*) => { + macro_rules! single { + ($($dollar $placeholder:ident),*) => { $($test)* } + } + $(single!($($values),+);)* + } +} + +fn main() { + repeat! { + ($arr $Ty); n, N; z, Z: + compare_evaluation!({ let [_, x @ .., _] = $arr!(1, 2, 3, 4); x }, [$Ty; 2]); + compare_evaluation!({ let [_, ref x @ .., _] = $arr!(1, 2, 3, 4); x }, &'static [$Ty; 2]); + compare_evaluation!({ let [_, x @ .., _] = &$arr!(1, 2, 3, 4); x }, &'static [$Ty; 2]); + + compare_evaluation!({ let [_, _, x @ .., _, _] = $arr!(1, 2, 3, 4); x }, [$Ty; 0]); + compare_evaluation!( + { let [_, _, ref x @ .., _, _] = $arr!(1, 2, 3, 4); x }, + &'static [$Ty; 0], + ); + compare_evaluation!( + { let [_, _, x @ .., _, _] = &$arr!(1, 2, 3, 4); x }, + &'static [$Ty; 0], + ); + + compare_evaluation!({ let [_, .., x] = $arr!(1, 2, 3, 4); x }, $Ty); + compare_evaluation!({ let [_, .., ref x] = $arr!(1, 2, 3, 4); x }, &'static $Ty); + compare_evaluation!({ let [_, _y @ .., x] = &$arr!(1, 2, 3, 4); x }, &'static $Ty); + } + + compare_evaluation!({ let [_, .., N(x)] = n!(1, 2, 3, 4); x }, u8); + compare_evaluation!({ let [_, .., N(ref x)] = n!(1, 2, 3, 4); x }, &'static u8); + compare_evaluation!({ let [_, .., N(x)] = &n!(1, 2, 3, 4); x }, &'static u8); + + compare_evaluation!({ let [N(x), .., _] = n!(1, 2, 3, 4); x }, u8); + compare_evaluation!({ let [N(ref x), .., _] = n!(1, 2, 3, 4); x }, &'static u8); + compare_evaluation!({ let [N(x), .., _] = &n!(1, 2, 3, 4); x }, &'static u8); +} diff --git a/example/track-caller-attribute.rs b/example/track-caller-attribute.rs new file mode 100644 index 0000000000000..93bab17e46b27 --- /dev/null +++ b/example/track-caller-attribute.rs @@ -0,0 +1,40 @@ +// Based on https://github.com/anp/rust/blob/175631311716d7dfeceec40d2587cde7142ffa8c/src/test/ui/rfc-2091-track-caller/track-caller-attribute.rs + +// run-pass + +use std::panic::Location; + +#[track_caller] +fn tracked() -> &'static Location<'static> { + Location::caller() +} + +fn nested_intrinsic() -> &'static Location<'static> { + Location::caller() +} + +fn nested_tracked() -> &'static Location<'static> { + tracked() +} + +fn main() { + let location = Location::caller(); + assert_eq!(location.file(), file!()); + assert_eq!(location.line(), 21); + assert_eq!(location.column(), 20); + + let tracked = tracked(); + assert_eq!(tracked.file(), file!()); + assert_eq!(tracked.line(), 26); + assert_eq!(tracked.column(), 19); + + let nested = nested_intrinsic(); + assert_eq!(nested.file(), file!()); + assert_eq!(nested.line(), 13); + assert_eq!(nested.column(), 5); + + let contained = nested_tracked(); + assert_eq!(contained.file(), file!()); + assert_eq!(contained.line(), 17); + assert_eq!(contained.column(), 5); +} diff --git a/gcc_path b/gcc_path new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/patches/0022-core-Disable-not-compiling-tests.patch b/patches/0022-core-Disable-not-compiling-tests.patch new file mode 100644 index 0000000000000..aae62a938b457 --- /dev/null +++ b/patches/0022-core-Disable-not-compiling-tests.patch @@ -0,0 +1,63 @@ +From f6befc4bb51d84f5f1cf35938a168c953d421350 Mon Sep 17 00:00:00 2001 +From: bjorn3 +Date: Sun, 24 Nov 2019 15:10:23 +0100 +Subject: [PATCH] [core] Disable not compiling tests + +--- + library/core/tests/Cargo.toml | 8 ++++++++ + library/core/tests/num/flt2dec/mod.rs | 1 - + library/core/tests/num/int_macros.rs | 2 ++ + library/core/tests/num/uint_macros.rs | 2 ++ + library/core/tests/ptr.rs | 2 ++ + 
library/core/tests/slice.rs | 2 ++ + 6 files changed, 16 insertions(+), 1 deletion(-) + create mode 100644 library/core/tests/Cargo.toml + +diff --git a/library/core/tests/Cargo.toml b/library/core/tests/Cargo.toml +new file mode 100644 +index 0000000..46fd999 +--- /dev/null ++++ b/library/core/tests/Cargo.toml +@@ -0,0 +1,8 @@ ++[package] ++name = "core" ++version = "0.0.0" ++edition = "2018" ++ ++[lib] ++name = "coretests" ++path = "lib.rs" +diff --git a/library/core/tests/num/flt2dec/mod.rs b/library/core/tests/num/flt2dec/mod.rs +index a35897e..f0bf645 100644 +--- a/library/core/tests/num/flt2dec/mod.rs ++++ b/library/core/tests/num/flt2dec/mod.rs +@@ -13,7 +13,6 @@ mod strategy { + mod dragon; + mod grisu; + } +-mod random; + + pub fn decode_finite<T: DecodableFloat>(v: T) -> Decoded { + match decode(v).1 { +diff --git a/library/core/tests/slice.rs b/library/core/tests/slice.rs +index 6609bc3..241b497 100644 +--- a/library/core/tests/slice.rs ++++ b/library/core/tests/slice.rs +@@ -1209,6 +1209,7 @@ fn brute_force_rotate_test_1() { + } + } + ++/* + #[test] + #[cfg(not(target_arch = "wasm32"))] + fn sort_unstable() { +@@ -1394,6 +1395,7 @@ fn partition_at_index() { + v.select_nth_unstable(0); + assert!(v == [0xDEADBEEF]); + } ++*/ + + #[test] + #[should_panic(expected = "index 0 greater than length of slice")] +-- +2.21.0 (Apple Git-122) diff --git a/patches/0023-core-Ignore-failing-tests.patch b/patches/0023-core-Ignore-failing-tests.patch new file mode 100644 index 0000000000000..ee5ba449fb8e6 --- /dev/null +++ b/patches/0023-core-Ignore-failing-tests.patch @@ -0,0 +1,49 @@ +From dd82e95c9de212524e14fc60155de1ae40156dfc Mon Sep 17 00:00:00 2001 +From: bjorn3 +Date: Sun, 24 Nov 2019 15:34:06 +0100 +Subject: [PATCH] [core] Ignore failing tests + +--- + library/core/tests/iter.rs | 4 ++++ + library/core/tests/num/bignum.rs | 10 ++++++++++ + library/core/tests/num/mod.rs | 5 +++-- + library/core/tests/time.rs | 1 + + 4 files changed, 18 insertions(+), 2 deletions(-) + +diff --git a/library/core/tests/array.rs b/library/core/tests/array.rs +index 4bc44e9..8e3c7a4 100644 +--- a/library/core/tests/array.rs ++++ b/library/core/tests/array.rs +@@ -242,6 +242,7 @@ fn iterator_drops() { + assert_eq!(i.get(), 5); + } + ++/* + // This test does not work on targets without panic=unwind support. + // To work around this problem, test is marked is should_panic, so it will + // be automagically skipped on unsuitable targets, such as +@@ -283,6 +284,7 @@ fn array_default_impl_avoids_leaks_on_panic() { + assert_eq!(COUNTER.load(Relaxed), 0); + panic!("test succeeded") + } ++*/ + + #[test] + fn empty_array_is_always_default() { +@@ -304,6 +304,7 @@ fn array_map() { + assert_eq!(b, [1, 2, 3]); + } + ++/* + // See note on above test for why `should_panic` is used. + #[test] + #[should_panic(expected = "test succeeded")] +@@ -332,6 +333,7 @@ fn array_map_drop_safety() { + assert_eq!(DROPPED.load(Ordering::SeqCst), num_to_create); + panic!("test succeeded") + } ++*/ + + #[test] + fn cell_allows_array_cycle() { +-- 2.21.0 (Apple Git-122) diff --git a/prepare.sh b/prepare.sh new file mode 100755 index 0000000000000..503fa29b36269 --- /dev/null +++ b/prepare.sh @@ -0,0 +1,22 @@ +#!/bin/bash --verbose +set -e + +source prepare_build.sh + +cargo install hyperfine || echo "Skipping hyperfine install" + +git clone https://github.com/rust-lang/regex.git || echo "rust-lang/regex has already been cloned" +pushd regex +git checkout -- .
+git checkout 341f207c1071f7290e3f228c710817c280c8dca1 +popd + +git clone https://github.com/ebobby/simple-raytracer || echo "ebobby/simple-raytracer has already been cloned" +pushd simple-raytracer +git checkout -- . +git checkout 804a7a21b9e673a482797aa289a18ed480e4d813 + +# build with cg_llvm for perf comparison +cargo build +mv target/debug/main raytracer_cg_llvm +popd diff --git a/prepare_build.sh b/prepare_build.sh new file mode 100755 index 0000000000000..ccf5350983015 --- /dev/null +++ b/prepare_build.sh @@ -0,0 +1,5 @@ +#!/bin/bash --verbose +set -e + +rustup component add rust-src rustc-dev llvm-tools-preview +./build_sysroot/prepare_sysroot_src.sh diff --git a/rust-toolchain b/rust-toolchain new file mode 100644 index 0000000000000..3a29ac5ddb120 --- /dev/null +++ b/rust-toolchain @@ -0,0 +1 @@ +nightly-2021-07-21 diff --git a/rustup.sh b/rustup.sh new file mode 100755 index 0000000000000..01ce5bb78be0f --- /dev/null +++ b/rustup.sh @@ -0,0 +1,29 @@ +#!/bin/bash + +set -e + +case $1 in + "prepare") + TOOLCHAIN=$(date +%Y-%m-%d) + + echo "=> Installing new nightly" + rustup toolchain install --profile minimal nightly-${TOOLCHAIN} # Sanity check to see if the nightly exists + echo nightly-${TOOLCHAIN} > rust-toolchain + + echo "=> Uninstalling all old nighlies" + for nightly in $(rustup toolchain list | grep nightly | grep -v $TOOLCHAIN | grep -v nightly-x86_64); do + rustup toolchain uninstall $nightly + done + + ./clean_all.sh + ./prepare.sh + ;; + "commit") + git add rust-toolchain + git commit -m "Rustup to $(rustc -V)" + ;; + *) + echo "Unknown command '$1'" + echo "Usage: ./rustup.sh prepare|commit" + ;; +esac diff --git a/src/abi.rs b/src/abi.rs new file mode 100644 index 0000000000000..95d536dc51c5f --- /dev/null +++ b/src/abi.rs @@ -0,0 +1,286 @@ +use gccjit::{ToRValue, Type}; +use rustc_codegen_ssa::traits::{AbiBuilderMethods, BaseTypeMethods}; +use rustc_middle::bug; +use rustc_middle::ty::Ty; +use rustc_target::abi::call::{CastTarget, FnAbi, PassMode, Reg, RegKind}; + +use crate::builder::Builder; +use crate::context::CodegenCx; +use crate::intrinsic::ArgAbiExt; +use crate::type_of::LayoutGccExt; + +impl<'a, 'gcc, 'tcx> AbiBuilderMethods<'tcx> for Builder<'a, 'gcc, 'tcx> { + fn apply_attrs_callsite(&mut self, _fn_abi: &FnAbi<'tcx, Ty<'tcx>>, _callsite: Self::Value) { + // TODO + //fn_abi.apply_attrs_callsite(self, callsite) + } + + fn get_param(&self, index: usize) -> Self::Value { + self.cx.current_func.borrow().expect("current func") + .get_param(index as i32) + .to_rvalue() + } +} + +impl GccType for CastTarget { + fn gcc_type<'gcc>(&self, cx: &CodegenCx<'gcc, '_>) -> Type<'gcc> { + let rest_gcc_unit = self.rest.unit.gcc_type(cx); + let (rest_count, rem_bytes) = + if self.rest.unit.size.bytes() == 0 { + (0, 0) + } + else { + (self.rest.total.bytes() / self.rest.unit.size.bytes(), self.rest.total.bytes() % self.rest.unit.size.bytes()) + }; + + if self.prefix.iter().all(|x| x.is_none()) { + // Simplify to a single unit when there is no prefix and size <= unit size + if self.rest.total <= self.rest.unit.size { + return rest_gcc_unit; + } + + // Simplify to array when all chunks are the same size and type + if rem_bytes == 0 { + return cx.type_array(rest_gcc_unit, rest_count); + } + } + + // Create list of fields in the main structure + let mut args: Vec<_> = self + .prefix + .iter() + .flat_map(|option_kind| { + option_kind.map(|kind| Reg { kind, size: self.prefix_chunk_size }.gcc_type(cx)) + }) + .chain((0..rest_count).map(|_| rest_gcc_unit)) + .collect(); + + // 
Append final integer + if rem_bytes != 0 { + // Only integers can be really split further. + assert_eq!(self.rest.unit.kind, RegKind::Integer); + args.push(cx.type_ix(rem_bytes * 8)); + } + + cx.type_struct(&args, false) + } +} + +pub trait GccType { + fn gcc_type<'gcc>(&self, cx: &CodegenCx<'gcc, '_>) -> Type<'gcc>; +} + +impl GccType for Reg { + fn gcc_type<'gcc>(&self, cx: &CodegenCx<'gcc, '_>) -> Type<'gcc> { + match self.kind { + RegKind::Integer => cx.type_ix(self.size.bits()), + RegKind::Float => { + match self.size.bits() { + 32 => cx.type_f32(), + 64 => cx.type_f64(), + _ => bug!("unsupported float: {:?}", self), + } + }, + RegKind::Vector => unimplemented!(), //cx.type_vector(cx.type_i8(), self.size.bytes()), + } + } +} + +pub trait FnAbiGccExt<'gcc, 'tcx> { + // TODO: return a function pointer type instead? + fn gcc_type(&self, cx: &CodegenCx<'gcc, 'tcx>) -> (Type<'gcc>, Vec<Type<'gcc>>, bool); + fn ptr_to_gcc_type(&self, cx: &CodegenCx<'gcc, 'tcx>) -> Type<'gcc>; + /*fn llvm_cconv(&self) -> llvm::CallConv; + fn apply_attrs_llfn(&self, cx: &CodegenCx<'ll, 'tcx>, llfn: &'ll Value); + fn apply_attrs_callsite(&self, bx: &mut Builder<'a, 'll, 'tcx>, callsite: &'ll Value);*/ +} + +impl<'gcc, 'tcx> FnAbiGccExt<'gcc, 'tcx> for FnAbi<'tcx, Ty<'tcx>> { + fn gcc_type(&self, cx: &CodegenCx<'gcc, 'tcx>) -> (Type<'gcc>, Vec<Type<'gcc>>, bool) { + let args_capacity: usize = self.args.iter().map(|arg| + if arg.pad.is_some() { + 1 + } + else { + 0 + } + + if let PassMode::Pair(_, _) = arg.mode { + 2 + } else { + 1 + } + ).sum(); + let mut argument_tys = Vec::with_capacity( + if let PassMode::Indirect { .. } = self.ret.mode { + 1 + } + else { + 0 + } + + args_capacity, + ); + + let return_ty = + match self.ret.mode { + PassMode::Ignore => cx.type_void(), + PassMode::Direct(_) | PassMode::Pair(..) => self.ret.layout.immediate_gcc_type(cx), + PassMode::Cast(cast) => cast.gcc_type(cx), + PassMode::Indirect { .. } => { + argument_tys.push(cx.type_ptr_to(self.ret.memory_ty(cx))); + cx.type_void() + } + }; + + for arg in &self.args { + // add padding + if let Some(ty) = arg.pad { + argument_tys.push(ty.gcc_type(cx)); + } + + let arg_ty = match arg.mode { + PassMode::Ignore => continue, + PassMode::Direct(_) => arg.layout.immediate_gcc_type(cx), + PassMode::Pair(..) => { + argument_tys.push(arg.layout.scalar_pair_element_gcc_type(cx, 0, true)); + argument_tys.push(arg.layout.scalar_pair_element_gcc_type(cx, 1, true)); + continue; + } + PassMode::Indirect { extra_attrs: Some(_), .. } => { + /*let ptr_ty = cx.tcx.mk_mut_ptr(arg.layout.ty); + let ptr_layout = cx.layout_of(ptr_ty); + argument_tys.push(ptr_layout.scalar_pair_element_gcc_type(cx, 0, true)); + argument_tys.push(ptr_layout.scalar_pair_element_gcc_type(cx, 1, true));*/ + unimplemented!(); + //continue; + } + PassMode::Cast(cast) => cast.gcc_type(cx), + PassMode::Indirect { extra_attrs: None, ..
} => cx.type_ptr_to(arg.memory_ty(cx)), + }; + argument_tys.push(arg_ty); + } + + (return_ty, argument_tys, self.c_variadic) + } + + fn ptr_to_gcc_type(&self, cx: &CodegenCx<'gcc, 'tcx>) -> Type<'gcc> { + let (return_type, params, variadic) = self.gcc_type(cx); + let pointer_type = cx.context.new_function_pointer_type(None, return_type, &params, variadic); + pointer_type + } + + /*fn llvm_cconv(&self) -> llvm::CallConv { + match self.conv { + Conv::C | Conv::Rust => llvm::CCallConv, + Conv::AmdGpuKernel => llvm::AmdGpuKernel, + Conv::ArmAapcs => llvm::ArmAapcsCallConv, + Conv::Msp430Intr => llvm::Msp430Intr, + Conv::PtxKernel => llvm::PtxKernel, + Conv::X86Fastcall => llvm::X86FastcallCallConv, + Conv::X86Intr => llvm::X86_Intr, + Conv::X86Stdcall => llvm::X86StdcallCallConv, + Conv::X86ThisCall => llvm::X86_ThisCall, + Conv::X86VectorCall => llvm::X86_VectorCall, + Conv::X86_64SysV => llvm::X86_64_SysV, + Conv::X86_64Win64 => llvm::X86_64_Win64, + } + } + + fn apply_attrs_llfn(&self, cx: &CodegenCx<'ll, 'tcx>, llfn: &'ll Value) { + // FIXME(eddyb) can this also be applied to callsites? + if self.ret.layout.abi.is_uninhabited() { + llvm::Attribute::NoReturn.apply_llfn(llvm::AttributePlace::Function, llfn); + } + + // FIXME(eddyb, wesleywiser): apply this to callsites as well? + if !self.can_unwind { + llvm::Attribute::NoUnwind.apply_llfn(llvm::AttributePlace::Function, llfn); + } + + let mut i = 0; + let mut apply = |attrs: &ArgAttributes, ty: Option<&Type>| { + attrs.apply_llfn(llvm::AttributePlace::Argument(i), llfn, ty); + i += 1; + }; + match self.ret.mode { + PassMode::Direct(ref attrs) => { + attrs.apply_llfn(llvm::AttributePlace::ReturnValue, llfn, None); + } + PassMode::Indirect(ref attrs, _) => apply(attrs, Some(self.ret.layout.gcc_type(cx))), + _ => {} + } + for arg in &self.args { + if arg.pad.is_some() { + apply(&ArgAttributes::new(), None); + } + match arg.mode { + PassMode::Ignore => {} + PassMode::Direct(ref attrs) | PassMode::Indirect(ref attrs, None) => { + apply(attrs, Some(arg.layout.gcc_type(cx))) + } + PassMode::Indirect(ref attrs, Some(ref extra_attrs)) => { + apply(attrs, None); + apply(extra_attrs, None); + } + PassMode::Pair(ref a, ref b) => { + apply(a, None); + apply(b, None); + } + PassMode::Cast(_) => apply(&ArgAttributes::new(), None), + } + } + } + + fn apply_attrs_callsite(&self, bx: &mut Builder<'a, 'll, 'tcx>, callsite: &'ll Value) { + // FIXME(wesleywiser, eddyb): We should apply `nounwind` and `noreturn` as appropriate to this callsite. + + let mut i = 0; + let mut apply = |attrs: &ArgAttributes, ty: Option<&Type>| { + attrs.apply_callsite(llvm::AttributePlace::Argument(i), callsite, ty); + i += 1; + }; + match self.ret.mode { + PassMode::Direct(ref attrs) => { + attrs.apply_callsite(llvm::AttributePlace::ReturnValue, callsite, None); + } + PassMode::Indirect(ref attrs, _) => apply(attrs, Some(self.ret.layout.gcc_type(bx))), + _ => {} + } + if let abi::Abi::Scalar(ref scalar) = self.ret.layout.abi { + // If the value is a boolean, the range is 0..2 and that ultimately + // become 0..0 when the type becomes i1, which would be rejected + // by the LLVM verifier. + if let Int(..)
= scalar.value { + if !scalar.is_bool() { + let range = scalar.valid_range_exclusive(bx); + if range.start != range.end { + bx.range_metadata(callsite, range); + } + } + } + } + for arg in &self.args { + if arg.pad.is_some() { + apply(&ArgAttributes::new(), None); + } + match arg.mode { + PassMode::Ignore => {} + PassMode::Direct(ref attrs) | PassMode::Indirect(ref attrs, None) => { + apply(attrs, Some(arg.layout.gcc_type(bx))) + } + PassMode::Indirect(ref attrs, Some(ref extra_attrs)) => { + apply(attrs, None); + apply(extra_attrs, None); + } + PassMode::Pair(ref a, ref b) => { + apply(a, None); + apply(b, None); + } + PassMode::Cast(_) => apply(&ArgAttributes::new(), None), + } + } + + let cconv = self.llvm_cconv(); + if cconv != llvm::CCallConv { + llvm::SetInstructionCallConv(callsite, cconv); + } + }*/ +} diff --git a/src/allocator.rs b/src/allocator.rs new file mode 100644 index 0000000000000..11b2fad3e7eee --- /dev/null +++ b/src/allocator.rs @@ -0,0 +1,115 @@ +//use crate::attributes; +use gccjit::{FunctionType, ToRValue}; +use rustc_ast::expand::allocator::{AllocatorKind, AllocatorTy, ALLOCATOR_METHODS}; +use rustc_middle::bug; +use rustc_middle::ty::TyCtxt; +use rustc_span::symbol::sym; + +use crate::GccContext; + +pub(crate) unsafe fn codegen(tcx: TyCtxt<'_>, mods: &mut GccContext, kind: AllocatorKind, has_alloc_error_handler: bool) { + let context = &mods.context; + let usize = + match tcx.sess.target.pointer_width { + 16 => context.new_type::<u16>(), + 32 => context.new_type::<u32>(), + 64 => context.new_type::<u64>(), + tws => bug!("Unsupported target word size for int: {}", tws), + }; + let i8 = context.new_type::<i8>(); + let i8p = i8.make_pointer(); + let void = context.new_type::<()>(); + + for method in ALLOCATOR_METHODS { + let mut types = Vec::with_capacity(method.inputs.len()); + for ty in method.inputs.iter() { + match *ty { + AllocatorTy::Layout => { + types.push(usize); + types.push(usize); + } + AllocatorTy::Ptr => types.push(i8p), + AllocatorTy::Usize => types.push(usize), + + AllocatorTy::ResultPtr | AllocatorTy::Unit => panic!("invalid allocator arg"), + } + } + let output = match method.output { + AllocatorTy::ResultPtr => Some(i8p), + AllocatorTy::Unit => None, + + AllocatorTy::Layout | AllocatorTy::Usize | AllocatorTy::Ptr => { + panic!("invalid allocator output") + } + }; + let name = format!("__rust_{}", method.name); + + let args: Vec<_> = types.iter().enumerate() + .map(|(index, typ)| context.new_parameter(None, *typ, &format!("param{}", index))) + .collect(); + let func = context.new_function(None, FunctionType::Exported, output.unwrap_or(void), &args, name, false); + + if tcx.sess.target.options.default_hidden_visibility { + //llvm::LLVMRustSetVisibility(func, llvm::Visibility::Hidden); + } + if tcx.sess.must_emit_unwind_tables() { + // TODO + //attributes::emit_uwtable(func, true); + } + + let callee = kind.fn_name(method.name); + let args: Vec<_> = types.iter().enumerate() + .map(|(index, typ)| context.new_parameter(None, *typ, &format!("param{}", index))) + .collect(); + let callee = context.new_function(None, FunctionType::Extern, output.unwrap_or(void), &args, callee, false); + //llvm::LLVMRustSetVisibility(callee, llvm::Visibility::Hidden); + + let block = func.new_block("entry"); + + let args = args + .iter() + .enumerate() + .map(|(i, _)| func.get_param(i as i32).to_rvalue()) + .collect::<Vec<_>>(); + let ret = context.new_call(None, callee, &args); + //llvm::LLVMSetTailCall(ret, True); + if output.is_some() { + block.end_with_return(None, ret); + } + else {
block.end_with_void_return(None); + } + } + + let types = [usize, usize]; + let name = "__rust_alloc_error_handler".to_string(); + let args: Vec<_> = types.iter().enumerate() + .map(|(index, typ)| context.new_parameter(None, *typ, &format!("param{}", index))) + .collect(); + let func = context.new_function(None, FunctionType::Exported, void, &args, name, false); + + let kind = + if has_alloc_error_handler { + AllocatorKind::Global + } + else { + AllocatorKind::Default + }; + let callee = kind.fn_name(sym::oom); + let args: Vec<_> = types.iter().enumerate() + .map(|(index, typ)| context.new_parameter(None, *typ, &format!("param{}", index))) + .collect(); + let callee = context.new_function(None, FunctionType::Extern, void, &args, callee, false); + //llvm::LLVMRustSetVisibility(callee, llvm::Visibility::Hidden); + + let block = func.new_block("entry"); + + let args = args + .iter() + .enumerate() + .map(|(i, _)| func.get_param(i as i32).to_rvalue()) + .collect::<Vec<_>>(); + let _ret = context.new_call(None, callee, &args); + //llvm::LLVMSetTailCall(ret, True); + block.end_with_void_return(None); +} diff --git a/src/archive.rs b/src/archive.rs new file mode 100644 index 0000000000000..52acf4f2d26e7 --- /dev/null +++ b/src/archive.rs @@ -0,0 +1,270 @@ +use std::fs::File; +use std::path::{Path, PathBuf}; + +use rustc_session::Session; +use rustc_codegen_ssa::back::archive::{find_library, ArchiveBuilder}; +use rustc_codegen_ssa::METADATA_FILENAME; +use rustc_data_structures::temp_dir::MaybeTempDir; +use rustc_middle::middle::cstore::DllImport; +use rustc_span::symbol::Symbol; + +struct ArchiveConfig<'a> { + sess: &'a Session, + dst: PathBuf, + lib_search_paths: Vec<PathBuf>, + use_native_ar: bool, + use_gnu_style_archive: bool, +} + +#[derive(Debug)] +enum ArchiveEntry { + FromArchive { + archive_index: usize, + entry_index: usize, + }, + File(PathBuf), +} + +pub struct ArArchiveBuilder<'a> { + config: ArchiveConfig<'a>, + src_archives: Vec<(PathBuf, ar::Archive<File>)>, + // Don't use `HashMap` here, as the order is important. `rust.metadata.bin` must always be at + // the end of an archive for linkers to not get confused.
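+ // (A `Vec` of pairs preserves that insertion order when the archive is + // rebuilt, whereas a `HashMap` would iterate in an unspecified order.)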
+ entries: Vec<(String, ArchiveEntry)>, +} + +impl<'a> ArchiveBuilder<'a> for ArArchiveBuilder<'a> { + fn new(sess: &'a Session, output: &Path, input: Option<&Path>) -> Self { + use rustc_codegen_ssa::back::link::archive_search_paths; + let config = ArchiveConfig { + sess, + dst: output.to_path_buf(), + lib_search_paths: archive_search_paths(sess), + use_native_ar: false, + // FIXME test for linux and System V derivatives instead + use_gnu_style_archive: sess.target.options.archive_format == "gnu", + }; + + let (src_archives, entries) = if let Some(input) = input { + let mut archive = ar::Archive::new(File::open(input).unwrap()); + let mut entries = Vec::new(); + + let mut i = 0; + while let Some(entry) = archive.next_entry() { + let entry = entry.unwrap(); + entries.push(( + String::from_utf8(entry.header().identifier().to_vec()).unwrap(), + ArchiveEntry::FromArchive { + archive_index: 0, + entry_index: i, + }, + )); + i += 1; + } + + (vec![(input.to_owned(), archive)], entries) + } else { + (vec![], Vec::new()) + }; + + ArArchiveBuilder { + config, + src_archives, + entries, + } + } + + fn src_files(&mut self) -> Vec<String> { + self.entries.iter().map(|(name, _)| name.clone()).collect() + } + + fn remove_file(&mut self, name: &str) { + let index = self + .entries + .iter() + .position(|(entry_name, _)| entry_name == name) + .expect("Tried to remove file not existing in src archive"); + self.entries.remove(index); + } + + fn add_file(&mut self, file: &Path) { + self.entries.push(( + file.file_name().unwrap().to_str().unwrap().to_string(), + ArchiveEntry::File(file.to_owned()), + )); + } + + fn add_native_library(&mut self, name: Symbol, verbatim: bool) { + let location = find_library(name, verbatim, &self.config.lib_search_paths, self.config.sess); + self.add_archive(location.clone(), |_| false) + .unwrap_or_else(|e| { + panic!( + "failed to add native library {}: {}", + location.to_string_lossy(), + e + ); + }); + } + + fn add_rlib( + &mut self, + rlib: &Path, + name: &str, + lto: bool, + skip_objects: bool, + ) -> std::io::Result<()> { + let obj_start = name.to_owned(); + + self.add_archive(rlib.to_owned(), move |fname: &str| { + // Ignore metadata files, no matter the name.
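+ // (This closure is a skip predicate: members for which it returns `true` + // are left out of the rebuilt archive.)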
+ if fname == METADATA_FILENAME { + return true; + } + + // Don't include Rust objects if LTO is enabled + if lto && fname.starts_with(&obj_start) && fname.ends_with(".o") { + return true; + } + + // Otherwise if this is *not* a rust object and we're skipping + // objects then skip this file + if skip_objects && (!fname.starts_with(&obj_start) || !fname.ends_with(".o")) { + return true; + } + + // ok, don't skip this + return false; + }) + } + + fn update_symbols(&mut self) { + } + + fn build(mut self) { + use std::process::Command; + + fn add_file_using_ar(archive: &Path, file: &Path) { + Command::new("ar") + .arg("r") // add or replace file + .arg("-c") // silence created file message + .arg(archive) + .arg(&file) + .status() + .unwrap(); + } + + enum BuilderKind<'a> { + Bsd(ar::Builder<File>), + Gnu(ar::GnuBuilder<File>), + NativeAr(&'a Path), + } + + let mut builder = if self.config.use_native_ar { + BuilderKind::NativeAr(&self.config.dst) + } else if self.config.use_gnu_style_archive { + BuilderKind::Gnu(ar::GnuBuilder::new( + File::create(&self.config.dst).unwrap(), + self.entries + .iter() + .map(|(name, _)| name.as_bytes().to_vec()) + .collect(), + )) + } else { + BuilderKind::Bsd(ar::Builder::new(File::create(&self.config.dst).unwrap())) + }; + + // Add all files + for (entry_name, entry) in self.entries.into_iter() { + match entry { + ArchiveEntry::FromArchive { + archive_index, + entry_index, + } => { + let (ref src_archive_path, ref mut src_archive) = + self.src_archives[archive_index]; + let entry = src_archive.jump_to_entry(entry_index).unwrap(); + let header = entry.header().clone(); + + match builder { + BuilderKind::Bsd(ref mut builder) => { + builder.append(&header, entry).unwrap() + } + BuilderKind::Gnu(ref mut builder) => { + builder.append(&header, entry).unwrap() + } + BuilderKind::NativeAr(archive_file) => { + Command::new("ar") + .arg("x") + .arg(src_archive_path) + .arg(&entry_name) + .status() + .unwrap(); + add_file_using_ar(archive_file, Path::new(&entry_name)); + std::fs::remove_file(entry_name).unwrap(); + } + } + } + ArchiveEntry::File(file) => + match builder { + BuilderKind::Bsd(ref mut builder) => { + builder + .append_file(entry_name.as_bytes(), &mut File::open(file).expect("file for bsd builder")) + .unwrap() + }, + BuilderKind::Gnu(ref mut builder) => { + builder + .append_file(entry_name.as_bytes(), &mut File::open(&file).expect(&format!("file {:?} for gnu builder", file))) + .unwrap() + }, + BuilderKind::NativeAr(archive_file) => add_file_using_ar(archive_file, &file), + }, + } + } + + // Finalize archive + std::mem::drop(builder); + + // Run ranlib to be able to link the archive + let status = std::process::Command::new("ranlib") + .arg(self.config.dst) + .status() + .expect("Couldn't run ranlib"); + + if !status.success() { + self.config.sess.fatal(&format!("Ranlib exited with code {:?}", status.code())); + } + } + + fn inject_dll_import_lib(&mut self, _lib_name: &str, _dll_imports: &[DllImport], _tmpdir: &MaybeTempDir) { + unimplemented!(); + } +} + +impl<'a> ArArchiveBuilder<'a> { + fn add_archive<F>(&mut self, archive_path: PathBuf, mut skip: F) -> std::io::Result<()> + where + F: FnMut(&str) -> bool + 'static, + { + let mut archive = ar::Archive::new(std::fs::File::open(&archive_path)?); + let archive_index = self.src_archives.len(); + + let mut i = 0; + while let Some(entry) = archive.next_entry() { + let entry = entry.unwrap(); + let file_name = String::from_utf8(entry.header().identifier().to_vec()).unwrap(); + if !skip(&file_name) { + self.entries.push((
+ file_name,
+ ArchiveEntry::FromArchive {
+ archive_index,
+ entry_index: i,
+ },
+ ));
+ }
+ i += 1;
+ }
+
+ self.src_archives.push((archive_path, archive));
+ Ok(())
+ }
+}
diff --git a/src/asm.rs b/src/asm.rs
new file mode 100644
index 0000000000000..6616366235fcd
--- /dev/null
+++ b/src/asm.rs
@@ -0,0 +1,632 @@
+use gccjit::{RValue, ToRValue, Type};
+use rustc_ast::ast::{InlineAsmOptions, InlineAsmTemplatePiece};
+use rustc_codegen_ssa::mir::operand::OperandValue;
+use rustc_codegen_ssa::mir::place::PlaceRef;
+use rustc_codegen_ssa::traits::{AsmBuilderMethods, AsmMethods, BaseTypeMethods, BuilderMethods, GlobalAsmOperandRef, InlineAsmOperandRef};
+use rustc_data_structures::fx::FxHashMap;
+use rustc_hir::LlvmInlineAsmInner;
+use rustc_middle::bug;
+use rustc_span::Span;
+use rustc_target::asm::*;
+
+use crate::builder::Builder;
+use crate::context::CodegenCx;
+use crate::type_of::LayoutGccExt;
+
+impl<'a, 'gcc, 'tcx> AsmBuilderMethods<'tcx> for Builder<'a, 'gcc, 'tcx> {
+ fn codegen_llvm_inline_asm(&mut self, _ia: &LlvmInlineAsmInner, _outputs: Vec<PlaceRef<'tcx, RValue<'gcc>>>, mut _inputs: Vec<RValue<'gcc>>, _span: Span) -> bool {
+ // TODO
+ return true;
+
+ /*let mut ext_constraints = vec![];
+ let mut output_types = vec![];
+
+ // Prepare the output operands
+ let mut indirect_outputs = vec![];
+ for (i, (out, &place)) in ia.outputs.iter().zip(&outputs).enumerate() {
+ if out.is_rw {
+ let operand = self.load_operand(place);
+ if let OperandValue::Immediate(_) = operand.val {
+ inputs.push(operand.immediate());
+ }
+ ext_constraints.push(i.to_string());
+ }
+ if out.is_indirect {
+ let operand = self.load_operand(place);
+ if let OperandValue::Immediate(_) = operand.val {
+ indirect_outputs.push(operand.immediate());
+ }
+ } else {
+ output_types.push(place.layout.gcc_type(self.cx()));
+ }
+ }
+ if !indirect_outputs.is_empty() {
+ indirect_outputs.extend_from_slice(&inputs);
+ inputs = indirect_outputs;
+ }
+
+ let clobbers = ia.clobbers.iter().map(|s| format!("~{{{}}}", &s));
+
+ // Default per-arch clobbers
+ // Basically what clang does
+ let arch_clobbers = match &self.sess().target.target.arch[..] {
+ "x86" | "x86_64" => vec!["~{dirflag}", "~{fpsr}", "~{flags}"],
+ "mips" | "mips64" => vec!["~{$1}"],
+ _ => Vec::new(),
+ };
+
+ let all_constraints = ia
+ .outputs
+ .iter()
+ .map(|out| out.constraint.to_string())
+ .chain(ia.inputs.iter().map(|s| s.to_string()))
+ .chain(ext_constraints)
+ .chain(clobbers)
+ .chain(arch_clobbers.iter().map(|s| (*s).to_string()))
+ .collect::<Vec<String>>()
+ .join(",");
+
+ debug!("Asm Constraints: {}", &all_constraints);
+
+ // Depending on how many outputs we have, the return type is different
+ let num_outputs = output_types.len();
+ let output_type = match num_outputs {
+ 0 => self.type_void(),
+ 1 => output_types[0],
+ _ => self.type_struct(&output_types, false),
+ };
+
+ let asm = ia.asm.as_str();
+ let r = inline_asm_call(
+ self,
+ &asm,
+ &all_constraints,
+ &inputs,
+ output_type,
+ ia.volatile,
+ ia.alignstack,
+ ia.dialect,
+ );
+ if r.is_none() {
+ return false;
+ }
+ let r = r.unwrap();
+
+ // Again, based on how many outputs we have
+ let outputs = ia.outputs.iter().zip(&outputs).filter(|&(ref o, _)| !o.is_indirect);
+ for (i, (_, &place)) in outputs.enumerate() {
+ let v = if num_outputs == 1 { r } else { self.extract_value(r, i as u64) };
+ OperandValue::Immediate(v).store(self, place);
+ }
+
+ // Store mark in a metadata node so we can map LLVM errors
+ // back to source locations. See #17552.
+ unsafe { + let key = "srcloc"; + let kind = llvm::LLVMGetMDKindIDInContext( + self.llcx, + key.as_ptr() as *const c_char, + key.len() as c_uint, + ); + + let val: &'ll Value = self.const_i32(span.ctxt().outer_expn().as_u32() as i32); + + llvm::LLVMSetMetadata(r, kind, llvm::LLVMMDNodeInContext(self.llcx, &val, 1)); + } + + true*/ + } + + fn codegen_inline_asm(&mut self, template: &[InlineAsmTemplatePiece], operands: &[InlineAsmOperandRef<'tcx, Self>], options: InlineAsmOptions, _span: &[Span]) { + let asm_arch = self.tcx.sess.asm_arch.unwrap(); + + let intel_dialect = + match asm_arch { + InlineAsmArch::X86 | InlineAsmArch::X86_64 if !options.contains(InlineAsmOptions::ATT_SYNTAX) => true, + _ => false, + }; + + // Collect the types of output operands + // FIXME: we do this here instead of later because of a bug in libgccjit where creating the + // variable after the extended asm expression causes a segfault: + // https://gcc.gnu.org/bugzilla/show_bug.cgi?id=100380 + let mut output_vars = FxHashMap::default(); + let mut operand_numbers = FxHashMap::default(); + let mut current_number = 0; + for (idx, op) in operands.iter().enumerate() { + match *op { + InlineAsmOperandRef::Out { place, .. } => { + let ty = + match place { + Some(place) => place.layout.gcc_type(self.cx, false), + None => { + // If the output is discarded, we don't really care what + // type is used. We're just using this to tell GCC to + // reserve the register. + //dummy_output_type(self.cx, reg.reg_class()) + + // NOTE: if no output value, we should not create one (it will be a + // clobber). + continue; + }, + }; + let var = self.current_func().new_local(None, ty, "output_register"); + operand_numbers.insert(idx, current_number); + current_number += 1; + output_vars.insert(idx, var); + } + InlineAsmOperandRef::InOut { out_place, .. } => { + let ty = + match out_place { + Some(place) => place.layout.gcc_type(self.cx, false), + None => { + // If the output is discarded, we don't really care what + // type is used. We're just using this to tell GCC to + // reserve the register. + //dummy_output_type(self.cx, reg.reg_class()) + + // NOTE: if no output value, we should not create one. + continue; + }, + }; + operand_numbers.insert(idx, current_number); + current_number += 1; + let var = self.current_func().new_local(None, ty, "output_register"); + output_vars.insert(idx, var); + } + _ => {} + } + } + + // All output operands must come before the input operands, hence the 2 loops. + for (idx, op) in operands.iter().enumerate() { + match *op { + InlineAsmOperandRef::In { .. } | InlineAsmOperandRef::InOut { .. } => { + operand_numbers.insert(idx, current_number); + current_number += 1; + }, + _ => (), + } + } + + // Build the template string + let mut template_str = String::new(); + for piece in template { + match *piece { + InlineAsmTemplatePiece::String(ref string) => { + if string.contains('%') { + for c in string.chars() { + if c == '%' { + template_str.push_str("%%"); + } + else { + template_str.push(c); + } + } + } + else { + template_str.push_str(string) + } + } + InlineAsmTemplatePiece::Placeholder { operand_idx, modifier, span: _ } => { + match operands[operand_idx] { + InlineAsmOperandRef::Out { reg, place: Some(_), .. 
} => { + let modifier = modifier_to_gcc(asm_arch, reg.reg_class(), modifier); + if let Some(modifier) = modifier { + template_str.push_str(&format!("%{}{}", modifier, operand_numbers[&operand_idx])); + } else { + template_str.push_str(&format!("%{}", operand_numbers[&operand_idx])); + } + }, + InlineAsmOperandRef::Out { place: None, .. } => { + unimplemented!("Out None"); + }, + InlineAsmOperandRef::In { reg, .. } + | InlineAsmOperandRef::InOut { reg, .. } => { + let modifier = modifier_to_gcc(asm_arch, reg.reg_class(), modifier); + if let Some(modifier) = modifier { + template_str.push_str(&format!("%{}{}", modifier, operand_numbers[&operand_idx])); + } else { + template_str.push_str(&format!("%{}", operand_numbers[&operand_idx])); + } + } + InlineAsmOperandRef::Const { ref string } => { + // Const operands get injected directly into the template + template_str.push_str(string); + } + InlineAsmOperandRef::SymFn { .. } + | InlineAsmOperandRef::SymStatic { .. } => { + unimplemented!(); + // Only emit the raw symbol name + //template_str.push_str(&format!("${{{}:c}}", op_idx[&operand_idx])); + } + } + } + } + } + + let block = self.llbb(); + let template_str = + if intel_dialect { + template_str + } + else { + // FIXME: this might break the "m" memory constraint: + // https://stackoverflow.com/a/9347957/389119 + // TODO: only set on x86 platforms. + format!(".att_syntax noprefix\n\t{}\n\t.intel_syntax noprefix", template_str) + }; + let extended_asm = block.add_extended_asm(None, &template_str); + + // Collect the types of output operands + let mut output_types = vec![]; + for (idx, op) in operands.iter().enumerate() { + match *op { + InlineAsmOperandRef::Out { reg, late, place } => { + let ty = + match place { + Some(place) => place.layout.gcc_type(self.cx, false), + None => { + // If the output is discarded, we don't really care what + // type is used. We're just using this to tell GCC to + // reserve the register. + dummy_output_type(self.cx, reg.reg_class()) + }, + }; + output_types.push(ty); + //op_idx.insert(idx, constraints.len()); + let prefix = if late { "=" } else { "=&" }; + let constraint = format!("{}{}", prefix, reg_to_gcc(reg)); + + if place.is_some() { + let var = output_vars[&idx]; + extended_asm.add_output_operand(None, &constraint, var); + } + else { + // NOTE: reg.to_string() returns the register name with quotes around it so + // remove them. + extended_asm.add_clobber(reg.to_string().trim_matches('"')); + } + } + InlineAsmOperandRef::InOut { reg, late, in_value, out_place } => { + let ty = + match out_place { + Some(out_place) => out_place.layout.gcc_type(self.cx, false), + None => dummy_output_type(self.cx, reg.reg_class()) + }; + output_types.push(ty); + //op_idx.insert(idx, constraints.len()); + // TODO: prefix of "+" for reading and writing? + let prefix = if late { "=" } else { "=&" }; + let constraint = format!("{}{}", prefix, reg_to_gcc(reg)); + + if out_place.is_some() { + let var = output_vars[&idx]; + // TODO: also specify an output operand when out_place is none: that would + // be the clobber but clobbers do not support general constraint like reg; + // they only support named registers. + // Not sure how we can do this. And the LLVM backend does not seem to add a + // clobber. 
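+ // NOTE: the input half of the in-out pair is added separately below with the
+ // same register constraint (minus the "=" prefix), which is how a read-write
+ // operand is expressed here without the "+" modifier.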
+ extended_asm.add_output_operand(None, &constraint, var); + } + + let constraint = reg_to_gcc(reg); + extended_asm.add_input_operand(None, &constraint, in_value.immediate()); + } + InlineAsmOperandRef::In { reg, value } => { + let constraint = reg_to_gcc(reg); + extended_asm.add_input_operand(None, &constraint, value.immediate()); + } + _ => {} + } + } + + /*if !options.contains(InlineAsmOptions::PRESERVES_FLAGS) { + match asm_arch { + InlineAsmArch::AArch64 | InlineAsmArch::Arm => { + constraints.push("~{cc}".to_string()); + } + InlineAsmArch::X86 | InlineAsmArch::X86_64 => { + constraints.extend_from_slice(&[ + "~{dirflag}".to_string(), + "~{fpsr}".to_string(), + "~{flags}".to_string(), + ]); + } + InlineAsmArch::RiscV32 | InlineAsmArch::RiscV64 => {} + } + } + if !options.contains(InlineAsmOptions::NOMEM) { + // This is actually ignored by LLVM, but it's probably best to keep + // it just in case. LLVM instead uses the ReadOnly/ReadNone + // attributes on the call instruction to optimize. + constraints.push("~{memory}".to_string()); + } + let volatile = !options.contains(InlineAsmOptions::PURE); + let alignstack = !options.contains(InlineAsmOptions::NOSTACK); + let output_type = match &output_types[..] { + [] => self.type_void(), + [ty] => ty, + tys => self.type_struct(&tys, false), + };*/ + + /*let result = inline_asm_call( + self, + &template_str, + &constraints.join(","), + &inputs, + output_type, + volatile, + alignstack, + dialect, + span, + ) + .unwrap_or_else(|| span_bug!(span, "LLVM asm constraint validation failed")); + + if options.contains(InlineAsmOptions::PURE) { + if options.contains(InlineAsmOptions::NOMEM) { + llvm::Attribute::ReadNone.apply_callsite(llvm::AttributePlace::Function, result); + } else if options.contains(InlineAsmOptions::READONLY) { + llvm::Attribute::ReadOnly.apply_callsite(llvm::AttributePlace::Function, result); + } + } else { + if options.contains(InlineAsmOptions::NOMEM) { + llvm::Attribute::InaccessibleMemOnly + .apply_callsite(llvm::AttributePlace::Function, result); + } else { + // LLVM doesn't have an attribute to represent ReadOnly + SideEffect + } + }*/ + + // Write results to outputs + for (idx, op) in operands.iter().enumerate() { + if let InlineAsmOperandRef::Out { place: Some(place), .. } + | InlineAsmOperandRef::InOut { out_place: Some(place), .. } = *op + { + OperandValue::Immediate(output_vars[&idx].to_rvalue()).store(self, place); + } + } + } +} + +/// Converts a register class to a GCC constraint code. +// TODO: return &'static str instead? +fn reg_to_gcc(reg: InlineAsmRegOrRegClass) -> String { + match reg { + // For vector registers LLVM wants the register name to match the type size. + InlineAsmRegOrRegClass::Reg(reg) => { + // TODO: add support for vector register. + let constraint = + match reg.name() { + "ax" => "a", + "bx" => "b", + "cx" => "c", + "dx" => "d", + "si" => "S", + "di" => "D", + // TODO: for registers like r11, we have to create a register variable: https://stackoverflow.com/a/31774784/389119 + // TODO: in this case though, it's a clobber, so it should work as r11. + // Recent nightly supports clobber() syntax, so update to it. It does not seem + // like it's implemented yet. + name => name, // FIXME: probably wrong. 
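+ // NOTE: the mappings above are GCC's single-register x86 constraint letters:
+ // "a" = ax/eax/rax, "b" = bx, "c" = cx, "d" = dx, "S" = si and "D" = di.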
+ }; + constraint.to_string() + }, + InlineAsmRegOrRegClass::RegClass(reg) => match reg { + InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::preg) => unimplemented!(), + InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::reg) => unimplemented!(), + InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::vreg) => unimplemented!(), + InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::vreg_low16) => unimplemented!(), + InlineAsmRegClass::Arm(ArmInlineAsmRegClass::reg) => unimplemented!(), + InlineAsmRegClass::Arm(ArmInlineAsmRegClass::reg_thumb) => unimplemented!(), + InlineAsmRegClass::Arm(ArmInlineAsmRegClass::sreg) + | InlineAsmRegClass::Arm(ArmInlineAsmRegClass::dreg_low16) + | InlineAsmRegClass::Arm(ArmInlineAsmRegClass::qreg_low8) => unimplemented!(), + InlineAsmRegClass::Arm(ArmInlineAsmRegClass::sreg_low16) + | InlineAsmRegClass::Arm(ArmInlineAsmRegClass::dreg_low8) + | InlineAsmRegClass::Arm(ArmInlineAsmRegClass::qreg_low4) => unimplemented!(), + InlineAsmRegClass::Arm(ArmInlineAsmRegClass::dreg) + | InlineAsmRegClass::Arm(ArmInlineAsmRegClass::qreg) => unimplemented!(), + InlineAsmRegClass::Bpf(_) => unimplemented!(), + InlineAsmRegClass::Hexagon(HexagonInlineAsmRegClass::reg) => unimplemented!(), + InlineAsmRegClass::Mips(MipsInlineAsmRegClass::reg) => unimplemented!(), + InlineAsmRegClass::Mips(MipsInlineAsmRegClass::freg) => unimplemented!(), + InlineAsmRegClass::Nvptx(NvptxInlineAsmRegClass::reg16) => unimplemented!(), + InlineAsmRegClass::Nvptx(NvptxInlineAsmRegClass::reg32) => unimplemented!(), + InlineAsmRegClass::Nvptx(NvptxInlineAsmRegClass::reg64) => unimplemented!(), + InlineAsmRegClass::PowerPC(PowerPCInlineAsmRegClass::reg) => unimplemented!(), + InlineAsmRegClass::PowerPC(PowerPCInlineAsmRegClass::reg_nonzero) => unimplemented!(), + InlineAsmRegClass::PowerPC(PowerPCInlineAsmRegClass::freg) => unimplemented!(), + InlineAsmRegClass::RiscV(RiscVInlineAsmRegClass::reg) => unimplemented!(), + InlineAsmRegClass::RiscV(RiscVInlineAsmRegClass::freg) => unimplemented!(), + InlineAsmRegClass::RiscV(RiscVInlineAsmRegClass::vreg) => unimplemented!(), + InlineAsmRegClass::X86(X86InlineAsmRegClass::mmx_reg) => unimplemented!(), + InlineAsmRegClass::X86(X86InlineAsmRegClass::reg) => "r", + InlineAsmRegClass::X86(X86InlineAsmRegClass::reg_abcd) => unimplemented!(), + InlineAsmRegClass::X86(X86InlineAsmRegClass::reg_byte) => unimplemented!(), + InlineAsmRegClass::X86(X86InlineAsmRegClass::xmm_reg) + | InlineAsmRegClass::X86(X86InlineAsmRegClass::ymm_reg) => unimplemented!(), + InlineAsmRegClass::X86(X86InlineAsmRegClass::x87_reg) => unimplemented!(), + InlineAsmRegClass::X86(X86InlineAsmRegClass::zmm_reg) => unimplemented!(), + InlineAsmRegClass::X86(X86InlineAsmRegClass::kreg) => unimplemented!(), + InlineAsmRegClass::Wasm(WasmInlineAsmRegClass::local) => unimplemented!(), + InlineAsmRegClass::SpirV(SpirVInlineAsmRegClass::reg) => { + bug!("GCC backend does not support SPIR-V") + } + InlineAsmRegClass::Err => unreachable!(), + } + .to_string(), + } +} + +/// Type to use for outputs that are discarded. It doesn't really matter what +/// the type is, as long as it is valid for the constraint code. 
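+/// A discarded output arises from Rust source such as `asm!("rdtsc", out("eax") _)`,
+/// where the register must still be reserved even though its value is unused.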
+fn dummy_output_type<'gcc, 'tcx>(cx: &CodegenCx<'gcc, 'tcx>, reg: InlineAsmRegClass) -> Type<'gcc> { + match reg { + InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::reg) => cx.type_i32(), + InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::preg) => unimplemented!(), + InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::vreg) + | InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::vreg_low16) => { + unimplemented!() + } + InlineAsmRegClass::Arm(ArmInlineAsmRegClass::reg) + | InlineAsmRegClass::Arm(ArmInlineAsmRegClass::reg_thumb) => cx.type_i32(), + InlineAsmRegClass::Arm(ArmInlineAsmRegClass::sreg) + | InlineAsmRegClass::Arm(ArmInlineAsmRegClass::sreg_low16) => cx.type_f32(), + InlineAsmRegClass::Arm(ArmInlineAsmRegClass::dreg) + | InlineAsmRegClass::Arm(ArmInlineAsmRegClass::dreg_low16) + | InlineAsmRegClass::Arm(ArmInlineAsmRegClass::dreg_low8) => cx.type_f64(), + InlineAsmRegClass::Arm(ArmInlineAsmRegClass::qreg) + | InlineAsmRegClass::Arm(ArmInlineAsmRegClass::qreg_low8) + | InlineAsmRegClass::Arm(ArmInlineAsmRegClass::qreg_low4) => { + unimplemented!() + } + InlineAsmRegClass::Bpf(_) => unimplemented!(), + InlineAsmRegClass::Hexagon(HexagonInlineAsmRegClass::reg) => cx.type_i32(), + InlineAsmRegClass::Mips(MipsInlineAsmRegClass::reg) => cx.type_i32(), + InlineAsmRegClass::Mips(MipsInlineAsmRegClass::freg) => cx.type_f32(), + InlineAsmRegClass::Nvptx(NvptxInlineAsmRegClass::reg16) => cx.type_i16(), + InlineAsmRegClass::Nvptx(NvptxInlineAsmRegClass::reg32) => cx.type_i32(), + InlineAsmRegClass::Nvptx(NvptxInlineAsmRegClass::reg64) => cx.type_i64(), + InlineAsmRegClass::PowerPC(PowerPCInlineAsmRegClass::reg) => cx.type_i32(), + InlineAsmRegClass::PowerPC(PowerPCInlineAsmRegClass::reg_nonzero) => cx.type_i32(), + InlineAsmRegClass::PowerPC(PowerPCInlineAsmRegClass::freg) => cx.type_f64(), + InlineAsmRegClass::RiscV(RiscVInlineAsmRegClass::reg) => cx.type_i32(), + InlineAsmRegClass::RiscV(RiscVInlineAsmRegClass::freg) => cx.type_f32(), + InlineAsmRegClass::RiscV(RiscVInlineAsmRegClass::vreg) => cx.type_f32(), + InlineAsmRegClass::X86(X86InlineAsmRegClass::reg) + | InlineAsmRegClass::X86(X86InlineAsmRegClass::reg_abcd) => cx.type_i32(), + InlineAsmRegClass::X86(X86InlineAsmRegClass::reg_byte) => cx.type_i8(), + InlineAsmRegClass::X86(X86InlineAsmRegClass::mmx_reg) => unimplemented!(), + InlineAsmRegClass::X86(X86InlineAsmRegClass::xmm_reg) + | InlineAsmRegClass::X86(X86InlineAsmRegClass::ymm_reg) + | InlineAsmRegClass::X86(X86InlineAsmRegClass::zmm_reg) => cx.type_f32(), + InlineAsmRegClass::X86(X86InlineAsmRegClass::x87_reg) => unimplemented!(), + InlineAsmRegClass::X86(X86InlineAsmRegClass::kreg) => cx.type_i16(), + InlineAsmRegClass::Wasm(WasmInlineAsmRegClass::local) => cx.type_i32(), + InlineAsmRegClass::SpirV(SpirVInlineAsmRegClass::reg) => { + bug!("LLVM backend does not support SPIR-V") + }, + InlineAsmRegClass::Err => unreachable!(), + } +} + +impl<'gcc, 'tcx> AsmMethods for CodegenCx<'gcc, 'tcx> { + fn codegen_global_asm(&self, template: &[InlineAsmTemplatePiece], operands: &[GlobalAsmOperandRef], options: InlineAsmOptions, _line_spans: &[Span]) { + let asm_arch = self.tcx.sess.asm_arch.unwrap(); + + // Default to Intel syntax on x86 + let intel_syntax = matches!(asm_arch, InlineAsmArch::X86 | InlineAsmArch::X86_64) + && !options.contains(InlineAsmOptions::ATT_SYNTAX); + + // Build the template string + let mut template_str = String::new(); + for piece in template { + match *piece { + InlineAsmTemplatePiece::String(ref string) => { + for line in string.lines() { + // 
NOTE: gcc does not allow inline comments, so remove them.
+ let line =
+ if let Some(index) = line.rfind("//") {
+ &line[..index]
+ }
+ else {
+ line
+ };
+ template_str.push_str(line);
+ template_str.push('\n');
+ }
+ },
+ InlineAsmTemplatePiece::Placeholder { operand_idx, modifier: _, span: _ } => {
+ match operands[operand_idx] {
+ GlobalAsmOperandRef::Const { ref string } => {
+ // Const operands get injected directly into the
+ // template. Note that we don't need to escape $
+ // here unlike normal inline assembly.
+ template_str.push_str(string);
+ }
+ }
+ }
+ }
+ }
+
+ let template_str =
+ if intel_syntax {
+ format!("{}\n\t.intel_syntax noprefix", template_str)
+ }
+ else {
+ format!(".att_syntax\n\t{}\n\t.intel_syntax noprefix", template_str)
+ };
+ // NOTE: seems like gcc will put the asm in the wrong section, so set it to .text manually.
+ let template_str = format!(".pushsection .text\n{}\n.popsection", template_str);
+ self.context.add_top_level_asm(None, &template_str);
+ }
+}
+
+fn modifier_to_gcc(arch: InlineAsmArch, reg: InlineAsmRegClass, modifier: Option<char>) -> Option<char> {
+ match reg {
+ InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::reg) => modifier,
+ InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::preg) => modifier,
+ InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::vreg)
+ | InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::vreg_low16) => {
+ unimplemented!()
+ //if modifier == Some('v') { None } else { modifier }
+ }
+ InlineAsmRegClass::Arm(ArmInlineAsmRegClass::reg)
+ | InlineAsmRegClass::Arm(ArmInlineAsmRegClass::reg_thumb) => unimplemented!(),
+ InlineAsmRegClass::Arm(ArmInlineAsmRegClass::sreg)
+ | InlineAsmRegClass::Arm(ArmInlineAsmRegClass::sreg_low16) => unimplemented!(),
+ InlineAsmRegClass::Arm(ArmInlineAsmRegClass::dreg)
+ | InlineAsmRegClass::Arm(ArmInlineAsmRegClass::dreg_low16)
+ | InlineAsmRegClass::Arm(ArmInlineAsmRegClass::dreg_low8) => unimplemented!(),
+ InlineAsmRegClass::Arm(ArmInlineAsmRegClass::qreg)
+ | InlineAsmRegClass::Arm(ArmInlineAsmRegClass::qreg_low8)
+ | InlineAsmRegClass::Arm(ArmInlineAsmRegClass::qreg_low4) => {
+ unimplemented!()
+ /*if modifier.is_none() {
+ Some('q')
+ } else {
+ modifier
+ }*/
+ }
+ InlineAsmRegClass::Bpf(_) => unimplemented!(),
+ InlineAsmRegClass::Hexagon(_) => unimplemented!(),
+ InlineAsmRegClass::Mips(_) => unimplemented!(),
+ InlineAsmRegClass::Nvptx(_) => unimplemented!(),
+ InlineAsmRegClass::PowerPC(_) => unimplemented!(),
+ InlineAsmRegClass::RiscV(RiscVInlineAsmRegClass::reg)
+ | InlineAsmRegClass::RiscV(RiscVInlineAsmRegClass::freg) => unimplemented!(),
+ InlineAsmRegClass::RiscV(RiscVInlineAsmRegClass::vreg) => unimplemented!(),
+ InlineAsmRegClass::X86(X86InlineAsmRegClass::reg)
+ | InlineAsmRegClass::X86(X86InlineAsmRegClass::reg_abcd) => match modifier {
+ None if arch == InlineAsmArch::X86_64 => Some('q'),
+ None => Some('k'),
+ Some('l') => Some('b'),
+ Some('h') => Some('h'),
+ Some('x') => Some('w'),
+ Some('e') => Some('k'),
+ Some('r') => Some('q'),
+ _ => unreachable!(),
+ },
+ InlineAsmRegClass::X86(X86InlineAsmRegClass::mmx_reg) => unimplemented!(),
+ InlineAsmRegClass::X86(X86InlineAsmRegClass::reg_byte) => unimplemented!(),
+ InlineAsmRegClass::X86(X86InlineAsmRegClass::xmm_reg)
+ | InlineAsmRegClass::X86(X86InlineAsmRegClass::ymm_reg)
+ | InlineAsmRegClass::X86(X86InlineAsmRegClass::zmm_reg) => unimplemented!() /*match (reg, modifier) {
+ (X86InlineAsmRegClass::xmm_reg, None) => Some('x'),
+ (X86InlineAsmRegClass::ymm_reg, None) => Some('t'),
+ (X86InlineAsmRegClass::zmm_reg, None) => Some('g'),
+ (_, Some('x')) => Some('x'),
+ (_, Some('y')) => Some('t'),
+ (_, Some('z')) => Some('g'),
+ _ => unreachable!(),
+ }*/,
+ InlineAsmRegClass::X86(X86InlineAsmRegClass::x87_reg) => unimplemented!(),
+ InlineAsmRegClass::X86(X86InlineAsmRegClass::kreg) => unimplemented!(),
+ InlineAsmRegClass::Wasm(WasmInlineAsmRegClass::local) => unimplemented!(),
+ InlineAsmRegClass::SpirV(SpirVInlineAsmRegClass::reg) => {
+ bug!("LLVM backend does not support SPIR-V")
+ },
+ InlineAsmRegClass::Err => unreachable!(),
+ }
+}
diff --git a/src/back/mod.rs b/src/back/mod.rs
new file mode 100644
index 0000000000000..d692799d7642f
--- /dev/null
+++ b/src/back/mod.rs
@@ -0,0 +1 @@
+pub mod write;
diff --git a/src/back/write.rs b/src/back/write.rs
new file mode 100644
index 0000000000000..5201e49427799
--- /dev/null
+++ b/src/back/write.rs
@@ -0,0 +1,234 @@
+use std::fs;
+
+use gccjit::OutputKind;
+use rustc_codegen_ssa::{CompiledModule, ModuleCodegen};
+use rustc_codegen_ssa::back::write::{CodegenContext, EmitObj, ModuleConfig};
+use rustc_errors::Handler;
+use rustc_session::config::OutputType;
+use rustc_span::fatal_error::FatalError;
+use rustc_target::spec::SplitDebuginfo;
+
+use crate::{GccCodegenBackend, GccContext};
+
+pub(crate) unsafe fn codegen(cgcx: &CodegenContext<GccCodegenBackend>, _diag_handler: &Handler, module: ModuleCodegen<GccContext>, config: &ModuleConfig) -> Result<CompiledModule, FatalError> {
+ let _timer = cgcx.prof.generic_activity_with_arg("LLVM_module_codegen", &module.name[..]);
+ {
+ let context = &module.module_llvm.context;
+
+ //let llcx = &*module.module_llvm.llcx;
+ //let tm = &*module.module_llvm.tm;
+ let module_name = module.name.clone();
+ let module_name = Some(&module_name[..]);
+ //let handlers = DiagnosticHandlers::new(cgcx, diag_handler, llcx);
+
+ /*if cgcx.msvc_imps_needed {
+ create_msvc_imps(cgcx, llcx, llmod);
+ }*/
+
+ // A codegen-specific pass manager is used to generate object
+ // files for a GCC module.
+ //
+ // Apparently each of these pass managers is a one-shot kind of
+ // thing, so we create a new one for each type of output. The
+ // pass manager passed to the closure should be ensured to not
+ // escape the closure itself, and the manager should only be
+ // used once.
+ /*unsafe fn with_codegen<'ll, F, R>(tm: &'ll llvm::TargetMachine, llmod: &'ll llvm::Module, no_builtins: bool, f: F) -> R
+ where F: FnOnce(&'ll mut PassManager<'ll>) -> R,
+ {
+ let cpm = llvm::LLVMCreatePassManager();
+ llvm::LLVMAddAnalysisPasses(tm, cpm);
+ llvm::LLVMRustAddLibraryInfo(cpm, llmod, no_builtins);
+ f(cpm)
+ }*/
+
+ // Two things to note:
+ // - If object files are just LLVM bitcode we write bitcode, copy it to
+ // the .o file, and delete the bitcode if it wasn't otherwise
+ // requested.
+ // - If we don't have the integrated assembler then we need to emit
+ // asm from LLVM and use `gcc` to create the object file.
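+ // NOTE: with libgccjit, both the assembly and the object-file paths below reduce
+ // to a single context.compile_to_file() call; only the OutputKind differs.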
+ + let _bc_out = cgcx.output_filenames.temp_path(OutputType::Bitcode, module_name); + let obj_out = cgcx.output_filenames.temp_path(OutputType::Object, module_name); + + if config.bitcode_needed() { + // TODO + /*let _timer = cgcx + .prof + .generic_activity_with_arg("LLVM_module_codegen_make_bitcode", &module.name[..]); + let thin = ThinBuffer::new(llmod); + let data = thin.data(); + + if config.emit_bc || config.emit_obj == EmitObj::Bitcode { + let _timer = cgcx.prof.generic_activity_with_arg( + "LLVM_module_codegen_emit_bitcode", + &module.name[..], + ); + if let Err(e) = fs::write(&bc_out, data) { + let msg = format!("failed to write bytecode to {}: {}", bc_out.display(), e); + diag_handler.err(&msg); + } + } + + if config.emit_obj == EmitObj::ObjectCode(BitcodeSection::Full) { + let _timer = cgcx.prof.generic_activity_with_arg( + "LLVM_module_codegen_embed_bitcode", + &module.name[..], + ); + embed_bitcode(cgcx, llcx, llmod, Some(data)); + } + + if config.emit_bc_compressed { + let _timer = cgcx.prof.generic_activity_with_arg( + "LLVM_module_codegen_emit_compressed_bitcode", + &module.name[..], + ); + let dst = bc_out.with_extension(RLIB_BYTECODE_EXTENSION); + let data = bytecode::encode(&module.name, data); + if let Err(e) = fs::write(&dst, data) { + let msg = format!("failed to write bytecode to {}: {}", dst.display(), e); + diag_handler.err(&msg); + } + }*/ + } /*else if config.emit_obj == EmitObj::ObjectCode(BitcodeSection::Marker) { + unimplemented!(); + //embed_bitcode(cgcx, llcx, llmod, None); + }*/ + + if config.emit_ir { + unimplemented!(); + /*let _timer = cgcx + .prof + .generic_activity_with_arg("LLVM_module_codegen_emit_ir", &module.name[..]); + let out = cgcx.output_filenames.temp_path(OutputType::LlvmAssembly, module_name); + let out_c = path_to_c_string(&out); + + extern "C" fn demangle_callback( + input_ptr: *const c_char, + input_len: size_t, + output_ptr: *mut c_char, + output_len: size_t, + ) -> size_t { + let input = + unsafe { slice::from_raw_parts(input_ptr as *const u8, input_len as usize) }; + + let input = match str::from_utf8(input) { + Ok(s) => s, + Err(_) => return 0, + }; + + let output = unsafe { + slice::from_raw_parts_mut(output_ptr as *mut u8, output_len as usize) + }; + let mut cursor = io::Cursor::new(output); + + let demangled = match rustc_demangle::try_demangle(input) { + Ok(d) => d, + Err(_) => return 0, + }; + + if write!(cursor, "{:#}", demangled).is_err() { + // Possible only if provided buffer is not big enough + return 0; + } + + cursor.position() as size_t + } + + let result = llvm::LLVMRustPrintModule(llmod, out_c.as_ptr(), demangle_callback); + result.into_result().map_err(|()| { + let msg = format!("failed to write LLVM IR to {}", out.display()); + llvm_err(diag_handler, &msg) + })?;*/ + } + + if config.emit_asm { + let _timer = cgcx + .prof + .generic_activity_with_arg("LLVM_module_codegen_emit_asm", &module.name[..]); + let path = cgcx.output_filenames.temp_path(OutputType::Assembly, module_name); + context.compile_to_file(OutputKind::Assembler, path.to_str().expect("path to str")); + + /*with_codegen(tm, llmod, config.no_builtins, |cpm| { + write_output_file(diag_handler, tm, cpm, llmod, &path, llvm::FileType::AssemblyFile) + })?;*/ + } + + match config.emit_obj { + EmitObj::ObjectCode(_) => { + let _timer = cgcx + .prof + .generic_activity_with_arg("LLVM_module_codegen_emit_obj", &module.name[..]); + //with_codegen(tm, llmod, config.no_builtins, |cpm| { + //println!("1: {}", module.name); + match &*module.name { + 
"std_example.7rcbfp3g-cgu.15" => { + println!("Dumping reproducer {}", module.name); + let _ = fs::create_dir("/tmp/reproducers"); + // FIXME: segfault in dump_reproducer_to_file() might be caused by + // transmuting an rvalue to an lvalue. + // Segfault is actually in gcc::jit::reproducer::get_identifier_as_lvalue + context.dump_reproducer_to_file(&format!("/tmp/reproducers/{}.c", module.name)); + println!("Dumped reproducer {}", module.name); + }, + _ => (), + } + /*let _ = fs::create_dir("/tmp/dumps"); + context.dump_to_file(&format!("/tmp/dumps/{}.c", module.name), true); + println!("Dumped {}", module.name);*/ + //println!("Compile module {}", module.name); + context.compile_to_file(OutputKind::ObjectFile, obj_out.to_str().expect("path to str")); + //})?; + } + + EmitObj::Bitcode => { + //unimplemented!(); + /*debug!("copying bitcode {:?} to obj {:?}", bc_out, obj_out); + if let Err(e) = link_or_copy(&bc_out, &obj_out) { + diag_handler.err(&format!("failed to copy bitcode to object file: {}", e)); + } + + if !config.emit_bc { + debug!("removing_bitcode {:?}", bc_out); + if let Err(e) = fs::remove_file(&bc_out) { + diag_handler.err(&format!("failed to remove bitcode: {}", e)); + } + }*/ + } + + EmitObj::None => {} + } + + //drop(handlers); + } + + Ok(module.into_compiled_module( + config.emit_obj != EmitObj::None, + cgcx.target_can_use_split_dwarf && cgcx.split_debuginfo == SplitDebuginfo::Unpacked, + config.emit_bc, + &cgcx.output_filenames, + )) +} + +pub(crate) fn link(_cgcx: &CodegenContext, _diag_handler: &Handler, mut _modules: Vec>) -> Result, FatalError> { + unimplemented!(); + /*use super::lto::{Linker, ModuleBuffer}; + // Sort the modules by name to ensure to ensure deterministic behavior. + modules.sort_by(|a, b| a.name.cmp(&b.name)); + let (first, elements) = + modules.split_first().expect("Bug! 
+ let mut linker = Linker::new(first.module_llvm.llmod());
+ for module in elements {
+ let _timer =
+ cgcx.prof.generic_activity_with_arg("LLVM_link_module", format!("{:?}", module.name));
+ let buffer = ModuleBuffer::new(module.module_llvm.llmod());
+ linker.add(&buffer.data()).map_err(|()| {
+ let msg = format!("failed to serialize module {:?}", module.name);
+ llvm_err(&diag_handler, &msg)
+ })?;
+ }
+ drop(linker);
+ Ok(modules.remove(0))*/
+}
diff --git a/src/base.rs b/src/base.rs
new file mode 100644
index 0000000000000..7050f122a1765
--- /dev/null
+++ b/src/base.rs
@@ -0,0 +1,173 @@
+use std::env;
+use std::sync::Once;
+use std::time::Instant;
+
+use gccjit::{
+ Context,
+ FunctionType,
+ GlobalKind,
+};
+use rustc_hir::def_id::LOCAL_CRATE;
+use rustc_middle::dep_graph;
+use rustc_middle::middle::cstore::EncodedMetadata;
+use rustc_middle::middle::exported_symbols;
+use rustc_middle::ty::TyCtxt;
+use rustc_middle::mir::mono::Linkage;
+use rustc_codegen_ssa::{ModuleCodegen, ModuleKind};
+use rustc_codegen_ssa::base::maybe_create_entry_wrapper;
+use rustc_codegen_ssa::mono_item::MonoItemExt;
+use rustc_codegen_ssa::traits::DebugInfoMethods;
+use rustc_session::config::DebugInfo;
+use rustc_span::Symbol;
+
+use crate::{GccContext, create_function_calling_initializers};
+use crate::builder::Builder;
+use crate::context::CodegenCx;
+
+pub fn global_linkage_to_gcc(linkage: Linkage) -> GlobalKind {
+ match linkage {
+ Linkage::External => GlobalKind::Imported,
+ Linkage::AvailableExternally => GlobalKind::Imported,
+ Linkage::LinkOnceAny => unimplemented!(),
+ Linkage::LinkOnceODR => unimplemented!(),
+ Linkage::WeakAny => unimplemented!(),
+ Linkage::WeakODR => unimplemented!(),
+ Linkage::Appending => unimplemented!(),
+ Linkage::Internal => GlobalKind::Internal,
+ Linkage::Private => GlobalKind::Internal,
+ Linkage::ExternalWeak => GlobalKind::Imported, // TODO: should be weak linkage.
+ Linkage::Common => unimplemented!(),
+ }
+}
+
+pub fn linkage_to_gcc(linkage: Linkage) -> FunctionType {
+ match linkage {
+ Linkage::External => FunctionType::Exported,
+ Linkage::AvailableExternally => FunctionType::Extern,
+ Linkage::LinkOnceAny => unimplemented!(),
+ Linkage::LinkOnceODR => unimplemented!(),
+ Linkage::WeakAny => FunctionType::Exported, // FIXME: should be similar to linkonce.
+ Linkage::WeakODR => unimplemented!(),
+ Linkage::Appending => unimplemented!(),
+ Linkage::Internal => FunctionType::Internal,
+ Linkage::Private => FunctionType::Internal,
+ Linkage::ExternalWeak => unimplemented!(),
+ Linkage::Common => unimplemented!(),
+ }
+}
+
+pub fn compile_codegen_unit<'tcx>(tcx: TyCtxt<'tcx>, cgu_name: Symbol) -> (ModuleCodegen<GccContext>, u64) {
+ let prof_timer = tcx.prof.generic_activity("codegen_module");
+ let start_time = Instant::now();
+
+ let dep_node = tcx.codegen_unit(cgu_name).codegen_dep_node(tcx);
+ let (module, _) = tcx.dep_graph.with_task(dep_node, tcx, cgu_name, module_codegen, dep_graph::hash_result);
+ let time_to_codegen = start_time.elapsed();
+ drop(prof_timer);
+
+ // We assume that the cost to run GCC on a CGU is proportional to
+ // the time we needed for codegenning it.
+ let cost = time_to_codegen.as_secs() * 1_000_000_000 + time_to_codegen.subsec_nanos() as u64;
+
+ fn module_codegen(tcx: TyCtxt<'_>, cgu_name: Symbol) -> ModuleCodegen<GccContext> {
+ let cgu = tcx.codegen_unit(cgu_name);
+ // Instantiate monomorphizations without filling out definitions yet...
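+ // NOTE: a fresh libgccjit context is created for every codegen unit, so the
+ // command-line options set below only affect the current unit, not the whole crate.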
+ //let llvm_module = ModuleLlvm::new(tcx, &cgu_name.as_str());
+ let context = Context::default();
+ // TODO: only set on x86 platforms.
+ context.add_command_line_option("-masm=intel");
+ for arg in &tcx.sess.opts.cg.llvm_args {
+ context.add_command_line_option(arg);
+ }
+ context.add_command_line_option("-fno-semantic-interposition");
+ //context.set_dump_code_on_compile(true);
+ if env::var("CG_GCCJIT_DUMP_GIMPLE").as_deref() == Ok("1") {
+ context.set_dump_initial_gimple(true);
+ }
+ context.set_debug_info(true);
+ //context.set_dump_everything(true);
+ //context.set_keep_intermediates(true);
+
+ {
+ let cx = CodegenCx::new(&context, cgu, tcx);
+
+ static START: Once = Once::new();
+ START.call_once(|| {
+ let initializer_name = format!("__gccGlobalCrateInit{}", tcx.crate_name(LOCAL_CRATE));
+ let func = context.new_function(None, FunctionType::Exported, context.new_type::<()>(), &[], initializer_name, false);
+ let block = func.new_block("initial");
+ create_function_calling_initializers(tcx, &context, block);
+ block.end_with_void_return(None);
+ });
+
+ //println!("module_codegen: {:?} {:?}", cgu_name, &cx.context as *const _);
+ let mono_items = cgu.items_in_deterministic_order(tcx);
+ for &(mono_item, (linkage, visibility)) in &mono_items {
+ mono_item.predefine::<Builder<'_, '_, '_>>(&cx, linkage, visibility);
+ }
+
+ // ... and now that we have everything pre-defined, fill out those definitions.
+ for &(mono_item, _) in &mono_items {
+ mono_item.define::<Builder<'_, '_, '_>>(&cx);
+ }
+
+ // If this codegen unit contains the main function, also create the
+ // wrapper here
+ maybe_create_entry_wrapper::<Builder<'_, '_, '_>>(&cx);
+
+ // Finalize debuginfo
+ if cx.sess().opts.debuginfo != DebugInfo::None {
+ cx.debuginfo_finalize();
+ }
+
+ cx.global_init_block.end_with_void_return(None);
+ }
+
+ ModuleCodegen {
+ name: cgu_name.to_string(),
+ module_llvm: GccContext {
+ context
+ },
+ kind: ModuleKind::Regular,
+ }
+ }
+
+ (module, cost)
+}
+
+pub fn write_compressed_metadata<'tcx>(tcx: TyCtxt<'tcx>, metadata: &EncodedMetadata, gcc_module: &mut GccContext) {
+ use snap::write::FrameEncoder;
+ use std::io::Write;
+
+ // Historical note:
+ //
+ // When using link.exe it was seen that the section name `.note.rustc`
+ // was getting shortened to `.note.ru`, and according to the PE and COFF
+ // specification:
+ //
+ // > Executable images do not use a string table and do not support
+ // > section names longer than 8 characters
+ //
+ // https://docs.microsoft.com/en-us/windows/win32/debug/pe-format
+ //
+ // As a result, we choose a slightly shorter name! As to why
+ // `.note.rustc` works on MinGW, see
+ // https://github.com/llvm/llvm-project/blob/llvmorg-12.0.0/lld/COFF/Writer.cpp#L1190-L1197
+ let section_name = if tcx.sess.target.is_like_osx { "__DATA,.rustc" } else { ".rustc" };
+
+ let context = &gcc_module.context;
+ let mut compressed = rustc_metadata::METADATA_HEADER.to_vec();
+ FrameEncoder::new(&mut compressed).write_all(&metadata.raw_data).unwrap();
+
+ let name = exported_symbols::metadata_symbol_name(tcx);
+ let typ = context.new_array_type(None, context.new_type::<u8>(), compressed.len() as i32);
+ let global = context.new_global(None, GlobalKind::Exported, typ, name);
+ global.global_set_initializer(&compressed);
+ global.set_link_section(section_name);
+
+ // Also generate a .section directive to force no
+ // flags, at least for ELF outputs, so that the
+ // metadata doesn't get loaded into memory.
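+ // For instance, on non-macOS targets section_name is ".rustc", so this emits
+ // `.section .rustc` with no flags and the section is not allocated at run time.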
+ let directive = format!(".section {}", section_name);
+ context.add_top_level_asm(None, &directive);
+}
diff --git a/src/builder.rs b/src/builder.rs
new file mode 100644
index 0000000000000..8bdcb08bd3d6b
--- /dev/null
+++ b/src/builder.rs
@@ -0,0 +1,1812 @@
+use std::borrow::Cow;
+use std::cell::Cell;
+use std::convert::TryFrom;
+use std::ops::{Deref, Range};
+
+use gccjit::FunctionType;
+use gccjit::{
+ BinaryOp,
+ Block,
+ ComparisonOp,
+ Function,
+ LValue,
+ RValue,
+ ToRValue,
+ Type,
+ UnaryOp,
+};
+use rustc_codegen_ssa::MemFlags;
+use rustc_codegen_ssa::common::{AtomicOrdering, AtomicRmwBinOp, IntPredicate, RealPredicate, SynchronizationScope};
+use rustc_codegen_ssa::mir::operand::{OperandRef, OperandValue};
+use rustc_codegen_ssa::mir::place::PlaceRef;
+use rustc_codegen_ssa::traits::{
+ BackendTypes,
+ BaseTypeMethods,
+ BuilderMethods,
+ ConstMethods,
+ DerivedTypeMethods,
+ HasCodegen,
+ OverflowOp,
+ StaticBuilderMethods,
+};
+use rustc_middle::ty::{ParamEnv, Ty, TyCtxt};
+use rustc_middle::ty::layout::{HasParamEnv, HasTyCtxt, TyAndLayout};
+use rustc_span::Span;
+use rustc_span::def_id::DefId;
+use rustc_target::abi::{
+ self,
+ Align,
+ HasDataLayout,
+ LayoutOf,
+ Size,
+ TargetDataLayout,
+};
+use rustc_target::spec::{HasTargetSpec, Target};
+
+use crate::common::{SignType, TypeReflection, type_is_pointer};
+use crate::context::CodegenCx;
+use crate::type_of::LayoutGccExt;
+
+// TODO
+type Funclet = ();
+
+// TODO: remove this variable.
+static mut RETURN_VALUE_COUNT: usize = 0;
+
+enum ExtremumOperation {
+ Max,
+ Min,
+}
+
+trait EnumClone {
+ fn clone(&self) -> Self;
+}
+
+impl EnumClone for AtomicOrdering {
+ fn clone(&self) -> Self {
+ match *self {
+ AtomicOrdering::NotAtomic => AtomicOrdering::NotAtomic,
+ AtomicOrdering::Unordered => AtomicOrdering::Unordered,
+ AtomicOrdering::Monotonic => AtomicOrdering::Monotonic,
+ AtomicOrdering::Acquire => AtomicOrdering::Acquire,
+ AtomicOrdering::Release => AtomicOrdering::Release,
+ AtomicOrdering::AcquireRelease => AtomicOrdering::AcquireRelease,
+ AtomicOrdering::SequentiallyConsistent => AtomicOrdering::SequentiallyConsistent,
+ }
+ }
+}
+
+pub struct Builder<'a: 'gcc, 'gcc, 'tcx> {
+ pub cx: &'a CodegenCx<'gcc, 'tcx>,
+ pub block: Option<Block<'gcc>>,
+ stack_var_count: Cell<usize>,
+}
+
+impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> {
+ fn with_cx(cx: &'a CodegenCx<'gcc, 'tcx>) -> Self {
+ Builder {
+ cx,
+ block: None,
+ stack_var_count: Cell::new(0),
+ }
+ }
+
+ fn atomic_extremum(&mut self, operation: ExtremumOperation, dst: RValue<'gcc>, src: RValue<'gcc>, order: AtomicOrdering) -> RValue<'gcc> {
+ let size = self.cx.int_width(src.get_type()) / 8;
+
+ let func = self.current_func();
+
+ let load_ordering =
+ match order {
+ // TODO: does this make sense?
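+ // (It does at least for the load itself: a Release or AcquireRelease ordering
+ // is not valid for a plain atomic load, so the load half is downgraded to Acquire.)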
+ AtomicOrdering::AcquireRelease | AtomicOrdering::Release => AtomicOrdering::Acquire, + _ => order.clone(), + }; + let previous_value = self.atomic_load(dst.get_type(), dst, load_ordering.clone(), Size::from_bytes(size)); + let previous_var = func.new_local(None, previous_value.get_type(), "previous_value"); + let return_value = func.new_local(None, previous_value.get_type(), "return_value"); + self.llbb().add_assignment(None, previous_var, previous_value); + self.llbb().add_assignment(None, return_value, previous_var.to_rvalue()); + + let while_block = func.new_block("while"); + let after_block = func.new_block("after_while"); + self.llbb().end_with_jump(None, while_block); + + // NOTE: since jumps were added and compare_exchange doesn't expect this, the current blocks in the + // state need to be updated. + self.block = Some(while_block); + *self.cx.current_block.borrow_mut() = Some(while_block); + + let comparison_operator = + match operation { + ExtremumOperation::Max => ComparisonOp::LessThan, + ExtremumOperation::Min => ComparisonOp::GreaterThan, + }; + + let cond1 = self.context.new_comparison(None, comparison_operator, previous_var.to_rvalue(), self.context.new_cast(None, src, previous_value.get_type())); + let compare_exchange = self.compare_exchange(dst, previous_var, src, order, load_ordering, false); + let cond2 = self.cx.context.new_unary_op(None, UnaryOp::LogicalNegate, compare_exchange.get_type(), compare_exchange); + let cond = self.cx.context.new_binary_op(None, BinaryOp::LogicalAnd, self.cx.bool_type, cond1, cond2); + + while_block.end_with_conditional(None, cond, while_block, after_block); + + // NOTE: since jumps were added in a place rustc does not expect, the current blocks in the + // state need to be updated. + self.block = Some(after_block); + *self.cx.current_block.borrow_mut() = Some(after_block); + + return_value.to_rvalue() + } + + fn compare_exchange(&self, dst: RValue<'gcc>, cmp: LValue<'gcc>, src: RValue<'gcc>, order: AtomicOrdering, failure_order: AtomicOrdering, weak: bool) -> RValue<'gcc> { + let size = self.cx.int_width(src.get_type()); + let compare_exchange = self.context.get_builtin_function(&format!("__atomic_compare_exchange_{}", size / 8)); + let order = self.context.new_rvalue_from_int(self.i32_type, order.to_gcc()); + let failure_order = self.context.new_rvalue_from_int(self.i32_type, failure_order.to_gcc()); + let weak = self.context.new_rvalue_from_int(self.bool_type, weak as i32); + + let void_ptr_type = self.context.new_type::<*mut ()>(); + let volatile_void_ptr_type = void_ptr_type.make_volatile(); + let dst = self.context.new_cast(None, dst, volatile_void_ptr_type); + let expected = self.context.new_cast(None, cmp.get_address(None), void_ptr_type); + + // NOTE: not sure why, but we have the wrong type here. 
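+ // NOTE: param 2 of __atomic_compare_exchange_N is the `desired` value, an integer
+ // of width N, which is why `src` is cast to that type below.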
+ let int_type = compare_exchange.get_param(2).to_rvalue().get_type(); + let src = self.context.new_cast(None, src, int_type); + self.context.new_call(None, compare_exchange, &[dst, expected, src, weak, order, failure_order]) + } + + pub fn assign(&self, lvalue: LValue<'gcc>, value: RValue<'gcc>) { + self.llbb().add_assignment(None, lvalue, value); + } + + fn check_call<'b>(&mut self, _typ: &str, func: Function<'gcc>, args: &'b [RValue<'gcc>]) -> Cow<'b, [RValue<'gcc>]> { + //let mut fn_ty = self.cx.val_ty(func); + // Strip off pointers + /*while self.cx.type_kind(fn_ty) == TypeKind::Pointer { + fn_ty = self.cx.element_type(fn_ty); + }*/ + + /*assert!( + self.cx.type_kind(fn_ty) == TypeKind::Function, + "builder::{} not passed a function, but {:?}", + typ, + fn_ty + ); + + let param_tys = self.cx.func_params_types(fn_ty); + + let all_args_match = param_tys + .iter() + .zip(args.iter().map(|&v| self.val_ty(v))) + .all(|(expected_ty, actual_ty)| *expected_ty == actual_ty);*/ + + let mut all_args_match = true; + let mut param_types = vec![]; + let param_count = func.get_param_count(); + for (index, arg) in args.iter().enumerate().take(param_count) { + let param = func.get_param(index as i32); + let param = param.to_rvalue().get_type(); + if param != arg.get_type() { + all_args_match = false; + } + param_types.push(param); + } + + if all_args_match { + return Cow::Borrowed(args); + } + + let casted_args: Vec<_> = param_types + .into_iter() + .zip(args.iter()) + .enumerate() + .map(|(_i, (expected_ty, &actual_val))| { + let actual_ty = actual_val.get_type(); + if expected_ty != actual_ty { + /*debug!( + "type mismatch in function call of {:?}. \ + Expected {:?} for param {}, got {:?}; injecting bitcast", + func, expected_ty, i, actual_ty + );*/ + /*println!( + "type mismatch in function call of {:?}. \ + Expected {:?} for param {}, got {:?}; injecting bitcast", + func, expected_ty, i, actual_ty + );*/ + self.bitcast(actual_val, expected_ty) + } + else { + actual_val + } + }) + .collect(); + + Cow::Owned(casted_args) + } + + fn check_ptr_call<'b>(&mut self, _typ: &str, func_ptr: RValue<'gcc>, args: &'b [RValue<'gcc>]) -> Cow<'b, [RValue<'gcc>]> { + //let mut fn_ty = self.cx.val_ty(func); + // Strip off pointers + /*while self.cx.type_kind(fn_ty) == TypeKind::Pointer { + fn_ty = self.cx.element_type(fn_ty); + }*/ + + /*assert!( + self.cx.type_kind(fn_ty) == TypeKind::Function, + "builder::{} not passed a function, but {:?}", + typ, + fn_ty + ); + + let param_tys = self.cx.func_params_types(fn_ty); + + let all_args_match = param_tys + .iter() + .zip(args.iter().map(|&v| self.val_ty(v))) + .all(|(expected_ty, actual_ty)| *expected_ty == actual_ty);*/ + + let mut all_args_match = true; + let mut param_types = vec![]; + let gcc_func = func_ptr.get_type().is_function_ptr_type().expect("function ptr"); + for (index, arg) in args.iter().enumerate().take(gcc_func.get_param_count()) { + let param = gcc_func.get_param_type(index); + if param != arg.get_type() { + all_args_match = false; + } + param_types.push(param); + } + + if all_args_match { + return Cow::Borrowed(args); + } + + let casted_args: Vec<_> = param_types + .into_iter() + .zip(args.iter()) + .enumerate() + .map(|(_i, (expected_ty, &actual_val))| { + let actual_ty = actual_val.get_type(); + if expected_ty != actual_ty { + /*debug!( + "type mismatch in function call of {:?}. \ + Expected {:?} for param {}, got {:?}; injecting bitcast", + func, expected_ty, i, actual_ty + );*/ + /*println!( + "type mismatch in function call of {:?}. 
\ + Expected {:?} for param {}, got {:?}; injecting bitcast", + func, expected_ty, i, actual_ty + );*/ + self.bitcast(actual_val, expected_ty) + } + else { + actual_val + } + }) + .collect(); + + Cow::Owned(casted_args) + } + + fn check_store(&mut self, val: RValue<'gcc>, ptr: RValue<'gcc>) -> RValue<'gcc> { + let dest_ptr_ty = self.cx.val_ty(ptr).make_pointer(); // TODO: make sure make_pointer() is okay here. + let stored_ty = self.cx.val_ty(val); + let stored_ptr_ty = self.cx.type_ptr_to(stored_ty); + + //assert_eq!(self.cx.type_kind(dest_ptr_ty), TypeKind::Pointer); + + if dest_ptr_ty == stored_ptr_ty { + ptr + } + else { + /*debug!( + "type mismatch in store. \ + Expected {:?}, got {:?}; inserting bitcast", + dest_ptr_ty, stored_ptr_ty + );*/ + /*println!( + "type mismatch in store. \ + Expected {:?}, got {:?}; inserting bitcast", + dest_ptr_ty, stored_ptr_ty + );*/ + //ptr + self.bitcast(ptr, stored_ptr_ty) + } + } + + pub fn current_func(&self) -> Function<'gcc> { + self.block.expect("block").get_function() + } + + fn function_call(&mut self, func: RValue<'gcc>, args: &[RValue<'gcc>], _funclet: Option<&Funclet>) -> RValue<'gcc> { + //debug!("call {:?} with args ({:?})", func, args); + + // TODO: remove when the API supports a different type for functions. + let func: Function<'gcc> = self.cx.rvalue_as_function(func); + let args = self.check_call("call", func, args); + //let bundle = funclet.map(|funclet| funclet.bundle()); + //let bundle = bundle.as_ref().map(|b| &*b.raw); + + // gccjit requires to use the result of functions, even when it's not used. + // That's why we assign the result to a local or call add_eval(). + let return_type = func.get_return_type(); + let current_block = self.current_block.borrow().expect("block"); + let void_type = self.context.new_type::<()>(); + let current_func = current_block.get_function(); + if return_type != void_type { + unsafe { RETURN_VALUE_COUNT += 1 }; + let result = current_func.new_local(None, return_type, &format!("returnValue{}", unsafe { RETURN_VALUE_COUNT })); + current_block.add_assignment(None, result, self.cx.context.new_call(None, func, &args)); + result.to_rvalue() + } + else { + current_block.add_eval(None, self.cx.context.new_call(None, func, &args)); + // Return dummy value when not having return value. + self.context.new_rvalue_from_long(self.isize_type, 0) + } + } + + fn function_ptr_call(&mut self, func_ptr: RValue<'gcc>, args: &[RValue<'gcc>], _funclet: Option<&Funclet>) -> RValue<'gcc> { + //debug!("func ptr call {:?} with args ({:?})", func, args); + + let args = self.check_ptr_call("call", func_ptr, args); + //let bundle = funclet.map(|funclet| funclet.bundle()); + //let bundle = bundle.as_ref().map(|b| &*b.raw); + + // gccjit requires to use the result of functions, even when it's not used. + // That's why we assign the result to a local or call add_eval(). + let gcc_func = func_ptr.get_type().is_function_ptr_type().expect("function ptr"); + let mut return_type = gcc_func.get_return_type(); + let current_block = self.current_block.borrow().expect("block"); + let void_type = self.context.new_type::<()>(); + let current_func = current_block.get_function(); + + // FIXME: As a temporary workaround for unsupported LLVM intrinsics. 
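+ // NOTE: the stub declaration for this builtin takes no parameters, so its actual
+ // return type has to be patched back in by name here.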
+ if gcc_func.get_param_count() == 0 && format!("{:?}", func_ptr) == "__builtin_ia32_pmovmskb128" {
+ return_type = self.int_type;
+ }
+
+ if return_type != void_type {
+ unsafe { RETURN_VALUE_COUNT += 1 };
+ let result = current_func.new_local(None, return_type, &format!("returnValue{}", unsafe { RETURN_VALUE_COUNT }));
+ current_block.add_assignment(None, result, self.cx.context.new_call_through_ptr(None, func_ptr, &args));
+ result.to_rvalue()
+ }
+ else {
+ if gcc_func.get_param_count() == 0 {
+ // FIXME: As a temporary workaround for unsupported LLVM intrinsics.
+ current_block.add_eval(None, self.cx.context.new_call_through_ptr(None, func_ptr, &[]));
+ }
+ else {
+ current_block.add_eval(None, self.cx.context.new_call_through_ptr(None, func_ptr, &args));
+ }
+ // Return dummy value when not having return value.
+ let result = current_func.new_local(None, self.isize_type, "dummyValueThatShouldNeverBeUsed");
+ current_block.add_assignment(None, result, self.context.new_rvalue_from_long(self.isize_type, 0));
+ result.to_rvalue()
+ }
+ }
+
+ pub fn overflow_call(&mut self, func: Function<'gcc>, args: &[RValue<'gcc>], _funclet: Option<&Funclet>) -> RValue<'gcc> {
+ //debug!("overflow_call {:?} with args ({:?})", func, args);
+
+ //let bundle = funclet.map(|funclet| funclet.bundle());
+ //let bundle = bundle.as_ref().map(|b| &*b.raw);
+
+ // gccjit requires to use the result of functions, even when it's not used.
+ // That's why we assign the result to a local.
+ let return_type = self.context.new_type::<bool>();
+ let current_block = self.current_block.borrow().expect("block");
+ let current_func = current_block.get_function();
+ // TODO: return the new_call() directly? Since the overflow function has no side-effects.
+ unsafe { RETURN_VALUE_COUNT += 1 };
+ let result = current_func.new_local(None, return_type, &format!("returnValue{}", unsafe { RETURN_VALUE_COUNT }));
+ current_block.add_assignment(None, result, self.cx.context.new_call(None, func, &args));
+ result.to_rvalue()
+ }
+}
+
+impl<'gcc, 'tcx> HasCodegen<'tcx> for Builder<'_, 'gcc, 'tcx> {
+ type CodegenCx = CodegenCx<'gcc, 'tcx>;
+}
+
+impl<'tcx> HasTyCtxt<'tcx> for Builder<'_, '_, 'tcx> {
+ fn tcx(&self) -> TyCtxt<'tcx> {
+ self.cx.tcx()
+ }
+}
+
+impl HasDataLayout for Builder<'_, '_, '_> {
+ fn data_layout(&self) -> &TargetDataLayout {
+ self.cx.data_layout()
+ }
+}
+
+impl<'tcx> LayoutOf for Builder<'_, '_, 'tcx> {
+ type Ty = Ty<'tcx>;
+ type TyAndLayout = TyAndLayout<'tcx>;
+
+ fn layout_of(&self, ty: Ty<'tcx>) -> Self::TyAndLayout {
+ self.cx.layout_of(ty)
+ }
+}
+
+impl<'gcc, 'tcx> Deref for Builder<'_, 'gcc, 'tcx> {
+ type Target = CodegenCx<'gcc, 'tcx>;
+
+ fn deref(&self) -> &Self::Target {
+ self.cx
+ }
+}
+
+impl<'gcc, 'tcx> BackendTypes for Builder<'_, 'gcc, 'tcx> {
+ type Value = <CodegenCx<'gcc, 'tcx> as BackendTypes>::Value;
+ type Function = <CodegenCx<'gcc, 'tcx> as BackendTypes>::Function;
+ type BasicBlock = <CodegenCx<'gcc, 'tcx> as BackendTypes>::BasicBlock;
+ type Type = <CodegenCx<'gcc, 'tcx> as BackendTypes>::Type;
+ type Funclet = <CodegenCx<'gcc, 'tcx> as BackendTypes>::Funclet;
+
+ type DIScope = <CodegenCx<'gcc, 'tcx> as BackendTypes>::DIScope;
+ type DILocation = <CodegenCx<'gcc, 'tcx> as BackendTypes>::DILocation;
+ type DIVariable = <CodegenCx<'gcc, 'tcx> as BackendTypes>::DIVariable;
+}
+
+impl<'a, 'gcc, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'gcc, 'tcx> {
+ fn build(cx: &'a CodegenCx<'gcc, 'tcx>, block: Block<'gcc>) -> Self {
+ let mut bx = Builder::with_cx(cx);
+ *cx.current_block.borrow_mut() = Some(block);
+ bx.block = Some(block);
+ bx
+ }
+
+ fn build_sibling_block(&mut self, name: &str) -> Self {
+ let block = self.append_sibling_block(name);
+ Self::build(self.cx, block)
+ }
+
+ fn llbb(&self) -> Block<'gcc> {
+ self.block.expect("block")
+ }
+
+ fn append_block(cx: &'a CodegenCx<'gcc, 'tcx>, func: RValue<'gcc>, name: &str) -> Block<'gcc> {
+ let func = cx.rvalue_as_function(func);
+ func.new_block(name)
+ }
+
+ fn append_sibling_block(&mut self, name: &str) -> Block<'gcc> {
+ let func = self.current_func();
+ func.new_block(name)
+ }
+
+ fn ret_void(&mut self) {
+ self.llbb().end_with_void_return(None)
+ }
+
+ fn ret(&mut self, value: RValue<'gcc>) {
+ let value =
+ if self.structs_as_pointer.borrow().contains(&value) {
+ // NOTE: hack to workaround a limitation of the rustc API: see comment on
+ // CodegenCx.structs_as_pointer
+ value.dereference(None).to_rvalue()
+ }
+ else {
+ value
+ };
+ self.llbb().end_with_return(None, value);
+ }
+
+ fn br(&mut self, dest: Block<'gcc>) {
+ self.llbb().end_with_jump(None, dest)
+ }
+
+ fn cond_br(&mut self, cond: RValue<'gcc>, then_block: Block<'gcc>, else_block: Block<'gcc>) {
+ self.llbb().end_with_conditional(None, cond, then_block, else_block)
+ }
+
+ fn switch(&mut self, value: RValue<'gcc>, default_block: Block<'gcc>, cases: impl ExactSizeIterator<Item = (u128, Block<'gcc>)>) {
+ let mut gcc_cases = vec![];
+ let typ = self.val_ty(value);
+ for (on_val, dest) in cases {
+ let on_val = self.const_uint_big(typ, on_val);
+ gcc_cases.push(self.context.new_case(on_val, on_val, dest));
+ }
+ self.block.expect("block").end_with_switch(None, value, default_block, &gcc_cases);
+ }
+
+ fn invoke(&mut self, _func: RValue<'gcc>, _args: &[RValue<'gcc>], _then: Block<'gcc>, _catch: Block<'gcc>, _funclet: Option<&Funclet>) -> RValue<'gcc> {
+ unimplemented!();
+ /*debug!("invoke {:?} with args ({:?})", func, args);
+
+ let args = self.check_call("invoke", func, args);
+ let bundle = funclet.map(|funclet| funclet.bundle());
+ let bundle = bundle.as_ref().map(|b| &*b.raw);
+
+ unsafe {
+ llvm::LLVMRustBuildInvoke(
+ self.llbuilder,
+ func,
+ args.as_ptr(),
+ args.len() as c_uint,
+ then,
+ catch,
+ bundle,
+ UNNAMED,
+ )
+ }*/
+ }
+
+ fn unreachable(&mut self) {
+ let func = self.context.get_builtin_function("__builtin_unreachable");
+ let block = self.block.expect("block");
+ block.add_eval(None, self.context.new_call(None, func, &[]));
+ let return_type = block.get_function().get_return_type();
+ let void_type = self.context.new_type::<()>();
+ if return_type == void_type {
+ block.end_with_void_return(None)
+ }
+ else {
+ let return_value = self.current_func()
+ .new_local(None, return_type, "unreachableReturn");
+ block.end_with_return(None, return_value)
+ }
+ }
+
+ fn add(&mut self, a: RValue<'gcc>, mut b: RValue<'gcc>) -> RValue<'gcc> {
+ // FIXME: this should not be required.
+ if format!("{:?}", a.get_type()) != format!("{:?}", b.get_type()) {
+ b = self.context.new_cast(None, b, a.get_type());
+ }
+ a + b
+ }
+
+ fn fadd(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
+ a + b
+ }
+
+ fn sub(&mut self, a: RValue<'gcc>, mut b: RValue<'gcc>) -> RValue<'gcc> {
+ if a.get_type() != b.get_type() {
+ b = self.context.new_cast(None, b, a.get_type());
+ }
+ a - b
+ }
+
+ fn fsub(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
+ a - b
+ }
+
+ fn mul(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
+ a * b
+ }
+
+ fn fmul(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
+ a * b
+ }
+
+ fn udiv(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
+ // TODO: convert the arguments to unsigned?
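+ // NOTE: in GCC the signedness of a division comes from the operand types, so
+ // this only performs an unsigned division if `a` and `b` are already unsigned.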
+ a / b + } + + fn exactudiv(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> { + // TODO: convert the arguments to unsigned? + // TODO: poison if not exact. + a / b + } + + fn sdiv(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> { + // TODO: convert the arguments to signed? + a / b + } + + fn exactsdiv(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> { + // TODO: posion if not exact. + // FIXME: rustc_codegen_ssa::mir::intrinsic uses different types for a and b but they + // should be the same. + let typ = a.get_type().to_signed(self); + let a = self.context.new_cast(None, a, typ); + let b = self.context.new_cast(None, b, typ); + a / b + } + + fn fdiv(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> { + a / b + } + + fn urem(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> { + a % b + } + + fn srem(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> { + a % b + } + + fn frem(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> { + if a.get_type() == self.cx.float_type { + let fmodf = self.context.get_builtin_function("fmodf"); + // FIXME: this seems to produce the wrong result. + return self.context.new_call(None, fmodf, &[a, b]); + } + assert_eq!(a.get_type(), self.cx.double_type); + + let fmod = self.context.get_builtin_function("fmod"); + return self.context.new_call(None, fmod, &[a, b]); + } + + fn shl(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> { + // FIXME: remove the casts when libgccjit can shift an unsigned number by an unsigned number. + let a_type = a.get_type(); + let b_type = b.get_type(); + if a_type.is_unsigned(self) && b_type.is_signed(self) { + //println!("shl: {:?} -> {:?}", a, b_type); + let a = self.context.new_cast(None, a, b_type); + let result = a << b; + //println!("shl: {:?} -> {:?}", result, a_type); + self.context.new_cast(None, result, a_type) + } + else if a_type.is_signed(self) && b_type.is_unsigned(self) { + //println!("shl: {:?} -> {:?}", b, a_type); + let b = self.context.new_cast(None, b, a_type); + a << b + } + else { + a << b + } + } + + fn lshr(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> { + // FIXME: remove the casts when libgccjit can shift an unsigned number by an unsigned number. + // TODO: cast to unsigned to do a logical shift if that does not work. + let a_type = a.get_type(); + let b_type = b.get_type(); + if a_type.is_unsigned(self) && b_type.is_signed(self) { + //println!("lshl: {:?} -> {:?}", a, b_type); + let a = self.context.new_cast(None, a, b_type); + let result = a >> b; + //println!("lshl: {:?} -> {:?}", result, a_type); + self.context.new_cast(None, result, a_type) + } + else if a_type.is_signed(self) && b_type.is_unsigned(self) { + //println!("lshl: {:?} -> {:?}", b, a_type); + let b = self.context.new_cast(None, b, a_type); + a >> b + } + else { + a >> b + } + } + + fn ashr(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> { + // TODO: check whether behavior is an arithmetic shift for >> . + // FIXME: remove the casts when libgccjit can shift an unsigned number by an unsigned number. 
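One note on the TODO above: GCC defines `>>` on a signed integer operand as an arithmetic (sign-propagating) shift, so dispatching on the operand type is sound for ashr; it is lshr above that would need an explicit detour through the unsigned type to force a logical shift. A sketch of that detour, reusing the names from lshr (`to_unsigned` is a hypothetical counterpart of the `to_signed` helper in src/common.rs):

    let unsigned_type = a_type.to_unsigned(self); // hypothetical helper
    let a = self.context.new_cast(None, a, unsigned_type);
    let result = a >> b; // logical shift, since a is now unsigned
    self.context.new_cast(None, result, a_type)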
+ let a_type = a.get_type(); + let b_type = b.get_type(); + if a_type.is_unsigned(self) && b_type.is_signed(self) { + //println!("ashl: {:?} -> {:?}", a, b_type); + let a = self.context.new_cast(None, a, b_type); + let result = a >> b; + //println!("ashl: {:?} -> {:?}", result, a_type); + self.context.new_cast(None, result, a_type) + } + else if a_type.is_signed(self) && b_type.is_unsigned(self) { + //println!("ashl: {:?} -> {:?}", b, a_type); + let b = self.context.new_cast(None, b, a_type); + a >> b + } + else { + a >> b + } + } + + fn and(&mut self, a: RValue<'gcc>, mut b: RValue<'gcc>) -> RValue<'gcc> { + // FIXME: hack by putting the result in a variable to workaround this bug: + // https://gcc.gnu.org/bugzilla//show_bug.cgi?id=95498 + if a.get_type() != b.get_type() { + b = self.context.new_cast(None, b, a.get_type()); + } + let res = self.current_func().new_local(None, b.get_type(), "andResult"); + self.llbb().add_assignment(None, res, a & b); + res.to_rvalue() + } + + fn or(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> { + // FIXME: hack by putting the result in a variable to workaround this bug: + // https://gcc.gnu.org/bugzilla//show_bug.cgi?id=95498 + let res = self.current_func().new_local(None, b.get_type(), "orResult"); + self.llbb().add_assignment(None, res, a | b); + res.to_rvalue() + } + + fn xor(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> { + a ^ b + } + + fn neg(&mut self, a: RValue<'gcc>) -> RValue<'gcc> { + // TODO: use new_unary_op()? + self.cx.context.new_rvalue_from_long(a.get_type(), 0) - a + } + + fn fneg(&mut self, a: RValue<'gcc>) -> RValue<'gcc> { + self.cx.context.new_unary_op(None, UnaryOp::Minus, a.get_type(), a) + } + + fn not(&mut self, a: RValue<'gcc>) -> RValue<'gcc> { + let operation = + if a.get_type().is_bool() { + UnaryOp::LogicalNegate + } + else { + UnaryOp::BitwiseNegate + }; + self.cx.context.new_unary_op(None, operation, a.get_type(), a) + } + + fn unchecked_sadd(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> { + a + b + } + + fn unchecked_uadd(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> { + a + b + } + + fn unchecked_ssub(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> { + a - b + } + + fn unchecked_usub(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> { + // TODO: should generate poison value? 
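Some context for the poison TODOs in these unchecked_* methods: rustc makes overflow of the unchecked operations undefined behaviour. For signed operands, plain GCC arithmetic already treats overflow as undefined, so `a - b` is a faithful lowering; for unsigned operands, GCC wraps instead, which is a defined (and therefore merely less optimizable) behaviour compared to the poison value that LLVM's `nuw`/`nsw` flags would produce.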
+ a - b + } + + fn unchecked_smul(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> { + a * b + } + + fn unchecked_umul(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> { + a * b + } + + fn fadd_fast(&mut self, _lhs: RValue<'gcc>, _rhs: RValue<'gcc>) -> RValue<'gcc> { + unimplemented!(); + /*unsafe { + let instr = llvm::LLVMBuildFAdd(self.llbuilder, lhs, rhs, UNNAMED); + llvm::LLVMRustSetHasUnsafeAlgebra(instr); + instr + }*/ + } + + fn fsub_fast(&mut self, _lhs: RValue<'gcc>, _rhs: RValue<'gcc>) -> RValue<'gcc> { + unimplemented!(); + /*unsafe { + let instr = llvm::LLVMBuildFSub(self.llbuilder, lhs, rhs, UNNAMED); + llvm::LLVMRustSetHasUnsafeAlgebra(instr); + instr + }*/ + } + + fn fmul_fast(&mut self, _lhs: RValue<'gcc>, _rhs: RValue<'gcc>) -> RValue<'gcc> { + unimplemented!(); + /*unsafe { + let instr = llvm::LLVMBuildFMul(self.llbuilder, lhs, rhs, UNNAMED); + llvm::LLVMRustSetHasUnsafeAlgebra(instr); + instr + }*/ + } + + fn fdiv_fast(&mut self, _lhs: RValue<'gcc>, _rhs: RValue<'gcc>) -> RValue<'gcc> { + unimplemented!(); + /*unsafe { + let instr = llvm::LLVMBuildFDiv(self.llbuilder, lhs, rhs, UNNAMED); + llvm::LLVMRustSetHasUnsafeAlgebra(instr); + instr + }*/ + } + + fn frem_fast(&mut self, _lhs: RValue<'gcc>, _rhs: RValue<'gcc>) -> RValue<'gcc> { + unimplemented!(); + /*unsafe { + let instr = llvm::LLVMBuildFRem(self.llbuilder, lhs, rhs, UNNAMED); + llvm::LLVMRustSetHasUnsafeAlgebra(instr); + instr + }*/ + } + + fn checked_binop(&mut self, oop: OverflowOp, typ: Ty<'_>, lhs: Self::Value, rhs: Self::Value) -> (Self::Value, Self::Value) { + use rustc_middle::ty::{Int, IntTy::*, Uint, UintTy::*}; + + let new_kind = + match typ.kind() { + Int(t @ Isize) => Int(t.normalize(self.tcx.sess.target.pointer_width)), + Uint(t @ Usize) => Uint(t.normalize(self.tcx.sess.target.pointer_width)), + t @ (Uint(_) | Int(_)) => t.clone(), + _ => panic!("tried to get overflow intrinsic for op applied to non-int type"), + }; + + // TODO: remove duplication with intrinsic? 
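For reference on the builtin table that follows: the GCC overflow builtins all share the shape `bool __builtin_sadd_overflow(T a, T b, T *res)`; they return an overflow flag and write the wrapped result through the last argument, which is why checked_binop pairs a result local with overflow_call. A minimal sketch of a single emission, using only names from the surrounding code:

    // Signed 32-bit add with overflow detection:
    let intrinsic = self.context.get_builtin_function("__builtin_sadd_overflow");
    let res = self.current_func()
        .new_local(None, lhs.get_type(), "binopResult")
        .get_address(None);
    let overflow = self.overflow_call(intrinsic, &[lhs, rhs, res], None);
    // (res.dereference(None).to_rvalue(), overflow) is the (value, did_overflow)
    // pair that rustc_codegen_ssa expects back from checked_binop.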
+ let name = + match oop { + OverflowOp::Add => + match new_kind { + Int(I8) => "__builtin_add_overflow", + Int(I16) => "__builtin_add_overflow", + Int(I32) => "__builtin_sadd_overflow", + Int(I64) => "__builtin_saddll_overflow", + Int(I128) => "__builtin_add_overflow", + + Uint(U8) => "__builtin_add_overflow", + Uint(U16) => "__builtin_add_overflow", + Uint(U32) => "__builtin_uadd_overflow", + Uint(U64) => "__builtin_uaddll_overflow", + Uint(U128) => "__builtin_add_overflow", + + _ => unreachable!(), + }, + OverflowOp::Sub => + match new_kind { + Int(I8) => "__builtin_sub_overflow", + Int(I16) => "__builtin_sub_overflow", + Int(I32) => "__builtin_ssub_overflow", + Int(I64) => "__builtin_ssubll_overflow", + Int(I128) => "__builtin_sub_overflow", + + Uint(U8) => "__builtin_sub_overflow", + Uint(U16) => "__builtin_sub_overflow", + Uint(U32) => "__builtin_usub_overflow", + Uint(U64) => "__builtin_usubll_overflow", + Uint(U128) => "__builtin_sub_overflow", + + _ => unreachable!(), + }, + OverflowOp::Mul => + match new_kind { + Int(I8) => "__builtin_mul_overflow", + Int(I16) => "__builtin_mul_overflow", + Int(I32) => "__builtin_smul_overflow", + Int(I64) => "__builtin_smulll_overflow", + Int(I128) => "__builtin_mul_overflow", + + Uint(U8) => "__builtin_mul_overflow", + Uint(U16) => "__builtin_mul_overflow", + Uint(U32) => "__builtin_umul_overflow", + Uint(U64) => "__builtin_umulll_overflow", + Uint(U128) => "__builtin_mul_overflow", + + _ => unreachable!(), + }, + }; + + let intrinsic = self.context.get_builtin_function(&name); + let res = self.current_func() + // TODO: is it correct to use rhs type instead of the parameter typ? + .new_local(None, rhs.get_type(), "binopResult") + .get_address(None); + let overflow = self.overflow_call(intrinsic, &[lhs, rhs, res], None); + (res.dereference(None).to_rvalue(), overflow) + } + + fn alloca(&mut self, ty: Type<'gcc>, align: Align) -> RValue<'gcc> { + // FIXME: this check that we don't call get_aligned() a second time on a time. + // Ideally, we shouldn't need to do this check. + let aligned_type = + if ty == self.cx.u128_type || ty == self.cx.i128_type { + ty + } + else { + ty.get_aligned(align.bytes()) + }; + // TODO: It might be better to return a LValue, but fixing the rustc API is non-trivial. + self.stack_var_count.set(self.stack_var_count.get() + 1); + self.current_func().new_local(None, aligned_type, &format!("stack_var_{}", self.stack_var_count.get())).get_address(None) + } + + fn dynamic_alloca(&mut self, _ty: Type<'gcc>, _align: Align) -> RValue<'gcc> { + unimplemented!(); + /*unsafe { + let alloca = llvm::LLVMBuildAlloca(self.llbuilder, ty, UNNAMED); + llvm::LLVMSetAlignment(alloca, align.bytes() as c_uint); + alloca + }*/ + } + + fn array_alloca(&mut self, _ty: Type<'gcc>, _len: RValue<'gcc>, _align: Align) -> RValue<'gcc> { + unimplemented!(); + /*unsafe { + let alloca = llvm::LLVMBuildArrayAlloca(self.llbuilder, ty, len, UNNAMED); + llvm::LLVMSetAlignment(alloca, align.bytes() as c_uint); + alloca + }*/ + } + + fn load(&mut self, _ty: Type<'gcc>, ptr: RValue<'gcc>, _align: Align) -> RValue<'gcc> { + // TODO: use ty. + let block = self.llbb(); + let function = block.get_function(); + // NOTE: instead of returning the dereference here, we have to assign it to a variable in + // the current basic block. Otherwise, it could be used in another basic block, causing a + // dereference after a drop, for instance. + // TODO: handle align. 
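The NOTE above deserves a concrete illustration. If `load` handed back `*ptr` directly, the dereference would be re-evaluated at each use site, possibly in a later basic block where the storage behind `ptr` has already ended its lifetime; copying into a fresh local pins the value at the point of the load:

    // Roughly, in terms of the generated code:
    //   bb1: loadedValue1 = *ptr;   // value captured here
    //   bb2: use(loadedValue1);     // safe even if *ptr is gone by now
    // instead of:
    //   bb2: use(*ptr);             // could read through a dangling pointer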
+ let deref = ptr.dereference(None).to_rvalue(); + let value_type = deref.get_type(); + unsafe { RETURN_VALUE_COUNT += 1 }; + let loaded_value = function.new_local(None, value_type, &format!("loadedValue{}", unsafe { RETURN_VALUE_COUNT })); + block.add_assignment(None, loaded_value, deref); + loaded_value.to_rvalue() + } + + fn volatile_load(&mut self, _ty: Type<'gcc>, ptr: RValue<'gcc>) -> RValue<'gcc> { + // TODO: use ty. + //println!("5: volatile load: {:?} to {:?}", ptr, ptr.get_type().make_volatile()); + let ptr = self.context.new_cast(None, ptr, ptr.get_type().make_volatile()); + //println!("6"); + ptr.dereference(None).to_rvalue() + } + + fn atomic_load(&mut self, _ty: Type<'gcc>, ptr: RValue<'gcc>, order: AtomicOrdering, size: Size) -> RValue<'gcc> { + // TODO: use ty. + // TODO: handle alignment. + let atomic_load = self.context.get_builtin_function(&format!("__atomic_load_{}", size.bytes())); + let ordering = self.context.new_rvalue_from_int(self.i32_type, order.to_gcc()); + + let volatile_const_void_ptr_type = self.context.new_type::<*mut ()>().make_const().make_volatile(); + let ptr = self.context.new_cast(None, ptr, volatile_const_void_ptr_type); + self.context.new_call(None, atomic_load, &[ptr, ordering]) + } + + fn load_operand(&mut self, place: PlaceRef<'tcx, RValue<'gcc>>) -> OperandRef<'tcx, RValue<'gcc>> { + //debug!("PlaceRef::load: {:?}", place); + + assert_eq!(place.llextra.is_some(), place.layout.is_unsized()); + + if place.layout.is_zst() { + return OperandRef::new_zst(self, place.layout); + } + + fn scalar_load_metadata<'a, 'gcc, 'tcx>(bx: &mut Builder<'a, 'gcc, 'tcx>, load: RValue<'gcc>, scalar: &abi::Scalar) { + let vr = scalar.valid_range.clone(); + match scalar.value { + abi::Int(..) => { + let range = scalar.valid_range_exclusive(bx); + if range.start != range.end { + bx.range_metadata(load, range); + } + } + abi::Pointer if vr.start() < vr.end() && !vr.contains(&0) => { + bx.nonnull_metadata(load); + } + _ => {} + } + } + + let val = + if let Some(llextra) = place.llextra { + OperandValue::Ref(place.llval, Some(llextra), place.align) + } + else if place.layout.is_gcc_immediate() { + let const_llval = None; + /*unsafe { + if let Some(global) = llvm::LLVMIsAGlobalVariable(place.llval) { + if llvm::LLVMIsGlobalConstant(global) == llvm::True { + const_llval = llvm::LLVMGetInitializer(global); + } + } + }*/ + let llval = const_llval.unwrap_or_else(|| { + let load = self.load(place.llval.get_type(), place.llval, place.align); + if let abi::Abi::Scalar(ref scalar) = place.layout.abi { + scalar_load_metadata(self, load, scalar); + } + load + }); + OperandValue::Immediate(self.to_immediate(llval, place.layout)) + } + else if let abi::Abi::ScalarPair(ref a, ref b) = place.layout.abi { + let b_offset = a.value.size(self).align_to(b.value.align(self).abi); + + let mut load = |i, scalar: &abi::Scalar, align| { + let llptr = self.struct_gep(place.llval, i as u64); + let load = self.load(llptr.get_type(), llptr, align); + scalar_load_metadata(self, load, scalar); + if scalar.is_bool() { self.trunc(load, self.type_i1()) } else { load } + }; + + OperandValue::Pair( + load(0, a, place.align), + load(1, b, place.align.restrict_for_offset(b_offset)), + ) + } + else { + OperandValue::Ref(place.llval, None, place.align) + }; + + OperandRef { val, layout: place.layout } + } + + fn write_operand_repeatedly(mut self, cg_elem: OperandRef<'tcx, RValue<'gcc>>, count: u64, dest: PlaceRef<'tcx, RValue<'gcc>>) -> Self { + let zero = self.const_usize(0); + let count = 
self.const_usize(count); + let start = dest.project_index(&mut self, zero).llval; + let end = dest.project_index(&mut self, count).llval; + + let mut header_bx = self.build_sibling_block("repeat_loop_header"); + let mut body_bx = self.build_sibling_block("repeat_loop_body"); + let next_bx = self.build_sibling_block("repeat_loop_next"); + + let ptr_type = start.get_type(); + let current = self.llbb().get_function().new_local(None, ptr_type, "loop_var"); + let current_val = current.to_rvalue(); + self.assign(current, start); + + self.br(header_bx.llbb()); + + let keep_going = header_bx.icmp(IntPredicate::IntNE, current_val, end); + header_bx.cond_br(keep_going, body_bx.llbb(), next_bx.llbb()); + + let align = dest.align.restrict_for_offset(dest.layout.field(self.cx(), 0).size); + cg_elem.val.store(&mut body_bx, PlaceRef::new_sized_aligned(current_val, cg_elem.layout, align)); + + let next = body_bx.inbounds_gep(current.to_rvalue(), &[self.const_usize(1)]); + body_bx.llbb().add_assignment(None, current, next); + body_bx.br(header_bx.llbb()); + + next_bx + } + + fn range_metadata(&mut self, _load: RValue<'gcc>, _range: Range) { + // TODO + /*if self.sess().target.target.arch == "amdgpu" { + // amdgpu/LLVM does something weird and thinks a i64 value is + // split into a v2i32, halving the bitwidth LLVM expects, + // tripping an assertion. So, for now, just disable this + // optimization. + return; + } + + unsafe { + let llty = self.cx.val_ty(load); + let v = [ + self.cx.const_uint_big(llty, range.start), + self.cx.const_uint_big(llty, range.end), + ]; + + llvm::LLVMSetMetadata( + load, + llvm::MD_range as c_uint, + llvm::LLVMMDNodeInContext(self.cx.llcx, v.as_ptr(), v.len() as c_uint), + ); + }*/ + } + + fn nonnull_metadata(&mut self, _load: RValue<'gcc>) { + // TODO + /*unsafe { + llvm::LLVMSetMetadata( + load, + llvm::MD_nonnull as c_uint, + llvm::LLVMMDNodeInContext(self.cx.llcx, ptr::null(), 0), + ); + }*/ + } + + fn store(&mut self, val: RValue<'gcc>, ptr: RValue<'gcc>, align: Align) -> RValue<'gcc> { + self.store_with_flags(val, ptr, align, MemFlags::empty()) + } + + fn store_with_flags(&mut self, val: RValue<'gcc>, ptr: RValue<'gcc>, _align: Align, _flags: MemFlags) -> RValue<'gcc> { + //debug!("Store {:?} -> {:?} ({:?})", val, ptr, flags); + let ptr = self.check_store(val, ptr); + self.llbb().add_assignment(None, ptr.dereference(None), val); + /*let align = + if flags.contains(MemFlags::UNALIGNED) { 1 } else { align.bytes() as c_uint }; + llvm::LLVMSetAlignment(store, align); + if flags.contains(MemFlags::VOLATILE) { + llvm::LLVMSetVolatile(store, llvm::True); + } + if flags.contains(MemFlags::NONTEMPORAL) { + // According to LLVM [1] building a nontemporal store must + // *always* point to a metadata value of the integer 1. + // + // [1]: http://llvm.org/docs/LangRef.html#store-instruction + let one = self.cx.const_i32(1); + let node = llvm::LLVMMDNodeInContext(self.cx.llcx, &one, 1); + llvm::LLVMSetMetadata(store, llvm::MD_nontemporal as c_uint, node); + }*/ + // NOTE: dummy value here since it's never used. FIXME: API should not return a value here? + self.cx.context.new_rvalue_zero(self.type_i32()) + } + + fn atomic_store(&mut self, value: RValue<'gcc>, ptr: RValue<'gcc>, order: AtomicOrdering, size: Size) { + // TODO: handle alignment. 
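As in atomic_load above, the builtin is selected by byte width, and the memory ordering is passed as a plain integer: `order.to_gcc()` (see ToGccOrdering at the end of this file) maps rustc's AtomicOrdering onto GCC's `__ATOMIC_*` constants, e.g. SequentiallyConsistent becomes `__ATOMIC_SEQ_CST` (5). A 4-byte sequentially consistent store therefore lowers to roughly:

    // __atomic_store_4(ptr, value, __ATOMIC_SEQ_CST);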
+ let atomic_store = self.context.get_builtin_function(&format!("__atomic_store_{}", size.bytes())); + let ordering = self.context.new_rvalue_from_int(self.i32_type, order.to_gcc()); + let volatile_const_void_ptr_type = self.context.new_type::<*mut ()>().make_const().make_volatile(); + let ptr = self.context.new_cast(None, ptr, volatile_const_void_ptr_type); + + // FIXME: fix libgccjit to allow comparing an integer type with an aligned integer type because + // the following cast is required to avoid this error: + // gcc_jit_context_new_call: mismatching types for argument 2 of function "__atomic_store_4": assignment to param arg1 (type: int) from loadedValue3577 (type: unsigned int __attribute__((aligned(4)))) + let int_type = atomic_store.get_param(1).to_rvalue().get_type(); + let value = self.context.new_cast(None, value, int_type); + self.llbb() + .add_eval(None, self.context.new_call(None, atomic_store, &[ptr, value, ordering])); + } + + fn gep(&mut self, ptr: RValue<'gcc>, indices: &[RValue<'gcc>]) -> RValue<'gcc> { + let mut result = ptr; + for index in indices { + result = self.context.new_array_access(None, result, *index).get_address(None).to_rvalue(); + } + result + } + + fn inbounds_gep(&mut self, ptr: RValue<'gcc>, indices: &[RValue<'gcc>]) -> RValue<'gcc> { + // FIXME: would be safer if doing the same thing (loop) as gep. + // TODO: specify inbounds somehow. + match indices.len() { + 1 => { + self.context.new_array_access(None, ptr, indices[0]).get_address(None) + }, + 2 => { + let array = ptr.dereference(None); // TODO: assert that first index is 0? + self.context.new_array_access(None, array, indices[1]).get_address(None) + }, + _ => unimplemented!(), + } + } + + fn struct_gep(&mut self, ptr: RValue<'gcc>, idx: u64) -> RValue<'gcc> { + // FIXME: it would be better if the API only called this on struct, not on arrays. + assert_eq!(idx as usize as u64, idx); + let value = ptr.dereference(None).to_rvalue(); + let value_type = value.get_type(); + + if value_type.is_array().is_some() { + let index = self.context.new_rvalue_from_long(self.u64_type, i64::try_from(idx).expect("i64::try_from")); + let element = self.context.new_array_access(None, value, index); + element.get_address(None) + } + else if let Some(vector_type) = value_type.is_vector() { + let array_type = vector_type.get_element_type().make_pointer(); + let array = self.bitcast(ptr, array_type); + let index = self.context.new_rvalue_from_long(self.u64_type, i64::try_from(idx).expect("i64::try_from")); + let element = self.context.new_array_access(None, array, index); + element.get_address(None) + } + else if let Some(struct_type) = value_type.is_struct() { + ptr.dereference_field(None, struct_type.get_field(idx as i32)).get_address(None) + } + else { + panic!("Unexpected type {:?}", value_type); + } + } + + /* Casts */ + fn trunc(&mut self, value: RValue<'gcc>, dest_ty: Type<'gcc>) -> RValue<'gcc> { + // TODO: check that it indeed truncate the value. + //println!("trunc: {:?} -> {:?}", value, dest_ty); + self.context.new_cast(None, value, dest_ty) + } + + fn sext(&mut self, value: RValue<'gcc>, dest_ty: Type<'gcc>) -> RValue<'gcc> { + // TODO: check that it indeed sign extend the value. + //println!("Sext {:?} to {:?}", value, dest_ty); + //if let Some(vector_type) = value.get_type().is_vector() { + if dest_ty.is_vector().is_some() { + // TODO: nothing to do as it is only for LLVM? 
+ return value; + /*let dest_type = self.context.new_vector_type(dest_ty, vector_type.get_num_units() as u64); + println!("Casting {:?} to {:?}", value, dest_type); + return self.context.new_cast(None, value, dest_type);*/ + } + self.context.new_cast(None, value, dest_ty) + } + + fn fptoui(&mut self, value: RValue<'gcc>, dest_ty: Type<'gcc>) -> RValue<'gcc> { + //println!("7: fptoui: {:?} to {:?}", value, dest_ty); + let ret = self.context.new_cast(None, value, dest_ty); + //println!("8"); + ret + //unsafe { llvm::LLVMBuildFPToUI(self.llbuilder, val, dest_ty, UNNAMED) } + } + + fn fptosi(&mut self, value: RValue<'gcc>, dest_ty: Type<'gcc>) -> RValue<'gcc> { + self.context.new_cast(None, value, dest_ty) + } + + fn uitofp(&mut self, value: RValue<'gcc>, dest_ty: Type<'gcc>) -> RValue<'gcc> { + //println!("1: uitofp: {:?} -> {:?}", value, dest_ty); + let ret = self.context.new_cast(None, value, dest_ty); + //println!("2"); + ret + } + + fn sitofp(&mut self, value: RValue<'gcc>, dest_ty: Type<'gcc>) -> RValue<'gcc> { + //println!("3: sitofp: {:?} -> {:?}", value, dest_ty); + let ret = self.context.new_cast(None, value, dest_ty); + //println!("4"); + ret + } + + fn fptrunc(&mut self, value: RValue<'gcc>, dest_ty: Type<'gcc>) -> RValue<'gcc> { + // TODO: make sure it trancates. + self.context.new_cast(None, value, dest_ty) + } + + fn fpext(&mut self, value: RValue<'gcc>, dest_ty: Type<'gcc>) -> RValue<'gcc> { + self.context.new_cast(None, value, dest_ty) + } + + fn ptrtoint(&mut self, value: RValue<'gcc>, dest_ty: Type<'gcc>) -> RValue<'gcc> { + self.cx.ptrtoint(self.block.expect("block"), value, dest_ty) + } + + fn inttoptr(&mut self, value: RValue<'gcc>, dest_ty: Type<'gcc>) -> RValue<'gcc> { + self.cx.inttoptr(self.block.expect("block"), value, dest_ty) + } + + fn bitcast(&mut self, value: RValue<'gcc>, dest_ty: Type<'gcc>) -> RValue<'gcc> { + self.cx.const_bitcast(value, dest_ty) + } + + fn intcast(&mut self, value: RValue<'gcc>, dest_typ: Type<'gcc>, _is_signed: bool) -> RValue<'gcc> { + // NOTE: is_signed is for value, not dest_typ. + //println!("intcast: {:?} ({:?}) -> {:?}", value, value.get_type(), dest_typ); + self.cx.context.new_cast(None, value, dest_typ) + } + + fn pointercast(&mut self, value: RValue<'gcc>, dest_ty: Type<'gcc>) -> RValue<'gcc> { + //println!("pointercast: {:?} ({:?}) -> {:?}", value, value.get_type(), dest_ty); + let val_type = value.get_type(); + match (type_is_pointer(val_type), type_is_pointer(dest_ty)) { + (false, true) => { + // NOTE: Projecting a field of a pointer type will attemp a cast from a signed char to + // a pointer, which is not supported by gccjit. + return self.cx.context.new_cast(None, self.inttoptr(value, val_type.make_pointer()), dest_ty); + }, + (false, false) => { + // When they are not pointers, we want a transmute (or reinterpret_cast). + //self.cx.context.new_cast(None, value, dest_ty) + self.bitcast(value, dest_ty) + }, + (true, true) => self.cx.context.new_cast(None, value, dest_ty), + (true, false) => unimplemented!(), + } + } + + /* Comparisons */ + fn icmp(&mut self, op: IntPredicate, lhs: RValue<'gcc>, mut rhs: RValue<'gcc>) -> RValue<'gcc> { + if lhs.get_type() != rhs.get_type() { + // NOTE: hack because we try to cast a vector type to the same vector type. 
+ if format!("{:?}", lhs.get_type()) != format!("{:?}", rhs.get_type()) { + rhs = self.context.new_cast(None, rhs, lhs.get_type()); + } + } + self.context.new_comparison(None, op.to_gcc_comparison(), lhs, rhs) + } + + fn fcmp(&mut self, op: RealPredicate, lhs: RValue<'gcc>, rhs: RValue<'gcc>) -> RValue<'gcc> { + self.context.new_comparison(None, op.to_gcc_comparison(), lhs, rhs) + } + + /* Miscellaneous instructions */ + fn memcpy(&mut self, dst: RValue<'gcc>, dst_align: Align, src: RValue<'gcc>, src_align: Align, size: RValue<'gcc>, flags: MemFlags) { + if flags.contains(MemFlags::NONTEMPORAL) { + // HACK(nox): This is inefficient but there is no nontemporal memcpy. + let val = self.load(src.get_type(), src, src_align); + let ptr = self.pointercast(dst, self.type_ptr_to(self.val_ty(val))); + self.store_with_flags(val, ptr, dst_align, flags); + return; + } + let size = self.intcast(size, self.type_size_t(), false); + let _is_volatile = flags.contains(MemFlags::VOLATILE); + let dst = self.pointercast(dst, self.type_i8p()); + let src = self.pointercast(src, self.type_ptr_to(self.type_void())); + let memcpy = self.context.get_builtin_function("memcpy"); + let block = self.block.expect("block"); + // TODO: handle aligns and is_volatile. + block.add_eval(None, self.context.new_call(None, memcpy, &[dst, src, size])); + } + + fn memmove(&mut self, dst: RValue<'gcc>, dst_align: Align, src: RValue<'gcc>, src_align: Align, size: RValue<'gcc>, flags: MemFlags) { + if flags.contains(MemFlags::NONTEMPORAL) { + // HACK(nox): This is inefficient but there is no nontemporal memmove. + let val = self.load(src.get_type(), src, src_align); + let ptr = self.pointercast(dst, self.type_ptr_to(self.val_ty(val))); + self.store_with_flags(val, ptr, dst_align, flags); + return; + } + let size = self.intcast(size, self.type_size_t(), false); + let _is_volatile = flags.contains(MemFlags::VOLATILE); + let dst = self.pointercast(dst, self.type_i8p()); + let src = self.pointercast(src, self.type_ptr_to(self.type_void())); + + let memmove = self.context.get_builtin_function("memmove"); + let block = self.block.expect("block"); + // TODO: handle is_volatile. + block.add_eval(None, self.context.new_call(None, memmove, &[dst, src, size])); + } + + fn memset(&mut self, ptr: RValue<'gcc>, fill_byte: RValue<'gcc>, size: RValue<'gcc>, _align: Align, flags: MemFlags) { + let _is_volatile = flags.contains(MemFlags::VOLATILE); + let ptr = self.pointercast(ptr, self.type_i8p()); + let memset = self.context.get_builtin_function("memset"); + let block = self.block.expect("block"); + // TODO: handle aligns and is_volatile. 
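The cast of the fill byte just below exists because C declares `void *memset(void *s, int c, size_t n)`: the fill value is an `int`, not a byte, so it must be widened before calling the builtin. The emitted call is roughly:

    // memset(ptr, (int) fill_byte, (size_t) size);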
+ //println!("memset: {:?} -> {:?}", fill_byte, self.i32_type); + let fill_byte = self.context.new_cast(None, fill_byte, self.i32_type); + let size = self.intcast(size, self.type_size_t(), false); + block.add_eval(None, self.context.new_call(None, memset, &[ptr, fill_byte, size])); + } + + fn select(&mut self, cond: RValue<'gcc>, then_val: RValue<'gcc>, mut else_val: RValue<'gcc>) -> RValue<'gcc> { + let func = self.current_func(); + let variable = func.new_local(None, then_val.get_type(), "selectVar"); + let then_block = func.new_block("then"); + let else_block = func.new_block("else"); + let after_block = func.new_block("after"); + self.llbb().end_with_conditional(None, cond, then_block, else_block); + + then_block.add_assignment(None, variable, then_val); + then_block.end_with_jump(None, after_block); + + if then_val.get_type() != else_val.get_type() { + else_val = self.context.new_cast(None, else_val, then_val.get_type()); + } + else_block.add_assignment(None, variable, else_val); + else_block.end_with_jump(None, after_block); + + // NOTE: since jumps were added in a place rustc does not expect, the current blocks in the + // state need to be updated. + self.block = Some(after_block); + *self.cx.current_block.borrow_mut() = Some(after_block); + + variable.to_rvalue() + } + + #[allow(dead_code)] + fn va_arg(&mut self, _list: RValue<'gcc>, _ty: Type<'gcc>) -> RValue<'gcc> { + unimplemented!(); + //unsafe { llvm::LLVMBuildVAArg(self.llbuilder, list, ty, UNNAMED) } + } + + fn extract_element(&mut self, _vec: RValue<'gcc>, _idx: RValue<'gcc>) -> RValue<'gcc> { + unimplemented!(); + //unsafe { llvm::LLVMBuildExtractElement(self.llbuilder, vec, idx, UNNAMED) } + } + + fn vector_splat(&mut self, _num_elts: usize, _elt: RValue<'gcc>) -> RValue<'gcc> { + unimplemented!(); + /*unsafe { + let elt_ty = self.cx.val_ty(elt); + let undef = llvm::LLVMGetUndef(self.type_vector(elt_ty, num_elts as u64)); + let vec = self.insert_element(undef, elt, self.cx.const_i32(0)); + let vec_i32_ty = self.type_vector(self.type_i32(), num_elts as u64); + self.shuffle_vector(vec, undef, self.const_null(vec_i32_ty)) + }*/ + } + + fn extract_value(&mut self, aggregate_value: RValue<'gcc>, idx: u64) -> RValue<'gcc> { + // FIXME: it would be better if the API only called this on struct, not on arrays. 
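A note on the select implementation above: gccjit has no counterpart to LLVM's select instruction, so the conditional move is open-coded as a diamond in the control-flow graph, with both arms assigning into one shared local before re-joining. The generated control flow is roughly:

    //         if (cond) goto then; else goto else_;
    // then:   selectVar = then_val; goto after;
    // else_:  selectVar = else_val; goto after;
    // after:  // selectVar now holds the chosen value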
+        assert_eq!(idx as usize as u64, idx);
+        let value_type = aggregate_value.get_type();
+
+        if value_type.is_array().is_some() {
+            let index = self.context.new_rvalue_from_long(self.u64_type, i64::try_from(idx).expect("i64::try_from"));
+            let element = self.context.new_array_access(None, aggregate_value, index);
+            element.get_address(None)
+        }
+        else if value_type.is_vector().is_some() {
+            panic!();
+        }
+        else if let Some(pointer_type) = value_type.get_pointee() {
+            if let Some(struct_type) = pointer_type.is_struct() {
+                // NOTE: hack to work around a limitation of the rustc API: see comment on
+                // CodegenCx.structs_as_pointer
+                aggregate_value.dereference_field(None, struct_type.get_field(idx as i32)).to_rvalue()
+            }
+            else {
+                panic!("Unexpected type {:?}", value_type);
+            }
+        }
+        else if let Some(struct_type) = value_type.is_struct() {
+            aggregate_value.access_field(None, struct_type.get_field(idx as i32)).to_rvalue()
+        }
+        else {
+            panic!("Unexpected type {:?}", value_type);
+        }
+        /*assert_eq!(idx as c_uint as u64, idx);
+        unsafe { llvm::LLVMBuildExtractValue(self.llbuilder, agg_val, idx as c_uint, UNNAMED) }*/
+    }
+
+    fn insert_value(&mut self, aggregate_value: RValue<'gcc>, value: RValue<'gcc>, idx: u64) -> RValue<'gcc> {
+        // FIXME: it would be better if the API only called this on structs, not on arrays.
+        assert_eq!(idx as usize as u64, idx);
+        let value_type = aggregate_value.get_type();
+
+        let lvalue =
+            if value_type.is_array().is_some() {
+                let index = self.context.new_rvalue_from_long(self.u64_type, i64::try_from(idx).expect("i64::try_from"));
+                self.context.new_array_access(None, aggregate_value, index)
+            }
+            else if value_type.is_vector().is_some() {
+                panic!();
+            }
+            else if let Some(pointer_type) = value_type.get_pointee() {
+                if let Some(struct_type) = pointer_type.is_struct() {
+                    // NOTE: hack to work around a limitation of the rustc API: see comment on
+                    // CodegenCx.structs_as_pointer
+                    aggregate_value.dereference_field(None, struct_type.get_field(idx as i32))
+                }
+                else {
+                    panic!("Unexpected type {:?}", value_type);
+                }
+            }
+            else {
+                panic!("Unexpected type {:?}", value_type);
+            };
+        self.llbb().add_assignment(None, lvalue, value);
+
+        aggregate_value
+    }
+
+    fn landing_pad(&mut self, _ty: Type<'gcc>, _pers_fn: RValue<'gcc>, _num_clauses: usize) -> RValue<'gcc> {
+        unimplemented!();
+        /*unsafe {
+            llvm::LLVMBuildLandingPad(self.llbuilder, ty, pers_fn, num_clauses as c_uint, UNNAMED)
+        }*/
+    }
+
+    fn set_cleanup(&mut self, _landing_pad: RValue<'gcc>) {
+        unimplemented!();
+        /*unsafe {
+            llvm::LLVMSetCleanup(landing_pad, llvm::True);
+        }*/
+    }
+
+    fn resume(&mut self, _exn: RValue<'gcc>) -> RValue<'gcc> {
+        unimplemented!();
+        //unsafe { llvm::LLVMBuildResume(self.llbuilder, exn) }
+    }
+
+    fn cleanup_pad(&mut self, _parent: Option<RValue<'gcc>>, _args: &[RValue<'gcc>]) -> Funclet {
+        unimplemented!();
+        /*let name = const_cstr!("cleanuppad");
+        let ret = unsafe {
+            llvm::LLVMRustBuildCleanupPad(
+                self.llbuilder,
+                parent,
+                args.len() as c_uint,
+                args.as_ptr(),
+                name.as_ptr(),
+            )
+        };
+        Funclet::new(ret.expect("LLVM does not have support for cleanuppad"))*/
+    }
+
+    fn cleanup_ret(&mut self, _funclet: &Funclet, _unwind: Option<Block<'gcc>>) -> RValue<'gcc> {
+        unimplemented!();
+        /*let ret =
+            unsafe { llvm::LLVMRustBuildCleanupRet(self.llbuilder, funclet.cleanuppad(), unwind) };
+        ret.expect("LLVM does not have support for cleanupret")*/
+    }
+
+    fn catch_pad(&mut self, _parent: RValue<'gcc>, _args: &[RValue<'gcc>]) -> Funclet {
+        unimplemented!();
+        /*let name = const_cstr!("catchpad");
+        let ret = unsafe {
+            llvm::LLVMRustBuildCatchPad(
+                self.llbuilder,
+                parent,
+                args.len() as c_uint,
+                args.as_ptr(),
+                name.as_ptr(),
+            )
+        };
+        Funclet::new(ret.expect("LLVM does not have support for catchpad"))*/
+    }
+
+    fn catch_switch(&mut self, _parent: Option<RValue<'gcc>>, _unwind: Option<Block<'gcc>>, _num_handlers: usize) -> RValue<'gcc> {
+        unimplemented!();
+        /*let name = const_cstr!("catchswitch");
+        let ret = unsafe {
+            llvm::LLVMRustBuildCatchSwitch(
+                self.llbuilder,
+                parent,
+                unwind,
+                num_handlers as c_uint,
+                name.as_ptr(),
+            )
+        };
+        ret.expect("LLVM does not have support for catchswitch")*/
+    }
+
+    fn add_handler(&mut self, _catch_switch: RValue<'gcc>, _handler: Block<'gcc>) {
+        unimplemented!();
+        /*unsafe {
+            llvm::LLVMRustAddHandler(catch_switch, handler);
+        }*/
+    }
+
+    fn set_personality_fn(&mut self, _personality: RValue<'gcc>) {
+        unimplemented!();
+        /*unsafe {
+            llvm::LLVMSetPersonalityFn(self.llfn(), personality);
+        }*/
+    }
+
+    // Atomic Operations
+    fn atomic_cmpxchg(&mut self, dst: RValue<'gcc>, cmp: RValue<'gcc>, src: RValue<'gcc>, order: AtomicOrdering, failure_order: AtomicOrdering, weak: bool) -> RValue<'gcc> {
+        let expected = self.current_func().new_local(None, cmp.get_type(), "expected");
+        self.llbb().add_assignment(None, expected, cmp);
+        let success = self.compare_exchange(dst, expected, src, order, failure_order, weak);
+
+        let pair_type = self.cx.type_struct(&[src.get_type(), self.bool_type], false);
+        let result = self.current_func().new_local(None, pair_type, "atomic_cmpxchg_result");
+        let align = Align::from_bits(64).expect("align"); // TODO: use the proper alignment.
+
+        let value_type = result.to_rvalue().get_type();
+        if let Some(struct_type) = value_type.is_struct() {
+            self.store(success, result.access_field(None, struct_type.get_field(1)).get_address(None), align);
+            // NOTE: since success contains the call to the intrinsic, it must be stored before
+            // expected so that we store expected after the call.
+            self.store(expected.to_rvalue(), result.access_field(None, struct_type.get_field(0)).get_address(None), align);
+        }
+        // TODO: handle the case where the value is not a struct.
+
+        result.to_rvalue()
+    }
+
+    fn atomic_rmw(&mut self, op: AtomicRmwBinOp, dst: RValue<'gcc>, src: RValue<'gcc>, order: AtomicOrdering) -> RValue<'gcc> {
+        let size = self.cx.int_width(src.get_type()) / 8;
+        let name =
+            match op {
+                AtomicRmwBinOp::AtomicXchg => format!("__atomic_exchange_{}", size),
+                AtomicRmwBinOp::AtomicAdd => format!("__atomic_fetch_add_{}", size),
+                AtomicRmwBinOp::AtomicSub => format!("__atomic_fetch_sub_{}", size),
+                AtomicRmwBinOp::AtomicAnd => format!("__atomic_fetch_and_{}", size),
+                AtomicRmwBinOp::AtomicNand => format!("__atomic_fetch_nand_{}", size),
+                AtomicRmwBinOp::AtomicOr => format!("__atomic_fetch_or_{}", size),
+                AtomicRmwBinOp::AtomicXor => format!("__atomic_fetch_xor_{}", size),
+                AtomicRmwBinOp::AtomicMax => return self.atomic_extremum(ExtremumOperation::Max, dst, src, order),
+                AtomicRmwBinOp::AtomicMin => return self.atomic_extremum(ExtremumOperation::Min, dst, src, order),
+                AtomicRmwBinOp::AtomicUMax => return self.atomic_extremum(ExtremumOperation::Max, dst, src, order),
+                AtomicRmwBinOp::AtomicUMin => return self.atomic_extremum(ExtremumOperation::Min, dst, src, order),
+            };
+
+        let atomic_function = self.context.get_builtin_function(name);
+        let order = self.context.new_rvalue_from_int(self.i32_type, order.to_gcc());
+
+        let void_ptr_type = self.context.new_type::<*mut ()>();
+        let volatile_void_ptr_type = void_ptr_type.make_volatile();
+        let dst = self.context.new_cast(None, dst, volatile_void_ptr_type);
+        // NOTE: not sure why, but we have the wrong type here.
+        let new_src_type = atomic_function.get_param(1).to_rvalue().get_type();
+        let src = self.context.new_cast(None, src, new_src_type);
+        let res = self.context.new_call(None, atomic_function, &[dst, src, order]);
+        self.context.new_cast(None, res, src.get_type())
+    }
+
+    fn atomic_fence(&mut self, order: AtomicOrdering, scope: SynchronizationScope) {
+        let name =
+            match scope {
+                SynchronizationScope::SingleThread => "__atomic_signal_fence",
+                SynchronizationScope::CrossThread => "__atomic_thread_fence",
+            };
+        let thread_fence = self.context.get_builtin_function(name);
+        let order = self.context.new_rvalue_from_int(self.i32_type, order.to_gcc());
+        self.llbb().add_eval(None, self.context.new_call(None, thread_fence, &[order]));
+    }
+
+    fn set_invariant_load(&mut self, load: RValue<'gcc>) {
+        // NOTE: hack to consider a vtable function pointer as a non-global-variable function pointer.
+        self.normal_function_addresses.borrow_mut().insert(load);
+        // TODO
+        /*unsafe {
+            llvm::LLVMSetMetadata(
+                load,
+                llvm::MD_invariant_load as c_uint,
+                llvm::LLVMMDNodeInContext(self.cx.llcx, ptr::null(), 0),
+            );
+        }*/
+    }
+
+    fn lifetime_start(&mut self, _ptr: RValue<'gcc>, _size: Size) {
+        // TODO
+        //self.call_lifetime_intrinsic("llvm.lifetime.start.p0i8", ptr, size);
+    }
+
+    fn lifetime_end(&mut self, _ptr: RValue<'gcc>, _size: Size) {
+        // TODO
+        //self.call_lifetime_intrinsic("llvm.lifetime.end.p0i8", ptr, size);
+    }
+
+    fn call(&mut self, func: RValue<'gcc>, args: &[RValue<'gcc>], funclet: Option<&Funclet>) -> RValue<'gcc> {
+        // FIXME: remove this transmute once there is a proper API for this.
+        let gcc_func = unsafe { std::mem::transmute(func) };
+        if self.functions.borrow().values().find(|value| **value == gcc_func).is_some() {
+            self.function_call(func, args, funclet)
+        }
+        else {
+            // If it's not a function that was defined, it's a function pointer.
+            self.function_ptr_call(func, args, funclet)
+        }
+    }
+
+    fn zext(&mut self, value: RValue<'gcc>, dest_typ: Type<'gcc>) -> RValue<'gcc> {
+        // FIXME: this does not zero-extend.
+        if value.get_type().is_bool() && dest_typ.is_i8(&self.cx) {
+            // FIXME: hack because base::from_immediate converts i1 to i8.
+            // Fix the code in codegen_ssa::base::from_immediate.
+            return value;
+        }
+        //println!("zext: {:?} -> {:?}", value, dest_typ);
+        self.context.new_cast(None, value, dest_typ)
+    }
+
+    fn cx(&self) -> &CodegenCx<'gcc, 'tcx> {
+        self.cx
+    }
+
+    fn do_not_inline(&mut self, _llret: RValue<'gcc>) {
+        unimplemented!();
+        //llvm::Attribute::NoInline.apply_callsite(llvm::AttributePlace::Function, llret);
+    }
+
+    fn set_span(&mut self, _span: Span) {}
+
+    fn from_immediate(&mut self, val: Self::Value) -> Self::Value {
+        if self.cx().val_ty(val) == self.cx().type_i1() {
+            self.zext(val, self.cx().type_i8())
+        }
+        else {
+            val
+        }
+    }
+
+    fn to_immediate_scalar(&mut self, val: Self::Value, scalar: &abi::Scalar) -> Self::Value {
+        if scalar.is_bool() {
+            return self.trunc(val, self.cx().type_i1());
+        }
+        val
+    }
+
+    fn fptoui_sat(&mut self, _val: RValue<'gcc>, _dest_ty: Type<'gcc>) -> Option<RValue<'gcc>> {
+        None
+    }
+
+    fn fptosi_sat(&mut self, _val: RValue<'gcc>, _dest_ty: Type<'gcc>) -> Option<RValue<'gcc>> {
+        None
+    }
+
+    fn instrprof_increment(&mut self, _fn_name: RValue<'gcc>, _hash: RValue<'gcc>, _num_counters: RValue<'gcc>, _index: RValue<'gcc>) {
+        unimplemented!();
+        /*debug!(
+            "instrprof_increment() with args ({:?}, {:?}, {:?}, {:?})",
+            fn_name, hash, num_counters, index
+        );
+
+        let llfn = unsafe { llvm::LLVMRustGetInstrProfIncrementIntrinsic(self.cx().llmod) };
+        let args = &[fn_name, hash, num_counters, index];
+        let args = self.check_call("call", llfn, args);
+
+        unsafe {
+            let _ = llvm::LLVMRustBuildCall(
+                self.llbuilder,
+                llfn,
+                args.as_ptr() as *const &llvm::Value,
+                args.len() as c_uint,
+                None,
+            );
+        }*/
+    }
+}
+
+impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> {
+    pub fn shuffle_vector(&mut self, v1: RValue<'gcc>, v2: RValue<'gcc>, mask: RValue<'gcc>) -> RValue<'gcc> {
+        let return_type = v1.get_type();
+        let params = [
+            self.context.new_parameter(None, return_type, "v1"),
+            self.context.new_parameter(None, return_type, "v2"),
+            self.context.new_parameter(None, mask.get_type(), "mask"),
+        ];
+        let shuffle = self.context.new_function(None, FunctionType::Extern, return_type, &params, "_mm_shuffle_epi8", false);
+        self.context.new_call(None, shuffle, &[v1, v2, mask])
+    }
+}
+
+impl<'a, 'gcc, 'tcx> StaticBuilderMethods for Builder<'a, 'gcc, 'tcx> {
+    fn get_static(&mut self, def_id: DefId) -> RValue<'gcc> {
+        // Forward to the `get_static` method of `CodegenCx`.
+        self.cx().get_static(def_id)
+    }
+}
+
+impl<'tcx> HasParamEnv<'tcx> for Builder<'_, '_, 'tcx> {
+    fn param_env(&self) -> ParamEnv<'tcx> {
+        self.cx.param_env()
+    }
+}
+
+impl<'tcx> HasTargetSpec for Builder<'_, '_, 'tcx> {
+    fn target_spec(&self) -> &Target {
+        &self.cx.target_spec()
+    }
+}
+
+trait ToGccComp {
+    fn to_gcc_comparison(&self) -> ComparisonOp;
+}
+
+impl ToGccComp for IntPredicate {
+    fn to_gcc_comparison(&self) -> ComparisonOp {
+        match *self {
+            IntPredicate::IntEQ => ComparisonOp::Equals,
+            IntPredicate::IntNE => ComparisonOp::NotEquals,
+            IntPredicate::IntUGT => ComparisonOp::GreaterThan,
+            IntPredicate::IntUGE => ComparisonOp::GreaterThanEquals,
+            IntPredicate::IntULT => ComparisonOp::LessThan,
+            IntPredicate::IntULE => ComparisonOp::LessThanEquals,
+            IntPredicate::IntSGT => ComparisonOp::GreaterThan,
+            IntPredicate::IntSGE =>
ComparisonOp::GreaterThanEquals, + IntPredicate::IntSLT => ComparisonOp::LessThan, + IntPredicate::IntSLE => ComparisonOp::LessThanEquals, + } + } +} + +impl ToGccComp for RealPredicate { + fn to_gcc_comparison(&self) -> ComparisonOp { + // TODO: check that ordered vs non-ordered is respected. + match *self { + RealPredicate::RealPredicateFalse => unreachable!(), + RealPredicate::RealOEQ => ComparisonOp::Equals, + RealPredicate::RealOGT => ComparisonOp::GreaterThan, + RealPredicate::RealOGE => ComparisonOp::GreaterThanEquals, + RealPredicate::RealOLT => ComparisonOp::LessThan, + RealPredicate::RealOLE => ComparisonOp::LessThanEquals, + RealPredicate::RealONE => ComparisonOp::NotEquals, + RealPredicate::RealORD => unreachable!(), + RealPredicate::RealUNO => unreachable!(), + RealPredicate::RealUEQ => ComparisonOp::Equals, + RealPredicate::RealUGT => ComparisonOp::GreaterThan, + RealPredicate::RealUGE => ComparisonOp::GreaterThan, + RealPredicate::RealULT => ComparisonOp::LessThan, + RealPredicate::RealULE => ComparisonOp::LessThan, + RealPredicate::RealUNE => ComparisonOp::NotEquals, + RealPredicate::RealPredicateTrue => unreachable!(), + } + } +} + +#[repr(C)] +#[allow(non_camel_case_types)] +enum MemOrdering { + __ATOMIC_RELAXED, + __ATOMIC_CONSUME, + __ATOMIC_ACQUIRE, + __ATOMIC_RELEASE, + __ATOMIC_ACQ_REL, + __ATOMIC_SEQ_CST, +} + +trait ToGccOrdering { + fn to_gcc(self) -> i32; +} + +impl ToGccOrdering for AtomicOrdering { + fn to_gcc(self) -> i32 { + use MemOrdering::*; + + let ordering = + match self { + AtomicOrdering::NotAtomic => __ATOMIC_RELAXED, // TODO: check if that's the same. + AtomicOrdering::Unordered => __ATOMIC_RELAXED, + AtomicOrdering::Monotonic => __ATOMIC_RELAXED, // TODO: check if that's the same. + AtomicOrdering::Acquire => __ATOMIC_ACQUIRE, + AtomicOrdering::Release => __ATOMIC_RELEASE, + AtomicOrdering::AcquireRelease => __ATOMIC_ACQ_REL, + AtomicOrdering::SequentiallyConsistent => __ATOMIC_SEQ_CST, + }; + ordering as i32 + } +} diff --git a/src/callee.rs b/src/callee.rs new file mode 100644 index 0000000000000..d0ae96adcedaf --- /dev/null +++ b/src/callee.rs @@ -0,0 +1,99 @@ +use gccjit::{FunctionType, RValue}; +use rustc_codegen_ssa::traits::BaseTypeMethods; +use rustc_middle::ty::{Instance, TypeFoldable}; +use rustc_middle::ty::layout::{FnAbiExt, HasTyCtxt}; +use rustc_target::abi::call::FnAbi; + +use crate::abi::FnAbiGccExt; +use crate::context::CodegenCx; + +/// Codegens a reference to a fn/method item, monomorphizing and +/// inlining as it goes. +/// +/// # Parameters +/// +/// - `cx`: the crate context +/// - `instance`: the instance to be instantiated +pub fn get_fn<'gcc, 'tcx>(cx: &CodegenCx<'gcc, 'tcx>, instance: Instance<'tcx>) -> RValue<'gcc> { + let tcx = cx.tcx(); + + //debug!("get_fn(instance={:?})", instance); + + assert!(!instance.substs.needs_infer()); + assert!(!instance.substs.has_escaping_bound_vars()); + assert!(!instance.substs.has_param_types_or_consts()); + + if let Some(&func) = cx.instances.borrow().get(&instance) { + return func; + } + + let sym = tcx.symbol_name(instance).name; + //debug!("get_fn({:?}: {:?}) => {}", instance, instance.monomorphic_ty(cx.tcx()), sym); + + let fn_abi = FnAbi::of_instance(cx, instance, &[]); + + // TODO + let func = + if let Some(func) = cx.get_declared_value(&sym) { + // Create a fn pointer with the new signature. + let ptrty = fn_abi.ptr_to_gcc_type(cx); + + // This is subtle and surprising, but sometimes we have to bitcast + // the resulting fn pointer. 
The reason has to do with external + // functions. If you have two crates that both bind the same C + // library, they may not use precisely the same types: for + // example, they will probably each declare their own structs, + // which are distinct types from LLVM's point of view (nominal + // types). + // + // Now, if those two crates are linked into an application, and + // they contain inlined code, you can wind up with a situation + // where both of those functions wind up being loaded into this + // application simultaneously. In that case, the same function + // (from LLVM's point of view) requires two types. But of course + // LLVM won't allow one function to have two types. + // + // What we currently do, therefore, is declare the function with + // one of the two types (whichever happens to come first) and then + // bitcast as needed when the function is referenced to make sure + // it has the type we expect. + // + // This can occur on either a crate-local or crate-external + // reference. It also occurs when testing libcore and in some + // other weird situations. Annoying. + if cx.val_ty(func) != ptrty { + //debug!("get_fn: casting {:?} to {:?}", func, ptrty); + // TODO + //cx.const_ptrcast(func, ptrty) + func + } + else { + //debug!("get_fn: not casting pointer!"); + func + } + } + else { + cx.linkage.set(FunctionType::Extern); + let func = cx.declare_fn(&sym, &fn_abi); + //cx.linkage.set(FunctionType::Internal); + //debug!("get_fn: not casting pointer!"); + + // TODO + //attributes::from_fn_attrs(cx, func, instance); + + //let instance_def_id = instance.def_id(); + + // TODO + /*if cx.use_dll_storage_attrs && tcx.is_dllimport_foreign_item(instance_def_id) { + unsafe { + llvm::LLVMSetDLLStorageClass(func, llvm::DLLStorageClass::DllImport); + } + }*/ + + func + }; + + cx.instances.borrow_mut().insert(instance, func); + + func +} diff --git a/src/common.rs b/src/common.rs new file mode 100644 index 0000000000000..3178ada9ec3e2 --- /dev/null +++ b/src/common.rs @@ -0,0 +1,448 @@ +use std::convert::TryFrom; +use std::convert::TryInto; + +use gccjit::{Block, CType, RValue, Type, ToRValue}; +use rustc_codegen_ssa::mir::place::PlaceRef; +use rustc_codegen_ssa::traits::{ + BaseTypeMethods, + ConstMethods, + DerivedTypeMethods, + MiscMethods, + StaticMethods, +}; +use rustc_middle::bug; +use rustc_middle::mir::Mutability; +use rustc_middle::ty::{layout::TyAndLayout, ScalarInt}; +use rustc_mir::interpret::{Allocation, GlobalAlloc, Scalar}; +use rustc_span::Symbol; +use rustc_target::abi::{self, HasDataLayout, LayoutOf, Pointer, Size}; + +use crate::consts::const_alloc_to_gcc; +use crate::context::CodegenCx; +use crate::type_of::LayoutGccExt; + +impl<'gcc, 'tcx> CodegenCx<'gcc, 'tcx> { + pub fn const_bytes(&self, bytes: &[u8]) -> RValue<'gcc> { + bytes_in_context(self, bytes) + } + + fn const_cstr(&self, symbol: Symbol, _null_terminated: bool) -> RValue<'gcc> { + // TODO: handle null_terminated. + if let Some(&value) = self.const_cstr_cache.borrow().get(&symbol) { + return value.to_rvalue(); + } + + let global = self.global_string(&*symbol.as_str()); + + self.const_cstr_cache.borrow_mut().insert(symbol, global.dereference(None)); + global + } + + fn global_string(&self, string: &str) -> RValue<'gcc> { + // TODO: handle non-null-terminated strings. + let string = self.context.new_string_literal(&*string); + let sym = self.generate_local_symbol_name("str"); + // NOTE: TLS is always off for a string litteral. + // NOTE: string litterals do not have a link section. 
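One assumption worth spelling out for const_cstr and global_string above: gccjit's new_string_literal behaves like a C string literal, which is NUL-terminated by construction, so ignoring the `_null_terminated` flag is harmless for the common case; the TODO concerns strings that must not carry a terminator. What gets emitted is, in effect:

    // char str_0[N];   // the named global defined just below
    // str_0 = "...";   // assigned once, in the global-init block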
+        let global = self.define_global(&sym, self.val_ty(string), false, None)
+            .unwrap_or_else(|| bug!("symbol `{}` is already defined", sym));
+        self.global_init_block.add_assignment(None, global.dereference(None), string);
+        global.to_rvalue()
+        //llvm::LLVMRustSetLinkage(global, llvm::Linkage::InternalLinkage);
+    }
+
+    pub fn inttoptr(&self, block: Block<'gcc>, value: RValue<'gcc>, dest_ty: Type<'gcc>) -> RValue<'gcc> {
+        let func = block.get_function();
+        let local = func.new_local(None, value.get_type(), "intLocal");
+        block.add_assignment(None, local, value);
+        let value_address = local.get_address(None);
+
+        let ptr = self.context.new_cast(None, value_address, dest_ty.make_pointer());
+        ptr.dereference(None).to_rvalue()
+    }
+
+    pub fn ptrtoint(&self, block: Block<'gcc>, value: RValue<'gcc>, dest_ty: Type<'gcc>) -> RValue<'gcc> {
+        // TODO: when libgccjit allows casting from pointer to int, remove this.
+        let func = block.get_function();
+        let local = func.new_local(None, value.get_type(), "ptrLocal");
+        block.add_assignment(None, local, value);
+        let ptr_address = local.get_address(None);
+
+        let ptr = self.context.new_cast(None, ptr_address, dest_ty.make_pointer());
+        ptr.dereference(None).to_rvalue()
+    }
+
+    /*pub fn const_vector(&self, elements: &[RValue<'gcc>]) -> RValue<'gcc> {
+        self.context.new_rvalue_from_vector(None, elements[0].get_type(), elements)
+    }*/
+}
+
+pub fn bytes_in_context<'gcc, 'tcx>(cx: &CodegenCx<'gcc, 'tcx>, bytes: &[u8]) -> RValue<'gcc> {
+    let context = &cx.context;
+    let typ = context.new_array_type(None, context.new_type::<u8>(), bytes.len() as i32);
+    let global = cx.declare_unnamed_global(typ);
+    global.global_set_initializer(bytes);
+    global.to_rvalue()
+}
+
+pub fn type_is_pointer<'gcc>(typ: Type<'gcc>) -> bool {
+    typ.get_pointee().is_some()
+}
+
+impl<'gcc, 'tcx> ConstMethods<'tcx> for CodegenCx<'gcc, 'tcx> {
+    fn const_null(&self, typ: Type<'gcc>) -> RValue<'gcc> {
+        if type_is_pointer(typ) {
+            self.context.new_null(typ)
+        }
+        else {
+            self.const_int(typ, 0)
+        }
+    }
+
+    fn const_undef(&self, typ: Type<'gcc>) -> RValue<'gcc> {
+        let local = self.current_func.borrow().expect("func")
+            .new_local(None, typ, "undefined");
+        if typ.is_struct().is_some() {
+            // NOTE: hack to work around a limitation of the rustc API: see comment on
+            // CodegenCx.structs_as_pointer
+            let pointer = local.get_address(None);
+            self.structs_as_pointer.borrow_mut().insert(pointer);
+            pointer
+        }
+        else {
+            local.to_rvalue()
+        }
+    }
+
+    fn const_int(&self, typ: Type<'gcc>, int: i64) -> RValue<'gcc> {
+        self.context.new_rvalue_from_long(typ, i64::try_from(int).expect("i64::try_from"))
+    }
+
+    fn const_uint(&self, typ: Type<'gcc>, int: u64) -> RValue<'gcc> {
+        self.context.new_rvalue_from_long(typ, u64::try_from(int).expect("u64::try_from") as i64)
+    }
+
+    fn const_uint_big(&self, typ: Type<'gcc>, num: u128) -> RValue<'gcc> {
+        let num64: Result<i64, _> = num.try_into();
+        if let Ok(num) = num64 {
+            // FIXME: workaround for a bug where libgccjit is expecting a constant.
+            // The operations >> 64 and | low are making the normal case a non-constant.
+            return self.context.new_rvalue_from_long(typ, num as i64);
+        }
+
+        if num >> 64 != 0 {
+            // FIXME: use a new function new_rvalue_from_unsigned_long()?
+            let low = self.context.new_rvalue_from_long(self.u64_type, num as u64 as i64);
+            let high = self.context.new_rvalue_from_long(typ, (num >> 64) as u64 as i64);
+
+            let sixty_four = self.context.new_rvalue_from_long(typ, 64);
+            (high << sixty_four) | self.context.new_cast(None, low, typ)
+        }
+        else if typ.is_i128(self) {
+            let num = self.context.new_rvalue_from_long(self.u64_type, num as u64 as i64);
+            self.context.new_cast(None, num, typ)
+        }
+        else {
+            self.context.new_rvalue_from_long(typ, num as u64 as i64)
+        }
+    }
+
+    fn const_bool(&self, val: bool) -> RValue<'gcc> {
+        self.const_uint(self.type_i1(), val as u64)
+    }
+
+    fn const_i32(&self, i: i32) -> RValue<'gcc> {
+        self.const_int(self.type_i32(), i as i64)
+    }
+
+    fn const_u32(&self, i: u32) -> RValue<'gcc> {
+        self.const_uint(self.type_u32(), i as u64)
+    }
+
+    fn const_u64(&self, i: u64) -> RValue<'gcc> {
+        self.const_uint(self.type_u64(), i)
+    }
+
+    fn const_usize(&self, i: u64) -> RValue<'gcc> {
+        let bit_size = self.data_layout().pointer_size.bits();
+        if bit_size < 64 {
+            // Make sure it doesn't overflow.
+            assert!(i < (1 << bit_size));
+        }
+
+        self.const_uint(self.usize_type, i)
+    }
+
+    fn const_u8(&self, _i: u8) -> RValue<'gcc> {
+        unimplemented!();
+        //self.const_uint(self.type_i8(), i as u64)
+    }
+
+    fn const_real(&self, _t: Type<'gcc>, _val: f64) -> RValue<'gcc> {
+        unimplemented!();
+        //unsafe { llvm::LLVMConstReal(t, val) }
+    }
+
+    fn const_str(&self, s: Symbol) -> (RValue<'gcc>, RValue<'gcc>) {
+        let len = s.as_str().len();
+        let cs = self.const_ptrcast(self.const_cstr(s, false),
+            self.type_ptr_to(self.layout_of(self.tcx.types.str_).gcc_type(self, true)),
+        );
+        (cs, self.const_usize(len as u64))
+    }
+
+    fn const_struct(&self, values: &[RValue<'gcc>], packed: bool) -> RValue<'gcc> {
+        let fields: Vec<_> = values.iter()
+            .map(|value| value.get_type())
+            .collect();
+        // TODO: cache the type? It's anonymous, so probably not.
+        let name = fields.iter().map(|typ| format!("{:?}", typ)).collect::<Vec<_>>().join("_");
+        let typ = self.type_struct(&fields, packed);
+        let structure = self.global_init_func.new_local(None, typ, &name);
+        let struct_type = typ.is_struct().expect("struct type");
+        for (index, value) in values.iter().enumerate() {
+            let field = struct_type.get_field(index as i32);
+            let field_lvalue = structure.access_field(None, field);
+            self.global_init_block.add_assignment(None, field_lvalue, *value);
+        }
+        self.lvalue_to_rvalue(structure)
+    }
+
+    fn const_to_opt_uint(&self, _v: RValue<'gcc>) -> Option<u64> {
+        // TODO
+        None
+        //try_as_const_integral(v).map(|v| unsafe { llvm::LLVMConstIntGetZExtValue(v) })
+    }
+
+    fn const_to_opt_u128(&self, _v: RValue<'gcc>, _sign_ext: bool) -> Option<u128> {
+        // TODO
+        None
+        /*try_as_const_integral(v).and_then(|v| unsafe {
+            let (mut lo, mut hi) = (0u64, 0u64);
+            let success = llvm::LLVMRustConstInt128Get(v, sign_ext, &mut hi, &mut lo);
+            success.then_some(hi_lo_to_u128(lo, hi))
+        })*/
+    }
+
+    fn scalar_to_backend(&self, cv: Scalar, layout: &abi::Scalar, ty: Type<'gcc>) -> RValue<'gcc> {
+        let bitsize = if layout.is_bool() { 1 } else { layout.value.size(self).bits() };
+        match cv {
+            Scalar::Int(ScalarInt::ZST) => {
+                assert_eq!(0, layout.value.size(self).bytes());
+                self.const_undef(self.type_ix(0))
+            }
+            Scalar::Int(int) => {
+                let data = int.assert_bits(layout.value.size(self));
+
+                // FIXME: there are some issues with using the u128 code that follows, so hard-code
+                // the paths for floating-point values.
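As a concrete check on the float path below: every f32 value is exactly representable as an f64, so round-tripping through `as f64` for new_rvalue_from_double loses nothing. For example:

    let bits: u128 = 0x4049_0FDB;                      // raw bits of 3.1415927_f32
    let x = f32::from_bits(bits as u32) as f64;        // == 3.1415927410125732
    assert_eq!(x as f32, f32::from_bits(bits as u32)); // round-trips exactly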
+ if ty == self.float_type { + return self.context.new_rvalue_from_double(ty, f32::from_bits(data as u32) as f64); + } + else if ty == self.double_type { + return self.context.new_rvalue_from_double(ty, f64::from_bits(data as u64)); + } + + let value = self.const_uint_big(self.type_ix(bitsize), data); + if layout.value == Pointer { + self.inttoptr(self.current_block.borrow().expect("block"), value, ty) + } else { + self.const_bitcast(value, ty) + } + } + Scalar::Ptr(ptr, _size) => { + let (alloc_id, offset) = ptr.into_parts(); + let base_addr = + match self.tcx.global_alloc(alloc_id) { + GlobalAlloc::Memory(alloc) => { + let init = const_alloc_to_gcc(self, alloc); + let value = + match alloc.mutability { + Mutability::Mut => self.static_addr_of_mut(init, alloc.align, None), + _ => self.static_addr_of(init, alloc.align, None), + }; + if !self.sess().fewer_names() { + // TODO + //llvm::set_value_name(value, format!("{:?}", ptr.alloc_id).as_bytes()); + } + value + }, + GlobalAlloc::Function(fn_instance) => { + self.get_fn_addr(fn_instance) + }, + GlobalAlloc::Static(def_id) => { + assert!(self.tcx.is_static(def_id)); + self.get_static(def_id) + }, + }; + let ptr_type = base_addr.get_type(); + let base_addr = self.const_bitcast(base_addr, self.usize_type); + let offset = self.context.new_rvalue_from_long(self.usize_type, offset.bytes() as i64); + let ptr = self.const_bitcast(base_addr + offset, ptr_type); + let value = ptr.dereference(None); + if layout.value != Pointer { + self.const_bitcast(value.to_rvalue(), ty) + } + else { + self.const_bitcast(value.get_address(None), ty) + } + } + } + } + + fn const_data_from_alloc(&self, alloc: &Allocation) -> Self::Value { + const_alloc_to_gcc(self, alloc) + } + + fn from_const_alloc(&self, layout: TyAndLayout<'tcx>, alloc: &Allocation, offset: Size) -> PlaceRef<'tcx, RValue<'gcc>> { + assert_eq!(alloc.align, layout.align.abi); + let ty = self.type_ptr_to(layout.gcc_type(self, true)); + let value = + if layout.size == Size::ZERO { + let value = self.const_usize(alloc.align.bytes()); + self.context.new_cast(None, value, ty) + } + else { + let init = const_alloc_to_gcc(self, alloc); + let base_addr = self.static_addr_of(init, alloc.align, None); + + let array = self.const_bitcast(base_addr, self.type_i8p()); + let value = self.context.new_array_access(None, array, self.const_usize(offset.bytes())).get_address(None); + self.const_bitcast(value, ty) + }; + PlaceRef::new_sized(value, layout) + } + + fn const_ptrcast(&self, val: RValue<'gcc>, ty: Type<'gcc>) -> RValue<'gcc> { + self.context.new_cast(None, val, ty) + } +} + +pub trait SignType<'gcc, 'tcx> { + fn is_signed(&self, cx: &CodegenCx<'gcc, 'tcx>) -> bool; + fn is_unsigned(&self, cx: &CodegenCx<'gcc, 'tcx>) -> bool; + fn to_signed(&self, cx: &CodegenCx<'gcc, 'tcx>) -> Type<'gcc>; +} + +impl<'gcc, 'tcx> SignType<'gcc, 'tcx> for Type<'gcc> { + fn is_signed(&self, cx: &CodegenCx<'gcc, 'tcx>) -> bool { + self.is_i8(cx) || self.is_i16(cx) || self.is_i32(cx) || self.is_i64(cx) || self.is_i128(cx) + } + + fn is_unsigned(&self, cx: &CodegenCx<'gcc, 'tcx>) -> bool { + self.is_u8(cx) || self.is_u16(cx) || self.is_u32(cx) || self.is_u64(cx) || self.is_u128(cx) + } + + fn to_signed(&self, cx: &CodegenCx<'gcc, 'tcx>) -> Type<'gcc> { + if self.is_u8(cx) { + cx.i8_type + } + else if self.is_u16(cx) { + cx.i16_type + } + else if self.is_u32(cx) { + cx.i32_type + } + else if self.is_u64(cx) { + cx.i64_type + } + else if self.is_u128(cx) { + cx.i128_type + } + else { + self.clone() + } + } +} + +pub trait 
TypeReflection<'gcc, 'tcx> {
+    fn is_uchar(&self, cx: &CodegenCx<'gcc, 'tcx>) -> bool;
+    fn is_ushort(&self, cx: &CodegenCx<'gcc, 'tcx>) -> bool;
+    fn is_uint(&self, cx: &CodegenCx<'gcc, 'tcx>) -> bool;
+    fn is_ulong(&self, cx: &CodegenCx<'gcc, 'tcx>) -> bool;
+    fn is_ulonglong(&self, cx: &CodegenCx<'gcc, 'tcx>) -> bool;
+
+    fn is_i8(&self, cx: &CodegenCx<'gcc, 'tcx>) -> bool;
+    fn is_u8(&self, cx: &CodegenCx<'gcc, 'tcx>) -> bool;
+    fn is_i16(&self, cx: &CodegenCx<'gcc, 'tcx>) -> bool;
+    fn is_u16(&self, cx: &CodegenCx<'gcc, 'tcx>) -> bool;
+    fn is_i32(&self, cx: &CodegenCx<'gcc, 'tcx>) -> bool;
+    fn is_u32(&self, cx: &CodegenCx<'gcc, 'tcx>) -> bool;
+    fn is_i64(&self, cx: &CodegenCx<'gcc, 'tcx>) -> bool;
+    fn is_u64(&self, cx: &CodegenCx<'gcc, 'tcx>) -> bool;
+    fn is_i128(&self, cx: &CodegenCx<'gcc, 'tcx>) -> bool;
+    fn is_u128(&self, cx: &CodegenCx<'gcc, 'tcx>) -> bool;
+
+    fn is_f32(&self, cx: &CodegenCx<'gcc, 'tcx>) -> bool;
+    fn is_f64(&self, cx: &CodegenCx<'gcc, 'tcx>) -> bool;
+}
+
+impl<'gcc, 'tcx> TypeReflection<'gcc, 'tcx> for Type<'gcc> {
+    fn is_uchar(&self, cx: &CodegenCx<'gcc, 'tcx>) -> bool {
+        self.unqualified() == cx.u8_type
+    }
+
+    fn is_ushort(&self, cx: &CodegenCx<'gcc, 'tcx>) -> bool {
+        self.unqualified() == cx.u16_type
+    }
+
+    fn is_uint(&self, cx: &CodegenCx<'gcc, 'tcx>) -> bool {
+        self.unqualified() == cx.uint_type
+    }
+
+    fn is_ulong(&self, cx: &CodegenCx<'gcc, 'tcx>) -> bool {
+        self.unqualified() == cx.ulong_type
+    }
+
+    fn is_ulonglong(&self, cx: &CodegenCx<'gcc, 'tcx>) -> bool {
+        self.unqualified() == cx.ulonglong_type
+    }
+
+    fn is_i8(&self, cx: &CodegenCx<'gcc, 'tcx>) -> bool {
+        self.unqualified() == cx.i8_type
+    }
+
+    fn is_u8(&self, cx: &CodegenCx<'gcc, 'tcx>) -> bool {
+        self.unqualified() == cx.u8_type
+    }
+
+    fn is_i16(&self, cx: &CodegenCx<'gcc, 'tcx>) -> bool {
+        self.unqualified() == cx.i16_type
+    }
+
+    fn is_u16(&self, cx: &CodegenCx<'gcc, 'tcx>) -> bool {
+        self.unqualified() == cx.u16_type
+    }
+
+    fn is_i32(&self, cx: &CodegenCx<'gcc, 'tcx>) -> bool {
+        self.unqualified() == cx.i32_type
+    }
+
+    fn is_u32(&self, cx: &CodegenCx<'gcc, 'tcx>) -> bool {
+        self.unqualified() == cx.u32_type
+    }
+
+    fn is_i64(&self, cx: &CodegenCx<'gcc, 'tcx>) -> bool {
+        self.unqualified() == cx.i64_type
+    }
+
+    fn is_u64(&self, cx: &CodegenCx<'gcc, 'tcx>) -> bool {
+        self.unqualified() == cx.u64_type
+    }
+
+    fn is_i128(&self, cx: &CodegenCx<'gcc, 'tcx>) -> bool {
+        self.unqualified() == cx.context.new_c_type(CType::Int128t)
+    }
+
+    fn is_u128(&self, cx: &CodegenCx<'gcc, 'tcx>) -> bool {
+        self.unqualified() == cx.context.new_c_type(CType::UInt128t)
+    }
+
+    fn is_f32(&self, cx: &CodegenCx<'gcc, 'tcx>) -> bool {
+        self.unqualified() == cx.context.new_type::<f32>()
+    }
+
+    fn is_f64(&self, cx: &CodegenCx<'gcc, 'tcx>) -> bool {
+        self.unqualified() == cx.context.new_type::<f64>()
+    }
+}
diff --git a/src/consts.rs b/src/consts.rs
new file mode 100644
index 0000000000000..2cdd7fcab8b46
--- /dev/null
+++ b/src/consts.rs
@@ -0,0 +1,527 @@
+use gccjit::{RValue, Type};
+use rustc_codegen_ssa::traits::{BaseTypeMethods, ConstMethods, DerivedTypeMethods, StaticMethods};
+use rustc_hir as hir;
+use rustc_hir::Node;
+use rustc_middle::{bug, span_bug};
+use rustc_middle::middle::codegen_fn_attrs::{CodegenFnAttrFlags, CodegenFnAttrs};
+use rustc_middle::mir::mono::MonoItem;
+use rustc_middle::ty::{self, Instance, Ty};
+use rustc_mir::interpret::{self, Allocation, ErrorHandled, Scalar as InterpScalar, read_target_uint};
+use rustc_span::Span;
+use rustc_span::def_id::DefId;
+use
rustc_target::abi::{self, Align, HasDataLayout, LayoutOf, Primitive, Size}; + +use crate::base; +use crate::context::CodegenCx; +use crate::mangled_std_symbols::{ARGC, ARGV, ARGV_INIT_ARRAY}; +use crate::type_of::LayoutGccExt; + +impl<'gcc, 'tcx> CodegenCx<'gcc, 'tcx> { + pub fn const_bitcast(&self, value: RValue<'gcc>, typ: Type<'gcc>) -> RValue<'gcc> { + if value.get_type() == self.bool_type.make_pointer() { + if let Some(pointee) = typ.get_pointee() { + if pointee.is_vector().is_some() { + panic!() + } + } + } + self.context.new_bitcast(None, value, typ) + } +} + +impl<'gcc, 'tcx> StaticMethods for CodegenCx<'gcc, 'tcx> { + fn static_addr_of(&self, cv: RValue<'gcc>, align: Align, kind: Option<&str>) -> RValue<'gcc> { + if let Some(global_value) = self.const_globals.borrow().get(&cv) { + // TODO + /*unsafe { + // Upgrade the alignment in cases where the same constant is used with different + // alignment requirements + let llalign = align.bytes() as u32; + if llalign > llvm::LLVMGetAlignment(gv) { + llvm::LLVMSetAlignment(gv, llalign); + } + }*/ + return *global_value; + } + let global_value = self.static_addr_of_mut(cv, align, kind); + // TODO + /*unsafe { + llvm::LLVMSetGlobalConstant(global_value, True); + }*/ + self.const_globals.borrow_mut().insert(cv, global_value); + global_value + } + + fn codegen_static(&self, def_id: DefId, is_mutable: bool) { + let attrs = self.tcx.codegen_fn_attrs(def_id); + + let instance = Instance::mono(self.tcx, def_id); + let name = &*self.tcx.symbol_name(instance).name; + + let (value, alloc) = + match codegen_static_initializer(&self, def_id) { + Ok(value) => value, + // Error has already been reported + Err(_) => return, + }; + + let is_tls = attrs.flags.contains(CodegenFnAttrFlags::THREAD_LOCAL); + let global = self.get_static(def_id); + + // boolean SSA values are i1, but they have to be stored in i8 slots, + // otherwise some LLVM optimization passes don't work as expected + let val_llty = self.val_ty(value); + let value = + if val_llty == self.type_i1() { + //val_llty = self.type_i8(); + unimplemented!(); + //llvm::LLVMConstZExt(value, val_llty) + } + else { + value + }; + + let instance = Instance::mono(self.tcx, def_id); + let ty = instance.ty(self.tcx, ty::ParamEnv::reveal_all()); + let gcc_type = self.layout_of(ty).gcc_type(self, true); + + let global = + if val_llty == gcc_type { + global + } + else { + // If we created the global with the wrong type, + // correct the type. + /*let name = llvm::get_value_name(global).to_vec(); + llvm::set_value_name(global, b""); + + let linkage = llvm::LLVMRustGetLinkage(global); + let visibility = llvm::LLVMRustGetVisibility(global);*/ + + let new_global = self.get_or_insert_global(&name, val_llty, is_tls, attrs.link_section); + + /*llvm::LLVMRustSetLinkage(new_global, linkage); + llvm::LLVMRustSetVisibility(new_global, visibility);*/ + + // To avoid breaking any invariants, we leave around the old + // global for the moment; we'll replace all references to it + // with the new global later. (See base::codegen_backend.) + //self.statics_to_rauw.borrow_mut().push((global, new_global)); + new_global + }; + // TODO + //set_global_alignment(&self, global, self.align_of(ty)); + //llvm::LLVMSetInitializer(global, value); + let value = self.rvalue_as_lvalue(value); + let value = value.get_address(None); + let dest_typ = global.get_type(); + let value = self.context.new_cast(None, value, dest_typ); + + // NOTE: do not init the variables related to argc/argv because it seems we cannot + // overwrite those variables. 
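+        // Editorial sketch: libgccjit does not yet expose a way to attach an
+        // initializer to a global, so the block below emits, into the per-CGU
+        // `__gccGlobalInit*` function, the moral equivalent of this C fragment:
+        //
+        //     memcpy(&global, &initializer_value, initializer_size);
+        //
+        // meaning the static only holds its value once that init function has run.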
+        // FIXME: correctly support global variable initialization.
+        let skip_init = [
+            ARGV_INIT_ARRAY,
+            ARGC,
+            ARGV,
+        ];
+        if !skip_init.iter().any(|symbol_name| name.starts_with(symbol_name)) {
+            // TODO: switch to set_initializer when libgccjit supports that.
+            let memcpy = self.context.get_builtin_function("memcpy");
+            let dst = self.context.new_cast(None, global, self.type_i8p());
+            let src = self.context.new_cast(None, value, self.type_ptr_to(self.type_void()));
+            let size = self.context.new_rvalue_from_long(self.sizet_type, alloc.size().bytes() as i64);
+            self.global_init_block.add_eval(None, self.context.new_call(None, memcpy, &[dst, src, size]));
+        }
+
+        // As an optimization, all shared statics which do not have interior
+        // mutability are placed into read-only memory.
+        if !is_mutable {
+            if self.type_is_freeze(ty) {
+                // TODO
+                //llvm::LLVMSetGlobalConstant(global, llvm::True);
+            }
+        }
+
+        //debuginfo::create_global_var_metadata(&self, def_id, global);
+
+        if attrs.flags.contains(CodegenFnAttrFlags::THREAD_LOCAL) {
+            // Do not allow LLVM to change the alignment of a TLS on macOS.
+            //
+            // By default a global's alignment can be freely increased.
+            // This allows LLVM to generate more performant instructions
+            // e.g., using load-aligned into a SIMD register.
+            //
+            // However, on macOS 10.10 or below, the dynamic linker does not
+            // respect any alignment given on the TLS (radar 24221680).
+            // This violates the alignment assumption, causing a segfault at runtime.
+            //
+            // This bug is very easy to trigger. In `println!` and `panic!`, the
+            // `LOCAL_STDOUT`/`LOCAL_STDERR` handles are stored in a TLS, in which
+            // the values are `mem::replace`d on initialization.
+            // The implementation of `mem::replace` will use SIMD
+            // whenever the size is 32 bytes or higher. LLVM notices SIMD is used
+            // and tries to align `LOCAL_STDOUT`/`LOCAL_STDERR` to a 32-byte boundary,
+            // which macOS's dyld disregards, causing crashes
+            // (see issues #51794, #51758, #50867, #48866 and #44056).
+            //
+            // To work around the bug, we trick LLVM into not increasing
+            // the global's alignment by explicitly assigning a section to it
+            // (equivalent to automatically generating a `#[link_section]` attribute).
+            // See the comment in the `GlobalValue::canIncreaseAlignment()` function
+            // of `lib/IR/Globals.cpp` for why this works.
+            //
+            // When the alignment is not increased, the optimized `mem::replace`
+            // will use load-unaligned instructions instead, thus avoiding the crash.
+            //
+            // We could remove this hack whenever we decide to drop macOS 10.10 support.
+            if self.tcx.sess.target.options.is_like_osx {
+                // The `inspect` method is okay here because we checked relocations, and
+                // because we are doing this access to inspect the final interpreter state
+                // (not as part of the interpreter execution).
+                //
+                // FIXME: This check requires that the (arbitrary) value of undefined bytes
+                // happens to be zero. Instead, we should only check the value of defined bytes
+                // and set all undefined bytes to zero if this allocation is headed for the
+                // BSS.
+ /*let all_bytes_are_zero = alloc.relocations().is_empty() + && alloc + .inspect_with_uninit_and_ptr_outside_interpreter(0..alloc.len()) + .iter() + .all(|&byte| byte == 0); + + let sect_name = if all_bytes_are_zero { + CStr::from_bytes_with_nul_unchecked(b"__DATA,__thread_bss\0") + } else { + CStr::from_bytes_with_nul_unchecked(b"__DATA,__thread_data\0") + };*/ + unimplemented!(); + //llvm::LLVMSetSection(global, sect_name.as_ptr()); + } + } + + // Wasm statics with custom link sections get special treatment as they + // go into custom sections of the wasm executable. + if self.tcx.sess.opts.target_triple.triple().starts_with("wasm32") { + if let Some(_section) = attrs.link_section { + unimplemented!(); + /*let section = llvm::LLVMMDStringInContext( + self.llcx, + section.as_str().as_ptr().cast(), + section.as_str().len() as c_uint, + ); + assert!(alloc.relocations().is_empty()); + + // The `inspect` method is okay here because we checked relocations, and + // because we are doing this access to inspect the final interpreter state (not + // as part of the interpreter execution). + let bytes = + alloc.inspect_with_uninit_and_ptr_outside_interpreter(0..alloc.len()); + let alloc = llvm::LLVMMDStringInContext( + self.llcx, + bytes.as_ptr().cast(), + bytes.len() as c_uint, + ); + let data = [section, alloc]; + let meta = llvm::LLVMMDNodeInContext(self.llcx, data.as_ptr(), 2); + llvm::LLVMAddNamedMetadataOperand( + self.llmod, + "wasm.custom_sections\0".as_ptr().cast(), + meta, + );*/ + } + } else { + // TODO + //base::set_link_section(global, &attrs); + } + + if attrs.flags.contains(CodegenFnAttrFlags::USED) { + self.add_used_global(global); + } + } + + /// Add a global value to a list to be stored in the `llvm.used` variable, an array of i8*. + fn add_used_global(&self, _global: RValue<'gcc>) { + // TODO + //let cast = self.context.new_cast(None, global, self.type_i8p()); + //self.used_statics.borrow_mut().push(cast); + } +} + +impl<'gcc, 'tcx> CodegenCx<'gcc, 'tcx> { + pub fn static_addr_of_mut(&self, cv: RValue<'gcc>, align: Align, kind: Option<&str>) -> RValue<'gcc> { + let (name, gv) = + match kind { + Some(kind) if !self.tcx.sess.fewer_names() => { + let name = self.generate_local_symbol_name(kind); + // TODO: check if it's okay that TLS is off here. + // TODO: check if it's okay that link_section is None here. + // TODO: set alignment here as well. + let gv = self.define_global(&name[..], self.val_ty(cv), false, None).unwrap_or_else(|| { + bug!("symbol `{}` is already defined", name); + }); + //llvm::LLVMRustSetLinkage(gv, llvm::Linkage::PrivateLinkage); + (name, gv) + } + _ => { + let index = self.global_gen_sym_counter.get(); + let name = format!("global_{}_{}", index, self.codegen_unit.name()); + let typ = self.val_ty(cv).get_aligned(align.bytes()); + let global = self.define_private_global(typ); + (name, global) + }, + }; + // FIXME: I think the name coming from generate_local_symbol_name() above cannot be used + // globally. + // NOTE: global seems to only be global in a module. So save the name instead of the value + // to import it later. 
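+        // Editorial example: with the fallback branch above, the first anonymous
+        // constant of a codegen unit named `cgu0` would get a name like
+        // `global_0_cgu0` (illustrative); it is that name, not the value, that is
+        // recorded below so other modules can re-import the global.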
+ self.global_names.borrow_mut().insert(cv, name); + self.global_init_block.add_assignment(None, gv.dereference(None), cv); + //llvm::SetUnnamedAddress(gv, llvm::UnnamedAddr::Global); + gv + } + + pub fn get_static(&self, def_id: DefId) -> RValue<'gcc> { + let instance = Instance::mono(self.tcx, def_id); + let fn_attrs = self.tcx.codegen_fn_attrs(def_id); + if let Some(&global) = self.instances.borrow().get(&instance) { + /*let attrs = self.tcx.codegen_fn_attrs(def_id); + let name = &*self.tcx.symbol_name(instance).name; + let name = + if let Some(linkage) = attrs.linkage { + // This is to match what happens in check_and_apply_linkage. + Cow::from(format!("_rust_extern_with_linkage_{}", name)) + } + else { + Cow::from(name) + }; + let global = self.context.new_global(None, GlobalKind::Imported, global.get_type(), &name) + .get_address(None); + self.global_names.borrow_mut().insert(global, name.to_string());*/ + return global; + } + + let defined_in_current_codegen_unit = + self.codegen_unit.items().contains_key(&MonoItem::Static(def_id)); + assert!( + !defined_in_current_codegen_unit, + "consts::get_static() should always hit the cache for \ + statics defined in the same CGU, but did not for `{:?}`", + def_id + ); + + let ty = instance.ty(self.tcx, ty::ParamEnv::reveal_all()); + let sym = self.tcx.symbol_name(instance).name; + + //debug!("get_static: sym={} instance={:?}", sym, instance); + + let global = + if let Some(def_id) = def_id.as_local() { + let id = self.tcx.hir().local_def_id_to_hir_id(def_id); + let llty = self.layout_of(ty).gcc_type(self, true); + // FIXME: refactor this to work without accessing the HIR + let global = match self.tcx.hir().get(id) { + Node::Item(&hir::Item { span, kind: hir::ItemKind::Static(..), .. }) => { + if let Some(global) = self.get_declared_value(&sym) { + if self.val_ty(global) != self.type_ptr_to(llty) { + span_bug!(span, "Conflicting types for static"); + } + } + + let is_tls = fn_attrs.flags.contains(CodegenFnAttrFlags::THREAD_LOCAL); + let global = self.declare_global(&sym, llty, is_tls, fn_attrs.link_section); + + if !self.tcx.is_reachable_non_generic(def_id) { + /*unsafe { + llvm::LLVMRustSetVisibility(global, llvm::Visibility::Hidden); + }*/ + } + + global + } + + Node::ForeignItem(&hir::ForeignItem { + span, + kind: hir::ForeignItemKind::Static(..), + .. + }) => { + let fn_attrs = self.tcx.codegen_fn_attrs(def_id); + check_and_apply_linkage(&self, &fn_attrs, ty, sym, span) + } + + item => bug!("get_static: expected static, found {:?}", item), + }; + + //debug!("get_static: sym={} attrs={:?}", sym, attrs); + + global + } + else { + // FIXME(nagisa): perhaps the map of externs could be offloaded to llvm somehow? + //debug!("get_static: sym={} item_attr={:?}", sym, self.tcx.item_attrs(def_id)); + + let attrs = self.tcx.codegen_fn_attrs(def_id); + let span = self.tcx.def_span(def_id); + let global = check_and_apply_linkage(&self, &attrs, ty, sym, span); + + let needs_dll_storage_attr = false; /*self.use_dll_storage_attrs && !self.tcx.is_foreign_item(def_id) && + // ThinLTO can't handle this workaround in all cases, so we don't + // emit the attrs. Instead we make them unnecessary by disallowing + // dynamic linking when linker plugin based LTO is enabled. + !self.tcx.sess.opts.cg.linker_plugin_lto.enabled();*/ + + // If this assertion triggers, there's something wrong with commandline + // argument validation. 
+                debug_assert!(
+                    !(self.tcx.sess.opts.cg.linker_plugin_lto.enabled()
+                        && self.tcx.sess.target.options.is_like_msvc
+                        && self.tcx.sess.opts.cg.prefer_dynamic)
+                );
+
+                if needs_dll_storage_attr {
+                    // This item is external but not foreign, i.e., it originates from an external Rust
+                    // crate. Since we don't know whether this crate will be linked dynamically or
+                    // statically in the final application, we always mark such symbols as 'dllimport'.
+                    // If final linkage happens to be static, we rely on compiler-emitted __imp_ stubs
+                    // to make things work.
+                    //
+                    // However, in some scenarios we defer emission of statics to downstream
+                    // crates, so there are cases where a static with an upstream DefId
+                    // is actually present in the current crate. We can find out via the
+                    // is_codegened_item query.
+                    if !self.tcx.is_codegened_item(def_id) {
+                        unimplemented!();
+                        /*unsafe {
+                            llvm::LLVMSetDLLStorageClass(global, llvm::DLLStorageClass::DllImport);
+                        }*/
+                    }
+                }
+                global
+            };
+
+        /*if self.use_dll_storage_attrs && self.tcx.is_dllimport_foreign_item(def_id) {
+            // For foreign (native) libs we know the exact storage type to use.
+            unsafe {
+                llvm::LLVMSetDLLStorageClass(global, llvm::DLLStorageClass::DllImport);
+            }
+        }*/
+
+        self.instances.borrow_mut().insert(instance, global);
+        global
+    }
+}
+
+pub fn const_alloc_to_gcc<'gcc, 'tcx>(cx: &CodegenCx<'gcc, 'tcx>, alloc: &Allocation) -> RValue<'gcc> {
+    let mut llvals = Vec::with_capacity(alloc.relocations().len() + 1);
+    let dl = cx.data_layout();
+    let pointer_size = dl.pointer_size.bytes() as usize;
+
+    let mut next_offset = 0;
+    for &(offset, alloc_id) in alloc.relocations().iter() {
+        let offset = offset.bytes();
+        assert_eq!(offset as usize as u64, offset);
+        let offset = offset as usize;
+        if offset > next_offset {
+            // This `inspect` is okay since we have checked that it is not within a relocation, it
+            // is within the bounds of the allocation, and it doesn't affect interpreter execution
+            // (we inspect the result after interpreter execution). Any undef byte is replaced with
+            // some arbitrary byte value.
+            //
+            // FIXME: relay undef bytes to codegen as undef const bytes
+            let bytes = alloc.inspect_with_uninit_and_ptr_outside_interpreter(next_offset..offset);
+            llvals.push(cx.const_bytes(bytes));
+        }
+        let ptr_offset =
+            read_target_uint(dl.endian,
+                // This `inspect` is okay since it is within the bounds of the allocation, it doesn't
+                // affect interpreter execution (we inspect the result after interpreter execution),
+                // and we properly interpret the relocation as a relocation pointer offset.
+                alloc.inspect_with_uninit_and_ptr_outside_interpreter(offset..(offset + pointer_size)),
+            )
+            .expect("const_alloc_to_gcc: could not read relocation pointer")
+            as u64;
+        llvals.push(cx.scalar_to_backend(
+            InterpScalar::from_pointer(
+                interpret::Pointer::new(alloc_id, Size::from_bytes(ptr_offset)),
+                &cx.tcx,
+            ),
+            &abi::Scalar { value: Primitive::Pointer, valid_range: 0..=!0 },
+            cx.type_i8p(),
+        ));
+        next_offset = offset + pointer_size;
+    }
+    if alloc.len() >= next_offset {
+        let range = next_offset..alloc.len();
+        // This `inspect` is okay since we have checked that it is after all relocations, it is
+        // within the bounds of the allocation, and it doesn't affect interpreter execution (we
+        // inspect the result after interpreter execution). Any undef byte is replaced with some
+        // arbitrary byte value.
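+        // Editorial worked example: for an allocation holding 4 data bytes, one
+        // 8-byte relocation (an embedded pointer), and 4 trailing bytes, the loop
+        // above pushes `const_bytes` for bytes 0..4, then the relocated pointer via
+        // `scalar_to_backend`, and the branch below pushes `const_bytes` for the
+        // remaining bytes 12..16.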
+        //
+        // FIXME: relay undef bytes to codegen as undef const bytes
+        let bytes = alloc.inspect_with_uninit_and_ptr_outside_interpreter(range);
+        llvals.push(cx.const_bytes(bytes));
+    }
+
+    cx.const_struct(&llvals, true)
+}
+
+pub fn codegen_static_initializer<'gcc, 'tcx>(cx: &CodegenCx<'gcc, 'tcx>, def_id: DefId) -> Result<(RValue<'gcc>, &'tcx Allocation), ErrorHandled> {
+    let alloc = cx.tcx.eval_static_initializer(def_id)?;
+    Ok((const_alloc_to_gcc(cx, alloc), alloc))
+}
+
+fn check_and_apply_linkage<'gcc, 'tcx>(cx: &CodegenCx<'gcc, 'tcx>, attrs: &CodegenFnAttrs, ty: Ty<'tcx>, sym: &str, span: Span) -> RValue<'gcc> {
+    let is_tls = attrs.flags.contains(CodegenFnAttrFlags::THREAD_LOCAL);
+    let llty = cx.layout_of(ty).gcc_type(cx, true);
+    if let Some(linkage) = attrs.linkage {
+        //debug!("get_static: sym={} linkage={:?}", sym, linkage);
+
+        // If this is a static with a linkage specified, then we need to handle
+        // it a little specially. The typesystem prevents things like &T and
+        // extern "C" fn() from being non-null, so we can't just declare a
+        // static and call it a day. Some linkages (like weak) will make it such
+        // that the static actually has a null value.
+        let llty2 =
+            if let ty::RawPtr(ref mt) = ty.kind() {
+                cx.layout_of(mt.ty).gcc_type(cx, true)
+            }
+            else {
+                cx.sess().span_fatal(
+                    span,
+                    "must have type `*const T` or `*mut T` due to `#[linkage]` attribute",
+                )
+            };
+        // Declare a symbol `foo` with the desired linkage.
+        let global1 = cx.declare_global_with_linkage(&sym, llty2, base::global_linkage_to_gcc(linkage));
+
+        // Declare an internal global `extern_with_linkage_foo` which
+        // is initialized with the address of `foo`. If `foo` is
+        // discarded during linking (for example, if `foo` has weak
+        // linkage and there are no definitions), then
+        // `extern_with_linkage_foo` will instead be initialized to
+        // zero.
+        let mut real_name = "_rust_extern_with_linkage_".to_string();
+        real_name.push_str(&sym);
+        let global2 =
+            cx.define_global(&real_name, llty, is_tls, attrs.link_section).unwrap_or_else(|| {
+                cx.sess().span_fatal(span, &format!("symbol `{}` is already defined", &sym))
+            });
+        //llvm::LLVMRustSetLinkage(global2, llvm::Linkage::InternalLinkage);
+        let lvalue = global2.dereference(None);
+        cx.global_init_block.add_assignment(None, lvalue, global1);
+        //llvm::LLVMSetInitializer(global2, global1);
+        global2
+    }
+    else {
+        // Generate an external declaration.
+        // FIXME(nagisa): investigate whether it can be changed into define_global
+
+        // Thread-local statics in some other crate need to *always* be linked
+        // against in a thread-local fashion, so we need to be sure to apply the
+        // thread-local attribute locally if it was present remotely. If we
+        // don't do this then linker errors can be generated where the linker
+        // complains that one object file has a thread-local version of the
+        // symbol and another one doesn't.
+        cx.declare_global(&sym, llty, is_tls, attrs.link_section)
+    }
+}
diff --git a/src/context.rs b/src/context.rs
new file mode 100644
index 0000000000000..9cbbee772c5fb
--- /dev/null
+++ b/src/context.rs
@@ -0,0 +1,491 @@
+use std::cell::{Cell, RefCell};
+
+use gccjit::{
+    Block,
+    Context,
+    CType,
+    Function,
+    FunctionType,
+    LValue,
+    RValue,
+    Struct,
+    Type,
+};
+use rustc_codegen_ssa::base::wants_msvc_seh;
+use rustc_codegen_ssa::traits::{
+    BackendTypes,
+    BaseTypeMethods,
+    MiscMethods,
+};
+use rustc_data_structures::base_n;
+use rustc_data_structures::fx::{FxHashMap, FxHashSet};
+use rustc_middle::bug;
+use rustc_middle::mir::mono::CodegenUnit;
+use rustc_middle::ty::{self, Instance, ParamEnv, PolyExistentialTraitRef, Ty, TyCtxt};
+use rustc_middle::ty::layout::{HasParamEnv, HasTyCtxt, LayoutError, TyAndLayout};
+use rustc_session::Session;
+use rustc_span::{Span, Symbol, DUMMY_SP};
+use rustc_target::abi::{HasDataLayout, LayoutOf, PointeeInfo, Size, TargetDataLayout, VariantIdx};
+use rustc_target::spec::{HasTargetSpec, Target, TlsModel};
+
+use crate::callee::get_fn;
+use crate::declare::mangle_name;
+
+#[derive(Clone)]
+pub struct FuncSig<'gcc> {
+    pub params: Vec<Type<'gcc>>,
+    pub return_type: Type<'gcc>,
+}
+
+pub struct CodegenCx<'gcc, 'tcx> {
+    pub check_overflow: bool,
+    pub codegen_unit: &'tcx CodegenUnit<'tcx>,
+    pub context: &'gcc Context<'gcc>,
+
+    // TODO: First set it to a dummy block to avoid using Option?
+    pub current_block: RefCell<Option<Block<'gcc>>>,
+    pub current_func: RefCell<Option<Function<'gcc>>>,
+    pub normal_function_addresses: RefCell<FxHashSet<RValue<'gcc>>>,
+
+    /// The function where globals are initialized.
+    pub global_init_func: Function<'gcc>,
+    pub global_init_block: Block<'gcc>,
+
+    pub functions: RefCell<FxHashMap<String, Function<'gcc>>>,
+
+    pub tls_model: gccjit::TlsModel,
+
+    pub bool_type: Type<'gcc>,
+    pub i8_type: Type<'gcc>,
+    pub i16_type: Type<'gcc>,
+    pub i32_type: Type<'gcc>,
+    pub i64_type: Type<'gcc>,
+    pub i128_type: Type<'gcc>,
+    pub isize_type: Type<'gcc>,
+
+    pub u8_type: Type<'gcc>,
+    pub u16_type: Type<'gcc>,
+    pub u32_type: Type<'gcc>,
+    pub u64_type: Type<'gcc>,
+    pub u128_type: Type<'gcc>,
+    pub usize_type: Type<'gcc>,
+
+    pub int_type: Type<'gcc>,
+    pub uint_type: Type<'gcc>,
+    pub long_type: Type<'gcc>,
+    pub ulong_type: Type<'gcc>,
+    pub ulonglong_type: Type<'gcc>,
+    pub sizet_type: Type<'gcc>,
+
+    pub float_type: Type<'gcc>,
+    pub double_type: Type<'gcc>,
+
+    pub linkage: Cell<FunctionType>,
+    pub scalar_types: RefCell<FxHashMap<Ty<'tcx>, Type<'gcc>>>,
+    pub types: RefCell<FxHashMap<(Ty<'tcx>, Option<VariantIdx>), Type<'gcc>>>,
+    pub tcx: TyCtxt<'tcx>,
+
+    pub struct_types: RefCell<FxHashMap<Vec<Type<'gcc>>, Type<'gcc>>>,
+
+    pub types_with_fields_to_set: RefCell<FxHashMap<Type<'gcc>, (Struct<'gcc>, TyAndLayout<'tcx>)>>,
+
+    /// Cache instances of monomorphic and polymorphic items
+    pub instances: RefCell<FxHashMap<Instance<'tcx>, RValue<'gcc>>>,
+    /// Cache generated vtables
+    pub vtables: RefCell<FxHashMap<(Ty<'tcx>, Option<PolyExistentialTraitRef<'tcx>>), RValue<'gcc>>>,
+
+    /// Cache of emitted const globals (value -> global)
+    pub const_globals: RefCell<FxHashMap<RValue<'gcc>, RValue<'gcc>>>,
+
+    pub init_argv_var: RefCell<String>,
+    pub argv_initialized: Cell<bool>,
+
+    /// Cache of constant strings,
+    pub const_cstr_cache: RefCell<FxHashMap<Symbol, LValue<'gcc>>>,
+
+    /// Cache of globals.
+    pub globals: RefCell<FxHashMap<String, RValue<'gcc>>>,
+    // TODO: remove global_names.
+    pub global_names: RefCell<FxHashMap<RValue<'gcc>, String>>,
+
+    /// A counter that is used for generating local symbol names
+    local_gen_sym_counter: Cell<usize>,
+    pub global_gen_sym_counter: Cell<usize>,
+
+    eh_personality: Cell<Option<RValue<'gcc>>>,
+
+    pub pointee_infos: RefCell<FxHashMap<(Ty<'tcx>, Size), Option<PointeeInfo>>>,
+
+    /// NOTE: a hack is used because the rustc API is not suitable for libgccjit and as such,
+    /// `const_undef()` returns structs as pointers so that they can later be assigned a value.
+    /// As such, this set remembers which of these pointers were returned by this function so that
+    /// they can be dereferenced later.
+    /// FIXME: fix the rustc API to avoid having this hack.
+    pub structs_as_pointer: RefCell<FxHashSet<RValue<'gcc>>>,
+
+    /// Store pointers of different types for safety.
+    /// When casting the values back to their original types, check that they are indeed that type
+    /// with these sets.
+    /// FIXME: remove when the API supports more types.
+    #[cfg(debug_assertions)]
+    lvalues: RefCell<FxHashSet<LValue<'gcc>>>,
+}
+
+impl<'gcc, 'tcx> CodegenCx<'gcc, 'tcx> {
+    pub fn new(context: &'gcc Context<'gcc>, codegen_unit: &'tcx CodegenUnit<'tcx>, tcx: TyCtxt<'tcx>) -> Self {
+        let check_overflow = tcx.sess.overflow_checks();
+        // TODO: fix this mess. libgccjit seems to return a random type when using new_int_type().
+        //let isize_type = context.new_int_type((tcx.data_layout.pointer_size.bits() / 8) as i32, true);
+        let isize_type = context.new_c_type(CType::LongLong);
+        //let usize_type = context.new_int_type((tcx.data_layout.pointer_size.bits() / 8) as i32, false);
+        let usize_type = context.new_c_type(CType::ULongLong);
+        let bool_type = context.new_type::<bool>();
+        let i8_type = context.new_type::<i8>();
+        let i16_type = context.new_type::<i16>();
+        let i32_type = context.new_type::<i32>();
+        let i64_type = context.new_c_type(CType::LongLong);
+        let i128_type = context.new_c_type(CType::Int128t).get_aligned(8); // TODO: should this be hard-coded?
+        let u8_type = context.new_type::<u8>();
+        let u16_type = context.new_type::<u16>();
+        let u32_type = context.new_type::<u32>();
+        let u64_type = context.new_c_type(CType::ULongLong);
+        let u128_type = context.new_c_type(CType::UInt128t).get_aligned(8); // TODO: should this be hard-coded?
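+        // Editorial note: with the `CType` workaround above, these types are only
+        // correct on targets where `long long` is 64 bits and pointers are 8 bytes
+        // wide (e.g. x86_64-unknown-linux-gnu); the `assert_eq!` calls below make
+        // that assumption explicit.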
+
+        let tls_model = to_gcc_tls_mode(tcx.sess.tls_model());
+
+        let float_type = context.new_type::<f32>();
+        let double_type = context.new_type::<f64>();
+
+        let int_type = context.new_c_type(CType::Int);
+        let uint_type = context.new_c_type(CType::UInt);
+        let long_type = context.new_c_type(CType::Long);
+        let ulong_type = context.new_c_type(CType::ULong);
+        let ulonglong_type = context.new_c_type(CType::ULongLong);
+        let sizet_type = context.new_c_type(CType::SizeT);
+
+        assert_eq!(isize_type, i64_type);
+        assert_eq!(usize_type, u64_type);
+
+        let mut functions = FxHashMap::default();
+        let builtins = [
+            "__builtin_unreachable", "abort", "__builtin_expect", "__builtin_add_overflow", "__builtin_mul_overflow",
+            "__builtin_saddll_overflow", /*"__builtin_sadd_overflow",*/ "__builtin_smulll_overflow", /*"__builtin_smul_overflow",*/
+            "__builtin_ssubll_overflow", /*"__builtin_ssub_overflow",*/ "__builtin_sub_overflow", "__builtin_uaddll_overflow",
+            "__builtin_uadd_overflow", "__builtin_umulll_overflow", "__builtin_umul_overflow", "__builtin_usubll_overflow",
+            "__builtin_usub_overflow", "sqrtf", "sqrt", "__builtin_powif", "__builtin_powi", "sinf", "sin", "cosf", "cos",
+            "powf", "pow", "expf", "exp", "exp2f", "exp2", "logf", "log", "log10f", "log10", "log2f", "log2", "fmaf",
+            "fma", "fabsf", "fabs", "fminf", "fmin", "fmaxf", "fmax", "copysignf", "copysign", "floorf", "floor", "ceilf",
+            "ceil", "truncf", "trunc", "rintf", "rint", "nearbyintf", "nearbyint", "roundf", "round",
+            "__builtin_expect_with_probability",
+        ];
+
+        for builtin in builtins.iter() {
+            functions.insert(builtin.to_string(), context.get_builtin_function(builtin));
+        }
+
+        let global_init_func = context.new_function(None, FunctionType::Exported, context.new_type::<()>(), &[],
+            &format!("__gccGlobalInit{}", unit_name(&codegen_unit)), false);
+        let global_init_block = global_init_func.new_block("initial");
+
+        Self {
+            check_overflow,
+            codegen_unit,
+            context,
+            current_block: RefCell::new(None),
+            current_func: RefCell::new(None),
+            normal_function_addresses: Default::default(),
+            functions: RefCell::new(functions),
+            global_init_func,
+            global_init_block,
+
+            tls_model,
+
+            bool_type,
+            i8_type,
+            i16_type,
+            i32_type,
+            i64_type,
+            i128_type,
+            isize_type,
+            usize_type,
+            u8_type,
+            u16_type,
+            u32_type,
+            u64_type,
+            u128_type,
+            int_type,
+            uint_type,
+            long_type,
+            ulong_type,
+            ulonglong_type,
+            sizet_type,
+
+            float_type,
+            double_type,
+
+            linkage: Cell::new(FunctionType::Internal),
+            #[cfg(debug_assertions)]
+            lvalues: Default::default(),
+            instances: Default::default(),
+            vtables: Default::default(),
+            const_globals: Default::default(),
+            init_argv_var: RefCell::new(String::new()),
+            argv_initialized: Cell::new(false),
+            const_cstr_cache: Default::default(),
+            global_names: Default::default(),
+            globals: Default::default(),
+            scalar_types: Default::default(),
+            types: Default::default(),
+            tcx,
+            struct_types: Default::default(),
+            types_with_fields_to_set: Default::default(),
+            local_gen_sym_counter: Cell::new(0),
+            global_gen_sym_counter: Cell::new(0),
+            eh_personality: Cell::new(None),
+            pointee_infos: Default::default(),
+            structs_as_pointer: Default::default(),
+        }
+    }
+
+    pub fn lvalue_to_rvalue(&self, value: LValue<'gcc>) -> RValue<'gcc> {
+        #[cfg(debug_assertions)]
+        self.lvalues.borrow_mut().insert(value);
+        unsafe { std::mem::transmute(value) }
+    }
+
+    pub fn rvalue_as_function(&self, value: RValue<'gcc>) -> Function<'gcc> {
+        let function: Function<'gcc> = unsafe { std::mem::transmute(value) };
+        debug_assert!(self.functions.borrow().values().find(|value| **value == function).is_some(),
+            "{:?} ({:?}) is not a function", value, value.get_type());
+        function
+    }
+
+    pub fn rvalue_as_lvalue(&self, value: RValue<'gcc>) -> LValue<'gcc> {
+        let lvalue: LValue<'gcc> = unsafe { std::mem::transmute(value) };
+        //debug_assert!(self.lvalues.borrow().contains(&lvalue), "{:?} is not an lvalue", value);
+        lvalue
+    }
+
+    pub fn sess(&self) -> &Session {
+        &self.tcx.sess
+    }
+}
+
+impl<'gcc, 'tcx> BackendTypes for CodegenCx<'gcc, 'tcx> {
+    type Value = RValue<'gcc>;
+    type Function = RValue<'gcc>;
+
+    type BasicBlock = Block<'gcc>;
+    type Type = Type<'gcc>;
+    type Funclet = (); // TODO
+
+    type DIScope = (); // TODO
+    type DILocation = (); // TODO
+    type DIVariable = (); // TODO
+}
+
+impl<'gcc, 'tcx> MiscMethods<'tcx> for CodegenCx<'gcc, 'tcx> {
+    fn vtables(&self) -> &RefCell<FxHashMap<(Ty<'tcx>, Option<PolyExistentialTraitRef<'tcx>>), RValue<'gcc>>> {
+        &self.vtables
+    }
+
+    fn get_fn(&self, instance: Instance<'tcx>) -> RValue<'gcc> {
+        let func = get_fn(self, instance);
+        *self.current_func.borrow_mut() = Some(self.rvalue_as_function(func));
+        func
+    }
+
+    fn get_fn_addr(&self, instance: Instance<'tcx>) -> RValue<'gcc> {
+        //let symbol = self.tcx.symbol_name(instance).name;
+
+        let func = get_fn(self, instance);
+        let func = self.rvalue_as_function(func);
+        let ptr = func.get_address(None);
+
+        // TODO: don't do this twice: i.e. in declare_fn and here.
+        //let fn_abi = FnAbi::of_instance(self, instance, &[]);
+        //let (return_type, params, _) = fn_abi.gcc_type(self);
+        // FIXME: the rustc API seems to call get_fn_addr() when not needed (e.g. for FFI).
+        //let pointer_type = ptr.get_type();
+
+        self.normal_function_addresses.borrow_mut().insert(ptr);
+
+        ptr
+    }
+
+    fn eh_personality(&self) -> RValue<'gcc> {
+        // The exception handling personality function.
+        //
+        // If our compilation unit has the `eh_personality` lang item somewhere
+        // within it, then we just need to codegen that. Otherwise, we're
+        // building an rlib which will depend on some upstream implementation of
+        // this function, so we just codegen a generic reference to it. We don't
+        // specify any of the types for the function, we just make it a symbol
+        // that LLVM can later use.
+        //
+        // Note that MSVC is a little special here in that we don't use the
+        // `eh_personality` lang item at all. Currently LLVM has support for
+        // both Dwarf and SEH unwind mechanisms for MSVC targets and uses the
+        // *name of the personality function* to decide what kind of unwind side
+        // tables/landing pads to emit. It looks like Dwarf is used by default,
+        // injecting a dependency on the `_Unwind_Resume` symbol for resuming
+        // an "exception", but for MSVC we want to force SEH. This means that we
+        // can't actually have the personality function be our standard
+        // `rust_eh_personality` function, but rather we wired it up to the
+        // CRT's custom personality function, which forces LLVM to consider
+        // landing pads as "landing pads for SEH".
+        if let Some(llpersonality) = self.eh_personality.get() {
+            return llpersonality;
+        }
+        let tcx = self.tcx;
+        let llfn = match tcx.lang_items().eh_personality() {
+            Some(def_id) if !wants_msvc_seh(self.sess()) => self.get_fn_addr(
+                ty::Instance::resolve(
+                    tcx,
+                    ty::ParamEnv::reveal_all(),
+                    def_id,
+                    tcx.intern_substs(&[]),
+                )
+                .unwrap().unwrap(),
+            ),
+            _ => {
+                let name = if wants_msvc_seh(self.sess()) {
+                    "__CxxFrameHandler3"
+                } else {
+                    "rust_eh_personality"
+                };
+                self.declare_func(name, self.type_i32(), &[], true)
+            }
+        };
+        //attributes::apply_target_cpu_attr(self, llfn);
+        self.eh_personality.set(Some(llfn));
+        llfn
+    }
+
+    fn sess(&self) -> &Session {
+        &self.tcx.sess
+    }
+
+    fn check_overflow(&self) -> bool {
+        self.check_overflow
+    }
+
+    fn codegen_unit(&self) -> &'tcx CodegenUnit<'tcx> {
+        self.codegen_unit
+    }
+
+    fn used_statics(&self) -> &RefCell<Vec<RValue<'gcc>>> {
+        unimplemented!();
+        //&self.used_statics
+    }
+
+    fn set_frame_pointer_type(&self, _llfn: RValue<'gcc>) {
+        // TODO
+        //attributes::set_frame_pointer_type(self, llfn)
+    }
+
+    fn apply_target_cpu_attr(&self, _llfn: RValue<'gcc>) {
+        // TODO
+        //attributes::apply_target_cpu_attr(self, llfn)
+    }
+
+    fn create_used_variable(&self) {
+        unimplemented!();
+        /*let name = const_cstr!("llvm.used");
+        let section = const_cstr!("llvm.metadata");
+        let array =
+            self.const_array(&self.type_ptr_to(self.type_i8()), &*self.used_statics.borrow());
+
+        unsafe {
+            let g = llvm::LLVMAddGlobal(self.llmod, self.val_ty(array), name.as_ptr());
+            llvm::LLVMSetInitializer(g, array);
+            llvm::LLVMRustSetLinkage(g, llvm::Linkage::AppendingLinkage);
+            llvm::LLVMSetSection(g, section.as_ptr());
+        }*/
+    }
+
+    fn declare_c_main(&self, fn_type: Self::Type) -> Option<Self::Function> {
+        if self.get_declared_value("main").is_none() {
+            Some(self.declare_cfn("main", fn_type))
+        }
+        else {
+            // If the symbol already exists, it is an error: for example, the user wrote
+            // #[no_mangle] extern "C" fn main(..) {..}
+            // instead of #[start]
+            None
+        }
+    }
+}
+
+impl<'gcc, 'tcx> HasTyCtxt<'tcx> for CodegenCx<'gcc, 'tcx> {
+    fn tcx(&self) -> TyCtxt<'tcx> {
+        self.tcx
+    }
+}
+
+impl<'gcc, 'tcx> HasDataLayout for CodegenCx<'gcc, 'tcx> {
+    fn data_layout(&self) -> &TargetDataLayout {
+        &self.tcx.data_layout
+    }
+}
+
+impl<'gcc, 'tcx> HasTargetSpec for CodegenCx<'gcc, 'tcx> {
+    fn target_spec(&self) -> &Target {
+        &self.tcx.sess.target
+    }
+}
+
+impl<'gcc, 'tcx> LayoutOf for CodegenCx<'gcc, 'tcx> {
+    type Ty = Ty<'tcx>;
+    type TyAndLayout = TyAndLayout<'tcx>;
+
+    fn layout_of(&self, ty: Ty<'tcx>) -> Self::TyAndLayout {
+        self.spanned_layout_of(ty, DUMMY_SP)
+    }
+
+    fn spanned_layout_of(&self, ty: Ty<'tcx>, span: Span) -> Self::TyAndLayout {
+        self.tcx.layout_of(ParamEnv::reveal_all().and(ty)).unwrap_or_else(|e| {
+            if let LayoutError::SizeOverflow(_) = e {
+                self.sess().span_fatal(span, &e.to_string())
+            } else {
+                bug!("failed to get layout for `{}`: {}", ty, e)
+            }
+        })
+    }
+}
+
+impl<'tcx, 'gcc> HasParamEnv<'tcx> for CodegenCx<'gcc, 'tcx> {
+    fn param_env(&self) -> ParamEnv<'tcx> {
+        ParamEnv::reveal_all()
+    }
+}
+
+impl<'b, 'tcx> CodegenCx<'b, 'tcx> {
+    /// Generates a new symbol name with the given prefix. This symbol name must
+    /// only be used for definitions with `internal` or `private` linkage.
+    pub fn generate_local_symbol_name(&self, prefix: &str) -> String {
+        let idx = self.local_gen_sym_counter.get();
+        self.local_gen_sym_counter.set(idx + 1);
+        // Include a '.' character, so there can be no accidental conflicts with
+        // user defined names
+        let mut name = String::with_capacity(prefix.len() + 6);
+        name.push_str(prefix);
+        name.push_str(".");
+        base_n::push_str(idx as u128, base_n::ALPHANUMERIC_ONLY, &mut name);
+        name
+    }
+}
+
+pub fn unit_name<'tcx>(codegen_unit: &CodegenUnit<'tcx>) -> String {
+    let name = &codegen_unit.name().to_string();
+    mangle_name(&name.replace('-', "_"))
+}
+
+fn to_gcc_tls_mode(tls_model: TlsModel) -> gccjit::TlsModel {
+    match tls_model {
+        TlsModel::GeneralDynamic => gccjit::TlsModel::GlobalDynamic,
+        TlsModel::LocalDynamic => gccjit::TlsModel::LocalDynamic,
+        TlsModel::InitialExec => gccjit::TlsModel::InitialExec,
+        TlsModel::LocalExec => gccjit::TlsModel::LocalExec,
+    }
+}
diff --git a/src/coverageinfo.rs b/src/coverageinfo.rs
new file mode 100644
index 0000000000000..f966f6e75336f
--- /dev/null
+++ b/src/coverageinfo.rs
@@ -0,0 +1,140 @@
+use gccjit::RValue;
+use rustc_codegen_ssa::traits::{CoverageInfoBuilderMethods, CoverageInfoMethods};
+use rustc_hir::def_id::DefId;
+use rustc_middle::mir::coverage::{
+    CodeRegion,
+    CounterValueReference,
+    ExpressionOperandId,
+    InjectedExpressionId,
+    Op,
+};
+use rustc_middle::ty::Instance;
+
+use crate::builder::Builder;
+use crate::context::CodegenCx;
+
+impl<'a, 'gcc, 'tcx> CoverageInfoBuilderMethods<'tcx> for Builder<'a, 'gcc, 'tcx> {
+    fn set_function_source_hash(
+        &mut self,
+        _instance: Instance<'tcx>,
+        _function_source_hash: u64,
+    ) -> bool {
+        unimplemented!();
+        /*if let Some(coverage_context) = self.coverage_context() {
+            debug!(
+                "ensuring function source hash is set for instance={:?}; function_source_hash={}",
+                instance, function_source_hash,
+            );
+            let mut coverage_map = coverage_context.function_coverage_map.borrow_mut();
+            coverage_map
+                .entry(instance)
+                .or_insert_with(|| FunctionCoverage::new(self.tcx, instance))
+                .set_function_source_hash(function_source_hash);
+            true
+        } else {
+            false
+        }*/
+    }
+
+    fn add_coverage_counter(&mut self, _instance: Instance<'tcx>, _id: CounterValueReference, _region: CodeRegion) -> bool {
+        /*if let Some(coverage_context) = self.coverage_context() {
+            debug!(
+                "adding counter to coverage_regions: instance={:?}, function_source_hash={}, id={:?}, \
+                at {:?}",
+                instance, function_source_hash, id, region,
+            );
+            let mut coverage_regions = coverage_context.function_coverage_map.borrow_mut();
+            coverage_regions
+                .entry(instance)
+                .or_insert_with(|| FunctionCoverage::new(self.tcx, instance))
+                .add_counter(function_source_hash, id, region);
+            true
+        } else {
+            false
+        }*/
+        // TODO
+        false
+    }
+
+    fn add_coverage_counter_expression(&mut self, _instance: Instance<'tcx>, _id: InjectedExpressionId, _lhs: ExpressionOperandId, _op: Op, _rhs: ExpressionOperandId, _region: Option<CodeRegion>) -> bool {
+        /*if let Some(coverage_context) = self.coverage_context() {
+            debug!(
+                "adding counter expression to coverage_regions: instance={:?}, id={:?}, {:?} {:?} {:?}, \
+                at {:?}",
+                instance, id, lhs, op, rhs, region,
+            );
+            let mut coverage_regions = coverage_context.function_coverage_map.borrow_mut();
+            coverage_regions
+                .entry(instance)
+                .or_insert_with(|| FunctionCoverage::new(self.tcx, instance))
+                .add_counter_expression(id, lhs, op, rhs, region);
+            true
+        } else {
+            false
+        }*/
+        // TODO
+        false
+    }
+
+    fn add_coverage_unreachable(&mut self, _instance: Instance<'tcx>, _region: CodeRegion) -> bool {
+        /*if let Some(coverage_context) = self.coverage_context() {
+            debug!(
+                "adding unreachable code to coverage_regions: instance={:?}, at 
{:?}", + instance, region, + ); + let mut coverage_regions = coverage_context.function_coverage_map.borrow_mut(); + coverage_regions + .entry(instance) + .or_insert_with(|| FunctionCoverage::new(self.tcx, instance)) + .add_unreachable_region(region); + true + } else { + false + }*/ + // TODO + false + } +} + +impl<'gcc, 'tcx> CoverageInfoMethods<'tcx> for CodegenCx<'gcc, 'tcx> { + fn coverageinfo_finalize(&self) { + // TODO + //mapgen::finalize(self) + } + + fn get_pgo_func_name_var(&self, _instance: Instance<'tcx>) -> RValue<'gcc> { + unimplemented!(); + /*if let Some(coverage_context) = self.coverage_context() { + debug!("getting pgo_func_name_var for instance={:?}", instance); + let mut pgo_func_name_var_map = coverage_context.pgo_func_name_var_map.borrow_mut(); + pgo_func_name_var_map + .entry(instance) + .or_insert_with(|| create_pgo_func_name_var(self, instance)) + } else { + bug!("Could not get the `coverage_context`"); + }*/ + } + + /// Functions with MIR-based coverage are normally codegenned _only_ if + /// called. LLVM coverage tools typically expect every function to be + /// defined (even if unused), with at least one call to LLVM intrinsic + /// `instrprof.increment`. + /// + /// Codegen a small function that will never be called, with one counter + /// that will never be incremented. + /// + /// For used/called functions, the coverageinfo was already added to the + /// `function_coverage_map` (keyed by function `Instance`) during codegen. + /// But in this case, since the unused function was _not_ previously + /// codegenned, collect the coverage `CodeRegion`s from the MIR and add + /// them. The first `CodeRegion` is used to add a single counter, with the + /// same counter ID used in the injected `instrprof.increment` intrinsic + /// call. Since the function is never called, all other `CodeRegion`s can be + /// added as `unreachable_region`s. + fn define_unused_fn(&self, _def_id: DefId) { + unimplemented!(); + /*let instance = declare_unused_fn(self, &def_id); + codegen_unused_fn_and_counter(self, instance); + add_unused_function_coverage(self, instance, def_id);*/ + } +} diff --git a/src/debuginfo.rs b/src/debuginfo.rs new file mode 100644 index 0000000000000..a8902e6966d53 --- /dev/null +++ b/src/debuginfo.rs @@ -0,0 +1,407 @@ +use gccjit::{FunctionType, RValue}; +use rustc_codegen_ssa::mir::debuginfo::{FunctionDebugContext, VariableKind}; +use rustc_codegen_ssa::traits::{BuilderMethods, DebugInfoBuilderMethods, DebugInfoMethods}; +use rustc_middle::middle::cstore::CrateDepKind; +use rustc_middle::mir; +use rustc_middle::ty::{Instance, Ty}; +use rustc_span::{SourceFile, Span, Symbol}; +use rustc_span::def_id::LOCAL_CRATE; +use rustc_target::abi::Size; +use rustc_target::abi::call::FnAbi; + +use crate::builder::Builder; +use crate::context::CodegenCx; + +impl<'a, 'gcc, 'tcx> DebugInfoBuilderMethods for Builder<'a, 'gcc, 'tcx> { + // FIXME(eddyb) find a common convention for all of the debuginfo-related + // names (choose between `dbg`, `debug`, `debuginfo`, `debug_info` etc.). + fn dbg_var_addr(&mut self, _dbg_var: Self::DIVariable, _scope_metadata: Self::DIScope, _variable_alloca: Self::Value, _direct_offset: Size, _indirect_offsets: &[Size]) { + unimplemented!(); + /*let cx = self.cx(); + + // Convert the direct and indirect offsets to address ops. + // FIXME(eddyb) use `const`s instead of getting the values via FFI, + // the values should match the ones in the DWARF standard anyway. 
+ let op_deref = || unsafe { llvm::LLVMRustDIBuilderCreateOpDeref() }; + let op_plus_uconst = || unsafe { llvm::LLVMRustDIBuilderCreateOpPlusUconst() }; + let mut addr_ops = SmallVec::<[_; 8]>::new(); + + if direct_offset.bytes() > 0 { + addr_ops.push(op_plus_uconst()); + addr_ops.push(direct_offset.bytes() as i64); + } + for &offset in indirect_offsets { + addr_ops.push(op_deref()); + if offset.bytes() > 0 { + addr_ops.push(op_plus_uconst()); + addr_ops.push(offset.bytes() as i64); + } + } + + // FIXME(eddyb) maybe this information could be extracted from `dbg_var`, + // to avoid having to pass it down in both places? + // NB: `var` doesn't seem to know about the column, so that's a limitation. + let dbg_loc = cx.create_debug_loc(scope_metadata, span); + unsafe { + // FIXME(eddyb) replace `llvm.dbg.declare` with `llvm.dbg.addr`. + llvm::LLVMRustDIBuilderInsertDeclareAtEnd( + DIB(cx), + variable_alloca, + dbg_var, + addr_ops.as_ptr(), + addr_ops.len() as c_uint, + dbg_loc, + self.llbb(), + ); + }*/ + } + + /*fn set_source_location(&mut self, scope: Self::DIScope, span: Span) { + unimplemented!(); + /*debug!("set_source_location: {}", self.sess().source_map().span_to_string(span)); + + let dbg_loc = self.cx().create_debug_loc(scope, span); + + unsafe { + llvm::LLVMSetCurrentDebugLocation(self.llbuilder, dbg_loc); + }*/ + }*/ + + fn insert_reference_to_gdb_debug_scripts_section_global(&mut self) { + // TODO: replace with gcc_jit_context_new_global_with_initializer() if it's added: + // https://gcc.gnu.org/pipermail/jit/2020q3/001225.html + // + // Call the function to initialize global values here. + // We assume this is only called for the main function. + use std::iter; + + for crate_num in self.cx.tcx.crates(()).iter().copied().chain(iter::once(LOCAL_CRATE)) { + // FIXME: better way to find if a crate is of proc-macro type? + if crate_num == LOCAL_CRATE || self.cx.tcx.dep_kind(crate_num) != CrateDepKind::MacrosOnly { + // NOTE: proc-macro crates are not included in the executable, so don't call their + // initialization routine. + let initializer_name = format!("__gccGlobalCrateInit{}", self.cx.tcx.crate_name(crate_num)); + let codegen_init_func = self.context.new_function(None, FunctionType::Extern, self.context.new_type::<()>(), &[], + initializer_name, false); + self.llbb().add_eval(None, self.context.new_call(None, codegen_init_func, &[])); + } + } + + // TODO + //gdb::insert_reference_to_gdb_debug_scripts_section_global(self) + } + + fn set_var_name(&mut self, _value: RValue<'gcc>, _name: &str) { + unimplemented!(); + // Avoid wasting time if LLVM value names aren't even enabled. + /*if self.sess().fewer_names() { + return; + } + + // Only function parameters and instructions are local to a function, + // don't change the name of anything else (e.g. globals). + let param_or_inst = unsafe { + llvm::LLVMIsAArgument(value).is_some() || llvm::LLVMIsAInstruction(value).is_some() + }; + if !param_or_inst { + return; + } + + // Avoid replacing the name if it already exists. + // While we could combine the names somehow, it'd + // get noisy quick, and the usefulness is dubious. 
+        if llvm::get_value_name(value).is_empty() {
+            llvm::set_value_name(value, name.as_bytes());
+        }*/
+    }
+
+    fn set_dbg_loc(&mut self, _dbg_loc: Self::DILocation) {
+        unimplemented!();
+        /*unsafe {
+            let dbg_loc_as_llval = llvm::LLVMRustMetadataAsValue(self.cx().llcx, dbg_loc);
+            llvm::LLVMSetCurrentDebugLocation(self.llbuilder, dbg_loc_as_llval);
+        }*/
+    }
+}
+
+impl<'gcc, 'tcx> DebugInfoMethods<'tcx> for CodegenCx<'gcc, 'tcx> {
+    fn create_vtable_metadata(&self, _ty: Ty<'tcx>, _vtable: Self::Value) {
+        //metadata::create_vtable_metadata(self, ty, vtable)
+    }
+
+    fn create_function_debug_context(&self, _instance: Instance<'tcx>, _fn_abi: &FnAbi<'tcx, Ty<'tcx>>, _llfn: RValue<'gcc>, _mir: &mir::Body<'tcx>) -> Option<FunctionDebugContext<Self::DIScope, Self::DILocation>> {
+        // TODO
+        None
+    }
+
+    fn extend_scope_to_file(&self, _scope_metadata: Self::DIScope, _file: &SourceFile) -> Self::DIScope {
+        unimplemented!();
+    }
+
+    fn debuginfo_finalize(&self) {
+        //unimplemented!();
+    }
+
+    fn create_dbg_var(&self, _variable_name: Symbol, _variable_type: Ty<'tcx>, _scope_metadata: Self::DIScope, _variable_kind: VariableKind, _span: Span) -> Self::DIVariable {
+        unimplemented!();
+    }
+
+    fn dbg_scope_fn(&self, _instance: Instance<'tcx>, _fn_abi: &FnAbi<'tcx, Ty<'tcx>>, _maybe_definition_llfn: Option<RValue<'gcc>>) -> Self::DIScope {
+        unimplemented!();
+        /*let def_id = instance.def_id();
+        let containing_scope = get_containing_scope(self, instance);
+        let span = self.tcx.def_span(def_id);
+        let loc = self.lookup_debug_loc(span.lo());
+        let file_metadata = file_metadata(self, &loc.file);
+
+        let function_type_metadata = unsafe {
+            let fn_signature = get_function_signature(self, fn_abi);
+            llvm::LLVMRustDIBuilderCreateSubroutineType(DIB(self), fn_signature)
+        };
+
+        // Find the enclosing function, in case this is a closure.
+        let def_key = self.tcx().def_key(def_id);
+        let mut name = def_key.disambiguated_data.data.to_string();
+
+        let enclosing_fn_def_id = self.tcx().closure_base_def_id(def_id);
+
+        // Get_template_parameters() will append a `<...>` clause to the function
+        // name if necessary.
+        let generics = self.tcx().generics_of(enclosing_fn_def_id);
+        let substs = instance.substs.truncate_to(self.tcx(), generics);
+        let template_parameters = get_template_parameters(self, &generics, substs, &mut name);
+
+        let linkage_name = &mangled_name_of_instance(self, instance).name;
+        // Omit the linkage_name if it is the same as subprogram name.
+        let linkage_name = if &name == linkage_name { "" } else { linkage_name };
+
+        // FIXME(eddyb) does this need to be separate from `loc.line` for some reason?
+ let scope_line = loc.line; + + let mut flags = DIFlags::FlagPrototyped; + + if fn_abi.ret.layout.abi.is_uninhabited() { + flags |= DIFlags::FlagNoReturn; + } + + let mut spflags = DISPFlags::SPFlagDefinition; + if is_node_local_to_unit(self, def_id) { + spflags |= DISPFlags::SPFlagLocalToUnit; + } + if self.sess().opts.optimize != config::OptLevel::No { + spflags |= DISPFlags::SPFlagOptimized; + } + if let Some((id, _)) = self.tcx.entry_fn(LOCAL_CRATE) { + if id.to_def_id() == def_id { + spflags |= DISPFlags::SPFlagMainSubprogram; + } + } + + unsafe { + return llvm::LLVMRustDIBuilderCreateFunction( + DIB(self), + containing_scope, + name.as_ptr().cast(), + name.len(), + linkage_name.as_ptr().cast(), + linkage_name.len(), + file_metadata, + loc.line.unwrap_or(UNKNOWN_LINE_NUMBER), + function_type_metadata, + scope_line.unwrap_or(UNKNOWN_LINE_NUMBER), + flags, + spflags, + maybe_definition_llfn, + template_parameters, + None, + ); + } + + fn get_function_signature<'ll, 'tcx>( + cx: &CodegenCx<'ll, 'tcx>, + fn_abi: &FnAbi<'tcx, Ty<'tcx>>, + ) -> &'ll DIArray { + if cx.sess().opts.debuginfo == DebugInfo::Limited { + return create_DIArray(DIB(cx), &[]); + } + + let mut signature = Vec::with_capacity(fn_abi.args.len() + 1); + + // Return type -- llvm::DIBuilder wants this at index 0 + signature.push(if fn_abi.ret.is_ignore() { + None + } else { + Some(type_metadata(cx, fn_abi.ret.layout.ty, rustc_span::DUMMY_SP)) + }); + + // Arguments types + if cx.sess().target.options.is_like_msvc { + // FIXME(#42800): + // There is a bug in MSDIA that leads to a crash when it encounters + // a fixed-size array of `u8` or something zero-sized in a + // function-type (see #40477). + // As a workaround, we replace those fixed-size arrays with a + // pointer-type. So a function `fn foo(a: u8, b: [u8; 4])` would + // appear as `fn foo(a: u8, b: *const u8)` in debuginfo, + // and a function `fn bar(x: [(); 7])` as `fn bar(x: *const ())`. + // This transformed type is wrong, but these function types are + // already inaccurate due to ABI adjustments (see #42800). 
+            signature.extend(fn_abi.args.iter().map(|arg| {
+                let t = arg.layout.ty;
+                let t = match t.kind() {
+                    ty::Array(ct, _)
+                        if (*ct == cx.tcx.types.u8) || cx.layout_of(ct).is_zst() =>
+                    {
+                        cx.tcx.mk_imm_ptr(ct)
+                    }
+                    _ => t,
+                };
+                Some(type_metadata(cx, t, rustc_span::DUMMY_SP))
+            }));
+        } else {
+            signature.extend(
+                fn_abi
+                    .args
+                    .iter()
+                    .map(|arg| Some(type_metadata(cx, arg.layout.ty, rustc_span::DUMMY_SP))),
+            );
+        }
+
+        create_DIArray(DIB(cx), &signature[..])
+    }
+
+    fn get_template_parameters<'ll, 'tcx>(
+        cx: &CodegenCx<'ll, 'tcx>,
+        generics: &ty::Generics,
+        substs: SubstsRef<'tcx>,
+        name_to_append_suffix_to: &mut String,
+    ) -> &'ll DIArray {
+        if substs.types().next().is_none() {
+            return create_DIArray(DIB(cx), &[]);
+        }
+
+        name_to_append_suffix_to.push('<');
+        for (i, actual_type) in substs.types().enumerate() {
+            if i != 0 {
+                name_to_append_suffix_to.push(',');
+            }
+
+            let actual_type =
+                cx.tcx.normalize_erasing_regions(ParamEnv::reveal_all(), actual_type);
+            // Add actual type name to <...> clause of function name
+            let actual_type_name = compute_debuginfo_type_name(cx.tcx(), actual_type, true);
+            name_to_append_suffix_to.push_str(&actual_type_name[..]);
+        }
+        name_to_append_suffix_to.push('>');
+
+        // Again, only create type information if full debuginfo is enabled
+        let template_params: Vec<_> = if cx.sess().opts.debuginfo == DebugInfo::Full {
+            let names = get_parameter_names(cx, generics);
+            substs
+                .iter()
+                .zip(names)
+                .filter_map(|(kind, name)| {
+                    if let GenericArgKind::Type(ty) = kind.unpack() {
+                        let actual_type =
+                            cx.tcx.normalize_erasing_regions(ParamEnv::reveal_all(), ty);
+                        let actual_type_metadata =
+                            type_metadata(cx, actual_type, rustc_span::DUMMY_SP);
+                        let name = name.as_str();
+                        Some(unsafe {
+                            Some(llvm::LLVMRustDIBuilderCreateTemplateTypeParameter(
+                                DIB(cx),
+                                None,
+                                name.as_ptr().cast(),
+                                name.len(),
+                                actual_type_metadata,
+                            ))
+                        })
+                    } else {
+                        None
+                    }
+                })
+                .collect()
+        } else {
+            vec![]
+        };
+
+        create_DIArray(DIB(cx), &template_params[..])
+    }
+
+    fn get_parameter_names(cx: &CodegenCx<'_, '_>, generics: &ty::Generics) -> Vec<Symbol> {
+        let mut names = generics
+            .parent
+            .map_or(vec![], |def_id| get_parameter_names(cx, cx.tcx.generics_of(def_id)));
+        names.extend(generics.params.iter().map(|param| param.name));
+        names
+    }
+
+    fn get_containing_scope<'ll, 'tcx>(
+        cx: &CodegenCx<'ll, 'tcx>,
+        instance: Instance<'tcx>,
+    ) -> &'ll DIScope {
+        // First, let's see if this is a method within an inherent impl. Because
+        // if yes, we want to make the result subroutine DIE a child of the
+        // subroutine's self-type.
+        let self_type = cx.tcx.impl_of_method(instance.def_id()).and_then(|impl_def_id| {
+            // If the method does *not* belong to a trait, proceed
+            if cx.tcx.trait_id_of_impl(impl_def_id).is_none() {
+                let impl_self_ty = cx.tcx.subst_and_normalize_erasing_regions(
+                    instance.substs,
+                    ty::ParamEnv::reveal_all(),
+                    &cx.tcx.type_of(impl_def_id),
+                );
+
+                // Only "class" methods are generally understood by LLVM,
+                // so avoid methods on other types (e.g., `<*mut T>::null`).
+                match impl_self_ty.kind() {
+                    ty::Adt(def, ..) if !def.is_box() => {
+                        // Again, only create type information if full debuginfo is enabled
+                        if cx.sess().opts.debuginfo == DebugInfo::Full
+                            && !impl_self_ty.needs_subst()
+                        {
+                            Some(type_metadata(cx, impl_self_ty, rustc_span::DUMMY_SP))
+                        } else {
+                            Some(namespace::item_namespace(cx, def.did))
+                        }
+                    }
+                    _ => None,
+                }
+            } else {
+                // For trait method impls we still use the "parallel namespace"
+                // strategy
+                None
+            }
+        });
+
+        self_type.unwrap_or_else(|| {
+            namespace::item_namespace(
+                cx,
+                DefId {
+                    krate: instance.def_id().krate,
+                    index: cx
+                        .tcx
+                        .def_key(instance.def_id())
+                        .parent
+                        .expect("get_containing_scope: missing parent?"),
+                },
+            )
+        })
+    }*/
+    }
+
+    fn dbg_loc(&self, _scope: Self::DIScope, _inlined_at: Option<Self::DILocation>, _span: Span) -> Self::DILocation {
+        unimplemented!();
+        /*let DebugLoc { line, col, .. } = self.lookup_debug_loc(span.lo());
+
+        unsafe {
+            llvm::LLVMRustDIBuilderCreateDebugLocation(
+                utils::debug_context(self).llcontext,
+                line.unwrap_or(UNKNOWN_LINE_NUMBER),
+                col.unwrap_or(UNKNOWN_COLUMN_NUMBER),
+                scope,
+                inlined_at,
+            )
+        }*/
+    }
+}
diff --git a/src/declare.rs b/src/declare.rs
new file mode 100644
index 0000000000000..339a613c09661
--- /dev/null
+++ b/src/declare.rs
@@ -0,0 +1,220 @@
+use gccjit::{Function, FunctionType, GlobalKind, LValue, RValue, Type};
+use rustc_codegen_ssa::traits::BaseTypeMethods;
+use rustc_middle::ty::Ty;
+use rustc_span::Symbol;
+use rustc_target::abi::call::FnAbi;
+
+use crate::abi::FnAbiGccExt;
+use crate::context::{CodegenCx, unit_name};
+use crate::intrinsic::llvm;
+use crate::mangled_std_symbols::{ARGV_INIT_ARRAY, ARGV_INIT_WRAPPER};
+
+impl<'gcc, 'tcx> CodegenCx<'gcc, 'tcx> {
+    pub fn get_or_insert_global(&self, name: &str, ty: Type<'gcc>, is_tls: bool, link_section: Option<Symbol>) -> RValue<'gcc> {
+        if self.globals.borrow().contains_key(name) {
+            let typ = self.globals.borrow().get(name).expect("global").get_type();
+            let global = self.context.new_global(None, GlobalKind::Imported, typ, name);
+            if is_tls {
+                global.set_tls_model(self.tls_model);
+            }
+            if let Some(link_section) = link_section {
+                global.set_link_section(&link_section.as_str());
+            }
+            global.get_address(None)
+        }
+        else {
+            self.declare_global(name, ty, is_tls, link_section)
+        }
+    }
+
+    pub fn declare_unnamed_global(&self, ty: Type<'gcc>) -> LValue<'gcc> {
+        let index = self.global_gen_sym_counter.get();
+        self.global_gen_sym_counter.set(index + 1);
+        let name = format!("global_{}_{}", index, unit_name(&self.codegen_unit));
+        self.context.new_global(None, GlobalKind::Exported, ty, &name)
+    }
+
+    pub fn declare_global_with_linkage(&self, name: &str, ty: Type<'gcc>, linkage: GlobalKind) -> RValue<'gcc> {
+        //debug!("declare_global_with_linkage(name={:?})", name);
+        let global = self.context.new_global(None, linkage, ty, name)
+            .get_address(None);
+        self.globals.borrow_mut().insert(name.to_string(), global);
+        // NOTE: global seems to only be global in a module. So save the name instead of the value
+        // to import it later.
+        self.global_names.borrow_mut().insert(global, name.to_string());
+        global
+    }
+
+    pub fn declare_func(&self, name: &str, return_type: Type<'gcc>, params: &[Type<'gcc>], variadic: bool) -> RValue<'gcc> {
+        self.linkage.set(FunctionType::Exported);
+        let func = declare_raw_fn(self, name, () /*llvm::CCallConv*/, return_type, params, variadic);
+        // FIXME: this is a wrong cast. That requires changing the compiler API.
+ unsafe { std::mem::transmute(func) }
+ }
+
+ pub fn declare_global(&self, name: &str, ty: Type<'gcc>, is_tls: bool, link_section: Option<Symbol>) -> RValue<'gcc> {
+ //debug!("declare_global(name={:?})", name);
+ // FIXME: correctly support global variable initialization.
+ if name.starts_with(ARGV_INIT_ARRAY) {
+ // NOTE: hack to avoid having to update the names in mangled_std_symbols: we save the
+ // name of the variable now to actually declare it later.
+ *self.init_argv_var.borrow_mut() = name.to_string();
+
+ let global = self.context.new_global(None, GlobalKind::Imported, ty, name);
+ if let Some(link_section) = link_section {
+ global.set_link_section(&link_section.as_str());
+ }
+ return global.get_address(None);
+ }
+ let global = self.context.new_global(None, GlobalKind::Exported, ty, name);
+ if is_tls {
+ global.set_tls_model(self.tls_model);
+ }
+ if let Some(link_section) = link_section {
+ global.set_link_section(&link_section.as_str());
+ }
+ let global = global.get_address(None);
+ self.globals.borrow_mut().insert(name.to_string(), global);
+ // NOTE: global seems to only be global in a module. So save the name instead of the value
+ // to import it later.
+ self.global_names.borrow_mut().insert(global, name.to_string());
+ global
+ }
+
+ pub fn declare_cfn(&self, name: &str, _fn_type: Type<'gcc>) -> RValue<'gcc> {
+ // TODO: use the fn_type parameter.
+ let const_string = self.context.new_type::<u8>().make_pointer().make_pointer();
+ let return_type = self.type_i32();
+ let variadic = false;
+ self.linkage.set(FunctionType::Exported);
+ let func = declare_raw_fn(self, name, () /*llvm::CCallConv*/, return_type, &[self.type_i32(), const_string], variadic);
+ // NOTE: the current_func must be set here as well, because get_fn() is not called
+ // for the main function.
+ *self.current_func.borrow_mut() = Some(func);
+ // FIXME: this is a wrong cast. That requires changing the compiler API.
+ unsafe { std::mem::transmute(func) }
+ }
+
+ pub fn declare_fn(&self, name: &str, fn_abi: &FnAbi<'tcx, Ty<'tcx>>) -> RValue<'gcc> {
+ // NOTE: hack to avoid having to update the names in mangled_std_symbols: we found the name
+ // of the variable earlier, so we declare it now.
+ // Since we don't correctly support initializers yet, we initialize this variable manually
+ // for now.
+ if name.starts_with(ARGV_INIT_WRAPPER) && !self.argv_initialized.get() {
+ let global_name = &*self.init_argv_var.borrow();
+ let return_type = self.type_void();
+ let params = [
+ self.context.new_parameter(None, self.int_type, "argc"),
+ self.context.new_parameter(None, self.u8_type.make_pointer().make_pointer(), "argv"),
+ self.context.new_parameter(None, self.u8_type.make_pointer().make_pointer(), "envp"),
+ ];
+ let function = self.context.new_function(None, FunctionType::Extern, return_type, &params, name, false);
+ let initializer = function.get_address(None);
+
+ let param_types = [
+ self.int_type,
+ self.u8_type.make_pointer().make_pointer(),
+ self.u8_type.make_pointer().make_pointer(),
+ ];
+ let ty = self.context.new_function_pointer_type(None, return_type, &param_types, false);
+
+ let global = self.context.new_global(None, GlobalKind::Exported, ty, global_name);
+ global.set_link_section(".init_array.00099");
+ global.global_set_initializer_value(initializer);
+ let global = global.get_address(None);
+ self.globals.borrow_mut().insert(global_name.to_string(), global);
+ // NOTE: global seems to only be global in a module. So save the name instead of the value
+ // to import it later.
+ self.global_names.borrow_mut().insert(global, global_name.to_string());
+ self.argv_initialized.set(true);
+ }
+ //debug!("declare_rust_fn(name={:?}, fn_abi={:?})", name, fn_abi);
+ let (return_type, params, variadic) = fn_abi.gcc_type(self);
+ let func = declare_raw_fn(self, name, () /*fn_abi.llvm_cconv()*/, return_type, &params, variadic);
+ //fn_abi.apply_attrs_llfn(self, func);
+ // FIXME: this is a wrong cast. That requires changing the compiler API.
+ unsafe { std::mem::transmute(func) }
+ }
+
+ pub fn define_global(&self, name: &str, ty: Type<'gcc>, is_tls: bool, link_section: Option<Symbol>) -> Option<RValue<'gcc>> {
+ Some(self.get_or_insert_global(name, ty, is_tls, link_section))
+ }
+
+ pub fn define_private_global(&self, ty: Type<'gcc>) -> RValue<'gcc> {
+ let global = self.declare_unnamed_global(ty);
+ global.get_address(None)
+ }
+
+ pub fn get_declared_value(&self, name: &str) -> Option<RValue<'gcc>> {
+ //debug!("get_declared_value(name={:?})", name);
+ // TODO: use a different field than globals, because this seems to return a function?
+ self.globals.borrow().get(name).cloned()
+ }
+
+ /*fn get_defined_value(&self, name: &str) -> Option<RValue<'gcc>> {
+ // TODO: gcc does not allow global initialization.
+ None
+ /*self.get_declared_value(name).and_then(|val| {
+ let declaration = unsafe { llvm::LLVMIsDeclaration(val) != 0 };
+ if !declaration { Some(val) } else { None }
+ })*/
+ }*/
+}
+
+/// Declare a function.
+///
+/// If there’s a value with the same name already declared, the function will
+/// update the declaration and return the existing Value instead.
+fn declare_raw_fn<'gcc>(cx: &CodegenCx<'gcc, '_>, name: &str, _callconv: () /*llvm::CallConv*/, return_type: Type<'gcc>, param_types: &[Type<'gcc>], variadic: bool) -> Function<'gcc> {
+ //debug!("declare_raw_fn(name={:?}, ty={:?})", name, ty);
+ /*let llfn = unsafe {
+ llvm::LLVMRustGetOrInsertFunction(cx.llmod, name.as_ptr().cast(), name.len(), ty)
+ };*/
+
+ if name.starts_with("llvm.") {
+ return llvm::intrinsic(name, cx);
+ }
+ let func =
+ if cx.functions.borrow().contains_key(name) {
+ *cx.functions.borrow().get(name).expect("function")
+ }
+ else {
+ let params: Vec<_> = param_types.into_iter().enumerate()
+ .map(|(index, param)| cx.context.new_parameter(None, *param, &format!("param{}", index))) // TODO: set name.
+ .collect();
+ let func = cx.context.new_function(None, cx.linkage.get(), return_type, &params, mangle_name(name), variadic);
+ cx.functions.borrow_mut().insert(name.to_string(), func);
+ func
+ };
+
+ //llvm::SetFunctionCallConv(llfn, callconv); // TODO
+ // Function addresses in Rust are never significant, allowing functions to
+ // be merged.
+ //llvm::SetUnnamedAddress(llfn, llvm::UnnamedAddr::Global); // TODO
+
+ /*if cx.tcx.sess.opts.cg.no_redzone.unwrap_or(cx.tcx.sess.target.target.options.disable_redzone) {
+ llvm::Attribute::NoRedZone.apply_llfn(Function, llfn);
+ }*/
+
+ //attributes::default_optimisation_attrs(cx.tcx.sess, llfn);
+ //attributes::non_lazy_bind(cx.sess(), llfn);
+
+ // FIXME: invalid cast.
+ // TODO: is this line useful?
+ //cx.globals.borrow_mut().insert(name.to_string(), unsafe { std::mem::transmute(func) });
+ func
+}
+
+// FIXME: this is a hack because libgccjit currently only supports alphanumeric characters and `_`
+// in function names. Unsupported characters: `$` and `.`.
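+// For example (hypothetical symbol, not from the original patch): a name like
+// "foo$u20$bar.0" would be mangled to "foo_u20_bar_0", since both `$` and `.`
+// are replaced by `_`.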
+pub fn mangle_name(name: &str) -> String {
+ name.replace(|char: char| {
+ if !char.is_alphanumeric() && char != '_' {
+ debug_assert!("$.".contains(char), "Unsupported char in function name: {}", char);
+ true
+ }
+ else {
+ false
+ }
+ }, "_")
+}
diff --git a/src/intrinsic/llvm.rs b/src/intrinsic/llvm.rs
new file mode 100644
index 0000000000000..bf9472d3ea942
--- /dev/null
+++ b/src/intrinsic/llvm.rs
@@ -0,0 +1,26 @@
+use gccjit::Function;
+
+use crate::context::CodegenCx;
+
+pub fn intrinsic<'gcc, 'tcx>(name: &str, cx: &CodegenCx<'gcc, 'tcx>) -> Function<'gcc> {
+ let _gcc_name =
+ match name {
+ "llvm.x86.xgetbv" => {
+ let gcc_name = "__builtin_trap";
+ let func = cx.context.get_builtin_function(gcc_name);
+ cx.functions.borrow_mut().insert(gcc_name.to_string(), func);
+ return func;
+ },
+ // TODO: this doc specifies the equivalent GCC builtins: http://huonw.github.io/llvmint/llvmint/x86/index.html
+ "llvm.x86.sse2.cmp.pd" => "__builtin_ia32_cmppd",
+ "llvm.x86.sse2.movmsk.pd" => "__builtin_ia32_movmskpd",
+ "llvm.x86.sse2.pmovmskb.128" => "__builtin_ia32_pmovmskb128",
+ _ => unimplemented!("unsupported LLVM intrinsic {}", name)
+ };
+
+ println!("Get target builtin");
+ unimplemented!();
+ /*let func = cx.context.get_target_builtin_function(gcc_name);
+ cx.functions.borrow_mut().insert(gcc_name.to_string(), func);
+ func*/
+}
diff --git a/src/intrinsic/mod.rs b/src/intrinsic/mod.rs
new file mode 100644
index 0000000000000..083f7e01c80d0
--- /dev/null
+++ b/src/intrinsic/mod.rs
@@ -0,0 +1,1286 @@
+pub mod llvm;
+mod simd;
+
+use gccjit::{ComparisonOp, Function, RValue, ToRValue, Type, UnaryOp};
+use rustc_codegen_ssa::MemFlags;
+use rustc_codegen_ssa::base::wants_msvc_seh;
+use rustc_codegen_ssa::common::{IntPredicate, span_invalid_monomorphization_error};
+use rustc_codegen_ssa::mir::operand::{OperandRef, OperandValue};
+use rustc_codegen_ssa::mir::place::PlaceRef;
+use rustc_codegen_ssa::traits::{ArgAbiMethods, BaseTypeMethods, BuilderMethods, ConstMethods, IntrinsicCallMethods};
+use rustc_middle::bug;
+use rustc_middle::ty::{self, Instance, Ty};
+use rustc_span::{Span, Symbol, symbol::kw, sym};
+use rustc_target::abi::{HasDataLayout, LayoutOf};
+use rustc_target::abi::call::{ArgAbi, FnAbi, PassMode};
+use rustc_target::spec::PanicStrategy;
+
+use crate::abi::GccType;
+use crate::builder::Builder;
+use crate::common::TypeReflection;
+use crate::context::CodegenCx;
+use crate::type_of::LayoutGccExt;
+use crate::intrinsic::simd::generic_simd_intrinsic;
+
+fn get_simple_intrinsic<'gcc, 'tcx>(cx: &CodegenCx<'gcc, 'tcx>, name: Symbol) -> Option<Function<'gcc>> {
+ let gcc_name = match name {
+ sym::sqrtf32 => "sqrtf",
+ sym::sqrtf64 => "sqrt",
+ sym::powif32 => "__builtin_powif",
+ sym::powif64 => "__builtin_powi",
+ sym::sinf32 => "sinf",
+ sym::sinf64 => "sin",
+ sym::cosf32 => "cosf",
+ sym::cosf64 => "cos",
+ sym::powf32 => "powf",
+ sym::powf64 => "pow",
+ sym::expf32 => "expf",
+ sym::expf64 => "exp",
+ sym::exp2f32 => "exp2f",
+ sym::exp2f64 => "exp2",
+ sym::logf32 => "logf",
+ sym::logf64 => "log",
+ sym::log10f32 => "log10f",
+ sym::log10f64 => "log10",
+ sym::log2f32 => "log2f",
+ sym::log2f64 => "log2",
+ sym::fmaf32 => "fmaf",
+ sym::fmaf64 => "fma",
+ sym::fabsf32 => "fabsf",
+ sym::fabsf64 => "fabs",
+ sym::minnumf32 => "fminf",
+ sym::minnumf64 => "fmin",
+ sym::maxnumf32 => "fmaxf",
+ sym::maxnumf64 => "fmax",
+ sym::copysignf32 => "copysignf",
+ sym::copysignf64 => "copysign",
+ sym::floorf32 => "floorf",
+ sym::floorf64 => "floor",
+ sym::ceilf32 => "ceilf",
+ sym::ceilf64 => "ceil",
"ceil", + sym::truncf32 => "truncf", + sym::truncf64 => "trunc", + sym::rintf32 => "rintf", + sym::rintf64 => "rint", + sym::nearbyintf32 => "nearbyintf", + sym::nearbyintf64 => "nearbyint", + sym::roundf32 => "roundf", + sym::roundf64 => "round", + sym::abort => "abort", + _ => return None, + }; + Some(cx.context.get_builtin_function(&gcc_name)) +} + +impl<'a, 'gcc, 'tcx> IntrinsicCallMethods<'tcx> for Builder<'a, 'gcc, 'tcx> { + fn codegen_intrinsic_call(&mut self, instance: Instance<'tcx>, fn_abi: &FnAbi<'tcx, Ty<'tcx>>, args: &[OperandRef<'tcx, RValue<'gcc>>], llresult: RValue<'gcc>, span: Span) { + let tcx = self.tcx; + let callee_ty = instance.ty(tcx, ty::ParamEnv::reveal_all()); + + let (def_id, substs) = match *callee_ty.kind() { + ty::FnDef(def_id, substs) => (def_id, substs), + _ => bug!("expected fn item type, found {}", callee_ty), + }; + + let sig = callee_ty.fn_sig(tcx); + let sig = tcx.normalize_erasing_late_bound_regions(ty::ParamEnv::reveal_all(), sig); + let arg_tys = sig.inputs(); + let ret_ty = sig.output(); + let name = tcx.item_name(def_id); + let name_str = &*name.as_str(); + + let llret_ty = self.layout_of(ret_ty).gcc_type(self, true); + let result = PlaceRef::new_sized(llresult, fn_abi.ret.layout); + + let simple = get_simple_intrinsic(self, name); + let llval = + match name { + _ if simple.is_some() => { + // FIXME: remove this cast when the API supports function. + let func = unsafe { std::mem::transmute(simple.expect("simple")) }; + self.call(func, &args.iter().map(|arg| arg.immediate()).collect::>(), None) + }, + sym::likely => { + self.expect(args[0].immediate(), true) + } + sym::unlikely => { + self.expect(args[0].immediate(), false) + } + kw::Try => { + try_intrinsic( + self, + args[0].immediate(), + args[1].immediate(), + args[2].immediate(), + llresult, + ); + return; + } + sym::breakpoint => { + unimplemented!(); + /*let llfn = self.get_intrinsic(&("llvm.debugtrap")); + self.call(llfn, &[], None)*/ + } + sym::va_copy => { + unimplemented!(); + /*let intrinsic = self.cx().get_intrinsic(&("llvm.va_copy")); + self.call(intrinsic, &[args[0].immediate(), args[1].immediate()], None)*/ + } + sym::va_arg => { + unimplemented!(); + /*match fn_abi.ret.layout.abi { + abi::Abi::Scalar(ref scalar) => { + match scalar.value { + Primitive::Int(..) => { + if self.cx().size_of(ret_ty).bytes() < 4 { + // `va_arg` should not be called on a integer type + // less than 4 bytes in length. If it is, promote + // the integer to a `i32` and truncate the result + // back to the smaller type. + let promoted_result = emit_va_arg(self, args[0], tcx.types.i32); + self.trunc(promoted_result, llret_ty) + } else { + emit_va_arg(self, args[0], ret_ty) + } + } + Primitive::F64 | Primitive::Pointer => { + emit_va_arg(self, args[0], ret_ty) + } + // `va_arg` should never be used with the return type f32. 
+ Primitive::F32 => bug!("the va_arg intrinsic does not work with `f32`"), + } + } + _ => bug!("the va_arg intrinsic does not work with non-scalar types"), + }*/ + } + + sym::volatile_load | sym::unaligned_volatile_load => { + let tp_ty = substs.type_at(0); + let mut ptr = args[0].immediate(); + if let PassMode::Cast(ty) = fn_abi.ret.mode { + ptr = self.pointercast(ptr, self.type_ptr_to(ty.gcc_type(self))); + } + let load = self.volatile_load(ptr.get_type(), ptr); + // TODO + /*let align = if name == sym::unaligned_volatile_load { + 1 + } else { + self.align_of(tp_ty).bytes() as u32 + }; + unsafe { + llvm::LLVMSetAlignment(load, align); + }*/ + self.to_immediate(load, self.layout_of(tp_ty)) + } + sym::volatile_store => { + let dst = args[0].deref(self.cx()); + args[1].val.volatile_store(self, dst); + return; + } + sym::unaligned_volatile_store => { + let dst = args[0].deref(self.cx()); + args[1].val.unaligned_volatile_store(self, dst); + return; + } + sym::prefetch_read_data + | sym::prefetch_write_data + | sym::prefetch_read_instruction + | sym::prefetch_write_instruction => { + unimplemented!(); + /*let expect = self.get_intrinsic(&("llvm.prefetch")); + let (rw, cache_type) = match name { + sym::prefetch_read_data => (0, 1), + sym::prefetch_write_data => (1, 1), + sym::prefetch_read_instruction => (0, 0), + sym::prefetch_write_instruction => (1, 0), + _ => bug!(), + }; + self.call( + expect, + &[ + args[0].immediate(), + self.const_i32(rw), + args[1].immediate(), + self.const_i32(cache_type), + ], + None, + )*/ + } + sym::ctlz + | sym::ctlz_nonzero + | sym::cttz + | sym::cttz_nonzero + | sym::ctpop + | sym::bswap + | sym::bitreverse + | sym::rotate_left + | sym::rotate_right + | sym::saturating_add + | sym::saturating_sub => { + let ty = arg_tys[0]; + match int_type_width_signed(ty, self) { + Some((width, signed)) => match name { + sym::ctlz | sym::cttz => { + let func = self.current_func.borrow().expect("func"); + let then_block = func.new_block("then"); + let else_block = func.new_block("else"); + let after_block = func.new_block("after"); + + let arg = args[0].immediate(); + let result = func.new_local(None, arg.get_type(), "zeros"); + let zero = self.cx.context.new_rvalue_zero(arg.get_type()); + let cond = self.cx.context.new_comparison(None, ComparisonOp::Equals, arg, zero); + self.block.expect("block").end_with_conditional(None, cond, then_block, else_block); + + let zero_result = self.cx.context.new_rvalue_from_long(arg.get_type(), width as i64); + then_block.add_assignment(None, result, zero_result); + then_block.end_with_jump(None, after_block); + + // NOTE: since jumps were added in a place + // count_leading_zeroes() does not expect, the current blocks + // in the state need to be updated. + *self.current_block.borrow_mut() = Some(else_block); + self.block = Some(else_block); + + let zeros = + match name { + sym::ctlz => self.count_leading_zeroes(width, arg), + sym::cttz => self.count_trailing_zeroes(width, arg), + _ => unreachable!(), + }; + else_block.add_assignment(None, result, zeros); + else_block.end_with_jump(None, after_block); + + // NOTE: since jumps were added in a place rustc does not + // expect, the current blocks in the state need to be updated. 
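+ // Illustrative sketch of the lowering built above (comment added,
+ // not in the original patch):
+ //
+ // if arg == 0 { zeros = width } // then_block
+ // else { zeros = clz/ctz(arg) } // else_block
+ // // after_block reads `zeros`
+ //
+ // The explicit zero check is required because the GCC builtins are
+ // undefined for an argument of zero, while the Rust `ctlz`/`cttz`
+ // intrinsics must return the full bit width in that case.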
+ *self.current_block.borrow_mut() = Some(after_block); + self.block = Some(after_block); + + result.to_rvalue() + + /*let y = self.const_bool(false); + let llfn = self.get_intrinsic(&format!("llvm.{}.i{}", name, width)); + self.call(llfn, &[args[0].immediate(), y], None)*/ + } + sym::ctlz_nonzero => { + self.count_leading_zeroes(width, args[0].immediate()) + }, + sym::cttz_nonzero => { + self.count_trailing_zeroes(width, args[0].immediate()) + } + sym::ctpop => self.pop_count(args[0].immediate()), + sym::bswap => { + if width == 8 { + args[0].immediate() // byte swap a u8/i8 is just a no-op + } + else { + // TODO: check if it's faster to use string literals and a + // match instead of format!. + let bswap = self.cx.context.get_builtin_function(&format!("__builtin_bswap{}", width)); + let mut arg = args[0].immediate(); + // FIXME: this cast should not be necessary. Remove + // when having proper sized integer types. + let param_type = bswap.get_param(0).to_rvalue().get_type(); + if param_type != arg.get_type() { + arg = self.bitcast(arg, param_type); + } + self.cx.context.new_call(None, bswap, &[arg]) + } + }, + sym::bitreverse => self.bit_reverse(width, args[0].immediate()), + sym::rotate_left | sym::rotate_right => { + // TODO: implement using algorithm from: + // https://blog.regehr.org/archives/1063 + // for other platforms. + let is_left = name == sym::rotate_left; + let val = args[0].immediate(); + let raw_shift = args[1].immediate(); + if is_left { + self.rotate_left(val, raw_shift, width) + } + else { + self.rotate_right(val, raw_shift, width) + } + }, + sym::saturating_add => { + self.saturating_add(args[0].immediate(), args[1].immediate(), signed, width) + }, + sym::saturating_sub => { + self.saturating_sub(args[0].immediate(), args[1].immediate(), signed, width) + }, + _ => bug!(), + }, + None => { + span_invalid_monomorphization_error( + tcx.sess, + span, + &format!( + "invalid monomorphization of `{}` intrinsic: \ + expected basic integer type, found `{}`", + name, ty + ), + ); + return; + } + } + } + + sym::raw_eq => { + use rustc_target::abi::Abi::*; + let tp_ty = substs.type_at(0); + let layout = self.layout_of(tp_ty).layout; + let use_integer_compare = match layout.abi { + Scalar(_) | ScalarPair(_, _) => true, + Uninhabited | Vector { .. } => false, + Aggregate { .. } => { + // For rusty ABIs, small aggregates are actually passed + // as `RegKind::Integer` (see `FnAbi::adjust_for_abi`), + // so we re-use that same threshold here. + layout.size <= self.data_layout().pointer_size * 2 + } + }; + + let a = args[0].immediate(); + let b = args[1].immediate(); + if layout.size.bytes() == 0 { + self.const_bool(true) + } + /*else if use_integer_compare { + let integer_ty = self.type_ix(layout.size.bits()); // FIXME: LLVM creates an integer of 96 bits for [i32; 3], but gcc doesn't support this, so it creates an integer of 128 bits. 
+ let ptr_ty = self.type_ptr_to(integer_ty);
+ let a_ptr = self.bitcast(a, ptr_ty);
+ let a_val = self.load(integer_ty, a_ptr, layout.align.abi);
+ let b_ptr = self.bitcast(b, ptr_ty);
+ let b_val = self.load(integer_ty, b_ptr, layout.align.abi);
+ self.icmp(IntPredicate::IntEQ, a_val, b_val)
+ }*/
+ else {
+ let void_ptr_type = self.context.new_type::<*const ()>();
+ let a_ptr = self.bitcast(a, void_ptr_type);
+ let b_ptr = self.bitcast(b, void_ptr_type);
+ let n = self.context.new_cast(None, self.const_usize(layout.size.bytes()), self.sizet_type);
+ let builtin = self.context.get_builtin_function("memcmp");
+ let cmp = self.context.new_call(None, builtin, &[a_ptr, b_ptr, n]);
+ self.icmp(IntPredicate::IntEQ, cmp, self.const_i32(0))
+ }
+ }
+
+ _ if name_str.starts_with("simd_") => {
+ match generic_simd_intrinsic(self, name, callee_ty, args, ret_ty, llret_ty, span) {
+ Ok(llval) => llval,
+ Err(()) => return,
+ }
+ }
+
+ _ => bug!("unknown intrinsic '{}'", name),
+ };
+
+ if !fn_abi.ret.is_ignore() {
+ if let PassMode::Cast(ty) = fn_abi.ret.mode {
+ let ptr_llty = self.type_ptr_to(ty.gcc_type(self));
+ let ptr = self.pointercast(result.llval, ptr_llty);
+ self.store(llval, ptr, result.align);
+ }
+ else {
+ OperandRef::from_immediate_or_packed_pair(self, llval, result.layout)
+ .val
+ .store(self, result);
+ }
+ }
+ }
+
+ fn abort(&mut self) {
+ let func = self.context.get_builtin_function("abort");
+ let func: RValue<'gcc> = unsafe { std::mem::transmute(func) };
+ self.call(func, &[], None);
+ }
+
+ fn assume(&mut self, value: Self::Value) {
+ // TODO: switch to assume when it exists.
+ // Or use something like this:
+ // #define __assume(cond) do { if (!(cond)) __builtin_unreachable(); } while (0)
+ self.expect(value, true);
+ }
+
+ fn expect(&mut self, cond: Self::Value, _expected: bool) -> Self::Value {
+ // TODO
+ /*let expect = self.context.get_builtin_function("__builtin_expect");
+ let expect: RValue<'gcc> = unsafe { std::mem::transmute(expect) };
+ self.call(expect, &[cond, self.const_bool(expected)], None)*/
+ cond
+ }
+
+ fn sideeffect(&mut self) {
+ // TODO
+ /*if self.tcx().sess.opts.debugging_opts.insert_sideeffect {
+ let fnname = self.get_intrinsic(&("llvm.sideeffect"));
+ self.call(fnname, &[], None);
+ }*/
+ }
+
+ fn va_start(&mut self, _va_list: RValue<'gcc>) -> RValue<'gcc> {
+ unimplemented!();
+ /*let intrinsic = self.cx().get_intrinsic("llvm.va_start");
+ self.call(intrinsic, &[va_list], None)*/
+ }
+
+ fn va_end(&mut self, _va_list: RValue<'gcc>) -> RValue<'gcc> {
+ unimplemented!();
+ /*let intrinsic = self.cx().get_intrinsic("llvm.va_end");
+ self.call(intrinsic, &[va_list], None)*/
+ }
+}
+
+impl<'a, 'gcc, 'tcx> ArgAbiMethods<'tcx> for Builder<'a, 'gcc, 'tcx> {
+ fn store_fn_arg(&mut self, arg_abi: &ArgAbi<'tcx, Ty<'tcx>>, idx: &mut usize, dst: PlaceRef<'tcx, Self::Value>) {
+ arg_abi.store_fn_arg(self, idx, dst)
+ }
+
+ fn store_arg(&mut self, arg_abi: &ArgAbi<'tcx, Ty<'tcx>>, val: RValue<'gcc>, dst: PlaceRef<'tcx, RValue<'gcc>>) {
+ arg_abi.store(self, val, dst)
+ }
+
+ fn arg_memory_ty(&self, arg_abi: &ArgAbi<'tcx, Ty<'tcx>>) -> Type<'gcc> {
+ arg_abi.memory_ty(self)
+ }
+}
+
+pub trait ArgAbiExt<'gcc, 'tcx> {
+ fn memory_ty(&self, cx: &CodegenCx<'gcc, 'tcx>) -> Type<'gcc>;
+ fn store(&self, bx: &mut Builder<'_, 'gcc, 'tcx>, val: RValue<'gcc>, dst: PlaceRef<'tcx, RValue<'gcc>>);
+ fn store_fn_arg(&self, bx: &mut Builder<'_, 'gcc, 'tcx>, idx: &mut usize, dst: PlaceRef<'tcx, RValue<'gcc>>);
+}
+
+impl<'gcc, 'tcx> ArgAbiExt<'gcc, 'tcx> for ArgAbi<'tcx,
Ty<'tcx>> { + /// Gets the LLVM type for a place of the original Rust type of + /// this argument/return, i.e., the result of `type_of::type_of`. + fn memory_ty(&self, cx: &CodegenCx<'gcc, 'tcx>) -> Type<'gcc> { + self.layout.gcc_type(cx, true) + } + + /// Stores a direct/indirect value described by this ArgAbi into a + /// place for the original Rust type of this argument/return. + /// Can be used for both storing formal arguments into Rust variables + /// or results of call/invoke instructions into their destinations. + fn store(&self, bx: &mut Builder<'_, 'gcc, 'tcx>, val: RValue<'gcc>, dst: PlaceRef<'tcx, RValue<'gcc>>) { + if self.is_ignore() { + return; + } + if self.is_sized_indirect() { + OperandValue::Ref(val, None, self.layout.align.abi).store(bx, dst) + } + else if self.is_unsized_indirect() { + bug!("unsized `ArgAbi` must be handled through `store_fn_arg`"); + } + else if let PassMode::Cast(cast) = self.mode { + // FIXME(eddyb): Figure out when the simpler Store is safe, clang + // uses it for i16 -> {i8, i8}, but not for i24 -> {i8, i8, i8}. + let can_store_through_cast_ptr = false; + if can_store_through_cast_ptr { + let cast_ptr_llty = bx.type_ptr_to(cast.gcc_type(bx)); + let cast_dst = bx.pointercast(dst.llval, cast_ptr_llty); + bx.store(val, cast_dst, self.layout.align.abi); + } + else { + // The actual return type is a struct, but the ABI + // adaptation code has cast it into some scalar type. The + // code that follows is the only reliable way I have + // found to do a transform like i64 -> {i32,i32}. + // Basically we dump the data onto the stack then memcpy it. + // + // Other approaches I tried: + // - Casting rust ret pointer to the foreign type and using Store + // is (a) unsafe if size of foreign type > size of rust type and + // (b) runs afoul of strict aliasing rules, yielding invalid + // assembly under -O (specifically, the store gets removed). + // - Truncating foreign type to correct integral type and then + // bitcasting to the struct type yields invalid cast errors. + + // We instead thus allocate some scratch space... + let scratch_size = cast.size(bx); + let scratch_align = cast.align(bx); + let llscratch = bx.alloca(cast.gcc_type(bx), scratch_align); + bx.lifetime_start(llscratch, scratch_size); + + // ... where we first store the value... + bx.store(val, llscratch, scratch_align); + + // ... and then memcpy it to the intended destination. + bx.memcpy( + dst.llval, + self.layout.align.abi, + llscratch, + scratch_align, + bx.const_usize(self.layout.size.bytes()), + MemFlags::empty(), + ); + + bx.lifetime_end(llscratch, scratch_size); + } + } + else { + OperandValue::Immediate(val).store(bx, dst); + } + } + + fn store_fn_arg<'a>(&self, bx: &mut Builder<'a, 'gcc, 'tcx>, idx: &mut usize, dst: PlaceRef<'tcx, RValue<'gcc>>) { + let mut next = || { + let val = bx.current_func().get_param(*idx as i32); + *idx += 1; + val.to_rvalue() + }; + match self.mode { + PassMode::Ignore => {} + PassMode::Pair(..) => { + OperandValue::Pair(next(), next()).store(bx, dst); + } + PassMode::Indirect { extra_attrs: Some(_), .. } => { + OperandValue::Ref(next(), Some(next()), self.layout.align.abi).store(bx, dst); + } + PassMode::Direct(_) | PassMode::Indirect { extra_attrs: None, .. 
} | PassMode::Cast(_) => { + let next_arg = next(); + self.store(bx, next_arg.to_rvalue(), dst); + } + } + } +} + +fn int_type_width_signed<'gcc, 'tcx>(ty: Ty<'tcx>, cx: &CodegenCx<'gcc, 'tcx>) -> Option<(u64, bool)> { + match ty.kind() { + ty::Int(t) => Some(( + match t { + rustc_middle::ty::IntTy::Isize => u64::from(cx.tcx.sess.target.pointer_width), + rustc_middle::ty::IntTy::I8 => 8, + rustc_middle::ty::IntTy::I16 => 16, + rustc_middle::ty::IntTy::I32 => 32, + rustc_middle::ty::IntTy::I64 => 64, + rustc_middle::ty::IntTy::I128 => 128, + }, + true, + )), + ty::Uint(t) => Some(( + match t { + rustc_middle::ty::UintTy::Usize => u64::from(cx.tcx.sess.target.pointer_width), + rustc_middle::ty::UintTy::U8 => 8, + rustc_middle::ty::UintTy::U16 => 16, + rustc_middle::ty::UintTy::U32 => 32, + rustc_middle::ty::UintTy::U64 => 64, + rustc_middle::ty::UintTy::U128 => 128, + }, + false, + )), + _ => None, + } +} + +impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> { + fn bit_reverse(&mut self, width: u64, value: RValue<'gcc>) -> RValue<'gcc> { + let typ = value.get_type(); + let context = &self.cx.context; + match width { + 8 => { + // First step. + let left = self.and(value, context.new_rvalue_from_int(typ, 0xF0)); + let left = self.lshr(left, context.new_rvalue_from_int(typ, 4)); + let right = self.and(value, context.new_rvalue_from_int(typ, 0x0F)); + let right = self.shl(right, context.new_rvalue_from_int(typ, 4)); + let step1 = self.or(left, right); + + // Second step. + let left = self.and(step1, context.new_rvalue_from_int(typ, 0xCC)); + let left = self.lshr(left, context.new_rvalue_from_int(typ, 2)); + let right = self.and(step1, context.new_rvalue_from_int(typ, 0x33)); + let right = self.shl(right, context.new_rvalue_from_int(typ, 2)); + let step2 = self.or(left, right); + + // Third step. + let left = self.and(step2, context.new_rvalue_from_int(typ, 0xAA)); + let left = self.lshr(left, context.new_rvalue_from_int(typ, 1)); + let right = self.and(step2, context.new_rvalue_from_int(typ, 0x55)); + let right = self.shl(right, context.new_rvalue_from_int(typ, 1)); + let step3 = self.or(left, right); + + step3 + }, + 16 => { + // First step. + let left = self.and(value, context.new_rvalue_from_int(typ, 0x5555)); + let left = self.shl(left, context.new_rvalue_from_int(typ, 1)); + let right = self.and(value, context.new_rvalue_from_int(typ, 0xAAAA)); + let right = self.lshr(right, context.new_rvalue_from_int(typ, 1)); + let step1 = self.or(left, right); + + // Second step. + let left = self.and(step1, context.new_rvalue_from_int(typ, 0x3333)); + let left = self.shl(left, context.new_rvalue_from_int(typ, 2)); + let right = self.and(step1, context.new_rvalue_from_int(typ, 0xCCCC)); + let right = self.lshr(right, context.new_rvalue_from_int(typ, 2)); + let step2 = self.or(left, right); + + // Third step. + let left = self.and(step2, context.new_rvalue_from_int(typ, 0x0F0F)); + let left = self.shl(left, context.new_rvalue_from_int(typ, 4)); + let right = self.and(step2, context.new_rvalue_from_int(typ, 0xF0F0)); + let right = self.lshr(right, context.new_rvalue_from_int(typ, 4)); + let step3 = self.or(left, right); + + // Fourth step. 
+ let left = self.and(step3, context.new_rvalue_from_int(typ, 0x00FF)); + let left = self.shl(left, context.new_rvalue_from_int(typ, 8)); + let right = self.and(step3, context.new_rvalue_from_int(typ, 0xFF00)); + let right = self.lshr(right, context.new_rvalue_from_int(typ, 8)); + let step4 = self.or(left, right); + + step4 + }, + 32 => { + // TODO: Refactor with other implementations. + // First step. + let left = self.and(value, context.new_rvalue_from_long(typ, 0x55555555)); + let left = self.shl(left, context.new_rvalue_from_long(typ, 1)); + let right = self.and(value, context.new_rvalue_from_long(typ, 0xAAAAAAAA)); + let right = self.lshr(right, context.new_rvalue_from_long(typ, 1)); + let step1 = self.or(left, right); + + // Second step. + let left = self.and(step1, context.new_rvalue_from_long(typ, 0x33333333)); + let left = self.shl(left, context.new_rvalue_from_long(typ, 2)); + let right = self.and(step1, context.new_rvalue_from_long(typ, 0xCCCCCCCC)); + let right = self.lshr(right, context.new_rvalue_from_long(typ, 2)); + let step2 = self.or(left, right); + + // Third step. + let left = self.and(step2, context.new_rvalue_from_long(typ, 0x0F0F0F0F)); + let left = self.shl(left, context.new_rvalue_from_long(typ, 4)); + let right = self.and(step2, context.new_rvalue_from_long(typ, 0xF0F0F0F0)); + let right = self.lshr(right, context.new_rvalue_from_long(typ, 4)); + let step3 = self.or(left, right); + + // Fourth step. + let left = self.and(step3, context.new_rvalue_from_long(typ, 0x00FF00FF)); + let left = self.shl(left, context.new_rvalue_from_long(typ, 8)); + let right = self.and(step3, context.new_rvalue_from_long(typ, 0xFF00FF00)); + let right = self.lshr(right, context.new_rvalue_from_long(typ, 8)); + let step4 = self.or(left, right); + + // Fifth step. + let left = self.and(step4, context.new_rvalue_from_long(typ, 0x0000FFFF)); + let left = self.shl(left, context.new_rvalue_from_long(typ, 16)); + let right = self.and(step4, context.new_rvalue_from_long(typ, 0xFFFF0000)); + let right = self.lshr(right, context.new_rvalue_from_long(typ, 16)); + let step5 = self.or(left, right); + + step5 + }, + 64 => { + // First step. + let left = self.shl(value, context.new_rvalue_from_long(typ, 32)); + let right = self.lshr(value, context.new_rvalue_from_long(typ, 32)); + let step1 = self.or(left, right); + + // Second step. + let left = self.and(step1, context.new_rvalue_from_long(typ, 0x0001FFFF0001FFFF)); + let left = self.shl(left, context.new_rvalue_from_long(typ, 15)); + let right = self.and(step1, context.new_rvalue_from_long(typ, 0xFFFE0000FFFE0000u64 as i64)); // TODO: transmute the number instead? + let right = self.lshr(right, context.new_rvalue_from_long(typ, 17)); + let step2 = self.or(left, right); + + // Third step. + let left = self.lshr(step2, context.new_rvalue_from_long(typ, 10)); + let left = self.xor(step2, left); + let temp = self.and(left, context.new_rvalue_from_long(typ, 0x003F801F003F801F)); + + let left = self.shl(temp, context.new_rvalue_from_long(typ, 10)); + let left = self.or(temp, left); + let step3 = self.xor(left, step2); + + // Fourth step. + let left = self.lshr(step3, context.new_rvalue_from_long(typ, 4)); + let left = self.xor(step3, left); + let temp = self.and(left, context.new_rvalue_from_long(typ, 0x0E0384210E038421)); + + let left = self.shl(temp, context.new_rvalue_from_long(typ, 4)); + let left = self.or(temp, left); + let step4 = self.xor(left, step3); + + // Fifth step. 
+ let left = self.lshr(step4, context.new_rvalue_from_long(typ, 2));
+ let left = self.xor(step4, left);
+ let temp = self.and(left, context.new_rvalue_from_long(typ, 0x2248884222488842));
+
+ let left = self.shl(temp, context.new_rvalue_from_long(typ, 2));
+ let left = self.or(temp, left);
+ let step5 = self.xor(left, step4);
+
+ step5
+ },
+ 128 => {
+ // TODO: find a more efficient implementation?
+ let sixty_four = self.context.new_rvalue_from_long(typ, 64);
+ let high = self.context.new_cast(None, value >> sixty_four, self.u64_type);
+ let low = self.context.new_cast(None, value, self.u64_type);
+
+ let reversed_high = self.bit_reverse(64, high);
+ let reversed_low = self.bit_reverse(64, low);
+
+ let new_low = self.context.new_cast(None, reversed_high, typ);
+ let new_high = self.context.new_cast(None, reversed_low, typ) << sixty_four;
+
+ new_low | new_high
+ },
+ _ => {
+ panic!("cannot bit reverse with width = {}", width);
+ },
+ }
+ }
+
+ fn count_leading_zeroes(&self, width: u64, arg: RValue<'gcc>) -> RValue<'gcc> {
+ // TODO: use width?
+ let arg_type = arg.get_type();
+ let count_leading_zeroes =
+ if arg_type.is_uint(&self.cx) {
+ "__builtin_clz"
+ }
+ else if arg_type.is_ulong(&self.cx) {
+ "__builtin_clzl"
+ }
+ else if arg_type.is_ulonglong(&self.cx) {
+ "__builtin_clzll"
+ }
+ else if width == 128 {
+ // Algorithm from: https://stackoverflow.com/a/28433850/389119
+ let array_type = self.context.new_array_type(None, arg_type, 3);
+ let result = self.current_func()
+ .new_local(None, array_type, "count_leading_zeroes_results");
+
+ let sixty_four = self.context.new_rvalue_from_long(arg_type, 64);
+ let high = self.context.new_cast(None, arg >> sixty_four, self.u64_type);
+ let low = self.context.new_cast(None, arg, self.u64_type);
+
+ let zero = self.context.new_rvalue_zero(self.usize_type);
+ let one = self.context.new_rvalue_one(self.usize_type);
+ let two = self.context.new_rvalue_from_long(self.usize_type, 2);
+
+ let clzll = self.context.get_builtin_function("__builtin_clzll");
+
+ let first_elem = self.context.new_array_access(None, result, zero);
+ let first_value = self.context.new_cast(None, self.context.new_call(None, clzll, &[high]), arg_type);
+ self.llbb()
+ .add_assignment(None, first_elem, first_value);
+
+ let second_elem = self.context.new_array_access(None, result, one);
+ let second_value = self.context.new_cast(None, self.context.new_call(None, clzll, &[low]), arg_type) + sixty_four;
+ self.llbb()
+ .add_assignment(None, second_elem, second_value);
+
+ let third_elem = self.context.new_array_access(None, result, two);
+ let third_value = self.context.new_rvalue_from_long(arg_type, 128);
+ self.llbb()
+ .add_assignment(None, third_elem, third_value);
+
+ let not_high = self.context.new_unary_op(None, UnaryOp::LogicalNegate, self.u64_type, high);
+ let not_low = self.context.new_unary_op(None, UnaryOp::LogicalNegate, self.u64_type, low);
+ let not_low_and_not_high = not_low & not_high;
+ let index = not_high + not_low_and_not_high;
+
+ let res = self.context.new_array_access(None, result, index);
+
+ return self.context.new_cast(None, res, arg_type);
+ }
+ else {
+ let count_leading_zeroes = self.context.get_builtin_function("__builtin_clz");
+ let arg = self.context.new_cast(None, arg, self.uint_type);
+ let diff = self.int_width(self.uint_type) - self.int_width(arg_type);
+ let diff = self.context.new_rvalue_from_long(self.int_type, diff);
+ let res = self.context.new_call(None, count_leading_zeroes, &[arg]) - diff;
+ return self.context.new_cast(None, res, arg_type);
+ };
+ let count_leading_zeroes = self.context.get_builtin_function(count_leading_zeroes);
+ let res = self.context.new_call(None, count_leading_zeroes, &[arg]);
+ self.context.new_cast(None, res, arg_type)
+ }
+
+ fn count_trailing_zeroes(&self, _width: u64, arg: RValue<'gcc>) -> RValue<'gcc> {
+ let arg_type = arg.get_type();
+ let (count_trailing_zeroes, expected_type) =
+ if arg_type.is_uchar(&self.cx) || arg_type.is_ushort(&self.cx) || arg_type.is_uint(&self.cx) {
+ // NOTE: we don't need to & 0xFF for uchar because the result is undefined on zero.
+ ("__builtin_ctz", self.cx.uint_type)
+ }
+ else if arg_type.is_ulong(&self.cx) {
+ ("__builtin_ctzl", self.cx.ulong_type)
+ }
+ else if arg_type.is_ulonglong(&self.cx) {
+ ("__builtin_ctzll", self.cx.ulonglong_type)
+ }
+ else if arg_type.is_u128(&self.cx) {
+ // Adapted from the algorithm to count leading zeroes from: https://stackoverflow.com/a/28433850/389119
+ let array_type = self.context.new_array_type(None, arg_type, 3);
+ let result = self.current_func()
+ .new_local(None, array_type, "count_trailing_zeroes_results");
+
+ let sixty_four = self.context.new_rvalue_from_long(arg_type, 64);
+ let high = self.context.new_cast(None, arg >> sixty_four, self.u64_type);
+ let low = self.context.new_cast(None, arg, self.u64_type);
+
+ let zero = self.context.new_rvalue_zero(self.usize_type);
+ let one = self.context.new_rvalue_one(self.usize_type);
+ let two = self.context.new_rvalue_from_long(self.usize_type, 2);
+
+ let ctzll = self.context.get_builtin_function("__builtin_ctzll");
+
+ let first_elem = self.context.new_array_access(None, result, zero);
+ let first_value = self.context.new_cast(None, self.context.new_call(None, ctzll, &[low]), arg_type);
+ self.llbb()
+ .add_assignment(None, first_elem, first_value);
+
+ let second_elem = self.context.new_array_access(None, result, one);
+ let second_value = self.context.new_cast(None, self.context.new_call(None, ctzll, &[high]), arg_type) + sixty_four;
+ self.llbb()
+ .add_assignment(None, second_elem, second_value);
+
+ let third_elem = self.context.new_array_access(None, result, two);
+ let third_value = self.context.new_rvalue_from_long(arg_type, 128);
+ self.llbb()
+ .add_assignment(None, third_elem, third_value);
+
+ let not_low = self.context.new_unary_op(None, UnaryOp::LogicalNegate, self.u64_type, low);
+ let not_high = self.context.new_unary_op(None, UnaryOp::LogicalNegate, self.u64_type, high);
+ let not_low_and_not_high = not_low & not_high;
+ let index = not_low + not_low_and_not_high;
+
+ let res = self.context.new_array_access(None, result, index);
+
+ return self.context.new_cast(None, res, arg_type);
+ }
+ else {
+ unimplemented!("count_trailing_zeroes for {:?}", arg_type);
+ };
+ let count_trailing_zeroes = self.context.get_builtin_function(count_trailing_zeroes);
+ let arg =
+ if arg_type != expected_type {
+ self.context.new_cast(None, arg, expected_type)
+ }
+ else {
+ arg
+ };
+ let res = self.context.new_call(None, count_trailing_zeroes, &[arg]);
+ self.context.new_cast(None, res, arg_type)
+ }
+
+ fn int_width(&self, typ: Type<'gcc>) -> i64 {
+ self.cx.int_width(typ) as i64
+ }
+
+ fn pop_count(&self, value: RValue<'gcc>) -> RValue<'gcc> {
+ // TODO: use the optimized version with fewer operations.
+ let value_type = value.get_type();
+
+ if value_type.is_u128(&self.cx) {
+ // TODO: implement in the normal algorithm below to have a more efficient
+ // implementation (that does not require a call to __popcountdi2).
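+ // Illustrative note (not in the original patch): the 128-bit value
+ // is split into two 64-bit halves and the per-half counts are added,
+ // i.e. popcount(value) == popcount(high) + popcount(low).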
+ let popcount = self.context.get_builtin_function("__builtin_popcountll"); + let sixty_four = self.context.new_rvalue_from_long(value_type, 64); + let high = self.context.new_cast(None, value >> sixty_four, self.cx.ulonglong_type); + let high = self.context.new_call(None, popcount, &[high]); + let low = self.context.new_cast(None, value, self.cx.ulonglong_type); + let low = self.context.new_call(None, popcount, &[low]); + return high + low; + } + + // First step. + let mask = self.context.new_rvalue_from_long(value_type, 0x5555555555555555); + let left = value & mask; + let shifted = value >> self.context.new_rvalue_from_int(value_type, 1); + let right = shifted & mask; + let value = left + right; + + // Second step. + let mask = self.context.new_rvalue_from_long(value_type, 0x3333333333333333); + let left = value & mask; + let shifted = value >> self.context.new_rvalue_from_int(value_type, 2); + let right = shifted & mask; + let value = left + right; + + // Third step. + let mask = self.context.new_rvalue_from_long(value_type, 0x0F0F0F0F0F0F0F0F); + let left = value & mask; + let shifted = value >> self.context.new_rvalue_from_int(value_type, 4); + let right = shifted & mask; + let value = left + right; + + if value_type.is_u8(&self.cx) { + return value; + } + + // Fourth step. + let mask = self.context.new_rvalue_from_long(value_type, 0x00FF00FF00FF00FF); + let left = value & mask; + let shifted = value >> self.context.new_rvalue_from_int(value_type, 8); + let right = shifted & mask; + let value = left + right; + + if value_type.is_u16(&self.cx) { + return value; + } + + // Fifth step. + let mask = self.context.new_rvalue_from_long(value_type, 0x0000FFFF0000FFFF); + let left = value & mask; + let shifted = value >> self.context.new_rvalue_from_int(value_type, 16); + let right = shifted & mask; + let value = left + right; + + if value_type.is_u32(&self.cx) { + return value; + } + + // Sixth step. 
+ let mask = self.context.new_rvalue_from_long(value_type, 0x00000000FFFFFFFF); + let left = value & mask; + let shifted = value >> self.context.new_rvalue_from_int(value_type, 32); + let right = shifted & mask; + let value = left + right; + + value + } + + // Algorithm from: https://blog.regehr.org/archives/1063 + fn rotate_left(&mut self, value: RValue<'gcc>, shift: RValue<'gcc>, width: u64) -> RValue<'gcc> { + let max = self.context.new_rvalue_from_long(shift.get_type(), width as i64); + let shift = shift % max; + let lhs = self.shl(value, shift); + let result_and = + self.and( + self.context.new_unary_op(None, UnaryOp::Minus, shift.get_type(), shift), + self.context.new_rvalue_from_long(shift.get_type(), width as i64 - 1), + ); + let rhs = self.lshr(value, result_and); + self.or(lhs, rhs) + } + + // Algorithm from: https://blog.regehr.org/archives/1063 + fn rotate_right(&mut self, value: RValue<'gcc>, shift: RValue<'gcc>, width: u64) -> RValue<'gcc> { + let max = self.context.new_rvalue_from_long(shift.get_type(), width as i64); + let shift = shift % max; + let lhs = self.lshr(value, shift); + let result_and = + self.and( + self.context.new_unary_op(None, UnaryOp::Minus, shift.get_type(), shift), + self.context.new_rvalue_from_long(shift.get_type(), width as i64 - 1), + ); + let rhs = self.shl(value, result_and); + self.or(lhs, rhs) + } + + fn saturating_add(&mut self, lhs: RValue<'gcc>, rhs: RValue<'gcc>, signed: bool, width: u64) -> RValue<'gcc> { + let func = self.current_func.borrow().expect("func"); + + if signed { + // Algorithm from: https://stackoverflow.com/a/56531252/389119 + let after_block = func.new_block("after"); + let func_name = + match width { + 8 => "__builtin_add_overflow", + 16 => "__builtin_add_overflow", + 32 => "__builtin_sadd_overflow", + 64 => "__builtin_saddll_overflow", + 128 => "__builtin_add_overflow", + _ => unreachable!(), + }; + let overflow_func = self.context.get_builtin_function(func_name); + let result_type = lhs.get_type(); + let res = func.new_local(None, result_type, "saturating_sum"); + let overflow = self.overflow_call(overflow_func, &[lhs, rhs, res.get_address(None)], None); + + let then_block = func.new_block("then"); + + let unsigned_type = self.context.new_int_type(width as i32 / 8, false); + let shifted = self.context.new_cast(None, lhs, unsigned_type) >> self.context.new_rvalue_from_int(unsigned_type, width as i32 - 1); + let uint_max = self.context.new_unary_op(None, UnaryOp::BitwiseNegate, unsigned_type, + self.context.new_rvalue_from_int(unsigned_type, 0) + ); + let int_max = uint_max >> self.context.new_rvalue_one(unsigned_type); + then_block.add_assignment(None, res, self.context.new_cast(None, shifted + int_max, result_type)); + then_block.end_with_jump(None, after_block); + + self.block.expect("block").end_with_conditional(None, overflow, then_block, after_block); + + // NOTE: since jumps were added in a place rustc does not + // expect, the current blocks in the state need to be updated. 
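+ // Illustrative note on the saturation value computed above (comment
+ // added, not in the original patch): on overflow, `then_block` stores
+ // the saturation bound instead of the wrapped sum. `shifted` is the
+ // sign bit of `lhs` (0 for positive, 1 for negative), so
+ // `shifted + int_max` evaluates to INT_MAX for positive operands and
+ // wraps to INT_MIN for negative ones, saturating in the direction of
+ // the overflow.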
+ *self.current_block.borrow_mut() = Some(after_block); + self.block = Some(after_block); + + res.to_rvalue() + } + else { + // Algorithm from: http://locklessinc.com/articles/sat_arithmetic/ + let res = lhs + rhs; + let res_type = res.get_type(); + let cond = self.context.new_comparison(None, ComparisonOp::LessThan, res, lhs); + let value = self.context.new_unary_op(None, UnaryOp::Minus, res_type, self.context.new_cast(None, cond, res_type)); + res | value + } + } + + // Algorithm from: https://locklessinc.com/articles/sat_arithmetic/ + fn saturating_sub(&mut self, lhs: RValue<'gcc>, rhs: RValue<'gcc>, signed: bool, width: u64) -> RValue<'gcc> { + if signed { + // Also based on algorithm from: https://stackoverflow.com/a/56531252/389119 + let func_name = + match width { + 8 => "__builtin_sub_overflow", + 16 => "__builtin_sub_overflow", + 32 => "__builtin_ssub_overflow", + 64 => "__builtin_ssubll_overflow", + 128 => "__builtin_sub_overflow", + _ => unreachable!(), + }; + let overflow_func = self.context.get_builtin_function(func_name); + let result_type = lhs.get_type(); + let func = self.current_func.borrow().expect("func"); + let res = func.new_local(None, result_type, "saturating_diff"); + let overflow = self.overflow_call(overflow_func, &[lhs, rhs, res.get_address(None)], None); + + let then_block = func.new_block("then"); + let after_block = func.new_block("after"); + + let unsigned_type = self.context.new_int_type(width as i32 / 8, false); + let shifted = self.context.new_cast(None, lhs, unsigned_type) >> self.context.new_rvalue_from_int(unsigned_type, width as i32 - 1); + let uint_max = self.context.new_unary_op(None, UnaryOp::BitwiseNegate, unsigned_type, + self.context.new_rvalue_from_int(unsigned_type, 0) + ); + let int_max = uint_max >> self.context.new_rvalue_one(unsigned_type); + then_block.add_assignment(None, res, self.context.new_cast(None, shifted + int_max, result_type)); + then_block.end_with_jump(None, after_block); + + self.block.expect("block").end_with_conditional(None, overflow, then_block, after_block); + + // NOTE: since jumps were added in a place rustc does not + // expect, the current blocks in the state need to be updated. + *self.current_block.borrow_mut() = Some(after_block); + self.block = Some(after_block); + + res.to_rvalue() + } + else { + let res = lhs - rhs; + let comparison = self.context.new_comparison(None, ComparisonOp::LessThanEquals, res, lhs); + let comparison = self.context.new_cast(None, comparison, lhs.get_type()); + let unary_op = self.context.new_unary_op(None, UnaryOp::Minus, comparison.get_type(), comparison); + self.and(res, unary_op) + } + } +} + +fn try_intrinsic<'gcc, 'tcx>(bx: &mut Builder<'_, 'gcc, 'tcx>, try_func: RValue<'gcc>, data: RValue<'gcc>, _catch_func: RValue<'gcc>, dest: RValue<'gcc>) { + if bx.sess().panic_strategy() == PanicStrategy::Abort { + bx.call(try_func, &[data], None); + // Return 0 unconditionally from the intrinsic call; + // we can never unwind. + let ret_align = bx.tcx.data_layout.i32_align.abi; + bx.store(bx.const_i32(0), dest, ret_align); + } + else if wants_msvc_seh(bx.sess()) { + unimplemented!(); + //codegen_msvc_try(bx, try_func, data, catch_func, dest); + } + else { + unimplemented!(); + //codegen_gnu_try(bx, try_func, data, catch_func, dest); + } +} + +// MSVC's definition of the `rust_try` function. +// +// This implementation uses the new exception handling instructions in LLVM +// which have support in LLVM for SEH on MSVC targets. 
Although these
+// instructions are meant to work for all targets, as of this writing LLVM
+// does not recommend using them, as the old ones are still better optimized.
+/*fn codegen_msvc_try<'a, 'gcc, 'tcx>(_bx: &mut Builder<'a, 'gcc, 'tcx>, _try_func: RValue<'gcc>, _data: RValue<'gcc>, _catch_func: RValue<'gcc>, _dest: RValue<'gcc>) {
+ unimplemented!();
+ /*let llfn = get_rust_try_fn(bx, &mut |mut bx| {
+ bx.set_personality_fn(bx.eh_personality());
+ bx.sideeffect();
+
+ let mut normal = bx.build_sibling_block("normal");
+ let mut catchswitch = bx.build_sibling_block("catchswitch");
+ let mut catchpad = bx.build_sibling_block("catchpad");
+ let mut caught = bx.build_sibling_block("caught");
+
+ let try_func = llvm::get_param(bx.llfn(), 0);
+ let data = llvm::get_param(bx.llfn(), 1);
+ let catch_func = llvm::get_param(bx.llfn(), 2);
+
+ // We're generating an IR snippet that looks like:
+ //
+ // declare i32 @rust_try(%try_func, %data, %catch_func) {
+ // %slot = alloca u8*
+ // invoke %try_func(%data) to label %normal unwind label %catchswitch
+ //
+ // normal:
+ // ret i32 0
+ //
+ // catchswitch:
+ // %cs = catchswitch within none [%catchpad] unwind to caller
+ //
+ // catchpad:
+ // %tok = catchpad within %cs [%type_descriptor, 0, %slot]
+ // %ptr = load %slot
+ // call %catch_func(%data, %ptr)
+ // catchret from %tok to label %caught
+ //
+ // caught:
+ // ret i32 1
+ // }
+ //
+ // This structure follows the basic usage of throw/try/catch in LLVM.
+ // For example, compile this C++ snippet to see what LLVM generates:
+ //
+ // #include <stdint.h>
+ //
+ // struct rust_panic {
+ // rust_panic(const rust_panic&);
+ // ~rust_panic();
+ //
+ // uint64_t x[2];
+ // };
+ //
+ // int __rust_try(
+ // void (*try_func)(void*),
+ // void *data,
+ // void (*catch_func)(void*, void*) noexcept
+ // ) {
+ // try {
+ // try_func(data);
+ // return 0;
+ // } catch(rust_panic& a) {
+ // catch_func(data, &a);
+ // return 1;
+ // }
+ // }
+ //
+ // More information can be found in libstd's seh.rs implementation.
+ let ptr_align = bx.tcx().data_layout.pointer_align.abi;
+ let slot = bx.alloca(bx.type_i8p(), ptr_align);
+ bx.invoke(try_func, &[data], normal.llbb(), catchswitch.llbb(), None);
+
+ normal.ret(bx.const_i32(0));
+
+ let cs = catchswitch.catch_switch(None, None, 1);
+ catchswitch.add_handler(cs, catchpad.llbb());
+
+ // We can't use the TypeDescriptor defined in libpanic_unwind because it
+ // might be in another DLL and the SEH encoding only supports specifying
+ // a TypeDescriptor from the current module.
+ //
+ // However this isn't an issue since the MSVC runtime uses string
+ // comparison on the type name to match TypeDescriptors rather than
+ // pointer equality.
+ //
+ // So instead we generate a new TypeDescriptor in each module that uses
+ // `try` and let the linker merge duplicate definitions in the same
+ // module.
+ //
+ // When modifying, make sure that the type_name string exactly matches
+ // the one used in src/libpanic_unwind/seh.rs.
+ let type_info_vtable = bx.declare_global("??_7type_info@@6B@", bx.type_i8p()); + let type_name = bx.const_bytes(b"rust_panic\0"); + let type_info = + bx.const_struct(&[type_info_vtable, bx.const_null(bx.type_i8p()), type_name], false); + let tydesc = bx.declare_global("__rust_panic_type_info", bx.val_ty(type_info)); + unsafe { + llvm::LLVMRustSetLinkage(tydesc, llvm::Linkage::LinkOnceODRLinkage); + llvm::SetUniqueComdat(bx.llmod, tydesc); + llvm::LLVMSetInitializer(tydesc, type_info); + } + + // The flag value of 8 indicates that we are catching the exception by + // reference instead of by value. We can't use catch by value because + // that requires copying the exception object, which we don't support + // since our exception object effectively contains a Box. + // + // Source: MicrosoftCXXABI::getAddrOfCXXCatchHandlerType in clang + let flags = bx.const_i32(8); + let funclet = catchpad.catch_pad(cs, &[tydesc, flags, slot]); + let ptr = catchpad.load(slot, ptr_align); + catchpad.call(catch_func, &[data, ptr], Some(&funclet)); + + catchpad.catch_ret(&funclet, caught.llbb()); + + caught.ret(bx.const_i32(1)); + }); + + // Note that no invoke is used here because by definition this function + // can't panic (that's what it's catching). + let ret = bx.call(llfn, &[try_func, data, catch_func], None); + let i32_align = bx.tcx().data_layout.i32_align.abi; + bx.store(ret, dest, i32_align);*/ +}*/ + +// Definition of the standard `try` function for Rust using the GNU-like model +// of exceptions (e.g., the normal semantics of LLVM's `landingpad` and `invoke` +// instructions). +// +// This codegen is a little surprising because we always call a shim +// function instead of inlining the call to `invoke` manually here. This is done +// because in LLVM we're only allowed to have one personality per function +// definition. The call to the `try` intrinsic is being inlined into the +// function calling it, and that function may already have other personality +// functions in play. By calling a shim we're guaranteed that our shim will have +// the right personality function. +/*fn codegen_gnu_try<'a, 'gcc, 'tcx>(_bx: &mut Builder<'a, 'gcc, 'tcx>, _try_func: RValue<'gcc>, _data: RValue<'gcc>, _catch_func: RValue<'gcc>, _dest: RValue<'gcc>) { + unimplemented!(); + /*let llfn = get_rust_try_fn(bx, &mut |mut bx| { + // Codegens the shims described above: + // + // bx: + // invoke %try_func(%data) normal %normal unwind %catch + // + // normal: + // ret 0 + // + // catch: + // (%ptr, _) = landingpad + // call %catch_func(%data, %ptr) + // ret 1 + + bx.sideeffect(); + + let mut then = bx.build_sibling_block("then"); + let mut catch = bx.build_sibling_block("catch"); + + let try_func = llvm::get_param(bx.llfn(), 0); + let data = llvm::get_param(bx.llfn(), 1); + let catch_func = llvm::get_param(bx.llfn(), 2); + bx.invoke(try_func, &[data], then.llbb(), catch.llbb(), None); + then.ret(bx.const_i32(0)); + + // Type indicator for the exception being thrown. + // + // The first value in this tuple is a pointer to the exception object + // being thrown. The second value is a "selector" indicating which of + // the landing pad clauses the exception's type had been matched to. + // rust_try ignores the selector. 
+ let lpad_ty = bx.type_struct(&[bx.type_i8p(), bx.type_i32()], false);
+ let vals = catch.landing_pad(lpad_ty, bx.eh_personality(), 1);
+ let tydesc = match bx.tcx().lang_items().eh_catch_typeinfo() {
+ Some(tydesc) => {
+ let tydesc = bx.get_static(tydesc);
+ bx.bitcast(tydesc, bx.type_i8p())
+ }
+ None => bx.const_null(bx.type_i8p()),
+ };
+ catch.add_clause(vals, tydesc);
+ let ptr = catch.extract_value(vals, 0);
+ catch.call(catch_func, &[data, ptr], None);
+ catch.ret(bx.const_i32(1));
+ });
+
+ // Note that no invoke is used here because by definition this function
+ // can't panic (that's what it's catching).
+ let ret = bx.call(llfn, &[try_func, data, catch_func], None);
+ let i32_align = bx.tcx().data_layout.i32_align.abi;
+ bx.store(ret, dest, i32_align);*/
+}*/
diff --git a/src/intrinsic/simd.rs b/src/intrinsic/simd.rs
new file mode 100644
index 0000000000000..71cf5cce9f4e1
--- /dev/null
+++ b/src/intrinsic/simd.rs
@@ -0,0 +1,1001 @@
+use gccjit::{RValue, Type};
+use rustc_codegen_ssa::base::compare_simd_types;
+use rustc_codegen_ssa::common::{TypeKind, span_invalid_monomorphization_error};
+use rustc_codegen_ssa::mir::operand::OperandRef;
+use rustc_codegen_ssa::traits::{BaseTypeMethods, BuilderMethods};
+use rustc_hir as hir;
+use rustc_middle::span_bug;
+use rustc_middle::ty::layout::HasTyCtxt;
+use rustc_middle::ty::{self, Ty};
+use rustc_span::{Span, Symbol, sym};
+
+use crate::builder::Builder;
+
+pub fn generic_simd_intrinsic<'a, 'gcc, 'tcx>(bx: &mut Builder<'a, 'gcc, 'tcx>, name: Symbol, callee_ty: Ty<'tcx>, args: &[OperandRef<'tcx, RValue<'gcc>>], ret_ty: Ty<'tcx>, llret_ty: Type<'gcc>, span: Span) -> Result<RValue<'gcc>, ()> {
+ //println!("Generic simd: {}", name);
+
+ // macros for error handling:
+ macro_rules! emit_error {
+ ($msg: tt) => {
+ emit_error!($msg, )
+ };
+ ($msg: tt, $($fmt: tt)*) => {
+ span_invalid_monomorphization_error(
+ bx.sess(), span,
+ &format!(concat!("invalid monomorphization of `{}` intrinsic: ", $msg),
+ name, $($fmt)*));
+ }
+ }
+
+ macro_rules! return_error {
+ ($($fmt: tt)*) => {
+ {
+ emit_error!($($fmt)*);
+ return Err(());
+ }
+ }
+ }
+
+ macro_rules! require {
+ ($cond: expr, $($fmt: tt)*) => {
+ if !$cond {
+ return_error!($($fmt)*);
+ }
+ };
+ }
+
+ macro_rules! require_simd {
+ ($ty: expr, $position: expr) => {
+ require!($ty.is_simd(), "expected SIMD {} type, found non-SIMD `{}`", $position, $ty)
+ };
+ }
+
+ let tcx = bx.tcx();
+ let sig =
+ tcx.normalize_erasing_late_bound_regions(ty::ParamEnv::reveal_all(), callee_ty.fn_sig(tcx));
+ let arg_tys = sig.inputs();
+ let name_str = &*name.as_str();
+
+ /*if name == sym::simd_select_bitmask {
+ let in_ty = arg_tys[0];
+ let m_len = match in_ty.kind() {
+ // Note that this `.unwrap()` crashes for isize/usize, that's sort
+ // of intentional as there's not currently a use case for that.
+ ty::Int(i) => i.bit_width().unwrap(),
+ ty::Uint(i) => i.bit_width().unwrap(),
+ _ => return_error!("`{}` is not an integral type", in_ty),
+ };
+ require_simd!(arg_tys[1], "argument");
+ let (v_len, _) = arg_tys[1].simd_size_and_type(bx.tcx());
+ require!(
+ // Allow masks for vectors with fewer than 8 elements to be
+ // represented with a u8 or i8.
+ m_len == v_len || (m_len == 8 && v_len < 8), + "mismatched lengths: mask length `{}` != other vector length `{}`", + m_len, + v_len + ); + let i1 = bx.type_i1(); + let im = bx.type_ix(v_len); + let i1xn = bx.type_vector(i1, v_len); + let m_im = bx.trunc(args[0].immediate(), im); + let m_i1s = bx.bitcast(m_im, i1xn); + return Ok(bx.select(m_i1s, args[1].immediate(), args[2].immediate())); + }*/ + + // every intrinsic below takes a SIMD vector as its first argument + require_simd!(arg_tys[0], "input"); + let in_ty = arg_tys[0]; + + let comparison = match name { + sym::simd_eq => Some(hir::BinOpKind::Eq), + sym::simd_ne => Some(hir::BinOpKind::Ne), + sym::simd_lt => Some(hir::BinOpKind::Lt), + sym::simd_le => Some(hir::BinOpKind::Le), + sym::simd_gt => Some(hir::BinOpKind::Gt), + sym::simd_ge => Some(hir::BinOpKind::Ge), + _ => None, + }; + + let (in_len, in_elem) = arg_tys[0].simd_size_and_type(bx.tcx()); + if let Some(cmp_op) = comparison { + require_simd!(ret_ty, "return"); + + let (out_len, out_ty) = ret_ty.simd_size_and_type(bx.tcx()); + require!( + in_len == out_len, + "expected return type with length {} (same as input type `{}`), \ + found `{}` with length {}", + in_len, + in_ty, + ret_ty, + out_len + ); + require!( + bx.type_kind(bx.element_type(llret_ty)) == TypeKind::Integer, + "expected return type with integer elements, found `{}` with non-integer `{}`", + ret_ty, + out_ty + ); + + return Ok(compare_simd_types( + bx, + args[0].immediate(), + args[1].immediate(), + in_elem, + llret_ty, + cmp_op, + )); + } + + if let Some(stripped) = name_str.strip_prefix("simd_shuffle") { + let n: u64 = stripped.parse().unwrap_or_else(|_| { + span_bug!(span, "bad `simd_shuffle` instruction only caught in codegen?") + }); + + require_simd!(ret_ty, "return"); + + let (out_len, out_ty) = ret_ty.simd_size_and_type(bx.tcx()); + require!( + out_len == n, + "expected return type of length {}, found `{}` with length {}", + n, + ret_ty, + out_len + ); + require!( + in_elem == out_ty, + "expected return element type `{}` (element of input `{}`), \ + found `{}` with element type `{}`", + in_elem, + in_ty, + ret_ty, + out_ty + ); + + //let total_len = u128::from(in_len) * 2; + + let vector = args[2].immediate(); + + // TODO: + /*let indices: Option> = (0..n) + .map(|i| { + let arg_idx = i; + let val = bx.const_get_vector_element(vector, i as u64); + match bx.const_to_opt_u128(val, true) { + None => { + emit_error!("shuffle index #{} is not a constant", arg_idx); + None + } + Some(idx) if idx >= total_len => { + emit_error!( + "shuffle index #{} is out of bounds (limit {})", + arg_idx, + total_len + ); + None + } + Some(idx) => Some(bx.const_i32(idx as i32)), + } + }) + .collect(); + let indices = match indices { + Some(i) => i, + None => return Ok(bx.const_null(llret_ty)), + };*/ + + return Ok(bx.shuffle_vector( + args[0].immediate(), + args[1].immediate(), + vector, + )); + } + + /*if name == sym::simd_insert { + require!( + in_elem == arg_tys[2], + "expected inserted type `{}` (element of input `{}`), found `{}`", + in_elem, + in_ty, + arg_tys[2] + ); + return Ok(bx.insert_element( + args[0].immediate(), + args[2].immediate(), + args[1].immediate(), + )); + } + if name == sym::simd_extract { + require!( + ret_ty == in_elem, + "expected return type `{}` (element of input `{}`), found `{}`", + in_elem, + in_ty, + ret_ty + ); + return Ok(bx.extract_element(args[0].immediate(), args[1].immediate())); + } + + if name == sym::simd_select { + let m_elem_ty = in_elem; + let m_len = in_len; + 
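+        // simd_select(mask: <N x iM>, a: <N x T>, b: <N x T>) -> <N x T>:
+        // lanes where the mask is true come from `a`, the rest from `b`;
+        // the mask is truncated to a vector of i1s before the select below.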
require_simd!(arg_tys[1], "argument"); + let (v_len, _) = arg_tys[1].simd_size_and_type(bx.tcx()); + require!( + m_len == v_len, + "mismatched lengths: mask length `{}` != other vector length `{}`", + m_len, + v_len + ); + match m_elem_ty.kind() { + ty::Int(_) => {} + _ => return_error!("mask element type is `{}`, expected `i_`", m_elem_ty), + } + // truncate the mask to a vector of i1s + let i1 = bx.type_i1(); + let i1xn = bx.type_vector(i1, m_len as u64); + let m_i1s = bx.trunc(args[0].immediate(), i1xn); + return Ok(bx.select(m_i1s, args[1].immediate(), args[2].immediate())); + } + + if name == sym::simd_bitmask { + // The `fn simd_bitmask(vector) -> unsigned integer` intrinsic takes a + // vector mask and returns an unsigned integer containing the most + // significant bit (MSB) of each lane. + + // If the vector has less than 8 lanes, an u8 is returned with zeroed + // trailing bits. + let expected_int_bits = in_len.max(8); + match ret_ty.kind() { + ty::Uint(i) if i.bit_width() == Some(expected_int_bits) => (), + _ => return_error!("bitmask `{}`, expected `u{}`", ret_ty, expected_int_bits), + } + + // Integer vector : + let (i_xn, in_elem_bitwidth) = match in_elem.kind() { + ty::Int(i) => ( + args[0].immediate(), + i.bit_width().unwrap_or_else(|| bx.data_layout().pointer_size.bits()), + ), + ty::Uint(i) => ( + args[0].immediate(), + i.bit_width().unwrap_or_else(|| bx.data_layout().pointer_size.bits()), + ), + _ => return_error!( + "vector argument `{}`'s element type `{}`, expected integer element type", + in_ty, + in_elem + ), + }; + + // Shift the MSB to the right by "in_elem_bitwidth - 1" into the first bit position. + let shift_indices = + vec![ + bx.cx.const_int(bx.type_ix(in_elem_bitwidth), (in_elem_bitwidth - 1) as _); + in_len as _ + ]; + let i_xn_msb = bx.lshr(i_xn, bx.const_vector(shift_indices.as_slice())); + // Truncate vector to an + let i1xn = bx.trunc(i_xn_msb, bx.type_vector(bx.type_i1(), in_len)); + // Bitcast to iN: + let i_ = bx.bitcast(i1xn, bx.type_ix(in_len)); + // Zero-extend iN to the bitmask type: + return Ok(bx.zext(i_, bx.type_ix(expected_int_bits))); + } + + fn simd_simple_float_intrinsic<'a, 'gcc, 'tcx>( + name: Symbol, + in_elem: &::rustc_middle::ty::TyS<'_>, + in_ty: &::rustc_middle::ty::TyS<'_>, + in_len: u64, + bx: &mut Builder<'a, 'gcc, 'tcx>, + span: Span, + args: &[OperandRef<'tcx, RValue<'gcc>>], + ) -> Result, ()> { + macro_rules! emit_error { + ($msg: tt) => { + emit_error!($msg, ) + }; + ($msg: tt, $($fmt: tt)*) => { + span_invalid_monomorphization_error( + bx.sess(), span, + &format!(concat!("invalid monomorphization of `{}` intrinsic: ", $msg), + name, $($fmt)*)); + } + } + macro_rules! 
return_error { + ($($fmt: tt)*) => { + { + emit_error!($($fmt)*); + return Err(()); + } + } + } + + let (elem_ty_str, elem_ty) = if let ty::Float(f) = in_elem.kind() { + let elem_ty = bx.cx.type_float_from_ty(*f); + match f.bit_width() { + 32 => ("f32", elem_ty), + 64 => ("f64", elem_ty), + _ => { + return_error!( + "unsupported element type `{}` of floating-point vector `{}`", + f.name_str(), + in_ty + ); + } + } + } else { + return_error!("`{}` is not a floating-point type", in_ty); + }; + + let vec_ty = bx.type_vector(elem_ty, in_len); + + let (intr_name, fn_ty) = match name { + sym::simd_ceil => ("ceil", bx.type_func(&[vec_ty], vec_ty)), + sym::simd_fabs => ("fabs", bx.type_func(&[vec_ty], vec_ty)), + sym::simd_fcos => ("cos", bx.type_func(&[vec_ty], vec_ty)), + sym::simd_fexp2 => ("exp2", bx.type_func(&[vec_ty], vec_ty)), + sym::simd_fexp => ("exp", bx.type_func(&[vec_ty], vec_ty)), + sym::simd_flog10 => ("log10", bx.type_func(&[vec_ty], vec_ty)), + sym::simd_flog2 => ("log2", bx.type_func(&[vec_ty], vec_ty)), + sym::simd_flog => ("log", bx.type_func(&[vec_ty], vec_ty)), + sym::simd_floor => ("floor", bx.type_func(&[vec_ty], vec_ty)), + sym::simd_fma => ("fma", bx.type_func(&[vec_ty, vec_ty, vec_ty], vec_ty)), + sym::simd_fpowi => ("powi", bx.type_func(&[vec_ty, bx.type_i32()], vec_ty)), + sym::simd_fpow => ("pow", bx.type_func(&[vec_ty, vec_ty], vec_ty)), + sym::simd_fsin => ("sin", bx.type_func(&[vec_ty], vec_ty)), + sym::simd_fsqrt => ("sqrt", bx.type_func(&[vec_ty], vec_ty)), + sym::simd_round => ("round", bx.type_func(&[vec_ty], vec_ty)), + sym::simd_trunc => ("trunc", bx.type_func(&[vec_ty], vec_ty)), + _ => return_error!("unrecognized intrinsic `{}`", name), + }; + let llvm_name = &format!("llvm.{0}.v{1}{2}", intr_name, in_len, elem_ty_str); + let f = bx.declare_cfn(&llvm_name, fn_ty); + let c = bx.call(f, &args.iter().map(|arg| arg.immediate()).collect::>(), None); + Ok(c) + } + + if std::matches!( + name, + sym::simd_ceil + | sym::simd_fabs + | sym::simd_fcos + | sym::simd_fexp2 + | sym::simd_fexp + | sym::simd_flog10 + | sym::simd_flog2 + | sym::simd_flog + | sym::simd_floor + | sym::simd_fma + | sym::simd_fpow + | sym::simd_fpowi + | sym::simd_fsin + | sym::simd_fsqrt + | sym::simd_round + | sym::simd_trunc + ) { + return simd_simple_float_intrinsic(name, in_elem, in_ty, in_len, bx, span, args); + } + + // FIXME: use: + // https://github.com/llvm-mirror/llvm/blob/master/include/llvm/IR/Function.h#L182 + // https://github.com/llvm-mirror/llvm/blob/master/include/llvm/IR/Intrinsics.h#L81 + fn llvm_vector_str(elem_ty: Ty<'_>, vec_len: u64, no_pointers: usize) -> String { + let p0s: String = "p0".repeat(no_pointers); + match *elem_ty.kind() { + ty::Int(v) => format!("v{}{}i{}", vec_len, p0s, v.bit_width().unwrap()), + ty::Uint(v) => format!("v{}{}i{}", vec_len, p0s, v.bit_width().unwrap()), + ty::Float(v) => format!("v{}{}f{}", vec_len, p0s, v.bit_width()), + _ => unreachable!(), + } + } + + fn gcc_vector_ty<'gcc>( + cx: &CodegenCx<'gcc, '_>, + elem_ty: Ty<'_>, + vec_len: u64, + mut no_pointers: usize, + ) -> Type<'gcc> { + // FIXME: use cx.layout_of(ty).llvm_type() ? 
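+        // Wraps the element type in `no_pointers` levels of pointers before
+        // building the vector, mirroring the `p0` repetitions emitted by
+        // `llvm_vector_str` above (e.g. vec_len = 4, elem_ty = i32 and
+        // no_pointers = 1 correspond to the name "v4p0i32").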
+ let mut elem_ty = match *elem_ty.kind() { + ty::Int(v) => cx.type_int_from_ty(v), + ty::Uint(v) => cx.type_uint_from_ty(v), + ty::Float(v) => cx.type_float_from_ty(v), + _ => unreachable!(), + }; + while no_pointers > 0 { + elem_ty = cx.type_ptr_to(elem_ty); + no_pointers -= 1; + } + cx.type_vector(elem_ty, vec_len) + } + + if name == sym::simd_gather { + // simd_gather(values: , pointers: , + // mask: ) -> + // * N: number of elements in the input vectors + // * T: type of the element to load + // * M: any integer width is supported, will be truncated to i1 + + // All types must be simd vector types + require_simd!(in_ty, "first"); + require_simd!(arg_tys[1], "second"); + require_simd!(arg_tys[2], "third"); + require_simd!(ret_ty, "return"); + + // Of the same length: + let (out_len, _) = arg_tys[1].simd_size_and_type(bx.tcx()); + let (out_len2, _) = arg_tys[2].simd_size_and_type(bx.tcx()); + require!( + in_len == out_len, + "expected {} argument with length {} (same as input type `{}`), \ + found `{}` with length {}", + "second", + in_len, + in_ty, + arg_tys[1], + out_len + ); + require!( + in_len == out_len2, + "expected {} argument with length {} (same as input type `{}`), \ + found `{}` with length {}", + "third", + in_len, + in_ty, + arg_tys[2], + out_len2 + ); + + // The return type must match the first argument type + require!(ret_ty == in_ty, "expected return type `{}`, found `{}`", in_ty, ret_ty); + + // This counts how many pointers + fn ptr_count(t: Ty<'_>) -> usize { + match t.kind() { + ty::RawPtr(p) => 1 + ptr_count(p.ty), + _ => 0, + } + } + + // Non-ptr type + fn non_ptr(t: Ty<'_>) -> Ty<'_> { + match t.kind() { + ty::RawPtr(p) => non_ptr(p.ty), + _ => t, + } + } + + // The second argument must be a simd vector with an element type that's a pointer + // to the element type of the first argument + let (_, element_ty0) = arg_tys[0].simd_size_and_type(bx.tcx()); + let (_, element_ty1) = arg_tys[1].simd_size_and_type(bx.tcx()); + let (pointer_count, underlying_ty) = match element_ty1.kind() { + ty::RawPtr(p) if p.ty == in_elem => (ptr_count(element_ty1), non_ptr(element_ty1)), + _ => { + require!( + false, + "expected element type `{}` of second argument `{}` \ + to be a pointer to the element type `{}` of the first \ + argument `{}`, found `{}` != `*_ {}`", + element_ty1, + arg_tys[1], + in_elem, + in_ty, + element_ty1, + in_elem + ); + unreachable!(); + } + }; + assert!(pointer_count > 0); + assert_eq!(pointer_count - 1, ptr_count(element_ty0)); + assert_eq!(underlying_ty, non_ptr(element_ty0)); + + // The element type of the third argument must be a signed integer type of any width: + let (_, element_ty2) = arg_tys[2].simd_size_and_type(bx.tcx()); + match element_ty2.kind() { + ty::Int(_) => (), + _ => { + require!( + false, + "expected element type `{}` of third argument `{}` \ + to be a signed integer type", + element_ty2, + arg_tys[2] + ); + } + } + + // Alignment of T, must be a constant integer value: + let alignment_ty = bx.type_i32(); + let alignment = bx.const_i32(bx.align_of(in_elem).bytes() as i32); + + // Truncate the mask vector to a vector of i1s: + let (mask, mask_ty) = { + let i1 = bx.type_i1(); + let i1xn = bx.type_vector(i1, in_len); + (bx.trunc(args[2].immediate(), i1xn), i1xn) + }; + + // Type of the vector of pointers: + let llvm_pointer_vec_ty = gcc_vector_ty(bx, underlying_ty, in_len, pointer_count); + let llvm_pointer_vec_str = llvm_vector_str(underlying_ty, in_len, pointer_count); + + // Type of the vector of elements: + let llvm_elem_vec_ty = 
gcc_vector_ty(bx, underlying_ty, in_len, pointer_count - 1); + let llvm_elem_vec_str = llvm_vector_str(underlying_ty, in_len, pointer_count - 1); + + let llvm_intrinsic = + format!("llvm.masked.gather.{}.{}", llvm_elem_vec_str, llvm_pointer_vec_str); + let f = bx.declare_cfn( + &llvm_intrinsic, + bx.type_func( + &[llvm_pointer_vec_ty, alignment_ty, mask_ty, llvm_elem_vec_ty], + llvm_elem_vec_ty, + ), + ); + let v = bx.call(f, &[args[1].immediate(), alignment, mask, args[0].immediate()], None); + return Ok(v); + } + + if name == sym::simd_scatter { + // simd_scatter(values: , pointers: , + // mask: ) -> () + // * N: number of elements in the input vectors + // * T: type of the element to load + // * M: any integer width is supported, will be truncated to i1 + + // All types must be simd vector types + require_simd!(in_ty, "first"); + require_simd!(arg_tys[1], "second"); + require_simd!(arg_tys[2], "third"); + + // Of the same length: + let (element_len1, _) = arg_tys[1].simd_size_and_type(bx.tcx()); + let (element_len2, _) = arg_tys[2].simd_size_and_type(bx.tcx()); + require!( + in_len == element_len1, + "expected {} argument with length {} (same as input type `{}`), \ + found `{}` with length {}", + "second", + in_len, + in_ty, + arg_tys[1], + element_len1 + ); + require!( + in_len == element_len2, + "expected {} argument with length {} (same as input type `{}`), \ + found `{}` with length {}", + "third", + in_len, + in_ty, + arg_tys[2], + element_len2 + ); + + // This counts how many pointers + fn ptr_count(t: Ty<'_>) -> usize { + match t.kind() { + ty::RawPtr(p) => 1 + ptr_count(p.ty), + _ => 0, + } + } + + // Non-ptr type + fn non_ptr(t: Ty<'_>) -> Ty<'_> { + match t.kind() { + ty::RawPtr(p) => non_ptr(p.ty), + _ => t, + } + } + + // The second argument must be a simd vector with an element type that's a pointer + // to the element type of the first argument + let (_, element_ty0) = arg_tys[0].simd_size_and_type(bx.tcx()); + let (_, element_ty1) = arg_tys[1].simd_size_and_type(bx.tcx()); + let (_, element_ty2) = arg_tys[2].simd_size_and_type(bx.tcx()); + let (pointer_count, underlying_ty) = match element_ty1.kind() { + ty::RawPtr(p) if p.ty == in_elem && p.mutbl == hir::Mutability::Mut => { + (ptr_count(element_ty1), non_ptr(element_ty1)) + } + _ => { + require!( + false, + "expected element type `{}` of second argument `{}` \ + to be a pointer to the element type `{}` of the first \ + argument `{}`, found `{}` != `*mut {}`", + element_ty1, + arg_tys[1], + in_elem, + in_ty, + element_ty1, + in_elem + ); + unreachable!(); + } + }; + assert!(pointer_count > 0); + assert_eq!(pointer_count - 1, ptr_count(element_ty0)); + assert_eq!(underlying_ty, non_ptr(element_ty0)); + + // The element type of the third argument must be a signed integer type of any width: + match element_ty2.kind() { + ty::Int(_) => (), + _ => { + require!( + false, + "expected element type `{}` of third argument `{}` \ + be a signed integer type", + element_ty2, + arg_tys[2] + ); + } + } + + // Alignment of T, must be a constant integer value: + let alignment_ty = bx.type_i32(); + let alignment = bx.const_i32(bx.align_of(in_elem).bytes() as i32); + + // Truncate the mask vector to a vector of i1s: + let (mask, mask_ty) = { + let i1 = bx.type_i1(); + let i1xn = bx.type_vector(i1, in_len); + (bx.trunc(args[2].immediate(), i1xn), i1xn) + }; + + let ret_t = bx.type_void(); + + // Type of the vector of pointers: + let llvm_pointer_vec_ty = gcc_vector_ty(bx, underlying_ty, in_len, pointer_count); + let llvm_pointer_vec_str = 
llvm_vector_str(underlying_ty, in_len, pointer_count); + + // Type of the vector of elements: + let llvm_elem_vec_ty = gcc_vector_ty(bx, underlying_ty, in_len, pointer_count - 1); + let llvm_elem_vec_str = llvm_vector_str(underlying_ty, in_len, pointer_count - 1); + + let llvm_intrinsic = + format!("llvm.masked.scatter.{}.{}", llvm_elem_vec_str, llvm_pointer_vec_str); + let f = bx.declare_cfn( + &llvm_intrinsic, + bx.type_func(&[llvm_elem_vec_ty, llvm_pointer_vec_ty, alignment_ty, mask_ty], ret_t), + ); + let v = bx.call(f, &[args[0].immediate(), args[1].immediate(), alignment, mask], None); + return Ok(v); + } + + macro_rules! arith_red { + ($name:ident : $integer_reduce:ident, $float_reduce:ident, $ordered:expr, $op:ident, + $identity:expr) => { + if name == sym::$name { + require!( + ret_ty == in_elem, + "expected return type `{}` (element of input `{}`), found `{}`", + in_elem, + in_ty, + ret_ty + ); + return match in_elem.kind() { + ty::Int(_) | ty::Uint(_) => { + let r = bx.$integer_reduce(args[0].immediate()); + if $ordered { + // if overflow occurs, the result is the + // mathematical result modulo 2^n: + Ok(bx.$op(args[1].immediate(), r)) + } else { + Ok(bx.$integer_reduce(args[0].immediate())) + } + } + ty::Float(f) => { + let acc = if $ordered { + // ordered arithmetic reductions take an accumulator + args[1].immediate() + } else { + // unordered arithmetic reductions use the identity accumulator + match f.bit_width() { + 32 => bx.const_real(bx.type_f32(), $identity), + 64 => bx.const_real(bx.type_f64(), $identity), + v => return_error!( + r#" +unsupported {} from `{}` with element `{}` of size `{}` to `{}`"#, + sym::$name, + in_ty, + in_elem, + v, + ret_ty + ), + } + }; + Ok(bx.$float_reduce(acc, args[0].immediate())) + } + _ => return_error!( + "unsupported {} from `{}` with element `{}` to `{}`", + sym::$name, + in_ty, + in_elem, + ret_ty + ), + }; + } + }; + } + + arith_red!(simd_reduce_add_ordered: vector_reduce_add, vector_reduce_fadd, true, add, 0.0); + arith_red!(simd_reduce_mul_ordered: vector_reduce_mul, vector_reduce_fmul, true, mul, 1.0); + arith_red!( + simd_reduce_add_unordered: vector_reduce_add, + vector_reduce_fadd_fast, + false, + add, + 0.0 + ); + arith_red!( + simd_reduce_mul_unordered: vector_reduce_mul, + vector_reduce_fmul_fast, + false, + mul, + 1.0 + ); + + macro_rules! minmax_red { + ($name:ident: $int_red:ident, $float_red:ident) => { + if name == sym::$name { + require!( + ret_ty == in_elem, + "expected return type `{}` (element of input `{}`), found `{}`", + in_elem, + in_ty, + ret_ty + ); + return match in_elem.kind() { + ty::Int(_i) => Ok(bx.$int_red(args[0].immediate(), true)), + ty::Uint(_u) => Ok(bx.$int_red(args[0].immediate(), false)), + ty::Float(_f) => Ok(bx.$float_red(args[0].immediate())), + _ => return_error!( + "unsupported {} from `{}` with element `{}` to `{}`", + sym::$name, + in_ty, + in_elem, + ret_ty + ), + }; + } + }; + } + + minmax_red!(simd_reduce_min: vector_reduce_min, vector_reduce_fmin); + minmax_red!(simd_reduce_max: vector_reduce_max, vector_reduce_fmax); + + minmax_red!(simd_reduce_min_nanless: vector_reduce_min, vector_reduce_fmin_fast); + minmax_red!(simd_reduce_max_nanless: vector_reduce_max, vector_reduce_fmax_fast); + + macro_rules! 
bitwise_red { + ($name:ident : $red:ident, $boolean:expr) => { + if name == sym::$name { + let input = if !$boolean { + require!( + ret_ty == in_elem, + "expected return type `{}` (element of input `{}`), found `{}`", + in_elem, + in_ty, + ret_ty + ); + args[0].immediate() + } else { + match in_elem.kind() { + ty::Int(_) | ty::Uint(_) => {} + _ => return_error!( + "unsupported {} from `{}` with element `{}` to `{}`", + sym::$name, + in_ty, + in_elem, + ret_ty + ), + } + + // boolean reductions operate on vectors of i1s: + let i1 = bx.type_i1(); + let i1xn = bx.type_vector(i1, in_len as u64); + bx.trunc(args[0].immediate(), i1xn) + }; + return match in_elem.kind() { + ty::Int(_) | ty::Uint(_) => { + let r = bx.$red(input); + Ok(if !$boolean { r } else { bx.zext(r, bx.type_bool()) }) + } + _ => return_error!( + "unsupported {} from `{}` with element `{}` to `{}`", + sym::$name, + in_ty, + in_elem, + ret_ty + ), + }; + } + }; + } + + bitwise_red!(simd_reduce_and: vector_reduce_and, false); + bitwise_red!(simd_reduce_or: vector_reduce_or, false); + bitwise_red!(simd_reduce_xor: vector_reduce_xor, false); + bitwise_red!(simd_reduce_all: vector_reduce_and, true); + bitwise_red!(simd_reduce_any: vector_reduce_or, true); + + if name == sym::simd_cast { + require_simd!(ret_ty, "return"); + let (out_len, out_elem) = ret_ty.simd_size_and_type(bx.tcx()); + require!( + in_len == out_len, + "expected return type with length {} (same as input type `{}`), \ + found `{}` with length {}", + in_len, + in_ty, + ret_ty, + out_len + ); + // casting cares about nominal type, not just structural type + if in_elem == out_elem { + return Ok(args[0].immediate()); + } + + enum Style { + Float, + Int(/* is signed? */ bool), + Unsupported, + } + + let (in_style, in_width) = match in_elem.kind() { + // vectors of pointer-sized integers should've been + // disallowed before here, so this unwrap is safe. + ty::Int(i) => (Style::Int(true), i.bit_width().unwrap()), + ty::Uint(u) => (Style::Int(false), u.bit_width().unwrap()), + ty::Float(f) => (Style::Float, f.bit_width()), + _ => (Style::Unsupported, 0), + }; + let (out_style, out_width) = match out_elem.kind() { + ty::Int(i) => (Style::Int(true), i.bit_width().unwrap()), + ty::Uint(u) => (Style::Int(false), u.bit_width().unwrap()), + ty::Float(f) => (Style::Float, f.bit_width()), + _ => (Style::Unsupported, 0), + }; + + match (in_style, out_style) { + (Style::Int(in_is_signed), Style::Int(_)) => { + return Ok(match in_width.cmp(&out_width) { + Ordering::Greater => bx.trunc(args[0].immediate(), llret_ty), + Ordering::Equal => args[0].immediate(), + Ordering::Less => { + if in_is_signed { + bx.sext(args[0].immediate(), llret_ty) + } else { + bx.zext(args[0].immediate(), llret_ty) + } + } + }); + } + (Style::Int(in_is_signed), Style::Float) => { + return Ok(if in_is_signed { + bx.sitofp(args[0].immediate(), llret_ty) + } else { + bx.uitofp(args[0].immediate(), llret_ty) + }); + } + (Style::Float, Style::Int(out_is_signed)) => { + return Ok(if out_is_signed { + bx.fptosi(args[0].immediate(), llret_ty) + } else { + bx.fptoui(args[0].immediate(), llret_ty) + }); + } + (Style::Float, Style::Float) => { + return Ok(match in_width.cmp(&out_width) { + Ordering::Greater => bx.fptrunc(args[0].immediate(), llret_ty), + Ordering::Equal => args[0].immediate(), + Ordering::Less => bx.fpext(args[0].immediate(), llret_ty), + }); + } + _ => { /* Unsupported. Fallthrough. 
*/ } + } + require!( + false, + "unsupported cast from `{}` with element `{}` to `{}` with element `{}`", + in_ty, + in_elem, + ret_ty, + out_elem + ); + }*/ + + macro_rules! arith_binary { + ($($name: ident: $($($p: ident),* => $call: ident),*;)*) => { + $(if name == sym::$name { + match in_elem.kind() { + $($(ty::$p(_))|* => { + return Ok(bx.$call(args[0].immediate(), args[1].immediate())) + })* + _ => {}, + } + require!(false, + "unsupported operation on `{}` with element `{}`", + in_ty, + in_elem) + })* + } + } + + arith_binary! { + simd_add: Uint, Int => add, Float => fadd; + simd_sub: Uint, Int => sub, Float => fsub; + simd_mul: Uint, Int => mul, Float => fmul; + simd_div: Uint => udiv, Int => sdiv, Float => fdiv; + simd_rem: Uint => urem, Int => srem, Float => frem; + simd_shl: Uint, Int => shl; + simd_shr: Uint => lshr, Int => ashr; + simd_and: Uint, Int => and; + simd_or: Uint, Int => or; // FIXME: calling or might not work on vectors. + simd_xor: Uint, Int => xor; + /*simd_fmax: Float => maxnum; + simd_fmin: Float => minnum;*/ + } + + /*macro_rules! arith_unary { + ($($name: ident: $($($p: ident),* => $call: ident),*;)*) => { + $(if name == sym::$name { + match in_elem.kind() { + $($(ty::$p(_))|* => { + return Ok(bx.$call(args[0].immediate())) + })* + _ => {}, + } + require!(false, + "unsupported operation on `{}` with element `{}`", + in_ty, + in_elem) + })* + } + } + + arith_unary! { + simd_neg: Int => neg, Float => fneg; + } + + if name == sym::simd_saturating_add || name == sym::simd_saturating_sub { + let lhs = args[0].immediate(); + let rhs = args[1].immediate(); + let is_add = name == sym::simd_saturating_add; + let ptr_bits = bx.tcx().data_layout.pointer_size.bits() as _; + let (signed, elem_width, elem_ty) = match *in_elem.kind() { + ty::Int(i) => (true, i.bit_width().unwrap_or(ptr_bits), bx.cx.type_int_from_ty(i)), + ty::Uint(i) => (false, i.bit_width().unwrap_or(ptr_bits), bx.cx.type_uint_from_ty(i)), + _ => { + return_error!( + "expected element type `{}` of vector type `{}` \ + to be a signed or unsigned integer type", + arg_tys[0].simd_size_and_type(bx.tcx()).1, + arg_tys[0] + ); + } + }; + let llvm_intrinsic = &format!( + "llvm.{}{}.sat.v{}i{}", + if signed { 's' } else { 'u' }, + if is_add { "add" } else { "sub" }, + in_len, + elem_width + ); + let vec_ty = bx.cx.type_vector(elem_ty, in_len as u64); + + let f = bx.declare_cfn( + &llvm_intrinsic, + bx.type_func(&[vec_ty, vec_ty], vec_ty), + ); + let v = bx.call(f, &[lhs, rhs], None); + return Ok(v); + }*/ + + unimplemented!("simd {}", name); + + //span_bug!(span, "unknown SIMD intrinsic"); +} diff --git a/src/lib.rs b/src/lib.rs new file mode 100644 index 0000000000000..797251814d7f4 --- /dev/null +++ b/src/lib.rs @@ -0,0 +1,342 @@ +/* + * TODO: support #[inline] attributes. + * TODO: support LTO. + * + * TODO: remove the local gccjit LD_LIBRARY_PATH in config.sh. + * TODO: remove the object dependency. + * TODO: remove the patches. 
+ */ + +#![feature(rustc_private, decl_macro, associated_type_bounds, never_type, trusted_len)] +#![allow(broken_intra_doc_links)] +#![recursion_limit="256"] +#![warn(rust_2018_idioms)] +#![warn(unused_lifetimes)] + +/*extern crate flate2; +extern crate libc;*/ +extern crate rustc_ast; +extern crate rustc_codegen_ssa; +extern crate rustc_data_structures; +extern crate rustc_errors; +//extern crate rustc_fs_util; +extern crate rustc_hir; +extern crate rustc_metadata; +extern crate rustc_middle; +extern crate rustc_mir; +extern crate rustc_session; +extern crate rustc_span; +extern crate rustc_symbol_mangling; +extern crate rustc_target; +extern crate snap; + +// This prevents duplicating functions and statics that are already part of the host rustc process. +#[allow(unused_extern_crates)] +extern crate rustc_driver; + +mod abi; +mod allocator; +mod archive; +mod asm; +mod back; +mod base; +mod builder; +mod callee; +mod common; +mod consts; +mod context; +mod coverageinfo; +mod debuginfo; +mod declare; +mod intrinsic; +mod mangled_std_symbols; +mod mono_item; +mod type_; +mod type_of; +mod va_arg; + +use std::any::Any; +use std::sync::Arc; + +use gccjit::{Block, Context, FunctionType, OptimizationLevel}; +use rustc_ast::expand::allocator::AllocatorKind; +use rustc_codegen_ssa::{CodegenResults, CompiledModule, ModuleCodegen}; +use rustc_codegen_ssa::base::codegen_crate; +use rustc_codegen_ssa::back::write::{CodegenContext, FatLTOInput, ModuleConfig, TargetMachineFactoryFn}; +use rustc_codegen_ssa::back::lto::{LtoModuleCodegen, SerializedModule, ThinModule}; +use rustc_codegen_ssa::target_features::supported_target_features; +use rustc_codegen_ssa::traits::{CodegenBackend, ExtraBackendMethods, ModuleBufferMethods, ThinBufferMethods, WriteBackendMethods}; +use rustc_data_structures::fx::FxHashMap; +use rustc_errors::{ErrorReported, Handler}; +use rustc_middle::dep_graph::{WorkProduct, WorkProductId}; +use rustc_middle::middle::cstore::EncodedMetadata; +use rustc_middle::ty::TyCtxt; +use rustc_session::config::{CrateType, Lto, OptLevel, OutputFilenames}; +use rustc_session::Session; +use rustc_span::Symbol; +use rustc_span::fatal_error::FatalError; + +use crate::context::unit_name; + +pub struct PrintOnPanic String>(pub F); + +impl String> Drop for PrintOnPanic { + fn drop(&mut self) { + if ::std::thread::panicking() { + println!("{}", (self.0)()); + } + } +} + +#[derive(Clone)] +pub struct GccCodegenBackend; + +impl CodegenBackend for GccCodegenBackend { + fn init(&self, sess: &Session) { + if sess.lto() != Lto::No { + sess.warn("LTO is not supported. 
You may get a linker error."); + } + } + + fn codegen_crate<'tcx>(&self, tcx: TyCtxt<'tcx>, metadata: EncodedMetadata, need_metadata_module: bool) -> Box { + let target_cpu = target_cpu(tcx.sess); + let res = codegen_crate(self.clone(), tcx, target_cpu.to_string(), metadata, need_metadata_module); + + rustc_symbol_mangling::test::report_symbol_names(tcx); + + Box::new(res) + } + + fn join_codegen(&self, ongoing_codegen: Box, sess: &Session) -> Result<(CodegenResults, FxHashMap), ErrorReported> { + let (codegen_results, work_products) = ongoing_codegen + .downcast::>() + .expect("Expected GccCodegenBackend's OngoingCodegen, found Box") + .join(sess); + + Ok((codegen_results, work_products)) + } + + fn link(&self, sess: &Session, mut codegen_results: CodegenResults, outputs: &OutputFilenames) -> Result<(), ErrorReported> { + use rustc_codegen_ssa::back::link::link_binary; + if let Some(symbols) = codegen_results.crate_info.exported_symbols.get_mut(&CrateType::Dylib) { + // TODO: remove when global initializer work without calling a function at runtime. + // HACK: since this codegen add some symbols (e.g. __gccGlobalCrateInit) and the UI + // tests load libstd.so as a dynamic library, and rustc use a version-script to specify + // the symbols visibility, we add * to export all symbols. + // It seems other symbols from libstd/libcore are causing some issues here as well. + symbols.push("*".to_string()); + } + + link_binary::>( + sess, + &codegen_results, + outputs, + ) + } + + fn target_features(&self, sess: &Session) -> Vec { + target_features(sess) + } +} + +impl ExtraBackendMethods for GccCodegenBackend { + fn new_metadata<'tcx>(&self, _tcx: TyCtxt<'tcx>, _mod_name: &str) -> Self::Module { + GccContext { + context: Context::default(), + } + } + + fn write_compressed_metadata<'tcx>(&self, tcx: TyCtxt<'tcx>, metadata: &EncodedMetadata, gcc_module: &mut Self::Module) { + base::write_compressed_metadata(tcx, metadata, gcc_module) + } + + fn codegen_allocator<'tcx>(&self, tcx: TyCtxt<'tcx>, mods: &mut Self::Module, kind: AllocatorKind, has_alloc_error_handler: bool) { + unsafe { allocator::codegen(tcx, mods, kind, has_alloc_error_handler) } + } + + fn compile_codegen_unit<'tcx>(&self, tcx: TyCtxt<'tcx>, cgu_name: Symbol) -> (ModuleCodegen, u64) { + base::compile_codegen_unit(tcx, cgu_name) + } + + fn target_machine_factory(&self, _sess: &Session, _opt_level: OptLevel) -> TargetMachineFactoryFn { + // TODO: set opt level. + Arc::new(|_| { + Ok(()) + }) + } + + fn target_cpu<'b>(&self, _sess: &'b Session) -> &'b str { + unimplemented!(); + } + + fn tune_cpu<'b>(&self, _sess: &'b Session) -> Option<&'b str> { + None + // TODO + //llvm_util::tune_cpu(sess) + } +} + +pub struct ModuleBuffer; + +impl ModuleBufferMethods for ModuleBuffer { + fn data(&self) -> &[u8] { + unimplemented!(); + } +} + +pub struct ThinBuffer; + +impl ThinBufferMethods for ThinBuffer { + fn data(&self) -> &[u8] { + unimplemented!(); + } +} + +pub struct GccContext { + context: Context<'static>, +} + +unsafe impl Send for GccContext {} +// FIXME: that shouldn't be Sync. Parallel compilation is currently disabled with "-Zno-parallel-llvm". Try to disable it here. 
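+// NOTE: the `Send`/`Sync` impls assert a thread-safety guarantee that gccjit's
+// `Context` itself does not make; they are only tenable while codegen stays
+// single-threaded, as described in the FIXME above.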
+unsafe impl Sync for GccContext {} + +impl WriteBackendMethods for GccCodegenBackend { + type Module = GccContext; + type TargetMachine = (); + type ModuleBuffer = ModuleBuffer; + type Context = (); + type ThinData = (); + type ThinBuffer = ThinBuffer; + + fn run_fat_lto(_cgcx: &CodegenContext, mut modules: Vec>, _cached_modules: Vec<(SerializedModule, WorkProduct)>) -> Result, FatalError> { + // TODO: implement LTO by sending -flto to libgccjit and adding the appropriate gcc linker plugins. + // NOTE: implemented elsewhere. + let module = + match modules.remove(0) { + FatLTOInput::InMemory(module) => module, + FatLTOInput::Serialized { .. } => { + unimplemented!(); + /*info!("pushing serialized module {:?}", name); + let buffer = SerializedModule::Local(buffer); + serialized_modules.push((buffer, CString::new(name).unwrap()));*/ + } + }; + Ok(LtoModuleCodegen::Fat { module: Some(module), _serialized_bitcode: vec![] }) + } + + fn run_thin_lto(_cgcx: &CodegenContext, _modules: Vec<(String, Self::ThinBuffer)>, _cached_modules: Vec<(SerializedModule, WorkProduct)>) -> Result<(Vec>, Vec), FatalError> { + unimplemented!(); + } + + fn print_pass_timings(&self) { + unimplemented!(); + } + + unsafe fn optimize(_cgcx: &CodegenContext, _diag_handler: &Handler, module: &ModuleCodegen, config: &ModuleConfig) -> Result<(), FatalError> { + //if cgcx.lto == Lto::Fat { + //module.module_llvm.context.add_driver_option("-flto"); + //} + module.module_llvm.context.set_optimization_level(to_gcc_opt_level(config.opt_level)); + Ok(()) + } + + unsafe fn optimize_thin(_cgcx: &CodegenContext, _thin: &mut ThinModule) -> Result, FatalError> { + unimplemented!(); + } + + unsafe fn codegen(cgcx: &CodegenContext, diag_handler: &Handler, module: ModuleCodegen, config: &ModuleConfig) -> Result { + back::write::codegen(cgcx, diag_handler, module, config) + } + + fn prepare_thin(_module: ModuleCodegen) -> (String, Self::ThinBuffer) { + unimplemented!(); + } + + fn serialize_module(_module: ModuleCodegen) -> (String, Self::ModuleBuffer) { + unimplemented!(); + } + + fn run_lto_pass_manager(_cgcx: &CodegenContext, _module: &ModuleCodegen, _config: &ModuleConfig, _thin: bool) -> Result<(), FatalError> { + // TODO + Ok(()) + } + + fn run_link(cgcx: &CodegenContext, diag_handler: &Handler, modules: Vec>) -> Result, FatalError> { + back::write::link(cgcx, diag_handler, modules) + } +} + +/*fn target_triple(sess: &Session) -> target_lexicon::Triple { + sess.target.llvm_target.parse().unwrap() +}*/ + +/// This is the entrypoint for a hot plugged rustc_codegen_gccjit +#[no_mangle] +pub fn __rustc_codegen_backend() -> Box { + Box::new(GccCodegenBackend) +} + +fn to_gcc_opt_level(optlevel: Option) -> OptimizationLevel { + match optlevel { + None => OptimizationLevel::None, + Some(level) => { + match level { + OptLevel::No => OptimizationLevel::None, + OptLevel::Less => OptimizationLevel::Limited, + OptLevel::Default => OptimizationLevel::Standard, + OptLevel::Aggressive => OptimizationLevel::Aggressive, + OptLevel::Size | OptLevel::SizeMin => OptimizationLevel::Limited, + } + }, + } +} + +fn create_function_calling_initializers<'gcc, 'tcx>(tcx: TyCtxt<'tcx>, context: &Context<'gcc>, block: Block<'gcc>) { + let codegen_units = tcx.collect_and_partition_mono_items(()).1; + for codegen_unit in codegen_units { + let codegen_init_func = context.new_function(None, FunctionType::Extern, context.new_type::<()>(), &[], + &format!("__gccGlobalInit{}", unit_name(&codegen_unit)), false); + block.add_eval(None, context.new_call(None, 
codegen_init_func, &[])); + } +} + +fn handle_native(name: &str) -> &str { + if name != "native" { + return name; + } + + unimplemented!(); + /*unsafe { + let mut len = 0; + let ptr = llvm::LLVMRustGetHostCPUName(&mut len); + str::from_utf8(slice::from_raw_parts(ptr as *const u8, len)).unwrap() + }*/ +} + +pub fn target_cpu(sess: &Session) -> &str { + let name = sess.opts.cg.target_cpu.as_ref().unwrap_or(&sess.target.cpu); + handle_native(name) +} + +pub fn target_features(sess: &Session) -> Vec { + supported_target_features(sess) + .iter() + .filter_map( + |&(feature, gate)| { + if sess.is_nightly_build() || gate.is_none() { Some(feature) } else { None } + }, + ) + .filter(|_feature| { + /*if feature.starts_with("sse") { + return true; + }*/ + // TODO: implement a way to get enabled feature in libgccjit. + //println!("Feature: {}", feature); + /*let llvm_feature = to_llvm_feature(sess, feature); + let cstr = CString::new(llvm_feature).unwrap(); + unsafe { llvm::LLVMRustHasFeature(target_machine, cstr.as_ptr()) }*/ + false + }) + .map(|feature| Symbol::intern(feature)) + .collect() +} diff --git a/src/mangled_std_symbols.rs b/src/mangled_std_symbols.rs new file mode 100644 index 0000000000000..b0c3f214d66c1 --- /dev/null +++ b/src/mangled_std_symbols.rs @@ -0,0 +1,4 @@ +pub const ARGV_INIT_ARRAY: &str = "_ZN3std3sys4unix4args3imp15ARGV_INIT_ARRAY"; +pub const ARGV_INIT_WRAPPER: &str = "_ZN3std3sys4unix4args3imp15ARGV_INIT_ARRAY12init_wrapper"; +pub const ARGC: &str = "_ZN3std3sys4unix4args3imp4ARGC"; +pub const ARGV: &str = "_ZN3std3sys4unix4args3imp4ARGV"; diff --git a/src/mono_item.rs b/src/mono_item.rs new file mode 100644 index 0000000000000..c261efbbc559f --- /dev/null +++ b/src/mono_item.rs @@ -0,0 +1,59 @@ +use rustc_codegen_ssa::traits::PreDefineMethods; +use rustc_middle::middle::codegen_fn_attrs::CodegenFnAttrFlags; +use rustc_middle::mir::mono::{Linkage, Visibility}; +use rustc_middle::ty::{self, Instance, TypeFoldable}; +use rustc_middle::ty::layout::FnAbiExt; +use rustc_span::def_id::DefId; +use rustc_target::abi::LayoutOf; +use rustc_target::abi::call::FnAbi; + +use crate::base; +use crate::context::CodegenCx; +use crate::type_of::LayoutGccExt; + +impl<'gcc, 'tcx> PreDefineMethods<'tcx> for CodegenCx<'gcc, 'tcx> { + fn predefine_static(&self, def_id: DefId, _linkage: Linkage, _visibility: Visibility, symbol_name: &str) { + let attrs = self.tcx.codegen_fn_attrs(def_id); + let instance = Instance::mono(self.tcx, def_id); + let ty = instance.ty(self.tcx, ty::ParamEnv::reveal_all()); + let gcc_type = self.layout_of(ty).gcc_type(self, true); + + let is_tls = attrs.flags.contains(CodegenFnAttrFlags::THREAD_LOCAL); + let global = self.define_global(symbol_name, gcc_type, is_tls, attrs.link_section).unwrap_or_else(|| { + self.sess().span_fatal( + self.tcx.def_span(def_id), + &format!("symbol `{}` is already defined", symbol_name), + ) + }); + + // TODO + /*unsafe { + llvm::LLVMRustSetLinkage(global, base::linkage_to_llvm(linkage)); + llvm::LLVMRustSetVisibility(global, base::visibility_to_llvm(visibility)); + }*/ + + self.instances.borrow_mut().insert(instance, global); + } + + fn predefine_fn(&self, instance: Instance<'tcx>, linkage: Linkage, _visibility: Visibility, symbol_name: &str) { + assert!(!instance.substs.needs_infer() && !instance.substs.has_param_types_or_consts()); + + let fn_abi = FnAbi::of_instance(self, instance, &[]); + self.linkage.set(base::linkage_to_gcc(linkage)); + let _decl = self.declare_fn(symbol_name, &fn_abi); + //let attrs = 
self.tcx.codegen_fn_attrs(instance.def_id()); + + // TODO: call set_link_section() to allow initializing argc/argv. + //base::set_link_section(decl, &attrs); + /*if linkage == Linkage::LinkOnceODR || linkage == Linkage::WeakODR { + llvm::SetUniqueComdat(self.llmod, decl); + }*/ + + //debug!("predefine_fn: instance = {:?}", instance); + + // TODO: use inline attribute from there in linkage.set() above: + //attributes::from_fn_attrs(self, decl, instance); + + //self.instances.borrow_mut().insert(instance, decl); + } +} diff --git a/src/type_.rs b/src/type_.rs new file mode 100644 index 0000000000000..90f7d9d9bbace --- /dev/null +++ b/src/type_.rs @@ -0,0 +1,355 @@ +use std::convert::TryInto; + +use gccjit::{RValue, Struct, Type}; +use rustc_codegen_ssa::traits::{BaseTypeMethods, DerivedTypeMethods}; +use rustc_codegen_ssa::common::TypeKind; +use rustc_middle::bug; +use rustc_middle::ty::layout::TyAndLayout; +use rustc_target::abi::{AddressSpace, Align, Integer, Size}; + +use crate::common::TypeReflection; +use crate::context::CodegenCx; +use crate::type_of::LayoutGccExt; + +impl<'gcc, 'tcx> CodegenCx<'gcc, 'tcx> { + pub fn type_ix(&self, num_bits: u64) -> Type<'gcc> { + // gcc only supports 1, 2, 4 or 8-byte integers. + let bytes = (num_bits / 8).next_power_of_two() as i32; + match bytes { + 1 => self.i8_type, + 2 => self.i16_type, + 4 => self.i32_type, + 8 => self.i64_type, + 16 => self.i128_type, + _ => panic!("unexpected num_bits: {}", num_bits), + } + /* + let bytes = (num_bits / 8).next_power_of_two() as i32; + println!("num_bits: {}, bytes: {}", num_bits, bytes); + self.context.new_int_type(bytes, true) // TODO: check if it is indeed a signed integer. + */ + } + + /*pub fn type_bool(&self) -> Type<'gcc> { + self.bool_type + }*/ + + pub fn type_void(&self) -> Type<'gcc> { + self.context.new_type::<()>() + } + + pub fn type_size_t(&self) -> Type<'gcc> { + self.context.new_type::() + } + + pub fn type_u8(&self) -> Type<'gcc> { + self.u8_type + } + + pub fn type_u16(&self) -> Type<'gcc> { + self.u16_type + } + + pub fn type_u32(&self) -> Type<'gcc> { + self.u32_type + } + + pub fn type_u64(&self) -> Type<'gcc> { + self.u64_type + } + + pub fn type_u128(&self) -> Type<'gcc> { + self.u128_type + } + + pub fn type_pointee_for_align(&self, align: Align) -> Type<'gcc> { + // FIXME(eddyb) We could find a better approximation if ity.align < align. 
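+        // e.g. with a 4-byte alignment this picks a 32-bit integer on typical
+        // targets, so the pointee is modeled as a suitably-aligned integer
+        // rather than plain `i8`.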
+ let ity = Integer::approximate_align(self, align); + self.type_from_integer(ity) + } + + /*pub fn type_int_from_ty(&self, t: ty::IntTy) -> Type<'gcc> { + match t { + ty::IntTy::Isize => self.type_isize(), + ty::IntTy::I8 => self.type_i8(), + ty::IntTy::I16 => self.type_i16(), + ty::IntTy::I32 => self.type_i32(), + ty::IntTy::I64 => self.type_i64(), + ty::IntTy::I128 => self.type_i128(), + } + } + + pub fn type_uint_from_ty(&self, t: ty::UintTy) -> Type<'gcc> { + match t { + ty::UintTy::Usize => self.type_isize(), + ty::UintTy::U8 => self.type_i8(), + ty::UintTy::U16 => self.type_i16(), + ty::UintTy::U32 => self.type_i32(), + ty::UintTy::U64 => self.type_i64(), + ty::UintTy::U128 => self.type_i128(), + } + } + + pub fn type_float_from_ty(&self, t: ty::FloatTy) -> Type<'gcc> { + match t { + ty::FloatTy::F32 => self.type_f32(), + ty::FloatTy::F64 => self.type_f64(), + } + } + + pub fn type_vector(&self, ty: Type<'gcc>, len: u64) -> Type<'gcc> { + self.context.new_vector_type(ty, len) + }*/ +} + +impl<'gcc, 'tcx> BaseTypeMethods<'tcx> for CodegenCx<'gcc, 'tcx> { + fn type_i1(&self) -> Type<'gcc> { + self.bool_type + } + + fn type_i8(&self) -> Type<'gcc> { + self.i8_type + } + + fn type_i16(&self) -> Type<'gcc> { + self.i16_type + } + + fn type_i32(&self) -> Type<'gcc> { + self.i32_type + } + + fn type_i64(&self) -> Type<'gcc> { + self.i64_type + } + + fn type_i128(&self) -> Type<'gcc> { + self.i128_type + } + + fn type_isize(&self) -> Type<'gcc> { + self.isize_type + } + + fn type_f32(&self) -> Type<'gcc> { + self.context.new_type::() + } + + fn type_f64(&self) -> Type<'gcc> { + self.context.new_type::() + } + + fn type_func(&self, params: &[Type<'gcc>], return_type: Type<'gcc>) -> Type<'gcc> { + self.context.new_function_pointer_type(None, return_type, params, false) + } + + fn type_struct(&self, fields: &[Type<'gcc>], _packed: bool) -> Type<'gcc> { + let types = fields.to_vec(); + if let Some(typ) = self.struct_types.borrow().get(fields) { + return typ.clone(); + } + let fields: Vec<_> = fields.iter().enumerate() + .map(|(index, field)| self.context.new_field(None, *field, &format!("field{}_TODO", index))) + .collect(); + // TODO: use packed. 
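+        // Struct types are cached by their exact field types (see the
+        // `struct_types` lookup above), so two requests for the same field
+        // list reuse a single GCC struct instead of minting incompatible
+        // nominal types.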
+ //let name = types.iter().map(|typ| format!("{:?}", typ)).collect::>().join("_"); + //let typ = self.context.new_struct_type(None, format!("struct{}", name), &fields).as_type(); + let typ = self.context.new_struct_type(None, "struct", &fields).as_type(); + self.struct_types.borrow_mut().insert(types, typ); + typ + } + + fn type_kind(&self, typ: Type<'gcc>) -> TypeKind { + if typ.is_integral() { + TypeKind::Integer + } + else if typ.is_vector().is_some() { + TypeKind::Vector + } + else { + // TODO + TypeKind::Void + } + } + + fn type_ptr_to(&self, ty: Type<'gcc>) -> Type<'gcc> { + // TODO + /*assert_ne!(self.type_kind(ty), TypeKind::Function, + "don't call ptr_to on function types, use ptr_to_gcc_type on FnAbi instead" + );*/ + ty.make_pointer() + } + + fn type_ptr_to_ext(&self, ty: Type<'gcc>, _address_space: AddressSpace) -> Type<'gcc> { + // TODO: use address_space + ty.make_pointer() + } + + fn element_type(&self, ty: Type<'gcc>) -> Type<'gcc> { + if let Some(typ) = ty.is_array() { + typ + } + else if let Some(vector_type) = ty.is_vector() { + vector_type.get_element_type() + } + else if let Some(typ) = ty.get_pointee() { + typ + } + else { + unreachable!() + } + } + + fn vector_length(&self, _ty: Type<'gcc>) -> usize { + unimplemented!(); + //unsafe { llvm::LLVMGetVectorSize(ty) as usize } + } + + fn float_width(&self, typ: Type<'gcc>) -> usize { + let f32 = self.context.new_type::(); + let f64 = self.context.new_type::(); + if typ == f32 { + 32 + } + else if typ == f64 { + 64 + } + else { + panic!("Cannot get width of float type {:?}", typ); + } + // TODO: support other sizes. + /*match self.type_kind(ty) { + TypeKind::Float => 32, + TypeKind::Double => 64, + TypeKind::X86_FP80 => 80, + TypeKind::FP128 | TypeKind::PPC_FP128 => 128, + _ => bug!("llvm_float_width called on a non-float type"), + }*/ + } + + fn int_width(&self, typ: Type<'gcc>) -> u64 { + if typ.is_i8(self) || typ.is_u8(self) { + 8 + } + else if typ.is_i16(self) || typ.is_u16(self) { + 16 + } + else if typ.is_i32(self) || typ.is_u32(self) { + 32 + } + else if typ.is_i64(self) || typ.is_u64(self) { + 64 + } + else if typ.is_i128(self) || typ.is_u128(self) { + 128 + } + else { + panic!("Cannot get width of int type {:?}", typ); + } + } + + fn val_ty(&self, value: RValue<'gcc>) -> Type<'gcc> { + value.get_type() + } +} + +impl<'gcc, 'tcx> CodegenCx<'gcc, 'tcx> { + pub fn type_padding_filler(&self, size: Size, align: Align) -> Type<'gcc> { + let unit = Integer::approximate_align(self, align); + let size = size.bytes(); + let unit_size = unit.size().bytes(); + assert_eq!(size % unit_size, 0); + self.type_array(self.type_from_integer(unit), size / unit_size) + } + + pub fn set_struct_body(&self, typ: Struct<'gcc>, fields: &[Type<'gcc>], _packed: bool) { + // TODO: use packed. + let fields: Vec<_> = fields.iter().enumerate() + .map(|(index, field)| self.context.new_field(None, *field, &format!("field_{}", index))) + .collect(); + typ.set_fields(None, &fields); + } + + /*fn type_struct(&self, fields: &[Type<'gcc>], packed: bool) -> Type<'gcc> { + // TODO: use packed. 
+ let fields: Vec<_> = fields.iter().enumerate() + .map(|(index, field)| self.context.new_field(None, *field, &format!("field_{}", index))) + .collect(); + return self.context.new_struct_type(None, "unnamedStruct", &fields).as_type(); + }*/ + + pub fn type_named_struct(&self, name: &str) -> Struct<'gcc> { + self.context.new_opaque_struct_type(None, name) + } + + pub fn type_array(&self, ty: Type<'gcc>, mut len: u64) -> Type<'gcc> { + if let Some(struct_type) = ty.is_struct() { + if struct_type.get_field_count() == 0 { + // NOTE: since gccjit only supports i32 for the array size and libcore's tests uses a + // size of usize::MAX in test_binary_search, we workaround this by setting the size to + // zero for ZSTs. + // FIXME: fix gccjit API. + len = 0; + } + } + + let len: i32 = len.try_into().expect("array len"); + + self.context.new_array_type(None, ty, len) + } +} + +pub fn struct_fields<'gcc, 'tcx>(cx: &CodegenCx<'gcc, 'tcx>, layout: TyAndLayout<'tcx>) -> (Vec>, bool) { + //debug!("struct_fields: {:#?}", layout); + let field_count = layout.fields.count(); + + let mut packed = false; + let mut offset = Size::ZERO; + let mut prev_effective_align = layout.align.abi; + let mut result: Vec<_> = Vec::with_capacity(1 + field_count * 2); + for i in layout.fields.index_by_increasing_offset() { + let target_offset = layout.fields.offset(i as usize); + let field = layout.field(cx, i); + let effective_field_align = + layout.align.abi.min(field.align.abi).restrict_for_offset(target_offset); + packed |= effective_field_align < field.align.abi; + + /*debug!( + "struct_fields: {}: {:?} offset: {:?} target_offset: {:?} \ + effective_field_align: {}", + i, + field, + offset, + target_offset, + effective_field_align.bytes() + );*/ + assert!(target_offset >= offset); + let padding = target_offset - offset; + let padding_align = prev_effective_align.min(effective_field_align); + assert_eq!(offset.align_to(padding_align) + padding, target_offset); + result.push(cx.type_padding_filler(padding, padding_align)); + //debug!(" padding before: {:?}", padding); + + result.push(field.gcc_type(cx, !field.ty.is_any_ptr())); // FIXME: might need to check if the type is inside another, like Box. 
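+        // Every field is preceded by an explicit padding filler, so a layout
+        // with n fields lowers to 2n (+ 1 trailing filler) GCC fields; this
+        // matches the `1 + index * 2` mapping in `gcc_field_index` below.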
+ offset = target_offset + field.size; + prev_effective_align = effective_field_align; + } + if !layout.is_unsized() && field_count > 0 { + if offset > layout.size { + bug!("layout: {:#?} stride: {:?} offset: {:?}", layout, layout.size, offset); + } + let padding = layout.size - offset; + let padding_align = prev_effective_align; + assert_eq!(offset.align_to(padding_align) + padding, layout.size); + /*debug!( + "struct_fields: pad_bytes: {:?} offset: {:?} stride: {:?}", + padding, offset, layout.size + );*/ + result.push(cx.type_padding_filler(padding, padding_align)); + assert_eq!(result.len(), 1 + field_count * 2); + } else { + //debug!("struct_fields: offset: {:?} stride: {:?}", offset, layout.size); + } + + (result, packed) +} diff --git a/src/type_of.rs b/src/type_of.rs new file mode 100644 index 0000000000000..c5db0a1b2e459 --- /dev/null +++ b/src/type_of.rs @@ -0,0 +1,366 @@ +use std::fmt::Write; + +use gccjit::{Struct, Type}; +use crate::rustc_codegen_ssa::traits::{BaseTypeMethods, DerivedTypeMethods, LayoutTypeMethods}; +use rustc_middle::bug; +use rustc_middle::ty::{self, Ty, TypeFoldable}; +use rustc_middle::ty::layout::{FnAbiExt, TyAndLayout}; +use rustc_middle::ty::print::with_no_trimmed_paths; +use rustc_target::abi::{self, Abi, F32, F64, FieldsShape, Int, Integer, LayoutOf, Pointer, PointeeInfo, Size, TyAndLayoutMethods, Variants}; +use rustc_target::abi::call::{CastTarget, FnAbi, Reg}; + +use crate::abi::{FnAbiGccExt, GccType}; +use crate::context::CodegenCx; +use crate::type_::struct_fields; + +impl<'gcc, 'tcx> CodegenCx<'gcc, 'tcx> { + fn type_from_unsigned_integer(&self, i: Integer) -> Type<'gcc> { + use Integer::*; + match i { + I8 => self.type_u8(), + I16 => self.type_u16(), + I32 => self.type_u32(), + I64 => self.type_u64(), + I128 => self.type_u128(), + } + } +} + +pub fn uncached_gcc_type<'gcc, 'tcx>(cx: &CodegenCx<'gcc, 'tcx>, layout: TyAndLayout<'tcx>, defer: &mut Option<(Struct<'gcc>, TyAndLayout<'tcx>)>) -> Type<'gcc> { + match layout.abi { + Abi::Scalar(_) => bug!("handled elsewhere"), + Abi::Vector { ref element, count } => { + let element = layout.scalar_gcc_type_at(cx, element, Size::ZERO); + return cx.context.new_vector_type(element, count); + }, + Abi::ScalarPair(..) => { + return cx.type_struct( + &[ + layout.scalar_pair_element_gcc_type(cx, 0, false), + layout.scalar_pair_element_gcc_type(cx, 1, false), + ], + false, + ); + } + Abi::Uninhabited | Abi::Aggregate { .. } => {} + } + + let name = match layout.ty.kind() { + // FIXME(eddyb) producing readable type names for trait objects can result + // in problematically distinct types due to HRTB and subtyping (see #47638). + // ty::Dynamic(..) | + ty::Adt(..) | ty::Closure(..) | ty::Foreign(..) | ty::Generator(..) | ty::Str + if !cx.sess().fewer_names() => + { + let mut name = with_no_trimmed_paths(|| layout.ty.to_string()); + if let (&ty::Adt(def, _), &Variants::Single { index }) = + (layout.ty.kind(), &layout.variants) + { + if def.is_enum() && !def.variants.is_empty() { + write!(&mut name, "::{}", def.variants[index].ident).unwrap(); + } + } + if let (&ty::Generator(_, _, _), &Variants::Single { index }) = + (layout.ty.kind(), &layout.variants) + { + write!(&mut name, "::{}", ty::GeneratorSubsts::variant_name(index)).unwrap(); + } + Some(name) + } + ty::Adt(..) => { + // If `Some` is returned then a named struct is created in LLVM. Name collisions are + // avoided by LLVM (with increasing suffixes). If rustc doesn't generate names then that + // can improve perf. 
+ // FIXME: I don't think that's true for libgccjit. + Some(String::new()) + } + _ => None, + }; + + match layout.fields { + FieldsShape::Primitive | FieldsShape::Union(_) => { + let fill = cx.type_padding_filler(layout.size, layout.align.abi); + let packed = false; + match name { + None => cx.type_struct(&[fill], packed), + Some(ref name) => { + let gcc_type = cx.type_named_struct(name); + cx.set_struct_body(gcc_type, &[fill], packed); + gcc_type.as_type() + }, + } + } + FieldsShape::Array { count, .. } => cx.type_array(layout.field(cx, 0).gcc_type(cx, true), count), + FieldsShape::Arbitrary { .. } => + match name { + None => { + let (gcc_fields, packed) = struct_fields(cx, layout); + cx.type_struct(&gcc_fields, packed) + }, + Some(ref name) => { + let gcc_type = cx.type_named_struct(name); + *defer = Some((gcc_type, layout)); + gcc_type.as_type() + }, + }, + } +} + +pub trait LayoutGccExt<'tcx> { + fn is_gcc_immediate(&self) -> bool; + fn is_gcc_scalar_pair(&self) -> bool; + fn gcc_type<'gcc>(&self, cx: &CodegenCx<'gcc, 'tcx>, set_fields: bool) -> Type<'gcc>; + fn immediate_gcc_type<'gcc>(&self, cx: &CodegenCx<'gcc, 'tcx>) -> Type<'gcc>; + fn scalar_gcc_type_at<'gcc>(&self, cx: &CodegenCx<'gcc, 'tcx>, scalar: &abi::Scalar, offset: Size) -> Type<'gcc>; + fn scalar_pair_element_gcc_type<'gcc>(&self, cx: &CodegenCx<'gcc, 'tcx>, index: usize, immediate: bool) -> Type<'gcc>; + fn gcc_field_index(&self, index: usize) -> u64; + fn pointee_info_at<'gcc>(&self, cx: &CodegenCx<'gcc, 'tcx>, offset: Size) -> Option; +} + +impl<'tcx> LayoutGccExt<'tcx> for TyAndLayout<'tcx> { + fn is_gcc_immediate(&self) -> bool { + match self.abi { + Abi::Scalar(_) | Abi::Vector { .. } => true, + Abi::ScalarPair(..) => false, + Abi::Uninhabited | Abi::Aggregate { .. } => self.is_zst(), + } + } + + fn is_gcc_scalar_pair(&self) -> bool { + match self.abi { + Abi::ScalarPair(..) => true, + Abi::Uninhabited | Abi::Scalar(_) | Abi::Vector { .. } | Abi::Aggregate { .. } => false, + } + } + + /// Gets the GCC type corresponding to a Rust type, i.e., `rustc_middle::ty::Ty`. + /// The pointee type of the pointer in `PlaceRef` is always this type. + /// For sized types, it is also the right LLVM type for an `alloca` + /// containing a value of that type, and most immediates (except `bool`). + /// Unsized types, however, are represented by a "minimal unit", e.g. + /// `[T]` becomes `T`, while `str` and `Trait` turn into `i8` - this + /// is useful for indexing slices, as `&[T]`'s data pointer is `T*`. + /// If the type is an unsized struct, the regular layout is generated, + /// with the inner-most trailing unsized field using the "minimal unit" + /// of that field's type - this is useful for taking the address of + /// that field and ensuring the struct has the right alignment. + fn gcc_type<'gcc>(&self, cx: &CodegenCx<'gcc, 'tcx>, set_fields: bool) -> Type<'gcc> { + if let Abi::Scalar(ref scalar) = self.abi { + // Use a different cache for scalars because pointers to DSTs + // can be either fat or thin (data pointers of fat pointers). + if let Some(&ty) = cx.scalar_types.borrow().get(&self.ty) { + return ty; + } + let ty = + match *self.ty.kind() { + ty::Ref(_, ty, _) | ty::RawPtr(ty::TypeAndMut { ty, .. 
}) => { + cx.type_ptr_to(cx.layout_of(ty).gcc_type(cx, set_fields)) + } + ty::Adt(def, _) if def.is_box() => { + cx.type_ptr_to(cx.layout_of(self.ty.boxed_ty()).gcc_type(cx, true)) + } + ty::FnPtr(sig) => cx.fn_ptr_backend_type(&FnAbi::of_fn_ptr(cx, sig, &[])), + _ => self.scalar_gcc_type_at(cx, scalar, Size::ZERO), + }; + cx.scalar_types.borrow_mut().insert(self.ty, ty); + return ty; + } + + // Check the cache. + let variant_index = + match self.variants { + Variants::Single { index } => Some(index), + _ => None, + }; + let cached_type = cx.types.borrow().get(&(self.ty, variant_index)).cloned(); + if let Some(ty) = cached_type { + let type_to_set_fields = cx.types_with_fields_to_set.borrow_mut().remove(&ty); + if let Some((struct_type, layout)) = type_to_set_fields { + // Since we might be trying to generate a type containing another type which is not + // completely generated yet, we deferred setting the fields until now. + let (fields, packed) = struct_fields(cx, layout); + cx.set_struct_body(struct_type, &fields, packed); + } + return ty; + } + + //debug!("gcc_type({:#?})", self); + + assert!(!self.ty.has_escaping_bound_vars(), "{:?} has escaping bound vars", self.ty); + + // Make sure lifetimes are erased, to avoid generating distinct LLVM + // types for Rust types that only differ in the choice of lifetimes. + let normal_ty = cx.tcx.erase_regions(self.ty); + + let mut defer = None; + let ty = + if self.ty != normal_ty { + let mut layout = cx.layout_of(normal_ty); + if let Some(v) = variant_index { + layout = layout.for_variant(cx, v); + } + layout.gcc_type(cx, true) + } + else { + uncached_gcc_type(cx, *self, &mut defer) + }; + //debug!("--> mapped {:#?} to ty={:?}", self, ty); + + cx.types.borrow_mut().insert((self.ty, variant_index), ty); + + if let Some((ty, layout)) = defer { + //TODO: do we still need this conditions and the set_fields parameter? + //if set_fields { + let (fields, packed) = struct_fields(cx, layout); + cx.set_struct_body(ty, &fields, packed); + /*} + else { + // Since we might be trying to generate a type containing another type which is not + // completely generated yet, we don't set the fields right now, but we save the + // type to set the fields later. + cx.types_with_fields_to_set.borrow_mut().insert(ty.as_type(), (ty, layout)); + }*/ + } + + ty + } + + fn immediate_gcc_type<'gcc>(&self, cx: &CodegenCx<'gcc, 'tcx>) -> Type<'gcc> { + if let Abi::Scalar(ref scalar) = self.abi { + if scalar.is_bool() { + return cx.type_i1(); + } + } + self.gcc_type(cx, true) + } + + fn scalar_gcc_type_at<'gcc>(&self, cx: &CodegenCx<'gcc, 'tcx>, scalar: &abi::Scalar, offset: Size) -> Type<'gcc> { + match scalar.value { + Int(i, true) => cx.type_from_integer(i), + Int(i, false) => cx.type_from_unsigned_integer(i), + F32 => cx.type_f32(), + F64 => cx.type_f64(), + Pointer => { + // If we know the alignment, pick something better than i8. + let pointee = + if let Some(pointee) = self.pointee_info_at(cx, offset) { + cx.type_pointee_for_align(pointee.align) + } + else { + cx.type_i8() + }; + cx.type_ptr_to(pointee) + } + } + } + + fn scalar_pair_element_gcc_type<'gcc>(&self, cx: &CodegenCx<'gcc, 'tcx>, index: usize, immediate: bool) -> Type<'gcc> { + // TODO: remove llvm hack: + // HACK(eddyb) special-case fat pointers until LLVM removes + // pointee types, to avoid bitcasting every `OperandRef::deref`. + match self.ty.kind() { + ty::Ref(..) 
+            | ty::RawPtr(_) => {
+                return self.field(cx, index).gcc_type(cx, true);
+            }
+            ty::Adt(def, _) if def.is_box() => {
+                let ptr_ty = cx.tcx.mk_mut_ptr(self.ty.boxed_ty());
+                return cx.layout_of(ptr_ty).scalar_pair_element_gcc_type(cx, index, immediate);
+            }
+            _ => {}
+        }
+
+        let (a, b) = match self.abi {
+            Abi::ScalarPair(ref a, ref b) => (a, b),
+            _ => bug!("TyAndLayout::scalar_pair_element_gcc_type({:?}): not applicable", self),
+        };
+        let scalar = [a, b][index];
+
+        // Make sure to return the same type `immediate_gcc_type` would when
+        // dealing with an immediate pair. This means that `(bool, bool)` is
+        // effectively represented as `{i8, i8}` in memory and two `i1`s as an
+        // immediate, just like `bool` is typically `i8` in memory and only `i1`
+        // when immediate. We need to load/store `bool` as `i8` to avoid
+        // crippling LLVM optimizations or triggering other LLVM bugs with `i1`.
+        // TODO: these bugs certainly don't happen in this case since the bool type is used instead of i1.
+        if /*immediate &&*/ scalar.is_bool() {
+            return cx.type_i1();
+        }
+
+        let offset =
+            if index == 0 {
+                Size::ZERO
+            }
+            else {
+                a.value.size(cx).align_to(b.value.align(cx).abi)
+            };
+        self.scalar_gcc_type_at(cx, scalar, offset)
+    }
+
+    fn gcc_field_index(&self, index: usize) -> u64 {
+        match self.abi {
+            Abi::Scalar(_) | Abi::ScalarPair(..) => {
+                bug!("TyAndLayout::gcc_field_index({:?}): not applicable", self)
+            }
+            _ => {}
+        }
+        match self.fields {
+            FieldsShape::Primitive | FieldsShape::Union(_) => {
+                bug!("TyAndLayout::gcc_field_index({:?}): not applicable", self)
+            }
+
+            FieldsShape::Array { .. } => index as u64,
+
+            FieldsShape::Arbitrary { .. } => 1 + (self.fields.memory_index(index) as u64) * 2,
+        }
+    }
+
+    fn pointee_info_at<'a>(&self, cx: &CodegenCx<'a, 'tcx>, offset: Size) -> Option<PointeeInfo> {
+        if let Some(&pointee) = cx.pointee_infos.borrow().get(&(self.ty, offset)) {
+            return pointee;
+        }
+
+        let result = Ty::pointee_info_at(*self, cx, offset);
+
+        cx.pointee_infos.borrow_mut().insert((self.ty, offset), result);
+        result
+    }
+}
+
+impl<'gcc, 'tcx> LayoutTypeMethods<'tcx> for CodegenCx<'gcc, 'tcx> {
+    fn backend_type(&self, layout: TyAndLayout<'tcx>) -> Type<'gcc> {
+        layout.gcc_type(self, true)
+    }
+
+    fn immediate_backend_type(&self, layout: TyAndLayout<'tcx>) -> Type<'gcc> {
+        layout.immediate_gcc_type(self)
+    }
+
+    fn is_backend_immediate(&self, layout: TyAndLayout<'tcx>) -> bool {
+        layout.is_gcc_immediate()
+    }
+
+    fn is_backend_scalar_pair(&self, layout: TyAndLayout<'tcx>) -> bool {
+        layout.is_gcc_scalar_pair()
+    }
+
+    fn backend_field_index(&self, layout: TyAndLayout<'tcx>, index: usize) -> u64 {
+        layout.gcc_field_index(index)
+    }
+
+    fn scalar_pair_element_backend_type(&self, layout: TyAndLayout<'tcx>, index: usize, immediate: bool) -> Type<'gcc> {
+        layout.scalar_pair_element_gcc_type(self, index, immediate)
+    }
+
+    fn cast_backend_type(&self, ty: &CastTarget) -> Type<'gcc> {
+        ty.gcc_type(self)
+    }
+
+    fn fn_ptr_backend_type(&self, fn_abi: &FnAbi<'tcx, Ty<'tcx>>) -> Type<'gcc> {
+        fn_abi.ptr_to_gcc_type(self)
+    }
+
+    fn reg_backend_type(&self, _ty: &Reg) -> Type<'gcc> {
+        unimplemented!();
+        //ty.gcc_type(self)
+    }
+}
diff --git a/src/va_arg.rs b/src/va_arg.rs
new file mode 100644
index 0000000000000..404a169d39ac9
--- /dev/null
+++ b/src/va_arg.rs
@@ -0,0 +1,179 @@
+/*use gccjit::{RValue, ToRValue, Type};
+use rustc_codegen_ssa::mir::operand::OperandRef;
+use rustc_codegen_ssa::{
+    common::IntPredicate,
+    traits::{BaseTypeMethods, BuilderMethods, ConstMethods, DerivedTypeMethods},
+};
+use
rustc_middle::ty::layout::HasTyCtxt; +use rustc_middle::ty::Ty; +use rustc_target::abi::{Align, Endian, HasDataLayout, LayoutOf, Size}; + +use crate::builder::Builder; +use crate::type_of::LayoutGccExt; + +fn round_pointer_up_to_alignment<'a, 'gcc, 'tcx>(bx: &mut Builder<'a, 'gcc, 'tcx>, addr: RValue<'gcc>, align: Align, ptr_ty: Type<'gcc>) -> RValue<'gcc> { + let mut ptr_as_int = bx.ptrtoint(addr, bx.cx().type_isize()); + ptr_as_int = bx.add(ptr_as_int, bx.cx().const_i32(align.bytes() as i32 - 1)); + ptr_as_int = bx.and(ptr_as_int, bx.cx().const_i32(-(align.bytes() as i32))); + bx.inttoptr(ptr_as_int, ptr_ty) +} + +fn emit_direct_ptr_va_arg<'a, 'gcc, 'tcx>(bx: &mut Builder<'a, 'gcc, 'tcx>, list: OperandRef<'tcx, RValue<'gcc>>, llty: Type<'gcc>, size: Size, align: Align, slot_size: Align, allow_higher_align: bool) -> (RValue<'gcc>, Align) { + let va_list_ptr_ty = bx.cx().type_ptr_to(bx.cx.type_i8p()); + let va_list_addr = + if list.layout.gcc_type(bx.cx, true) != va_list_ptr_ty { + bx.bitcast(list.immediate(), va_list_ptr_ty) + } + else { + list.immediate() + }; + + let ptr = bx.load(va_list_addr, bx.tcx().data_layout.pointer_align.abi); + + let (addr, addr_align) = if allow_higher_align && align > slot_size { + (round_pointer_up_to_alignment(bx, ptr, align, bx.cx().type_i8p()), align) + } else { + (ptr, slot_size) + }; + + let aligned_size = size.align_to(slot_size).bytes() as i32; + let full_direct_size = bx.cx().const_i32(aligned_size); + let next = bx.inbounds_gep(addr, &[full_direct_size]); + bx.store(next, va_list_addr, bx.tcx().data_layout.pointer_align.abi); + + if size.bytes() < slot_size.bytes() && bx.tcx().sess.target.endian == Endian::Big { + let adjusted_size = bx.cx().const_i32((slot_size.bytes() - size.bytes()) as i32); + let adjusted = bx.inbounds_gep(addr, &[adjusted_size]); + (bx.bitcast(adjusted, bx.cx().type_ptr_to(llty)), addr_align) + } else { + (bx.bitcast(addr, bx.cx().type_ptr_to(llty)), addr_align) + } +} + +fn emit_ptr_va_arg<'a, 'gcc, 'tcx>(bx: &mut Builder<'a, 'gcc, 'tcx>, list: OperandRef<'tcx, RValue<'gcc>>, target_ty: Ty<'tcx>, indirect: bool, slot_size: Align, allow_higher_align: bool) -> RValue<'gcc> { + let layout = bx.cx.layout_of(target_ty); + let (llty, size, align) = + if indirect { + ( + bx.cx.layout_of(bx.cx.tcx.mk_imm_ptr(target_ty)).gcc_type(bx.cx, true), + bx.cx.data_layout().pointer_size, + bx.cx.data_layout().pointer_align, + ) + } + else { + (layout.gcc_type(bx.cx, true), layout.size, layout.align) + }; + let (addr, addr_align) = emit_direct_ptr_va_arg(bx, list, llty, size, align.abi, slot_size, allow_higher_align); + if indirect { + let tmp_ret = bx.load(addr, addr_align); + bx.load(tmp_ret, align.abi) + } + else { + bx.load(addr, addr_align) + } +} + +fn emit_aapcs_va_arg<'a, 'gcc, 'tcx>(bx: &mut Builder<'a, 'gcc, 'tcx>, list: OperandRef<'tcx, RValue<'gcc>>, target_ty: Ty<'tcx>) -> RValue<'gcc> { + // Implementation of the AAPCS64 calling convention for va_args see + // https://github.com/ARM-software/abi-aa/blob/master/aapcs64/aapcs64.rst + let va_list_addr = list.immediate(); + let layout = bx.cx.layout_of(target_ty); + let gcc_type = layout.immediate_gcc_type(bx); + + let function = bx.llbb().get_function(); + let variable = function.new_local(None, gcc_type, "va_arg"); + + let mut maybe_reg = bx.build_sibling_block("va_arg.maybe_reg"); + let mut in_reg = bx.build_sibling_block("va_arg.in_reg"); + let mut on_stack = bx.build_sibling_block("va_arg.on_stack"); + let end = bx.build_sibling_block("va_arg.end"); + let zero = bx.const_i32(0); + 
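+    // Background (not from the original comments): AAPCS64 specifies `va_list`
+    // as a structure roughly like:
+    //     struct va_list {
+    //         void* stack;   // next stacked argument
+    //         void* gr_top;  // end of the general-purpose register save area
+    //         void* vr_top;  // end of the FP/SIMD register save area
+    //         int gr_offs;   // negative offset from gr_top to the next GP argument
+    //         int vr_offs;   // negative offset from vr_top to the next FP/SIMD argument
+    //     };
+    // The `struct_gep` indices used below (7 and 9, plus the `reg_top_index`
+    // values) are assumed to point into the GCC representation of that layout.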
let offset_align = Align::from_bytes(4).unwrap(); + assert!(bx.tcx().sess.target.endian == Endian::Little); + + let gr_type = target_ty.is_any_ptr() || target_ty.is_integral(); + let (reg_off, reg_top_index, slot_size) = if gr_type { + let gr_offs = bx.struct_gep(va_list_addr, 7); + let nreg = (layout.size.bytes() + 7) / 8; + (gr_offs, 3, nreg * 8) + } else { + let vr_off = bx.struct_gep(va_list_addr, 9); + let nreg = (layout.size.bytes() + 15) / 16; + (vr_off, 5, nreg * 16) + }; + + // if the offset >= 0 then the value will be on the stack + let mut reg_off_v = bx.load(reg_off, offset_align); + let use_stack = bx.icmp(IntPredicate::IntSGE, reg_off_v, zero); + bx.cond_br(use_stack, on_stack.llbb(), maybe_reg.llbb()); + + // The value at this point might be in a register, but there is a chance that + // it could be on the stack so we have to update the offset and then check + // the offset again. + + if gr_type && layout.align.abi.bytes() > 8 { + reg_off_v = maybe_reg.add(reg_off_v, bx.const_i32(15)); + reg_off_v = maybe_reg.and(reg_off_v, bx.const_i32(-16)); + } + let new_reg_off_v = maybe_reg.add(reg_off_v, bx.const_i32(slot_size as i32)); + + maybe_reg.store(new_reg_off_v, reg_off, offset_align); + + // Check to see if we have overflowed the registers as a result of this. + // If we have then we need to use the stack for this value + let use_stack = maybe_reg.icmp(IntPredicate::IntSGT, new_reg_off_v, zero); + maybe_reg.cond_br(use_stack, on_stack.llbb(), in_reg.llbb()); + + let top = in_reg.struct_gep(va_list_addr, reg_top_index); + let top = in_reg.load(top, bx.tcx().data_layout.pointer_align.abi); + + // reg_value = *(@top + reg_off_v); + let top = in_reg.gep(top, &[reg_off_v]); + let top = in_reg.bitcast(top, bx.cx.type_ptr_to(layout.gcc_type(bx, true))); + let reg_value = in_reg.load(top, layout.align.abi); + in_reg.assign(variable, reg_value); + in_reg.br(end.llbb()); + + // On Stack block + let stack_value = + emit_ptr_va_arg(&mut on_stack, list, target_ty, false, Align::from_bytes(8).unwrap(), true); + on_stack.assign(variable, stack_value); + on_stack.br(end.llbb()); + + *bx = end; + variable.to_rvalue() +} + +pub(super) fn emit_va_arg<'a, 'gcc, 'tcx>(bx: &mut Builder<'a, 'gcc, 'tcx>, addr: OperandRef<'tcx, RValue<'gcc>>, target_ty: Ty<'tcx>) -> RValue<'gcc> { + // Determine the va_arg implementation to use. The LLVM va_arg instruction + // is lacking in some instances, so we should only use it as a fallback. 
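+    // Roughly, the explicitly-handled targets below differ only in slot size
+    // (4 bytes on 32-bit x86, 8 bytes elsewhere), in whether an over-aligned
+    // argument may be realigned on the stack, and, on Windows x86-64, in
+    // passing any value larger than 8 bytes (or with a non-power-of-two size)
+    // indirectly. AArch64 with the standard AAPCS ABI gets its own
+    // implementation above; everything else falls through to the generic
+    // `va_arg` lowering.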
+ let target = &bx.cx.tcx.sess.target; + let arch = &bx.cx.tcx.sess.target.arch; + match &**arch { + // Windows x86 + "x86" if target.options.is_like_windows => { + emit_ptr_va_arg(bx, addr, target_ty, false, Align::from_bytes(4).unwrap(), false) + } + // Generic x86 + "x86" => emit_ptr_va_arg(bx, addr, target_ty, false, Align::from_bytes(4).unwrap(), true), + // Windows AArch64 + "aarch64" if target.options.is_like_windows => { + emit_ptr_va_arg(bx, addr, target_ty, false, Align::from_bytes(8).unwrap(), false) + } + // macOS / iOS AArch64 + "aarch64" if target.options.is_like_osx => { + emit_ptr_va_arg(bx, addr, target_ty, false, Align::from_bytes(8).unwrap(), true) + } + "aarch64" => emit_aapcs_va_arg(bx, addr, target_ty), + // Windows x86_64 + "x86_64" if target.options.is_like_windows => { + let target_ty_size = bx.cx.size_of(target_ty).bytes(); + let indirect: bool = target_ty_size > 8 || !target_ty_size.is_power_of_two(); + emit_ptr_va_arg(bx, addr, target_ty, indirect, Align::from_bytes(8).unwrap(), false) + } + // For all other architecture/OS combinations fall back to using + // the LLVM va_arg instruction. + // https://llvm.org/docs/LangRef.html#va-arg-instruction + _ => bx.va_arg(addr.immediate(), bx.cx.layout_of(target_ty).gcc_type(bx.cx, true)), + } +}*/ diff --git a/test.sh b/test.sh new file mode 100755 index 0000000000000..1e13b06cf9c3d --- /dev/null +++ b/test.sh @@ -0,0 +1,197 @@ +#!/bin/bash + +# TODO: rewrite to cargo-make (or just) or something like that to only rebuild the sysroot when needed? + +#set -x +set -e + +export GCC_PATH=$(cat gcc_path) + +export LD_LIBRARY_PATH="$GCC_PATH" +export LIBRARY_PATH="$GCC_PATH" + +if [[ "$1" == "--release" ]]; then + export CHANNEL='release' + CARGO_INCREMENTAL=1 cargo rustc --release +else + echo $LD_LIBRARY_PATH + export CHANNEL='debug' + cargo rustc +fi + +source config.sh + +rm -r target/out || true +mkdir -p target/out/gccjit + +echo "[BUILD] mini_core" +$RUSTC example/mini_core.rs --crate-name mini_core --crate-type lib,dylib --target $TARGET_TRIPLE + +echo "[BUILD] example" +$RUSTC example/example.rs --crate-type lib --target $TARGET_TRIPLE + +#if [[ "$HOST_TRIPLE" = "$TARGET_TRIPLE" ]]; then + #echo "[JIT] mini_core_hello_world" + #CG_CLIF_JIT=1 CG_CLIF_JIT_ARGS="abc bcd" $RUSTC --crate-type bin -Cprefer-dynamic example/mini_core_hello_world.rs --cfg jit --target $HOST_TRIPLE +#else + #echo "[JIT] mini_core_hello_world (skipped)" +#fi + +echo "[AOT] mini_core_hello_world" +$RUSTC example/mini_core_hello_world.rs --crate-name mini_core_hello_world --crate-type bin -g --target $TARGET_TRIPLE +$RUN_WRAPPER ./target/out/mini_core_hello_world abc bcd +# (echo "break set -n main"; echo "run"; sleep 1; echo "si -c 10"; sleep 1; echo "frame variable") | lldb -- ./target/out/mini_core_hello_world abc bcd + +echo "[BUILD] sysroot" +time ./build_sysroot/build_sysroot.sh + +echo "[AOT] arbitrary_self_types_pointers_and_wrappers" +$RUSTC example/arbitrary_self_types_pointers_and_wrappers.rs --crate-name arbitrary_self_types_pointers_and_wrappers --crate-type bin --target $TARGET_TRIPLE +$RUN_WRAPPER ./target/out/arbitrary_self_types_pointers_and_wrappers + +echo "[AOT] alloc_system" +$RUSTC example/alloc_system.rs --crate-type lib --target "$TARGET_TRIPLE" + +# FIXME: this requires linking an additional lib for __popcountdi2 +#echo "[AOT] alloc_example" +#$RUSTC example/alloc_example.rs --crate-type bin --target $TARGET_TRIPLE +#$RUN_WRAPPER ./target/out/alloc_example + +#if [[ "$HOST_TRIPLE" = "$TARGET_TRIPLE" ]]; then + #echo "[JIT] 
std_example" + #CG_CLIF_JIT=1 $RUSTC --crate-type bin -Cprefer-dynamic example/std_example.rs --target $HOST_TRIPLE +#else + #echo "[JIT] std_example (skipped)" +#fi + +echo "[AOT] dst_field_align" +# FIXME Re-add -Zmir-opt-level=2 once rust-lang/rust#67529 is fixed. +$RUSTC example/dst-field-align.rs --crate-name dst_field_align --crate-type bin --target $TARGET_TRIPLE +$RUN_WRAPPER ./target/out/dst_field_align || (echo $?; false) + +echo "[AOT] std_example" +$RUSTC example/std_example.rs --crate-type bin --target $TARGET_TRIPLE +$RUN_WRAPPER ./target/out/std_example --target $TARGET_TRIPLE + +echo "[AOT] subslice-patterns-const-eval" +$RUSTC example/subslice-patterns-const-eval.rs --crate-type bin -Cpanic=abort --target $TARGET_TRIPLE +$RUN_WRAPPER ./target/out/subslice-patterns-const-eval + +echo "[AOT] track-caller-attribute" +$RUSTC example/track-caller-attribute.rs --crate-type bin -Cpanic=abort --target $TARGET_TRIPLE +$RUN_WRAPPER ./target/out/track-caller-attribute + +# FIXME: this requires linking an additional lib for __popcountdi2 +#echo "[BUILD] mod_bench" +#$RUSTC example/mod_bench.rs --crate-type bin --target $TARGET_TRIPLE + +# FIXME linker gives multiple definitions error on Linux +#echo "[BUILD] sysroot in release mode" +#./build_sysroot/build_sysroot.sh --release + +#pushd simple-raytracer +#if [[ "$HOST_TRIPLE" = "$TARGET_TRIPLE" ]]; then + #echo "[BENCH COMPILE] ebobby/simple-raytracer" + #hyperfine --runs ${RUN_RUNS:-10} --warmup 1 --prepare "rm -r target/*/debug || true" \ + #"RUSTFLAGS='' cargo build --target $TARGET_TRIPLE" \ + #"../cargo.sh build" + + #echo "[BENCH RUN] ebobby/simple-raytracer" + #cp ./target/*/debug/main ./raytracer_cg_gccjit + #hyperfine --runs ${RUN_RUNS:-10} ./raytracer_cg_llvm ./raytracer_cg_gccjit +#else + #echo "[BENCH COMPILE] ebobby/simple-raytracer (skipped)" + #echo "[COMPILE] ebobby/simple-raytracer" + #../cargo.sh build + #echo "[BENCH RUN] ebobby/simple-raytracer (skipped)" +#fi +#popd + +pushd build_sysroot/sysroot_src/library/core/tests +echo "[TEST] libcore" +rm -r ./target || true +../../../../../cargo.sh test +popd + +#pushd regex +#echo "[TEST] rust-lang/regex example shootout-regex-dna" +#../cargo.sh clean +## Make sure `[codegen mono items] start` doesn't poison the diff +#../cargo.sh build --example shootout-regex-dna +#cat examples/regexdna-input.txt | ../cargo.sh run --example shootout-regex-dna | grep -v "Spawned thread" > res.txt +#diff -u res.txt examples/regexdna-output.txt + +#echo "[TEST] rust-lang/regex tests" +#../cargo.sh test --tests -- --exclude-should-panic --test-threads 1 -Zunstable-options +#popd + +#echo +#echo "[BENCH COMPILE] mod_bench" + +#COMPILE_MOD_BENCH_INLINE="$RUSTC example/mod_bench.rs --crate-type bin -Zmir-opt-level=3 -O --crate-name mod_bench_inline" +#COMPILE_MOD_BENCH_LLVM_0="rustc example/mod_bench.rs --crate-type bin -Copt-level=0 -o target/out/mod_bench_llvm_0 -Cpanic=abort" +#COMPILE_MOD_BENCH_LLVM_1="rustc example/mod_bench.rs --crate-type bin -Copt-level=1 -o target/out/mod_bench_llvm_1 -Cpanic=abort" +#COMPILE_MOD_BENCH_LLVM_2="rustc example/mod_bench.rs --crate-type bin -Copt-level=2 -o target/out/mod_bench_llvm_2 -Cpanic=abort" +#COMPILE_MOD_BENCH_LLVM_3="rustc example/mod_bench.rs --crate-type bin -Copt-level=3 -o target/out/mod_bench_llvm_3 -Cpanic=abort" + +## Use 100 runs, because a single compilations doesn't take more than ~150ms, so it isn't very slow +#hyperfine --runs ${COMPILE_RUNS:-100} "$COMPILE_MOD_BENCH_INLINE" "$COMPILE_MOD_BENCH_LLVM_0" "$COMPILE_MOD_BENCH_LLVM_1" 
"$COMPILE_MOD_BENCH_LLVM_2" "$COMPILE_MOD_BENCH_LLVM_3" + +#echo +#echo "[BENCH RUN] mod_bench" +#hyperfine --runs ${RUN_RUNS:-10} ./target/out/mod_bench{,_inline} ./target/out/mod_bench_llvm_* + +echo +echo "[TEST] rust-lang/rust" + +rust_toolchain=$(cat rust-toolchain) + +git clone https://github.com/rust-lang/rust.git || true +cd rust +git fetch +git checkout $(rustc -V | cut -d' ' -f3 | tr -d '(') +export RUSTFLAGS= + +#git apply ../rust_lang.patch + + +rm config.toml || true + +cat > config.toml <>() + .join("\n"); + Some(lines) + }) + .test_cmds(move |path| { + // Test command 1: Compile `x.rs` into `tempdir/x`. + let mut exe = PathBuf::new(); + exe.push(&tempdir); + exe.push(path.file_stem().expect("file_stem")); + let mut compiler = Command::new("rustc"); + compiler.args(&[ + &format!("-Zcodegen-backend={}/target/debug/librustc_codegen_gcc.so", current_dir), + "--sysroot", &format!("{}/build_sysroot/sysroot/", current_dir), + "-Zno-parallel-llvm", + "-C", "panic=abort", + "-C", "link-arg=-lc", + "-o", exe.to_str().expect("to_str"), + path.to_str().expect("to_str"), + ]); + // Test command 2: run `tempdir/x`. + let runtime = Command::new(exe); + vec![("Compiler", compiler), ("Run-time", runtime)] + }) + .run(); +} diff --git a/tests/run/abort1.rs b/tests/run/abort1.rs new file mode 100644 index 0000000000000..291af5993aa25 --- /dev/null +++ b/tests/run/abort1.rs @@ -0,0 +1,51 @@ +// Compiler: +// +// Run-time: +// status: signal + +#![feature(auto_traits, lang_items, no_core, start, intrinsics)] + +#![no_std] +#![no_core] + +/* + * Core + */ + +// Because we don't have core yet. +#[lang = "sized"] +pub trait Sized {} + +#[lang = "copy"] +trait Copy { +} + +impl Copy for isize {} + +#[lang = "receiver"] +trait Receiver { +} + +#[lang = "freeze"] +pub(crate) unsafe auto trait Freeze {} + +mod intrinsics { + use super::Sized; + + extern "rust-intrinsic" { + pub fn abort() -> !; + } +} + +/* + * Code + */ + +fn test_fail() -> ! { + unsafe { intrinsics::abort() }; +} + +#[start] +fn main(mut argc: isize, _argv: *const *const u8) -> isize { + test_fail(); +} diff --git a/tests/run/abort2.rs b/tests/run/abort2.rs new file mode 100644 index 0000000000000..3c87c567892b5 --- /dev/null +++ b/tests/run/abort2.rs @@ -0,0 +1,53 @@ +// Compiler: +// +// Run-time: +// status: signal + +#![feature(auto_traits, lang_items, no_core, start, intrinsics)] + +#![no_std] +#![no_core] + +/* + * Core + */ + +// Because we don't have core yet. +#[lang = "sized"] +pub trait Sized {} + +#[lang = "copy"] +trait Copy { +} + +impl Copy for isize {} + +#[lang = "receiver"] +trait Receiver { +} + +#[lang = "freeze"] +pub(crate) unsafe auto trait Freeze {} + +mod intrinsics { + use super::Sized; + + extern "rust-intrinsic" { + pub fn abort() -> !; + } +} + +/* + * Code + */ + +fn fail() -> i32 { + unsafe { intrinsics::abort() }; + 0 +} + +#[start] +fn main(mut argc: isize, _argv: *const *const u8) -> isize { + fail(); + 0 +} diff --git a/tests/run/array.rs b/tests/run/array.rs new file mode 100644 index 0000000000000..8b621d8a3530f --- /dev/null +++ b/tests/run/array.rs @@ -0,0 +1,229 @@ +// Compiler: +// +// Run-time: +// status: 0 +// stdout: 42 +// 7 +// 5 +// 10 + +#![feature(arbitrary_self_types, auto_traits, lang_items, no_core, start, intrinsics)] + +#![no_std] +#![no_core] + +/* + * Core + */ + +// Because we don't have core yet. 
+#[lang = "sized"] +pub trait Sized {} + +#[lang = "copy"] +trait Copy { +} + +impl Copy for isize {} +impl Copy for usize {} +impl Copy for i32 {} +impl Copy for u8 {} +impl Copy for i8 {} +impl Copy for i16 {} + +#[lang = "receiver"] +trait Receiver { +} + +#[lang = "freeze"] +pub(crate) unsafe auto trait Freeze {} + +mod libc { + #[link(name = "c")] + extern "C" { + pub fn printf(format: *const i8, ...) -> i32; + pub fn puts(s: *const u8) -> i32; + } +} + +#[lang = "index"] +pub trait Index { + type Output: ?Sized; + fn index(&self, index: Idx) -> &Self::Output; +} + +impl Index for [T; 3] { + type Output = T; + + fn index(&self, index: usize) -> &Self::Output { + &self[index] + } +} + +impl Index for [T] { + type Output = T; + + fn index(&self, index: usize) -> &Self::Output { + &self[index] + } +} + +#[lang = "drop_in_place"] +#[allow(unconditional_recursion)] +pub unsafe fn drop_in_place(to_drop: *mut T) { + // Code here does not matter - this is replaced by the + // real drop glue by the compiler. + drop_in_place(to_drop); +} + +#[lang = "panic"] +#[track_caller] +#[no_mangle] +pub fn panic(_msg: &str) -> ! { + unsafe { + libc::puts("Panicking\0" as *const str as *const u8); + intrinsics::abort(); + } +} + +#[lang = "panic_location"] +struct PanicLocation { + file: &'static str, + line: u32, + column: u32, +} + +#[lang = "panic_bounds_check"] +#[track_caller] +#[no_mangle] +fn panic_bounds_check(index: usize, len: usize) -> ! { + unsafe { + libc::printf("index out of bounds: the len is %d but the index is %d\n\0" as *const str as *const i8, len, index); + intrinsics::abort(); + } +} + +mod intrinsics { + extern "rust-intrinsic" { + pub fn abort() -> !; + } +} + +#[lang = "add"] +trait Add { + type Output; + + fn add(self, rhs: RHS) -> Self::Output; +} + +impl Add for u8 { + type Output = Self; + + fn add(self, rhs: Self) -> Self { + self + rhs + } +} + +impl Add for i8 { + type Output = Self; + + fn add(self, rhs: Self) -> Self { + self + rhs + } +} + +impl Add for i32 { + type Output = Self; + + fn add(self, rhs: Self) -> Self { + self + rhs + } +} + +impl Add for usize { + type Output = Self; + + fn add(self, rhs: Self) -> Self { + self + rhs + } +} + +impl Add for isize { + type Output = Self; + + fn add(self, rhs: Self) -> Self { + self + rhs + } +} + +#[lang = "sub"] +pub trait Sub { + type Output; + + fn sub(self, rhs: RHS) -> Self::Output; +} + +impl Sub for usize { + type Output = Self; + + fn sub(self, rhs: Self) -> Self { + self - rhs + } +} + +impl Sub for isize { + type Output = Self; + + fn sub(self, rhs: Self) -> Self { + self - rhs + } +} + +impl Sub for u8 { + type Output = Self; + + fn sub(self, rhs: Self) -> Self { + self - rhs + } +} + +impl Sub for i8 { + type Output = Self; + + fn sub(self, rhs: Self) -> Self { + self - rhs + } +} + +impl Sub for i16 { + type Output = Self; + + fn sub(self, rhs: Self) -> Self { + self - rhs + } +} + + +/* + * Code + */ + +static mut ONE: usize = 1; + +fn make_array() -> [u8; 3] { + [42, 10, 5] +} + +#[start] +fn main(argc: isize, _argv: *const *const u8) -> isize { + let array = [42, 7, 5]; + let array2 = make_array(); + unsafe { + libc::printf(b"%ld\n\0" as *const u8 as *const i8, array[ONE - 1]); + libc::printf(b"%ld\n\0" as *const u8 as *const i8, array[ONE]); + libc::printf(b"%ld\n\0" as *const u8 as *const i8, array[ONE + 1]); + + libc::printf(b"%d\n\0" as *const u8 as *const i8, array2[argc as usize] as u32); + } + 0 +} diff --git a/tests/run/asm.rs b/tests/run/asm.rs new file mode 100644 index 0000000000000..bd76153e047e4 
--- /dev/null +++ b/tests/run/asm.rs @@ -0,0 +1,66 @@ +// Compiler: +// +// Run-time: +// status: 0 + +#![feature(asm, global_asm)] + +global_asm!(" + .global add_asm +add_asm: + mov rax, rdi + add rax, rsi + ret" +); + +extern "C" { + fn add_asm(a: i64, b: i64) -> i64; +} + +fn main() { + unsafe { + asm!("nop"); + } + + let x: u64; + unsafe { + asm!("mov $5, {}", + out(reg) x, + options(att_syntax) + ); + } + assert_eq!(x, 5); + + let x: u64; + let input: u64 = 42; + unsafe { + asm!("mov {input}, {output}", + "add $1, {output}", + input = in(reg) input, + output = out(reg) x, + options(att_syntax) + ); + } + assert_eq!(x, 43); + + let x: u64; + unsafe { + asm!("mov {}, 6", + out(reg) x, + ); + } + assert_eq!(x, 6); + + let x: u64; + let input: u64 = 42; + unsafe { + asm!("mov {output}, {input}", + "add {output}, 1", + input = in(reg) input, + output = out(reg) x, + ); + } + assert_eq!(x, 43); + + assert_eq!(unsafe { add_asm(40, 2) }, 42); +} diff --git a/tests/run/assign.rs b/tests/run/assign.rs new file mode 100644 index 0000000000000..cc8647006ca63 --- /dev/null +++ b/tests/run/assign.rs @@ -0,0 +1,153 @@ +// Compiler: +// +// Run-time: +// stdout: 2 +// 7 8 +// 10 + +#![allow(unused_attributes)] +#![feature(auto_traits, lang_items, no_core, start, intrinsics, track_caller)] + +#![no_std] +#![no_core] + +/* + * Core + */ + +// Because we don't have core yet. +#[lang = "sized"] +pub trait Sized {} + +#[lang = "copy"] +trait Copy { +} + +impl Copy for isize {} +impl Copy for *mut i32 {} +impl Copy for usize {} +impl Copy for u8 {} +impl Copy for i8 {} +impl Copy for i32 {} + +#[lang = "receiver"] +trait Receiver { +} + +#[lang = "freeze"] +pub(crate) unsafe auto trait Freeze {} + +#[lang = "panic_location"] +struct PanicLocation { + file: &'static str, + line: u32, + column: u32, +} + +mod libc { + #[link(name = "c")] + extern "C" { + pub fn puts(s: *const u8) -> i32; + pub fn fflush(stream: *mut i32) -> i32; + pub fn printf(format: *const i8, ...) -> i32; + + pub static STDOUT: *mut i32; + } +} + +mod intrinsics { + extern "rust-intrinsic" { + pub fn abort() -> !; + } +} + +#[lang = "panic"] +#[track_caller] +#[no_mangle] +pub fn panic(_msg: &str) -> ! 
{ + unsafe { + libc::puts("Panicking\0" as *const str as *const u8); + libc::fflush(libc::STDOUT); + intrinsics::abort(); + } +} + +#[lang = "add"] +trait Add { + type Output; + + fn add(self, rhs: RHS) -> Self::Output; +} + +impl Add for u8 { + type Output = Self; + + fn add(self, rhs: Self) -> Self { + self + rhs + } +} + +impl Add for i8 { + type Output = Self; + + fn add(self, rhs: Self) -> Self { + self + rhs + } +} + +impl Add for i32 { + type Output = Self; + + fn add(self, rhs: Self) -> Self { + self + rhs + } +} + +impl Add for usize { + type Output = Self; + + fn add(self, rhs: Self) -> Self { + self + rhs + } +} + +impl Add for isize { + type Output = Self; + + fn add(self, rhs: Self) -> Self { + self + rhs + } +} + +/* + * Code + */ + +fn inc_ref(num: &mut isize) -> isize { + *num = *num + 5; + *num + 1 +} + +fn inc(num: isize) -> isize { + num + 1 +} + + +#[start] +fn main(mut argc: isize, _argv: *const *const u8) -> isize { + argc = inc(argc); + unsafe { + libc::printf(b"%ld\n\0" as *const u8 as *const i8, argc); + } + + let b = inc_ref(&mut argc); + unsafe { + libc::printf(b"%ld %ld\n\0" as *const u8 as *const i8, argc, b); + } + + argc = 10; + unsafe { + libc::printf(b"%ld\n\0" as *const u8 as *const i8, argc); + } + 0 +} diff --git a/tests/run/closure.rs b/tests/run/closure.rs new file mode 100644 index 0000000000000..7121a5f0d5221 --- /dev/null +++ b/tests/run/closure.rs @@ -0,0 +1,230 @@ +// Compiler: +// +// Run-time: +// status: 0 +// stdout: Arg: 1 +// Argument: 1 +// String arg: 1 +// Int argument: 2 +// Both args: 11 + +#![feature(arbitrary_self_types, auto_traits, lang_items, no_core, start, intrinsics, + unboxed_closures)] + +#![no_std] +#![no_core] + +/* + * Core + */ + +// Because we don't have core yet. +#[lang = "sized"] +pub trait Sized {} + +#[lang = "copy"] +trait Copy { +} + +impl Copy for isize {} +impl Copy for usize {} +impl Copy for i32 {} +impl Copy for u32 {} +impl Copy for u8 {} +impl Copy for i8 {} + +#[lang = "receiver"] +trait Receiver { +} + +#[lang = "freeze"] +pub(crate) unsafe auto trait Freeze {} + +mod libc { + #[link(name = "c")] + extern "C" { + pub fn puts(s: *const u8) -> i32; + pub fn printf(format: *const i8, ...) -> i32; + } +} + +#[lang = "index"] +pub trait Index { + type Output: ?Sized; + fn index(&self, index: Idx) -> &Self::Output; +} + +impl Index for [T; 3] { + type Output = T; + + fn index(&self, index: usize) -> &Self::Output { + &self[index] + } +} + +impl Index for [T] { + type Output = T; + + fn index(&self, index: usize) -> &Self::Output { + &self[index] + } +} + +#[lang = "drop_in_place"] +#[allow(unconditional_recursion)] +pub unsafe fn drop_in_place(to_drop: *mut T) { + // Code here does not matter - this is replaced by the + // real drop glue by the compiler. + drop_in_place(to_drop); +} + +#[lang = "panic_location"] +struct PanicLocation { + file: &'static str, + line: u32, + column: u32, +} + +#[lang = "panic_bounds_check"] +#[track_caller] +#[no_mangle] +fn panic_bounds_check(index: usize, len: usize) -> ! 
{ + unsafe { + libc::printf("index out of bounds: the len is %d but the index is %d\n\0" as *const str as *const i8, len, index); + intrinsics::abort(); + } +} + +mod intrinsics { + extern "rust-intrinsic" { + pub fn abort() -> !; + } +} + +#[lang = "unsize"] +pub trait Unsize {} + +#[lang = "coerce_unsized"] +pub trait CoerceUnsized {} + +impl<'a, 'b: 'a, T: ?Sized + Unsize, U: ?Sized> CoerceUnsized<&'a U> for &'b T {} +impl<'a, T: ?Sized + Unsize, U: ?Sized> CoerceUnsized<&'a mut U> for &'a mut T {} +impl, U: ?Sized> CoerceUnsized<*const U> for *const T {} +impl, U: ?Sized> CoerceUnsized<*mut U> for *mut T {} + +#[lang = "fn_once"] +#[rustc_paren_sugar] +pub trait FnOnce { + #[lang = "fn_once_output"] + type Output; + + extern "rust-call" fn call_once(self, args: Args) -> Self::Output; +} + +#[lang = "fn_mut"] +#[rustc_paren_sugar] +pub trait FnMut: FnOnce { + extern "rust-call" fn call_mut(&mut self, args: Args) -> Self::Output; +} + +#[lang = "add"] +trait Add { + type Output; + + fn add(self, rhs: RHS) -> Self::Output; +} + +impl Add for u8 { + type Output = Self; + + fn add(self, rhs: Self) -> Self { + self + rhs + } +} + +impl Add for i8 { + type Output = Self; + + fn add(self, rhs: Self) -> Self { + self + rhs + } +} + +impl Add for i32 { + type Output = Self; + + fn add(self, rhs: Self) -> Self { + self + rhs + } +} + +impl Add for usize { + type Output = Self; + + fn add(self, rhs: Self) -> Self { + self + rhs + } +} + +impl Add for isize { + type Output = Self; + + fn add(self, rhs: Self) -> Self { + self + rhs + } +} + +#[lang = "panic"] +#[track_caller] +#[no_mangle] +pub fn panic(_msg: &str) -> ! { + unsafe { + libc::puts("Panicking\0" as *const str as *const u8); + intrinsics::abort(); + } +} + +/* + * Code + */ + +#[start] +fn main(mut argc: isize, _argv: *const *const u8) -> isize { + let string = "Arg: %d\n\0"; + let mut closure = || { + unsafe { + libc::printf(string as *const str as *const i8, argc); + } + }; + closure(); + + let mut closure = || { + unsafe { + libc::printf("Argument: %d\n\0" as *const str as *const i8, argc); + } + }; + closure(); + + let mut closure = |string| { + unsafe { + libc::printf(string as *const str as *const i8, argc); + } + }; + closure("String arg: %d\n\0"); + + let mut closure = |arg: isize| { + unsafe { + libc::printf("Int argument: %d\n\0" as *const str as *const i8, arg); + } + }; + closure(argc + 1); + + let mut closure = |string, arg: isize| { + unsafe { + libc::printf(string as *const str as *const i8, arg); + } + }; + closure("Both args: %d\n\0", argc + 10); + + 0 +} diff --git a/tests/run/condition.rs b/tests/run/condition.rs new file mode 100644 index 0000000000000..6a2e2d5bb11a9 --- /dev/null +++ b/tests/run/condition.rs @@ -0,0 +1,320 @@ +// Compiler: +// +// Run-time: +// status: 0 +// stdout: true +// 1 + +#![feature(arbitrary_self_types, auto_traits, lang_items, no_core, start, intrinsics)] + +#![no_std] +#![no_core] + +/* + * Core + */ + +// Because we don't have core yet. +#[lang = "sized"] +pub trait Sized {} + +#[lang = "copy"] +trait Copy { +} + +impl Copy for isize {} +impl Copy for usize {} +impl Copy for u64 {} +impl Copy for i32 {} +impl Copy for u32 {} +impl Copy for bool {} +impl Copy for u16 {} +impl Copy for i16 {} +impl Copy for char {} +impl Copy for i8 {} +impl Copy for u8 {} + +#[lang = "receiver"] +trait Receiver { +} + +#[lang = "freeze"] +pub(crate) unsafe auto trait Freeze {} + +mod libc { + #[link(name = "c")] + extern "C" { + pub fn printf(format: *const i8, ...) 
-> i32; + pub fn puts(s: *const u8) -> i32; + } +} + +#[lang = "index"] +pub trait Index { + type Output: ?Sized; + fn index(&self, index: Idx) -> &Self::Output; +} + +impl Index for [T; 3] { + type Output = T; + + fn index(&self, index: usize) -> &Self::Output { + &self[index] + } +} + +impl Index for [T] { + type Output = T; + + fn index(&self, index: usize) -> &Self::Output { + &self[index] + } +} + +#[lang = "drop_in_place"] +#[allow(unconditional_recursion)] +pub unsafe fn drop_in_place(to_drop: *mut T) { + // Code here does not matter - this is replaced by the + // real drop glue by the compiler. + drop_in_place(to_drop); +} + +#[lang = "panic"] +#[track_caller] +#[no_mangle] +pub fn panic(_msg: &str) -> ! { + unsafe { + libc::puts("Panicking\0" as *const str as *const u8); + intrinsics::abort(); + } +} + +#[lang = "panic_location"] +struct PanicLocation { + file: &'static str, + line: u32, + column: u32, +} + +#[lang = "panic_bounds_check"] +#[track_caller] +#[no_mangle] +fn panic_bounds_check(index: usize, len: usize) -> ! { + unsafe { + libc::printf("index out of bounds: the len is %d but the index is %d\n\0" as *const str as *const i8, len, index); + intrinsics::abort(); + } +} + +mod intrinsics { + extern "rust-intrinsic" { + pub fn abort() -> !; + } +} + +#[lang = "add"] +trait Add { + type Output; + + fn add(self, rhs: RHS) -> Self::Output; +} + +impl Add for u8 { + type Output = Self; + + fn add(self, rhs: Self) -> Self { + self + rhs + } +} + +impl Add for i8 { + type Output = Self; + + fn add(self, rhs: Self) -> Self { + self + rhs + } +} + +impl Add for i32 { + type Output = Self; + + fn add(self, rhs: Self) -> Self { + self + rhs + } +} + +impl Add for usize { + type Output = Self; + + fn add(self, rhs: Self) -> Self { + self + rhs + } +} + +impl Add for isize { + type Output = Self; + + fn add(self, rhs: Self) -> Self { + self + rhs + } +} + +#[lang = "sub"] +pub trait Sub { + type Output; + + fn sub(self, rhs: RHS) -> Self::Output; +} + +impl Sub for usize { + type Output = Self; + + fn sub(self, rhs: Self) -> Self { + self - rhs + } +} + +impl Sub for isize { + type Output = Self; + + fn sub(self, rhs: Self) -> Self { + self - rhs + } +} + +impl Sub for u8 { + type Output = Self; + + fn sub(self, rhs: Self) -> Self { + self - rhs + } +} + +impl Sub for i8 { + type Output = Self; + + fn sub(self, rhs: Self) -> Self { + self - rhs + } +} + +impl Sub for i16 { + type Output = Self; + + fn sub(self, rhs: Self) -> Self { + self - rhs + } +} + +#[lang = "eq"] +pub trait PartialEq { + fn eq(&self, other: &Rhs) -> bool; + fn ne(&self, other: &Rhs) -> bool; +} + +impl PartialEq for u8 { + fn eq(&self, other: &u8) -> bool { + (*self) == (*other) + } + fn ne(&self, other: &u8) -> bool { + (*self) != (*other) + } +} + +impl PartialEq for u16 { + fn eq(&self, other: &u16) -> bool { + (*self) == (*other) + } + fn ne(&self, other: &u16) -> bool { + (*self) != (*other) + } +} + +impl PartialEq for u32 { + fn eq(&self, other: &u32) -> bool { + (*self) == (*other) + } + fn ne(&self, other: &u32) -> bool { + (*self) != (*other) + } +} + + +impl PartialEq for u64 { + fn eq(&self, other: &u64) -> bool { + (*self) == (*other) + } + fn ne(&self, other: &u64) -> bool { + (*self) != (*other) + } +} + +impl PartialEq for usize { + fn eq(&self, other: &usize) -> bool { + (*self) == (*other) + } + fn ne(&self, other: &usize) -> bool { + (*self) != (*other) + } +} + +impl PartialEq for i8 { + fn eq(&self, other: &i8) -> bool { + (*self) == (*other) + } + fn ne(&self, other: &i8) -> bool { + 
(*self) != (*other) + } +} + +impl PartialEq for i32 { + fn eq(&self, other: &i32) -> bool { + (*self) == (*other) + } + fn ne(&self, other: &i32) -> bool { + (*self) != (*other) + } +} + +impl PartialEq for isize { + fn eq(&self, other: &isize) -> bool { + (*self) == (*other) + } + fn ne(&self, other: &isize) -> bool { + (*self) != (*other) + } +} + +impl PartialEq for char { + fn eq(&self, other: &char) -> bool { + (*self) == (*other) + } + fn ne(&self, other: &char) -> bool { + (*self) != (*other) + } +} + +/* + * Code + */ + +#[start] +fn main(argc: isize, _argv: *const *const u8) -> isize { + unsafe { + if argc == 1 { + libc::printf(b"true\n\0" as *const u8 as *const i8); + } + + let string = + match argc { + 1 => b"1\n\0", + 2 => b"2\n\0", + 3 => b"3\n\0", + 4 => b"4\n\0", + 5 => b"5\n\0", + _ => b"_\n\0", + }; + libc::printf(string as *const u8 as *const i8); + } + 0 +} diff --git a/tests/run/empty_main.rs b/tests/run/empty_main.rs new file mode 100644 index 0000000000000..c02cfd2a85f03 --- /dev/null +++ b/tests/run/empty_main.rs @@ -0,0 +1,39 @@ +// Compiler: +// +// Run-time: +// status: 0 + +#![feature(auto_traits, lang_items, no_core, start)] + +#![no_std] +#![no_core] + +/* + * Core + */ + +// Because we don't have core yet. +#[lang = "sized"] +pub trait Sized {} + +#[lang = "copy"] +trait Copy { +} + +impl Copy for isize {} + +#[lang = "receiver"] +trait Receiver { +} + +#[lang = "freeze"] +pub(crate) unsafe auto trait Freeze {} + +/* + * Code + */ + +#[start] +fn main(mut argc: isize, _argv: *const *const u8) -> isize { + 0 +} diff --git a/tests/run/exit.rs b/tests/run/exit.rs new file mode 100644 index 0000000000000..956e53dd4aa65 --- /dev/null +++ b/tests/run/exit.rs @@ -0,0 +1,49 @@ +// Compiler: +// +// Run-time: +// status: 2 + +#![feature(auto_traits, lang_items, no_core, start, intrinsics)] + +#![no_std] +#![no_core] + +mod libc { + #[link(name = "c")] + extern "C" { + pub fn exit(status: i32); + } +} + +/* + * Core + */ + +// Because we don't have core yet. +#[lang = "sized"] +pub trait Sized {} + +#[lang = "copy"] +trait Copy { +} + +impl Copy for isize {} + +#[lang = "receiver"] +trait Receiver { +} + +#[lang = "freeze"] +pub(crate) unsafe auto trait Freeze {} + +/* + * Code + */ + +#[start] +fn main(mut argc: isize, _argv: *const *const u8) -> isize { + unsafe { + libc::exit(2); + } + 0 +} diff --git a/tests/run/exit_code.rs b/tests/run/exit_code.rs new file mode 100644 index 0000000000000..eeab352095123 --- /dev/null +++ b/tests/run/exit_code.rs @@ -0,0 +1,39 @@ +// Compiler: +// +// Run-time: +// status: 1 + +#![feature(auto_traits, lang_items, no_core, start)] + +#![no_std] +#![no_core] + +/* + * Core + */ + +// Because we don't have core yet. +#[lang = "sized"] +pub trait Sized {} + +#[lang = "copy"] +trait Copy { +} + +impl Copy for isize {} + +#[lang = "receiver"] +trait Receiver { +} + +#[lang = "freeze"] +pub(crate) unsafe auto trait Freeze {} + +/* + * Code + */ + +#[start] +fn main(mut argc: isize, _argv: *const *const u8) -> isize { + 1 +} diff --git a/tests/run/fun_ptr.rs b/tests/run/fun_ptr.rs new file mode 100644 index 0000000000000..a226fff79e51b --- /dev/null +++ b/tests/run/fun_ptr.rs @@ -0,0 +1,223 @@ +// Compiler: +// +// Run-time: +// status: 0 +// stdout: 1 + +#![feature(arbitrary_self_types, auto_traits, lang_items, no_core, start, intrinsics)] + +#![no_std] +#![no_core] + +/* + * Core + */ + +// Because we don't have core yet. 
+#[lang = "sized"] +pub trait Sized {} + +#[lang = "copy"] +trait Copy { +} + +impl Copy for isize {} +impl Copy for usize {} +impl Copy for i32 {} +impl Copy for u8 {} +impl Copy for i8 {} +impl Copy for i16 {} + +#[lang = "receiver"] +trait Receiver { +} + +#[lang = "freeze"] +pub(crate) unsafe auto trait Freeze {} + +mod libc { + #[link(name = "c")] + extern "C" { + pub fn printf(format: *const i8, ...) -> i32; + pub fn puts(s: *const u8) -> i32; + } +} + +#[lang = "index"] +pub trait Index { + type Output: ?Sized; + fn index(&self, index: Idx) -> &Self::Output; +} + +impl Index for [T; 3] { + type Output = T; + + fn index(&self, index: usize) -> &Self::Output { + &self[index] + } +} + +impl Index for [T] { + type Output = T; + + fn index(&self, index: usize) -> &Self::Output { + &self[index] + } +} + +#[lang = "drop_in_place"] +#[allow(unconditional_recursion)] +pub unsafe fn drop_in_place(to_drop: *mut T) { + // Code here does not matter - this is replaced by the + // real drop glue by the compiler. + drop_in_place(to_drop); +} + +#[lang = "panic"] +#[track_caller] +#[no_mangle] +pub fn panic(_msg: &str) -> ! { + unsafe { + libc::puts("Panicking\0" as *const str as *const u8); + intrinsics::abort(); + } +} + +#[lang = "panic_location"] +struct PanicLocation { + file: &'static str, + line: u32, + column: u32, +} + +#[lang = "panic_bounds_check"] +#[track_caller] +#[no_mangle] +fn panic_bounds_check(index: usize, len: usize) -> ! { + unsafe { + libc::printf("index out of bounds: the len is %d but the index is %d\n\0" as *const str as *const i8, len, index); + intrinsics::abort(); + } +} + +mod intrinsics { + extern "rust-intrinsic" { + pub fn abort() -> !; + } +} + +#[lang = "add"] +trait Add { + type Output; + + fn add(self, rhs: RHS) -> Self::Output; +} + +impl Add for u8 { + type Output = Self; + + fn add(self, rhs: Self) -> Self { + self + rhs + } +} + +impl Add for i8 { + type Output = Self; + + fn add(self, rhs: Self) -> Self { + self + rhs + } +} + +impl Add for i32 { + type Output = Self; + + fn add(self, rhs: Self) -> Self { + self + rhs + } +} + +impl Add for usize { + type Output = Self; + + fn add(self, rhs: Self) -> Self { + self + rhs + } +} + +impl Add for isize { + type Output = Self; + + fn add(self, rhs: Self) -> Self { + self + rhs + } +} + +#[lang = "sub"] +pub trait Sub { + type Output; + + fn sub(self, rhs: RHS) -> Self::Output; +} + +impl Sub for usize { + type Output = Self; + + fn sub(self, rhs: Self) -> Self { + self - rhs + } +} + +impl Sub for isize { + type Output = Self; + + fn sub(self, rhs: Self) -> Self { + self - rhs + } +} + +impl Sub for u8 { + type Output = Self; + + fn sub(self, rhs: Self) -> Self { + self - rhs + } +} + +impl Sub for i8 { + type Output = Self; + + fn sub(self, rhs: Self) -> Self { + self - rhs + } +} + +impl Sub for i16 { + type Output = Self; + + fn sub(self, rhs: Self) -> Self { + self - rhs + } +} + + +/* + * Code + */ + +fn i16_as_i8(a: i16) -> i8 { + a as i8 +} + +fn call_func(func: fn(i16) -> i8, param: i16) -> i8 { + func(param) +} + +#[start] +fn main(argc: isize, _argv: *const *const u8) -> isize { + unsafe { + let result = call_func(i16_as_i8, argc as i16) as isize; + libc::printf(b"%ld\n\0" as *const u8 as *const i8, result); + } + 0 +} diff --git a/tests/run/int_overflow.rs b/tests/run/int_overflow.rs new file mode 100644 index 0000000000000..7111703ca2532 --- /dev/null +++ b/tests/run/int_overflow.rs @@ -0,0 +1,129 @@ +// Compiler: +// +// Run-time: +// stdout: Panicking +// status: signal + 
+#![allow(unused_attributes)] +#![feature(auto_traits, lang_items, no_core, start, intrinsics)] + +#![no_std] +#![no_core] + +/* + * Core + */ + +// Because we don't have core yet. +#[lang = "sized"] +pub trait Sized {} + +#[lang = "copy"] +trait Copy { +} + +impl Copy for isize {} +impl Copy for *mut i32 {} +impl Copy for usize {} +impl Copy for i32 {} +impl Copy for u8 {} +impl Copy for i8 {} + +#[lang = "receiver"] +trait Receiver { +} + +#[lang = "freeze"] +pub(crate) unsafe auto trait Freeze {} + +#[lang = "panic_location"] +struct PanicLocation { + file: &'static str, + line: u32, + column: u32, +} + +mod libc { + #[link(name = "c")] + extern "C" { + pub fn puts(s: *const u8) -> i32; + pub fn fflush(stream: *mut i32) -> i32; + + pub static STDOUT: *mut i32; + } +} + +mod intrinsics { + extern "rust-intrinsic" { + pub fn abort() -> !; + } +} + +#[lang = "panic"] +#[track_caller] +#[no_mangle] +pub fn panic(_msg: &str) -> ! { + unsafe { + libc::puts("Panicking\0" as *const str as *const u8); + libc::fflush(libc::STDOUT); + intrinsics::abort(); + } +} + +#[lang = "add"] +trait Add { + type Output; + + fn add(self, rhs: RHS) -> Self::Output; +} + +impl Add for u8 { + type Output = Self; + + fn add(self, rhs: Self) -> Self { + self + rhs + } +} + +impl Add for i8 { + type Output = Self; + + fn add(self, rhs: Self) -> Self { + self + rhs + } +} + +impl Add for i32 { + type Output = Self; + + fn add(self, rhs: Self) -> Self { + self + rhs + } +} + +impl Add for usize { + type Output = Self; + + fn add(self, rhs: Self) -> Self { + self + rhs + } +} + +impl Add for isize { + type Output = Self; + + fn add(self, rhs: Self) -> Self { + self + rhs + } +} + +/* + * Code + */ + +#[start] +fn main(mut argc: isize, _argv: *const *const u8) -> isize { + let int = 9223372036854775807isize; + let int = int + argc; + int +} diff --git a/tests/run/mut_ref.rs b/tests/run/mut_ref.rs new file mode 100644 index 0000000000000..e8876009cc610 --- /dev/null +++ b/tests/run/mut_ref.rs @@ -0,0 +1,165 @@ + +// Compiler: +// +// Run-time: +// stdout: 2 +// 7 +// 6 +// 11 + +#![allow(unused_attributes)] +#![feature(auto_traits, lang_items, no_core, start, intrinsics, track_caller)] + +#![no_std] +#![no_core] + +/* + * Core + */ + +// Because we don't have core yet. +#[lang = "sized"] +pub trait Sized {} + +#[lang = "copy"] +trait Copy { +} + +impl Copy for isize {} +impl Copy for *mut i32 {} +impl Copy for usize {} +impl Copy for u8 {} +impl Copy for i8 {} +impl Copy for i32 {} + +#[lang = "receiver"] +trait Receiver { +} + +#[lang = "freeze"] +pub(crate) unsafe auto trait Freeze {} + +#[lang = "panic_location"] +struct PanicLocation { + file: &'static str, + line: u32, + column: u32, +} + +mod libc { + #[link(name = "c")] + extern "C" { + pub fn puts(s: *const u8) -> i32; + pub fn fflush(stream: *mut i32) -> i32; + pub fn printf(format: *const i8, ...) -> i32; + + pub static STDOUT: *mut i32; + } +} + +mod intrinsics { + extern "rust-intrinsic" { + pub fn abort() -> !; + } +} + +#[lang = "panic"] +#[track_caller] +#[no_mangle] +pub fn panic(_msg: &str) -> ! 
{ + unsafe { + libc::puts("Panicking\0" as *const str as *const u8); + libc::fflush(libc::STDOUT); + intrinsics::abort(); + } +} + +#[lang = "add"] +trait Add { + type Output; + + fn add(self, rhs: RHS) -> Self::Output; +} + +impl Add for u8 { + type Output = Self; + + fn add(self, rhs: Self) -> Self { + self + rhs + } +} + +impl Add for i8 { + type Output = Self; + + fn add(self, rhs: Self) -> Self { + self + rhs + } +} + +impl Add for i32 { + type Output = Self; + + fn add(self, rhs: Self) -> Self { + self + rhs + } +} + +impl Add for usize { + type Output = Self; + + fn add(self, rhs: Self) -> Self { + self + rhs + } +} + +impl Add for isize { + type Output = Self; + + fn add(self, rhs: Self) -> Self { + self + rhs + } +} + +/* + * Code + */ + +struct Test { + field: isize, +} + +fn test(num: isize) -> Test { + Test { + field: num + 1, + } +} + +fn update_num(num: &mut isize) { + *num = *num + 5; +} + +#[start] +fn main(mut argc: isize, _argv: *const *const u8) -> isize { + let mut test = test(argc); + unsafe { + libc::printf(b"%ld\n\0" as *const u8 as *const i8, test.field); + } + update_num(&mut test.field); + unsafe { + libc::printf(b"%ld\n\0" as *const u8 as *const i8, test.field); + } + + update_num(&mut argc); + unsafe { + libc::printf(b"%ld\n\0" as *const u8 as *const i8, argc); + } + + let refe = &mut argc; + *refe = *refe + 5; + unsafe { + libc::printf(b"%ld\n\0" as *const u8 as *const i8, argc); + } + + 0 +} diff --git a/tests/run/operations.rs b/tests/run/operations.rs new file mode 100644 index 0000000000000..4dc375309e426 --- /dev/null +++ b/tests/run/operations.rs @@ -0,0 +1,221 @@ +// Compiler: +// +// Run-time: +// stdout: 41 +// 39 +// 10 + +#![allow(unused_attributes)] +#![feature(auto_traits, lang_items, no_core, start, intrinsics, arbitrary_self_types)] + +#![no_std] +#![no_core] + +/* + * Core + */ + +// Because we don't have core yet. +#[lang = "sized"] +pub trait Sized {} + +#[lang = "copy"] +trait Copy { +} + +impl Copy for isize {} +impl Copy for *mut i32 {} +impl Copy for usize {} +impl Copy for u8 {} +impl Copy for i8 {} +impl Copy for i16 {} +impl Copy for i32 {} + +#[lang = "deref"] +pub trait Deref { + type Target: ?Sized; + + fn deref(&self) -> &Self::Target; +} + +#[lang = "receiver"] +trait Receiver { +} + +#[lang = "freeze"] +pub(crate) unsafe auto trait Freeze {} + +#[lang = "panic_location"] +struct PanicLocation { + file: &'static str, + line: u32, + column: u32, +} + +mod libc { + #[link(name = "c")] + extern "C" { + pub fn printf(format: *const i8, ...) -> i32; + pub fn puts(s: *const u8) -> i32; + pub fn fflush(stream: *mut i32) -> i32; + + pub static STDOUT: *mut i32; + } +} + +mod intrinsics { + extern "rust-intrinsic" { + pub fn abort() -> !; + } +} + +#[lang = "panic"] +#[track_caller] +#[no_mangle] +pub fn panic(_msg: &str) -> ! 
{ + unsafe { + libc::puts("Panicking\0" as *const str as *const u8); + libc::fflush(libc::STDOUT); + intrinsics::abort(); + } +} + +#[lang = "add"] +trait Add { + type Output; + + fn add(self, rhs: RHS) -> Self::Output; +} + +impl Add for u8 { + type Output = Self; + + fn add(self, rhs: Self) -> Self { + self + rhs + } +} + +impl Add for i8 { + type Output = Self; + + fn add(self, rhs: Self) -> Self { + self + rhs + } +} + +impl Add for i32 { + type Output = Self; + + fn add(self, rhs: Self) -> Self { + self + rhs + } +} + +impl Add for usize { + type Output = Self; + + fn add(self, rhs: Self) -> Self { + self + rhs + } +} + +impl Add for isize { + type Output = Self; + + fn add(self, rhs: Self) -> Self { + self + rhs + } +} + +#[lang = "sub"] +pub trait Sub { + type Output; + + fn sub(self, rhs: RHS) -> Self::Output; +} + +impl Sub for usize { + type Output = Self; + + fn sub(self, rhs: Self) -> Self { + self - rhs + } +} + +impl Sub for isize { + type Output = Self; + + fn sub(self, rhs: Self) -> Self { + self - rhs + } +} + +impl Sub for u8 { + type Output = Self; + + fn sub(self, rhs: Self) -> Self { + self - rhs + } +} + +impl Sub for i8 { + type Output = Self; + + fn sub(self, rhs: Self) -> Self { + self - rhs + } +} + +impl Sub for i16 { + type Output = Self; + + fn sub(self, rhs: Self) -> Self { + self - rhs + } +} + +#[lang = "mul"] +pub trait Mul { + type Output; + + #[must_use] + fn mul(self, rhs: RHS) -> Self::Output; +} + +impl Mul for u8 { + type Output = Self; + + fn mul(self, rhs: Self) -> Self::Output { + self * rhs + } +} + +impl Mul for usize { + type Output = Self; + + fn mul(self, rhs: Self) -> Self::Output { + self * rhs + } +} + +impl Mul for isize { + type Output = Self; + + fn mul(self, rhs: Self) -> Self::Output { + self * rhs + } +} + +/* + * Code + */ + +#[start] +fn main(mut argc: isize, _argv: *const *const u8) -> isize { + unsafe { + libc::printf(b"%ld\n\0" as *const u8 as *const i8, 40 + argc); + libc::printf(b"%ld\n\0" as *const u8 as *const i8, 40 - argc); + libc::printf(b"%ld\n\0" as *const u8 as *const i8, 10 * argc); + } + 0 +} diff --git a/tests/run/ptr_cast.rs b/tests/run/ptr_cast.rs new file mode 100644 index 0000000000000..6ac099ea145c2 --- /dev/null +++ b/tests/run/ptr_cast.rs @@ -0,0 +1,222 @@ +// Compiler: +// +// Run-time: +// status: 0 +// stdout: 1 + +#![feature(arbitrary_self_types, auto_traits, lang_items, no_core, start, intrinsics)] + +#![no_std] +#![no_core] + +/* + * Core + */ + +// Because we don't have core yet. +#[lang = "sized"] +pub trait Sized {} + +#[lang = "copy"] +trait Copy { +} + +impl Copy for isize {} +impl Copy for usize {} +impl Copy for i32 {} +impl Copy for u8 {} +impl Copy for i8 {} +impl Copy for i16 {} + +#[lang = "receiver"] +trait Receiver { +} + +#[lang = "freeze"] +pub(crate) unsafe auto trait Freeze {} + +mod libc { + #[link(name = "c")] + extern "C" { + pub fn printf(format: *const i8, ...) -> i32; + pub fn puts(s: *const u8) -> i32; + } +} + +#[lang = "index"] +pub trait Index { + type Output: ?Sized; + fn index(&self, index: Idx) -> &Self::Output; +} + +impl Index for [T; 3] { + type Output = T; + + fn index(&self, index: usize) -> &Self::Output { + &self[index] + } +} + +impl Index for [T] { + type Output = T; + + fn index(&self, index: usize) -> &Self::Output { + &self[index] + } +} + +#[lang = "drop_in_place"] +#[allow(unconditional_recursion)] +pub unsafe fn drop_in_place(to_drop: *mut T) { + // Code here does not matter - this is replaced by the + // real drop glue by the compiler. 
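+    // (The `allow(unconditional_recursion)` attribute above silences the lint
+    // that this placeholder self-call would otherwise trigger.)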
+ drop_in_place(to_drop); +} + +#[lang = "panic"] +#[track_caller] +#[no_mangle] +pub fn panic(_msg: &str) -> ! { + unsafe { + libc::puts("Panicking\0" as *const str as *const u8); + intrinsics::abort(); + } +} + +#[lang = "panic_location"] +struct PanicLocation { + file: &'static str, + line: u32, + column: u32, +} + +#[lang = "panic_bounds_check"] +#[track_caller] +#[no_mangle] +fn panic_bounds_check(index: usize, len: usize) -> ! { + unsafe { + libc::printf("index out of bounds: the len is %d but the index is %d\n\0" as *const str as *const i8, len, index); + intrinsics::abort(); + } +} + +mod intrinsics { + extern "rust-intrinsic" { + pub fn abort() -> !; + } +} + +#[lang = "add"] +trait Add { + type Output; + + fn add(self, rhs: RHS) -> Self::Output; +} + +impl Add for u8 { + type Output = Self; + + fn add(self, rhs: Self) -> Self { + self + rhs + } +} + +impl Add for i8 { + type Output = Self; + + fn add(self, rhs: Self) -> Self { + self + rhs + } +} + +impl Add for i32 { + type Output = Self; + + fn add(self, rhs: Self) -> Self { + self + rhs + } +} + +impl Add for usize { + type Output = Self; + + fn add(self, rhs: Self) -> Self { + self + rhs + } +} + +impl Add for isize { + type Output = Self; + + fn add(self, rhs: Self) -> Self { + self + rhs + } +} + +#[lang = "sub"] +pub trait Sub { + type Output; + + fn sub(self, rhs: RHS) -> Self::Output; +} + +impl Sub for usize { + type Output = Self; + + fn sub(self, rhs: Self) -> Self { + self - rhs + } +} + +impl Sub for isize { + type Output = Self; + + fn sub(self, rhs: Self) -> Self { + self - rhs + } +} + +impl Sub for u8 { + type Output = Self; + + fn sub(self, rhs: Self) -> Self { + self - rhs + } +} + +impl Sub for i8 { + type Output = Self; + + fn sub(self, rhs: Self) -> Self { + self - rhs + } +} + +impl Sub for i16 { + type Output = Self; + + fn sub(self, rhs: Self) -> Self { + self - rhs + } +} + + +/* + * Code + */ + +static mut ONE: usize = 1; + +fn make_array() -> [u8; 3] { + [42, 10, 5] +} + +#[start] +fn main(argc: isize, _argv: *const *const u8) -> isize { + unsafe { + let ptr = ONE as *mut usize; + let value = ptr as usize; + libc::printf(b"%ld\n\0" as *const u8 as *const i8, value); + } + 0 +} diff --git a/tests/run/return-tuple.rs b/tests/run/return-tuple.rs new file mode 100644 index 0000000000000..6fa10dca06f67 --- /dev/null +++ b/tests/run/return-tuple.rs @@ -0,0 +1,72 @@ +// Compiler: +// +// Run-time: +// status: 0 +// stdout: 10 +// 10 +// 42 + +#![feature(auto_traits, lang_items, no_core, start, intrinsics)] + +#![no_std] +#![no_core] + +#[lang = "copy"] +pub unsafe trait Copy {} + +unsafe impl Copy for bool {} +unsafe impl Copy for u8 {} +unsafe impl Copy for u16 {} +unsafe impl Copy for u32 {} +unsafe impl Copy for u64 {} +unsafe impl Copy for usize {} +unsafe impl Copy for i8 {} +unsafe impl Copy for i16 {} +unsafe impl Copy for i32 {} +unsafe impl Copy for isize {} +unsafe impl Copy for f32 {} +unsafe impl Copy for char {} + +mod libc { + #[link(name = "c")] + extern "C" { + pub fn printf(format: *const i8, ...) -> i32; + } +} + +/* + * Core + */ + +// Because we don't have core yet. 
+#[lang = "sized"] +pub trait Sized {} + +#[lang = "receiver"] +trait Receiver { +} + +#[lang = "freeze"] +pub(crate) unsafe auto trait Freeze {} + +/* + * Code + */ + +fn int_cast(a: u16, b: i16) -> (u8, u16, u32, usize, i8, i16, i32, isize, u8, u32) { + ( + a as u8, a as u16, a as u32, a as usize, a as i8, a as i16, a as i32, a as isize, b as u8, + b as u32, + ) +} + +#[start] +fn main(argc: isize, _argv: *const *const u8) -> isize { + let (a, b, c, d, e, f, g, h, i, j) = int_cast(10, 42); + unsafe { + libc::printf(b"%d\n\0" as *const u8 as *const i8, c); + libc::printf(b"%ld\n\0" as *const u8 as *const i8, d); + libc::printf(b"%ld\n\0" as *const u8 as *const i8, j); + } + 0 +} diff --git a/tests/run/slice.rs b/tests/run/slice.rs new file mode 100644 index 0000000000000..ad9258ed0bdeb --- /dev/null +++ b/tests/run/slice.rs @@ -0,0 +1,128 @@ +// Compiler: +// +// Run-time: +// status: 0 +// stdout: 5 + +#![feature(arbitrary_self_types, auto_traits, lang_items, no_core, start, intrinsics)] + +#![no_std] +#![no_core] + +/* + * Core + */ + +// Because we don't have core yet. +#[lang = "sized"] +pub trait Sized {} + +#[lang = "copy"] +trait Copy { +} + +impl Copy for isize {} +impl Copy for usize {} +impl Copy for i32 {} +impl Copy for u32 {} + +#[lang = "receiver"] +trait Receiver { +} + +#[lang = "freeze"] +pub(crate) unsafe auto trait Freeze {} + +mod libc { + #[link(name = "c")] + extern "C" { + pub fn printf(format: *const i8, ...) -> i32; + } +} + +#[lang = "index"] +pub trait Index { + type Output: ?Sized; + fn index(&self, index: Idx) -> &Self::Output; +} + +impl Index for [T; 3] { + type Output = T; + + fn index(&self, index: usize) -> &Self::Output { + &self[index] + } +} + +impl Index for [T] { + type Output = T; + + fn index(&self, index: usize) -> &Self::Output { + &self[index] + } +} + +#[lang = "unsize"] +pub trait Unsize {} + +#[lang = "coerce_unsized"] +pub trait CoerceUnsized {} + +impl<'a, 'b: 'a, T: ?Sized + Unsize, U: ?Sized> CoerceUnsized<&'a U> for &'b T {} +impl<'a, T: ?Sized + Unsize, U: ?Sized> CoerceUnsized<&'a mut U> for &'a mut T {} +impl, U: ?Sized> CoerceUnsized<*const U> for *const T {} +impl, U: ?Sized> CoerceUnsized<*mut U> for *mut T {} + +#[lang = "drop_in_place"] +#[allow(unconditional_recursion)] +pub unsafe fn drop_in_place(to_drop: *mut T) { + // Code here does not matter - this is replaced by the + // real drop glue by the compiler. + drop_in_place(to_drop); +} + +#[lang = "panic_location"] +struct PanicLocation { + file: &'static str, + line: u32, + column: u32, +} + +#[lang = "panic_bounds_check"] +#[track_caller] +#[no_mangle] +fn panic_bounds_check(index: usize, len: usize) -> ! 
+fn panic_bounds_check(index: usize, len: usize) -> ! {
+    unsafe {
+        libc::printf("index out of bounds: the len is %d but the index is %d\n\0" as *const str as *const i8, len, index);
+        intrinsics::abort();
+    }
+}
+
+mod intrinsics {
+    use super::Sized;
+
+    extern "rust-intrinsic" {
+        pub fn abort() -> !;
+    }
+}
+
+/*
+ * Code
+ */
+
+static mut TWO: usize = 2;
+
+fn index_slice(s: &[u32]) -> u32 {
+    unsafe {
+        s[TWO]
+    }
+}
+
+#[start]
+fn main(mut argc: isize, _argv: *const *const u8) -> isize {
+    let array = [42, 7, 5];
+    unsafe {
+        libc::printf(b"%ld\n\0" as *const u8 as *const i8, index_slice(&array));
+    }
+    0
+}
diff --git a/tests/run/static.rs b/tests/run/static.rs
new file mode 100644
index 0000000000000..ab89f6aff4b50
--- /dev/null
+++ b/tests/run/static.rs
@@ -0,0 +1,106 @@
+// Compiler:
+//
+// Run-time:
+//   status: 0
+//   stdout: 10
+//     14
+//     1
+//     12
+//     12
+//     1
+
+#![feature(auto_traits, lang_items, no_core, start, intrinsics)]
+
+#![no_std]
+#![no_core]
+
+/*
+ * Core
+ */
+
+// Because we don't have core yet.
+#[lang = "sized"]
+pub trait Sized {}
+
+#[lang = "copy"]
+trait Copy {
+}
+
+impl Copy for isize {}
+
+#[lang = "receiver"]
+trait Receiver {
+}
+
+#[lang = "freeze"]
+pub(crate) unsafe auto trait Freeze {}
+
+mod intrinsics {
+    use super::Sized;
+
+    extern "rust-intrinsic" {
+        pub fn abort() -> !;
+    }
+}
+
+mod libc {
+    #[link(name = "c")]
+    extern "C" {
+        pub fn printf(format: *const i8, ...) -> i32;
+    }
+}
+
+#[lang = "structural_peq"]
+pub trait StructuralPartialEq {}
+
+#[lang = "structural_teq"]
+pub trait StructuralEq {}
+
+#[lang = "drop_in_place"]
+#[allow(unconditional_recursion)]
+pub unsafe fn drop_in_place<T: ?Sized>(to_drop: *mut T) {
+    // Code here does not matter - this is replaced by the
+    // real drop glue by the compiler.
+    drop_in_place(to_drop);
+}
+
+/*
+ * Code
+ */
+
+struct Test {
+    field: isize,
+}
+
+struct WithRef {
+    refe: &'static Test,
+}
+
+static mut CONSTANT: isize = 10;
+
+static mut TEST: Test = Test {
+    field: 12,
+};
+
+static mut TEST2: Test = Test {
+    field: 14,
+};
+
+static mut WITH_REF: WithRef = WithRef {
+    refe: unsafe { &TEST },
+};
+
+#[start]
+fn main(mut argc: isize, _argv: *const *const u8) -> isize {
+    unsafe {
+        libc::printf(b"%ld\n\0" as *const u8 as *const i8, CONSTANT);
+        libc::printf(b"%ld\n\0" as *const u8 as *const i8, TEST2.field);
+        TEST2.field = argc;
+        libc::printf(b"%ld\n\0" as *const u8 as *const i8, TEST2.field);
+        libc::printf(b"%ld\n\0" as *const u8 as *const i8, WITH_REF.refe.field);
+        WITH_REF.refe = &TEST2;
+        libc::printf(b"%ld\n\0" as *const u8 as *const i8, TEST.field);
+        libc::printf(b"%ld\n\0" as *const u8 as *const i8, WITH_REF.refe.field);
+    }
+    0
+}
diff --git a/tests/run/structs.rs b/tests/run/structs.rs
new file mode 100644
index 0000000000000..6c8884855ac35
--- /dev/null
+++ b/tests/run/structs.rs
@@ -0,0 +1,70 @@
+// Compiler:
+//
+// Run-time:
+//   status: 0
+//   stdout: 1
+//     2
+
+#![feature(auto_traits, lang_items, no_core, start, intrinsics)]
+
+#![no_std]
+#![no_core]
+
+/*
+ * Core
+ */
+
+// Because we don't have core yet.
+#[lang = "sized"]
+pub trait Sized {}
+
+#[lang = "copy"]
+trait Copy {
+}
+
+impl Copy for isize {}
+
+#[lang = "receiver"]
+trait Receiver {
+}
+
+#[lang = "freeze"]
+pub(crate) unsafe auto trait Freeze {}
+
+mod libc {
+    #[link(name = "c")]
+    extern "C" {
+        pub fn printf(format: *const i8, ...)
-> i32; + } +} + +/* + * Code + */ + +struct Test { + field: isize, +} + +struct Two { + two: isize, +} + +fn one() -> isize { + 1 +} + +#[start] +fn main(mut argc: isize, _argv: *const *const u8) -> isize { + let test = Test { + field: one(), + }; + let two = Two { + two: 2, + }; + unsafe { + libc::printf(b"%ld\n\0" as *const u8 as *const i8, test.field); + libc::printf(b"%ld\n\0" as *const u8 as *const i8, two.two); + } + 0 +} diff --git a/tests/run/tuple.rs b/tests/run/tuple.rs new file mode 100644 index 0000000000000..0b670bf267424 --- /dev/null +++ b/tests/run/tuple.rs @@ -0,0 +1,51 @@ +// Compiler: +// +// Run-time: +// status: 0 +// stdout: 3 + +#![feature(auto_traits, lang_items, no_core, start, intrinsics)] + +#![no_std] +#![no_core] + +/* + * Core + */ + +// Because we don't have core yet. +#[lang = "sized"] +pub trait Sized {} + +#[lang = "copy"] +trait Copy { +} + +impl Copy for isize {} + +#[lang = "receiver"] +trait Receiver { +} + +#[lang = "freeze"] +pub(crate) unsafe auto trait Freeze {} + +mod libc { + #[link(name = "c")] + extern "C" { + pub fn printf(format: *const i8, ...) -> i32; + } +} + +/* + * Code + */ + +#[start] +fn main(mut argc: isize, _argv: *const *const u8) -> isize { + let test: (isize, isize, isize) = (3, 1, 4); + unsafe { + libc::printf(b"%ld\n\0" as *const u8 as *const i8, test.0); + } + 0 +} From 8841e9e632c0c8c815e8f3ab4c57feb22a99ceb5 Mon Sep 17 00:00:00 2001 From: Antoni Boucher Date: Mon, 19 Jul 2021 19:08:08 -0400 Subject: [PATCH 02/27] Fix tidy --- rustfmt.toml | 1 + src/bootstrap/check.rs | 9 +++++++-- src/tools/tidy/src/lib.rs | 1 + 3 files changed, 9 insertions(+), 2 deletions(-) diff --git a/rustfmt.toml b/rustfmt.toml index 480b19a5e9336..053f3e3ee583b 100644 --- a/rustfmt.toml +++ b/rustfmt.toml @@ -19,6 +19,7 @@ ignore = [ "library/backtrace", "library/stdarch", "compiler/rustc_codegen_cranelift", + "compiler/rustc_codegen_gcc", "src/doc/book", "src/doc/edition-guide", "src/doc/embedded-book", diff --git a/src/bootstrap/check.rs b/src/bootstrap/check.rs index bc106746e57e0..03f57dbf30173 100644 --- a/src/bootstrap/check.rs +++ b/src/bootstrap/check.rs @@ -237,11 +237,16 @@ impl Step for CodegenBackend { const DEFAULT: bool = true; fn should_run(run: ShouldRun<'_>) -> ShouldRun<'_> { - run.paths(&["compiler/rustc_codegen_cranelift", "rustc_codegen_cranelift"]) + run.paths(&[ + "compiler/rustc_codegen_cranelift", + "rustc_codegen_cranelift", + "compiler/rustc_codegen_gcc", + "rustc_codegen_gcc", + ]) } fn make_run(run: RunConfig<'_>) { - for &backend in &[INTERNER.intern_str("cranelift")] { + for &backend in &[INTERNER.intern_str("cranelift"), INTERNER.intern_str("gcc")] { run.builder.ensure(CodegenBackend { target: run.target, backend }); } } diff --git a/src/tools/tidy/src/lib.rs b/src/tools/tidy/src/lib.rs index a1c41eb99810e..003c3a4fd3630 100644 --- a/src/tools/tidy/src/lib.rs +++ b/src/tools/tidy/src/lib.rs @@ -56,6 +56,7 @@ fn filter_dirs(path: &Path) -> bool { let skip = [ "tidy-test-file", "compiler/rustc_codegen_cranelift", + "compiler/rustc_codegen_gcc", "src/llvm-project", "library/backtrace", "library/stdarch", From 7132ce63cf9b61618020eb321500795b455cfdc1 Mon Sep 17 00:00:00 2001 From: Antoni Boucher Date: Mon, 19 Jul 2021 19:13:02 -0400 Subject: [PATCH 03/27] Exclude rustc_codegen_gcc from namespace --- Cargo.toml | 1 + 1 file changed, 1 insertion(+) diff --git a/Cargo.toml b/Cargo.toml index dedfe45aca49b..94e78df50176f 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -40,6 +40,7 @@ members = [ exclude = [ "build", 
"compiler/rustc_codegen_cranelift", + "compiler/rustc_codegen_gcc", "src/test/rustdoc-gui", # HACK(eddyb) This hardcodes the fact that our CI uses `/checkout/obj`. "obj", From 0c89065b934397b62838fe3e4ef6f6352fc52daf Mon Sep 17 00:00:00 2001 From: antoyo Date: Sat, 14 Aug 2021 10:05:49 -0400 Subject: [PATCH 04/27] Update to nightly-2021-08-12 (#61) --- rust-toolchain | 2 +- src/builder.rs | 34 ++++++++++++++++++++++------------ src/context.rs | 8 +++++--- src/intrinsic/mod.rs | 6 +++--- src/type_.rs | 5 +++++ src/type_of.rs | 6 ++++++ 6 files changed, 42 insertions(+), 19 deletions(-) diff --git a/rust-toolchain b/rust-toolchain index 3a29ac5ddb120..4e9771311efbd 100644 --- a/rust-toolchain +++ b/rust-toolchain @@ -1 +1 @@ -nightly-2021-07-21 +nightly-2021-08-12 diff --git a/src/builder.rs b/src/builder.rs index 8bdcb08bd3d6b..bb864c27e1b4d 100644 --- a/src/builder.rs +++ b/src/builder.rs @@ -25,6 +25,7 @@ use rustc_codegen_ssa::traits::{ BuilderMethods, ConstMethods, DerivedTypeMethods, + LayoutTypeMethods, HasCodegen, OverflowOp, StaticBuilderMethods, @@ -514,8 +515,12 @@ impl<'a, 'gcc, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'gcc, 'tcx> { self.block.expect("block").end_with_switch(None, value, default_block, &gcc_cases); } - fn invoke(&mut self, _func: RValue<'gcc>, _args: &[RValue<'gcc>], _then: Block<'gcc>, _catch: Block<'gcc>, _funclet: Option<&Funclet>) -> RValue<'gcc> { - unimplemented!(); + fn invoke(&mut self, _typ: Type<'gcc>, _func: RValue<'gcc>, _args: &[RValue<'gcc>], then: Block<'gcc>, catch: Block<'gcc>, _funclet: Option<&Funclet>) -> RValue<'gcc> { + let condition = self.context.new_rvalue_from_int(self.bool_type, 0); + self.llbb().end_with_conditional(None, condition, then, catch); + self.context.new_rvalue_from_int(self.int_type, 0) + + // TODO /*debug!("invoke {:?} with args ({:?})", func, args); let args = self.check_call("invoke", func, args); @@ -1001,9 +1006,10 @@ impl<'a, 'gcc, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'gcc, 'tcx> { } else if let abi::Abi::ScalarPair(ref a, ref b) = place.layout.abi { let b_offset = a.value.size(self).align_to(b.value.align(self).abi); + let pair_type = place.layout.gcc_type(self, false); let mut load = |i, scalar: &abi::Scalar, align| { - let llptr = self.struct_gep(place.llval, i as u64); + let llptr = self.struct_gep(pair_type, place.llval, i as u64); let load = self.load(llptr.get_type(), llptr, align); scalar_load_metadata(self, load, scalar); if scalar.is_bool() { self.trunc(load, self.type_i1()) } else { load } @@ -1044,7 +1050,7 @@ impl<'a, 'gcc, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'gcc, 'tcx> { let align = dest.align.restrict_for_offset(dest.layout.field(self.cx(), 0).size); cg_elem.val.store(&mut body_bx, PlaceRef::new_sized_aligned(current_val, cg_elem.layout, align)); - let next = body_bx.inbounds_gep(current.to_rvalue(), &[self.const_usize(1)]); + let next = body_bx.inbounds_gep(self.backend_type(cg_elem.layout), current.to_rvalue(), &[self.const_usize(1)]); body_bx.llbb().add_assignment(None, current, next); body_bx.br(header_bx.llbb()); @@ -1130,7 +1136,7 @@ impl<'a, 'gcc, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'gcc, 'tcx> { .add_eval(None, self.context.new_call(None, atomic_store, &[ptr, value, ordering])); } - fn gep(&mut self, ptr: RValue<'gcc>, indices: &[RValue<'gcc>]) -> RValue<'gcc> { + fn gep(&mut self, _typ: Type<'gcc>, ptr: RValue<'gcc>, indices: &[RValue<'gcc>]) -> RValue<'gcc> { let mut result = ptr; for index in indices { result = self.context.new_array_access(None, result, 
*index).get_address(None).to_rvalue(); @@ -1138,7 +1144,7 @@ impl<'a, 'gcc, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'gcc, 'tcx> { result } - fn inbounds_gep(&mut self, ptr: RValue<'gcc>, indices: &[RValue<'gcc>]) -> RValue<'gcc> { + fn inbounds_gep(&mut self, _typ: Type<'gcc>, ptr: RValue<'gcc>, indices: &[RValue<'gcc>]) -> RValue<'gcc> { // FIXME: would be safer if doing the same thing (loop) as gep. // TODO: specify inbounds somehow. match indices.len() { @@ -1153,11 +1159,10 @@ impl<'a, 'gcc, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'gcc, 'tcx> { } } - fn struct_gep(&mut self, ptr: RValue<'gcc>, idx: u64) -> RValue<'gcc> { + fn struct_gep(&mut self, value_type: Type<'gcc>, ptr: RValue<'gcc>, idx: u64) -> RValue<'gcc> { // FIXME: it would be better if the API only called this on struct, not on arrays. assert_eq!(idx as usize as u64, idx); let value = ptr.dereference(None).to_rvalue(); - let value_type = value.get_type(); if value_type.is_array().is_some() { let index = self.context.new_rvalue_from_long(self.u64_type, i64::try_from(idx).expect("i64::try_from")); @@ -1449,14 +1454,19 @@ impl<'a, 'gcc, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'gcc, 'tcx> { } fn landing_pad(&mut self, _ty: Type<'gcc>, _pers_fn: RValue<'gcc>, _num_clauses: usize) -> RValue<'gcc> { - unimplemented!(); + let field1 = self.context.new_field(None, self.u8_type, "landing_pad_field_1"); + let field2 = self.context.new_field(None, self.i32_type, "landing_pad_field_1"); + let struct_type = self.context.new_struct_type(None, "landing_pad", &[field1, field2]); + self.current_func().new_local(None, struct_type.as_type(), "landing_pad") + .to_rvalue() + // TODO /*unsafe { llvm::LLVMBuildLandingPad(self.llbuilder, ty, pers_fn, num_clauses as c_uint, UNNAMED) }*/ } fn set_cleanup(&mut self, _landing_pad: RValue<'gcc>) { - unimplemented!(); + // TODO /*unsafe { llvm::LLVMSetCleanup(landing_pad, llvm::True); }*/ @@ -1527,7 +1537,7 @@ impl<'a, 'gcc, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'gcc, 'tcx> { } fn set_personality_fn(&mut self, _personality: RValue<'gcc>) { - unimplemented!(); + // TODO /*unsafe { llvm::LLVMSetPersonalityFn(self.llfn(), personality); }*/ @@ -1620,7 +1630,7 @@ impl<'a, 'gcc, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'gcc, 'tcx> { //self.call_lifetime_intrinsic("llvm.lifetime.end.p0i8", ptr, size); } - fn call(&mut self, func: RValue<'gcc>, args: &[RValue<'gcc>], funclet: Option<&Funclet>) -> RValue<'gcc> { + fn call(&mut self, _typ: Type<'gcc>, func: RValue<'gcc>, args: &[RValue<'gcc>], funclet: Option<&Funclet>) -> RValue<'gcc> { // FIXME: remove when having a proper API. let gcc_func = unsafe { std::mem::transmute(func) }; if self.functions.borrow().values().find(|value| **value == gcc_func).is_some() { diff --git a/src/context.rs b/src/context.rs index 9cbbee772c5fb..19243b0cbce5d 100644 --- a/src/context.rs +++ b/src/context.rs @@ -14,7 +14,6 @@ use gccjit::{ use rustc_codegen_ssa::base::wants_msvc_seh; use rustc_codegen_ssa::traits::{ BackendTypes, - BaseTypeMethods, MiscMethods, }; use rustc_data_structures::base_n; @@ -349,12 +348,15 @@ impl<'gcc, 'tcx> MiscMethods<'tcx> for CodegenCx<'gcc, 'tcx> { .unwrap().unwrap(), ), _ => { - let name = if wants_msvc_seh(self.sess()) { + let _name = if wants_msvc_seh(self.sess()) { "__CxxFrameHandler3" } else { "rust_eh_personality" }; - self.declare_func(name, self.type_i32(), &[], true) + //let func = self.declare_func(name, self.type_i32(), &[], true); + // FIXME: this hack should not be needed. 
That will probably be removed when
+            // unwinding support is added.
+            self.context.new_rvalue_from_int(self.int_type, 0)
             }
         };
         //attributes::apply_target_cpu_attr(self, llfn);
diff --git a/src/intrinsic/mod.rs b/src/intrinsic/mod.rs
index 083f7e01c80d0..ad6dfbffbac96 100644
--- a/src/intrinsic/mod.rs
+++ b/src/intrinsic/mod.rs
@@ -98,7 +98,7 @@ impl<'a, 'gcc, 'tcx> IntrinsicCallMethods<'tcx> for Builder<'a, 'gcc, 'tcx> {
             _ if simple.is_some() => {
                 // FIXME: remove this cast when the API supports function.
                 let func = unsafe { std::mem::transmute(simple.expect("simple")) };
-                self.call(func, &args.iter().map(|arg| arg.immediate()).collect::<Vec<_>>(), None)
+                self.call(self.type_void(), func, &args.iter().map(|arg| arg.immediate()).collect::<Vec<_>>(), None)
             },
             sym::likely => {
                 self.expect(args[0].immediate(), true)
@@ -392,7 +392,7 @@ impl<'a, 'gcc, 'tcx> IntrinsicCallMethods<'tcx> for Builder<'a, 'gcc, 'tcx> {
     fn abort(&mut self) {
         let func = self.context.get_builtin_function("abort");
         let func: RValue<'gcc> = unsafe { std::mem::transmute(func) };
-        self.call(func, &[], None);
+        self.call(self.type_void(), func, &[], None);
     }
 
     fn assume(&mut self, value: Self::Value) {
@@ -1075,7 +1075,7 @@ impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> {
 
 fn try_intrinsic<'gcc, 'tcx>(bx: &mut Builder<'_, 'gcc, 'tcx>, try_func: RValue<'gcc>, data: RValue<'gcc>, _catch_func: RValue<'gcc>, dest: RValue<'gcc>) {
     if bx.sess().panic_strategy() == PanicStrategy::Abort {
-        bx.call(try_func, &[data], None);
+        bx.call(bx.type_void(), try_func, &[data], None);
         // Return 0 unconditionally from the intrinsic call;
         // we can never unwind.
         let ret_align = bx.tcx.data_layout.i32_align.abi;
diff --git a/src/type_.rs b/src/type_.rs
index 90f7d9d9bbace..2ef90bca5ac20 100644
--- a/src/type_.rs
+++ b/src/type_.rs
@@ -293,6 +293,11 @@ impl<'gcc, 'tcx> CodegenCx<'gcc, 'tcx> {
             }
         }
 
+        // NOTE: see note above. Some other test uses usize::MAX.
+        if len == u64::MAX {
+            len = 0;
+        }
+
         let len: i32 = len.try_into().expect("array len");
 
         self.context.new_array_type(None, ty, len)
diff --git a/src/type_of.rs b/src/type_of.rs
index c5db0a1b2e459..010805808d2a3 100644
--- a/src/type_of.rs
+++ b/src/type_of.rs
@@ -363,4 +363,10 @@ impl<'gcc, 'tcx> LayoutTypeMethods<'tcx> for CodegenCx<'gcc, 'tcx> {
         unimplemented!();
         //ty.gcc_type(self)
     }
+
+    fn fn_decl_backend_type(&self, _fn_abi: &FnAbi<'tcx, Ty<'tcx>>) -> Type<'gcc> {
+        // FIXME: return correct type.
+ self.type_void() + //fn_abi.gcc_type(self) + } } From e228f0c16ea8c34794a6285bf57aab627c26b147 Mon Sep 17 00:00:00 2001 From: antoyo Date: Sun, 15 Aug 2021 08:28:46 -0400 Subject: [PATCH 05/27] Cleanup (#67) --- build_sysroot/prepare_sysroot_src.sh | 2 +- config.sh | 6 +- example/mini_core_hello_world.rs | 2 +- example/std_example.rs | 5 +- gcc_path | 1 + src/abi.rs | 130 +---- src/allocator.rs | 8 +- src/asm.rs | 200 +------ src/back/write.rs | 188 +----- src/base.rs | 19 +- src/builder.rs | 471 +++------------ src/callee.rs | 24 +- src/common.rs | 35 +- src/consts.rs | 137 +---- src/context.rs | 56 +- src/coverageinfo.rs | 79 +-- src/debuginfo.rs | 336 +---------- src/declare.rs | 55 +- src/intrinsic/llvm.rs | 6 +- src/intrinsic/mod.rs | 295 +--------- src/intrinsic/simd.rs | 836 +-------------------------- src/lib.rs | 49 +- src/mono_item.rs | 22 +- src/type_.rs | 98 +--- src/type_of.rs | 29 +- src/va_arg.rs | 179 ------ test.sh | 50 +- 27 files changed, 265 insertions(+), 3053 deletions(-) delete mode 100644 src/va_arg.rs diff --git a/build_sysroot/prepare_sysroot_src.sh b/build_sysroot/prepare_sysroot_src.sh index 4d18092419a4b..071e7ed1f85df 100755 --- a/build_sysroot/prepare_sysroot_src.sh +++ b/build_sysroot/prepare_sysroot_src.sh @@ -21,7 +21,7 @@ echo "[GIT] add" git add . echo "[GIT] commit" -# This is needed on virgin system where nothing is configured. +# This is needed on systems where nothing is configured. # git really needs something here, or it will fail. # Even using --author is not enough. git config user.email || git config user.email "none@example.com" diff --git a/config.sh b/config.sh index 2311600d1cf57..bd2d915f589a0 100644 --- a/config.sh +++ b/config.sh @@ -31,9 +31,8 @@ if [[ "$HOST_TRIPLE" != "$TARGET_TRIPLE" ]]; then fi export RUSTFLAGS=$linker' -Cpanic=abort -Cdebuginfo=2 -Zpanic-abort-tests -Zcodegen-backend='$(pwd)'/target/'$CHANNEL'/librustc_codegen_gcc.'$dylib_ext' --sysroot '$(pwd)'/build_sysroot/sysroot' -#export RUSTFLAGS=$linker' -Cpanic=abort -Cdebuginfo=2 -Zpanic-abort-tests -Zcodegen-backend='$(pwd)'/target/'$CHANNEL'/librustc_codegen_gcc.'$dylib_ext' --sysroot '$(pwd)'/build_sysroot/sysroot -Clto=fat -Cembed-bitcode=yes' -# FIXME remove once the atomic shim is gone +# FIXME(antoyo): remove once the atomic shim is gone if [[ `uname` == 'Darwin' ]]; then export RUSTFLAGS="$RUSTFLAGS -Clink-arg=-undefined -Clink-arg=dynamic_lookup" fi @@ -43,6 +42,3 @@ export RUSTC_LOG=warn # display metadata load errors export LD_LIBRARY_PATH="$(pwd)/target/out:$(pwd)/build_sysroot/sysroot/lib/rustlib/$TARGET_TRIPLE/lib:$GCC_PATH" export DYLD_LIBRARY_PATH=$LD_LIBRARY_PATH - -export CG_CLIF_DISPLAY_CG_TIME=1 -export CG_CLIF_INCR_CACHE_DISABLED=1 diff --git a/example/mini_core_hello_world.rs b/example/mini_core_hello_world.rs index ccf4b53a702e5..69d591565acfa 100644 --- a/example/mini_core_hello_world.rs +++ b/example/mini_core_hello_world.rs @@ -253,7 +253,7 @@ fn main() { } } - // TODO: not sure about this assert. ABC is not defined, so should it be really 0? + // TODO(antoyo): to make this work, support weak linkage. //unsafe { assert_eq!(ABC as usize, 0); } &mut (|| Some(0 as *const ())) as &mut dyn FnMut() -> Option<*const ()>; diff --git a/example/std_example.rs b/example/std_example.rs index d99d6bb4b855e..eba0eb8289600 100644 --- a/example/std_example.rs +++ b/example/std_example.rs @@ -16,7 +16,6 @@ fn main() { let stderr = ::std::io::stderr(); let mut stderr = stderr.lock(); - // FIXME: this thread panics. 
     std::thread::spawn(move || {
         println!("Hello from another thread!");
     });
@@ -56,7 +55,7 @@ fn main() {
     assert_eq!(-32768i16, (-32768i16).saturating_add(-32768));
     assert_eq!(32767i16, 32767i16.saturating_add(1));
 
-    /*assert_eq!(0b0000000000000000000000000010000010000000000000000000000000000000_0000000000100000000000000000000000001000000000000100000000000000u128.leading_zeros(), 26);
+    assert_eq!(0b0000000000000000000000000010000010000000000000000000000000000000_0000000000100000000000000000000000001000000000000100000000000000u128.leading_zeros(), 26);
     assert_eq!(0b0000000000000000000000000010000000000000000000000000000000000000_0000000000000000000000000000000000001000000000000000000010000000u128.trailing_zeros(), 7);
 
     let _d = 0i128.checked_div(2i128);
@@ -85,7 +84,7 @@ fn main() {
     assert_eq!(houndred_i128 as f32, 100.0);
     assert_eq!(houndred_i128 as f64, 100.0);
     assert_eq!(houndred_f32 as i128, 100);
-    assert_eq!(houndred_f64 as i128, 100);*/
+    assert_eq!(houndred_f64 as i128, 100);
 
     let _a = 1u32 << 2u8;
diff --git a/gcc_path b/gcc_path
index e69de29bb2d1d..b3696e3cf885e 100644
--- a/gcc_path
+++ b/gcc_path
@@ -0,0 +1 @@
+/home/bouanto/Ordinateur/Programmation/Projets/gcc-build/build/gcc
diff --git a/src/abi.rs b/src/abi.rs
index 95d536dc51c5f..ce428c589a478 100644
--- a/src/abi.rs
+++ b/src/abi.rs
@@ -11,8 +11,7 @@ use crate::type_of::LayoutGccExt;
 
 impl<'a, 'gcc, 'tcx> AbiBuilderMethods<'tcx> for Builder<'a, 'gcc, 'tcx> {
     fn apply_attrs_callsite(&mut self, _fn_abi: &FnAbi<'tcx, Ty<'tcx>>, _callsite: Self::Value) {
-        // TODO
-        //fn_abi.apply_attrs_callsite(self, callsite)
+        // TODO(antoyo)
     }
 
     fn get_param(&self, index: usize) -> Self::Value {
@@ -87,12 +86,9 @@ impl GccType for Reg {
 }
 
 pub trait FnAbiGccExt<'gcc, 'tcx> {
-    // TODO: return a function pointer type instead?
+    // TODO(antoyo): return a function pointer type instead?
     fn gcc_type(&self, cx: &CodegenCx<'gcc, 'tcx>) -> (Type<'gcc>, Vec<Type<'gcc>>, bool);
     fn ptr_to_gcc_type(&self, cx: &CodegenCx<'gcc, 'tcx>) -> Type<'gcc>;
-    /*fn llvm_cconv(&self) -> llvm::CallConv;
-    fn apply_attrs_llfn(&self, cx: &CodegenCx<'ll, 'tcx>, llfn: &'ll Value);
-    fn apply_attrs_callsite(&self, bx: &mut Builder<'a, 'll, 'tcx>, callsite: &'ll Value);*/
 }
 
 impl<'gcc, 'tcx> FnAbiGccExt<'gcc, 'tcx> for FnAbi<'tcx, Ty<'tcx>> {
@@ -145,12 +141,7 @@ impl<'gcc, 'tcx> FnAbiGccExt<'gcc, 'tcx> for FnAbi<'tcx, Ty<'tcx>> {
                 continue;
             }
             PassMode::Indirect { extra_attrs: Some(_), .. } => {
-                /*let ptr_ty = cx.tcx.mk_mut_ptr(arg.layout.ty);
-                let ptr_layout = cx.layout_of(ptr_ty);
-                argument_tys.push(ptr_layout.scalar_pair_element_gcc_type(cx, 0, true));
-                argument_tys.push(ptr_layout.scalar_pair_element_gcc_type(cx, 1, true));*/
                 unimplemented!();
-                //continue;
             }
             PassMode::Cast(cast) => cast.gcc_type(cx),
             PassMode::Indirect { extra_attrs: None, ..
} => cx.type_ptr_to(arg.memory_ty(cx)), @@ -166,121 +157,4 @@ impl<'gcc, 'tcx> FnAbiGccExt<'gcc, 'tcx> for FnAbi<'tcx, Ty<'tcx>> { let pointer_type = cx.context.new_function_pointer_type(None, return_type, ¶ms, variadic); pointer_type } - - /*fn llvm_cconv(&self) -> llvm::CallConv { - match self.conv { - Conv::C | Conv::Rust => llvm::CCallConv, - Conv::AmdGpuKernel => llvm::AmdGpuKernel, - Conv::ArmAapcs => llvm::ArmAapcsCallConv, - Conv::Msp430Intr => llvm::Msp430Intr, - Conv::PtxKernel => llvm::PtxKernel, - Conv::X86Fastcall => llvm::X86FastcallCallConv, - Conv::X86Intr => llvm::X86_Intr, - Conv::X86Stdcall => llvm::X86StdcallCallConv, - Conv::X86ThisCall => llvm::X86_ThisCall, - Conv::X86VectorCall => llvm::X86_VectorCall, - Conv::X86_64SysV => llvm::X86_64_SysV, - Conv::X86_64Win64 => llvm::X86_64_Win64, - } - } - - fn apply_attrs_llfn(&self, cx: &CodegenCx<'ll, 'tcx>, llfn: &'ll Value) { - // FIXME(eddyb) can this also be applied to callsites? - if self.ret.layout.abi.is_uninhabited() { - llvm::Attribute::NoReturn.apply_llfn(llvm::AttributePlace::Function, llfn); - } - - // FIXME(eddyb, wesleywiser): apply this to callsites as well? - if !self.can_unwind { - llvm::Attribute::NoUnwind.apply_llfn(llvm::AttributePlace::Function, llfn); - } - - let mut i = 0; - let mut apply = |attrs: &ArgAttributes, ty: Option<&Type>| { - attrs.apply_llfn(llvm::AttributePlace::Argument(i), llfn, ty); - i += 1; - }; - match self.ret.mode { - PassMode::Direct(ref attrs) => { - attrs.apply_llfn(llvm::AttributePlace::ReturnValue, llfn, None); - } - PassMode::Indirect(ref attrs, _) => apply(attrs, Some(self.ret.layout.gcc_type(cx))), - _ => {} - } - for arg in &self.args { - if arg.pad.is_some() { - apply(&ArgAttributes::new(), None); - } - match arg.mode { - PassMode::Ignore => {} - PassMode::Direct(ref attrs) | PassMode::Indirect(ref attrs, None) => { - apply(attrs, Some(arg.layout.gcc_type(cx))) - } - PassMode::Indirect(ref attrs, Some(ref extra_attrs)) => { - apply(attrs, None); - apply(extra_attrs, None); - } - PassMode::Pair(ref a, ref b) => { - apply(a, None); - apply(b, None); - } - PassMode::Cast(_) => apply(&ArgAttributes::new(), None), - } - } - } - - fn apply_attrs_callsite(&self, bx: &mut Builder<'a, 'll, 'tcx>, callsite: &'ll Value) { - // FIXME(wesleywiser, eddyb): We should apply `nounwind` and `noreturn` as appropriate to this callsite. - - let mut i = 0; - let mut apply = |attrs: &ArgAttributes, ty: Option<&Type>| { - attrs.apply_callsite(llvm::AttributePlace::Argument(i), callsite, ty); - i += 1; - }; - match self.ret.mode { - PassMode::Direct(ref attrs) => { - attrs.apply_callsite(llvm::AttributePlace::ReturnValue, callsite, None); - } - PassMode::Indirect(ref attrs, _) => apply(attrs, Some(self.ret.layout.gcc_type(bx))), - _ => {} - } - if let abi::Abi::Scalar(ref scalar) = self.ret.layout.abi { - // If the value is a boolean, the range is 0..2 and that ultimately - // become 0..0 when the type becomes i1, which would be rejected - // by the LLVM verifier. - if let Int(..) 
= scalar.value {
-                if !scalar.is_bool() {
-                    let range = scalar.valid_range_exclusive(bx);
-                    if range.start != range.end {
-                        bx.range_metadata(callsite, range);
-                    }
-                }
-            }
-        }
-        for arg in &self.args {
-            if arg.pad.is_some() {
-                apply(&ArgAttributes::new(), None);
-            }
-            match arg.mode {
-                PassMode::Ignore => {}
-                PassMode::Direct(ref attrs) | PassMode::Indirect(ref attrs, None) => {
-                    apply(attrs, Some(arg.layout.gcc_type(bx)))
-                }
-                PassMode::Indirect(ref attrs, Some(ref extra_attrs)) => {
-                    apply(attrs, None);
-                    apply(extra_attrs, None);
-                }
-                PassMode::Pair(ref a, ref b) => {
-                    apply(a, None);
-                    apply(b, None);
-                }
-                PassMode::Cast(_) => apply(&ArgAttributes::new(), None),
-            }
-        }
-
-        let cconv = self.llvm_cconv();
-        if cconv != llvm::CCallConv {
-            llvm::SetInstructionCallConv(callsite, cconv);
-        }
-    }*/
 }
diff --git a/src/allocator.rs b/src/allocator.rs
index 11b2fad3e7eee..4e622efcaa0ba 100644
--- a/src/allocator.rs
+++ b/src/allocator.rs
@@ -1,4 +1,3 @@
-//use crate::attributes;
 use gccjit::{FunctionType, ToRValue};
 use rustc_ast::expand::allocator::{AllocatorKind, AllocatorTy, ALLOCATOR_METHODS};
 use rustc_middle::bug;
@@ -50,11 +49,10 @@ pub(crate) unsafe fn codegen(tcx: TyCtxt<'_>, mods: &mut GccContext, kind: Alloc
         let func = context.new_function(None, FunctionType::Exported, output.unwrap_or(void), &args, name, false);
 
         if tcx.sess.target.options.default_hidden_visibility {
-            //llvm::LLVMRustSetVisibility(func, llvm::Visibility::Hidden);
+            // TODO(antoyo): set visibility.
         }
         if tcx.sess.must_emit_unwind_tables() {
-            // TODO
-            //attributes::emit_uwtable(func, true);
+            // TODO(antoyo): emit unwind tables.
         }
 
         let callee = kind.fn_name(method.name);
@@ -62,7 +60,7 @@ pub(crate) unsafe fn codegen(tcx: TyCtxt<'_>, mods: &mut GccContext, kind: Alloc
             .map(|(index, typ)| context.new_parameter(None, *typ, &format!("param{}", index)))
             .collect();
         let callee = context.new_function(None, FunctionType::Extern, output.unwrap_or(void), &args, callee, false);
-        //llvm::LLVMRustSetVisibility(callee, llvm::Visibility::Hidden);
+        // TODO(antoyo): set visibility.
 
         let block = func.new_block("entry");
diff --git a/src/asm.rs b/src/asm.rs
index 6616366235fcd..e4d57c39de48a 100644
--- a/src/asm.rs
+++ b/src/asm.rs
@@ -15,106 +15,8 @@ use crate::type_of::LayoutGccExt;
 
 impl<'a, 'gcc, 'tcx> AsmBuilderMethods<'tcx> for Builder<'a, 'gcc, 'tcx> {
     fn codegen_llvm_inline_asm(&mut self, _ia: &LlvmInlineAsmInner, _outputs: Vec<PlaceRef<'tcx, RValue<'gcc>>>, mut _inputs: Vec<RValue<'gcc>>, _span: Span) -> bool {
-        // TODO
+        // TODO(antoyo)
         return true;
-
-        /*let mut ext_constraints = vec![];
-        let mut output_types = vec![];
-
-        // Prepare the output operands
-        let mut indirect_outputs = vec![];
-        for (i, (out, &place)) in ia.outputs.iter().zip(&outputs).enumerate() {
-            if out.is_rw {
-                let operand = self.load_operand(place);
-                if let OperandValue::Immediate(_) = operand.val {
-                    inputs.push(operand.immediate());
-                }
-                ext_constraints.push(i.to_string());
-            }
-            if out.is_indirect {
-                let operand = self.load_operand(place);
-                if let OperandValue::Immediate(_) = operand.val {
-                    indirect_outputs.push(operand.immediate());
-                }
-            } else {
-                output_types.push(place.layout.gcc_type(self.cx()));
-            }
-        }
-        if !indirect_outputs.is_empty() {
-            indirect_outputs.extend_from_slice(&inputs);
-            inputs = indirect_outputs;
-        }
-
-        let clobbers = ia.clobbers.iter().map(|s| format!("~{{{}}}", &s));
-
-        // Default per-arch clobbers
-        // Basically what clang does
-        let arch_clobbers = match &self.sess().target.target.arch[..]
 {
-            "x86" | "x86_64" => vec!["~{dirflag}", "~{fpsr}", "~{flags}"],
-            "mips" | "mips64" => vec!["~{$1}"],
-            _ => Vec::new(),
-        };
-
-        let all_constraints = ia
-            .outputs
-            .iter()
-            .map(|out| out.constraint.to_string())
-            .chain(ia.inputs.iter().map(|s| s.to_string()))
-            .chain(ext_constraints)
-            .chain(clobbers)
-            .chain(arch_clobbers.iter().map(|s| (*s).to_string()))
-            .collect::<Vec<String>>()
-            .join(",");
-
-        debug!("Asm Constraints: {}", &all_constraints);
-
-        // Depending on how many outputs we have, the return type is different
-        let num_outputs = output_types.len();
-        let output_type = match num_outputs {
-            0 => self.type_void(),
-            1 => output_types[0],
-            _ => self.type_struct(&output_types, false),
-        };
-
-        let asm = ia.asm.as_str();
-        let r = inline_asm_call(
-            self,
-            &asm,
-            &all_constraints,
-            &inputs,
-            output_type,
-            ia.volatile,
-            ia.alignstack,
-            ia.dialect,
-        );
-        if r.is_none() {
-            return false;
-        }
-        let r = r.unwrap();
-
-        // Again, based on how many outputs we have
-        let outputs = ia.outputs.iter().zip(&outputs).filter(|&(ref o, _)| !o.is_indirect);
-        for (i, (_, &place)) in outputs.enumerate() {
-            let v = if num_outputs == 1 { r } else { self.extract_value(r, i as u64) };
-            OperandValue::Immediate(v).store(self, place);
-        }
-
-        // Store mark in a metadata node so we can map LLVM errors
-        // back to source locations.  See #17552.
-        unsafe {
-            let key = "srcloc";
-            let kind = llvm::LLVMGetMDKindIDInContext(
-                self.llcx,
-                key.as_ptr() as *const c_char,
-                key.len() as c_uint,
-            );
-
-            let val: &'ll Value = self.const_i32(span.ctxt().outer_expn().as_u32() as i32);
-
-            llvm::LLVMSetMetadata(r, kind, llvm::LLVMMDNodeInContext(self.llcx, &val, 1));
-        }
-
-        true*/
     }
 
     fn codegen_inline_asm(&mut self, template: &[InlineAsmTemplatePiece], operands: &[InlineAsmOperandRef<'tcx, Self>], options: InlineAsmOptions, _span: &[Span]) {
@@ -127,7 +29,7 @@ impl<'a, 'gcc, 'tcx> AsmBuilderMethods<'tcx> for Builder<'a, 'gcc, 'tcx> {
         };
 
         // Collect the types of output operands
-        // FIXME: we do this here instead of later because of a bug in libgccjit where creating the
+        // FIXME(antoyo): we do this here instead of later because of a bug in libgccjit where creating the
         // variable after the extended asm expression causes a segfault:
         // https://gcc.gnu.org/bugzilla/show_bug.cgi?id=100380
         let mut output_vars = FxHashMap::default();
@@ -160,11 +62,6 @@ impl<'a, 'gcc, 'tcx> AsmBuilderMethods<'tcx> for Builder<'a, 'gcc, 'tcx> {
                     match out_place {
                         Some(place) => place.layout.gcc_type(self.cx, false),
                         None => {
-                            // If the output is discarded, we don't really care what
-                            // type is used. We're just using this to tell GCC to
-                            // reserve the register.
-                            //dummy_output_type(self.cx, reg.reg_class())
-
                             // NOTE: if no output value, we should not create one.
                             continue;
                         },
@@ -251,9 +148,9 @@ impl<'a, 'gcc, 'tcx> AsmBuilderMethods<'tcx> for Builder<'a, 'gcc, 'tcx> {
                 template_str
             }
             else {
-                // FIXME: this might break the "m" memory constraint:
+                // FIXME(antoyo): this might break the "m" memory constraint:
                 // https://stackoverflow.com/a/9347957/389119
-                // TODO: only set on x86 platforms.
+                // TODO(antoyo): only set on x86 platforms.
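                // (Illustration added for clarity, not part of the patch: for a
                // hypothetical AT&T-dialect template_str of "nop", the format!
                // call below yields
                //     .att_syntax noprefix
                //     nop
                //     .intel_syntax noprefix
                // switching the assembler to AT&T for the user's template, then
                // back to Intel, since the module is compiled with -masm=intel.)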
format!(".att_syntax noprefix\n\t{}\n\t.intel_syntax noprefix", template_str) }; let extended_asm = block.add_extended_asm(None, &template_str); @@ -274,7 +171,6 @@ impl<'a, 'gcc, 'tcx> AsmBuilderMethods<'tcx> for Builder<'a, 'gcc, 'tcx> { }, }; output_types.push(ty); - //op_idx.insert(idx, constraints.len()); let prefix = if late { "=" } else { "=&" }; let constraint = format!("{}{}", prefix, reg_to_gcc(reg)); @@ -295,14 +191,13 @@ impl<'a, 'gcc, 'tcx> AsmBuilderMethods<'tcx> for Builder<'a, 'gcc, 'tcx> { None => dummy_output_type(self.cx, reg.reg_class()) }; output_types.push(ty); - //op_idx.insert(idx, constraints.len()); - // TODO: prefix of "+" for reading and writing? + // TODO(antoyo): prefix of "+" for reading and writing? let prefix = if late { "=" } else { "=&" }; let constraint = format!("{}{}", prefix, reg_to_gcc(reg)); if out_place.is_some() { let var = output_vars[&idx]; - // TODO: also specify an output operand when out_place is none: that would + // TODO(antoyo): also specify an output operand when out_place is none: that would // be the clobber but clobbers do not support general constraint like reg; // they only support named registers. // Not sure how we can do this. And the LLVM backend does not seem to add a @@ -321,63 +216,6 @@ impl<'a, 'gcc, 'tcx> AsmBuilderMethods<'tcx> for Builder<'a, 'gcc, 'tcx> { } } - /*if !options.contains(InlineAsmOptions::PRESERVES_FLAGS) { - match asm_arch { - InlineAsmArch::AArch64 | InlineAsmArch::Arm => { - constraints.push("~{cc}".to_string()); - } - InlineAsmArch::X86 | InlineAsmArch::X86_64 => { - constraints.extend_from_slice(&[ - "~{dirflag}".to_string(), - "~{fpsr}".to_string(), - "~{flags}".to_string(), - ]); - } - InlineAsmArch::RiscV32 | InlineAsmArch::RiscV64 => {} - } - } - if !options.contains(InlineAsmOptions::NOMEM) { - // This is actually ignored by LLVM, but it's probably best to keep - // it just in case. LLVM instead uses the ReadOnly/ReadNone - // attributes on the call instruction to optimize. - constraints.push("~{memory}".to_string()); - } - let volatile = !options.contains(InlineAsmOptions::PURE); - let alignstack = !options.contains(InlineAsmOptions::NOSTACK); - let output_type = match &output_types[..] { - [] => self.type_void(), - [ty] => ty, - tys => self.type_struct(&tys, false), - };*/ - - /*let result = inline_asm_call( - self, - &template_str, - &constraints.join(","), - &inputs, - output_type, - volatile, - alignstack, - dialect, - span, - ) - .unwrap_or_else(|| span_bug!(span, "LLVM asm constraint validation failed")); - - if options.contains(InlineAsmOptions::PURE) { - if options.contains(InlineAsmOptions::NOMEM) { - llvm::Attribute::ReadNone.apply_callsite(llvm::AttributePlace::Function, result); - } else if options.contains(InlineAsmOptions::READONLY) { - llvm::Attribute::ReadOnly.apply_callsite(llvm::AttributePlace::Function, result); - } - } else { - if options.contains(InlineAsmOptions::NOMEM) { - llvm::Attribute::InaccessibleMemOnly - .apply_callsite(llvm::AttributePlace::Function, result); - } else { - // LLVM doesn't have an attribute to represent ReadOnly + SideEffect - } - }*/ - // Write results to outputs for (idx, op) in operands.iter().enumerate() { if let InlineAsmOperandRef::Out { place: Some(place), .. } @@ -390,12 +228,12 @@ impl<'a, 'gcc, 'tcx> AsmBuilderMethods<'tcx> for Builder<'a, 'gcc, 'tcx> { } /// Converts a register class to a GCC constraint code. -// TODO: return &'static str instead? +// TODO(antoyo): return &'static str instead? 
fn reg_to_gcc(reg: InlineAsmRegOrRegClass) -> String {
     match reg {
         // For vector registers LLVM wants the register name to match the type size.
         InlineAsmRegOrRegClass::Reg(reg) => {
-            // TODO: add support for vector register.
+            // TODO(antoyo): add support for vector register.
             let constraint =
                 match reg.name() {
                     "ax" => "a",
@@ -404,11 +242,11 @@ fn reg_to_gcc(reg: InlineAsmRegOrRegClass) -> String {
                     "dx" => "d",
                     "si" => "S",
                     "di" => "D",
-                    // TODO: for registers like r11, we have to create a register variable: https://stackoverflow.com/a/31774784/389119
-                    // TODO: in this case though, it's a clobber, so it should work as r11.
+                    // TODO(antoyo): for registers like r11, we have to create a register variable: https://stackoverflow.com/a/31774784/389119
+                    // TODO(antoyo): in this case though, it's a clobber, so it should work as r11.
                     // Recent nightly supports clobber() syntax, so update to it. It does not seem
                     // like it's implemented yet.
-                    name => name, // FIXME: probably wrong.
+                    name => name, // FIXME(antoyo): probably wrong.
                 };
             constraint.to_string()
         },
@@ -570,7 +408,6 @@ fn modifier_to_gcc(arch: InlineAsmArch, reg: InlineAsmRegClass, modifier: Option
         InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::vreg)
         | InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::vreg_low16) => {
             unimplemented!()
-            //if modifier == Some('v') { None } else { modifier }
         }
         InlineAsmRegClass::Arm(ArmInlineAsmRegClass::reg)
         | InlineAsmRegClass::Arm(ArmInlineAsmRegClass::reg_thumb) => unimplemented!(),
@@ -583,11 +420,6 @@ fn modifier_to_gcc(arch: InlineAsmArch, reg: InlineAsmRegClass, modifier: Option
         | InlineAsmRegClass::Arm(ArmInlineAsmRegClass::qreg_low8)
         | InlineAsmRegClass::Arm(ArmInlineAsmRegClass::qreg_low4) => {
             unimplemented!()
-            /*if modifier.is_none() {
-                Some('q')
-            } else {
-                modifier
-            }*/
         }
         InlineAsmRegClass::Bpf(_) => unimplemented!(),
         InlineAsmRegClass::Hexagon(_) => unimplemented!(),
@@ -612,15 +444,7 @@ fn modifier_to_gcc(arch: InlineAsmArch, reg: InlineAsmRegClass, modifier: Option
         InlineAsmRegClass::X86(X86InlineAsmRegClass::reg_byte) => unimplemented!(),
         InlineAsmRegClass::X86(X86InlineAsmRegClass::xmm_reg)
         | InlineAsmRegClass::X86(X86InlineAsmRegClass::ymm_reg)
-        | InlineAsmRegClass::X86(X86InlineAsmRegClass::zmm_reg) => unimplemented!() /*match (reg, modifier) {
-            (X86InlineAsmRegClass::xmm_reg, None) => Some('x'),
-            (X86InlineAsmRegClass::ymm_reg, None) => Some('t'),
-            (X86InlineAsmRegClass::zmm_reg, None) => Some('g'),
-            (_, Some('x')) => Some('x'),
-            (_, Some('y')) => Some('t'),
-            (_, Some('z')) => Some('g'),
-            _ => unreachable!(),
-        }*/,
+        | InlineAsmRegClass::X86(X86InlineAsmRegClass::zmm_reg) => unimplemented!(),
         InlineAsmRegClass::X86(X86InlineAsmRegClass::x87_reg) => unimplemented!(),
         InlineAsmRegClass::X86(X86InlineAsmRegClass::kreg) => unimplemented!(),
         InlineAsmRegClass::Wasm(WasmInlineAsmRegClass::local) => unimplemented!(),
diff --git a/src/back/write.rs b/src/back/write.rs
index 5201e49427799..c3e3847823d91 100644
--- a/src/back/write.rs
+++ b/src/back/write.rs
@@ -15,132 +15,18 @@ pub(crate) unsafe fn codegen(cgcx: &CodegenContext<GccCodegenBackend>, _diag_han
 {
     let context = &module.module_llvm.context;
-    //let llcx = &*module.module_llvm.llcx;
-    //let tm = &*module.module_llvm.tm;
     let module_name = module.name.clone();
     let module_name = Some(&module_name[..]);
-    //let handlers = DiagnosticHandlers::new(cgcx, diag_handler, llcx);
-
-    /*if cgcx.msvc_imps_needed {
-        create_msvc_imps(cgcx, llcx, llmod);
-    }*/
-
-    // A codegen-specific pass manager is used to generate object
-    // files for
an GCC module. - // - // Apparently each of these pass managers is a one-shot kind of - // thing, so we create a new one for each type of output. The - // pass manager passed to the closure should be ensured to not - // escape the closure itself, and the manager should only be - // used once. - /*unsafe fn with_codegen<'ll, F, R>(tm: &'ll llvm::TargetMachine, llmod: &'ll llvm::Module, no_builtins: bool, f: F) -> R - where F: FnOnce(&'ll mut PassManager<'ll>) -> R, - { - let cpm = llvm::LLVMCreatePassManager(); - llvm::LLVMAddAnalysisPasses(tm, cpm); - llvm::LLVMRustAddLibraryInfo(cpm, llmod, no_builtins); - f(cpm) - }*/ - - // Two things to note: - // - If object files are just LLVM bitcode we write bitcode, copy it to - // the .o file, and delete the bitcode if it wasn't otherwise - // requested. - // - If we don't have the integrated assembler then we need to emit - // asm from LLVM and use `gcc` to create the object file. let _bc_out = cgcx.output_filenames.temp_path(OutputType::Bitcode, module_name); let obj_out = cgcx.output_filenames.temp_path(OutputType::Object, module_name); if config.bitcode_needed() { - // TODO - /*let _timer = cgcx - .prof - .generic_activity_with_arg("LLVM_module_codegen_make_bitcode", &module.name[..]); - let thin = ThinBuffer::new(llmod); - let data = thin.data(); - - if config.emit_bc || config.emit_obj == EmitObj::Bitcode { - let _timer = cgcx.prof.generic_activity_with_arg( - "LLVM_module_codegen_emit_bitcode", - &module.name[..], - ); - if let Err(e) = fs::write(&bc_out, data) { - let msg = format!("failed to write bytecode to {}: {}", bc_out.display(), e); - diag_handler.err(&msg); - } - } - - if config.emit_obj == EmitObj::ObjectCode(BitcodeSection::Full) { - let _timer = cgcx.prof.generic_activity_with_arg( - "LLVM_module_codegen_embed_bitcode", - &module.name[..], - ); - embed_bitcode(cgcx, llcx, llmod, Some(data)); - } - - if config.emit_bc_compressed { - let _timer = cgcx.prof.generic_activity_with_arg( - "LLVM_module_codegen_emit_compressed_bitcode", - &module.name[..], - ); - let dst = bc_out.with_extension(RLIB_BYTECODE_EXTENSION); - let data = bytecode::encode(&module.name, data); - if let Err(e) = fs::write(&dst, data) { - let msg = format!("failed to write bytecode to {}: {}", dst.display(), e); - diag_handler.err(&msg); - } - }*/ - } /*else if config.emit_obj == EmitObj::ObjectCode(BitcodeSection::Marker) { - unimplemented!(); - //embed_bitcode(cgcx, llcx, llmod, None); - }*/ + // TODO(antoyo) + } if config.emit_ir { unimplemented!(); - /*let _timer = cgcx - .prof - .generic_activity_with_arg("LLVM_module_codegen_emit_ir", &module.name[..]); - let out = cgcx.output_filenames.temp_path(OutputType::LlvmAssembly, module_name); - let out_c = path_to_c_string(&out); - - extern "C" fn demangle_callback( - input_ptr: *const c_char, - input_len: size_t, - output_ptr: *mut c_char, - output_len: size_t, - ) -> size_t { - let input = - unsafe { slice::from_raw_parts(input_ptr as *const u8, input_len as usize) }; - - let input = match str::from_utf8(input) { - Ok(s) => s, - Err(_) => return 0, - }; - - let output = unsafe { - slice::from_raw_parts_mut(output_ptr as *mut u8, output_len as usize) - }; - let mut cursor = io::Cursor::new(output); - - let demangled = match rustc_demangle::try_demangle(input) { - Ok(d) => d, - Err(_) => return 0, - }; - - if write!(cursor, "{:#}", demangled).is_err() { - // Possible only if provided buffer is not big enough - return 0; - } - - cursor.position() as size_t - } - - let result = llvm::LLVMRustPrintModule(llmod, 
out_c.as_ptr(), demangle_callback);
-        result.into_result().map_err(|()| {
-            let msg = format!("failed to write LLVM IR to {}", out.display());
-            llvm_err(diag_handler, &msg)
-        })?;*/
     }
 
     if config.emit_asm {
@@ -149,10 +35,6 @@ pub(crate) unsafe fn codegen(cgcx: &CodegenContext<GccCodegenBackend>, _diag_han
             .generic_activity_with_arg("LLVM_module_codegen_emit_asm", &module.name[..]);
         let path = cgcx.output_filenames.temp_path(OutputType::Assembly, module_name);
         context.compile_to_file(OutputKind::Assembler, path.to_str().expect("path to str"));
-
-        /*with_codegen(tm, llmod, config.no_builtins, |cpm| {
-            write_output_file(diag_handler, tm, cpm, llmod, &path, llvm::FileType::AssemblyFile)
-        })?;*/
     }
 
     match config.emit_obj {
@@ -160,47 +42,27 @@ pub(crate) unsafe fn codegen(cgcx: &CodegenContext<GccCodegenBackend>, _diag_han
             let _timer = cgcx
                 .prof
                 .generic_activity_with_arg("LLVM_module_codegen_emit_obj", &module.name[..]);
-            //with_codegen(tm, llmod, config.no_builtins, |cpm| {
-                //println!("1: {}", module.name);
-                match &*module.name {
-                    "std_example.7rcbfp3g-cgu.15" => {
-                        println!("Dumping reproducer {}", module.name);
-                        let _ = fs::create_dir("/tmp/reproducers");
-                        // FIXME: segfault in dump_reproducer_to_file() might be caused by
-                        // transmuting an rvalue to an lvalue.
-                        // Segfault is actually in gcc::jit::reproducer::get_identifier_as_lvalue
-                        context.dump_reproducer_to_file(&format!("/tmp/reproducers/{}.c", module.name));
-                        println!("Dumped reproducer {}", module.name);
-                    },
-                    _ => (),
-                }
-                /*let _ = fs::create_dir("/tmp/dumps");
-                context.dump_to_file(&format!("/tmp/dumps/{}.c", module.name), true);
-                println!("Dumped {}", module.name);*/
-                //println!("Compile module {}", module.name);
-                context.compile_to_file(OutputKind::ObjectFile, obj_out.to_str().expect("path to str"));
-            //})?;
+            match &*module.name {
+                "std_example.7rcbfp3g-cgu.15" => {
+                    println!("Dumping reproducer {}", module.name);
+                    let _ = fs::create_dir("/tmp/reproducers");
+                    // FIXME(antoyo): segfault in dump_reproducer_to_file() might be caused by
+                    // transmuting an rvalue to an lvalue.
+                    // Segfault is actually in gcc::jit::reproducer::get_identifier_as_lvalue
+                    context.dump_reproducer_to_file(&format!("/tmp/reproducers/{}.c", module.name));
+                    println!("Dumped reproducer {}", module.name);
+                },
+                _ => (),
+            }
+            context.compile_to_file(OutputKind::ObjectFile, obj_out.to_str().expect("path to str"));
         }
 
         EmitObj::Bitcode => {
-            //unimplemented!();
-            /*debug!("copying bitcode {:?} to obj {:?}", bc_out, obj_out);
-            if let Err(e) = link_or_copy(&bc_out, &obj_out) {
-                diag_handler.err(&format!("failed to copy bitcode to object file: {}", e));
-            }
-
-            if !config.emit_bc {
-                debug!("removing_bitcode {:?}", bc_out);
-                if let Err(e) = fs::remove_file(&bc_out) {
-                    diag_handler.err(&format!("failed to remove bitcode: {}", e));
-                }
-            }*/
+            // TODO(antoyo)
         }
 
         EmitObj::None => {}
     }
-
-    //drop(handlers);
 }
 
 Ok(module.into_compiled_module(
@@ -213,22 +75,4 @@ pub(crate) unsafe fn codegen(cgcx: &CodegenContext<GccCodegenBackend>, _diag_han
 
 pub(crate) fn link(_cgcx: &CodegenContext<GccCodegenBackend>, _diag_handler: &Handler, mut _modules: Vec<ModuleCodegen<GccContext>>) -> Result<ModuleCodegen<GccContext>, FatalError> {
     unimplemented!();
-    /*use super::lto::{Linker, ModuleBuffer};
-    // Sort the modules by name to ensure to ensure deterministic behavior.
-    modules.sort_by(|a, b| a.name.cmp(&b.name));
-    let (first, elements) =
-        modules.split_first().expect("Bug!
modules must contain at least one module."); - - let mut linker = Linker::new(first.module_llvm.llmod()); - for module in elements { - let _timer = - cgcx.prof.generic_activity_with_arg("LLVM_link_module", format!("{:?}", module.name)); - let buffer = ModuleBuffer::new(module.module_llvm.llmod()); - linker.add(&buffer.data()).map_err(|()| { - let msg = format!("failed to serialize module {:?}", module.name); - llvm_err(&diag_handler, &msg) - })?; - } - drop(linker); - Ok(modules.remove(0))*/ } diff --git a/src/base.rs b/src/base.rs index 7050f122a1765..2963a3d49a20e 100644 --- a/src/base.rs +++ b/src/base.rs @@ -35,7 +35,7 @@ pub fn global_linkage_to_gcc(linkage: Linkage) -> GlobalKind { Linkage::Appending => unimplemented!(), Linkage::Internal => GlobalKind::Internal, Linkage::Private => GlobalKind::Internal, - Linkage::ExternalWeak => GlobalKind::Imported, // TODO: should be weak linkage. + Linkage::ExternalWeak => GlobalKind::Imported, // TODO(antoyo): should be weak linkage. Linkage::Common => unimplemented!(), } } @@ -46,7 +46,7 @@ pub fn linkage_to_gcc(linkage: Linkage) -> FunctionType { Linkage::AvailableExternally => FunctionType::Extern, Linkage::LinkOnceAny => unimplemented!(), Linkage::LinkOnceODR => unimplemented!(), - Linkage::WeakAny => FunctionType::Exported, // FIXME: should be similar to linkonce. + Linkage::WeakAny => FunctionType::Exported, // FIXME(antoyo): should be similar to linkonce. Linkage::WeakODR => unimplemented!(), Linkage::Appending => unimplemented!(), Linkage::Internal => FunctionType::Internal, @@ -74,19 +74,25 @@ pub fn compile_codegen_unit<'tcx>(tcx: TyCtxt<'tcx>, cgu_name: Symbol) -> (Modul // Instantiate monomorphizations without filling out definitions yet... //let llvm_module = ModuleLlvm::new(tcx, &cgu_name.as_str()); let context = Context::default(); - // TODO: only set on x86 platforms. + // TODO(antoyo): only set on x86 platforms. context.add_command_line_option("-masm=intel"); for arg in &tcx.sess.opts.cg.llvm_args { context.add_command_line_option(arg); } context.add_command_line_option("-fno-semantic-interposition"); - //context.set_dump_code_on_compile(true); + if env::var("CG_GCCJIT_DUMP_CODE").as_deref() == Ok("1") { + context.set_dump_code_on_compile(true); + } if env::var("CG_GCCJIT_DUMP_GIMPLE").as_deref() == Ok("1") { context.set_dump_initial_gimple(true); } context.set_debug_info(true); - //context.set_dump_everything(true); - //context.set_keep_intermediates(true); + if env::var("CG_GCCJIT_DUMP_EVERYTHING").as_deref() == Ok("1") { + context.set_dump_everything(true); + } + if env::var("CG_GCCJIT_KEEP_INTERMEDIATES").as_deref() == Ok("1") { + context.set_keep_intermediates(true); + } { let cx = CodegenCx::new(&context, cgu, tcx); @@ -100,7 +106,6 @@ pub fn compile_codegen_unit<'tcx>(tcx: TyCtxt<'tcx>, cgu_name: Symbol) -> (Modul block.end_with_void_return(None); }); - //println!("module_codegen: {:?} {:?}", cgu_name, &cx.context as *const _); let mono_items = cgu.items_in_deterministic_order(tcx); for &(mono_item, (linkage, visibility)) in &mono_items { mono_item.predefine::>(&cx, linkage, visibility); diff --git a/src/builder.rs b/src/builder.rs index bb864c27e1b4d..0f7db911552a1 100644 --- a/src/builder.rs +++ b/src/builder.rs @@ -48,10 +48,10 @@ use crate::common::{SignType, TypeReflection, type_is_pointer}; use crate::context::CodegenCx; use crate::type_of::LayoutGccExt; -// TODO +// TODO(antoyo) type Funclet = (); -// TODO: remove this variable. +// TODO(antoyo): remove this variable. 
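// (Editor's aside, not part of the patch: this counter only feeds the
// format!("returnValue{}", ...) calls below, so each call-result temporary
// gets a distinct local name, morally:
//     let name = format!("returnValue{}", unsafe { RETURN_VALUE_COUNT });
//     let result = current_func.new_local(None, return_type, &name);
// hence the TODO above about removing the global.)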
static mut RETURN_VALUE_COUNT: usize = 0; enum ExtremumOperation { @@ -99,7 +99,7 @@ impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> { let load_ordering = match order { - // TODO: does this make sense? + // TODO(antoyo): does this make sense? AtomicOrdering::AcquireRelease | AtomicOrdering::Release => AtomicOrdering::Acquire, _ => order.clone(), }; @@ -162,26 +162,6 @@ impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> { } fn check_call<'b>(&mut self, _typ: &str, func: Function<'gcc>, args: &'b [RValue<'gcc>]) -> Cow<'b, [RValue<'gcc>]> { - //let mut fn_ty = self.cx.val_ty(func); - // Strip off pointers - /*while self.cx.type_kind(fn_ty) == TypeKind::Pointer { - fn_ty = self.cx.element_type(fn_ty); - }*/ - - /*assert!( - self.cx.type_kind(fn_ty) == TypeKind::Function, - "builder::{} not passed a function, but {:?}", - typ, - fn_ty - ); - - let param_tys = self.cx.func_params_types(fn_ty); - - let all_args_match = param_tys - .iter() - .zip(args.iter().map(|&v| self.val_ty(v))) - .all(|(expected_ty, actual_ty)| *expected_ty == actual_ty);*/ - let mut all_args_match = true; let mut param_types = vec![]; let param_count = func.get_param_count(); @@ -205,16 +185,6 @@ impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> { .map(|(_i, (expected_ty, &actual_val))| { let actual_ty = actual_val.get_type(); if expected_ty != actual_ty { - /*debug!( - "type mismatch in function call of {:?}. \ - Expected {:?} for param {}, got {:?}; injecting bitcast", - func, expected_ty, i, actual_ty - );*/ - /*println!( - "type mismatch in function call of {:?}. \ - Expected {:?} for param {}, got {:?}; injecting bitcast", - func, expected_ty, i, actual_ty - );*/ self.bitcast(actual_val, expected_ty) } else { @@ -227,26 +197,6 @@ impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> { } fn check_ptr_call<'b>(&mut self, _typ: &str, func_ptr: RValue<'gcc>, args: &'b [RValue<'gcc>]) -> Cow<'b, [RValue<'gcc>]> { - //let mut fn_ty = self.cx.val_ty(func); - // Strip off pointers - /*while self.cx.type_kind(fn_ty) == TypeKind::Pointer { - fn_ty = self.cx.element_type(fn_ty); - }*/ - - /*assert!( - self.cx.type_kind(fn_ty) == TypeKind::Function, - "builder::{} not passed a function, but {:?}", - typ, - fn_ty - ); - - let param_tys = self.cx.func_params_types(fn_ty); - - let all_args_match = param_tys - .iter() - .zip(args.iter().map(|&v| self.val_ty(v))) - .all(|(expected_ty, actual_ty)| *expected_ty == actual_ty);*/ - let mut all_args_match = true; let mut param_types = vec![]; let gcc_func = func_ptr.get_type().is_function_ptr_type().expect("function ptr"); @@ -269,16 +219,6 @@ impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> { .map(|(_i, (expected_ty, &actual_val))| { let actual_ty = actual_val.get_type(); if expected_ty != actual_ty { - /*debug!( - "type mismatch in function call of {:?}. \ - Expected {:?} for param {}, got {:?}; injecting bitcast", - func, expected_ty, i, actual_ty - );*/ - /*println!( - "type mismatch in function call of {:?}. \ - Expected {:?} for param {}, got {:?}; injecting bitcast", - func, expected_ty, i, actual_ty - );*/ self.bitcast(actual_val, expected_ty) } else { @@ -291,27 +231,14 @@ impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> { } fn check_store(&mut self, val: RValue<'gcc>, ptr: RValue<'gcc>) -> RValue<'gcc> { - let dest_ptr_ty = self.cx.val_ty(ptr).make_pointer(); // TODO: make sure make_pointer() is okay here. + let dest_ptr_ty = self.cx.val_ty(ptr).make_pointer(); // TODO(antoyo): make sure make_pointer() is okay here. 
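        // (Clarifying sketch, not in the original patch: the body below morally
        // computes
        //     if dest_ptr_ty == stored_ptr_ty { ptr } else { bitcast(ptr, stored_ptr_ty) }
        // inserting a bitcast only when the destination pointer type disagrees
        // with a pointer to the stored value's type.)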
let stored_ty = self.cx.val_ty(val); let stored_ptr_ty = self.cx.type_ptr_to(stored_ty); - //assert_eq!(self.cx.type_kind(dest_ptr_ty), TypeKind::Pointer); - if dest_ptr_ty == stored_ptr_ty { ptr } else { - /*debug!( - "type mismatch in store. \ - Expected {:?}, got {:?}; inserting bitcast", - dest_ptr_ty, stored_ptr_ty - );*/ - /*println!( - "type mismatch in store. \ - Expected {:?}, got {:?}; inserting bitcast", - dest_ptr_ty, stored_ptr_ty - );*/ - //ptr self.bitcast(ptr, stored_ptr_ty) } } @@ -321,13 +248,9 @@ impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> { } fn function_call(&mut self, func: RValue<'gcc>, args: &[RValue<'gcc>], _funclet: Option<&Funclet>) -> RValue<'gcc> { - //debug!("call {:?} with args ({:?})", func, args); - - // TODO: remove when the API supports a different type for functions. + // TODO(antoyo): remove when the API supports a different type for functions. let func: Function<'gcc> = self.cx.rvalue_as_function(func); let args = self.check_call("call", func, args); - //let bundle = funclet.map(|funclet| funclet.bundle()); - //let bundle = bundle.as_ref().map(|b| &*b.raw); // gccjit requires to use the result of functions, even when it's not used. // That's why we assign the result to a local or call add_eval(). @@ -349,11 +272,7 @@ impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> { } fn function_ptr_call(&mut self, func_ptr: RValue<'gcc>, args: &[RValue<'gcc>], _funclet: Option<&Funclet>) -> RValue<'gcc> { - //debug!("func ptr call {:?} with args ({:?})", func, args); - let args = self.check_ptr_call("call", func_ptr, args); - //let bundle = funclet.map(|funclet| funclet.bundle()); - //let bundle = bundle.as_ref().map(|b| &*b.raw); // gccjit requires to use the result of functions, even when it's not used. // That's why we assign the result to a local or call add_eval(). @@ -363,7 +282,7 @@ impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> { let void_type = self.context.new_type::<()>(); let current_func = current_block.get_function(); - // FIXME: As a temporary workaround for unsupported LLVM intrinsics. + // FIXME(antoyo): As a temporary workaround for unsupported LLVM intrinsics. if gcc_func.get_param_count() == 0 && format!("{:?}", func_ptr) == "__builtin_ia32_pmovmskb128" { return_type = self.int_type; } @@ -376,7 +295,7 @@ impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> { } else { if gcc_func.get_param_count() == 0 { - // FIXME: As a temporary workaround for unsupported LLVM intrinsics. + // FIXME(antoyo): As a temporary workaround for unsupported LLVM intrinsics. current_block.add_eval(None, self.cx.context.new_call_through_ptr(None, func_ptr, &[])); } else { @@ -390,17 +309,12 @@ impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> { } pub fn overflow_call(&mut self, func: Function<'gcc>, args: &[RValue<'gcc>], _funclet: Option<&Funclet>) -> RValue<'gcc> { - //debug!("overflow_call {:?} with args ({:?})", func, args); - - //let bundle = funclet.map(|funclet| funclet.bundle()); - //let bundle = bundle.as_ref().map(|b| &*b.raw); - // gccjit requires to use the result of functions, even when it's not used. // That's why we assign the result to a local. let return_type = self.context.new_type::(); let current_block = self.current_block.borrow().expect("block"); let current_func = current_block.get_function(); - // TODO: return the new_call() directly? Since the overflow function has no side-effects. + // TODO(antoyo): return the new_call() directly? Since the overflow function has no side-effects. 
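        // (Aside for clarity, not part of the patch: the call result must be
        // consumed, so the lines below bind it to a freshly named local,
        // morally
        //     let result = current_func.new_local(None, return_type, name);
        //     current_block.add_assignment(None, result, call);
        // before the local is read back as an rvalue.)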
unsafe { RETURN_VALUE_COUNT += 1 }; let result = current_func.new_local(None, return_type, &format!("returnValue{}", unsafe { RETURN_VALUE_COUNT })); current_block.add_assignment(None, result, self.cx.context.new_call(None, func, &args)); @@ -520,25 +434,7 @@ impl<'a, 'gcc, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'gcc, 'tcx> { self.llbb().end_with_conditional(None, condition, then, catch); self.context.new_rvalue_from_int(self.int_type, 0) - // TODO - /*debug!("invoke {:?} with args ({:?})", func, args); - - let args = self.check_call("invoke", func, args); - let bundle = funclet.map(|funclet| funclet.bundle()); - let bundle = bundle.as_ref().map(|b| &*b.raw); - - unsafe { - llvm::LLVMRustBuildInvoke( - self.llbuilder, - func, - args.as_ptr(), - args.len() as c_uint, - then, - catch, - bundle, - UNNAMED, - ) - }*/ + // TODO(antoyo) } fn unreachable(&mut self) { @@ -558,7 +454,7 @@ impl<'a, 'gcc, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'gcc, 'tcx> { } fn add(&mut self, a: RValue<'gcc>, mut b: RValue<'gcc>) -> RValue<'gcc> { - // FIXME: this should not be required. + // FIXME(antoyo): this should not be required. if format!("{:?}", a.get_type()) != format!("{:?}", b.get_type()) { b = self.context.new_cast(None, b, a.get_type()); } @@ -589,24 +485,24 @@ impl<'a, 'gcc, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'gcc, 'tcx> { } fn udiv(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> { - // TODO: convert the arguments to unsigned? + // TODO(antoyo): convert the arguments to unsigned? a / b } fn exactudiv(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> { - // TODO: convert the arguments to unsigned? - // TODO: poison if not exact. + // TODO(antoyo): convert the arguments to unsigned? + // TODO(antoyo): poison if not exact. a / b } fn sdiv(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> { - // TODO: convert the arguments to signed? + // TODO(antoyo): convert the arguments to signed? a / b } fn exactsdiv(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> { - // TODO: posion if not exact. - // FIXME: rustc_codegen_ssa::mir::intrinsic uses different types for a and b but they + // TODO(antoyo): posion if not exact. + // FIXME(antoyo): rustc_codegen_ssa::mir::intrinsic uses different types for a and b but they // should be the same. let typ = a.get_type().to_signed(self); let a = self.context.new_cast(None, a, typ); @@ -629,7 +525,7 @@ impl<'a, 'gcc, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'gcc, 'tcx> { fn frem(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> { if a.get_type() == self.cx.float_type { let fmodf = self.context.get_builtin_function("fmodf"); - // FIXME: this seems to produce the wrong result. + // FIXME(antoyo): this seems to produce the wrong result. return self.context.new_call(None, fmodf, &[a, b]); } assert_eq!(a.get_type(), self.cx.double_type); @@ -639,18 +535,15 @@ impl<'a, 'gcc, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'gcc, 'tcx> { } fn shl(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> { - // FIXME: remove the casts when libgccjit can shift an unsigned number by an unsigned number. + // FIXME(antoyo): remove the casts when libgccjit can shift an unsigned number by an unsigned number. 
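        // (Illustrative aside, not in the patch: on plain integers the
        // workaround below amounts to
        //     fn shl_mixed(a: u64, b: i64) -> u64 { ((a as i64) << b) as u64 }
        // i.e. cast the left operand to the right operand's signedness, do the
        // shift, then cast the result back.)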
let a_type = a.get_type(); let b_type = b.get_type(); if a_type.is_unsigned(self) && b_type.is_signed(self) { - //println!("shl: {:?} -> {:?}", a, b_type); let a = self.context.new_cast(None, a, b_type); let result = a << b; - //println!("shl: {:?} -> {:?}", result, a_type); self.context.new_cast(None, result, a_type) } else if a_type.is_signed(self) && b_type.is_unsigned(self) { - //println!("shl: {:?} -> {:?}", b, a_type); let b = self.context.new_cast(None, b, a_type); a << b } @@ -660,19 +553,16 @@ impl<'a, 'gcc, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'gcc, 'tcx> { } fn lshr(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> { - // FIXME: remove the casts when libgccjit can shift an unsigned number by an unsigned number. - // TODO: cast to unsigned to do a logical shift if that does not work. + // FIXME(antoyo): remove the casts when libgccjit can shift an unsigned number by an unsigned number. + // TODO(antoyo): cast to unsigned to do a logical shift if that does not work. let a_type = a.get_type(); let b_type = b.get_type(); if a_type.is_unsigned(self) && b_type.is_signed(self) { - //println!("lshl: {:?} -> {:?}", a, b_type); let a = self.context.new_cast(None, a, b_type); let result = a >> b; - //println!("lshl: {:?} -> {:?}", result, a_type); self.context.new_cast(None, result, a_type) } else if a_type.is_signed(self) && b_type.is_unsigned(self) { - //println!("lshl: {:?} -> {:?}", b, a_type); let b = self.context.new_cast(None, b, a_type); a >> b } @@ -682,19 +572,16 @@ impl<'a, 'gcc, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'gcc, 'tcx> { } fn ashr(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> { - // TODO: check whether behavior is an arithmetic shift for >> . - // FIXME: remove the casts when libgccjit can shift an unsigned number by an unsigned number. + // TODO(antoyo): check whether behavior is an arithmetic shift for >> . + // FIXME(antoyo): remove the casts when libgccjit can shift an unsigned number by an unsigned number. 
let a_type = a.get_type(); let b_type = b.get_type(); if a_type.is_unsigned(self) && b_type.is_signed(self) { - //println!("ashl: {:?} -> {:?}", a, b_type); let a = self.context.new_cast(None, a, b_type); let result = a >> b; - //println!("ashl: {:?} -> {:?}", result, a_type); self.context.new_cast(None, result, a_type) } else if a_type.is_signed(self) && b_type.is_unsigned(self) { - //println!("ashl: {:?} -> {:?}", b, a_type); let b = self.context.new_cast(None, b, a_type); a >> b } @@ -704,7 +591,7 @@ impl<'a, 'gcc, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'gcc, 'tcx> { } fn and(&mut self, a: RValue<'gcc>, mut b: RValue<'gcc>) -> RValue<'gcc> { - // FIXME: hack by putting the result in a variable to workaround this bug: + // FIXME(antoyo): hack by putting the result in a variable to workaround this bug: // https://gcc.gnu.org/bugzilla//show_bug.cgi?id=95498 if a.get_type() != b.get_type() { b = self.context.new_cast(None, b, a.get_type()); @@ -715,7 +602,7 @@ impl<'a, 'gcc, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'gcc, 'tcx> { } fn or(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> { - // FIXME: hack by putting the result in a variable to workaround this bug: + // FIXME(antoyo): hack by putting the result in a variable to workaround this bug: // https://gcc.gnu.org/bugzilla//show_bug.cgi?id=95498 let res = self.current_func().new_local(None, b.get_type(), "orResult"); self.llbb().add_assignment(None, res, a | b); @@ -727,7 +614,7 @@ impl<'a, 'gcc, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'gcc, 'tcx> { } fn neg(&mut self, a: RValue<'gcc>) -> RValue<'gcc> { - // TODO: use new_unary_op()? + // TODO(antoyo): use new_unary_op()? self.cx.context.new_rvalue_from_long(a.get_type(), 0) - a } @@ -759,7 +646,7 @@ impl<'a, 'gcc, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'gcc, 'tcx> { } fn unchecked_usub(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> { - // TODO: should generate poison value? + // TODO(antoyo): should generate poison value? 
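The shl/lshr/ashr implementations above all perform the same cast-around-the-shift dance: when the operand signednesses disagree, one operand is cast over, the shift happens in the common type, and the result is cast back. An editor's illustration of the same logic in plain Rust (not code from the patch), showing why the original type is preserved:

```rust
// libgccjit rejects shifting an unsigned value by a signed amount (and vice
// versa), so the builder casts one operand, shifts, then casts the result back.
fn shl_mixed(a: u32, b: i32) -> u32 {
    let a_signed = a as i32;    // cast `a` to `b`'s type
    let result = a_signed << b; // shift in the common type
    result as u32               // cast the result back to `a`'s type
}

fn main() {
    assert_eq!(shl_mixed(3, 4), 48);
    println!("3 << 4 = {}", shl_mixed(3, 4));
}
```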
a - b } @@ -773,47 +660,22 @@ impl<'a, 'gcc, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'gcc, 'tcx> { fn fadd_fast(&mut self, _lhs: RValue<'gcc>, _rhs: RValue<'gcc>) -> RValue<'gcc> { unimplemented!(); - /*unsafe { - let instr = llvm::LLVMBuildFAdd(self.llbuilder, lhs, rhs, UNNAMED); - llvm::LLVMRustSetHasUnsafeAlgebra(instr); - instr - }*/ } fn fsub_fast(&mut self, _lhs: RValue<'gcc>, _rhs: RValue<'gcc>) -> RValue<'gcc> { unimplemented!(); - /*unsafe { - let instr = llvm::LLVMBuildFSub(self.llbuilder, lhs, rhs, UNNAMED); - llvm::LLVMRustSetHasUnsafeAlgebra(instr); - instr - }*/ } fn fmul_fast(&mut self, _lhs: RValue<'gcc>, _rhs: RValue<'gcc>) -> RValue<'gcc> { unimplemented!(); - /*unsafe { - let instr = llvm::LLVMBuildFMul(self.llbuilder, lhs, rhs, UNNAMED); - llvm::LLVMRustSetHasUnsafeAlgebra(instr); - instr - }*/ } fn fdiv_fast(&mut self, _lhs: RValue<'gcc>, _rhs: RValue<'gcc>) -> RValue<'gcc> { unimplemented!(); - /*unsafe { - let instr = llvm::LLVMBuildFDiv(self.llbuilder, lhs, rhs, UNNAMED); - llvm::LLVMRustSetHasUnsafeAlgebra(instr); - instr - }*/ } fn frem_fast(&mut self, _lhs: RValue<'gcc>, _rhs: RValue<'gcc>) -> RValue<'gcc> { unimplemented!(); - /*unsafe { - let instr = llvm::LLVMBuildFRem(self.llbuilder, lhs, rhs, UNNAMED); - llvm::LLVMRustSetHasUnsafeAlgebra(instr); - instr - }*/ } fn checked_binop(&mut self, oop: OverflowOp, typ: Ty<'_>, lhs: Self::Value, rhs: Self::Value) -> (Self::Value, Self::Value) { @@ -827,7 +689,7 @@ impl<'a, 'gcc, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'gcc, 'tcx> { _ => panic!("tried to get overflow intrinsic for op applied to non-int type"), }; - // TODO: remove duplication with intrinsic? + // TODO(antoyo): remove duplication with intrinsic? let name = match oop { OverflowOp::Add => @@ -882,7 +744,7 @@ impl<'a, 'gcc, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'gcc, 'tcx> { let intrinsic = self.context.get_builtin_function(&name); let res = self.current_func() - // TODO: is it correct to use rhs type instead of the parameter typ? + // TODO(antoyo): is it correct to use rhs type instead of the parameter typ? .new_local(None, rhs.get_type(), "binopResult") .get_address(None); let overflow = self.overflow_call(intrinsic, &[lhs, rhs, res], None); @@ -890,7 +752,7 @@ impl<'a, 'gcc, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'gcc, 'tcx> { } fn alloca(&mut self, ty: Type<'gcc>, align: Align) -> RValue<'gcc> { - // FIXME: this check that we don't call get_aligned() a second time on a time. + // FIXME(antoyo): this checks that we don't call get_aligned() a second time on a type. // Ideally, we shouldn't need to do this check. let aligned_type = if ty == self.cx.u128_type || ty == self.cx.i128_type { @@ -899,37 +761,27 @@ impl<'a, 'gcc, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'gcc, 'tcx> { else { ty.get_aligned(align.bytes()) }; - // TODO: It might be better to return a LValue, but fixing the rustc API is non-trivial. + // TODO(antoyo): It might be better to return a LValue, but fixing the rustc API is non-trivial.
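For context on the checked_binop hunk above: GCC's __builtin_{add,sub,mul}_overflow builtins write the arithmetic result through a pointer and return a bool overflow flag, which is why the builder allocates a "binopResult" local, passes its address, and pairs it with the flag from overflow_call. An editor's illustration of the resulting (value, overflow) contract in plain Rust, where overflowing_add stands in for the builtin:

```rust
// Returns the wrapped result together with an overflow flag, the same shape
// checked_binop hands back to rustc_codegen_ssa.
fn checked_add_like(lhs: u8, rhs: u8) -> (u8, bool) {
    lhs.overflowing_add(rhs)
}

fn main() {
    assert_eq!(checked_add_like(200, 100), (44, true)); // 300 wraps to 44
    assert_eq!(checked_add_like(1, 2), (3, false));
}
```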
self.stack_var_count.set(self.stack_var_count.get() + 1); self.current_func().new_local(None, aligned_type, &format!("stack_var_{}", self.stack_var_count.get())).get_address(None) } fn dynamic_alloca(&mut self, _ty: Type<'gcc>, _align: Align) -> RValue<'gcc> { unimplemented!(); - /*unsafe { - let alloca = llvm::LLVMBuildAlloca(self.llbuilder, ty, UNNAMED); - llvm::LLVMSetAlignment(alloca, align.bytes() as c_uint); - alloca - }*/ } fn array_alloca(&mut self, _ty: Type<'gcc>, _len: RValue<'gcc>, _align: Align) -> RValue<'gcc> { unimplemented!(); - /*unsafe { - let alloca = llvm::LLVMBuildArrayAlloca(self.llbuilder, ty, len, UNNAMED); - llvm::LLVMSetAlignment(alloca, align.bytes() as c_uint); - alloca - }*/ } fn load(&mut self, _ty: Type<'gcc>, ptr: RValue<'gcc>, _align: Align) -> RValue<'gcc> { - // TODO: use ty. + // TODO(antoyo): use ty. let block = self.llbb(); let function = block.get_function(); // NOTE: instead of returning the dereference here, we have to assign it to a variable in // the current basic block. Otherwise, it could be used in another basic block, causing a // dereference after a drop, for instance. - // TODO: handle align. + // TODO(antoyo): handle align. let deref = ptr.dereference(None).to_rvalue(); let value_type = deref.get_type(); unsafe { RETURN_VALUE_COUNT += 1 }; @@ -939,16 +791,14 @@ impl<'a, 'gcc, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'gcc, 'tcx> { } fn volatile_load(&mut self, _ty: Type<'gcc>, ptr: RValue<'gcc>) -> RValue<'gcc> { - // TODO: use ty. - //println!("5: volatile load: {:?} to {:?}", ptr, ptr.get_type().make_volatile()); + // TODO(antoyo): use ty. let ptr = self.context.new_cast(None, ptr, ptr.get_type().make_volatile()); - //println!("6"); ptr.dereference(None).to_rvalue() } fn atomic_load(&mut self, _ty: Type<'gcc>, ptr: RValue<'gcc>, order: AtomicOrdering, size: Size) -> RValue<'gcc> { - // TODO: use ty. - // TODO: handle alignment. + // TODO(antoyo): use ty. + // TODO(antoyo): handle alignment. 
let atomic_load = self.context.get_builtin_function(&format!("__atomic_load_{}", size.bytes())); let ordering = self.context.new_rvalue_from_int(self.i32_type, order.to_gcc()); @@ -958,8 +808,6 @@ impl<'a, 'gcc, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'gcc, 'tcx> { } fn load_operand(&mut self, place: PlaceRef<'tcx, RValue<'gcc>>) -> OperandRef<'tcx, RValue<'gcc>> { - //debug!("PlaceRef::load: {:?}", place); - assert_eq!(place.llextra.is_some(), place.layout.is_unsized()); if place.layout.is_zst() { @@ -987,22 +835,11 @@ impl<'a, 'gcc, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'gcc, 'tcx> { OperandValue::Ref(place.llval, Some(llextra), place.align) } else if place.layout.is_gcc_immediate() { - let const_llval = None; - /*unsafe { - if let Some(global) = llvm::LLVMIsAGlobalVariable(place.llval) { - if llvm::LLVMIsGlobalConstant(global) == llvm::True { - const_llval = llvm::LLVMGetInitializer(global); - } - } - }*/ - let llval = const_llval.unwrap_or_else(|| { - let load = self.load(place.llval.get_type(), place.llval, place.align); - if let abi::Abi::Scalar(ref scalar) = place.layout.abi { - scalar_load_metadata(self, load, scalar); - } - load - }); - OperandValue::Immediate(self.to_immediate(llval, place.layout)) + let load = self.load(place.llval.get_type(), place.llval, place.align); + if let abi::Abi::Scalar(ref scalar) = place.layout.abi { + scalar_load_metadata(self, load, scalar); + } + OperandValue::Immediate(self.to_immediate(load, place.layout)) } else if let abi::Abi::ScalarPair(ref a, ref b) = place.layout.abi { let b_offset = a.value.size(self).align_to(b.value.align(self).abi); @@ -1058,39 +895,11 @@ impl<'a, 'gcc, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'gcc, 'tcx> { } fn range_metadata(&mut self, _load: RValue<'gcc>, _range: Range<u128>) { - // TODO - /*if self.sess().target.target.arch == "amdgpu" { - // amdgpu/LLVM does something weird and thinks a i64 value is - // split into a v2i32, halving the bitwidth LLVM expects, - // tripping an assertion. So, for now, just disable this - // optimization. - return; - } - - unsafe { - let llty = self.cx.val_ty(load); - let v = [ - self.cx.const_uint_big(llty, range.start), - self.cx.const_uint_big(llty, range.end), - ]; - - llvm::LLVMSetMetadata( - load, - llvm::MD_range as c_uint, - llvm::LLVMMDNodeInContext(self.cx.llcx, v.as_ptr(), v.len() as c_uint), - ); - }*/ + // TODO(antoyo) } fn nonnull_metadata(&mut self, _load: RValue<'gcc>) { - // TODO - /*unsafe { - llvm::LLVMSetMetadata( - load, - llvm::MD_nonnull as c_uint, - llvm::LLVMMDNodeInContext(self.cx.llcx, ptr::null(), 0), - ); - }*/ + // TODO(antoyo) } fn store(&mut self, val: RValue<'gcc>, ptr: RValue<'gcc>, align: Align) -> RValue<'gcc> { @@ -1098,36 +907,21 @@ impl<'a, 'gcc, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'gcc, 'tcx> { } fn store_with_flags(&mut self, val: RValue<'gcc>, ptr: RValue<'gcc>, _align: Align, _flags: MemFlags) -> RValue<'gcc> { - //debug!("Store {:?} -> {:?} ({:?})", val, ptr, flags); let ptr = self.check_store(val, ptr); self.llbb().add_assignment(None, ptr.dereference(None), val); - /*let align = - if flags.contains(MemFlags::UNALIGNED) { 1 } else { align.bytes() as c_uint }; - llvm::LLVMSetAlignment(store, align); - if flags.contains(MemFlags::VOLATILE) { - llvm::LLVMSetVolatile(store, llvm::True); - } - if flags.contains(MemFlags::NONTEMPORAL) { - // According to LLVM [1] building a nontemporal store must - // *always* point to a metadata value of the integer 1.
- // - // [1]: http://llvm.org/docs/LangRef.html#store-instruction - let one = self.cx.const_i32(1); - let node = llvm::LLVMMDNodeInContext(self.cx.llcx, &one, 1); - llvm::LLVMSetMetadata(store, llvm::MD_nontemporal as c_uint, node); - }*/ - // NOTE: dummy value here since it's never used. FIXME: API should not return a value here? + // TODO(antoyo): handle align and flags. + // NOTE: dummy value here since it's never used. FIXME(antoyo): API should not return a value here? self.cx.context.new_rvalue_zero(self.type_i32()) } fn atomic_store(&mut self, value: RValue<'gcc>, ptr: RValue<'gcc>, order: AtomicOrdering, size: Size) { - // TODO: handle alignment. + // TODO(antoyo): handle alignment. let atomic_store = self.context.get_builtin_function(&format!("__atomic_store_{}", size.bytes())); let ordering = self.context.new_rvalue_from_int(self.i32_type, order.to_gcc()); let volatile_const_void_ptr_type = self.context.new_type::<*mut ()>().make_const().make_volatile(); let ptr = self.context.new_cast(None, ptr, volatile_const_void_ptr_type); - // FIXME: fix libgccjit to allow comparing an integer type with an aligned integer type because + // FIXME(antoyo): fix libgccjit to allow comparing an integer type with an aligned integer type because // the following cast is required to avoid this error: // gcc_jit_context_new_call: mismatching types for argument 2 of function "__atomic_store_4": assignment to param arg1 (type: int) from loadedValue3577 (type: unsigned int __attribute__((aligned(4)))) let int_type = atomic_store.get_param(1).to_rvalue().get_type(); @@ -1145,14 +939,14 @@ impl<'a, 'gcc, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'gcc, 'tcx> { } fn inbounds_gep(&mut self, _typ: Type<'gcc>, ptr: RValue<'gcc>, indices: &[RValue<'gcc>]) -> RValue<'gcc> { - // FIXME: would be safer if doing the same thing (loop) as gep. - // TODO: specify inbounds somehow. + // FIXME(antoyo): would be safer if doing the same thing (loop) as gep. + // TODO(antoyo): specify inbounds somehow. match indices.len() { 1 => { self.context.new_array_access(None, ptr, indices[0]).get_address(None) }, 2 => { - let array = ptr.dereference(None); // TODO: assert that first index is 0? + let array = ptr.dereference(None); // TODO(antoyo): assert that first index is 0? self.context.new_array_access(None, array, indices[1]).get_address(None) }, _ => unimplemented!(), @@ -1160,7 +954,7 @@ impl<'a, 'gcc, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'gcc, 'tcx> { } fn struct_gep(&mut self, value_type: Type<'gcc>, ptr: RValue<'gcc>, idx: u64) -> RValue<'gcc> { - // FIXME: it would be better if the API only called this on struct, not on arrays. + // FIXME(antoyo): it would be better if the API only called this on struct, not on arrays. assert_eq!(idx as usize as u64, idx); let value = ptr.dereference(None).to_rvalue(); @@ -1186,31 +980,21 @@ impl<'a, 'gcc, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'gcc, 'tcx> { /* Casts */ fn trunc(&mut self, value: RValue<'gcc>, dest_ty: Type<'gcc>) -> RValue<'gcc> { - // TODO: check that it indeed truncate the value. - //println!("trunc: {:?} -> {:?}", value, dest_ty); + // TODO(antoyo): check that it indeed truncates the value. self.context.new_cast(None, value, dest_ty) } fn sext(&mut self, value: RValue<'gcc>, dest_ty: Type<'gcc>) -> RValue<'gcc> { - // TODO: check that it indeed sign extend the value. - //println!("Sext {:?} to {:?}", value, dest_ty); - //if let Some(vector_type) = value.get_type().is_vector() { + // TODO(antoyo): check that it indeed sign extends the value.
if dest_ty.is_vector().is_some() { - // TODO: nothing to do as it is only for LLVM? + // TODO(antoyo): nothing to do as it is only for LLVM? return value; - /*let dest_type = self.context.new_vector_type(dest_ty, vector_type.get_num_units() as u64); - println!("Casting {:?} to {:?}", value, dest_type); - return self.context.new_cast(None, value, dest_type);*/ } self.context.new_cast(None, value, dest_ty) } fn fptoui(&mut self, value: RValue<'gcc>, dest_ty: Type<'gcc>) -> RValue<'gcc> { - //println!("7: fptoui: {:?} to {:?}", value, dest_ty); - let ret = self.context.new_cast(None, value, dest_ty); - //println!("8"); - ret - //unsafe { llvm::LLVMBuildFPToUI(self.llbuilder, val, dest_ty, UNNAMED) } + self.context.new_cast(None, value, dest_ty) } fn fptosi(&mut self, value: RValue<'gcc>, dest_ty: Type<'gcc>) -> RValue<'gcc> { @@ -1218,21 +1002,15 @@ impl<'a, 'gcc, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'gcc, 'tcx> { } fn uitofp(&mut self, value: RValue<'gcc>, dest_ty: Type<'gcc>) -> RValue<'gcc> { - //println!("1: uitofp: {:?} -> {:?}", value, dest_ty); - let ret = self.context.new_cast(None, value, dest_ty); - //println!("2"); - ret + self.context.new_cast(None, value, dest_ty) } fn sitofp(&mut self, value: RValue<'gcc>, dest_ty: Type<'gcc>) -> RValue<'gcc> { - //println!("3: sitofp: {:?} -> {:?}", value, dest_ty); - let ret = self.context.new_cast(None, value, dest_ty); - //println!("4"); - ret + self.context.new_cast(None, value, dest_ty) } fn fptrunc(&mut self, value: RValue<'gcc>, dest_ty: Type<'gcc>) -> RValue<'gcc> { - // TODO: make sure it trancates. + // TODO(antoyo): make sure it truncates. self.context.new_cast(None, value, dest_ty) } @@ -1254,12 +1032,10 @@ impl<'a, 'gcc, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'gcc, 'tcx> { fn intcast(&mut self, value: RValue<'gcc>, dest_typ: Type<'gcc>, _is_signed: bool) -> RValue<'gcc> { // NOTE: is_signed is for value, not dest_typ. - //println!("intcast: {:?} ({:?}) -> {:?}", value, value.get_type(), dest_typ); self.cx.context.new_cast(None, value, dest_typ) } fn pointercast(&mut self, value: RValue<'gcc>, dest_ty: Type<'gcc>) -> RValue<'gcc> { - //println!("pointercast: {:?} ({:?}) -> {:?}", value, value.get_type(), dest_ty); let val_type = value.get_type(); match (type_is_pointer(val_type), type_is_pointer(dest_ty)) { (false, true) => { @@ -1269,7 +1045,6 @@ impl<'a, 'gcc, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'gcc, 'tcx> { }, (false, false) => { // When they are not pointers, we want a transmute (or reinterpret_cast). - //self.cx.context.new_cast(None, value, dest_ty) self.bitcast(value, dest_ty) }, (true, true) => self.cx.context.new_cast(None, value, dest_ty), @@ -1307,7 +1082,7 @@ impl<'a, 'gcc, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'gcc, 'tcx> { let src = self.pointercast(src, self.type_ptr_to(self.type_void())); let memcpy = self.context.get_builtin_function("memcpy"); let block = self.block.expect("block"); - // TODO: handle aligns and is_volatile. + // TODO(antoyo): handle aligns and is_volatile. block.add_eval(None, self.context.new_call(None, memcpy, &[dst, src, size])); } @@ -1326,7 +1101,7 @@ impl<'a, 'gcc, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'gcc, 'tcx> { let memmove = self.context.get_builtin_function("memmove"); let block = self.block.expect("block"); - // TODO: handle is_volatile. + // TODO(antoyo): handle is_volatile. 
block.add_eval(None, self.context.new_call(None, memmove, &[dst, src, size])); } @@ -1335,8 +1110,7 @@ impl<'a, 'gcc, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'gcc, 'tcx> { let ptr = self.pointercast(ptr, self.type_i8p()); let memset = self.context.get_builtin_function("memset"); let block = self.block.expect("block"); - // TODO: handle aligns and is_volatile. - //println!("memset: {:?} -> {:?}", fill_byte, self.i32_type); + // TODO(antoyo): handle align and is_volatile. let fill_byte = self.context.new_cast(None, fill_byte, self.i32_type); let size = self.intcast(size, self.type_size_t(), false); block.add_eval(None, self.context.new_call(None, memset, &[ptr, fill_byte, size])); @@ -1370,27 +1144,18 @@ impl<'a, 'gcc, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'gcc, 'tcx> { #[allow(dead_code)] fn va_arg(&mut self, _list: RValue<'gcc>, _ty: Type<'gcc>) -> RValue<'gcc> { unimplemented!(); - //unsafe { llvm::LLVMBuildVAArg(self.llbuilder, list, ty, UNNAMED) } } fn extract_element(&mut self, _vec: RValue<'gcc>, _idx: RValue<'gcc>) -> RValue<'gcc> { unimplemented!(); - //unsafe { llvm::LLVMBuildExtractElement(self.llbuilder, vec, idx, UNNAMED) } } fn vector_splat(&mut self, _num_elts: usize, _elt: RValue<'gcc>) -> RValue<'gcc> { unimplemented!(); - /*unsafe { - let elt_ty = self.cx.val_ty(elt); - let undef = llvm::LLVMGetUndef(self.type_vector(elt_ty, num_elts as u64)); - let vec = self.insert_element(undef, elt, self.cx.const_i32(0)); - let vec_i32_ty = self.type_vector(self.type_i32(), num_elts as u64); - self.shuffle_vector(vec, undef, self.const_null(vec_i32_ty)) - }*/ } fn extract_value(&mut self, aggregate_value: RValue<'gcc>, idx: u64) -> RValue<'gcc> { - // FIXME: it would be better if the API only called this on struct, not on arrays. + // FIXME(antoyo): it would be better if the API only called this on struct, not on arrays. assert_eq!(idx as usize as u64, idx); let value_type = aggregate_value.get_type(); @@ -1418,12 +1183,10 @@ impl<'a, 'gcc, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'gcc, 'tcx> { else { panic!("Unexpected type {:?}", value_type); } - /*assert_eq!(idx as c_uint as u64, idx); - unsafe { llvm::LLVMBuildExtractValue(self.llbuilder, agg_val, idx as c_uint, UNNAMED) }*/ } fn insert_value(&mut self, aggregate_value: RValue<'gcc>, value: RValue<'gcc>, idx: u64) -> RValue<'gcc> { - // FIXME: it would be better if the API only called this on struct, not on arrays. + // FIXME(antoyo): it would be better if the API only called this on struct, not on arrays. assert_eq!(idx as usize as u64, idx); let value_type = aggregate_value.get_type(); @@ -1459,88 +1222,41 @@ impl<'a, 'gcc, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'gcc, 'tcx> { let struct_type = self.context.new_struct_type(None, "landing_pad", &[field1, field2]); self.current_func().new_local(None, struct_type.as_type(), "landing_pad") .to_rvalue() - // TODO - /*unsafe { - llvm::LLVMBuildLandingPad(self.llbuilder, ty, pers_fn, num_clauses as c_uint, UNNAMED) - }*/ + // TODO(antoyo): Properly implement unwinding. + // the above is just to make the compilation work as it seems + // rustc_codegen_ssa now calls the unwinding builder methods even on panic=abort. 
} fn set_cleanup(&mut self, _landing_pad: RValue<'gcc>) { - // TODO - /*unsafe { - llvm::LLVMSetCleanup(landing_pad, llvm::True); - }*/ + // TODO(antoyo) } fn resume(&mut self, _exn: RValue<'gcc>) -> RValue<'gcc> { unimplemented!(); - //unsafe { llvm::LLVMBuildResume(self.llbuilder, exn) } } fn cleanup_pad(&mut self, _parent: Option<RValue<'gcc>>, _args: &[RValue<'gcc>]) -> Funclet { unimplemented!(); - /*let name = const_cstr!("cleanuppad"); - let ret = unsafe { - llvm::LLVMRustBuildCleanupPad( - self.llbuilder, - parent, - args.len() as c_uint, - args.as_ptr(), - name.as_ptr(), - ) - }; - Funclet::new(ret.expect("LLVM does not have support for cleanuppad"))*/ } fn cleanup_ret(&mut self, _funclet: &Funclet, _unwind: Option<Block<'gcc>>) -> RValue<'gcc> { unimplemented!(); - /*let ret = - unsafe { llvm::LLVMRustBuildCleanupRet(self.llbuilder, funclet.cleanuppad(), unwind) }; - ret.expect("LLVM does not have support for cleanupret")*/ } fn catch_pad(&mut self, _parent: RValue<'gcc>, _args: &[RValue<'gcc>]) -> Funclet { unimplemented!(); - /*let name = const_cstr!("catchpad"); - let ret = unsafe { - llvm::LLVMRustBuildCatchPad( - self.llbuilder, - parent, - args.len() as c_uint, - args.as_ptr(), - name.as_ptr(), - ) - }; - Funclet::new(ret.expect("LLVM does not have support for catchpad"))*/ } fn catch_switch(&mut self, _parent: Option<RValue<'gcc>>, _unwind: Option<Block<'gcc>>, _num_handlers: usize) -> RValue<'gcc> { unimplemented!(); - /*let name = const_cstr!("catchswitch"); - let ret = unsafe { - llvm::LLVMRustBuildCatchSwitch( - self.llbuilder, - parent, - unwind, - num_handlers as c_uint, - name.as_ptr(), - ) - }; - ret.expect("LLVM does not have support for catchswitch")*/ } fn add_handler(&mut self, _catch_switch: RValue<'gcc>, _handler: Block<'gcc>) { unimplemented!(); - /*unsafe { - llvm::LLVMRustAddHandler(catch_switch, handler); - }*/ } fn set_personality_fn(&mut self, _personality: RValue<'gcc>) { - // TODO - /*unsafe { - llvm::LLVMSetPersonalityFn(self.llfn(), personality); - }*/ + // TODO(antoyo) } // Atomic Operations @@ -1551,7 +1267,7 @@ impl<'a, 'gcc, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'gcc, 'tcx> { let pair_type = self.cx.type_struct(&[src.get_type(), self.bool_type], false); let result = self.current_func().new_local(None, pair_type, "atomic_cmpxchg_result"); - let align = Align::from_bits(64).expect("align"); // TODO: use good align. + let align = Align::from_bits(64).expect("align"); // TODO(antoyo): use good align. let value_type = result.to_rvalue().get_type(); if let Some(struct_type) = value_type.is_struct() { @@ -1560,7 +1276,7 @@ impl<'a, 'gcc, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'gcc, 'tcx> { // expected so that we store expected after the call. self.store(expected.to_rvalue(), result.access_field(None, struct_type.get_field(0)).get_address(None), align); } - // TODO: handle when value is not a struct. + // TODO(antoyo): handle when value is not a struct. result.to_rvalue() } @@ -1589,7 +1305,7 @@ impl<'a, 'gcc, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'gcc, 'tcx> { let void_ptr_type = self.context.new_type::<*mut ()>(); let volatile_void_ptr_type = void_ptr_type.make_volatile(); let dst = self.context.new_cast(None, dst, volatile_void_ptr_type); - // NOTE: not sure why, but we have the wrong type here. + // FIXME(antoyo): not sure why, but we have the wrong type here.
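The atomic_cmpxchg hunk above models its result as a two-field struct of { value, bool }: the value actually observed in memory plus a success flag. An editor's illustration of that same contract using Rust's own compare_exchange (not code from the patch):

```rust
use std::sync::atomic::{AtomicU32, Ordering};

fn main() {
    let cell = AtomicU32::new(5);
    // Success: the current value matched the expected 5, so 10 was stored.
    let first = cell.compare_exchange(5, 10, Ordering::SeqCst, Ordering::SeqCst);
    assert_eq!(first, Ok(5));
    // Failure: nothing is stored and the observed value (10) comes back,
    // mirroring how the builder stores the observed value over `expected`.
    let second = cell.compare_exchange(5, 20, Ordering::SeqCst, Ordering::SeqCst);
    assert_eq!(second, Err(10));
}
```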
let new_src_type = atomic_function.get_param(1).to_rvalue().get_type(); let src = self.context.new_cast(None, src, new_src_type); let res = self.context.new_call(None, atomic_function, &[dst, src, order]); @@ -1610,28 +1326,19 @@ impl<'a, 'gcc, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'gcc, 'tcx> { fn set_invariant_load(&mut self, load: RValue<'gcc>) { // NOTE: Hack to consider vtable function pointer as non-global-variable function pointer. self.normal_function_addresses.borrow_mut().insert(load); - // TODO - /*unsafe { - llvm::LLVMSetMetadata( - load, - llvm::MD_invariant_load as c_uint, - llvm::LLVMMDNodeInContext(self.cx.llcx, ptr::null(), 0), - ); - }*/ + // TODO(antoyo) } fn lifetime_start(&mut self, _ptr: RValue<'gcc>, _size: Size) { - // TODO - //self.call_lifetime_intrinsic("llvm.lifetime.start.p0i8", ptr, size); + // TODO(antoyo) } fn lifetime_end(&mut self, _ptr: RValue<'gcc>, _size: Size) { - // TODO - //self.call_lifetime_intrinsic("llvm.lifetime.end.p0i8", ptr, size); + // TODO(antoyo) } fn call(&mut self, _typ: Type<'gcc>, func: RValue<'gcc>, args: &[RValue<'gcc>], funclet: Option<&Funclet>) -> RValue<'gcc> { - // FIXME: remove when having a proper API. + // FIXME(antoyo): remove when having a proper API. let gcc_func = unsafe { std::mem::transmute(func) }; if self.functions.borrow().values().find(|value| **value == gcc_func).is_some() { self.function_call(func, args, funclet) @@ -1643,13 +1350,12 @@ impl<'a, 'gcc, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'gcc, 'tcx> { } fn zext(&mut self, value: RValue<'gcc>, dest_typ: Type<'gcc>) -> RValue<'gcc> { - // FIXME: this does not zero-extend. + // FIXME(antoyo): this does not zero-extend. if value.get_type().is_bool() && dest_typ.is_i8(&self.cx) { - // FIXME: hack because base::from_immediate converts i1 to i8. + // FIXME(antoyo): hack because base::from_immediate converts i1 to i8. // Fix the code in codegen_ssa::base::from_immediate. return value; } - //println!("zext: {:?} -> {:?}", value, dest_typ); self.context.new_cast(None, value, dest_typ) } @@ -1659,7 +1365,6 @@ impl<'a, 'gcc, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'gcc, 'tcx> { fn do_not_inline(&mut self, _llret: RValue<'gcc>) { unimplemented!(); - //llvm::Attribute::NoInline.apply_callsite(llvm::AttributePlace::Function, llret); } fn set_span(&mut self, _span: Span) {} @@ -1690,24 +1395,6 @@ impl<'a, 'gcc, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'gcc, 'tcx> { fn instrprof_increment(&mut self, _fn_name: RValue<'gcc>, _hash: RValue<'gcc>, _num_counters: RValue<'gcc>, _index: RValue<'gcc>) { unimplemented!(); - /*debug!( - "instrprof_increment() with args ({:?}, {:?}, {:?}, {:?})", - fn_name, hash, num_counters, index - ); - - let llfn = unsafe { llvm::LLVMRustGetInstrProfIncrementIntrinsic(self.cx().llmod) }; - let args = &[fn_name, hash, num_counters, index]; - let args = self.check_call("call", llfn, args); - - unsafe { - let _ = llvm::LLVMRustBuildCall( - self.llbuilder, - llfn, - args.as_ptr() as *const &llvm::Value, - args.len() as c_uint, - None, - ); - }*/ } } @@ -1766,7 +1453,7 @@ impl ToGccComp for IntPredicate { impl ToGccComp for RealPredicate { fn to_gcc_comparison(&self) -> ComparisonOp { - // TODO: check that ordered vs non-ordered is respected. + // TODO(antoyo): check that ordered vs non-ordered is respected. 
match *self { RealPredicate::RealPredicateFalse => unreachable!(), RealPredicate::RealOEQ => ComparisonOp::Equals, @@ -1809,9 +1496,9 @@ impl ToGccOrdering for AtomicOrdering { let ordering = match self { - AtomicOrdering::NotAtomic => __ATOMIC_RELAXED, // TODO: check if that's the same. + AtomicOrdering::NotAtomic => __ATOMIC_RELAXED, // TODO(antoyo): check if that's the same. AtomicOrdering::Unordered => __ATOMIC_RELAXED, - AtomicOrdering::Monotonic => __ATOMIC_RELAXED, // TODO: check if that's the same. + AtomicOrdering::Monotonic => __ATOMIC_RELAXED, // TODO(antoyo): check if that's the same. AtomicOrdering::Acquire => __ATOMIC_ACQUIRE, AtomicOrdering::Release => __ATOMIC_RELEASE, AtomicOrdering::AcquireRelease => __ATOMIC_ACQ_REL, diff --git a/src/callee.rs b/src/callee.rs index d0ae96adcedaf..4ea084ef729d3 100644 --- a/src/callee.rs +++ b/src/callee.rs @@ -17,8 +17,6 @@ use crate::context::CodegenCx; pub fn get_fn<'gcc, 'tcx>(cx: &CodegenCx<'gcc, 'tcx>, instance: Instance<'tcx>) -> RValue<'gcc> { let tcx = cx.tcx(); - //debug!("get_fn(instance={:?})", instance); - assert!(!instance.substs.needs_infer()); assert!(!instance.substs.has_escaping_bound_vars()); assert!(!instance.substs.has_param_types_or_consts()); @@ -28,11 +26,9 @@ pub fn get_fn<'gcc, 'tcx>(cx: &CodegenCx<'gcc, 'tcx>, instance: Instance<'tcx>) } let sym = tcx.symbol_name(instance).name; - //debug!("get_fn({:?}: {:?}) => {}", instance, instance.monomorphic_ty(cx.tcx()), sym); let fn_abi = FnAbi::of_instance(cx, instance, &[]); - // TODO let func = if let Some(func) = cx.get_declared_value(&sym) { // Create a fn pointer with the new signature. @@ -62,34 +58,18 @@ pub fn get_fn<'gcc, 'tcx>(cx: &CodegenCx<'gcc, 'tcx>, instance: Instance<'tcx>) // reference. It also occurs when testing libcore and in some // other weird situations. Annoying. if cx.val_ty(func) != ptrty { - //debug!("get_fn: casting {:?} to {:?}", func, ptrty); - // TODO - //cx.const_ptrcast(func, ptrty) + // TODO(antoyo): cast the pointer. func } else { - //debug!("get_fn: not casting pointer!"); func } } else { cx.linkage.set(FunctionType::Extern); let func = cx.declare_fn(&sym, &fn_abi); - //cx.linkage.set(FunctionType::Internal); - //debug!("get_fn: not casting pointer!"); - - // TODO - //attributes::from_fn_attrs(cx, func, instance); - - //let instance_def_id = instance.def_id(); - - // TODO - /*if cx.use_dll_storage_attrs && tcx.is_dllimport_foreign_item(instance_def_id) { - unsafe { - llvm::LLVMSetDLLStorageClass(func, llvm::DLLStorageClass::DllImport); - } - }*/ + // TODO(antoyo): set linkage and attributes. func }; diff --git a/src/common.rs b/src/common.rs index 3178ada9ec3e2..752ba99af9ce3 100644 --- a/src/common.rs +++ b/src/common.rs @@ -27,7 +27,7 @@ impl<'gcc, 'tcx> CodegenCx<'gcc, 'tcx> { } fn const_cstr(&self, symbol: Symbol, _null_terminated: bool) -> RValue<'gcc> { - // TODO: handle null_terminated. + // TODO(antoyo): handle null_terminated. if let Some(&value) = self.const_cstr_cache.borrow().get(&symbol) { return value.to_rvalue(); } @@ -39,7 +39,7 @@ impl<'gcc, 'tcx> CodegenCx<'gcc, 'tcx> { } fn global_string(&self, string: &str) -> RValue<'gcc> { - // TODO: handle non-null-terminated strings. + // TODO(antoyo): handle non-null-terminated strings. let string = self.context.new_string_literal(&*string); let sym = self.generate_local_symbol_name("str"); // NOTE: TLS is always off for a string litteral. 
@@ -48,7 +48,7 @@ impl<'gcc, 'tcx> CodegenCx<'gcc, 'tcx> { .unwrap_or_else(|| bug!("symbol `{}` is already defined", sym)); self.global_init_block.add_assignment(None, global.dereference(None), string); global.to_rvalue() - //llvm::LLVMRustSetLinkage(global, llvm::Linkage::InternalLinkage); + // TODO(antoyo): set linkage. } pub fn inttoptr(&self, block: Block<'gcc>, value: RValue<'gcc>, dest_ty: Type<'gcc>) -> RValue<'gcc> { @@ -62,7 +62,7 @@ impl<'gcc, 'tcx> CodegenCx<'gcc, 'tcx> { } pub fn ptrtoint(&self, block: Block<'gcc>, value: RValue<'gcc>, dest_ty: Type<'gcc>) -> RValue<'gcc> { - // TODO: when libgccjit allow casting from pointer to int, remove this. + // TODO(antoyo): when libgccjit allows casting from pointer to int, remove this. let func = block.get_function(); let local = func.new_local(None, value.get_type(), "ptrLocal"); block.add_assignment(None, local, value); @@ -71,10 +71,6 @@ impl<'gcc, 'tcx> CodegenCx<'gcc, 'tcx> { let ptr = self.context.new_cast(None, ptr_address, dest_ty.make_pointer()); ptr.dereference(None).to_rvalue() } - - /*pub fn const_vector(&self, elements: &[RValue<'gcc>]) -> RValue<'gcc> { - self.context.new_rvalue_from_vector(None, elements[0].get_type(), elements) - }*/ } pub fn bytes_in_context<'gcc, 'tcx>(cx: &CodegenCx<'gcc, 'tcx>, bytes: &[u8]) -> RValue<'gcc> { @@ -125,13 +121,13 @@ impl<'gcc, 'tcx> ConstMethods<'tcx> for CodegenCx<'gcc, 'tcx> { fn const_uint_big(&self, typ: Type<'gcc>, num: u128) -> RValue<'gcc> { let num64: Result<i64, _> = num.try_into(); if let Ok(num) = num64 { - // FIXME: workaround for a bug where libgccjit is expecting a constant. + // FIXME(antoyo): workaround for a bug where libgccjit is expecting a constant. // The operations >> 64 and | low are making the normal case a non-constant. return self.context.new_rvalue_from_long(typ, num as i64); } if num >> 64 != 0 { - // FIXME: use a new function new_rvalue_from_unsigned_long()? + // FIXME(antoyo): use a new function new_rvalue_from_unsigned_long()? let low = self.context.new_rvalue_from_long(self.u64_type, num as u64 as i64); let high = self.context.new_rvalue_from_long(typ, (num >> 64) as u64 as i64); @@ -175,12 +171,10 @@ impl<'gcc, 'tcx> ConstMethods<'tcx> for CodegenCx<'gcc, 'tcx> { fn const_u8(&self, _i: u8) -> RValue<'gcc> { unimplemented!(); - //self.const_uint(self.type_i8(), i as u64) } fn const_real(&self, _t: Type<'gcc>, _val: f64) -> RValue<'gcc> { unimplemented!(); - //unsafe { llvm::LLVMConstReal(t, val) } } fn const_str(&self, s: Symbol) -> (RValue<'gcc>, RValue<'gcc>) { @@ -195,7 +189,7 @@ impl<'gcc, 'tcx> ConstMethods<'tcx> for CodegenCx<'gcc, 'tcx> { let fields: Vec<_> = values.iter() .map(|value| value.get_type()) .collect(); - // TODO: cache the type? It's anonymous, so probably not. + // TODO(antoyo): cache the type? It's anonymous, so probably not.
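The const_uint_big hunk above handles constants wider than 64 bits by splitting them into two halves and recombining them as (high << 64) | low, since libgccjit only accepts 64-bit immediates through new_rvalue_from_long. An editor's worked example of that split in plain Rust (not code from the patch):

```rust
fn main() {
    let num: u128 = 0x1234_5678_9abc_def0_1111_2222_3333_4444;
    let low = num as u64;          // low 64 bits
    let high = (num >> 64) as u64; // high 64 bits
    // The same recombination the codegen emits as gccjit operations.
    let rebuilt = ((high as u128) << 64) | (low as u128);
    assert_eq!(rebuilt, num);
    println!("high = {:#x}, low = {:#x}", high, low);
}
```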
let name = fields.iter().map(|typ| format!("{:?}", typ)).collect::<Vec<_>>().join("_"); let typ = self.type_struct(&fields, packed); let structure = self.global_init_func.new_local(None, typ, &name); @@ -209,19 +203,13 @@ impl<'gcc, 'tcx> ConstMethods<'tcx> for CodegenCx<'gcc, 'tcx> { } fn const_to_opt_uint(&self, _v: RValue<'gcc>) -> Option<u64> { - // TODO + // TODO(antoyo) None - //try_as_const_integral(v).map(|v| unsafe { llvm::LLVMConstIntGetZExtValue(v) }) } fn const_to_opt_u128(&self, _v: RValue<'gcc>, _sign_ext: bool) -> Option<u128> { - // TODO + // TODO(antoyo) None - /*try_as_const_integral(v).and_then(|v| unsafe { - let (mut lo, mut hi) = (0u64, 0u64); - let success = llvm::LLVMRustConstInt128Get(v, sign_ext, &mut hi, &mut lo); - success.then_some(hi_lo_to_u128(lo, hi)) - })*/ } fn scalar_to_backend(&self, cv: Scalar, layout: &abi::Scalar, ty: Type<'gcc>) -> RValue<'gcc> { @@ -234,7 +222,7 @@ impl<'gcc, 'tcx> ConstMethods<'tcx> for CodegenCx<'gcc, 'tcx> { Scalar::Int(int) => { let data = int.assert_bits(layout.value.size(self)); - // FIXME: there's some issues with using the u128 code that follows, so hard-code + // FIXME(antoyo): there's some issues with using the u128 code that follows, so hard-code // the paths for floating-point values. if ty == self.float_type { return self.context.new_rvalue_from_double(ty, f32::from_bits(data as u32) as f64); @@ -262,8 +250,7 @@ impl<'gcc, 'tcx> ConstMethods<'tcx> for CodegenCx<'gcc, 'tcx> { _ => self.static_addr_of(init, alloc.align, None), }; if !self.sess().fewer_names() { - // TODO - //llvm::set_value_name(value, format!("{:?}", ptr.alloc_id).as_bytes()); + // TODO(antoyo): set value name. } value }, diff --git a/src/consts.rs b/src/consts.rs index 2cdd7fcab8b46..9b7959503ab1b 100644 --- a/src/consts.rs +++ b/src/consts.rs @@ -32,22 +32,11 @@ impl<'gcc, 'tcx> CodegenCx<'gcc, 'tcx> { impl<'gcc, 'tcx> StaticMethods for CodegenCx<'gcc, 'tcx> { fn static_addr_of(&self, cv: RValue<'gcc>, align: Align, kind: Option<&str>) -> RValue<'gcc> { if let Some(global_value) = self.const_globals.borrow().get(&cv) { - // TODO - /*unsafe { - // Upgrade the alignment in cases where the same constant is used with different - // alignment requirements - let llalign = align.bytes() as u32; - if llalign > llvm::LLVMGetAlignment(gv) { - llvm::LLVMSetAlignment(gv, llalign); - } - }*/ + // TODO(antoyo): upgrade alignment. return *global_value; } let global_value = self.static_addr_of_mut(cv, align, kind); - // TODO - /*unsafe { - llvm::LLVMSetGlobalConstant(global_value, True); - }*/ + // TODO(antoyo): set global constant. self.const_globals.borrow_mut().insert(cv, global_value); global_value } @@ -73,9 +62,7 @@ impl<'gcc, 'tcx> StaticMethods for CodegenCx<'gcc, 'tcx> { let val_llty = self.val_ty(value); let value = if val_llty == self.type_i1() { - //val_llty = self.type_i8(); unimplemented!(); - //llvm::LLVMConstZExt(value, val_llty) } else { value @@ -92,26 +79,17 @@ impl<'gcc, 'tcx> StaticMethods for CodegenCx<'gcc, 'tcx> { else { // If we created the global with the wrong type, // correct the type. - /*let name = llvm::get_value_name(global).to_vec(); - llvm::set_value_name(global, b""); - - let linkage = llvm::LLVMRustGetLinkage(global); - let visibility = llvm::LLVMRustGetVisibility(global);*/ + // TODO(antoyo): set value name, linkage and visibility.
let new_global = self.get_or_insert_global(&name, val_llty, is_tls, attrs.link_section); - /*llvm::LLVMRustSetLinkage(new_global, linkage); - llvm::LLVMRustSetVisibility(new_global, visibility);*/ - // To avoid breaking any invariants, we leave around the old // global for the moment; we'll replace all references to it // with the new global later. (See base::codegen_backend.) //self.statics_to_rauw.borrow_mut().push((global, new_global)); new_global }; - // TODO - //set_global_alignment(&self, global, self.align_of(ty)); - //llvm::LLVMSetInitializer(global, value); + // TODO(antoyo): set alignment and initializer. let value = self.rvalue_as_lvalue(value); let value = value.get_address(None); let dest_typ = global.get_type(); @@ -119,14 +97,14 @@ impl<'gcc, 'tcx> StaticMethods for CodegenCx<'gcc, 'tcx> { // NOTE: do not init the variables related to argc/argv because it seems we cannot // overwrite those variables. - // FIXME: correctly support global variable initialization. + // FIXME(antoyo): correctly support global variable initialization. let skip_init = [ ARGV_INIT_ARRAY, ARGC, ARGV, ]; if !skip_init.iter().any(|symbol_name| name.starts_with(symbol_name)) { - // TODO: switch to set_initializer when libgccjit supports that. + // TODO(antoyo): switch to set_initializer when libgccjit supports that. let memcpy = self.context.get_builtin_function("memcpy"); let dst = self.context.new_cast(None, global, self.type_i8p()); let src = self.context.new_cast(None, value, self.type_ptr_to(self.type_void())); @@ -138,13 +116,10 @@ impl<'gcc, 'tcx> StaticMethods for CodegenCx<'gcc, 'tcx> { // mutability are placed into read-only memory. if !is_mutable { if self.type_is_freeze(ty) { - // TODO - //llvm::LLVMSetGlobalConstant(global, llvm::True); + // TODO(antoyo): set global constant. } } - //debuginfo::create_global_var_metadata(&self, def_id, global); - if attrs.flags.contains(CodegenFnAttrFlags::THREAD_LOCAL) { // Do not allow LLVM to change the alignment of a TLS on macOS. // @@ -184,19 +159,7 @@ impl<'gcc, 'tcx> StaticMethods for CodegenCx<'gcc, 'tcx> { // happens to be zero. Instead, we should only check the value of defined bytes // and set all undefined bytes to zero if this allocation is headed for the // BSS. - /*let all_bytes_are_zero = alloc.relocations().is_empty() - && alloc - .inspect_with_uninit_and_ptr_outside_interpreter(0..alloc.len()) - .iter() - .all(|&byte| byte == 0); - - let sect_name = if all_bytes_are_zero { - CStr::from_bytes_with_nul_unchecked(b"__DATA,__thread_bss\0") - } else { - CStr::from_bytes_with_nul_unchecked(b"__DATA,__thread_data\0") - };*/ unimplemented!(); - //llvm::LLVMSetSection(global, sect_name.as_ptr()); } } @@ -205,34 +168,9 @@ impl<'gcc, 'tcx> StaticMethods for CodegenCx<'gcc, 'tcx> { if self.tcx.sess.opts.target_triple.triple().starts_with("wasm32") { if let Some(_section) = attrs.link_section { unimplemented!(); - /*let section = llvm::LLVMMDStringInContext( - self.llcx, - section.as_str().as_ptr().cast(), - section.as_str().len() as c_uint, - ); - assert!(alloc.relocations().is_empty()); - - // The `inspect` method is okay here because we checked relocations, and - // because we are doing this access to inspect the final interpreter state (not - // as part of the interpreter execution). 
- let bytes = - alloc.inspect_with_uninit_and_ptr_outside_interpreter(0..alloc.len()); - let alloc = llvm::LLVMMDStringInContext( - self.llcx, - bytes.as_ptr().cast(), - bytes.len() as c_uint, - ); - let data = [section, alloc]; - let meta = llvm::LLVMMDNodeInContext(self.llcx, data.as_ptr(), 2); - llvm::LLVMAddNamedMetadataOperand( - self.llmod, - "wasm.custom_sections\0".as_ptr().cast(), - meta, - );*/ } } else { - // TODO - //base::set_link_section(global, &attrs); + // TODO(antoyo): set link section. } if attrs.flags.contains(CodegenFnAttrFlags::USED) { @@ -242,9 +180,7 @@ impl<'gcc, 'tcx> StaticMethods for CodegenCx<'gcc, 'tcx> { /// Add a global value to a list to be stored in the `llvm.used` variable, an array of i8*. fn add_used_global(&self, _global: RValue<'gcc>) { - // TODO - //let cast = self.context.new_cast(None, global, self.type_i8p()); - //self.used_statics.borrow_mut().push(cast); + // TODO(antoyo) } } @@ -254,13 +190,13 @@ impl<'gcc, 'tcx> CodegenCx<'gcc, 'tcx> { match kind { Some(kind) if !self.tcx.sess.fewer_names() => { let name = self.generate_local_symbol_name(kind); - // TODO: check if it's okay that TLS is off here. - // TODO: check if it's okay that link_section is None here. - // TODO: set alignment here as well. + // TODO(antoyo): check if it's okay that TLS is off here. + // TODO(antoyo): check if it's okay that link_section is None here. + // TODO(antoyo): set alignment here as well. let gv = self.define_global(&name[..], self.val_ty(cv), false, None).unwrap_or_else(|| { bug!("symbol `{}` is already defined", name); }); - //llvm::LLVMRustSetLinkage(gv, llvm::Linkage::PrivateLinkage); + // TODO(antoyo): set linkage. (name, gv) } _ => { @@ -271,13 +207,13 @@ impl<'gcc, 'tcx> CodegenCx<'gcc, 'tcx> { (name, global) }, }; - // FIXME: I think the name coming from generate_local_symbol_name() above cannot be used + // FIXME(antoyo): I think the name coming from generate_local_symbol_name() above cannot be used // globally. // NOTE: global seems to only be global in a module. So save the name instead of the value // to import it later. self.global_names.borrow_mut().insert(cv, name); self.global_init_block.add_assignment(None, gv.dereference(None), cv); - //llvm::SetUnnamedAddress(gv, llvm::UnnamedAddr::Global); + // TODO(antoyo): set unnamed address. gv } @@ -285,19 +221,6 @@ impl<'gcc, 'tcx> CodegenCx<'gcc, 'tcx> { let instance = Instance::mono(self.tcx, def_id); let fn_attrs = self.tcx.codegen_fn_attrs(def_id); if let Some(&global) = self.instances.borrow().get(&instance) { - /*let attrs = self.tcx.codegen_fn_attrs(def_id); - let name = &*self.tcx.symbol_name(instance).name; - let name = - if let Some(linkage) = attrs.linkage { - // This is to match what happens in check_and_apply_linkage. 
- Cow::from(format!("_rust_extern_with_linkage_{}", name)) - } - else { - Cow::from(name) - }; - let global = self.context.new_global(None, GlobalKind::Imported, global.get_type(), &name) - .get_address(None); - self.global_names.borrow_mut().insert(global, name.to_string());*/ return global; } @@ -313,8 +236,6 @@ impl<'gcc, 'tcx> CodegenCx<'gcc, 'tcx> { let ty = instance.ty(self.tcx, ty::ParamEnv::reveal_all()); let sym = self.tcx.symbol_name(instance).name; - //debug!("get_static: sym={} instance={:?}", sym, instance); - let global = if let Some(def_id) = def_id.as_local() { let id = self.tcx.hir().local_def_id_to_hir_id(def_id); @@ -332,9 +253,7 @@ impl<'gcc, 'tcx> CodegenCx<'gcc, 'tcx> { let global = self.declare_global(&sym, llty, is_tls, fn_attrs.link_section); if !self.tcx.is_reachable_non_generic(def_id) { - /*unsafe { - llvm::LLVMRustSetVisibility(global, llvm::Visibility::Hidden); - }*/ + // TODO(antoyo): set visibility. } global @@ -352,8 +271,6 @@ impl<'gcc, 'tcx> CodegenCx<'gcc, 'tcx> { item => bug!("get_static: expected static, found {:?}", item), }; - //debug!("get_static: sym={} attrs={:?}", sym, attrs); - global } else { @@ -364,11 +281,7 @@ impl<'gcc, 'tcx> CodegenCx<'gcc, 'tcx> { let span = self.tcx.def_span(def_id); let global = check_and_apply_linkage(&self, &attrs, ty, sym, span); - let needs_dll_storage_attr = false; /*self.use_dll_storage_attrs && !self.tcx.is_foreign_item(def_id) && - // ThinLTO can't handle this workaround in all cases, so we don't - // emit the attrs. Instead we make them unnecessary by disallowing - // dynamic linking when linker plugin based LTO is enabled. - !self.tcx.sess.opts.cg.linker_plugin_lto.enabled();*/ + let needs_dll_storage_attr = false; // TODO(antoyo) // If this assertion triggers, there's something wrong with commandline // argument validation. @@ -391,20 +304,12 @@ impl<'gcc, 'tcx> CodegenCx<'gcc, 'tcx> { // is_codegened_item query. if !self.tcx.is_codegened_item(def_id) { unimplemented!(); - /*unsafe { - llvm::LLVMSetDLLStorageClass(global, llvm::DLLStorageClass::DllImport); - }*/ } } global }; - /*if self.use_dll_storage_attrs && self.tcx.is_dllimport_foreign_item(def_id) { - // For foreign (native) libs we know the exact storage type to use. - unsafe { - llvm::LLVMSetDLLStorageClass(global, llvm::DLLStorageClass::DllImport); - } - }*/ + // TODO(antoyo): set dll storage class. self.instances.borrow_mut().insert(instance, global); global @@ -474,8 +379,6 @@ fn check_and_apply_linkage<'gcc, 'tcx>(cx: &CodegenCx<'gcc, 'tcx>, attrs: &Codeg let is_tls = attrs.flags.contains(CodegenFnAttrFlags::THREAD_LOCAL); let llty = cx.layout_of(ty).gcc_type(cx, true); if let Some(linkage) = attrs.linkage { - //debug!("get_static: sym={} linkage={:?}", sym, linkage); - // If this is a static with a linkage specified, then we need to handle // it a little specially. The typesystem prevents things like &T and // extern "C" fn() from being non-null, so we can't just declare a @@ -506,10 +409,10 @@ fn check_and_apply_linkage<'gcc, 'tcx>(cx: &CodegenCx<'gcc, 'tcx>, attrs: &Codeg cx.define_global(&real_name, llty, is_tls, attrs.link_section).unwrap_or_else(|| { cx.sess().span_fatal(span, &format!("symbol `{}` is already defined", &sym)) }); - //llvm::LLVMRustSetLinkage(global2, llvm::Linkage::InternalLinkage); + // TODO(antoyo): set linkage. 
let lvalue = global2.dereference(None); cx.global_init_block.add_assignment(None, lvalue, global1); - //llvm::LLVMSetInitializer(global2, global1); + // TODO(antoyo): use global_set_initializer() when it will work. global2 } else { diff --git a/src/context.rs b/src/context.rs index 19243b0cbce5d..7ab1ca0d771c1 100644 --- a/src/context.rs +++ b/src/context.rs @@ -41,7 +41,7 @@ pub struct CodegenCx<'gcc, 'tcx> { pub codegen_unit: &'tcx CodegenUnit<'tcx>, pub context: &'gcc Context<'gcc>, - // TODO: First set it to a dummy block to avoid using Option? + // TODO(antoyo): First set it to a dummy block to avoid using Option? pub current_block: RefCell<Option<Block<'gcc>>>, pub current_func: RefCell<Option<Function<'gcc>>>, pub normal_function_addresses: RefCell<FxHashSet<RValue<'gcc>>>, @@ -104,7 +104,7 @@ pub struct CodegenCx<'gcc, 'tcx> { /// Cache of globals. pub globals: RefCell<HashMap<String, RValue<'gcc>>>, - // TODO: remove global_names. + // TODO(antoyo): remove global_names. pub global_names: RefCell<HashMap<RValue<'gcc>, String>>, /// A counter that is used for generating local symbol names @@ -119,13 +119,13 @@ pub struct CodegenCx<'gcc, 'tcx> { /// `const_undef()` returns struct as pointer so that they can later be assigned a value. /// As such, this set remembers which of these pointers were returned by this function so that /// they can be derefered later. - /// FIXME: fix the rustc API to avoid having this hack. + /// FIXME(antoyo): fix the rustc API to avoid having this hack. pub structs_as_pointer: RefCell<FxHashSet<RValue<'gcc>>>, /// Store the pointer of different types for safety. /// When casting the values back to their original types, check that they are indeed that type /// with these sets. - /// FIXME: remove when the API supports more types. + /// FIXME(antoyo): remove when the API supports more types. #[cfg(debug_assertions)] lvalues: RefCell<FxHashSet<LValue<'gcc>>>, } @@ -133,22 +133,20 @@ impl<'gcc, 'tcx> CodegenCx<'gcc, 'tcx> { pub fn new(context: &'gcc Context<'gcc>, codegen_unit: &'tcx CodegenUnit<'tcx>, tcx: TyCtxt<'tcx>) -> Self { let check_overflow = tcx.sess.overflow_checks(); - // TODO: fix this mess. libgccjit seems to return random type when using new_int_type(). - //let isize_type = context.new_int_type((tcx.data_layout.pointer_size.bits() / 8) as i32, true); + // TODO(antoyo): fix this mess. libgccjit seems to return random type when using new_int_type(). let isize_type = context.new_c_type(CType::LongLong); - //let usize_type = context.new_int_type((tcx.data_layout.pointer_size.bits() / 8) as i32, false); let usize_type = context.new_c_type(CType::ULongLong); let bool_type = context.new_type::<bool>(); let i8_type = context.new_type::<i8>(); let i16_type = context.new_type::<i16>(); let i32_type = context.new_type::<i32>(); let i64_type = context.new_c_type(CType::LongLong); - let i128_type = context.new_c_type(CType::Int128t).get_aligned(8); // TODO: should this be hard-coded? + let i128_type = context.new_c_type(CType::Int128t).get_aligned(8); // TODO(antoyo): should the alignment be hard-coded? let u8_type = context.new_type::<u8>(); let u16_type = context.new_type::<u16>(); let u32_type = context.new_type::<u32>(); let u64_type = context.new_c_type(CType::ULongLong); - let u128_type = context.new_c_type(CType::UInt128t).get_aligned(8); // TODO: should this be hard-coded? + let u128_type = context.new_c_type(CType::UInt128t).get_aligned(8); // TODO(antoyo): should the alignment be hard-coded?
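The hunk above builds the context's primitive type table, falling back to new_c_type() where the patch found new_int_type() unreliable. A minimal, self-contained sketch of the same setup with the gccjit crate (constructor names taken from this patch; the surrounding program is illustrative only):

```rust
use gccjit::{CType, Context};

fn main() {
    let context = Context::default();
    // Common widths come from new_type::<T>()...
    let bool_type = context.new_type::<bool>();
    let i32_type = context.new_type::<i32>();
    // ...while 64- and 128-bit integers go through C type names, with 128-bit
    // types given an explicit 8-byte alignment as in the patch.
    let u64_type = context.new_c_type(CType::ULongLong);
    let i128_type = context.new_c_type(CType::Int128t).get_aligned(8);
    // The handles are later used to declare locals, globals and functions.
    let _ = (bool_type, i32_type, u64_type, i128_type);
}
```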
let tls_model = to_gcc_tls_mode(tcx.sess.tls_model()); @@ -261,7 +259,6 @@ impl<'gcc, 'tcx> CodegenCx<'gcc, 'tcx> { pub fn rvalue_as_lvalue(&self, value: RValue<'gcc>) -> LValue<'gcc> { let lvalue: LValue<'gcc> = unsafe { std::mem::transmute(value) }; - //debug_assert!(self.lvalues.borrow().contains(&lvalue), "{:?} is not an lvalue", value); lvalue } @@ -276,11 +273,11 @@ impl<'gcc, 'tcx> BackendTypes for CodegenCx<'gcc, 'tcx> { type BasicBlock = Block<'gcc>; type Type = Type<'gcc>; - type Funclet = (); // TODO + type Funclet = (); // TODO(antoyo) - type DIScope = (); // TODO - type DILocation = (); // TODO - type DIVariable = (); // TODO + type DIScope = (); // TODO(antoyo) + type DILocation = (); // TODO(antoyo) + type DIVariable = (); // TODO(antoyo) } impl<'gcc, 'tcx> MiscMethods<'tcx> for CodegenCx<'gcc, 'tcx> { @@ -295,17 +292,12 @@ impl<'gcc, 'tcx> MiscMethods<'tcx> for CodegenCx<'gcc, 'tcx> { } fn get_fn_addr(&self, instance: Instance<'tcx>) -> RValue<'gcc> { - //let symbol = self.tcx.symbol_name(instance).name; - let func = get_fn(self, instance); let func = self.rvalue_as_function(func); let ptr = func.get_address(None); - // TODO: don't do this twice: i.e. in declare_fn and here. - //let fn_abi = FnAbi::of_instance(self, instance, &[]); - //let (return_type, params, _) = fn_abi.gcc_type(self); - // FIXME: the rustc API seems to call get_fn_addr() when not needed (e.g. for FFI). - //let pointer_type = ptr.get_type(); + // TODO(antoyo): don't do this twice: i.e. in declare_fn and here. + // FIXME(antoyo): the rustc API seems to call get_fn_addr() when not needed (e.g. for FFI). self.normal_function_addresses.borrow_mut().insert(ptr); @@ -354,12 +346,12 @@ impl<'gcc, 'tcx> MiscMethods<'tcx> for CodegenCx<'gcc, 'tcx> { "rust_eh_personality" }; //let func = self.declare_func(name, self.type_i32(), &[], true); - // FIXME: this hack should not be needed. That will probably be removed when + // FIXME(antoyo): this hack should not be needed. That will probably be removed when // unwinding support is added. self.context.new_rvalue_from_int(self.int_type, 0) } }; - //attributes::apply_target_cpu_attr(self, llfn); + // TODO(antoyo): apply target cpu attributes. 
self.eh_personality.set(Some(llfn)); llfn } @@ -378,32 +370,18 @@ impl<'gcc, 'tcx> MiscMethods<'tcx> for CodegenCx<'gcc, 'tcx> { fn used_statics(&self) -> &RefCell<Vec<RValue<'gcc>>> { unimplemented!(); - //&self.used_statics } fn set_frame_pointer_type(&self, _llfn: RValue<'gcc>) { - // TODO - //attributes::set_frame_pointer_type(self, llfn) + // TODO(antoyo) } fn apply_target_cpu_attr(&self, _llfn: RValue<'gcc>) { - // TODO - //attributes::apply_target_cpu_attr(self, llfn) + // TODO(antoyo) } fn create_used_variable(&self) { unimplemented!(); - /*let name = const_cstr!("llvm.used"); - let section = const_cstr!("llvm.metadata"); - let array = - self.const_array(&self.type_ptr_to(self.type_i8()), &*self.used_statics.borrow()); - - unsafe { - let g = llvm::LLVMAddGlobal(self.llmod, self.val_ty(array), name.as_ptr()); - llvm::LLVMSetInitializer(g, array); - llvm::LLVMRustSetLinkage(g, llvm::Linkage::AppendingLinkage); - llvm::LLVMSetSection(g, section.as_ptr()); - }*/ } fn declare_c_main(&self, fn_type: Self::Type) -> Option<Self::Function> { diff --git a/src/coverageinfo.rs b/src/coverageinfo.rs index f966f6e75336f..872fc2472e223 100644 --- a/src/coverageinfo.rs +++ b/src/coverageinfo.rs @@ -20,99 +20,31 @@ impl<'a, 'gcc, 'tcx> CoverageInfoBuilderMethods<'tcx> for Builder<'a, 'gcc, 'tcx _function_source_hash: u64, ) -> bool { unimplemented!(); - /*if let Some(coverage_context) = self.coverage_context() { - debug!( - "ensuring function source hash is set for instance={:?}; function_source_hash={}", - instance, function_source_hash, - ); - let mut coverage_map = coverage_context.function_coverage_map.borrow_mut(); - coverage_map - .entry(instance) - .or_insert_with(|| FunctionCoverage::new(self.tcx, instance)) - .set_function_source_hash(function_source_hash); - true - } else { - false - }*/ } fn add_coverage_counter(&mut self, _instance: Instance<'tcx>, _id: CounterValueReference, _region: CodeRegion) -> bool { - /*if let Some(coverage_context) = self.coverage_context() { - debug!( - "adding counter to coverage_regions: instance={:?}, function_source_hash={}, id={:?}, \ - at {:?}", - instance, function_source_hash, id, region, - ); - let mut coverage_regions = coverage_context.function_coverage_map.borrow_mut(); - coverage_regions - .entry(instance) - .or_insert_with(|| FunctionCoverage::new(self.tcx, instance)) - .add_counter(function_source_hash, id, region); - true - } else { - false - }*/ - // TODO + // TODO(antoyo) false } fn add_coverage_counter_expression(&mut self, _instance: Instance<'tcx>, _id: InjectedExpressionId, _lhs: ExpressionOperandId, _op: Op, _rhs: ExpressionOperandId, _region: Option<CodeRegion>) -> bool { - /*if let Some(coverage_context) = self.coverage_context() { - debug!( - "adding counter expression to coverage_regions: instance={:?}, id={:?}, {:?} {:?} {:?}, \ - at {:?}", - instance, id, lhs, op, rhs, region, - ); - let mut coverage_regions = coverage_context.function_coverage_map.borrow_mut(); - coverage_regions - .entry(instance) - .or_insert_with(|| FunctionCoverage::new(self.tcx, instance)) - .add_counter_expression(id, lhs, op, rhs, region); - true - } else { - false - }*/ - // TODO + // TODO(antoyo) false } fn add_coverage_unreachable(&mut self, _instance: Instance<'tcx>, _region: CodeRegion) -> bool { - /*if let Some(coverage_context) = self.coverage_context() { - debug!( - "adding unreachable code to coverage_regions: instance={:?}, at {:?}", - instance, region, - ); - let mut coverage_regions = coverage_context.function_coverage_map.borrow_mut(); - coverage_regions - .entry(instance) -
.or_insert_with(|| FunctionCoverage::new(self.tcx, instance)) - .add_unreachable_region(region); - true - } else { - false - }*/ - // TODO + // TODO(antoyo) false } } impl<'gcc, 'tcx> CoverageInfoMethods<'tcx> for CodegenCx<'gcc, 'tcx> { fn coverageinfo_finalize(&self) { - // TODO - //mapgen::finalize(self) + // TODO(antoyo) } fn get_pgo_func_name_var(&self, _instance: Instance<'tcx>) -> RValue<'gcc> { unimplemented!(); - /*if let Some(coverage_context) = self.coverage_context() { - debug!("getting pgo_func_name_var for instance={:?}", instance); - let mut pgo_func_name_var_map = coverage_context.pgo_func_name_var_map.borrow_mut(); - pgo_func_name_var_map - .entry(instance) - .or_insert_with(|| create_pgo_func_name_var(self, instance)) - } else { - bug!("Could not get the `coverage_context`"); - }*/ } /// Functions with MIR-based coverage are normally codegenned _only_ if @@ -133,8 +65,5 @@ impl<'gcc, 'tcx> CoverageInfoMethods<'tcx> for CodegenCx<'gcc, 'tcx> { /// added as `unreachable_region`s. fn define_unused_fn(&self, _def_id: DefId) { unimplemented!(); - /*let instance = declare_unused_fn(self, &def_id); - codegen_unused_fn_and_counter(self, instance); - add_unused_function_coverage(self, instance, def_id);*/ } } diff --git a/src/debuginfo.rs b/src/debuginfo.rs index a8902e6966d53..8661532a3595e 100644 --- a/src/debuginfo.rs +++ b/src/debuginfo.rs @@ -17,58 +17,10 @@ impl<'a, 'gcc, 'tcx> DebugInfoBuilderMethods for Builder<'a, 'gcc, 'tcx> { // names (choose between `dbg`, `debug`, `debuginfo`, `debug_info` etc.). fn dbg_var_addr(&mut self, _dbg_var: Self::DIVariable, _scope_metadata: Self::DIScope, _variable_alloca: Self::Value, _direct_offset: Size, _indirect_offsets: &[Size]) { unimplemented!(); - /*let cx = self.cx(); - - // Convert the direct and indirect offsets to address ops. - // FIXME(eddyb) use `const`s instead of getting the values via FFI, - // the values should match the ones in the DWARF standard anyway. - let op_deref = || unsafe { llvm::LLVMRustDIBuilderCreateOpDeref() }; - let op_plus_uconst = || unsafe { llvm::LLVMRustDIBuilderCreateOpPlusUconst() }; - let mut addr_ops = SmallVec::<[_; 8]>::new(); - - if direct_offset.bytes() > 0 { - addr_ops.push(op_plus_uconst()); - addr_ops.push(direct_offset.bytes() as i64); - } - for &offset in indirect_offsets { - addr_ops.push(op_deref()); - if offset.bytes() > 0 { - addr_ops.push(op_plus_uconst()); - addr_ops.push(offset.bytes() as i64); - } - } - - // FIXME(eddyb) maybe this information could be extracted from `dbg_var`, - // to avoid having to pass it down in both places? - // NB: `var` doesn't seem to know about the column, so that's a limitation. - let dbg_loc = cx.create_debug_loc(scope_metadata, span); - unsafe { - // FIXME(eddyb) replace `llvm.dbg.declare` with `llvm.dbg.addr`. 
- llvm::LLVMRustDIBuilderInsertDeclareAtEnd( - DIB(cx), - variable_alloca, - dbg_var, - addr_ops.as_ptr(), - addr_ops.len() as c_uint, - dbg_loc, - self.llbb(), - ); - }*/ } - /*fn set_source_location(&mut self, scope: Self::DIScope, span: Span) { - unimplemented!(); - /*debug!("set_source_location: {}", self.sess().source_map().span_to_string(span)); - - let dbg_loc = self.cx().create_debug_loc(scope, span); - - unsafe { - llvm::LLVMSetCurrentDebugLocation(self.llbuilder, dbg_loc); - }*/ - }*/ - fn insert_reference_to_gdb_debug_scripts_section_global(&mut self) { - // TODO: replace with gcc_jit_context_new_global_with_initializer() if it's added: + // TODO(antoyo): replace with gcc_jit_context_new_global_with_initializer() if it's added: // https://gcc.gnu.org/pipermail/jit/2020q3/001225.html // // Call the function to initialize global values here. @@ -76,7 +28,7 @@ impl<'a, 'gcc, 'tcx> DebugInfoBuilderMethods for Builder<'a, 'gcc, 'tcx> { use std::iter; for crate_num in self.cx.tcx.crates(()).iter().copied().chain(iter::once(LOCAL_CRATE)) { - // FIXME: better way to find if a crate is of proc-macro type? + // FIXME(antoyo): better way to find if a crate is of proc-macro type? if crate_num == LOCAL_CRATE || self.cx.tcx.dep_kind(crate_num) != CrateDepKind::MacrosOnly { // NOTE: proc-macro crates are not included in the executable, so don't call their // initialization routine. @@ -87,50 +39,25 @@ impl<'a, 'gcc, 'tcx> DebugInfoBuilderMethods for Builder<'a, 'gcc, 'tcx> { } } - // TODO - //gdb::insert_reference_to_gdb_debug_scripts_section_global(self) + // TODO(antoyo): insert reference to gdb debug scripts section global. } fn set_var_name(&mut self, _value: RValue<'gcc>, _name: &str) { unimplemented!(); - // Avoid wasting time if LLVM value names aren't even enabled. - /*if self.sess().fewer_names() { - return; - } - - // Only function parameters and instructions are local to a function, - // don't change the name of anything else (e.g. globals). - let param_or_inst = unsafe { - llvm::LLVMIsAArgument(value).is_some() || llvm::LLVMIsAInstruction(value).is_some() - }; - if !param_or_inst { - return; - } - - // Avoid replacing the name if it already exists. - // While we could combine the names somehow, it'd - // get noisy quick, and the usefulness is dubious. 
-        if llvm::get_value_name(value).is_empty() {
-            llvm::set_value_name(value, name.as_bytes());
-        }*/
    }

    fn set_dbg_loc(&mut self, _dbg_loc: Self::DILocation) {
        unimplemented!();
-        /*unsafe {
-            let dbg_loc_as_llval = llvm::LLVMRustMetadataAsValue(self.cx().llcx, dbg_loc);
-            llvm::LLVMSetCurrentDebugLocation(self.llbuilder, dbg_loc_as_llval);
-        }*/
    }
}

impl<'gcc, 'tcx> DebugInfoMethods<'tcx> for CodegenCx<'gcc, 'tcx> {
    fn create_vtable_metadata(&self, _ty: Ty<'tcx>, _vtable: Self::Value) {
-        //metadata::create_vtable_metadata(self, ty, vtable)
+        // TODO(antoyo)
    }

    fn create_function_debug_context(&self, _instance: Instance<'tcx>, _fn_abi: &FnAbi<'tcx, Ty<'tcx>>, _llfn: RValue<'gcc>, _mir: &mir::Body<'tcx>) -> Option<FunctionDebugContext<Self::DIScope, Self::DILocation>> {
-        // TODO
+        // TODO(antoyo)
        None
    }

@@ -139,7 +66,7 @@ impl<'gcc, 'tcx> DebugInfoMethods<'tcx> for CodegenCx<'gcc, 'tcx> {
    }

    fn debuginfo_finalize(&self) {
-        //unimplemented!();
+        // TODO(antoyo)
    }

    fn create_dbg_var(&self, _variable_name: Symbol, _variable_type: Ty<'tcx>, _scope_metadata: Self::DIScope, _variable_kind: VariableKind, _span: Span) -> Self::DIVariable {
@@ -148,260 +75,9 @@ impl<'gcc, 'tcx> DebugInfoMethods<'tcx> for CodegenCx<'gcc, 'tcx> {

    fn dbg_scope_fn(&self, _instance: Instance<'tcx>, _fn_abi: &FnAbi<'tcx, Ty<'tcx>>, _maybe_definition_llfn: Option<RValue<'gcc>>) -> Self::DIScope {
        unimplemented!();
-        /*let def_id = instance.def_id();
-        let containing_scope = get_containing_scope(self, instance);
-        let span = self.tcx.def_span(def_id);
-        let loc = self.lookup_debug_loc(span.lo());
-        let file_metadata = file_metadata(self, &loc.file);
-
-        let function_type_metadata = unsafe {
-            let fn_signature = get_function_signature(self, fn_abi);
-            llvm::LLVMRustDIBuilderCreateSubroutineType(DIB(self), fn_signature)
-        };
-
-        // Find the enclosing function, in case this is a closure.
-        let def_key = self.tcx().def_key(def_id);
-        let mut name = def_key.disambiguated_data.data.to_string();
-
-        let enclosing_fn_def_id = self.tcx().closure_base_def_id(def_id);
-
-        // Get_template_parameters() will append a `<...>` clause to the function
-        // name if necessary.
-        let generics = self.tcx().generics_of(enclosing_fn_def_id);
-        let substs = instance.substs.truncate_to(self.tcx(), generics);
-        let template_parameters = get_template_parameters(self, &generics, substs, &mut name);
-
-        let linkage_name = &mangled_name_of_instance(self, instance).name;
-        // Omit the linkage_name if it is the same as subprogram name.
-        let linkage_name = if &name == linkage_name { "" } else { linkage_name };
-
-        // FIXME(eddyb) does this need to be separate from `loc.line` for some reason?
- let scope_line = loc.line; - - let mut flags = DIFlags::FlagPrototyped; - - if fn_abi.ret.layout.abi.is_uninhabited() { - flags |= DIFlags::FlagNoReturn; - } - - let mut spflags = DISPFlags::SPFlagDefinition; - if is_node_local_to_unit(self, def_id) { - spflags |= DISPFlags::SPFlagLocalToUnit; - } - if self.sess().opts.optimize != config::OptLevel::No { - spflags |= DISPFlags::SPFlagOptimized; - } - if let Some((id, _)) = self.tcx.entry_fn(LOCAL_CRATE) { - if id.to_def_id() == def_id { - spflags |= DISPFlags::SPFlagMainSubprogram; - } - } - - unsafe { - return llvm::LLVMRustDIBuilderCreateFunction( - DIB(self), - containing_scope, - name.as_ptr().cast(), - name.len(), - linkage_name.as_ptr().cast(), - linkage_name.len(), - file_metadata, - loc.line.unwrap_or(UNKNOWN_LINE_NUMBER), - function_type_metadata, - scope_line.unwrap_or(UNKNOWN_LINE_NUMBER), - flags, - spflags, - maybe_definition_llfn, - template_parameters, - None, - ); - } - - fn get_function_signature<'ll, 'tcx>( - cx: &CodegenCx<'ll, 'tcx>, - fn_abi: &FnAbi<'tcx, Ty<'tcx>>, - ) -> &'ll DIArray { - if cx.sess().opts.debuginfo == DebugInfo::Limited { - return create_DIArray(DIB(cx), &[]); - } - - let mut signature = Vec::with_capacity(fn_abi.args.len() + 1); - - // Return type -- llvm::DIBuilder wants this at index 0 - signature.push(if fn_abi.ret.is_ignore() { - None - } else { - Some(type_metadata(cx, fn_abi.ret.layout.ty, rustc_span::DUMMY_SP)) - }); - - // Arguments types - if cx.sess().target.options.is_like_msvc { - // FIXME(#42800): - // There is a bug in MSDIA that leads to a crash when it encounters - // a fixed-size array of `u8` or something zero-sized in a - // function-type (see #40477). - // As a workaround, we replace those fixed-size arrays with a - // pointer-type. So a function `fn foo(a: u8, b: [u8; 4])` would - // appear as `fn foo(a: u8, b: *const u8)` in debuginfo, - // and a function `fn bar(x: [(); 7])` as `fn bar(x: *const ())`. - // This transformed type is wrong, but these function types are - // already inaccurate due to ABI adjustments (see #42800). 
- signature.extend(fn_abi.args.iter().map(|arg| { - let t = arg.layout.ty; - let t = match t.kind() { - ty::Array(ct, _) - if (*ct == cx.tcx.types.u8) || cx.layout_of(ct).is_zst() => - { - cx.tcx.mk_imm_ptr(ct) - } - _ => t, - }; - Some(type_metadata(cx, t, rustc_span::DUMMY_SP)) - })); - } else { - signature.extend( - fn_abi - .args - .iter() - .map(|arg| Some(type_metadata(cx, arg.layout.ty, rustc_span::DUMMY_SP))), - ); - } - - create_DIArray(DIB(cx), &signature[..]) - } - - fn get_template_parameters<'ll, 'tcx>( - cx: &CodegenCx<'ll, 'tcx>, - generics: &ty::Generics, - substs: SubstsRef<'tcx>, - name_to_append_suffix_to: &mut String, - ) -> &'ll DIArray { - if substs.types().next().is_none() { - return create_DIArray(DIB(cx), &[]); - } - - name_to_append_suffix_to.push('<'); - for (i, actual_type) in substs.types().enumerate() { - if i != 0 { - name_to_append_suffix_to.push(','); - } - - let actual_type = - cx.tcx.normalize_erasing_regions(ParamEnv::reveal_all(), actual_type); - // Add actual type name to <...> clause of function name - let actual_type_name = compute_debuginfo_type_name(cx.tcx(), actual_type, true); - name_to_append_suffix_to.push_str(&actual_type_name[..]); - } - name_to_append_suffix_to.push('>'); - - // Again, only create type information if full debuginfo is enabled - let template_params: Vec<_> = if cx.sess().opts.debuginfo == DebugInfo::Full { - let names = get_parameter_names(cx, generics); - substs - .iter() - .zip(names) - .filter_map(|(kind, name)| { - if let GenericArgKind::Type(ty) = kind.unpack() { - let actual_type = - cx.tcx.normalize_erasing_regions(ParamEnv::reveal_all(), ty); - let actual_type_metadata = - type_metadata(cx, actual_type, rustc_span::DUMMY_SP); - let name = name.as_str(); - Some(unsafe { - Some(llvm::LLVMRustDIBuilderCreateTemplateTypeParameter( - DIB(cx), - None, - name.as_ptr().cast(), - name.len(), - actual_type_metadata, - )) - }) - } else { - None - } - }) - .collect() - } else { - vec![] - }; - - create_DIArray(DIB(cx), &template_params[..]) - } - - fn get_parameter_names(cx: &CodegenCx<'_, '_>, generics: &ty::Generics) -> Vec { - let mut names = generics - .parent - .map_or(vec![], |def_id| get_parameter_names(cx, cx.tcx.generics_of(def_id))); - names.extend(generics.params.iter().map(|param| param.name)); - names - } - - fn get_containing_scope<'ll, 'tcx>( - cx: &CodegenCx<'ll, 'tcx>, - instance: Instance<'tcx>, - ) -> &'ll DIScope { - // First, let's see if this is a method within an inherent impl. Because - // if yes, we want to make the result subroutine DIE a child of the - // subroutine's self-type. - let self_type = cx.tcx.impl_of_method(instance.def_id()).and_then(|impl_def_id| { - // If the method does *not* belong to a trait, proceed - if cx.tcx.trait_id_of_impl(impl_def_id).is_none() { - let impl_self_ty = cx.tcx.subst_and_normalize_erasing_regions( - instance.substs, - ty::ParamEnv::reveal_all(), - &cx.tcx.type_of(impl_def_id), - ); - - // Only "class" methods are generally understood by LLVM, - // so avoid methods on other types (e.g., `<*mut T>::null`). - match impl_self_ty.kind() { - ty::Adt(def, ..) 
if !def.is_box() => {
-                        // Again, only create type information if full debuginfo is enabled
-                        if cx.sess().opts.debuginfo == DebugInfo::Full
-                            && !impl_self_ty.needs_subst()
-                        {
-                            Some(type_metadata(cx, impl_self_ty, rustc_span::DUMMY_SP))
-                        } else {
-                            Some(namespace::item_namespace(cx, def.did))
-                        }
-                    }
-                    _ => None,
-                }
-            } else {
-                // For trait method impls we still use the "parallel namespace"
-                // strategy
-                None
-            }
-        });
-
-        self_type.unwrap_or_else(|| {
-            namespace::item_namespace(
-                cx,
-                DefId {
-                    krate: instance.def_id().krate,
-                    index: cx
-                        .tcx
-                        .def_key(instance.def_id())
-                        .parent
-                        .expect("get_containing_scope: missing parent?"),
-                },
-            )
-        })
-    }*/
    }

    fn dbg_loc(&self, _scope: Self::DIScope, _inlined_at: Option<Self::DILocation>, _span: Span) -> Self::DILocation {
        unimplemented!();
-        /*let DebugLoc { line, col, .. } = self.lookup_debug_loc(span.lo());
-
-        unsafe {
-            llvm::LLVMRustDIBuilderCreateDebugLocation(
-                utils::debug_context(self).llcontext,
-                line.unwrap_or(UNKNOWN_LINE_NUMBER),
-                col.unwrap_or(UNKNOWN_COLUMN_NUMBER),
-                scope,
-                inlined_at,
-            )
-        }*/
    }
}
diff --git a/src/declare.rs b/src/declare.rs
index 339a613c09661..c1382bf2f4a71 100644
--- a/src/declare.rs
+++ b/src/declare.rs
@@ -35,7 +35,6 @@ impl<'gcc, 'tcx> CodegenCx<'gcc, 'tcx> {
    }

    pub fn declare_global_with_linkage(&self, name: &str, ty: Type<'gcc>, linkage: GlobalKind) -> RValue<'gcc> {
-        //debug!("declare_global_with_linkage(name={:?})", name);
        let global = self.context.new_global(None, linkage, ty, name)
            .get_address(None);
        self.globals.borrow_mut().insert(name.to_string(), global);
@@ -48,13 +47,12 @@ impl<'gcc, 'tcx> CodegenCx<'gcc, 'tcx> {
    pub fn declare_func(&self, name: &str, return_type: Type<'gcc>, params: &[Type<'gcc>], variadic: bool) -> RValue<'gcc> {
        self.linkage.set(FunctionType::Exported);
        let func = declare_raw_fn(self, name, () /*llvm::CCallConv*/, return_type, params, variadic);
-        // FIXME: this is a wrong cast. That requires changing the compiler API.
+        // FIXME(antoyo): this is a wrong cast. That requires changing the compiler API.
        unsafe { std::mem::transmute(func) }
    }

    pub fn declare_global(&self, name: &str, ty: Type<'gcc>, is_tls: bool, link_section: Option<Symbol>) -> RValue<'gcc> {
-        //debug!("declare_global(name={:?})", name);
-        // FIXME: correctly support global variable initialization.
+        // FIXME(antoyo): correctly support global variable initialization.
        if name.starts_with(ARGV_INIT_ARRAY) {
            // NOTE: hack to avoid having to update the names in mangled_std_symbols: we save the
            // name of the variable now to actually declare it later.
@@ -82,7 +80,7 @@ impl<'gcc, 'tcx> CodegenCx<'gcc, 'tcx> {
    }

    pub fn declare_cfn(&self, name: &str, _fn_type: Type<'gcc>) -> RValue<'gcc> {
-        // TODO: use the fn_type parameter.
+        // TODO(antoyo): use the fn_type parameter.
        let const_string = self.context.new_type::<u8>().make_pointer().make_pointer();
        let return_type = self.type_i32();
        let variadic = false;
@@ -91,7 +89,7 @@ impl<'gcc, 'tcx> CodegenCx<'gcc, 'tcx> {
        // NOTE: it is needed to set the current_func here as well, because get_fn() is not called
        // for the main function.
        *self.current_func.borrow_mut() = Some(func);
-        // FIXME: this is a wrong cast. That requires changing the compiler API.
+        // FIXME(antoyo): this is a wrong cast. That requires changing the compiler API.
        unsafe { std::mem::transmute(func) }
    }

@@ -128,11 +126,9 @@ impl<'gcc, 'tcx> CodegenCx<'gcc, 'tcx> {
            self.global_names.borrow_mut().insert(global, global_name.to_string());
            self.argv_initialized.set(true);
        }
-        //debug!("declare_rust_fn(name={:?}, fn_abi={:?})", name, fn_abi);
        let (return_type, params, variadic) = fn_abi.gcc_type(self);
        let func = declare_raw_fn(self, name, () /*fn_abi.llvm_cconv()*/, return_type, &params, variadic);
-        //fn_abi.apply_attrs_llfn(self, func);
-        // FIXME: this is a wrong cast. That requires changing the compiler API.
+        // FIXME(antoyo): this is a wrong cast. That requires changing the compiler API.
        unsafe { std::mem::transmute(func) }
    }

@@ -146,19 +142,9 @@ impl<'gcc, 'tcx> CodegenCx<'gcc, 'tcx> {
    }

    pub fn get_declared_value(&self, name: &str) -> Option<RValue<'gcc>> {
-        //debug!("get_declared_value(name={:?})", name);
-        // TODO: use a different field than globals, because this seems to return a function?
+        // TODO(antoyo): use a different field than globals, because this seems to return a function?
        self.globals.borrow().get(name).cloned()
    }
-
-    /*fn get_defined_value(&self, name: &str) -> Option<RValue<'gcc>> {
-        // TODO: gcc does not allow global initialization.
-        None
-        /*self.get_declared_value(name).and_then(|val| {
-            let declaration = unsafe { llvm::LLVMIsDeclaration(val) != 0 };
-            if !declaration { Some(val) } else { None }
-        })*/
-    }*/
}

/// Declare a function.
@@ -166,11 +152,6 @@ impl<'gcc, 'tcx> CodegenCx<'gcc, 'tcx> {
/// If there’s a value with the same name already declared, the function will
/// update the declaration and return existing Value instead.
fn declare_raw_fn<'gcc>(cx: &CodegenCx<'gcc, '_>, name: &str, _callconv: () /*llvm::CallConv*/, return_type: Type<'gcc>, param_types: &[Type<'gcc>], variadic: bool) -> Function<'gcc> {
-    //debug!("declare_raw_fn(name={:?}, ty={:?})", name, ty);
-    /*let llfn = unsafe {
-        llvm::LLVMRustGetOrInsertFunction(cx.llmod, name.as_ptr().cast(), name.len(), ty)
-    };*/
-
    if name.starts_with("llvm.") {
        return llvm::intrinsic(name, cx);
    }
@@ -180,32 +161,24 @@ fn declare_raw_fn<'gcc>(cx: &CodegenCx<'gcc, '_>, name: &str, _callconv: () /*ll
        }
        else {
            let params: Vec<_> = param_types.into_iter().enumerate()
-                .map(|(index, param)| cx.context.new_parameter(None, *param, &format!("param{}", index))) // TODO: set name.
+                .map(|(index, param)| cx.context.new_parameter(None, *param, &format!("param{}", index))) // TODO(antoyo): set name.
                .collect();
            let func = cx.context.new_function(None, cx.linkage.get(), return_type, &params, mangle_name(name), variadic);
            cx.functions.borrow_mut().insert(name.to_string(), func);
            func
        };

-    //llvm::SetFunctionCallConv(llfn, callconv); // TODO
-    // Function addresses in Rust are never significant, allowing functions to
-    // be merged.
-    //llvm::SetUnnamedAddress(llfn, llvm::UnnamedAddr::Global); // TODO
-
-    /*if cx.tcx.sess.opts.cg.no_redzone.unwrap_or(cx.tcx.sess.target.target.options.disable_redzone) {
-        llvm::Attribute::NoRedZone.apply_llfn(Function, llfn);
-    }*/
-
-    //attributes::default_optimisation_attrs(cx.tcx.sess, llfn);
-    //attributes::non_lazy_bind(cx.sess(), llfn);
+    // TODO(antoyo): set function calling convention.
+    // TODO(antoyo): set unnamed address.
+    // TODO(antoyo): set no red zone function attribute.
+    // TODO(antoyo): set attributes for optimisation.
+    // TODO(antoyo): set attributes for non lazy bind.

-    // FIXME: invalid cast.
-    // TODO: is this line useful?
-    //cx.globals.borrow_mut().insert(name.to_string(), unsafe { std::mem::transmute(func) });
+    // FIXME(antoyo): invalid cast.
    func
}

-// FIXME: this is a hack because libgccjit currently only supports alpha, num and _.
+// FIXME(antoyo): this is a hack because libgccjit currently only supports alpha, num and _.
// Unsupported characters: `$` and `.`.
pub fn mangle_name(name: &str) -> String {
    name.replace(|char: char| {
diff --git a/src/intrinsic/llvm.rs b/src/intrinsic/llvm.rs
index bf9472d3ea942..b074febc521eb 100644
--- a/src/intrinsic/llvm.rs
+++ b/src/intrinsic/llvm.rs
@@ -11,16 +11,12 @@ pub fn intrinsic<'gcc, 'tcx>(name: &str, cx: &CodegenCx<'gcc, 'tcx>) -> Function
            cx.functions.borrow_mut().insert(gcc_name.to_string(), func);
            return func;
        },
-        // TODO: this doc specifies the equivalent GCC builtins: http://huonw.github.io/llvmint/llvmint/x86/index.html
+        // NOTE: this doc specifies the equivalent GCC builtins: http://huonw.github.io/llvmint/llvmint/x86/index.html
        "llvm.x86.sse2.cmp.pd" => "__builtin_ia32_cmppd",
        "llvm.x86.sse2.movmsk.pd" => "__builtin_ia32_movmskpd",
        "llvm.x86.sse2.pmovmskb.128" => "__builtin_ia32_pmovmskb128",
        _ => unimplemented!("unsupported LLVM intrinsic {}", name)
    };

-    println!("Get target builtin");
    unimplemented!();
-    /*let func = cx.context.get_target_builtin_function(gcc_name);
-    cx.functions.borrow_mut().insert(gcc_name.to_string(), func);
-    func*/
}
diff --git a/src/intrinsic/mod.rs b/src/intrinsic/mod.rs
index ad6dfbffbac96..a79be7cfc74c8 100644
--- a/src/intrinsic/mod.rs
+++ b/src/intrinsic/mod.rs
@@ -96,7 +96,7 @@ impl<'a, 'gcc, 'tcx> IntrinsicCallMethods<'tcx> for Builder<'a, 'gcc, 'tcx> {

        let llval = match name {
            _ if simple.is_some() => {
-                // FIXME: remove this cast when the API supports function.
+                // FIXME(antoyo): remove this cast when the API supports function.
                let func = unsafe { std::mem::transmute(simple.expect("simple")) };
                self.call(self.type_void(), func, &args.iter().map(|arg| arg.immediate()).collect::<Vec<_>>(), None)
            },
@@ -118,40 +118,12 @@ impl<'a, 'gcc, 'tcx> IntrinsicCallMethods<'tcx> for Builder<'a, 'gcc, 'tcx> {
            }
            sym::breakpoint => {
                unimplemented!();
-                /*let llfn = self.get_intrinsic(&("llvm.debugtrap"));
-                self.call(llfn, &[], None)*/
            }
            sym::va_copy => {
                unimplemented!();
-                /*let intrinsic = self.cx().get_intrinsic(&("llvm.va_copy"));
-                self.call(intrinsic, &[args[0].immediate(), args[1].immediate()], None)*/
            }
            sym::va_arg => {
                unimplemented!();
-                /*match fn_abi.ret.layout.abi {
-                    abi::Abi::Scalar(ref scalar) => {
-                        match scalar.value {
-                            Primitive::Int(..) => {
-                                if self.cx().size_of(ret_ty).bytes() < 4 {
-                                    // `va_arg` should not be called on a integer type
-                                    // less than 4 bytes in length. If it is, promote
-                                    // the integer to a `i32` and truncate the result
-                                    // back to the smaller type.
-                                    let promoted_result = emit_va_arg(self, args[0], tcx.types.i32);
-                                    self.trunc(promoted_result, llret_ty)
-                                } else {
-                                    emit_va_arg(self, args[0], ret_ty)
-                                }
-                            }
-                            Primitive::F64 | Primitive::Pointer => {
-                                emit_va_arg(self, args[0], ret_ty)
-                            }
-                            // `va_arg` should never be used with the return type f32.
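// [editor's sketch] The `mangle_name` hunk above is cut off by the diff context,
// so here is a minimal sketch of the rule its comment describes: keep
// alphanumerics and '_', and map everything else (in practice '$' and '.')
// to a character libgccjit accepts. The replacement character '_' is an
// assumption; only the set of unsupported characters is stated in the source.
fn mangle_name_sketch(name: &str) -> String {
    name.replace(|c: char| !c.is_alphanumeric() && c != '_', "_")
}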
- Primitive::F32 => bug!("the va_arg intrinsic does not work with `f32`"), - } - } - _ => bug!("the va_arg intrinsic does not work with non-scalar types"), - }*/ } sym::volatile_load | sym::unaligned_volatile_load => { @@ -161,15 +133,7 @@ impl<'a, 'gcc, 'tcx> IntrinsicCallMethods<'tcx> for Builder<'a, 'gcc, 'tcx> { ptr = self.pointercast(ptr, self.type_ptr_to(ty.gcc_type(self))); } let load = self.volatile_load(ptr.get_type(), ptr); - // TODO - /*let align = if name == sym::unaligned_volatile_load { - 1 - } else { - self.align_of(tp_ty).bytes() as u32 - }; - unsafe { - llvm::LLVMSetAlignment(load, align); - }*/ + // TODO(antoyo): set alignment. self.to_immediate(load, self.layout_of(tp_ty)) } sym::volatile_store => { @@ -187,24 +151,6 @@ impl<'a, 'gcc, 'tcx> IntrinsicCallMethods<'tcx> for Builder<'a, 'gcc, 'tcx> { | sym::prefetch_read_instruction | sym::prefetch_write_instruction => { unimplemented!(); - /*let expect = self.get_intrinsic(&("llvm.prefetch")); - let (rw, cache_type) = match name { - sym::prefetch_read_data => (0, 1), - sym::prefetch_write_data => (1, 1), - sym::prefetch_read_instruction => (0, 0), - sym::prefetch_write_instruction => (1, 0), - _ => bug!(), - }; - self.call( - expect, - &[ - args[0].immediate(), - self.const_i32(rw), - args[1].immediate(), - self.const_i32(cache_type), - ], - None, - )*/ } sym::ctlz | sym::ctlz_nonzero @@ -257,10 +203,6 @@ impl<'a, 'gcc, 'tcx> IntrinsicCallMethods<'tcx> for Builder<'a, 'gcc, 'tcx> { self.block = Some(after_block); result.to_rvalue() - - /*let y = self.const_bool(false); - let llfn = self.get_intrinsic(&format!("llvm.{}.i{}", name, width)); - self.call(llfn, &[args[0].immediate(), y], None)*/ } sym::ctlz_nonzero => { self.count_leading_zeroes(width, args[0].immediate()) @@ -274,11 +216,11 @@ impl<'a, 'gcc, 'tcx> IntrinsicCallMethods<'tcx> for Builder<'a, 'gcc, 'tcx> { args[0].immediate() // byte swap a u8/i8 is just a no-op } else { - // TODO: check if it's faster to use string literals and a + // TODO(antoyo): check if it's faster to use string literals and a // match instead of format!. let bswap = self.cx.context.get_builtin_function(&format!("__builtin_bswap{}", width)); let mut arg = args[0].immediate(); - // FIXME: this cast should not be necessary. Remove + // FIXME(antoyo): this cast should not be necessary. Remove // when having proper sized integer types. let param_type = bswap.get_param(0).to_rvalue().get_type(); if param_type != arg.get_type() { @@ -289,7 +231,7 @@ impl<'a, 'gcc, 'tcx> IntrinsicCallMethods<'tcx> for Builder<'a, 'gcc, 'tcx> { }, sym::bitreverse => self.bit_reverse(width, args[0].immediate()), sym::rotate_left | sym::rotate_right => { - // TODO: implement using algorithm from: + // TODO(antoyo): implement using algorithm from: // https://blog.regehr.org/archives/1063 // for other platforms. let is_left = name == sym::rotate_left; @@ -346,7 +288,7 @@ impl<'a, 'gcc, 'tcx> IntrinsicCallMethods<'tcx> for Builder<'a, 'gcc, 'tcx> { self.const_bool(true) } /*else if use_integer_compare { - let integer_ty = self.type_ix(layout.size.bits()); // FIXME: LLVM creates an integer of 96 bits for [i32; 3], but gcc doesn't support this, so it creates an integer of 128 bits. + let integer_ty = self.type_ix(layout.size.bits()); // FIXME(antoyo): LLVM creates an integer of 96 bits for [i32; 3], but gcc doesn't support this, so it creates an integer of 128 bits. 
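// [editor's sketch] The bswap TODO above contemplates a match on the width
// instead of building the builtin name with format!. A hedged sketch of that
// alternative, assuming the widths already handled by the surrounding code;
// these __builtin_bswap* builtins do exist in GCC.
fn bswap_builtin_name(width: u64) -> &'static str {
    match width {
        16 => "__builtin_bswap16",
        32 => "__builtin_bswap32",
        64 => "__builtin_bswap64",
        other => unimplemented!("bswap of width {}", other),
    }
}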
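// [editor's sketch] The rotate TODO above cites https://blog.regehr.org/archives/1063.
// The algorithm described there is a branch-free rotate that stays
// well-defined even for a shift count of zero; shown here for u32 as plain
// Rust. A builder-based version would emit the same two shifts and an `or`.
fn rotate_left_sketch(x: u32, n: u32) -> u32 {
    // Masking both shift amounts with 31 keeps them in range, so neither
    // shift is undefined when n == 0 or n >= 32.
    (x << (n & 31)) | (x >> (n.wrapping_neg() & 31))
}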
            let ptr_ty = self.type_ptr_to(integer_ty);
            let a_ptr = self.bitcast(a, ptr_ty);
            let a_val = self.load(integer_ty, a_ptr, layout.align.abi);
@@ -396,38 +338,27 @@ impl<'a, 'gcc, 'tcx> IntrinsicCallMethods<'tcx> for Builder<'a, 'gcc, 'tcx> {
    }

    fn assume(&mut self, value: Self::Value) {
-        // TODO: switch to asumme when it exists.
+        // TODO(antoyo): switch to assume when it exists.
        // Or use something like this:
        // #define __assume(cond) do { if (!(cond)) __builtin_unreachable(); } while (0)
        self.expect(value, true);
    }

    fn expect(&mut self, cond: Self::Value, _expected: bool) -> Self::Value {
-        // TODO
-        /*let expect = self.context.get_builtin_function("__builtin_expect");
-        let expect: RValue<'gcc> = unsafe { std::mem::transmute(expect) };
-        self.call(expect, &[cond, self.const_bool(expected)], None)*/
+        // TODO(antoyo)
        cond
    }

    fn sideeffect(&mut self) {
-        // TODO
-        /*if self.tcx().sess.opts.debugging_opts.insert_sideeffect {
-            let fnname = self.get_intrinsic(&("llvm.sideeffect"));
-            self.call(fnname, &[], None);
-        }*/
+        // TODO(antoyo)
    }

    fn va_start(&mut self, _va_list: RValue<'gcc>) -> RValue<'gcc> {
        unimplemented!();
-        /*let intrinsic = self.cx().get_intrinsic("llvm.va_start");
-        self.call(intrinsic, &[va_list], None)*/
    }

    fn va_end(&mut self, _va_list: RValue<'gcc>) -> RValue<'gcc> {
        unimplemented!();
-        /*let intrinsic = self.cx().get_intrinsic("llvm.va_end");
-        self.call(intrinsic, &[va_list], None)*/
    }
}

@@ -634,7 +565,7 @@ impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> {
                step4
            },
            32 => {
-                // TODO: Refactor with other implementations.
+                // TODO(antoyo): Refactor with other implementations.
                // First step.
                let left = self.and(value, context.new_rvalue_from_long(typ, 0x55555555));
                let left = self.shl(left, context.new_rvalue_from_long(typ, 1));
@@ -681,7 +612,7 @@ impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> {
                // Second step.
                let left = self.and(step1, context.new_rvalue_from_long(typ, 0x0001FFFF0001FFFF));
                let left = self.shl(left, context.new_rvalue_from_long(typ, 15));
-                let right = self.and(step1, context.new_rvalue_from_long(typ, 0xFFFE0000FFFE0000u64 as i64)); // TODO: transmute the number instead?
+                let right = self.and(step1, context.new_rvalue_from_long(typ, 0xFFFE0000FFFE0000u64 as i64)); // TODO(antoyo): transmute the number instead?
                let right = self.lshr(right, context.new_rvalue_from_long(typ, 17));
                let step2 = self.or(left, right);

@@ -715,7 +646,7 @@ impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> {
                step5
            },
            128 => {
-                // TODO: find a more efficient implementation?
+                // TODO(antoyo): find a more efficient implementation?
                let sixty_four = self.context.new_rvalue_from_long(typ, 64);
                let high = self.context.new_cast(None, value >> sixty_four, self.u64_type);
                let low = self.context.new_cast(None, value, self.u64_type);
@@ -735,7 +666,7 @@ impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> {
    }

    fn count_leading_zeroes(&self, width: u64, arg: RValue<'gcc>) -> RValue<'gcc> {
-        // TODO: use width?
+        // TODO(antoyo): use width?
        let arg_type = arg.get_type();
        let count_leading_zeroes =
            if arg_type.is_uint(&self.cx) {
@@ -873,11 +804,11 @@ impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> {
    }

    fn pop_count(&self, value: RValue<'gcc>) -> RValue<'gcc> {
-        // TODO: use the optimized version with fewer operations.
+        // TODO(antoyo): use the optimized version with fewer operations.
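// [editor's sketch] The C macro quoted in `assume` above, transcribed to Rust
// to show the intent: promise the optimizer that `cond` holds by making the
// false branch unreachable. This is an illustration only, not backend code.
unsafe fn assume_sketch(cond: bool) {
    if !cond {
        // Rust's equivalent of __builtin_unreachable().
        unsafe { std::hint::unreachable_unchecked() }
    }
}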
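// [editor's sketch] One plausible reading of the "optimized version with
// fewer operations" mentioned in the pop_count TODO above: the classic
// parallel bit-count (as in Hacker's Delight), sketched for u32. Whether
// this is the exact variant the author has in mind is an assumption.
fn pop_count_sketch(mut v: u32) -> u32 {
    v = v - ((v >> 1) & 0x5555_5555);                 // sum adjacent bits
    v = (v & 0x3333_3333) + ((v >> 2) & 0x3333_3333); // sum pairs into nibbles
    v = (v + (v >> 4)) & 0x0f0f_0f0f;                 // sum nibbles into bytes
    v.wrapping_mul(0x0101_0101) >> 24                 // add all bytes together
}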
let value_type = value.get_type(); if value_type.is_u128(&self.cx) { - // TODO: implement in the normal algorithm below to have a more efficient + // TODO(antoyo): implement in the normal algorithm below to have a more efficient // implementation (that does not require a call to __popcountdi2). let popcount = self.context.get_builtin_function("__builtin_popcountll"); let sixty_four = self.context.new_rvalue_from_long(value_type, 64); @@ -1083,204 +1014,8 @@ fn try_intrinsic<'gcc, 'tcx>(bx: &mut Builder<'_, 'gcc, 'tcx>, try_func: RValue< } else if wants_msvc_seh(bx.sess()) { unimplemented!(); - //codegen_msvc_try(bx, try_func, data, catch_func, dest); } else { unimplemented!(); - //codegen_gnu_try(bx, try_func, data, catch_func, dest); } } - -// MSVC's definition of the `rust_try` function. -// -// This implementation uses the new exception handling instructions in LLVM -// which have support in LLVM for SEH on MSVC targets. Although these -// instructions are meant to work for all targets, as of the time of this -// writing, however, LLVM does not recommend the usage of these new instructions -// as the old ones are still more optimized. -/*fn codegen_msvc_try<'a, 'gcc, 'tcx>(_bx: &mut Builder<'a, 'gcc, 'tcx>, _try_func: RValue<'gcc>, _data: RValue<'gcc>, _catch_func: RValue<'gcc>, _dest: RValue<'gcc>) { - unimplemented!(); - /*let llfn = get_rust_try_fn(bx, &mut |mut bx| { - bx.set_personality_fn(bx.eh_personality()); - bx.sideeffect(); - - let mut normal = bx.build_sibling_block("normal"); - let mut catchswitch = bx.build_sibling_block("catchswitch"); - let mut catchpad = bx.build_sibling_block("catchpad"); - let mut caught = bx.build_sibling_block("caught"); - - let try_func = llvm::get_param(bx.llfn(), 0); - let data = llvm::get_param(bx.llfn(), 1); - let catch_func = llvm::get_param(bx.llfn(), 2); - - // We're generating an IR snippet that looks like: - // - // declare i32 @rust_try(%try_func, %data, %catch_func) { - // %slot = alloca u8* - // invoke %try_func(%data) to label %normal unwind label %catchswitch - // - // normal: - // ret i32 0 - // - // catchswitch: - // %cs = catchswitch within none [%catchpad] unwind to caller - // - // catchpad: - // %tok = catchpad within %cs [%type_descriptor, 0, %slot] - // %ptr = load %slot - // call %catch_func(%data, %ptr) - // catchret from %tok to label %caught - // - // caught: - // ret i32 1 - // } - // - // This structure follows the basic usage of throw/try/catch in LLVM. - // For example, compile this C++ snippet to see what LLVM generates: - // - // #include - // - // struct rust_panic { - // rust_panic(const rust_panic&); - // ~rust_panic(); - // - // uint64_t x[2]; - // }; - // - // int __rust_try( - // void (*try_func)(void*), - // void *data, - // void (*catch_func)(void*, void*) noexcept - // ) { - // try { - // try_func(data); - // return 0; - // } catch(rust_panic& a) { - // catch_func(data, &a); - // return 1; - // } - // } - // - // More information can be found in libstd's seh.rs implementation. - let ptr_align = bx.tcx().data_layout.pointer_align.abi; - let slot = bx.alloca(bx.type_i8p(), ptr_align); - bx.invoke(try_func, &[data], normal.llbb(), catchswitch.llbb(), None); - - normal.ret(bx.const_i32(0)); - - let cs = catchswitch.catch_switch(None, None, 1); - catchswitch.add_handler(cs, catchpad.llbb()); - - // We can't use the TypeDescriptor defined in libpanic_unwind because it - // might be in another DLL and the SEH encoding only supports specifying - // a TypeDescriptor from the current module. 
- // - // However this isn't an issue since the MSVC runtime uses string - // comparison on the type name to match TypeDescriptors rather than - // pointer equality. - // - // So instead we generate a new TypeDescriptor in each module that uses - // `try` and let the linker merge duplicate definitions in the same - // module. - // - // When modifying, make sure that the type_name string exactly matches - // the one used in src/libpanic_unwind/seh.rs. - let type_info_vtable = bx.declare_global("??_7type_info@@6B@", bx.type_i8p()); - let type_name = bx.const_bytes(b"rust_panic\0"); - let type_info = - bx.const_struct(&[type_info_vtable, bx.const_null(bx.type_i8p()), type_name], false); - let tydesc = bx.declare_global("__rust_panic_type_info", bx.val_ty(type_info)); - unsafe { - llvm::LLVMRustSetLinkage(tydesc, llvm::Linkage::LinkOnceODRLinkage); - llvm::SetUniqueComdat(bx.llmod, tydesc); - llvm::LLVMSetInitializer(tydesc, type_info); - } - - // The flag value of 8 indicates that we are catching the exception by - // reference instead of by value. We can't use catch by value because - // that requires copying the exception object, which we don't support - // since our exception object effectively contains a Box. - // - // Source: MicrosoftCXXABI::getAddrOfCXXCatchHandlerType in clang - let flags = bx.const_i32(8); - let funclet = catchpad.catch_pad(cs, &[tydesc, flags, slot]); - let ptr = catchpad.load(slot, ptr_align); - catchpad.call(catch_func, &[data, ptr], Some(&funclet)); - - catchpad.catch_ret(&funclet, caught.llbb()); - - caught.ret(bx.const_i32(1)); - }); - - // Note that no invoke is used here because by definition this function - // can't panic (that's what it's catching). - let ret = bx.call(llfn, &[try_func, data, catch_func], None); - let i32_align = bx.tcx().data_layout.i32_align.abi; - bx.store(ret, dest, i32_align);*/ -}*/ - -// Definition of the standard `try` function for Rust using the GNU-like model -// of exceptions (e.g., the normal semantics of LLVM's `landingpad` and `invoke` -// instructions). -// -// This codegen is a little surprising because we always call a shim -// function instead of inlining the call to `invoke` manually here. This is done -// because in LLVM we're only allowed to have one personality per function -// definition. The call to the `try` intrinsic is being inlined into the -// function calling it, and that function may already have other personality -// functions in play. By calling a shim we're guaranteed that our shim will have -// the right personality function. -/*fn codegen_gnu_try<'a, 'gcc, 'tcx>(_bx: &mut Builder<'a, 'gcc, 'tcx>, _try_func: RValue<'gcc>, _data: RValue<'gcc>, _catch_func: RValue<'gcc>, _dest: RValue<'gcc>) { - unimplemented!(); - /*let llfn = get_rust_try_fn(bx, &mut |mut bx| { - // Codegens the shims described above: - // - // bx: - // invoke %try_func(%data) normal %normal unwind %catch - // - // normal: - // ret 0 - // - // catch: - // (%ptr, _) = landingpad - // call %catch_func(%data, %ptr) - // ret 1 - - bx.sideeffect(); - - let mut then = bx.build_sibling_block("then"); - let mut catch = bx.build_sibling_block("catch"); - - let try_func = llvm::get_param(bx.llfn(), 0); - let data = llvm::get_param(bx.llfn(), 1); - let catch_func = llvm::get_param(bx.llfn(), 2); - bx.invoke(try_func, &[data], then.llbb(), catch.llbb(), None); - then.ret(bx.const_i32(0)); - - // Type indicator for the exception being thrown. - // - // The first value in this tuple is a pointer to the exception object - // being thrown. 
The second value is a "selector" indicating which of
-        // the landing pad clauses the exception's type had been matched to.
-        // rust_try ignores the selector.
-        let lpad_ty = bx.type_struct(&[bx.type_i8p(), bx.type_i32()], false);
-        let vals = catch.landing_pad(lpad_ty, bx.eh_personality(), 1);
-        let tydesc = match bx.tcx().lang_items().eh_catch_typeinfo() {
-            Some(tydesc) => {
-                let tydesc = bx.get_static(tydesc);
-                bx.bitcast(tydesc, bx.type_i8p())
-            }
-            None => bx.const_null(bx.type_i8p()),
-        };
-        catch.add_clause(vals, tydesc);
-        let ptr = catch.extract_value(vals, 0);
-        catch.call(catch_func, &[data, ptr], None);
-        catch.ret(bx.const_i32(1));
-    });
-
-    // Note that no invoke is used here because by definition this function
-    // can't panic (that's what it's catching).
-    let ret = bx.call(llfn, &[try_func, data, catch_func], None);
-    let i32_align = bx.tcx().data_layout.i32_align.abi;
-    bx.store(ret, dest, i32_align);*/
-}*/
diff --git a/src/intrinsic/simd.rs b/src/intrinsic/simd.rs
index 71cf5cce9f4e1..26a42217e4c56 100644
--- a/src/intrinsic/simd.rs
+++ b/src/intrinsic/simd.rs
@@ -12,8 +12,6 @@ use rustc_span::{Span, Symbol, sym};
 use crate::builder::Builder;

 pub fn generic_simd_intrinsic<'a, 'gcc, 'tcx>(bx: &mut Builder<'a, 'gcc, 'tcx>, name: Symbol, callee_ty: Ty<'tcx>, args: &[OperandRef<'tcx, RValue<'gcc>>], ret_ty: Ty<'tcx>, llret_ty: Type<'gcc>, span: Span) -> Result<RValue<'gcc>, ()> {
-    //println!("Generic simd: {}", name);
-
    // macros for error handling:
    macro_rules! emit_error {
        ($msg: tt) => {
@@ -56,33 +54,6 @@ pub fn generic_simd_intrinsic<'a, 'gcc, 'tcx>(bx: &mut Builder<'a, 'gcc, 'tcx>,
    let arg_tys = sig.inputs();
    let name_str = &*name.as_str();

-    /*if name == sym::simd_select_bitmask {
-        let in_ty = arg_tys[0];
-        let m_len = match in_ty.kind() {
-            // Note that this `.unwrap()` crashes for isize/usize, that's sort
-            // of intentional as there's not currently a use case for that.
-            ty::Int(i) => i.bit_width().unwrap(),
-            ty::Uint(i) => i.bit_width().unwrap(),
-            _ => return_error!("`{}` is not an integral type", in_ty),
-        };
-        require_simd!(arg_tys[1], "argument");
-        let (v_len, _) = arg_tys[1].simd_size_and_type(bx.tcx());
-        require!(
-            // Allow masks for vectors with fewer than 8 elements to be
-            // represented with a u8 or i8.
- m_len == v_len || (m_len == 8 && v_len < 8), - "mismatched lengths: mask length `{}` != other vector length `{}`", - m_len, - v_len - ); - let i1 = bx.type_i1(); - let im = bx.type_ix(v_len); - let i1xn = bx.type_vector(i1, v_len); - let m_im = bx.trunc(args[0].immediate(), im); - let m_i1s = bx.bitcast(m_im, i1xn); - return Ok(bx.select(m_i1s, args[1].immediate(), args[2].immediate())); - }*/ - // every intrinsic below takes a SIMD vector as its first argument require_simd!(arg_tys[0], "input"); let in_ty = arg_tys[0]; @@ -153,37 +124,8 @@ pub fn generic_simd_intrinsic<'a, 'gcc, 'tcx>(bx: &mut Builder<'a, 'gcc, 'tcx>, out_ty ); - //let total_len = u128::from(in_len) * 2; - let vector = args[2].immediate(); - // TODO: - /*let indices: Option> = (0..n) - .map(|i| { - let arg_idx = i; - let val = bx.const_get_vector_element(vector, i as u64); - match bx.const_to_opt_u128(val, true) { - None => { - emit_error!("shuffle index #{} is not a constant", arg_idx); - None - } - Some(idx) if idx >= total_len => { - emit_error!( - "shuffle index #{} is out of bounds (limit {})", - arg_idx, - total_len - ); - None - } - Some(idx) => Some(bx.const_i32(idx as i32)), - } - }) - .collect(); - let indices = match indices { - Some(i) => i, - None => return Ok(bx.const_null(llret_ty)), - };*/ - return Ok(bx.shuffle_vector( args[0].immediate(), args[1].immediate(), @@ -191,723 +133,6 @@ pub fn generic_simd_intrinsic<'a, 'gcc, 'tcx>(bx: &mut Builder<'a, 'gcc, 'tcx>, )); } - /*if name == sym::simd_insert { - require!( - in_elem == arg_tys[2], - "expected inserted type `{}` (element of input `{}`), found `{}`", - in_elem, - in_ty, - arg_tys[2] - ); - return Ok(bx.insert_element( - args[0].immediate(), - args[2].immediate(), - args[1].immediate(), - )); - } - if name == sym::simd_extract { - require!( - ret_ty == in_elem, - "expected return type `{}` (element of input `{}`), found `{}`", - in_elem, - in_ty, - ret_ty - ); - return Ok(bx.extract_element(args[0].immediate(), args[1].immediate())); - } - - if name == sym::simd_select { - let m_elem_ty = in_elem; - let m_len = in_len; - require_simd!(arg_tys[1], "argument"); - let (v_len, _) = arg_tys[1].simd_size_and_type(bx.tcx()); - require!( - m_len == v_len, - "mismatched lengths: mask length `{}` != other vector length `{}`", - m_len, - v_len - ); - match m_elem_ty.kind() { - ty::Int(_) => {} - _ => return_error!("mask element type is `{}`, expected `i_`", m_elem_ty), - } - // truncate the mask to a vector of i1s - let i1 = bx.type_i1(); - let i1xn = bx.type_vector(i1, m_len as u64); - let m_i1s = bx.trunc(args[0].immediate(), i1xn); - return Ok(bx.select(m_i1s, args[1].immediate(), args[2].immediate())); - } - - if name == sym::simd_bitmask { - // The `fn simd_bitmask(vector) -> unsigned integer` intrinsic takes a - // vector mask and returns an unsigned integer containing the most - // significant bit (MSB) of each lane. - - // If the vector has less than 8 lanes, an u8 is returned with zeroed - // trailing bits. 
- let expected_int_bits = in_len.max(8); - match ret_ty.kind() { - ty::Uint(i) if i.bit_width() == Some(expected_int_bits) => (), - _ => return_error!("bitmask `{}`, expected `u{}`", ret_ty, expected_int_bits), - } - - // Integer vector : - let (i_xn, in_elem_bitwidth) = match in_elem.kind() { - ty::Int(i) => ( - args[0].immediate(), - i.bit_width().unwrap_or_else(|| bx.data_layout().pointer_size.bits()), - ), - ty::Uint(i) => ( - args[0].immediate(), - i.bit_width().unwrap_or_else(|| bx.data_layout().pointer_size.bits()), - ), - _ => return_error!( - "vector argument `{}`'s element type `{}`, expected integer element type", - in_ty, - in_elem - ), - }; - - // Shift the MSB to the right by "in_elem_bitwidth - 1" into the first bit position. - let shift_indices = - vec![ - bx.cx.const_int(bx.type_ix(in_elem_bitwidth), (in_elem_bitwidth - 1) as _); - in_len as _ - ]; - let i_xn_msb = bx.lshr(i_xn, bx.const_vector(shift_indices.as_slice())); - // Truncate vector to an - let i1xn = bx.trunc(i_xn_msb, bx.type_vector(bx.type_i1(), in_len)); - // Bitcast to iN: - let i_ = bx.bitcast(i1xn, bx.type_ix(in_len)); - // Zero-extend iN to the bitmask type: - return Ok(bx.zext(i_, bx.type_ix(expected_int_bits))); - } - - fn simd_simple_float_intrinsic<'a, 'gcc, 'tcx>( - name: Symbol, - in_elem: &::rustc_middle::ty::TyS<'_>, - in_ty: &::rustc_middle::ty::TyS<'_>, - in_len: u64, - bx: &mut Builder<'a, 'gcc, 'tcx>, - span: Span, - args: &[OperandRef<'tcx, RValue<'gcc>>], - ) -> Result, ()> { - macro_rules! emit_error { - ($msg: tt) => { - emit_error!($msg, ) - }; - ($msg: tt, $($fmt: tt)*) => { - span_invalid_monomorphization_error( - bx.sess(), span, - &format!(concat!("invalid monomorphization of `{}` intrinsic: ", $msg), - name, $($fmt)*)); - } - } - macro_rules! 
return_error { - ($($fmt: tt)*) => { - { - emit_error!($($fmt)*); - return Err(()); - } - } - } - - let (elem_ty_str, elem_ty) = if let ty::Float(f) = in_elem.kind() { - let elem_ty = bx.cx.type_float_from_ty(*f); - match f.bit_width() { - 32 => ("f32", elem_ty), - 64 => ("f64", elem_ty), - _ => { - return_error!( - "unsupported element type `{}` of floating-point vector `{}`", - f.name_str(), - in_ty - ); - } - } - } else { - return_error!("`{}` is not a floating-point type", in_ty); - }; - - let vec_ty = bx.type_vector(elem_ty, in_len); - - let (intr_name, fn_ty) = match name { - sym::simd_ceil => ("ceil", bx.type_func(&[vec_ty], vec_ty)), - sym::simd_fabs => ("fabs", bx.type_func(&[vec_ty], vec_ty)), - sym::simd_fcos => ("cos", bx.type_func(&[vec_ty], vec_ty)), - sym::simd_fexp2 => ("exp2", bx.type_func(&[vec_ty], vec_ty)), - sym::simd_fexp => ("exp", bx.type_func(&[vec_ty], vec_ty)), - sym::simd_flog10 => ("log10", bx.type_func(&[vec_ty], vec_ty)), - sym::simd_flog2 => ("log2", bx.type_func(&[vec_ty], vec_ty)), - sym::simd_flog => ("log", bx.type_func(&[vec_ty], vec_ty)), - sym::simd_floor => ("floor", bx.type_func(&[vec_ty], vec_ty)), - sym::simd_fma => ("fma", bx.type_func(&[vec_ty, vec_ty, vec_ty], vec_ty)), - sym::simd_fpowi => ("powi", bx.type_func(&[vec_ty, bx.type_i32()], vec_ty)), - sym::simd_fpow => ("pow", bx.type_func(&[vec_ty, vec_ty], vec_ty)), - sym::simd_fsin => ("sin", bx.type_func(&[vec_ty], vec_ty)), - sym::simd_fsqrt => ("sqrt", bx.type_func(&[vec_ty], vec_ty)), - sym::simd_round => ("round", bx.type_func(&[vec_ty], vec_ty)), - sym::simd_trunc => ("trunc", bx.type_func(&[vec_ty], vec_ty)), - _ => return_error!("unrecognized intrinsic `{}`", name), - }; - let llvm_name = &format!("llvm.{0}.v{1}{2}", intr_name, in_len, elem_ty_str); - let f = bx.declare_cfn(&llvm_name, fn_ty); - let c = bx.call(f, &args.iter().map(|arg| arg.immediate()).collect::>(), None); - Ok(c) - } - - if std::matches!( - name, - sym::simd_ceil - | sym::simd_fabs - | sym::simd_fcos - | sym::simd_fexp2 - | sym::simd_fexp - | sym::simd_flog10 - | sym::simd_flog2 - | sym::simd_flog - | sym::simd_floor - | sym::simd_fma - | sym::simd_fpow - | sym::simd_fpowi - | sym::simd_fsin - | sym::simd_fsqrt - | sym::simd_round - | sym::simd_trunc - ) { - return simd_simple_float_intrinsic(name, in_elem, in_ty, in_len, bx, span, args); - } - - // FIXME: use: - // https://github.com/llvm-mirror/llvm/blob/master/include/llvm/IR/Function.h#L182 - // https://github.com/llvm-mirror/llvm/blob/master/include/llvm/IR/Intrinsics.h#L81 - fn llvm_vector_str(elem_ty: Ty<'_>, vec_len: u64, no_pointers: usize) -> String { - let p0s: String = "p0".repeat(no_pointers); - match *elem_ty.kind() { - ty::Int(v) => format!("v{}{}i{}", vec_len, p0s, v.bit_width().unwrap()), - ty::Uint(v) => format!("v{}{}i{}", vec_len, p0s, v.bit_width().unwrap()), - ty::Float(v) => format!("v{}{}f{}", vec_len, p0s, v.bit_width()), - _ => unreachable!(), - } - } - - fn gcc_vector_ty<'gcc>( - cx: &CodegenCx<'gcc, '_>, - elem_ty: Ty<'_>, - vec_len: u64, - mut no_pointers: usize, - ) -> Type<'gcc> { - // FIXME: use cx.layout_of(ty).llvm_type() ? 
- let mut elem_ty = match *elem_ty.kind() { - ty::Int(v) => cx.type_int_from_ty(v), - ty::Uint(v) => cx.type_uint_from_ty(v), - ty::Float(v) => cx.type_float_from_ty(v), - _ => unreachable!(), - }; - while no_pointers > 0 { - elem_ty = cx.type_ptr_to(elem_ty); - no_pointers -= 1; - } - cx.type_vector(elem_ty, vec_len) - } - - if name == sym::simd_gather { - // simd_gather(values: , pointers: , - // mask: ) -> - // * N: number of elements in the input vectors - // * T: type of the element to load - // * M: any integer width is supported, will be truncated to i1 - - // All types must be simd vector types - require_simd!(in_ty, "first"); - require_simd!(arg_tys[1], "second"); - require_simd!(arg_tys[2], "third"); - require_simd!(ret_ty, "return"); - - // Of the same length: - let (out_len, _) = arg_tys[1].simd_size_and_type(bx.tcx()); - let (out_len2, _) = arg_tys[2].simd_size_and_type(bx.tcx()); - require!( - in_len == out_len, - "expected {} argument with length {} (same as input type `{}`), \ - found `{}` with length {}", - "second", - in_len, - in_ty, - arg_tys[1], - out_len - ); - require!( - in_len == out_len2, - "expected {} argument with length {} (same as input type `{}`), \ - found `{}` with length {}", - "third", - in_len, - in_ty, - arg_tys[2], - out_len2 - ); - - // The return type must match the first argument type - require!(ret_ty == in_ty, "expected return type `{}`, found `{}`", in_ty, ret_ty); - - // This counts how many pointers - fn ptr_count(t: Ty<'_>) -> usize { - match t.kind() { - ty::RawPtr(p) => 1 + ptr_count(p.ty), - _ => 0, - } - } - - // Non-ptr type - fn non_ptr(t: Ty<'_>) -> Ty<'_> { - match t.kind() { - ty::RawPtr(p) => non_ptr(p.ty), - _ => t, - } - } - - // The second argument must be a simd vector with an element type that's a pointer - // to the element type of the first argument - let (_, element_ty0) = arg_tys[0].simd_size_and_type(bx.tcx()); - let (_, element_ty1) = arg_tys[1].simd_size_and_type(bx.tcx()); - let (pointer_count, underlying_ty) = match element_ty1.kind() { - ty::RawPtr(p) if p.ty == in_elem => (ptr_count(element_ty1), non_ptr(element_ty1)), - _ => { - require!( - false, - "expected element type `{}` of second argument `{}` \ - to be a pointer to the element type `{}` of the first \ - argument `{}`, found `{}` != `*_ {}`", - element_ty1, - arg_tys[1], - in_elem, - in_ty, - element_ty1, - in_elem - ); - unreachable!(); - } - }; - assert!(pointer_count > 0); - assert_eq!(pointer_count - 1, ptr_count(element_ty0)); - assert_eq!(underlying_ty, non_ptr(element_ty0)); - - // The element type of the third argument must be a signed integer type of any width: - let (_, element_ty2) = arg_tys[2].simd_size_and_type(bx.tcx()); - match element_ty2.kind() { - ty::Int(_) => (), - _ => { - require!( - false, - "expected element type `{}` of third argument `{}` \ - to be a signed integer type", - element_ty2, - arg_tys[2] - ); - } - } - - // Alignment of T, must be a constant integer value: - let alignment_ty = bx.type_i32(); - let alignment = bx.const_i32(bx.align_of(in_elem).bytes() as i32); - - // Truncate the mask vector to a vector of i1s: - let (mask, mask_ty) = { - let i1 = bx.type_i1(); - let i1xn = bx.type_vector(i1, in_len); - (bx.trunc(args[2].immediate(), i1xn), i1xn) - }; - - // Type of the vector of pointers: - let llvm_pointer_vec_ty = gcc_vector_ty(bx, underlying_ty, in_len, pointer_count); - let llvm_pointer_vec_str = llvm_vector_str(underlying_ty, in_len, pointer_count); - - // Type of the vector of elements: - let llvm_elem_vec_ty = 
gcc_vector_ty(bx, underlying_ty, in_len, pointer_count - 1); - let llvm_elem_vec_str = llvm_vector_str(underlying_ty, in_len, pointer_count - 1); - - let llvm_intrinsic = - format!("llvm.masked.gather.{}.{}", llvm_elem_vec_str, llvm_pointer_vec_str); - let f = bx.declare_cfn( - &llvm_intrinsic, - bx.type_func( - &[llvm_pointer_vec_ty, alignment_ty, mask_ty, llvm_elem_vec_ty], - llvm_elem_vec_ty, - ), - ); - let v = bx.call(f, &[args[1].immediate(), alignment, mask, args[0].immediate()], None); - return Ok(v); - } - - if name == sym::simd_scatter { - // simd_scatter(values: , pointers: , - // mask: ) -> () - // * N: number of elements in the input vectors - // * T: type of the element to load - // * M: any integer width is supported, will be truncated to i1 - - // All types must be simd vector types - require_simd!(in_ty, "first"); - require_simd!(arg_tys[1], "second"); - require_simd!(arg_tys[2], "third"); - - // Of the same length: - let (element_len1, _) = arg_tys[1].simd_size_and_type(bx.tcx()); - let (element_len2, _) = arg_tys[2].simd_size_and_type(bx.tcx()); - require!( - in_len == element_len1, - "expected {} argument with length {} (same as input type `{}`), \ - found `{}` with length {}", - "second", - in_len, - in_ty, - arg_tys[1], - element_len1 - ); - require!( - in_len == element_len2, - "expected {} argument with length {} (same as input type `{}`), \ - found `{}` with length {}", - "third", - in_len, - in_ty, - arg_tys[2], - element_len2 - ); - - // This counts how many pointers - fn ptr_count(t: Ty<'_>) -> usize { - match t.kind() { - ty::RawPtr(p) => 1 + ptr_count(p.ty), - _ => 0, - } - } - - // Non-ptr type - fn non_ptr(t: Ty<'_>) -> Ty<'_> { - match t.kind() { - ty::RawPtr(p) => non_ptr(p.ty), - _ => t, - } - } - - // The second argument must be a simd vector with an element type that's a pointer - // to the element type of the first argument - let (_, element_ty0) = arg_tys[0].simd_size_and_type(bx.tcx()); - let (_, element_ty1) = arg_tys[1].simd_size_and_type(bx.tcx()); - let (_, element_ty2) = arg_tys[2].simd_size_and_type(bx.tcx()); - let (pointer_count, underlying_ty) = match element_ty1.kind() { - ty::RawPtr(p) if p.ty == in_elem && p.mutbl == hir::Mutability::Mut => { - (ptr_count(element_ty1), non_ptr(element_ty1)) - } - _ => { - require!( - false, - "expected element type `{}` of second argument `{}` \ - to be a pointer to the element type `{}` of the first \ - argument `{}`, found `{}` != `*mut {}`", - element_ty1, - arg_tys[1], - in_elem, - in_ty, - element_ty1, - in_elem - ); - unreachable!(); - } - }; - assert!(pointer_count > 0); - assert_eq!(pointer_count - 1, ptr_count(element_ty0)); - assert_eq!(underlying_ty, non_ptr(element_ty0)); - - // The element type of the third argument must be a signed integer type of any width: - match element_ty2.kind() { - ty::Int(_) => (), - _ => { - require!( - false, - "expected element type `{}` of third argument `{}` \ - be a signed integer type", - element_ty2, - arg_tys[2] - ); - } - } - - // Alignment of T, must be a constant integer value: - let alignment_ty = bx.type_i32(); - let alignment = bx.const_i32(bx.align_of(in_elem).bytes() as i32); - - // Truncate the mask vector to a vector of i1s: - let (mask, mask_ty) = { - let i1 = bx.type_i1(); - let i1xn = bx.type_vector(i1, in_len); - (bx.trunc(args[2].immediate(), i1xn), i1xn) - }; - - let ret_t = bx.type_void(); - - // Type of the vector of pointers: - let llvm_pointer_vec_ty = gcc_vector_ty(bx, underlying_ty, in_len, pointer_count); - let llvm_pointer_vec_str = 
llvm_vector_str(underlying_ty, in_len, pointer_count); - - // Type of the vector of elements: - let llvm_elem_vec_ty = gcc_vector_ty(bx, underlying_ty, in_len, pointer_count - 1); - let llvm_elem_vec_str = llvm_vector_str(underlying_ty, in_len, pointer_count - 1); - - let llvm_intrinsic = - format!("llvm.masked.scatter.{}.{}", llvm_elem_vec_str, llvm_pointer_vec_str); - let f = bx.declare_cfn( - &llvm_intrinsic, - bx.type_func(&[llvm_elem_vec_ty, llvm_pointer_vec_ty, alignment_ty, mask_ty], ret_t), - ); - let v = bx.call(f, &[args[0].immediate(), args[1].immediate(), alignment, mask], None); - return Ok(v); - } - - macro_rules! arith_red { - ($name:ident : $integer_reduce:ident, $float_reduce:ident, $ordered:expr, $op:ident, - $identity:expr) => { - if name == sym::$name { - require!( - ret_ty == in_elem, - "expected return type `{}` (element of input `{}`), found `{}`", - in_elem, - in_ty, - ret_ty - ); - return match in_elem.kind() { - ty::Int(_) | ty::Uint(_) => { - let r = bx.$integer_reduce(args[0].immediate()); - if $ordered { - // if overflow occurs, the result is the - // mathematical result modulo 2^n: - Ok(bx.$op(args[1].immediate(), r)) - } else { - Ok(bx.$integer_reduce(args[0].immediate())) - } - } - ty::Float(f) => { - let acc = if $ordered { - // ordered arithmetic reductions take an accumulator - args[1].immediate() - } else { - // unordered arithmetic reductions use the identity accumulator - match f.bit_width() { - 32 => bx.const_real(bx.type_f32(), $identity), - 64 => bx.const_real(bx.type_f64(), $identity), - v => return_error!( - r#" -unsupported {} from `{}` with element `{}` of size `{}` to `{}`"#, - sym::$name, - in_ty, - in_elem, - v, - ret_ty - ), - } - }; - Ok(bx.$float_reduce(acc, args[0].immediate())) - } - _ => return_error!( - "unsupported {} from `{}` with element `{}` to `{}`", - sym::$name, - in_ty, - in_elem, - ret_ty - ), - }; - } - }; - } - - arith_red!(simd_reduce_add_ordered: vector_reduce_add, vector_reduce_fadd, true, add, 0.0); - arith_red!(simd_reduce_mul_ordered: vector_reduce_mul, vector_reduce_fmul, true, mul, 1.0); - arith_red!( - simd_reduce_add_unordered: vector_reduce_add, - vector_reduce_fadd_fast, - false, - add, - 0.0 - ); - arith_red!( - simd_reduce_mul_unordered: vector_reduce_mul, - vector_reduce_fmul_fast, - false, - mul, - 1.0 - ); - - macro_rules! minmax_red { - ($name:ident: $int_red:ident, $float_red:ident) => { - if name == sym::$name { - require!( - ret_ty == in_elem, - "expected return type `{}` (element of input `{}`), found `{}`", - in_elem, - in_ty, - ret_ty - ); - return match in_elem.kind() { - ty::Int(_i) => Ok(bx.$int_red(args[0].immediate(), true)), - ty::Uint(_u) => Ok(bx.$int_red(args[0].immediate(), false)), - ty::Float(_f) => Ok(bx.$float_red(args[0].immediate())), - _ => return_error!( - "unsupported {} from `{}` with element `{}` to `{}`", - sym::$name, - in_ty, - in_elem, - ret_ty - ), - }; - } - }; - } - - minmax_red!(simd_reduce_min: vector_reduce_min, vector_reduce_fmin); - minmax_red!(simd_reduce_max: vector_reduce_max, vector_reduce_fmax); - - minmax_red!(simd_reduce_min_nanless: vector_reduce_min, vector_reduce_fmin_fast); - minmax_red!(simd_reduce_max_nanless: vector_reduce_max, vector_reduce_fmax_fast); - - macro_rules! 
bitwise_red { - ($name:ident : $red:ident, $boolean:expr) => { - if name == sym::$name { - let input = if !$boolean { - require!( - ret_ty == in_elem, - "expected return type `{}` (element of input `{}`), found `{}`", - in_elem, - in_ty, - ret_ty - ); - args[0].immediate() - } else { - match in_elem.kind() { - ty::Int(_) | ty::Uint(_) => {} - _ => return_error!( - "unsupported {} from `{}` with element `{}` to `{}`", - sym::$name, - in_ty, - in_elem, - ret_ty - ), - } - - // boolean reductions operate on vectors of i1s: - let i1 = bx.type_i1(); - let i1xn = bx.type_vector(i1, in_len as u64); - bx.trunc(args[0].immediate(), i1xn) - }; - return match in_elem.kind() { - ty::Int(_) | ty::Uint(_) => { - let r = bx.$red(input); - Ok(if !$boolean { r } else { bx.zext(r, bx.type_bool()) }) - } - _ => return_error!( - "unsupported {} from `{}` with element `{}` to `{}`", - sym::$name, - in_ty, - in_elem, - ret_ty - ), - }; - } - }; - } - - bitwise_red!(simd_reduce_and: vector_reduce_and, false); - bitwise_red!(simd_reduce_or: vector_reduce_or, false); - bitwise_red!(simd_reduce_xor: vector_reduce_xor, false); - bitwise_red!(simd_reduce_all: vector_reduce_and, true); - bitwise_red!(simd_reduce_any: vector_reduce_or, true); - - if name == sym::simd_cast { - require_simd!(ret_ty, "return"); - let (out_len, out_elem) = ret_ty.simd_size_and_type(bx.tcx()); - require!( - in_len == out_len, - "expected return type with length {} (same as input type `{}`), \ - found `{}` with length {}", - in_len, - in_ty, - ret_ty, - out_len - ); - // casting cares about nominal type, not just structural type - if in_elem == out_elem { - return Ok(args[0].immediate()); - } - - enum Style { - Float, - Int(/* is signed? */ bool), - Unsupported, - } - - let (in_style, in_width) = match in_elem.kind() { - // vectors of pointer-sized integers should've been - // disallowed before here, so this unwrap is safe. - ty::Int(i) => (Style::Int(true), i.bit_width().unwrap()), - ty::Uint(u) => (Style::Int(false), u.bit_width().unwrap()), - ty::Float(f) => (Style::Float, f.bit_width()), - _ => (Style::Unsupported, 0), - }; - let (out_style, out_width) = match out_elem.kind() { - ty::Int(i) => (Style::Int(true), i.bit_width().unwrap()), - ty::Uint(u) => (Style::Int(false), u.bit_width().unwrap()), - ty::Float(f) => (Style::Float, f.bit_width()), - _ => (Style::Unsupported, 0), - }; - - match (in_style, out_style) { - (Style::Int(in_is_signed), Style::Int(_)) => { - return Ok(match in_width.cmp(&out_width) { - Ordering::Greater => bx.trunc(args[0].immediate(), llret_ty), - Ordering::Equal => args[0].immediate(), - Ordering::Less => { - if in_is_signed { - bx.sext(args[0].immediate(), llret_ty) - } else { - bx.zext(args[0].immediate(), llret_ty) - } - } - }); - } - (Style::Int(in_is_signed), Style::Float) => { - return Ok(if in_is_signed { - bx.sitofp(args[0].immediate(), llret_ty) - } else { - bx.uitofp(args[0].immediate(), llret_ty) - }); - } - (Style::Float, Style::Int(out_is_signed)) => { - return Ok(if out_is_signed { - bx.fptosi(args[0].immediate(), llret_ty) - } else { - bx.fptoui(args[0].immediate(), llret_ty) - }); - } - (Style::Float, Style::Float) => { - return Ok(match in_width.cmp(&out_width) { - Ordering::Greater => bx.fptrunc(args[0].immediate(), llret_ty), - Ordering::Equal => args[0].immediate(), - Ordering::Less => bx.fpext(args[0].immediate(), llret_ty), - }); - } - _ => { /* Unsupported. Fallthrough. 
*/ } - } - require!( - false, - "unsupported cast from `{}` with element `{}` to `{}` with element `{}`", - in_ty, - in_elem, - ret_ty, - out_elem - ); - }*/ - macro_rules! arith_binary { ($($name: ident: $($($p: ident),* => $call: ident),*;)*) => { $(if name == sym::$name { @@ -934,68 +159,9 @@ unsupported {} from `{}` with element `{}` of size `{}` to `{}`"#, simd_shl: Uint, Int => shl; simd_shr: Uint => lshr, Int => ashr; simd_and: Uint, Int => and; - simd_or: Uint, Int => or; // FIXME: calling or might not work on vectors. + simd_or: Uint, Int => or; // FIXME(antoyo): calling `or` might not work on vectors. simd_xor: Uint, Int => xor; - /*simd_fmax: Float => maxnum; - simd_fmin: Float => minnum;*/ } - /*macro_rules! arith_unary { - ($($name: ident: $($($p: ident),* => $call: ident),*;)*) => { - $(if name == sym::$name { - match in_elem.kind() { - $($(ty::$p(_))|* => { - return Ok(bx.$call(args[0].immediate())) - })* - _ => {}, - } - require!(false, - "unsupported operation on `{}` with element `{}`", - in_ty, - in_elem) - })* - } - } - - arith_unary! { - simd_neg: Int => neg, Float => fneg; - } - - if name == sym::simd_saturating_add || name == sym::simd_saturating_sub { - let lhs = args[0].immediate(); - let rhs = args[1].immediate(); - let is_add = name == sym::simd_saturating_add; - let ptr_bits = bx.tcx().data_layout.pointer_size.bits() as _; - let (signed, elem_width, elem_ty) = match *in_elem.kind() { - ty::Int(i) => (true, i.bit_width().unwrap_or(ptr_bits), bx.cx.type_int_from_ty(i)), - ty::Uint(i) => (false, i.bit_width().unwrap_or(ptr_bits), bx.cx.type_uint_from_ty(i)), - _ => { - return_error!( - "expected element type `{}` of vector type `{}` \ - to be a signed or unsigned integer type", - arg_tys[0].simd_size_and_type(bx.tcx()).1, - arg_tys[0] - ); - } - }; - let llvm_intrinsic = &format!( - "llvm.{}{}.sat.v{}i{}", - if signed { 's' } else { 'u' }, - if is_add { "add" } else { "sub" }, - in_len, - elem_width - ); - let vec_ty = bx.cx.type_vector(elem_ty, in_len as u64); - - let f = bx.declare_cfn( - &llvm_intrinsic, - bx.type_func(&[vec_ty, vec_ty], vec_ty), - ); - let v = bx.call(f, &[lhs, rhs], None); - return Ok(v); - }*/ - unimplemented!("simd {}", name); - - //span_bug!(span, "unknown SIMD intrinsic"); } diff --git a/src/lib.rs b/src/lib.rs index 797251814d7f4..6febccff1ffac 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -1,10 +1,8 @@ /* - * TODO: support #[inline] attributes. - * TODO: support LTO. + * TODO(antoyo): support #[inline] attributes. + * TODO(antoyo): support LTO. * - * TODO: remove the local gccjit LD_LIBRARY_PATH in config.sh. - * TODO: remove the object dependency. - * TODO: remove the patches. + * TODO(antoyo): remove the patches. 
*/ #![feature(rustc_private, decl_macro, associated_type_bounds, never_type, trusted_len)] @@ -13,13 +11,10 @@ #![warn(rust_2018_idioms)] #![warn(unused_lifetimes)] -/*extern crate flate2; extern crate libc;*/ extern crate rustc_ast; extern crate rustc_codegen_ssa; extern crate rustc_data_structures; extern crate rustc_errors; -//extern crate rustc_fs_util; extern crate rustc_hir; extern crate rustc_metadata; extern crate rustc_middle; @@ -53,7 +48,6 @@ mod mangled_std_symbols; mod mono_item; mod type_; mod type_of; -mod va_arg; use std::any::Any; use std::sync::Arc; @@ -119,7 +113,7 @@ impl CodegenBackend for GccCodegenBackend { fn link(&self, sess: &Session, mut codegen_results: CodegenResults, outputs: &OutputFilenames) -> Result<(), ErrorReported> { use rustc_codegen_ssa::back::link::link_binary; if let Some(symbols) = codegen_results.crate_info.exported_symbols.get_mut(&CrateType::Dylib) { - // TODO: remove when global initializer work without calling a function at runtime. + // TODO(antoyo): remove when global initializers work without calling a function at runtime. // HACK: since this codegen add some symbols (e.g. __gccGlobalCrateInit) and the UI // tests load libstd.so as a dynamic library, and rustc use a version-script to specify // the symbols visibility, we add * to export all symbols. @@ -159,7 +153,7 @@ impl ExtraBackendMethods for GccCodegenBackend { } fn target_machine_factory(&self, _sess: &Session, _opt_level: OptLevel) -> TargetMachineFactoryFn { - // TODO: set opt level. + // TODO(antoyo): set opt level. Arc::new(|_| { Ok(()) }) @@ -171,8 +165,7 @@ impl ExtraBackendMethods for GccCodegenBackend { fn tune_cpu<'b>(&self, _sess: &'b Session) -> Option<&'b str> { None - // TODO - //llvm_util::tune_cpu(sess) + // TODO(antoyo) } } @@ -197,7 +190,7 @@ pub struct GccContext { } unsafe impl Send for GccContext {} -// FIXME: that shouldn't be Sync. Parallel compilation is currently disabled with "-Zno-parallel-llvm". Try to disable it here. +// FIXME(antoyo): that shouldn't be Sync. Parallel compilation is currently disabled with "-Zno-parallel-llvm". Try to disable it here. unsafe impl Sync for GccContext {} impl WriteBackendMethods for GccCodegenBackend { @@ -209,16 +202,13 @@ impl WriteBackendMethods for GccCodegenBackend { type ThinBuffer = ThinBuffer; fn run_fat_lto(_cgcx: &CodegenContext, mut modules: Vec>, _cached_modules: Vec<(SerializedModule, WorkProduct)>) -> Result, FatalError> { - // TODO: implement LTO by sending -flto to libgccjit and adding the appropriate gcc linker plugins. + // TODO(antoyo): implement LTO by sending -flto to libgccjit and adding the appropriate gcc linker plugins. // NOTE: implemented elsewhere. let module = match modules.remove(0) { FatLTOInput::InMemory(module) => module, FatLTOInput::Serialized { ..
} => { unimplemented!(); - /*info!("pushing serialized module {:?}", name); - let buffer = SerializedModule::Local(buffer); - serialized_modules.push((buffer, CString::new(name).unwrap()));*/ } }; Ok(LtoModuleCodegen::Fat { module: Some(module), _serialized_bitcode: vec![] }) @@ -233,9 +223,6 @@ impl WriteBackendMethods for GccCodegenBackend { } unsafe fn optimize(_cgcx: &CodegenContext, _diag_handler: &Handler, module: &ModuleCodegen, config: &ModuleConfig) -> Result<(), FatalError> { - //if cgcx.lto == Lto::Fat { - //module.module_llvm.context.add_driver_option("-flto"); - //} module.module_llvm.context.set_optimization_level(to_gcc_opt_level(config.opt_level)); Ok(()) } @@ -257,7 +244,7 @@ impl WriteBackendMethods for GccCodegenBackend { } fn run_lto_pass_manager(_cgcx: &CodegenContext, _module: &ModuleCodegen, _config: &ModuleConfig, _thin: bool) -> Result<(), FatalError> { - // TODO + // TODO(antoyo) Ok(()) } @@ -266,10 +253,6 @@ impl WriteBackendMethods for GccCodegenBackend { } } -/*fn target_triple(sess: &Session) -> target_lexicon::Triple { - sess.target.llvm_target.parse().unwrap() -}*/ - /// This is the entrypoint for a hot plugged rustc_codegen_gccjit #[no_mangle] pub fn __rustc_codegen_backend() -> Box { @@ -306,11 +289,6 @@ fn handle_native(name: &str) -> &str { } unimplemented!(); - /*unsafe { - let mut len = 0; - let ptr = llvm::LLVMRustGetHostCPUName(&mut len); - str::from_utf8(slice::from_raw_parts(ptr as *const u8, len)).unwrap() - }*/ } pub fn target_cpu(sess: &Session) -> &str { @@ -327,14 +305,7 @@ pub fn target_features(sess: &Session) -> Vec { }, ) .filter(|_feature| { - /*if feature.starts_with("sse") { - return true; - }*/ - // TODO: implement a way to get enabled feature in libgccjit. - //println!("Feature: {}", feature); - /*let llvm_feature = to_llvm_feature(sess, feature); - let cstr = CString::new(llvm_feature).unwrap(); - unsafe { llvm::LLVMRustHasFeature(target_machine, cstr.as_ptr()) }*/ + // TODO(antoyo): implement a way to get enabled feature in libgccjit. false }) .map(|feature| Symbol::intern(feature)) diff --git a/src/mono_item.rs b/src/mono_item.rs index c261efbbc559f..f9ec933dd3abc 100644 --- a/src/mono_item.rs +++ b/src/mono_item.rs @@ -26,12 +26,7 @@ impl<'gcc, 'tcx> PreDefineMethods<'tcx> for CodegenCx<'gcc, 'tcx> { ) }); - // TODO - /*unsafe { - llvm::LLVMRustSetLinkage(global, base::linkage_to_llvm(linkage)); - llvm::LLVMRustSetVisibility(global, base::visibility_to_llvm(visibility)); - }*/ - + // TODO(antoyo): set linkage and visibility. self.instances.borrow_mut().insert(instance, global); } @@ -43,17 +38,8 @@ impl<'gcc, 'tcx> PreDefineMethods<'tcx> for CodegenCx<'gcc, 'tcx> { let _decl = self.declare_fn(symbol_name, &fn_abi); //let attrs = self.tcx.codegen_fn_attrs(instance.def_id()); - // TODO: call set_link_section() to allow initializing argc/argv. - //base::set_link_section(decl, &attrs); - /*if linkage == Linkage::LinkOnceODR || linkage == Linkage::WeakODR { - llvm::SetUniqueComdat(self.llmod, decl); - }*/ - - //debug!("predefine_fn: instance = {:?}", instance); - - // TODO: use inline attribute from there in linkage.set() above: - //attributes::from_fn_attrs(self, decl, instance); - - //self.instances.borrow_mut().insert(instance, decl); + // TODO(antoyo): call set_link_section() to allow initializing argc/argv. + // TODO(antoyo): set unique comdat. + // TODO(antoyo): use inline attribute from there in linkage.set() above. 
} } diff --git a/src/type_.rs b/src/type_.rs index 2ef90bca5ac20..3545e1b628105 100644 --- a/src/type_.rs +++ b/src/type_.rs @@ -14,6 +14,9 @@ use crate::type_of::LayoutGccExt; impl<'gcc, 'tcx> CodegenCx<'gcc, 'tcx> { pub fn type_ix(&self, num_bits: u64) -> Type<'gcc> { // gcc only supports 1, 2, 4 or 8-byte integers. + // FIXME(antoyo): it is misleading to use the next power of two here, as rustc_codegen_ssa + // sometimes uses 96-bit numbers and the following code will give an integer of a different + // size. let bytes = (num_bits / 8).next_power_of_two() as i32; match bytes { 1 => self.i8_type, @@ -23,17 +26,8 @@ impl<'gcc, 'tcx> CodegenCx<'gcc, 'tcx> { 16 => self.i128_type, _ => panic!("unexpected num_bits: {}", num_bits), } - /* - let bytes = (num_bits / 8).next_power_of_two() as i32; - println!("num_bits: {}, bytes: {}", num_bits, bytes); - self.context.new_int_type(bytes, true) // TODO: check if it is indeed a signed integer. - */ } - /*pub fn type_bool(&self) -> Type<'gcc> { - self.bool_type - }*/ - pub fn type_void(&self) -> Type<'gcc> { self.context.new_type::<()>() } @@ -67,39 +61,6 @@ impl<'gcc, 'tcx> CodegenCx<'gcc, 'tcx> { let ity = Integer::approximate_align(self, align); self.type_from_integer(ity) } - - /*pub fn type_int_from_ty(&self, t: ty::IntTy) -> Type<'gcc> { - match t { - ty::IntTy::Isize => self.type_isize(), - ty::IntTy::I8 => self.type_i8(), - ty::IntTy::I16 => self.type_i16(), - ty::IntTy::I32 => self.type_i32(), - ty::IntTy::I64 => self.type_i64(), - ty::IntTy::I128 => self.type_i128(), - } - } - - pub fn type_uint_from_ty(&self, t: ty::UintTy) -> Type<'gcc> { - match t { - ty::UintTy::Usize => self.type_isize(), - ty::UintTy::U8 => self.type_i8(), - ty::UintTy::U16 => self.type_i16(), - ty::UintTy::U32 => self.type_i32(), - ty::UintTy::U64 => self.type_i64(), - ty::UintTy::U128 => self.type_i128(), - } - } - - pub fn type_float_from_ty(&self, t: ty::FloatTy) -> Type<'gcc> { - match t { - ty::FloatTy::F32 => self.type_f32(), - ty::FloatTy::F64 => self.type_f64(), - } - } - - pub fn type_vector(&self, ty: Type<'gcc>, len: u64) -> Type<'gcc> { - self.context.new_vector_type(ty, len) - }*/ } impl<'gcc, 'tcx> BaseTypeMethods<'tcx> for CodegenCx<'gcc, 'tcx> { @@ -151,9 +112,7 @@ impl<'gcc, 'tcx> BaseTypeMethods<'tcx> for CodegenCx<'gcc, 'tcx> { let fields: Vec<_> = fields.iter().enumerate() .map(|(index, field)| self.context.new_field(None, *field, &format!("field{}_TODO", index))) .collect(); - // TODO: use packed. - //let name = types.iter().map(|typ| format!("{:?}", typ)).collect::>().join("_"); - //let typ = self.context.new_struct_type(None, format!("struct{}", name), &fields).as_type(); + // TODO(antoyo): use packed. let typ = self.context.new_struct_type(None, "struct", &fields).as_type(); self.struct_types.borrow_mut().insert(types, typ); typ @@ -167,21 +126,17 @@ impl<'gcc, 'tcx> BaseTypeMethods<'tcx> for CodegenCx<'gcc, 'tcx> { TypeKind::Vector } else { - // TODO + // TODO(antoyo): support other types.
TypeKind::Void } } fn type_ptr_to(&self, ty: Type<'gcc>) -> Type<'gcc> { - // TODO - /*assert_ne!(self.type_kind(ty), TypeKind::Function, - "don't call ptr_to on function types, use ptr_to_gcc_type on FnAbi instead" - );*/ ty.make_pointer() } fn type_ptr_to_ext(&self, ty: Type<'gcc>, _address_space: AddressSpace) -> Type<'gcc> { - // TODO: use address_space + // TODO(antoyo): use address_space ty.make_pointer() } @@ -202,7 +157,6 @@ impl<'gcc, 'tcx> BaseTypeMethods<'tcx> for CodegenCx<'gcc, 'tcx> { fn vector_length(&self, _ty: Type<'gcc>) -> usize { unimplemented!(); - //unsafe { llvm::LLVMGetVectorSize(ty) as usize } } fn float_width(&self, typ: Type<'gcc>) -> usize { @@ -217,14 +171,7 @@ impl<'gcc, 'tcx> BaseTypeMethods<'tcx> for CodegenCx<'gcc, 'tcx> { else { panic!("Cannot get width of float type {:?}", typ); } - // TODO: support other sizes. - /*match self.type_kind(ty) { - TypeKind::Float => 32, - TypeKind::Double => 64, - TypeKind::X86_FP80 => 80, - TypeKind::FP128 | TypeKind::PPC_FP128 => 128, - _ => bug!("llvm_float_width called on a non-float type"), - }*/ + // TODO(antoyo): support other sizes. } fn int_width(&self, typ: Type<'gcc>) -> u64 { @@ -263,21 +210,13 @@ impl<'gcc, 'tcx> CodegenCx<'gcc, 'tcx> { } pub fn set_struct_body(&self, typ: Struct<'gcc>, fields: &[Type<'gcc>], _packed: bool) { - // TODO: use packed. + // TODO(antoyo): use packed. let fields: Vec<_> = fields.iter().enumerate() .map(|(index, field)| self.context.new_field(None, *field, &format!("field_{}", index))) .collect(); typ.set_fields(None, &fields); } - /*fn type_struct(&self, fields: &[Type<'gcc>], packed: bool) -> Type<'gcc> { - // TODO: use packed. - let fields: Vec<_> = fields.iter().enumerate() - .map(|(index, field)| self.context.new_field(None, *field, &format!("field_{}", index))) - .collect(); - return self.context.new_struct_type(None, "unnamedStruct", &fields).as_type(); - }*/ - pub fn type_named_struct(&self, name: &str) -> Struct<'gcc> { self.context.new_opaque_struct_type(None, name) } @@ -288,7 +227,7 @@ impl<'gcc, 'tcx> CodegenCx<'gcc, 'tcx> { // NOTE: since gccjit only supports i32 for the array size and libcore's tests uses a // size of usize::MAX in test_binary_search, we workaround this by setting the size to // zero for ZSTs. - // FIXME: fix gccjit API. + // FIXME(antoyo): fix gccjit API. len = 0; } } @@ -305,7 +244,6 @@ impl<'gcc, 'tcx> CodegenCx<'gcc, 'tcx> { } pub fn struct_fields<'gcc, 'tcx>(cx: &CodegenCx<'gcc, 'tcx>, layout: TyAndLayout<'tcx>) -> (Vec>, bool) { - //debug!("struct_fields: {:#?}", layout); let field_count = layout.fields.count(); let mut packed = false; @@ -319,23 +257,13 @@ pub fn struct_fields<'gcc, 'tcx>(cx: &CodegenCx<'gcc, 'tcx>, layout: TyAndLayout layout.align.abi.min(field.align.abi).restrict_for_offset(target_offset); packed |= effective_field_align < field.align.abi; - /*debug!( - "struct_fields: {}: {:?} offset: {:?} target_offset: {:?} \ - effective_field_align: {}", - i, - field, - offset, - target_offset, - effective_field_align.bytes() - );*/ assert!(target_offset >= offset); let padding = target_offset - offset; let padding_align = prev_effective_align.min(effective_field_align); assert_eq!(offset.align_to(padding_align) + padding, target_offset); result.push(cx.type_padding_filler(padding, padding_align)); - //debug!(" padding before: {:?}", padding); - result.push(field.gcc_type(cx, !field.ty.is_any_ptr())); // FIXME: might need to check if the type is inside another, like Box. 
+ result.push(field.gcc_type(cx, !field.ty.is_any_ptr())); // FIXME(antoyo): might need to check if the type is inside another, like Box. offset = target_offset + field.size; prev_effective_align = effective_field_align; } @@ -346,14 +274,8 @@ pub fn struct_fields<'gcc, 'tcx>(cx: &CodegenCx<'gcc, 'tcx>, layout: TyAndLayout let padding = layout.size - offset; let padding_align = prev_effective_align; assert_eq!(offset.align_to(padding_align) + padding, layout.size); - /*debug!( - "struct_fields: pad_bytes: {:?} offset: {:?} stride: {:?}", - padding, offset, layout.size - );*/ result.push(cx.type_padding_filler(padding, padding_align)); assert_eq!(result.len(), 1 + field_count * 2); - } else { - //debug!("struct_fields: offset: {:?} stride: {:?}", offset, layout.size); } (result, packed) diff --git a/src/type_of.rs b/src/type_of.rs index 010805808d2a3..9302f57b642af 100644 --- a/src/type_of.rs +++ b/src/type_of.rs @@ -71,7 +71,7 @@ pub fn uncached_gcc_type<'gcc, 'tcx>(cx: &CodegenCx<'gcc, 'tcx>, layout: TyAndLa // If `Some` is returned then a named struct is created in LLVM. Name collisions are // avoided by LLVM (with increasing suffixes). If rustc doesn't generate names then that // can improve perf. - // FIXME: I don't think that's true for libgccjit. + // FIXME(antoyo): I don't think that's true for libgccjit. Some(String::new()) } _ => None, @@ -144,6 +144,7 @@ impl<'tcx> LayoutGccExt<'tcx> for TyAndLayout<'tcx> { /// with the inner-most trailing unsized field using the "minimal unit" /// of that field's type - this is useful for taking the address of /// that field and ensuring the struct has the right alignment. + //TODO(antoyo): do we still need the set_fields parameter? fn gcc_type<'gcc>(&self, cx: &CodegenCx<'gcc, 'tcx>, set_fields: bool) -> Type<'gcc> { if let Abi::Scalar(ref scalar) = self.abi { // Use a different cache for scalars because pointers to DSTs @@ -184,8 +185,6 @@ impl<'tcx> LayoutGccExt<'tcx> for TyAndLayout<'tcx> { return ty; } - //debug!("gcc_type({:#?})", self); - assert!(!self.ty.has_escaping_bound_vars(), "{:?} has escaping bound vars", self.ty); // Make sure lifetimes are erased, to avoid generating distinct LLVM @@ -204,22 +203,12 @@ impl<'tcx> LayoutGccExt<'tcx> for TyAndLayout<'tcx> { else { uncached_gcc_type(cx, *self, &mut defer) }; - //debug!("--> mapped {:#?} to ty={:?}", self, ty); cx.types.borrow_mut().insert((self.ty, variant_index), ty); if let Some((ty, layout)) = defer { - //TODO: do we still need this conditions and the set_fields parameter? - //if set_fields { - let (fields, packed) = struct_fields(cx, layout); - cx.set_struct_body(ty, &fields, packed); - /*} - else { - // Since we might be trying to generate a type containing another type which is not - // completely generated yet, we don't set the fields right now, but we save the - // type to set the fields later. - cx.types_with_fields_to_set.borrow_mut().insert(ty.as_type(), (ty, layout)); - }*/ + let (fields, packed) = struct_fields(cx, layout); + cx.set_struct_body(ty, &fields, packed); } ty @@ -255,7 +244,7 @@ impl<'tcx> LayoutGccExt<'tcx> for TyAndLayout<'tcx> { } fn scalar_pair_element_gcc_type<'gcc>(&self, cx: &CodegenCx<'gcc, 'tcx>, index: usize, immediate: bool) -> Type<'gcc> { - // TODO: remove llvm hack: + // TODO(antoyo): remove llvm hack: // HACK(eddyb) special-case fat pointers until LLVM removes // pointee types, to avoid bitcasting every `OperandRef::deref`. 
match self.ty.kind() { @@ -281,8 +270,8 @@ impl<'tcx> LayoutGccExt<'tcx> for TyAndLayout<'tcx> { // immediate, just like `bool` is typically `i8` in memory and only `i1` // when immediate. We need to load/store `bool` as `i8` to avoid // crippling LLVM optimizations or triggering other LLVM bugs with `i1`. - // TODO: this bugs certainly don't happen in this case since the bool type is used instead of i1. - if /*immediate &&*/ scalar.is_bool() { + // TODO(antoyo): these bugs certainly don't happen in this case since the bool type is used instead of i1. + if scalar.is_bool() { return cx.type_i1(); } @@ -361,12 +350,10 @@ impl<'gcc, 'tcx> LayoutTypeMethods<'tcx> for CodegenCx<'gcc, 'tcx> { fn reg_backend_type(&self, _ty: &Reg) -> Type<'gcc> { unimplemented!(); - //ty.gcc_type(self) } fn fn_decl_backend_type(&self, _fn_abi: &FnAbi<'tcx, Ty<'tcx>>) -> Type<'gcc> { - // FIXME: return correct type. + // FIXME(antoyo): return correct type. self.type_void() - //fn_abi.gcc_type(self) } } diff --git a/src/va_arg.rs b/src/va_arg.rs deleted file mode 100644 index 404a169d39ac9..0000000000000 --- a/src/va_arg.rs +++ /dev/null @@ -1,179 +0,0 @@ -/*use gccjit::{RValue, ToRValue, Type}; -use rustc_codegen_ssa::mir::operand::OperandRef; -use rustc_codegen_ssa::{ - common::IntPredicate, - traits::{BaseTypeMethods, BuilderMethods, ConstMethods, DerivedTypeMethods}, -}; -use rustc_middle::ty::layout::HasTyCtxt; -use rustc_middle::ty::Ty; -use rustc_target::abi::{Align, Endian, HasDataLayout, LayoutOf, Size}; - -use crate::builder::Builder; -use crate::type_of::LayoutGccExt; - -fn round_pointer_up_to_alignment<'a, 'gcc, 'tcx>(bx: &mut Builder<'a, 'gcc, 'tcx>, addr: RValue<'gcc>, align: Align, ptr_ty: Type<'gcc>) -> RValue<'gcc> { - let mut ptr_as_int = bx.ptrtoint(addr, bx.cx().type_isize()); - ptr_as_int = bx.add(ptr_as_int, bx.cx().const_i32(align.bytes() as i32 - 1)); - ptr_as_int = bx.and(ptr_as_int, bx.cx().const_i32(-(align.bytes() as i32))); - bx.inttoptr(ptr_as_int, ptr_ty) -} - -fn emit_direct_ptr_va_arg<'a, 'gcc, 'tcx>(bx: &mut Builder<'a, 'gcc, 'tcx>, list: OperandRef<'tcx, RValue<'gcc>>, llty: Type<'gcc>, size: Size, align: Align, slot_size: Align, allow_higher_align: bool) -> (RValue<'gcc>, Align) { - let va_list_ptr_ty = bx.cx().type_ptr_to(bx.cx.type_i8p()); - let va_list_addr = - if list.layout.gcc_type(bx.cx, true) != va_list_ptr_ty { - bx.bitcast(list.immediate(), va_list_ptr_ty) - } - else { - list.immediate() - }; - - let ptr = bx.load(va_list_addr, bx.tcx().data_layout.pointer_align.abi); - - let (addr, addr_align) = if allow_higher_align && align > slot_size { - (round_pointer_up_to_alignment(bx, ptr, align, bx.cx().type_i8p()), align) - } else { - (ptr, slot_size) - }; - - let aligned_size = size.align_to(slot_size).bytes() as i32; - let full_direct_size = bx.cx().const_i32(aligned_size); - let next = bx.inbounds_gep(addr, &[full_direct_size]); - bx.store(next, va_list_addr, bx.tcx().data_layout.pointer_align.abi); - - if size.bytes() < slot_size.bytes() && bx.tcx().sess.target.endian == Endian::Big { - let adjusted_size = bx.cx().const_i32((slot_size.bytes() - size.bytes()) as i32); - let adjusted = bx.inbounds_gep(addr, &[adjusted_size]); - (bx.bitcast(adjusted, bx.cx().type_ptr_to(llty)), addr_align) - } else { - (bx.bitcast(addr, bx.cx().type_ptr_to(llty)), addr_align) - } -} - -fn emit_ptr_va_arg<'a, 'gcc, 'tcx>(bx: &mut Builder<'a, 'gcc, 'tcx>, list: OperandRef<'tcx, RValue<'gcc>>, target_ty: Ty<'tcx>, indirect: bool, slot_size: Align, allow_higher_align: bool) -> RValue<'gcc> { -
let layout = bx.cx.layout_of(target_ty); - let (llty, size, align) = - if indirect { - ( - bx.cx.layout_of(bx.cx.tcx.mk_imm_ptr(target_ty)).gcc_type(bx.cx, true), - bx.cx.data_layout().pointer_size, - bx.cx.data_layout().pointer_align, - ) - } - else { - (layout.gcc_type(bx.cx, true), layout.size, layout.align) - }; - let (addr, addr_align) = emit_direct_ptr_va_arg(bx, list, llty, size, align.abi, slot_size, allow_higher_align); - if indirect { - let tmp_ret = bx.load(addr, addr_align); - bx.load(tmp_ret, align.abi) - } - else { - bx.load(addr, addr_align) - } -} - -fn emit_aapcs_va_arg<'a, 'gcc, 'tcx>(bx: &mut Builder<'a, 'gcc, 'tcx>, list: OperandRef<'tcx, RValue<'gcc>>, target_ty: Ty<'tcx>) -> RValue<'gcc> { - // Implementation of the AAPCS64 calling convention for va_args see - // https://github.com/ARM-software/abi-aa/blob/master/aapcs64/aapcs64.rst - let va_list_addr = list.immediate(); - let layout = bx.cx.layout_of(target_ty); - let gcc_type = layout.immediate_gcc_type(bx); - - let function = bx.llbb().get_function(); - let variable = function.new_local(None, gcc_type, "va_arg"); - - let mut maybe_reg = bx.build_sibling_block("va_arg.maybe_reg"); - let mut in_reg = bx.build_sibling_block("va_arg.in_reg"); - let mut on_stack = bx.build_sibling_block("va_arg.on_stack"); - let end = bx.build_sibling_block("va_arg.end"); - let zero = bx.const_i32(0); - let offset_align = Align::from_bytes(4).unwrap(); - assert!(bx.tcx().sess.target.endian == Endian::Little); - - let gr_type = target_ty.is_any_ptr() || target_ty.is_integral(); - let (reg_off, reg_top_index, slot_size) = if gr_type { - let gr_offs = bx.struct_gep(va_list_addr, 7); - let nreg = (layout.size.bytes() + 7) / 8; - (gr_offs, 3, nreg * 8) - } else { - let vr_off = bx.struct_gep(va_list_addr, 9); - let nreg = (layout.size.bytes() + 15) / 16; - (vr_off, 5, nreg * 16) - }; - - // if the offset >= 0 then the value will be on the stack - let mut reg_off_v = bx.load(reg_off, offset_align); - let use_stack = bx.icmp(IntPredicate::IntSGE, reg_off_v, zero); - bx.cond_br(use_stack, on_stack.llbb(), maybe_reg.llbb()); - - // The value at this point might be in a register, but there is a chance that - // it could be on the stack so we have to update the offset and then check - // the offset again. - - if gr_type && layout.align.abi.bytes() > 8 { - reg_off_v = maybe_reg.add(reg_off_v, bx.const_i32(15)); - reg_off_v = maybe_reg.and(reg_off_v, bx.const_i32(-16)); - } - let new_reg_off_v = maybe_reg.add(reg_off_v, bx.const_i32(slot_size as i32)); - - maybe_reg.store(new_reg_off_v, reg_off, offset_align); - - // Check to see if we have overflowed the registers as a result of this. 
- // If we have then we need to use the stack for this value - let use_stack = maybe_reg.icmp(IntPredicate::IntSGT, new_reg_off_v, zero); - maybe_reg.cond_br(use_stack, on_stack.llbb(), in_reg.llbb()); - - let top = in_reg.struct_gep(va_list_addr, reg_top_index); - let top = in_reg.load(top, bx.tcx().data_layout.pointer_align.abi); - - // reg_value = *(@top + reg_off_v); - let top = in_reg.gep(top, &[reg_off_v]); - let top = in_reg.bitcast(top, bx.cx.type_ptr_to(layout.gcc_type(bx, true))); - let reg_value = in_reg.load(top, layout.align.abi); - in_reg.assign(variable, reg_value); - in_reg.br(end.llbb()); - - // On Stack block - let stack_value = - emit_ptr_va_arg(&mut on_stack, list, target_ty, false, Align::from_bytes(8).unwrap(), true); - on_stack.assign(variable, stack_value); - on_stack.br(end.llbb()); - - *bx = end; - variable.to_rvalue() -} - -pub(super) fn emit_va_arg<'a, 'gcc, 'tcx>(bx: &mut Builder<'a, 'gcc, 'tcx>, addr: OperandRef<'tcx, RValue<'gcc>>, target_ty: Ty<'tcx>) -> RValue<'gcc> { - // Determine the va_arg implementation to use. The LLVM va_arg instruction - // is lacking in some instances, so we should only use it as a fallback. - let target = &bx.cx.tcx.sess.target; - let arch = &bx.cx.tcx.sess.target.arch; - match &**arch { - // Windows x86 - "x86" if target.options.is_like_windows => { - emit_ptr_va_arg(bx, addr, target_ty, false, Align::from_bytes(4).unwrap(), false) - } - // Generic x86 - "x86" => emit_ptr_va_arg(bx, addr, target_ty, false, Align::from_bytes(4).unwrap(), true), - // Windows AArch64 - "aarch64" if target.options.is_like_windows => { - emit_ptr_va_arg(bx, addr, target_ty, false, Align::from_bytes(8).unwrap(), false) - } - // macOS / iOS AArch64 - "aarch64" if target.options.is_like_osx => { - emit_ptr_va_arg(bx, addr, target_ty, false, Align::from_bytes(8).unwrap(), true) - } - "aarch64" => emit_aapcs_va_arg(bx, addr, target_ty), - // Windows x86_64 - "x86_64" if target.options.is_like_windows => { - let target_ty_size = bx.cx.size_of(target_ty).bytes(); - let indirect: bool = target_ty_size > 8 || !target_ty_size.is_power_of_two(); - emit_ptr_va_arg(bx, addr, target_ty, indirect, Align::from_bytes(8).unwrap(), false) - } - // For all other architecture/OS combinations fall back to using - // the LLVM va_arg instruction. - // https://llvm.org/docs/LangRef.html#va-arg-instruction - _ => bx.va_arg(addr.immediate(), bx.cx.layout_of(target_ty).gcc_type(bx.cx, true)), - } -}*/ diff --git a/test.sh b/test.sh index 1e13b06cf9c3d..06faa98d9b1f0 100755 --- a/test.sh +++ b/test.sh @@ -1,8 +1,7 @@ #!/bin/bash -# TODO: rewrite to cargo-make (or just) or something like that to only rebuild the sysroot when needed? +# TODO(antoyo): rewrite to cargo-make (or just) or something like that to only rebuild the sysroot when needed? 
-#set -x set -e export GCC_PATH=$(cat gcc_path) @@ -30,17 +29,9 @@ $RUSTC example/mini_core.rs --crate-name mini_core --crate-type lib,dylib --targ echo "[BUILD] example" $RUSTC example/example.rs --crate-type lib --target $TARGET_TRIPLE -#if [[ "$HOST_TRIPLE" = "$TARGET_TRIPLE" ]]; then - #echo "[JIT] mini_core_hello_world" - #CG_CLIF_JIT=1 CG_CLIF_JIT_ARGS="abc bcd" $RUSTC --crate-type bin -Cprefer-dynamic example/mini_core_hello_world.rs --cfg jit --target $HOST_TRIPLE -#else - #echo "[JIT] mini_core_hello_world (skipped)" -#fi - echo "[AOT] mini_core_hello_world" $RUSTC example/mini_core_hello_world.rs --crate-name mini_core_hello_world --crate-type bin -g --target $TARGET_TRIPLE $RUN_WRAPPER ./target/out/mini_core_hello_world abc bcd -# (echo "break set -n main"; echo "run"; sleep 1; echo "si -c 10"; sleep 1; echo "frame variable") | lldb -- ./target/out/mini_core_hello_world abc bcd echo "[BUILD] sysroot" time ./build_sysroot/build_sysroot.sh @@ -52,20 +43,12 @@ $RUN_WRAPPER ./target/out/arbitrary_self_types_pointers_and_wrappers echo "[AOT] alloc_system" $RUSTC example/alloc_system.rs --crate-type lib --target "$TARGET_TRIPLE" -# FIXME: this requires linking an additional lib for __popcountdi2 -#echo "[AOT] alloc_example" -#$RUSTC example/alloc_example.rs --crate-type bin --target $TARGET_TRIPLE -#$RUN_WRAPPER ./target/out/alloc_example - -#if [[ "$HOST_TRIPLE" = "$TARGET_TRIPLE" ]]; then - #echo "[JIT] std_example" - #CG_CLIF_JIT=1 $RUSTC --crate-type bin -Cprefer-dynamic example/std_example.rs --target $HOST_TRIPLE -#else - #echo "[JIT] std_example (skipped)" -#fi +echo "[AOT] alloc_example" +$RUSTC example/alloc_example.rs --crate-type bin --target $TARGET_TRIPLE +$RUN_WRAPPER ./target/out/alloc_example echo "[AOT] dst_field_align" -# FIXME Re-add -Zmir-opt-level=2 once rust-lang/rust#67529 is fixed. +# FIXME(antoyo): Re-add -Zmir-opt-level=2 once rust-lang/rust#67529 is fixed. $RUSTC example/dst-field-align.rs --crate-name dst_field_align --crate-type bin --target $TARGET_TRIPLE $RUN_WRAPPER ./target/out/dst_field_align || (echo $?; false) @@ -81,14 +64,14 @@ echo "[AOT] track-caller-attribute" $RUSTC example/track-caller-attribute.rs --crate-type bin -Cpanic=abort --target $TARGET_TRIPLE $RUN_WRAPPER ./target/out/track-caller-attribute -# FIXME: this requires linking an additional lib for __popcountdi2 -#echo "[BUILD] mod_bench" -#$RUSTC example/mod_bench.rs --crate-type bin --target $TARGET_TRIPLE +echo "[BUILD] mod_bench" +$RUSTC example/mod_bench.rs --crate-type bin --target $TARGET_TRIPLE -# FIXME linker gives multiple definitions error on Linux +# FIXME(antoyo): linker gives multiple definitions error on Linux #echo "[BUILD] sysroot in release mode" #./build_sysroot/build_sysroot.sh --release +# TODO(antoyo): uncomment when it works. #pushd simple-raytracer #if [[ "$HOST_TRIPLE" = "$TARGET_TRIPLE" ]]; then #echo "[BENCH COMPILE] ebobby/simple-raytracer" @@ -113,6 +96,7 @@ rm -r ./target || true ../../../../../cargo.sh test popd +# TODO(antoyo): uncomment when it works. 
#pushd regex #echo "[TEST] rust-lang/regex example shootout-regex-dna" #../cargo.sh clean @@ -152,9 +136,6 @@ git fetch git checkout $(rustc -V | cut -d' ' -f3 | tr -d '(') export RUSTFLAGS= -#git apply ../rust_lang.patch - - rm config.toml || true cat > config.toml < Date: Sun, 15 Aug 2021 10:54:12 -0400 Subject: [PATCH 06/27] Empty gcc_path --- gcc_path | 1 - 1 file changed, 1 deletion(-) diff --git a/gcc_path b/gcc_path index b3696e3cf885e..e69de29bb2d1d 100644 --- a/gcc_path +++ b/gcc_path @@ -1 +0,0 @@ -/home/bouanto/Ordinateur/Programmation/Projets/gcc-build/build/gcc From 5dad13cc3bfbf69f470198860ea905d72fc9fbd3 Mon Sep 17 00:00:00 2001 From: antoyo Date: Sat, 28 Aug 2021 11:34:47 -0400 Subject: [PATCH 07/27] Update custom rustc instructions (#73) --- .github/workflows/main.yml | 5 ++++- .gitignore | 1 + Readme.md | 3 +-- gcc_path | 0 4 files changed, 6 insertions(+), 3 deletions(-) delete mode 100644 gcc_path diff --git a/.github/workflows/main.yml b/.github/workflows/main.yml index 774a68aa5e1a4..98bed8ef387ff 100644 --- a/.github/workflows/main.yml +++ b/.github/workflows/main.yml @@ -31,7 +31,9 @@ jobs: ln gcc-build/libgccjit.so gcc-build/libgccjit.so.0 - name: Set LIBRARY_PATH - run: echo "LIBRARY_PATH=$(cat gcc_path)" >> $GITHUB_ENV + run: | + echo "LIBRARY_PATH=$(cat gcc_path)" >> $GITHUB_ENV + echo "LD_LIBRARY_PATH=$(cat gcc_path)" >> $GITHUB_ENV # https://github.com/actions/cache/issues/133 - name: Fixup owner of ~/.cargo/ @@ -66,6 +68,7 @@ jobs: run: | ./prepare_build.sh ./build.sh + cargo test ./clean_all.sh - name: Prepare dependencies diff --git a/.gitignore b/.gitignore index d9f0ae05ed40d..573ed83e53566 100644 --- a/.gitignore +++ b/.gitignore @@ -15,3 +15,4 @@ gimple* *asm res test-backend +gcc_path diff --git a/Readme.md b/Readme.md index 44cb16334742f..96a6c3a04145e 100644 --- a/Readme.md +++ b/Readme.md @@ -113,6 +113,5 @@ p loc->m_line ### How to use a custom-build rustc - * Build the stage1 compiler (`rustup toolchain link debug-current stage2 build/x86_64-unknown-linux-gnu/stage1`). + * Build the stage2 compiler (`rustup toolchain link debug-current build/x86_64-unknown-linux-gnu/stage2`). * Clean and rebuild the codegen with `debug-current` in the file `rust-toolchain`. - * Add `~/.rustup/toolchains/debug-current/lib/rustlib/x86_64-unknown-linux-gnu/lib` to `LD_LIBRARY_PATH`. diff --git a/gcc_path b/gcc_path deleted file mode 100644 index e69de29bb2d1d..0000000000000 From 7c707e4b9535b70de8c6beac24b7bc05595b1932 Mon Sep 17 00:00:00 2001 From: Commeownist Date: Sun, 5 Sep 2021 18:26:01 +0300 Subject: [PATCH 08/27] Implement basic inline asm support (#72) * Implement basic support for inline assembly * Disable LTO We don't support it yet at all * Handle `inout(reg) var` correctly Turns out that `+` readwrite output registers cannot be tied with input variables. * Add limited support for llvm_asm! * Handle CHANNEL correctly * Add support for arbitrary explicit registers * Handle symbols properly * Add rudimentary asm tests * Exclude llvm_asm! 
tests from tests runs * Insert `__builtin_unreachable()` after diverging asm blocks --- .gitignore | 2 + Cargo.lock | 4 +- build.sh | 7 +- cargo.sh | 2 +- config.sh | 9 +- src/asm.rs | 724 +++++++++++++++++++++++++++++++++-------------- test.sh | 9 +- tests/run/asm.rs | 87 ++++++ 8 files changed, 631 insertions(+), 213 deletions(-) diff --git a/.gitignore b/.gitignore index 573ed83e53566..1e2f9e3aebb2c 100644 --- a/.gitignore +++ b/.gitignore @@ -7,9 +7,11 @@ perf.data.old *.events *.string* /build_sysroot/sysroot +/build_sysroot/sysroot_src /build_sysroot/Cargo.lock /build_sysroot/test_target/Cargo.lock /rust +/simple-raytracer /regex gimple* *asm diff --git a/Cargo.lock b/Cargo.lock index 952c03b05c7f4..f3e07fd08ee8e 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -56,7 +56,7 @@ dependencies = [ [[package]] name = "gccjit" version = "1.0.0" -source = "git+https://github.com/antoyo/gccjit.rs#0572117c7ffdfcb0e6c6526d45266c3f34796bea" +source = "git+https://github.com/antoyo/gccjit.rs#54be27e41fff7b6ab532e2e21a82df50a12b9ad3" dependencies = [ "gccjit_sys", ] @@ -64,7 +64,7 @@ dependencies = [ [[package]] name = "gccjit_sys" version = "0.0.1" -source = "git+https://github.com/antoyo/gccjit.rs#0572117c7ffdfcb0e6c6526d45266c3f34796bea" +source = "git+https://github.com/antoyo/gccjit.rs#54be27e41fff7b6ab532e2e21a82df50a12b9ad3" dependencies = [ "libc 0.1.12", ] diff --git a/build.sh b/build.sh index 9f1228687e27d..17a0d2ab3f060 100755 --- a/build.sh +++ b/build.sh @@ -3,7 +3,12 @@ #set -x set -e -export GCC_PATH=$(cat gcc_path) +if [ -f ./gcc_path ]; then + export GCC_PATH=$(cat gcc_path) +else + echo 'Please put the path to your custom build of libgccjit in the file `gcc_path`, see Readme.md for details' + exit 1 +fi export LD_LIBRARY_PATH="$GCC_PATH" export LIBRARY_PATH="$GCC_PATH" diff --git a/cargo.sh b/cargo.sh index a85865019c135..1001c522052c8 100755 --- a/cargo.sh +++ b/cargo.sh @@ -20,4 +20,4 @@ fi cmd=$1 shift -RUSTDOCFLAGS=$RUSTFLAGS cargo +${TOOLCHAIN} $cmd --target $TARGET_TRIPLE $@ +RUSTDOCFLAGS="$RUSTFLAGS" cargo +${TOOLCHAIN} $cmd --target $TARGET_TRIPLE $@ diff --git a/config.sh b/config.sh index bd2d915f589a0..98caeb7407e08 100644 --- a/config.sh +++ b/config.sh @@ -2,7 +2,12 @@ set -e export CARGO_INCREMENTAL=0 -export GCC_PATH=$(cat gcc_path) +if [ -f ./gcc_path ]; then + export GCC_PATH=$(cat gcc_path) +else + echo 'Please put the path to your custom build of libgccjit in the file `gcc_path`, see Readme.md for details' + exit 1 +fi unamestr=`uname` if [[ "$unamestr" == 'Linux' ]]; then @@ -30,7 +35,7 @@ if [[ "$HOST_TRIPLE" != "$TARGET_TRIPLE" ]]; then fi fi -export RUSTFLAGS=$linker' -Cpanic=abort -Cdebuginfo=2 -Zpanic-abort-tests -Zcodegen-backend='$(pwd)'/target/'$CHANNEL'/librustc_codegen_gcc.'$dylib_ext' --sysroot '$(pwd)'/build_sysroot/sysroot' +export RUSTFLAGS="$linker -Cpanic=abort -Cdebuginfo=2 -Clto=off -Zpanic-abort-tests -Zcodegen-backend=$(pwd)/target/${CHANNEL:-debug}/librustc_codegen_gcc.$dylib_ext --sysroot $(pwd)/build_sysroot/sysroot" # FIXME(antoyo): remove once the atomic shim is gone if [[ `uname` == 'Darwin' ]]; then diff --git a/src/asm.rs b/src/asm.rs index e4d57c39de48a..512b9f553f5e3 100644 --- a/src/asm.rs +++ b/src/asm.rs @@ -1,254 +1,558 @@ -use gccjit::{RValue, ToRValue, Type}; +use gccjit::{LValue, RValue, ToRValue, Type}; use rustc_ast::ast::{InlineAsmOptions, InlineAsmTemplatePiece}; use rustc_codegen_ssa::mir::operand::OperandValue; use rustc_codegen_ssa::mir::place::PlaceRef; use rustc_codegen_ssa::traits::{AsmBuilderMethods, AsmMethods, 
BaseTypeMethods, BuilderMethods, GlobalAsmOperandRef, InlineAsmOperandRef}; -use rustc_data_structures::fx::FxHashMap; + use rustc_hir::LlvmInlineAsmInner; -use rustc_middle::bug; +use rustc_middle::{bug, ty::Instance}; use rustc_span::Span; use rustc_target::asm::*; +use std::borrow::Cow; + use crate::builder::Builder; use crate::context::CodegenCx; use crate::type_of::LayoutGccExt; + +// Rust asm! and GCC Extended Asm semantics differ substantially. +// +// 1. Rust asm operands come as one list of operands. Operands themselves indicate +// if they're "in" or "out". "In" and "out" operands can interleave. One operand can be +// both "in" and "out" (`inout(reg)`). +// +// GCC asm has two different lists for "in" and "out" operands. In terms of gccjit, +// this means that all "out" operands must go before "in" operands. "In" and "out" operands +// cannot interleave. +// +// 2. Operand lists in both Rust and GCC are indexed. Index starts from 0. Indexes are important +// because the asm template refers to operands by index. +// +// Mapping from Rust to GCC index would be 1-1 if it weren't for... +// +// 3. Clobbers. GCC has a separate list of clobbers, and clobbers don't have indexes. +// By contrast, Rust expresses clobbers through "out" operands that aren't tied to +// a variable (`_`), and such "clobbers" do have an index. +// +// 4. Furthermore, GCC Extended Asm does not support explicit register constraints +// (like `out("eax")`) directly, offering so-called "local register variables" +// as a workaround. These variables need to be declared and initialized *before* +// the Extended Asm block but *after* normal local variables +// (see comment in `codegen_inline_asm` for explanation). +// +// With that in mind, let's see how we translate Rust syntax to GCC +// (from now on, `CC` stands for "constraint code"): +// +// * `out(reg_class) var` -> translated to output operand: `"=CC"(var)` +// * `inout(reg_class) var` -> translated to output operand: `"+CC"(var)` +// * `in(reg_class) var` -> translated to input operand: `"CC"(var)` +// +// * `out(reg_class) _` -> translated to one `=r(tmp)`, where "tmp" is a temporary unused variable +// +// * `out("explicit register") _` -> not translated to any operands, register is simply added to clobbers list +// +// * `inout(reg_class) in_var => out_var` -> translated to two operands: +// output: `"=CC"(out_var)` +// input: `"num"(in_var)` where num is the GCC index +// of the corresponding output operand +// +// * `inout(reg_class) in_var => _` -> same as `inout(reg_class) in_var => tmp`, +// where "tmp" is a temporary unused variable +// +// * `out/in/inout("explicit register") var` -> translated to one or two operands as described above +// with `"r"(var)` constraint, +// and one register variable assigned to the desired register.
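+//
+// As a concrete end-to-end sketch (an illustrative example of the scheme above,
+// assuming x86-64 and the generic `reg` register class; not literal compiler output):
+//
+//     let y: u64 = 42;
+//     let x: u64;
+//     unsafe { asm!("mov {0}, {1}", out(reg) x, in(reg) y); }
+//
+// becomes one GCC output operand `"=&r"(x)` (GCC index 0; `&` marks it
+// early-clobber, since a plain `out` is not a `lateout`) followed by one input
+// operand `"r"(y)` (GCC index 1), and the template is rendered as "mov %0, %1".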
+// + +const ATT_SYNTAX_INS: &str = ".att_syntax noprefix\n\t"; +const INTEL_SYNTAX_INS: &str = "\n\t.intel_syntax noprefix"; + + +struct AsmOutOperand<'a, 'tcx, 'gcc> { + rust_idx: usize, + constraint: &'a str, + late: bool, + readwrite: bool, + + tmp_var: LValue<'gcc>, + out_place: Option>> +} + +struct AsmInOperand<'a, 'tcx> { + rust_idx: usize, + constraint: Cow<'a, str>, + val: RValue<'tcx> +} + +impl AsmOutOperand<'_, '_, '_> { + fn to_constraint(&self) -> String { + let mut res = String::with_capacity(self.constraint.len() + self.late as usize + 1); + + let sign = if self.readwrite { '+' } else { '=' }; + res.push(sign); + if !self.late { + res.push('&'); + } + + res.push_str(&self.constraint); + res + } +} + +enum ConstraintOrRegister { + Constraint(&'static str), + Register(&'static str) +} + + impl<'a, 'gcc, 'tcx> AsmBuilderMethods<'tcx> for Builder<'a, 'gcc, 'tcx> { - fn codegen_llvm_inline_asm(&mut self, _ia: &LlvmInlineAsmInner, _outputs: Vec>>, mut _inputs: Vec>, _span: Span) -> bool { - // TODO(antoyo) - return true; + fn codegen_llvm_inline_asm(&mut self, ia: &LlvmInlineAsmInner, outputs: Vec>>, inputs: Vec>, span: Span) -> bool { + if ia.asm.as_str().is_empty() && outputs.is_empty() { + // TODO(@Commeownist): there's one use of `llvm_asm` in rustc sysroot we can't get rid of just yet. + // Fortunately, it's used as a simple black box to make sure that inputs are not optimized away. + // Let's just emulate it. + let block = self.llbb(); + let extended_asm = block.add_extended_asm(None, ""); + for input in inputs { + extended_asm.add_input_operand(None, "r", input); + } + extended_asm.add_clobber("memory"); + extended_asm.set_volatile_flag(true); + } + else { + // TODO(@Commeownist): switch to `struct_span_err_with_code` + // once we get this merged into rustc + self.sess().struct_span_err(span, "GCC backend does not support `llvm_asm!`") + .help("consider using the `asm!` macro instead") + .emit(); + } + + // We return `true` even if we've failed to generate the asm + // because we want to suppress the "malformed inline assembly" error + // generated by the frontend. 
+ true } - fn codegen_inline_asm(&mut self, template: &[InlineAsmTemplatePiece], operands: &[InlineAsmOperandRef<'tcx, Self>], options: InlineAsmOptions, _span: &[Span]) { + fn codegen_inline_asm(&mut self, template: &[InlineAsmTemplatePiece], rust_operands: &[InlineAsmOperandRef<'tcx, Self>], options: InlineAsmOptions, _span: &[Span]) { let asm_arch = self.tcx.sess.asm_arch.unwrap(); + let is_x86 = matches!(asm_arch, InlineAsmArch::X86 | InlineAsmArch::X86_64); + let att_dialect = is_x86 && options.contains(InlineAsmOptions::ATT_SYNTAX); + let intel_dialect = is_x86 && !options.contains(InlineAsmOptions::ATT_SYNTAX); - let intel_dialect = - match asm_arch { - InlineAsmArch::X86 | InlineAsmArch::X86_64 if !options.contains(InlineAsmOptions::ATT_SYNTAX) => true, - _ => false, - }; + // GCC index of an output operand equals its position in the array + let mut outputs = vec![]; + + // GCC index of an input operand equals its position in the array + // added to `outputs.len()` + let mut inputs = vec![]; - // Collect the types of output operands - // FIXME(antoyo): we do this here instead of later because of a bug in libgccjit where creating the - // variable after the extended asm expression causes a segfault: - // https://gcc.gnu.org/bugzilla/show_bug.cgi?id=100380 - let mut output_vars = FxHashMap::default(); - let mut operand_numbers = FxHashMap::default(); - let mut current_number = 0; - for (idx, op) in operands.iter().enumerate() { + // Clobbers collected from `out("explicit register") _` and `inout("expl_reg") var => _` + let mut clobbers = vec![]; + + // We're trying to preallocate space for the template + let mut constants_len = 0; + + // There are rules we must adhere to if we want GCC to do the right thing: + // + // * Every local variable that the asm block uses as an output must be declared *before* + // the asm block. + // * There must be no instructions whatsoever between the register variables and the asm. + // + // Therefore, the backend must generate the instructions strictly in this order: + // + // 1. Output variables. + // 2. Register variables. + // 3. The asm block. + // + // We also must make sure that no input operands are emitted before output operands. + // + // This is why we work in passes, first emitting local vars, then local register vars. + // Also, we don't emit any asm operands immediately; we save them to + // one of the buffers to be emitted later. + + // 1. Normal variables (and saving operands to buffers). + for (rust_idx, op) in rust_operands.iter().enumerate() { match *op { - InlineAsmOperandRef::Out { place, .. } => { - let ty = - match place { - Some(place) => place.layout.gcc_type(self.cx, false), - None => { - // If the output is discarded, we don't really care what - // type is used. We're just using this to tell GCC to - // reserve the register. - //dummy_output_type(self.cx, reg.reg_class()) - - // NOTE: if no output value, we should not create one (it will be a - // clobber).
- continue; - }, - }; - let var = self.current_func().new_local(None, ty, "output_register"); - operand_numbers.insert(idx, current_number); - current_number += 1; - output_vars.insert(idx, var); + InlineAsmOperandRef::Out { reg, late, place } => { + use ConstraintOrRegister::*; + + let (constraint, ty) = match (reg_to_gcc(reg), place) { + (Constraint(constraint), Some(place)) => (constraint, place.layout.gcc_type(self.cx, false)), + // When `reg` is a class and not an explicit register but the out place is not specified, + // we need to create an unused output variable to assign the output to. This var + // needs to be of a type that's "compatible" with the register class, but specific type + // doesn't matter. + (Constraint(constraint), None) => (constraint, dummy_output_type(self.cx, reg.reg_class())), + (Register(_), Some(_)) => { + // left for the next pass + continue + }, + (Register(reg_name), None) => { + clobbers.push(reg_name); + continue + } + }; + + let tmp_var = self.current_func().new_local(None, ty, "output_register"); + outputs.push(AsmOutOperand { + constraint, + rust_idx, + late, + readwrite: false, + tmp_var, + out_place: place + }); } - InlineAsmOperandRef::InOut { out_place, .. } => { - let ty = - match out_place { - Some(place) => place.layout.gcc_type(self.cx, false), - None => { - // NOTE: if no output value, we should not create one. - continue; - }, - }; - operand_numbers.insert(idx, current_number); - current_number += 1; - let var = self.current_func().new_local(None, ty, "output_register"); - output_vars.insert(idx, var); + + InlineAsmOperandRef::In { reg, value } => { + if let ConstraintOrRegister::Constraint(constraint) = reg_to_gcc(reg) { + inputs.push(AsmInOperand { + constraint: Cow::Borrowed(constraint), + rust_idx, + val: value.immediate() + }); + } + else { + // left for the next pass + continue + } + } + + InlineAsmOperandRef::InOut { reg, late, in_value, out_place } => { + let constraint = if let ConstraintOrRegister::Constraint(constraint) = reg_to_gcc(reg) { + constraint + } + else { + // left for the next pass + continue + }; + + // Rustc frontend guarantees that input and output types are "compatible", + // so we can just use input var's type for the output variable. + // + // This decision is also backed by the fact that LLVM needs in and out + // values to be of *exactly the same type*, not just "compatible". + // I'm not sure if GCC is so picky too, but better safe than sorry. + let ty = in_value.layout.gcc_type(self.cx, false); + let tmp_var = self.current_func().new_local(None, ty, "output_register"); + + // If the out_place is None (i.e `inout(reg) _` syntax was used), we translate + // it to one "readwrite (+) output variable", otherwise we translate it to two + // "out and tied in" vars as described above. 
+ let readwrite = out_place.is_none(); + outputs.push(AsmOutOperand { + constraint, + rust_idx, + late, + readwrite, + tmp_var, + out_place, + }); + + if !readwrite { + let out_gcc_idx = outputs.len() - 1; + let constraint = Cow::Owned(out_gcc_idx.to_string()); + + inputs.push(AsmInOperand { + constraint, + rust_idx, + val: in_value.immediate() + }); + } + } + + InlineAsmOperandRef::Const { ref string } => { + constants_len += string.len() + att_dialect as usize; + } + + InlineAsmOperandRef::SymFn { instance } => { + constants_len += self.tcx.symbol_name(instance).name.len(); + } + InlineAsmOperandRef::SymStatic { def_id } => { + constants_len += self.tcx.symbol_name(Instance::mono(self.tcx, def_id)).name.len(); } - _ => {} } } - // All output operands must come before the input operands, hence the 2 loops. - for (idx, op) in operands.iter().enumerate() { + // 2. Register variables. + for (rust_idx, op) in rust_operands.iter().enumerate() { match *op { - InlineAsmOperandRef::In { .. } | InlineAsmOperandRef::InOut { .. } => { - operand_numbers.insert(idx, current_number); - current_number += 1; - }, - _ => (), + // `out("explicit register") var` + InlineAsmOperandRef::Out { reg, late, place } => { + if let ConstraintOrRegister::Register(reg_name) = reg_to_gcc(reg) { + let out_place = if let Some(place) = place { + place + } + else { + // processed in the previous pass + continue + }; + + let ty = out_place.layout.gcc_type(self.cx, false); + let tmp_var = self.current_func().new_local(None, ty, "output_register"); + tmp_var.set_register_name(reg_name); + + outputs.push(AsmOutOperand { + constraint: "r".into(), + rust_idx, + late, + readwrite: false, + tmp_var, + out_place: Some(out_place) + }); + } + + // processed in the previous pass + } + + // `in("explicit register") var` + InlineAsmOperandRef::In { reg, value } => { + if let ConstraintOrRegister::Register(reg_name) = reg_to_gcc(reg) { + let ty = value.layout.gcc_type(self.cx, false); + let reg_var = self.current_func().new_local(None, ty, "input_register"); + reg_var.set_register_name(reg_name); + self.llbb().add_assignment(None, reg_var, value.immediate()); + + inputs.push(AsmInOperand { + constraint: "r".into(), + rust_idx, + val: reg_var.to_rvalue() + }); + } + + // processed in the previous pass + } + + // `inout("explicit register") in_var => out_var` + InlineAsmOperandRef::InOut { reg, late, in_value, out_place } => { + if let ConstraintOrRegister::Register(reg_name) = reg_to_gcc(reg) { + let out_place = if let Some(place) = out_place { + place + } + else { + // processed in the previous pass + continue + }; + + // See explanation in the first pass. + let ty = in_value.layout.gcc_type(self.cx, false); + let tmp_var = self.current_func().new_local(None, ty, "output_register"); + tmp_var.set_register_name(reg_name); + + outputs.push(AsmOutOperand { + constraint: "r".into(), + rust_idx, + late, + readwrite: false, + tmp_var, + out_place: Some(out_place) + }); + + let constraint = Cow::Owned((outputs.len() - 1).to_string()); + inputs.push(AsmInOperand { + constraint, + rust_idx, + val: in_value.immediate() + }); + } + + // processed in the previous pass + } + + InlineAsmOperandRef::Const { .. } + | InlineAsmOperandRef::SymFn { .. } + | InlineAsmOperandRef::SymStatic { .. } => { + // processed in the previous pass + } } } - // Build the template string - let mut template_str = String::new(); + // 3. 
Build the template string + + let mut template_str = String::with_capacity(estimate_template_length(template, constants_len, att_dialect)); + if !intel_dialect { + template_str.push_str(ATT_SYNTAX_INS); + } + for piece in template { match *piece { InlineAsmTemplatePiece::String(ref string) => { - if string.contains('%') { - for c in string.chars() { - if c == '%' { - template_str.push_str("%%"); - } - else { - template_str.push(c); - } - } + // TODO(@Commeownist): switch to `Iterator::intersperse` once it's stable + let mut iter = string.split('%'); + if let Some(s) = iter.next() { + template_str.push_str(s); } - else { - template_str.push_str(string) + + for s in iter { + template_str.push_str("%%"); + template_str.push_str(s); } } InlineAsmTemplatePiece::Placeholder { operand_idx, modifier, span: _ } => { - match operands[operand_idx] { - InlineAsmOperandRef::Out { reg, place: Some(_), .. } => { + let mut push_to_template = |modifier, gcc_idx| { + use std::fmt::Write; + + template_str.push('%'); + if let Some(modifier) = modifier { + template_str.push(modifier); + } + write!(template_str, "{}", gcc_idx).expect("pushing to string failed"); + }; + + match rust_operands[operand_idx] { + InlineAsmOperandRef::Out { reg, .. } => { let modifier = modifier_to_gcc(asm_arch, reg.reg_class(), modifier); - if let Some(modifier) = modifier { - template_str.push_str(&format!("%{}{}", modifier, operand_numbers[&operand_idx])); - } else { - template_str.push_str(&format!("%{}", operand_numbers[&operand_idx])); - } - }, - InlineAsmOperandRef::Out { place: None, .. } => { - unimplemented!("Out None"); - }, - InlineAsmOperandRef::In { reg, .. } - | InlineAsmOperandRef::InOut { reg, .. } => { + let gcc_index = outputs.iter() + .position(|op| operand_idx == op.rust_idx) + .expect("wrong rust index"); + push_to_template(modifier, gcc_index); + } + + InlineAsmOperandRef::In { reg, .. } => { let modifier = modifier_to_gcc(asm_arch, reg.reg_class(), modifier); - if let Some(modifier) = modifier { - template_str.push_str(&format!("%{}{}", modifier, operand_numbers[&operand_idx])); - } else { - template_str.push_str(&format!("%{}", operand_numbers[&operand_idx])); - } + let in_gcc_index = inputs.iter() + .position(|op| operand_idx == op.rust_idx) + .expect("wrong rust index"); + let gcc_index = in_gcc_index + outputs.len(); + push_to_template(modifier, gcc_index); + } + + InlineAsmOperandRef::InOut { reg, .. } => { + let modifier = modifier_to_gcc(asm_arch, reg.reg_class(), modifier); + + // The input register is tied to the output, so we can just use the index of the output register + let gcc_index = outputs.iter() + .position(|op| operand_idx == op.rust_idx) + .expect("wrong rust index"); + push_to_template(modifier, gcc_index); } + + InlineAsmOperandRef::SymFn { instance } => { + let name = self.tcx.symbol_name(instance).name; + template_str.push_str(name); + } + + InlineAsmOperandRef::SymStatic { def_id } => { + // TODO(@Commeownist): This may not be sufficient for all kinds of statics. + // Some statics may need the `@plt` suffix, like thread-local vars. + let instance = Instance::mono(self.tcx, def_id); + let name = self.tcx.symbol_name(instance).name; + template_str.push_str(name); + } + InlineAsmOperandRef::Const { ref string } => { // Const operands get injected directly into the template + if att_dialect { + template_str.push('$'); + } template_str.push_str(string); } - InlineAsmOperandRef::SymFn { .. } - | InlineAsmOperandRef::SymStatic { .. 
} => { - unimplemented!(); - // Only emit the raw symbol name - //template_str.push_str(&format!("${{{}:c}}", op_idx[&operand_idx])); - } } } } } + if !intel_dialect { + template_str.push_str(INTEL_SYNTAX_INS); + } + + // 4. Generate Extended Asm block + let block = self.llbb(); - let template_str = - if intel_dialect { - template_str - } - else { - // FIXME(antoyo): this might break the "m" memory constraint: - // https://stackoverflow.com/a/9347957/389119 - // TODO(antoyo): only set on x86 platforms. - format!(".att_syntax noprefix\n\t{}\n\t.intel_syntax noprefix", template_str) - }; let extended_asm = block.add_extended_asm(None, &template_str); - // Collect the types of output operands - let mut output_types = vec![]; - for (idx, op) in operands.iter().enumerate() { - match *op { - InlineAsmOperandRef::Out { reg, late, place } => { - let ty = - match place { - Some(place) => place.layout.gcc_type(self.cx, false), - None => { - // If the output is discarded, we don't really care what - // type is used. We're just using this to tell GCC to - // reserve the register. - dummy_output_type(self.cx, reg.reg_class()) - }, - }; - output_types.push(ty); - let prefix = if late { "=" } else { "=&" }; - let constraint = format!("{}{}", prefix, reg_to_gcc(reg)); + for op in &outputs { + extended_asm.add_output_operand(None, &op.to_constraint(), op.tmp_var); + } - if place.is_some() { - let var = output_vars[&idx]; - extended_asm.add_output_operand(None, &constraint, var); - } - else { - // NOTE: reg.to_string() returns the register name with quotes around it so - // remove them. - extended_asm.add_clobber(reg.to_string().trim_matches('"')); - } - } - InlineAsmOperandRef::InOut { reg, late, in_value, out_place } => { - let ty = - match out_place { - Some(out_place) => out_place.layout.gcc_type(self.cx, false), - None => dummy_output_type(self.cx, reg.reg_class()) - }; - output_types.push(ty); - // TODO(antoyo): prefix of "+" for reading and writing? - let prefix = if late { "=" } else { "=&" }; - let constraint = format!("{}{}", prefix, reg_to_gcc(reg)); - - if out_place.is_some() { - let var = output_vars[&idx]; - // TODO(antoyo): also specify an output operand when out_place is none: that would - // be the clobber but clobbers do not support general constraint like reg; - // they only support named registers. - // Not sure how we can do this. And the LLVM backend does not seem to add a - // clobber. - extended_asm.add_output_operand(None, &constraint, var); - } + for op in &inputs { + extended_asm.add_input_operand(None, &op.constraint, op.val); + } - let constraint = reg_to_gcc(reg); - extended_asm.add_input_operand(None, &constraint, in_value.immediate()); - } - InlineAsmOperandRef::In { reg, value } => { - let constraint = reg_to_gcc(reg); - extended_asm.add_input_operand(None, &constraint, value.immediate()); - } - _ => {} + for clobber in clobbers.iter() { + extended_asm.add_clobber(clobber); + } + + if !options.contains(InlineAsmOptions::PRESERVES_FLAGS) { + // TODO(@Commeownist): I'm not 100% sure this one clobber is sufficient + // on all architectures. For instance, what about FP stack? 
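+        // A sketch of the overall shape the options handling below produces
+        // (assuming default options; not generated verbatim):
+        //     __asm__ volatile ("..." : /* outputs */ : /* inputs */ : "cc", "memory");
+        // "cc" is skipped when PRESERVES_FLAGS is set, "memory" when NOMEM is set,
+        // and the volatile flag when PURE is set.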
+            extended_asm.add_clobber("cc");
+        }
+        if !options.contains(InlineAsmOptions::NOMEM) {
+            extended_asm.add_clobber("memory");
+        }
+        if !options.contains(InlineAsmOptions::PURE) {
+            extended_asm.set_volatile_flag(true);
+        }
+        if !options.contains(InlineAsmOptions::NOSTACK) {
+            // TODO(@Commeownist): figure out how to align stack
+        }
+        if options.contains(InlineAsmOptions::NORETURN) {
+            let builtin_unreachable = self.context.get_builtin_function("__builtin_unreachable");
+            let builtin_unreachable: RValue<'gcc> = unsafe { std::mem::transmute(builtin_unreachable) };
+            self.call(self.type_void(), builtin_unreachable, &[], None);
+        }
+
+        // Write results to outputs.
+        //
+        // We need to do this because:
+        // 1. Turning `PlaceRef` into `RValue` is error-prone and has nasty edge cases
+        //    (especially with the current `rustc_backend_ssa` API).
+        // 2. Not every output operand has an `out_place`, and one is required by `add_output_operand`.
+        //
+        // Instead, we generate a temporary output variable for each output operand, and then this loop
+        // generates the `out_place = tmp_var;` assignments if the out_place exists.
+        for op in &outputs {
+            if let Some(place) = op.out_place {
+                OperandValue::Immediate(op.tmp_var.to_rvalue()).store(self, place);
             }
         }
-        // Write results to outputs
-        for (idx, op) in operands.iter().enumerate() {
-            if let InlineAsmOperandRef::Out { place: Some(place), .. }
-            | InlineAsmOperandRef::InOut { out_place: Some(place), .. } = *op
-            {
-                OperandValue::Immediate(output_vars[&idx].to_rvalue()).store(self, place);
+    }
+}
+
+fn estimate_template_length(template: &[InlineAsmTemplatePiece], constants_len: usize, att_dialect: bool) -> usize {
+    let len: usize = template.iter().map(|piece| {
+        match *piece {
+            InlineAsmTemplatePiece::String(ref string) => {
+                string.len()
+            }
+            InlineAsmTemplatePiece::Placeholder { .. } => {
+                // '%' + 1 char modifier + 1 char index
+                3
             }
         }
+    })
+    .sum();
+
+    // Increase it by 5% to account for possible '%' signs that will be duplicated.
+    // The number is pulled out of the blue, but it should be a fair enough
+    // upper bound.
+    let mut res = (len as f32 * 1.05) as usize + constants_len;
+
+    if att_dialect {
+        res += INTEL_SYNTAX_INS.len() + ATT_SYNTAX_INS.len();
     }
+    res
 }
 
 /// Converts a register class to a GCC constraint code.
-// TODO(antoyo): return &'static str instead?
-fn reg_to_gcc(reg: InlineAsmRegOrRegClass) -> String {
-    match reg {
+fn reg_to_gcc(reg: InlineAsmRegOrRegClass) -> ConstraintOrRegister {
+    let constraint = match reg {
         // For vector registers LLVM wants the register name to match the type size.
         InlineAsmRegOrRegClass::Reg(reg) => {
             // TODO(antoyo): add support for vector register.
-            let constraint =
-                match reg.name() {
-                    "ax" => "a",
-                    "bx" => "b",
-                    "cx" => "c",
-                    "dx" => "d",
-                    "si" => "S",
-                    "di" => "D",
-                    // TODO(antoyo): for registers like r11, we have to create a register variable: https://stackoverflow.com/a/31774784/389119
-                    // TODO(antoyo): in this case though, it's a clobber, so it should work as r11.
-                    // Recent nightly supports clobber() syntax, so update to it. It does not seem
-                    // like it's implemented yet.
-                    name => name, // FIXME(antoyo): probably wrong.
- }; - constraint.to_string() + match reg.name() { + "ax" => "a", + "bx" => "b", + "cx" => "c", + "dx" => "d", + "si" => "S", + "di" => "D", + // For registers like r11, we have to create a register variable: https://stackoverflow.com/a/31774784/389119 + name => return ConstraintOrRegister::Register(name), + } }, InlineAsmRegOrRegClass::RegClass(reg) => match reg { InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::preg) => unimplemented!(), @@ -278,23 +582,25 @@ fn reg_to_gcc(reg: InlineAsmRegOrRegClass) -> String { InlineAsmRegClass::RiscV(RiscVInlineAsmRegClass::reg) => unimplemented!(), InlineAsmRegClass::RiscV(RiscVInlineAsmRegClass::freg) => unimplemented!(), InlineAsmRegClass::RiscV(RiscVInlineAsmRegClass::vreg) => unimplemented!(), - InlineAsmRegClass::X86(X86InlineAsmRegClass::mmx_reg) => unimplemented!(), InlineAsmRegClass::X86(X86InlineAsmRegClass::reg) => "r", - InlineAsmRegClass::X86(X86InlineAsmRegClass::reg_abcd) => unimplemented!(), - InlineAsmRegClass::X86(X86InlineAsmRegClass::reg_byte) => unimplemented!(), + InlineAsmRegClass::X86(X86InlineAsmRegClass::reg_abcd) => "Q", + InlineAsmRegClass::X86(X86InlineAsmRegClass::reg_byte) => "q", InlineAsmRegClass::X86(X86InlineAsmRegClass::xmm_reg) - | InlineAsmRegClass::X86(X86InlineAsmRegClass::ymm_reg) => unimplemented!(), - InlineAsmRegClass::X86(X86InlineAsmRegClass::x87_reg) => unimplemented!(), - InlineAsmRegClass::X86(X86InlineAsmRegClass::zmm_reg) => unimplemented!(), + | InlineAsmRegClass::X86(X86InlineAsmRegClass::ymm_reg) => "x", + InlineAsmRegClass::X86(X86InlineAsmRegClass::zmm_reg) => "v", InlineAsmRegClass::X86(X86InlineAsmRegClass::kreg) => unimplemented!(), InlineAsmRegClass::Wasm(WasmInlineAsmRegClass::local) => unimplemented!(), + InlineAsmRegClass::X86( + X86InlineAsmRegClass::x87_reg | X86InlineAsmRegClass::mmx_reg, + ) => unreachable!("clobber-only"), InlineAsmRegClass::SpirV(SpirVInlineAsmRegClass::reg) => { bug!("GCC backend does not support SPIR-V") } InlineAsmRegClass::Err => unreachable!(), } - .to_string(), - } + }; + + ConstraintOrRegister::Constraint(constraint) } /// Type to use for outputs that are discarded. It doesn't really matter what @@ -379,7 +685,7 @@ impl<'gcc, 'tcx> AsmMethods for CodegenCx<'gcc, 'tcx> { match operands[operand_idx] { GlobalAsmOperandRef::Const { ref string } => { // Const operands get injected directly into the - // template. Note that we don't need to escape $ + // template. Note that we don't need to escape % // here unlike normal inline assembly. 
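                         // As an illustration (a sketch, not part of the original patch):
                         // a global asm template piece `mov eax, {}` with `const 42`
                         // simply becomes the literal text "mov eax, 42" here.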
template_str.push_str(string); } @@ -431,8 +737,7 @@ fn modifier_to_gcc(arch: InlineAsmArch, reg: InlineAsmRegClass, modifier: Option InlineAsmRegClass::RiscV(RiscVInlineAsmRegClass::vreg) => unimplemented!(), InlineAsmRegClass::X86(X86InlineAsmRegClass::reg) | InlineAsmRegClass::X86(X86InlineAsmRegClass::reg_abcd) => match modifier { - None if arch == InlineAsmArch::X86_64 => Some('q'), - None => Some('k'), + None => if arch == InlineAsmArch::X86_64 { Some('q') } else { Some('k') }, Some('l') => Some('b'), Some('h') => Some('h'), Some('x') => Some('w'), @@ -440,13 +745,22 @@ fn modifier_to_gcc(arch: InlineAsmArch, reg: InlineAsmRegClass, modifier: Option Some('r') => Some('q'), _ => unreachable!(), }, - InlineAsmRegClass::X86(X86InlineAsmRegClass::mmx_reg) => unimplemented!(), - InlineAsmRegClass::X86(X86InlineAsmRegClass::reg_byte) => unimplemented!(), - InlineAsmRegClass::X86(X86InlineAsmRegClass::xmm_reg) - | InlineAsmRegClass::X86(X86InlineAsmRegClass::ymm_reg) - | InlineAsmRegClass::X86(X86InlineAsmRegClass::zmm_reg) => unimplemented!(), - InlineAsmRegClass::X86(X86InlineAsmRegClass::x87_reg) => unimplemented!(), - InlineAsmRegClass::X86(X86InlineAsmRegClass::kreg) => unimplemented!(), + InlineAsmRegClass::X86(X86InlineAsmRegClass::reg_byte) => None, + InlineAsmRegClass::X86(reg @ X86InlineAsmRegClass::xmm_reg) + | InlineAsmRegClass::X86(reg @ X86InlineAsmRegClass::ymm_reg) + | InlineAsmRegClass::X86(reg @ X86InlineAsmRegClass::zmm_reg) => match (reg, modifier) { + (X86InlineAsmRegClass::xmm_reg, None) => Some('x'), + (X86InlineAsmRegClass::ymm_reg, None) => Some('t'), + (X86InlineAsmRegClass::zmm_reg, None) => Some('g'), + (_, Some('x')) => Some('x'), + (_, Some('y')) => Some('t'), + (_, Some('z')) => Some('g'), + _ => unreachable!(), + }, + InlineAsmRegClass::X86(X86InlineAsmRegClass::kreg) => None, + InlineAsmRegClass::X86(X86InlineAsmRegClass::x87_reg | X86InlineAsmRegClass::mmx_reg) => { + unreachable!("clobber-only") + } InlineAsmRegClass::Wasm(WasmInlineAsmRegClass::local) => unimplemented!(), InlineAsmRegClass::SpirV(SpirVInlineAsmRegClass::reg) => { bug!("LLVM backend does not support SPIR-V") diff --git a/test.sh b/test.sh index 06faa98d9b1f0..114db7d53f5bb 100755 --- a/test.sh +++ b/test.sh @@ -4,7 +4,12 @@ set -e -export GCC_PATH=$(cat gcc_path) +if [ -f ./gcc_path ]; then + export GCC_PATH=$(cat gcc_path) +else + echo 'Please put the path to your custom build of libgccjit in the file `gcc_path`, see Readme.md for details' + exit 1 +fi export LD_LIBRARY_PATH="$GCC_PATH" export LIBRARY_PATH="$GCC_PATH" @@ -157,7 +162,7 @@ done git checkout -- src/test/ui/issues/auxiliary/issue-3136-a.rs # contains //~ERROR, but shouldn't be removed -rm -r src/test/ui/{abi*,extern/,panic-runtime/,panics/,unsized-locals/,proc-macro/,threads-sendsync/,thinlto/,simd*,borrowck/,test*,*lto*.rs} || true +rm -r src/test/ui/{abi*,extern/,llvm-asm/,panic-runtime/,panics/,unsized-locals/,proc-macro/,threads-sendsync/,thinlto/,simd*,borrowck/,test*,*lto*.rs} || true for test in $(rg --files-with-matches "catch_unwind|should_panic|thread|lto" src/test/ui); do rm $test done diff --git a/tests/run/asm.rs b/tests/run/asm.rs index bd76153e047e4..9c0055b0b6b5e 100644 --- a/tests/run/asm.rs +++ b/tests/run/asm.rs @@ -62,5 +62,92 @@ fn main() { } assert_eq!(x, 43); + // check inout(reg_class) x + let mut x: u64 = 42; + unsafe { + asm!("add {0}, {0}", + inout(reg) x + ); + } + assert_eq!(x, 84); + + // check inout("reg") x + let mut x: u64 = 42; + unsafe { + asm!("add r11, r11", + inout("r11") x + ); + } + 
assert_eq!(x, 84);
+
+    // check a mix of
+    // in("reg")
+    // inout(class) x => y
+    // inout(class) x
+    let x: u64 = 702;
+    let y: u64 = 100;
+    let res: u64;
+    let mut rem: u64 = 0;
+    unsafe {
+        asm!("div r11",
+            in("r11") y,
+            inout("eax") x => res,
+            inout("edx") rem,
+        );
+    }
+    assert_eq!(res, 7);
+    assert_eq!(rem, 2);
+
+    // check const
+    let mut x: u64 = 42;
+    unsafe {
+        asm!("add {}, {}",
+            inout(reg) x,
+            const 1
+        );
+    }
+    assert_eq!(x, 43);
+
+    // check const (ATT syntax)
+    let mut x: u64 = 42;
+    unsafe {
+        asm!("add {}, {}",
+            const 1,
+            inout(reg) x,
+            options(att_syntax)
+        );
+    }
+    assert_eq!(x, 43);
+
+    // check sym fn
+    extern "C" fn foo() -> u64 { 42 }
+    let x: u64;
+    unsafe {
+        asm!("call {}", sym foo, lateout("rax") x);
+    }
+    assert_eq!(x, 42);
+
+    // check sym fn (ATT syntax)
+    let x: u64;
+    unsafe {
+        asm!("call {}", sym foo, lateout("rax") x, options(att_syntax));
+    }
+    assert_eq!(x, 42);
+
+    // check sym static
+    static FOO: u64 = 42;
+    let x: u64;
+    unsafe {
+        asm!("mov {1}, qword ptr [rip + {0}]", sym FOO, lateout(reg) x);
+    }
+    assert_eq!(x, 42);
+
+    // check sym static (ATT syntax)
+    let x: u64;
+    unsafe {
+        asm!("movq {0}(%rip), {1}", sym FOO, lateout(reg) x, options(att_syntax));
+    }
+    assert_eq!(x, 42);
+
     assert_eq!(unsafe { add_asm(40, 2) }, 42);
 }

From 4d3dcd414c05072b139487b7b57a7b8ef4082421 Mon Sep 17 00:00:00 2001
From: Antoni Boucher
Date: Thu, 16 Sep 2021 07:37:23 -0400
Subject: [PATCH 09/27] Remove FUNDING.yml

---
 .github/FUNDING.yml | 2 --
 1 file changed, 2 deletions(-)
 delete mode 100644 .github/FUNDING.yml

diff --git a/.github/FUNDING.yml b/.github/FUNDING.yml
deleted file mode 100644
index cf2dcd4d099c2..0000000000000
--- a/.github/FUNDING.yml
+++ /dev/null
@@ -1,2 +0,0 @@
-github: antoyo
-patreon: antoyo

From 8ec7976ced61c31dbcaeda3f52d7ecc197764f80 Mon Sep 17 00:00:00 2001
From: Emerson Laurentino
Date: Thu, 16 Sep 2021 22:47:19 -0300
Subject: [PATCH 10/27] fix: gh origin on readme (#83)

---
 Readme.md | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/Readme.md b/Readme.md
index 96a6c3a04145e..1f27d721fabbb 100644
--- a/Readme.md
+++ b/Readme.md
@@ -19,7 +19,7 @@ You can also use my [fork of gcc](https://github.com/antoyo/gcc) which already i
 **Put the path to your custom build of libgccjit in the file `gcc_path`.**
 
 ```bash
-$ git clone https://github.com/antoyo/rustc_codegen_gcc.git
+$ git clone https://github.com/rust-lang/rustc_codegen_gcc.git
 $ cd rustc_codegen_gcc
 $ ./prepare_build.sh # download and patch sysroot src
 $ ./build.sh --release

From 48d60ab7c505c6c1ebb042eacaafd8dc9f7a9267 Mon Sep 17 00:00:00 2001
From: Commeownist
Date: Sat, 18 Sep 2021 00:19:25 +0300
Subject: [PATCH 11/27] Update to nightly-2021-09-11 (#79)

* Implement `black_box` as intrinsic

  Responsibility for implementing the black box now lies on the backend.

* Remove some TODOs

* Update to nightly-2021-09-17

* CI: don't fail on warnings

---
 rust-toolchain       |   2 +-
 src/allocator.rs     |   5 ++-
 src/archive.rs       |  94 ++++++++----------
 src/asm.rs           |  38 +++++++++---------
 src/builder.rs       |  27 ++++++-------
 src/callee.rs        |   1 -
 src/common.rs        |   9 +++--
 src/consts.rs        |  11 ++++--
 src/context.rs       |  40 ++++++++++---------
 src/intrinsic/mod.rs |  22 +++++++++--
 src/lib.rs           |   5 +--
 src/mono_item.rs     |   5 +--
 src/type_of.rs       |   6 +--
 test.sh              |   1 +
 14 files changed, 117 insertions(+), 149 deletions(-)

diff --git a/rust-toolchain b/rust-toolchain
index 4e9771311efbd..a03d26c0467e1 100644
--- a/rust-toolchain
+++ b/rust-toolchain
@@ -1 +1 @@
-nightly-2021-08-12
+nightly-2021-09-17 diff --git a/src/allocator.rs b/src/allocator.rs index 4e622efcaa0ba..6378a31202c1b 100644 --- a/src/allocator.rs +++ b/src/allocator.rs @@ -6,7 +6,7 @@ use rustc_span::symbol::sym; use crate::GccContext; -pub(crate) unsafe fn codegen(tcx: TyCtxt<'_>, mods: &mut GccContext, kind: AllocatorKind, has_alloc_error_handler: bool) { +pub(crate) unsafe fn codegen(tcx: TyCtxt<'_>, mods: &mut GccContext, _module_name: &str, kind: AllocatorKind, has_alloc_error_handler: bool) { let context = &mods.context; let usize = match tcx.sess.target.pointer_width { @@ -77,6 +77,9 @@ pub(crate) unsafe fn codegen(tcx: TyCtxt<'_>, mods: &mut GccContext, kind: Alloc else { block.end_with_void_return(None); } + + // TODO(@Commeownist): Check if we need to emit some extra debugging info in certain circumstances + // as described in https://github.com/rust-lang/rust/commit/77a96ed5646f7c3ee8897693decc4626fe380643 } let types = [usize, usize]; diff --git a/src/archive.rs b/src/archive.rs index 52acf4f2d26e7..d749d763402b8 100644 --- a/src/archive.rs +++ b/src/archive.rs @@ -2,16 +2,15 @@ use std::fs::File; use std::path::{Path, PathBuf}; use rustc_session::Session; -use rustc_codegen_ssa::back::archive::{find_library, ArchiveBuilder}; -use rustc_codegen_ssa::METADATA_FILENAME; +use rustc_codegen_ssa::back::archive::ArchiveBuilder; + use rustc_data_structures::temp_dir::MaybeTempDir; use rustc_middle::middle::cstore::DllImport; -use rustc_span::symbol::Symbol; + struct ArchiveConfig<'a> { sess: &'a Session, dst: PathBuf, - lib_search_paths: Vec, use_native_ar: bool, use_gnu_style_archive: bool, } @@ -35,11 +34,9 @@ pub struct ArArchiveBuilder<'a> { impl<'a> ArchiveBuilder<'a> for ArArchiveBuilder<'a> { fn new(sess: &'a Session, output: &Path, input: Option<&Path>) -> Self { - use rustc_codegen_ssa::back::link::archive_search_paths; let config = ArchiveConfig { sess, dst: output.to_path_buf(), - lib_search_paths: archive_search_paths(sess), use_native_ar: false, // FIXME test for linux and System V derivatives instead use_gnu_style_archive: sess.target.options.archive_format == "gnu", @@ -94,47 +91,27 @@ impl<'a> ArchiveBuilder<'a> for ArArchiveBuilder<'a> { )); } - fn add_native_library(&mut self, name: Symbol, verbatim: bool) { - let location = find_library(name, verbatim, &self.config.lib_search_paths, self.config.sess); - self.add_archive(location.clone(), |_| false) - .unwrap_or_else(|e| { - panic!( - "failed to add native library {}: {}", - location.to_string_lossy(), - e - ); - }); - } - - fn add_rlib( - &mut self, - rlib: &Path, - name: &str, - lto: bool, - skip_objects: bool, - ) -> std::io::Result<()> { - let obj_start = name.to_owned(); - - self.add_archive(rlib.to_owned(), move |fname: &str| { - // Ignore metadata files, no matter the name. 
- if fname == METADATA_FILENAME { - return true; - } - - // Don't include Rust objects if LTO is enabled - if lto && fname.starts_with(&obj_start) && fname.ends_with(".o") { - return true; - } + fn add_archive(&mut self, archive_path: &Path, mut skip: F) -> std::io::Result<()> + where + F: FnMut(&str) -> bool + 'static, + { + let mut archive = ar::Archive::new(std::fs::File::open(&archive_path)?); + let archive_index = self.src_archives.len(); - // Otherwise if this is *not* a rust object and we're skipping - // objects then skip this file - if skip_objects && (!fname.starts_with(&obj_start) || !fname.ends_with(".o")) { - return true; + let mut i = 0; + while let Some(entry) = archive.next_entry() { + let entry = entry?; + let file_name = String::from_utf8(entry.header().identifier().to_vec()) + .map_err(|err| std::io::Error::new(std::io::ErrorKind::InvalidData, err))?; + if !skip(&file_name) { + self.entries + .push((file_name, ArchiveEntry::FromArchive { archive_index, entry_index: i })); } + i += 1; + } - // ok, don't skip this - return false; - }) + self.src_archives.push((archive_path.to_owned(), archive)); + Ok(()) } fn update_symbols(&mut self) { @@ -239,32 +216,3 @@ impl<'a> ArchiveBuilder<'a> for ArArchiveBuilder<'a> { unimplemented!(); } } - -impl<'a> ArArchiveBuilder<'a> { - fn add_archive(&mut self, archive_path: PathBuf, mut skip: F) -> std::io::Result<()> - where - F: FnMut(&str) -> bool + 'static, - { - let mut archive = ar::Archive::new(std::fs::File::open(&archive_path)?); - let archive_index = self.src_archives.len(); - - let mut i = 0; - while let Some(entry) = archive.next_entry() { - let entry = entry.unwrap(); - let file_name = String::from_utf8(entry.header().identifier().to_vec()).unwrap(); - if !skip(&file_name) { - self.entries.push(( - file_name, - ArchiveEntry::FromArchive { - archive_index, - entry_index: i, - }, - )); - } - i += 1; - } - - self.src_archives.push((archive_path, archive)); - Ok(()) - } -} diff --git a/src/asm.rs b/src/asm.rs index 512b9f553f5e3..a684c34b64410 100644 --- a/src/asm.rs +++ b/src/asm.rs @@ -107,26 +107,10 @@ enum ConstraintOrRegister { impl<'a, 'gcc, 'tcx> AsmBuilderMethods<'tcx> for Builder<'a, 'gcc, 'tcx> { - fn codegen_llvm_inline_asm(&mut self, ia: &LlvmInlineAsmInner, outputs: Vec>>, inputs: Vec>, span: Span) -> bool { - if ia.asm.as_str().is_empty() && outputs.is_empty() { - // TODO(@Commeownist): there's one use of `llvm_asm` in rustc sysroot we can't get rid of just yet. - // Fortunately, it's used as a simple black box to make sure that inputs are not optimized away. - // Let's just emulate it. 
- let block = self.llbb(); - let extended_asm = block.add_extended_asm(None, ""); - for input in inputs { - extended_asm.add_input_operand(None, "r", input); - } - extended_asm.add_clobber("memory"); - extended_asm.set_volatile_flag(true); - } - else { - // TODO(@Commeownist): switch to `struct_span_err_with_code` - // once we get this merged into rustc - self.sess().struct_span_err(span, "GCC backend does not support `llvm_asm!`") - .help("consider using the `asm!` macro instead") - .emit(); - } + fn codegen_llvm_inline_asm(&mut self, _ia: &LlvmInlineAsmInner, _outputs: Vec>>, _inputs: Vec>, span: Span) -> bool { + self.sess().struct_span_err(span, "GCC backend does not support `llvm_asm!`") + .help("consider using the `asm!` macro instead") + .emit(); // We return `true` even if we've failed to generate the asm // because we want to suppress the "malformed inline assembly" error @@ -579,6 +563,10 @@ fn reg_to_gcc(reg: InlineAsmRegOrRegClass) -> ConstraintOrRegister { InlineAsmRegClass::PowerPC(PowerPCInlineAsmRegClass::reg) => unimplemented!(), InlineAsmRegClass::PowerPC(PowerPCInlineAsmRegClass::reg_nonzero) => unimplemented!(), InlineAsmRegClass::PowerPC(PowerPCInlineAsmRegClass::freg) => unimplemented!(), + InlineAsmRegClass::PowerPC(PowerPCInlineAsmRegClass::cr) + | InlineAsmRegClass::PowerPC(PowerPCInlineAsmRegClass::xer) => { + unreachable!("clobber-only") + }, InlineAsmRegClass::RiscV(RiscVInlineAsmRegClass::reg) => unimplemented!(), InlineAsmRegClass::RiscV(RiscVInlineAsmRegClass::freg) => unimplemented!(), InlineAsmRegClass::RiscV(RiscVInlineAsmRegClass::vreg) => unimplemented!(), @@ -596,6 +584,8 @@ fn reg_to_gcc(reg: InlineAsmRegOrRegClass) -> ConstraintOrRegister { InlineAsmRegClass::SpirV(SpirVInlineAsmRegClass::reg) => { bug!("GCC backend does not support SPIR-V") } + InlineAsmRegClass::S390x(S390xInlineAsmRegClass::reg) => unimplemented!(), + InlineAsmRegClass::S390x(S390xInlineAsmRegClass::freg) => unimplemented!(), InlineAsmRegClass::Err => unreachable!(), } }; @@ -635,6 +625,10 @@ fn dummy_output_type<'gcc, 'tcx>(cx: &CodegenCx<'gcc, 'tcx>, reg: InlineAsmRegCl InlineAsmRegClass::PowerPC(PowerPCInlineAsmRegClass::reg) => cx.type_i32(), InlineAsmRegClass::PowerPC(PowerPCInlineAsmRegClass::reg_nonzero) => cx.type_i32(), InlineAsmRegClass::PowerPC(PowerPCInlineAsmRegClass::freg) => cx.type_f64(), + InlineAsmRegClass::PowerPC(PowerPCInlineAsmRegClass::cr) + | InlineAsmRegClass::PowerPC(PowerPCInlineAsmRegClass::xer) => { + unreachable!("clobber-only") + }, InlineAsmRegClass::RiscV(RiscVInlineAsmRegClass::reg) => cx.type_i32(), InlineAsmRegClass::RiscV(RiscVInlineAsmRegClass::freg) => cx.type_f32(), InlineAsmRegClass::RiscV(RiscVInlineAsmRegClass::vreg) => cx.type_f32(), @@ -651,6 +645,8 @@ fn dummy_output_type<'gcc, 'tcx>(cx: &CodegenCx<'gcc, 'tcx>, reg: InlineAsmRegCl InlineAsmRegClass::SpirV(SpirVInlineAsmRegClass::reg) => { bug!("LLVM backend does not support SPIR-V") }, + InlineAsmRegClass::S390x(S390xInlineAsmRegClass::reg) => cx.type_i32(), + InlineAsmRegClass::S390x(S390xInlineAsmRegClass::freg) => cx.type_f64(), InlineAsmRegClass::Err => unreachable!(), } } @@ -765,6 +761,8 @@ fn modifier_to_gcc(arch: InlineAsmArch, reg: InlineAsmRegClass, modifier: Option InlineAsmRegClass::SpirV(SpirVInlineAsmRegClass::reg) => { bug!("LLVM backend does not support SPIR-V") }, + InlineAsmRegClass::S390x(S390xInlineAsmRegClass::reg) => unimplemented!(), + InlineAsmRegClass::S390x(S390xInlineAsmRegClass::freg) => unimplemented!(), InlineAsmRegClass::Err => unreachable!(), } } diff 
--git a/src/builder.rs b/src/builder.rs index 0f7db911552a1..5d06d71953c66 100644 --- a/src/builder.rs +++ b/src/builder.rs @@ -1,7 +1,7 @@ use std::borrow::Cow; use std::cell::Cell; use std::convert::TryFrom; -use std::ops::{Deref, Range}; +use std::ops::Deref; use gccjit::FunctionType; use gccjit::{ @@ -31,16 +31,16 @@ use rustc_codegen_ssa::traits::{ StaticBuilderMethods, }; use rustc_middle::ty::{ParamEnv, Ty, TyCtxt}; -use rustc_middle::ty::layout::{HasParamEnv, HasTyCtxt, TyAndLayout}; +use rustc_middle::ty::layout::{HasParamEnv, HasTyCtxt, LayoutError, LayoutOfHelpers, TyAndLayout}; use rustc_span::Span; use rustc_span::def_id::DefId; use rustc_target::abi::{ self, Align, HasDataLayout, - LayoutOf, Size, TargetDataLayout, + WrappingRange, }; use rustc_target::spec::{HasTargetSpec, Target}; @@ -338,12 +338,12 @@ impl HasDataLayout for Builder<'_, '_, '_> { } } -impl<'tcx> LayoutOf for Builder<'_, '_, 'tcx> { - type Ty = Ty<'tcx>; - type TyAndLayout = TyAndLayout<'tcx>; +impl<'tcx> LayoutOfHelpers<'tcx> for Builder<'_, '_, 'tcx> { + type LayoutOfResult = TyAndLayout<'tcx>; - fn layout_of(&self, ty: Ty<'tcx>) -> Self::TyAndLayout { - self.cx.layout_of(ty) + #[inline] + fn handle_layout_err(&self, err: LayoutError<'tcx>, span: Span, ty: Ty<'tcx>) -> ! { + self.cx.handle_layout_err(err, span, ty) } } @@ -818,12 +818,11 @@ impl<'a, 'gcc, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'gcc, 'tcx> { let vr = scalar.valid_range.clone(); match scalar.value { abi::Int(..) => { - let range = scalar.valid_range_exclusive(bx); - if range.start != range.end { - bx.range_metadata(load, range); + if !scalar.is_always_valid(bx) { + bx.range_metadata(load, scalar.valid_range); } } - abi::Pointer if vr.start() < vr.end() && !vr.contains(&0) => { + abi::Pointer if vr.start < vr.end && !vr.contains(0) => { bx.nonnull_metadata(load); } _ => {} @@ -894,7 +893,7 @@ impl<'a, 'gcc, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'gcc, 'tcx> { next_bx } - fn range_metadata(&mut self, _load: RValue<'gcc>, _range: Range) { + fn range_metadata(&mut self, _load: RValue<'gcc>, _range: WrappingRange) { // TODO(antoyo) } @@ -1378,7 +1377,7 @@ impl<'a, 'gcc, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'gcc, 'tcx> { } } - fn to_immediate_scalar(&mut self, val: Self::Value, scalar: &abi::Scalar) -> Self::Value { + fn to_immediate_scalar(&mut self, val: Self::Value, scalar: abi::Scalar) -> Self::Value { if scalar.is_bool() { return self.trunc(val, self.cx().type_i1()); } diff --git a/src/callee.rs b/src/callee.rs index 4ea084ef729d3..e402e0e91f133 100644 --- a/src/callee.rs +++ b/src/callee.rs @@ -19,7 +19,6 @@ pub fn get_fn<'gcc, 'tcx>(cx: &CodegenCx<'gcc, 'tcx>, instance: Instance<'tcx>) assert!(!instance.substs.needs_infer()); assert!(!instance.substs.has_escaping_bound_vars()); - assert!(!instance.substs.has_param_types_or_consts()); if let Some(&func) = cx.instances.borrow().get(&instance) { return func; diff --git a/src/common.rs b/src/common.rs index 752ba99af9ce3..a24fe0df911a6 100644 --- a/src/common.rs +++ b/src/common.rs @@ -12,10 +12,11 @@ use rustc_codegen_ssa::traits::{ }; use rustc_middle::bug; use rustc_middle::mir::Mutability; -use rustc_middle::ty::{layout::TyAndLayout, ScalarInt}; -use rustc_mir::interpret::{Allocation, GlobalAlloc, Scalar}; +use rustc_middle::ty::ScalarInt; +use rustc_middle::ty::layout::{TyAndLayout, LayoutOf}; +use rustc_middle::mir::interpret::{Allocation, GlobalAlloc, Scalar}; use rustc_span::Symbol; -use rustc_target::abi::{self, HasDataLayout, LayoutOf, Pointer, Size}; +use 
rustc_target::abi::{self, HasDataLayout, Pointer, Size}; use crate::consts::const_alloc_to_gcc; use crate::context::CodegenCx; @@ -212,7 +213,7 @@ impl<'gcc, 'tcx> ConstMethods<'tcx> for CodegenCx<'gcc, 'tcx> { None } - fn scalar_to_backend(&self, cv: Scalar, layout: &abi::Scalar, ty: Type<'gcc>) -> RValue<'gcc> { + fn scalar_to_backend(&self, cv: Scalar, layout: abi::Scalar, ty: Type<'gcc>) -> RValue<'gcc> { let bitsize = if layout.is_bool() { 1 } else { layout.value.size(self).bits() }; match cv { Scalar::Int(ScalarInt::ZST) => { diff --git a/src/consts.rs b/src/consts.rs index 9b7959503ab1b..df13fa79f0696 100644 --- a/src/consts.rs +++ b/src/consts.rs @@ -6,10 +6,11 @@ use rustc_middle::{bug, span_bug}; use rustc_middle::middle::codegen_fn_attrs::{CodegenFnAttrFlags, CodegenFnAttrs}; use rustc_middle::mir::mono::MonoItem; use rustc_middle::ty::{self, Instance, Ty}; -use rustc_mir::interpret::{self, Allocation, ErrorHandled, Scalar as InterpScalar, read_target_uint}; +use rustc_middle::ty::layout::LayoutOf; +use rustc_middle::mir::interpret::{self, Allocation, ErrorHandled, Scalar as InterpScalar, read_target_uint}; use rustc_span::Span; use rustc_span::def_id::DefId; -use rustc_target::abi::{self, Align, HasDataLayout, LayoutOf, Primitive, Size}; +use rustc_target::abi::{self, Align, HasDataLayout, Primitive, Size, WrappingRange}; use crate::base; use crate::context::CodegenCx; @@ -182,6 +183,10 @@ impl<'gcc, 'tcx> StaticMethods for CodegenCx<'gcc, 'tcx> { fn add_used_global(&self, _global: RValue<'gcc>) { // TODO(antoyo) } + + fn add_compiler_used_global(&self, _global: RValue<'gcc>) { + // TODO(antoyo) + } } impl<'gcc, 'tcx> CodegenCx<'gcc, 'tcx> { @@ -350,7 +355,7 @@ pub fn const_alloc_to_gcc<'gcc, 'tcx>(cx: &CodegenCx<'gcc, 'tcx>, alloc: &Alloca interpret::Pointer::new(alloc_id, Size::from_bytes(ptr_offset)), &cx.tcx, ), - &abi::Scalar { value: Primitive::Pointer, valid_range: 0..=!0 }, + abi::Scalar { value: Primitive::Pointer, valid_range: WrappingRange { start: 0, end: !0 } }, cx.type_i8p(), )); next_offset = offset + pointer_size; diff --git a/src/context.rs b/src/context.rs index 7ab1ca0d771c1..ef687dd22c6da 100644 --- a/src/context.rs +++ b/src/context.rs @@ -18,13 +18,13 @@ use rustc_codegen_ssa::traits::{ }; use rustc_data_structures::base_n; use rustc_data_structures::fx::{FxHashMap, FxHashSet}; -use rustc_middle::bug; +use rustc_middle::span_bug; use rustc_middle::mir::mono::CodegenUnit; use rustc_middle::ty::{self, Instance, ParamEnv, PolyExistentialTraitRef, Ty, TyCtxt}; -use rustc_middle::ty::layout::{HasParamEnv, HasTyCtxt, LayoutError, TyAndLayout}; +use rustc_middle::ty::layout::{HasParamEnv, HasTyCtxt, LayoutError, TyAndLayout, LayoutOfHelpers}; use rustc_session::Session; -use rustc_span::{Span, Symbol, DUMMY_SP}; -use rustc_target::abi::{HasDataLayout, LayoutOf, PointeeInfo, Size, TargetDataLayout, VariantIdx}; +use rustc_span::{Span, Symbol}; +use rustc_target::abi::{HasDataLayout, PointeeInfo, Size, TargetDataLayout, VariantIdx}; use rustc_target::spec::{HasTargetSpec, Target, TlsModel}; use crate::callee::get_fn; @@ -395,6 +395,14 @@ impl<'gcc, 'tcx> MiscMethods<'tcx> for CodegenCx<'gcc, 'tcx> { None } } + + fn compiler_used_statics(&self) -> &RefCell>> { + unimplemented!() + } + + fn create_compiler_used_variable(&self) { + unimplemented!() + } } impl<'gcc, 'tcx> HasTyCtxt<'tcx> for CodegenCx<'gcc, 'tcx> { @@ -415,22 +423,16 @@ impl<'gcc, 'tcx> HasTargetSpec for CodegenCx<'gcc, 'tcx> { } } -impl<'gcc, 'tcx> LayoutOf for CodegenCx<'gcc, 'tcx> { - type Ty = 
Ty<'tcx>; - type TyAndLayout = TyAndLayout<'tcx>; +impl<'gcc, 'tcx> LayoutOfHelpers<'tcx> for CodegenCx<'gcc, 'tcx> { + type LayoutOfResult = TyAndLayout<'tcx>; - fn layout_of(&self, ty: Ty<'tcx>) -> Self::TyAndLayout { - self.spanned_layout_of(ty, DUMMY_SP) - } - - fn spanned_layout_of(&self, ty: Ty<'tcx>, span: Span) -> Self::TyAndLayout { - self.tcx.layout_of(ParamEnv::reveal_all().and(ty)).unwrap_or_else(|e| { - if let LayoutError::SizeOverflow(_) = e { - self.sess().span_fatal(span, &e.to_string()) - } else { - bug!("failed to get layout for `{}`: {}", ty, e) - } - }) + #[inline] + fn handle_layout_err(&self, err: LayoutError<'tcx>, span: Span, ty: Ty<'tcx>) -> ! { + if let LayoutError::SizeOverflow(_) = err { + self.sess().span_fatal(span, &err.to_string()) + } else { + span_bug!(span, "failed to get layout for `{}`: {}", ty, err) + } } } diff --git a/src/intrinsic/mod.rs b/src/intrinsic/mod.rs index a79be7cfc74c8..3dc4f61a7ac3c 100644 --- a/src/intrinsic/mod.rs +++ b/src/intrinsic/mod.rs @@ -10,8 +10,9 @@ use rustc_codegen_ssa::mir::place::PlaceRef; use rustc_codegen_ssa::traits::{ArgAbiMethods, BaseTypeMethods, BuilderMethods, ConstMethods, IntrinsicCallMethods}; use rustc_middle::bug; use rustc_middle::ty::{self, Instance, Ty}; +use rustc_middle::ty::layout::LayoutOf; use rustc_span::{Span, Symbol, symbol::kw, sym}; -use rustc_target::abi::{HasDataLayout, LayoutOf}; +use rustc_target::abi::HasDataLayout; use rustc_target::abi::call::{ArgAbi, FnAbi, PassMode}; use rustc_target::spec::PanicStrategy; @@ -176,7 +177,7 @@ impl<'a, 'gcc, 'tcx> IntrinsicCallMethods<'tcx> for Builder<'a, 'gcc, 'tcx> { let result = func.new_local(None, arg.get_type(), "zeros"); let zero = self.cx.context.new_rvalue_zero(arg.get_type()); let cond = self.cx.context.new_comparison(None, ComparisonOp::Equals, arg, zero); - self.block.expect("block").end_with_conditional(None, cond, then_block, else_block); + self.llbb().end_with_conditional(None, cond, then_block, else_block); let zero_result = self.cx.context.new_rvalue_from_long(arg.get_type(), width as i64); then_block.add_assignment(None, result, zero_result); @@ -307,6 +308,19 @@ impl<'a, 'gcc, 'tcx> IntrinsicCallMethods<'tcx> for Builder<'a, 'gcc, 'tcx> { } } + sym::black_box => { + args[0].val.store(self, result); + + let block = self.llbb(); + let extended_asm = block.add_extended_asm(None, ""); + extended_asm.add_input_operand(None, "r", result.llval); + extended_asm.add_clobber("memory"); + extended_asm.set_volatile_flag(true); + + // We have copied the value to `result` already. + return; + } + _ if name_str.starts_with("simd_") => { match generic_simd_intrinsic(self, name, callee_ty, args, ret_ty, llret_ty, span) { Ok(llval) => llval, @@ -935,7 +949,7 @@ impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> { then_block.add_assignment(None, res, self.context.new_cast(None, shifted + int_max, result_type)); then_block.end_with_jump(None, after_block); - self.block.expect("block").end_with_conditional(None, overflow, then_block, after_block); + self.llbb().end_with_conditional(None, overflow, then_block, after_block); // NOTE: since jumps were added in a place rustc does not // expect, the current blocks in the state need to be updated. 
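             // A note on the `llbb()` calls introduced above (assuming the helper keeps
             // its current definition): `llbb()` returns the current block and panics
             // when there is none, so it matches the old `self.block.expect("block")`.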
@@ -985,7 +999,7 @@ impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> { then_block.add_assignment(None, res, self.context.new_cast(None, shifted + int_max, result_type)); then_block.end_with_jump(None, after_block); - self.block.expect("block").end_with_conditional(None, overflow, then_block, after_block); + self.llbb().end_with_conditional(None, overflow, then_block, after_block); // NOTE: since jumps were added in a place rustc does not // expect, the current blocks in the state need to be updated. diff --git a/src/lib.rs b/src/lib.rs index 6febccff1ffac..793e5c48d0a36 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -18,7 +18,6 @@ extern crate rustc_errors; extern crate rustc_hir; extern crate rustc_metadata; extern crate rustc_middle; -extern crate rustc_mir; extern crate rustc_session; extern crate rustc_span; extern crate rustc_symbol_mangling; @@ -144,8 +143,8 @@ impl ExtraBackendMethods for GccCodegenBackend { base::write_compressed_metadata(tcx, metadata, gcc_module) } - fn codegen_allocator<'tcx>(&self, tcx: TyCtxt<'tcx>, mods: &mut Self::Module, kind: AllocatorKind, has_alloc_error_handler: bool) { - unsafe { allocator::codegen(tcx, mods, kind, has_alloc_error_handler) } + fn codegen_allocator<'tcx>(&self, tcx: TyCtxt<'tcx>, mods: &mut Self::Module, module_name: &str, kind: AllocatorKind, has_alloc_error_handler: bool) { + unsafe { allocator::codegen(tcx, mods, module_name, kind, has_alloc_error_handler) } } fn compile_codegen_unit<'tcx>(&self, tcx: TyCtxt<'tcx>, cgu_name: Symbol) -> (ModuleCodegen, u64) { diff --git a/src/mono_item.rs b/src/mono_item.rs index f9ec933dd3abc..cedeb54f60bf2 100644 --- a/src/mono_item.rs +++ b/src/mono_item.rs @@ -2,9 +2,8 @@ use rustc_codegen_ssa::traits::PreDefineMethods; use rustc_middle::middle::codegen_fn_attrs::CodegenFnAttrFlags; use rustc_middle::mir::mono::{Linkage, Visibility}; use rustc_middle::ty::{self, Instance, TypeFoldable}; -use rustc_middle::ty::layout::FnAbiExt; +use rustc_middle::ty::layout::{FnAbiExt, LayoutOf}; use rustc_span::def_id::DefId; -use rustc_target::abi::LayoutOf; use rustc_target::abi::call::FnAbi; use crate::base; @@ -31,7 +30,7 @@ impl<'gcc, 'tcx> PreDefineMethods<'tcx> for CodegenCx<'gcc, 'tcx> { } fn predefine_fn(&self, instance: Instance<'tcx>, linkage: Linkage, _visibility: Visibility, symbol_name: &str) { - assert!(!instance.substs.needs_infer() && !instance.substs.has_param_types_or_consts()); + assert!(!instance.substs.needs_infer()); let fn_abi = FnAbi::of_instance(self, instance, &[]); self.linkage.set(base::linkage_to_gcc(linkage)); diff --git a/src/type_of.rs b/src/type_of.rs index 9302f57b642af..2ff2ee7b85226 100644 --- a/src/type_of.rs +++ b/src/type_of.rs @@ -4,9 +4,9 @@ use gccjit::{Struct, Type}; use crate::rustc_codegen_ssa::traits::{BaseTypeMethods, DerivedTypeMethods, LayoutTypeMethods}; use rustc_middle::bug; use rustc_middle::ty::{self, Ty, TypeFoldable}; -use rustc_middle::ty::layout::{FnAbiExt, TyAndLayout}; +use rustc_middle::ty::layout::{FnAbiExt, LayoutOf, TyAndLayout}; use rustc_middle::ty::print::with_no_trimmed_paths; -use rustc_target::abi::{self, Abi, F32, F64, FieldsShape, Int, Integer, LayoutOf, Pointer, PointeeInfo, Size, TyAndLayoutMethods, Variants}; +use rustc_target::abi::{self, Abi, F32, F64, FieldsShape, Int, Integer, Pointer, PointeeInfo, Size, TyAbiInterface, Variants}; use rustc_target::abi::call::{CastTarget, FnAbi, Reg}; use crate::abi::{FnAbiGccExt, GccType}; @@ -308,7 +308,7 @@ impl<'tcx> LayoutGccExt<'tcx> for TyAndLayout<'tcx> { return pointee; } - let result = 
Ty::pointee_info_at(*self, cx, offset); + let result = Ty::ty_and_layout_pointee_info_at(*self, cx, offset); cx.pointee_infos.borrow_mut().insert((self.ty, offset), result); result diff --git a/test.sh b/test.sh index 114db7d53f5bb..12df1f8af2f0b 100755 --- a/test.sh +++ b/test.sh @@ -146,6 +146,7 @@ rm config.toml || true cat > config.toml < Date: Fri, 24 Sep 2021 22:42:05 +0200 Subject: [PATCH 12/27] Hide `<...> defined here` note if the source is not available --- .../src/structured_errors/wrong_number_of_generic_args.rs | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/compiler/rustc_typeck/src/structured_errors/wrong_number_of_generic_args.rs b/compiler/rustc_typeck/src/structured_errors/wrong_number_of_generic_args.rs index 2e3db4d6d655f..c2038f4e047ce 100644 --- a/compiler/rustc_typeck/src/structured_errors/wrong_number_of_generic_args.rs +++ b/compiler/rustc_typeck/src/structured_errors/wrong_number_of_generic_args.rs @@ -731,7 +731,11 @@ impl<'a, 'tcx> WrongNumberOfGenericArgs<'a, 'tcx> { /// Builds the `type defined here` message. fn show_definition(&self, err: &mut DiagnosticBuilder<'_>) { let mut spans: MultiSpan = if let Some(def_span) = self.tcx.def_ident_span(self.def_id) { - def_span.into() + if self.tcx.sess.source_map().span_to_snippet(def_span).is_ok() { + def_span.into() + } else { + return; + } } else { return; }; From 7779eb74c8367bc718a2005b4af42796a5d0b023 Mon Sep 17 00:00:00 2001 From: Jane Lusby Date: Fri, 24 Sep 2021 14:45:09 -0700 Subject: [PATCH 13/27] make junit output more consistent with default format --- library/test/src/formatters/junit.rs | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/library/test/src/formatters/junit.rs b/library/test/src/formatters/junit.rs index aa24480751419..44a33c92a9ff7 100644 --- a/library/test/src/formatters/junit.rs +++ b/library/test/src/formatters/junit.rs @@ -29,7 +29,8 @@ impl JunitFormatter { impl OutputFormatter for JunitFormatter { fn write_run_start(&mut self, _test_count: usize) -> io::Result<()> { // We write xml header on run start - self.write_message(&"") + self.out.write_all("\n".as_bytes())?; + self.write_message("") } fn write_test_start(&mut self, _desc: &TestDesc) -> io::Result<()> { @@ -133,6 +134,8 @@ impl OutputFormatter for JunitFormatter { self.write_message("")?; self.write_message("")?; + self.out.write_all("\n\n".as_bytes())?; + Ok(state.failed == 0) } } From 3d08ff1c19360e89a351184b9191454045059823 Mon Sep 17 00:00:00 2001 From: Fabian Wolff Date: Sat, 25 Sep 2021 21:47:33 +0200 Subject: [PATCH 14/27] Fix incorrect disambiguation suggestion for associated items --- compiler/rustc_typeck/src/check/method/suggest.rs | 10 +++++++++- .../associated-const-ambiguity-report.stderr | 8 ++++---- src/test/ui/error-codes/E0034.stderr | 8 ++++---- .../method-ambig-two-traits-from-impls2.stderr | 8 ++++---- src/test/ui/span/issue-7575.stderr | 14 +++++++------- 5 files changed, 28 insertions(+), 20 deletions(-) diff --git a/compiler/rustc_typeck/src/check/method/suggest.rs b/compiler/rustc_typeck/src/check/method/suggest.rs index 9744f4f6483c7..4ca326e3a396c 100644 --- a/compiler/rustc_typeck/src/check/method/suggest.rs +++ b/compiler/rustc_typeck/src/check/method/suggest.rs @@ -178,6 +178,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> { sugg_span, idx, self.tcx.sess.source_map(), + item.fn_has_self_parameter, ); } } @@ -220,6 +221,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> { sugg_span, idx, self.tcx.sess.source_map(), + item.fn_has_self_parameter, ); } } @@ -1738,6 +1740,7 @@ fn 
print_disambiguation_help( span: Span, candidate: Option, source_map: &source_map::SourceMap, + fn_has_self_parameter: bool, ) { let mut applicability = Applicability::MachineApplicable; let (span, sugg) = if let (ty::AssocKind::Fn, Some(args)) = (kind, args) { @@ -1756,9 +1759,14 @@ fn print_disambiguation_help( .collect::>() .join(", "), ); + let trait_name = if !fn_has_self_parameter { + format!("<{} as {}>", rcvr_ty, trait_name) + } else { + trait_name + }; (span, format!("{}::{}{}", trait_name, item_name, args)) } else { - (span.with_hi(item_name.span.lo()), format!("{}::", trait_name)) + (span.with_hi(item_name.span.lo()), format!("<{} as {}>::", rcvr_ty, trait_name)) }; err.span_suggestion_verbose( span, diff --git a/src/test/ui/associated-consts/associated-const-ambiguity-report.stderr b/src/test/ui/associated-consts/associated-const-ambiguity-report.stderr index d4c12b8e061cc..60cf9a533cd72 100644 --- a/src/test/ui/associated-consts/associated-const-ambiguity-report.stderr +++ b/src/test/ui/associated-consts/associated-const-ambiguity-report.stderr @@ -16,12 +16,12 @@ LL | const ID: i32 = 3; | ^^^^^^^^^^^^^^^^^^ help: disambiguate the associated constant for candidate #1 | -LL | const X: i32 = Foo::ID; - | ~~~~~ +LL | const X: i32 = ::ID; + | ~~~~~~~~~~~~~~ help: disambiguate the associated constant for candidate #2 | -LL | const X: i32 = Bar::ID; - | ~~~~~ +LL | const X: i32 = ::ID; + | ~~~~~~~~~~~~~~ error: aborting due to previous error diff --git a/src/test/ui/error-codes/E0034.stderr b/src/test/ui/error-codes/E0034.stderr index 83718a1e27371..e296217026570 100644 --- a/src/test/ui/error-codes/E0034.stderr +++ b/src/test/ui/error-codes/E0034.stderr @@ -16,12 +16,12 @@ LL | fn foo() {} | ^^^^^^^^ help: disambiguate the associated function for candidate #1 | -LL | Trait1::foo() - | ~~~~~~~~ +LL | ::foo() + | ~~~~~~~~~~~~~~~~~~ help: disambiguate the associated function for candidate #2 | -LL | Trait2::foo() - | ~~~~~~~~ +LL | ::foo() + | ~~~~~~~~~~~~~~~~~~ error: aborting due to previous error diff --git a/src/test/ui/methods/method-ambig-two-traits-from-impls2.stderr b/src/test/ui/methods/method-ambig-two-traits-from-impls2.stderr index e0a58aed8f447..4ba778e0ef725 100644 --- a/src/test/ui/methods/method-ambig-two-traits-from-impls2.stderr +++ b/src/test/ui/methods/method-ambig-two-traits-from-impls2.stderr @@ -16,12 +16,12 @@ LL | fn foo() {} | ^^^^^^^^ help: disambiguate the associated function for candidate #1 | -LL | A::foo(); - | ~~~ +LL | ::foo(); + | ~~~~~~~~~~~ help: disambiguate the associated function for candidate #2 | -LL | B::foo(); - | ~~~ +LL | ::foo(); + | ~~~~~~~~~~~ error: aborting due to previous error diff --git a/src/test/ui/span/issue-7575.stderr b/src/test/ui/span/issue-7575.stderr index 288c1042a26c7..783f5aca41746 100644 --- a/src/test/ui/span/issue-7575.stderr +++ b/src/test/ui/span/issue-7575.stderr @@ -27,16 +27,16 @@ LL | fn f9(_: usize) -> usize; candidate #3: `UnusedTrait` help: disambiguate the associated function for candidate #1 | -LL | u.f8(42) + CtxtFn::f9(u, 342) + m.fff(42) - | ~~~~~~~~~~~~~~~~~~ +LL | u.f8(42) + ::f9(u, 342) + m.fff(42) + | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ help: disambiguate the associated function for candidate #2 | -LL | u.f8(42) + OtherTrait::f9(u, 342) + m.fff(42) - | ~~~~~~~~~~~~~~~~~~~~~~ +LL | u.f8(42) + ::f9(u, 342) + m.fff(42) + | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ help: disambiguate the associated function for candidate #3 | -LL | u.f8(42) + UnusedTrait::f9(u, 342) + m.fff(42) - | ~~~~~~~~~~~~~~~~~~~~~~~ +LL | 
u.f8(42) + ::f9(u, 342) + m.fff(42) + | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ error[E0599]: no method named `fff` found for struct `Myisize` in the current scope --> $DIR/issue-7575.rs:62:30 @@ -72,7 +72,7 @@ LL | fn is_str() -> bool { = help: items from traits can only be used if the type parameter is bounded by the trait help: disambiguate the associated function for the candidate | -LL | ManyImplTrait::is_str(t) +LL | ::is_str(t) | error: aborting due to 3 previous errors From 0f4b616a08eda85247e6123120b8cba01dc4383b Mon Sep 17 00:00:00 2001 From: antoyo Date: Sun, 26 Sep 2021 00:06:42 -0400 Subject: [PATCH 15/27] Add notes for cross-compilation to gcc-only targets (#68) --- Readme.md | 18 ++++++++++++++++++ config.sh | 7 +++++-- 2 files changed, 23 insertions(+), 2 deletions(-) diff --git a/Readme.md b/Readme.md index 1f27d721fabbb..709d93c6edb05 100644 --- a/Readme.md +++ b/Readme.md @@ -115,3 +115,21 @@ p loc->m_line * Build the stage2 compiler (`rustup toolchain link debug-current build/x86_64-unknown-linux-gnu/stage2`). * Clean and rebuild the codegen with `debug-current` in the file `rust-toolchain`. + +### How to build a cross-compiling libgccjit + +#### Building libgccjit + + * Follow these instructions: https://preshing.com/20141119/how-to-build-a-gcc-cross-compiler/ with the following changes: + * Configure gcc with `../gcc/configure --enable-host-shared --disable-multilib --enable-languages=c,jit,c++ --disable-bootstrap --enable-checking=release --prefix=/opt/m68k-gcc/ --target=m68k-linux --without-headers`. + * Some shells, like fish, don't define the environment variable `$MACHTYPE`. + * Add `CFLAGS="-Wno-error=attributes -g -O2"` at the end of the configure command for building glibc (`CFLAGS="-Wno-error=attributes -Wno-error=array-parameter -Wno-error=stringop-overflow -Wno-error=array-bounds -g -O2"` for glibc 2.31, which is useful for Debian). + +#### Configuring rustc_codegen_gcc + + * Set `TARGET_TRIPLE="m68k-unknown-linux-gnu"` in config.sh. + * Since rustc doesn't support this architecture yet, set it back to `TARGET_TRIPLE="mips-unknown-linux-gnu"` (or another target having the same attributes). Alternatively, create a [target specification file](https://book.avr-rust.com/005.1-the-target-specification-json-file.html) (note that the `arch` specified in this file must be supported by the rust compiler). + * Set `linker='-Clinker=m68k-linux-gcc'`. + * Set the path to the cross-compiling libgccjit in `gcc_path`. + * Disable the 128-bit integer types if the target doesn't support them by using `let i128_type = context.new_type::();` in `context.rs` (same for u128_type). + * (might not be necessary) Disable the compilation of libstd.so (and possibly libcore.so?). diff --git a/config.sh b/config.sh index 98caeb7407e08..d022371eebed4 100644 --- a/config.sh +++ b/config.sh @@ -21,12 +21,15 @@ fi HOST_TRIPLE=$(rustc -vV | grep host | cut -d: -f2 | tr -d " ") TARGET_TRIPLE=$HOST_TRIPLE -#TARGET_TRIPLE="aarch64-unknown-linux-gnu" +#TARGET_TRIPLE="m68k-unknown-linux-gnu" linker='' RUN_WRAPPER='' if [[ "$HOST_TRIPLE" != "$TARGET_TRIPLE" ]]; then - if [[ "$TARGET_TRIPLE" == "aarch64-unknown-linux-gnu" ]]; then + if [[ "$TARGET_TRIPLE" == "m68k-unknown-linux-gnu" ]]; then + TARGET_TRIPLE="mips-unknown-linux-gnu" + linker='-Clinker=m68k-linux-gcc' + elif [[ "$TARGET_TRIPLE" == "aarch64-unknown-linux-gnu" ]]; then # We are cross-compiling for aarch64. Use the correct linker and run tests in qemu. 
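        # A minimal host setup sketch (package names are assumptions, Debian-style):
        #     apt install gcc-aarch64-linux-gnu qemu-user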
linker='-Clinker=aarch64-linux-gnu-gcc'
        RUN_WRAPPER='qemu-aarch64 -L /usr/aarch64-linux-gnu'

From 4e7e822f39fca4ccf146a71dbb1098b76ecea1af Mon Sep 17 00:00:00 2001
From: Commeownist
Date: Sun, 26 Sep 2021 16:30:45 +0300
Subject: [PATCH 16/27] Improve handling of registers in inline asm (#82)

* Correctly handle st(0) register in the clobbers list

* Gate the clobbers based on enabled target features

---
 src/asm.rs | 41 +++++++++++++++++++++++++++++------------
 1 file changed, 29 insertions(+), 12 deletions(-)

diff --git a/src/asm.rs b/src/asm.rs
index a684c34b64410..3b77097e9ad00 100644
--- a/src/asm.rs
+++ b/src/asm.rs
@@ -6,7 +6,7 @@ use rustc_codegen_ssa::traits::{AsmBuilderMethods, AsmMethods, BaseTypeMethods,
 use rustc_hir::LlvmInlineAsmInner;
 use rustc_middle::{bug, ty::Instance};
-use rustc_span::Span;
+use rustc_span::{Span, Symbol};
 use rustc_target::asm::*;
 
 use std::borrow::Cow;
@@ -173,7 +173,20 @@ impl<'a, 'gcc, 'tcx> AsmBuilderMethods<'tcx> for Builder<'a, 'gcc, 'tcx> {
                     continue
                 },
                 (Register(reg_name), None) => {
-                    clobbers.push(reg_name);
+                    // `clobber_abi` can add lots of clobbers that are not supported by the target,
+                    // such as AVX-512 registers, so we just ignore unsupported registers
+                    let is_target_supported = reg.reg_class().supported_types(asm_arch).iter()
+                        .any(|&(_, feature)| {
+                            if let Some(feature) = feature {
+                                self.tcx.sess.target_features.contains(&Symbol::intern(feature))
+                            } else {
+                                true // Register class is unconditionally supported
+                            }
+                        });
+
+                    if is_target_supported && !clobbers.contains(&reg_name) {
+                        clobbers.push(reg_name);
+                    }
                     continue
                 }
             };
@@ -526,16 +539,20 @@ fn reg_to_gcc(reg: InlineAsmRegOrRegClass) -> ConstraintOrRegister {
     let constraint = match reg {
         // For vector registers LLVM wants the register name to match the type size.
         InlineAsmRegOrRegClass::Reg(reg) => {
-            // TODO(antoyo): add support for vector register.
-            match reg.name() {
-                "ax" => "a",
-                "bx" => "b",
-                "cx" => "c",
-                "dx" => "d",
-                "si" => "S",
-                "di" => "D",
-                // For registers like r11, we have to create a register variable: https://stackoverflow.com/a/31774784/389119
-                name => return ConstraintOrRegister::Register(name),
+            match reg {
+                InlineAsmReg::X86(_) => {
+                    // TODO(antoyo): add support for vector register.
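+                    // A sketch of the underlying GCC mechanism (GNU C shown for
+                    // illustration): an explicit register is pinned via a register
+                    // variable, e.g. `register long tmp asm("r11");`, which is what
+                    // the register variables created by the caller correspond to.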
+ // + // // For explicit registers, we have to create a register variable: https://stackoverflow.com/a/31774784/389119 + return ConstraintOrRegister::Register(match reg.name() { + // Some of registers' names does not map 1-1 from rust to gcc + "st(0)" => "st", + + name => name, + }); + } + + _ => unimplemented!(), } }, InlineAsmRegOrRegClass::RegClass(reg) => match reg { From 64c561dc2253463659a8ae93b3d265dda45c6ee9 Mon Sep 17 00:00:00 2001 From: antoyo Date: Sun, 26 Sep 2021 12:20:02 -0400 Subject: [PATCH 17/27] Fix global initialization (#91) * Make define_global() return a RValue directly * Return LValue in functions declaring a global variable * Remove useless cast * Fix bytes_in_context to use an array rvalue * Remove global_names which is unused * Make const_struct create a constant struct * Correctly initialize global in static_addr_of_mut * Fix global variable initialization * Remove workaround for ARGV --- Cargo.lock | 38 ++++++++-------- src/base.rs | 15 +------ src/builder.rs | 3 +- src/callee.rs | 4 +- src/common.rs | 46 +++++++++----------- src/consts.rs | 89 ++++++++++---------------------------- src/context.rs | 45 +++---------------- src/debuginfo.rs | 25 +---------- src/declare.rs | 85 ++++++++---------------------------- src/lib.rs | 15 +------ src/mangled_std_symbols.rs | 4 -- src/mono_item.rs | 7 +-- 12 files changed, 93 insertions(+), 283 deletions(-) delete mode 100644 src/mangled_std_symbols.rs diff --git a/Cargo.lock b/Cargo.lock index f3e07fd08ee8e..60a2101c689cc 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -25,9 +25,9 @@ checksum = "cdb031dd78e28731d87d56cc8ffef4a8f36ca26c38fe2de700543e627f8a464a" [[package]] name = "bitflags" -version = "1.2.1" +version = "1.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cf1de2fe8c75bc145a2f577add951f8134889b4795d47466a54a5c846d691693" +checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a" [[package]] name = "cfg-if" @@ -56,7 +56,7 @@ dependencies = [ [[package]] name = "gccjit" version = "1.0.0" -source = "git+https://github.com/antoyo/gccjit.rs#54be27e41fff7b6ab532e2e21a82df50a12b9ad3" +source = "git+https://github.com/antoyo/gccjit.rs#2d4fea7319f80531b2e5d264fca9f1c498a3a62e" dependencies = [ "gccjit_sys", ] @@ -64,7 +64,7 @@ dependencies = [ [[package]] name = "gccjit_sys" version = "0.0.1" -source = "git+https://github.com/antoyo/gccjit.rs#54be27e41fff7b6ab532e2e21a82df50a12b9ad3" +source = "git+https://github.com/antoyo/gccjit.rs#2d4fea7319f80531b2e5d264fca9f1c498a3a62e" dependencies = [ "libc 0.1.12", ] @@ -85,7 +85,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7fcd999463524c52659517fe2cea98493cfe485d10565e7b0fb07dbba7ad2753" dependencies = [ "cfg-if", - "libc 0.2.98", + "libc 0.2.102", "wasi", ] @@ -101,7 +101,7 @@ version = "0.1.19" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "62b467343b94ba476dcb2500d242dadbb39557df889310ac77c5d99100aaac33" dependencies = [ - "libc 0.2.98", + "libc 0.2.102", ] [[package]] @@ -122,7 +122,7 @@ checksum = "96bd995a092cac79868250589869b5a5d656b02a02bd74c8ebdc566dc7203090" dependencies = [ "fm", "getopts", - "libc 0.2.98", + "libc 0.2.102", "num_cpus", "termcolor", "threadpool", @@ -138,15 +138,15 @@ checksum = "e32a70cf75e5846d53a673923498228bbec6a8624708a9ea5645f075d6276122" [[package]] name = "libc" -version = "0.2.98" +version = "0.2.102" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"320cfe77175da3a483efed4bc0adc1968ca050b098ce4f2f1c13a56626128790" +checksum = "a2a5ac8f984bfcf3a823267e5fde638acc3325f6496633a5da6bb6eb2171e103" [[package]] name = "memchr" -version = "2.4.0" +version = "2.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b16bd47d9e329435e309c58469fe0791c2d0d1ba96ec0954152a5ae2b04387dc" +checksum = "308cc39be01b73d0d18f82a0e7b2a3df85245f84af96fdddc5d202d27e47b86a" [[package]] name = "num_cpus" @@ -155,7 +155,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "05499f3756671c15885fee9034446956fff3f243d6077b91e5767df161f766b3" dependencies = [ "hermit-abi", - "libc 0.2.98", + "libc 0.2.102", ] [[package]] @@ -181,7 +181,7 @@ version = "0.8.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2e7573632e6454cf6b99d7aac4ccca54be06da05aca2ef7423d22d27d4d4bcd8" dependencies = [ - "libc 0.2.98", + "libc 0.2.102", "rand_chacha", "rand_core", "rand_hc", @@ -217,9 +217,9 @@ dependencies = [ [[package]] name = "redox_syscall" -version = "0.2.9" +version = "0.2.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5ab49abadf3f9e1c4bc499e8845e152ad87d2ad2d30371841171169e9d75feee" +checksum = "8383f39639269cde97d255a32bdb68c047337295414940c68bdd30c2e13203ff" dependencies = [ "bitflags", ] @@ -284,7 +284,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "dac1c663cfc93810f88aed9b8941d48cabf856a1b111c29a40439018d870eb22" dependencies = [ "cfg-if", - "libc 0.2.98", + "libc 0.2.102", "rand", "redox_syscall", "remove_dir_all", @@ -311,9 +311,9 @@ dependencies = [ [[package]] name = "unicode-width" -version = "0.1.8" +version = "0.1.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9337591893a19b88d8d87f2cec1e73fad5cdfd10e5a6f349f498ad6ea2ffb1e3" +checksum = "3ed742d4ea2bd1176e236172c8429aaf54486e7ac098db29ffe6529e0ce50973" [[package]] name = "wait-timeout" @@ -321,7 +321,7 @@ version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9f200f5b12eb75f8c1ed65abd4b2db8a6e1b138a20de009dacee265a2498f3f6" dependencies = [ - "libc 0.2.98", + "libc 0.2.102", ] [[package]] diff --git a/src/base.rs b/src/base.rs index 2963a3d49a20e..9fd043607fc7e 100644 --- a/src/base.rs +++ b/src/base.rs @@ -1,5 +1,4 @@ use std::env; -use std::sync::Once; use std::time::Instant; use gccjit::{ @@ -7,7 +6,6 @@ use gccjit::{ FunctionType, GlobalKind, }; -use rustc_hir::def_id::LOCAL_CRATE; use rustc_middle::dep_graph; use rustc_middle::middle::cstore::EncodedMetadata; use rustc_middle::middle::exported_symbols; @@ -20,7 +18,7 @@ use rustc_codegen_ssa::traits::DebugInfoMethods; use rustc_session::config::DebugInfo; use rustc_span::Symbol; -use crate::{GccContext, create_function_calling_initializers}; +use crate::GccContext; use crate::builder::Builder; use crate::context::CodegenCx; @@ -97,15 +95,6 @@ pub fn compile_codegen_unit<'tcx>(tcx: TyCtxt<'tcx>, cgu_name: Symbol) -> (Modul { let cx = CodegenCx::new(&context, cgu, tcx); - static START: Once = Once::new(); - START.call_once(|| { - let initializer_name = format!("__gccGlobalCrateInit{}", tcx.crate_name(LOCAL_CRATE)); - let func = context.new_function(None, FunctionType::Exported, context.new_type::<()>(), &[], initializer_name, false); - let block = func.new_block("initial"); - create_function_calling_initializers(tcx, &context, block); - block.end_with_void_return(None); - }); - let mono_items = cgu.items_in_deterministic_order(tcx); for 
&(mono_item, (linkage, visibility)) in &mono_items { mono_item.predefine::>(&cx, linkage, visibility); @@ -124,8 +113,6 @@ pub fn compile_codegen_unit<'tcx>(tcx: TyCtxt<'tcx>, cgu_name: Symbol) -> (Modul if cx.sess().opts.debuginfo != DebugInfo::None { cx.debuginfo_finalize(); } - - cx.global_init_block.end_with_void_return(None); } ModuleCodegen { diff --git a/src/builder.rs b/src/builder.rs index 5d06d71953c66..fd3710062ee45 100644 --- a/src/builder.rs +++ b/src/builder.rs @@ -505,7 +505,6 @@ impl<'a, 'gcc, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'gcc, 'tcx> { // FIXME(antoyo): rustc_codegen_ssa::mir::intrinsic uses different types for a and b but they // should be the same. let typ = a.get_type().to_signed(self); - let a = self.context.new_cast(None, a, typ); let b = self.context.new_cast(None, b, typ); a / b } @@ -1413,7 +1412,7 @@ impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> { impl<'a, 'gcc, 'tcx> StaticBuilderMethods for Builder<'a, 'gcc, 'tcx> { fn get_static(&mut self, def_id: DefId) -> RValue<'gcc> { // Forward to the `get_static` method of `CodegenCx` - self.cx().get_static(def_id) + self.cx().get_static(def_id).get_address(None) } } diff --git a/src/callee.rs b/src/callee.rs index e402e0e91f133..aa86332c475cf 100644 --- a/src/callee.rs +++ b/src/callee.rs @@ -20,7 +20,7 @@ pub fn get_fn<'gcc, 'tcx>(cx: &CodegenCx<'gcc, 'tcx>, instance: Instance<'tcx>) assert!(!instance.substs.needs_infer()); assert!(!instance.substs.has_escaping_bound_vars()); - if let Some(&func) = cx.instances.borrow().get(&instance) { + if let Some(&func) = cx.function_instances.borrow().get(&instance) { return func; } @@ -72,7 +72,7 @@ pub fn get_fn<'gcc, 'tcx>(cx: &CodegenCx<'gcc, 'tcx>, instance: Instance<'tcx>) func }; - cx.instances.borrow_mut().insert(instance, func); + cx.function_instances.borrow_mut().insert(instance, func); func } diff --git a/src/common.rs b/src/common.rs index a24fe0df911a6..c60cbd81f636a 100644 --- a/src/common.rs +++ b/src/common.rs @@ -1,6 +1,7 @@ use std::convert::TryFrom; use std::convert::TryInto; +use gccjit::LValue; use gccjit::{Block, CType, RValue, Type, ToRValue}; use rustc_codegen_ssa::mir::place::PlaceRef; use rustc_codegen_ssa::traits::{ @@ -10,7 +11,6 @@ use rustc_codegen_ssa::traits::{ MiscMethods, StaticMethods, }; -use rustc_middle::bug; use rustc_middle::mir::Mutability; use rustc_middle::ty::ScalarInt; use rustc_middle::ty::layout::{TyAndLayout, LayoutOf}; @@ -27,28 +27,27 @@ impl<'gcc, 'tcx> CodegenCx<'gcc, 'tcx> { bytes_in_context(self, bytes) } - fn const_cstr(&self, symbol: Symbol, _null_terminated: bool) -> RValue<'gcc> { + fn const_cstr(&self, symbol: Symbol, _null_terminated: bool) -> LValue<'gcc> { // TODO(antoyo): handle null_terminated. if let Some(&value) = self.const_cstr_cache.borrow().get(&symbol) { - return value.to_rvalue(); + return value; } let global = self.global_string(&*symbol.as_str()); - self.const_cstr_cache.borrow_mut().insert(symbol, global.dereference(None)); + self.const_cstr_cache.borrow_mut().insert(symbol, global); global } - fn global_string(&self, string: &str) -> RValue<'gcc> { + fn global_string(&self, string: &str) -> LValue<'gcc> { // TODO(antoyo): handle non-null-terminated strings. let string = self.context.new_string_literal(&*string); let sym = self.generate_local_symbol_name("str"); // NOTE: TLS is always off for a string litteral. // NOTE: string litterals do not have a link section. 
- let global = self.define_global(&sym, self.val_ty(string), false, None) - .unwrap_or_else(|| bug!("symbol `{}` is already defined", sym)); - self.global_init_block.add_assignment(None, global.dereference(None), string); - global.to_rvalue() + let global = self.declare_private_global(&sym, self.val_ty(string)); + global.global_set_initializer_value(string); // TODO: only set if not imported? + global // TODO(antoyo): set linkage. } @@ -76,10 +75,13 @@ impl<'gcc, 'tcx> CodegenCx<'gcc, 'tcx> { pub fn bytes_in_context<'gcc, 'tcx>(cx: &CodegenCx<'gcc, 'tcx>, bytes: &[u8]) -> RValue<'gcc> { let context = &cx.context; - let typ = context.new_array_type(None, context.new_type::(), bytes.len() as i32); - let global = cx.declare_unnamed_global(typ); - global.global_set_initializer(bytes); - global.to_rvalue() + let byte_type = context.new_type::(); + let typ = context.new_array_type(None, byte_type, bytes.len() as i32); + let elements: Vec<_> = + bytes.iter() + .map(|&byte| context.new_rvalue_from_int(byte_type, byte as i32)) + .collect(); + context.new_rvalue_from_array(None, typ, &elements) } pub fn type_is_pointer<'gcc>(typ: Type<'gcc>) -> bool { @@ -180,7 +182,7 @@ impl<'gcc, 'tcx> ConstMethods<'tcx> for CodegenCx<'gcc, 'tcx> { fn const_str(&self, s: Symbol) -> (RValue<'gcc>, RValue<'gcc>) { let len = s.as_str().len(); - let cs = self.const_ptrcast(self.const_cstr(s, false), + let cs = self.const_ptrcast(self.const_cstr(s, false).get_address(None), self.type_ptr_to(self.layout_of(self.tcx.types.str_).gcc_type(self, true)), ); (cs, self.const_usize(len as u64)) @@ -191,16 +193,9 @@ impl<'gcc, 'tcx> ConstMethods<'tcx> for CodegenCx<'gcc, 'tcx> { .map(|value| value.get_type()) .collect(); // TODO(antoyo): cache the type? It's anonymous, so probably not. - let name = fields.iter().map(|typ| format!("{:?}", typ)).collect::>().join("_"); let typ = self.type_struct(&fields, packed); - let structure = self.global_init_func.new_local(None, typ, &name); let struct_type = typ.is_struct().expect("struct type"); - for (index, value) in values.iter().enumerate() { - let field = struct_type.get_field(index as i32); - let field_lvalue = structure.access_field(None, field); - self.global_init_block.add_assignment(None, field_lvalue, *value); - } - self.lvalue_to_rvalue(structure) + self.context.new_rvalue_from_struct(None, struct_type, values) } fn const_to_opt_uint(&self, _v: RValue<'gcc>) -> Option { @@ -260,19 +255,18 @@ impl<'gcc, 'tcx> ConstMethods<'tcx> for CodegenCx<'gcc, 'tcx> { }, GlobalAlloc::Static(def_id) => { assert!(self.tcx.is_static(def_id)); - self.get_static(def_id) + self.get_static(def_id).get_address(None) }, }; let ptr_type = base_addr.get_type(); let base_addr = self.const_bitcast(base_addr, self.usize_type); let offset = self.context.new_rvalue_from_long(self.usize_type, offset.bytes() as i64); let ptr = self.const_bitcast(base_addr + offset, ptr_type); - let value = ptr.dereference(None); if layout.value != Pointer { - self.const_bitcast(value.to_rvalue(), ty) + self.const_bitcast(ptr.dereference(None).to_rvalue(), ty) } else { - self.const_bitcast(value.get_address(None), ty) + self.const_bitcast(ptr, ty) } } } diff --git a/src/consts.rs b/src/consts.rs index df13fa79f0696..205498acc3187 100644 --- a/src/consts.rs +++ b/src/consts.rs @@ -1,4 +1,4 @@ -use gccjit::{RValue, Type}; +use gccjit::{LValue, RValue, ToRValue, Type}; use rustc_codegen_ssa::traits::{BaseTypeMethods, ConstMethods, DerivedTypeMethods, StaticMethods}; use rustc_hir as hir; use rustc_hir::Node; @@ -14,7 +14,6 @@ use 
rustc_target::abi::{self, Align, HasDataLayout, Primitive, Size, WrappingRan use crate::base; use crate::context::CodegenCx; -use crate::mangled_std_symbols::{ARGC, ARGV, ARGV_INIT_ARRAY}; use crate::type_of::LayoutGccExt; impl<'gcc, 'tcx> CodegenCx<'gcc, 'tcx> { @@ -45,17 +44,13 @@ impl<'gcc, 'tcx> StaticMethods for CodegenCx<'gcc, 'tcx> { fn codegen_static(&self, def_id: DefId, is_mutable: bool) { let attrs = self.tcx.codegen_fn_attrs(def_id); - let instance = Instance::mono(self.tcx, def_id); - let name = &*self.tcx.symbol_name(instance).name; - - let (value, alloc) = + let value = match codegen_static_initializer(&self, def_id) { - Ok(value) => value, + Ok((value, _)) => value, // Error has already been reported Err(_) => return, }; - let is_tls = attrs.flags.contains(CodegenFnAttrFlags::THREAD_LOCAL); let global = self.get_static(def_id); // boolean SSA values are i1, but they have to be stored in i8 slots, @@ -73,45 +68,16 @@ impl<'gcc, 'tcx> StaticMethods for CodegenCx<'gcc, 'tcx> { let ty = instance.ty(self.tcx, ty::ParamEnv::reveal_all()); let gcc_type = self.layout_of(ty).gcc_type(self, true); - let global = - if val_llty == gcc_type { - global + // TODO(antoyo): set alignment. + + let value = + if value.get_type() != gcc_type { + self.context.new_bitcast(None, value, gcc_type) } else { - // If we created the global with the wrong type, - // correct the type. - // TODO(antoyo): set value name, linkage and visibility. - - let new_global = self.get_or_insert_global(&name, val_llty, is_tls, attrs.link_section); - - // To avoid breaking any invariants, we leave around the old - // global for the moment; we'll replace all references to it - // with the new global later. (See base::codegen_backend.) - //self.statics_to_rauw.borrow_mut().push((global, new_global)); - new_global + value }; - // TODO(antoyo): set alignment and initializer. - let value = self.rvalue_as_lvalue(value); - let value = value.get_address(None); - let dest_typ = global.get_type(); - let value = self.context.new_cast(None, value, dest_typ); - - // NOTE: do not init the variables related to argc/argv because it seems we cannot - // overwrite those variables. - // FIXME(antoyo): correctly support global variable initialization. - let skip_init = [ - ARGV_INIT_ARRAY, - ARGC, - ARGV, - ]; - if !skip_init.iter().any(|symbol_name| name.starts_with(symbol_name)) { - // TODO(antoyo): switch to set_initializer when libgccjit supports that. - let memcpy = self.context.get_builtin_function("memcpy"); - let dst = self.context.new_cast(None, global, self.type_i8p()); - let src = self.context.new_cast(None, value, self.type_ptr_to(self.type_void())); - let size = self.context.new_rvalue_from_long(self.sizet_type, alloc.size().bytes() as i64); - self.global_init_block.add_eval(None, self.context.new_call(None, memcpy, &[dst, src, size])); - } + global.global_set_initializer_value(value); // As an optimization, all shared statics which do not have interior // mutability are placed into read-only memory. 
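The hunk above trades the old memcpy-in-a-constructor scheme for a real compile-time initializer on the global. As a rough sketch of the underlying libgccjit pattern, assuming the patched gccjit.rs fork this backend builds against (the API names follow the diff above; the global name `answer` and its value are hypothetical):

use gccjit::{Context, GlobalKind, OutputKind};

fn main() {
    let context = Context::default();
    let int_type = context.new_type::<i32>();
    // Declare the global, then attach a static initializer instead of
    // assigning to it from a generated __gccGlobalInit* function at startup.
    let global = context.new_global(None, GlobalKind::Exported, int_type, "answer");
    global.global_set_initializer_value(context.new_rvalue_from_int(int_type, 42));
    context.compile_to_file(OutputKind::ObjectFile, "answer.o");
}

The emitted object file then carries `answer` in its data section with the value already set, which is what allows this patch series to delete the `__gccGlobalCrateInit*` machinery.
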
@@ -175,7 +141,7 @@ impl<'gcc, 'tcx> StaticMethods for CodegenCx<'gcc, 'tcx> { } if attrs.flags.contains(CodegenFnAttrFlags::USED) { - self.add_used_global(global); + self.add_used_global(global.to_rvalue()); } } @@ -191,38 +157,31 @@ impl<'gcc, 'tcx> StaticMethods for CodegenCx<'gcc, 'tcx> { impl<'gcc, 'tcx> CodegenCx<'gcc, 'tcx> { pub fn static_addr_of_mut(&self, cv: RValue<'gcc>, align: Align, kind: Option<&str>) -> RValue<'gcc> { - let (name, gv) = + let global = match kind { Some(kind) if !self.tcx.sess.fewer_names() => { let name = self.generate_local_symbol_name(kind); // TODO(antoyo): check if it's okay that TLS is off here. // TODO(antoyo): check if it's okay that link_section is None here. // TODO(antoyo): set alignment here as well. - let gv = self.define_global(&name[..], self.val_ty(cv), false, None).unwrap_or_else(|| { - bug!("symbol `{}` is already defined", name); - }); + let global = self.define_global(&name[..], self.val_ty(cv), false, None); // TODO(antoyo): set linkage. - (name, gv) + global } _ => { - let index = self.global_gen_sym_counter.get(); - let name = format!("global_{}_{}", index, self.codegen_unit.name()); let typ = self.val_ty(cv).get_aligned(align.bytes()); - let global = self.define_private_global(typ); - (name, global) + let global = self.declare_unnamed_global(typ); + global }, }; // FIXME(antoyo): I think the name coming from generate_local_symbol_name() above cannot be used // globally. - // NOTE: global seems to only be global in a module. So save the name instead of the value - // to import it later. - self.global_names.borrow_mut().insert(cv, name); - self.global_init_block.add_assignment(None, gv.dereference(None), cv); + global.global_set_initializer_value(cv); // TODO(antoyo): set unnamed address. - gv + global.get_address(None) } - pub fn get_static(&self, def_id: DefId) -> RValue<'gcc> { + pub fn get_static(&self, def_id: DefId) -> LValue<'gcc> { let instance = Instance::mono(self.tcx, def_id); let fn_attrs = self.tcx.codegen_fn_attrs(def_id); if let Some(&global) = self.instances.borrow().get(&instance) { @@ -380,7 +339,7 @@ pub fn codegen_static_initializer<'gcc, 'tcx>(cx: &CodegenCx<'gcc, 'tcx>, def_id Ok((const_alloc_to_gcc(cx, alloc), alloc)) } -fn check_and_apply_linkage<'gcc, 'tcx>(cx: &CodegenCx<'gcc, 'tcx>, attrs: &CodegenFnAttrs, ty: Ty<'tcx>, sym: &str, span: Span) -> RValue<'gcc> { +fn check_and_apply_linkage<'gcc, 'tcx>(cx: &CodegenCx<'gcc, 'tcx>, attrs: &CodegenFnAttrs, ty: Ty<'tcx>, sym: &str, span: Span) -> LValue<'gcc> { let is_tls = attrs.flags.contains(CodegenFnAttrFlags::THREAD_LOCAL); let llty = cx.layout_of(ty).gcc_type(cx, true); if let Some(linkage) = attrs.linkage { @@ -410,13 +369,9 @@ fn check_and_apply_linkage<'gcc, 'tcx>(cx: &CodegenCx<'gcc, 'tcx>, attrs: &Codeg // zero. let mut real_name = "_rust_extern_with_linkage_".to_string(); real_name.push_str(&sym); - let global2 = - cx.define_global(&real_name, llty, is_tls, attrs.link_section).unwrap_or_else(|| { - cx.sess().span_fatal(span, &format!("symbol `{}` is already defined", &sym)) - }); + let global2 = cx.define_global(&real_name, llty, is_tls, attrs.link_section); // TODO(antoyo): set linkage. - let lvalue = global2.dereference(None); - cx.global_init_block.add_assignment(None, lvalue, global1); + global2.global_set_initializer_value(global1.get_address(None)); // TODO(antoyo): use global_set_initializer() when it will work. 
global2 } diff --git a/src/context.rs index ef687dd22c6da..29b6e33130735 100644 --- a/src/context.rs +++ b/src/context.rs @@ -46,10 +46,6 @@ pub struct CodegenCx<'gcc, 'tcx> { pub current_func: RefCell>>, pub normal_function_addresses: RefCell>>, - /// The function where globals are initialized. - pub global_init_func: Function<'gcc>, - pub global_init_block: Block<'gcc>, - pub functions: RefCell>>, pub tls_model: gccjit::TlsModel, @@ -89,23 +85,20 @@ pub struct CodegenCx<'gcc, 'tcx> { pub types_with_fields_to_set: RefCell, (Struct<'gcc>, TyAndLayout<'tcx>)>>, /// Cache instances of monomorphic and polymorphic items - pub instances: RefCell, RValue<'gcc>>>, + pub instances: RefCell, LValue<'gcc>>>, + /// Cache function instances of monomorphic and polymorphic items + pub function_instances: RefCell, RValue<'gcc>>>, /// Cache generated vtables pub vtables: RefCell, Option>), RValue<'gcc>>>, /// Cache of emitted const globals (value -> global) pub const_globals: RefCell, RValue<'gcc>>>, - pub init_argv_var: RefCell, - pub argv_initialized: Cell, - /// Cache of constant strings, pub const_cstr_cache: RefCell>>, /// Cache of globals. pub globals: RefCell>>, - // TODO(antoyo): remove global_names. - pub global_names: RefCell, String>>, /// A counter that is used for generating local symbol names local_gen_sym_counter: Cell, @@ -118,16 +111,9 @@ pub struct CodegenCx<'gcc, 'tcx> { /// NOTE: a hack is used because the rustc API is not suitable to libgccjit and as such, /// `const_undef()` returns struct as pointer so that they can later be assigned a value. /// As such, this set remembers which of these pointers were returned by this function so that - /// they can be derefered later. + /// they can be dereferenced later. /// FIXME(antoyo): fix the rustc API to avoid having this hack. pub structs_as_pointer: RefCell>>, - - /// Store the pointer of different types for safety. - /// When casting the values back to their original types, check that they are indeed that type - /// with these sets. - /// FIXME(antoyo): remove when the API supports more types.
- #[cfg(debug_assertions)] - lvalues: RefCell>>, } impl<'gcc, 'tcx> CodegenCx<'gcc, 'tcx> { @@ -180,10 +166,6 @@ impl<'gcc, 'tcx> CodegenCx<'gcc, 'tcx> { functions.insert(builtin.to_string(), context.get_builtin_function(builtin)); } - let global_init_func = context.new_function(None, FunctionType::Exported, context.new_type::<()>(), &[], - &format!("__gccGlobalInit{}", unit_name(&codegen_unit)), false); - let global_init_block = global_init_func.new_block("initial"); - Self { check_overflow, codegen_unit, @@ -192,8 +174,6 @@ impl<'gcc, 'tcx> CodegenCx<'gcc, 'tcx> { current_func: RefCell::new(None), normal_function_addresses: Default::default(), functions: RefCell::new(functions), - global_init_func, - global_init_block, tls_model, @@ -221,15 +201,11 @@ impl<'gcc, 'tcx> CodegenCx<'gcc, 'tcx> { double_type, linkage: Cell::new(FunctionType::Internal), - #[cfg(debug_assertions)] - lvalues: Default::default(), instances: Default::default(), + function_instances: Default::default(), vtables: Default::default(), const_globals: Default::default(), - init_argv_var: RefCell::new(String::new()), - argv_initialized: Cell::new(false), const_cstr_cache: Default::default(), - global_names: Default::default(), globals: Default::default(), scalar_types: Default::default(), types: Default::default(), @@ -244,12 +220,6 @@ impl<'gcc, 'tcx> CodegenCx<'gcc, 'tcx> { } } - pub fn lvalue_to_rvalue(&self, value: LValue<'gcc>) -> RValue<'gcc> { - #[cfg(debug_assertions)] - self.lvalues.borrow_mut().insert(value); - unsafe { std::mem::transmute(value) } - } - pub fn rvalue_as_function(&self, value: RValue<'gcc>) -> Function<'gcc> { let function: Function<'gcc> = unsafe { std::mem::transmute(value) }; debug_assert!(self.functions.borrow().values().find(|value| **value == function).is_some(), @@ -257,11 +227,6 @@ impl<'gcc, 'tcx> CodegenCx<'gcc, 'tcx> { function } - pub fn rvalue_as_lvalue(&self, value: RValue<'gcc>) -> LValue<'gcc> { - let lvalue: LValue<'gcc> = unsafe { std::mem::transmute(value) }; - lvalue - } - pub fn sess(&self) -> &Session { &self.tcx.sess } diff --git a/src/debuginfo.rs b/src/debuginfo.rs index 8661532a3595e..4d3b4f04badec 100644 --- a/src/debuginfo.rs +++ b/src/debuginfo.rs @@ -1,11 +1,9 @@ -use gccjit::{FunctionType, RValue}; +use gccjit::RValue; use rustc_codegen_ssa::mir::debuginfo::{FunctionDebugContext, VariableKind}; -use rustc_codegen_ssa::traits::{BuilderMethods, DebugInfoBuilderMethods, DebugInfoMethods}; -use rustc_middle::middle::cstore::CrateDepKind; +use rustc_codegen_ssa::traits::{DebugInfoBuilderMethods, DebugInfoMethods}; use rustc_middle::mir; use rustc_middle::ty::{Instance, Ty}; use rustc_span::{SourceFile, Span, Symbol}; -use rustc_span::def_id::LOCAL_CRATE; use rustc_target::abi::Size; use rustc_target::abi::call::FnAbi; @@ -20,25 +18,6 @@ impl<'a, 'gcc, 'tcx> DebugInfoBuilderMethods for Builder<'a, 'gcc, 'tcx> { } fn insert_reference_to_gdb_debug_scripts_section_global(&mut self) { - // TODO(antoyo): replace with gcc_jit_context_new_global_with_initializer() if it's added: - // https://gcc.gnu.org/pipermail/jit/2020q3/001225.html - // - // Call the function to initialize global values here. - // We assume this is only called for the main function. - use std::iter; - - for crate_num in self.cx.tcx.crates(()).iter().copied().chain(iter::once(LOCAL_CRATE)) { - // FIXME(antoyo): better way to find if a crate is of proc-macro type? 
- if crate_num == LOCAL_CRATE || self.cx.tcx.dep_kind(crate_num) != CrateDepKind::MacrosOnly { - // NOTE: proc-macro crates are not included in the executable, so don't call their - // initialization routine. - let initializer_name = format!("__gccGlobalCrateInit{}", self.cx.tcx.crate_name(crate_num)); - let codegen_init_func = self.context.new_function(None, FunctionType::Extern, self.context.new_type::<()>(), &[], - initializer_name, false); - self.llbb().add_eval(None, self.context.new_call(None, codegen_init_func, &[])); - } - } - // TODO(antoyo): insert reference to gdb debug scripts section global. } diff --git a/src/declare.rs b/src/declare.rs index c1382bf2f4a71..b43f68f878d7a 100644 --- a/src/declare.rs +++ b/src/declare.rs @@ -7,10 +7,9 @@ use rustc_target::abi::call::FnAbi; use crate::abi::FnAbiGccExt; use crate::context::{CodegenCx, unit_name}; use crate::intrinsic::llvm; -use crate::mangled_std_symbols::{ARGV_INIT_ARRAY, ARGV_INIT_WRAPPER}; impl<'gcc, 'tcx> CodegenCx<'gcc, 'tcx> { - pub fn get_or_insert_global(&self, name: &str, ty: Type<'gcc>, is_tls: bool, link_section: Option) -> RValue<'gcc> { + pub fn get_or_insert_global(&self, name: &str, ty: Type<'gcc>, is_tls: bool, link_section: Option) -> LValue<'gcc> { if self.globals.borrow().contains_key(name) { let typ = self.globals.borrow().get(name).expect("global").get_type(); let global = self.context.new_global(None, GlobalKind::Imported, typ, name); @@ -20,7 +19,7 @@ impl<'gcc, 'tcx> CodegenCx<'gcc, 'tcx> { if let Some(link_section) = link_section { global.set_link_section(&link_section.as_str()); } - global.get_address(None) + global } else { self.declare_global(name, ty, is_tls, link_section) @@ -34,13 +33,10 @@ impl<'gcc, 'tcx> CodegenCx<'gcc, 'tcx> { self.context.new_global(None, GlobalKind::Exported, ty, &name) } - pub fn declare_global_with_linkage(&self, name: &str, ty: Type<'gcc>, linkage: GlobalKind) -> RValue<'gcc> { - let global = self.context.new_global(None, linkage, ty, name) - .get_address(None); - self.globals.borrow_mut().insert(name.to_string(), global); - // NOTE: global seems to only be global in a module. So save the name instead of the value - // to import it later. - self.global_names.borrow_mut().insert(global, name.to_string()); + pub fn declare_global_with_linkage(&self, name: &str, ty: Type<'gcc>, linkage: GlobalKind) -> LValue<'gcc> { + let global = self.context.new_global(None, linkage, ty, name); + let global_address = global.get_address(None); + self.globals.borrow_mut().insert(name.to_string(), global_address); global } @@ -51,19 +47,7 @@ impl<'gcc, 'tcx> CodegenCx<'gcc, 'tcx> { unsafe { std::mem::transmute(func) } } - pub fn declare_global(&self, name: &str, ty: Type<'gcc>, is_tls: bool, link_section: Option) -> RValue<'gcc> { - // FIXME(antoyo): correctly support global variable initialization. - if name.starts_with(ARGV_INIT_ARRAY) { - // NOTE: hack to avoid having to update the names in mangled_std_symbols: we save the - // name of the variable now to actually declare it later. 
- *self.init_argv_var.borrow_mut() = name.to_string(); - - let global = self.context.new_global(None, GlobalKind::Imported, ty, name); - if let Some(link_section) = link_section { - global.set_link_section(&link_section.as_str()); - } - return global.get_address(None); - } + pub fn declare_global(&self, name: &str, ty: Type<'gcc>, is_tls: bool, link_section: Option) -> LValue<'gcc> { let global = self.context.new_global(None, GlobalKind::Exported, ty, name); if is_tls { global.set_tls_model(self.tls_model); @@ -71,11 +55,15 @@ impl<'gcc, 'tcx> CodegenCx<'gcc, 'tcx> { if let Some(link_section) = link_section { global.set_link_section(&link_section.as_str()); } - let global = global.get_address(None); - self.globals.borrow_mut().insert(name.to_string(), global); - // NOTE: global seems to only be global in a module. So save the name instead of the value - // to import it later. - self.global_names.borrow_mut().insert(global, name.to_string()); + let global_address = global.get_address(None); + self.globals.borrow_mut().insert(name.to_string(), global_address); + global + } + + pub fn declare_private_global(&self, name: &str, ty: Type<'gcc>) -> LValue<'gcc> { + let global = self.context.new_global(None, GlobalKind::Internal, ty, name); + let global_address = global.get_address(None); + self.globals.borrow_mut().insert(name.to_string(), global_address); global } @@ -94,51 +82,14 @@ impl<'gcc, 'tcx> CodegenCx<'gcc, 'tcx> { } pub fn declare_fn(&self, name: &str, fn_abi: &FnAbi<'tcx, Ty<'tcx>>) -> RValue<'gcc> { - // NOTE: hack to avoid having to update the names in mangled_std_symbols: we found the name - // of the variable earlier, so we declare it now. - // Since we don't correctly support initializers yet, we initialize this variable manually - // for now. - if name.starts_with(ARGV_INIT_WRAPPER) && !self.argv_initialized.get() { - let global_name = &*self.init_argv_var.borrow(); - let return_type = self.type_void(); - let params = [ - self.context.new_parameter(None, self.int_type, "argc"), - self.context.new_parameter(None, self.u8_type.make_pointer().make_pointer(), "argv"), - self.context.new_parameter(None, self.u8_type.make_pointer().make_pointer(), "envp"), - ]; - let function = self.context.new_function(None, FunctionType::Extern, return_type, ¶ms, name, false); - let initializer = function.get_address(None); - - let param_types = [ - self.int_type, - self.u8_type.make_pointer().make_pointer(), - self.u8_type.make_pointer().make_pointer(), - ]; - let ty = self.context.new_function_pointer_type(None, return_type, ¶m_types, false); - - let global = self.context.new_global(None, GlobalKind::Exported, ty, global_name); - global.set_link_section(".init_array.00099"); - global.global_set_initializer_value(initializer); - let global = global.get_address(None); - self.globals.borrow_mut().insert(global_name.to_string(), global); - // NOTE: global seems to only be global in a module. So save the name instead of the value - // to import it later. - self.global_names.borrow_mut().insert(global, global_name.to_string()); - self.argv_initialized.set(true); - } let (return_type, params, variadic) = fn_abi.gcc_type(self); let func = declare_raw_fn(self, name, () /*fn_abi.llvm_cconv()*/, return_type, ¶ms, variadic); // FIXME(antoyo): this is a wrong cast. That requires changing the compiler API. 
unsafe { std::mem::transmute(func) } } - pub fn define_global(&self, name: &str, ty: Type<'gcc>, is_tls: bool, link_section: Option) -> Option> { - Some(self.get_or_insert_global(name, ty, is_tls, link_section)) - } - - pub fn define_private_global(&self, ty: Type<'gcc>) -> RValue<'gcc> { - let global = self.declare_unnamed_global(ty); - global.get_address(None) + pub fn define_global(&self, name: &str, ty: Type<'gcc>, is_tls: bool, link_section: Option) -> LValue<'gcc> { + self.get_or_insert_global(name, ty, is_tls, link_section) } pub fn get_declared_value(&self, name: &str) -> Option> { diff --git a/src/lib.rs b/src/lib.rs index 793e5c48d0a36..3c3a3568228c6 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -43,7 +43,6 @@ mod coverageinfo; mod debuginfo; mod declare; mod intrinsic; -mod mangled_std_symbols; mod mono_item; mod type_; mod type_of; @@ -51,7 +50,7 @@ mod type_of; use std::any::Any; use std::sync::Arc; -use gccjit::{Block, Context, FunctionType, OptimizationLevel}; +use gccjit::{Context, OptimizationLevel}; use rustc_ast::expand::allocator::AllocatorKind; use rustc_codegen_ssa::{CodegenResults, CompiledModule, ModuleCodegen}; use rustc_codegen_ssa::base::codegen_crate; @@ -69,8 +68,6 @@ use rustc_session::Session; use rustc_span::Symbol; use rustc_span::fatal_error::FatalError; -use crate::context::unit_name; - pub struct PrintOnPanic String>(pub F); impl String> Drop for PrintOnPanic { @@ -203,6 +200,7 @@ impl WriteBackendMethods for GccCodegenBackend { fn run_fat_lto(_cgcx: &CodegenContext, mut modules: Vec>, _cached_modules: Vec<(SerializedModule, WorkProduct)>) -> Result, FatalError> { // TODO(antoyo): implement LTO by sending -flto to libgccjit and adding the appropriate gcc linker plugins. // NOTE: implemented elsewhere. + // TODO: what is implemented elsewhere ^ ? 
let module = match modules.remove(0) { FatLTOInput::InMemory(module) => module, @@ -273,15 +271,6 @@ fn to_gcc_opt_level(optlevel: Option) -> OptimizationLevel { } } -fn create_function_calling_initializers<'gcc, 'tcx>(tcx: TyCtxt<'tcx>, context: &Context<'gcc>, block: Block<'gcc>) { - let codegen_units = tcx.collect_and_partition_mono_items(()).1; - for codegen_unit in codegen_units { - let codegen_init_func = context.new_function(None, FunctionType::Extern, context.new_type::<()>(), &[], - &format!("__gccGlobalInit{}", unit_name(&codegen_unit)), false); - block.add_eval(None, context.new_call(None, codegen_init_func, &[])); - } -} - fn handle_native(name: &str) -> &str { if name != "native" { return name; diff --git a/src/mangled_std_symbols.rs b/src/mangled_std_symbols.rs deleted file mode 100644 index b0c3f214d66c1..0000000000000 --- a/src/mangled_std_symbols.rs +++ /dev/null @@ -1,4 +0,0 @@ -pub const ARGV_INIT_ARRAY: &str = "_ZN3std3sys4unix4args3imp15ARGV_INIT_ARRAY"; -pub const ARGV_INIT_WRAPPER: &str = "_ZN3std3sys4unix4args3imp15ARGV_INIT_ARRAY12init_wrapper"; -pub const ARGC: &str = "_ZN3std3sys4unix4args3imp4ARGC"; -pub const ARGV: &str = "_ZN3std3sys4unix4args3imp4ARGV"; diff --git a/src/mono_item.rs b/src/mono_item.rs index cedeb54f60bf2..a34c68a4e22d3 100644 --- a/src/mono_item.rs +++ b/src/mono_item.rs @@ -18,12 +18,7 @@ impl<'gcc, 'tcx> PreDefineMethods<'tcx> for CodegenCx<'gcc, 'tcx> { let gcc_type = self.layout_of(ty).gcc_type(self, true); let is_tls = attrs.flags.contains(CodegenFnAttrFlags::THREAD_LOCAL); - let global = self.define_global(symbol_name, gcc_type, is_tls, attrs.link_section).unwrap_or_else(|| { - self.sess().span_fatal( - self.tcx.def_span(def_id), - &format!("symbol `{}` is already defined", symbol_name), - ) - }); + let global = self.define_global(symbol_name, gcc_type, is_tls, attrs.link_section); // TODO(antoyo): set linkage and visibility. 
self.instances.borrow_mut().insert(instance, global); From 88ff75c6ccd44e6eef20157cbedfa15dc0f6a1b2 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Lo=C3=AFc=20BRANSTETT?= Date: Sun, 26 Sep 2021 18:22:07 +0200 Subject: [PATCH 18/27] Fix populate of union.impls --- src/librustdoc/json/mod.rs | 2 ++ src/test/rustdoc-json/unions/impl.rs | 15 +++++++++++++++ 2 files changed, 17 insertions(+) create mode 100644 src/test/rustdoc-json/unions/impl.rs diff --git a/src/librustdoc/json/mod.rs b/src/librustdoc/json/mod.rs index 5089cc30a1e39..915a9fd2b894a 100644 --- a/src/librustdoc/json/mod.rs +++ b/src/librustdoc/json/mod.rs @@ -169,6 +169,8 @@ impl<'tcx> FormatRenderer<'tcx> for JsonRenderer<'tcx> { s.impls = self.get_impls(id.expect_def_id()) } else if let types::ItemEnum::Enum(ref mut e) = new_item.inner { e.impls = self.get_impls(id.expect_def_id()) + } else if let types::ItemEnum::Union(ref mut u) = new_item.inner { + u.impls = self.get_impls(id.expect_def_id()) } let removed = self.index.borrow_mut().insert(from_item_id(id), new_item.clone()); diff --git a/src/test/rustdoc-json/unions/impl.rs b/src/test/rustdoc-json/unions/impl.rs new file mode 100644 index 0000000000000..0388b4a8c3c3b --- /dev/null +++ b/src/test/rustdoc-json/unions/impl.rs @@ -0,0 +1,15 @@ +#![no_std] + +// @has impl.json "$.index[*][?(@.name=='Ux')].visibility" \"public\" +// @has - "$.index[*][?(@.name=='Ux')].kind" \"union\" +pub union Ux { + a: u32, + b: u64 +} + +// @has - "$.index[*][?(@.name=='Num')].visibility" \"public\" +// @has - "$.index[*][?(@.name=='Num')].kind" \"trait\" +pub trait Num {} + +// @count - "$.index[*][?(@.name=='Ux')].inner.impls" 1 +impl Num for Ux {} From df687bdae0cc58e6cc4c62f8f3ca3c5bb7e7103b Mon Sep 17 00:00:00 2001 From: Noah Lev Date: Sun, 26 Sep 2021 12:21:44 -0700 Subject: [PATCH 19/27] Add regression test for issue #83564 --- .../core-std-import-order-issue-83564.rs | 10 ++++++++++ .../core-std-import-order-issue-83564.stderr | 16 ++++++++++++++++ 2 files changed, 26 insertions(+) create mode 100644 src/test/ui/suggestions/core-std-import-order-issue-83564.rs create mode 100644 src/test/ui/suggestions/core-std-import-order-issue-83564.stderr diff --git a/src/test/ui/suggestions/core-std-import-order-issue-83564.rs b/src/test/ui/suggestions/core-std-import-order-issue-83564.rs new file mode 100644 index 0000000000000..b7fe5af7bf8a1 --- /dev/null +++ b/src/test/ui/suggestions/core-std-import-order-issue-83564.rs @@ -0,0 +1,10 @@ +// edition:2018 + +// This is a regression test for #83564. +// For some reason, Rust 2018 or higher is required to reproduce the bug. + +fn main() { + //~^ HELP consider importing one of these items + let _x = NonZeroU32::new(5).unwrap(); + //~^ ERROR failed to resolve: use of undeclared type `NonZeroU32` +} diff --git a/src/test/ui/suggestions/core-std-import-order-issue-83564.stderr b/src/test/ui/suggestions/core-std-import-order-issue-83564.stderr new file mode 100644 index 0000000000000..d484fb8cbe0d8 --- /dev/null +++ b/src/test/ui/suggestions/core-std-import-order-issue-83564.stderr @@ -0,0 +1,16 @@ +error[E0433]: failed to resolve: use of undeclared type `NonZeroU32` + --> $DIR/core-std-import-order-issue-83564.rs:8:14 + | +LL | let _x = NonZeroU32::new(5).unwrap(); + | ^^^^^^^^^^ not found in this scope + | +help: consider importing one of these items + | +LL | use std::num::NonZeroU32; + | +LL | use core::num::NonZeroU32; + | + +error: aborting due to previous error + +For more information about this error, try `rustc --explain E0433`. 
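For reference, the regression test above compiles once either suggested import is applied (both work, since `std` re-exports `core::num`); a minimal sketch of the fixed form:

use std::num::NonZeroU32;

fn main() {
    // NonZeroU32::new returns None for zero, so unwrap() is fine for 5.
    let x = NonZeroU32::new(5).unwrap();
    assert_eq!(x.get(), 5);
}
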
From adbb608678c618efdc823e3c7a60a0331b9c0156 Mon Sep 17 00:00:00 2001 From: Timothy Maloney Date: Thu, 23 Sep 2021 17:24:58 -0700 Subject: [PATCH 20/27] Link stage1 build to toolchain automatically Squashed review fixups: fixed types; added checks for rustup and for whether the toolchain is already linked; fortified the rustup/directory checks; added a check for the command's output status; removed rustup output from the console; deleted a confusing comment; fixed a compiler error and removed an extra declaration; refactored into smaller components per review suggestions. --- src/bootstrap/setup.rs | 80 ++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 80 insertions(+) diff --git a/src/bootstrap/setup.rs b/src/bootstrap/setup.rs index a5829dfa9d879..5bc0a505bf695 100644 --- a/src/bootstrap/setup.rs +++ b/src/bootstrap/setup.rs @@ -1,3 +1,4 @@ +use crate::TargetSelection; use crate::{t, VERSION}; use std::fmt::Write as _; use std::path::{Path, PathBuf}; @@ -107,6 +108,17 @@ pub fn setup(src_path: &Path, profile: Profile) { let include_path = profile.include_path(src_path); println!("`x.py` will now use the configuration at {}", include_path.display()); + let build = TargetSelection::from_user(&env!("BUILD_TRIPLE")); + let stage_path = ["build", build.rustc_target_arg(), "stage1"].join("/"); + + println!(); + + if !rustup_installed() && profile != Profile::User { + println!("`rustup` is not installed; cannot link `stage1` toolchain"); + } else if stage_dir_exists(&stage_path[..]) { + attempt_toolchain_link(&stage_path[..]); + } + let suggestions = match profile { Profile::Codegen | Profile::Compiler => &["check", "build", "test"][..], Profile::Tools => &[ @@ -139,6 +151,74 @@ pub fn setup(src_path: &Path, profile: Profile) { } } +fn rustup_installed() -> bool { + Command::new("rustup") + .arg("--version") + .stdout(std::process::Stdio::null()) + .output() + .map_or(false, |output| output.status.success()) +} + +fn stage_dir_exists(stage_path: &str) -> bool { + match fs::create_dir(&stage_path[..]) { + Ok(_) => true, + Err(_) => Path::new(&stage_path[..]).exists(), + } +} + +fn attempt_toolchain_link(stage_path: &str) { + if toolchain_is_linked() { + return; + } + + if try_link_toolchain(&stage_path[..]) { + println!( + "Added `stage1` rustup toolchain; try `cargo +stage1 build` on a separate rust project to run a newly-built toolchain" + ); + } else { + println!("`rustup` failed to link stage 1 build to `stage1` toolchain"); + println!( + "To manually link stage 1 build to `stage1` toolchain, run:\n + `rustup toolchain link stage1 {}`", + &stage_path[..] + ); + } +} + +fn toolchain_is_linked() -> bool { + match Command::new("rustup") + .args(&["toolchain", "list"]) + .stdout(std::process::Stdio::piped()) + .output() + { + Ok(toolchain_list) => { + if !String::from_utf8_lossy(&toolchain_list.stdout).contains("stage1") { + return false; + } + // The toolchain has already been linked. + println!( + "`stage1` toolchain already linked; not attempting to link `stage1` toolchain" + ); + } + Err(_) => { + // In this case, we don't know if the `stage1` toolchain has been linked; + // but `rustup` failed, so let's not go any further.
+ println!( + "`rustup` failed to list current toolchains; not attempting to link `stage1` toolchain" + ); + } + } + true +} + +fn try_link_toolchain(stage_path: &str) -> bool { + Command::new("rustup") + .stdout(std::process::Stdio::null()) + .args(&["toolchain", "link", "stage1", &stage_path[..]]) + .output() + .map_or(false, |output| output.status.success()) +} + // Used to get the path for `Subcommand::Setup` pub fn interactive_path() -> io::Result { fn abbrev_all() -> impl Iterator { From ab4ff2dfe0955ede04c4ddb7373337a6141081b8 Mon Sep 17 00:00:00 2001 From: antoyo Date: Mon, 27 Sep 2021 09:34:06 -0400 Subject: [PATCH 21/27] Cleanup fix for global initialization (#93) * Cleanup fix for global initialization * Remove linker script hack * Use v0 symbol mangling * Fix warnings --- config.sh | 2 +- src/common.rs | 4 +--- src/lib.rs | 12 ++---------- test.sh | 2 +- 4 files changed, 5 insertions(+), 15 deletions(-) diff --git a/config.sh b/config.sh index d022371eebed4..87df2f2102bcd 100644 --- a/config.sh +++ b/config.sh @@ -38,7 +38,7 @@ if [[ "$HOST_TRIPLE" != "$TARGET_TRIPLE" ]]; then fi fi -export RUSTFLAGS="$linker -Cpanic=abort -Cdebuginfo=2 -Clto=off -Zpanic-abort-tests -Zcodegen-backend=$(pwd)/target/${CHANNEL:-debug}/librustc_codegen_gcc.$dylib_ext --sysroot $(pwd)/build_sysroot/sysroot" +export RUSTFLAGS="$linker -Cpanic=abort -Zsymbol-mangling-version=v0 -Cdebuginfo=2 -Clto=off -Zpanic-abort-tests -Zcodegen-backend=$(pwd)/target/${CHANNEL:-debug}/librustc_codegen_gcc.$dylib_ext --sysroot $(pwd)/build_sysroot/sysroot" # FIXME(antoyo): remove once the atomic shim is gone if [[ `uname` == 'Darwin' ]]; then diff --git a/src/common.rs b/src/common.rs index c60cbd81f636a..709fa2a297bb8 100644 --- a/src/common.rs +++ b/src/common.rs @@ -43,10 +43,8 @@ impl<'gcc, 'tcx> CodegenCx<'gcc, 'tcx> { // TODO(antoyo): handle non-null-terminated strings. let string = self.context.new_string_literal(&*string); let sym = self.generate_local_symbol_name("str"); - // NOTE: TLS is always off for a string litteral. - // NOTE: string litterals do not have a link section. let global = self.declare_private_global(&sym, self.val_ty(string)); - global.global_set_initializer_value(string); // TODO: only set if not imported? + global.global_set_initializer_value(string); global // TODO(antoyo): set linkage. } diff --git a/src/lib.rs b/src/lib.rs index 3c3a3568228c6..f3c02e2634ff6 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -63,7 +63,7 @@ use rustc_errors::{ErrorReported, Handler}; use rustc_middle::dep_graph::{WorkProduct, WorkProductId}; use rustc_middle::middle::cstore::EncodedMetadata; use rustc_middle::ty::TyCtxt; -use rustc_session::config::{CrateType, Lto, OptLevel, OutputFilenames}; +use rustc_session::config::{Lto, OptLevel, OutputFilenames}; use rustc_session::Session; use rustc_span::Symbol; use rustc_span::fatal_error::FatalError; @@ -106,16 +106,8 @@ impl CodegenBackend for GccCodegenBackend { Ok((codegen_results, work_products)) } - fn link(&self, sess: &Session, mut codegen_results: CodegenResults, outputs: &OutputFilenames) -> Result<(), ErrorReported> { + fn link(&self, sess: &Session, codegen_results: CodegenResults, outputs: &OutputFilenames) -> Result<(), ErrorReported> { use rustc_codegen_ssa::back::link::link_binary; - if let Some(symbols) = codegen_results.crate_info.exported_symbols.get_mut(&CrateType::Dylib) { - // TODO:(antoyo): remove when global initializer work without calling a function at runtime. - // HACK: since this codegen add some symbols (e.g. 
__gccGlobalCrateInit) and the UI - // tests load libstd.so as a dynamic library, and rustc use a version-script to specify - // the symbols visibility, we add * to export all symbols. - // It seems other symbols from libstd/libcore are causing some issues here as well. - symbols.push("*".to_string()); - } link_binary::>( sess, diff --git a/test.sh b/test.sh index 12df1f8af2f0b..92cbdbb7c00db 100755 --- a/test.sh +++ b/test.sh @@ -171,7 +171,7 @@ git checkout src/test/ui/type-alias-impl-trait/auxiliary/cross_crate_ice.rs git checkout src/test/ui/type-alias-impl-trait/auxiliary/cross_crate_ice2.rs rm src/test/ui/llvm-asm/llvm-asm-in-out-operand.rs || true # TODO(antoyo): Enable back this test if I ever implement the llvm_asm! macro. -RUSTC_ARGS="-Zpanic-abort-tests -Zcodegen-backend="$(pwd)"/../target/"$CHANNEL"/librustc_codegen_gcc."$dylib_ext" --sysroot "$(pwd)"/../build_sysroot/sysroot -Cpanic=abort" +RUSTC_ARGS="-Zpanic-abort-tests -Zsymbol-mangling-version=v0 -Zcodegen-backend="$(pwd)"/../target/"$CHANNEL"/librustc_codegen_gcc."$dylib_ext" --sysroot "$(pwd)"/../build_sysroot/sysroot -Cpanic=abort" echo "[TEST] rustc test suite" COMPILETEST_FORCE_STAGE0=1 ./x.py test --run always --stage 0 src/test/ui/ --rustc-args "$RUSTC_ARGS" From 0911069febd6c247594c26e41c2e5cf8ffb8a74f Mon Sep 17 00:00:00 2001 From: Jane Lusby Date: Mon, 27 Sep 2021 14:50:35 -0700 Subject: [PATCH 22/27] Apply suggestions from code review Co-authored-by: kennytm --- library/test/src/formatters/junit.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/library/test/src/formatters/junit.rs b/library/test/src/formatters/junit.rs index 44a33c92a9ff7..04057906d1c3e 100644 --- a/library/test/src/formatters/junit.rs +++ b/library/test/src/formatters/junit.rs @@ -29,7 +29,7 @@ impl JunitFormatter { impl OutputFormatter for JunitFormatter { fn write_run_start(&mut self, _test_count: usize) -> io::Result<()> { // We write xml header on run start - self.out.write_all("\n".as_bytes())?; + self.out.write_all(b"\n")?; self.write_message("") } @@ -134,7 +134,7 @@ impl OutputFormatter for JunitFormatter { self.write_message("")?; self.write_message("")?; - self.out.write_all("\n\n".as_bytes())?; + self.out.write_all(b"\n\n")?; Ok(state.failed == 0) } From 63608ac6b34fedfd6b7b8da42805b1f798595e57 Mon Sep 17 00:00:00 2001 From: antoyo Date: Mon, 27 Sep 2021 19:31:24 -0400 Subject: [PATCH 23/27] Fix/mismatch types (#94) * Refactor test.sh script * Fix mismatched types error --- src/builder.rs | 26 +++++-- test.sh | 180 ++++++++++++++++++++++++++++++------------------- 2 files changed, 132 insertions(+), 74 deletions(-) diff --git a/src/builder.rs b/src/builder.rs index fd3710062ee45..10ee63e2a550c 100644 --- a/src/builder.rs +++ b/src/builder.rs @@ -1051,11 +1051,18 @@ impl<'a, 'gcc, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'gcc, 'tcx> { } /* Comparisons */ - fn icmp(&mut self, op: IntPredicate, lhs: RValue<'gcc>, mut rhs: RValue<'gcc>) -> RValue<'gcc> { - if lhs.get_type() != rhs.get_type() { + fn icmp(&mut self, op: IntPredicate, mut lhs: RValue<'gcc>, mut rhs: RValue<'gcc>) -> RValue<'gcc> { + let left_type = lhs.get_type(); + let right_type = rhs.get_type(); + if left_type != right_type { + // NOTE: because libgccjit cannot compare function pointers. 
+ if left_type.is_function_ptr_type().is_some() && right_type.is_function_ptr_type().is_some() { + lhs = self.context.new_cast(None, lhs, self.usize_type.make_pointer()); + rhs = self.context.new_cast(None, rhs, self.usize_type.make_pointer()); + } // NOTE: hack because we try to cast a vector type to the same vector type. - if format!("{:?}", lhs.get_type()) != format!("{:?}", rhs.get_type()) { - rhs = self.context.new_cast(None, rhs, lhs.get_type()); + else if format!("{:?}", left_type) != format!("{:?}", right_type) { + rhs = self.context.new_cast(None, rhs, left_type); } } self.context.new_comparison(None, op.to_gcc_comparison(), lhs, rhs) @@ -1209,6 +1216,17 @@ impl<'a, 'gcc, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'gcc, 'tcx> { else { panic!("Unexpected type {:?}", value_type); }; + + let lvalue_type = lvalue.to_rvalue().get_type(); + let value = + // NOTE: sometimes, rustc will create a value with the wrong type. + if lvalue_type != value.get_type() { + self.context.new_cast(None, value, lvalue_type) + } + else { + value + }; + self.llbb().add_assignment(None, lvalue, value); aggregate_value diff --git a/test.sh b/test.sh index 92cbdbb7c00db..944d0ce516e0f 100755 --- a/test.sh +++ b/test.sh @@ -17,6 +17,7 @@ export LIBRARY_PATH="$GCC_PATH" if [[ "$1" == "--release" ]]; then export CHANNEL='release' CARGO_INCREMENTAL=1 cargo rustc --release + shift else echo $LD_LIBRARY_PATH export CHANNEL='debug' @@ -25,52 +26,60 @@ fi source config.sh -rm -r target/out || true -mkdir -p target/out/gccjit +function clean() { + rm -r target/out || true + mkdir -p target/out/gccjit +} -echo "[BUILD] mini_core" -$RUSTC example/mini_core.rs --crate-name mini_core --crate-type lib,dylib --target $TARGET_TRIPLE +function mini_tests() { + echo "[BUILD] mini_core" + $RUSTC example/mini_core.rs --crate-name mini_core --crate-type lib,dylib --target $TARGET_TRIPLE -echo "[BUILD] example" -$RUSTC example/example.rs --crate-type lib --target $TARGET_TRIPLE + echo "[BUILD] example" + $RUSTC example/example.rs --crate-type lib --target $TARGET_TRIPLE -echo "[AOT] mini_core_hello_world" -$RUSTC example/mini_core_hello_world.rs --crate-name mini_core_hello_world --crate-type bin -g --target $TARGET_TRIPLE -$RUN_WRAPPER ./target/out/mini_core_hello_world abc bcd + echo "[AOT] mini_core_hello_world" + $RUSTC example/mini_core_hello_world.rs --crate-name mini_core_hello_world --crate-type bin -g --target $TARGET_TRIPLE + $RUN_WRAPPER ./target/out/mini_core_hello_world abc bcd +} -echo "[BUILD] sysroot" -time ./build_sysroot/build_sysroot.sh +function build_sysroot() { + echo "[BUILD] sysroot" + time ./build_sysroot/build_sysroot.sh +} -echo "[AOT] arbitrary_self_types_pointers_and_wrappers" -$RUSTC example/arbitrary_self_types_pointers_and_wrappers.rs --crate-name arbitrary_self_types_pointers_and_wrappers --crate-type bin --target $TARGET_TRIPLE -$RUN_WRAPPER ./target/out/arbitrary_self_types_pointers_and_wrappers +function std_tests() { + echo "[AOT] arbitrary_self_types_pointers_and_wrappers" + $RUSTC example/arbitrary_self_types_pointers_and_wrappers.rs --crate-name arbitrary_self_types_pointers_and_wrappers --crate-type bin --target $TARGET_TRIPLE + $RUN_WRAPPER ./target/out/arbitrary_self_types_pointers_and_wrappers -echo "[AOT] alloc_system" -$RUSTC example/alloc_system.rs --crate-type lib --target "$TARGET_TRIPLE" + echo "[AOT] alloc_system" + $RUSTC example/alloc_system.rs --crate-type lib --target "$TARGET_TRIPLE" -echo "[AOT] alloc_example" -$RUSTC example/alloc_example.rs --crate-type bin --target 
$TARGET_TRIPLE -$RUN_WRAPPER ./target/out/alloc_example + echo "[AOT] alloc_example" + $RUSTC example/alloc_example.rs --crate-type bin --target $TARGET_TRIPLE + $RUN_WRAPPER ./target/out/alloc_example -echo "[AOT] dst_field_align" -# FIXME(antoyo): Re-add -Zmir-opt-level=2 once rust-lang/rust#67529 is fixed. -$RUSTC example/dst-field-align.rs --crate-name dst_field_align --crate-type bin --target $TARGET_TRIPLE -$RUN_WRAPPER ./target/out/dst_field_align || (echo $?; false) + echo "[AOT] dst_field_align" + # FIXME(antoyo): Re-add -Zmir-opt-level=2 once rust-lang/rust#67529 is fixed. + $RUSTC example/dst-field-align.rs --crate-name dst_field_align --crate-type bin --target $TARGET_TRIPLE + $RUN_WRAPPER ./target/out/dst_field_align || (echo $?; false) -echo "[AOT] std_example" -$RUSTC example/std_example.rs --crate-type bin --target $TARGET_TRIPLE -$RUN_WRAPPER ./target/out/std_example --target $TARGET_TRIPLE + echo "[AOT] std_example" + $RUSTC example/std_example.rs --crate-type bin --target $TARGET_TRIPLE + $RUN_WRAPPER ./target/out/std_example --target $TARGET_TRIPLE -echo "[AOT] subslice-patterns-const-eval" -$RUSTC example/subslice-patterns-const-eval.rs --crate-type bin -Cpanic=abort --target $TARGET_TRIPLE -$RUN_WRAPPER ./target/out/subslice-patterns-const-eval + echo "[AOT] subslice-patterns-const-eval" + $RUSTC example/subslice-patterns-const-eval.rs --crate-type bin -Cpanic=abort --target $TARGET_TRIPLE + $RUN_WRAPPER ./target/out/subslice-patterns-const-eval -echo "[AOT] track-caller-attribute" -$RUSTC example/track-caller-attribute.rs --crate-type bin -Cpanic=abort --target $TARGET_TRIPLE -$RUN_WRAPPER ./target/out/track-caller-attribute + echo "[AOT] track-caller-attribute" + $RUSTC example/track-caller-attribute.rs --crate-type bin -Cpanic=abort --target $TARGET_TRIPLE + $RUN_WRAPPER ./target/out/track-caller-attribute -echo "[BUILD] mod_bench" -$RUSTC example/mod_bench.rs --crate-type bin --target $TARGET_TRIPLE + echo "[BUILD] mod_bench" + $RUSTC example/mod_bench.rs --crate-type bin --target $TARGET_TRIPLE +} # FIXME(antoyo): linker gives multiple definitions error on Linux #echo "[BUILD] sysroot in release mode" @@ -95,11 +104,13 @@ $RUSTC example/mod_bench.rs --crate-type bin --target $TARGET_TRIPLE #fi #popd -pushd build_sysroot/sysroot_src/library/core/tests -echo "[TEST] libcore" -rm -r ./target || true -../../../../../cargo.sh test -popd +function test_libcore() { + pushd build_sysroot/sysroot_src/library/core/tests + echo "[TEST] libcore" + rm -r ./target || true + ../../../../../cargo.sh test + popd +} # TODO(antoyo): uncomment when it works. 
#pushd regex @@ -130,20 +141,21 @@ popd #echo "[BENCH RUN] mod_bench" #hyperfine --runs ${RUN_RUNS:-10} ./target/out/mod_bench{,_inline} ./target/out/mod_bench_llvm_* -echo -echo "[TEST] rust-lang/rust" +function test_rustc() { + echo + echo "[TEST] rust-lang/rust" -rust_toolchain=$(cat rust-toolchain) + rust_toolchain=$(cat rust-toolchain) -git clone https://github.com/rust-lang/rust.git || true -cd rust -git fetch -git checkout $(rustc -V | cut -d' ' -f3 | tr -d '(') -export RUSTFLAGS= + git clone https://github.com/rust-lang/rust.git || true + cd rust + git fetch + git checkout $(rustc -V | cut -d' ' -f3 | tr -d '(') + export RUSTFLAGS= -rm config.toml || true + rm config.toml || true -cat > config.toml < config.toml < Date: Mon, 27 Sep 2021 20:35:45 -0400 Subject: [PATCH 24/27] Fix/count trailing zeroes (#95) * Fix count trailing zeroes * Fix pop count * Fix bit reverse --- src/common.rs | 22 +++ src/intrinsic/mod.rs | 356 +++++++++++++++++++++++-------------------- 2 files changed, 216 insertions(+), 162 deletions(-) diff --git a/src/common.rs b/src/common.rs index 709fa2a297bb8..bda08b653f059 100644 --- a/src/common.rs +++ b/src/common.rs @@ -302,6 +302,7 @@ pub trait SignType<'gcc, 'tcx> { fn is_signed(&self, cx: &CodegenCx<'gcc, 'tcx>) -> bool; fn is_unsigned(&self, cx: &CodegenCx<'gcc, 'tcx>) -> bool; fn to_signed(&self, cx: &CodegenCx<'gcc, 'tcx>) -> Type<'gcc>; + fn to_unsigned(&self, cx: &CodegenCx<'gcc, 'tcx>) -> Type<'gcc>; } impl<'gcc, 'tcx> SignType<'gcc, 'tcx> for Type<'gcc> { @@ -333,6 +334,27 @@ impl<'gcc, 'tcx> SignType<'gcc, 'tcx> for Type<'gcc> { self.clone() } } + + fn to_unsigned(&self, cx: &CodegenCx<'gcc, 'tcx>) -> Type<'gcc> { + if self.is_i8(cx) { + cx.u8_type + } + else if self.is_i16(cx) { + cx.u16_type + } + else if self.is_i32(cx) { + cx.u32_type + } + else if self.is_i64(cx) { + cx.u64_type + } + else if self.is_i128(cx) { + cx.u128_type + } + else { + self.clone() + } + } } pub trait TypeReflection<'gcc, 'tcx> { diff --git a/src/intrinsic/mod.rs b/src/intrinsic/mod.rs index 3dc4f61a7ac3c..45978e1409023 100644 --- a/src/intrinsic/mod.rs +++ b/src/intrinsic/mod.rs @@ -18,7 +18,7 @@ use rustc_target::spec::PanicStrategy; use crate::abi::GccType; use crate::builder::Builder; -use crate::common::TypeReflection; +use crate::common::{SignType, TypeReflection}; use crate::context::CodegenCx; use crate::type_of::LayoutGccExt; use crate::intrinsic::simd::generic_simd_intrinsic; @@ -520,163 +520,176 @@ fn int_type_width_signed<'gcc, 'tcx>(ty: Ty<'tcx>, cx: &CodegenCx<'gcc, 'tcx>) - impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> { fn bit_reverse(&mut self, width: u64, value: RValue<'gcc>) -> RValue<'gcc> { - let typ = value.get_type(); + let result_type = value.get_type(); + let typ = result_type.to_unsigned(self.cx); + + let value = + if result_type.is_signed(self.cx) { + self.context.new_bitcast(None, value, typ) + } + else { + value + }; + let context = &self.cx.context; - match width { - 8 => { - // First step. - let left = self.and(value, context.new_rvalue_from_int(typ, 0xF0)); - let left = self.lshr(left, context.new_rvalue_from_int(typ, 4)); - let right = self.and(value, context.new_rvalue_from_int(typ, 0x0F)); - let right = self.shl(right, context.new_rvalue_from_int(typ, 4)); - let step1 = self.or(left, right); - - // Second step. 
- let left = self.and(step1, context.new_rvalue_from_int(typ, 0xCC)); - let left = self.lshr(left, context.new_rvalue_from_int(typ, 2)); - let right = self.and(step1, context.new_rvalue_from_int(typ, 0x33)); - let right = self.shl(right, context.new_rvalue_from_int(typ, 2)); - let step2 = self.or(left, right); - - // Third step. - let left = self.and(step2, context.new_rvalue_from_int(typ, 0xAA)); - let left = self.lshr(left, context.new_rvalue_from_int(typ, 1)); - let right = self.and(step2, context.new_rvalue_from_int(typ, 0x55)); - let right = self.shl(right, context.new_rvalue_from_int(typ, 1)); - let step3 = self.or(left, right); - - step3 - }, - 16 => { - // First step. - let left = self.and(value, context.new_rvalue_from_int(typ, 0x5555)); - let left = self.shl(left, context.new_rvalue_from_int(typ, 1)); - let right = self.and(value, context.new_rvalue_from_int(typ, 0xAAAA)); - let right = self.lshr(right, context.new_rvalue_from_int(typ, 1)); - let step1 = self.or(left, right); - - // Second step. - let left = self.and(step1, context.new_rvalue_from_int(typ, 0x3333)); - let left = self.shl(left, context.new_rvalue_from_int(typ, 2)); - let right = self.and(step1, context.new_rvalue_from_int(typ, 0xCCCC)); - let right = self.lshr(right, context.new_rvalue_from_int(typ, 2)); - let step2 = self.or(left, right); - - // Third step. - let left = self.and(step2, context.new_rvalue_from_int(typ, 0x0F0F)); - let left = self.shl(left, context.new_rvalue_from_int(typ, 4)); - let right = self.and(step2, context.new_rvalue_from_int(typ, 0xF0F0)); - let right = self.lshr(right, context.new_rvalue_from_int(typ, 4)); - let step3 = self.or(left, right); - - // Fourth step. - let left = self.and(step3, context.new_rvalue_from_int(typ, 0x00FF)); - let left = self.shl(left, context.new_rvalue_from_int(typ, 8)); - let right = self.and(step3, context.new_rvalue_from_int(typ, 0xFF00)); - let right = self.lshr(right, context.new_rvalue_from_int(typ, 8)); - let step4 = self.or(left, right); - - step4 - }, - 32 => { - // TODO(antoyo): Refactor with other implementations. - // First step. - let left = self.and(value, context.new_rvalue_from_long(typ, 0x55555555)); - let left = self.shl(left, context.new_rvalue_from_long(typ, 1)); - let right = self.and(value, context.new_rvalue_from_long(typ, 0xAAAAAAAA)); - let right = self.lshr(right, context.new_rvalue_from_long(typ, 1)); - let step1 = self.or(left, right); - - // Second step. - let left = self.and(step1, context.new_rvalue_from_long(typ, 0x33333333)); - let left = self.shl(left, context.new_rvalue_from_long(typ, 2)); - let right = self.and(step1, context.new_rvalue_from_long(typ, 0xCCCCCCCC)); - let right = self.lshr(right, context.new_rvalue_from_long(typ, 2)); - let step2 = self.or(left, right); - - // Third step. - let left = self.and(step2, context.new_rvalue_from_long(typ, 0x0F0F0F0F)); - let left = self.shl(left, context.new_rvalue_from_long(typ, 4)); - let right = self.and(step2, context.new_rvalue_from_long(typ, 0xF0F0F0F0)); - let right = self.lshr(right, context.new_rvalue_from_long(typ, 4)); - let step3 = self.or(left, right); - - // Fourth step. - let left = self.and(step3, context.new_rvalue_from_long(typ, 0x00FF00FF)); - let left = self.shl(left, context.new_rvalue_from_long(typ, 8)); - let right = self.and(step3, context.new_rvalue_from_long(typ, 0xFF00FF00)); - let right = self.lshr(right, context.new_rvalue_from_long(typ, 8)); - let step4 = self.or(left, right); - - // Fifth step. 
- let left = self.and(step4, context.new_rvalue_from_long(typ, 0x0000FFFF)); - let left = self.shl(left, context.new_rvalue_from_long(typ, 16)); - let right = self.and(step4, context.new_rvalue_from_long(typ, 0xFFFF0000)); - let right = self.lshr(right, context.new_rvalue_from_long(typ, 16)); - let step5 = self.or(left, right); - - step5 - }, - 64 => { - // First step. - let left = self.shl(value, context.new_rvalue_from_long(typ, 32)); - let right = self.lshr(value, context.new_rvalue_from_long(typ, 32)); - let step1 = self.or(left, right); - - // Second step. - let left = self.and(step1, context.new_rvalue_from_long(typ, 0x0001FFFF0001FFFF)); - let left = self.shl(left, context.new_rvalue_from_long(typ, 15)); - let right = self.and(step1, context.new_rvalue_from_long(typ, 0xFFFE0000FFFE0000u64 as i64)); // TODO(antoyo): transmute the number instead? - let right = self.lshr(right, context.new_rvalue_from_long(typ, 17)); - let step2 = self.or(left, right); - - // Third step. - let left = self.lshr(step2, context.new_rvalue_from_long(typ, 10)); - let left = self.xor(step2, left); - let temp = self.and(left, context.new_rvalue_from_long(typ, 0x003F801F003F801F)); - - let left = self.shl(temp, context.new_rvalue_from_long(typ, 10)); - let left = self.or(temp, left); - let step3 = self.xor(left, step2); - - // Fourth step. - let left = self.lshr(step3, context.new_rvalue_from_long(typ, 4)); - let left = self.xor(step3, left); - let temp = self.and(left, context.new_rvalue_from_long(typ, 0x0E0384210E038421)); - - let left = self.shl(temp, context.new_rvalue_from_long(typ, 4)); - let left = self.or(temp, left); - let step4 = self.xor(left, step3); - - // Fifth step. - let left = self.lshr(step4, context.new_rvalue_from_long(typ, 2)); - let left = self.xor(step4, left); - let temp = self.and(left, context.new_rvalue_from_long(typ, 0x2248884222488842)); - - let left = self.shl(temp, context.new_rvalue_from_long(typ, 2)); - let left = self.or(temp, left); - let step5 = self.xor(left, step4); - - step5 - }, - 128 => { - // TODO(antoyo): find a more efficient implementation? - let sixty_four = self.context.new_rvalue_from_long(typ, 64); - let high = self.context.new_cast(None, value >> sixty_four, self.u64_type); - let low = self.context.new_cast(None, value, self.u64_type); + let result = + match width { + 8 => { + // First step. + let left = self.and(value, context.new_rvalue_from_int(typ, 0xF0)); + let left = self.lshr(left, context.new_rvalue_from_int(typ, 4)); + let right = self.and(value, context.new_rvalue_from_int(typ, 0x0F)); + let right = self.shl(right, context.new_rvalue_from_int(typ, 4)); + let step1 = self.or(left, right); + + // Second step. + let left = self.and(step1, context.new_rvalue_from_int(typ, 0xCC)); + let left = self.lshr(left, context.new_rvalue_from_int(typ, 2)); + let right = self.and(step1, context.new_rvalue_from_int(typ, 0x33)); + let right = self.shl(right, context.new_rvalue_from_int(typ, 2)); + let step2 = self.or(left, right); + + // Third step. + let left = self.and(step2, context.new_rvalue_from_int(typ, 0xAA)); + let left = self.lshr(left, context.new_rvalue_from_int(typ, 1)); + let right = self.and(step2, context.new_rvalue_from_int(typ, 0x55)); + let right = self.shl(right, context.new_rvalue_from_int(typ, 1)); + let step3 = self.or(left, right); + + step3 + }, + 16 => { + // First step. 
+                    let left = self.and(value, context.new_rvalue_from_int(typ, 0x5555));
+                    let left = self.shl(left, context.new_rvalue_from_int(typ, 1));
+                    let right = self.and(value, context.new_rvalue_from_int(typ, 0xAAAA));
+                    let right = self.lshr(right, context.new_rvalue_from_int(typ, 1));
+                    let step1 = self.or(left, right);
+
+                    // Second step.
+                    let left = self.and(step1, context.new_rvalue_from_int(typ, 0x3333));
+                    let left = self.shl(left, context.new_rvalue_from_int(typ, 2));
+                    let right = self.and(step1, context.new_rvalue_from_int(typ, 0xCCCC));
+                    let right = self.lshr(right, context.new_rvalue_from_int(typ, 2));
+                    let step2 = self.or(left, right);
+
+                    // Third step.
+                    let left = self.and(step2, context.new_rvalue_from_int(typ, 0x0F0F));
+                    let left = self.shl(left, context.new_rvalue_from_int(typ, 4));
+                    let right = self.and(step2, context.new_rvalue_from_int(typ, 0xF0F0));
+                    let right = self.lshr(right, context.new_rvalue_from_int(typ, 4));
+                    let step3 = self.or(left, right);
+
+                    // Fourth step.
+                    let left = self.and(step3, context.new_rvalue_from_int(typ, 0x00FF));
+                    let left = self.shl(left, context.new_rvalue_from_int(typ, 8));
+                    let right = self.and(step3, context.new_rvalue_from_int(typ, 0xFF00));
+                    let right = self.lshr(right, context.new_rvalue_from_int(typ, 8));
+                    let step4 = self.or(left, right);
+
+                    step4
+                },
+                32 => {
+                    // TODO(antoyo): Refactor with other implementations.
+                    // First step.
+                    let left = self.and(value, context.new_rvalue_from_long(typ, 0x55555555));
+                    let left = self.shl(left, context.new_rvalue_from_long(typ, 1));
+                    let right = self.and(value, context.new_rvalue_from_long(typ, 0xAAAAAAAA));
+                    let right = self.lshr(right, context.new_rvalue_from_long(typ, 1));
+                    let step1 = self.or(left, right);
+
+                    // Second step.
+                    let left = self.and(step1, context.new_rvalue_from_long(typ, 0x33333333));
+                    let left = self.shl(left, context.new_rvalue_from_long(typ, 2));
+                    let right = self.and(step1, context.new_rvalue_from_long(typ, 0xCCCCCCCC));
+                    let right = self.lshr(right, context.new_rvalue_from_long(typ, 2));
+                    let step2 = self.or(left, right);
+
+                    // Third step.
+                    let left = self.and(step2, context.new_rvalue_from_long(typ, 0x0F0F0F0F));
+                    let left = self.shl(left, context.new_rvalue_from_long(typ, 4));
+                    let right = self.and(step2, context.new_rvalue_from_long(typ, 0xF0F0F0F0));
+                    let right = self.lshr(right, context.new_rvalue_from_long(typ, 4));
+                    let step3 = self.or(left, right);
+
+                    // Fourth step.
+                    let left = self.and(step3, context.new_rvalue_from_long(typ, 0x00FF00FF));
+                    let left = self.shl(left, context.new_rvalue_from_long(typ, 8));
+                    let right = self.and(step3, context.new_rvalue_from_long(typ, 0xFF00FF00));
+                    let right = self.lshr(right, context.new_rvalue_from_long(typ, 8));
+                    let step4 = self.or(left, right);
+
+                    // Fifth step.
+                    let left = self.and(step4, context.new_rvalue_from_long(typ, 0x0000FFFF));
+                    let left = self.shl(left, context.new_rvalue_from_long(typ, 16));
+                    let right = self.and(step4, context.new_rvalue_from_long(typ, 0xFFFF0000));
+                    let right = self.lshr(right, context.new_rvalue_from_long(typ, 16));
+                    let step5 = self.or(left, right);
+
+                    step5
+                },
+                64 => {
+                    // First step.
+                    let left = self.shl(value, context.new_rvalue_from_long(typ, 32));
+                    let right = self.lshr(value, context.new_rvalue_from_long(typ, 32));
+                    let step1 = self.or(left, right);
+
+                    // Second step.
+                    let left = self.and(step1, context.new_rvalue_from_long(typ, 0x0001FFFF0001FFFF));
+                    let left = self.shl(left, context.new_rvalue_from_long(typ, 15));
+                    let right = self.and(step1, context.new_rvalue_from_long(typ, 0xFFFE0000FFFE0000u64 as i64)); // TODO(antoyo): transmute the number instead?
+                    let right = self.lshr(right, context.new_rvalue_from_long(typ, 17));
+                    let step2 = self.or(left, right);
+
+                    // Third step.
+                    let left = self.lshr(step2, context.new_rvalue_from_long(typ, 10));
+                    let left = self.xor(step2, left);
+                    let temp = self.and(left, context.new_rvalue_from_long(typ, 0x003F801F003F801F));
+
+                    let left = self.shl(temp, context.new_rvalue_from_long(typ, 10));
+                    let left = self.or(temp, left);
+                    let step3 = self.xor(left, step2);
+
+                    // Fourth step.
+                    let left = self.lshr(step3, context.new_rvalue_from_long(typ, 4));
+                    let left = self.xor(step3, left);
+                    let temp = self.and(left, context.new_rvalue_from_long(typ, 0x0E0384210E038421));
+
+                    let left = self.shl(temp, context.new_rvalue_from_long(typ, 4));
+                    let left = self.or(temp, left);
+                    let step4 = self.xor(left, step3);
+
+                    // Fifth step.
+                    let left = self.lshr(step4, context.new_rvalue_from_long(typ, 2));
+                    let left = self.xor(step4, left);
+                    let temp = self.and(left, context.new_rvalue_from_long(typ, 0x2248884222488842));
+
+                    let left = self.shl(temp, context.new_rvalue_from_long(typ, 2));
+                    let left = self.or(temp, left);
+                    let step5 = self.xor(left, step4);
+
+                    step5
+                },
+                128 => {
+                    // TODO(antoyo): find a more efficient implementation?
+                    let sixty_four = self.context.new_rvalue_from_long(typ, 64);
+                    let high = self.context.new_cast(None, value >> sixty_four, self.u64_type);
+                    let low = self.context.new_cast(None, value, self.u64_type);
 
-                let reversed_high = self.bit_reverse(64, high);
-                let reversed_low = self.bit_reverse(64, low);
+                    let reversed_high = self.bit_reverse(64, high);
+                    let reversed_low = self.bit_reverse(64, low);
 
-                let new_low = self.context.new_cast(None, reversed_high, typ);
-                let new_high = self.context.new_cast(None, reversed_low, typ) << sixty_four;
+                    let new_low = self.context.new_cast(None, reversed_high, typ);
+                    let new_high = self.context.new_cast(None, reversed_low, typ) << sixty_four;
 
-                new_low | new_high
-            },
-            _ => {
-                panic!("cannot bit reverse with width = {}", width);
-            },
-        }
+                    new_low | new_high
+                },
+                _ => {
+                    panic!("cannot bit reverse with width = {}", width);
+                },
+            };
+
+        self.context.new_bitcast(None, result, result_type)
     }
 
     fn count_leading_zeroes(&self, width: u64, arg: RValue<'gcc>) -> RValue<'gcc> {
@@ -746,6 +759,15 @@ impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> {
     }
 
     fn count_trailing_zeroes(&self, _width: u64, arg: RValue<'gcc>) -> RValue<'gcc> {
+        let result_type = arg.get_type();
+        let arg =
+            if result_type.is_signed(self.cx) {
+                let new_type = result_type.to_unsigned(self.cx);
+                self.context.new_bitcast(None, arg, new_type)
+            }
+            else {
+                arg
+            };
         let arg_type = arg.get_type();
         let (count_trailing_zeroes, expected_type) =
             if arg_type.is_uchar(&self.cx) || arg_type.is_ushort(&self.cx) || arg_type.is_uint(&self.cx) {
@@ -796,7 +818,7 @@ impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> {
                 let res = self.context.new_array_access(None, result, index);
 
-                return self.context.new_cast(None, res, arg_type);
+                return self.context.new_bitcast(None, res, result_type);
             }
             else {
                 unimplemented!("count_trailing_zeroes for {:?}", arg_type);
@@ -810,7 +832,7 @@ impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> {
                 arg
             };
         let res = self.context.new_call(None, count_trailing_zeroes, &[arg]);
-        self.context.new_cast(None, res, arg_type)
+        self.context.new_bitcast(None, res, result_type)
     }
 
     fn int_width(&self, typ: Type<'gcc>) -> i64 {
@@ -819,7 +841,16 @@ fn pop_count(&self, value: RValue<'gcc>) -> RValue<'gcc> {
         // TODO(antoyo): use the optimized version with fewer operations.
-        let value_type = value.get_type();
+        let result_type = value.get_type();
+        let value_type = result_type.to_unsigned(self.cx);
+
+        let value =
+            if result_type.is_signed(self.cx) {
+                self.context.new_bitcast(None, value, value_type)
+            }
+            else {
+                value
+            };
 
         if value_type.is_u128(&self.cx) {
             // TODO(antoyo): implement in the normal algorithm below to have a more efficient
@@ -830,7 +861,8 @@ impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> {
             let high = self.context.new_call(None, popcount, &[high]);
             let low = self.context.new_cast(None, value, self.cx.ulonglong_type);
             let low = self.context.new_call(None, popcount, &[low]);
-            return high + low;
+            let res = high + low;
+            return self.context.new_bitcast(None, res, result_type);
         }
 
         // First step.
@@ -855,7 +887,7 @@ impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> {
         let value = left + right;
 
         if value_type.is_u8(&self.cx) {
-            return value;
+            return self.context.new_bitcast(None, value, result_type);
         }
 
         // Fourth step.
@@ -866,7 +898,7 @@ impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> {
         let value = left + right;
 
         if value_type.is_u16(&self.cx) {
-            return value;
+            return self.context.new_bitcast(None, value, result_type);
         }
 
         // Fifth step.
@@ -877,7 +909,7 @@ impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> {
         let value = left + right;
 
         if value_type.is_u32(&self.cx) {
-            return value;
+            return self.context.new_bitcast(None, value, result_type);
        }
 
         // Sixth step.
@@ -887,7 +919,7 @@ impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> {
         let right = shifted & mask;
         let value = left + right;
 
-        value
+        self.context.new_bitcast(None, value, result_type)
     }
 
     // Algorithm from: https://blog.regehr.org/archives/1063
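The hunks in this patch all apply the same sign-handling discipline: bitcast a signed operand to its unsigned counterpart, do the bit-level work there (where a right shift is logical rather than arithmetic), and bitcast only the final result back to the type the caller expects. A minimal stand-alone sketch of the idea in plain Rust, with `as` casts standing in for the libgccjit bitcast calls:

    fn count_trailing_zeroes_i32(value: i32) -> i32 {
        let unsigned = value as u32;           // same bits, unsigned view
        let count = unsigned.trailing_zeros(); // bit-level work on the unsigned type
        count as i32                           // back to the signed result type
    }

    fn main() {
        assert_eq!(count_trailing_zeroes_i32(40), 3); // 40 == 0b101000
        assert_eq!(count_trailing_zeroes_i32(-8), 3); // ...11111000 in two's complement
    }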
From a09fb901cba8e03728b8444b55f334465c4db4ca Mon Sep 17 00:00:00 2001
From: Vadim Petrochenkov
Date: Tue, 28 Sep 2021 11:53:33 +0300
Subject: [PATCH 25/27] rustc_session: Remove lint store from `Session`

---
 compiler/rustc_interface/src/passes.rs  |  5 +----
 compiler/rustc_interface/src/queries.rs |  4 ++--
 compiler/rustc_lint/src/context.rs      | 15 ---------------
 compiler/rustc_session/src/session.rs   | 17 +----------------
 4 files changed, 4 insertions(+), 37 deletions(-)

diff --git a/compiler/rustc_interface/src/passes.rs b/compiler/rustc_interface/src/passes.rs
index 45ad2df82455b..8aff093dd1826 100644
--- a/compiler/rustc_interface/src/passes.rs
+++ b/compiler/rustc_interface/src/passes.rs
@@ -179,7 +179,7 @@ pub fn register_plugins<'a>(
     register_lints: impl Fn(&Session, &mut LintStore),
     mut krate: ast::Crate,
     crate_name: &str,
-) -> Result<(ast::Crate, Lrc<LintStore>)> {
+) -> Result<(ast::Crate, LintStore)> {
     krate = sess.time("attributes_injection", || {
         rustc_builtin_macros::cmdline_attrs::inject(
             krate,
@@ -230,9 +230,6 @@ pub fn register_plugins<'a>(
         }
     });
 
-    let lint_store = Lrc::new(lint_store);
-    sess.init_lint_store(lint_store.clone());
-
     Ok((krate, lint_store))
 }
 
diff --git a/compiler/rustc_interface/src/queries.rs b/compiler/rustc_interface/src/queries.rs
index 2f540395b2d79..a71b59b9d49bd 100644
--- a/compiler/rustc_interface/src/queries.rs
+++ b/compiler/rustc_interface/src/queries.rs
@@ -135,7 +135,7 @@ impl<'tcx> Queries<'tcx> {
             let krate = self.parse()?.take();
 
             let empty: &(dyn Fn(&Session, &mut LintStore) + Sync + Send) = &|_, _| {};
-            let result = passes::register_plugins(
+            let (krate, lint_store) = passes::register_plugins(
                 self.session(),
                 &*self.codegen_backend().metadata_loader(),
                 self.compiler.register_lints.as_deref().unwrap_or_else(|| empty),
@@ -150,7 +150,7 @@ impl<'tcx> Queries<'tcx> {
             // called, which happens within passes::register_plugins().
             self.dep_graph_future().ok();
 
-            Ok(result)
+            Ok((krate, Lrc::new(lint_store)))
         })
     }
 
diff --git a/compiler/rustc_lint/src/context.rs b/compiler/rustc_lint/src/context.rs
index 8cbd2ddcbfdf7..d235b2209444e 100644
--- a/compiler/rustc_lint/src/context.rs
+++ b/compiler/rustc_lint/src/context.rs
@@ -38,7 +38,6 @@ use rustc_serialize::json::Json;
 use rustc_session::lint::{BuiltinLintDiagnostics, ExternDepSpec};
 use rustc_session::lint::{FutureIncompatibleInfo, Level, Lint, LintBuffer, LintId};
 use rustc_session::Session;
-use rustc_session::SessionLintStore;
 use rustc_span::lev_distance::find_best_match_for_name;
 use rustc_span::{symbol::Symbol, MultiSpan, Span, DUMMY_SP};
 use rustc_target::abi;
@@ -75,20 +74,6 @@ pub struct LintStore {
     lint_groups: FxHashMap<&'static str, LintGroup>,
 }
 
-impl SessionLintStore for LintStore {
-    fn name_to_lint(&self, lint_name: &str) -> LintId {
-        let lints = self
-            .find_lints(lint_name)
-            .unwrap_or_else(|_| panic!("Failed to find lint with name `{}`", lint_name));
-
-        if let &[lint] = lints.as_slice() {
-            return lint;
-        } else {
-            panic!("Found mutliple lints with name `{}`: {:?}", lint_name, lints);
-        }
-    }
-}
-
 /// The target of the `by_name` map, which accounts for renaming/deprecation.
 #[derive(Debug)]
 enum TargetLint {
diff --git a/compiler/rustc_session/src/session.rs b/compiler/rustc_session/src/session.rs
index 27215556045be..87f9190114109 100644
--- a/compiler/rustc_session/src/session.rs
+++ b/compiler/rustc_session/src/session.rs
@@ -2,10 +2,9 @@ use crate::cgu_reuse_tracker::CguReuseTracker;
 use crate::code_stats::CodeStats;
 pub use crate::code_stats::{DataTypeKind, FieldInfo, SizeKind, VariantInfo};
 use crate::config::{self, CrateType, OutputType, SwitchWithOptPath};
-use crate::filesearch;
-use crate::lint::{self, LintId};
 use crate::parse::ParseSess;
 use crate::search_paths::{PathKind, SearchPath};
+use crate::{filesearch, lint};
 
 pub use rustc_ast::attr::MarkedAttrs;
 pub use rustc_ast::Attribute;
@@ -41,10 +40,6 @@ use std::str::FromStr;
 use std::sync::Arc;
 use std::time::Duration;
 
-pub trait SessionLintStore: sync::Send + sync::Sync {
-    fn name_to_lint(&self, lint_name: &str) -> LintId;
-}
-
 pub struct OptimizationFuel {
     /// If `-zfuel=crate=n` is specified, initially set to `n`, otherwise `0`.
     remaining: u64,
@@ -153,8 +148,6 @@ pub struct Session {
 
     features: OnceCell<rustc_feature::Features>,
 
-    lint_store: OnceCell<Lrc<dyn SessionLintStore>>,
-
     incr_comp_session: OneThread<RefCell<IncrCompSession>>,
     /// Used for incremental compilation tests. Will only be populated if
     /// `-Zquery-dep-graph` is specified.
@@ -591,13 +584,6 @@ impl Session {
         }
     }
 
-    pub fn init_lint_store(&self, lint_store: Lrc<dyn SessionLintStore>) {
-        self.lint_store
-            .set(lint_store)
-            .map_err(|_| ())
-            .expect("`lint_store` was initialized twice");
-    }
-
     /// Calculates the flavor of LTO to use for this compilation.
     pub fn lto(&self) -> config::Lto {
         // If our target has codegen requirements ignore the command line
@@ -1315,7 +1301,6 @@ pub fn build_session(
         crate_types: OnceCell::new(),
         stable_crate_id: OnceCell::new(),
         features: OnceCell::new(),
-        lint_store: OnceCell::new(),
         incr_comp_session: OneThread::new(RefCell::new(IncrCompSession::NotInitialized)),
         cgu_reuse_tracker,
         prof,
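The refactoring above inverts ownership of the lint store: `register_plugins` now returns it by value and the caller in `queries.rs` wraps it for sharing, so `Session` no longer holds a type-erased copy behind `init_lint_store`. A hypothetical sketch of the resulting shape, with `Arc` standing in for rustc's `Lrc` and toy types throughout:

    use std::sync::Arc;

    struct LintStore {
        lints: Vec<&'static str>,
    }

    // Return by value; no session-side registration as a side effect.
    fn register_plugins() -> LintStore {
        LintStore { lints: vec!["dead_code", "unused_imports"] }
    }

    fn main() {
        // The caller, not the callee, decides how the store is shared.
        let lint_store = Arc::new(register_plugins());
        assert_eq!(lint_store.lints.len(), 2);
    }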
From 9809f5d21990d9e24b3e9876ea7da756fd4e9def Mon Sep 17 00:00:00 2001
From: antoyo
Date: Tue, 28 Sep 2021 09:18:27 -0400
Subject: [PATCH 26/27] Update to nightly-2021-09-28 (#97)

---
 rust-toolchain   |  2 +-
 src/builder.rs   | 17 ++++++++++++++++-
 src/callee.rs    |  7 +++----
 src/context.rs   | 41 +++++++++++++++++++++++++++++++++++++++--
 src/mono_item.rs |  5 ++---
 src/type_of.rs   |  4 ++--
 6 files changed, 63 insertions(+), 13 deletions(-)

diff --git a/rust-toolchain b/rust-toolchain
index a03d26c0467e1..d311a33f807b7 100644
--- a/rust-toolchain
+++ b/rust-toolchain
@@ -1 +1 @@
-nightly-2021-09-17
+nightly-2021-09-28
diff --git a/src/builder.rs b/src/builder.rs
index 10ee63e2a550c..ac908418ee4bf 100644
--- a/src/builder.rs
+++ b/src/builder.rs
@@ -31,11 +31,12 @@ use rustc_codegen_ssa::traits::{
     StaticBuilderMethods,
 };
 use rustc_middle::ty::{ParamEnv, Ty, TyCtxt};
-use rustc_middle::ty::layout::{HasParamEnv, HasTyCtxt, LayoutError, LayoutOfHelpers, TyAndLayout};
+use rustc_middle::ty::layout::{FnAbiError, FnAbiOfHelpers, FnAbiRequest, HasParamEnv, HasTyCtxt, LayoutError, LayoutOfHelpers, TyAndLayout};
 use rustc_span::Span;
 use rustc_span::def_id::DefId;
 use rustc_target::abi::{
     self,
+    call::FnAbi,
     Align,
     HasDataLayout,
     Size,
@@ -347,6 +348,20 @@ impl<'tcx> LayoutOfHelpers<'tcx> for Builder<'_, '_, 'tcx> {
     }
 }
 
+impl<'tcx> FnAbiOfHelpers<'tcx> for Builder<'_, '_, 'tcx> {
+    type FnAbiOfResult = &'tcx FnAbi<'tcx, Ty<'tcx>>;
+
+    #[inline]
+    fn handle_fn_abi_err(
+        &self,
+        err: FnAbiError<'tcx>,
+        span: Span,
+        fn_abi_request: FnAbiRequest<'tcx>,
+    ) -> ! {
+        self.cx.handle_fn_abi_err(err, span, fn_abi_request)
+    }
+}
+
 impl<'gcc, 'tcx> Deref for Builder<'_, 'gcc, 'tcx> {
     type Target = CodegenCx<'gcc, 'tcx>;
 
diff --git a/src/callee.rs b/src/callee.rs
index aa86332c475cf..76419b103d049 100644
--- a/src/callee.rs
+++ b/src/callee.rs
@@ -1,8 +1,7 @@
 use gccjit::{FunctionType, RValue};
 use rustc_codegen_ssa::traits::BaseTypeMethods;
-use rustc_middle::ty::{Instance, TypeFoldable};
-use rustc_middle::ty::layout::{FnAbiExt, HasTyCtxt};
-use rustc_target::abi::call::FnAbi;
+use rustc_middle::ty::{self, Instance, TypeFoldable};
+use rustc_middle::ty::layout::{FnAbiOf, HasTyCtxt};
 
 use crate::abi::FnAbiGccExt;
 use crate::context::CodegenCx;
@@ -26,7 +25,7 @@ pub fn get_fn<'gcc, 'tcx>(cx: &CodegenCx<'gcc, 'tcx>, instance: Instance<'tcx>)
 
     let sym = tcx.symbol_name(instance).name;
 
-    let fn_abi = FnAbi::of_instance(cx, instance, &[]);
+    let fn_abi = cx.fn_abi_of_instance(instance, ty::List::empty());
 
     let func =
         if let Some(func) = cx.get_declared_value(&sym) {
diff --git a/src/context.rs b/src/context.rs
index 29b6e33130735..7677ade7314e5 100644
--- a/src/context.rs
+++ b/src/context.rs
@@ -21,10 +21,10 @@ use rustc_data_structures::fx::{FxHashMap, FxHashSet};
 use rustc_middle::span_bug;
 use rustc_middle::mir::mono::CodegenUnit;
 use rustc_middle::ty::{self, Instance, ParamEnv, PolyExistentialTraitRef, Ty, TyCtxt};
-use rustc_middle::ty::layout::{HasParamEnv, HasTyCtxt, LayoutError, TyAndLayout, LayoutOfHelpers};
+use rustc_middle::ty::layout::{FnAbiError, FnAbiOfHelpers, FnAbiRequest, HasParamEnv, HasTyCtxt, LayoutError, TyAndLayout, LayoutOfHelpers};
 use rustc_session::Session;
 use rustc_span::{Span, Symbol};
-use rustc_target::abi::{HasDataLayout, PointeeInfo, Size, TargetDataLayout, VariantIdx};
+use rustc_target::abi::{call::FnAbi, HasDataLayout, PointeeInfo, Size, TargetDataLayout, VariantIdx};
 use rustc_target::spec::{HasTargetSpec, Target, TlsModel};
 
 use crate::callee::get_fn;
@@ -401,6 +401,43 @@ impl<'gcc, 'tcx> LayoutOfHelpers<'tcx> for CodegenCx<'gcc, 'tcx> {
     }
 }
 
+impl<'gcc, 'tcx> FnAbiOfHelpers<'tcx> for CodegenCx<'gcc, 'tcx> {
+    type FnAbiOfResult = &'tcx FnAbi<'tcx, Ty<'tcx>>;
+
+    #[inline]
+    fn handle_fn_abi_err(
+        &self,
+        err: FnAbiError<'tcx>,
+        span: Span,
+        fn_abi_request: FnAbiRequest<'tcx>,
+    ) -> ! {
+        if let FnAbiError::Layout(LayoutError::SizeOverflow(_)) = err {
+            self.sess().span_fatal(span, &err.to_string())
+        } else {
+            match fn_abi_request {
+                FnAbiRequest::OfFnPtr { sig, extra_args } => {
+                    span_bug!(
+                        span,
+                        "`fn_abi_of_fn_ptr({}, {:?})` failed: {}",
+                        sig,
+                        extra_args,
+                        err
+                    );
+                }
+                FnAbiRequest::OfInstance { instance, extra_args } => {
+                    span_bug!(
+                        span,
+                        "`fn_abi_of_instance({}, {:?})` failed: {}",
+                        instance,
+                        extra_args,
+                        err
+                    );
+                }
+            }
+        }
+    }
+}
+
 impl<'tcx, 'gcc> HasParamEnv<'tcx> for CodegenCx<'gcc, 'tcx> {
     fn param_env(&self) -> ParamEnv<'tcx> {
         ParamEnv::reveal_all()
diff --git a/src/mono_item.rs b/src/mono_item.rs
index a34c68a4e22d3..e21d40b6c37e3 100644
--- a/src/mono_item.rs
+++ b/src/mono_item.rs
@@ -2,9 +2,8 @@ use rustc_codegen_ssa::traits::PreDefineMethods;
 use rustc_middle::middle::codegen_fn_attrs::CodegenFnAttrFlags;
 use rustc_middle::mir::mono::{Linkage, Visibility};
 use rustc_middle::ty::{self, Instance, TypeFoldable};
-use rustc_middle::ty::layout::{FnAbiExt, LayoutOf};
+use rustc_middle::ty::layout::{FnAbiOf, LayoutOf};
 use rustc_span::def_id::DefId;
-use rustc_target::abi::call::FnAbi;
 
 use crate::base;
 use crate::context::CodegenCx;
@@ -27,7 +26,7 @@ impl<'gcc, 'tcx> PreDefineMethods<'tcx> for CodegenCx<'gcc, 'tcx> {
     fn predefine_fn(&self, instance: Instance<'tcx>, linkage: Linkage, _visibility: Visibility, symbol_name: &str) {
         assert!(!instance.substs.needs_infer());
 
-        let fn_abi = FnAbi::of_instance(self, instance, &[]);
+        let fn_abi = self.fn_abi_of_instance(instance, ty::List::empty());
         self.linkage.set(base::linkage_to_gcc(linkage));
         let _decl = self.declare_fn(symbol_name, &fn_abi);
         //let attrs = self.tcx.codegen_fn_attrs(instance.def_id());
diff --git a/src/type_of.rs b/src/type_of.rs
index 2ff2ee7b85226..9c39c8f91a1ff 100644
--- a/src/type_of.rs
+++ b/src/type_of.rs
@@ -4,7 +4,7 @@ use gccjit::{Struct, Type};
 use crate::rustc_codegen_ssa::traits::{BaseTypeMethods, DerivedTypeMethods, LayoutTypeMethods};
 use rustc_middle::bug;
 use rustc_middle::ty::{self, Ty, TypeFoldable};
-use rustc_middle::ty::layout::{FnAbiExt, LayoutOf, TyAndLayout};
+use rustc_middle::ty::layout::{FnAbiOf, LayoutOf, TyAndLayout};
 use rustc_middle::ty::print::with_no_trimmed_paths;
 use rustc_target::abi::{self, Abi, F32, F64, FieldsShape, Int, Integer, Pointer, PointeeInfo, Size, TyAbiInterface, Variants};
 use rustc_target::abi::call::{CastTarget, FnAbi, Reg};
@@ -160,7 +160,7 @@ impl<'tcx> LayoutGccExt<'tcx> for TyAndLayout<'tcx> {
             ty::Adt(def, _) if def.is_box() => {
                 cx.type_ptr_to(cx.layout_of(self.ty.boxed_ty()).gcc_type(cx, true))
             }
-            ty::FnPtr(sig) => cx.fn_ptr_backend_type(&FnAbi::of_fn_ptr(cx, sig, &[])),
+            ty::FnPtr(sig) => cx.fn_ptr_backend_type(&cx.fn_abi_of_fn_ptr(sig, ty::List::empty())),
             _ => self.scalar_gcc_type_at(cx, scalar, Size::ZERO),
         };
         cx.scalar_types.borrow_mut().insert(self.ty, ty);
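The nightly bump above replaces the free-standing constructors (`FnAbi::of_instance`, `FnAbi::of_fn_ptr`) with context methods: each codegen context implements `FnAbiOfHelpers`, whose only obligation is the `handle_fn_abi_err` hook, and infallible `fn_abi_of_*` entry points are then provided on top of the fallible query. A toy model of that trait shape (the names `AbiHelpers` and `abi_of`, and the string-keyed lookup, are invented; only the structure mirrors the rustc traits):

    trait AbiHelpers {
        // The hook each context must supply, like `handle_fn_abi_err` above.
        fn handle_abi_err(&self, err: String) -> !;

        // Infallible wrapper provided for every implementor.
        fn abi_of(&self, ty: &str) -> u64 {
            self.try_abi_of(ty).unwrap_or_else(|err| self.handle_abi_err(err))
        }

        // Stand-in for the fallible query.
        fn try_abi_of(&self, ty: &str) -> Result<u64, String> {
            match ty {
                "u8" => Ok(1),
                "u64" => Ok(8),
                other => Err(format!("no ABI recorded for `{}`", other)),
            }
        }
    }

    struct Cx;

    impl AbiHelpers for Cx {
        fn handle_abi_err(&self, err: String) -> ! {
            // The real implementations call span_fatal or span_bug! here.
            panic!("fn_abi query failed: {}", err)
        }
    }

    fn main() {
        assert_eq!(Cx.abi_of("u64"), 8);
    }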
From cd4810de42c57b64b74dae09c530a4c3a41f87b9 Mon Sep 17 00:00:00 2001
From: antoyo
Date: Tue, 28 Sep 2021 09:32:54 -0400
Subject: [PATCH 27/27] Fix warnings (#98)

---
 src/declare.rs       | 4 ++--
 src/intrinsic/mod.rs | 2 +-
 2 files changed, 3 insertions(+), 3 deletions(-)

diff --git a/src/declare.rs b/src/declare.rs
index b43f68f878d7a..b79a50d1eee2e 100644
--- a/src/declare.rs
+++ b/src/declare.rs
@@ -40,12 +40,12 @@ impl<'gcc, 'tcx> CodegenCx<'gcc, 'tcx> {
         global
     }
 
-    pub fn declare_func(&self, name: &str, return_type: Type<'gcc>, params: &[Type<'gcc>], variadic: bool) -> RValue<'gcc> {
+    /*pub fn declare_func(&self, name: &str, return_type: Type<'gcc>, params: &[Type<'gcc>], variadic: bool) -> RValue<'gcc> {
         self.linkage.set(FunctionType::Exported);
         let func = declare_raw_fn(self, name, () /*llvm::CCallConv*/, return_type, params, variadic);
         // FIXME(antoyo): this is a wrong cast. That requires changing the compiler API.
         unsafe { std::mem::transmute(func) }
-    }
+    }*/
 
     pub fn declare_global(&self, name: &str, ty: Type<'gcc>, is_tls: bool, link_section: Option<Symbol>) -> LValue<'gcc> {
         let global = self.context.new_global(None, GlobalKind::Exported, ty, name);
diff --git a/src/intrinsic/mod.rs b/src/intrinsic/mod.rs
index 45978e1409023..375d422cb25c4 100644
--- a/src/intrinsic/mod.rs
+++ b/src/intrinsic/mod.rs
@@ -272,7 +272,7 @@ impl<'a, 'gcc, 'tcx> IntrinsicCallMethods<'tcx> for Builder<'a, 'gcc, 'tcx> {
                 use rustc_target::abi::Abi::*;
                 let tp_ty = substs.type_at(0);
                 let layout = self.layout_of(tp_ty).layout;
-                let use_integer_compare = match layout.abi {
+                let _use_integer_compare = match layout.abi {
                     Scalar(_) | ScalarPair(_, _) => true,
                     Uninhabited | Vector { .. } => false,
                     Aggregate { .. } => {