diff --git a/deps/v8/.gitignore b/deps/v8/.gitignore index dbe1504e0931d8..31d395a0fef5a4 100644 --- a/deps/v8/.gitignore +++ b/deps/v8/.gitignore @@ -63,13 +63,10 @@ /test/wasm-spec-tests/tests.tar.gz /third_party/* !/third_party/antlr4 -!/third_party/cpu_features -/third_party/cpu_features/src -!/third_party/inspector_protocol -!/third_party/jsoncpp -/third_party/jsoncpp/source !/third_party/colorama /third_party/colorama/src +!/third_party/cpu_features +/third_party/cpu_features/src !/third_party/glibc !/third_party/googletest /third_party/googletest/src/* @@ -80,6 +77,11 @@ !/third_party/googletest/src/googletest/include/gtest /third_party/googletest/src/googletest/include/gtest/* !/third_party/googletest/src/googletest/include/gtest/gtest_prod.h +!/third_party/inspector_protocol +!/third_party/jsoncpp +/third_party/jsoncpp/source +!/third_party/re2 +/third_party/re2/src !/third_party/test262-harness !/third_party/v8 !/third_party/wasm-api diff --git a/deps/v8/AUTHORS b/deps/v8/AUTHORS index 2136b85df21be9..9bd9ff447e5d03 100644 --- a/deps/v8/AUTHORS +++ b/deps/v8/AUTHORS @@ -311,3 +311,4 @@ Zhongping Wang Yang Xiang Kotaro Ohsugi Jing Peiyang +magic-akari diff --git a/deps/v8/BUILD.bazel b/deps/v8/BUILD.bazel index 03ff0d8b3b0aca..2351c1e3300e0b 100644 --- a/deps/v8/BUILD.bazel +++ b/deps/v8/BUILD.bazel @@ -1689,6 +1689,8 @@ filegroup( "src/heap/memory-balancer.h", "src/heap/memory-chunk.cc", "src/heap/memory-chunk.h", + "src/heap/memory-chunk-header.cc", + "src/heap/memory-chunk-header.h", "src/heap/memory-chunk-inl.h", "src/heap/memory-chunk-layout.cc", "src/heap/memory-chunk-layout.h", @@ -2792,6 +2794,7 @@ filegroup( "src/wasm/code-space-access.cc", "src/wasm/code-space-access.h", "src/wasm/compilation-environment.h", + "src/wasm/compilation-environment-inl.h", "src/wasm/constant-expression.cc", "src/wasm/constant-expression.h", "src/wasm/constant-expression-interface.cc", @@ -2883,6 +2886,7 @@ filegroup( "src/wasm/wasm-value.h", "src/wasm/well-known-imports.cc", "src/wasm/well-known-imports.h", + "src/wasm/wrappers.cc", ], "//conditions:default": [], }), @@ -3011,6 +3015,8 @@ filegroup( "src/compiler/compiler-source-position-table.h", "src/compiler/constant-folding-reducer.cc", "src/compiler/constant-folding-reducer.h", + "src/compiler/const-tracking-let-helpers.cc", + "src/compiler/const-tracking-let-helpers.h", "src/compiler/control-equivalence.cc", "src/compiler/control-equivalence.h", "src/compiler/control-flow-optimizer.cc", @@ -3185,7 +3191,7 @@ filegroup( "src/compiler/turboshaft/builtin-call-descriptors.h", "src/compiler/turboshaft/csa-optimize-phase.cc", "src/compiler/turboshaft/csa-optimize-phase.h", - "src/compiler/turboshaft/dataview-reducer.h", + "src/compiler/turboshaft/dataview-lowering-reducer.h", "src/compiler/turboshaft/code-elimination-and-simplification-phase.cc", "src/compiler/turboshaft/code-elimination-and-simplification-phase.h", "src/compiler/turboshaft/dead-code-elimination-reducer.h", @@ -3199,7 +3205,7 @@ filegroup( "src/compiler/turboshaft/define-assembler-macros.inc", "src/compiler/turboshaft/deopt-data.h", "src/compiler/turboshaft/explicit-truncation-reducer.h", - "src/compiler/turboshaft/fast-api-call-reducer.h", + "src/compiler/turboshaft/fast-api-call-lowering-reducer.h", "src/compiler/turboshaft/fast-hash.h", "src/compiler/turboshaft/graph.cc", "src/compiler/turboshaft/graph.h", @@ -3249,7 +3255,6 @@ filegroup( "src/compiler/turboshaft/recreate-schedule.h", "src/compiler/turboshaft/recreate-schedule-phase.cc", 
"src/compiler/turboshaft/recreate-schedule-phase.h", - "src/compiler/turboshaft/reduce-args-helper.h", "src/compiler/turboshaft/reducer-traits.h", "src/compiler/turboshaft/representations.cc", "src/compiler/turboshaft/representations.h", @@ -3265,7 +3270,7 @@ filegroup( "src/compiler/turboshaft/simplify-tf-loops.h", "src/compiler/turboshaft/snapshot-table.h", "src/compiler/turboshaft/snapshot-table-opindex.h", - "src/compiler/turboshaft/stack-check-reducer.h", + "src/compiler/turboshaft/stack-check-lowering-reducer.h", "src/compiler/turboshaft/store-store-elimination-phase.cc", "src/compiler/turboshaft/store-store-elimination-phase.h", "src/compiler/turboshaft/store-store-elimination-reducer.h", @@ -3374,8 +3379,8 @@ filegroup( "src/compiler/turboshaft/wasm-assembler-helpers.h", "src/compiler/turboshaft/wasm-gc-optimize-phase.cc", "src/compiler/turboshaft/wasm-gc-optimize-phase.h", - "src/compiler/turboshaft/wasm-gc-type-reducer.cc", - "src/compiler/turboshaft/wasm-gc-type-reducer.h", + "src/compiler/turboshaft/wasm-gc-typed-optimization-reducer.cc", + "src/compiler/turboshaft/wasm-gc-typed-optimization-reducer.h", "src/compiler/turboshaft/wasm-load-elimination-reducer.h", "src/compiler/turboshaft/wasm-lowering-phase.cc", "src/compiler/turboshaft/wasm-lowering-phase.h", diff --git a/deps/v8/BUILD.gn b/deps/v8/BUILD.gn index 133701b20ed9ba..a28da024c76279 100644 --- a/deps/v8/BUILD.gn +++ b/deps/v8/BUILD.gn @@ -1149,6 +1149,9 @@ config("features") { if (v8_fuzzilli) { defines += [ "V8_FUZZILLI" ] } + if (v8_enable_fuzztest) { + defines += [ "V8_ENABLE_FUZZTEST" ] + } if (v8_enable_short_builtin_calls) { defines += [ "V8_SHORT_BUILTIN_CALLS" ] } @@ -1482,7 +1485,6 @@ config("toolchain") { if (is_clang) { cflags += [ - "-Wmissing-field-initializers", "-Wunreachable-code", # TODO(v8:12245): Fix shadowing instances and remove. @@ -1496,11 +1498,6 @@ config("toolchain") { # warning. cflags += [ "-Wctad-maybe-unsupported" ] } - - if (v8_current_cpu == "x64" || v8_current_cpu == "arm64" || - v8_current_cpu == "mips64el" || v8_current_cpu == "riscv64") { - cflags += [ "-Wshorten-64-to-32" ] - } } if (is_clang || !is_win) { @@ -1700,20 +1697,6 @@ config("toolchain") { # Fix build with older versions of GCC # Ported from v8 bazel: https://crrev.com/c/3368869 "-Wno-stringop-overflow", - - # Fix a number of bogus errors with gcc12 - # TODO(miladfarca): re-evaluate for future gcc upgrades - # https://gcc.gnu.org/bugzilla/show_bug.cgi?id=111499 - "-Wno-stringop-overread", - - # https://gcc.gnu.org/bugzilla/show_bug.cgi?id=104336 - "-Wno-restrict", - - # https://gcc.gnu.org/bugzilla/show_bug.cgi?id=105523 - "-Wno-array-bounds", - - # https://gcc.gnu.org/bugzilla/show_bug.cgi?id=108517 - "-Wno-nonnull", ] } @@ -1723,6 +1706,17 @@ config("toolchain") { } } +config("strict_warnings") { + cflags = [] + if (is_clang) { + if (v8_current_cpu == "x64" || v8_current_cpu == "arm64" || + v8_current_cpu == "mips64el" || v8_current_cpu == "riscv64") { + cflags += [ "-Wshorten-64-to-32" ] + } + cflags += [ "-Wmissing-field-initializers" ] + } +} + # For code that is hot during mksnapshot. In fast-mksnapshot builds, we # optimize some files even in debug builds to speed up mksnapshot times. 
config("always_turbofanimize") { @@ -3304,6 +3298,7 @@ v8_header_set("v8_internal_headers") { "src/compiler/common-operator.h", "src/compiler/compilation-dependencies.h", "src/compiler/compiler-source-position-table.h", + "src/compiler/const-tracking-let-helpers.h", "src/compiler/constant-folding-reducer.h", "src/compiler/control-equivalence.h", "src/compiler/control-flow-optimizer.h", @@ -3401,7 +3396,7 @@ v8_header_set("v8_internal_headers") { "src/compiler/turboshaft/code-elimination-and-simplification-phase.h", "src/compiler/turboshaft/copying-phase.h", "src/compiler/turboshaft/csa-optimize-phase.h", - "src/compiler/turboshaft/dataview-reducer.h", + "src/compiler/turboshaft/dataview-lowering-reducer.h", "src/compiler/turboshaft/dead-code-elimination-reducer.h", "src/compiler/turboshaft/debug-feature-lowering-phase.h", "src/compiler/turboshaft/debug-feature-lowering-reducer.h", @@ -3411,7 +3406,7 @@ v8_header_set("v8_internal_headers") { "src/compiler/turboshaft/deopt-data.h", "src/compiler/turboshaft/duplication-optimization-reducer.h", "src/compiler/turboshaft/explicit-truncation-reducer.h", - "src/compiler/turboshaft/fast-api-call-reducer.h", + "src/compiler/turboshaft/fast-api-call-lowering-reducer.h", "src/compiler/turboshaft/fast-hash.h", "src/compiler/turboshaft/graph-builder.h", "src/compiler/turboshaft/graph-visualizer.h", @@ -3440,7 +3435,6 @@ v8_header_set("v8_internal_headers") { "src/compiler/turboshaft/pretenuring-propagation-reducer.h", "src/compiler/turboshaft/recreate-schedule-phase.h", "src/compiler/turboshaft/recreate-schedule.h", - "src/compiler/turboshaft/reduce-args-helper.h", "src/compiler/turboshaft/reducer-traits.h", "src/compiler/turboshaft/representations.h", "src/compiler/turboshaft/required-optimization-reducer.h", @@ -3452,7 +3446,7 @@ v8_header_set("v8_internal_headers") { "src/compiler/turboshaft/simplify-tf-loops.h", "src/compiler/turboshaft/snapshot-table-opindex.h", "src/compiler/turboshaft/snapshot-table.h", - "src/compiler/turboshaft/stack-check-reducer.h", + "src/compiler/turboshaft/stack-check-lowering-reducer.h", "src/compiler/turboshaft/store-store-elimination-phase.h", "src/compiler/turboshaft/store-store-elimination-reducer.h", "src/compiler/turboshaft/structural-optimization-reducer.h", @@ -3640,6 +3634,7 @@ v8_header_set("v8_internal_headers") { "src/heap/marking.h", "src/heap/memory-allocator.h", "src/heap/memory-balancer.h", + "src/heap/memory-chunk-header.h", "src/heap/memory-chunk-inl.h", "src/heap/memory-chunk-layout.h", "src/heap/memory-chunk.h", @@ -4136,6 +4131,12 @@ v8_header_set("v8_internal_headers") { if (v8_use_perfetto) { sources -= [ "//base/trace_event/common/trace_event_common.h" ] + sources += [ + "src/tracing/code-data-source.h", + "src/tracing/code-trace-context.h", + "src/tracing/perfetto-logger.h", + "src/tracing/perfetto-utils.h", + ] } if (v8_enable_sparkplug) { @@ -4195,7 +4196,7 @@ v8_header_set("v8_internal_headers") { "src/compiler/turboshaft/int64-lowering-reducer.h", "src/compiler/turboshaft/wasm-assembler-helpers.h", "src/compiler/turboshaft/wasm-gc-optimize-phase.h", - "src/compiler/turboshaft/wasm-gc-type-reducer.h", + "src/compiler/turboshaft/wasm-gc-typed-optimization-reducer.h", "src/compiler/turboshaft/wasm-js-lowering-reducer.h", "src/compiler/turboshaft/wasm-load-elimination-reducer.h", "src/compiler/turboshaft/wasm-lowering-phase.h", @@ -4230,6 +4231,7 @@ v8_header_set("v8_internal_headers") { "src/wasm/baseline/parallel-move.h", "src/wasm/canonical-types.h", "src/wasm/code-space-access.h", + 
"src/wasm/compilation-environment-inl.h", "src/wasm/compilation-environment.h", "src/wasm/constant-expression-interface.h", "src/wasm/constant-expression.h", @@ -4759,6 +4761,7 @@ v8_compiler_sources = [ "src/compiler/common-operator.cc", "src/compiler/compilation-dependencies.cc", "src/compiler/compiler-source-position-table.cc", + "src/compiler/const-tracking-let-helpers.cc", "src/compiler/constant-folding-reducer.cc", "src/compiler/control-equivalence.cc", "src/compiler/control-flow-optimizer.cc", @@ -4935,7 +4938,7 @@ if (v8_enable_webassembly) { "src/compiler/int64-lowering.cc", "src/compiler/turboshaft/int64-lowering-phase.cc", "src/compiler/turboshaft/wasm-gc-optimize-phase.cc", - "src/compiler/turboshaft/wasm-gc-type-reducer.cc", + "src/compiler/turboshaft/wasm-gc-typed-optimization-reducer.cc", "src/compiler/turboshaft/wasm-lowering-phase.cc", "src/compiler/turboshaft/wasm-optimize-phase.cc", "src/compiler/turboshaft/wasm-turboshaft-compiler.cc", @@ -5330,6 +5333,7 @@ v8_source_set("v8_base_without_compiler") { "src/heap/marking.cc", "src/heap/memory-allocator.cc", "src/heap/memory-balancer.cc", + "src/heap/memory-chunk-header.cc", "src/heap/memory-chunk-layout.cc", "src/heap/memory-chunk.cc", "src/heap/memory-measurement.cc", @@ -5651,6 +5655,14 @@ v8_source_set("v8_base_without_compiler") { } } + if (v8_use_perfetto) { + sources += [ + "src/tracing/code-data-source.cc", + "src/tracing/perfetto-logger.cc", + "src/tracing/perfetto-utils.cc", + ] + } + if (v8_enable_webassembly) { sources += [ ### gcmole(all) ### @@ -5706,6 +5718,7 @@ v8_source_set("v8_base_without_compiler") { "src/wasm/wasm-serialization.cc", "src/wasm/wasm-subtyping.cc", "src/wasm/well-known-imports.cc", + "src/wasm/wrappers.cc", ] } @@ -8131,7 +8144,6 @@ if (!build_with_chromium && v8_use_perfetto) { "//third_party/perfetto/src/tracing/core", # TODO(skyostil): Support non-POSIX platforms. 
- "//third_party/perfetto/protos/perfetto/config:cpp", "//third_party/perfetto/protos/perfetto/trace/track_event:zero", "//third_party/perfetto/src/tracing:in_process_backend", "//third_party/perfetto/src/tracing:platform_impl", @@ -8139,6 +8151,8 @@ if (!build_with_chromium && v8_use_perfetto) { public_deps = [ "//third_party/perfetto/include/perfetto/trace_processor", + "//third_party/perfetto/protos/perfetto/config:cpp", + "//third_party/perfetto/protos/perfetto/trace/chrome:zero", "//third_party/perfetto/src/trace_processor:export_json", "//third_party/perfetto/src/tracing:client_api", ] diff --git a/deps/v8/DEPS b/deps/v8/DEPS index badda7719a4500..6c3ca4e741a6e0 100644 --- a/deps/v8/DEPS +++ b/deps/v8/DEPS @@ -27,6 +27,7 @@ vars = { 'checkout_fuchsia_boot_images': "terminal.x64", 'checkout_fuchsia_product_bundles': '"{checkout_fuchsia_boot_images}" != ""', + 'checkout_centipede_deps': False, 'checkout_instrumented_libraries': False, 'checkout_ittapi': False, @@ -41,8 +42,9 @@ vars = { # Fetch and build V8 builtins with PGO profiles 'checkout_v8_builtins_pgo_profiles': False, - 'chromium_url': 'https://chromium.googlesource.com', 'android_url': 'https://android.googlesource.com', + 'boringssl_url': 'https://boringssl.googlesource.com', + 'chromium_url': 'https://chromium.googlesource.com', 'download_gcmole': False, 'download_jsfunfuzz': False, 'download_prebuilt_bazel': False, @@ -55,7 +57,7 @@ vars = { 'checkout_fuchsia_no_hooks': False, # reclient CIPD package version - 'reclient_version': 're_client_version:0.126.0.4aaef37-gomaip', + 'reclient_version': 're_client_version:0.131.1.784ddbb-gomaip', # Fetch configuration files required for the 'use_remoteexec' gn arg 'download_remoteexec_cfg': False, @@ -71,19 +73,19 @@ vars = { 'build_with_chromium': False, # GN CIPD package version. - 'gn_version': 'git_revision:b5adfe5f574d7110b80feb9aae6fae97c366840b', + 'gn_version': 'git_revision:0a2b8eac80f164f10b2cbc126890db0d295790cd', # ninja CIPD package version # https://chrome-infra-packages.appspot.com/p/infra/3pp/tools/ninja 'ninja_version': 'version:2@1.11.1.chromium.6', # luci-go CIPD package version. - 'luci_go': 'git_revision:0d11be367258bfe14a13ff1afcf43a0bc6aedb45', + 'luci_go': 'git_revision:3df60a11d33a59614c0e8d2bccc58d8c30984901', # Three lines of non-changing comments so that # the commit queue can handle CLs rolling Fuchsia sdk # and whatever else without interference from each other. 
- 'fuchsia_version': 'version:17.20240120.1.1', + 'fuchsia_version': 'version:18.20240215.1.1', # Three lines of non-changing comments so that # the commit queue can handle CLs rolling android_sdk_build-tools_version @@ -123,9 +125,9 @@ deps = { 'base/trace_event/common': Var('chromium_url') + '/chromium/src/base/trace_event/common.git' + '@' + '29ac73db520575590c3aceb0a6f1f58dda8934f6', 'build': - Var('chromium_url') + '/chromium/src/build.git' + '@' + '28cd6ea727d171ec990e6174308451d4178d7f8e', + Var('chromium_url') + '/chromium/src/build.git' + '@' + 'e5cf1b3ceb3fec6aa5c57b34dede99d36cede32d', 'buildtools': - Var('chromium_url') + '/chromium/src/buildtools.git' + '@' + '17ce6d2f0416038de7989bc71d055c07d333ccb5', + Var('chromium_url') + '/chromium/src/buildtools.git' + '@' + '342659133d7d0b33f4e24b640a9ad78c0c423633', 'buildtools/linux64': { 'packages': [ { @@ -171,7 +173,7 @@ deps = { 'test/mozilla/data': Var('chromium_url') + '/v8/deps/third_party/mozilla-tests.git' + '@' + 'f6c578a10ea707b1a8ab0b88943fe5115ce2b9be', 'test/test262/data': - Var('chromium_url') + '/external/github.com/tc39/test262.git' + '@' + 'a1ba783ca340e4bf3d80b5f5e11fa54f2ee5f1ef', + Var('chromium_url') + '/external/github.com/tc39/test262.git' + '@' + 'e4f91b6381d7694265031caad0c71d733ac132f3', 'third_party/android_platform': { 'url': Var('chromium_url') + '/chromium/src/third_party/android_platform.git' + '@' + 'eeb2d566f963bb66212fdc0d9bbe1dde550b4969', 'condition': 'checkout_android', @@ -224,8 +226,16 @@ deps = { 'condition': 'checkout_android', 'dep_type': 'cipd', }, + 'third_party/boringssl': { + 'url': Var('chromium_url') + '/chromium/src/third_party/boringssl.git' + '@' + '9ead20bdbf0ecc33219d25fd3a426876c54d126e', + 'condition': "checkout_centipede_deps", + }, + 'third_party/boringssl/src': { + 'url': Var('boringssl_url') + '/boringssl.git' + '@' + '414f69504d30d0848b69f6453ea7fb5e88004cb4', + 'condition': "checkout_centipede_deps", + }, 'third_party/catapult': { - 'url': Var('chromium_url') + '/catapult.git' + '@' + '3e413d7b62c09fda8713146714ba2146a0369d86', + 'url': Var('chromium_url') + '/catapult.git' + '@' + '3d6c15240b480da1e498a64a72ea77a61ba335e1', 'condition': 'checkout_android', }, 'third_party/clang-format/script': @@ -239,11 +249,11 @@ deps = { 'condition': 'checkout_android', }, 'third_party/depot_tools': - Var('chromium_url') + '/chromium/tools/depot_tools.git' + '@' + '46cb7d0aca592cd20ddc2f6cb16ee386b2abbf0d', + Var('chromium_url') + '/chromium/tools/depot_tools.git' + '@' + '9d7c8e76f82ddc6a3bbc307217e31dec44a0f73a', 'third_party/fp16/src': Var('chromium_url') + '/external/github.com/Maratyszcza/FP16.git' + '@' + '0a92994d729ff76a58f692d3028ca1b64b145d91', 'third_party/fuchsia-gn-sdk': { - 'url': Var('chromium_url') + '/chromium/src/third_party/fuchsia-gn-sdk.git' + '@' + '6ad82eadcb1a4404964a8d86c544fda1dab7af94', + 'url': Var('chromium_url') + '/chromium/src/third_party/fuchsia-gn-sdk.git' + '@' + 'fa3c41d7a15127a989111fcede8dae9265f8566b', 'condition': 'checkout_fuchsia', }, # Exists for rolling the Fuchsia SDK. 
Check out of the SDK should always @@ -259,17 +269,21 @@ deps = { 'dep_type': 'cipd', }, 'third_party/google_benchmark_chrome': { - 'url': Var('chromium_url') + '/chromium/src/third_party/google_benchmark.git' + '@' + '992199c3cb1076d307816e963ed4b5102df53c65', + 'url': Var('chromium_url') + '/chromium/src/third_party/google_benchmark.git' + '@' + 'c300add93460c31efe53fa71e61427fa1bc09e6a', }, 'third_party/google_benchmark_chrome/src': { 'url': Var('chromium_url') + '/external/github.com/google/benchmark.git' + '@' + 'b177433f3ee2513b1075140c723d73ab8901790f', }, + 'third_party/fuzztest': + Var('chromium_url') + '/chromium/src/third_party/fuzztest.git' + '@' + '9fc64e5930915bfb5a593b7e12487d78283e8221', + 'third_party/fuzztest/src': + Var('chromium_url') + '/external/github.com/google/fuzztest.git' + '@' + '61d95200e7ece7d121cab26f0c39fbf392e6566e', 'third_party/googletest/src': Var('chromium_url') + '/external/github.com/google/googletest.git' + '@' + 'af29db7ec28d6df1c7f0f745186884091e602e07', 'third_party/icu': Var('chromium_url') + '/chromium/deps/icu.git' + '@' + 'a622de35ac311c5ad390a7af80724634e5dc61ed', 'third_party/instrumented_libraries': - Var('chromium_url') + '/chromium/src/third_party/instrumented_libraries.git' + '@' + '0011c28c8d35fc5093bb29631d05428932cd1206', + Var('chromium_url') + '/chromium/src/third_party/instrumented_libraries.git' + '@' + '0893d760101b3ddf9a2408b9d20f15ec2b80b2c1', 'third_party/ittapi': { # Force checkout ittapi libraries to pass v8 header includes check on # bots that has check_v8_header_includes enabled. @@ -277,19 +291,19 @@ deps = { 'condition': "checkout_ittapi or check_v8_header_includes", }, 'third_party/jinja2': - Var('chromium_url') + '/chromium/src/third_party/jinja2.git' + '@' + 'e2d024354e11cc6b041b0cff032d73f0c7e43a07', + Var('chromium_url') + '/chromium/src/third_party/jinja2.git' + '@' + 'c9c77525ea20c871a1d4658f8d312b51266d4bad', 'third_party/jsoncpp/source': Var('chromium_url') + '/external/github.com/open-source-parsers/jsoncpp.git'+ '@' + '42e892d96e47b1f6e29844cc705e148ec4856448', 'third_party/libc++/src': - Var('chromium_url') + '/external/github.com/llvm/llvm-project/libcxx.git' + '@' + '28aa23ffb4c7344914a5b4ac7169f12e5a12333f', + Var('chromium_url') + '/external/github.com/llvm/llvm-project/libcxx.git' + '@' + '6d83791af99ea95f04986d64f111b84ce0b3c6f5', 'third_party/libc++abi/src': - Var('chromium_url') + '/external/github.com/llvm/llvm-project/libcxxabi.git' + '@' + 'ea028d4d2b8a901f6302f5371c68a24480766e2b', + Var('chromium_url') + '/external/github.com/llvm/llvm-project/libcxxabi.git' + '@' + 'a7b3d968a3a923886fea64b424bd770e69dc4ea4', 'third_party/libunwind/src': - Var('chromium_url') + '/external/github.com/llvm/llvm-project/libunwind.git' + '@' + 'f400fdb561d4416b59b8f8a33d8ec8b79da60495', + Var('chromium_url') + '/external/github.com/llvm/llvm-project/libunwind.git' + '@' + '8bad7bd6ec30f94bce82f7cb5b58ecbd6ce02996', 'third_party/logdog/logdog': Var('chromium_url') + '/infra/luci/luci-py/client/libs/logdog' + '@' + '0b2078a90f7a638d576b3a7c407d136f2fb62399', 'third_party/markupsafe': - Var('chromium_url') + '/chromium/src/third_party/markupsafe.git' + '@' + '0bad08bb207bbfc1d6f3bbc82b9242b0c50e5794', + Var('chromium_url') + '/chromium/src/third_party/markupsafe.git' + '@' + 'e582d7f0edb9d67499b0f5abd6ae5550e91da7f2', 'third_party/ninja': { 'packages': [ { @@ -304,14 +318,16 @@ deps = { Var('android_url') + '/platform/external/perfetto.git' + '@' + '6fc824d618d2f06b5d9cd8655ba0419b6b3b366e', 'third_party/protobuf': 
Var('chromium_url') + '/external/github.com/google/protobuf'+ '@' + '6a59a2ad1f61d9696092f79b6d74368b4d7970a3', + 'third_party/re2/src': + Var('chromium_url') + '/external/github.com/google/re2.git' + '@' + 'd00d1e93781e6ebe415771a952689dff8f260d44', 'third_party/requests': { 'url': Var('chromium_url') + '/external/github.com/kennethreitz/requests.git' + '@' + 'c7e0fc087ceeadb8b4c84a0953a422c474093d6d', 'condition': 'checkout_android', }, 'third_party/zlib': - Var('chromium_url') + '/chromium/src/third_party/zlib.git'+ '@' + '63c0cec0344e6ba70f22bd690187088299baaa94', + Var('chromium_url') + '/chromium/src/third_party/zlib.git'+ '@' + '4b5807f344182fd392849b820642457212618e5f', 'tools/clang': - Var('chromium_url') + '/chromium/src/tools/clang.git' + '@' + 'f0b1beffd512e855db0f46571958cfc83c8b05a9', + Var('chromium_url') + '/chromium/src/tools/clang.git' + '@' + 'a4df104173dae7d49205ed8abefc920b7c5162d2', 'tools/luci-go': { 'packages': [ { @@ -327,7 +343,7 @@ deps = { 'dep_type': 'cipd', }, 'third_party/abseil-cpp': { - 'url': Var('chromium_url') + '/chromium/src/third_party/abseil-cpp.git' + '@' + '5ff8c1facf6b2e54811969ae7b90152bc1f44269', + 'url': Var('chromium_url') + '/chromium/src/third_party/abseil-cpp.git' + '@' + 'f1c5751a2cb4102efbffc4110ee7551b3c54cfea', 'condition': 'not build_with_chromium', } } @@ -338,6 +354,7 @@ include_rules = [ '+unicode', '+third_party/fdlibm', '+third_party/ittapi/include', + '+third_party/fuzztest', # Abseil features are allow-listed. Please use your best judgement when adding # to this set -- if in doubt, email v8-dev@. For general guidance, refer to # the Chromium guidelines (though note that some requirements in V8 may be diff --git a/deps/v8/PRESUBMIT.py b/deps/v8/PRESUBMIT.py index 244f4ab168ad8e..42cebdd65328f3 100644 --- a/deps/v8/PRESUBMIT.py +++ b/deps/v8/PRESUBMIT.py @@ -443,10 +443,7 @@ def _CheckCommitMessageBugEntry(input_api, output_api): continue if ':' not in bug and not bug.startswith('b/'): try: - if int(bug) > 10000000: - results.append( - 'Buganizer entry requires issue tracker prefix b/{}'.format(bug)) - else: + if int(bug) < 10000000: if int(bug) > 200000: prefix_guess = 'chromium' else: diff --git a/deps/v8/build_overrides/build.gni b/deps/v8/build_overrides/build.gni index 64abd3e92a5dd8..9830dfc51d0213 100644 --- a/deps/v8/build_overrides/build.gni +++ b/deps/v8/build_overrides/build.gni @@ -6,6 +6,13 @@ # Chromium specific targets in a client project's GN file etc. build_with_chromium = false +# Variable that can be used to support multiple build scenarios, like when +# V8 is embedded within a target. +build_with_v8_embedder = false + +# Not all of V8's dependencies are available in V8's Node.js build. +build_with_node = false + # Used by perfetto to distinguish from its own standalone build and the # chromium build. perfetto_build_with_embedder = true diff --git a/deps/v8/gni/v8.gni b/deps/v8/gni/v8.gni index 72381c75cae640..185de67a52237b 100644 --- a/deps/v8/gni/v8.gni +++ b/deps/v8/gni/v8.gni @@ -8,6 +8,7 @@ import("//build/config/gclient_args.gni") import("//build/config/ios/config.gni") import("//build/config/sanitizers/sanitizers.gni") import("//build/config/v8_target_cpu.gni") +import("//build_overrides/build.gni") import("release_branch_toggle.gni") import("split_static_library.gni") @@ -96,6 +97,11 @@ declare_args() { # Add fuzzilli fuzzer support. 
v8_fuzzilli = false + # Enable FuzzTest + v8_enable_fuzztest = !build_with_v8_embedder && + !(defined(build_with_node) && build_with_node) && + !(is_win && is_component_build) && is_clang + # Scan the call stack conservatively during garbage collection. v8_enable_conservative_stack_scanning = false @@ -228,6 +234,7 @@ v8_remove_configs = [] v8_add_configs = [ v8_path_prefix + ":features", v8_path_prefix + ":toolchain", + v8_path_prefix + ":strict_warnings", ] if (is_debug && !v8_optimized_debug) { diff --git a/deps/v8/include/cppgc/internal/api-constants.h b/deps/v8/include/cppgc/internal/api-constants.h index 4e2a637e420560..fed7005b46089b 100644 --- a/deps/v8/include/cppgc/internal/api-constants.h +++ b/deps/v8/include/cppgc/internal/api-constants.h @@ -32,7 +32,7 @@ static constexpr uint16_t kFullyConstructedBitMask = uint16_t{1}; static constexpr size_t kPageSize = size_t{1} << 17; -#if defined(V8_TARGET_ARCH_ARM64) && defined(V8_OS_DARWIN) +#if defined(V8_HOST_ARCH_ARM64) && defined(V8_OS_DARWIN) constexpr size_t kGuardPageSize = 0; #else constexpr size_t kGuardPageSize = 4096; diff --git a/deps/v8/include/v8-array-buffer.h b/deps/v8/include/v8-array-buffer.h index 804fc42c4b56dd..ea6f5b5571a476 100644 --- a/deps/v8/include/v8-array-buffer.h +++ b/deps/v8/include/v8-array-buffer.h @@ -318,6 +318,12 @@ class V8_EXPORT ArrayBuffer : public Object { */ std::shared_ptr GetBackingStore(); + /** + * More efficient shortcut for + * GetBackingStore()->IsResizableByUserJavaScript(). + */ + bool IsResizableByUserJavaScript() const; + /** * More efficient shortcut for GetBackingStore()->Data(). The returned pointer * is valid as long as the ArrayBuffer is alive. diff --git a/deps/v8/include/v8-callbacks.h b/deps/v8/include/v8-callbacks.h index 2a25b9ee04e003..4f5e716f8147a2 100644 --- a/deps/v8/include/v8-callbacks.h +++ b/deps/v8/include/v8-callbacks.h @@ -327,10 +327,6 @@ using WasmAsyncResolvePromiseCallback = void (*)( using WasmLoadSourceMapCallback = Local (*)(Isolate* isolate, const char* name); -// --- Callback for checking if WebAssembly GC is enabled --- -// If the callback returns true, it will also enable Wasm stringrefs. -using WasmGCEnabledCallback = bool (*)(Local context); - // --- Callback for checking if WebAssembly imported strings are enabled --- using WasmImportedStringsEnabledCallback = bool (*)(Local context); @@ -342,6 +338,9 @@ using SharedArrayBufferConstructorEnabledCallback = using JavaScriptCompileHintsMagicEnabledCallback = bool (*)(Local context); +// --- Callback for checking if WebAssembly JSPI is enabled --- +using WasmJSPIEnabledCallback = bool (*)(Local context); + /** * HostImportModuleDynamicallyCallback is called when we * require the embedder to load a module. This is used as part of the dynamic @@ -352,11 +351,11 @@ using JavaScriptCompileHintsMagicEnabledCallback = * * The specifier is the name of the module that should be imported. * - * The import_assertions are import assertions for this request in the form: + * The import_attributes are import attributes for this request in the form: * [key1, value1, key2, value2, ...] where the keys and values are of type * v8::String. Note, unlike the FixedArray passed to ResolveModuleCallback and * returned from ModuleRequest::GetImportAssertions(), this array does not - * contain the source Locations of the assertions. + * contain the source Locations of the attributes. * * The embedder must compile, instantiate, evaluate the Module, and * obtain its namespace object. 
@@ -368,15 +367,10 @@ using JavaScriptCompileHintsMagicEnabledCallback = * fails (e.g. due to stack overflow), the embedder must propagate * that exception by returning an empty MaybeLocal. */ -using HostImportModuleDynamicallyWithImportAssertionsCallback = - MaybeLocal (*)(Local context, - Local referrer, - Local specifier, - Local import_assertions); using HostImportModuleDynamicallyCallback = MaybeLocal (*)( Local context, Local host_defined_options, Local resource_name, Local specifier, - Local import_assertions); + Local import_attributes); /** * Callback for requesting a compile hint for a function from the embedder. The diff --git a/deps/v8/include/v8-context.h b/deps/v8/include/v8-context.h index 5552c7a809cbf1..c81dc80c526ca2 100644 --- a/deps/v8/include/v8-context.h +++ b/deps/v8/include/v8-context.h @@ -459,12 +459,12 @@ void* Context::GetAlignedPointerFromEmbedderData(int index) { template MaybeLocal Context::GetDataFromSnapshotOnce(size_t index) { - auto slot = GetDataFromSnapshotOnce(index); - if (slot) { + if (auto slot = GetDataFromSnapshotOnce(index); slot) { internal::PerformCastCheck( internal::ValueHelper::SlotAsValue(slot)); + return Local::FromSlot(slot); } - return Local::FromSlot(slot); + return {}; } Context* Context::Cast(v8::Data* data) { diff --git a/deps/v8/include/v8-forward.h b/deps/v8/include/v8-forward.h index db3a2017b7e5ee..435fe856d97f56 100644 --- a/deps/v8/include/v8-forward.h +++ b/deps/v8/include/v8-forward.h @@ -27,6 +27,7 @@ class Context; class DataView; class Data; class Date; +class DictionaryTemplate; class Extension; class External; class FixedArray; diff --git a/deps/v8/include/v8-function-callback.h b/deps/v8/include/v8-function-callback.h index a21d59d1299a28..22b5328d101f89 100644 --- a/deps/v8/include/v8-function-callback.h +++ b/deps/v8/include/v8-function-callback.h @@ -475,7 +475,8 @@ Local ReturnValue::Get() const { #endif // V8_STATIC_ROOTS_BOOL return Undefined(GetIsolate()); } - return Local::New(GetIsolate(), reinterpret_cast(value_)); + return Local::New(GetIsolate(), + internal::ValueHelper::SlotAsValue(value_)); } template diff --git a/deps/v8/include/v8-internal.h b/deps/v8/include/v8-internal.h index a10735dac9006a..48001c68b0b433 100644 --- a/deps/v8/include/v8-internal.h +++ b/deps/v8/include/v8-internal.h @@ -616,8 +616,6 @@ constexpr bool kAllCodeObjectsLiveInTrustedSpace = kRuntimeGeneratedCodeObjectsLiveInTrustedSpace && kBuiltinCodeObjectsLiveInTrustedSpace; -constexpr bool kInterpreterDataObjectsLiveInTrustedSpace = false; - // {obj} must be the raw tagged pointer representation of a HeapObject // that's guaranteed to never be in ReadOnlySpace. V8_EXPORT internal::Isolate* IsolateFromNeverReadOnlySpaceObject(Address obj); @@ -781,8 +779,6 @@ class Internals { static const int kNodeStateMask = 0x3; static const int kNodeStateIsWeakValue = 2; - static const int kTracedNodeClassIdOffset = kApiSystemPointerSize; - static const int kFirstNonstringType = 0x80; static const int kOddballType = 0x83; static const int kForeignType = 0xcc; diff --git a/deps/v8/include/v8-isolate.h b/deps/v8/include/v8-isolate.h index add965abeb7350..a3ceec01334ea0 100644 --- a/deps/v8/include/v8-isolate.h +++ b/deps/v8/include/v8-isolate.h @@ -1579,19 +1579,14 @@ class V8_EXPORT Isolate { void SetWasmLoadSourceMapCallback(WasmLoadSourceMapCallback callback); - /** - * Register callback to control whether Wasm GC is enabled. - * The callback overwrites the value of the flag. - * If the callback returns true, it will also enable Wasm stringrefs. 
- */ - void SetWasmGCEnabledCallback(WasmGCEnabledCallback callback); - void SetWasmImportedStringsEnabledCallback( WasmImportedStringsEnabledCallback callback); void SetSharedArrayBufferConstructorEnabledCallback( SharedArrayBufferConstructorEnabledCallback callback); + void SetWasmJSPIEnabledCallback(WasmJSPIEnabledCallback callback); + /** * Register callback to control whether compile hints magic comments are * enabled. @@ -1751,12 +1746,12 @@ uint32_t Isolate::GetNumberOfDataSlots() { template MaybeLocal Isolate::GetDataFromSnapshotOnce(size_t index) { - auto slot = GetDataFromSnapshotOnce(index); - if (slot) { + if (auto slot = GetDataFromSnapshotOnce(index); slot) { internal::PerformCastCheck( internal::ValueHelper::SlotAsValue(slot)); + return Local::FromSlot(slot); } - return Local::FromSlot(slot); + return {}; } } // namespace v8 diff --git a/deps/v8/include/v8-local-handle.h b/deps/v8/include/v8-local-handle.h index 37efafdd647035..46f7308431af0d 100644 --- a/deps/v8/include/v8-local-handle.h +++ b/deps/v8/include/v8-local-handle.h @@ -62,6 +62,7 @@ class ReturnValue; class String; template class Traced; +class TypecheckWitness; class Utils; namespace debug { @@ -405,6 +406,8 @@ class V8_TRIVIAL_ABI Local : public LocalBase, } #ifdef V8_ENABLE_DIRECT_LOCAL + friend class TypecheckWitness; + V8_INLINE static Local FromAddress(internal::Address ptr) { return Local(LocalBase(ptr)); } diff --git a/deps/v8/include/v8-persistent-handle.h b/deps/v8/include/v8-persistent-handle.h index 9db5af5dddd557..49518fe3631945 100644 --- a/deps/v8/include/v8-persistent-handle.h +++ b/deps/v8/include/v8-persistent-handle.h @@ -241,7 +241,7 @@ class NonCopyablePersistentTraits { * This will clone the contents of storage cell, but not any of the flags, etc. */ template -struct CopyablePersistentTraits { +struct V8_DEPRECATED("Use v8::Global instead") CopyablePersistentTraits { using CopyablePersistent = Persistent>; static const bool kResetInDestructor = true; template diff --git a/deps/v8/include/v8-script.h b/deps/v8/include/v8-script.h index a2d0bcaad343a6..db22de9b18797b 100644 --- a/deps/v8/include/v8-script.h +++ b/deps/v8/include/v8-script.h @@ -136,19 +136,24 @@ class V8_EXPORT ModuleRequest : public Data { int GetSourceOffset() const; /** - * Contains the import assertions for this request in the form: + * Contains the import attributes for this request in the form: * [key1, value1, source_offset1, key2, value2, source_offset2, ...]. * The keys and values are of type v8::String, and the source offsets are of * type Int32. Use Module::SourceOffsetToLocation to convert the source * offsets to Locations with line/column numbers. * - * All assertions present in the module request will be supplied in this + * All attributes present in the module request will be supplied in this * list, regardless of whether they are supported by the host. Per * https://tc39.es/proposal-import-attributes/#sec-hostgetsupportedimportattributes, - * hosts are expected to throw for assertions that they do not support (as + * hosts are expected to throw for attributes that they do not support (as * opposed to, for example, ignoring them). 
*/ - Local GetImportAssertions() const; + Local GetImportAttributes() const; + + V8_DEPRECATE_SOON("Use GetImportAttributes instead") + Local GetImportAssertions() const { + return GetImportAttributes(); + } V8_INLINE static ModuleRequest* Cast(Data* data); diff --git a/deps/v8/include/v8-snapshot.h b/deps/v8/include/v8-snapshot.h index 70d2ca5ecf5e88..a1dc0c3881c22d 100644 --- a/deps/v8/include/v8-snapshot.h +++ b/deps/v8/include/v8-snapshot.h @@ -14,6 +14,10 @@ namespace v8 { class Object; +namespace internal { +class SnapshotCreatorImpl; +} // namespace internal + class V8_EXPORT StartupData { public: /** @@ -206,7 +210,8 @@ class V8_EXPORT SnapshotCreator { size_t AddData(Local context, internal::Address object); size_t AddData(internal::Address object); - void* data_; + internal::SnapshotCreatorImpl* impl_; + friend class internal::SnapshotCreatorImpl; }; template diff --git a/deps/v8/include/v8-template.h b/deps/v8/include/v8-template.h index c18baf95c2e472..674d4201d5d782 100644 --- a/deps/v8/include/v8-template.h +++ b/deps/v8/include/v8-template.h @@ -5,6 +5,9 @@ #ifndef INCLUDE_V8_TEMPLATE_H_ #define INCLUDE_V8_TEMPLATE_H_ +#include +#include + #include "v8-data.h" // NOLINT(build/include_directory) #include "v8-function-callback.h" // NOLINT(build/include_directory) #include "v8-local-handle.h" // NOLINT(build/include_directory) @@ -778,7 +781,11 @@ class V8_EXPORT ObjectTemplate : public Template { Isolate* isolate, Local constructor = Local()); - /** Creates a new instance of this template.*/ + /** + * Creates a new instance of this template. + * + * \param context The context in which the instance is created. + */ V8_WARN_UNUSED_RESULT MaybeLocal NewInstance(Local context); /** @@ -950,6 +957,41 @@ class V8_EXPORT ObjectTemplate : public Template { friend class FunctionTemplate; }; +/** + * A template to create dictionary objects at runtime. + */ +class V8_EXPORT DictionaryTemplate final { + public: + /** Creates a new template. Also declares data properties that can be passed + * on instantiation of the template. Properties can only be declared on + * construction and are then immutable. The values are passed on creating the + * object via `NewInstance()`. + * + * \param names the keys that can be passed on instantiation. + */ + static Local New( + Isolate* isolate, MemorySpan names); + + /** + * Creates a new instance of this template. + * + * \param context The context used to create the dictionary object. + * \param property_values Values of properties that were declared using + * `DeclareDataProperties()`. The span only passes values and expectes the + * order to match the declaration. Non-existent properties are signaled via + * empty `MaybeLocal`s. + */ + V8_WARN_UNUSED_RESULT Local NewInstance( + Local context, MemorySpan> property_values); + + V8_INLINE static DictionaryTemplate* Cast(Data* data); + + private: + static void CheckCast(Data* that); + + DictionaryTemplate(); +}; + /** * A Signature specifies which receiver is valid for a function. 
* @@ -995,6 +1037,13 @@ ObjectTemplate* ObjectTemplate::Cast(Data* data) { return reinterpret_cast(data); } +DictionaryTemplate* DictionaryTemplate::Cast(Data* data) { +#ifdef V8_ENABLE_CHECKS + CheckCast(data); +#endif + return reinterpret_cast(data); +} + Signature* Signature::Cast(Data* data) { #ifdef V8_ENABLE_CHECKS CheckCast(data); diff --git a/deps/v8/include/v8-traced-handle.h b/deps/v8/include/v8-traced-handle.h index 7abe0b9446ef42..c9fd357b871bf7 100644 --- a/deps/v8/include/v8-traced-handle.h +++ b/deps/v8/include/v8-traced-handle.h @@ -77,19 +77,6 @@ class TracedReferenceBase : public api_internal::IndirectHandleBase { return this->GetSlotThreadSafe() == nullptr; } - /** - * Assigns a wrapper class ID to the handle. - */ - V8_DEPRECATED("Embedders need to maintain state for references themselves.") - V8_INLINE void SetWrapperClassId(uint16_t class_id); - - /** - * Returns the class ID previously assigned to this handle or 0 if no class ID - * was previously assigned. - */ - V8_DEPRECATED("Embedders need to maintain state for references themselves.") - V8_INLINE uint16_t WrapperClassId() const; - protected: V8_INLINE TracedReferenceBase() = default; @@ -440,22 +427,6 @@ TracedReference& TracedReference::operator=(const TracedReference& rhs) { return *this; } -void TracedReferenceBase::SetWrapperClassId(uint16_t class_id) { - using I = internal::Internals; - if (IsEmpty()) return; - uint8_t* addr = - reinterpret_cast(slot()) + I::kTracedNodeClassIdOffset; - *reinterpret_cast(addr) = class_id; -} - -uint16_t TracedReferenceBase::WrapperClassId() const { - using I = internal::Internals; - if (IsEmpty()) return 0; - uint8_t* addr = - reinterpret_cast(slot()) + I::kTracedNodeClassIdOffset; - return *reinterpret_cast(addr); -} - } // namespace v8 #endif // INCLUDE_V8_TRACED_HANDLE_H_ diff --git a/deps/v8/include/v8-util.h b/deps/v8/include/v8-util.h index c845c9924cd403..db6d1a2fe6befc 100644 --- a/deps/v8/include/v8-util.h +++ b/deps/v8/include/v8-util.h @@ -240,8 +240,9 @@ class PersistentValueMapBase { : value_(other.value_) { } Local NewLocal(Isolate* isolate) const { - return Local::New( - isolate, internal::ValueHelper::SlotAsValue(FromVal(value_))); + return Local::New(isolate, + internal::ValueHelper::SlotAsValue( + reinterpret_cast(value_))); } bool IsEmpty() const { return value_ == kPersistentContainerNotFound; @@ -298,7 +299,8 @@ class PersistentValueMapBase { typename Traits::Impl* impl() { return &impl_; } static V* FromVal(PersistentContainerValue v) { - return reinterpret_cast(v); + return internal::ValueHelper::SlotAsValue( + reinterpret_cast(v)); } static PersistentContainerValue ClearAndLeak(Global* persistent) { @@ -318,7 +320,7 @@ class PersistentValueMapBase { */ static Global Release(PersistentContainerValue v) { Global p; - p.slot() = reinterpret_cast(FromVal(v)); + p.slot() = reinterpret_cast(v); if (Traits::kCallbackType != kNotWeak && p.IsWeak()) { Traits::DisposeCallbackData( p.template ClearWeak()); @@ -328,8 +330,8 @@ class PersistentValueMapBase { void RemoveWeak(const K& key) { Global p; - p.slot() = reinterpret_cast( - FromVal(Traits::Remove(&impl_, key))); + p.slot() = + reinterpret_cast(Traits::Remove(&impl_, key)); p.Reset(); } @@ -345,8 +347,7 @@ class PersistentValueMapBase { PersistentContainerValue value) { bool hasValue = value != kPersistentContainerNotFound; if (hasValue) { - returnValue->SetInternal( - *reinterpret_cast(FromVal(value))); + returnValue->SetInternal(*reinterpret_cast(value)); } return hasValue; } @@ -620,7 +621,7 @@ class 
V8_DEPRECATE_SOON("Use std::vector>.") PersistentValueVector { */ Local Get(size_t index) const { return Local::New(isolate_, internal::ValueHelper::SlotAsValue( - FromVal(Traits::Get(&impl_, index)))); + Traits::Get(&impl_, index))); } /** @@ -630,8 +631,7 @@ class V8_DEPRECATE_SOON("Use std::vector>.") PersistentValueVector { size_t length = Traits::Size(&impl_); for (size_t i = 0; i < length; i++) { Global p; - p.slot() = - reinterpret_cast(FromVal(Traits::Get(&impl_, i))); + p.slot() = reinterpret_cast(Traits::Get(&impl_, i)); } Traits::Clear(&impl_); } @@ -652,7 +652,8 @@ class V8_DEPRECATE_SOON("Use std::vector>.") PersistentValueVector { } static V* FromVal(PersistentContainerValue v) { - return reinterpret_cast(v); + return internal::ValueHelper::SlotAsValue( + reinterpret_cast(v)); } Isolate* isolate_; diff --git a/deps/v8/include/v8-version.h b/deps/v8/include/v8-version.h index d702a676db560d..c3c0da86379d07 100644 --- a/deps/v8/include/v8-version.h +++ b/deps/v8/include/v8-version.h @@ -9,9 +9,9 @@ // NOTE these macros are used by some of the tool scripts and the build // system so their names cannot be changed without changing the scripts. #define V8_MAJOR_VERSION 12 -#define V8_MINOR_VERSION 2 -#define V8_BUILD_NUMBER 281 -#define V8_PATCH_LEVEL 27 +#define V8_MINOR_VERSION 3 +#define V8_BUILD_NUMBER 219 +#define V8_PATCH_LEVEL 16 // Use 1 for candidates and 0 otherwise. // (Boolean macro values are not supported by all preprocessors.) diff --git a/deps/v8/infra/testing/builders.pyl b/deps/v8/infra/testing/builders.pyl index de03e53600b813..674f5f2d572151 100644 --- a/deps/v8/infra/testing/builders.pyl +++ b/deps/v8/infra/testing/builders.pyl @@ -376,7 +376,6 @@ }, 'tests': [ {'name': 'v8testing', 'variant': 'default'}, - {'name': 'v8testing', 'variant': 'future'}, ], }, 'v8_linux64_coverage_rel': { @@ -1435,7 +1434,6 @@ }, 'tests': [ {'name': 'v8testing', 'variant': 'default'}, - {'name': 'v8testing', 'variant': 'future'}, ], }, 'V8 Linux64 - custom snapshot - debug': { diff --git a/deps/v8/samples/shell.cc b/deps/v8/samples/shell.cc index 835dfcb0d6bbb7..7ec41b104947bc 100644 --- a/deps/v8/samples/shell.cc +++ b/deps/v8/samples/shell.cc @@ -48,11 +48,11 @@ * For a more sophisticated shell, consider using the debug shell D8. 
*/ - -v8::Local CreateShellContext(v8::Isolate* isolate); -void RunShell(v8::Local context, v8::Platform* platform); -int RunMain(v8::Isolate* isolate, v8::Platform* platform, int argc, - char* argv[]); +v8::Global CreateShellContext(v8::Isolate* isolate); +void RunShell(v8::Isolate* isolate, const v8::Global& context, + v8::Platform* platform); +int RunMain(v8::Isolate* isolate, const v8::Global& context, + v8::Platform* platform, int argc, char* argv[]); bool ExecuteString(v8::Isolate* isolate, v8::Local source, v8::Local name, bool print_result, bool report_exceptions); @@ -64,10 +64,8 @@ void Version(const v8::FunctionCallbackInfo& info); v8::MaybeLocal ReadFile(v8::Isolate* isolate, const char* name); void ReportException(v8::Isolate* isolate, v8::TryCatch* handler); - static bool run_shell; - int main(int argc, char* argv[]) { v8::V8::InitializeICUDefaultLocation(argv[0]); v8::V8::InitializeExternalStartupData(argv[0]); @@ -83,15 +81,13 @@ int main(int argc, char* argv[]) { int result; { v8::Isolate::Scope isolate_scope(isolate); - v8::HandleScope handle_scope(isolate); - v8::Local context = CreateShellContext(isolate); + v8::Global context = CreateShellContext(isolate); if (context.IsEmpty()) { fprintf(stderr, "Error creating context\n"); return 1; } - v8::Context::Scope context_scope(context); - result = RunMain(isolate, platform.get(), argc, argv); - if (run_shell) RunShell(context, platform.get()); + result = RunMain(isolate, context, platform.get(), argc, argv); + if (run_shell) RunShell(isolate, context, platform.get()); } isolate->Dispose(); v8::V8::Dispose(); @@ -100,16 +96,15 @@ int main(int argc, char* argv[]) { return result; } - // Extracts a C string from a V8 Utf8Value. const char* ToCString(const v8::String::Utf8Value& value) { return *value ? *value : ""; } - // Creates a new execution environment containing the built-in // functions. -v8::Local CreateShellContext(v8::Isolate* isolate) { +v8::Global CreateShellContext(v8::Isolate* isolate) { + v8::HandleScope handle_scope(isolate); // Create a template for the global object. v8::Local global = v8::ObjectTemplate::New(isolate); // Bind the global 'print' function to the C++ Print callback. @@ -122,10 +117,11 @@ v8::Local CreateShellContext(v8::Isolate* isolate) { global->Set(isolate, "quit", v8::FunctionTemplate::New(isolate, Quit)); // Bind the 'version' function global->Set(isolate, "version", v8::FunctionTemplate::New(isolate, Version)); - return v8::Context::New(isolate, NULL, global); + // Return the context. + v8::Local context = v8::Context::New(isolate, nullptr, global); + return v8::Global(isolate, context); } - // The callback that is invoked by v8 whenever the JavaScript 'print' // function is called. Prints its arguments on stdout separated by // spaces and ending with a newline. 
@@ -155,7 +151,7 @@ void Read(const v8::FunctionCallbackInfo& info) { return; } v8::String::Utf8Value file(info.GetIsolate(), info[0]); - if (*file == NULL) { + if (*file == nullptr) { info.GetIsolate()->ThrowError("Error loading file"); return; } @@ -175,7 +171,7 @@ void Load(const v8::FunctionCallbackInfo& info) { for (int i = 0; i < info.Length(); i++) { v8::HandleScope handle_scope(info.GetIsolate()); v8::String::Utf8Value file(info.GetIsolate(), info[i]); - if (*file == NULL) { + if (*file == nullptr) { info.GetIsolate()->ThrowError("Error loading file"); return; } @@ -203,6 +199,8 @@ void Quit(const v8::FunctionCallbackInfo& info) { exit(exit_code); } +// The callback that is invoked by v8 whenever the JavaScript 'version' +// function is called. Returns a string containing the current V8 version. void Version(const v8::FunctionCallbackInfo& info) { info.GetReturnValue().Set( v8::String::NewFromUtf8(info.GetIsolate(), v8::V8::GetVersion()) @@ -212,7 +210,7 @@ void Version(const v8::FunctionCallbackInfo& info) { // Reads a file into a v8 string. v8::MaybeLocal ReadFile(v8::Isolate* isolate, const char* name) { FILE* file = fopen(name, "rb"); - if (file == NULL) return v8::MaybeLocal(); + if (file == nullptr) return {}; fseek(file, 0, SEEK_END); size_t size = ftell(file); @@ -224,7 +222,7 @@ v8::MaybeLocal ReadFile(v8::Isolate* isolate, const char* name) { i += fread(&chars[i], 1, size - i, file); if (ferror(file)) { fclose(file); - return v8::MaybeLocal(); + return {}; } } fclose(file); @@ -234,10 +232,9 @@ v8::MaybeLocal ReadFile(v8::Isolate* isolate, const char* name) { return result; } - // Process remaining command line arguments and execute files -int RunMain(v8::Isolate* isolate, v8::Platform* platform, int argc, - char* argv[]) { +int RunMain(v8::Isolate* isolate, const v8::Global& context, + v8::Platform* platform, int argc, char* argv[]) { for (int i = 1; i < argc; i++) { const char* str = argv[i]; if (strcmp(str, "--shell") == 0) { @@ -251,25 +248,41 @@ int RunMain(v8::Isolate* isolate, v8::Platform* platform, int argc, "Warning: unknown flag %s.\nTry --help for options\n", str); } else if (strcmp(str, "-e") == 0 && i + 1 < argc) { // Execute argument given to -e option directly. - v8::Local file_name = - v8::String::NewFromUtf8Literal(isolate, "unnamed"); - v8::Local source; - if (!v8::String::NewFromUtf8(isolate, argv[++i]).ToLocal(&source)) { - return 1; + bool success; + { + // Enter the execution environment before evaluating any code. + v8::HandleScope handle_scope(isolate); + v8::Context::Scope context_scope(context.Get(isolate)); + v8::Local file_name = + v8::String::NewFromUtf8Literal(isolate, "unnamed"); + v8::Local source; + if (!v8::String::NewFromUtf8(isolate, argv[++i]).ToLocal(&source)) { + return 1; + } + success = ExecuteString(isolate, source, file_name, false, true); } - bool success = ExecuteString(isolate, source, file_name, false, true); + // It is important not to pump the message loop when there are v8::Local + // handles on the stack, as this may trigger a stackless GC. while (v8::platform::PumpMessageLoop(platform, isolate)) continue; if (!success) return 1; } else { // Use all other arguments as names of files to load and run. - v8::Local file_name = - v8::String::NewFromUtf8(isolate, str).ToLocalChecked(); - v8::Local source; - if (!ReadFile(isolate, str).ToLocal(&source)) { - fprintf(stderr, "Error reading '%s'\n", str); - continue; + bool success; + { + // Enter the execution environment before evaluating any code. 
+ v8::HandleScope handle_scope(isolate); + v8::Context::Scope context_scope(context.Get(isolate)); + v8::Local file_name = + v8::String::NewFromUtf8(isolate, str).ToLocalChecked(); + v8::Local source; + if (!ReadFile(isolate, str).ToLocal(&source)) { + fprintf(stderr, "Error reading '%s'\n", str); + continue; + } + success = ExecuteString(isolate, source, file_name, false, true); } - bool success = ExecuteString(isolate, source, file_name, false, true); + // It is important not to pump the message loop when there are v8::Local + // handles on the stack, as this may trigger a stackless GC. while (v8::platform::PumpMessageLoop(platform, isolate)) continue; if (!success) return 1; } @@ -277,32 +290,33 @@ int RunMain(v8::Isolate* isolate, v8::Platform* platform, int argc, return 0; } - // The read-eval-execute loop of the shell. -void RunShell(v8::Local context, v8::Platform* platform) { +void RunShell(v8::Isolate* isolate, const v8::Global& context, + v8::Platform* platform) { fprintf(stderr, "V8 version %s [sample shell]\n", v8::V8::GetVersion()); static const int kBufferSize = 256; - // Enter the execution environment before evaluating any code. - v8::Context::Scope context_scope(context); - v8::Local name( - v8::String::NewFromUtf8Literal(context->GetIsolate(), "(shell)")); while (true) { char buffer[kBufferSize]; fprintf(stderr, "> "); char* str = fgets(buffer, kBufferSize, stdin); - if (str == NULL) break; - v8::HandleScope handle_scope(context->GetIsolate()); - ExecuteString( - context->GetIsolate(), - v8::String::NewFromUtf8(context->GetIsolate(), str).ToLocalChecked(), - name, true, true); - while (v8::platform::PumpMessageLoop(platform, context->GetIsolate())) - continue; + if (str == nullptr) break; + { + // Enter the execution environment before evaluating any code. + v8::HandleScope handle_scope(isolate); + v8::Context::Scope context_scope(context.Get(isolate)); + v8::Local name( + v8::String::NewFromUtf8Literal(isolate, "(shell)")); + ExecuteString(isolate, + v8::String::NewFromUtf8(isolate, str).ToLocalChecked(), + name, true, true); + } + // It is important not to pump the message loop when there are v8::Local + // handles on the stack, as this may trigger a stackless GC. + while (v8::platform::PumpMessageLoop(platform, isolate)) continue; } fprintf(stderr, "\n"); } - // Executes a string within the current v8 context. bool ExecuteString(v8::Isolate* isolate, v8::Local source, v8::Local name, bool print_result, @@ -339,7 +353,6 @@ bool ExecuteString(v8::Isolate* isolate, v8::Local source, } } - void ReportException(v8::Isolate* isolate, v8::TryCatch* try_catch) { v8::HandleScope handle_scope(isolate); v8::String::Utf8Value exception(isolate, try_catch->Exception()); diff --git a/deps/v8/src/DEPS b/deps/v8/src/DEPS index 791550a933f5d4..aeedfd8bb70aee 100644 --- a/deps/v8/src/DEPS +++ b/deps/v8/src/DEPS @@ -39,6 +39,7 @@ include_rules = [ # TODO(v8:10496): Don't expose memory chunk outside of heap/. "+src/heap/memory-chunk.h", "+src/heap/memory-chunk-inl.h", + "+src/heap/memory-chunk-header.h", "+src/heap/paged-spaces-inl.h", "+src/heap/parked-scope-inl.h", "+src/heap/parked-scope.h", @@ -107,6 +108,12 @@ specific_include_rules = { "external-pointer-table\.cc": [ "+src/heap/read-only-spaces.h", ], + # keep the includes to a minimum since this header will be included via write barriers. 
+ "memory-chunk-header\.h": [ + "-src", + "+src/base/build_config.h", + "+src/flags/flags.h", + ], "script\.h": [ "+src/heap/factory.h", "+src/heap/factory-base.h", diff --git a/deps/v8/src/api/api-inl.h b/deps/v8/src/api/api-inl.h index 2b9deff7d37a69..b8e60c48e651d8 100644 --- a/deps/v8/src/api/api-inl.h +++ b/deps/v8/src/api/api-inl.h @@ -179,7 +179,7 @@ class V8_NODISCARD CallDepthScope { CallDepthScope(i::Isolate* isolate, Local context) : isolate_(isolate), saved_context_(isolate->context(), isolate_) { isolate_->thread_local_top()->IncrementCallDepth(this); - i::Tagged env = *Utils::OpenHandle(*context); + i::Tagged env = *Utils::OpenDirectHandle(*context); isolate->set_context(env); if (do_callback) isolate_->FireBeforeCallEnteredCallback(); @@ -304,7 +304,7 @@ bool CopyAndConvertArrayToCppBuffer(Local src, T* dst, } i::DisallowGarbageCollection no_gc; - i::Tagged obj = *Utils::OpenHandle(*src); + i::Tagged obj = *Utils::OpenDirectHandle(*src); if (i::Object::IterationHasObservableEffects(obj)) { // The array has a custom iterator. return false; diff --git a/deps/v8/src/api/api.cc b/deps/v8/src/api/api.cc index 8b709703b5fb51..9ca94b045c26c9 100644 --- a/deps/v8/src/api/api.cc +++ b/deps/v8/src/api/api.cc @@ -24,6 +24,7 @@ #include "include/v8-primitive-object.h" #include "include/v8-profiler.h" #include "include/v8-source-location.h" +#include "include/v8-template.h" #include "include/v8-unwinder-state.h" #include "include/v8-util.h" #include "include/v8-wasm.h" @@ -86,6 +87,7 @@ #include "src/objects/js-array-buffer-inl.h" #include "src/objects/js-array-inl.h" #include "src/objects/js-collection-inl.h" +#include "src/objects/js-objects.h" #include "src/objects/js-promise-inl.h" #include "src/objects/js-regexp-inl.h" #include "src/objects/js-weak-refs-inl.h" @@ -101,6 +103,7 @@ #include "src/objects/shared-function-info.h" #include "src/objects/slots.h" #include "src/objects/smi.h" +#include "src/objects/string.h" #include "src/objects/synthetic-module-inl.h" #include "src/objects/templates.h" #include "src/objects/value-serializer.h" @@ -537,7 +540,7 @@ SnapshotCreator::SnapshotCreator(Isolate* v8_isolate, const intptr_t* external_references, const StartupData* existing_snapshot, bool owns_isolate) - : data_(new i::SnapshotCreatorImpl( + : impl_(new i::SnapshotCreatorImpl( reinterpret_cast(v8_isolate), external_references, existing_snapshot, owns_isolate)) {} @@ -546,50 +549,43 @@ SnapshotCreator::SnapshotCreator(const intptr_t* external_references, : SnapshotCreator(nullptr, external_references, existing_snapshot) {} SnapshotCreator::SnapshotCreator(const v8::Isolate::CreateParams& params) - : data_(new i::SnapshotCreatorImpl(params)) {} + : impl_(new i::SnapshotCreatorImpl(params)) {} SnapshotCreator::SnapshotCreator(v8::Isolate* isolate, const v8::Isolate::CreateParams& params) - : data_(new i::SnapshotCreatorImpl(reinterpret_cast(isolate), + : impl_(new i::SnapshotCreatorImpl(reinterpret_cast(isolate), params)) {} SnapshotCreator::~SnapshotCreator() { - DCHECK_NOT_NULL(data_); - auto impl = static_cast(data_); - delete impl; + DCHECK_NOT_NULL(impl_); + delete impl_; } Isolate* SnapshotCreator::GetIsolate() { - auto impl = static_cast(data_); - return reinterpret_cast(impl->isolate()); + return reinterpret_cast(impl_->isolate()); } void SnapshotCreator::SetDefaultContext( Local context, SerializeInternalFieldsCallback callback) { - auto impl = static_cast(data_); - impl->SetDefaultContext(Utils::OpenHandle(*context), callback); + 
impl_->SetDefaultContext(Utils::OpenHandle(*context), callback); } size_t SnapshotCreator::AddContext(Local context, SerializeInternalFieldsCallback callback) { - auto impl = static_cast(data_); - return impl->AddContext(Utils::OpenHandle(*context), callback); + return impl_->AddContext(Utils::OpenHandle(*context), callback); } size_t SnapshotCreator::AddData(i::Address object) { - auto impl = static_cast(data_); - return impl->AddData(object); + return impl_->AddData(object); } size_t SnapshotCreator::AddData(Local context, i::Address object) { - auto impl = static_cast(data_); - return impl->AddData(Utils::OpenHandle(*context), object); + return impl_->AddData(Utils::OpenHandle(*context), object); } StartupData SnapshotCreator::CreateBlob( SnapshotCreator::FunctionCodeHandling function_code_handling) { - auto impl = static_cast(data_); - return impl->CreateBlob(function_code_handling); + return impl_->CreateBlob(function_code_handling); } bool StartupData::CanBeRehashed() const { @@ -1949,6 +1945,24 @@ void ObjectTemplate::SetCodeLike() { self->set_code_like(true); } +Local DictionaryTemplate::New( + Isolate* isolate, MemorySpan names) { + i::Isolate* i_isolate = reinterpret_cast(isolate); + API_RCS_SCOPE(i_isolate, DictionaryTemplate, New); + ENTER_V8_NO_SCRIPT_NO_EXCEPTION(i_isolate); + return Utils::ToLocal(i::DictionaryTemplateInfo::Create(i_isolate, names)); +} + +Local DictionaryTemplate::NewInstance( + Local context, MemorySpan> property_values) { + i::Isolate* i_isolate = reinterpret_cast(context->GetIsolate()); + API_RCS_SCOPE(i_isolate, DictionaryTemplate, NewInstance); + ENTER_V8_NO_SCRIPT_NO_EXCEPTION(i_isolate); + auto self = Utils::OpenDirectHandle(this); + return ToApiHandle(i::DictionaryTemplateInfo::NewInstance( + Utils::OpenHandle(*context), self, property_values)); +} + // --- S c r i p t s --- // Internally, UnboundScript and UnboundModuleScript are SharedFunctionInfos, @@ -2294,11 +2308,11 @@ int ModuleRequest::GetSourceOffset() const { return Utils::OpenDirectHandle(this)->position(); } -Local ModuleRequest::GetImportAssertions() const { +Local ModuleRequest::GetImportAttributes() const { auto self = Utils::OpenDirectHandle(this); i::Isolate* i_isolate = self->GetIsolate(); return ToApiHandle( - i::direct_handle(self->import_assertions(), i_isolate), i_isolate); + i::direct_handle(self->import_attributes(), i_isolate), i_isolate); } Module::Status Module::GetStatus() const { @@ -3866,7 +3880,7 @@ bool Value::IsInt32() const { } bool Value::IsUint32() const { - auto obj = *Utils::OpenHandle(this); + auto obj = *Utils::OpenDirectHandle(this); if (i::IsSmi(obj)) return i::Smi::ToInt(obj) >= 0; if (i::IsNumber(obj)) { double value = i::Object::Number(obj); @@ -4271,6 +4285,10 @@ void* v8::ArrayBuffer::Data() const { return Utils::OpenDirectHandle(this)->backing_store(); } +bool v8::ArrayBuffer::IsResizableByUserJavaScript() const { + return Utils::OpenDirectHandle(this)->is_resizable_by_js(); +} + std::shared_ptr v8::SharedArrayBuffer::GetBackingStore() { auto self = Utils::OpenDirectHandle(this); std::shared_ptr backing_store = self->GetBackingStore(); @@ -4306,7 +4324,7 @@ void v8::TypedArray::CheckCast(Value* that) { #define CHECK_TYPED_ARRAY_CAST(Type, typeName, TYPE, ctype) \ void v8::Type##Array::CheckCast(Value* that) { \ - auto obj = *Utils::OpenHandle(that); \ + auto obj = *Utils::OpenDirectHandle(that); \ Utils::ApiCheck( \ i::IsJSTypedArray(obj) && \ i::JSTypedArray::cast(obj)->type() == i::kExternal##Type##Array, \ @@ -5359,6 +5377,30 @@ bool 
v8::Object::IsUndetectable() const { return i::IsUndetectable(*self); } +namespace { +#ifdef V8_ENABLE_DIRECT_LOCAL +// A newly allocated vector is required to convert from an array of direct +// locals to an array of indirect handles. +std::vector> PrepareArguments(int argc, + Local argv[]) { + std::vector> args(argc); + for (int i = 0; i < argc; ++i) { + args[i] = Utils::OpenHandle(*argv[i]); + } + return args; +} +#else // !V8_ENABLE_DIRECT_LOCAL +// A simple cast is used to convert from an array of indirect locals to an +// array of indirect handles. A MemorySpan object is returned, as no +// deallocation is necessary. +v8::MemorySpan> PrepareArguments(int argc, + Local argv[]) { + return {reinterpret_cast*>(argv), + static_cast(argc)}; +} +#endif // V8_ENABLE_DIRECT_LOCAL +} // namespace + MaybeLocal Object::CallAsFunction(Local context, Local recv, int argc, Local argv[]) { @@ -5371,10 +5413,11 @@ MaybeLocal Object::CallAsFunction(Local context, auto self = Utils::OpenHandle(this); auto recv_obj = Utils::OpenHandle(*recv); static_assert(sizeof(v8::Local) == sizeof(i::Handle)); - i::Handle* args = reinterpret_cast*>(argv); + auto args = PrepareArguments(argc, argv); Local result; has_exception = !ToLocal( - i::Execution::Call(i_isolate, self, recv_obj, argc, args), &result); + i::Execution::Call(i_isolate, self, recv_obj, argc, args.data()), + &result); RETURN_ON_FAILED_EXECUTION(Value); RETURN_ESCAPED(result); } @@ -5390,10 +5433,10 @@ MaybeLocal Object::CallAsConstructor(Local context, int argc, i_isolate); auto self = Utils::OpenHandle(this); static_assert(sizeof(v8::Local) == sizeof(i::Handle)); - i::Handle* args = reinterpret_cast*>(argv); + auto args = PrepareArguments(argc, argv); Local result; has_exception = !ToLocal( - i::Execution::New(i_isolate, self, self, argc, args), &result); + i::Execution::New(i_isolate, self, self, argc, args.data()), &result); RETURN_ON_FAILED_EXECUTION(Value); RETURN_ESCAPED(result); } @@ -5446,10 +5489,10 @@ MaybeLocal Function::NewInstanceWithSideEffectType( } } } - i::Handle* args = reinterpret_cast*>(argv); + auto args = PrepareArguments(argc, argv); Local result; has_exception = !ToLocal( - i::Execution::New(i_isolate, self, self, argc, args), &result); + i::Execution::New(i_isolate, self, self, argc, args.data()), &result); RETURN_ON_FAILED_EXECUTION(Object); RETURN_ESCAPED(result); } @@ -5468,19 +5511,11 @@ MaybeLocal Function::Call(Local context, "Function to be called is a null pointer"); auto recv_obj = Utils::OpenHandle(*recv); static_assert(sizeof(v8::Local) == sizeof(i::Handle)); - -#ifdef V8_ENABLE_DIRECT_LOCAL - i::Handle* args = new i::Handle[argc]; - for (int i = 0; i < argc; ++i) { - args[i] = Utils::OpenHandle(*argv[i]); - } -#else // !V8_ENABLE_DIRECT_LOCAL - i::Handle* args = reinterpret_cast*>(argv); -#endif // V8_ENABLE_DIRECT_LOCAL - + auto args = PrepareArguments(argc, argv); Local result; has_exception = !ToLocal( - i::Execution::Call(i_isolate, self, recv_obj, argc, args), &result); + i::Execution::Call(i_isolate, self, recv_obj, argc, args.data()), + &result); RETURN_ON_FAILED_EXECUTION(Value); RETURN_ESCAPED(result); } @@ -7328,6 +7363,13 @@ void v8::ObjectTemplate::CheckCast(Data* that) { "Value is not an ObjectTemplate"); } +void v8::DictionaryTemplate::CheckCast(Data* that) { + auto obj = Utils::OpenDirectHandle(that); + Utils::ApiCheck(i::IsDictionaryTemplateInfo(*obj), + "v8::DictionaryTemplate::Cast", + "Value is not an DictionaryTemplate"); +} + void v8::FunctionTemplate::CheckCast(Data* that) { auto obj = 
Utils::OpenDirectHandle(that); Utils::ApiCheck(i::IsFunctionTemplateInfo(*obj), "v8::FunctionTemplate::Cast", @@ -8087,7 +8129,7 @@ uint32_t GetLength(Tagged array) { } // namespace internal uint32_t v8::Array::Length() const { - return i::GetLength(*Utils::OpenHandle(this)); + return i::GetLength(*Utils::OpenDirectHandle(this)); } namespace internal { @@ -8259,13 +8301,27 @@ Maybe v8::Array::Iterate(Local context, } v8::TypecheckWitness::TypecheckWitness(Isolate* isolate) +#ifdef V8_ENABLE_DIRECT_LOCAL + // An empty local suffices. + : cached_map_() +#else // We need to reserve a handle that we can patch later. - // TODO(13270): When we switch to CSS, we can use a direct pointer - // instead of a handle. - : cached_map_(v8::Number::New(isolate, 1)) {} + // We initialize it with something that cannot compare equal to any map. + : cached_map_(v8::Number::New(isolate, 1)) +#endif +{ +} void v8::TypecheckWitness::Update(Local baseline) { i::Tagged obj = *Utils::OpenDirectHandle(*baseline); +#ifdef V8_ENABLE_DIRECT_LOCAL + if (IsSmi(obj)) { + cached_map_ = Local(); + } else { + i::Tagged map = i::HeapObject::cast(obj)->map(); + cached_map_ = Local::FromAddress(map->ptr()); + } +#else i::Tagged map = i::Smi::zero(); if (!IsSmi(obj)) map = i::HeapObject::cast(obj)->map(); // Design overview: in the {TypecheckWitness} constructor, we create @@ -8274,12 +8330,12 @@ void v8::TypecheckWitness::Update(Local baseline) { // to allow having short-lived HandleScopes (e.g. in {FastIterateArray} // above) while a {TypecheckWitness} is alive: it therefore cannot hold // on to one of the short-lived handles. - // Calling {OpenHandle} on the {cached_map_} only serves to "reinterpret_cast" - // it to an {i::Handle} on which we can call {PatchValue}. - // TODO(13270): When we switch to CSS, this can become simpler: we can - // then simply overwrite the direct pointer. + // Calling {OpenIndirectHandle} on the {cached_map_} only serves to + // "reinterpret_cast" it to an {i::IndirectHandle} on which we can call + // {PatchValue}. 
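Editorial aside: the DictionaryTemplate::CheckCast above completes the new public DictionaryTemplate API added earlier in this file (New over a span of property names, NewInstance over a span of property values). A hedged usage sketch follows; the MemorySpan element types (std::string_view for names, MaybeLocal<Value> for values) and the Local<Object> return type of NewInstance are assumptions, since the template arguments were stripped from the diff text above.

// Hedged sketch; signatures are assumed, see the note above.
#include <string_view>
#include <v8.h>

v8::Local<v8::Object> MakePoint(v8::Isolate* isolate,
                                v8::Local<v8::Context> context,
                                double x, double y) {
  static const std::string_view names[] = {"x", "y"};
  v8::Local<v8::DictionaryTemplate> tmpl = v8::DictionaryTemplate::New(
      isolate, v8::MemorySpan<const std::string_view>(names, 2));
  v8::MaybeLocal<v8::Value> values[] = {v8::Number::New(isolate, x),
                                        v8::Number::New(isolate, y)};
  // NewInstance is assumed to return Local<Object> for the created dictionary.
  return tmpl->NewInstance(
      context, v8::MemorySpan<v8::MaybeLocal<v8::Value>>(values, 2));
}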
auto cache = Utils::OpenIndirectHandle(*cached_map_); cache.PatchValue(map); +#endif } Local v8::Map::New(Isolate* v8_isolate) { @@ -10384,13 +10440,13 @@ CALLBACK_SETTER(WasmAsyncResolvePromiseCallback, CALLBACK_SETTER(WasmLoadSourceMapCallback, WasmLoadSourceMapCallback, wasm_load_source_map_callback) -CALLBACK_SETTER(WasmGCEnabledCallback, WasmGCEnabledCallback, - wasm_gc_enabled_callback) - CALLBACK_SETTER(WasmImportedStringsEnabledCallback, WasmImportedStringsEnabledCallback, wasm_imported_strings_enabled_callback) +CALLBACK_SETTER(WasmJSPIEnabledCallback, WasmJSPIEnabledCallback, + wasm_jspi_enabled_callback) + CALLBACK_SETTER(SharedArrayBufferConstructorEnabledCallback, SharedArrayBufferConstructorEnabledCallback, sharedarraybuffer_constructor_enabled_callback) @@ -10410,6 +10466,7 @@ void Isolate::InstallConditionalFeatures(Local context) { i::WasmJs::InstallConditionalFeatures(i_isolate, Utils::OpenHandle(*context)); } + #endif // V8_ENABLE_WEBASSEMBLY } diff --git a/deps/v8/src/api/api.h b/deps/v8/src/api/api.h index 359783d78a899d..bbc42ef09680bd 100644 --- a/deps/v8/src/api/api.h +++ b/deps/v8/src/api/api.h @@ -30,6 +30,7 @@ namespace v8 { +class DictionaryTemplate; class Extension; class Signature; class Template; @@ -96,97 +97,99 @@ class RegisteredExtension { static RegisteredExtension* first_extension_; }; -#define TO_LOCAL_LIST(V) \ - V(ToLocal, AccessorPair, debug::AccessorPair) \ - V(ToLocal, NativeContext, Context) \ - V(ToLocal, Object, Value) \ - V(ToLocal, Module, Module) \ - V(ToLocal, Name, Name) \ - V(ToLocal, String, String) \ - V(ToLocal, Symbol, Symbol) \ - V(ToLocal, JSRegExp, RegExp) \ - V(ToLocal, JSReceiver, Object) \ - V(ToLocal, JSObject, Object) \ - V(ToLocal, JSFunction, Function) \ - V(ToLocal, JSArray, Array) \ - V(ToLocal, JSMap, Map) \ - V(ToLocal, JSSet, Set) \ - V(ToLocal, JSProxy, Proxy) \ - V(ToLocal, JSArrayBuffer, ArrayBuffer) \ - V(ToLocal, JSArrayBufferView, ArrayBufferView) \ - V(ToLocal, JSDataView, DataView) \ - V(ToLocal, JSRabGsabDataView, DataView) \ - V(ToLocal, JSTypedArray, TypedArray) \ - V(ToLocalShared, JSArrayBuffer, SharedArrayBuffer) \ - V(ToLocal, FunctionTemplateInfo, FunctionTemplate) \ - V(ToLocal, ObjectTemplateInfo, ObjectTemplate) \ - V(SignatureToLocal, FunctionTemplateInfo, Signature) \ - V(MessageToLocal, Object, Message) \ - V(PromiseToLocal, JSObject, Promise) \ - V(StackTraceToLocal, FixedArray, StackTrace) \ - V(StackFrameToLocal, StackFrameInfo, StackFrame) \ - V(NumberToLocal, Object, Number) \ - V(IntegerToLocal, Object, Integer) \ - V(Uint32ToLocal, Object, Uint32) \ - V(ToLocal, BigInt, BigInt) \ - V(ExternalToLocal, JSObject, External) \ - V(CallableToLocal, JSReceiver, Function) \ - V(ToLocalPrimitive, Object, Primitive) \ - V(FixedArrayToLocal, FixedArray, FixedArray) \ - V(PrimitiveArrayToLocal, FixedArray, PrimitiveArray) \ +#define TO_LOCAL_LIST(V) \ + V(ToLocal, AccessorPair, debug::AccessorPair) \ + V(ToLocal, NativeContext, Context) \ + V(ToLocal, Object, Value) \ + V(ToLocal, Module, Module) \ + V(ToLocal, Name, Name) \ + V(ToLocal, String, String) \ + V(ToLocal, Symbol, Symbol) \ + V(ToLocal, JSRegExp, RegExp) \ + V(ToLocal, JSReceiver, Object) \ + V(ToLocal, JSObject, Object) \ + V(ToLocal, JSFunction, Function) \ + V(ToLocal, JSArray, Array) \ + V(ToLocal, JSMap, Map) \ + V(ToLocal, JSSet, Set) \ + V(ToLocal, JSProxy, Proxy) \ + V(ToLocal, JSArrayBuffer, ArrayBuffer) \ + V(ToLocal, JSArrayBufferView, ArrayBufferView) \ + V(ToLocal, JSDataView, DataView) \ + V(ToLocal, JSRabGsabDataView, 
DataView) \ + V(ToLocal, JSTypedArray, TypedArray) \ + V(ToLocalShared, JSArrayBuffer, SharedArrayBuffer) \ + V(ToLocal, FunctionTemplateInfo, FunctionTemplate) \ + V(ToLocal, ObjectTemplateInfo, ObjectTemplate) \ + V(ToLocal, DictionaryTemplateInfo, DictionaryTemplate) \ + V(SignatureToLocal, FunctionTemplateInfo, Signature) \ + V(MessageToLocal, Object, Message) \ + V(PromiseToLocal, JSObject, Promise) \ + V(StackTraceToLocal, FixedArray, StackTrace) \ + V(StackFrameToLocal, StackFrameInfo, StackFrame) \ + V(NumberToLocal, Object, Number) \ + V(IntegerToLocal, Object, Integer) \ + V(Uint32ToLocal, Object, Uint32) \ + V(ToLocal, BigInt, BigInt) \ + V(ExternalToLocal, JSObject, External) \ + V(CallableToLocal, JSReceiver, Function) \ + V(ToLocalPrimitive, Object, Primitive) \ + V(FixedArrayToLocal, FixedArray, FixedArray) \ + V(PrimitiveArrayToLocal, FixedArray, PrimitiveArray) \ V(ToLocal, ScriptOrModule, ScriptOrModule) -#define OPEN_HANDLE_LIST(V) \ - V(Template, TemplateInfo) \ - V(FunctionTemplate, FunctionTemplateInfo) \ - V(ObjectTemplate, ObjectTemplateInfo) \ - V(Signature, FunctionTemplateInfo) \ - V(Data, Object) \ - V(RegExp, JSRegExp) \ - V(Object, JSReceiver) \ - V(Array, JSArray) \ - V(Map, JSMap) \ - V(Set, JSSet) \ - V(ArrayBuffer, JSArrayBuffer) \ - V(ArrayBufferView, JSArrayBufferView) \ - V(TypedArray, JSTypedArray) \ - V(Uint8Array, JSTypedArray) \ - V(Uint8ClampedArray, JSTypedArray) \ - V(Int8Array, JSTypedArray) \ - V(Uint16Array, JSTypedArray) \ - V(Int16Array, JSTypedArray) \ - V(Uint32Array, JSTypedArray) \ - V(Int32Array, JSTypedArray) \ - V(Float32Array, JSTypedArray) \ - V(Float64Array, JSTypedArray) \ - V(DataView, JSDataViewOrRabGsabDataView) \ - V(SharedArrayBuffer, JSArrayBuffer) \ - V(Name, Name) \ - V(String, String) \ - V(Symbol, Symbol) \ - V(Script, JSFunction) \ - V(UnboundModuleScript, SharedFunctionInfo) \ - V(UnboundScript, SharedFunctionInfo) \ - V(Module, Module) \ - V(Function, JSReceiver) \ - V(Message, JSMessageObject) \ - V(Context, NativeContext) \ - V(External, Object) \ - V(StackTrace, FixedArray) \ - V(StackFrame, StackFrameInfo) \ - V(Proxy, JSProxy) \ - V(debug::GeneratorObject, JSGeneratorObject) \ - V(debug::ScriptSource, HeapObject) \ - V(debug::Script, Script) \ - V(debug::EphemeronTable, EphemeronHashTable) \ - V(debug::AccessorPair, AccessorPair) \ - V(Promise, JSPromise) \ - V(Primitive, Object) \ - V(PrimitiveArray, FixedArray) \ - V(BigInt, BigInt) \ - V(ScriptOrModule, ScriptOrModule) \ - V(FixedArray, FixedArray) \ - V(ModuleRequest, ModuleRequest) \ +#define OPEN_HANDLE_LIST(V) \ + V(Template, TemplateInfo) \ + V(FunctionTemplate, FunctionTemplateInfo) \ + V(ObjectTemplate, ObjectTemplateInfo) \ + V(DictionaryTemplate, DictionaryTemplateInfo) \ + V(Signature, FunctionTemplateInfo) \ + V(Data, Object) \ + V(RegExp, JSRegExp) \ + V(Object, JSReceiver) \ + V(Array, JSArray) \ + V(Map, JSMap) \ + V(Set, JSSet) \ + V(ArrayBuffer, JSArrayBuffer) \ + V(ArrayBufferView, JSArrayBufferView) \ + V(TypedArray, JSTypedArray) \ + V(Uint8Array, JSTypedArray) \ + V(Uint8ClampedArray, JSTypedArray) \ + V(Int8Array, JSTypedArray) \ + V(Uint16Array, JSTypedArray) \ + V(Int16Array, JSTypedArray) \ + V(Uint32Array, JSTypedArray) \ + V(Int32Array, JSTypedArray) \ + V(Float32Array, JSTypedArray) \ + V(Float64Array, JSTypedArray) \ + V(DataView, JSDataViewOrRabGsabDataView) \ + V(SharedArrayBuffer, JSArrayBuffer) \ + V(Name, Name) \ + V(String, String) \ + V(Symbol, Symbol) \ + V(Script, JSFunction) \ + V(UnboundModuleScript, SharedFunctionInfo) \ 
+ V(UnboundScript, SharedFunctionInfo) \ + V(Module, Module) \ + V(Function, JSReceiver) \ + V(Message, JSMessageObject) \ + V(Context, NativeContext) \ + V(External, Object) \ + V(StackTrace, FixedArray) \ + V(StackFrame, StackFrameInfo) \ + V(Proxy, JSProxy) \ + V(debug::GeneratorObject, JSGeneratorObject) \ + V(debug::ScriptSource, HeapObject) \ + V(debug::Script, Script) \ + V(debug::EphemeronTable, EphemeronHashTable) \ + V(debug::AccessorPair, AccessorPair) \ + V(Promise, JSPromise) \ + V(Primitive, Object) \ + V(PrimitiveArray, FixedArray) \ + V(BigInt, BigInt) \ + V(ScriptOrModule, ScriptOrModule) \ + V(FixedArray, FixedArray) \ + V(ModuleRequest, ModuleRequest) \ IF_WASM(V, WasmMemoryObject, WasmMemoryObject) class Utils { diff --git a/deps/v8/src/ast/ast-source-ranges.h b/deps/v8/src/ast/ast-source-ranges.h index 7edd07658be948..b29f4cf07da5dc 100644 --- a/deps/v8/src/ast/ast-source-ranges.h +++ b/deps/v8/src/ast/ast-source-ranges.h @@ -46,6 +46,7 @@ struct SourceRange { V(BinaryOperation) \ V(Block) \ V(CaseClause) \ + V(ConditionalChain) \ V(Conditional) \ V(Expression) \ V(FunctionLiteral) \ @@ -142,6 +143,39 @@ class CaseClauseSourceRanges final : public AstNodeSourceRanges { SourceRange body_range_; }; +class ConditionalChainSourceRanges final : public AstNodeSourceRanges { + public: + explicit ConditionalChainSourceRanges(Zone* zone) + : then_ranges_(zone), else_ranges_(zone) {} + + SourceRange GetRangeAtIndex(SourceRangeKind kind, size_t index) { + if (kind == SourceRangeKind::kThen) { + DCHECK_LT(index, then_ranges_.size()); + return then_ranges_[index]; + } + DCHECK_EQ(kind, SourceRangeKind::kElse); + DCHECK_LT(index, else_ranges_.size()); + return else_ranges_[index]; + } + + void AddThenRanges(const SourceRange& range) { + then_ranges_.push_back(range); + } + + void AddElseRange(const SourceRange& else_range) { + else_ranges_.push_back(else_range); + } + + size_t RangeCount() const { return then_ranges_.size(); } + + SourceRange GetRange(SourceRangeKind kind) override { UNREACHABLE(); } + bool HasRange(SourceRangeKind kind) override { return false; } + + private: + ZoneVector then_ranges_; + ZoneVector else_ranges_; +}; + class ConditionalSourceRanges final : public AstNodeSourceRanges { public: explicit ConditionalSourceRanges(const SourceRange& then_range, diff --git a/deps/v8/src/ast/ast-traversal-visitor.h b/deps/v8/src/ast/ast-traversal-visitor.h index c1755e6c72eb57..3b4f7e22ad380b 100644 --- a/deps/v8/src/ast/ast-traversal-visitor.h +++ b/deps/v8/src/ast/ast-traversal-visitor.h @@ -295,6 +295,17 @@ void AstTraversalVisitor::VisitNativeFunctionLiteral( PROCESS_EXPRESSION(expr); } +template +void AstTraversalVisitor::VisitConditionalChain( + ConditionalChain* expr) { + PROCESS_EXPRESSION(expr); + for (size_t i = 0; i < expr->conditional_chain_length(); ++i) { + RECURSE_EXPRESSION(Visit(expr->condition_at(i))); + RECURSE_EXPRESSION(Visit(expr->then_expression_at(i))); + } + RECURSE(Visit(expr->else_expression())); +} + template void AstTraversalVisitor::VisitConditional(Conditional* expr) { PROCESS_EXPRESSION(expr); @@ -561,8 +572,8 @@ void AstTraversalVisitor::VisitImportCallExpression( ImportCallExpression* expr) { PROCESS_EXPRESSION(expr); RECURSE_EXPRESSION(Visit(expr->specifier())); - if (expr->import_assertions()) { - RECURSE_EXPRESSION(Visit(expr->import_assertions())); + if (expr->import_options()) { + RECURSE_EXPRESSION(Visit(expr->import_options())); } } diff --git a/deps/v8/src/ast/ast-value-factory.cc b/deps/v8/src/ast/ast-value-factory.cc index 
239f0a6e8d3168..f7470b5cff60c5 100644 --- a/deps/v8/src/ast/ast-value-factory.cc +++ b/deps/v8/src/ast/ast-value-factory.cc @@ -60,7 +60,7 @@ class OneByteStringStream { template void AstRawString::Internalize(IsolateT* isolate) { DCHECK(!has_string_); - if (literal_bytes_.length() == 0) { + if (literal_bytes_.empty()) { set_string(isolate->factory()->empty_string()); } else if (is_one_byte()) { OneByteStringKey key(raw_hash_field_, literal_bytes_); diff --git a/deps/v8/src/ast/ast.cc b/deps/v8/src/ast/ast.cc index feced7e38df4f5..55b20131a5ca6c 100644 --- a/deps/v8/src/ast/ast.cc +++ b/deps/v8/src/ast/ast.cc @@ -847,8 +847,8 @@ template EXPORT_TEMPLATE_DEFINE(V8_BASE_EXPORT) static bool IsCommutativeOperationWithSmiLiteral(Token::Value op) { // Add is not commutative due to potential for string addition. - return op == Token::MUL || op == Token::BIT_AND || op == Token::BIT_OR || - op == Token::BIT_XOR; + return op == Token::kMul || op == Token::kBitAnd || op == Token::kBitOr || + op == Token::kBitXor; } // Check for the pattern: x + 1. @@ -869,32 +869,9 @@ bool BinaryOperation::IsSmiLiteralOperation(Expression** subexpr, MatchSmiLiteralOperation(right_, left_, subexpr, literal)); } -static bool IsTypeof(Expression* expr) { - UnaryOperation* maybe_unary = expr->AsUnaryOperation(); - return maybe_unary != nullptr && maybe_unary->op() == Token::TYPEOF; -} - -// Check for the pattern: typeof equals . -static bool MatchLiteralCompareTypeof(Expression* left, Token::Value op, - Expression* right, Expression** expr, - Literal** literal) { - if (IsTypeof(left) && right->IsStringLiteral() && Token::IsEqualityOp(op)) { - *expr = left->AsUnaryOperation()->expression(); - *literal = right->AsLiteral(); - return true; - } - return false; -} - -bool CompareOperation::IsLiteralCompareTypeof(Expression** expr, - Literal** literal) { - return MatchLiteralCompareTypeof(left_, op(), right_, expr, literal) || - MatchLiteralCompareTypeof(right_, op(), left_, expr, literal); -} - static bool IsVoidOfLiteral(Expression* expr) { UnaryOperation* maybe_unary = expr->AsUnaryOperation(); - return maybe_unary != nullptr && maybe_unary->op() == Token::VOID && + return maybe_unary != nullptr && maybe_unary->op() == Token::kVoid && maybe_unary->expression()->IsLiteral(); } @@ -902,7 +879,7 @@ static bool MatchLiteralStrictCompareBoolean(Expression* left, Token::Value op, Expression* right, Expression** expr, Literal** literal) { - if (left->IsBooleanLiteral() && op == Token::EQ_STRICT) { + if (left->IsBooleanLiteral() && op == Token::kEqStrict) { *expr = right; *literal = left->AsLiteral(); return true; diff --git a/deps/v8/src/ast/ast.h b/deps/v8/src/ast/ast.h index 5843b3f6a9698a..483f5a2c908e11 100644 --- a/deps/v8/src/ast/ast.h +++ b/deps/v8/src/ast/ast.h @@ -91,6 +91,7 @@ namespace internal { V(ClassLiteral) \ V(CompareOperation) \ V(CompoundAssignment) \ + V(ConditionalChain) \ V(Conditional) \ V(CountOperation) \ V(EmptyParentheses) \ @@ -1938,7 +1939,7 @@ class NaryOperation final : public Expression { subsequent_(zone) { bit_field_ |= OperatorField::encode(op); DCHECK(Token::IsBinaryOp(op)); - DCHECK_NE(op, Token::EXP); + DCHECK_NE(op, Token::kExp); subsequent_.reserve(initial_subsequent_size); } @@ -2002,7 +2003,6 @@ class CompareOperation final : public Expression { Expression* right() const { return right_; } // Match special cases. 
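Editorial aside: the ast.cc hunk below keeps the Smi-literal matching (IsCommutativeOperationWithSmiLiteral, MatchSmiLiteralOperation) while switching to the kMul/kBitAnd-style token names. A toy illustration of the pattern being matched follows, using hypothetical types rather than V8's AST: a binary operation with a small-integer literal on either side counts only when the operator is commutative, since addition may be string concatenation.

// Toy illustration with hypothetical types; not V8's AST.
#include <optional>

enum class Op { kAdd, kMul, kBitAnd, kBitOr, kBitXor };

struct Expr {
  bool is_smi_literal = false;
  int smi_value = 0;
};

struct BinOp {
  Op op;
  Expr* left;
  Expr* right;
};

bool IsCommutativeWithSmiLiteral(Op op) {
  // Add is excluded: it is not commutative once string addition is possible.
  return op == Op::kMul || op == Op::kBitAnd || op == Op::kBitOr ||
         op == Op::kBitXor;
}

// Returns the non-literal subexpression if "<expr> op <smi>" matches.
std::optional<Expr*> MatchSmiLiteralOperation(const BinOp& node) {
  if (node.right->is_smi_literal) return node.left;             // x op 1
  if (IsCommutativeWithSmiLiteral(node.op) && node.left->is_smi_literal) {
    return node.right;                                          // 1 op x
  }
  return std::nullopt;
}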
- bool IsLiteralCompareTypeof(Expression** expr, Literal** literal); bool IsLiteralStrictCompareBoolean(Expression** expr, Literal** literal); bool IsLiteralCompareUndefined(Expression** expr); bool IsLiteralCompareNull(Expression** expr); @@ -2045,6 +2045,77 @@ class Spread final : public Expression { Expression* expression_; }; +class ConditionalChain : public Expression { + public: + Expression* condition_at(size_t index) const { + return conditional_chain_entries_[index].condition; + } + Expression* then_expression_at(size_t index) const { + return conditional_chain_entries_[index].then_expression; + } + int condition_position_at(size_t index) const { + return conditional_chain_entries_[index].condition_position; + } + size_t conditional_chain_length() const { + return conditional_chain_entries_.size(); + } + Expression* else_expression() const { return else_expression_; } + void set_else_expression(Expression* s) { else_expression_ = s; } + + void AddChainEntry(Expression* cond, Expression* then, int pos) { + conditional_chain_entries_.emplace_back(cond, then, pos); + } + + private: + friend class AstNodeFactory; + friend Zone; + + ConditionalChain(Zone* zone, size_t initial_size, int pos) + : Expression(pos, kConditionalChain), + conditional_chain_entries_(zone), + else_expression_(nullptr) { + conditional_chain_entries_.reserve(initial_size); + } + + // Conditional Chain Expression stores the conditional chain entries out of + // line, along with their operation's position. The else expression is stored + // inline. This Expression is reserved for ternary operations that have more + // than one conditional chain entry. For ternary operations with only one + // conditional chain entry, the Conditional Expression is used instead. + // + // So an conditional chain: + // + // cond ? then : cond ? then : cond ? then : else + // + // is stored as: + // + // [(cond, then), (cond, then),...] else + // '-----------------------------' '----' + // conditional chain entries else + // + // Example: + // + // Expression: v1 == 1 ? "a" : v2 == 2 ? "b" : "c" + // + // conditionat_chain_entries_: [(v1 == 1, "a", 0), (v2 == 2, "b", 14)] + // else_expression_: "c" + // + // Example of a _not_ expected expression (only one chain entry): + // + // Expression: v1 == 1 ? 
"a" : "b" + // + + struct ConditionalChainEntry { + Expression* condition; + Expression* then_expression; + int condition_position; + ConditionalChainEntry(Expression* cond, Expression* then, int pos) + : condition(cond), then_expression(then), condition_position(pos) {} + }; + ZoneVector conditional_chain_entries_; + Expression* else_expression_; +}; + class Conditional final : public Expression { public: Expression* condition() const { return condition_; } @@ -2666,7 +2737,7 @@ class SuperCallReference final : public Expression { class ImportCallExpression final : public Expression { public: Expression* specifier() const { return specifier_; } - Expression* import_assertions() const { return import_assertions_; } + Expression* import_options() const { return import_options_; } private: friend class AstNodeFactory; @@ -2675,16 +2746,16 @@ class ImportCallExpression final : public Expression { ImportCallExpression(Expression* specifier, int pos) : Expression(pos, kImportCallExpression), specifier_(specifier), - import_assertions_(nullptr) {} + import_options_(nullptr) {} - ImportCallExpression(Expression* specifier, Expression* import_assertions, + ImportCallExpression(Expression* specifier, Expression* import_options, int pos) : Expression(pos, kImportCallExpression), specifier_(specifier), - import_assertions_(import_assertions) {} + import_options_(import_options) {} Expression* specifier_; - Expression* import_assertions_; + Expression* import_options_; }; // This class is produced when parsing the () in arrow functions without any @@ -3216,6 +3287,10 @@ class AstNodeFactory final { return zone_->New(expression, pos, expr_pos); } + ConditionalChain* NewConditionalChain(size_t initial_size, int pos) { + return zone_->New(zone_, initial_size, pos); + } + Conditional* NewConditional(Expression* condition, Expression* then_expression, Expression* else_expression, @@ -3232,11 +3307,11 @@ class AstNodeFactory final { DCHECK_NOT_NULL(target); DCHECK_NOT_NULL(value); - if (op != Token::INIT && target->IsVariableProxy()) { + if (op != Token::kInit && target->IsVariableProxy()) { target->AsVariableProxy()->set_is_assigned(); } - if (op == Token::ASSIGN || op == Token::INIT) { + if (op == Token::kAssign || op == Token::kInit) { return zone_->New(AstNode::kAssignment, op, target, value, pos); } else { @@ -3371,9 +3446,9 @@ class AstNodeFactory final { } ImportCallExpression* NewImportCallExpression(Expression* specifier, - Expression* import_assertions, + Expression* import_options, int pos) { - return zone_->New(specifier, import_assertions, pos); + return zone_->New(specifier, import_options, pos); } InitializeClassMembersStatement* NewInitializeClassMembersStatement( diff --git a/deps/v8/src/ast/modules.cc b/deps/v8/src/ast/modules.cc index 2b07460c256bc7..6624b9cc930e77 100644 --- a/deps/v8/src/ast/modules.cc +++ b/deps/v8/src/ast/modules.cc @@ -27,10 +27,10 @@ bool SourceTextModuleDescriptor::ModuleRequestComparer::operator()( return specifier_comparison < 0; } - auto lhsIt = lhs->import_assertions()->cbegin(); - auto rhsIt = rhs->import_assertions()->cbegin(); - for (; lhsIt != lhs->import_assertions()->cend() && - rhsIt != rhs->import_assertions()->cend(); + auto lhsIt = lhs->import_attributes()->cbegin(); + auto rhsIt = rhs->import_attributes()->cbegin(); + for (; lhsIt != lhs->import_attributes()->cend() && + rhsIt != rhs->import_attributes()->cend(); ++lhsIt, ++rhsIt) { if (int assertion_key_comparison = AstRawString::Compare(lhsIt->first, rhsIt->first)) { @@ -43,9 +43,9 @@ bool 
SourceTextModuleDescriptor::ModuleRequestComparer::operator()( } } - if (lhs->import_assertions()->size() != rhs->import_assertions()->size()) { - return (lhs->import_assertions()->size() < - rhs->import_assertions()->size()); + if (lhs->import_attributes()->size() != rhs->import_attributes()->size()) { + return (lhs->import_attributes()->size() < + rhs->import_attributes()->size()); } return false; @@ -54,32 +54,32 @@ bool SourceTextModuleDescriptor::ModuleRequestComparer::operator()( void SourceTextModuleDescriptor::AddImport( const AstRawString* import_name, const AstRawString* local_name, const AstRawString* module_request, - const ImportAssertions* import_assertions, const Scanner::Location loc, + const ImportAttributes* import_attributes, const Scanner::Location loc, const Scanner::Location specifier_loc, Zone* zone) { Entry* entry = zone->New(loc); entry->local_name = local_name; entry->import_name = import_name; entry->module_request = - AddModuleRequest(module_request, import_assertions, specifier_loc, zone); + AddModuleRequest(module_request, import_attributes, specifier_loc, zone); AddRegularImport(entry); } void SourceTextModuleDescriptor::AddStarImport( const AstRawString* local_name, const AstRawString* module_request, - const ImportAssertions* import_assertions, const Scanner::Location loc, + const ImportAttributes* import_attributes, const Scanner::Location loc, const Scanner::Location specifier_loc, Zone* zone) { Entry* entry = zone->New(loc); entry->local_name = local_name; entry->module_request = - AddModuleRequest(module_request, import_assertions, specifier_loc, zone); + AddModuleRequest(module_request, import_attributes, specifier_loc, zone); AddNamespaceImport(entry, zone); } void SourceTextModuleDescriptor::AddEmptyImport( const AstRawString* module_request, - const ImportAssertions* import_assertions, + const ImportAttributes* import_attributes, const Scanner::Location specifier_loc, Zone* zone) { - AddModuleRequest(module_request, import_assertions, specifier_loc, zone); + AddModuleRequest(module_request, import_attributes, specifier_loc, zone); } void SourceTextModuleDescriptor::AddExport(const AstRawString* local_name, @@ -94,7 +94,7 @@ void SourceTextModuleDescriptor::AddExport(const AstRawString* local_name, void SourceTextModuleDescriptor::AddExport( const AstRawString* import_name, const AstRawString* export_name, const AstRawString* module_request, - const ImportAssertions* import_assertions, const Scanner::Location loc, + const ImportAttributes* import_attributes, const Scanner::Location loc, const Scanner::Location specifier_loc, Zone* zone) { DCHECK_NOT_NULL(import_name); DCHECK_NOT_NULL(export_name); @@ -102,17 +102,17 @@ void SourceTextModuleDescriptor::AddExport( entry->export_name = export_name; entry->import_name = import_name; entry->module_request = - AddModuleRequest(module_request, import_assertions, specifier_loc, zone); + AddModuleRequest(module_request, import_attributes, specifier_loc, zone); AddSpecialExport(entry, zone); } void SourceTextModuleDescriptor::AddStarExport( const AstRawString* module_request, - const ImportAssertions* import_assertions, const Scanner::Location loc, + const ImportAttributes* import_attributes, const Scanner::Location loc, const Scanner::Location specifier_loc, Zone* zone) { Entry* entry = zone->New(loc); entry->module_request = - AddModuleRequest(module_request, import_assertions, specifier_loc, zone); + AddModuleRequest(module_request, import_attributes, specifier_loc, zone); AddSpecialExport(entry, zone); } 
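Editorial aside: the ModuleRequestComparer above orders module requests by specifier first, then by each import attribute key and value in turn, and finally by the number of attributes. A hedged sketch of the same ordering over std types (not V8's AstRawString machinery) follows.

// Sketch of the ordering used by ModuleRequestComparer, with std types.
#include <string>
#include <utility>
#include <vector>

struct ModuleRequestLike {
  std::string specifier;
  // (key, value) attribute pairs, assumed already sorted by key.
  std::vector<std::pair<std::string, std::string>> attributes;
};

bool Less(const ModuleRequestLike& lhs, const ModuleRequestLike& rhs) {
  if (int c = lhs.specifier.compare(rhs.specifier)) return c < 0;
  auto l = lhs.attributes.begin();
  auto r = rhs.attributes.begin();
  for (; l != lhs.attributes.end() && r != rhs.attributes.end(); ++l, ++r) {
    if (int c = l->first.compare(r->first)) return c < 0;    // compare keys
    if (int c = l->second.compare(r->second)) return c < 0;  // then values
  }
  // All shared entries equal: the request with fewer attributes sorts first.
  return lhs.attributes.size() < rhs.attributes.size();
}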
@@ -128,28 +128,28 @@ Handle ToStringOrUndefined(IsolateT* isolate, template Handle SourceTextModuleDescriptor::AstModuleRequest::Serialize( IsolateT* isolate) const { - // The import assertions will be stored in this array in the form: + // The import attributes will be stored in this array in the form: // [key1, value1, location1, key2, value2, location2, ...] - Handle import_assertions_array = + Handle import_attributes_array = isolate->factory()->NewFixedArray( - static_cast(import_assertions()->size() * - ModuleRequest::kAssertionEntrySize), + static_cast(import_attributes()->size() * + ModuleRequest::kAttributeEntrySize), AllocationType::kOld); { DisallowGarbageCollection no_gc; - Tagged raw_import_assertions = *import_assertions_array; + Tagged raw_import_attributes = *import_attributes_array; int i = 0; - for (auto iter = import_assertions()->cbegin(); - iter != import_assertions()->cend(); - ++iter, i += ModuleRequest::kAssertionEntrySize) { - raw_import_assertions->set(i, *iter->first->string()); - raw_import_assertions->set(i + 1, *iter->second.first->string()); - raw_import_assertions->set(i + 2, + for (auto iter = import_attributes()->cbegin(); + iter != import_attributes()->cend(); + ++iter, i += ModuleRequest::kAttributeEntrySize) { + raw_import_attributes->set(i, *iter->first->string()); + raw_import_attributes->set(i + 1, *iter->second.first->string()); + raw_import_attributes->set(i + 2, Smi::FromInt(iter->second.second.beg_pos)); } } return v8::internal::ModuleRequest::New(isolate, specifier()->string(), - import_assertions_array, position()); + import_attributes_array, position()); } template Handle SourceTextModuleDescriptor::AstModuleRequest::Serialize(Isolate* isolate) const; diff --git a/deps/v8/src/ast/modules.h b/deps/v8/src/ast/modules.h index f496a0bb85a3c3..02a0bd6a8d3e48 100644 --- a/deps/v8/src/ast/modules.h +++ b/deps/v8/src/ast/modules.h @@ -38,14 +38,14 @@ class SourceTextModuleDescriptor : public ZoneObject { void AddImport(const AstRawString* import_name, const AstRawString* local_name, const AstRawString* module_request, - const ImportAssertions* import_assertions, + const ImportAttributes* import_attributes, const Scanner::Location loc, const Scanner::Location specifier_loc, Zone* zone); // import * as x from "foo.js"; void AddStarImport(const AstRawString* local_name, const AstRawString* module_request, - const ImportAssertions* import_assertions, + const ImportAttributes* import_attributes, const Scanner::Location loc, const Scanner::Location specifier_loc, Zone* zone); @@ -53,7 +53,7 @@ class SourceTextModuleDescriptor : public ZoneObject { // import {} from "foo.js"; // export {} from "foo.js"; (sic!) 
void AddEmptyImport(const AstRawString* module_request, - const ImportAssertions* import_assertions, + const ImportAttributes* import_attributes, const Scanner::Location specifier_loc, Zone* zone); // export {x}; @@ -70,13 +70,13 @@ class SourceTextModuleDescriptor : public ZoneObject { void AddExport(const AstRawString* export_name, const AstRawString* import_name, const AstRawString* module_request, - const ImportAssertions* import_assertions, + const ImportAttributes* import_attributes, const Scanner::Location loc, const Scanner::Location specifier_loc, Zone* zone); // export * from "foo.js"; void AddStarExport(const AstRawString* module_request, - const ImportAssertions* import_assertions, + const ImportAttributes* import_attributes, const Scanner::Location loc, const Scanner::Location specifier_loc, Zone* zone); @@ -125,10 +125,10 @@ class SourceTextModuleDescriptor : public ZoneObject { class AstModuleRequest : public ZoneObject { public: AstModuleRequest(const AstRawString* specifier, - const ImportAssertions* import_assertions, int position, + const ImportAttributes* import_attributes, int position, int index) : specifier_(specifier), - import_assertions_(import_assertions), + import_attributes_(import_attributes), position_(position), index_(index) {} @@ -136,8 +136,8 @@ class SourceTextModuleDescriptor : public ZoneObject { Handle Serialize(IsolateT* isolate) const; const AstRawString* specifier() const { return specifier_; } - const ImportAssertions* import_assertions() const { - return import_assertions_; + const ImportAttributes* import_attributes() const { + return import_attributes_; } int position() const { return position_; } @@ -145,7 +145,7 @@ class SourceTextModuleDescriptor : public ZoneObject { private: const AstRawString* specifier_; - const ImportAssertions* import_assertions_; + const ImportAttributes* import_attributes_; // The JS source code position of the request, used for reporting errors. 
int position_; @@ -264,13 +264,13 @@ class SourceTextModuleDescriptor : public ZoneObject { void AssignCellIndices(); int AddModuleRequest(const AstRawString* specifier, - const ImportAssertions* import_assertions, + const ImportAttributes* import_attributes, Scanner::Location specifier_loc, Zone* zone) { DCHECK_NOT_NULL(specifier); int module_requests_count = static_cast(module_requests_.size()); auto it = module_requests_ .insert(zone->New( - specifier, import_assertions, specifier_loc.beg_pos, + specifier, import_attributes, specifier_loc.beg_pos, module_requests_count)) .first; return (*it)->index(); diff --git a/deps/v8/src/ast/prettyprinter.cc b/deps/v8/src/ast/prettyprinter.cc index 5ed4a4fa5b8436..2876beb19e643b 100644 --- a/deps/v8/src/ast/prettyprinter.cc +++ b/deps/v8/src/ast/prettyprinter.cc @@ -256,6 +256,13 @@ void CallPrinter::VisitInitializeClassStaticElementsStatement( void CallPrinter::VisitNativeFunctionLiteral(NativeFunctionLiteral* node) {} +void CallPrinter::VisitConditionalChain(ConditionalChain* node) { + for (size_t i = 0; i < node->conditional_chain_length(); ++i) { + Find(node->condition_at(i)); + Find(node->then_expression_at(i)); + } + Find(node->else_expression()); +} void CallPrinter::VisitConditional(Conditional* node) { Find(node->condition()); @@ -500,7 +507,7 @@ void CallPrinter::VisitSuperCallForwardArgs(SuperCallForwardArgs* node) { void CallPrinter::VisitUnaryOperation(UnaryOperation* node) { Token::Value op = node->op(); bool needsSpace = - op == Token::DELETE || op == Token::TYPEOF || op == Token::VOID; + op == Token::kDelete || op == Token::kTypeOf || op == Token::kVoid; Print("("); Print(Token::String(op)); if (needsSpace) Print(" "); @@ -572,8 +579,8 @@ void CallPrinter::VisitTemplateLiteral(TemplateLiteral* node) { void CallPrinter::VisitImportCallExpression(ImportCallExpression* node) { Print("ImportCall("); Find(node->specifier(), true); - if (node->import_assertions()) { - Find(node->import_assertions(), true); + if (node->import_options()) { + Find(node->import_options(), true); } Print(")"); } @@ -1176,6 +1183,17 @@ void AstPrinter::VisitNativeFunctionLiteral(NativeFunctionLiteral* node) { PrintLiteralIndented("NAME", node->raw_name(), false); } +void AstPrinter::VisitConditionalChain(ConditionalChain* node) { + IndentedScope indent(this, "CONDITIONAL_CHAIN", node->position()); + PrintIndentedVisit("CONDITION", node->condition_at(0)); + PrintIndentedVisit("THEN", node->then_expression_at(0)); + for (size_t i = 1; i < node->conditional_chain_length(); ++i) { + IndentedScope indent(this, "ELSE IF", node->condition_position_at(i)); + PrintIndentedVisit("CONDITION", node->condition_at(i)); + PrintIndentedVisit("THEN", node->then_expression_at(i)); + } + PrintIndentedVisit("ELSE", node->else_expression()); +} void AstPrinter::VisitConditional(Conditional* node) { IndentedScope indent(this, "CONDITIONAL", node->position()); @@ -1471,8 +1489,8 @@ void AstPrinter::VisitTemplateLiteral(TemplateLiteral* node) { void AstPrinter::VisitImportCallExpression(ImportCallExpression* node) { IndentedScope indent(this, "IMPORT-CALL", node->position()); Visit(node->specifier()); - if (node->import_assertions()) { - Visit(node->import_assertions()); + if (node->import_options()) { + Visit(node->import_options()); } } diff --git a/deps/v8/src/ast/scopes.cc b/deps/v8/src/ast/scopes.cc index 6dfcd45cf208e5..581156baf34e6d 100644 --- a/deps/v8/src/ast/scopes.cc +++ b/deps/v8/src/ast/scopes.cc @@ -679,7 +679,7 @@ void 
DeclarationScope::HoistSloppyBlockFunctions(AstNodeFactory* factory) { DCHECK(is_being_lazily_parsed_); bool was_added; Variable* var = DeclareVariableName(name, VariableMode::kVar, &was_added); - if (sloppy_block_function->init() == Token::ASSIGN) { + if (sloppy_block_function->init() == Token::kAssign) { var->SetMaybeAssigned(); } } @@ -1077,14 +1077,15 @@ Variable* Scope::DeclareLocal(const AstRawString* name, VariableMode mode, DCHECK(!already_resolved_); // Private methods should be declared with ClassScope::DeclarePrivateName() DCHECK(!IsPrivateMethodOrAccessorVariableMode(mode)); - // This function handles VariableMode::kVar, VariableMode::kLet, and - // VariableMode::kConst modes. VariableMode::kDynamic variables are - // introduced during variable allocation, and VariableMode::kTemporary - // variables are allocated via NewTemporary(). + // This function handles VariableMode::kVar, VariableMode::kLet, + // VariableMode::kConst, and VariableMode::kUsing modes. + // VariableMode::kDynamic variables are introduced during variable allocation, + // and VariableMode::kTemporary variables are allocated via NewTemporary(). DCHECK(IsDeclaredVariableMode(mode)); DCHECK_IMPLIES(GetDeclarationScope()->is_being_lazily_parsed(), mode == VariableMode::kVar || mode == VariableMode::kLet || - mode == VariableMode::kConst); + mode == VariableMode::kConst || + mode == VariableMode::kUsing); DCHECK(!GetDeclarationScope()->was_lazily_parsed()); Variable* var = Declare(zone(), name, mode, kind, init_flag, kNotAssigned, was_added); diff --git a/deps/v8/src/ast/scopes.h b/deps/v8/src/ast/scopes.h index 751aaee3d11ecc..cea379caec837d 100644 --- a/deps/v8/src/ast/scopes.h +++ b/deps/v8/src/ast/scopes.h @@ -486,6 +486,7 @@ class V8_EXPORT_PRIVATE Scope : public NON_EXPORTED_BASE(ZoneObject) { switch (scope_type_) { case MODULE_SCOPE: case WITH_SCOPE: // DebugEvaluateContext as well + case SCRIPT_SCOPE: // Side data for const tracking let. return true; default: DCHECK_IMPLIES(sloppy_eval_can_extend_vars_, diff --git a/deps/v8/src/base/bounds.h b/deps/v8/src/base/bounds.h index 143ea82c5788af..85f7bba6896ad0 100644 --- a/deps/v8/src/base/bounds.h +++ b/deps/v8/src/base/bounds.h @@ -25,6 +25,20 @@ inline constexpr bool IsInRange(T value, U lower_limit, U higher_limit) { static_cast(lower_limit)); } +// Like IsInRange but for the half-open range [lower_limit, higher_limit). +template +inline constexpr bool IsInHalfOpenRange(T value, U lower_limit, + U higher_limit) { + DCHECK_LE(lower_limit, higher_limit); + static_assert(sizeof(U) <= sizeof(T)); + using unsigned_T = typename std::make_unsigned::type; + // Use static_cast to support enum classes. + return static_cast(static_cast(value) - + static_cast(lower_limit)) < + static_cast(static_cast(higher_limit) - + static_cast(lower_limit)); +} + // Checks if [index, index+length) is in range [0, max). Note that this check // works even if {index+length} would wrap around. 
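Editorial aside: IsInHalfOpenRange, added above, folds both bounds checks into one unsigned comparison: after shifting value and the upper bound down by lower_limit, a value below the lower bound wraps around to a huge unsigned number and fails the same test as a value at or above the upper bound. The same wraparound reasoning is what keeps the [index, index+length) check mentioned below safe. A small worked sketch of the trick, assuming lower_limit <= higher_limit as the DCHECK requires:

// Sketch of the single-comparison half-open range check.
#include <cassert>
#include <cstdint>

constexpr bool InHalfOpenRange(int32_t value, int32_t lo, int32_t hi) {
  // Subtract in unsigned arithmetic: values below `lo` wrap to large
  // unsigned numbers and fail the comparison, so one test covers both bounds.
  return static_cast<uint32_t>(value) - static_cast<uint32_t>(lo) <
         static_cast<uint32_t>(hi) - static_cast<uint32_t>(lo);
}

int main() {
  assert(InHalfOpenRange(5, 0, 10));    // inside the range
  assert(!InHalfOpenRange(10, 0, 10));  // upper bound is exclusive
  assert(!InHalfOpenRange(-1, 0, 10));  // below: wraps to 0xFFFFFFFF
  return 0;
}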
template -#include - -#include "src/base/logging.h" +#include namespace v8 { namespace base { -// Specification: -// http://en.cppreference.com/w/cpp/utility/optional/in_place_t -struct in_place_t {}; - -// Specification: -// http://en.cppreference.com/w/cpp/utility/optional/nullopt_t -struct nullopt_t { - constexpr explicit nullopt_t(int) {} -}; - -// Specification: -// http://en.cppreference.com/w/cpp/utility/optional/in_place -constexpr in_place_t in_place = {}; - -// Specification: -// http://en.cppreference.com/w/cpp/utility/optional/nullopt -constexpr nullopt_t nullopt(0); - -// Forward declaration, which is referred by following helpers. -template -class Optional; - -namespace internal { - -template ::value> -struct OptionalStorageBase { - // Initializing |empty_| here instead of using default member initializing - // to avoid errors in g++ 4.8. - constexpr OptionalStorageBase() : empty_('\0') {} - - template - constexpr explicit OptionalStorageBase(in_place_t, Args&&... args) - : is_populated_(true), value_(std::forward(args)...) {} - - // When T is not trivially destructible we must call its - // destructor before deallocating its memory. - // Note that this hides the (implicitly declared) move constructor, which - // would be used for constexpr move constructor in OptionalStorage. - // It is needed iff T is trivially move constructible. However, the current - // is_trivially_{copy,move}_constructible implementation requires - // is_trivially_destructible (which looks a bug, cf: - // https://gcc.gnu.org/bugzilla/show_bug.cgi?id=51452 and - // http://cplusplus.github.io/LWG/lwg-active.html#2116), so it is not - // necessary for this case at the moment. Please see also the destructor - // comment in "is_trivially_destructible = true" specialization below. - ~OptionalStorageBase() { - if (is_populated_) value_.~T(); - } - - template - void Init(Args&&... args) { - DCHECK(!is_populated_); - ::new (&value_) T(std::forward(args)...); - is_populated_ = true; - } - - bool is_populated_ = false; - union { - // |empty_| exists so that the union will always be initialized, even when - // it doesn't contain a value. Union members must be initialized for the - // constructor to be 'constexpr'. - char empty_; - T value_; - }; -}; - -template -struct OptionalStorageBase { - // Initializing |empty_| here instead of using default member initializing - // to avoid errors in g++ 4.8. - constexpr OptionalStorageBase() : empty_('\0') {} - - template - constexpr explicit OptionalStorageBase(in_place_t, Args&&... args) - : is_populated_(true), value_(std::forward(args)...) {} - - // When T is trivially destructible (i.e. its destructor does nothing) there - // is no need to call it. Implicitly defined destructor is trivial, because - // both members (bool and union containing only variants which are trivially - // destructible) are trivially destructible. - // Explicitly-defaulted destructor is also trivial, but do not use it here, - // because it hides the implicit move constructor. It is needed to implement - // constexpr move constructor in OptionalStorage iff T is trivially move - // constructible. Note that, if T is trivially move constructible, the move - // constructor of OptionalStorageBase is also implicitly defined and it is - // trivially move constructor. 
If T is not trivially move constructible, - // "not declaring move constructor without destructor declaration" here means - // "delete move constructor", which works because any move constructor of - // OptionalStorage will not refer to it in that case. - - template - void Init(Args&&... args) { - DCHECK(!is_populated_); - ::new (&value_) T(std::forward(args)...); - is_populated_ = true; - } - - bool is_populated_ = false; - union { - // |empty_| exists so that the union will always be initialized, even when - // it doesn't contain a value. Union members must be initialized for the - // constructor to be 'constexpr'. - char empty_; - T value_; - }; -}; - -// Implement conditional constexpr copy and move constructors. These are -// constexpr if is_trivially_{copy,move}_constructible::value is true -// respectively. If each is true, the corresponding constructor is defined as -// "= default;", which generates a constexpr constructor (In this case, -// the condition of constexpr-ness is satisfied because the base class also has -// compiler generated constexpr {copy,move} constructors). Note that -// placement-new is prohibited in constexpr. -template ::value, - bool = std::is_trivially_move_constructible::value> -struct OptionalStorage : OptionalStorageBase { - // This is no trivially {copy,move} constructible case. Other cases are - // defined below as specializations. - - // Accessing the members of template base class requires explicit - // declaration. - using OptionalStorageBase::is_populated_; - using OptionalStorageBase::value_; - using OptionalStorageBase::Init; - - // Inherit constructors (specifically, the in_place constructor). - using OptionalStorageBase::OptionalStorageBase; - - // User defined constructor deletes the default constructor. - // Define it explicitly. - OptionalStorage() = default; - - OptionalStorage(const OptionalStorage& other) V8_NOEXCEPT { - if (other.is_populated_) Init(other.value_); - } - - OptionalStorage(OptionalStorage&& other) V8_NOEXCEPT { - if (other.is_populated_) Init(std::move(other.value_)); - } -}; - -template -struct OptionalStorage - : OptionalStorageBase { - using OptionalStorageBase::is_populated_; - using OptionalStorageBase::value_; - using OptionalStorageBase::Init; - using OptionalStorageBase::OptionalStorageBase; - - OptionalStorage() = default; - OptionalStorage(const OptionalStorage& other) V8_NOEXCEPT = default; - - OptionalStorage(OptionalStorage&& other) V8_NOEXCEPT { - if (other.is_populated_) Init(std::move(other.value_)); - } -}; - -template -struct OptionalStorage - : OptionalStorageBase { - using OptionalStorageBase::is_populated_; - using OptionalStorageBase::value_; - using OptionalStorageBase::Init; - using OptionalStorageBase::OptionalStorageBase; - - OptionalStorage() = default; - OptionalStorage(OptionalStorage&& other) V8_NOEXCEPT = default; - - OptionalStorage(const OptionalStorage& other) V8_NOEXCEPT { - if (other.is_populated_) Init(other.value_); - } -}; - -template -struct OptionalStorage - : OptionalStorageBase { - // If both trivially {copy,move} constructible are true, it is not necessary - // to use user-defined constructors. So, just inheriting constructors - // from the base class works. - using OptionalStorageBase::OptionalStorageBase; -}; - -// Base class to support conditionally usable copy-/move- constructors -// and assign operators. +// These aliases are deprecated, use std::optional directly. 
template -class OptionalBase { - // This class provides implementation rather than public API, so everything - // should be hidden. Often we use composition, but we cannot in this case - // because of C++ language restriction. - protected: - constexpr OptionalBase() = default; - constexpr OptionalBase(const OptionalBase& other) V8_NOEXCEPT = default; - constexpr OptionalBase(OptionalBase&& other) V8_NOEXCEPT = default; - - template - constexpr explicit OptionalBase(in_place_t, Args&&... args) - : storage_(in_place, std::forward(args)...) {} - - // Implementation of converting constructors. - template - explicit OptionalBase(const OptionalBase& other) V8_NOEXCEPT { - if (other.storage_.is_populated_) storage_.Init(other.storage_.value_); - } - - template - explicit OptionalBase(OptionalBase&& other) V8_NOEXCEPT { - if (other.storage_.is_populated_) - storage_.Init(std::move(other.storage_.value_)); - } - - ~OptionalBase() = default; - - OptionalBase& operator=(const OptionalBase& other) V8_NOEXCEPT { - CopyAssign(other); - return *this; - } - - OptionalBase& operator=(OptionalBase&& other) V8_NOEXCEPT { - MoveAssign(std::move(other)); - return *this; - } - - template - void CopyAssign(const OptionalBase& other) { - if (other.storage_.is_populated_) - InitOrAssign(other.storage_.value_); - else - FreeIfNeeded(); - } - - template - void MoveAssign(OptionalBase&& other) { - if (other.storage_.is_populated_) - InitOrAssign(std::move(other.storage_.value_)); - else - FreeIfNeeded(); - } - - template - void InitOrAssign(U&& value) { - if (storage_.is_populated_) - storage_.value_ = std::forward(value); - else - storage_.Init(std::forward(value)); - } - - void FreeIfNeeded() { - if (!storage_.is_populated_) return; - storage_.value_.~T(); - storage_.is_populated_ = false; - } - - // For implementing conversion, allow access to other typed OptionalBase - // class. - template - friend class OptionalBase; - - OptionalStorage storage_; -}; - -// The following {Copy,Move}{Constructible,Assignable} structs are helpers to -// implement constructor/assign-operator overloading. Specifically, if T is -// is not movable but copyable, Optional's move constructor should not -// participate in overload resolution. This inheritance trick implements that. 
-template -struct CopyConstructible {}; - -template <> -struct CopyConstructible { - constexpr CopyConstructible() = default; - constexpr CopyConstructible(const CopyConstructible&) V8_NOEXCEPT = delete; - constexpr CopyConstructible(CopyConstructible&&) V8_NOEXCEPT = default; - CopyConstructible& operator=(const CopyConstructible&) V8_NOEXCEPT = default; - CopyConstructible& operator=(CopyConstructible&&) V8_NOEXCEPT = default; -}; - -template -struct MoveConstructible {}; - -template <> -struct MoveConstructible { - constexpr MoveConstructible() = default; - constexpr MoveConstructible(const MoveConstructible&) V8_NOEXCEPT = default; - constexpr MoveConstructible(MoveConstructible&&) V8_NOEXCEPT = delete; - MoveConstructible& operator=(const MoveConstructible&) V8_NOEXCEPT = default; - MoveConstructible& operator=(MoveConstructible&&) V8_NOEXCEPT = default; -}; - -template -struct CopyAssignable {}; - -template <> -struct CopyAssignable { - constexpr CopyAssignable() = default; - constexpr CopyAssignable(const CopyAssignable&) V8_NOEXCEPT = default; - constexpr CopyAssignable(CopyAssignable&&) V8_NOEXCEPT = default; - CopyAssignable& operator=(const CopyAssignable&) V8_NOEXCEPT = delete; - CopyAssignable& operator=(CopyAssignable&&) V8_NOEXCEPT = default; -}; - -template -struct MoveAssignable {}; - -template <> -struct MoveAssignable { - constexpr MoveAssignable() = default; - constexpr MoveAssignable(const MoveAssignable&) V8_NOEXCEPT = default; - constexpr MoveAssignable(MoveAssignable&&) V8_NOEXCEPT = default; - MoveAssignable& operator=(const MoveAssignable&) V8_NOEXCEPT = default; - MoveAssignable& operator=(MoveAssignable&&) V8_NOEXCEPT = delete; -}; - -// Helper to conditionally enable converting constructors and assign operators. -template -struct IsConvertibleFromOptional - : std::integral_constant< - bool, std::is_constructible&>::value || - std::is_constructible&>::value || - std::is_constructible&&>::value || - std::is_constructible&&>::value || - std::is_convertible&, T>::value || - std::is_convertible&, T>::value || - std::is_convertible&&, T>::value || - std::is_convertible&&, T>::value> {}; - -template -struct IsAssignableFromOptional - : std::integral_constant< - bool, IsConvertibleFromOptional::value || - std::is_assignable&>::value || - std::is_assignable&>::value || - std::is_assignable&&>::value || - std::is_assignable&&>::value> {}; - -// Forward compatibility for C++17. -// Introduce one more deeper nested namespace to avoid leaking using std::swap. -namespace swappable_impl { -using std::swap; - -struct IsSwappableImpl { - // Tests if swap can be called. Check(0) returns true_type iff swap - // is available for T. Otherwise, Check's overload resolution falls back - // to Check(...) declared below thanks to SFINAE, so returns false_type. - template - static auto Check(int i) - -> decltype(swap(std::declval(), std::declval()), std::true_type()); - - template - static std::false_type Check(...); -}; -} // namespace swappable_impl - -template -struct IsSwappable : decltype(swappable_impl::IsSwappableImpl::Check(0)) {}; - -// Forward compatibility for C++20. -template -using RemoveCvRefT = - typename std::remove_cv::type>::type; - -} // namespace internal - -// On Windows, by default, empty-base class optimization does not work, -// which means even if the base class is empty struct, it still consumes one -// byte for its body. __declspec(empty_bases) enables the optimization. 
-// cf) -// https://blogs.msdn.microsoft.com/vcblog/2016/03/30/optimizing-the-layout-of-empty-base-classes-in-vs2015-update-2-3/ -#ifdef OS_WIN -#define OPTIONAL_DECLSPEC_EMPTY_BASES __declspec(empty_bases) -#else -#define OPTIONAL_DECLSPEC_EMPTY_BASES -#endif - -// base::Optional is a Chromium version of the C++17 optional class: -// std::optional documentation: -// http://en.cppreference.com/w/cpp/utility/optional -// Chromium documentation: -// https://chromium.googlesource.com/chromium/src/+/master/docs/optional.md -// -// These are the differences between the specification and the implementation: -// - Constructors do not use 'constexpr' as it is a C++14 extension. -// - 'constexpr' might be missing in some places for reasons specified locally. -// - No exceptions are thrown, because they are banned from Chromium. -// All copy/move constructors or assignment operators are marked V8_NOEXCEPT. -// - All the non-members are in the 'base' namespace instead of 'std'. -// -// Note that T cannot have a constructor T(Optional) etc. Optional checks -// T's constructor (specifically via IsConvertibleFromOptional), and in the -// check whether T can be constructible from Optional, which is recursive -// so it does not work. As of Feb 2018, std::optional C++17 implementation in -// both clang and gcc has same limitation. MSVC SFINAE looks to have different -// behavior, but anyway it reports an error, too. -template -class OPTIONAL_DECLSPEC_EMPTY_BASES Optional - : public internal::OptionalBase, - public internal::CopyConstructible::value>, - public internal::MoveConstructible::value>, - public internal::CopyAssignable::value && - std::is_copy_assignable::value>, - public internal::MoveAssignable::value && - std::is_move_assignable::value> { - public: -#undef OPTIONAL_DECLSPEC_EMPTY_BASES - using value_type = T; - - // Defer default/copy/move constructor implementation to OptionalBase. - constexpr Optional() = default; - constexpr Optional(const Optional& other) V8_NOEXCEPT = default; - constexpr Optional(Optional&& other) V8_NOEXCEPT = default; - - constexpr Optional(nullopt_t) {} // NOLINT(runtime/explicit) - - // Converting copy constructor. "explicit" only if - // std::is_convertible::value is false. It is implemented by - // declaring two almost same constructors, but that condition in enable_if - // is different, so that either one is chosen, thanks to SFINAE. - template ::value && - !internal::IsConvertibleFromOptional::value && - std::is_convertible::value, - bool>::type = false> - Optional(const Optional& other) V8_NOEXCEPT - : internal::OptionalBase(other) {} - - template ::value && - !internal::IsConvertibleFromOptional::value && - !std::is_convertible::value, - bool>::type = false> - explicit Optional(const Optional& other) V8_NOEXCEPT - : internal::OptionalBase(other) {} - - // Converting move constructor. Similar to converting copy constructor, - // declaring two (explicit and non-explicit) constructors. - template ::value && - !internal::IsConvertibleFromOptional::value && - std::is_convertible::value, - bool>::type = false> - Optional(Optional&& other) V8_NOEXCEPT - : internal::OptionalBase(std::move(other)) {} - - template ::value && - !internal::IsConvertibleFromOptional::value && - !std::is_convertible::value, - bool>::type = false> - explicit Optional(Optional&& other) V8_NOEXCEPT - : internal::OptionalBase(std::move(other)) {} - - template - constexpr explicit Optional(in_place_t, Args&&... args) - : internal::OptionalBase(in_place, std::forward(args)...) 
{} - - template &, Args...>::value>::type> - constexpr explicit Optional(in_place_t, std::initializer_list il, - Args&&... args) - : internal::OptionalBase(in_place, il, std::forward(args)...) {} - - // Forward value constructor. Similar to converting constructors, - // conditionally explicit. - template < - typename U = value_type, - typename std::enable_if< - std::is_constructible::value && - !std::is_same, in_place_t>::value && - !std::is_same, Optional>::value && - std::is_convertible::value, - bool>::type = false> - constexpr Optional(U&& value) // NOLINT(runtime/explicit) - : internal::OptionalBase(in_place, std::forward(value)) {} - - template < - typename U = value_type, - typename std::enable_if< - std::is_constructible::value && - !std::is_same, in_place_t>::value && - !std::is_same, Optional>::value && - !std::is_convertible::value, - bool>::type = false> - constexpr explicit Optional(U&& value) - : internal::OptionalBase(in_place, std::forward(value)) {} - - ~Optional() = default; - - // Defer copy-/move- assign operator implementation to OptionalBase. - Optional& operator=(const Optional& other) V8_NOEXCEPT = default; - Optional& operator=(Optional&& other) V8_NOEXCEPT = default; - - Optional& operator=(nullopt_t) { - FreeIfNeeded(); - return *this; - } - - // Perfect-forwarded assignment. - template - typename std::enable_if< - !std::is_same, Optional>::value && - std::is_constructible::value && - std::is_assignable::value && - (!std::is_scalar::value || - !std::is_same::type, T>::value), - Optional&>::type - operator=(U&& value) V8_NOEXCEPT { - InitOrAssign(std::forward(value)); - return *this; - } - - // Copy assign the state of other. - template - typename std::enable_if::value && - std::is_constructible::value && - std::is_assignable::value, - Optional&>::type - operator=(const Optional& other) V8_NOEXCEPT { - CopyAssign(other); - return *this; - } - - // Move assign the state of other. 
- template - typename std::enable_if::value && - std::is_constructible::value && - std::is_assignable::value, - Optional&>::type - operator=(Optional&& other) V8_NOEXCEPT { - MoveAssign(std::move(other)); - return *this; - } - - constexpr const T* operator->() const { - DCHECK(storage_.is_populated_); - return &storage_.value_; - } - - constexpr T* operator->() { - DCHECK(storage_.is_populated_); - return &storage_.value_; - } - - constexpr const T& operator*() const& { - DCHECK(storage_.is_populated_); - return storage_.value_; - } - - constexpr T& operator*() & { - DCHECK(storage_.is_populated_); - return storage_.value_; - } - - constexpr const T&& operator*() const&& { - DCHECK(storage_.is_populated_); - return std::move(storage_.value_); - } - - constexpr T&& operator*() && { - DCHECK(storage_.is_populated_); - return std::move(storage_.value_); - } - - constexpr explicit operator bool() const { return storage_.is_populated_; } - - constexpr bool has_value() const { return storage_.is_populated_; } - - T& value() & { - CHECK(storage_.is_populated_); - return storage_.value_; - } - - const T& value() const & { - CHECK(storage_.is_populated_); - return storage_.value_; - } - - T&& value() && { - CHECK(storage_.is_populated_); - return std::move(storage_.value_); - } - - const T&& value() const && { - CHECK(storage_.is_populated_); - return std::move(storage_.value_); - } - - template - constexpr T value_or(U&& default_value) const & { - // TODO(mlamouri): add the following assert when possible: - // static_assert(std::is_copy_constructible::value, - // "T must be copy constructible"); - static_assert(std::is_convertible::value, - "U must be convertible to T"); - return storage_.is_populated_ - ? storage_.value_ - : static_cast(std::forward(default_value)); - } - - template - constexpr T value_or(U&& default_value) && { - // TODO(mlamouri): add the following assert when possible: - // static_assert(std::is_move_constructible::value, - // "T must be move constructible"); - static_assert(std::is_convertible::value, - "U must be convertible to T"); - return storage_.is_populated_ - ? std::move(storage_.value_) - : static_cast(std::forward(default_value)); - } - - void swap(Optional& other) { - if (!storage_.is_populated_ && !other.storage_.is_populated_) return; - - if (storage_.is_populated_ != other.storage_.is_populated_) { - if (storage_.is_populated_) { - other.storage_.Init(std::move(storage_.value_)); - FreeIfNeeded(); - } else { - storage_.Init(std::move(other.storage_.value_)); - other.FreeIfNeeded(); - } - return; - } - - DCHECK(storage_.is_populated_ && other.storage_.is_populated_); - using std::swap; - swap(**this, *other); - } - - void reset() { FreeIfNeeded(); } - - template - T& emplace(Args&&... args) { - FreeIfNeeded(); - storage_.Init(std::forward(args)...); - return storage_.value_; - } - - template - typename std::enable_if< - std::is_constructible&, Args&&...>::value, - T&>::type - emplace(std::initializer_list il, Args&&... args) { - FreeIfNeeded(); - storage_.Init(il, std::forward(args)...); - return storage_.value_; - } - - private: - // Accessing template base class's protected member needs explicit - // declaration to do so. - using internal::OptionalBase::CopyAssign; - using internal::OptionalBase::FreeIfNeeded; - using internal::OptionalBase::InitOrAssign; - using internal::OptionalBase::MoveAssign; - using internal::OptionalBase::storage_; -}; - -// Here after defines comparation operators. 
The definition follows -// http://en.cppreference.com/w/cpp/utility/optional/operator_cmp -// while bool() casting is replaced by has_value() to meet the chromium -// style guide. -template -bool operator==(const Optional& lhs, const Optional& rhs) { - if (lhs.has_value() != rhs.has_value()) return false; - if (!lhs.has_value()) return true; - return *lhs == *rhs; -} - -template -bool operator!=(const Optional& lhs, const Optional& rhs) { - if (lhs.has_value() != rhs.has_value()) return true; - if (!lhs.has_value()) return false; - return *lhs != *rhs; -} - -template -bool operator<(const Optional& lhs, const Optional& rhs) { - if (!rhs.has_value()) return false; - if (!lhs.has_value()) return true; - return *lhs < *rhs; -} - -template -bool operator<=(const Optional& lhs, const Optional& rhs) { - if (!lhs.has_value()) return true; - if (!rhs.has_value()) return false; - return *lhs <= *rhs; -} - -template -bool operator>(const Optional& lhs, const Optional& rhs) { - if (!lhs.has_value()) return false; - if (!rhs.has_value()) return true; - return *lhs > *rhs; -} - -template -bool operator>=(const Optional& lhs, const Optional& rhs) { - if (!rhs.has_value()) return true; - if (!lhs.has_value()) return false; - return *lhs >= *rhs; -} - -template -constexpr bool operator==(const Optional& opt, nullopt_t) { - return !opt; -} - -template -constexpr bool operator==(nullopt_t, const Optional& opt) { - return !opt; -} - -template -constexpr bool operator!=(const Optional& opt, nullopt_t) { - return opt.has_value(); -} - -template -constexpr bool operator!=(nullopt_t, const Optional& opt) { - return opt.has_value(); -} - -template -constexpr bool operator<(const Optional& opt, nullopt_t) { - return false; -} - -template -constexpr bool operator<(nullopt_t, const Optional& opt) { - return opt.has_value(); -} - -template -constexpr bool operator<=(const Optional& opt, nullopt_t) { - return !opt; -} - -template -constexpr bool operator<=(nullopt_t, const Optional& opt) { - return true; -} - -template -constexpr bool operator>(const Optional& opt, nullopt_t) { - return opt.has_value(); -} - -template -constexpr bool operator>(nullopt_t, const Optional& opt) { - return false; -} - -template -constexpr bool operator>=(const Optional& opt, nullopt_t) { - return true; -} - -template -constexpr bool operator>=(nullopt_t, const Optional& opt) { - return !opt; -} - -template -constexpr bool operator==(const Optional& opt, const U& value) { - return opt.has_value() ? *opt == value : false; -} - -template -constexpr bool operator==(const U& value, const Optional& opt) { - return opt.has_value() ? value == *opt : false; -} - -template -constexpr bool operator!=(const Optional& opt, const U& value) { - return opt.has_value() ? *opt != value : true; -} - -template -constexpr bool operator!=(const U& value, const Optional& opt) { - return opt.has_value() ? value != *opt : true; -} - -template -constexpr bool operator<(const Optional& opt, const U& value) { - return opt.has_value() ? *opt < value : true; -} - -template -constexpr bool operator<(const U& value, const Optional& opt) { - return opt.has_value() ? value < *opt : false; -} - -template -constexpr bool operator<=(const Optional& opt, const U& value) { - return opt.has_value() ? *opt <= value : true; -} - -template -constexpr bool operator<=(const U& value, const Optional& opt) { - return opt.has_value() ? value <= *opt : false; -} - -template -constexpr bool operator>(const Optional& opt, const U& value) { - return opt.has_value() ? 
*opt > value : false; -} - -template -constexpr bool operator>(const U& value, const Optional& opt) { - return opt.has_value() ? value > *opt : true; -} - -template -constexpr bool operator>=(const Optional& opt, const U& value) { - return opt.has_value() ? *opt >= value : false; -} - -template -constexpr bool operator>=(const U& value, const Optional& opt) { - return opt.has_value() ? value >= *opt : true; -} - -template -constexpr Optional::type> make_optional(T&& value) { - return Optional::type>(std::forward(value)); -} - -template -constexpr Optional make_optional(Args&&... args) { - return Optional(in_place, std::forward(args)...); -} - -template -constexpr Optional make_optional(std::initializer_list il, - Args&&... args) { - return Optional(in_place, il, std::forward(args)...); -} +using Optional [[deprecated]] = std::optional; -// Partial specialization for a function template is not allowed. Also, it is -// not allowed to add overload function to std namespace, while it is allowed -// to specialize the template in std. Thus, swap() (kind of) overloading is -// defined in base namespace, instead. -template -typename std::enable_if::value && - internal::IsSwappable::value>::type -swap(Optional& lhs, Optional& rhs) { - lhs.swap(rhs); -} +using std::in_place; +using std::make_optional; +using std::nullopt; +using std::nullopt_t; } // namespace base } // namespace v8 diff --git a/deps/v8/src/base/platform/condition-variable.cc b/deps/v8/src/base/platform/condition-variable.cc index b7b21c99473b36..7c5d8f7f27a4c8 100644 --- a/deps/v8/src/base/platform/condition-variable.cc +++ b/deps/v8/src/base/platform/condition-variable.cc @@ -190,7 +190,7 @@ void ConditionVariable::Wait(Mutex* mutex) { } bool ConditionVariable::WaitFor(Mutex* mutex, const TimeDelta& rel_time) { - SbTime microseconds = static_cast(rel_time.InMicroseconds()); + int64_t microseconds = static_cast(rel_time.InMicroseconds()); SbConditionVariableResult result = SbConditionVariableWaitTimed( &native_handle_, &mutex->native_handle(), microseconds); DCHECK(result != kSbConditionVariableFailed); diff --git a/deps/v8/src/base/platform/memory.h b/deps/v8/src/base/platform/memory.h index 48b0d70ec16b88..77e3fe9d4d9acc 100644 --- a/deps/v8/src/base/platform/memory.h +++ b/deps/v8/src/base/platform/memory.h @@ -83,8 +83,6 @@ inline void* AlignedAlloc(size_t size, size_t alignment) { // posix_memalign is not exposed in some Android versions, so we fall back to // memalign. See http://code.google.com/p/android/issues/detail?id=35391. return memalign(alignment, size); -#elif V8_OS_STARBOARD - return SbMemoryAllocateAligned(alignment, size); #else // POSIX void* ptr; if (posix_memalign(&ptr, alignment, size)) ptr = nullptr; @@ -95,8 +93,6 @@ inline void* AlignedAlloc(size_t size, size_t alignment) { inline void AlignedFree(void* ptr) { #if V8_OS_WIN _aligned_free(ptr); -#elif V8_OS_STARBOARD - SbMemoryFreeAligned(ptr); #else // Using regular Free() is not correct in general. For most platforms, // including V8_LIBC_BIONIC, it is though. diff --git a/deps/v8/src/base/platform/platform-posix.cc b/deps/v8/src/base/platform/platform-posix.cc index 116ba4f9d9fda8..529a073040af1f 100644 --- a/deps/v8/src/base/platform/platform-posix.cc +++ b/deps/v8/src/base/platform/platform-posix.cc @@ -967,6 +967,7 @@ void OS::PrintError(const char* format, ...) 
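The optional.h diff above reduces v8::base::Optional to a deprecated alias of std::optional and re-exports std::in_place, std::make_optional and std::nullopt. A minimal sketch of what that means for call sites follows; the exact alias signature (its template parameter is elided in the text above) and the sample function are illustrative assumptions, not part of the patch.

// Sketch only: existing base::Optional call sites keep compiling through the
// deprecated alias, while new code uses std::optional directly.
#include <optional>
#include <string>

std::optional<std::string> FindName(int id) {      // hypothetical helper
  if (id == 0) return std::nullopt;                // std::nullopt replaces base::nullopt
  return std::make_optional<std::string>("v8");
}

int main() {
  auto name = FindName(1);
  // Comparison semantics are unchanged: an engaged optional compares by value,
  // a disengaged one only equals another disengaged optional or nullopt.
  return (name.has_value() && *name == "v8" && FindName(0) == std::nullopt) ? 0 : 1;
}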
{ va_start(args, format); VPrintError(format, args); va_end(args); + fflush(stderr); } diff --git a/deps/v8/src/base/platform/platform-starboard.cc b/deps/v8/src/base/platform/platform-starboard.cc index 1550a214d8c1c6..af257a2d00081c 100644 --- a/deps/v8/src/base/platform/platform-starboard.cc +++ b/deps/v8/src/base/platform/platform-starboard.cc @@ -6,6 +6,9 @@ // abstraction layer for Cobalt, an HTML5 container used mainly by YouTube // apps in the living room. +#include +#include + #include "src/base/lazy-instance.h" #include "src/base/macros.h" #include "src/base/platform/platform.h" @@ -16,10 +19,9 @@ #include "starboard/common/condition_variable.h" #include "starboard/common/log.h" #include "starboard/common/string.h" +#include "starboard/common/time.h" #include "starboard/configuration.h" #include "starboard/configuration_constants.h" -#include "starboard/memory.h" -#include "starboard/time.h" #include "starboard/time_zone.h" namespace v8 { @@ -82,18 +84,11 @@ void OS::Initialize(AbortMode abort_mode, const char* const gc_fake_mmap) { } int OS::GetUserTime(uint32_t* secs, uint32_t* usecs) { -#if SB_API_VERSION >= 12 - if (!SbTimeIsTimeThreadNowSupported()) return -1; -#endif - -#if SB_API_VERSION >= 12 || SB_HAS(TIME_THREAD_NOW) - SbTimeMonotonic thread_now = SbTimeGetMonotonicThreadNow(); - *secs = thread_now / kSbTimeSecond; - *usecs = thread_now % kSbTimeSecond; + const int64_t us_time = starboard::CurrentMonotonicThreadTime(); + if (us_time == 0) return -1; + *secs = us_time / TimeConstants::kMicroSecondsPerSecond; + *usecs = us_time % TimeConstants::kMicroSecondsPerSecond; return 0; -#else - return -1; -#endif } double OS::TimeCurrentMillis() { return Time::Now().ToJsTime(); } @@ -130,13 +125,13 @@ void OS::SetRandomMmapSeed(int64_t seed) { SB_NOTIMPLEMENTED(); } void* OS::GetRandomMmapAddr() { return nullptr; } void* Allocate(void* address, size_t size, OS::MemoryPermission access) { - SbMemoryMapFlags sb_flags; + int prot_flags; switch (access) { case OS::MemoryPermission::kNoAccess: - sb_flags = SbMemoryMapFlags(0); + prot_flags = PROT_NONE; break; case OS::MemoryPermission::kReadWrite: - sb_flags = SbMemoryMapFlags(kSbMemoryMapProtectReadWrite); + prot_flags = PROT_READ | PROT_WRITE; break; default: SB_LOG(ERROR) << "The requested memory allocation access is not" @@ -144,8 +139,8 @@ void* Allocate(void* address, size_t size, OS::MemoryPermission access) { << static_cast(access); return nullptr; } - void* result = SbMemoryMap(size, sb_flags, "v8::Base::Allocate"); - if (result == SB_MEMORY_MAP_FAILED) { + void* result = mmap(nullptr, size, prot_flags, MAP_PRIVATE | MAP_ANON, -1, 0); + if (result == MAP_FAILED) { return nullptr; } return result; @@ -188,30 +183,29 @@ void* OS::Allocate(void* address, size_t size, size_t alignment, // static void OS::Free(void* address, const size_t size) { - CHECK(SbMemoryUnmap(address, size)); + CHECK_EQ(munmap(address, size), 0); } // static void OS::Release(void* address, size_t size) { - CHECK(SbMemoryUnmap(address, size)); + CHECK_EQ(munmap(address, size), 0); } // static bool OS::SetPermissions(void* address, size_t size, MemoryPermission access) { - SbMemoryMapFlags new_protection; + int new_protection; switch (access) { case OS::MemoryPermission::kNoAccess: - new_protection = SbMemoryMapFlags(0); + new_protection = PROT_NONE; break; case OS::MemoryPermission::kRead: - new_protection = SbMemoryMapFlags(kSbMemoryMapProtectRead); + new_protection = PROT_READ; case OS::MemoryPermission::kReadWrite: - new_protection = 
SbMemoryMapFlags(kSbMemoryMapProtectReadWrite); + new_protection = PROT_READ | PROT_WRITE; break; case OS::MemoryPermission::kReadExecute: #if SB_CAN(MAP_EXECUTABLE_MEMORY) - new_protection = - SbMemoryMapFlags(kSbMemoryMapProtectRead | kSbMemoryMapProtectExec); + new_protection = PROT_READ | PROT_EXEC; #else UNREACHABLE(); #endif @@ -220,7 +214,7 @@ bool OS::SetPermissions(void* address, size_t size, MemoryPermission access) { // All other types are not supported by Starboard. return false; } - return SbMemoryProtect(address, size, new_protection); + return mprotect(address, size, new_protection) == 0; } // static @@ -348,7 +342,7 @@ int OS::SNPrintF(char* str, int length, const char* format, ...) { } int OS::VSNPrintF(char* str, int length, const char* format, va_list args) { - int n = SbStringFormat(str, length, format, args); + int n = vsnprintf(str, length, format, args); if (n < 0 || n >= length) { // If the length is zero, the assignment fails. if (length > 0) str[length - 1] = '\0'; @@ -363,7 +357,7 @@ int OS::VSNPrintF(char* str, int length, const char* format, va_list args) { // void OS::StrNCpy(char* dest, int length, const char* src, size_t n) { - SbStringCopy(dest, src, n); + strncpy(dest, src, n); } // ---------------------------------------------------------------------------- @@ -448,14 +442,18 @@ class StarboardDefaultTimezoneCache : public StarboardTimezoneCache { return SbTimeZoneGetName(); } double LocalTimeOffset(double time_ms, bool is_utc) override { - // SbTimeZOneGetCurrent returns an offset west of Greenwich, which has the + // SbTimeZoneGetCurrent returns an offset west of Greenwich, which has the // opposite sign V8 expects. // The starboard function returns offset in minutes. We convert to return // value in milliseconds. return SbTimeZoneGetCurrent() * 60.0 * msPerSecond * (-1); } double DaylightSavingsOffset(double time_ms) override { - EzTimeValue value = EzTimeValueFromSbTime(SbTimeGetNow()); + int64_t posix_microseconds = starboard::CurrentPosixTime(); + EzTimeValue value = { + posix_microseconds / TimeConstants::kMicroSecondsPerSecond, + (int32_t)(posix_microseconds % TimeConstants::kMicroSecondsPerSecond) + }; EzTimeExploded ez_exploded; bool result = EzTimeValueExplode(&value, kEzTimeZoneLocal, &ez_exploded, NULL); @@ -489,6 +487,12 @@ bool OS::DiscardSystemPages(void* address, size_t size) { return true; } +// static +Stack::StackSlot Stack::GetStackStart() { + SB_NOTIMPLEMENTED(); + return nullptr; +} + // static Stack::StackSlot Stack::GetCurrentStackPosition() { void* addresses[kStackSize]; diff --git a/deps/v8/src/base/platform/platform-win32.cc b/deps/v8/src/base/platform/platform-win32.cc index a5558c738ba839..c23b399e091dc4 100644 --- a/deps/v8/src/base/platform/platform-win32.cc +++ b/deps/v8/src/base/platform/platform-win32.cc @@ -702,6 +702,7 @@ void OS::PrintError(const char* format, ...) 
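The platform-starboard.cc hunks above replace the SbMemory* wrappers with plain POSIX calls. Below is a standalone sketch of the mmap/mprotect/munmap pattern those hunks implement; the size and the written value are illustrative, and the real code derives its flags from OS::MemoryPermission.

// Sketch: reserve with no access, commit read/write, then release.
#include <sys/mman.h>
#include <cstddef>

int main() {
  const size_t size = 1 << 20;  // 1 MiB, illustrative
  // Reserve address space, like OS::MemoryPermission::kNoAccess.
  void* region = mmap(nullptr, size, PROT_NONE, MAP_PRIVATE | MAP_ANON, -1, 0);
  if (region == MAP_FAILED) return 1;
  // Commit it read/write, like SetPermissions(..., kReadWrite).
  if (mprotect(region, size, PROT_READ | PROT_WRITE) != 0) return 1;
  static_cast<char*>(region)[0] = 42;  // now writable
  // Release the mapping, like OS::Free / OS::Release.
  if (munmap(region, size) != 0) return 1;
  return 0;
}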
{ va_start(args, format); VPrintError(format, args); va_end(args); + fflush(stderr); } diff --git a/deps/v8/src/base/platform/semaphore.cc b/deps/v8/src/base/platform/semaphore.cc index 3e9f6334d9c4c3..2ac431dbf60b98 100644 --- a/deps/v8/src/base/platform/semaphore.cc +++ b/deps/v8/src/base/platform/semaphore.cc @@ -170,7 +170,7 @@ void Semaphore::Signal() { native_handle_.Put(); } void Semaphore::Wait() { native_handle_.Take(); } bool Semaphore::WaitFor(const TimeDelta& rel_time) { - SbTime microseconds = rel_time.InMicroseconds(); + int64_t microseconds = rel_time.InMicroseconds(); return native_handle_.TakeWait(microseconds); } diff --git a/deps/v8/src/base/platform/time.cc b/deps/v8/src/base/platform/time.cc index b6da0a690c0e14..dda0823c6597b5 100644 --- a/deps/v8/src/base/platform/time.cc +++ b/deps/v8/src/base/platform/time.cc @@ -22,6 +22,10 @@ #include #endif +#if V8_OS_STARBOARD +#include +#endif // V8_OS_STARBOARD + #include #include @@ -41,7 +45,7 @@ #include "src/base/platform/platform.h" #if V8_OS_STARBOARD -#include "starboard/time.h" +#include "starboard/common/time.h" #endif namespace { @@ -402,7 +406,7 @@ FILETIME Time::ToFiletime() const { return ft; } -#elif V8_OS_POSIX +#elif V8_OS_POSIX || V8_OS_STARBOARD Time Time::Now() { struct timeval tv; @@ -482,13 +486,7 @@ struct timeval Time::ToTimeval() const { return tv; } -#elif V8_OS_STARBOARD - -Time Time::Now() { return Time(SbTimeToPosix(SbTimeGetNow())); } - -Time Time::NowFromSystemTime() { return Now(); } - -#endif // V8_OS_STARBOARD +#endif // V8_OS_POSIX || V8_OS_STARBOARD Time Time::FromJsTime(double ms_since_epoch) { // The epoch is a valid time, so this constructor doesn't interpret @@ -753,7 +751,7 @@ TimeTicks TimeTicks::Now() { #elif V8_OS_POSIX ticks = ClockNow(CLOCK_MONOTONIC); #elif V8_OS_STARBOARD - ticks = SbTimeGetMonotonicNow(); + ticks = starboard::CurrentMonotonicTime(); #else #error platform does not implement TimeTicks::Now. 
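OS::VSNPrintF in the Starboard port shown a little earlier now delegates to plain vsnprintf. A small standalone sketch of the truncation contract it relies on follows; the function name and buffer size here are illustrative.

// Sketch: vsnprintf returns the length the output would have had, so a return
// value >= the buffer length signals truncation; terminate by hand and report
// failure, mirroring the -1 convention used above.
#include <cstdarg>
#include <cstdio>

int SafeFormat(char* str, int length, const char* format, ...) {
  va_list args;
  va_start(args, format);
  int n = vsnprintf(str, length, format, args);
  va_end(args);
  if (n < 0 || n >= length) {
    if (length > 0) str[length - 1] = '\0';  // keep the result usable
    return -1;                               // caller sees "did not fit"
  }
  return n;
}

int main() {
  char buf[8];
  return SafeFormat(buf, sizeof(buf), "%s", "a rather long string") == -1 ? 0 : 1;
}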
#endif // V8_OS_DARWIN @@ -780,13 +778,7 @@ bool TimeTicks::IsHighResolution() { bool ThreadTicks::IsSupported() { #if V8_OS_STARBOARD -#if SB_API_VERSION >= 12 - return SbTimeIsTimeThreadNowSupported(); -#elif SB_HAS(TIME_THREAD_NOW) - return true; -#else - return false; -#endif + return starboard::CurrentMonotonicThreadTime() != 0; #elif defined(__PASE__) // Thread CPU time accounting is unavailable in PASE return false; @@ -803,15 +795,10 @@ bool ThreadTicks::IsSupported() { ThreadTicks ThreadTicks::Now() { #if V8_OS_STARBOARD -#if SB_API_VERSION >= 12 - if (SbTimeIsTimeThreadNowSupported()) - return ThreadTicks(SbTimeGetMonotonicThreadNow()); - UNREACHABLE(); -#elif SB_HAS(TIME_THREAD_NOW) - return ThreadTicks(SbTimeGetMonotonicThreadNow()); -#else + const int64_t now = starboard::CurrentMonotonicThreadTime(); + if (now != 0) + return ThreadTicks(now); UNREACHABLE(); -#endif #elif V8_OS_DARWIN return ThreadTicks(ComputeThreadTicks()); #elif V8_OS_FUCHSIA diff --git a/deps/v8/src/baseline/baseline-compiler.cc b/deps/v8/src/baseline/baseline-compiler.cc index 30b371264dc132..58ea23043da452 100644 --- a/deps/v8/src/baseline/baseline-compiler.cc +++ b/deps/v8/src/baseline/baseline-compiler.cc @@ -422,6 +422,9 @@ Tagged BaselineCompiler::IndexAsSmi(int operand_index) { Tagged BaselineCompiler::IntAsSmi(int operand_index) { return Smi::FromInt(Int(operand_index)); } +Tagged BaselineCompiler::UintAsSmi(int operand_index) { + return Smi::FromInt(Uint(operand_index)); +} Tagged BaselineCompiler::Flag8AsSmi(int operand_index) { return Smi::FromInt(Flag8(operand_index)); } @@ -647,6 +650,8 @@ constexpr static bool BuiltinMayDeopt(Builtin id) { case Builtin::kBaselineOutOfLinePrologue: case Builtin::kIncBlockCounter: case Builtin::kToObject: + case Builtin::kStoreScriptContextSlotBaseline: + case Builtin::kStoreCurrentScriptContextSlotBaseline: // This one explicitly skips the construct if the debugger is enabled. 
case Builtin::kFindNonDefaultConstructorOrConstruct: return false; @@ -812,6 +817,30 @@ void BaselineCompiler::VisitStaCurrentContextSlot() { context, Context::OffsetOfElementAt(Index(0)), value); } +void BaselineCompiler::VisitStaScriptContextSlot() { + Register value = WriteBarrierDescriptor::ValueRegister(); + Register context = WriteBarrierDescriptor::ObjectRegister(); + DCHECK(!AreAliased(value, context, kInterpreterAccumulatorRegister)); + __ Move(value, kInterpreterAccumulatorRegister); + LoadRegister(context, 0); + SaveAccumulatorScope accumulator_scope(this, &basm_); + CallBuiltin( + context, // context + value, // value + IndexAsSmi(1), // slot + UintAsTagged(2)); // depth +} + +void BaselineCompiler::VisitStaCurrentScriptContextSlot() { + Register value = WriteBarrierDescriptor::ValueRegister(); + DCHECK(!AreAliased(value, kInterpreterAccumulatorRegister)); + SaveAccumulatorScope accumulator_scope(this, &basm_); + __ Move(value, kInterpreterAccumulatorRegister); + CallBuiltin( + value, // value + IndexAsSmi(0)); // slot +} + void BaselineCompiler::VisitLdaLookupSlot() { CallRuntime(Runtime::kLoadLookupSlot, Constant(0)); } diff --git a/deps/v8/src/baseline/baseline-compiler.h b/deps/v8/src/baseline/baseline-compiler.h index 17e996d5328cd6..c06fdafddf44c9 100644 --- a/deps/v8/src/baseline/baseline-compiler.h +++ b/deps/v8/src/baseline/baseline-compiler.h @@ -91,6 +91,7 @@ class BaselineCompiler { Tagged UintAsTagged(int operand_index); Tagged IndexAsSmi(int operand_index); Tagged IntAsSmi(int operand_index); + Tagged UintAsSmi(int operand_index); Tagged Flag8AsSmi(int operand_index); Tagged Flag16AsSmi(int operand_index); diff --git a/deps/v8/src/builtins/accessors.cc b/deps/v8/src/builtins/accessors.cc index a0f35d99b096c1..a85d738681745c 100644 --- a/deps/v8/src/builtins/accessors.cc +++ b/deps/v8/src/builtins/accessors.cc @@ -151,7 +151,8 @@ void Accessors::ArrayLengthGetter( RCS_SCOPE(isolate, RuntimeCallCounterId::kArrayLengthGetter); DisallowGarbageCollection no_gc; HandleScope scope(isolate); - Tagged holder = JSArray::cast(*Utils::OpenHandle(*info.Holder())); + Tagged holder = + JSArray::cast(*Utils::OpenDirectHandle(*info.Holder())); Tagged result = holder->length(); info.GetReturnValue().Set(Utils::ToLocal(Handle(result, isolate))); } @@ -163,7 +164,7 @@ void Accessors::ArrayLengthSetter( RCS_SCOPE(isolate, RuntimeCallCounterId::kArrayLengthSetter); HandleScope scope(isolate); - DCHECK(Object::SameValue(*Utils::OpenHandle(*name), + DCHECK(Object::SameValue(*Utils::OpenDirectHandle(*name), ReadOnlyRoots(isolate).length_string())); Handle object = Utils::OpenHandle(*info.Holder()); @@ -233,7 +234,7 @@ void Accessors::ModuleNamespaceEntryGetter( i::Isolate* isolate = reinterpret_cast(info.GetIsolate()); HandleScope scope(isolate); Tagged holder = - JSModuleNamespace::cast(*Utils::OpenHandle(*info.Holder())); + JSModuleNamespace::cast(*Utils::OpenDirectHandle(*info.Holder())); Handle result; if (holder->GetExport(isolate, Handle::cast(Utils::OpenHandle(*name))) .ToHandle(&result)) { @@ -281,12 +282,13 @@ void Accessors::StringLengthGetter( // v8::Object, but internally we have callbacks on entities which are higher // in the hierarchy, in this case for String values. - Tagged value = *Utils::OpenHandle(*v8::Local(info.This())); + Tagged value = + *Utils::OpenDirectHandle(*v8::Local(info.This())); if (!IsString(value)) { // Not a string value. That means that we either got a String wrapper or // a Value with a String wrapper in its prototype chain. 
- value = - JSPrimitiveWrapper::cast(*Utils::OpenHandle(*info.Holder()))->value(); + value = JSPrimitiveWrapper::cast(*Utils::OpenDirectHandle(*info.Holder())) + ->value(); } Tagged result = Smi::FromInt(String::cast(value)->length()); info.GetReturnValue().Set(Utils::ToLocal(Handle(result, isolate))); diff --git a/deps/v8/src/builtins/arm/builtins-arm.cc b/deps/v8/src/builtins/arm/builtins-arm.cc index 2adb7dd73173fc..79124bd196a57f 100644 --- a/deps/v8/src/builtins/arm/builtins-arm.cc +++ b/deps/v8/src/builtins/arm/builtins-arm.cc @@ -930,7 +930,7 @@ void ResetFeedbackVectorOsrUrgency(MacroAssembler* masm, void Builtins::Generate_BaselineOutOfLinePrologue(MacroAssembler* masm) { UseScratchRegisterScope temps(masm); // Need a few extra registers - temps.Include({r4, r8, r9}); + temps.Include({r4, r5, r8, r9}); auto descriptor = Builtins::CallInterfaceDescriptorFor(Builtin::kBaselineOutOfLinePrologue); @@ -943,7 +943,11 @@ void Builtins::Generate_BaselineOutOfLinePrologue(MacroAssembler* masm) { FieldMemOperand(closure, JSFunction::kFeedbackCellOffset)); __ ldr(feedback_vector, FieldMemOperand(feedback_cell, FeedbackCell::kValueOffset)); - __ AssertFeedbackVector(feedback_vector); + { + UseScratchRegisterScope temps(masm); + Register temporary = temps.Acquire(); + __ AssertFeedbackVector(feedback_vector, temporary); + } // Check the tiering state. Label flags_need_processing; diff --git a/deps/v8/src/builtins/arm64/builtins-arm64.cc b/deps/v8/src/builtins/arm64/builtins-arm64.cc index 8221c3a6c3f6c7..5c607660fb913a 100644 --- a/deps/v8/src/builtins/arm64/builtins-arm64.cc +++ b/deps/v8/src/builtins/arm64/builtins-arm64.cc @@ -491,9 +491,8 @@ static void GetSharedFunctionInfoBytecodeOrBaseline( &done); } - __ LoadTrustedPointerField( - bytecode, FieldMemOperand(data, InterpreterData::kBytecodeArrayOffset), - kBytecodeArrayIndirectPointerTag); + __ LoadProtectedPointerField( + bytecode, FieldMemOperand(data, InterpreterData::kBytecodeArrayOffset)); __ Bind(&done); __ IsObjectType(bytecode, scratch1, scratch1, BYTECODE_ARRAY_TYPE); @@ -1557,7 +1556,7 @@ void Builtins::Generate_InterpreterEntryTrampoline( __ Move(x2, kInterpreterBytecodeArrayRegister); static_assert(kJavaScriptCallCodeStartRegister == x2, "ABI mismatch"); __ ReplaceClosureCodeWithOptimizedCode(x2, closure); - __ JumpCodeObject(x2); + __ JumpCodeObject(x2, kJSEntrypointTag); __ bind(&install_baseline_code); __ GenerateTailCallToReturnedCode(Runtime::kInstallBaselineCode); @@ -2026,9 +2025,9 @@ static void Generate_InterpreterEnterBytecode(MacroAssembler* masm) { kInterpreterDispatchTableRegister, INTERPRETER_DATA_TYPE); __ B(ne, &builtin_trampoline); - __ LoadCodePointerField( + __ LoadProtectedPointerField( x1, FieldMemOperand(x1, InterpreterData::kInterpreterTrampolineOffset)); - __ LoadCodeInstructionStart(x1, x1); + __ LoadCodeInstructionStart(x1, x1, kJSEntrypointTag); __ B(&trampoline_loaded); __ Bind(&builtin_trampoline); @@ -2280,17 +2279,17 @@ void OnStackReplacement(MacroAssembler* masm, OsrSourceTier source, // Load deoptimization data from the code object. // = [#deoptimization_data_offset] - __ LoadTaggedField( + __ LoadProtectedPointerField( x1, FieldMemOperand(x0, Code::kDeoptimizationDataOrInterpreterDataOffset)); // Load the OSR entrypoint offset from the deoptimization data. 
// = [#header_size + #osr_pc_offset] __ SmiUntagField( - x1, FieldMemOperand(x1, FixedArray::OffsetOfElementAt( + x1, FieldMemOperand(x1, TrustedFixedArray::OffsetOfElementAt( DeoptimizationData::kOsrPcOffsetIndex))); - __ LoadCodeInstructionStart(x0, x0); + __ LoadCodeInstructionStart(x0, x0, kJSEntrypointTag); // Compute the target address = code_entry + osr_offset // = + @@ -5509,7 +5508,7 @@ void Generate_BaselineOrInterpreterEntry(MacroAssembler* masm, FrameScope scope(masm, StackFrame::INTERNAL); __ CallCFunction(get_baseline_pc, 3, 0); } - __ LoadCodeInstructionStart(code_obj, code_obj); + __ LoadCodeInstructionStart(code_obj, code_obj, kJSEntrypointTag); __ Add(code_obj, code_obj, kReturnRegister0); __ Pop(kInterpreterAccumulatorRegister, padreg); diff --git a/deps/v8/src/builtins/base.tq b/deps/v8/src/builtins/base.tq index 4745af7a0c2e11..090e2ee31ad20d 100644 --- a/deps/v8/src/builtins/base.tq +++ b/deps/v8/src/builtins/base.tq @@ -243,7 +243,10 @@ type IndirectPointer generates 'TNode' constexpr 'IndirectPointerHandle'; // TODO(saelo): implement accessors and type checkers for these fields. -type IndirectPointer extends IndirectPointer; +type IndirectPointer extends + IndirectPointer; +type ProtectedPointer extends Tagged; +type ProtectedPointer extends ProtectedPointer; extern class InstructionStream extends TrustedObject; type BuiltinPtr extends Smi generates 'TNode'; @@ -419,6 +422,7 @@ extern enum MessageTemplate { kTypedArraySetOffsetOutOfBounds, kInvalidArgument, kInvalidRegExpExecResult, + kInvalidSizeValue, kRegExpNonRegExp, kRegExpNonObject, kPromiseNonCallable, @@ -477,6 +481,7 @@ extern enum MessageTemplate { kIteratorResultNotAnObject, kFlattenPastSafeLength, kStrictReadOnlyProperty, + kInvalidUsingInForInLoop, ... } diff --git a/deps/v8/src/builtins/builtins-async-generator-gen.cc b/deps/v8/src/builtins/builtins-async-generator-gen.cc index c859031d424d89..10ffd889f7f8cc 100644 --- a/deps/v8/src/builtins/builtins-async-generator-gen.cc +++ b/deps/v8/src/builtins/builtins-async-generator-gen.cc @@ -413,8 +413,10 @@ TF_BUILTIN(AsyncGeneratorAwaitResolveClosure, AsyncGeneratorBuiltinsAssembler) { TF_BUILTIN(AsyncGeneratorAwaitRejectClosure, AsyncGeneratorBuiltinsAssembler) { auto value = Parameter(Descriptor::kValue); auto context = Parameter(Descriptor::kContext); + // Restart in Rethrow mode, as this exception was already thrown and we don't + // want to trigger a second debug break event or change the message location. 
AsyncGeneratorAwaitResumeClosure(context, value, - JSAsyncGeneratorObject::kThrow); + JSAsyncGeneratorObject::kRethrow); } TF_BUILTIN(AsyncGeneratorAwaitUncaught, AsyncGeneratorBuiltinsAssembler) { diff --git a/deps/v8/src/builtins/builtins-collections-gen.cc b/deps/v8/src/builtins/builtins-collections-gen.cc index 6fea5c37e8c2f2..e5e6026ce61632 100644 --- a/deps/v8/src/builtins/builtins-collections-gen.cc +++ b/deps/v8/src/builtins/builtins-collections-gen.cc @@ -2782,9 +2782,10 @@ TNode WeakCollectionsBuiltinsAssembler::ShouldShrink( TNode WeakCollectionsBuiltinsAssembler::ValueIndexFromKeyIndex( TNode key_index) { - return IntPtrAdd(key_index, - IntPtrConstant(EphemeronHashTable::ShapeT::kEntryValueIndex - - EphemeronHashTable::kEntryKeyIndex)); + return IntPtrAdd( + key_index, + IntPtrConstant(EphemeronHashTable::TodoShape::kEntryValueIndex - + EphemeronHashTable::kEntryKeyIndex)); } TF_BUILTIN(WeakMapConstructor, WeakCollectionsBuiltinsAssembler) { diff --git a/deps/v8/src/builtins/builtins-constructor-gen.cc b/deps/v8/src/builtins/builtins-constructor-gen.cc index 367365a92de155..3224b2996a4041 100644 --- a/deps/v8/src/builtins/builtins-constructor-gen.cc +++ b/deps/v8/src/builtins/builtins-constructor-gen.cc @@ -373,12 +373,8 @@ TNode ConstructorBuiltinsAssembler::FastNewObject( } BIND(&allocate_properties); { - if (V8_ENABLE_SWISS_NAME_DICTIONARY_BOOL) { - properties = - AllocateSwissNameDictionary(SwissNameDictionary::kInitialCapacity); - } else { - properties = AllocateNameDictionary(NameDictionary::kInitialCapacity); - } + properties = + AllocatePropertyDictionary(PropertyDictionary::kInitialCapacity); Goto(&instantiate_map); } diff --git a/deps/v8/src/builtins/builtins-definitions.h b/deps/v8/src/builtins/builtins-definitions.h index 0a037ba91d4829..8cdba0d032eea4 100644 --- a/deps/v8/src/builtins/builtins-definitions.h +++ b/deps/v8/src/builtins/builtins-definitions.h @@ -665,7 +665,9 @@ namespace internal { TFH(StoreGlobalICTrampoline, StoreGlobal) \ TFH(StoreGlobalICBaseline, StoreGlobalBaseline) \ TFH(StoreIC, StoreWithVector) \ + TFH(StoreIC_Megamorphic, StoreWithVector) \ TFH(StoreICTrampoline, Store) \ + TFH(StoreICTrampoline_Megamorphic, Store) \ TFH(StoreICBaseline, StoreBaseline) \ TFH(DefineNamedOwnIC, StoreWithVector) \ TFH(DefineNamedOwnICTrampoline, Store) \ @@ -892,8 +894,8 @@ namespace internal { kMatchInfo) \ TFS(RegExpExecInternal, NeedsContext::kYes, kRegExp, kString, kLastIndex, \ kMatchInfo) \ - ASM(RegExpInterpreterTrampoline, CCall) \ - ASM(RegExpExperimentalTrampoline, CCall) \ + ASM(RegExpInterpreterTrampoline, RegExpTrampoline) \ + ASM(RegExpExperimentalTrampoline, RegExpTrampoline) \ \ /* Set */ \ TFS(FindOrderedHashSetEntry, NeedsContext::kYes, kTable, kKey) \ @@ -2029,14 +2031,6 @@ namespace internal { BUILTIN_LIST(V, IGNORE_BUILTIN, IGNORE_BUILTIN, IGNORE_BUILTIN, \ IGNORE_BUILTIN, IGNORE_BUILTIN, IGNORE_BUILTIN) -#define BUILTIN_LIST_A(V) \ - BUILTIN_LIST(IGNORE_BUILTIN, IGNORE_BUILTIN, IGNORE_BUILTIN, IGNORE_BUILTIN, \ - IGNORE_BUILTIN, IGNORE_BUILTIN, V) - -#define BUILTIN_LIST_TFS(V) \ - BUILTIN_LIST(IGNORE_BUILTIN, IGNORE_BUILTIN, IGNORE_BUILTIN, V, \ - IGNORE_BUILTIN, IGNORE_BUILTIN, IGNORE_BUILTIN) - #define BUILTIN_LIST_TFJ(V) \ BUILTIN_LIST(IGNORE_BUILTIN, V, IGNORE_BUILTIN, IGNORE_BUILTIN, \ IGNORE_BUILTIN, IGNORE_BUILTIN, IGNORE_BUILTIN) @@ -2045,6 +2039,22 @@ namespace internal { BUILTIN_LIST(IGNORE_BUILTIN, IGNORE_BUILTIN, V, IGNORE_BUILTIN, \ IGNORE_BUILTIN, IGNORE_BUILTIN, IGNORE_BUILTIN) +#define BUILTIN_LIST_TFS(V) \ + 
BUILTIN_LIST(IGNORE_BUILTIN, IGNORE_BUILTIN, IGNORE_BUILTIN, V, \ + IGNORE_BUILTIN, IGNORE_BUILTIN, IGNORE_BUILTIN) + +#define BUILTIN_LIST_TFH(V) \ + BUILTIN_LIST(IGNORE_BUILTIN, IGNORE_BUILTIN, IGNORE_BUILTIN, IGNORE_BUILTIN, \ + V, IGNORE_BUILTIN, IGNORE_BUILTIN) + +#define BUILTIN_LIST_BCH(V) \ + BUILTIN_LIST(IGNORE_BUILTIN, IGNORE_BUILTIN, IGNORE_BUILTIN, IGNORE_BUILTIN, \ + IGNORE_BUILTIN, V, IGNORE_BUILTIN) + +#define BUILTIN_LIST_A(V) \ + BUILTIN_LIST(IGNORE_BUILTIN, IGNORE_BUILTIN, IGNORE_BUILTIN, IGNORE_BUILTIN, \ + IGNORE_BUILTIN, IGNORE_BUILTIN, V) + } // namespace internal } // namespace v8 diff --git a/deps/v8/src/builtins/builtins-generator-gen.cc b/deps/v8/src/builtins/builtins-generator-gen.cc index d67dc0ef59f7e3..a6d4a45101e7fd 100644 --- a/deps/v8/src/builtins/builtins-generator-gen.cc +++ b/deps/v8/src/builtins/builtins-generator-gen.cc @@ -106,6 +106,9 @@ void GeneratorBuiltinsAssembler::InnerResume( case JSGeneratorObject::kThrow: builtin_result = CallRuntime(Runtime::kThrow, context, value); break; + case JSGeneratorObject::kRethrow: + // Currently only async generators use this mode. + UNREACHABLE(); } args->PopAndReturn(builtin_result); } diff --git a/deps/v8/src/builtins/builtins-ic-gen.cc b/deps/v8/src/builtins/builtins-ic-gen.cc index d6c1c0136bbd45..da645619dc5a74 100644 --- a/deps/v8/src/builtins/builtins-ic-gen.cc +++ b/deps/v8/src/builtins/builtins-ic-gen.cc @@ -101,10 +101,20 @@ void Builtins::Generate_StoreIC(compiler::CodeAssemblerState* state) { AccessorAssembler assembler(state); assembler.GenerateStoreIC(); } +void Builtins::Generate_StoreIC_Megamorphic( + compiler::CodeAssemblerState* state) { + AccessorAssembler assembler(state); + assembler.GenerateStoreIC_Megamorphic(); +} void Builtins::Generate_StoreICTrampoline(compiler::CodeAssemblerState* state) { AccessorAssembler assembler(state); assembler.GenerateStoreICTrampoline(); } +void Builtins::Generate_StoreICTrampoline_Megamorphic( + compiler::CodeAssemblerState* state) { + AccessorAssembler assembler(state); + assembler.GenerateStoreICTrampoline_Megamorphic(); +} void Builtins::Generate_StoreICBaseline(compiler::CodeAssemblerState* state) { AccessorAssembler assembler(state); assembler.GenerateStoreICBaseline(); diff --git a/deps/v8/src/builtins/builtins-internal-gen.cc b/deps/v8/src/builtins/builtins-internal-gen.cc index 9602300cc7e054..9b3c69a4dbf5ee 100644 --- a/deps/v8/src/builtins/builtins-internal-gen.cc +++ b/deps/v8/src/builtins/builtins-internal-gen.cc @@ -137,10 +137,10 @@ class WriteBarrierCodeStubAssembler : public CodeStubAssembler { } TNode IsPageFlagSet(TNode object, int mask) { - TNode page = PageFromAddress(object); + TNode header = PageHeaderFromAddress(object); TNode flags = UncheckedCast( - Load(MachineType::Pointer(), page, - IntPtrConstant(BasicMemoryChunk::kFlagsOffset))); + Load(MachineType::Pointer(), header, + IntPtrConstant(MemoryChunkLayout::kFlagsOffset))); return WordNotEqual(WordAnd(flags, IntPtrConstant(mask)), IntPtrConstant(0)); } @@ -156,8 +156,8 @@ class WriteBarrierCodeStubAssembler : public CodeStubAssembler { void GetMarkBit(TNode object, TNode* cell, TNode* mask) { TNode page = PageFromAddress(object); - TNode bitmap = - IntPtrAdd(page, IntPtrConstant(MemoryChunk::kMarkingBitmapOffset)); + TNode bitmap = IntPtrAdd( + page, IntPtrConstant(MemoryChunkLayout::kMarkingBitmapOffset)); { // Temp variable to calculate cell offset in bitmap. 
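The write-barrier changes here recompute the marking-bitmap cell and bit mask relative to the chunk's alignment mask. A host-side sketch of that addressing follows; the constants are assumptions for illustration (8-byte tagged words, 256 KiB chunk alignment), and the real values come from MarkingBitmap and MemoryChunkHeader.

// Sketch: one mark bit per tagged word, 32 bits per bitmap cell.
#include <cstdint>
#include <cstdio>

constexpr uintptr_t kPageAlignmentMask = (uintptr_t{1} << 18) - 1;  // assumed
constexpr int kTaggedSizeLog2 = 3;   // assumed: 8-byte tagged words
constexpr int kBitsPerCellLog2 = 5;  // 32-bit bitmap cells
constexpr int kBytesPerCellLog2 = 2;

void GetMarkBit(uintptr_t addr, uintptr_t* cell_byte_offset, uint32_t* mask) {
  const int shift = kBitsPerCellLog2 + kTaggedSizeLog2 - kBytesPerCellLog2;  // 6
  // Byte offset of the 32-bit cell inside the chunk's marking bitmap,
  // rounded down to cell alignment.
  *cell_byte_offset =
      (addr >> shift) & ((kPageAlignmentMask >> shift) &
                         ~((uintptr_t{1} << kBytesPerCellLog2) - 1));
  // Bit within that cell.
  const uint32_t bit =
      (addr >> kTaggedSizeLog2) & ((1u << kBitsPerCellLog2) - 1);
  *mask = 1u << bit;
}

int main() {
  uintptr_t cell;
  uint32_t mask;
  GetMarkBit(0x10040, &cell, &mask);  // illustrative tagged address
  std::printf("cell offset %llu, mask 0x%x\n",
              static_cast<unsigned long long>(cell), static_cast<unsigned>(mask));
  return 0;
}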
@@ -165,8 +165,10 @@ class WriteBarrierCodeStubAssembler : public CodeStubAssembler { int shift = MarkingBitmap::kBitsPerCellLog2 + kTaggedSizeLog2 - MarkingBitmap::kBytesPerCellLog2; r0 = WordShr(object, IntPtrConstant(shift)); - r0 = WordAnd(r0, IntPtrConstant((kPageAlignmentMask >> shift) & - ~(MarkingBitmap::kBytesPerCell - 1))); + r0 = WordAnd( + r0, IntPtrConstant( + (MemoryChunkHeader::GetAlignmentMaskForAssembler() >> shift) & + ~(MarkingBitmap::kBytesPerCell - 1))); *cell = IntPtrAdd(bitmap, Signed(r0)); } { @@ -185,11 +187,12 @@ class WriteBarrierCodeStubAssembler : public CodeStubAssembler { void InsertIntoRememberedSet(TNode object, TNode slot, SaveFPRegsMode fp_mode) { Label slow_path(this), next(this); - TNode page = PageFromAddress(object); + TNode page_header = PageHeaderFromAddress(object); + TNode page = PageFromPageHeader(page_header); // Load address of SlotSet TNode slot_set = LoadSlotSet(page, &slow_path); - TNode slot_offset = IntPtrSub(slot, page); + TNode slot_offset = IntPtrSub(slot, page_header); // Load bucket TNode bucket = LoadBucket(slot_set, slot_offset, &slow_path); @@ -1423,7 +1426,8 @@ void Builtins::Generate_MaglevOptimizeCodeOrTailCallOptimizedCodeSlot( using D = MaglevOptimizeCodeOrTailCallOptimizedCodeSlotDescriptor; Register flags = D::GetRegisterParameter(D::kFlags); Register feedback_vector = D::GetRegisterParameter(D::kFeedbackVector); - masm->AssertFeedbackVector(feedback_vector); + Register temporary = D::GetRegisterParameter(D::kTemporary); + masm->AssertFeedbackVector(feedback_vector, temporary); masm->OptimizeCodeOrTailCallOptimizedCodeSlot(flags, feedback_vector); masm->Trap(); } diff --git a/deps/v8/src/builtins/builtins-object-gen.cc b/deps/v8/src/builtins/builtins-object-gen.cc index 712fe0ceb06aff..2c5d35c3395b71 100644 --- a/deps/v8/src/builtins/builtins-object-gen.cc +++ b/deps/v8/src/builtins/builtins-object-gen.cc @@ -1072,13 +1072,8 @@ TF_BUILTIN(ObjectCreate, ObjectBuiltinsAssembler) { BIND(&null_proto); { map = LoadSlowObjectWithNullPrototypeMap(native_context); - if constexpr (V8_ENABLE_SWISS_NAME_DICTIONARY_BOOL) { - new_properties = - AllocateSwissNameDictionary(SwissNameDictionary::kInitialCapacity); - } else { - new_properties = - AllocateNameDictionary(NameDictionary::kInitialCapacity); - } + new_properties = + AllocatePropertyDictionary(PropertyDictionary::kInitialCapacity); Goto(&instantiate_map); } @@ -1419,10 +1414,7 @@ TNode ObjectBuiltinsAssembler::FromPropertyDescriptor( native_context, Context::SLOW_OBJECT_WITH_OBJECT_PROTOTYPE_MAP)); // We want to preallocate the slots for value, writable, get, set, // enumerable and configurable - a total of 6 - TNode properties = - V8_ENABLE_SWISS_NAME_DICTIONARY_BOOL - ? 
TNode(AllocateSwissNameDictionary(6)) - : AllocateNameDictionary(6); + TNode properties = AllocatePropertyDictionary(6); TNode js_desc = AllocateJSObjectFromMap(map, properties); Label bailout(this, Label::kDeferred); diff --git a/deps/v8/src/builtins/builtins-object.cc b/deps/v8/src/builtins/builtins-object.cc index 247927cd9e1cf6..3196d73fa52136 100644 --- a/deps/v8/src/builtins/builtins-object.cc +++ b/deps/v8/src/builtins/builtins-object.cc @@ -115,10 +115,9 @@ Tagged ObjectLookupAccessor(Isolate* isolate, Handle object, LookupIterator it(isolate, object, lookup_key, LookupIterator::PROTOTYPE_CHAIN_SKIP_INTERCEPTOR); - for (; it.IsFound(); it.Next()) { + for (;; it.Next()) { switch (it.state()) { case LookupIterator::INTERCEPTOR: - case LookupIterator::NOT_FOUND: case LookupIterator::TRANSITION: UNREACHABLE(); @@ -151,8 +150,9 @@ Tagged ObjectLookupAccessor(Isolate* isolate, Handle object, return ObjectLookupAccessor(isolate, prototype, key, component); } case LookupIterator::WASM_OBJECT: - case LookupIterator::INTEGER_INDEXED_EXOTIC: + case LookupIterator::TYPED_ARRAY_INDEX_NOT_FOUND: case LookupIterator::DATA: + case LookupIterator::NOT_FOUND: return ReadOnlyRoots(isolate).undefined_value(); case LookupIterator::ACCESSOR: { @@ -165,11 +165,11 @@ Tagged ObjectLookupAccessor(Isolate* isolate, Handle object, isolate, holder_realm, Handle::cast(maybe_pair), component); } + continue; } } + UNREACHABLE(); } - - return ReadOnlyRoots(isolate).undefined_value(); } } // namespace diff --git a/deps/v8/src/builtins/builtins-regexp-gen.cc b/deps/v8/src/builtins/builtins-regexp-gen.cc index 672a46f0136a59..e3befac11c7f92 100644 --- a/deps/v8/src/builtins/builtins-regexp-gen.cc +++ b/deps/v8/src/builtins/builtins-regexp-gen.cc @@ -312,19 +312,45 @@ TNode RegExpBuiltinsAssembler::ConstructNewResultFromMatchInfo( // implementation of CreateDataProperty instead. // At this point the spec says to call CreateDataProperty. 
However, we can - // skip most of the steps and go straight to adding a dictionary entry - // because we know a bunch of useful facts: + // skip most of the steps and go straight to adding/updating a dictionary + // entry because we know a bunch of useful facts: // - All keys are non-numeric internalized strings - // - No keys repeat // - Receiver has no prototype // - Receiver isn't used as a prototype // - Receiver isn't any special object like a Promise intrinsic object // - Receiver is extensible // - Receiver has no interceptors Label add_dictionary_property_slow(this, Label::kDeferred); + TVARIABLE(IntPtrT, var_name_index); + Label add_name_entry_find_index(this), + add_name_entry_known_index(this, &var_name_index), + duplicate_name(this, &var_name_index), next(this); + NameDictionaryLookup( + CAST(properties), name, &duplicate_name, &var_name_index, + &add_name_entry_find_index, kFindExisting, + &add_name_entry_known_index); + BIND(&duplicate_name); + GotoIf(IsUndefined(capture), &next); + CSA_DCHECK(this, + TaggedEqual(LoadValueByKeyIndex( + CAST(properties), var_name_index.value()), + UndefinedConstant())); + StoreValueByKeyIndex(CAST(properties), + var_name_index.value(), capture); + Goto(&next); + + BIND(&add_name_entry_find_index); + FindInsertionEntry(CAST(properties), name, + &var_name_index); + Goto(&add_name_entry_known_index); + + BIND(&add_name_entry_known_index); AddToDictionary(CAST(properties), name, capture, - &add_dictionary_property_slow); + &add_dictionary_property_slow, + var_name_index.value()); + Goto(&next); + BIND(&next); var_i = i_plus_2; Branch(IntPtrGreaterThanOrEqual(var_i.value(), names_length), &maybe_build_indices, &inner_loop); @@ -578,7 +604,8 @@ TNode RegExpBuiltinsAssembler::RegExpExecInternal( // instead of referencing the CodeWrapper object, we could directly load // the entrypoint from that via LoadCodeEntrypointViaCodePointerField. This // will save an indirection when the sandbox is enabled. - TNode code_entry = LoadCodeInstructionStart(code); + TNode code_entry = + LoadCodeInstructionStart(code, kRegExpEntrypointTag); // AIX uses function descriptors on CFunction calls. code_entry in this case // may also point to a Regex interpreter entry trampoline which does not diff --git a/deps/v8/src/builtins/builtins.cc b/deps/v8/src/builtins/builtins.cc index 6ba148eac268f4..6a909d15ee7128 100644 --- a/deps/v8/src/builtins/builtins.cc +++ b/deps/v8/src/builtins/builtins.cc @@ -462,6 +462,28 @@ bool Builtins::IsCpp(Builtin builtin) { return Builtins::KindOf(builtin) == CPP; } +// static +CodeEntrypointTag Builtins::EntrypointTagFor(Builtin builtin) { + if (builtin == Builtin::kNoBuiltinId) { + // Special case needed for example for tests. + return kDefaultCodeEntrypointTag; + } + + Kind kind = Builtins::KindOf(builtin); + switch (kind) { + case BCH: + return kBytecodeHandlerEntrypointTag; + case TFH: + return kICHandlerEntrypointTag; + case ASM: + // TODO(saelo) consider using this approach for the other kinds as well. + return CallInterfaceDescriptorFor(builtin).tag(); + default: + // TODO(saelo): use more fine-grained tags here. 
+ return kDefaultCodeEntrypointTag; + } +} + // static bool Builtins::AllowDynamicFunction(Isolate* isolate, Handle target, Handle target_global_proxy) { diff --git a/deps/v8/src/builtins/builtins.h b/deps/v8/src/builtins/builtins.h index 6d747e02cca03f..f2b9bd48134a74 100644 --- a/deps/v8/src/builtins/builtins.h +++ b/deps/v8/src/builtins/builtins.h @@ -9,6 +9,7 @@ #include "src/builtins/builtins-definitions.h" #include "src/common/globals.h" #include "src/objects/type-hints.h" +#include "src/sandbox/code-entrypoint-tag.h" namespace v8 { namespace internal { @@ -193,6 +194,9 @@ class Builtins { static Kind KindOf(Builtin builtin); static const char* KindNameOf(Builtin builtin); + // The tag for the builtins entrypoint. + V8_EXPORT_PRIVATE static CodeEntrypointTag EntrypointTagFor(Builtin builtin); + static bool IsCpp(Builtin builtin); // True, iff the given code object is a builtin. Note that this does not diff --git a/deps/v8/src/builtins/collections.tq b/deps/v8/src/builtins/collections.tq index b257bb5298d72a..7d4f506519c9d9 100644 --- a/deps/v8/src/builtins/collections.tq +++ b/deps/v8/src/builtins/collections.tq @@ -288,21 +288,26 @@ transitioning macro GetSetRecord( // 6. Let intSize be ! ToIntegerOrInfinity(numSize). const intSize = ToInteger_Inline(numSize); - // 7. Let has be ? Get(obj, "has"). + // 7. If intSize < 0, throw a RangeError exception. + if (intSize < 0) { + ThrowRangeError(MessageTemplate::kInvalidSizeValue, intSize); + } + + // 8. Let has be ? Get(obj, "has"). let has = GetProperty(obj, kHasString); - // 8. If IsCallable(has) is false, throw a TypeError exception. + // 9. If IsCallable(has) is false, throw a TypeError exception. has = Cast(has) otherwise ThrowCalledNonCallable(kHasString); - // 9. Let keys be ? Get(obj, "keys"). + // 10. Let keys be ? Get(obj, "keys"). let keys = GetProperty(obj, kKeysString); - // 10. If IsCallable(keys) is false, throw a TypeError exception. + // 11. If IsCallable(keys) is false, throw a TypeError exception. keys = Cast(keys) otherwise ThrowCalledNonCallable(kKeysString); - // 11. Return a new Set Record { [[Set]]: obj, [[Size]]: intSize, [[Has]]: + // 12. Return a new Set Record { [[Set]]: obj, [[Size]]: intSize, [[Has]]: // has, [[Keys]]: keys }. return SetRecord{object: obj, size: intSize, has: has, keys: keys}; } diff --git a/deps/v8/src/builtins/js-to-js.tq b/deps/v8/src/builtins/js-to-js.tq index 46dd70b4927673..691271e9caf0c5 100644 --- a/deps/v8/src/builtins/js-to-js.tq +++ b/deps/v8/src/builtins/js-to-js.tq @@ -42,6 +42,8 @@ macro ConvertToAndFromWasm(context: Context, wasmType: int32, value: JSAny): } else if (wasmType == kWasmF64Type) { return Convert(WasmTaggedToFloat64(value)); } else { + const wasmKind = wasmType & kValueTypeKindBitsMask; + dcheck(wasmKind == ValueKind::kRef || wasmKind == ValueKind::kRefNull); if (value == Null) { // At the moment it is not possible to define non-nullable types for // WebAssembly.Functions. 
@@ -61,6 +63,13 @@ macro ConvertToAndFromWasm(context: Context, wasmType: int32, value: JSAny): } } +extern runtime WasmThrowJSTypeError(Context): never; + +transitioning javascript builtin JSToJSWrapperInvalidSig( + js-implicit context: NativeContext)(): JSAny { + runtime::WasmThrowJSTypeError(context); +} + transitioning javascript builtin JSToJSWrapper( js-implicit context: NativeContext, receiver: JSAny, target: JSFunction)( ...arguments): JSAny { diff --git a/deps/v8/src/builtins/js-to-wasm.tq b/deps/v8/src/builtins/js-to-wasm.tq index 45d31c65917f99..19d6a1077e943b 100644 --- a/deps/v8/src/builtins/js-to-wasm.tq +++ b/deps/v8/src/builtins/js-to-wasm.tq @@ -504,6 +504,8 @@ macro JSToWasmWrapperHelper( } else if (retType == kWasmF64Type) { allocator.AllocFP64(); } else { + const retKind = retType & kValueTypeKindBitsMask; + dcheck(retKind == ValueKind::kRef || retKind == ValueKind::kRefNull); // Also check if there are any reference return values, as this allows // us to skip code when we process return values. hasRefReturns = true; @@ -600,6 +602,8 @@ macro JSToWasmWrapperHelper( *toHighRef = Signed(pair.high); } } else { + const paramKind = paramType & kValueTypeKindBitsMask; + dcheck(paramKind == ValueKind::kRef || paramKind == ValueKind::kRefNull); // The byte array where we store converted parameters is not GC-safe. // Therefore we can only copy references into this array once no GC can // happen anymore. Any conversion of a primitive type can execute diff --git a/deps/v8/src/builtins/promise-abstract-operations.tq b/deps/v8/src/builtins/promise-abstract-operations.tq index 7ee1f5db9eecc9..fdbc6faa9776e0 100644 --- a/deps/v8/src/builtins/promise-abstract-operations.tq +++ b/deps/v8/src/builtins/promise-abstract-operations.tq @@ -305,6 +305,7 @@ macro CreatePromiseCapability( struct PromiseResolvingFunctions { resolve: JSFunction; reject: JSFunction; + context: Context; } @export @@ -322,7 +323,11 @@ macro CreatePromiseResolvingFunctions( const rejectInfo = PromiseCapabilityDefaultRejectSharedFunConstant(); const reject: JSFunction = AllocateFunctionWithMapAndContext(map, rejectInfo, promiseContext); - return PromiseResolvingFunctions{resolve: resolve, reject: reject}; + return PromiseResolvingFunctions{ + resolve: resolve, + reject: reject, + context: promiseContext + }; } transitioning macro InnerNewPromiseCapability( diff --git a/deps/v8/src/builtins/promise-constructor.tq b/deps/v8/src/builtins/promise-constructor.tq index 5611e228b50cfd..c77e0501cced8a 100644 --- a/deps/v8/src/builtins/promise-constructor.tq +++ b/deps/v8/src/builtins/promise-constructor.tq @@ -82,6 +82,13 @@ transitioning javascript builtin PromiseConstructor( try { Call(context, UnsafeCast(executor), Undefined, resolve, reject); } catch (e, _message) { + // We need to disable the debug event, as we have already paused on this + // exception. + const promiseContext = + %RawDownCast(funcs.context); + *ContextSlot( + promiseContext, PromiseResolvingFunctionContextSlot::kDebugEventSlot) = + False; Call(context, reject, Undefined, e); } diff --git a/deps/v8/src/builtins/riscv/builtins-riscv.cc b/deps/v8/src/builtins/riscv/builtins-riscv.cc index f0ae7947325539..94593f08920567 100644 --- a/deps/v8/src/builtins/riscv/builtins-riscv.cc +++ b/deps/v8/src/builtins/riscv/builtins-riscv.cc @@ -3028,7 +3028,10 @@ void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size, // Check result for exception sentinel. 
Label exception_returned; - __ Branch(&exception_returned, eq, a0, RootIndex::kException); + // The returned value may be a trusted object, living outside of the main + // pointer compression cage, so we need to use full pointer comparison here. + __ CompareRootAndBranch(a0, RootIndex::kException, eq, &exception_returned, + ComparisonMode::kFullPointer); // Check that there is no exception, otherwise we // should have returned the exception sentinel. diff --git a/deps/v8/src/builtins/wasm-strings.tq b/deps/v8/src/builtins/wasm-strings.tq index eda80d707302bd..fb103155db439f 100644 --- a/deps/v8/src/builtins/wasm-strings.tq +++ b/deps/v8/src/builtins/wasm-strings.tq @@ -63,6 +63,16 @@ transitioning javascript builtin WebAssemblyStringIntoUtf8Array( } } +transitioning javascript builtin WebAssemblyStringToUtf8Array( + js-implicit context: Context)(...arguments): JSAny { + try { + const string = Cast(arguments[0]) otherwise goto IllegalCast; + return runtime::WasmStringToUtf8Array(context, string); + } label IllegalCast deferred { + Trap(context, MessageTemplate::kWasmTrapIllegalCast); + } +} + transitioning javascript builtin WebAssemblyStringToWtf16Array( js-implicit context: Context)(...arguments): JSAny { try { diff --git a/deps/v8/src/builtins/wasm-to-js.tq b/deps/v8/src/builtins/wasm-to-js.tq index b3c2ce29cd71e9..87e43f6f7835c3 100644 --- a/deps/v8/src/builtins/wasm-to-js.tq +++ b/deps/v8/src/builtins/wasm-to-js.tq @@ -81,6 +81,7 @@ transitioning macro WasmToJSWrapper(ref: WasmApiFunctionRef): WasmToJSResult { const returnCount = Convert(*torque_internal::unsafe::NewReference( serializedSig.object, serializedSig.offset)); + dcheck(returnCount < serializedSig.length); const paramCount: intptr = serializedSig.length - returnCount - 1; const returnTypes = Subslice(serializedSig, Convert(1), returnCount) otherwise unreachable; @@ -261,6 +262,8 @@ transitioning macro WasmToJSWrapper(ref: WasmApiFunctionRef): WasmToJSResult { *toHighRef = Signed(pair.high); } } else { + const retKind = retType & kValueTypeKindBitsMask; + dcheck(retKind == ValueKind::kRef || retKind == ValueKind::kRefNull); dcheck(ref.instance == Undefined || Is(ref.instance)); const trustedData = ref.instance == Undefined ? 
Undefined : diff --git a/deps/v8/src/builtins/wasm.tq b/deps/v8/src/builtins/wasm.tq index 1e5d82ee37e208..4179bcb7fd474e 100644 --- a/deps/v8/src/builtins/wasm.tq +++ b/deps/v8/src/builtins/wasm.tq @@ -62,6 +62,7 @@ extern runtime WasmStringEncodeWtf8( Context, WasmTrustedInstanceData, Smi, Smi, String, Number): Number; extern runtime WasmStringEncodeWtf8Array( Context, Smi, String, WasmArray, Number): Number; +extern runtime WasmStringToUtf8Array(Context, String): WasmArray; extern runtime WasmStringEncodeWtf16( Context, WasmTrustedInstanceData, Smi, String, Number, Smi, Smi): JSAny; extern runtime WasmStringAsWtf8(Context, String): ByteArray; @@ -995,6 +996,9 @@ builtin WasmStringEncodeWtf8Array( WasmUint32ToNumber(start)); return ChangeNumberToUint32(result); } +builtin WasmStringToUtf8Array(string: String): WasmArray { + return runtime::WasmStringToUtf8Array(LoadContextFromFrame(), string); +} builtin WasmStringEncodeWtf16(string: String, offset: uint32, memory: Smi): uint32 { const trustedData = LoadInstanceDataFromFrame(); diff --git a/deps/v8/src/builtins/x64/builtins-x64.cc b/deps/v8/src/builtins/x64/builtins-x64.cc index 2d3080c59ab4f6..68d1f2c6641cef 100644 --- a/deps/v8/src/builtins/x64/builtins-x64.cc +++ b/deps/v8/src/builtins/x64/builtins-x64.cc @@ -759,9 +759,8 @@ static void GetSharedFunctionInfoBytecodeOrBaseline( &done); } - __ LoadTrustedPointerField( - bytecode, FieldOperand(data, InterpreterData::kBytecodeArrayOffset), - kBytecodeArrayIndirectPointerTag, scratch1); + __ LoadProtectedPointerField( + bytecode, FieldOperand(data, InterpreterData::kBytecodeArrayOffset)); __ bind(&done); __ IsObjectType(bytecode, BYTECODE_ARRAY_TYPE, scratch1); @@ -1298,7 +1297,7 @@ void Builtins::Generate_InterpreterEntryTrampoline( __ ReplaceClosureCodeWithOptimizedCode( rcx, closure, kInterpreterBytecodeArrayRegister, WriteBarrierDescriptor::SlotAddressRegister()); - __ JumpCodeObject(rcx); + __ JumpCodeObject(rcx, kJSEntrypointTag); __ bind(&install_baseline_code); __ GenerateTailCallToReturnedCode(Runtime::kInstallBaselineCode); @@ -1707,11 +1706,9 @@ static void Generate_InterpreterEnterBytecode(MacroAssembler* masm) { GetSharedFunctionInfoData(masm, rbx, shared_function_info, kScratchRegister); __ IsObjectType(rbx, INTERPRETER_DATA_TYPE, kScratchRegister); __ j(not_equal, &builtin_trampoline, Label::kNear); - - __ LoadCodePointerField( - rbx, FieldOperand(rbx, InterpreterData::kInterpreterTrampolineOffset), - kScratchRegister); - __ LoadCodeInstructionStart(rbx, rbx); + __ LoadProtectedPointerField( + rbx, FieldOperand(rbx, InterpreterData::kInterpreterTrampolineOffset)); + __ LoadCodeInstructionStart(rbx, rbx, kJSEntrypointTag); __ jmp(&trampoline_loaded, Label::kNear); __ bind(&builtin_trampoline); @@ -1841,7 +1838,7 @@ void Builtins::Generate_BaselineOutOfLinePrologue(MacroAssembler* masm) { FieldOperand(closure, JSFunction::kFeedbackCellOffset)); __ LoadTaggedField(feedback_vector, FieldOperand(feedback_cell, FeedbackCell::kValueOffset)); - __ AssertFeedbackVector(feedback_vector); + __ AssertFeedbackVector(feedback_vector, kScratchRegister); // Check the tiering state. Label flags_need_processing; @@ -2935,18 +2932,18 @@ void OnStackReplacement(MacroAssembler* masm, OsrSourceTier source, } // Load deoptimization data from the code object. 
- const TaggedRegister deopt_data(rbx); - __ LoadTaggedField( + const Register deopt_data(rbx); + __ LoadProtectedPointerField( deopt_data, FieldOperand(rax, Code::kDeoptimizationDataOrInterpreterDataOffset)); // Load the OSR entrypoint offset from the deoptimization data. __ SmiUntagField( rbx, - FieldOperand(deopt_data, FixedArray::OffsetOfElementAt( + FieldOperand(deopt_data, TrustedFixedArray::OffsetOfElementAt( DeoptimizationData::kOsrPcOffsetIndex))); - __ LoadCodeInstructionStart(rax, rax); + __ LoadCodeInstructionStart(rax, rax, kJSEntrypointTag); // Compute the target address = code_entry + osr_offset __ addq(rax, rbx); @@ -4990,7 +4987,7 @@ void Generate_BaselineOrInterpreterEntry(MacroAssembler* masm, __ movq(kCArgRegs[2], kInterpreterBytecodeArrayRegister); __ CallCFunction(get_baseline_pc, 3); } - __ LoadCodeInstructionStart(code_obj, code_obj); + __ LoadCodeInstructionStart(code_obj, code_obj, kJSEntrypointTag); __ addq(code_obj, kReturnRegister0); __ popq(kInterpreterAccumulatorRegister); diff --git a/deps/v8/src/codegen/arm/assembler-arm.cc b/deps/v8/src/codegen/arm/assembler-arm.cc index defa349c741bce..33289ffa1c4b31 100644 --- a/deps/v8/src/codegen/arm/assembler-arm.cc +++ b/deps/v8/src/codegen/arm/assembler-arm.cc @@ -1250,7 +1250,7 @@ void Assembler::AddrMode1(Instr instr, Register rd, Register rn, // pool only for a MOV instruction which does not set the flags. DCHECK(!rn.is_valid()); Move32BitImmediate(rd, x, cond); - } else if ((opcode == ADD) && !set_flags && (rd == rn) && + } else if ((opcode == ADD || opcode == SUB) && !set_flags && (rd == rn) && !temps.CanAcquire()) { // Split the operation into a sequence of additions if we cannot use a // scratch register. In this case, we cannot re-use rn and the assembler @@ -1266,10 +1266,20 @@ void Assembler::AddrMode1(Instr instr, Register rd, Register rn, // immediate allows us to more efficiently split it: int trailing_zeroes = base::bits::CountTrailingZeros(imm) & ~1u; uint32_t mask = (0xFF << trailing_zeroes); - add(rd, rd, Operand(imm & mask), LeaveCC, cond); + if (opcode == ADD) { + add(rd, rd, Operand(imm & mask), LeaveCC, cond); + } else { + DCHECK_EQ(opcode, SUB); + sub(rd, rd, Operand(imm & mask), LeaveCC, cond); + } imm = imm & ~mask; } while (!ImmediateFitsAddrMode1Instruction(imm)); - add(rd, rd, Operand(imm), LeaveCC, cond); + if (opcode == ADD) { + add(rd, rd, Operand(imm), LeaveCC, cond); + } else { + DCHECK_EQ(opcode, SUB); + sub(rd, rd, Operand(imm), LeaveCC, cond); + } } else { // The immediate operand cannot be encoded as a shifter operand, so load // it first to a scratch register and change the original instruction to diff --git a/deps/v8/src/codegen/arm/interface-descriptors-arm-inl.h b/deps/v8/src/codegen/arm/interface-descriptors-arm-inl.h index 2bf8f367cf6d29..2142ca9119c4c3 100644 --- a/deps/v8/src/codegen/arm/interface-descriptors-arm-inl.h +++ b/deps/v8/src/codegen/arm/interface-descriptors-arm-inl.h @@ -164,6 +164,11 @@ constexpr Register MaglevOptimizeCodeOrTailCallOptimizedCodeSlotDescriptor:: FeedbackVectorRegister() { return r5; } +// static +constexpr Register +MaglevOptimizeCodeOrTailCallOptimizedCodeSlotDescriptor::TemporaryRegister() { + return r4; +} // static constexpr auto CallTrampolineDescriptor::registers() { diff --git a/deps/v8/src/codegen/arm/macro-assembler-arm.cc b/deps/v8/src/codegen/arm/macro-assembler-arm.cc index da092e1b703504..2310cd9e75b413 100644 --- a/deps/v8/src/codegen/arm/macro-assembler-arm.cc +++ b/deps/v8/src/codegen/arm/macro-assembler-arm.cc @@ -363,7 
+363,8 @@ void MacroAssembler::TailCallBuiltin(Builtin builtin, Condition cond) { } void MacroAssembler::LoadCodeInstructionStart(Register destination, - Register code_object) { + Register code_object, + CodeEntrypointTag tag) { ASM_CODE_COMMENT(this); ldr(destination, FieldMemOperand(code_object, Code::kInstructionStartOffset)); } @@ -2006,10 +2007,8 @@ void MacroAssembler::AssertFeedbackCell(Register object, Register scratch) { Assert(eq, AbortReason::kExpectedFeedbackCell); } } -void MacroAssembler::AssertFeedbackVector(Register object) { +void MacroAssembler::AssertFeedbackVector(Register object, Register scratch) { if (v8_flags.debug_code) { - UseScratchRegisterScope temps(this); - Register scratch = temps.Acquire(); CompareObjectType(object, scratch, scratch, FEEDBACK_VECTOR_TYPE); Assert(eq, AbortReason::kExpectedFeedbackVector); } @@ -2729,20 +2728,22 @@ void MacroAssembler::MovToFloatParameters(DwVfpRegister src1, } } -void MacroAssembler::CallCFunction(ExternalReference function, - int num_reg_arguments, - int num_double_arguments, - SetIsolateDataSlots set_isolate_data_slots) { +int MacroAssembler::CallCFunction(ExternalReference function, + int num_reg_arguments, + int num_double_arguments, + SetIsolateDataSlots set_isolate_data_slots, + Label* return_label) { UseScratchRegisterScope temps(this); Register scratch = temps.Acquire(); Move(scratch, function); - CallCFunction(scratch, num_reg_arguments, num_double_arguments, - set_isolate_data_slots); + return CallCFunction(scratch, num_reg_arguments, num_double_arguments, + set_isolate_data_slots, return_label); } -void MacroAssembler::CallCFunction(Register function, int num_reg_arguments, - int num_double_arguments, - SetIsolateDataSlots set_isolate_data_slots) { +int MacroAssembler::CallCFunction(Register function, int num_reg_arguments, + int num_double_arguments, + SetIsolateDataSlots set_isolate_data_slots, + Label* return_label) { ASM_CODE_COMMENT(this); DCHECK_LE(num_reg_arguments + num_double_arguments, kMaxCParameters); DCHECK(has_frame()); @@ -2767,13 +2768,19 @@ void MacroAssembler::CallCFunction(Register function, int num_reg_arguments, } #endif + Label get_pc; + if (set_isolate_data_slots == SetIsolateDataSlots::kYes) { + Register pc_scratch = r5; + Push(pc_scratch); + GetLabelAddress(pc_scratch, &get_pc); + // Save the frame pointer and PC so that the stack layout remains iterable, // even without an ExitFrame which normally exists between JS and C frames. // See x64 code for reasoning about how to address the isolate data fields. if (root_array_available()) { - str(pc, MemOperand(kRootRegister, - IsolateData::fast_c_call_caller_pc_offset())); + str(pc_scratch, MemOperand(kRootRegister, + IsolateData::fast_c_call_caller_pc_offset())); str(fp, MemOperand(kRootRegister, IsolateData::fast_c_call_caller_fp_offset())); } else { @@ -2783,19 +2790,24 @@ void MacroAssembler::CallCFunction(Register function, int num_reg_arguments, Move(addr_scratch, ExternalReference::fast_c_call_caller_pc_address(isolate())); - str(pc, MemOperand(addr_scratch)); + str(pc_scratch, MemOperand(addr_scratch)); Move(addr_scratch, ExternalReference::fast_c_call_caller_fp_address(isolate())); str(fp, MemOperand(addr_scratch)); Pop(addr_scratch); } + + Pop(pc_scratch); } // Just call directly. The function called cannot cause a GC, or // allow preemption, so the return address in the link register // stays correct. 
Call(function); + int call_pc_offset = pc_offset(); + bind(&get_pc); + if (return_label) bind(return_label); if (set_isolate_data_slots == SetIsolateDataSlots::kYes) { // We don't unset the PC; the FP is the source of truth. @@ -2827,17 +2839,22 @@ void MacroAssembler::CallCFunction(Register function, int num_reg_arguments, } else { add(sp, sp, Operand(stack_passed_arguments * kPointerSize)); } + + return call_pc_offset; } -void MacroAssembler::CallCFunction(ExternalReference function, - int num_arguments, - SetIsolateDataSlots set_isolate_data_slots) { - CallCFunction(function, num_arguments, 0, set_isolate_data_slots); +int MacroAssembler::CallCFunction(ExternalReference function, int num_arguments, + SetIsolateDataSlots set_isolate_data_slots, + Label* return_label) { + return CallCFunction(function, num_arguments, 0, set_isolate_data_slots, + return_label); } -void MacroAssembler::CallCFunction(Register function, int num_arguments, - SetIsolateDataSlots set_isolate_data_slots) { - CallCFunction(function, num_arguments, 0, set_isolate_data_slots); +int MacroAssembler::CallCFunction(Register function, int num_arguments, + SetIsolateDataSlots set_isolate_data_slots, + Label* return_label) { + return CallCFunction(function, num_arguments, 0, set_isolate_data_slots, + return_label); } void MacroAssembler::CheckPageFlag(Register object, int mask, Condition cc, @@ -2848,7 +2865,7 @@ void MacroAssembler::CheckPageFlag(Register object, int mask, Condition cc, DCHECK(!AreAliased(object, scratch)); DCHECK(cc == eq || cc == ne); Bfc(scratch, object, 0, kPageSizeBits); - ldr(scratch, MemOperand(scratch, BasicMemoryChunk::kFlagsOffset)); + ldr(scratch, MemOperand(scratch, MemoryChunkLayout::kFlagsOffset)); tst(scratch, Operand(mask)); b(cc, condition_met); } diff --git a/deps/v8/src/codegen/arm/macro-assembler-arm.h b/deps/v8/src/codegen/arm/macro-assembler-arm.h index c8c80f06629c07..91d4dd7810252b 100644 --- a/deps/v8/src/codegen/arm/macro-assembler-arm.h +++ b/deps/v8/src/codegen/arm/macro-assembler-arm.h @@ -252,23 +252,23 @@ class V8_EXPORT_PRIVATE MacroAssembler : public MacroAssemblerBase { // garbage collection, since that might move the code and invalidate the // return address (unless this is somehow accounted for by the called // function). 
- enum class SetIsolateDataSlots { - kNo, - kYes, - }; - void CallCFunction( + int CallCFunction( ExternalReference function, int num_arguments, - SetIsolateDataSlots set_isolate_data_slots = SetIsolateDataSlots::kYes); - void CallCFunction( + SetIsolateDataSlots set_isolate_data_slots = SetIsolateDataSlots::kYes, + Label* return_label = nullptr); + int CallCFunction( Register function, int num_arguments, - SetIsolateDataSlots set_isolate_data_slots = SetIsolateDataSlots::kYes); - void CallCFunction( + SetIsolateDataSlots set_isolate_data_slots = SetIsolateDataSlots::kYes, + Label* return_label = nullptr); + int CallCFunction( ExternalReference function, int num_reg_arguments, int num_double_arguments, - SetIsolateDataSlots set_isolate_data_slots = SetIsolateDataSlots::kYes); - void CallCFunction( + SetIsolateDataSlots set_isolate_data_slots = SetIsolateDataSlots::kYes, + Label* return_label = nullptr); + int CallCFunction( Register function, int num_reg_arguments, int num_double_arguments, - SetIsolateDataSlots set_isolate_data_slots = SetIsolateDataSlots::kYes); + SetIsolateDataSlots set_isolate_data_slots = SetIsolateDataSlots::kYes, + Label* return_label = nullptr); void MovFromFloatParameter(DwVfpRegister dst); void MovFromFloatResult(DwVfpRegister dst); @@ -337,7 +337,9 @@ class V8_EXPORT_PRIVATE MacroAssembler : public MacroAssemblerBase { void TailCallBuiltin(Builtin builtin, Condition cond = al); // Load the code entry point from the Code object. - void LoadCodeInstructionStart(Register destination, Register code_object); + void LoadCodeInstructionStart( + Register destination, Register code_object, + CodeEntrypointTag tag = kDefaultCodeEntrypointTag); void CallCodeObject(Register code_object); void JumpCodeObject(Register code_object, JumpMode jump_mode = JumpMode::kJump); @@ -875,7 +877,8 @@ class V8_EXPORT_PRIVATE MacroAssembler : public MacroAssemblerBase { // Tiering support. 
void AssertFeedbackCell(Register object, Register scratch) NOOP_UNLESS_DEBUG_CODE; - void AssertFeedbackVector(Register object) NOOP_UNLESS_DEBUG_CODE; + void AssertFeedbackVector(Register object, + Register scratch) NOOP_UNLESS_DEBUG_CODE; void ReplaceClosureCodeWithOptimizedCode(Register optimized_code, Register closure); void GenerateTailCallToReturnedCode(Runtime::FunctionId function_id); diff --git a/deps/v8/src/codegen/arm64/interface-descriptors-arm64-inl.h b/deps/v8/src/codegen/arm64/interface-descriptors-arm64-inl.h index 1cbfd20744973c..0502dc16737acf 100644 --- a/deps/v8/src/codegen/arm64/interface-descriptors-arm64-inl.h +++ b/deps/v8/src/codegen/arm64/interface-descriptors-arm64-inl.h @@ -154,6 +154,11 @@ constexpr Register MaglevOptimizeCodeOrTailCallOptimizedCodeSlotDescriptor:: FeedbackVectorRegister() { return x9; } +// static +constexpr Register +MaglevOptimizeCodeOrTailCallOptimizedCodeSlotDescriptor::TemporaryRegister() { + return x4; +} // static constexpr auto TypeofDescriptor::registers() { return RegisterArray(x0); } diff --git a/deps/v8/src/codegen/arm64/macro-assembler-arm64.cc b/deps/v8/src/codegen/arm64/macro-assembler-arm64.cc index 20d8de3552b5c8..636dcdc874382c 100644 --- a/deps/v8/src/codegen/arm64/macro-assembler-arm64.cc +++ b/deps/v8/src/codegen/arm64/macro-assembler-arm64.cc @@ -1440,7 +1440,7 @@ void TailCallOptimizedCodeSlot(MacroAssembler* masm, __ ReplaceClosureCodeWithOptimizedCode(optimized_code_entry, closure); static_assert(kJavaScriptCallCodeStartRegister == x2, "ABI mismatch"); __ Move(x2, optimized_code_entry); - __ JumpCodeObject(x2); + __ JumpCodeObject(x2, kJSEntrypointTag); // Optimized code slot contains deoptimized code or code is cleared and // optimized code marker isn't updated. Evict the code, update the marker @@ -1507,7 +1507,7 @@ void MacroAssembler::GenerateTailCallToReturnedCode( } static_assert(kJavaScriptCallCodeStartRegister == x2, "ABI mismatch"); - JumpCodeObject(x2); + JumpCodeObject(x2, kJSEntrypointTag); } // Read off the flags in the feedback vector and check if there @@ -2061,31 +2061,37 @@ int MacroAssembler::ActivationFrameAlignment() { #endif // V8_HOST_ARCH_ARM64 } -void MacroAssembler::CallCFunction(ExternalReference function, - int num_of_reg_args, - SetIsolateDataSlots set_isolate_data_slots) { - CallCFunction(function, num_of_reg_args, 0, set_isolate_data_slots); +int MacroAssembler::CallCFunction(ExternalReference function, + int num_of_reg_args, + SetIsolateDataSlots set_isolate_data_slots, + Label* return_location) { + return CallCFunction(function, num_of_reg_args, 0, set_isolate_data_slots, + return_location); } -void MacroAssembler::CallCFunction(ExternalReference function, - int num_of_reg_args, int num_of_double_args, - SetIsolateDataSlots set_isolate_data_slots) { +int MacroAssembler::CallCFunction(ExternalReference function, + int num_of_reg_args, int num_of_double_args, + SetIsolateDataSlots set_isolate_data_slots, + Label* return_location) { // Note: The "CallCFunction" code comment will be generated by the other // CallCFunction method called below. 
UseScratchRegisterScope temps(this); Register temp = temps.AcquireX(); Mov(temp, function); - CallCFunction(temp, num_of_reg_args, num_of_double_args, - set_isolate_data_slots); + return CallCFunction(temp, num_of_reg_args, num_of_double_args, + set_isolate_data_slots, return_location); } -void MacroAssembler::CallCFunction(Register function, int num_of_reg_args, - int num_of_double_args, - SetIsolateDataSlots set_isolate_data_slots) { +int MacroAssembler::CallCFunction(Register function, int num_of_reg_args, + int num_of_double_args, + SetIsolateDataSlots set_isolate_data_slots, + Label* return_location) { ASM_CODE_COMMENT(this); DCHECK_LE(num_of_reg_args + num_of_double_args, kMaxCParameters); DCHECK(has_frame()); + Label get_pc; + if (set_isolate_data_slots == SetIsolateDataSlots::kYes) { // Save the frame pointer and PC so that the stack layout remains iterable, // even without an ExitFrame which normally exists between JS and C frames. @@ -2093,8 +2099,6 @@ void MacroAssembler::CallCFunction(Register function, int num_of_reg_args, Register addr_scratch = x5; Push(pc_scratch, addr_scratch); - Label get_pc; - Bind(&get_pc); Adr(pc_scratch, &get_pc); // See x64 code for reasoning about how to address the isolate data fields. @@ -2119,6 +2123,9 @@ void MacroAssembler::CallCFunction(Register function, int num_of_reg_args, // Call directly. The function called cannot cause a GC, or allow preemption, // so the return address in the link register stays correct. Call(function); + int call_pc_offset = pc_offset(); + bind(&get_pc); + if (return_location) bind(return_location); if (set_isolate_data_slots == SetIsolateDataSlots::kYes) { // We don't unset the PC; the FP is the source of truth. @@ -2148,6 +2155,8 @@ void MacroAssembler::CallCFunction(Register function, int num_of_reg_args, RoundUp(num_of_double_args - kFPRegisterPassedArguments, 2); Drop(claim_slots); } + + return call_pc_offset; } void MacroAssembler::LoadFromConstantsTable(Register destination, @@ -2459,27 +2468,30 @@ void MacroAssembler::TailCallBuiltin(Builtin builtin, Condition cond) { } void MacroAssembler::LoadCodeInstructionStart(Register destination, - Register code_object) { + Register code_object, + CodeEntrypointTag tag) { ASM_CODE_COMMENT(this); #ifdef V8_ENABLE_SANDBOX LoadCodeEntrypointViaCodePointer( destination, - FieldMemOperand(code_object, Code::kSelfIndirectPointerOffset)); + FieldMemOperand(code_object, Code::kSelfIndirectPointerOffset), tag); #else Ldr(destination, FieldMemOperand(code_object, Code::kInstructionStartOffset)); #endif } -void MacroAssembler::CallCodeObject(Register code_object) { +void MacroAssembler::CallCodeObject(Register code_object, + CodeEntrypointTag tag) { ASM_CODE_COMMENT(this); - LoadCodeInstructionStart(code_object, code_object); + LoadCodeInstructionStart(code_object, code_object, tag); Call(code_object); } -void MacroAssembler::JumpCodeObject(Register code_object, JumpMode jump_mode) { +void MacroAssembler::JumpCodeObject(Register code_object, CodeEntrypointTag tag, + JumpMode jump_mode) { ASM_CODE_COMMENT(this); DCHECK_EQ(JumpMode::kJump, jump_mode); - LoadCodeInstructionStart(code_object, code_object); + LoadCodeInstructionStart(code_object, code_object, tag); // We jump through x17 here because for Branch Identification (BTI) we use // "Call" (`bti c`) rather than "Jump" (`bti j`) landing pads for tail-called // code. See TailCallBuiltin for more information. 
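Aside: the CallCFunction changes in this file (and the arm variant earlier) all follow the same shape — stash the caller's PC and FP in IsolateData before a direct C call, so the stack stays walkable without an ExitFrame, then return the pc_offset of the instruction after the call. The following is a minimal standalone model of that contract only; the struct and function names are invented for illustration and deliberately avoid V8's real MacroAssembler/IsolateData types.

#include <cstdint>

// Stand-ins for the two IsolateData slots written by CallCFunction; the
// real slot names/offsets come from IsolateData, these are illustrative.
struct FastCCallSlots {
  uintptr_t caller_pc = 0;  // models fast_c_call_caller_pc
  uintptr_t caller_fp = 0;  // models fast_c_call_caller_fp
};

template <typename Fn>
int CallCFunctionModel(FastCCallSlots& slots, uintptr_t caller_fp,
                       uintptr_t return_pc, int pc_offset_after_call,
                       Fn&& fn) {
  // Publish where the call will return to and which frame made it, so a
  // sampling profiler can bridge the frameless C call.
  slots.caller_pc = return_pc;
  slots.caller_fp = caller_fp;
  fn();  // the C call itself; it must not trigger GC
  // Afterwards only the FP is reset: it is the source of truth for
  // "are we inside a fast C call", and the stale PC is ignored once FP is 0.
  slots.caller_fp = 0;
  // The new int return value: the offset just past the call instruction,
  // which callers can record for later bookkeeping.
  return pc_offset_after_call;
}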
@@ -2496,12 +2508,13 @@ void MacroAssembler::CallJSFunction(Register function_object) { // from the code pointer table instead of going through the Code object. In // this way, we avoid one memory load on this code path. LoadCodeEntrypointViaCodePointer( - code, FieldMemOperand(function_object, JSFunction::kCodeOffset)); + code, FieldMemOperand(function_object, JSFunction::kCodeOffset), + kJSEntrypointTag); Call(code); #else LoadTaggedField(code, FieldMemOperand(function_object, JSFunction::kCodeOffset)); - CallCodeObject(code); + CallCodeObject(code, kJSEntrypointTag); #endif } @@ -2513,7 +2526,8 @@ void MacroAssembler::JumpJSFunction(Register function_object, // from the code pointer table instead of going through the Code object. In // this way, we avoid one memory load on this code path. LoadCodeEntrypointViaCodePointer( - code, FieldMemOperand(function_object, JSFunction::kCodeOffset)); + code, FieldMemOperand(function_object, JSFunction::kCodeOffset), + kJSEntrypointTag); DCHECK_EQ(jump_mode, JumpMode::kJump); // We jump through x17 here because for Branch Identification (BTI) we use // "Call" (`bti c`) rather than "Jump" (`bti j`) landing pads for tail-called @@ -2524,7 +2538,7 @@ void MacroAssembler::JumpJSFunction(Register function_object, #else LoadTaggedField(code, FieldMemOperand(function_object, JSFunction::kCodeOffset)); - JumpCodeObject(code, jump_mode); + JumpCodeObject(code, kJSEntrypointTag, jump_mode); #endif } @@ -3479,8 +3493,8 @@ void MacroAssembler::CheckPageFlag(const Register& object, int mask, ASM_CODE_COMMENT(this); UseScratchRegisterScope temps(this); Register scratch = temps.AcquireX(); - And(scratch, object, ~kPageAlignmentMask); - Ldr(scratch, MemOperand(scratch, BasicMemoryChunk::kFlagsOffset)); + And(scratch, object, ~MemoryChunkHeader::GetAlignmentMaskForAssembler()); + Ldr(scratch, MemOperand(scratch, MemoryChunkLayout::kFlagsOffset)); if (cc == ne) { TestAndBranchIfAnySet(scratch, mask, condition_met); } else { @@ -3705,17 +3719,23 @@ void MacroAssembler::ResolveCodePointerHandle(Register destination, Orr(destination, destination, Immediate(kHeapObjectTag)); } -void MacroAssembler::LoadCodeEntrypointViaCodePointer( - Register destination, MemOperand field_operand) { +void MacroAssembler::LoadCodeEntrypointViaCodePointer(Register destination, + MemOperand field_operand, + CodeEntrypointTag tag) { + DCHECK_NE(tag, kInvalidEntrypointTag); ASM_CODE_COMMENT(this); UseScratchRegisterScope temps(this); - Register table = temps.AcquireX(); - Mov(table, ExternalReference::code_pointer_table_address()); + Register scratch = temps.AcquireX(); + Mov(scratch, ExternalReference::code_pointer_table_address()); Ldr(destination.W(), field_operand); // TODO(saelo): can the offset computation be done more efficiently? 
Mov(destination, Operand(destination, LSR, kCodePointerHandleShift)); Mov(destination, Operand(destination, LSL, kCodePointerTableEntrySizeLog2)); - Ldr(destination, MemOperand(table, destination)); + Ldr(destination, MemOperand(scratch, destination)); + if (tag != 0) { + Mov(scratch, Immediate(tag)); + Eor(destination, destination, scratch); + } } #endif // V8_ENABLE_SANDBOX diff --git a/deps/v8/src/codegen/arm64/macro-assembler-arm64.h b/deps/v8/src/codegen/arm64/macro-assembler-arm64.h index e17dc8f6ee8792..b861b0dbe9b86a 100644 --- a/deps/v8/src/codegen/arm64/macro-assembler-arm64.h +++ b/deps/v8/src/codegen/arm64/macro-assembler-arm64.h @@ -1063,9 +1063,10 @@ class V8_EXPORT_PRIVATE MacroAssembler : public MacroAssemblerBase { void TailCallBuiltin(Builtin builtin, Condition cond = al); // Load code entry point from the Code object. - void LoadCodeInstructionStart(Register destination, Register code_object); - void CallCodeObject(Register code_object); - void JumpCodeObject(Register code_object, + void LoadCodeInstructionStart(Register destination, Register code_object, + CodeEntrypointTag tag); + void CallCodeObject(Register code_object, CodeEntrypointTag tag); + void JumpCodeObject(Register code_object, CodeEntrypointTag tag, JumpMode jump_mode = JumpMode::kJump); // Convenience functions to call/jmp to the code of a JSFunction object. @@ -1083,25 +1084,24 @@ class V8_EXPORT_PRIVATE MacroAssembler : public MacroAssemblerBase { DeoptimizeKind kind, Label* ret, Label* jump_deoptimization_entry_label); - // Calls a C function. - // The called function is not allowed to trigger a + // Calls a C function and cleans up the space for arguments allocated + // by PrepareCallCFunction. The called function is not allowed to trigger a // garbage collection, since that might move the code and invalidate the // return address (unless this is somehow accounted for by the called // function). - enum class SetIsolateDataSlots { - kNo, - kYes, - }; - void CallCFunction( + int CallCFunction( ExternalReference function, int num_reg_arguments, - SetIsolateDataSlots set_isolate_data_slots = SetIsolateDataSlots::kYes); - void CallCFunction( + SetIsolateDataSlots set_isolate_data_slots = SetIsolateDataSlots::kYes, + Label* return_location = nullptr); + int CallCFunction( ExternalReference function, int num_reg_arguments, int num_double_arguments, - SetIsolateDataSlots set_isolate_data_slots = SetIsolateDataSlots::kYes); - void CallCFunction( + SetIsolateDataSlots set_isolate_data_slots = SetIsolateDataSlots::kYes, + Label* return_location = nullptr); + int CallCFunction( Register function, int num_reg_arguments, int num_double_arguments, - SetIsolateDataSlots set_isolate_data_slots = SetIsolateDataSlots::kYes); + SetIsolateDataSlots set_isolate_data_slots = SetIsolateDataSlots::kYes, + Label* return_location = nullptr); // Performs a truncating conversion of a floating point number as used by // the JS bitwise operations. See ECMA-262 9.5: ToInt32. @@ -1632,7 +1632,8 @@ class V8_EXPORT_PRIVATE MacroAssembler : public MacroAssemblerBase { // Only available when the sandbox is enabled as it requires the code pointer // table. void LoadCodeEntrypointViaCodePointer(Register destination, - MemOperand field_operand); + MemOperand field_operand, + CodeEntrypointTag tag); #endif // Load a protected pointer field. 
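Aside: the Eor at the end of LoadCodeEntrypointViaCodePointer (and the matching WordXor in the CSA version further down) implement entrypoint tagging: the code pointer table stores the entrypoint combined with a CodeEntrypointTag, so a reader that expects the wrong tag recovers an unusable address. A self-contained sketch of that scheme, with placeholder tag values since the real kJSEntrypointTag/kWasmEntrypointTag constants are not reproduced here:

#include <cassert>
#include <cstdint>

using Address = uint64_t;

// Placeholder tags for illustration only.
constexpr Address kFakeJSEntrypointTag = Address{1} << 60;
constexpr Address kFakeWasmEntrypointTag = Address{2} << 60;

// Writer side: the table entry is the entrypoint combined with its tag.
Address MakeTaggedEntry(Address entrypoint, Address tag) {
  return entrypoint ^ tag;
}

// Reader side: mirrors the Eor/WordXor above. Only the expected tag
// recovers a usable entrypoint; a mismatch yields a garbage address.
Address LoadEntrypointViaCodePointer(Address table_entry, Address expected_tag) {
  return table_entry ^ expected_tag;
}

int main() {
  Address entrypoint = 0x7f0012345678;
  Address entry = MakeTaggedEntry(entrypoint, kFakeJSEntrypointTag);
  assert(LoadEntrypointViaCodePointer(entry, kFakeJSEntrypointTag) == entrypoint);
  assert(LoadEntrypointViaCodePointer(entry, kFakeWasmEntrypointTag) != entrypoint);
  return 0;
}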
diff --git a/deps/v8/src/codegen/code-stub-assembler.cc b/deps/v8/src/codegen/code-stub-assembler.cc index ec9f176f342d64..cc80f7aaccacb3 100644 --- a/deps/v8/src/codegen/code-stub-assembler.cc +++ b/deps/v8/src/codegen/code-stub-assembler.cc @@ -1842,12 +1842,12 @@ void CodeStubAssembler::StoreExternalPointerToObject(TNode object, #endif // V8_ENABLE_SANDBOX } -TNode CodeStubAssembler::LoadTrustedPointerFromObject( +TNode CodeStubAssembler::LoadTrustedPointerFromObject( TNode object, int field_offset, IndirectPointerTag tag) { #ifdef V8_ENABLE_SANDBOX return LoadIndirectPointerFromObject(object, field_offset, tag); #else - return LoadObjectField(object, field_offset); + return LoadObjectField(object, field_offset); #endif // V8_ENABLE_SANDBOX } @@ -1858,7 +1858,7 @@ TNode CodeStubAssembler::LoadCodePointerFromObject( } #ifdef V8_ENABLE_SANDBOX -TNode CodeStubAssembler::LoadIndirectPointerFromObject( +TNode CodeStubAssembler::LoadIndirectPointerFromObject( TNode object, int field_offset, IndirectPointerTag tag) { TNode handle = LoadObjectField(object, field_offset); @@ -1871,13 +1871,13 @@ TNode CodeStubAssembler::IsTrustedPointerHandle( Int32Constant(0)); } -TNode CodeStubAssembler::ResolveIndirectPointerHandle( +TNode CodeStubAssembler::ResolveIndirectPointerHandle( TNode handle, IndirectPointerTag tag) { // The tag implies which pointer table to use. if (tag == kUnknownIndirectPointerTag) { // In this case we have to rely on the handle marking to determine which // pointer table to use. - return Select( + return Select( IsTrustedPointerHandle(handle), [=] { return ResolveTrustedPointerHandle(handle, tag); }, [=] { return ResolveCodePointerHandle(handle); }); @@ -1903,7 +1903,7 @@ TNode CodeStubAssembler::ResolveCodePointerHandle( return UncheckedCast(BitcastWordToTagged(value)); } -TNode CodeStubAssembler::ResolveTrustedPointerHandle( +TNode CodeStubAssembler::ResolveTrustedPointerHandle( TNode handle, IndirectPointerTag tag) { TNode table = ExternalConstant( ExternalReference::trusted_pointer_table_base_address(isolate())); @@ -1919,7 +1919,7 @@ TNode CodeStubAssembler::ResolveTrustedPointerHandle( // to set it using a bitwise OR as it may or may not be set. 
value = UncheckedCast(WordOr(value, UintPtrConstant(kHeapObjectTag))); - return UncheckedCast(BitcastWordToTagged(value)); + return UncheckedCast(BitcastWordToTagged(value)); } TNode CodeStubAssembler::ComputeCodePointerTableEntryOffset( @@ -1935,16 +1935,36 @@ TNode CodeStubAssembler::ComputeCodePointerTableEntryOffset( } TNode CodeStubAssembler::LoadCodeEntrypointViaCodePointerField( - TNode object, TNode field_offset) { + TNode object, TNode field_offset, + CodeEntrypointTag tag) { TNode handle = LoadObjectField(object, field_offset); TNode table = ExternalConstant(ExternalReference::code_pointer_table_address()); TNode offset = ComputeCodePointerTableEntryOffset(handle); - return Load(table, offset); + TNode entry = Load(table, offset); + if (tag != 0) { + entry = UncheckedCast(WordXor(entry, UintPtrConstant(tag))); + } + return UncheckedCast(UncheckedCast(entry)); } #endif // V8_ENABLE_SANDBOX +TNode CodeStubAssembler::LoadProtectedPointerFromObject( + TNode object, int offset) { +#ifdef V8_ENABLE_SANDBOX + TNode trusted_cage_base = LoadPointerFromRootRegister( + IntPtrConstant(IsolateData::trusted_cage_base_offset())); + TNode offset_from_cage_base = + ChangeUint32ToWord(LoadObjectField(object, offset)); + TNode pointer = + UncheckedCast(WordOr(trusted_cage_base, offset_from_cage_base)); + return UncheckedCast(BitcastWordToTagged(pointer)); +#else + return LoadObjectField(object, offset); +#endif +} + TNode CodeStubAssembler::LoadFromParentFrame(int offset) { TNode frame_pointer = LoadParentFramePointer(); return LoadFullTagged(frame_pointer, IntPtrConstant(offset)); @@ -2099,13 +2119,8 @@ TNode CodeStubAssembler::LoadSlowProperties( }; NodeGenerator cast_properties = [=] { TNode dict = CAST(properties); - if constexpr (V8_ENABLE_SWISS_NAME_DICTIONARY_BOOL) { - CSA_DCHECK(this, Word32Or(IsSwissNameDictionary(dict), - IsGlobalDictionary(dict))); - } else { - CSA_DCHECK(this, - Word32Or(IsNameDictionary(dict), IsGlobalDictionary(dict))); - } + CSA_DCHECK(this, + Word32Or(IsPropertyDictionary(dict), IsGlobalDictionary(dict))); return dict; }; return Select(TaggedIsSmi(properties), make_empty, @@ -2640,6 +2655,12 @@ TNode CodeStubAssembler::LoadArrayLength( return LoadAndUntagWeakFixedArrayLength(array); } +template <> +TNode CodeStubAssembler::LoadArrayLength( + TNode array) { + return SmiUntag(LoadArrayCapacity(array)); +} + template TNode CodeStubAssembler::LoadArrayElement(TNode array, int array_header_size, @@ -2673,6 +2694,9 @@ template V8_EXPORT_PRIVATE TNode CodeStubAssembler::LoadArrayElement< template V8_EXPORT_PRIVATE TNode CodeStubAssembler::LoadArrayElement( TNode, int, TNode, int); +template V8_EXPORT_PRIVATE TNode +CodeStubAssembler::LoadArrayElement( + TNode, int, TNode, int); template TNode CodeStubAssembler::LoadFixedArrayElement( @@ -3468,27 +3492,17 @@ TNode CodeStubAssembler::LoadSharedFunctionInfoBytecodeArray( this, Word32Equal(DecodeWord32(code_flags), Int32Constant(static_cast(CodeKind::BASELINE)))); #endif // DEBUG - TNode baseline_data = LoadObjectField( + TNode baseline_data = LoadProtectedPointerFromObject( code, Code::kDeoptimizationDataOrInterpreterDataOffset); var_result = baseline_data; - // As long as InterpreterData objects still live inside the sandbox, Code - // references BytecodeArrays through their in-sandbox wrapper object. 
- static_assert(!kInterpreterDataObjectsLiveInTrustedSpace); - GotoIfNot(HasInstanceType(var_result.value(), BYTECODE_WRAPPER_TYPE), - &check_for_interpreter_data); - TNode bytecode = LoadTrustedPointerFromObject( - var_result.value(), BytecodeWrapper::kBytecodeOffset, - kBytecodeArrayIndirectPointerTag); - var_result = bytecode; - Goto(&done); } + Goto(&check_for_interpreter_data); BIND(&check_for_interpreter_data); GotoIfNot(HasInstanceType(var_result.value(), INTERPRETER_DATA_TYPE), &done); - TNode bytecode_array = CAST(LoadTrustedPointerFromObject( - var_result.value(), InterpreterData::kBytecodeArrayOffset, - kBytecodeArrayIndirectPointerTag)); + TNode bytecode_array = CAST(LoadProtectedPointerFromObject( + CAST(var_result.value()), InterpreterData::kBytecodeArrayOffset)); var_result = bytecode_array; Goto(&done); @@ -4229,6 +4243,40 @@ TNode CodeStubAssembler::AllocateNameDictionaryWithCapacity( return result; } +TNode CodeStubAssembler::AllocatePropertyDictionary( + int at_least_space_for) { + TNode dict; + if constexpr (V8_ENABLE_SWISS_NAME_DICTIONARY_BOOL) { + dict = AllocateSwissNameDictionary(at_least_space_for); + } else { + dict = AllocateNameDictionary(at_least_space_for); + } + return TNode::UncheckedCast(dict); +} + +TNode CodeStubAssembler::AllocatePropertyDictionary( + TNode at_least_space_for, AllocationFlags flags) { + TNode dict; + if constexpr (V8_ENABLE_SWISS_NAME_DICTIONARY_BOOL) { + dict = AllocateSwissNameDictionary(at_least_space_for); + } else { + dict = AllocateNameDictionary(at_least_space_for, flags); + } + return TNode::UncheckedCast(dict); +} + +TNode +CodeStubAssembler::AllocatePropertyDictionaryWithCapacity( + TNode capacity, AllocationFlags flags) { + TNode dict; + if constexpr (V8_ENABLE_SWISS_NAME_DICTIONARY_BOOL) { + dict = AllocateSwissNameDictionaryWithCapacity(capacity); + } else { + dict = AllocateNameDictionaryWithCapacity(capacity, flags); + } + return TNode::UncheckedCast(dict); +} + TNode CodeStubAssembler::CopyNameDictionary( TNode dictionary, Label* large_object_fallback) { Comment("Copy boilerplate property dict"); @@ -4423,9 +4471,8 @@ void CodeStubAssembler::InitializeJSObjectFromMap( StoreObjectFieldRoot(object, JSObject::kPropertiesOrHashOffset, RootIndex::kEmptyFixedArray); } else { - CSA_DCHECK(this, Word32Or(Word32Or(Word32Or(IsPropertyArray(*properties), - IsNameDictionary(*properties)), - IsSwissNameDictionary(*properties)), + CSA_DCHECK(this, Word32Or(Word32Or(IsPropertyArray(*properties), + IsPropertyDictionary(*properties)), IsEmptyFixedArray(*properties))); StoreObjectFieldNoWriteBarrier(object, JSObject::kPropertiesOrHashOffset, *properties); @@ -5013,9 +5060,9 @@ TNode CodeStubAssembler::ExtractToFixedArray( #ifndef V8_ENABLE_SINGLE_GENERATION #ifdef DEBUG TNode object_word = BitcastTaggedToWord(to_elements); - TNode object_page = PageFromAddress(object_word); - TNode page_flags = - Load(object_page, IntPtrConstant(Page::kFlagsOffset)); + TNode object_page_header = PageHeaderFromAddress(object_word); + TNode page_flags = Load( + object_page_header, IntPtrConstant(MemoryChunkLayout::kFlagsOffset)); CSA_DCHECK( this, WordNotEqual( @@ -5414,9 +5461,10 @@ void CodeStubAssembler::JumpIfPointersFromHereAreInteresting( TNode object, Label* interesting) { Label finished(this); TNode object_word = BitcastTaggedToWord(object); - TNode object_page = PageFromAddress(object_word); - TNode page_flags = UncheckedCast(Load( - MachineType::IntPtr(), object_page, IntPtrConstant(Page::kFlagsOffset))); + TNode object_page_header = 
PageHeaderFromAddress(object_word); + TNode page_flags = UncheckedCast( + Load(MachineType::IntPtr(), object_page_header, + IntPtrConstant(MemoryChunkLayout::kFlagsOffset))); Branch( WordEqual(WordAnd(page_flags, IntPtrConstant( @@ -7617,19 +7665,15 @@ TNode CodeStubAssembler::IsEphemeronHashTable(TNode object) { return HasInstanceType(object, EPHEMERON_HASH_TABLE_TYPE); } -TNode CodeStubAssembler::IsNameDictionary(TNode object) { - return HasInstanceType(object, NAME_DICTIONARY_TYPE); +TNode CodeStubAssembler::IsPropertyDictionary(TNode object) { + return HasInstanceType(object, PROPERTY_DICTIONARY_TYPE); } + TNode CodeStubAssembler::IsOrderedNameDictionary( TNode object) { return HasInstanceType(object, ORDERED_NAME_DICTIONARY_TYPE); } -TNode CodeStubAssembler::IsSwissNameDictionary( - TNode object) { - return HasInstanceType(object, SWISS_NAME_DICTIONARY_TYPE); -} - TNode CodeStubAssembler::IsGlobalDictionary(TNode object) { return HasInstanceType(object, GLOBAL_DICTIONARY_TYPE); } @@ -7823,10 +7867,10 @@ TNode CodeStubAssembler::IsNumberArrayIndex(TNode number) { TNode CodeStubAssembler::LoadBasicMemoryChunkFlags( TNode object) { TNode object_word = BitcastTaggedToWord(object); - TNode page = PageFromAddress(object_word); + TNode page_header = PageHeaderFromAddress(object_word); return UncheckedCast( - Load(MachineType::Pointer(), page, - IntPtrConstant(BasicMemoryChunk::kFlagsOffset))); + Load(MachineType::Pointer(), page_header, + IntPtrConstant(MemoryChunkLayout::kFlagsOffset))); } template @@ -9410,7 +9454,8 @@ TNode CodeStubAssembler::NameToIndexHashTableLookup( template void CodeStubAssembler::NameDictionaryLookup( TNode dictionary, TNode unique_name, Label* if_found, - TVariable* var_name_index, Label* if_not_found, LookupMode mode) { + TVariable* var_name_index, Label* if_not_found_no_insertion_index, + LookupMode mode, Label* if_not_found_with_insertion_index) { static_assert(std::is_same::value || std::is_same::value || std::is_same::value, @@ -9418,8 +9463,13 @@ void CodeStubAssembler::NameDictionaryLookup( DCHECK_IMPLIES(var_name_index != nullptr, MachineType::PointerRepresentation() == var_name_index->rep()); DCHECK_IMPLIES(mode == kFindInsertionIndex, if_found == nullptr); + DCHECK_IMPLIES(if_not_found_with_insertion_index != nullptr, + var_name_index != nullptr); Comment("NameDictionaryLookup"); CSA_DCHECK(this, IsUniqueName(unique_name)); + if (if_not_found_with_insertion_index == nullptr) { + if_not_found_with_insertion_index = if_not_found_no_insertion_index; + } Label if_not_computed(this, Label::kDeferred); @@ -9453,16 +9503,17 @@ void CodeStubAssembler::NameDictionaryLookup( TNode current = CAST(UnsafeLoadFixedArrayElement(dictionary, index)); - GotoIf(TaggedEqual(current, undefined), if_not_found); + GotoIf(TaggedEqual(current, undefined), if_not_found_with_insertion_index); if (mode == kFindExisting) { - if (Dictionary::ShapeT::kMatchNeedsHoleCheck) { + if (Dictionary::TodoShape::kMatchNeedsHoleCheck) { GotoIf(TaggedEqual(current, TheHoleConstant()), &next_probe); } current = LoadName(current); GotoIf(TaggedEqual(current, unique_name), if_found); } else { DCHECK_EQ(kFindInsertionIndex, mode); - GotoIf(TaggedEqual(current, TheHoleConstant()), if_not_found); + GotoIf(TaggedEqual(current, TheHoleConstant()), + if_not_found_with_insertion_index); } Goto(&next_probe); @@ -9507,14 +9558,19 @@ void CodeStubAssembler::NameDictionaryLookup( std::make_pair(MachineType::Pointer(), isolate_ptr), std::make_pair(MachineType::TaggedPointer(), dictionary), 
std::make_pair(MachineType::TaggedPointer(), unique_name))); + if (var_name_index) *var_name_index = EntryToIndex(entry); if (mode == kFindExisting) { GotoIf(IntPtrEqual(entry, IntPtrConstant(InternalIndex::NotFound().raw_value())), - if_not_found); + if_not_found_no_insertion_index); Goto(if_found); } else { - Goto(if_not_found); + CSA_DCHECK( + this, + WordNotEqual(entry, + IntPtrConstant(InternalIndex::NotFound().raw_value()))); + Goto(if_not_found_with_insertion_index); } } } @@ -9524,10 +9580,11 @@ template V8_EXPORT_PRIVATE void CodeStubAssembler::NameDictionaryLookup(TNode, TNode, Label*, TVariable*, - Label*, LookupMode); + Label*, LookupMode, + Label*); template V8_EXPORT_PRIVATE void CodeStubAssembler::NameDictionaryLookup< GlobalDictionary>(TNode, TNode, Label*, - TVariable*, Label*, LookupMode); + TVariable*, Label*, LookupMode, Label*); TNode CodeStubAssembler::ComputeSeededHash(TNode key) { const TNode function_addr = @@ -9547,10 +9604,13 @@ TNode CodeStubAssembler::ComputeSeededHash(TNode key) { template <> void CodeStubAssembler::NameDictionaryLookup( TNode dictionary, TNode unique_name, - Label* if_found, TVariable* var_name_index, Label* if_not_found, - LookupMode mode) { + Label* if_found, TVariable* var_name_index, + Label* if_not_found_no_insertion_index, LookupMode mode, + Label* if_not_found_with_insertion_index) { + // TODO(pthier): Support path for not found with valid insertion index for + // SwissNameDictionary. SwissNameDictionaryFindEntry(dictionary, unique_name, if_found, - var_name_index, if_not_found); + var_name_index, if_not_found_no_insertion_index); } void CodeStubAssembler::NumberDictionaryLookup( @@ -9707,9 +9767,9 @@ void CodeStubAssembler::InsertEntry( } template -void CodeStubAssembler::AddToDictionary(TNode dictionary, - TNode key, TNode value, - Label* bailout) { +void CodeStubAssembler::AddToDictionary( + TNode dictionary, TNode key, TNode value, + Label* bailout, base::Optional> insertion_index) { CSA_DCHECK(this, Word32BinaryNot(IsEmptyPropertyDictionary(dictionary))); TNode capacity = GetCapacity(dictionary); TNode nof = GetNumberOfElements(dictionary); @@ -9737,16 +9797,21 @@ void CodeStubAssembler::AddToDictionary(TNode dictionary, SetNextEnumerationIndex(dictionary, new_enum_index); SetNumberOfElements(dictionary, new_nof); - TVARIABLE(IntPtrT, var_key_index); - FindInsertionEntry(dictionary, key, &var_key_index); - InsertEntry(dictionary, key, value, var_key_index.value(), - enum_index); + if (insertion_index.has_value()) { + InsertEntry(dictionary, key, value, *insertion_index, + enum_index); + } else { + TVARIABLE(IntPtrT, var_key_index); + FindInsertionEntry(dictionary, key, &var_key_index); + InsertEntry(dictionary, key, value, var_key_index.value(), + enum_index); + } } template <> -void CodeStubAssembler::AddToDictionary(TNode dictionary, - TNode key, TNode value, - Label* bailout) { +void CodeStubAssembler::AddToDictionary( + TNode dictionary, TNode key, TNode value, + Label* bailout, base::Optional> insertion_index) { PropertyDetails d(PropertyKind::kData, NONE, PropertyDetails::kConstIfDictConstnessTracking); @@ -9765,11 +9830,13 @@ void CodeStubAssembler::AddToDictionary(TNode dictionary, Goto(¬_private); BIND(¬_private); + // TODO(pthier): Use insertion_index if it was provided. 
SwissNameDictionaryAdd(dictionary, key, value, var_details.value(), bailout); } template void CodeStubAssembler::AddToDictionary( - TNode, TNode, TNode, Label*); + TNode, TNode, TNode, Label*, + base::Optional>); template TNode CodeStubAssembler::GetNumberOfElements( @@ -11083,9 +11150,9 @@ TNode CodeStubAssembler::GetInterestingProperty( TNode properties = LoadObjectField(holder, JSObject::kPropertiesOrHashOffset); CSA_DCHECK(this, TaggedIsNotSmi(properties)); + CSA_DCHECK(this, IsPropertyDictionary(CAST(properties))); // TODO(pthier): Support swiss dictionaries. if constexpr (!V8_ENABLE_SWISS_NAME_DICTIONARY_BOOL) { - CSA_DCHECK(this, IsNameDictionary(CAST(properties))); TNode flags = GetNameDictionaryFlags(CAST(properties)); GotoIf(IsSetSmi(flags, @@ -12748,10 +12815,10 @@ void CodeStubAssembler::TrapAllocationMemento(TNode object, TNode object_word = BitcastTaggedToWord(object); // TODO(v8:11641): Skip TrapAllocationMemento when allocation-site // tracking is disabled. - TNode object_page = PageFromAddress(object_word); + TNode object_page_header = PageHeaderFromAddress(object_word); { - TNode page_flags = - Load(object_page, IntPtrConstant(Page::kFlagsOffset)); + TNode page_flags = Load( + object_page_header, IntPtrConstant(MemoryChunkLayout::kFlagsOffset)); GotoIf(WordEqual( WordAnd(page_flags, IntPtrConstant(MemoryChunk::kIsInYoungGenerationMask)), @@ -12767,20 +12834,23 @@ void CodeStubAssembler::TrapAllocationMemento(TNode object, TNode memento_last_word = IntPtrAdd( object_word, IntPtrConstant(kMementoLastWordOffset - kHeapObjectTag)); - TNode memento_last_word_page = PageFromAddress(memento_last_word); + TNode memento_last_word_page_header = + PageHeaderFromAddress(memento_last_word); TNode new_space_top = Load(new_space_top_address); - TNode new_space_top_page = PageFromAddress(new_space_top); + TNode new_space_top_page_header = + PageHeaderFromAddress(new_space_top); // If the object is in new space, we need to check whether respective // potential memento object is on the same page as the current top. - GotoIf(WordEqual(memento_last_word_page, new_space_top_page), &top_check); + GotoIf(WordEqual(memento_last_word_page_header, new_space_top_page_header), + &top_check); // The object is on a different page than allocation top. Bail out if the // object sits on the page boundary as no memento can follow and we cannot // touch the memory following it. - Branch(WordEqual(object_page, memento_last_word_page), &map_check, - &no_memento_found); + Branch(WordEqual(object_page_header, memento_last_word_page_header), + &map_check, &no_memento_found); // If top is on the same page as the current object, we need to check whether // we are below top. 
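Aside: several hunks in this file replace Page/BasicMemoryChunk flag accesses with PageHeaderFromAddress plus MemoryChunkLayout::kFlagsOffset. The underlying trick is unchanged: chunks are power-of-two aligned, so the header holding the flags word is reachable by masking the low bits of any interior object address. A small sketch with made-up constants (the real mask and offset come from MemoryChunkHeader::GetAlignmentMaskForAssembler() and MemoryChunkLayout):

#include <cstdint>

// Illustrative constants, not V8's actual values.
constexpr uintptr_t kFakePageSizeBits = 18;
constexpr uintptr_t kFakeAlignmentMask = (uintptr_t{1} << kFakePageSizeBits) - 1;
constexpr uintptr_t kFakeFlagsOffset = 0x8;

// In spirit what PageHeaderFromAddress does: clearing the low bits of any
// address inside a chunk lands on the chunk header.
uintptr_t PageHeaderFromAddress(uintptr_t any_address_inside_chunk) {
  return any_address_inside_chunk & ~kFakeAlignmentMask;
}

// What CheckPageFlag-style code then does with the header address.
bool PageFlagSet(uintptr_t object_address, uintptr_t mask) {
  const uintptr_t* flags = reinterpret_cast<const uintptr_t*>(
      PageHeaderFromAddress(object_address) + kFakeFlagsOffset);
  return (*flags & mask) != 0;
}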
@@ -12804,9 +12874,22 @@ void CodeStubAssembler::TrapAllocationMemento(TNode object, Comment("] TrapAllocationMemento"); } +TNode CodeStubAssembler::PageHeaderFromAddress( + TNode address) { + DCHECK(!V8_ENABLE_THIRD_PARTY_HEAP_BOOL); + return WordAnd( + address, + IntPtrConstant(~MemoryChunkHeader::GetAlignmentMaskForAssembler())); +} + +TNode CodeStubAssembler::PageFromPageHeader(TNode address) { + DCHECK(!V8_ENABLE_THIRD_PARTY_HEAP_BOOL); + return address; +} + TNode CodeStubAssembler::PageFromAddress(TNode address) { DCHECK(!V8_ENABLE_THIRD_PARTY_HEAP_BOOL); - return WordAnd(address, IntPtrConstant(~kPageAlignmentMask)); + return PageFromPageHeader(PageHeaderFromAddress(address)); } TNode CodeStubAssembler::CreateAllocationSiteInFeedbackVector( @@ -13230,6 +13313,18 @@ TNode CodeStubAssembler::GotoIfHasContextExtensionUpToDepth( Goto(&context_search); BIND(&context_search); { +#if DEBUG + // Const tracking let data is stored in the extension slot of a + // ScriptContext - however, it's unrelated to the sloppy eval variable + // extension. We should never iterate through a ScriptContext here. + auto scope_info = LoadScopeInfo(cur_context.value()); + TNode flags = + LoadAndUntagToWord32ObjectField(scope_info, ScopeInfo::kFlagsOffset); + auto scope_type = DecodeWord32(flags); + CSA_DCHECK(this, Word32NotEqual(scope_type, + Int32Constant(ScopeType::SCRIPT_SCOPE))); +#endif + // Check if context has an extension slot. TNode has_extension = LoadScopeInfoHasExtensionField(LoadScopeInfo(cur_context.value())); @@ -16301,8 +16396,8 @@ TNode CodeStubAssembler::GetSharedFunctionInfoCode( CSA_DCHECK(this, Word32Equal(data_type, Int32Constant(INTERPRETER_DATA_TYPE))); { - TNode trampoline = LoadCodePointerFromObject( - CAST(sfi_data), InterpreterData::kInterpreterTrampolineOffset); + TNode trampoline = CAST(LoadProtectedPointerFromObject( + CAST(sfi_data), InterpreterData::kInterpreterTrampolineOffset)); sfi_code = trampoline; } Goto(&done); @@ -16329,12 +16424,13 @@ TNode CodeStubAssembler::GetSharedFunctionInfoCode( return sfi_code.value(); } -TNode CodeStubAssembler::LoadCodeInstructionStart(TNode code) { +TNode CodeStubAssembler::LoadCodeInstructionStart( + TNode code, CodeEntrypointTag tag) { #ifdef V8_ENABLE_SANDBOX // In this case, the entrypoint is stored in the code pointer table entry // referenced via the Code object's 'self' indirect pointer. return LoadCodeEntrypointViaCodePointerField( - code, Code::kSelfIndirectPointerOffset); + code, Code::kSelfIndirectPointerOffset, tag); #else return LoadObjectField(code, Code::kInstructionStartOffset); #endif @@ -16439,15 +16535,14 @@ TNode CodeStubAssembler::CheckEnumCache(TNode receiver, // https://gcc.gnu.org/bugzilla/show_bug.cgi?id=85149 // TODO(miladfarca): Use `if constexpr` once all compilers handle this // properly. 
+ CSA_DCHECK(this, Word32Or(IsPropertyDictionary(properties), + IsGlobalDictionary(properties))); if (V8_ENABLE_SWISS_NAME_DICTIONARY_BOOL) { - CSA_DCHECK(this, Word32Or(IsSwissNameDictionary(properties), - IsGlobalDictionary(properties))); - length = Select( - IsSwissNameDictionary(properties), + IsPropertyDictionary(properties), [=] { return GetNumberOfElements( - UncheckedCast(properties)); + UncheckedCast(properties)); }, [=] { return GetNumberOfElements( @@ -16455,8 +16550,6 @@ TNode CodeStubAssembler::CheckEnumCache(TNode receiver, }); } else { - CSA_DCHECK(this, Word32Or(IsNameDictionary(properties), - IsGlobalDictionary(properties))); static_assert(static_cast(NameDictionary::kNumberOfElementsIndex) == static_cast(GlobalDictionary::kNumberOfElementsIndex)); length = GetNumberOfElements(UncheckedCast(properties)); diff --git a/deps/v8/src/codegen/code-stub-assembler.h b/deps/v8/src/codegen/code-stub-assembler.h index 3b2793a4913439..7d687744c10ed1 100644 --- a/deps/v8/src/codegen/code-stub-assembler.h +++ b/deps/v8/src/codegen/code-stub-assembler.h @@ -18,6 +18,7 @@ #include "src/objects/arguments.h" #include "src/objects/bigint.h" #include "src/objects/cell.h" +#include "src/objects/dictionary.h" #include "src/objects/feedback-vector.h" #include "src/objects/heap-number.h" #include "src/objects/hole.h" @@ -358,6 +359,14 @@ enum class PrimitiveType { kBoolean, kNumber, kString, kSymbol }; #define CSA_SLOW_DCHECK(csa, ...) ((void)0) #endif +// Similar to SBXCHECK in C++, these become a CSA_CHECK in sandbox-enabled +// builds, otherwise a CSA_DCHECK. +#ifdef V8_ENABLE_SANDBOX +#define CSA_SBXCHECK(csa, ...) CSA_CHECK(csa, __VA_ARGS__) +#else +#define CSA_SBXCHECK(csa, ...) CSA_DCHECK(csa, __VA_ARGS__) +#endif + // Provides JavaScript-specific "macro-assembler" functionality on top of the // CodeAssembler. By factoring the JavaScript-isms out of the CodeAssembler, // it's possible to add JavaScript-specific useful CodeAssembler "macros" @@ -922,7 +931,8 @@ class V8_EXPORT_PRIVATE CodeStubAssembler void FastCheck(TNode condition); - TNode LoadCodeInstructionStart(TNode code); + TNode LoadCodeInstructionStart(TNode code, + CodeEntrypointTag tag); TNode IsMarkedForDeoptimization(TNode code); // The following Call wrappers call an object according to the semantics that @@ -1193,9 +1203,9 @@ class V8_EXPORT_PRIVATE CodeStubAssembler // Load a trusted pointer field. // When the sandbox is enabled, these are indirect pointers using the trusted // pointer table. Otherwise they are regular tagged fields. - TNode LoadTrustedPointerFromObject(TNode object, - int offset, - IndirectPointerTag tag); + TNode LoadTrustedPointerFromObject(TNode object, + int offset, + IndirectPointerTag tag); // Load a code pointer field. // These are special versions of trusted pointers that, when the sandbox is @@ -1204,9 +1214,9 @@ class V8_EXPORT_PRIVATE CodeStubAssembler #ifdef V8_ENABLE_SANDBOX // Load an indirect pointer field. - TNode LoadIndirectPointerFromObject(TNode object, - int offset, - IndirectPointerTag tag); + TNode LoadIndirectPointerFromObject(TNode object, + int offset, + IndirectPointerTag tag); // Determines whether the given indirect pointer handle is a trusted pointer // handle or a code pointer handle. @@ -1214,14 +1224,14 @@ class V8_EXPORT_PRIVATE CodeStubAssembler // Retrieve the heap object referenced by the given indirect pointer handle, // which can either be a trusted pointer handle or a code pointer handle. 
- TNode ResolveIndirectPointerHandle( + TNode ResolveIndirectPointerHandle( TNode handle, IndirectPointerTag tag); // Retrieve the Code object referenced by the given trusted pointer handle. TNode ResolveCodePointerHandle(TNode handle); // Retrieve the heap object referenced by the given trusted pointer handle. - TNode ResolveTrustedPointerHandle( + TNode ResolveTrustedPointerHandle( TNode handle, IndirectPointerTag tag); // Helper function to compute the offset into the code pointer table from a @@ -1233,14 +1243,19 @@ class V8_EXPORT_PRIVATE CodeStubAssembler // Only available when the sandbox is enabled as it requires the code pointer // table. TNode LoadCodeEntrypointViaCodePointerField(TNode object, - int offset) { - return LoadCodeEntrypointViaCodePointerField(object, - IntPtrConstant(offset)); + int offset, + CodeEntrypointTag tag) { + return LoadCodeEntrypointViaCodePointerField(object, IntPtrConstant(offset), + tag); } TNode LoadCodeEntrypointViaCodePointerField(TNode object, - TNode offset); + TNode offset, + CodeEntrypointTag tag); #endif + TNode LoadProtectedPointerFromObject( + TNode object, int offset); + TNode LoadForeignForeignAddressPtr(TNode object) { return LoadExternalPointerFromObject(object, Foreign::kForeignAddressOffset, kForeignForeignAddressTag); @@ -1289,11 +1304,11 @@ class V8_EXPORT_PRIVATE CodeStubAssembler TNode object) { #ifdef V8_ENABLE_SANDBOX return LoadCodeEntrypointViaCodePointerField( - object, WasmInternalFunction::kCodeOffset); + object, WasmInternalFunction::kCodeOffset, kWasmEntrypointTag); #else TNode code = LoadObjectField(object, WasmInternalFunction::kCodeOffset); - return LoadCodeInstructionStart(code); + return LoadCodeInstructionStart(code, kWasmEntrypointTag); #endif // V8_ENABLE_SANDBOX } @@ -2192,6 +2207,14 @@ class V8_EXPORT_PRIVATE CodeStubAssembler AllocationFlags = AllocationFlag::kNone); TNode AllocateNameDictionaryWithCapacity( TNode capacity, AllocationFlags = AllocationFlag::kNone); + + TNode AllocatePropertyDictionary(int at_least_space_for); + TNode AllocatePropertyDictionary( + TNode at_least_space_for, + AllocationFlags = AllocationFlag::kNone); + TNode AllocatePropertyDictionaryWithCapacity( + TNode capacity, AllocationFlags = AllocationFlag::kNone); + TNode CopyNameDictionary(TNode dictionary, Label* large_object_fallback); @@ -2865,7 +2888,7 @@ class V8_EXPORT_PRIVATE CodeStubAssembler TNode IsConstructorMap(TNode map); TNode IsConstructor(TNode object); TNode IsDeprecatedMap(TNode map); - TNode IsNameDictionary(TNode object); + TNode IsPropertyDictionary(TNode object); TNode IsOrderedNameDictionary(TNode object); TNode IsGlobalDictionary(TNode object); TNode IsExtensibleMap(TNode map); @@ -2978,7 +3001,6 @@ class V8_EXPORT_PRIVATE CodeStubAssembler TNode IsStringInstanceType(TNode instance_type); TNode IsString(TNode object); TNode IsSeqOneByteString(TNode object); - TNode IsSwissNameDictionary(TNode object); TNode IsSymbolInstanceType(TNode instance_type); TNode IsInternalizedStringInstanceType(TNode instance_type); @@ -3511,24 +3533,40 @@ class V8_EXPORT_PRIVATE CodeStubAssembler template void SetNameDictionaryFlags(TNode, TNode flags); - // Looks up an entry in a NameDictionaryBase successor. If the entry is found - // control goes to {if_found} and {var_name_index} contains an index of the - // key field of the entry found. If the key is not found control goes to - // {if_not_found}. 
enum LookupMode { kFindExisting, kFindInsertionIndex }; template TNode LoadName(TNode key); + // Looks up an entry in a NameDictionaryBase successor. + // For {mode} == kFindExisting: + // If the entry is found control goes to {if_found} and {var_name_index} + // contains an index of the key field of the entry found. + // If the key is not found and {if_not_found_with_insertion_index} is + // provided, control goes to {if_not_found_with_insertion_index} and + // {var_name_index} contains the index of the key field to insert the given + // name at. + // Otherwise control goes to {if_not_found_no_insertion_index}. + // For {mode} == kFindInsertionIndex: + // {if_not_found_no_insertion_index} and {if_not_found_with_insertion_index} + // are treated equally. If {if_not_found_with_insertion_index} is provided, + // control goes to {if_not_found_with_insertion_index}, otherwise control + // goes to {if_not_found_no_insertion_index}. In both cases {var_name_index} + // contains the index of the key field to insert the given name at. template void NameDictionaryLookup(TNode dictionary, TNode unique_name, Label* if_found, TVariable* var_name_index, - Label* if_not_found, - LookupMode mode = kFindExisting); + Label* if_not_found_no_insertion_index, + LookupMode mode = kFindExisting, + Label* if_not_found_with_insertion_index = nullptr); TNode ComputeSeededHash(TNode key); + // Looks up an entry in a NameDictionaryBase successor. If the entry is found + // control goes to {if_found} and {var_name_index} contains an index of the + // key field of the entry found. If the key is not found control goes to + // {if_not_found}. void NumberDictionaryLookup(TNode dictionary, TNode intptr_index, Label* if_found, TVariable* var_entry, @@ -3548,8 +3586,10 @@ class V8_EXPORT_PRIVATE CodeStubAssembler TNode enum_index); template - void AddToDictionary(TNode dictionary, TNode key, - TNode value, Label* bailout); + void AddToDictionary( + TNode dictionary, TNode key, TNode value, + Label* bailout, + base::Optional> insertion_index = base::nullopt); // Tries to check if {object} has own {unique_name} property. void TryHasOwnProperty(TNode object, TNode map, @@ -3875,6 +3915,12 @@ class V8_EXPORT_PRIVATE CodeStubAssembler void TrapAllocationMemento(TNode object, Label* memento_found); + // Helpers to look up MemoryChunk/Page metadata for a given address. + // Equivalent to MemoryChunkHeader::FromAddress(). + TNode PageHeaderFromAddress(TNode address); + // Equivalent to MemoryChunkHeader::MemoryChunk(). + TNode PageFromPageHeader(TNode address); + // Equivalent to BasicMemoryChunk::FromAddress(). TNode PageFromAddress(TNode address); // Store a weak in-place reference into the FeedbackVector. 
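Aside: the comment block above spells out the new three-way contract of NameDictionaryLookup and how AddToDictionary can reuse the insertion index from a failed lookup instead of probing a second time. A toy model of just that control flow, using a plain linear scan and std::optional in place of V8's seeded hash probing and CSA labels:

#include <cstddef>
#include <optional>
#include <string>
#include <vector>

struct LookupResult {
  bool found = false;
  // Entry index when found; otherwise the slot where the name could be
  // inserted, if the probe discovered one.
  std::optional<size_t> index;
};

// Toy stand-in for NameDictionaryLookup: empty strings mark free slots.
LookupResult Lookup(const std::vector<std::string>& keys,
                    const std::string& unique_name) {
  for (size_t i = 0; i < keys.size(); ++i) {
    if (keys[i] == unique_name) return {true, i};  // -> if_found
    if (keys[i].empty()) return {false, i};  // -> if_not_found_with_insertion_index
  }
  return {false, std::nullopt};  // -> if_not_found_no_insertion_index
}

// Toy stand-in for AddToDictionary: reuse the insertion index from the
// failed lookup rather than searching again.
void Add(std::vector<std::string>& keys, const std::string& unique_name) {
  LookupResult r = Lookup(keys, unique_name);
  if (r.found) return;  // already present
  if (r.index) {
    keys[*r.index] = unique_name;
    return;
  }
  keys.push_back(unique_name);  // no free slot found: grow (the bailout path)
}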
diff --git a/deps/v8/src/codegen/compiler.cc b/deps/v8/src/codegen/compiler.cc index ef884f7d5866f8..93ef078548674b 100644 --- a/deps/v8/src/codegen/compiler.cc +++ b/deps/v8/src/codegen/compiler.cc @@ -2032,34 +2032,44 @@ class ConstantPoolPointerForwarder { for (Handle bytecode_array : bytecode_arrays_to_update_) { local_heap_->Safepoint(); DisallowGarbageCollection no_gc; - Tagged constant_pool = bytecode_array->constant_pool(); - IterateConstantPool(constant_pool); + IterateConstantPool(bytecode_array->constant_pool()); } } bool HasAnythingToForward() const { return !forwarding_table_.empty(); } private: - void IterateConstantPool(Tagged constant_pool) { - for (int i = 0, length = constant_pool->length(); i < length; ++i) { - Tagged obj = constant_pool->get(i); - if (IsSmi(obj)) continue; - Tagged heap_obj = HeapObject::cast(obj); - if (IsFixedArray(heap_obj, cage_base_)) { - // Constant pools can have nested fixed arrays, but such relationships - // are acyclic and never more than a few layers deep, so recursion is - // fine here. - IterateConstantPool(FixedArray::cast(heap_obj)); - } else if (IsSharedFunctionInfo(heap_obj, cage_base_)) { - auto it = forwarding_table_.find( - SharedFunctionInfo::cast(heap_obj)->function_literal_id()); - if (it != forwarding_table_.end()) { - constant_pool->set(i, *it->second); - } + template + void IterateConstantPoolEntry(Tagged constant_pool, int i) { + Tagged obj = constant_pool->get(i); + if (IsSmi(obj)) return; + Tagged heap_obj = HeapObject::cast(obj); + if (IsFixedArray(heap_obj, cage_base_)) { + // Constant pools can have nested fixed arrays, but such relationships + // are acyclic and never more than a few layers deep, so recursion is + // fine here. + IterateConstantPoolNestedArray(FixedArray::cast(heap_obj)); + } else if (IsSharedFunctionInfo(heap_obj, cage_base_)) { + auto it = forwarding_table_.find( + SharedFunctionInfo::cast(heap_obj)->function_literal_id()); + if (it != forwarding_table_.end()) { + constant_pool->set(i, *it->second); } } } + void IterateConstantPool(Tagged constant_pool) { + for (int i = 0, length = constant_pool->length(); i < length; ++i) { + IterateConstantPoolEntry(constant_pool, i); + } + } + + void IterateConstantPoolNestedArray(Tagged nested_array) { + for (int i = 0, length = nested_array->length(); i < length; ++i) { + IterateConstantPoolEntry(nested_array, i); + } + } + PtrComprCageBase cage_base_; LocalHeap* local_heap_; std::vector> bytecode_arrays_to_update_; diff --git a/deps/v8/src/codegen/external-reference-table.cc b/deps/v8/src/codegen/external-reference-table.cc index 96e2606b8fc14d..207b26498fe855 100644 --- a/deps/v8/src/codegen/external-reference-table.cc +++ b/deps/v8/src/codegen/external-reference-table.cc @@ -288,26 +288,19 @@ void ExternalReferenceTable::AddStubCache(Isolate* isolate, int* index) { kIsolateAddressReferenceCount, *index); - StubCache* load_stub_cache = isolate->load_stub_cache(); - // Stub cache tables - Add(load_stub_cache->key_reference(StubCache::kPrimary).address(), index); - Add(load_stub_cache->value_reference(StubCache::kPrimary).address(), index); - Add(load_stub_cache->map_reference(StubCache::kPrimary).address(), index); - Add(load_stub_cache->key_reference(StubCache::kSecondary).address(), index); - Add(load_stub_cache->value_reference(StubCache::kSecondary).address(), index); - Add(load_stub_cache->map_reference(StubCache::kSecondary).address(), index); - - StubCache* store_stub_cache = isolate->store_stub_cache(); - - // Stub cache tables - 
Add(store_stub_cache->key_reference(StubCache::kPrimary).address(), index); - Add(store_stub_cache->value_reference(StubCache::kPrimary).address(), index); - Add(store_stub_cache->map_reference(StubCache::kPrimary).address(), index); - Add(store_stub_cache->key_reference(StubCache::kSecondary).address(), index); - Add(store_stub_cache->value_reference(StubCache::kSecondary).address(), - index); - Add(store_stub_cache->map_reference(StubCache::kSecondary).address(), index); + std::array stub_caches{isolate->load_stub_cache(), + isolate->store_stub_cache(), + isolate->define_own_stub_cache()}; + + for (StubCache* stub_cache : stub_caches) { + Add(stub_cache->key_reference(StubCache::kPrimary).address(), index); + Add(stub_cache->value_reference(StubCache::kPrimary).address(), index); + Add(stub_cache->map_reference(StubCache::kPrimary).address(), index); + Add(stub_cache->key_reference(StubCache::kSecondary).address(), index); + Add(stub_cache->value_reference(StubCache::kSecondary).address(), index); + Add(stub_cache->map_reference(StubCache::kSecondary).address(), index); + } CHECK_EQ(kSizeIsolateIndependent + kExternalReferenceCountIsolateDependent + kIsolateAddressReferenceCount + kStubCacheReferenceCount, diff --git a/deps/v8/src/codegen/external-reference-table.h b/deps/v8/src/codegen/external-reference-table.h index d00358a0853011..d5d12833f7117a 100644 --- a/deps/v8/src/codegen/external-reference-table.h +++ b/deps/v8/src/codegen/external-reference-table.h @@ -38,7 +38,7 @@ class ExternalReferenceTable { Accessors::kAccessorInfoCount + Accessors::kAccessorGetterCount + Accessors::kAccessorSetterCount + Accessors::kAccessorCallbackCount; // The number of stub cache external references, see AddStubCache. - static constexpr int kStubCacheReferenceCount = 12; + static constexpr int kStubCacheReferenceCount = 6 * 3; // 3 stub caches static constexpr int kStatsCountersReferenceCount = #define SC(...) +1 STATS_COUNTER_NATIVE_CODE_LIST(SC); diff --git a/deps/v8/src/codegen/ia32/macro-assembler-ia32.cc b/deps/v8/src/codegen/ia32/macro-assembler-ia32.cc index 87b17032916001..ecbec91ad42f6d 100644 --- a/deps/v8/src/codegen/ia32/macro-assembler-ia32.cc +++ b/deps/v8/src/codegen/ia32/macro-assembler-ia32.cc @@ -1948,18 +1948,20 @@ void MacroAssembler::PrepareCallCFunction(int num_arguments, Register scratch) { } } -void MacroAssembler::CallCFunction(ExternalReference function, - int num_arguments, - SetIsolateDataSlots set_isolate_data_slots) { +int MacroAssembler::CallCFunction(ExternalReference function, int num_arguments, + SetIsolateDataSlots set_isolate_data_slots, + Label* return_location) { // Note: The "CallCFunction" code comment will be generated by the other // CallCFunction method called below. // Trashing eax is ok as it will be the return value. 
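Editor's note: the ia32 CallCFunction hunk above now materializes the saved caller PC with LoadLabelAddress against a get_pc label bound right after the call, and only the frame pointer slot is reset afterwards. A minimal sketch of that bookkeeping in plain C++, with every name below (IsolateData slots, the PC placeholder) illustrative rather than V8's real layout:

```cpp
#include <cassert>
#include <cstdint>

// Hedged model of the fast-C-call slots maintained around CallCFunction:
// record caller FP and a PC pointing just past the call before calling into C,
// and clear only the FP afterwards ("the FP is the source of truth").
struct IsolateData {
  uintptr_t fast_c_call_caller_fp = 0;
  uintptr_t fast_c_call_caller_pc = 0;
};

int LibraryFunction(int x) { return 2 * x; }

int CallCFunctionModel(IsolateData* iso, uintptr_t frame_pointer, int arg) {
  iso->fast_c_call_caller_fp = frame_pointer;
  iso->fast_c_call_caller_pc = 0x1234;  // stands in for the get_pc label address
  int result = LibraryFunction(arg);    // the actual C call
  // Only the FP slot is reset; a stale PC is harmless because stack iteration
  // treats the FP as the source of truth.
  iso->fast_c_call_caller_fp = 0;
  return result;
}

int main() {
  IsolateData iso;
  assert(CallCFunctionModel(&iso, 0xfeed, 21) == 42);
  assert(iso.fast_c_call_caller_fp == 0);
  assert(iso.fast_c_call_caller_pc == 0x1234);
}
```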
Move(eax, Immediate(function)); - CallCFunction(eax, num_arguments); + return CallCFunction(eax, num_arguments, set_isolate_data_slots, + return_location); } -void MacroAssembler::CallCFunction(Register function, int num_arguments, - SetIsolateDataSlots set_isolate_data_slots) { +int MacroAssembler::CallCFunction(Register function, int num_arguments, + SetIsolateDataSlots set_isolate_data_slots, + Label* return_location) { ASM_CODE_COMMENT(this); DCHECK_LE(num_arguments, kMaxCParameters); DCHECK(has_frame()); @@ -1968,6 +1970,8 @@ void MacroAssembler::CallCFunction(Register function, int num_arguments, CheckStackAlignment(); } + Label get_pc; + if (set_isolate_data_slots == SetIsolateDataSlots::kYes) { // Save the frame pointer and PC so that the stack layout remains iterable, // even without an ExitFrame which normally exists between JS and C frames. @@ -1976,8 +1980,7 @@ void MacroAssembler::CallCFunction(Register function, int num_arguments, Register scratch = ecx; if (function == eax) pc_scratch = edx; if (function == ecx) scratch = edx; - PushPC(); - pop(pc_scratch); + LoadLabelAddress(pc_scratch, &get_pc); // See x64 code for reasoning about how to address the isolate data fields. DCHECK_IMPLIES(!root_array_available(), isolate() != nullptr); @@ -1998,6 +2001,9 @@ void MacroAssembler::CallCFunction(Register function, int num_arguments, } call(function); + int call_pc_offset = pc_offset(); + bind(&get_pc); + if (return_location) bind(return_location); if (set_isolate_data_slots == SetIsolateDataSlots::kYes) { // We don't unset the PC; the FP is the source of truth. @@ -2016,6 +2022,8 @@ void MacroAssembler::CallCFunction(Register function, int num_arguments, } else { add(esp, Immediate(num_arguments * kSystemPointerSize)); } + + return call_pc_offset; } void MacroAssembler::PushPC() { @@ -2112,7 +2120,8 @@ Operand MacroAssembler::EntryFromBuiltinAsOperand(Builtin builtin) { } void MacroAssembler::LoadCodeInstructionStart(Register destination, - Register code_object) { + Register code_object, + CodeEntrypointTag tag) { ASM_CODE_COMMENT(this); mov(destination, FieldOperand(code_object, Code::kInstructionStartOffset)); } @@ -2185,21 +2194,28 @@ void MacroAssembler::LoadLabelAddress(Register dst, Label* lbl) { DCHECK(pc_offset() - kStart == kInsDelta); } +void MacroAssembler::MemoryChunkHeaderFromObject(Register object, + Register header) { + constexpr intptr_t alignment_mask = + MemoryChunkHeader::GetAlignmentMaskForAssembler(); + if (header == object) { + and_(header, Immediate(~alignment_mask)); + } else { + mov(header, Immediate(~alignment_mask)); + and_(header, object); + } +} + void MacroAssembler::CheckPageFlag(Register object, Register scratch, int mask, Condition cc, Label* condition_met, Label::Distance condition_met_distance) { ASM_CODE_COMMENT(this); DCHECK(cc == zero || cc == not_zero); - if (scratch == object) { - and_(scratch, Immediate(~kPageAlignmentMask)); - } else { - mov(scratch, Immediate(~kPageAlignmentMask)); - and_(scratch, object); - } + MemoryChunkHeaderFromObject(object, scratch); if (mask < (1 << kBitsPerByte)) { - test_b(Operand(scratch, BasicMemoryChunk::kFlagsOffset), Immediate(mask)); + test_b(Operand(scratch, MemoryChunkLayout::kFlagsOffset), Immediate(mask)); } else { - test(Operand(scratch, BasicMemoryChunk::kFlagsOffset), Immediate(mask)); + test(Operand(scratch, MemoryChunkLayout::kFlagsOffset), Immediate(mask)); } j(cc, condition_met, condition_met_distance); } diff --git a/deps/v8/src/codegen/ia32/macro-assembler-ia32.h 
b/deps/v8/src/codegen/ia32/macro-assembler-ia32.h index 27de799690723b..3be07986967aa7 100644 --- a/deps/v8/src/codegen/ia32/macro-assembler-ia32.h +++ b/deps/v8/src/codegen/ia32/macro-assembler-ia32.h @@ -74,6 +74,7 @@ class V8_EXPORT_PRIVATE MacroAssembler public: using SharedMacroAssembler::SharedMacroAssembler; + void MemoryChunkHeaderFromObject(Register object, Register header); void CheckPageFlag(Register object, Register scratch, int mask, Condition cc, Label* condition_met, Label::Distance condition_met_distance = Label::kFar); @@ -162,7 +163,8 @@ class V8_EXPORT_PRIVATE MacroAssembler void TailCallBuiltin(Builtin builtin); // Load the code entry point from the Code object. - void LoadCodeInstructionStart(Register destination, Register code_object); + void LoadCodeInstructionStart(Register destination, Register code_object, + CodeEntrypointTag = kDefaultCodeEntrypointTag); void CallCodeObject(Register code_object); void JumpCodeObject(Register code_object, JumpMode jump_mode = JumpMode::kJump); @@ -234,16 +236,14 @@ class V8_EXPORT_PRIVATE MacroAssembler // garbage collection, since that might move the code and invalidate the // return address (unless this is somehow accounted for by the called // function). - enum class SetIsolateDataSlots { - kNo, - kYes, - }; - void CallCFunction( + int CallCFunction( ExternalReference function, int num_arguments, - SetIsolateDataSlots set_isolate_data_slots = SetIsolateDataSlots::kYes); - void CallCFunction( + SetIsolateDataSlots set_isolate_data_slots = SetIsolateDataSlots::kYes, + Label* return_location = nullptr); + int CallCFunction( Register function, int num_arguments, - SetIsolateDataSlots set_isolate_data_slots = SetIsolateDataSlots::kYes); + SetIsolateDataSlots set_isolate_data_slots = SetIsolateDataSlots::kYes, + Label* return_location = nullptr); void ShlPair(Register high, Register low, uint8_t imm8); void ShlPair_cl(Register high, Register low); diff --git a/deps/v8/src/codegen/interface-descriptors-inl.h b/deps/v8/src/codegen/interface-descriptors-inl.h index 325b8f2b3d72d4..9f7ee01571beb1 100644 --- a/deps/v8/src/codegen/interface-descriptors-inl.h +++ b/deps/v8/src/codegen/interface-descriptors-inl.h @@ -115,8 +115,8 @@ void StaticCallInterfaceDescriptor::Initialize( DCHECK_GE(return_registers.size(), DerivedDescriptor::kReturnCount); DCHECK_GE(return_double_registers.size(), DerivedDescriptor::kReturnCount); data->InitializeRegisters( - DerivedDescriptor::flags(), DerivedDescriptor::kReturnCount, - DerivedDescriptor::GetParameterCount(), + DerivedDescriptor::flags(), DerivedDescriptor::kEntrypointTag, + DerivedDescriptor::kReturnCount, DerivedDescriptor::GetParameterCount(), DerivedDescriptor::kStackArgumentOrder, DerivedDescriptor::GetRegisterParameterCount(), registers.data(), double_registers.data(), return_registers.data(), @@ -138,7 +138,7 @@ StaticCallInterfaceDescriptor::GetReturnCount() { static_assert( DerivedDescriptor::kReturnCount >= 0, "DerivedDescriptor subclass should override return count with a value " - "that is greater than 0"); + "that is greater than or equal to 0"); return DerivedDescriptor::kReturnCount; } @@ -150,7 +150,7 @@ StaticCallInterfaceDescriptor::GetParameterCount() { static_assert( DerivedDescriptor::kParameterCount >= 0, "DerivedDescriptor subclass should override parameter count with a " - "value that is greater than 0"); + "value that is greater than or equal to 0"); return DerivedDescriptor::kParameterCount; } @@ -502,7 +502,8 @@ constexpr auto OnStackReplacementDescriptor::registers() { 
constexpr auto MaglevOptimizeCodeOrTailCallOptimizedCodeSlotDescriptor::registers() { #ifdef V8_ENABLE_MAGLEV - return RegisterArray(FlagsRegister(), FeedbackVectorRegister()); + return RegisterArray(FlagsRegister(), FeedbackVectorRegister(), + TemporaryRegister()); #else return DefaultRegisterArray(); #endif diff --git a/deps/v8/src/codegen/interface-descriptors.cc b/deps/v8/src/codegen/interface-descriptors.cc index 2b4302482625e6..3c6578006138d3 100644 --- a/deps/v8/src/codegen/interface-descriptors.cc +++ b/deps/v8/src/codegen/interface-descriptors.cc @@ -37,7 +37,7 @@ void CheckRegisterConfiguration(int count, const Register* registers, #endif void CallInterfaceDescriptorData::InitializeRegisters( - Flags flags, int return_count, int parameter_count, + Flags flags, CodeEntrypointTag tag, int return_count, int parameter_count, StackArgumentOrder stack_order, int register_parameter_count, const Register* registers, const DoubleRegister* double_registers, const Register* return_registers, @@ -52,6 +52,7 @@ void CallInterfaceDescriptorData::InitializeRegisters( #endif flags_ = flags; + tag_ = tag; stack_order_ = stack_order; return_count_ = return_count; param_count_ = parameter_count; diff --git a/deps/v8/src/codegen/interface-descriptors.h b/deps/v8/src/codegen/interface-descriptors.h index d52c07820130c0..be8aa938c2c764 100644 --- a/deps/v8/src/codegen/interface-descriptors.h +++ b/deps/v8/src/codegen/interface-descriptors.h @@ -53,6 +53,8 @@ namespace internal { V(CallWithSpread) \ V(CallWithSpread_Baseline) \ V(CallWithSpread_WithFeedback) \ + V(CCall) \ + V(CEntryDummy) \ V(CEntry1ArgvOnStack) \ V(CloneObjectBaseline) \ V(CloneObjectWithVector) \ @@ -117,6 +119,7 @@ namespace internal { V(NewHeapNumber) \ V(NoContext) \ V(OnStackReplacement) \ + V(RegExpTrampoline) \ V(RestartFrameTrampoline) \ V(ResumeGenerator) \ V(ResumeGeneratorBaseline) \ @@ -141,6 +144,7 @@ namespace internal { V(UnaryOp_Baseline) \ V(UnaryOp_WithFeedback) \ V(Void) \ + V(WasmDummy) \ V(WasmFloat32ToNumber) \ V(WasmFloat64ToTagged) \ V(WasmJSToWasmWrapper) \ @@ -191,8 +195,8 @@ class V8_EXPORT_PRIVATE CallInterfaceDescriptorData { // The passed registers are owned by the caller, and their lifetime is // expected to exceed that of this data. In practice, they are expected to // be in a static local. 
- void InitializeRegisters(Flags flags, int return_count, int parameter_count, - StackArgumentOrder stack_order, + void InitializeRegisters(Flags flags, CodeEntrypointTag tag, int return_count, + int parameter_count, StackArgumentOrder stack_order, int register_parameter_count, const Register* registers, const DoubleRegister* double_registers, @@ -217,6 +221,7 @@ class V8_EXPORT_PRIVATE CallInterfaceDescriptorData { } Flags flags() const { return flags_; } + CodeEntrypointTag tag() const { return tag_; } int return_count() const { return return_count_; } int param_count() const { return param_count_; } int register_param_count() const { return register_param_count_; } @@ -271,6 +276,7 @@ class V8_EXPORT_PRIVATE CallInterfaceDescriptorData { int return_count_ = kUninitializedCount; int param_count_ = kUninitializedCount; Flags flags_ = kNoFlags; + CodeEntrypointTag tag_ = kDefaultCodeEntrypointTag; StackArgumentOrder stack_order_ = StackArgumentOrder::kDefault; // Specifying the set of registers that could be used by the register @@ -354,6 +360,8 @@ class V8_EXPORT_PRIVATE CallInterfaceDescriptor { Flags flags() const { return data()->flags(); } + CodeEntrypointTag tag() const { return data()->tag(); } + bool HasContextParameter() const { return (flags() & CallInterfaceDescriptorData::kNoContext) == 0; } @@ -474,6 +482,9 @@ class StaticCallInterfaceDescriptor : public CallInterfaceDescriptor { static constexpr bool kNoContext = false; static constexpr bool kAllowVarArgs = false; static constexpr bool kNoStackScan = false; + // TODO(saelo): we should not have a default value here to force all interface + // descriptors to define a (unique) tag. + static constexpr CodeEntrypointTag kEntrypointTag = kDefaultCodeEntrypointTag; static constexpr auto kStackArgumentOrder = StackArgumentOrder::kDefault; // The set of registers available to the parameters, as a @@ -752,6 +763,7 @@ constexpr EmptyDoubleRegisterArray DoubleRegisterArray() { return {}; } class V8_EXPORT_PRIVATE VoidDescriptor : public StaticCallInterfaceDescriptor { public: + static constexpr CodeEntrypointTag kEntrypointTag = kInvalidEntrypointTag; // The void descriptor could (and indeed probably should) also be NO_CONTEXT, // but this breaks some code assembler unittests. DEFINE_PARAMETERS() @@ -761,10 +773,6 @@ class V8_EXPORT_PRIVATE VoidDescriptor static constexpr auto registers(); }; -// Dummy descriptor that marks builtins with C calling convention. -// TODO(jgruber): Define real descriptors for C calling conventions. -using CCallDescriptor = VoidDescriptor; - // Marks deoptimization entry builtins. Precise calling conventions currently // differ based on the platform. // TODO(jgruber): Once this is unified, we could create a better description @@ -777,15 +785,39 @@ using JSEntryDescriptor = VoidDescriptor; // TODO(jgruber): Consider filling in the details here; however, this doesn't // make too much sense as long as the descriptor isn't used or verified. -using CEntryDummyDescriptor = VoidDescriptor; +using ContinueToBuiltinDescriptor = VoidDescriptor; + +// Dummy descriptor that marks builtins with C calling convention. +// TODO(jgruber): Define real descriptors for C calling conventions. 
+class CCallDescriptor : public StaticCallInterfaceDescriptor { + public: + static constexpr CodeEntrypointTag kEntrypointTag = kDefaultCodeEntrypointTag; + DEFINE_PARAMETERS() + DEFINE_PARAMETER_TYPES() + DECLARE_DESCRIPTOR(CCallDescriptor) +}; // TODO(jgruber): Consider filling in the details here; however, this doesn't // make too much sense as long as the descriptor isn't used or verified. -using ContinueToBuiltinDescriptor = VoidDescriptor; +class CEntryDummyDescriptor + : public StaticCallInterfaceDescriptor { + public: + static constexpr CodeEntrypointTag kEntrypointTag = kDefaultCodeEntrypointTag; + DEFINE_PARAMETERS() + DEFINE_PARAMETER_TYPES() + DECLARE_DESCRIPTOR(CEntryDummyDescriptor) +}; // TODO(wasm): Consider filling in details / defining real descriptors for all // builtins still using this placeholder descriptor. -using WasmDummyDescriptor = VoidDescriptor; +class WasmDummyDescriptor + : public StaticCallInterfaceDescriptor { + public: + static constexpr CodeEntrypointTag kEntrypointTag = kWasmEntrypointTag; + DEFINE_PARAMETERS() + DEFINE_PARAMETER_TYPES() + DECLARE_DESCRIPTOR(WasmDummyDescriptor) +}; class AllocateDescriptor : public StaticCallInterfaceDescriptor { @@ -819,6 +851,17 @@ class JSTrampolineDescriptor DECLARE_JS_COMPATIBLE_DESCRIPTOR(JSTrampolineDescriptor) }; +// Descriptor used for code using the RegExp calling convention, in particular +// the RegExp interpreter trampolines. +class RegExpTrampolineDescriptor + : public StaticCallInterfaceDescriptor { + public: + static constexpr CodeEntrypointTag kEntrypointTag = kRegExpEntrypointTag; + DEFINE_PARAMETERS() + DEFINE_PARAMETER_TYPES() + DECLARE_DESCRIPTOR(RegExpTrampolineDescriptor) +}; + class ContextOnlyDescriptor : public StaticCallInterfaceDescriptor { public: @@ -953,14 +996,17 @@ class MaglevOptimizeCodeOrTailCallOptimizedCodeSlotDescriptor : public StaticCallInterfaceDescriptor< MaglevOptimizeCodeOrTailCallOptimizedCodeSlotDescriptor> { public: - DEFINE_PARAMETERS_NO_CONTEXT(kFlags, kFeedbackVector) + DEFINE_PARAMETERS_NO_CONTEXT(kFlags, kFeedbackVector, kTemporary) DEFINE_PARAMETER_TYPES(MachineType::Int32(), // kFlags - MachineType::TaggedPointer()) // kFeedbackVector + MachineType::TaggedPointer(), // kFeedbackVector + MachineType::AnyTagged()) // kTemporary DECLARE_DESCRIPTOR(MaglevOptimizeCodeOrTailCallOptimizedCodeSlotDescriptor) static constexpr inline Register FlagsRegister(); static constexpr inline Register FeedbackVectorRegister(); + static constexpr inline Register TemporaryRegister(); + static constexpr inline auto registers(); }; @@ -1009,6 +1055,8 @@ class StoreBaselineDescriptor class StoreTransitionDescriptor : public StaticCallInterfaceDescriptor { public: + static constexpr CodeEntrypointTag kEntrypointTag = kICHandlerEntrypointTag; + DEFINE_PARAMETERS(kReceiver, kName, kMap, kValue, kSlot, kVector) DEFINE_PARAMETER_TYPES(MachineType::AnyTagged(), // kReceiver MachineType::AnyTagged(), // kName @@ -1026,6 +1074,8 @@ class StoreTransitionDescriptor class StoreWithVectorDescriptor : public StaticCallInterfaceDescriptor { public: + static constexpr CodeEntrypointTag kEntrypointTag = kICHandlerEntrypointTag; + DEFINE_PARAMETERS(kReceiver, kName, kValue, kSlot, kVector) DEFINE_PARAMETER_TYPES(MachineType::AnyTagged(), // kReceiver MachineType::AnyTagged(), // kName @@ -1127,6 +1177,8 @@ class DefineKeyedOwnWithVectorDescriptor class LoadWithVectorDescriptor : public StaticCallInterfaceDescriptor { public: + static constexpr CodeEntrypointTag kEntrypointTag = kICHandlerEntrypointTag; + // 
TODO(v8:9497): Revert the Machine type for kSlot to the // TaggedSigned once Torque can emit better call descriptors DEFINE_PARAMETERS(kReceiver, kName, kSlot, kVector) diff --git a/deps/v8/src/codegen/loong64/macro-assembler-loong64.cc b/deps/v8/src/codegen/loong64/macro-assembler-loong64.cc index 1a4acb771b4f54..4e234b803cc8c4 100644 --- a/deps/v8/src/codegen/loong64/macro-assembler-loong64.cc +++ b/deps/v8/src/codegen/loong64/macro-assembler-loong64.cc @@ -4555,8 +4555,9 @@ void MacroAssembler::CheckPageFlag(Register object, int mask, Condition cc, UseScratchRegisterScope temps(this); temps.Include(t8); Register scratch = temps.Acquire(); - And(scratch, object, Operand(~kPageAlignmentMask)); - Ld_d(scratch, MemOperand(scratch, BasicMemoryChunk::kFlagsOffset)); + And(scratch, object, + Operand(~MemoryChunkHeader::GetAlignmentMaskForAssembler())); + Ld_d(scratch, MemOperand(scratch, MemoryChunkLayout::kFlagsOffset)); And(scratch, scratch, Operand(mask)); Branch(condition_met, cc, scratch, Operand(zero_reg)); } diff --git a/deps/v8/src/codegen/loong64/macro-assembler-loong64.h b/deps/v8/src/codegen/loong64/macro-assembler-loong64.h index 3c9007cede9f45..6779ff5d754c53 100644 --- a/deps/v8/src/codegen/loong64/macro-assembler-loong64.h +++ b/deps/v8/src/codegen/loong64/macro-assembler-loong64.h @@ -518,10 +518,6 @@ class V8_EXPORT_PRIVATE MacroAssembler : public MacroAssemblerBase { // garbage collection, since that might move the code and invalidate the // return address (unless this is somehow accounted for by the called // function). - enum class SetIsolateDataSlots { - kNo, - kYes, - }; void CallCFunction( ExternalReference function, int num_arguments, SetIsolateDataSlots set_isolate_data_slots = SetIsolateDataSlots::kYes); diff --git a/deps/v8/src/codegen/macro-assembler.h b/deps/v8/src/codegen/macro-assembler.h index 8c89741aa9d3ca..50c1f374d593f1 100644 --- a/deps/v8/src/codegen/macro-assembler.h +++ b/deps/v8/src/codegen/macro-assembler.h @@ -45,6 +45,11 @@ enum class ComparisonMode { kFullPointer, }; +enum class SetIsolateDataSlots { + kNo, + kYes, +}; + // This is the only place allowed to include the platform-specific headers. 
#define INCLUDED_FROM_MACRO_ASSEMBLER_H #if V8_TARGET_ARCH_IA32 diff --git a/deps/v8/src/codegen/mips64/macro-assembler-mips64.cc b/deps/v8/src/codegen/mips64/macro-assembler-mips64.cc index f190667f33c5c9..5068ce795e977e 100644 --- a/deps/v8/src/codegen/mips64/macro-assembler-mips64.cc +++ b/deps/v8/src/codegen/mips64/macro-assembler-mips64.cc @@ -6172,8 +6172,9 @@ void MacroAssembler::CallCFunctionHelper( void MacroAssembler::CheckPageFlag(Register object, Register scratch, int mask, Condition cc, Label* condition_met) { ASM_CODE_COMMENT(this); - And(scratch, object, Operand(~kPageAlignmentMask)); - Ld(scratch, MemOperand(scratch, BasicMemoryChunk::kFlagsOffset)); + And(scratch, object, + Operand(~MemoryChunkHeader::GetAlignmentMaskForAssembler())); + Ld(scratch, MemOperand(scratch, MemoryChunkLayout::kFlagsOffset)); And(scratch, scratch, Operand(mask)); Branch(condition_met, cc, scratch, Operand(zero_reg)); } diff --git a/deps/v8/src/codegen/mips64/macro-assembler-mips64.h b/deps/v8/src/codegen/mips64/macro-assembler-mips64.h index 6b7466f9de16f9..68e1fae7506eeb 100644 --- a/deps/v8/src/codegen/mips64/macro-assembler-mips64.h +++ b/deps/v8/src/codegen/mips64/macro-assembler-mips64.h @@ -580,10 +580,6 @@ class V8_EXPORT_PRIVATE MacroAssembler : public MacroAssemblerBase { // garbage collection, since that might move the code and invalidate the // return address (unless this is somehow accounted for by the called // function). - enum class SetIsolateDataSlots { - kNo, - kYes, - }; void CallCFunction( ExternalReference function, int num_arguments, SetIsolateDataSlots set_isolate_data_slots = SetIsolateDataSlots::kYes); diff --git a/deps/v8/src/codegen/ppc/macro-assembler-ppc.cc b/deps/v8/src/codegen/ppc/macro-assembler-ppc.cc index 57c2688511a46a..53a6b609351e0d 100644 --- a/deps/v8/src/codegen/ppc/macro-assembler-ppc.cc +++ b/deps/v8/src/codegen/ppc/macro-assembler-ppc.cc @@ -3036,7 +3036,7 @@ void MacroAssembler::CheckPageFlag( DCHECK(cc == ne || cc == eq); DCHECK(scratch != r0); ClearRightImm(scratch, object, Operand(kPageSizeBits)); - LoadU64(scratch, MemOperand(scratch, BasicMemoryChunk::kFlagsOffset), r0); + LoadU64(scratch, MemOperand(scratch, MemoryChunkLayout::kFlagsOffset), r0); mov(r0, Operand(mask)); and_(r0, scratch, r0, SetRC); @@ -5534,7 +5534,8 @@ MemOperand MacroAssembler::EntryFromBuiltinAsOperand(Builtin builtin) { } void MacroAssembler::LoadCodeInstructionStart(Register destination, - Register code_object) { + Register code_object, + CodeEntrypointTag tag) { ASM_CODE_COMMENT(this); #ifdef V8_ENABLE_SANDBOX LoadCodeEntrypointViaCodePointer( diff --git a/deps/v8/src/codegen/ppc/macro-assembler-ppc.h b/deps/v8/src/codegen/ppc/macro-assembler-ppc.h index 5427969433bf77..f6bc06cca5e13e 100644 --- a/deps/v8/src/codegen/ppc/macro-assembler-ppc.h +++ b/deps/v8/src/codegen/ppc/macro-assembler-ppc.h @@ -661,10 +661,6 @@ class V8_EXPORT_PRIVATE MacroAssembler : public MacroAssemblerBase { // garbage collection, since that might move the code and invalidate the // return address (unless this is somehow accounted for by the called // function). - enum class SetIsolateDataSlots { - kNo, - kYes, - }; void CallCFunction( ExternalReference function, int num_arguments, SetIsolateDataSlots set_isolate_data_slots = SetIsolateDataSlots::kYes, @@ -750,7 +746,9 @@ class V8_EXPORT_PRIVATE MacroAssembler : public MacroAssemblerBase { MemOperand EntryFromBuiltinAsOperand(Builtin builtin); // Load the code entry point from the Code object. 
- void LoadCodeInstructionStart(Register destination, Register code_object); + void LoadCodeInstructionStart( + Register destination, Register code_object, + CodeEntrypointTag tag = kDefaultCodeEntrypointTag); void CallCodeObject(Register code_object); void JumpCodeObject(Register code_object, JumpMode jump_mode = JumpMode::kJump); diff --git a/deps/v8/src/codegen/riscv/macro-assembler-riscv.cc b/deps/v8/src/codegen/riscv/macro-assembler-riscv.cc index 68852a4833025c..a9fc8c5b8ec37a 100644 --- a/deps/v8/src/codegen/riscv/macro-assembler-riscv.cc +++ b/deps/v8/src/codegen/riscv/macro-assembler-riscv.cc @@ -4588,6 +4588,43 @@ void MacroAssembler::Call(Register target, Condition cond, Register rs, } } +void MacroAssembler::CompareTaggedRootAndBranch(const Register& obj, + RootIndex index, Condition cc, + Label* target) { + ASM_CODE_COMMENT(this); + // AssertSmiOrHeapObjectInMainCompressionCage(obj); + UseScratchRegisterScope temps(this); + if (V8_STATIC_ROOTS_BOOL && RootsTable::IsReadOnly(index)) { + CompareTaggedAndBranch(target, cc, obj, Operand(ReadOnlyRootPtr(index))); + return; + } + // Some smi roots contain system pointer size values like stack limits. + DCHECK(base::IsInRange(index, RootIndex::kFirstStrongOrReadOnlyRoot, + RootIndex::kLastStrongOrReadOnlyRoot)); + Register temp = temps.Acquire(); + DCHECK(!AreAliased(obj, temp)); + LoadRoot(temp, index); + CompareTaggedAndBranch(target, cc, obj, Operand(temp)); +} +// Compare the object in a register to a value from the root list. +void MacroAssembler::CompareRootAndBranch(const Register& obj, RootIndex index, + Condition cc, Label* target, + ComparisonMode mode) { + ASM_CODE_COMMENT(this); + if (mode == ComparisonMode::kFullPointer || + !base::IsInRange(index, RootIndex::kFirstStrongOrReadOnlyRoot, + RootIndex::kLastStrongOrReadOnlyRoot)) { + // Some smi roots contain system pointer size values like stack limits. + UseScratchRegisterScope temps(this); + Register temp = temps.Acquire(); + DCHECK(!AreAliased(obj, temp)); + LoadRoot(temp, index); + Branch(target, cc, obj, Operand(temp)); + return; + } + CompareTaggedRootAndBranch(obj, index, cc, target); +} + void MacroAssembler::JumpIfIsInRange(Register value, unsigned lower_limit, unsigned higher_limit, Label* on_in_range) { @@ -6479,8 +6516,13 @@ void MacroAssembler::CallCFunctionHelper( Call(function); if (set_isolate_data_slots == SetIsolateDataSlots::kYes) { - if (isolate() != nullptr) { - // We don't unset the PC; the FP is the source of truth. + // We don't unset the PC; the FP is the source of truth. 
+ if (root_array_available()) { + StoreWord(zero_reg, + MemOperand(kRootRegister, + IsolateData::fast_c_call_caller_fp_offset())); + } else { + DCHECK_NOT_NULL(isolate()); UseScratchRegisterScope temps(this); Register scratch = temps.Acquire(); li(scratch, @@ -6504,8 +6546,9 @@ void MacroAssembler::CallCFunctionHelper( void MacroAssembler::CheckPageFlag(Register object, Register scratch, int mask, Condition cc, Label* condition_met) { - And(scratch, object, Operand(~kPageAlignmentMask)); - LoadWord(scratch, MemOperand(scratch, BasicMemoryChunk::kFlagsOffset)); + And(scratch, object, + Operand(~MemoryChunkHeader::GetAlignmentMaskForAssembler())); + LoadWord(scratch, MemOperand(scratch, MemoryChunkLayout::kFlagsOffset)); And(scratch, scratch, Operand(mask)); Branch(condition_met, cc, scratch, Operand(zero_reg)); } @@ -6547,9 +6590,27 @@ void MacroAssembler::CallForDeoptimization(Builtin target, int, Label* exit, } void MacroAssembler::LoadCodeInstructionStart(Register destination, - Register code) { + Register code_object, + CodeEntrypointTag tag) { + ASM_CODE_COMMENT(this); + LoadWord(destination, + FieldMemOperand(code_object, Code::kInstructionStartOffset)); +} + +void MacroAssembler::LoadProtectedPointerField(Register destination, + MemOperand field_operand) { + DCHECK(root_array_available()); +#ifdef V8_ENABLE_SANDBOX ASM_CODE_COMMENT(this); - LoadWord(destination, FieldMemOperand(code, Code::kInstructionStartOffset)); + UseScratchRegisterScope temps(this); + Register scratch = temps.Acquire(); + Lwu(destination, field_operand); + Ld(scratch, + MemOperand(kRootRegister, IsolateData::trusted_cage_base_offset())); + Or(destination, destination, scratch); +#else + LoadTaggedField(destination, field_operand); +#endif } void MacroAssembler::CallCodeObject(Register code) { diff --git a/deps/v8/src/codegen/riscv/macro-assembler-riscv.h b/deps/v8/src/codegen/riscv/macro-assembler-riscv.h index 7006bdc354590f..74431fd9ac7ede 100644 --- a/deps/v8/src/codegen/riscv/macro-assembler-riscv.h +++ b/deps/v8/src/codegen/riscv/macro-assembler-riscv.h @@ -307,7 +307,9 @@ class V8_EXPORT_PRIVATE MacroAssembler : public MacroAssemblerBase { RelocInfo::Mode rmode = RelocInfo::INTERNAL_REFERENCE_ENCODED); // Load the code entry point from the Code object. - void LoadCodeInstructionStart(Register destination, Register code_object); + void LoadCodeInstructionStart( + Register destination, Register code_object, + CodeEntrypointTag tag = kDefaultCodeEntrypointTag); void CallCodeObject(Register code_object); void JumpCodeObject(Register code_object, JumpMode jump_mode = JumpMode::kJump); @@ -614,10 +616,6 @@ class V8_EXPORT_PRIVATE MacroAssembler : public MacroAssemblerBase { // garbage collection, since that might move the code and invalidate the // return address (unless this is somehow accounted for by the called // function). - enum class SetIsolateDataSlots { - kNo, - kYes, - }; void CallCFunction( ExternalReference function, int num_arguments, SetIsolateDataSlots set_isolate_data_slots = SetIsolateDataSlots::kYes); @@ -1292,6 +1290,12 @@ class V8_EXPORT_PRIVATE MacroAssembler : public MacroAssemblerBase { Push(scratch); } + // Compare the object in a register to a value from the root list. 
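Editor's note: the riscv LoadProtectedPointerField added above (mirroring the x64 version later in this patch) rebuilds a full pointer by OR-ing a 32-bit on-heap field with the trusted cage base when the sandbox is enabled. A minimal arithmetic sketch, assuming illustrative base and field values:

```cpp
#include <cassert>
#include <cstdint>

// Hedged sketch of sandbox-mode protected pointer decompression: the field
// stores only the low 32 bits; the trusted cage base supplies the high bits.
using Address = uint64_t;

Address LoadProtectedPointer(Address trusted_cage_base, uint32_t raw_field) {
  return trusted_cage_base | raw_field;  // Lwu + Or in the assembler
}

int main() {
  constexpr Address kTrustedCageBase = 0x0000'7f00'0000'0000;  // illustrative
  constexpr uint32_t kFieldValue = 0x0004'2a10;                // illustrative
  Address ptr = LoadProtectedPointer(kTrustedCageBase, kFieldValue);
  assert(ptr == kTrustedCageBase + kFieldValue);  // base has no low bits set
}
```

Without the sandbox, the same helper falls back to an ordinary tagged load, as the #else branch in the hunk shows.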
+ void CompareRootAndBranch(const Register& obj, RootIndex index, Condition cc, + Label* target, + ComparisonMode mode = ComparisonMode::kDefault); + void CompareTaggedRootAndBranch(const Register& with, RootIndex index, + Condition cc, Label* target); // Compare the object in a register to a value and jump if they are equal. void JumpIfRoot(Register with, RootIndex index, Label* if_equal, Label::Distance distance = Label::kFar) { @@ -1555,6 +1559,9 @@ class V8_EXPORT_PRIVATE MacroAssembler : public MacroAssemblerBase { void DecodeField(Register reg) { DecodeField(reg, reg); } + // Load a protected pointer field. + void LoadProtectedPointerField(Register destination, + MemOperand field_operand); protected: inline Register GetRtAsRegisterHelper(const Operand& rt, Register scratch); diff --git a/deps/v8/src/codegen/s390/macro-assembler-s390.cc b/deps/v8/src/codegen/s390/macro-assembler-s390.cc index 41263552734f02..88d18f14099f4e 100644 --- a/deps/v8/src/codegen/s390/macro-assembler-s390.cc +++ b/deps/v8/src/codegen/s390/macro-assembler-s390.cc @@ -2631,10 +2631,10 @@ void MacroAssembler::CheckPageFlag( // Reverse the byte_offset if emulating on little endian platform byte_offset = kSystemPointerSize - byte_offset - 1; #endif - tm(MemOperand(scratch, BasicMemoryChunk::kFlagsOffset + byte_offset), + tm(MemOperand(scratch, MemoryChunkLayout::kFlagsOffset + byte_offset), Operand(shifted_mask)); } else { - LoadU64(scratch, MemOperand(scratch, BasicMemoryChunk::kFlagsOffset)); + LoadU64(scratch, MemOperand(scratch, MemoryChunkLayout::kFlagsOffset)); AndP(r0, scratch, Operand(mask)); } // Should be okay to remove rc @@ -5021,7 +5021,8 @@ MemOperand MacroAssembler::EntryFromBuiltinAsOperand(Builtin builtin) { } void MacroAssembler::LoadCodeInstructionStart(Register destination, - Register code_object) { + Register code_object, + CodeEntrypointTag tag) { ASM_CODE_COMMENT(this); LoadU64(destination, FieldMemOperand(code_object, Code::kInstructionStartOffset)); diff --git a/deps/v8/src/codegen/s390/macro-assembler-s390.h b/deps/v8/src/codegen/s390/macro-assembler-s390.h index 5e72b24c7cad0e..3c369e6b65b6ef 100644 --- a/deps/v8/src/codegen/s390/macro-assembler-s390.h +++ b/deps/v8/src/codegen/s390/macro-assembler-s390.h @@ -143,7 +143,9 @@ class V8_EXPORT_PRIVATE MacroAssembler : public MacroAssemblerBase { MemOperand EntryFromBuiltinAsOperand(Builtin builtin); // Load the code entry point from the Code object. - void LoadCodeInstructionStart(Register destination, Register code_object); + void LoadCodeInstructionStart( + Register destination, Register code_object, + CodeEntrypointTag tag = kDefaultCodeEntrypointTag); void CallCodeObject(Register code_object); void JumpCodeObject(Register code_object, JumpMode jump_mode = JumpMode::kJump); @@ -926,10 +928,6 @@ class V8_EXPORT_PRIVATE MacroAssembler : public MacroAssemblerBase { // garbage collection, since that might move the code and invalidate the // return address (unless this is somehow accounted for by the called // function). 
- enum class SetIsolateDataSlots { - kNo, - kYes, - }; void CallCFunction( ExternalReference function, int num_arguments, SetIsolateDataSlots set_isolate_data_slots = SetIsolateDataSlots::kYes); diff --git a/deps/v8/src/codegen/source-position.h b/deps/v8/src/codegen/source-position.h index 74bd2cfa8c02ee..46c330fe37370b 100644 --- a/deps/v8/src/codegen/source-position.h +++ b/deps/v8/src/codegen/source-position.h @@ -178,6 +178,8 @@ struct InliningPosition { struct WasmInliningPosition { // Non-canonicalized (module-specific) index of the inlined function. int inlinee_func_index; + // Whether the call was a tail call. + bool was_tail_call; // Source location of the caller. SourcePosition caller_pos; }; diff --git a/deps/v8/src/codegen/x64/interface-descriptors-x64-inl.h b/deps/v8/src/codegen/x64/interface-descriptors-x64-inl.h index bf5570cd4a583f..253d3edad4b927 100644 --- a/deps/v8/src/codegen/x64/interface-descriptors-x64-inl.h +++ b/deps/v8/src/codegen/x64/interface-descriptors-x64-inl.h @@ -164,6 +164,11 @@ constexpr Register MaglevOptimizeCodeOrTailCallOptimizedCodeSlotDescriptor:: FeedbackVectorRegister() { return r9; } +// static +constexpr Register +MaglevOptimizeCodeOrTailCallOptimizedCodeSlotDescriptor::TemporaryRegister() { + return r11; +} // static constexpr Register TypeConversionDescriptor::ArgumentRegister() { return rax; } diff --git a/deps/v8/src/codegen/x64/macro-assembler-x64.cc b/deps/v8/src/codegen/x64/macro-assembler-x64.cc index 0c242f167b7dc8..e25855ca7399a1 100644 --- a/deps/v8/src/codegen/x64/macro-assembler-x64.cc +++ b/deps/v8/src/codegen/x64/macro-assembler-x64.cc @@ -616,15 +616,22 @@ void MacroAssembler::ResolveCodePointerHandle(Register destination, } void MacroAssembler::LoadCodeEntrypointViaCodePointer(Register destination, - Operand field_operand) { + Operand field_operand, + CodeEntrypointTag tag) { DCHECK(!AreAliased(destination, kScratchRegister)); DCHECK(!field_operand.AddressUsesRegister(kScratchRegister)); + DCHECK_NE(tag, kInvalidEntrypointTag); LoadAddress(kScratchRegister, ExternalReference::code_pointer_table_address()); movl(destination, field_operand); shrl(destination, Immediate(kCodePointerHandleShift)); shll(destination, Immediate(kCodePointerTableEntrySizeLog2)); movq(destination, Operand(kScratchRegister, destination, times_1, 0)); + if (tag != 0) { + // Can this be improved? + movq(kScratchRegister, Immediate64(tag)); + xorq(destination, kScratchRegister); + } } #endif // V8_ENABLE_SANDBOX @@ -634,9 +641,8 @@ void MacroAssembler::LoadProtectedPointerField(Register destination, #ifdef V8_ENABLE_SANDBOX DCHECK(!AreAliased(destination, kScratchRegister)); movl(destination, field_operand); - movq(kScratchRegister, - Operand(kRootRegister, IsolateData::trusted_cage_base_offset())); - orq(destination, kScratchRegister); + orq(destination, + Operand(kRootRegister, IsolateData::trusted_cage_base_offset())); #else LoadTaggedField(destination, field_operand); #endif @@ -1052,7 +1058,7 @@ void TailCallOptimizedCodeSlot(MacroAssembler* masm, scratch1, scratch2); static_assert(kJavaScriptCallCodeStartRegister == rcx, "ABI mismatch"); __ Move(rcx, optimized_code_entry); - __ JumpCodeObject(rcx, jump_mode); + __ JumpCodeObject(rcx, kJSEntrypointTag, jump_mode); // Optimized code slot contains deoptimized code or code is cleared and // optimized code marker isn't updated. 
Evict the code, update the marker @@ -1070,9 +1076,9 @@ void MacroAssembler::AssertFeedbackCell(Register object, Register scratch) { Assert(equal, AbortReason::kExpectedFeedbackCell); } } -void MacroAssembler::AssertFeedbackVector(Register object) { +void MacroAssembler::AssertFeedbackVector(Register object, Register scratch) { if (v8_flags.debug_code) { - IsObjectType(object, FEEDBACK_VECTOR_TYPE, kScratchRegister); + IsObjectType(object, FEEDBACK_VECTOR_TYPE, scratch); Assert(equal, AbortReason::kExpectedFeedbackVector); } } @@ -1107,7 +1113,7 @@ void MacroAssembler::GenerateTailCallToReturnedCode( Pop(kJavaScriptCallTargetRegister); } static_assert(kJavaScriptCallCodeStartRegister == rcx, "ABI mismatch"); - JumpCodeObject(rcx, jump_mode); + JumpCodeObject(rcx, kJSEntrypointTag, jump_mode); } void MacroAssembler::ReplaceClosureCodeWithOptimizedCode( @@ -2781,23 +2787,27 @@ void MacroAssembler::TailCallBuiltin(Builtin builtin, Condition cc) { } void MacroAssembler::LoadCodeInstructionStart(Register destination, - Register code_object) { + Register code_object, + CodeEntrypointTag tag) { ASM_CODE_COMMENT(this); #ifdef V8_ENABLE_SANDBOX LoadCodeEntrypointViaCodePointer( - destination, FieldOperand(code_object, Code::kSelfIndirectPointerOffset)); + destination, FieldOperand(code_object, Code::kSelfIndirectPointerOffset), + tag); #else movq(destination, FieldOperand(code_object, Code::kInstructionStartOffset)); #endif } -void MacroAssembler::CallCodeObject(Register code_object) { - LoadCodeInstructionStart(code_object, code_object); +void MacroAssembler::CallCodeObject(Register code_object, + CodeEntrypointTag tag) { + LoadCodeInstructionStart(code_object, code_object, tag); call(code_object); } -void MacroAssembler::JumpCodeObject(Register code_object, JumpMode jump_mode) { - LoadCodeInstructionStart(code_object, code_object); +void MacroAssembler::JumpCodeObject(Register code_object, CodeEntrypointTag tag, + JumpMode jump_mode) { + LoadCodeInstructionStart(code_object, code_object, tag); switch (jump_mode) { case JumpMode::kJump: jmp(code_object); @@ -2816,11 +2826,12 @@ void MacroAssembler::CallJSFunction(Register function_object) { // from the code pointer table instead of going through the Code object. In // this way, we avoid one memory load on this code path. LoadCodeEntrypointViaCodePointer( - rcx, FieldOperand(function_object, JSFunction::kCodeOffset)); + rcx, FieldOperand(function_object, JSFunction::kCodeOffset), + kJSEntrypointTag); call(rcx); #else LoadTaggedField(rcx, FieldOperand(function_object, JSFunction::kCodeOffset)); - CallCodeObject(rcx); + CallCodeObject(rcx, kJSEntrypointTag); #endif } @@ -2832,12 +2843,13 @@ void MacroAssembler::JumpJSFunction(Register function_object, // from the code pointer table instead of going through the Code object. In // this way, we avoid one memory load on this code path. 
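Editor's note: with the sandbox enabled, LoadCodeEntrypointViaCodePointer above now XORs the code-pointer-table entry with a CodeEntrypointTag, so callers such as CallJSFunction (kJSEntrypointTag) only get a usable entrypoint when their tag matches the one the entry was stored with. A hedged sketch of that scheme; the tag constants and the store side are illustrative assumptions, only the XOR on load is taken from the hunk:

```cpp
#include <cassert>
#include <cstdint>

using Address = uint64_t;
using CodeEntrypointTag = uint64_t;

constexpr CodeEntrypointTag kJSEntrypointTagDemo   = 0x1000'0000'0000'0000;  // illustrative
constexpr CodeEntrypointTag kWasmEntrypointTagDemo = 0x2000'0000'0000'0000;  // illustrative

Address StoreEntry(Address entrypoint, CodeEntrypointTag tag) {
  return entrypoint ^ tag;  // assumed: the table holds the tagged value
}

Address LoadEntry(Address table_entry, CodeEntrypointTag tag) {
  return table_entry ^ tag;  // movq + xorq in the assembler hunk above
}

int main() {
  Address real = 0x0000'5555'0000'1230;
  Address stored = StoreEntry(real, kJSEntrypointTagDemo);
  assert(LoadEntry(stored, kJSEntrypointTagDemo) == real);    // matching tag
  assert(LoadEntry(stored, kWasmEntrypointTagDemo) != real);  // mismatched tag is unusable
}
```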
LoadCodeEntrypointViaCodePointer( - rcx, FieldOperand(function_object, JSFunction::kCodeOffset)); + rcx, FieldOperand(function_object, JSFunction::kCodeOffset), + kJSEntrypointTag); DCHECK_EQ(jump_mode, JumpMode::kJump); jmp(rcx); #else LoadTaggedField(rcx, FieldOperand(function_object, JSFunction::kCodeOffset)); - JumpCodeObject(rcx, jump_mode); + JumpCodeObject(rcx, kJSEntrypointTag, jump_mode); #endif } @@ -3898,17 +3910,19 @@ void MacroAssembler::PrepareCallCFunction(int num_arguments) { kScratchRegister); } -void MacroAssembler::CallCFunction(ExternalReference function, - int num_arguments, - SetIsolateDataSlots set_isolate_data_slots) { +int MacroAssembler::CallCFunction(ExternalReference function, int num_arguments, + SetIsolateDataSlots set_isolate_data_slots, + Label* return_location) { // Note: The "CallCFunction" code comment will be generated by the other // CallCFunction method called below. LoadAddress(rax, function); - CallCFunction(rax, num_arguments, set_isolate_data_slots); + return CallCFunction(rax, num_arguments, set_isolate_data_slots, + return_location); } -void MacroAssembler::CallCFunction(Register function, int num_arguments, - SetIsolateDataSlots set_isolate_data_slots) { +int MacroAssembler::CallCFunction(Register function, int num_arguments, + SetIsolateDataSlots set_isolate_data_slots, + Label* return_location) { ASM_CODE_COMMENT(this); DCHECK_LE(num_arguments, kMaxCParameters); DCHECK(has_frame()); @@ -3920,11 +3934,11 @@ void MacroAssembler::CallCFunction(Register function, int num_arguments, // Save the frame pointer and PC so that the stack layout remains iterable, // even without an ExitFrame which normally exists between JS and C frames. Label get_pc; - DCHECK(!AreAliased(kScratchRegister, function)); - leaq(kScratchRegister, Operand(&get_pc, 0)); - bind(&get_pc); if (set_isolate_data_slots == SetIsolateDataSlots::kYes) { + DCHECK(!AreAliased(kScratchRegister, function)); + leaq(kScratchRegister, Operand(&get_pc, 0)); + // Addressing the following external references is tricky because we need // this to work in three situations: // 1. In wasm compilation, the isolate is nullptr and thus no @@ -3966,6 +3980,9 @@ void MacroAssembler::CallCFunction(Register function, int num_arguments, } call(function); + int call_pc_offset = pc_offset(); + bind(&get_pc); + if (return_location) bind(return_location); if (set_isolate_data_slots == SetIsolateDataSlots::kYes) { // We don't unset the PC; the FP is the source of truth. 
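Editor's note: CallCFunction on x64 (and ia32 earlier) now returns the pc offset sampled immediately after the call and can bind a caller-supplied return_location label; the Arm/Arm64 code generators later in this patch feed that offset straight into RecordSafepoint. A hedged usage-shaped sketch in which MacroAssemblerModel and the instruction sizes are stand-ins, not V8 APIs:

```cpp
#include <cassert>
#include <vector>

// Stand-in types showing how the new int return value is consumed: the code
// generator records a safepoint at the offset of the instruction right after
// the call instead of binding and sampling a label itself.
struct MacroAssemblerModel {
  int pc = 0;
  void Emit(int bytes) { pc += bytes; }
  int CallCFunction(/* target, num_args, ... */) {
    Emit(7);                   // the call instruction itself (size is made up)
    int call_pc_offset = pc;   // pc_offset() sampled right after the call
    return call_pc_offset;
  }
};

void RecordSafepoint(int pc_offset, std::vector<int>* table) {
  table->push_back(pc_offset);
}

int main() {
  MacroAssemblerModel masm;
  std::vector<int> safepoints;
  masm.Emit(5);                          // some preceding code
  int pc_offset = masm.CallCFunction();  // new: returns the post-call offset
  RecordSafepoint(pc_offset, &safepoints);
  assert(safepoints.size() == 1 && safepoints[0] == 12);
}
```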
@@ -3985,6 +4002,20 @@ void MacroAssembler::CallCFunction(Register function, int num_arguments, int argument_slots_on_stack = ArgumentStackSlotsForCFunctionCall(num_arguments); movq(rsp, Operand(rsp, argument_slots_on_stack * kSystemPointerSize)); + + return call_pc_offset; +} + +void MacroAssembler::MemoryChunkHeaderFromObject(Register object, + Register header) { + constexpr intptr_t alignment_mask = + MemoryChunkHeader::GetAlignmentMaskForAssembler(); + if (header == object) { + andq(header, Immediate(~alignment_mask)); + } else { + movq(header, Immediate(~alignment_mask)); + andq(header, object); + } } void MacroAssembler::CheckPageFlag(Register object, Register scratch, int mask, @@ -3992,17 +4023,12 @@ void MacroAssembler::CheckPageFlag(Register object, Register scratch, int mask, Label::Distance condition_met_distance) { ASM_CODE_COMMENT(this); DCHECK(cc == zero || cc == not_zero); - if (scratch == object) { - andq(scratch, Immediate(~kPageAlignmentMask)); - } else { - movq(scratch, Immediate(~kPageAlignmentMask)); - andq(scratch, object); - } + MemoryChunkHeaderFromObject(object, scratch); if (mask < (1 << kBitsPerByte)) { - testb(Operand(scratch, BasicMemoryChunk::kFlagsOffset), + testb(Operand(scratch, MemoryChunkLayout::kFlagsOffset), Immediate(static_cast(mask))); } else { - testl(Operand(scratch, BasicMemoryChunk::kFlagsOffset), Immediate(mask)); + testl(Operand(scratch, MemoryChunkLayout::kFlagsOffset), Immediate(mask)); } j(cc, condition_met, condition_met_distance); } diff --git a/deps/v8/src/codegen/x64/macro-assembler-x64.h b/deps/v8/src/codegen/x64/macro-assembler-x64.h index 117b757318aec3..0f6b4b7649f4ba 100644 --- a/deps/v8/src/codegen/x64/macro-assembler-x64.h +++ b/deps/v8/src/codegen/x64/macro-assembler-x64.h @@ -113,21 +113,20 @@ class V8_EXPORT_PRIVATE MacroAssembler // garbage collection, since that might move the code and invalidate the // return address (unless this is somehow accounted for by the called // function). - enum class SetIsolateDataSlots { - kNo, - kYes, - }; - void CallCFunction( + int CallCFunction( ExternalReference function, int num_arguments, - SetIsolateDataSlots set_isolate_data_slots = SetIsolateDataSlots::kYes); - void CallCFunction( + SetIsolateDataSlots set_isolate_data_slots = SetIsolateDataSlots::kYes, + Label* return_location = nullptr); + int CallCFunction( Register function, int num_arguments, - SetIsolateDataSlots set_isolate_data_slots = SetIsolateDataSlots::kYes); + SetIsolateDataSlots set_isolate_data_slots = SetIsolateDataSlots::kYes, + Label* return_location = nullptr); // Calculate the number of stack slots to reserve for arguments when calling a // C function. static int ArgumentStackSlotsForCFunctionCall(int num_arguments); + void MemoryChunkHeaderFromObject(Register object, Register header); void CheckPageFlag(Register object, Register scratch, int mask, Condition cc, Label* condition_met, Label::Distance condition_met_distance = Label::kFar); @@ -474,9 +473,10 @@ class V8_EXPORT_PRIVATE MacroAssembler void TailCallBuiltin(Builtin builtin, Condition cc); // Load the code entry point from the Code object. 
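Editor's note: MemoryChunkHeaderFromObject above recovers the page (memory chunk) header by masking off the low bits of any object address; CheckPageFlag then tests bits in the flags word at a fixed offset from that header. A hedged arithmetic sketch, with a 256 KiB alignment assumed for illustration (the real mask comes from MemoryChunkHeader::GetAlignmentMaskForAssembler()):

```cpp
#include <cassert>
#include <cstdint>

using Address = uint64_t;

constexpr Address kChunkAlignment = 256 * 1024;          // illustrative
constexpr Address kAlignmentMask  = kChunkAlignment - 1;

Address ChunkHeaderFromObject(Address object) {
  return object & ~kAlignmentMask;  // and_/andq with ~mask in the assembler
}

int main() {
  Address chunk = 0x0000'1234'0004'0000;  // 256 KiB-aligned chunk start
  Address object = chunk + 0x1a30;        // an object somewhere inside the chunk
  assert(ChunkHeaderFromObject(object) == chunk);
  // CheckPageFlag then loads the flags word at chunk + kFlagsOffset and tests
  // it against the requested mask, branching on zero / not-zero.
}
```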
- void LoadCodeInstructionStart(Register destination, Register code_object); - void CallCodeObject(Register code_object); - void JumpCodeObject(Register code_object, + void LoadCodeInstructionStart(Register destination, Register code_object, + CodeEntrypointTag tag); + void CallCodeObject(Register code_object, CodeEntrypointTag tag); + void JumpCodeObject(Register code_object, CodeEntrypointTag tag, JumpMode jump_mode = JumpMode::kJump); // Convenience functions to call/jmp to the code of a JSFunction object. @@ -778,7 +778,8 @@ class V8_EXPORT_PRIVATE MacroAssembler // Only available when the sandbox is enabled as it requires the code pointer // table. void LoadCodeEntrypointViaCodePointer(Register destination, - Operand field_operand); + Operand field_operand, + CodeEntrypointTag tag); #endif // V8_ENABLE_SANDBOX void LoadProtectedPointerField(Register destination, Operand field_operand); @@ -957,7 +958,8 @@ class V8_EXPORT_PRIVATE MacroAssembler // Tiering support. void AssertFeedbackCell(Register object, Register scratch) NOOP_UNLESS_DEBUG_CODE; - void AssertFeedbackVector(Register object) NOOP_UNLESS_DEBUG_CODE; + void AssertFeedbackVector(Register object, + Register scratch) NOOP_UNLESS_DEBUG_CODE; void ReplaceClosureCodeWithOptimizedCode(Register optimized_code, Register closure, Register scratch1, Register slot_address); diff --git a/deps/v8/src/common/globals.h b/deps/v8/src/common/globals.h index a0fad7e31bb647..5c8e134266f48f 100644 --- a/deps/v8/src/common/globals.h +++ b/deps/v8/src/common/globals.h @@ -439,8 +439,8 @@ constexpr size_t kReservedCodeRangePages = 0; #endif // These constants define the total trusted space memory per process. -constexpr size_t kMaximalTrustedRangeSize = 256 * MB; -constexpr size_t kMinimumTrustedRangeSize = 3 * MB; +constexpr size_t kMaximalTrustedRangeSize = 1 * GB; +constexpr size_t kMinimumTrustedRangeSize = 32 * MB; #else // V8_HOST_ARCH_64_BIT @@ -891,9 +891,6 @@ constexpr uint32_t kFreeListZapValue = 0xfeed1eaf; constexpr int kCodeZapValue = 0xbadc0de; constexpr uint32_t kPhantomReferenceZap = 0xca11bac; -// Page constants. -static const intptr_t kPageAlignmentMask = (intptr_t{1} << kPageSizeBits) - 1; - // On Intel architecture, cache line size is 64 bytes. // On ARM it may be less (32 bytes), but as far this constant is // used for aligning data, it doesn't hurt to align on a greater value. @@ -1739,7 +1736,10 @@ enum class VariableMode : uint8_t { // User declared variables: kLet, // declared via 'let' declarations (first lexical) - kConst, // declared via 'const' declarations (last lexical) + kConst, // declared via 'const' declarations + + kUsing, // declared via 'using' declaration for explicit memory management + // (last lexical) kVar, // declared via 'var', and 'function' declarations @@ -1778,7 +1778,7 @@ enum class VariableMode : uint8_t { kPrivateGetterAndSetter, // Does not coexist with any other variable with the // same name in the same scope. 
- kLastLexicalVariableMode = kConst, + kLastLexicalVariableMode = kUsing, }; // Printing support @@ -1807,6 +1807,8 @@ inline const char* VariableMode2String(VariableMode mode) { return "DYNAMIC_LOCAL"; case VariableMode::kTemporary: return "TEMPORARY"; + case VariableMode::kUsing: + return "USING"; } UNREACHABLE(); } @@ -2337,16 +2339,35 @@ enum IsolateAddressId { V(TrapStringOffsetOutOfBounds) enum class KeyedAccessLoadMode { - kInBounds, - kHandleOOB, + kInBounds = 0b00, + kHandleOOB = 0b01, + kHandleHoles = 0b10, + kHandleOOBAndHoles = 0b11, }; -inline bool LoadModeIsInBounds(KeyedAccessLoadMode load_mode) { - return load_mode == KeyedAccessLoadMode::kInBounds; +inline KeyedAccessLoadMode CreateKeyedAccessLoadMode(bool handle_oob, + bool handle_holes) { + return static_cast( + static_cast(handle_oob) | (static_cast(handle_holes) << 1)); +} + +inline KeyedAccessLoadMode GeneralizeKeyedAccessLoadMode( + KeyedAccessLoadMode mode1, KeyedAccessLoadMode mode2) { + using T = std::underlying_type::type; + return static_cast(static_cast(mode1) | + static_cast(mode2)); } inline bool LoadModeHandlesOOB(KeyedAccessLoadMode load_mode) { - return load_mode == KeyedAccessLoadMode::kHandleOOB; + using T = std::underlying_type::type; + return (static_cast(load_mode) & + static_cast(KeyedAccessLoadMode::kHandleOOB)) != 0; +} + +inline bool LoadModeHandlesHoles(KeyedAccessLoadMode load_mode) { + using T = std::underlying_type::type; + return (static_cast(load_mode) & + static_cast(KeyedAccessLoadMode::kHandleHoles)) != 0; } enum class KeyedAccessStoreMode { diff --git a/deps/v8/src/common/message-template.h b/deps/v8/src/common/message-template.h index 2ac6a3103c922a..d754bf9f042f3b 100644 --- a/deps/v8/src/common/message-template.h +++ b/deps/v8/src/common/message-template.h @@ -115,6 +115,9 @@ namespace internal { T(IllegalInvocation, "Illegal invocation") \ T(ImmutablePrototypeSet, \ "Immutable prototype object '%' cannot have their prototype set") \ + T(ImportAssertDeprecated, \ + "'assert' is deprecated in import statements and support will be removed " \ + "in %; use 'with' instead") \ T(ImportAssertionDuplicateKey, "Import assertion has duplicate key '%'") \ T(ImportCallNotNewExpression, "Cannot use new with import") \ T(ImportOutsideModule, "Cannot use import statement outside a module") \ @@ -508,8 +511,10 @@ namespace internal { T(InvalidPrivateMethodWrite, "Private method '%' is not writable") \ T(InvalidPrivateGetterAccess, "'%' was defined without a getter") \ T(InvalidPrivateSetterAccess, "'%' was defined without a setter") \ + T(InvalidSizeValue, "'%' is an invalid size") \ T(InvalidUnusedPrivateStaticMethodAccessedByDebugger, \ "Unused static private method '%' cannot be accessed at debug time") \ + T(InvalidUsingInForInLoop, "Invalid 'using' in for-in loop") \ T(JsonParseUnexpectedEOS, "Unexpected end of JSON input") \ T(JsonParseUnexpectedTokenNumber, \ "Unexpected number in JSON at position % (line % column %)") \ diff --git a/deps/v8/src/common/ptr-compr-inl.h b/deps/v8/src/common/ptr-compr-inl.h index efe78dcd282aa1..72f0a9dd041ba5 100644 --- a/deps/v8/src/common/ptr-compr-inl.h +++ b/deps/v8/src/common/ptr-compr-inl.h @@ -80,13 +80,13 @@ Tagged_t V8HeapCompressionSchemeImpl::CompressObject(Address tagged) { #ifdef V8_COMPRESS_POINTERS_IN_SHARED_CAGE V8_ASSUME((tagged & kPtrComprCageBaseMask) == base() || HAS_SMI_TAG(tagged)); #endif - return static_cast(static_cast(tagged)); + return static_cast(tagged); } // static template Tagged_t V8HeapCompressionSchemeImpl::CompressAny(Address 
tagged) { - return static_cast(static_cast(tagged)); + return static_cast(tagged); } // static @@ -197,12 +197,12 @@ Tagged_t ExternalCodeCompressionScheme::CompressObject(Address tagged) { #ifdef V8_COMPRESS_POINTERS_IN_SHARED_CAGE V8_ASSUME((tagged & kPtrComprCageBaseMask) == base() || HAS_SMI_TAG(tagged)); #endif - return static_cast(static_cast(tagged)); + return static_cast(tagged); } // static Tagged_t ExternalCodeCompressionScheme::CompressAny(Address tagged) { - return static_cast(static_cast(tagged)); + return static_cast(tagged); } // static diff --git a/deps/v8/src/common/ptr-compr.h b/deps/v8/src/common/ptr-compr.h index 487185a9b73f2b..a4ebe0991b3095 100644 --- a/deps/v8/src/common/ptr-compr.h +++ b/deps/v8/src/common/ptr-compr.h @@ -85,6 +85,21 @@ using TrustedSpaceCompressionScheme = V8HeapCompressionSchemeImpl; using TrustedSpaceCompressionScheme = V8HeapCompressionScheme; #endif // V8_ENABLE_SANDBOX +// A compression scheme which can be passed if the only objects we ever expect +// to see are Smis (e.g. for {TaggedField}). +class SmiCompressionScheme : public AllStatic { + public: + static Address DecompressTaggedSigned(Tagged_t raw_value) { + // For runtime code the upper 32-bits of the Smi value do not matter. + return static_cast
(raw_value); + } + + static Tagged_t CompressObject(Address tagged) { + V8_ASSUME(HAS_SMI_TAG(tagged)); + return static_cast(tagged); + } +}; + #ifdef V8_EXTERNAL_CODE_SPACE // Compression scheme used for fields containing InstructionStream objects // (namely for the Code::code field). Same as diff --git a/deps/v8/src/compiler/access-builder.cc b/deps/v8/src/compiler/access-builder.cc index 09c47c7ebd1292..5f4ccc0b651acc 100644 --- a/deps/v8/src/compiler/access-builder.cc +++ b/deps/v8/src/compiler/access-builder.cc @@ -1464,6 +1464,18 @@ FieldAccess AccessBuilder::ForWasmArrayLength() { compiler::kNoWriteBarrier, "WasmArrayLength"}; } + +// static +FieldAccess AccessBuilder::ForWasmDispatchTableLength() { + return {compiler::kTaggedBase, + WasmDispatchTable::kLengthOffset, + MaybeHandle{}, + compiler::OptionalMapRef{}, + compiler::Type::OtherInternal(), + MachineType::Uint32(), + compiler::kNoWriteBarrier, + "WasmDispatchTableLength"}; +} #endif // V8_ENABLE_WEBASSEMBLY } // namespace compiler diff --git a/deps/v8/src/compiler/access-builder.h b/deps/v8/src/compiler/access-builder.h index e22af0b3d96965..ced4dd466c8ad0 100644 --- a/deps/v8/src/compiler/access-builder.h +++ b/deps/v8/src/compiler/access-builder.h @@ -376,6 +376,7 @@ class V8_EXPORT_PRIVATE AccessBuilder final #if V8_ENABLE_WEBASSEMBLY static FieldAccess ForWasmArrayLength(); + static FieldAccess ForWasmDispatchTableLength(); #endif private: diff --git a/deps/v8/src/compiler/backend/arm/code-generator-arm.cc b/deps/v8/src/compiler/backend/arm/code-generator-arm.cc index d32a1c28ff3df5..0ebea44f6794a4 100644 --- a/deps/v8/src/compiler/backend/arm/code-generator-arm.cc +++ b/deps/v8/src/compiler/backend/arm/code-generator-arm.cc @@ -793,27 +793,31 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( break; case kArchCallCFunction: { int const num_parameters = MiscField::decode(instr->opcode()); + SetIsolateDataSlots set_isolate_data_slots = SetIsolateDataSlots::kYes; + Label return_location; #if V8_ENABLE_WEBASSEMBLY if (linkage()->GetIncomingDescriptor()->IsWasmCapiFunction()) { - // Put the current address in a stack slot, and record a safepoint on - // the same address. In most architectures, we record the address after - // the function call, but this works too as long as the address in the - // frame and safepoint table match. - __ str(pc, MemOperand(fp, WasmExitFrameConstants::kCallingPCOffset)); - // In Arm, the pc points two instructions after the currently executing - // instruction: see https://bit.ly/3CD80OA. To line up the safepoint - // address with the stored pc, we add a nop here. - __ nop(); - RecordSafepoint(instr->reference_map()); + // Put the return address in a stack slot. 
+ Register pc_scratch = r5; + __ Push(pc_scratch); + __ GetLabelAddress(pc_scratch, &return_location); + __ str(pc_scratch, + MemOperand(fp, WasmExitFrameConstants::kCallingPCOffset)); + __ Pop(pc_scratch); + set_isolate_data_slots = SetIsolateDataSlots::kNo; } #endif // V8_ENABLE_WEBASSEMBLY + int pc_offset; if (instr->InputAt(0)->IsImmediate()) { ExternalReference ref = i.InputExternalReference(0); - __ CallCFunction(ref, num_parameters); + pc_offset = __ CallCFunction(ref, num_parameters, + set_isolate_data_slots, &return_location); } else { Register func = i.InputRegister(0); - __ CallCFunction(func, num_parameters); + pc_offset = __ CallCFunction(func, num_parameters, + set_isolate_data_slots, &return_location); } + RecordSafepoint(instr->reference_map(), pc_offset); frame_access_state()->SetFrameAccessToDefault(); // Ideally, we should decrement SP delta to match the change of stack // pointer in CallCFunction. However, for certain architectures (e.g. @@ -3852,6 +3856,17 @@ void CodeGenerator::AssembleConstructFrame() { const int returns = frame()->GetReturnSlotCount(); // Create space for returns. __ AllocateStackSpace(returns * kSystemPointerSize); + + if (!frame()->tagged_slots().IsEmpty()) { + UseScratchRegisterScope temps(masm()); + Register zero = temps.Acquire(); + __ mov(zero, Operand(0)); + for (int spill_slot : frame()->tagged_slots()) { + FrameOffset offset = frame_access_state()->GetFrameOffset(spill_slot); + DCHECK(offset.from_frame_pointer()); + __ str(zero, MemOperand(fp, offset.offset())); + } + } } void CodeGenerator::AssembleReturn(InstructionOperand* additional_pop_count) { diff --git a/deps/v8/src/compiler/backend/arm/instruction-selector-arm.cc b/deps/v8/src/compiler/backend/arm/instruction-selector-arm.cc index 8546553c8a82de..1bd87c7e9736f9 100644 --- a/deps/v8/src/compiler/backend/arm/instruction-selector-arm.cc +++ b/deps/v8/src/compiler/backend/arm/instruction-selector-arm.cc @@ -736,7 +736,8 @@ void VisitPairAtomicBinOp(InstructionSelectorT* selector, template void InstructionSelectorT::VisitStackSlot(node_t node) { StackSlotRepresentation rep = this->stack_slot_representation_of(node); - int slot = frame_->AllocateSpillSlot(rep.size(), rep.alignment()); + int slot = + frame_->AllocateSpillSlot(rep.size(), rep.alignment(), rep.is_tagged()); OperandGenerator g(this); Emit(kArchStackSlot, g.DefineAsRegister(node), @@ -2023,7 +2024,7 @@ void InstructionSelectorT::VisitWord32ReverseBytes(node_t node) { } template -void InstructionSelectorT::VisitSimd128ReverseBytes(Node* node) { +void InstructionSelectorT::VisitSimd128ReverseBytes(node_t node) { UNREACHABLE(); } diff --git a/deps/v8/src/compiler/backend/arm64/code-generator-arm64.cc b/deps/v8/src/compiler/backend/arm64/code-generator-arm64.cc index 9bafa0156bb05b..541597ef969a8c 100644 --- a/deps/v8/src/compiler/backend/arm64/code-generator-arm64.cc +++ b/deps/v8/src/compiler/backend/arm64/code-generator-arm64.cc @@ -743,10 +743,12 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( __ Call(i.InputCode(0), RelocInfo::CODE_TARGET); } else { Register reg = i.InputRegister(0); + CodeEntrypointTag tag = + i.InputCodeEntrypointTag(instr->CodeEnrypointTagInputIndex()); DCHECK_IMPLIES( instr->HasCallDescriptorFlag(CallDescriptor::kFixedTargetRegister), reg == kJavaScriptCallCodeStartRegister); - __ CallCodeObject(reg); + __ CallCodeObject(reg, tag); } RecordCallPosition(instr); frame_access_state()->ClearSPDelta(); @@ -801,10 +803,12 @@ CodeGenerator::CodeGenResult 
CodeGenerator::AssembleArchInstruction( __ Jump(i.InputCode(0), RelocInfo::CODE_TARGET); } else { Register reg = i.InputRegister(0); + CodeEntrypointTag tag = + i.InputCodeEntrypointTag(instr->CodeEnrypointTagInputIndex()); DCHECK_IMPLIES( instr->HasCallDescriptorFlag(CallDescriptor::kFixedTargetRegister), reg == kJavaScriptCallCodeStartRegister); - __ JumpCodeObject(reg); + __ JumpCodeObject(reg, tag); } unwinding_info_writer_.MarkBlockWillExit(); frame_access_state()->ClearSPDelta(); @@ -884,26 +888,25 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( int const num_gp_parameters = ParamField::decode(instr->opcode()); int const num_fp_parameters = FPParamField::decode(instr->opcode()); Label return_location; + SetIsolateDataSlots set_isolate_data_slots = SetIsolateDataSlots::kYes; #if V8_ENABLE_WEBASSEMBLY if (linkage()->GetIncomingDescriptor()->IsWasmCapiFunction()) { // Put the return address in a stack slot. __ StoreReturnAddressInWasmExitFrame(&return_location); + set_isolate_data_slots = SetIsolateDataSlots::kNo; } #endif // V8_ENABLE_WEBASSEMBLY - + int pc_offset; if (instr->InputAt(0)->IsImmediate()) { ExternalReference ref = i.InputExternalReference(0); - __ CallCFunction(ref, num_gp_parameters, num_fp_parameters); + pc_offset = __ CallCFunction(ref, num_gp_parameters, num_fp_parameters, + set_isolate_data_slots, &return_location); } else { Register func = i.InputRegister(0); - __ CallCFunction(func, num_gp_parameters, num_fp_parameters); - } - __ Bind(&return_location); -#if V8_ENABLE_WEBASSEMBLY - if (linkage()->GetIncomingDescriptor()->IsWasmCapiFunction()) { - RecordSafepoint(instr->reference_map()); + pc_offset = __ CallCFunction(func, num_gp_parameters, num_fp_parameters, + set_isolate_data_slots, &return_location); } -#endif // V8_ENABLE_WEBASSEMBLY + RecordSafepoint(instr->reference_map(), pc_offset); frame_access_state()->SetFrameAccessToDefault(); // Ideally, we should decrement SP delta to match the change of stack // pointer in CallCFunction. However, for certain architectures (e.g. 
@@ -3399,6 +3402,12 @@ void CodeGenerator::AssembleConstructFrame() { if (returns != 0) { __ Claim(returns); } + + for (int spill_slot : frame()->tagged_slots()) { + FrameOffset offset = frame_access_state()->GetFrameOffset(spill_slot); + DCHECK(offset.from_frame_pointer()); + __ Str(xzr, MemOperand(fp, offset.offset())); + } } void CodeGenerator::AssembleReturn(InstructionOperand* additional_pop_count) { @@ -3740,10 +3749,9 @@ void CodeGenerator::AssembleMove(InstructionOperand* source, case MoveType::kRegisterToRegister: if (source->IsRegister()) { __ Mov(g.ToRegister(destination), g.ToRegister(source)); - } else if (source->IsFloatRegister() || source->IsDoubleRegister()) { - __ Mov(g.ToDoubleRegister(destination), g.ToDoubleRegister(source)); } else { - DCHECK(source->IsSimd128Register()); + DCHECK(source->IsSimd128Register() || source->IsFloatRegister() || + source->IsDoubleRegister()); __ Mov(g.ToDoubleRegister(destination).Q(), g.ToDoubleRegister(source).Q()); } diff --git a/deps/v8/src/compiler/backend/arm64/instruction-selector-arm64.cc b/deps/v8/src/compiler/backend/arm64/instruction-selector-arm64.cc index 44e5f828c064e8..7dce0586e9d774 100644 --- a/deps/v8/src/compiler/backend/arm64/instruction-selector-arm64.cc +++ b/deps/v8/src/compiler/backend/arm64/instruction-selector-arm64.cc @@ -1145,7 +1145,8 @@ void InstructionSelectorT::VisitTraceInstruction(node_t node) {} template void InstructionSelectorT::VisitStackSlot(node_t node) { StackSlotRepresentation rep = this->stack_slot_representation_of(node); - int slot = frame_->AllocateSpillSlot(rep.size(), rep.alignment()); + int slot = + frame_->AllocateSpillSlot(rep.size(), rep.alignment(), rep.is_tagged()); OperandGenerator g(this); Emit(kArchStackSlot, g.DefineAsRegister(node), @@ -1876,7 +1877,7 @@ void InstructionSelectorT::VisitProtectedStore(node_t node) { } template -void InstructionSelectorT::VisitSimd128ReverseBytes(Node* node) { +void InstructionSelectorT::VisitSimd128ReverseBytes(node_t node) { UNREACHABLE(); } @@ -4418,7 +4419,7 @@ void VisitWord32Compare(InstructionSelectorT* selector, FlagsContinuationT* cont) { using namespace turboshaft; // NOLINT(build/namespaces) const Operation& compare = selector->Get(node); - DCHECK(compare.Is()); + DCHECK_GE(compare.input_count, 2); OpIndex lhs = compare.input(0); OpIndex rhs = compare.input(1); FlagsCondition cond = cont->condition(); diff --git a/deps/v8/src/compiler/backend/code-generator-impl.h b/deps/v8/src/compiler/backend/code-generator-impl.h index d3251e0199b39d..fd6220aaa239c2 100644 --- a/deps/v8/src/compiler/backend/code-generator-impl.h +++ b/deps/v8/src/compiler/backend/code-generator-impl.h @@ -86,6 +86,13 @@ class InstructionOperandConverter { return static_cast(InputInt32(index) & 0x3F); } + CodeEntrypointTag InputCodeEntrypointTag(size_t index) { + // Tags are stored shifted to the right so they fit into 32-bits. 
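
The comment above describes the packing used for the new CodeEntrypointTag call input: the 64-bit tag only occupies high bits, so instruction selection can store the right-shifted tag as a 32-bit immediate and code generation (the InputCodeEntrypointTag helper continued just below) shifts it back. A stand-alone round-trip sketch; the shift amount here is only an assumed example value, not necessarily V8's kCodeEntrypointTagShift:

#include <cassert>
#include <cstdint>

// Assumed for illustration: the tag lives in the upper bits of a 64-bit word,
// so right-shifting by this amount makes it fit into 32 bits.
constexpr int kCodeEntrypointTagShift = 48;  // example value only

int main() {
  const uint64_t tag = uint64_t{0xABCD} << kCodeEntrypointTagShift;
  // Instruction selection stores the shifted tag as a 32-bit immediate...
  const uint32_t shifted_tag =
      static_cast<uint32_t>(tag >> kCodeEntrypointTagShift);
  // ...and code generation widens and shifts it back before using it.
  const uint64_t recovered = uint64_t{shifted_tag} << kCodeEntrypointTagShift;
  assert(recovered == tag);
  return 0;
}
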
+ uint64_t shifted_tag = InputUint32(index); + return static_cast(shifted_tag + << kCodeEntrypointTagShift); + } + ExternalReference InputExternalReference(size_t index) { return ToExternalReference(instr_->InputAt(index)); } diff --git a/deps/v8/src/compiler/backend/code-generator.cc b/deps/v8/src/compiler/backend/code-generator.cc index 1dc8017ddd7ebb..4c7129e7a88182 100644 --- a/deps/v8/src/compiler/backend/code-generator.cc +++ b/deps/v8/src/compiler/backend/code-generator.cc @@ -482,9 +482,6 @@ MaybeHandle CodeGenerator::FinalizeCode() { Handle source_positions = source_position_table_builder_.ToSourcePositionTable(isolate()); - // Allocate deoptimization data. - Handle deopt_data = GenerateDeoptimizationData(); - // Allocate and install the code. CodeDesc desc; masm()->GetCode(isolate()->main_thread_local_isolate(), &desc, safepoints(), @@ -500,17 +497,21 @@ MaybeHandle CodeGenerator::FinalizeCode() { unwinding_info_writer_.eh_frame_writer()->GetEhFrame(&desc); } - MaybeHandle maybe_code = - Factory::CodeBuilder(isolate(), desc, info()->code_kind()) - .set_builtin(info()->builtin()) - .set_inlined_bytecode_size(info()->inlined_bytecode_size()) - .set_source_position_table(source_positions) - .set_deoptimization_data(deopt_data) - .set_is_turbofanned() - .set_stack_slots(frame()->GetTotalFrameSlotCount()) - .set_profiler_data(info()->profiler_data()) - .set_osr_offset(info()->osr_offset()) - .TryBuild(); + Factory::CodeBuilder builder(isolate(), desc, info()->code_kind()); + builder.set_builtin(info()->builtin()) + .set_inlined_bytecode_size(info()->inlined_bytecode_size()) + .set_source_position_table(source_positions) + .set_is_turbofanned() + .set_stack_slots(frame()->GetTotalFrameSlotCount()) + .set_profiler_data(info()->profiler_data()) + .set_osr_offset(info()->osr_offset()); + + if (info()->code_kind() == CodeKind::TURBOFAN) { + // Deoptimization data is only used in this case. + builder.set_deoptimization_data(GenerateDeoptimizationData()); + } + + MaybeHandle maybe_code = builder.TryBuild(); Handle code; if (!maybe_code.ToHandle(&code)) { @@ -534,6 +535,11 @@ bool CodeGenerator::IsNextInAssemblyOrder(RpoNumber block) const { void CodeGenerator::RecordSafepoint(ReferenceMap* references, int pc_offset) { auto safepoint = safepoints()->DefineSafepoint(masm(), pc_offset); + + for (int tagged : frame()->tagged_slots()) { + safepoint.DefineTaggedStackSlot(tagged); + } + int frame_header_offset = frame()->GetFixedSlotCount(); for (const InstructionOperand& operand : references->reference_operands()) { if (operand.IsStackSlot()) { @@ -914,7 +920,7 @@ Handle CodeGenerator::GenerateDeoptimizationData() { return DeoptimizationData::Empty(isolate()); } Handle data = - DeoptimizationData::New(isolate(), deopt_count, AllocationType::kOld); + DeoptimizationData::New(isolate(), deopt_count); Handle translation_array = translations_.ToFrameTranslation( @@ -1297,9 +1303,13 @@ void CodeGenerator::AddTranslationForOperand(Instruction* instr, literal = DeoptimizationLiteral(static_cast(constant.ToInt64())); } else if (type.representation() == MachineRepresentation::kWord64) { - CHECK_EQ( - constant.ToInt64(), - static_cast(static_cast(constant.ToInt64()))); + // TODO(nicohartmann@, chromium:41497374): Disabling this CHECK + // because we can see cases where this is violated in unreachable + // code. We should re-enable once we have an idea on how to prevent + // this from happening. 
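
RecordSafepoint above now reports every frame-level tagged spill slot via DefineTaggedStackSlot, which is why AssembleConstructFrame on arm, arm64 and ia32 clears those slots in the prologue: the GC may scan them at any safepoint before the generated code has stored anything into them. A plain C++ model of that invariant (not V8 code; the Frame layout here is made up):

#include <cassert>
#include <cstdint>
#include <vector>

// Made-up frame model: raw slot values plus the indices the GC will visit.
struct Frame {
  std::vector<uintptr_t> slots;
  std::vector<int> tagged_slots;
};

// What the prologue change does: one store of zero per tagged spill slot.
void ZeroTaggedSlots(Frame* frame) {
  for (int index : frame->tagged_slots) frame->slots[index] = 0;
}

int main() {
  // Slots start out holding stale junk left over from earlier frames.
  Frame frame{{0xdeadbeef, 0xcafebabe, 0x12345678}, {0, 2}};
  ZeroTaggedSlots(&frame);
  // A GC triggered at any safepoint now sees either zero or a real pointer in
  // every reported tagged slot, never uninitialized garbage.
  for (int index : frame.tagged_slots) assert(frame.slots[index] == 0);
  return 0;
}
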
+ // CHECK_EQ( + // constant.ToInt64(), + // static_cast(static_cast(constant.ToInt64()))); literal = DeoptimizationLiteral(static_cast(constant.ToInt64())); } else { diff --git a/deps/v8/src/compiler/backend/ia32/code-generator-ia32.cc b/deps/v8/src/compiler/backend/ia32/code-generator-ia32.cc index 9caf14fa461340..2ddfb2dfff3790 100644 --- a/deps/v8/src/compiler/backend/ia32/code-generator-ia32.cc +++ b/deps/v8/src/compiler/backend/ia32/code-generator-ia32.cc @@ -818,6 +818,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( case kArchCallCFunction: { int const num_parameters = MiscField::decode(instr->opcode()); Label return_location; + SetIsolateDataSlots set_isolate_data_slots = SetIsolateDataSlots::kYes; #if V8_ENABLE_WEBASSEMBLY if (linkage()->GetIncomingDescriptor()->IsWasmCapiFunction()) { // Put the return address in a stack slot. @@ -827,21 +828,20 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( __ mov(MemOperand(ebp, WasmExitFrameConstants::kCallingPCOffset), scratch); __ pop(scratch); + set_isolate_data_slots = SetIsolateDataSlots::kNo; } #endif // V8_ENABLE_WEBASSEMBLY + int pc_offset; if (HasImmediateInput(instr, 0)) { ExternalReference ref = i.InputExternalReference(0); - __ CallCFunction(ref, num_parameters); + pc_offset = __ CallCFunction(ref, num_parameters, + set_isolate_data_slots, &return_location); } else { Register func = i.InputRegister(0); - __ CallCFunction(func, num_parameters); + pc_offset = __ CallCFunction(func, num_parameters, + set_isolate_data_slots, &return_location); } - __ bind(&return_location); -#if V8_ENABLE_WEBASSEMBLY - if (linkage()->GetIncomingDescriptor()->IsWasmCapiFunction()) { - RecordSafepoint(instr->reference_map()); - } -#endif // V8_ENABLE_WEBASSEMBLY + RecordSafepoint(instr->reference_map(), pc_offset); frame_access_state()->SetFrameAccessToDefault(); // Ideally, we should decrement SP delta to match the change of stack // pointer in CallCFunction. However, for certain architectures (e.g. @@ -4114,6 +4114,12 @@ void CodeGenerator::AssembleConstructFrame() { if (frame()->GetReturnSlotCount() > 0) { __ AllocateStackSpace(frame()->GetReturnSlotCount() * kSystemPointerSize); } + + for (int spill_slot : frame()->tagged_slots()) { + FrameOffset offset = frame_access_state()->GetFrameOffset(spill_slot); + DCHECK(offset.from_frame_pointer()); + __ mov(Operand(ebp, offset.offset()), Immediate(0)); + } } void CodeGenerator::AssembleReturn(InstructionOperand* additional_pop_count) { diff --git a/deps/v8/src/compiler/backend/ia32/instruction-selector-ia32.cc b/deps/v8/src/compiler/backend/ia32/instruction-selector-ia32.cc index 337deaf3de679e..116943fbfd9e5d 100644 --- a/deps/v8/src/compiler/backend/ia32/instruction-selector-ia32.cc +++ b/deps/v8/src/compiler/backend/ia32/instruction-selector-ia32.cc @@ -500,8 +500,6 @@ class IA32OperandGeneratorT final : public OperandGeneratorT { register_mode); } - DECLARE_UNREACHABLE_TURBOSHAFT_FALLBACK(AddressingMode, - GetEffectiveAddressMemoryOperand) AddressingMode GetEffectiveAddressMemoryOperand( node_t node, InstructionOperand inputs[], size_t* input_count, RegisterMode register_mode = RegisterMode::kRegister) { @@ -548,13 +546,11 @@ class IA32OperandGeneratorT final : public OperandGeneratorT { // modes for the scale. 
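
The CHECK_EQ disabled earlier in this hunk asserted that a kWord64 constant used as a deoptimization literal survives a round trip through the narrower representation it is emitted as; unreachable code can violate it, hence the TODO. The exact cast types are not visible in the hunk, so the sketch below uses a double round trip purely as one plausible instance of that check shape:

#include <cassert>
#include <cstdint>

// Lossless-round-trip check of the general shape used by that CHECK_EQ:
// converting to a narrower representation and back must reproduce the value.
bool RoundTripsThroughDouble(int64_t value) {
  return value == static_cast<int64_t>(static_cast<double>(value));
}

int main() {
  assert(RoundTripsThroughDouble(int64_t{1} << 52));         // exactly representable
  assert(!RoundTripsThroughDouble((int64_t{1} << 53) + 1));  // precision lost
  return 0;
}
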
UNIMPLEMENTED(); } else { - const turboshaft::Operation& op = this->turboshaft_graph()->Get(node); - DCHECK_GE(op.input_count, 2); - - inputs[(*input_count)++] = - UseRegisterWithMode(op.input(0), register_mode); - inputs[(*input_count)++] = - UseRegisterWithMode(op.input(1), register_mode); + // TODO(nicohartmann@): Turn this into a `DCHECK` once we have some + // coverage. + CHECK_EQ(m->displacement, 0); + inputs[(*input_count)++] = UseRegisterWithMode(m->base, register_mode); + inputs[(*input_count)++] = UseRegisterWithMode(m->index, register_mode); return kMode_MR1; } } else { @@ -609,7 +605,6 @@ class IA32OperandGeneratorT final : public OperandGeneratorT { } } - DECLARE_UNREACHABLE_TURBOSHAFT_FALLBACK(bool, CanBeBetterLeftOperand) bool CanBeBetterLeftOperand(node_t node) const { return !selector()->IsLive(node); } @@ -882,7 +877,8 @@ void VisitI8x16Shift(InstructionSelectorT* selector, template void InstructionSelectorT::VisitStackSlot(node_t node) { StackSlotRepresentation rep = this->stack_slot_representation_of(node); - int slot = frame_->AllocateSpillSlot(rep.size(), rep.alignment()); + int slot = + frame_->AllocateSpillSlot(rep.size(), rep.alignment(), rep.is_tagged()); OperandGenerator g(this); Emit(kArchStackSlot, g.DefineAsRegister(node), @@ -1943,7 +1939,7 @@ void InstructionSelectorT::VisitWord32ReverseBytes(node_t node) { } template -void InstructionSelectorT::VisitSimd128ReverseBytes(Node* node) { +void InstructionSelectorT::VisitSimd128ReverseBytes(node_t node) { UNREACHABLE(); } diff --git a/deps/v8/src/compiler/backend/instruction-selector-adapter.h b/deps/v8/src/compiler/backend/instruction-selector-adapter.h index 3f4023c02e57e0..1ac063b41ec2bd 100644 --- a/deps/v8/src/compiler/backend/instruction-selector-adapter.h +++ b/deps/v8/src/compiler/backend/instruction-selector-adapter.h @@ -18,22 +18,6 @@ #include "src/compiler/turboshaft/operations.h" #include "src/compiler/turboshaft/use-map.h" -// TODO(nicohartmann@): -// During the transition period to a generic instruction selector, some -// instantiations with TurboshaftAdapter will still call functions with -// Node* arguments. Use `DECLARE_UNREACHABLE_TURBOSHAFT_FALLBACK` to define -// a temporary fallback for these functions such that compilation is possible -// while transitioning the instruction selector incrementally. Once all uses -// of Node*, BasicBlock*, ... have been replaced, remove those fallbacks. -#define DECLARE_UNREACHABLE_TURBOSHAFT_FALLBACK(ret, name) \ - template \ - std::enable_if_t::value, \ - ret> \ - name(Args...) { \ - UNREACHABLE(); \ - } namespace v8::internal::compiler { namespace detail { @@ -293,6 +277,10 @@ struct TurbofanAdapter { UNREACHABLE(); } } + bool is_atomic() const { + return node_->opcode() == IrOpcode::kWord32AtomicStore || + node_->opcode() == IrOpcode::kWord64AtomicStore; + } node_t base() const { return node_->InputAt(0); } optional_node_t index() const { return node_->InputAt(1); } @@ -667,24 +655,40 @@ struct TurboshaftAdapter : public turboshaft::OperationMatcher { op_ = &graph->Get(node_).Cast(); } - bool is_int32() const { return op_->kind == Kind::kWord32; } + bool is_int32() const { + return op_->kind == Kind::kWord32 || (op_->kind == Kind::kSmi && !Is64()); + } bool is_relocatable_int32() const { // We don't have this in turboshaft currently. 
return false; } int32_t int32_value() const { DCHECK(is_int32() || is_relocatable_int32()); - return op_->word32(); + if (op_->kind == Kind::kWord32) { + return op_->word32(); + } else { + DCHECK_EQ(op_->kind, Kind::kSmi); + DCHECK(!Is64()); + return static_cast(op_->smi().ptr()); + } + } + bool is_int64() const { + return op_->kind == Kind::kWord64 || (op_->kind == Kind::kSmi && Is64()); } - bool is_int64() const { return op_->kind == Kind::kWord64; } bool is_relocatable_int64() const { return op_->kind == Kind::kRelocatableWasmCall || op_->kind == Kind::kRelocatableWasmStubCall; } int64_t int64_value() const { - if (is_int64()) return op_->word64(); - DCHECK(is_relocatable_int64()); - return static_cast(op_->integral()); + if (op_->kind == Kind::kWord64) { + return op_->word64(); + } else if (op_->kind == Kind::kSmi) { + DCHECK(Is64()); + return static_cast(op_->smi().ptr()); + } else { + DCHECK(is_relocatable_int64()); + return static_cast(op_->integral()); + } } bool is_heap_object() const { return op_->kind == Kind::kHeapObject; } bool is_compressed_heap_object() const { @@ -743,7 +747,7 @@ struct TurboshaftAdapter : public turboshaft::OperationMatcher { UNREACHABLE(); } node_t frame_state() const { - if (call_op_) return call_op_->frame_state(); + if (call_op_) return call_op_->frame_state().value(); UNREACHABLE(); } base::Vector arguments() const { @@ -930,6 +934,7 @@ struct TurboshaftAdapter : public turboshaft::OperationMatcher { return op_->kind.with_trap_handler ? MemoryAccessKind::kProtected : MemoryAccessKind::kNormal; } + bool is_atomic() const { return op_->kind.is_atomic; } node_t base() const { return op_->base(); } optional_node_t index() const { return op_->index(); } @@ -1300,7 +1305,8 @@ struct TurboshaftAdapter : public turboshaft::OperationMatcher { DCHECK(is_stack_slot(node)); const turboshaft::StackSlotOp& stack_slot = graph_->Get(node).Cast(); - return StackSlotRepresentation(stack_slot.size, stack_slot.alignment); + return StackSlotRepresentation(stack_slot.size, stack_slot.alignment, + stack_slot.is_tagged); } bool is_integer_constant(node_t node) const { if (const auto constant = diff --git a/deps/v8/src/compiler/backend/instruction-selector-impl.h b/deps/v8/src/compiler/backend/instruction-selector-impl.h index 4c73bb8deb530a..f2c52ad6597743 100644 --- a/deps/v8/src/compiler/backend/instruction-selector-impl.h +++ b/deps/v8/src/compiler/backend/instruction-selector-impl.h @@ -108,24 +108,20 @@ class OperandGeneratorT : public Adapter { return InstructionOperand(); // Generates an invalid operand. 
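
The new kSmi constant handling above hands the raw Smi word to the selector as a 32-bit immediate on 32-bit targets and as a 64-bit immediate otherwise. A sketch of the two Smi encodings this relies on, assuming V8's usual layouts (value shifted left by one with a zero tag bit on 32-bit targets; with 31-bit Smis on 64-bit targets, the value sits in the upper half of the word):

#include <cassert>
#include <cstdint>

// 32-bit encoding: the value is shifted left by one; the low tag bit is 0.
uint32_t EncodeSmi32(int32_t value) {
  return static_cast<uint32_t>(value) << 1;
}

// 64-bit encoding with 31-bit Smis: the value lives in the upper 32 bits.
uint64_t EncodeSmi64(int32_t value) {
  return static_cast<uint64_t>(static_cast<uint32_t>(value)) << 32;
}

int main() {
  // Mirroring int32_value()/int64_value() above: the raw word itself is what
  // gets used as the immediate, reinterpreted at the target's word width.
  assert(static_cast<int32_t>(EncodeSmi32(-5)) == -10);  // -5 tagged on 32-bit
  assert(static_cast<int64_t>(EncodeSmi64(7)) == (int64_t{7} << 32));
  return 0;
}
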
} - DECLARE_UNREACHABLE_TURBOSHAFT_FALLBACK(InstructionOperand, DefineAsRegister) InstructionOperand DefineAsRegister(node_t node) { return Define(node, UnallocatedOperand(UnallocatedOperand::MUST_HAVE_REGISTER, GetVReg(node))); } - DECLARE_UNREACHABLE_TURBOSHAFT_FALLBACK(InstructionOperand, DefineSameAsInput) InstructionOperand DefineSameAsInput(node_t node, int input_index) { return Define(node, UnallocatedOperand(GetVReg(node), input_index)); } - DECLARE_UNREACHABLE_TURBOSHAFT_FALLBACK(InstructionOperand, DefineSameAsFirst) InstructionOperand DefineSameAsFirst(node_t node) { return DefineSameAsInput(node, 0); } - DECLARE_UNREACHABLE_TURBOSHAFT_FALLBACK(InstructionOperand, DefineAsFixed) InstructionOperand DefineAsFixed(node_t node, Register reg) { return Define(node, UnallocatedOperand(UnallocatedOperand::FIXED_REGISTER, reg.code(), GetVReg(node))); @@ -138,7 +134,6 @@ class OperandGeneratorT : public Adapter { reg.code(), GetVReg(node))); } - DECLARE_UNREACHABLE_TURBOSHAFT_FALLBACK(InstructionOperand, DefineAsConstant) InstructionOperand DefineAsConstant(node_t node) { selector()->MarkAsDefined(node); int virtual_register = GetVReg(node); @@ -146,13 +141,10 @@ class OperandGeneratorT : public Adapter { return ConstantOperand(virtual_register); } - DECLARE_UNREACHABLE_TURBOSHAFT_FALLBACK(InstructionOperand, DefineAsLocation) InstructionOperand DefineAsLocation(node_t node, LinkageLocation location) { return Define(node, ToUnallocatedOperand(location, GetVReg(node))); } - DECLARE_UNREACHABLE_TURBOSHAFT_FALLBACK(InstructionOperand, - DefineAsDualLocation) InstructionOperand DefineAsDualLocation(node_t node, LinkageLocation primary_location, LinkageLocation secondary_location) { @@ -161,58 +153,48 @@ class OperandGeneratorT : public Adapter { primary_location, secondary_location, GetVReg(node))); } - DECLARE_UNREACHABLE_TURBOSHAFT_FALLBACK(InstructionOperand, Use) InstructionOperand Use(node_t node) { return Use(node, UnallocatedOperand(UnallocatedOperand::NONE, UnallocatedOperand::USED_AT_START, GetVReg(node))); } - DECLARE_UNREACHABLE_TURBOSHAFT_FALLBACK(InstructionOperand, UseAnyAtEnd) InstructionOperand UseAnyAtEnd(node_t node) { return Use(node, UnallocatedOperand(UnallocatedOperand::REGISTER_OR_SLOT, UnallocatedOperand::USED_AT_END, GetVReg(node))); } - DECLARE_UNREACHABLE_TURBOSHAFT_FALLBACK(InstructionOperand, UseAny) InstructionOperand UseAny(node_t node) { return Use(node, UnallocatedOperand(UnallocatedOperand::REGISTER_OR_SLOT, UnallocatedOperand::USED_AT_START, GetVReg(node))); } - DECLARE_UNREACHABLE_TURBOSHAFT_FALLBACK(InstructionOperand, - UseRegisterOrSlotOrConstant) InstructionOperand UseRegisterOrSlotOrConstant(node_t node) { return Use(node, UnallocatedOperand( UnallocatedOperand::REGISTER_OR_SLOT_OR_CONSTANT, UnallocatedOperand::USED_AT_START, GetVReg(node))); } - DECLARE_UNREACHABLE_TURBOSHAFT_FALLBACK(InstructionOperand, - UseUniqueRegisterOrSlotOrConstant) InstructionOperand UseUniqueRegisterOrSlotOrConstant(node_t node) { return Use(node, UnallocatedOperand( UnallocatedOperand::REGISTER_OR_SLOT_OR_CONSTANT, GetVReg(node))); } - DECLARE_UNREACHABLE_TURBOSHAFT_FALLBACK(InstructionOperand, UseRegister) InstructionOperand UseRegister(node_t node) { return Use(node, UnallocatedOperand(UnallocatedOperand::MUST_HAVE_REGISTER, UnallocatedOperand::USED_AT_START, GetVReg(node))); } - DECLARE_UNREACHABLE_TURBOSHAFT_FALLBACK(InstructionOperand, UseRegisterAtEnd) InstructionOperand UseRegisterAtEnd(node_t node) { return Use(node, 
UnallocatedOperand(UnallocatedOperand::MUST_HAVE_REGISTER, UnallocatedOperand::USED_AT_END, GetVReg(node))); } - DECLARE_UNREACHABLE_TURBOSHAFT_FALLBACK(InstructionOperand, UseUniqueSlot) InstructionOperand UseUniqueSlot(node_t node) { return Use(node, UnallocatedOperand(UnallocatedOperand::MUST_HAVE_SLOT, GetVReg(node))); @@ -220,7 +202,6 @@ class OperandGeneratorT : public Adapter { // Use register or operand for the node. If a register is chosen, it won't // alias any temporary or output registers. - DECLARE_UNREACHABLE_TURBOSHAFT_FALLBACK(InstructionOperand, UseUnique) InstructionOperand UseUnique(node_t node) { return Use(node, UnallocatedOperand(UnallocatedOperand::NONE, GetVReg(node))); @@ -228,7 +209,6 @@ class OperandGeneratorT : public Adapter { // Use a unique register for the node that does not alias any temporary or // output registers. - DECLARE_UNREACHABLE_TURBOSHAFT_FALLBACK(InstructionOperand, UseUniqueRegister) InstructionOperand UseUniqueRegister(node_t node) { return Use(node, UnallocatedOperand(UnallocatedOperand::MUST_HAVE_REGISTER, GetVReg(node))); @@ -244,7 +224,6 @@ class OperandGeneratorT : public Adapter { } } - DECLARE_UNREACHABLE_TURBOSHAFT_FALLBACK(InstructionOperand, UseFixed) InstructionOperand UseFixed(node_t node, Register reg) { return Use(node, UnallocatedOperand(UnallocatedOperand::FIXED_REGISTER, reg.code(), GetVReg(node))); @@ -264,18 +243,14 @@ class OperandGeneratorT : public Adapter { return sequence()->AddImmediate(Constant(immediate)); } - DECLARE_UNREACHABLE_TURBOSHAFT_FALLBACK(InstructionOperand, UseImmediate) InstructionOperand UseImmediate(node_t node) { return sequence()->AddImmediate(ToConstant(node)); } - DECLARE_UNREACHABLE_TURBOSHAFT_FALLBACK(InstructionOperand, - UseNegatedImmediate) InstructionOperand UseNegatedImmediate(node_t node) { return sequence()->AddImmediate(ToNegatedConstant(node)); } - DECLARE_UNREACHABLE_TURBOSHAFT_FALLBACK(InstructionOperand, UseLocation) InstructionOperand UseLocation(node_t node, LinkageLocation location) { return Use(node, ToUnallocatedOperand(location, GetVReg(node))); } @@ -319,8 +294,6 @@ class OperandGeneratorT : public Adapter { kUniqueRegister, }; - DECLARE_UNREACHABLE_TURBOSHAFT_FALLBACK(InstructionOperand, - UseRegisterWithMode) InstructionOperand UseRegisterWithMode(node_t node, RegisterMode register_mode) { return register_mode == kRegister ? 
UseRegister(node) @@ -382,7 +355,6 @@ class OperandGeneratorT : public Adapter { return ToUnallocatedOperand(location, sequence()->NextVirtualRegister()); } - DECLARE_UNREACHABLE_TURBOSHAFT_FALLBACK(InstructionOperand, Label) InstructionOperand Label(block_t block) { return sequence()->AddImmediate(Constant(this->rpo_number(block))); } @@ -407,6 +379,12 @@ class OperandGeneratorT : public Adapter { return Constant(static_cast(constant->word32())); case Kind::kWord64: return Constant(static_cast(constant->word64())); + case Kind::kSmi: + if constexpr (Is64()) { + return Constant(static_cast(constant->smi().ptr())); + } else { + return Constant(static_cast(constant->smi().ptr())); + } case Kind::kHeapObject: case Kind::kCompressedHeapObject: return Constant(constant->handle(), diff --git a/deps/v8/src/compiler/backend/instruction-selector.cc b/deps/v8/src/compiler/backend/instruction-selector.cc index 964ade622ef5d3..c84e90685ea534 100644 --- a/deps/v8/src/compiler/backend/instruction-selector.cc +++ b/deps/v8/src/compiler/backend/instruction-selector.cc @@ -597,6 +597,7 @@ InstructionOperand OperandForDeopt(Isolate* isolate, switch (constant->kind) { case Kind::kWord32: case Kind::kWord64: + case Kind::kSmi: case Kind::kFloat32: case Kind::kFloat64: return g->UseImmediate(input); @@ -2040,6 +2041,27 @@ void InstructionSelectorT::VisitBitcastWordToTagged( g.Use(this->Get(node).Cast().input())); } +template <> +void InstructionSelectorT::VisitBitcastSmiToWord( + node_t node) { + // TODO(dmercadier): using EmitIdentity here is not ideal, because users of + // {node} will then use its input, which may not have the Word32 + // representation. This might in turn lead to the register allocator wrongly + // tracking Tagged values that are in fact just Smis. However, using + // Emit(kArchNop) hurts performance because it inserts a gap move which cannot + // always be eliminated because the operands may have different sizes (and the + // move is then truncating or extending). As a temporary work-around until the + // register allocator is fixed, we use Emit(kArchNop) in DEBUG mode to silence + // the register allocator verifier. +#ifdef DEBUG + OperandGenerator g(this); + Emit(kArchNop, g.DefineSameAsFirst(node), + g.Use(this->Get(node).Cast().input())); +#else + EmitIdentity(node); +#endif +} + // 32 bit targets do not implement the following instructions. #if V8_TARGET_ARCH_32_BIT @@ -2196,8 +2218,8 @@ IF_WASM(VISIT_UNSUPPORTED_OP, I64x2ReplaceLane) #endif // !V8_TARGET_ARCH_ARM64 #endif // !V8_TARGET_ARCH_X64 && !V8_TARGET_ARCH_S390X && !V8_TARGET_ARCH_PPC64 -template -void InstructionSelectorT::VisitFinishRegion(Node* node) { +template <> +void InstructionSelectorT::VisitFinishRegion(Node* node) { EmitIdentity(node); } @@ -2392,6 +2414,11 @@ void InstructionSelectorT::VisitCall(node_t node, block_t handler) { EmitPrepareArguments(&buffer.pushed_nodes, call_descriptor, node); UpdateMaxPushedArgumentCount(buffer.pushed_nodes.size()); + if (call_descriptor->RequiresEntrypointTagForCall()) { + buffer.instruction_args.push_back( + g.TempImmediate(call_descriptor->shifted_tag())); + } + // Pass label of exception handler block. 
if (handler) { if constexpr (Adapter::IsTurbofan) { @@ -2503,6 +2530,10 @@ void InstructionSelectorT::VisitTailCall(node_t node) { Emit(kArchPrepareTailCall, g.NoOutput()); + if (callee->RequiresEntrypointTagForCall()) { + buffer.instruction_args.push_back(g.TempImmediate(callee->shifted_tag())); + } + // Add an immediate operand that represents the offset to the first slot // that is unused with respect to the stack pointer that has been updated // for the tail call instruction. Backends that pad arguments can write the @@ -2794,18 +2825,29 @@ void InstructionSelectorT::VisitUnreachable(node_t node) { } template -void InstructionSelectorT::VisitStaticAssert(Node* node) { - Node* asserted = node->InputAt(0); +void InstructionSelectorT::VisitStaticAssert(node_t node) { + DCHECK_EQ(this->value_input_count(node), 1); + node_t asserted = this->input_at(node, 0); UnparkedScopeIfNeeded scope(broker_); AllowHandleDereference allow_handle_dereference; - asserted->Print(4); - FATAL( - "Expected Turbofan static assert to hold, but got non-true input:\n %s", - StaticAssertSourceOf(node->op())); + if constexpr (Adapter::IsTurboshaft) { + StdoutStream os; + os << this->Get(asserted); + FATAL( + "Expected Turbofan static assert to hold, but got non-true input:\n " + "%s", + this->Get(node).template Cast().source); + } else { + asserted->Print(4); + FATAL( + "Expected Turbofan static assert to hold, but got non-true input:\n " + "%s", + StaticAssertSourceOf(node->op())); + } } -template -void InstructionSelectorT::VisitDeadValue(Node* node) { +template <> +void InstructionSelectorT::VisitDeadValue(Node* node) { OperandGenerator g(this); MarkAsRepresentation(DeadValueRepresentationOf(node->op()), node); Emit(kArchDebugBreak, g.DefineAsConstant(node)); @@ -2911,6 +2953,8 @@ void InstructionSelectorT::VisitControl(block_t block) { } case Opcode::kUnreachable: return VisitUnreachable(node); + case Opcode::kStaticAssert: + return VisitStaticAssert(node); default: { const std::string op_string = op.ToString(); PrintF("\033[31mNo ISEL support for: %s\033[m\n", op_string.c_str()); @@ -4544,6 +4588,7 @@ void InstructionSelectorT::VisitNode( switch (constant.kind) { case ConstantOp::Kind::kWord32: case ConstantOp::Kind::kWord64: + case ConstantOp::Kind::kSmi: case ConstantOp::Kind::kTaggedIndex: case ConstantOp::Kind::kExternal: break; @@ -5030,7 +5075,7 @@ void InstructionSelectorT::VisitNode( if constexpr (Is64()) { DCHECK_EQ(cast.kind, TaggedBitcastOp::Kind::kSmi); DCHECK(SmiValuesAre31Bits()); - return EmitIdentity(node); + return VisitBitcastSmiToWord(node); } else { return VisitBitcastTaggedToWord(node); } @@ -5048,7 +5093,11 @@ void InstructionSelectorT::VisitNode( } case multi(Rep::Compressed(), Rep::Word32()): MarkAsWord32(node); - return VisitBitcastTaggedToWord(node); + if (cast.kind == TaggedBitcastOp::Kind::kSmi) { + return VisitBitcastSmiToWord(node); + } else { + return VisitBitcastTaggedToWord(node); + } default: UNIMPLEMENTED(); } diff --git a/deps/v8/src/compiler/backend/instruction-selector.h b/deps/v8/src/compiler/backend/instruction-selector.h index 87578949a497be..aa630bbaf2bc14 100644 --- a/deps/v8/src/compiler/backend/instruction-selector.h +++ b/deps/v8/src/compiler/backend/instruction-selector.h @@ -479,7 +479,6 @@ class InstructionSelectorT final : public Adapter { size_t input_count, InstructionOperand* inputs, size_t temp_count, InstructionOperand* temps, FlagsContinuation* cont); - DECLARE_UNREACHABLE_TURBOSHAFT_FALLBACK(void, EmitIdentity) void EmitIdentity(node_t node); // 
=========================================================================== @@ -507,7 +506,6 @@ class InstructionSelectorT final : public Adapter { // For pure nodes, CanCover(a,b) is checked to avoid duplicated execution: // If this is not the case, code for b must still be generated for other // users, and fusing is unlikely to improve performance. - DECLARE_UNREACHABLE_TURBOSHAFT_FALLBACK(bool, CanCover) bool CanCover(node_t user, node_t node) const; // Used in pattern matching during code generation. @@ -537,27 +535,22 @@ class InstructionSelectorT final : public Adapter { // Checks if {node} was already defined, and therefore code was already // generated for it. - DECLARE_UNREACHABLE_TURBOSHAFT_FALLBACK(bool, IsDefined) bool IsDefined(node_t node) const; // Checks if {node} has any uses, and therefore code has to be generated for // it. - DECLARE_UNREACHABLE_TURBOSHAFT_FALLBACK(bool, IsUsed) bool IsUsed(node_t node) const; // Checks if {node} is currently live. - DECLARE_UNREACHABLE_TURBOSHAFT_FALLBACK(bool, IsLive) bool IsLive(node_t node) const { return !IsDefined(node) && IsUsed(node); } // Gets the effect level of {node}. - DECLARE_UNREACHABLE_TURBOSHAFT_FALLBACK(int, GetEffectLevel) int GetEffectLevel(node_t node) const; // Gets the effect level of {node}, appropriately adjusted based on // continuation flags if the node is a branch. int GetEffectLevel(node_t node, FlagsContinuation* cont) const; - DECLARE_UNREACHABLE_TURBOSHAFT_FALLBACK(int, GetVirtualRegister) int GetVirtualRegister(node_t node); const std::map GetVirtualRegistersForTesting() const; @@ -597,61 +590,48 @@ class InstructionSelectorT final : public Adapter { void TryRename(InstructionOperand* op); int GetRename(int virtual_register); - DECLARE_UNREACHABLE_TURBOSHAFT_FALLBACK(void, SetRename) void SetRename(node_t node, node_t rename); void UpdateRenames(Instruction* instruction); void UpdateRenamesInPhi(PhiInstruction* phi); // Inform the instruction selection that {node} was just defined. - DECLARE_UNREACHABLE_TURBOSHAFT_FALLBACK(void, MarkAsDefined) void MarkAsDefined(node_t node); // Inform the instruction selection that {node} has at least one use and we // will need to generate code for it. - DECLARE_UNREACHABLE_TURBOSHAFT_FALLBACK(void, MarkAsUsed) void MarkAsUsed(node_t node); // Sets the effect level of {node}. - DECLARE_UNREACHABLE_TURBOSHAFT_FALLBACK(void, SetEffectLevel) void SetEffectLevel(node_t node, int effect_level); // Inform the register allocation of the representation of the value produced // by {node}. 
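
VisitStaticAssert above is one of many visitors that now share a single templated body across the two pipelines, branching on Adapter::IsTurboshaft with if constexpr so that each side can use IR-specific operations and only the instantiated branch has to compile. A self-contained sketch of that pattern with two made-up adapters (not the real TurbofanAdapter/TurboshaftAdapter interfaces):

#include <iostream>
#include <string>

// Two hypothetical adapters; both expose the same surface plus a compile-time
// flag the shared visitor can branch on.
struct GraphAAdapter {
  static constexpr bool IsTurboshaft = false;
  std::string Describe(int node) const { return "A-node " + std::to_string(node); }
};
struct GraphBAdapter {
  static constexpr bool IsTurboshaft = true;
  std::string Describe(int node) const { return "B-node " + std::to_string(node); }
};

// One templated visitor body; only the branch matching the instantiated
// adapter is compiled, so each branch may call adapter-specific helpers.
template <typename Adapter>
void VisitNode(const Adapter& adapter, int node) {
  if constexpr (Adapter::IsTurboshaft) {
    std::cout << "turboshaft path: " << adapter.Describe(node) << "\n";
  } else {
    std::cout << "turbofan path: " << adapter.Describe(node) << "\n";
  }
}

int main() {
  VisitNode(GraphAAdapter{}, 1);
  VisitNode(GraphBAdapter{}, 2);
}
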
- DECLARE_UNREACHABLE_TURBOSHAFT_FALLBACK(void, MarkAsRepresentation) void MarkAsRepresentation(MachineRepresentation rep, node_t node); void MarkAsRepresentation(turboshaft::RegisterRepresentation rep, node_t node) { MarkAsRepresentation(rep.machine_representation(), node); } - DECLARE_UNREACHABLE_TURBOSHAFT_FALLBACK(void, MarkAsWord32) void MarkAsWord32(node_t node) { MarkAsRepresentation(MachineRepresentation::kWord32, node); } - DECLARE_UNREACHABLE_TURBOSHAFT_FALLBACK(void, MarkAsWord64) void MarkAsWord64(node_t node) { MarkAsRepresentation(MachineRepresentation::kWord64, node); } - DECLARE_UNREACHABLE_TURBOSHAFT_FALLBACK(void, MarkAsFloat32) void MarkAsFloat32(node_t node) { MarkAsRepresentation(MachineRepresentation::kFloat32, node); } - DECLARE_UNREACHABLE_TURBOSHAFT_FALLBACK(void, MarkAsFloat64) void MarkAsFloat64(node_t node) { MarkAsRepresentation(MachineRepresentation::kFloat64, node); } - DECLARE_UNREACHABLE_TURBOSHAFT_FALLBACK(void, MarkAsSimd128) void MarkAsSimd128(node_t node) { MarkAsRepresentation(MachineRepresentation::kSimd128, node); } - DECLARE_UNREACHABLE_TURBOSHAFT_FALLBACK(void, MarkAsSimd256) void MarkAsSimd256(node_t node) { MarkAsRepresentation(MachineRepresentation::kSimd256, node); } - DECLARE_UNREACHABLE_TURBOSHAFT_FALLBACK(void, MarkAsTagged) void MarkAsTagged(node_t node) { MarkAsRepresentation(MachineRepresentation::kTagged, node); } - DECLARE_UNREACHABLE_TURBOSHAFT_FALLBACK(void, MarkAsCompressed) void MarkAsCompressed(node_t node) { MarkAsRepresentation(MachineRepresentation::kCompressed, node); } @@ -788,6 +768,7 @@ class InstructionSelectorT final : public Adapter { DECLARE_GENERATOR_T(ProtectedStore) DECLARE_GENERATOR_T(BitcastTaggedToWord) DECLARE_GENERATOR_T(BitcastWordToTagged) + DECLARE_GENERATOR_T(BitcastSmiToWord) DECLARE_GENERATOR_T(ChangeInt32ToInt64) DECLARE_GENERATOR_T(ChangeInt32ToFloat64) DECLARE_GENERATOR_T(ChangeFloat32ToFloat64) @@ -937,19 +918,15 @@ class InstructionSelectorT final : public Adapter { DECLARE_GENERATOR_T(Word32AtomicPairXor) DECLARE_GENERATOR_T(Word32AtomicPairExchange) DECLARE_GENERATOR_T(Word32AtomicPairCompareExchange) + DECLARE_GENERATOR_T(Simd128ReverseBytes) MACHINE_SIMD128_OP_LIST(DECLARE_GENERATOR_T) MACHINE_SIMD256_OP_LIST(DECLARE_GENERATOR_T) IF_WASM(DECLARE_GENERATOR_T, LoadStackPointer) IF_WASM(DECLARE_GENERATOR_T, SetStackPointer) #undef DECLARE_GENERATOR_T -#define DECLARE_GENERATOR(x) void Visit##x(Node* node); - DECLARE_GENERATOR(Simd128ReverseBytes) -#undef DECLARE_GENERATOR - // Visit the load node with a value and opcode to replace with. 
void VisitLoad(node_t node, node_t value, InstructionCode opcode); - DECLARE_UNREACHABLE_TURBOSHAFT_FALLBACK(void, VisitLoad) void VisitLoadTransform(Node* node, Node* value, InstructionCode opcode); void VisitFinishRegion(Node* node); void VisitParameter(node_t node); @@ -975,7 +952,7 @@ class InstructionSelectorT final : public Adapter { void VisitThrow(Node* node); void VisitRetain(node_t node); void VisitUnreachable(node_t node); - void VisitStaticAssert(Node* node); + void VisitStaticAssert(node_t node); void VisitDeadValue(Node* node); void VisitBitcastWord32PairToFloat64(node_t node); @@ -1071,15 +1048,11 @@ class InstructionSelectorT final : public Adapter { void MarkPairProjectionsAsWord32(node_t node); bool IsSourcePositionUsed(node_t node); - DECLARE_UNREACHABLE_TURBOSHAFT_FALLBACK(void, - VisitWord32AtomicBinaryOperation) void VisitWord32AtomicBinaryOperation(node_t node, ArchOpcode int8_op, ArchOpcode uint8_op, ArchOpcode int16_op, ArchOpcode uint16_op, ArchOpcode word32_op); - DECLARE_UNREACHABLE_TURBOSHAFT_FALLBACK(void, - VisitWord64AtomicBinaryOperation) void VisitWord64AtomicBinaryOperation(node_t node, ArchOpcode uint8_op, ArchOpcode uint16_op, ArchOpcode uint32_op, @@ -1088,7 +1061,6 @@ class InstructionSelectorT final : public Adapter { ArchOpcode uint16_op, ArchOpcode uint32_op); #if V8_TARGET_ARCH_64_BIT - DECLARE_UNREACHABLE_TURBOSHAFT_FALLBACK(bool, ZeroExtendsWord32ToWord64) bool ZeroExtendsWord32ToWord64(node_t node, int recursion_depth = 0); bool ZeroExtendsWord32ToWord64NoPhis(node_t node); diff --git a/deps/v8/src/compiler/backend/instruction.cc b/deps/v8/src/compiler/backend/instruction.cc index ca3e86672f278e..1a884812e2837c 100644 --- a/deps/v8/src/compiler/backend/instruction.cc +++ b/deps/v8/src/compiler/backend/instruction.cc @@ -20,6 +20,7 @@ #include "src/compiler/node.h" #include "src/compiler/schedule.h" #include "src/compiler/turboshaft/graph.h" +#include "src/compiler/turboshaft/loop-finder.h" #include "src/compiler/turboshaft/operations.h" #include "src/deoptimizer/deoptimizer.h" #include "src/execution/frames.h" @@ -723,11 +724,9 @@ static InstructionBlock* InstructionBlockFor(Zone* zone, return instr_block; } -static InstructionBlock* InstructionBlockFor(Zone* zone, - const turboshaft::Graph& graph, - const turboshaft::Block* block) { - // TODO(nicohartmann@): Properly get the loop_header. - turboshaft::Block* loop_header = nullptr; // block->loop_header() +static InstructionBlock* InstructionBlockFor( + Zone* zone, const turboshaft::Graph& graph, const turboshaft::Block* block, + const turboshaft::Block* loop_header) { bool is_handler = block->FirstOperation(graph).Is(); bool deferred = block->get_custom_data( @@ -830,10 +829,16 @@ InstructionBlocks* InstructionSequence::InstructionBlocksFor( new (blocks) InstructionBlocks(static_cast(graph.block_count()), nullptr, zone); size_t rpo_number = 0; + // TODO(dmercadier): currently, the LoopFinder is just used to compute loop + // headers. Since it's somewhat expensive to compute this, we should also use + // the LoopFinder to compute the special RPO (we would only need to run the + // LoopFinder once to compute both the special RPO and the loop headers). 
+ turboshaft::LoopFinder loop_finder(zone, &graph); for (const turboshaft::Block& block : graph.blocks()) { DCHECK(!(*blocks)[rpo_number]); DCHECK_EQ(RpoNumber::FromInt(block.index().id()).ToSize(), rpo_number); - (*blocks)[rpo_number] = InstructionBlockFor(zone, graph, &block); + (*blocks)[rpo_number] = InstructionBlockFor( + zone, graph, &block, loop_finder.GetLoopHeader(&block)); ++rpo_number; } return blocks; diff --git a/deps/v8/src/compiler/backend/instruction.h b/deps/v8/src/compiler/backend/instruction.h index b48d3e04a78728..9916ff4a23b787 100644 --- a/deps/v8/src/compiler/backend/instruction.h +++ b/deps/v8/src/compiler/backend/instruction.h @@ -879,7 +879,7 @@ std::ostream& operator<<(std::ostream&, const ParallelMove&); class ReferenceMap final : public ZoneObject { public: explicit ReferenceMap(Zone* zone) - : reference_operands_(8, zone), instruction_position_(-1) {} + : reference_operands_(zone), instruction_position_(-1) {} const ZoneVector& reference_operands() const { return reference_operands_; @@ -1050,6 +1050,22 @@ class V8_EXPORT_PRIVATE Instruction final { return MiscField::decode(opcode()) & flag; } + // For call instructions, computes the index of the CodeEntrypointTag input. + size_t CodeEnrypointTagInputIndex() const { + // Keep in sync with instruction-selector.cc where the inputs are assembled. + switch (arch_opcode()) { + case kArchCallCodeObject: + return InputCount() - + (HasCallDescriptorFlag(CallDescriptor::kHasExceptionHandler) + ? 2 + : 1); + case kArchTailCallCodeObject: + return InputCount() - 3; + default: + UNREACHABLE(); + } + } + enum GapPosition { START, END, diff --git a/deps/v8/src/compiler/backend/loong64/code-generator-loong64.cc b/deps/v8/src/compiler/backend/loong64/code-generator-loong64.cc index 4427984e233d16..f916b1661875ba 100644 --- a/deps/v8/src/compiler/backend/loong64/code-generator-loong64.cc +++ b/deps/v8/src/compiler/backend/loong64/code-generator-loong64.cc @@ -775,12 +775,22 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( case kArchCallCFunction: { int const num_gp_parameters = ParamField::decode(instr->opcode()); int const num_fp_parameters = FPParamField::decode(instr->opcode()); + SetIsolateDataSlots set_isolate_data_slots = SetIsolateDataSlots::kYes; #if V8_ENABLE_WEBASSEMBLY Label start_call; bool isWasmCapiFunction = linkage()->GetIncomingDescriptor()->IsWasmCapiFunction(); // from start_call to return address. - int offset = __ root_array_available() ? 36 : 80; // 9 or 20 instrs + int offset = 0; + // TODO(loongarch): Use a more robust way to calculate offset of pc. + // See CallCFunction. 
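
InstructionBlockFor now receives a real loop header, computed by running a LoopFinder over the Turboshaft graph instead of leaving it null. As a rough illustration of what marks a block as a loop header, here is a simplified back-edge scan over an RPO-numbered reducible graph (a simplification for illustration, not the Turboshaft algorithm):

#include <iostream>
#include <vector>

// Blocks are identified by their RPO index; edges[i] lists successors of i.
// In a reducible graph laid out in reverse post-order, an edge whose target
// has a smaller-or-equal index is a back edge, and its target is a header.
std::vector<bool> FindLoopHeaders(const std::vector<std::vector<int>>& edges) {
  std::vector<bool> is_header(edges.size(), false);
  for (size_t from = 0; from < edges.size(); ++from) {
    for (int to : edges[from]) {
      if (static_cast<size_t>(to) <= from) is_header[to] = true;
    }
  }
  return is_header;
}

int main() {
  // 0 -> 1, 1 -> 2, 2 -> 1 (back edge), 2 -> 3: block 1 is a loop header.
  std::vector<std::vector<int>> edges = {{1}, {2}, {1, 3}, {}};
  std::vector<bool> headers = FindLoopHeaders(edges);
  for (size_t i = 0; i < headers.size(); ++i) {
    std::cout << "block " << i << (headers[i] ? " is" : " is not")
              << " a loop header\n";
  }
}
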
+ if (isWasmCapiFunction) { + offset = 16; // SetIsolateDataSlots::kNo + } else if (__ root_array_available()) { + offset = 36; // SetIsolateDataSlots::kYes and root_array_available + } else { + offset = 80; // SetIsolateDataSlots::kYes but not root_array_available + } #endif // V8_ENABLE_WEBASSEMBLY #if V8_HOST_ARCH_LOONG64 if (v8_flags.debug_code) { @@ -792,14 +802,17 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( __ bind(&start_call); __ pcaddi(t7, offset >> kInstrSizeLog2); __ St_d(t7, MemOperand(fp, WasmExitFrameConstants::kCallingPCOffset)); + set_isolate_data_slots = SetIsolateDataSlots::kNo; } #endif // V8_ENABLE_WEBASSEMBLY if (instr->InputAt(0)->IsImmediate()) { ExternalReference ref = i.InputExternalReference(0); - __ CallCFunction(ref, num_gp_parameters, num_fp_parameters); + __ CallCFunction(ref, num_gp_parameters, num_fp_parameters, + set_isolate_data_slots); } else { Register func = i.InputRegister(0); - __ CallCFunction(func, num_gp_parameters, num_fp_parameters); + __ CallCFunction(func, num_gp_parameters, num_fp_parameters, + set_isolate_data_slots); } #if V8_ENABLE_WEBASSEMBLY if (isWasmCapiFunction) { diff --git a/deps/v8/src/compiler/backend/loong64/instruction-selector-loong64.cc b/deps/v8/src/compiler/backend/loong64/instruction-selector-loong64.cc index 7c2f8d84185422..6e836989abf732 100644 --- a/deps/v8/src/compiler/backend/loong64/instruction-selector-loong64.cc +++ b/deps/v8/src/compiler/backend/loong64/instruction-selector-loong64.cc @@ -1446,7 +1446,7 @@ void InstructionSelectorT::VisitWord64ReverseBytes(node_t node) { } template -void InstructionSelectorT::VisitSimd128ReverseBytes(Node* node) { +void InstructionSelectorT::VisitSimd128ReverseBytes(node_t node) { UNREACHABLE(); } diff --git a/deps/v8/src/compiler/backend/mips64/code-generator-mips64.cc b/deps/v8/src/compiler/backend/mips64/code-generator-mips64.cc index 7331b33b73239d..47c8f5a3d05876 100644 --- a/deps/v8/src/compiler/backend/mips64/code-generator-mips64.cc +++ b/deps/v8/src/compiler/backend/mips64/code-generator-mips64.cc @@ -699,12 +699,22 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( case kArchCallCFunction: { int const num_gp_parameters = ParamField::decode(instr->opcode()); int const num_fp_parameters = FPParamField::decode(instr->opcode()); + SetIsolateDataSlots set_isolate_data_slots = SetIsolateDataSlots::kYes; #if V8_ENABLE_WEBASSEMBLY Label start_call; bool isWasmCapiFunction = linkage()->GetIncomingDescriptor()->IsWasmCapiFunction(); // from start_call to return address. - int offset = __ root_array_available() ? 64 : 112; + int offset = 0; + // TODO(mips): Use a more robust way to calculate offset of pc. + // See CallCFunction. 
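
The hand-picked offsets above count the bytes between the start_call label and the call's return address, and offset >> kInstrSizeLog2 turns that byte distance into an instruction count for pcaddi on a fixed-width ISA. A tiny arithmetic sketch of that conversion, assuming 4-byte instructions as on these targets:

#include <cassert>

// Fixed-width ISA assumption: every instruction is 4 bytes.
constexpr int kInstrSize = 4;
constexpr int kInstrSizeLog2 = 2;

int main() {
  // The SetIsolateDataSlots::kNo path above uses 16 bytes from start_call to
  // the return address, i.e. 4 instructions ahead.
  const int offset_bytes = 16;
  const int offset_instrs = offset_bytes >> kInstrSizeLog2;
  assert(offset_instrs == 4);
  // The constant only stays correct while the emitted call sequence keeps this
  // exact length, which is what the TODO about a more robust scheme refers to.
  assert(offset_instrs * kInstrSize == offset_bytes);
  return 0;
}
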
+ if (isWasmCapiFunction) { + offset = 32; // SetIsolateDataSlots::kNo + } else if (__ root_array_available()) { + offset = 64; // SetIsolateDataSlots::kYes and root_array_available + } else { + offset = 112; // SetIsolateDataSlots::kYes but not root_array_available + } #endif // V8_ENABLE_WEBASSEMBLY #if V8_HOST_ARCH_MIPS64 if (v8_flags.debug_code) { @@ -721,14 +731,17 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( __ Daddu(ra, ra, offset - 8); // 8 = nop + nal __ sd(ra, MemOperand(fp, WasmExitFrameConstants::kCallingPCOffset)); __ mov(ra, kScratchReg); + set_isolate_data_slots = SetIsolateDataSlots::kNo; } #endif // V8_ENABLE_WEBASSEMBLY if (instr->InputAt(0)->IsImmediate()) { ExternalReference ref = i.InputExternalReference(0); - __ CallCFunction(ref, num_gp_parameters, num_fp_parameters); + __ CallCFunction(ref, num_gp_parameters, num_fp_parameters, + set_isolate_data_slots); } else { Register func = i.InputRegister(0); - __ CallCFunction(func, num_gp_parameters, num_fp_parameters); + __ CallCFunction(func, num_gp_parameters, num_fp_parameters, + set_isolate_data_slots); } #if V8_ENABLE_WEBASSEMBLY if (isWasmCapiFunction) { diff --git a/deps/v8/src/compiler/backend/mips64/instruction-selector-mips64.cc b/deps/v8/src/compiler/backend/mips64/instruction-selector-mips64.cc index 91dab50dd2382f..d466adb2c41774 100644 --- a/deps/v8/src/compiler/backend/mips64/instruction-selector-mips64.cc +++ b/deps/v8/src/compiler/backend/mips64/instruction-selector-mips64.cc @@ -1340,7 +1340,7 @@ void InstructionSelectorT::VisitWord32ReverseBytes(node_t node) { } template -void InstructionSelectorT::VisitSimd128ReverseBytes(Node* node) { +void InstructionSelectorT::VisitSimd128ReverseBytes(node_t node) { UNREACHABLE(); } diff --git a/deps/v8/src/compiler/backend/ppc/code-generator-ppc.cc b/deps/v8/src/compiler/backend/ppc/code-generator-ppc.cc index bcde804f58c88f..caf64ed255b511 100644 --- a/deps/v8/src/compiler/backend/ppc/code-generator-ppc.cc +++ b/deps/v8/src/compiler/backend/ppc/code-generator-ppc.cc @@ -981,6 +981,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( int const fp_param_field = FPParamField::decode(instr->opcode()); int num_fp_parameters = fp_param_field; bool has_function_descriptor = false; + SetIsolateDataSlots set_isolate_data_slots = SetIsolateDataSlots::kYes; #if ABI_USES_FUNCTION_DESCRIPTORS // AIX/PPC64BE Linux uses a function descriptor int kNumFPParametersMask = kHasFunctionDescriptorBitMask - 1; @@ -1006,18 +1007,17 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( __ StoreU64(kScratchReg, MemOperand(fp, WasmExitFrameConstants::kCallingPCOffset)); __ mtlr(r0); + set_isolate_data_slots = SetIsolateDataSlots::kNo; } #endif // V8_ENABLE_WEBASSEMBLY if (instr->InputAt(0)->IsImmediate()) { ExternalReference ref = i.InputExternalReference(0); __ CallCFunction(ref, num_gp_parameters, num_fp_parameters, - MacroAssembler::SetIsolateDataSlots::kYes, - has_function_descriptor); + set_isolate_data_slots, has_function_descriptor); } else { Register func = i.InputRegister(0); __ CallCFunction(func, num_gp_parameters, num_fp_parameters, - MacroAssembler::SetIsolateDataSlots::kYes, - has_function_descriptor); + set_isolate_data_slots, has_function_descriptor); } #if V8_ENABLE_WEBASSEMBLY if (isWasmCapiFunction) { diff --git a/deps/v8/src/compiler/backend/ppc/instruction-selector-ppc.cc b/deps/v8/src/compiler/backend/ppc/instruction-selector-ppc.cc index 97a08400c24546..8f68e6004ed4ce 100644 --- 
a/deps/v8/src/compiler/backend/ppc/instruction-selector-ppc.cc +++ b/deps/v8/src/compiler/backend/ppc/instruction-selector-ppc.cc @@ -6,6 +6,7 @@ #include "src/compiler/backend/instruction-selector-impl.h" #include "src/compiler/node-matchers.h" #include "src/compiler/node-properties.h" +#include "src/compiler/turboshaft/opmasks.h" #include "src/execution/ppc/frame-constants-ppc.h" #include "src/roots/roots-inl.h" @@ -34,15 +35,17 @@ class PPCOperandGeneratorT final : public OperandGeneratorT { InstructionSelectorT* selector) : super(selector) {} - InstructionOperand UseOperand(Node* node, ImmediateMode mode) { + InstructionOperand UseOperand(node_t node, ImmediateMode mode) { if (CanBeImmediate(node, mode)) { return UseImmediate(node); } return UseRegister(node); } - bool CanBeImmediate(Node* node, ImmediateMode mode) { - if (node->opcode() == IrOpcode::kCompressedHeapConstant) { + bool CanBeImmediate(node_t node, ImmediateMode mode) { + if (!this->is_constant(node)) return false; + auto constant = this->constant_view(node); + if (constant.is_compressed_heap_object()) { if (!COMPRESS_POINTERS_BOOL) return false; // For builtin code we need static roots if (selector()->isolate()->bootstrapper() && !V8_STATIC_ROOTS_BOOL) { @@ -50,9 +53,8 @@ class PPCOperandGeneratorT final : public OperandGeneratorT { } const RootsTable& roots_table = selector()->isolate()->roots_table(); RootIndex root_index; - CompressedHeapObjectMatcher m(node); - if (m.HasResolvedValue() && - roots_table.IsRootHandle(m.ResolvedValue(), &root_index)) { + Handle value = constant.heap_object_value(); + if (roots_table.IsRootHandle(value, &root_index)) { if (!RootsTable::IsReadOnly(root_index)) return false; return CanBeImmediate(MacroAssemblerBase::ReadOnlyRootPtr( root_index, selector()->isolate()), @@ -61,13 +63,8 @@ class PPCOperandGeneratorT final : public OperandGeneratorT { return false; } - int64_t value; - if (node->opcode() == IrOpcode::kInt32Constant) - value = OpParameter(node->op()); - else if (node->opcode() == IrOpcode::kInt64Constant) - value = OpParameter(node->op()); - else - return false; + if (!selector()->is_integer_constant(node)) return false; + int64_t value = selector()->integer_constant(node); return CanBeImmediate(value, mode); } @@ -98,42 +95,44 @@ namespace { template void VisitRR(InstructionSelectorT* selector, InstructionCode opcode, - Node* node) { + typename Adapter::node_t node) { PPCOperandGeneratorT g(selector); selector->Emit(opcode, g.DefineAsRegister(node), - g.UseRegister(node->InputAt(0))); + g.UseRegister(selector->input_at(node, 0))); } template void VisitRRR(InstructionSelectorT* selector, InstructionCode opcode, - Node* node) { + typename Adapter::node_t node) { PPCOperandGeneratorT g(selector); selector->Emit(opcode, g.DefineAsRegister(node), - g.UseRegister(node->InputAt(0)), - g.UseRegister(node->InputAt(1))); + g.UseRegister(selector->input_at(node, 0)), + g.UseRegister(selector->input_at(node, 1))); } template void VisitRRO(InstructionSelectorT* selector, InstructionCode opcode, - Node* node, ImmediateMode operand_mode) { + typename Adapter::node_t node, ImmediateMode operand_mode) { PPCOperandGeneratorT g(selector); selector->Emit(opcode, g.DefineAsRegister(node), - g.UseRegister(node->InputAt(0)), - g.UseOperand(node->InputAt(1), operand_mode)); + g.UseRegister(selector->input_at(node, 0)), + g.UseOperand(selector->input_at(node, 1), operand_mode)); } #if V8_TARGET_ARCH_PPC64 template void VisitTryTruncateDouble(InstructionSelectorT* selector, - InstructionCode opcode, 
Node* node) { + InstructionCode opcode, + typename Adapter::node_t node) { + using node_t = typename Adapter::node_t; PPCOperandGeneratorT g(selector); - InstructionOperand inputs[] = {g.UseRegister(node->InputAt(0))}; + InstructionOperand inputs[] = {g.UseRegister(selector->input_at(node, 0))}; InstructionOperand outputs[2]; size_t output_count = 0; outputs[output_count++] = g.DefineAsRegister(node); - Node* success_output = NodeProperties::FindProjection(node, 1); - if (success_output) { + node_t success_output = selector->FindProjection(node, 1); + if (selector->valid(success_output)) { outputs[output_count++] = g.DefineAsRegister(success_output); } @@ -142,19 +141,19 @@ void VisitTryTruncateDouble(InstructionSelectorT* selector, #endif // Shared routine for multiple binary operations. -template -void VisitBinop(InstructionSelectorT* selector, Node* node, - InstructionCode opcode, ImmediateMode operand_mode, - FlagsContinuationT* cont) { +template +void VisitBinop(InstructionSelectorT* selector, + typename Adapter::node_t node, InstructionCode opcode, + ImmediateMode operand_mode, FlagsContinuationT* cont) { PPCOperandGeneratorT g(selector); - Matcher m(node); InstructionOperand inputs[4]; size_t input_count = 0; InstructionOperand outputs[2]; size_t output_count = 0; - inputs[input_count++] = g.UseRegister(m.left().node()); - inputs[input_count++] = g.UseOperand(m.right().node(), operand_mode); + inputs[input_count++] = g.UseRegister(selector->input_at(node, 0)); + inputs[input_count++] = + g.UseOperand(selector->input_at(node, 1), operand_mode); if (cont->IsDeoptimize()) { // If we can deoptimize as a result of the binop, we need to make sure that @@ -175,32 +174,30 @@ void VisitBinop(InstructionSelectorT* selector, Node* node, } // Shared routine for multiple binary operations. -template -void VisitBinop(InstructionSelectorT* selector, Node* node, - InstructionCode opcode, ImmediateMode operand_mode) { +template +void VisitBinop(InstructionSelectorT* selector, + typename Adapter::node_t node, InstructionCode opcode, + ImmediateMode operand_mode) { FlagsContinuationT cont; - VisitBinop(selector, node, opcode, operand_mode, &cont); + VisitBinop(selector, node, opcode, operand_mode, &cont); } } // namespace template void InstructionSelectorT::VisitStackSlot(node_t node) { - if constexpr (Adapter::IsTurboshaft) { - UNIMPLEMENTED(); - } else { - StackSlotRepresentation rep = StackSlotRepresentationOf(node->op()); - int slot = frame_->AllocateSpillSlot(rep.size(), rep.alignment()); - OperandGenerator g(this); + StackSlotRepresentation rep = this->stack_slot_representation_of(node); + int slot = frame_->AllocateSpillSlot(rep.size(), rep.alignment()); + OperandGenerator g(this); - Emit(kArchStackSlot, g.DefineAsRegister(node), - sequence()->AddImmediate(Constant(slot)), 0, nullptr); - } + Emit(kArchStackSlot, g.DefineAsRegister(node), + sequence()->AddImmediate(Constant(slot)), 0, nullptr); } template void InstructionSelectorT::VisitAbortCSADcheck(node_t node) { if constexpr (Adapter::IsTurboshaft) { + // This is currently not used by Turboshaft. 
UNIMPLEMENTED(); } else { PPCOperandGeneratorT g(this); @@ -209,47 +206,48 @@ void InstructionSelectorT::VisitAbortCSADcheck(node_t node) { } template -static void VisitLoadCommon(InstructionSelectorT* selector, Node* node, +static void VisitLoadCommon(InstructionSelectorT* selector, + typename Adapter::node_t node, LoadRepresentation load_rep) { - if constexpr (Adapter::IsTurboshaft) { - UNIMPLEMENTED(); + using node_t = typename Adapter::node_t; + PPCOperandGeneratorT g(selector); + auto load_view = selector->load_view(node); + node_t base = load_view.base(); + node_t offset = load_view.index(); + + InstructionCode opcode = kArchNop; + ImmediateMode mode; + if (CpuFeatures::IsSupported(PPC_10_PLUS)) { + mode = kInt34Imm; } else { - PPCOperandGeneratorT g(selector); - Node* base = node->InputAt(0); - Node* offset = node->InputAt(1); - InstructionCode opcode = kArchNop; - ImmediateMode mode; - if (CpuFeatures::IsSupported(PPC_10_PLUS)) { - mode = kInt34Imm; - } else { - mode = kInt16Imm; - } - switch (load_rep.representation()) { - case MachineRepresentation::kFloat32: - opcode = kPPC_LoadFloat32; - break; - case MachineRepresentation::kFloat64: - opcode = kPPC_LoadDouble; - break; - case MachineRepresentation::kBit: // Fall through. - case MachineRepresentation::kWord8: - opcode = load_rep.IsSigned() ? kPPC_LoadWordS8 : kPPC_LoadWordU8; - break; - case MachineRepresentation::kWord16: - opcode = load_rep.IsSigned() ? kPPC_LoadWordS16 : kPPC_LoadWordU16; - break; - case MachineRepresentation::kWord32: - opcode = kPPC_LoadWordU32; - break; - case MachineRepresentation::kCompressedPointer: // Fall through. - case MachineRepresentation::kCompressed: + mode = kInt16Imm; + } + switch (load_rep.representation()) { + case MachineRepresentation::kFloat32: + opcode = kPPC_LoadFloat32; + break; + case MachineRepresentation::kFloat64: + opcode = kPPC_LoadDouble; + break; + case MachineRepresentation::kBit: // Fall through. + case MachineRepresentation::kWord8: + opcode = load_rep.IsSigned() ? kPPC_LoadWordS8 : kPPC_LoadWordU8; + break; + case MachineRepresentation::kWord16: + opcode = load_rep.IsSigned() ? kPPC_LoadWordS16 : kPPC_LoadWordU16; + break; + case MachineRepresentation::kWord32: + opcode = kPPC_LoadWordU32; + break; + case MachineRepresentation::kCompressedPointer: // Fall through. + case MachineRepresentation::kCompressed: #ifdef V8_COMPRESS_POINTERS - opcode = kPPC_LoadWordS32; - if (mode != kInt34Imm) mode = kInt16Imm_4ByteAligned; + opcode = kPPC_LoadWordS32; + if (mode != kInt34Imm) mode = kInt16Imm_4ByteAligned; + break; #else - UNREACHABLE(); + UNREACHABLE(); #endif - break; case MachineRepresentation::kIndirectPointer: UNREACHABLE(); case MachineRepresentation::kSandboxedPointer: @@ -266,29 +264,28 @@ static void VisitLoadCommon(InstructionSelectorT* selector, Node* node, opcode = kPPC_LoadDecompressTagged; break; #else - case MachineRepresentation::kTaggedSigned: // Fall through. - case MachineRepresentation::kTaggedPointer: // Fall through. - case MachineRepresentation::kTagged: // Fall through. + case MachineRepresentation::kTaggedSigned: // Fall through. + case MachineRepresentation::kTaggedPointer: // Fall through. + case MachineRepresentation::kTagged: // Fall through. #endif - case MachineRepresentation::kWord64: - opcode = kPPC_LoadWord64; - if (mode != kInt34Imm) mode = kInt16Imm_4ByteAligned; - break; - case MachineRepresentation::kSimd128: - opcode = kPPC_LoadSimd128; - // Vectors do not support MRI mode, only MRR is available. 
- mode = kNoImmediate; - break; - case MachineRepresentation::kSimd256: // Fall through. - case MachineRepresentation::kMapWord: // Fall through. - case MachineRepresentation::kNone: - UNREACHABLE(); - } + case MachineRepresentation::kWord64: + opcode = kPPC_LoadWord64; + if (mode != kInt34Imm) mode = kInt16Imm_4ByteAligned; + break; + case MachineRepresentation::kSimd128: + opcode = kPPC_LoadSimd128; + // Vectors do not support MRI mode, only MRR is available. + mode = kNoImmediate; + break; + case MachineRepresentation::kSimd256: // Fall through. + case MachineRepresentation::kMapWord: // Fall through. + case MachineRepresentation::kNone: + UNREACHABLE(); + } - bool is_atomic = (node->opcode() == IrOpcode::kWord32AtomicLoad || - node->opcode() == IrOpcode::kWord64AtomicLoad); + bool is_atomic = load_view.is_atomic(); - if (base != nullptr && base->opcode() == IrOpcode::kLoadRootRegister) { + if (selector->is_load_root_register(base)) { selector->Emit(opcode |= AddressingModeField::encode(kMode_Root), g.DefineAsRegister(node), g.UseRegister(offset), g.UseImmediate(is_atomic)); @@ -305,18 +302,13 @@ static void VisitLoadCommon(InstructionSelectorT* selector, Node* node, g.DefineAsRegister(node), g.UseRegister(base), g.UseRegister(offset), g.UseImmediate(is_atomic)); } - } } template void InstructionSelectorT::VisitLoad(node_t node) { - if constexpr (Adapter::IsTurboshaft) { - UNIMPLEMENTED(); - } else { typename Adapter::LoadView load = this->load_view(node); LoadRepresentation load_rep = load.loaded_rep(); VisitLoadCommon(this, node, load_rep); - } } template @@ -326,17 +318,17 @@ void InstructionSelectorT::VisitProtectedLoad(node_t node) { } template -void VisitStoreCommon(InstructionSelectorT* selector, Node* node, +void VisitStoreCommon(InstructionSelectorT* selector, + typename Adapter::node_t node, StoreRepresentation store_rep, base::Optional atomic_order) { + using node_t = typename Adapter::node_t; PPCOperandGeneratorT g(selector); - Node* base = node->InputAt(0); - Node* offset = node->InputAt(1); - Node* value = node->InputAt(2); - - // TODO(miladfarca): maybe use atomic_order? - bool is_atomic = (node->opcode() == IrOpcode::kWord32AtomicStore || - node->opcode() == IrOpcode::kWord64AtomicStore); + auto store_view = selector->store_view(node); + node_t base = store_view.base(); + node_t offset = selector->value(store_view.index()); + node_t value = store_view.value(); + bool is_atomic = store_view.is_atomic(); MachineRepresentation rep = store_rep.representation(); WriteBarrierKind write_barrier_kind = kNoWriteBarrier; @@ -377,11 +369,10 @@ void VisitStoreCommon(InstructionSelectorT* selector, Node* node, size_t const temp_count = arraysize(temps); InstructionCode code; if (rep == MachineRepresentation::kIndirectPointer) { - DCHECK_EQ(node->opcode(), IrOpcode::kStoreIndirectPointer); DCHECK_EQ(write_barrier_kind, kIndirectPointerWriteBarrier); // In this case we need to add the IndirectPointerTag as additional input. 
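VisitLoadCommon now pulls base, index and atomicity from the adapter's load_view and then maps the MachineRepresentation to a PPC load opcode plus an immediate-addressing mode, preferring the Power10 34-bit-displacement form when the CPU supports it and falling back to the classic 16-bit displacement (4-byte aligned for 8-byte accesses). A reduced sketch of that mapping; the enums and the has_p10 flag are placeholders, not the real V8 types.

#include <cstdio>

// Placeholder enums standing in for MachineRepresentation / ArchOpcode /
// ImmediateMode in the real selector.
enum class Rep { kWord8, kWord16, kWord32, kWord64, kFloat64 };
enum class Op { LoadWordS8, LoadWordS16, LoadWordU32, LoadWord64, LoadDouble };
enum class Imm { Int16, Int16_4ByteAligned, Int34 };

struct Selection { Op op; Imm mode; };

Selection SelectLoad(Rep rep, bool has_p10) {
  // Power10 prefixed loads take a 34-bit displacement; older cores get the
  // 16-bit D-form, or the 4-byte-aligned DS-form for 8-byte accesses.
  Imm mode = has_p10 ? Imm::Int34 : Imm::Int16;
  switch (rep) {
    case Rep::kWord8:   return {Op::LoadWordS8, mode};
    case Rep::kWord16:  return {Op::LoadWordS16, mode};
    case Rep::kWord32:  return {Op::LoadWordU32, mode};
    case Rep::kWord64:
      if (mode != Imm::Int34) mode = Imm::Int16_4ByteAligned;
      return {Op::LoadWord64, mode};
    case Rep::kFloat64: return {Op::LoadDouble, mode};
  }
  return {Op::LoadWord64, mode};
}

int main() {
  Selection s = SelectLoad(Rep::kWord64, /*has_p10=*/false);
  std::printf("opcode=%d mode=%d\n", static_cast<int>(s.op),
              static_cast<int>(s.mode));
}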
code = kArchStoreIndirectWithWriteBarrier; - Node* tag = node->InputAt(3); + node_t tag = store_view.indirect_pointer_tag(); inputs[input_count++] = g.UseImmediate(tag); } else { code = kArchStoreWithWriteBarrier; @@ -398,7 +389,6 @@ void VisitStoreCommon(InstructionSelectorT* selector, Node* node, } else { mode = kInt16Imm; } - NodeMatcher m(value); switch (rep) { case MachineRepresentation::kFloat32: opcode = kPPC_StoreFloat32; @@ -413,14 +403,24 @@ void VisitStoreCommon(InstructionSelectorT* selector, Node* node, case MachineRepresentation::kWord16: opcode = kPPC_StoreWord16; break; - case MachineRepresentation::kWord32: + case MachineRepresentation::kWord32: { opcode = kPPC_StoreWord32; - if (m.IsWord32ReverseBytes()) { + bool is_w32_reverse_bytes = false; + if constexpr (Adapter::IsTurboshaft) { + using namespace turboshaft; // NOLINT(build/namespaces) + const Operation& reverse_op = selector->Get(value); + is_w32_reverse_bytes = reverse_op.Is(); + } else { + NodeMatcher m(value); + is_w32_reverse_bytes = m.IsWord32ReverseBytes(); + } + if (is_w32_reverse_bytes) { opcode = kPPC_StoreByteRev32; - value = value->InputAt(0); + value = selector->input_at(value, 0); mode = kNoImmediate; } break; + } case MachineRepresentation::kCompressedPointer: // Fall through. case MachineRepresentation::kCompressed: #ifdef V8_COMPRESS_POINTERS @@ -444,15 +444,25 @@ void VisitStoreCommon(InstructionSelectorT* selector, Node* node, if (mode != kInt34Imm) mode = kInt16Imm_4ByteAligned; opcode = kPPC_StoreCompressTagged; break; - case MachineRepresentation::kWord64: + case MachineRepresentation::kWord64: { opcode = kPPC_StoreWord64; if (mode != kInt34Imm) mode = kInt16Imm_4ByteAligned; - if (m.IsWord64ReverseBytes()) { + bool is_w64_reverse_bytes = false; + if constexpr (Adapter::IsTurboshaft) { + using namespace turboshaft; // NOLINT(build/namespaces) + const Operation& reverse_op = selector->Get(value); + is_w64_reverse_bytes = reverse_op.Is(); + } else { + NodeMatcher m(value); + is_w64_reverse_bytes = m.IsWord64ReverseBytes(); + } + if (is_w64_reverse_bytes) { opcode = kPPC_StoreByteRev64; - value = value->InputAt(0); + value = selector->input_at(value, 0); mode = kNoImmediate; } break; + } case MachineRepresentation::kSimd128: opcode = kPPC_StoreSimd128; // Vectors do not support MRI mode, only MRR is available. 
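The kWord32/kWord64 store cases now recognise a Word32ReverseBytes/Word64ReverseBytes producer under both pipelines and fold it into a byte-reversing store (kPPC_StoreByteRev32/64), storing the swap's input directly. The fold is sound because storing bswap(x) produces the same memory image as storing x with reversed byte order, which the small standard-C++ check below demonstrates (no V8 types involved).

#include <cassert>
#include <cstdint>
#include <cstring>

// Portable 32-bit byte swap (what Word32ReverseBytes computes).
static uint32_t bswap32(uint32_t v) {
  return ((v & 0x000000FFu) << 24) | ((v & 0x0000FF00u) << 8) |
         ((v & 0x00FF0000u) >> 8) | ((v & 0xFF000000u) >> 24);
}

// Model of a byte-reversing store: write x's bytes in reverse order.
static void store_byte_rev32(uint8_t* dst, uint32_t x) {
  uint8_t b[4];
  std::memcpy(b, &x, 4);
  dst[0] = b[3]; dst[1] = b[2]; dst[2] = b[1]; dst[3] = b[0];
}

int main() {
  uint32_t x = 0x11223344u;
  uint8_t a[4], b[4];
  uint32_t swapped = bswap32(x);
  std::memcpy(a, &swapped, 4);        // plain store of ReverseBytes(x)
  store_byte_rev32(b, x);             // byte-reversing store of x itself
  assert(std::memcmp(a, b, 4) == 0);  // identical memory image on any endianness
}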
@@ -464,7 +474,7 @@ void VisitStoreCommon(InstructionSelectorT* selector, Node* node, UNREACHABLE(); } - if (base != nullptr && base->opcode() == IrOpcode::kLoadRootRegister) { + if (selector->is_load_root_register(base)) { selector->Emit(opcode | AddressingModeField::encode(kMode_Root), g.NoOutput(), g.UseRegister(offset), g.UseRegister(value), g.UseImmediate(is_atomic)); @@ -491,12 +501,8 @@ void InstructionSelectorT::VisitStorePair(node_t node) { template void InstructionSelectorT::VisitStore(node_t node) { - if constexpr (Adapter::IsTurboshaft) { - UNIMPLEMENTED(); - } else { - VisitStoreCommon(this, node, StoreRepresentationOf(node->op()), - base::nullopt); - } + VisitStoreCommon(this, node, this->store_view(node).stored_rep(), + base::nullopt); } template @@ -517,6 +523,60 @@ void InstructionSelectorT::VisitUnalignedStore(node_t node) { UNREACHABLE(); } +static void VisitLogical(InstructionSelectorT* selector, + turboshaft::OpIndex node, ArchOpcode opcode, + bool left_can_cover, bool right_can_cover, + ImmediateMode imm_mode) { + using namespace turboshaft; // NOLINT(build/namespaces) + PPCOperandGeneratorT g(selector); + const WordBinopOp& logical_op = selector->Get(node).Cast(); + const Operation& lhs = selector->Get(logical_op.left()); + const Operation& rhs = selector->Get(logical_op.right()); + + // Map instruction to equivalent operation with inverted right input. + ArchOpcode inv_opcode = opcode; + switch (opcode) { + case kPPC_And: + inv_opcode = kPPC_AndComplement; + break; + case kPPC_Or: + inv_opcode = kPPC_OrComplement; + break; + default: + UNREACHABLE(); + } + + // Select Logical(y, ~x) for Logical(Xor(x, -1), y). + if (lhs.Is() && left_can_cover) { + const WordBinopOp& xor_op = lhs.Cast(); + int64_t xor_rhs_val; + if (selector->MatchSignedIntegralConstant(xor_op.right(), &xor_rhs_val) && + xor_rhs_val == -1) { + // TODO(all): support shifted operand on right. + selector->Emit(inv_opcode, g.DefineAsRegister(node), + g.UseRegister(logical_op.right()), + g.UseRegister(xor_op.left())); + return; + } + } + + // Select Logical(x, ~y) for Logical(x, Xor(y, -1)). + if (rhs.Is() && right_can_cover) { + const WordBinopOp& xor_op = rhs.Cast(); + int64_t xor_rhs_val; + if (selector->MatchSignedIntegralConstant(xor_op.right(), &xor_rhs_val) && + xor_rhs_val == -1) { + // TODO(all): support shifted operand on right. 
+ selector->Emit(inv_opcode, g.DefineAsRegister(node), + g.UseRegister(logical_op.left()), + g.UseRegister(xor_op.left())); + return; + } + } + + VisitBinop(selector, node, opcode, imm_mode); +} + template static void VisitLogical(InstructionSelectorT* selector, Node* node, Matcher* m, ArchOpcode opcode, bool left_can_cover, @@ -560,7 +620,7 @@ static void VisitLogical(InstructionSelectorT* selector, Node* node, } } - VisitBinop(selector, node, opcode, imm_mode); + VisitBinop(selector, node, opcode, imm_mode); } static inline bool IsContiguousMask32(uint32_t value, int* mb, int* me) { @@ -587,57 +647,154 @@ static inline bool IsContiguousMask64(uint64_t value, int* mb, int* me) { } #endif +template <> +void InstructionSelectorT::VisitWord32And(node_t node) { + using namespace turboshaft; // NOLINT(build/namespaces) + PPCOperandGeneratorT g(this); + + const WordBinopOp& bitwise_and = Get(node).Cast(); + int mb = 0; + int me = 0; + if (is_integer_constant(bitwise_and.right()) && + IsContiguousMask32(integer_constant(bitwise_and.right()), &mb, &me)) { + int sh = 0; + node_t left = bitwise_and.left(); + const Operation& lhs = Get(left); + if ((lhs.Is() || + lhs.Is()) && + CanCover(node, left)) { + // Try to absorb left/right shift into rlwinm + int32_t shift_by; + const ShiftOp& shift_op = lhs.Cast(); + if (MatchIntegralWord32Constant(shift_op.right(), &shift_by) && + base::IsInRange(shift_by, 0, 31)) { + left = shift_op.left(); + sh = integer_constant(shift_op.right()); + if (lhs.Is()) { + // Adjust the mask such that it doesn't include any rotated bits. + if (mb > 31 - sh) mb = 31 - sh; + sh = (32 - sh) & 0x1F; + } else { + // Adjust the mask such that it doesn't include any rotated bits. + if (me < sh) me = sh; + } + } + } + if (mb >= me) { + Emit(kPPC_RotLeftAndMask32, g.DefineAsRegister(node), g.UseRegister(left), + g.TempImmediate(sh), g.TempImmediate(mb), g.TempImmediate(me)); + return; + } + } + VisitLogical(this, node, kPPC_And, CanCover(node, bitwise_and.left()), + CanCover(node, bitwise_and.right()), kInt16Imm_Unsigned); +} + // TODO(mbrandy): Absorb rotate-right into rlwinm? template void InstructionSelectorT::VisitWord32And(node_t node) { - if constexpr (Adapter::IsTurboshaft) { - UNIMPLEMENTED(); - } else { - PPCOperandGeneratorT g(this); - Int32BinopMatcher m(node); - int mb = 0; - int me = 0; - if (m.right().HasResolvedValue() && - IsContiguousMask32(m.right().ResolvedValue(), &mb, &me)) { - int sh = 0; - Node* left = m.left().node(); - if ((m.left().IsWord32Shr() || m.left().IsWord32Shl()) && - CanCover(node, left)) { - // Try to absorb left/right shift into rlwinm - Int32BinopMatcher mleft(m.left().node()); - if (mleft.right().IsInRange(0, 31)) { - left = mleft.left().node(); - sh = mleft.right().ResolvedValue(); - if (m.left().IsWord32Shr()) { - // Adjust the mask such that it doesn't include any rotated bits. - if (mb > 31 - sh) mb = 31 - sh; - sh = (32 - sh) & 0x1F; - } else { - // Adjust the mask such that it doesn't include any rotated bits. 
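The Turboshaft overload of VisitLogical keeps the existing peephole: when one operand is Xor(x, -1) and the selector can cover that node, the whole expression is emitted in its complement form (kPPC_AndComplement / kPPC_OrComplement, i.e. PPC's andc/orc). It relies on x ^ -1 being exactly ~x; a plain standard-C++ check of the identities it exploits:

#include <cassert>
#include <cstdint>

int main() {
  for (uint32_t x : {0u, 1u, 0xDEADBEEFu, 0xFFFFFFFFu}) {
    for (uint32_t y : {0u, 0x0000FFFFu, 0x12345678u}) {
      // Xor with all-ones is bitwise complement...
      assert((x ^ 0xFFFFFFFFu) == ~x);
      // ...so And(Xor(x, -1), y) can become "y AND-with-complement x" (andc),
      // and Or(Xor(x, -1), y) can become "y OR-with-complement x" (orc).
      assert(((x ^ 0xFFFFFFFFu) & y) == (y & ~x));
      assert(((x ^ 0xFFFFFFFFu) | y) == (y | ~x));
    }
  }
}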
- if (me < sh) me = sh; - } + PPCOperandGeneratorT g(this); + Int32BinopMatcher m(node); + int mb = 0; + int me = 0; + if (m.right().HasResolvedValue() && + IsContiguousMask32(m.right().ResolvedValue(), &mb, &me)) { + int sh = 0; + Node* left = m.left().node(); + if ((m.left().IsWord32Shr() || m.left().IsWord32Shl()) && + CanCover(node, left)) { + // Try to absorb left/right shift into rlwinm + Int32BinopMatcher mleft(m.left().node()); + if (mleft.right().IsInRange(0, 31)) { + left = mleft.left().node(); + sh = mleft.right().ResolvedValue(); + if (m.left().IsWord32Shr()) { + // Adjust the mask such that it doesn't include any rotated bits. + if (mb > 31 - sh) mb = 31 - sh; + sh = (32 - sh) & 0x1F; + } else { + // Adjust the mask such that it doesn't include any rotated bits. + if (me < sh) me = sh; } } - if (mb >= me) { - Emit(kPPC_RotLeftAndMask32, g.DefineAsRegister(node), - g.UseRegister(left), g.TempImmediate(sh), g.TempImmediate(mb), - g.TempImmediate(me)); - return; - } } + if (mb >= me) { + Emit(kPPC_RotLeftAndMask32, g.DefineAsRegister(node), g.UseRegister(left), + g.TempImmediate(sh), g.TempImmediate(mb), g.TempImmediate(me)); + return; + } + } VisitLogical( this, node, &m, kPPC_And, CanCover(node, m.left().node()), CanCover(node, m.right().node()), kInt16Imm_Unsigned); - } } #if V8_TARGET_ARCH_PPC64 + +template <> +void InstructionSelectorT::VisitWord64And(node_t node) { + using namespace turboshaft; // NOLINT(build/namespaces) + PPCOperandGeneratorT g(this); + + const WordBinopOp& bitwise_and = Get(node).Cast(); + int mb = 0; + int me = 0; + if (is_integer_constant(bitwise_and.right()) && + IsContiguousMask64(integer_constant(bitwise_and.right()), &mb, &me)) { + int sh = 0; + node_t left = bitwise_and.left(); + const Operation& lhs = Get(left); + if ((lhs.Is() || + lhs.Is()) && + CanCover(node, left)) { + // Try to absorb left/right shift into rldic + int64_t shift_by; + const ShiftOp& shift_op = lhs.Cast(); + if (MatchIntegralWord64Constant(shift_op.right(), &shift_by) && + base::IsInRange(shift_by, 0, 63)) { + left = shift_op.left(); + sh = integer_constant(shift_op.right()); + if (lhs.Is()) { + // Adjust the mask such that it doesn't include any rotated bits. + if (mb > 63 - sh) mb = 63 - sh; + sh = (64 - sh) & 0x3F; + } else { + // Adjust the mask such that it doesn't include any rotated bits. + if (me < sh) me = sh; + } + } + } + if (mb >= me) { + bool match = false; + ArchOpcode opcode; + int mask; + if (me == 0) { + match = true; + opcode = kPPC_RotLeftAndClearLeft64; + mask = mb; + } else if (mb == 63) { + match = true; + opcode = kPPC_RotLeftAndClearRight64; + mask = me; + } else if (sh && me <= sh && lhs.Is()) { + match = true; + opcode = kPPC_RotLeftAndClear64; + mask = mb; + } + if (match) { + Emit(opcode, g.DefineAsRegister(node), g.UseRegister(left), + g.TempImmediate(sh), g.TempImmediate(mask)); + return; + } + } + } + VisitLogical(this, node, kPPC_And, CanCover(node, bitwise_and.left()), + CanCover(node, bitwise_and.right()), kInt16Imm_Unsigned); +} + // TODO(mbrandy): Absorb rotate-right into rldic? 
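Both And visitors hinge on IsContiguousMask32/64: if the constant operand is a single run of ones, the and (optionally together with a covered shift) can be encoded as one rotate-and-mask instruction (rlwinm, or the rldicl/rldicr/rldic family on 64-bit). A standalone sketch of contiguous-mask detection using C++20 <bit>; here mb/me are the bit indices, counted from the least-significant bit, of the top and bottom of the run, which is consistent with the "mb >= me" check above but is only an assumption about how V8's helper reports them.

#include <bit>
#include <cassert>
#include <cstdint>
#include <cstdio>

// Returns true if `value` is one contiguous run of 1-bits; on success, mb/me
// receive the highest and lowest set-bit positions (LSB = bit 0).
bool IsContiguousMask32Sketch(uint32_t value, int* mb, int* me) {
  int ones = std::popcount(value);
  int lead = std::countl_zero(value);
  int trail = std::countr_zero(value);
  if (ones == 0 || lead + ones + trail != 32) return false;  // not one run
  *mb = trail + ones - 1;  // highest set bit
  *me = trail;             // lowest set bit
  return true;
}

int main() {
  int mb, me;
  assert(IsContiguousMask32Sketch(0x00FF0000u, &mb, &me) && mb == 23 && me == 16);
  assert(!IsContiguousMask32Sketch(0x00FF00FFu, &mb, &me));  // two runs
  std::printf("0x00FF0000 -> mb=%d me=%d\n", mb, me);
}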
template void InstructionSelectorT::VisitWord64And(node_t node) { - if constexpr (Adapter::IsTurboshaft) { - UNIMPLEMENTED(); - } else { PPCOperandGeneratorT g(this); Int64BinopMatcher m(node); int mb = 0; @@ -690,14 +847,16 @@ void InstructionSelectorT::VisitWord64And(node_t node) { VisitLogical( this, node, &m, kPPC_And, CanCover(node, m.left().node()), CanCover(node, m.right().node()), kInt16Imm_Unsigned); - } } #endif template void InstructionSelectorT::VisitWord32Or(node_t node) { if constexpr (Adapter::IsTurboshaft) { - UNIMPLEMENTED(); + using namespace turboshaft; // NOLINT(build/namespaces) + const WordBinopOp& op = this->Get(node).template Cast(); + VisitLogical(this, node, kPPC_Or, CanCover(node, op.left()), + CanCover(node, op.right()), kInt16Imm_Unsigned); } else { Int32BinopMatcher m(node); VisitLogical( @@ -710,7 +869,10 @@ void InstructionSelectorT::VisitWord32Or(node_t node) { template void InstructionSelectorT::VisitWord64Or(node_t node) { if constexpr (Adapter::IsTurboshaft) { - UNIMPLEMENTED(); + using namespace turboshaft; // NOLINT(build/namespaces) + const WordBinopOp& op = this->Get(node).template Cast(); + VisitLogical(this, node, kPPC_Or, CanCover(node, op.left()), + CanCover(node, op.right()), kInt16Imm_Unsigned); } else { Int64BinopMatcher m(node); VisitLogical( @@ -722,16 +884,25 @@ void InstructionSelectorT::VisitWord64Or(node_t node) { template void InstructionSelectorT::VisitWord32Xor(node_t node) { + PPCOperandGeneratorT g(this); if constexpr (Adapter::IsTurboshaft) { - UNIMPLEMENTED(); + using namespace turboshaft; // NOLINT(build/namespaces) + const WordBinopOp& bitwise_xor = + this->Get(node).template Cast(); + int32_t mask; + if (this->MatchIntegralWord32Constant(bitwise_xor.right(), &mask) && + mask == -1) { + Emit(kPPC_Not, g.DefineAsRegister(node), + g.UseRegister(bitwise_xor.left())); + } else { + VisitBinop(this, node, kPPC_Xor, kInt16Imm_Unsigned); + } } else { - PPCOperandGeneratorT g(this); Int32BinopMatcher m(node); if (m.right().Is(-1)) { Emit(kPPC_Not, g.DefineAsRegister(node), g.UseRegister(m.left().node())); } else { - VisitBinop(this, node, kPPC_Xor, - kInt16Imm_Unsigned); + VisitBinop(this, node, kPPC_Xor, kInt16Imm_Unsigned); } } } @@ -780,27 +951,96 @@ void InstructionSelectorT::VisitStackPointerGreaterThan( #if V8_TARGET_ARCH_PPC64 template void InstructionSelectorT::VisitWord64Xor(node_t node) { + PPCOperandGeneratorT g(this); if constexpr (Adapter::IsTurboshaft) { - UNIMPLEMENTED(); + using namespace turboshaft; // NOLINT(build/namespaces) + const WordBinopOp& bitwise_xor = + this->Get(node).template Cast(); + int64_t mask; + if (this->MatchIntegralWord64Constant(bitwise_xor.right(), &mask) && + mask == -1) { + Emit(kPPC_Not, g.DefineAsRegister(node), + g.UseRegister(bitwise_xor.left())); + } else { + VisitBinop(this, node, kPPC_Xor, kInt16Imm_Unsigned); + } } else { - PPCOperandGeneratorT g(this); Int64BinopMatcher m(node); if (m.right().Is(-1)) { Emit(kPPC_Not, g.DefineAsRegister(node), g.UseRegister(m.left().node())); } else { - VisitBinop(this, node, kPPC_Xor, - kInt16Imm_Unsigned); + VisitBinop(this, node, kPPC_Xor, kInt16Imm_Unsigned); } } } #endif +template <> +Node* InstructionSelectorT::FindProjection( + Node* node, size_t projection_index) { + return NodeProperties::FindProjection(node, projection_index); +} + +template <> +TurboshaftAdapter::node_t +InstructionSelectorT::FindProjection( + node_t node, size_t projection_index) { + using namespace turboshaft; // NOLINT(build/namespaces) + const turboshaft::Graph* graph = 
this->turboshaft_graph(); + // Projections are always emitted right after the operation. + for (OpIndex next = graph->NextIndex(node); next.valid(); + next = graph->NextIndex(next)) { + const ProjectionOp* projection = graph->Get(next).TryCast(); + if (projection == nullptr) break; + if (projection->index == projection_index) return next; + } + + // If there is no Projection with index {projection_index} following the + // operation, then there shouldn't be any such Projection in the graph. We + // verify this in Debug mode. +#ifdef DEBUG + for (turboshaft::OpIndex use : turboshaft_uses(node)) { + if (const turboshaft::ProjectionOp* projection = + this->Get(use).TryCast()) { + DCHECK_EQ(projection->input(), node); + if (projection->index == projection_index) { + UNREACHABLE(); + } + } + } +#endif // DEBUG + return turboshaft::OpIndex::Invalid(); +} + template void InstructionSelectorT::VisitWord32Shl(node_t node) { + PPCOperandGeneratorT g(this); if constexpr (Adapter::IsTurboshaft) { - UNIMPLEMENTED(); + using namespace turboshaft; // NOLINT(build/namespaces) + const ShiftOp& shl = this->Get(node).template Cast(); + const Operation& lhs = this->Get(shl.left()); + if (lhs.Is() && + this->is_integer_constant(shl.right()) && + base::IsInRange(this->integer_constant(shl.right()), 0, 31)) { + int sh = this->integer_constant(shl.right()); + int mb; + int me; + const WordBinopOp& bitwise_and = lhs.Cast(); + if (this->is_integer_constant(bitwise_and.right()) && + IsContiguousMask32(this->integer_constant(bitwise_and.right()) << sh, + &mb, &me)) { + // Adjust the mask such that it doesn't include any rotated bits. + if (me < sh) me = sh; + if (mb >= me) { + Emit(kPPC_RotLeftAndMask32, g.DefineAsRegister(node), + g.UseRegister(bitwise_and.left()), g.TempImmediate(sh), + g.TempImmediate(mb), g.TempImmediate(me)); + return; + } + } + } + VisitRRO(this, kPPC_ShiftLeft32, node, kShift32Imm); } else { - PPCOperandGeneratorT g(this); Int32BinopMatcher m(node); if (m.left().IsWord32And() && m.right().IsInRange(0, 31)) { // Try to absorb logical-and into rlwinm @@ -827,59 +1067,126 @@ void InstructionSelectorT::VisitWord32Shl(node_t node) { #if V8_TARGET_ARCH_PPC64 template void InstructionSelectorT::VisitWord64Shl(node_t node) { - if constexpr (Adapter::IsTurboshaft) { - UNIMPLEMENTED(); - } else { PPCOperandGeneratorT g(this); - Int64BinopMatcher m(node); - // TODO(mbrandy): eliminate left sign extension if right >= 32 - if (m.left().IsWord64And() && m.right().IsInRange(0, 63)) { - // Try to absorb logical-and into rldic - Int64BinopMatcher mleft(m.left().node()); - int sh = m.right().ResolvedValue(); - int mb; - int me; - if (mleft.right().HasResolvedValue() && - IsContiguousMask64(mleft.right().ResolvedValue() << sh, &mb, &me)) { - // Adjust the mask such that it doesn't include any rotated bits. 
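The new TurboshaftAdapter specialisation of FindProjection leans on a graph invariant: projections of a multi-output operation are emitted immediately after it, so a linear scan over the following op indices either finds the ProjectionOp with the wanted index or can stop at the first non-projection (the DEBUG block then asserts no such projection exists elsewhere). A toy standalone model of that scan; the Op/graph structures are invented stand-ins, not Turboshaft's types.

#include <cassert>
#include <cstddef>
#include <cstdint>
#include <optional>
#include <vector>

// Hypothetical miniature graph: ops laid out in emission order.
struct Op {
  bool is_projection = false;
  uint32_t projection_index = 0;  // only meaningful when is_projection
};

using OpIndex = std::size_t;

std::optional<OpIndex> FindProjectionSketch(const std::vector<Op>& graph,
                                            OpIndex node, uint32_t index) {
  // Projections, if any, directly follow the producing operation.
  for (OpIndex next = node + 1; next < graph.size(); ++next) {
    if (!graph[next].is_projection) break;  // past the projection cluster
    if (graph[next].projection_index == index) return next;
  }
  return std::nullopt;  // this projection is never used in the graph
}

int main() {
  // op0 = a multi-output op, op1 = Projection 0, op2 = Projection 1, op3 = other.
  std::vector<Op> graph = {{false, 0}, {true, 0}, {true, 1}, {false, 0}};
  assert(FindProjectionSketch(graph, 0, 1) == OpIndex{2});
  assert(FindProjectionSketch(graph, 3, 1) == std::nullopt);
}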
- if (me < sh) me = sh; - if (mb >= me) { - bool match = false; - ArchOpcode opcode; - int mask; - if (me == 0) { - match = true; - opcode = kPPC_RotLeftAndClearLeft64; - mask = mb; - } else if (mb == 63) { - match = true; - opcode = kPPC_RotLeftAndClearRight64; - mask = me; - } else if (sh && me <= sh) { - match = true; - opcode = kPPC_RotLeftAndClear64; - mask = mb; + if constexpr (Adapter::IsTurboshaft) { + using namespace turboshaft; // NOLINT(build/namespaces) + const ShiftOp& shl = this->Get(node).template Cast(); + const Operation& lhs = this->Get(shl.left()); + if (lhs.Is() && + this->is_integer_constant(shl.right()) && + base::IsInRange(this->integer_constant(shl.right()), 0, 63)) { + int sh = this->integer_constant(shl.right()); + int mb; + int me; + const WordBinopOp& bitwise_and = lhs.Cast(); + if (this->is_integer_constant(bitwise_and.right()) && + IsContiguousMask64( + this->integer_constant(bitwise_and.right()) << sh, &mb, &me)) { + // Adjust the mask such that it doesn't include any rotated bits. + if (me < sh) me = sh; + if (mb >= me) { + bool match = false; + ArchOpcode opcode; + int mask; + if (me == 0) { + match = true; + opcode = kPPC_RotLeftAndClearLeft64; + mask = mb; + } else if (mb == 63) { + match = true; + opcode = kPPC_RotLeftAndClearRight64; + mask = me; + } else if (sh && me <= sh) { + match = true; + opcode = kPPC_RotLeftAndClear64; + mask = mb; + } + if (match) { + Emit(opcode, g.DefineAsRegister(node), + g.UseRegister(bitwise_and.left()), g.TempImmediate(sh), + g.TempImmediate(mask)); + return; + } } - if (match) { - Emit(opcode, g.DefineAsRegister(node), - g.UseRegister(mleft.left().node()), g.TempImmediate(sh), - g.TempImmediate(mask)); - return; + } + } + VisitRRO(this, kPPC_ShiftLeft64, node, kShift64Imm); + } else { + Int64BinopMatcher m(node); + // TODO(mbrandy): eliminate left sign extension if right >= 32 + if (m.left().IsWord64And() && m.right().IsInRange(0, 63)) { + // Try to absorb logical-and into rldic + Int64BinopMatcher mleft(m.left().node()); + int sh = m.right().ResolvedValue(); + int mb; + int me; + if (mleft.right().HasResolvedValue() && + IsContiguousMask64(mleft.right().ResolvedValue() << sh, &mb, &me)) { + // Adjust the mask such that it doesn't include any rotated bits. 
+ if (me < sh) me = sh; + if (mb >= me) { + bool match = false; + ArchOpcode opcode; + int mask; + if (me == 0) { + match = true; + opcode = kPPC_RotLeftAndClearLeft64; + mask = mb; + } else if (mb == 63) { + match = true; + opcode = kPPC_RotLeftAndClearRight64; + mask = me; + } else if (sh && me <= sh) { + match = true; + opcode = kPPC_RotLeftAndClear64; + mask = mb; + } + if (match) { + Emit(opcode, g.DefineAsRegister(node), + g.UseRegister(mleft.left().node()), g.TempImmediate(sh), + g.TempImmediate(mask)); + return; + } } } } + VisitRRO(this, kPPC_ShiftLeft64, node, kShift64Imm); } - VisitRRO(this, kPPC_ShiftLeft64, node, kShift64Imm); - } } #endif template void InstructionSelectorT::VisitWord32Shr(node_t node) { + PPCOperandGeneratorT g(this); if constexpr (Adapter::IsTurboshaft) { - UNIMPLEMENTED(); + using namespace turboshaft; // NOLINT(build/namespaces) + const ShiftOp& shr = this->Get(node).template Cast(); + const Operation& lhs = this->Get(shr.left()); + if (lhs.Is() && + this->is_integer_constant(shr.right()) && + base::IsInRange(this->integer_constant(shr.right()), 0, 31)) { + int sh = this->integer_constant(shr.right()); + int mb; + int me; + const WordBinopOp& bitwise_and = lhs.Cast(); + if (this->is_integer_constant(bitwise_and.right()) && + IsContiguousMask32( + static_cast( + this->integer_constant(bitwise_and.right()) >> sh), + &mb, &me)) { + // Adjust the mask such that it doesn't include any rotated bits. + if (mb > 31 - sh) mb = 31 - sh; + sh = (32 - sh) & 0x1F; + if (mb >= me) { + Emit(kPPC_RotLeftAndMask32, g.DefineAsRegister(node), + g.UseRegister(bitwise_and.left()), g.TempImmediate(sh), + g.TempImmediate(mb), g.TempImmediate(me)); + return; + } + } + } + VisitRRO(this, kPPC_ShiftRight32, node, kShift32Imm); } else { - PPCOperandGeneratorT g(this); Int32BinopMatcher m(node); if (m.left().IsWord32And() && m.right().IsInRange(0, 31)) { // Try to absorb logical-and into rlwinm @@ -908,56 +1215,116 @@ void InstructionSelectorT::VisitWord32Shr(node_t node) { #if V8_TARGET_ARCH_PPC64 template void InstructionSelectorT::VisitWord64Shr(node_t node) { - if constexpr (Adapter::IsTurboshaft) { - UNIMPLEMENTED(); - } else { PPCOperandGeneratorT g(this); - Int64BinopMatcher m(node); - if (m.left().IsWord64And() && m.right().IsInRange(0, 63)) { - // Try to absorb logical-and into rldic - Int64BinopMatcher mleft(m.left().node()); - int sh = m.right().ResolvedValue(); - int mb; - int me; - if (mleft.right().HasResolvedValue() && - IsContiguousMask64((uint64_t)(mleft.right().ResolvedValue()) >> sh, - &mb, &me)) { - // Adjust the mask such that it doesn't include any rotated bits. 
- if (mb > 63 - sh) mb = 63 - sh; - sh = (64 - sh) & 0x3F; - if (mb >= me) { - bool match = false; - ArchOpcode opcode; - int mask; - if (me == 0) { - match = true; - opcode = kPPC_RotLeftAndClearLeft64; - mask = mb; - } else if (mb == 63) { - match = true; - opcode = kPPC_RotLeftAndClearRight64; - mask = me; - } - if (match) { - Emit(opcode, g.DefineAsRegister(node), - g.UseRegister(mleft.left().node()), g.TempImmediate(sh), - g.TempImmediate(mask)); - return; + if constexpr (Adapter::IsTurboshaft) { + using namespace turboshaft; // NOLINT(build/namespaces) + const ShiftOp& shr = this->Get(node).template Cast(); + const Operation& lhs = this->Get(shr.left()); + if (lhs.Is() && + this->is_integer_constant(shr.right()) && + base::IsInRange(this->integer_constant(shr.right()), 0, 63)) { + int sh = this->integer_constant(shr.right()); + int mb; + int me; + const WordBinopOp& bitwise_and = lhs.Cast(); + if (this->is_integer_constant(bitwise_and.right()) && + IsContiguousMask64( + static_cast( + this->integer_constant(bitwise_and.right()) >> sh), + &mb, &me)) { + // Adjust the mask such that it doesn't include any rotated bits. + if (mb > 63 - sh) mb = 63 - sh; + sh = (64 - sh) & 0x3F; + if (mb >= me) { + bool match = false; + ArchOpcode opcode; + int mask; + if (me == 0) { + match = true; + opcode = kPPC_RotLeftAndClearLeft64; + mask = mb; + } else if (mb == 63) { + match = true; + opcode = kPPC_RotLeftAndClearRight64; + mask = me; + } + if (match) { + Emit(opcode, g.DefineAsRegister(node), + g.UseRegister(bitwise_and.left()), g.TempImmediate(sh), + g.TempImmediate(mask)); + return; + } } } } + VisitRRO(this, kPPC_ShiftRight64, node, kShift64Imm); + } else { + Int64BinopMatcher m(node); + if (m.left().IsWord64And() && m.right().IsInRange(0, 63)) { + // Try to absorb logical-and into rldic + Int64BinopMatcher mleft(m.left().node()); + int sh = m.right().ResolvedValue(); + int mb; + int me; + if (mleft.right().HasResolvedValue() && + IsContiguousMask64((uint64_t)(mleft.right().ResolvedValue()) >> sh, + &mb, &me)) { + // Adjust the mask such that it doesn't include any rotated bits. 
+ if (mb > 63 - sh) mb = 63 - sh; + sh = (64 - sh) & 0x3F; + if (mb >= me) { + bool match = false; + ArchOpcode opcode; + int mask; + if (me == 0) { + match = true; + opcode = kPPC_RotLeftAndClearLeft64; + mask = mb; + } else if (mb == 63) { + match = true; + opcode = kPPC_RotLeftAndClearRight64; + mask = me; + } + if (match) { + Emit(opcode, g.DefineAsRegister(node), + g.UseRegister(mleft.left().node()), g.TempImmediate(sh), + g.TempImmediate(mask)); + return; + } + } + } + } + VisitRRO(this, kPPC_ShiftRight64, node, kShift64Imm); } - VisitRRO(this, kPPC_ShiftRight64, node, kShift64Imm); - } } #endif template void InstructionSelectorT::VisitWord32Sar(node_t node) { + PPCOperandGeneratorT g(this); if constexpr (Adapter::IsTurboshaft) { - UNIMPLEMENTED(); + using namespace turboshaft; // NOLINT(build/namespaces) + const ShiftOp& sar = this->Get(node).template Cast(); + const Operation& lhs = this->Get(sar.left()); + if (CanCover(node, sar.left()) && lhs.Is()) { + const ShiftOp& shl = lhs.Cast(); + if (this->is_integer_constant(sar.right()) && + this->is_integer_constant(shl.right())) { + uint32_t sar_by = this->integer_constant(sar.right()); + uint32_t shl_by = this->integer_constant(shl.right()); + if ((sar_by == shl_by) && (sar_by == 16)) { + Emit(kPPC_ExtendSignWord16, g.DefineAsRegister(node), + g.UseRegister(shl.left())); + return; + } else if ((sar_by == shl_by) && (sar_by == 24)) { + Emit(kPPC_ExtendSignWord8, g.DefineAsRegister(node), + g.UseRegister(shl.left())); + return; + } + } + } + VisitRRO(this, kPPC_ShiftRightAlg32, node, kShift32Imm); } else { - PPCOperandGeneratorT g(this); Int32BinopMatcher m(node); // Replace with sign extension for (x << K) >> K where K is 16 or 24. if (CanCover(node, m.left().node()) && m.left().IsWord32Shl()) { @@ -976,191 +1343,44 @@ void InstructionSelectorT::VisitWord32Sar(node_t node) { } } -#if !V8_TARGET_ARCH_PPC64 -template -void VisitPairBinop(InstructionSelectorT* selector, - InstructionCode opcode, InstructionCode opcode2, - Node* node) { - PPCOperandGeneratorT g(selector); - - Node* projection1 = NodeProperties::FindProjection(node, 1); - if (projection1) { - // We use UseUniqueRegister here to avoid register sharing with the output - // registers. - InstructionOperand inputs[] = { - g.UseRegister(node->InputAt(0)), g.UseUniqueRegister(node->InputAt(1)), - g.UseRegister(node->InputAt(2)), g.UseUniqueRegister(node->InputAt(3))}; - - InstructionOperand outputs[] = { - g.DefineAsRegister(node), - g.DefineAsRegister(NodeProperties::FindProjection(node, 1))}; - - selector->Emit(opcode, 2, outputs, 4, inputs); - } else { - // The high word of the result is not used, so we emit the standard 32 bit - // instruction. 
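VisitWord32Sar's Turboshaft path mirrors the old matcher: an arithmetic (x << 16) >> 16 is just a sign extension of the low half-word, and (x << 24) >> 24 of the low byte, so the shift pair collapses into kPPC_ExtendSignWord16 / kPPC_ExtendSignWord8 (extsh/extsb). The identity it depends on, checked in plain C++ (assuming the usual two's-complement shift behaviour, guaranteed since C++20):

#include <cassert>
#include <cstdint>

int main() {
  for (uint32_t x : {0u, 0x00007FFFu, 0x00008000u, 0x1234ABCDu, 0xFFFFFFFFu}) {
    int32_t v = static_cast<int32_t>(x);
    // (v << 16) >> 16 keeps the low 16 bits and sign-extends them: extsh.
    int32_t s16 = static_cast<int32_t>(static_cast<uint32_t>(v) << 16) >> 16;
    assert(s16 == static_cast<int16_t>(v));
    // (v << 24) >> 24 keeps the low 8 bits and sign-extends them: extsb.
    int32_t s24 = static_cast<int32_t>(static_cast<uint32_t>(v) << 24) >> 24;
    assert(s24 == static_cast<int8_t>(v));
  }
}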
- selector->Emit(opcode2, g.DefineSameAsFirst(node), - g.UseRegister(node->InputAt(0)), - g.UseRegister(node->InputAt(2))); - } -} - -template -void InstructionSelectorT::VisitInt32PairAdd(node_t node) { - if constexpr (Adapter::IsTurboshaft) { - UNIMPLEMENTED(); - } else { - VisitPairBinop(this, kPPC_AddPair, kPPC_Add32, node); - } -} - -template -void InstructionSelectorT::VisitInt32PairSub(node_t node) { - if constexpr (Adapter::IsTurboshaft) { - UNIMPLEMENTED(); - } else { - VisitPairBinop(this, kPPC_SubPair, kPPC_Sub, node); - } -} - -template -void InstructionSelectorT::VisitInt32PairMul(node_t node) { - if constexpr (Adapter::IsTurboshaft) { - UNIMPLEMENTED(); - } else { - PPCOperandGeneratorT g(this); - Node* projection1 = NodeProperties::FindProjection(node, 1); - if (projection1) { - InstructionOperand inputs[] = {g.UseUniqueRegister(node->InputAt(0)), - g.UseUniqueRegister(node->InputAt(1)), - g.UseUniqueRegister(node->InputAt(2)), - g.UseUniqueRegister(node->InputAt(3))}; - - InstructionOperand outputs[] = { - g.DefineAsRegister(node), - g.DefineAsRegister(NodeProperties::FindProjection(node, 1))}; - - InstructionOperand temps[] = {g.TempRegister(), g.TempRegister()}; - - Emit(kPPC_MulPair, 2, outputs, 4, inputs, 2, temps); - } else { - // The high word of the result is not used, so we emit the standard 32 bit - // instruction. - Emit(kPPC_Mul32, g.DefineSameAsFirst(node), - g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(2))); - } - } -} - -namespace { -// Shared routine for multiple shift operations. -template -void VisitPairShift(InstructionSelectorT* selector, - InstructionCode opcode, Node* node) { - PPCOperandGeneratorT g(selector); - // We use g.UseUniqueRegister here to guarantee that there is - // no register aliasing of input registers with output registers. 
- Int32Matcher m(node->InputAt(2)); - InstructionOperand shift_operand; - if (m.HasResolvedValue()) { - shift_operand = g.UseImmediate(m.node()); - } else { - shift_operand = g.UseUniqueRegister(m.node()); - } - - InstructionOperand inputs[] = {g.UseUniqueRegister(node->InputAt(0)), - g.UseUniqueRegister(node->InputAt(1)), - shift_operand}; - - Node* projection1 = NodeProperties::FindProjection(node, 1); - - InstructionOperand outputs[2]; - InstructionOperand temps[1]; - int32_t output_count = 0; - int32_t temp_count = 0; - - outputs[output_count++] = g.DefineAsRegister(node); - if (projection1) { - outputs[output_count++] = g.DefineAsRegister(projection1); - } else { - temps[temp_count++] = g.TempRegister(); - } - - selector->Emit(opcode, output_count, outputs, 3, inputs, temp_count, temps); -} -} // namespace - -template -void InstructionSelectorT::VisitWord32PairShl(node_t node) { - if constexpr (Adapter::IsTurboshaft) { - UNIMPLEMENTED(); - } else { - VisitPairShift(this, kPPC_ShiftLeftPair, node); - } -} - -template -void InstructionSelectorT::VisitWord32PairShr(node_t node) { - if constexpr (Adapter::IsTurboshaft) { - UNIMPLEMENTED(); - } else { - VisitPairShift(this, kPPC_ShiftRightPair, node); - } -} - -template -void InstructionSelectorT::VisitWord32PairSar(node_t node) { - if constexpr (Adapter::IsTurboshaft) { - UNIMPLEMENTED(); - } else { - VisitPairShift(this, kPPC_ShiftRightAlgPair, node); - } -} -#endif - #if V8_TARGET_ARCH_PPC64 template void InstructionSelectorT::VisitWord64Sar(node_t node) { - if constexpr (Adapter::IsTurboshaft) { - UNIMPLEMENTED(); - } else { PPCOperandGeneratorT g(this); - Int64BinopMatcher m(node); - if (CanCover(m.node(), m.left().node()) && m.left().IsLoad() && - m.right().Is(32)) { - // Just load and sign-extend the interesting 4 bytes instead. This - // happens, for example, when we're loading and untagging SMIs. - BaseWithIndexAndDisplacement64Matcher mleft(m.left().node(), - AddressOption::kAllowAll); - if (mleft.matches() && mleft.index() == nullptr) { - int64_t offset = 0; - Node* displacement = mleft.displacement(); - if (displacement != nullptr) { - Int64Matcher mdisplacement(displacement); - DCHECK(mdisplacement.HasResolvedValue()); - offset = mdisplacement.ResolvedValue(); - } - offset = SmiWordOffset(offset); - if (g.CanBeImmediate(offset, kInt16Imm_4ByteAligned)) { - Emit(kPPC_LoadWordS32 | AddressingModeField::encode(kMode_MRI), - g.DefineAsRegister(node), g.UseRegister(mleft.base()), - g.TempImmediate(offset), g.UseImmediate(0)); - return; + // TODO(miladfarca): Implement for Turboshaft. + if constexpr (!Adapter::IsTurboshaft) { + Int64BinopMatcher m(node); + if (CanCover(m.node(), m.left().node()) && m.left().IsLoad() && + m.right().Is(32)) { + // Just load and sign-extend the interesting 4 bytes instead. This + // happens, for example, when we're loading and untagging SMIs. 
+ BaseWithIndexAndDisplacement64Matcher mleft(m.left().node(), + AddressOption::kAllowAll); + if (mleft.matches() && mleft.index() == nullptr) { + int64_t offset = 0; + Node* displacement = mleft.displacement(); + if (displacement != nullptr) { + Int64Matcher mdisplacement(displacement); + DCHECK(mdisplacement.HasResolvedValue()); + offset = mdisplacement.ResolvedValue(); + } + offset = SmiWordOffset(offset); + if (g.CanBeImmediate(offset, kInt16Imm_4ByteAligned)) { + Emit(kPPC_LoadWordS32 | AddressingModeField::encode(kMode_MRI), + g.DefineAsRegister(node), g.UseRegister(mleft.base()), + g.TempImmediate(offset), g.UseImmediate(0)); + return; + } } } } VisitRRO(this, kPPC_ShiftRightAlg64, node, kShift64Imm); - } } #endif template void InstructionSelectorT::VisitWord32Rol(node_t node) { - if constexpr (Adapter::IsTurboshaft) { - UNIMPLEMENTED(); - } else { UNREACHABLE(); - } } template @@ -1171,70 +1391,46 @@ void InstructionSelectorT::VisitWord64Rol(node_t node) { // TODO(mbrandy): Absorb logical-and into rlwinm? template void InstructionSelectorT::VisitWord32Ror(node_t node) { - if constexpr (Adapter::IsTurboshaft) { - UNIMPLEMENTED(); - } else { VisitRRO(this, kPPC_RotRight32, node, kShift32Imm); - } } #if V8_TARGET_ARCH_PPC64 // TODO(mbrandy): Absorb logical-and into rldic? template void InstructionSelectorT::VisitWord64Ror(node_t node) { - if constexpr (Adapter::IsTurboshaft) { - UNIMPLEMENTED(); - } else { VisitRRO(this, kPPC_RotRight64, node, kShift64Imm); - } } #endif template void InstructionSelectorT::VisitWord32Clz(node_t node) { - if constexpr (Adapter::IsTurboshaft) { - UNIMPLEMENTED(); - } else { PPCOperandGeneratorT g(this); Emit(kPPC_Cntlz32, g.DefineAsRegister(node), - g.UseRegister(node->InputAt(0))); - } + g.UseRegister(this->input_at(node, 0))); } #if V8_TARGET_ARCH_PPC64 template void InstructionSelectorT::VisitWord64Clz(node_t node) { - if constexpr (Adapter::IsTurboshaft) { - UNIMPLEMENTED(); - } else { PPCOperandGeneratorT g(this); Emit(kPPC_Cntlz64, g.DefineAsRegister(node), - g.UseRegister(node->InputAt(0))); - } + g.UseRegister(this->input_at(node, 0))); } #endif template void InstructionSelectorT::VisitWord32Popcnt(node_t node) { - if constexpr (Adapter::IsTurboshaft) { - UNIMPLEMENTED(); - } else { PPCOperandGeneratorT g(this); Emit(kPPC_Popcnt32, g.DefineAsRegister(node), - g.UseRegister(node->InputAt(0))); - } + g.UseRegister(this->input_at(node, 0))); } #if V8_TARGET_ARCH_PPC64 template void InstructionSelectorT::VisitWord64Popcnt(node_t node) { - if constexpr (Adapter::IsTurboshaft) { - UNIMPLEMENTED(); - } else { PPCOperandGeneratorT g(this); Emit(kPPC_Popcnt64, g.DefineAsRegister(node), - g.UseRegister(node->InputAt(0))); - } + g.UseRegister(this->input_at(node, 0))); } #endif @@ -1265,7 +1461,26 @@ void InstructionSelectorT::VisitWord64ReverseBits(node_t node) { template void InstructionSelectorT::VisitWord64ReverseBytes(node_t node) { if constexpr (Adapter::IsTurboshaft) { - UNIMPLEMENTED(); + using namespace turboshaft; // NOLINT(build/namespaces) + PPCOperandGeneratorT g(this); + InstructionOperand temp[] = {g.TempRegister()}; + node_t input = this->Get(node).input(0); + const Operation& input_op = this->Get(input); + if (CanCover(node, input) && input_op.Is()) { + auto load = this->load_view(input); + LoadRepresentation load_rep = load.loaded_rep(); + if (load_rep.representation() == MachineRepresentation::kWord64) { + node_t base = load.base(); + node_t offset = load.index(); + bool is_atomic = load.is_atomic(); + Emit(kPPC_LoadByteRev64 | 
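The Word64Sar special case (still Turbofan-only, per the TODO) avoids a full 64-bit load plus shift when untagging an SMI: on 64-bit targets the payload sits in the upper 32 bits of the tagged word, so `tagged >> 32` is the same as a sign-extending 4-byte load from the word's upper half, with SmiWordOffset choosing that half in an endianness-aware way. A rough illustration in plain C++; the layout and the offset choice below are simplifying assumptions, not V8's exact object layout.

#include <bit>
#include <cassert>
#include <cstdint>
#include <cstring>

int main() {
  // A 64-bit "SMI": the 32-bit payload is kept in the upper half of the word.
  int32_t payload = -123456;
  uint64_t tagged = static_cast<uint64_t>(static_cast<uint32_t>(payload)) << 32;

  // Untagging with an arithmetic shift (two's-complement, defined since C++20)...
  int64_t via_shift = static_cast<int64_t>(tagged) >> 32;

  // ...matches a sign-extending 4-byte load of the upper word. Little-endian
  // targets find that word 4 bytes into the object, big-endian ones at offset 0;
  // this is the distinction SmiWordOffset hides.
  const int upper_word_offset =
      (std::endian::native == std::endian::little) ? 4 : 0;
  int32_t upper;
  std::memcpy(&upper, reinterpret_cast<const char*>(&tagged) + upper_word_offset,
              sizeof(upper));
  assert(via_shift == static_cast<int64_t>(upper));
  assert(upper == payload);
}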
AddressingModeField::encode(kMode_MRR), + g.DefineAsRegister(node), g.UseRegister(base), + g.UseRegister(offset), g.UseImmediate(is_atomic)); + return; + } + } + Emit(kPPC_ByteRev64, g.DefineAsRegister(node), + g.UseUniqueRegister(this->input_at(node, 0)), 1, temp); } else { PPCOperandGeneratorT g(this); InstructionOperand temp[] = {g.TempRegister()}; @@ -1292,7 +1507,25 @@ void InstructionSelectorT::VisitWord64ReverseBytes(node_t node) { template void InstructionSelectorT::VisitWord32ReverseBytes(node_t node) { if constexpr (Adapter::IsTurboshaft) { - UNIMPLEMENTED(); + using namespace turboshaft; // NOLINT(build/namespaces) + PPCOperandGeneratorT g(this); + node_t input = this->Get(node).input(0); + const Operation& input_op = this->Get(input); + if (CanCover(node, input) && input_op.Is()) { + auto load = this->load_view(input); + LoadRepresentation load_rep = load.loaded_rep(); + if (load_rep.representation() == MachineRepresentation::kWord32) { + node_t base = load.base(); + node_t offset = load.index(); + bool is_atomic = load.is_atomic(); + Emit(kPPC_LoadByteRev32 | AddressingModeField::encode(kMode_MRR), + g.DefineAsRegister(node), g.UseRegister(base), + g.UseRegister(offset), g.UseImmediate(is_atomic)); + return; + } + } + Emit(kPPC_ByteRev32, g.DefineAsRegister(node), + g.UseUniqueRegister(this->input_at(node, 0))); } else { PPCOperandGeneratorT g(this); NodeMatcher input(node->InputAt(0)); @@ -1316,61 +1549,65 @@ void InstructionSelectorT::VisitWord32ReverseBytes(node_t node) { } template -void InstructionSelectorT::VisitSimd128ReverseBytes(Node* node) { +void InstructionSelectorT::VisitSimd128ReverseBytes(node_t node) { PPCOperandGeneratorT g(this); Emit(kPPC_LoadReverseSimd128RR, g.DefineAsRegister(node), - g.UseRegister(node->InputAt(0))); + g.UseRegister(this->input_at(node, 0))); } template void InstructionSelectorT::VisitInt32Add(node_t node) { - if constexpr (Adapter::IsTurboshaft) { - UNIMPLEMENTED(); - } else { - VisitBinop(this, node, kPPC_Add32, kInt16Imm); - } + VisitBinop(this, node, kPPC_Add32, kInt16Imm); } #if V8_TARGET_ARCH_PPC64 template void InstructionSelectorT::VisitInt64Add(node_t node) { - if constexpr (Adapter::IsTurboshaft) { - UNIMPLEMENTED(); - } else { - VisitBinop(this, node, kPPC_Add64, kInt16Imm); - } + VisitBinop(this, node, kPPC_Add64, kInt16Imm); } #endif template void InstructionSelectorT::VisitInt32Sub(node_t node) { - if constexpr (Adapter::IsTurboshaft) { - UNIMPLEMENTED(); - } else { PPCOperandGeneratorT g(this); - Int32BinopMatcher m(node); - if (m.left().Is(0)) { - Emit(kPPC_Neg, g.DefineAsRegister(node), g.UseRegister(m.right().node())); + if constexpr (Adapter::IsTurboshaft) { + using namespace turboshaft; // NOLINT(build/namespaces) + const WordBinopOp& sub = this->Get(node).template Cast(); + if (this->MatchIntegralZero(sub.left())) { + Emit(kPPC_Neg, g.DefineAsRegister(node), g.UseRegister(sub.right())); + } else { + VisitBinop(this, node, kPPC_Sub, kInt16Imm_Negate); + } } else { - VisitBinop(this, node, kPPC_Sub, - kInt16Imm_Negate); + Int32BinopMatcher m(node); + if (m.left().Is(0)) { + Emit(kPPC_Neg, g.DefineAsRegister(node), + g.UseRegister(m.right().node())); + } else { + VisitBinop(this, node, kPPC_Sub, kInt16Imm_Negate); + } } - } } #if V8_TARGET_ARCH_PPC64 template void InstructionSelectorT::VisitInt64Sub(node_t node) { + PPCOperandGeneratorT g(this); if constexpr (Adapter::IsTurboshaft) { - UNIMPLEMENTED(); + using namespace turboshaft; // NOLINT(build/namespaces) + const WordBinopOp& sub = this->Get(node).template Cast(); + 
if (this->MatchIntegralZero(sub.left())) { + Emit(kPPC_Neg, g.DefineAsRegister(node), g.UseRegister(sub.right())); + } else { + VisitBinop(this, node, kPPC_Sub, kInt16Imm_Negate); + } } else { PPCOperandGeneratorT g(this); Int64BinopMatcher m(node); if (m.left().Is(0)) { Emit(kPPC_Neg, g.DefineAsRegister(node), g.UseRegister(m.right().node())); } else { - VisitBinop(this, node, kPPC_Sub, - kInt16Imm_Negate); + VisitBinop(this, node, kPPC_Sub, kInt16Imm_Negate); } } } @@ -1384,411 +1621,266 @@ void VisitCompare(InstructionSelectorT* selector, InstructionOperand right, FlagsContinuationT* cont); template void EmitInt32MulWithOverflow(InstructionSelectorT* selector, - Node* node, FlagsContinuationT* cont) { - if constexpr (Adapter::IsTurboshaft) { - UNIMPLEMENTED(); - } else { - PPCOperandGeneratorT g(selector); - Int32BinopMatcher m(node); - InstructionOperand result_operand = g.DefineAsRegister(node); - InstructionOperand high32_operand = g.TempRegister(); - InstructionOperand temp_operand = g.TempRegister(); - { - InstructionOperand outputs[] = {result_operand, high32_operand}; - InstructionOperand inputs[] = {g.UseRegister(m.left().node()), - g.UseRegister(m.right().node())}; - selector->Emit(kPPC_Mul32WithHigh32, 2, outputs, 2, inputs); - } - { - InstructionOperand shift_31 = g.UseImmediate(31); - InstructionOperand outputs[] = {temp_operand}; - InstructionOperand inputs[] = {result_operand, shift_31}; - selector->Emit(kPPC_ShiftRightAlg32, 1, outputs, 2, inputs); - } - - VisitCompare(selector, kPPC_Cmp32, high32_operand, temp_operand, cont); + typename Adapter::node_t node, + FlagsContinuationT* cont) { + PPCOperandGeneratorT g(selector); + typename Adapter::node_t lhs = selector->input_at(node, 0); + typename Adapter::node_t rhs = selector->input_at(node, 1); + InstructionOperand result_operand = g.DefineAsRegister(node); + InstructionOperand high32_operand = g.TempRegister(); + InstructionOperand temp_operand = g.TempRegister(); + { + InstructionOperand outputs[] = {result_operand, high32_operand}; + InstructionOperand inputs[] = {g.UseRegister(lhs), g.UseRegister(rhs)}; + selector->Emit(kPPC_Mul32WithHigh32, 2, outputs, 2, inputs); + } + { + InstructionOperand shift_31 = g.UseImmediate(31); + InstructionOperand outputs[] = {temp_operand}; + InstructionOperand inputs[] = {result_operand, shift_31}; + selector->Emit(kPPC_ShiftRightAlg32, 1, outputs, 2, inputs); } + + VisitCompare(selector, kPPC_Cmp32, high32_operand, temp_operand, cont); } template void EmitInt64MulWithOverflow(InstructionSelectorT* selector, - Node* node, FlagsContinuationT* cont) { - if constexpr (Adapter::IsTurboshaft) { - UNIMPLEMENTED(); - } else { - PPCOperandGeneratorT g(selector); - Int64BinopMatcher m(node); - InstructionOperand result = g.DefineAsRegister(node); - InstructionOperand left = g.UseRegister(m.left().node()); - InstructionOperand high = g.TempRegister(); - InstructionOperand result_sign = g.TempRegister(); - InstructionOperand right = g.UseRegister(m.right().node()); - selector->Emit(kPPC_Mul64, result, left, right); - selector->Emit(kPPC_MulHighS64, high, left, right); - selector->Emit(kPPC_ShiftRightAlg64, result_sign, result, - g.TempImmediate(63)); - // Test whether {high} is a sign-extension of {result}. 
- selector->EmitWithContinuation(kPPC_Cmp64, high, result_sign, cont); - } + typename Adapter::node_t node, + FlagsContinuationT* cont) { + PPCOperandGeneratorT g(selector); + typename Adapter::node_t lhs = selector->input_at(node, 0); + typename Adapter::node_t rhs = selector->input_at(node, 1); + InstructionOperand result = g.DefineAsRegister(node); + InstructionOperand left = g.UseRegister(lhs); + InstructionOperand high = g.TempRegister(); + InstructionOperand result_sign = g.TempRegister(); + InstructionOperand right = g.UseRegister(rhs); + selector->Emit(kPPC_Mul64, result, left, right); + selector->Emit(kPPC_MulHighS64, high, left, right); + selector->Emit(kPPC_ShiftRightAlg64, result_sign, result, + g.TempImmediate(63)); + // Test whether {high} is a sign-extension of {result}. + selector->EmitWithContinuation(kPPC_Cmp64, high, result_sign, cont); } } // namespace template void InstructionSelectorT::VisitInt32Mul(node_t node) { - if constexpr (Adapter::IsTurboshaft) { - UNIMPLEMENTED(); - } else { VisitRRR(this, kPPC_Mul32, node); - } } #if V8_TARGET_ARCH_PPC64 template void InstructionSelectorT::VisitInt64Mul(node_t node) { - if constexpr (Adapter::IsTurboshaft) { - UNIMPLEMENTED(); - } else { VisitRRR(this, kPPC_Mul64, node); - } } #endif template void InstructionSelectorT::VisitInt32MulHigh(node_t node) { - if constexpr (Adapter::IsTurboshaft) { - UNIMPLEMENTED(); - } else { PPCOperandGeneratorT g(this); Emit(kPPC_MulHigh32, g.DefineAsRegister(node), - g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1))); - } + g.UseRegister(this->input_at(node, 0)), + g.UseRegister(this->input_at(node, 1))); } template void InstructionSelectorT::VisitUint32MulHigh(node_t node) { - if constexpr (Adapter::IsTurboshaft) { - UNIMPLEMENTED(); - } else { PPCOperandGeneratorT g(this); Emit(kPPC_MulHighU32, g.DefineAsRegister(node), - g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1))); - } + g.UseRegister(this->input_at(node, 0)), + g.UseRegister(this->input_at(node, 1))); } template void InstructionSelectorT::VisitInt64MulHigh(node_t node) { - if constexpr (Adapter::IsTurboshaft) { - UNIMPLEMENTED(); - } else { PPCOperandGeneratorT g(this); Emit(kPPC_MulHighS64, g.DefineAsRegister(node), - g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1))); - } + g.UseRegister(this->input_at(node, 0)), + g.UseRegister(this->input_at(node, 1))); } template void InstructionSelectorT::VisitUint64MulHigh(node_t node) { - if constexpr (Adapter::IsTurboshaft) { - UNIMPLEMENTED(); - } else { PPCOperandGeneratorT g(this); Emit(kPPC_MulHighU64, g.DefineAsRegister(node), - g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1))); - } + g.UseRegister(this->input_at(node, 0)), + g.UseRegister(this->input_at(node, 1))); } template void InstructionSelectorT::VisitInt32Div(node_t node) { - if constexpr (Adapter::IsTurboshaft) { - UNIMPLEMENTED(); - } else { VisitRRR(this, kPPC_Div32, node); - } } #if V8_TARGET_ARCH_PPC64 template void InstructionSelectorT::VisitInt64Div(node_t node) { - if constexpr (Adapter::IsTurboshaft) { - UNIMPLEMENTED(); - } else { VisitRRR(this, kPPC_Div64, node); - } } #endif template void InstructionSelectorT::VisitUint32Div(node_t node) { - if constexpr (Adapter::IsTurboshaft) { - UNIMPLEMENTED(); - } else { VisitRRR(this, kPPC_DivU32, node); - } } #if V8_TARGET_ARCH_PPC64 template void InstructionSelectorT::VisitUint64Div(node_t node) { - if constexpr (Adapter::IsTurboshaft) { - UNIMPLEMENTED(); - } else { VisitRRR(this, kPPC_DivU64, node); - } } #endif 
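EmitInt32MulWithOverflow keeps the existing strategy under both adapters: compute the low and high words of the 32x32 product (kPPC_Mul32WithHigh32), then compare the high word against the low word arithmetically shifted right by 31; they match exactly when the high word is pure sign extension, i.e. when the product fits in 32 signed bits (EmitInt64MulWithOverflow does the analogous 64-bit check with a shift by 63). A plain C++ rendering of the 32-bit check:

#include <cassert>
#include <cstdint>

// Returns true if a * b overflows int32, using the same "is the high word a
// sign-extension of the low word?" test the selector emits.
bool Mul32Overflows(int32_t a, int32_t b) {
  int64_t full = static_cast<int64_t>(a) * static_cast<int64_t>(b);
  int32_t low = static_cast<int32_t>(full);         // Mul32WithHigh32, output 0
  int32_t high = static_cast<int32_t>(full >> 32);  // Mul32WithHigh32, output 1
  int32_t sign = low >> 31;                         // ShiftRightAlg32 by 31
  return high != sign;                              // Cmp32 + overflow branch
}

int main() {
  assert(!Mul32Overflows(1000, 1000));
  assert(!Mul32Overflows(-46341, 46340));       // -2147441940 still fits
  assert(Mul32Overflows(0x40000000, 4));        // 2^32 does not fit
  assert(Mul32Overflows(-2147483647 - 1, -1));  // INT32_MIN * -1 overflows
}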
template void InstructionSelectorT::VisitInt32Mod(node_t node) { - if constexpr (Adapter::IsTurboshaft) { - UNIMPLEMENTED(); - } else { VisitRRR(this, kPPC_Mod32, node); - } } #if V8_TARGET_ARCH_PPC64 template void InstructionSelectorT::VisitInt64Mod(node_t node) { - if constexpr (Adapter::IsTurboshaft) { - UNIMPLEMENTED(); - } else { VisitRRR(this, kPPC_Mod64, node); - } } #endif template void InstructionSelectorT::VisitUint32Mod(node_t node) { - if constexpr (Adapter::IsTurboshaft) { - UNIMPLEMENTED(); - } else { VisitRRR(this, kPPC_ModU32, node); - } } #if V8_TARGET_ARCH_PPC64 template void InstructionSelectorT::VisitUint64Mod(node_t node) { - if constexpr (Adapter::IsTurboshaft) { - UNIMPLEMENTED(); - } else { VisitRRR(this, kPPC_ModU64, node); - } } #endif template void InstructionSelectorT::VisitChangeFloat32ToFloat64(node_t node) { - if constexpr (Adapter::IsTurboshaft) { - UNIMPLEMENTED(); - } else { VisitRR(this, kPPC_Float32ToDouble, node); - } } template void InstructionSelectorT::VisitRoundInt32ToFloat32(node_t node) { - if constexpr (Adapter::IsTurboshaft) { - UNIMPLEMENTED(); - } else { VisitRR(this, kPPC_Int32ToFloat32, node); - } } template void InstructionSelectorT::VisitRoundUint32ToFloat32(node_t node) { - if constexpr (Adapter::IsTurboshaft) { - UNIMPLEMENTED(); - } else { VisitRR(this, kPPC_Uint32ToFloat32, node); - } } template void InstructionSelectorT::VisitChangeInt32ToFloat64(node_t node) { - if constexpr (Adapter::IsTurboshaft) { - UNIMPLEMENTED(); - } else { VisitRR(this, kPPC_Int32ToDouble, node); - } } template void InstructionSelectorT::VisitChangeUint32ToFloat64(node_t node) { - if constexpr (Adapter::IsTurboshaft) { - UNIMPLEMENTED(); - } else { VisitRR(this, kPPC_Uint32ToDouble, node); - } } template void InstructionSelectorT::VisitChangeFloat64ToInt32(node_t node) { - if constexpr (Adapter::IsTurboshaft) { - UNIMPLEMENTED(); - } else { VisitRR(this, kPPC_DoubleToInt32, node); - } } template void InstructionSelectorT::VisitChangeFloat64ToUint32(node_t node) { - if constexpr (Adapter::IsTurboshaft) { - UNIMPLEMENTED(); - } else { VisitRR(this, kPPC_DoubleToUint32, node); - } } template void InstructionSelectorT::VisitTruncateFloat64ToUint32(node_t node) { - if constexpr (Adapter::IsTurboshaft) { - UNIMPLEMENTED(); - } else { VisitRR(this, kPPC_DoubleToUint32, node); - } } template void InstructionSelectorT::VisitSignExtendWord8ToInt32(node_t node) { - if constexpr (Adapter::IsTurboshaft) { - UNIMPLEMENTED(); - } else { // TODO(mbrandy): inspect input to see if nop is appropriate. VisitRR(this, kPPC_ExtendSignWord8, node); - } } template void InstructionSelectorT::VisitSignExtendWord16ToInt32(node_t node) { - if constexpr (Adapter::IsTurboshaft) { - UNIMPLEMENTED(); - } else { // TODO(mbrandy): inspect input to see if nop is appropriate. 
VisitRR(this, kPPC_ExtendSignWord16, node); - } } #if V8_TARGET_ARCH_PPC64 template void InstructionSelectorT::VisitTryTruncateFloat32ToInt64( node_t node) { - if constexpr (Adapter::IsTurboshaft) { - UNIMPLEMENTED(); - } else { VisitTryTruncateDouble(this, kPPC_DoubleToInt64, node); - } } template void InstructionSelectorT::VisitTryTruncateFloat64ToInt64( node_t node) { - if constexpr (Adapter::IsTurboshaft) { - UNIMPLEMENTED(); - } else { VisitTryTruncateDouble(this, kPPC_DoubleToInt64, node); - } } template void InstructionSelectorT::VisitTruncateFloat64ToInt64(node_t node) { - if constexpr (Adapter::IsTurboshaft) { - UNIMPLEMENTED(); - } else { VisitRR(this, kPPC_DoubleToInt64, node); - } } template void InstructionSelectorT::VisitTryTruncateFloat32ToUint64( node_t node) { - if constexpr (Adapter::IsTurboshaft) { - UNIMPLEMENTED(); - } else { VisitTryTruncateDouble(this, kPPC_DoubleToUint64, node); - } } template void InstructionSelectorT::VisitTryTruncateFloat64ToUint64( node_t node) { - if constexpr (Adapter::IsTurboshaft) { - UNIMPLEMENTED(); - } else { VisitTryTruncateDouble(this, kPPC_DoubleToUint64, node); - } } template void InstructionSelectorT::VisitTryTruncateFloat64ToInt32( node_t node) { - if constexpr (Adapter::IsTurboshaft) { - UNIMPLEMENTED(); - } else { VisitTryTruncateDouble(this, kPPC_DoubleToInt32, node); - } } template void InstructionSelectorT::VisitTryTruncateFloat64ToUint32( node_t node) { - if constexpr (Adapter::IsTurboshaft) { - UNIMPLEMENTED(); - } else { VisitTryTruncateDouble(this, kPPC_DoubleToUint32, node); - } } template void InstructionSelectorT::VisitBitcastWord32ToWord64(node_t node) { - if constexpr (Adapter::IsTurboshaft) { - UNIMPLEMENTED(); - } else { DCHECK(SmiValuesAre31Bits()); DCHECK(COMPRESS_POINTERS_BOOL); EmitIdentity(node); - } } template void InstructionSelectorT::VisitChangeInt32ToInt64(node_t node) { - if constexpr (Adapter::IsTurboshaft) { - UNIMPLEMENTED(); - } else { // TODO(mbrandy): inspect input to see if nop is appropriate. VisitRR(this, kPPC_ExtendSignWord32, node); - } } template void InstructionSelectorT::VisitSignExtendWord8ToInt64(node_t node) { - if constexpr (Adapter::IsTurboshaft) { - UNIMPLEMENTED(); - } else { // TODO(mbrandy): inspect input to see if nop is appropriate. VisitRR(this, kPPC_ExtendSignWord8, node); - } } template void InstructionSelectorT::VisitSignExtendWord16ToInt64(node_t node) { - if constexpr (Adapter::IsTurboshaft) { - UNIMPLEMENTED(); - } else { // TODO(mbrandy): inspect input to see if nop is appropriate. VisitRR(this, kPPC_ExtendSignWord16, node); - } } template void InstructionSelectorT::VisitSignExtendWord32ToInt64(node_t node) { - if constexpr (Adapter::IsTurboshaft) { - UNIMPLEMENTED(); - } else { // TODO(mbrandy): inspect input to see if nop is appropriate. VisitRR(this, kPPC_ExtendSignWord32, node); - } } template @@ -1799,67 +1891,49 @@ bool InstructionSelectorT::ZeroExtendsWord32ToWord64NoPhis( template void InstructionSelectorT::VisitChangeUint32ToUint64(node_t node) { - if constexpr (Adapter::IsTurboshaft) { - UNIMPLEMENTED(); - } else { // TODO(mbrandy): inspect input to see if nop is appropriate. 
VisitRR(this, kPPC_Uint32ToUint64, node); - } } template void InstructionSelectorT::VisitChangeFloat64ToUint64(node_t node) { - if constexpr (Adapter::IsTurboshaft) { - UNIMPLEMENTED(); - } else { VisitRR(this, kPPC_DoubleToUint64, node); - } } template void InstructionSelectorT::VisitChangeFloat64ToInt64(node_t node) { - if constexpr (Adapter::IsTurboshaft) { - UNIMPLEMENTED(); - } else { VisitRR(this, kPPC_DoubleToInt64, node); - } } #endif template void InstructionSelectorT::VisitTruncateFloat64ToFloat32(node_t node) { - if constexpr (Adapter::IsTurboshaft) { - UNIMPLEMENTED(); - } else { VisitRR(this, kPPC_DoubleToFloat32, node); - } } template void InstructionSelectorT::VisitTruncateFloat64ToWord32(node_t node) { - if constexpr (Adapter::IsTurboshaft) { - UNIMPLEMENTED(); - } else { - VisitRR(this, kArchTruncateDoubleToI, node); - } + VisitRR(this, kArchTruncateDoubleToI, node); } template void InstructionSelectorT::VisitRoundFloat64ToInt32(node_t node) { - if constexpr (Adapter::IsTurboshaft) { - UNIMPLEMENTED(); - } else { VisitRR(this, kPPC_DoubleToInt32, node); - } } template void InstructionSelectorT::VisitTruncateFloat32ToInt32(node_t node) { + PPCOperandGeneratorT g(this); if constexpr (Adapter::IsTurboshaft) { - UNIMPLEMENTED(); + using namespace turboshaft; // NOLINT(build/namespaces) + const Operation& op = this->Get(node); + InstructionCode opcode = kPPC_Float32ToInt32; + if (op.Is()) { + opcode |= MiscField::encode(true); + } + Emit(opcode, g.DefineAsRegister(node), + g.UseRegister(this->input_at(node, 0))); } else { - PPCOperandGeneratorT g(this); - InstructionCode opcode = kPPC_Float32ToInt32; TruncateKind kind = OpParameter(node->op()); if (kind == TruncateKind::kSetOverflowToMin) { @@ -1872,10 +1946,18 @@ void InstructionSelectorT::VisitTruncateFloat32ToInt32(node_t node) { template void InstructionSelectorT::VisitTruncateFloat32ToUint32(node_t node) { + PPCOperandGeneratorT g(this); if constexpr (Adapter::IsTurboshaft) { - UNIMPLEMENTED(); + using namespace turboshaft; // NOLINT(build/namespaces) + const Operation& op = this->Get(node); + InstructionCode opcode = kPPC_Float32ToUint32; + if (op.Is()) { + opcode |= MiscField::encode(true); + } + + Emit(opcode, g.DefineAsRegister(node), + g.UseRegister(this->input_at(node, 0))); } else { - PPCOperandGeneratorT g(this); InstructionCode opcode = kPPC_Float32ToUint32; TruncateKind kind = OpParameter(node->op()); @@ -1890,459 +1972,283 @@ void InstructionSelectorT::VisitTruncateFloat32ToUint32(node_t node) { #if V8_TARGET_ARCH_PPC64 template void InstructionSelectorT::VisitTruncateInt64ToInt32(node_t node) { - if constexpr (Adapter::IsTurboshaft) { - UNIMPLEMENTED(); - } else { // TODO(mbrandy): inspect input to see if nop is appropriate. 
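For the float-to-integer truncations, the Turboshaft path no longer reads TruncateKind off the operator; it re-derives the "set overflow result to minimum" request from the operation's opmask (the angle-bracketed argument lost in this copy of the diff) and records it by OR-ing a MiscField bit into the instruction code. A toy sketch of that encode-a-flag-into-the-opcode pattern; the bit layout and names below are invented for illustration, not V8's actual field assignment.

#include <cassert>
#include <cstdint>

// Minimal stand-in for V8's BitField utility: a 1-bit flag stored at `Shift`.
template <int Shift>
struct BoolField {
  static constexpr uint32_t encode(bool v) {
    return static_cast<uint32_t>(v) << Shift;
  }
  static constexpr bool decode(uint32_t word) { return (word >> Shift) & 1u; }
};

// Pretend opcode numbering with a MiscField-like flag in the high bits.
constexpr uint32_t kFloat32ToInt32 = 0x2A;
using SetOverflowToMinField = BoolField<22>;

uint32_t SelectTruncateOpcode(bool overflow_to_min) {
  uint32_t opcode = kFloat32ToInt32;
  if (overflow_to_min) opcode |= SetOverflowToMinField::encode(true);
  return opcode;
}

int main() {
  assert(!SetOverflowToMinField::decode(SelectTruncateOpcode(false)));
  assert(SetOverflowToMinField::decode(SelectTruncateOpcode(true)));
  assert((SelectTruncateOpcode(true) & 0xFF) == kFloat32ToInt32);  // base intact
}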
VisitRR(this, kPPC_Int64ToInt32, node); - } } -template -void InstructionSelectorT::VisitRoundInt64ToFloat32(node_t node) { - if constexpr (Adapter::IsTurboshaft) { - UNIMPLEMENTED(); - } else { +template +void InstructionSelectorT::VisitRoundInt64ToFloat32(node_t node) { VisitRR(this, kPPC_Int64ToFloat32, node); - } } template void InstructionSelectorT::VisitRoundInt64ToFloat64(node_t node) { - if constexpr (Adapter::IsTurboshaft) { - UNIMPLEMENTED(); - } else { VisitRR(this, kPPC_Int64ToDouble, node); - } } template void InstructionSelectorT::VisitChangeInt64ToFloat64(node_t node) { - if constexpr (Adapter::IsTurboshaft) { - UNIMPLEMENTED(); - } else { VisitRR(this, kPPC_Int64ToDouble, node); - } } template void InstructionSelectorT::VisitRoundUint64ToFloat32(node_t node) { - if constexpr (Adapter::IsTurboshaft) { - UNIMPLEMENTED(); - } else { VisitRR(this, kPPC_Uint64ToFloat32, node); - } } template void InstructionSelectorT::VisitRoundUint64ToFloat64(node_t node) { - if constexpr (Adapter::IsTurboshaft) { - UNIMPLEMENTED(); - } else { VisitRR(this, kPPC_Uint64ToDouble, node); - } } #endif template void InstructionSelectorT::VisitBitcastFloat32ToInt32(node_t node) { - if constexpr (Adapter::IsTurboshaft) { - UNIMPLEMENTED(); - } else { - VisitRR(this, kPPC_BitcastFloat32ToInt32, node); - } + VisitRR(this, kPPC_BitcastFloat32ToInt32, node); } #if V8_TARGET_ARCH_PPC64 template void InstructionSelectorT::VisitBitcastFloat64ToInt64(node_t node) { - if constexpr (Adapter::IsTurboshaft) { - UNIMPLEMENTED(); - } else { - VisitRR(this, kPPC_BitcastDoubleToInt64, node); - } + VisitRR(this, kPPC_BitcastDoubleToInt64, node); } #endif template void InstructionSelectorT::VisitBitcastInt32ToFloat32(node_t node) { - if constexpr (Adapter::IsTurboshaft) { - UNIMPLEMENTED(); - } else { VisitRR(this, kPPC_BitcastInt32ToFloat32, node); - } } #if V8_TARGET_ARCH_PPC64 template void InstructionSelectorT::VisitBitcastInt64ToFloat64(node_t node) { - if constexpr (Adapter::IsTurboshaft) { - UNIMPLEMENTED(); - } else { VisitRR(this, kPPC_BitcastInt64ToDouble, node); - } } #endif template void InstructionSelectorT::VisitFloat32Add(node_t node) { - if constexpr (Adapter::IsTurboshaft) { - UNIMPLEMENTED(); - } else { VisitRRR(this, kPPC_AddDouble | MiscField::encode(1), node); - } } template void InstructionSelectorT::VisitFloat64Add(node_t node) { - if constexpr (Adapter::IsTurboshaft) { - UNIMPLEMENTED(); - } else { // TODO(mbrandy): detect multiply-add VisitRRR(this, kPPC_AddDouble, node); - } } template void InstructionSelectorT::VisitFloat32Sub(node_t node) { - if constexpr (Adapter::IsTurboshaft) { - UNIMPLEMENTED(); - } else { VisitRRR(this, kPPC_SubDouble | MiscField::encode(1), node); - } } template void InstructionSelectorT::VisitFloat64Sub(node_t node) { - if constexpr (Adapter::IsTurboshaft) { - UNIMPLEMENTED(); - } else { // TODO(mbrandy): detect multiply-subtract VisitRRR(this, kPPC_SubDouble, node); - } } template void InstructionSelectorT::VisitFloat32Mul(node_t node) { - if constexpr (Adapter::IsTurboshaft) { - UNIMPLEMENTED(); - } else { VisitRRR(this, kPPC_MulDouble | MiscField::encode(1), node); - } } template void InstructionSelectorT::VisitFloat64Mul(node_t node) { - if constexpr (Adapter::IsTurboshaft) { - UNIMPLEMENTED(); - } else { // TODO(mbrandy): detect negate VisitRRR(this, kPPC_MulDouble, node); - } } template void InstructionSelectorT::VisitFloat32Div(node_t node) { - if constexpr (Adapter::IsTurboshaft) { - UNIMPLEMENTED(); - } else { VisitRRR(this, kPPC_DivDouble | 
MiscField::encode(1), node); - } } template void InstructionSelectorT::VisitFloat64Div(node_t node) { - if constexpr (Adapter::IsTurboshaft) { - UNIMPLEMENTED(); - } else { VisitRRR(this, kPPC_DivDouble, node); - } } template void InstructionSelectorT::VisitFloat64Mod(node_t node) { - if constexpr (Adapter::IsTurboshaft) { - UNIMPLEMENTED(); - } else { PPCOperandGeneratorT g(this); Emit(kPPC_ModDouble, g.DefineAsFixed(node, d1), - g.UseFixed(node->InputAt(0), d1), g.UseFixed(node->InputAt(1), d2)) + g.UseFixed(this->input_at(node, 0), d1), + g.UseFixed(this->input_at(node, 1), d2)) ->MarkAsCall(); - } } template void InstructionSelectorT::VisitFloat32Max(node_t node) { - if constexpr (Adapter::IsTurboshaft) { - UNIMPLEMENTED(); - } else { VisitRRR(this, kPPC_MaxDouble | MiscField::encode(1), node); - } } template void InstructionSelectorT::VisitFloat64Max(node_t node) { - if constexpr (Adapter::IsTurboshaft) { - UNIMPLEMENTED(); - } else { VisitRRR(this, kPPC_MaxDouble, node); - } } template void InstructionSelectorT::VisitFloat64SilenceNaN(node_t node) { - if constexpr (Adapter::IsTurboshaft) { - UNIMPLEMENTED(); - } else { VisitRR(this, kPPC_Float64SilenceNaN, node); - } } template void InstructionSelectorT::VisitFloat32Min(node_t node) { - if constexpr (Adapter::IsTurboshaft) { - UNIMPLEMENTED(); - } else { VisitRRR(this, kPPC_MinDouble | MiscField::encode(1), node); - } } template void InstructionSelectorT::VisitFloat64Min(node_t node) { - if constexpr (Adapter::IsTurboshaft) { - UNIMPLEMENTED(); - } else { VisitRRR(this, kPPC_MinDouble, node); - } } template void InstructionSelectorT::VisitFloat32Abs(node_t node) { - if constexpr (Adapter::IsTurboshaft) { - UNIMPLEMENTED(); - } else { VisitRR(this, kPPC_AbsDouble | MiscField::encode(1), node); - } } template void InstructionSelectorT::VisitFloat64Abs(node_t node) { - if constexpr (Adapter::IsTurboshaft) { - UNIMPLEMENTED(); - } else { VisitRR(this, kPPC_AbsDouble, node); - } } template void InstructionSelectorT::VisitFloat32Sqrt(node_t node) { - if constexpr (Adapter::IsTurboshaft) { - UNIMPLEMENTED(); - } else { VisitRR(this, kPPC_SqrtDouble | MiscField::encode(1), node); - } } template void InstructionSelectorT::VisitFloat64Ieee754Unop( node_t node, InstructionCode opcode) { - if constexpr (Adapter::IsTurboshaft) { - UNIMPLEMENTED(); - } else { - PPCOperandGeneratorT g(this); - Emit(opcode, g.DefineAsFixed(node, d1), g.UseFixed(node->InputAt(0), d1)) - ->MarkAsCall(); - } + PPCOperandGeneratorT g(this); + Emit(opcode, g.DefineAsFixed(node, d1), + g.UseFixed(this->input_at(node, 0), d1)) + ->MarkAsCall(); } template void InstructionSelectorT::VisitFloat64Ieee754Binop( node_t node, InstructionCode opcode) { - if constexpr (Adapter::IsTurboshaft) { - UNIMPLEMENTED(); - } else { PPCOperandGeneratorT g(this); - Emit(opcode, g.DefineAsFixed(node, d1), g.UseFixed(node->InputAt(0), d1), - g.UseFixed(node->InputAt(1), d2)) + Emit(opcode, g.DefineAsFixed(node, d1), + g.UseFixed(this->input_at(node, 0), d1), + g.UseFixed(this->input_at(node, 1), d2)) ->MarkAsCall(); - } } template void InstructionSelectorT::VisitFloat64Sqrt(node_t node) { - if constexpr (Adapter::IsTurboshaft) { - UNIMPLEMENTED(); - } else { VisitRR(this, kPPC_SqrtDouble, node); - } } template void InstructionSelectorT::VisitFloat32RoundDown(node_t node) { - if constexpr (Adapter::IsTurboshaft) { - UNIMPLEMENTED(); - } else { VisitRR(this, kPPC_FloorDouble | MiscField::encode(1), node); - } } template void InstructionSelectorT::VisitFloat64RoundDown(node_t node) { - if 
constexpr (Adapter::IsTurboshaft) { - UNIMPLEMENTED(); - } else { VisitRR(this, kPPC_FloorDouble, node); - } } template void InstructionSelectorT::VisitFloat32RoundUp(node_t node) { - if constexpr (Adapter::IsTurboshaft) { - UNIMPLEMENTED(); - } else { VisitRR(this, kPPC_CeilDouble | MiscField::encode(1), node); - } } template void InstructionSelectorT::VisitFloat64RoundUp(node_t node) { - if constexpr (Adapter::IsTurboshaft) { - UNIMPLEMENTED(); - } else { VisitRR(this, kPPC_CeilDouble, node); - } } template void InstructionSelectorT::VisitFloat32RoundTruncate(node_t node) { - if constexpr (Adapter::IsTurboshaft) { - UNIMPLEMENTED(); - } else { VisitRR(this, kPPC_TruncateDouble | MiscField::encode(1), node); - } } template void InstructionSelectorT::VisitFloat64RoundTruncate(node_t node) { - if constexpr (Adapter::IsTurboshaft) { - UNIMPLEMENTED(); - } else { VisitRR(this, kPPC_TruncateDouble, node); - } } template void InstructionSelectorT::VisitFloat64RoundTiesAway(node_t node) { - if constexpr (Adapter::IsTurboshaft) { - UNIMPLEMENTED(); - } else { VisitRR(this, kPPC_RoundDouble, node); - } } template void InstructionSelectorT::VisitFloat32Neg(node_t node) { - if constexpr (Adapter::IsTurboshaft) { - UNIMPLEMENTED(); - } else { VisitRR(this, kPPC_NegDouble, node); - } } template void InstructionSelectorT::VisitFloat64Neg(node_t node) { - if constexpr (Adapter::IsTurboshaft) { - UNIMPLEMENTED(); - } else { VisitRR(this, kPPC_NegDouble, node); - } } template void InstructionSelectorT::VisitInt32AddWithOverflow(node_t node) { - if constexpr (Adapter::IsTurboshaft) { - UNIMPLEMENTED(); - } else { - if (Node* ovf = NodeProperties::FindProjection(node, 1)) { - FlagsContinuation cont = FlagsContinuation::ForSet(kOverflow, ovf); - return VisitBinop( - this, node, kPPC_AddWithOverflow32, kInt16Imm, &cont); - } - FlagsContinuation cont; - VisitBinop(this, node, kPPC_AddWithOverflow32, - kInt16Imm, &cont); + node_t ovf = FindProjection(node, 1); + if (this->valid(ovf)) { + FlagsContinuation cont = FlagsContinuation::ForSet(kOverflow, ovf); + return VisitBinop(this, node, kPPC_AddWithOverflow32, kInt16Imm, + &cont); } + FlagsContinuation cont; + VisitBinop(this, node, kPPC_AddWithOverflow32, kInt16Imm, &cont); } template void InstructionSelectorT::VisitInt32SubWithOverflow(node_t node) { - if constexpr (Adapter::IsTurboshaft) { - UNIMPLEMENTED(); - } else { - if (Node* ovf = NodeProperties::FindProjection(node, 1)) { - FlagsContinuation cont = FlagsContinuation::ForSet(kOverflow, ovf); - return VisitBinop( - this, node, kPPC_SubWithOverflow32, kInt16Imm_Negate, &cont); - } - FlagsContinuation cont; - VisitBinop(this, node, kPPC_SubWithOverflow32, - kInt16Imm_Negate, &cont); + node_t ovf = FindProjection(node, 1); + if (this->valid(ovf)) { + FlagsContinuation cont = FlagsContinuation::ForSet(kOverflow, ovf); + return VisitBinop(this, node, kPPC_SubWithOverflow32, + kInt16Imm_Negate, &cont); } + FlagsContinuation cont; + VisitBinop(this, node, kPPC_SubWithOverflow32, kInt16Imm_Negate, + &cont); } #if V8_TARGET_ARCH_PPC64 template void InstructionSelectorT::VisitInt64AddWithOverflow(node_t node) { - if constexpr (Adapter::IsTurboshaft) { - UNIMPLEMENTED(); - } else { - if (Node* ovf = NodeProperties::FindProjection(node, 1)) { - FlagsContinuation cont = FlagsContinuation::ForSet(kOverflow, ovf); - return VisitBinop(this, node, kPPC_Add64, - kInt16Imm, &cont); - } - FlagsContinuation cont; - VisitBinop(this, node, kPPC_Add64, kInt16Imm, - &cont); + node_t ovf = FindProjection(node, 1); + if 
(this->valid(ovf)) { + FlagsContinuation cont = FlagsContinuation::ForSet(kOverflow, ovf); + return VisitBinop(this, node, kPPC_Add64, kInt16Imm, &cont); } + FlagsContinuation cont; + VisitBinop(this, node, kPPC_Add64, kInt16Imm, &cont); } template void InstructionSelectorT::VisitInt64SubWithOverflow(node_t node) { - if constexpr (Adapter::IsTurboshaft) { - UNIMPLEMENTED(); - } else { - if (Node* ovf = NodeProperties::FindProjection(node, 1)) { - FlagsContinuation cont = FlagsContinuation::ForSet(kOverflow, ovf); - return VisitBinop(this, node, kPPC_Sub, - kInt16Imm_Negate, &cont); - } - FlagsContinuation cont; - VisitBinop(this, node, kPPC_Sub, - kInt16Imm_Negate, &cont); + node_t ovf = FindProjection(node, 1); + if (this->valid(ovf)) { + FlagsContinuation cont = FlagsContinuation::ForSet(kOverflow, ovf); + return VisitBinop(this, node, kPPC_Sub, kInt16Imm_Negate, &cont); } + FlagsContinuation cont; + VisitBinop(this, node, kPPC_Sub, kInt16Imm_Negate, &cont); } template void InstructionSelectorT::VisitInt64MulWithOverflow(node_t node) { - if constexpr (Adapter::IsTurboshaft) { - UNIMPLEMENTED(); - } else { - if (Node* ovf = NodeProperties::FindProjection(node, 1)) { - FlagsContinuation cont = FlagsContinuation::ForSet(kNotEqual, ovf); - return EmitInt64MulWithOverflow(this, node, &cont); - } + node_t ovf = FindProjection(node, 1); + if (this->valid(ovf)) { + FlagsContinuation cont = FlagsContinuation::ForSet(kNotEqual, ovf); + return EmitInt64MulWithOverflow(this, node, &cont); + } FlagsContinuation cont; EmitInt64MulWithOverflow(this, node, &cont); - } } #endif template static bool CompareLogical(FlagsContinuationT* cont) { - if constexpr (Adapter::IsTurboshaft) { - UNIMPLEMENTED(); - } else { switch (cont->condition()) { case kUnsignedLessThan: case kUnsignedGreaterThanOrEqual: @@ -2353,7 +2259,6 @@ static bool CompareLogical(FlagsContinuationT* cont) { return false; } UNREACHABLE(); - } } namespace { @@ -2372,44 +2277,37 @@ void VisitWordCompare(InstructionSelectorT* selector, typename Adapter::node_t node, InstructionCode opcode, FlagsContinuationT* cont, bool commutative, ImmediateMode immediate_mode) { - if constexpr (Adapter::IsTurboshaft) { - UNIMPLEMENTED(); - } else { PPCOperandGeneratorT g(selector); - Node* left = node->InputAt(0); - Node* right = node->InputAt(1); + typename Adapter::node_t lhs = selector->input_at(node, 0); + typename Adapter::node_t rhs = selector->input_at(node, 1); // Match immediates on left or right side of comparison. - if (g.CanBeImmediate(right, immediate_mode)) { - VisitCompare(selector, opcode, g.UseRegister(left), g.UseImmediate(right), + if (g.CanBeImmediate(rhs, immediate_mode)) { + VisitCompare(selector, opcode, g.UseRegister(lhs), g.UseImmediate(rhs), cont); - } else if (g.CanBeImmediate(left, immediate_mode)) { + } else if (g.CanBeImmediate(lhs, immediate_mode)) { if (!commutative) cont->Commute(); - VisitCompare(selector, opcode, g.UseRegister(right), g.UseImmediate(left), + VisitCompare(selector, opcode, g.UseRegister(rhs), g.UseImmediate(lhs), cont); } else { - VisitCompare(selector, opcode, g.UseRegister(left), g.UseRegister(right), + VisitCompare(selector, opcode, g.UseRegister(lhs), g.UseRegister(rhs), cont); } - } } template void VisitWord32Compare(InstructionSelectorT* selector, typename Adapter::node_t node, FlagsContinuationT* cont) { - if constexpr (Adapter::IsTurboshaft) { - UNIMPLEMENTED(); - } else { ImmediateMode mode = (CompareLogical(cont) ? 
kInt16Imm_Unsigned : kInt16Imm); VisitWordCompare(selector, node, kPPC_Cmp32, cont, false, mode); - } } #if V8_TARGET_ARCH_PPC64 template -void VisitWord64Compare(InstructionSelectorT* selector, Node* node, +void VisitWord64Compare(InstructionSelectorT* selector, + typename Adapter::node_t node, FlagsContinuationT* cont) { ImmediateMode mode = (CompareLogical(cont) ? kInt16Imm_Unsigned : kInt16Imm); VisitWordCompare(selector, node, kPPC_Cmp64, cont, false, mode); @@ -2421,15 +2319,11 @@ template void VisitFloat32Compare(InstructionSelectorT* selector, typename Adapter::node_t node, FlagsContinuationT* cont) { - if constexpr (Adapter::IsTurboshaft) { - UNIMPLEMENTED(); - } else { PPCOperandGeneratorT g(selector); - Node* left = node->InputAt(0); - Node* right = node->InputAt(1); - VisitCompare(selector, kPPC_CmpDouble, g.UseRegister(left), - g.UseRegister(right), cont); - } + typename Adapter::node_t lhs = selector->input_at(node, 0); + typename Adapter::node_t rhs = selector->input_at(node, 1); + VisitCompare(selector, kPPC_CmpDouble, g.UseRegister(lhs), + g.UseRegister(rhs), cont); } // Shared routine for multiple float64 compare operations. @@ -2437,15 +2331,11 @@ template void VisitFloat64Compare(InstructionSelectorT* selector, typename Adapter::node_t node, FlagsContinuationT* cont) { - if constexpr (Adapter::IsTurboshaft) { - UNIMPLEMENTED(); - } else { PPCOperandGeneratorT g(selector); - Node* left = node->InputAt(0); - Node* right = node->InputAt(1); - VisitCompare(selector, kPPC_CmpDouble, g.UseRegister(left), - g.UseRegister(right), cont); - } + typename Adapter::node_t lhs = selector->input_at(node, 0); + typename Adapter::node_t rhs = selector->input_at(node, 1); + VisitCompare(selector, kPPC_CmpDouble, g.UseRegister(lhs), + g.UseRegister(rhs), cont); } } // namespace @@ -2454,9 +2344,6 @@ void VisitFloat64Compare(InstructionSelectorT* selector, template void InstructionSelectorT::VisitWordCompareZero( node_t user, node_t value, FlagsContinuation* cont) { - if constexpr (Adapter::IsTurboshaft) { - UNIMPLEMENTED(); - } else { // Try to combine with comparisons against 0 by simply inverting the branch. 
while (value->opcode() == IrOpcode::kWord32Equal && CanCover(user, value)) { Int32BinopMatcher m(value); @@ -2534,24 +2421,24 @@ void InstructionSelectorT::VisitWordCompareZero( switch (node->opcode()) { case IrOpcode::kInt32AddWithOverflow: cont->OverwriteAndNegateIfEqual(kOverflow); - return VisitBinop( - this, node, kPPC_AddWithOverflow32, kInt16Imm, cont); + return VisitBinop(this, node, kPPC_AddWithOverflow32, + kInt16Imm, cont); case IrOpcode::kInt32SubWithOverflow: cont->OverwriteAndNegateIfEqual(kOverflow); - return VisitBinop( - this, node, kPPC_SubWithOverflow32, kInt16Imm_Negate, cont); + return VisitBinop(this, node, kPPC_SubWithOverflow32, + kInt16Imm_Negate, cont); case IrOpcode::kInt32MulWithOverflow: cont->OverwriteAndNegateIfEqual(kNotEqual); return EmitInt32MulWithOverflow(this, node, cont); #if V8_TARGET_ARCH_PPC64 case IrOpcode::kInt64AddWithOverflow: cont->OverwriteAndNegateIfEqual(kOverflow); - return VisitBinop( - this, node, kPPC_Add64, kInt16Imm, cont); + return VisitBinop(this, node, kPPC_Add64, kInt16Imm, + cont); case IrOpcode::kInt64SubWithOverflow: cont->OverwriteAndNegateIfEqual(kOverflow); - return VisitBinop( - this, node, kPPC_Sub, kInt16Imm_Negate, cont); + return VisitBinop(this, node, kPPC_Sub, + kInt16Imm_Negate, cont); case IrOpcode::kInt64MulWithOverflow: cont->OverwriteAndNegateIfEqual(kNotEqual); return EmitInt64MulWithOverflow(this, node, cont); @@ -2604,17 +2491,132 @@ void InstructionSelectorT::VisitWordCompareZero( PPCOperandGeneratorT g(this); VisitCompare(this, kPPC_Cmp32, g.UseRegister(value), g.TempImmediate(0), cont); +} + +template <> +void InstructionSelectorT::VisitWordCompareZero( + node_t user, node_t value, FlagsContinuation* cont) { + using namespace turboshaft; // NOLINT(build/namespaces) + // Try to combine with comparisons against 0 by simply inverting the branch. 
+ while (const ComparisonOp* equal = + this->TryCast(value)) { + if (!CanCover(user, value)) break; + if (!MatchIntegralZero(equal->right())) break; + + user = value; + value = equal->left(); + cont->Negate(); + } + + if (CanCover(user, value)) { + const Operation& value_op = Get(value); + if (const ComparisonOp* comparison = value_op.TryCast()) { + switch (comparison->rep.MapTaggedToWord().value()) { + case RegisterRepresentation::Word32(): + cont->OverwriteAndNegateIfEqual( + GetComparisonFlagCondition(*comparison)); + return VisitWord32Compare(this, value, cont); + case RegisterRepresentation::Word64(): + cont->OverwriteAndNegateIfEqual( + GetComparisonFlagCondition(*comparison)); + return VisitWord64Compare(this, value, cont); + case RegisterRepresentation::Float32(): + switch (comparison->kind) { + case ComparisonOp::Kind::kEqual: + cont->OverwriteAndNegateIfEqual(kEqual); + return VisitFloat32Compare(this, value, cont); + case ComparisonOp::Kind::kSignedLessThan: + cont->OverwriteAndNegateIfEqual(kUnsignedLessThan); + return VisitFloat32Compare(this, value, cont); + case ComparisonOp::Kind::kSignedLessThanOrEqual: + cont->OverwriteAndNegateIfEqual(kUnsignedLessThanOrEqual); + return VisitFloat32Compare(this, value, cont); + default: + UNREACHABLE(); + } + case RegisterRepresentation::Float64(): + switch (comparison->kind) { + case ComparisonOp::Kind::kEqual: + cont->OverwriteAndNegateIfEqual(kEqual); + return VisitFloat64Compare(this, value, cont); + case ComparisonOp::Kind::kSignedLessThan: + cont->OverwriteAndNegateIfEqual(kUnsignedLessThan); + return VisitFloat64Compare(this, value, cont); + case ComparisonOp::Kind::kSignedLessThanOrEqual: + cont->OverwriteAndNegateIfEqual(kUnsignedLessThanOrEqual); + return VisitFloat64Compare(this, value, cont); + default: + UNREACHABLE(); + } + default: + break; + } + } else if (const ProjectionOp* projection = + value_op.TryCast()) { + // Check if this is the overflow output projection of an + // WithOverflow node. + if (projection->index == 1u) { + // We cannot combine the WithOverflow with this branch + // unless the 0th projection (the use of the actual value of the + // is either nullptr, which means there's no use of the + // actual value, or was already defined, which means it is scheduled + // *AFTER* this branch). + OpIndex node = projection->input(); + OpIndex result = FindProjection(node, 0); + if (!result.valid() || IsDefined(result)) { + if (const OverflowCheckedBinopOp* binop = + TryCast(node)) { + const bool is64 = binop->rep == WordRepresentation::Word64(); + switch (binop->kind) { + case OverflowCheckedBinopOp::Kind::kSignedAdd: + cont->OverwriteAndNegateIfEqual(kOverflow); + return VisitBinop(this, node, + is64 ? kPPC_Add64 : kPPC_AddWithOverflow32, + kInt16Imm, cont); + case OverflowCheckedBinopOp::Kind::kSignedSub: + cont->OverwriteAndNegateIfEqual(kOverflow); + return VisitBinop(this, node, + is64 ? 
kPPC_Sub : kPPC_SubWithOverflow32, + kInt16Imm_Negate, cont); + case OverflowCheckedBinopOp::Kind::kSignedMul: + if (is64) { + cont->OverwriteAndNegateIfEqual(kNotEqual); + return EmitInt64MulWithOverflow(this, node, cont); + } else { + cont->OverwriteAndNegateIfEqual(kNotEqual); + return EmitInt32MulWithOverflow(this, node, cont); + } + } + } + } + } + } else if (value_op.Is()) { + return VisitWord32Compare(this, value, cont); + } else if (value_op.Is()) { + return VisitWordCompare(this, value, kPPC_Tst32, cont, true, + kInt16Imm_Unsigned); + } else if (value_op.Is()) { + return VisitWord64Compare(this, value, cont); + } else if (value_op.Is()) { + return VisitWordCompare(this, value, kPPC_Tst64, cont, true, + kInt16Imm_Unsigned); + } else if (value_op.Is()) { + cont->OverwriteAndNegateIfEqual(kStackPointerGreaterThanCondition); + return VisitStackPointerGreaterThan(value, cont); + } } + + // Branch could not be combined with a compare, emit compare against 0. + PPCOperandGeneratorT g(this); + VisitCompare(this, kPPC_Cmp32, g.UseRegister(value), g.TempImmediate(0), + cont); } template void InstructionSelectorT::VisitSwitch(node_t node, const SwitchInfo& sw) { - if constexpr (Adapter::IsTurboshaft) { - UNIMPLEMENTED(); - } else { PPCOperandGeneratorT g(this); - InstructionOperand value_operand = g.UseRegister(node->InputAt(0)); + InstructionOperand value_operand = g.UseRegister(this->input_at(node, 0)); // Emit either ArchTableSwitch or ArchBinarySearchSwitch. if (enable_switch_jump_table_ == @@ -2646,26 +2648,23 @@ void InstructionSelectorT::VisitSwitch(node_t node, // Generate a tree of conditional jumps. return EmitBinarySearchSwitch(sw, value_operand); - } } -template -void InstructionSelectorT::VisitWord32Equal(node_t const node) { - if constexpr (Adapter::IsTurboshaft) { - UNIMPLEMENTED(); - } else { +template <> +void InstructionSelectorT::VisitWord32Equal( + node_t const node) { FlagsContinuation cont = FlagsContinuation::ForSet(kEqual, node); if (isolate() && (V8_STATIC_ROOTS_BOOL || (COMPRESS_POINTERS_BOOL && !isolate()->bootstrapper()))) { - PPCOperandGeneratorT g(this); - const RootsTable& roots_table = isolate()->roots_table(); - RootIndex root_index; - Node* left = nullptr; - Handle right; - // HeapConstants and CompressedHeapConstants can be treated the same when - // using them as an input to a 32-bit comparison. Check whether either is - // present. - { + PPCOperandGeneratorT g(this); + const RootsTable& roots_table = isolate()->roots_table(); + RootIndex root_index; + Node* left = nullptr; + Handle right; + // HeapConstants and CompressedHeapConstants can be treated the same when + // using them as an input to a 32-bit comparison. Check whether either is + // present. 
+ { CompressedHeapObjectBinopMatcher m(node); if (m.right().HasResolvedValue()) { left = m.left().node(); @@ -2677,7 +2676,7 @@ void InstructionSelectorT::VisitWord32Equal(node_t const node) { right = m2.right().ResolvedValue(); } } - } + } if (!right.is_null() && roots_table.IsRootHandle(right, &root_index)) { DCHECK_NE(left, nullptr); if (RootsTable::IsReadOnly(root_index)) { @@ -2686,11 +2685,42 @@ void InstructionSelectorT::VisitWord32Equal(node_t const node) { return VisitCompare(this, kPPC_Cmp32, g.UseRegister(left), g.TempImmediate(ptr), &cont); } - } - } + } + } + } + VisitWord32Compare(this, node, &cont); +} + +template <> +void InstructionSelectorT::VisitWord32Equal( + node_t const node) { + using namespace turboshaft; // NOLINT(build/namespaces) + const Operation& equal = Get(node); + DCHECK(equal.Is()); + OpIndex left = equal.input(0); + FlagsContinuation cont = FlagsContinuation::ForSet(kEqual, node); + if (isolate() && (V8_STATIC_ROOTS_BOOL || + (COMPRESS_POINTERS_BOOL && !isolate()->bootstrapper()))) { + PPCOperandGeneratorT g(this); + const RootsTable& roots_table = isolate()->roots_table(); + RootIndex root_index; + Handle right; + // HeapConstants and CompressedHeapConstants can be treated the same when + // using them as an input to a 32-bit comparison. Check whether either is + // present. + if (MatchTaggedConstant(node, &right) && !right.is_null() && + roots_table.IsRootHandle(right, &root_index)) { + if (RootsTable::IsReadOnly(root_index)) { + Tagged_t ptr = + MacroAssemblerBase::ReadOnlyRootPtr(root_index, isolate()); + if (g.CanBeImmediate(ptr, kInt16Imm)) { + return VisitCompare(this, kPPC_Cmp32, g.UseRegister(left), + g.TempImmediate(ptr), &cont); + } + } + } } VisitWord32Compare(this, node, &cont); - } } template @@ -2722,69 +2752,46 @@ void InstructionSelectorT::VisitUint32LessThanOrEqual(node_t node) { #if V8_TARGET_ARCH_PPC64 template void InstructionSelectorT::VisitWord64Equal(node_t const node) { - if constexpr (Adapter::IsTurboshaft) { - UNIMPLEMENTED(); - } else { FlagsContinuation cont = FlagsContinuation::ForSet(kEqual, node); VisitWord64Compare(this, node, &cont); - } } template void InstructionSelectorT::VisitInt64LessThan(node_t node) { - if constexpr (Adapter::IsTurboshaft) { - UNIMPLEMENTED(); - } else { FlagsContinuation cont = FlagsContinuation::ForSet(kSignedLessThan, node); VisitWord64Compare(this, node, &cont); - } } template void InstructionSelectorT::VisitInt64LessThanOrEqual(node_t node) { - if constexpr (Adapter::IsTurboshaft) { - UNIMPLEMENTED(); - } else { FlagsContinuation cont = FlagsContinuation::ForSet(kSignedLessThanOrEqual, node); VisitWord64Compare(this, node, &cont); - } } template void InstructionSelectorT::VisitUint64LessThan(node_t node) { - if constexpr (Adapter::IsTurboshaft) { - UNIMPLEMENTED(); - } else { FlagsContinuation cont = FlagsContinuation::ForSet(kUnsignedLessThan, node); VisitWord64Compare(this, node, &cont); - } } template void InstructionSelectorT::VisitUint64LessThanOrEqual(node_t node) { - if constexpr (Adapter::IsTurboshaft) { - UNIMPLEMENTED(); - } else { FlagsContinuation cont = FlagsContinuation::ForSet(kUnsignedLessThanOrEqual, node); VisitWord64Compare(this, node, &cont); - } } #endif template void InstructionSelectorT::VisitInt32MulWithOverflow(node_t node) { - if constexpr (Adapter::IsTurboshaft) { - UNIMPLEMENTED(); - } else { - if (Node* ovf = NodeProperties::FindProjection(node, 1)) { + node_t ovf = FindProjection(node, 1); + if (this->valid(ovf)) { FlagsContinuation cont = 
FlagsContinuation::ForSet(kNotEqual, ovf); return EmitInt32MulWithOverflow(this, node, &cont); } FlagsContinuation cont; EmitInt32MulWithOverflow(this, node, &cont); - } } template @@ -2837,9 +2844,6 @@ template void InstructionSelectorT::EmitPrepareArguments( ZoneVector* arguments, const CallDescriptor* call_descriptor, node_t node) { - if constexpr (Adapter::IsTurboshaft) { - UNIMPLEMENTED(); - } else { PPCOperandGeneratorT g(this); // Prepare for C function call. @@ -2851,7 +2855,7 @@ void InstructionSelectorT::EmitPrepareArguments( // Poke any stack arguments. int slot = kStackFrameExtraParamSlot; for (PushParameter input : (*arguments)) { - if (input.node == nullptr) continue; + if (!this->valid(input.node)) continue; Emit(kPPC_StoreToStackSlot, g.NoOutput(), g.UseRegister(input.node), g.TempImmediate(slot)); ++slot; @@ -2862,13 +2866,12 @@ void InstructionSelectorT::EmitPrepareArguments( for (PushParameter input : base::Reversed(*arguments)) { stack_decrement += kSystemPointerSize; // Skip any alignment holes in pushed nodes. - if (input.node == nullptr) continue; + if (!this->valid(input.node)) continue; InstructionOperand decrement = g.UseImmediate(stack_decrement); stack_decrement = 0; Emit(kPPC_Push, g.NoOutput(), decrement, g.UseRegister(input.node)); } } - } } template @@ -2878,24 +2881,16 @@ bool InstructionSelectorT::IsTailCallAddressImmediate() { template void InstructionSelectorT::VisitFloat64ExtractLowWord32(node_t node) { - if constexpr (Adapter::IsTurboshaft) { - UNIMPLEMENTED(); - } else { PPCOperandGeneratorT g(this); Emit(kPPC_DoubleExtractLowWord32, g.DefineAsRegister(node), - g.UseRegister(node->InputAt(0))); - } + g.UseRegister(this->input_at(node, 0))); } template void InstructionSelectorT::VisitFloat64ExtractHighWord32(node_t node) { - if constexpr (Adapter::IsTurboshaft) { - UNIMPLEMENTED(); - } else { PPCOperandGeneratorT g(this); Emit(kPPC_DoubleExtractHighWord32, g.DefineAsRegister(node), - g.UseRegister(node->InputAt(0))); - } + g.UseRegister(this->input_at(node, 0))); } template @@ -2940,67 +2935,57 @@ void InstructionSelectorT::VisitFloat64InsertHighWord32(node_t node) { template void InstructionSelectorT::VisitMemoryBarrier(node_t node) { - if constexpr (Adapter::IsTurboshaft) { - UNIMPLEMENTED(); - } else { PPCOperandGeneratorT g(this); Emit(kPPC_Sync, g.NoOutput()); - } } template void InstructionSelectorT::VisitWord32AtomicLoad(node_t node) { - if constexpr (Adapter::IsTurboshaft) { - UNIMPLEMENTED(); - } else { - AtomicLoadParameters atomic_load_params = - AtomicLoadParametersOf(node->op()); - LoadRepresentation load_rep = atomic_load_params.representation(); - VisitLoadCommon(this, node, load_rep); - } + OperandGeneratorT g(this); + auto load = this->load_view(node); + LoadRepresentation load_rep = load.loaded_rep(); + VisitLoadCommon(this, node, load_rep); } template void InstructionSelectorT::VisitWord64AtomicLoad(node_t node) { - if constexpr (Adapter::IsTurboshaft) { - UNIMPLEMENTED(); - } else { - AtomicLoadParameters atomic_load_params = - AtomicLoadParametersOf(node->op()); - LoadRepresentation load_rep = atomic_load_params.representation(); - VisitLoadCommon(this, node, load_rep); - } + OperandGeneratorT g(this); + auto load = this->load_view(node); + LoadRepresentation load_rep = load.loaded_rep(); + VisitLoadCommon(this, node, load_rep); } template void InstructionSelectorT::VisitWord32AtomicStore(node_t node) { - if constexpr (Adapter::IsTurboshaft) { - UNIMPLEMENTED(); - } else { - AtomicStoreParameters store_params = 
AtomicStoreParametersOf(node->op()); - VisitStoreCommon(this, node, store_params.store_representation(), - store_params.order()); - } + auto store = this->store_view(node); + AtomicStoreParameters store_params(store.stored_rep().representation(), + store.stored_rep().write_barrier_kind(), + store.memory_order().value(), + store.access_kind()); + VisitStoreCommon(this, node, store_params.store_representation(), + store_params.order()); } template void InstructionSelectorT::VisitWord64AtomicStore(node_t node) { - if constexpr (Adapter::IsTurboshaft) { - UNIMPLEMENTED(); - } else { - AtomicStoreParameters store_params = AtomicStoreParametersOf(node->op()); - VisitStoreCommon(this, node, store_params.store_representation(), - store_params.order()); - } + auto store = this->store_view(node); + AtomicStoreParameters store_params(store.stored_rep().representation(), + store.stored_rep().write_barrier_kind(), + store.memory_order().value(), + store.access_kind()); + VisitStoreCommon(this, node, store_params.store_representation(), + store_params.order()); } template -void VisitAtomicExchange(InstructionSelectorT* selector, Node* node, - ArchOpcode opcode) { +void VisitAtomicExchange(InstructionSelectorT* selector, + typename Adapter::node_t node, ArchOpcode opcode) { + using node_t = typename Adapter::node_t; PPCOperandGeneratorT g(selector); - Node* base = node->InputAt(0); - Node* index = node->InputAt(1); - Node* value = node->InputAt(2); + auto atomic_op = selector->atomic_rmw_view(node); + node_t base = atomic_op.base(); + node_t index = atomic_op.index(); + node_t value = atomic_op.value(); AddressingMode addressing_mode = kMode_MRR; InstructionOperand inputs[3]; @@ -3014,66 +2999,90 @@ void VisitAtomicExchange(InstructionSelectorT* selector, Node* node, selector->Emit(code, 1, outputs, input_count, inputs); } -template <> -void InstructionSelectorT::VisitWord32AtomicExchange( - node_t node) { - UNIMPLEMENTED(); -} - -template <> -void InstructionSelectorT::VisitWord32AtomicExchange( - Node* node) { +template +void InstructionSelectorT::VisitWord32AtomicExchange(node_t node) { ArchOpcode opcode; - MachineType type = AtomicOpType(node->op()); - if (type == MachineType::Int8()) { - opcode = kAtomicExchangeInt8; - } else if (type == MachineType::Uint8()) { - opcode = kPPC_AtomicExchangeUint8; - } else if (type == MachineType::Int16()) { - opcode = kAtomicExchangeInt16; - } else if (type == MachineType::Uint16()) { - opcode = kPPC_AtomicExchangeUint16; - } else if (type == MachineType::Int32() || type == MachineType::Uint32()) { - opcode = kPPC_AtomicExchangeWord32; + if constexpr (Adapter::IsTurboshaft) { + using namespace turboshaft; // NOLINT(build/namespaces) + const AtomicRMWOp& atomic_op = this->Get(node).template Cast(); + if (atomic_op.input_rep == MemoryRepresentation::Int8()) { + opcode = kAtomicExchangeInt8; + } else if (atomic_op.input_rep == MemoryRepresentation::Uint8()) { + opcode = kPPC_AtomicExchangeUint8; + } else if (atomic_op.input_rep == MemoryRepresentation::Int16()) { + opcode = kAtomicExchangeInt16; + } else if (atomic_op.input_rep == MemoryRepresentation::Uint16()) { + opcode = kPPC_AtomicExchangeUint16; + } else if (atomic_op.input_rep == MemoryRepresentation::Int32() || + atomic_op.input_rep == MemoryRepresentation::Uint32()) { + opcode = kPPC_AtomicExchangeWord32; + } else { + UNREACHABLE(); + } } else { - UNREACHABLE(); + MachineType type = AtomicOpType(node->op()); + if (type == MachineType::Int8()) { + opcode = kAtomicExchangeInt8; + } else if (type == 
MachineType::Uint8()) { + opcode = kPPC_AtomicExchangeUint8; + } else if (type == MachineType::Int16()) { + opcode = kAtomicExchangeInt16; + } else if (type == MachineType::Uint16()) { + opcode = kPPC_AtomicExchangeUint16; + } else if (type == MachineType::Int32() || type == MachineType::Uint32()) { + opcode = kPPC_AtomicExchangeWord32; + } else { + UNREACHABLE(); + } } VisitAtomicExchange(this, node, opcode); } -template <> -void InstructionSelectorT::VisitWord64AtomicExchange( - node_t node) { - UNIMPLEMENTED(); -} - -template <> -void InstructionSelectorT::VisitWord64AtomicExchange( - Node* node) { +template +void InstructionSelectorT::VisitWord64AtomicExchange(node_t node) { ArchOpcode opcode; - MachineType type = AtomicOpType(node->op()); - if (type == MachineType::Uint8()) { - opcode = kPPC_AtomicExchangeUint8; - } else if (type == MachineType::Uint16()) { - opcode = kPPC_AtomicExchangeUint16; - } else if (type == MachineType::Uint32()) { - opcode = kPPC_AtomicExchangeWord32; - } else if (type == MachineType::Uint64()) { - opcode = kPPC_AtomicExchangeWord64; + if constexpr (Adapter::IsTurboshaft) { + using namespace turboshaft; // NOLINT(build/namespaces) + const AtomicRMWOp& atomic_op = this->Get(node).template Cast(); + if (atomic_op.input_rep == MemoryRepresentation::Uint8()) { + opcode = kPPC_AtomicExchangeUint8; + } else if (atomic_op.input_rep == MemoryRepresentation::Uint16()) { + opcode = kPPC_AtomicExchangeUint16; + } else if (atomic_op.input_rep == MemoryRepresentation::Uint32()) { + opcode = kPPC_AtomicExchangeWord32; + } else if (atomic_op.input_rep == MemoryRepresentation::Uint64()) { + opcode = kPPC_AtomicExchangeWord64; + } else { + UNREACHABLE(); + } } else { - UNREACHABLE(); + MachineType type = AtomicOpType(node->op()); + if (type == MachineType::Uint8()) { + opcode = kPPC_AtomicExchangeUint8; + } else if (type == MachineType::Uint16()) { + opcode = kPPC_AtomicExchangeUint16; + } else if (type == MachineType::Uint32()) { + opcode = kPPC_AtomicExchangeWord32; + } else if (type == MachineType::Uint64()) { + opcode = kPPC_AtomicExchangeWord64; + } else { + UNREACHABLE(); + } } VisitAtomicExchange(this, node, opcode); } template void VisitAtomicCompareExchange(InstructionSelectorT* selector, - Node* node, ArchOpcode opcode) { + typename Adapter::node_t node, + ArchOpcode opcode) { + using node_t = typename Adapter::node_t; PPCOperandGeneratorT g(selector); - Node* base = node->InputAt(0); - Node* index = node->InputAt(1); - Node* old_value = node->InputAt(2); - Node* new_value = node->InputAt(3); + auto atomic_op = selector->atomic_rmw_view(node); + node_t base = atomic_op.base(); + node_t index = atomic_op.index(); + node_t old_value = atomic_op.expected(); + node_t new_value = atomic_op.value(); AddressingMode addressing_mode = kMode_MRR; InstructionCode code = opcode | AddressingModeField::encode(addressing_mode); @@ -3092,91 +3101,140 @@ void VisitAtomicCompareExchange(InstructionSelectorT* selector, selector->Emit(code, output_count, outputs, input_count, inputs); } -template <> -void InstructionSelectorT::VisitWord32AtomicCompareExchange( +template +void InstructionSelectorT::VisitWord32AtomicCompareExchange( node_t node) { - UNIMPLEMENTED(); -} - -template <> -void InstructionSelectorT::VisitWord32AtomicCompareExchange( - Node* node) { - MachineType type = AtomicOpType(node->op()); + using namespace turboshaft; // NOLINT(build/namespaces) ArchOpcode opcode; - if (type == MachineType::Int8()) { - opcode = kAtomicCompareExchangeInt8; - } else if (type == 
MachineType::Uint8()) { - opcode = kPPC_AtomicCompareExchangeUint8; - } else if (type == MachineType::Int16()) { - opcode = kAtomicCompareExchangeInt16; - } else if (type == MachineType::Uint16()) { - opcode = kPPC_AtomicCompareExchangeUint16; - } else if (type == MachineType::Int32() || type == MachineType::Uint32()) { - opcode = kPPC_AtomicCompareExchangeWord32; + if constexpr (Adapter::IsTurboshaft) { + const AtomicRMWOp& atomic_op = this->Get(node).template Cast(); + if (atomic_op.input_rep == MemoryRepresentation::Int8()) { + opcode = kAtomicCompareExchangeInt8; + } else if (atomic_op.input_rep == MemoryRepresentation::Uint8()) { + opcode = kPPC_AtomicCompareExchangeUint8; + } else if (atomic_op.input_rep == MemoryRepresentation::Int16()) { + opcode = kAtomicCompareExchangeInt16; + } else if (atomic_op.input_rep == MemoryRepresentation::Uint16()) { + opcode = kPPC_AtomicCompareExchangeUint16; + } else if (atomic_op.input_rep == MemoryRepresentation::Int32() || + atomic_op.input_rep == MemoryRepresentation::Uint32()) { + opcode = kPPC_AtomicCompareExchangeWord32; + } else { + UNREACHABLE(); + } } else { - UNREACHABLE(); + MachineType type = AtomicOpType(node->op()); + if (type == MachineType::Int8()) { + opcode = kAtomicCompareExchangeInt8; + } else if (type == MachineType::Uint8()) { + opcode = kPPC_AtomicCompareExchangeUint8; + } else if (type == MachineType::Int16()) { + opcode = kAtomicCompareExchangeInt16; + } else if (type == MachineType::Uint16()) { + opcode = kPPC_AtomicCompareExchangeUint16; + } else if (type == MachineType::Int32() || type == MachineType::Uint32()) { + opcode = kPPC_AtomicCompareExchangeWord32; + } else { + UNREACHABLE(); + } } VisitAtomicCompareExchange(this, node, opcode); } -template <> -void InstructionSelectorT::VisitWord64AtomicCompareExchange( +template +void InstructionSelectorT::VisitWord64AtomicCompareExchange( node_t node) { - UNIMPLEMENTED(); -} - -template <> -void InstructionSelectorT::VisitWord64AtomicCompareExchange( - Node* node) { - MachineType type = AtomicOpType(node->op()); ArchOpcode opcode; - if (type == MachineType::Uint8()) { - opcode = kPPC_AtomicCompareExchangeUint8; - } else if (type == MachineType::Uint16()) { - opcode = kPPC_AtomicCompareExchangeUint16; - } else if (type == MachineType::Uint32()) { - opcode = kPPC_AtomicCompareExchangeWord32; - } else if (type == MachineType::Uint64()) { - opcode = kPPC_AtomicCompareExchangeWord64; + if constexpr (Adapter::IsTurboshaft) { + using namespace turboshaft; // NOLINT(build/namespaces) + const AtomicRMWOp& atomic_op = this->Get(node).template Cast(); + if (atomic_op.input_rep == MemoryRepresentation::Uint8()) { + opcode = kPPC_AtomicCompareExchangeUint8; + } else if (atomic_op.input_rep == MemoryRepresentation::Uint16()) { + opcode = kPPC_AtomicCompareExchangeUint16; + } else if (atomic_op.input_rep == MemoryRepresentation::Uint32()) { + opcode = kPPC_AtomicCompareExchangeWord32; + } else if (atomic_op.input_rep == MemoryRepresentation::Uint64()) { + opcode = kPPC_AtomicCompareExchangeWord64; + } else { + UNREACHABLE(); + } } else { - UNREACHABLE(); + MachineType type = AtomicOpType(node->op()); + if (type == MachineType::Uint8()) { + opcode = kPPC_AtomicCompareExchangeUint8; + } else if (type == MachineType::Uint16()) { + opcode = kPPC_AtomicCompareExchangeUint16; + } else if (type == MachineType::Uint32()) { + opcode = kPPC_AtomicCompareExchangeWord32; + } else if (type == MachineType::Uint64()) { + opcode = kPPC_AtomicCompareExchangeWord64; + } else { + UNREACHABLE(); + } } 
VisitAtomicCompareExchange(this, node, opcode); } template void VisitAtomicBinaryOperation(InstructionSelectorT* selector, - Node* node, ArchOpcode int8_op, - ArchOpcode uint8_op, ArchOpcode int16_op, - ArchOpcode uint16_op, ArchOpcode int32_op, - ArchOpcode uint32_op, ArchOpcode int64_op, - ArchOpcode uint64_op) { + typename Adapter::node_t node, + ArchOpcode int8_op, ArchOpcode uint8_op, + ArchOpcode int16_op, ArchOpcode uint16_op, + ArchOpcode int32_op, ArchOpcode uint32_op, + ArchOpcode int64_op, ArchOpcode uint64_op) { + using node_t = typename Adapter::node_t; PPCOperandGeneratorT g(selector); - Node* base = node->InputAt(0); - Node* index = node->InputAt(1); - Node* value = node->InputAt(2); - MachineType type = AtomicOpType(node->op()); + auto atomic_op = selector->atomic_rmw_view(node); + node_t base = atomic_op.base(); + node_t index = atomic_op.index(); + node_t value = atomic_op.value(); ArchOpcode opcode; - - if (type == MachineType::Int8()) { - opcode = int8_op; - } else if (type == MachineType::Uint8()) { - opcode = uint8_op; - } else if (type == MachineType::Int16()) { - opcode = int16_op; - } else if (type == MachineType::Uint16()) { - opcode = uint16_op; - } else if (type == MachineType::Int32()) { - opcode = int32_op; - } else if (type == MachineType::Uint32()) { - opcode = uint32_op; - } else if (type == MachineType::Int64()) { - opcode = int64_op; - } else if (type == MachineType::Uint64()) { - opcode = uint64_op; + if constexpr (Adapter::IsTurboshaft) { + using namespace turboshaft; // NOLINT(build/namespaces) + const AtomicRMWOp& atomic_op = + selector->Get(node).template Cast(); + if (atomic_op.input_rep == MemoryRepresentation::Int8()) { + opcode = int8_op; + } else if (atomic_op.input_rep == MemoryRepresentation::Uint8()) { + opcode = uint8_op; + } else if (atomic_op.input_rep == MemoryRepresentation::Int16()) { + opcode = int16_op; + } else if (atomic_op.input_rep == MemoryRepresentation::Uint16()) { + opcode = uint16_op; + } else if (atomic_op.input_rep == MemoryRepresentation::Int32()) { + opcode = int32_op; + } else if (atomic_op.input_rep == MemoryRepresentation::Uint32()) { + opcode = uint32_op; + } else if (atomic_op.input_rep == MemoryRepresentation::Int64()) { + opcode = int64_op; + } else if (atomic_op.input_rep == MemoryRepresentation::Uint64()) { + opcode = uint64_op; + } else { + UNREACHABLE(); + } } else { - UNREACHABLE(); + MachineType type = AtomicOpType(node->op()); + if (type == MachineType::Int8()) { + opcode = int8_op; + } else if (type == MachineType::Uint8()) { + opcode = uint8_op; + } else if (type == MachineType::Int16()) { + opcode = int16_op; + } else if (type == MachineType::Uint16()) { + opcode = uint16_op; + } else if (type == MachineType::Int32()) { + opcode = int32_op; + } else if (type == MachineType::Uint32()) { + opcode = uint32_op; + } else if (type == MachineType::Int64()) { + opcode = int64_op; + } else if (type == MachineType::Uint64()) { + opcode = uint64_op; + } else { + UNREACHABLE(); + } } AddressingMode addressing_mode = kMode_MRR; @@ -3214,27 +3272,19 @@ void InstructionSelectorT::VisitWord64AtomicBinaryOperation( #define VISIT_ATOMIC_BINOP(op) \ template \ void InstructionSelectorT::VisitWord32Atomic##op(node_t node) { \ - if constexpr (Adapter::IsTurboshaft) { \ - UNIMPLEMENTED(); \ - } else { \ VisitAtomicBinaryOperation( \ this, node, kPPC_Atomic##op##Int8, kPPC_Atomic##op##Uint8, \ kPPC_Atomic##op##Int16, kPPC_Atomic##op##Uint16, \ kPPC_Atomic##op##Int32, kPPC_Atomic##op##Uint32, \ kPPC_Atomic##op##Int64, 
kPPC_Atomic##op##Uint64); \ - } \ } \ template \ void InstructionSelectorT::VisitWord64Atomic##op(node_t node) { \ - if constexpr (Adapter::IsTurboshaft) { \ - UNIMPLEMENTED(); \ - } else { \ VisitAtomicBinaryOperation( \ this, node, kPPC_Atomic##op##Int8, kPPC_Atomic##op##Uint8, \ kPPC_Atomic##op##Int16, kPPC_Atomic##op##Uint16, \ kPPC_Atomic##op##Int32, kPPC_Atomic##op##Uint32, \ kPPC_Atomic##op##Int64, kPPC_Atomic##op##Uint64); \ - } \ } VISIT_ATOMIC_BINOP(Add) VISIT_ATOMIC_BINOP(Sub) @@ -3442,19 +3492,22 @@ void InstructionSelectorT::VisitInt64AbsWithOverflow(node_t node) { V(S128Not) \ V(V128AnyTrue) -#define SIMD_VISIT_EXTRACT_LANE(Type, T, Sign, LaneSize) \ - template \ - void InstructionSelectorT::Visit##Type##ExtractLane##Sign( \ - node_t node) { \ - if constexpr (Adapter::IsTurboshaft) { \ - UNIMPLEMENTED(); \ - } else { \ - PPCOperandGeneratorT g(this); \ - int32_t lane = OpParameter(node->op()); \ - Emit(kPPC_##T##ExtractLane##Sign | LaneSizeField::encode(LaneSize), \ - g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)), \ - g.UseImmediate(lane)); \ - } \ +#define SIMD_VISIT_EXTRACT_LANE(Type, T, Sign, LaneSize) \ + template \ + void InstructionSelectorT::Visit##Type##ExtractLane##Sign( \ + node_t node) { \ + PPCOperandGeneratorT g(this); \ + int32_t lane; \ + if constexpr (Adapter::IsTurboshaft) { \ + using namespace turboshaft; /* NOLINT(build/namespaces) */ \ + const Operation& op = this->Get(node); \ + lane = op.template Cast().lane; \ + } else { \ + lane = OpParameter(node->op()); \ + } \ + Emit(kPPC_##T##ExtractLane##Sign | LaneSizeField::encode(LaneSize), \ + g.DefineAsRegister(node), g.UseRegister(this->input_at(node, 0)), \ + g.UseImmediate(lane)); \ } SIMD_VISIT_EXTRACT_LANE(F64x2, F, , 64) SIMD_VISIT_EXTRACT_LANE(F32x4, F, , 32) @@ -3469,15 +3522,18 @@ SIMD_VISIT_EXTRACT_LANE(I8x16, I, S, 8) #define SIMD_VISIT_REPLACE_LANE(Type, T, LaneSize) \ template \ void InstructionSelectorT::Visit##Type##ReplaceLane(node_t node) { \ + PPCOperandGeneratorT g(this); \ + int32_t lane; \ if constexpr (Adapter::IsTurboshaft) { \ - UNIMPLEMENTED(); \ + using namespace turboshaft; /* NOLINT(build/namespaces) */ \ + const Operation& op = this->Get(node); \ + lane = op.template Cast().lane; \ } else { \ - PPCOperandGeneratorT g(this); \ - int32_t lane = OpParameter(node->op()); \ - Emit(kPPC_##T##ReplaceLane | LaneSizeField::encode(LaneSize), \ - g.DefineSameAsFirst(node), g.UseRegister(node->InputAt(0)), \ - g.UseImmediate(lane), g.UseRegister(node->InputAt(1))); \ + lane = OpParameter(node->op()); \ } \ + Emit(kPPC_##T##ReplaceLane | LaneSizeField::encode(LaneSize), \ + g.DefineSameAsFirst(node), g.UseRegister(this->input_at(node, 0)), \ + g.UseImmediate(lane), g.UseRegister(this->input_at(node, 1))); \ } SIMD_VISIT_REPLACE_LANE(F64x2, F, 64) SIMD_VISIT_REPLACE_LANE(F32x4, F, 32) @@ -3487,18 +3543,14 @@ SIMD_VISIT_REPLACE_LANE(I16x8, I, 16) SIMD_VISIT_REPLACE_LANE(I8x16, I, 8) #undef SIMD_VISIT_REPLACE_LANE -#define SIMD_VISIT_BINOP(Opcode) \ - template \ - void InstructionSelectorT::Visit##Opcode(node_t node) { \ - if constexpr (Adapter::IsTurboshaft) { \ - UNIMPLEMENTED(); \ - } else { \ - PPCOperandGeneratorT g(this); \ - InstructionOperand temps[] = {g.TempRegister()}; \ - Emit(kPPC_##Opcode, g.DefineAsRegister(node), \ - g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1)), \ - arraysize(temps), temps); \ - } \ +#define SIMD_VISIT_BINOP(Opcode) \ + template \ + void InstructionSelectorT::Visit##Opcode(node_t node) { \ + PPCOperandGeneratorT g(this); \ + 
InstructionOperand temps[] = {g.TempRegister()}; \ + Emit(kPPC_##Opcode, g.DefineAsRegister(node), \ + g.UseRegister(this->input_at(node, 0)), \ + g.UseRegister(this->input_at(node, 1)), arraysize(temps), temps); \ } SIMD_BINOP_LIST(SIMD_VISIT_BINOP) #undef SIMD_VISIT_BINOP @@ -3507,29 +3559,22 @@ SIMD_BINOP_LIST(SIMD_VISIT_BINOP) #define SIMD_VISIT_UNOP(Opcode) \ template \ void InstructionSelectorT::Visit##Opcode(node_t node) { \ - if constexpr (Adapter::IsTurboshaft) { \ - UNIMPLEMENTED(); \ - } else { \ - PPCOperandGeneratorT g(this); \ - Emit(kPPC_##Opcode, g.DefineAsRegister(node), \ - g.UseRegister(node->InputAt(0))); \ - } \ + PPCOperandGeneratorT g(this); \ + Emit(kPPC_##Opcode, g.DefineAsRegister(node), \ + g.UseRegister(this->input_at(node, 0))); \ } SIMD_UNOP_LIST(SIMD_VISIT_UNOP) #undef SIMD_VISIT_UNOP #undef SIMD_UNOP_LIST -#define SIMD_VISIT_QFMOP(Opcode) \ - template \ - void InstructionSelectorT::Visit##Opcode(node_t node) { \ - if constexpr (Adapter::IsTurboshaft) { \ - UNIMPLEMENTED(); \ - } else { \ - PPCOperandGeneratorT g(this); \ - Emit(kPPC_##Opcode, g.DefineSameAsFirst(node), \ - g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1)), \ - g.UseRegister(node->InputAt(2))); \ - } \ +#define SIMD_VISIT_QFMOP(Opcode) \ + template \ + void InstructionSelectorT::Visit##Opcode(node_t node) { \ + PPCOperandGeneratorT g(this); \ + Emit(kPPC_##Opcode, g.DefineSameAsFirst(node), \ + g.UseRegister(this->input_at(node, 0)), \ + g.UseRegister(this->input_at(node, 1)), \ + g.UseRegister(this->input_at(node, 2))); \ } SIMD_VISIT_QFMOP(F64x2Qfma) SIMD_VISIT_QFMOP(F64x2Qfms) @@ -3565,9 +3610,6 @@ SIMD_RELAXED_OP_LIST(SIMD_VISIT_RELAXED_OP) #if V8_ENABLE_WEBASSEMBLY template void InstructionSelectorT::VisitI8x16Shuffle(node_t node) { - if constexpr (Adapter::IsTurboshaft) { - UNIMPLEMENTED(); - } else { uint8_t shuffle[kSimd128Size]; bool is_swizzle; // TODO(nicohartmann@): Properly use view here once Turboshaft support is @@ -3575,8 +3617,8 @@ void InstructionSelectorT::VisitI8x16Shuffle(node_t node) { auto view = this->simd_shuffle_view(node); CanonicalizeShuffle(view, shuffle, &is_swizzle); PPCOperandGeneratorT g(this); - Node* input0 = node->InputAt(0); - Node* input1 = node->InputAt(1); + node_t input0 = view.input(0); + node_t input1 = view.input(1); // Remap the shuffle indices to match IBM lane numbering. int max_index = 15; int total_lane_count = 2 * kSimd128Size; @@ -3594,24 +3636,16 @@ void InstructionSelectorT::VisitI8x16Shuffle(node_t node) { g.UseImmediate(wasm::SimdShuffle::Pack4Lanes(shuffle_remapped + 4)), g.UseImmediate(wasm::SimdShuffle::Pack4Lanes(shuffle_remapped + 8)), g.UseImmediate(wasm::SimdShuffle::Pack4Lanes(shuffle_remapped + 12))); - } } -template <> -void InstructionSelectorT::VisitSetStackPointer(Node* node) { +template +void InstructionSelectorT::VisitSetStackPointer(node_t node) { OperandGenerator g(this); // TODO(miladfarca): Optimize by using UseAny. - auto input = g.UseRegister(node->InputAt(0)); + auto input = g.UseRegister(this->input_at(node, 0)); Emit(kArchSetStackPointer, 0, nullptr, 1, &input); } -template <> -void InstructionSelectorT::VisitSetStackPointer( - node_t node) { - // TODO(miladfarca): Implement. 
- UNREACHABLE(); -} - #else template void InstructionSelectorT::VisitI8x16Shuffle(node_t node) { @@ -3621,24 +3655,17 @@ void InstructionSelectorT::VisitI8x16Shuffle(node_t node) { template void InstructionSelectorT::VisitS128Zero(node_t node) { - if constexpr (Adapter::IsTurboshaft) { - UNIMPLEMENTED(); - } else { PPCOperandGeneratorT g(this); Emit(kPPC_S128Zero, g.DefineAsRegister(node)); - } } template void InstructionSelectorT::VisitS128Select(node_t node) { - if constexpr (Adapter::IsTurboshaft) { - UNIMPLEMENTED(); - } else { PPCOperandGeneratorT g(this); Emit(kPPC_S128Select, g.DefineAsRegister(node), - g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1)), - g.UseRegister(node->InputAt(2))); - } + g.UseRegister(this->input_at(node, 0)), + g.UseRegister(this->input_at(node, 1)), + g.UseRegister(this->input_at(node, 2))); } // This is a replica of SimdShuffle::Pack4Lanes. However, above function will @@ -3655,12 +3682,15 @@ static int32_t Pack4Lanes(const uint8_t* shuffle) { template void InstructionSelectorT::VisitS128Const(node_t node) { - if constexpr (Adapter::IsTurboshaft) { - UNIMPLEMENTED(); - } else { PPCOperandGeneratorT g(this); uint32_t val[kSimd128Size / sizeof(uint32_t)]; - memcpy(val, S128ImmediateParameterOf(node->op()).data(), kSimd128Size); + if constexpr (Adapter::IsTurboshaft) { + const turboshaft::Simd128ConstantOp& constant = + this->Get(node).template Cast(); + memcpy(val, constant.value, kSimd128Size); + } else { + memcpy(val, S128ImmediateParameterOf(node->op()).data(), kSimd128Size); + } // If all bytes are zeros, avoid emitting code for generic constants. bool all_zeros = !(val[0] || val[1] || val[2] || val[3]); bool all_ones = val[0] == UINT32_MAX && val[1] == UINT32_MAX && @@ -3680,47 +3710,35 @@ void InstructionSelectorT::VisitS128Const(node_t node) { g.UseImmediate(Pack4Lanes(reinterpret_cast(&val[0]) + 8)), g.UseImmediate(Pack4Lanes(reinterpret_cast(&val[0]) + 12))); } - } } template void InstructionSelectorT::VisitI16x8DotI8x16I7x16S(node_t node) { - if constexpr (Adapter::IsTurboshaft) { - UNIMPLEMENTED(); - } else { PPCOperandGeneratorT g(this); Emit(kPPC_I16x8DotI8x16S, g.DefineAsRegister(node), - g.UseUniqueRegister(node->InputAt(0)), - g.UseUniqueRegister(node->InputAt(1))); - } + g.UseUniqueRegister(this->input_at(node, 0)), + g.UseUniqueRegister(this->input_at(node, 1))); } template void InstructionSelectorT::VisitI32x4DotI8x16I7x16AddS(node_t node) { - if constexpr (Adapter::IsTurboshaft) { - UNIMPLEMENTED(); - } else { PPCOperandGeneratorT g(this); Emit(kPPC_I32x4DotI8x16AddS, g.DefineAsRegister(node), - g.UseUniqueRegister(node->InputAt(0)), - g.UseUniqueRegister(node->InputAt(1)), - g.UseUniqueRegister(node->InputAt(2))); - } + g.UseUniqueRegister(this->input_at(node, 0)), + g.UseUniqueRegister(this->input_at(node, 1)), + g.UseUniqueRegister(this->input_at(node, 2))); } template void InstructionSelectorT::EmitPrepareResults( ZoneVector* results, const CallDescriptor* call_descriptor, node_t node) { - if constexpr (Adapter::IsTurboshaft) { - UNIMPLEMENTED(); - } else { PPCOperandGeneratorT g(this); for (PushParameter output : *results) { if (!output.location.IsCallerFrameSlot()) continue; // Skip any alignment holes in nodes. 
- if (output.node != nullptr) { + if (this->valid(output.node)) { DCHECK(!call_descriptor->IsCFunctionCall()); if (output.location.GetType() == MachineType::Float32()) { MarkAsFloat32(output.node); @@ -3734,17 +3752,40 @@ void InstructionSelectorT::EmitPrepareResults( Emit(kPPC_Peek, g.DefineAsRegister(output.node), g.UseImmediate(reverse_slot)); } - } } } template void InstructionSelectorT::VisitLoadLane(node_t node) { - if constexpr (Adapter::IsTurboshaft) { - UNIMPLEMENTED(); + PPCOperandGeneratorT g(this); + InstructionCode opcode = kArchNop; + int32_t lane; + if constexpr (Adapter::IsTurboshaft) { + using namespace turboshaft; // NOLINT(build/namespaces) + const Simd128LaneMemoryOp& load = + this->Get(node).template Cast(); + lane = load.lane; + switch (load.lane_kind) { + case Simd128LaneMemoryOp::LaneKind::k8: + opcode = kPPC_S128Load8Lane; + break; + case Simd128LaneMemoryOp::LaneKind::k16: + opcode = kPPC_S128Load16Lane; + break; + case Simd128LaneMemoryOp::LaneKind::k32: + opcode = kPPC_S128Load32Lane; + break; + case Simd128LaneMemoryOp::LaneKind::k64: + opcode = kPPC_S128Load64Lane; + break; + } + Emit(opcode | AddressingModeField::encode(kMode_MRR), + g.DefineSameAsFirst(node), g.UseRegister(load.value()), + g.UseRegister(load.base()), g.UseRegister(load.index()), + g.UseImmediate(lane)); } else { LoadLaneParameters params = LoadLaneParametersOf(node->op()); - InstructionCode opcode = kArchNop; + lane = params.laneidx; if (params.rep == MachineType::Int8()) { opcode = kPPC_S128Load8Lane; } else if (params.rep == MachineType::Int16()) { @@ -3756,8 +3797,6 @@ void InstructionSelectorT::VisitLoadLane(node_t node) { } else { UNREACHABLE(); } - - PPCOperandGeneratorT g(this); Emit(opcode | AddressingModeField::encode(kMode_MRR), g.DefineSameAsFirst(node), g.UseRegister(node->InputAt(2)), g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1)), @@ -3767,15 +3806,63 @@ void InstructionSelectorT::VisitLoadLane(node_t node) { template void InstructionSelectorT::VisitLoadTransform(node_t node) { + PPCOperandGeneratorT g(this); + ArchOpcode opcode; if constexpr (Adapter::IsTurboshaft) { - UNIMPLEMENTED(); + using namespace turboshaft; // NOLINT(build/namespaces) + const Simd128LoadTransformOp& op = + this->Get(node).template Cast(); + node_t base = op.base(); + node_t index = op.index(); + + switch (op.transform_kind) { + case Simd128LoadTransformOp::TransformKind::k8Splat: + opcode = kPPC_S128Load8Splat; + break; + case Simd128LoadTransformOp::TransformKind::k16Splat: + opcode = kPPC_S128Load16Splat; + break; + case Simd128LoadTransformOp::TransformKind::k32Splat: + opcode = kPPC_S128Load32Splat; + break; + case Simd128LoadTransformOp::TransformKind::k64Splat: + opcode = kPPC_S128Load64Splat; + break; + case Simd128LoadTransformOp::TransformKind::k8x8S: + opcode = kPPC_S128Load8x8S; + break; + case Simd128LoadTransformOp::TransformKind::k8x8U: + opcode = kPPC_S128Load8x8U; + break; + case Simd128LoadTransformOp::TransformKind::k16x4S: + opcode = kPPC_S128Load16x4S; + break; + case Simd128LoadTransformOp::TransformKind::k16x4U: + opcode = kPPC_S128Load16x4U; + break; + case Simd128LoadTransformOp::TransformKind::k32x2S: + opcode = kPPC_S128Load32x2S; + break; + case Simd128LoadTransformOp::TransformKind::k32x2U: + opcode = kPPC_S128Load32x2U; + break; + case Simd128LoadTransformOp::TransformKind::k32Zero: + opcode = kPPC_S128Load32Zero; + break; + case Simd128LoadTransformOp::TransformKind::k64Zero: + opcode = kPPC_S128Load64Zero; + break; + default: + UNIMPLEMENTED(); + } + 
Emit(opcode | AddressingModeField::encode(kMode_MRR), + g.DefineAsRegister(node), g.UseRegister(base), g.UseRegister(index)); } else { LoadTransformParameters params = LoadTransformParametersOf(node->op()); PPCOperandGeneratorT g(this); Node* base = node->InputAt(0); Node* index = node->InputAt(1); - ArchOpcode opcode; switch (params.transformation) { case LoadTransformation::kS128Load8Splat: opcode = kPPC_S128Load8Splat; @@ -3823,34 +3910,53 @@ void InstructionSelectorT::VisitLoadTransform(node_t node) { template void InstructionSelectorT::VisitStoreLane(node_t node) { - if constexpr (Adapter::IsTurboshaft) { - UNIMPLEMENTED(); - } else { PPCOperandGeneratorT g(this); - - StoreLaneParameters params = StoreLaneParametersOf(node->op()); InstructionCode opcode = kArchNop; - if (params.rep == MachineRepresentation::kWord8) { - opcode = kPPC_S128Store8Lane; - } else if (params.rep == MachineRepresentation::kWord16) { - opcode = kPPC_S128Store16Lane; - } else if (params.rep == MachineRepresentation::kWord32) { - opcode = kPPC_S128Store32Lane; - } else if (params.rep == MachineRepresentation::kWord64) { - opcode = kPPC_S128Store64Lane; + InstructionOperand inputs[4]; + if constexpr (Adapter::IsTurboshaft) { + using namespace turboshaft; // NOLINT(build/namespaces) + const Simd128LaneMemoryOp& store = + this->Get(node).template Cast(); + switch (store.lane_kind) { + case Simd128LaneMemoryOp::LaneKind::k8: + opcode = kPPC_S128Store8Lane; + break; + case Simd128LaneMemoryOp::LaneKind::k16: + opcode = kPPC_S128Store16Lane; + break; + case Simd128LaneMemoryOp::LaneKind::k32: + opcode = kPPC_S128Store32Lane; + break; + case Simd128LaneMemoryOp::LaneKind::k64: + opcode = kPPC_S128Store64Lane; + break; + } + + inputs[0] = g.UseRegister(store.value()); + inputs[1] = g.UseRegister(store.base()); + inputs[2] = g.UseRegister(store.index()); + inputs[3] = g.UseImmediate(store.lane); } else { - UNREACHABLE(); - } + StoreLaneParameters params = StoreLaneParametersOf(node->op()); + if (params.rep == MachineRepresentation::kWord8) { + opcode = kPPC_S128Store8Lane; + } else if (params.rep == MachineRepresentation::kWord16) { + opcode = kPPC_S128Store16Lane; + } else if (params.rep == MachineRepresentation::kWord32) { + opcode = kPPC_S128Store32Lane; + } else if (params.rep == MachineRepresentation::kWord64) { + opcode = kPPC_S128Store64Lane; + } else { + UNREACHABLE(); + } - InstructionOperand inputs[4]; - InstructionOperand value_operand = g.UseRegister(node->InputAt(2)); - inputs[0] = value_operand; - inputs[1] = g.UseRegister(node->InputAt(0)); - inputs[2] = g.UseRegister(node->InputAt(1)); - inputs[3] = g.UseImmediate(params.laneidx); + inputs[0] = g.UseRegister(node->InputAt(2)); + inputs[1] = g.UseRegister(node->InputAt(0)); + inputs[2] = g.UseRegister(node->InputAt(1)); + inputs[3] = g.UseImmediate(params.laneidx); + } Emit(opcode | AddressingModeField::encode(kMode_MRR), 0, nullptr, 4, inputs); - } } template @@ -3879,19 +3985,6 @@ void InstructionSelectorT::VisitF32x4NearestInt(node_t node) { UNREACHABLE(); } -template <> -Node* InstructionSelectorT::FindProjection( - Node* node, size_t projection_index) { - return NodeProperties::FindProjection(node, projection_index); -} - -template <> -TurboshaftAdapter::node_t -InstructionSelectorT::FindProjection( - node_t node, size_t projection_index) { - UNIMPLEMENTED(); -} - MachineOperatorBuilder::Flags InstructionSelector::SupportedMachineOperatorFlags() { return MachineOperatorBuilder::kFloat32RoundDown | diff --git 
a/deps/v8/src/compiler/backend/register-allocator.cc b/deps/v8/src/compiler/backend/register-allocator.cc index f94a6fe5e03916..cf8c6637340cee 100644 --- a/deps/v8/src/compiler/backend/register-allocator.cc +++ b/deps/v8/src/compiler/backend/register-allocator.cc @@ -585,6 +585,12 @@ bool LiveRange::Covers(LifetimePosition position) { } ++interval; } + if (!covers && interval > intervals_.begin()) { + // To ensure that we advance {current_interval_} below, move back to the + // last interval starting before position. + interval--; + DCHECK_LE(interval->start(), position); + } AdvanceLastProcessedMarker(interval, position); return covers; } diff --git a/deps/v8/src/compiler/backend/riscv/code-generator-riscv.cc b/deps/v8/src/compiler/backend/riscv/code-generator-riscv.cc index 8965624fe33f02..d3cc06e524e9fa 100644 --- a/deps/v8/src/compiler/backend/riscv/code-generator-riscv.cc +++ b/deps/v8/src/compiler/backend/riscv/code-generator-riscv.cc @@ -641,8 +641,8 @@ void CodeGenerator::AssembleCodeStartRegisterCheck() { // 3. if it is not zero then it jumps to the builtin. void CodeGenerator::BailoutIfDeoptimized() { int offset = InstructionStream::kCodeOffset - InstructionStream::kHeaderSize; - __ LoadTaggedField(kScratchReg, - MemOperand(kJavaScriptCallCodeStartRegister, offset)); + __ LoadProtectedPointerField( + kScratchReg, MemOperand(kJavaScriptCallCodeStartRegister, offset)); __ Lw(kScratchReg, FieldMemOperand(kScratchReg, Code::kFlagsOffset)); __ And(kScratchReg, kScratchReg, Operand(1 << Code::kMarkedForDeoptimizationBit)); @@ -789,6 +789,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( int const num_gp_parameters = ParamField::decode(instr->opcode()); int const num_fp_parameters = FPParamField::decode(instr->opcode()); Label after_call; + SetIsolateDataSlots set_isolate_data_slots = SetIsolateDataSlots::kYes; +#if V8_ENABLE_WEBASSEMBLY bool isWasmCapiFunction = linkage()->GetIncomingDescriptor()->IsWasmCapiFunction(); if (isWasmCapiFunction) { @@ -796,18 +798,24 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( __ LoadAddress(kScratchReg, &after_call, RelocInfo::EXTERNAL_REFERENCE); __ StoreWord(kScratchReg, MemOperand(fp, WasmExitFrameConstants::kCallingPCOffset)); + set_isolate_data_slots = SetIsolateDataSlots::kNo; } +#endif // V8_ENABLE_WEBASSEMBLY if (instr->InputAt(0)->IsImmediate()) { ExternalReference ref = i.InputExternalReference(0); - __ CallCFunction(ref, num_gp_parameters, num_fp_parameters); + __ CallCFunction(ref, num_gp_parameters, num_fp_parameters, + set_isolate_data_slots); } else { Register func = i.InputOrZeroRegister(0); - __ CallCFunction(func, num_gp_parameters, num_fp_parameters); + __ CallCFunction(func, num_gp_parameters, num_fp_parameters, + set_isolate_data_slots); } __ bind(&after_call); +#if V8_ENABLE_WEBASSEMBLY if (isWasmCapiFunction) { RecordSafepoint(instr->reference_map()); } +#endif // V8_ENABLE_WEBASSEMBLY frame_access_state()->SetFrameAccessToDefault(); // Ideally, we should decrement SP delta to match the change of stack diff --git a/deps/v8/src/compiler/backend/riscv/instruction-selector-riscv32.cc b/deps/v8/src/compiler/backend/riscv/instruction-selector-riscv32.cc index 69b1cca7d0a5d5..3aab14d20f4e7e 100644 --- a/deps/v8/src/compiler/backend/riscv/instruction-selector-riscv32.cc +++ b/deps/v8/src/compiler/backend/riscv/instruction-selector-riscv32.cc @@ -414,7 +414,7 @@ void InstructionSelectorT::VisitWord32ReverseBytes(node_t node) { } template -void 
InstructionSelectorT::VisitSimd128ReverseBytes(Node* node) { +void InstructionSelectorT::VisitSimd128ReverseBytes(node_t node) { UNREACHABLE(); } diff --git a/deps/v8/src/compiler/backend/riscv/instruction-selector-riscv64.cc b/deps/v8/src/compiler/backend/riscv/instruction-selector-riscv64.cc index b011533d8a2106..657d6177bad2b2 100644 --- a/deps/v8/src/compiler/backend/riscv/instruction-selector-riscv64.cc +++ b/deps/v8/src/compiler/backend/riscv/instruction-selector-riscv64.cc @@ -842,7 +842,7 @@ void InstructionSelectorT::VisitWord32ReverseBytes(node_t node) { } template -void InstructionSelectorT::VisitSimd128ReverseBytes(Node* node) { +void InstructionSelectorT::VisitSimd128ReverseBytes(node_t node) { UNREACHABLE(); } diff --git a/deps/v8/src/compiler/backend/s390/code-generator-s390.cc b/deps/v8/src/compiler/backend/s390/code-generator-s390.cc index 993fe89ce8e839..f22568ad7d3fab 100644 --- a/deps/v8/src/compiler/backend/s390/code-generator-s390.cc +++ b/deps/v8/src/compiler/backend/s390/code-generator-s390.cc @@ -1298,6 +1298,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( case kArchCallCFunction: { int const num_gp_parameters = ParamField::decode(instr->opcode()); int const num_fp_parameters = FPParamField::decode(instr->opcode()); + SetIsolateDataSlots set_isolate_data_slots = SetIsolateDataSlots::kYes; Label return_location; // Put the return address in a stack slot. #if V8_ENABLE_WEBASSEMBLY @@ -1306,14 +1307,17 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( __ larl(r0, &return_location); __ StoreU64(r0, MemOperand(fp, WasmExitFrameConstants::kCallingPCOffset)); + set_isolate_data_slots = SetIsolateDataSlots::kNo; } #endif // V8_ENABLE_WEBASSEMBLY if (instr->InputAt(0)->IsImmediate()) { ExternalReference ref = i.InputExternalReference(0); - __ CallCFunction(ref, num_gp_parameters, num_fp_parameters); + __ CallCFunction(ref, num_gp_parameters, num_fp_parameters, + set_isolate_data_slots); } else { Register func = i.InputRegister(0); - __ CallCFunction(func, num_gp_parameters, num_fp_parameters); + __ CallCFunction(func, num_gp_parameters, num_fp_parameters, + set_isolate_data_slots); } __ bind(&return_location); #if V8_ENABLE_WEBASSEMBLY diff --git a/deps/v8/src/compiler/backend/s390/instruction-selector-s390.cc b/deps/v8/src/compiler/backend/s390/instruction-selector-s390.cc index 781a1283571ad7..feeea3bcb5b0d8 100644 --- a/deps/v8/src/compiler/backend/s390/instruction-selector-s390.cc +++ b/deps/v8/src/compiler/backend/s390/instruction-selector-s390.cc @@ -1247,23 +1247,27 @@ void InstructionSelectorT::VisitWord32ReverseBytes(node_t node) { } template -void InstructionSelectorT::VisitSimd128ReverseBytes(Node* node) { - S390OperandGeneratorT g(this); - NodeMatcher input(node->InputAt(0)); - if (CanCover(node, input.node()) && input.IsLoad()) { - LoadRepresentation load_rep = LoadRepresentationOf(input.node()->op()); - if (load_rep.representation() == MachineRepresentation::kSimd128) { - Node* base = input.node()->InputAt(0); - Node* offset = input.node()->InputAt(1); - Emit(kS390_LoadReverseSimd128 | AddressingModeField::encode(kMode_MRR), - // TODO(miladfar): one of the base and offset can be imm. 
- g.DefineAsRegister(node), g.UseRegister(base), - g.UseRegister(offset)); - return; +void InstructionSelectorT::VisitSimd128ReverseBytes(node_t node) { + if constexpr (Adapter::IsTurboshaft) { + UNIMPLEMENTED(); + } else { + S390OperandGeneratorT g(this); + NodeMatcher input(node->InputAt(0)); + if (CanCover(node, input.node()) && input.IsLoad()) { + LoadRepresentation load_rep = LoadRepresentationOf(input.node()->op()); + if (load_rep.representation() == MachineRepresentation::kSimd128) { + Node* base = input.node()->InputAt(0); + Node* offset = input.node()->InputAt(1); + Emit(kS390_LoadReverseSimd128 | AddressingModeField::encode(kMode_MRR), + // TODO(miladfar): one of the base and offset can be imm. + g.DefineAsRegister(node), g.UseRegister(base), + g.UseRegister(offset)); + return; + } } + Emit(kS390_LoadReverseSimd128RR, g.DefineAsRegister(node), + g.UseRegister(node->InputAt(0))); } - Emit(kS390_LoadReverseSimd128RR, g.DefineAsRegister(node), - g.UseRegister(node->InputAt(0))); } template diff --git a/deps/v8/src/compiler/backend/x64/code-generator-x64.cc b/deps/v8/src/compiler/backend/x64/code-generator-x64.cc index dcb361bcfc6baa..5691a8b6d88c7d 100644 --- a/deps/v8/src/compiler/backend/x64/code-generator-x64.cc +++ b/deps/v8/src/compiler/backend/x64/code-generator-x64.cc @@ -1399,10 +1399,12 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( __ Call(code, RelocInfo::CODE_TARGET); } else { Register reg = i.InputRegister(0); + CodeEntrypointTag tag = + i.InputCodeEntrypointTag(instr->CodeEnrypointTagInputIndex()); DCHECK_IMPLIES( instr->HasCallDescriptorFlag(CallDescriptor::kFixedTargetRegister), reg == kJavaScriptCallCodeStartRegister); - __ LoadCodeInstructionStart(reg, reg); + __ LoadCodeInstructionStart(reg, reg, tag); __ call(reg); } RecordCallPosition(instr); @@ -1459,10 +1461,12 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( __ Jump(code, RelocInfo::CODE_TARGET); } else { Register reg = i.InputRegister(0); + CodeEntrypointTag tag = + i.InputCodeEntrypointTag(instr->CodeEnrypointTagInputIndex()); DCHECK_IMPLIES( instr->HasCallDescriptorFlag(CallDescriptor::kFixedTargetRegister), reg == kJavaScriptCallCodeStartRegister); - __ LoadCodeInstructionStart(reg, reg); + __ LoadCodeInstructionStart(reg, reg, tag); __ jmp(reg); } unwinding_info_writer_.MarkBlockWillExit(); @@ -1536,27 +1540,28 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( int const num_gp_parameters = ParamField::decode(instr->opcode()); int const num_fp_parameters = FPParamField::decode(instr->opcode()); Label return_location; + SetIsolateDataSlots set_isolate_data_slots = SetIsolateDataSlots::kYes; #if V8_ENABLE_WEBASSEMBLY if (linkage()->GetIncomingDescriptor()->IsWasmCapiFunction()) { // Put the return address in a stack slot. 
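The riscv64 and s390 hunks above, and the x64 hunk continuing below, all make the same change: when the callee is a Wasm C-API function, the exit frame records the calling PC itself, so CallCFunction is told not to publish the call through the isolate data slots. A minimal sketch of that choice (illustrative only; SetIsolateDataSlots mirrors the enum used in the patch, IsolateDataSlotsForCCall is a made-up name):

    // Sketch, not part of the patch.
    enum class SetIsolateDataSlots { kNo, kYes };

    SetIsolateDataSlots IsolateDataSlotsForCCall(bool is_wasm_capi_function) {
      // Normally CallCFunction records the C call in the isolate data slots so
      // the stack walker can attribute the frame; a Wasm C-API call already
      // stores its calling PC in the exit frame, so the slots are left alone.
      return is_wasm_capi_function ? SetIsolateDataSlots::kNo
                                   : SetIsolateDataSlots::kYes;
    }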
__ leaq(kScratchRegister, Operand(&return_location, 0)); __ movq(MemOperand(rbp, WasmExitFrameConstants::kCallingPCOffset), kScratchRegister); + set_isolate_data_slots = SetIsolateDataSlots::kNo; } #endif // V8_ENABLE_WEBASSEMBLY + int pc_offset; if (HasImmediateInput(instr, 0)) { ExternalReference ref = i.InputExternalReference(0); - __ CallCFunction(ref, num_gp_parameters + num_fp_parameters); + pc_offset = __ CallCFunction(ref, num_gp_parameters + num_fp_parameters, + set_isolate_data_slots, &return_location); } else { Register func = i.InputRegister(0); - __ CallCFunction(func, num_gp_parameters + num_fp_parameters); + pc_offset = + __ CallCFunction(func, num_gp_parameters + num_fp_parameters, + set_isolate_data_slots, &return_location); } - __ bind(&return_location); -#if V8_ENABLE_WEBASSEMBLY - if (linkage()->GetIncomingDescriptor()->IsWasmCapiFunction()) { - RecordSafepoint(instr->reference_map()); - } -#endif // V8_ENABLE_WEBASSEMBLY + RecordSafepoint(instr->reference_map(), pc_offset); frame_access_state()->SetFrameAccessToDefault(); // Ideally, we should decrement SP delta to match the change of stack // pointer in CallCFunction. However, for certain architectures (e.g. @@ -7146,6 +7151,12 @@ void CodeGenerator::AssembleConstructFrame() { if (frame()->GetReturnSlotCount() > 0) { __ AllocateStackSpace(frame()->GetReturnSlotCount() * kSystemPointerSize); } + + for (int spill_slot : frame()->tagged_slots()) { + FrameOffset offset = frame_access_state()->GetFrameOffset(spill_slot); + DCHECK(offset.from_frame_pointer()); + __ movq(Operand(rbp, offset.offset()), Immediate(0)); + } } void CodeGenerator::AssembleReturn(InstructionOperand* additional_pop_count) { diff --git a/deps/v8/src/compiler/backend/x64/instruction-selector-x64.cc b/deps/v8/src/compiler/backend/x64/instruction-selector-x64.cc index 7e592318b265c6..09b57b4ebf5de9 100644 --- a/deps/v8/src/compiler/backend/x64/instruction-selector-x64.cc +++ b/deps/v8/src/compiler/backend/x64/instruction-selector-x64.cc @@ -23,6 +23,7 @@ #include "src/compiler/node-matchers.h" #include "src/compiler/node-properties.h" #include "src/compiler/opcodes.h" +#include "src/compiler/turboshaft/load-store-simplification-reducer.h" #include "src/compiler/turboshaft/operations.h" #include "src/compiler/turboshaft/opmasks.h" #include "src/compiler/turboshaft/representations.h" @@ -487,7 +488,6 @@ class X64OperandGeneratorT final : public OperandGeneratorT { return false; } - DECLARE_UNREACHABLE_TURBOSHAFT_FALLBACK(int32_t, GetImmediateIntegerValue) int32_t GetImmediateIntegerValue(node_t node) { DCHECK(CanBeImmediate(node)); auto constant = this->constant_view(node); @@ -499,7 +499,6 @@ class X64OperandGeneratorT final : public OperandGeneratorT { return static_cast(constant.number_value()); } - DECLARE_UNREACHABLE_TURBOSHAFT_FALLBACK(bool, CanBeMemoryOperand) bool CanBeMemoryOperand(InstructionCode opcode, node_t node, node_t input, int effect_level) { if (!this->IsLoadOrLoadImmutable(input)) return false; @@ -558,8 +557,13 @@ class X64OperandGeneratorT final : public OperandGeneratorT { bool ValueFitsIntoImmediate(int64_t value) const { // int32_t min will overflow if displacement mode is kNegativeDisplacement. 
- return std::numeric_limits::min() < value && - value <= std::numeric_limits::max(); + constexpr int64_t kImmediateMin = std::numeric_limits::min() + 1; + constexpr int64_t kImmediateMax = std::numeric_limits::max(); + static_assert(kImmediateMin == + turboshaft::LoadStoreSimplificationConfiguration::kMinOffset); + static_assert(kImmediateMax == + turboshaft::LoadStoreSimplificationConfiguration::kMaxOffset); + return kImmediateMin <= value && value <= kImmediateMax; } bool IsZeroIntConstant(node_t node) const { @@ -714,9 +718,6 @@ class X64OperandGeneratorT final : public OperandGeneratorT { } } - DECLARE_UNREACHABLE_TURBOSHAFT_FALLBACK(AddressingMode, - GetEffectiveAddressMemoryOperand) - AddressingMode GetEffectiveAddressMemoryOperand( node_t operand, InstructionOperand inputs[], size_t* input_count, RegisterUseKind reg_kind = RegisterUseKind::kUseRegister); @@ -732,7 +733,6 @@ class X64OperandGeneratorT final : public OperandGeneratorT { } } - DECLARE_UNREACHABLE_TURBOSHAFT_FALLBACK(bool, CanBeBetterLeftOperand) bool CanBeBetterLeftOperand(node_t node) const { return !selector()->IsLive(node); } @@ -822,11 +822,11 @@ X64OperandGeneratorT::GetEffectiveAddressMemoryOperand( // modes for the scale. UNIMPLEMENTED(); } else { - const turboshaft::Operation& op = this->turboshaft_graph()->Get(operand); - DCHECK_GE(op.input_count, 2); - - inputs[(*input_count)++] = UseRegister(op.input(0), reg_kind); - inputs[(*input_count)++] = UseRegister(op.input(1), reg_kind); + // TODO(nicohartmann@): Turn this into a `DCHECK` once we have some + // coverage. + CHECK_EQ(m->displacement, 0); + inputs[(*input_count)++] = UseRegister(m->base, reg_kind); + inputs[(*input_count)++] = UseRegister(m->index, reg_kind); return kMode_MR1; } } @@ -1036,7 +1036,8 @@ void InstructionSelectorT::VisitTraceInstruction(node_t node) { template void InstructionSelectorT::VisitStackSlot(node_t node) { StackSlotRepresentation rep = this->stack_slot_representation_of(node); - int slot = frame_->AllocateSpillSlot(rep.size(), rep.alignment()); + int slot = + frame_->AllocateSpillSlot(rep.size(), rep.alignment(), rep.is_tagged()); OperandGenerator g(this); Emit(kArchStackSlot, g.DefineAsRegister(node), @@ -2251,7 +2252,7 @@ void InstructionSelectorT::VisitWord32ReverseBytes(node_t node) { } template -void InstructionSelectorT::VisitSimd128ReverseBytes(Node* node) { +void InstructionSelectorT::VisitSimd128ReverseBytes(node_t node) { UNREACHABLE(); } @@ -2284,13 +2285,13 @@ void InstructionSelectorT::VisitInt32Add(node_t node) { if (const turboshaft::ChangeOp* change = this->Get(left) .template TryCast< - turboshaft::Opmask::kTruncateInt64ToInt32>()) { + turboshaft::Opmask::kTruncateWord64ToWord32>()) { left = change->input(); } if (const turboshaft::ChangeOp* change = this->Get(right) .template TryCast< - turboshaft::Opmask::kTruncateInt64ToInt32>()) { + turboshaft::Opmask::kTruncateWord64ToWord32>()) { right = change->input(); } @@ -2340,7 +2341,6 @@ void InstructionSelectorT::VisitInt64AddWithOverflow(node_t node) { template <> void InstructionSelectorT::VisitInt32Sub(node_t node) { - // TODO(mliedtke): Handle truncate consistently with Turbofan. 
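The ValueFitsIntoImmediate change earlier in this hunk tightens the accepted range to [INT32_MIN + 1, INT32_MAX] (the most negative int32 would overflow when negated for a negative-displacement addressing mode) and asserts that it matches the turboshaft load/store simplification offsets. A hedged standalone sketch, with kAssumedMin/MaxOffset standing in for LoadStoreSimplificationConfiguration::kMinOffset/kMaxOffset:

    #include <cstdint>
    #include <limits>

    // Assumed stand-ins for turboshaft::LoadStoreSimplificationConfiguration.
    constexpr int64_t kAssumedMinOffset = std::numeric_limits<int32_t>::min() + 1;
    constexpr int64_t kAssumedMaxOffset = std::numeric_limits<int32_t>::max();

    constexpr bool ValueFitsIntoImmediate(int64_t value) {
      // INT32_MIN is excluded: negating it for kNegativeDisplacement overflows,
      // so the legal range is [INT32_MIN + 1, INT32_MAX].
      constexpr int64_t kImmediateMin = std::numeric_limits<int32_t>::min() + 1;
      constexpr int64_t kImmediateMax = std::numeric_limits<int32_t>::max();
      static_assert(kImmediateMin == kAssumedMinOffset);
      static_assert(kImmediateMax == kAssumedMaxOffset);
      return kImmediateMin <= value && value <= kImmediateMax;
    }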
X64OperandGeneratorT g(this); auto binop = this->word_binop_view(node); auto left = binop.left(); diff --git a/deps/v8/src/compiler/bytecode-graph-builder.cc b/deps/v8/src/compiler/bytecode-graph-builder.cc index ff5b96b7895a61..1c1ad26dc3acc0 100644 --- a/deps/v8/src/compiler/bytecode-graph-builder.cc +++ b/deps/v8/src/compiler/bytecode-graph-builder.cc @@ -1725,6 +1725,26 @@ void BytecodeGraphBuilder::VisitStaCurrentContextSlot() { NewNode(op, value); } +void BytecodeGraphBuilder::VisitStaScriptContextSlot() { + PrepareEagerCheckpoint(); + const Operator* op = javascript()->StoreScriptContext( + bytecode_iterator().GetUnsignedImmediateOperand(2), + bytecode_iterator().GetIndexOperand(1)); + Node* value = environment()->LookupAccumulator(); + Node* node = NewNode(op, value); + Node* context = + environment()->LookupRegister(bytecode_iterator().GetRegisterOperand(0)); + NodeProperties::ReplaceContextInput(node, context); +} + +void BytecodeGraphBuilder::VisitStaCurrentScriptContextSlot() { + PrepareEagerCheckpoint(); + const Operator* op = javascript()->StoreScriptContext( + 0, bytecode_iterator().GetIndexOperand(0)); + Node* value = environment()->LookupAccumulator(); + NewNode(op, value); +} + void BytecodeGraphBuilder::BuildLdaLookupSlot(TypeofMode typeof_mode) { PrepareEagerCheckpoint(); Node* name = @@ -1807,6 +1827,11 @@ BytecodeGraphBuilder::Environment* BytecodeGraphBuilder::CheckContextExtensions( // in the same scope as the variable itself has no way of shadowing it. Environment* slow_environment = nullptr; for (uint32_t d = 0; d < depth; d++) { + // Const tracking let data is stored in the extension slot of a + // ScriptContext - however, it's unrelated to the sloppy eval variable + // extension. We should never iterate through a ScriptContext here. + DCHECK_NE(scope_info.scope_type(), ScopeType::SCRIPT_SCOPE); + if (scope_info.HasContextExtensionSlot()) { slow_environment = CheckContextExtensionAtDepth(slow_environment, d); } diff --git a/deps/v8/src/compiler/c-linkage.cc b/deps/v8/src/compiler/c-linkage.cc index 9742e41b0c9b76..af7c4be8dbf812 100644 --- a/deps/v8/src/compiler/c-linkage.cc +++ b/deps/v8/src/compiler/c-linkage.cc @@ -345,8 +345,10 @@ CallDescriptor* Linkage::GetSimplifiedCDescriptor(Zone* zone, LinkageLocation target_loc = LinkageLocation::ForAnyRegister(target_type); flags |= CallDescriptor::kNoAllocate; + // TODO(saelo): here we probably want to use a c-call specific tag. 
return zone->New( // -- CallDescriptor::kCallAddress, // kind + kDefaultCodeEntrypointTag, // tag target_type, // target MachineType target_loc, // target location locations.Build(), // location_sig diff --git a/deps/v8/src/compiler/code-assembler.h b/deps/v8/src/compiler/code-assembler.h index c746a419536497..db09113a9041c6 100644 --- a/deps/v8/src/compiler/code-assembler.h +++ b/deps/v8/src/compiler/code-assembler.h @@ -122,7 +122,6 @@ class WasmExceptionPackage; class WasmExceptionTag; class WasmExportedFunctionData; class WasmGlobalObject; -class WasmIndirectFunctionTable; class WasmJSFunctionData; class WasmMemoryObject; class WasmModuleObject; @@ -158,6 +157,7 @@ OBJECT_TYPE_CASE(HeapObject) OBJECT_TYPE_CASE(HeapObjectReference) OBJECT_TYPE_LIST(OBJECT_TYPE_CASE) HEAP_OBJECT_ORDINARY_TYPE_LIST(OBJECT_TYPE_CASE) +HEAP_OBJECT_TRUSTED_TYPE_LIST(OBJECT_TYPE_CASE) STRUCT_LIST(OBJECT_TYPE_STRUCT_CASE) HEAP_OBJECT_TEMPLATE_TYPE_LIST(OBJECT_TYPE_TEMPLATE_CASE) OBJECT_TYPE_ODDBALL_CASE(Null) diff --git a/deps/v8/src/compiler/compilation-dependencies.cc b/deps/v8/src/compiler/compilation-dependencies.cc index dfafdbf8e40e8b..5758826b4908db 100644 --- a/deps/v8/src/compiler/compilation-dependencies.cc +++ b/deps/v8/src/compiler/compilation-dependencies.cc @@ -39,7 +39,8 @@ namespace compiler { V(PrototypeProperty) \ V(StableMap) \ V(Transition) \ - V(ObjectSlotValue) + V(ObjectSlotValue) \ + V(ConstTrackingLet) CompilationDependencies::CompilationDependencies(JSHeapBroker* broker, Zone* zone) @@ -899,6 +900,44 @@ class GlobalPropertyDependency final : public CompilationDependency { const bool read_only_; }; +class ConstTrackingLetDependency final : public CompilationDependency { + public: + ConstTrackingLetDependency(ContextRef script_context, size_t index) + : CompilationDependency(kConstTrackingLet), + script_context_(script_context), + index_(index) { + DCHECK(v8_flags.const_tracking_let); + } + + bool IsValid(JSHeapBroker* broker) const override { + return script_context_.object()->ConstTrackingLetSideDataIsConst(index_); + } + + void Install(JSHeapBroker* broker, PendingDependencies* deps) const override { + SLOW_DCHECK(IsValid(broker)); + Isolate* isolate = broker->isolate(); + deps->Register(handle(Context::GetOrCreateConstTrackingLetCell( + script_context_.object(), index_, isolate), + isolate), + DependentCode::kConstTrackingLetChangedGroup); + } + + private: + size_t Hash() const override { + ObjectRef::Hash h; + return base::hash_combine(h(script_context_), index_); + } + + bool Equals(const CompilationDependency* that) const override { + const ConstTrackingLetDependency* const zat = that->AsConstTrackingLet(); + return script_context_.equals(zat->script_context_) && + index_ == zat->index_; + } + + const ContextRef script_context_; + size_t index_; +}; + class ProtectorDependency final : public CompilationDependency { public: explicit ProtectorDependency(PropertyCellRef cell) @@ -1195,6 +1234,35 @@ void CompilationDependencies::DependOnGlobalProperty(PropertyCellRef cell) { RecordDependency(zone_->New(cell, type, read_only)); } +bool CompilationDependencies::DependOnConstTrackingLet( + ContextRef script_context, size_t index, JSHeapBroker* broker) { + if (v8_flags.const_tracking_let) { + OptionalObjectRef maybe_side_data = + script_context.TryGetSideData(broker, static_cast(index)); + // The side data element is either + // - kConstMarker (the value is a constant thus far but no code depends on + // it yet) + // - a ConstTrackingLetCell pointing to a DependentCode (the value is a + 
// constant thus far and some code depends on it) + // - kNonConstMarker (the value is no longer a constant) + // - undefined (we're reading an uninitialized value (this will throw but we + // might still optimize the code which does that)) + // In the first 2 cases we can embed the value as a constant in the code. + if (maybe_side_data.has_value()) { + ObjectRef side_data = maybe_side_data.value(); + if ((side_data.IsSmi() && + side_data.AsSmi() == + Smi::ToInt(ConstTrackingLetCell::kConstMarker)) || + (!side_data.IsSmi() && !side_data.IsUndefined())) { + RecordDependency( + zone_->New(script_context, index)); + return true; + } + } + } + return false; +} + bool CompilationDependencies::DependOnProtector(PropertyCellRef cell) { cell.CacheAsProtector(broker_); if (cell.value(broker_).AsSmi() != Protectors::kProtectorValid) return false; diff --git a/deps/v8/src/compiler/compilation-dependencies.h b/deps/v8/src/compiler/compilation-dependencies.h index 05c2829837e305..ad57b2d75eb9bc 100644 --- a/deps/v8/src/compiler/compilation-dependencies.h +++ b/deps/v8/src/compiler/compilation-dependencies.h @@ -27,7 +27,7 @@ class SlackTrackingPrediction { class CompilationDependency; // Collects and installs dependencies of the code that is being generated. -class V8_EXPORT_PRIVATE CompilationDependencies : public ZoneObject { +class V8_EXPORT CompilationDependencies : public ZoneObject { public: CompilationDependencies(JSHeapBroker* broker, Zone* zone); @@ -82,6 +82,11 @@ class V8_EXPORT_PRIVATE CompilationDependencies : public ZoneObject { // {IsReadOnly()} flag of {cell}'s {PropertyDetails}. void DependOnGlobalProperty(PropertyCellRef cell); + // Record the assumption that a const-tracked let variable doesn't change, if + // true. + bool DependOnConstTrackingLet(ContextRef script_context, size_t index, + JSHeapBroker* broker); + // Return the validity of the given protector and, if true, record the // assumption that the protector remains valid. bool DependOnProtector(PropertyCellRef cell); diff --git a/deps/v8/src/compiler/const-tracking-let-helpers.cc b/deps/v8/src/compiler/const-tracking-let-helpers.cc new file mode 100644 index 00000000000000..65df501d324f53 --- /dev/null +++ b/deps/v8/src/compiler/const-tracking-let-helpers.cc @@ -0,0 +1,71 @@ +// Copyright 2024 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#include "src/compiler/const-tracking-let-helpers.h" + +#include "src/compiler/access-builder.h" +#include "src/compiler/js-graph.h" +#include "src/compiler/node.h" +#include "src/compiler/simplified-operator.h" +#include "src/objects/property-cell.h" + +namespace v8::internal::compiler { + +int ConstTrackingLetSideDataIndexForAccess(size_t access_index) { + return static_cast(access_index) - Context::MIN_CONTEXT_EXTENDED_SLOTS; +} + +void GenerateCheckConstTrackingLetSideData(Node* context, Node** effect, + Node** control, int side_data_index, + JSGraph* jsgraph) { + Node* side_data = *effect = jsgraph->graph()->NewNode( + jsgraph->simplified()->LoadField(AccessBuilder::ForContextSlot( + Context::CONST_TRACKING_LET_SIDE_DATA_INDEX)), + context, *effect, *control); + Node* side_data_value = *effect = jsgraph->graph()->NewNode( + jsgraph->simplified()->LoadField( + AccessBuilder::ForFixedArraySlot(side_data_index)), + side_data, *effect, *control); + + // TODO(v8:13567): If the value is the same as the value we already have, we + // don't need to deopt. 
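The comment above enumerates the four states a const-tracking-let side-data slot can be in, and DependOnConstTrackingLet embeds the value as a constant only in the first two. A compact sketch of that decision; the SideDataState names are editorial, only the states themselves come from the comment:

    // Illustrative only. Models the side-data slot states described above:
    //   kConstMarker          -- constant so far, no code depends on it yet
    //   ConstTrackingLetCell  -- constant so far, some code depends on it
    //   kNonConstMarker       -- no longer a constant
    //   undefined             -- reading an uninitialized value
    enum class SideDataState { kConstNoDeps, kConstWithDeps, kNotAConst, kUninitialized };

    // Only the two "still a constant" states allow embedding the value in
    // optimized code (after registering a ConstTrackingLet dependency).
    bool MayEmbedAsConstant(SideDataState state) {
      return state == SideDataState::kConstNoDeps ||
             state == SideDataState::kConstWithDeps;
    }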
+ + // Deoptimize if the side_data_value is something else than the "not a + // constant" sentinel: the value might be a constant and something might + // depend on it. + static_assert(ConstTrackingLetCell::kNonConstMarker.value() == 0); + Node* check = + jsgraph->graph()->NewNode(jsgraph->simplified()->ReferenceEqual(), + side_data_value, jsgraph->ZeroConstant()); + *effect = jsgraph->graph()->NewNode( + jsgraph->simplified()->CheckIf(DeoptimizeReason::kConstTrackingLet), + check, *effect, *control); +} + +bool IsConstTrackingLetVariableSurelyNotConstant( + OptionalContextRef maybe_context, size_t depth, int side_data_index, + JSHeapBroker* broker) { + if (maybe_context.has_value() && depth == 0) { + ContextRef context = maybe_context.value(); + OptionalObjectRef side_data = + context.get(broker, Context::CONST_TRACKING_LET_SIDE_DATA_INDEX); + if (side_data.has_value()) { + OptionalObjectRef side_data_value = + side_data->AsFixedArray().TryGet(broker, side_data_index); + if (side_data_value.has_value()) { + auto value = side_data_value.value(); + if (value.IsSmi() && + value.AsSmi() == + Smi::ToInt(ConstTrackingLetCell::kNonConstMarker)) { + // The value is not a constant any more. + return true; + } + } + } + } + // Either the value is not a constant, or we don't know. + return false; +} + +} // namespace v8::internal::compiler diff --git a/deps/v8/src/compiler/const-tracking-let-helpers.h b/deps/v8/src/compiler/const-tracking-let-helpers.h new file mode 100644 index 00000000000000..b91d25e38b7a95 --- /dev/null +++ b/deps/v8/src/compiler/const-tracking-let-helpers.h @@ -0,0 +1,30 @@ +// Copyright 2024 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
+ +#ifndef V8_COMPILER_CONST_TRACKING_LET_HELPERS_H_ +#define V8_COMPILER_CONST_TRACKING_LET_HELPERS_H_ + +#include + +#include "src/compiler/heap-refs.h" + +namespace v8::internal::compiler { + +class HeapBroker; +class JSGraph; +class Node; + +int ConstTrackingLetSideDataIndexForAccess(size_t access_index); + +void GenerateCheckConstTrackingLetSideData(Node* context, Node** effect, + Node** control, int side_data_index, + JSGraph* jsgraph); + +bool IsConstTrackingLetVariableSurelyNotConstant( + OptionalContextRef maybe_context, size_t depth, int side_data_index, + JSHeapBroker* broker); + +} // namespace v8::internal::compiler + +#endif // V8_COMPILER_CONST_TRACKING_LET_HELPERS_H_ diff --git a/deps/v8/src/compiler/effect-control-linearizer.cc b/deps/v8/src/compiler/effect-control-linearizer.cc index 4d42a7cbce0d49..14fc78c877332f 100644 --- a/deps/v8/src/compiler/effect-control-linearizer.cc +++ b/deps/v8/src/compiler/effect-control-linearizer.cc @@ -81,6 +81,7 @@ class EffectControlLinearizer { Node* LowerChangeUint64ToTagged(Node* node); Node* LowerChangeFloat64ToTagged(Node* node); Node* LowerChangeFloat64ToTaggedPointer(Node* node); + Node* LowerChangeFloat64HoleToTagged(Node* node); Node* LowerChangeTaggedSignedToInt32(Node* node); Node* LowerChangeTaggedSignedToInt64(Node* node); Node* LowerChangeTaggedToBit(Node* node); @@ -1064,6 +1065,9 @@ bool EffectControlLinearizer::TryWireInStateEffect(Node* node, if (v8_flags.turboshaft) return false; result = LowerChangeFloat64ToTaggedPointer(node); break; + case IrOpcode::kChangeFloat64HoleToTagged: + result = LowerChangeFloat64HoleToTagged(node); + break; case IrOpcode::kChangeTaggedSignedToInt32: if (v8_flags.turboshaft) return false; result = LowerChangeTaggedSignedToInt32(node); @@ -6069,6 +6073,34 @@ Node* EffectControlLinearizer::LowerCheckFloat64Hole(Node* node, return value; } +Node* EffectControlLinearizer::LowerChangeFloat64HoleToTagged(Node* node) { + DCHECK(!v8_flags.turboshaft); + Node* value = node->InputAt(0); + + auto if_nan = __ MakeDeferredLabel(); + auto allocate_heap_number = __ MakeLabel(); + auto done = __ MakeLabel(MachineRepresentation::kTagged); + + // First check whether {value} is a NaN at all... + __ Branch(__ Float64Equal(value, value), &allocate_heap_number, &if_nan); + + __ Bind(&if_nan); + { + // ...and only if {value} is a NaN, perform the expensive bit + // check. See http://crbug.com/v8/8264 for details. 
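The LowerChangeFloat64HoleToTagged lowering that starts above (and continues below) maps the hole NaN to undefined and boxes every other double as a HeapNumber, doing the cheap self-comparison before the expensive bit check. A hedged sketch of the same computation on raw doubles; kAssumedHoleNanUpper32 is a placeholder for V8's kHoleNanUpper32:

    #include <cstdint>
    #include <cstring>

    // Placeholder value; the authoritative constant is V8's kHoleNanUpper32.
    constexpr uint32_t kAssumedHoleNanUpper32 = 0xFFF7FFFF;

    enum class TaggedResult { kUndefined, kHeapNumber };

    TaggedResult ChangeFloat64HoleToTagged(double value) {
      // Only NaNs can possibly be the hole, so filter non-NaNs cheaply first...
      if (value == value) return TaggedResult::kHeapNumber;
      // ...and only for NaNs inspect the high word to tell the hole NaN apart
      // from ordinary NaNs (see the crbug.com/v8/8264 reference above).
      uint64_t bits;
      std::memcpy(&bits, &value, sizeof(bits));
      uint32_t upper = static_cast<uint32_t>(bits >> 32);
      return upper == kAssumedHoleNanUpper32 ? TaggedResult::kUndefined
                                             : TaggedResult::kHeapNumber;
    }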
+ __ GotoIfNot(__ Word32Equal(__ Float64ExtractHighWord32(value), + __ Int32Constant(kHoleNanUpper32)), + &allocate_heap_number); + __ Goto(&done, __ UndefinedConstant()); + } + + __ Bind(&allocate_heap_number); + __ Goto(&done, AllocateHeapNumberWithValue(value)); + + __ Bind(&done); + return done.PhiAt(0); +} + Node* EffectControlLinearizer::LowerCheckNotTaggedHole(Node* node, Node* frame_state) { DCHECK(!v8_flags.turboshaft); @@ -6787,8 +6819,6 @@ Node* EffectControlLinearizer::ClampFastCallArgument( Node* EffectControlLinearizer::AdaptFastCallArgument( Node* node, CTypeInfo arg_type, GraphAssemblerLabel<0>* if_error) { - int kAlign = alignof(uintptr_t); - int kSize = sizeof(uintptr_t); switch (arg_type.GetSequenceType()) { case CTypeInfo::SequenceType::kScalar: { uint8_t flags = uint8_t(arg_type.GetFlags()); @@ -6821,12 +6851,7 @@ Node* EffectControlLinearizer::AdaptFastCallArgument( } else { switch (arg_type.GetType()) { case CTypeInfo::Type::kV8Value: { - Node* stack_slot = __ StackSlot(kSize, kAlign); - __ Store(StoreRepresentation(MachineType::PointerRepresentation(), - kNoWriteBarrier), - stack_slot, 0, __ BitcastTaggedToWord(node)); - - return stack_slot; + return fast_api_call::AdaptLocalArgument(gasm(), node); } case CTypeInfo::Type::kFloat32: { return __ TruncateFloat64ToFloat32(node); @@ -6922,10 +6947,7 @@ Node* EffectControlLinearizer::AdaptFastCallArgument( Node* value_is_smi = ObjectIsSmi(node); __ GotoIf(value_is_smi, if_error); - Node* stack_slot = __ StackSlot(kSize, kAlign); - __ Store(StoreRepresentation(MachineType::PointerRepresentation(), - kNoWriteBarrier), - stack_slot, 0, __ BitcastTaggedToWord(node)); + Node* node_to_pass = fast_api_call::AdaptLocalArgument(gasm(), node); // Check that the value is a JSArray. Node* value_map = __ LoadField(AccessBuilder::ForMap(), node); @@ -6935,7 +6957,7 @@ Node* EffectControlLinearizer::AdaptFastCallArgument( __ Word32Equal(value_instance_type, __ Int32Constant(JS_ARRAY_TYPE)); __ GotoIfNot(value_is_js_array, if_error); - return stack_slot; + return node_to_pass; } case CTypeInfo::SequenceType::kIsTypedArray: { // Check that the value is a HeapObject. 
@@ -6985,17 +7007,11 @@ EffectControlLinearizer::AdaptOverloadedFastCallArgument( value_instance_type, __ Int32Constant(JS_ARRAY_TYPE)); __ GotoIfNot(value_is_js_array, &next); - int kAlign = alignof(uintptr_t); - int kSize = sizeof(uintptr_t); - Node* stack_slot = __ StackSlot(kSize, kAlign); - - __ Store(StoreRepresentation(MachineType::PointerRepresentation(), - kNoWriteBarrier), - stack_slot, 0, __ BitcastTaggedToWord(node)); + Node* node_to_pass = fast_api_call::AdaptLocalArgument(gasm(), node); Node* target_address = __ ExternalConstant(ExternalReference::Create( c_functions[func_index].address, ref_type)); - __ Goto(&merge, target_address, stack_slot); + __ Goto(&merge, target_address, node_to_pass); break; } diff --git a/deps/v8/src/compiler/fast-api-calls.cc b/deps/v8/src/compiler/fast-api-calls.cc index 6086554ffe862f..935cc0ad7beefd 100644 --- a/deps/v8/src/compiler/fast-api-calls.cc +++ b/deps/v8/src/compiler/fast-api-calls.cc @@ -349,16 +349,13 @@ Node* FastApiCallBuilder::Build(const FastApiCallFunctionVector& c_functions, static_cast(offsetof(v8::FastApiCallbackOptions, fallback)), __ Int32Constant(0)); - Node* data_stack_slot = __ StackSlot(sizeof(uintptr_t), alignof(uintptr_t)); - __ Store(StoreRepresentation(MachineType::PointerRepresentation(), - kNoWriteBarrier), - data_stack_slot, 0, __ BitcastTaggedToWord(data_argument)); + Node* data_argument_to_pass = AdaptLocalArgument(gasm(), data_argument); __ Store(StoreRepresentation(MachineType::PointerRepresentation(), kNoWriteBarrier), stack_slot, static_cast(offsetof(v8::FastApiCallbackOptions, data)), - data_stack_slot); + data_argument_to_pass); initialize_options_(stack_slot); diff --git a/deps/v8/src/compiler/fast-api-calls.h b/deps/v8/src/compiler/fast-api-calls.h index b97b37e5746433..c05e302988101b 100644 --- a/deps/v8/src/compiler/fast-api-calls.h +++ b/deps/v8/src/compiler/fast-api-calls.h @@ -60,6 +60,26 @@ Node* BuildFastApiCall(Isolate* isolate, Graph* graph, const InitializeOptions& initialize_options, const GenerateSlowApiCall& generate_slow_api_call); +inline Node* AdaptLocalArgument(GraphAssembler* graph_assembler, + Node* argument) { +#define __ graph_assembler-> + +#ifdef V8_ENABLE_DIRECT_LOCAL + // With direct locals, the argument can be passed directly. + return __ BitcastTaggedToWord(argument); +#else + // With indirect locals, the argument has to be stored on the stack and the + // slot address is passed. 
+ Node* stack_slot = __ StackSlot(sizeof(uintptr_t), alignof(uintptr_t), true); + __ Store(StoreRepresentation(MachineType::PointerRepresentation(), + kNoWriteBarrier), + stack_slot, 0, __ BitcastTaggedToWord(argument)); + return stack_slot; +#endif + +#undef __ +} + } // namespace fast_api_call } // namespace compiler } // namespace internal diff --git a/deps/v8/src/compiler/frame.cc b/deps/v8/src/compiler/frame.cc index 6413adfe49d16d..7dc7a87ad654cc 100644 --- a/deps/v8/src/compiler/frame.cc +++ b/deps/v8/src/compiler/frame.cc @@ -8,10 +8,11 @@ namespace v8 { namespace internal { namespace compiler { -Frame::Frame(int fixed_frame_size_in_slots) +Frame::Frame(int fixed_frame_size_in_slots, Zone* zone) : fixed_slot_count_(fixed_frame_size_in_slots), allocated_registers_(nullptr), - allocated_double_registers_(nullptr) { + allocated_double_registers_(nullptr), + zone_(zone) { slot_allocator_.AllocateUnaligned(fixed_frame_size_in_slots); } diff --git a/deps/v8/src/compiler/frame.h b/deps/v8/src/compiler/frame.h index 96eb901d2b5d5a..6786698f6da660 100644 --- a/deps/v8/src/compiler/frame.h +++ b/deps/v8/src/compiler/frame.h @@ -90,7 +90,7 @@ class CallDescriptor; // class V8_EXPORT_PRIVATE Frame : public ZoneObject { public: - explicit Frame(int fixed_frame_size_in_slots); + explicit Frame(int fixed_frame_size_in_slots, Zone* zone); Frame(const Frame&) = delete; Frame& operator=(const Frame&) = delete; @@ -135,9 +135,11 @@ class V8_EXPORT_PRIVATE Frame : public ZoneObject { slot_allocator_.AllocateUnaligned(count); } - int AllocateSpillSlot(int width, int alignment = 0) { + int AllocateSpillSlot(int width, int alignment = 0, bool is_tagged = false) { DCHECK_EQ(GetTotalFrameSlotCount(), fixed_slot_count_ + spill_slot_count_ + return_slot_count_); + DCHECK_IMPLIES(is_tagged, width == sizeof(uintptr_t)); + DCHECK_IMPLIES(is_tagged, alignment == sizeof(uintptr_t)); // Never allocate spill slots after the callee-saved slots are defined. 
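The Frame changes here thread an is_tagged bit through AllocateSpillSlot into a tagged_slots() bit vector, and the x64 AssembleConstructFrame hunk earlier zero-initializes exactly those slots so the GC never scans uninitialized memory. A minimal sketch of the bookkeeping, with std::set standing in for GrowableBitVector and the zeroing reduced to clearing a plain array:

    #include <cstdint>
    #include <set>
    #include <vector>

    // Sketch only: std::set<int> stands in for V8's GrowableBitVector.
    struct FrameSketch {
      std::set<int> tagged_slots;
      int next_slot = 0;

      int AllocateSpillSlot(bool is_tagged) {
        int slot = next_slot++;
        // Remember which spill slots hold tagged values so the code generator
        // can make them GC-safe before the first safepoint.
        if (is_tagged) tagged_slots.insert(slot);
        return slot;
      }
    };

    void ZeroTaggedSlots(const FrameSketch& frame, std::vector<uintptr_t>& slots) {
      // Mirrors the AssembleConstructFrame loop: write 0 (a valid Smi) into
      // every tagged spill slot at frame construction time.
      for (int slot : frame.tagged_slots) slots[slot] = 0;
    }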
DCHECK(!spill_slots_finished_); DCHECK(!frame_aligned_); @@ -163,7 +165,9 @@ class V8_EXPORT_PRIVATE Frame : public ZoneObject { int end = slot_allocator_.Size(); spill_slot_count_ += end - old_end; - return slot + slots - 1; + int result_slot = slot + slots - 1; + if (is_tagged) tagged_slots_bits_.Add(result_slot, zone_); + return result_slot; } void EnsureReturnSlots(int count) { @@ -181,6 +185,8 @@ class V8_EXPORT_PRIVATE Frame : public ZoneObject { return slot_allocator_.Size() - 1; } + const GrowableBitVector& tagged_slots() const { return tagged_slots_bits_; } + private: int fixed_slot_count_; int spill_slot_count_ = 0; @@ -190,6 +196,8 @@ class V8_EXPORT_PRIVATE Frame : public ZoneObject { AlignedSlotAllocator slot_allocator_; BitVector* allocated_registers_; BitVector* allocated_double_registers_; + Zone* zone_; + GrowableBitVector tagged_slots_bits_; #if DEBUG bool spill_slots_finished_ = false; bool frame_aligned_ = false; diff --git a/deps/v8/src/compiler/graph-assembler.cc b/deps/v8/src/compiler/graph-assembler.cc index 2e330e60177911..5ad82d160d435e 100644 --- a/deps/v8/src/compiler/graph-assembler.cc +++ b/deps/v8/src/compiler/graph-assembler.cc @@ -1030,9 +1030,10 @@ Node* GraphAssembler::UnreachableWithoutConnectToEnd() { graph()->NewNode(common()->Unreachable(), effect(), control())); } -TNode GraphAssembler::StackSlot(int size, int alignment) { +TNode GraphAssembler::StackSlot(int size, int alignment, + bool is_tagged) { return AddNode( - graph()->NewNode(machine()->StackSlot(size, alignment))); + graph()->NewNode(machine()->StackSlot(size, alignment, is_tagged))); } Node* GraphAssembler::Store(StoreRepresentation rep, Node* object, Node* offset, diff --git a/deps/v8/src/compiler/graph-assembler.h b/deps/v8/src/compiler/graph-assembler.h index 6e678420576853..f7c8b77d55daa2 100644 --- a/deps/v8/src/compiler/graph-assembler.h +++ b/deps/v8/src/compiler/graph-assembler.h @@ -401,7 +401,7 @@ class V8_EXPORT_PRIVATE GraphAssembler { } Node* Checkpoint(FrameState frame_state); - TNode StackSlot(int size, int alignment); + TNode StackSlot(int size, int alignment, bool is_tagged = false); Node* Store(StoreRepresentation rep, Node* object, Node* offset, Node* value); Node* Store(StoreRepresentation rep, Node* object, int offset, Node* value); diff --git a/deps/v8/src/compiler/graph-visualizer.cc b/deps/v8/src/compiler/graph-visualizer.cc index 956f510c00ad1a..1b1b67c1abb3b1 100644 --- a/deps/v8/src/compiler/graph-visualizer.cc +++ b/deps/v8/src/compiler/graph-visualizer.cc @@ -133,13 +133,15 @@ void JsonPrintFunctionSource(std::ostream& os, int source_id, sb << '\n'; str.write(sb.start(), sb.length()); - wasm::WireBytesRef wire_bytes_ref = - module->functions[function_data->function_index()].code; + const wasm::WasmFunction& function = + module->functions[function_data->function_index()]; + wasm::WireBytesRef wire_bytes_ref = function.code; base::Vector bytes(native_module->wire_bytes().SubVector( wire_bytes_ref.offset(), wire_bytes_ref.end_offset())); + bool is_shared = module->types[function.sig_index].is_shared; wasm::FunctionBody func_body{function_data->sig(), wire_bytes_ref.offset(), bytes.begin(), - bytes.end()}; + bytes.end(), is_shared}; AccountingAllocator allocator; wasm::PrintRawWasmCode(&allocator, func_body, module, wasm::kPrintLocals, str); @@ -279,8 +281,9 @@ void JsonPrintAllSourceWithPositionsWasm( << fct.func_index << "\", \"sourceName\": \"\", \"sourceText\": \""; wasm::WireBytesRef wire_bytes_ref = fct.code; base::Vector bytes = 
wire_bytes->GetCode(wire_bytes_ref); + bool is_shared = module->types[fct.sig_index].is_shared; wasm::FunctionBody func_body{fct.sig, wire_bytes_ref.offset(), - bytes.begin(), bytes.end()}; + bytes.begin(), bytes.end(), is_shared}; AccountingAllocator allocator; std::ostringstream wasm_str; wasm::PrintRawWasmCode(&allocator, func_body, module, wasm::kPrintLocals, @@ -294,6 +297,7 @@ void JsonPrintAllSourceWithPositionsWasm( os << "\"inlinings\": {"; for (size_t i = 0; i < positions.size(); ++i) { if (i != 0) os << ", "; + DCHECK(source_map.contains(positions[i].inlinee_func_index)); size_t source_id = source_map.find(positions[i].inlinee_func_index)->second; SourcePosition inlining_pos = positions[i].caller_pos; os << '"' << i << "\": {\"inliningId\": " << i diff --git a/deps/v8/src/compiler/heap-refs.cc b/deps/v8/src/compiler/heap-refs.cc index 6425b97868736e..3187640efcc1e4 100644 --- a/deps/v8/src/compiler/heap-refs.cc +++ b/deps/v8/src/compiler/heap-refs.cc @@ -966,6 +966,27 @@ OptionalObjectRef ContextRef::get(JSHeapBroker* broker, int index) const { return TryMakeRef(broker, object()->get(index)); } +OptionalObjectRef ContextRef::TryGetSideData(JSHeapBroker* broker, + int index) const { + if (!object()->IsScriptContext()) { + return {}; + } + + // No side data for slots which are not variables in the context. + if (index < Context::MIN_CONTEXT_EXTENDED_SLOTS) { + return {}; + } + + OptionalObjectRef maybe_side_data = + get(broker, Context::CONST_TRACKING_LET_SIDE_DATA_INDEX); + if (!maybe_side_data.has_value()) return {}; + // The FixedArray itself will stay constant, but its contents may change while + // we compile in the background. + FixedArrayRef side_data_fixed_array = maybe_side_data.value().AsFixedArray(); + return side_data_fixed_array.TryGet( + broker, index - Context::MIN_CONTEXT_EXTENDED_SLOTS); +} + void JSHeapBroker::InitializeAndStartSerializing( Handle target_native_context) { TraceScope tracer(this, "JSHeapBroker::InitializeAndStartSerializing"); @@ -1627,6 +1648,7 @@ HEAP_ACCESSOR_C(ScopeInfo, int, ContextLength) HEAP_ACCESSOR_C(ScopeInfo, bool, HasContextExtensionSlot) HEAP_ACCESSOR_C(ScopeInfo, bool, HasOuterScopeInfo) HEAP_ACCESSOR_C(ScopeInfo, bool, ClassScopeHasPrivateBrand) +HEAP_ACCESSOR_C(ScopeInfo, ScopeType, scope_type) ScopeInfoRef ScopeInfoRef::OuterScopeInfo(JSHeapBroker* broker) const { return MakeRefAssumeMemoryFence(broker, object()->OuterScopeInfo()); @@ -1999,7 +2021,7 @@ OptionalObjectRef JSObjectRef::GetOwnFastConstantDataProperty( return result; } -base::Optional JSObjectRef::GetOwnFastConstantDoubleProperty( +base::Optional JSObjectRef::GetOwnFastConstantDoubleProperty( JSHeapBroker* broker, FieldIndex index, CompilationDependencies* dependencies) const { base::Optional> constant = @@ -2014,7 +2036,7 @@ base::Optional JSObjectRef::GetOwnFastConstantDoubleProperty( dependencies->DependOnOwnConstantDoubleProperty(*this, map(broker), index, unboxed_value); - return unboxed_value.get_scalar(); + return unboxed_value; } OptionalObjectRef JSObjectRef::GetOwnDictionaryProperty( diff --git a/deps/v8/src/compiler/heap-refs.h b/deps/v8/src/compiler/heap-refs.h index e94b0939103ba7..249178dff14439 100644 --- a/deps/v8/src/compiler/heap-refs.h +++ b/deps/v8/src/compiler/heap-refs.h @@ -564,7 +564,7 @@ class JSObjectRef : public JSReceiverRef { // is constant. // If a property was successfully read, then the function will take a // dependency to check the value of the property at code finalization time. 
- base::Optional GetOwnFastConstantDoubleProperty( + base::Optional GetOwnFastConstantDoubleProperty( JSHeapBroker* broker, FieldIndex index, CompilationDependencies* dependencies) const; @@ -671,6 +671,9 @@ class ContextRef : public HeapObjectRef { OptionalObjectRef get(JSHeapBroker* broker, int index) const; ScopeInfoRef scope_info(JSHeapBroker* broker) const; + + // Only returns a value if the index is valid for this ContextRef. + OptionalObjectRef TryGetSideData(JSHeapBroker* broker, int index) const; }; #define BROKER_NATIVE_CONTEXT_FIELDS(V) \ @@ -1036,6 +1039,7 @@ class ScopeInfoRef : public HeapObjectRef { bool HasOuterScopeInfo() const; bool HasContextExtensionSlot() const; bool ClassScopeHasPrivateBrand() const; + ScopeType scope_type() const; ScopeInfoRef OuterScopeInfo(JSHeapBroker* broker) const; }; @@ -1236,6 +1240,16 @@ namespace compiler { template using ZoneRefSet = ZoneCompactSet::ref_type>; +inline bool AnyMapIsHeapNumber(const ZoneRefSet& maps) { + return std::any_of(maps.begin(), maps.end(), + [](MapRef map) { return map.IsHeapNumberMap(); }); +} + +inline bool AnyMapIsHeapNumber(const base::Vector& maps) { + return std::any_of(maps.begin(), maps.end(), + [](MapRef map) { return map.IsHeapNumberMap(); }); +} + } // namespace compiler } // namespace internal diff --git a/deps/v8/src/compiler/js-call-reducer.cc b/deps/v8/src/compiler/js-call-reducer.cc index a5d09dd69bcb8c..32b336b8286b3d 100644 --- a/deps/v8/src/compiler/js-call-reducer.cc +++ b/deps/v8/src/compiler/js-call-reducer.cc @@ -168,6 +168,15 @@ class JSCallReducerAssembler : public JSGraphAssembler { } } + TNode ConvertHoleToUndefined(TNode value, ElementsKind kind) { + DCHECK(IsHoleyElementsKind(kind)); + if (kind == HOLEY_DOUBLE_ELEMENTS) { + return AddNode( + graph()->NewNode(simplified()->ChangeFloat64HoleToTagged(), value)); + } + return ConvertTaggedHoleToUndefined(value); + } + class TryCatchBuilder0 { public: using TryFunction = VoidGenerator0; @@ -521,26 +530,6 @@ class IteratingArrayBuiltinReducerAssembler : public JSCallReducerAssembler { ? NumberIsFloat64Hole(TNode::UncheckedCast(v)) : IsTheHole(v); } - - TNode CheckFloat64Hole(TNode value, - CheckFloat64HoleMode mode) { - return AddNode( - graph()->NewNode(simplified()->CheckFloat64Hole(mode, feedback()), - value, effect(), control())); - } - - // May deopt for holey double elements. - TNode TryConvertHoleToUndefined(TNode value, - ElementsKind kind) { - DCHECK(IsHoleyElementsKind(kind)); - if (kind == HOLEY_DOUBLE_ELEMENTS) { - // TODO(7409): avoid deopt if not all uses of value are truncated. - TNode number = TNode::UncheckedCast(value); - return CheckFloat64Hole(number, CheckFloat64HoleMode::kAllowReturnHole); - } - - return ConvertTaggedHoleToUndefined(value); - } }; class PromiseBuiltinReducerAssembler : public JSCallReducerAssembler { @@ -1322,7 +1311,7 @@ TNode IteratingArrayBuiltinReducerAssembler::ReduceArrayPrototypeAt( // automatic converstion performed by // RepresentationChanger::GetTaggedRepresentationFor does not handle // holes, so we convert manually a potential hole here. 
- element = TryConvertHoleToUndefined(element, map.elements_kind()); + element = ConvertHoleToUndefined(element, map.elements_kind()); } Goto(&out, element); @@ -2095,7 +2084,7 @@ TNode IteratingArrayBuiltinReducerAssembler::ReduceArrayPrototypeFind( std::tie(k, element) = SafeLoadElement(kind, receiver, k); if (IsHoleyElementsKind(kind)) { - element = TryConvertHoleToUndefined(element, kind); + element = ConvertHoleToUndefined(element, kind); } TNode if_found_value = is_find_variant ? element : k; @@ -4136,6 +4125,14 @@ bool IsCallWithArrayLikeOrSpread(Node* node) { } // namespace +Node* JSCallReducer::ConvertHoleToUndefined(Node* value, ElementsKind kind) { + DCHECK(IsHoleyElementsKind(kind)); + if (kind == HOLEY_DOUBLE_ELEMENTS) { + return graph()->NewNode(simplified()->ChangeFloat64HoleToTagged(), value); + } + return graph()->NewNode(simplified()->ConvertTaggedHoleToUndefined(), value); +} + void JSCallReducer::CheckIfConstructor(Node* construct) { JSConstructNode n(construct); Node* new_target = n.new_target(); @@ -4527,16 +4524,7 @@ Reduction JSCallReducer::ReduceCallOrConstructWithArrayLikeOrSpread( // In "holey" arrays some arguments might be missing and we pass // 'undefined' instead. if (IsHoleyElementsKind(elements_kind)) { - if (elements_kind == HOLEY_DOUBLE_ELEMENTS) { - // May deopt for holey double elements. - load = effect = graph()->NewNode( - simplified()->CheckFloat64Hole( - CheckFloat64HoleMode::kAllowReturnHole, feedback_source), - load, effect, control); - } else { - load = graph()->NewNode(simplified()->ConvertTaggedHoleToUndefined(), - load); - } + load = ConvertHoleToUndefined(load, elements_kind); } node->InsertInput(graph()->zone(), arraylike_or_spread_index + i, load); @@ -6632,16 +6620,8 @@ Reduction JSCallReducer::ReduceArrayIteratorPrototypeNext(Node* node) { elements, index, etrue, if_true); // Convert hole to undefined if needed. - if (elements_kind == HOLEY_ELEMENTS || - elements_kind == HOLEY_SMI_ELEMENTS) { - value_true = graph()->NewNode( - simplified()->ConvertTaggedHoleToUndefined(), value_true); - } else if (elements_kind == HOLEY_DOUBLE_ELEMENTS) { - // TODO(6587): avoid deopt if not all uses of value are truncated. - CheckFloat64HoleMode mode = CheckFloat64HoleMode::kAllowReturnHole; - value_true = etrue = graph()->NewNode( - simplified()->CheckFloat64Hole(mode, p.feedback()), value_true, - etrue, if_true); + if (IsHoleyElementsKind(elements_kind)) { + value_true = ConvertHoleToUndefined(value_true, elements_kind); } } diff --git a/deps/v8/src/compiler/js-call-reducer.h b/deps/v8/src/compiler/js-call-reducer.h index 5713bbfbf0f133..a8212a03542188 100644 --- a/deps/v8/src/compiler/js-call-reducer.h +++ b/deps/v8/src/compiler/js-call-reducer.h @@ -268,6 +268,8 @@ class V8_EXPORT_PRIVATE JSCallReducer final : public AdvancedReducer { // Check whether the given new target value is a constructor function. 
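The JSCallReducer hunks above replace the deoptimizing CheckFloat64Hole path with a hole-to-undefined conversion that never deopts, picking the operator by elements kind. A sketch of that dispatch; the enum values are stand-ins for V8's ElementsKind and only the two cases visible in the diff are modeled:

    // Illustrative stand-ins for the relevant ElementsKind values.
    enum class HoleyKind { kHoleySmiOrObject, kHoleyDouble };

    enum class HoleConversion {
      kConvertTaggedHoleToUndefined,  // the_hole sentinel -> undefined
      kChangeFloat64HoleToTagged      // hole NaN -> undefined, else HeapNumber
    };

    // Holey double elements store unboxed doubles, so the hole is the hole NaN
    // and needs the Float64 variant; all other holey kinds are tagged and the
    // the_hole sentinel can be rewritten directly.
    HoleConversion ConvertHoleToUndefined(HoleyKind kind) {
      return kind == HoleyKind::kHoleyDouble
                 ? HoleConversion::kChangeFloat64HoleToTagged
                 : HoleConversion::kConvertTaggedHoleToUndefined;
    }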
void CheckIfConstructor(Node* call); + Node* ConvertHoleToUndefined(Node* value, ElementsKind elements_kind); + Graph* graph() const; JSGraph* jsgraph() const { return jsgraph_; } Zone* temp_zone() const { return temp_zone_; } diff --git a/deps/v8/src/compiler/js-context-specialization.cc b/deps/v8/src/compiler/js-context-specialization.cc index 6f3f1b0d861c89..bf2d70168e0dc8 100644 --- a/deps/v8/src/compiler/js-context-specialization.cc +++ b/deps/v8/src/compiler/js-context-specialization.cc @@ -4,12 +4,17 @@ #include "src/compiler/js-context-specialization.h" +#include "src/compiler/access-builder.h" #include "src/compiler/common-operator.h" +#include "src/compiler/compilation-dependencies.h" +#include "src/compiler/const-tracking-let-helpers.h" #include "src/compiler/js-graph.h" #include "src/compiler/js-heap-broker.h" #include "src/compiler/js-operator.h" #include "src/compiler/linkage.h" #include "src/compiler/node-properties.h" +#include "src/compiler/simplified-operator.h" +#include "src/deoptimizer/deoptimize-reason.h" #include "src/objects/contexts-inl.h" namespace v8 { @@ -24,6 +29,8 @@ Reduction JSContextSpecialization::Reduce(Node* node) { return ReduceJSLoadContext(node); case IrOpcode::kJSStoreContext: return ReduceJSStoreContext(node); + case IrOpcode::kJSStoreScriptContext: + return ReduceJSStoreScriptContext(node); case IrOpcode::kJSGetImportMeta: return ReduceJSGetImportMeta(node); default: @@ -159,7 +166,9 @@ Reduction JSContextSpecialization::ReduceJSLoadContext(Node* node) { node, jsgraph()->ConstantNoHole(concrete, broker()), depth); } - if (!access.immutable()) { + if (!access.immutable() && + !broker()->dependencies()->DependOnConstTrackingLet( + concrete, access.index(), broker())) { // We found the requested context object but since the context slot is // mutable we can only partially reduce the load. return SimplifyJSLoadContext( @@ -226,6 +235,54 @@ Reduction JSContextSpecialization::ReduceJSStoreContext(Node* node) { node, jsgraph()->ConstantNoHole(concrete, broker()), depth); } +Reduction JSContextSpecialization::ReduceJSStoreScriptContext(Node* node) { + DCHECK(v8_flags.const_tracking_let); + DCHECK_EQ(IrOpcode::kJSStoreScriptContext, node->opcode()); + + const ContextAccess& access = ContextAccessOf(node->op()); + size_t depth = access.depth(); + int side_data_index = ConstTrackingLetSideDataIndexForAccess(access.index()); + + // First walk up the context chain in the graph until we reduce the depth to 0 + // or hit a node that does not have a CreateXYZContext operator. + Node* context = NodeProperties::GetOuterContext(node, &depth); + + OptionalContextRef maybe_context = + GetSpecializationContext(broker(), context, &depth, outer()); + if (IsConstTrackingLetVariableSurelyNotConstant(maybe_context, depth, + side_data_index, broker())) { + // The value is not a constant any more, so we don't need to generate + // code for invalidating the side data. + const Operator* op = + jsgraph_->javascript()->StoreContext(0, access.index()); + NodeProperties::ChangeOp(node, op); + return Changed(node); + } + + // The value might be a constant. Generate code which checks the side data and + // potentially invalidates the constness. + Node* effect = NodeProperties::GetEffectInput(node); + Node* control = NodeProperties::GetControlInput(node); + + // Generate code to walk up the contexts the remaining depth. 
+ for (size_t i = 0; i < depth; ++i) { + context = effect = jsgraph_->graph()->NewNode( + jsgraph_->simplified()->LoadField( + AccessBuilder::ForContextSlotKnownPointer(Context::PREVIOUS_INDEX)), + context, effect, control); + } + + GenerateCheckConstTrackingLetSideData(context, &effect, &control, + side_data_index, jsgraph_); + + // If we're still here (not deopted) the side data implied that the value was + // already not a constant, so we can just store into it. + const Operator* op = jsgraph_->javascript()->StoreContext(0, access.index()); + Node* new_store = jsgraph_->graph()->NewNode( + op, NodeProperties::GetValueInput(node, 0), context, effect, control); + return Replace(new_store); +} + OptionalContextRef GetModuleContext(JSHeapBroker* broker, Node* node, Maybe maybe_context) { size_t depth = std::numeric_limits::max(); diff --git a/deps/v8/src/compiler/js-context-specialization.h b/deps/v8/src/compiler/js-context-specialization.h index 2da6d8d732f613..9c3f3e86e1caf0 100644 --- a/deps/v8/src/compiler/js-context-specialization.h +++ b/deps/v8/src/compiler/js-context-specialization.h @@ -57,6 +57,7 @@ class V8_EXPORT_PRIVATE JSContextSpecialization final : public AdvancedReducer { Reduction ReduceParameter(Node* node); Reduction ReduceJSLoadContext(Node* node); Reduction ReduceJSStoreContext(Node* node); + Reduction ReduceJSStoreScriptContext(Node* node); Reduction ReduceJSGetImportMeta(Node* node); Reduction SimplifyJSStoreContext(Node* node, Node* new_context, diff --git a/deps/v8/src/compiler/js-generic-lowering.cc b/deps/v8/src/compiler/js-generic-lowering.cc index bcc118347def70..05f91ead11bb7b 100644 --- a/deps/v8/src/compiler/js-generic-lowering.cc +++ b/deps/v8/src/compiler/js-generic-lowering.cc @@ -472,12 +472,20 @@ void JSGenericLowering::LowerJSSetNamedProperty(Node* node) { // the paths are controlled by feedback. // TODO(v8:12548): refactor SetNamedIC as a subclass of StoreIC, which can // be called here. - ReplaceWithBuiltinCall(node, Builtin::kStoreICTrampoline); + ReplaceWithBuiltinCall( + node, ShouldUseMegamorphicAccessBuiltin(p.feedback(), {}, + AccessMode::kStore, broker()) + ? Builtin::kStoreICTrampoline_Megamorphic + : Builtin::kStoreICTrampoline); } else { node->InsertInput(zone(), 1, jsgraph()->ConstantNoHole(p.name(), broker())); node->InsertInput(zone(), 3, jsgraph()->TaggedIndexConstant(p.feedback().index())); - ReplaceWithBuiltinCall(node, Builtin::kStoreIC); + ReplaceWithBuiltinCall( + node, ShouldUseMegamorphicAccessBuiltin(p.feedback(), {}, + AccessMode::kStore, broker()) + ? Builtin::kStoreIC_Megamorphic + : Builtin::kStoreIC); } } @@ -592,6 +600,9 @@ void JSGenericLowering::LowerJSStoreContext(Node* node) { UNREACHABLE(); // Eliminated in typed lowering. } +void JSGenericLowering::LowerJSStoreScriptContext(Node* node) { + UNREACHABLE(); // Eliminated in context specialization. +} void JSGenericLowering::LowerJSCreate(Node* node) { ReplaceWithBuiltinCall(node, Builtin::kFastNewObject); diff --git a/deps/v8/src/compiler/js-inlining.cc b/deps/v8/src/compiler/js-inlining.cc index 2a6207cac41058..380d1c8671d4f4 100644 --- a/deps/v8/src/compiler/js-inlining.cc +++ b/deps/v8/src/compiler/js-inlining.cc @@ -471,7 +471,9 @@ Reduction JSInliner::ReduceJSWasmCall(Node* node) { // for wasm gc objects). 
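ReduceJSStoreScriptContext above takes one of two shapes: if the side data already proves the slot is not a constant, the node degrades to a plain StoreContext; otherwise the reducer walks the context chain, emits the side-data check (deoptimizing if some code may still treat the value as constant) and then stores. A pseudocode sketch of that decision; the helper names are condensations of the real IsConstTrackingLetVariableSurelyNotConstant and GenerateCheckConstTrackingLetSideData paths:

    // Pseudocode sketch, not the real reducer.
    enum class Reduction { kChangedToPlainStore, kReplacedWithCheckedStore };

    Reduction ReduceStoreScriptContext(bool side_data_says_not_a_constant) {
      if (side_data_says_not_a_constant) {
        // Nothing can depend on the value being constant any more: just store.
        return Reduction::kChangedToPlainStore;
      }
      // The slot may still be tracked as constant: check the side data first,
      // deoptimize dependent code if needed, then perform the store.
      return Reduction::kReplacedWithCheckedStore;
    }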
WasmInlineResult inline_result; if (inline_wasm_fct_if_supported_ && fct_index != -1 && native_module && - native_module->enabled_features().has_gc()) { + // Disable inlining for asm.js functions because we haven't tested it + // and most asm.js opcodes aren't supported anyway. + native_module->enabled_features() != wasm::WasmFeatures::ForAsmjs()) { inline_result = TryWasmInlining(call_node); } diff --git a/deps/v8/src/compiler/js-native-context-specialization.cc b/deps/v8/src/compiler/js-native-context-specialization.cc index 3e7b604e97a7ae..bc1f457ddf9171 100644 --- a/deps/v8/src/compiler/js-native-context-specialization.cc +++ b/deps/v8/src/compiler/js-native-context-specialization.cc @@ -15,6 +15,7 @@ #include "src/compiler/allocation-builder.h" #include "src/compiler/common-operator.h" #include "src/compiler/compilation-dependencies.h" +#include "src/compiler/const-tracking-let-helpers.h" #include "src/compiler/frame-states.h" #include "src/compiler/graph-assembler.h" #include "src/compiler/js-graph.h" @@ -1344,10 +1345,22 @@ Reduction JSNativeContextSpecialization::ReduceJSStoreGlobal(Node* node) { GlobalAccessFeedback const& feedback = processed.AsGlobalAccess(); if (feedback.IsScriptContextSlot()) { if (feedback.immutable()) return NoChange(); - Effect effect = n.effect(); - Control control = n.control(); + Node* effect = n.effect(); + Node* control = n.control(); Node* script_context = jsgraph()->ConstantNoHole(feedback.script_context(), broker()); + + // StoreGlobal can store to `let` variables declared by another script. + // Thus, we must check the const tracking let side data and potentially + // invalidate the constness. + if (v8_flags.const_tracking_let) { + int side_data_index = + ConstTrackingLetSideDataIndexForAccess(feedback.slot_index()); + GenerateCheckConstTrackingLetSideData(script_context, &effect, &control, + side_data_index, jsgraph_); + // If we're still here (not deopted) the side data implied that the + // variable was already not-a-constant, so we can just store into it. + } effect = graph()->NewNode(javascript()->StoreContext(0, feedback.slot_index()), value, script_context, effect, control); @@ -3306,9 +3319,8 @@ JSNativeContextSpecialization::BuildElementAccess( graph()->NewNode(simplified()->LoadElement(element_access), elements, index, etrue, if_true); - // Handle loading from holey backing stores correctly, by either - // mapping the hole to undefined if possible, or deoptimizing - // otherwise. + // Handle loading from holey backing stores correctly by mapping + // the hole to undefined. if (elements_kind == HOLEY_ELEMENTS || elements_kind == HOLEY_SMI_ELEMENTS) { // Turn the hole into undefined. @@ -3317,10 +3329,15 @@ JSNativeContextSpecialization::BuildElementAccess( } else if (elements_kind == HOLEY_DOUBLE_ELEMENTS) { // Return the signaling NaN hole directly if all uses are // truncating. - vtrue = etrue = graph()->NewNode( - simplified()->CheckFloat64Hole( - CheckFloat64HoleMode::kAllowReturnHole, FeedbackSource()), - vtrue, etrue, if_true); + if (LoadModeHandlesHoles(keyed_mode.load_mode())) { + vtrue = graph()->NewNode(simplified()->ChangeFloat64HoleToTagged(), + vtrue); + } else { + vtrue = etrue = graph()->NewNode( + simplified()->CheckFloat64Hole( + CheckFloat64HoleMode::kAllowReturnHole, FeedbackSource()), + vtrue, etrue, if_true); + } } } @@ -3358,16 +3375,25 @@ JSNativeContextSpecialization::BuildElementAccess( } } else if (elements_kind == HOLEY_DOUBLE_ELEMENTS) { // Perform the hole check on the result. 
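As a side note on the holey-elements handling above: a minimal sketch of the "map the hole to undefined" idea for double arrays, where the hole is a reserved NaN bit pattern. The constant kHoleNanBits below is a made-up stand-in; V8 reserves a different, specific pattern.

#include <cstdint>
#include <cstring>
#include <optional>

// Made-up stand-in for the reserved NaN bit pattern marking a "hole" in a
// holey double array (any quiet NaN payload works for the sketch).
constexpr uint64_t kHoleNanBits = 0x7ff8deadbeef0001ULL;

// Mirrors the ChangeFloat64HoleToTagged idea: the hole becomes "no value"
// (i.e. undefined), every other bit pattern is just the stored double.
std::optional<double> HoleToUndefined(uint64_t raw_element_bits) {
  if (raw_element_bits == kHoleNanBits) return std::nullopt;  // the hole
  double value;
  std::memcpy(&value, &raw_element_bits, sizeof value);
  return value;
}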
- CheckFloat64HoleMode mode = CheckFloat64HoleMode::kNeverReturnHole; // Check if we are allowed to return the hole directly. if (CanTreatHoleAsUndefined(receiver_maps)) { - // Return the signaling NaN hole directly if all uses are - // truncating. - mode = CheckFloat64HoleMode::kAllowReturnHole; + if (LoadModeHandlesHoles(keyed_mode.load_mode())) { + // Return the signaling NaN hole directly if all uses are + // truncating. + value = graph()->NewNode(simplified()->ChangeFloat64HoleToTagged(), + value); + } else { + value = effect = graph()->NewNode( + simplified()->CheckFloat64Hole( + CheckFloat64HoleMode::kAllowReturnHole, FeedbackSource()), + value, effect, control); + } + } else { + value = effect = graph()->NewNode( + simplified()->CheckFloat64Hole( + CheckFloat64HoleMode::kNeverReturnHole, FeedbackSource()), + value, effect, control); } - value = effect = graph()->NewNode( - simplified()->CheckFloat64Hole(mode, FeedbackSource()), value, - effect, control); } } } else if (keyed_mode.access_mode() == AccessMode::kHas) { diff --git a/deps/v8/src/compiler/js-operator.cc b/deps/v8/src/compiler/js-operator.cc index c697fda66aaa02..04fcde32fa0657 100644 --- a/deps/v8/src/compiler/js-operator.cc +++ b/deps/v8/src/compiler/js-operator.cc @@ -181,7 +181,8 @@ std::ostream& operator<<(std::ostream& os, ContextAccess const& access) { ContextAccess const& ContextAccessOf(Operator const* op) { DCHECK(op->opcode() == IrOpcode::kJSLoadContext || - op->opcode() == IrOpcode::kJSStoreContext); + op->opcode() == IrOpcode::kJSStoreContext || + op->opcode() == IrOpcode::kJSStoreScriptContext); return OpParameter(op); } @@ -1246,6 +1247,17 @@ const Operator* JSOperatorBuilder::StoreContext(size_t depth, size_t index) { access); // parameter } +const Operator* JSOperatorBuilder::StoreScriptContext(size_t depth, + size_t index) { + ContextAccess access(depth, index, false); + return zone()->New>( // -- + IrOpcode::kJSStoreScriptContext, // opcode + Operator::kNoRead | Operator::kNoThrow, // flags + "JSStoreScriptContext", // name + 1, 1, 1, 0, 1, 0, // counts + access); // parameter +} + const Operator* JSOperatorBuilder::LoadModule(int32_t cell_index) { return zone()->New>( // -- IrOpcode::kJSLoadModule, // opcode diff --git a/deps/v8/src/compiler/js-operator.h b/deps/v8/src/compiler/js-operator.h index 87f9cb6822918e..c5d9795ff11681 100644 --- a/deps/v8/src/compiler/js-operator.h +++ b/deps/v8/src/compiler/js-operator.h @@ -1061,6 +1061,7 @@ class V8_EXPORT_PRIVATE JSOperatorBuilder final const Operator* HasContextExtension(size_t depth); const Operator* LoadContext(size_t depth, size_t index, bool immutable); const Operator* StoreContext(size_t depth, size_t index); + const Operator* StoreScriptContext(size_t depth, size_t index); const Operator* LoadModule(int32_t cell_index); const Operator* StoreModule(int32_t cell_index); diff --git a/deps/v8/src/compiler/js-typed-lowering.cc b/deps/v8/src/compiler/js-typed-lowering.cc index 0d77d183140163..4725218df4d1ad 100644 --- a/deps/v8/src/compiler/js-typed-lowering.cc +++ b/deps/v8/src/compiler/js-typed-lowering.cc @@ -1424,7 +1424,34 @@ Reduction JSTypedLowering::ReduceJSHasContextExtension(Node* node) { Node* effect = NodeProperties::GetEffectInput(node); Node* context = NodeProperties::GetContextInput(node); Node* control = graph()->start(); + for (size_t i = 0; i < depth; ++i) { +#if DEBUG + // Const tracking let data is stored in the extension slot of a + // ScriptContext - however, it's unrelated to the sloppy eval variable + // extension. 
We should never iterate through a ScriptContext here. + Node* const scope_info = effect = graph()->NewNode( + simplified()->LoadField( + AccessBuilder::ForContextSlot(Context::SCOPE_INFO_INDEX)), + context, effect, control); + Node* scope_info_flags = effect = graph()->NewNode( + simplified()->LoadField(AccessBuilder::ForScopeInfoFlags()), scope_info, + effect, control); + Node* scope_type = graph()->NewNode( + simplified()->NumberBitwiseAnd(), scope_info_flags, + jsgraph()->SmiConstant(ScopeInfo::ScopeTypeBits::kMask)); + Node* is_script_scope = + graph()->NewNode(simplified()->NumberEqual(), scope_type, + jsgraph()->SmiConstant(ScopeType::SCRIPT_SCOPE)); + Node* is_not_script_scope = + graph()->NewNode(simplified()->BooleanNot(), is_script_scope); + JSGraphAssembler gasm(broker(), jsgraph_, jsgraph_->zone(), + BranchSemantics::kJS); + gasm.InitializeEffectControl(effect, control); + gasm.Assert(is_not_script_scope, "we should not see a ScriptContext here", + __FILE__, __LINE__); +#endif + context = effect = graph()->NewNode( + simplified()->LoadField( + AccessBuilder::ForContextSlotKnownPointer(Context::PREVIOUS_INDEX)), + context, effect, control); diff --git a/deps/v8/src/compiler/linkage.cc b/deps/v8/src/compiler/linkage.cc index d7cfb6cf95f1e5..229355df777d70 100644 --- a/deps/v8/src/compiler/linkage.cc +++ b/deps/v8/src/compiler/linkage.cc @@ -368,6 +368,7 @@ CallDescriptor* Linkage::GetCEntryStubCallDescriptor( LinkageLocation::ForAnyRegister(MachineType::AnyTagged()); return zone->New( // -- CallDescriptor::kCallCodeObject, // kind + kDefaultCodeEntrypointTag, // tag target_type, // target MachineType target_loc, // target location locations.Build(), // location_sig @@ -426,6 +427,7 @@ CallDescriptor* Linkage::GetJSCallDescriptor(Zone* zone, bool is_osr, CallDescriptor::Kind descriptor_kind = CallDescriptor::kCallJSFunction; return zone->New( // -- descriptor_kind, // kind + kJSEntrypointTag, // tag target_type, // target MachineType target_loc, // target location locations.Build(), // location_sig @@ -526,6 +528,7 @@ CallDescriptor* Linkage::GetStubCallDescriptor( LinkageLocation target_loc = LinkageLocation::ForAnyRegister(target_type); return zone->New( // -- kind, // kind + descriptor.tag(), // tag target_type, // target MachineType target_loc, // target location locations.Build(), // location_sig @@ -571,16 +574,17 @@ CallDescriptor* Linkage::GetBytecodeDispatchCallDescriptor( LinkageLocation target_loc = LinkageLocation::ForAnyRegister(target_type); const CallDescriptor::Flags kFlags = CallDescriptor::kCanUseRoots | CallDescriptor::kFixedTargetRegister; - return zone->New( // -- - CallDescriptor::kCallAddress, // kind - target_type, // target MachineType - target_loc, // target location - locations.Build(), // location_sig - stack_parameter_count, // stack_parameter_count - Operator::kNoProperties, // properties - kNoCalleeSaved, // callee-saved registers - kNoCalleeSavedFp, // callee-saved fp - kFlags, // flags + return zone->New( // -- - CallDescriptor::kCallAddress, // kind + kBytecodeHandlerEntrypointTag, // tag + target_type, // target MachineType + target_loc, // target location + locations.Build(), // location_sig + stack_parameter_count, // stack_parameter_count + Operator::kNoProperties, // properties + kNoCalleeSaved, // callee-saved registers + kNoCalleeSavedFp, // callee-saved fp + kFlags, // flags descriptor.DebugName()); } diff --git a/deps/v8/src/compiler/linkage.h b/deps/v8/src/compiler/linkage.h index 672c4a68fa1540..778f976c2036bd 100644 --- a/deps/v8/src/compiler/linkage.h +++
b/deps/v8/src/compiler/linkage.h @@ -99,9 +99,9 @@ class V8_EXPORT_PRIVATE CallDescriptor final }; using Flags = base::Flags; - CallDescriptor(Kind kind, MachineType target_type, LinkageLocation target_loc, - LocationSignature* location_sig, size_t param_slot_count, - Operator::Properties properties, + CallDescriptor(Kind kind, CodeEntrypointTag tag, MachineType target_type, + LinkageLocation target_loc, LocationSignature* location_sig, + size_t param_slot_count, Operator::Properties properties, RegList callee_saved_registers, DoubleRegList callee_saved_fp_registers, Flags flags, const char* debug_name = "", @@ -109,6 +109,7 @@ class V8_EXPORT_PRIVATE CallDescriptor final const RegList allocatable_registers = {}, size_t return_slot_count = 0) : kind_(kind), + tag_(tag), target_type_(target_type), target_loc_(target_loc), location_sig_(location_sig), @@ -128,6 +129,19 @@ class V8_EXPORT_PRIVATE CallDescriptor final // Returns the kind of this call. Kind kind() const { return kind_; } + // Returns the entrypoint tag for this call. + CodeEntrypointTag tag() const { return tag_; } + + // Returns the entrypoint tag for this call, shifted to the right by + // kCodeEntrypointTagShift so that it fits into a 32-bit immediate. + uint32_t shifted_tag() const { + static_assert(kCodeEntrypointTagShift >= 32); + return tag_ >> kCodeEntrypointTagShift; + } + + // Returns {true} if this descriptor is a call to a Code object. + bool IsCodeObjectCall() const { return kind_ == kCallCodeObject; } + // Returns {true} if this descriptor is a call to a C function. bool IsCFunctionCall() const { return kind_ == kCallAddress; } @@ -145,6 +159,8 @@ class V8_EXPORT_PRIVATE CallDescriptor final bool IsWasmCapiFunction() const { return kind_ == kCallWasmCapiFunction; } #endif // V8_ENABLE_WEBASSEMBLY + bool IsBuiltinPointerCall() const { return kind_ == kCallBuiltinPointer; } + bool RequiresFrameAsIncoming() const { if (IsCFunctionCall() || IsJSFunctionCall()) return true; #if V8_ENABLE_WEBASSEMBLY @@ -154,6 +170,8 @@ class V8_EXPORT_PRIVATE CallDescriptor final return false; } + bool RequiresEntrypointTagForCall() const { return IsCodeObjectCall(); } + // The number of return values from this call. 
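To make the shifted_tag() comment above concrete, here is a small self-contained sketch of why a tag that only occupies the topmost bits of a 64-bit word fits into a 32-bit immediate once shifted down by at least 32 bits. kTagShift and kExampleTag are assumed values for illustration, not V8's actual CodeEntrypointTag constants.

#include <cstdint>

constexpr int kTagShift = 48;  // assumed placement of the tag bits
constexpr uint64_t kExampleTag = uint64_t{0x1234} << kTagShift;

constexpr uint32_t ShiftedTag(uint64_t tag) {
  // Shifting by at least 32 bits guarantees the result fits in 32 bits.
  static_assert(kTagShift >= 32, "shifted tag must fit in a 32-bit immediate");
  return static_cast<uint32_t>(tag >> kTagShift);
}

static_assert(ShiftedTag(kExampleTag) == 0x1234, "round-trips the tag payload");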
size_t ReturnCount() const { return location_sig_->return_count(); } @@ -296,6 +314,7 @@ class V8_EXPORT_PRIVATE CallDescriptor final friend class Linkage; const Kind kind_; + const CodeEntrypointTag tag_; const MachineType target_type_; const LinkageLocation target_loc_; const LocationSignature* const location_sig_; diff --git a/deps/v8/src/compiler/machine-operator.cc b/deps/v8/src/compiler/machine-operator.cc index e34232f38e0e02..4af8c6883ce2e8 100644 --- a/deps/v8/src/compiler/machine-operator.cc +++ b/deps/v8/src/compiler/machine-operator.cc @@ -1094,14 +1094,15 @@ std::ostream& operator<<(std::ostream& os, TruncateKind kind) { SIMD_I16x8_LANES(V) V(8) V(9) V(10) V(11) V(12) V(13) V(14) V(15) #define STACK_SLOT_CACHED_SIZES_ALIGNMENTS_LIST(V) \ - V(4, 0) V(8, 0) V(16, 0) V(4, 4) V(8, 8) V(16, 16) + V(4, 0, false) \ + V(8, 0, false) V(16, 0, false) V(4, 4, false) V(8, 8, false) V(16, 16, false) struct StackSlotOperator : public Operator1 { - explicit StackSlotOperator(int size, int alignment) + explicit StackSlotOperator(int size, int alignment, bool is_tagged) : Operator1( IrOpcode::kStackSlot, Operator::kNoDeopt | Operator::kNoThrow, "StackSlot", 0, 0, 0, 1, 0, 0, - StackSlotRepresentation(size, alignment)) {} + StackSlotRepresentation(size, alignment, is_tagged)) {} }; struct MachineOperatorGlobalCache { @@ -1291,14 +1292,14 @@ struct MachineOperatorGlobalCache { #undef LOAD_TRANSFORM_KIND #endif // V8_ENABLE_WEBASSEMBLY -#define STACKSLOT(Size, Alignment) \ - struct StackSlotOfSize##Size##OfAlignment##Alignment##Operator final \ - : public StackSlotOperator { \ - StackSlotOfSize##Size##OfAlignment##Alignment##Operator() \ - : StackSlotOperator(Size, Alignment) {} \ - }; \ - StackSlotOfSize##Size##OfAlignment##Alignment##Operator \ - kStackSlotOfSize##Size##OfAlignment##Alignment; +#define STACKSLOT(Size, Alignment, IsTagged) \ + struct StackSlotOfSize##Size##OfAlignment##Alignment##IsTagged##Operator \ + final : public StackSlotOperator { \ + StackSlotOfSize##Size##OfAlignment##Alignment##IsTagged##Operator() \ + : StackSlotOperator(Size, Alignment, IsTagged) {} \ + }; \ + StackSlotOfSize##Size##OfAlignment##Alignment##IsTagged##Operator \ + kStackSlotOfSize##Size##OfAlignment##Alignment##IsTagged; STACK_SLOT_CACHED_SIZES_ALIGNMENTS_LIST(STACKSLOT) #undef STACKSLOT @@ -2017,18 +2018,19 @@ const Operator* MachineOperatorBuilder::StoreLane(MemoryAccessKind kind, } #endif // V8_ENABLE_WEBASSEMBLY -const Operator* MachineOperatorBuilder::StackSlot(int size, int alignment) { +const Operator* MachineOperatorBuilder::StackSlot(int size, int alignment, + bool is_tagged) { DCHECK_LE(0, size); DCHECK(alignment == 0 || alignment == 4 || alignment == 8 || alignment == 16); -#define CASE_CACHED_SIZE(Size, Alignment) \ - if (size == Size && alignment == Alignment) { \ - return &cache_.kStackSlotOfSize##Size##OfAlignment##Alignment; \ +#define CASE_CACHED_SIZE(Size, Alignment, IsTagged) \ + if (size == Size && alignment == Alignment && is_tagged == IsTagged) { \ + return &cache_.kStackSlotOfSize##Size##OfAlignment##Alignment##IsTagged; \ } STACK_SLOT_CACHED_SIZES_ALIGNMENTS_LIST(CASE_CACHED_SIZE) #undef CASE_CACHED_SIZE - return zone_->New(size, alignment); + return zone_->New(size, alignment, is_tagged); } const Operator* MachineOperatorBuilder::StackSlot(MachineRepresentation rep, diff --git a/deps/v8/src/compiler/machine-operator.h b/deps/v8/src/compiler/machine-operator.h index b274bb4b918d6a..5d267856be52e0 100644 --- a/deps/v8/src/compiler/machine-operator.h +++ 
b/deps/v8/src/compiler/machine-operator.h @@ -266,15 +266,17 @@ V8_EXPORT_PRIVATE StoreLaneParameters const& StoreLaneParametersOf( class StackSlotRepresentation final { public: - StackSlotRepresentation(int size, int alignment) - : size_(size), alignment_(alignment) {} + StackSlotRepresentation(int size, int alignment, bool is_tagged) + : size_(size), alignment_(alignment), is_tagged_(is_tagged) {} int size() const { return size_; } int alignment() const { return alignment_; } + bool is_tagged() const { return is_tagged_; } private: int size_; int alignment_; + bool is_tagged_; }; V8_EXPORT_PRIVATE bool operator==(StackSlotRepresentation, @@ -1199,7 +1201,8 @@ class V8_EXPORT_PRIVATE MachineOperatorBuilder final // unaligned store [base + index], value const Operator* UnalignedStore(UnalignedStoreRepresentation rep); - const Operator* StackSlot(int size, int alignment = 0); + const Operator* StackSlot(int size, int alignment = 0, + bool is_tagged = false); const Operator* StackSlot(MachineRepresentation rep, int alignment = 0); // Note: Only use this operator to: diff --git a/deps/v8/src/compiler/opcodes.h b/deps/v8/src/compiler/opcodes.h index 03f990c6936049..c4e1fe51daf7f9 100644 --- a/deps/v8/src/compiler/opcodes.h +++ b/deps/v8/src/compiler/opcodes.h @@ -200,6 +200,7 @@ V(JSHasContextExtension) \ V(JSLoadContext) \ V(JSStoreContext) \ + V(JSStoreScriptContext) \ V(JSCreateFunctionContext) \ V(JSCreateCatchContext) \ V(JSCreateWithContext) \ @@ -434,6 +435,7 @@ V(ArgumentsLength) \ V(AssertType) \ V(BooleanNot) \ + V(ChangeFloat64HoleToTagged) \ V(CheckBounds) \ V(CheckClosure) \ V(CheckEqualsInternalizedString) \ diff --git a/deps/v8/src/compiler/operator-properties.cc b/deps/v8/src/compiler/operator-properties.cc index 0ee112ada74a03..0bd35da166547c 100644 --- a/deps/v8/src/compiler/operator-properties.cc +++ b/deps/v8/src/compiler/operator-properties.cc @@ -84,6 +84,7 @@ bool OperatorProperties::NeedsExactContext(const Operator* op) { case IrOpcode::kJSLoadNamedFromSuper: case IrOpcode::kJSLoadProperty: case IrOpcode::kJSStoreContext: + case IrOpcode::kJSStoreScriptContext: case IrOpcode::kJSDefineKeyedOwnPropertyInLiteral: case IrOpcode::kJSStoreGlobal: case IrOpcode::kJSStoreInArrayLiteral: diff --git a/deps/v8/src/compiler/pipeline.cc b/deps/v8/src/compiler/pipeline.cc index a5b8430259b4a9..3ea64d117865b7 100644 --- a/deps/v8/src/compiler/pipeline.cc +++ b/deps/v8/src/compiler/pipeline.cc @@ -129,6 +129,7 @@ #include "src/compiler/turboshaft/wasm-gc-optimize-phase.h" #include "src/compiler/turboshaft/wasm-lowering-phase.h" #include "src/compiler/turboshaft/wasm-optimize-phase.h" +#include "src/compiler/turboshaft/wasm-turboshaft-compiler.h" #include "src/compiler/wasm-compiler.h" #include "src/compiler/wasm-escape-analysis.h" #include "src/compiler/wasm-gc-lowering.h" @@ -209,9 +210,7 @@ class PipelineData { register_allocation_zone_scope_(zone_stats_, kRegisterAllocationZoneName), register_allocation_zone_(register_allocation_zone_scope_.zone()), - assembler_options_(AssemblerOptions::Default(isolate)), - inline_wasm_into_js_( - isolate_->IsWasmInliningIntoJSEnabled(isolate_->native_context())) { + assembler_options_(AssemblerOptions::Default(isolate)) { PhaseScope scope(pipeline_statistics, "V8.TFInitPipelineData"); graph_ = graph_zone_->New(graph_zone_); source_positions_ = graph_zone_->New(graph_); @@ -314,7 +313,7 @@ class PipelineData { machine_ = jsgraph->machine(); common_ = jsgraph->common(); javascript_ = jsgraph->javascript(); - } else { + } else if (graph_) { 
simplified_ = graph_zone_->New(graph_zone_); machine_ = graph_zone_->New( graph_zone_, MachineType::PointerRepresentation(), @@ -570,7 +569,7 @@ class PipelineData { fixed_frame_size = call_descriptor->CalculateFixedFrameSize(info()->code_kind()); } - frame_ = codegen_zone()->New(fixed_frame_size); + frame_ = codegen_zone()->New(fixed_frame_size, codegen_zone()); if (osr_helper_.has_value()) osr_helper()->SetupFrame(frame()); } @@ -651,8 +650,6 @@ class PipelineData { } #endif - bool inline_wasm_into_js() const { return inline_wasm_into_js_; } - private: Isolate* const isolate_; #if V8_ENABLE_WEBASSEMBLY @@ -734,7 +731,6 @@ class PipelineData { const ProfileDataFromFile* profile_data_ = nullptr; bool has_js_wasm_calls_ = false; - bool inline_wasm_into_js_ = false; }; class PipelineImpl final { @@ -1475,8 +1471,7 @@ struct InliningPhase { // Note: By not setting data->info()->set_source_positions(), even with // wasm inlining, source positions shouldn't be kept alive after // compilation is finished (if not for tracing, ...) - if (data->inline_wasm_into_js() && - !data->source_positions()->IsEnabled()) { + if (!data->source_positions()->IsEnabled()) { data->source_positions()->Enable(); data->source_positions()->AddDecorator(); } @@ -1500,9 +1495,7 @@ struct JSWasmInliningPhase { CommonOperatorReducer common_reducer( &graph_reducer, data->graph(), data->broker(), data->common(), data->machine(), temp_zone, BranchSemantics::kMachine); - JSInliningHeuristic::Mode mode = - data->inline_wasm_into_js() ? JSInliningHeuristic::kWasmFullInlining - : JSInliningHeuristic::kWasmWrappersOnly; + JSInliningHeuristic::Mode mode = JSInliningHeuristic::kWasmFullInlining; JSInliningHeuristic inlining(&graph_reducer, temp_zone, data->info(), data->jsgraph(), data->broker(), data->source_positions(), data->node_origins(), @@ -1736,7 +1729,8 @@ struct WasmInliningPhase { ZoneVector* inlining_positions, wasm::WasmFeatures* detected) { if (!WasmInliner::graph_size_allows_inlining( - data->graph()->NodeCount(), v8_flags.wasm_inlining_budget)) { + env->module, data->graph()->NodeCount(), + v8_flags.wasm_inlining_budget)) { return; } GraphReducer graph_reducer( @@ -2215,12 +2209,12 @@ struct WasmOptimizationPhase { void Run(PipelineData* data, Zone* temp_zone, MachineOperatorReducer::SignallingNanPropagation signalling_nan_propagation, - wasm::WasmFeatures features) { + wasm::WasmFeatures detected_features) { // Run optimizations in two rounds: First one around load elimination and // then one around branch elimination. This is because those two // optimizations sometimes display quadratic complexity when run together. // We only need load elimination for managed objects. - if (features.has_gc()) { + if (detected_features.has_gc()) { GraphReducer graph_reducer(temp_zone, data->graph(), &data->info()->tick_counter(), data->broker(), data->jsgraph()->Dead(), @@ -2728,6 +2722,58 @@ class WasmHeapStubCompilationJob final : public TurbofanCompilationJob { PipelineImpl pipeline_; }; +class WasmTurboshaftWrapperCompilationJob final + : public turboshaft::TurboshaftCompilationJob { + public: + WasmTurboshaftWrapperCompilationJob( + Isolate* isolate, const wasm::FunctionSig* sig, bool is_import, + const wasm::WasmModule* module, CodeKind kind, + std::unique_ptr debug_name, const AssemblerOptions& options) + // Note that the OptimizedCompilationInfo is not initialized at the time + // we pass it to the CompilationJob constructor, but it is not + // dereferenced there. 
+ : TurboshaftCompilationJob(&info_, + CompilationJob::State::kReadyToExecute), + zone_(wasm::GetWasmEngine()->allocator(), ZONE_NAME), + debug_name_(std::move(debug_name)), + info_(base::CStrVector(debug_name_.get()), &zone_, kind), + sig_(sig), + is_import_(is_import), + module_(module), + call_descriptor_(Linkage::GetJSCallDescriptor( + &zone_, false, static_cast(sig->parameter_count()) + 1, + CallDescriptor::kNoFlags)), + zone_stats_(zone_.allocator()), + data_(&zone_stats_, &info_, isolate, wasm::GetWasmEngine()->allocator(), + nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, options, + nullptr), + pipeline_(&data_) {} + + WasmTurboshaftWrapperCompilationJob( + const WasmTurboshaftWrapperCompilationJob&) = delete; + WasmTurboshaftWrapperCompilationJob& operator=( + const WasmTurboshaftWrapperCompilationJob&) = delete; + + protected: + Status PrepareJobImpl(Isolate* isolate) final; + Status ExecuteJobImpl(RuntimeCallStats* stats, + LocalIsolate* local_isolate) final; + Status FinalizeJobImpl(Isolate* isolate) final; + + private: + Zone zone_; + std::unique_ptr debug_name_; + OptimizedCompilationInfo info_; + const wasm::FunctionSig* sig_; + bool is_import_; + const wasm::WasmModule* module_; + CallDescriptor* call_descriptor_; + ZoneStats zone_stats_; + PipelineData data_; + PipelineImpl pipeline_; +}; + +#if V8_ENABLE_WEBASSEMBLY // static std::unique_ptr Pipeline::NewWasmHeapStubCompilationJob( Isolate* isolate, CallDescriptor* call_descriptor, @@ -2738,11 +2784,78 @@ std::unique_ptr Pipeline::NewWasmHeapStubCompilationJob( std::move(debug_name), options); } +// static +std::unique_ptr +Pipeline::NewWasmTurboshaftWrapperCompilationJob( + Isolate* isolate, const wasm::FunctionSig* sig, bool is_import, + const wasm::WasmModule* module, CodeKind kind, + std::unique_ptr debug_name, const AssemblerOptions& options) { + return std::make_unique( + isolate, sig, is_import, module, kind, std::move(debug_name), options); +} +#endif + CompilationJob::Status WasmHeapStubCompilationJob::PrepareJobImpl( Isolate* isolate) { UNREACHABLE(); } +namespace { +// Temporary helpers for logic shared by the TurboFan and Turboshaft wrapper +// compilation jobs. Remove them once wrappers are fully ported to Turboshaft. +void TraceWrapperCompilation(const char* compiler, + OptimizedCompilationInfo* info, + PipelineData* data) { + if (info->trace_turbo_json() || info->trace_turbo_graph()) { + CodeTracer::StreamScope tracing_scope(data->GetCodeTracer()); + tracing_scope.stream() + << "---------------------------------------------------\n" + << "Begin compiling method " << info->GetDebugName().get() << " using " + << compiler << std::endl; + } + if (info->trace_turbo_graph() && + data->graph() != nullptr) { // Simple textual RPO. 
+ StdoutStream{} << "-- wasm stub " << CodeKindToString(info->code_kind()) + << " graph -- " << std::endl + << AsRPO(*data->graph()); + } + + if (info->trace_turbo_json()) { + TurboJsonFile json_of(info, std::ios_base::trunc); + json_of << "{\"function\":\"" << info->GetDebugName().get() + << "\", \"source\":\"\",\n\"phases\":["; + } +} + +CompilationJob::Status FinalizeWrapperCompilation( + PipelineImpl* pipeline, OptimizedCompilationInfo* info, + CallDescriptor* call_descriptor, Isolate* isolate, + const char* method_name) { + Handle code; + if (!pipeline->FinalizeCode(call_descriptor).ToHandle(&code)) { + V8::FatalProcessOutOfMemory(isolate, method_name); + } + if (pipeline->CommitDependencies(code)) { + info->SetCode(code); +#ifdef ENABLE_DISASSEMBLER + if (v8_flags.print_wasm_code) { + CodeTracer::StreamScope tracing_scope(isolate->GetCodeTracer()); + code->Disassemble(info->GetDebugName().get(), tracing_scope.stream(), + isolate); + } +#endif + + if (isolate->IsLoggingCodeCreation()) { + PROFILE(isolate, CodeCreateEvent(LogEventListener::CodeTag::kStub, + Handle::cast(code), + info->GetDebugName().get())); + } + return CompilationJob::SUCCEEDED; + } + return CompilationJob::FAILED; +} +} // namespace + CompilationJob::Status WasmHeapStubCompilationJob::ExecuteJobImpl( RuntimeCallStats* stats, LocalIsolate* local_isolate) { std::unique_ptr pipeline_statistics; @@ -2752,24 +2865,7 @@ CompilationJob::Status WasmHeapStubCompilationJob::ExecuteJobImpl( &zone_stats_)); pipeline_statistics->BeginPhaseKind("V8.WasmStubCodegen"); } - if (info_.trace_turbo_json() || info_.trace_turbo_graph()) { - CodeTracer::StreamScope tracing_scope(data_.GetCodeTracer()); - tracing_scope.stream() - << "---------------------------------------------------\n" - << "Begin compiling method " << info_.GetDebugName().get() - << " using TurboFan" << std::endl; - } - if (info_.trace_turbo_graph()) { // Simple textual RPO. 
- StdoutStream{} << "-- wasm stub " << CodeKindToString(info_.code_kind()) - << " graph -- " << std::endl - << AsRPO(*data_.graph()); - } - - if (info_.trace_turbo_json()) { - TurboJsonFile json_of(&info_, std::ios_base::trunc); - json_of << "{\"function\":\"" << info_.GetDebugName().get() - << "\", \"source\":\"\",\n\"phases\":["; - } + TraceWrapperCompilation("Turbofan", &info_, &data_); pipeline_.RunPrintAndVerify("V8.WasmMachineCode", true); pipeline_.Run(); pipeline_.ComputeScheduledGraph(); @@ -2781,31 +2877,103 @@ CompilationJob::Status WasmHeapStubCompilationJob::ExecuteJobImpl( CompilationJob::Status WasmHeapStubCompilationJob::FinalizeJobImpl( Isolate* isolate) { - Handle code; - if (!pipeline_.FinalizeCode(call_descriptor_).ToHandle(&code)) { - V8::FatalProcessOutOfMemory(isolate, - "WasmHeapStubCompilationJob::FinalizeJobImpl"); + return FinalizeWrapperCompilation( + &pipeline_, &info_, call_descriptor_, isolate, + "WasmHeapStubCompilationJob::FinalizeJobImpl"); +} + +CompilationJob::Status WasmTurboshaftWrapperCompilationJob::PrepareJobImpl( + Isolate* isolate) { + UNREACHABLE(); +} + +CompilationJob::Status WasmTurboshaftWrapperCompilationJob::ExecuteJobImpl( + RuntimeCallStats* stats, LocalIsolate* local_isolate) { + std::unique_ptr pipeline_statistics; + if (v8_flags.turbo_stats || v8_flags.turbo_stats_nvp) { + pipeline_statistics.reset(new TurbofanPipelineStatistics( + &info_, wasm::GetWasmEngine()->GetOrCreateTurboStatistics(), + &zone_stats_)); + pipeline_statistics->BeginPhaseKind("V8.WasmStubCodegen"); } - if (pipeline_.CommitDependencies(code)) { - info_.SetCode(code); -#ifdef ENABLE_DISASSEMBLER - if (v8_flags.print_opt_code) { - CodeTracer::StreamScope tracing_scope(isolate->GetCodeTracer()); - code->Disassemble(compilation_info()->GetDebugName().get(), - tracing_scope.stream(), isolate); - } -#endif + TraceWrapperCompilation("Turboshaft", &info_, &data_); + Linkage linkage(call_descriptor_); - if (isolate->IsLoggingCodeCreation()) { - PROFILE(isolate, - CodeCreateEvent(LogEventListener::CodeTag::kStub, - Handle::cast(code), - compilation_info()->GetDebugName().get())); + base::Optional turboshaft_scope( + pipeline_.GetTurboshaftPipelineData( + turboshaft::TurboshaftPipelineKind::kJSToWasm)); + auto& turboshaft_pipeline = turboshaft_scope.value(); + turboshaft_pipeline.Value().SetIsWasm(module_, sig_); + DCHECK_NOT_NULL(turboshaft::PipelineData::Get().wasm_module()); + + AccountingAllocator allocator; + BuildWasmWrapper(&allocator, turboshaft_pipeline.Value().graph(), + info_.code_kind(), sig_, is_import_, module_); + CodeTracer* code_tracer = nullptr; + if (info_.trace_turbo_graph()) { + // NOTE: We must not call `GetCodeTracer` if tracing is not enabled, + // because it may not yet be initialized then and doing so from the + // background thread is not threadsafe. + code_tracer = data_.GetCodeTracer(); + } + Zone printing_zone(&allocator, ZONE_NAME); + turboshaft::PrintTurboshaftGraph(&printing_zone, code_tracer, + "Graph generation"); + + // Skip the LoopUnrolling and WasmGCOptimize phases for wrappers. + pipeline_.Run(); + // TODO(14108): Do we need value numbering if wasm_opt is turned off? + if (v8_flags.wasm_opt) { + pipeline_.Run(); + } + + if (!Is64()) { + pipeline_.Run(); + } + + // This is more than an optimization currently: We need it to sort blocks to + // work around a bug in RecreateSchedulePhase. 
+ pipeline_.Run(); + + if (V8_UNLIKELY(v8_flags.turboshaft_enable_debug_features)) { + // This phase has to run very late to allow all previous phases to use + // debug features. + pipeline_.Run(); + } + + if (v8_flags.turboshaft_instruction_selection) { + // Run Turboshaft instruction selection. + if (!pipeline_.SelectInstructionsTurboshaft(&linkage)) { + return CompilationJob::FAILED; } - return SUCCEEDED; + + turboshaft_scope.reset(); + data_.DeleteGraphZone(); + pipeline_.AllocateRegisters(linkage.GetIncomingDescriptor(), false); + } else { + auto [new_graph, new_schedule] = + pipeline_.Run(&linkage); + data_.set_graph(new_graph); + data_.set_schedule(new_schedule); + TraceSchedule(data_.info(), &data_, data_.schedule(), + turboshaft::RecreateSchedulePhase::phase_name()); + + turboshaft_scope.reset(); + CHECK(pipeline_.SelectInstructions(&linkage)); } - return FAILED; + + pipeline_.AssembleCode(&linkage); + + return CompilationJob::SUCCEEDED; } + +CompilationJob::Status WasmTurboshaftWrapperCompilationJob::FinalizeJobImpl( + Isolate* isolate) { + return FinalizeWrapperCompilation( + &pipeline_, &info_, call_descriptor_, isolate, + "WasmTurboshaftWrapperCompilationJob::FinalizeJobImpl"); +} + #endif // V8_ENABLE_WEBASSEMBLY void PipelineImpl::RunPrintAndVerify(const char* phase, bool untyped) { @@ -2943,17 +3111,15 @@ bool PipelineImpl::OptimizeGraph(Linkage* linkage) { DCHECK(data->info()->inline_js_wasm_calls()); Run(); RunPrintAndVerify(JSWasmInliningPhase::phase_name(), true); - if (data->inline_wasm_into_js()) { - Run(-1); - RunPrintAndVerify(WasmTypingPhase::phase_name(), true); - if (v8_flags.wasm_opt) { - Run(data->wasm_module_for_inlining(), - data->jsgraph()); - RunPrintAndVerify(WasmGCOptimizationPhase::phase_name(), true); - } - Run(); - RunPrintAndVerify(JSWasmLoweringPhase::phase_name(), true); + Run(-1); + RunPrintAndVerify(WasmTypingPhase::phase_name(), true); + if (v8_flags.wasm_opt) { + Run(data->wasm_module_for_inlining(), + data->jsgraph()); + RunPrintAndVerify(WasmGCOptimizationPhase::phase_name(), true); } + Run(); + RunPrintAndVerify(JSWasmLoweringPhase::phase_name(), true); } #endif // V8_ENABLE_WEBASSEMBLY @@ -3016,7 +3182,7 @@ bool PipelineImpl::OptimizeGraph(Linkage* linkage) { RunPrintAndVerify(DecompressionOptimizationPhase::phase_name(), true); #if V8_ENABLE_WEBASSEMBLY - if (data->has_js_wasm_calls() && data->inline_wasm_into_js()) { + if (data->has_js_wasm_calls()) { Run(); RunPrintAndVerify(WasmJSLoweringPhase::phase_name(), true); } @@ -3105,8 +3271,9 @@ bool PipelineImpl::OptimizeGraph(Linkage* linkage) { Run(); -#if defined(V8_TARGET_ARCH_X64) or defined(V8_TARGET_ARCH_ARM64) or \ - defined(V8_TARGET_ARCH_ARM) +#if defined(V8_TARGET_ARCH_X64) || defined(V8_TARGET_ARCH_ARM64) || \ + defined(V8_TARGET_ARCH_ARM) || defined(V8_TARGET_ARCH_IA32) || \ + defined(V8_TARGET_ARCH_PPC64) if (v8_flags.turboshaft_instruction_selection) { // Run Turboshaft instruction selection. 
if (!SelectInstructionsTurboshaft(linkage)) { @@ -3335,6 +3502,7 @@ MaybeHandle Pipeline::GenerateCodeForCodeStub( pipeline.Run(&linkage); CHECK(!bailout.has_value()); + pipeline.Run(); pipeline.Run(); pipeline.Run(); pipeline.Run(); @@ -3525,14 +3693,18 @@ void LowerInt64(const wasm::FunctionSig* sig, MachineGraph* mcgraph, base::OwnedVector SerializeInliningPositions( const ZoneVector& positions) { - const size_t entry_size = - sizeof positions[0].inlinee_func_index + sizeof positions[0].caller_pos; + const size_t entry_size = sizeof positions[0].inlinee_func_index + + sizeof positions[0].was_tail_call + + sizeof positions[0].caller_pos; auto result = base::OwnedVector::New(positions.size() * entry_size); uint8_t* iter = result.begin(); - for (const auto& [func_index, caller_pos] : positions) { + for (const auto& [func_index, was_tail_call, caller_pos] : positions) { size_t index_size = sizeof func_index; std::memcpy(iter, &func_index, index_size); iter += index_size; + size_t was_tail_call_size = sizeof was_tail_call; + std::memcpy(iter, &was_tail_call, was_tail_call_size); + iter += was_tail_call_size; size_t pos_size = sizeof caller_pos; std::memcpy(iter, &caller_pos, pos_size); iter += pos_size; @@ -3772,7 +3944,7 @@ bool Pipeline::GenerateWasmCodeFromTurboshaftGraph( tracing_scope.stream() << "---------------------------------------------------\n" << "Begin compiling method " << data.info()->GetDebugName().get() - << " using TurboFan" << std::endl; + << " using Turboshaft" << std::endl; } if (mcgraph->machine()->Is32()) { @@ -3865,8 +4037,9 @@ bool Pipeline::GenerateWasmCodeFromTurboshaftGraph( data.BeginPhaseKind("V8.InstructionSelection"); -#if V8_TARGET_ARCH_X64 or V8_TARGET_ARCH_ARM64 or V8_TARGET_ARCH_ARM or \ - V8_TARGET_ARCH_IA32 +#if defined(V8_TARGET_ARCH_X64) || defined(V8_TARGET_ARCH_ARM64) || \ + defined(V8_TARGET_ARCH_ARM) || defined(V8_TARGET_ARCH_IA32) || \ + defined(V8_TARGET_ARCH_PPC64) bool use_turboshaft_instruction_selection = v8_flags.turboshaft_wasm_instruction_selection_staged; #else @@ -3941,7 +4114,7 @@ bool Pipeline::GenerateWasmCodeFromTurboshaftGraph( tracing_scope.stream() << "---------------------------------------------------\n" << "Finished compiling method " << data.info()->GetDebugName().get() - << " using TurboFan" << std::endl; + << " using Turboshaft" << std::endl; } if (V8_UNLIKELY(v8_flags.trace_wasm_compilation_times)) { diff --git a/deps/v8/src/compiler/pipeline.h b/deps/v8/src/compiler/pipeline.h index 7883c93df318a9..c472d7aa9ba3aa 100644 --- a/deps/v8/src/compiler/pipeline.h +++ b/deps/v8/src/compiler/pipeline.h @@ -13,6 +13,10 @@ #include "src/objects/code.h" #include "src/zone/zone-containers.h" +#if V8_ENABLE_WEBASSEMBLY +#include "src/wasm/value-type.h" +#endif + namespace v8 { namespace internal { @@ -28,8 +32,14 @@ struct CompilationEnv; struct FunctionBody; struct WasmCompilationResult; class WasmFeatures; +struct WasmModule; } // namespace wasm +namespace compiler::turboshaft { +class TurboshaftCompilationJob; +class Graph; +} // namespace compiler::turboshaft + namespace compiler { class CallDescriptor; @@ -78,12 +88,20 @@ class Pipeline : public AllStatic { WasmCompilationData& compilation_data, MachineGraph* mcgraph, wasm::WasmFeatures* detected, CallDescriptor* call_descriptor); +#if V8_ENABLE_WEBASSEMBLY // Returns a new compilation job for a wasm heap stub. 
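The SerializeInliningPositions change above now also copies was_tail_call into each serialized entry, field by field. A minimal sketch of that packing scheme follows, with a hypothetical InliningPosition struct standing in for the real one; copying fields individually keeps the serialized layout independent of struct padding.

#include <cstddef>
#include <cstdint>
#include <cstring>
#include <vector>

// Hypothetical position record, mirroring the shape of the entries above.
struct InliningPosition {
  int32_t inlinee_func_index;
  bool was_tail_call;
  int32_t caller_pos;
};

std::vector<uint8_t> Serialize(const std::vector<InliningPosition>& positions) {
  const size_t entry_size = sizeof(int32_t) + sizeof(bool) + sizeof(int32_t);
  std::vector<uint8_t> out(positions.size() * entry_size);
  uint8_t* iter = out.data();
  for (const auto& p : positions) {
    // Copy each field separately so no padding bytes leak into the buffer.
    std::memcpy(iter, &p.inlinee_func_index, sizeof p.inlinee_func_index);
    iter += sizeof p.inlinee_func_index;
    std::memcpy(iter, &p.was_tail_call, sizeof p.was_tail_call);
    iter += sizeof p.was_tail_call;
    std::memcpy(iter, &p.caller_pos, sizeof p.caller_pos);
    iter += sizeof p.caller_pos;
  }
  return out;
}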
static std::unique_ptr NewWasmHeapStubCompilationJob( Isolate* isolate, CallDescriptor* call_descriptor, std::unique_ptr zone, Graph* graph, CodeKind kind, std::unique_ptr debug_name, const AssemblerOptions& options); + static std::unique_ptr + NewWasmTurboshaftWrapperCompilationJob( + Isolate* isolate, const wasm::FunctionSig* sig, bool is_import, + const wasm::WasmModule* module, CodeKind kind, + std::unique_ptr debug_name, const AssemblerOptions& options); +#endif + // Run the pipeline on a machine graph and generate code. static MaybeHandle GenerateCodeForCodeStub( Isolate* isolate, CallDescriptor* call_descriptor, Graph* graph, diff --git a/deps/v8/src/compiler/property-access-builder.cc b/deps/v8/src/compiler/property-access-builder.cc index daae394817efb9..15fbff9449dfab 100644 --- a/deps/v8/src/compiler/property-access-builder.cc +++ b/deps/v8/src/compiler/property-access-builder.cc @@ -209,9 +209,10 @@ Node* PropertyAccessBuilder::TryFoldLoadConstantDataField( } if (access_info.field_representation().IsDouble()) { - base::Optional value = holder->GetOwnFastConstantDoubleProperty( + base::Optional value = holder->GetOwnFastConstantDoubleProperty( broker(), access_info.field_index(), dependencies()); - return value.has_value() ? jsgraph()->ConstantNoHole(*value) : nullptr; + return value.has_value() ? jsgraph()->ConstantNoHole(value->get_scalar()) + : nullptr; } OptionalObjectRef value = holder->GetOwnFastConstantDataProperty( broker(), access_info.field_representation(), access_info.field_index(), diff --git a/deps/v8/src/compiler/simplified-lowering.cc b/deps/v8/src/compiler/simplified-lowering.cc index bb75f8295d73fa..834fec40c7228a 100644 --- a/deps/v8/src/compiler/simplified-lowering.cc +++ b/deps/v8/src/compiler/simplified-lowering.cc @@ -4305,6 +4305,21 @@ class RepresentationSelector { } return; } + case IrOpcode::kChangeFloat64HoleToTagged: { + // If the {truncation} identifies NaN and undefined, we can just pass + // along the {truncation} and completely wipe the {node}. 
+ if (truncation.IsUnused()) return VisitUnused(node); + if (truncation.TruncatesOddballAndBigIntToNumber()) { + VisitUnop(node, UseInfo::TruncatingFloat64(), + MachineRepresentation::kFloat64); + if (lower()) DeferReplacement(node, node->InputAt(0)); + return; + } + VisitUnop( + node, UseInfo(MachineRepresentation::kFloat64, Truncation::Any()), + MachineRepresentation::kTagged); + return; + } case IrOpcode::kCheckNotTaggedHole: { VisitUnop(node, UseInfo::AnyTagged(), MachineRepresentation::kTagged); diff --git a/deps/v8/src/compiler/simplified-operator.cc b/deps/v8/src/compiler/simplified-operator.cc index 288af0f7d253ca..9491361be1f9d8 100644 --- a/deps/v8/src/compiler/simplified-operator.cc +++ b/deps/v8/src/compiler/simplified-operator.cc @@ -834,6 +834,7 @@ bool operator==(AssertNotNullParameters const& lhs, V(ChangeTaggedToFloat64, Operator::kNoProperties, 1, 0) \ V(ChangeTaggedToTaggedSigned, Operator::kNoProperties, 1, 0) \ V(ChangeFloat64ToTaggedPointer, Operator::kNoProperties, 1, 0) \ + V(ChangeFloat64HoleToTagged, Operator::kNoProperties, 1, 0) \ V(ChangeInt31ToTaggedSigned, Operator::kNoProperties, 1, 0) \ V(ChangeInt32ToTagged, Operator::kNoProperties, 1, 0) \ V(ChangeInt64ToTagged, Operator::kNoProperties, 1, 0) \ diff --git a/deps/v8/src/compiler/simplified-operator.h b/deps/v8/src/compiler/simplified-operator.h index 07831323937fc1..a4f6da3789a2d3 100644 --- a/deps/v8/src/compiler/simplified-operator.h +++ b/deps/v8/src/compiler/simplified-operator.h @@ -971,6 +971,7 @@ class V8_EXPORT_PRIVATE SimplifiedOperatorBuilder final const Operator* ChangeUint64ToTagged(); const Operator* ChangeFloat64ToTagged(CheckForMinusZeroMode); const Operator* ChangeFloat64ToTaggedPointer(); + const Operator* ChangeFloat64HoleToTagged(); const Operator* ChangeTaggedToBit(); const Operator* ChangeBitToTagged(); const Operator* TruncateBigIntToWord64(); diff --git a/deps/v8/src/compiler/turboshaft/analyzer-iterator.cc b/deps/v8/src/compiler/turboshaft/analyzer-iterator.cc index ecd0f33f7fb2d9..7d89e20cf35f44 100644 --- a/deps/v8/src/compiler/turboshaft/analyzer-iterator.cc +++ b/deps/v8/src/compiler/turboshaft/analyzer-iterator.cc @@ -16,20 +16,20 @@ void AnalyzerIterator::PopOutdated() { } } -Block* AnalyzerIterator::Next() { +const Block* AnalyzerIterator::Next() { DCHECK(HasNext()); DCHECK(!IsOutdated(stack_.back())); curr_ = stack_.back(); stack_.pop_back(); - Block* curr_header = curr_.block->IsLoop() - ? curr_.block - : loop_finder_.GetLoopHeader(curr_.block); + const Block* curr_header = curr_.block->IsLoop() + ? curr_.block + : loop_finder_.GetLoopHeader(curr_.block); // Pushing on the stack the children that are not in the same loop as Next // (remember that since we're doing a DFS with a Last-In-First-Out stack, // pushing them first on the stack means that they will be visited last). - for (Block* child = curr_.block->LastChild(); child != nullptr; + for (const Block* child = curr_.block->LastChild(); child != nullptr; child = child->NeighboringChild()) { if (loop_finder_.GetLoopHeader(child) != curr_header) { stack_.push_back({child, current_generation_}); @@ -38,7 +38,7 @@ Block* AnalyzerIterator::Next() { // Pushing on the stack the children that are in the same loop as Next (they // are pushed last, so that they will be visited first). 
- for (Block* child = curr_.block->LastChild(); child != nullptr; + for (const Block* child = curr_.block->LastChild(); child != nullptr; child = child->NeighboringChild()) { if (loop_finder_.GetLoopHeader(child) == curr_header) { stack_.push_back({child, current_generation_}); @@ -61,7 +61,8 @@ void AnalyzerIterator::MarkLoopForRevisit() { DCHECK_NOT_NULL(curr_.block); DCHECK_NE(curr_.generation, kNotVisitedGeneration); DCHECK(curr_.block->HasBackedge(graph_)); - Block* header = curr_.block->LastOperation(graph_).Cast().destination; + const Block* header = + curr_.block->LastOperation(graph_).Cast().destination; stack_.push_back({header, ++current_generation_}); } @@ -69,8 +70,9 @@ void AnalyzerIterator::MarkLoopForRevisitSkipHeader() { DCHECK_NOT_NULL(curr_.block); DCHECK_NE(curr_.generation, kNotVisitedGeneration); DCHECK(curr_.block->HasBackedge(graph_)); - Block* header = curr_.block->LastOperation(graph_).Cast().destination; - for (Block* child = header->LastChild(); child != nullptr; + const Block* header = + curr_.block->LastOperation(graph_).Cast().destination; + for (const Block* child = header->LastChild(); child != nullptr; child = child->NeighboringChild()) { stack_.push_back({child, ++current_generation_}); } diff --git a/deps/v8/src/compiler/turboshaft/analyzer-iterator.h b/deps/v8/src/compiler/turboshaft/analyzer-iterator.h index 7762789214c020..56f3440285c88f 100644 --- a/deps/v8/src/compiler/turboshaft/analyzer-iterator.h +++ b/deps/v8/src/compiler/turboshaft/analyzer-iterator.h @@ -73,9 +73,9 @@ namespace v8::internal::compiler::turboshaft { // recorded in {visited_}, it means that we've revisited a loop since the last // time we visited this block, so we should revisit it as well. -class AnalyzerIterator { +class V8_EXPORT_PRIVATE AnalyzerIterator { public: - AnalyzerIterator(Zone* phase_zone, Graph& graph, + AnalyzerIterator(Zone* phase_zone, const Graph& graph, const LoopFinder& loop_finder) : graph_(graph), loop_finder_(loop_finder), @@ -88,7 +88,7 @@ class AnalyzerIterator { DCHECK_IMPLIES(!stack_.empty(), !IsOutdated(stack_.back())); return !stack_.empty(); } - Block* Next(); + const Block* Next(); // Schedule the loop pointed to by the current block (as a backedge) // to be revisited on the next iteration. 
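The AnalyzerIterator comments above describe a DFS worklist whose entries carry a "generation" so that loops can be scheduled for revisiting while stale stack entries are skipped. Below is a simplified, self-contained sketch of that generation trick; plain ints stand in for blocks, and this is not the actual AnalyzerIterator implementation.

#include <cstddef>
#include <cstdint>
#include <vector>

struct Entry {
  int block;
  uint64_t generation;
};

class RevisitWorklist {
 public:
  explicit RevisitWorklist(size_t block_count) : visited_(block_count, 0) {}

  void Push(int block) { stack_.push_back({block, current_generation_}); }

  // Bumping the generation makes the given blocks eligible again; entries
  // pushed under an older generation become outdated and are dropped.
  void MarkForRevisit(const std::vector<int>& blocks) {
    ++current_generation_;
    for (int b : blocks) stack_.push_back({b, current_generation_});
  }

  bool Next(int* out) {
    while (!stack_.empty()) {
      Entry e = stack_.back();
      stack_.pop_back();
      // Outdated: the block was already visited at this generation or newer.
      if (visited_[e.block] >= e.generation) continue;
      visited_[e.block] = e.generation;
      *out = e.block;
      return true;
    }
    return false;
  }

 private:
  std::vector<Entry> stack_;
  std::vector<uint64_t> visited_;
  uint64_t current_generation_ = 1;
};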
void MarkLoopForRevisit(); @@ -98,7 +98,7 @@ class AnalyzerIterator { private: struct StackNode { - Block* block; + const Block* block; uint64_t generation; }; static constexpr uint64_t kNotVisitedGeneration = 0; @@ -109,7 +109,7 @@ class AnalyzerIterator { return visited_[node.block->index()] >= node.generation; } - Graph& graph_; + const Graph& graph_; const LoopFinder& loop_finder_; uint64_t current_generation_ = kGenerationForFirstVisit; diff --git a/deps/v8/src/compiler/turboshaft/assembler.h b/deps/v8/src/compiler/turboshaft/assembler.h index 7a8149b4c92922..ad7de77faf1c38 100644 --- a/deps/v8/src/compiler/turboshaft/assembler.h +++ b/deps/v8/src/compiler/turboshaft/assembler.h @@ -38,6 +38,8 @@ #include "src/compiler/turboshaft/utils.h" #include "src/flags/flags.h" #include "src/logging/runtime-call-stats.h" +#include "src/objects/elements-kind.h" +#include "src/objects/fixed-array.h" #include "src/objects/heap-number.h" #include "src/objects/oddball.h" #include "src/objects/tagged.h" @@ -167,9 +169,12 @@ class LabelBase { Block* block() { return data_.block; } + bool has_incoming_jump() const { return has_incoming_jump_; } + template void Goto(A& assembler, const values_t& values) { if (assembler.generating_unreachable_operations()) return; + has_incoming_jump_ = true; Block* current_block = assembler.current_block(); DCHECK_NOT_NULL(current_block); assembler.Goto(data_.block); @@ -180,6 +185,7 @@ class LabelBase { void GotoIf(A& assembler, OpIndex condition, BranchHint hint, const values_t& values) { if (assembler.generating_unreachable_operations()) return; + has_incoming_jump_ = true; Block* current_block = assembler.current_block(); DCHECK_NOT_NULL(current_block); if (assembler.GotoIf(condition, data_.block, hint) & @@ -192,6 +198,7 @@ class LabelBase { void GotoIfNot(A& assembler, OpIndex condition, BranchHint hint, const values_t& values) { if (assembler.generating_unreachable_operations()) return; + has_incoming_jump_ = true; Block* current_block = assembler.current_block(); DCHECK_NOT_NULL(current_block); if (assembler.GotoIfNot(condition, data_.block, hint) & @@ -283,6 +290,7 @@ class LabelBase { } BlockData data_; + bool has_incoming_jump_ = false; }; template @@ -584,19 +592,20 @@ class GenericReducerBase; // TURBOSHAFT_REDUCER_GENERIC_BOILERPLATE should almost never be needed: it // should only be used by the IR-specific base class, while other reducers // should simply use `TURBOSHAFT_REDUCER_BOILERPLATE`. -#define TURBOSHAFT_REDUCER_GENERIC_BOILERPLATE() \ +#define TURBOSHAFT_REDUCER_GENERIC_BOILERPLATE(Name) \ using ReducerList = typename Next::ReducerList; \ Assembler& Asm() { \ return *static_cast*>(this); \ } \ template \ using ScopedVar = turboshaft::ScopedVariable>; \ - using CatchScope = CatchScopeImpl>; + using CatchScope = CatchScopeImpl>; \ + static constexpr auto& ReducerName() { return #Name; } // Defines a few helpers to use the Assembler and its stack in Reducers. 
-#define TURBOSHAFT_REDUCER_BOILERPLATE() \ - TURBOSHAFT_REDUCER_GENERIC_BOILERPLATE() \ - using node_t = typename Next::node_t; \ +#define TURBOSHAFT_REDUCER_BOILERPLATE(Name) \ + TURBOSHAFT_REDUCER_GENERIC_BOILERPLATE(Name) \ + using node_t = typename Next::node_t; \ using block_t = typename Next::block_t; template @@ -655,7 +664,7 @@ template class EmitProjectionReducer : public UniformReducerAdapter { public: - TURBOSHAFT_REDUCER_BOILERPLATE() + TURBOSHAFT_REDUCER_BOILERPLATE(EmitProjection) OpIndex ReduceCatchBlockBegin() { // CatchBlockBegin have a single output, so they never have projections, @@ -704,7 +713,7 @@ template class TSReducerBase : public Next { public: static constexpr bool kIsBottomOfStack = true; - TURBOSHAFT_REDUCER_GENERIC_BOILERPLATE() + TURBOSHAFT_REDUCER_GENERIC_BOILERPLATE(TSReducerBase) using node_t = OpIndex; using block_t = Block; @@ -718,6 +727,10 @@ class TSReducerBase : public Next { Asm().output_graph().operation_origins()[result] = Asm().current_operation_origin(); #ifdef DEBUG + if (v8_flags.turboshaft_trace_intermediate_reductions) { + std::cout << std::setw(Asm().intermediate_tracing_depth()) << ' ' << "[" + << ReducerName() << "]: emitted " << op << "\n"; + } op_to_block_[result] = Asm().current_block(); DCHECK(ValidInputs(result)); #endif // DEBUG @@ -767,7 +780,7 @@ class TSReducerBase : public Next { template class ReducerBaseForwarder : public Next { public: - TURBOSHAFT_REDUCER_BOILERPLATE() + TURBOSHAFT_REDUCER_BOILERPLATE(ReducerBaseForwarder) #define EMIT_OP(Name) \ OpIndex ReduceInputGraph##Name(OpIndex ig_index, const Name##Op& op) { \ @@ -788,12 +801,17 @@ class ReducerBaseForwarder : public Next { template class GenericReducerBase : public ReducerBaseForwarder { public: - TURBOSHAFT_REDUCER_BOILERPLATE() + TURBOSHAFT_REDUCER_BOILERPLATE(GenericReducerBase) using Base = ReducerBaseForwarder; void Bind(Block* block) {} + // CanAutoInlineBlocksWithSinglePredecessor is used to control whether the + // CopyingPhase is allowed to automatically inline blocks with a single + // predecessor or not. + bool CanAutoInlineBlocksWithSinglePredecessor() const { return true; } + void Analyze() {} #ifdef DEBUG @@ -834,20 +852,19 @@ class GenericReducerBase : public ReducerBaseForwarder { input_phi.rep); } - OpIndex ReducePhi(base::Vector inputs, - RegisterRepresentation rep) { + OpIndex REDUCE(Phi)(base::Vector inputs, + RegisterRepresentation rep) { DCHECK(Asm().current_block()->IsMerge() && inputs.size() == Asm().current_block()->Predecessors().size()); return Base::ReducePhi(inputs, rep); } - template - OpIndex ReducePendingLoopPhi(Args... args) { + OpIndex REDUCE(PendingLoopPhi)(OpIndex first, RegisterRepresentation rep) { DCHECK(Asm().current_block()->IsLoop()); - return Base::ReducePendingLoopPhi(args...); + return Base::ReducePendingLoopPhi(first, rep); } - OpIndex ReduceGoto(Block* destination, bool is_backedge) { + OpIndex REDUCE(Goto)(Block* destination, bool is_backedge) { // Calling Base::Goto will call Emit, which will call FinalizeBlock, // which will reset {current_block_}. We thus save {current_block_} before // calling Base::Goto, as we'll need it for AddPredecessor. 
Note also that @@ -860,8 +877,8 @@ class GenericReducerBase : public ReducerBaseForwarder { return new_opindex; } - OpIndex ReduceBranch(OpIndex condition, Block* if_true, Block* if_false, - BranchHint hint) { + OpIndex REDUCE(Branch)(OpIndex condition, Block* if_true, Block* if_false, + BranchHint hint) { // There should never be a good reason to generate a Branch where both the // {if_true} and {if_false} are the same Block. If we ever decide to lift // this condition, then AddPredecessor and SplitEdge should be updated @@ -875,7 +892,7 @@ class GenericReducerBase : public ReducerBaseForwarder { return new_opindex; } - OpIndex ReduceCatchBlockBegin() { + OpIndex REDUCE(CatchBlockBegin)() { Block* current_block = Asm().current_block(); if (current_block->IsBranchTarget()) { DCHECK_EQ(current_block->PredecessorCount(), 1); @@ -901,8 +918,8 @@ class GenericReducerBase : public ReducerBaseForwarder { RegisterRepresentation::Tagged()); } - OpIndex ReduceSwitch(OpIndex input, base::Vector cases, - Block* default_case, BranchHint default_hint) { + OpIndex REDUCE(Switch)(OpIndex input, base::Vector cases, + Block* default_case, BranchHint default_hint) { #ifdef DEBUG // Making sure that all cases and {default_case} are different. If we ever // decide to lift this condition, then AddPredecessor and SplitEdge should @@ -924,9 +941,9 @@ class GenericReducerBase : public ReducerBaseForwarder { return new_opindex; } - OpIndex ReduceCall(OpIndex callee, OpIndex frame_state, - base::Vector arguments, - const TSCallDescriptor* descriptor, OpEffects effects) { + OpIndex REDUCE(Call)(OpIndex callee, OptionalOpIndex frame_state, + base::Vector arguments, + const TSCallDescriptor* descriptor, OpEffects effects) { OpIndex raw_call = Base::ReduceCall(callee, frame_state, arguments, descriptor, effects); bool has_catch_block = false; @@ -941,8 +958,8 @@ class GenericReducerBase : public ReducerBaseForwarder { // automatically by `CatchIfInCatchScope` and `DoNotCatch` defined below and // never explicitly. using Base::ReduceDidntThrow; - OpIndex ReduceCheckException(OpIndex throwing_operation, Block* successor, - Block* catch_block) { + OpIndex REDUCE(CheckException)(OpIndex throwing_operation, Block* successor, + Block* catch_block) { // {successor} and {catch_block} should never be the same. AddPredecessor // and SplitEdge rely on this. DCHECK_NE(successor, catch_block); @@ -969,7 +986,7 @@ class GenericReducerBase : public ReducerBaseForwarder { template class GenericAssemblerOpInterface : public Next { public: - TURBOSHAFT_REDUCER_BOILERPLATE() + TURBOSHAFT_REDUCER_BOILERPLATE(GenericAssemblerOpInterface) ~GenericAssemblerOpInterface() { // If the {if_scope_stack_} is not empty, it means that a END_IF is missing. @@ -1100,7 +1117,7 @@ template class TurboshaftAssemblerOpInterface : public GenericAssemblerOpInterface { public: - TURBOSHAFT_REDUCER_BOILERPLATE() + TURBOSHAFT_REDUCER_BOILERPLATE(TurboshaftAssemblerOpInterface) template explicit TurboshaftAssemblerOpInterface(Args... args) @@ -1116,8 +1133,8 @@ class TurboshaftAssemblerOpInterface // stack, and that the BaseReducer will actually emit an Operation. If we put // this projection-to-tuple-simplification in the BaseReducer, then this // assumption of the ValueNumberingReducer will break. 
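The reducers above are stacked as template mix-ins: each reducer derives from the Next reducer in the stack, may transform an operation, and otherwise forwards it downwards, and TURBOSHAFT_REDUCER_BOILERPLATE(Name) now also exposes a ReducerName() for tracing. A toy, self-contained sketch of that stacking pattern follows; ConstantFolding, Tracing, and Bottom are invented for the sketch and are not Turboshaft classes.

#include <iostream>

// The bottom layer actually "emits" the operation.
struct Bottom {
  int ReduceAdd(int a, int b) {
    std::cout << "emit Add\n";
    return a + b;
  }
};

// Each reducer is a mix-in over the next one and may short-circuit or forward.
template <class Next>
struct ConstantFolding : Next {
  static constexpr const char* ReducerName() { return "ConstantFolding"; }
  int ReduceAdd(int a, int b) {
    if (a == 0) return b;          // fold the trivial case locally
    return Next::ReduceAdd(a, b);  // otherwise forward down the stack
  }
};

template <class Next>
struct Tracing : Next {
  static constexpr const char* ReducerName() { return "Tracing"; }
  int ReduceAdd(int a, int b) {
    std::cout << "[" << ReducerName() << "] Add(" << a << ", " << b << ")\n";
    return Next::ReduceAdd(a, b);
  }
};

int main() {
  // The fully stacked type plays the role of the assembler: calls start at the
  // top reducer and fall through to Bottom unless a layer handles them.
  Tracing<ConstantFolding<Bottom>> stack;
  stack.ReduceAdd(0, 5);  // folded by ConstantFolding, still traced
  stack.ReduceAdd(2, 3);  // reaches Bottom and is "emitted"
}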
- OpIndex ReduceProjection(OpIndex tuple, uint16_t index, - RegisterRepresentation rep) { + OpIndex REDUCE(Projection)(OpIndex tuple, uint16_t index, + RegisterRepresentation rep) { if (auto* tuple_op = Asm().matcher().template TryCast(tuple)) { return tuple_op->input(index); } @@ -1151,7 +1168,7 @@ class TurboshaftAssemblerOpInterface DECL_SINGLE_REP_BINOP_V(Word64Add, WordBinop, Add, Word64) DECL_SINGLE_REP_BINOP_V(WordPtrAdd, WordBinop, Add, WordPtr) DECL_SINGLE_REP_BINOP(PointerAdd, WordBinop, Add, - WordRepresentation::PointerSized()) + WordRepresentation::WordPtr()) DECL_MULTI_REP_BINOP(WordMul, WordBinop, WordRepresentation, Mul) DECL_SINGLE_REP_BINOP_V(Word32Mul, WordBinop, Mul, Word32) @@ -1167,6 +1184,7 @@ class TurboshaftAssemblerOpInterface DECL_MULTI_REP_BINOP(WordBitwiseOr, WordBinop, WordRepresentation, BitwiseOr) DECL_SINGLE_REP_BINOP_V(Word32BitwiseOr, WordBinop, BitwiseOr, Word32) DECL_SINGLE_REP_BINOP_V(Word64BitwiseOr, WordBinop, BitwiseOr, Word64) + DECL_SINGLE_REP_BINOP_V(WordPtrBitwiseOr, WordBinop, BitwiseOr, WordPtr) DECL_MULTI_REP_BINOP(WordBitwiseXor, WordBinop, WordRepresentation, BitwiseXor) @@ -1178,7 +1196,7 @@ class TurboshaftAssemblerOpInterface DECL_SINGLE_REP_BINOP_V(Word64Sub, WordBinop, Sub, Word64) DECL_SINGLE_REP_BINOP_V(WordPtrSub, WordBinop, Sub, WordPtr) DECL_SINGLE_REP_BINOP(PointerSub, WordBinop, Sub, - WordRepresentation::PointerSized()) + WordRepresentation::WordPtr()) DECL_MULTI_REP_BINOP(IntDiv, WordBinop, WordRepresentation, SignedDiv) DECL_SINGLE_REP_BINOP_V(Int32Div, WordBinop, SignedDiv, Word32) @@ -1355,7 +1373,7 @@ class TurboshaftAssemblerOpInterface DECL_SINGLE_REP_COMPARISON_V(Uint32LessThan, UnsignedLessThan, Word32) DECL_SINGLE_REP_COMPARISON_V(Uint64LessThan, UnsignedLessThan, Word64) DECL_SINGLE_REP_BINOP(UintPtrLessThan, Comparison, UnsignedLessThan, - WordRepresentation::PointerSized()) + WordRepresentation::WordPtr()) DECL_MULTI_REP_BINOP(FloatLessThan, Comparison, RegisterRepresentation, SignedLessThan) DECL_SINGLE_REP_COMPARISON_V(Float32LessThan, SignedLessThan, Float32) @@ -1374,8 +1392,7 @@ class TurboshaftAssemblerOpInterface DECL_SINGLE_REP_COMPARISON_V(Uint64LessThanOrEqual, UnsignedLessThanOrEqual, Word64) DECL_SINGLE_REP_BINOP(UintPtrLessThanOrEqual, Comparison, - UnsignedLessThanOrEqual, - WordRepresentation::PointerSized()) + UnsignedLessThanOrEqual, WordRepresentation::WordPtr()) DECL_MULTI_REP_BINOP(FloatLessThanOrEqual, Comparison, RegisterRepresentation, SignedLessThanOrEqual) DECL_SINGLE_REP_COMPARISON_V(Float32LessThanOrEqual, SignedLessThanOrEqual, @@ -1482,6 +1499,41 @@ class TurboshaftAssemblerOpInterface #undef DECL_SINGLE_REP_UNARY_V #undef DECL_MULTI_REP_UNARY + OpIndex WordBinopDeoptOnOverflow(OpIndex left, OpIndex right, + OpIndex frame_state, + WordBinopDeoptOnOverflowOp::Kind kind, + WordRepresentation rep, + FeedbackSource feedback, + CheckForMinusZeroMode mode) { + return ReduceIfReachableWordBinopDeoptOnOverflow(left, right, frame_state, + kind, rep, feedback, mode); + } +#define DECL_SINGLE_REP_BINOP_DEOPT_OVERFLOW(operation, rep_type) \ + OpIndex rep_type##operation##DeoptOnOverflow( \ + ConstOrV left, ConstOrV right, OpIndex frame_state, \ + FeedbackSource feedback, \ + CheckForMinusZeroMode mode = \ + CheckForMinusZeroMode::kDontCheckForMinusZero) { \ + return WordBinopDeoptOnOverflow( \ + resolve(left), resolve(right), frame_state, \ + WordBinopDeoptOnOverflowOp::Kind::k##operation, \ + WordRepresentation::rep_type(), feedback, mode); \ + } + + DECL_SINGLE_REP_BINOP_DEOPT_OVERFLOW(SignedAdd, 
Word32) + DECL_SINGLE_REP_BINOP_DEOPT_OVERFLOW(SignedAdd, Word64) + DECL_SINGLE_REP_BINOP_DEOPT_OVERFLOW(SignedSub, Word32) + DECL_SINGLE_REP_BINOP_DEOPT_OVERFLOW(SignedSub, Word64) + DECL_SINGLE_REP_BINOP_DEOPT_OVERFLOW(SignedMul, Word32) + DECL_SINGLE_REP_BINOP_DEOPT_OVERFLOW(SignedMul, Word64) + DECL_SINGLE_REP_BINOP_DEOPT_OVERFLOW(SignedDiv, Word32) + DECL_SINGLE_REP_BINOP_DEOPT_OVERFLOW(SignedDiv, Word64) + DECL_SINGLE_REP_BINOP_DEOPT_OVERFLOW(SignedMod, Word32) + DECL_SINGLE_REP_BINOP_DEOPT_OVERFLOW(SignedMod, Word64) + DECL_SINGLE_REP_BINOP_DEOPT_OVERFLOW(UnsignedDiv, Word32) + DECL_SINGLE_REP_BINOP_DEOPT_OVERFLOW(UnsignedMod, Word32) +#undef DECL_SINGLE_REP_BINOP_DEOPT_OVERFLOW + V BitcastWord32PairToFloat64(ConstOrV high_word32, ConstOrV low_word32) { return ReduceIfReachableBitcastWord32PairToFloat64(resolve(high_word32), @@ -1493,10 +1545,10 @@ class TurboshaftAssemblerOpInterface return ReduceIfReachableTaggedBitcast(input, from, to, kind); } -#define DECL_TAGGED_BITCAST(FromT, ToT, kind) \ - V Bitcast##FromT##To##ToT(V from) { \ - return TaggedBitcast(from, V::rep, V::rep, \ - TaggedBitcastOp::Kind::kind); \ +#define DECL_TAGGED_BITCAST(FromT, ToT, kind) \ + V Bitcast##FromT##To##ToT(V input) { \ + return TaggedBitcast(input, V::rep, V::rep, \ + TaggedBitcastOp::Kind::kind); \ } DECL_TAGGED_BITCAST(Smi, Word32, kSmi) DECL_TAGGED_BITCAST(Word32, Smi, kSmi) @@ -1507,6 +1559,11 @@ class TurboshaftAssemblerOpInterface DECL_TAGGED_BITCAST(WordPtr, Tagged, kAny) DECL_TAGGED_BITCAST(Tagged, WordPtr, kAny) #undef DECL_TAGGED_BITCAST + V BitcastTaggedToWordPtrForTagAndSmiBits(V input) { + return TaggedBitcast(input, RegisterRepresentation::Tagged(), + RegisterRepresentation::WordPtr(), + TaggedBitcastOp::Kind::kTagAndSmiBits); + } V ObjectIs(V input, ObjectIsOp::Kind kind, ObjectIsOp::InputAssumptions input_assumptions) { @@ -1667,10 +1724,14 @@ class TurboshaftAssemblerOpInterface } V UintPtrConstant(uintptr_t value) { return WordConstant(static_cast(value), - WordRepresentation::PointerSized()); + WordRepresentation::WordPtr()); + } + V SmiConstant(intptr_t value) { + return SmiConstant(i::Tagged(value)); } V SmiConstant(i::Tagged value) { - return V::Cast(UintPtrConstant(value.ptr())); + return V::Cast( + ReduceIfReachableConstant(ConstantOp::Kind::kSmi, value)); } V Float32Constant(float value) { return ReduceIfReachableConstant(ConstantOp::Kind::kFloat32, value); @@ -2051,6 +2112,52 @@ class TurboshaftAssemblerOpInterface rep.SizeInBytesLog2()); } + OpIndex LoadProtectedPointerField(OpIndex base, int32_t offset) { +#if V8_ENABLE_SANDBOX + static_assert(COMPRESS_POINTERS_BOOL); + OpIndex tagged = Load(base, LoadOp::Kind::TaggedBase(), + MemoryRepresentation::Uint32(), offset); + OpIndex trusted_cage_base = + Load(LoadRootRegister(), LoadOp::Kind::RawAligned().Immutable(), + MemoryRepresentation::UintPtr(), + IsolateData::trusted_cage_base_offset()); + // The bit cast is needed to change the type of the node to Tagged. This is + // necessary so that if this value gets spilled on the stack, then the GC + // will process it. 
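// [Editor's note] A minimal stand-alone sketch of the pointer recovery that
// the next lines perform under the sandbox, assuming a 32-bit on-heap field
// and a per-process trusted cage base (illustrative names only, not the V8
// API):
//
//   #include <cstdint>
//
//   uint64_t RecoverProtectedPointer(uint64_t trusted_cage_base,
//                                    uint32_t compressed_field) {
//     // Zero-extend the 32-bit field, then OR in the cage base so the
//     // result points back into the trusted space.
//     return trusted_cage_base | static_cast<uint64_t>(compressed_field);
//   }
//
// The BitcastWordPtrToTagged below then only retags the recovered word so
// that a spilled copy is treated as a tagged pointer by the GC, as the
// comment above explains.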
+ return BitcastWordPtrToTagged( + WordPtrBitwiseOr(ChangeUint32ToUintPtr(tagged), trusted_cage_base)); +#else + return Load(base, LoadOp::Kind::TaggedBase(), + MemoryRepresentation::TaggedPointer(), offset); +#endif // V8_ENABLE_SANDBOX + } + + V LoadFixedArrayElement(V array, int index) { + return Load(array, LoadOp::Kind::TaggedBase(), + MemoryRepresentation::AnyTagged(), + FixedArray::OffsetOfElementAt(index)); + } + V LoadFixedArrayElement(V array, V index) { + return Load(array, index, LoadOp::Kind::TaggedBase(), + MemoryRepresentation::AnyTagged(), + FixedArray::OffsetOfElementAt(0), kTaggedSizeLog2); + } + + V LoadFixedDoubleArrayElement(V array, int index) { + return Load(array, LoadOp::Kind::TaggedBase(), + MemoryRepresentation::Float64(), + FixedDoubleArray::OffsetOfElementAt(index)); + } + V LoadFixedDoubleArrayElement(V array, + V index) { + DCHECK_EQ(ElementsKindToShiftSize(PACKED_DOUBLE_ELEMENTS), + ElementsKindToShiftSize(HOLEY_DOUBLE_ELEMENTS)); + return Load(array, index, LoadOp::Kind::TaggedBase(), + MemoryRepresentation::Float64(), + FixedDoubleArray::OffsetOfElementAt(0), + ElementsKindToShiftSize(PACKED_DOUBLE_ELEMENTS)); + } + void Store( OpIndex base, OptionalOpIndex index, OpIndex value, StoreOp::Kind kind, MemoryRepresentation stored_rep, WriteBarrierKind write_barrier, @@ -2130,7 +2237,7 @@ class TurboshaftAssemblerOpInterface if (access.is_bounded_size_access) { DCHECK(!is_sandboxed_external); value = ShiftRightLogical(value, kBoundedSizeShift, - WordRepresentation::PointerSized()); + WordRepresentation::WordPtr()); } #endif // V8_ENABLE_SANDBOX return value; @@ -2141,12 +2248,12 @@ class TurboshaftAssemblerOpInterface V LoadMapField(V object) { return LoadField(object, AccessBuilder::ForMap()); } + V LoadInstanceTypeField(V map) { return LoadField(map, AccessBuilder::ForMapInstanceType()); } V HasInstanceType(V object, InstanceType instance_type) { - // TODO(mliedtke): For Wasm, these loads should be immutable. 
return Word32Equal(LoadInstanceTypeField(LoadMapField(object)), Word32Constant(instance_type)); } @@ -2180,8 +2287,8 @@ class TurboshaftAssemblerOpInterface #ifdef V8_ENABLE_SANDBOX if (access.is_bounded_size_access) { - value = ShiftLeft(value, kBoundedSizeShift, - WordRepresentation::PointerSized()); + value = + ShiftLeft(value, kBoundedSizeShift, WordRepresentation::WordPtr()); } #endif // V8_ENABLE_SANDBOX @@ -2282,8 +2389,8 @@ class TurboshaftAssemblerOpInterface FrameConstantOp::Kind::kParentFramePointer); } - V StackSlot(int size, int alignment) { - return ReduceIfReachableStackSlot(size, alignment); + V StackSlot(int size, int alignment, bool is_tagged = false) { + return ReduceIfReachableStackSlot(size, alignment, is_tagged); } OpIndex LoadRootRegister() { return ReduceIfReachableLoadRootRegister(); } @@ -2805,16 +2912,17 @@ class TurboshaftAssemblerOpInterface } #if V8_ENABLE_WEBASSEMBLY - void TrapIf(V condition, OpIndex frame_state, TrapId trap_id) { + void TrapIf(V condition, OptionalOpIndex frame_state, + TrapId trap_id) { ReduceIfReachableTrapIf(condition, frame_state, false, trap_id); } - void TrapIfNot(V condition, OpIndex frame_state, TrapId trap_id) { + void TrapIfNot(V condition, OptionalOpIndex frame_state, + TrapId trap_id) { ReduceIfReachableTrapIf(condition, frame_state, true, trap_id); } #endif // V8_ENABLE_WEBASSEMBLY void StaticAssert(OpIndex condition, const char* source) { - CHECK(v8_flags.turboshaft_enable_debug_features); ReduceIfReachableStaticAssert(condition, source); } @@ -2990,7 +3098,7 @@ class TurboshaftAssemblerOpInterface ReduceIfReachableDebugPrint(input, rep); } void DebugPrint(V input) { - return DebugPrint(input, RegisterRepresentation::PointerSized()); + return DebugPrint(input, RegisterRepresentation::WordPtr()); } void DebugPrint(V input) { return DebugPrint(input, RegisterRepresentation::Float64()); @@ -3296,12 +3404,12 @@ class TurboshaftAssemblerOpInterface return ReduceIfReachableRttCanon(rtts, type_index); } - V WasmTypeCheck(V object, V rtt, + V WasmTypeCheck(V object, OptionalV rtt, WasmTypeCheckConfig config) { return ReduceIfReachableWasmTypeCheck(object, rtt, config); } - V WasmTypeCast(V object, V rtt, + V WasmTypeCast(V object, OptionalV rtt, WasmTypeCheckConfig config) { return ReduceIfReachableWasmTypeCast(object, rtt, config); } @@ -3436,16 +3544,10 @@ class TurboshaftAssemblerOpInterface RegisterRepresentation::Tagged()); } - V LoadFixedArrayElement(V array, int index) { - return Load(array, LoadOp::Kind::TaggedBase(), - MemoryRepresentation::AnyTagged(), - FixedArray::kHeaderSize + index * kTaggedSize); - } - - V LoadFixedArrayElement(V array, V index) { - return Load(array, index, LoadOp::Kind::TaggedBase(), - MemoryRepresentation::AnyTagged(), FixedArray::kHeaderSize, - kTaggedSizeLog2); + V LoadProtectedFixedArrayElement(V array, + int index) { + return LoadProtectedPointerField( + array, ProtectedFixedArray::OffsetOfElementAt(index)); } void StoreFixedArrayElement(V array, int index, V value, @@ -3679,6 +3781,10 @@ class Assembler : public AssemblerData, return this->output_graph().Get(op_idx); } +#ifdef DEBUG + int& intermediate_tracing_depth() { return intermediate_tracing_depth_; } +#endif + // Adds {source} to the predecessors of {destination}. void AddPredecessor(Block* source, Block* destination, bool branch) { DCHECK_IMPLIES(branch, source->EndsWithBranchingOp(this->output_graph())); @@ -3890,6 +3996,10 @@ class Assembler : public AssemblerData, // additional parameters to ReduceXXX methods. 
OpIndex current_operation_origin_ = OpIndex::Invalid(); +#ifdef DEBUG + int intermediate_tracing_depth_ = 0; +#endif + template friend class TSReducerBase; template @@ -3934,14 +4044,8 @@ class TSAssembler : public Assembler> { public: -#ifdef _WIN32 - explicit TSAssembler(Graph& input_graph, Graph& output_graph, - Zone* phase_zone) - : Assembler(input_graph, output_graph, phase_zone) {} -#else using Assembler>::Assembler; -#endif }; #include "src/compiler/turboshaft/undef-assembler-macros.inc" diff --git a/deps/v8/src/compiler/turboshaft/assert-types-reducer.h b/deps/v8/src/compiler/turboshaft/assert-types-reducer.h index f3d691779ed4d0..f28b5b0c0c9aa2 100644 --- a/deps/v8/src/compiler/turboshaft/assert-types-reducer.h +++ b/deps/v8/src/compiler/turboshaft/assert-types-reducer.h @@ -34,7 +34,7 @@ class AssertTypesReducer #endif public: - TURBOSHAFT_REDUCER_BOILERPLATE() + TURBOSHAFT_REDUCER_BOILERPLATE(AssertTypes) using Adapter = UniformReducerAdapter; diff --git a/deps/v8/src/compiler/turboshaft/branch-elimination-reducer.h b/deps/v8/src/compiler/turboshaft/branch-elimination-reducer.h index 9f54e92e8d7a2b..c8c69017442c75 100644 --- a/deps/v8/src/compiler/turboshaft/branch-elimination-reducer.h +++ b/deps/v8/src/compiler/turboshaft/branch-elimination-reducer.h @@ -194,10 +194,9 @@ class BranchEliminationReducer : public Next { // that's the case, then we copy the destination block, and the 1st // optimization will replace its final Branch by a Goto when reaching it. public: - TURBOSHAFT_REDUCER_BOILERPLATE() -#if defined(__clang__) - static_assert(reducer_list_contains::value); -#endif + TURBOSHAFT_REDUCER_BOILERPLATE(BranchElimination) + // TODO(dmercadier): Add static_assert that this is run as part of a + // CopyingPhase. void Bind(Block* new_block) { Next::Bind(new_block); @@ -304,15 +303,25 @@ class BranchEliminationReducer : public Next { goto no_change; } - if (destination_origin->PredecessorCount() == 1) { - // This block has a single successor and `destination_origin` has a single - // predecessor. We can merge these blocks (optimization 5). - __ CloneAndInlineBlock(destination_origin); - return OpIndex::Invalid(); - } + // Maximum size up to which we allow cloning a block. Cloning too large + // blocks will lead to increasing the size of the graph too much, which will + // lead to slower compile times and larger generated code. + // TODO(dmercadier): we might want to exclude Phis from this, since they are + // typically removed when we clone a block. However, computing the number of + // operations in a block excluding Phis is more costly (because we'd have to + // iterate all of the operations one by one). + // TODO(dmercadier): this "13" was selected fairly arbitrarily (= it sounded + // reasonable). It could be useful to run a few benchmarks to see if we can + // find a more optimal number. + static constexpr int kMaxOpCountForCloning = 13; const Operation& last_op = destination_origin->LastOperation(__ input_graph()); + + if (destination_origin->OpCountUpperBound() > kMaxOpCountForCloning) { + goto no_change; + } + if (const BranchOp* branch = last_op.template TryCast()) { OpIndex condition = __ template MapToNewGraph(branch->condition()); if (condition.valid()) { @@ -327,7 +336,7 @@ class BranchEliminationReducer : public Next { // condition is already known. As per the 2nd optimization, we'll // process {new_dst} right away, and we'll end it with a Goto instead of // its current Branch.
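// [Editor's note] To make the size trade-off described above concrete: if a
// merge block containing N operations is cloned by each of its P
// predecessors, the output graph ends up with roughly P copies of those N
// operations instead of one, i.e. about (P - 1) * N extra operations (minus
// the Phis, which cloning removes). With the cutoff above, a 13-operation
// block cloned by, say, 4 predecessors adds at most ~39 extra operations,
// which keeps graph growth bounded; the constant itself is acknowledged as
// arbitrary in the TODO above.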
- __ CloneAndInlineBlock(destination_origin); + __ CloneBlockAndGoto(destination_origin); return OpIndex::Invalid(); } else { // Optimization 2bis: @@ -339,14 +348,14 @@ class BranchEliminationReducer : public Next { if (const PhiOp* cond = __ input_graph() .Get(branch->condition()) .template TryCast()) { - __ CloneAndInlineBlock(destination_origin); + __ CloneBlockAndGoto(destination_origin); return OpIndex::Invalid(); } else if (CanBeConstantFolded(branch->condition(), destination_origin)) { // If the {cond} only uses constant Phis that come from the current // block, it's probably worth it to clone the block in order to // constant-fold away the Branch. - __ CloneAndInlineBlock(destination_origin); + __ CloneBlockAndGoto(destination_origin); return OpIndex::Invalid(); } else { goto no_change; @@ -358,9 +367,6 @@ class BranchEliminationReducer : public Next { // The destination block in the old graph ends with a Return // and the old destination is a merge block, so we can directly // inline the destination block in place of the Goto. - // TODO(nicohartmann@): Temporarily disable this "optimization" because - // it prevents dead code elimination in some cases. Reevaluate this and - // reenable if phases have been reordered properly. Asm().CloneAndInlineBlock(destination_origin); return OpIndex::Invalid(); } @@ -393,8 +399,8 @@ class BranchEliminationReducer : public Next { } #if V8_ENABLE_WEBASSEMBLY - OpIndex REDUCE(TrapIf)(OpIndex condition, OpIndex frame_state, bool negated, - const TrapId trap_id) { + OpIndex REDUCE(TrapIf)(OpIndex condition, OptionalOpIndex frame_state, + bool negated, const TrapId trap_id) { LABEL_BLOCK(no_change) { return Next::ReduceTrapIf(condition, frame_state, negated, trap_id); } diff --git a/deps/v8/src/compiler/turboshaft/builtin-call-descriptors.h b/deps/v8/src/compiler/turboshaft/builtin-call-descriptors.h index 70e4128c8687ea..3d6f8d61c76266 100644 --- a/deps/v8/src/compiler/turboshaft/builtin-call-descriptors.h +++ b/deps/v8/src/compiler/turboshaft/builtin-call-descriptors.h @@ -534,7 +534,8 @@ struct BuiltinCallDescriptor { static constexpr bool kNeedsFrameState = false; static constexpr bool kNeedsContext = false; - static constexpr Operator::Properties kProperties = Operator::kEliminatable; + static constexpr Operator::Properties kProperties = + Operator::kNoDeopt | Operator::kNoWrite; static constexpr OpEffects kEffects = base_effects.CanAllocateWithoutIdentity(); }; @@ -604,6 +605,18 @@ struct BuiltinCallDescriptor { base_effects.CanReadMemory().CanWriteHeapMemory(); }; + struct WasmStringToUtf8Array : public Descriptor { + static constexpr auto kFunction = Builtin::kWasmStringToUtf8Array; + using arguments_t = std::tuple>; + using results_t = std::tuple>; + static constexpr bool kNeedsFrameState = false; + static constexpr bool kNeedsContext = false; + static constexpr Operator::Properties kProperties = + Operator::kNoDeopt | Operator::kNoThrow; + static constexpr OpEffects kEffects = + base_effects.CanReadMemory().CanAllocate(); + }; + struct WasmStringEncodeWtf16Array : public Descriptor { static constexpr auto kFunction = Builtin::kWasmStringEncodeWtf16Array; diff --git a/deps/v8/src/compiler/turboshaft/code-elimination-and-simplification-phase.cc b/deps/v8/src/compiler/turboshaft/code-elimination-and-simplification-phase.cc index fcb579f6f62d7a..c005f88a7d3142 100644 --- a/deps/v8/src/compiler/turboshaft/code-elimination-and-simplification-phase.cc +++ b/deps/v8/src/compiler/turboshaft/code-elimination-and-simplification-phase.cc @@ -10,7 
+10,7 @@ #include "src/compiler/turboshaft/duplication-optimization-reducer.h" #include "src/compiler/turboshaft/load-store-simplification-reducer.h" #include "src/compiler/turboshaft/phase.h" -#include "src/compiler/turboshaft/stack-check-reducer.h" +#include "src/compiler/turboshaft/stack-check-lowering-reducer.h" #if V8_ENABLE_WEBASSEMBLY #include "src/compiler/turboshaft/wasm-js-lowering-reducer.h" @@ -21,7 +21,7 @@ namespace v8::internal::compiler::turboshaft { void CodeEliminationAndSimplificationPhase::Run(Zone* temp_zone) { UnparkedScopeIfNeeded scope(PipelineData::Get().broker(), DEBUG_BOOL); - CopyingPhase::Run(temp_zone); + ValueNumberingReducer>::Run(temp_zone); } } // namespace v8::internal::compiler::turboshaft diff --git a/deps/v8/src/compiler/turboshaft/copying-phase.h b/deps/v8/src/compiler/turboshaft/copying-phase.h index fa2acafd12d6f2..97640672fb7290 100644 --- a/deps/v8/src/compiler/turboshaft/copying-phase.h +++ b/deps/v8/src/compiler/turboshaft/copying-phase.h @@ -22,7 +22,9 @@ #include "src/compiler/turboshaft/operations.h" #include "src/compiler/turboshaft/phase.h" #include "src/compiler/turboshaft/reducer-traits.h" +#include "src/compiler/turboshaft/representations.h" #include "src/compiler/turboshaft/snapshot-table.h" +#include "src/compiler/turboshaft/variable-reducer.h" #include "src/zone/zone-containers.h" namespace v8::internal::compiler::turboshaft { @@ -36,33 +38,56 @@ struct PaddingSpace { V8_EXPORT_PRIVATE std::ostream& operator<<(std::ostream& os, PaddingSpace padding); -template -class VariableReducerHotfix : public Next { -public: - TURBOSHAFT_REDUCER_BOILERPLATE() +template +class ReducerBaseForwarder; - void SetVariable(Variable var, OpIndex new_index) {} - Variable NewLoopInvariantVariable(MaybeRegisterRepresentation rep) { return Variable(); } +template +class OutputGraphAssembler : public Base { +#define FRIEND(op) friend struct op##Op; + TURBOSHAFT_OPERATION_LIST(FRIEND) +#undef FRIEND + template + friend struct FixedArityOperationT; - OpIndex GetVariable(Variable var) { return OpIndex(); } - OpIndex GetPredecessorValue(Variable var, int predecessor_index) { return OpIndex(); } -}; + OpIndex Map(OpIndex index) { return derived_this()->MapToNewGraph(index); } -template -class ReducerBaseForwarder; -template -class VariableReducer; + OptionalOpIndex Map(OptionalOpIndex index) { + return derived_this()->MapToNewGraph(index); + } -template -class GraphVisitor : public Next { - template - friend class ReducerBaseForwarder; + template + base::SmallVector Map(base::Vector indices) { + return derived_this()->template MapToNewGraph(indices); + } + + public: +#define ASSEMBLE(operation) \ + OpIndex AssembleOutputGraph##operation(const operation##Op& op) { \ + return op.Explode( \ + [a = assembler()](auto... 
args) { \ + return a->Reduce##operation(args...); \ + }, \ + *this); \ + } + TURBOSHAFT_OPERATION_LIST(ASSEMBLE) +#undef ASSEMBLE private: - bool contains_variable_reducer_; + Derived* derived_this() { return static_cast(this); } + Assembler* assembler() { + return &derived_this()->Asm(); + } +}; + +template +class GraphVisitor : public OutputGraphAssembler, + VariableReducer> { + template + friend class ReducerBaseForwarder; public: - TURBOSHAFT_REDUCER_BOILERPLATE() + using Next = VariableReducer; + TURBOSHAFT_REDUCER_BOILERPLATE(CopyingPhase) GraphVisitor() : input_graph_(Asm().modifiable_input_graph()), @@ -74,15 +99,15 @@ class GraphVisitor : public Next { blocks_needing_variables_(Asm().input_graph().block_count(), Asm().phase_zone()), old_opindex_to_variables(Asm().input_graph().op_id_count(), - Asm().phase_zone(), &Asm().input_graph()) { + Asm().phase_zone(), &Asm().input_graph()), + blocks_to_clone_(Asm().phase_zone()) { Asm().output_graph().Reset(); } // `trace_reduction` is a template parameter to avoid paying for tracing at // runtime. template - void VisitGraph(bool contains_variable_reducer) { - contains_variable_reducer_ = contains_variable_reducer; + void VisitGraph() { Asm().Analyze(); // Creating initial old-to-new Block mapping. @@ -130,10 +155,48 @@ class GraphVisitor : public Next { return &turn_loop_without_backedge_into_merge_; } - // Visits and emits {input_block} right now (ie, in the current block). + // Emits a Goto to a cloned version of {input_block}, assuming that the only + // predecessor of this cloned copy will be the current block. {input_block} is + // not cloned right away (because this would recursively call VisitBlockBody, + // which could cause stack overflows), and is instead added to the + // {blocks_to_clone_} stack, whose blocks will be cloned once the current + // block has been fully visited. + void CloneBlockAndGoto(const Block* input_block) { + Block* new_block = + Asm().output_graph().NewBlock(input_block->kind(), input_block); + + // Computing which input of Phi operations to use when visiting + // {input_block} (since {input_block} doesn't really have predecessors + // anymore). + int added_block_phi_input = input_block->GetPredecessorIndex( + Asm().current_block()->OriginForBlockEnd()); + + // There are no guarantees that {input_block} will be entirely removed just + // because it's cloned/inlined, since it's possible that it has predecessors + // for which this optimization didn't apply. As a result, we add it to + // {blocks_needing_variables_}, so that if it's ever generated + // normally, Variables are used when emitting its content, so that + // they can later be merged when control flow merges with the current + // version of {input_block} that we just cloned. + blocks_needing_variables_.Add(input_block->index().id()); + + Asm().Goto(new_block); + + blocks_to_clone_.push_back({input_block, added_block_phi_input, new_block}); + } + + // Visits and emits {input_block} right now (ie, in the current block). This + // should not be called recursively in order to avoid stack overflow (ie, + // processing {input_block} should never lead to calling CloneAndInlineBlock). void CloneAndInlineBlock(const Block* input_block) { if (Asm().generating_unreachable_operations()) return; +#ifdef DEBUG + // Making sure that we didn't call CloneAndInlineBlock recursively.
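// [Editor's sketch] The debug guard below pairs a DCHECK with an RAII helper
// that flips a bool for the duration of the scope. A stand-alone version of
// that pattern (illustrative only; V8 uses its own ScopedModification
// utility):
//
//   class ScopedFlag {
//    public:
//     explicit ScopedFlag(bool* flag) : flag_(flag), old_value_(*flag) {
//       *flag_ = true;
//     }
//     ~ScopedFlag() { *flag_ = old_value_; }  // restore on scope exit
//
//    private:
//     bool* flag_;
//     bool old_value_;
//   };
//
// With this in place, any re-entrant call into CloneAndInlineBlock while a
// clone/inline is already in progress fails the DCHECK in debug builds.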
+ DCHECK(!is_in_recursive_inlining_); + ScopedModification recursive_guard(&is_in_recursive_inlining_, true); +#endif + // Computing which input of Phi operations to use when visiting // {input_block} (since {input_block} doesn't really have predecessors // anymore). @@ -150,34 +213,8 @@ class GraphVisitor : public Next { blocks_needing_variables_.Add(input_block->index().id()); ScopedModification set_true(&current_block_needs_variables_, true); - - // Similarly as VisitBlock does, we visit the Phis first, then update all of - // the Phi mappings at once and visit the rest of the block. - base::SmallVector new_phi_values; - // Emitting new phis and recording mapping. - DCHECK_NOT_NULL(Asm().current_block()); - for (OpIndex index : Asm().input_graph().OperationIndices(*input_block)) { - if (const PhiOp* phi = - Asm().input_graph().Get(index).template TryCast()) { - if (ShouldSkipOperation(*phi)) continue; - // This Phi has been cloned/inlined, and has thus now a single - // predecessor, and shouldn't be a Phi anymore. - OpIndex newval = MapToNewGraph(phi->input(added_block_phi_input)); - new_phi_values.push_back(newval); - } - } - // Visiting the other operations of the block and emitting the new Phi - // mappings. - int phi_num = 0; - for (OpIndex index : Asm().input_graph().OperationIndices(*input_block)) { - const Operation& op = Asm().input_graph().Get(index); - if (op.template Is()) { - if (ShouldSkipOperation(op)) continue; - CreateOldToNewMapping(index, new_phi_values[phi_num++]); - } else { - if (!VisitOpAndUpdateMapping(index, input_block)) break; - } - } + VisitBlockBody( + input_block, added_block_phi_input); } // {InlineOp} introduces two limitations unlike {CloneAndInlineBlock}: @@ -193,24 +230,23 @@ class GraphVisitor : public Next { DCHECK(old_index.valid()); OpIndex result = op_mapping_[old_index]; - if (contains_variable_reducer_) { - if (!result.valid()) { - // {op_mapping} doesn't have a mapping for {old_index}. The assembler - // should provide the mapping. - MaybeVariable var = GetVariableFor(old_index); - if constexpr (can_be_invalid) { - if (!var.has_value()) { - return OpIndex::Invalid(); - } - } - DCHECK(var.has_value()); - if (predecessor_index == -1) { - result = Asm().GetVariable(var.value()); - } else { - result = Asm().GetPredecessorValue(var.value(), predecessor_index); + if (!result.valid()) { + // {op_mapping} doesn't have a mapping for {old_index}. The + // VariableReducer should provide the mapping. + MaybeVariable var = GetVariableFor(old_index); + if constexpr (can_be_invalid) { + if (!var.has_value()) { + return OpIndex::Invalid(); } } + DCHECK(var.has_value()); + if (predecessor_index == -1) { + result = Asm().GetVariable(var.value()); + } else { + result = Asm().GetPredecessorValue(var.value(), predecessor_index); + } } + DCHECK_IMPLIES(!can_be_invalid, result.valid()); return result; } @@ -244,9 +280,10 @@ class GraphVisitor : public Next { // The BlockIndex of the blocks of `sub_graph` should be sorted so that // visiting them in order is correct (all of the predecessors of a block // should always be visited before the block itself). - DCHECK(std::is_sorted( - sub_graph.begin(), sub_graph.end(), - [](Block* a, Block* b) { return a->index().id() <= b->index().id(); })); + DCHECK(std::is_sorted(sub_graph.begin(), sub_graph.end(), + [](const Block* a, const Block* b) { + return a->index().id() <= b->index().id(); + })); // 1. Create new blocks, and update old->new mapping.
This is required to // emit multiple times the blocks of {sub_graph}: if a block `B1` in @@ -275,9 +312,10 @@ class GraphVisitor : public Next { #endif Asm().Goto(start); // Visiting `sub_graph`. - for (Block* block : sub_graph) { + for (const Block* block : sub_graph) { blocks_needing_variables_.Add(block->index().id()); VisitBlock(block); + ProcessWaitingCloningAndInlining(); } // 3. Restore initial old->new mapping @@ -289,13 +327,6 @@ class GraphVisitor : public Next { return start; } - template - OpIndex MapToNewGraphIfValid(OpIndex old_index, int predecessor_index = -1) { - return old_index.valid() - ? MapToNewGraph(old_index, predecessor_index) - : OpIndex::Invalid(); - } - template OptionalOpIndex MapToNewGraph(OptionalOpIndex old_index, int predecessor_index = -1) { @@ -303,6 +334,16 @@ class GraphVisitor : public Next { return MapToNewGraph(old_index.value(), predecessor_index); } + template + base::SmallVector MapToNewGraph( + base::Vector inputs) { + base::SmallVector result; + for (OpIndex input : inputs) { + result.push_back(MapToNewGraph(input)); + } + return result; + } + private: template void VisitAllBlocks() { @@ -313,6 +354,7 @@ class GraphVisitor : public Next { const Block* block = visit_stack.back(); visit_stack.pop_back(); VisitBlock(block); + ProcessWaitingCloningAndInlining(); for (Block* child = block->LastChild(); child != nullptr; child = child->NeighboringChild()) { @@ -323,7 +365,6 @@ class GraphVisitor : public Next { template void VisitBlock(const Block* input_block) { - current_input_block_ = input_block; current_block_needs_variables_ = blocks_needing_variables_.Contains(input_block->index().id()); if constexpr (trace_reduction) { @@ -335,54 +376,8 @@ class GraphVisitor : public Next { } Block* new_block = MapToNewGraph(input_block); if (Asm().Bind(new_block)) { - // Phis could be mutually recursive, for instance (in a loop header): - // - // p1 = phi(a, p2) - // p2 = phi(b, p1) - // - // In this case, if we are currently unrolling the loop and visiting this - // loop header that is now part of the loop body, then if we visit Phis - // and emit new mapping (with CreateOldToNewMapping) as we go along, we - // would visit p1 and emit a mapping saying "p1 = p2", and use this - // mapping when visiting p2, then we'd map p2 to p2 instead of p1. To - // overcome this issue, we first visit the Phis of the loop, emit the new - // phis, and record the new mapping in a side-table ({new_phi_values}). - // Then, we visit all of the operations of the loop and commit the new - // mappings: phis were emitted before using the old mapping, and all of - // the other operations will use the new mapping (as they should). - - // Visiting Phis and collecting their new OpIndices. - base::SmallVector new_phi_values; - for (OpIndex index : Asm().input_graph().OperationIndices(*input_block)) { - DCHECK_NOT_NULL(Asm().current_block()); - if (Asm().input_graph().Get(index).template Is()) { - OpIndex new_index = - VisitOpNoMappingUpdate(index, input_block); - new_phi_values.push_back(new_index); - if (!Asm().current_block()) { - // A reducer has detected, based on the Phis of the block that were - // visited so far, that we are in unreachable code (or, less likely, - // decided, based on some Phis only, to jump away from this block?). - break; - } - } - } - - // Visiting everything, updating Phi mappings, and emitting non-phi - // operations. 
- if (Asm().current_block()) { - int phi_num = 0; - for (OpIndex index : - Asm().input_graph().OperationIndices(*input_block)) { - if (Asm().input_graph().Get(index).template Is()) { - CreateOldToNewMapping(index, new_phi_values[phi_num++]); - } else { - if (!VisitOpAndUpdateMapping(index, input_block)) { - break; - } - } - } - } + VisitBlockBody( + input_block); if constexpr (trace_reduction) TraceBlockFinished(); } else { if constexpr (trace_reduction) TraceBlockUnreachable(); @@ -394,7 +389,7 @@ class GraphVisitor : public Next { const Operation& last_op = input_block->LastOperation(Asm().input_graph()); if (auto* final_goto = last_op.TryCast()) { if (final_goto->destination->IsLoop()) { - if (input_block->index() > final_goto->destination->index()) { + if (input_block->index() >= final_goto->destination->index()) { Asm().FinalizeLoop(MapToNewGraph(final_goto->destination)); } else { // We have a forward jump to a loop, rather than a backedge. We @@ -404,6 +399,111 @@ class GraphVisitor : public Next { } } + enum class CanHavePhis { kNo, kYes }; + enum class ForCloning { kNo, kYes }; + + template + void VisitBlockBody(const Block* input_block, + int added_block_phi_input = -1) { + DCHECK_NOT_NULL(Asm().current_block()); + current_input_block_ = input_block; + + // Phis could be mutually recursive, for instance (in a loop header): + // + // p1 = phi(a, p2) + // p2 = phi(b, p1) + // + // In this case, if we are currently unrolling the loop and visiting this + // loop header that is now part of the loop body, then if we visit Phis + // and emit new mapping (with CreateOldToNewMapping) as we go along, we + // would visit p1 and emit a mapping saying "p1 = p2", and use this + // mapping when visiting p2, then we'd map p2 to p2 instead of p1. To + // overcome this issue, we first visit the Phis of the loop, emit the new + // phis, and record the new mapping in a side-table ({new_phi_values}). + // Then, we visit all of the operations of the loop and commit the new + // mappings: phis were emitted before using the old mapping, and all of + // the other operations will use the new mapping (as they should). + // + // Note that Phis are not always at the beginning of blocks, but when they + // aren't, they can't have inputs from the current block (except on their + // backedge for loop phis, but they start as PendingLoopPhis without + // backedge input), so visiting all Phis first is safe. + + // Visiting Phis and collecting their new OpIndices. + base::SmallVector new_phi_values; + if constexpr (can_have_phis == CanHavePhis::kYes) { + for (OpIndex index : Asm().input_graph().OperationIndices(*input_block)) { + if (ShouldSkipOperation(Asm().input_graph().Get(index))) continue; + DCHECK_NOT_NULL(Asm().current_block()); + if (Asm().input_graph().Get(index).template Is()) { + OpIndex new_index; + if constexpr (for_cloning == ForCloning::kYes) { + // When cloning a block, it only has a single predecessor, and Phis + // should therefore be removed and be replaced by the input + // corresponding to this predecessor. + DCHECK_NE(added_block_phi_input, -1); + // This Phi has been cloned/inlined, and has thus now a single + // predecessor, and shouldn't be a Phi anymore.
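// [Editor's note] A concrete example of the Phi removal performed below: if
// the original block had predecessors B1 and B2 and contained p = Phi(x, y)
// (x flowing in from B1, y from B2), then a clone reachable only from B1
// simply maps p to x; the Phi disappears and later uses of p inside the
// cloned body use x directly. The MapToNewGraph call below does exactly
// that, with {added_block_phi_input} selecting the input that corresponds to
// the single remaining predecessor.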
+ new_index = MapToNewGraph( + Asm().input_graph().Get(index).input(added_block_phi_input)); + } else { + new_index = + VisitOpNoMappingUpdate(index, input_block); + } + new_phi_values.push_back(new_index); + if (!Asm().current_block()) { + // A reducer has detected, based on the Phis of the block that were + // visited so far, that we are in unreachable code (or, less likely, + // decided, based on some Phis only, to jump away from this block?). + return; + } + } + } + } + DCHECK_NOT_NULL(Asm().current_block()); + + // Visiting everything, updating Phi mappings, and emitting non-phi + // operations. + int phi_num = 0; + bool stopped_early = false; + for (OpIndex index : base::IterateWithoutLast( + Asm().input_graph().OperationIndices(*input_block))) { + if (ShouldSkipOperation(Asm().input_graph().Get(index))) continue; + const Operation& op = Asm().input_graph().Get(index); + if constexpr (can_have_phis == CanHavePhis::kYes) { + if (op.Is()) { + CreateOldToNewMapping(index, new_phi_values[phi_num++]); + continue; + } + } + // Blocks with a single predecessor (for which CanHavePhis might be kNo) + // can still have phis if they used to be loop headers that were turned + // into regular blocks. + DCHECK_IMPLIES(op.Is(), op.input_count == 1); + + if (!VisitOpAndUpdateMapping(index, input_block)) { + stopped_early = true; + break; + } + } + // If the last operation of the loop above (= the one-before-last operation + // of the block) was lowered to an unconditional deopt/trap/something like + // that, then current_block will now be null, and there is no need to visit + // the last operation of the block. + if (stopped_early || Asm().current_block() == nullptr) return; + + // The final operation (which should be a block terminator) of the block + // is processed separately, because if it's a Goto to a block with a + // single predecessor, we'll inline it. (We could have had a check `if (op + // is a Goto)` in the loop above, but since this can only be true for the + // last operation, we instead extracted it here to make things faster.) + const Operation& terminator = + input_block->LastOperation(Asm().input_graph()); + DCHECK(terminator.IsBlockTerminator()); + VisitBlockTerminator(terminator, input_block); + } + template bool VisitOpAndUpdateMapping(OpIndex index, const Block* input_block) { if (Asm().current_block() == nullptr) return false; @@ -470,6 +570,69 @@ class GraphVisitor : public Next { return new_index; } + template + void VisitBlockTerminator(const Operation& terminator, + const Block* input_block) { + if (Asm().CanAutoInlineBlocksWithSinglePredecessor() && + terminator.Is()) { + Block* destination = terminator.Cast().destination; + if (destination->PredecessorCount() == 1) { + block_to_inline_now_ = destination; + return; + } + } + // Just going through the regular VisitOp function.
+ OpIndex index = Asm().input_graph().Index(terminator); + VisitOpAndUpdateMapping(index, input_block); + } + + template + void ProcessWaitingCloningAndInlining() { + InlineWaitingBlock(); + while (!blocks_to_clone_.empty()) { + BlockToClone item = blocks_to_clone_.back(); + blocks_to_clone_.pop_back(); + DoCloneBlock( + item.input_block, item.added_block_phi_input, item.new_output_block); + InlineWaitingBlock(); + } + } + + template + void InlineWaitingBlock() { + while (block_to_inline_now_) { + Block* input_block = block_to_inline_now_; + block_to_inline_now_ = nullptr; + ScopedModification set_true(&current_block_needs_variables_, true); + if constexpr (trace_reduction) { + std::cout << "Inlining " << PrintAsBlockHeader{*input_block} << "\n"; + } + VisitBlockBody( + input_block); + } + } + + template + void DoCloneBlock(const Block* input_block, int added_block_phi_input, + Block* output_block) { + DCHECK_EQ(output_block->PredecessorCount(), 1); + if constexpr (trace_reduction) { + std::cout << "\nCloning old " << PrintAsBlockHeader{*input_block} << "\n"; + std::cout << "As new " + << PrintAsBlockHeader{*output_block, + Asm().output_graph().next_block_index()} + << "\n"; + } + + ScopedModification set_true(&current_block_needs_variables_, true); + + Asm().BindReachable(output_block); + VisitBlockBody( + input_block, added_block_phi_input); + + if constexpr (trace_reduction) TraceBlockFinished(); + } + void TraceReductionStart(OpIndex index) { std::cout << "╭── o" << index.id() << ": " << PaddingSpace{5 - CountDecimalDigits(index.id())} @@ -546,8 +709,16 @@ class GraphVisitor : public Next { MapToNewGraph(op.default_case), op.default_hint); } OpIndex AssembleOutputGraphPhi(const PhiOp& op) { + if (op.input_count == 1) { + // If, in the previous CopyingPhase, a loop header was turned into a + // regular block, its PendingLoopPhis became Phis with a single input. We + // can now just get rid of these Phis. + return MapToNewGraph(op.input(0)); + } + OpIndex ig_index = Asm().input_graph().Index(op); if (Asm().current_block()->IsLoop()) { + DCHECK_EQ(op.input_count, 2); if (ig_index == op.input(PhiOp::kLoopPhiBackEdgeIndex)) { // Avoid emitting a Loop Phi which points to itself, instead // emit it's 0'th input. @@ -658,7 +829,7 @@ class GraphVisitor : public Next { } OpIndex AssembleOutputGraphCall(const CallOp& op) { OpIndex callee = MapToNewGraph(op.callee()); - OpIndex frame_state = MapToNewGraphIfValid(op.frame_state()); + OptionalOpIndex frame_state = MapToNewGraph(op.frame_state()); auto arguments = MapToNewGraph<16>(op.arguments()); return Asm().ReduceCall(callee, frame_state, base::VectorOf(arguments), op.descriptor, op.Effects()); @@ -710,623 +881,28 @@ class GraphVisitor : public Next { } return OpIndex::Invalid(); } - OpIndex AssembleOutputGraphCatchBlockBegin(const CatchBlockBeginOp& op) { - return Asm().ReduceCatchBlockBegin(); - } - OpIndex AssembleOutputGraphTailCall(const TailCallOp& op) { - OpIndex callee = MapToNewGraph(op.callee()); - auto arguments = MapToNewGraph<16>(op.arguments()); - return Asm().ReduceTailCall(callee, base::VectorOf(arguments), - op.descriptor); - } - OpIndex AssembleOutputGraphReturn(const ReturnOp& op) { - // We very rarely have tuples longer than 4.
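// [Editor's note] The long run of hand-written AssembleOutputGraph* methods
// deleted here is now generated by the ASSEMBLE macro of OutputGraphAssembler
// introduced earlier in this file: op.Explode(...) unpacks the stored
// operation, remaps its OpIndex inputs through Map() (i.e. MapToNewGraph),
// and forwards the arguments to the matching Reduce##operation on the
// assembler. For a single operation the generated code is morally equivalent
// to what used to be spelled out by hand, e.g. (a sketch, not the exact macro
// expansion):
//
//   OpIndex AssembleOutputGraphWordBinop(const WordBinopOp& op) {
//     return assembler()->ReduceWordBinop(Map(op.left()), Map(op.right()),
//                                         op.kind, op.rep);
//   }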
- auto return_values = MapToNewGraph<4>(op.return_values()); - return Asm().ReduceReturn(MapToNewGraph(op.pop_count()), - base::VectorOf(return_values)); - } - OpIndex AssembleOutputGraphOverflowCheckedBinop( - const OverflowCheckedBinopOp& op) { - return Asm().ReduceOverflowCheckedBinop( - MapToNewGraph(op.left()), MapToNewGraph(op.right()), op.kind, op.rep); - } - OpIndex AssembleOutputGraphWordUnary(const WordUnaryOp& op) { - return Asm().ReduceWordUnary(MapToNewGraph(op.input()), op.kind, op.rep); - } - OpIndex AssembleOutputGraphFloatUnary(const FloatUnaryOp& op) { - return Asm().ReduceFloatUnary(MapToNewGraph(op.input()), op.kind, op.rep); - } - OpIndex AssembleOutputGraphShift(const ShiftOp& op) { - return Asm().ReduceShift(MapToNewGraph(op.left()), - MapToNewGraph(op.right()), op.kind, op.rep); - } - OpIndex AssembleOutputGraphComparison(const ComparisonOp& op) { - return Asm().ReduceComparison(MapToNewGraph(op.left()), - MapToNewGraph(op.right()), op.kind, op.rep); - } - OpIndex AssembleOutputGraphChange(const ChangeOp& op) { - return Asm().ReduceChange(MapToNewGraph(op.input()), op.kind, op.assumption, - op.from, op.to); - } - OpIndex AssembleOutputGraphChangeOrDeopt(const ChangeOrDeoptOp& op) { - return Asm().ReduceChangeOrDeopt(MapToNewGraph(op.input()), - MapToNewGraph(op.frame_state()), op.kind, - op.minus_zero_mode, op.feedback); - } - OpIndex AssembleOutputGraphTryChange(const TryChangeOp& op) { - return Asm().ReduceTryChange(MapToNewGraph(op.input()), op.kind, op.from, - op.to); - } - OpIndex AssembleOutputGraphBitcastWord32PairToFloat64( - const BitcastWord32PairToFloat64Op& op) { - return Asm().ReduceBitcastWord32PairToFloat64( - MapToNewGraph(op.high_word32()), MapToNewGraph(op.low_word32())); - } - OpIndex AssembleOutputGraphTaggedBitcast(const TaggedBitcastOp& op) { - return Asm().ReduceTaggedBitcast(MapToNewGraph(op.input()), op.from, op.to, - op.kind); - } - OpIndex AssembleOutputGraphObjectIs(const ObjectIsOp& op) { - return Asm().ReduceObjectIs(MapToNewGraph(op.input()), op.kind, - op.input_assumptions); - } - OpIndex AssembleOutputGraphFloatIs(const FloatIsOp& op) { - return Asm().ReduceFloatIs(MapToNewGraph(op.input()), op.kind, - op.input_rep); - } - OpIndex AssembleOutputGraphObjectIsNumericValue( - const ObjectIsNumericValueOp& op) { - return Asm().ReduceObjectIsNumericValue(MapToNewGraph(op.input()), op.kind, - op.input_rep); - } - OpIndex AssembleOutputGraphConvert(const ConvertOp& op) { - return Asm().ReduceConvert(MapToNewGraph(op.input()), op.from, op.to); - } - OpIndex AssembleOutputGraphConvertUntaggedToJSPrimitive( - const ConvertUntaggedToJSPrimitiveOp& op) { - return Asm().ReduceConvertUntaggedToJSPrimitive( - MapToNewGraph(op.input()), op.kind, op.input_rep, - op.input_interpretation, op.minus_zero_mode); - } - OpIndex AssembleOutputGraphConvertUntaggedToJSPrimitiveOrDeopt( - const ConvertUntaggedToJSPrimitiveOrDeoptOp& op) { - return Asm().ReduceConvertUntaggedToJSPrimitiveOrDeopt( - MapToNewGraph(op.input()), MapToNewGraph(op.frame_state()), op.kind, - op.input_rep, op.input_interpretation, op.feedback); - } - OpIndex AssembleOutputGraphConvertJSPrimitiveToUntagged( - const ConvertJSPrimitiveToUntaggedOp& op) { - return Asm().ReduceConvertJSPrimitiveToUntagged( - MapToNewGraph(op.input()), op.kind, op.input_assumptions); - } - OpIndex AssembleOutputGraphConvertJSPrimitiveToUntaggedOrDeopt( - const ConvertJSPrimitiveToUntaggedOrDeoptOp& op) { - return Asm().ReduceConvertJSPrimitiveToUntaggedOrDeopt( - MapToNewGraph(op.input()), 
MapToNewGraph(op.frame_state()), - op.from_kind, op.to_kind, op.minus_zero_mode, op.feedback); - } - OpIndex AssembleOutputGraphTruncateJSPrimitiveToUntagged( - const TruncateJSPrimitiveToUntaggedOp& op) { - return Asm().ReduceTruncateJSPrimitiveToUntagged( - MapToNewGraph(op.input()), op.kind, op.input_assumptions); - } - OpIndex AssembleOutputGraphTruncateJSPrimitiveToUntaggedOrDeopt( - const TruncateJSPrimitiveToUntaggedOrDeoptOp& op) { - return Asm().ReduceTruncateJSPrimitiveToUntaggedOrDeopt( - MapToNewGraph(op.input()), MapToNewGraph(op.frame_state()), op.kind, - op.input_requirement, op.feedback); - } - OpIndex AssembleOutputGraphConvertJSPrimitiveToObject( - const ConvertJSPrimitiveToObjectOp& op) { - return Asm().ReduceConvertJSPrimitiveToObject( - MapToNewGraph(op.value()), MapToNewGraph(op.native_context()), - MapToNewGraph(op.global_proxy()), op.mode); - } - OpIndex AssembleOutputGraphSelect(const SelectOp& op) { - return Asm().ReduceSelect( - MapToNewGraph(op.cond()), MapToNewGraph(op.vtrue()), - MapToNewGraph(op.vfalse()), op.rep, op.hint, op.implem); - } - OpIndex AssembleOutputGraphConstant(const ConstantOp& op) { - return Asm().ReduceConstant(op.kind, op.storage); - } - OpIndex AssembleOutputGraphAtomicRMW(const AtomicRMWOp& op) { - return Asm().ReduceAtomicRMW( - MapToNewGraph(op.base()), MapToNewGraph(op.index()), - MapToNewGraph(op.value()), MapToNewGraph(op.expected()), op.bin_op, - op.result_rep, op.input_rep, op.memory_access_kind); - } - - OpIndex AssembleOutputGraphAtomicWord32Pair(const AtomicWord32PairOp& op) { - return Asm().ReduceAtomicWord32Pair( - MapToNewGraph(op.base()), MapToNewGraph(op.index()), - MapToNewGraph(op.value_low()), MapToNewGraph(op.value_high()), - MapToNewGraph(op.expected_low()), MapToNewGraph(op.expected_high()), - op.kind, op.offset); - } - - OpIndex AssembleOutputGraphMemoryBarrier(const MemoryBarrierOp& op) { - return Asm().MemoryBarrier(op.memory_order); - } - - OpIndex AssembleOutputGraphLoad(const LoadOp& op) { - return Asm().ReduceLoad(MapToNewGraph(op.base()), MapToNewGraph(op.index()), - op.kind, op.loaded_rep, op.result_rep, op.offset, - op.element_size_log2); - } - OpIndex AssembleOutputGraphStore(const StoreOp& op) { - return Asm().ReduceStore( - MapToNewGraph(op.base()), MapToNewGraph(op.index()), - MapToNewGraph(op.value()), op.kind, op.stored_rep, op.write_barrier, - op.offset, op.element_size_log2, op.maybe_initializing_or_transitioning, - op.indirect_pointer_tag()); - } - OpIndex AssembleOutputGraphAllocate(const AllocateOp& op) { - return Asm().FinishInitialization( - Asm().Allocate(MapToNewGraph(op.size()), op.type)); - } - OpIndex AssembleOutputGraphDecodeExternalPointer( - const DecodeExternalPointerOp& op) { - return Asm().DecodeExternalPointer(MapToNewGraph(op.handle()), op.tag); - } - - OpIndex AssembleOutputGraphStackCheck(const StackCheckOp& op) { - return Asm().ReduceStackCheck(op.check_origin, op.check_kind); - } - - OpIndex AssembleOutputGraphRetain(const RetainOp& op) { - return Asm().ReduceRetain(MapToNewGraph(op.retained())); - } - OpIndex AssembleOutputGraphParameter(const ParameterOp& op) { - return Asm().ReduceParameter(op.parameter_index, op.rep, op.debug_name); - } - OpIndex AssembleOutputGraphOsrValue(const OsrValueOp& op) { - return Asm().ReduceOsrValue(op.index); - } - OpIndex AssembleOutputGraphStackPointerGreaterThan( - const StackPointerGreaterThanOp& op) { - return Asm().ReduceStackPointerGreaterThan(MapToNewGraph(op.stack_limit()), - op.kind); - } - OpIndex AssembleOutputGraphStackSlot(const 
StackSlotOp& op) { - return Asm().ReduceStackSlot(op.size, op.alignment); - } - OpIndex AssembleOutputGraphFrameConstant(const FrameConstantOp& op) { - return Asm().ReduceFrameConstant(op.kind); - } - OpIndex AssembleOutputGraphDeoptimize(const DeoptimizeOp& op) { - return Asm().ReduceDeoptimize(MapToNewGraph(op.frame_state()), - op.parameters); - } - OpIndex AssembleOutputGraphDeoptimizeIf(const DeoptimizeIfOp& op) { - return Asm().ReduceDeoptimizeIf(MapToNewGraph(op.condition()), - MapToNewGraph(op.frame_state()), op.negated, - op.parameters); - } - -#if V8_ENABLE_WEBASSEMBLY - OpIndex AssembleOutputGraphTrapIf(const TrapIfOp& op) { - return Asm().ReduceTrapIf(MapToNewGraph(op.condition()), - MapToNewGraphIfValid(op.frame_state()), - op.negated, op.trap_id); - } -#endif // V8_ENABLE_WEBASSEMBLY - - OpIndex AssembleOutputGraphTuple(const TupleOp& op) { - return Asm().ReduceTuple(base::VectorOf(MapToNewGraph<4>(op.inputs()))); - } - OpIndex AssembleOutputGraphProjection(const ProjectionOp& op) { - return Asm().ReduceProjection(MapToNewGraph(op.input()), op.index, op.rep); - } - OpIndex AssembleOutputGraphWordBinop(const WordBinopOp& op) { - return Asm().ReduceWordBinop(MapToNewGraph(op.left()), - MapToNewGraph(op.right()), op.kind, op.rep); - } - OpIndex AssembleOutputGraphFloatBinop(const FloatBinopOp& op) { - return Asm().ReduceFloatBinop(MapToNewGraph(op.left()), - MapToNewGraph(op.right()), op.kind, op.rep); - } - OpIndex AssembleOutputGraphUnreachable(const UnreachableOp& op) { - return Asm().ReduceUnreachable(); - } - OpIndex AssembleOutputGraphStaticAssert(const StaticAssertOp& op) { - return Asm().ReduceStaticAssert(MapToNewGraph(op.condition()), op.source); - } - OpIndex AssembleOutputGraphCheckTurboshaftTypeOf( - const CheckTurboshaftTypeOfOp& op) { - return Asm().ReduceCheckTurboshaftTypeOf(MapToNewGraph(op.input()), op.rep, - op.type, op.successful); - } - OpIndex AssembleOutputGraphNewConsString(const NewConsStringOp& op) { - return Asm().ReduceNewConsString(MapToNewGraph(op.length()), - MapToNewGraph(op.first()), - MapToNewGraph(op.second())); - } - OpIndex AssembleOutputGraphNewArray(const NewArrayOp& op) { - return Asm().ReduceNewArray(MapToNewGraph(op.length()), op.kind, - op.allocation_type); - } - OpIndex AssembleOutputGraphDoubleArrayMinMax(const DoubleArrayMinMaxOp& op) { - return Asm().ReduceDoubleArrayMinMax(MapToNewGraph(op.array()), op.kind); - } - OpIndex AssembleOutputGraphLoadFieldByIndex(const LoadFieldByIndexOp& op) { - return Asm().ReduceLoadFieldByIndex(MapToNewGraph(op.object()), - MapToNewGraph(op.index())); - } - OpIndex AssembleOutputGraphDebugBreak(const DebugBreakOp& op) { - return Asm().ReduceDebugBreak(); - } - OpIndex AssembleOutputGraphDebugPrint(const DebugPrintOp& op) { - return Asm().ReduceDebugPrint(MapToNewGraph(op.input()), op.rep); - } - OpIndex AssembleOutputGraphBigIntBinop(const BigIntBinopOp& op) { - return Asm().ReduceBigIntBinop(MapToNewGraph(op.left()), - MapToNewGraph(op.right()), - MapToNewGraph(op.frame_state()), op.kind); - } - OpIndex AssembleOutputGraphBigIntComparison(const BigIntComparisonOp& op) { - return Asm().ReduceBigIntComparison(MapToNewGraph(op.left()), - MapToNewGraph(op.right()), op.kind); - } - OpIndex AssembleOutputGraphBigIntUnary(const BigIntUnaryOp& op) { - return Asm().ReduceBigIntUnary(MapToNewGraph(op.input()), op.kind); - } - OpIndex AssembleOutputGraphLoadRootRegister(const LoadRootRegisterOp& op) { - return Asm().ReduceLoadRootRegister(); - } - OpIndex AssembleOutputGraphStringAt(const StringAtOp& op) { - 
return Asm().ReduceStringAt(MapToNewGraph(op.string()), - MapToNewGraph(op.position()), op.kind); - } -#ifdef V8_INTL_SUPPORT - OpIndex AssembleOutputGraphStringToCaseIntl(const StringToCaseIntlOp& op) { - return Asm().ReduceStringToCaseIntl(MapToNewGraph(op.string()), op.kind); - } -#endif // V8_INTL_SUPPORT - OpIndex AssembleOutputGraphStringLength(const StringLengthOp& op) { - return Asm().ReduceStringLength(MapToNewGraph(op.string())); - } - OpIndex AssembleOutputGraphStringIndexOf(const StringIndexOfOp& op) { - return Asm().ReduceStringIndexOf(MapToNewGraph(op.string()), - MapToNewGraph(op.search()), - MapToNewGraph(op.position())); - } - OpIndex AssembleOutputGraphStringFromCodePointAt( - const StringFromCodePointAtOp& op) { - return Asm().ReduceStringFromCodePointAt(MapToNewGraph(op.string()), - MapToNewGraph(op.index())); - } - OpIndex AssembleOutputGraphStringSubstring(const StringSubstringOp& op) { - return Asm().ReduceStringSubstring(MapToNewGraph(op.string()), - MapToNewGraph(op.start()), - MapToNewGraph(op.end())); - } - OpIndex AssembleOutputGraphStringConcat(const StringConcatOp& op) { - return Asm().ReduceStringConcat(MapToNewGraph(op.left()), - MapToNewGraph(op.right())); - } - OpIndex AssembleOutputGraphStringComparison(const StringComparisonOp& op) { - return Asm().ReduceStringComparison(MapToNewGraph(op.left()), - MapToNewGraph(op.right()), op.kind); - } - OpIndex AssembleOutputGraphArgumentsLength(const ArgumentsLengthOp& op) { - return Asm().ReduceArgumentsLength(op.kind, op.formal_parameter_count); - } - OpIndex AssembleOutputGraphNewArgumentsElements( - const NewArgumentsElementsOp& op) { - return Asm().ReduceNewArgumentsElements(MapToNewGraph(op.arguments_count()), - op.type, op.formal_parameter_count); - } - OpIndex AssembleOutputGraphLoadTypedElement(const LoadTypedElementOp& op) { - return Asm().ReduceLoadTypedElement( - MapToNewGraph(op.buffer()), MapToNewGraph(op.base()), - MapToNewGraph(op.external()), MapToNewGraph(op.index()), op.array_type); - } - OpIndex AssembleOutputGraphLoadDataViewElement( - const LoadDataViewElementOp& op) { - return Asm().ReduceLoadDataViewElement( - MapToNewGraph(op.object()), MapToNewGraph(op.storage()), - MapToNewGraph(op.index()), MapToNewGraph(op.is_little_endian()), - op.element_type); - } - OpIndex AssembleOutputGraphLoadStackArgument(const LoadStackArgumentOp& op) { - return Asm().ReduceLoadStackArgument(MapToNewGraph(op.base()), - MapToNewGraph(op.index())); - } - OpIndex AssembleOutputGraphStoreTypedElement(const StoreTypedElementOp& op) { - return Asm().ReduceStoreTypedElement( - MapToNewGraph(op.buffer()), MapToNewGraph(op.base()), - MapToNewGraph(op.external()), MapToNewGraph(op.index()), - MapToNewGraph(op.value()), op.array_type); - } - OpIndex AssembleOutputGraphStoreDataViewElement( - const StoreDataViewElementOp& op) { - return Asm().ReduceStoreDataViewElement( - MapToNewGraph(op.object()), MapToNewGraph(op.storage()), - MapToNewGraph(op.index()), MapToNewGraph(op.value()), - MapToNewGraph(op.is_little_endian()), op.element_type); - } - OpIndex AssembleOutputGraphTransitionAndStoreArrayElement( - const TransitionAndStoreArrayElementOp& op) { - return Asm().ReduceTransitionAndStoreArrayElement( - MapToNewGraph(op.array()), MapToNewGraph(op.index()), - MapToNewGraph(op.value()), op.kind, op.fast_map, op.double_map); - } - OpIndex AssembleOutputGraphCompareMaps(const CompareMapsOp& op) { - return Asm().ReduceCompareMaps(MapToNewGraph(op.heap_object()), op.maps); - } - OpIndex AssembleOutputGraphCheckMaps(const 
CheckMapsOp& op) {
-    return Asm().ReduceCheckMaps(MapToNewGraph(op.heap_object()),
-                                 MapToNewGraph(op.frame_state()), op.maps,
-                                 op.flags, op.feedback);
-  }
-  OpIndex AssembleOutputGraphAssumeMap(const AssumeMapOp& op) {
-    return Asm().ReduceAssumeMap(MapToNewGraph(op.heap_object()), op.maps);
-  }
-  OpIndex AssembleOutputGraphCheckedClosure(const CheckedClosureOp& op) {
-    return Asm().ReduceCheckedClosure(MapToNewGraph(op.input()),
-                                      MapToNewGraph(op.frame_state()),
-                                      op.feedback_cell);
-  }
-  OpIndex AssembleOutputGraphCheckEqualsInternalizedString(
-      const CheckEqualsInternalizedStringOp& op) {
-    return Asm().ReduceCheckEqualsInternalizedString(
-        MapToNewGraph(op.expected()), MapToNewGraph(op.value()),
-        MapToNewGraph(op.frame_state()));
-  }
-  OpIndex AssembleOutputGraphLoadMessage(const LoadMessageOp& op) {
-    return Asm().ReduceLoadMessage(MapToNewGraph(op.offset()));
-  }
-  OpIndex AssembleOutputGraphStoreMessage(const StoreMessageOp& op) {
-    return Asm().ReduceStoreMessage(MapToNewGraph(op.offset()),
-                                    MapToNewGraph(op.object()));
-  }
-  OpIndex AssembleOutputGraphSameValue(const SameValueOp& op) {
-    return Asm().ReduceSameValue(MapToNewGraph(op.left()),
-                                 MapToNewGraph(op.right()), op.mode);
-  }
-  OpIndex AssembleOutputGraphFloat64SameValue(const Float64SameValueOp& op) {
-    return Asm().ReduceFloat64SameValue(MapToNewGraph(op.left()),
-                                        MapToNewGraph(op.right()));
-  }
-  OpIndex AssembleOutputGraphFastApiCall(const FastApiCallOp& op) {
-    auto arguments = MapToNewGraph<8>(op.arguments());
-    return Asm().ReduceFastApiCall(MapToNewGraph(op.data_argument()),
-                                   base::VectorOf(arguments), op.parameters);
-  }
-  OpIndex AssembleOutputGraphRuntimeAbort(const RuntimeAbortOp& op) {
-    return Asm().ReduceRuntimeAbort(op.reason);
-  }
-  OpIndex AssembleOutputGraphEnsureWritableFastElements(
-      const EnsureWritableFastElementsOp& op) {
-    return Asm().ReduceEnsureWritableFastElements(MapToNewGraph(op.object()),
-                                                  MapToNewGraph(op.elements()));
-  }
-  OpIndex AssembleOutputGraphMaybeGrowFastElements(
-      const MaybeGrowFastElementsOp& op) {
-    return Asm().ReduceMaybeGrowFastElements(
-        MapToNewGraph(op.object()), MapToNewGraph(op.elements()),
-        MapToNewGraph(op.index()), MapToNewGraph(op.elements_length()),
-        MapToNewGraph(op.frame_state()), op.mode, op.feedback);
-  }
-  OpIndex AssembleOutputGraphTransitionElementsKind(
-      const TransitionElementsKindOp& op) {
-    return Asm().ReduceTransitionElementsKind(MapToNewGraph(op.object()),
-                                              op.transition);
-  }
-  OpIndex AssembleOutputGraphFindOrderedHashEntry(
-      const FindOrderedHashEntryOp& op) {
-    return Asm().ReduceFindOrderedHashEntry(MapToNewGraph(op.data_structure()),
-                                            MapToNewGraph(op.key()), op.kind);
-  }
-  OpIndex AssembleOutputGraphSpeculativeNumberBinop(
-      const SpeculativeNumberBinopOp& op) {
-    return Asm().ReduceSpeculativeNumberBinop(
-        MapToNewGraph(op.left()), MapToNewGraph(op.right()),
-        MapToNewGraph(op.frame_state()), op.kind);
-  }
-  OpIndex AssembleOutputGraphWord32PairBinop(const Word32PairBinopOp& op) {
-    return Asm().ReduceWord32PairBinop(
-        MapToNewGraph(op.left_low()), MapToNewGraph(op.left_high()),
-        MapToNewGraph(op.right_low()), MapToNewGraph(op.right_high()), op.kind);
-  }
-
-  OpIndex AssembleOutputGraphComment(const CommentOp& op) {
-    return Asm().ReduceComment(op.message);
-  }
-
-#ifdef V8_ENABLE_WEBASSEMBLY
-  OpIndex AssembleOutputGraphGlobalGet(const GlobalGetOp& op) {
-    return Asm().ReduceGlobalGet(MapToNewGraph(op.instance()), op.global);
-  }
-
-  OpIndex AssembleOutputGraphGlobalSet(const GlobalSetOp& op) {
-    return Asm().ReduceGlobalSet(MapToNewGraph(op.instance()),
-                                 MapToNewGraph(op.value()), op.global);
-  }
-
-  OpIndex AssembleOutputGraphNull(const NullOp& op) {
-    return Asm().ReduceNull(op.type);
-  }
-
-  OpIndex AssembleOutputGraphIsNull(const IsNullOp& op) {
-    return Asm().ReduceIsNull(MapToNewGraph(op.object()), op.type);
-  }
-
-  OpIndex AssembleOutputGraphAssertNotNull(const AssertNotNullOp& op) {
-    return Asm().ReduceAssertNotNull(MapToNewGraph(op.object()), op.type,
-                                     op.trap_id);
-  }
-
-  OpIndex AssembleOutputGraphRttCanon(const RttCanonOp& op) {
-    return Asm().ReduceRttCanon(MapToNewGraph(op.rtts()), op.type_index);
-  }
-
-  OpIndex AssembleOutputGraphWasmTypeCheck(const WasmTypeCheckOp& op) {
-    return Asm().ReduceWasmTypeCheck(MapToNewGraph(op.object()),
-                                     MapToNewGraphIfValid(op.rtt()), op.config);
-  }
-
-  OpIndex AssembleOutputGraphWasmTypeCast(const WasmTypeCastOp& op) {
-    return Asm().ReduceWasmTypeCast(MapToNewGraph(op.object()),
-                                    MapToNewGraphIfValid(op.rtt()), op.config);
-  }
-
-  OpIndex AssembleOutputGraphAnyConvertExtern(const AnyConvertExternOp& op) {
-    return Asm().ReduceAnyConvertExtern(MapToNewGraph(op.object()));
-  }
-
-  OpIndex AssembleOutputGraphExternConvertAny(const ExternConvertAnyOp& op) {
-    return Asm().ReduceExternConvertAny(MapToNewGraph(op.object()));
-  }
-
-  OpIndex AssembleOutputGraphWasmTypeAnnotation(
-      const WasmTypeAnnotationOp& op) {
-    return Asm().ReduceWasmTypeAnnotation(MapToNewGraph(op.value()), op.type);
-  }
-
-  OpIndex AssembleOutputGraphStructGet(const StructGetOp& op) {
-    return Asm().ReduceStructGet(MapToNewGraph(op.object()), op.type,
-                                 op.type_index, op.field_index, op.is_signed,
-                                 op.null_check);
-  }
-
-  OpIndex AssembleOutputGraphStructSet(const StructSetOp& op) {
-    return Asm().ReduceStructSet(MapToNewGraph(op.object()),
-                                 MapToNewGraph(op.value()), op.type,
-                                 op.type_index, op.field_index, op.null_check);
-  }
-
-  OpIndex AssembleOutputGraphArrayGet(const ArrayGetOp& op) {
-    return Asm().ReduceArrayGet(MapToNewGraph(op.array()),
-                                MapToNewGraph(op.index()), op.array_type,
-                                op.is_signed);
-  }
-
-  OpIndex AssembleOutputGraphArraySet(const ArraySetOp& op) {
-    return Asm().ReduceArraySet(MapToNewGraph(op.array()),
-                                MapToNewGraph(op.index()),
-                                MapToNewGraph(op.value()), op.element_type);
-  }
-
-  OpIndex AssembleOutputGraphArrayLength(const ArrayLengthOp& op) {
-    return Asm().ReduceArrayLength(MapToNewGraph(op.array()), op.null_check);
-  }
-
-  OpIndex AssembleOutputGraphWasmAllocateArray(const WasmAllocateArrayOp& op) {
-    return Asm().ReduceWasmAllocateArray(
-        MapToNewGraph(op.rtt()), MapToNewGraph(op.length()), op.array_type);
-  }
-
-  OpIndex AssembleOutputGraphWasmAllocateStruct(
-      const WasmAllocateStructOp& op) {
-    return Asm().ReduceWasmAllocateStruct(MapToNewGraph(op.rtt()),
-                                          op.struct_type);
-  }
-
-  OpIndex AssembleOutputGraphWasmRefFunc(const WasmRefFuncOp& op) {
-    return Asm().ReduceWasmRefFunc(MapToNewGraph(op.instance()),
-                                   op.function_index);
-  }
-
-  OpIndex AssembleOutputGraphStringAsWtf16(const StringAsWtf16Op& op) {
-    return Asm().ReduceStringAsWtf16(MapToNewGraph(op.string()));
-  }
-
-  OpIndex AssembleOutputGraphStringPrepareForGetCodeUnit(
-      const StringPrepareForGetCodeUnitOp& op) {
-    return Asm().ReduceStringPrepareForGetCodeUnit(MapToNewGraph(op.string()));
-  }
-
-  OpIndex AssembleOutputGraphSimd128Constant(const Simd128ConstantOp& op) {
-    return Asm().ReduceSimd128Constant(op.value);
-  }
-
-  OpIndex AssembleOutputGraphSimd128Binop(const Simd128BinopOp& op) {
-    return Asm().ReduceSimd128Binop(MapToNewGraph(op.left()),
-                                    MapToNewGraph(op.right()), op.kind);
-  }
-
-  OpIndex AssembleOutputGraphSimd128Unary(const Simd128UnaryOp& op) {
-    return Asm().ReduceSimd128Unary(MapToNewGraph(op.input()), op.kind);
-  }
-
-  OpIndex AssembleOutputGraphSimd128Shift(const Simd128ShiftOp& op) {
-    return Asm().ReduceSimd128Shift(MapToNewGraph(op.input()),
-                                    MapToNewGraph(op.shift()), op.kind);
-  }
-
-  OpIndex AssembleOutputGraphSimd128Test(const Simd128TestOp& op) {
-    return Asm().ReduceSimd128Test(MapToNewGraph(op.input()), op.kind);
-  }
-
-  OpIndex AssembleOutputGraphSimd128Splat(const Simd128SplatOp& op) {
-    return Asm().ReduceSimd128Splat(MapToNewGraph(op.input()), op.kind);
-  }
-
-  OpIndex AssembleOutputGraphSimd128Ternary(const Simd128TernaryOp& op) {
-    return Asm().ReduceSimd128Ternary(MapToNewGraph(op.first()),
-                                      MapToNewGraph(op.second()),
-                                      MapToNewGraph(op.third()), op.kind);
-  }
-  OpIndex AssembleOutputGraphSimd128ExtractLane(
-      const Simd128ExtractLaneOp& op) {
-    return Asm().ReduceSimd128ExtractLane(MapToNewGraph(op.input()), op.kind,
-                                          op.lane);
-  }
-  OpIndex AssembleOutputGraphSimd128ReplaceLane(
-      const Simd128ReplaceLaneOp& op) {
-    return Asm().ReduceSimd128ReplaceLane(MapToNewGraph(op.into()),
-                                          MapToNewGraph(op.new_lane()), op.kind,
-                                          op.lane);
-  }
-  OpIndex AssembleOutputGraphSimd128LaneMemory(const Simd128LaneMemoryOp& op) {
-    return Asm().ReduceSimd128LaneMemory(
-        MapToNewGraph(op.base()), MapToNewGraph(op.index()),
-        MapToNewGraph(op.value()), op.mode, op.kind, op.lane_kind, op.lane,
-        op.offset);
-  }
-  OpIndex AssembleOutputGraphSimd128LoadTransform(
-      const Simd128LoadTransformOp& op) {
-    return Asm().ReduceSimd128LoadTransform(
-        MapToNewGraph(op.base()), MapToNewGraph(op.index()), op.load_kind,
-        op.transform_kind, op.offset);
-  }
-  OpIndex AssembleOutputGraphSimd128Shuffle(const Simd128ShuffleOp& op) {
-    return Asm().ReduceSimd128Shuffle(MapToNewGraph(op.left()),
-                                      MapToNewGraph(op.right()), op.shuffle);
-  }
-  OpIndex AssembleOutputGraphLoadStackPointer(const LoadStackPointerOp& op) {
-    return Asm().ReduceLoadStackPointer();
-  }
-  OpIndex AssembleOutputGraphSetStackPointer(const SetStackPointerOp& op) {
-    return Asm().ReduceSetStackPointer(MapToNewGraph(op.value()), op.fp_scope);
-  }
-#endif  // V8_ENABLE_WEBASSEMBLY
 
   void CreateOldToNewMapping(OpIndex old_index, OpIndex new_index) {
     DCHECK(old_index.valid());
     DCHECK(Asm().input_graph().BelongsToThisGraph(old_index));
     DCHECK_IMPLIES(new_index.valid(),
                    Asm().output_graph().BelongsToThisGraph(new_index));
-    if (contains_variable_reducer_) {
-      if (current_block_needs_variables_) {
-        MaybeVariable var = GetVariableFor(old_index);
-        if (!var.has_value()) {
-          MaybeRegisterRepresentation rep =
-              Asm().input_graph().Get(old_index).outputs_rep().size() == 1
-                  ? static_cast<MaybeRegisterRepresentation>(
-                        Asm().input_graph().Get(old_index).outputs_rep()[0])
-                  : MaybeRegisterRepresentation::None();
-          var = Asm().NewLoopInvariantVariable(rep);
-          SetVariableFor(old_index, *var);
-        }
-        Asm().SetVariable(*var, new_index);
-        return;
+
+    if (current_block_needs_variables_) {
+      MaybeVariable var = GetVariableFor(old_index);
+      if (!var.has_value()) {
+        MaybeRegisterRepresentation rep =
+            Asm().input_graph().Get(old_index).outputs_rep().size() == 1
+                ? static_cast<MaybeRegisterRepresentation>(
+                      Asm().input_graph().Get(old_index).outputs_rep()[0])
+                : MaybeRegisterRepresentation::None();
+        var = Asm().NewLoopInvariantVariable(rep);
+        SetVariableFor(old_index, *var);
       }
-    } else {
-      DCHECK(!current_block_needs_variables_);
+      Asm().SetVariable(*var, new_index);
+      return;
     }
+
     DCHECK(!op_mapping_[old_index].valid());
     op_mapping_[old_index] = new_index;
   }
@@ -1340,16 +916,6 @@ class GraphVisitor : public Next {
     old_opindex_to_variables[old_index] = var;
   }
 
-  template <size_t expected_size>
-  base::SmallVector<OpIndex, expected_size> MapToNewGraph(
-      base::Vector<const OpIndex> inputs) {
-    base::SmallVector<OpIndex, expected_size> result;
-    for (OpIndex input : inputs) {
-      result.push_back(MapToNewGraph(input));
-    }
-    return result;
-  }
-
   void FixLoopPhis(Block* input_graph_loop) {
     DCHECK(input_graph_loop->IsLoop());
     Block* output_graph_loop = MapToNewGraph(input_graph_loop);
@@ -1398,6 +964,37 @@ class GraphVisitor : public Next {
 
   // Mapping from old OpIndex to Variables.
   FixedOpIndexSidetable<MaybeVariable> old_opindex_to_variables;
+
+  // When the last operation of a Block is a Goto to a Block with a single
+  // predecessor, we always inline the destination into the current block. To
+  // avoid making this process recursive (which could lead to stack overflows),
+  // we set the variable {block_to_inline_now_} instead. Right after we're done
+  // visiting a Block, the function ProcessWaitingCloningAndInlining will inline
+  // {block_to_inline_now_} (if it's set) in a non-recursive way.
+  Block* block_to_inline_now_ = nullptr;
+
+  // When a Reducer wants to clone a block (for instance,
+  // BranchEliminationReducer, in order to remove Phis or to replace a Branch by
+  // a Goto), this block is not cloned right away, in order to avoid recursion
+  // (which could lead to stack overflows). Instead, we add this block to
+  // {blocks_to_clone_}. Right after we're done visiting a Block, the function
+  // ProcessWaitingCloningAndInlining will actually clone the blocks in
+  // {blocks_to_clone_} in a non-recursive way.
+  struct BlockToClone {
+    const Block* input_block;
+    int added_block_phi_input;
+    Block* new_output_block;
+  };
+  ZoneVector<BlockToClone> blocks_to_clone_;
+
+#ifdef DEBUG
+  // Recursively inlining blocks is still allowed (mainly for
+  // LoopUnrollingReducer), but it shouldn't be actually recursive. This is
+  // checked by the {is_in_recursive_inlining_}, which is set to true while
+  // recursively inlining a block. Trying to inline a block while
+  // {is_in_recursive_inlining_} is true will lead to a DCHECK failure.
+  bool is_in_recursive_inlining_ = false;
+#endif
 };
 
 template