diff --git a/common.gypi b/common.gypi index 8120bed999992f..c33f4429dbcb55 100644 --- a/common.gypi +++ b/common.gypi @@ -36,7 +36,7 @@ # Reset this number to 0 on major V8 upgrades. # Increment by one for each non-official patch applied to deps/v8. - 'v8_embedder_string': '-node.12', + 'v8_embedder_string': '-node.11', ##### V8 defaults for Node.js ##### diff --git a/deps/v8/.ycm_extra_conf.py b/deps/v8/.ycm_extra_conf.py index 25d01c1881797f..6d79c46245eb4e 100644 --- a/deps/v8/.ycm_extra_conf.py +++ b/deps/v8/.ycm_extra_conf.py @@ -114,7 +114,7 @@ def GetClangCommandFromNinjaForFilename(v8_root, filename): # should contain most/all of the interesting flags for other targets too. filename = os.path.join(v8_root, 'src', 'utils', 'utils.cc') - sys.path.append(os.path.join(v8_root, 'tools', 'ninja')) + sys.path.append(os.path.join(v8_root, 'tools', 'vim')) from ninja_output import GetNinjaOutputDirectory out_dir = os.path.realpath(GetNinjaOutputDirectory(v8_root)) @@ -133,7 +133,7 @@ def GetClangCommandFromNinjaForFilename(v8_root, filename): # Ninja might execute several commands to build something. We want the last # clang command. clang_line = None - for line in reversed(stdout.split('\n')): + for line in reversed(stdout.decode('utf-8').splitlines()): if 'clang' in line: clang_line = line break diff --git a/deps/v8/BUILD.bazel b/deps/v8/BUILD.bazel index e0d5ce713852ca..c5b4a94f911a7b 100644 --- a/deps/v8/BUILD.bazel +++ b/deps/v8/BUILD.bazel @@ -6,6 +6,7 @@ load("@bazel_skylib//lib:selects.bzl", "selects") load( "@v8//:bazel/defs.bzl", "v8_binary", + "v8_build_config", "v8_config", "v8_custom_config", "v8_raw_flag", @@ -358,6 +359,7 @@ filegroup( srcs = [ "include/cppgc/allocation.h", "include/cppgc/common.h", + "include/cppgc/cross-thread-persistent.h", "include/cppgc/custom-space.h", "include/cppgc/default-platform.h", "include/cppgc/ephemeron-pair.h", @@ -610,11 +612,14 @@ filegroup( srcs = [ "src/builtins/aggregate-error.tq", "src/builtins/array-at.tq", + "src/builtins/array-concat.tq", "src/builtins/array-copywithin.tq", "src/builtins/array-every.tq", "src/builtins/array-filter.tq", "src/builtins/array-find.tq", "src/builtins/array-findindex.tq", + "src/builtins/array-findlast.tq", + "src/builtins/array-findlastindex.tq", "src/builtins/array-foreach.tq", "src/builtins/array-from.tq", "src/builtins/array-isarray.tq", @@ -716,6 +721,8 @@ filegroup( "src/builtins/typed-array-filter.tq", "src/builtins/typed-array-find.tq", "src/builtins/typed-array-findindex.tq", + "src/builtins/typed-array-findlast.tq", + "src/builtins/typed-array-findlastindex.tq", "src/builtins/typed-array-foreach.tq", "src/builtins/typed-array-from.tq", "src/builtins/typed-array-keys.tq", @@ -1018,6 +1025,7 @@ filegroup( "src/codegen/reloc-info.h", "src/codegen/safepoint-table.cc", "src/codegen/safepoint-table.h", + "src/codegen/script-details.h", "src/codegen/signature.h", "src/codegen/source-position-table.cc", "src/codegen/source-position-table.h", @@ -1041,8 +1049,8 @@ filegroup( "src/common/message-template.h", "src/common/ptr-compr-inl.h", "src/common/ptr-compr.h", - "src/compiler-dispatcher/compiler-dispatcher.cc", - "src/compiler-dispatcher/compiler-dispatcher.h", + "src/compiler-dispatcher/lazy-compile-dispatcher.cc", + "src/compiler-dispatcher/lazy-compile-dispatcher.h", "src/compiler-dispatcher/optimizing-compile-dispatcher.cc", "src/compiler-dispatcher/optimizing-compile-dispatcher.h", "src/date/date.cc", @@ -1253,6 +1261,7 @@ filegroup( "src/heap/invalidated-slots.h", "src/heap/large-spaces.cc", 
"src/heap/large-spaces.h", + "src/heap/linear-allocation-area.h", "src/heap/list.h", "src/heap/local-allocator-inl.h", "src/heap/local-allocator.h", @@ -1403,6 +1412,7 @@ filegroup( "src/logging/counters-definitions.h", "src/logging/counters.cc", "src/logging/counters.h", + "src/logging/counters-scopes.h", "src/logging/local-logger.cc", "src/logging/local-logger.h", "src/logging/log-inl.h", @@ -1591,6 +1601,7 @@ filegroup( "src/objects/prototype-info.h", "src/objects/prototype.h", "src/objects/prototype-inl.h", + "src/objects/regexp-match-info-inl.h", "src/objects/regexp-match-info.h", "src/objects/scope-info-inl.h", "src/objects/scope-info.cc", @@ -2433,9 +2444,6 @@ filegroup( "src/compiler/scheduler.h", "src/compiler/select-lowering.cc", "src/compiler/select-lowering.h", - "src/compiler/serializer-for-background-compilation.cc", - "src/compiler/serializer-for-background-compilation.h", - "src/compiler/serializer-hints.h", "src/compiler/simplified-lowering.cc", "src/compiler/simplified-lowering.h", "src/compiler/simplified-operator.cc", @@ -2672,10 +2680,12 @@ filegroup( "src/bigint/bigint-internal.h", "src/bigint/bigint.h", "src/bigint/digit-arithmetic.h", + "src/bigint/div-barrett.cc", "src/bigint/div-burnikel.cc", "src/bigint/div-helpers.cc", "src/bigint/div-helpers.h", "src/bigint/div-schoolbook.cc", + "src/bigint/fromstring.cc", "src/bigint/mul-fft.cc", "src/bigint/mul-karatsuba.cc", "src/bigint/mul-schoolbook.cc", @@ -3050,3 +3060,42 @@ v8_binary( ], deps = [ ":v8" ], ) + +# ================================================= +# Tests +# ================================================= + +v8_build_config( + name = "v8_build_config", +) + +# Runs mjunit with d8. +py_test( + name = "mjsunit", + size = "medium", + srcs = [ + "test/mjsunit/testcfg.py", + "tools/predictable_wrapper.py", + "tools/run-tests.py", + ] + glob(["tools/testrunner/**/*.py"]), + args = [ + "--no-sorting", + "--nopresubmit", + # TODO(victorgomes): Create a flag to pass the variant in the cmdline. + "--variant=default", + "--outdir bazel-bin", + "mjsunit", + ], + data = [ + ":v8_build_config", + ":d8", + "test", + ] + glob(["test/**"]) + glob(["tools/**/*.js"]) + glob(["tools/**/*.mjs"]), + main = "tools/run-tests.py", + # TODO(victorgomes): Move this to PY3. + python_version = "PY2", + tags = [ + # Disable sanitizers, as they don't work in general in V8. + "nosan", + ], +) diff --git a/deps/v8/BUILD.gn b/deps/v8/BUILD.gn index 17bab98e8faba5..3e48fb11bff9f9 100644 --- a/deps/v8/BUILD.gn +++ b/deps/v8/BUILD.gn @@ -452,6 +452,12 @@ if (v8_enable_shared_ro_heap == "") { v8_enable_pointer_compression_shared_cage } +# Check if it is a Chromium build and activate PAC/BTI if needed. +if (build_with_chromium && v8_current_cpu == "arm64" && + arm_control_flow_integrity == "standard") { + v8_control_flow_integrity = true +} + assert(!v8_disable_write_barriers || v8_enable_single_generation, "Disabling write barriers works only with single generation") @@ -548,6 +554,10 @@ config("internal_config") { if (is_component_build) { defines += [ "BUILDING_V8_SHARED" ] } + + if (v8_current_cpu == "riscv64") { + libs = [ "atomic" ] + } } # Should be applied to all targets that write trace events. 
@@ -616,6 +626,10 @@ config("external_config") { if (is_component_build) { defines += [ "USING_V8_SHARED" ] } + + if (current_cpu == "riscv64") { + libs = [ "atomic" ] + } } # This config should only be applied to code that needs to be explicitly @@ -918,6 +932,9 @@ config("features") { if (v8_allocation_site_tracking) { defines += [ "V8_ALLOCATION_SITE_TRACKING" ] } + if (v8_advanced_bigint_algorithms) { + defines += [ "V8_ADVANCED_BIGINT_ALGORITHMS" ] + } } config("toolchain") { @@ -1396,11 +1413,14 @@ action("postmortem-metadata") { torque_files = [ "src/builtins/aggregate-error.tq", "src/builtins/array-at.tq", + "src/builtins/array-concat.tq", "src/builtins/array-copywithin.tq", "src/builtins/array-every.tq", "src/builtins/array-filter.tq", "src/builtins/array-find.tq", "src/builtins/array-findindex.tq", + "src/builtins/array-findlast.tq", + "src/builtins/array-findlastindex.tq", "src/builtins/array-foreach.tq", "src/builtins/array-from.tq", "src/builtins/array-isarray.tq", @@ -1502,6 +1522,8 @@ torque_files = [ "src/builtins/typed-array-filter.tq", "src/builtins/typed-array-find.tq", "src/builtins/typed-array-findindex.tq", + "src/builtins/typed-array-findlast.tq", + "src/builtins/typed-array-findlastindex.tq", "src/builtins/typed-array-foreach.tq", "src/builtins/typed-array-from.tq", "src/builtins/typed-array-keys.tq", @@ -2113,6 +2135,7 @@ v8_source_set("v8_initializers") { "src/builtins/builtins-call-gen.cc", "src/builtins/builtins-call-gen.h", "src/builtins/builtins-collections-gen.cc", + "src/builtins/builtins-collections-gen.h", "src/builtins/builtins-constructor-gen.cc", "src/builtins/builtins-constructor-gen.h", "src/builtins/builtins-constructor.h", @@ -2457,6 +2480,7 @@ v8_header_set("v8_internal_headers") { "src/codegen/reglist.h", "src/codegen/reloc-info.h", "src/codegen/safepoint-table.h", + "src/codegen/script-details.h", "src/codegen/signature.h", "src/codegen/source-position-table.h", "src/codegen/source-position.h", @@ -2472,8 +2496,11 @@ v8_header_set("v8_internal_headers") { "src/common/message-template.h", "src/common/ptr-compr-inl.h", "src/common/ptr-compr.h", - "src/compiler-dispatcher/compiler-dispatcher.h", + "src/compiler-dispatcher/lazy-compile-dispatcher.h", "src/compiler-dispatcher/optimizing-compile-dispatcher.h", + "src/compiler/access-builder.h", + "src/compiler/access-info.h", + "src/compiler/add-type-assertions-reducer.h", "src/compiler/all-nodes.h", "src/compiler/allocation-builder-inl.h", "src/compiler/allocation-builder.h", @@ -2529,6 +2556,7 @@ v8_header_set("v8_internal_headers") { "src/compiler/graph-visualizer.h", "src/compiler/graph-zone-traits.h", "src/compiler/graph.h", + "src/compiler/heap-refs.h", "src/compiler/js-call-reducer.h", "src/compiler/js-context-specialization.h", "src/compiler/js-create-lowering.h", @@ -2582,8 +2610,6 @@ v8_header_set("v8_internal_headers") { "src/compiler/schedule.h", "src/compiler/scheduler.h", "src/compiler/select-lowering.h", - "src/compiler/serializer-for-background-compilation.h", - "src/compiler/serializer-hints.h", "src/compiler/simplified-lowering.h", "src/compiler/simplified-operator-reducer.h", "src/compiler/simplified-operator.h", @@ -2695,6 +2721,7 @@ v8_header_set("v8_internal_headers") { "src/heap/cppgc-js/unified-heap-marking-verifier.h", "src/heap/cppgc-js/unified-heap-marking-visitor.h", "src/heap/embedder-tracing.h", + "src/heap/factory-base-inl.h", "src/heap/factory-base.h", "src/heap/factory-inl.h", "src/heap/factory.h", @@ -2715,6 +2742,7 @@ v8_header_set("v8_internal_headers") { 
"src/heap/invalidated-slots-inl.h", "src/heap/invalidated-slots.h", "src/heap/large-spaces.h", + "src/heap/linear-allocation-area.h", "src/heap/list.h", "src/heap/local-allocator-inl.h", "src/heap/local-allocator.h", @@ -2807,6 +2835,7 @@ v8_header_set("v8_internal_headers") { "src/libsampler/sampler.h", "src/logging/code-events.h", "src/logging/counters-definitions.h", + "src/logging/counters-scopes.h", "src/logging/counters.h", "src/logging/local-logger.h", "src/logging/log-inl.h", @@ -2872,6 +2901,7 @@ v8_header_set("v8_internal_headers") { "src/objects/free-space-inl.h", "src/objects/free-space.h", "src/objects/function-kind.h", + "src/objects/function-syntax-kind.h", "src/objects/hash-table-inl.h", "src/objects/hash-table.h", "src/objects/heap-number-inl.h", @@ -2934,6 +2964,7 @@ v8_header_set("v8_internal_headers") { "src/objects/object-type.h", "src/objects/objects-body-descriptors-inl.h", "src/objects/objects-body-descriptors.h", + "src/objects/objects-definitions.h", "src/objects/objects-inl.h", "src/objects/objects.h", "src/objects/oddball-inl.h", @@ -2969,6 +3000,8 @@ v8_header_set("v8_internal_headers") { "src/objects/slots-atomic-inl.h", "src/objects/slots-inl.h", "src/objects/slots.h", + "src/objects/smi-inl.h", + "src/objects/smi.h", "src/objects/source-text-module-inl.h", "src/objects/source-text-module.h", "src/objects/stack-frame-info-inl.h", @@ -3149,6 +3182,7 @@ v8_header_set("v8_internal_headers") { if (v8_enable_webassembly) { sources += [ "src/asmjs/asm-js.h", + "src/asmjs/asm-names.h", "src/asmjs/asm-parser.h", "src/asmjs/asm-scanner.h", "src/asmjs/asm-types.h", @@ -3364,8 +3398,16 @@ v8_header_set("v8_internal_headers") { if (v8_control_flow_integrity) { sources += [ "src/execution/arm64/pointer-authentication-arm64.h" ] } - if (v8_enable_webassembly && current_cpu == "arm64" && is_mac) { - sources += [ "src/trap-handler/handler-inside-posix.h" ] + if (v8_enable_webassembly) { + # Trap handling is enabled on arm64 Mac and in simulators on x64 on Linux + # and Mac. 
+ if ((current_cpu == "arm64" && is_mac) || + (current_cpu == "x64" && (is_linux || is_chromeos || is_mac))) { + sources += [ "src/trap-handler/handler-inside-posix.h" ] + } + if (current_cpu == "x64" && (is_linux || is_chromeos || is_mac)) { + sources += [ "src/trap-handler/trap-handler-simulator.h" ] + } } if (is_win) { sources += [ "src/diagnostics/unwinding-info-win64.h" ] @@ -3449,6 +3491,8 @@ v8_header_set("v8_internal_headers") { ] } else if (v8_current_cpu == "riscv64") { sources += [ ### gcmole(arch:riscv64) ### + "src/baseline/riscv64/baseline-assembler-riscv64-inl.h", + "src/baseline/riscv64/baseline-compiler-riscv64-inl.h", "src/codegen/riscv64/assembler-riscv64-inl.h", "src/codegen/riscv64/assembler-riscv64.h", "src/codegen/riscv64/constants-riscv64.h", @@ -3576,7 +3620,6 @@ v8_compiler_sources = [ "src/compiler/schedule.cc", "src/compiler/scheduler.cc", "src/compiler/select-lowering.cc", - "src/compiler/serializer-for-background-compilation.cc", "src/compiler/simplified-lowering.cc", "src/compiler/simplified-operator-reducer.cc", "src/compiler/simplified-operator.cc", @@ -3756,7 +3799,7 @@ v8_source_set("v8_base_without_compiler") { "src/codegen/turbo-assembler.cc", "src/codegen/unoptimized-compilation-info.cc", "src/common/assert-scope.cc", - "src/compiler-dispatcher/compiler-dispatcher.cc", + "src/compiler-dispatcher/lazy-compile-dispatcher.cc", "src/compiler-dispatcher/optimizing-compile-dispatcher.cc", "src/date/date.cc", "src/date/dateparser.cc", @@ -4265,11 +4308,19 @@ v8_source_set("v8_base_without_compiler") { "src/execution/arm64/simulator-logic-arm64.cc", "src/regexp/arm64/regexp-macro-assembler-arm64.cc", ] - if (v8_enable_webassembly && current_cpu == "arm64" && is_mac) { - sources += [ - "src/trap-handler/handler-inside-posix.cc", - "src/trap-handler/handler-outside-posix.cc", - ] + if (v8_enable_webassembly) { + # Trap handling is enabled on arm64 Mac and in simulators on x64 on Linux + # and Mac. 
+ if ((current_cpu == "arm64" && is_mac) || + (current_cpu == "x64" && (is_linux || is_chromeos || is_mac))) { + sources += [ + "src/trap-handler/handler-inside-posix.cc", + "src/trap-handler/handler-outside-posix.cc", + ] + } + if (current_cpu == "x64" && (is_linux || is_chromeos || is_mac)) { + sources += [ "src/trap-handler/handler-outside-simulator.cc" ] + } } if (is_win) { sources += [ "src/diagnostics/unwinding-info-win64.cc" ] @@ -4712,10 +4763,12 @@ v8_component("v8_libbase") { "src/base/sys-info.cc", "src/base/sys-info.h", "src/base/template-utils.h", + "src/base/threaded-list.h", "src/base/timezone-cache.h", "src/base/type-traits.h", "src/base/utils/random-number-generator.cc", "src/base/utils/random-number-generator.h", + "src/base/v8-fallthrough.h", "src/base/vector.h", "src/base/vlq-base64.cc", "src/base/vlq-base64.h", @@ -4927,6 +4980,10 @@ v8_component("v8_libplatform") { sources += [ "src/libplatform/tracing/recorder-win.cc" ] } } + + if (v8_current_cpu == "riscv64") { + libs = [ "atomic" ] + } } v8_source_set("fuzzer_support") { @@ -4957,6 +5014,7 @@ v8_source_set("v8_bigint") { "src/bigint/div-helpers.cc", "src/bigint/div-helpers.h", "src/bigint/div-schoolbook.cc", + "src/bigint/fromstring.cc", "src/bigint/mul-karatsuba.cc", "src/bigint/mul-schoolbook.cc", "src/bigint/tostring.cc", @@ -4967,11 +5025,10 @@ v8_source_set("v8_bigint") { if (v8_advanced_bigint_algorithms) { sources += [ + "src/bigint/div-barrett.cc", "src/bigint/mul-fft.cc", "src/bigint/mul-toom.cc", ] - - defines = [ "V8_ADVANCED_BIGINT_ALGORITHMS" ] } configs = [ ":internal_config" ] @@ -4983,6 +5040,7 @@ v8_source_set("v8_cppgc_shared") { "src/heap/base/stack.h", "src/heap/base/worklist.cc", "src/heap/base/worklist.h", + "src/heap/cppgc/globals.h", ] if (is_clang || !is_win) { @@ -5017,7 +5075,10 @@ v8_source_set("v8_cppgc_shared") { configs = [ ":internal_config" ] - public_deps = [ ":v8_libbase" ] + public_deps = [ + ":cppgc_headers", + ":v8_libbase", + ] } # This is split out to be a non-code containing target that the Chromium browser @@ -5075,7 +5136,10 @@ v8_header_set("cppgc_headers") { sources += [ "include/cppgc/internal/caged-heap-local-data.h" ] } - deps = [ ":v8_libplatform" ] + deps = [ + ":v8_libbase", + ":v8_libplatform", + ] public_deps = [ ":v8_config_headers" ] } @@ -5171,6 +5235,7 @@ v8_source_set("cppgc_base") { "src/heap/cppgc/virtual-memory.cc", "src/heap/cppgc/virtual-memory.h", "src/heap/cppgc/visitor.cc", + "src/heap/cppgc/visitor.h", "src/heap/cppgc/write-barrier.cc", "src/heap/cppgc/write-barrier.h", ] diff --git a/deps/v8/DEPS b/deps/v8/DEPS index b1e297b106f44c..439f45ca583385 100644 --- a/deps/v8/DEPS +++ b/deps/v8/DEPS @@ -49,19 +49,19 @@ vars = { 'reclient_version': 're_client_version:0.33.0.3e223d5', # GN CIPD package version. - 'gn_version': 'git_revision:24e2f7df92641de0351a96096fb2c490b2436bb8', + 'gn_version': 'git_revision:eea3906f0e2a8d3622080127d2005ff214d51383', # luci-go CIPD package version. - 'luci_go': 'git_revision:8b8a9a6040ca6debd30694a71a99a1eac97d72fd', + 'luci_go': 'git_revision:1120f810b7ab7eb71bd618c4c57fe82a60d4f2fe', # Three lines of non-changing comments so that # the commit queue can handle CLs rolling android_sdk_build-tools_version # and whatever else without interference from each other. 
- 'android_sdk_build-tools_version': '8LZujEmLjSh0g3JciDA3cslSptxKs9HOa_iUPXkOeYQC', + 'android_sdk_build-tools_version': 'tRoD45SCi7UleQqSV7MrMQO1_e5P8ysphkCcj6z_cCQC', # Three lines of non-changing comments so that # the commit queue can handle CLs rolling android_sdk_emulator_version # and whatever else without interference from each other. - 'android_sdk_emulator_version': 'A4EvXZUIuQho0QRDJopMUpgyp6NA3aiDQjGKPUKbowMC', + 'android_sdk_emulator_version': 'gMHhUuoQRKfxr-MBn3fNNXZtkAVXtOwMwT7kfx8jkIgC', # Three lines of non-changing comments so that # the commit queue can handle CLs rolling android_sdk_extras_version # and whatever else without interference from each other. @@ -73,28 +73,28 @@ vars = { # Three lines of non-changing comments so that # the commit queue can handle CLs rolling android_sdk_platform-tools_version # and whatever else without interference from each other. - 'android_sdk_platform-tools_version': '8tF0AOj7Dwlv4j7_nfkhxWB0jzrvWWYjEIpirt8FIWYC', + 'android_sdk_platform-tools_version': 'qi_k82nm6j9nz4dQosOoqXew4_TFAy8rcGOHDLptx1sC', # Three lines of non-changing comments so that # the commit queue can handle CLs rolling android_sdk_platforms_version # and whatever else without interference from each other. - 'android_sdk_platforms_version': 'YMUu9EHNZ__2Xcxl-KsaSf-dI5TMt_P62IseUVsxktMC', + 'android_sdk_platforms_version': 'lL3IGexKjYlwjO_1Ga-xwxgwbE_w-lmi2Zi1uOlWUIAC', # Three lines of non-changing comments so that # the commit queue can handle CLs rolling android_sdk_sources_version # and whatever else without interference from each other. - 'android_sdk_sources_version': '4gxhM8E62bvZpQs7Q3d0DinQaW0RLCIefhXrQBFkNy8C', + 'android_sdk_sources_version': 'n7svc8KYah-i4s8zwkVa85SI3_H0WFOniP0mpwNdFO0C', # Three lines of non-changing comments so that # the commit queue can handle CLs rolling android_sdk_tools-lint_version # and whatever else without interference from each other. 
- 'android_sdk_cmdline-tools_version': 'V__2Ycej-H2-6AcXX5A3gi7sIk74SuN44PBm2uC_N1sC', + 'android_sdk_cmdline-tools_version': 'ZT3JmI6GMG4YVcZ1OtECRVMOLLJAWAdPbi-OclubJLMC', } deps = { 'base/trace_event/common': - Var('chromium_url') + '/chromium/src/base/trace_event/common.git' + '@' + 'd5bb24e5d9802c8c917fcaa4375d5239a586c168', + Var('chromium_url') + '/chromium/src/base/trace_event/common.git' + '@' + '3da1e2fcf66acd5c7194497b4285ac163f32e239', 'build': - Var('chromium_url') + '/chromium/src/build.git' + '@' + '2d999384c270a340f592cce0a0fb3f8f94c15290', + Var('chromium_url') + '/chromium/src/build.git' + '@' + 'bbf7f0ed65548c4df862d2a2748e3a9b908a3217', 'buildtools': - Var('chromium_url') + '/chromium/src/buildtools.git' + '@' + '2500c1d8f3a20a66a7cbafe3f69079a2edb742dd', + Var('chromium_url') + '/chromium/src/buildtools.git' + '@' + '37dc929ecb351687006a61744b116cda601753d7', 'buildtools/clang_format/script': Var('chromium_url') + '/external/github.com/llvm/llvm-project/clang/tools/clang-format.git' + '@' + '99803d74e35962f63a775f29477882afd4d57d94', 'buildtools/linux64': { @@ -110,7 +110,7 @@ deps = { 'buildtools/mac': { 'packages': [ { - 'package': 'gn/gn/mac-amd64', + 'package': 'gn/gn/mac-${{arch}}', 'version': Var('gn_version'), } ], @@ -120,9 +120,9 @@ deps = { 'buildtools/third_party/libc++/trunk': Var('chromium_url') + '/external/github.com/llvm/llvm-project/libcxx.git' + '@' + '79a2e924d96e2fc1e4b937c42efd08898fa472d7', 'buildtools/third_party/libc++abi/trunk': - Var('chromium_url') + '/external/github.com/llvm/llvm-project/libcxxabi.git' + '@' + '6803464b0f46df0a51862347d39e0791b59cf568', + Var('chromium_url') + '/external/github.com/llvm/llvm-project/libcxxabi.git' + '@' + '24e92c2beed59b76ddabe7ceb5ee4b40f09e0712', 'buildtools/third_party/libunwind/trunk': - Var('chromium_url') + '/external/github.com/llvm/llvm-project/libunwind.git' + '@' + 'a5feaf61658af4453e282142a76aeb6f9c045311', + Var('chromium_url') + '/external/github.com/llvm/llvm-project/libunwind.git' + '@' + 'b825591df326b2725e6b88bdf74fdc88fefdf460', 'buildtools/win': { 'packages': [ { @@ -148,14 +148,14 @@ deps = { 'test/mozilla/data': Var('chromium_url') + '/v8/deps/third_party/mozilla-tests.git' + '@' + 'f6c578a10ea707b1a8ab0b88943fe5115ce2b9be', 'test/test262/data': - Var('chromium_url') + '/external/github.com/tc39/test262.git' + '@' + 'ebb6c34fa5dd76a6bea01c54ed7b182596492176', + Var('chromium_url') + '/external/github.com/tc39/test262.git' + '@' + 'ab353c6e732b9e175d3ad6779e3acf3ea82d3761', 'test/test262/harness': Var('chromium_url') + '/external/github.com/test262-utils/test262-harness-py.git' + '@' + '278bcfaed0dcaa13936831fb1769d15e7c1e3b2b', 'third_party/aemu-linux-x64': { 'packages': [ { 'package': 'fuchsia/third_party/aemu/linux-amd64', - 'version': 'm4sM10idq7LeFHXpoLKLBtaOZsQzuj63Usa3Cl9af1YC' + 'version': 'qWiGSH8A_xdaUVO-GsDJsJ5HCkIRwZqb-HDyxsLiuWwC' }, ], 'condition': 'host_os == "linux" and checkout_fuchsia', @@ -176,13 +176,13 @@ deps = { 'condition': 'checkout_android', }, 'third_party/android_platform': { - 'url': Var('chromium_url') + '/chromium/src/third_party/android_platform.git' + '@' + 'b291e88d8e3e6774d6d46151e11dc3189ddeeb09', + 'url': Var('chromium_url') + '/chromium/src/third_party/android_platform.git' + '@' + 'e98c753917587d320f4e7a24f1c7474535adac3f', 'condition': 'checkout_android', }, 'third_party/android_sdk/public': { 'packages': [ { - 'package': 'chromium/third_party/android_sdk/public/build-tools/30.0.1', + 'package': 
'chromium/third_party/android_sdk/public/build-tools/31.0.0', 'version': Var('android_sdk_build-tools_version'), }, { @@ -202,11 +202,11 @@ deps = { 'version': Var('android_sdk_platform-tools_version'), }, { - 'package': 'chromium/third_party/android_sdk/public/platforms/android-30', + 'package': 'chromium/third_party/android_sdk/public/platforms/android-31', 'version': Var('android_sdk_platforms_version'), }, { - 'package': 'chromium/third_party/android_sdk/public/sources/android-29', + 'package': 'chromium/third_party/android_sdk/public/sources/android-30', 'version': Var('android_sdk_sources_version'), }, { @@ -218,7 +218,7 @@ deps = { 'dep_type': 'cipd', }, 'third_party/catapult': { - 'url': Var('chromium_url') + '/catapult.git' + '@' + '2814ff3716a8512518bee705a0f91425ce06b27b', + 'url': Var('chromium_url') + '/catapult.git' + '@' + 'abc7ba7d871fe3c25b0a1bec7fc84fb309034cb7', 'condition': 'checkout_android', }, 'third_party/colorama/src': { @@ -226,24 +226,24 @@ deps = { 'condition': 'checkout_android', }, 'third_party/depot_tools': - Var('chromium_url') + '/chromium/tools/depot_tools.git' + '@' + 'a806594b95a39141fdbf1f359087a44ffb2deaaf', + Var('chromium_url') + '/chromium/tools/depot_tools.git' + '@' + '49a703f3d915b140c9f373107e1ba17f30e2487d', 'third_party/fuchsia-sdk': { 'url': Var('chromium_url') + '/chromium/src/third_party/fuchsia-sdk.git' + '@' + '18896843130c33372c455c153ad07d2217bd2085', 'condition': 'checkout_fuchsia', }, 'third_party/google_benchmark/src': { - 'url': Var('chromium_url') + '/external/github.com/google/benchmark.git' + '@' + 'e451e50e9b8af453f076dec10bd6890847f1624e', + 'url': Var('chromium_url') + '/external/github.com/google/benchmark.git' + '@' + '4124223bf5303d1d65fe2c40f33e28372bbb986c', }, 'third_party/googletest/src': - Var('chromium_url') + '/external/github.com/google/googletest.git' + '@' + '4ec4cd23f486bf70efcc5d2caa40f24368f752e3', + Var('chromium_url') + '/external/github.com/google/googletest.git' + '@' + '47f819c3ca54fb602f432904443e00a0a1fe2f42', 'third_party/icu': - Var('chromium_url') + '/chromium/deps/icu.git' + '@' + 'b9dfc58bf9b02ea0365509244aca13841322feb0', + Var('chromium_url') + '/chromium/deps/icu.git' + '@' + '75e34bcccea0be165c31fdb278b3712c516c5876', 'third_party/instrumented_libraries': - Var('chromium_url') + '/chromium/src/third_party/instrumented_libraries.git' + '@' + '4ae2535e8e894c3cd81d46aacdaf151b5df30709', + Var('chromium_url') + '/chromium/src/third_party/instrumented_libraries.git' + '@' + '9a8087bbbf43a355950fc1667575d1a753f8aaa4', 'third_party/ittapi': { # Force checkout ittapi libraries to pass v8 header includes check on # bots that has check_v8_header_includes enabled. 
- 'url': Var('chromium_url') + '/external/github.com/intel/ittapi' + '@' + 'b4ae0122ba749163096058b4f1bb065bf4a7de94', + 'url': Var('chromium_url') + '/external/github.com/intel/ittapi' + '@' + 'a3911fff01a775023a06af8754f9ec1e5977dd97', 'condition': "checkout_ittapi or check_v8_header_includes", }, 'third_party/jinja2': @@ -251,7 +251,7 @@ deps = { 'third_party/jsoncpp/source': Var('chromium_url') + '/external/github.com/open-source-parsers/jsoncpp.git'+ '@' + '9059f5cad030ba11d37818847443a53918c327b1', 'third_party/logdog/logdog': - Var('chromium_url') + '/infra/luci/luci-py/client/libs/logdog' + '@' + '794d09a24c10401953880c253d0c7e267234ab75', + Var('chromium_url') + '/infra/luci/luci-py/client/libs/logdog' + '@' + '17ec234f823f7bff6ada6584fdbbee9d54b8fc58', 'third_party/markupsafe': Var('chromium_url') + '/chromium/src/third_party/markupsafe.git' + '@' + '1b882ef6372b58bfd55a3285f37ed801be9137cd', 'third_party/perfetto': @@ -283,9 +283,9 @@ deps = { 'condition': 'checkout_android', }, 'third_party/zlib': - Var('chromium_url') + '/chromium/src/third_party/zlib.git'+ '@' + 'dfbc590f5855bc2765256a743cad0abc56330a30', + Var('chromium_url') + '/chromium/src/third_party/zlib.git'+ '@' + '563140dd9c24f84bf40919196e9e7666d351cc0d', 'tools/clang': - Var('chromium_url') + '/chromium/src/tools/clang.git' + '@' + '9d0a403e85d25b5b0d3016a342d4b83b12941fd5', + Var('chromium_url') + '/chromium/src/tools/clang.git' + '@' + '6a8e571efd68de48d226950d1e10cb8982e71496', 'tools/clang/dsymutil': { 'packages': [ { diff --git a/deps/v8/OWNERS b/deps/v8/OWNERS index 6f0ac017208f5c..f9b23f237f546e 100644 --- a/deps/v8/OWNERS +++ b/deps/v8/OWNERS @@ -27,8 +27,8 @@ per-file codereview.settings=file:INFRA_OWNERS per-file AUTHORS=file:COMMON_OWNERS per-file WATCHLISTS=file:COMMON_OWNERS -per-file *-mips*=file:MIPS_OWNERS -per-file *-mips64*=file:MIPS_OWNERS -per-file *-ppc*=file:PPC_OWNERS -per-file *-riscv64*=file:RISCV_OWNERS -per-file *-s390*=file:S390_OWNERS +per-file ...-mips*=file:MIPS_OWNERS +per-file ...-mips64*=file:MIPS_OWNERS +per-file ...-ppc*=file:PPC_OWNERS +per-file ...-riscv64*=file:RISCV_OWNERS +per-file ...-s390*=file:S390_OWNERS diff --git a/deps/v8/WATCHLISTS b/deps/v8/WATCHLISTS index f691d88e921606..b8b7eac99a22b7 100644 --- a/deps/v8/WATCHLISTS +++ b/deps/v8/WATCHLISTS @@ -33,6 +33,9 @@ { 'WATCHLIST_DEFINITIONS': { + 'api': { + 'filepath': 'include/', + }, 'snapshot': { 'filepath': 'src/snapshot/', }, @@ -52,7 +55,7 @@ '|test/unittests/interpreter/', }, 'baseline': { - 'filepath': 'src/baseline/' + 'filepath': 'src/baseline/', }, 'feature_shipping_status': { 'filepath': 'src/flags/flag-definitions.h', @@ -67,9 +70,6 @@ 'filepath': 'src/codegen/code-stub-assembler\.(cc|h)$' \ '|src/builtins/.*-gen.(cc|h)$', }, - 'ia32': { - 'filepath': '/ia32/', - }, 'merges': { 'filepath': '.', }, @@ -102,7 +102,7 @@ 'filepath': 'BUILD.gn' \ '|BUILD.bazel' \ '|WORKSPACE' \ - '|bazel/' + '|bazel/', }, }, @@ -153,17 +153,21 @@ 'alph+watch@chromium.org', 'lpy+v8tracing@chromium.org', 'fmeawad@chromium.org', + 'cbruni+watch@chromium.org', ], 'ieee754': [ 'rtoy+watch@chromium.org', - 'hongchan+watch@chromium.org' + 'hongchan+watch@chromium.org', ], 'regexp': [ 'jgruber+watch@chromium.org', - 'pthier+watch@chromium.org' + 'pthier+watch@chromium.org', + ], + 'bazel': [ + 'victorgomes+watch@chromium.org', ], - 'bazel' : [ - 'victorgomes+watch@chromium.org' + 'api': [ + 'cbruni+watch@chromium.org', ], }, } diff --git a/deps/v8/base/trace_event/common/trace_event_common.h 
b/deps/v8/base/trace_event/common/trace_event_common.h index dff2f9b2773c68..76391985c1af4e 100644 --- a/deps/v8/base/trace_event/common/trace_event_common.h +++ b/deps/v8/base/trace_event/common/trace_event_common.h @@ -56,12 +56,12 @@ // static int send_count = 0; // ++send_count; // TRACE_EVENT_NESTABLE_ASYNC_BEGIN0( -// "ipc", "message", TRACE_ID_LOCAL(send_count)); +// "ipc", "message", TRACE_ID_WITH_SCOPE("message", send_count)); // Send(new MyMessage(send_count)); // [receive code] // void OnMyMessage(send_count) { // TRACE_NESTABLE_EVENT_ASYNC_END0( -// "ipc", "message", TRACE_ID_LOCAL(send_count)); +// "ipc", "message", TRACE_ID_WITH_SCOPE("message", send_count)); // } // The third parameter is a unique ID to match NESTABLE_ASYNC_BEGIN/ASYNC_END // pairs. NESTABLE_ASYNC_BEGIN and ASYNC_END can occur on any thread of any @@ -71,10 +71,12 @@ // class MyTracedClass { // public: // MyTracedClass() { -// TRACE_EVENT_NESTABLE_ASYNC_BEGIN0("category", "MyTracedClass", this); +// TRACE_EVENT_NESTABLE_ASYNC_BEGIN0("category", "MyTracedClass", +// TRACE_ID_LOCAL(this)); // } // ~MyTracedClass() { -// TRACE_EVENT_NESTABLE_ASYNC_END0("category", "MyTracedClass", this); +// TRACE_EVENT_NESTABLE_ASYNC_END0("category", "MyTracedClass", +// TRACE_ID_LOCAL(this)); // } // } // @@ -390,12 +392,15 @@ struct BASE_EXPORT TraceTimestampTraits<::base::TimeTicks> { TRACE_EVENT_FLAG_COPY, arg1_name, arg1_val, \ arg2_name, arg2_val) -// Similar to TRACE_EVENT_BEGINx but with a custom |at| timestamp provided. +// Similar to TRACE_EVENT_BEGINx but with a custom |timestamp| provided. // - |id| is used to match the _BEGIN event with the _END event. // Events are considered to match if their category_group, name and id values // all match. |id| must either be a pointer or an integer value up to 64 bits. // If it's a pointer, the bits will be xored with a hash of the process ID so // that the same pointer on two different processes will not collide. +// - |timestamp| must be non-null or it crashes. Use DCHECK(timestamp) before +// calling this to detect an invalid timestamp even when tracing is not +// enabled, as the commit queue doesn't run all tests with tracing enabled. #define TRACE_EVENT_BEGIN_WITH_ID_TID_AND_TIMESTAMP0(category_group, name, id, \ thread_id, timestamp) \ INTERNAL_TRACE_EVENT_ADD_WITH_ID_TID_AND_TIMESTAMP( \ @@ -446,6 +451,10 @@ struct BASE_EXPORT TraceTimestampTraits<::base::TimeTicks> { TRACE_EVENT_FLAG_COPY, arg1_name, arg1_val, \ arg2_name, arg2_val) +// Adds a trace event with the given |name| and |timestamp|. |timestamp| must be +// non-null or it crashes. Use DCHECK(timestamp) before calling this to detect +// an invalid timestamp even when tracing is not enabled, as the commit queue +// doesn't run all tests with tracing enabled. #define TRACE_EVENT_MARK_WITH_TIMESTAMP0(category_group, name, timestamp) \ INTERNAL_TRACE_EVENT_ADD_WITH_TIMESTAMP( \ TRACE_EVENT_PHASE_MARK, category_group, name, timestamp, \ @@ -476,12 +485,15 @@ struct BASE_EXPORT TraceTimestampTraits<::base::TimeTicks> { TRACE_EVENT_PHASE_MARK, category_group, name, timestamp, \ TRACE_EVENT_FLAG_COPY) -// Similar to TRACE_EVENT_ENDx but with a custom |at| timestamp provided. +// Similar to TRACE_EVENT_ENDx but with a custom |timestamp| provided. // - |id| is used to match the _BEGIN event with the _END event. // Events are considered to match if their category_group, name and id values // all match. |id| must either be a pointer or an integer value up to 64 bits. 
// If it's a pointer, the bits will be xored with a hash of the process ID so // that the same pointer on two different processes will not collide. +// - |timestamp| must be non-null or it crashes. Use DCHECK(timestamp) before +// calling this to detect an invalid timestamp even when tracing is not +// enabled, as the commit queue doesn't run all tests with tracing enabled. #define TRACE_EVENT_END_WITH_ID_TID_AND_TIMESTAMP0(category_group, name, id, \ thread_id, timestamp) \ INTERNAL_TRACE_EVENT_ADD_WITH_ID_TID_AND_TIMESTAMP( \ @@ -540,6 +552,9 @@ struct BASE_EXPORT TraceTimestampTraits<::base::TimeTicks> { static_cast(value2_val)) // Similar to TRACE_COUNTERx, but with a custom |timestamp| provided. +// - |timestamp| must be non-null or it crashes. Use DCHECK(timestamp) before +// calling this to detect an invalid timestamp even when tracing is not +// enabled, as the commit queue doesn't run all tests with tracing enabled. #define TRACE_COUNTER_WITH_TIMESTAMP1(category_group, name, timestamp, value) \ INTERNAL_TRACE_EVENT_ADD_WITH_TIMESTAMP( \ TRACE_EVENT_PHASE_COUNTER, category_group, name, timestamp, \ @@ -925,6 +940,16 @@ struct BASE_EXPORT TraceTimestampTraits<::base::TimeTicks> { INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_NESTABLE_ASYNC_BEGIN, \ category_group, name, id, \ TRACE_EVENT_FLAG_COPY) +#define TRACE_EVENT_COPY_NESTABLE_ASYNC_BEGIN1(category_group, name, id, \ + arg1_name, arg1_val) \ + INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_NESTABLE_ASYNC_BEGIN, \ + category_group, name, id, \ + TRACE_EVENT_FLAG_COPY, arg1_name, arg1_val) +#define TRACE_EVENT_COPY_NESTABLE_ASYNC_BEGIN2( \ + category_group, name, id, arg1_name, arg1_val, arg2_name, arg2_val) \ + INTERNAL_TRACE_EVENT_ADD_WITH_ID( \ + TRACE_EVENT_PHASE_NESTABLE_ASYNC_BEGIN, category_group, name, id, \ + TRACE_EVENT_FLAG_COPY, arg1_name, arg1_val, arg2_name, arg2_val) #define TRACE_EVENT_COPY_NESTABLE_ASYNC_END0(category_group, name, id) \ INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_NESTABLE_ASYNC_END, \ category_group, name, id, \ @@ -934,6 +959,12 @@ struct BASE_EXPORT TraceTimestampTraits<::base::TimeTicks> { INTERNAL_TRACE_EVENT_ADD_WITH_ID_TID_AND_TIMESTAMP( \ TRACE_EVENT_PHASE_NESTABLE_ASYNC_BEGIN, category_group, name, id, \ TRACE_EVENT_API_CURRENT_THREAD_ID, timestamp, TRACE_EVENT_FLAG_COPY) +#define TRACE_EVENT_COPY_NESTABLE_ASYNC_BEGIN_WITH_TIMESTAMP1( \ + category_group, name, id, timestamp, arg1_name, arg1_val) \ + INTERNAL_TRACE_EVENT_ADD_WITH_ID_TID_AND_TIMESTAMP( \ + TRACE_EVENT_PHASE_NESTABLE_ASYNC_BEGIN, category_group, name, id, \ + TRACE_EVENT_API_CURRENT_THREAD_ID, timestamp, TRACE_EVENT_FLAG_COPY, \ + arg1_name, arg1_val) #define TRACE_EVENT_COPY_NESTABLE_ASYNC_END_WITH_TIMESTAMP0( \ category_group, name, id, timestamp) \ INTERNAL_TRACE_EVENT_ADD_WITH_ID_TID_AND_TIMESTAMP( \ @@ -1088,9 +1119,6 @@ struct BASE_EXPORT TraceTimestampTraits<::base::TimeTicks> { #define TRACE_EVENT_FLAG_HAS_PROCESS_ID (static_cast(1 << 10)) #define TRACE_EVENT_FLAG_HAS_LOCAL_ID (static_cast(1 << 11)) #define TRACE_EVENT_FLAG_HAS_GLOBAL_ID (static_cast(1 << 12)) -// TODO(eseckler): Remove once we have native support for typed proto events in -// TRACE_EVENT macros. 
-#define TRACE_EVENT_FLAG_TYPED_PROTO_ARGS (static_cast(1 << 15)) #define TRACE_EVENT_FLAG_JAVA_STRING_LITERALS \ (static_cast(1 << 16)) diff --git a/deps/v8/bazel/defs.bzl b/deps/v8/bazel/defs.bzl index fbd1830ecb1e35..58fd53ed607e12 100644 --- a/deps/v8/bazel/defs.bzl +++ b/deps/v8/bazel/defs.bzl @@ -237,3 +237,62 @@ v8_mksnapshot = rule( ), } ) + +def _quote(val): + if val[0] == '"' and val[-1] == '"': + fail("String", val, "already quoted") + return '"' + val + '"' + +def _kv_bool_pair(k, v): + return _quote(k) + ": " + v + +def _json(kv_pairs): + content = "{" + for (k, v) in kv_pairs[:-1]: + content += _kv_bool_pair(k, v) + ", " + (k, v) = kv_pairs[-1] + content += _kv_bool_pair(k, v) + content += "}\n" + return content + +# TODO(victorgomes): Create a rule (instead of a macro), that can +# dynamically populate the build config. +def v8_build_config(name): + cpu = _quote("x64") + content = _json([ + ("current_cpu", cpu), + ("dcheck_always_on", "false"), + ("is_android", "false"), + ("is_asan", "false"), + ("is_cfi", "false"), + ("is_clang", "true"), + ("is_component_build", "false"), + ("is_debug", "false"), + ("is_full_debug", "false"), + ("is_gcov_coverage", "false"), + ("is_msan", "false"), + ("is_tsan", "false"), + ("is_ubsan_vptr", "false"), + ("target_cpu", cpu), + ("v8_current_cpu", cpu), + ("v8_enable_atomic_marking_state", "false"), + ("v8_enable_atomic_object_field_writes", "false"), + ("v8_enable_concurrent_marking", "false"), + ("v8_enable_i18n_support", "true"), + ("v8_enable_verify_predictable", "false"), + ("v8_enable_verify_csa", "false"), + ("v8_enable_lite_mode", "false"), + ("v8_enable_runtime_call_stats", "false"), + ("v8_enable_pointer_compression", "true"), + ("v8_enable_pointer_compression_shared_cage", "false"), + ("v8_enable_third_party_heap", "false"), + ("v8_enable_webassembly", "false"), + ("v8_control_flow_integrity", "false"), + ("v8_enable_single_generation", "false"), + ("v8_target_cpu", cpu), + ]) + native.genrule( + name = name, + outs = [name + ".json"], + cmd = "echo '" + content + "' > \"$@\"", + ) diff --git a/deps/v8/include/cppgc/allocation.h b/deps/v8/include/cppgc/allocation.h index b06d9d70206409..d75f1a9729624f 100644 --- a/deps/v8/include/cppgc/allocation.h +++ b/deps/v8/include/cppgc/allocation.h @@ -10,6 +10,7 @@ #include #include #include +#include #include "cppgc/custom-space.h" #include "cppgc/internal/api-constants.h" diff --git a/deps/v8/include/cppgc/cross-thread-persistent.h b/deps/v8/include/cppgc/cross-thread-persistent.h index fe61e9acbc3815..0a9afdcd2bd3cc 100644 --- a/deps/v8/include/cppgc/cross-thread-persistent.h +++ b/deps/v8/include/cppgc/cross-thread-persistent.h @@ -13,12 +13,34 @@ #include "cppgc/visitor.h" namespace cppgc { - namespace internal { +// Wrapper around PersistentBase that allows accessing poisoned memory when +// using ASAN. This is needed as the GC of the heap that owns the value +// of a CTP, may clear it (heap termination, weakness) while the object +// holding the CTP may be poisoned as itself may be deemed dead. 
+class CrossThreadPersistentBase : public PersistentBase { + public: + CrossThreadPersistentBase() = default; + explicit CrossThreadPersistentBase(const void* raw) : PersistentBase(raw) {} + + V8_CLANG_NO_SANITIZE("address") const void* GetValueFromGC() const { + return raw_; + } + + V8_CLANG_NO_SANITIZE("address") + PersistentNode* GetNodeFromGC() const { return node_; } + + V8_CLANG_NO_SANITIZE("address") + void ClearFromGC() const { + raw_ = nullptr; + node_ = nullptr; + } +}; + template <typename T, typename WeaknessPolicy, typename LocationPolicy, typename CheckingPolicy> -class BasicCrossThreadPersistent final : public PersistentBase, +class BasicCrossThreadPersistent final : public CrossThreadPersistentBase, public LocationPolicy, private WeaknessPolicy, private CheckingPolicy { @@ -38,11 +60,11 @@ class BasicCrossThreadPersistent final : public PersistentBase, BasicCrossThreadPersistent( SentinelPointer s, const SourceLocation& loc = SourceLocation::Current()) - : PersistentBase(s), LocationPolicy(loc) {} + : CrossThreadPersistentBase(s), LocationPolicy(loc) {} BasicCrossThreadPersistent( T* raw, const SourceLocation& loc = SourceLocation::Current()) - : PersistentBase(raw), LocationPolicy(loc) { + : CrossThreadPersistentBase(raw), LocationPolicy(loc) { if (!IsValid(raw)) return; PersistentRegionLock guard; CrossThreadPersistentRegion& region = this->GetPersistentRegion(raw); @@ -61,7 +83,7 @@ class BasicCrossThreadPersistent final : public PersistentBase, BasicCrossThreadPersistent( UnsafeCtorTag, T* raw, const SourceLocation& loc = SourceLocation::Current()) - : PersistentBase(raw), LocationPolicy(loc) { + : CrossThreadPersistentBase(raw), LocationPolicy(loc) { if (!IsValid(raw)) return; CrossThreadPersistentRegion& region = this->GetPersistentRegion(raw); SetNode(region.AllocateNode(this, &Trace)); @@ -329,12 +351,19 @@ class BasicCrossThreadPersistent final : public PersistentBase, } void ClearFromGC() const { - if (IsValid(GetValue())) { - WeaknessPolicy::GetPersistentRegion(GetValue()).FreeNode(GetNode()); - PersistentBase::ClearFromGC(); + if (IsValid(GetValueFromGC())) { + WeaknessPolicy::GetPersistentRegion(GetValueFromGC()) + .FreeNode(GetNodeFromGC()); + CrossThreadPersistentBase::ClearFromGC(); } } + // See Get() for details. + V8_CLANG_NO_SANITIZE("cfi-unrelated-cast") + T* GetFromGC() const { + return static_cast<T*>(const_cast<void*>(GetValueFromGC())); + } + friend class cppgc::Visitor; }; diff --git a/deps/v8/include/cppgc/heap-consistency.h b/deps/v8/include/cppgc/heap-consistency.h index 47caea18470a0c..8e603d5d8af2de 100644 --- a/deps/v8/include/cppgc/heap-consistency.h +++ b/deps/v8/include/cppgc/heap-consistency.h @@ -68,6 +68,23 @@ class HeapConsistency final { return internal::WriteBarrier::GetWriteBarrierType(slot, params, callback); } + /** + * Gets the required write barrier type for a specific write. + * This version is meant to be used in conjunction with a marking write + * barrier which doesn't consider the slot. + * + * \param value The pointer to the object. May be an interior pointer to an + * interface of the actual object. + * \param params Parameters that may be used for actual write barrier calls. + * Only filled if return value indicates that a write barrier is needed. The + * contents of the `params` are an implementation detail. + * \returns whether a write barrier is needed and which barrier to invoke.
+ */ + static V8_INLINE WriteBarrierType + GetWriteBarrierType(const void* value, WriteBarrierParams& params) { + return internal::WriteBarrier::GetWriteBarrierType(value, params); + } + /** * Conservative Dijkstra-style write barrier that processes an object if it * has not yet been processed. diff --git a/deps/v8/include/cppgc/internal/persistent-node.h b/deps/v8/include/cppgc/internal/persistent-node.h index 5626b17820b190..b5dba476a47900 100644 --- a/deps/v8/include/cppgc/internal/persistent-node.h +++ b/deps/v8/include/cppgc/internal/persistent-node.h @@ -75,7 +75,7 @@ class PersistentNode final { TraceCallback trace_ = nullptr; }; -class V8_EXPORT PersistentRegion final { +class V8_EXPORT PersistentRegion { using PersistentNodeSlots = std::array; public: @@ -116,6 +116,9 @@ class V8_EXPORT PersistentRegion final { private: void EnsureNodeSlots(); + template + void ClearAllUsedNodes(); + std::vector> nodes_; PersistentNode* free_list_head_ = nullptr; size_t nodes_in_use_ = 0; @@ -135,7 +138,7 @@ class V8_EXPORT PersistentRegionLock final { // Variant of PersistentRegion that checks whether the PersistentRegionLock is // locked. -class V8_EXPORT CrossThreadPersistentRegion final { +class V8_EXPORT CrossThreadPersistentRegion final : protected PersistentRegion { public: CrossThreadPersistentRegion() = default; // Clears Persistent fields to avoid stale pointers after heap teardown. @@ -147,12 +150,12 @@ class V8_EXPORT CrossThreadPersistentRegion final { V8_INLINE PersistentNode* AllocateNode(void* owner, TraceCallback trace) { PersistentRegionLock::AssertLocked(); - return persistent_region_.AllocateNode(owner, trace); + return PersistentRegion::AllocateNode(owner, trace); } V8_INLINE void FreeNode(PersistentNode* node) { PersistentRegionLock::AssertLocked(); - persistent_region_.FreeNode(node); + PersistentRegion::FreeNode(node); } void Trace(Visitor*); @@ -160,9 +163,6 @@ class V8_EXPORT CrossThreadPersistentRegion final { size_t NodesInUse() const; void ClearAllUsedNodes(); - - private: - PersistentRegion persistent_region_; }; } // namespace internal diff --git a/deps/v8/include/cppgc/internal/write-barrier.h b/deps/v8/include/cppgc/internal/write-barrier.h index c1b3b3e34d056a..28184dc9c83078 100644 --- a/deps/v8/include/cppgc/internal/write-barrier.h +++ b/deps/v8/include/cppgc/internal/write-barrier.h @@ -11,6 +11,7 @@ #include "cppgc/heap-state.h" #include "cppgc/internal/api-constants.h" #include "cppgc/internal/atomic-entry-flag.h" +#include "cppgc/platform.h" #include "cppgc/sentinel-pointer.h" #include "cppgc/trace-trait.h" #include "v8config.h" // NOLINT(build/include_directory) @@ -66,6 +67,8 @@ class V8_EXPORT WriteBarrier final { template static V8_INLINE Type GetWriteBarrierType(const void* slot, Params& params, HeapHandleCallback callback); + // Returns the required write barrier for a given `value`. 
+ static V8_INLINE Type GetWriteBarrierType(const void* value, Params& params); template static V8_INLINE Type GetWriteBarrierTypeForExternallyReferencedObject( @@ -147,9 +150,27 @@ class V8_EXPORT WriteBarrierTypeForCagedHeapPolicy final { return ValueModeDispatch::Get(slot, value, params, callback); } + template + static V8_INLINE WriteBarrier::Type Get(const void* value, + WriteBarrier::Params& params, + HeapHandleCallback callback) { + return GetNoSlot(value, params, callback); + } + template static V8_INLINE WriteBarrier::Type GetForExternallyReferenced( - const void* value, WriteBarrier::Params& params, HeapHandleCallback) { + const void* value, WriteBarrier::Params& params, + HeapHandleCallback callback) { + return GetNoSlot(value, params, callback); + } + + private: + WriteBarrierTypeForCagedHeapPolicy() = delete; + + template + static V8_INLINE WriteBarrier::Type GetNoSlot(const void* value, + WriteBarrier::Params& params, + HeapHandleCallback) { if (!TryGetCagedHeap(value, value, params)) { return WriteBarrier::Type::kNone; } @@ -159,14 +180,14 @@ class V8_EXPORT WriteBarrierTypeForCagedHeapPolicy final { return SetAndReturnType(params); } - private: - WriteBarrierTypeForCagedHeapPolicy() = delete; - template struct ValueModeDispatch; static V8_INLINE bool TryGetCagedHeap(const void* slot, const void* value, WriteBarrier::Params& params) { + // TODO(chromium:1056170): Check if the null check can be folded in with + // the rest of the write barrier. + if (!value) return false; params.start = reinterpret_cast(value) & ~(api_constants::kCagedHeapReservationAlignment - 1); const uintptr_t slot_offset = @@ -257,6 +278,15 @@ class V8_EXPORT WriteBarrierTypeForNonCagedHeapPolicy final { return ValueModeDispatch::Get(slot, value, params, callback); } + template + static V8_INLINE WriteBarrier::Type Get(const void* value, + WriteBarrier::Params& params, + HeapHandleCallback callback) { + // The slot will never be used in `Get()` below. + return Get(nullptr, value, params, + callback); + } + template static V8_INLINE WriteBarrier::Type GetForExternallyReferenced( const void* value, WriteBarrier::Params& params, @@ -330,6 +360,13 @@ WriteBarrier::Type WriteBarrier::GetWriteBarrierType( slot, nullptr, params, callback); } +// static +WriteBarrier::Type WriteBarrier::GetWriteBarrierType( + const void* value, WriteBarrier::Params& params) { + return WriteBarrierTypePolicy::Get(value, params, + []() {}); +} + // static template WriteBarrier::Type diff --git a/deps/v8/include/cppgc/liveness-broker.h b/deps/v8/include/cppgc/liveness-broker.h index e449091280d9d7..c94eef0d4acdad 100644 --- a/deps/v8/include/cppgc/liveness-broker.h +++ b/deps/v8/include/cppgc/liveness-broker.h @@ -44,7 +44,10 @@ class V8_EXPORT LivenessBroker final { public: template bool IsHeapObjectAlive(const T* object) const { - return object && + // nullptr objects are considered alive to allow weakness to be used from + // stack while running into a conservative GC. Treating nullptr as dead + // would mean that e.g. custom collectins could not be strongified on stack. 
+ return !object || IsHeapObjectAliveImpl( TraceTrait::GetTraceDescriptor(object).base_object_payload); } diff --git a/deps/v8/include/cppgc/member.h b/deps/v8/include/cppgc/member.h index d0bf414c69d7f5..38105b8e4323b9 100644 --- a/deps/v8/include/cppgc/member.h +++ b/deps/v8/include/cppgc/member.h @@ -218,6 +218,8 @@ class BasicMember final : private MemberBase, private CheckingPolicy { void ClearFromGC() const { MemberBase::ClearFromGC(); } + T* GetFromGC() const { return Get(); } + friend class cppgc::Visitor; template friend struct cppgc::TraceTrait; diff --git a/deps/v8/include/cppgc/persistent.h b/deps/v8/include/cppgc/persistent.h index 03b5e5b06b5e28..b83a464576e78c 100644 --- a/deps/v8/include/cppgc/persistent.h +++ b/deps/v8/include/cppgc/persistent.h @@ -41,7 +41,7 @@ class PersistentBase { node_ = nullptr; } - private: + protected: mutable const void* raw_ = nullptr; mutable PersistentNode* node_ = nullptr; @@ -259,6 +259,12 @@ class BasicPersistent final : public PersistentBase, } } + // Set Get() for details. + V8_CLANG_NO_SANITIZE("cfi-unrelated-cast") + T* GetFromGC() const { + return static_cast(const_cast(GetValue())); + } + friend class cppgc::Visitor; }; diff --git a/deps/v8/include/cppgc/platform.h b/deps/v8/include/cppgc/platform.h index 2d933d620dc011..3276a26b6520b6 100644 --- a/deps/v8/include/cppgc/platform.h +++ b/deps/v8/include/cppgc/platform.h @@ -148,6 +148,7 @@ namespace internal { V8_EXPORT void Abort(); } // namespace internal + } // namespace cppgc #endif // INCLUDE_CPPGC_PLATFORM_H_ diff --git a/deps/v8/include/cppgc/visitor.h b/deps/v8/include/cppgc/visitor.h index 98de9957bd66ac..57e2ce3963af1e 100644 --- a/deps/v8/include/cppgc/visitor.h +++ b/deps/v8/include/cppgc/visitor.h @@ -12,6 +12,7 @@ #include "cppgc/internal/pointer-policies.h" #include "cppgc/liveness-broker.h" #include "cppgc/member.h" +#include "cppgc/sentinel-pointer.h" #include "cppgc/source-location.h" #include "cppgc/trace-trait.h" #include "cppgc/type-traits.h" @@ -318,10 +319,10 @@ class V8_EXPORT Visitor { template static void HandleWeak(const LivenessBroker& info, const void* object) { const PointerType* weak = static_cast(object); + auto* raw_ptr = weak->GetFromGC(); // Sentinel values are preserved for weak pointers. 
- if (*weak == kSentinelPointer) return; - const auto* raw = weak->Get(); - if (!info.IsHeapObjectAlive(raw)) { + if (raw_ptr == kSentinelPointer) return; + if (!info.IsHeapObjectAlive(raw_ptr)) { weak->ClearFromGC(); } } @@ -335,11 +336,11 @@ class V8_EXPORT Visitor { static_assert(internal::IsGarbageCollectedOrMixinType::value, "Persistent's pointee type must be GarbageCollected or " "GarbageCollectedMixin"); - if (!p.Get()) { + auto* ptr = p.GetFromGC(); + if (!ptr) { return; } - VisitRoot(p.Get(), TraceTrait::GetTraceDescriptor(p.Get()), - loc); + VisitRoot(ptr, TraceTrait::GetTraceDescriptor(ptr), loc); } template < @@ -354,7 +355,8 @@ class V8_EXPORT Visitor { "GarbageCollectedMixin"); static_assert(!internal::IsAllocatedOnCompactableSpace::value, "Weak references to compactable objects are not allowed"); - VisitWeakRoot(p.Get(), TraceTrait::GetTraceDescriptor(p.Get()), + auto* ptr = p.GetFromGC(); + VisitWeakRoot(ptr, TraceTrait::GetTraceDescriptor(ptr), &HandleWeak, &p, loc); } diff --git a/deps/v8/include/js_protocol.pdl b/deps/v8/include/js_protocol.pdl index 63baa3da13b843..ebf9eb7fe82b14 100644 --- a/deps/v8/include/js_protocol.pdl +++ b/deps/v8/include/js_protocol.pdl @@ -1672,6 +1672,8 @@ domain Runtime parameters RemoteObject object object hints + # Identifier of the context where the call was made. + experimental optional ExecutionContextId executionContextId # This domain is deprecated. deprecated domain Schema diff --git a/deps/v8/include/v8-fast-api-calls.h b/deps/v8/include/v8-fast-api-calls.h index 1848800b488683..5dc7473eaa95c2 100644 --- a/deps/v8/include/v8-fast-api-calls.h +++ b/deps/v8/include/v8-fast-api-calls.h @@ -225,8 +225,9 @@ #include #include -#include "v8.h" // NOLINT(build/include_directory) -#include "v8config.h" // NOLINT(build/include_directory) +#include "v8-internal.h" // NOLINT(build/include_directory) +#include "v8.h" // NOLINT(build/include_directory) +#include "v8config.h" // NOLINT(build/include_directory) namespace v8 { @@ -298,10 +299,36 @@ class CTypeInfo { Flags flags_; }; +struct FastApiTypedArrayBase { + public: + // Returns the length in number of elements. + size_t V8_EXPORT length() const { return length_; } + // Checks whether the given index is within the bounds of the collection. + void V8_EXPORT ValidateIndex(size_t index) const; + + protected: + size_t length_ = 0; +}; + template -struct FastApiTypedArray { - T* data; // should include the typed array offset applied - size_t length; // length in number of elements +struct FastApiTypedArray : public FastApiTypedArrayBase { + public: + V8_INLINE T get(size_t index) const { +#ifdef DEBUG + ValidateIndex(index); +#endif // DEBUG + T tmp; + memcpy(&tmp, reinterpret_cast(data_) + index, sizeof(T)); + return tmp; + } + + private: + // This pointer should include the typed array offset applied. + // It's not guaranteed that it's aligned to sizeof(T), it's only + // guaranteed that it's 4-byte aligned, so for 8-byte types we need to + // provide a special implementation for reading from it, which hides + // the possibly unaligned read in the `get` method. + void* data_; }; // Any TypedArray. 
It uses kTypedArrayBit with base type void @@ -578,7 +605,7 @@ PRIMITIVE_C_TYPES(DEFINE_TYPE_INFO_TRAITS) #define SPECIALIZE_GET_TYPE_INFO_HELPER_FOR_TA(T, Enum) \ template <> \ - struct TypeInfoHelper> { \ + struct TypeInfoHelper&> { \ static constexpr CTypeInfo::Flags Flags() { \ return CTypeInfo::Flags::kNone; \ } \ @@ -770,6 +797,10 @@ CFunction CFunction::ArgUnwrap::Make(R (*func)(Args...)) { using CFunctionBuilder = internal::CFunctionBuilder; +static constexpr CTypeInfo kTypeInfoInt32 = CTypeInfo(CTypeInfo::Type::kInt32); +static constexpr CTypeInfo kTypeInfoFloat64 = + CTypeInfo(CTypeInfo::Type::kFloat64); + /** * Copies the contents of this JavaScript array to a C++ buffer with * a given max_length. A CTypeInfo is passed as an argument, @@ -783,8 +814,22 @@ using CFunctionBuilder = internal::CFunctionBuilder; * returns true on success. `type_info` will be used for conversions. */ template -bool CopyAndConvertArrayToCppBuffer(Local src, T* dst, - uint32_t max_length); +bool V8_EXPORT V8_WARN_UNUSED_RESULT TryCopyAndConvertArrayToCppBuffer( + Local src, T* dst, uint32_t max_length); + +template <> +inline bool V8_WARN_UNUSED_RESULT +TryCopyAndConvertArrayToCppBuffer<&kTypeInfoInt32, int32_t>( + Local src, int32_t* dst, uint32_t max_length) { + return CopyAndConvertArrayToCppBufferInt32(src, dst, max_length); +} + +template <> +inline bool V8_WARN_UNUSED_RESULT +TryCopyAndConvertArrayToCppBuffer<&kTypeInfoFloat64, double>( + Local src, double* dst, uint32_t max_length) { + return CopyAndConvertArrayToCppBufferFloat64(src, dst, max_length); +} } // namespace v8 diff --git a/deps/v8/include/v8-inspector.h b/deps/v8/include/v8-inspector.h index 0c19104a3774ab..e6621ccd75c591 100644 --- a/deps/v8/include/v8-inspector.h +++ b/deps/v8/include/v8-inspector.h @@ -194,9 +194,6 @@ class V8_EXPORT V8InspectorClient { v8::Local, v8::Local) { return nullptr; } - virtual bool formatAccessorsAsProperties(v8::Local) { - return false; - } virtual bool isInspectableHeapObject(v8::Local) { return true; } virtual v8::Local ensureDefaultContextInGroup( diff --git a/deps/v8/include/v8-internal.h b/deps/v8/include/v8-internal.h index 1826dd6fca2eb3..0222ab2f7e1199 100644 --- a/deps/v8/include/v8-internal.h +++ b/deps/v8/include/v8-internal.h @@ -15,9 +15,12 @@ namespace v8 { +class Array; class Context; class Data; class Isolate; +template +class Local; namespace internal { @@ -185,6 +188,8 @@ V8_EXPORT internal::Isolate* IsolateFromNeverReadOnlySpaceObject(Address obj); // language mode is strict. V8_EXPORT bool ShouldThrowOnError(v8::internal::Isolate* isolate); +V8_EXPORT bool CanHaveInternalField(int instance_type); + /** * This class exports constants and functionality from within v8 that * is necessary to implement inline functions in the v8 api. 
Don't @@ -263,8 +268,9 @@ class Internals { static const int kOddballType = 0x43; static const int kForeignType = 0x46; static const int kJSSpecialApiObjectType = 0x410; - static const int kJSApiObjectType = 0x420; static const int kJSObjectType = 0x421; + static const int kFirstJSApiObjectType = 0x422; + static const int kLastJSApiObjectType = 0x80A; static const int kUndefinedOddballKind = 5; static const int kNullOddballKind = 3; @@ -505,6 +511,15 @@ V8_INLINE void PerformCastCheck(T* data) { class BackingStoreBase {}; } // namespace internal + +V8_EXPORT bool CopyAndConvertArrayToCppBufferInt32(Local src, + int32_t* dst, + uint32_t max_length); + +V8_EXPORT bool CopyAndConvertArrayToCppBufferFloat64(Local src, + double* dst, + uint32_t max_length); + } // namespace v8 #endif // INCLUDE_V8_INTERNAL_H_ diff --git a/deps/v8/include/v8-version.h b/deps/v8/include/v8-version.h index 30a4182357505d..845d32f360a8df 100644 --- a/deps/v8/include/v8-version.h +++ b/deps/v8/include/v8-version.h @@ -9,9 +9,9 @@ // NOTE these macros are used by some of the tool scripts and the build // system so their names cannot be changed without changing the scripts. #define V8_MAJOR_VERSION 9 -#define V8_MINOR_VERSION 3 -#define V8_BUILD_NUMBER 345 -#define V8_PATCH_LEVEL 19 +#define V8_MINOR_VERSION 4 +#define V8_BUILD_NUMBER 146 +#define V8_PATCH_LEVEL 18 // Use 1 for candidates and 0 otherwise. // (Boolean macro values are not supported by all preprocessors.) diff --git a/deps/v8/include/v8.h b/deps/v8/include/v8.h index 1cc78f63b8ee7e..8e664887bec911 100644 --- a/deps/v8/include/v8.h +++ b/deps/v8/include/v8.h @@ -128,6 +128,7 @@ template class PropertyCallbackInfo; template class ReturnValue; namespace internal { +class BackgroundDeserializeTask; class BasicTracedReferenceExtractor; class ExternalString; class FunctionCallbackArguments; @@ -1441,7 +1442,7 @@ class ScriptOriginOptions { class ScriptOrigin { public: #if defined(_MSC_VER) && _MSC_VER >= 1910 /* Disable on VS2015 */ - V8_DEPRECATE_SOON("Use constructor with primitive C++ types") + V8_DEPRECATED("Use constructor with primitive C++ types") #endif V8_INLINE explicit ScriptOrigin( Local resource_name, Local resource_line_offset, @@ -1454,7 +1455,7 @@ class ScriptOrigin { Local is_module = Local(), Local host_defined_options = Local()); #if defined(_MSC_VER) && _MSC_VER >= 1910 /* Disable on VS2015 */ - V8_DEPRECATE_SOON("Use constructor that takes an isolate") + V8_DEPRECATED("Use constructor that takes an isolate") #endif V8_INLINE explicit ScriptOrigin( Local resource_name, int resource_line_offset = 0, @@ -1474,11 +1475,11 @@ class ScriptOrigin { Local host_defined_options = Local()); V8_INLINE Local ResourceName() const; - V8_DEPRECATE_SOON("Use getter with primitvie C++ types.") + V8_DEPRECATED("Use getter with primitvie C++ types.") V8_INLINE Local ResourceLineOffset() const; - V8_DEPRECATE_SOON("Use getter with primitvie C++ types.") + V8_DEPRECATED("Use getter with primitvie C++ types.") V8_INLINE Local ResourceColumnOffset() const; - V8_DEPRECATE_SOON("Use getter with primitvie C++ types.") + V8_DEPRECATED("Use getter with primitvie C++ types.") V8_INLINE Local ScriptID() const; V8_INLINE int LineOffset() const; V8_INLINE int ColumnOffset() const; @@ -1630,14 +1631,14 @@ class V8_EXPORT Module : public Data { /** * Returns the number of modules requested by this module. 
*/ - V8_DEPRECATE_SOON("Use Module::GetModuleRequests() and FixedArray::Length().") + V8_DEPRECATED("Use Module::GetModuleRequests() and FixedArray::Length().") int GetModuleRequestsLength() const; /** * Returns the ith module specifier in this module. * i must be < GetModuleRequestsLength() and >= 0. */ - V8_DEPRECATE_SOON( + V8_DEPRECATED( "Use Module::GetModuleRequests() and ModuleRequest::GetSpecifier().") Local GetModuleRequest(int i) const; @@ -1645,7 +1646,7 @@ class V8_EXPORT Module : public Data { * Returns the source location (line number and column number) of the ith * module specifier's first occurrence in this module. */ - V8_DEPRECATE_SOON( + V8_DEPRECATED( "Use Module::GetModuleRequests(), ModuleRequest::GetSourceOffset(), and " "Module::SourceOffsetToLocation().") Location GetModuleRequestLocation(int i) const; @@ -1680,7 +1681,7 @@ class V8_EXPORT Module : public Data { * instantiation. (In the case where the callback throws an exception, that * exception is propagated.) */ - V8_DEPRECATE_SOON( + V8_DEPRECATED( "Use the version of InstantiateModule that takes a ResolveModuleCallback " "parameter") V8_WARN_UNUSED_RESULT Maybe InstantiateModule(Local context, @@ -1771,13 +1772,6 @@ class V8_EXPORT Module : public Data { */ V8_WARN_UNUSED_RESULT Maybe SetSyntheticModuleExport( Isolate* isolate, Local export_name, Local export_value); - V8_DEPRECATED( - "Use the preceding SetSyntheticModuleExport with an Isolate parameter, " - "instead of the one that follows. The former will throw a runtime " - "error if called for an export that doesn't exist (as per spec); " - "the latter will crash with a failed CHECK().") - void SetSyntheticModuleExport(Local export_name, - Local export_value); V8_INLINE static Module* Cast(Data* data); @@ -1818,6 +1812,8 @@ enum class ScriptType { kClassic, kModule }; */ class V8_EXPORT ScriptCompiler { public: + class ConsumeCodeCacheTask; + /** * Compilation data that the embedder can cache and pass back to speed up * future compilations. The data is produced if the CompilerOptions passed to @@ -1861,12 +1857,15 @@ class V8_EXPORT ScriptCompiler { */ class Source { public: - // Source takes ownership of CachedData. + // Source takes ownership of both CachedData and CodeCacheConsumeTask. V8_INLINE Source(Local source_string, const ScriptOrigin& origin, - CachedData* cached_data = nullptr); - V8_INLINE explicit Source(Local source_string, - CachedData* cached_data = nullptr); - V8_INLINE ~Source(); + CachedData* cached_data = nullptr, + ConsumeCodeCacheTask* consume_cache_task = nullptr); + // Source takes ownership of both CachedData and CodeCacheConsumeTask. + V8_INLINE explicit Source( + Local source_string, CachedData* cached_data = nullptr, + ConsumeCodeCacheTask* consume_cache_task = nullptr); + V8_INLINE ~Source() = default; // Ownership of the CachedData or its buffers is *not* transferred to the // caller. The CachedData object is alive as long as the Source object is @@ -1875,10 +1874,6 @@ class V8_EXPORT ScriptCompiler { V8_INLINE const ScriptOriginOptions& GetResourceOptions() const; - // Prevent copying. - Source(const Source&) = delete; - Source& operator=(const Source&) = delete; - private: friend class ScriptCompiler; @@ -1895,7 +1890,8 @@ class V8_EXPORT ScriptCompiler { // Cached data from previous compilation (if a kConsume*Cache flag is // set), or hold newly generated cache data (kProduce*Cache flags) are // set when calling a compile method. 
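With GetModuleRequestsLength(), GetModuleRequest() and GetModuleRequestLocation() now fully deprecated above, resolution code would walk the FixedArray returned by GetModuleRequests() instead. A hedged sketch, assuming context and module are in scope:

    v8::Local<v8::FixedArray> requests = module->GetModuleRequests();
    for (int i = 0; i < requests->Length(); ++i) {
      v8::Local<v8::ModuleRequest> request =
          requests->Get(context, i).As<v8::ModuleRequest>();
      v8::Local<v8::String> specifier = request->GetSpecifier();
      // Replaces GetModuleRequestLocation(i):
      v8::Location location =
          module->SourceOffsetToLocation(request->GetSourceOffset());
      // ... resolve `specifier`, e.g. against a module map ...
    }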
- CachedData* cached_data; + std::unique_ptr cached_data; + std::unique_ptr consume_cache_task; }; /** @@ -1957,12 +1953,6 @@ class V8_EXPORT ScriptCompiler { public: enum Encoding { ONE_BYTE, TWO_BYTE, UTF8, WINDOWS_1252 }; -#if defined(_MSC_VER) && _MSC_VER >= 1910 /* Disable on VS2015 */ - V8_DEPRECATED( - "This class takes ownership of source_stream, so use the constructor " - "taking a unique_ptr to make these semantics clearer") -#endif - StreamedSource(ExternalSourceStream* source_stream, Encoding encoding); StreamedSource(std::unique_ptr source_stream, Encoding encoding); ~StreamedSource(); @@ -1994,6 +1984,26 @@ class V8_EXPORT ScriptCompiler { internal::ScriptStreamingData* data_; }; + /** + * A task which the embedder must run on a background thread to + * consume a V8 code cache. Returned by + * ScriptCompiler::StarConsumingCodeCache. + */ + class V8_EXPORT ConsumeCodeCacheTask final { + public: + ~ConsumeCodeCacheTask(); + + void Run(); + + private: + friend class ScriptCompiler; + + explicit ConsumeCodeCacheTask( + std::unique_ptr impl); + + std::unique_ptr impl_; + }; + enum CompileOptions { kNoCompileOptions = 0, kConsumeCodeCache, @@ -2067,14 +2077,13 @@ class V8_EXPORT ScriptCompiler { * This API allows to start the streaming with as little data as possible, and * the remaining data (for example, the ScriptOrigin) is passed to Compile. */ - V8_DEPRECATED("Use ScriptCompiler::StartStreaming instead.") - static ScriptStreamingTask* StartStreamingScript( - Isolate* isolate, StreamedSource* source, - CompileOptions options = kNoCompileOptions); static ScriptStreamingTask* StartStreaming( Isolate* isolate, StreamedSource* source, ScriptType type = ScriptType::kClassic); + static ConsumeCodeCacheTask* StartConsumingCodeCache( + Isolate* isolate, std::unique_ptr source); + /** * Compiles a streamed script (bound to current context). * @@ -4309,11 +4318,13 @@ class V8_EXPORT Object : public Value { /** * Returns the context in which the object was created. */ + // TODO(chromium:1166077): Mark as deprecate once users are updated. V8_DEPRECATE_SOON("Use MaybeLocal GetCreationContext()") Local CreationContext(); MaybeLocal GetCreationContext(); /** Same as above, but works for Persistents */ + // TODO(chromium:1166077): Mark as deprecate once users are updated. 
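A rough sketch of how the new off-thread code-cache deserialization could be wired up by an embedder. The cache buffer, its lifetime and the inline Run() call are placeholder assumptions (in practice Run() would be posted to a worker thread), and StartConsumingCodeCache returns nullptr unless --concurrent-cache-deserialization is enabled:

    // cache_bytes/cache_length come from an earlier CreateCodeCache() run and
    // are assumed to outlive compilation.
    auto task_input = std::make_unique<v8::ScriptCompiler::CachedData>(
        cache_bytes, cache_length,
        v8::ScriptCompiler::CachedData::BufferNotOwned);
    std::unique_ptr<v8::ScriptCompiler::ConsumeCodeCacheTask> task(
        v8::ScriptCompiler::StartConsumingCodeCache(isolate,
                                                    std::move(task_input)));
    if (task) task->Run();  // normally run on a background thread

    // The Source takes ownership of both the CachedData and the finished task.
    auto* cached_data = new v8::ScriptCompiler::CachedData(
        cache_bytes, cache_length,
        v8::ScriptCompiler::CachedData::BufferNotOwned);
    v8::ScriptCompiler::Source source(source_text, origin, cached_data,
                                      task.release());
    v8::MaybeLocal<v8::UnboundScript> unbound =
        v8::ScriptCompiler::CompileUnboundScript(
            isolate, &source, v8::ScriptCompiler::kConsumeCodeCache);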
V8_DEPRECATE_SOON( "Use MaybeLocal GetCreationContext(const " "PersistentBase& object)") @@ -6524,9 +6535,9 @@ class V8_EXPORT FunctionTemplate : public Template { Local signature = Local(), int length = 0, ConstructorBehavior behavior = ConstructorBehavior::kAllow, SideEffectType side_effect_type = SideEffectType::kHasSideEffect, - const CFunction* c_function = nullptr, uint8_t instance_type = 0, - uint8_t allowed_receiver_range_start = 0, - uint8_t allowed_receiver_range_end = 0); + const CFunction* c_function = nullptr, uint16_t instance_type = 0, + uint16_t allowed_receiver_instance_type_range_start = 0, + uint16_t allowed_receiver_instance_type_range_end = 0); /** Creates a function template for multiple overloaded fast API calls.*/ static Local NewWithCFunctionOverloads( @@ -7246,7 +7257,9 @@ using MessageCallback = void (*)(Local message, Local data); // --- Tracing --- -using LogEventCallback = void (*)(const char* name, int event); +enum LogEventStatus : int { kStart = 0, kEnd = 1, kStamp = 2 }; +using LogEventCallback = void (*)(const char* name, + int /* LogEventStatus */ status); /** * Create new error objects by calling the corresponding error object @@ -8369,11 +8382,6 @@ class V8_EXPORT Isolate { */ int embedder_wrapper_type_index = -1; int embedder_wrapper_object_index = -1; - - V8_DEPRECATED( - "Setting this has no effect. Embedders should ignore import assertions " - "that they do not use.") - std::vector supported_import_assertions; }; /** @@ -8712,7 +8720,7 @@ class V8_EXPORT Isolate { * This specifies the callback called by the upcoming dynamic * import() language feature to load modules. */ - V8_DEPRECATE_SOON( + V8_DEPRECATED( "Use the version of SetHostImportModuleDynamicallyCallback that takes a " "HostImportModuleDynamicallyWithImportAssertionsCallback instead") void SetHostImportModuleDynamicallyCallback( @@ -8885,10 +8893,6 @@ class V8_EXPORT Isolate { std::unique_ptr delegate, MeasureMemoryExecution execution = MeasureMemoryExecution::kDefault); - V8_DEPRECATED("Use the version with a delegate") - MaybeLocal MeasureMemory(Local context, - MeasureMemoryMode mode); - /** * Get a call stack sample from the isolate. * \param state Execution state. @@ -9566,13 +9570,6 @@ class V8_EXPORT Isolate { * Set the callback to invoke to check if code generation from * strings should be allowed. */ - V8_DEPRECATED( - "Use Isolate::SetModifyCodeGenerationFromStringsCallback with " - "ModifyCodeGenerationFromStringsCallback2 instead. See " - "http://crbug.com/1096017 and TC39 Dynamic Code Brand Checks proposal " - "at https://github.com/tc39/proposal-dynamic-code-brand-checks.") - void SetModifyCodeGenerationFromStringsCallback( - ModifyCodeGenerationFromStringsCallback callback); void SetModifyCodeGenerationFromStringsCallback( ModifyCodeGenerationFromStringsCallback2 callback); @@ -9920,30 +9917,6 @@ class V8_EXPORT V8 { */ static void ShutdownPlatform(); -#if V8_OS_POSIX - /** - * Give the V8 signal handler a chance to handle a fault. - * - * This function determines whether a memory access violation can be recovered - * by V8. If so, it will return true and modify context to return to a code - * fragment that can recover from the fault. Otherwise, TryHandleSignal will - * return false. - * - * The parameters to this function correspond to those passed to a Linux - * signal handler. - * - * \param signal_number The signal number. - * - * \param info A pointer to the siginfo_t structure provided to the signal - * handler. 
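The LogEventCallback signature now documents its second argument through the new LogEventStatus enum. A small sketch of an embedder-side logger; the handler body and the registration comment are illustrative only:

    void OnV8LogEvent(const char* name, int status) {
      switch (status) {
        case v8::LogEventStatus::kStart: /* event `name` started   */ break;
        case v8::LogEventStatus::kEnd:   /* event `name` finished  */ break;
        case v8::LogEventStatus::kStamp: /* single timestamp event */ break;
      }
    }

    // Registration, given a live isolate:
    //   isolate->SetEventLogger(OnV8LogEvent);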
- * - * \param context The third argument passed to the Linux signal handler, which - * points to a ucontext_t structure. - */ - V8_DEPRECATED("Use TryHandleWebAssemblyTrapPosix") - static bool TryHandleSignal(int signal_number, void* info, void* context); -#endif // V8_OS_POSIX - /** * Activate trap-based bounds checking for WebAssembly. * @@ -9971,15 +9944,6 @@ class V8_EXPORT V8 { */ static void GetSharedMemoryStatistics(SharedMemoryStatistics* statistics); - /** - * Notifies V8 that the process is cross-origin-isolated, which enables - * defining the SharedArrayBuffer function on the global object of Contexts. - */ - V8_DEPRECATED( - "Use the command line argument --enable-sharedarraybuffer-per-context " - "together with SetSharedArrayBufferConstructorEnabledCallback") - static void SetIsCrossOriginIsolated(); - private: V8(); @@ -11590,7 +11554,8 @@ int ScriptOrigin::ScriptId() const { return script_id_; } Local ScriptOrigin::SourceMapUrl() const { return source_map_url_; } ScriptCompiler::Source::Source(Local string, const ScriptOrigin& origin, - CachedData* data) + CachedData* data, + ConsumeCodeCacheTask* consume_cache_task) : source_string(string), resource_name(origin.ResourceName()), resource_line_offset(origin.LineOffset()), @@ -11598,21 +11563,18 @@ ScriptCompiler::Source::Source(Local string, const ScriptOrigin& origin, resource_options(origin.Options()), source_map_url(origin.SourceMapUrl()), host_defined_options(origin.HostDefinedOptions()), - cached_data(data) {} - -ScriptCompiler::Source::Source(Local string, - CachedData* data) - : source_string(string), cached_data(data) {} - - -ScriptCompiler::Source::~Source() { - delete cached_data; -} + cached_data(data), + consume_cache_task(consume_cache_task) {} +ScriptCompiler::Source::Source(Local string, CachedData* data, + ConsumeCodeCacheTask* consume_cache_task) + : source_string(string), + cached_data(data), + consume_cache_task(consume_cache_task) {} const ScriptCompiler::CachedData* ScriptCompiler::Source::GetCachedData() const { - return cached_data; + return cached_data.get(); } const ScriptOriginOptions& ScriptCompiler::Source::GetResourceOptions() const { @@ -11665,10 +11627,8 @@ Local Object::GetInternalField(int index) { A obj = *reinterpret_cast(this); // Fast path: If the object is a plain JSObject, which is the common case, we // know where to find the internal fields and can return the value directly. - auto instance_type = I::GetInstanceType(obj); - if (instance_type == I::kJSObjectType || - instance_type == I::kJSApiObjectType || - instance_type == I::kJSSpecialApiObjectType) { + int instance_type = I::GetInstanceType(obj); + if (v8::internal::CanHaveInternalField(instance_type)) { int offset = I::kJSObjectHeaderSize + (I::kEmbedderDataSlotSize * index); A value = I::ReadRawField(obj, offset); #ifdef V8_COMPRESS_POINTERS @@ -11694,9 +11654,7 @@ void* Object::GetAlignedPointerFromInternalField(int index) { // Fast path: If the object is a plain JSObject, which is the common case, we // know where to find the internal fields and can return the value directly. 
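The fast path above now covers the whole JS API object instance-type range via CanHaveInternalField() instead of two exact type comparisons. On the embedder side this is the usual internal-field round trip, sketched here with a hypothetical wrapped type:

    struct NativeWidget { int id; };

    // Template setup (once):
    v8::Local<v8::ObjectTemplate> templ = v8::ObjectTemplate::New(isolate);
    templ->SetInternalFieldCount(1);

    // Wrapping a native object:
    v8::Local<v8::Object> wrapper =
        templ->NewInstance(context).ToLocalChecked();
    wrapper->SetAlignedPointerInInternalField(0, new NativeWidget{42});

    // Reading it back later goes through the inlined fast path shown above:
    auto* widget = static_cast<NativeWidget*>(
        wrapper->GetAlignedPointerFromInternalField(0));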
auto instance_type = I::GetInstanceType(obj); - if (V8_LIKELY(instance_type == I::kJSObjectType || - instance_type == I::kJSApiObjectType || - instance_type == I::kJSSpecialApiObjectType)) { + if (v8::internal::CanHaveInternalField(instance_type)) { int offset = I::kJSObjectHeaderSize + (I::kEmbedderDataSlotSize * index); #ifdef V8_HEAP_SANDBOX offset += I::kEmbedderDataSlotRawPayloadOffset; diff --git a/deps/v8/include/v8config.h b/deps/v8/include/v8config.h index c1bb691f8789e5..b010b65dfd648b 100644 --- a/deps/v8/include/v8config.h +++ b/deps/v8/include/v8config.h @@ -86,51 +86,80 @@ path. Add it with -I to the command line # define V8_OS_ANDROID 1 # define V8_OS_LINUX 1 # define V8_OS_POSIX 1 +# define V8_OS_STRING "android" + #elif defined(__APPLE__) # define V8_OS_BSD 1 # define V8_OS_MACOSX 1 # define V8_OS_POSIX 1 # if defined(TARGET_OS_IPHONE) && TARGET_OS_IPHONE # define V8_OS_IOS 1 +# define V8_OS_STRING "ios" +# else +# define V8_OS_STRING "macos" # endif // defined(TARGET_OS_IPHONE) && TARGET_OS_IPHONE + #elif defined(__CYGWIN__) # define V8_OS_CYGWIN 1 # define V8_OS_POSIX 1 +# define V8_OS_STRING "cygwin" + #elif defined(__linux__) # define V8_OS_LINUX 1 # define V8_OS_POSIX 1 +# define V8_OS_STRING "linux" + #elif defined(__sun) # define V8_OS_POSIX 1 # define V8_OS_SOLARIS 1 +# define V8_OS_STRING "sun" + #elif defined(STARBOARD) # define V8_OS_STARBOARD 1 +# define V8_OS_STRING "starboard" + #elif defined(_AIX) -#define V8_OS_POSIX 1 -#define V8_OS_AIX 1 +# define V8_OS_POSIX 1 +# define V8_OS_AIX 1 +# define V8_OS_STRING "aix" + #elif defined(__FreeBSD__) # define V8_OS_BSD 1 # define V8_OS_FREEBSD 1 # define V8_OS_POSIX 1 +# define V8_OS_STRING "freebsd" + #elif defined(__Fuchsia__) # define V8_OS_FUCHSIA 1 # define V8_OS_POSIX 1 +# define V8_OS_STRING "fuchsia" + #elif defined(__DragonFly__) # define V8_OS_BSD 1 # define V8_OS_DRAGONFLYBSD 1 # define V8_OS_POSIX 1 +# define V8_OS_STRING "dragonflybsd" + #elif defined(__NetBSD__) # define V8_OS_BSD 1 # define V8_OS_NETBSD 1 # define V8_OS_POSIX 1 +# define V8_OS_STRING "netbsd" + #elif defined(__OpenBSD__) # define V8_OS_BSD 1 # define V8_OS_OPENBSD 1 # define V8_OS_POSIX 1 +# define V8_OS_STRING "openbsd" + #elif defined(__QNXNTO__) # define V8_OS_POSIX 1 # define V8_OS_QNX 1 +# define V8_OS_STRING "qnx" + #elif defined(_WIN32) # define V8_OS_WIN 1 +# define V8_OS_STRING "windows" #endif // ----------------------------------------------------------------------------- @@ -195,6 +224,22 @@ path. Add it with -I to the command line #endif // V8_HAVE_TARGET_OS +#if defined(V8_TARGET_OS_ANDROID) +# define V8_TARGET_OS_STRING "android" +#elif defined(V8_TARGET_OS_FUCHSIA) +# define V8_TARGET_OS_STRING "fuchsia" +#elif defined(V8_TARGET_OS_IOS) +# define V8_TARGET_OS_STRING "ios" +#elif defined(V8_TARGET_OS_LINUX) +# define V8_TARGET_OS_STRING "linux" +#elif defined(V8_TARGET_OS_MACOSX) +# define V8_TARGET_OS_STRING "macos" +#elif defined(V8_TARGET_OS_WINDOWS) +# define V8_TARGET_OS_STRING "windows" +#else +# define V8_TARGET_OS_STRING "unknown" +#endif + // ----------------------------------------------------------------------------- // C library detection // diff --git a/deps/v8/infra/mb/mb_config.pyl b/deps/v8/infra/mb/mb_config.pyl index aaeda39f1c73b5..236dc1e8474103 100644 --- a/deps/v8/infra/mb/mb_config.pyl +++ b/deps/v8/infra/mb/mb_config.pyl @@ -62,7 +62,9 @@ 'V8 Linux - verify csa': 'release_x86_verify_csa', # Linux64. 
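For the V8_OS_STRING / V8_TARGET_OS_STRING macros added to v8config.h above (the target variant falls back to "unknown" when no V8_TARGET_OS_* is defined), a trivial usage sketch:

    #include <cstdio>
    #include "include/v8config.h"

    void PrintPlatformBanner() {
      std::printf("V8 built on %s, targeting %s\n", V8_OS_STRING,
                  V8_TARGET_OS_STRING);
    }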
'V8 Linux64 - builder': 'release_x64', + 'V8 Linux64 - builder (goma cache silo)': 'release_x64', 'V8 Linux64 - builder (reclient)': 'release_x64_reclient', + 'V8 Linux64 - builder (reclient compare)': 'release_x64_reclient', 'V8 Linux64 - debug builder': 'debug_x64', 'V8 Linux64 - dict tracking - debug - builder': 'debug_x64_dict_tracking_trybot', 'V8 Linux64 - external code space - debug - builder': 'debug_x64_external_code_space', @@ -104,6 +106,7 @@ # FYI. 'V8 iOS - sim': 'release_x64_ios_simulator', 'V8 Linux64 - debug - perfetto - builder': 'debug_x64_perfetto', + 'V8 Linux64 - disable runtime call stats': 'release_x64_disable_runtime_call_stats', 'V8 Linux64 - debug - single generation - builder': 'debug_x64_single_generation', 'V8 Linux64 - pointer compression': 'release_x64_pointer_compression', 'V8 Linux64 - pointer compression without dchecks': @@ -215,6 +218,7 @@ 'release_simulate_arm64_pointer_compression', 'v8_linux64_dbg_ng': 'debug_x64_trybot', 'v8_linux64_dict_tracking_dbg_ng': 'debug_x64_dict_tracking_trybot', + 'v8_linux64_disable_runtime_call_stats_rel': 'release_x64_disable_runtime_call_stats', 'v8_linux64_external_code_space_dbg_ng': 'debug_x64_external_code_space', 'v8_linux64_gc_stress_custom_snapshot_dbg_ng': 'debug_x64_trybot_custom', 'v8_linux64_gcc_compile_dbg': 'debug_x64_gcc', @@ -254,7 +258,10 @@ 'v8_mac_arm64_rel_ng': 'release_arm64', 'v8_mac_arm64_dbg_ng': 'debug_arm64', 'v8_mac_arm64_full_dbg_ng': 'full_debug_arm64', + 'v8_mac_arm64_compile_dbg': 'debug_arm64', 'v8_mac_arm64_compile_rel': 'release_arm64', + 'v8_mac_arm64_sim_compile_dbg': 'debug_simulate_arm64', + 'v8_mac_arm64_sim_compile_rel': 'release_simulate_arm64', 'v8_mac_arm64_sim_rel_ng': 'release_simulate_arm64_trybot', 'v8_mac_arm64_sim_dbg_ng': 'debug_simulate_arm64', 'v8_mac_arm64_sim_nodcheck_rel_ng': 'release_simulate_arm64', @@ -483,6 +490,8 @@ 'release_bot_no_goma', 'x64', 'minimal_symbols', 'msvc'], 'release_x64_correctness_fuzzer' : [ 'release_bot', 'x64', 'v8_correctness_fuzzer'], + 'release_x64_disable_runtime_call_stats': [ + 'release_bot', 'x64', 'v8_disable_runtime_call_stats'], 'release_x64_fuchsia': [ 'release_bot', 'x64', 'fuchsia'], 'release_x64_fuchsia_trybot': [ @@ -779,7 +788,7 @@ }, 'release': { - 'gn_args': 'is_debug=false', + 'gn_args': 'is_debug=false dcheck_always_on=false', }, 'release_bot': { @@ -876,6 +885,10 @@ 'gn_args': 'v8_control_flow_integrity=true', }, + 'v8_disable_runtime_call_stats': { + 'gn_args': 'v8_enable_runtime_call_stats=false', + }, + 'v8_enable_heap_sandbox': { 'gn_args': 'v8_enable_heap_sandbox=true', }, diff --git a/deps/v8/infra/playground/OWNERS b/deps/v8/infra/playground/OWNERS new file mode 100644 index 00000000000000..8082f8328b14df --- /dev/null +++ b/deps/v8/infra/playground/OWNERS @@ -0,0 +1,5 @@ +set noparent + +almuthanna@chromium.org +liviurau@chromium.org +tmrts@chromium.org \ No newline at end of file diff --git a/deps/v8/infra/playground/README.md b/deps/v8/infra/playground/README.md new file mode 100644 index 00000000000000..0e26001058892d --- /dev/null +++ b/deps/v8/infra/playground/README.md @@ -0,0 +1 @@ +This directory's purpose is test OWNERS enforcement. 
\ No newline at end of file diff --git a/deps/v8/infra/testing/builders.pyl b/deps/v8/infra/testing/builders.pyl index d40e4ed9e7a167..f37c66ba90a8ef 100644 --- a/deps/v8/infra/testing/builders.pyl +++ b/deps/v8/infra/testing/builders.pyl @@ -1517,8 +1517,8 @@ 'priority': 35, }, 'tests': [ - {'name': 'd8testing', 'shards': 2}, - {'name': 'd8testing', 'variant': 'extra', 'shards': 2}, + {'name': 'v8testing', 'shards': 2}, + {'name': 'v8testing', 'variant': 'extra', 'shards': 2}, ], }, 'V8 Mac - arm64 - sim - debug': { diff --git a/deps/v8/src/DEPS b/deps/v8/src/DEPS index 0a5e3b8480b84c..b3fcddf2f43e95 100644 --- a/deps/v8/src/DEPS +++ b/deps/v8/src/DEPS @@ -58,6 +58,7 @@ include_rules = [ "+src/trap-handler/handler-inside-posix.h", "+src/trap-handler/handler-inside-win.h", "+src/trap-handler/trap-handler.h", + "+src/trap-handler/trap-handler-simulator.h", "+testing/gtest/include/gtest/gtest_prod.h", "-src/libplatform", "-include/libplatform", diff --git a/deps/v8/src/api/api-arguments-inl.h b/deps/v8/src/api/api-arguments-inl.h index f6825e592225ec..786f849be6c148 100644 --- a/deps/v8/src/api/api-arguments-inl.h +++ b/deps/v8/src/api/api-arguments-inl.h @@ -76,7 +76,6 @@ inline JSReceiver FunctionCallbackArguments::holder() { CALLBACK_INFO, RECEIVER, Debug::k##ACCESSOR_KIND)) { \ return RETURN_VALUE(); \ } \ - VMState state(ISOLATE); \ ExternalCallbackScope call_scope(ISOLATE, FUNCTION_ADDR(F)); \ PropertyCallbackInfo callback_info(values_); @@ -85,7 +84,6 @@ inline JSReceiver FunctionCallbackArguments::holder() { if (ISOLATE->debug_execution_mode() == DebugInfo::kSideEffects) { \ return RETURN_VALUE(); \ } \ - VMState state(ISOLATE); \ ExternalCallbackScope call_scope(ISOLATE, FUNCTION_ADDR(F)); \ PropertyCallbackInfo callback_info(values_); @@ -149,7 +147,6 @@ Handle FunctionCallbackArguments::Call(CallHandlerInfo handler) { Debug::kNotAccessor)) { return Handle(); } - VMState state(isolate); ExternalCallbackScope call_scope(isolate, FUNCTION_ADDR(f)); FunctionCallbackInfo info(values_, argv_, argc_); f(info); diff --git a/deps/v8/src/api/api-inl.h b/deps/v8/src/api/api-inl.h index 9bd266395e18f6..c5c774800b77ac 100644 --- a/deps/v8/src/api/api-inl.h +++ b/deps/v8/src/api/api-inl.h @@ -9,6 +9,7 @@ #include "src/api/api.h" #include "src/execution/interrupts-scope.h" #include "src/execution/microtask-queue.h" +#include "src/execution/protectors.h" #include "src/handles/handles-inl.h" #include "src/heap/heap-inl.h" #include "src/objects/foreign-inl.h" @@ -279,20 +280,32 @@ bool CopyAndConvertArrayToCppBuffer(Local src, T* dst, i::DisallowGarbageCollection no_gc; i::JSArray obj = *reinterpret_cast(*src); + if (obj.IterationHasObservableEffects()) { + // The array has a custom iterator. 
+ return false; + } i::FixedArrayBase elements = obj.elements(); - if (obj.HasSmiElements()) { - CopySmiElementsToTypedBuffer(dst, length, i::FixedArray::cast(elements)); - return true; - } else if (obj.HasDoubleElements()) { - CopyDoubleElementsToTypedBuffer(dst, length, - i::FixedDoubleArray::cast(elements)); - return true; - } else { - return false; + switch (obj.GetElementsKind()) { + case i::PACKED_SMI_ELEMENTS: + CopySmiElementsToTypedBuffer(dst, length, i::FixedArray::cast(elements)); + return true; + case i::PACKED_DOUBLE_ELEMENTS: + CopyDoubleElementsToTypedBuffer(dst, length, + i::FixedDoubleArray::cast(elements)); + return true; + default: + return false; } } +template +inline bool V8_EXPORT TryCopyAndConvertArrayToCppBuffer(Local src, + T* dst, + uint32_t max_length) { + return CopyAndConvertArrayToCppBuffer(src, dst, max_length); +} + namespace internal { Handle HandleScopeImplementer::LastEnteredContext() { diff --git a/deps/v8/src/api/api-natives.cc b/deps/v8/src/api/api-natives.cc index 985f5956a82e6b..c64107f3b8cd5e 100644 --- a/deps/v8/src/api/api-natives.cc +++ b/deps/v8/src/api/api-natives.cc @@ -528,12 +528,14 @@ MaybeHandle InstantiateFunction( Handle::cast(parent_prototype)); } } - InstanceType function_type = - (!data->needs_access_check() && - data->GetNamedPropertyHandler().IsUndefined(isolate) && - data->GetIndexedPropertyHandler().IsUndefined(isolate)) - ? JS_API_OBJECT_TYPE - : JS_SPECIAL_API_OBJECT_TYPE; + InstanceType function_type = JS_SPECIAL_API_OBJECT_TYPE; + if (!data->needs_access_check() && + data->GetNamedPropertyHandler().IsUndefined(isolate) && + data->GetIndexedPropertyHandler().IsUndefined(isolate)) { + function_type = FLAG_embedder_instance_types && data->HasInstanceType() + ? static_cast(data->InstanceType()) + : JS_API_OBJECT_TYPE; + } Handle function = ApiNatives::CreateApiFunction( isolate, native_context, data, prototype, function_type, maybe_name); diff --git a/deps/v8/src/api/api.cc b/deps/v8/src/api/api.cc index 84295e5cde0b28..a8af304a530893 100644 --- a/deps/v8/src/api/api.cc +++ b/deps/v8/src/api/api.cc @@ -30,10 +30,11 @@ #include "src/builtins/builtins-utils.h" #include "src/codegen/compiler.h" #include "src/codegen/cpu-features.h" +#include "src/codegen/script-details.h" #include "src/common/assert-scope.h" #include "src/common/external-pointer.h" #include "src/common/globals.h" -#include "src/compiler-dispatcher/compiler-dispatcher.h" +#include "src/compiler-dispatcher/lazy-compile-dispatcher.h" #include "src/date/date.h" #include "src/debug/liveedit.h" #include "src/deoptimizer/deoptimizer.h" @@ -57,7 +58,7 @@ #include "src/init/v8.h" #include "src/json/json-parser.h" #include "src/json/json-stringifier.h" -#include "src/logging/counters.h" +#include "src/logging/counters-scopes.h" #include "src/logging/metrics.h" #include "src/logging/runtime-call-stats-scope.h" #include "src/logging/tracing-flags.h" @@ -130,9 +131,11 @@ #endif #if V8_OS_WIN -#include #include +// This has to come after windows.h. 
+#include + #include "include/v8-wasm-trap-handler-win.h" #include "src/trap-handler/handler-inside-win.h" #if defined(V8_OS_WIN64) @@ -1223,8 +1226,9 @@ static Local FunctionTemplateNew( v8::Local cached_property_name = v8::Local(), SideEffectType side_effect_type = SideEffectType::kHasSideEffect, const MemorySpan& c_function_overloads = {}, - uint8_t instance_type = 0, uint8_t allowed_receiver_range_start = 0, - uint8_t allowed_receiver_range_end = 0) { + uint8_t instance_type = 0, + uint8_t allowed_receiver_instance_type_range_start = 0, + uint8_t allowed_receiver_instance_type_range_end = 0) { i::Handle struct_obj = isolate->factory()->NewStruct( i::FUNCTION_TEMPLATE_INFO_TYPE, i::AllocationType::kOld); i::Handle obj = @@ -1247,8 +1251,10 @@ static Local FunctionTemplateNew( : *Utils::OpenHandle(*cached_property_name)); if (behavior == ConstructorBehavior::kThrow) raw.set_remove_prototype(true); raw.SetInstanceType(instance_type); - raw.set_allowed_receiver_range_start(allowed_receiver_range_start); - raw.set_allowed_receiver_range_end(allowed_receiver_range_end); + raw.set_allowed_receiver_instance_type_range_start( + allowed_receiver_instance_type_range_start); + raw.set_allowed_receiver_instance_type_range_end( + allowed_receiver_instance_type_range_end); } if (callback != nullptr) { Utils::ToLocal(obj)->SetCallHandler(callback, data, side_effect_type, @@ -1261,8 +1267,8 @@ Local FunctionTemplate::New( Isolate* isolate, FunctionCallback callback, v8::Local data, v8::Local signature, int length, ConstructorBehavior behavior, SideEffectType side_effect_type, const CFunction* c_function, - uint8_t instance_type, uint8_t allowed_receiver_range_start, - uint8_t allowed_receiver_range_end) { + uint16_t instance_type, uint16_t allowed_receiver_instance_type_range_start, + uint16_t allowed_receiver_instance_type_range_end) { i::Isolate* i_isolate = reinterpret_cast(isolate); // Changes to the environment cannot be captured in the snapshot. Expect no // function templates when the isolate is created for serialization. @@ -1273,7 +1279,8 @@ Local FunctionTemplate::New( Local(), side_effect_type, c_function ? 
MemorySpan{c_function, 1} : MemorySpan{}, - instance_type, allowed_receiver_range_start, allowed_receiver_range_end); + instance_type, allowed_receiver_instance_type_range_start, + allowed_receiver_instance_type_range_end); } Local FunctionTemplate::NewWithCFunctionOverloads( @@ -1874,10 +1881,6 @@ bool ScriptCompiler::ExternalSourceStream::SetBookmark() { return false; } void ScriptCompiler::ExternalSourceStream::ResetToBookmark() { UNREACHABLE(); } -ScriptCompiler::StreamedSource::StreamedSource(ExternalSourceStream* stream, - Encoding encoding) - : StreamedSource(std::unique_ptr(stream), encoding) {} - ScriptCompiler::StreamedSource::StreamedSource( std::unique_ptr stream, Encoding encoding) : impl_(new i::ScriptStreamingData(std::move(stream), encoding)) {} @@ -1964,10 +1967,11 @@ MaybeLocal Script::Run(Local context) { TRACE_EVENT_CALL_STATS_SCOPED(isolate, "v8", "V8.Execute"); ENTER_V8(isolate, context, Script, Run, MaybeLocal(), InternalEscapableScope); - i::HistogramTimerScope execute_timer(isolate->counters()->execute(), true); + i::TimerEventScope timer_scope(isolate); + i::NestedTimedHistogramScope execute_timer( + isolate->counters()->execute_precise()); i::AggregatingHistogramTimerScope histogram_timer( isolate->counters()->compile_lazy()); - i::TimerEventScope timer_scope(isolate); auto fun = i::Handle::cast(Utils::OpenHandle(this)); // TODO(crbug.com/1193459): remove once ablation study is completed @@ -2102,14 +2106,15 @@ Local ModuleRequest::GetImportAssertions() const { Module::Status Module::GetStatus() const { i::Handle self = Utils::OpenHandle(this); switch (self->status()) { - case i::Module::kUninstantiated: - case i::Module::kPreInstantiating: + case i::Module::kUnlinked: + case i::Module::kPreLinking: return kUninstantiated; - case i::Module::kInstantiating: + case i::Module::kLinking: return kInstantiating; - case i::Module::kInstantiated: + case i::Module::kLinked: return kInstantiated; case i::Module::kEvaluating: + case i::Module::kEvaluatingAsync: return kEvaluating; case i::Module::kEvaluated: return kEvaluated; @@ -2290,13 +2295,14 @@ MaybeLocal Module::Evaluate(Local context) { TRACE_EVENT_CALL_STATS_SCOPED(isolate, "v8", "V8.Execute"); ENTER_V8(isolate, context, Module, Evaluate, MaybeLocal(), InternalEscapableScope); - i::HistogramTimerScope execute_timer(isolate->counters()->execute(), true); - i::AggregatingHistogramTimerScope timer(isolate->counters()->compile_lazy()); i::TimerEventScope timer_scope(isolate); + i::NestedTimedHistogramScope execute_timer( + isolate->counters()->execute_precise()); + i::AggregatingHistogramTimerScope timer(isolate->counters()->compile_lazy()); i::Handle self = Utils::OpenHandle(this); - Utils::ApiCheck(self->status() >= i::Module::kInstantiated, - "Module::Evaluate", "Expected instantiated module"); + Utils::ApiCheck(self->status() >= i::Module::kLinked, "Module::Evaluate", + "Expected instantiated module"); Local result; has_pending_exception = !ToLocal(i::Module::Evaluate(isolate, self), &result); @@ -2345,31 +2351,17 @@ Maybe Module::SetSyntheticModuleExport(Isolate* isolate, return Just(true); } -void Module::SetSyntheticModuleExport(Local export_name, - Local export_value) { - i::Handle i_export_name = Utils::OpenHandle(*export_name); - i::Handle i_export_value = Utils::OpenHandle(*export_value); - i::Handle self = Utils::OpenHandle(this); - ASSERT_NO_SCRIPT_NO_EXCEPTION(self->GetIsolate()); - Utils::ApiCheck(self->IsSyntheticModule(), - "v8::Module::SetSyntheticModuleExport", - "v8::Module::SetSyntheticModuleExport 
must only be called on " - "a SyntheticModule"); - i::SyntheticModule::SetExportStrict(self->GetIsolate(), - i::Handle::cast(self), - i_export_name, i_export_value); -} - namespace { -i::Compiler::ScriptDetails GetScriptDetails( - i::Isolate* isolate, Local resource_name, int resource_line_offset, - int resource_column_offset, Local source_map_url, - Local host_defined_options) { - i::Compiler::ScriptDetails script_details; - if (!resource_name.IsEmpty()) { - script_details.name_obj = Utils::OpenHandle(*(resource_name)); - } +i::ScriptDetails GetScriptDetails(i::Isolate* isolate, + Local resource_name, + int resource_line_offset, + int resource_column_offset, + Local source_map_url, + Local host_defined_options, + ScriptOriginOptions origin_options) { + i::ScriptDetails script_details(Utils::OpenHandle(*(resource_name), true), + origin_options); script_details.line_offset = resource_line_offset; script_details.column_offset = resource_column_offset; script_details.host_defined_options = @@ -2393,29 +2385,44 @@ MaybeLocal ScriptCompiler::CompileUnboundInternal( CompileUnbound, MaybeLocal(), InternalEscapableScope); - i::ScriptData* script_data = nullptr; + i::Handle str = Utils::OpenHandle(*(source->source_string)); + + std::unique_ptr cached_data; if (options == kConsumeCodeCache) { - DCHECK(source->cached_data); - // ScriptData takes care of pointer-aligning the data. - script_data = new i::ScriptData(source->cached_data->data, - source->cached_data->length); + if (source->consume_cache_task) { + // If there's a cache consume task, finish it + i::MaybeHandle maybe_function_info = + source->consume_cache_task->impl_->Finish(isolate, str, + source->resource_options); + i::Handle result; + if (maybe_function_info.ToHandle(&result)) { + RETURN_ESCAPED(ToApiHandle(result)); + } + // If the above failed, then we must have rejected the cache. Continue + // with normal compilation, disabling the code cache consumption. + source->cached_data->rejected = true; + options = kNoCompileOptions; + } else { + DCHECK(source->cached_data); + // AlignedCachedData takes care of pointer-aligning the data. 
+ cached_data.reset(new i::AlignedCachedData(source->cached_data->data, + source->cached_data->length)); + } } - i::Handle str = Utils::OpenHandle(*(source->source_string)); i::Handle result; TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.compile"), "V8.CompileScript"); - i::Compiler::ScriptDetails script_details = GetScriptDetails( + i::ScriptDetails script_details = GetScriptDetails( isolate, source->resource_name, source->resource_line_offset, source->resource_column_offset, source->source_map_url, - source->host_defined_options); + source->host_defined_options, source->resource_options); i::MaybeHandle maybe_function_info = i::Compiler::GetSharedFunctionInfoForScript( - isolate, str, script_details, source->resource_options, nullptr, - script_data, options, no_cache_reason, i::NOT_NATIVES_CODE); + isolate, str, script_details, nullptr, cached_data.get(), options, + no_cache_reason, i::NOT_NATIVES_CODE); if (options == kConsumeCodeCache) { - source->cached_data->rejected = script_data->rejected(); + source->cached_data->rejected = cached_data->rejected(); } - delete script_data; has_pending_exception = !maybe_function_info.ToHandle(&result); RETURN_ON_FAILED_EXECUTION(UnboundScript); RETURN_ESCAPED(ToApiHandle(result)); @@ -2532,30 +2539,28 @@ MaybeLocal ScriptCompiler::CompileFunctionInContext( extension); } - i::Compiler::ScriptDetails script_details = GetScriptDetails( + i::ScriptDetails script_details = GetScriptDetails( isolate, source->resource_name, source->resource_line_offset, source->resource_column_offset, source->source_map_url, - source->host_defined_options); + source->host_defined_options, source->resource_options); - i::ScriptData* script_data = nullptr; + std::unique_ptr cached_data; if (options == kConsumeCodeCache) { DCHECK(source->cached_data); // ScriptData takes care of pointer-aligning the data. - script_data = new i::ScriptData(source->cached_data->data, - source->cached_data->length); + cached_data.reset(new i::AlignedCachedData(source->cached_data->data, + source->cached_data->length)); } i::Handle scoped_result; has_pending_exception = !i::Compiler::GetWrappedFunction( Utils::OpenHandle(*source->source_string), arguments_list, context, - script_details, source->resource_options, script_data, options, - no_cache_reason) + script_details, cached_data.get(), options, no_cache_reason) .ToHandle(&scoped_result); if (options == kConsumeCodeCache) { - source->cached_data->rejected = script_data->rejected(); + source->cached_data->rejected = cached_data->rejected(); } - delete script_data; RETURN_ON_FAILED_EXECUTION(Function); result = handle_scope.Escape(Utils::CallableToLocal(scoped_result)); } @@ -2574,14 +2579,6 @@ MaybeLocal ScriptCompiler::CompileFunctionInContext( void ScriptCompiler::ScriptStreamingTask::Run() { data_->task->Run(); } -ScriptCompiler::ScriptStreamingTask* ScriptCompiler::StartStreamingScript( - Isolate* v8_isolate, StreamedSource* source, CompileOptions options) { - // We don't support other compile options on streaming background compiles. - // TODO(rmcilroy): remove CompileOptions from the API. 
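Around the new AlignedCachedData plumbing, the embedder-visible contract stays the same: produce a cache once, hand it back with kConsumeCodeCache, and check the rejected bit afterwards. A hedged sketch (first_source and consume_source are hypothetical Source objects for the same script text):

    // First run: compile and serialize a code cache.
    v8::Local<v8::UnboundScript> unbound =
        v8::ScriptCompiler::CompileUnboundScript(isolate, &first_source)
            .ToLocalChecked();
    std::unique_ptr<v8::ScriptCompiler::CachedData> cache(
        v8::ScriptCompiler::CreateCodeCache(unbound));

    // Later run: compile consume_source with kConsumeCodeCache, then:
    if (consume_source.GetCachedData()->rejected) {
      // V8 refused the cache (version or flag mismatch); regenerate it.
    }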
- CHECK(options == ScriptCompiler::kNoCompileOptions); - return StartStreaming(v8_isolate, source); -} - ScriptCompiler::ScriptStreamingTask* ScriptCompiler::StartStreaming( Isolate* v8_isolate, StreamedSource* source, v8::ScriptType type) { if (!i::FLAG_script_streaming) return nullptr; @@ -2594,18 +2591,36 @@ ScriptCompiler::ScriptStreamingTask* ScriptCompiler::StartStreaming( return new ScriptCompiler::ScriptStreamingTask(data); } +ScriptCompiler::ConsumeCodeCacheTask::ConsumeCodeCacheTask( + std::unique_ptr impl) + : impl_(std::move(impl)) {} + +ScriptCompiler::ConsumeCodeCacheTask::~ConsumeCodeCacheTask() = default; + +void ScriptCompiler::ConsumeCodeCacheTask::Run() { impl_->Run(); } + +ScriptCompiler::ConsumeCodeCacheTask* ScriptCompiler::StartConsumingCodeCache( + Isolate* v8_isolate, std::unique_ptr cached_data) { + if (!i::FLAG_concurrent_cache_deserialization) return nullptr; + i::Isolate* isolate = reinterpret_cast(v8_isolate); + ASSERT_NO_SCRIPT_NO_EXCEPTION(isolate); + return new ScriptCompiler::ConsumeCodeCacheTask( + std::make_unique(isolate, + std::move(cached_data))); +} + namespace { i::MaybeHandle CompileStreamedSource( i::Isolate* isolate, ScriptCompiler::StreamedSource* v8_source, Local full_source_string, const ScriptOrigin& origin) { i::Handle str = Utils::OpenHandle(*(full_source_string)); - i::Compiler::ScriptDetails script_details = + i::ScriptDetails script_details = GetScriptDetails(isolate, origin.ResourceName(), origin.LineOffset(), origin.ColumnOffset(), origin.SourceMapUrl(), - origin.HostDefinedOptions()); + origin.HostDefinedOptions(), origin.Options()); i::ScriptStreamingData* data = v8_source->impl(); return i::Compiler::GetSharedFunctionInfoForStreamedScript( - isolate, str, script_details, origin.Options(), data); + isolate, str, script_details, data); } } // namespace @@ -3676,6 +3691,13 @@ bool i::ShouldThrowOnError(i::Isolate* isolate) { i::ShouldThrow::kThrowOnError; } +bool i::CanHaveInternalField(int instance_type) { + return instance_type == i::Internals::kJSObjectType || + instance_type == i::Internals::kJSSpecialApiObjectType || + v8::internal::InstanceTypeChecker::IsJSApiObject( + static_cast(instance_type)); +} + void i::Internals::CheckInitializedImpl(v8::Isolate* external_isolate) { i::Isolate* isolate = reinterpret_cast(external_isolate); Utils::ApiCheck(isolate != nullptr && !isolate->IsDead(), @@ -4947,6 +4969,8 @@ MaybeLocal Object::CallAsFunction(Local context, ENTER_V8(isolate, context, Object, CallAsFunction, MaybeLocal(), InternalEscapableScope); i::TimerEventScope timer_scope(isolate); + i::NestedTimedHistogramScope execute_timer( + isolate->counters()->execute_precise()); auto self = Utils::OpenHandle(this); auto recv_obj = Utils::OpenHandle(*recv); STATIC_ASSERT(sizeof(v8::Local) == sizeof(i::Handle)); @@ -4965,6 +4989,8 @@ MaybeLocal Object::CallAsConstructor(Local context, int argc, ENTER_V8(isolate, context, Object, CallAsConstructor, MaybeLocal(), InternalEscapableScope); i::TimerEventScope timer_scope(isolate); + i::NestedTimedHistogramScope execute_timer( + isolate->counters()->execute_precise()); auto self = Utils::OpenHandle(this); STATIC_ASSERT(sizeof(v8::Local) == sizeof(i::Handle)); i::Handle* args = reinterpret_cast*>(argv); @@ -5002,6 +5028,8 @@ MaybeLocal Function::NewInstanceWithSideEffectType( ENTER_V8(isolate, context, Function, NewInstance, MaybeLocal(), InternalEscapableScope); i::TimerEventScope timer_scope(isolate); + i::NestedTimedHistogramScope execute_timer( + isolate->counters()->execute_precise()); 
auto self = Utils::OpenHandle(this); STATIC_ASSERT(sizeof(v8::Local) == sizeof(i::Handle)); bool should_set_has_no_side_effect = @@ -5051,6 +5079,8 @@ MaybeLocal Function::Call(Local context, ENTER_V8(isolate, context, Function, Call, MaybeLocal(), InternalEscapableScope); i::TimerEventScope timer_scope(isolate); + i::NestedTimedHistogramScope execute_timer( + isolate->counters()->execute_precise()); auto self = Utils::OpenHandle(this); Utils::ApiCheck(!self.is_null(), "v8::Function::Call", "Function to be called is a null pointer"); @@ -5859,29 +5889,17 @@ bool v8::V8::Initialize(const int build_config) { #if V8_OS_LINUX || V8_OS_MACOSX bool TryHandleWebAssemblyTrapPosix(int sig_code, siginfo_t* info, void* context) { - // When the target code runs on the V8 arm simulator, the trap handler does - // not behave as expected: the instruction pointer points inside the simulator - // code rather than the wasm code, so the trap handler cannot find the landing - // pad and lets the process crash. Therefore, only enable trap handlers if - // the host and target arch are the same. -#if V8_ENABLE_WEBASSEMBLY && \ - ((V8_TARGET_ARCH_X64 && !V8_OS_ANDROID) || \ - (V8_HOST_ARCH_ARM64 && V8_TARGET_ARCH_ARM64 && V8_OS_MACOSX)) +#if V8_ENABLE_WEBASSEMBLY && V8_TRAP_HANDLER_SUPPORTED return i::trap_handler::TryHandleSignal(sig_code, info, context); #else return false; #endif } - -bool V8::TryHandleSignal(int signum, void* info, void* context) { - return TryHandleWebAssemblyTrapPosix( - signum, reinterpret_cast(info), context); -} #endif #if V8_OS_WIN bool TryHandleWebAssemblyTrapWindows(EXCEPTION_POINTERS* exception) { -#if V8_ENABLE_WEBASSEMBLY && V8_TARGET_ARCH_X64 +#if V8_ENABLE_WEBASSEMBLY && V8_TRAP_HANDLER_SUPPORTED return i::trap_handler::TryHandleWasmTrap(exception); #else return false; @@ -5984,8 +6002,6 @@ void V8::GetSharedMemoryStatistics(SharedMemoryStatistics* statistics) { i::ReadOnlyHeap::PopulateReadOnlySpaceStatistics(statistics); } -void V8::SetIsCrossOriginIsolated() {} - template struct InvokeBootstrapper; @@ -8684,11 +8700,6 @@ bool Isolate::GetHeapCodeAndMetadataStatistics( return true; } -v8::MaybeLocal Isolate::MeasureMemory( - v8::Local context, MeasureMemoryMode mode) { - return v8::MaybeLocal(); -} - bool Isolate::MeasureMemory(std::unique_ptr delegate, MeasureMemoryExecution execution) { i::Isolate* isolate = reinterpret_cast(this); @@ -8888,7 +8899,7 @@ bool Isolate::IdleNotificationDeadline(double deadline_in_seconds) { void Isolate::LowMemoryNotification() { i::Isolate* isolate = reinterpret_cast(this); { - i::HistogramTimerScope idle_notification_scope( + i::NestedTimedHistogramScope idle_notification_scope( isolate->counters()->gc_low_memory_notification()); TRACE_EVENT0("v8", "V8.GCLowMemoryNotification"); isolate->heap()->CollectAllAvailableGarbage( @@ -9631,7 +9642,7 @@ CpuProfile* CpuProfiler::StopProfiling(Local title) { void CpuProfiler::UseDetailedSourcePositionsForProfiling(Isolate* isolate) { reinterpret_cast(isolate) - ->set_detailed_source_positions_for_profiling(true); + ->SetDetailedSourcePositionsForProfiling(true); } uintptr_t CodeEvent::GetCodeStartAddress() { @@ -10034,6 +10045,10 @@ const CTypeInfo& CFunctionInfo::ArgumentInfo(unsigned int index) const { return arg_info_[index]; } +void FastApiTypedArrayBase::ValidateIndex(size_t index) const { + DCHECK_LT(index, length_); +} + RegisterState::RegisterState() : pc(nullptr), sp(nullptr), fp(nullptr), lr(nullptr) {} RegisterState::~RegisterState() = default; @@ -10234,7 +10249,6 @@ void 
InvokeAccessorGetterCallback( Isolate* isolate = reinterpret_cast<Isolate*>(info.GetIsolate()); RCS_SCOPE(isolate, RuntimeCallCounterId::kAccessorGetterCallback); Address getter_address = reinterpret_cast<Address>
(getter); - VMState<EXTERNAL> state(isolate); ExternalCallbackScope call_scope(isolate, getter_address); getter(property, info); } @@ -10244,7 +10258,6 @@ void InvokeFunctionCallback(const v8::FunctionCallbackInfo<v8::Value>& info, Isolate* isolate = reinterpret_cast<Isolate*>(info.GetIsolate()); RCS_SCOPE(isolate, RuntimeCallCounterId::kFunctionCallback); Address callback_address = reinterpret_cast<Address>
(callback); - VMState state(isolate); ExternalCallbackScope call_scope(isolate, callback_address); callback(info); } @@ -10322,6 +10335,7 @@ bool ConvertDouble(double d) { #undef CALLBACK_SETTER } // namespace internal + } // namespace v8 #undef TRACE_BS diff --git a/deps/v8/src/base/atomicops.h b/deps/v8/src/base/atomicops.h index 30fb983d28190f..888157dc61f1ed 100644 --- a/deps/v8/src/base/atomicops.h +++ b/deps/v8/src/base/atomicops.h @@ -316,6 +316,43 @@ inline void Relaxed_Memcpy(volatile Atomic8* dst, volatile const Atomic8* src, } } +inline void Relaxed_Memmove(volatile Atomic8* dst, volatile const Atomic8* src, + size_t bytes) { + // Use Relaxed_Memcpy if copying forwards is safe. This is the case if there + // is no overlap, or {dst} lies before {src}. + // This single check checks for both: + if (reinterpret_cast(dst) - reinterpret_cast(src) >= + bytes) { + Relaxed_Memcpy(dst, src, bytes); + return; + } + + // Otherwise copy backwards. + dst += bytes; + src += bytes; + constexpr size_t kAtomicWordSize = sizeof(AtomicWord); + while (bytes > 0 && + !IsAligned(reinterpret_cast(dst), kAtomicWordSize)) { + Relaxed_Store(--dst, Relaxed_Load(--src)); + --bytes; + } + if (IsAligned(reinterpret_cast(src), kAtomicWordSize) && + IsAligned(reinterpret_cast(dst), kAtomicWordSize)) { + while (bytes >= kAtomicWordSize) { + dst -= kAtomicWordSize; + src -= kAtomicWordSize; + bytes -= kAtomicWordSize; + Relaxed_Store( + reinterpret_cast(dst), + Relaxed_Load(reinterpret_cast(src))); + } + } + while (bytes > 0) { + Relaxed_Store(--dst, Relaxed_Load(--src)); + --bytes; + } +} + } // namespace base } // namespace v8 diff --git a/deps/v8/src/base/bits.h b/deps/v8/src/base/bits.h index f790dfaab47bba..5c31addd39531d 100644 --- a/deps/v8/src/base/bits.h +++ b/deps/v8/src/base/bits.h @@ -194,7 +194,9 @@ inline size_t RoundUpToPowerOfTwo(size_t value) { if (sizeof(size_t) == sizeof(uint64_t)) { return RoundUpToPowerOfTwo64(value); } else { - return RoundUpToPowerOfTwo32(value); + // Without windows.h included this line triggers a truncation warning on + // 64-bit builds. Presumably windows.h disables the relevant warning. + return RoundUpToPowerOfTwo32(static_cast(value)); } } diff --git a/deps/v8/src/base/bounded-page-allocator.cc b/deps/v8/src/base/bounded-page-allocator.cc index e6c1a61bcbe962..fa7b10324d83ba 100644 --- a/deps/v8/src/base/bounded-page-allocator.cc +++ b/deps/v8/src/base/bounded-page-allocator.cc @@ -14,9 +14,9 @@ BoundedPageAllocator::BoundedPageAllocator(v8::PageAllocator* page_allocator, commit_page_size_(page_allocator->CommitPageSize()), page_allocator_(page_allocator), region_allocator_(start, size, allocate_page_size_) { - CHECK_NOT_NULL(page_allocator); - CHECK(IsAligned(allocate_page_size, page_allocator->AllocatePageSize())); - CHECK(IsAligned(allocate_page_size_, commit_page_size_)); + DCHECK_NOT_NULL(page_allocator); + DCHECK(IsAligned(allocate_page_size, page_allocator->AllocatePageSize())); + DCHECK(IsAligned(allocate_page_size_, commit_page_size_)); } BoundedPageAllocator::Address BoundedPageAllocator::begin() const { @@ -29,11 +29,11 @@ void* BoundedPageAllocator::AllocatePages(void* hint, size_t size, size_t alignment, PageAllocator::Permission access) { MutexGuard guard(&mutex_); - CHECK(IsAligned(alignment, region_allocator_.page_size())); + DCHECK(IsAligned(alignment, region_allocator_.page_size())); // Region allocator does not support alignments bigger than it's own // allocation alignment. 
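The single bounds test in the new Relaxed_Memmove works because the pointer difference is unsigned: when dst is below src the subtraction wraps to a huge value, so one comparison covers both "no overlap" and "dst before src". The same decision in isolation, with plain memcpy/memmove standing in for the atomic element copies:

    #include <cstdint>
    #include <cstring>

    void CopyPossiblyOverlapping(uint8_t* dst, const uint8_t* src, size_t bytes) {
      // Unsigned difference: >= bytes both when the ranges are disjoint and
      // when dst lies before src, so a forward copy cannot read clobbered data.
      if (static_cast<size_t>(reinterpret_cast<uintptr_t>(dst) -
                              reinterpret_cast<uintptr_t>(src)) >= bytes) {
        std::memcpy(dst, src, bytes);
      } else {
        // dst lies after src and the ranges overlap: copy backwards.
        std::memmove(dst, src, bytes);
      }
    }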
- CHECK_LE(alignment, allocate_page_size_); + DCHECK_LE(alignment, allocate_page_size_); // TODO(ishell): Consider using randomized version here. Address address = region_allocator_.AllocateRegion(size); @@ -47,13 +47,18 @@ void* BoundedPageAllocator::AllocatePages(void* hint, size_t size, bool BoundedPageAllocator::AllocatePagesAt(Address address, size_t size, PageAllocator::Permission access) { - CHECK(IsAligned(address, allocate_page_size_)); - CHECK(IsAligned(size, allocate_page_size_)); - CHECK(region_allocator_.contains(address, size)); + DCHECK(IsAligned(address, allocate_page_size_)); + DCHECK(IsAligned(size, allocate_page_size_)); - if (!region_allocator_.AllocateRegionAt(address, size)) { - return false; + { + MutexGuard guard(&mutex_); + DCHECK(region_allocator_.contains(address, size)); + + if (!region_allocator_.AllocateRegionAt(address, size)) { + return false; + } } + CHECK(page_allocator_->SetPermissions(reinterpret_cast(address), size, access)); return true; @@ -62,16 +67,20 @@ bool BoundedPageAllocator::AllocatePagesAt(Address address, size_t size, bool BoundedPageAllocator::ReserveForSharedMemoryMapping(void* ptr, size_t size) { Address address = reinterpret_cast
(ptr); - CHECK(IsAligned(address, allocate_page_size_)); - CHECK(IsAligned(size, commit_page_size_)); - CHECK(region_allocator_.contains(address, size)); - - // Region allocator requires page size rather than commit size so just over- - // allocate there since any extra space couldn't be used anyway. - size_t region_size = RoundUp(size, allocate_page_size_); - if (!region_allocator_.AllocateRegionAt( - address, region_size, RegionAllocator::RegionState::kExcluded)) { - return false; + DCHECK(IsAligned(address, allocate_page_size_)); + DCHECK(IsAligned(size, commit_page_size_)); + + { + MutexGuard guard(&mutex_); + DCHECK(region_allocator_.contains(address, size)); + + // Region allocator requires page size rather than commit size so just over- + // allocate there since any extra space couldn't be used anyway. + size_t region_size = RoundUp(size, allocate_page_size_); + if (!region_allocator_.AllocateRegionAt( + address, region_size, RegionAllocator::RegionState::kExcluded)) { + return false; + } } CHECK(page_allocator_->SetPermissions(ptr, size, @@ -93,7 +102,7 @@ bool BoundedPageAllocator::FreePages(void* raw_address, size_t size) { bool BoundedPageAllocator::ReleasePages(void* raw_address, size_t size, size_t new_size) { Address address = reinterpret_cast
(raw_address); - CHECK(IsAligned(address, allocate_page_size_)); + DCHECK(IsAligned(address, allocate_page_size_)); DCHECK_LT(new_size, size); DCHECK(IsAligned(size - new_size, commit_page_size_)); @@ -107,7 +116,7 @@ bool BoundedPageAllocator::ReleasePages(void* raw_address, size_t size, // There must be an allocated region at given |address| of a size not // smaller than |size|. MutexGuard guard(&mutex_); - CHECK_EQ(allocated_size, region_allocator_.CheckRegion(address)); + DCHECK_EQ(allocated_size, region_allocator_.CheckRegion(address)); } #endif diff --git a/deps/v8/src/base/build_config.h b/deps/v8/src/base/build_config.h index 260747201a2216..d7a0c9f3cf6a81 100644 --- a/deps/v8/src/base/build_config.h +++ b/deps/v8/src/base/build_config.h @@ -211,6 +211,13 @@ #error Unknown target architecture endianness #endif +// pthread_jit_write_protect is only available on arm64 Mac. +#if defined(V8_OS_MACOSX) && !defined(V8_OS_IOS) && defined(V8_HOST_ARCH_ARM64) +#define V8_HAS_PTHREAD_JIT_WRITE_PROTECT 1 +#else +#define V8_HAS_PTHREAD_JIT_WRITE_PROTECT 0 +#endif + #if defined(V8_TARGET_ARCH_IA32) || defined(V8_TARGET_ARCH_X64) #define V8_TARGET_ARCH_STORES_RETURN_ADDRESS_ON_STACK true #else diff --git a/deps/v8/src/base/page-allocator.cc b/deps/v8/src/base/page-allocator.cc index 2defe619281f6b..1438c883377897 100644 --- a/deps/v8/src/base/page-allocator.cc +++ b/deps/v8/src/base/page-allocator.cc @@ -45,7 +45,7 @@ void* PageAllocator::GetRandomMmapAddr() { void* PageAllocator::AllocatePages(void* hint, size_t size, size_t alignment, PageAllocator::Permission access) { -#if !(V8_OS_MACOSX && V8_HOST_ARCH_ARM64 && defined(MAP_JIT)) +#if !V8_HAS_PTHREAD_JIT_WRITE_PROTECT // kNoAccessWillJitLater is only used on Apple Silicon. Map it to regular // kNoAccess on other platforms, so code doesn't have to handle both enum // values. 
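V8_HAS_PTHREAD_JIT_WRITE_PROTECT (defined in base/build_config.h above) centralizes the arm64-macOS condition that call sites previously spelled out by hand. A sketch of the kind of guard it is meant for; the helper name is made up, but pthread_jit_write_protect_np is the macOS 11+ call the macro gates:

    #if V8_HAS_PTHREAD_JIT_WRITE_PROTECT
    #include <pthread.h>
    #endif

    // Flip the calling thread between writing and executing MAP_JIT pages on
    // hardware that enforces W^X; a no-op everywhere else.
    inline void SetJitPagesWritable(bool writable) {
    #if V8_HAS_PTHREAD_JIT_WRITE_PROTECT
      pthread_jit_write_protect_np(writable ? 0 : 1);
    #else
      (void)writable;
    #endif
    }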
diff --git a/deps/v8/src/base/platform/condition-variable.cc b/deps/v8/src/base/platform/condition-variable.cc index 04ea29181bb4b9..5ab66d39a4df2a 100644 --- a/deps/v8/src/base/platform/condition-variable.cc +++ b/deps/v8/src/base/platform/condition-variable.cc @@ -9,6 +9,10 @@ #include "src/base/platform/time.h" +#if V8_OS_WIN +#include +#endif + namespace v8 { namespace base { @@ -119,22 +123,25 @@ bool ConditionVariable::WaitFor(Mutex* mutex, const TimeDelta& rel_time) { #elif V8_OS_WIN ConditionVariable::ConditionVariable() { - InitializeConditionVariable(&native_handle_); + InitializeConditionVariable(V8ToWindowsType(&native_handle_)); } ConditionVariable::~ConditionVariable() {} -void ConditionVariable::NotifyOne() { WakeConditionVariable(&native_handle_); } +void ConditionVariable::NotifyOne() { + WakeConditionVariable(V8ToWindowsType(&native_handle_)); +} void ConditionVariable::NotifyAll() { - WakeAllConditionVariable(&native_handle_); + WakeAllConditionVariable(V8ToWindowsType(&native_handle_)); } void ConditionVariable::Wait(Mutex* mutex) { mutex->AssertHeldAndUnmark(); - SleepConditionVariableSRW(&native_handle_, &mutex->native_handle(), INFINITE, + SleepConditionVariableSRW(V8ToWindowsType(&native_handle_), + V8ToWindowsType(&mutex->native_handle()), INFINITE, 0); mutex->AssertUnheldAndMark(); } @@ -144,7 +151,8 @@ bool ConditionVariable::WaitFor(Mutex* mutex, const TimeDelta& rel_time) { int64_t msec = rel_time.InMilliseconds(); mutex->AssertHeldAndUnmark(); BOOL result = SleepConditionVariableSRW( - &native_handle_, &mutex->native_handle(), static_cast(msec), 0); + V8ToWindowsType(&native_handle_), + V8ToWindowsType(&mutex->native_handle()), static_cast(msec), 0); #ifdef DEBUG if (!result) { // On failure, we only expect the CV to timeout. Any other error value means diff --git a/deps/v8/src/base/platform/condition-variable.h b/deps/v8/src/base/platform/condition-variable.h index 79e653a32aa1d3..3ca6ba8d1b1f7b 100644 --- a/deps/v8/src/base/platform/condition-variable.h +++ b/deps/v8/src/base/platform/condition-variable.h @@ -69,7 +69,7 @@ class V8_BASE_EXPORT ConditionVariable final { #if V8_OS_POSIX using NativeHandle = pthread_cond_t; #elif V8_OS_WIN - using NativeHandle = CONDITION_VARIABLE; + using NativeHandle = V8_CONDITION_VARIABLE; #elif V8_OS_STARBOARD using NativeHandle = SbConditionVariable; #endif diff --git a/deps/v8/src/base/platform/elapsed-timer.h b/deps/v8/src/base/platform/elapsed-timer.h index 3406831cbe83d7..2947c31237b2cf 100644 --- a/deps/v8/src/base/platform/elapsed-timer.h +++ b/deps/v8/src/base/platform/elapsed-timer.h @@ -13,16 +13,17 @@ namespace base { class ElapsedTimer final { public: -#ifdef DEBUG - ElapsedTimer() : started_(false) {} -#endif + ElapsedTimer() : start_ticks_() {} // Starts this timer. Once started a timer can be checked with // |Elapsed()| or |HasExpired()|, and may be restarted using |Restart()|. // This method must not be called on an already started timer. - void Start() { + void Start() { Start(Now()); } + + void Start(TimeTicks now) { + DCHECK(!now.IsNull()); DCHECK(!IsStarted()); - start_ticks_ = Now(); + set_start_ticks(now); #ifdef DEBUG started_ = true; #endif @@ -33,7 +34,7 @@ class ElapsedTimer final { // started before. void Stop() { DCHECK(IsStarted()); - start_ticks_ = TimeTicks(); + set_start_ticks(TimeTicks()); #ifdef DEBUG started_ = false; #endif @@ -42,31 +43,65 @@ class ElapsedTimer final { // Returns |true| if this timer was started previously. 
bool IsStarted() const { - DCHECK(started_ || start_ticks_.IsNull()); - DCHECK(!started_ || !start_ticks_.IsNull()); + DCHECK(!paused_); + DCHECK_NE(started_, start_ticks_.IsNull()); return !start_ticks_.IsNull(); } +#if DEBUG + bool IsPaused() const { return paused_; } +#endif + // Restarts the timer and returns the time elapsed since the previous start. // This method is equivalent to obtaining the elapsed time with |Elapsed()| // and then starting the timer again, but does so in one single operation, // avoiding the need to obtain the clock value twice. It may only be called // on a previously started timer. - TimeDelta Restart() { + TimeDelta Restart() { return Restart(Now()); } + + TimeDelta Restart(TimeTicks now) { + DCHECK(!now.IsNull()); DCHECK(IsStarted()); - TimeTicks ticks = Now(); - TimeDelta elapsed = ticks - start_ticks_; + TimeDelta elapsed = now - start_ticks_; DCHECK_GE(elapsed.InMicroseconds(), 0); - start_ticks_ = ticks; + set_start_ticks(now); DCHECK(IsStarted()); return elapsed; } + void Pause() { Pause(Now()); } + + void Pause(TimeTicks now) { + TimeDelta elapsed = Elapsed(now); + DCHECK(IsStarted()); +#ifdef DEBUG + paused_ = true; +#endif + set_paused_elapsed(elapsed); + } + + void Resume() { Resume(Now()); } + + void Resume(TimeTicks now) { + DCHECK(!now.IsNull()); + DCHECK(started_); + DCHECK(paused_); + TimeDelta elapsed = paused_elapsed(); +#ifdef DEBUG + paused_ = false; +#endif + set_start_ticks(now - elapsed); + DCHECK(IsStarted()); + } + // Returns the time elapsed since the previous start. This method may only // be called on a previously started timer. - TimeDelta Elapsed() const { + TimeDelta Elapsed() const { return Elapsed(Now()); } + + TimeDelta Elapsed(TimeTicks now) const { + DCHECK(!now.IsNull()); DCHECK(IsStarted()); - TimeDelta elapsed = Now() - start_ticks_; + TimeDelta elapsed = now - start_ticks(); DCHECK_GE(elapsed.InMicroseconds(), 0); return elapsed; } @@ -86,9 +121,35 @@ class ElapsedTimer final { return now; } - TimeTicks start_ticks_; + TimeDelta paused_elapsed() { + // Only used started_ since paused_elapsed_ can be 0. 
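The new Pause()/Resume() pair lets a started ElapsedTimer exclude a span from its measurement (internally the elapsed delta is parked in the union shown just below). A usage sketch from inside V8; the Do*Work() calls are placeholders:

    v8::base::ElapsedTimer timer;
    timer.Start();
    DoMeasuredWork();

    timer.Pause();      // stop accumulating ...
    DoUnrelatedWork();  // ... while this runs
    timer.Resume();

    DoMoreMeasuredWork();
    v8::base::TimeDelta measured = timer.Elapsed();  // excludes the paused span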
+ DCHECK(paused_); + DCHECK(started_); + return paused_elapsed_; + } + + void set_paused_elapsed(TimeDelta delta) { + DCHECK(paused_); + DCHECK(started_); + paused_elapsed_ = delta; + } + + TimeTicks start_ticks() const { + DCHECK(!paused_); + return start_ticks_; + } + void set_start_ticks(TimeTicks start_ticks) { + DCHECK(!paused_); + start_ticks_ = start_ticks; + } + + union { + TimeTicks start_ticks_; + TimeDelta paused_elapsed_; + }; #ifdef DEBUG - bool started_; + bool started_ = false; + bool paused_ = false; #endif }; diff --git a/deps/v8/src/base/platform/mutex.cc b/deps/v8/src/base/platform/mutex.cc index 5a347246a95538..7bf60996ee4626 100644 --- a/deps/v8/src/base/platform/mutex.cc +++ b/deps/v8/src/base/platform/mutex.cc @@ -10,6 +10,10 @@ #include #endif // DEBUG +#if V8_OS_WIN +#include +#endif + namespace v8 { namespace base { @@ -218,6 +222,37 @@ bool RecursiveMutex::TryLock() { return true; } +#if V8_OS_MACOSX + +SharedMutex::SharedMutex() { InitializeNativeHandle(&native_handle_); } + +SharedMutex::~SharedMutex() { DestroyNativeHandle(&native_handle_); } + +void SharedMutex::LockShared() { LockExclusive(); } + +void SharedMutex::LockExclusive() { + DCHECK(TryHoldSharedMutex(this)); + LockNativeHandle(&native_handle_); +} + +void SharedMutex::UnlockShared() { UnlockExclusive(); } + +void SharedMutex::UnlockExclusive() { + DCHECK(TryReleaseSharedMutex(this)); + UnlockNativeHandle(&native_handle_); +} + +bool SharedMutex::TryLockShared() { return TryLockExclusive(); } + +bool SharedMutex::TryLockExclusive() { + DCHECK(SharedMutexNotHeld(this)); + if (!TryLockNativeHandle(&native_handle_)) return false; + DCHECK(TryHoldSharedMutex(this)); + return true; +} + +#else // !V8_OS_MACOSX + SharedMutex::SharedMutex() { pthread_rwlock_init(&native_handle_, nullptr); } SharedMutex::~SharedMutex() { @@ -266,6 +301,8 @@ bool SharedMutex::TryLockExclusive() { return result; } +#endif // !V8_OS_MACOSX + #elif V8_OS_WIN Mutex::Mutex() : native_handle_(SRWLOCK_INIT) { @@ -281,19 +318,19 @@ Mutex::~Mutex() { void Mutex::Lock() { - AcquireSRWLockExclusive(&native_handle_); + AcquireSRWLockExclusive(V8ToWindowsType(&native_handle_)); AssertUnheldAndMark(); } void Mutex::Unlock() { AssertHeldAndUnmark(); - ReleaseSRWLockExclusive(&native_handle_); + ReleaseSRWLockExclusive(V8ToWindowsType(&native_handle_)); } bool Mutex::TryLock() { - if (!TryAcquireSRWLockExclusive(&native_handle_)) { + if (!TryAcquireSRWLockExclusive(V8ToWindowsType(&native_handle_))) { return false; } AssertUnheldAndMark(); @@ -302,7 +339,7 @@ bool Mutex::TryLock() { RecursiveMutex::RecursiveMutex() { - InitializeCriticalSection(&native_handle_); + InitializeCriticalSection(V8ToWindowsType(&native_handle_)); #ifdef DEBUG level_ = 0; #endif @@ -310,13 +347,13 @@ RecursiveMutex::RecursiveMutex() { RecursiveMutex::~RecursiveMutex() { - DeleteCriticalSection(&native_handle_); + DeleteCriticalSection(V8ToWindowsType(&native_handle_)); DCHECK_EQ(0, level_); } void RecursiveMutex::Lock() { - EnterCriticalSection(&native_handle_); + EnterCriticalSection(V8ToWindowsType(&native_handle_)); #ifdef DEBUG DCHECK_LE(0, level_); level_++; @@ -329,12 +366,12 @@ void RecursiveMutex::Unlock() { DCHECK_LT(0, level_); level_--; #endif - LeaveCriticalSection(&native_handle_); + LeaveCriticalSection(V8ToWindowsType(&native_handle_)); } bool RecursiveMutex::TryLock() { - if (!TryEnterCriticalSection(&native_handle_)) { + if (!TryEnterCriticalSection(V8ToWindowsType(&native_handle_))) { return false; } #ifdef DEBUG @@ -350,34 +387,34 @@ 
SharedMutex::~SharedMutex() {} void SharedMutex::LockShared() { DCHECK(TryHoldSharedMutex(this)); - AcquireSRWLockShared(&native_handle_); + AcquireSRWLockShared(V8ToWindowsType(&native_handle_)); } void SharedMutex::LockExclusive() { DCHECK(TryHoldSharedMutex(this)); - AcquireSRWLockExclusive(&native_handle_); + AcquireSRWLockExclusive(V8ToWindowsType(&native_handle_)); } void SharedMutex::UnlockShared() { DCHECK(TryReleaseSharedMutex(this)); - ReleaseSRWLockShared(&native_handle_); + ReleaseSRWLockShared(V8ToWindowsType(&native_handle_)); } void SharedMutex::UnlockExclusive() { DCHECK(TryReleaseSharedMutex(this)); - ReleaseSRWLockExclusive(&native_handle_); + ReleaseSRWLockExclusive(V8ToWindowsType(&native_handle_)); } bool SharedMutex::TryLockShared() { DCHECK(SharedMutexNotHeld(this)); - bool result = TryAcquireSRWLockShared(&native_handle_); + bool result = TryAcquireSRWLockShared(V8ToWindowsType(&native_handle_)); if (result) DCHECK(TryHoldSharedMutex(this)); return result; } bool SharedMutex::TryLockExclusive() { DCHECK(SharedMutexNotHeld(this)); - bool result = TryAcquireSRWLockExclusive(&native_handle_); + bool result = TryAcquireSRWLockExclusive(V8ToWindowsType(&native_handle_)); if (result) DCHECK(TryHoldSharedMutex(this)); return result; } diff --git a/deps/v8/src/base/platform/mutex.h b/deps/v8/src/base/platform/mutex.h index 328c593a30eb60..5fefa25ab6f953 100644 --- a/deps/v8/src/base/platform/mutex.h +++ b/deps/v8/src/base/platform/mutex.h @@ -66,7 +66,7 @@ class V8_BASE_EXPORT Mutex final { #if V8_OS_POSIX using NativeHandle = pthread_mutex_t; #elif V8_OS_WIN - using NativeHandle = SRWLOCK; + using NativeHandle = V8_SRWLOCK; #elif V8_OS_STARBOARD using NativeHandle = SbMutex; #endif @@ -171,7 +171,7 @@ class V8_BASE_EXPORT RecursiveMutex final { #if V8_OS_POSIX using NativeHandle = pthread_mutex_t; #elif V8_OS_WIN - using NativeHandle = CRITICAL_SECTION; + using NativeHandle = V8_CRITICAL_SECTION; #elif V8_OS_STARBOARD using NativeHandle = starboard::RecursiveMutex; #endif @@ -265,10 +265,15 @@ class V8_BASE_EXPORT SharedMutex final { private: // The implementation-defined native handle type. -#if V8_OS_POSIX +#if V8_OS_MACOSX + // pthread_rwlock_t is broken on MacOS when signals are being sent to the + // process (see https://crbug.com/v8/11399). Until Apple fixes that in the OS, + // we have to fall back to a non-shared mutex. + using NativeHandle = pthread_mutex_t; +#elif V8_OS_POSIX using NativeHandle = pthread_rwlock_t; #elif V8_OS_WIN - using NativeHandle = SRWLOCK; + using NativeHandle = V8_SRWLOCK; #elif V8_OS_STARBOARD using NativeHandle = starboard::RWLock; #endif diff --git a/deps/v8/src/base/platform/platform-fuchsia.cc b/deps/v8/src/base/platform/platform-fuchsia.cc index 9538d81671cfe6..bd0000c4a1ac92 100644 --- a/deps/v8/src/base/platform/platform-fuchsia.cc +++ b/deps/v8/src/base/platform/platform-fuchsia.cc @@ -127,8 +127,10 @@ bool OS::SetPermissions(void* address, size_t size, MemoryPermission access) { // static bool OS::DiscardSystemPages(void* address, size_t size) { - // TODO(hpayer): Does Fuchsia have madvise? 
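The macOS path above works around a broken pthread_rwlock_t (crbug.com/v8/11399) by backing SharedMutex with a plain mutex and forwarding every shared operation to its exclusive counterpart. Correctness is preserved; only reader concurrency is lost. A minimal sketch of that degradation, using std::mutex rather than V8's native handles (illustrative only, not the V8 class):

#include <mutex>

// A "shared" mutex whose shared operations simply take the exclusive lock,
// mirroring the LockShared()-calls-LockExclusive() fallback above.
class DegradedSharedMutex {
 public:
  void LockExclusive() { mu_.lock(); }
  void UnlockExclusive() { mu_.unlock(); }
  bool TryLockExclusive() { return mu_.try_lock(); }

  // Shared operations forward to the exclusive ones: still correct, just no
  // concurrent readers.
  void LockShared() { LockExclusive(); }
  void UnlockShared() { UnlockExclusive(); }
  bool TryLockShared() { return TryLockExclusive(); }

 private:
  std::mutex mu_;
};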
- return true; + uint64_t address_int = reinterpret_cast(address); + zx_status_t status = zx::vmar::root_self()->op_range( + ZX_VMO_OP_DECOMMIT, address_int, size, nullptr, 0); + return status == ZX_OK; } // static diff --git a/deps/v8/src/base/platform/platform-posix.cc b/deps/v8/src/base/platform/platform-posix.cc index ea6ba0585f3c13..179a17cc0f4ba8 100644 --- a/deps/v8/src/base/platform/platform-posix.cc +++ b/deps/v8/src/base/platform/platform-posix.cc @@ -153,7 +153,7 @@ int GetFlagsForMemoryPermission(OS::MemoryPermission access, flags |= MAP_LAZY; #endif // V8_OS_QNX } -#if V8_OS_MACOSX && V8_HOST_ARCH_ARM64 && defined(MAP_JIT) +#if V8_HAS_PTHREAD_JIT_WRITE_PROTECT if (access == OS::MemoryPermission::kNoAccessWillJitLater) { flags |= MAP_JIT; } diff --git a/deps/v8/src/base/platform/platform-win32.cc b/deps/v8/src/base/platform/platform-win32.cc index 9fbb2570760650..79c1aa06ce957c 100644 --- a/deps/v8/src/base/platform/platform-win32.cc +++ b/deps/v8/src/base/platform/platform-win32.cc @@ -15,9 +15,15 @@ #endif // MINGW_HAS_SECURE_API #endif // __MINGW32__ -#include +#include -#include "src/base/win32-headers.h" +// This has to come after windows.h. +#include +#include // For SymLoadModule64 and al. +#include // For timeGetTime(). +#include // For Module32First and al. + +#include #include "src/base/bits.h" #include "src/base/lazy-instance.h" @@ -26,13 +32,34 @@ #include "src/base/platform/time.h" #include "src/base/timezone-cache.h" #include "src/base/utils/random-number-generator.h" - -#include +#include "src/base/win32-headers.h" #if defined(_MSC_VER) #include #endif // defined(_MSC_VER) +// Check that type sizes and alignments match. +STATIC_ASSERT(sizeof(V8_CONDITION_VARIABLE) == sizeof(CONDITION_VARIABLE)); +STATIC_ASSERT(alignof(V8_CONDITION_VARIABLE) == alignof(CONDITION_VARIABLE)); +STATIC_ASSERT(sizeof(V8_SRWLOCK) == sizeof(SRWLOCK)); +STATIC_ASSERT(alignof(V8_SRWLOCK) == alignof(SRWLOCK)); +STATIC_ASSERT(sizeof(V8_CRITICAL_SECTION) == sizeof(CRITICAL_SECTION)); +STATIC_ASSERT(alignof(V8_CRITICAL_SECTION) == alignof(CRITICAL_SECTION)); + +// Check that CRITICAL_SECTION offsets match. +STATIC_ASSERT(offsetof(V8_CRITICAL_SECTION, DebugInfo) == + offsetof(CRITICAL_SECTION, DebugInfo)); +STATIC_ASSERT(offsetof(V8_CRITICAL_SECTION, LockCount) == + offsetof(CRITICAL_SECTION, LockCount)); +STATIC_ASSERT(offsetof(V8_CRITICAL_SECTION, RecursionCount) == + offsetof(CRITICAL_SECTION, RecursionCount)); +STATIC_ASSERT(offsetof(V8_CRITICAL_SECTION, OwningThread) == + offsetof(CRITICAL_SECTION, OwningThread)); +STATIC_ASSERT(offsetof(V8_CRITICAL_SECTION, LockSemaphore) == + offsetof(CRITICAL_SECTION, LockSemaphore)); +STATIC_ASSERT(offsetof(V8_CRITICAL_SECTION, SpinCount) == + offsetof(CRITICAL_SECTION, SpinCount)); + // Extra functions for MinGW. Most of these are the _s functions which are in // the Microsoft Visual Studio C++ CRT. #ifdef __MINGW32__ diff --git a/deps/v8/src/base/platform/platform.h b/deps/v8/src/base/platform/platform.h index 7beefbe572c3c8..d196578342f58b 100644 --- a/deps/v8/src/base/platform/platform.h +++ b/deps/v8/src/base/platform/platform.h @@ -40,6 +40,17 @@ #include #endif // V8_USE_ADDRESS_SANITIZER +#ifndef V8_NO_FAST_TLS +#if V8_CC_MSVC && V8_HOST_ARCH_IA32 +// __readfsdword is supposed to be declared in intrin.h but it is missing from +// some versions of that file. 
See https://bugs.llvm.org/show_bug.cgi?id=51188 +// And, intrin.h is a very expensive header that we want to avoid here, and +// the cheaper intrin0.h is not available for all build configurations. That is +// why we declare this intrinsic. +extern "C" unsigned long __readfsdword(unsigned long); // NOLINT(runtime/int) +#endif // V8_CC_MSVC && V8_HOST_ARCH_IA32 +#endif // V8_NO_FAST_TLS + namespace v8 { namespace base { diff --git a/deps/v8/src/base/platform/semaphore.cc b/deps/v8/src/base/platform/semaphore.cc index 0cd04634ba14ad..2fc748da87b6bf 100644 --- a/deps/v8/src/base/platform/semaphore.cc +++ b/deps/v8/src/base/platform/semaphore.cc @@ -6,6 +6,8 @@ #if V8_OS_MACOSX #include +#elif V8_OS_WIN +#include #endif #include diff --git a/deps/v8/src/base/platform/time.cc b/deps/v8/src/base/platform/time.cc index c399c52cb2d90d..9979f33fcecdd1 100644 --- a/deps/v8/src/base/platform/time.cc +++ b/deps/v8/src/base/platform/time.cc @@ -19,6 +19,11 @@ #include #if V8_OS_WIN +#include + +// This has to come after windows.h. +#include // For timeGetTime(). + #include "src/base/lazy-instance.h" #include "src/base/win32-headers.h" #endif @@ -69,19 +74,22 @@ int64_t ComputeThreadTicks() { V8_INLINE int64_t ClockNow(clockid_t clk_id) { #if (defined(_POSIX_MONOTONIC_CLOCK) && _POSIX_MONOTONIC_CLOCK >= 0) || \ defined(V8_OS_BSD) || defined(V8_OS_ANDROID) -// On AIX clock_gettime for CLOCK_THREAD_CPUTIME_ID outputs time with -// resolution of 10ms. thread_cputime API provides the time in ns #if defined(V8_OS_AIX) - thread_cputime_t tc; + // On AIX clock_gettime for CLOCK_THREAD_CPUTIME_ID outputs time with + // resolution of 10ms. thread_cputime API provides the time in ns. if (clk_id == CLOCK_THREAD_CPUTIME_ID) { #if defined(__PASE__) // CLOCK_THREAD_CPUTIME_ID clock not supported on IBMi return 0; -#endif +#else + thread_cputime_t tc; if (thread_cputime(-1, &tc) != 0) { UNREACHABLE(); } + return (tc.stime / v8::base::Time::kNanosecondsPerMicrosecond) + + (tc.utime / v8::base::Time::kNanosecondsPerMicrosecond); +#endif // defined(__PASE__) } -#endif +#endif // defined(V8_OS_AIX) struct timespec ts; if (clock_gettime(clk_id, &ts) != 0) { UNREACHABLE(); @@ -94,15 +102,7 @@ V8_INLINE int64_t ClockNow(clockid_t clk_id) { 1; CHECK_GT(kSecondsLimit, ts.tv_sec); int64_t result = int64_t{ts.tv_sec} * v8::base::Time::kMicrosecondsPerSecond; -#if defined(V8_OS_AIX) - if (clk_id == CLOCK_THREAD_CPUTIME_ID) { - result += (tc.stime / v8::base::Time::kNanosecondsPerMicrosecond); - } else { - result += (ts.tv_nsec / v8::base::Time::kNanosecondsPerMicrosecond); - } -#else result += (ts.tv_nsec / v8::base::Time::kNanosecondsPerMicrosecond); -#endif return result; #else // Monotonic clock not supported. return 0; diff --git a/deps/v8/src/base/sys-info.cc b/deps/v8/src/base/sys-info.cc index 01035185e020ed..6f69e2aa9c5e4f 100644 --- a/deps/v8/src/base/sys-info.cc +++ b/deps/v8/src/base/sys-info.cc @@ -23,6 +23,8 @@ #include "src/base/logging.h" #include "src/base/macros.h" #if V8_OS_WIN +#include + #include "src/base/win32-headers.h" #endif diff --git a/deps/v8/src/base/win32-headers.h b/deps/v8/src/base/win32-headers.h index 82555463c09087..e4e845d86d0c12 100644 --- a/deps/v8/src/base/win32-headers.h +++ b/deps/v8/src/base/win32-headers.h @@ -5,6 +5,12 @@ #ifndef V8_BASE_WIN32_HEADERS_H_ #define V8_BASE_WIN32_HEADERS_H_ +// This file contains defines and typedefs that allow popular Windows types to +// be used without the overhead of including windows.h. 
+// This file no longer includes windows.h but it still sets the defines that +// tell windows.h to omit some includes so that the V8 source files that do +// include windows.h will still get the minimal version. + #ifndef WIN32_LEAN_AND_MEAN // WIN32_LEAN_AND_MEAN implies NOCRYPT and NOGDI. #define WIN32_LEAN_AND_MEAN @@ -33,9 +39,6 @@ #define _WIN32_WINNT 0x0600 #endif -#include - -#include // For timeGetTime(). #include // For raise(). #include // For LocalOffset() implementation. #ifdef __MINGW32__ @@ -45,40 +48,81 @@ #define _WIN32_WINNT 0x501 #endif // __MINGW32__ #if !defined(__MINGW32__) || defined(__MINGW64_VERSION_MAJOR) -#include // For SymLoadModule64 and al. #include // For STRUNCATE -#include // For IsWindows8OrGreater(). #endif // !defined(__MINGW32__) || defined(__MINGW64_VERSION_MAJOR) #include // For INT_MAX and al. -#include // For Module32First and al. - -// These additional WIN32 includes have to be right here as the #undef's below -// makes it impossible to have them elsewhere. -#include -#include -#ifndef __MINGW32__ -#include -#endif // __MINGW32__ #include // For _beginthreadex(). #include -#undef VOID -#undef DELETE -#undef IN -#undef THIS -#undef CONST -#undef NAN -#undef UNKNOWN -#undef NONE -#undef ANY -#undef IGNORE -#undef STRICT -#undef GetObject -#undef CreateSemaphore -#undef Yield -#undef RotateRight32 -#undef RotateLeft32 -#undef RotateRight64 -#undef RotateLeft64 +// typedef and define the most commonly used Windows integer types. + +typedef int BOOL; // NOLINT(runtime/int) +typedef unsigned long DWORD; // NOLINT(runtime/int) +typedef long LONG; // NOLINT(runtime/int) +typedef void* LPVOID; +typedef void* PVOID; +typedef void* HANDLE; + +#define WINAPI __stdcall + +#if defined(_WIN64) +typedef unsigned __int64 ULONG_PTR, *PULONG_PTR; +#else +typedef __w64 unsigned long ULONG_PTR, *PULONG_PTR; // NOLINT(runtime/int) +#endif + +typedef struct _RTL_SRWLOCK SRWLOCK; +typedef struct _RTL_CONDITION_VARIABLE CONDITION_VARIABLE; +typedef struct _RTL_CRITICAL_SECTION CRITICAL_SECTION; +typedef struct _RTL_CRITICAL_SECTION_DEBUG* PRTL_CRITICAL_SECTION_DEBUG; + +// Declare V8 versions of some Windows structures. These are needed for +// when we need a concrete type but don't want to pull in Windows.h. We can't +// declare the Windows types so we declare our types and cast to the Windows +// types in a few places. The sizes must match the Windows types so we verify +// that with static asserts in platform-win32.cc. +// ChromeToWindowsType functions are provided for pointer conversions. 
+ +struct V8_SRWLOCK { + PVOID Ptr; +}; + +struct V8_CONDITION_VARIABLE { + PVOID Ptr; +}; + +struct V8_CRITICAL_SECTION { + PRTL_CRITICAL_SECTION_DEBUG DebugInfo; + LONG LockCount; + LONG RecursionCount; + HANDLE OwningThread; + HANDLE LockSemaphore; + ULONG_PTR SpinCount; +}; + +inline SRWLOCK* V8ToWindowsType(V8_SRWLOCK* p) { + return reinterpret_cast(p); +} + +inline const SRWLOCK* V8ToWindowsType(const V8_SRWLOCK* p) { + return reinterpret_cast(p); +} + +inline CONDITION_VARIABLE* V8ToWindowsType(V8_CONDITION_VARIABLE* p) { + return reinterpret_cast(p); +} + +inline const CONDITION_VARIABLE* V8ToWindowsType( + const V8_CONDITION_VARIABLE* p) { + return reinterpret_cast(p); +} + +inline CRITICAL_SECTION* V8ToWindowsType(V8_CRITICAL_SECTION* p) { + return reinterpret_cast(p); +} + +inline const CRITICAL_SECTION* V8ToWindowsType(const V8_CRITICAL_SECTION* p) { + return reinterpret_cast(p); +} #endif // V8_BASE_WIN32_HEADERS_H_ diff --git a/deps/v8/src/baseline/arm/baseline-compiler-arm-inl.h b/deps/v8/src/baseline/arm/baseline-compiler-arm-inl.h index 0fc2389c35c2cb..86a62b658b92c3 100644 --- a/deps/v8/src/baseline/arm/baseline-compiler-arm-inl.h +++ b/deps/v8/src/baseline/arm/baseline-compiler-arm-inl.h @@ -18,7 +18,8 @@ void BaselineCompiler::Prologue() { // Enter the frame here, since CallBuiltin will override lr. __ masm()->EnterFrame(StackFrame::BASELINE); DCHECK_EQ(kJSFunctionRegister, kJavaScriptCallTargetRegister); - int max_frame_size = bytecode_->frame_size() + max_call_args_; + int max_frame_size = + bytecode_->frame_size() + max_call_args_ * kSystemPointerSize; CallBuiltin( kContextRegister, kJSFunctionRegister, kJavaScriptCallArgCountRegister, max_frame_size, kJavaScriptCallNewTargetRegister, bytecode_); diff --git a/deps/v8/src/baseline/arm64/baseline-compiler-arm64-inl.h b/deps/v8/src/baseline/arm64/baseline-compiler-arm64-inl.h index 333c0a2a342dbb..59cffa47b310ea 100644 --- a/deps/v8/src/baseline/arm64/baseline-compiler-arm64-inl.h +++ b/deps/v8/src/baseline/arm64/baseline-compiler-arm64-inl.h @@ -18,7 +18,8 @@ void BaselineCompiler::Prologue() { // Enter the frame here, since CallBuiltin will override lr. __ masm()->EnterFrame(StackFrame::BASELINE); DCHECK_EQ(kJSFunctionRegister, kJavaScriptCallTargetRegister); - int max_frame_size = bytecode_->frame_size() + max_call_args_; + int max_frame_size = + bytecode_->frame_size() + max_call_args_ * kSystemPointerSize; CallBuiltin( kContextRegister, kJSFunctionRegister, kJavaScriptCallArgCountRegister, max_frame_size, kJavaScriptCallNewTargetRegister, bytecode_); diff --git a/deps/v8/src/baseline/baseline-assembler-inl.h b/deps/v8/src/baseline/baseline-assembler-inl.h index 416fdbf98b80b7..83c102176f8b39 100644 --- a/deps/v8/src/baseline/baseline-assembler-inl.h +++ b/deps/v8/src/baseline/baseline-assembler-inl.h @@ -5,16 +5,15 @@ #ifndef V8_BASELINE_BASELINE_ASSEMBLER_INL_H_ #define V8_BASELINE_BASELINE_ASSEMBLER_INL_H_ +#include "src/baseline/baseline-assembler.h" + // TODO(v8:11421): Remove #if once baseline compiler is ported to other // architectures. 
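win32-headers.h above avoids pulling in windows.h by declaring V8-side structs (V8_SRWLOCK, V8_CONDITION_VARIABLE, V8_CRITICAL_SECTION) with the same layout as the real Windows types, converting at call sites with V8ToWindowsType(), and verifying sizes, alignments, and member offsets with the static asserts added to platform-win32.cc. A generic sketch of that mirroring pattern, with made-up type names standing in for a heavyweight third-party header:

#include <cstddef>

// Pretend this is an expensive third-party header we only include in one .cc.
struct ThirdPartyLock {  // stands in for SRWLOCK
  void* ptr;
};

// --- our_header.h: no third-party include required ------------------------
struct MirroredLock {  // stands in for V8_SRWLOCK
  void* Ptr;
};

inline ThirdPartyLock* ToThirdPartyType(MirroredLock* p) {
  return reinterpret_cast<ThirdPartyLock*>(p);
}

// --- our_impl.cc: the only translation unit that sees both definitions -----
static_assert(sizeof(MirroredLock) == sizeof(ThirdPartyLock),
              "layout mirror out of sync");
static_assert(alignof(MirroredLock) == alignof(ThirdPartyLock),
              "layout mirror out of sync");
static_assert(offsetof(MirroredLock, Ptr) == offsetof(ThirdPartyLock, ptr),
              "layout mirror out of sync");

int main() {
  MirroredLock lock{};                                // declared without the heavy header
  ThirdPartyLock* native = ToThirdPartyType(&lock);   // cast only at the boundary
  (void)native;
}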
-#if V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_ARM64 || \ - V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_RISCV64 || V8_TARGET_ARCH_MIPS64 || \ - V8_TARGET_ARCH_MIPS +#if ENABLE_SPARKPLUG #include #include -#include "src/baseline/baseline-assembler.h" #include "src/codegen/interface-descriptors-inl.h" #include "src/interpreter/bytecode-register.h" #include "src/objects/feedback-cell.h" @@ -142,6 +141,6 @@ SaveAccumulatorScope::~SaveAccumulatorScope() { } // namespace internal } // namespace v8 -#endif +#endif // ENABLE_SPARKPLUG #endif // V8_BASELINE_BASELINE_ASSEMBLER_INL_H_ diff --git a/deps/v8/src/baseline/baseline-assembler.h b/deps/v8/src/baseline/baseline-assembler.h index 8cafa8d94aaf14..e1063ff2b26564 100644 --- a/deps/v8/src/baseline/baseline-assembler.h +++ b/deps/v8/src/baseline/baseline-assembler.h @@ -7,9 +7,8 @@ // TODO(v8:11421): Remove #if once baseline compiler is ported to other // architectures. -#if V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_ARM64 || \ - V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_RISCV64 || V8_TARGET_ARCH_MIPS64 || \ - V8_TARGET_ARCH_MIPS +#include "src/flags/flags.h" +#if ENABLE_SPARKPLUG #include "src/codegen/macro-assembler.h" #include "src/objects/tagged-index.h" diff --git a/deps/v8/src/baseline/baseline-compiler.cc b/deps/v8/src/baseline/baseline-compiler.cc index 83286c0aa71af8..f30812c85a278b 100644 --- a/deps/v8/src/baseline/baseline-compiler.cc +++ b/deps/v8/src/baseline/baseline-compiler.cc @@ -4,14 +4,13 @@ // TODO(v8:11421): Remove #if once baseline compiler is ported to other // architectures. -#include "src/base/bits.h" -#if V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_ARM64 || \ - V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_RISCV64 || V8_TARGET_ARCH_MIPS64 || \ - V8_TARGET_ARCH_MIPS +#include "src/flags/flags.h" +#if ENABLE_SPARKPLUG #include #include +#include "src/base/bits.h" #include "src/baseline/baseline-assembler-inl.h" #include "src/baseline/baseline-assembler.h" #include "src/baseline/baseline-compiler.h" @@ -242,8 +241,10 @@ namespace { // than pre-allocating a large enough buffer. #ifdef V8_TARGET_ARCH_IA32 const int kAverageBytecodeToInstructionRatio = 5; +const int kMinimumEstimatedInstructionSize = 200; #else const int kAverageBytecodeToInstructionRatio = 7; +const int kMinimumEstimatedInstructionSize = 300; #endif std::unique_ptr AllocateBuffer( Isolate* isolate, Handle bytecodes, @@ -259,9 +260,6 @@ std::unique_ptr AllocateBuffer( if (code_location == BaselineCompiler::kOnHeap && Code::SizeFor(estimated_size) < heap->MaxRegularHeapObjectSize(AllocationType::kCode)) { - // TODO(victorgomes): We're currently underestimating the size of the - // buffer, since we don't know how big the reloc info will be. We could - // use a separate zone vector for the RelocInfo. 
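Several baseline files above replace a repeated list of V8_TARGET_ARCH_* checks with a single ENABLE_SPARKPLUG macro obtained via src/flags/flags.h, so the set of supported architectures is defined once. A sketch of the pattern with illustrative names (not the actual V8 macro definitions):

// feature-flags.h (illustrative): derive one feature macro from the
// architecture checks, in exactly one place.
#if defined(TARGET_ARCH_X64) || defined(TARGET_ARCH_ARM64) || \
    defined(TARGET_ARCH_IA32) || defined(TARGET_ARCH_ARM)
#define ENABLE_BASELINE_FEATURE 1
#else
#define ENABLE_BASELINE_FEATURE 0
#endif

// any-user.cc: every user tests the single macro instead of repeating the
// architecture list.
#include <cstdio>
#if ENABLE_BASELINE_FEATURE
void CompileBaseline() { std::puts("baseline compiler enabled"); }
#else
void CompileBaseline() { std::puts("baseline compiler disabled on this arch"); }
#endif

int main() { CompileBaseline(); }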
return NewOnHeapAssemblerBuffer(isolate, estimated_size); } return NewAssemblerBuffer(RoundUp(estimated_size, 4 * KB)); @@ -271,7 +269,7 @@ std::unique_ptr AllocateBuffer( BaselineCompiler::BaselineCompiler( Isolate* isolate, Handle shared_function_info, Handle bytecode, CodeLocation code_location) - : isolate_(isolate), + : local_isolate_(isolate->AsLocalIsolate()), stats_(isolate->counters()->runtime_call_stats()), shared_function_info_(shared_function_info), bytecode_(bytecode), @@ -329,7 +327,8 @@ MaybeHandle BaselineCompiler::Build(Isolate* isolate) { } int BaselineCompiler::EstimateInstructionSize(BytecodeArray bytecode) { - return bytecode.length() * kAverageBytecodeToInstructionRatio; + return bytecode.length() * kAverageBytecodeToInstructionRatio + + kMinimumEstimatedInstructionSize; } interpreter::Register BaselineCompiler::RegisterOperand(int operand_index) { @@ -354,7 +353,7 @@ void BaselineCompiler::StoreRegisterPair(int operand_index, Register val0, template Handle BaselineCompiler::Constant(int operand_index) { return Handle::cast( - iterator().GetConstantForIndexOperand(operand_index, isolate_)); + iterator().GetConstantForIndexOperand(operand_index, local_isolate_)); } Smi BaselineCompiler::ConstantSmi(int operand_index) { return iterator().GetConstantAtIndexAsSmi(operand_index); @@ -559,7 +558,7 @@ void BaselineCompiler::UpdateInterruptBudgetAndJumpToLabel( if (weight < 0) { SaveAccumulatorScope accumulator_scope(&basm_); - CallRuntime(Runtime::kBytecodeBudgetInterruptFromBytecode, + CallRuntime(Runtime::kBytecodeBudgetInterruptWithStackCheckFromBytecode, __ FunctionOperand()); } } @@ -1871,7 +1870,7 @@ void BaselineCompiler::VisitJumpLoop() { Register osr_level = scratch; __ LoadRegister(osr_level, interpreter::Register::bytecode_array()); __ LoadByteField(osr_level, osr_level, - BytecodeArray::kOsrNestingLevelOffset); + BytecodeArray::kOsrLoopNestingLevelOffset); int loop_depth = iterator().GetImmediateOperand(1); __ JumpIfByte(Condition::kUnsignedLessThanEqual, osr_level, loop_depth, &osr_not_armed); @@ -2057,7 +2056,7 @@ void BaselineCompiler::VisitSetPendingMessage() { BaselineAssembler::ScratchRegisterScope scratch_scope(&basm_); Register pending_message = scratch_scope.AcquireScratch(); __ Move(pending_message, - ExternalReference::address_of_pending_message_obj(isolate_)); + ExternalReference::address_of_pending_message(local_isolate_)); Register tmp = scratch_scope.AcquireScratch(); __ Move(tmp, kInterpreterAccumulatorRegister); __ Move(kInterpreterAccumulatorRegister, MemOperand(pending_message, 0)); @@ -2252,4 +2251,4 @@ DEBUG_BREAK_BYTECODE_LIST(DEBUG_BREAK) } // namespace internal } // namespace v8 -#endif +#endif // ENABLE_SPARKPLUG diff --git a/deps/v8/src/baseline/baseline-compiler.h b/deps/v8/src/baseline/baseline-compiler.h index 68478804ea71dc..d8cd9ac5c68362 100644 --- a/deps/v8/src/baseline/baseline-compiler.h +++ b/deps/v8/src/baseline/baseline-compiler.h @@ -7,9 +7,8 @@ // TODO(v8:11421): Remove #if once baseline compiler is ported to other // architectures. 
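The buffer-size heuristic above becomes bytecode length times kAverageBytecodeToInstructionRatio plus kMinimumEstimatedInstructionSize, so very small functions still get a workable minimum buffer, and the on-heap path is only taken when the estimated Code object fits a regular heap object. A rough numeric sketch of that decision (the ratio and minimum mirror the diff; the size limit is a made-up placeholder for the value the real code queries from the heap):

#include <cstdio>

constexpr int kAverageBytecodeToInstructionRatio = 7;   // non-IA32 flavor above
constexpr int kMinimumEstimatedInstructionSize = 300;

// Placeholder for heap->MaxRegularHeapObjectSize(AllocationType::kCode).
constexpr int kMaxRegularCodeObjectSize = 128 * 1024;

int EstimateInstructionSize(int bytecode_length) {
  return bytecode_length * kAverageBytecodeToInstructionRatio +
         kMinimumEstimatedInstructionSize;
}

int main() {
  for (int len : {10, 100, 20000}) {
    int estimate = EstimateInstructionSize(len);
    bool on_heap = estimate < kMaxRegularCodeObjectSize;
    std::printf("bytecode %5d bytes -> estimate %6d bytes, %s buffer\n", len,
                estimate, on_heap ? "on-heap" : "off-heap");
  }
}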
-#if V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_ARM64 || \ - V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_RISCV64 || V8_TARGET_ARCH_MIPS64 || \ - V8_TARGET_ARCH_MIPS +#include "src/flags/flags.h" +#if ENABLE_SPARKPLUG #include "src/base/logging.h" #include "src/base/threaded-list.h" @@ -160,7 +159,7 @@ class BaselineCompiler { const interpreter::BytecodeArrayIterator& iterator() { return iterator_; } - Isolate* isolate_; + LocalIsolate* local_isolate_; RuntimeCallStats* stats_; Handle shared_function_info_; Handle bytecode_; @@ -197,6 +196,6 @@ class BaselineCompiler { } // namespace internal } // namespace v8 -#endif +#endif // ENABLE_SPARKPLUG #endif // V8_BASELINE_BASELINE_COMPILER_H_ diff --git a/deps/v8/src/baseline/baseline.cc b/deps/v8/src/baseline/baseline.cc index be6e4ec3b32d6a..cec0805aece31a 100644 --- a/deps/v8/src/baseline/baseline.cc +++ b/deps/v8/src/baseline/baseline.cc @@ -9,9 +9,8 @@ // TODO(v8:11421): Remove #if once baseline compiler is ported to other // architectures. -#if V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_ARM64 || \ - V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_RISCV64 || V8_TARGET_ARCH_MIPS64 || \ - V8_TARGET_ARCH_MIPS +#include "src/flags/flags.h" +#if ENABLE_SPARKPLUG #include "src/baseline/baseline-assembler-inl.h" #include "src/baseline/baseline-compiler.h" diff --git a/deps/v8/src/baseline/ia32/baseline-compiler-ia32-inl.h b/deps/v8/src/baseline/ia32/baseline-compiler-ia32-inl.h index 6ce19ec60d48be..f68d2c21fb5ffe 100644 --- a/deps/v8/src/baseline/ia32/baseline-compiler-ia32-inl.h +++ b/deps/v8/src/baseline/ia32/baseline-compiler-ia32-inl.h @@ -17,7 +17,8 @@ namespace baseline { void BaselineCompiler::Prologue() { DCHECK_EQ(kJSFunctionRegister, kJavaScriptCallTargetRegister); - int max_frame_size = bytecode_->frame_size() + max_call_args_; + int max_frame_size = + bytecode_->frame_size() + max_call_args_ * kSystemPointerSize; CallBuiltin( kContextRegister, kJSFunctionRegister, kJavaScriptCallArgCountRegister, max_frame_size, kJavaScriptCallNewTargetRegister, bytecode_); diff --git a/deps/v8/src/baseline/mips/baseline-assembler-mips-inl.h b/deps/v8/src/baseline/mips/baseline-assembler-mips-inl.h index 2e41e554dac02b..31bc96861b9b46 100644 --- a/deps/v8/src/baseline/mips/baseline-assembler-mips-inl.h +++ b/deps/v8/src/baseline/mips/baseline-assembler-mips-inl.h @@ -22,7 +22,7 @@ class BaselineAssembler::ScratchRegisterScope { if (!assembler_->scratch_register_scope_) { // If we haven't opened a scratch scope yet, for the first one add a // couple of extra registers. 
- wrapped_scope_.Include(t0.bit() | t1.bit() | t2.bit() | t3.bit()); + wrapped_scope_.Include(t4.bit() | t5.bit() | t6.bit() | t7.bit()); } assembler_->scratch_register_scope_ = this; } diff --git a/deps/v8/src/baseline/mips/baseline-compiler-mips-inl.h b/deps/v8/src/baseline/mips/baseline-compiler-mips-inl.h index 6897d9b48cbc7c..3e8bb98e14c196 100644 --- a/deps/v8/src/baseline/mips/baseline-compiler-mips-inl.h +++ b/deps/v8/src/baseline/mips/baseline-compiler-mips-inl.h @@ -18,7 +18,8 @@ void BaselineCompiler::Prologue() { ASM_CODE_COMMENT(&masm_); __ masm()->EnterFrame(StackFrame::BASELINE); DCHECK_EQ(kJSFunctionRegister, kJavaScriptCallTargetRegister); - int max_frame_size = bytecode_->frame_size() + max_call_args_; + int max_frame_size = + bytecode_->frame_size() + max_call_args_ * kSystemPointerSize; CallBuiltin( kContextRegister, kJSFunctionRegister, kJavaScriptCallArgCountRegister, max_frame_size, kJavaScriptCallNewTargetRegister, bytecode_); diff --git a/deps/v8/src/baseline/mips64/baseline-compiler-mips64-inl.h b/deps/v8/src/baseline/mips64/baseline-compiler-mips64-inl.h index 5971e17aa420e7..f919635674a524 100644 --- a/deps/v8/src/baseline/mips64/baseline-compiler-mips64-inl.h +++ b/deps/v8/src/baseline/mips64/baseline-compiler-mips64-inl.h @@ -18,7 +18,8 @@ void BaselineCompiler::Prologue() { ASM_CODE_COMMENT(&masm_); __ masm()->EnterFrame(StackFrame::BASELINE); DCHECK_EQ(kJSFunctionRegister, kJavaScriptCallTargetRegister); - int max_frame_size = bytecode_->frame_size() + max_call_args_; + int max_frame_size = + bytecode_->frame_size() + max_call_args_ * kSystemPointerSize; CallBuiltin( kContextRegister, kJSFunctionRegister, kJavaScriptCallArgCountRegister, max_frame_size, kJavaScriptCallNewTargetRegister, bytecode_); diff --git a/deps/v8/src/baseline/riscv64/baseline-compiler-riscv64-inl.h b/deps/v8/src/baseline/riscv64/baseline-compiler-riscv64-inl.h index deb5aba0efb3ea..fc73105b8e9ad6 100644 --- a/deps/v8/src/baseline/riscv64/baseline-compiler-riscv64-inl.h +++ b/deps/v8/src/baseline/riscv64/baseline-compiler-riscv64-inl.h @@ -18,7 +18,8 @@ void BaselineCompiler::Prologue() { // Enter the frame here, since CallBuiltin will override lr. 
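The prologue fix repeated across the architecture files above multiplies max_call_args_ by kSystemPointerSize: frame_size() is already in bytes, while max_call_args_ counts argument slots, so adding the raw count under-reserved the frame. A small worked example with illustrative values:

#include <cstdio>

constexpr int kSystemPointerSize = 8;  // 64-bit target assumed for the example

int main() {
  int frame_size = 96;    // bytes reserved for interpreter registers
  int max_call_args = 5;  // argument *slots* of the largest call in the function

  int before = frame_size + max_call_args;                       // 101: 5 slots counted as 5 bytes
  int after = frame_size + max_call_args * kSystemPointerSize;   // 136: 5 slots * 8 bytes each

  std::printf("before fix: %d bytes\n", before);
  std::printf("after fix:  %d bytes\n", after);
}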
__ masm()->EnterFrame(StackFrame::BASELINE); DCHECK_EQ(kJSFunctionRegister, kJavaScriptCallTargetRegister); - int max_frame_size = bytecode_->frame_size() + max_call_args_; + int max_frame_size = + bytecode_->frame_size() + max_call_args_ * kSystemPointerSize; CallBuiltin( kContextRegister, kJSFunctionRegister, kJavaScriptCallArgCountRegister, max_frame_size, kJavaScriptCallNewTargetRegister, bytecode_); diff --git a/deps/v8/src/baseline/x64/baseline-compiler-x64-inl.h b/deps/v8/src/baseline/x64/baseline-compiler-x64-inl.h index cc7956fadc78da..b4742f39559957 100644 --- a/deps/v8/src/baseline/x64/baseline-compiler-x64-inl.h +++ b/deps/v8/src/baseline/x64/baseline-compiler-x64-inl.h @@ -18,7 +18,8 @@ namespace baseline { void BaselineCompiler::Prologue() { ASM_CODE_COMMENT(&masm_); DCHECK_EQ(kJSFunctionRegister, kJavaScriptCallTargetRegister); - int max_frame_size = bytecode_->frame_size() + max_call_args_; + int max_frame_size = + bytecode_->frame_size() + max_call_args_ * kSystemPointerSize; CallBuiltin( kContextRegister, kJSFunctionRegister, kJavaScriptCallArgCountRegister, max_frame_size, kJavaScriptCallNewTargetRegister, bytecode_); @@ -33,7 +34,8 @@ void BaselineCompiler::PrologueFillFrame() { bytecode_->incoming_new_target_or_generator_register(); if (FLAG_debug_code) { __ masm()->Cmp(kInterpreterAccumulatorRegister, - isolate_->factory()->undefined_value()); + handle(ReadOnlyRoots(local_isolate_).undefined_value(), + local_isolate_)); __ masm()->Assert(equal, AbortReason::kUnexpectedValue); } int register_count = bytecode_->register_count(); diff --git a/deps/v8/src/bigint/bigint-internal.cc b/deps/v8/src/bigint/bigint-internal.cc index 828a450e8aaec2..2d74f3572cc6fd 100644 --- a/deps/v8/src/bigint/bigint-internal.cc +++ b/deps/v8/src/bigint/bigint-internal.cc @@ -7,6 +7,15 @@ namespace v8 { namespace bigint { +// Used for checking consistency between library and public header. 
+#if DEBUG +#if V8_ADVANCED_BIGINT_ALGORITHMS +bool kAdvancedAlgorithmsEnabledInLibrary = true; +#else +bool kAdvancedAlgorithmsEnabledInLibrary = false; +#endif // V8_ADVANCED_BIGINT_ALGORITHMS +#endif // DEBUG + ProcessorImpl::ProcessorImpl(Platform* platform) : platform_(platform) {} ProcessorImpl::~ProcessorImpl() { delete platform_; } @@ -58,7 +67,16 @@ void ProcessorImpl::Divide(RWDigits Q, Digits A, Digits B) { if (B.len() < kBurnikelThreshold) { return DivideSchoolbook(Q, RWDigits(nullptr, 0), A, B); } +#if !V8_ADVANCED_BIGINT_ALGORITHMS return DivideBurnikelZiegler(Q, RWDigits(nullptr, 0), A, B); +#else + if (B.len() < kBarrettThreshold || A.len() == B.len()) { + DivideBurnikelZiegler(Q, RWDigits(nullptr, 0), A, B); + } else { + ScratchDigits R(B.len()); + DivideBarrett(Q, R, A, B); + } +#endif } void ProcessorImpl::Modulo(RWDigits R, Digits A, Digits B) { @@ -84,7 +102,15 @@ void ProcessorImpl::Modulo(RWDigits R, Digits A, Digits B) { } int q_len = DivideResultLength(A, B); ScratchDigits Q(q_len); +#if !V8_ADVANCED_BIGINT_ALGORITHMS return DivideBurnikelZiegler(Q, R, A, B); +#else + if (B.len() < kBarrettThreshold || A.len() == B.len()) { + DivideBurnikelZiegler(Q, R, A, B); + } else { + DivideBarrett(Q, R, A, B); + } +#endif } Status Processor::Multiply(RWDigits Z, Digits X, Digits Y) { diff --git a/deps/v8/src/bigint/bigint-internal.h b/deps/v8/src/bigint/bigint-internal.h index 41ef9526e586bf..4c214153bf36f1 100644 --- a/deps/v8/src/bigint/bigint-internal.h +++ b/deps/v8/src/bigint/bigint-internal.h @@ -18,6 +18,10 @@ constexpr int kFftThreshold = 1500; constexpr int kFftInnerThreshold = 200; constexpr int kBurnikelThreshold = 57; +constexpr int kNewtonInversionThreshold = 50; +// kBarrettThreshold is defined in bigint.h. + +constexpr int kToStringFastThreshold = 43; class ProcessorImpl : public Processor { public: @@ -47,11 +51,24 @@ class ProcessorImpl : public Processor { void Toom3Main(RWDigits Z, Digits X, Digits Y); void MultiplyFFT(RWDigits Z, Digits X, Digits Y); + + void DivideBarrett(RWDigits Q, RWDigits R, Digits A, Digits B); + void DivideBarrett(RWDigits Q, RWDigits R, Digits A, Digits B, Digits I, + RWDigits scratch); + + void Invert(RWDigits Z, Digits V, RWDigits scratch); + void InvertBasecase(RWDigits Z, Digits V, RWDigits scratch); + void InvertNewton(RWDigits Z, Digits V, RWDigits scratch); #endif // V8_ADVANCED_BIGINT_ALGORITHMS // {out_length} initially contains the allocated capacity of {out}, and // upon return will be set to the actual length of the result string. void ToString(char* out, int* out_length, Digits X, int radix, bool sign); + void ToStringImpl(char* out, int* out_length, Digits X, int radix, bool sign, + bool use_fast_algorithm); + + void FromString(RWDigits Z, FromStringAccumulator* accumulator); + void FromStringClassic(RWDigits Z, FromStringAccumulator* accumulator); bool should_terminate() { return status_ == Status::kInterrupted; } @@ -78,6 +95,20 @@ class ProcessorImpl : public Processor { Platform* platform_; }; +// These constants are primarily needed for Barrett division in div-barrett.cc, +// and they're also needed by fast to-string conversion in tostring.cc. +constexpr int DivideBarrettScratchSpace(int n) { return n + 2; } +// Local values S and W need "n plus a few" digits; U needs 2*n "plus a few". +// In all tested cases the "few" were either 2 or 3, so give 5 to be safe. +// S and W are not live at the same time. 
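Divide() and Modulo() above now select an algorithm from the divisor length: schoolbook division below kBurnikelThreshold, Burnikel-Ziegler up to kBarrettThreshold (or when the operands have equal length), and Barrett division beyond that when V8_ADVANCED_BIGINT_ALGORITHMS is compiled in. A sketch of that dispatch shape, with stubs standing in for the real digit-vector routines:

#include <cstdio>

// Thresholds as defined above, in digit counts.
constexpr int kBurnikelThreshold = 57;
constexpr int kBarrettThreshold = 13310;

const char* DivideSchoolbook() { return "schoolbook"; }
const char* DivideBurnikelZiegler() { return "Burnikel-Ziegler"; }
const char* DivideBarrett() { return "Barrett"; }

const char* ChooseDivision(int a_len, int b_len, bool advanced_enabled) {
  if (b_len < kBurnikelThreshold) return DivideSchoolbook();
  if (!advanced_enabled) return DivideBurnikelZiegler();
  // Barrett only pays off for a long divisor and a strictly longer dividend.
  if (b_len < kBarrettThreshold || a_len == b_len) return DivideBurnikelZiegler();
  return DivideBarrett();
}

int main() {
  std::printf("%s\n", ChooseDivision(100, 40, true));       // schoolbook
  std::printf("%s\n", ChooseDivision(2000, 1000, true));    // Burnikel-Ziegler
  std::printf("%s\n", ChooseDivision(40000, 20000, true));  // Barrett
}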
+constexpr int kInvertNewtonExtraSpace = 5; +constexpr int InvertNewtonScratchSpace(int n) { + return 3 * n + 2 * kInvertNewtonExtraSpace; +} +constexpr int InvertScratchSpace(int n) { + return n < kNewtonInversionThreshold ? 2 * n : InvertNewtonScratchSpace(n); +} + #define CHECK(cond) \ if (!(cond)) { \ std::cerr << __FILE__ << ":" << __LINE__ << ": "; \ diff --git a/deps/v8/src/bigint/bigint.h b/deps/v8/src/bigint/bigint.h index 6d3790808c7fc6..218bf4616cb903 100644 --- a/deps/v8/src/bigint/bigint.h +++ b/deps/v8/src/bigint/bigint.h @@ -10,6 +10,7 @@ #include #include #include +#include namespace v8 { namespace bigint { @@ -23,6 +24,8 @@ namespace bigint { std::cerr << "Assertion failed: " #cond "\n"; \ abort(); \ } + +extern bool kAdvancedAlgorithmsEnabledInLibrary; #else #define BIGINT_H_DCHECK(cond) (void(0)) #endif @@ -233,6 +236,8 @@ bool SubtractSigned(RWDigits Z, Digits X, bool x_negative, Digits Y, enum class Status { kOk, kInterrupted }; +class FromStringAccumulator; + class Processor { public: // Takes ownership of {platform}. @@ -256,6 +261,8 @@ class Processor { // {out_length} initially contains the allocated capacity of {out}, and // upon return will be set to the actual length of the result string. Status ToString(char* out, int* out_length, Digits X, int radix, bool sign); + + Status FromString(RWDigits Z, FromStringAccumulator* accumulator); }; inline int AddResultLength(int x_length, int y_length) { @@ -274,8 +281,19 @@ inline int SubtractSignedResultLength(int x_length, int y_length, inline int MultiplyResultLength(Digits X, Digits Y) { return X.len() + Y.len(); } +constexpr int kBarrettThreshold = 13310; inline int DivideResultLength(Digits A, Digits B) { - return A.len() - B.len() + 1; +#if V8_ADVANCED_BIGINT_ALGORITHMS + BIGINT_H_DCHECK(kAdvancedAlgorithmsEnabledInLibrary); + // The Barrett division algorithm needs one extra digit for temporary use. + int kBarrettExtraScratch = B.len() >= kBarrettThreshold ? 1 : 0; +#else + // If this fails, set -DV8_ADVANCED_BIGINT_ALGORITHMS in any compilation unit + // that #includes this header. + BIGINT_H_DCHECK(!kAdvancedAlgorithmsEnabledInLibrary); + constexpr int kBarrettExtraScratch = 0; +#endif + return A.len() - B.len() + 1 + kBarrettExtraScratch; } inline int ModuloResultLength(Digits B) { return B.len(); } @@ -283,9 +301,207 @@ int ToStringResultLength(Digits X, int radix, bool sign); // In DEBUG builds, the result of {ToString} will be initialized to this value. constexpr char kStringZapValue = '?'; +// Support for parsing BigInts from Strings, using an Accumulator object +// for intermediate state. + +class ProcessorImpl; + +#if !defined(DEBUG) && (defined(__GNUC__) || defined(__clang__)) +// Clang supports this since 3.9, GCC since 4.x. +#define ALWAYS_INLINE inline __attribute__((always_inline)) +#elif !defined(DEBUG) && defined(_MSC_VER) +#define ALWAYS_INLINE __forceinline +#else +#define ALWAYS_INLINE inline +#endif + +static constexpr int kStackParts = 8; + +// A container object for all metadata required for parsing a BigInt from +// a string. +// Aggressively optimized not to waste instructions for small cases, while +// also scaling transparently to huge cases. +// Defined here in the header so that it can be inlined. +class FromStringAccumulator { + public: + enum class Result { kOk, kMaxSizeExceeded }; + + // Step 1: Create a FromStringAccumulator instance. For best performance, + // stack allocation is recommended. 
+ // {max_digits} is only used for refusing to grow beyond a given size + // (see "Step 2" below). It does not cause pre-allocation, so feel free to + // specify a large maximum. + // TODO(jkummerow): The limit applies to the number of intermediate chunks, + // whereas the final result will be slightly smaller (depending on {radix}). + // So for sufficiently large N, setting max_digits=N here will not actually + // allow parsing BigInts with N digits. We can fix that if/when anyone cares. + explicit FromStringAccumulator(int max_digits) + : max_digits_(std::max(max_digits - kStackParts, kStackParts)) {} + + // Step 2: Call this method to read all characters. + // {Char} should be a character type, such as uint8_t or uint16_t. + // {end} should be one past the last character (i.e. {start == end} would + // indicate an empty string). + // Returns the current position when an invalid character is encountered. + template + ALWAYS_INLINE const Char* Parse(const Char* start, const Char* end, + digit_t radix); + + // Step 3: Check if a result is available, and determine its required + // allocation size. + Result result() { return result_; } + int ResultLength() { + return std::max(stack_parts_used_, static_cast(heap_parts_.size())); + } + + // Step 4: Use BigIntProcessor::FromString() to retrieve the result into an + // {RWDigits} struct allocated for the size returned by step 2. + + private: + friend class ProcessorImpl; + + ALWAYS_INLINE bool AddPart(digit_t multiplier, digit_t part, + bool is_last = false); + + digit_t stack_parts_[kStackParts]; + std::vector heap_parts_; + digit_t max_multiplier_{0}; + digit_t last_multiplier_; + const int max_digits_; + Result result_{Result::kOk}; + int stack_parts_used_{0}; + bool inline_everything_{false}; +}; + +// The rest of this file is the inlineable implementation of +// FromStringAccumulator methods. + +#if defined(__GNUC__) || defined(__clang__) +// Clang supports this since 3.9, GCC since 5.x. +#define HAVE_BUILTIN_MUL_OVERFLOW 1 +#else +#define HAVE_BUILTIN_MUL_OVERFLOW 0 +#endif + +// Numerical value of the first 127 ASCII characters, using 255 as sentinel +// for "invalid". +static constexpr uint8_t kCharValue[] = { + 255, 255, 255, 255, 255, 255, 255, 255, // 0..7 + 255, 255, 255, 255, 255, 255, 255, 255, // 8..15 + 255, 255, 255, 255, 255, 255, 255, 255, // 16..23 + 255, 255, 255, 255, 255, 255, 255, 255, // 24..31 + 255, 255, 255, 255, 255, 255, 255, 255, // 32..39 + 255, 255, 255, 255, 255, 255, 255, 255, // 40..47 + 0, 1, 2, 3, 4, 5, 6, 7, // 48..55 '0' == 48 + 8, 9, 255, 255, 255, 255, 255, 255, // 56..63 '9' == 57 + 255, 10, 11, 12, 13, 14, 15, 16, // 64..71 'A' == 65 + 17, 18, 19, 20, 21, 22, 23, 24, // 72..79 + 25, 26, 27, 28, 29, 30, 31, 32, // 80..87 + 33, 34, 35, 255, 255, 255, 255, 255, // 88..95 'Z' == 90 + 255, 10, 11, 12, 13, 14, 15, 16, // 96..103 'a' == 97 + 17, 18, 19, 20, 21, 22, 23, 24, // 104..111 + 25, 26, 27, 28, 29, 30, 31, 32, // 112..119 + 33, 34, 35, 255, 255, 255, 255, 255, // 120..127 'z' == 122 +}; +template +const Char* FromStringAccumulator::Parse(const Char* start, const Char* end, + digit_t radix) { + BIGINT_H_DCHECK(2 <= radix && radix <= 36); + const Char* current = start; +#if !HAVE_BUILTIN_MUL_OVERFLOW + const digit_t kMaxMultiplier = (~digit_t{0}) / radix; +#endif +#if HAVE_TWODIGIT_T // The inlined path requires twodigit_t availability. + // The max supported radix is 36, and Math.log2(36) == 5.169..., so we + // need at most 5.17 bits per char. 
+ static constexpr int kInlineThreshold = kStackParts * kDigitBits * 100 / 517; + inline_everything_ = (end - start) <= kInlineThreshold; +#endif + bool done = false; + do { + digit_t multiplier = 1; + digit_t part = 0; + while (true) { + digit_t d; + uint32_t c = *current; + if (c > 127 || (d = bigint::kCharValue[c]) >= radix) { + done = true; + break; + } + +#if HAVE_BUILTIN_MUL_OVERFLOW + digit_t new_multiplier; + if (__builtin_mul_overflow(multiplier, radix, &new_multiplier)) break; + multiplier = new_multiplier; +#else + if (multiplier > kMaxMultiplier) break; + multiplier *= radix; +#endif + part = part * radix + d; + + ++current; + if (current == end) { + done = true; + break; + } + } + if (!AddPart(multiplier, part, done)) return current; + } while (!done); + return current; +} + +bool FromStringAccumulator::AddPart(digit_t multiplier, digit_t part, + bool is_last) { +#if HAVE_TWODIGIT_T + if (inline_everything_) { + // Inlined version of {MultiplySingle}. + digit_t carry = part; + digit_t high = 0; + for (int i = 0; i < stack_parts_used_; i++) { + twodigit_t result = twodigit_t{stack_parts_[i]} * multiplier; + digit_t new_high = result >> bigint::kDigitBits; + digit_t low = static_cast(result); + result = twodigit_t{low} + high + carry; + carry = result >> bigint::kDigitBits; + stack_parts_[i] = static_cast(result); + high = new_high; + } + stack_parts_[stack_parts_used_++] = carry + high; + return true; + } +#else + BIGINT_H_DCHECK(!inline_everything_); +#endif + if (is_last) { + last_multiplier_ = multiplier; + } else { + BIGINT_H_DCHECK(max_multiplier_ == 0 || max_multiplier_ == multiplier); + max_multiplier_ = multiplier; + } + if (stack_parts_used_ < kStackParts) { + stack_parts_[stack_parts_used_++] = part; + return true; + } + if (heap_parts_.size() == 0) { + // Initialize heap storage. Copy the stack part to make things easier later. + heap_parts_.reserve(kStackParts * 2); + for (int i = 0; i < kStackParts; i++) { + heap_parts_.push_back(stack_parts_[i]); + } + } + if (static_cast(heap_parts_.size()) >= max_digits_ && !is_last) { + result_ = Result::kMaxSizeExceeded; + return false; + } + heap_parts_.push_back(part); + return true; +} + } // namespace bigint } // namespace v8 #undef BIGINT_H_DCHECK +#undef ALWAYS_INLINE +#undef HAVE_BUILTIN_MUL_OVERFLOW #endif // V8_BIGINT_BIGINT_H_ diff --git a/deps/v8/src/bigint/div-barrett.cc b/deps/v8/src/bigint/div-barrett.cc new file mode 100644 index 00000000000000..39f09d0ac15804 --- /dev/null +++ b/deps/v8/src/bigint/div-barrett.cc @@ -0,0 +1,366 @@ +// Copyright 2021 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +// Barrett division, finding the inverse with Newton's method. +// Reference: "Fast Division of Large Integers" by Karl Hasselström, +// found at https://treskal.com/s/masters-thesis.pdf + +// Many thanks to Karl Wiberg, k@w5.se, for both writing up an +// understandable theoretical description of the algorithm and privately +// providing a demo implementation, on which the implementation in this file is +// based. 
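The Parse() loop above packs as many characters as fit into one machine word, recording radix^(characters in the chunk) as that chunk's multiplier, so the later combination step only has to deal with word-sized parts. A standalone sketch of that splitting for decimal input (64-bit digits assumed; validation and non-decimal radixes omitted, names are illustrative):

#include <cstdint>
#include <cstdio>
#include <vector>

using digit_t = uint64_t;

struct Part { digit_t multiplier; digit_t value; };

std::vector<Part> SplitIntoParts(const char* str, digit_t radix) {
  std::vector<Part> parts;
  const digit_t kMaxMultiplier = ~digit_t{0} / radix;
  digit_t multiplier = 1, part = 0;
  for (const char* p = str; *p; ++p) {
    digit_t d = static_cast<digit_t>(*p - '0');  // decimal only, no validation
    if (multiplier > kMaxMultiplier) {           // word is full: flush this part
      parts.push_back({multiplier, part});
      multiplier = 1;
      part = 0;
    }
    multiplier *= radix;
    part = part * radix + d;
  }
  parts.push_back({multiplier, part});  // last part, possibly shorter
  return parts;
}

int main() {
  // 25 decimal digits do not fit in one 64-bit word, so two parts result.
  auto parts = SplitIntoParts("1234567890123456789012345", 10);
  for (const Part& p : parts)
    std::printf("part %llu with multiplier %llu\n",
                (unsigned long long)p.value, (unsigned long long)p.multiplier);
}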
+ +#include + +#include "src/bigint/bigint-internal.h" +#include "src/bigint/digit-arithmetic.h" +#include "src/bigint/div-helpers.h" +#include "src/bigint/vector-arithmetic.h" + +namespace v8 { +namespace bigint { + +namespace { + +void DcheckIntegerPartRange(Digits X, digit_t min, digit_t max) { +#if DEBUG + digit_t integer_part = X.msd(); + DCHECK(integer_part >= min); + DCHECK(integer_part <= max); +#else + USE(X); + USE(min); + USE(max); +#endif +} + +} // namespace + +// Z := (the fractional part of) 1/V, via naive division. +// See comments at {Invert} and {InvertNewton} below for details. +void ProcessorImpl::InvertBasecase(RWDigits Z, Digits V, RWDigits scratch) { + DCHECK(Z.len() > V.len()); + DCHECK(V.len() > 0); // NOLINT(readability/check) + DCHECK(scratch.len() >= 2 * V.len()); + int n = V.len(); + RWDigits X(scratch, 0, 2 * n); + digit_t borrow = 0; + int i = 0; + for (; i < n; i++) X[i] = 0; + for (; i < 2 * n; i++) X[i] = digit_sub2(0, V[i - n], borrow, &borrow); + DCHECK(borrow == 1); // NOLINT(readability/check) + RWDigits R(nullptr, 0); // We don't need the remainder. + if (n < kBurnikelThreshold) { + DivideSchoolbook(Z, R, X, V); + } else { + DivideBurnikelZiegler(Z, R, X, V); + } +} + +// This is Algorithm 4.2 from the paper. +// Computes the inverse of V, shifted by kDigitBits * 2 * V.len, accurate to +// V.len+1 digits. The V.len low digits of the result digits will be written +// to Z, plus there is an implicit top digit with value 1. +// Needs InvertNewtonScratchSpace(V.len) of scratch space. +// The result is either correct or off by one (about half the time it is +// correct, half the time it is one too much, and in the corner case where V is +// minimal and the implicit top digit would have to be 2 it is one too little). +// Barrett's division algorithm can handle that, so we don't care. +void ProcessorImpl::InvertNewton(RWDigits Z, Digits V, RWDigits scratch) { + const int vn = V.len(); + DCHECK(Z.len() >= vn); + DCHECK(scratch.len() >= InvertNewtonScratchSpace(vn)); + const int kSOffset = 0; + const int kWOffset = 0; // S and W can share their scratch space. + const int kUOffset = vn + kInvertNewtonExtraSpace; + + // The base case won't work otherwise. + DCHECK(V.len() >= 3); // NOLINT(readability/check) + + constexpr int kBasecasePrecision = kNewtonInversionThreshold - 1; + // V must have more digits than the basecase. + DCHECK(V.len() > kBasecasePrecision); + DCHECK(IsBitNormalized(V)); + + // Step (1): Setup. + // Calculate precision required at each step. + // {k} is the number of fraction bits for the current iteration. + int k = vn * kDigitBits; + int target_fraction_bits[8 * sizeof(vn)]; // "k_i" in the paper. + int iteration = -1; // "i" in the paper, except inverted to run downwards. + while (k > kBasecasePrecision * kDigitBits) { + iteration++; + target_fraction_bits[iteration] = k; + k = DIV_CEIL(k, 2); + } + // At this point, k <= kBasecasePrecision*kDigitBits is the number of + // fraction bits to use in the base case. {iteration} is the highest index + // in use for f[]. + + // Step (2): Initial approximation. + int initial_digits = DIV_CEIL(k + 1, kDigitBits); + Digits top_part_of_v(V, vn - initial_digits, initial_digits); + InvertBasecase(Z, top_part_of_v, scratch); + Z[initial_digits] = Z[initial_digits] + 1; // Implicit top digit. + // From now on, we'll keep Z.len updated to the part that's already computed. + Z.set_len(initial_digits + 1); + + // Step (3): Precision doubling loop. 
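The precision-doubling loop that follows is Newton's iteration for the reciprocal: given an approximation z of 1/v, the refinement z' = z*(2 - v*z) = 2z - v*z^2 roughly doubles the number of correct digits per step, which is why each level only needs about half the fraction bits of the next. A floating-point demonstration of the same recurrence (not the fixed-point digit arithmetic used above):

#include <cstdio>

int main() {
  const double v = 7.0;
  double z = 0.1;  // coarse initial guess for 1/7 ~= 0.142857...
  for (int i = 0; i < 5; ++i) {
    z = z * (2.0 - v * z);  // Newton step: the error is squared each iteration
    std::printf("iteration %d: z = %.15f (error %.2e)\n", i, z, 1.0 / v - z);
  }
}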
+ while (true) { + DcheckIntegerPartRange(Z, 1, 2); + + // (3b): S = Z^2 + RWDigits S(scratch, kSOffset, 2 * Z.len()); + Multiply(S, Z, Z); + if (should_terminate()) return; + S.TrimOne(); // Top digit of S is unused. + DcheckIntegerPartRange(S, 1, 4); + + // (3c): T = V, truncated so that at least 2k+3 fraction bits remain. + int fraction_digits = DIV_CEIL(2 * k + 3, kDigitBits); + int t_len = std::min(V.len(), fraction_digits); + Digits T(V, V.len() - t_len, t_len); + + // (3d): U = T * S, truncated so that at least 2k+1 fraction bits remain + // (U has one integer digit, which might be zero). + fraction_digits = DIV_CEIL(2 * k + 1, kDigitBits); + RWDigits U(scratch, kUOffset, S.len() + T.len()); + DCHECK(U.len() > fraction_digits); + Multiply(U, S, T); + if (should_terminate()) return; + U = U + (U.len() - (1 + fraction_digits)); + DcheckIntegerPartRange(U, 0, 3); + + // (3e): W = 2 * Z, padded with "0" fraction bits so that it has the + // same number of fraction bits as U. + DCHECK(U.len() >= Z.len()); + RWDigits W(scratch, kWOffset, U.len()); + int padding_digits = U.len() - Z.len(); + for (int i = 0; i < padding_digits; i++) W[i] = 0; + LeftShift(W + padding_digits, Z, 1); + DcheckIntegerPartRange(W, 2, 4); + + // (3f): Z = W - U. + // This check is '<=' instead of '<' because U's top digit is its + // integer part, and we want vn fraction digits. + if (U.len() <= vn) { + // Normal subtraction. + // This is not the last iteration. + DCHECK(iteration > 0); // NOLINT(readability/check) + Z.set_len(U.len()); + digit_t borrow = SubtractAndReturnBorrow(Z, W, U); + DCHECK(borrow == 0); // NOLINT(readability/check) + USE(borrow); + DcheckIntegerPartRange(Z, 1, 2); + } else { + // Truncate some least significant digits so that we get vn + // fraction digits, and compute the integer digit separately. + // This is the last iteration. + DCHECK(iteration == 0); // NOLINT(readability/check) + Z.set_len(vn); + Digits W_part(W, W.len() - vn - 1, vn); + Digits U_part(U, U.len() - vn - 1, vn); + digit_t borrow = SubtractAndReturnBorrow(Z, W_part, U_part); + digit_t integer_part = W.msd() - U.msd() - borrow; + DCHECK(integer_part == 1 || integer_part == 2); + if (integer_part == 2) { + // This is the rare case where the correct result would be 2.0, but + // since we can't express that by returning only the fractional part + // with an implicit 1-digit, we have to return [1.]9999... instead. + for (int i = 0; i < Z.len(); i++) Z[i] = ~digit_t{0}; + } + break; + } + // (3g, 3h): Update local variables and loop. + k = target_fraction_bits[iteration]; + iteration--; + } +} + +// Computes the inverse of V, shifted by kDigitBits * 2 * V.len, accurate to +// V.len+1 digits. The V.len low digits of the result digits will be written +// to Z, plus there is an implicit top digit with value 1. +// (Corner case: if V is minimal, the implicit digit should be 2; in that case +// we return one less than the correct answer. DivideBarrett can handle that.) +// Needs InvertScratchSpace(V.len) digits of scratch space. 
+void ProcessorImpl::Invert(RWDigits Z, Digits V, RWDigits scratch) { + DCHECK(Z.len() > V.len()); + DCHECK(V.len() >= 1); // NOLINT(readability/check) + DCHECK(IsBitNormalized(V)); + DCHECK(scratch.len() >= InvertScratchSpace(V.len())); + + int vn = V.len(); + if (vn >= kNewtonInversionThreshold) { + return InvertNewton(Z, V, scratch); + } + if (vn == 1) { + digit_t d = V[0]; + digit_t dummy_remainder; + Z[0] = digit_div(~d, ~digit_t{0}, d, &dummy_remainder); + Z[1] = 0; + } else { + InvertBasecase(Z, V, scratch); + if (Z[vn] == 1) { + for (int i = 0; i < vn; i++) Z[i] = ~digit_t{0}; + Z[vn] = 0; + } + } +} + +// This is algorithm 3.5 from the paper. +// Computes Q(uotient) and R(emainder) for A/B using I, which is a +// precomputed approximation of 1/B (e.g. with Invert() above). +// Needs DivideBarrettScratchSpace(A.len) scratch space. +void ProcessorImpl::DivideBarrett(RWDigits Q, RWDigits R, Digits A, Digits B, + Digits I, RWDigits scratch) { + DCHECK(Q.len() > A.len() - B.len()); + DCHECK(R.len() >= B.len()); + DCHECK(A.len() > B.len()); // Careful: This is *not* '>=' ! + DCHECK(A.len() <= 2 * B.len()); + DCHECK(B.len() > 0); // NOLINT(readability/check) + DCHECK(IsBitNormalized(B)); + DCHECK(I.len() == A.len() - B.len()); + DCHECK(scratch.len() >= DivideBarrettScratchSpace(A.len())); + + int orig_q_len = Q.len(); + + // (1): A1 = A with B.len fewer digits. + Digits A1 = A + B.len(); + DCHECK(A1.len() == I.len()); + + // (2): Q = A1*I with I.len fewer digits. + // {I} has an implicit high digit with value 1, so we add {A1} to the high + // part of the multiplication result. + RWDigits K(scratch, 0, 2 * I.len()); + Multiply(K, A1, I); + if (should_terminate()) return; + Q.set_len(I.len() + 1); + Add(Q, K + I.len(), A1); + // K is no longer used, can re-use {scratch} for P. + + // (3): R = A - B*Q (approximate remainder). + RWDigits P(scratch, 0, A.len() + 1); + Multiply(P, B, Q); + if (should_terminate()) return; + digit_t borrow = SubtractAndReturnBorrow(R, A, Digits(P, 0, B.len())); + // R may be allocated wider than B, zero out any extra digits if so. + for (int i = B.len(); i < R.len(); i++) R[i] = 0; + digit_t r_high = A[B.len()] - P[B.len()] - borrow; + + // Adjust R and Q so that they become the correct remainder and quotient. + // The number of iterations is guaranteed to be at most some very small + // constant, unless the caller gave us a bad approximate quotient. + if (r_high >> (kDigitBits - 1) == 1) { + // (5b): R < 0, so R += B + digit_t q_sub = 0; + do { + r_high += AddAndReturnCarry(R, R, B); + q_sub++; + DCHECK(q_sub <= 5); // NOLINT(readability/check) + } while (r_high != 0); + Subtract(Q, q_sub); + } else { + digit_t q_add = 0; + while (r_high != 0 || GreaterThanOrEqual(R, B)) { + // (5c): R >= B, so R -= B + r_high -= SubtractAndReturnBorrow(R, R, B); + q_add++; + DCHECK(q_add <= 5); // NOLINT(readability/check) + } + Add(Q, q_add); + } + // (5a): Return. + int final_q_len = Q.len(); + Q.set_len(orig_q_len); + for (int i = final_q_len; i < orig_q_len; i++) Q[i] = 0; +} + +// Computes Q(uotient) and R(emainder) for A/B, using Barrett division. +void ProcessorImpl::DivideBarrett(RWDigits Q, RWDigits R, Digits A, Digits B) { + DCHECK(Q.len() > A.len() - B.len()); + DCHECK(R.len() >= B.len()); + DCHECK(A.len() > B.len()); // Careful: This is *not* '>=' ! + DCHECK(B.len() > 0); // NOLINT(readability/check) + + // Normalize B, and shift A by the same amount. 
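DivideBarrett above estimates the quotient by multiplying the top part of A with the precomputed reciprocal I instead of dividing, then repairs the slightly-off estimate with a handful of additions or subtractions of B. A toy single-word illustration of estimate-then-correct, assuming a compiler with __uint128_t (GCC/Clang); the digit-vector and implicit-top-digit bookkeeping of the real code is omitted:

#include <cassert>
#include <cstdint>
#include <cstdio>

int main() {
  const uint64_t B = 1234567891;             // divisor
  const uint64_t A = 987654321987654321ULL;  // dividend
  // Precomputed once: I ~ 2^64 / B plays the role of the inverse above.
  const __uint128_t I = (static_cast<__uint128_t>(1) << 64) / B;

  // One multiply by I replaces the division; the estimate is exact or one low.
  uint64_t q = static_cast<uint64_t>((static_cast<__uint128_t>(A) * I) >> 64);
  uint64_t r = A - q * B;  // approximate remainder
  int corrections = 0;
  while (r >= B) {  // correction loop, bounded by a small constant
    r -= B;
    ++q;
    ++corrections;
  }
  assert(corrections <= 1);
  assert(q == A / B && r == A % B);
  std::printf("q = %llu, r = %llu, corrections = %d\n",
              (unsigned long long)q, (unsigned long long)r, corrections);
}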
+ ShiftedDigits b_normalized(B); + ShiftedDigits a_normalized(A, b_normalized.shift()); + // Keep the code below more concise. + B = b_normalized; + A = a_normalized; + + // The core DivideBarrett function above only supports A having at most + // twice as many digits as B. We generalize this to arbitrary inputs + // similar to Burnikel-Ziegler division by performing a t-by-1 division + // of B-sized chunks. It's easy to special-case the situation where we + // don't need to bother. + int barrett_dividend_length = A.len() <= 2 * B.len() ? A.len() : 2 * B.len(); + int i_len = barrett_dividend_length - B.len(); + ScratchDigits I(i_len + 1); // +1 is for temporary use by Invert(). + int scratch_len = + std::max(InvertScratchSpace(i_len), + DivideBarrettScratchSpace(barrett_dividend_length)); + ScratchDigits scratch(scratch_len); + Invert(I, Digits(B, B.len() - i_len, i_len), scratch); + if (should_terminate()) return; + I.TrimOne(); + DCHECK(I.len() == i_len); + if (A.len() > 2 * B.len()) { + // This follows the variable names and and algorithmic steps of + // DivideBurnikelZiegler(). + int n = B.len(); // Chunk length. + // (5): {t} is the number of B-sized chunks of A. + int t = DIV_CEIL(A.len(), n); + DCHECK(t >= 3); // NOLINT(readability/check) + // (6)/(7): Z is used for the current 2-chunk block to be divided by B, + // initialized to the two topmost chunks of A. + int z_len = n * 2; + ScratchDigits Z(z_len); + PutAt(Z, A + n * (t - 2), z_len); + // (8): For i from t-2 downto 0 do + int qi_len = n + 1; + ScratchDigits Qi(qi_len); + ScratchDigits Ri(n); + // First iteration unrolled and specialized. + { + int i = t - 2; + DivideBarrett(Qi, Ri, Z, B, I, scratch); + if (should_terminate()) return; + RWDigits target = Q + n * i; + // In the first iteration, all qi_len = n + 1 digits may be used. + int to_copy = std::min(qi_len, target.len()); + for (int j = 0; j < to_copy; j++) target[j] = Qi[j]; + for (int j = to_copy; j < target.len(); j++) target[j] = 0; +#if DEBUG + for (int j = to_copy; j < Qi.len(); j++) { + DCHECK(Qi[j] == 0); // NOLINT(readability/check) + } +#endif + } + // Now loop over any remaining iterations. + for (int i = t - 3; i >= 0; i--) { + // (8b): If i > 0, set Z_(i-1) = [Ri, A_(i-1)]. + // (De-duped with unrolled first iteration, hence reading A_(i).) + PutAt(Z + n, Ri, n); + PutAt(Z, A + n * i, n); + // (8a): Compute Qi, Ri such that Zi = B*Qi + Ri. + DivideBarrett(Qi, Ri, Z, B, I, scratch); + DCHECK(Qi[qi_len - 1] == 0); // NOLINT(readability/check) + if (should_terminate()) return; + // (9): Return Q = [Q_(t-2), ..., Q_0]... + PutAt(Q + n * i, Qi, n); + } + Ri.Normalize(); + DCHECK(Ri.len() <= R.len()); + // (9): ...and R = R_0 * 2^(-leading_zeros). + RightShift(R, Ri, b_normalized.shift()); + } else { + DivideBarrett(Q, R, A, B, I, scratch); + if (should_terminate()) return; + RightShift(R, R, b_normalized.shift()); + } +} + +} // namespace bigint +} // namespace v8 diff --git a/deps/v8/src/bigint/fromstring.cc b/deps/v8/src/bigint/fromstring.cc new file mode 100644 index 00000000000000..0307745cad83c4 --- /dev/null +++ b/deps/v8/src/bigint/fromstring.cc @@ -0,0 +1,72 @@ +// Copyright 2021 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
+ +#include "src/bigint/bigint-internal.h" +#include "src/bigint/vector-arithmetic.h" + +namespace v8 { +namespace bigint { + +// The classic algorithm: for every part, multiply the accumulator with +// the appropriate multiplier, and add the part. O(n²) overall. +void ProcessorImpl::FromStringClassic(RWDigits Z, + FromStringAccumulator* accumulator) { + // We always have at least one part to process. + DCHECK(accumulator->stack_parts_used_ > 0); // NOLINT(readability/check) + Z[0] = accumulator->stack_parts_[0]; + RWDigits already_set(Z, 0, 1); + for (int i = 1; i < Z.len(); i++) Z[i] = 0; + + // The {FromStringAccumulator} uses stack-allocated storage for the first + // few parts; if heap storage is used at all then all parts are copied there. + int num_stack_parts = accumulator->stack_parts_used_; + if (num_stack_parts == 1) return; + const std::vector& heap_parts = accumulator->heap_parts_; + int num_heap_parts = static_cast(heap_parts.size()); + // All multipliers are the same, except possibly for the last. + const digit_t max_multiplier = accumulator->max_multiplier_; + + if (num_heap_parts == 0) { + for (int i = 1; i < num_stack_parts - 1; i++) { + MultiplySingle(Z, already_set, max_multiplier); + Add(Z, accumulator->stack_parts_[i]); + already_set.set_len(already_set.len() + 1); + } + MultiplySingle(Z, already_set, accumulator->last_multiplier_); + Add(Z, accumulator->stack_parts_[num_stack_parts - 1]); + return; + } + // Parts are stored on the heap. + for (int i = 1; i < num_heap_parts - 1; i++) { + MultiplySingle(Z, already_set, max_multiplier); + if (should_terminate()) return; + Add(Z, accumulator->heap_parts_[i]); + already_set.set_len(already_set.len() + 1); + } + MultiplySingle(Z, already_set, accumulator->last_multiplier_); + Add(Z, accumulator->heap_parts_.back()); +} + +void ProcessorImpl::FromString(RWDigits Z, FromStringAccumulator* accumulator) { + if (accumulator->inline_everything_) { + int i = 0; + for (; i < accumulator->stack_parts_used_; i++) { + Z[i] = accumulator->stack_parts_[i]; + } + for (; i < Z.len(); i++) Z[i] = 0; + } else if (accumulator->stack_parts_used_ == 0) { + for (int i = 0; i < Z.len(); i++) Z[i] = 0; + } else { + FromStringClassic(Z, accumulator); + } +} + +Status Processor::FromString(RWDigits Z, FromStringAccumulator* accumulator) { + ProcessorImpl* impl = static_cast(this); + impl->FromString(Z, accumulator); + return impl->get_and_clear_status(); +} + +} // namespace bigint +} // namespace v8 diff --git a/deps/v8/src/bigint/mul-karatsuba.cc b/deps/v8/src/bigint/mul-karatsuba.cc index 2a141f213cf449..d4b5a58383a489 100644 --- a/deps/v8/src/bigint/mul-karatsuba.cc +++ b/deps/v8/src/bigint/mul-karatsuba.cc @@ -201,5 +201,7 @@ void ProcessorImpl::KaratsubaMain(RWDigits Z, Digits X, Digits Y, USE(overflow); } +#undef MAYBE_TERMINATE + } // namespace bigint } // namespace v8 diff --git a/deps/v8/src/bigint/tostring.cc b/deps/v8/src/bigint/tostring.cc index b426c864cd7511..51fb75957aaef7 100644 --- a/deps/v8/src/bigint/tostring.cc +++ b/deps/v8/src/bigint/tostring.cc @@ -106,6 +106,16 @@ char* DivideByMagic(RWDigits rest, Digits input, char* output) { return output; } +class RecursionLevel; + +// The classic algorithm must check for interrupt requests if no faster +// algorithm is available. 
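FromStringClassic above folds the parts left to right: the accumulator is multiplied by each part's multiplier (the radix raised to the number of characters in that part) and the part is added, an O(n^2) schoolbook scheme. A small worked example with decimal parts of at most four characters, so everything fits in a built-in integer:

#include <cstdint>
#include <cstdio>
#include <vector>

int main() {
  // "123456789" split into parts of <= 4 decimal characters, standing in for
  // the accumulator's word-sized parts: {radix^(chars in part), value}.
  struct Part { uint64_t multiplier; uint64_t value; };
  std::vector<Part> parts = {{10000, 1234}, {10000, 5678}, {10, 9}};

  uint64_t z = parts[0].value;  // Z starts as the first (most significant) part
  for (size_t i = 1; i < parts.size(); ++i) {
    z = z * parts[i].multiplier + parts[i].value;  // Z = Z*m_i + p_i
  }
  std::printf("%llu\n", (unsigned long long)z);  // prints 123456789
}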
+#if V8_ADVANCED_BIGINT_ALGORITHMS +#define MAYBE_INTERRUPT(code) ((void)0) +#else +#define MAYBE_INTERRUPT(code) code +#endif + class ToStringFormatter { public: ToStringFormatter(Digits X, int radix, bool sign, char* out, @@ -142,16 +152,16 @@ class ToStringFormatter { if (radix_ == 10) { // Faster but costs binary size, so we optimize the most common case. out_ = DivideByMagic<10>(rest, dividend, out_); - processor_->AddWorkEstimate(rest.len() * 2); + MAYBE_INTERRUPT(processor_->AddWorkEstimate(rest.len() * 2)); } else { digit_t chunk; processor_->DivideSingle(rest, &chunk, dividend, chunk_divisor_); out_ = BasecaseMiddle(chunk, out_); // Assume that a division is about ten times as expensive as a // multiplication. - processor_->AddWorkEstimate(rest.len() * 10); + MAYBE_INTERRUPT(processor_->AddWorkEstimate(rest.len() * 10)); } - if (processor_->should_terminate()) return; + MAYBE_INTERRUPT(if (processor_->should_terminate()) return ); rest.Normalize(); dividend = rest; } while (rest.len() > 1); @@ -160,6 +170,12 @@ class ToStringFormatter { void BasePowerOfTwo(); + void Fast(); + char* FillWithZeros(RecursionLevel* level, char* prev_cursor, char* out, + bool is_last_on_level); + char* ProcessLevel(RecursionLevel* level, Digits chunk, char* out, + bool is_last_on_level); + private: // When processing the last (most significant) digit, don't write leading // zeros. @@ -197,6 +213,8 @@ class ToStringFormatter { ProcessorImpl* processor_; }; +#undef MAYBE_INTERRUPT + // Prepares data for {Classic}. Not needed for {BasePowerOfTwo}. void ToStringFormatter::Start() { max_bits_per_char_ = kMaxBitsPerChar[radix_]; @@ -251,16 +269,305 @@ void ToStringFormatter::BasePowerOfTwo() { } } +#if V8_ADVANCED_BIGINT_ALGORITHMS + +// "Fast" divide-and-conquer conversion to string. The basic idea is to +// recursively cut the BigInt in half (using a division with remainder, +// the divisor being ~half as large (in bits) as the current dividend). +// +// As preparation, we build up a linked list of metadata for each recursion +// level. We do this bottom-up, i.e. start with the level that will produce +// two halves that are register-sized and bail out to the base case. +// Each higher level (executed earlier, prepared later) uses a divisor that is +// the square of the previously-created "next" level's divisor. Preparation +// terminates when the current divisor is at least half as large as the bigint. +// We also precompute each level's divisor's inverse, so we can use +// Barrett division later. +// +// Example: say we want to format 1234567890123, and we can fit two decimal +// digits into a register for the base case. +// +// 1234567890123 +// ↓ +// %100000000 (a) // RecursionLevel 2, +// / \ // is_toplevel_ == true. +// 12345 67890123 +// ↓ ↓ +// (e) %10000 %10000 (b) // RecursionLevel 1 +// / \ / \ +// 1 2345 6789 0123 +// ↓ (f) ↓ ↓ (d) ↓ +// (g) %100 %100 %100 %100 (c) // RecursionLevel 0 +// / \ / \ / \ / \ +// 00 01 23 45 67 89 01 23 +// ↓ ↓ ↓ ↓ ↓ ↓ ↓ // Base case. +// "1" "23" "45" "67" "89" "01" "23" +// +// We start building RecursionLevels in order 0 -> 1 -> 2, performing the +// squarings 100² = 10000 and 10000² = 100000000 each only once. Execution +// then happens in order (a) through (g); lower-level divisors are used +// repeatedly. We build the string from right to left. +// Note that we can skip the division at (g) and fall through directly. 
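The padding rule is the subtle part of the diagram above: every chunk except the overall leftmost one must be printed with its leading zeros. The same divide-and-conquer scheme on a plain uint64_t, with powers of ten standing in for the per-level divisors and no Barrett inverses (a toy sketch, not V8's ProcessLevel):

#include <cstdint>
#include <string>

// Formats x in decimal by recursively splitting it with divisor = 10^(chars/2),
// halving `chars` per level. `pad` says whether leading zeros must be kept
// (true for every chunk except the overall leftmost one).
std::string Format(uint64_t x, int chars, bool pad) {
  std::string s;
  if (chars <= 2) {  // Base case: at most two decimal digits left.
    s = std::to_string(x);
  } else {
    uint64_t divisor = 1;
    for (int i = 0; i < chars / 2; i++) divisor *= 10;
    if (!pad && x < divisor) {
      // Left edge and the upper half is zero: skip the division entirely,
      // mirroring the fall-through at (g) above.
      s = Format(x, chars / 2, false);
    } else {
      s = Format(x / divisor, chars / 2, pad) +   // left half
          Format(x % divisor, chars / 2, true);   // right half, zero-padded
    }
  }
  if (pad && static_cast<int>(s.size()) < chars) {
    s.insert(0, chars - s.size(), '0');
  }
  return s;
}
// Format(1234567890123, 16, false) == "1234567890123"; the two chunks with
// value 1 come out as "1" (leftmost) and "01" (interior), as described above.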
+// Also, note that there are two chunks with value 1: one of them must produce +// a leading "0" in its string representation, the other must not. +// +// In this example, {base_divisor} is 100 and {base_char_count} is 2. + +// TODO(jkummerow): Investigate whether it is beneficial to build one or two +// fewer RecursionLevels, and use the topmost level for more than one division. + +class RecursionLevel { + public: + static RecursionLevel* CreateLevels(digit_t base_divisor, int base_char_count, + int target_bit_length, + ProcessorImpl* processor); + ~RecursionLevel() { delete next_; } + + void ComputeInverse(ProcessorImpl* proc, int dividend_length = 0); + Digits GetInverse(int dividend_length); + + private: + friend class ToStringFormatter; + RecursionLevel(digit_t base_divisor, int base_char_count) + : char_count_(base_char_count), divisor_(1) { + divisor_[0] = base_divisor; + } + explicit RecursionLevel(RecursionLevel* next) + : char_count_(next->char_count_ * 2), + next_(next), + divisor_(next->divisor_.len() * 2) { + next->is_toplevel_ = false; + } + + void LeftShiftDivisor() { + leading_zero_shift_ = CountLeadingZeros(divisor_.msd()); + LeftShift(divisor_, divisor_, leading_zero_shift_); + } + + int leading_zero_shift_{0}; + // The number of characters generated by *each half* of this level. + int char_count_; + bool is_toplevel_{true}; + RecursionLevel* next_{nullptr}; + ScratchDigits divisor_; + std::unique_ptr inverse_storage_; + Digits inverse_{nullptr, 0}; +}; + +// static +RecursionLevel* RecursionLevel::CreateLevels(digit_t base_divisor, + int base_char_count, + int target_bit_length, + ProcessorImpl* processor) { + RecursionLevel* level = new RecursionLevel(base_divisor, base_char_count); + // We can stop creating levels when the next level's divisor, which is the + // square of the current level's divisor, would be strictly bigger (in terms + // of its numeric value) than the input we're formatting. Since computing that + // next divisor is expensive, we want to predict the necessity based on bit + // lengths. Bit lengths are an imperfect predictor of numeric value, so we + // have to be careful: + // - since we can't estimate which one of two numbers of equal bit length + // is bigger, we have to aim for a strictly bigger bit length. + // - when squaring, the bit length sometimes doubles (e.g. 0b11² == 0b1001), + // but usually we "lose" a bit (e.g. 0b10² == 0b100). + while (BitLength(level->divisor_) * 2 - 1 <= target_bit_length) { + RecursionLevel* prev = level; + level = new RecursionLevel(prev); + processor->Multiply(level->divisor_, prev->divisor_, prev->divisor_); + if (processor->should_terminate()) { + delete level; + return nullptr; + } + level->divisor_.Normalize(); + // Left-shifting the divisor must only happen after it's been used to + // compute the next divisor. + prev->LeftShiftDivisor(); + prev->ComputeInverse(processor); + } + level->LeftShiftDivisor(); + // Not calling info->ComputeInverse here so that it can take the input's + // length into account to save some effort on inverse generation. + return level; +} + +// The top level might get by with a smaller inverse than we could maximally +// compute, so the caller should provide the dividend length. 
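The precomputed inverse is what makes each level's divisions cheap: once floor(base^k / divisor) is known, a quotient is one multiplication plus a small correction instead of a long division. The same idea at machine-word scale, using the unsigned __int128 extension available in GCC/Clang (a sketch of the principle only, not V8's multi-digit Invert/DivideBarrett):

#include <cstdint>

struct BarrettWord {
  uint64_t d;           // divisor, assumed nonzero
  unsigned __int128 m;  // precomputed inverse: floor(2^64 / d)
  explicit BarrettWord(uint64_t divisor)
      : d(divisor), m((static_cast<unsigned __int128>(1) << 64) / divisor) {}

  // Divides x by d using one multiplication; the estimate is at most one
  // below the true quotient, so a single correction step suffices.
  uint64_t Divide(uint64_t x, uint64_t* remainder) const {
    uint64_t q = static_cast<uint64_t>((m * x) >> 64);
    uint64_t r = x - q * d;
    if (r >= d) {
      r -= d;
      q++;
    }
    *remainder = r;
    return q;
  }
};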
+void RecursionLevel::ComputeInverse(ProcessorImpl* processor, + int dividend_length) { + int inverse_len = divisor_.len(); + if (dividend_length != 0) { + inverse_len = dividend_length - divisor_.len(); + DCHECK(inverse_len <= divisor_.len()); + } + int scratch_len = InvertScratchSpace(inverse_len); + ScratchDigits scratch(scratch_len); + Storage* inv_storage = new Storage(inverse_len + 1); + inverse_storage_.reset(inv_storage); + RWDigits inverse_initializer(inv_storage->get(), inverse_len + 1); + Digits input(divisor_, divisor_.len() - inverse_len, inverse_len); + processor->Invert(inverse_initializer, input, scratch); + inverse_initializer.TrimOne(); + inverse_ = inverse_initializer; +} + +Digits RecursionLevel::GetInverse(int dividend_length) { + DCHECK(inverse_.len() != 0); // NOLINT(readability/check) + int inverse_len = dividend_length - divisor_.len(); + DCHECK(inverse_len <= inverse_.len()); + return inverse_ + (inverse_.len() - inverse_len); +} + +void ToStringFormatter::Fast() { + std::unique_ptr recursion_levels(RecursionLevel::CreateLevels( + chunk_divisor_, chunk_chars_, BitLength(digits_), processor_)); + if (processor_->should_terminate()) return; + out_ = ProcessLevel(recursion_levels.get(), digits_, out_, true); +} + +// Writes '0' characters right-to-left, starting at {out}-1, until the distance +// from {right_boundary} to {out} equals the number of characters that {level} +// is supposed to produce. +char* ToStringFormatter::FillWithZeros(RecursionLevel* level, + char* right_boundary, char* out, + bool is_last_on_level) { + // Fill up with zeros up to the character count expected to be generated + // on this level; unless this is the left edge of the result. + if (is_last_on_level) return out; + int chunk_chars = level == nullptr ? chunk_chars_ : level->char_count_ * 2; + char* end = right_boundary - chunk_chars; + DCHECK(out >= end); + while (out > end) { + *(--out) = '0'; + } + return out; +} + +char* ToStringFormatter::ProcessLevel(RecursionLevel* level, Digits chunk, + char* out, bool is_last_on_level) { + // Step 0: if only one digit is left, bail out to the base case. + Digits normalized = chunk; + normalized.Normalize(); + if (normalized.len() <= 1) { + char* right_boundary = out; + if (normalized.len() == 1) { + out = BasecaseLast(normalized[0], out); + } + return FillWithZeros(level, right_boundary, out, is_last_on_level); + } + + // Step 1: If the chunk is guaranteed to remain smaller than the divisor + // even after left-shifting, fall through to the next level immediately. + if (normalized.len() < level->divisor_.len()) { + char* right_boundary = out; + out = ProcessLevel(level->next_, chunk, out, is_last_on_level); + return FillWithZeros(level, right_boundary, out, is_last_on_level); + } + // Step 2: Prepare the chunk. + bool allow_inplace_modification = chunk.digits() != digits_.digits(); + Digits original_chunk = chunk; + ShiftedDigits chunk_shifted(chunk, level->leading_zero_shift_, + allow_inplace_modification); + chunk = chunk_shifted; + chunk.Normalize(); + // Check (now precisely) if the chunk is smaller than the divisor. + int comparison = Compare(chunk, level->divisor_); + if (comparison <= 0) { + char* right_boundary = out; + if (comparison < 0) { + // If the chunk is strictly smaller than the divisor, we can process + // it directly on the next level as the right half, and know that the + // left half is all '0'. + // In case we shifted {chunk} in-place, we must undo that + // before the call... 
+ chunk_shifted.Reset(); + // ...and otherwise undo the {chunk = chunk_shifted} assignment above. + chunk = original_chunk; + out = ProcessLevel(level->next_, chunk, out, is_last_on_level); + } else { + DCHECK(comparison == 0); // NOLINT(readability/check) + // If the chunk is equal to the divisor, we know that the right half + // is all '0', and the left half is '...0001'. + // Handling this case specially is an optimization; we could also + // fall through to the generic "chunk > divisor" path below. + out = FillWithZeros(level->next_, right_boundary, out, false); + *(--out) = '1'; + } + // In both cases, make sure the left half is fully written. + return FillWithZeros(level, right_boundary, out, is_last_on_level); + } + // Step 3: Allocate space for the results. + // Allocate one extra digit so the next level can left-shift in-place. + ScratchDigits right(level->divisor_.len() + 1); + // Allocate one extra digit because DivideBarrett requires it. + ScratchDigits left(chunk.len() - level->divisor_.len() + 1); + + // Step 4: Divide to split {chunk} into {left} and {right}. + int inverse_len = chunk.len() - level->divisor_.len(); + if (inverse_len == 0) { + processor_->DivideSchoolbook(left, right, chunk, level->divisor_); + } else if (level->divisor_.len() == 1) { + processor_->DivideSingle(left, right.digits(), chunk, level->divisor_[0]); + for (int i = 1; i < right.len(); i++) right[i] = 0; + } else { + ScratchDigits scratch(DivideBarrettScratchSpace(chunk.len())); + // The top level only computes its inverse when {chunk.len()} is + // available. Other levels have precomputed theirs. + if (level->is_toplevel_) { + level->ComputeInverse(processor_, chunk.len()); + if (processor_->should_terminate()) return out; + } + Digits inverse = level->GetInverse(chunk.len()); + processor_->DivideBarrett(left, right, chunk, level->divisor_, inverse, + scratch); + if (processor_->should_terminate()) return out; + } + RightShift(right, right, level->leading_zero_shift_); +#if DEBUG + Digits left_test = left; + left_test.Normalize(); + DCHECK(left_test.len() <= level->divisor_.len()); +#endif + + // Step 5: Recurse. + char* end_of_right_part = ProcessLevel(level->next_, right, out, false); + // The recursive calls are required and hence designed to write exactly as + // many characters as their level is responsible for. + DCHECK(end_of_right_part == out - level->char_count_); + USE(end_of_right_part); + if (processor_->should_terminate()) return out; + // We intentionally don't use {end_of_right_part} here to be prepared for + // potential future multi-threaded execution. + return ProcessLevel(level->next_, left, out - level->char_count_, + is_last_on_level); +} + +#endif // V8_ADVANCED_BIGINT_ALGORITHMS + } // namespace void ProcessorImpl::ToString(char* out, int* out_length, Digits X, int radix, bool sign) { + const bool use_fast_algorithm = X.len() >= kToStringFastThreshold; + ToStringImpl(out, out_length, X, radix, sign, use_fast_algorithm); +} + +// Factored out so that tests can call it. 
+void ProcessorImpl::ToStringImpl(char* out, int* out_length, Digits X, + int radix, bool sign, bool fast) { #if DEBUG for (int i = 0; i < *out_length; i++) out[i] = kStringZapValue; #endif ToStringFormatter formatter(X, radix, sign, out, *out_length, this); if (IsPowerOfTwo(radix)) { formatter.BasePowerOfTwo(); +#if V8_ADVANCED_BIGINT_ALGORITHMS + } else if (fast) { + formatter.Start(); + formatter.Fast(); + if (should_terminate()) return; +#else + USE(fast); +#endif // V8_ADVANCED_BIGINT_ALGORITHMS } else { formatter.Start(); formatter.Classic(); diff --git a/deps/v8/src/bigint/vector-arithmetic.h b/deps/v8/src/bigint/vector-arithmetic.h index 3247660f95b161..d8b79a961a3419 100644 --- a/deps/v8/src/bigint/vector-arithmetic.h +++ b/deps/v8/src/bigint/vector-arithmetic.h @@ -45,6 +45,9 @@ digit_t AddAndReturnCarry(RWDigits Z, Digits X, Digits Y); digit_t SubtractAndReturnBorrow(RWDigits Z, Digits X, Digits Y); inline bool IsDigitNormalized(Digits X) { return X.len() == 0 || X.msd() != 0; } +inline bool IsBitNormalized(Digits X) { + return (X.msd() >> (kDigitBits - 1)) == 1; +} inline bool GreaterThanOrEqual(Digits A, Digits B) { return Compare(A, B) >= 0; diff --git a/deps/v8/src/builtins/arm/builtins-arm.cc b/deps/v8/src/builtins/arm/builtins-arm.cc index 20312d83366e54..f45c927e67546e 100644 --- a/deps/v8/src/builtins/arm/builtins-arm.cc +++ b/deps/v8/src/builtins/arm/builtins-arm.cc @@ -129,9 +129,8 @@ void Generate_JSBuiltinsConstructStubHelper(MacroAssembler* masm) { } // Remove caller arguments from the stack and return. - STATIC_ASSERT(kSmiTagSize == 1 && kSmiTag == 0); - __ add(sp, sp, Operand(scratch, LSL, kPointerSizeLog2 - kSmiTagSize)); - __ add(sp, sp, Operand(kPointerSize)); + __ DropArguments(scratch, TurboAssembler::kCountIsSmi, + TurboAssembler::kCountExcludesReceiver); __ Jump(lr); __ bind(&stack_overflow); @@ -276,9 +275,8 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) { __ LeaveFrame(StackFrame::CONSTRUCT); // Remove caller arguments from the stack and return. - STATIC_ASSERT(kSmiTagSize == 1 && kSmiTag == 0); - __ add(sp, sp, Operand(r1, LSL, kPointerSizeLog2 - kSmiTagSize)); - __ add(sp, sp, Operand(kPointerSize)); + __ DropArguments(r1, TurboAssembler::kCountIsSmi, + TurboAssembler::kCountExcludesReceiver); __ Jump(lr); __ bind(&check_receiver); @@ -828,7 +826,8 @@ static void LeaveInterpreterFrame(MacroAssembler* masm, Register scratch1, __ LeaveFrame(StackFrame::INTERPRETED); // Drop receiver + arguments. - __ add(sp, sp, params_size, LeaveCC); + __ DropArguments(params_size, TurboAssembler::kCountIsBytes, + TurboAssembler::kCountIncludesReceiver); } // Tail-call |function_id| if |actual_marker| == |expected_marker| @@ -1113,14 +1112,15 @@ void Builtins::Generate_BaselineOutOfLinePrologue(MacroAssembler* masm) { // are 8-bit fields next to each other, so we could just optimize by writing // a 16-bit. These static asserts guard our assumption is valid. 
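The DropArguments helper replaces the open-coded stack adjustment deleted above. The arithmetic it hides is small but easy to get wrong: a Smi-encoded argument count is already shifted left by the tag size, so only the remaining shift converts it to a byte offset, and the receiver slot is popped separately. A compile-time check of that arithmetic (constants assume 32-bit ARM with a 1-bit Smi tag):

constexpr int kPointerSize = 4;      // 32-bit ARM (assumption for this sketch)
constexpr int kPointerSizeLog2 = 2;
constexpr int kSmiTagSize = 1;       // Smi n is stored as (n << 1)

// Bytes popped for a Smi-encoded argument count that excludes the receiver.
constexpr int BytesToDrop(int smi_encoded_count) {
  return (smi_encoded_count << (kPointerSizeLog2 - kSmiTagSize)) + kPointerSize;
}

static_assert(BytesToDrop(3 << kSmiTagSize) == (3 + 1) * kPointerSize,
              "three arguments plus the receiver occupy 16 bytes of stack");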
STATIC_ASSERT(BytecodeArray::kBytecodeAgeOffset == - BytecodeArray::kOsrNestingLevelOffset + kCharSize); + BytecodeArray::kOsrLoopNestingLevelOffset + kCharSize); STATIC_ASSERT(BytecodeArray::kNoAgeBytecodeAge == 0); { UseScratchRegisterScope temps(masm); Register scratch = temps.Acquire(); __ mov(scratch, Operand(0)); - __ strh(scratch, FieldMemOperand(bytecodeArray, - BytecodeArray::kOsrNestingLevelOffset)); + __ strh(scratch, + FieldMemOperand(bytecodeArray, + BytecodeArray::kOsrLoopNestingLevelOffset)); } __ Push(argc, bytecodeArray); @@ -1266,11 +1266,11 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) { // 8-bit fields next to each other, so we could just optimize by writing a // 16-bit. These static asserts guard our assumption is valid. STATIC_ASSERT(BytecodeArray::kBytecodeAgeOffset == - BytecodeArray::kOsrNestingLevelOffset + kCharSize); + BytecodeArray::kOsrLoopNestingLevelOffset + kCharSize); STATIC_ASSERT(BytecodeArray::kNoAgeBytecodeAge == 0); __ mov(r9, Operand(0)); __ strh(r9, FieldMemOperand(kInterpreterBytecodeArrayRegister, - BytecodeArray::kOsrNestingLevelOffset)); + BytecodeArray::kOsrLoopNestingLevelOffset)); // Load the initial bytecode offset. __ mov(kInterpreterBytecodeOffsetRegister, @@ -1861,8 +1861,8 @@ void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) { __ ldr(r5, MemOperand(sp, kSystemPointerSize), ge); // thisArg __ cmp(r0, Operand(2), ge); __ ldr(r2, MemOperand(sp, 2 * kSystemPointerSize), ge); // argArray - __ add(sp, sp, Operand(r0, LSL, kSystemPointerSizeLog2)); - __ str(r5, MemOperand(sp, 0)); + __ DropArgumentsAndPushNewReceiver(r0, r5, TurboAssembler::kCountIsInteger, + TurboAssembler::kCountExcludesReceiver); } // ----------- S t a t e ------------- @@ -1938,8 +1938,8 @@ void Builtins::Generate_ReflectApply(MacroAssembler* masm) { __ ldr(r5, MemOperand(sp, 2 * kSystemPointerSize), ge); // thisArgument __ cmp(r0, Operand(3), ge); __ ldr(r2, MemOperand(sp, 3 * kSystemPointerSize), ge); // argumentsList - __ add(sp, sp, Operand(r0, LSL, kSystemPointerSizeLog2)); - __ str(r5, MemOperand(sp, 0)); + __ DropArgumentsAndPushNewReceiver(r0, r5, TurboAssembler::kCountIsInteger, + TurboAssembler::kCountExcludesReceiver); } // ----------- S t a t e ------------- @@ -1981,8 +1981,8 @@ void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) { __ ldr(r2, MemOperand(sp, 2 * kSystemPointerSize), ge); // argumentsList __ cmp(r0, Operand(3), ge); __ ldr(r3, MemOperand(sp, 3 * kSystemPointerSize), ge); // new.target - __ add(sp, sp, Operand(r0, LSL, kSystemPointerSizeLog2)); - __ str(r4, MemOperand(sp, 0)); // set undefined to the receiver + __ DropArgumentsAndPushNewReceiver(r0, r4, TurboAssembler::kCountIsInteger, + TurboAssembler::kCountExcludesReceiver); } // ----------- S t a t e ------------- @@ -3479,12 +3479,13 @@ void Builtins::Generate_DeoptimizationEntry_Lazy(MacroAssembler* masm) { namespace { -// Converts an interpreter frame into a baseline frame and continues execution -// in baseline code (baseline code has to exist on the shared function info), -// either at the current or next (in execution order) bytecode. -void Generate_BaselineEntry(MacroAssembler* masm, bool next_bytecode, - bool is_osr = false) { - __ Push(kInterpreterAccumulatorRegister); +// Restarts execution either at the current or next (in execution order) +// bytecode. If there is baseline code on the shared function info, converts an +// interpreter frame into a baseline frame and continues execution in baseline +// code. 
Otherwise execution continues with bytecode. +void Generate_BaselineOrInterpreterEntry(MacroAssembler* masm, + bool next_bytecode, + bool is_osr = false) { Label start; __ bind(&start); @@ -3492,6 +3493,38 @@ void Generate_BaselineEntry(MacroAssembler* masm, bool next_bytecode, Register closure = r1; __ ldr(closure, MemOperand(fp, StandardFrameConstants::kFunctionOffset)); + // Get the Code object from the shared function info. + Register code_obj = r4; + __ ldr(code_obj, + FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset)); + __ ldr(code_obj, + FieldMemOperand(code_obj, SharedFunctionInfo::kFunctionDataOffset)); + + // Check if we have baseline code. For OSR entry it is safe to assume we + // always have baseline code. + if (!is_osr) { + Label start_with_baseline; + __ CompareObjectType(code_obj, r3, r3, BASELINE_DATA_TYPE); + __ b(eq, &start_with_baseline); + + // Start with bytecode as there is no baseline code. + Builtin builtin_id = next_bytecode + ? Builtin::kInterpreterEnterAtNextBytecode + : Builtin::kInterpreterEnterAtBytecode; + __ Jump(masm->isolate()->builtins()->code_handle(builtin_id), + RelocInfo::CODE_TARGET); + + // Start with baseline code. + __ bind(&start_with_baseline); + } else if (FLAG_debug_code) { + __ CompareObjectType(code_obj, r3, r3, BASELINE_DATA_TYPE); + __ Assert(eq, AbortReason::kExpectedBaselineData); + } + + // Load baseline code from baseline data. + __ ldr(code_obj, + FieldMemOperand(code_obj, BaselineData::kBaselineCodeOffset)); + // Load the feedback vector. Register feedback_vector = r2; __ ldr(feedback_vector, @@ -3513,15 +3546,6 @@ void Generate_BaselineEntry(MacroAssembler* masm, bool next_bytecode, MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp)); feedback_vector = no_reg; - // Get the Code object from the shared function info. - Register code_obj = r4; - __ ldr(code_obj, - FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset)); - __ ldr(code_obj, - FieldMemOperand(code_obj, SharedFunctionInfo::kFunctionDataOffset)); - __ ldr(code_obj, - FieldMemOperand(code_obj, BaselineData::kBaselineCodeOffset)); - // Compute baseline pc for bytecode offset. ExternalReference get_baseline_pc_extref; if (next_bytecode || is_osr) { @@ -3554,6 +3578,8 @@ void Generate_BaselineEntry(MacroAssembler* masm, bool next_bytecode, // Get bytecode array from the stack frame. __ ldr(kInterpreterBytecodeArrayRegister, MemOperand(fp, InterpreterFrameConstants::kBytecodeArrayFromFp)); + // Save the accumulator register, since it's clobbered by the below call. 
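The rename reflects the new fallback behaviour: the trampoline no longer assumes baseline code exists (except for OSR) and re-enters the interpreter when the SharedFunctionInfo has no BaselineData yet. The decision logic, modelled as plain C++ with hypothetical names (the real code is the hand-written assembly above):

enum class Entry { kBaseline, kInterpreterAtBytecode, kInterpreterAtNextBytecode };

Entry ChooseEntry(bool has_baseline_code, bool next_bytecode, bool is_osr) {
  // For OSR it is safe to assume baseline code exists; otherwise fall back
  // to the interpreter when no BaselineData is present yet.
  if (!is_osr && !has_baseline_code) {
    return next_bytecode ? Entry::kInterpreterAtNextBytecode
                         : Entry::kInterpreterAtBytecode;
  }
  return Entry::kBaseline;
}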
+ __ Push(kInterpreterAccumulatorRegister); { Register arg_reg_1 = r0; Register arg_reg_2 = r1; @@ -3575,8 +3601,9 @@ void Generate_BaselineEntry(MacroAssembler* masm, bool next_bytecode, UseScratchRegisterScope temps(masm); Register scratch = temps.Acquire(); __ mov(scratch, Operand(0)); - __ strh(scratch, FieldMemOperand(kInterpreterBytecodeArrayRegister, - BytecodeArray::kOsrNestingLevelOffset)); + __ strh(scratch, + FieldMemOperand(kInterpreterBytecodeArrayRegister, + BytecodeArray::kOsrLoopNestingLevelOffset)); Generate_OSREntry(masm, code_obj, Operand(Code::kHeaderSize - kHeapObjectTag)); } else { @@ -3600,8 +3627,10 @@ void Generate_BaselineEntry(MacroAssembler* masm, bool next_bytecode, __ bind(&install_baseline_code); { FrameScope scope(masm, StackFrame::INTERNAL); + __ Push(kInterpreterAccumulatorRegister); __ Push(closure); __ CallRuntime(Runtime::kInstallBaselineCode, 1); + __ Pop(kInterpreterAccumulatorRegister); } // Retry from the start after installing baseline code. __ b(&start); @@ -3609,17 +3638,19 @@ void Generate_BaselineEntry(MacroAssembler* masm, bool next_bytecode, } // namespace -void Builtins::Generate_BaselineEnterAtBytecode(MacroAssembler* masm) { - Generate_BaselineEntry(masm, false); +void Builtins::Generate_BaselineOrInterpreterEnterAtBytecode( + MacroAssembler* masm) { + Generate_BaselineOrInterpreterEntry(masm, false); } -void Builtins::Generate_BaselineEnterAtNextBytecode(MacroAssembler* masm) { - Generate_BaselineEntry(masm, true); +void Builtins::Generate_BaselineOrInterpreterEnterAtNextBytecode( + MacroAssembler* masm) { + Generate_BaselineOrInterpreterEntry(masm, true); } void Builtins::Generate_InterpreterOnStackReplacement_ToBaseline( MacroAssembler* masm) { - Generate_BaselineEntry(masm, false, true); + Generate_BaselineOrInterpreterEntry(masm, false, true); } void Builtins::Generate_DynamicCheckMapsTrampoline(MacroAssembler* masm) { diff --git a/deps/v8/src/builtins/arm64/builtins-arm64.cc b/deps/v8/src/builtins/arm64/builtins-arm64.cc index 06245ea51ffcf9..b1f9a63e3c7e9c 100644 --- a/deps/v8/src/builtins/arm64/builtins-arm64.cc +++ b/deps/v8/src/builtins/arm64/builtins-arm64.cc @@ -1297,10 +1297,10 @@ void Builtins::Generate_BaselineOutOfLinePrologue(MacroAssembler* masm) { // are 8-bit fields next to each other, so we could just optimize by writing // a 16-bit. These static asserts guard our assumption is valid. STATIC_ASSERT(BytecodeArray::kBytecodeAgeOffset == - BytecodeArray::kOsrNestingLevelOffset + kCharSize); + BytecodeArray::kOsrLoopNestingLevelOffset + kCharSize); STATIC_ASSERT(BytecodeArray::kNoAgeBytecodeAge == 0); __ Strh(wzr, FieldMemOperand(bytecode_array, - BytecodeArray::kOsrNestingLevelOffset)); + BytecodeArray::kOsrLoopNestingLevelOffset)); __ Push(argc, bytecode_array); @@ -1456,10 +1456,10 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) { // 8-bit fields next to each other, so we could just optimize by writing a // 16-bit. These static asserts guard our assumption is valid. STATIC_ASSERT(BytecodeArray::kBytecodeAgeOffset == - BytecodeArray::kOsrNestingLevelOffset + kCharSize); + BytecodeArray::kOsrLoopNestingLevelOffset + kCharSize); STATIC_ASSERT(BytecodeArray::kNoAgeBytecodeAge == 0); __ Strh(wzr, FieldMemOperand(kInterpreterBytecodeArrayRegister, - BytecodeArray::kOsrNestingLevelOffset)); + BytecodeArray::kOsrLoopNestingLevelOffset)); // Load the initial bytecode offset. 
__ Mov(kInterpreterBytecodeOffsetRegister, @@ -4005,12 +4005,13 @@ void Builtins::Generate_DeoptimizationEntry_Lazy(MacroAssembler* masm) { namespace { -// Converts an interpreter frame into a baseline frame and continues execution -// in baseline code (baseline code has to exist on the shared function info), -// either at the current or next (in execution order) bytecode. -void Generate_BaselineEntry(MacroAssembler* masm, bool next_bytecode, - bool is_osr = false) { - __ Push(padreg, kInterpreterAccumulatorRegister); +// Restarts execution either at the current or next (in execution order) +// bytecode. If there is baseline code on the shared function info, converts an +// interpreter frame into a baseline frame and continues execution in baseline +// code. Otherwise execution continues with bytecode. +void Generate_BaselineOrInterpreterEntry(MacroAssembler* masm, + bool next_bytecode, + bool is_osr = false) { Label start; __ bind(&start); @@ -4018,6 +4019,43 @@ void Generate_BaselineEntry(MacroAssembler* masm, bool next_bytecode, Register closure = x1; __ Ldr(closure, MemOperand(fp, StandardFrameConstants::kFunctionOffset)); + // Get the Code object from the shared function info. + Register code_obj = x22; + __ LoadTaggedPointerField( + code_obj, + FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset)); + __ LoadTaggedPointerField( + code_obj, + FieldMemOperand(code_obj, SharedFunctionInfo::kFunctionDataOffset)); + + // Check if we have baseline code. For OSR entry it is safe to assume we + // always have baseline code. + if (!is_osr) { + Label start_with_baseline; + __ CompareObjectType(code_obj, x3, x3, BASELINE_DATA_TYPE); + __ B(eq, &start_with_baseline); + + // Start with bytecode as there is no baseline code. + Builtin builtin_id = next_bytecode + ? Builtin::kInterpreterEnterAtNextBytecode + : Builtin::kInterpreterEnterAtBytecode; + __ Jump(masm->isolate()->builtins()->code_handle(builtin_id), + RelocInfo::CODE_TARGET); + + // Start with baseline code. + __ bind(&start_with_baseline); + } else if (FLAG_debug_code) { + __ CompareObjectType(code_obj, x3, x3, BASELINE_DATA_TYPE); + __ Assert(eq, AbortReason::kExpectedBaselineData); + } + + // Load baseline code from baseline data. + __ LoadTaggedPointerField( + code_obj, FieldMemOperand(code_obj, BaselineData::kBaselineCodeOffset)); + if (V8_EXTERNAL_CODE_SPACE_BOOL) { + __ LoadCodeDataContainerCodeNonBuiltin(code_obj, code_obj); + } + // Load the feedback vector. Register feedback_vector = x2; __ LoadTaggedPointerField( @@ -4040,20 +4078,6 @@ void Generate_BaselineEntry(MacroAssembler* masm, bool next_bytecode, MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp)); feedback_vector = no_reg; - // Get the Code object from the shared function info. - Register code_obj = x22; - __ LoadTaggedPointerField( - code_obj, - FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset)); - __ LoadTaggedPointerField( - code_obj, - FieldMemOperand(code_obj, SharedFunctionInfo::kFunctionDataOffset)); - __ LoadTaggedPointerField( - code_obj, FieldMemOperand(code_obj, BaselineData::kBaselineCodeOffset)); - if (V8_EXTERNAL_CODE_SPACE_BOOL) { - __ LoadCodeDataContainerCodeNonBuiltin(code_obj, code_obj); - } - // Compute baseline pc for bytecode offset. ExternalReference get_baseline_pc_extref; if (next_bytecode || is_osr) { @@ -4086,6 +4110,8 @@ void Generate_BaselineEntry(MacroAssembler* masm, bool next_bytecode, // Get bytecode array from the stack frame. 
__ ldr(kInterpreterBytecodeArrayRegister, MemOperand(fp, InterpreterFrameConstants::kBytecodeArrayFromFp)); + // Save the accumulator register, since it's clobbered by the below call. + __ Push(padreg, kInterpreterAccumulatorRegister); { Register arg_reg_1 = x0; Register arg_reg_2 = x1; @@ -4104,7 +4130,7 @@ void Generate_BaselineEntry(MacroAssembler* masm, bool next_bytecode, // TODO(pthier): Separate baseline Sparkplug from TF arming and don't disarm // Sparkplug here. __ Strh(wzr, FieldMemOperand(kInterpreterBytecodeArrayRegister, - BytecodeArray::kOsrNestingLevelOffset)); + BytecodeArray::kOsrLoopNestingLevelOffset)); Generate_OSREntry(masm, code_obj, Code::kHeaderSize - kHeapObjectTag); } else { __ Add(code_obj, code_obj, Code::kHeaderSize - kHeapObjectTag); @@ -4127,8 +4153,10 @@ void Generate_BaselineEntry(MacroAssembler* masm, bool next_bytecode, __ bind(&install_baseline_code); { FrameScope scope(masm, StackFrame::INTERNAL); + __ Push(padreg, kInterpreterAccumulatorRegister); __ PushArgument(closure); __ CallRuntime(Runtime::kInstallBaselineCode, 1); + __ Pop(kInterpreterAccumulatorRegister, padreg); } // Retry from the start after installing baseline code. __ B(&start); @@ -4136,17 +4164,19 @@ void Generate_BaselineEntry(MacroAssembler* masm, bool next_bytecode, } // namespace -void Builtins::Generate_BaselineEnterAtBytecode(MacroAssembler* masm) { - Generate_BaselineEntry(masm, false); +void Builtins::Generate_BaselineOrInterpreterEnterAtBytecode( + MacroAssembler* masm) { + Generate_BaselineOrInterpreterEntry(masm, false); } -void Builtins::Generate_BaselineEnterAtNextBytecode(MacroAssembler* masm) { - Generate_BaselineEntry(masm, true); +void Builtins::Generate_BaselineOrInterpreterEnterAtNextBytecode( + MacroAssembler* masm) { + Generate_BaselineOrInterpreterEntry(masm, true); } void Builtins::Generate_InterpreterOnStackReplacement_ToBaseline( MacroAssembler* masm) { - Generate_BaselineEntry(masm, false, true); + Generate_BaselineOrInterpreterEntry(masm, false, true); } void Builtins::Generate_DynamicCheckMapsTrampoline(MacroAssembler* masm) { diff --git a/deps/v8/src/builtins/array-concat.tq b/deps/v8/src/builtins/array-concat.tq new file mode 100644 index 00000000000000..5eb66e6ce8796d --- /dev/null +++ b/deps/v8/src/builtins/array-concat.tq @@ -0,0 +1,49 @@ +// Copyright 2021 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +namespace array { + +extern builtin ArrayConcat(Context, JSFunction, JSAny, int32): JSAny; + +transitioning javascript builtin +ArrayPrototypeConcat( + js-implicit context: NativeContext, receiver: JSAny)(...arguments): JSAny { + // Fast path if we invoke as `x.concat()`. + if (arguments.length == 0) { + typeswitch (receiver) { + case (a: FastJSArrayForConcat): { + return CloneFastJSArray(context, a); + } + case (JSAny): { + // Fallthrough. + } + } + } + + // Fast path if we invoke as `[].concat(x)`. + try { + const receiverAsArray: FastJSArrayForConcat = + Cast(receiver) + otherwise ReceiverIsNotFastJSArrayForConcat; + if (receiverAsArray.IsEmpty() && arguments.length == 1) { + typeswitch (arguments[0]) { + case (a: FastJSArrayForCopy): { + return CloneFastJSArray(context, a); + } + case (JSAny): { + // Fallthrough. + } + } + } + } label ReceiverIsNotFastJSArrayForConcat { + // Fallthrough. + } + + // TODO(victorgomes): Implement slow path ArrayConcat in Torque. 
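The new Torque builtin above only handles two cheap shapes of concat and defers everything else. The dispatch, restated as plain logic with hypothetical parameter names (the real code switches on Torque types such as FastJSArrayForConcat):

enum class ConcatPath { kCloneReceiver, kCloneOnlyArgument, kSlowPath };

ConcatPath ChooseConcatPath(bool receiver_is_fast_array, bool receiver_is_empty,
                            bool argument_is_fast_copyable_array,
                            int argument_count) {
  if (argument_count == 0 && receiver_is_fast_array) {
    return ConcatPath::kCloneReceiver;      // `x.concat()` just clones x.
  }
  if (argument_count == 1 && receiver_is_fast_array && receiver_is_empty &&
      argument_is_fast_copyable_array) {
    return ConcatPath::kCloneOnlyArgument;  // `[].concat(x)` clones x.
  }
  return ConcatPath::kSlowPath;             // Tail-calls the ArrayConcat builtin.
}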
+ tail ArrayConcat( + context, LoadTargetFromFrame(), Undefined, + Convert(arguments.length)); +} + +} // namespace array diff --git a/deps/v8/src/builtins/array-findlast.tq b/deps/v8/src/builtins/array-findlast.tq new file mode 100644 index 00000000000000..a359ec915f90b7 --- /dev/null +++ b/deps/v8/src/builtins/array-findlast.tq @@ -0,0 +1,110 @@ +// Copyright 2021 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +namespace array { +// https://tc39.es/proposal-array-find-from-last/index.html#sec-array.prototype.findlast +transitioning builtin ArrayFindLastLoopContinuation(implicit context: Context)( + predicate: Callable, thisArg: JSAny, o: JSReceiver, + initialK: Number): JSAny { + // 5. Repeat, while k >= 0 + for (let k: Number = initialK; k >= 0; k--) { + // 5a. Let Pk be ! ToString(𝔽(k)). + // k is guaranteed to be a positive integer, hence ToString is + // side-effect free and HasProperty/GetProperty do the conversion inline. + + // 5b. Let kValue be ? Get(O, Pk). + const value: JSAny = GetProperty(o, k); + + // 5c. Let testResult be ! ToBoolean(? Call(predicate, thisArg, « kValue, + // 𝔽(k), O »)). + const testResult: JSAny = Call(context, predicate, thisArg, value, k, o); + + // 5d. If testResult is true, return kValue. + if (ToBoolean(testResult)) { + return value; + } + + // 5e. Set k to k - 1. (done by the loop). + } + + // 6. Return undefined. + return Undefined; +} + +// https://tc39.es/proposal-array-find-from-last/index.html#sec-array.prototype.findlast +transitioning macro FastArrayFindLast(implicit context: Context)( + o: JSReceiver, len: Number, predicate: Callable, thisArg: JSAny): JSAny + labels Bailout(Number) { + const smiLen = Cast(len) otherwise goto Bailout(len - 1); + // 4. Let k be len - 1. + let k: Smi = smiLen - 1; + const fastO = Cast(o) otherwise goto Bailout(k); + let fastOW = NewFastJSArrayWitness(fastO); + + // 5. Repeat, while k ≥ 0 + // Build a fast loop over the smi array. + for (; k >= 0; k--) { + fastOW.Recheck() otherwise goto Bailout(k); + + // Ensure that we haven't walked beyond a possibly updated length. + if (k >= fastOW.Get().length) goto Bailout(k); + + // 5a. Let Pk be ! ToString(𝔽(k)). + // k is guaranteed to be a positive integer, hence there is no need to + // cast ToString for LoadElementOrUndefined. + + // 5b. Let kValue be ? Get(O, Pk). + const value: JSAny = fastOW.LoadElementOrUndefined(k); + // 5c. Let testResult be ! ToBoolean(? Call(predicate, thisArg, « kValue, + // 𝔽(k), O »)). + const testResult: JSAny = + Call(context, predicate, thisArg, value, k, fastOW.Get()); + // 5d. If testResult is true, return kValue. + if (ToBoolean(testResult)) { + return value; + } + + // 5e. Set k to k - 1. (done by the loop). + } + + // 6. Return undefined. + return Undefined; +} + +// https://tc39.es/proposal-array-find-from-last/index.html#sec-array.prototype.findlast +transitioning javascript builtin +ArrayPrototypeFindLast( + js-implicit context: NativeContext, receiver: JSAny)(...arguments): JSAny { + try { + RequireObjectCoercible(receiver, 'Array.prototype.findLast'); + + // 1. Let O be ? ToObject(this value). + const o: JSReceiver = ToObject_Inline(context, receiver); + + // 2. Let len be ? LengthOfArrayLike(O). + const len: Number = GetLengthProperty(o); + + // 3. If IsCallable(predicate) is false, throw a TypeError exception. 
+ if (arguments.length == 0) { + goto NotCallableError; + } + const predicate = Cast(arguments[0]) otherwise NotCallableError; + + // If a thisArg parameter is provided, it will be used as the this value for + // each invocation of predicate. If it is not provided, undefined is used + // instead. + const thisArg: JSAny = arguments[1]; + + // Special cases. + try { + return FastArrayFindLast(o, len, predicate, thisArg) + otherwise Bailout; + } label Bailout(k: Number) deferred { + return ArrayFindLastLoopContinuation(predicate, thisArg, o, k); + } + } label NotCallableError deferred { + ThrowTypeError(MessageTemplate::kCalledNonCallable, arguments[0]); + } +} +} diff --git a/deps/v8/src/builtins/array-findlastindex.tq b/deps/v8/src/builtins/array-findlastindex.tq new file mode 100644 index 00000000000000..3b5498f9617eeb --- /dev/null +++ b/deps/v8/src/builtins/array-findlastindex.tq @@ -0,0 +1,111 @@ +// Copyright 2021 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +namespace array { +// https://tc39.es/proposal-array-find-from-last/index.html#sec-array.prototype.findlastindex +transitioning builtin ArrayFindLastIndexLoopContinuation( + implicit context: Context)( + predicate: Callable, thisArg: JSAny, o: JSReceiver, + initialK: Number): Number { + // 5. Repeat, while k >= 0 + for (let k: Number = initialK; k >= 0; k--) { + // 5a. Let Pk be ! ToString(𝔽(k)). + // k is guaranteed to be a positive integer, hence ToString is + // side-effect free and HasProperty/GetProperty do the conversion inline. + + // 5b. Let kValue be ? Get(O, Pk). + const value: JSAny = GetProperty(o, k); + + // 5c. Let testResult be ! ToBoolean(? Call(predicate, thisArg, « kValue, + // 𝔽(k), O »)). + const testResult: JSAny = Call(context, predicate, thisArg, value, k, o); + + // 5d. If testResult is true, return 𝔽(k). + if (ToBoolean(testResult)) { + return k; + } + + // 5e. Set k to k - 1. (done by the loop). + } + + // 6. Return -1𝔽. + return Convert(-1); +} + +// https://tc39.es/proposal-array-find-from-last/index.html#sec-array.prototype.findlastindex +transitioning macro FastArrayFindLastIndex(implicit context: Context)( + o: JSReceiver, len: Number, predicate: Callable, thisArg: JSAny): Number + labels Bailout(Number) { + const smiLen = Cast(len) otherwise goto Bailout(len - 1); + // 4. Let k be len - 1. + let k: Smi = smiLen - 1; + const fastO = Cast(o) otherwise goto Bailout(k); + let fastOW = NewFastJSArrayWitness(fastO); + + // 5. Repeat, while k ≥ 0 + // Build a fast loop over the smi array. + for (; k >= 0; k--) { + fastOW.Recheck() otherwise goto Bailout(k); + + // Ensure that we haven't walked beyond a possibly updated length. + if (k >= fastOW.Get().length) goto Bailout(k); + + // 5a. Let Pk be ! ToString(𝔽(k)). + // k is guaranteed to be a positive integer, hence there is no need to + // cast ToString for LoadElementOrUndefined. + + // 5b. Let kValue be ? Get(O, Pk). + const value: JSAny = fastOW.LoadElementOrUndefined(k); + // 5c. Let testResult be ! ToBoolean(? Call(predicate, thisArg, « kValue, + // 𝔽(k), O »)). + const testResult: JSAny = + Call(context, predicate, thisArg, value, k, fastOW.Get()); + // 5d. If testResult is true, return 𝔽(k). + if (ToBoolean(testResult)) { + return k; + } + + // 5e. Set k to k - 1. (done by the loop). + } + + // 6. Return -1𝔽. 
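Both findLast and findLastIndex follow the same shape: walk from index len - 1 down to 0, call the predicate, and return on the first match. A compact C++ analogue over a std::vector, just to make the quoted spec steps concrete (illustrative only; undefined becomes std::nullopt):

#include <cstddef>
#include <optional>
#include <vector>

template <typename T, typename Predicate>
std::optional<T> FindLast(const std::vector<T>& values, Predicate pred) {
  for (size_t k = values.size(); k-- > 0;) {   // 4/5: k = len - 1 down to 0
    if (pred(values[k], k)) return values[k];  // 5d: return kValue
  }
  return std::nullopt;                         // 6: no element matched
}
// FindLast<int>({1, 2, 3, 4}, [](int v, size_t) { return v % 2 == 0; }) == 4;
// the findLastIndex variant would return k instead of values[k], or -1.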
+ return -1; +} + +// https://tc39.es/proposal-array-find-from-last/index.html#sec-array.prototype.findlastindex +transitioning javascript builtin +ArrayPrototypeFindLastIndex( + js-implicit context: NativeContext, receiver: JSAny)(...arguments): JSAny { + try { + RequireObjectCoercible(receiver, 'Array.prototype.findLastIndex'); + + // 1. Let O be ? ToObject(this value). + const o: JSReceiver = ToObject_Inline(context, receiver); + + // 2. Let len be ? LengthOfArrayLike(O). + const len: Number = GetLengthProperty(o); + + // 3. If IsCallable(predicate) is false, throw a TypeError exception. + if (arguments.length == 0) { + goto NotCallableError; + } + const predicate = Cast(arguments[0]) otherwise NotCallableError; + + // If a thisArg parameter is provided, it will be used as the this value for + // each invocation of predicate. If it is not provided, undefined is used + // instead. + const thisArg: JSAny = arguments[1]; + + // Special cases. + try { + return FastArrayFindLastIndex(o, len, predicate, thisArg) + otherwise Bailout; + } label Bailout(k: Number) deferred { + return ArrayFindLastIndexLoopContinuation(predicate, thisArg, o, k); + } + } label NotCallableError deferred { + ThrowTypeError(MessageTemplate::kCalledNonCallable, arguments[0]); + } +} +} diff --git a/deps/v8/src/builtins/arraybuffer.tq b/deps/v8/src/builtins/arraybuffer.tq index 5794414443b455..fc0152f51ab129 100644 --- a/deps/v8/src/builtins/arraybuffer.tq +++ b/deps/v8/src/builtins/arraybuffer.tq @@ -18,115 +18,103 @@ transitioning javascript builtin ArrayBufferPrototypeGetByteLength( ThrowTypeError( MessageTemplate::kIncompatibleMethodReceiver, functionName, receiver); } - // 4. If IsResizableArrayBuffer(O) is true, throw a TypeError exception. - if (IsResizableArrayBuffer(o)) { - ThrowTypeError( - MessageTemplate::kIncompatibleMethodReceiver, functionName, receiver); - } - // 5. If IsDetachedBuffer(O) is true, throw a TypeError exception. - // TODO(v8:4895): We don't actually throw here. - // 6. Let length be O.[[ArrayBufferByteLength]]. + // 4. Let length be O.[[ArrayBufferByteLength]]. const length = o.byte_length; - // 7. Return length. + // 5. Return length. return Convert(length); } -// #sec-get-sharedarraybuffer.prototype.bytelength -transitioning javascript builtin SharedArrayBufferPrototypeGetByteLength( +// #sec-get-arraybuffer.prototype.maxbytelength +transitioning javascript builtin ArrayBufferPrototypeGetMaxByteLength( js-implicit context: NativeContext, receiver: JSAny)(): Number { // 1. Let O be the this value. // 2. Perform ? RequireInternalSlot(O, [[ArrayBufferData]]). - const functionName = 'get SharedArrayBuffer.prototype.byteLength'; + const functionName = 'get ArrayBuffer.prototype.maxByteLength'; const o = Cast(receiver) otherwise ThrowTypeError( MessageTemplate::kIncompatibleMethodReceiver, functionName, receiver); - // 3. Perform ? RequireInternalSlot(O, [[ArrayBufferData]]). - if (!IsSharedArrayBuffer(o)) { + // 3. If IsSharedArrayBuffer(O) is true, throw a TypeError exception. + if (IsSharedArrayBuffer(o)) { ThrowTypeError( MessageTemplate::kIncompatibleMethodReceiver, functionName, receiver); } - // 4. If IsResizableArrayBuffer(O) is true, throw a TypeError exception. - if (IsResizableArrayBuffer(o)) { - ThrowTypeError( - MessageTemplate::kIncompatibleMethodReceiver, functionName, receiver); + // 4. If IsDetachedBuffer(O) is true, return 0_F. + if (IsDetachedBuffer(o)) { + return 0; } - // 5. Let length be O.[[ArrayBufferByteLength]]. - const length = o.byte_length; - // 6. Return length. 
- return Convert(length); + // 5. If IsResizableArrayBuffer(O) is true, then + // a. Let length be O.[[ArrayBufferMaxByteLength]]. + // 6. Else, + // a. Let length be O.[[ArrayBufferByteLength]]. + // 7. Return F(length); + assert(IsResizableArrayBuffer(o) || o.max_byte_length == o.byte_length); + return Convert(o.max_byte_length); } -// #sec-get-resizablearraybuffer.prototype.bytelength -transitioning javascript builtin ResizableArrayBufferPrototypeGetByteLength( - js-implicit context: NativeContext, receiver: JSAny)(): Number { +// #sec-get-arraybuffer.prototype.resizable +transitioning javascript builtin ArrayBufferPrototypeGetResizable( + js-implicit context: NativeContext, receiver: JSAny)(): Boolean { // 1. Let O be the this value. - // 2. Perform ? RequireInternalSlot(O, [[ArrayBufferMaxByteLength]]). - const functionName = 'get ResizableArrayBuffer.prototype.byteLength'; + // 2. Perform ? RequireInternalSlot(O, [[ArrayBufferData]]). + const functionName = 'get ArrayBuffer.prototype.resizable'; const o = Cast(receiver) otherwise ThrowTypeError( MessageTemplate::kIncompatibleMethodReceiver, functionName, receiver); - if (!IsResizableArrayBuffer(o)) { - ThrowTypeError( - MessageTemplate::kIncompatibleMethodReceiver, functionName, receiver); - } // 3. If IsSharedArrayBuffer(O) is true, throw a TypeError exception. if (IsSharedArrayBuffer(o)) { ThrowTypeError( MessageTemplate::kIncompatibleMethodReceiver, functionName, receiver); } - // 4. Let length be O.[[ArrayBufferByteLength]]. - const length = o.byte_length; - // 5. Return length. - return Convert(length); + // 4. Return IsResizableArrayBuffer(O). + if (IsResizableArrayBuffer(o)) { + return True; + } + return False; } -// #sec-get-resizablearraybuffer.prototype.maxbytelength -transitioning javascript builtin ResizableArrayBufferPrototypeGetMaxByteLength( +// #sec-get-growablesharedarraybuffer.prototype.maxbytelength +transitioning javascript builtin +SharedArrayBufferPrototypeGetMaxByteLength( js-implicit context: NativeContext, receiver: JSAny)(): Number { // 1. Let O be the this value. - // 2. Perform ? RequireInternalSlot(O, [[ArrayBufferMaxByteLength]]). - const functionName = 'get ResizableArrayBuffer.prototype.maxByteLength'; + // 2. Perform ? RequireInternalSlot(O, [[ArrayBufferData]]). + const functionName = 'get SharedArrayBuffer.prototype.maxByteLength'; const o = Cast(receiver) otherwise ThrowTypeError( MessageTemplate::kIncompatibleMethodReceiver, functionName, receiver); - if (!IsResizableArrayBuffer(o)) { - ThrowTypeError( - MessageTemplate::kIncompatibleMethodReceiver, functionName, receiver); - } - // 3. If IsSharedArrayBuffer(O) is true, throw a TypeError exception. - if (IsSharedArrayBuffer(o)) { + // 3. If IsSharedArrayBuffer(O) is false, throw a TypeError exception. + if (!IsSharedArrayBuffer(o)) { ThrowTypeError( MessageTemplate::kIncompatibleMethodReceiver, functionName, receiver); } - // 4. Let length be O.[[ArrayBufferMaxByteLength]]. - const length = o.max_byte_length; - // 5. Return length. - return Convert(length); + // 4. If IsResizableArrayBuffer(O) is true, then + // a. Let length be O.[[ArrayBufferMaxByteLength]]. + // 5. Else, + // a. Let length be O.[[ArrayBufferByteLength]]. + // 6. 
Return F(length); + assert(IsResizableArrayBuffer(o) || o.max_byte_length == o.byte_length); + return Convert(o.max_byte_length); } -// #sec-get-growablesharedarraybuffer.prototype.maxbytelength -transitioning javascript builtin -GrowableSharedArrayBufferPrototypeGetMaxByteLength( - js-implicit context: NativeContext, receiver: JSAny)(): Number { +// #sec-get-sharedarraybuffer.prototype.growable +transitioning javascript builtin SharedArrayBufferPrototypeGetGrowable( + js-implicit context: NativeContext, receiver: JSAny)(): Boolean { // 1. Let O be the this value. - // 2. Perform ? RequireInternalSlot(O, [[ArrayBufferMaxByteLength]]). - const functionName = 'get GrowableSharedArrayBuffer.prototype.maxByteLength'; + // 2. Perform ? RequireInternalSlot(O, [[ArrayBufferData]]). + const functionName = 'get SharedArrayBuffer.prototype.growable'; const o = Cast(receiver) otherwise ThrowTypeError( MessageTemplate::kIncompatibleMethodReceiver, functionName, receiver); - if (!IsResizableArrayBuffer(o)) { - ThrowTypeError( - MessageTemplate::kIncompatibleMethodReceiver, functionName, receiver); - } // 3. If IsSharedArrayBuffer(O) is false, throw a TypeError exception. if (!IsSharedArrayBuffer(o)) { ThrowTypeError( MessageTemplate::kIncompatibleMethodReceiver, functionName, receiver); } - // 4. Let length be O.[[ArrayBufferMaxByteLength]]. - const length = o.max_byte_length; - // 5. Return length. - return Convert(length); + // 4. Return IsResizableArrayBuffer(O). + if (IsResizableArrayBuffer(o)) { + return True; + } + return False; } // #sec-arraybuffer.isview diff --git a/deps/v8/src/builtins/base.tq b/deps/v8/src/builtins/base.tq index c0acc905938231..af1813b61d76b6 100644 --- a/deps/v8/src/builtins/base.tq +++ b/deps/v8/src/builtins/base.tq @@ -214,7 +214,7 @@ extern class GlobalDictionary extends HashTable; extern class SimpleNumberDictionary extends HashTable; extern class EphemeronHashTable extends HashTable; type ObjectHashTable extends HashTable - generates 'TNode'; + generates 'TNode' constexpr 'ObjectHashTable'; extern class NumberDictionary extends HashTable; type RawPtr generates 'TNode' constexpr 'Address'; @@ -552,8 +552,20 @@ extern class Filler extends HeapObject generates 'TNode'; // but not their own class definitions: // Like JSObject, but created from API function. -@apiExposedInstanceTypeValue(0x420) +@apiExposedInstanceTypeValue(0x422) +@doNotGenerateCast +@noVerifier extern class JSApiObject extends JSObject generates 'TNode'; + +// TODO(gsathya): This only exists to make JSApiObject instance type into a +// range. +@apiExposedInstanceTypeValue(0x80A) +@doNotGenerateCast +@highestInstanceTypeWithinParentClassRange +@noVerifier +extern class JSLastDummyApiObject extends JSApiObject + generates 'TNode'; + // Like JSApiObject, but requires access checks and/or has interceptors. 
@apiExposedInstanceTypeValue(0x410) extern class JSSpecialApiObject extends JSSpecialObject @@ -669,6 +681,8 @@ extern macro ThrowTypeError(implicit context: Context)( constexpr MessageTemplate, Object, Object, Object): never; extern transitioning runtime ThrowTypeErrorIfStrict(implicit context: Context)( Smi, Object, Object): void; +extern transitioning runtime ThrowIteratorError(implicit context: Context)( + JSAny): never; extern transitioning runtime ThrowCalledNonCallable(implicit context: Context)( JSAny): never; @@ -1198,6 +1212,7 @@ extern macro IsPrototypeInitialArrayPrototype(implicit context: Context)(Map): extern macro IsNoElementsProtectorCellInvalid(): bool; extern macro IsArrayIteratorProtectorCellInvalid(): bool; extern macro IsArraySpeciesProtectorCellInvalid(): bool; +extern macro IsIsConcatSpreadableProtectorCellInvalid(): bool; extern macro IsTypedArraySpeciesProtectorCellInvalid(): bool; extern macro IsPromiseSpeciesProtectorCellInvalid(): bool; extern macro IsMockArrayBufferAllocatorFlag(): bool; diff --git a/deps/v8/src/builtins/builtins-arraybuffer.cc b/deps/v8/src/builtins/builtins-arraybuffer.cc index c3a7f1b98cf3da..f995299b7e2917 100644 --- a/deps/v8/src/builtins/builtins-arraybuffer.cc +++ b/deps/v8/src/builtins/builtins-arraybuffer.cc @@ -50,16 +50,11 @@ bool RoundUpToPageSize(size_t byte_length, size_t page_size, Object ConstructBuffer(Isolate* isolate, Handle target, Handle new_target, Handle length, Handle max_length, InitializedFlag initialized) { - SharedFlag shared = - (*target != target->native_context().array_buffer_fun() && - *target != target->native_context().resizable_array_buffer_fun()) - ? SharedFlag::kShared - : SharedFlag::kNotShared; - ResizableFlag resizable = - (*target == target->native_context().resizable_array_buffer_fun() || - *target == target->native_context().growable_shared_array_buffer_fun()) - ? ResizableFlag::kResizable - : ResizableFlag::kNotResizable; + SharedFlag shared = *target != target->native_context().array_buffer_fun() + ? SharedFlag::kShared + : SharedFlag::kNotShared; + ResizableFlag resizable = max_length.is_null() ? ResizableFlag::kNotResizable + : ResizableFlag::kResizable; Handle result; ASSIGN_RETURN_FAILURE_ON_EXCEPTION( isolate, result, @@ -83,12 +78,9 @@ Object ConstructBuffer(Isolate* isolate, Handle target, if (resizable == ResizableFlag::kNotResizable) { backing_store = BackingStore::Allocate(isolate, byte_length, shared, initialized); + max_byte_length = byte_length; } else { - Handle number_max_length; - ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, number_max_length, - Object::ToInteger(isolate, max_length)); - - if (!TryNumberToSize(*number_max_length, &max_byte_length)) { + if (!TryNumberToSize(*max_length, &max_byte_length)) { THROW_NEW_ERROR_RETURN_FAILURE( isolate, NewRangeError(MessageTemplate::kInvalidArrayBufferMaxLength)); @@ -116,8 +108,8 @@ Object ConstructBuffer(Isolate* isolate, Handle target, } constexpr bool kIsWasmMemory = false; backing_store = BackingStore::TryAllocateAndPartiallyCommitMemory( - isolate, byte_length, page_size, initial_pages, max_pages, - kIsWasmMemory, shared); + isolate, byte_length, max_byte_length, page_size, initial_pages, + max_pages, kIsWasmMemory, shared); } if (!backing_store) { // Allocation of backing store failed. 
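With the dedicated Resizable/GrowableShared constructors gone, ConstructBuffer now derives both flags from its inputs alone: the buffer is shared when the constructor target is SharedArrayBuffer, and resizable exactly when a maxByteLength was supplied. A simplified model with plain values instead of Handles (illustrative only):

#include <cstddef>
#include <optional>

struct BufferConfig {
  bool shared;
  bool resizable;
  size_t byte_length;
  size_t max_byte_length;
};

BufferConfig Configure(bool target_is_shared_array_buffer, size_t byte_length,
                       std::optional<size_t> max_byte_length) {
  BufferConfig config;
  config.shared = target_is_shared_array_buffer;
  config.resizable = max_byte_length.has_value();
  config.byte_length = byte_length;
  // For fixed-length buffers the max is simply the current length, which is
  // what lets the maxByteLength getter report one field for both kinds.
  config.max_byte_length = config.resizable ? *max_byte_length : byte_length;
  return config;
}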
@@ -137,10 +129,7 @@ BUILTIN(ArrayBufferConstructor) { HandleScope scope(isolate); Handle target = args.target(); DCHECK(*target == target->native_context().array_buffer_fun() || - *target == target->native_context().shared_array_buffer_fun() || - *target == target->native_context().resizable_array_buffer_fun() || - *target == - target->native_context().growable_shared_array_buffer_fun()); + *target == target->native_context().shared_array_buffer_fun()); if (args.new_target()->IsUndefined(isolate)) { // [[Call]] THROW_NEW_ERROR_RETURN_FAILURE( isolate, NewTypeError(MessageTemplate::kConstructorNotFunction, @@ -158,9 +147,22 @@ BUILTIN(ArrayBufferConstructor) { isolate, NewRangeError(MessageTemplate::kInvalidArrayBufferLength)); } - Handle max_length = args.atOrUndefined(isolate, 2); - return ConstructBuffer(isolate, target, new_target, number_length, max_length, - InitializedFlag::kZeroInitialized); + Handle number_max_length; + if (FLAG_harmony_rab_gsab) { + Handle max_length; + Handle options = args.atOrUndefined(isolate, 2); + ASSIGN_RETURN_FAILURE_ON_EXCEPTION( + isolate, max_length, + JSObject::ReadFromOptionsBag( + options, isolate->factory()->max_byte_length_string(), isolate)); + + if (!max_length->IsUndefined(isolate)) { + ASSIGN_RETURN_FAILURE_ON_EXCEPTION( + isolate, number_max_length, Object::ToInteger(isolate, max_length)); + } + } + return ConstructBuffer(isolate, target, new_target, number_length, + number_max_length, InitializedFlag::kZeroInitialized); } // This is a helper to construct an ArrayBuffer with uinitialized memory. @@ -462,45 +464,48 @@ static Object ResizeHelper(BuiltinArguments args, Isolate* isolate, return ReadOnlyRoots(isolate).undefined_value(); } -// ES #sec-get-growablesharedarraybuffer.prototype.bytelength -// get GrowableSharedArrayBuffer.prototype.byteLength -BUILTIN(GrowableSharedArrayBufferPrototypeGetByteLength) { - const char* const kMethodName = - "get GrowableSharedArrayBuffer.prototype.byteLength"; +// ES #sec-get-sharedarraybuffer.prototype.bytelength +// get SharedArrayBuffer.prototype.byteLength +BUILTIN(SharedArrayBufferPrototypeGetByteLength) { + const char* const kMethodName = "get SharedArrayBuffer.prototype.byteLength"; HandleScope scope(isolate); - // 1. Let O be the this value. - // 2. Perform ? RequireInternalSlot(O, [[ArrayBufferMaxLength]]). + // 2. Perform ? RequireInternalSlot(O, [[ArrayBufferData]]). CHECK_RECEIVER(JSArrayBuffer, array_buffer, kMethodName); - CHECK_RESIZABLE(true, array_buffer, kMethodName); // 3. If IsSharedArrayBuffer(O) is false, throw a TypeError exception. CHECK_SHARED(true, array_buffer, kMethodName); - // 4. Let length be ArrayBufferByteLength(O, SeqCst). - - // Invariant: byte_length for GSAB is 0 (it needs to be read from the - // BackingStore). - DCHECK_EQ(0, array_buffer->byte_length()); + DCHECK_EQ(array_buffer->max_byte_length(), + array_buffer->GetBackingStore()->max_byte_length()); - size_t byte_length = - array_buffer->GetBackingStore()->byte_length(std::memory_order_seq_cst); + // 4. Let length be ArrayBufferByteLength(O, SeqCst). + size_t byte_length; + if (array_buffer->is_resizable()) { + // Invariant: byte_length for GSAB is 0 (it needs to be read from the + // BackingStore). + DCHECK_EQ(0, array_buffer->byte_length()); - // 5. Return length. + byte_length = + array_buffer->GetBackingStore()->byte_length(std::memory_order_seq_cst); + } else { + byte_length = array_buffer->byte_length(); + } + // 5. Return F(length). 
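The merged byteLength getter above has to distinguish the two kinds of shared buffer: growable ones keep the on-object byte_length at 0 and read the live value from the backing store, fixed-length ones use the field directly. A sketch with hypothetical model types mirroring that branch (not the real JSArrayBuffer/BackingStore classes):

#include <atomic>
#include <cstddef>

struct BackingStoreModel {
  std::atomic<size_t> byte_length{0};  // grown concurrently by other threads
};

struct SharedArrayBufferModel {
  bool is_resizable;
  size_t byte_length;  // stays 0 when is_resizable is true
  const BackingStoreModel* backing_store;
};

size_t GetByteLength(const SharedArrayBufferModel& buffer) {
  return buffer.is_resizable
             ? buffer.backing_store->byte_length.load(std::memory_order_seq_cst)
             : buffer.byte_length;
}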
return *isolate->factory()->NewNumberFromSize(byte_length); } -// ES #sec-resizablearraybuffer.prototype.resize -// ResizableArrayBuffer.prototype.resize(new_size)) -BUILTIN(ResizableArrayBufferPrototypeResize) { - const char* const kMethodName = "ResizableArrayBuffer.prototype.resize"; +// ES #sec-arraybuffer.prototype.resize +// ArrayBuffer.prototype.resize(new_size)) +BUILTIN(ArrayBufferPrototypeResize) { + const char* const kMethodName = "ArrayBuffer.prototype.resize"; constexpr bool kIsShared = false; return ResizeHelper(args, isolate, kMethodName, kIsShared); } -// ES #sec-growablesharedarraybuffer.prototype.grow -// GrowableSharedArrayBuffer.prototype.grow(new_size)) -BUILTIN(GrowableSharedArrayBufferPrototypeGrow) { - const char* const kMethodName = "GrowableSharedArrayBuffer.prototype.grow"; +// ES #sec-sharedarraybuffer.prototype.grow +// SharedArrayBuffer.prototype.grow(new_size)) +BUILTIN(SharedArrayBufferPrototypeGrow) { + const char* const kMethodName = "SharedArrayBuffer.prototype.grow"; constexpr bool kIsShared = true; return ResizeHelper(args, isolate, kMethodName, kIsShared); } diff --git a/deps/v8/src/builtins/builtins-console.cc b/deps/v8/src/builtins/builtins-console.cc index 21841e382caea8..a1359cd4221e0c 100644 --- a/deps/v8/src/builtins/builtins-console.cc +++ b/deps/v8/src/builtins/builtins-console.cc @@ -46,22 +46,6 @@ void ConsoleCall( CHECK(!isolate->has_scheduled_exception()); if (!isolate->console_delegate()) return; HandleScope scope(isolate); - - // Access check. The current context has to match the context of all - // arguments, otherwise the inspector might leak objects across contexts. - Handle context = handle(isolate->context(), isolate); - for (int i = 0; i < args.length(); ++i) { - Handle argument = args.at(i); - if (!argument->IsJSObject()) continue; - - Handle argument_obj = Handle::cast(argument); - if (argument->IsAccessCheckNeeded(isolate) && - !isolate->MayAccess(context, argument_obj)) { - isolate->ReportFailedAccessCheck(argument_obj); - return; - } - } - debug::ConsoleCallArguments wrapper(args); Handle context_id_obj = JSObject::GetDataProperty( args.target(), isolate->factory()->console_context_id_symbol()); @@ -78,7 +62,7 @@ void ConsoleCall( } void LogTimerEvent(Isolate* isolate, BuiltinArguments args, - Logger::StartEnd se) { + v8::LogEventStatus se) { if (!isolate->logger()->is_logging()) return; HandleScope scope(isolate); std::unique_ptr name; @@ -102,21 +86,21 @@ CONSOLE_METHOD_LIST(CONSOLE_BUILTIN_IMPLEMENTATION) #undef CONSOLE_BUILTIN_IMPLEMENTATION BUILTIN(ConsoleTime) { - LogTimerEvent(isolate, args, Logger::START); + LogTimerEvent(isolate, args, v8::LogEventStatus::kStart); ConsoleCall(isolate, args, &debug::ConsoleDelegate::Time); RETURN_FAILURE_IF_SCHEDULED_EXCEPTION(isolate); return ReadOnlyRoots(isolate).undefined_value(); } BUILTIN(ConsoleTimeEnd) { - LogTimerEvent(isolate, args, Logger::END); + LogTimerEvent(isolate, args, v8::LogEventStatus::kEnd); ConsoleCall(isolate, args, &debug::ConsoleDelegate::TimeEnd); RETURN_FAILURE_IF_SCHEDULED_EXCEPTION(isolate); return ReadOnlyRoots(isolate).undefined_value(); } BUILTIN(ConsoleTimeStamp) { - LogTimerEvent(isolate, args, Logger::STAMP); + LogTimerEvent(isolate, args, v8::LogEventStatus::kStamp); ConsoleCall(isolate, args, &debug::ConsoleDelegate::TimeStamp); RETURN_FAILURE_IF_SCHEDULED_EXCEPTION(isolate); return ReadOnlyRoots(isolate).undefined_value(); diff --git a/deps/v8/src/builtins/builtins-definitions.h b/deps/v8/src/builtins/builtins-definitions.h index 
b5caebd7c41e4d..70eb349dab9541 100644 --- a/deps/v8/src/builtins/builtins-definitions.h +++ b/deps/v8/src/builtins/builtins-definitions.h @@ -170,8 +170,8 @@ namespace internal { ASM(BaselineOutOfLinePrologue, BaselineOutOfLinePrologue) \ ASM(BaselineOnStackReplacement, Void) \ ASM(BaselineLeaveFrame, BaselineLeaveFrame) \ - ASM(BaselineEnterAtBytecode, Void) \ - ASM(BaselineEnterAtNextBytecode, Void) \ + ASM(BaselineOrInterpreterEnterAtBytecode, Void) \ + ASM(BaselineOrInterpreterEnterAtNextBytecode, Void) \ ASM(InterpreterOnStackReplacement_ToBaseline, Void) \ \ /* Code life-cycle */ \ @@ -394,6 +394,8 @@ namespace internal { CPP(ArrayBufferConstructor) \ CPP(ArrayBufferConstructor_DoNotInitialize) \ CPP(ArrayBufferPrototypeSlice) \ + /* https://tc39.es/proposal-resizablearraybuffer/ */ \ + CPP(ArrayBufferPrototypeResize) \ \ /* AsyncFunction */ \ TFS(AsyncFunctionEnter, kClosure, kReceiver) \ @@ -799,11 +801,6 @@ namespace internal { ASM(RegExpInterpreterTrampoline, CCall) \ ASM(RegExpExperimentalTrampoline, CCall) \ \ - /* ResizableArrayBuffer & GrowableSharedArrayBuffer */ \ - CPP(ResizableArrayBufferPrototypeResize) \ - CPP(GrowableSharedArrayBufferPrototypeGrow) \ - CPP(GrowableSharedArrayBufferPrototypeGetByteLength) \ - \ /* Set */ \ TFJ(SetConstructor, kDontAdaptArgumentsSentinel) \ TFJ(SetPrototypeHas, 1, kReceiver, kKey) \ @@ -823,7 +820,11 @@ namespace internal { TFS(SetOrSetIteratorToList, kSource) \ \ /* SharedArrayBuffer */ \ + CPP(SharedArrayBufferPrototypeGetByteLength) \ CPP(SharedArrayBufferPrototypeSlice) \ + /* https://tc39.es/proposal-resizablearraybuffer/ */ \ + CPP(SharedArrayBufferPrototypeGrow) \ + \ TFJ(AtomicsLoad, 2, kReceiver, kArray, kIndex) \ TFJ(AtomicsStore, 3, kReceiver, kArray, kIndex, kValue) \ TFJ(AtomicsExchange, 3, kReceiver, kArray, kIndex, kValue) \ diff --git a/deps/v8/src/builtins/builtins-regexp-gen.cc b/deps/v8/src/builtins/builtins-regexp-gen.cc index 5920d9fe7ce451..535188c567ee3e 100644 --- a/deps/v8/src/builtins/builtins-regexp-gen.cc +++ b/deps/v8/src/builtins/builtins-regexp-gen.cc @@ -1085,6 +1085,7 @@ TNode RegExpBuiltinsAssembler::FlagsGetter(TNode context, CASE_FOR_FLAG("dotAll", JSRegExp::kDotAll); CASE_FOR_FLAG("unicode", JSRegExp::kUnicode); CASE_FOR_FLAG("sticky", JSRegExp::kSticky); + CASE_FOR_FLAG("hasIndices", JSRegExp::kHasIndices); #undef CASE_FOR_FLAG #define CASE_FOR_FLAG(NAME, V8_FLAG_EXTERN_REF, FLAG) \ @@ -1106,10 +1107,6 @@ TNode RegExpBuiltinsAssembler::FlagsGetter(TNode context, BIND(&next); \ } while (false) - CASE_FOR_FLAG( - "hasIndices", - ExternalReference::address_of_harmony_regexp_match_indices_flag(), - JSRegExp::kHasIndices); CASE_FOR_FLAG( "linear", ExternalReference::address_of_enable_experimental_regexp_engine(), diff --git a/deps/v8/src/builtins/builtins-typed-array-gen.cc b/deps/v8/src/builtins/builtins-typed-array-gen.cc index 735d8b674fced9..a76650d052d927 100644 --- a/deps/v8/src/builtins/builtins-typed-array-gen.cc +++ b/deps/v8/src/builtins/builtins-typed-array-gen.cc @@ -154,13 +154,16 @@ TF_BUILTIN(TypedArrayPrototypeByteOffset, TypedArrayBuiltinsAssembler) { // Check if the {receiver} is actually a JSTypedArray. ThrowIfNotInstanceType(context, receiver, JS_TYPED_ARRAY_TYPE, kMethodName); - // Default to zero if the {receiver}s buffer was detached. 
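One behavioural note on the regexp-gen hunk above: hasIndices moves from the runtime-flag-gated CASE_FOR_FLAG list to the unconditional one, so the flags getter now always reports the d flag. A small JavaScript illustration (standard match-indices semantics, not specific to this patch):

  const re = /a(b)/d;
  re.hasIndices;             // true, reflected by the flags getter
  re.flags;                  // "d"
  re.exec("ab").indices[1];  // [1, 2], start/end offsets of capture group 1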
- TNode receiver_buffer = - LoadJSArrayBufferViewBuffer(CAST(receiver)); - TNode byte_offset = Select( - IsDetachedBuffer(receiver_buffer), [=] { return UintPtrConstant(0); }, - [=] { return LoadJSArrayBufferViewByteOffset(CAST(receiver)); }); - Return(ChangeUintPtrToTagged(byte_offset)); + // Default to zero if the {receiver}s buffer was detached / out of bounds. + Label detached_or_oob(this), not_detached_or_oob(this); + IsTypedArrayDetachedOrOutOfBounds(CAST(receiver), &detached_or_oob, + ¬_detached_or_oob); + BIND(&detached_or_oob); + Return(ChangeUintPtrToTagged(UintPtrConstant(0))); + + BIND(¬_detached_or_oob); + Return( + ChangeUintPtrToTagged(LoadJSArrayBufferViewByteOffset(CAST(receiver)))); } // ES6 #sec-get-%typedarray%.prototype.length @@ -267,6 +270,17 @@ void TypedArrayBuiltinsAssembler::CallCMemmove(TNode dest_ptr, std::make_pair(MachineType::UintPtr(), byte_length)); } +void TypedArrayBuiltinsAssembler::CallCRelaxedMemmove( + TNode dest_ptr, TNode src_ptr, + TNode byte_length) { + TNode memmove = + ExternalConstant(ExternalReference::relaxed_memmove_function()); + CallCFunction(memmove, MachineType::AnyTagged(), + std::make_pair(MachineType::Pointer(), dest_ptr), + std::make_pair(MachineType::Pointer(), src_ptr), + std::make_pair(MachineType::UintPtr(), byte_length)); +} + void TypedArrayBuiltinsAssembler::CallCMemcpy(TNode dest_ptr, TNode src_ptr, TNode byte_length) { diff --git a/deps/v8/src/builtins/builtins-typed-array-gen.h b/deps/v8/src/builtins/builtins-typed-array-gen.h index 0ec179ac9e422c..bb8a15ef021663 100644 --- a/deps/v8/src/builtins/builtins-typed-array-gen.h +++ b/deps/v8/src/builtins/builtins-typed-array-gen.h @@ -52,6 +52,9 @@ class TypedArrayBuiltinsAssembler : public CodeStubAssembler { void CallCMemmove(TNode dest_ptr, TNode src_ptr, TNode byte_length); + void CallCRelaxedMemmove(TNode dest_ptr, TNode src_ptr, + TNode byte_length); + void CallCMemcpy(TNode dest_ptr, TNode src_ptr, TNode byte_length); diff --git a/deps/v8/src/builtins/builtins-typed-array.cc b/deps/v8/src/builtins/builtins-typed-array.cc index bb936e6e463ef9..d6be81615dde32 100644 --- a/deps/v8/src/builtins/builtins-typed-array.cc +++ b/deps/v8/src/builtins/builtins-typed-array.cc @@ -99,7 +99,12 @@ BUILTIN(TypedArrayPrototypeCopyWithin) { count = count * element_size; uint8_t* data = static_cast(array->DataPtr()); - std::memmove(data + to, data + from, count); + if (array->buffer().is_shared()) { + base::Relaxed_Memmove(reinterpret_cast(data + to), + reinterpret_cast(data + from), count); + } else { + std::memmove(data + to, data + from, count); + } return *array; } @@ -114,7 +119,7 @@ BUILTIN(TypedArrayPrototypeFill) { ElementsKind kind = array->GetElementsKind(); Handle obj_value = args.atOrUndefined(isolate, 1); - if (kind == BIGINT64_ELEMENTS || kind == BIGUINT64_ELEMENTS) { + if (IsBigIntTypedArrayElementsKind(kind)) { ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, obj_value, BigInt::FromObject(isolate, obj_value)); } else { @@ -122,7 +127,7 @@ BUILTIN(TypedArrayPrototypeFill) { Object::ToNumber(isolate, obj_value)); } - int64_t len = array->length(); + int64_t len = array->GetLength(); int64_t start = 0; int64_t end = len; @@ -142,11 +147,22 @@ BUILTIN(TypedArrayPrototypeFill) { } } + if (V8_UNLIKELY(array->IsVariableLength())) { + bool out_of_bounds = false; + array->GetLengthOrOutOfBounds(out_of_bounds); + if (out_of_bounds) { + const MessageTemplate message = MessageTemplate::kDetachedOperation; + Handle operation = + isolate->factory()->NewStringFromAsciiChecked(method); + 
THROW_NEW_ERROR_RETURN_FAILURE(isolate, NewTypeError(message, operation)); + } + } else if (V8_UNLIKELY(array->WasDetached())) { + return *array; + } + int64_t count = end - start; if (count <= 0) return *array; - if (V8_UNLIKELY(array->WasDetached())) return *array; - // Ensure processed indexes are within array bounds DCHECK_GE(start, 0); DCHECK_LT(start, len); diff --git a/deps/v8/src/builtins/builtins-utils.h b/deps/v8/src/builtins/builtins-utils.h index 3d813cd598509d..e219aec65d9f5e 100644 --- a/deps/v8/src/builtins/builtins-utils.h +++ b/deps/v8/src/builtins/builtins-utils.h @@ -79,6 +79,7 @@ class BuiltinArguments : public JavaScriptArguments { // through the BuiltinArguments object args. // TODO(cbruni): add global flag to check whether any tracing events have been // enabled. +#ifdef V8_RUNTIME_CALL_STATS #define BUILTIN(name) \ V8_WARN_UNUSED_RESULT static Object Builtin_Impl_##name( \ BuiltinArguments args, Isolate* isolate); \ @@ -105,6 +106,21 @@ class BuiltinArguments : public JavaScriptArguments { V8_WARN_UNUSED_RESULT static Object Builtin_Impl_##name( \ BuiltinArguments args, Isolate* isolate) +#else // V8_RUNTIME_CALL_STATS +#define BUILTIN(name) \ + V8_WARN_UNUSED_RESULT static Object Builtin_Impl_##name( \ + BuiltinArguments args, Isolate* isolate); \ + \ + V8_WARN_UNUSED_RESULT Address Builtin_##name( \ + int args_length, Address* args_object, Isolate* isolate) { \ + DCHECK(isolate->context().is_null() || isolate->context().IsContext()); \ + BuiltinArguments args(args_length, args_object); \ + return CONVERT_OBJECT(Builtin_Impl_##name(args, isolate)); \ + } \ + \ + V8_WARN_UNUSED_RESULT static Object Builtin_Impl_##name( \ + BuiltinArguments args, Isolate* isolate) +#endif // V8_RUNTIME_CALL_STATS // ---------------------------------------------------------------------------- #define CHECK_RECEIVER(Type, name, method) \ diff --git a/deps/v8/src/builtins/cast.tq b/deps/v8/src/builtins/cast.tq index a10bc7c9466a4d..b12ea5d9fe41f0 100644 --- a/deps/v8/src/builtins/cast.tq +++ b/deps/v8/src/builtins/cast.tq @@ -547,10 +547,19 @@ Cast(implicit context: Context)(o: HeapObject): FastJSArrayForCopy labels CastError { if (IsArraySpeciesProtectorCellInvalid()) goto CastError; + // TODO(victorgomes): Check if we can cast from FastJSArrayForRead instead. 
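The typed-array changes above route TypedArray.prototype.copyWithin through a relaxed memmove when the backing buffer is shared, and teach TypedArray.prototype.fill about variable-length (RAB/GSAB-backed) views, throwing kDetachedOperation when such a view has gone out of bounds; the byteOffset getter earlier likewise falls back to 0 for detached or out-of-bounds views. A hedged JavaScript sketch of the behaviour being serviced, with the resizable part again assuming --harmony-rab-gsab and proposal semantics:

  // copyWithin over shared memory takes the relaxed memmove path:
  const sta = new Uint8Array(new SharedArrayBuffer(8));
  sta.set([1, 2, 3, 4, 5, 6, 7, 8]);
  sta.copyWithin(0, 4);               // Uint8Array [5, 6, 7, 8, 5, 6, 7, 8]

  // fill on a length-tracking view follows the buffer's current length:
  const rab = new ArrayBuffer(8, { maxByteLength: 16 });
  const view = new Uint8Array(rab);   // length-tracking view
  rab.resize(4);
  view.fill(7);                       // fills only the 4 in-bounds elements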
const a = Cast(o) otherwise CastError; return %RawDownCast(a); } +Cast(implicit context: Context)(o: HeapObject): + FastJSArrayForConcat + labels CastError { + if (IsIsConcatSpreadableProtectorCellInvalid()) goto CastError; + const a = Cast(o) otherwise CastError; + return %RawDownCast(a); +} + Cast(implicit context: Context)( o: HeapObject): FastJSArrayWithNoCustomIteration labels CastError { diff --git a/deps/v8/src/builtins/conversion.tq b/deps/v8/src/builtins/conversion.tq index 636f49a024d813..266fcaa55253f4 100644 --- a/deps/v8/src/builtins/conversion.tq +++ b/deps/v8/src/builtins/conversion.tq @@ -138,7 +138,7 @@ transitioning builtin ToObject(implicit context: Context)(input: JSAny): } case (o: JSAnyNotSmi): { const index: intptr = Convert( - o.map.in_object_properties_start_or_constructor_function_index); + o.map.inobject_properties_start_or_constructor_function_index); if (index != kNoConstructorFunctionIndex) goto WrapPrimitive( %RawDownCast>(index)); diff --git a/deps/v8/src/builtins/ia32/builtins-ia32.cc b/deps/v8/src/builtins/ia32/builtins-ia32.cc index 54013e7698482a..7a8875fee9a5b7 100644 --- a/deps/v8/src/builtins/ia32/builtins-ia32.cc +++ b/deps/v8/src/builtins/ia32/builtins-ia32.cc @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. +#include "src/codegen/register-arch.h" #if V8_TARGET_ARCH_IA32 #include "src/api/api-arguments.h" @@ -128,11 +129,8 @@ void Generate_JSBuiltinsConstructStubHelper(MacroAssembler* masm) { } // Remove caller arguments from the stack and return. - STATIC_ASSERT(kSmiTagSize == 1 && kSmiTag == 0); - __ PopReturnAddressTo(ecx); - __ lea(esp, Operand(esp, edx, times_half_system_pointer_size, - 1 * kSystemPointerSize)); // 1 ~ receiver - __ PushReturnAddressFrom(ecx); + __ DropArguments(edx, ecx, TurboAssembler::kCountIsSmi, + TurboAssembler::kCountExcludesReceiver); __ ret(0); __ bind(&stack_overflow); @@ -283,11 +281,8 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) { __ LeaveFrame(StackFrame::CONSTRUCT); // Remove caller arguments from the stack and return. - STATIC_ASSERT(kSmiTagSize == 1 && kSmiTag == 0); - __ pop(ecx); - __ lea(esp, Operand(esp, edx, times_half_system_pointer_size, - 1 * kSystemPointerSize)); // 1 ~ receiver - __ push(ecx); + __ DropArguments(edx, ecx, TurboAssembler::kCountIsSmi, + TurboAssembler::kCountExcludesReceiver); __ ret(0); // Otherwise we do a smi check and fall through to check if the return value @@ -776,10 +771,8 @@ static void LeaveInterpreterFrame(MacroAssembler* masm, Register scratch1, __ leave(); // Drop receiver + arguments. - Register return_pc = scratch2; - __ PopReturnAddressTo(return_pc); - __ add(esp, params_size); - __ PushReturnAddressFrom(return_pc); + __ DropArguments(params_size, scratch2, TurboAssembler::kCountIsBytes, + TurboAssembler::kCountIncludesReceiver); } // Tail-call |function_id| if |actual_marker| == |expected_marker| @@ -1102,10 +1095,10 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) { // 8-bit fields next to each other, so we could just optimize by writing a // 16-bit. These static asserts guard our assumption is valid. 
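The ToObject hunk above is only the field rename to inobject_properties_start_or_constructor_function_index, but the path it touches is the one that wraps primitives by looking up the constructor function index stored on the primitive's map. In JavaScript terms:

  Object(42) instanceof Number;    // true, wrapped via the constructor-function index
  Object("hi") instanceof String;  // true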
STATIC_ASSERT(BytecodeArray::kBytecodeAgeOffset == - BytecodeArray::kOsrNestingLevelOffset + kCharSize); + BytecodeArray::kOsrLoopNestingLevelOffset + kCharSize); STATIC_ASSERT(BytecodeArray::kNoAgeBytecodeAge == 0); __ mov_w(FieldOperand(kInterpreterBytecodeArrayRegister, - BytecodeArray::kOsrNestingLevelOffset), + BytecodeArray::kOsrLoopNestingLevelOffset), Immediate(0)); // Push bytecode array. @@ -1725,10 +1718,10 @@ void Builtins::Generate_BaselineOutOfLinePrologue(MacroAssembler* masm) { // are 8-bit fields next to each other, so we could just optimize by writing // a 16-bit. These static asserts guard our assumption is valid. STATIC_ASSERT(BytecodeArray::kBytecodeAgeOffset == - BytecodeArray::kOsrNestingLevelOffset + kCharSize); + BytecodeArray::kOsrLoopNestingLevelOffset + kCharSize); STATIC_ASSERT(BytecodeArray::kNoAgeBytecodeAge == 0); __ mov_w( - FieldOperand(bytecode_array, BytecodeArray::kOsrNestingLevelOffset), + FieldOperand(bytecode_array, BytecodeArray::kOsrLoopNestingLevelOffset), Immediate(0)); __ Push(bytecode_array); @@ -1915,11 +1908,9 @@ void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) { __ bind(&no_arg_array); } __ bind(&no_this_arg); - __ PopReturnAddressTo(ecx); - __ lea(esp, - Operand(esp, eax, times_system_pointer_size, kSystemPointerSize)); - __ Push(edi); - __ PushReturnAddressFrom(ecx); + __ DropArgumentsAndPushNewReceiver(eax, edi, ecx, + TurboAssembler::kCountIsInteger, + TurboAssembler::kCountExcludesReceiver); // Restore receiver to edi. __ movd(edi, xmm0); @@ -2026,11 +2017,9 @@ void Builtins::Generate_ReflectApply(MacroAssembler* masm) { // Spill argumentsList to use edx as a scratch register. __ movd(xmm0, edx); - __ PopReturnAddressTo(edx); - __ lea(esp, - Operand(esp, eax, times_system_pointer_size, kSystemPointerSize)); - __ Push(ecx); - __ PushReturnAddressFrom(edx); + __ DropArgumentsAndPushNewReceiver(eax, ecx, edx, + TurboAssembler::kCountIsInteger, + TurboAssembler::kCountExcludesReceiver); // Restore argumentsList. __ movd(edx, xmm0); @@ -2086,11 +2075,10 @@ void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) { // Spill argumentsList to use ecx as a scratch register. __ movd(xmm0, ecx); - __ PopReturnAddressTo(ecx); - __ lea(esp, - Operand(esp, eax, times_system_pointer_size, kSystemPointerSize)); - __ PushRoot(RootIndex::kUndefinedValue); - __ PushReturnAddressFrom(ecx); + __ DropArgumentsAndPushNewReceiver( + eax, masm->RootAsOperand(RootIndex::kUndefinedValue), ecx, + TurboAssembler::kCountIsInteger, + TurboAssembler::kCountExcludesReceiver); // Restore argumentsList. __ movd(ecx, xmm0); @@ -3986,16 +3974,9 @@ void Generate_DeoptimizationEntry(MacroAssembler* masm, __ movsd(Operand(esi, dst_offset), xmm0); } - if (FLAG_debug_code) { - const int kTopMask = 0x3800; - __ push(eax); - __ fwait(); - __ fnstsw_ax(); - __ test(eax, Immediate(kTopMask)); - __ Assert(zero, AbortReason::kFpuTopIsNotZeroInDeoptimizer); - __ pop(eax); - } // Clear FPU all exceptions. + // TODO(ulan): Find out why the TOP register is not zero here in some cases, + // and check that the generated code never deoptimizes with unbalanced stack. 
__ fnclex(); // Mark the stack as not iterable for the CPU profiler which won't be able to @@ -4115,19 +4096,57 @@ void Builtins::Generate_DeoptimizationEntry_Lazy(MacroAssembler* masm) { namespace { -// Converts an interpreter frame into a baseline frame and continues execution -// in baseline code (baseline code has to exist on the shared function info), -// either at the current or next (in execution order) bytecode. -void Generate_BaselineEntry(MacroAssembler* masm, bool next_bytecode, - bool is_osr = false) { - __ push(kInterpreterAccumulatorRegister); +// Restarts execution either at the current or next (in execution order) +// bytecode. If there is baseline code on the shared function info, converts an +// interpreter frame into a baseline frame and continues execution in baseline +// code. Otherwise execution continues with bytecode. +void Generate_BaselineOrInterpreterEntry(MacroAssembler* masm, + bool next_bytecode, + bool is_osr = false) { Label start; __ bind(&start); + // Spill the accumulator register; note that we're not within a frame, so we + // have to make sure to pop it before doing any GC-visible calls. + __ push(kInterpreterAccumulatorRegister); + // Get function from the frame. Register closure = eax; __ mov(closure, MemOperand(ebp, StandardFrameConstants::kFunctionOffset)); + // Get the Code object from the shared function info. + Register code_obj = esi; + __ mov(code_obj, + FieldOperand(closure, JSFunction::kSharedFunctionInfoOffset)); + __ mov(code_obj, + FieldOperand(code_obj, SharedFunctionInfo::kFunctionDataOffset)); + + // Check if we have baseline code. For OSR entry it is safe to assume we + // always have baseline code. + if (!is_osr) { + Label start_with_baseline; + __ CmpObjectType(code_obj, BASELINE_DATA_TYPE, + kInterpreterBytecodeOffsetRegister); + __ j(equal, &start_with_baseline); + + // Start with bytecode as there is no baseline code. + __ pop(kInterpreterAccumulatorRegister); + Builtin builtin_id = next_bytecode + ? Builtin::kInterpreterEnterAtNextBytecode + : Builtin::kInterpreterEnterAtBytecode; + __ Jump(masm->isolate()->builtins()->code_handle(builtin_id), + RelocInfo::CODE_TARGET); + + __ bind(&start_with_baseline); + } else if (FLAG_debug_code) { + __ CmpObjectType(code_obj, BASELINE_DATA_TYPE, + kInterpreterBytecodeOffsetRegister); + __ Assert(equal, AbortReason::kExpectedBaselineData); + } + + // Load baseline code from baseline data. + __ mov(code_obj, FieldOperand(code_obj, BaselineData::kBaselineCodeOffset)); + // Load the feedback vector. Register feedback_vector = ecx; __ mov(feedback_vector, @@ -4150,14 +4169,6 @@ void Generate_BaselineEntry(MacroAssembler* masm, bool next_bytecode, feedback_vector); feedback_vector = no_reg; - // Get the Code object from the shared function info. - Register code_obj = esi; - __ mov(code_obj, - FieldOperand(closure, JSFunction::kSharedFunctionInfoOffset)); - __ mov(code_obj, - FieldOperand(code_obj, SharedFunctionInfo::kFunctionDataOffset)); - __ mov(code_obj, FieldOperand(code_obj, BaselineData::kBaselineCodeOffset)); - // Compute baseline pc for bytecode offset. ExternalReference get_baseline_pc_extref; if (next_bytecode || is_osr) { @@ -4209,7 +4220,7 @@ void Generate_BaselineEntry(MacroAssembler* masm, bool next_bytecode, // TODO(pthier): Separate baseline Sparkplug from TF arming and don't disarm // Sparkplug here. 
__ mov_w(FieldOperand(kInterpreterBytecodeArrayRegister, - BytecodeArray::kOsrNestingLevelOffset), + BytecodeArray::kOsrLoopNestingLevelOffset), Immediate(0)); Generate_OSREntry(masm, code_obj); } else { @@ -4230,10 +4241,23 @@ void Generate_BaselineEntry(MacroAssembler* masm, bool next_bytecode, } __ bind(&install_baseline_code); + // Pop/re-push the accumulator so that it's spilled within the below frame + // scope, to keep the stack valid. Use ecx for this -- we can't save it in + // kInterpreterAccumulatorRegister because that aliases with closure. + DCHECK(!AreAliased(ecx, kContextRegister, closure)); + __ pop(ecx); + // Restore the clobbered context register. + __ mov(kContextRegister, + Operand(ebp, StandardFrameConstants::kContextOffset)); { FrameScope scope(masm, StackFrame::INTERNAL); + __ Push(ecx); __ Push(closure); __ CallRuntime(Runtime::kInstallBaselineCode, 1); + // Now that we're restarting, we don't have to worry about closure and + // accumulator aliasing, so pop the spilled accumulator directly back into + // the right register. + __ Pop(kInterpreterAccumulatorRegister); } // Retry from the start after installing baseline code. __ jmp(&start); @@ -4241,17 +4265,19 @@ void Generate_BaselineEntry(MacroAssembler* masm, bool next_bytecode, } // namespace -void Builtins::Generate_BaselineEnterAtBytecode(MacroAssembler* masm) { - Generate_BaselineEntry(masm, false); +void Builtins::Generate_BaselineOrInterpreterEnterAtBytecode( + MacroAssembler* masm) { + Generate_BaselineOrInterpreterEntry(masm, false); } -void Builtins::Generate_BaselineEnterAtNextBytecode(MacroAssembler* masm) { - Generate_BaselineEntry(masm, true); +void Builtins::Generate_BaselineOrInterpreterEnterAtNextBytecode( + MacroAssembler* masm) { + Generate_BaselineOrInterpreterEntry(masm, true); } void Builtins::Generate_InterpreterOnStackReplacement_ToBaseline( MacroAssembler* masm) { - Generate_BaselineEntry(masm, false, true); + Generate_BaselineOrInterpreterEntry(masm, false, true); } void Builtins::Generate_DynamicCheckMapsTrampoline(MacroAssembler* masm) { diff --git a/deps/v8/src/builtins/iterator.tq b/deps/v8/src/builtins/iterator.tq index 150e3d2cb57fe5..c2652e7eb029b4 100644 --- a/deps/v8/src/builtins/iterator.tq +++ b/deps/v8/src/builtins/iterator.tq @@ -110,7 +110,7 @@ transitioning builtin CallIteratorWithFeedback( iteratorMethod, %MakeLazy('GetLazyReceiver', receiver), context, feedback, callSlotUnTagged); const iteratorCallable: Callable = Cast(iteratorMethod) - otherwise ThrowCalledNonCallable(iteratorMethod); + otherwise ThrowIteratorError(receiver); return Call(context, iteratorCallable, receiver); } diff --git a/deps/v8/src/builtins/mips/builtins-mips.cc b/deps/v8/src/builtins/mips/builtins-mips.cc index 0f19f68c11a7f1..8f4bf4d06bd13f 100644 --- a/deps/v8/src/builtins/mips/builtins-mips.cc +++ b/deps/v8/src/builtins/mips/builtins-mips.cc @@ -1018,7 +1018,7 @@ static void MaybeOptimizeCodeOrTailCallOptimizedCodeSlot( // static void Builtins::Generate_BaselineOutOfLinePrologue(MacroAssembler* masm) { UseScratchRegisterScope temps(masm); - temps.Include(kScratchReg.bit() | kScratchReg2.bit()); + temps.Include(s1.bit() | s2.bit()); auto descriptor = Builtins::CallInterfaceDescriptorFor(Builtin::kBaselineOutOfLinePrologue); Register closure = descriptor.GetRegisterParameter( @@ -1085,10 +1085,10 @@ void Builtins::Generate_BaselineOutOfLinePrologue(MacroAssembler* masm) { // are 8-bit fields next to each other, so we could just optimize by writing // a 16-bit. 
These static asserts guard our assumption is valid. STATIC_ASSERT(BytecodeArray::kBytecodeAgeOffset == - BytecodeArray::kOsrNestingLevelOffset + kCharSize); + BytecodeArray::kOsrLoopNestingLevelOffset + kCharSize); STATIC_ASSERT(BytecodeArray::kNoAgeBytecodeAge == 0); __ sh(zero_reg, FieldMemOperand(bytecodeArray, - BytecodeArray::kOsrNestingLevelOffset)); + BytecodeArray::kOsrLoopNestingLevelOffset)); __ Push(argc, bytecodeArray); @@ -1243,10 +1243,10 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) { // 8-bit fields next to each other, so we could just optimize by writing a // 16-bit. These static asserts guard our assumption is valid. STATIC_ASSERT(BytecodeArray::kBytecodeAgeOffset == - BytecodeArray::kOsrNestingLevelOffset + kCharSize); + BytecodeArray::kOsrLoopNestingLevelOffset + kCharSize); STATIC_ASSERT(BytecodeArray::kNoAgeBytecodeAge == 0); __ sh(zero_reg, FieldMemOperand(kInterpreterBytecodeArrayRegister, - BytecodeArray::kOsrNestingLevelOffset)); + BytecodeArray::kOsrLoopNestingLevelOffset)); // Load initial bytecode offset. __ li(kInterpreterBytecodeOffsetRegister, @@ -3938,12 +3938,13 @@ void Builtins::Generate_DeoptimizationEntry_Lazy(MacroAssembler* masm) { namespace { -// Converts an interpreter frame into a baseline frame and continues execution -// in baseline code (baseline code has to exist on the shared function info), -// either at the start or the end of the current bytecode. -void Generate_BaselineEntry(MacroAssembler* masm, bool next_bytecode, - bool is_osr = false) { - __ Push(kInterpreterAccumulatorRegister); +// Restarts execution either at the current or next (in execution order) +// bytecode. If there is baseline code on the shared function info, converts an +// interpreter frame into a baseline frame and continues execution in baseline +// code. Otherwise execution continues with bytecode. +void Generate_BaselineOrInterpreterEntry(MacroAssembler* masm, + bool next_bytecode, + bool is_osr = false) { Label start; __ bind(&start); @@ -3951,6 +3952,38 @@ void Generate_BaselineEntry(MacroAssembler* masm, bool next_bytecode, Register closure = a1; __ Lw(closure, MemOperand(fp, StandardFrameConstants::kFunctionOffset)); + // Get the Code object from the shared function info. + Register code_obj = s1; + __ Lw(code_obj, + FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset)); + __ Lw(code_obj, + FieldMemOperand(code_obj, SharedFunctionInfo::kFunctionDataOffset)); + + // Check if we have baseline code. For OSR entry it is safe to assume we + // always have baseline code. + if (!is_osr) { + Label start_with_baseline; + __ GetObjectType(code_obj, t6, t6); + __ Branch(&start_with_baseline, eq, t6, Operand(BASELINE_DATA_TYPE)); + + // Start with bytecode as there is no baseline code. + Builtin builtin_id = next_bytecode + ? Builtin::kInterpreterEnterAtNextBytecode + : Builtin::kInterpreterEnterAtBytecode; + __ Jump(masm->isolate()->builtins()->code_handle(builtin_id), + RelocInfo::CODE_TARGET); + + // Start with baseline code. + __ bind(&start_with_baseline); + } else if (FLAG_debug_code) { + __ GetObjectType(code_obj, t6, t6); + __ Assert(eq, AbortReason::kExpectedBaselineData, t6, + Operand(BASELINE_DATA_TYPE)); + } + + // Load baseline code from baseline data. + __ Lw(code_obj, FieldMemOperand(code_obj, BaselineData::kBaselineCodeOffset)); + // Replace BytecodeOffset with the feedback vector. 
Register feedback_vector = a2; __ Lw(feedback_vector, @@ -3972,14 +4005,6 @@ void Generate_BaselineEntry(MacroAssembler* masm, bool next_bytecode, MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp)); feedback_vector = no_reg; - // Get the Code object from the shared function info. - Register code_obj = s1; - __ Lw(code_obj, - FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset)); - __ Lw(code_obj, - FieldMemOperand(code_obj, SharedFunctionInfo::kFunctionDataOffset)); - __ Lw(code_obj, FieldMemOperand(code_obj, BaselineData::kBaselineCodeOffset)); - // Compute baseline pc for bytecode offset. ExternalReference get_baseline_pc_extref; if (next_bytecode || is_osr) { @@ -4013,6 +4038,8 @@ void Generate_BaselineEntry(MacroAssembler* masm, bool next_bytecode, // Get bytecode array from the stack frame. __ Lw(kInterpreterBytecodeArrayRegister, MemOperand(fp, InterpreterFrameConstants::kBytecodeArrayFromFp)); + // Save the accumulator register, since it's clobbered by the below call. + __ Push(kInterpreterAccumulatorRegister); { Register arg_reg_1 = a0; Register arg_reg_2 = a1; @@ -4034,7 +4061,7 @@ void Generate_BaselineEntry(MacroAssembler* masm, bool next_bytecode, __ Lw(kInterpreterBytecodeArrayRegister, MemOperand(fp, InterpreterFrameConstants::kBytecodeArrayFromFp)); __ sh(zero_reg, FieldMemOperand(kInterpreterBytecodeArrayRegister, - BytecodeArray::kOsrNestingLevelOffset)); + BytecodeArray::kOsrLoopNestingLevelOffset)); Generate_OSREntry(masm, code_obj, Operand(Code::kHeaderSize - kHeapObjectTag)); } else { @@ -4058,25 +4085,29 @@ void Generate_BaselineEntry(MacroAssembler* masm, bool next_bytecode, __ bind(&install_baseline_code); { FrameScope scope(masm, StackFrame::INTERNAL); + __ Push(kInterpreterAccumulatorRegister); __ Push(closure); __ CallRuntime(Runtime::kInstallBaselineCode, 1); + __ Pop(kInterpreterAccumulatorRegister); } // Retry from the start after installing baseline code. 
__ Branch(&start); } } // namespace -void Builtins::Generate_BaselineEnterAtBytecode(MacroAssembler* masm) { - Generate_BaselineEntry(masm, false); +void Builtins::Generate_BaselineOrInterpreterEnterAtBytecode( + MacroAssembler* masm) { + Generate_BaselineOrInterpreterEntry(masm, false); } -void Builtins::Generate_BaselineEnterAtNextBytecode(MacroAssembler* masm) { - Generate_BaselineEntry(masm, true); +void Builtins::Generate_BaselineOrInterpreterEnterAtNextBytecode( + MacroAssembler* masm) { + Generate_BaselineOrInterpreterEntry(masm, true); } void Builtins::Generate_InterpreterOnStackReplacement_ToBaseline( MacroAssembler* masm) { - Generate_BaselineEntry(masm, false, true); + Generate_BaselineOrInterpreterEntry(masm, false, true); } void Builtins::Generate_DynamicCheckMapsTrampoline(MacroAssembler* masm) { diff --git a/deps/v8/src/builtins/mips64/builtins-mips64.cc b/deps/v8/src/builtins/mips64/builtins-mips64.cc index ce1df3bd6a96b8..45e1c32f82f990 100644 --- a/deps/v8/src/builtins/mips64/builtins-mips64.cc +++ b/deps/v8/src/builtins/mips64/builtins-mips64.cc @@ -1030,7 +1030,7 @@ static void MaybeOptimizeCodeOrTailCallOptimizedCodeSlot( // static void Builtins::Generate_BaselineOutOfLinePrologue(MacroAssembler* masm) { UseScratchRegisterScope temps(masm); - temps.Include(kScratchReg.bit() | kScratchReg2.bit()); + temps.Include(s1.bit() | s2.bit()); auto descriptor = Builtins::CallInterfaceDescriptorFor(Builtin::kBaselineOutOfLinePrologue); Register closure = descriptor.GetRegisterParameter( @@ -1097,10 +1097,10 @@ void Builtins::Generate_BaselineOutOfLinePrologue(MacroAssembler* masm) { // are 8-bit fields next to each other, so we could just optimize by writing // a 16-bit. These static asserts guard our assumption is valid. STATIC_ASSERT(BytecodeArray::kBytecodeAgeOffset == - BytecodeArray::kOsrNestingLevelOffset + kCharSize); + BytecodeArray::kOsrLoopNestingLevelOffset + kCharSize); STATIC_ASSERT(BytecodeArray::kNoAgeBytecodeAge == 0); __ Sh(zero_reg, FieldMemOperand(bytecodeArray, - BytecodeArray::kOsrNestingLevelOffset)); + BytecodeArray::kOsrLoopNestingLevelOffset)); __ Push(argc, bytecodeArray); @@ -1255,10 +1255,10 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) { // 8-bit fields next to each other, so we could just optimize by writing a // 16-bit. These static asserts guard our assumption is valid. STATIC_ASSERT(BytecodeArray::kBytecodeAgeOffset == - BytecodeArray::kOsrNestingLevelOffset + kCharSize); + BytecodeArray::kOsrLoopNestingLevelOffset + kCharSize); STATIC_ASSERT(BytecodeArray::kNoAgeBytecodeAge == 0); __ sh(zero_reg, FieldMemOperand(kInterpreterBytecodeArrayRegister, - BytecodeArray::kOsrNestingLevelOffset)); + BytecodeArray::kOsrLoopNestingLevelOffset)); // Load initial bytecode offset. __ li(kInterpreterBytecodeOffsetRegister, @@ -3523,12 +3523,13 @@ void Builtins::Generate_DeoptimizationEntry_Lazy(MacroAssembler* masm) { namespace { -// Converts an interpreter frame into a baseline frame and continues execution -// in baseline code (baseline code has to exist on the shared function info), -// either at the start or the end of the current bytecode. -void Generate_BaselineEntry(MacroAssembler* masm, bool next_bytecode, - bool is_osr = false) { - __ Push(kInterpreterAccumulatorRegister); +// Restarts execution either at the current or next (in execution order) +// bytecode. If there is baseline code on the shared function info, converts an +// interpreter frame into a baseline frame and continues execution in baseline +// code. 
Otherwise execution continues with bytecode. +void Generate_BaselineOrInterpreterEntry(MacroAssembler* masm, + bool next_bytecode, + bool is_osr = false) { Label start; __ bind(&start); @@ -3536,6 +3537,38 @@ void Generate_BaselineEntry(MacroAssembler* masm, bool next_bytecode, Register closure = a1; __ Ld(closure, MemOperand(fp, StandardFrameConstants::kFunctionOffset)); + // Get the Code object from the shared function info. + Register code_obj = s1; + __ Ld(code_obj, + FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset)); + __ Ld(code_obj, + FieldMemOperand(code_obj, SharedFunctionInfo::kFunctionDataOffset)); + + // Check if we have baseline code. For OSR entry it is safe to assume we + // always have baseline code. + if (!is_osr) { + Label start_with_baseline; + __ GetObjectType(code_obj, t2, t2); + __ Branch(&start_with_baseline, eq, t2, Operand(BASELINE_DATA_TYPE)); + + // Start with bytecode as there is no baseline code. + Builtin builtin_id = next_bytecode + ? Builtin::kInterpreterEnterAtNextBytecode + : Builtin::kInterpreterEnterAtBytecode; + __ Jump(masm->isolate()->builtins()->code_handle(builtin_id), + RelocInfo::CODE_TARGET); + + // Start with baseline code. + __ bind(&start_with_baseline); + } else if (FLAG_debug_code) { + __ GetObjectType(code_obj, t2, t2); + __ Assert(eq, AbortReason::kExpectedBaselineData, t2, + Operand(BASELINE_DATA_TYPE)); + } + + // Load baseline code from baseline data. + __ Ld(code_obj, FieldMemOperand(code_obj, BaselineData::kBaselineCodeOffset)); + // Replace BytecodeOffset with the feedback vector. Register feedback_vector = a2; __ Ld(feedback_vector, @@ -3556,14 +3589,6 @@ void Generate_BaselineEntry(MacroAssembler* masm, bool next_bytecode, MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp)); feedback_vector = no_reg; - // Get the Code object from the shared function info. - Register code_obj = s1; - __ Ld(code_obj, - FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset)); - __ Ld(code_obj, - FieldMemOperand(code_obj, SharedFunctionInfo::kFunctionDataOffset)); - __ Ld(code_obj, FieldMemOperand(code_obj, BaselineData::kBaselineCodeOffset)); - // Compute baseline pc for bytecode offset. ExternalReference get_baseline_pc_extref; if (next_bytecode || is_osr) { @@ -3597,6 +3622,8 @@ void Generate_BaselineEntry(MacroAssembler* masm, bool next_bytecode, // Get bytecode array from the stack frame. __ Ld(kInterpreterBytecodeArrayRegister, MemOperand(fp, InterpreterFrameConstants::kBytecodeArrayFromFp)); + // Save the accumulator register, since it's clobbered by the below call. + __ Push(kInterpreterAccumulatorRegister); { Register arg_reg_1 = a0; Register arg_reg_2 = a1; @@ -3618,7 +3645,7 @@ void Generate_BaselineEntry(MacroAssembler* masm, bool next_bytecode, __ Ld(kInterpreterBytecodeArrayRegister, MemOperand(fp, InterpreterFrameConstants::kBytecodeArrayFromFp)); __ Sh(zero_reg, FieldMemOperand(kInterpreterBytecodeArrayRegister, - BytecodeArray::kOsrNestingLevelOffset)); + BytecodeArray::kOsrLoopNestingLevelOffset)); Generate_OSREntry(masm, code_obj, Operand(Code::kHeaderSize - kHeapObjectTag)); } else { @@ -3642,8 +3669,10 @@ void Generate_BaselineEntry(MacroAssembler* masm, bool next_bytecode, __ bind(&install_baseline_code); { FrameScope scope(masm, StackFrame::INTERNAL); + __ Push(kInterpreterAccumulatorRegister); __ Push(closure); __ CallRuntime(Runtime::kInstallBaselineCode, 1); + __ Pop(kInterpreterAccumulatorRegister); } // Retry from the start after installing baseline code. 
__ Branch(&start); @@ -3651,17 +3680,19 @@ void Generate_BaselineEntry(MacroAssembler* masm, bool next_bytecode, } // namespace -void Builtins::Generate_BaselineEnterAtBytecode(MacroAssembler* masm) { - Generate_BaselineEntry(masm, false); +void Builtins::Generate_BaselineOrInterpreterEnterAtBytecode( + MacroAssembler* masm) { + Generate_BaselineOrInterpreterEntry(masm, false); } -void Builtins::Generate_BaselineEnterAtNextBytecode(MacroAssembler* masm) { - Generate_BaselineEntry(masm, true); +void Builtins::Generate_BaselineOrInterpreterEnterAtNextBytecode( + MacroAssembler* masm) { + Generate_BaselineOrInterpreterEntry(masm, true); } void Builtins::Generate_InterpreterOnStackReplacement_ToBaseline( MacroAssembler* masm) { - Generate_BaselineEntry(masm, false, true); + Generate_BaselineOrInterpreterEntry(masm, false, true); } void Builtins::Generate_DynamicCheckMapsTrampoline(MacroAssembler* masm) { diff --git a/deps/v8/src/builtins/ppc/builtins-ppc.cc b/deps/v8/src/builtins/ppc/builtins-ppc.cc index c0b7212aaccb84..02b76175ec128f 100644 --- a/deps/v8/src/builtins/ppc/builtins-ppc.cc +++ b/deps/v8/src/builtins/ppc/builtins-ppc.cc @@ -125,11 +125,8 @@ void Generate_JSBuiltinsConstructStubHelper(MacroAssembler* masm) { // Leave construct frame. } // Remove caller arguments from the stack and return. - STATIC_ASSERT(kSmiTagSize == 1 && kSmiTag == 0); - - __ SmiToPtrArrayOffset(scratch, scratch); - __ add(sp, sp, scratch); - __ addi(sp, sp, Operand(kSystemPointerSize)); + __ DropArguments(scratch, TurboAssembler::kCountIsSmi, + TurboAssembler::kCountExcludesReceiver); __ blr(); __ bind(&stack_overflow); @@ -286,11 +283,8 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) { __ LeaveFrame(StackFrame::CONSTRUCT); // Remove caller arguments from the stack and return. - STATIC_ASSERT(kSmiTagSize == 1 && kSmiTag == 0); - - __ SmiToPtrArrayOffset(r4, r4); - __ add(sp, sp, r4); - __ addi(sp, sp, Operand(kSystemPointerSize)); + __ DropArguments(r4, TurboAssembler::kCountIsSmi, + TurboAssembler::kCountExcludesReceiver); __ blr(); __ bind(&check_receiver); @@ -407,7 +401,7 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) { __ subi(r6, r6, Operand(1)); __ cmpi(r6, Operand::Zero()); __ blt(&done_loop); - __ ShiftLeftImm(r10, r6, Operand(kTaggedSizeLog2)); + __ ShiftLeftU64(r10, r6, Operand(kTaggedSizeLog2)); __ add(scratch, r5, r10); __ LoadAnyTaggedField( scratch, FieldMemOperand(scratch, FixedArray::kHeaderSize), r0); @@ -725,7 +719,7 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm, __ cmpi(r7, Operand::Zero()); __ beq(&done); - __ ShiftLeftImm(r9, r7, Operand(kSystemPointerSizeLog2)); + __ ShiftLeftU64(r9, r7, Operand(kSystemPointerSizeLog2)); __ add(r8, r8, r9); // point to last arg __ mtctr(r7); @@ -821,7 +815,7 @@ static void LeaveInterpreterFrame(MacroAssembler* masm, Register scratch1, // Compute the size of the actual parameters + receiver (in bytes). __ LoadU64(actual_params_size, MemOperand(fp, StandardFrameConstants::kArgCOffset)); - __ ShiftLeftImm(actual_params_size, actual_params_size, + __ ShiftLeftU64(actual_params_size, actual_params_size, Operand(kSystemPointerSizeLog2)); __ addi(actual_params_size, actual_params_size, Operand(kSystemPointerSize)); @@ -835,7 +829,8 @@ static void LeaveInterpreterFrame(MacroAssembler* masm, Register scratch1, // Leave the frame (also dropping the register file). 
__ LeaveFrame(StackFrame::INTERPRETED); - __ add(sp, sp, params_size); + __ DropArguments(params_size, TurboAssembler::kCountIsBytes, + TurboAssembler::kCountIncludesReceiver); } // Tail-call |function_id| if |actual_marker| == |expected_marker| @@ -1129,12 +1124,12 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) { // 8-bit fields next to each other, so we could just optimize by writing a // 16-bit. These static asserts guard our assumption is valid. STATIC_ASSERT(BytecodeArray::kBytecodeAgeOffset == - BytecodeArray::kOsrNestingLevelOffset + kCharSize); + BytecodeArray::kOsrLoopNestingLevelOffset + kCharSize); STATIC_ASSERT(BytecodeArray::kNoAgeBytecodeAge == 0); __ li(r8, Operand(0)); __ StoreU16(r8, FieldMemOperand(kInterpreterBytecodeArrayRegister, - BytecodeArray::kOsrNestingLevelOffset), + BytecodeArray::kOsrLoopNestingLevelOffset), r0); // Load initial bytecode offset. @@ -1162,7 +1157,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) { // TODO(rmcilroy): Consider doing more than one push per loop iteration. Label loop, no_args; __ LoadRoot(kInterpreterAccumulatorRegister, RootIndex::kUndefinedValue); - __ ShiftRightImm(r5, r5, Operand(kSystemPointerSizeLog2), SetRC); + __ ShiftRightU64(r5, r5, Operand(kSystemPointerSizeLog2), SetRC); __ beq(&no_args, cr0); __ mtctr(r5); __ bind(&loop); @@ -1181,7 +1176,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) { r0); __ cmpi(r8, Operand::Zero()); __ beq(&no_incoming_new_target_or_generator_register); - __ ShiftLeftImm(r8, r8, Operand(kSystemPointerSizeLog2)); + __ ShiftLeftU64(r8, r8, Operand(kSystemPointerSizeLog2)); __ StoreU64(r6, MemOperand(fp, r8)); __ bind(&no_incoming_new_target_or_generator_register); @@ -1204,7 +1199,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) { ExternalReference::interpreter_dispatch_table_address(masm->isolate())); __ lbzx(r6, MemOperand(kInterpreterBytecodeArrayRegister, kInterpreterBytecodeOffsetRegister)); - __ ShiftLeftImm(r6, r6, Operand(kSystemPointerSizeLog2)); + __ ShiftLeftU64(r6, r6, Operand(kSystemPointerSizeLog2)); __ LoadU64(kJavaScriptCallCodeStartRegister, MemOperand(kInterpreterDispatchTableRegister, r6)); __ Call(kJavaScriptCallCodeStartRegister); @@ -1277,7 +1272,7 @@ static void GenerateInterpreterPushArgs(MacroAssembler* masm, Register num_args, Register scratch) { ASM_CODE_COMMENT(masm); __ subi(scratch, num_args, Operand(1)); - __ ShiftLeftImm(scratch, scratch, Operand(kSystemPointerSizeLog2)); + __ ShiftLeftU64(scratch, scratch, Operand(kSystemPointerSizeLog2)); __ sub(start_address, start_address, scratch); // Push the arguments. __ PushArray(start_address, num_args, scratch, r0, @@ -1483,7 +1478,7 @@ static void Generate_InterpreterEnterBytecode(MacroAssembler* masm) { Register scratch = temps.Acquire(); __ lbzx(ip, MemOperand(kInterpreterBytecodeArrayRegister, kInterpreterBytecodeOffsetRegister)); - __ ShiftLeftImm(scratch, scratch, Operand(kSystemPointerSizeLog2)); + __ ShiftLeftU64(scratch, scratch, Operand(kSystemPointerSizeLog2)); __ LoadU64(kJavaScriptCallCodeStartRegister, MemOperand(kInterpreterDispatchTableRegister, scratch)); __ Jump(kJavaScriptCallCodeStartRegister); @@ -1572,7 +1567,7 @@ void Generate_ContinueToBuiltinHelper(MacroAssembler* masm, // from LAZY is always the last argument. 
__ addi(r3, r3, Operand(BuiltinContinuationFrameConstants::kFixedSlotCount)); - __ ShiftLeftImm(r0, r3, Operand(kSystemPointerSizeLog2)); + __ ShiftLeftU64(r0, r3, Operand(kSystemPointerSizeLog2)); __ StoreU64(scratch, MemOperand(sp, r0)); // Recover arguments count. __ subi(r3, r3, @@ -1698,9 +1693,8 @@ void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) { __ LoadU64(r5, MemOperand(sp, 2 * kSystemPointerSize)); // argArray __ bind(&done); - __ ShiftLeftImm(ip, r3, Operand(kSystemPointerSizeLog2)); - __ add(sp, sp, ip); - __ StoreU64(r8, MemOperand(sp)); + __ DropArgumentsAndPushNewReceiver(r3, r8, TurboAssembler::kCountIsInteger, + TurboAssembler::kCountExcludesReceiver); } // ----------- S t a t e ------------- @@ -1783,9 +1777,8 @@ void Builtins::Generate_ReflectApply(MacroAssembler* masm) { __ LoadU64(r5, MemOperand(sp, 3 * kSystemPointerSize)); // argArray __ bind(&done); - __ ShiftLeftImm(ip, r3, Operand(kSystemPointerSizeLog2)); - __ add(sp, sp, ip); - __ StoreU64(r8, MemOperand(sp)); + __ DropArgumentsAndPushNewReceiver(r3, r8, TurboAssembler::kCountIsInteger, + TurboAssembler::kCountExcludesReceiver); } // ----------- S t a t e ------------- @@ -1833,9 +1826,8 @@ void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) { __ blt(&done); __ LoadU64(r6, MemOperand(sp, 3 * kSystemPointerSize)); // argArray __ bind(&done); - __ ShiftLeftImm(r0, r3, Operand(kSystemPointerSizeLog2)); - __ add(sp, sp, r0); - __ StoreU64(r7, MemOperand(sp)); + __ DropArgumentsAndPushNewReceiver(r3, r7, TurboAssembler::kCountIsInteger, + TurboAssembler::kCountExcludesReceiver); } // ----------- S t a t e ------------- @@ -1902,7 +1894,7 @@ void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm, Label copy; Register src = r9, dest = r8; __ addi(src, sp, Operand(-kSystemPointerSize)); - __ ShiftLeftImm(r0, r7, Operand(kSystemPointerSizeLog2)); + __ ShiftLeftU64(r0, r7, Operand(kSystemPointerSizeLog2)); __ sub(sp, sp, r0); // Update stack pointer. __ addi(dest, sp, Operand(-kSystemPointerSize)); @@ -1997,7 +1989,7 @@ void Builtins::Generate_CallOrConstructForwardVarargs(MacroAssembler* masm, __ addi(r7, fp, Operand(CommonFrameConstants::kFixedFrameSizeAboveFp + kSystemPointerSize)); - __ ShiftLeftImm(scratch, r5, Operand(kSystemPointerSizeLog2)); + __ ShiftLeftU64(scratch, r5, Operand(kSystemPointerSizeLog2)); __ add(r7, r7, scratch); // Move the arguments already in the stack, @@ -2007,7 +1999,7 @@ void Builtins::Generate_CallOrConstructForwardVarargs(MacroAssembler* masm, Register src = ip, dest = r5; // r7 and r10 are context and root. __ addi(src, sp, Operand(-kSystemPointerSize)); // Update stack pointer. - __ ShiftLeftImm(scratch, r8, Operand(kSystemPointerSizeLog2)); + __ ShiftLeftU64(scratch, r8, Operand(kSystemPointerSizeLog2)); __ sub(sp, sp, scratch); __ addi(dest, sp, Operand(-kSystemPointerSize)); __ addi(r0, r3, Operand(1)); @@ -2028,7 +2020,7 @@ void Builtins::Generate_CallOrConstructForwardVarargs(MacroAssembler* masm, __ bind(&loop); { __ subi(r8, r8, Operand(1)); - __ ShiftLeftImm(scratch, r8, Operand(kSystemPointerSizeLog2)); + __ ShiftLeftU64(scratch, r8, Operand(kSystemPointerSizeLog2)); __ LoadU64(r0, MemOperand(r7, scratch)); __ StoreU64(r0, MemOperand(r5, scratch)); __ cmpi(r8, Operand::Zero()); @@ -2176,7 +2168,7 @@ void Generate_PushBoundArguments(MacroAssembler* masm) { // Reserve stack space for the [[BoundArguments]]. 
{ Label done; - __ ShiftLeftImm(r10, r7, Operand(kSystemPointerSizeLog2)); + __ ShiftLeftU64(r10, r7, Operand(kSystemPointerSizeLog2)); __ sub(r0, sp, r10); // Check the stack for overflow. We are not trying to catch interruptions // (i.e. debug break and preemption) here, so check the "real stack @@ -2206,7 +2198,7 @@ void Generate_PushBoundArguments(MacroAssembler* masm) { __ bind(&loop); __ subi(r7, r7, Operand(1)); - __ ShiftLeftImm(scratch, r7, Operand(kTaggedSizeLog2)); + __ ShiftLeftU64(scratch, r7, Operand(kTaggedSizeLog2)); __ add(scratch, scratch, r5); __ LoadAnyTaggedField(scratch, MemOperand(scratch), r0); __ Push(scratch); @@ -2520,7 +2512,7 @@ void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size, __ mr(r4, r5); } else { // Compute the argv pointer. - __ ShiftLeftImm(r4, r3, Operand(kSystemPointerSizeLog2)); + __ ShiftLeftU64(r4, r3, Operand(kSystemPointerSizeLog2)); __ add(r4, r4, sp); __ subi(r4, r4, Operand(kSystemPointerSize)); } @@ -2756,7 +2748,7 @@ void Builtins::Generate_DoubleToI(MacroAssembler* masm) { STATIC_ASSERT(HeapNumber::kMantissaBitsInTopWord >= 16); __ oris(result_reg, result_reg, Operand(1 << ((HeapNumber::kMantissaBitsInTopWord)-16))); - __ slw(r0, result_reg, scratch); + __ ShiftLeftU32(r0, result_reg, scratch); __ orx(result_reg, scratch_low, r0); __ b(&negate); @@ -2768,7 +2760,7 @@ void Builtins::Generate_DoubleToI(MacroAssembler* masm) { // 52 <= exponent <= 83, shift only scratch_low. // On entry, scratch contains: 52 - exponent. __ neg(scratch, scratch); - __ slw(result_reg, scratch_low, scratch); + __ ShiftLeftU32(result_reg, scratch_low, scratch); __ bind(&negate); // If input was positive, scratch_high ASR 31 equals 0 and @@ -2831,7 +2823,7 @@ static void CallApiFunctionAndReturn(MacroAssembler* masm, __ lbz(scratch, MemOperand(scratch, 0)); __ cmpi(scratch, Operand::Zero()); - if (CpuFeatures::IsSupported(ISELECT)) { + if (CpuFeatures::IsSupported(PPC_7_PLUS)) { __ Move(scratch, thunk_ref); __ isel(eq, scratch, function_address, scratch); } else { @@ -3025,7 +3017,7 @@ void Builtins::Generate_CallApiCallback(MacroAssembler* masm) { // from the API function here. __ mov(scratch, Operand((FCA::kArgsLength + 1 /* receiver */) * kSystemPointerSize)); - __ ShiftLeftImm(ip, argc, Operand(kSystemPointerSizeLog2)); + __ ShiftLeftU64(ip, argc, Operand(kSystemPointerSizeLog2)); __ add(scratch, scratch, ip); __ StoreU64(scratch, MemOperand(sp, (kStackFrameExtraParamSlot + 4) * kSystemPointerSize)); @@ -3327,7 +3319,7 @@ void Generate_DeoptimizationEntry(MacroAssembler* masm, __ lwz(r4, MemOperand(r3, Deoptimizer::output_count_offset())); __ LoadU64(r7, MemOperand(r3, Deoptimizer::output_offset())); // r7 is output_. - __ ShiftLeftImm(r4, r4, Operand(kSystemPointerSizeLog2)); + __ ShiftLeftU64(r4, r4, Operand(kSystemPointerSizeLog2)); __ add(r4, r7, r4); __ b(&outer_loop_header); @@ -3420,12 +3412,14 @@ void Builtins::Generate_DeoptimizationEntry_Lazy(MacroAssembler* masm) { Generate_DeoptimizationEntry(masm, DeoptimizeKind::kLazy); } -void Builtins::Generate_BaselineEnterAtBytecode(MacroAssembler* masm) { +void Builtins::Generate_BaselineOrInterpreterEnterAtBytecode( + MacroAssembler* masm) { // Implement on this platform, https://crrev.com/c/2695591. __ bkpt(0); } -void Builtins::Generate_BaselineEnterAtNextBytecode(MacroAssembler* masm) { +void Builtins::Generate_BaselineOrInterpreterEnterAtNextBytecode( + MacroAssembler* masm) { // Implement on this platform, https://crrev.com/c/2695591. 
__ bkpt(0); } diff --git a/deps/v8/src/builtins/riscv64/builtins-riscv64.cc b/deps/v8/src/builtins/riscv64/builtins-riscv64.cc index 03f20057e6ec94..f79e392f4800fc 100644 --- a/deps/v8/src/builtins/riscv64/builtins-riscv64.cc +++ b/deps/v8/src/builtins/riscv64/builtins-riscv64.cc @@ -1149,10 +1149,10 @@ void Builtins::Generate_BaselineOutOfLinePrologue(MacroAssembler* masm) { // are 8-bit fields next to each other, so we could just optimize by writing // a 16-bit. These static asserts guard our assumption is valid. STATIC_ASSERT(BytecodeArray::kBytecodeAgeOffset == - BytecodeArray::kOsrNestingLevelOffset + kCharSize); + BytecodeArray::kOsrLoopNestingLevelOffset + kCharSize); STATIC_ASSERT(BytecodeArray::kNoAgeBytecodeAge == 0); __ Sh(zero_reg, FieldMemOperand(bytecodeArray, - BytecodeArray::kOsrNestingLevelOffset)); + BytecodeArray::kOsrLoopNestingLevelOffset)); __ Push(argc, bytecodeArray); @@ -1315,10 +1315,10 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) { // 8-bit fields next to each other, so we could just optimize by writing a // 16-bit. These static asserts guard our assumption is valid. STATIC_ASSERT(BytecodeArray::kBytecodeAgeOffset == - BytecodeArray::kOsrNestingLevelOffset + kCharSize); + BytecodeArray::kOsrLoopNestingLevelOffset + kCharSize); STATIC_ASSERT(BytecodeArray::kNoAgeBytecodeAge == 0); __ Sh(zero_reg, FieldMemOperand(kInterpreterBytecodeArrayRegister, - BytecodeArray::kOsrNestingLevelOffset)); + BytecodeArray::kOsrLoopNestingLevelOffset)); // Load initial bytecode offset. __ li(kInterpreterBytecodeOffsetRegister, @@ -3633,11 +3633,13 @@ void Builtins::Generate_DeoptimizationEntry_Lazy(MacroAssembler* masm) { namespace { -// Converts an interpreter frame into a baseline frame and continues execution -// in baseline code (baseline code has to exist on the shared function info), -// either at the start or the end of the current bytecode. -void Generate_BaselineEntry(MacroAssembler* masm, bool next_bytecode, - bool is_osr = false) { +// Restarts execution either at the current or next (in execution order) +// bytecode. If there is baseline code on the shared function info, converts an +// interpreter frame into a baseline frame and continues execution in baseline +// code. Otherwise execution continues with bytecode. +void Generate_BaselineOrInterpreterEntry(MacroAssembler* masm, + bool next_bytecode, + bool is_osr = false) { __ Push(zero_reg, kInterpreterAccumulatorRegister); Label start; __ bind(&start); @@ -3646,6 +3648,46 @@ void Generate_BaselineEntry(MacroAssembler* masm, bool next_bytecode, Register closure = a1; __ Ld(closure, MemOperand(fp, StandardFrameConstants::kFunctionOffset)); + // Get the Code object from the shared function info. + Register code_obj = a4; + __ LoadTaggedPointerField( + code_obj, + FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset)); + __ LoadTaggedPointerField( + code_obj, + FieldMemOperand(code_obj, SharedFunctionInfo::kFunctionDataOffset)); + + // Check if we have baseline code. For OSR entry it is safe to assume we + // always have baseline code. + if (!is_osr) { + Label start_with_baseline; + UseScratchRegisterScope temps(masm); + Register scratch = temps.Acquire(); + __ GetObjectType(code_obj, scratch, scratch); + __ Branch(&start_with_baseline, eq, scratch, Operand(BASELINE_DATA_TYPE)); + + // Start with bytecode as there is no baseline code. + __ Pop(zero_reg, kInterpreterAccumulatorRegister); + Builtin builtin_id = next_bytecode + ? 
Builtin::kInterpreterEnterAtNextBytecode + : Builtin::kInterpreterEnterAtBytecode; + __ Jump(masm->isolate()->builtins()->code_handle(builtin_id), + RelocInfo::CODE_TARGET); + + // Start with baseline code. + __ bind(&start_with_baseline); + } else if (FLAG_debug_code) { + UseScratchRegisterScope temps(masm); + Register scratch = temps.Acquire(); + __ GetObjectType(code_obj, scratch, scratch); + __ Assert(eq, AbortReason::kExpectedBaselineData, scratch, + Operand(BASELINE_DATA_TYPE)); + } + + // Load baseline code from baseline data. + __ LoadTaggedPointerField( + code_obj, FieldMemOperand(code_obj, BaselineData::kBaselineCodeOffset)); + // Replace BytecodeOffset with the feedback vector. Register feedback_vector = a2; __ LoadTaggedPointerField( @@ -3668,17 +3710,6 @@ void Generate_BaselineEntry(MacroAssembler* masm, bool next_bytecode, MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp)); feedback_vector = no_reg; - // Get the Code object from the shared function info. - Register code_obj = type; - __ LoadTaggedPointerField( - code_obj, - FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset)); - __ LoadTaggedPointerField( - code_obj, - FieldMemOperand(code_obj, SharedFunctionInfo::kFunctionDataOffset)); - __ LoadTaggedPointerField( - code_obj, FieldMemOperand(code_obj, BaselineData::kBaselineCodeOffset)); - // Compute baseline pc for bytecode offset. __ Push(zero_reg, kInterpreterAccumulatorRegister); ExternalReference get_baseline_pc_extref; @@ -3731,7 +3762,7 @@ void Generate_BaselineEntry(MacroAssembler* masm, bool next_bytecode, // TODO(pthier): Separate baseline Sparkplug from TF arming and don't disarm // Sparkplug here. __ Sd(zero_reg, FieldMemOperand(kInterpreterBytecodeArrayRegister, - BytecodeArray::kOsrNestingLevelOffset)); + BytecodeArray::kOsrLoopNestingLevelOffset)); Generate_OSREntry(masm, code_obj, Operand(Code::kHeaderSize - kHeapObjectTag)); } else { @@ -3764,17 +3795,19 @@ void Generate_BaselineEntry(MacroAssembler* masm, bool next_bytecode, } // namespace -void Builtins::Generate_BaselineEnterAtBytecode(MacroAssembler* masm) { - Generate_BaselineEntry(masm, false); +void Builtins::Generate_BaselineOrInterpreterEnterAtBytecode( + MacroAssembler* masm) { + Generate_BaselineOrInterpreterEntry(masm, false); } -void Builtins::Generate_BaselineEnterAtNextBytecode(MacroAssembler* masm) { - Generate_BaselineEntry(masm, true); +void Builtins::Generate_BaselineOrInterpreterEnterAtNextBytecode( + MacroAssembler* masm) { + Generate_BaselineOrInterpreterEntry(masm, true); } void Builtins::Generate_InterpreterOnStackReplacement_ToBaseline( MacroAssembler* masm) { - Generate_BaselineEntry(masm, false, true); + Generate_BaselineOrInterpreterEntry(masm, false, true); } void Builtins::Generate_DynamicCheckMapsTrampoline(MacroAssembler* masm) { diff --git a/deps/v8/src/builtins/s390/builtins-s390.cc b/deps/v8/src/builtins/s390/builtins-s390.cc index 2370f5ed5787eb..5129cc6ee31550 100644 --- a/deps/v8/src/builtins/s390/builtins-s390.cc +++ b/deps/v8/src/builtins/s390/builtins-s390.cc @@ -121,11 +121,8 @@ void Generate_JSBuiltinsConstructStubHelper(MacroAssembler* masm) { // Leave construct frame. } // Remove caller arguments from the stack and return. 
- STATIC_ASSERT(kSmiTagSize == 1 && kSmiTag == 0); - - __ SmiToPtrArrayOffset(scratch, scratch); - __ AddS64(sp, sp, scratch); - __ AddS64(sp, sp, Operand(kSystemPointerSize)); + __ DropArguments(scratch, TurboAssembler::kCountIsSmi, + TurboAssembler::kCountExcludesReceiver); __ Ret(); __ bind(&stack_overflow); @@ -278,11 +275,8 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) { __ LeaveFrame(StackFrame::CONSTRUCT); // Remove caller arguments from the stack and return. - STATIC_ASSERT(kSmiTagSize == 1 && kSmiTag == 0); - - __ SmiToPtrArrayOffset(r3, r3); - __ AddS64(sp, sp, r3); - __ AddS64(sp, sp, Operand(kSystemPointerSize)); + __ DropArguments(r3, TurboAssembler::kCountIsSmi, + TurboAssembler::kCountExcludesReceiver); __ Ret(); __ bind(&check_receiver); @@ -870,7 +864,7 @@ static void LeaveInterpreterFrame(MacroAssembler* masm, Register scratch1, __ LoadU64(params_size, MemOperand(fp, InterpreterFrameConstants::kBytecodeArrayFromFp)); __ LoadU32(params_size, - FieldMemOperand(params_size, BytecodeArray::kParameterSizeOffset)); + FieldMemOperand(params_size, BytecodeArray::kParameterSizeOffset)); Register actual_params_size = scratch2; // Compute the size of the actual parameters + receiver (in bytes). @@ -892,7 +886,8 @@ static void LeaveInterpreterFrame(MacroAssembler* masm, Register scratch1, // Leave the frame (also dropping the register file). __ LeaveFrame(StackFrame::INTERPRETED); - __ AddS64(sp, sp, params_size); + __ DropArguments(params_size, TurboAssembler::kCountIsBytes, + TurboAssembler::kCountIncludesReceiver); } // Tail-call |function_id| if |actual_marker| == |expected_marker| @@ -1174,12 +1169,12 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) { // 8-bit fields next to each other, so we could just optimize by writing a // 16-bit. These static asserts guard our assumption is valid. STATIC_ASSERT(BytecodeArray::kBytecodeAgeOffset == - BytecodeArray::kOsrNestingLevelOffset + kCharSize); + BytecodeArray::kOsrLoopNestingLevelOffset + kCharSize); STATIC_ASSERT(BytecodeArray::kNoAgeBytecodeAge == 0); __ mov(r1, Operand(0)); __ StoreU16(r1, FieldMemOperand(kInterpreterBytecodeArrayRegister, - BytecodeArray::kOsrNestingLevelOffset), + BytecodeArray::kOsrLoopNestingLevelOffset), r0); // Load the initial bytecode offset. 
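As on ia32 and ppc above, the s390 hunks here and in the apply/Reflect hunks that follow swap hand-rolled stack arithmetic for DropArguments / DropArgumentsAndPushNewReceiver; there is no JS-visible change. For orientation, the JavaScript entry points these per-architecture builtins back:

  function add(a, b) { return a + b; }
  add.apply(null, [1, 2]);               // Generate_FunctionPrototypeApply
  Reflect.apply(add, null, [1, 2]);      // Generate_ReflectApply
  Reflect.construct(Array, [3]).length;  // 3, via Generate_ReflectConstruct
  add.bind(null, 40)(2);                 // 42, bound args handled by Generate_PushBoundArguments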
@@ -1730,9 +1725,8 @@ void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) { __ LoadU64(r4, MemOperand(sp, 2 * kSystemPointerSize)); // argArray __ bind(&done); - __ ShiftLeftU64(r1, r2, Operand(kSystemPointerSizeLog2)); - __ lay(sp, MemOperand(sp, r1)); - __ StoreU64(r7, MemOperand(sp)); + __ DropArgumentsAndPushNewReceiver(r2, r7, TurboAssembler::kCountIsInteger, + TurboAssembler::kCountExcludesReceiver); } // ----------- S t a t e ------------- @@ -1816,9 +1810,8 @@ void Builtins::Generate_ReflectApply(MacroAssembler* masm) { __ LoadU64(r4, MemOperand(sp, 3 * kSystemPointerSize)); // argArray __ bind(&done); - __ ShiftLeftU64(r1, r2, Operand(kSystemPointerSizeLog2)); - __ lay(sp, MemOperand(sp, r1)); - __ StoreU64(r7, MemOperand(sp)); + __ DropArgumentsAndPushNewReceiver(r2, r7, TurboAssembler::kCountIsInteger, + TurboAssembler::kCountExcludesReceiver); } // ----------- S t a t e ------------- @@ -1867,9 +1860,8 @@ void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) { __ blt(&done); __ LoadU64(r5, MemOperand(sp, 3 * kSystemPointerSize)); // argArray __ bind(&done); - __ ShiftLeftU64(r1, r2, Operand(kSystemPointerSizeLog2)); - __ lay(sp, MemOperand(sp, r1)); - __ StoreU64(r6, MemOperand(sp)); + __ DropArgumentsAndPushNewReceiver(r2, r6, TurboAssembler::kCountIsInteger, + TurboAssembler::kCountExcludesReceiver); } // ----------- S t a t e ------------- @@ -3411,12 +3403,14 @@ void Builtins::Generate_DeoptimizationEntry_Lazy(MacroAssembler* masm) { Generate_DeoptimizationEntry(masm, DeoptimizeKind::kLazy); } -void Builtins::Generate_BaselineEnterAtBytecode(MacroAssembler* masm) { +void Builtins::Generate_BaselineOrInterpreterEnterAtBytecode( + MacroAssembler* masm) { // Implement on this platform, https://crrev.com/c/2695591. __ bkpt(0); } -void Builtins::Generate_BaselineEnterAtNextBytecode(MacroAssembler* masm) { +void Builtins::Generate_BaselineOrInterpreterEnterAtNextBytecode( + MacroAssembler* masm) { // Implement on this platform, https://crrev.com/c/2695591. __ bkpt(0); } diff --git a/deps/v8/src/builtins/typed-array-createtypedarray.tq b/deps/v8/src/builtins/typed-array-createtypedarray.tq index 6646bbfa80d7ed..2f94f6205f751d 100644 --- a/deps/v8/src/builtins/typed-array-createtypedarray.tq +++ b/deps/v8/src/builtins/typed-array-createtypedarray.tq @@ -161,7 +161,7 @@ transitioning macro ConstructByArrayLike(implicit context: Context)( ThrowTypeError(MessageTemplate::kDetachedOperation, 'Construct'); } else if (src.elements_kind != elementsInfo.kind) { - goto IfSlow; + goto IfElementsKindMismatch(src.elements_kind); } else if (length > 0) { const byteLength = typedArray.byte_length; @@ -174,6 +174,12 @@ transitioning macro ConstructByArrayLike(implicit context: Context)( typedArray.data_ptr, src.data_ptr, byteLength); } } + } label IfElementsKindMismatch(srcKind: ElementsKind) deferred { + if (IsBigInt64ElementsKind(srcKind) != + IsBigInt64ElementsKind(elementsInfo.kind)) { + ThrowTypeError(MessageTemplate::kBigIntMixedTypes); + } + goto IfSlow; } label IfSlow deferred { if (length > 0) { TypedArrayCopyElements( diff --git a/deps/v8/src/builtins/typed-array-findlast.tq b/deps/v8/src/builtins/typed-array-findlast.tq new file mode 100644 index 00000000000000..634e17b9368eca --- /dev/null +++ b/deps/v8/src/builtins/typed-array-findlast.tq @@ -0,0 +1,112 @@ +// Copyright 2021 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
+ +#include 'src/builtins/builtins-typed-array-gen.h' + +namespace typed_array { +const kBuiltinNameFindLast: constexpr string = + '%TypedArray%.prototype.findLast'; + +// Continuation part of +// https://tc39.es/proposal-array-find-from-last/index.html#sec-%typedarray%.prototype.findlast +// when array buffer was detached. +transitioning builtin FindLastAllElementsDetachedContinuation( + implicit context: Context)( + array: JSTypedArray, predicate: Callable, thisArg: JSAny, + initialK: Number): JSAny { + // 6. Repeat, while k ≥ 0 + for (let k: Number = initialK; k >= 0; k--) { + // 6a. Let Pk be ! ToString(𝔽(k)). + // there is no need to cast ToString to load elements. + + // 6b. Let kValue be ! Get(O, Pk). + // kValue must be undefined when the buffer was detached. + + // 6c. Let testResult be ! ToBoolean(? Call(predicate, thisArg, « kValue, + // 𝔽(k), O »)). + // TODO(v8:4153): Consider versioning this loop for Smi and non-Smi + // indices to optimize Convert(k) for the most common case. + const result = + Call(context, predicate, thisArg, Undefined, Convert(k), array); + // 6d. If testResult is true, return kValue. + if (ToBoolean(result)) { + return Undefined; + } + + // 6e. Set k to k - 1. (done by the loop). + } + + // 7. Return undefined. + return Undefined; +} + +// https://tc39.es/proposal-array-find-from-last/index.html#sec-%typedarray%.prototype.findlast +transitioning macro FindLastAllElements(implicit context: Context)( + array: typed_array::AttachedJSTypedArray, predicate: Callable, + thisArg: JSAny): JSAny labels +Bailout(Number) { + let witness = typed_array::NewAttachedJSTypedArrayWitness(array); + // 3. Let len be O.[[ArrayLength]]. + const length: uintptr = witness.Get().length; + // 5. Let k be len - 1. + // 6. Repeat, while k ≥ 0 + for (let k: uintptr = length; k-- > 0;) { + witness.Recheck() otherwise goto Bailout(Convert(k)); + // 6a. Let Pk be ! ToString(𝔽(k)). + // there is no need to cast ToString to load elements. + + // 6b. Let kValue be ! Get(O, Pk). + const value: JSAny = witness.Load(k); + + // 6c. Let testResult be ! ToBoolean(? Call(predicate, thisArg, « kValue, + // 𝔽(k), O »)). + // TODO(v8:4153): Consider versioning this loop for Smi and non-Smi + // indices to optimize Convert(k) for the most common case. + const result = Call( + context, predicate, thisArg, value, Convert(k), + witness.GetStable()); + // 6d. If testResult is true, return kValue. + if (ToBoolean(result)) { + return value; + } + + // 6e. Set k to k - 1. (done by the loop). + } + + // 7. Return undefined. + return Undefined; +} + +// https://tc39.es/proposal-array-find-from-last/index.html#sec-%typedarray%.prototype.findlast +transitioning javascript builtin +TypedArrayPrototypeFindLast( + js-implicit context: NativeContext, receiver: JSAny)(...arguments): JSAny { + // arguments[0] = callback + // arguments[1] = thisArg + try { + // 1. Let O be the this value. + const array: JSTypedArray = Cast(receiver) + otherwise NotTypedArray; + // 2. Perform ? ValidateTypedArray(O). + const uarray = typed_array::EnsureAttached(array) otherwise IsDetached; + + // 4. If IsCallable(predicate) is false, throw a TypeError exception. 
+ const predicate = Cast(arguments[0]) otherwise NotCallable; + const thisArg = arguments[1]; + try { + return FindLastAllElements(uarray, predicate, thisArg) + otherwise Bailout; + } label Bailout(k: Number) deferred { + return FindLastAllElementsDetachedContinuation( + uarray, predicate, thisArg, k); + } + } label NotCallable deferred { + ThrowTypeError(MessageTemplate::kCalledNonCallable, arguments[0]); + } label NotTypedArray deferred { + ThrowTypeError(MessageTemplate::kNotTypedArray, kBuiltinNameFindLast); + } label IsDetached deferred { + ThrowTypeError(MessageTemplate::kDetachedOperation, kBuiltinNameFindLast); + } +} +} diff --git a/deps/v8/src/builtins/typed-array-findlastindex.tq b/deps/v8/src/builtins/typed-array-findlastindex.tq new file mode 100644 index 00000000000000..4b20114c91b4cd --- /dev/null +++ b/deps/v8/src/builtins/typed-array-findlastindex.tq @@ -0,0 +1,115 @@ +// Copyright 2021 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#include 'src/builtins/builtins-typed-array-gen.h' + +namespace typed_array { +const kBuiltinNameFindLastIndex: constexpr string = + '%TypedArray%.prototype.findIndexLast'; + +// Continuation part of +// https://tc39.es/proposal-array-find-from-last/index.html#sec-%typedarray%.prototype.findlastindex +// when array buffer was detached. +transitioning builtin FindLastIndexAllElementsDetachedContinuation( + implicit context: Context)( + array: JSTypedArray, predicate: Callable, thisArg: JSAny, + initialK: Number): Number { + // 6. Repeat, while k ≥ 0 + for (let k: Number = initialK; k >= 0; k--) { + // 6a. Let Pk be ! ToString(𝔽(k)). + // there is no need to cast ToString to load elements. + + // 6b. Let kValue be ! Get(O, Pk). + // kValue must be undefined when the buffer was detached. + + // 6c. Let testResult be ! ToBoolean(? Call(predicate, thisArg, « kValue, + // 𝔽(k), O »)). + // TODO(v8:4153): Consider versioning this loop for Smi and non-Smi + // indices to optimize Convert(k) for the most common case. + const indexNumber: Number = Convert(k); + const result = + Call(context, predicate, thisArg, Undefined, indexNumber, array); + // 6d. If testResult is true, return 𝔽(k). + if (ToBoolean(result)) { + return indexNumber; + } + + // 6e. Set k to k - 1. (done by the loop). + } + + // 7. Return -1𝔽. + return -1; +} + +// https://tc39.es/proposal-array-find-from-last/index.html#sec-%typedarray%.prototype.findlastindex +transitioning macro FindLastIndexAllElements(implicit context: Context)( + array: typed_array::AttachedJSTypedArray, predicate: Callable, + thisArg: JSAny): Number labels +Bailout(Number) { + let witness = typed_array::NewAttachedJSTypedArrayWitness(array); + // 3. Let len be O.[[ArrayLength]]. + const length: uintptr = witness.Get().length; + // 5. Let k be len - 1. + // 6. Repeat, while k ≥ 0 + for (let k: uintptr = length; k-- > 0;) { + witness.Recheck() otherwise goto Bailout(Convert(k)); + // 6a. Let Pk be ! ToString(𝔽(k)). + // there is no need to cast ToString to load elements. + + // 6b. Let kValue be ! Get(O, Pk). + const value: JSAny = witness.Load(k); + + // 6c. Let testResult be ! ToBoolean(? Call(predicate, thisArg, « kValue, + // 𝔽(k), O »)). + // TODO(v8:4153): Consider versioning this loop for Smi and non-Smi + // indices to optimize Convert(k) for the most common case. 
+ const indexNumber: Number = Convert(k); + const result = Call( + context, predicate, thisArg, value, indexNumber, witness.GetStable()); + // 6d. If testResult is true, return 𝔽(k). + if (ToBoolean(result)) { + return indexNumber; + } + + // 6e. Set k to k - 1. (done by the loop). + } + + // 7. Return -1𝔽. + return -1; +} + +// https://tc39.es/proposal-array-find-from-last/index.html#sec-%typedarray%.prototype.findlastindex +transitioning javascript builtin +TypedArrayPrototypeFindLastIndex( + js-implicit context: NativeContext, receiver: JSAny)(...arguments): JSAny { + // arguments[0] = callback + // arguments[1] = thisArg. + try { + // 1. Let O be the this value. + const array: JSTypedArray = Cast(receiver) + otherwise NotTypedArray; + // 2. Perform ? ValidateTypedArray(O). + const uarray = typed_array::EnsureAttached(array) otherwise IsDetached; + + // 4. If IsCallable(predicate) is false, throw a TypeError exception. + const predicate = Cast(arguments[0]) otherwise NotCallable; + const thisArg = arguments[1]; + + try { + return FindLastIndexAllElements(uarray, predicate, thisArg) + otherwise Bailout; + } label Bailout(k: Number) deferred { + return FindLastIndexAllElementsDetachedContinuation( + uarray, predicate, thisArg, k); + } + } label NotCallable deferred { + ThrowTypeError(MessageTemplate::kCalledNonCallable, arguments[0]); + } label NotTypedArray deferred { + ThrowTypeError(MessageTemplate::kNotTypedArray, kBuiltinNameFindLastIndex); + } label IsDetached deferred { + ThrowTypeError( + MessageTemplate::kDetachedOperation, kBuiltinNameFindLastIndex); + } +} +} diff --git a/deps/v8/src/builtins/typed-array-set.tq b/deps/v8/src/builtins/typed-array-set.tq index b5c9dcb261df65..f4d2a40f41133a 100644 --- a/deps/v8/src/builtins/typed-array-set.tq +++ b/deps/v8/src/builtins/typed-array-set.tq @@ -281,7 +281,12 @@ TypedArrayPrototypeSetTypedArray(implicit context: Context, receiver: JSAny)( // value, true, Unordered). // iii. Set srcByteIndex to srcByteIndex + 1. // iv. Set targetByteIndex to targetByteIndex + 1. - CallCMemmove(dstPtr, typedArray.data_ptr, countBytes); + if (IsSharedArrayBuffer(target.buffer)) { + // SABs need a relaxed memmove to preserve atomicity. + CallCRelaxedMemmove(dstPtr, typedArray.data_ptr, countBytes); + } else { + CallCMemmove(dstPtr, typedArray.data_ptr, countBytes); + } } label IfSlow deferred { // 22. If target.[[ContentType]] is not equal to // typedArray.[[ContentType]], throw a TypeError exception. diff --git a/deps/v8/src/builtins/typed-array-slice.tq b/deps/v8/src/builtins/typed-array-slice.tq index 60604c548fc272..2a18433f93debf 100644 --- a/deps/v8/src/builtins/typed-array-slice.tq +++ b/deps/v8/src/builtins/typed-array-slice.tq @@ -36,7 +36,12 @@ macro FastCopy( assert(countBytes <= dest.byte_length); assert(countBytes <= src.byte_length - startOffset); - typed_array::CallCMemmove(dest.data_ptr, srcPtr, countBytes); + if (IsSharedArrayBuffer(src.buffer)) { + // SABs need a relaxed memmove to preserve atomicity. 
+ typed_array::CallCRelaxedMemmove(dest.data_ptr, srcPtr, countBytes); + } else { + typed_array::CallCMemmove(dest.data_ptr, srcPtr, countBytes); + } } macro SlowCopy(implicit context: Context)( diff --git a/deps/v8/src/builtins/typed-array.tq b/deps/v8/src/builtins/typed-array.tq index 2686005ba5c12a..87bcb2fb5921b7 100644 --- a/deps/v8/src/builtins/typed-array.tq +++ b/deps/v8/src/builtins/typed-array.tq @@ -65,6 +65,8 @@ extern macro TypedArrayBuiltinsAssembler::CallCMemset( RawPtr, intptr, uintptr): void; extern macro TypedArrayBuiltinsAssembler::CallCRelaxedMemcpy( RawPtr, RawPtr, uintptr): void; +extern macro TypedArrayBuiltinsAssembler::CallCRelaxedMemmove( + RawPtr, RawPtr, uintptr): void; extern macro GetTypedArrayBuffer(implicit context: Context)(JSTypedArray): JSArrayBuffer; extern macro TypedArrayBuiltinsAssembler::GetTypedArrayElementsInfo( diff --git a/deps/v8/src/builtins/x64/builtins-x64.cc b/deps/v8/src/builtins/x64/builtins-x64.cc index 993f8234af1d25..14186e3be6d1eb 100644 --- a/deps/v8/src/builtins/x64/builtins-x64.cc +++ b/deps/v8/src/builtins/x64/builtins-x64.cc @@ -92,7 +92,7 @@ void Generate_JSBuiltinsConstructStubHelper(MacroAssembler* masm) { // ----------------------------------- Label stack_overflow; - __ StackOverflowCheck(rax, rcx, &stack_overflow, Label::kFar); + __ StackOverflowCheck(rax, &stack_overflow, Label::kFar); // Enter a construct frame. { @@ -129,10 +129,8 @@ void Generate_JSBuiltinsConstructStubHelper(MacroAssembler* masm) { } // Remove caller arguments from the stack and return. - __ PopReturnAddressTo(rcx); - SmiIndex index = masm->SmiToIndex(rbx, rbx, kSystemPointerSizeLog2); - __ leaq(rsp, Operand(rsp, index.reg, index.scale, 1 * kSystemPointerSize)); - __ PushReturnAddressFrom(rcx); + __ DropArguments(rbx, rcx, TurboAssembler::kCountIsSmi, + TurboAssembler::kCountExcludesReceiver); __ ret(0); @@ -228,9 +226,9 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) { __ SmiUntag(rax, Operand(rbp, ConstructFrameConstants::kLengthOffset)); // Check if we have enough stack space to push all arguments. - // Argument count in rax. Clobbers rcx. + // Argument count in rax. Label stack_overflow; - __ StackOverflowCheck(rax, rcx, &stack_overflow); + __ StackOverflowCheck(rax, &stack_overflow); // TODO(victorgomes): When the arguments adaptor is completely removed, we // should get the formal parameter count and copy the arguments in its @@ -281,10 +279,8 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) { __ movq(rbx, Operand(rbp, ConstructFrameConstants::kLengthOffset)); __ LeaveFrame(StackFrame::CONSTRUCT); // Remove caller arguments from the stack and return. - __ PopReturnAddressTo(rcx); - SmiIndex index = masm->SmiToIndex(rbx, rbx, kSystemPointerSizeLog2); - __ leaq(rsp, Operand(rsp, index.reg, index.scale, 1 * kSystemPointerSize)); - __ PushReturnAddressFrom(rcx); + __ DropArguments(rbx, rcx, TurboAssembler::kCountIsSmi, + TurboAssembler::kCountExcludesReceiver); __ ret(0); // If the result is a smi, it is *not* an object in the ECMA sense. @@ -599,9 +595,9 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm, // r9 : receiver // Check if we have enough stack space to push all arguments. - // Argument count in rax. Clobbers rcx. + // Argument count in rax. 
Label enough_stack_space, stack_overflow; - __ StackOverflowCheck(rax, rcx, &stack_overflow, Label::kNear); + __ StackOverflowCheck(rax, &stack_overflow, Label::kNear); __ jmp(&enough_stack_space, Label::kNear); __ bind(&stack_overflow); @@ -880,10 +876,8 @@ static void LeaveInterpreterFrame(MacroAssembler* masm, Register scratch1, __ leave(); // Drop receiver + arguments. - Register return_pc = scratch2; - __ PopReturnAddressTo(return_pc); - __ addq(rsp, params_size); - __ PushReturnAddressFrom(return_pc); + __ DropArguments(params_size, scratch2, TurboAssembler::kCountIsBytes, + TurboAssembler::kCountIncludesReceiver); } // Tail-call |function_id| if |actual_marker| == |expected_marker| @@ -1187,10 +1181,10 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) { // 8-bit fields next to each other, so we could just optimize by writing a // 16-bit. These static asserts guard our assumption is valid. STATIC_ASSERT(BytecodeArray::kBytecodeAgeOffset == - BytecodeArray::kOsrNestingLevelOffset + kCharSize); + BytecodeArray::kOsrLoopNestingLevelOffset + kCharSize); STATIC_ASSERT(BytecodeArray::kNoAgeBytecodeAge == 0); __ movw(FieldOperand(kInterpreterBytecodeArrayRegister, - BytecodeArray::kOsrNestingLevelOffset), + BytecodeArray::kOsrLoopNestingLevelOffset), Immediate(0)); // Load initial bytecode offset. @@ -1396,7 +1390,7 @@ void Builtins::Generate_InterpreterPushArgsThenCallImpl( __ leal(rcx, Operand(rax, 1)); // Add one for receiver. // Add a stack check before pushing arguments. - __ StackOverflowCheck(rcx, rdx, &stack_overflow); + __ StackOverflowCheck(rcx, &stack_overflow); // Pop return address to allow tail-call after pushing arguments. __ PopReturnAddressTo(kScratchRegister); @@ -1457,7 +1451,7 @@ void Builtins::Generate_InterpreterPushArgsThenConstructImpl( Label stack_overflow; // Add a stack check before pushing arguments. - __ StackOverflowCheck(rax, r8, &stack_overflow); + __ StackOverflowCheck(rax, &stack_overflow); // Pop return address to allow tail-call after pushing arguments. __ PopReturnAddressTo(kScratchRegister); @@ -1704,11 +1698,11 @@ void Builtins::Generate_BaselineOutOfLinePrologue(MacroAssembler* masm) { // are 8-bit fields next to each other, so we could just optimize by // writing a 16-bit. These static asserts guard our assumption is valid. 
STATIC_ASSERT(BytecodeArray::kBytecodeAgeOffset == - BytecodeArray::kOsrNestingLevelOffset + kCharSize); + BytecodeArray::kOsrLoopNestingLevelOffset + kCharSize); STATIC_ASSERT(BytecodeArray::kNoAgeBytecodeAge == 0); - __ movw( - FieldOperand(bytecode_array, BytecodeArray::kOsrNestingLevelOffset), - Immediate(0)); + __ movw(FieldOperand(bytecode_array, + BytecodeArray::kOsrLoopNestingLevelOffset), + Immediate(0)); __ Push(bytecode_array); // Baseline code frames store the feedback vector where interpreter would @@ -1899,11 +1893,9 @@ void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) { __ bind(&no_arg_array); } __ bind(&no_this_arg); - __ PopReturnAddressTo(rcx); - __ leaq(rsp, - Operand(rsp, rax, times_system_pointer_size, kSystemPointerSize)); - __ Push(rdx); - __ PushReturnAddressFrom(rcx); + __ DropArgumentsAndPushNewReceiver(rax, rdx, rcx, + TurboAssembler::kCountIsInteger, + TurboAssembler::kCountExcludesReceiver); } // ----------- S t a t e ------------- @@ -2006,11 +1998,9 @@ void Builtins::Generate_ReflectApply(MacroAssembler* masm) { __ j(below, &done, Label::kNear); __ movq(rbx, args[3]); // argumentsList __ bind(&done); - __ PopReturnAddressTo(rcx); - __ leaq(rsp, - Operand(rsp, rax, times_system_pointer_size, kSystemPointerSize)); - __ Push(rdx); - __ PushReturnAddressFrom(rcx); + __ DropArgumentsAndPushNewReceiver(rax, rdx, rcx, + TurboAssembler::kCountIsInteger, + TurboAssembler::kCountExcludesReceiver); } // ----------- S t a t e ------------- @@ -2059,11 +2049,10 @@ void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) { __ j(below, &done, Label::kNear); __ movq(rdx, args[3]); // new.target __ bind(&done); - __ PopReturnAddressTo(rcx); - __ leaq(rsp, - Operand(rsp, rax, times_system_pointer_size, kSystemPointerSize)); - __ PushRoot(RootIndex::kUndefinedValue); - __ PushReturnAddressFrom(rcx); + __ DropArgumentsAndPushNewReceiver( + rax, masm->RootAsOperand(RootIndex::kUndefinedValue), rcx, + TurboAssembler::kCountIsInteger, + TurboAssembler::kCountExcludesReceiver); } // ----------- S t a t e ------------- @@ -2120,7 +2109,7 @@ void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm, } Label stack_overflow; - __ StackOverflowCheck(rcx, r8, &stack_overflow, Label::kNear); + __ StackOverflowCheck(rcx, &stack_overflow, Label::kNear); // Push additional arguments onto the stack. // Move the arguments already in the stack, @@ -2222,7 +2211,7 @@ void Builtins::Generate_CallOrConstructForwardVarargs(MacroAssembler* masm, // ----------------------------------- // Check for stack overflow. - __ StackOverflowCheck(r8, r12, &stack_overflow, Label::kNear); + __ StackOverflowCheck(r8, &stack_overflow, Label::kNear); // Forward the arguments from the caller frame. // Move the arguments already in the stack, @@ -3345,16 +3334,8 @@ void Builtins::Generate_GenericJSToWasmWrapper(MacroAssembler* masm) { // expected to be on the top of the stack). // We cannot use just the ret instruction for this, because we cannot pass the // number of slots to remove in a Register as an argument. - Register return_addr = rbx; - __ popq(return_addr); - Register caller_frame_slots_count = param_count; - // Add one to also pop the receiver. The receiver is passed to a JSFunction - // over the stack but is neither included in the number of parameters passed - // to this function nor in the number of parameters expected in this function. 
- __ addq(caller_frame_slots_count, Immediate(1)); - __ shlq(caller_frame_slots_count, Immediate(kSystemPointerSizeLog2)); - __ addq(rsp, caller_frame_slots_count); - __ pushq(return_addr); + __ DropArguments(param_count, rbx, TurboAssembler::kCountIsInteger, + TurboAssembler::kCountExcludesReceiver); __ ret(0); // -------------------------------------------------------------------------- @@ -4377,12 +4358,13 @@ void Builtins::Generate_DeoptimizationEntry_Lazy(MacroAssembler* masm) { namespace { -// Converts an interpreter frame into a baseline frame and continues execution -// in baseline code (baseline code has to exist on the shared function info), -// either at the current or next (in execution order) bytecode. -void Generate_BaselineEntry(MacroAssembler* masm, bool next_bytecode, - bool is_osr = false) { - __ pushq(kInterpreterAccumulatorRegister); +// Restarts execution either at the current or next (in execution order) +// bytecode. If there is baseline code on the shared function info, converts an +// interpreter frame into a baseline frame and continues execution in baseline +// code. Otherwise execution continues with bytecode. +void Generate_BaselineOrInterpreterEntry(MacroAssembler* masm, + bool next_bytecode, + bool is_osr = false) { Label start; __ bind(&start); @@ -4390,8 +4372,44 @@ void Generate_BaselineEntry(MacroAssembler* masm, bool next_bytecode, Register closure = rdi; __ movq(closure, MemOperand(rbp, StandardFrameConstants::kFunctionOffset)); + // Get the Code object from the shared function info. + Register code_obj = rbx; + __ LoadTaggedPointerField( + code_obj, FieldOperand(closure, JSFunction::kSharedFunctionInfoOffset)); + __ LoadTaggedPointerField( + code_obj, + FieldOperand(code_obj, SharedFunctionInfo::kFunctionDataOffset)); + + // Check if we have baseline code. For OSR entry it is safe to assume we + // always have baseline code. + if (!is_osr) { + Label start_with_baseline; + __ CmpObjectType(code_obj, BASELINE_DATA_TYPE, kScratchRegister); + __ j(equal, &start_with_baseline); + + // Start with bytecode as there is no baseline code. + Builtin builtin_id = next_bytecode + ? Builtin::kInterpreterEnterAtNextBytecode + : Builtin::kInterpreterEnterAtBytecode; + __ Jump(masm->isolate()->builtins()->code_handle(builtin_id), + RelocInfo::CODE_TARGET); + + // Start with baseline code. + __ bind(&start_with_baseline); + } else if (FLAG_debug_code) { + __ CmpObjectType(code_obj, BASELINE_DATA_TYPE, kScratchRegister); + __ Assert(equal, AbortReason::kExpectedBaselineData); + } + + // Load baseline code from baseline data. + __ LoadTaggedPointerField( + code_obj, FieldOperand(code_obj, BaselineData::kBaselineCodeOffset)); + if (V8_EXTERNAL_CODE_SPACE_BOOL) { + __ LoadCodeDataContainerCodeNonBuiltin(code_obj, code_obj); + } + // Load the feedback vector. - Register feedback_vector = rbx; + Register feedback_vector = r11; __ LoadTaggedPointerField( feedback_vector, FieldOperand(closure, JSFunction::kFeedbackCellOffset)); __ LoadTaggedPointerField(feedback_vector, @@ -4412,19 +4430,6 @@ void Generate_BaselineEntry(MacroAssembler* masm, bool next_bytecode, feedback_vector); feedback_vector = no_reg; - // Get the Code object from the shared function info. 
- Register code_obj = rbx; - __ LoadTaggedPointerField( - code_obj, FieldOperand(closure, JSFunction::kSharedFunctionInfoOffset)); - __ LoadTaggedPointerField( - code_obj, - FieldOperand(code_obj, SharedFunctionInfo::kFunctionDataOffset)); - __ LoadTaggedPointerField( - code_obj, FieldOperand(code_obj, BaselineData::kBaselineCodeOffset)); - if (V8_EXTERNAL_CODE_SPACE_BOOL) { - __ LoadCodeDataContainerCodeNonBuiltin(code_obj, code_obj); - } - // Compute baseline pc for bytecode offset. ExternalReference get_baseline_pc_extref; if (next_bytecode || is_osr) { @@ -4434,7 +4439,7 @@ void Generate_BaselineEntry(MacroAssembler* masm, bool next_bytecode, get_baseline_pc_extref = ExternalReference::baseline_pc_for_bytecode_offset(); } - Register get_baseline_pc = rax; + Register get_baseline_pc = r11; __ LoadAddress(get_baseline_pc, get_baseline_pc_extref); // If the code deoptimizes during the implicit function entry stack interrupt @@ -4457,6 +4462,7 @@ void Generate_BaselineEntry(MacroAssembler* masm, bool next_bytecode, // Get bytecode array from the stack frame. __ movq(kInterpreterBytecodeArrayRegister, MemOperand(rbp, InterpreterFrameConstants::kBytecodeArrayFromFp)); + __ pushq(kInterpreterAccumulatorRegister); { FrameScope scope(masm, StackFrame::INTERNAL); __ PrepareCallCFunction(3); @@ -4474,7 +4480,7 @@ void Generate_BaselineEntry(MacroAssembler* masm, bool next_bytecode, // TODO(pthier): Separate baseline Sparkplug from TF arming and don't disarm // Sparkplug here. __ movw(FieldOperand(kInterpreterBytecodeArrayRegister, - BytecodeArray::kOsrNestingLevelOffset), + BytecodeArray::kOsrLoopNestingLevelOffset), Immediate(0)); Generate_OSREntry(masm, code_obj); } else { @@ -4497,8 +4503,10 @@ void Generate_BaselineEntry(MacroAssembler* masm, bool next_bytecode, __ bind(&install_baseline_code); { FrameScope scope(masm, StackFrame::INTERNAL); + __ pushq(kInterpreterAccumulatorRegister); __ Push(closure); __ CallRuntime(Runtime::kInstallBaselineCode, 1); + __ popq(kInterpreterAccumulatorRegister); } // Retry from the start after installing baseline code. __ jmp(&start); @@ -4506,17 +4514,19 @@ void Generate_BaselineEntry(MacroAssembler* masm, bool next_bytecode, } // namespace -void Builtins::Generate_BaselineEnterAtBytecode(MacroAssembler* masm) { - Generate_BaselineEntry(masm, false); +void Builtins::Generate_BaselineOrInterpreterEnterAtBytecode( + MacroAssembler* masm) { + Generate_BaselineOrInterpreterEntry(masm, false); } -void Builtins::Generate_BaselineEnterAtNextBytecode(MacroAssembler* masm) { - Generate_BaselineEntry(masm, true); +void Builtins::Generate_BaselineOrInterpreterEnterAtNextBytecode( + MacroAssembler* masm) { + Generate_BaselineOrInterpreterEntry(masm, true); } void Builtins::Generate_InterpreterOnStackReplacement_ToBaseline( MacroAssembler* masm) { - Generate_BaselineEntry(masm, false, true); + Generate_BaselineOrInterpreterEntry(masm, false, true); } void Builtins::Generate_DynamicCheckMapsTrampoline(MacroAssembler* masm) { diff --git a/deps/v8/src/codegen/arm/assembler-arm.cc b/deps/v8/src/codegen/arm/assembler-arm.cc index ec2588364c2f9d..970386be72c228 100644 --- a/deps/v8/src/codegen/arm/assembler-arm.cc +++ b/deps/v8/src/codegen/arm/assembler-arm.cc @@ -5172,10 +5172,28 @@ void Assembler::RecordConstPool(int size) { RecordRelocInfo(RelocInfo::CONST_POOL, static_cast(size)); } +void Assembler::FixOnHeapReferences(bool update_embedded_objects) { + if (!update_embedded_objects) return; + Address base = reinterpret_cast
<Address>(buffer_->start()); + for (auto p : saved_handles_for_raw_object_ptr_) { + Handle<HeapObject> object(reinterpret_cast<Address*>(p.second)); + WriteUnalignedValue(base + p.first, *object); + } +} + +void Assembler::FixOnHeapReferencesToHandles() { + Address base = reinterpret_cast
<Address>(buffer_->start()); + for (auto p : saved_handles_for_raw_object_ptr_) { + WriteUnalignedValue(base + p.first, p.second); + } + saved_handles_for_raw_object_ptr_.clear(); +} + void Assembler::GrowBuffer() { DCHECK_EQ(buffer_start_, buffer_->start()); bool previously_on_heap = buffer_->IsOnHeap(); + int previous_on_heap_gc_count = OnHeapGCCount(); // Compute new buffer size. int old_size = buffer_->size(); @@ -5209,11 +5227,12 @@ reinterpret_cast
<Address>(reloc_info_writer.last_pc()) + pc_delta); reloc_info_writer.Reposition(new_reloc_start, new_last_pc); - // Patch on-heap references to handles. - if (previously_on_heap && !buffer_->IsOnHeap()) { - Address base = reinterpret_cast
<Address>(buffer_->start()); - for (auto p : saved_handles_for_raw_object_ptr_) { - WriteUnalignedValue(base + p.first, p.second); + // Fix on-heap references. + if (previously_on_heap) { + if (buffer_->IsOnHeap()) { + FixOnHeapReferences(previous_on_heap_gc_count != OnHeapGCCount()); + } else { + FixOnHeapReferencesToHandles(); } } @@ -5237,7 +5256,8 @@ void Assembler::dd(uint32_t data, RelocInfo::Mode rmode) { DCHECK(is_const_pool_blocked() || pending_32_bit_constants_.empty()); CheckBuffer(); if (!RelocInfo::IsNone(rmode)) { - DCHECK(RelocInfo::IsDataEmbeddedObject(rmode)); + DCHECK(RelocInfo::IsDataEmbeddedObject(rmode) || + RelocInfo::IsLiteralConstant(rmode)); RecordRelocInfo(rmode); } base::WriteUnalignedValue(reinterpret_cast
<Address>(pc_), data); @@ -5250,7 +5270,8 @@ void Assembler::dq(uint64_t value, RelocInfo::Mode rmode) { DCHECK(is_const_pool_blocked() || pending_32_bit_constants_.empty()); CheckBuffer(); if (!RelocInfo::IsNone(rmode)) { - DCHECK(RelocInfo::IsDataEmbeddedObject(rmode)); + DCHECK(RelocInfo::IsDataEmbeddedObject(rmode) || + RelocInfo::IsLiteralConstant(rmode)); RecordRelocInfo(rmode); } base::WriteUnalignedValue(reinterpret_cast
(pc_), value); @@ -5450,13 +5471,12 @@ void Assembler::CheckConstPool(bool force_emit, bool require_jump) { SetLdrRegisterImmediateOffset(instr, delta)); if (!entry.is_merged()) { if (IsOnHeap() && RelocInfo::IsEmbeddedObjectMode(entry.rmode())) { + int offset = pc_offset(); saved_handles_for_raw_object_ptr_.push_back( - std::make_pair(pc_offset(), entry.value())); - Handle handle(reinterpret_cast(entry.value())); - emit(handle->ptr()); - // We must ensure that `emit` is not growing the assembler buffer - // and falling back to off-heap compilation. - DCHECK(IsOnHeap()); + std::make_pair(offset, entry.value())); + Handle object(reinterpret_cast(entry.value())); + emit(object->ptr()); + DCHECK(EmbeddedObjectMatches(offset, object)); } else { emit(entry.value()); } diff --git a/deps/v8/src/codegen/arm/assembler-arm.h b/deps/v8/src/codegen/arm/assembler-arm.h index d96c761910b256..4a9fe49685de31 100644 --- a/deps/v8/src/codegen/arm/assembler-arm.h +++ b/deps/v8/src/codegen/arm/assembler-arm.h @@ -328,6 +328,15 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase { GetCode(isolate, desc, kNoSafepointTable, kNoHandlerTable); } + // This function is called when on-heap-compilation invariants are + // invalidated. For instance, when the assembler buffer grows or a GC happens + // between Code object allocation and Code object finalization. + void FixOnHeapReferences(bool update_embedded_objects = true); + + // This function is called when we fallback from on-heap to off-heap + // compilation and patch on-heap references to handles. + void FixOnHeapReferencesToHandles(); + // Label operations & relative jumps (PPUM Appendix D) // // Takes a branch opcode (cc) and a label (L) and generates @@ -401,6 +410,7 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase { void DataAlign(int m); // Aligns code to something that's optimal for a jump target for the platform. void CodeTargetAlign(); + void LoopHeaderAlign() { CodeTargetAlign(); } // Branch instructions void b(int branch_offset, Condition cond = al, @@ -1067,8 +1077,8 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase { // Record a deoptimization reason that can be used by a log or cpu profiler. // Use --trace-deopt to enable. - void RecordDeoptReason(DeoptimizeReason reason, SourcePosition position, - int id); + void RecordDeoptReason(DeoptimizeReason reason, uint32_t node_id, + SourcePosition position, int id); // Record the emission of a constant pool. // @@ -1187,6 +1197,13 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase { } } +#ifdef DEBUG + bool EmbeddedObjectMatches(int pc_offset, Handle object) { + return *reinterpret_cast(buffer_->start() + pc_offset) == + (IsOnHeap() ? object->ptr() : object.address()); + } +#endif + // Move a 32-bit immediate into a register, potentially via the constant pool. 
void Move32BitImmediate(Register rd, const Operand& x, Condition cond = al); diff --git a/deps/v8/src/codegen/arm/macro-assembler-arm.cc b/deps/v8/src/codegen/arm/macro-assembler-arm.cc index 49cb9d292ca4a7..26d16406a6277e 100644 --- a/deps/v8/src/codegen/arm/macro-assembler-arm.cc +++ b/deps/v8/src/codegen/arm/macro-assembler-arm.cc @@ -18,6 +18,7 @@ #include "src/codegen/macro-assembler.h" #include "src/codegen/register-configuration.h" #include "src/debug/debug.h" +#include "src/deoptimizer/deoptimizer.h" #include "src/execution/frames-inl.h" #include "src/heap/memory-chunk.h" #include "src/init/bootstrapper.h" @@ -1358,6 +1359,44 @@ void TurboAssembler::StubPrologue(StackFrame::Type type) { void TurboAssembler::Prologue() { PushStandardFrame(r1); } +void TurboAssembler::DropArguments(Register count, ArgumentsCountType type, + ArgumentsCountMode mode) { + int receiver_bytes = (mode == kCountExcludesReceiver) ? kPointerSize : 0; + switch (type) { + case kCountIsInteger: { + add(sp, sp, Operand(count, LSL, kPointerSizeLog2), LeaveCC); + break; + } + case kCountIsSmi: { + STATIC_ASSERT(kSmiTagSize == 1 && kSmiTag == 0); + add(sp, sp, Operand(count, LSL, kPointerSizeLog2 - kSmiTagSize), LeaveCC); + break; + } + case kCountIsBytes: { + add(sp, sp, count, LeaveCC); + break; + } + } + if (receiver_bytes != 0) { + add(sp, sp, Operand(receiver_bytes), LeaveCC); + } +} + +void TurboAssembler::DropArgumentsAndPushNewReceiver(Register argc, + Register receiver, + ArgumentsCountType type, + ArgumentsCountMode mode) { + DCHECK(!AreAliased(argc, receiver)); + if (mode == kCountExcludesReceiver) { + // Drop arguments without receiver and override old receiver. + DropArguments(argc, type, kCountIncludesReceiver); + str(receiver, MemOperand(sp, 0)); + } else { + DropArguments(argc, type, mode); + push(receiver); + } +} + void TurboAssembler::EnterFrame(StackFrame::Type type, bool load_constant_pool_pointer_reg) { ASM_CODE_COMMENT(this); @@ -1369,6 +1408,9 @@ void TurboAssembler::EnterFrame(StackFrame::Type type, mov(scratch, Operand(StackFrame::TypeToMarker(type))); } PushCommonFrame(scratch); +#if V8_ENABLE_WEBASSEMBLY + if (type == StackFrame::WASM) Push(kWasmInstanceRegister); +#endif // V8_ENABLE_WEBASSEMBLY } int TurboAssembler::LeaveFrame(StackFrame::Type type) { @@ -1553,54 +1595,6 @@ void TurboAssembler::MovFromFloatParameter(DwVfpRegister dst) { MovFromFloatResult(dst); } -void TurboAssembler::PrepareForTailCall(Register callee_args_count, - Register caller_args_count, - Register scratch0, Register scratch1) { - ASM_CODE_COMMENT(this); - DCHECK(!AreAliased(callee_args_count, caller_args_count, scratch0, scratch1)); - - // Calculate the end of destination area where we will put the arguments - // after we drop current frame. We add kPointerSize to count the receiver - // argument which is not included into formal parameters count. - Register dst_reg = scratch0; - add(dst_reg, fp, Operand(caller_args_count, LSL, kPointerSizeLog2)); - add(dst_reg, dst_reg, - Operand(StandardFrameConstants::kCallerSPOffset + kPointerSize)); - - Register src_reg = caller_args_count; - // Calculate the end of source area. +kPointerSize is for the receiver. - add(src_reg, sp, Operand(callee_args_count, LSL, kPointerSizeLog2)); - add(src_reg, src_reg, Operand(kPointerSize)); - - if (FLAG_debug_code) { - cmp(src_reg, dst_reg); - Check(lo, AbortReason::kStackAccessBelowStackPointer); - } - - // Restore caller's frame pointer and return address now as they will be - // overwritten by the copying loop. 
- ldr(lr, MemOperand(fp, StandardFrameConstants::kCallerPCOffset)); - ldr(fp, MemOperand(fp, StandardFrameConstants::kCallerFPOffset)); - - // Now copy callee arguments to the caller frame going backwards to avoid - // callee arguments corruption (source and destination areas could overlap). - - // Both src_reg and dst_reg are pointing to the word after the one to copy, - // so they must be pre-decremented in the loop. - Register tmp_reg = scratch1; - Label loop, entry; - b(&entry); - bind(&loop); - ldr(tmp_reg, MemOperand(src_reg, -kPointerSize, PreIndex)); - str(tmp_reg, MemOperand(dst_reg, -kPointerSize, PreIndex)); - bind(&entry); - cmp(sp, src_reg); - b(ne, &loop); - - // Leave current frame. - mov(sp, dst_reg); -} - void MacroAssembler::LoadStackLimit(Register destination, StackLimitKind kind) { ASM_CODE_COMMENT(this); DCHECK(root_array_available()); diff --git a/deps/v8/src/codegen/arm/macro-assembler-arm.h b/deps/v8/src/codegen/arm/macro-assembler-arm.h index d6671fff3fb426..41bc5ec54432a7 100644 --- a/deps/v8/src/codegen/arm/macro-assembler-arm.h +++ b/deps/v8/src/codegen/arm/macro-assembler-arm.h @@ -76,6 +76,14 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase { void StubPrologue(StackFrame::Type type); void Prologue(); + enum ArgumentsCountMode { kCountIncludesReceiver, kCountExcludesReceiver }; + enum ArgumentsCountType { kCountIsInteger, kCountIsSmi, kCountIsBytes }; + void DropArguments(Register count, ArgumentsCountType type, + ArgumentsCountMode mode); + void DropArgumentsAndPushNewReceiver(Register argc, Register receiver, + ArgumentsCountType type, + ArgumentsCountMode mode); + // Push a standard frame, consisting of lr, fp, context and JS function void PushStandardFrame(Register function_reg); @@ -233,15 +241,6 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase { void PrepareCallCFunction(int num_reg_arguments, int num_double_registers = 0, Register scratch = no_reg); - // Removes current frame and its arguments from the stack preserving - // the arguments and a return address pushed to the stack for the next call. - // Both |callee_args_count| and |caller_args_count| do not include - // receiver. |callee_args_count| is not modified. |caller_args_count| - // is trashed. - void PrepareForTailCall(Register callee_args_count, - Register caller_args_count, Register scratch0, - Register scratch1); - // There are two ways of passing double arguments on ARM, depending on // whether soft or hard floating point ABI is used. These functions // abstract parameter passing for the three different ways we call diff --git a/deps/v8/src/codegen/arm64/assembler-arm64.cc b/deps/v8/src/codegen/arm64/assembler-arm64.cc index bf39a2e416702f..f6a035a9e7737b 100644 --- a/deps/v8/src/codegen/arm64/assembler-arm64.cc +++ b/deps/v8/src/codegen/arm64/assembler-arm64.cc @@ -4275,8 +4275,41 @@ bool Assembler::IsImmFP64(double imm) { return true; } +void Assembler::FixOnHeapReferences(bool update_embedded_objects) { + Address base = reinterpret_cast
<Address>(buffer_->start()); + if (update_embedded_objects) { + for (auto p : saved_handles_for_raw_object_ptr_) { + Handle<HeapObject> object = GetEmbeddedObject(p.second); + WriteUnalignedValue(base + p.first, object->ptr()); + } + } + for (auto p : saved_offsets_for_runtime_entries_) { + Instruction* instr = reinterpret_cast<Instruction*>(base + p.first); + Address target = p.second * kInstrSize + options().code_range_start; + DCHECK(is_int26(p.second)); + DCHECK(instr->IsBranchAndLink() || instr->IsUnconditionalBranch()); + instr->SetBranchImmTarget(reinterpret_cast<Instruction*>(target)); + } +} + +void Assembler::FixOnHeapReferencesToHandles() { + Address base = reinterpret_cast
<Address>(buffer_->start()); + for (auto p : saved_handles_for_raw_object_ptr_) { + WriteUnalignedValue(base + p.first, p.second); + } + saved_handles_for_raw_object_ptr_.clear(); + for (auto p : saved_offsets_for_runtime_entries_) { + Instruction* instr = reinterpret_cast<Instruction*>(base + p.first); + DCHECK(is_int26(p.second)); + DCHECK(instr->IsBranchAndLink() || instr->IsUnconditionalBranch()); + instr->SetInstructionBits(instr->Mask(UnconditionalBranchMask) | p.second); + } + saved_offsets_for_runtime_entries_.clear(); +} + void Assembler::GrowBuffer() { bool previously_on_heap = buffer_->IsOnHeap(); + int previous_on_heap_gc_count = OnHeapGCCount(); // Compute new buffer size. int old_size = buffer_->size(); @@ -4320,18 +4353,12 @@ void Assembler::GrowBuffer() { WriteUnalignedValue(address, internal_ref); } - // Patch on-heap references to handles. - if (previously_on_heap && !buffer_->IsOnHeap()) { - Address base = reinterpret_cast
(buffer_->start()); - for (auto p : saved_handles_for_raw_object_ptr_) { - WriteUnalignedValue(base + p.first, p.second); - } - for (auto p : saved_offsets_for_runtime_entries_) { - Instruction* instr = reinterpret_cast(base + p.first); - DCHECK(is_int26(p.second)); - DCHECK(instr->IsBranchAndLink() || instr->IsUnconditionalBranch()); - instr->SetInstructionBits(instr->Mask(UnconditionalBranchMask) | - p.second); + // Fix on-heap references. + if (previously_on_heap) { + if (buffer_->IsOnHeap()) { + FixOnHeapReferences(previous_on_heap_gc_count != OnHeapGCCount()); + } else { + FixOnHeapReferencesToHandles(); } } @@ -4345,12 +4372,16 @@ void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data, (rmode == RelocInfo::CONST_POOL) || (rmode == RelocInfo::VENEER_POOL) || (rmode == RelocInfo::DEOPT_SCRIPT_OFFSET) || (rmode == RelocInfo::DEOPT_INLINING_ID) || - (rmode == RelocInfo::DEOPT_REASON) || (rmode == RelocInfo::DEOPT_ID)) { + (rmode == RelocInfo::DEOPT_REASON) || (rmode == RelocInfo::DEOPT_ID) || + (rmode == RelocInfo::LITERAL_CONSTANT) || + (rmode == RelocInfo::DEOPT_NODE_ID)) { // Adjust code for new modes. DCHECK(RelocInfo::IsDeoptReason(rmode) || RelocInfo::IsDeoptId(rmode) || + RelocInfo::IsDeoptNodeId(rmode) || RelocInfo::IsDeoptPosition(rmode) || RelocInfo::IsInternalReference(rmode) || RelocInfo::IsDataEmbeddedObject(rmode) || + RelocInfo::IsLiteralConstant(rmode) || RelocInfo::IsConstPool(rmode) || RelocInfo::IsVeneerPool(rmode)); // These modes do not need an entry in the constant pool. } else if (constant_pool_mode == NEEDS_POOL_ENTRY) { diff --git a/deps/v8/src/codegen/arm64/assembler-arm64.h b/deps/v8/src/codegen/arm64/assembler-arm64.h index 6a0245fcd628c2..8cdca7bfa83ef7 100644 --- a/deps/v8/src/codegen/arm64/assembler-arm64.h +++ b/deps/v8/src/codegen/arm64/assembler-arm64.h @@ -204,6 +204,15 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase { GetCode(isolate, desc, kNoSafepointTable, kNoHandlerTable); } + // This function is called when on-heap-compilation invariants are + // invalidated. For instance, when the assembler buffer grows or a GC happens + // between Code object allocation and Code object finalization. + void FixOnHeapReferences(bool update_embedded_objects = true); + + // This function is called when we fallback from on-heap to off-heap + // compilation and patch on-heap references to handles. + void FixOnHeapReferencesToHandles(); + // Insert the smallest number of nop instructions // possible to align the pc offset to a multiple // of m. m must be a power of 2 (>= 4). @@ -213,6 +222,7 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase { void DataAlign(int m); // Aligns code to something that's optimal for a jump target for the platform. void CodeTargetAlign(); + void LoopHeaderAlign() { CodeTargetAlign(); } inline void Unreachable(); @@ -339,8 +349,8 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase { // Record a deoptimization reason that can be used by a log or cpu profiler. // Use --trace-deopt to enable. 
- void RecordDeoptReason(DeoptimizeReason reason, SourcePosition position, - int id); + void RecordDeoptReason(DeoptimizeReason reason, uint32_t node_id, + SourcePosition position, int id); int buffer_space() const; @@ -2067,7 +2077,8 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase { void dd(uint32_t data, RelocInfo::Mode rmode = RelocInfo::NONE) { BlockPoolsScope no_pool_scope(this); if (!RelocInfo::IsNone(rmode)) { - DCHECK(RelocInfo::IsDataEmbeddedObject(rmode)); + DCHECK(RelocInfo::IsDataEmbeddedObject(rmode) || + RelocInfo::IsLiteralConstant(rmode)); RecordRelocInfo(rmode); } dc32(data); @@ -2075,7 +2086,8 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase { void dq(uint64_t data, RelocInfo::Mode rmode = RelocInfo::NONE) { BlockPoolsScope no_pool_scope(this); if (!RelocInfo::IsNone(rmode)) { - DCHECK(RelocInfo::IsDataEmbeddedObject(rmode)); + DCHECK(RelocInfo::IsDataEmbeddedObject(rmode) || + RelocInfo::IsLiteralConstant(rmode)); RecordRelocInfo(rmode); } dc64(data); @@ -2083,7 +2095,8 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase { void dp(uintptr_t data, RelocInfo::Mode rmode = RelocInfo::NONE) { BlockPoolsScope no_pool_scope(this); if (!RelocInfo::IsNone(rmode)) { - DCHECK(RelocInfo::IsDataEmbeddedObject(rmode)); + DCHECK(RelocInfo::IsDataEmbeddedObject(rmode) || + RelocInfo::IsLiteralConstant(rmode)); RecordRelocInfo(rmode); } dc64(data); @@ -2676,6 +2689,12 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase { static size_t GetApproxMaxDistToConstPoolForTesting() { return ConstantPool::kApproxDistToPool64; } + + bool EmbeddedObjectMatches(int pc_offset, Handle object, + EmbeddedObjectIndex index) { + return *reinterpret_cast(buffer_->start() + pc_offset) == + (IsOnHeap() ? object->ptr() : index); + } #endif class FarBranchInfo { diff --git a/deps/v8/src/codegen/arm64/cpu-arm64.cc b/deps/v8/src/codegen/arm64/cpu-arm64.cc index 4baf2e07ec4491..4c61e1fd82aa5f 100644 --- a/deps/v8/src/codegen/arm64/cpu-arm64.cc +++ b/deps/v8/src/codegen/arm64/cpu-arm64.cc @@ -13,6 +13,10 @@ #include #endif +#if V8_OS_WIN +#include +#endif + namespace v8 { namespace internal { diff --git a/deps/v8/src/codegen/arm64/macro-assembler-arm64.cc b/deps/v8/src/codegen/arm64/macro-assembler-arm64.cc index 9dba8800d99956..ef95b4e8132400 100644 --- a/deps/v8/src/codegen/arm64/macro-assembler-arm64.cc +++ b/deps/v8/src/codegen/arm64/macro-assembler-arm64.cc @@ -2215,62 +2215,6 @@ void TurboAssembler::CallForDeoptimization( } } -void TurboAssembler::PrepareForTailCall(Register callee_args_count, - Register caller_args_count, - Register scratch0, Register scratch1) { - ASM_CODE_COMMENT(this); - DCHECK(!AreAliased(callee_args_count, caller_args_count, scratch0, scratch1)); - - // Calculate the end of destination area where we will put the arguments - // after we drop current frame. We add kSystemPointerSize to count the - // receiver argument which is not included into formal parameters count. - Register dst_reg = scratch0; - Add(dst_reg, fp, Operand(caller_args_count, LSL, kSystemPointerSizeLog2)); - Add(dst_reg, dst_reg, - StandardFrameConstants::kCallerSPOffset + kSystemPointerSize); - // Round dst_reg up to a multiple of 16 bytes, so that we overwrite any - // potential padding. - Add(dst_reg, dst_reg, 15); - Bic(dst_reg, dst_reg, 15); - - Register src_reg = caller_args_count; - // Calculate the end of source area. +kSystemPointerSize is for the receiver. 
- Add(src_reg, sp, Operand(callee_args_count, LSL, kSystemPointerSizeLog2)); - Add(src_reg, src_reg, kSystemPointerSize); - - // Round src_reg up to a multiple of 16 bytes, so we include any potential - // padding in the copy. - Add(src_reg, src_reg, 15); - Bic(src_reg, src_reg, 15); - - if (FLAG_debug_code) { - Cmp(src_reg, dst_reg); - Check(lo, AbortReason::kStackAccessBelowStackPointer); - } - - // Restore caller's frame pointer and return address now as they will be - // overwritten by the copying loop. - RestoreFPAndLR(); - - // Now copy callee arguments to the caller frame going backwards to avoid - // callee arguments corruption (source and destination areas could overlap). - - // Both src_reg and dst_reg are pointing to the word after the one to copy, - // so they must be pre-decremented in the loop. - Register tmp_reg = scratch1; - Label loop, entry; - B(&entry); - bind(&loop); - Ldr(tmp_reg, MemOperand(src_reg, -kSystemPointerSize, PreIndex)); - Str(tmp_reg, MemOperand(dst_reg, -kSystemPointerSize, PreIndex)); - bind(&entry); - Cmp(sp, src_reg); - B(ne, &loop); - - // Leave current frame. - Mov(sp, dst_reg); -} - void MacroAssembler::LoadStackLimit(Register destination, StackLimitKind kind) { ASM_CODE_COMMENT(this); DCHECK(root_array_available()); @@ -2659,11 +2603,11 @@ void TurboAssembler::EnterFrame(StackFrame::Type type) { Mov(type_reg, StackFrame::TypeToMarker(type)); Push(lr, fp); Mov(fp, sp); - Push(type_reg, padreg); + Push(type_reg, kWasmInstanceRegister); // sp[3] : lr // sp[2] : fp // sp[1] : type - // sp[0] : for alignment + // sp[0] : wasm instance #endif // V8_ENABLE_WEBASSEMBLY } else if (type == StackFrame::CONSTRUCT) { Register type_reg = temps.AcquireX(); diff --git a/deps/v8/src/codegen/arm64/macro-assembler-arm64.h b/deps/v8/src/codegen/arm64/macro-assembler-arm64.h index 235b9a4b694aa6..9128ba2c18edb7 100644 --- a/deps/v8/src/codegen/arm64/macro-assembler-arm64.h +++ b/deps/v8/src/codegen/arm64/macro-assembler-arm64.h @@ -555,15 +555,6 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase { inline void Isb(); inline void Csdb(); - // Removes current frame and its arguments from the stack preserving - // the arguments and a return address pushed to the stack for the next call. - // Both |callee_args_count| and |caller_args_count| do not include - // receiver. |callee_args_count| is not modified. |caller_args_count| is - // trashed. 
- void PrepareForTailCall(Register callee_args_count, - Register caller_args_count, Register scratch0, - Register scratch1); - inline void SmiUntag(Register dst, Register src); inline void SmiUntag(Register dst, const MemOperand& src); inline void SmiUntag(Register smi); diff --git a/deps/v8/src/codegen/assembler.cc b/deps/v8/src/codegen/assembler.cc index 8eb5ae55e24d98..dfd406694a9a7e 100644 --- a/deps/v8/src/codegen/assembler.cc +++ b/deps/v8/src/codegen/assembler.cc @@ -57,7 +57,7 @@ AssemblerOptions AssemblerOptions::Default(Isolate* isolate) { const bool generating_embedded_builtin = isolate->IsGeneratingEmbeddedBuiltins(); options.record_reloc_info_for_serialization = serializer; - options.enable_root_array_delta_access = + options.enable_root_relative_access = !serializer && !generating_embedded_builtin; #ifdef USE_SIMULATOR // Even though the simulator is enabled, we may still need to generate code @@ -142,8 +142,9 @@ class ExternalAssemblerBufferImpl : public AssemblerBuffer { class OnHeapAssemblerBuffer : public AssemblerBuffer { public: - OnHeapAssemblerBuffer(Handle code, int size) - : code_(code), size_(size) {} + OnHeapAssemblerBuffer(Isolate* isolate, Handle code, int size, + int gc_count) + : isolate_(isolate), code_(code), size_(size), gc_count_(gc_count) {} byte* start() const override { return reinterpret_cast(code_->raw_instruction_start()); @@ -153,20 +154,32 @@ class OnHeapAssemblerBuffer : public AssemblerBuffer { std::unique_ptr Grow(int new_size) override { DCHECK_LT(size(), new_size); + Heap* heap = isolate_->heap(); + if (Code::SizeFor(new_size) < + heap->MaxRegularHeapObjectSize(AllocationType::kCode)) { + MaybeHandle code = + isolate_->factory()->NewEmptyCode(CodeKind::BASELINE, new_size); + if (!code.is_null()) { + return std::make_unique( + isolate_, code.ToHandleChecked(), new_size, heap->gc_count()); + } + } // We fall back to the slow path using the default assembler buffer and - // compile the code off the GC heap. Compiling directly on heap makes less - // sense now, since we will need to allocate a new Code object, copy the - // content generated so far and relocate. + // compile the code off the GC heap. return std::make_unique(new_size); } bool IsOnHeap() const override { return true; } + int OnHeapGCCount() const override { return gc_count_; } + MaybeHandle code() const override { return code_; } private: + Isolate* isolate_; Handle code_; const int size_; + const int gc_count_; }; static thread_local std::aligned_storage_t NewOnHeapAssemblerBuffer(Isolate* isolate, MaybeHandle code = isolate->factory()->NewEmptyCode(CodeKind::BASELINE, size); if (code.is_null()) return {}; - return std::make_unique(code.ToHandleChecked(), size); + return std::make_unique( + isolate, code.ToHandleChecked(), size, isolate->heap()->gc_count()); } // ----------------------------------------------------------------------------- @@ -281,13 +295,16 @@ HeapObjectRequest::HeapObjectRequest(const StringConstantBase* string, // Platform specific but identical code for all the platforms. 
-void Assembler::RecordDeoptReason(DeoptimizeReason reason, +void Assembler::RecordDeoptReason(DeoptimizeReason reason, uint32_t node_id, SourcePosition position, int id) { EnsureSpace ensure_space(this); RecordRelocInfo(RelocInfo::DEOPT_SCRIPT_OFFSET, position.ScriptOffset()); RecordRelocInfo(RelocInfo::DEOPT_INLINING_ID, position.InliningId()); RecordRelocInfo(RelocInfo::DEOPT_REASON, static_cast(reason)); RecordRelocInfo(RelocInfo::DEOPT_ID, id); +#ifdef DEBUG + RecordRelocInfo(RelocInfo::DEOPT_NODE_ID, node_id); +#endif // DEBUG } void Assembler::DataAlign(int m) { diff --git a/deps/v8/src/codegen/assembler.h b/deps/v8/src/codegen/assembler.h index ee5aef524d9493..7373b5d48b098b 100644 --- a/deps/v8/src/codegen/assembler.h +++ b/deps/v8/src/codegen/assembler.h @@ -157,9 +157,9 @@ struct V8_EXPORT_PRIVATE AssemblerOptions { // assembler is used on existing code directly (e.g. JumpTableAssembler) // without any buffer to hold reloc information. bool disable_reloc_info_for_patching = false; - // Enables access to exrefs by computing a delta from the root array. - // Only valid if code will not survive the process. - bool enable_root_array_delta_access = false; + // Enables root-relative access to arbitrary untagged addresses (usually + // external references). Only valid if code will not survive the process. + bool enable_root_relative_access = false; // Enables specific assembler sequences only used for the simulator. bool enable_simulator_code = false; // Enables use of isolate-independent constants, indirected through the @@ -204,6 +204,9 @@ class AssemblerBuffer { V8_WARN_UNUSED_RESULT = 0; virtual bool IsOnHeap() const { return false; } virtual MaybeHandle code() const { return MaybeHandle(); } + // Return the GC count when the buffer was allocated (only if the buffer is on + // the GC heap). + virtual int OnHeapGCCount() const { return 0; } }; // Allocate an AssemblerBuffer which uses an existing buffer. 
This buffer cannot @@ -283,6 +286,8 @@ class V8_EXPORT_PRIVATE AssemblerBase : public Malloced { bool IsOnHeap() const { return buffer_->IsOnHeap(); } + int OnHeapGCCount() const { return buffer_->OnHeapGCCount(); } + MaybeHandle code() const { DCHECK(IsOnHeap()); return buffer_->code(); @@ -404,6 +409,9 @@ class V8_EXPORT_PRIVATE AssemblerBase : public Malloced { !options().record_reloc_info_for_serialization && !FLAG_debug_code) { return false; } +#ifndef ENABLE_DISASSEMBLER + if (RelocInfo::IsLiteralConstant(rmode)) return false; +#endif return true; } diff --git a/deps/v8/src/codegen/bailout-reason.h b/deps/v8/src/codegen/bailout-reason.h index 2a5893974f0024..128858a47fa11a 100644 --- a/deps/v8/src/codegen/bailout-reason.h +++ b/deps/v8/src/codegen/bailout-reason.h @@ -22,9 +22,9 @@ namespace internal { "Expected optimized code cell or optimization sentinel") \ V(kExpectedUndefinedOrCell, "Expected undefined or cell in register") \ V(kExpectedFeedbackVector, "Expected feedback vector") \ + V(kExpectedBaselineData, "Expected baseline data") \ V(kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry, \ "The function_data field should be a BytecodeArray on interpreter entry") \ - V(kFpuTopIsNotZeroInDeoptimizer, "FPU TOP is not zero in deoptimizer") \ V(kInputStringTooLong, "Input string too long") \ V(kInvalidBytecode, "Invalid bytecode") \ V(kInvalidBytecodeAdvance, "Cannot advance current bytecode, ") \ diff --git a/deps/v8/src/codegen/code-stub-assembler.cc b/deps/v8/src/codegen/code-stub-assembler.cc index 5493ba6caa3360..e25135decee421 100644 --- a/deps/v8/src/codegen/code-stub-assembler.cc +++ b/deps/v8/src/codegen/code-stub-assembler.cc @@ -1891,7 +1891,7 @@ TNode CodeStubAssembler::LoadMapInobjectPropertiesStartInWords( // See Map::GetInObjectPropertiesStartInWords() for details. CSA_ASSERT(this, IsJSObjectMap(map)); return ChangeInt32ToIntPtr(LoadObjectField( - map, Map::kInObjectPropertiesStartOrConstructorFunctionIndexOffset)); + map, Map::kInobjectPropertiesStartOrConstructorFunctionIndexOffset)); } TNode CodeStubAssembler::LoadMapConstructorFunctionIndex( @@ -1899,7 +1899,7 @@ TNode CodeStubAssembler::LoadMapConstructorFunctionIndex( // See Map::GetConstructorFunctionIndex() for details. CSA_ASSERT(this, IsPrimitiveInstanceType(LoadMapInstanceType(map))); return ChangeInt32ToIntPtr(LoadObjectField( - map, Map::kInObjectPropertiesStartOrConstructorFunctionIndexOffset)); + map, Map::kInobjectPropertiesStartOrConstructorFunctionIndexOffset)); } TNode CodeStubAssembler::LoadMapConstructor(TNode map) { @@ -6195,6 +6195,13 @@ TNode CodeStubAssembler::IsArraySpeciesProtectorCellInvalid() { return TaggedEqual(cell_value, invalid); } +TNode CodeStubAssembler::IsIsConcatSpreadableProtectorCellInvalid() { + TNode invalid = SmiConstant(Protectors::kProtectorInvalid); + TNode cell = IsConcatSpreadableProtectorConstant(); + TNode cell_value = LoadObjectField(cell, PropertyCell::kValueOffset); + return TaggedEqual(cell_value, invalid); +} + TNode CodeStubAssembler::IsTypedArraySpeciesProtectorCellInvalid() { TNode invalid = SmiConstant(Protectors::kProtectorInvalid); TNode cell = TypedArraySpeciesProtectorConstant(); @@ -9689,7 +9696,7 @@ void CodeStubAssembler::TryLookupElement( // TODO(verwaest): Support other elements kinds as well. 
@@ -9689,7 +9696,7 @@ void CodeStubAssembler::TryLookupElement(
   // TODO(verwaest): Support other elements kinds as well.
   Label if_isobjectorsmi(this), if_isdouble(this), if_isdictionary(this),
       if_isfaststringwrapper(this), if_isslowstringwrapper(this), if_oob(this),
-      if_typedarray(this);
+      if_typedarray(this), if_rab_gsab_typedarray(this);
   // clang-format off
   int32_t values[] = {
       // Handled by {if_isobjectorsmi}.
@@ -9719,8 +9726,18 @@ void CodeStubAssembler::TryLookupElement(
       UINT8_CLAMPED_ELEMENTS,
       BIGUINT64_ELEMENTS,
       BIGINT64_ELEMENTS,
+      RAB_GSAB_UINT8_ELEMENTS,
+      RAB_GSAB_INT8_ELEMENTS,
+      RAB_GSAB_UINT16_ELEMENTS,
+      RAB_GSAB_INT16_ELEMENTS,
+      RAB_GSAB_UINT32_ELEMENTS,
+      RAB_GSAB_INT32_ELEMENTS,
+      RAB_GSAB_FLOAT32_ELEMENTS,
+      RAB_GSAB_FLOAT64_ELEMENTS,
+      RAB_GSAB_UINT8_CLAMPED_ELEMENTS,
+      RAB_GSAB_BIGUINT64_ELEMENTS,
+      RAB_GSAB_BIGINT64_ELEMENTS,
   };
-  // TODO(v8:11111): Support RAB / GSAB.
   Label* labels[] = {
       &if_isobjectorsmi,  &if_isobjectorsmi, &if_isobjectorsmi,
       &if_isobjectorsmi,  &if_isobjectorsmi, &if_isobjectorsmi,
@@ -9742,6 +9759,17 @@ void CodeStubAssembler::TryLookupElement(
       &if_typedarray,
       &if_typedarray,
       &if_typedarray,
+      &if_rab_gsab_typedarray,
+      &if_rab_gsab_typedarray,
+      &if_rab_gsab_typedarray,
+      &if_rab_gsab_typedarray,
+      &if_rab_gsab_typedarray,
+      &if_rab_gsab_typedarray,
+      &if_rab_gsab_typedarray,
+      &if_rab_gsab_typedarray,
+      &if_rab_gsab_typedarray,
+      &if_rab_gsab_typedarray,
+      &if_rab_gsab_typedarray,
   };
   // clang-format on
   STATIC_ASSERT(arraysize(values) == arraysize(labels));
@@ -9808,6 +9836,13 @@ void CodeStubAssembler::TryLookupElement(
     TNode<UintPtrT> length = LoadJSTypedArrayLength(CAST(object));
     Branch(UintPtrLessThan(intptr_index, length), if_found, if_absent);
   }
+  BIND(&if_rab_gsab_typedarray);
+  {
+    TNode<JSArrayBuffer> buffer = LoadJSArrayBufferViewBuffer(CAST(object));
+    TNode<UintPtrT> length =
+        LoadVariableLengthJSTypedArrayLength(CAST(object), buffer, if_absent);
+    Branch(UintPtrLessThan(intptr_index, length), if_found, if_absent);
+  }
   BIND(&if_oob);
   {
     // Positive OOB indices mean "not found", negative indices and indices
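The IsTypedArrayDetachedOrOutOfBounds() helper added in the next hunk encodes the bounds rules for TypedArrays backed by a resizable ArrayBuffer (RAB): a length-tracking view stays in bounds as long as its byte offset still fits inside the possibly shrunk buffer, while a fixed-length view also needs its whole byte range to fit. A scalar sketch of the same predicate, assuming detached buffers have already been ruled out; the explicit overflow guard is an addition for this plain-integer version and is not part of the CSA code:

    #include <cstddef>

    // Returns true when the view is still fully addressable inside the buffer.
    bool IsViewInBounds(size_t buffer_byte_length, size_t view_byte_offset,
                        size_t view_byte_length, bool length_tracking) {
      if (length_tracking) {
        // A length-tracking view shrinks with the buffer; it only goes out of
        // bounds when the buffer shrinks past its start offset.
        return view_byte_offset <= buffer_byte_length;
      }
      // Fixed-length view: the whole byte range must still fit. Checking the
      // offset first keeps the subtraction below from wrapping around.
      if (view_byte_offset > buffer_byte_length) return false;
      return view_byte_length <= buffer_byte_length - view_byte_offset;
    }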
@@ -13891,6 +13926,45 @@ TNode<UintPtrT> CodeStubAssembler::LoadVariableLengthJSTypedArrayLength(
   return result.value();
 }
 
+void CodeStubAssembler::IsTypedArrayDetachedOrOutOfBounds(
+    TNode<JSTypedArray> array, Label* detached_or_oob,
+    Label* not_detached_nor_oob) {
+  TNode<JSArrayBuffer> buffer = LoadJSArrayBufferViewBuffer(array);
+
+  GotoIf(IsDetachedBuffer(buffer), detached_or_oob);
+  GotoIfNot(IsVariableLengthTypedArray(array), not_detached_nor_oob);
+  GotoIf(IsSharedArrayBuffer(buffer), not_detached_nor_oob);
+
+  {
+    TNode<UintPtrT> buffer_byte_length = LoadJSArrayBufferByteLength(buffer);
+    TNode<UintPtrT> array_byte_offset = LoadJSArrayBufferViewByteOffset(array);
+
+    Label length_tracking(this), not_length_tracking(this);
+    Branch(IsLengthTrackingTypedArray(array), &length_tracking,
+           &not_length_tracking);
+
+    BIND(&length_tracking);
+    {
+      // The backing RAB might have been shrunk so that the start of the
+      // TypedArray is already out of bounds.
+      Branch(UintPtrLessThanOrEqual(array_byte_offset, buffer_byte_length),
+             not_detached_nor_oob, detached_or_oob);
+    }
+
+    BIND(&not_length_tracking);
+    {
+      // Check if the backing RAB has shrunk so that the buffer is out of
+      // bounds.
+      TNode<UintPtrT> array_byte_length =
+          LoadJSArrayBufferViewByteLength(array);
+      Branch(UintPtrGreaterThanOrEqual(
+                 buffer_byte_length,
+                 UintPtrAdd(array_byte_offset, array_byte_length)),
+             not_detached_nor_oob, detached_or_oob);
+    }
+  }
+}
+
 // ES #sec-integerindexedobjectbytelength
 TNode<UintPtrT> CodeStubAssembler::LoadVariableLengthJSTypedArrayByteLength(
     TNode<Context> context, TNode<JSTypedArray> array,
@@ -14376,7 +14450,7 @@ TNode<JSFunction> CodeStubAssembler::AllocateFunctionWithMapAndContext(
   StoreObjectFieldNoWriteBarrier(fun, JSFunction::kSharedFunctionInfoOffset,
                                  shared_info);
   StoreObjectFieldNoWriteBarrier(fun, JSFunction::kContextOffset, context);
-  StoreObjectFieldNoWriteBarrier(fun, JSFunction::kCodeOffset, ToCodeT(code));
+  StoreObjectField(fun, JSFunction::kCodeOffset, ToCodeT(code));
   return CAST(fun);
 }
 
diff --git a/deps/v8/src/codegen/code-stub-assembler.h b/deps/v8/src/codegen/code-stub-assembler.h
index a6970a0a006d70..008af6006f5b63 100644
--- a/deps/v8/src/codegen/code-stub-assembler.h
+++ b/deps/v8/src/codegen/code-stub-assembler.h
@@ -68,6 +68,8 @@ enum class PrimitiveType { kBoolean, kNumber, kString, kSymbol };
     AsyncGeneratorYieldResolveSharedFun)                                       \
   V(AsyncIteratorValueUnwrapSharedFun, async_iterator_value_unwrap_shared_fun, \
     AsyncIteratorValueUnwrapSharedFun)                                         \
+  V(IsConcatSpreadableProtector, is_concat_spreadable_protector,               \
+    IsConcatSpreadableProtector)                                               \
   V(MapIteratorProtector, map_iterator_protector, MapIteratorProtector)        \
   V(NoElementsProtector, no_elements_protector, NoElementsProtector)           \
   V(MegaDOMProtector, mega_dom_protector, MegaDOMProtector)                    \
@@ -2546,6 +2548,7 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
   TNode<BoolT> IsPromiseResolveProtectorCellInvalid();
   TNode<BoolT> IsPromiseThenProtectorCellInvalid();
   TNode<BoolT> IsArraySpeciesProtectorCellInvalid();
+  TNode<BoolT> IsIsConcatSpreadableProtectorCellInvalid();
   TNode<BoolT> IsTypedArraySpeciesProtectorCellInvalid();
   TNode<BoolT> IsRegExpSpeciesProtectorCellInvalid();
   TNode<BoolT> IsPromiseSpeciesProtectorCellInvalid();
@@ -3566,6 +3569,10 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
   TNode<UintPtrT> LoadVariableLengthJSTypedArrayByteLength(
       TNode<Context> context, TNode<JSTypedArray> array,
       TNode<JSArrayBuffer> buffer);
+  void IsTypedArrayDetachedOrOutOfBounds(TNode<JSTypedArray> array,
+                                         Label* detached_or_oob,
+                                         Label* not_detached_nor_oob);
+
   TNode<IntPtrT> RabGsabElementsKindToElementByteSize(
       TNode<Int32T> elementsKind);
   TNode<RawPtrT> LoadJSTypedArrayDataPtr(TNode<JSTypedArray> typed_array);
diff --git a/deps/v8/src/codegen/compilation-cache.cc b/deps/v8/src/codegen/compilation-cache.cc
index ee50f8b0153a51..861bd2904f271f 100644
--- a/deps/v8/src/codegen/compilation-cache.cc
+++ b/deps/v8/src/codegen/compilation-cache.cc
@@ -4,6 +4,7 @@
 
 #include "src/codegen/compilation-cache.h"
 
+#include "src/codegen/script-details.h"
 #include "src/common/globals.h"
 #include "src/heap/factory.h"
 #include "src/logging/counters.h"
@@ -104,42 +105,64 @@ void CompilationSubCache::Remove(Handle<SharedFunctionInfo> function_info) {
 CompilationCacheScript::CompilationCacheScript(Isolate* isolate)
     : CompilationSubCache(isolate, 1) {}
 
+namespace {
+
 // We only re-use a cached function for some script source code if the
 // script originates from the same place. This is to avoid issues
 // when reporting errors, etc.
-bool CompilationCacheScript::HasOrigin(Handle<SharedFunctionInfo> function_info,
-                                       MaybeHandle<Object> maybe_name,
-                                       int line_offset, int column_offset,
-                                       ScriptOriginOptions resource_options) {
+bool HasOrigin(Isolate* isolate, Handle<SharedFunctionInfo> function_info,
+               const ScriptDetails& script_details) {
   Handle