From 80ccae000e49ba97b5bd1393de8c96c99787e554 Mon Sep 17 00:00:00 2001
From: Michaël Zasso
Date: Wed, 21 Aug 2019 10:37:38 +0200
Subject: [PATCH 001/143] deps: update V8 to 7.7.299.4

Backport-PR-URL: https://github.com/nodejs/node/pull/29241
PR-URL: https://github.com/nodejs/node/pull/28918
Reviewed-By: Colin Ihrig
Reviewed-By: Ben Noordhuis
Reviewed-By: Jiawen Geng
Reviewed-By: Rich Trott
---
 deps/v8/.flake8 | 11 +
 deps/v8/.gitignore | 18 +-
 deps/v8/AUTHORS | 5 +
 deps/v8/BUILD.gn | 244 +-
 deps/v8/ChangeLog | 1535 ++
 deps/v8/DEPS | 44 +-
 deps/v8/INTL_OWNERS | 3 +
 deps/v8/OWNERS | 8 +-
 .../wasm-api-tests => benchmarks}/OWNERS | 0
 deps/v8/gni/proto_library.gni | 3 +
 deps/v8/gni/v8.gni | 2 +-
 deps/v8/include/APIDesign.md | 3 +
 deps/v8/include/OWNERS | 7 +-
 .../js_protocol-1.2.json | 0
 .../js_protocol-1.3.json | 0
 .../inspector => include}/js_protocol.pdl | 0
 deps/v8/include/libplatform/v8-tracing.h | 11 +-
 deps/v8/include/v8-inspector.h | 6 +-
 deps/v8/include/v8-internal.h | 2 -
 deps/v8/include/v8-platform.h | 8 +
 deps/v8/include/v8-profiler.h | 9 +-
 deps/v8/include/v8-util.h | 18 -
 deps/v8/include/v8-version.h | 6 +-
 deps/v8/include/v8.h | 399 +-
 deps/v8/include/v8config.h | 6 +
 deps/v8/infra/mb/gn_isolate_map.pyl | 4 +
 deps/v8/infra/mb/mb_config.pyl | 12 +-
 deps/v8/infra/testing/PRESUBMIT.py | 4 +-
 deps/v8/infra/testing/builders.pyl | 287 +-
 deps/v8/samples/process.cc | 14 +-
 deps/v8/src/DEPS | 6 +
 deps/v8/src/OWNERS | 10 +-
 deps/v8/src/api/OWNERS | 11 +
 deps/v8/src/api/api-natives.cc | 33 +-
 deps/v8/src/api/api.cc | 590 +-
 deps/v8/src/api/api.h | 2 +-
 deps/v8/src/asmjs/OWNERS | 2 -
 deps/v8/src/asmjs/asm-js.cc | 12 +-
 deps/v8/src/asmjs/asm-js.h | 2 +-
 deps/v8/src/asmjs/asm-parser.cc | 26 +-
 deps/v8/src/asmjs/asm-parser.h | 6 +-
 deps/v8/src/ast/OWNERS | 2 -
 deps/v8/src/ast/ast.cc | 15 +-
 deps/v8/src/ast/ast.h | 53 +-
 deps/v8/src/ast/modules.cc | 102 +-
 deps/v8/src/ast/modules.h | 16 +-
 deps/v8/src/ast/prettyprinter.cc | 26 +-
 deps/v8/src/ast/scopes.cc | 81 +-
 deps/v8/src/ast/scopes.h | 15 +-
 deps/v8/src/ast/variables.h | 53 +-
 deps/v8/src/base/adapters.h | 2 +-
 deps/v8/src/base/lsan.h | 29 +
 .../src/{common/v8memory.h => base/memory.h} | 30 +-
 deps/v8/src/base/platform/OWNERS | 2 -
 deps/v8/src/base/platform/platform-fuchsia.cc | 4 +-
 deps/v8/src/base/platform/platform-posix.cc | 6 +
 deps/v8/src/base/small-vector.h | 23 +-
 deps/v8/src/base/vlq-base64.cc | 58 +
 deps/v8/src/base/vlq-base64.h | 23 +
 deps/v8/src/builtins/OWNERS | 3 +
 deps/v8/src/builtins/accessors.cc | 5 +-
 deps/v8/src/builtins/arguments.tq | 4 +-
 deps/v8/src/builtins/arm/builtins-arm.cc | 43 +-
 deps/v8/src/builtins/arm64/builtins-arm64.cc | 39 +-
 deps/v8/src/builtins/array-copywithin.tq | 2 +-
 deps/v8/src/builtins/array-every.tq | 29 +-
 deps/v8/src/builtins/array-filter.tq | 31 +-
 deps/v8/src/builtins/array-find.tq | 34 +-
 deps/v8/src/builtins/array-findindex.tq | 35 +-
 deps/v8/src/builtins/array-foreach.tq | 30 +-
 deps/v8/src/builtins/array-join.tq | 39 +-
 deps/v8/src/builtins/array-lastindexof.tq | 4 +-
 deps/v8/src/builtins/array-map.tq | 49 +-
 deps/v8/src/builtins/array-of.tq | 5 +-
 deps/v8/src/builtins/array-reduce-right.tq | 53 +-
 deps/v8/src/builtins/array-reduce.tq | 50 +-
 deps/v8/src/builtins/array-reverse.tq | 2 +-
 deps/v8/src/builtins/array-shift.tq | 2 +-
 deps/v8/src/builtins/array-slice.tq | 9 +-
 deps/v8/src/builtins/array-some.tq | 30 +-
 deps/v8/src/builtins/array-splice.tq | 19 +-
 deps/v8/src/builtins/array-unshift.tq | 2 +-
 deps/v8/src/builtins/array.tq | 36 +-
 deps/v8/src/builtins/base.tq | 625 +-
 deps/v8/src/builtins/bigint.tq | 206 +
 deps/v8/src/builtins/boolean.tq | 29 +-
 deps/v8/src/builtins/builtins-api.cc | 16 +-
 .../v8/src/builtins/builtins-arguments-gen.cc | 3 +-
 deps/v8/src/builtins/builtins-array-gen.cc | 17 +-
 deps/v8/src/builtins/builtins-array.cc | 5 +-
 .../builtins/builtins-async-function-gen.cc | 41 +-
 deps/v8/src/builtins/builtins-bigint-gen.cc | 1 +
 deps/v8/src/builtins/builtins-bigint-gen.h | 80 +
 deps/v8/src/builtins/builtins-bigint.cc | 4 +-
 deps/v8/src/builtins/builtins-boolean-gen.cc | 19 +-
 deps/v8/src/builtins/builtins-call-gen.cc | 11 +-
 deps/v8/src/builtins/builtins-callsite.cc | 13 +-
 .../src/builtins/builtins-collections-gen.cc | 60 +-
 deps/v8/src/builtins/builtins-console.cc | 3 +-
 .../src/builtins/builtins-constructor-gen.cc | 36 +-
 .../src/builtins/builtins-constructor-gen.h | 12 +-
 .../src/builtins/builtins-conversion-gen.cc | 24 +-
 deps/v8/src/builtins/builtins-data-view-gen.h | 8 +-
 deps/v8/src/builtins/builtins-definitions.h | 19 +-
 deps/v8/src/builtins/builtins-error.cc | 9 +-
 deps/v8/src/builtins/builtins-global.cc | 18 +-
 deps/v8/src/builtins/builtins-handler-gen.cc | 38 +-
 deps/v8/src/builtins/builtins-internal-gen.cc | 117 +-
 deps/v8/src/builtins/builtins-intl.cc | 40 +-
 deps/v8/src/builtins/builtins-iterator-gen.cc | 31 +-
 deps/v8/src/builtins/builtins-iterator-gen.h | 29 +-
 deps/v8/src/builtins/builtins-math.cc | 26 +-
 deps/v8/src/builtins/builtins-number-gen.cc | 12 +-
 deps/v8/src/builtins/builtins-number.cc | 20 +-
 deps/v8/src/builtins/builtins-object-gen.cc | 44 +-
 deps/v8/src/builtins/builtins-object.cc | 72 +-
 deps/v8/src/builtins/builtins-promise-gen.cc | 32 +-
 deps/v8/src/builtins/builtins-proxy-gen.cc | 97 +-
 deps/v8/src/builtins/builtins-proxy-gen.h | 29 +-
 deps/v8/src/builtins/builtins-reflect.cc | 127 -
 deps/v8/src/builtins/builtins-regexp-gen.cc | 141 +-
 deps/v8/src/builtins/builtins-regexp-gen.h | 36 +-
 deps/v8/src/builtins/builtins-regexp.cc | 2 +-
 deps/v8/src/builtins/builtins-string-gen.cc | 48 +-
 deps/v8/src/builtins/builtins-string-gen.h | 3 -
 deps/v8/src/builtins/builtins-symbol-gen.cc | 16 +-
 .../src/builtins/builtins-typed-array-gen.cc | 73 +-
 .../src/builtins/builtins-typed-array-gen.h | 10 +-
 deps/v8/src/builtins/builtins-weak-refs.cc | 80 +-
 deps/v8/src/builtins/collections.tq | 2 +-
 deps/v8/src/builtins/data-view.tq | 383 +-
 deps/v8/src/builtins/extras-utils.tq | 7 +-
 deps/v8/src/builtins/ia32/builtins-ia32.cc | 46 +-
 deps/v8/src/builtins/internal-coverage.tq | 2 +
 deps/v8/src/builtins/iterator.tq | 8 +-
 deps/v8/src/builtins/math.tq | 48 +-
 deps/v8/src/builtins/mips/builtins-mips.cc | 76 +-
 .../v8/src/builtins/mips64/builtins-mips64.cc | 74 +-
 deps/v8/src/builtins/object-fromentries.tq | 7 +-
 deps/v8/src/builtins/object.tq | 138 +
 deps/v8/src/builtins/ppc/builtins-ppc.cc | 195 +-
 deps/v8/src/builtins/proxy-constructor.tq | 9 +-
 deps/v8/src/builtins/proxy-delete-property.tq | 67 +
 deps/v8/src/builtins/proxy-get-property.tq | 32 +-
 .../v8/src/builtins/proxy-get-prototype-of.tq | 70 +
 deps/v8/src/builtins/proxy-has-property.tq | 6 +-
 deps/v8/src/builtins/proxy-is-extensible.tq | 56 +
 .../src/builtins/proxy-prevent-extensions.tq | 66 +
 deps/v8/src/builtins/proxy-revocable.tq | 8 +-
 deps/v8/src/builtins/proxy-revoke.tq | 2 +-
 deps/v8/src/builtins/proxy-set-property.tq | 22 +-
 .../v8/src/builtins/proxy-set-prototype-of.tq | 77 +
 deps/v8/src/builtins/proxy.tq | 30 +-
 deps/v8/src/builtins/reflect.tq | 82 +
 deps/v8/src/builtins/regexp-replace.tq | 11 +-
 deps/v8/src/builtins/s390/builtins-s390.cc | 188 +-
 .../src/builtins/setup-builtins-internal.cc | 25 +-
 deps/v8/src/builtins/string-endswith.tq | 9 +-
 deps/v8/src/builtins/string-html.tq | 47 +-
 deps/v8/src/builtins/string-iterator.tq | 10 +-
 deps/v8/src/builtins/string-repeat.tq | 2 +-
 deps/v8/src/builtins/string-slice.tq | 2 +-
 deps/v8/src/builtins/string-startswith.tq | 16 +-
 deps/v8/src/builtins/string-substring.tq | 2 +-
 deps/v8/src/builtins/string.tq | 32 +-
 .../builtins/typed-array-createtypedarray.tq | 158 +-
 deps/v8/src/builtins/typed-array-every.tq | 4 +-
 deps/v8/src/builtins/typed-array-filter.tq | 2 +-
 deps/v8/src/builtins/typed-array-find.tq | 4 +-
 deps/v8/src/builtins/typed-array-findindex.tq | 4 +-
 deps/v8/src/builtins/typed-array-foreach.tq | 4 +-
 deps/v8/src/builtins/typed-array-reduce.tq | 10 +-
 .../src/builtins/typed-array-reduceright.tq | 10 +-
 deps/v8/src/builtins/typed-array-slice.tq | 2 +-
 deps/v8/src/builtins/typed-array-some.tq | 4 +-
 deps/v8/src/builtins/typed-array-subarray.tq | 3 +-
 deps/v8/src/builtins/typed-array.tq | 31 +-
 deps/v8/src/builtins/x64/builtins-x64.cc | 46 +-
 deps/v8/src/codegen/DEPS | 9 +
 deps/v8/src/codegen/OWNERS | 8 +-
 deps/v8/src/codegen/arm/assembler-arm.cc | 13 +-
 deps/v8/src/codegen/arm/assembler-arm.h | 3 +-
 .../v8/src/codegen/arm/macro-assembler-arm.cc | 26 +-
 deps/v8/src/codegen/arm/macro-assembler-arm.h | 5 +-
 .../src/codegen/arm64/assembler-arm64-inl.h | 130 +-
 deps/v8/src/codegen/arm64/assembler-arm64.cc | 423 +-
 deps/v8/src/codegen/arm64/assembler-arm64.h | 247 +-
 deps/v8/src/codegen/arm64/constants-arm64.h | 4 +-
 deps/v8/src/codegen/arm64/cpu-arm64.cc | 2 +-
 deps/v8/src/codegen/arm64/decoder-arm64.h | 2 +-
 .../v8/src/codegen/arm64/instructions-arm64.h | 1 +
 .../codegen/arm64/macro-assembler-arm64.cc | 34 +-
 .../src/codegen/arm64/macro-assembler-arm64.h | 25 +-
 deps/v8/src/codegen/arm64/register-arm64.h | 2 -
 deps/v8/src/codegen/assembler.cc | 36 +-
 deps/v8/src/codegen/assembler.h | 33 +-
 deps/v8/src/codegen/code-stub-assembler.cc | 871 +-
 deps/v8/src/codegen/code-stub-assembler.h | 391 +-
 deps/v8/src/codegen/compiler.cc | 155 +-
 deps/v8/src/codegen/compiler.h | 19 +-
 deps/v8/src/codegen/constant-pool.cc | 249 +
 deps/v8/src/codegen/constant-pool.h | 190 +-
 deps/v8/src/codegen/cpu-features.h | 1 +
 deps/v8/src/codegen/external-reference.cc | 37 +-
 deps/v8/src/codegen/external-reference.h | 7 +
 deps/v8/src/codegen/handler-table.cc | 38 +-
 deps/v8/src/codegen/handler-table.h | 10 +-
 deps/v8/src/codegen/ia32/assembler-ia32.cc | 7 +
 deps/v8/src/codegen/ia32/assembler-ia32.h | 1 +
 .../src/codegen/ia32/macro-assembler-ia32.cc | 14 +-
 .../src/codegen/ia32/macro-assembler-ia32.h | 5 +-
 deps/v8/src/codegen/interface-descriptors.cc | 5 +
 deps/v8/src/codegen/interface-descriptors.h | 12 +
 deps/v8/src/codegen/label.h | 2 +-
 deps/v8/src/codegen/mips/assembler-mips.cc | 3 +-
 deps/v8/src/codegen/mips/assembler-mips.h | 16 +-
 .../src/codegen/mips/macro-assembler-mips.cc | 33 +-
 .../src/codegen/mips/macro-assembler-mips.h | 15 +-
 .../v8/src/codegen/mips64/assembler-mips64.cc | 3 +-
 deps/v8/src/codegen/mips64/assembler-mips64.h | 4 +-
 .../codegen/mips64/macro-assembler-mips64.cc | 33 +-
 .../codegen/mips64/macro-assembler-mips64.h | 15 +-
 .../src/codegen/optimized-compilation-info.cc | 12 +-
 .../src/codegen/optimized-compilation-info.h | 21 +-
 .../src/codegen/pending-optimization-table.cc | 97 +
 .../src/codegen/pending-optimization-table.h | 44 +
 deps/v8/src/codegen/ppc/assembler-ppc.cc | 22 +-
 deps/v8/src/codegen/ppc/assembler-ppc.h | 27 +-
 deps/v8/src/codegen/ppc/code-stubs-ppc.cc | 28 -
 .../v8/src/codegen/ppc/macro-assembler-ppc.cc | 39 +-
 deps/v8/src/codegen/ppc/macro-assembler-ppc.h | 5 +-
 deps/v8/src/codegen/s390/assembler-s390.cc | 23 +-
 deps/v8/src/codegen/s390/assembler-s390.h | 29 +-
 deps/v8/src/codegen/s390/code-stubs-s390.cc | 27 -
 .../src/codegen/s390/macro-assembler-s390.cc | 22 +-
 .../src/codegen/s390/macro-assembler-s390.h | 5 +-
 deps/v8/src/codegen/safepoint-table.h | 13 +-
 deps/v8/src/codegen/source-position-table.cc | 15 +-
 deps/v8/src/codegen/tick-counter.cc | 23 +
 deps/v8/src/codegen/tick-counter.h | 28 +
 deps/v8/src/codegen/turbo-assembler.h | 6 +-
 deps/v8/src/codegen/x64/assembler-x64-inl.h | 4 +-
 deps/v8/src/codegen/x64/assembler-x64.cc | 220 +-
 deps/v8/src/codegen/x64/assembler-x64.h | 24 +
 deps/v8/src/codegen/x64/constants-x64.h | 3 +-
 .../v8/src/codegen/x64/macro-assembler-x64.cc | 82 +-
 deps/v8/src/codegen/x64/macro-assembler-x64.h | 18 +-
 deps/v8/src/codegen/x64/sse-instr.h | 5 +
 deps/v8/src/common/OWNERS | 3 +
 deps/v8/src/common/globals.h | 37 +-
 .../{execution => common}/message-template.h | 18 +-
 deps/v8/src/common/ptr-compr-inl.h | 31 +-
 deps/v8/src/compiler-dispatcher/OWNERS | 7 +
 deps/v8/src/compiler/OWNERS | 5 +-
 deps/v8/src/compiler/STYLE | 29 -
 deps/v8/src/compiler/access-builder.cc | 28 +-
 deps/v8/src/compiler/access-builder.h | 11 +-
 deps/v8/src/compiler/access-info.cc | 42 +-
 deps/v8/src/compiler/access-info.h | 35 +-
 .../compiler/add-type-assertions-reducer.cc | 51 +
 .../compiler/add-type-assertions-reducer.h | 45 +
 .../backend/arm/code-generator-arm.cc | 68 +-
 .../backend/arm/instruction-codes-arm.h | 1 +
 .../backend/arm/instruction-scheduler-arm.cc | 1 +
 .../backend/arm/instruction-selector-arm.cc | 9 +-
 .../backend/arm64/code-generator-arm64.cc | 73 +-
 .../backend/arm64/instruction-codes-arm64.h | 1 +
 .../arm64/instruction-scheduler-arm64.cc | 1 +
 .../arm64/instruction-selector-arm64.cc | 80 +-
 .../v8/src/compiler/backend/code-generator.cc | 4 +
 .../backend/ia32/code-generator-ia32.cc | 64 +-
 .../backend/ia32/instruction-codes-ia32.h | 3 +-
 .../ia32/instruction-scheduler-ia32.cc | 3 +-
 .../backend/ia32/instruction-selector-ia32.cc | 9 +-
 .../src/compiler/backend/instruction-codes.h | 2 +-
 .../compiler/backend/instruction-scheduler.cc | 2 +-
 .../backend/instruction-selector-impl.h | 12 +-
 .../compiler/backend/instruction-selector.cc | 105 +-
 .../compiler/backend/instruction-selector.h | 22 +-
 deps/v8/src/compiler/backend/instruction.cc | 5 +-
 deps/v8/src/compiler/backend/instruction.h | 6 +-
 deps/v8/src/compiler/backend/jump-threading.h | 13 +-
 .../compiler/backend/live-range-separator.cc | 24 +-
 .../backend/mips/code-generator-mips.cc | 70 +-
 .../backend/mips/instruction-codes-mips.h | 1 +
 .../mips/instruction-scheduler-mips.cc | 3 +-
 .../backend/mips/instruction-selector-mips.cc | 9 +-
 .../backend/mips64/code-generator-mips64.cc | 70 +-
 .../backend/mips64/instruction-codes-mips64.h | 1 +
 .../mips64/instruction-scheduler-mips64.cc | 3 +-
 .../mips64/instruction-selector-mips64.cc | 19 +-
 .../backend/ppc/code-generator-ppc.cc | 88 +-
 .../backend/ppc/instruction-codes-ppc.h | 1 +
 .../backend/ppc/instruction-scheduler-ppc.cc | 1 +
 .../backend/ppc/instruction-selector-ppc.cc | 9 +-
 .../compiler/backend/register-allocator.cc | 121 +-
 .../src/compiler/backend/register-allocator.h | 36 +-
 .../backend/s390/code-generator-s390.cc | 63 +-
 .../backend/s390/instruction-selector-s390.cc | 33 +-
 .../compiler/backend/unwinding-info-writer.h | 1 +
 .../backend/x64/code-generator-x64.cc | 352 +-
 .../backend/x64/instruction-codes-x64.h | 30 +-
 .../backend/x64/instruction-scheduler-x64.cc | 30 +-
 .../backend/x64/instruction-selector-x64.cc | 122 +-
 deps/v8/src/compiler/bytecode-analysis.cc | 94 +-
 deps/v8/src/compiler/bytecode-analysis.h | 45 +-
 .../v8/src/compiler/bytecode-graph-builder.cc | 324 +-
 deps/v8/src/compiler/bytecode-graph-builder.h | 11 +-
 deps/v8/src/compiler/code-assembler.cc | 32 +-
 deps/v8/src/compiler/code-assembler.h | 123 +-
 .../src/compiler/common-operator-reducer.cc | 8 +-
 deps/v8/src/compiler/common-operator.cc | 12 +-
 deps/v8/src/compiler/common-operator.h | 1 +
 .../src/compiler/compilation-dependencies.cc | 56 +-
 .../src/compiler/compilation-dependencies.h | 15 +-
 deps/v8/src/compiler/compilation-dependency.h | 32 +
 .../v8/src/compiler/control-flow-optimizer.cc | 7 +-
 deps/v8/src/compiler/control-flow-optimizer.h | 7 +-
 deps/v8/src/compiler/csa-load-elimination.cc | 336 +
 deps/v8/src/compiler/csa-load-elimination.h | 118 +
 .../src/compiler/decompression-elimination.cc | 37 +-
 .../src/compiler/decompression-elimination.h | 5 +-
 deps/v8/src/compiler/diamond.h | 4 +-
 .../src/compiler/effect-control-linearizer.cc | 320 +-
 deps/v8/src/compiler/escape-analysis.cc | 54 +-
 deps/v8/src/compiler/escape-analysis.h | 11 +-
 deps/v8/src/compiler/graph-assembler.cc | 9 +
 deps/v8/src/compiler/graph-assembler.h | 38 +-
 deps/v8/src/compiler/graph-reducer.cc | 12 +-
 deps/v8/src/compiler/graph-reducer.h | 8 +-
 deps/v8/src/compiler/heap-refs.h | 906 +
 deps/v8/src/compiler/int64-lowering.cc | 37 +-
 deps/v8/src/compiler/int64-lowering.h | 2 +-
 deps/v8/src/compiler/js-call-reducer.cc | 1265 +-
 deps/v8/src/compiler/js-call-reducer.h | 17 +-
 .../src/compiler/js-context-specialization.cc | 20 +-
 deps/v8/src/compiler/js-create-lowering.cc | 2 +-
 deps/v8/src/compiler/js-graph.cc | 8 +
 deps/v8/src/compiler/js-graph.h | 52 +-
 deps/v8/src/compiler/js-heap-broker.cc | 909 +-
 deps/v8/src/compiler/js-heap-broker.h | 831 +-
 deps/v8/src/compiler/js-heap-copy-reducer.cc | 3 +-
 deps/v8/src/compiler/js-inlining-heuristic.cc | 66 +-
 deps/v8/src/compiler/js-inlining-heuristic.h | 2 +-
 deps/v8/src/compiler/js-inlining.cc | 17 +-
 deps/v8/src/compiler/js-inlining.h | 3 +-
 .../js-native-context-specialization.cc | 320 +-
 .../js-native-context-specialization.h | 35 +-
 deps/v8/src/compiler/js-operator.cc | 15 +-
 deps/v8/src/compiler/js-operator.h | 14 +-
 deps/v8/src/compiler/js-type-hint-lowering.cc | 58 +
 deps/v8/src/compiler/js-type-hint-lowering.h | 3 +-
 deps/v8/src/compiler/js-typed-lowering.cc | 18 +-
 deps/v8/src/compiler/linkage.cc | 8 +-
 deps/v8/src/compiler/linkage.h | 2 +-
 deps/v8/src/compiler/load-elimination.cc | 28 +-
 deps/v8/src/compiler/load-elimination.h | 2 +-
 deps/v8/src/compiler/loop-analysis.cc | 20 +-
 deps/v8/src/compiler/loop-analysis.h | 6 +-
 .../v8/src/compiler/machine-graph-verifier.cc | 13 +-
 .../src/compiler/machine-operator-reducer.cc | 3 +-
 deps/v8/src/compiler/machine-operator.cc | 55 +-
 deps/v8/src/compiler/machine-operator.h | 42 +-
 deps/v8/src/compiler/map-inference.cc | 25 +-
 deps/v8/src/compiler/memory-optimizer.cc | 45 +-
 deps/v8/src/compiler/memory-optimizer.h | 6 +-
 deps/v8/src/compiler/node-properties.cc | 3 +-
 deps/v8/src/compiler/node-properties.h | 3 +-
 deps/v8/src/compiler/node.cc | 8 +-
 deps/v8/src/compiler/opcodes.h | 53 +-
 deps/v8/src/compiler/operation-typer.cc | 31 +-
 deps/v8/src/compiler/operation-typer.h | 6 +-
 deps/v8/src/compiler/pipeline.cc | 307 +-
 deps/v8/src/compiler/pipeline.h | 15 +-
 .../src/compiler/property-access-builder.cc | 13 +-
 deps/v8/src/compiler/raw-machine-assembler.cc | 4 +-
 deps/v8/src/compiler/raw-machine-assembler.h | 5 +-
 .../v8/src/compiler/redundancy-elimination.cc | 3 +
 deps/v8/src/compiler/representation-change.cc | 192 +-
 deps/v8/src/compiler/representation-change.h | 51 +-
 deps/v8/src/compiler/scheduler.cc | 38 +-
 deps/v8/src/compiler/scheduler.h | 10 +-
 .../serializer-for-background-compilation.cc | 1402 +-
 .../serializer-for-background-compilation.h | 329 +-
 deps/v8/src/compiler/simd-scalar-lowering.cc | 29 +-
 deps/v8/src/compiler/simd-scalar-lowering.h | 9 +-
 deps/v8/src/compiler/simplified-lowering.cc | 175 +-
 deps/v8/src/compiler/simplified-lowering.h | 8 +-
 deps/v8/src/compiler/simplified-operator.cc | 112 +-
 deps/v8/src/compiler/simplified-operator.h | 29 +-
 deps/v8/src/compiler/state-values-utils.cc | 8 +-
 deps/v8/src/compiler/state-values-utils.h | 4 +-
 .../src/compiler/store-store-elimination.cc | 16 +-
 .../v8/src/compiler/store-store-elimination.h | 6 +-
 deps/v8/src/compiler/typer.cc | 30 +-
 deps/v8/src/compiler/typer.h | 7 +-
 deps/v8/src/compiler/types.cc | 14 +-
 deps/v8/src/compiler/types.h | 3 +-
 deps/v8/src/compiler/verifier.cc | 46 +-
 deps/v8/src/compiler/wasm-compiler.cc | 712 +-
 deps/v8/src/compiler/wasm-compiler.h | 51 +-
 deps/v8/src/d8/d8.cc | 87 +-
 deps/v8/src/date/OWNERS | 3 +
 deps/v8/src/debug/OWNERS | 2 -
 deps/v8/src/debug/debug-coverage.cc | 15 +-
 deps/v8/src/debug/debug-evaluate.cc | 9 +-
 deps/v8/src/debug/debug-evaluate.h | 5 +-
 deps/v8/src/debug/debug-frames.cc | 15 +-
 deps/v8/src/debug/debug-frames.h | 11 +-
 deps/v8/src/debug/debug-interface.h | 5 +-
 deps/v8/src/debug/debug-scope-iterator.h | 1 -
 deps/v8/src/debug/debug-scopes.cc | 60 +-
 deps/v8/src/debug/debug-scopes.h | 2 +-
 .../src/debug/debug-stack-trace-iterator.cc | 5 +-
 deps/v8/src/debug/debug.cc | 20 +-
 deps/v8/src/debug/debug.h | 11 +-
 deps/v8/src/debug/liveedit.cc | 40 +-
 deps/v8/src/deoptimizer/OWNERS | 2 +
 .../v8/src/deoptimizer/arm/deoptimizer-arm.cc | 2 +-
 deps/v8/src/deoptimizer/deoptimize-reason.h | 1 +
 deps/v8/src/deoptimizer/deoptimizer.cc | 90 +-
 deps/v8/src/deoptimizer/deoptimizer.h | 26 +-
 .../src/deoptimizer/ia32/deoptimizer-ia32.cc | 10 +
 .../src/deoptimizer/mips/deoptimizer-mips.cc | 2 +-
 .../deoptimizer/mips64/deoptimizer-mips64.cc | 2 +-
 .../v8/src/deoptimizer/ppc/deoptimizer-ppc.cc | 42 +-
 .../src/deoptimizer/s390/deoptimizer-s390.cc | 2 +-
 .../v8/src/deoptimizer/x64/deoptimizer-x64.cc | 10 +
 deps/v8/src/diagnostics/DEPS | 3 +
 deps/v8/src/diagnostics/eh-frame.cc | 8 +-
 deps/v8/src/diagnostics/eh-frame.h | 14 +-
 deps/v8/src/diagnostics/gdb-jit.cc | 87 +-
 deps/v8/src/diagnostics/ia32/disasm-ia32.cc | 124 +-
 deps/v8/src/diagnostics/objects-debug.cc | 294 +-
 deps/v8/src/diagnostics/objects-printer.cc | 82 +-
 .../src/diagnostics/unwinding-info-win64.cc | 31 -
 deps/v8/src/diagnostics/x64/disasm-x64.cc | 108 +-
 deps/v8/src/execution/OWNERS | 5 +-
 .../src/execution/arm/frame-constants-arm.cc | 4 +-
 .../execution/arm64/frame-constants-arm64.cc | 6 +-
 deps/v8/src/execution/execution.cc | 350 +-
 deps/v8/src/execution/execution.h | 177 +-
 deps/v8/src/execution/frame-constants.h | 9 +-
 deps/v8/src/execution/frames-inl.h | 33 +-
 deps/v8/src/execution/frames.cc | 46 +-
 deps/v8/src/execution/frames.h | 25 +-
 .../execution/ia32/frame-constants-ia32.cc | 6 +-
 deps/v8/src/execution/interrupts-scope.cc | 42 +
 deps/v8/src/execution/interrupts-scope.h | 72 +
 deps/v8/src/execution/isolate-data.h | 8 +
 deps/v8/src/execution/isolate-inl.h | 7 +-
 deps/v8/src/execution/isolate-utils-inl.h | 64 +
 deps/v8/src/execution/isolate-utils.h | 31 +
 deps/v8/src/execution/isolate.cc | 231 +-
 deps/v8/src/execution/isolate.h | 83 +-
 deps/v8/src/execution/messages.cc | 312 +-
 deps/v8/src/execution/messages.h | 21 +-
 deps/v8/src/execution/microtask-queue.cc | 6 +-
 deps/v8/src/execution/microtask-queue.h | 2 +-
 .../execution/mips/frame-constants-mips.cc | 7 +-
 deps/v8/src/execution/mips/simulator-mips.cc | 372 +-
 deps/v8/src/execution/mips/simulator-mips.h | 18 +-
 .../mips64/frame-constants-mips64.cc | 3 +-
 .../src/execution/mips64/simulator-mips64.cc | 372 +-
 .../src/execution/mips64/simulator-mips64.h | 20 +-
 deps/v8/src/execution/ppc/simulator-ppc.cc | 2 +-
 deps/v8/src/execution/s390/simulator-s390.cc | 90 +-
 deps/v8/src/execution/stack-guard.cc | 345 +
 deps/v8/src/execution/stack-guard.h | 186 +
 .../src/execution/x64/frame-constants-x64.cc | 1 +
 deps/v8/src/extensions/OWNERS | 1 +
 .../src/extensions/cputracemark-extension.cc | 56 +
 .../src/extensions/cputracemark-extension.h | 38 +
 .../v8/src/extensions/statistics-extension.cc | 7 +-
 deps/v8/src/flags/OWNERS | 1 +
 deps/v8/src/flags/flag-definitions.h | 81 +-
 deps/v8/src/handles/OWNERS | 3 +
 deps/v8/src/handles/global-handles.cc | 57 +-
 deps/v8/src/handles/handles.cc | 4 +-
 deps/v8/src/heap/OWNERS | 2 -
 deps/v8/src/heap/array-buffer-tracker-inl.h | 2 -
 deps/v8/src/heap/array-buffer-tracker.h | 2 -
 deps/v8/src/heap/basic-memory-chunk.cc | 54 +
 deps/v8/src/heap/basic-memory-chunk.h | 229 +
 deps/v8/src/heap/code-stats.cc | 6 +-
 deps/v8/src/heap/combined-heap.cc | 10 +-
 deps/v8/src/heap/combined-heap.h | 20 +-
 deps/v8/src/heap/concurrent-marking.cc | 30 +-
 deps/v8/src/heap/embedder-tracing.cc | 6 +-
 deps/v8/src/heap/embedder-tracing.h | 21 +-
 deps/v8/src/heap/factory-inl.h | 9 +
 deps/v8/src/heap/factory.cc | 280 +-
 deps/v8/src/heap/factory.h | 48 +-
 deps/v8/src/heap/gc-tracer.cc | 29 +-
 deps/v8/src/heap/gc-tracer.h | 2 +
 deps/v8/src/heap/heap-controller.cc | 21 +-
 deps/v8/src/heap/heap-controller.h | 7 +-
 deps/v8/src/heap/heap-inl.h | 17 +-
 deps/v8/src/heap/heap-write-barrier-inl.h | 48 +-
 deps/v8/src/heap/heap-write-barrier.h | 2 +-
 deps/v8/src/heap/heap.cc | 649 +-
 deps/v8/src/heap/heap.h | 214 +-
 deps/v8/src/heap/incremental-marking.cc | 39 +-
 deps/v8/src/heap/incremental-marking.h | 12 +-
 deps/v8/src/heap/item-parallel-job.cc | 7 +-
 deps/v8/src/heap/item-parallel-job.h | 6 +-
 deps/v8/src/heap/mark-compact.cc | 194 +-
 deps/v8/src/heap/object-stats.cc | 2 +-
 deps/v8/src/heap/objects-visiting-inl.h | 28 +-
 deps/v8/src/heap/objects-visiting.h | 5 +-
 deps/v8/src/heap/read-only-heap-inl.h | 31 +
 deps/v8/src/heap/read-only-heap.cc | 85 +-
 deps/v8/src/heap/read-only-heap.h | 23 +-
 deps/v8/src/heap/remembered-set.h | 4 +-
 deps/v8/src/heap/scavenger-inl.h | 24 +-
 deps/v8/src/heap/scavenger.cc | 22 +-
 deps/v8/src/heap/setup-heap-internal.cc | 16 +-
 deps/v8/src/heap/spaces-inl.h | 36 +-
 deps/v8/src/heap/spaces.cc | 574 +-
 deps/v8/src/heap/spaces.h | 1193 +-
 deps/v8/src/heap/store-buffer.cc | 11 +-
 deps/v8/src/heap/stress-marking-observer.cc | 8 +-
 deps/v8/src/heap/stress-marking-observer.h | 4 +-
 deps/v8/src/heap/stress-scavenge-observer.cc | 24 +-
 deps/v8/src/heap/stress-scavenge-observer.h | 4 +-
 deps/v8/src/heap/sweeper.cc | 12 +-
 deps/v8/src/ic/OWNERS | 2 -
 deps/v8/src/ic/accessor-assembler.cc | 741 +-
 deps/v8/src/ic/accessor-assembler.h | 113 +-
 deps/v8/src/ic/binary-op-assembler.cc | 28 +-
 deps/v8/src/ic/call-optimization.cc | 3 +-
 deps/v8/src/ic/ic-inl.h | 24 -
 deps/v8/src/ic/ic.cc | 286 +-
 deps/v8/src/ic/ic.h | 37 +-
 deps/v8/src/ic/keyed-store-generic.cc | 87 +-
 deps/v8/src/ic/stub-cache.cc | 46 +-
 deps/v8/src/ic/stub-cache.h | 14 +-
 deps/v8/src/init/OWNERS | 11 +-
 deps/v8/src/init/bootstrapper.cc | 162 +-
 deps/v8/src/init/heap-symbols.h | 15 +-
 deps/v8/src/init/isolate-allocator.cc | 2 +-
 deps/v8/src/init/setup-isolate-deserialize.cc | 1 -
 deps/v8/src/inspector/BUILD.gn | 15 +-
 deps/v8/src/inspector/DEPS | 3 +-
 deps/v8/src/inspector/OWNERS | 8 -
 deps/v8/src/inspector/injected-script.cc | 42 +-
 .../inspector/inspector_protocol_config.json | 10 +-
 deps/v8/src/inspector/string-16.cc | 17 +
 deps/v8/src/inspector/string-16.h | 15 +-
 deps/v8/src/inspector/string-util.cc | 6 -
 deps/v8/src/inspector/string-util.h | 25 +-
 deps/v8/src/inspector/v8-console-message.cc | 21 +-
 deps/v8/src/inspector/v8-console.cc | 10 +-
 .../src/inspector/v8-debugger-agent-impl.cc | 54 +-
 deps/v8/src/inspector/v8-debugger-script.cc | 4 +-
 deps/v8/src/inspector/v8-debugger.cc | 3 +-
 .../inspector/v8-heap-profiler-agent-impl.cc | 31 +-
 .../inspector/v8-inspector-session-impl.cc | 93 +-
 .../src/inspector/v8-inspector-session-impl.h | 3 +-
 .../src/inspector/v8-profiler-agent-impl.cc | 92 +-
 .../v8/src/inspector/v8-runtime-agent-impl.cc | 15 +-
 deps/v8/src/inspector/v8-schema-agent-impl.cc | 9 +-
 deps/v8/src/inspector/v8-stack-trace-impl.cc | 15 +-
 deps/v8/src/inspector/v8-stack-trace-impl.h | 2 +
 .../v8/src/inspector/v8-string-conversions.cc | 7 +-
 deps/v8/src/inspector/value-mirror.cc | 86 +-
 deps/v8/src/interpreter/OWNERS | 2 -
 .../interpreter/bytecode-array-accessor.cc | 92 +-
 .../src/interpreter/bytecode-array-accessor.h | 33 +-
 .../interpreter/bytecode-array-iterator.cc | 4 +
 .../src/interpreter/bytecode-array-iterator.h | 4 +-
 .../bytecode-array-random-iterator.cc | 12 +-
 .../bytecode-array-random-iterator.h | 7 +-
 .../src/interpreter/bytecode-array-writer.cc | 12 +-
 deps/v8/src/interpreter/bytecode-decoder.cc | 10 +-
 deps/v8/src/interpreter/bytecode-generator.cc | 169 +-
 deps/v8/src/interpreter/bytecode-generator.h | 17 +-
 .../src/interpreter/handler-table-builder.h | 3 +-
 .../src/interpreter/interpreter-assembler.cc | 6 +-
 .../src/interpreter/interpreter-assembler.h | 2 +-
 .../src/interpreter/interpreter-generator.cc | 23 +-
 .../interpreter-intrinsics-generator.cc | 6 +-
 deps/v8/src/interpreter/interpreter.cc | 17 +-
 deps/v8/src/interpreter/interpreter.h | 6 +-
 deps/v8/src/json/OWNERS | 3 +
 deps/v8/src/json/json-parser.cc | 4 +-
 deps/v8/src/json/json-stringifier.cc | 24 +-
 deps/v8/src/libplatform/tracing/OWNERS | 1 +
 .../tracing/json-trace-event-listener.cc | 4 +-
 .../tracing/json-trace-event-listener.h | 5 +-
 .../libplatform/tracing/perfetto-consumer.cc | 44 -
 .../libplatform/tracing/perfetto-consumer.h | 80 -
 .../libplatform/tracing/perfetto-producer.cc | 45 -
 .../libplatform/tracing/perfetto-producer.h | 70 -
 .../tracing/perfetto-shared-memory.cc | 28 -
 .../tracing/perfetto-shared-memory.h | 45 -
 .../src/libplatform/tracing/perfetto-tasks.cc | 52 -
 .../src/libplatform/tracing/perfetto-tasks.h | 55 -
 .../tracing/perfetto-tracing-controller.cc | 130 -
 .../tracing/perfetto-tracing-controller.h | 86 -
 .../tracing/trace-event-listener.cc | 27 +
 .../tracing/trace-event-listener.h | 9 +-
 .../libplatform/tracing/tracing-controller.cc | 147 +-
 deps/v8/src/libsampler/OWNERS | 3 +
 deps/v8/src/logging/counters-definitions.h | 9 +-
 deps/v8/src/logging/counters.h | 4 +-
 deps/v8/src/logging/log.cc | 57 +-
 deps/v8/src/numbers/OWNERS | 6 +-
 deps/v8/src/numbers/conversions.cc | 4 +-
 deps/v8/src/objects/OWNERS | 3 +
 deps/v8/src/objects/api-callbacks-inl.h | 8 +-
 deps/v8/src/objects/api-callbacks.h | 16 +-
 deps/v8/src/objects/arguments-inl.h | 10 +-
 deps/v8/src/objects/arguments.h | 4 +-
 deps/v8/src/objects/bigint.cc | 183 +-
 deps/v8/src/objects/bigint.h | 10 +
 deps/v8/src/objects/code-inl.h | 50 +-
 deps/v8/src/objects/code.cc | 22 +-
 deps/v8/src/objects/code.h | 50 +-
 deps/v8/src/objects/compressed-slots-inl.h | 54 +-
 deps/v8/src/objects/compressed-slots.h | 29 +-
 deps/v8/src/objects/contexts-inl.h | 8 +-
 deps/v8/src/objects/contexts.cc | 40 +-
 deps/v8/src/objects/contexts.h | 52 +-
 deps/v8/src/objects/descriptor-array-inl.h | 114 +-
 deps/v8/src/objects/descriptor-array.h | 45 +-
 deps/v8/src/objects/dictionary-inl.h | 37 +-
 deps/v8/src/objects/dictionary.h | 11 +-
 deps/v8/src/objects/elements.cc | 251 +-
 deps/v8/src/objects/elements.h | 11 +-
 deps/v8/src/objects/embedder-data-slot-inl.h | 6 +-
 deps/v8/src/objects/feedback-vector-inl.h | 19 +-
 deps/v8/src/objects/feedback-vector.cc | 73 +-
 deps/v8/src/objects/feedback-vector.h | 14 +-
 deps/v8/src/objects/field-index-inl.h | 12 +-
 deps/v8/src/objects/field-index.h | 6 +-
 deps/v8/src/objects/fixed-array-inl.h | 78 +-
 deps/v8/src/objects/fixed-array.h | 45 +-
 deps/v8/src/objects/free-space-inl.h | 36 +-
 deps/v8/src/objects/free-space.h | 3 +
 deps/v8/src/objects/hash-table-inl.h | 20 +-
 deps/v8/src/objects/hash-table.h | 9 +-
 deps/v8/src/objects/heap-number-inl.h | 4 +-
 deps/v8/src/objects/heap-object-inl.h | 14 -
 deps/v8/src/objects/heap-object.h | 58 +-
 deps/v8/src/objects/instance-type.h | 40 +-
 deps/v8/src/objects/intl-objects.cc | 273 +-
 deps/v8/src/objects/intl-objects.h | 28 +-
 deps/v8/src/objects/intl-objects.tq | 24 +-
 deps/v8/src/objects/js-array-buffer-inl.h | 4 +-
 deps/v8/src/objects/js-array-buffer.h | 6 +
 deps/v8/src/objects/js-array-inl.h | 7 +-
 deps/v8/src/objects/js-array.h | 6 +-
 deps/v8/src/objects/js-break-iterator-inl.h | 12 +-
 deps/v8/src/objects/js-break-iterator.cc | 20 +-
 deps/v8/src/objects/js-break-iterator.h | 32 +-
 deps/v8/src/objects/js-collator-inl.h | 2 +-
 deps/v8/src/objects/js-collator.cc | 29 +-
 deps/v8/src/objects/js-collator.h | 14 +-
 deps/v8/src/objects/js-collection-iterator.h | 1 +
 deps/v8/src/objects/js-collection.h | 4 +
 deps/v8/src/objects/js-date-time-format.cc | 221 +-
 deps/v8/src/objects/js-date-time-format.h | 6 +-
 deps/v8/src/objects/js-list-format-inl.h | 4 +-
 deps/v8/src/objects/js-list-format.cc | 64 +-
 deps/v8/src/objects/js-list-format.h | 18 +-
 deps/v8/src/objects/js-locale.cc | 13 +-
 deps/v8/src/objects/js-locale.h | 9 +-
 deps/v8/src/objects/js-number-format-inl.h | 11 +
 deps/v8/src/objects/js-number-format.cc | 326 +-
 deps/v8/src/objects/js-number-format.h | 37 +-
 deps/v8/src/objects/js-objects-inl.h | 351 +-
 deps/v8/src/objects/js-objects.cc | 309 +-
 deps/v8/src/objects/js-objects.h | 263 +-
 deps/v8/src/objects/js-plural-rules-inl.h | 7 +-
 deps/v8/src/objects/js-plural-rules.cc | 229 +-
 deps/v8/src/objects/js-plural-rules.h | 20 +-
 deps/v8/src/objects/js-proxy-inl.h | 7 +-
 deps/v8/src/objects/js-proxy.h | 19 +-
 deps/v8/src/objects/js-regexp.h | 57 +-
 .../src/objects/js-relative-time-format-inl.h | 4 +-
 .../v8/src/objects/js-relative-time-format.cc | 39 +-
 deps/v8/src/objects/js-relative-time-format.h | 19 +-
 deps/v8/src/objects/js-segment-iterator-inl.h | 2 +-
 deps/v8/src/objects/js-segment-iterator.cc | 21 +-
 deps/v8/src/objects/js-segmenter-inl.h | 2 +-
 deps/v8/src/objects/js-segmenter.cc | 31 +-
 deps/v8/src/objects/js-segmenter.h | 13 +-
 deps/v8/src/objects/js-weak-refs-inl.h | 14 +-
 deps/v8/src/objects/js-weak-refs.h | 68 +-
 deps/v8/src/objects/keys.cc | 5 +
 deps/v8/src/objects/layout-descriptor-inl.h | 4 +-
 deps/v8/src/objects/literal-objects-inl.h | 72 +
 deps/v8/src/objects/literal-objects.cc | 75 +-
 deps/v8/src/objects/literal-objects.h | 19 +-
 deps/v8/src/objects/lookup-inl.h | 55 +-
 deps/v8/src/objects/lookup.cc | 487 +-
 deps/v8/src/objects/lookup.h | 10 +-
 deps/v8/src/objects/map-inl.h | 156 +-
 deps/v8/src/objects/map-updater.cc | 27 +-
 deps/v8/src/objects/map-updater.h | 5 +-
 deps/v8/src/objects/map.cc | 185 +-
 deps/v8/src/objects/map.h | 91 +-
 deps/v8/src/objects/maybe-object.h | 4 +
 deps/v8/src/objects/module-inl.h | 97 +-
 deps/v8/src/objects/module.cc | 768 +-
 deps/v8/src/objects/module.h | 220 +-
 deps/v8/src/objects/name-inl.h | 31 +-
 deps/v8/src/objects/name.h | 37 +-
 deps/v8/src/objects/object-list-macros.h | 28 +-
 deps/v8/src/objects/object-macros-undef.h | 7 +-
 deps/v8/src/objects/object-macros.h | 185 +-
 .../objects/objects-body-descriptors-inl.h | 13 +-
 deps/v8/src/objects/objects-definitions.h | 35 +-
 deps/v8/src/objects/objects-inl.h | 378 +-
 deps/v8/src/objects/objects.cc | 252 +-
 deps/v8/src/objects/objects.h | 67 +-
 deps/v8/src/objects/oddball-inl.h | 6 +-
 deps/v8/src/objects/ordered-hash-table-inl.h | 4 +-
 deps/v8/src/objects/ordered-hash-table.cc | 6 +-
 deps/v8/src/objects/ordered-hash-table.h | 10 +-
 deps/v8/src/objects/property-array-inl.h | 35 +-
 deps/v8/src/objects/property-array.h | 6 +
 deps/v8/src/objects/property-cell.h | 2 +-
 deps/v8/src/objects/property.cc | 3 +-
 deps/v8/src/objects/prototype-inl.h | 9 +-
 deps/v8/src/objects/scope-info.cc | 71 +-
 deps/v8/src/objects/scope-info.h | 23 +-
 .../v8/src/objects/shared-function-info-inl.h | 22 +-
 deps/v8/src/objects/shared-function-info.h | 31 +-
 deps/v8/src/objects/slots.h | 12 +-
 deps/v8/src/objects/source-text-module.cc | 661 +
 deps/v8/src/objects/source-text-module.h | 220 +
 deps/v8/src/objects/stack-frame-info-inl.h | 4 +
 deps/v8/src/objects/stack-frame-info.cc | 313 +
 deps/v8/src/objects/stack-frame-info.h | 35 +-
 deps/v8/src/objects/string-inl.h | 77 +-
 deps/v8/src/objects/string.cc | 46 +-
 deps/v8/src/objects/string.h | 91 +-
 deps/v8/src/objects/synthetic-module.cc | 108 +
 deps/v8/src/objects/synthetic-module.h | 69 +
 deps/v8/src/objects/tagged-field-inl.h | 162 +
 deps/v8/src/objects/tagged-field.h | 76 +
 deps/v8/src/objects/tagged-impl-inl.h | 44 +-
 deps/v8/src/objects/tagged-impl.h | 39 +-
 deps/v8/src/objects/tagged-value-inl.h | 31 +-
 deps/v8/src/objects/tagged-value.h | 6 +-
 deps/v8/src/objects/template-objects.cc | 4 +-
 deps/v8/src/objects/template-objects.h | 2 +-
 deps/v8/src/objects/templates-inl.h | 8 +-
 deps/v8/src/objects/templates.h | 2 +-
 deps/v8/src/objects/transitions-inl.h | 7 +-
 deps/v8/src/objects/transitions.cc | 2 +-
 deps/v8/src/objects/transitions.h | 2 +-
 deps/v8/src/objects/value-serializer.cc | 225 +-
 deps/v8/src/objects/value-serializer.h | 12 +-
 deps/v8/src/parsing/OWNERS | 2 -
 deps/v8/src/parsing/expression-scope.h | 11 +-
 deps/v8/src/parsing/parse-info.cc | 2 +-
 deps/v8/src/parsing/parser-base.h | 71 +-
 deps/v8/src/parsing/parser.cc | 29 +-
 deps/v8/src/parsing/parser.h | 30 +-
 .../pending-compilation-error-handler.cc | 29 +-
 .../pending-compilation-error-handler.h | 13 +-
 deps/v8/src/parsing/preparse-data.cc | 2 +-
 deps/v8/src/parsing/preparser.cc | 8 +-
 deps/v8/src/parsing/preparser.h | 81 +-
 .../src/parsing/scanner-character-streams.cc | 12 +-
 deps/v8/src/parsing/scanner.cc | 36 +-
 deps/v8/src/parsing/scanner.h | 4 +-
 deps/v8/src/profiler/heap-profiler.cc | 16 +-
 .../src/profiler/heap-snapshot-generator.cc | 7 +-
 .../v8/src/profiler/heap-snapshot-generator.h | 2 -
 .../v8/src/profiler/sampling-heap-profiler.cc | 10 -
 deps/v8/src/profiler/tick-sample.cc | 4 +-
 deps/v8/src/regexp/OWNERS | 2 -
 deps/v8/src/regexp/jsregexp-inl.h | 86 -
 deps/v8/src/regexp/jsregexp.cc | 7055 --------
 deps/v8/src/regexp/jsregexp.h | 1548 --
 deps/v8/src/regexp/regexp-ast.h | 24 +-
 ...-inl.h => regexp-bytecode-generator-inl.h} | 24 +-
 ...regexp.cc => regexp-bytecode-generator.cc} | 221 +-
 ...irregexp.h => regexp-bytecode-generator.h} | 39 +-
 ...ytecodes-irregexp.h => regexp-bytecodes.h} | 11 +-
 deps/v8/src/regexp/regexp-compiler-tonode.cc | 1678 ++
 deps/v8/src/regexp/regexp-compiler.cc | 3551 ++++
 deps/v8/src/regexp/regexp-compiler.h | 657 +
 deps/v8/src/regexp/regexp-dotprinter.cc | 244 +
 deps/v8/src/regexp/regexp-dotprinter.h | 23 +
 ...eter-irregexp.cc => regexp-interpreter.cc} | 269 +-
 ...preter-irregexp.h => regexp-interpreter.h} | 18 +-
 .../src/regexp/regexp-macro-assembler-arch.h | 30 +
 deps/v8/src/regexp/regexp-macro-assembler.cc | 9 +-
 deps/v8/src/regexp/regexp-macro-assembler.h | 8 +-
 deps/v8/src/regexp/regexp-nodes.h | 675 +
 deps/v8/src/regexp/regexp-parser.cc | 66 +-
 deps/v8/src/regexp/regexp-parser.h | 10 +-
 deps/v8/src/regexp/regexp-utils.cc | 6 +-
 deps/v8/src/regexp/regexp.cc | 1018 ++
 deps/v8/src/regexp/regexp.h | 177 +
 deps/v8/src/roots/OWNERS | 11 +
 deps/v8/src/roots/roots-inl.h | 3 +-
 deps/v8/src/roots/roots.h | 3 +-
 deps/v8/src/runtime/OWNERS | 3 +
 deps/v8/src/runtime/runtime-classes.cc | 40 +-
 deps/v8/src/runtime/runtime-compiler.cc | 31 +-
 deps/v8/src/runtime/runtime-debug.cc | 30 +-
 deps/v8/src/runtime/runtime-generator.cc | 1 +
 deps/v8/src/runtime/runtime-internal.cc | 44 +-
 deps/v8/src/runtime/runtime-interpreter.cc | 7 +-
 deps/v8/src/runtime/runtime-literals.cc | 349 +-
 deps/v8/src/runtime/runtime-module.cc | 8 +-
 deps/v8/src/runtime/runtime-object.cc | 82 +-
 deps/v8/src/runtime/runtime-proxy.cc | 12 +
 deps/v8/src/runtime/runtime-regexp.cc | 48 +-
 deps/v8/src/runtime/runtime-scopes.cc | 18 +-
 deps/v8/src/runtime/runtime-strings.cc | 1 -
 deps/v8/src/runtime/runtime-test.cc | 127 +-
 deps/v8/src/runtime/runtime-typedarray.cc | 2 +-
 deps/v8/src/runtime/runtime-wasm.cc | 164 +-
 deps/v8/src/runtime/runtime-weak-refs.cc | 3 +-
 deps/v8/src/runtime/runtime.h | 92 +-
 deps/v8/src/snapshot/OWNERS | 2 -
 deps/v8/src/snapshot/code-serializer.cc | 14 +-
 .../v8/src/snapshot/deserializer-allocator.cc | 56 +-
 deps/v8/src/snapshot/deserializer-allocator.h | 12 +-
 deps/v8/src/snapshot/deserializer.cc | 127 +-
 deps/v8/src/snapshot/deserializer.h | 19 +-
 .../platform-embedded-file-writer-generic.cc | 10 +-
 .../platform-embedded-file-writer-win.cc | 7 +-
 deps/v8/src/snapshot/mksnapshot.cc | 6 +-
 deps/v8/src/snapshot/natives.h | 1 -
 deps/v8/src/snapshot/partial-deserializer.cc | 3 +-
 deps/v8/src/snapshot/partial-serializer.cc | 2 +-
 .../v8/src/snapshot/read-only-deserializer.cc | 2 +-
 deps/v8/src/snapshot/read-only-serializer.cc | 2 +-
 deps/v8/src/snapshot/references.h | 61 +-
 deps/v8/src/snapshot/serializer-allocator.cc | 66 +-
 deps/v8/src/snapshot/serializer-allocator.h | 8 +-
 deps/v8/src/snapshot/serializer-common.h | 30 +-
 deps/v8/src/snapshot/serializer.cc | 72 +-
 deps/v8/src/snapshot/serializer.h | 10 +-
 deps/v8/src/snapshot/snapshot-source-sink.h | 8 +
 deps/v8/src/snapshot/snapshot.h | 6 +-
 deps/v8/src/strings/OWNERS | 4 +-
 deps/v8/src/strings/char-predicates-inl.h | 12 +
 deps/v8/src/strings/char-predicates.h | 6 +
 deps/v8/src/strings/string-builder-inl.h | 11 +
 deps/v8/src/strings/string-builder.cc | 34 +
 deps/v8/src/strings/string-stream.cc | 5 +-
 deps/v8/src/tasks/OWNERS | 6 +
 deps/v8/src/third_party/siphash/OWNERS | 3 +
 deps/v8/src/third_party/utf8-decoder/OWNERS | 2 +
 deps/v8/src/third_party/valgrind/OWNERS | 1 +
 deps/v8/src/third_party/vtune/OWNERS | 1 +
 deps/v8/src/torque/ast.h | 226 +-
 deps/v8/src/torque/constants.h | 4 +
 deps/v8/src/torque/contextual.h | 18 +-
 deps/v8/src/torque/csa-generator.cc | 31 +-
 deps/v8/src/torque/declarable.h | 85 +-
 deps/v8/src/torque/declaration-visitor.cc | 23 +-
 deps/v8/src/torque/declaration-visitor.h | 12 +
 deps/v8/src/torque/declarations.cc | 12 +
 deps/v8/src/torque/declarations.h | 14 +-
 deps/v8/src/torque/earley-parser.cc | 25 +-
 deps/v8/src/torque/earley-parser.h | 4 +-
 deps/v8/src/torque/global-context.cc | 24 +
 deps/v8/src/torque/global-context.h | 35 +-
 deps/v8/src/torque/implementation-visitor.cc | 702 +-
 deps/v8/src/torque/implementation-visitor.h | 128 +-
 deps/v8/src/torque/ls/json-parser.cc | 2 +-
 deps/v8/src/torque/ls/message-handler.cc | 42 +-
 deps/v8/src/torque/ls/message-handler.h | 4 +-
 deps/v8/src/torque/ls/message-pipe.h | 2 +-
 deps/v8/src/torque/ls/message.h | 10 +-
 .../src/torque/ls/torque-language-server.cc | 6 +-
 deps/v8/src/torque/server-data.h | 1 +
 deps/v8/src/torque/source-positions.cc | 59 +
 deps/v8/src/torque/source-positions.h | 32 +-
 deps/v8/src/torque/torque-compiler.cc | 33 +-
 deps/v8/src/torque/torque-compiler.h | 3 +-
 deps/v8/src/torque/torque-parser.cc | 280 +-
 deps/v8/src/torque/torque.cc | 22 +-
 deps/v8/src/torque/type-oracle.cc | 10 +-
 deps/v8/src/torque/type-oracle.h | 30 +-
 deps/v8/src/torque/type-visitor.cc | 94 +-
 deps/v8/src/torque/types.cc | 22 +-
 deps/v8/src/torque/types.h | 23 +-
 deps/v8/src/torque/utils.cc | 38 +-
 deps/v8/src/torque/utils.h | 16 +-
 deps/v8/src/tracing/OWNERS | 2 +
 deps/v8/src/tracing/trace-event.h | 4 +-
 deps/v8/src/trap-handler/OWNERS | 2 -
 deps/v8/src/utils/OWNERS | 2 +
 deps/v8/src/utils/allocation.cc | 4 +-
 deps/v8/src/utils/allocation.h | 11 +-
 deps/v8/src/utils/splay-tree-inl.h | 292 -
 deps/v8/src/utils/splay-tree.h | 194 -
 deps/v8/src/utils/utils.h | 63 +-
 deps/v8/src/utils/vector.h | 5 +
 deps/v8/src/wasm/OWNERS | 2 -
 .../wasm/baseline/arm/liftoff-assembler-arm.h | 17 +-
 .../baseline/arm64/liftoff-assembler-arm64.h | 11 +-
 .../baseline/ia32/liftoff-assembler-ia32.h | 12 +-
 deps/v8/src/wasm/baseline/liftoff-assembler.h | 16 +-
 deps/v8/src/wasm/baseline/liftoff-compiler.cc | 251 +-
 deps/v8/src/wasm/baseline/liftoff-compiler.h | 32 +
 .../baseline/mips/liftoff-assembler-mips.h | 50 +-
 .../mips64/liftoff-assembler-mips64.h | 40 +-
 .../wasm/baseline/ppc/liftoff-assembler-ppc.h | 139 +-
 .../baseline/s390/liftoff-assembler-s390.h | 139 +-
 .../wasm/baseline/x64/liftoff-assembler-x64.h | 12 +-
 deps/v8/src/wasm/c-api.cc | 1058 +-
 deps/v8/src/wasm/decoder.h | 4 +-
 deps/v8/src/wasm/function-body-decoder-impl.h | 510 +-
 deps/v8/src/wasm/function-body-decoder.cc | 9 +-
 deps/v8/src/wasm/function-body-decoder.h | 2 +-
 deps/v8/src/wasm/function-compiler.cc | 116 +-
 deps/v8/src/wasm/function-compiler.h | 40 +-
 deps/v8/src/wasm/graph-builder-interface.cc | 18 +-
 deps/v8/src/wasm/js-to-wasm-wrapper-cache.h | 41 -
 deps/v8/src/wasm/jump-table-assembler.cc | 24 +-
 deps/v8/src/wasm/jump-table-assembler.h | 100 +-
 deps/v8/src/wasm/memory-tracing.cc | 6 +-
 deps/v8/src/wasm/module-compiler.cc | 335 +-
 deps/v8/src/wasm/module-compiler.h | 30 +
 deps/v8/src/wasm/module-decoder.cc | 223 +-
 deps/v8/src/wasm/module-decoder.h | 4 +-
 deps/v8/src/wasm/module-instantiate.cc | 212 +-
 deps/v8/src/wasm/value-type.h | 86 +-
 deps/v8/src/wasm/wasm-arguments.h | 73 +
 deps/v8/src/wasm/wasm-code-manager.cc | 103 +-
 deps/v8/src/wasm/wasm-code-manager.h | 19 +-
 deps/v8/src/wasm/wasm-constants.h | 6 +-
 deps/v8/src/wasm/wasm-debug.cc | 23 +-
 deps/v8/src/wasm/wasm-engine.cc | 74 +-
 deps/v8/src/wasm/wasm-engine.h | 7 +-
 deps/v8/src/wasm/wasm-external-refs.cc | 5 +-
 deps/v8/src/wasm/wasm-import-wrapper-cache.cc | 5 +
 deps/v8/src/wasm/wasm-import-wrapper-cache.h | 4 +
 deps/v8/src/wasm/wasm-interpreter.cc | 488 +-
 deps/v8/src/wasm/wasm-js.cc | 35 +-
 deps/v8/src/wasm/wasm-memory.cc | 3 +
 deps/v8/src/wasm/wasm-module-builder.cc | 314 +-
 deps/v8/src/wasm/wasm-module-builder.h | 54 +-
 deps/v8/src/wasm/wasm-module.h | 10 +-
 deps/v8/src/wasm/wasm-objects-inl.h | 104 +-
 deps/v8/src/wasm/wasm-objects.cc | 479 +-
 deps/v8/src/wasm/wasm-objects.h | 117 +-
 deps/v8/src/wasm/wasm-opcodes.cc | 42 +-
 deps/v8/src/wasm/wasm-opcodes.h | 54 +-
 deps/v8/src/wasm/wasm-result.cc | 18 +-
 deps/v8/src/wasm/wasm-serialization.cc | 2 +
 deps/v8/src/wasm/wasm-text.cc | 175 +-
 deps/v8/src/wasm/wasm-text.h | 11 +-
 deps/v8/src/wasm/wasm-value.h | 50 +-
 deps/v8/src/zone/OWNERS | 3 +
 deps/v8/src/zone/zone-allocator.h | 36 +-
 deps/v8/src/zone/zone-splay-tree.h | 38 -
 deps/v8/src/zone/zone.cc | 11 +-
 deps/v8/src/zone/zone.h | 8 +-
 deps/v8/test/OWNERS | 2 +-
 deps/v8/test/cctest/BUILD.gn | 57 +-
 deps/v8/test/cctest/DEPS | 3 +-
 deps/v8/test/cctest/OWNERS | 12 -
 deps/v8/test/cctest/cctest.cc | 16 +-
 deps/v8/test/cctest/cctest.status | 20 +-
 .../test/cctest/compiler/serializer-tester.cc | 16 +-
 .../cctest/compiler/test-code-assembler.cc | 17 +-
 .../cctest/compiler/test-code-generator.cc | 3 +-
 .../test-js-context-specialization.cc | 7 +-
 .../cctest/compiler/test-js-typed-lowering.cc | 9 +-
 .../cctest/compiler/test-jump-threading.cc | 26 +-
 .../cctest/compiler/test-loop-analysis.cc | 13 +-
 .../compiler/test-machine-operator-reducer.cc | 4 +-
 .../cctest/compiler/test-multiple-return.cc | 15 +-
 .../compiler/test-representation-change.cc | 20 +-
 .../cctest/compiler/test-run-native-calls.cc | 38 +-
 deps/v8/test/cctest/disasm-regex-helper.cc | 291 +
 deps/v8/test/cctest/disasm-regex-helper.h | 318 +
 deps/v8/test/cctest/heap/heap-utils.cc | 1 +
 deps/v8/test/cctest/heap/heap-utils.h | 16 +
 deps/v8/test/cctest/heap/test-compaction.cc | 5 +-
 .../test/cctest/heap/test-embedder-tracing.cc | 46 +-
 deps/v8/test/cctest/heap/test-heap.cc | 200 +-
 .../cctest/heap/test-invalidated-slots.cc | 2 +-
 deps/v8/test/cctest/heap/test-iterators.cc | 50 +-
 .../test/cctest/heap/test-page-promotion.cc | 3 +-
 deps/v8/test/cctest/heap/test-spaces.cc | 1 +
 .../test/cctest/heap/test-weak-references.cc | 178 +-
 .../bytecode-expectations-printer.cc | 9 +-
 .../bytecode-expectations-printer.h | 3 +-
 .../AsyncGenerators.golden | 14 +-
 .../DestructuringAssignment.golden | 42 +-
 .../bytecode_expectations/ForAwaitOf.golden | 50 +-
 .../bytecode_expectations/ForOf.golden | 56 +-
 .../bytecode_expectations/ForOfLoop.golden | 110 +-
 .../bytecode_expectations/Generators.golden | 14 +-
 .../PrivateMethods.golden | 133 +-
 .../generate-bytecode-expectations.cc | 144 +-
 .../interpreter/test-bytecode-generator.cc | 22 +-
 .../cctest/interpreter/test-interpreter.cc | 47 +-
 .../test/cctest/libplatform/test-tracing.cc | 433 +-
 .../cctest/log-eq-of-logging-and-traversal.js | 201 -
 deps/v8/test/cctest/test-allocation.cc | 2 +-
 deps/v8/test/cctest/test-api-array-buffer.cc | 529 +
 deps/v8/test/cctest/test-api-stack-traces.cc | 4 +-
 deps/v8/test/cctest/test-api-typed-array.cc | 661 +
 deps/v8/test/cctest/test-api.cc | 1552 +-
 deps/v8/test/cctest/test-api.h | 10 +
 deps/v8/test/cctest/test-assembler-arm.cc | 3 +-
 deps/v8/test/cctest/test-assembler-arm64.cc | 1058 +-
 deps/v8/test/cctest/test-assembler-mips.cc | 10 +-
 deps/v8/test/cctest/test-assembler-mips64.cc | 10 +-
 deps/v8/test/cctest/test-assembler-s390.cc | 117 +
 .../test/cctest/test-code-stub-assembler.cc | 49 +-
 deps/v8/test/cctest/test-compiler.cc | 4 +-
 deps/v8/test/cctest/test-cpu-profiler.cc | 1 +
 deps/v8/test/cctest/test-debug.cc | 26 +-
 deps/v8/test/cctest/test-disasm-ia32.cc | 2 +
 deps/v8/test/cctest/test-disasm-x64.cc | 13 +
 deps/v8/test/cctest/test-feedback-vector.cc | 2 +-
 .../test/cctest/test-field-type-tracking.cc | 198 +-
 deps/v8/test/cctest/test-global-handles.cc | 240 +-
 deps/v8/test/cctest/test-hashcode.cc | 8 +-
 deps/v8/test/cctest/test-heap-profiler.cc | 27 +-
 .../cctest/test-inobject-slack-tracking.cc | 16 +-
 deps/v8/test/cctest/test-inspector.cc | 15 +
 deps/v8/test/cctest/test-intl.cc | 2 +-
 deps/v8/test/cctest/test-js-weak-refs.cc | 4 +-
 deps/v8/test/cctest/test-liveedit.cc | 2 +-
 deps/v8/test/cctest/test-lockers.cc | 8 +-
 deps/v8/test/cctest/test-log-stack-tracer.cc | 1 +
 deps/v8/test/cctest/test-log.cc | 75 +-
 .../test/cctest/test-macro-assembler-arm64.cc | 97 +
 .../cctest/test-macro-assembler-mips64.cc | 10 +-
 deps/v8/test/cctest/test-parsing.cc | 24 +-
 deps/v8/test/cctest/test-poison-disasm-arm.cc | 283 +-
 .../test/cctest/test-poison-disasm-arm64.cc | 158 +
 deps/v8/test/cctest/test-profile-generator.cc | 9 +-
 deps/v8/test/cctest/test-regexp.cc | 212 +-
 deps/v8/test/cctest/test-roots.cc | 3 +-
 deps/v8/test/cctest/test-serialize.cc | 138 +-
 .../cctest/test-smi-lexicographic-compare.cc | 2 +-
 deps/v8/test/cctest/test-strings.cc | 10 +-
 .../v8/test/cctest/test-thread-termination.cc | 42 +-
 deps/v8/test/cctest/test-transitions.cc | 2 +-
 deps/v8/test/cctest/test-typedarrays.cc | 7 +-
 deps/v8/test/cctest/test-unboxed-doubles.cc | 6 +-
 deps/v8/test/cctest/torque/test-torque.cc | 88 +-
 deps/v8/test/cctest/wasm/test-c-wasm-entry.cc | 44 +-
 .../cctest/wasm/test-jump-table-assembler.cc | 2 +-
 .../test/cctest/wasm/test-run-wasm-asmjs.cc | 70 +-
 .../test/cctest/wasm/test-run-wasm-atomics.cc | 13 +
 .../cctest/wasm/test-run-wasm-bulk-memory.cc | 371 +-
 .../test/cctest/wasm/test-run-wasm-module.cc | 39 +-
 .../v8/test/cctest/wasm/test-run-wasm-simd.cc | 544 +-
 deps/v8/test/cctest/wasm/test-run-wasm.cc | 46 +-
 .../cctest/wasm/test-streaming-compilation.cc | 8 +-
 .../test/cctest/wasm/test-wasm-breakpoints.cc | 4 +-
 deps/v8/test/cctest/wasm/test-wasm-codegen.cc | 2 +-
 .../wasm/test-wasm-interpreter-entry.cc | 13 +-
 .../cctest/wasm/test-wasm-serialization.cc | 2 +-
 .../cctest/wasm/test-wasm-shared-engine.cc | 13 +-
 deps/v8/test/cctest/wasm/test-wasm-stack.cc | 29 +-
 .../cctest/wasm/test-wasm-trap-position.cc | 8 +-
 deps/v8/test/cctest/wasm/wasm-run-utils.cc | 33 +-
 deps/v8/test/cctest/wasm/wasm-run-utils.h | 5 +-
 deps/v8/test/common/types-fuzz.h | 5 +-
 deps/v8/test/common/wasm/OWNERS | 1 +
 deps/v8/test/common/wasm/test-signatures.h | 10 +-
 deps/v8/test/common/wasm/wasm-macro-gen.h | 23 +-
 .../v8/test/common/wasm/wasm-module-runner.cc | 8 +-
 deps/v8/test/debugger/OWNERS | 1 +
 .../debug/debug-evaluate-function-var.js | 42 +
 ...s => debug-modules-set-variable-value.mjs} | 6 +-
 ...g-scopes1.js => modules-debug-scopes1.mjs} | 2 -
 ...g-scopes2.js => modules-debug-scopes2.mjs} | 7 +-
 .../debugger/debug/regress/regress-5279.js | 1 +
 .../debug/regress/regress-crbug-387599.js | 6 +-
 deps/v8/test/debugger/regress/regress-7421.js | 1 +
 .../debugger/regress/regress-crbug-760225.js | 30 +
 deps/v8/test/debugger/test-api.js | 3 +
 deps/v8/test/debugger/testcfg.py | 13 +-
 deps/v8/test/fuzzer/multi-return.cc | 28 +-
 deps/v8/test/fuzzer/regexp-builtins.cc | 11 +-
 deps/v8/test/fuzzer/regexp.cc | 5 +-
 deps/v8/test/fuzzer/wasm-code.cc | 17 +-
 deps/v8/test/fuzzer/wasm-compile.cc | 166 +-
 deps/v8/test/fuzzer/wasm-fuzzer-common.cc | 4 +-
 deps/v8/test/fuzzer/wasm-fuzzer-common.h | 6 +-
 deps/v8/test/inspector/DEPS | 1 +
 deps/v8/test/inspector/OWNERS | 4 +-
 .../debugger/class-fields-scopes-expected.txt | 24 +-
 ...e-on-call-frame-return-values-expected.txt | 6 +-
 .../resource-name-to-url-expected.txt | 2 +-
 .../debugger/restart-frame-expected.txt | 10 +
 .../test/inspector/debugger/restart-frame.js | 33 +
 .../debugger/wasm-clone-module-expected.txt | 5 +
 .../inspector/debugger/wasm-clone-module.js | 40 +
 .../debugger/wasm-imports-expected.txt | 4 +-
 .../debugger/wasm-stack-expected.txt | 4 +-
 deps/v8/test/inspector/inspector-test.cc | 7 +-
 deps/v8/test/inspector/isolate-data.cc | 34 +-
 .../call-function-on-async-expected.txt | 4 +-
 .../inspector/runtime/es6-module-expected.txt | 44 +-
 deps/v8/test/inspector/runtime/es6-module.js | 76 +-
 .../runtime/evaluate-async-expected.txt | 6 +-
 .../runtime/exception-thrown-expected.txt | 10 +-
 .../exceptionthrown-on-connect-expected.txt | 4 +-
 .../runtime/query-objects-expected.txt | 3 +
 .../test/inspector/runtime/query-objects.js | 44 +
 .../runtime/run-script-async-expected.txt | 6 +-
 .../test/intl/general/supported-locales-of.js | 6 +-
 deps/v8/test/intl/intl.status | 3 +
 .../intl/number-format/property-override.js | 2 +
 .../unified/constructor-order.js | 16 +-
 .../number-format/unified/currency-display.js | 2 +-
 .../notation-engineering-formatToParts.js | 175 +
 .../notation-scientific-formatToParts.js | 177 +
 .../intl/number-format/unified/notation.js | 58 +-
 .../intl/number-format/unified/percent.js | 65 +
 .../number-format/unified/sign-display.js | 2 +-
 .../intl/number-format/unified/style-unit.js | 2 +-
 .../number-format/unified/unit-display.js | 2 +-
 deps/v8/test/intl/regress-8866.js | 11 +
 deps/v8/test/intl/regress-9312.js | 32 +
 deps/v8/test/intl/regress-9408.js | 28 +
 deps/v8/test/intl/regress-9513.js | 28 +
 .../intl/relative-time-format/format-en.js | 24 +-
 deps/v8/test/intl/testcfg.py | 3 +
 deps/v8/test/js-perf-test/BigInt/add.js | 142 +
 deps/v8/test/js-perf-test/BigInt/as-uint-n.js | 86 +
 .../test/js-perf-test/BigInt/bigint-util.js | 65 +
 deps/v8/test/js-perf-test/BigInt/run.js | 3 +
 .../test/js-perf-test/BigInt/test-config.js | 10 +
 .../BytecodeHandlers/LdaKeyedProperty.js | 40 +
 .../BytecodeHandlers/LdaNamedProperty.js | 60 +
 .../InterpreterEntryTrampoline/arguments.js | 119 +
 .../InterpreterEntryTrampoline/locals.js | 326 +
 .../InterpreterEntryTrampoline/run.js | 26 +
 deps/v8/test/js-perf-test/JSTests1.json | 43 +-
 deps/v8/test/js-perf-test/JSTests3.json | 60 +
 deps/v8/test/js-perf-test/JSTests4.json | 10 +-
 deps/v8/test/js-perf-test/JSTests5.json | 11 +-
 .../js-perf-test/ObjectFreeze/array-map.js | 32 +
 .../js-perf-test/ObjectFreeze/array-reduce.js | 59 +
 .../ObjectFreeze/has-own-property.js | 23 +
 deps/v8/test/js-perf-test/ObjectFreeze/run.js | 3 +
 deps/v8/test/js-perf-test/Proxies/proxies.js | 158 +
 deps/v8/test/js-perf-test/RegExp.json | 2 +
 .../test/js-perf-test/RegExp/RegExpTests.json | 2 +
 .../js-perf-test/RegExp/complex_case_test.js | 46 +
 deps/v8/test/js-perf-test/RegExp/run.js | 1 +
 .../message/fail/arrow-bare-rest-param.out | 4 +-
 deps/v8/test/message/fail/arrow-missing.out | 4 +-
 ... class-fields-private-throw-in-module.mjs} | 2 -
 .../class-fields-private-throw-in-module.out | 2 +-
 .../fail/class-methods-private-throw-write.js | 13 +
 .../class-methods-private-throw-write.out | 6 +
 .../message/fail/class-spread-property.out | 4 +-
 ...uplicate-as.js => export-duplicate-as.mjs} | 2 -
 .../test/message/fail/export-duplicate-as.out | 2 +-
 ...efault.js => export-duplicate-default.mjs} | 2 -
 .../message/fail/export-duplicate-default.out | 2 +-
 ...port-duplicate.js => export-duplicate.mjs} | 2 -
 .../v8/test/message/fail/export-duplicate.out | 2 +-
 .../fail/formal-parameters-trailing-comma.out | 4 +-
 .../{import-as-eval.js => import-as-eval.mjs} | 2 -
 deps/v8/test/message/fail/import-as-eval.out | 5 +-
 ...aration.js => import-as-redeclaration.mjs} | 2 -
 .../message/fail/import-as-redeclaration.out | 5 +-
 ...ed-word.js => import-as-reserved-word.mjs} | 2 -
 .../message/fail/import-as-reserved-word.out | 5 +-
 .../test/message/fail/import-blah-module.mjs | 5 +
 .../test/message/fail/import-blah-module.out | 4 +
 .../test/message/fail/import-blah-script.js | 5 +
 .../test/message/fail/import-blah-script.out | 7 +
 .../fail/{import-eval.js => import-eval.mjs} | 2 -
 deps/v8/test/message/fail/import-eval.out | 5 +-
 ...eclaration.js => import-redeclaration.mjs} | 2 -
 .../message/fail/import-redeclaration.out | 5 +-
 ...erved-word.js => import-reserved-word.mjs} | 2 -
 .../message/fail/import-reserved-word.out | 5 +-
 deps/v8/test/message/fail/import-script.js | 6 +
 deps/v8/test/message/fail/import-script.out | 7 +
 deps/v8/test/message/fail/invalid-spread.out | 4 +-
 .../{modules-cycle1.js => modules-cycle1.mjs} | 4 +-
 deps/v8/test/message/fail/modules-cycle1.out | 6 +-
 .../{modules-cycle2.js => modules-cycle2.mjs} | 4 +-
 deps/v8/test/message/fail/modules-cycle2.out | 6 +-
 deps/v8/test/message/fail/modules-cycle3.mjs | 6 +
 deps/v8/test/message/fail/modules-cycle3.out | 6 +-
 ...ules-skip-cycle2.js => modules-cycle4.mjs} | 2 +-
 deps/v8/test/message/fail/modules-cycle4.out | 7 +-
 deps/v8/test/message/fail/modules-cycle5.mjs | 6 +
 deps/v8/test/message/fail/modules-cycle5.out | 6 +-
 deps/v8/test/message/fail/modules-cycle6.js | 8 -
 .../fail/modules-cycle6.mjs} | 4 +-
 deps/v8/test/message/fail/modules-cycle6.out | 7 +-
 ...port2.js => modules-duplicate-export1.mjs} | 4 +-
 .../fail/modules-duplicate-export1.out | 2 +-
 ...port1.js => modules-duplicate-export2.mjs} | 4 +-
 .../fail/modules-duplicate-export2.out | 2 +-
 ...port3.js => modules-duplicate-export3.mjs} | 2 -
 .../fail/modules-duplicate-export3.out | 2 +-
 ...port4.js => modules-duplicate-export4.mjs} | 2 -
 .../fail/modules-duplicate-export4.out | 2 +-
 ...port5.js => modules-duplicate-export5.mjs} | 5 +-
 .../fail/modules-duplicate-export5.out | 5 +-
 ....js => modules-export-illformed-class.mjs} | 2 -
 .../fail/modules-export-illformed-class.out | 4 +-
 .../message/fail/modules-import-redeclare1.js | 8 -
 .../fail/modules-import-redeclare1.mjs | 6 +
 .../fail/modules-import-redeclare1.out | 4 +-
 .../message/fail/modules-import-redeclare2.js | 8 -
 .../fail/modules-import-redeclare2.mjs | 6 +
 .../fail/modules-import-redeclare2.out | 4 +-
 .../message/fail/modules-import-redeclare3.js | 8 -
 .../fail/modules-import-redeclare3.mjs | 6 +
 .../fail/modules-import-redeclare3.out | 2 +-
 deps/v8/test/message/fail/modules-import1.mjs | 5 +
 deps/v8/test/message/fail/modules-import1.out | 6 +-
 deps/v8/test/message/fail/modules-import2.js | 7 -
 deps/v8/test/message/fail/modules-import2.mjs | 5 +
 deps/v8/test/message/fail/modules-import2.out | 6 +-
 deps/v8/test/message/fail/modules-import3.js | 7 -
 deps/v8/test/message/fail/modules-import3.mjs | 5 +
 deps/v8/test/message/fail/modules-import3.out | 6 +-
 deps/v8/test/message/fail/modules-import4.js | 8 -
 deps/v8/test/message/fail/modules-import4.mjs | 6 +
 deps/v8/test/message/fail/modules-import4.out | 6 +-
 deps/v8/test/message/fail/modules-import5.js | 9 -
 ...{modules-cycle3.js => modules-import5.mjs} | 7 +-
 deps/v8/test/message/fail/modules-import5.out | 6 +-
 ...modules-import6.js => modules-import6.mjs} | 6 +-
 deps/v8/test/message/fail/modules-import6.out | 6 +-
 .../test/message/fail/modules-skip-cycle2.mjs | 5 +
 .../test/message/fail/modules-skip-cycle3.js | 6 -
 .../test/message/fail/modules-skip-cycle3.mjs | 6 +
 .../fail/modules-skip-cycle5.mjs} | 2 +-
 .../fail/modules-skip-cycle6.mjs} | 2 +-
 .../message/fail/modules-star-conflict1.js | 7 -
 ...-import1.js => modules-star-conflict1.mjs} | 4 +-
 .../message/fail/modules-star-conflict1.out | 6 +-
 ...onflict2.js => modules-star-conflict2.mjs} | 8 +-
 .../message/fail/modules-star-conflict2.out | 6 +-
 ...ar-default.js => modules-star-default.mjs} | 4 +-
 .../message/fail/modules-star-default.out | 6 +-
 ...port1.js => modules-undefined-export1.mjs} | 2 -
 .../fail/modules-undefined-export1.out | 2 +-
 ...port2.js => modules-undefined-export2.mjs} | 2 -
 .../fail/modules-undefined-export2.out | 2 +-
 .../message/fail/new-target-assignment.out | 4 +-
 .../message/fail/new-target-postfix-op.out | 4 +-
 .../message/fail/new-target-prefix-op.out | 4 +-
 .../{redeclaration5.js => redeclaration5.mjs} | 2 -
 deps/v8/test/message/fail/redeclaration5.out | 2 +-
 .../message/fail/wasm-exception-rethrow.out | 2 +-
 .../message/fail/wasm-exception-throw.out | 2 +-
 .../test/message/fail/wasm-function-name.out | 2 +-
 .../fail/wasm-module-and-function-name.out | 2 +-
 .../v8/test/message/fail/wasm-module-name.out | 2 +-
 deps/v8/test/message/fail/wasm-no-name.out | 2 +-
 deps/v8/test/message/fail/wasm-trap.out | 2 +-
 .../test/message/fail/weak-refs-unregister.js | 8 +
 .../message/fail/weak-refs-unregister.out | 6 +
 .../message/mjsunit/fail/assert_not_same.js | 7 +
 .../message/mjsunit/fail/assert_not_same.out | 10 +
 .../message/regress/fail/regress-8409.out | 4 +-
 .../{regress-900383.js => regress-900383.mjs} | 2 -
 .../message/regress/fail/regress-900383.out | 2 +-
 deps/v8/test/message/testcfg.py | 13 +-
 .../test/message/wasm-function-name-async.out | 2 +-
 .../message/wasm-function-name-streaming.out | 2 +-
 .../wasm-module-and-function-name-async.out | 2 +-
 ...asm-module-and-function-name-streaming.out | 2 +-
 .../test/message/wasm-module-name-async.out | 2 +-
 .../message/wasm-module-name-streaming.out | 2 +-
 deps/v8/test/message/wasm-no-name-async.out | 2 +-
 .../test/message/wasm-no-name-streaming.out | 2 +-
 deps/v8/test/mjsunit/allocation-folding.js | 5 +
 deps/v8/test/mjsunit/arguments-apply-deopt.js | 2 +
 deps/v8/test/mjsunit/arguments-deopt.js | 5 +
 .../mjsunit/array-bounds-check-removal.js | 9 +-
 .../mjsunit/array-constructor-feedback.js | 3 +-
 deps/v8/test/mjsunit/array-non-smi-length.js | 2 +
 deps/v8/test/mjsunit/array-pop.js | 1 +
 deps/v8/test/mjsunit/array-push.js | 2 +-
 deps/v8/test/mjsunit/array-push4.js | 1 +
 deps/v8/test/mjsunit/array-push6.js | 2 +
 deps/v8/test/mjsunit/array-push8.js | 3 +
 deps/v8/test/mjsunit/array-reduce.js | 202 +-
 deps/v8/test/mjsunit/array-shift3.js | 9 +-
 deps/v8/test/mjsunit/array-shift5.js | 96 +-
 deps/v8/test/mjsunit/array-sort.js | 15 +-
 deps/v8/test/mjsunit/asm/regress-9531.js | 28 +
 .../test/mjsunit/async-stack-traces-realms.js | 115 +
 .../test/mjsunit/bounds-checks-elimination.js | 1 +
 deps/v8/test/mjsunit/closures.js | 5 +-
 .../test/mjsunit/code-coverage-block-noopt.js | 18 +-
 .../test/mjsunit/code-coverage-block-opt.js | 18 +-
 deps/v8/test/mjsunit/compare-objects.js | 3 +
 .../mjsunit/comparison-ops-and-undefined.js | 1 +
 .../compiler/array-multiple-receiver-maps.js | 78 +-
 .../compiler/bigint-add-no-deopt-loop.js | 36 +
 deps/v8/test/mjsunit/compiler/bigint-add.js | 26 +
 .../mjsunit/compiler/bigint-int64-lowered.js | 82 +
 .../v8/test/mjsunit/compiler/bigint-negate.js | 26 +
 .../compiler/constant-fold-add-static.js | 17 +-
 .../test/mjsunit/compiler/dataview-deopt.js | 2 +-
 .../compiler/generator-jump-targets.js | 21 +
 .../mjsunit/compiler/keyed-load-on-string.js | 8 +-
 .../compiler/load-elimination-const-field.js | 14 +-
 .../compiler/manual-concurrent-recompile.js | 16 +-
 deps/v8/test/mjsunit/compiler/named-store.js | 19 +-
 .../test/mjsunit/compiler/optimized-for-in.js | 1 +
 .../v8/test/mjsunit/compiler/osr-alignment.js | 6 +-
 .../v8/test/mjsunit/compiler/osr-arguments.js | 1 +
 .../v8/test/mjsunit/compiler/osr-array-len.js | 4 +-
 .../mjsunit/compiler/osr-block-scope-func.js | 3 +
 .../mjsunit/compiler/osr-block-scope-id.js | 1 +
 .../test/mjsunit/compiler/osr-block-scope.js | 21 +-
 deps/v8/test/mjsunit/compiler/osr-for-let.js | 9 +-
 .../test/mjsunit/compiler/osr-forin-nested.js | 2 +
 deps/v8/test/mjsunit/compiler/osr-infinite.js | 1 +
 deps/v8/test/mjsunit/compiler/osr-labeled.js | 10 +-
 .../mjsunit/compiler/osr-literals-adapted.js | 5 +
 deps/v8/test/mjsunit/compiler/osr-literals.js | 5 +
 deps/v8/test/mjsunit/compiler/osr-manual1.js | 2 +-
 deps/v8/test/mjsunit/compiler/osr-manual2.js | 2 +-
 deps/v8/test/mjsunit/compiler/osr-maze1.js | 9 +
 deps/v8/test/mjsunit/compiler/osr-maze2.js | 2 +-
 deps/v8/test/mjsunit/compiler/osr-nested.js | 1 +
 deps/v8/test/mjsunit/compiler/osr-regex-id.js | 7 +
 deps/v8/test/mjsunit/compiler/osr-simple.js | 2 +-
 deps/v8/test/mjsunit/compiler/osr-top1.js | 22 +-
 deps/v8/test/mjsunit/compiler/osr-top2.js | 25 +-
 deps/v8/test/mjsunit/compiler/osr-top3.js | 27 +-
 .../v8/test/mjsunit/compiler/osr-try-catch.js | 3 +
 deps/v8/test/mjsunit/compiler/osr-two.js | 1 +
 .../v8/test/mjsunit/compiler/osr-while-let.js | 2 +
 .../mjsunit/compiler/promise-constructor.js | 1 +
 .../test/mjsunit/compiler/regress-607493.js | 10 +-
 .../test/mjsunit/compiler/regress-645851.js | 1 +
 deps/v8/test/mjsunit/compiler/regress-9041.js | 26 +-
 deps/v8/test/mjsunit/compiler/regress-9087.js | 4 +-
 .../test/mjsunit/compiler/regress-9137-1.js | 1 +
 .../test/mjsunit/compiler/regress-9137-2.js | 1 +
 .../test/mjsunit/compiler/regress-935092.js | 19 +-
 .../test/mjsunit/compiler/regress-939316.js | 11 +-
 .../test/mjsunit/compiler/regress-944062-1.js | 6 +-
 .../test/mjsunit/compiler/regress-944062-2.js | 5 +-
 .../test/mjsunit/compiler/regress-945187.js | 6 +-
 .../test/mjsunit/compiler/regress-945644.js | 13 +-
 .../test/mjsunit/compiler/regress-946889.js | 4 +-
 .../test/mjsunit/compiler/regress-949435.js | 6 +-
 .../test/mjsunit/compiler/regress-952586.js | 4 +-
 .../test/mjsunit/compiler/regress-957559.js | 6 +-
 .../test/mjsunit/compiler/regress-958021.js | 7 +-
 .../test/mjsunit/compiler/regress-958420.js | 4 +-
 .../test/mjsunit/compiler/regress-958716.js | 6 +-
 .../test/mjsunit/compiler/regress-961986.js | 4 +-
 .../test/mjsunit/compiler/regress-966560-1.js | 9 +-
 .../test/mjsunit/compiler/regress-966560-2.js | 1 +
 .../test/mjsunit/compiler/regress-977670.js | 21 +
 .../compiler/regress-closures-with-eval.js | 1 +
 .../mjsunit/compiler/regress-crbug-965513.js | 4 +-
 .../mjsunit/compiler/regress-crbug-974474.js | 18 +
 .../mjsunit/compiler/regress-crbug-974476.js | 34 +
 .../test/mjsunit/compiler/regress-v8-9113.js | 8 +-
 .../test/mjsunit/compiler/tagged-template.js | 51 +
 .../test/mjsunit/compiler/typedarray-keyed.js | 21 +-
 .../mjsunit/constant-compare-nil-value.js | 10 +-
 .../constant-fold-control-instructions.js | 2 +-
 deps/v8/test/mjsunit/constant-folding-2.js | 1 +
 .../mjsunit/cross-realm-global-prototype.js | 1 +
 deps/v8/test/mjsunit/default-nospec.js | 13 +
 deps/v8/test/mjsunit/deopt-global-accessor.js | 10 +-
 deps/v8/test/mjsunit/deopt-minus-zero.js | 2 +
 deps/v8/test/mjsunit/deopt-unlinked.js | 4 +-
 .../test/mjsunit/deopt-with-outer-context.js | 8 +-
 deps/v8/test/mjsunit/div-mod.js | 1 +
 deps/v8/test/mjsunit/double-truncation.js | 4 +-
 deps/v8/test/mjsunit/element-accessor.js | 44 +-
 deps/v8/test/mjsunit/elements-kind-depends.js | 2 +
 deps/v8/test/mjsunit/elements-kind.js | 2 +-
 .../test/mjsunit/elide-double-hole-check-2.js | 10 +-
 .../test/mjsunit/elide-double-hole-check-3.js | 5 +-
 .../test/mjsunit/elide-double-hole-check-9.js | 6 +-
 deps/v8/test/mjsunit/error-stack.js | 28 +
 deps/v8/test/mjsunit/es6/classes-accesors.js | 97 +
 .../test/mjsunit/es6/classes-constructor.js | 131 +
 .../test/mjsunit/es6/classes-name-binding.js | 98 +
 deps/v8/test/mjsunit/es6/classes-proto.js | 152 +
 .../es6/classes-restricted-properties.js | 165 +
 .../v8/test/mjsunit/es6/classes-test-super.js | 120 +
 deps/v8/test/mjsunit/es6/classes.js | 823 -
 deps/v8/test/mjsunit/es6/iterator-close.js | 16 +-
 .../test/mjsunit/es6/large-classes-methods.js | 38 +
 .../mjsunit/es6/large-classes-properties.js | 39 +
 .../es6/large-classes-static-methods.js | 44 +
 deps/v8/test/mjsunit/es6/map-iterator-8.js | 2 +-
 deps/v8/test/mjsunit/es6/map-iterator-9.js | 2 +-
 deps/v8/test/mjsunit/es6/math-trunc.js | 2 +-
 deps/v8/test/mjsunit/es6/new-target.js | 14 +-
 .../mjsunit/es6/proxies-prevent-extensions.js | 134 +-
 deps/v8/test/mjsunit/es6/set-iterator-8.js | 2 +-
 deps/v8/test/mjsunit/es6/set-iterator-9.js | 2 +-
 deps/v8/test/mjsunit/es6/symbols.js | 2 +-
 deps/v8/test/mjsunit/es6/templates.js | 20 +-
 .../mjsunit/es7/exponentiation-operator.js | 12 +-
 .../test/mjsunit/es7/regress/regress-5986.js | 18 +-
 .../test/mjsunit/es9/object-spread-basic.js | 5 +
 deps/v8/test/mjsunit/expose-cputracemark.js | 37 +
 deps/v8/test/mjsunit/external-array.js | 18 +-
 .../v8/test/mjsunit/fast-element-smi-check.js | 8 +-
 deps/v8/test/mjsunit/fast-literal.js | 10 +-
 deps/v8/test/mjsunit/frozen-array-reduce.js | 1420 ++
 .../mjsunit/function-named-self-reference.js | 7 +-
 deps/v8/test/mjsunit/global-infinity.js | 1 +
 .../harmony/bigint/rematerialize-on-deopt.js | 28 +
 .../mjsunit/harmony/global-configurable.js | 2 -
 .../test/mjsunit/harmony/global-writable.js | 2 -
 deps/v8/test/mjsunit/harmony/global.js | 2 -
 deps/v8/test/mjsunit/harmony/hashbang-eval.js | 2 -
 .../import-from-compilation-errored.js | 4 +-
 .../harmony/import-from-evaluation-errored.js | 4 +-
 .../import-from-instantiation-errored.js | 4 +-
 ...arsing-eval.js => module-parsing-eval.mjs} | 0
 ...dules-import-1.js => modules-import-1.mjs} | 2 +-
 ...les-import-10.js => modules-import-10.mjs} | 2 +-
 ...les-import-11.js => modules-import-11.mjs} | 2 +-
 ...les-import-12.js => modules-import-12.mjs} | 4 +-
 ...les-import-13.js => modules-import-13.mjs} | 8 +-
 ...les-import-14.js => modules-import-14.mjs} | 8 +-
 ...les-import-15.js => modules-import-15.mjs} | 8 +-
 .../test/mjsunit/harmony/modules-import-16.js | 36 -
 .../mjsunit/harmony/modules-import-16.mjs | 36 +
 ...les-import-17.js => modules-import-17.mjs} | 2 +-
 ...dules-import-2.js => modules-import-2.mjs} | 4 +-
 ...dules-import-3.js => modules-import-3.mjs} | 2 +-
 ...dules-import-4.js => modules-import-4.mjs} | 0
 ...dules-import-5.js => modules-import-5.mjs} | 4 +-
 ...dules-import-6.js => modules-import-6.mjs} | 4 +-
 ...dules-import-7.js => modules-import-7.mjs} | 2 +-
 ...dules-import-8.js => modules-import-8.mjs} | 0
 ...dules-import-9.js => modules-import-9.mjs} | 2 +-
 ...port-large.js => modules-import-large.mjs} | 6 +-
 ...import-meta.js => modules-import-meta.mjs} | 8 +-
 ...espace.js => modules-import-namespace.mjs} | 4 +-
 .../{modules-skip-1.js => modules-skip-1.mjs} | 0
 .../harmony/modules-skip-10.mjs} | 2 +-
 ...modules-skip-11.js => modules-skip-11.mjs} | 0
 ...modules-skip-12.js => modules-skip-12.mjs} | 0
 ...modules-skip-13.js => modules-skip-13.mjs} | 2 +-
 .../{modules-skip-2.js => modules-skip-2.mjs} | 0
 .../{modules-skip-3.js => modules-skip-3.mjs} | 0
 .../test/mjsunit/harmony/modules-skip-4.mjs | 6 +
 .../{modules-skip-5.js => modules-skip-5.mjs} | 6 +-
 .../{modules-skip-6.js => modules-skip-6.mjs} | 0
 .../{modules-skip-7.js => modules-skip-7.mjs} | 2 +-
 .../{modules-skip-8.js => modules-skip-8.mjs} | 0
 .../test/mjsunit/harmony/modules-skip-9.mjs | 5 +
 ...s-skip-empty.js => modules-skip-empty.mjs} | 0
 ...js => modules-skip-export-import-meta.mjs} | 0
 ...skip-large1.js => modules-skip-large1.mjs} | 0
 ...skip-large2.js => modules-skip-large2.mjs} | 0
 .../mjsunit/harmony/object-fromentries.js | 2 -
 .../harmony/private-fields-special-object.js | 2 +-
 .../test/mjsunit/harmony/private-methods.js | 228 +-
 .../mjsunit/harmony/regexp-overriden-exec.js | 26 +
 .../mjsunit/harmony/regress/regress-912504.js | 2 +-
 .../harmony/sharedarraybuffer-stress.js | 36 +
 .../sharedarraybuffer-worker-gc-stress.js | 22 +
 .../test/mjsunit/harmony/weakrefs/basics.js | 32 +-
 .../mjsunit/harmony/weakrefs/cleanupsome-2.js | 31 +
 .../mjsunit/harmony/weakrefs/cleanupsome.js | 10 +-
 .../weakrefs/unregister-after-cleanup.js | 3 +-
 .../weakrefs/unregister-before-cleanup.js | 4 +-
 .../weakrefs/unregister-called-twice.js | 6 +-
 .../weakrefs/unregister-inside-cleanup1.js | 3 +-
 .../weakrefs/unregister-inside-cleanup2.js | 4 +-
 .../weakrefs/unregister-inside-cleanup3.js | 3 +-
 .../weakrefs/unregister-inside-cleanup4.js | 6 +-
 .../weakrefs/unregister-inside-cleanup5.js | 48 +
 .../harmony/weakrefs/unregister-many.js | 3 +-
 ...register-when-cleanup-already-scheduled.js | 3 +-
 deps/v8/test/mjsunit/has-own-property.js | 133 +
 deps/v8/test/mjsunit/hash-code.js | 6 +-
 .../mjsunit/ignition/optimized-stack-trace.js | 4 +-
 .../mjsunit/ignition/osr-from-bytecode.js | 2 +-
 .../mjsunit/ignition/osr-from-generator.js | 1 +
 deps/v8/test/mjsunit/invalid-lhs.js | 44 +-
 deps/v8/test/mjsunit/json2.js | 2 +-
 ...port.js => keyed-has-ic-module-export.mjs} | 2 -
 ...port.js => keyed-has-ic-module-import.mjs} | 4 +-
 deps/v8/test/mjsunit/keyed-has-ic.js | 14 +-
 deps/v8/test/mjsunit/keyed-named-access.js | 5 +
 .../test/mjsunit/keyed-store-array-literal.js | 5 +
 deps/v8/test/mjsunit/large-object-literal.js | 13 +-
 deps/v8/test/mjsunit/math-ceil.js | 1 +
 deps/v8/test/mjsunit/math-floor-negative.js | 12 +-
 deps/v8/test/mjsunit/math-floor-of-div.js | 326 +-
 deps/v8/test/mjsunit/math-floor-part2.js | 5 +
 deps/v8/test/mjsunit/math-floor-part3.js | 5 +
 deps/v8/test/mjsunit/math-imul.js | 18 +-
 deps/v8/test/mjsunit/mjsunit.js | 18 +-
 deps/v8/test/mjsunit/mjsunit.status | 56 +-
 .../v8/test/mjsunit/modules-circular-valid.js | 7 -
 .../modules-circular-valid.mjs} | 4 +-
 deps/v8/test/mjsunit/modules-cycle.js | 8 -
 deps/v8/test/mjsunit/modules-cycle.mjs | 6 +
 ...ult-name1.js => modules-default-name1.mjs} | 4 +-
 ...ult-name2.js => modules-default-name2.mjs} | 4 +-
 ...ult-name6.js => modules-default-name3.mjs} | 4 +-
 ...ult-name5.js => modules-default-name4.mjs} | 4 +-
 ...ult-name4.js => modules-default-name5.mjs} | 4 +-
 ...ult-name8.js => modules-default-name6.mjs} | 4 +-
 ...ult-name7.js => modules-default-name7.mjs} | 4 +-
 ...ult-name3.js => modules-default-name8.mjs} | 4 +-
 ...ult-name9.js => modules-default-name9.mjs} | 4 +-
 ...modules-default.js => modules-default.mjs} | 6 +-
 ...y-import1.js => modules-empty-import1.mjs} | 6 +-
 ...y-import2.js => modules-empty-import2.mjs} | 6 +-
 ...y-import3.js => modules-empty-import3.mjs} | 6 +-
 deps/v8/test/mjsunit/modules-empty-import4.js | 11 -
 .../v8/test/mjsunit/modules-empty-import4.mjs | 9 +
 ...error-trace.js => modules-error-trace.mjs} | 2 -
 ...tar-as1.js => modules-export-star-as1.mjs} | 4 +-
 ...tar-as2.js => modules-export-star-as2.mjs} | 10 +-
 ...tar-as3.js => modules-export-star-as3.mjs} | 6 +-
 ...dules-exports1.js => modules-exports1.mjs} | 2 -
 ...dules-exports2.js => modules-exports2.mjs} | 2 -
 ...dules-exports3.js => modules-exports3.mjs} | 2 -
 ...dules-imports1.js => modules-imports1.mjs} | 4 +-
 ...dules-imports2.js => modules-imports2.mjs} | 4 +-
 ...dules-imports3.js => modules-imports3.mjs} | 6 +-
 ...dules-imports4.js => modules-imports4.mjs} | 8 +-
 ...dules-imports5.js => modules-imports5.mjs} | 4 +-
 ...dules-imports6.js => modules-imports6.mjs} | 6 +-
 ...dules-imports7.js => modules-imports7.mjs} | 4 +-
 ...dules-imports8.js => modules-imports8.mjs} | 4 +-
 .../{modules-init1.js => modules-init1.mjs} | 4 +-
 .../{modules-init2.js => modules-init2.mjs} | 4 +-
 .../{modules-init3.js => modules-init3.mjs} | 4 +-
 deps/v8/test/mjsunit/modules-init4.js | 8 -
 .../modules-init4.mjs} | 6 +-
 ... => modules-namespace-getownproperty1.mjs} | 4 +-
 ... => modules-namespace-getownproperty2.mjs} | 6 +-
 ...s-namespace1.js => modules-namespace1.mjs} | 6 +-
 ...s-namespace2.js => modules-namespace2.mjs} | 6 +-
 ...s-namespace3.js => modules-namespace3.mjs} | 6 +-
 ...s-namespace4.js => modules-namespace4.mjs} | 4 +-
 ...dules-preparse.js => modules-preparse.mjs} | 2 -
 ...tive-path.js => modules-relative-path.mjs} | 4 +-
 .../{modules-skip-1.js => modules-skip-1.mjs} | 0
 .../{modules-skip-2.js => modules-skip-2.mjs} | 4 +-
 .../{modules-skip-3.js => modules-skip-3.mjs} | 2 +-
 deps/v8/test/mjsunit/modules-skip-4.js | 6 -
 deps/v8/test/mjsunit/modules-skip-4.mjs | 6 +
 .../{modules-skip-5.js => modules-skip-5.mjs} | 0
 .../{modules-skip-6.js => modules-skip-6.mjs} | 4 +-
 deps/v8/test/mjsunit/modules-skip-7.js | 6 -
 deps/v8/test/mjsunit/modules-skip-7.mjs | 6 +
 .../{modules-skip-8.js => modules-skip-8.mjs} | 2 +-
 .../{modules-skip-9.js => modules-skip-9.mjs} | 4 +-
 ...lid.js => modules-skip-circular-valid.mjs} | 2 +-
 .../modules-skip-cycle.mjs} | 2 +-
 ...ame1.js => modules-skip-default-name1.mjs} | 0
 ...ame2.js => modules-skip-default-name2.mjs} | 0
 ...ame3.js => modules-skip-default-name3.mjs} | 0
 ...ame4.js => modules-skip-default-name4.mjs} | 0
 ...ame5.js => modules-skip-default-name5.mjs} | 0
 ...ame6.js => modules-skip-default-name6.mjs} | 0
 ...ame7.js => modules-skip-default-name7.mjs} | 0
 ...ame8.js => modules-skip-default-name8.mjs} | 0
 ...ame9.js => modules-skip-default-name9.mjs} | 0
 ...x.js => modules-skip-empty-import-aux.mjs} | 0
 ...mport.js => modules-skip-empty-import.mjs} | 2 +-
 ...s-skip-init1.js => modules-skip-init1.mjs} | 2 +-
 ...s-skip-init3.js => modules-skip-init3.mjs} | 2 +-
 ...skip-init4a.js => modules-skip-init4a.mjs} | 2 +-
 ...skip-init4b.js => modules-skip-init4b.mjs} | 2 +-
 ...amespace.js => modules-skip-namespace.mjs} | 6 +-
 .../modules-skip-star-exports-conflict.js | 6 -
 .../modules-skip-star-exports-conflict.mjs | 6 +
 .../modules-skip-star-exports-cycle.js | 6 -
 .../modules-skip-star-exports-cycle.mjs | 6 +
 ...ycle.js => modules-star-exports-cycle.mjs} | 4 +-
 .../{modules-this.js => modules-this.mjs} | 2 -
 .../{modules-turbo1.js => modules-turbo1.mjs} | 2 -
 .../{modules-turbo2.js => modules-turbo2.mjs} | 2 -
 deps/v8/test/mjsunit/nans.js | 1 +
 .../mjsunit/non-extensible-array-reduce.js | 1420 ++
 deps/v8/test/mjsunit/noopt.js | 6 +-
 .../test/mjsunit/number-tostring-subnormal.js | 92 +
 .../v8/test/mjsunit/object-define-property.js | 7 +
 deps/v8/test/mjsunit/object-freeze.js | 558 +
 .../test/mjsunit/object-literal-overwrite.js | 61 +-
 deps/v8/test/mjsunit/object-literal.js | 2 +
 .../test/mjsunit/object-prevent-extensions.js | 475 +
 deps/v8/test/mjsunit/object-seal.js | 529 +-
 .../v8/test/mjsunit/omit-constant-mapcheck.js | 20 +-
 deps/v8/test/mjsunit/optimized-array-every.js | 23 +
 deps/v8/test/mjsunit/optimized-array-find.js | 20 +
 .../test/mjsunit/optimized-array-findindex.js | 20 +
 deps/v8/test/mjsunit/optimized-array-some.js | 22 +
 .../test/mjsunit/optimized-foreach-holey-3.js | 51 +-
 deps/v8/test/mjsunit/optimized-foreach.js | 557 +-
 deps/v8/test/mjsunit/optimized-reduceright.js | 91 +-
 deps/v8/test/mjsunit/optimized-typeof.js | 12 +-
 .../test/mjsunit/outobject-double-for-in.js | 1 +
 deps/v8/test/mjsunit/packed-elements.js | 1 +
 deps/v8/test/mjsunit/polymorph-arrays.js | 2 +
 deps/v8/test/mjsunit/prototype-changes.js | 18 +-
 deps/v8/test/mjsunit/readonly.js | 87 +-
 deps/v8/test/mjsunit/recursive-store-opt.js | 4 +-
 deps/v8/test/mjsunit/regexp.js | 11 +
 deps/v8/test/mjsunit/regress-958725.js | 7 +-
 .../regress/binop-in-effect-context-deopt.js | 7 +-
 .../call-function-in-effect-context-deopt.js | 5 +-
 .../test/mjsunit/regress/compare-map-elim1.js | 14 +-
 .../comparison-in-effect-context-deopt.js | 8 +-
 .../regress/consolidated-holey-load.js | 6 +-
 .../test/mjsunit/regress/cross-script-vars.js | 7 +-
 .../regress/internalized-string-not-equal.js | 7 +-
 deps/v8/test/mjsunit/regress/math-min.js | 24 +-
 .../regress/number-named-call-deopt.js | 10 +-
 .../mjsunit/regress/poly_count_operation.js | 7 +
 .../polymorphic-accessor-test-context.js | 30 +-
 .../regress/post-increment-close-context.js | 5 +-
 .../v8/test/mjsunit/regress/regress-100409.js | 11 +-
 deps/v8/test/mjsunit/regress/regress-1079.js | 1 +
 .../v8/test/mjsunit/regress/regress-108296.js | 12 +-
 deps/v8/test/mjsunit/regress/regress-1099.js | 1 +
 .../v8/test/mjsunit/regress/regress-110509.js | 4 +-
 deps/v8/test/mjsunit/regress/regress-1106.js | 20 +-
 deps/v8/test/mjsunit/regress/regress-1117.js | 22 +-
 deps/v8/test/mjsunit/regress/regress-1118.js | 5 +-
 .../v8/test/mjsunit/regress/regress-115100.js | 9 +-
 deps/v8/test/mjsunit/regress/regress-1166.js | 10 +-
 deps/v8/test/mjsunit/regress/regress-1167.js | 19 +-
 deps/v8/test/mjsunit/regress/regress-1210.js | 12 +-
 .../v8/test/mjsunit/regress/regress-123512.js | 8 +-
 deps/v8/test/mjsunit/regress/regress-1237.js | 10 +-
 .../v8/test/mjsunit/regress/regress-123919.js | 10 +-
 .../v8/test/mjsunit/regress/regress-124594.js | 10 +-
 deps/v8/test/mjsunit/regress/regress-1323.js | 7 +-
 deps/v8/test/mjsunit/regress/regress-1337.js | 22 +-
 deps/v8/test/mjsunit/regress/regress-1351.js | 6 +-
 .../v8/test/mjsunit/regress/regress-137768.js | 6 +-
 deps/v8/test/mjsunit/regress/regress-1412.js | 15 +-
 deps/v8/test/mjsunit/regress/regress-1423.js | 4 +-
 deps/v8/test/mjsunit/regress/regress-1434.js | 4 +-
 deps/v8/test/mjsunit/regress/regress-1476.js | 12 +-
 deps/v8/test/mjsunit/regress/regress-1521.js | 8 +-
 deps/v8/test/mjsunit/regress/regress-1560.js | 4 +-
 deps/v8/test/mjsunit/regress/regress-1563.js | 4 +-
 deps/v8/test/mjsunit/regress/regress-1582.js | 2 +
 deps/v8/test/mjsunit/regress/regress-1583.js | 1 +
 deps/v8/test/mjsunit/regress/regress-1592.js | 6 +-
 .../v8/test/mjsunit/regress/regress-164442.js | 4 +-
 deps/v8/test/mjsunit/regress/regress-1647.js | 13 +-
 deps/v8/test/mjsunit/regress/regress-1650.js | 12 +-
 .../v8/test/mjsunit/regress/regress-166379.js | 8 +-
 .../v8/test/mjsunit/regress/regress-171641.js | 4 +-
 deps/v8/test/mjsunit/regress/regress-1898.js | 4 +-
 .../v8/test/mjsunit/regress/regress-201590.js | 1 +
 deps/v8/test/mjsunit/regress/regress-2030.js | 4 +-
 deps/v8/test/mjsunit/regress/regress-2045.js | 8 +-
 deps/v8/test/mjsunit/regress/regress-2056.js | 44 +-
 deps/v8/test/mjsunit/regress/regress-2110.js | 8 +-
 deps/v8/test/mjsunit/regress/regress-2170.js | 6 +-
 deps/v8/test/mjsunit/regress/regress-2193.js | 4 +
 deps/v8/test/mjsunit/regress/regress-2234.js | 4 +-
 deps/v8/test/mjsunit/regress/regress-2250.js | 2 +
 deps/v8/test/mjsunit/regress/regress-2261.js | 18 +-
 deps/v8/test/mjsunit/regress/regress-2294.js | 4 +-
 deps/v8/test/mjsunit/regress/regress-2315.js | 2 +
 .../v8/test/mjsunit/regress/regress-234101.js | 6 +-
 .../v8/test/mjsunit/regress/regress-247688.js | 30 +-
 deps/v8/test/mjsunit/regress/regress-2489.js | 6 +-
 deps/v8/test/mjsunit/regress/regress-2499.js | 8 +-
 deps/v8/test/mjsunit/regress/regress-2537.js | 8 +-
 deps/v8/test/mjsunit/regress/regress-2539.js | 11 +-
 deps/v8/test/mjsunit/regress/regress-2595.js | 23 +-
 deps/v8/test/mjsunit/regress/regress-2596.js | 6 +-
 deps/v8/test/mjsunit/regress/regress-2612.js | 2 +
 deps/v8/test/mjsunit/regress/regress-2618.js | 10 +-
 .../v8/test/mjsunit/regress/regress-264203.js | 2 +
 .../v8/test/mjsunit/regress/regress-2671-1.js | 2 +
 deps/v8/test/mjsunit/regress/regress-2671.js | 2 +
 deps/v8/test/mjsunit/regress/regress-2758.js | 1 +
 deps/v8/test/mjsunit/regress/regress-2813.js | 4 +-
 deps/v8/test/mjsunit/regress/regress-2843.js | 2 +
 .../v8/test/mjsunit/regress/regress-298269.js | 10 +-
 deps/v8/test/mjsunit/regress/regress-2987.js | 6 +-
 deps/v8/test/mjsunit/regress/regress-2989.js | 7 +-
 deps/v8/test/mjsunit/regress/regress-3029.js | 4 +-
 deps/v8/test/mjsunit/regress/regress-3039.js | 4 +-
 deps/v8/test/mjsunit/regress/regress-3158.js | 4 +-
 deps/v8/test/mjsunit/regress/regress-3176.js | 3 +
 deps/v8/test/mjsunit/regress/regress-3183.js | 6 +
 .../v8/test/mjsunit/regress/regress-318420.js | 3 +-
 deps/v8/test/mjsunit/regress/regress-3204.js | 20 +-
 .../v8/test/mjsunit/regress/regress-320532.js | 3 +
 .../test/mjsunit/regress/regress-3218915.js | 2 +
 .../v8/test/mjsunit/regress/regress-323845.js | 8 +-
 .../v8/test/mjsunit/regress/regress-330046.js | 2 +
 deps/v8/test/mjsunit/regress/regress-3307.js | 4 +-
 .../v8/test/mjsunit/regress/regress-331416.js | 10 +-
 .../v8/test/mjsunit/regress/regress-333594.js | 4 +-
 .../v8/test/mjsunit/regress/regress-334708.js | 4 +-
 deps/v8/test/mjsunit/regress/regress-3359.js | 3 +-
 deps/v8/test/mjsunit/regress/regress-3380.js | 8 +-
 deps/v8/test/mjsunit/regress/regress-3392.js | 4 +-
 .../v8/test/mjsunit/regress/regress-343609.js | 2 +
 .../v8/test/mjsunit/regress/regress-346343.js | 9 +-
 .../v8/test/mjsunit/regress/regress-346587.js | 6 +-
 .../v8/test/mjsunit/regress/regress-347262.js | 8 +-
 .../v8/test/mjsunit/regress/regress-347542.js | 3 +-
 .../v8/test/mjsunit/regress/regress-347543.js | 4 +-
 deps/v8/test/mjsunit/regress/regress-3476.js | 10 +-
 .../v8/test/mjsunit/regress/regress-347904.js | 2 +
 .../v8/test/mjsunit/regress/regress-347909.js | 5 +-
 .../v8/test/mjsunit/regress/regress-347914.js | 1 +
 .../v8/test/mjsunit/regress/regress-348280.js | 9 +-
 .../v8/test/mjsunit/regress/regress-348512.js | 14 +-
 .../v8/test/mjsunit/regress/regress-349885.js | 3 +-
 .../v8/test/mjsunit/regress/regress-350863.js | 5 +-
 .../v8/test/mjsunit/regress/regress-351261.js | 5 +-
 .../v8/test/mjsunit/regress/regress-351263.js | 5 +-
 .../v8/test/mjsunit/regress/regress-351319.js | 3 +-
 .../v8/test/mjsunit/regress/regress-351624.js | 6 +-
 .../v8/test/mjsunit/regress/regress-352982.js | 4 +-
 .../v8/test/mjsunit/regress/regress-354433.js | 1 +
 .../v8/test/mjsunit/regress/regress-355486.js | 10 +-
 .../v8/test/mjsunit/regress/regress-355523.js | 11 +-
 deps/v8/test/mjsunit/regress/regress-3564.js | 10 +-
 .../v8/test/mjsunit/regress/regress-357105.js | 8 +-
 .../v8/test/mjsunit/regress/regress-358057.js | 4 +-
 .../v8/test/mjsunit/regress/regress-358059.js | 9 +-
 .../v8/test/mjsunit/regress/regress-359441.js | 4 +-
 .../v8/test/mjsunit/regress/regress-359491.js | 12 +-
 .../v8/test/mjsunit/regress/regress-361608.js | 1 +
 .../v8/test/mjsunit/regress/regress-362128.js | 1 +
 .../v8/test/mjsunit/regress/regress-363956.js | 9 +-
 .../v8/test/mjsunit/regress/regress-3650-1.js | 1 +
 .../v8/test/mjsunit/regress/regress-3650-2.js | 1 +
 .../v8/test/mjsunit/regress/regress-368243.js | 1 +
 .../v8/test/mjsunit/regress/regress-370827.js | 8 +-
 .../v8/test/mjsunit/regress/regress-379770.js | 5 +-
 .../v8/test/mjsunit/regress/regress-380092.js | 8 +-
 .../v8/test/mjsunit/regress/regress-381313.js | 8 +-
 .../v8/test/mjsunit/regress/regress-385054.js | 4 +-
 .../v8/test/mjsunit/regress/regress-385565.js | 1 +
 .../v8/test/mjsunit/regress/regress-386034.js | 8 +-
 deps/v8/test/mjsunit/regress/regress-3865.js | 3 +-
 deps/v8/test/mjsunit/regress/regress-3884.js | 4 +-
 deps/v8/test/mjsunit/regress/regress-3969.js | 3 +-
 deps/v8/test/mjsunit/regress/regress-397.js | 4 +-
 deps/v8/test/mjsunit/regress/regress-3985.js | 27 +-
 deps/v8/test/mjsunit/regress/regress-4023.js | 10 +-
 .../v8/test/mjsunit/regress/regress-410912.js | 4 +
 deps/v8/test/mjsunit/regress/regress-4121.js | 8 +-
 .../v8/test/mjsunit/regress/regress-412162.js | 4 +-
 .../v8/test/mjsunit/regress/regress-416730.js | 10 +-
 deps/v8/test/mjsunit/regress/regress-4173.js | 7 +-
 deps/v8/test/mjsunit/regress/regress-4266.js | 4 +-
 deps/v8/test/mjsunit/regress/regress-4267.js | 1 +
 deps/v8/test/mjsunit/regress/regress-4325.js | 11 +-
 .../v8/test/mjsunit/regress/regress-435073.js | 6 +-
 .../v8/test/mjsunit/regress/regress-435477.js | 4 +-
 .../v8/test/mjsunit/regress/regress-436893.js | 1 +
 deps/v8/test/mjsunit/regress/regress-4374.js | 1 +
 .../v8/test/mjsunit/regress/regress-437765.js | 4 +-
 deps/v8/test/mjsunit/regress/regress-4388.js | 2 +
 .../v8/test/mjsunit/regress/regress-446389.js | 1 +
 .../v8/test/mjsunit/regress/regress-447526.js | 1 +
 .../v8/test/mjsunit/regress/regress-449291.js | 12 +-
 .../v8/test/mjsunit/regress/regress-4493-1.js | 13 +-
 deps/v8/test/mjsunit/regress/regress-4507.js | 5 +-
 .../v8/test/mjsunit/regress/regress-451322.js | 4 +-
 deps/v8/test/mjsunit/regress/regress-4515.js | 4 +-
 .../v8/test/mjsunit/regress/regress-451958.js | 2 +
 deps/v8/test/mjsunit/regress/regress-4521.js | 1 +
 deps/v8/test/mjsunit/regress/regress-4525.js | 1 +
 .../v8/test/mjsunit/regress/regress-457935.js | 6 +-
 .../v8/test/mjsunit/regress/regress-460917.js | 4 +-
 .../v8/test/mjsunit/regress/regress-466993.js | 4 +-
 .../v8/test/mjsunit/regress/regress-467481.js | 3 +-
 deps/v8/test/mjsunit/regress/regress-4715.js | 1 +
 .../v8/test/mjsunit/regress/regress-4788-1.js | 2 +
 .../v8/test/mjsunit/regress/regress-4788-2.js | 4 +-
 deps/v8/test/mjsunit/regress/regress-4800.js | 1 +
 .../v8/test/mjsunit/regress/regress-487981.js | 8 +-
 .../v8/test/mjsunit/regress/regress-488398.js | 1 +
 deps/v8/test/mjsunit/regress/regress-4970.js | 1 +
 deps/v8/test/mjsunit/regress/regress-4971.js | 6 +-
 deps/v8/test/mjsunit/regress/regress-5006.js | 5 +-
 .../v8/test/mjsunit/regress/regress-500831.js | 16 +-
 deps/v8/test/mjsunit/regress/regress-5033.js | 8 +-
 deps/v8/test/mjsunit/regress/regress-5085.js | 37 +-
 deps/v8/test/mjsunit/regress/regress-5205.js | 1 +
 deps/v8/test/mjsunit/regress/regress-5252.js | 3 +-
 deps/v8/test/mjsunit/regress/regress-5262.js | 1 +
 .../v8/test/mjsunit/regress/regress-5275-1.js | 6 +-
 .../v8/test/mjsunit/regress/regress-5275-2.js | 6 +-
 deps/v8/test/mjsunit/regress/regress-5286.js | 56 +-
 deps/v8/test/mjsunit/regress/regress-5332.js | 40 +-
 deps/v8/test/mjsunit/regress/regress-5357.js | 4 +-
 deps/v8/test/mjsunit/regress/regress-5404.js | 2 +
 .../v8/test/mjsunit/regress/regress-556543.js | 3 +-
 .../v8/test/mjsunit/regress/regress-5636-1.js | 5 +-
 .../v8/test/mjsunit/regress/regress-5636-2.js | 5 +-
 deps/v8/test/mjsunit/regress/regress-5638.js | 1 +
 deps/v8/test/mjsunit/regress/regress-5638b.js | 1 +
 .../v8/test/mjsunit/regress/regress-572589.js | 1 +
 deps/v8/test/mjsunit/regress/regress-5749.js | 10 +-
 deps/v8/test/mjsunit/regress/regress-5767.js | 5 -
 deps/v8/test/mjsunit/regress/regress-5790.js | 2 +
 deps/v8/test/mjsunit/regress/regress-5802.js | 8 +
 .../v8/test/mjsunit/regress/regress-585041.js | 4 +-
 .../v8/test/mjsunit/regress/regress-590074.js | 3 +-
 .../v8/test/mjsunit/regress/regress-592341.js | 18 +-
 .../v8/test/mjsunit/regress/regress-592353.js | 1 +
 deps/v8/test/mjsunit/regress/regress-5943.js | 4 +-
 deps/v8/test/mjsunit/regress/regress-5972.js | 4 +-
 .../regress/regress-599068-func-bindings.js | 6 +-
 .../v8/test/mjsunit/regress/regress-599412.js | 8 +-
 .../v8/test/mjsunit/regress/regress-599710.js | 68 +-
 .../v8/test/mjsunit/regress/regress-606021.js | 1 +
 deps/v8/test/mjsunit/regress/regress-6063.js | 4 +-
 deps/v8/test/mjsunit/regress/regress-6082.js | 5 +-
 deps/v8/test/mjsunit/regress/regress-6121.js | 1 +
 .../v8/test/mjsunit/regress/regress-612146.js | 13 +-
 deps/v8/test/mjsunit/regress/regress-6248.js | 2 +
 .../v8/test/mjsunit/regress/regress-625121.js | 1 +
 .../v8/test/mjsunit/regress/regress-632289.js | 11 +-
 .../test/mjsunit/regress/regress-634-debug.js | 1 +
 deps/v8/test/mjsunit/regress/regress-6373.js | 10 +-
 .../v8/test/mjsunit/regress/regress-639270.js | 1 +
 .../v8/test/mjsunit/regress/regress-642409.js | 1 +
 .../v8/test/mjsunit/regress/regress-653407.js | 12 +-
 .../v8/test/mjsunit/regress/regress-662845.js | 8 +-
 .../v8/test/mjsunit/regress/regress-662904.js | 1 +
 .../v8/test/mjsunit/regress/regress-664087.js | 18 +-
 .../v8/test/mjsunit/regress/regress-666046.js | 2 +-
 .../{regress-6681.js => regress-6681.mjs} | 4 +-
 .../v8/test/mjsunit/regress/regress-669024.js | 14 +-
 .../v8/test/mjsunit/regress/regress-673242.js | 6 +-
 .../v8/test/mjsunit/regress/regress-681383.js | 1 +
 .../v8/test/mjsunit/regress/regress-683617.js | 3 +-
 deps/v8/test/mjsunit/regress/regress-6907.js | 3 +-
 .../v8/test/mjsunit/regress/regress-696651.js | 12 +-
 .../v8/test/mjsunit/regress/regress-698790.js | 7 +-
 .../v8/test/mjsunit/regress/regress-709782.js | 10 +-
 .../v8/test/mjsunit/regress/regress-718891.js | 2 +
 .../v8/test/mjsunit/regress/regress-725858.js | 1 +
 .../v8/test/mjsunit/regress/regress-727662.js | 26 +-
 .../v8/test/mjsunit/regress/regress-730254.js | 5 +-
 .../v8/test/mjsunit/regress/regress-747075.js | 9 +-
 .../v8/test/mjsunit/regress/regress-747825.js | 1 +
 deps/v8/test/mjsunit/regress/regress-7510.js | 2 +
 deps/v8/test/mjsunit/regress/regress-7740.js | 4 +-
 .../v8/test/mjsunit/regress/regress-774824.js | 4 +-
 .../v8/test/mjsunit/regress/regress-775888.js | 6 +-
 .../v8/test/mjsunit/regress/regress-776309.js | 1 +
 .../v8/test/mjsunit/regress/regress-781218.js | 5 +-
 .../v8/test/mjsunit/regress/regress-782754.js | 9 +-
 .../v8/test/mjsunit/regress/regress-783051.js | 5 +-
 .../{regress-791334.js => regress-791334.mjs} | 2 -
 .../v8/test/mjsunit/regress/regress-791958.js | 7 +-
 .../v8/test/mjsunit/regress/regress-794822.js | 8 +-
 .../v8/test/mjsunit/regress/regress-794825.js | 4 +-
 .../v8/test/mjsunit/regress/regress-797581.js | 2 +-
 .../v8/test/mjsunit/regress/regress-802060.js | 5 +-
 .../v8/test/mjsunit/regress/regress-804837.js | 7 +-
 .../v8/test/mjsunit/regress/regress-805768.js | 6 +-
 .../v8/test/mjsunit/regress/regress-818070.js | 3 +
 deps/v8/test/mjsunit/regress/regress-8384.js | 17 +-
 .../v8/test/mjsunit/regress/regress-838766.js | 3 +-
 .../v8/test/mjsunit/regress/regress-842612.js | 4 +-
 .../test/mjsunit/regress/regress-843062-3.js | 5 +-
 .../v8/test/mjsunit/regress/regress-843543.js | 8 +-
 deps/v8/test/mjsunit/regress/regress-8510.js | 7 +
 deps/v8/test/mjsunit/regress/regress-8630.js | 6 +-
 .../v8/test/mjsunit/regress/regress-865310.js | 6 +-
 .../v8/test/mjsunit/regress/regress-869735.js | 4 +-
 deps/v8/test/mjsunit/regress/regress-8913.js | 1 +
 .../v8/test/mjsunit/regress/regress-895691.js | 6 +-
 .../v8/test/mjsunit/regress/regress-897815.js | 8 +-
 .../v8/test/mjsunit/regress/regress-899115.js | 4 +-
 .../v8/test/mjsunit/regress/regress-900585.js | 2 +-
 .../v8/test/mjsunit/regress/regress-901798.js | 4 +-
 .../v8/test/mjsunit/regress/regress-904417.js | 4 +-
 .../v8/test/mjsunit/regress/regress-919340.js | 8 +-
 .../v8/test/mjsunit/regress/regress-932953.js | 12 +-
 .../v8/test/mjsunit/regress/regress-936077.js | 6 +-
 deps/v8/test/mjsunit/regress/regress-9383.js | 50 +
 .../v8/test/mjsunit/regress/regress-940361.js | 4 +-
 deps/v8/test/mjsunit/regress/regress-9466.js | 10 +
 .../v8/test/mjsunit/regress/regress-950328.js | 10 +-
 .../regress/regress-961709-classes-opt.js | 34 +
 .../mjsunit/regress/regress-961709-classes.js | 29 +
 deps/v8/test/mjsunit/regress/regress-962.js | 1 +
 .../v8/test/mjsunit/regress/regress-963891.js | 3 +-
 deps/v8/test/mjsunit/regress/regress-97116.js | 8 +-
 .../v8/test/mjsunit/regress/regress-97116b.js | 4 +-
 .../v8/test/mjsunit/regress/regress-977870.js | 14 +
 .../v8/test/mjsunit/regress/regress-980891.js | 15 +
 .../v8/test/mjsunit/regress/regress-981236.js | 17 +
 .../v8/test/mjsunit/regress/regress-982702.js | 21 +
 .../mjsunit/regress/regress-add-minus-zero.js | 13 +-
 .../regress/regress-alloc-smi-check.js | 4 +-
 .../regress/regress-arg-materialize-store.js | 6 +-
 .../regress-arguments-liveness-analysis.js | 17 +-
 .../regress/regress-arm64-spillslots.js | 4 +-
 .../regress/regress-array-pop-deopt.js | 6 +-
 .../mjsunit/regress/regress-bce-underflow.js | 1 +
 deps/v8/test/mjsunit/regress/regress-binop.js | 7 +
 ...regress-calls-with-migrating-prototypes.js | 19 +-
 .../regress-captured-object-no-dummy-use.js | 9 +-
 .../mjsunit/regress/regress-charat-empty.js | 5 +-
 .../regress-check-eliminate-loop-phis.js | 1 +
 .../regress/regress-clobbered-fp-regs.js | 6 +-
 .../regress-compare-constant-doubles.js | 1 +
 .../mjsunit/regress/regress-convert-hole.js | 3 +
 .../mjsunit/regress/regress-convert-hole2.js | 4 +
 .../regress/regress-copy-hole-to-field.js | 32 +-
 .../mjsunit/regress/regress-crbug-125148.js | 3 +
 .../mjsunit/regress/regress-crbug-134055.js | 4 +-
 .../mjsunit/regress/regress-crbug-134609.js | 19 +-
 .../mjsunit/regress/regress-crbug-138887.js | 2 +
 .../mjsunit/regress/regress-crbug-140083.js | 13 +-
 .../mjsunit/regress/regress-crbug-142218.js | 4 +-
 .../mjsunit/regress/regress-crbug-145961.js | 4 +-
 .../mjsunit/regress/regress-crbug-147475.js | 2 +
 .../mjsunit/regress/regress-crbug-150545.js | 5 +-
 .../mjsunit/regress/regress-crbug-150729.js | 3 +-
 .../mjsunit/regress/regress-crbug-163530.js | 2 +
 .../mjsunit/regress/regress-crbug-173907.js | 26 +-
 .../mjsunit/regress/regress-crbug-173907b.js | 22 +-
 .../mjsunit/regress/regress-crbug-173974.js | 5 +-
 .../mjsunit/regress/regress-crbug-196583.js | 21 +-
 .../mjsunit/regress/regress-crbug-217858.js | 9 +-
 .../mjsunit/regress/regress-crbug-233737.js | 4 +-
 .../mjsunit/regress/regress-crbug-240032.js | 3 +-
 .../mjsunit/regress/regress-crbug-242502.js | 14 +-
 .../mjsunit/regress/regress-crbug-242870.js | 6 +-
 .../mjsunit/regress/regress-crbug-242924.js | 24 +-
 .../mjsunit/regress/regress-crbug-243868.js | 6 +-
 .../mjsunit/regress/regress-crbug-244461.js | 4 +-
 .../mjsunit/regress/regress-crbug-245424.js | 10 +-
 .../mjsunit/regress/regress-crbug-245480.js | 10 +-
 .../mjsunit/regress/regress-crbug-258519.js | 10 +-
 .../mjsunit/regress/regress-crbug-263276.js | 4 +-
 .../mjsunit/regress/regress-crbug-272564.js | 6 +-
 .../mjsunit/regress/regress-crbug-274438.js | 10 +-
 .../mjsunit/regress/regress-crbug-280333.js | 1 +
 .../mjsunit/regress/regress-crbug-285355.js | 6 +-
 .../mjsunit/regress/regress-crbug-305309.js | 3 +-
 .../mjsunit/regress/regress-crbug-306851.js | 12 +-
 .../mjsunit/regress/regress-crbug-309623.js | 4 +-
 .../mjsunit/regress/regress-crbug-315252.js | 12 +-
 .../mjsunit/regress/regress-crbug-318671.js | 6 +-
 .../mjsunit/regress/regress-crbug-319835.js | 8 +-
 .../mjsunit/regress/regress-crbug-319860.js | 4 +-
 .../mjsunit/regress/regress-crbug-323942.js | 24 +-
 .../mjsunit/regress/regress-crbug-329709.js | 17 +-
 .../mjsunit/regress/regress-crbug-336148.js | 3 +
 .../mjsunit/regress/regress-crbug-340064.js | 4 +-
 .../mjsunit/regress/regress-crbug-344186.js | 3 +-
 .../mjsunit/regress/regress-crbug-345715.js | 16 +-
 .../mjsunit/regress/regress-crbug-345820.js | 3 +-
 .../mjsunit/regress/regress-crbug-346636.js | 6 +-
 .../mjsunit/regress/regress-crbug-347903.js | 8 +-
 .../mjsunit/regress/regress-crbug-349079.js | 3 +-
 .../mjsunit/regress/regress-crbug-349465.js | 3 +-
 .../mjsunit/regress/regress-crbug-349878.js | 1 +
 .../mjsunit/regress/regress-crbug-350434.js | 6 +-
 .../mjsunit/regress/regress-crbug-351320.js | 8 +-
 .../mjsunit/regress/regress-crbug-351658.js | 2 +-
 .../mjsunit/regress/regress-crbug-352058.js | 4 +-
 .../mjsunit/regress/regress-crbug-352929.js | 11 +-
 .../mjsunit/regress/regress-crbug-354391.js | 4 +-
 .../mjsunit/regress/regress-crbug-357330.js | 1 +
 .../mjsunit/regress/regress-crbug-374838.js | 4 +-
 .../mjsunit/regress/regress-crbug-380512.js | 6 +-
 .../mjsunit/regress/regress-crbug-381534.js | 26 +-
 .../mjsunit/regress/regress-crbug-382513.js | 5 +-
 .../mjsunit/regress/regress-crbug-387636.js | 4 +-
 .../mjsunit/regress/regress-crbug-390918.js | 4 +-
 .../mjsunit/regress/regress-crbug-397662.js | 9 +
 .../mjsunit/regress/regress-crbug-405517.js | 10 +-
 .../mjsunit/regress/regress-crbug-407946.js | 6 +-
 .../mjsunit/regress/regress-crbug-412208.js | 1 +
 .../mjsunit/regress/regress-crbug-412210.js | 3 +-
 .../mjsunit/regress/regress-crbug-412215.js | 2 +
 .../mjsunit/regress/regress-crbug-412319.js | 11 +-
 .../mjsunit/regress/regress-crbug-417508.js | 50 +-
 .../mjsunit/regress/regress-crbug-425519.js | 4 +-
 .../mjsunit/regress/regress-crbug-433332.js | 1 +
 .../mjsunit/regress/regress-crbug-476477-2.js | 4 +-
 .../mjsunit/regress/regress-crbug-478612.js | 29 +-
 .../mjsunit/regress/regress-crbug-485410.js | 7 +-
 .../mjsunit/regress/regress-crbug-485548-1.js | 18 +-
 .../mjsunit/regress/regress-crbug-485548-2.js | 18 +-
 .../mjsunit/regress/regress-crbug-487608.js | 4 +-
 .../mjsunit/regress/regress-crbug-489293.js | 5 +-
 .../mjsunit/regress/regress-crbug-490021.js | 4 +-
 .../mjsunit/regress/regress-crbug-500435.js | 1 +
 .../mjsunit/regress/regress-crbug-500497.js | 8 +-
 .../mjsunit/regress/regress-crbug-500824.js | 1 +
 .../mjsunit/regress/regress-crbug-505354.js | 6 +-
 .../mjsunit/regress/regress-crbug-510738.js | 1 +
 .../mjsunit/regress/regress-crbug-513471.js | 4 +-
 .../mjsunit/regress/regress-crbug-522895.js | 1 +
 .../mjsunit/regress/regress-crbug-523213.js | 8 +-
 .../mjsunit/regress/regress-crbug-523307.js | 4 +-
 .../mjsunit/regress/regress-crbug-527364.js | 1 +
 .../mjsunit/regress/regress-crbug-530598.js | 2 +
 .../mjsunit/regress/regress-crbug-537444.js | 4 +-
 .../mjsunit/regress/regress-crbug-551287.js | 16 +-
 .../mjsunit/regress/regress-crbug-557807.js | 9 +-
 .../mjsunit/regress/regress-crbug-571064.js | 3 +-
 .../mjsunit/regress/regress-crbug-573858.js | 19 +-
 .../mjsunit/regress/regress-crbug-577112.js | 3 +-
 .../mjsunit/regress/regress-crbug-589792.js | 1 +
 .../mjsunit/regress/regress-crbug-590989-1.js | 2 +
 .../mjsunit/regress/regress-crbug-590989-2.js | 6 +-
 .../mjsunit/regress/regress-crbug-595615.js | 16 +-
 .../mjsunit/regress/regress-crbug-598998.js | 4 +-
 .../mjsunit/regress/regress-crbug-599003.js | 4 +-
 .../mjsunit/regress/regress-crbug-601617.js | 6 +-
 .../mjsunit/regress/regress-crbug-602595.js | 6 +-
 .../mjsunit/regress/regress-crbug-604680.js | 12 +-
 .../mjsunit/regress/regress-crbug-608278.js | 14 +-
 .../mjsunit/regress/regress-crbug-613494.js | 7 +-
 .../mjsunit/regress/regress-crbug-613919.js | 3 +-
 .../mjsunit/regress/regress-crbug-614292.js | 4 +-
 .../mjsunit/regress/regress-crbug-614644.js | 6 +-
 .../mjsunit/regress/regress-crbug-616709-1.js | 6 +-
 .../mjsunit/regress/regress-crbug-616709-2.js | 6 +-
 .../mjsunit/regress/regress-crbug-617567.js | 4 +-
 .../mjsunit/regress/regress-crbug-621816.js | 5 +-
 .../mjsunit/regress/regress-crbug-621868.js | 6 +-
 .../mjsunit/regress/regress-crbug-624747.js | 6 +-
 .../mjsunit/regress/regress-crbug-624919.js | 4 +-
 .../mjsunit/regress/regress-crbug-625547.js | 4 +-
 .../mjsunit/regress/regress-crbug-627828.js | 35 +-
 .../mjsunit/regress/regress-crbug-629062.js | 6 +-
 .../mjsunit/regress/regress-crbug-629435.js | 3 +-
 .../mjsunit/regress/regress-crbug-629823.js | 7 +-
 .../mjsunit/regress/regress-crbug-630923.js | 3 +-
 .../mjsunit/regress/regress-crbug-631027.js | 1 +
 .../mjsunit/regress/regress-crbug-631318-1.js | 9 +-
 .../regress/regress-crbug-631318-10.js | 9 +-
 .../regress/regress-crbug-631318-11.js | 9 +-
 .../regress/regress-crbug-631318-12.js | 9 +-
 .../regress/regress-crbug-631318-13.js | 9 +-
 .../regress/regress-crbug-631318-14.js | 9 +-
 .../regress/regress-crbug-631318-15.js | 9 +-
 .../mjsunit/regress/regress-crbug-631318-2.js | 9 +-
 .../mjsunit/regress/regress-crbug-631318-3.js | 9 +-
 .../mjsunit/regress/regress-crbug-631318-4.js | 9 +-
 .../mjsunit/regress/regress-crbug-631318-5.js | 9 +-
 .../mjsunit/regress/regress-crbug-631318-6.js | 9 +-
 .../mjsunit/regress/regress-crbug-631318-7.js | 9 +-
 .../mjsunit/regress/regress-crbug-631318-8.js | 9 +-
 .../mjsunit/regress/regress-crbug-631318-9.js | 9 +-
 .../mjsunit/regress/regress-crbug-635923.js | 11 +-
 .../mjsunit/regress/regress-crbug-638551.js | 1 +
 .../mjsunit/regress/regress-crbug-640497.js | 11 +-
 .../mjsunit/regress/regress-crbug-642056.js | 6 +-
 .../mjsunit/regress/regress-crbug-643073.js | 6 +-
 .../mjsunit/regress/regress-crbug-644245.js | 1 +
 .../mjsunit/regress/regress-crbug-644689-1.js | 6 +-
 .../mjsunit/regress/regress-crbug-644689-2.js | 6 +-
 .../mjsunit/regress/regress-crbug-645103.js | 1 +
 .../mjsunit/regress/regress-crbug-645438.js | 14 +-
 .../mjsunit/regress/regress-crbug-647217.js | 10 +-
 .../mjsunit/regress/regress-crbug-647887.js | 1 +
 .../mjsunit/regress/regress-crbug-648539.js | 3 +-
 .../mjsunit/regress/regress-crbug-648737.js | 10 +-
 .../mjsunit/regress/regress-crbug-650404.js | 6 +-
 .../mjsunit/regress/regress-crbug-654723.js | 4 +-
 .../mjsunit/regress/regress-crbug-655004.js | 3 +-
 .../mjsunit/regress/regress-crbug-656037.js | 4 +-
 .../mjsunit/regress/regress-crbug-656275.js | 6 +-
 .../mjsunit/regress/regress-crbug-657478.js | 1 +
 .../mjsunit/regress/regress-crbug-658185.js | 7 +-
 .../mjsunit/regress/regress-crbug-658691.js | 4 +-
 .../mjsunit/regress/regress-crbug-659475-1.js | 2 +
 .../mjsunit/regress/regress-crbug-659475-2.js | 2 +
 .../mjsunit/regress/regress-crbug-659915a.js | 1 +
 .../mjsunit/regress/regress-crbug-659915b.js | 31 +-
 .../mjsunit/regress/regress-crbug-660379.js | 8 +-
 .../mjsunit/regress/regress-crbug-661949.js | 9 +-
 .../mjsunit/regress/regress-crbug-662367.js | 9 +-
 .../mjsunit/regress/regress-crbug-662410.js | 8 +-
 .../mjsunit/regress/regress-crbug-662830.js | 1 +
 .../mjsunit/regress/regress-crbug-663340.js | 10 +-
 .../mjsunit/regress/regress-crbug-663402.js | 1 +
 .../mjsunit/regress/regress-crbug-663750.js | 2 +
 .../mjsunit/regress/regress-crbug-664084.js | 4 +-
 .../mjsunit/regress/regress-crbug-664942.js | 3 +-
 .../mjsunit/regress/regress-crbug-665793.js | 3 +-
 .../mjsunit/regress/regress-crbug-667689.js | 12 +-
 .../mjsunit/regress/regress-crbug-669451.js | 18 +-
 .../mjsunit/regress/regress-crbug-669850.js | 1 +
 .../mjsunit/regress/regress-crbug-671576.js | 1 +
 .../mjsunit/regress/regress-crbug-672792.js | 2 +
 .../mjsunit/regress/regress-crbug-679202.js | 6 +-
 .../mjsunit/regress/regress-crbug-679378.js | 13 +-
 .../mjsunit/regress/regress-crbug-681983.js | 1 +
 .../mjsunit/regress/regress-crbug-684208.js | 1 +
 .../mjsunit/regress/regress-crbug-685050.js | 3 +-
 .../mjsunit/regress/regress-crbug-685506.js | 5 +-
 .../mjsunit/regress/regress-crbug-685634.js | 5 +-
 .../mjsunit/regress/regress-crbug-685680.js | 4 +-
 .../mjsunit/regress/regress-crbug-686102.js | 3 +-
 .../mjsunit/regress/regress-crbug-686427.js | 3 +-
 .../mjsunit/regress/regress-crbug-686737.js | 11 +-
 .../mjsunit/regress/regress-crbug-687029.js | 3 +-
 .../mjsunit/regress/regress-crbug-687063.js | 5 +-
 .../mjsunit/regress/regress-crbug-687990.js | 1 +
 .../mjsunit/regress/regress-crbug-694416.js | 4 +-
 .../mjsunit/regress/regress-crbug-694709.js | 3 +-
 .../mjsunit/regress/regress-crbug-696622.js | 1 +
 .../mjsunit/regress/regress-crbug-698607.js | 8 +-
 .../mjsunit/regress/regress-crbug-699282.js | 5 +-
 .../mjsunit/regress/regress-crbug-700733.js | 2 +
 .../mjsunit/regress/regress-crbug-703610.js | 3 +-
 .../mjsunit/regress/regress-crbug-706642.js | 1 +
 .../mjsunit/regress/regress-crbug-708050-1.js | 6 +-
 .../mjsunit/regress/regress-crbug-708050-2.js | 4 +-
 .../mjsunit/regress/regress-crbug-709537.js | 3 +-
 .../mjsunit/regress/regress-crbug-709753.js | 8 +-
 .../mjsunit/regress/regress-crbug-711166.js | 15 +-
 .../mjsunit/regress/regress-crbug-712802.js | 6 +-
 .../mjsunit/regress/regress-crbug-715151.js | 3 +-
 .../mjsunit/regress/regress-crbug-715404.js | 5 +-
 .../mjsunit/regress/regress-crbug-715862.js | 4 +-
 .../mjsunit/regress/regress-crbug-719479.js | 5 +-
 .../mjsunit/regress/regress-crbug-722756.js | 16 +-
 .../mjsunit/regress/regress-crbug-723455.js | 4 +-
 .../mjsunit/regress/regress-crbug-724153.js | 1 +
 .../mjsunit/regress/regress-crbug-724608.js | 3 +-
 .../mjsunit/regress/regress-crbug-725201.js | 1 +
 .../mjsunit/regress/regress-crbug-729573-1.js | 98 +-
 .../mjsunit/regress/regress-crbug-729573-2.js | 6 +-
 .../mjsunit/regress/regress-crbug-732169.js | 2 +
 .../mjsunit/regress/regress-crbug-736575.js | 6 +-
 .../mjsunit/regress/regress-crbug-736633.js | 4 +-
 .../mjsunit/regress/regress-crbug-740116.js | 15 +-
 .../mjsunit/regress/regress-crbug-741078.js | 4 +-
 .../mjsunit/regress/regress-crbug-747062.js | 42 +-
 .../mjsunit/regress/regress-crbug-747979.js | 4 +-
 .../mjsunit/regress/regress-crbug-751715.js | 1 +
 .../mjsunit/regress/regress-crbug-752481.js | 1 +
 .../mjsunit/regress/regress-crbug-752826.js | 3 +-
 .../mjsunit/regress/regress-crbug-752846.js | 2 +-
 .../mjsunit/regress/regress-crbug-755044.js | 5 +-
 .../mjsunit/regress/regress-crbug-757199.js | 6 +-
 .../mjsunit/regress/regress-crbug-762874-1.js | 4 +-
 .../mjsunit/regress/regress-crbug-762874-2.js | 4 +-
 .../mjsunit/regress/regress-crbug-766635.js | 71 +-
 .../mjsunit/regress/regress-crbug-768080.js | 12 +-
 .../mjsunit/regress/regress-crbug-768367.js | 6 +-
 .../mjsunit/regress/regress-crbug-769852.js | 7 +-
 .../mjsunit/regress/regress-crbug-770543.js | 6 +-
 .../mjsunit/regress/regress-crbug-770581.js | 6 +-
 .../mjsunit/regress/regress-crbug-771971.js | 6 +-
 .../mjsunit/regress/regress-crbug-772610.js | 1 +
 .../mjsunit/regress/regress-crbug-772672.js | 6 +-
 .../mjsunit/regress/regress-crbug-772689.js | 6 +-
 .../mjsunit/regress/regress-crbug-772720.js | 1 +
 .../mjsunit/regress/regress-crbug-774459.js | 22 +-
 .../mjsunit/regress/regress-crbug-776511.js | 1 +
 .../mjsunit/regress/regress-crbug-779367.js | 4 +-
 .../mjsunit/regress/regress-crbug-781116-1.js | 8 +-
 .../mjsunit/regress/regress-crbug-781116-2.js | 8 +-
 .../mjsunit/regress/regress-crbug-781506-1.js | 6 +-
 .../mjsunit/regress/regress-crbug-781506-2.js | 6 +-
 .../mjsunit/regress/regress-crbug-781506-3.js | 6 +-
 .../mjsunit/regress/regress-crbug-781583.js | 4 +-
 .../mjsunit/regress/regress-crbug-786723.js | 2 +
 .../mjsunit/regress/regress-crbug-791245-1.js | 6 +-
 .../mjsunit/regress/regress-crbug-791245-2.js | 6 +-
 .../mjsunit/regress/regress-crbug-801627.js | 1 +
 .../mjsunit/regress/regress-crbug-802333.js | 16 +-
 .../mjsunit/regress/regress-crbug-819086.js | 4 +-
 .../mjsunit/regress/regress-crbug-819298.js | 4 +-
 .../mjsunit/regress/regress-crbug-820820.js | 1 +
 .../mjsunit/regress/regress-crbug-822284.js | 4 +-
 .../mjsunit/regress/regress-crbug-825045.js | 10 +-
 .../mjsunit/regress/regress-crbug-879560.js | 6 +-
 .../mjsunit/regress/regress-crbug-879898.js | 3 +-
 .../mjsunit/regress/regress-crbug-880207.js | 12 +-
 .../mjsunit/regress/regress-crbug-884933.js | 128 +-
 .../mjsunit/regress/regress-crbug-890243.js | 21 +-
 .../mjsunit/regress/regress-crbug-891627.js | 2 +
 .../mjsunit/regress/regress-crbug-895199.js | 5 +-
 .../mjsunit/regress/regress-crbug-899524.js | 11 +-
 .../mjsunit/regress/regress-crbug-900674.js | 3 +-
 .../mjsunit/regress/regress-crbug-902395.js | 37 +-
 .../mjsunit/regress/regress-crbug-903043.js | 50 +-
 .../mjsunit/regress/regress-crbug-905457.js | 64 +-
 .../mjsunit/regress/regress-crbug-906043.js | 1 +
 .../mjsunit/regress/regress-crbug-906220.js | 6 +-
 .../mjsunit/regress/regress-crbug-906870.js | 64 +-
 .../mjsunit/regress/regress-crbug-908309.js | 32 +-
 .../mjsunit/regress/regress-crbug-913296.js | 4 +-
 .../mjsunit/regress/regress-crbug-930948.js | 6 +-
 .../mjsunit/regress/regress-crbug-931664.js | 1 +
 .../mjsunit/regress/regress-crbug-934166.js | 3 +-
 .../mjsunit/regress/regress-crbug-935932.js | 5 +
 .../mjsunit/regress/regress-crbug-936302.js | 30 +-
 .../mjsunit/regress/regress-crbug-937618.js | 15 +-
 .../mjsunit/regress/regress-crbug-937649.js | 21 +-
 .../mjsunit/regress/regress-crbug-941743.js | 11 +-
 .../mjsunit/regress/regress-crbug-942068.js | 4 +-
 .../mjsunit/regress/regress-crbug-944865.js | 5 +-
 .../mjsunit/regress/regress-crbug-951400.js | 4 +-
 .../mjsunit/regress/regress-crbug-961522.js | 3 +-
 .../mjsunit/regress/regress-crbug-961709-1.js | 11 +
 .../mjsunit/regress/regress-crbug-961709-2.js | 17 +-
 .../mjsunit/regress/regress-crbug-964833.js | 1 +
 .../mjsunit/regress/regress-crbug-966450.js | 17 +
 .../mjsunit/regress/regress-crbug-967101.js | 48 +
 .../mjsunit/regress/regress-crbug-967434.js | 12 +-
 .../mjsunit/regress/regress-crbug-969368.js | 19 +
 .../mjsunit/regress/regress-crbug-969498.js | 16 +
 .../mjsunit/regress/regress-crbug-971782.js | 18 +
 .../mjsunit/regress/regress-crbug-976256.js | 24 +
 .../mjsunit/regress/regress-crbug-976598.js | 18 +
 .../mjsunit/regress/regress-crbug-976934.js | 22 +
 .../mjsunit/regress/regress-crbug-977012.js | 17 +
 .../mjsunit/regress/regress-crbug-977089.js | 45 +
 .../mjsunit/regress/regress-crbug-979023.js | 18 +
 .../mjsunit/regress/regress-crbug-979401.js | 20 +
 .../mjsunit/regress/regress-crbug-980168.js | 56 +
 .../mjsunit/regress/regress-crbug-980292.js | 19 +
 .../mjsunit/regress/regress-crbug-985660.js | 23 +
 .../mjsunit/regress/regress-deep-proto.js | 4 +-
 .../regress-deopt-in-array-literal-spread.js | 12 +-
 .../regress/regress-deopt-store-effect.js | 46 +-
 .../regress-deoptimize-constant-keyed-load.js | 13 +-
 .../regress-double-canonicalization.js | 8 +-
 .../regress/regress-embedded-cons-string.js | 1 +
 .../regress-empty-fixed-double-array.js | 4 +-
 .../regress/regress-ensure-initial-map.js | 12 +-
 ...ress-escape-preserve-smi-representation.js | 23 +-
 .../regress/regress-et-clobbers-doubles.js | 14 +-
 .../regress-fast-literal-transition.js | 16 +-
 .../regress/regress-filter-contexts.js | 5 +-
 .../regress-force-constant-representation.js | 4 +-
 .../regress/regress-force-representation.js | 1 +
 .../v8/test/mjsunit/regress/regress-freeze.js | 12 +-
 .../test/mjsunit/regress/regress-fundecl.js | 12 +-
 .../mjsunit/regress/regress-grow-deopt.js | 4 +-
 .../regress/regress-grow-store-smi-check.js | 8 +-
 .../test/mjsunit/regress/regress-gvn-ftt.js | 1 +
 .../regress/regress-hoist-load-named-field.js | 2 +
 .../regress-indirect-push-unchecked.js | 4 +-
 .../regress-inline-arrow-as-construct.js | 6 +-
 .../regress-inline-class-constructor.js | 10 +-
 .../regress/regress-inline-constant-load.js | 12 +-
 .../regress-inline-getter-near-stack-limit.js | 24 +-
 ...gress-inlining-function-literal-context.js | 6 +-
 .../regress/regress-int32-truncation.js | 8 +-
 .../mjsunit/regress/regress-is-smi-repr.js | 1 +
 .../regress-keyed-access-string-length.js | 4 +-
 .../regress/regress-lazy-deopt-inlining.js | 6 +-
 .../regress/regress-lazy-deopt-inlining2.js | 6 +-
 .../mjsunit/regress/regress-load-elements.js | 6 +-
 .../regress/regress-load-field-by-index.js | 1 +
 ...ess-loop-var-assign-without-block-scope.js | 1 +
 .../regress/regress-map-invalidation-2.js | 1 +
 .../regress/regress-mul-canoverflow.js | 13 +-
 .../regress/regress-mul-canoverflowb.js | 9 +-
 ...gress-no-dummy-use-for-arguments-object.js | 4 +-
 deps/v8/test/mjsunit/regress/regress-ntl.js | 2 +
 .../mjsunit/regress/regress-omit-checks.js | 16 +-
 .../regress/regress-opt-typeof-null.js | 3 +-
 .../test/mjsunit/regress/regress-parseint.js | 8 +-
 .../mjsunit/regress/regress-phi-truncation.js | 1 +
 .../regress/regress-polymorphic-load.js | 8 +-
 .../regress/regress-polymorphic-store.js | 8 +-
 .../regress/regress-smi-math-floor-round.js | 30 +-
 deps/v8/test/mjsunit/regress/regress-sqrt.js | 4 +-
 .../regress/regress-store-heapobject.js | 4 +-
 .../regress-string-from-char-code-tonumber.js | 12 +-
 .../regress/regress-stringAt-boundsCheck.js | 6 +-
 .../regress/regress-sync-optimized-lists.js | 4 +-
 .../regress/regress-typedarray-length.js | 139 +-
 .../mjsunit/regress/regress-undefined-nan.js | 12 +-
 .../mjsunit/regress/regress-undefined-nan3.js | 14 +-
 ...ress-undefined-store-keyed-fast-element.js | 4 +-
 .../regress-unlink-closures-on-deopt.js | 2 +
 .../test/mjsunit/regress/regress-v8-4839.js | 60 +-
 .../test/mjsunit/regress/regress-v8-5254-1.js | 16 +-
 .../test/mjsunit/regress/regress-v8-5254-2.js | 16 +-
 .../test/mjsunit/regress/regress-v8-5255-1.js | 4 +-
 .../test/mjsunit/regress/regress-v8-5255-2.js | 4 +-
 .../test/mjsunit/regress/regress-v8-5255-3.js | 4 +-
 .../test/mjsunit/regress/regress-v8-5697.js | 23 +-
 .../test/mjsunit/regress/regress-v8-6515.js | 8 +
 .../test/mjsunit/regress/regress-v8-6906.js | 4 +-
 .../test/mjsunit/regress/regress-v8-7848.js | 26 +
 .../test/mjsunit/regress/regress-v8-8070.js | 1 +
 .../test/mjsunit/regress/regress-v8-8770.js | 10 +
 .../test/mjsunit/regress/regress-v8-9233.js | 6 +-
 .../test/mjsunit/regress/regress-v8-9394-2.js | 23 +
 .../test/mjsunit/regress/regress-v8-9394.js | 83 +
 .../test/mjsunit/regress/regress-v8-9460.js | 20 +
 .../v8/test/mjsunit/regress/regress_967104.js | 12 +
 .../mjsunit/regress/string-next-encoding.js | 4 +-
 .../mjsunit/regress/typed-array-lifetime.js | 1 +
 .../mjsunit/regress/wasm/regress-02256.js | 2 +
 .../mjsunit/regress/wasm/regress-02256b.js | 2 +
 .../test/mjsunit/regress/wasm/regress-9425.js | 20 +
 .../test/mjsunit/regress/wasm/regress-9447.js | 37 +
 .../mjsunit/regress/wasm/regress-968078.js | 47 +
 .../mjsunit/regress/wasm/regress-980007.js | 14 +
 .../mjsunit/regress/wasm/regress-985154.js | 34 +
 deps/v8/test/mjsunit/sealed-array-reduce.js | 1431 ++
 deps/v8/test/mjsunit/setters-on-elements.js | 7 +
 deps/v8/test/mjsunit/smi-representation.js | 10 +-
 .../mjsunit/strict-mode-implicit-receiver.js | 2 +
 deps/v8/test/mjsunit/strict-mode-opt.js | 5 +-
 deps/v8/test/mjsunit/string-charcodeat.js | 34 +-
 deps/v8/test/mjsunit/string-fromcharcode.js | 30 +-
 deps/v8/test/mjsunit/string-indexof-1.js | 6 +
 deps/v8/test/mjsunit/string-slices.js | 84 +-
 .../mjsunit/sum-0-plus-undefined-is-NaN.js | 7 +-
 deps/v8/test/mjsunit/switch-opt.js | 14 +
 deps/v8/test/mjsunit/testcfg.py | 19 +-
 deps/v8/test/mjsunit/thin-strings.js | 20 +-
 .../mjsunit/tools/compiler-trace-flags.js | 4 +-
 deps/v8/test/mjsunit/tools/tickprocessor.js | 41 +
 .../test/mjsunit/transition-elements-kind.js | 5 +-
 deps/v8/test/mjsunit/ubsan-fuzzerbugs.js | 2 +
 .../mjsunit/unbox-double-field-indexed.js | 4 +-
 deps/v8/test/mjsunit/unbox-double-field.js | 4 +-
 .../test/mjsunit/unbox-smi-field-indexed.js | 4 +-
 deps/v8/test/mjsunit/undetectable-compare.js | 1 +
 deps/v8/test/mjsunit/wasm/anyref-table.js | 2 +-
 deps/v8/test/mjsunit/wasm/atomics-stress.js | 810 +-
 deps/v8/test/mjsunit/wasm/atomics64-stress.js | 894 +-
 .../test/mjsunit/wasm/bulk-memory-spec/README | 19 -
 .../mjsunit/wasm/bulk-memory-spec/binary.wast | 1047 --
 .../wasm/bulk-memory-spec/binary.wast.js | 445 -
 .../mjsunit/wasm/bulk-memory-spec/bulk.wast | 308 -
 .../wasm/bulk-memory-spec/bulk.wast.js | 470 -
 .../mjsunit/wasm/bulk-memory-spec/custom.wast | 130 -
 .../wasm/bulk-memory-spec/custom.wast.js | 170 -
 .../wasm/bulk-memory-spec/linking.wast | 392 -
 .../wasm/bulk-memory-spec/linking.wast.js | 505 -
 .../wasm/bulk-memory-spec/memory_copy.wast | 5685 -------
 .../wasm/bulk-memory-spec/memory_copy.wast.js | 13859 ----------------
 .../wasm/bulk-memory-spec/memory_fill.wast | 673 -
 .../wasm/bulk-memory-spec/memory_fill.wast.js | 440 -
 .../wasm/bulk-memory-spec/memory_init.wast | 947 --
 .../wasm/bulk-memory-spec/memory_init.wast.js | 866 -
 .../wasm/bulk-memory-spec/table_copy.wast | 1469 --
 .../wasm/bulk-memory-spec/table_copy.wast.js | 2651 ---
 .../wasm/bulk-memory-spec/table_init.wast | 1602 --
 .../wasm/bulk-memory-spec/table_init.wast.js | 2096 ---
 .../v8/test/mjsunit/wasm/exceptions-anyref.js | 4 +-
 .../v8/test/mjsunit/wasm/exceptions-global.js | 88 +-
 deps/v8/test/mjsunit/wasm/exceptions.js | 8 +-
 deps/v8/test/mjsunit/wasm/export-identity.js | 46 +
 ...ndirect-call-non-zero-table-interpreter.js | 12 +
 .../v8/test/mjsunit/wasm/interpreter-mixed.js | 4 +-
 deps/v8/test/mjsunit/wasm/interpreter.js | 24 +-
 .../mjsunit/wasm/shared-memory-gc-stress.js | 37 +
 ...shared-memory-worker-explicit-gc-stress.js | 33 +
 .../wasm/shared-memory-worker-gc-stress.js | 27 +
 .../mjsunit/wasm/shared-memory-worker-gc.js | 34 +
 .../wasm/shared-memory-worker-stress.js | 27 +
 deps/v8/test/mjsunit/wasm/stack.js | 37 +-
 .../mjsunit/wasm/table-access-interpreter.js | 12 +
 deps/v8/test/mjsunit/wasm/table-access.js | 12 +-
 .../v8/test/mjsunit/wasm/table-copy-anyref.js | 73 +
 .../mjsunit/wasm/table-fill-interpreter.js | 12 +
 deps/v8/test/mjsunit/wasm/table-fill.js | 2 +-
 .../wasm/table-grow-from-wasm-interpreter.js | 12 +
 .../test/mjsunit/wasm/table-grow-from-wasm.js | 2 +-
 .../wasm/type-reflection-with-anyref.js | 45 +
 deps/v8/test/mjsunit/wasm/type-reflection.js | 98 +
 .../test/mjsunit/wasm/wasm-module-builder.js | 16 +-
 deps/v8/test/mjsunit/worker-ping-test.js | 125 +
 deps/v8/test/mkgrokdump/mkgrokdump.cc | 14 +-
 deps/v8/test/mozilla/mozilla.status | 4 +
 deps/v8/test/preparser/OWNERS | 2 +
 deps/v8/test/test262/OWNERS | 2 +
 deps/v8/test/test262/harness-adapt.js | 5 +-
 deps/v8/test/test262/test262.status | 64 +-
 deps/v8/test/test262/testcfg.py | 9 +-
 deps/v8/test/torque/OWNERS | 1 +
 deps/v8/test/torque/test-torque.tq | 234 +-
 deps/v8/test/unittests/BUILD.gn | 4 +
 .../api/resource-constraints-unittest.cc | 56 +
 .../test/unittests/api/v8-object-unittest.cc | 2 +
 .../utils/random-number-generator-unittest.cc | 5 +-
 .../unittests/base/vlq-base64-unittest.cc | 137 +
 .../compiler-dispatcher-unittest.cc | 2 +-
 .../instruction-selector-arm64-unittest.cc | 119 +-
 .../backend/instruction-selector-unittest.cc | 6 +-
 .../compiler/branch-elimination-unittest.cc | 3 +-
 .../compiler/bytecode-analysis-unittest.cc | 3 +-
 .../common-operator-reducer-unittest.cc | 2 +-
 .../constant-folding-reducer-unittest.cc | 4 +-
 .../control-flow-optimizer-unittest.cc | 3 +-
 .../decompression-elimination-unittest.cc | 966 +-
 .../compiler/graph-reducer-unittest.cc | 25 +-
 .../test/unittests/compiler/graph-unittest.cc | 5 +-
 .../test/unittests/compiler/graph-unittest.h | 4 +
 .../compiler/js-call-reducer-unittest.cc | 3 +-
 .../compiler/js-create-lowering-unittest.cc | 3 +-
 .../js-intrinsic-lowering-unittest.cc | 2 +-
 .../compiler/js-typed-lowering-unittest.cc | 2 +-
 .../compiler/loop-peeling-unittest.cc | 2 +-
 .../machine-operator-reducer-unittest.cc | 2 +-
 .../unittests/compiler/node-test-utils.cc | 17 +-
 .../test/unittests/compiler/node-test-utils.h | 5 +
 .../redundancy-elimination-unittest.cc | 63 +-
 .../compiler/regalloc/live-range-unittest.cc | 12 +-
 .../unittests/compiler/scheduler-unittest.cc | 13 +-
 .../compiler/simplified-lowering-unittest.cc | 9 +-
 .../simplified-operator-reducer-unittest.cc | 5 +-
 .../compiler/simplified-operator-unittest.cc | 1 +
 .../compiler/typed-optimization-unittest.cc | 2 +-
 .../test/unittests/compiler/typer-unittest.cc | 2 +-
 .../heap/heap-controller-unittest.cc | 59 +-
 deps/v8/test/unittests/heap/heap-unittest.cc | 73 +-
 .../heap/item-parallel-job-unittest.cc | 8 +-
 .../bytecode-array-builder-unittest.cc | 10 +-
 .../bytecode-array-iterator-unittest.cc | 6 +-
 ...bytecode-array-random-iterator-unittest.cc | 18 +-
 .../interpreter-assembler-unittest.cc | 43 +-
 .../interpreter-assembler-unittest.h | 4 +
 .../unittests/logging/counters-unittest.cc | 48 +-
 .../objects/value-serializer-unittest.cc | 63 +
.../tasks/background-compile-task-unittest.cc | 2 +- deps/v8/test/unittests/test-helpers.cc | 4 +- .../torque/earley-parser-unittest.cc | 2 +- .../unittests/torque/ls-message-unittest.cc | 85 +- .../torque/ls-server-data-unittest.cc | 24 +- .../test/unittests/torque/torque-unittest.cc | 108 +- .../wasm/control-transfer-unittest.cc | 3 +- .../wasm/function-body-decoder-unittest.cc | 224 +- .../unittests/wasm/module-decoder-unittest.cc | 78 +- .../test/unittests/wasm/wasm-text-unittest.cc | 121 + deps/v8/test/wasm-api-tests/BUILD.gn | 9 + deps/v8/test/wasm-api-tests/callbacks.cc | 299 +- deps/v8/test/wasm-api-tests/finalize.cc | 77 + deps/v8/test/wasm-api-tests/globals.cc | 208 + deps/v8/test/wasm-api-tests/memory.cc | 123 + deps/v8/test/wasm-api-tests/reflect.cc | 109 + deps/v8/test/wasm-api-tests/serialize.cc | 48 + deps/v8/test/wasm-api-tests/table.cc | 116 + deps/v8/test/wasm-api-tests/threads.cc | 105 + deps/v8/test/wasm-api-tests/traps.cc | 60 + deps/v8/test/wasm-api-tests/wasm-api-test.h | 162 + deps/v8/test/wasm-js/OWNERS | 1 + deps/v8/test/wasm-js/testcfg.py | 4 +- deps/v8/test/wasm-spec-tests/testcfg.py | 15 + .../v8/test/wasm-spec-tests/tests.tar.gz.sha1 | 2 +- .../wasm-spec-tests/wasm-spec-tests.status | 10 + .../class-syntax-declaration-expected.txt | 6 +- .../test/webkit/class-syntax-declaration.js | 6 +- .../class-syntax-expression-expected.txt | 2 +- .../v8/test/webkit/class-syntax-expression.js | 2 +- .../webkit/class-syntax-extends-expected.txt | 6 +- .../webkit/class-syntax-name-expected.txt | 8 +- .../class-syntax-semicolon-expected.txt | 10 +- deps/v8/test/webkit/class-syntax-semicolon.js | 10 +- .../fast/js/basic-strict-mode-expected.txt | 32 +- .../function-constructor-error-expected.txt | 4 +- ...function-toString-parentheses-expected.txt | 174 +- .../fast/js/function-toString-parentheses.js | 17 +- .../fast/js/object-extra-comma-expected.txt | 4 +- .../fast/js/parser-syntax-check-expected.txt | 84 +- .../webkit/fast/js/parser-syntax-check.js | 36 +- .../webkit/fast/regex/toString-expected.txt | 2 +- ...tion-toString-object-literals-expected.txt | 10 +- .../parser-xml-close-comment-expected.txt | 6 +- deps/v8/{infra => }/testing/OWNERS | 2 - deps/v8/third_party/binutils/OWNERS | 1 + deps/v8/third_party/colorama/OWNERS | 1 + .../third_party/inspector_protocol/BUILD.gn | 27 + deps/v8/third_party/inspector_protocol/OWNERS | 2 - .../third_party/inspector_protocol/README.md | 9 +- .../third_party/inspector_protocol/README.v8 | 2 +- .../inspector_protocol/bindings/bindings.cc} | 4 +- .../inspector_protocol/bindings/bindings.h | 81 + .../bindings/bindings_test.cc | 44 + .../bindings/bindings_test_helper.h | 18 + .../inspector_protocol/code_generator.py | 1215 +- .../concatenate_protocols.py | 47 +- .../inspector_protocol/encoding/encoding.cc | 60 +- .../inspector_protocol/encoding/encoding.h | 32 +- .../encoding/encoding_test.cc | 82 +- .../inspector_protocol/inspector_protocol.gni | 7 +- .../inspector_protocol.gypi | 34 - .../lib/Allocator_h.template | 25 - .../inspector_protocol/lib/Array_h.template | 138 - .../lib/DispatcherBase_h.template | 3 + .../inspector_protocol/lib/Forward_h.template | 50 +- .../inspector_protocol/lib/Maybe_h.template | 139 - .../lib/ValueConversions_h.template | 66 + .../lib/Values_cpp.template | 28 +- .../inspector_protocol/lib/Values_h.template | 1 + .../lib/base_string_adapter_cc.template | 4 +- .../lib/base_string_adapter_h.template | 2 +- .../lib/encoding_cpp.template | 2201 --- .../lib/encoding_h.template | 520 - 
deps/v8/third_party/inspector_protocol/pdl.py | 295 +- .../v8/third_party/inspector_protocol/roll.py | 9 + deps/v8/third_party/v8/builtins/OWNERS | 3 + deps/v8/third_party/v8/builtins/array-sort.tq | 45 +- deps/v8/third_party/wasm-api/LICENSE | 202 + deps/v8/third_party/wasm-api/OWNERS | 2 + deps/v8/third_party/wasm-api/README.v8 | 17 + .../third_party/wasm-api/example/callback.c | 167 + .../third_party/wasm-api/example/callback.cc | 145 + .../wasm-api/example/callback.wasm | Bin 0 -> 102 bytes .../third_party/wasm-api/example/callback.wat | 10 + .../third_party/wasm-api/example/finalize.c | 75 + .../third_party/wasm-api/example/finalize.cc | 70 + .../wasm-api/example/finalize.wasm | Bin 0 -> 75 bytes .../third_party/wasm-api/example/finalize.wat | 5 + deps/v8/third_party/wasm-api/example/global.c | 222 + .../v8/third_party/wasm-api/example/global.cc | 193 + .../third_party/wasm-api/example/global.wasm | Bin 0 -> 576 bytes .../third_party/wasm-api/example/global.wat | 27 + deps/v8/third_party/wasm-api/example/hello.c | 107 + deps/v8/third_party/wasm-api/example/hello.cc | 91 + .../third_party/wasm-api/example/hello.wasm | Bin 0 -> 71 bytes .../v8/third_party/wasm-api/example/hello.wat | 4 + deps/v8/third_party/wasm-api/example/memory.c | 217 + .../v8/third_party/wasm-api/example/memory.cc | 169 + .../third_party/wasm-api/example/memory.wasm | Bin 0 -> 146 bytes .../third_party/wasm-api/example/memory.wat | 11 + .../v8/third_party/wasm-api/example/reflect.c | 164 + .../third_party/wasm-api/example/reflect.cc | 138 + .../third_party/wasm-api/example/reflect.wasm | Bin 0 -> 124 bytes .../third_party/wasm-api/example/reflect.wat | 6 + .../third_party/wasm-api/example/serialize.c | 122 + .../third_party/wasm-api/example/serialize.cc | 103 + .../wasm-api/example/serialize.wasm | Bin 0 -> 71 bytes .../wasm-api/example/serialize.wat | 4 + deps/v8/third_party/wasm-api/example/table.c | 208 + deps/v8/third_party/wasm-api/example/table.cc | 189 + .../third_party/wasm-api/example/table.wasm | Bin 0 -> 139 bytes .../v8/third_party/wasm-api/example/table.wat | 12 + .../v8/third_party/wasm-api/example/threads.c | 152 + .../third_party/wasm-api/example/threads.cc | 124 + .../third_party/wasm-api/example/threads.wasm | Bin 0 -> 84 bytes .../third_party/wasm-api/example/threads.wat | 5 + deps/v8/third_party/wasm-api/example/trap.c | 121 + deps/v8/third_party/wasm-api/example/trap.cc | 100 + .../v8/third_party/wasm-api/example/trap.wasm | Bin 0 -> 105 bytes deps/v8/third_party/wasm-api/example/trap.wat | 5 + deps/v8/third_party/wasm-api/wasm.h | 677 + deps/v8/third_party/wasm-api/wasm.hh | 770 + deps/v8/tools/OWNERS | 3 +- deps/v8/tools/callstats.py | 49 +- deps/v8/tools/clusterfuzz/OWNERS | 2 - .../clusterfuzz/testdata/failure_output.txt | 4 +- .../testdata/sanity_check_output.txt | 2 +- deps/v8/tools/clusterfuzz/v8_foozzie.py | 39 +- deps/v8/tools/clusterfuzz/v8_foozzie_test.py | 10 +- deps/v8/tools/clusterfuzz/v8_fuzz_config.py | 54 +- deps/v8/tools/clusterfuzz/v8_sanity_checks.js | 11 + deps/v8/tools/gcmole/BUILD.gn | 1 + deps/v8/tools/gdbinit | 44 +- deps/v8/tools/gen-postmortem-metadata.py | 53 +- deps/v8/tools/get_landmines.py | 1 + deps/v8/tools/grokdump.py | 7 +- deps/v8/tools/heap-stats/categories.js | 2 +- deps/v8/tools/mb/mb.py | 2 +- deps/v8/tools/node/fetch_deps.py | 1 - deps/v8/tools/run_perf.py | 20 +- deps/v8/tools/testrunner/OWNERS | 2 - deps/v8/tools/testrunner/base_runner.py | 14 +- .../v8/tools/testrunner/local/junit_output.py | 49 - deps/v8/tools/testrunner/local/testsuite.py | 22 +- 
deps/v8/tools/testrunner/local/variants.py | 1 + deps/v8/tools/testrunner/outproc/message.py | 9 +- deps/v8/tools/testrunner/standard_runner.py | 17 +- deps/v8/tools/testrunner/testproc/progress.py | 40 - deps/v8/tools/tickprocessor.js | 46 +- deps/v8/tools/torque/format-torque.py | 15 +- .../tools/torque/vim-torque/syntax/torque.vim | 4 +- deps/v8/tools/torque/vscode-torque/.npmrc | 1 - deps/v8/tools/torque/vscode-torque/README.md | 33 - .../vscode-torque/language-configuration.json | 25 - .../torque/vscode-torque/out/extension.js | 99 - .../tools/torque/vscode-torque/package.json | 90 - .../torque/vscode-torque/src/extension.ts | 104 - .../syntaxes/torque.tmLanguage.json | 177 - .../tools/torque/vscode-torque/tsconfig.json | 17 - .../v8/tools/torque/vscode-torque/tslint.json | 11 - deps/v8/tools/try_perf.py | 18 +- deps/v8/tools/turbolizer/OWNERS | 1 + deps/v8/tools/turbolizer/info-view.html | 4 +- deps/v8/tools/turbolizer/src/graph-view.ts | 27 +- deps/v8/tools/turbolizer/src/sequence-view.ts | 14 +- .../testdata/expected_test_results1.json | 36 +- .../testdata/expected_test_results2.json | 24 +- deps/v8/tools/v8_presubmit.py | 3 +- deps/v8/tools/v8heapconst.py | 597 +- deps/v8/tools/wasm/update-wasm-spec-tests.sh | 98 +- deps/v8/tools/whitespace.txt | 2 +- deps/v8/tools/windbg.js | 194 +- 2620 files changed, 77919 insertions(+), 81808 deletions(-) create mode 100644 deps/v8/.flake8 create mode 100644 deps/v8/INTL_OWNERS rename deps/v8/{test/wasm-api-tests => benchmarks}/OWNERS (100%) rename deps/v8/{src/inspector => include}/js_protocol-1.2.json (100%) rename deps/v8/{src/inspector => include}/js_protocol-1.3.json (100%) rename deps/v8/{src/inspector => include}/js_protocol.pdl (100%) create mode 100644 deps/v8/src/api/OWNERS create mode 100644 deps/v8/src/base/lsan.h rename deps/v8/src/{common/v8memory.h => base/memory.h} (79%) create mode 100644 deps/v8/src/base/vlq-base64.cc create mode 100644 deps/v8/src/base/vlq-base64.h create mode 100644 deps/v8/src/builtins/OWNERS create mode 100644 deps/v8/src/builtins/bigint.tq create mode 100644 deps/v8/src/builtins/builtins-bigint-gen.h create mode 100644 deps/v8/src/builtins/object.tq create mode 100644 deps/v8/src/builtins/proxy-delete-property.tq create mode 100644 deps/v8/src/builtins/proxy-get-prototype-of.tq create mode 100644 deps/v8/src/builtins/proxy-is-extensible.tq create mode 100644 deps/v8/src/builtins/proxy-prevent-extensions.tq create mode 100644 deps/v8/src/builtins/proxy-set-prototype-of.tq create mode 100644 deps/v8/src/builtins/reflect.tq create mode 100644 deps/v8/src/codegen/DEPS create mode 100644 deps/v8/src/codegen/pending-optimization-table.cc create mode 100644 deps/v8/src/codegen/pending-optimization-table.h delete mode 100644 deps/v8/src/codegen/ppc/code-stubs-ppc.cc delete mode 100644 deps/v8/src/codegen/s390/code-stubs-s390.cc create mode 100644 deps/v8/src/codegen/tick-counter.cc create mode 100644 deps/v8/src/codegen/tick-counter.h create mode 100644 deps/v8/src/common/OWNERS rename deps/v8/src/{execution => common}/message-template.h (98%) create mode 100644 deps/v8/src/compiler-dispatcher/OWNERS delete mode 100644 deps/v8/src/compiler/STYLE create mode 100644 deps/v8/src/compiler/add-type-assertions-reducer.cc create mode 100644 deps/v8/src/compiler/add-type-assertions-reducer.h create mode 100644 deps/v8/src/compiler/compilation-dependency.h create mode 100644 deps/v8/src/compiler/csa-load-elimination.cc create mode 100644 deps/v8/src/compiler/csa-load-elimination.h create mode 100644 
deps/v8/src/compiler/heap-refs.h create mode 100644 deps/v8/src/diagnostics/DEPS create mode 100644 deps/v8/src/execution/interrupts-scope.cc create mode 100644 deps/v8/src/execution/interrupts-scope.h create mode 100644 deps/v8/src/execution/isolate-utils-inl.h create mode 100644 deps/v8/src/execution/isolate-utils.h create mode 100644 deps/v8/src/execution/stack-guard.cc create mode 100644 deps/v8/src/execution/stack-guard.h create mode 100644 deps/v8/src/extensions/OWNERS create mode 100644 deps/v8/src/extensions/cputracemark-extension.cc create mode 100644 deps/v8/src/extensions/cputracemark-extension.h create mode 100644 deps/v8/src/flags/OWNERS create mode 100644 deps/v8/src/heap/basic-memory-chunk.cc create mode 100644 deps/v8/src/heap/basic-memory-chunk.h create mode 100644 deps/v8/src/heap/read-only-heap-inl.h create mode 100644 deps/v8/src/libplatform/tracing/OWNERS delete mode 100644 deps/v8/src/libplatform/tracing/perfetto-consumer.cc delete mode 100644 deps/v8/src/libplatform/tracing/perfetto-consumer.h delete mode 100644 deps/v8/src/libplatform/tracing/perfetto-producer.cc delete mode 100644 deps/v8/src/libplatform/tracing/perfetto-producer.h delete mode 100644 deps/v8/src/libplatform/tracing/perfetto-shared-memory.cc delete mode 100644 deps/v8/src/libplatform/tracing/perfetto-shared-memory.h delete mode 100644 deps/v8/src/libplatform/tracing/perfetto-tasks.cc delete mode 100644 deps/v8/src/libplatform/tracing/perfetto-tasks.h delete mode 100644 deps/v8/src/libplatform/tracing/perfetto-tracing-controller.cc delete mode 100644 deps/v8/src/libplatform/tracing/perfetto-tracing-controller.h create mode 100644 deps/v8/src/libplatform/tracing/trace-event-listener.cc create mode 100644 deps/v8/src/objects/OWNERS create mode 100644 deps/v8/src/objects/source-text-module.cc create mode 100644 deps/v8/src/objects/source-text-module.h create mode 100644 deps/v8/src/objects/synthetic-module.cc create mode 100644 deps/v8/src/objects/synthetic-module.h create mode 100644 deps/v8/src/objects/tagged-field-inl.h create mode 100644 deps/v8/src/objects/tagged-field.h delete mode 100644 deps/v8/src/regexp/jsregexp-inl.h delete mode 100644 deps/v8/src/regexp/jsregexp.cc delete mode 100644 deps/v8/src/regexp/jsregexp.h rename deps/v8/src/regexp/{regexp-macro-assembler-irregexp-inl.h => regexp-bytecode-generator-inl.h} (61%) rename deps/v8/src/regexp/{regexp-macro-assembler-irregexp.cc => regexp-bytecode-generator.cc} (57%) rename deps/v8/src/regexp/{regexp-macro-assembler-irregexp.h => regexp-bytecode-generator.h} (75%) rename deps/v8/src/regexp/{bytecodes-irregexp.h => regexp-bytecodes.h} (96%) create mode 100644 deps/v8/src/regexp/regexp-compiler-tonode.cc create mode 100644 deps/v8/src/regexp/regexp-compiler.cc create mode 100644 deps/v8/src/regexp/regexp-compiler.h create mode 100644 deps/v8/src/regexp/regexp-dotprinter.cc create mode 100644 deps/v8/src/regexp/regexp-dotprinter.h rename deps/v8/src/regexp/{interpreter-irregexp.cc => regexp-interpreter.cc} (77%) rename deps/v8/src/regexp/{interpreter-irregexp.h => regexp-interpreter.h} (58%) create mode 100644 deps/v8/src/regexp/regexp-macro-assembler-arch.h create mode 100644 deps/v8/src/regexp/regexp-nodes.h create mode 100644 deps/v8/src/regexp/regexp.cc create mode 100644 deps/v8/src/regexp/regexp.h create mode 100644 deps/v8/src/roots/OWNERS create mode 100644 deps/v8/src/runtime/OWNERS create mode 100644 deps/v8/src/tasks/OWNERS create mode 100644 deps/v8/src/third_party/siphash/OWNERS create mode 100644 
deps/v8/src/third_party/utf8-decoder/OWNERS create mode 100644 deps/v8/src/third_party/valgrind/OWNERS create mode 100644 deps/v8/src/third_party/vtune/OWNERS create mode 100644 deps/v8/src/torque/global-context.cc delete mode 100644 deps/v8/src/utils/splay-tree-inl.h delete mode 100644 deps/v8/src/utils/splay-tree.h delete mode 100644 deps/v8/src/wasm/js-to-wasm-wrapper-cache.h create mode 100644 deps/v8/src/wasm/wasm-arguments.h create mode 100644 deps/v8/src/zone/OWNERS delete mode 100644 deps/v8/src/zone/zone-splay-tree.h create mode 100644 deps/v8/test/cctest/disasm-regex-helper.cc create mode 100644 deps/v8/test/cctest/disasm-regex-helper.h delete mode 100644 deps/v8/test/cctest/log-eq-of-logging-and-traversal.js create mode 100644 deps/v8/test/cctest/test-api-array-buffer.cc create mode 100644 deps/v8/test/cctest/test-api-typed-array.cc create mode 100644 deps/v8/test/cctest/test-macro-assembler-arm64.cc create mode 100644 deps/v8/test/cctest/test-poison-disasm-arm64.cc create mode 100644 deps/v8/test/common/wasm/OWNERS create mode 100644 deps/v8/test/debugger/OWNERS create mode 100644 deps/v8/test/debugger/debug/debug-evaluate-function-var.js rename deps/v8/test/debugger/debug/{debug-modules-set-variable-value.js => debug-modules-set-variable-value.mjs} (99%) rename deps/v8/test/debugger/debug/harmony/{modules-debug-scopes1.js => modules-debug-scopes1.mjs} (99%) rename deps/v8/test/debugger/debug/harmony/{modules-debug-scopes2.js => modules-debug-scopes2.mjs} (99%) create mode 100644 deps/v8/test/debugger/regress/regress-crbug-760225.js create mode 100644 deps/v8/test/inspector/debugger/restart-frame-expected.txt create mode 100644 deps/v8/test/inspector/debugger/restart-frame.js create mode 100644 deps/v8/test/inspector/debugger/wasm-clone-module-expected.txt create mode 100644 deps/v8/test/inspector/debugger/wasm-clone-module.js create mode 100644 deps/v8/test/intl/number-format/unified/notation-engineering-formatToParts.js create mode 100644 deps/v8/test/intl/number-format/unified/notation-scientific-formatToParts.js create mode 100644 deps/v8/test/intl/number-format/unified/percent.js create mode 100644 deps/v8/test/intl/regress-8866.js create mode 100644 deps/v8/test/intl/regress-9312.js create mode 100644 deps/v8/test/intl/regress-9408.js create mode 100644 deps/v8/test/intl/regress-9513.js create mode 100644 deps/v8/test/js-perf-test/BigInt/add.js create mode 100644 deps/v8/test/js-perf-test/BigInt/as-uint-n.js create mode 100644 deps/v8/test/js-perf-test/BigInt/bigint-util.js create mode 100644 deps/v8/test/js-perf-test/BigInt/test-config.js create mode 100644 deps/v8/test/js-perf-test/BytecodeHandlers/LdaKeyedProperty.js create mode 100644 deps/v8/test/js-perf-test/BytecodeHandlers/LdaNamedProperty.js create mode 100644 deps/v8/test/js-perf-test/InterpreterEntryTrampoline/arguments.js create mode 100644 deps/v8/test/js-perf-test/InterpreterEntryTrampoline/locals.js create mode 100644 deps/v8/test/js-perf-test/InterpreterEntryTrampoline/run.js create mode 100644 deps/v8/test/js-perf-test/ObjectFreeze/array-map.js create mode 100644 deps/v8/test/js-perf-test/ObjectFreeze/array-reduce.js create mode 100644 deps/v8/test/js-perf-test/ObjectFreeze/has-own-property.js create mode 100644 deps/v8/test/js-perf-test/RegExp/complex_case_test.js rename deps/v8/test/message/fail/{class-fields-private-throw-in-module.js => class-fields-private-throw-in-module.mjs} (94%) create mode 100644 deps/v8/test/message/fail/class-methods-private-throw-write.js create mode 100644 
deps/v8/test/message/fail/class-methods-private-throw-write.out rename deps/v8/test/message/fail/{export-duplicate-as.js => export-duplicate-as.mjs} (94%) rename deps/v8/test/message/fail/{export-duplicate-default.js => export-duplicate-default.mjs} (94%) rename deps/v8/test/message/fail/{export-duplicate.js => export-duplicate.mjs} (94%) rename deps/v8/test/message/fail/{import-as-eval.js => import-as-eval.mjs} (93%) rename deps/v8/test/message/fail/{import-as-redeclaration.js => import-as-redeclaration.mjs} (94%) rename deps/v8/test/message/fail/{import-as-reserved-word.js => import-as-reserved-word.mjs} (94%) create mode 100644 deps/v8/test/message/fail/import-blah-module.mjs create mode 100644 deps/v8/test/message/fail/import-blah-module.out create mode 100644 deps/v8/test/message/fail/import-blah-script.js create mode 100644 deps/v8/test/message/fail/import-blah-script.out rename deps/v8/test/message/fail/{import-eval.js => import-eval.mjs} (93%) rename deps/v8/test/message/fail/{import-redeclaration.js => import-redeclaration.mjs} (94%) rename deps/v8/test/message/fail/{import-reserved-word.js => import-reserved-word.mjs} (93%) create mode 100644 deps/v8/test/message/fail/import-script.js create mode 100644 deps/v8/test/message/fail/import-script.out rename deps/v8/test/message/fail/{modules-cycle1.js => modules-cycle1.mjs} (78%) rename deps/v8/test/message/fail/{modules-cycle2.js => modules-cycle2.mjs} (77%) create mode 100644 deps/v8/test/message/fail/modules-cycle3.mjs rename deps/v8/test/message/fail/{modules-skip-cycle2.js => modules-cycle4.mjs} (80%) create mode 100644 deps/v8/test/message/fail/modules-cycle5.mjs delete mode 100644 deps/v8/test/message/fail/modules-cycle6.js rename deps/v8/test/{mjsunit/harmony/modules-skip-4.js => message/fail/modules-cycle6.mjs} (70%) rename deps/v8/test/message/fail/{modules-duplicate-export2.js => modules-duplicate-export1.mjs} (93%) rename deps/v8/test/message/fail/{modules-duplicate-export1.js => modules-duplicate-export2.mjs} (93%) rename deps/v8/test/message/fail/{modules-duplicate-export3.js => modules-duplicate-export3.mjs} (94%) rename deps/v8/test/message/fail/{modules-duplicate-export4.js => modules-duplicate-export4.mjs} (94%) rename deps/v8/test/message/fail/{modules-duplicate-export5.js => modules-duplicate-export5.mjs} (78%) rename deps/v8/test/message/fail/{modules-export-illformed-class.js => modules-export-illformed-class.mjs} (94%) delete mode 100644 deps/v8/test/message/fail/modules-import-redeclare1.js create mode 100644 deps/v8/test/message/fail/modules-import-redeclare1.mjs delete mode 100644 deps/v8/test/message/fail/modules-import-redeclare2.js create mode 100644 deps/v8/test/message/fail/modules-import-redeclare2.mjs delete mode 100644 deps/v8/test/message/fail/modules-import-redeclare3.js create mode 100644 deps/v8/test/message/fail/modules-import-redeclare3.mjs create mode 100644 deps/v8/test/message/fail/modules-import1.mjs delete mode 100644 deps/v8/test/message/fail/modules-import2.js create mode 100644 deps/v8/test/message/fail/modules-import2.mjs delete mode 100644 deps/v8/test/message/fail/modules-import3.js create mode 100644 deps/v8/test/message/fail/modules-import3.mjs delete mode 100644 deps/v8/test/message/fail/modules-import4.js create mode 100644 deps/v8/test/message/fail/modules-import4.mjs delete mode 100644 deps/v8/test/message/fail/modules-import5.js rename deps/v8/test/message/fail/{modules-cycle3.js => modules-import5.mjs} (56%) rename deps/v8/test/message/fail/{modules-import6.js => 
modules-import6.mjs} (66%) create mode 100644 deps/v8/test/message/fail/modules-skip-cycle2.mjs delete mode 100644 deps/v8/test/message/fail/modules-skip-cycle3.js create mode 100644 deps/v8/test/message/fail/modules-skip-cycle3.mjs rename deps/v8/test/{mjsunit/harmony/modules-skip-10.js => message/fail/modules-skip-cycle5.mjs} (80%) rename deps/v8/test/{mjsunit/harmony/modules-skip-9.js => message/fail/modules-skip-cycle6.mjs} (80%) delete mode 100644 deps/v8/test/message/fail/modules-star-conflict1.js rename deps/v8/test/message/fail/{modules-import1.js => modules-star-conflict1.mjs} (76%) rename deps/v8/test/message/fail/{modules-star-conflict2.js => modules-star-conflict2.mjs} (67%) rename deps/v8/test/message/fail/{modules-star-default.js => modules-star-default.mjs} (81%) rename deps/v8/test/message/fail/{modules-undefined-export1.js => modules-undefined-export1.mjs} (93%) rename deps/v8/test/message/fail/{modules-undefined-export2.js => modules-undefined-export2.mjs} (93%) rename deps/v8/test/message/fail/{redeclaration5.js => redeclaration5.mjs} (94%) create mode 100644 deps/v8/test/message/fail/weak-refs-unregister.js create mode 100644 deps/v8/test/message/fail/weak-refs-unregister.out create mode 100644 deps/v8/test/message/mjsunit/fail/assert_not_same.js create mode 100644 deps/v8/test/message/mjsunit/fail/assert_not_same.out rename deps/v8/test/message/regress/fail/{regress-900383.js => regress-900383.mjs} (94%) create mode 100644 deps/v8/test/mjsunit/asm/regress-9531.js create mode 100644 deps/v8/test/mjsunit/async-stack-traces-realms.js create mode 100644 deps/v8/test/mjsunit/compiler/bigint-add-no-deopt-loop.js create mode 100644 deps/v8/test/mjsunit/compiler/bigint-add.js create mode 100644 deps/v8/test/mjsunit/compiler/bigint-int64-lowered.js create mode 100644 deps/v8/test/mjsunit/compiler/bigint-negate.js create mode 100644 deps/v8/test/mjsunit/compiler/generator-jump-targets.js create mode 100644 deps/v8/test/mjsunit/compiler/regress-977670.js create mode 100644 deps/v8/test/mjsunit/compiler/regress-crbug-974474.js create mode 100644 deps/v8/test/mjsunit/compiler/regress-crbug-974476.js create mode 100644 deps/v8/test/mjsunit/compiler/tagged-template.js create mode 100644 deps/v8/test/mjsunit/es6/classes-accesors.js create mode 100644 deps/v8/test/mjsunit/es6/classes-constructor.js create mode 100644 deps/v8/test/mjsunit/es6/classes-name-binding.js create mode 100644 deps/v8/test/mjsunit/es6/classes-proto.js create mode 100644 deps/v8/test/mjsunit/es6/classes-restricted-properties.js create mode 100644 deps/v8/test/mjsunit/es6/classes-test-super.js create mode 100644 deps/v8/test/mjsunit/es6/large-classes-methods.js create mode 100644 deps/v8/test/mjsunit/es6/large-classes-properties.js create mode 100644 deps/v8/test/mjsunit/es6/large-classes-static-methods.js create mode 100644 deps/v8/test/mjsunit/expose-cputracemark.js create mode 100644 deps/v8/test/mjsunit/frozen-array-reduce.js create mode 100644 deps/v8/test/mjsunit/harmony/bigint/rematerialize-on-deopt.js rename deps/v8/test/mjsunit/harmony/{module-parsing-eval.js => module-parsing-eval.mjs} (100%) rename deps/v8/test/mjsunit/harmony/{modules-import-1.js => modules-import-1.mjs} (80%) rename deps/v8/test/mjsunit/harmony/{modules-import-10.js => modules-import-10.mjs} (83%) rename deps/v8/test/mjsunit/harmony/{modules-import-11.js => modules-import-11.mjs} (89%) rename deps/v8/test/mjsunit/harmony/{modules-import-12.js => modules-import-12.mjs} (83%) rename deps/v8/test/mjsunit/harmony/{modules-import-13.js => 
modules-import-13.mjs} (78%) rename deps/v8/test/mjsunit/harmony/{modules-import-14.js => modules-import-14.mjs} (69%) rename deps/v8/test/mjsunit/harmony/{modules-import-15.js => modules-import-15.mjs} (81%) delete mode 100644 deps/v8/test/mjsunit/harmony/modules-import-16.js create mode 100644 deps/v8/test/mjsunit/harmony/modules-import-16.mjs rename deps/v8/test/mjsunit/harmony/{modules-import-17.js => modules-import-17.mjs} (86%) rename deps/v8/test/mjsunit/harmony/{modules-import-2.js => modules-import-2.mjs} (75%) rename deps/v8/test/mjsunit/harmony/{modules-import-3.js => modules-import-3.mjs} (89%) rename deps/v8/test/mjsunit/harmony/{modules-import-4.js => modules-import-4.mjs} (100%) rename deps/v8/test/mjsunit/harmony/{modules-import-5.js => modules-import-5.mjs} (86%) rename deps/v8/test/mjsunit/harmony/{modules-import-6.js => modules-import-6.mjs} (89%) rename deps/v8/test/mjsunit/harmony/{modules-import-7.js => modules-import-7.mjs} (89%) rename deps/v8/test/mjsunit/harmony/{modules-import-8.js => modules-import-8.mjs} (100%) rename deps/v8/test/mjsunit/harmony/{modules-import-9.js => modules-import-9.mjs} (88%) rename deps/v8/test/mjsunit/harmony/{modules-import-large.js => modules-import-large.mjs} (99%) rename deps/v8/test/mjsunit/harmony/{modules-import-meta.js => modules-import-meta.mjs} (89%) rename deps/v8/test/mjsunit/harmony/{modules-import-namespace.js => modules-import-namespace.mjs} (88%) rename deps/v8/test/mjsunit/harmony/{modules-skip-1.js => modules-skip-1.mjs} (100%) rename deps/v8/test/{message/fail/modules-skip-cycle6.js => mjsunit/harmony/modules-skip-10.mjs} (80%) rename deps/v8/test/mjsunit/harmony/{modules-skip-11.js => modules-skip-11.mjs} (100%) rename deps/v8/test/mjsunit/harmony/{modules-skip-12.js => modules-skip-12.mjs} (100%) rename deps/v8/test/mjsunit/harmony/{modules-skip-13.js => modules-skip-13.mjs} (79%) rename deps/v8/test/mjsunit/harmony/{modules-skip-2.js => modules-skip-2.mjs} (100%) rename deps/v8/test/mjsunit/harmony/{modules-skip-3.js => modules-skip-3.mjs} (100%) create mode 100644 deps/v8/test/mjsunit/harmony/modules-skip-4.mjs rename deps/v8/test/mjsunit/harmony/{modules-skip-5.js => modules-skip-5.mjs} (63%) rename deps/v8/test/mjsunit/harmony/{modules-skip-6.js => modules-skip-6.mjs} (100%) rename deps/v8/test/mjsunit/harmony/{modules-skip-7.js => modules-skip-7.mjs} (86%) rename deps/v8/test/mjsunit/harmony/{modules-skip-8.js => modules-skip-8.mjs} (100%) create mode 100644 deps/v8/test/mjsunit/harmony/modules-skip-9.mjs rename deps/v8/test/mjsunit/harmony/{modules-skip-empty.js => modules-skip-empty.mjs} (100%) rename deps/v8/test/mjsunit/harmony/{modules-skip-export-import-meta.js => modules-skip-export-import-meta.mjs} (100%) rename deps/v8/test/mjsunit/harmony/{modules-skip-large1.js => modules-skip-large1.mjs} (100%) rename deps/v8/test/mjsunit/harmony/{modules-skip-large2.js => modules-skip-large2.mjs} (100%) create mode 100644 deps/v8/test/mjsunit/harmony/regexp-overriden-exec.js create mode 100644 deps/v8/test/mjsunit/harmony/sharedarraybuffer-stress.js create mode 100644 deps/v8/test/mjsunit/harmony/sharedarraybuffer-worker-gc-stress.js create mode 100644 deps/v8/test/mjsunit/harmony/weakrefs/cleanupsome-2.js create mode 100644 deps/v8/test/mjsunit/harmony/weakrefs/unregister-inside-cleanup5.js rename deps/v8/test/mjsunit/{keyed-has-ic-module-export.js => keyed-has-ic-module-export.mjs} (95%) rename deps/v8/test/mjsunit/{keyed-has-ic-module-import.js => keyed-has-ic-module-import.mjs} (93%) delete mode 100644 
deps/v8/test/mjsunit/modules-circular-valid.js rename deps/v8/test/{message/fail/modules-cycle4.js => mjsunit/modules-circular-valid.mjs} (75%) delete mode 100644 deps/v8/test/mjsunit/modules-cycle.js create mode 100644 deps/v8/test/mjsunit/modules-cycle.mjs rename deps/v8/test/mjsunit/{modules-default-name1.js => modules-default-name1.mjs} (80%) rename deps/v8/test/mjsunit/{modules-default-name2.js => modules-default-name2.mjs} (80%) rename deps/v8/test/mjsunit/{modules-default-name6.js => modules-default-name3.mjs} (80%) rename deps/v8/test/mjsunit/{modules-default-name5.js => modules-default-name4.mjs} (80%) rename deps/v8/test/mjsunit/{modules-default-name4.js => modules-default-name5.mjs} (80%) rename deps/v8/test/mjsunit/{modules-default-name8.js => modules-default-name6.mjs} (80%) rename deps/v8/test/mjsunit/{modules-default-name7.js => modules-default-name7.mjs} (82%) rename deps/v8/test/mjsunit/{modules-default-name3.js => modules-default-name8.mjs} (80%) rename deps/v8/test/mjsunit/{modules-default-name9.js => modules-default-name9.mjs} (80%) rename deps/v8/test/mjsunit/{modules-default.js => modules-default.mjs} (68%) rename deps/v8/test/mjsunit/{modules-empty-import1.js => modules-empty-import1.mjs} (63%) rename deps/v8/test/mjsunit/{modules-empty-import2.js => modules-empty-import2.mjs} (62%) rename deps/v8/test/mjsunit/{modules-empty-import3.js => modules-empty-import3.mjs} (62%) delete mode 100644 deps/v8/test/mjsunit/modules-empty-import4.js create mode 100644 deps/v8/test/mjsunit/modules-empty-import4.mjs rename deps/v8/test/mjsunit/{modules-error-trace.js => modules-error-trace.mjs} (96%) rename deps/v8/test/mjsunit/{modules-export-star-as1.js => modules-export-star-as1.mjs} (83%) rename deps/v8/test/mjsunit/{modules-export-star-as2.js => modules-export-star-as2.mjs} (73%) rename deps/v8/test/mjsunit/{modules-export-star-as3.js => modules-export-star-as3.mjs} (73%) rename deps/v8/test/mjsunit/{modules-exports1.js => modules-exports1.mjs} (99%) rename deps/v8/test/mjsunit/{modules-exports2.js => modules-exports2.mjs} (98%) rename deps/v8/test/mjsunit/{modules-exports3.js => modules-exports3.mjs} (98%) rename deps/v8/test/mjsunit/{modules-imports1.js => modules-imports1.mjs} (89%) rename deps/v8/test/mjsunit/{modules-imports2.js => modules-imports2.mjs} (89%) rename deps/v8/test/mjsunit/{modules-imports3.js => modules-imports3.mjs} (87%) rename deps/v8/test/mjsunit/{modules-imports4.js => modules-imports4.mjs} (80%) rename deps/v8/test/mjsunit/{modules-imports5.js => modules-imports5.mjs} (80%) rename deps/v8/test/mjsunit/{modules-imports6.js => modules-imports6.mjs} (83%) rename deps/v8/test/mjsunit/{modules-imports7.js => modules-imports7.mjs} (79%) rename deps/v8/test/mjsunit/{modules-imports8.js => modules-imports8.mjs} (83%) rename deps/v8/test/mjsunit/{modules-init1.js => modules-init1.mjs} (82%) rename deps/v8/test/mjsunit/{modules-init2.js => modules-init2.mjs} (78%) rename deps/v8/test/mjsunit/{modules-init3.js => modules-init3.mjs} (91%) delete mode 100644 deps/v8/test/mjsunit/modules-init4.js rename deps/v8/test/{message/fail/modules-cycle5.js => mjsunit/modules-init4.mjs} (66%) rename deps/v8/test/mjsunit/{modules-namespace-getownproperty1.js => modules-namespace-getownproperty1.mjs} (98%) rename deps/v8/test/mjsunit/{modules-namespace-getownproperty2.js => modules-namespace-getownproperty2.mjs} (96%) rename deps/v8/test/mjsunit/{modules-namespace1.js => modules-namespace1.mjs} (96%) rename deps/v8/test/mjsunit/{modules-namespace2.js => modules-namespace2.mjs} 
(81%) rename deps/v8/test/mjsunit/{modules-namespace3.js => modules-namespace3.mjs} (74%) rename deps/v8/test/mjsunit/{modules-namespace4.js => modules-namespace4.mjs} (96%) rename deps/v8/test/mjsunit/{modules-preparse.js => modules-preparse.mjs} (95%) rename deps/v8/test/mjsunit/{modules-relative-path.js => modules-relative-path.mjs} (80%) rename deps/v8/test/mjsunit/{modules-skip-1.js => modules-skip-1.mjs} (100%) rename deps/v8/test/mjsunit/{modules-skip-2.js => modules-skip-2.mjs} (68%) rename deps/v8/test/mjsunit/{modules-skip-3.js => modules-skip-3.mjs} (85%) delete mode 100644 deps/v8/test/mjsunit/modules-skip-4.js create mode 100644 deps/v8/test/mjsunit/modules-skip-4.mjs rename deps/v8/test/mjsunit/{modules-skip-5.js => modules-skip-5.mjs} (100%) rename deps/v8/test/mjsunit/{modules-skip-6.js => modules-skip-6.mjs} (72%) delete mode 100644 deps/v8/test/mjsunit/modules-skip-7.js create mode 100644 deps/v8/test/mjsunit/modules-skip-7.mjs rename deps/v8/test/mjsunit/{modules-skip-8.js => modules-skip-8.mjs} (78%) rename deps/v8/test/mjsunit/{modules-skip-9.js => modules-skip-9.mjs} (67%) rename deps/v8/test/mjsunit/{modules-skip-circular-valid.js => modules-skip-circular-valid.mjs} (84%) rename deps/v8/test/{message/fail/modules-skip-cycle5.js => mjsunit/modules-skip-cycle.mjs} (81%) rename deps/v8/test/mjsunit/{modules-skip-default-name1.js => modules-skip-default-name1.mjs} (100%) rename deps/v8/test/mjsunit/{modules-skip-default-name2.js => modules-skip-default-name2.mjs} (100%) rename deps/v8/test/mjsunit/{modules-skip-default-name3.js => modules-skip-default-name3.mjs} (100%) rename deps/v8/test/mjsunit/{modules-skip-default-name4.js => modules-skip-default-name4.mjs} (100%) rename deps/v8/test/mjsunit/{modules-skip-default-name5.js => modules-skip-default-name5.mjs} (100%) rename deps/v8/test/mjsunit/{modules-skip-default-name6.js => modules-skip-default-name6.mjs} (100%) rename deps/v8/test/mjsunit/{modules-skip-default-name7.js => modules-skip-default-name7.mjs} (100%) rename deps/v8/test/mjsunit/{modules-skip-default-name8.js => modules-skip-default-name8.mjs} (100%) rename deps/v8/test/mjsunit/{modules-skip-default-name9.js => modules-skip-default-name9.mjs} (100%) rename deps/v8/test/mjsunit/{modules-skip-empty-import-aux.js => modules-skip-empty-import-aux.mjs} (100%) rename deps/v8/test/mjsunit/{modules-skip-empty-import.js => modules-skip-empty-import.mjs} (75%) rename deps/v8/test/mjsunit/{modules-skip-init1.js => modules-skip-init1.mjs} (83%) rename deps/v8/test/mjsunit/{modules-skip-init3.js => modules-skip-init3.mjs} (94%) rename deps/v8/test/mjsunit/{modules-skip-init4a.js => modules-skip-init4a.mjs} (84%) rename deps/v8/test/mjsunit/{modules-skip-init4b.js => modules-skip-init4b.mjs} (81%) rename deps/v8/test/mjsunit/{modules-skip-namespace.js => modules-skip-namespace.mjs} (73%) delete mode 100644 deps/v8/test/mjsunit/modules-skip-star-exports-conflict.js create mode 100644 deps/v8/test/mjsunit/modules-skip-star-exports-conflict.mjs delete mode 100644 deps/v8/test/mjsunit/modules-skip-star-exports-cycle.js create mode 100644 deps/v8/test/mjsunit/modules-skip-star-exports-cycle.mjs rename deps/v8/test/mjsunit/{modules-star-exports-cycle.js => modules-star-exports-cycle.mjs} (76%) rename deps/v8/test/mjsunit/{modules-this.js => modules-this.mjs} (94%) rename deps/v8/test/mjsunit/{modules-turbo1.js => modules-turbo1.mjs} (97%) rename deps/v8/test/mjsunit/{modules-turbo2.js => modules-turbo2.mjs} (97%) create mode 100644 
deps/v8/test/mjsunit/non-extensible-array-reduce.js create mode 100644 deps/v8/test/mjsunit/number-tostring-subnormal.js delete mode 100644 deps/v8/test/mjsunit/regress/regress-5767.js rename deps/v8/test/mjsunit/regress/{regress-6681.js => regress-6681.mjs} (82%) rename deps/v8/test/mjsunit/regress/{regress-791334.js => regress-791334.mjs} (95%) create mode 100644 deps/v8/test/mjsunit/regress/regress-8510.js create mode 100644 deps/v8/test/mjsunit/regress/regress-9383.js create mode 100644 deps/v8/test/mjsunit/regress/regress-9466.js create mode 100644 deps/v8/test/mjsunit/regress/regress-961709-classes-opt.js create mode 100644 deps/v8/test/mjsunit/regress/regress-961709-classes.js create mode 100644 deps/v8/test/mjsunit/regress/regress-977870.js create mode 100644 deps/v8/test/mjsunit/regress/regress-980891.js create mode 100644 deps/v8/test/mjsunit/regress/regress-981236.js create mode 100644 deps/v8/test/mjsunit/regress/regress-982702.js create mode 100644 deps/v8/test/mjsunit/regress/regress-crbug-397662.js create mode 100644 deps/v8/test/mjsunit/regress/regress-crbug-966450.js create mode 100644 deps/v8/test/mjsunit/regress/regress-crbug-967101.js create mode 100644 deps/v8/test/mjsunit/regress/regress-crbug-969368.js create mode 100644 deps/v8/test/mjsunit/regress/regress-crbug-969498.js create mode 100644 deps/v8/test/mjsunit/regress/regress-crbug-971782.js create mode 100644 deps/v8/test/mjsunit/regress/regress-crbug-976256.js create mode 100644 deps/v8/test/mjsunit/regress/regress-crbug-976598.js create mode 100644 deps/v8/test/mjsunit/regress/regress-crbug-976934.js create mode 100644 deps/v8/test/mjsunit/regress/regress-crbug-977012.js create mode 100644 deps/v8/test/mjsunit/regress/regress-crbug-977089.js create mode 100644 deps/v8/test/mjsunit/regress/regress-crbug-979023.js create mode 100644 deps/v8/test/mjsunit/regress/regress-crbug-979401.js create mode 100644 deps/v8/test/mjsunit/regress/regress-crbug-980168.js create mode 100644 deps/v8/test/mjsunit/regress/regress-crbug-980292.js create mode 100644 deps/v8/test/mjsunit/regress/regress-crbug-985660.js create mode 100644 deps/v8/test/mjsunit/regress/regress-v8-6515.js create mode 100644 deps/v8/test/mjsunit/regress/regress-v8-7848.js create mode 100644 deps/v8/test/mjsunit/regress/regress-v8-8770.js create mode 100644 deps/v8/test/mjsunit/regress/regress-v8-9394-2.js create mode 100644 deps/v8/test/mjsunit/regress/regress-v8-9394.js create mode 100644 deps/v8/test/mjsunit/regress/regress-v8-9460.js create mode 100644 deps/v8/test/mjsunit/regress/regress_967104.js create mode 100644 deps/v8/test/mjsunit/regress/wasm/regress-9425.js create mode 100644 deps/v8/test/mjsunit/regress/wasm/regress-9447.js create mode 100644 deps/v8/test/mjsunit/regress/wasm/regress-968078.js create mode 100644 deps/v8/test/mjsunit/regress/wasm/regress-980007.js create mode 100644 deps/v8/test/mjsunit/regress/wasm/regress-985154.js create mode 100644 deps/v8/test/mjsunit/sealed-array-reduce.js delete mode 100644 deps/v8/test/mjsunit/wasm/bulk-memory-spec/README delete mode 100644 deps/v8/test/mjsunit/wasm/bulk-memory-spec/binary.wast delete mode 100644 deps/v8/test/mjsunit/wasm/bulk-memory-spec/binary.wast.js delete mode 100644 deps/v8/test/mjsunit/wasm/bulk-memory-spec/bulk.wast delete mode 100644 deps/v8/test/mjsunit/wasm/bulk-memory-spec/bulk.wast.js delete mode 100644 deps/v8/test/mjsunit/wasm/bulk-memory-spec/custom.wast delete mode 100644 deps/v8/test/mjsunit/wasm/bulk-memory-spec/custom.wast.js delete mode 100644 
deps/v8/test/mjsunit/wasm/bulk-memory-spec/linking.wast delete mode 100644 deps/v8/test/mjsunit/wasm/bulk-memory-spec/linking.wast.js delete mode 100644 deps/v8/test/mjsunit/wasm/bulk-memory-spec/memory_copy.wast delete mode 100644 deps/v8/test/mjsunit/wasm/bulk-memory-spec/memory_copy.wast.js delete mode 100644 deps/v8/test/mjsunit/wasm/bulk-memory-spec/memory_fill.wast delete mode 100644 deps/v8/test/mjsunit/wasm/bulk-memory-spec/memory_fill.wast.js delete mode 100644 deps/v8/test/mjsunit/wasm/bulk-memory-spec/memory_init.wast delete mode 100644 deps/v8/test/mjsunit/wasm/bulk-memory-spec/memory_init.wast.js delete mode 100644 deps/v8/test/mjsunit/wasm/bulk-memory-spec/table_copy.wast delete mode 100644 deps/v8/test/mjsunit/wasm/bulk-memory-spec/table_copy.wast.js delete mode 100644 deps/v8/test/mjsunit/wasm/bulk-memory-spec/table_init.wast delete mode 100644 deps/v8/test/mjsunit/wasm/bulk-memory-spec/table_init.wast.js create mode 100644 deps/v8/test/mjsunit/wasm/export-identity.js create mode 100644 deps/v8/test/mjsunit/wasm/indirect-call-non-zero-table-interpreter.js create mode 100644 deps/v8/test/mjsunit/wasm/shared-memory-gc-stress.js create mode 100644 deps/v8/test/mjsunit/wasm/shared-memory-worker-explicit-gc-stress.js create mode 100644 deps/v8/test/mjsunit/wasm/shared-memory-worker-gc-stress.js create mode 100644 deps/v8/test/mjsunit/wasm/shared-memory-worker-gc.js create mode 100644 deps/v8/test/mjsunit/wasm/shared-memory-worker-stress.js create mode 100644 deps/v8/test/mjsunit/wasm/table-access-interpreter.js create mode 100644 deps/v8/test/mjsunit/wasm/table-copy-anyref.js create mode 100644 deps/v8/test/mjsunit/wasm/table-fill-interpreter.js create mode 100644 deps/v8/test/mjsunit/wasm/table-grow-from-wasm-interpreter.js create mode 100644 deps/v8/test/mjsunit/worker-ping-test.js create mode 100644 deps/v8/test/preparser/OWNERS create mode 100644 deps/v8/test/test262/OWNERS create mode 100644 deps/v8/test/torque/OWNERS create mode 100644 deps/v8/test/unittests/api/resource-constraints-unittest.cc create mode 100644 deps/v8/test/unittests/base/vlq-base64-unittest.cc create mode 100644 deps/v8/test/unittests/wasm/wasm-text-unittest.cc create mode 100644 deps/v8/test/wasm-api-tests/finalize.cc create mode 100644 deps/v8/test/wasm-api-tests/globals.cc create mode 100644 deps/v8/test/wasm-api-tests/memory.cc create mode 100644 deps/v8/test/wasm-api-tests/reflect.cc create mode 100644 deps/v8/test/wasm-api-tests/serialize.cc create mode 100644 deps/v8/test/wasm-api-tests/table.cc create mode 100644 deps/v8/test/wasm-api-tests/threads.cc create mode 100644 deps/v8/test/wasm-api-tests/traps.cc create mode 100644 deps/v8/test/wasm-api-tests/wasm-api-test.h create mode 100644 deps/v8/test/wasm-js/OWNERS rename deps/v8/{infra => }/testing/OWNERS (58%) create mode 100644 deps/v8/third_party/binutils/OWNERS create mode 100644 deps/v8/third_party/colorama/OWNERS rename deps/v8/{test/mjsunit/modules-skip-cycle.js => third_party/inspector_protocol/bindings/bindings.cc} (50%) create mode 100644 deps/v8/third_party/inspector_protocol/bindings/bindings.h create mode 100644 deps/v8/third_party/inspector_protocol/bindings/bindings_test.cc create mode 100644 deps/v8/third_party/inspector_protocol/bindings/bindings_test_helper.h delete mode 100644 deps/v8/third_party/inspector_protocol/inspector_protocol.gypi delete mode 100644 deps/v8/third_party/inspector_protocol/lib/Allocator_h.template delete mode 100644 deps/v8/third_party/inspector_protocol/lib/Array_h.template delete mode 100644 
deps/v8/third_party/inspector_protocol/lib/Maybe_h.template delete mode 100644 deps/v8/third_party/inspector_protocol/lib/encoding_cpp.template delete mode 100644 deps/v8/third_party/inspector_protocol/lib/encoding_h.template create mode 100644 deps/v8/third_party/v8/builtins/OWNERS create mode 100644 deps/v8/third_party/wasm-api/LICENSE create mode 100644 deps/v8/third_party/wasm-api/OWNERS create mode 100644 deps/v8/third_party/wasm-api/README.v8 create mode 100644 deps/v8/third_party/wasm-api/example/callback.c create mode 100644 deps/v8/third_party/wasm-api/example/callback.cc create mode 100644 deps/v8/third_party/wasm-api/example/callback.wasm create mode 100644 deps/v8/third_party/wasm-api/example/callback.wat create mode 100644 deps/v8/third_party/wasm-api/example/finalize.c create mode 100644 deps/v8/third_party/wasm-api/example/finalize.cc create mode 100644 deps/v8/third_party/wasm-api/example/finalize.wasm create mode 100644 deps/v8/third_party/wasm-api/example/finalize.wat create mode 100644 deps/v8/third_party/wasm-api/example/global.c create mode 100644 deps/v8/third_party/wasm-api/example/global.cc create mode 100644 deps/v8/third_party/wasm-api/example/global.wasm create mode 100644 deps/v8/third_party/wasm-api/example/global.wat create mode 100644 deps/v8/third_party/wasm-api/example/hello.c create mode 100644 deps/v8/third_party/wasm-api/example/hello.cc create mode 100644 deps/v8/third_party/wasm-api/example/hello.wasm create mode 100644 deps/v8/third_party/wasm-api/example/hello.wat create mode 100644 deps/v8/third_party/wasm-api/example/memory.c create mode 100644 deps/v8/third_party/wasm-api/example/memory.cc create mode 100644 deps/v8/third_party/wasm-api/example/memory.wasm create mode 100644 deps/v8/third_party/wasm-api/example/memory.wat create mode 100644 deps/v8/third_party/wasm-api/example/reflect.c create mode 100644 deps/v8/third_party/wasm-api/example/reflect.cc create mode 100644 deps/v8/third_party/wasm-api/example/reflect.wasm create mode 100644 deps/v8/third_party/wasm-api/example/reflect.wat create mode 100644 deps/v8/third_party/wasm-api/example/serialize.c create mode 100644 deps/v8/third_party/wasm-api/example/serialize.cc create mode 100644 deps/v8/third_party/wasm-api/example/serialize.wasm create mode 100644 deps/v8/third_party/wasm-api/example/serialize.wat create mode 100644 deps/v8/third_party/wasm-api/example/table.c create mode 100644 deps/v8/third_party/wasm-api/example/table.cc create mode 100644 deps/v8/third_party/wasm-api/example/table.wasm create mode 100644 deps/v8/third_party/wasm-api/example/table.wat create mode 100644 deps/v8/third_party/wasm-api/example/threads.c create mode 100644 deps/v8/third_party/wasm-api/example/threads.cc create mode 100644 deps/v8/third_party/wasm-api/example/threads.wasm create mode 100644 deps/v8/third_party/wasm-api/example/threads.wat create mode 100644 deps/v8/third_party/wasm-api/example/trap.c create mode 100644 deps/v8/third_party/wasm-api/example/trap.cc create mode 100644 deps/v8/third_party/wasm-api/example/trap.wasm create mode 100644 deps/v8/third_party/wasm-api/example/trap.wat create mode 100644 deps/v8/third_party/wasm-api/wasm.h create mode 100644 deps/v8/third_party/wasm-api/wasm.hh delete mode 100644 deps/v8/tools/testrunner/local/junit_output.py delete mode 100644 deps/v8/tools/torque/vscode-torque/.npmrc delete mode 100644 deps/v8/tools/torque/vscode-torque/README.md delete mode 100644 deps/v8/tools/torque/vscode-torque/language-configuration.json delete mode 100644 
deps/v8/tools/torque/vscode-torque/out/extension.js delete mode 100644 deps/v8/tools/torque/vscode-torque/package.json delete mode 100644 deps/v8/tools/torque/vscode-torque/src/extension.ts delete mode 100644 deps/v8/tools/torque/vscode-torque/syntaxes/torque.tmLanguage.json delete mode 100644 deps/v8/tools/torque/vscode-torque/tsconfig.json delete mode 100644 deps/v8/tools/torque/vscode-torque/tslint.json diff --git a/deps/v8/.flake8 b/deps/v8/.flake8 new file mode 100644 index 00000000000000..c58d00ca051b81 --- /dev/null +++ b/deps/v8/.flake8 @@ -0,0 +1,11 @@ +[flake8] +ignore = E111,E114,E310 # We use 2-space indentation +exclude = + ./third_party/, # third-party code + ./build/, # third-party code + ./buildtools/, # third-party code + ./tools/swarming_client/, # third-party code + ./test/wasm-js/, # third-party code + ./test/wasm-js/data/, # third-party code + ./test/test262/data/, # third-party code + ./test/test262/harness/, # third-party code diff --git a/deps/v8/.gitignore b/deps/v8/.gitignore index 6350393ebf376e..ce47fa36103f45 100644 --- a/deps/v8/.gitignore +++ b/deps/v8/.gitignore @@ -26,7 +26,6 @@ .ccls-cache .cpplint-cache .cproject -.d8_history .gclient_entries .gdb_history .landmines @@ -39,8 +38,7 @@ /build /buildtools /check-header-includes -/hydrogen.cfg -/obj +/Default/ /out /out.gn /perf.data @@ -72,6 +70,7 @@ /third_party/googletest/src/googletest/include/gtest/* !/third_party/googletest/src/googletest/include/gtest/gtest_prod.h !/third_party/v8 +!/third_party/wasm-api /tools/clang /tools/gcmole/gcmole-tools /tools/gcmole/gcmole-tools.tar.gz @@ -83,6 +82,9 @@ /tools/oom_dump/oom_dump /tools/oom_dump/oom_dump.o /tools/swarming_client +/tools/turbolizer/build +/tools/turbolizer/.rpt2_cache +/tools/turbolizer/deploy /tools/visual_studio/Debug /tools/visual_studio/Release /v8.log.ll @@ -94,23 +96,15 @@ GTAGS TAGS bsuite compile_commands.json -!/test/mjsunit/d8 -d8_g gccauses gcsuspects gtags.files -shell -shell_g +node_modules tags turbo*.cfg turbo*.dot turbo*.json v8.ignition_dispatches_table.json -/Default/ -node_modules -tools/turbolizer/build -tools/turbolizer/.rpt2_cache -tools/turbolizer/deploy !/third_party/jinja2 !/third_party/markupsafe diff --git a/deps/v8/AUTHORS b/deps/v8/AUTHORS index 5a8628b4cb2a77..827d124b0dcf0d 100644 --- a/deps/v8/AUTHORS +++ b/deps/v8/AUTHORS @@ -128,17 +128,20 @@ Matthew Sporleder Maxim Mazurok Maxim Mossienko Michael Lutz +Michael Mclaughlin Michael Smith Michaël Zasso Mike Gilbert Mike Pennisi Mikhail Gusarov Milton Chiang +Mu Tao Myeong-bo Shim Nicolas Antonius Ernst Leopold Maria Kaiser Niklas Hambüchen Noj Vek Oleksandr Chekhovskyi +Oliver Dunk Paolo Giarrusso Patrick Gansterer Peng Fei @@ -160,6 +163,7 @@ Rob Wu Robert Meijer Robert Mustacchi Robert Nagy +Ross Kirsling Ruben Bridgewater Ryan Dahl Sakthipriyan Vairamani (thefourtheye) @@ -168,6 +172,7 @@ Sandro Santilli Sanjoy Das Seo Sanghyeon Shawn Anastasio +Shawn Presser Stefan Penner Sylvestre Ledru Taketoshi Aono diff --git a/deps/v8/BUILD.gn b/deps/v8/BUILD.gn index 8640517ae5c23d..efca4a626f1633 100644 --- a/deps/v8/BUILD.gn +++ b/deps/v8/BUILD.gn @@ -225,7 +225,7 @@ if (v8_enable_snapshot_native_code_counters == "") { v8_enable_snapshot_native_code_counters = v8_enable_debugging_features } if (v8_enable_shared_ro_heap == "") { - v8_enable_shared_ro_heap = v8_enable_lite_mode + v8_enable_shared_ro_heap = !v8_enable_pointer_compression && v8_use_snapshot } if (v8_enable_fast_torque == "") { v8_enable_fast_torque = v8_enable_fast_mksnapshot @@ -242,6 +242,8 @@ 
assert(!v8_enable_lite_mode || v8_use_snapshot, assert( !v8_enable_pointer_compression || !v8_enable_shared_ro_heap, "Pointer compression is not supported with shared read-only heap enabled") +assert(v8_use_snapshot || !v8_enable_shared_ro_heap, + "Shared read-only heap requires snapshot") v8_random_seed = "314159265" v8_toolset_for_shell = "host" @@ -408,6 +410,7 @@ config("features") { if (v8_enable_test_features) { defines += [ "V8_ENABLE_ALLOCATION_TIMEOUT" ] defines += [ "V8_ENABLE_FORCE_SLOW_PATH" ] + defines += [ "V8_ENABLE_DOUBLE_CONST_STORE_CHECK" ] } if (v8_enable_i18n_support) { defines += [ "V8_INTL_SUPPORT" ] @@ -940,6 +943,7 @@ torque_files = [ "src/builtins/array-unshift.tq", "src/builtins/array.tq", "src/builtins/base.tq", + "src/builtins/bigint.tq", "src/builtins/boolean.tq", "src/builtins/collections.tq", "src/builtins/data-view.tq", @@ -950,13 +954,20 @@ torque_files = [ "src/builtins/iterator.tq", "src/builtins/math.tq", "src/builtins/object-fromentries.tq", + "src/builtins/object.tq", "src/builtins/proxy-constructor.tq", + "src/builtins/proxy-delete-property.tq", "src/builtins/proxy-get-property.tq", + "src/builtins/proxy-get-prototype-of.tq", "src/builtins/proxy-has-property.tq", + "src/builtins/proxy-is-extensible.tq", + "src/builtins/proxy-prevent-extensions.tq", "src/builtins/proxy-revocable.tq", "src/builtins/proxy-revoke.tq", "src/builtins/proxy-set-property.tq", + "src/builtins/proxy-set-prototype-of.tq", "src/builtins/proxy.tq", + "src/builtins/reflect.tq", "src/builtins/regexp-replace.tq", "src/builtins/regexp.tq", "src/builtins/string.tq", @@ -988,57 +999,6 @@ if (!v8_enable_i18n_support) { torque_files -= [ "src/objects/intl-objects.tq" ] } -torque_namespaces = [ - "arguments", - "array", - "array-copywithin", - "array-filter", - "array-find", - "array-findindex", - "array-foreach", - "array-join", - "array-map", - "array-of", - "array-reverse", - "array-shift", - "array-slice", - "array-splice", - "array-unshift", - "array-lastindexof", - "base", - "boolean", - "collections", - "data-view", - "extras-utils", - "growable-fixed-array", - "internal-coverage", - "iterator", - "math", - "object", - "proxy", - "regexp", - "regexp-replace", - "string", - "string-html", - "string-iterator", - "string-repeat", - "string-slice", - "string-substring", - "test", - "typed-array", - "typed-array-createtypedarray", - "typed-array-every", - "typed-array-filter", - "typed-array-find", - "typed-array-findindex", - "typed-array-foreach", - "typed-array-reduce", - "typed-array-reduceright", - "typed-array-slice", - "typed-array-some", - "typed-array-subarray", -] - action("run_torque") { visibility = [ ":*", @@ -1066,11 +1026,13 @@ action("run_torque") { "$target_gen_dir/torque-generated/exported-macros-assembler-tq.cc", "$target_gen_dir/torque-generated/exported-macros-assembler-tq.h", "$target_gen_dir/torque-generated/csa-types-tq.h", + "$target_gen_dir/torque-generated/instance-types-tq.h", ] - foreach(namespace, torque_namespaces) { + foreach(file, torque_files) { + filetq = string_replace(file, ".tq", "-tq-csa") outputs += [ - "$target_gen_dir/torque-generated/builtins-$namespace-gen-tq.cc", - "$target_gen_dir/torque-generated/builtins-$namespace-gen-tq.h", + "$target_gen_dir/torque-generated/$filetq.cc", + "$target_gen_dir/torque-generated/$filetq.h", ] } @@ -1080,11 +1042,10 @@ action("run_torque") { root_build_dir), "-o", rebase_path("$target_gen_dir/torque-generated", root_build_dir), + "-v8-root", + rebase_path(".", root_build_dir), ] - - foreach(file, torque_files) 
{ - args += [ rebase_path(file, root_build_dir) ] - } + args += torque_files } group("v8_maybe_icu") { @@ -1112,10 +1073,11 @@ v8_source_set("torque_generated_initializers") { "$target_gen_dir/torque-generated/exported-macros-assembler-tq.cc", "$target_gen_dir/torque-generated/exported-macros-assembler-tq.h", ] - foreach(namespace, torque_namespaces) { + foreach(file, torque_files) { + filetq = string_replace(file, ".tq", "-tq-csa") sources += [ - "$target_gen_dir/torque-generated/builtins-$namespace-gen-tq.cc", - "$target_gen_dir/torque-generated/builtins-$namespace-gen-tq.h", + "$target_gen_dir/torque-generated/$filetq.cc", + "$target_gen_dir/torque-generated/$filetq.h", ] } @@ -1515,6 +1477,7 @@ v8_source_set("v8_initializers") { "src/builtins/builtins-async-generator-gen.cc", "src/builtins/builtins-async-iterator-gen.cc", "src/builtins/builtins-bigint-gen.cc", + "src/builtins/builtins-bigint-gen.h", "src/builtins/builtins-boolean-gen.cc", "src/builtins/builtins-call-gen.cc", "src/builtins/builtins-call-gen.h", @@ -1776,6 +1739,8 @@ v8_compiler_sources = [ "src/compiler/control-equivalence.h", "src/compiler/control-flow-optimizer.cc", "src/compiler/control-flow-optimizer.h", + "src/compiler/csa-load-elimination.cc", + "src/compiler/csa-load-elimination.h", "src/compiler/dead-code-elimination.cc", "src/compiler/dead-code-elimination.h", "src/compiler/decompression-elimination.cc", @@ -1913,6 +1878,8 @@ v8_compiler_sources = [ "src/compiler/state-values-utils.h", "src/compiler/store-store-elimination.cc", "src/compiler/store-store-elimination.h", + "src/compiler/add-type-assertions-reducer.cc", + "src/compiler/add-type-assertions-reducer.h", "src/compiler/type-cache.cc", "src/compiler/type-cache.h", "src/compiler/type-narrowing-reducer.cc", @@ -2123,6 +2090,8 @@ v8_source_set("v8_base_without_compiler") { "src/codegen/macro-assembler.h", "src/codegen/optimized-compilation-info.cc", "src/codegen/optimized-compilation-info.h", + "src/codegen/pending-optimization-table.cc", + "src/codegen/pending-optimization-table.h", "src/codegen/register-arch.h", "src/codegen/register-configuration.cc", "src/codegen/register-configuration.h", @@ -2139,6 +2108,8 @@ v8_source_set("v8_base_without_compiler") { "src/codegen/source-position.h", "src/codegen/string-constants.cc", "src/codegen/string-constants.h", + "src/codegen/tick-counter.cc", + "src/codegen/tick-counter.h", "src/codegen/turbo-assembler.cc", "src/codegen/turbo-assembler.h", "src/codegen/unoptimized-compilation-info.cc", @@ -2148,7 +2119,6 @@ v8_source_set("v8_base_without_compiler") { "src/common/checks.h", "src/common/ptr-compr-inl.h", "src/common/ptr-compr.h", - "src/common/v8memory.h", "src/compiler-dispatcher/compiler-dispatcher.cc", "src/compiler-dispatcher/compiler-dispatcher.h", "src/compiler-dispatcher/optimizing-compile-dispatcher.cc", @@ -2212,8 +2182,11 @@ v8_source_set("v8_base_without_compiler") { "src/execution/frames.h", "src/execution/futex-emulation.cc", "src/execution/futex-emulation.h", + "src/execution/interrupts-scope.cc", + "src/execution/interrupts-scope.h", "src/execution/isolate-data.h", "src/execution/isolate-inl.h", + "src/execution/isolate-utils.h", "src/execution/isolate.cc", "src/execution/isolate.h", "src/execution/message-template.h", @@ -2226,6 +2199,8 @@ v8_source_set("v8_base_without_compiler") { "src/execution/simulator-base.cc", "src/execution/simulator-base.h", "src/execution/simulator.h", + "src/execution/stack-guard.cc", + "src/execution/stack-guard.h", "src/execution/thread-id.cc", 
"src/execution/thread-id.h", "src/execution/thread-local-top.cc", @@ -2234,6 +2209,8 @@ v8_source_set("v8_base_without_compiler") { "src/execution/v8threads.h", "src/execution/vm-state-inl.h", "src/execution/vm-state.h", + "src/extensions/cputracemark-extension.cc", + "src/extensions/cputracemark-extension.h", "src/extensions/externalize-string-extension.cc", "src/extensions/externalize-string-extension.h", "src/extensions/free-buffer-extension.cc", @@ -2262,6 +2239,8 @@ v8_source_set("v8_base_without_compiler") { "src/heap/array-buffer-tracker.cc", "src/heap/array-buffer-tracker.h", "src/heap/barrier.h", + "src/heap/basic-memory-chunk.cc", + "src/heap/basic-memory-chunk.h", "src/heap/code-stats.cc", "src/heap/code-stats.h", "src/heap/combined-heap.cc", @@ -2308,6 +2287,7 @@ v8_source_set("v8_base_without_compiler") { "src/heap/objects-visiting-inl.h", "src/heap/objects-visiting.cc", "src/heap/objects-visiting.h", + "src/heap/read-only-heap-inl.h", "src/heap/read-only-heap.cc", "src/heap/read-only-heap.h", "src/heap/remembered-set.h", @@ -2623,6 +2603,8 @@ v8_source_set("v8_base_without_compiler") { "src/objects/slots-atomic-inl.h", "src/objects/slots-inl.h", "src/objects/slots.h", + "src/objects/source-text-module.cc", + "src/objects/source-text-module.h", "src/objects/stack-frame-info-inl.h", "src/objects/stack-frame-info.cc", "src/objects/stack-frame-info.h", @@ -2635,6 +2617,10 @@ v8_source_set("v8_base_without_compiler") { "src/objects/string.h", "src/objects/struct-inl.h", "src/objects/struct.h", + "src/objects/synthetic-module.cc", + "src/objects/synthetic-module.h", + "src/objects/tagged-field-inl.h", + "src/objects/tagged-field.h", "src/objects/tagged-impl-inl.h", "src/objects/tagged-impl.cc", "src/objects/tagged-impl.h", @@ -2709,23 +2695,27 @@ v8_source_set("v8_base_without_compiler") { "src/profiler/tick-sample.h", "src/profiler/tracing-cpu-profiler.cc", "src/profiler/tracing-cpu-profiler.h", - "src/regexp/bytecodes-irregexp.h", - "src/regexp/interpreter-irregexp.cc", - "src/regexp/interpreter-irregexp.h", - "src/regexp/jsregexp-inl.h", - "src/regexp/jsregexp.cc", - "src/regexp/jsregexp.h", "src/regexp/property-sequences.cc", "src/regexp/property-sequences.h", "src/regexp/regexp-ast.cc", "src/regexp/regexp-ast.h", - "src/regexp/regexp-macro-assembler-irregexp-inl.h", - "src/regexp/regexp-macro-assembler-irregexp.cc", - "src/regexp/regexp-macro-assembler-irregexp.h", + "src/regexp/regexp-bytecode-generator-inl.h", + "src/regexp/regexp-bytecode-generator.cc", + "src/regexp/regexp-bytecode-generator.h", + "src/regexp/regexp-bytecodes.h", + "src/regexp/regexp-compiler-tonode.cc", + "src/regexp/regexp-compiler.cc", + "src/regexp/regexp-compiler.h", + "src/regexp/regexp-dotprinter.cc", + "src/regexp/regexp-dotprinter.h", + "src/regexp/regexp-interpreter.cc", + "src/regexp/regexp-interpreter.h", + "src/regexp/regexp-macro-assembler-arch.h", "src/regexp/regexp-macro-assembler-tracer.cc", "src/regexp/regexp-macro-assembler-tracer.h", "src/regexp/regexp-macro-assembler.cc", "src/regexp/regexp-macro-assembler.h", + "src/regexp/regexp-nodes.h", "src/regexp/regexp-parser.cc", "src/regexp/regexp-parser.h", "src/regexp/regexp-special-case.h", @@ -2733,6 +2723,8 @@ v8_source_set("v8_base_without_compiler") { "src/regexp/regexp-stack.h", "src/regexp/regexp-utils.cc", "src/regexp/regexp-utils.h", + "src/regexp/regexp.cc", + "src/regexp/regexp.h", "src/roots/roots-inl.h", "src/roots/roots.cc", "src/roots/roots.h", @@ -2866,8 +2858,6 @@ v8_source_set("v8_base_without_compiler") { 
"src/utils/ostreams.cc", "src/utils/ostreams.h", "src/utils/pointer-with-payload.h", - "src/utils/splay-tree-inl.h", - "src/utils/splay-tree.h", "src/utils/utils-inl.h", "src/utils/utils.cc", "src/utils/utils.h", @@ -2889,7 +2879,6 @@ v8_source_set("v8_base_without_compiler") { "src/wasm/function-compiler.h", "src/wasm/graph-builder-interface.cc", "src/wasm/graph-builder-interface.h", - "src/wasm/js-to-wasm-wrapper-cache.h", "src/wasm/jump-table-assembler.cc", "src/wasm/jump-table-assembler.h", "src/wasm/leb-helper.h", @@ -2909,6 +2898,7 @@ v8_source_set("v8_base_without_compiler") { "src/wasm/streaming-decoder.cc", "src/wasm/streaming-decoder.h", "src/wasm/value-type.h", + "src/wasm/wasm-arguments.h", "src/wasm/wasm-code-manager.cc", "src/wasm/wasm-code-manager.h", "src/wasm/wasm-constants.h", @@ -2956,7 +2946,6 @@ v8_source_set("v8_base_without_compiler") { "src/zone/zone-list-inl.h", "src/zone/zone-segment.cc", "src/zone/zone-segment.h", - "src/zone/zone-splay-tree.h", "src/zone/zone.cc", "src/zone/zone.h", ] @@ -3348,6 +3337,7 @@ v8_source_set("torque_base") { "src/torque/declarations.h", "src/torque/earley-parser.cc", "src/torque/earley-parser.h", + "src/torque/global-context.cc", "src/torque/global-context.h", "src/torque/implementation-visitor.cc", "src/torque/implementation-visitor.h", @@ -3379,6 +3369,9 @@ v8_source_set("torque_base") { ":v8_libbase", ] + # The use of exceptions for Torque in violation of the Chromium style-guide + # is justified by the fact that it is only used from the non-essential + # language server and can be removed anytime if it causes problems. configs = [ ":internal_config", "//build/config/compiler:exceptions", @@ -3421,6 +3414,9 @@ v8_source_set("torque_ls_base") { ":torque_base", ] + # The use of exceptions for Torque in violation of the Chromium style-guide + # is justified by the fact that it is only used from the non-essential + # language server and can be removed anytime if it causes problems. 
configs = [ ":internal_config", "//build/config/compiler:exceptions", @@ -3476,7 +3472,9 @@ v8_component("v8_libbase") { "src/base/list.h", "src/base/logging.cc", "src/base/logging.h", + "src/base/lsan.h", "src/base/macros.h", + "src/base/memory.h", "src/base/once.cc", "src/base/once.h", "src/base/optional.h", @@ -3506,6 +3504,8 @@ v8_component("v8_libbase") { "src/base/type-traits.h", "src/base/utils/random-number-generator.cc", "src/base/utils/random-number-generator.h", + "src/base/vlq-base64.cc", + "src/base/vlq-base64.h", ] configs = [ ":internal_config_base" ] @@ -3671,21 +3671,15 @@ v8_component("v8_libplatform") { sources += [ "src/libplatform/tracing/json-trace-event-listener.cc", "src/libplatform/tracing/json-trace-event-listener.h", - "src/libplatform/tracing/perfetto-consumer.cc", - "src/libplatform/tracing/perfetto-consumer.h", - "src/libplatform/tracing/perfetto-producer.cc", - "src/libplatform/tracing/perfetto-producer.h", - "src/libplatform/tracing/perfetto-shared-memory.cc", - "src/libplatform/tracing/perfetto-shared-memory.h", - "src/libplatform/tracing/perfetto-tasks.cc", - "src/libplatform/tracing/perfetto-tasks.h", - "src/libplatform/tracing/perfetto-tracing-controller.cc", - "src/libplatform/tracing/perfetto-tracing-controller.h", + "src/libplatform/tracing/trace-event-listener.cc", "src/libplatform/tracing/trace-event-listener.h", ] deps += [ - "//third_party/perfetto:libperfetto", + "//third_party/perfetto/protos/perfetto/trace:lite", "//third_party/perfetto/protos/perfetto/trace/chrome:minimal_complete_lite", + "//third_party/perfetto/protos/perfetto/trace/chrome:zero", + "//third_party/perfetto/src/tracing:client_api", + "//third_party/perfetto/src/tracing:platform_posix", ] } } @@ -3846,6 +3840,9 @@ if (current_toolchain == v8_snapshot_toolchain) { "//build/win:default_exe_manifest", ] + # The use of exceptions for Torque in violation of the Chromium style-guide + # is justified by the fact that it is only used from the non-essential + # language server and can be removed anytime if it causes problems. configs = [ ":internal_config", "//build/config/compiler:exceptions", @@ -3876,6 +3873,9 @@ v8_executable("torque-language-server") { "//build/win:default_exe_manifest", ] + # The use of exceptions for Torque in violation of the Chromium style-guide + # is justified by the fact that it is only used from the non-essential + # language server and can be removed anytime if it causes problems. configs = [ ":internal_config", "//build/config/compiler:exceptions", @@ -3892,48 +3892,51 @@ v8_executable("torque-language-server") { } } -if (current_toolchain == v8_generator_toolchain) { - v8_executable("gen-regexp-special-case") { - visibility = [ ":*" ] # Only targets in this file can depend on this. +if (v8_enable_i18n_support) { + if (current_toolchain == v8_generator_toolchain) { + v8_executable("gen-regexp-special-case") { + visibility = [ ":*" ] # Only targets in this file can depend on this. - sources = [ - "src/regexp/gen-regexp-special-case.cc", - ] + sources = [ + "src/regexp/gen-regexp-special-case.cc", + ] - deps = [ - ":v8_libbase", - "//build/win:default_exe_manifest", - "//third_party/icu", - ] + deps = [ + ":v8_libbase", + "//build/win:default_exe_manifest", + "//third_party/icu", + ] - configs = [ ":internal_config" ] + configs = [ ":internal_config" ] + } } -} -action("run_gen-regexp-special-case") { - visibility = [ ":*" ] # Only targets in this file can depend on this. 
+ action("run_gen-regexp-special-case") { + visibility = [ ":*" ] # Only targets in this file can depend on this. - script = "tools/run.py" + script = "tools/run.py" - sources = v8_extra_library_files + sources = v8_extra_library_files - deps = [ - ":gen-regexp-special-case($v8_generator_toolchain)", - ] + deps = [ + ":gen-regexp-special-case($v8_generator_toolchain)", + ] - output_file = "$target_gen_dir/src/regexp/special-case.cc" + output_file = "$target_gen_dir/src/regexp/special-case.cc" - outputs = [ - output_file, - ] + outputs = [ + output_file, + ] - args = [ - "./" + rebase_path( - get_label_info(":gen-regexp-special-case($v8_generator_toolchain)", - "root_out_dir") + "/gen-regexp-special-case", - root_build_dir), - rebase_path(output_file, root_build_dir), - ] + args = [ + "./" + rebase_path( + get_label_info( + ":gen-regexp-special-case($v8_generator_toolchain)", + "root_out_dir") + "/gen-regexp-special-case", + root_build_dir), + rebase_path(output_file, root_build_dir), + ] + } } ############################################################################### @@ -4146,6 +4149,10 @@ v8_executable("d8") { if (v8_enable_vtunejit) { deps += [ "src/third_party/vtune:v8_vtune" ] } + + if (v8_use_perfetto) { + deps += [ "//third_party/perfetto/include/perfetto/tracing" ] + } } v8_executable("v8_hello_world") { @@ -4451,7 +4458,6 @@ group("v8_generated_cc_files") { ":js2c_extras", ":run_torque", "src/inspector:v8_generated_cc_files", - "test/cctest:v8_generated_cc_files", ] } diff --git a/deps/v8/ChangeLog b/deps/v8/ChangeLog index c21ac1176032d5..27afc18a5117cd 100644 --- a/deps/v8/ChangeLog +++ b/deps/v8/ChangeLog @@ -1,3 +1,1538 @@ +2019-07-23: Version 7.7.299 + + Performance and stability improvements on all platforms. + + +2019-07-23: Version 7.7.298 + + Performance and stability improvements on all platforms. + + +2019-07-23: Version 7.7.297 + + Performance and stability improvements on all platforms. + + +2019-07-23: Version 7.7.296 + + Performance and stability improvements on all platforms. + + +2019-07-22: Version 7.7.295 + + Performance and stability improvements on all platforms. + + +2019-07-22: Version 7.7.294 + + Performance and stability improvements on all platforms. + + +2019-07-22: Version 7.7.293 + + Performance and stability improvements on all platforms. + + +2019-07-22: Version 7.7.292 + + Performance and stability improvements on all platforms. + + +2019-07-22: Version 7.7.291 + + Performance and stability improvements on all platforms. + + +2019-07-21: Version 7.7.290 + + Performance and stability improvements on all platforms. + + +2019-07-20: Version 7.7.289 + + Performance and stability improvements on all platforms. + + +2019-07-20: Version 7.7.288 + + Performance and stability improvements on all platforms. + + +2019-07-19: Version 7.7.287 + + Performance and stability improvements on all platforms. + + +2019-07-19: Version 7.7.286 + + Performance and stability improvements on all platforms. + + +2019-07-19: Version 7.7.285 + + Performance and stability improvements on all platforms. + + +2019-07-19: Version 7.7.284 + + Performance and stability improvements on all platforms. + + +2019-07-19: Version 7.7.283 + + Performance and stability improvements on all platforms. + + +2019-07-19: Version 7.7.282 + + Performance and stability improvements on all platforms. + + +2019-07-19: Version 7.7.281 + + Performance and stability improvements on all platforms. + + +2019-07-19: Version 7.7.280 + + Performance and stability improvements on all platforms. 
+ + +2019-07-19: Version 7.7.279 + + Performance and stability improvements on all platforms. + + +2019-07-19: Version 7.7.278 + + Performance and stability improvements on all platforms. + + +2019-07-19: Version 7.7.277 + + Performance and stability improvements on all platforms. + + +2019-07-18: Version 7.7.276 + + Performance and stability improvements on all platforms. + + +2019-07-18: Version 7.7.275 + + Performance and stability improvements on all platforms. + + +2019-07-18: Version 7.7.274 + + Performance and stability improvements on all platforms. + + +2019-07-18: Version 7.7.273 + + Performance and stability improvements on all platforms. + + +2019-07-18: Version 7.7.272 + + Performance and stability improvements on all platforms. + + +2019-07-18: Version 7.7.271 + + Performance and stability improvements on all platforms. + + +2019-07-18: Version 7.7.270 + + Performance and stability improvements on all platforms. + + +2019-07-18: Version 7.7.269 + + Performance and stability improvements on all platforms. + + +2019-07-17: Version 7.7.268 + + Performance and stability improvements on all platforms. + + +2019-07-17: Version 7.7.267 + + Performance and stability improvements on all platforms. + + +2019-07-17: Version 7.7.266 + + Performance and stability improvements on all platforms. + + +2019-07-17: Version 7.7.265 + + Performance and stability improvements on all platforms. + + +2019-07-17: Version 7.7.264 + + Performance and stability improvements on all platforms. + + +2019-07-17: Version 7.7.263 + + Performance and stability improvements on all platforms. + + +2019-07-17: Version 7.7.262 + + Performance and stability improvements on all platforms. + + +2019-07-17: Version 7.7.261 + + Performance and stability improvements on all platforms. + + +2019-07-17: Version 7.7.260 + + Performance and stability improvements on all platforms. + + +2019-07-17: Version 7.7.259 + + Performance and stability improvements on all platforms. + + +2019-07-17: Version 7.7.258 + + Performance and stability improvements on all platforms. + + +2019-07-17: Version 7.7.257 + + Performance and stability improvements on all platforms. + + +2019-07-17: Version 7.7.256 + + Performance and stability improvements on all platforms. + + +2019-07-17: Version 7.7.255 + + Performance and stability improvements on all platforms. + + +2019-07-16: Version 7.7.254 + + Performance and stability improvements on all platforms. + + +2019-07-16: Version 7.7.253 + + Performance and stability improvements on all platforms. + + +2019-07-16: Version 7.7.252 + + Performance and stability improvements on all platforms. + + +2019-07-16: Version 7.7.251 + + Performance and stability improvements on all platforms. + + +2019-07-16: Version 7.7.250 + + Performance and stability improvements on all platforms. + + +2019-07-16: Version 7.7.249 + + Performance and stability improvements on all platforms. + + +2019-07-16: Version 7.7.248 + + Performance and stability improvements on all platforms. + + +2019-07-16: Version 7.7.247 + + Performance and stability improvements on all platforms. + + +2019-07-16: Version 7.7.246 + + Performance and stability improvements on all platforms. + + +2019-07-16: Version 7.7.245 + + Performance and stability improvements on all platforms. + + +2019-07-16: Version 7.7.244 + + Performance and stability improvements on all platforms. + + +2019-07-16: Version 7.7.243 + + Performance and stability improvements on all platforms. 
+ + +2019-07-16: Version 7.7.242 + + Performance and stability improvements on all platforms. + + +2019-07-15: Version 7.7.241 + + Performance and stability improvements on all platforms. + + +2019-07-15: Version 7.7.240 + + Performance and stability improvements on all platforms. + + +2019-07-15: Version 7.7.239 + + Performance and stability improvements on all platforms. + + +2019-07-15: Version 7.7.238 + + Performance and stability improvements on all platforms. + + +2019-07-15: Version 7.7.237 + + Performance and stability improvements on all platforms. + + +2019-07-15: Version 7.7.236 + + Performance and stability improvements on all platforms. + + +2019-07-15: Version 7.7.235 + + Performance and stability improvements on all platforms. + + +2019-07-15: Version 7.7.234 + + Performance and stability improvements on all platforms. + + +2019-07-15: Version 7.7.233 + + Performance and stability improvements on all platforms. + + +2019-07-15: Version 7.7.232 + + Performance and stability improvements on all platforms. + + +2019-07-15: Version 7.7.231 + + Performance and stability improvements on all platforms. + + +2019-07-14: Version 7.7.230 + + Performance and stability improvements on all platforms. + + +2019-07-14: Version 7.7.229 + + Performance and stability improvements on all platforms. + + +2019-07-13: Version 7.7.228 + + Performance and stability improvements on all platforms. + + +2019-07-13: Version 7.7.227 + + Performance and stability improvements on all platforms. + + +2019-07-13: Version 7.7.226 + + Performance and stability improvements on all platforms. + + +2019-07-12: Version 7.7.225 + + Performance and stability improvements on all platforms. + + +2019-07-12: Version 7.7.224 + + Performance and stability improvements on all platforms. + + +2019-07-12: Version 7.7.223 + + Performance and stability improvements on all platforms. + + +2019-07-12: Version 7.7.222 + + Performance and stability improvements on all platforms. + + +2019-07-12: Version 7.7.221 + + Performance and stability improvements on all platforms. + + +2019-07-12: Version 7.7.220 + + Performance and stability improvements on all platforms. + + +2019-07-12: Version 7.7.219 + + Performance and stability improvements on all platforms. + + +2019-07-12: Version 7.7.218 + + Performance and stability improvements on all platforms. + + +2019-07-11: Version 7.7.217 + + Performance and stability improvements on all platforms. + + +2019-07-11: Version 7.7.216 + + Performance and stability improvements on all platforms. + + +2019-07-11: Version 7.7.215 + + Performance and stability improvements on all platforms. + + +2019-07-10: Version 7.7.214 + + Performance and stability improvements on all platforms. + + +2019-07-09: Version 7.7.213 + + Performance and stability improvements on all platforms. + + +2019-07-09: Version 7.7.212 + + Performance and stability improvements on all platforms. + + +2019-07-09: Version 7.7.211 + + Performance and stability improvements on all platforms. + + +2019-07-09: Version 7.7.210 + + Performance and stability improvements on all platforms. + + +2019-07-09: Version 7.7.209 + + Performance and stability improvements on all platforms. + + +2019-07-09: Version 7.7.208 + + Performance and stability improvements on all platforms. + + +2019-07-09: Version 7.7.207 + + Performance and stability improvements on all platforms. + + +2019-07-09: Version 7.7.206 + + Performance and stability improvements on all platforms. 
+ + +2019-07-08: Version 7.7.205 + + Performance and stability improvements on all platforms. + + +2019-07-08: Version 7.7.204 + + Performance and stability improvements on all platforms. + + +2019-07-08: Version 7.7.203 + + Performance and stability improvements on all platforms. + + +2019-07-08: Version 7.7.202 + + Performance and stability improvements on all platforms. + + +2019-07-08: Version 7.7.201 + + Performance and stability improvements on all platforms. + + +2019-07-08: Version 7.7.200 + + Performance and stability improvements on all platforms. + + +2019-07-08: Version 7.7.199 + + Performance and stability improvements on all platforms. + + +2019-07-08: Version 7.7.198 + + Performance and stability improvements on all platforms. + + +2019-07-08: Version 7.7.197 + + Performance and stability improvements on all platforms. + + +2019-07-08: Version 7.7.196 + + Performance and stability improvements on all platforms. + + +2019-07-08: Version 7.7.195 + + Performance and stability improvements on all platforms. + + +2019-07-08: Version 7.7.194 + + Performance and stability improvements on all platforms. + + +2019-07-08: Version 7.7.193 + + Performance and stability improvements on all platforms. + + +2019-07-06: Version 7.7.192 + + Performance and stability improvements on all platforms. + + +2019-07-06: Version 7.7.191 + + Performance and stability improvements on all platforms. + + +2019-07-05: Version 7.7.190 + + Performance and stability improvements on all platforms. + + +2019-07-05: Version 7.7.189 + + Performance and stability improvements on all platforms. + + +2019-07-05: Version 7.7.188 + + Performance and stability improvements on all platforms. + + +2019-07-05: Version 7.7.187 + + Performance and stability improvements on all platforms. + + +2019-07-05: Version 7.7.186 + + Performance and stability improvements on all platforms. + + +2019-07-03: Version 7.7.185 + + Performance and stability improvements on all platforms. + + +2019-07-03: Version 7.7.184 + + Performance and stability improvements on all platforms. + + +2019-07-03: Version 7.7.183 + + Performance and stability improvements on all platforms. + + +2019-07-03: Version 7.7.182 + + Performance and stability improvements on all platforms. + + +2019-07-03: Version 7.7.181 + + Performance and stability improvements on all platforms. + + +2019-07-03: Version 7.7.180 + + Performance and stability improvements on all platforms. + + +2019-07-02: Version 7.7.179 + + Performance and stability improvements on all platforms. + + +2019-07-02: Version 7.7.178 + + Performance and stability improvements on all platforms. + + +2019-07-02: Version 7.7.177 + + Performance and stability improvements on all platforms. + + +2019-07-02: Version 7.7.176 + + Performance and stability improvements on all platforms. + + +2019-07-02: Version 7.7.175 + + Performance and stability improvements on all platforms. + + +2019-07-02: Version 7.7.174 + + Performance and stability improvements on all platforms. + + +2019-07-02: Version 7.7.173 + + Performance and stability improvements on all platforms. + + +2019-07-02: Version 7.7.172 + + Performance and stability improvements on all platforms. + + +2019-07-01: Version 7.7.171 + + Performance and stability improvements on all platforms. + + +2019-07-01: Version 7.7.170 + + Performance and stability improvements on all platforms. + + +2019-07-01: Version 7.7.169 + + Performance and stability improvements on all platforms. 
+ + +2019-07-01: Version 7.7.168 + + Performance and stability improvements on all platforms. + + +2019-07-01: Version 7.7.167 + + Performance and stability improvements on all platforms. + + +2019-06-28: Version 7.7.166 + + Performance and stability improvements on all platforms. + + +2019-06-28: Version 7.7.165 + + Performance and stability improvements on all platforms. + + +2019-06-28: Version 7.7.164 + + Performance and stability improvements on all platforms. + + +2019-06-27: Version 7.7.163 + + Performance and stability improvements on all platforms. + + +2019-06-27: Version 7.7.162 + + Performance and stability improvements on all platforms. + + +2019-06-27: Version 7.7.161 + + Performance and stability improvements on all platforms. + + +2019-06-27: Version 7.7.160 + + Performance and stability improvements on all platforms. + + +2019-06-27: Version 7.7.159 + + Performance and stability improvements on all platforms. + + +2019-06-27: Version 7.7.158 + + Performance and stability improvements on all platforms. + + +2019-06-27: Version 7.7.157 + + Performance and stability improvements on all platforms. + + +2019-06-27: Version 7.7.156 + + Performance and stability improvements on all platforms. + + +2019-06-27: Version 7.7.155 + + Performance and stability improvements on all platforms. + + +2019-06-27: Version 7.7.154 + + Performance and stability improvements on all platforms. + + +2019-06-26: Version 7.7.153 + + Performance and stability improvements on all platforms. + + +2019-06-26: Version 7.7.152 + + Performance and stability improvements on all platforms. + + +2019-06-26: Version 7.7.151 + + Performance and stability improvements on all platforms. + + +2019-06-26: Version 7.7.150 + + Performance and stability improvements on all platforms. + + +2019-06-25: Version 7.7.149 + + Performance and stability improvements on all platforms. + + +2019-06-25: Version 7.7.148 + + Performance and stability improvements on all platforms. + + +2019-06-25: Version 7.7.147 + + Performance and stability improvements on all platforms. + + +2019-06-25: Version 7.7.146 + + Performance and stability improvements on all platforms. + + +2019-06-25: Version 7.7.145 + + Performance and stability improvements on all platforms. + + +2019-06-25: Version 7.7.144 + + Performance and stability improvements on all platforms. + + +2019-06-25: Version 7.7.143 + + Performance and stability improvements on all platforms. + + +2019-06-25: Version 7.7.142 + + Performance and stability improvements on all platforms. + + +2019-06-25: Version 7.7.141 + + Performance and stability improvements on all platforms. + + +2019-06-24: Version 7.7.140 + + Performance and stability improvements on all platforms. + + +2019-06-24: Version 7.7.139 + + Performance and stability improvements on all platforms. + + +2019-06-24: Version 7.7.138 + + Performance and stability improvements on all platforms. + + +2019-06-24: Version 7.7.137 + + Performance and stability improvements on all platforms. + + +2019-06-24: Version 7.7.136 + + Performance and stability improvements on all platforms. + + +2019-06-23: Version 7.7.135 + + Performance and stability improvements on all platforms. + + +2019-06-23: Version 7.7.134 + + Performance and stability improvements on all platforms. + + +2019-06-22: Version 7.7.133 + + Performance and stability improvements on all platforms. + + +2019-06-21: Version 7.7.132 + + Performance and stability improvements on all platforms. 
+ + +2019-06-21: Version 7.7.131 + + Performance and stability improvements on all platforms. + + +2019-06-21: Version 7.7.130 + + Performance and stability improvements on all platforms. + + +2019-06-21: Version 7.7.129 + + Performance and stability improvements on all platforms. + + +2019-06-21: Version 7.7.128 + + Performance and stability improvements on all platforms. + + +2019-06-21: Version 7.7.127 + + Performance and stability improvements on all platforms. + + +2019-06-21: Version 7.7.126 + + Performance and stability improvements on all platforms. + + +2019-06-21: Version 7.7.125 + + Performance and stability improvements on all platforms. + + +2019-06-20: Version 7.7.124 + + Performance and stability improvements on all platforms. + + +2019-06-20: Version 7.7.123 + + Performance and stability improvements on all platforms. + + +2019-06-20: Version 7.7.122 + + Performance and stability improvements on all platforms. + + +2019-06-20: Version 7.7.121 + + Performance and stability improvements on all platforms. + + +2019-06-20: Version 7.7.120 + + Performance and stability improvements on all platforms. + + +2019-06-19: Version 7.7.119 + + Performance and stability improvements on all platforms. + + +2019-06-19: Version 7.7.118 + + Performance and stability improvements on all platforms. + + +2019-06-19: Version 7.7.117 + + Performance and stability improvements on all platforms. + + +2019-06-19: Version 7.7.116 + + Performance and stability improvements on all platforms. + + +2019-06-19: Version 7.7.115 + + Performance and stability improvements on all platforms. + + +2019-06-19: Version 7.7.114 + + Performance and stability improvements on all platforms. + + +2019-06-19: Version 7.7.113 + + Performance and stability improvements on all platforms. + + +2019-06-19: Version 7.7.112 + + Performance and stability improvements on all platforms. + + +2019-06-19: Version 7.7.111 + + Performance and stability improvements on all platforms. + + +2019-06-19: Version 7.7.110 + + Performance and stability improvements on all platforms. + + +2019-06-19: Version 7.7.109 + + Performance and stability improvements on all platforms. + + +2019-06-19: Version 7.7.108 + + Performance and stability improvements on all platforms. + + +2019-06-19: Version 7.7.107 + + Performance and stability improvements on all platforms. + + +2019-06-18: Version 7.7.106 + + Performance and stability improvements on all platforms. + + +2019-06-18: Version 7.7.105 + + Performance and stability improvements on all platforms. + + +2019-06-18: Version 7.7.104 + + Performance and stability improvements on all platforms. + + +2019-06-18: Version 7.7.103 + + Performance and stability improvements on all platforms. + + +2019-06-18: Version 7.7.102 + + Performance and stability improvements on all platforms. + + +2019-06-18: Version 7.7.101 + + Performance and stability improvements on all platforms. + + +2019-06-18: Version 7.7.100 + + Performance and stability improvements on all platforms. + + +2019-06-18: Version 7.7.99 + + Performance and stability improvements on all platforms. + + +2019-06-18: Version 7.7.98 + + Performance and stability improvements on all platforms. + + +2019-06-18: Version 7.7.97 + + Performance and stability improvements on all platforms. + + +2019-06-18: Version 7.7.96 + + Performance and stability improvements on all platforms. + + +2019-06-18: Version 7.7.95 + + Performance and stability improvements on all platforms. 
+ + +2019-06-17: Version 7.7.94 + + Performance and stability improvements on all platforms. + + +2019-06-17: Version 7.7.93 + + Performance and stability improvements on all platforms. + + +2019-06-17: Version 7.7.92 + + Performance and stability improvements on all platforms. + + +2019-06-17: Version 7.7.91 + + Performance and stability improvements on all platforms. + + +2019-06-17: Version 7.7.90 + + Performance and stability improvements on all platforms. + + +2019-06-17: Version 7.7.89 + + Performance and stability improvements on all platforms. + + +2019-06-17: Version 7.7.88 + + Performance and stability improvements on all platforms. + + +2019-06-17: Version 7.7.87 + + Performance and stability improvements on all platforms. + + +2019-06-16: Version 7.7.86 + + Performance and stability improvements on all platforms. + + +2019-06-16: Version 7.7.85 + + Performance and stability improvements on all platforms. + + +2019-06-14: Version 7.7.84 + + Performance and stability improvements on all platforms. + + +2019-06-14: Version 7.7.83 + + Performance and stability improvements on all platforms. + + +2019-06-14: Version 7.7.82 + + Performance and stability improvements on all platforms. + + +2019-06-14: Version 7.7.81 + + Performance and stability improvements on all platforms. + + +2019-06-14: Version 7.7.80 + + Performance and stability improvements on all platforms. + + +2019-06-14: Version 7.7.79 + + Performance and stability improvements on all platforms. + + +2019-06-14: Version 7.7.78 + + Performance and stability improvements on all platforms. + + +2019-06-14: Version 7.7.77 + + Performance and stability improvements on all platforms. + + +2019-06-14: Version 7.7.76 + + Performance and stability improvements on all platforms. + + +2019-06-14: Version 7.7.75 + + Performance and stability improvements on all platforms. + + +2019-06-14: Version 7.7.74 + + Performance and stability improvements on all platforms. + + +2019-06-14: Version 7.7.73 + + Performance and stability improvements on all platforms. + + +2019-06-13: Version 7.7.72 + + Performance and stability improvements on all platforms. + + +2019-06-13: Version 7.7.71 + + Performance and stability improvements on all platforms. + + +2019-06-13: Version 7.7.70 + + Performance and stability improvements on all platforms. + + +2019-06-13: Version 7.7.69 + + Performance and stability improvements on all platforms. + + +2019-06-13: Version 7.7.68 + + Performance and stability improvements on all platforms. + + +2019-06-13: Version 7.7.67 + + Performance and stability improvements on all platforms. + + +2019-06-13: Version 7.7.66 + + Performance and stability improvements on all platforms. + + +2019-06-13: Version 7.7.65 + + Performance and stability improvements on all platforms. + + +2019-06-13: Version 7.7.64 + + Performance and stability improvements on all platforms. + + +2019-06-13: Version 7.7.63 + + Performance and stability improvements on all platforms. + + +2019-06-13: Version 7.7.62 + + Performance and stability improvements on all platforms. + + +2019-06-12: Version 7.7.61 + + Performance and stability improvements on all platforms. + + +2019-06-12: Version 7.7.60 + + Performance and stability improvements on all platforms. + + +2019-06-12: Version 7.7.59 + + Performance and stability improvements on all platforms. + + +2019-06-12: Version 7.7.58 + + Performance and stability improvements on all platforms. + + +2019-06-12: Version 7.7.57 + + Performance and stability improvements on all platforms. 
+ + +2019-06-12: Version 7.7.56 + + Performance and stability improvements on all platforms. + + +2019-06-12: Version 7.7.55 + + Performance and stability improvements on all platforms. + + +2019-06-12: Version 7.7.54 + + Performance and stability improvements on all platforms. + + +2019-06-12: Version 7.7.53 + + Performance and stability improvements on all platforms. + + +2019-06-12: Version 7.7.52 + + Performance and stability improvements on all platforms. + + +2019-06-12: Version 7.7.51 + + Performance and stability improvements on all platforms. + + +2019-06-11: Version 7.7.50 + + Performance and stability improvements on all platforms. + + +2019-06-11: Version 7.7.49 + + Performance and stability improvements on all platforms. + + +2019-06-11: Version 7.7.48 + + Performance and stability improvements on all platforms. + + +2019-06-11: Version 7.7.47 + + Performance and stability improvements on all platforms. + + +2019-06-11: Version 7.7.46 + + Performance and stability improvements on all platforms. + + +2019-06-11: Version 7.7.45 + + Performance and stability improvements on all platforms. + + +2019-06-11: Version 7.7.44 + + Performance and stability improvements on all platforms. + + +2019-06-11: Version 7.7.43 + + Performance and stability improvements on all platforms. + + +2019-06-11: Version 7.7.42 + + Performance and stability improvements on all platforms. + + +2019-06-11: Version 7.7.41 + + Performance and stability improvements on all platforms. + + +2019-06-10: Version 7.7.40 + + Performance and stability improvements on all platforms. + + +2019-06-10: Version 7.7.39 + + Performance and stability improvements on all platforms. + + +2019-06-10: Version 7.7.38 + + Performance and stability improvements on all platforms. + + +2019-06-10: Version 7.7.37 + + Performance and stability improvements on all platforms. + + +2019-06-09: Version 7.7.36 + + Performance and stability improvements on all platforms. + + +2019-06-09: Version 7.7.35 + + Performance and stability improvements on all platforms. + + +2019-06-09: Version 7.7.34 + + Performance and stability improvements on all platforms. + + +2019-06-08: Version 7.7.33 + + Performance and stability improvements on all platforms. + + +2019-06-08: Version 7.7.32 + + Performance and stability improvements on all platforms. + + +2019-06-08: Version 7.7.31 + + Performance and stability improvements on all platforms. + + +2019-06-08: Version 7.7.30 + + Performance and stability improvements on all platforms. + + +2019-06-07: Version 7.7.29 + + Performance and stability improvements on all platforms. + + +2019-06-07: Version 7.7.28 + + Performance and stability improvements on all platforms. + + +2019-06-07: Version 7.7.27 + + Performance and stability improvements on all platforms. + + +2019-06-07: Version 7.7.26 + + Performance and stability improvements on all platforms. + + +2019-06-07: Version 7.7.25 + + Performance and stability improvements on all platforms. + + +2019-06-07: Version 7.7.24 + + Performance and stability improvements on all platforms. + + +2019-06-05: Version 7.7.23 + + Performance and stability improvements on all platforms. + + +2019-06-05: Version 7.7.22 + + Performance and stability improvements on all platforms. + + +2019-06-05: Version 7.7.21 + + Performance and stability improvements on all platforms. + + +2019-06-05: Version 7.7.20 + + Performance and stability improvements on all platforms. + + +2019-06-05: Version 7.7.19 + + Performance and stability improvements on all platforms. 
+ + +2019-06-05: Version 7.7.18 + + Performance and stability improvements on all platforms. + + +2019-06-04: Version 7.7.17 + + Performance and stability improvements on all platforms. + + +2019-06-04: Version 7.7.16 + + Performance and stability improvements on all platforms. + + +2019-06-04: Version 7.7.15 + + Performance and stability improvements on all platforms. + + +2019-06-04: Version 7.7.14 + + Performance and stability improvements on all platforms. + + +2019-06-04: Version 7.7.13 + + Performance and stability improvements on all platforms. + + +2019-06-03: Version 7.7.12 + + Performance and stability improvements on all platforms. + + +2019-06-03: Version 7.7.11 + + Performance and stability improvements on all platforms. + + +2019-06-03: Version 7.7.10 + + Performance and stability improvements on all platforms. + + +2019-06-03: Version 7.7.9 + + Performance and stability improvements on all platforms. + + +2019-06-03: Version 7.7.8 + + Performance and stability improvements on all platforms. + + +2019-06-03: Version 7.7.7 + + Performance and stability improvements on all platforms. + + +2019-06-03: Version 7.7.6 + + Performance and stability improvements on all platforms. + + +2019-06-03: Version 7.7.5 + + Performance and stability improvements on all platforms. + + +2019-06-03: Version 7.7.4 + + Performance and stability improvements on all platforms. + + +2019-06-03: Version 7.7.3 + + Performance and stability improvements on all platforms. + + +2019-06-03: Version 7.7.2 + + Performance and stability improvements on all platforms. + + +2019-05-31: Version 7.7.1 + + Performance and stability improvements on all platforms. + + +2019-05-29: Version 7.6.311 + + Performance and stability improvements on all platforms. + + +2019-05-29: Version 7.6.310 + + Performance and stability improvements on all platforms. + + +2019-05-29: Version 7.6.309 + + Performance and stability improvements on all platforms. + + +2019-05-29: Version 7.6.308 + + Performance and stability improvements on all platforms. + + +2019-05-28: Version 7.6.307 + + Performance and stability improvements on all platforms. + + +2019-05-28: Version 7.6.306 + + Performance and stability improvements on all platforms. + + +2019-05-28: Version 7.6.305 + + Performance and stability improvements on all platforms. + + +2019-05-28: Version 7.6.304 + + Performance and stability improvements on all platforms. + + 2019-05-28: Version 7.6.303 Performance and stability improvements on all platforms. diff --git a/deps/v8/DEPS b/deps/v8/DEPS index bca59b724f292a..986264356f99cc 100644 --- a/deps/v8/DEPS +++ b/deps/v8/DEPS @@ -12,10 +12,10 @@ vars = { 'check_v8_header_includes': False, # GN CIPD package version. - 'gn_version': 'git_revision:81ee1967d3fcbc829bac1c005c3da59739c88df9', + 'gn_version': 'git_revision:972ed755f8e6d31cae9ba15fcd08136ae1a7886f', # luci-go CIPD package version. - 'luci_go': 'git_revision:25958d48e89e980e2a97daeddc977fb5e2e1fb8c', + 'luci_go': 'git_revision:7d11fd9e66407c49cb6c8546a2ae45ea993a240c', # Three lines of non-changing comments so that # the commit queue can handle CLs rolling android_sdk_build-tools_version @@ -24,11 +24,11 @@ vars = { # Three lines of non-changing comments so that # the commit queue can handle CLs rolling android_sdk_emulator_version # and whatever else without interference from each other. 
- 'android_sdk_emulator_version': 'ki7EDQRAiZAUYlnTWR1XmI6cJTk65fJ-DNZUU1zrtS8C', + 'android_sdk_emulator_version': 'xhyuoquVvBTcJelgRjMKZeoBVSQRjB7pLVJPt5C9saIC', # Three lines of non-changing comments so that # the commit queue can handle CLs rolling android_sdk_extras_version # and whatever else without interference from each other. - 'android_sdk_extras_version': 'iIwhhDox5E-mHgwUhCz8JACWQCpUjdqt5KTY9VLugKQC', + 'android_sdk_extras_version': 'ppQ4TnqDvBHQ3lXx5KPq97egzF5X2FFyOrVHkGmiTMQC', # Three lines of non-changing comments so that # the commit queue can handle CLs rolling android_sdk_patcher_version # and whatever else without interference from each other. @@ -36,7 +36,7 @@ vars = { # Three lines of non-changing comments so that # the commit queue can handle CLs rolling android_sdk_platform-tools_version # and whatever else without interference from each other. - 'android_sdk_platform-tools_version': '4Y2Cb2LGzoc-qt-oIUIlhySotJaKeE3ELFedSVe6Uk8C', + 'android_sdk_platform-tools_version': 'MSnxgXN7IurL-MQs1RrTkSFSb8Xd1UtZjLArI8Ty1FgC', # Three lines of non-changing comments so that # the commit queue can handle CLs rolling android_sdk_platforms_version # and whatever else without interference from each other. @@ -57,15 +57,15 @@ vars = { deps = { 'v8/build': - Var('chromium_url') + '/chromium/src/build.git' + '@' + '4cebfa34c79bcfbce6a3f55d1b4f7628bb70ea8a', + Var('chromium_url') + '/chromium/src/build.git' + '@' + '1e5d7d692f816af8136c738b79fe9e8dde8057f6', 'v8/third_party/depot_tools': - Var('chromium_url') + '/chromium/tools/depot_tools.git' + '@' + '26af0d34d281440ad0dc6d2e43fe60f32ef62da0', + Var('chromium_url') + '/chromium/tools/depot_tools.git' + '@' + 'ee7b9dda90e409fb92031d511151debe5db7db9f', 'v8/third_party/icu': - Var('chromium_url') + '/chromium/deps/icu.git' + '@' + '64e5d7d43a1ff205e3787ab6150bbc1a1837332b', + Var('chromium_url') + '/chromium/deps/icu.git' + '@' + 'fd97d4326fac6da84452b2d5fe75ff0949368dab', 'v8/third_party/instrumented_libraries': - Var('chromium_url') + '/chromium/src/third_party/instrumented_libraries.git' + '@' + 'a959e4f0cb643003f2d75d179cede449979e3e77', + Var('chromium_url') + '/chromium/src/third_party/instrumented_libraries.git' + '@' + 'b1c3ca20848c117eb935b02c25d441f03e6fbc5e', 'v8/buildtools': - Var('chromium_url') + '/chromium/src/buildtools.git' + '@' + '0218c0f9ac9fdba00e5c27b5aca94d3a64c74f34', + Var('chromium_url') + '/chromium/src/buildtools.git' + '@' + '67b293ca1316d06f7f00160ce35c92b8849a9dc9', 'v8/buildtools/clang_format/script': Var('chromium_url') + '/chromium/llvm-project/cfe/tools/clang-format.git' + '@' + '96636aa0e9f047f17447f2d45a094d0b59ed7917', 'v8/buildtools/linux64': { @@ -110,11 +110,6 @@ deps = { 'url': Var('chromium_url') + '/android_ndk.git' + '@' + '4e2cea441bfd43f0863d14f57b1e1844260b9884', 'condition': 'checkout_android', }, - # This is deprecated. 
- 'v8/third_party/android_tools': { - 'url': Var('chromium_url') + '/android_tools.git' + '@' + '347a7c8078a009e98995985b7ab6ec6b35696dea', - 'condition': 'checkout_android', - }, 'v8/third_party/android_sdk/public': { 'packages': [ { @@ -158,7 +153,7 @@ deps = { 'dep_type': 'cipd', }, 'v8/third_party/catapult': { - 'url': Var('chromium_url') + '/catapult.git' + '@' + 'a7b33124672f301cebe0ca94a67ca7d0362e3d6a', + 'url': Var('chromium_url') + '/catapult.git' + '@' + '53913cecb11a3ef993f6496b9110964e2e2aeec3', 'condition': 'checkout_android', }, 'v8/third_party/colorama/src': { @@ -166,23 +161,23 @@ deps = { 'condition': 'checkout_android', }, 'v8/third_party/fuchsia-sdk': { - 'url': Var('chromium_url') + '/chromium/src/third_party/fuchsia-sdk.git' + '@' + 'ae68779f84fc36bd88ba4fe0ff78ed9ea3c91d73', + 'url': Var('chromium_url') + '/chromium/src/third_party/fuchsia-sdk.git' + '@' + '5fd29151cf35c0813c33cc368a7c78389e3f5caa', 'condition': 'checkout_fuchsia', }, 'v8/third_party/googletest/src': - Var('chromium_url') + '/external/github.com/google/googletest.git' + '@' + 'f71fb4f9a912ec945401cc49a287a759b6131026', + Var('chromium_url') + '/external/github.com/google/googletest.git' + '@' + '6077f444da944d96d311d358d761164261f1cdd0', 'v8/third_party/jinja2': Var('chromium_url') + '/chromium/src/third_party/jinja2.git' + '@' + 'b41863e42637544c2941b574c7877d3e1f663e25', 'v8/third_party/markupsafe': Var('chromium_url') + '/chromium/src/third_party/markupsafe.git' + '@' + '8f45f5cfa0009d2a70589bcda0349b8cb2b72783', 'v8/tools/swarming_client': - Var('chromium_url') + '/infra/luci/client-py.git' + '@' + '779c4f0f8488c64587b75dbb001d18c3c0c4cda9', + Var('chromium_url') + '/infra/luci/client-py.git' + '@' + '96f125709acfd0b48fc1e5dae7d6ea42291726ac', 'v8/test/benchmarks/data': Var('chromium_url') + '/v8/deps/third_party/benchmarks.git' + '@' + '05d7188267b4560491ff9155c5ee13e207ecd65f', 'v8/test/mozilla/data': Var('chromium_url') + '/v8/deps/third_party/mozilla-tests.git' + '@' + 'f6c578a10ea707b1a8ab0b88943fe5115ce2b9be', 'v8/test/test262/data': - Var('chromium_url') + '/external/github.com/tc39/test262.git' + '@' + 'a9abd418ccc7999b00b8c7df60b25620a7d3c541', + Var('chromium_url') + '/external/github.com/tc39/test262.git' + '@' + '26a2268436f28f64c4539d9aab9ebd0f0b7c99c5', 'v8/test/test262/harness': Var('chromium_url') + '/external/github.com/test262-utils/test262-harness-py.git' + '@' + '4555345a943d0c99a9461182705543fb171dda4b', 'v8/third_party/qemu-linux-x64': { @@ -206,7 +201,7 @@ deps = { 'dep_type': 'cipd', }, 'v8/tools/clang': - Var('chromium_url') + '/chromium/src/tools/clang.git' + '@' + 'fe8ba88894e4b3927d3cd9e24274a0f1a688cf71', + Var('chromium_url') + '/chromium/src/tools/clang.git' + '@' + 'f485a21a9cb05494161d97d545c3b29447610ffb', 'v8/tools/luci-go': { 'packages': [ { @@ -236,15 +231,12 @@ deps = { 'dep_type': 'cipd', }, 'v8/test/wasm-js/data': - Var('chromium_url') + '/external/github.com/WebAssembly/spec.git' + '@' + 'bc7d3006bbda0de5031c2a1b9266a62fa7895019', + Var('chromium_url') + '/external/github.com/WebAssembly/spec.git' + '@' + '1a411f713d9850ce7da24719aba5bb80c535f562', 'v8/third_party/perfetto': - Var('android_url') + '/platform/external/perfetto.git' + '@' + '10c98fe0cfae669f71610d97e9da94260a6da173', + Var('android_url') + '/platform/external/perfetto.git' + '@' + '0e8281399fd854de13461f2c1c9f2fb0b8e9c3ae', 'v8/third_party/protobuf': Var('chromium_url') + '/external/github.com/google/protobuf'+ '@' + 'b68a347f56137b4b1a746e8c7438495a6ac1bd91', } -recursedeps = [ - 
'v8/third_party/android_tools', -] include_rules = [ # Everybody can use some things. diff --git a/deps/v8/INTL_OWNERS b/deps/v8/INTL_OWNERS new file mode 100644 index 00000000000000..dbe6f3b7b54292 --- /dev/null +++ b/deps/v8/INTL_OWNERS @@ -0,0 +1,3 @@ +cira@chromium.org +mnita@google.com +jshin@chromium.org diff --git a/deps/v8/OWNERS b/deps/v8/OWNERS index c428ba6d0bbd7b..be360966665b38 100644 --- a/deps/v8/OWNERS +++ b/deps/v8/OWNERS @@ -2,20 +2,20 @@ # Disagreement among owners should be escalated to eng reviewers. file://ENG_REVIEW_OWNERS -# TODO(9247) remove this. -file://COMMON_OWNERS - per-file .clang-format=file://INFRA_OWNERS per-file .clang-tidy=file://INFRA_OWNERS per-file .editorconfig=file://INFRA_OWNERS +per-file .flake8=file://INFRA_OWNERS per-file .git-blame-ignore-revs=file://INFRA_OWNERS per-file .gitattributes=file://INFRA_OWNERS per-file .gitignore=file://INFRA_OWNERS per-file .gn=file://INFRA_OWNERS per-file .vpython=file://INFRA_OWNERS per-file .ycm_extra_conf.py=file://INFRA_OWNERS -per-file BUILD.gn=file://INFRA_OWNERS +per-file BUILD.gn=file://COMMON_OWNERS per-file DEPS=file://INFRA_OWNERS +# For Test262 rolls. +per-file DEPS=mathias@chromium.org per-file PRESUBMIT=file://INFRA_OWNERS per-file codereview.settings=file://INFRA_OWNERS diff --git a/deps/v8/test/wasm-api-tests/OWNERS b/deps/v8/benchmarks/OWNERS similarity index 100% rename from deps/v8/test/wasm-api-tests/OWNERS rename to deps/v8/benchmarks/OWNERS diff --git a/deps/v8/gni/proto_library.gni b/deps/v8/gni/proto_library.gni index cf581ed46e4dfe..b16d8f93bd8fcd 100644 --- a/deps/v8/gni/proto_library.gni +++ b/deps/v8/gni/proto_library.gni @@ -187,7 +187,10 @@ template("proto_library") { "visibility", ]) + # Exclude the config.descriptor file which is an output for some reason. + set_sources_assignment_filter([ "*.descriptor" ]) sources = get_target_outputs(":$action_name") + set_sources_assignment_filter(sources_assignment_filter) # configs -= [ "//gn/standalone:extra_warnings" ] if (defined(invoker.extra_configs)) { diff --git a/deps/v8/gni/v8.gni b/deps/v8/gni/v8.gni index 506b8428ee3217..e55c4cf3468460 100644 --- a/deps/v8/gni/v8.gni +++ b/deps/v8/gni/v8.gni @@ -92,7 +92,7 @@ if (v8_enable_backtrace == "") { # subdirectories. v8_path_prefix = get_path_info("../", "abspath") -v8_inspector_js_protocol = v8_path_prefix + "/src/inspector/js_protocol.pdl" +v8_inspector_js_protocol = v8_path_prefix + "/include/js_protocol.pdl" ############################################################################### # Templates diff --git a/deps/v8/include/APIDesign.md b/deps/v8/include/APIDesign.md index 8830fff7d1897a..fe42c8ed5da36e 100644 --- a/deps/v8/include/APIDesign.md +++ b/deps/v8/include/APIDesign.md @@ -67,3 +67,6 @@ which in turn guarantees long-term stability of the API. # The V8 inspector All debugging capabilities of V8 should be exposed via the inspector protocol. +The exceptions to this are profiling features exposed via v8-profiler.h. +Changes to the inspector protocol need to ensure backwards compatibility and +come with a commitment to maintain them.
diff --git a/deps/v8/include/OWNERS b/deps/v8/include/OWNERS index 7953cfe133a2c0..7ffbf74ce94d90 100644 --- a/deps/v8/include/OWNERS +++ b/deps/v8/include/OWNERS @@ -1,16 +1,17 @@ -set noparent - adamk@chromium.org danno@chromium.org ulan@chromium.org yangguo@chromium.org -per-file v8-internal.h=file://OWNERS +per-file *DEPS=file://COMMON_OWNERS +per-file v8-internal.h=file://COMMON_OWNERS per-file v8-inspector.h=dgozman@chromium.org per-file v8-inspector.h=pfeldman@chromium.org per-file v8-inspector.h=kozyatinskiy@chromium.org per-file v8-inspector-protocol.h=dgozman@chromium.org per-file v8-inspector-protocol.h=pfeldman@chromium.org per-file v8-inspector-protocol.h=kozyatinskiy@chromium.org +per-file js_protocol.pdl=dgozman@chromium.org +per-file js_protocol.pdl=pfeldman@chromium.org # COMPONENT: Blink>JavaScript>API diff --git a/deps/v8/src/inspector/js_protocol-1.2.json b/deps/v8/include/js_protocol-1.2.json similarity index 100% rename from deps/v8/src/inspector/js_protocol-1.2.json rename to deps/v8/include/js_protocol-1.2.json diff --git a/deps/v8/src/inspector/js_protocol-1.3.json b/deps/v8/include/js_protocol-1.3.json similarity index 100% rename from deps/v8/src/inspector/js_protocol-1.3.json rename to deps/v8/include/js_protocol-1.3.json diff --git a/deps/v8/src/inspector/js_protocol.pdl b/deps/v8/include/js_protocol.pdl similarity index 100% rename from deps/v8/src/inspector/js_protocol.pdl rename to deps/v8/include/js_protocol.pdl diff --git a/deps/v8/include/libplatform/v8-tracing.h b/deps/v8/include/libplatform/v8-tracing.h index ccdca0a8c5cfc6..e7cd8bfcdb66d0 100644 --- a/deps/v8/include/libplatform/v8-tracing.h +++ b/deps/v8/include/libplatform/v8-tracing.h @@ -14,6 +14,10 @@ #include "libplatform/libplatform-export.h" #include "v8-platform.h" // NOLINT(build/include) +namespace perfetto { +class TracingSession; +} + namespace v8 { namespace base { @@ -23,8 +27,8 @@ class Mutex; namespace platform { namespace tracing { -class PerfettoTracingController; class TraceEventListener; +class JSONTraceEventListener; const int kTraceMaxNumArgs = 2; @@ -292,11 +296,10 @@ class V8_PLATFORM_EXPORT TracingController std::unordered_set<v8::TracingController::TraceStateObserver*> observers_; std::atomic_bool recording_{false}; #ifdef V8_USE_PERFETTO - std::atomic_bool perfetto_recording_{false}; - std::unique_ptr<PerfettoTracingController> perfetto_tracing_controller_; std::ostream* output_stream_ = nullptr; - std::unique_ptr<TraceEventListener> json_listener_; + std::unique_ptr<JSONTraceEventListener> json_listener_; TraceEventListener* listener_for_testing_ = nullptr; + std::unique_ptr<perfetto::TracingSession> tracing_session_; #endif // Disallow copy and assign diff --git a/deps/v8/include/v8-inspector.h b/deps/v8/include/v8-inspector.h index b96a6e29ac0cb6..cfa2aaba96d12e 100644 --- a/deps/v8/include/v8-inspector.h +++ b/deps/v8/include/v8-inspector.h @@ -109,6 +109,8 @@ class V8_EXPORT V8StackTrace { virtual ~V8StackTrace() = default; virtual std::unique_ptr<protocol::Runtime::API::StackTrace> buildInspectorObject() const = 0; + virtual std::unique_ptr<protocol::Runtime::API::StackTrace> + buildInspectorObject(int maxAsyncDepth) const = 0; virtual std::unique_ptr<StringBuffer> toString() const = 0; // Safe to pass between threads, drops async chain. @@ -130,10 +132,6 @@ class V8_EXPORT V8InspectorSession { // Dispatching protocol messages.
static bool canDispatchMethod(const StringView& method); virtual void dispatchProtocolMessage(const StringView& message) = 0; - virtual V8_DEPRECATED("Use state() instead", - std::unique_ptr<StringBuffer> stateJSON()) { - return nullptr; - } virtual std::vector<uint8_t> state() = 0; virtual std::vector<std::unique_ptr<protocol::Schema::API::Domain>> supportedDomains() = 0; diff --git a/deps/v8/include/v8-internal.h b/deps/v8/include/v8-internal.h index fe2ce67e0df04e..ef13006d137929 100644 --- a/deps/v8/include/v8-internal.h +++ b/deps/v8/include/v8-internal.h @@ -174,8 +174,6 @@ class Internals { static const int kNodeStateMask = 0x7; static const int kNodeStateIsWeakValue = 2; static const int kNodeStateIsPendingValue = 3; - static const int kNodeIsIndependentShift = 3; - static const int kNodeIsActiveShift = 4; static const int kFirstNonstringType = 0x40; static const int kOddballType = 0x43; diff --git a/deps/v8/include/v8-platform.h b/deps/v8/include/v8-platform.h index b707fafc49229a..7e43b0d9db4a9d 100644 --- a/deps/v8/include/v8-platform.h +++ b/deps/v8/include/v8-platform.h @@ -439,6 +439,14 @@ class Platform { */ virtual void DumpWithoutCrashing() {} + /** + * Lets the embedder add crash keys. + */ + virtual void AddCrashKey(int id, const char* name, uintptr_t value) { + // "noop" is a valid implementation if the embedder doesn't care to log + // additional data for crashes. + } + protected: /** * Default implementation of current wall-clock time in milliseconds diff --git a/deps/v8/include/v8-profiler.h b/deps/v8/include/v8-profiler.h index 645920d9c1b357..46d3eb8aa4a24e 100644 --- a/deps/v8/include/v8-profiler.h +++ b/deps/v8/include/v8-profiler.h @@ -356,9 +356,8 @@ class V8_EXPORT CpuProfiler { * initialized. The profiler object must be disposed after use by calling * |Dispose| method. */ - static CpuProfiler* New(Isolate* isolate); static CpuProfiler* New(Isolate* isolate, - CpuProfilingNamingMode mode); + CpuProfilingNamingMode = kDebugNaming); /** * Synchronously collect current stack sample in all profilers attached to @@ -407,10 +406,8 @@ class V8_EXPORT CpuProfiler { * discarded. */ void StartProfiling( - Local<String> title, CpuProfilingMode mode, bool record_samples = false); - void StartProfiling( - Local<String> title, CpuProfilingMode mode, bool record_samples, - unsigned max_samples); + Local<String> title, CpuProfilingMode mode, bool record_samples = false, + unsigned max_samples = CpuProfilingOptions::kNoSampleLimit); /** * The same as StartProfiling above, but the CpuProfilingMode defaults to * kLeafNodeLineNumbers mode, which was the previous default behavior of the diff --git a/deps/v8/include/v8-util.h b/deps/v8/include/v8-util.h index 24962607076f78..29d813e4274d16 100644 --- a/deps/v8/include/v8-util.h +++ b/deps/v8/include/v8-util.h @@ -194,14 +194,6 @@ class PersistentValueMapBase { return SetReturnValueFromVal(&returnValue, Traits::Get(&impl_, key)); } - /** - * Call V8::RegisterExternallyReferencedObject with the map value for given - * key. - */ - V8_DEPRECATED( - "Used TracedGlobal and EmbedderHeapTracer::RegisterEmbedderReference", - inline void RegisterExternallyReferencedObject(K& key)); - /** * Return value for key and remove it from the map.
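For illustration, the new Platform::AddCrashKey hook above can be overridden by an embedder to forward V8's crash annotations to its own crash reporter. A minimal sketch, assuming the V8 headers are on the include path; MyPlatform and the fprintf sink are hypothetical, and only the AddCrashKey signature comes from the diff:

    #include <cstdint>
    #include <cstdio>
    #include "v8-platform.h"

    class MyPlatform : public v8::Platform {
     public:
      void AddCrashKey(int id, const char* name, uintptr_t value) override {
        // Hand the annotation to the host's crash reporter; per the header
        // comment, leaving the default empty implementation is also valid.
        std::fprintf(stderr, "v8 crash key %d: %s=0x%zx\n", id, name,
                     static_cast<size_t>(value));
      }
      // All other v8::Platform methods elided for brevity.
    };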
*/ @@ -352,16 +344,6 @@ class PersistentValueMapBase { const char* label_; }; -template <typename K, typename V, typename Traits> -inline void -PersistentValueMapBase<K, V, Traits>::RegisterExternallyReferencedObject( - K& key) { - assert(Contains(key)); - V8::RegisterExternallyReferencedObject( - reinterpret_cast<internal::Address*>(FromVal(Traits::Get(&impl_, key))), - reinterpret_cast<internal::Isolate*>(GetIsolate())); -} - template <typename K, typename V, typename Traits> class PersistentValueMap : public PersistentValueMapBase<K, V, Traits> { public: diff --git a/deps/v8/include/v8-version.h b/deps/v8/include/v8-version.h index e9c5c339f280d8..91d7633b0551e2 100644 --- a/deps/v8/include/v8-version.h +++ b/deps/v8/include/v8-version.h @@ -9,9 +9,9 @@ // NOTE these macros are used by some of the tool scripts and the build // system so their names cannot be changed without changing the scripts. #define V8_MAJOR_VERSION 7 -#define V8_MINOR_VERSION 6 -#define V8_BUILD_NUMBER 303 -#define V8_PATCH_LEVEL 29 +#define V8_MINOR_VERSION 7 +#define V8_BUILD_NUMBER 299 +#define V8_PATCH_LEVEL 4 // Use 1 for candidates and 0 otherwise. // (Boolean macro values are not supported by all preprocessors.) diff --git a/deps/v8/include/v8.h b/deps/v8/include/v8.h index 3b73ae6413a44d..ca96c32088df39 100644 --- a/deps/v8/include/v8.h +++ b/deps/v8/include/v8.h @@ -122,7 +122,6 @@ class ExternalString; class Isolate; class LocalEmbedderHeapTracer; class MicrotaskQueue; -class NeverReadOnlySpaceObject; struct ScriptStreamingData; template <typename T> class CustomArguments; class PropertyCallbackArguments; @@ -545,38 +544,6 @@ template <class T> class PersistentBase { */ V8_INLINE void AnnotateStrongRetainer(const char* label); - /** - * Allows the embedder to tell the v8 garbage collector that a certain object - * is alive. Only allowed when the embedder is asked to trace its heap by - * EmbedderHeapTracer. - */ - V8_DEPRECATED( - "Used TracedGlobal and EmbedderHeapTracer::RegisterEmbedderReference", - V8_INLINE void RegisterExternalReference(Isolate* isolate) const); - - /** - * Marks the reference to this object independent. Garbage collector is free - * to ignore any object groups containing this object. Weak callback for an - * independent handle should not assume that it will be preceded by a global - * GC prologue callback or followed by a global GC epilogue callback. - */ - V8_DEPRECATED( - "Weak objects are always considered independent. " - "Use TracedGlobal when trying to use EmbedderHeapTracer. " - "Use a strong handle when trying to keep an object alive.", - V8_INLINE void MarkIndependent()); - - /** - * Marks the reference to this object as active. The scavenge garbage - * collection should not reclaim the objects marked as active, even if the - * object held by the handle is otherwise unreachable. - * - * This bit is cleared after the each garbage collection pass. - */ - V8_DEPRECATED("Use TracedGlobal.", V8_INLINE void MarkActive()); - - V8_DEPRECATED("See MarkIndependent.", V8_INLINE bool IsIndependent() const); - /** Returns true if the handle's reference is weak. */ V8_INLINE bool IsWeak() const; @@ -1359,6 +1326,37 @@ class V8_EXPORT Module { * kEvaluated or kErrored. */ Local<UnboundModuleScript> GetUnboundModuleScript(); + + /* + * Callback defined in the embedder. This is responsible for setting + * the module's exported values with calls to SetSyntheticModuleExport(). + * The callback must return a Value to indicate success (where no + * exception was thrown) and return an empty MaybeLocal to indicate failure + * (where an exception was thrown).
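The handle-API removals above all point embedders at TracedGlobal as the replacement. For orientation only, the replacement usage is roughly the following sketch, assuming a live isolate and a local object handle are in scope:

    // Instead of a Persistent marked with MarkActive()/MarkIndependent(),
    // keep a handle that the EmbedderHeapTracer traces:
    v8::TracedGlobal<v8::Object> traced(isolate, object);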
+ */ + typedef MaybeLocal<Value> (*SyntheticModuleEvaluationSteps)( + Local<Context> context, Local<Module> module); + + /** + * Creates a new SyntheticModule with the specified export names, where + * evaluation_steps will be executed upon module evaluation. + * export_names must not contain duplicates. + * module_name is used solely for logging/debugging and doesn't affect module + * behavior. + */ + static Local<Module> CreateSyntheticModule( + Isolate* isolate, Local<String> module_name, + const std::vector<Local<String>>& export_names, + SyntheticModuleEvaluationSteps evaluation_steps); + + /** + * Set this module's exported value for the name export_name to the specified + * export_value. This method must be called only on Modules created via + * CreateSyntheticModule. export_name must be one of the export_names that + * were passed in that CreateSyntheticModule call. + */ + void SetSyntheticModuleExport(Local<String> export_name, + Local<Value> export_value); }; /** @@ -1701,14 +1699,8 @@ class V8_EXPORT ScriptCompiler { Local<String> arguments[], size_t context_extension_count, Local<Object> context_extensions[], CompileOptions options = kNoCompileOptions, - NoCacheReason no_cache_reason = kNoCacheNoReason); - - static V8_WARN_UNUSED_RESULT MaybeLocal<Function> CompileFunctionInContext( - Local<Context> context, Source* source, size_t arguments_count, - Local<String> arguments[], size_t context_extension_count, - Local<Object> context_extensions[], CompileOptions options, - NoCacheReason no_cache_reason, - Local<ScriptOrModule>* script_or_module_out); + NoCacheReason no_cache_reason = kNoCacheNoReason, + Local<ScriptOrModule>* script_or_module_out = nullptr); /** * Creates and returns code cache for the specified unbound_script. @@ -2533,9 +2525,6 @@ class V8_EXPORT Value : public Data { V8_WARN_UNUSED_RESULT MaybeLocal<BigInt> ToBigInt( Local<Context> context) const; - V8_DEPRECATED("ToBoolean can never throw. Use Local version.", - V8_WARN_UNUSED_RESULT MaybeLocal<Boolean> ToBoolean( - Local<Context> context) const); V8_WARN_UNUSED_RESULT MaybeLocal<Number> ToNumber( Local<Context> context) const; V8_WARN_UNUSED_RESULT MaybeLocal<String> ToString( Local<Context> context) const; @@ -2551,16 +2540,6 @@ V8_WARN_UNUSED_RESULT MaybeLocal<Int32> ToInt32(Local<Context> context) const; Local<Boolean> ToBoolean(Isolate* isolate) const; - V8_DEPRECATED("Use maybe version", - Local<Number> ToNumber(Isolate* isolate) const); - V8_DEPRECATED("Use maybe version", - Local<String> ToString(Isolate* isolate) const); - V8_DEPRECATED("Use maybe version", - Local<Object> ToObject(Isolate* isolate) const); - V8_DEPRECATED("Use maybe version", - Local<Integer> ToInteger(Isolate* isolate) const); - V8_DEPRECATED("Use maybe version", - Local<Int32> ToInt32(Isolate* isolate) const); /** * Attempts to convert a string to an array index. @@ -2571,9 +2550,6 @@ bool BooleanValue(Isolate* isolate) const; - V8_DEPRECATED("BooleanValue can never throw. Use Isolate version.", - V8_WARN_UNUSED_RESULT Maybe<bool> BooleanValue( - Local<Context> context) const); V8_WARN_UNUSED_RESULT Maybe<double> NumberValue(Local<Context> context) const; V8_WARN_UNUSED_RESULT Maybe<int64_t> IntegerValue( Local<Context> context) const; @@ -2893,43 +2869,23 @@ class V8_EXPORT String : public Name { V8_INLINE static String* Cast(v8::Value* obj); - // TODO(dcarney): remove with deprecation of New functions.
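The synthetic-module API declared above can be exercised roughly as follows. This is a sketch based only on the declarations in this diff; the module name, export name, and exported value are arbitrary examples, and error handling is elided:

    #include <vector>
    #include "v8.h"

    // Evaluation steps: populate the module's exports when it is evaluated.
    v8::MaybeLocal<v8::Value> EvaluationSteps(v8::Local<v8::Context> context,
                                              v8::Local<v8::Module> module) {
      v8::Isolate* isolate = context->GetIsolate();
      module->SetSyntheticModuleExport(
          v8::String::NewFromUtf8(isolate, "answer").ToLocalChecked(),
          v8::Integer::New(isolate, 42));
      return v8::Undefined(isolate);  // a non-empty value signals success
    }

    v8::Local<v8::Module> MakeAnswerModule(v8::Isolate* isolate) {
      std::vector<v8::Local<v8::String>> exports = {
          v8::String::NewFromUtf8(isolate, "answer").ToLocalChecked()};
      return v8::Module::CreateSyntheticModule(
          isolate,
          v8::String::NewFromUtf8(isolate, "answer-module").ToLocalChecked(),
          exports, EvaluationSteps);
    }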
- enum NewStringType { - kNormalString = static_cast(v8::NewStringType::kNormal), - kInternalizedString = static_cast(v8::NewStringType::kInternalized) - }; - - /** Allocates a new string from UTF-8 data.*/ - static V8_DEPRECATED( - "Use maybe version", - Local NewFromUtf8(Isolate* isolate, const char* data, - NewStringType type = kNormalString, - int length = -1)); - /** Allocates a new string from UTF-8 data. Only returns an empty value when * length > kMaxLength. **/ static V8_WARN_UNUSED_RESULT MaybeLocal NewFromUtf8( - Isolate* isolate, const char* data, v8::NewStringType type, - int length = -1); + Isolate* isolate, const char* data, + NewStringType type = NewStringType::kNormal, int length = -1); /** Allocates a new string from Latin-1 data. Only returns an empty value * when length > kMaxLength. **/ static V8_WARN_UNUSED_RESULT MaybeLocal NewFromOneByte( - Isolate* isolate, const uint8_t* data, v8::NewStringType type, - int length = -1); - - /** Allocates a new string from UTF-16 data.*/ - static V8_DEPRECATED( - "Use maybe version", - Local NewFromTwoByte(Isolate* isolate, const uint16_t* data, - NewStringType type = kNormalString, - int length = -1)); + Isolate* isolate, const uint8_t* data, + NewStringType type = NewStringType::kNormal, int length = -1); /** Allocates a new string from UTF-16 data. Only returns an empty value when * length > kMaxLength. **/ static V8_WARN_UNUSED_RESULT MaybeLocal NewFromTwoByte( - Isolate* isolate, const uint16_t* data, v8::NewStringType type, - int length = -1); + Isolate* isolate, const uint16_t* data, + NewStringType type = NewStringType::kNormal, int length = -1); /** * Creates a new string by concatenating the left and the right strings @@ -2968,10 +2924,6 @@ class V8_EXPORT String : public Name { * should the underlying buffer be deallocated or modified except through the * destructor of the external string resource. */ - static V8_DEPRECATED( - "Use maybe version", - Local NewExternal(Isolate* isolate, - ExternalOneByteStringResource* resource)); static V8_WARN_UNUSED_RESULT MaybeLocal NewExternalOneByte( Isolate* isolate, ExternalOneByteStringResource* resource); @@ -3368,8 +3320,6 @@ enum class IntegrityLevel { kFrozen, kSealed }; */ class V8_EXPORT Object : public Value { public: - V8_DEPRECATED("Use maybe version", - bool Set(Local key, Local value)); /** * Set only return Just(true) or Empty(), so if it should never fail, use * result.Check(). @@ -3377,8 +3327,6 @@ class V8_EXPORT Object : public Value { V8_WARN_UNUSED_RESULT Maybe Set(Local context, Local key, Local value); - V8_DEPRECATED("Use maybe version", - bool Set(uint32_t index, Local value)); V8_WARN_UNUSED_RESULT Maybe Set(Local context, uint32_t index, Local value); @@ -3420,13 +3368,12 @@ class V8_EXPORT Object : public Value { // // Returns true on success. 
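With the non-maybe String and Object overloads above removed, embedder code goes through the context-aware variants and handles failure explicitly. A minimal sketch of the resulting pattern, assuming an entered isolate and context; the property name is illustrative:

    v8::Local<v8::Object> obj = v8::Object::New(isolate);
    v8::Local<v8::String> key =
        v8::String::NewFromUtf8(isolate, "status").ToLocalChecked();
    // Set() yields Maybe<bool>; Check() is for calls that should never fail.
    obj->Set(context, key, v8::True(isolate)).Check();
    v8::Local<v8::Value> value;
    if (obj->Get(context, key).ToLocal(&value)) {
      bool flag = value->BooleanValue(isolate);  // Never throws.
      (void)flag;
    }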
V8_WARN_UNUSED_RESULT Maybe DefineProperty( - Local context, Local key, PropertyDescriptor& descriptor); + Local context, Local key, + PropertyDescriptor& descriptor); // NOLINT(runtime/references) - V8_DEPRECATED("Use maybe version", Local Get(Local key)); V8_WARN_UNUSED_RESULT MaybeLocal Get(Local context, Local key); - V8_DEPRECATED("Use maybe version", Local Get(uint32_t index)); V8_WARN_UNUSED_RESULT MaybeLocal Get(Local context, uint32_t index); @@ -3910,9 +3857,6 @@ class ReturnValue { } // Local setters template - V8_INLINE V8_DEPRECATED("Use Global<> instead", - void Set(const Persistent& handle)); - template V8_INLINE void Set(const Global& handle); template V8_INLINE void Set(const TracedGlobal& handle); @@ -5177,8 +5121,7 @@ class V8_EXPORT SharedArrayBuffer : public Object { allocation_length_(0), allocation_mode_(Allocator::AllocationMode::kNormal), deleter_(nullptr), - deleter_data_(nullptr), - is_growable_(false) {} + deleter_data_(nullptr) {} void* AllocationBase() const { return allocation_base_; } size_t AllocationLength() const { return allocation_length_; } @@ -5190,13 +5133,12 @@ class V8_EXPORT SharedArrayBuffer : public Object { size_t ByteLength() const { return byte_length_; } DeleterCallback Deleter() const { return deleter_; } void* DeleterData() const { return deleter_data_; } - bool IsGrowable() const { return is_growable_; } private: Contents(void* data, size_t byte_length, void* allocation_base, size_t allocation_length, Allocator::AllocationMode allocation_mode, DeleterCallback deleter, - void* deleter_data, bool is_growable); + void* deleter_data); void* data_; size_t byte_length_; @@ -5205,7 +5147,6 @@ class V8_EXPORT SharedArrayBuffer : public Object { Allocator::AllocationMode allocation_mode_; DeleterCallback deleter_; void* deleter_data_; - bool is_growable_; friend class SharedArrayBuffer; }; @@ -5303,38 +5244,6 @@ class V8_EXPORT Date : public Object { V8_INLINE static Date* Cast(Value* obj); - /** - * Time zone redetection indicator for - * DateTimeConfigurationChangeNotification. - * - * kSkip indicates V8 that the notification should not trigger redetecting - * host time zone. kRedetect indicates V8 that host time zone should be - * redetected, and used to set the default time zone. - * - * The host time zone detection may require file system access or similar - * operations unlikely to be available inside a sandbox. If v8 is run inside a - * sandbox, the host time zone has to be detected outside the sandbox before - * calling DateTimeConfigurationChangeNotification function. - */ - enum class TimeZoneDetection { kSkip, kRedetect }; - - /** - * Notification that the embedder has changed the time zone, - * daylight savings time, or other date / time configuration - * parameters. V8 keeps a cache of various values used for - * date / time computation. This notification will reset - * those cached values for the current context so that date / - * time configuration changes would be reflected in the Date - * object. - * - * This API should not be called more than needed as it will - * negatively impact the performance of date operations. 
- */ - V8_DEPRECATED("Use Isolate::DateTimeConfigurationChangeNotification", - static void DateTimeConfigurationChangeNotification( - Isolate* isolate, TimeZoneDetection time_zone_detection = - TimeZoneDetection::kSkip)); - private: static void CheckCast(Value* obj); }; @@ -5437,6 +5346,8 @@ class V8_EXPORT RegExp : public Object { kDotAll = 1 << 5, }; + static constexpr int kFlagCount = 6; + /** * Creates a regular expression from the given pattern string and * the flags bit field. May throw a JavaScript exception as @@ -6020,21 +5931,6 @@ class V8_EXPORT FunctionTemplate : public Template { */ void SetAcceptAnyReceiver(bool value); - /** - * Determines whether the __proto__ accessor ignores instances of - * the function template. If instances of the function template are - * ignored, __proto__ skips all instances and instead returns the - * next object in the prototype chain. - * - * Call with a value of true to make the __proto__ accessor ignore - * instances of the function template. Call with a value of false - * to make the __proto__ accessor not ignore instances of the - * function template. By default, instances of a function template - * are not ignored. - */ - V8_DEPRECATED("This feature is incompatible with ES6+.", - void SetHiddenPrototype(bool value)); - /** * Sets the ReadOnly flag in the attributes of the 'prototype' property * of functions created from this FunctionTemplate to true. @@ -6538,7 +6434,19 @@ V8_INLINE Local False(Isolate* isolate); */ class V8_EXPORT ResourceConstraints { public: - ResourceConstraints(); + /** + * Configures the constraints with reasonable default values based on the + * provided heap size limit. The heap size includes both the young and + * the old generation. + * + * \param maximum_heap_size_in_bytes The hard limit for the heap size. + * When the heap size approaches this limit, V8 will perform series of + * garbage collections and invoke the NearHeapLimitCallback. + * If the garbage collections do not help and the callback does not + * increase the limit, then V8 will crash with V8::FatalProcessOutOfMemory. + */ + void ConfigureDefaultsFromHeapSize(size_t initial_heap_size_in_bytes, + size_t maximum_heap_size_in_bytes); /** * Configures the constraints with reasonable default values based on the @@ -6552,26 +6460,81 @@ class V8_EXPORT ResourceConstraints { void ConfigureDefaults(uint64_t physical_memory, uint64_t virtual_memory_limit); - // Returns the max semi-space size in KB. - size_t max_semi_space_size_in_kb() const { - return max_semi_space_size_in_kb_; + /** + * The address beyond which the VM's stack may not grow. + */ + uint32_t* stack_limit() const { return stack_limit_; } + void set_stack_limit(uint32_t* value) { stack_limit_ = value; } + + /** + * The amount of virtual memory reserved for generated code. This is relevant + * for 64-bit architectures that rely on code range for calls in code. + */ + size_t code_range_size_in_bytes() const { return code_range_size_; } + void set_code_range_size_in_bytes(size_t limit) { code_range_size_ = limit; } + + /** + * The maximum size of the old generation. + * When the old generation approaches this limit, V8 will perform series of + * garbage collections and invoke the NearHeapLimitCallback. + * If the garbage collections do not help and the callback does not + * increase the limit, then V8 will crash with V8::FatalProcessOutOfMemory. 
+   */
+  size_t max_old_generation_size_in_bytes() const {
+    return max_old_generation_size_;
+  }
+  void set_max_old_generation_size_in_bytes(size_t limit) {
+    max_old_generation_size_ = limit;
   }

-  // Sets the max semi-space size in KB.
-  void set_max_semi_space_size_in_kb(size_t limit_in_kb) {
-    max_semi_space_size_in_kb_ = limit_in_kb;
+  /**
+   * The maximum size of the young generation, which consists of two
+   * semi-spaces and a large object space. This affects the frequency of
+   * Scavenge garbage collections and should typically be much smaller than
+   * the old generation.
+   */
+  size_t max_young_generation_size_in_bytes() const {
+    return max_young_generation_size_;
+  }
+  void set_max_young_generation_size_in_bytes(size_t limit) {
+    max_young_generation_size_ = limit;
   }

-  size_t max_old_space_size() const { return max_old_space_size_; }
-  void set_max_old_space_size(size_t limit_in_mb) {
-    max_old_space_size_ = limit_in_mb;
+  size_t initial_old_generation_size_in_bytes() const {
+    return initial_old_generation_size_;
   }
-  uint32_t* stack_limit() const { return stack_limit_; }
-  // Sets an address beyond which the VM's stack may not grow.
-  void set_stack_limit(uint32_t* value) { stack_limit_ = value; }
-  size_t code_range_size() const { return code_range_size_; }
-  void set_code_range_size(size_t limit_in_mb) {
-    code_range_size_ = limit_in_mb;
+  void set_initial_old_generation_size_in_bytes(size_t initial_size) {
+    initial_old_generation_size_ = initial_size;
+  }
+
+  size_t initial_young_generation_size_in_bytes() const {
+    return initial_young_generation_size_;
+  }
+  void set_initial_young_generation_size_in_bytes(size_t initial_size) {
+    initial_young_generation_size_ = initial_size;
+  }
+
+  /**
+   * Deprecated functions. Do not use in new code.
+   */
+  V8_DEPRECATE_SOON("Use code_range_size_in_bytes.",
+                    size_t code_range_size() const) {
+    return code_range_size_ / kMB;
+  }
+  V8_DEPRECATE_SOON("Use set_code_range_size_in_bytes.",
+                    void set_code_range_size(size_t limit_in_mb)) {
+    code_range_size_ = limit_in_mb * kMB;
+  }
+  V8_DEPRECATE_SOON("Use max_young_generation_size_in_bytes.",
+                    size_t max_semi_space_size_in_kb() const);
+  V8_DEPRECATE_SOON("Use set_max_young_generation_size_in_bytes.",
+                    void set_max_semi_space_size_in_kb(size_t limit_in_kb));
+  V8_DEPRECATE_SOON("Use max_old_generation_size_in_bytes.",
+                    size_t max_old_space_size() const) {
+    return max_old_generation_size_ / kMB;
+  }
+  V8_DEPRECATE_SOON("Use set_max_old_generation_size_in_bytes.",
+                    void set_max_old_space_size(size_t limit_in_mb)) {
+    max_old_generation_size_ = limit_in_mb * kMB;
   }
   V8_DEPRECATE_SOON("Zone does not pool memory any more.",
                     size_t max_zone_pool_size() const) {
@@ -6583,14 +6546,14 @@
   }

  private:
-  // max_semi_space_size_ is in KB
-  size_t max_semi_space_size_in_kb_;
-
-  // The remaining limits are in MB
-  size_t max_old_space_size_;
-  uint32_t* stack_limit_;
-  size_t code_range_size_;
-  size_t max_zone_pool_size_;
+  static constexpr size_t kMB = 1048576u;
+  size_t code_range_size_ = 0;
+  size_t max_old_generation_size_ = 0;
+  size_t max_young_generation_size_ = 0;
+  size_t max_zone_pool_size_ = 0;
+  size_t initial_old_generation_size_ = 0;
+  size_t initial_young_generation_size_ = 0;
+  uint32_t* stack_limit_ = nullptr;
 };


@@ -6749,7 +6712,8 @@ class PromiseRejectMessage {
 typedef void (*PromiseRejectCallback)(PromiseRejectMessage message);

 // --- Microtasks Callbacks ---
-typedef void (*MicrotasksCompletedCallback)(Isolate*);
+V8_DEPRECATE_SOON("Use *WithData
version.", + typedef void (*MicrotasksCompletedCallback)(Isolate*)); typedef void (*MicrotasksCompletedCallbackWithData)(Isolate*, void*); typedef void (*MicrotaskCallback)(void* data); @@ -6783,7 +6747,8 @@ class V8_EXPORT MicrotaskQueue { /** * Creates an empty MicrotaskQueue instance. */ - static std::unique_ptr New(Isolate* isolate); + static std::unique_ptr New( + Isolate* isolate, MicrotasksPolicy policy = MicrotasksPolicy::kAuto); virtual ~MicrotaskQueue() = default; @@ -6831,6 +6796,15 @@ class V8_EXPORT MicrotaskQueue { */ virtual bool IsRunningMicrotasks() const = 0; + /** + * Returns the current depth of nested MicrotasksScope that has + * kRunMicrotasks. + */ + virtual int GetMicrotasksScopeDepth() const = 0; + + MicrotaskQueue(const MicrotaskQueue&) = delete; + MicrotaskQueue& operator=(const MicrotaskQueue&) = delete; + private: friend class internal::MicrotaskQueue; MicrotaskQueue() = default; @@ -6892,6 +6866,8 @@ typedef void (*FailedAccessCheckCallback)(Local target, */ typedef bool (*AllowCodeGenerationFromStringsCallback)(Local context, Local source); +typedef MaybeLocal (*ModifyCodeGenerationFromStringsCallback)( + Local context, Local source); // --- WebAssembly compilation callbacks --- typedef bool (*ExtensionCallback)(const FunctionCallbackInfo&); @@ -7352,12 +7328,13 @@ class V8_EXPORT EmbedderHeapTracer { void GarbageCollectionForTesting(EmbedderStackState stack_state); /* - * Called by the embedder to signal newly allocated memory. Not bound to - * tracing phases. Embedders should trade off when increments are reported as - * V8 may consult global heuristics on whether to trigger garbage collection - * on this change. + * Called by the embedder to signal newly allocated or freed memory. Not bound + * to tracing phases. Embedders should trade off when increments are reported + * as V8 may consult global heuristics on whether to trigger garbage + * collection on this change. */ void IncreaseAllocatedSize(size_t bytes); + void DecreaseAllocatedSize(size_t bytes); /* * Returns the v8::Isolate this tracer is attached too and |nullptr| if it @@ -7685,6 +7662,8 @@ class V8_EXPORT Isolate { kRegExpMatchIsFalseishOnJSRegExp = 73, kDateGetTimezoneOffset = 74, kStringNormalize = 75, + kCallSiteAPIGetFunctionSloppyCall = 76, + kCallSiteAPIGetThisSloppyCall = 77, // If you add new values here, you'll also need to update Chromium's: // web_feature.mojom, UseCounterCallback.cpp, and enums.xml. V8 changes to @@ -8489,6 +8468,8 @@ class V8_EXPORT Isolate { */ void SetAllowCodeGenerationFromStringsCallback( AllowCodeGenerationFromStringsCallback callback); + void SetModifyCodeGenerationFromStringsCallback( + ModifyCodeGenerationFromStringsCallback callback); /** * Set the callback to invoke to check if wasm code generation should @@ -8717,7 +8698,9 @@ class V8_EXPORT V8 { * Sets V8 flags from a string. */ static void SetFlagsFromString(const char* str); - static void SetFlagsFromString(const char* str, int length); + static void SetFlagsFromString(const char* str, size_t length); + V8_DEPRECATED("use size_t version", + static void SetFlagsFromString(const char* str, int length)); /** * Sets V8 flags from the command line. 
@@ -8888,9 +8871,6 @@ class V8_EXPORT V8 { const char* label); static Value* Eternalize(Isolate* isolate, Value* handle); - static void RegisterExternallyReferencedObject(internal::Address* location, - internal::Isolate* isolate); - template friend class PersistentValueMapBase; @@ -9524,6 +9504,15 @@ class V8_EXPORT Context { template V8_INLINE MaybeLocal GetDataFromSnapshotOnce(size_t index); + /** + * If callback is set, abort any attempt to execute JavaScript in this + * context, call the specified callback, and throw an exception. + * To unset abort, pass nullptr as callback. + */ + typedef void (*AbortScriptExecutionCallback)(Isolate* isolate, + Local context); + void SetAbortScriptExecution(AbortScriptExecutionCallback callback); + /** * Stack-allocated class which sets the execution context for all * operations executed within a local scope. @@ -9837,14 +9826,6 @@ void Persistent::Copy(const Persistent& that) { M::Copy(that, this); } -template -bool PersistentBase::IsIndependent() const { - typedef internal::Internals I; - if (this->IsEmpty()) return false; - return I::GetNodeFlag(reinterpret_cast(this->val_), - I::kNodeIsIndependentShift); -} - template bool PersistentBase::IsWeak() const { typedef internal::Internals I; @@ -9911,31 +9892,6 @@ void PersistentBase::AnnotateStrongRetainer(const char* label) { label); } -template -void PersistentBase::RegisterExternalReference(Isolate* isolate) const { - if (IsEmpty()) return; - V8::RegisterExternallyReferencedObject( - reinterpret_cast(this->val_), - reinterpret_cast(isolate)); -} - -template -void PersistentBase::MarkIndependent() { - typedef internal::Internals I; - if (this->IsEmpty()) return; - I::UpdateNodeFlag(reinterpret_cast(this->val_), true, - I::kNodeIsIndependentShift); -} - -template -void PersistentBase::MarkActive() { - typedef internal::Internals I; - if (this->IsEmpty()) return; - I::UpdateNodeFlag(reinterpret_cast(this->val_), true, - I::kNodeIsActiveShift); -} - - template void PersistentBase::SetWrapperClassId(uint16_t class_id) { typedef internal::Internals I; @@ -10061,17 +10017,6 @@ void TracedGlobal::SetFinalizationCallback( template ReturnValue::ReturnValue(internal::Address* slot) : value_(slot) {} -template -template -void ReturnValue::Set(const Persistent& handle) { - TYPE_CHECK(T, S); - if (V8_UNLIKELY(handle.IsEmpty())) { - *value_ = GetDefaultValue(); - } else { - *value_ = *reinterpret_cast(*handle); - } -} - template template void ReturnValue::Set(const Global& handle) { diff --git a/deps/v8/include/v8config.h b/deps/v8/include/v8config.h index 5ec0480cf5c0c2..7bd2938225bc74 100644 --- a/deps/v8/include/v8config.h +++ b/deps/v8/include/v8config.h @@ -353,6 +353,12 @@ #define V8_WARN_UNUSED_RESULT /* NOT SUPPORTED */ #endif +#if defined(BUILDING_V8_SHARED) && defined(USING_V8_SHARED) +#error Inconsistent build configuration: To build the V8 shared library \ +set BUILDING_V8_SHARED, to include its headers for linking against the \ +V8 shared library set USING_V8_SHARED. +#endif + #ifdef V8_OS_WIN // Setup for Windows DLL export/import. 
When building the V8 DLL the diff --git a/deps/v8/infra/mb/gn_isolate_map.pyl b/deps/v8/infra/mb/gn_isolate_map.pyl index 05b147d503f000..110b36c500bbd7 100644 --- a/deps/v8/infra/mb/gn_isolate_map.pyl +++ b/deps/v8/infra/mb/gn_isolate_map.pyl @@ -31,6 +31,10 @@ "label": "//test:v8_d8_default", "type": "script", }, + "generate-bytecode-expectations": { + "label": "//test/cctest:generate-bytecode-expectations", + "type": "script", + }, "mjsunit": { "label": "//test/mjsunit:v8_mjsunit", "type": "script", diff --git a/deps/v8/infra/mb/mb_config.pyl b/deps/v8/infra/mb/mb_config.pyl index 354415ef438ccf..d5d192fb20ca94 100644 --- a/deps/v8/infra/mb/mb_config.pyl +++ b/deps/v8/infra/mb/mb_config.pyl @@ -95,6 +95,8 @@ 'V8 iOS - sim': 'release_x64_ios_simulator', 'V8 Linux64 - debug - perfetto - builder': 'debug_x64_perfetto', 'V8 Linux64 - pointer compression': 'release_x64_pointer_compression', + 'V8 Linux64 - pointer compression without dchecks': + 'release_x64_pointer_compression_without_dchecks', 'V8 Linux64 - arm64 - sim - pointer compression - builder': 'release_simulate_arm64_pointer_compression', 'V8 Linux - noembed': 'release_x86_noembed', @@ -201,6 +203,7 @@ 'v8_linux_verify_csa_rel_ng': 'release_x86_verify_csa', 'v8_linux_nodcheck_rel_ng': 'release_x86_minimal_symbols', 'v8_linux_dbg_ng': 'debug_x86_trybot', + 'v8_linux_noi18n_compile_dbg': 'debug_x86_no_i18n', 'v8_linux_noi18n_rel_ng': 'release_x86_no_i18n_trybot', 'v8_linux_gc_stress_dbg': 'debug_x86_trybot', 'v8_linux_nosnap_rel': 'release_x86_no_snap_trybot', @@ -458,6 +461,8 @@ 'release_x64_pointer_compression': [ 'release_bot', 'x64', 'dcheck_always_on', 'v8_enable_slow_dchecks', 'v8_enable_pointer_compression'], + 'release_x64_pointer_compression_without_dchecks': [ + 'release_bot', 'x64', 'v8_enable_pointer_compression'], 'release_x64_trybot': [ 'release_trybot', 'x64'], 'release_x64_test_features_trybot': [ @@ -491,7 +496,7 @@ 'debug_x64_fuchsia': [ 'debug_bot', 'x64', 'fuchsia'], 'debug_x64_gcc': [ - 'debug_bot', 'x64', 'gcc'], + 'debug_bot', 'x64', 'gcc', 'v8_check_header_includes'], 'debug_x64_header_includes': [ 'debug_bot', 'x64', 'v8_check_header_includes'], 'debug_x64_jumbo': [ @@ -535,9 +540,10 @@ 'release_x86_noembed_trybot': [ 'release_trybot', 'x86', 'v8_no_enable_embedded_builtins'], 'release_x86_gcc': [ - 'release_bot', 'x86', 'gcc'], + 'release_bot', 'x86', 'gcc', 'v8_check_header_includes'], 'release_x86_gcc_minimal_symbols': [ - 'release_bot', 'x86', 'gcc', 'minimal_symbols'], + 'release_bot', 'x86', 'gcc', 'minimal_symbols', + 'v8_check_header_includes'], 'release_x86_gcmole': [ 'release_bot', 'x86', 'gcmole'], 'release_x86_gcmole_trybot': [ diff --git a/deps/v8/infra/testing/PRESUBMIT.py b/deps/v8/infra/testing/PRESUBMIT.py index b8e059724e0805..f1a64707b9cb1c 100644 --- a/deps/v8/infra/testing/PRESUBMIT.py +++ b/deps/v8/infra/testing/PRESUBMIT.py @@ -33,7 +33,9 @@ ] # This is not an exhaustive list. It only reflects what we currently use. If -# there's need to specify a different property, just add it here. +# there's need to specify a different property, add it here and update the +# properties passed to swarming in: +# //build/scripts/slave/recipe_modules/v8/testing.py. 
SUPPORTED_SWARMING_TASK_ATTRS = [ 'expiration', 'hard_timeout', diff --git a/deps/v8/infra/testing/builders.pyl b/deps/v8/infra/testing/builders.pyl index 0d39ea31f75512..13a73f3e94cd90 100644 --- a/deps/v8/infra/testing/builders.pyl +++ b/deps/v8/infra/testing/builders.pyl @@ -51,7 +51,7 @@ 'v8_linux_dbg_ng_triggered': { 'swarming_dimensions' : { 'cpu': 'x86-64-avx2', - 'os': 'Ubuntu-14.04', + 'os': 'Ubuntu-16.04', }, 'tests': [ {'name': 'benchmarks'}, @@ -67,7 +67,7 @@ }, 'v8_linux_gc_stress_dbg': { 'swarming_dimensions' : { - 'os': 'Ubuntu-14.04', + 'os': 'Ubuntu-16.04', }, 'tests': [ {'name': 'mjsunit', 'variant': 'slow_path', 'test_args': ['--gc-stress'], 'shards': 2}, @@ -85,7 +85,7 @@ 'v8_linux_nodcheck_rel_ng_triggered': { 'swarming_dimensions' : { 'cpu': 'x86-64-avx2', - 'os': 'Ubuntu-14.04', + 'os': 'Ubuntu-16.04', }, 'tests': [ {'name': 'benchmarks'}, @@ -100,7 +100,7 @@ }, 'v8_linux_noembed_rel_ng_triggered': { 'swarming_dimensions' : { - 'os': 'Ubuntu-14.04', + 'os': 'Ubuntu-16.04', }, 'tests': [ {'name': 'v8testing', 'shards': 2}, @@ -108,7 +108,7 @@ }, 'v8_linux_noi18n_rel_ng_triggered': { 'swarming_dimensions' : { - 'os': 'Ubuntu-14.04', + 'os': 'Ubuntu-16.04', }, 'tests': [ {'name': 'mozilla', 'variant': 'default'}, @@ -118,7 +118,7 @@ }, 'v8_linux_nosnap_rel': { 'swarming_dimensions' : { - 'os': 'Ubuntu-14.04', + 'os': 'Ubuntu-16.04', }, 'tests': [ {'name': 'v8testing', 'variant': 'default', 'shards': 6}, @@ -135,7 +135,7 @@ 'v8_linux_rel_ng_triggered': { 'swarming_dimensions' : { 'cpu': 'x86-64-avx2', - 'os': 'Ubuntu-14.04', + 'os': 'Ubuntu-16.04', }, 'tests': [ {'name': 'benchmarks'}, @@ -155,7 +155,7 @@ 'v8_linux_optional_rel_ng_triggered': { 'swarming_dimensions' : { 'cpu': 'x86-64-avx2', - 'os': 'Ubuntu-14.04', + 'os': 'Ubuntu-16.04', }, 'tests': [ # Code serializer. @@ -210,7 +210,7 @@ }, 'v8_linux_verify_csa_rel_ng_triggered': { 'swarming_dimensions' : { - 'os': 'Ubuntu-14.04', + 'os': 'Ubuntu-16.04', }, 'tests': [ {'name': 'v8testing', 'shards': 2}, @@ -220,7 +220,7 @@ # Linux32 with arm simulators 'v8_linux_arm_dbg': { 'swarming_dimensions' : { - 'os': 'Ubuntu-14.04', + 'os': 'Ubuntu-16.04', }, 'tests': [ {'name': 'mjsunit_sp_frame_access'}, @@ -233,7 +233,7 @@ }, 'v8_linux_arm_lite_rel_ng_triggered': { 'swarming_dimensions' : { - 'os': 'Ubuntu-14.04', + 'os': 'Ubuntu-16.04', }, 'tests': [ {'name': 'v8testing', 'variant': 'default', 'shards': 4}, @@ -241,7 +241,7 @@ }, 'v8_linux_arm_rel_ng_triggered': { 'swarming_dimensions' : { - 'os': 'Ubuntu-14.04', + 'os': 'Ubuntu-16.04', }, 'tests': [ {'name': 'mjsunit_sp_frame_access', 'shards': 2}, @@ -256,7 +256,7 @@ # Linux64 'v8_linux64_asan_rel_ng_triggered': { 'swarming_dimensions' : { - 'os': 'Ubuntu-14.04', + 'os': 'Ubuntu-16.04', }, 'tests': [ {'name': 'test262_variants', 'shards': 7}, @@ -267,7 +267,7 @@ }, 'v8_linux64_cfi_rel_ng_triggered': { 'swarming_dimensions' : { - 'os': 'Ubuntu-14.04', + 'os': 'Ubuntu-16.04', }, 'tests': [ {'name': 'benchmarks'}, @@ -280,7 +280,7 @@ 'v8_linux64_dbg_ng_triggered': { 'swarming_dimensions' : { 'cpu': 'x86-64-avx2', - 'os': 'Ubuntu-14.04', + 'os': 'Ubuntu-16.04', }, 'tests': [ {'name': 'benchmarks'}, @@ -297,7 +297,7 @@ }, 'v8_linux64_gc_stress_custom_snapshot_dbg_ng_triggered': { 'swarming_dimensions' : { - 'os': 'Ubuntu-14.04', + 'os': 'Ubuntu-16.04', }, 'tests': [ { @@ -309,7 +309,7 @@ }, 'v8_linux64_fyi_rel_ng_triggered': { 'swarming_dimensions' : { - 'os': 'Ubuntu-14.04', + 'os': 'Ubuntu-16.04', }, 'tests': [ # Stress sampling. 
@@ -322,7 +322,7 @@ }, 'v8_linux64_msan_rel': { 'swarming_dimensions' : { - 'os': 'Ubuntu-14.04', + 'os': 'Ubuntu-16.04', }, 'tests': [ {'name': 'test262', 'shards': 2}, @@ -332,23 +332,28 @@ 'v8_linux64_nodcheck_rel_ng_triggered': { 'swarming_dimensions' : { 'cpu': 'x86-64-avx2', - 'os': 'Ubuntu-14.04', + 'os': 'Ubuntu-16.04', }, 'tests': [ {'name': 'benchmarks'}, + {'name': 'benchmarks', 'variant': 'assert_types'}, {'name': 'benchmarks', 'variant': 'extra'}, + {'name': 'check-bytecode-baseline'}, {'name': 'mozilla'}, + {'name': 'mozilla', 'variant': 'assert_types'}, {'name': 'mozilla', 'variant': 'extra'}, {'name': 'perf_integration'}, {'name': 'test262_variants', 'shards': 2}, + {'name': 'test262_variants', 'variant': 'assert_types', 'shards': 2}, {'name': 'test262_variants', 'variant': 'extra', 'shards': 2}, {'name': 'v8testing', 'shards': 2}, + {'name': 'v8testing', 'variant': 'assert_types'}, {'name': 'v8testing', 'variant': 'extra'}, ], }, 'v8_linux64_perfetto_dbg_ng_triggered': { 'swarming_dimensions' : { - 'os': 'Ubuntu-14.04', + 'os': 'Ubuntu-16.04', }, 'tests': [ {'name': 'v8testing', 'shards': 3}, @@ -356,7 +361,7 @@ }, 'v8_linux64_pointer_compression_rel_ng_triggered': { 'swarming_dimensions' : { - 'os': 'Ubuntu-14.04', + 'os': 'Ubuntu-16.04', }, 'tests': [ {'name': 'v8testing', 'shards': 3}, @@ -365,7 +370,7 @@ 'v8_linux64_rel_ng_triggered': { 'swarming_dimensions' : { 'cpu': 'x86-64-avx2', - 'os': 'Ubuntu-14.04', + 'os': 'Ubuntu-16.04', }, 'tests': [ # TODO(machenbach): Add benchmarks. @@ -386,7 +391,7 @@ 'v8_linux64_rel_xg': { 'swarming_dimensions' : { 'cpu': 'x86-64-avx2', - 'os': 'Ubuntu-14.04', + 'os': 'Ubuntu-16.04', }, 'tests': [ {'name': 'v8initializers'}, @@ -395,7 +400,7 @@ }, 'v8_linux64_sanitizer_coverage_rel': { 'swarming_dimensions' : { - 'os': 'Ubuntu-14.04', + 'os': 'Ubuntu-16.04', }, 'tests': [ {'name': 'v8testing', 'shards': 3}, @@ -403,7 +408,7 @@ }, 'v8_linux64_tsan_rel': { 'swarming_dimensions' : { - 'os': 'Ubuntu-14.04', + 'os': 'Ubuntu-16.04', }, 'tests': [ {'name': 'benchmarks'}, @@ -416,7 +421,7 @@ }, 'v8_linux64_tsan_isolates_rel_ng_triggered': { 'swarming_dimensions' : { - 'os': 'Ubuntu-14.04', + 'os': 'Ubuntu-16.04', }, 'tests': [ {'name': 'v8testing', 'test_args': ['--isolates'], 'shards': 7}, @@ -424,7 +429,7 @@ }, 'v8_linux64_ubsan_rel_ng_triggered': { 'swarming_dimensions' : { - 'os': 'Ubuntu-14.04', + 'os': 'Ubuntu-16.04', }, 'tests': [ {'name': 'v8testing', 'shards': 2}, @@ -432,7 +437,7 @@ }, 'v8_linux64_verify_csa_rel_ng_triggered': { 'swarming_dimensions' : { - 'os': 'Ubuntu-14.04', + 'os': 'Ubuntu-16.04', }, 'tests': [ {'name': 'v8testing', 'shards': 2}, @@ -442,7 +447,7 @@ # Linux64 with arm64 simulators 'v8_linux_arm64_dbg': { 'swarming_dimensions' : { - 'os': 'Ubuntu-14.04', + 'os': 'Ubuntu-16.04', }, 'tests': [ {'name': 'mjsunit_sp_frame_access'}, @@ -455,7 +460,7 @@ }, 'v8_linux_arm64_gc_stress_dbg': { 'swarming_dimensions' : { - 'os': 'Ubuntu-14.04', + 'os': 'Ubuntu-16.04', }, 'tests': [ {'name': 'd8testing', 'test_args': ['--gc-stress'], 'shards': 10}, @@ -463,7 +468,7 @@ }, 'v8_linux_arm64_rel_ng_triggered': { 'swarming_dimensions' : { - 'os': 'Ubuntu-14.04', + 'os': 'Ubuntu-16.04', }, 'tests': [ {'name': 'mjsunit_sp_frame_access', 'shards': 2}, @@ -476,7 +481,7 @@ }, 'v8_linux64_arm64_pointer_compression_rel_ng_triggered': { 'swarming_dimensions' : { - 'os': 'Ubuntu-14.04', + 'os': 'Ubuntu-16.04', }, 'swarming_task_attrs': { 'expiration': 14400, @@ -632,7 +637,7 @@ # Main. 
'V8 Fuzzer': { 'swarming_dimensions' : { - 'os': 'Ubuntu-14.04', + 'os': 'Ubuntu-16.04', }, 'swarming_task_attrs': { 'expiration': 14400, @@ -646,7 +651,7 @@ 'V8 Linux': { 'swarming_dimensions': { 'cpu': 'x86-64-avx2', - 'os': 'Ubuntu-14.04', + 'os': 'Ubuntu-16.04', }, 'tests': [ {'name': 'benchmarks'}, @@ -688,7 +693,7 @@ }, 'V8 Linux - arm64 - sim - MSAN': { 'swarming_dimensions': { - 'os': 'Ubuntu-14.04', + 'os': 'Ubuntu-16.04', }, 'tests': [ {'name': 'test262', 'shards': 3}, @@ -698,7 +703,7 @@ 'V8 Linux - debug': { 'swarming_dimensions': { 'cpu': 'x86-64-avx2', - 'os': 'Ubuntu-14.04', + 'os': 'Ubuntu-16.04', }, 'tests': [ {'name': 'benchmarks'}, @@ -759,7 +764,7 @@ }, 'V8 Linux - noembed': { 'swarming_dimensions': { - 'os': 'Ubuntu-14.04', + 'os': 'Ubuntu-16.04', }, 'tests': [ {'name': 'v8testing'}, @@ -767,7 +772,7 @@ }, 'V8 Linux - noembed - debug': { 'swarming_dimensions': { - 'os': 'Ubuntu-14.04', + 'os': 'Ubuntu-16.04', }, 'tests': [ {'name': 'v8testing', 'shards': 3}, @@ -775,7 +780,7 @@ }, 'V8 Linux - full debug': { 'swarming_dimensions': { - 'os': 'Ubuntu-14.04', + 'os': 'Ubuntu-16.04', }, 'swarming_task_attrs': { 'expiration': 14400, @@ -788,7 +793,7 @@ }, 'V8 Linux - gc stress': { 'swarming_dimensions': { - 'os': 'Ubuntu-14.04', + 'os': 'Ubuntu-16.04', }, 'tests': [ { @@ -806,7 +811,7 @@ }, 'V8 Linux - noi18n - debug': { 'swarming_dimensions': { - 'os': 'Ubuntu-14.04', + 'os': 'Ubuntu-16.04', }, 'tests': [ {'name': 'mozilla', 'variant': 'default'}, @@ -816,7 +821,7 @@ }, 'V8 Linux - nosnap': { 'swarming_dimensions': { - 'os': 'Ubuntu-14.04', + 'os': 'Ubuntu-16.04', }, 'swarming_task_attrs': { 'expiration': 14400, @@ -831,7 +836,7 @@ }, 'V8 Linux - nosnap - debug': { 'swarming_dimensions': { - 'os': 'Ubuntu-14.04', + 'os': 'Ubuntu-16.04', }, 'swarming_task_attrs': { 'expiration': 14400, @@ -844,7 +849,7 @@ }, 'V8 Linux - predictable': { 'swarming_dimensions': { - 'os': 'Ubuntu-14.04', + 'os': 'Ubuntu-16.04', }, 'tests': [ {'name': 'benchmarks'}, @@ -854,7 +859,7 @@ }, 'V8 Linux - shared': { 'swarming_dimensions': { - 'os': 'Ubuntu-14.04', + 'os': 'Ubuntu-16.04', }, 'tests': [ {'name': 'mozilla'}, @@ -864,7 +869,7 @@ }, 'V8 Linux - verify csa': { 'swarming_dimensions': { - 'os': 'Ubuntu-14.04', + 'os': 'Ubuntu-16.04', }, 'tests': [ {'name': 'v8testing'}, @@ -881,20 +886,25 @@ 'V8 Linux64': { 'swarming_dimensions': { 'cpu': 'x86-64-avx2', - 'os': 'Ubuntu-14.04', + 'os': 'Ubuntu-16.04', }, 'tests': [ {'name': 'benchmarks'}, + {'name': 'benchmarks', 'variant': 'assert_types'}, {'name': 'benchmarks', 'variant': 'extra'}, + {'name': 'check-bytecode-baseline'}, {'name': 'mjsunit_sp_frame_access'}, {'name': 'mozilla'}, + {'name': 'mozilla', 'variant': 'assert_types'}, {'name': 'mozilla', 'variant': 'extra'}, {'name': 'optimize_for_size'}, {'name': 'perf_integration'}, {'name': 'test262_variants', 'shards': 2}, + {'name': 'test262_variants', 'variant': 'assert_types'}, {'name': 'test262_variants', 'variant': 'extra'}, {'name': 'v8initializers'}, {'name': 'v8testing'}, + {'name': 'v8testing', 'variant': 'assert_types'}, {'name': 'v8testing', 'variant': 'extra'}, {'name': 'v8testing', 'variant': 'minor_mc', 'shards': 1}, # Noavx. 
@@ -917,7 +927,7 @@ }, 'V8 Linux64 - cfi': { 'swarming_dimensions' : { - 'os': 'Ubuntu-14.04', + 'os': 'Ubuntu-16.04', }, 'tests': [ {'name': 'benchmarks'}, @@ -929,7 +939,7 @@ }, 'V8 Linux64 - custom snapshot - debug': { 'swarming_dimensions' : { - 'os': 'Ubuntu-14.04', + 'os': 'Ubuntu-16.04', }, 'tests': [ {'name': 'mjsunit', 'test_args': ['--no-harness']}, @@ -938,7 +948,7 @@ 'V8 Linux64 - debug': { 'swarming_dimensions': { 'cpu': 'x86-64-avx2', - 'os': 'Ubuntu-14.04', + 'os': 'Ubuntu-16.04', }, 'tests': [ {'name': 'benchmarks'}, @@ -974,7 +984,7 @@ }, 'V8 Linux64 - debug - fyi': { 'swarming_dimensions' : { - 'os': 'Ubuntu-14.04', + 'os': 'Ubuntu-16.04', }, 'tests': [ # Infra staging. @@ -986,7 +996,7 @@ }, 'V8 Linux64 - debug - perfetto': { 'swarming_dimensions' : { - 'os': 'Ubuntu-14.04', + 'os': 'Ubuntu-16.04', }, 'swarming_task_attrs': { 'expiration': 14400, @@ -999,7 +1009,7 @@ }, 'V8 Linux64 - fyi': { 'swarming_dimensions' : { - 'os': 'Ubuntu-14.04', + 'os': 'Ubuntu-16.04', }, 'tests': [ # Infra staging. @@ -1011,7 +1021,7 @@ }, 'V8 Linux64 - gcov coverage': { 'swarming_dimensions' : { - 'os': 'Ubuntu-14.04', + 'os': 'Ubuntu-16.04', }, 'tests': [ {'name': 'v8testing'}, @@ -1019,7 +1029,7 @@ }, 'V8 Linux64 - internal snapshot': { 'swarming_dimensions' : { - 'os': 'Ubuntu-14.04', + 'os': 'Ubuntu-16.04', }, 'tests': [ {'name': 'v8testing'}, @@ -1027,7 +1037,7 @@ }, 'V8 Linux64 - pointer compression': { 'swarming_dimensions' : { - 'os': 'Ubuntu-14.04', + 'os': 'Ubuntu-16.04', }, 'tests': [ {'name': 'v8testing', 'shards': 2}, @@ -1035,7 +1045,7 @@ }, 'V8 Linux64 - shared': { 'swarming_dimensions' : { - 'os': 'Ubuntu-14.04', + 'os': 'Ubuntu-16.04', }, 'tests': [ {'name': 'mozilla'}, @@ -1045,7 +1055,7 @@ }, 'V8 Linux64 - verify csa': { 'swarming_dimensions' : { - 'os': 'Ubuntu-14.04', + 'os': 'Ubuntu-16.04', }, 'tests': [ {'name': 'v8testing'}, @@ -1053,7 +1063,7 @@ }, 'V8 Linux64 ASAN': { 'swarming_dimensions' : { - 'os': 'Ubuntu-14.04', + 'os': 'Ubuntu-16.04', }, 'tests': [ {'name': 'test262_variants', 'shards': 5}, @@ -1064,7 +1074,7 @@ }, 'V8 Linux64 GC Stress - custom snapshot': { 'swarming_dimensions' : { - 'os': 'Ubuntu-14.04', + 'os': 'Ubuntu-16.04', }, 'tests': [ { @@ -1076,7 +1086,7 @@ }, 'V8 Linux64 TSAN': { 'swarming_dimensions' : { - 'os': 'Ubuntu-14.04', + 'os': 'Ubuntu-16.04', }, 'tests': [ {'name': 'benchmarks'}, @@ -1089,7 +1099,7 @@ }, 'V8 Linux64 TSAN - concurrent marking': { 'swarming_dimensions' : { - 'os': 'Ubuntu-14.04', + 'os': 'Ubuntu-16.04', }, 'swarming_task_attrs': { 'expiration': 14400, @@ -1119,7 +1129,7 @@ }, 'V8 Linux64 TSAN - isolates': { 'swarming_dimensions' : { - 'os': 'Ubuntu-14.04', + 'os': 'Ubuntu-16.04', }, 'tests': [ {'name': 'v8testing', 'test_args': ['--isolates'], 'shards': 7}, @@ -1127,7 +1137,7 @@ }, 'V8 Linux64 UBSan': { 'swarming_dimensions' : { - 'os': 'Ubuntu-14.04', + 'os': 'Ubuntu-16.04', }, 'tests': [ {'name': 'mozilla'}, @@ -1156,8 +1166,8 @@ 'tests': [ {'name': 'mozilla'}, {'name': 'test262', 'shards': 2}, - {'name': 'v8testing', 'shards': 3}, - {'name': 'v8testing', 'variant': 'extra'}, + {'name': 'v8testing', 'shards': 4}, + {'name': 'v8testing', 'variant': 'extra', 'shards': 2}, ], }, 'V8 Mac64 ASAN': { @@ -1266,74 +1276,44 @@ }, 'V8 Arm': { 'swarming_dimensions': { - 'cores': '2', - 'cpu': 'armv7l', - 'os': 'Ubuntu-14.04', + 'cores': '8', + 'cpu': 'armv7l-32-ODROID-XU4', + 'os': 'Ubuntu-16.04', }, 'swarming_task_attrs': { 'expiration': 21600, 'hard_timeout': 5400, }, 'tests': [ - {'name': 'benchmarks'}, - {'name': 
'optimize_for_size'}, - {'name': 'v8testing', 'shards': 2}, # Odroid. { 'name': 'benchmarks', 'suffix': 'ODROID', - 'swarming_dimensions': { - 'cores': '8', - 'os': 'Ubuntu-16.04', - 'cpu': 'armv7l-32-ODROID-XU4', - }, # Less parallelism to prevent OOMs in benchmarks. 'test_args': ['-j2'], }, { 'name': 'optimize_for_size', 'suffix': 'ODROID', - 'swarming_dimensions': { - 'cores': '8', - 'os': 'Ubuntu-16.04', - 'cpu': 'armv7l-32-ODROID-XU4', - } }, { 'name': 'v8testing', 'suffix': 'ODROID', 'shards': 2, - 'swarming_dimensions': { - 'cores': '8', - 'os': 'Ubuntu-16.04', - 'cpu': 'armv7l-32-ODROID-XU4', - } }, ], }, 'V8 Arm - debug': { 'swarming_dimensions': { - 'cores': '2', - 'cpu': 'armv7l', - 'os': 'Ubuntu-14.04', + 'cores': '8', + 'cpu': 'armv7l-32-ODROID-XU4', + 'os': 'Ubuntu-16.04', }, 'swarming_task_attrs': { 'expiration': 21600, 'hard_timeout': 3600, }, 'tests': [ - { - 'name': 'optimize_for_size', - 'variant': 'default', - 'test_args': ['--extra-flags=--verify-heap-skip-remembered-set'], - 'shards': 2 - }, - { - 'name': 'v8testing', - 'variant': 'default', - 'test_args': ['--extra-flags=--verify-heap-skip-remembered-set'], - 'shards': 3 - }, # Odroid. { 'name': 'optimize_for_size', @@ -1341,11 +1321,6 @@ 'variant': 'default', 'test_args': ['--extra-flags=--verify-heap-skip-remembered-set'], 'shards': 2, - 'swarming_dimensions': { - 'cores': '8', - 'os': 'Ubuntu-16.04', - 'cpu': 'armv7l-32-ODROID-XU4', - } }, { 'name': 'v8testing', @@ -1353,48 +1328,32 @@ 'variant': 'default', 'test_args': ['--extra-flags=--verify-heap-skip-remembered-set'], 'shards': 3, - 'swarming_dimensions': { - 'cores': '8', - 'os': 'Ubuntu-16.04', - 'cpu': 'armv7l-32-ODROID-XU4', - } }, ], }, 'V8 Arm GC Stress': { 'swarming_dimensions': { - 'cores': '2', - 'cpu': 'armv7l', - 'os': 'Ubuntu-14.04', + 'cores': '8', + 'cpu': 'armv7l-32-ODROID-XU4', + 'os': 'Ubuntu-16.04', }, 'swarming_task_attrs': { 'expiration': 21600, 'hard_timeout': 7200, }, 'tests': [ - { - 'name': 'd8testing', - 'variant': 'default', - 'test_args': ['--gc-stress', '--extra-flags=--verify-heap-skip-remembered-set'], - 'shards': 3 - }, { 'name': 'd8testing', 'suffix': 'ODROID', 'variant': 'default', 'test_args': ['--gc-stress', '--extra-flags=--verify-heap-skip-remembered-set'], 'shards': 3, - 'swarming_dimensions': { - 'cores': '8', - 'os': 'Ubuntu-16.04', - 'cpu': 'armv7l-32-ODROID-XU4', - } }, ], }, 'V8 Linux - arm - sim': { 'swarming_dimensions': { - 'os': 'Ubuntu-14.04', + 'os': 'Ubuntu-16.04', }, 'tests': [ {'name': 'mjsunit_sp_frame_access'}, @@ -1433,7 +1392,7 @@ }, 'V8 Linux - arm - sim - debug': { 'swarming_dimensions': { - 'os': 'Ubuntu-14.04', + 'os': 'Ubuntu-16.04', }, 'tests': [ {'name': 'mjsunit_sp_frame_access'}, @@ -1446,12 +1405,14 @@ { 'name': 'mozilla', 'suffix': 'armv8-a', - 'test_args': ['--extra-flags', '--enable-armv8'] + 'test_args': ['--extra-flags', '--enable-armv8'], + 'shards': 2, }, { 'name': 'test262', 'suffix': 'armv8-a', - 'test_args': ['--extra-flags', '--enable-armv8'] + 'test_args': ['--extra-flags', '--enable-armv8'], + 'shards': 2, }, { 'name': 'v8testing', @@ -1483,7 +1444,7 @@ }, 'V8 Linux - arm - sim - lite': { 'swarming_dimensions': { - 'os': 'Ubuntu-14.04', + 'os': 'Ubuntu-16.04', }, 'tests': [ {'name': 'v8testing', 'variant': 'default', 'shards': 2}, @@ -1491,7 +1452,7 @@ }, 'V8 Linux - arm - sim - lite - debug': { 'swarming_dimensions': { - 'os': 'Ubuntu-14.04', + 'os': 'Ubuntu-16.04', }, 'tests': [ {'name': 'v8testing', 'variant': 'default', 'shards': 4}, @@ -1499,7 +1460,7 @@ }, 'V8 Linux - arm64 - 
sim': { 'swarming_dimensions': { - 'os': 'Ubuntu-14.04', + 'os': 'Ubuntu-16.04', }, 'tests': [ {'name': 'mjsunit_sp_frame_access'}, @@ -1512,7 +1473,7 @@ }, 'V8 Linux - arm64 - sim - debug': { 'swarming_dimensions': { - 'os': 'Ubuntu-14.04', + 'os': 'Ubuntu-16.04', }, # TODO(machenbach): Remove longer timeout when this builder scales better. 'swarming_task_attrs': { @@ -1529,7 +1490,7 @@ }, 'V8 Linux - arm64 - sim - gc stress': { 'swarming_dimensions': { - 'os': 'Ubuntu-14.04', + 'os': 'Ubuntu-16.04', }, 'swarming_task_attrs': { 'expiration': 14400, @@ -1546,7 +1507,7 @@ }, 'V8 Linux - mips64el - sim': { 'swarming_dimensions': { - 'os': 'Ubuntu-14.04', + 'os': 'Ubuntu-16.04', }, 'swarming_task_attrs': { 'expiration': 14400, @@ -1560,7 +1521,7 @@ }, 'V8 Linux - mipsel - sim': { 'swarming_dimensions': { - 'os': 'Ubuntu-14.04', + 'os': 'Ubuntu-16.04', }, 'swarming_task_attrs': { 'expiration': 14400, @@ -1574,7 +1535,7 @@ }, 'V8 Linux - ppc64 - sim': { 'swarming_dimensions': { - 'os': 'Ubuntu-14.04', + 'os': 'Ubuntu-16.04', }, 'swarming_task_attrs': { 'expiration': 14400, @@ -1587,7 +1548,7 @@ }, 'V8 Linux - s390x - sim': { 'swarming_dimensions': { - 'os': 'Ubuntu-14.04', + 'os': 'Ubuntu-16.04', }, 'swarming_task_attrs': { 'expiration': 14400, @@ -1600,7 +1561,7 @@ }, 'V8 Linux64 - arm64 - sim - pointer compression': { 'swarming_dimensions': { - 'os': 'Ubuntu-14.04', + 'os': 'Ubuntu-16.04', }, 'swarming_task_attrs': { 'expiration': 14400, @@ -1628,7 +1589,7 @@ # Clusterfuzz. 'V8 NumFuzz': { 'swarming_dimensions': { - 'os': 'Ubuntu-14.04', + 'os': 'Ubuntu-16.04', }, 'swarming_task_attrs': { 'expiration': 13800, @@ -1645,7 +1606,7 @@ }, 'V8 NumFuzz - TSAN': { 'swarming_dimensions': { - 'os': 'Ubuntu-14.04', + 'os': 'Ubuntu-16.04', }, 'swarming_task_attrs': { 'expiration': 13800, @@ -1693,7 +1654,7 @@ }, 'V8 NumFuzz - debug': { 'swarming_dimensions': { - 'os': 'Ubuntu-14.04', + 'os': 'Ubuntu-16.04', }, 'swarming_task_attrs': { 'expiration': 13800, @@ -1750,7 +1711,7 @@ # Branches. 
'V8 Linux - beta branch': { 'swarming_dimensions': { - 'os': 'Ubuntu-14.04', + 'os': 'Ubuntu-16.04', }, 'tests': [ {'name': 'mozilla'}, @@ -1760,7 +1721,7 @@ }, 'V8 Linux - beta branch - debug': { 'swarming_dimensions': { - 'os': 'Ubuntu-14.04', + 'os': 'Ubuntu-16.04', }, 'tests': [ {'name': 'mozilla'}, @@ -1770,7 +1731,7 @@ }, 'V8 Linux - stable branch': { 'swarming_dimensions': { - 'os': 'Ubuntu-14.04', + 'os': 'Ubuntu-16.04', }, 'tests': [ {'name': 'mozilla'}, @@ -1780,7 +1741,7 @@ }, 'V8 Linux - stable branch - debug': { 'swarming_dimensions': { - 'os': 'Ubuntu-14.04', + 'os': 'Ubuntu-16.04', }, 'tests': [ {'name': 'mozilla'}, @@ -1790,7 +1751,7 @@ }, 'V8 Linux64 - beta branch': { 'swarming_dimensions': { - 'os': 'Ubuntu-14.04', + 'os': 'Ubuntu-16.04', }, 'tests': [ {'name': 'mozilla'}, @@ -1800,7 +1761,7 @@ }, 'V8 Linux64 - beta branch - debug': { 'swarming_dimensions': { - 'os': 'Ubuntu-14.04', + 'os': 'Ubuntu-16.04', }, 'tests': [ {'name': 'mozilla'}, @@ -1810,7 +1771,7 @@ }, 'V8 Linux64 - stable branch': { 'swarming_dimensions': { - 'os': 'Ubuntu-14.04', + 'os': 'Ubuntu-16.04', }, 'tests': [ {'name': 'mozilla'}, @@ -1820,7 +1781,7 @@ }, 'V8 Linux64 - stable branch - debug': { 'swarming_dimensions': { - 'os': 'Ubuntu-14.04', + 'os': 'Ubuntu-16.04', }, 'tests': [ {'name': 'mozilla'}, @@ -1830,7 +1791,7 @@ }, 'V8 arm - sim - beta branch': { 'swarming_dimensions': { - 'os': 'Ubuntu-14.04', + 'os': 'Ubuntu-16.04', }, 'tests': [ {'name': 'mozilla'}, @@ -1840,7 +1801,7 @@ }, 'V8 arm - sim - beta branch - debug': { 'swarming_dimensions': { - 'os': 'Ubuntu-14.04', + 'os': 'Ubuntu-16.04', }, 'tests': [ {'name': 'mozilla'}, @@ -1850,7 +1811,7 @@ }, 'V8 arm - sim - stable branch': { 'swarming_dimensions': { - 'os': 'Ubuntu-14.04', + 'os': 'Ubuntu-16.04', }, 'tests': [ {'name': 'mozilla'}, @@ -1860,7 +1821,7 @@ }, 'V8 arm - sim - stable branch - debug': { 'swarming_dimensions': { - 'os': 'Ubuntu-14.04', + 'os': 'Ubuntu-16.04', }, 'tests': [ {'name': 'mozilla'}, @@ -1870,7 +1831,7 @@ }, 'V8 mips64el - sim - beta branch': { 'swarming_dimensions': { - 'os': 'Ubuntu-14.04', + 'os': 'Ubuntu-16.04', }, 'tests': [ {'name': 'unittests'}, @@ -1878,7 +1839,7 @@ }, 'V8 mips64el - sim - stable branch': { 'swarming_dimensions': { - 'os': 'Ubuntu-14.04', + 'os': 'Ubuntu-16.04', }, 'tests': [ {'name': 'unittests'}, @@ -1886,7 +1847,7 @@ }, 'V8 mipsel - sim - beta branch': { 'swarming_dimensions': { - 'os': 'Ubuntu-14.04', + 'os': 'Ubuntu-16.04', }, 'tests': [ {'name': 'v8testing', 'shards': 4}, @@ -1894,7 +1855,7 @@ }, 'V8 mipsel - sim - stable branch': { 'swarming_dimensions': { - 'os': 'Ubuntu-14.04', + 'os': 'Ubuntu-16.04', }, 'tests': [ {'name': 'v8testing', 'shards': 4}, @@ -1902,7 +1863,7 @@ }, 'V8 ppc64 - sim - beta branch': { 'swarming_dimensions': { - 'os': 'Ubuntu-14.04', + 'os': 'Ubuntu-16.04', }, 'tests': [ {'name': 'unittests'}, @@ -1910,7 +1871,7 @@ }, 'V8 ppc64 - sim - stable branch': { 'swarming_dimensions': { - 'os': 'Ubuntu-14.04', + 'os': 'Ubuntu-16.04', }, 'tests': [ {'name': 'unittests'}, @@ -1918,7 +1879,7 @@ }, 'V8 s390x - sim - beta branch': { 'swarming_dimensions': { - 'os': 'Ubuntu-14.04', + 'os': 'Ubuntu-16.04', }, 'tests': [ {'name': 'unittests'}, @@ -1926,7 +1887,7 @@ }, 'V8 s390x - sim - stable branch': { 'swarming_dimensions': { - 'os': 'Ubuntu-14.04', + 'os': 'Ubuntu-16.04', }, 'tests': [ {'name': 'unittests'}, diff --git a/deps/v8/samples/process.cc b/deps/v8/samples/process.cc index 9af1c0b23b7894..e4f6fd9cee733d 100644 --- a/deps/v8/samples/process.cc +++ 
b/deps/v8/samples/process.cc @@ -676,19 +676,17 @@ StringHttpRequest kSampleRequests[kSampleSize] = { StringHttpRequest("/", "localhost", "yahoo.com", "firefox") }; - -bool ProcessEntries(v8::Platform* platform, HttpRequestProcessor* processor, - int count, StringHttpRequest* reqs) { +bool ProcessEntries(v8::Isolate* isolate, v8::Platform* platform, + HttpRequestProcessor* processor, int count, + StringHttpRequest* reqs) { for (int i = 0; i < count; i++) { bool result = processor->Process(&reqs[i]); - while (v8::platform::PumpMessageLoop(platform, Isolate::GetCurrent())) - continue; + while (v8::platform::PumpMessageLoop(platform, isolate)) continue; if (!result) return false; } return true; } - void PrintMap(map* m) { for (map::iterator i = m->begin(); i != m->end(); i++) { pair entry = *i; @@ -727,7 +725,9 @@ int main(int argc, char* argv[]) { fprintf(stderr, "Error initializing processor.\n"); return 1; } - if (!ProcessEntries(platform.get(), &processor, kSampleSize, kSampleRequests)) + if (!ProcessEntries(isolate, platform.get(), &processor, kSampleSize, + kSampleRequests)) { return 1; + } PrintMap(&output); } diff --git a/deps/v8/src/DEPS b/deps/v8/src/DEPS index d24e647b24157d..1ae6a569e70e0f 100644 --- a/deps/v8/src/DEPS +++ b/deps/v8/src/DEPS @@ -16,6 +16,7 @@ include_rules = [ "+src/heap/heap-inl.h", "+src/heap/heap-write-barrier-inl.h", "+src/heap/heap-write-barrier.h", + "+src/heap/read-only-heap-inl.h", "+src/heap/read-only-heap.h", "-src/inspector", "-src/interpreter", @@ -29,6 +30,10 @@ include_rules = [ "+src/interpreter/interpreter.h", "+src/interpreter/interpreter-generator.h", "+src/interpreter/setup-interpreter.h", + "-src/regexp", + "+src/regexp/regexp.h", + "+src/regexp/regexp-stack.h", + "+src/regexp/regexp-utils.h", "-src/trap-handler", "+src/trap-handler/handler-inside-posix.h", "+src/trap-handler/handler-inside-win.h", @@ -44,5 +49,6 @@ specific_include_rules = { "d8\.cc": [ "+include/libplatform/libplatform.h", "+include/libplatform/v8-tracing.h", + "+perfetto/tracing.h" ], } diff --git a/deps/v8/src/OWNERS b/deps/v8/src/OWNERS index abad5274c880f8..c6881f232117b2 100644 --- a/deps/v8/src/OWNERS +++ b/deps/v8/src/OWNERS @@ -1,9 +1,5 @@ -per-file intl.*=cira@chromium.org -per-file intl.*=mnita@google.com -per-file intl.*=jshin@chromium.org -per-file typing-asm.*=aseemgarg@chromium.org -per-file objects-body-descriptors*=hpayer@chromium.org -per-file objects-body-descriptors*=mlippautz@chromium.org -per-file objects-body-descriptors*=ulan@chromium.org +per-file *DEPS=file://COMMON_OWNERS +per-file intl-*=file://INTL_OWNERS +per-file *-intl*=file://INTL_OWNERS # COMPONENT: Blink>JavaScript diff --git a/deps/v8/src/api/OWNERS b/deps/v8/src/api/OWNERS new file mode 100644 index 00000000000000..ce6fb20af84d38 --- /dev/null +++ b/deps/v8/src/api/OWNERS @@ -0,0 +1,11 @@ +file://include/OWNERS +clemensh@chromium.org +ishell@chromium.org +jkummerow@chromium.org +leszeks@chromium.org +mlippautz@chromium.org +mslekova@chromium.org +mstarzinger@chromium.org +verwaest@chromium.org + +# COMPONENT: Blink>JavaScript>API diff --git a/deps/v8/src/api/api-natives.cc b/deps/v8/src/api/api-natives.cc index c22b7c47f9cf4b..cd380d3cda1aa2 100644 --- a/deps/v8/src/api/api-natives.cc +++ b/deps/v8/src/api/api-natives.cc @@ -5,8 +5,8 @@ #include "src/api/api-natives.h" #include "src/api/api-inl.h" +#include "src/common/message-template.h" #include "src/execution/isolate-inl.h" -#include "src/execution/message-template.h" #include "src/objects/api-callbacks.h" #include 
"src/objects/hash-table-inl.h" #include "src/objects/lookup.h" @@ -39,7 +39,6 @@ class InvokeScope { MaybeHandle InstantiateObject(Isolate* isolate, Handle data, Handle new_target, - bool is_hidden_prototype, bool is_prototype); MaybeHandle InstantiateFunction( @@ -54,7 +53,7 @@ MaybeHandle Instantiate( isolate, Handle::cast(data), maybe_name); } else if (data->IsObjectTemplateInfo()) { return InstantiateObject(isolate, Handle::cast(data), - Handle(), false, false); + Handle(), false); } else { return data; } @@ -129,7 +128,7 @@ void DisableAccessChecks(Isolate* isolate, Handle object) { // Copy map so it won't interfere constructor's initial map. Handle new_map = Map::Copy(isolate, old_map, "DisableAccessChecks"); new_map->set_is_access_check_needed(false); - JSObject::MigrateToMap(Handle::cast(object), new_map); + JSObject::MigrateToMap(isolate, Handle::cast(object), new_map); } void EnableAccessChecks(Isolate* isolate, Handle object) { @@ -138,7 +137,7 @@ void EnableAccessChecks(Isolate* isolate, Handle object) { Handle new_map = Map::Copy(isolate, old_map, "EnableAccessChecks"); new_map->set_is_access_check_needed(true); new_map->set_may_have_interesting_symbols(true); - JSObject::MigrateToMap(object, new_map); + JSObject::MigrateToMap(isolate, object, new_map); } class AccessCheckDisableScope { @@ -178,8 +177,7 @@ Object GetIntrinsic(Isolate* isolate, v8::Intrinsic intrinsic) { template MaybeHandle ConfigureInstance(Isolate* isolate, Handle obj, - Handle data, - bool is_hidden_prototype) { + Handle data) { HandleScope scope(isolate); // Disable access checks while instantiating the object. AccessCheckDisableScope access_check_scope(isolate, obj); @@ -246,11 +244,10 @@ MaybeHandle ConfigureInstance(Isolate* isolate, Handle obj, } else { auto getter = handle(properties->get(i++), isolate); auto setter = handle(properties->get(i++), isolate); - RETURN_ON_EXCEPTION( - isolate, - DefineAccessorProperty(isolate, obj, name, getter, setter, - attributes, is_hidden_prototype), - JSObject); + RETURN_ON_EXCEPTION(isolate, + DefineAccessorProperty(isolate, obj, name, getter, + setter, attributes, false), + JSObject); } } else { // Intrinsic data property --- Get appropriate value from the current @@ -364,7 +361,6 @@ bool IsSimpleInstantiation(Isolate* isolate, ObjectTemplateInfo info, MaybeHandle InstantiateObject(Isolate* isolate, Handle info, Handle new_target, - bool is_hidden_prototype, bool is_prototype) { Handle constructor; int serial_number = Smi::ToInt(info->serial_number()); @@ -413,8 +409,7 @@ MaybeHandle InstantiateObject(Isolate* isolate, if (is_prototype) JSObject::OptimizeAsPrototype(object); ASSIGN_RETURN_ON_EXCEPTION( - isolate, result, - ConfigureInstance(isolate, object, info, is_hidden_prototype), JSObject); + isolate, result, ConfigureInstance(isolate, object, info), JSObject); if (info->immutable_proto()) { JSObject::SetImmutableProto(object); } @@ -486,7 +481,7 @@ MaybeHandle InstantiateFunction(Isolate* isolate, InstantiateObject( isolate, handle(ObjectTemplateInfo::cast(prototype_templ), isolate), - Handle(), false, true), + Handle(), true), JSFunction); } Object parent = data->GetParentTemplate(); @@ -514,8 +509,7 @@ MaybeHandle InstantiateFunction(Isolate* isolate, CacheTemplateInstantiation(isolate, serial_number, CachingMode::kUnlimited, function); } - MaybeHandle result = - ConfigureInstance(isolate, function, data, false); + MaybeHandle result = ConfigureInstance(isolate, function, data); if (result.is_null()) { // Uncache on error. 
if (serial_number) { @@ -560,8 +554,7 @@ MaybeHandle ApiNatives::InstantiateObject( Isolate* isolate, Handle data, Handle new_target) { InvokeScope invoke_scope(isolate); - return ::v8::internal::InstantiateObject(isolate, data, new_target, false, - false); + return ::v8::internal::InstantiateObject(isolate, data, new_target, false); } MaybeHandle ApiNatives::InstantiateRemoteObject( diff --git a/deps/v8/src/api/api.cc b/deps/v8/src/api/api.cc index 0965e23632e3b1..e02c74416b81c6 100644 --- a/deps/v8/src/api/api.cc +++ b/deps/v8/src/api/api.cc @@ -237,18 +237,10 @@ namespace v8 { #define RETURN_ON_FAILED_EXECUTION_PRIMITIVE(T) \ EXCEPTION_BAILOUT_CHECK_SCOPED_DO_NOT_USE(isolate, Nothing()) -#define RETURN_TO_LOCAL_UNCHECKED(maybe_local, T) \ - return maybe_local.FromMaybe(Local()); - #define RETURN_ESCAPED(value) return handle_scope.Escape(value); namespace { -Local ContextFromNeverReadOnlySpaceObject( - i::Handle obj) { - return reinterpret_cast(obj->GetIsolate())->GetCurrentContext(); -} - class InternalEscapableScope : public v8::EscapableHandleScope { public: explicit inline InternalEscapableScope(i::Isolate* isolate) @@ -447,7 +439,7 @@ void i::V8::FatalProcessOutOfMemory(i::Isolate* isolate, const char* location, heap_stats.end_marker = &end_marker; if (isolate->heap()->HasBeenSetUp()) { // BUG(1718): Don't use the take_snapshot since we don't support - // HeapIterator here without doing a special GC. + // HeapObjectIterator here without doing a special GC. isolate->heap()->RecordStats(&heap_stats, false); char* first_newline = strchr(last_few_messages, '\n'); if (first_newline == nullptr || first_newline[1] == '\0') @@ -764,9 +756,9 @@ StartupData SnapshotCreator::CreateBlob( std::vector> sfis_to_clear; { // Heap allocation is disallowed within this scope. - i::HeapIterator heap_iterator(isolate->heap()); - for (i::HeapObject current_obj = heap_iterator.next(); - !current_obj.is_null(); current_obj = heap_iterator.next()) { + i::HeapObjectIterator heap_iterator(isolate->heap()); + for (i::HeapObject current_obj = heap_iterator.Next(); + !current_obj.is_null(); current_obj = heap_iterator.Next()) { if (current_obj.IsSharedFunctionInfo()) { i::SharedFunctionInfo shared = i::SharedFunctionInfo::cast(current_obj); @@ -810,17 +802,19 @@ StartupData SnapshotCreator::CreateBlob( i::SerializedHandleChecker handle_checker(isolate, &contexts); CHECK(handle_checker.CheckGlobalAndEternalHandles()); - i::HeapIterator heap_iterator(isolate->heap()); - for (i::HeapObject current_obj = heap_iterator.next(); !current_obj.is_null(); - current_obj = heap_iterator.next()) { + i::HeapObjectIterator heap_iterator(isolate->heap()); + for (i::HeapObject current_obj = heap_iterator.Next(); !current_obj.is_null(); + current_obj = heap_iterator.Next()) { if (current_obj.IsJSFunction()) { i::JSFunction fun = i::JSFunction::cast(current_obj); // Complete in-object slack tracking for all functions. fun.CompleteInobjectSlackTrackingIfActive(); + fun.ResetIfBytecodeFlushed(); + // Also, clear out feedback vectors, or any optimized code. 
- if (!fun.raw_feedback_cell().value().IsUndefined()) { + if (fun.IsOptimized() || fun.IsInterpreted()) { fun.raw_feedback_cell().set_value( i::ReadOnlyRoots(isolate).undefined_value()); fun.set_code(isolate->builtins()->builtin(i::Builtins::kCompileLazy)); @@ -889,13 +883,17 @@ void V8::SetDcheckErrorHandler(DcheckErrorCallback that) { } void V8::SetFlagsFromString(const char* str) { - SetFlagsFromString(str, static_cast(strlen(str))); + SetFlagsFromString(str, strlen(str)); +} + +void V8::SetFlagsFromString(const char* str, size_t length) { + i::FlagList::SetFlagsFromString(str, length); + i::FlagList::EnforceFlagImplications(); } void V8::SetFlagsFromString(const char* str, int length) { CHECK_LE(0, length); - i::FlagList::SetFlagsFromString(str, static_cast(length)); - i::FlagList::EnforceFlagImplications(); + SetFlagsFromString(str, static_cast(length)); } void V8::SetFlagsFromCommandLine(int* argc, char** argv, bool remove_flags) { @@ -959,42 +957,57 @@ Extension::Extension(const char* name, const char* source, int dep_count, CHECK(source != nullptr || source_length_ == 0); } -ResourceConstraints::ResourceConstraints() - : max_semi_space_size_in_kb_(0), - max_old_space_size_(0), - stack_limit_(nullptr), - code_range_size_(0), - max_zone_pool_size_(0) {} +void ResourceConstraints::ConfigureDefaultsFromHeapSize( + size_t initial_heap_size_in_bytes, size_t maximum_heap_size_in_bytes) { + CHECK_LE(initial_heap_size_in_bytes, maximum_heap_size_in_bytes); + if (maximum_heap_size_in_bytes == 0) { + return; + } + size_t young_generation, old_generation; + i::Heap::GenerationSizesFromHeapSize(maximum_heap_size_in_bytes, + &young_generation, &old_generation); + set_max_young_generation_size_in_bytes( + i::Max(young_generation, i::Heap::MinYoungGenerationSize())); + set_max_old_generation_size_in_bytes( + i::Max(old_generation, i::Heap::MinOldGenerationSize())); + if (initial_heap_size_in_bytes > 0) { + i::Heap::GenerationSizesFromHeapSize(initial_heap_size_in_bytes, + &young_generation, &old_generation); + // We do not set lower bounds for the initial sizes. + set_initial_young_generation_size_in_bytes(young_generation); + set_initial_old_generation_size_in_bytes(old_generation); + } + if (i::kRequiresCodeRange) { + set_code_range_size_in_bytes( + i::Min(i::kMaximalCodeRangeSize, maximum_heap_size_in_bytes)); + } +} void ResourceConstraints::ConfigureDefaults(uint64_t physical_memory, uint64_t virtual_memory_limit) { - set_max_semi_space_size_in_kb( - i::Heap::ComputeMaxSemiSpaceSize(physical_memory)); - set_max_old_space_size(i::Heap::ComputeMaxOldGenerationSize(physical_memory)); + size_t heap_size = i::Heap::HeapSizeFromPhysicalMemory(physical_memory); + size_t young_generation, old_generation; + i::Heap::GenerationSizesFromHeapSize(heap_size, &young_generation, + &old_generation); + set_max_young_generation_size_in_bytes(young_generation); + set_max_old_generation_size_in_bytes(old_generation); if (virtual_memory_limit > 0 && i::kRequiresCodeRange) { - // Reserve no more than 1/8 of the memory for the code range, but at most - // kMaximalCodeRangeSize. 
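For reference, the byte-based configuration implemented here is typically driven from embedder setup code roughly as follows (a sketch, assuming the platform has already been initialized; the 512 MB cap is arbitrary):

    v8::Isolate::CreateParams params;
    // An initial size of 0 leaves the initial generation sizes to V8.
    params.constraints.ConfigureDefaultsFromHeapSize(
        /*initial_heap_size_in_bytes=*/0,
        /*maximum_heap_size_in_bytes=*/size_t{512} * 1024 * 1024);
    // The allocator must outlive the isolate.
    params.array_buffer_allocator =
        v8::ArrayBuffer::Allocator::NewDefaultAllocator();
    v8::Isolate* isolate = v8::Isolate::New(params);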
- set_code_range_size( - i::Min(i::kMaximalCodeRangeSize / i::MB, - static_cast((virtual_memory_limit >> 3) / i::MB))); + set_code_range_size_in_bytes( + i::Min(i::kMaximalCodeRangeSize, + static_cast(virtual_memory_limit / 8))); } } -void SetResourceConstraints(i::Isolate* isolate, - const ResourceConstraints& constraints) { - size_t semi_space_size = constraints.max_semi_space_size_in_kb(); - size_t old_space_size = constraints.max_old_space_size(); - size_t code_range_size = constraints.code_range_size(); - if (semi_space_size != 0 || old_space_size != 0 || code_range_size != 0) { - isolate->heap()->ConfigureHeap(semi_space_size, old_space_size, - code_range_size); - } +size_t ResourceConstraints::max_semi_space_size_in_kb() const { + return i::Heap::SemiSpaceSizeFromYoungGenerationSize( + max_young_generation_size_) / + i::KB; +} - if (constraints.stack_limit() != nullptr) { - uintptr_t limit = reinterpret_cast(constraints.stack_limit()); - isolate->stack_guard()->SetStackLimit(limit); - } +void ResourceConstraints::set_max_semi_space_size_in_kb(size_t limit_in_kb) { + set_max_young_generation_size_in_bytes( + i::Heap::YoungGenerationSizeFromSemiSpaceSize(limit_in_kb * i::KB)); } i::Address* V8::GlobalizeReference(i::Isolate* isolate, i::Address* obj) { @@ -1035,11 +1048,6 @@ void V8::MoveTracedGlobalReference(internal::Address** from, i::GlobalHandles::MoveTracedGlobal(from, to); } -void V8::RegisterExternallyReferencedObject(i::Address* location, - i::Isolate* isolate) { - isolate->heap()->RegisterExternallyReferencedObject(location); -} - void V8::MakeWeak(i::Address* location, void* parameter, WeakCallbackInfo::Callback weak_callback, WeakCallbackType type) { @@ -1370,29 +1378,28 @@ static Local ObjectTemplateNew( bool do_not_cache); Local FunctionTemplate::PrototypeTemplate() { - i::Isolate* i_isolate = Utils::OpenHandle(this)->GetIsolate(); + auto self = Utils::OpenHandle(this); + i::Isolate* i_isolate = self->GetIsolate(); ENTER_V8_NO_SCRIPT_NO_EXCEPTION(i_isolate); - i::Handle result(Utils::OpenHandle(this)->GetPrototypeTemplate(), - i_isolate); + i::Handle result(self->GetPrototypeTemplate(), i_isolate); if (result->IsUndefined(i_isolate)) { // Do not cache prototype objects. 
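For reference, this PrototypeTemplate() accessor is typically driven from embedder code like the following sketch; GreetCallback is a hypothetical callback, not part of this patch:

    void GreetCallback(const v8::FunctionCallbackInfo<v8::Value>& info) {
      // Hypothetical callback body.
    }

    void AddGreeting(v8::Isolate* isolate) {
      v8::Local<v8::FunctionTemplate> tmpl =
          v8::FunctionTemplate::New(isolate);
      // The first access lazily allocates the prototype template; as the
      // surrounding hunk shows, it is created with do_not_cache set, so
      // prototype objects are never cached.
      tmpl->PrototypeTemplate()->Set(
          isolate, "greet", v8::FunctionTemplate::New(isolate, GreetCallback));
    }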
result = Utils::OpenHandle( *ObjectTemplateNew(i_isolate, Local(), true)); - i::FunctionTemplateInfo::SetPrototypeTemplate( - i_isolate, Utils::OpenHandle(this), result); + i::FunctionTemplateInfo::SetPrototypeTemplate(i_isolate, self, result); } return ToApiHandle(result); } void FunctionTemplate::SetPrototypeProviderTemplate( Local prototype_provider) { - i::Isolate* i_isolate = Utils::OpenHandle(this)->GetIsolate(); + auto self = Utils::OpenHandle(this); + i::Isolate* i_isolate = self->GetIsolate(); ENTER_V8_NO_SCRIPT_NO_EXCEPTION(i_isolate); i::Handle result = Utils::OpenHandle(*prototype_provider); - auto info = Utils::OpenHandle(this); - CHECK(info->GetPrototypeTemplate().IsUndefined(i_isolate)); - CHECK(info->GetParentTemplate().IsUndefined(i_isolate)); - i::FunctionTemplateInfo::SetPrototypeProviderTemplate(i_isolate, info, + CHECK(self->GetPrototypeTemplate().IsUndefined(i_isolate)); + CHECK(self->GetParentTemplate().IsUndefined(i_isolate)); + i::FunctionTemplateInfo::SetPrototypeProviderTemplate(i_isolate, self, result); } @@ -1421,17 +1428,21 @@ static Local FunctionTemplateNew( i::FUNCTION_TEMPLATE_INFO_TYPE, i::AllocationType::kOld); i::Handle obj = i::Handle::cast(struct_obj); - InitializeFunctionTemplate(obj); - obj->set_do_not_cache(do_not_cache); - int next_serial_number = i::FunctionTemplateInfo::kInvalidSerialNumber; - if (!do_not_cache) { - next_serial_number = isolate->heap()->GetNextTemplateSerialNumber(); + { + // Disallow GC until all fields of obj have acceptable types. + i::DisallowHeapAllocation no_gc; + InitializeFunctionTemplate(obj); + obj->set_length(length); + obj->set_do_not_cache(do_not_cache); + int next_serial_number = i::FunctionTemplateInfo::kInvalidSerialNumber; + if (!do_not_cache) { + next_serial_number = isolate->heap()->GetNextTemplateSerialNumber(); + } + obj->set_serial_number(i::Smi::FromInt(next_serial_number)); } - obj->set_serial_number(i::Smi::FromInt(next_serial_number)); if (callback != nullptr) { Utils::ToLocal(obj)->SetCallHandler(callback, data, side_effect_type); } - obj->set_length(length); obj->set_undetectable(false); obj->set_needs_access_check(false); obj->set_accept_any_receiver(true); @@ -1608,10 +1619,6 @@ void FunctionTemplate::SetAcceptAnyReceiver(bool value) { info->set_accept_any_receiver(value); } -void FunctionTemplate::SetHiddenPrototype(bool value) { - /* No-op for ABI compatibility. 
*/ -} - void FunctionTemplate::ReadOnlyPrototype() { auto info = Utils::OpenHandle(this); EnsureNotInstantiated(info, "v8::FunctionTemplate::ReadOnlyPrototype"); @@ -2005,9 +2012,10 @@ bool ObjectTemplate::IsImmutableProto() { } void ObjectTemplate::SetImmutableProto() { - i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate(); + auto self = Utils::OpenHandle(this); + i::Isolate* isolate = self->GetIsolate(); ENTER_V8_NO_SCRIPT_NO_EXCEPTION(isolate); - Utils::OpenHandle(this)->set_immutable_proto(true); + self->set_immutable_proto(true); } // --- S c r i p t s --- @@ -2227,29 +2235,40 @@ Local Module::GetException() const { int Module::GetModuleRequestsLength() const { i::Handle self = Utils::OpenHandle(this); - return self->info().module_requests().length(); + if (self->IsSyntheticModule()) return 0; + return i::Handle::cast(self) + ->info() + .module_requests() + .length(); } Local Module::GetModuleRequest(int i) const { CHECK_GE(i, 0); i::Handle self = Utils::OpenHandle(this); + CHECK(self->IsSourceTextModule()); i::Isolate* isolate = self->GetIsolate(); - i::Handle module_requests(self->info().module_requests(), - isolate); + i::Handle module_requests( + i::Handle::cast(self)->info().module_requests(), + isolate); CHECK_LT(i, module_requests->length()); return ToApiHandle(i::handle(module_requests->get(i), isolate)); } Location Module::GetModuleRequestLocation(int i) const { CHECK_GE(i, 0); - i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate(); - i::HandleScope scope(isolate); i::Handle self = Utils::OpenHandle(this); + i::Isolate* isolate = self->GetIsolate(); + i::HandleScope scope(isolate); + CHECK(self->IsSourceTextModule()); i::Handle module_request_positions( - self->info().module_request_positions(), isolate); + i::Handle::cast(self) + ->info() + .module_request_positions(), + isolate); CHECK_LT(i, module_request_positions->length()); int position = i::Smi::ToInt(module_request_positions->get(i)); - i::Handle script(self->script(), isolate); + i::Handle script( + i::Handle::cast(self)->script(), isolate); i::Script::PositionInfo info; i::Script::GetPositionInfo(script, position, &info, i::Script::WITH_OFFSET); return v8::Location(info.line, info.column); @@ -2270,8 +2289,10 @@ Local Module::GetUnboundModuleScript() { GetStatus() < kEvaluating, "v8::Module::GetUnboundScript", "v8::Module::GetUnboundScript must be used on an unevaluated module"); i::Handle self = Utils::OpenHandle(this); + CHECK(self->IsSourceTextModule()); return ToApiHandle(i::Handle( - self->GetSharedFunctionInfo(), self->GetIsolate())); + i::Handle::cast(self)->GetSharedFunctionInfo(), + self->GetIsolate())); } int Module::GetIdentityHash() const { return Utils::OpenHandle(this)->hash(); } @@ -2306,6 +2327,37 @@ MaybeLocal Module::Evaluate(Local context) { RETURN_ESCAPED(result); } +Local Module::CreateSyntheticModule( + Isolate* isolate, Local module_name, + const std::vector>& export_names, + v8::Module::SyntheticModuleEvaluationSteps evaluation_steps) { + auto i_isolate = reinterpret_cast(isolate); + i::Handle i_module_name = Utils::OpenHandle(*module_name); + i::Handle i_export_names = i_isolate->factory()->NewFixedArray( + static_cast(export_names.size())); + for (int i = 0; i < i_export_names->length(); ++i) { + i::Handle str = Utils::OpenHandle(*export_names[i]); + i_export_names->set(i, *str); + } + return v8::Utils::ToLocal( + i::Handle(i_isolate->factory()->NewSyntheticModule( + i_module_name, i_export_names, evaluation_steps))); +} + +void Module::SetSyntheticModuleExport(Local 
export_name, + Local export_value) { + i::Handle i_export_name = Utils::OpenHandle(*export_name); + i::Handle i_export_value = Utils::OpenHandle(*export_value); + i::Handle self = Utils::OpenHandle(this); + Utils::ApiCheck(self->IsSyntheticModule(), + "v8::Module::SetSyntheticModuleExport", + "v8::Module::SetSyntheticModuleExport must only be called on " + "a SyntheticModule"); + i::SyntheticModule::SetExport(self->GetIsolate(), + i::Handle::cast(self), + i_export_name, i_export_value); +} + namespace { i::Compiler::ScriptDetails GetScriptDetails( @@ -2416,7 +2468,7 @@ MaybeLocal ScriptCompiler::CompileModule( if (!maybe.ToLocal(&unbound)) return MaybeLocal(); i::Handle shared = Utils::OpenHandle(*unbound); - return ToApiHandle(i_isolate->factory()->NewModule(shared)); + return ToApiHandle(i_isolate->factory()->NewSourceTextModule(shared)); } namespace { @@ -2442,16 +2494,6 @@ bool IsIdentifier(i::Isolate* isolate, i::Handle string) { } } // anonymous namespace -MaybeLocal ScriptCompiler::CompileFunctionInContext( - Local v8_context, Source* source, size_t arguments_count, - Local arguments[], size_t context_extension_count, - Local context_extensions[], CompileOptions options, - NoCacheReason no_cache_reason) { - return ScriptCompiler::CompileFunctionInContext( - v8_context, source, arguments_count, arguments, context_extension_count, - context_extensions, options, no_cache_reason, nullptr); -} - MaybeLocal ScriptCompiler::CompileFunctionInContext( Local v8_context, Source* source, size_t arguments_count, Local arguments[], size_t context_extension_count, @@ -2760,11 +2802,12 @@ void v8::TryCatch::SetCaptureMessage(bool value) { capture_message_ = value; } // --- M e s s a g e --- Local Message::Get() const { - i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate(); + auto self = Utils::OpenHandle(this); + i::Isolate* isolate = self->GetIsolate(); ENTER_V8_NO_SCRIPT_NO_EXCEPTION(isolate); EscapableHandleScope scope(reinterpret_cast(isolate)); - i::Handle obj = Utils::OpenHandle(this); - i::Handle raw_result = i::MessageHandler::GetMessage(isolate, obj); + i::Handle raw_result = + i::MessageHandler::GetMessage(isolate, self); Local result = Utils::ToLocal(raw_result); return scope.Escape(result); } @@ -2775,10 +2818,10 @@ v8::Isolate* Message::GetIsolate() const { } ScriptOrigin Message::GetScriptOrigin() const { - i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate(); + auto self = Utils::OpenHandle(this); + i::Isolate* isolate = self->GetIsolate(); ENTER_V8_NO_SCRIPT_NO_EXCEPTION(isolate); - auto message = i::Handle::cast(Utils::OpenHandle(this)); - i::Handle script(message->script(), isolate); + i::Handle script(self->script(), isolate); return GetScriptOriginForScript(isolate, script); } @@ -2787,11 +2830,11 @@ v8::Local Message::GetScriptResourceName() const { } v8::Local Message::GetStackTrace() const { - i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate(); + auto self = Utils::OpenHandle(this); + i::Isolate* isolate = self->GetIsolate(); ENTER_V8_NO_SCRIPT_NO_EXCEPTION(isolate); EscapableHandleScope scope(reinterpret_cast(isolate)); - auto message = i::Handle::cast(Utils::OpenHandle(this)); - i::Handle stackFramesObj(message->stack_frames(), isolate); + i::Handle stackFramesObj(self->stack_frames(), isolate); if (!stackFramesObj->IsFixedArray()) return v8::Local(); auto stackTrace = i::Handle::cast(stackFramesObj); return scope.Escape(Utils::StackTraceToLocal(stackTrace)); @@ -2860,18 +2903,17 @@ Maybe Message::GetEndColumn(Local context) const { } bool 
Message::IsSharedCrossOrigin() const { - i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate(); + auto self = Utils::OpenHandle(this); + i::Isolate* isolate = self->GetIsolate(); ENTER_V8_NO_SCRIPT_NO_EXCEPTION(isolate); - return Utils::OpenHandle(this) - ->script() - .origin_options() - .IsSharedCrossOrigin(); + return self->script().origin_options().IsSharedCrossOrigin(); } bool Message::IsOpaque() const { - i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate(); + auto self = Utils::OpenHandle(this); + i::Isolate* isolate = self->GetIsolate(); ENTER_V8_NO_SCRIPT_NO_EXCEPTION(isolate); - return Utils::OpenHandle(this)->script().origin_options().IsOpaque(); + return self->script().origin_options().IsOpaque(); } MaybeLocal Message::GetSourceLine(Local context) const { @@ -2918,11 +2960,11 @@ Local StackTrace::CurrentStackTrace(Isolate* isolate, // --- S t a c k F r a m e --- int StackFrame::GetLineNumber() const { - return i::StackTraceFrame::GetLineNumber(Utils::OpenHandle(this)); + return i::StackTraceFrame::GetOneBasedLineNumber(Utils::OpenHandle(this)); } int StackFrame::GetColumn() const { - return i::StackTraceFrame::GetColumnNumber(Utils::OpenHandle(this)); + return i::StackTraceFrame::GetOneBasedColumnNumber(Utils::OpenHandle(this)); } int StackFrame::GetScriptId() const { @@ -2930,30 +2972,31 @@ int StackFrame::GetScriptId() const { } Local StackFrame::GetScriptName() const { - i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate(); + auto self = Utils::OpenHandle(this); + i::Isolate* isolate = self->GetIsolate(); EscapableHandleScope scope(reinterpret_cast(isolate)); - i::Handle name = - i::StackTraceFrame::GetFileName(Utils::OpenHandle(this)); + i::Handle name = i::StackTraceFrame::GetFileName(self); return name->IsString() ? scope.Escape(Local::Cast(Utils::ToLocal(name))) : Local(); } Local StackFrame::GetScriptNameOrSourceURL() const { - i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate(); + auto self = Utils::OpenHandle(this); + i::Isolate* isolate = self->GetIsolate(); EscapableHandleScope scope(reinterpret_cast(isolate)); i::Handle name = - i::StackTraceFrame::GetScriptNameOrSourceUrl(Utils::OpenHandle(this)); + i::StackTraceFrame::GetScriptNameOrSourceUrl(self); return name->IsString() ? scope.Escape(Local::Cast(Utils::ToLocal(name))) : Local(); } Local StackFrame::GetFunctionName() const { - i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate(); + auto self = Utils::OpenHandle(this); + i::Isolate* isolate = self->GetIsolate(); EscapableHandleScope scope(reinterpret_cast(isolate)); - i::Handle name = - i::StackTraceFrame::GetFunctionName(Utils::OpenHandle(this)); + i::Handle name = i::StackTraceFrame::GetFunctionName(self); return name->IsString() ? 
scope.Escape(Local::Cast(Utils::ToLocal(name))) : Local(); @@ -3445,12 +3488,6 @@ MaybeLocal Value::ToString(Local context) const { RETURN_ESCAPED(result); } - -Local Value::ToString(Isolate* isolate) const { - RETURN_TO_LOCAL_UNCHECKED(ToString(isolate->GetCurrentContext()), String); -} - - MaybeLocal Value::ToDetailString(Local context) const { i::Handle obj = Utils::OpenHandle(this); if (obj->IsString()) return ToApiHandle(obj); @@ -3472,11 +3509,6 @@ MaybeLocal Value::ToObject(Local context) const { RETURN_ESCAPED(result); } - -Local Value::ToObject(Isolate* isolate) const { - RETURN_TO_LOCAL_UNCHECKED(ToObject(isolate->GetCurrentContext()), Object); -} - MaybeLocal Value::ToBigInt(Local context) const { i::Handle obj = Utils::OpenHandle(this); if (obj->IsBigInt()) return ToApiHandle(obj); @@ -3493,11 +3525,6 @@ bool Value::BooleanValue(Isolate* v8_isolate) const { reinterpret_cast(v8_isolate)); } -MaybeLocal Value::ToBoolean(Local context) const { - return ToBoolean(context->GetIsolate()); -} - - Local Value::ToBoolean(Isolate* v8_isolate) const { auto isolate = reinterpret_cast(v8_isolate); return ToApiHandle( @@ -3515,12 +3542,6 @@ MaybeLocal Value::ToNumber(Local context) const { RETURN_ESCAPED(result); } - -Local Value::ToNumber(Isolate* isolate) const { - RETURN_TO_LOCAL_UNCHECKED(ToNumber(isolate->GetCurrentContext()), Number); -} - - MaybeLocal Value::ToInteger(Local context) const { auto obj = Utils::OpenHandle(this); if (obj->IsSmi()) return ToApiHandle(obj); @@ -3532,12 +3553,6 @@ MaybeLocal Value::ToInteger(Local context) const { RETURN_ESCAPED(result); } - -Local Value::ToInteger(Isolate* isolate) const { - RETURN_TO_LOCAL_UNCHECKED(ToInteger(isolate->GetCurrentContext()), Integer); -} - - MaybeLocal Value::ToInt32(Local context) const { auto obj = Utils::OpenHandle(this); if (obj->IsSmi()) return ToApiHandle(obj); @@ -3549,12 +3564,6 @@ MaybeLocal Value::ToInt32(Local context) const { RETURN_ESCAPED(result); } - -Local Value::ToInt32(Isolate* isolate) const { - RETURN_TO_LOCAL_UNCHECKED(ToInt32(isolate->GetCurrentContext()), Int32); -} - - MaybeLocal Value::ToUint32(Local context) const { auto obj = Utils::OpenHandle(this); if (obj->IsSmi()) return ToApiHandle(obj); @@ -3567,8 +3576,7 @@ MaybeLocal Value::ToUint32(Local context) const { } i::Isolate* i::IsolateFromNeverReadOnlySpaceObject(i::Address obj) { - return i::NeverReadOnlySpaceObject::GetIsolate( - i::HeapObject::cast(i::Object(obj))); + return i::GetIsolateFromWritableObject(i::HeapObject::cast(i::Object(obj))); } bool i::ShouldThrowOnError(i::Isolate* isolate) { @@ -3782,13 +3790,6 @@ void v8::RegExp::CheckCast(v8::Value* that) { "Could not convert to regular expression"); } - -Maybe Value::BooleanValue(Local context) const { - i::Isolate* isolate = reinterpret_cast(context->GetIsolate()); - return Just(Utils::OpenHandle(this)->BooleanValue(isolate)); -} - - Maybe Value::NumberValue(Local context) const { auto obj = Utils::OpenHandle(this); if (obj->IsNumber()) return Just(obj->Number()); @@ -3922,11 +3923,6 @@ Maybe v8::Object::Set(v8::Local context, return Just(true); } -bool v8::Object::Set(v8::Local key, v8::Local value) { - auto context = ContextFromNeverReadOnlySpaceObject(Utils::OpenHandle(this)); - return Set(context, key, value).FromMaybe(false); -} - Maybe v8::Object::Set(v8::Local context, uint32_t index, v8::Local value) { auto isolate = reinterpret_cast(context->GetIsolate()); @@ -3940,11 +3936,6 @@ Maybe v8::Object::Set(v8::Local context, uint32_t index, return Just(true); } -bool 
v8::Object::Set(uint32_t index, v8::Local value) { - auto context = ContextFromNeverReadOnlySpaceObject(Utils::OpenHandle(this)); - return Set(context, index, value).FromMaybe(false); -} - Maybe v8::Object::CreateDataProperty(v8::Local context, v8::Local key, v8::Local value) { @@ -4162,11 +4153,6 @@ MaybeLocal v8::Object::Get(Local context, RETURN_ESCAPED(Utils::ToLocal(result)); } -Local v8::Object::Get(v8::Local key) { - auto context = ContextFromNeverReadOnlySpaceObject(Utils::OpenHandle(this)); - RETURN_TO_LOCAL_UNCHECKED(Get(context, key), Value); -} - MaybeLocal v8::Object::Get(Local context, uint32_t index) { PREPARE_FOR_EXECUTION(context, Object, Get, Value); auto self = Utils::OpenHandle(this); @@ -4177,11 +4163,6 @@ MaybeLocal v8::Object::Get(Local context, uint32_t index) { RETURN_ESCAPED(Utils::ToLocal(result)); } -Local v8::Object::Get(uint32_t index) { - auto context = ContextFromNeverReadOnlySpaceObject(Utils::OpenHandle(this)); - RETURN_TO_LOCAL_UNCHECKED(Get(context, index), Value); -} - MaybeLocal v8::Object::GetPrivate(Local context, Local key) { return Get(context, Local(reinterpret_cast(*key))); @@ -4227,8 +4208,8 @@ MaybeLocal v8::Object::GetOwnPropertyDescriptor(Local context, } Local v8::Object::GetPrototype() { - auto isolate = Utils::OpenHandle(this)->GetIsolate(); auto self = Utils::OpenHandle(this); + auto isolate = self->GetIsolate(); i::PrototypeIterator iter(isolate, self); return Utils::ToLocal(i::PrototypeIterator::GetCurrent(iter)); } @@ -4480,10 +4461,10 @@ void Object::SetAccessorProperty(Local name, Local getter, AccessControl settings) { // TODO(verwaest): Remove |settings|. DCHECK_EQ(v8::DEFAULT, settings); - i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate(); + auto self = Utils::OpenHandle(this); + i::Isolate* isolate = self->GetIsolate(); ENTER_V8_NO_SCRIPT_NO_EXCEPTION(isolate); i::HandleScope scope(isolate); - auto self = Utils::OpenHandle(this); if (!self->IsJSObject()) return; i::Handle getter_i = v8::Utils::OpenHandle(*getter); i::Handle setter_i = v8::Utils::OpenHandle(*setter, true); @@ -4693,9 +4674,9 @@ Local v8::Object::CreationContext() { int v8::Object::GetIdentityHash() { i::DisallowHeapAllocation no_gc; - auto isolate = Utils::OpenHandle(this)->GetIsolate(); - i::HandleScope scope(isolate); auto self = Utils::OpenHandle(this); + auto isolate = self->GetIsolate(); + i::HandleScope scope(isolate); return self->GetOrCreateIdentityHash(isolate).value(); } @@ -4881,9 +4862,9 @@ Local Function::GetDebugName() const { } Local Function::GetDisplayName() const { - i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate(); - ENTER_V8_NO_SCRIPT_NO_EXCEPTION(isolate); auto self = Utils::OpenHandle(this); + i::Isolate* isolate = self->GetIsolate(); + ENTER_V8_NO_SCRIPT_NO_EXCEPTION(isolate); if (!self->IsJSFunction()) { return ToApiHandle(isolate->factory()->undefined_value()); } @@ -5414,20 +5395,15 @@ Local Symbol::Name() const { i::Handle sym = Utils::OpenHandle(this); i::Isolate* isolate; - if (!i::GetIsolateFromWritableObject(*sym, &isolate)) { - // If the Symbol is in RO_SPACE, then its name must be too. Since RO_SPACE - // objects are immovable we can use the Handle(Address*) constructor with - // the address of the name field in the Symbol object without needing an - // isolate. -#ifdef V8_COMPRESS_POINTERS - // Compressed fields can't serve as handle locations. - // TODO(ishell): get Isolate as a parameter. 
- isolate = i::Isolate::Current(); -#else + if (!i::GetIsolateFromHeapObject(*sym, &isolate)) { + // Symbol is in RO_SPACE, which means that its name is also in RO_SPACE. + // Since RO_SPACE objects are immovable we can use the Handle(Address*) + // constructor with the address of the name field in the Symbol object + // without needing an isolate. + DCHECK(!COMPRESS_POINTERS_BOOL); i::Handle ro_name(reinterpret_cast( sym->GetFieldAddress(i::Symbol::kNameOffset))); return Utils::ToLocal(ro_name); -#endif } i::Handle name(sym->name(), isolate); @@ -5973,6 +5949,19 @@ void Context::SetErrorMessageForCodeGenerationFromStrings(Local error) { context->set_error_message_for_code_gen_from_strings(*error_handle); } +void Context::SetAbortScriptExecution( + Context::AbortScriptExecutionCallback callback) { + i::Handle context = Utils::OpenHandle(this); + i::Isolate* isolate = context->GetIsolate(); + if (callback == nullptr) { + context->set_script_execution_callback( + i::ReadOnlyRoots(isolate).undefined_value()); + } else { + SET_FIELD_WRAPPED(isolate, context, set_script_execution_callback, + callback); + } +} + namespace { i::Address* GetSerializedDataFromFixedArray(i::Isolate* isolate, i::FixedArray list, size_t index) { @@ -6126,9 +6115,9 @@ inline int StringLength(const uint16_t* string) { V8_WARN_UNUSED_RESULT inline i::MaybeHandle NewString(i::Factory* factory, - v8::NewStringType type, + NewStringType type, i::Vector string) { - if (type == v8::NewStringType::kInternalized) { + if (type == NewStringType::kInternalized) { return factory->InternalizeUtf8String(string); } return factory->NewStringFromUtf8(string); @@ -6136,9 +6125,9 @@ inline i::MaybeHandle NewString(i::Factory* factory, V8_WARN_UNUSED_RESULT inline i::MaybeHandle NewString(i::Factory* factory, - v8::NewStringType type, + NewStringType type, i::Vector string) { - if (type == v8::NewStringType::kInternalized) { + if (type == NewStringType::kInternalized) { return factory->InternalizeString(string); } return factory->NewStringFromOneByte(string); @@ -6146,15 +6135,14 @@ inline i::MaybeHandle NewString(i::Factory* factory, V8_WARN_UNUSED_RESULT inline i::MaybeHandle NewString(i::Factory* factory, - v8::NewStringType type, + NewStringType type, i::Vector string) { - if (type == v8::NewStringType::kInternalized) { + if (type == NewStringType::kInternalized) { return factory->InternalizeString(string); } return factory->NewStringFromTwoByte(string); } - STATIC_ASSERT(v8::String::kMaxLength == i::String::kMaxLength); } // anonymous namespace @@ -6179,43 +6167,21 @@ STATIC_ASSERT(v8::String::kMaxLength == i::String::kMaxLength); result = Utils::ToLocal(handle_result); \ } -Local String::NewFromUtf8(Isolate* isolate, - const char* data, - NewStringType type, - int length) { - NEW_STRING(isolate, String, NewFromUtf8, char, data, - static_cast(type), length); - RETURN_TO_LOCAL_UNCHECKED(result, String); -} - - MaybeLocal String::NewFromUtf8(Isolate* isolate, const char* data, - v8::NewStringType type, int length) { + NewStringType type, int length) { NEW_STRING(isolate, String, NewFromUtf8, char, data, type, length); return result; } - MaybeLocal String::NewFromOneByte(Isolate* isolate, const uint8_t* data, - v8::NewStringType type, int length) { + NewStringType type, int length) { NEW_STRING(isolate, String, NewFromOneByte, uint8_t, data, type, length); return result; } - -Local String::NewFromTwoByte(Isolate* isolate, - const uint16_t* data, - NewStringType type, - int length) { - NEW_STRING(isolate, String, NewFromTwoByte, uint16_t, 
data, - static_cast(type), length); - RETURN_TO_LOCAL_UNCHECKED(result, String); -} - - MaybeLocal String::NewFromTwoByte(Isolate* isolate, const uint16_t* data, - v8::NewStringType type, int length) { + NewStringType type, int length) { NEW_STRING(isolate, String, NewFromTwoByte, uint16_t, data, type, length); return result; } @@ -6282,13 +6248,6 @@ MaybeLocal v8::String::NewExternalOneByte( } } - -Local v8::String::NewExternal( - Isolate* isolate, v8::String::ExternalOneByteStringResource* resource) { - RETURN_TO_LOCAL_UNCHECKED(NewExternalOneByte(isolate, resource), String); -} - - bool v8::String::MakeExternal(v8::String::ExternalStringResource* resource) { i::DisallowHeapAllocation no_allocation; @@ -6304,8 +6263,7 @@ bool v8::String::MakeExternal(v8::String::ExternalStringResource* resource) { // It is safe to call GetIsolateFromWritableHeapObject because // SupportsExternalization already checked that the object is writable. - i::Isolate* isolate; - i::GetIsolateFromWritableObject(obj, &isolate); + i::Isolate* isolate = i::GetIsolateFromWritableObject(obj); ENTER_V8_NO_SCRIPT_NO_EXCEPTION(isolate); CHECK(resource && resource->data()); @@ -6332,8 +6290,7 @@ bool v8::String::MakeExternal( // It is safe to call GetIsolateFromWritableHeapObject because // SupportsExternalization already checked that the object is writable. - i::Isolate* isolate; - i::GetIsolateFromWritableObject(obj, &isolate); + i::Isolate* isolate = i::GetIsolateFromWritableObject(obj); ENTER_V8_NO_SCRIPT_NO_EXCEPTION(isolate); CHECK(resource && resource->data()); @@ -6450,10 +6407,11 @@ Local v8::NumberObject::New(Isolate* isolate, double value) { double v8::NumberObject::ValueOf() const { i::Handle obj = Utils::OpenHandle(this); - i::Handle jsvalue = i::Handle::cast(obj); - i::Isolate* isolate = jsvalue->GetIsolate(); + i::Handle js_primitive_wrapper = + i::Handle::cast(obj); + i::Isolate* isolate = js_primitive_wrapper->GetIsolate(); LOG_API(isolate, NumberObject, NumberValue); - return jsvalue->value().Number(); + return js_primitive_wrapper->value().Number(); } Local v8::BigIntObject::New(Isolate* isolate, int64_t value) { @@ -6468,11 +6426,12 @@ Local v8::BigIntObject::New(Isolate* isolate, int64_t value) { Local v8::BigIntObject::ValueOf() const { i::Handle obj = Utils::OpenHandle(this); - i::Handle jsvalue = i::Handle::cast(obj); - i::Isolate* isolate = jsvalue->GetIsolate(); + i::Handle js_primitive_wrapper = + i::Handle::cast(obj); + i::Isolate* isolate = js_primitive_wrapper->GetIsolate(); LOG_API(isolate, BigIntObject, BigIntValue); - return Utils::ToLocal( - i::Handle(i::BigInt::cast(jsvalue->value()), isolate)); + return Utils::ToLocal(i::Handle( + i::BigInt::cast(js_primitive_wrapper->value()), isolate)); } Local v8::BooleanObject::New(Isolate* isolate, bool value) { @@ -6490,10 +6449,11 @@ Local v8::BooleanObject::New(Isolate* isolate, bool value) { bool v8::BooleanObject::ValueOf() const { i::Handle obj = Utils::OpenHandle(this); - i::Handle jsvalue = i::Handle::cast(obj); - i::Isolate* isolate = jsvalue->GetIsolate(); + i::Handle js_primitive_wrapper = + i::Handle::cast(obj); + i::Isolate* isolate = js_primitive_wrapper->GetIsolate(); LOG_API(isolate, BooleanObject, BooleanValue); - return jsvalue->value().IsTrue(isolate); + return js_primitive_wrapper->value().IsTrue(isolate); } Local v8::StringObject::New(Isolate* v8_isolate, @@ -6509,11 +6469,12 @@ Local v8::StringObject::New(Isolate* v8_isolate, Local v8::StringObject::ValueOf() const { i::Handle obj = Utils::OpenHandle(this); - i::Handle jsvalue 
= i::Handle::cast(obj); - i::Isolate* isolate = jsvalue->GetIsolate(); + i::Handle js_primitive_wrapper = + i::Handle::cast(obj); + i::Isolate* isolate = js_primitive_wrapper->GetIsolate(); LOG_API(isolate, StringObject, StringValue); - return Utils::ToLocal( - i::Handle(i::String::cast(jsvalue->value()), isolate)); + return Utils::ToLocal(i::Handle( + i::String::cast(js_primitive_wrapper->value()), isolate)); } Local v8::SymbolObject::New(Isolate* isolate, Local value) { @@ -6528,11 +6489,12 @@ Local v8::SymbolObject::New(Isolate* isolate, Local value) { Local v8::SymbolObject::ValueOf() const { i::Handle obj = Utils::OpenHandle(this); - i::Handle jsvalue = i::Handle::cast(obj); - i::Isolate* isolate = jsvalue->GetIsolate(); + i::Handle js_primitive_wrapper = + i::Handle::cast(obj); + i::Isolate* isolate = js_primitive_wrapper->GetIsolate(); LOG_API(isolate, SymbolObject, SymbolValue); - return Utils::ToLocal( - i::Handle(i::Symbol::cast(jsvalue->value()), isolate)); + return Utils::ToLocal(i::Handle( + i::Symbol::cast(js_primitive_wrapper->value()), isolate)); } MaybeLocal v8::Date::New(Local context, double time) { @@ -6559,23 +6521,14 @@ double v8::Date::ValueOf() const { // Assert that the static TimeZoneDetection cast in // DateTimeConfigurationChangeNotification is valid. -#define TIME_ZONE_DETECTION_ASSERT_EQ(value) \ - STATIC_ASSERT( \ - static_cast(v8::Isolate::TimeZoneDetection::value) == \ - static_cast(base::TimezoneCache::TimeZoneDetection::value)); \ - STATIC_ASSERT(static_cast(v8::Isolate::TimeZoneDetection::value) == \ - static_cast(v8::Date::TimeZoneDetection::value)); +#define TIME_ZONE_DETECTION_ASSERT_EQ(value) \ + STATIC_ASSERT( \ + static_cast(v8::Isolate::TimeZoneDetection::value) == \ + static_cast(base::TimezoneCache::TimeZoneDetection::value)); TIME_ZONE_DETECTION_ASSERT_EQ(kSkip) TIME_ZONE_DETECTION_ASSERT_EQ(kRedetect) #undef TIME_ZONE_DETECTION_ASSERT_EQ -// static -void v8::Date::DateTimeConfigurationChangeNotification( - Isolate* isolate, TimeZoneDetection time_zone_detection) { - isolate->DateTimeConfigurationChangeNotification( - static_cast(time_zone_detection)); -} - MaybeLocal v8::RegExp::New(Local context, Local pattern, Flags flags) { PREPARE_FOR_EXECUTION(context, RegExp, New, RegExp); @@ -7494,15 +7447,14 @@ v8::SharedArrayBuffer::Contents v8::SharedArrayBuffer::Externalize() { v8::SharedArrayBuffer::Contents::Contents( void* data, size_t byte_length, void* allocation_base, size_t allocation_length, Allocator::AllocationMode allocation_mode, - DeleterCallback deleter, void* deleter_data, bool is_growable) + DeleterCallback deleter, void* deleter_data) : data_(data), byte_length_(byte_length), allocation_base_(allocation_base), allocation_length_(allocation_length), allocation_mode_(allocation_mode), deleter_(deleter), - deleter_data_(deleter_data), - is_growable_(is_growable) { + deleter_data_(deleter_data) { DCHECK_LE(allocation_base_, data_); DCHECK_LE(byte_length_, allocation_length_); } @@ -7520,8 +7472,7 @@ v8::SharedArrayBuffer::Contents v8::SharedArrayBuffer::GetContents() { : reinterpret_cast(ArrayBufferDeleter), self->is_wasm_memory() ? 
static_cast(self->GetIsolate()->wasm_engine()) - : static_cast(self->GetIsolate()->array_buffer_allocator()), - false); + : static_cast(self->GetIsolate()->array_buffer_allocator())); return contents; } @@ -7936,7 +7887,12 @@ void Isolate::Initialize(Isolate* isolate, i_isolate->set_api_external_references(params.external_references); i_isolate->set_allow_atomics_wait(params.allow_atomics_wait); - SetResourceConstraints(i_isolate, params.constraints); + i_isolate->heap()->ConfigureHeap(params.constraints); + if (params.constraints.stack_limit() != nullptr) { + uintptr_t limit = + reinterpret_cast(params.constraints.stack_limit()); + i_isolate->stack_guard()->SetStackLimit(limit); + } // TODO(jochen): Once we got rid of Isolate::Current(), we can remove this. Isolate::Scope isolate_scope(isolate); if (!i::Snapshot::Initialize(i_isolate)) { @@ -8388,9 +8344,9 @@ void Isolate::LowMemoryNotification() { i::GarbageCollectionReason::kLowMemoryNotification); } { - i::HeapIterator iterator(isolate->heap()); - for (i::HeapObject obj = iterator.next(); !obj.is_null(); - obj = iterator.next()) { + i::HeapObjectIterator iterator(isolate->heap()); + for (i::HeapObject obj = iterator.Next(); !obj.is_null(); + obj = iterator.Next()) { if (obj.IsAbstractCode()) { i::AbstractCode::cast(obj).DropStackFrameCache(); } @@ -8401,9 +8357,14 @@ void Isolate::LowMemoryNotification() { int Isolate::ContextDisposedNotification(bool dependant_context) { i::Isolate* isolate = reinterpret_cast(this); if (!dependant_context) { - // We left the current context, we can abort all WebAssembly compilations on - // that isolate. - isolate->wasm_engine()->DeleteCompileJobsOnIsolate(isolate); + if (!isolate->context().is_null()) { + // We left the current context, we can abort all WebAssembly compilations + // of that context. + // A handle scope for the native context. + i::HandleScope handle_scope(isolate); + isolate->wasm_engine()->DeleteCompileJobsOnContext( + isolate->native_context()); + } } // TODO(ahaas): move other non-heap activity out of the heap call. 
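Both notifications here are public Isolate API. A sketch of how an embedder drives them; the false argument reflects the new per-context WebAssembly job cancellation above:

    void OnMemoryPressure(v8::Isolate* isolate) {
      // Triggers a critical GC; per the hunk above, this now walks the heap
      // with the renamed HeapObjectIterator to drop stack-frame caches.
      isolate->LowMemoryNotification();
    }

    void OnTopLevelContextDropped(v8::Isolate* isolate) {
      // false: the disposed context did not depend on the remaining ones, so
      // V8 aborts the WebAssembly compile jobs of that context only, rather
      // than every compile job on the isolate as before.
      isolate->ContextDisposedNotification(false);
    }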
return isolate->heap()->NotifyContextDisposed(dependant_context); @@ -8505,6 +8466,9 @@ CALLBACK_SETTER(FatalErrorHandler, FatalErrorCallback, exception_behavior) CALLBACK_SETTER(OOMErrorHandler, OOMErrorCallback, oom_behavior) CALLBACK_SETTER(AllowCodeGenerationFromStringsCallback, AllowCodeGenerationFromStringsCallback, allow_code_gen_callback) +CALLBACK_SETTER(ModifyCodeGenerationFromStringsCallback, + ModifyCodeGenerationFromStringsCallback, + modify_code_gen_callback) CALLBACK_SETTER(AllowWasmCodeGenerationCallback, AllowWasmCodeGenerationCallback, allow_wasm_code_gen_callback) @@ -8648,8 +8612,13 @@ void v8::Isolate::LocaleConfigurationChangeNotification() { } // static -std::unique_ptr MicrotaskQueue::New(Isolate* isolate) { - return i::MicrotaskQueue::New(reinterpret_cast(isolate)); +std::unique_ptr MicrotaskQueue::New(Isolate* isolate, + MicrotasksPolicy policy) { + auto microtask_queue = + i::MicrotaskQueue::New(reinterpret_cast(isolate)); + microtask_queue->set_microtasks_policy(policy); + std::unique_ptr ret(std::move(microtask_queue)); + return ret; } MicrotasksScope::MicrotasksScope(Isolate* isolate, MicrotasksScope::Type type) @@ -8931,9 +8900,9 @@ std::vector debug::Script::LineEnds() const { } MaybeLocal debug::Script::Name() const { - i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate(); - i::HandleScope handle_scope(isolate); i::Handle script = Utils::OpenHandle(this); + i::Isolate* isolate = script->GetIsolate(); + i::HandleScope handle_scope(isolate); i::Handle value(script->name(), isolate); if (!value->IsString()) return MaybeLocal(); return Utils::ToLocal( @@ -8941,9 +8910,9 @@ MaybeLocal debug::Script::Name() const { } MaybeLocal debug::Script::SourceURL() const { - i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate(); - i::HandleScope handle_scope(isolate); i::Handle script = Utils::OpenHandle(this); + i::Isolate* isolate = script->GetIsolate(); + i::HandleScope handle_scope(isolate); i::Handle value(script->source_url(), isolate); if (!value->IsString()) return MaybeLocal(); return Utils::ToLocal( @@ -8951,9 +8920,9 @@ MaybeLocal debug::Script::SourceURL() const { } MaybeLocal debug::Script::SourceMappingURL() const { - i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate(); - i::HandleScope handle_scope(isolate); i::Handle script = Utils::OpenHandle(this); + i::Isolate* isolate = script->GetIsolate(); + i::HandleScope handle_scope(isolate); i::Handle value(script->source_mapping_url(), isolate); if (!value->IsString()) return MaybeLocal(); return Utils::ToLocal( @@ -8961,18 +8930,18 @@ MaybeLocal debug::Script::SourceMappingURL() const { } Maybe debug::Script::ContextId() const { - i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate(); - i::HandleScope handle_scope(isolate); i::Handle script = Utils::OpenHandle(this); + i::Isolate* isolate = script->GetIsolate(); + i::HandleScope handle_scope(isolate); i::Object value = script->context_data(); if (value.IsSmi()) return Just(i::Smi::ToInt(value)); return Nothing(); } MaybeLocal debug::Script::Source() const { - i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate(); - i::HandleScope handle_scope(isolate); i::Handle script = Utils::OpenHandle(this); + i::Isolate* isolate = script->GetIsolate(); + i::HandleScope handle_scope(isolate); i::Handle value(script->source(), isolate); if (!value->IsString()) return MaybeLocal(); return Utils::ToLocal( @@ -9831,10 +9800,6 @@ int CpuProfile::GetSamplesCount() const { return reinterpret_cast(this)->samples_count(); } -CpuProfiler* 
CpuProfiler::New(Isolate* isolate) { - return New(isolate, kDebugNaming); -} - CpuProfiler* CpuProfiler::New(Isolate* isolate, CpuProfilingNamingMode mode) { return reinterpret_cast( new i::CpuProfiler(reinterpret_cast(isolate), mode)); @@ -9876,12 +9841,6 @@ void CpuProfiler::StartProfiling(Local title, bool record_samples) { *Utils::OpenHandle(*title), options); } -void CpuProfiler::StartProfiling(Local title, CpuProfilingMode mode, - bool record_samples) { - StartProfiling(title, mode, record_samples, - CpuProfilingOptions::kNoSampleLimit); -} - void CpuProfiler::StartProfiling(Local title, CpuProfilingMode mode, bool record_samples, unsigned max_samples) { CpuProfilingOptions options(mode, record_samples ? max_samples : 0); @@ -10273,6 +10232,17 @@ void EmbedderHeapTracer::IncreaseAllocatedSize(size_t bytes) { } } +void EmbedderHeapTracer::DecreaseAllocatedSize(size_t bytes) { + if (isolate_) { + i::LocalEmbedderHeapTracer* const tracer = + reinterpret_cast(isolate_) + ->heap() + ->local_embedder_heap_tracer(); + DCHECK_NOT_NULL(tracer); + tracer->DecreaseAllocatedSize(bytes); + } +} + void EmbedderHeapTracer::RegisterEmbedderReference( const TracedGlobal& ref) { if (ref.IsEmpty()) return; @@ -10462,8 +10432,7 @@ void InvokeAccessorGetterCallback( void InvokeFunctionCallback(const v8::FunctionCallbackInfo& info, v8::FunctionCallback callback) { Isolate* isolate = reinterpret_cast(info.GetIsolate()); - RuntimeCallTimerScope timer(isolate, - RuntimeCallCounterId::kInvokeFunctionCallback); + RuntimeCallTimerScope timer(isolate, RuntimeCallCounterId::kFunctionCallback); Address callback_address = reinterpret_cast
(callback); VMState state(isolate); ExternalCallbackScope call_scope(isolate, callback_address); @@ -10484,7 +10453,6 @@ void InvokeFunctionCallback(const v8::FunctionCallbackInfo& info, #undef EXCEPTION_BAILOUT_CHECK_SCOPED_DO_NOT_USE #undef RETURN_ON_FAILED_EXECUTION #undef RETURN_ON_FAILED_EXECUTION_PRIMITIVE -#undef RETURN_TO_LOCAL_UNCHECKED #undef RETURN_ESCAPED #undef SET_FIELD_WRAPPED #undef NEW_STRING diff --git a/deps/v8/src/api/api.h b/deps/v8/src/api/api.h index e041a5daf086b3..6135a7dfc62024 100644 --- a/deps/v8/src/api/api.h +++ b/deps/v8/src/api/api.h @@ -14,9 +14,9 @@ #include "src/objects/js-generator.h" #include "src/objects/js-promise.h" #include "src/objects/js-proxy.h" -#include "src/objects/module.h" #include "src/objects/objects.h" #include "src/objects/shared-function-info.h" +#include "src/objects/source-text-module.h" #include "src/utils/detachable-vector.h" #include "src/objects/templates.h" diff --git a/deps/v8/src/asmjs/OWNERS b/deps/v8/src/asmjs/OWNERS index d4103ae0c1be91..08f39f8d6a2df3 100644 --- a/deps/v8/src/asmjs/OWNERS +++ b/deps/v8/src/asmjs/OWNERS @@ -1,5 +1,3 @@ -set noparent - ahaas@chromium.org clemensh@chromium.org mstarzinger@chromium.org diff --git a/deps/v8/src/asmjs/asm-js.cc b/deps/v8/src/asmjs/asm-js.cc index 5a38eeef361fba..7433b6a12cbb72 100644 --- a/deps/v8/src/asmjs/asm-js.cc +++ b/deps/v8/src/asmjs/asm-js.cc @@ -12,9 +12,9 @@ #include "src/codegen/compiler.h" #include "src/codegen/unoptimized-compilation-info.h" #include "src/common/assert-scope.h" +#include "src/common/message-template.h" #include "src/execution/execution.h" #include "src/execution/isolate.h" -#include "src/execution/message-template.h" #include "src/handles/handles.h" #include "src/heap/factory.h" #include "src/logging/counters.h" @@ -249,9 +249,9 @@ UnoptimizedCompilationJob::Status AsmJsCompilationJob::ExecuteJobImpl() { return FAILED; } module_ = new (compile_zone) wasm::ZoneBuffer(compile_zone); - parser.module_builder()->WriteTo(*module_); + parser.module_builder()->WriteTo(module_); asm_offsets_ = new (compile_zone) wasm::ZoneBuffer(compile_zone); - parser.module_builder()->WriteAsmJsOffsetTable(*asm_offsets_); + parser.module_builder()->WriteAsmJsOffsetTable(asm_offsets_); stdlib_uses_ = *parser.stdlib_uses(); size_t compile_zone_size = @@ -287,7 +287,7 @@ UnoptimizedCompilationJob::Status AsmJsCompilationJob::FinalizeJobImpl( isolate, &thrower, wasm::ModuleWireBytes(module_->begin(), module_->end()), Vector(asm_offsets_->begin(), asm_offsets_->size()), - uses_bitset) + uses_bitset, shared_info->language_mode()) .ToHandleChecked(); DCHECK(!thrower.error()); compile_time_ = compile_timer.Elapsed().InMillisecondsF(); @@ -319,10 +319,10 @@ void AsmJsCompilationJob::RecordHistograms(Isolate* isolate) { translation_throughput); } -UnoptimizedCompilationJob* AsmJs::NewCompilationJob( +std::unique_ptr AsmJs::NewCompilationJob( ParseInfo* parse_info, FunctionLiteral* literal, AccountingAllocator* allocator) { - return new AsmJsCompilationJob(parse_info, literal, allocator); + return base::make_unique(parse_info, literal, allocator); } namespace { diff --git a/deps/v8/src/asmjs/asm-js.h b/deps/v8/src/asmjs/asm-js.h index 46dd3f2e34bb68..3e714cba7a67ed 100644 --- a/deps/v8/src/asmjs/asm-js.h +++ b/deps/v8/src/asmjs/asm-js.h @@ -23,7 +23,7 @@ class UnoptimizedCompilationJob; // Interface to compile and instantiate for asm.js modules. 
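The asm.js hunks here move NewCompilationJob from a raw owning pointer to std::unique_ptr (via base::make_unique, since V8 targeted pre-C++14 toolchains). A self-contained sketch of the same factory pattern, using std::make_unique and placeholder types for illustration:

    #include <memory>

    struct CompilationJob {
      virtual ~CompilationJob() = default;
    };
    struct ConcreteJob : CompilationJob {};

    // Ownership is explicit in the signature: every caller must hold the
    // result in a smart pointer, so a leak on an early-return path becomes
    // impossible by construction.
    std::unique_ptr<CompilationJob> NewCompilationJob() {
      return std::make_unique<ConcreteJob>();
    }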
class AsmJs { public: - static UnoptimizedCompilationJob* NewCompilationJob( + static std::unique_ptr NewCompilationJob( ParseInfo* parse_info, FunctionLiteral* literal, AccountingAllocator* allocator); static MaybeHandle InstantiateAsmWasm(Isolate* isolate, diff --git a/deps/v8/src/asmjs/asm-parser.cc b/deps/v8/src/asmjs/asm-parser.cc index 3d290a1fe1a93b..6ac39dc89ccf31 100644 --- a/deps/v8/src/asmjs/asm-parser.cc +++ b/deps/v8/src/asmjs/asm-parser.cc @@ -253,7 +253,7 @@ void AsmJsParser::DeclareGlobal(VarInfo* info, bool mutable_variable, const WasmInitExpr& init) { info->kind = VarKind::kGlobal; info->type = type; - info->index = module_builder_->AddGlobal(vtype, false, true, init); + info->index = module_builder_->AddGlobal(vtype, true, init); info->mutable_variable = mutable_variable; } @@ -385,7 +385,8 @@ void AsmJsParser::ValidateModule() { module_builder_->MarkStartFunction(start); for (auto& global_import : global_imports_) { uint32_t import_index = module_builder_->AddGlobalImport( - global_import.import_name, global_import.value_type); + global_import.import_name, global_import.value_type, + false /* mutability */); start->EmitWithI32V(kExprGetGlobal, import_index); start->EmitWithI32V(kExprSetGlobal, VarIndex(global_import.var_info)); } @@ -754,7 +755,7 @@ void AsmJsParser::ValidateFunction() { // Record start of the function, used as position for the stack check. current_function_builder_->SetAsmFunctionStartPosition(scanner_.Position()); - CachedVector params(cached_asm_type_p_vectors_); + CachedVector params(&cached_asm_type_p_vectors_); ValidateFunctionParams(¶ms); // Check against limit on number of parameters. @@ -762,7 +763,7 @@ void AsmJsParser::ValidateFunction() { FAIL("Number of parameters exceeds internal limit"); } - CachedVector locals(cached_valuetype_vectors_); + CachedVector locals(&cached_valuetype_vectors_); ValidateFunctionLocals(params.size(), &locals); function_temp_locals_offset_ = static_cast( @@ -837,7 +838,7 @@ void AsmJsParser::ValidateFunctionParams(ZoneVector* params) { scanner_.EnterLocalScope(); EXPECT_TOKEN('('); CachedVector function_parameters( - cached_token_t_vectors_); + &cached_token_t_vectors_); while (!failed_ && !Peek(')')) { if (!scanner_.IsLocal()) { FAIL("Expected parameter name"); @@ -969,7 +970,8 @@ void AsmJsParser::ValidateFunctionLocals(size_t param_count, if (negate) { dvalue = -dvalue; } - current_function_builder_->EmitF32Const(dvalue); + float fvalue = DoubleToFloat32(dvalue); + current_function_builder_->EmitF32Const(fvalue); current_function_builder_->EmitSetLocal(info->index); } else if (CheckForUnsigned(&uvalue)) { if (uvalue > 0x7FFFFFFF) { @@ -1314,7 +1316,7 @@ void AsmJsParser::SwitchStatement() { Begin(pending_label_); pending_label_ = 0; // TODO(bradnelson): Make less weird. - CachedVector cases(cached_int_vectors_); + CachedVector cases(&cached_int_vectors_); GatherCases(&cases); EXPECT_TOKEN('{'); size_t count = cases.size() + 1; @@ -2108,7 +2110,11 @@ AsmType* AsmJsParser::ValidateCall() { // need to match the information stored at this point. base::Optional tmp; if (Check('[')) { - RECURSEn(EqualityExpression()); + AsmType* index = nullptr; + RECURSEn(index = EqualityExpression()); + if (!index->IsA(AsmType::Intish())) { + FAILn("Expected intish index"); + } EXPECT_TOKENn('&'); uint32_t mask = 0; if (!CheckForUnsigned(&mask)) { @@ -2161,8 +2167,8 @@ AsmType* AsmJsParser::ValidateCall() { } // Parse argument list and gather types. 
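The CachedVector constructor below now takes its vector pool by pointer instead of by reference, as the asm-parser.cc call sites above show. A toy sketch of the motivation; Pool and PooledVector are illustrative names, not V8's:

    #include <vector>

    template <typename T>
    struct Pool {
      int live = 0;
      void fill(std::vector<T>*) { ++live; }   // hand out pooled storage
      void reuse(std::vector<T>*) { --live; }  // take the storage back
    };

    template <typename T>
    class PooledVector : public std::vector<T> {
     public:
      // Pointer parameter: the call site must spell &pool, which makes the
      // mutation of the pool visible, mirroring the CachedVector change.
      explicit PooledVector(Pool<T>* pool) : pool_(pool) { pool_->fill(this); }
      ~PooledVector() { pool_->reuse(this); }

     private:
      Pool<T>* pool_;
    };

    // Usage: Pool<int> pool; PooledVector<int> v(&pool);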
- CachedVector param_types(cached_asm_type_p_vectors_); - CachedVector param_specific_types(cached_asm_type_p_vectors_); + CachedVector param_types(&cached_asm_type_p_vectors_); + CachedVector param_specific_types(&cached_asm_type_p_vectors_); EXPECT_TOKENn('('); while (!failed_ && !Peek(')')) { AsmType* t; diff --git a/deps/v8/src/asmjs/asm-parser.h b/deps/v8/src/asmjs/asm-parser.h index 8740cdad1198db..c7bf30c29e56cb 100644 --- a/deps/v8/src/asmjs/asm-parser.h +++ b/deps/v8/src/asmjs/asm-parser.h @@ -154,9 +154,9 @@ class AsmJsParser { template class CachedVector final : public ZoneVector { public: - explicit CachedVector(CachedVectors& cache) - : ZoneVector(cache.zone()), cache_(&cache) { - cache.fill(this); + explicit CachedVector(CachedVectors* cache) + : ZoneVector(cache->zone()), cache_(cache) { + cache->fill(this); } ~CachedVector() { cache_->reuse(this); } diff --git a/deps/v8/src/ast/OWNERS b/deps/v8/src/ast/OWNERS index e95afc8afa05fa..e6daa80ec97fe7 100644 --- a/deps/v8/src/ast/OWNERS +++ b/deps/v8/src/ast/OWNERS @@ -1,5 +1,3 @@ -set noparent - adamk@chromium.org bmeurer@chromium.org gsathya@chromium.org diff --git a/deps/v8/src/ast/ast.cc b/deps/v8/src/ast/ast.cc index a930a374b8eaa1..9987eb28449a21 100644 --- a/deps/v8/src/ast/ast.cc +++ b/deps/v8/src/ast/ast.cc @@ -49,8 +49,6 @@ static const char* NameForNativeContextIntrinsicIndex(uint32_t idx) { return "UnknownIntrinsicIndex"; } -void AstNode::Print() { Print(Isolate::Current()); } - void AstNode::Print(Isolate* isolate) { AllowHandleDereference allow_deref; AstPrinter::PrintOut(isolate, this); @@ -132,6 +130,10 @@ bool Expression::ToBooleanIsFalse() const { return IsLiteral() && AsLiteral()->ToBooleanIsFalse(); } +bool Expression::IsPrivateName() const { + return IsVariableProxy() && AsVariableProxy()->IsPrivateName(); +} + bool Expression::IsValidReferenceExpression() const { return IsProperty() || (IsVariableProxy() && AsVariableProxy()->IsValidReferenceExpression()); @@ -176,7 +178,7 @@ void VariableProxy::BindTo(Variable* var) { set_var(var); set_is_resolved(); var->set_is_used(); - if (is_assigned()) var->set_maybe_assigned(); + if (is_assigned()) var->SetMaybeAssigned(); } Assignment::Assignment(NodeType node_type, Token::Value op, Expression* target, @@ -601,8 +603,8 @@ void ArrayLiteral::BuildBoilerplateDescription(Isolate* isolate) { boilerplate_value = handle(Smi::kZero, isolate); } - kind = GetMoreGeneralElementsKind(kind, - boilerplate_value->OptimalElementsKind()); + kind = GetMoreGeneralElementsKind( + kind, boilerplate_value->OptimalElementsKind(isolate)); fixed_array->set(array_index, *boilerplate_value); } @@ -832,6 +834,9 @@ Call::CallType Call::GetCallType() const { Property* property = expression()->AsProperty(); if (property != nullptr) { + if (property->IsPrivateReference()) { + return PRIVATE_CALL; + } bool is_super = property->IsSuperAccess(); if (property->key()->IsPropertyName()) { return is_super ? NAMED_SUPER_PROPERTY_CALL : NAMED_PROPERTY_CALL; diff --git a/deps/v8/src/ast/ast.h b/deps/v8/src/ast/ast.h index 27d298c88ea186..bd52d1b2c04065 100644 --- a/deps/v8/src/ast/ast.h +++ b/deps/v8/src/ast/ast.h @@ -147,7 +147,6 @@ class AstNode: public ZoneObject { int position() const { return position_; } #ifdef DEBUG - void Print(); void Print(Isolate* isolate); #endif // DEBUG @@ -205,6 +204,9 @@ class Expression : public AstNode { // True iff the expression is a valid reference expression. bool IsValidReferenceExpression() const; + // True iff the expression is a private name. 
+ bool IsPrivateName() const; + // Helpers for ToBoolean conversion. bool ToBooleanIsTrue() const; bool ToBooleanIsFalse() const; @@ -1421,32 +1423,6 @@ class ObjectLiteral final : public AggregateLiteral { : public BitField {}; }; - -// A map from property names to getter/setter pairs allocated in the zone. -class AccessorTable - : public base::TemplateHashMap { - public: - explicit AccessorTable(Zone* zone) - : base::TemplateHashMap( - Literal::Match, ZoneAllocationPolicy(zone)), - zone_(zone) {} - - Iterator lookup(Literal* literal) { - Iterator it = find(literal, true, ZoneAllocationPolicy(zone_)); - if (it->second == nullptr) { - it->second = new (zone_) ObjectLiteral::Accessors(); - } - return it; - } - - private: - Zone* zone_; -}; - - // An array literal has a literals object that is used // for minimizing the work when constructing it at runtime. class ArrayLiteral final : public AggregateLiteral { @@ -1533,7 +1509,7 @@ class VariableProxy final : public Expression { void set_is_assigned() { bit_field_ = IsAssignedField::update(bit_field_, true); if (is_resolved()) { - var()->set_maybe_assigned(); + var()->SetMaybeAssigned(); } } @@ -1635,11 +1611,12 @@ class VariableProxy final : public Expression { // Otherwise, the assignment is to a non-property (a global, a local slot, a // parameter slot, or a destructuring pattern). enum AssignType { - NON_PROPERTY, - NAMED_PROPERTY, - KEYED_PROPERTY, - NAMED_SUPER_PROPERTY, - KEYED_SUPER_PROPERTY + NON_PROPERTY, // destructuring + NAMED_PROPERTY, // obj.key + KEYED_PROPERTY, // obj[key] + NAMED_SUPER_PROPERTY, // super.key + KEYED_SUPER_PROPERTY, // super[key] + PRIVATE_METHOD // obj.#key: #key is a private method }; class Property final : public Expression { @@ -1650,10 +1627,19 @@ class Property final : public Expression { Expression* key() const { return key_; } bool IsSuperAccess() { return obj()->IsSuperPropertyReference(); } + bool IsPrivateReference() const { return key()->IsPrivateName(); } // Returns the properties assign type. static AssignType GetAssignType(Property* property) { if (property == nullptr) return NON_PROPERTY; + if (property->IsPrivateReference()) { + DCHECK(!property->IsSuperAccess()); + VariableProxy* proxy = property->key()->AsVariableProxy(); + DCHECK_NOT_NULL(proxy); + Variable* var = proxy->var(); + // Use KEYED_PROPERTY for private fields. + return var->requires_brand_check() ? PRIVATE_METHOD : KEYED_PROPERTY; + } bool super_access = property->IsSuperAccess(); return (property->key()->IsPropertyName()) ? (super_access ? NAMED_SUPER_PROPERTY : NAMED_PROPERTY) @@ -1715,6 +1701,7 @@ class Call final : public Expression { KEYED_PROPERTY_CALL, NAMED_SUPER_PROPERTY_CALL, KEYED_SUPER_PROPERTY_CALL, + PRIVATE_CALL, SUPER_CALL, RESOLVED_PROPERTY_CALL, OTHER_CALL diff --git a/deps/v8/src/ast/modules.cc b/deps/v8/src/ast/modules.cc index 5e9bbc6332e40a..261b72c352a55d 100644 --- a/deps/v8/src/ast/modules.cc +++ b/deps/v8/src/ast/modules.cc @@ -12,7 +12,7 @@ namespace v8 { namespace internal { -bool ModuleDescriptor::AstRawStringComparer::operator()( +bool SourceTextModuleDescriptor::AstRawStringComparer::operator()( const AstRawString* lhs, const AstRawString* rhs) const { // Fast path for equal pointers: a pointer is not strictly less than itself. 
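AstRawStringComparer above orders strings by hash, then by length, and only then by raw bytes, with an equal-pointer shortcut so that x < x is false, as a strict weak ordering requires. A self-contained analogue of the same three-level comparator (names are illustrative):

    #include <cstdint>
    #include <cstring>
    #include <set>
    #include <string>

    struct Interned {
      uint32_t hash;
      std::string bytes;
    };

    // Cheap keys first (hash, then length); the full memcmp runs only to
    // break remaining ties. Equal pointers short-circuit to false.
    struct InternedLess {
      bool operator()(const Interned* lhs, const Interned* rhs) const {
        if (lhs == rhs) return false;
        if (lhs->hash != rhs->hash) return lhs->hash < rhs->hash;
        if (lhs->bytes.size() != rhs->bytes.size())
          return lhs->bytes.size() < rhs->bytes.size();
        return std::memcmp(lhs->bytes.data(), rhs->bytes.data(),
                           lhs->bytes.size()) < 0;
      }
    };

    using InternedSet = std::set<const Interned*, InternedLess>;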
if (lhs == rhs) return false; @@ -27,12 +27,10 @@ bool ModuleDescriptor::AstRawStringComparer::operator()( return memcmp(lhs->raw_data(), rhs->raw_data(), lhs->byte_length()) < 0; } -void ModuleDescriptor::AddImport(const AstRawString* import_name, - const AstRawString* local_name, - const AstRawString* module_request, - const Scanner::Location loc, - const Scanner::Location specifier_loc, - Zone* zone) { +void SourceTextModuleDescriptor::AddImport( + const AstRawString* import_name, const AstRawString* local_name, + const AstRawString* module_request, const Scanner::Location loc, + const Scanner::Location specifier_loc, Zone* zone) { Entry* entry = new (zone) Entry(loc); entry->local_name = local_name; entry->import_name = import_name; @@ -40,38 +38,34 @@ void ModuleDescriptor::AddImport(const AstRawString* import_name, AddRegularImport(entry); } -void ModuleDescriptor::AddStarImport(const AstRawString* local_name, - const AstRawString* module_request, - const Scanner::Location loc, - const Scanner::Location specifier_loc, - Zone* zone) { +void SourceTextModuleDescriptor::AddStarImport( + const AstRawString* local_name, const AstRawString* module_request, + const Scanner::Location loc, const Scanner::Location specifier_loc, + Zone* zone) { Entry* entry = new (zone) Entry(loc); entry->local_name = local_name; entry->module_request = AddModuleRequest(module_request, specifier_loc); AddNamespaceImport(entry, zone); } -void ModuleDescriptor::AddEmptyImport(const AstRawString* module_request, - const Scanner::Location specifier_loc) { +void SourceTextModuleDescriptor::AddEmptyImport( + const AstRawString* module_request, const Scanner::Location specifier_loc) { AddModuleRequest(module_request, specifier_loc); } - -void ModuleDescriptor::AddExport( - const AstRawString* local_name, const AstRawString* export_name, - Scanner::Location loc, Zone* zone) { +void SourceTextModuleDescriptor::AddExport(const AstRawString* local_name, + const AstRawString* export_name, + Scanner::Location loc, Zone* zone) { Entry* entry = new (zone) Entry(loc); entry->export_name = export_name; entry->local_name = local_name; AddRegularExport(entry); } -void ModuleDescriptor::AddExport(const AstRawString* import_name, - const AstRawString* export_name, - const AstRawString* module_request, - const Scanner::Location loc, - const Scanner::Location specifier_loc, - Zone* zone) { +void SourceTextModuleDescriptor::AddExport( + const AstRawString* import_name, const AstRawString* export_name, + const AstRawString* module_request, const Scanner::Location loc, + const Scanner::Location specifier_loc, Zone* zone) { DCHECK_NOT_NULL(import_name); DCHECK_NOT_NULL(export_name); Entry* entry = new (zone) Entry(loc); @@ -81,10 +75,9 @@ void ModuleDescriptor::AddExport(const AstRawString* import_name, AddSpecialExport(entry, zone); } -void ModuleDescriptor::AddStarExport(const AstRawString* module_request, - const Scanner::Location loc, - const Scanner::Location specifier_loc, - Zone* zone) { +void SourceTextModuleDescriptor::AddStarExport( + const AstRawString* module_request, const Scanner::Location loc, + const Scanner::Location specifier_loc, Zone* zone) { Entry* entry = new (zone) Entry(loc); entry->module_request = AddModuleRequest(module_request, specifier_loc); AddSpecialExport(entry, zone); @@ -98,24 +91,25 @@ Handle ToStringOrUndefined(Isolate* isolate, const AstRawString* s) { } } // namespace -Handle ModuleDescriptor::Entry::Serialize( +Handle SourceTextModuleDescriptor::Entry::Serialize( Isolate* isolate) const { 
CHECK(Smi::IsValid(module_request)); // TODO(neis): Check earlier? - return ModuleInfoEntry::New( + return SourceTextModuleInfoEntry::New( isolate, ToStringOrUndefined(isolate, export_name), ToStringOrUndefined(isolate, local_name), ToStringOrUndefined(isolate, import_name), module_request, cell_index, location.beg_pos, location.end_pos); } -Handle ModuleDescriptor::SerializeRegularExports(Isolate* isolate, - Zone* zone) const { +Handle SourceTextModuleDescriptor::SerializeRegularExports( + Isolate* isolate, Zone* zone) const { // We serialize regular exports in a way that lets us later iterate over their // local names and for each local name immediately access all its export // names. (Regular exports have neither import name nor module request.) ZoneVector> data( - ModuleInfo::kRegularExportLength * regular_exports_.size(), zone); + SourceTextModuleInfo::kRegularExportLength * regular_exports_.size(), + zone); int index = 0; for (auto it = regular_exports_.begin(); it != regular_exports_.end();) { @@ -130,12 +124,13 @@ Handle ModuleDescriptor::SerializeRegularExports(Isolate* isolate, } while (next != regular_exports_.end() && next->first == it->first); Handle export_names = isolate->factory()->NewFixedArray(count); - data[index + ModuleInfo::kRegularExportLocalNameOffset] = + data[index + SourceTextModuleInfo::kRegularExportLocalNameOffset] = it->second->local_name->string(); - data[index + ModuleInfo::kRegularExportCellIndexOffset] = + data[index + SourceTextModuleInfo::kRegularExportCellIndexOffset] = handle(Smi::FromInt(it->second->cell_index), isolate); - data[index + ModuleInfo::kRegularExportExportNamesOffset] = export_names; - index += ModuleInfo::kRegularExportLength; + data[index + SourceTextModuleInfo::kRegularExportExportNamesOffset] = + export_names; + index += SourceTextModuleInfo::kRegularExportLength; // Collect the export names. 
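The loop above emits one fixed-stride record per exported local name: the local name, its cell index stored as a Smi, then the FixedArray of export names. A minimal sketch of that flattened layout; the constant names are illustrative, not V8's:

    constexpr int kLocalNameOffset = 0;    // -> exported local name
    constexpr int kCellIndexOffset = 1;    // -> its cell index (a Smi)
    constexpr int kExportNamesOffset = 2;  // -> array of export names
    constexpr int kRecordLength = 3;

    // Slot of field `offset` within record `record` of the flat array.
    constexpr int RecordSlot(int record, int offset) {
      return record * kRecordLength + offset;
    }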
int i = 0; @@ -159,7 +154,7 @@ Handle ModuleDescriptor::SerializeRegularExports(Isolate* isolate, return result; } -void ModuleDescriptor::MakeIndirectExportsExplicit(Zone* zone) { +void SourceTextModuleDescriptor::MakeIndirectExportsExplicit(Zone* zone) { for (auto it = regular_exports_.begin(); it != regular_exports_.end();) { Entry* entry = it->second; DCHECK_NOT_NULL(entry->local_name); @@ -191,14 +186,14 @@ void ModuleDescriptor::MakeIndirectExportsExplicit(Zone* zone) { } } -ModuleDescriptor::CellIndexKind ModuleDescriptor::GetCellIndexKind( - int cell_index) { +SourceTextModuleDescriptor::CellIndexKind +SourceTextModuleDescriptor::GetCellIndexKind(int cell_index) { if (cell_index > 0) return kExport; if (cell_index < 0) return kImport; return kInvalid; } -void ModuleDescriptor::AssignCellIndices() { +void SourceTextModuleDescriptor::AssignCellIndices() { int export_index = 1; for (auto it = regular_exports_.begin(); it != regular_exports_.end();) { auto current_key = it->first; @@ -230,10 +225,11 @@ void ModuleDescriptor::AssignCellIndices() { namespace { -const ModuleDescriptor::Entry* BetterDuplicate( - const ModuleDescriptor::Entry* candidate, - ZoneMap& export_names, - const ModuleDescriptor::Entry* current_duplicate) { +const SourceTextModuleDescriptor::Entry* BetterDuplicate( + const SourceTextModuleDescriptor::Entry* candidate, + ZoneMap& + export_names, + const SourceTextModuleDescriptor::Entry* current_duplicate) { DCHECK_NOT_NULL(candidate->export_name); DCHECK(candidate->location.IsValid()); auto insert_result = @@ -249,11 +245,11 @@ const ModuleDescriptor::Entry* BetterDuplicate( } // namespace -const ModuleDescriptor::Entry* ModuleDescriptor::FindDuplicateExport( - Zone* zone) const { - const ModuleDescriptor::Entry* duplicate = nullptr; - ZoneMap export_names( - zone); +const SourceTextModuleDescriptor::Entry* +SourceTextModuleDescriptor::FindDuplicateExport(Zone* zone) const { + const SourceTextModuleDescriptor::Entry* duplicate = nullptr; + ZoneMap + export_names(zone); for (const auto& elem : regular_exports_) { duplicate = BetterDuplicate(elem.second, export_names, duplicate); } @@ -264,9 +260,9 @@ const ModuleDescriptor::Entry* ModuleDescriptor::FindDuplicateExport( return duplicate; } -bool ModuleDescriptor::Validate(ModuleScope* module_scope, - PendingCompilationErrorHandler* error_handler, - Zone* zone) { +bool SourceTextModuleDescriptor::Validate( + ModuleScope* module_scope, PendingCompilationErrorHandler* error_handler, + Zone* zone) { DCHECK_EQ(this, module_scope->module()); DCHECK_NOT_NULL(error_handler); diff --git a/deps/v8/src/ast/modules.h b/deps/v8/src/ast/modules.h index c3aa2bd0ada21f..4921d41932e31b 100644 --- a/deps/v8/src/ast/modules.h +++ b/deps/v8/src/ast/modules.h @@ -13,13 +13,13 @@ namespace internal { class AstRawString; -class ModuleInfo; -class ModuleInfoEntry; +class SourceTextModuleInfo; +class SourceTextModuleInfoEntry; class PendingCompilationErrorHandler; -class ModuleDescriptor : public ZoneObject { +class SourceTextModuleDescriptor : public ZoneObject { public: - explicit ModuleDescriptor(Zone* zone) + explicit SourceTextModuleDescriptor(Zone* zone) : module_requests_(zone), special_exports_(zone), namespace_imports_(zone), @@ -84,9 +84,9 @@ class ModuleDescriptor : public ZoneObject { const AstRawString* import_name; // The module_request value records the order in which modules are - // requested. It also functions as an index into the ModuleInfo's array of - // module specifiers and into the Module's array of requested modules. 
A - // negative value means no module request. + // requested. It also functions as an index into the SourceTextModuleInfo's + // array of module specifiers and into the Module's array of requested + // modules. A negative value means no module request. int module_request; // Import/export entries that are associated with a MODULE-allocated @@ -107,7 +107,7 @@ class ModuleDescriptor : public ZoneObject { module_request(-1), cell_index(0) {} - Handle Serialize(Isolate* isolate) const; + Handle Serialize(Isolate* isolate) const; }; enum CellIndexKind { kInvalid, kExport, kImport }; diff --git a/deps/v8/src/ast/prettyprinter.cc b/deps/v8/src/ast/prettyprinter.cc index eca091d61ff80e..c0fe3baff398bc 100644 --- a/deps/v8/src/ast/prettyprinter.cc +++ b/deps/v8/src/ast/prettyprinter.cc @@ -1278,14 +1278,24 @@ void AstPrinter::VisitProperty(Property* node) { IndentedScope indent(this, buf.begin(), node->position()); Visit(node->obj()); - AssignType property_kind = Property::GetAssignType(node); - if (property_kind == NAMED_PROPERTY || - property_kind == NAMED_SUPER_PROPERTY) { - PrintLiteralIndented("NAME", node->key()->AsLiteral(), false); - } else { - DCHECK(property_kind == KEYED_PROPERTY || - property_kind == KEYED_SUPER_PROPERTY); - PrintIndentedVisit("KEY", node->key()); + AssignType type = Property::GetAssignType(node); + switch (type) { + case NAMED_PROPERTY: + case NAMED_SUPER_PROPERTY: { + PrintLiteralIndented("NAME", node->key()->AsLiteral(), false); + break; + } + case PRIVATE_METHOD: { + PrintIndentedVisit("PRIVATE_METHOD", node->key()); + break; + } + case KEYED_PROPERTY: + case KEYED_SUPER_PROPERTY: { + PrintIndentedVisit("KEY", node->key()); + break; + } + case NON_PROPERTY: + UNREACHABLE(); } } diff --git a/deps/v8/src/ast/scopes.cc b/deps/v8/src/ast/scopes.cc index e45303c64b577b..237d98ec6047f2 100644 --- a/deps/v8/src/ast/scopes.cc +++ b/deps/v8/src/ast/scopes.cc @@ -9,7 +9,7 @@ #include "src/ast/ast.h" #include "src/base/optional.h" #include "src/builtins/accessors.h" -#include "src/execution/message-template.h" +#include "src/common/message-template.h" #include "src/init/bootstrapper.h" #include "src/logging/counters.h" #include "src/objects/module-inl.h" @@ -40,6 +40,7 @@ Variable* VariableMap::Declare(Zone* zone, Scope* scope, VariableKind kind, InitializationFlag initialization_flag, MaybeAssignedFlag maybe_assigned_flag, + RequiresBrandCheckFlag requires_brand_check, bool* was_added) { // AstRawStrings are unambiguous, i.e., the same string is always represented // by the same AstRawString*. @@ -51,8 +52,9 @@ Variable* VariableMap::Declare(Zone* zone, Scope* scope, if (*was_added) { // The variable has not been declared yet -> insert it. 
DCHECK_EQ(name, p->key); - Variable* variable = new (zone) Variable( - scope, name, mode, kind, initialization_flag, maybe_assigned_flag); + Variable* variable = + new (zone) Variable(scope, name, mode, kind, initialization_flag, + maybe_assigned_flag, requires_brand_check); p->value = variable; } return reinterpret_cast(p->value); @@ -128,7 +130,7 @@ ModuleScope::ModuleScope(DeclarationScope* script_scope, AstValueFactory* avfactory) : DeclarationScope(avfactory->zone(), script_scope, MODULE_SCOPE, kModule), module_descriptor_(new (avfactory->zone()) - ModuleDescriptor(avfactory->zone())) { + SourceTextModuleDescriptor(avfactory->zone())) { set_language_mode(LanguageMode::kStrict); DeclareThis(avfactory); } @@ -262,7 +264,6 @@ void Scope::SetDefaults() { is_debug_evaluate_scope_ = false; inner_scope_calls_eval_ = false; - force_context_allocation_ = false; force_context_allocation_for_parameters_ = false; is_declaration_scope_ = false; @@ -506,8 +507,9 @@ void DeclarationScope::HoistSloppyBlockFunctions(AstNodeFactory* factory) { DCHECK(is_being_lazily_parsed_); bool was_added; Variable* var = DeclareVariableName(name, VariableMode::kVar, &was_added); - if (sloppy_block_function->init() == Token::ASSIGN) - var->set_maybe_assigned(); + if (sloppy_block_function->init() == Token::ASSIGN) { + var->SetMaybeAssigned(); + } } } } @@ -785,11 +787,13 @@ Variable* Scope::LookupInScopeInfo(const AstRawString* name, Scope* cache) { VariableMode mode; InitializationFlag init_flag; MaybeAssignedFlag maybe_assigned_flag; + RequiresBrandCheckFlag requires_brand_check = kNoBrandCheck; { location = VariableLocation::CONTEXT; index = ScopeInfo::ContextSlotIndex(*scope_info_, name_handle, &mode, - &init_flag, &maybe_assigned_flag); + &init_flag, &maybe_assigned_flag, + &requires_brand_check); found = index >= 0; } @@ -814,9 +818,9 @@ Variable* Scope::LookupInScopeInfo(const AstRawString* name, Scope* cache) { } bool was_added; - Variable* var = - cache->variables_.Declare(zone(), this, name, mode, NORMAL_VARIABLE, - init_flag, maybe_assigned_flag, &was_added); + Variable* var = cache->variables_.Declare( + zone(), this, name, mode, NORMAL_VARIABLE, init_flag, maybe_assigned_flag, + requires_brand_check, &was_added); DCHECK(was_added); var->AllocateTo(location, index); return var; @@ -889,7 +893,7 @@ Variable* Scope::DeclareLocal(const AstRawString* name, VariableMode mode, // assigned because they might be accessed by a lazily parsed top-level // function, which, for efficiency, we preparse without variable tracking. if (is_script_scope() || is_module_scope()) { - if (mode != VariableMode::kConst) var->set_maybe_assigned(); + if (mode != VariableMode::kConst) var->SetMaybeAssigned(); var->set_is_used(); } @@ -938,7 +942,7 @@ Variable* Scope::DeclareVariable( DCHECK(*was_added); } } else { - var->set_maybe_assigned(); + var->SetMaybeAssigned(); if (V8_UNLIKELY(IsLexicalVariableMode(mode) || IsLexicalVariableMode(var->mode()))) { // The name was declared in this scope before; check for conflicting @@ -1009,7 +1013,7 @@ Variable* Scope::DeclareVariableName(const AstRawString* name, } // Sloppy block function redefinition. 
} - var->set_maybe_assigned(); + var->SetMaybeAssigned(); } var->set_is_used(); return var; @@ -1040,7 +1044,7 @@ Variable* DeclarationScope::DeclareDynamicGlobal(const AstRawString* name, bool was_added; return cache->variables_.Declare( zone(), this, name, VariableMode::kDynamicGlobal, kind, - kCreatedInitialized, kNotAssigned, &was_added); + kCreatedInitialized, kNotAssigned, kNoBrandCheck, &was_added); // TODO(neis): Mark variable as maybe-assigned? } @@ -1063,7 +1067,7 @@ Variable* Scope::NewTemporary(const AstRawString* name, Variable* var = new (zone()) Variable(scope, name, VariableMode::kTemporary, NORMAL_VARIABLE, kCreatedInitialized); scope->AddLocal(var); - if (maybe_assigned == kMaybeAssigned) var->set_maybe_assigned(); + if (maybe_assigned == kMaybeAssigned) var->SetMaybeAssigned(); return var; } @@ -1401,7 +1405,7 @@ void Scope::AnalyzePartially(DeclarationScope* max_outer_scope, } } else { var->set_is_used(); - if (proxy->is_assigned()) var->set_maybe_assigned(); + if (proxy->is_assigned()) var->SetMaybeAssigned(); } } @@ -1592,6 +1596,10 @@ void PrintVar(int indent, Variable* var) { if (comma) PrintF(", "); PrintF("hole initialization elided"); } + if (var->requires_brand_check()) { + if (comma) PrintF(", "); + PrintF("requires brand check"); + } PrintF("\n"); } @@ -1766,9 +1774,9 @@ Variable* Scope::NonLocal(const AstRawString* name, VariableMode mode) { // Declare a new non-local. DCHECK(IsDynamicVariableMode(mode)); bool was_added; - Variable* var = - variables_.Declare(zone(), this, name, mode, NORMAL_VARIABLE, - kCreatedInitialized, kNotAssigned, &was_added); + Variable* var = variables_.Declare(zone(), this, name, mode, NORMAL_VARIABLE, + kCreatedInitialized, kNotAssigned, + kNoBrandCheck, &was_added); // Allocate it by giving it a dynamic lookup. var->AllocateTo(VariableLocation::LOOKUP, -1); return var; @@ -1879,11 +1887,14 @@ Variable* Scope::LookupWith(VariableProxy* proxy, Scope* scope, DCHECK(!scope->already_resolved_); var->set_is_used(); var->ForceContextAllocation(); - if (proxy->is_assigned()) var->set_maybe_assigned(); + if (proxy->is_assigned()) var->SetMaybeAssigned(); } if (entry_point != nullptr) entry_point->variables_.Remove(var); Scope* target = entry_point == nullptr ? scope : entry_point; - return target->NonLocal(proxy->raw_name(), VariableMode::kDynamic); + Variable* dynamic = + target->NonLocal(proxy->raw_name(), VariableMode::kDynamic); + dynamic->set_local_if_not_shadowed(var); + return dynamic; } Variable* Scope::LookupSloppyEval(VariableProxy* proxy, Scope* scope, @@ -1912,7 +1923,7 @@ Variable* Scope::LookupSloppyEval(VariableProxy* proxy, Scope* scope, // script scope are always dynamic. if (var->IsGlobalObjectProperty()) { Scope* target = entry_point == nullptr ? 
scope : entry_point; - return target->NonLocal(proxy->raw_name(), VariableMode::kDynamicGlobal); + var = target->NonLocal(proxy->raw_name(), VariableMode::kDynamicGlobal); } if (var->is_dynamic()) return var; @@ -2010,7 +2021,7 @@ void Scope::ResolvePreparsedVariable(VariableProxy* proxy, Scope* scope, var->set_is_used(); if (!var->is_dynamic()) { var->ForceContextAllocation(); - if (proxy->is_assigned()) var->set_maybe_assigned(); + if (proxy->is_assigned()) var->SetMaybeAssigned(); return; } } @@ -2054,7 +2065,7 @@ bool Scope::MustAllocate(Variable* var) { if (!var->raw_name()->IsEmpty() && (inner_scope_calls_eval_ || is_catch_scope() || is_script_scope())) { var->set_is_used(); - if (inner_scope_calls_eval_) var->set_maybe_assigned(); + if (inner_scope_calls_eval_) var->SetMaybeAssigned(); } DCHECK(!var->has_forced_context_allocation() || var->is_used()); // Global variables do not need to be allocated. @@ -2124,7 +2135,7 @@ void DeclarationScope::AllocateParameterLocals() { DCHECK_EQ(this, var->scope()); if (has_mapped_arguments) { var->set_is_used(); - var->set_maybe_assigned(); + var->SetMaybeAssigned(); var->ForceContextAllocation(); } AllocateParameter(var, i); @@ -2315,12 +2326,13 @@ int Scope::ContextLocalCount() const { (is_function_var_in_context ? 1 : 0); } -Variable* ClassScope::DeclarePrivateName(const AstRawString* name, - bool* was_added) { +Variable* ClassScope::DeclarePrivateName( + const AstRawString* name, RequiresBrandCheckFlag requires_brand_check, + bool* was_added) { Variable* result = EnsureRareData()->private_name_map.Declare( zone(), this, name, VariableMode::kConst, NORMAL_VARIABLE, InitializationFlag::kNeedsInitialization, - MaybeAssignedFlag::kMaybeAssigned, was_added); + MaybeAssignedFlag::kMaybeAssigned, requires_brand_check, was_added); if (*was_added) { locals_.Add(result); } @@ -2404,8 +2416,10 @@ Variable* ClassScope::LookupPrivateNameInScopeInfo(const AstRawString* name) { VariableMode mode; InitializationFlag init_flag; MaybeAssignedFlag maybe_assigned_flag; - int index = ScopeInfo::ContextSlotIndex(*scope_info_, name_handle, &mode, - &init_flag, &maybe_assigned_flag); + RequiresBrandCheckFlag requires_brand_check; + int index = + ScopeInfo::ContextSlotIndex(*scope_info_, name_handle, &mode, &init_flag, + &maybe_assigned_flag, &requires_brand_check); if (index < 0) { return nullptr; } @@ -2417,7 +2431,7 @@ Variable* ClassScope::LookupPrivateNameInScopeInfo(const AstRawString* name) { // Add the found private name to the map to speed up subsequent // lookups for the same name. 
bool was_added; - Variable* var = DeclarePrivateName(name, &was_added); + Variable* var = DeclarePrivateName(name, requires_brand_check, &was_added); DCHECK(was_added); var->AllocateTo(VariableLocation::CONTEXT, index); return var; @@ -2454,8 +2468,7 @@ bool ClassScope::ResolvePrivateNames(ParseInfo* info) { Scanner::Location loc = proxy->location(); info->pending_error_handler()->ReportMessageAt( loc.beg_pos, loc.end_pos, - MessageTemplate::kInvalidPrivateFieldResolution, proxy->raw_name(), - kSyntaxError); + MessageTemplate::kInvalidPrivateFieldResolution, proxy->raw_name()); return false; } else { var->set_is_used(); diff --git a/deps/v8/src/ast/scopes.h b/deps/v8/src/ast/scopes.h index 1feaad2a9041b5..932d5c70b937b8 100644 --- a/deps/v8/src/ast/scopes.h +++ b/deps/v8/src/ast/scopes.h @@ -41,7 +41,9 @@ class VariableMap : public ZoneHashMap { Variable* Declare(Zone* zone, Scope* scope, const AstRawString* name, VariableMode mode, VariableKind kind, InitializationFlag initialization_flag, - MaybeAssignedFlag maybe_assigned_flag, bool* was_added); + MaybeAssignedFlag maybe_assigned_flag, + RequiresBrandCheckFlag requires_brand_check, + bool* was_added); V8_EXPORT_PRIVATE Variable* Lookup(const AstRawString* name); void Remove(Variable* var); @@ -556,7 +558,7 @@ class V8_EXPORT_PRIVATE Scope : public NON_EXPORTED_BASE(ZoneObject) { MaybeAssignedFlag maybe_assigned_flag, bool* was_added) { Variable* result = variables_.Declare(zone, this, name, mode, kind, initialization_flag, - maybe_assigned_flag, was_added); + maybe_assigned_flag, kNoBrandCheck, was_added); if (*was_added) locals_.Add(result); return result; } @@ -712,7 +714,6 @@ class V8_EXPORT_PRIVATE Scope : public NON_EXPORTED_BASE(ZoneObject) { // True if one of the inner scopes or the scope itself calls eval. bool inner_scope_calls_eval_ : 1; - bool force_context_allocation_ : 1; bool force_context_allocation_for_parameters_ : 1; // True if it holds 'var' declarations. @@ -1155,14 +1156,14 @@ class ModuleScope final : public DeclarationScope { AstValueFactory* avfactory); // Returns nullptr in a deserialized scope. - ModuleDescriptor* module() const { return module_descriptor_; } + SourceTextModuleDescriptor* module() const { return module_descriptor_; } // Set MODULE as VariableLocation for all variables that will live in a // module's export table. void AllocateModuleVariables(); private: - ModuleDescriptor* const module_descriptor_; + SourceTextModuleDescriptor* const module_descriptor_; }; class V8_EXPORT_PRIVATE ClassScope : public Scope { @@ -1174,7 +1175,9 @@ class V8_EXPORT_PRIVATE ClassScope : public Scope { // Declare a private name in the private name map and add it to the // local variables of this scope. 
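   // (A sketch of the assumed semantics: kRequiresBrandCheck marks private
   // methods and accessors, whose receiver must pass a class-brand check at
   // the access site, while plain private fields keep kNoBrandCheck because
   // the field lookup itself validates the receiver.)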
- Variable* DeclarePrivateName(const AstRawString* name, bool* was_added); + Variable* DeclarePrivateName(const AstRawString* name, + RequiresBrandCheckFlag requires_brand_check, + bool* was_added); void AddUnresolvedPrivateName(VariableProxy* proxy); diff --git a/deps/v8/src/ast/variables.h b/deps/v8/src/ast/variables.h index df40fee754073a..7805fa20c8c8f6 100644 --- a/deps/v8/src/ast/variables.h +++ b/deps/v8/src/ast/variables.h @@ -21,7 +21,8 @@ class Variable final : public ZoneObject { public: Variable(Scope* scope, const AstRawString* name, VariableMode mode, VariableKind kind, InitializationFlag initialization_flag, - MaybeAssignedFlag maybe_assigned_flag = kNotAssigned) + MaybeAssignedFlag maybe_assigned_flag = kNotAssigned, + RequiresBrandCheckFlag requires_brand_check = kNoBrandCheck) : scope_(scope), name_(name), local_if_not_shadowed_(nullptr), @@ -31,6 +32,7 @@ class Variable final : public ZoneObject { bit_field_(MaybeAssignedFlagField::encode(maybe_assigned_flag) | InitializationFlagField::encode(initialization_flag) | VariableModeField::encode(mode) | + RequiresBrandCheckField::encode(requires_brand_check) | IsUsedField::encode(false) | ForceContextAllocationField::encode(false) | ForceHoleInitializationField::encode(false) | @@ -69,8 +71,31 @@ class Variable final : public ZoneObject { MaybeAssignedFlag maybe_assigned() const { return MaybeAssignedFlagField::decode(bit_field_); } - void set_maybe_assigned() { - bit_field_ = MaybeAssignedFlagField::update(bit_field_, kMaybeAssigned); + void SetMaybeAssigned() { + // If this variable is dynamically shadowing another variable, then that + // variable could also be assigned (in the non-shadowing case). + if (has_local_if_not_shadowed()) { + // Avoid repeatedly marking the same tree of variables by only recursing + // when this variable's maybe_assigned status actually changes. + if (!maybe_assigned()) { + local_if_not_shadowed()->SetMaybeAssigned(); + } + DCHECK(local_if_not_shadowed()->maybe_assigned()); + } + set_maybe_assigned(); + } + + RequiresBrandCheckFlag get_requires_brand_check_flag() const { + return RequiresBrandCheckField::decode(bit_field_); + } + + bool requires_brand_check() const { + return get_requires_brand_check_flag() == kRequiresBrandCheck; + } + + void set_requires_brand_check() { + bit_field_ = + RequiresBrandCheckField::update(bit_field_, kRequiresBrandCheck); } int initializer_position() { return initializer_position_; } @@ -143,11 +168,16 @@ class Variable final : public ZoneObject { } Variable* local_if_not_shadowed() const { - DCHECK(mode() == VariableMode::kDynamicLocal && - local_if_not_shadowed_ != nullptr); + DCHECK((mode() == VariableMode::kDynamicLocal || + mode() == VariableMode::kDynamic) && + has_local_if_not_shadowed()); return local_if_not_shadowed_; } + bool has_local_if_not_shadowed() const { + return local_if_not_shadowed_ != nullptr; + } + void set_local_if_not_shadowed(Variable* local) { local_if_not_shadowed_ = local; } @@ -200,15 +230,19 @@ class Variable final : public ZoneObject { const AstRawString* name_; // If this field is set, this variable references the stored locally bound - // variable, but it might be shadowed by variable bindings introduced by - // sloppy 'eval' calls between the reference scope (inclusive) and the - // binding scope (exclusive). + // variable, but it might be shadowed by variable bindings introduced by with + // blocks or sloppy 'eval' calls between the reference scope (inclusive) and + // the binding scope (exclusive). 
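   // For example (a sketch based on the LookupWith change above): in
   //   function f(o) { with (o) { x = 1; } }
   // the proxy for `x` resolves to a kDynamic variable that records an outer
   // `var x` here, and SetMaybeAssigned() conservatively marks that shadowed
   // local as well.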
   Variable* local_if_not_shadowed_;
   Variable* next_;
   int index_;
   int initializer_position_;
   uint16_t bit_field_;
 
+  void set_maybe_assigned() {
+    bit_field_ = MaybeAssignedFlagField::update(bit_field_, kMaybeAssigned);
+  }
+
   class VariableModeField : public BitField16<VariableMode, 0, 4> {};
   class VariableKindField
       : public BitField16<VariableKind, VariableModeField::kNext, 3> {};
@@ -225,6 +259,9 @@ class Variable final : public ZoneObject {
   class MaybeAssignedFlagField
       : public BitField16<MaybeAssignedFlag,
                           ForceHoleInitializationField::kNext, 1> {};
+  class RequiresBrandCheckField
+      : public BitField16<RequiresBrandCheckFlag,
+                          MaybeAssignedFlagField::kNext, 1> {};
   Variable** next() { return &next_; }
   friend List;
   friend base::ThreadedListTraits<Variable>;
diff --git a/deps/v8/src/base/adapters.h b/deps/v8/src/base/adapters.h
index 92c500085d1ed2..f684b52ccb6dc0 100644
--- a/deps/v8/src/base/adapters.h
+++ b/deps/v8/src/base/adapters.h
@@ -45,7 +45,7 @@ class ReversedAdapter {
 //   // iterates through v from back to front
 //   }
 template <typename T>
-ReversedAdapter<T> Reversed(T& t) {
+ReversedAdapter<T> Reversed(T&& t) {
   return ReversedAdapter<T>(t);
 }
 
diff --git a/deps/v8/src/base/lsan.h b/deps/v8/src/base/lsan.h
new file mode 100644
index 00000000000000..fd9bbd21c1b818
--- /dev/null
+++ b/deps/v8/src/base/lsan.h
@@ -0,0 +1,29 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// LeakSanitizer support.
+
+#ifndef V8_BASE_LSAN_H_
+#define V8_BASE_LSAN_H_
+
+#include <type_traits>
+
+// There is no compile time flag for LSan, to enable this whenever ASan is
+// enabled. Note that LSan can be used as part of ASan with 'detect_leaks=1'.
+// On windows, LSan is not implemented yet, so disable it there.
+#if defined(V8_USE_ADDRESS_SANITIZER) && !defined(V8_OS_WIN)
+
+#include <sanitizer/lsan_interface.h>
+
+#define LSAN_IGNORE_OBJECT(ptr) __lsan_ignore_object(ptr)
+
+#else  // defined(V8_USE_ADDRESS_SANITIZER) && !defined(V8_OS_WIN)
+
+#define LSAN_IGNORE_OBJECT(ptr)                                         \
+  static_assert(std::is_convertible<decltype(ptr), const void*>::value, \
+                "LSAN_IGNORE_OBJECT can only be used with pointer types")
+
+#endif  // defined(V8_USE_ADDRESS_SANITIZER) && !defined(V8_OS_WIN)
+
+#endif  // V8_BASE_LSAN_H_
diff --git a/deps/v8/src/common/v8memory.h b/deps/v8/src/base/memory.h
similarity index 79%
rename from deps/v8/src/common/v8memory.h
rename to deps/v8/src/base/memory.h
index 02ba2de8481334..087f67291d201d 100644
--- a/deps/v8/src/common/v8memory.h
+++ b/deps/v8/src/base/memory.h
@@ -2,14 +2,16 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
-#ifndef V8_COMMON_V8MEMORY_H_
-#define V8_COMMON_V8MEMORY_H_
+#ifndef V8_BASE_MEMORY_H_
+#define V8_BASE_MEMORY_H_
 
 #include "src/base/macros.h"
-#include "src/common/globals.h"
 
 namespace v8 {
-namespace internal {
+namespace base {
+
+using Address = uintptr_t;
+using byte = uint8_t;
 
 // Memory provides an interface to 'raw' memory. It encapsulates the casts
 // that typically are needed when incompatible pointer types are used.
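 // A minimal usage sketch, assuming the new v8::base namespace (the templated
 // accessors subsume the fixed-width helpers removed in the next hunk):
 //   uint16_t v = base::ReadUnalignedValue<uint16_t>(addr);
 //   base::WriteUnalignedValue<uint16_t>(addr, v + 1);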
@@ -39,22 +41,6 @@ static inline void WriteUnalignedValue(Address p, V value) { memcpy(reinterpret_cast(p), &value, sizeof(V)); } -static inline uint16_t ReadUnalignedUInt16(Address p) { - return ReadUnalignedValue(p); -} - -static inline void WriteUnalignedUInt16(Address p, uint16_t value) { - WriteUnalignedValue(p, value); -} - -static inline uint32_t ReadUnalignedUInt32(Address p) { - return ReadUnalignedValue(p); -} - -static inline void WriteUnalignedUInt32(Address p, uint32_t value) { - WriteUnalignedValue(p, value); -} - template static inline V ReadLittleEndianValue(Address p) { #if defined(V8_TARGET_LITTLE_ENDIAN) @@ -93,7 +79,7 @@ static inline void WriteLittleEndianValue(V* p, V value) { WriteLittleEndianValue(reinterpret_cast
(p), value); } -} // namespace internal +} // namespace base } // namespace v8 -#endif // V8_COMMON_V8MEMORY_H_ +#endif // V8_BASE_MEMORY_H_ diff --git a/deps/v8/src/base/platform/OWNERS b/deps/v8/src/base/platform/OWNERS index 7f64f4dedb8102..bf5455c9afaa8a 100644 --- a/deps/v8/src/base/platform/OWNERS +++ b/deps/v8/src/base/platform/OWNERS @@ -1,5 +1,3 @@ -set noparent - hpayer@chromium.org mlippautz@chromium.org ulan@chromium.org diff --git a/deps/v8/src/base/platform/platform-fuchsia.cc b/deps/v8/src/base/platform/platform-fuchsia.cc index 11499f572cfd93..fa175c39177aea 100644 --- a/deps/v8/src/base/platform/platform-fuchsia.cc +++ b/deps/v8/src/base/platform/platform-fuchsia.cc @@ -48,7 +48,7 @@ void* OS::Allocate(void* address, size_t size, size_t alignment, size_t request_size = size + (alignment - page_size); zx_handle_t vmo; - if (zx_vmo_create(request_size, ZX_VMO_NON_RESIZABLE, &vmo) != ZX_OK) { + if (zx_vmo_create(request_size, 0, &vmo) != ZX_OK) { return nullptr; } static const char kVirtualMemoryName[] = "v8-virtualmem"; @@ -152,7 +152,7 @@ int OS::GetUserTime(uint32_t* secs, uint32_t* usecs) { const auto kMicrosPerSecond = 1000000ULL; zx_time_t nanos_since_thread_started; zx_status_t status = - zx_clock_get_new(ZX_CLOCK_THREAD, &nanos_since_thread_started); + zx_clock_get(ZX_CLOCK_THREAD, &nanos_since_thread_started); if (status != ZX_OK) { return -1; } diff --git a/deps/v8/src/base/platform/platform-posix.cc b/deps/v8/src/base/platform/platform-posix.cc index 7f4ce192dbcfed..6da83d7e0208a3 100644 --- a/deps/v8/src/base/platform/platform-posix.cc +++ b/deps/v8/src/base/platform/platform-posix.cc @@ -199,6 +199,12 @@ void* OS::GetRandomMmapAddr() { MutexGuard guard(rng_mutex.Pointer()); GetPlatformRandomNumberGenerator()->NextBytes(&raw_addr, sizeof(raw_addr)); } +#if defined(__APPLE__) +#if V8_TARGET_ARCH_ARM64 + DCHECK_EQ(1 << 14, AllocatePageSize()); + raw_addr = RoundDown(raw_addr, 1 << 14); +#endif +#endif #if defined(V8_USE_ADDRESS_SANITIZER) || defined(MEMORY_SANITIZER) || \ defined(THREAD_SANITIZER) || defined(LEAK_SANITIZER) // If random hint addresses interfere with address ranges hard coded in diff --git a/deps/v8/src/base/small-vector.h b/deps/v8/src/base/small-vector.h index bb024ca87ead36..b11dfb86b446dc 100644 --- a/deps/v8/src/base/small-vector.h +++ b/deps/v8/src/base/small-vector.h @@ -88,22 +88,29 @@ class SmallVector { DCHECK_NE(0, size()); return end_[-1]; } + const T& back() const { + DCHECK_NE(0, size()); + return end_[-1]; + } T& operator[](size_t index) { DCHECK_GT(size(), index); return begin_[index]; } - const T& operator[](size_t index) const { + const T& at(size_t index) const { DCHECK_GT(size(), index); return begin_[index]; } + const T& operator[](size_t index) const { return at(index); } + template void emplace_back(Args&&... args) { - if (V8_UNLIKELY(end_ == end_of_storage_)) Grow(); - new (end_) T(std::forward(args)...); - ++end_; + T* end = end_; + if (V8_UNLIKELY(end == end_of_storage_)) end = Grow(); + new (end) T(std::forward(args)...); + end_ = end + 1; } void pop_back(size_t count = 1) { @@ -135,7 +142,12 @@ class SmallVector { typename std::aligned_storage::type inline_storage_; - void Grow(size_t min_capacity = 0) { + // Grows the backing store by a factor of two. Returns the new end of the used + // storage (this reduces binary size). + V8_NOINLINE T* Grow() { return Grow(0); } + + // Grows the backing store by a factor of two, and at least to {min_capacity}. 
+ V8_NOINLINE T* Grow(size_t min_capacity) { size_t in_use = end_ - begin_; size_t new_capacity = base::bits::RoundUpToPowerOfTwo(std::max(min_capacity, 2 * capacity())); @@ -145,6 +157,7 @@ class SmallVector { begin_ = new_storage; end_ = new_storage + in_use; end_of_storage_ = new_storage + new_capacity; + return end_; } bool is_big() const { return begin_ != inline_storage_begin(); } diff --git a/deps/v8/src/base/vlq-base64.cc b/deps/v8/src/base/vlq-base64.cc new file mode 100644 index 00000000000000..62e63ac87261d4 --- /dev/null +++ b/deps/v8/src/base/vlq-base64.cc @@ -0,0 +1,58 @@ +// Copyright 2019 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#include +#include + +#include "src/base/logging.h" +#include "src/base/vlq-base64.h" + +namespace v8 { +namespace base { + +namespace { +constexpr int8_t kCharToDigit[] = { + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, 0x3e, -1, -1, -1, 0x3f, + 0x34, 0x35, 0x36, 0x37, 0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, -1, -1, + -1, -1, -1, -1, -1, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, + 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, 0x10, 0x11, 0x12, + 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, 0x19, -1, -1, -1, -1, -1, + -1, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, 0x20, 0x21, 0x22, 0x23, 0x24, + 0x25, 0x26, 0x27, 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, 0x30, + 0x31, 0x32, 0x33, -1, -1, -1, -1, -1}; + +constexpr uint32_t kContinueShift = 5; +constexpr uint32_t kContinueMask = 1 << kContinueShift; +constexpr uint32_t kDataMask = kContinueMask - 1; + +int8_t charToDigitDecode(uint8_t c) { return c < 128u ? kCharToDigit[c] : -1; } +} // namespace + +int8_t charToDigitDecodeForTesting(uint8_t c) { return charToDigitDecode(c); } + +int32_t VLQBase64Decode(const char* start, size_t sz, size_t* pos) { + uint32_t res = 0; + uint64_t shift = 0; + int32_t digit; + + do { + if (*pos >= sz) { + return std::numeric_limits::min(); + } + digit = static_cast(charToDigitDecode(start[*pos])); + bool is_last_byte = (shift + kContinueShift >= 32); + if (digit == -1 || (is_last_byte && (digit >> 2) != 0)) { + return std::numeric_limits::min(); + } + res += (digit & kDataMask) << shift; + shift += kContinueShift; + (*pos)++; + } while (digit & kContinueMask); + return (res & 1) ? -static_cast(res >> 1) : (res >> 1); +} +} // namespace base +} // namespace v8 diff --git a/deps/v8/src/base/vlq-base64.h b/deps/v8/src/base/vlq-base64.h new file mode 100644 index 00000000000000..5d8633798bcf30 --- /dev/null +++ b/deps/v8/src/base/vlq-base64.h @@ -0,0 +1,23 @@ +// Copyright 2019 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#ifndef V8_BASE_VLQ_BASE64_H_ +#define V8_BASE_VLQ_BASE64_H_ + +#include + +#include "src/base/base-export.h" + +namespace v8 { +namespace base { +V8_BASE_EXPORT int8_t charToDigitDecodeForTesting(uint8_t c); + +// Decodes a VLQ-Base64-encoded string into 32bit digits. A valid return value +// is within [-2^31+1, 2^31-1]. This function returns -2^31 +// (std::numeric_limits::min()) when bad input s is passed. 
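+// A sketch of the digit layout, as implemented by the decoder in
+// vlq-base64.cc: each base64 digit carries five payload bits, least
+// significant group first, bit 5 is a continuation flag, and bit 0 of the
+// assembled value is the sign, e.g.
+//   size_t pos = 0;
+//   VLQBase64Decode("2H", 2, &pos);  // returns 123, leaves pos == 2
+// while "3H" decodes to -123.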
+V8_BASE_EXPORT int32_t VLQBase64Decode(const char* start, size_t sz, + size_t* pos); +} // namespace base +} // namespace v8 +#endif // V8_BASE_VLQ_BASE64_H_ diff --git a/deps/v8/src/builtins/OWNERS b/deps/v8/src/builtins/OWNERS new file mode 100644 index 00000000000000..450423f87850ba --- /dev/null +++ b/deps/v8/src/builtins/OWNERS @@ -0,0 +1,3 @@ +file://COMMON_OWNERS + +# COMPONENT: Blink>JavaScript>Runtime diff --git a/deps/v8/src/builtins/accessors.cc b/deps/v8/src/builtins/accessors.cc index 25d37d73b4a646..ea6308622da13b 100644 --- a/deps/v8/src/builtins/accessors.cc +++ b/deps/v8/src/builtins/accessors.cc @@ -287,7 +287,8 @@ void Accessors::StringLengthGetter( if (!value.IsString()) { // Not a string value. That means that we either got a String wrapper or // a Value with a String wrapper in its prototype chain. - value = JSValue::cast(*Utils::OpenHandle(*info.Holder())).value(); + value = + JSPrimitiveWrapper::cast(*Utils::OpenHandle(*info.Holder())).value(); } Object result = Smi::FromInt(String::cast(value).length()); info.GetReturnValue().Set(Utils::ToLocal(Handle(result, isolate))); @@ -305,7 +306,7 @@ Handle Accessors::MakeStringLengthInfo(Isolate* isolate) { static Handle GetFunctionPrototype(Isolate* isolate, Handle function) { if (!function->has_prototype()) { - Handle proto = isolate->factory()->NewFunctionPrototype(function); + Handle proto = isolate->factory()->NewFunctionPrototype(function); JSFunction::SetPrototype(function, proto); } return Handle(function->prototype(), isolate); diff --git a/deps/v8/src/builtins/arguments.tq b/deps/v8/src/builtins/arguments.tq index add66917c03689..6df5f801a3945a 100644 --- a/deps/v8/src/builtins/arguments.tq +++ b/deps/v8/src/builtins/arguments.tq @@ -34,13 +34,13 @@ namespace arguments { @export macro GetArgumentsFrameAndCount(implicit context: Context)(f: JSFunction): ArgumentsInfo { - let frame: Frame = LoadParentFramePointer(); + const frame: Frame = LoadParentFramePointer(); assert(frame.function == f); const shared: SharedFunctionInfo = f.shared_function_info; const formalParameterCount: bint = Convert(Convert(shared.formal_parameter_count)); - let argumentCount: bint = formalParameterCount; + const argumentCount: bint = formalParameterCount; const adaptor: ArgumentsAdaptorFrame = Cast(frame.caller) diff --git a/deps/v8/src/builtins/arm/builtins-arm.cc b/deps/v8/src/builtins/arm/builtins-arm.cc index 54c16932fa3a83..9b9956b0fbba0a 100644 --- a/deps/v8/src/builtins/arm/builtins-arm.cc +++ b/deps/v8/src/builtins/arm/builtins-arm.cc @@ -1093,11 +1093,11 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) { // 8-bit fields next to each other, so we could just optimize by writing a // 16-bit. These static asserts guard our assumption is valid. STATIC_ASSERT(BytecodeArray::kBytecodeAgeOffset == - BytecodeArray::kOSRNestingLevelOffset + kCharSize); + BytecodeArray::kOsrNestingLevelOffset + kCharSize); STATIC_ASSERT(BytecodeArray::kNoAgeBytecodeAge == 0); __ mov(r9, Operand(0)); __ strh(r9, FieldMemOperand(kInterpreterBytecodeArrayRegister, - BytecodeArray::kOSRNestingLevelOffset)); + BytecodeArray::kOsrNestingLevelOffset)); // Load the initial bytecode offset. __ mov(kInterpreterBytecodeOffsetRegister, @@ -1509,13 +1509,16 @@ void Generate_ContinueToBuiltinHelper(MacroAssembler* masm, __ ldr(fp, MemOperand( sp, BuiltinContinuationFrameConstants::kFixedFrameSizeFromFp)); + // Load builtin index (stored as a Smi) and use it to get the builtin start + // address from the builtins table. 
UseScratchRegisterScope temps(masm); - Register scratch = temps.Acquire(); - __ Pop(scratch); + Register builtin = temps.Acquire(); + __ Pop(builtin); __ add(sp, sp, Operand(BuiltinContinuationFrameConstants::kFixedFrameSizeFromFp)); __ Pop(lr); - __ add(pc, scratch, Operand(Code::kHeaderSize - kHeapObjectTag)); + __ LoadEntryFromBuiltinIndex(builtin); + __ bx(builtin); } } // namespace @@ -2577,7 +2580,7 @@ void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size, __ tst(sp, Operand(frame_alignment_mask)); __ b(eq, &alignment_as_expected); // Don't use Check here, as it will call Runtime_Abort re-entering here. - __ stop("Unexpected alignment"); + __ stop(); __ bind(&alignment_as_expected); } } @@ -2606,7 +2609,7 @@ void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size, __ CompareRoot(r3, RootIndex::kTheHoleValue); // Cannot use check here as it attempts to generate call into runtime. __ b(eq, &okay); - __ stop("Unexpected pending exception"); + __ stop(); __ bind(&okay); } @@ -2835,19 +2838,25 @@ void CallApiFunctionAndReturn(MacroAssembler* masm, Register function_address, DCHECK(function_address == r1 || function_address == r2); - Label profiler_disabled; - Label end_profiler_check; + Label profiler_enabled, end_profiler_check; __ Move(r9, ExternalReference::is_profiling_address(isolate)); __ ldrb(r9, MemOperand(r9, 0)); __ cmp(r9, Operand(0)); - __ b(eq, &profiler_disabled); - - // Additional parameter is the address of the actual callback. - __ Move(r3, thunk_ref); - __ jmp(&end_profiler_check); - - __ bind(&profiler_disabled); - __ Move(r3, function_address); + __ b(ne, &profiler_enabled); + __ Move(r9, ExternalReference::address_of_runtime_stats_flag()); + __ ldr(r9, MemOperand(r9, 0)); + __ cmp(r9, Operand(0)); + __ b(ne, &profiler_enabled); + { + // Call the api function directly. + __ Move(r3, function_address); + __ b(&end_profiler_check); + } + __ bind(&profiler_enabled); + { + // Additional parameter is the address of the actual callback. + __ Move(r3, thunk_ref); + } __ bind(&end_profiler_check); // Allocate HandleScope in callee-save registers. diff --git a/deps/v8/src/builtins/arm64/builtins-arm64.cc b/deps/v8/src/builtins/arm64/builtins-arm64.cc index f81a1955eeb474..bcee8f0b5dcbbe 100644 --- a/deps/v8/src/builtins/arm64/builtins-arm64.cc +++ b/deps/v8/src/builtins/arm64/builtins-arm64.cc @@ -1201,10 +1201,10 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) { // 8-bit fields next to each other, so we could just optimize by writing a // 16-bit. These static asserts guard our assumption is valid. STATIC_ASSERT(BytecodeArray::kBytecodeAgeOffset == - BytecodeArray::kOSRNestingLevelOffset + kCharSize); + BytecodeArray::kOsrNestingLevelOffset + kCharSize); STATIC_ASSERT(BytecodeArray::kNoAgeBytecodeAge == 0); __ Strh(wzr, FieldMemOperand(kInterpreterBytecodeArrayRegister, - BytecodeArray::kOSRNestingLevelOffset)); + BytecodeArray::kOsrNestingLevelOffset)); // Load the initial bytecode offset. __ Mov(kInterpreterBytecodeOffsetRegister, @@ -1683,18 +1683,20 @@ void Generate_ContinueToBuiltinHelper(MacroAssembler* masm, if (java_script_builtin) __ SmiUntag(kJavaScriptCallArgCountRegister); - // Load builtin object. + // Load builtin index (stored as a Smi) and use it to get the builtin start + // address from the builtins table. 
UseScratchRegisterScope temps(masm); Register builtin = temps.AcquireX(); - __ Ldr(builtin, - MemOperand(fp, BuiltinContinuationFrameConstants::kBuiltinOffset)); + __ Ldr( + builtin, + MemOperand(fp, BuiltinContinuationFrameConstants::kBuiltinIndexOffset)); // Restore fp, lr. __ Mov(sp, fp); __ Pop(fp, lr); - // Call builtin. - __ JumpCodeObject(builtin); + __ LoadEntryFromBuiltinIndex(builtin); + __ Jump(builtin); } } // namespace @@ -3400,16 +3402,23 @@ void CallApiFunctionAndReturn(MacroAssembler* masm, Register function_address, DCHECK(function_address.is(x1) || function_address.is(x2)); - Label profiler_disabled; - Label end_profiler_check; + Label profiler_enabled, end_profiler_check; __ Mov(x10, ExternalReference::is_profiling_address(isolate)); __ Ldrb(w10, MemOperand(x10)); - __ Cbz(w10, &profiler_disabled); - __ Mov(x3, thunk_ref); - __ B(&end_profiler_check); - - __ Bind(&profiler_disabled); - __ Mov(x3, function_address); + __ Cbnz(w10, &profiler_enabled); + __ Mov(x10, ExternalReference::address_of_runtime_stats_flag()); + __ Ldrsw(w10, MemOperand(x10)); + __ Cbnz(w10, &profiler_enabled); + { + // Call the api function directly. + __ Mov(x3, function_address); + __ B(&end_profiler_check); + } + __ Bind(&profiler_enabled); + { + // Additional parameter is the address of the actual callback. + __ Mov(x3, thunk_ref); + } __ Bind(&end_profiler_check); // Save the callee-save registers we are going to use. diff --git a/deps/v8/src/builtins/array-copywithin.tq b/deps/v8/src/builtins/array-copywithin.tq index bfc95a28bf46df..94d871e8f74c13 100644 --- a/deps/v8/src/builtins/array-copywithin.tq +++ b/deps/v8/src/builtins/array-copywithin.tq @@ -9,7 +9,7 @@ namespace array_copywithin { // https://tc39.github.io/ecma262/#sec-array.prototype.copyWithin transitioning javascript builtin ArrayPrototypeCopyWithin( - context: Context, receiver: Object, ...arguments): Object { + js-implicit context: Context, receiver: Object)(...arguments): Object { // 1. Let O be ? ToObject(this value). const object: JSReceiver = ToObject_Inline(context, receiver); diff --git a/deps/v8/src/builtins/array-every.tq b/deps/v8/src/builtins/array-every.tq index 245b07556cba3a..3451cd769b92e7 100644 --- a/deps/v8/src/builtins/array-every.tq +++ b/deps/v8/src/builtins/array-every.tq @@ -4,8 +4,9 @@ namespace array { transitioning javascript builtin - ArrayEveryLoopEagerDeoptContinuation(implicit context: Context)( - receiver: Object, callback: Object, thisArg: Object, initialK: Object, + ArrayEveryLoopEagerDeoptContinuation( + js-implicit context: Context, receiver: Object)( + callback: Object, thisArg: Object, initialK: Object, length: Object): Object { // All continuation points in the optimized every implementation are // after the ToObject(O) call that ensures we are dealing with a @@ -25,9 +26,10 @@ namespace array { } transitioning javascript builtin - ArrayEveryLoopLazyDeoptContinuation(implicit context: Context)( - receiver: Object, callback: Object, thisArg: Object, initialK: Object, - length: Object, result: Object): Object { + ArrayEveryLoopLazyDeoptContinuation( + js-implicit context: Context, receiver: Object)( + callback: Object, thisArg: Object, initialK: Object, length: Object, + result: Object): Object { // All continuation points in the optimized every implementation are // after the ToObject(O) call that ensures we are dealing with a // JSReceiver. 
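// (A sketch of the js-implicit migration pattern repeated through these Torque
// hunks: receiver and context move into the implicit parameter list,
//   before: ArrayEvery(implicit context: Context)(receiver: Object, ...arguments)
//   after:  ArrayEvery(js-implicit context: Context, receiver: Object)(...arguments)
// so the explicit list carries only the JS-visible arguments.)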
@@ -51,9 +53,9 @@ namespace array { } transitioning builtin ArrayEveryLoopContinuation(implicit context: Context)( - receiver: JSReceiver, callbackfn: Callable, thisArg: Object, - array: Object, o: JSReceiver, initialK: Number, length: Number, - initialTo: Object): Object { + _receiver: JSReceiver, callbackfn: Callable, thisArg: Object, + _array: Object, o: JSReceiver, initialK: Number, length: Number, + _initialTo: Object): Object { // 5. Let k be 0. // 6. Repeat, while k < len for (let k: Number = initialK; k < length; k++) { @@ -88,7 +90,7 @@ namespace array { labels Bailout(Smi) { let k: Smi = 0; const smiLen = Cast(len) otherwise goto Bailout(k); - let fastO: FastJSArray = Cast(o) otherwise goto Bailout(k); + const fastO: FastJSArray = Cast(o) otherwise goto Bailout(k); let fastOW = NewFastJSArrayWitness(fastO); // Build a fast loop over the smi array. @@ -109,12 +111,10 @@ namespace array { // https://tc39.github.io/ecma262/#sec-array.prototype.every transitioning javascript builtin - ArrayEvery(implicit context: Context)(receiver: Object, ...arguments): + ArrayEvery(js-implicit context: Context, receiver: Object)(...arguments): Object { try { - if (IsNullOrUndefined(receiver)) { - goto NullOrUndefinedError; - } + RequireObjectCoercible(receiver, 'Array.prototype.every'); // 1. Let O be ? ToObject(this value). const o: JSReceiver = ToObject_Inline(context, receiver); @@ -144,8 +144,5 @@ namespace array { label TypeError deferred { ThrowTypeError(kCalledNonCallable, arguments[0]); } - label NullOrUndefinedError deferred { - ThrowTypeError(kCalledOnNullOrUndefined, 'Array.prototype.every'); - } } } diff --git a/deps/v8/src/builtins/array-filter.tq b/deps/v8/src/builtins/array-filter.tq index 4bf175a787aefe..9acd0d04ee3cd7 100644 --- a/deps/v8/src/builtins/array-filter.tq +++ b/deps/v8/src/builtins/array-filter.tq @@ -4,9 +4,10 @@ namespace array_filter { transitioning javascript builtin - ArrayFilterLoopEagerDeoptContinuation(implicit context: Context)( - receiver: Object, callback: Object, thisArg: Object, array: Object, - initialK: Object, length: Object, initialTo: Object): Object { + ArrayFilterLoopEagerDeoptContinuation( + js-implicit context: Context, receiver: Object)( + callback: Object, thisArg: Object, array: Object, initialK: Object, + length: Object, initialTo: Object): Object { // All continuation points in the optimized filter implementation are // after the ToObject(O) call that ensures we are dealing with a // JSReceiver. @@ -27,9 +28,10 @@ namespace array_filter { } transitioning javascript builtin - ArrayFilterLoopLazyDeoptContinuation(implicit context: Context)( - receiver: Object, callback: Object, thisArg: Object, array: Object, - initialK: Object, length: Object, valueK: Object, initialTo: Object, + ArrayFilterLoopLazyDeoptContinuation( + js-implicit context: Context, receiver: Object)( + callback: Object, thisArg: Object, array: Object, initialK: Object, + length: Object, valueK: Object, initialTo: Object, result: Object): Object { // All continuation points in the optimized filter implementation are // after the ToObject(O) call that ensures we are dealing with a @@ -42,9 +44,9 @@ namespace array_filter { const numberLength = Cast(length) otherwise unreachable; // This custom lazy deopt point is right after the callback. filter() needs - // to pick up at the next step, which is setting the callback result in - // the output array. After incrementing k and to, we can glide into the loop - // continuation builtin. 
+ // to pick up at the next step, which is setting the callback + // result in the output array. After incrementing k and to, we can glide + // into the loop continuation builtin. if (ToBoolean(result)) { FastCreateDataProperty(outputArray, numberTo, valueK); numberTo = numberTo + 1; @@ -58,7 +60,7 @@ namespace array_filter { } transitioning builtin ArrayFilterLoopContinuation(implicit context: Context)( - receiver: JSReceiver, callbackfn: Callable, thisArg: Object, + _receiver: JSReceiver, callbackfn: Callable, thisArg: Object, array: JSReceiver, o: JSReceiver, initialK: Number, length: Number, initialTo: Number): Object { let to: Number = initialTo; @@ -145,12 +147,10 @@ namespace array_filter { // https://tc39.github.io/ecma262/#sec-array.prototype.filter transitioning javascript builtin - ArrayFilter(implicit context: Context)(receiver: Object, ...arguments): + ArrayFilter(js-implicit context: Context, receiver: Object)(...arguments): Object { try { - if (IsNullOrUndefined(receiver)) { - goto NullOrUndefinedError; - } + RequireObjectCoercible(receiver, 'Array.prototype.filter'); // 1. Let O be ? ToObject(this value). const o: JSReceiver = ToObject_Inline(context, receiver); @@ -199,8 +199,5 @@ namespace array_filter { label TypeError deferred { ThrowTypeError(kCalledNonCallable, arguments[0]); } - label NullOrUndefinedError deferred { - ThrowTypeError(kCalledOnNullOrUndefined, 'Array.prototype.filter'); - } } } diff --git a/deps/v8/src/builtins/array-find.tq b/deps/v8/src/builtins/array-find.tq index 28223e4c492bdc..ef54dd4666ef72 100644 --- a/deps/v8/src/builtins/array-find.tq +++ b/deps/v8/src/builtins/array-find.tq @@ -4,8 +4,9 @@ namespace array_find { transitioning javascript builtin - ArrayFindLoopEagerDeoptContinuation(implicit context: Context)( - receiver: Object, callback: Object, thisArg: Object, initialK: Object, + ArrayFindLoopEagerDeoptContinuation( + js-implicit context: Context, receiver: Object)( + callback: Object, thisArg: Object, initialK: Object, length: Object): Object { // All continuation points in the optimized find implementation are // after the ToObject(O) call that ensures we are dealing with a @@ -24,9 +25,10 @@ namespace array_find { } transitioning javascript builtin - ArrayFindLoopLazyDeoptContinuation(implicit context: Context)( - receiver: Object, callback: Object, thisArg: Object, initialK: Object, - length: Object, result: Object): Object { + ArrayFindLoopLazyDeoptContinuation( + js-implicit context: Context, receiver: Object)( + _callback: Object, _thisArg: Object, _initialK: Object, _length: Object, + _result: Object): Object { // This deopt continuation point is never actually called, it just // exists to make stack traces correct from a ThrowTypeError if the // callback was found to be non-callable. @@ -37,15 +39,16 @@ namespace array_find { // happens right after the callback and it's returned value must be handled // before iteration continues. 
transitioning javascript builtin - ArrayFindLoopAfterCallbackLazyDeoptContinuation(implicit context: Context)( - receiver: Object, callback: Object, thisArg: Object, initialK: Object, - length: Object, foundValue: Object, isFound: Object): Object { + ArrayFindLoopAfterCallbackLazyDeoptContinuation( + js-implicit context: Context, receiver: Object)( + callback: Object, thisArg: Object, initialK: Object, length: Object, + foundValue: Object, isFound: Object): Object { // All continuation points in the optimized find implementation are // after the ToObject(O) call that ensures we are dealing with a // JSReceiver. const jsreceiver = Cast(receiver) otherwise unreachable; const callbackfn = Cast(callback) otherwise unreachable; - let numberK = Cast(initialK) otherwise unreachable; + const numberK = Cast(initialK) otherwise unreachable; const numberLength = Cast(length) otherwise unreachable; // This custom lazy deopt point is right after the callback. find() needs @@ -62,7 +65,7 @@ namespace array_find { } transitioning builtin ArrayFindLoopContinuation(implicit context: Context)( - receiver: JSReceiver, callbackfn: Callable, thisArg: Object, + _receiver: JSReceiver, callbackfn: Callable, thisArg: Object, o: JSReceiver, initialK: Number, length: Number): Object { // 5. Let k be 0. // 6. Repeat, while k < len @@ -116,12 +119,10 @@ namespace array_find { // https://tc39.github.io/ecma262/#sec-array.prototype.find transitioning javascript builtin - ArrayPrototypeFind(implicit context: Context)(receiver: Object, ...arguments): - Object { + ArrayPrototypeFind(js-implicit context: Context, receiver: Object)( + ...arguments): Object { try { - if (IsNullOrUndefined(receiver)) { - goto NullOrUndefinedError; - } + RequireObjectCoercible(receiver, 'Array.prototype.find'); // 1. Let O be ? ToObject(this value). const o: JSReceiver = ToObject_Inline(context, receiver); @@ -151,8 +152,5 @@ namespace array_find { label NotCallableError deferred { ThrowTypeError(kCalledNonCallable, arguments[0]); } - label NullOrUndefinedError deferred { - ThrowTypeError(kCalledOnNullOrUndefined, 'Array.prototype.find'); - } } } diff --git a/deps/v8/src/builtins/array-findindex.tq b/deps/v8/src/builtins/array-findindex.tq index 00d8378dfa6979..5a8bb85fbadd4c 100644 --- a/deps/v8/src/builtins/array-findindex.tq +++ b/deps/v8/src/builtins/array-findindex.tq @@ -4,8 +4,9 @@ namespace array_findindex { transitioning javascript builtin - ArrayFindIndexLoopEagerDeoptContinuation(implicit context: Context)( - receiver: Object, callback: Object, thisArg: Object, initialK: Object, + ArrayFindIndexLoopEagerDeoptContinuation( + js-implicit context: Context, receiver: Object)( + callback: Object, thisArg: Object, initialK: Object, length: Object): Object { // All continuation points in the optimized findIndex implementation are // after the ToObject(O) call that ensures we are dealing with a @@ -24,9 +25,10 @@ namespace array_findindex { } transitioning javascript builtin - ArrayFindIndexLoopLazyDeoptContinuation(implicit context: Context)( - receiver: Object, callback: Object, thisArg: Object, initialK: Object, - length: Object, result: Object): Object { + ArrayFindIndexLoopLazyDeoptContinuation( + js-implicit context: Context, receiver: Object)( + _callback: Object, _thisArg: Object, _initialK: Object, _length: Object, + _result: Object): Object { // This deopt continuation point is never actually called, it just // exists to make stack traces correct from a ThrowTypeError if the // callback was found to be non-callable. 
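// (A sketch of how these continuation builtins are reached, assuming the usual
// deopt flow: optimized code that bails out inside the loop resumes the spec
// algorithm here, roughly
//   for (; k < len; k++) {
//     if (ToBoolean(Call(context, callbackfn, thisArg, o[k], k, o))) return k;
//   }
// with the lazy variant first consuming the callback result that was produced
// before the deopt.)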
@@ -37,16 +39,16 @@ namespace array_findindex { // happens right after the callback and it's returned value must be handled // before iteration continues. transitioning javascript builtin - ArrayFindIndexLoopAfterCallbackLazyDeoptContinuation(implicit context: - Context)( - receiver: Object, callback: Object, thisArg: Object, initialK: Object, - length: Object, foundValue: Object, isFound: Object): Object { + ArrayFindIndexLoopAfterCallbackLazyDeoptContinuation( + js-implicit context: Context, receiver: Object)( + callback: Object, thisArg: Object, initialK: Object, length: Object, + foundValue: Object, isFound: Object): Object { // All continuation points in the optimized findIndex implementation are // after the ToObject(O) call that ensures we are dealing with a // JSReceiver. const jsreceiver = Cast(receiver) otherwise unreachable; const callbackfn = Cast(callback) otherwise unreachable; - let numberK = Cast(initialK) otherwise unreachable; + const numberK = Cast(initialK) otherwise unreachable; const numberLength = Cast(length) otherwise unreachable; // This custom lazy deopt point is right after the callback. find() needs @@ -64,7 +66,7 @@ namespace array_findindex { transitioning builtin ArrayFindIndexLoopContinuation(implicit context: Context)( - receiver: JSReceiver, callbackfn: Callable, thisArg: Object, + _receiver: JSReceiver, callbackfn: Callable, thisArg: Object, o: JSReceiver, initialK: Number, length: Number): Number { // 5. Let k be 0. // 6. Repeat, while k < len @@ -118,12 +120,10 @@ namespace array_findindex { // https://tc39.github.io/ecma262/#sec-array.prototype.findIndex transitioning javascript builtin - ArrayPrototypeFindIndex(implicit context: - Context)(receiver: Object, ...arguments): Object { + ArrayPrototypeFindIndex(js-implicit context: Context, receiver: Object)( + ...arguments): Object { try { - if (IsNullOrUndefined(receiver)) { - goto NullOrUndefinedError; - } + RequireObjectCoercible(receiver, 'Array.prototype.findIndex'); // 1. Let O be ? ToObject(this value). 
const o: JSReceiver = ToObject_Inline(context, receiver); @@ -154,8 +154,5 @@ namespace array_findindex { label NotCallableError deferred { ThrowTypeError(kCalledNonCallable, arguments[0]); } - label NullOrUndefinedError deferred { - ThrowTypeError(kCalledOnNullOrUndefined, 'Array.prototype.findIndex'); - } } } diff --git a/deps/v8/src/builtins/array-foreach.tq b/deps/v8/src/builtins/array-foreach.tq index d362e95950dc62..f52d944291ec7f 100644 --- a/deps/v8/src/builtins/array-foreach.tq +++ b/deps/v8/src/builtins/array-foreach.tq @@ -4,8 +4,9 @@ namespace array_foreach { transitioning javascript builtin - ArrayForEachLoopEagerDeoptContinuation(implicit context: Context)( - receiver: Object, callback: Object, thisArg: Object, initialK: Object, + ArrayForEachLoopEagerDeoptContinuation( + js-implicit context: Context, receiver: Object)( + callback: Object, thisArg: Object, initialK: Object, length: Object): Object { // All continuation points in the optimized forEach implemntation are // after the ToObject(O) call that ensures we are dealing with a @@ -21,9 +22,10 @@ namespace array_foreach { } transitioning javascript builtin - ArrayForEachLoopLazyDeoptContinuation(implicit context: Context)( - receiver: Object, callback: Object, thisArg: Object, initialK: Object, - length: Object, result: Object): Object { + ArrayForEachLoopLazyDeoptContinuation( + js-implicit context: Context, receiver: Object)( + callback: Object, thisArg: Object, initialK: Object, length: Object, + _result: Object): Object { // All continuation points in the optimized forEach implemntation are // after the ToObject(O) call that ensures we are dealing with a // JSReceiver. @@ -38,9 +40,9 @@ namespace array_foreach { } transitioning builtin ArrayForEachLoopContinuation(implicit context: Context)( - receiver: JSReceiver, callbackfn: Callable, thisArg: Object, - array: Object, o: JSReceiver, initialK: Number, len: Number, - to: Object): Object { + _receiver: JSReceiver, callbackfn: Callable, thisArg: Object, + _array: Object, o: JSReceiver, initialK: Number, len: Number, + _to: Object): Object { // variables {array} and {to} are ignored. // 5. Let k be 0. @@ -72,7 +74,7 @@ namespace array_foreach { labels Bailout(Smi) { let k: Smi = 0; const smiLen = Cast(len) otherwise goto Bailout(k); - let fastO = Cast(o) otherwise goto Bailout(k); + const fastO = Cast(o) otherwise goto Bailout(k); let fastOW = NewFastJSArrayWitness(fastO); // Build a fast loop over the smi array. @@ -90,11 +92,10 @@ namespace array_foreach { // https://tc39.github.io/ecma262/#sec-array.prototype.foreach transitioning javascript builtin - ArrayForEach(context: Context, receiver: Object, ...arguments): Object { + ArrayForEach(js-implicit context: Context, receiver: Object)(...arguments): + Object { try { - if (IsNullOrUndefined(receiver)) { - goto NullOrUndefinedError; - } + RequireObjectCoercible(receiver, 'Array.prototype.forEach'); // 1. Let O be ? ToObject(this value). 
const o: JSReceiver = ToObject_Inline(context, receiver); @@ -127,8 +128,5 @@ namespace array_foreach { label TypeError deferred { ThrowTypeError(kCalledNonCallable, arguments[0]); } - label NullOrUndefinedError deferred { - ThrowTypeError(kCalledOnNullOrUndefined, 'Array.prototype.forEach'); - } } } diff --git a/deps/v8/src/builtins/array-join.tq b/deps/v8/src/builtins/array-join.tq index 72e1a3661ecb33..c04233b22244ec 100644 --- a/deps/v8/src/builtins/array-join.tq +++ b/deps/v8/src/builtins/array-join.tq @@ -37,7 +37,7 @@ namespace array_join { const array: JSArray = UnsafeCast(receiver); const fixedArray: FixedArray = UnsafeCast(array.elements); const element: Object = fixedArray.objects[UnsafeCast(k)]; - return element == Hole ? kEmptyString : element; + return element == TheHole ? kEmptyString : element; } LoadJoinElement( @@ -56,7 +56,7 @@ namespace array_join { assert(!IsDetachedBuffer(typedArray.buffer)); return typed_array::LoadFixedTypedArrayElementAsTagged( typedArray.data_ptr, UnsafeCast(k), - typed_array::KindForArrayType(), SMI_PARAMETERS); + typed_array::KindForArrayType()); } transitioning builtin ConvertToLocaleString( @@ -103,8 +103,8 @@ namespace array_join { } CannotUseSameArrayAccessor(implicit context: Context)( - loadFn: LoadJoinElementFn, receiver: JSReceiver, initialMap: Map, - initialLen: Number): never + _loadFn: LoadJoinElementFn, receiver: JSReceiver, _initialMap: Map, + _initialLen: Number): never labels Cannot, Can { const typedArray: JSTypedArray = UnsafeCast(receiver); if (IsDetachedBuffer(typedArray.buffer)) goto Cannot; @@ -246,7 +246,7 @@ namespace array_join { case (nofSeparators: Number): { return StringRepeat(context, sep, nofSeparators); } - case (obj: Object): { + case (Object): { unreachable; } } @@ -448,7 +448,7 @@ namespace array_join { const previouslyVisited: Object = stack.objects[i]; // Add `receiver` to the first open slot - if (previouslyVisited == Hole) { + if (previouslyVisited == TheHole) { stack.objects[i] = receiver; return True; } @@ -473,7 +473,7 @@ namespace array_join { try { const stack: FixedArray = LoadJoinStack() otherwise IfUninitialized; - if (stack.objects[0] == Hole) { + if (stack.objects[0] == TheHole) { stack.objects[0] = receiver; } else if (JoinStackPush(stack, receiver) == False) deferred { @@ -504,7 +504,7 @@ namespace array_join { SetJoinStack(newStack); } else { - stack.objects[i] = Hole; + stack.objects[i] = TheHole; } return Undefined; } @@ -521,7 +521,7 @@ namespace array_join { // Builtin call was not nested (receiver is the first entry) and // did not contain other nested arrays that expanded the stack. if (stack.objects[0] == receiver && len == kMinJoinStackSize) { - StoreFixedArrayElement(stack, 0, Hole, SKIP_WRITE_BARRIER); + StoreFixedArrayElement(stack, 0, TheHole, SKIP_WRITE_BARRIER); } else deferred { JoinStackPop(stack, receiver); @@ -535,7 +535,7 @@ namespace array_join { sepObj: Object, locales: Object, options: Object): Object { // 3. If separator is undefined, let sep be the single-element String ",". // 4. Else, let sep be ? ToString(separator). - let sep: String = + const sep: String = sepObj == Undefined ? 
',' : ToString_Inline(context, sepObj); // If the receiver is not empty and not already being joined, continue with @@ -557,7 +557,8 @@ namespace array_join { // https://tc39.github.io/ecma262/#sec-array.prototype.join transitioning javascript builtin - ArrayPrototypeJoin(context: Context, receiver: Object, ...arguments): Object { + ArrayPrototypeJoin(js-implicit context: Context, receiver: Object)( + ...arguments): Object { const separator: Object = arguments[0]; // 1. Let O be ? ToObject(this value). @@ -566,8 +567,8 @@ namespace array_join { // 2. Let len be ? ToLength(? Get(O, "length")). const len: Number = GetLengthProperty(o); - // Only handle valid array lengths. Although the spec allows larger values, - // this matches historical V8 behavior. + // Only handle valid array lengths. Although the spec allows larger + // values, this matches historical V8 behavior. if (len > kMaxArrayIndex + 1) ThrowTypeError(kInvalidArrayLength); return CycleProtectedArrayJoin( @@ -576,7 +577,7 @@ namespace array_join { // https://tc39.github.io/ecma262/#sec-array.prototype.tolocalestring transitioning javascript builtin ArrayPrototypeToLocaleString( - context: Context, receiver: Object, ...arguments): Object { + js-implicit context: Context, receiver: Object)(...arguments): Object { const locales: Object = arguments[0]; const options: Object = arguments[1]; @@ -586,8 +587,8 @@ namespace array_join { // 2. Let len be ? ToLength(? Get(O, "length")). const len: Number = GetLengthProperty(o); - // Only handle valid array lengths. Although the spec allows larger values, - // this matches historical V8 behavior. + // Only handle valid array lengths. Although the spec allows larger + // values, this matches historical V8 behavior. if (len > kMaxArrayIndex + 1) ThrowTypeError(kInvalidArrayLength); return CycleProtectedArrayJoin( @@ -596,7 +597,7 @@ namespace array_join { // https://tc39.github.io/ecma262/#sec-array.prototype.tostring transitioning javascript builtin ArrayPrototypeToString( - context: Context, receiver: Object, ...arguments): Object { + js-implicit context: Context, receiver: Object)(...arguments): Object { // 1. Let array be ? ToObject(this value). 
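For the repeated length guard in these join, toLocaleString, and toString hunks, the arithmetic is worth spelling out: kMaxArrayIndex is the largest valid array index, 2**32 - 2, so kMaxArrayIndex + 1 is 2**32 - 1, the largest length a real JSArray can have. ToLength, by contrast, can yield anything up to 2**53 - 1 for a generic array-like receiver, and the spec would allow joining it; throwing instead preserves historical V8 behavior, as the rewrapped comment says. The guard, with an added gloss:

    // Rejects only lengths no real JSArray can have, i.e. only array-likes
    // reporting a length above 2**32 - 1 reach the throw.
    if (len > kMaxArrayIndex + 1) ThrowTypeError(kInvalidArrayLength);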
const array: JSReceiver = ToObject_Inline(context, receiver); @@ -617,7 +618,7 @@ namespace array_join { // https://tc39.github.io/ecma262/#sec-%typedarray%.prototype.join transitioning javascript builtin TypedArrayPrototypeJoin( - context: Context, receiver: Object, ...arguments): Object { + js-implicit context: Context, receiver: Object)(...arguments): Object { const separator: Object = arguments[0]; // Spec: ValidateTypedArray is applied to the this value prior to evaluating @@ -632,7 +633,7 @@ namespace array_join { // https://tc39.github.io/ecma262/#sec-%typedarray%.prototype.tolocalestring transitioning javascript builtin TypedArrayPrototypeToLocaleString( - context: Context, receiver: Object, ...arguments): Object { + js-implicit context: Context, receiver: Object)(...arguments): Object { const locales: Object = arguments[0]; const options: Object = arguments[1]; diff --git a/deps/v8/src/builtins/array-lastindexof.tq b/deps/v8/src/builtins/array-lastindexof.tq index d6213157dc1eff..5ebc451e435117 100644 --- a/deps/v8/src/builtins/array-lastindexof.tq +++ b/deps/v8/src/builtins/array-lastindexof.tq @@ -12,7 +12,7 @@ namespace array_lastindexof { labels IfHole { const elements: FixedArray = UnsafeCast(elements); const element: Object = elements.objects[index]; - if (element == Hole) goto IfHole; + if (element == TheHole) goto IfHole; return element; } @@ -131,7 +131,7 @@ namespace array_lastindexof { // https://tc39.github.io/ecma262/#sec-array.prototype.lastIndexOf transitioning javascript builtin ArrayPrototypeLastIndexOf( - context: Context, receiver: Object, ...arguments): Object { + js-implicit context: Context, receiver: Object)(...arguments): Object { // 1. Let O be ? ToObject(this value). const object: JSReceiver = ToObject_Inline(context, receiver); diff --git a/deps/v8/src/builtins/array-map.tq b/deps/v8/src/builtins/array-map.tq index 7546f1cd00542d..dda569c68236b3 100644 --- a/deps/v8/src/builtins/array-map.tq +++ b/deps/v8/src/builtins/array-map.tq @@ -4,9 +4,10 @@ namespace array_map { transitioning javascript builtin - ArrayMapLoopEagerDeoptContinuation(implicit context: Context)( - receiver: Object, callback: Object, thisArg: Object, array: Object, - initialK: Object, length: Object): Object { + ArrayMapLoopEagerDeoptContinuation( + js-implicit context: Context, receiver: Object)( + callback: Object, thisArg: Object, array: Object, initialK: Object, + length: Object): Object { // All continuation points in the optimized filter implementation are // after the ToObject(O) call that ensures we are dealing with a // JSReceiver. @@ -26,9 +27,10 @@ namespace array_map { } transitioning javascript builtin - ArrayMapLoopLazyDeoptContinuation(implicit context: Context)( - receiver: Object, callback: Object, thisArg: Object, array: Object, - initialK: Object, length: Object, result: Object): Object { + ArrayMapLoopLazyDeoptContinuation( + js-implicit context: Context, receiver: Object)( + callback: Object, thisArg: Object, array: Object, initialK: Object, + length: Object, result: Object): Object { // All continuation points in the optimized filter implementation are // after the ToObject(O) call that ensures we are dealing with a // JSReceiver. @@ -55,7 +57,7 @@ namespace array_map { } transitioning builtin ArrayMapLoopContinuation(implicit context: Context)( - receiver: JSReceiver, callbackfn: Callable, thisArg: Object, + _receiver: JSReceiver, callbackfn: Callable, thisArg: Object, array: JSReceiver, o: JSReceiver, initialK: Number, length: Number): Object { // 6. Let k be 0. 
@@ -94,7 +96,7 @@ namespace array_map { } CreateJSArray(implicit context: Context)(validLength: Smi): JSArray { - let length: Smi = this.fixedArray.length; + const length: Smi = this.fixedArray.length; assert(validLength <= length); let kind: ElementsKind = PACKED_SMI_ELEMENTS; if (!this.onlySmis) { @@ -114,7 +116,7 @@ namespace array_map { kind = FastHoleyElementsKind(kind); } - let map: Map = LoadJSArrayElementsMap(kind, LoadNativeContext(context)); + const map: Map = LoadJSArrayElementsMap(kind, LoadNativeContext(context)); let a: JSArray; if (IsDoubleElementsKind(kind)) { @@ -130,7 +132,7 @@ namespace array_map { elements.floats[i] = Convert(n); } case (h: HeapObject): { - assert(h == Hole); + assert(h == TheHole); } } } @@ -182,11 +184,11 @@ namespace array_map { } transitioning macro FastArrayMap(implicit context: Context)( - fastO: FastJSArray, len: Smi, callbackfn: Callable, + fastO: FastJSArrayForRead, len: Smi, callbackfn: Callable, thisArg: Object): JSArray labels Bailout(JSArray, Smi) { let k: Smi = 0; - let fastOW = NewFastJSArrayWitness(fastO); + let fastOW = NewFastJSArrayForReadWitness(fastO); let vector = NewVector(len); // Build a fast loop over the smi array. @@ -220,24 +222,12 @@ namespace array_map { return vector.CreateJSArray(len); } - // Bails out if the slow path needs to be taken. - // It's useful to structure it this way, because the consequences of - // using the slow path on species creation are interesting to the caller. - macro FastMapSpeciesCreate(implicit context: Context)( - receiver: JSReceiver, length: Number): JSArray labels Bailout { - if (IsArraySpeciesProtectorCellInvalid()) goto Bailout; - const o = Cast(receiver) otherwise Bailout; - const smiLength = Cast(length) otherwise Bailout; - const newMap: Map = - LoadJSArrayElementsMap(PACKED_SMI_ELEMENTS, LoadNativeContext(context)); - return AllocateJSArray(PACKED_SMI_ELEMENTS, newMap, smiLength, smiLength); - } - // https://tc39.github.io/ecma262/#sec-array.prototype.map transitioning javascript builtin - ArrayMap(implicit context: Context)(receiver: Object, ...arguments): Object { + ArrayMap(js-implicit context: Context, receiver: Object)(...arguments): + Object { try { - if (IsNullOrUndefined(receiver)) goto NullOrUndefinedError; + RequireObjectCoercible(receiver, 'Array.prototype.map'); // 1. Let O be ? ToObject(this value). const o: JSReceiver = ToObject_Inline(context, receiver); @@ -258,7 +248,7 @@ namespace array_map { try { // 5. Let A be ? ArraySpeciesCreate(O, len). if (IsArraySpeciesProtectorCellInvalid()) goto SlowSpeciesCreate; - const o: FastJSArray = Cast(receiver) + const o: FastJSArrayForRead = Cast(receiver) otherwise SlowSpeciesCreate; const smiLength: Smi = Cast(len) otherwise SlowSpeciesCreate; @@ -279,8 +269,5 @@ namespace array_map { label TypeError deferred { ThrowTypeError(kCalledNonCallable, arguments[0]); } - label NullOrUndefinedError deferred { - ThrowTypeError(kCalledOnNullOrUndefined, 'Array.prototype.map'); - } } } diff --git a/deps/v8/src/builtins/array-of.tq b/deps/v8/src/builtins/array-of.tq index 76123207fd34b0..72933186257231 100644 --- a/deps/v8/src/builtins/array-of.tq +++ b/deps/v8/src/builtins/array-of.tq @@ -5,7 +5,8 @@ namespace array_of { // https://tc39.github.io/ecma262/#sec-array.of transitioning javascript builtin - ArrayOf(implicit context: Context)(receiver: Object, ...arguments): Object { + ArrayOf(js-implicit context: Context, receiver: Object)(...arguments): + Object { // 1. Let len be the actual number of arguments passed to this function. 
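The map fast path above (and the reduce fast paths below) also trade FastJSArray and NewFastJSArrayWitness for FastJSArrayForRead and NewFastJSArrayForReadWitness. Judging from the transient type declarations later in this patch, the ForRead variants additionally admit arrays that are only safe to read from, so loops that never store through the witness can stay on the fast path for more inputs. The pattern, condensed from these fast paths with Torque's generic arguments written out (the Recheck revalidation call sits in unchanged context outside these hunks, so its placement here is inferred):

    let k: Smi = 0;
    const smiLen = Cast<Smi>(len) otherwise goto Bailout(k);
    const fastO = Cast<FastJSArrayForRead>(o) otherwise goto Bailout(k);
    let fastOW = NewFastJSArrayForReadWitness(fastO);
    for (; k < smiLen; k++) {
      // Revalidate after each user callback, which may have changed the
      // array's map or length.
      fastOW.Recheck() otherwise goto Bailout(k);
      const value: Object = fastOW.LoadElementNoHole(k)
          otherwise continue;
      // ... invoke the callback with {value} and {k} ...
    }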
const len: Smi = Convert(arguments.length); @@ -35,7 +36,7 @@ namespace array_of { // 7. Repeat, while k < len while (k < len) { // a. Let kValue be items[k]. - let kValue: Object = items[Convert(k)]; + const kValue: Object = items[Convert(k)]; // b. Let Pk be ! ToString(k). // c. Perform ? CreateDataPropertyOrThrow(A, Pk, kValue). diff --git a/deps/v8/src/builtins/array-reduce-right.tq b/deps/v8/src/builtins/array-reduce-right.tq index 33661c38d106c1..b1aa71b85b4623 100644 --- a/deps/v8/src/builtins/array-reduce-right.tq +++ b/deps/v8/src/builtins/array-reduce-right.tq @@ -4,8 +4,9 @@ namespace array { transitioning javascript builtin - ArrayReduceRightPreLoopEagerDeoptContinuation(implicit context: Context)( - receiver: Object, callback: Object, length: Object): Object { + ArrayReduceRightPreLoopEagerDeoptContinuation( + js-implicit context: Context, + receiver: Object)(callback: Object, length: Object): Object { // All continuation points in the optimized every implementation are // after the ToObject(O) call that ensures we are dealing with a // JSReceiver. @@ -21,12 +22,13 @@ namespace array { // the hole. The continuation stub will search for the initial non-hole // element, rightly throwing an exception if not found. return ArrayReduceRightLoopContinuation( - jsreceiver, callbackfn, Hole, jsreceiver, 0, numberLength); + jsreceiver, callbackfn, TheHole, jsreceiver, 0, numberLength); } transitioning javascript builtin - ArrayReduceRightLoopEagerDeoptContinuation(implicit context: Context)( - receiver: Object, callback: Object, initialK: Object, length: Object, + ArrayReduceRightLoopEagerDeoptContinuation( + js-implicit context: Context, receiver: Object)( + callback: Object, initialK: Object, length: Object, accumulator: Object): Object { // All continuation points in the optimized every implementation are // after the ToObject(O) call that ensures we are dealing with a @@ -45,27 +47,28 @@ namespace array { } transitioning javascript builtin - ArrayReduceRightLoopLazyDeoptContinuation(implicit context: Context)( - receiver: Object, callback: Object, initialK: Object, length: Object, + ArrayReduceRightLoopLazyDeoptContinuation( + js-implicit context: Context, receiver: Object)( + callback: Object, initialK: Object, length: Object, result: Object): Object { // All continuation points in the optimized every implementation are // after the ToObject(O) call that ensures we are dealing with a // JSReceiver. const jsreceiver = Cast(receiver) otherwise unreachable; const callbackfn = Cast(callback) otherwise unreachable; - let numberK = Cast(initialK) otherwise unreachable; + const numberK = Cast(initialK) otherwise unreachable; const numberLength = Cast(length) otherwise unreachable; // The accumulator is the result from the callback call which just occured. - let r = ArrayReduceRightLoopContinuation( + const r = ArrayReduceRightLoopContinuation( jsreceiver, callbackfn, result, jsreceiver, numberK, numberLength); return r; } transitioning builtin ArrayReduceRightLoopContinuation(implicit context: Context)( - receiver: JSReceiver, callbackfn: Callable, initialAccumulator: Object, - o: JSReceiver, initialK: Number, length: Number): Object { + _receiver: JSReceiver, callbackfn: Callable, initialAccumulator: Object, + o: JSReceiver, initialK: Number, _length: Number): Object { let accumulator = initialAccumulator; // 8b and 9. Repeat, while k >= 0 @@ -82,7 +85,7 @@ namespace array { // 8b iii and 9c i. Let kValue be ? Get(O, Pk). 
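About the parameter renames to _receiver, _array, _length, _to, and similar: the loop-continuation builtins must keep these parameters so their signatures match the deoptimizer's frame layout, but their bodies never read them, and a leading underscore is how newer Torque marks a name as deliberately unused (the same patch renames an unused CastError label to _CastError in base.tq, further down). A hypothetical continuation showing the shape:

    transitioning builtin ExampleLoopContinuation(implicit context: Context)(
        _receiver: JSReceiver, callbackfn: Callable, o: JSReceiver,
        initialK: Number, length: Number): Object {
      // {_receiver} is carried only for frame-layout compatibility.
      for (let k: Number = initialK; k < length; k++) {
        Call(context, callbackfn, Undefined, k, o);
      }
      return Undefined;
    }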
const value: Object = GetProperty(o, k); - if (accumulator == Hole) { + if (accumulator == TheHole) { // 8b iii 1. accumulator = value; } else { @@ -99,7 +102,7 @@ namespace array { // 8c. if kPresent is false, throw a TypeError exception. // If the accumulator is discovered with the sentinel hole value, // this means kPresent is false. - if (accumulator == Hole) { + if (accumulator == TheHole) { ThrowTypeError(kReduceNoInitial, 'Array.prototype.reduceRight'); } return accumulator; @@ -111,9 +114,9 @@ namespace array { labels Bailout(Number, Object) { let accumulator = initialAccumulator; const smiLen = Cast(len) otherwise goto Bailout(len - 1, accumulator); - let fastO = - Cast(o) otherwise goto Bailout(len - 1, accumulator); - let fastOW = NewFastJSArrayWitness(fastO); + const fastO = Cast(o) + otherwise goto Bailout(len - 1, accumulator); + let fastOW = NewFastJSArrayForReadWitness(fastO); // Build a fast loop over the array. for (let k: Smi = smiLen - 1; k >= 0; k--) { @@ -123,7 +126,7 @@ namespace array { if (k >= fastOW.Get().length) goto Bailout(k, accumulator); const value: Object = fastOW.LoadElementNoHole(k) otherwise continue; - if (accumulator == Hole) { + if (accumulator == TheHole) { accumulator = value; } else { accumulator = Call( @@ -131,7 +134,7 @@ namespace array { fastOW.Get()); } } - if (accumulator == Hole) { + if (accumulator == TheHole) { ThrowTypeError(kReduceNoInitial, 'Array.prototype.reduceRight'); } return accumulator; @@ -139,12 +142,10 @@ namespace array { // https://tc39.github.io/ecma262/#sec-array.prototype.reduceRight transitioning javascript builtin - ArrayReduceRight(implicit context: Context)(receiver: Object, ...arguments): - Object { + ArrayReduceRight(js-implicit context: Context, receiver: Object)( + ...arguments): Object { try { - if (IsNullOrUndefined(receiver)) { - goto NullOrUndefinedError; - } + RequireObjectCoercible(receiver, 'Array.prototype.reduceRight'); // 1. Let O be ? ToObject(this value). const o: JSReceiver = ToObject_Inline(context, receiver); @@ -162,7 +163,8 @@ namespace array { // exception. (This case is handled at the end of // ArrayReduceRightLoopContinuation). - const initialValue: Object = arguments.length > 1 ? arguments[1] : Hole; + const initialValue: Object = + arguments.length > 1 ? arguments[1] : TheHole; try { return FastArrayReduceRight(o, len, callbackfn, initialValue) @@ -176,8 +178,5 @@ namespace array { label NoCallableError deferred { ThrowTypeError(kCalledNonCallable, arguments[0]); } - label NullOrUndefinedError deferred { - ThrowTypeError(kCalledOnNullOrUndefined, 'Array.prototype.reduceRight'); - } } } diff --git a/deps/v8/src/builtins/array-reduce.tq b/deps/v8/src/builtins/array-reduce.tq index 67a112fd418878..a5f6feb9ccedf3 100644 --- a/deps/v8/src/builtins/array-reduce.tq +++ b/deps/v8/src/builtins/array-reduce.tq @@ -4,8 +4,9 @@ namespace array { transitioning javascript builtin - ArrayReducePreLoopEagerDeoptContinuation(implicit context: Context)( - receiver: Object, callback: Object, length: Object): Object { + ArrayReducePreLoopEagerDeoptContinuation( + js-implicit context: Context, + receiver: Object)(callback: Object, length: Object): Object { // All continuation points in the optimized every implementation are // after the ToObject(O) call that ensures we are dealing with a // JSReceiver. @@ -21,12 +22,13 @@ namespace array { // the hole. The continuation stub will search for the initial non-hole // element, rightly throwing an exception if not found. 
return ArrayReduceLoopContinuation( - jsreceiver, callbackfn, Hole, jsreceiver, 0, numberLength); + jsreceiver, callbackfn, TheHole, jsreceiver, 0, numberLength); } transitioning javascript builtin - ArrayReduceLoopEagerDeoptContinuation(implicit context: Context)( - receiver: Object, callback: Object, initialK: Object, length: Object, + ArrayReduceLoopEagerDeoptContinuation( + js-implicit context: Context, receiver: Object)( + callback: Object, initialK: Object, length: Object, accumulator: Object): Object { // All continuation points in the optimized every implementation are // after the ToObject(O) call that ensures we are dealing with a @@ -45,25 +47,26 @@ namespace array { } transitioning javascript builtin - ArrayReduceLoopLazyDeoptContinuation(implicit context: Context)( - receiver: Object, callback: Object, initialK: Object, length: Object, + ArrayReduceLoopLazyDeoptContinuation( + js-implicit context: Context, receiver: Object)( + callback: Object, initialK: Object, length: Object, result: Object): Object { // All continuation points in the optimized every implementation are // after the ToObject(O) call that ensures we are dealing with a // JSReceiver. const jsreceiver = Cast(receiver) otherwise unreachable; const callbackfn = Cast(callback) otherwise unreachable; - let numberK = Cast(initialK) otherwise unreachable; + const numberK = Cast(initialK) otherwise unreachable; const numberLength = Cast(length) otherwise unreachable; // The accumulator is the result from the callback call which just occured. - let r = ArrayReduceLoopContinuation( + const r = ArrayReduceLoopContinuation( jsreceiver, callbackfn, result, jsreceiver, numberK, numberLength); return r; } transitioning builtin ArrayReduceLoopContinuation(implicit context: Context)( - receiver: JSReceiver, callbackfn: Callable, initialAccumulator: Object, + _receiver: JSReceiver, callbackfn: Callable, initialAccumulator: Object, o: JSReceiver, initialK: Number, length: Number): Object { let accumulator = initialAccumulator; @@ -81,7 +84,7 @@ namespace array { // 6c. i. Let kValue be ? Get(O, Pk). const value: Object = GetProperty(o, k); - if (accumulator == Hole) { + if (accumulator == TheHole) { // 8b. accumulator = value; } else { @@ -98,7 +101,7 @@ namespace array { // 8c. if kPresent is false, throw a TypeError exception. // If the accumulator is discovered with the sentinel hole value, // this means kPresent is false. - if (accumulator == Hole) { + if (accumulator == TheHole) { ThrowTypeError(kReduceNoInitial, 'Array.prototype.reduce'); } return accumulator; @@ -110,9 +113,10 @@ namespace array { labels Bailout(Number, Object) { const k = 0; let accumulator = initialAccumulator; - const smiLen = Cast(len) otherwise goto Bailout(k, accumulator); - let fastO = Cast(o) otherwise goto Bailout(k, accumulator); - let fastOW = NewFastJSArrayWitness(fastO); + Cast(len) otherwise goto Bailout(k, accumulator); + const fastO = + Cast(o) otherwise goto Bailout(k, accumulator); + let fastOW = NewFastJSArrayForReadWitness(fastO); // Build a fast loop over the array. 
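Both reduce flavors use the renamed TheHole as an "absent initialValue" sentinel rather than as a user-visible value: the accumulator starts out as TheHole, adopts the first present element, and if it is still TheHole once the loop finishes, kReduceNoInitial is thrown. Condensed from the surrounding code:

    const initialValue: Object =
        arguments.length > 1 ? arguments[1] : TheHole;
    // ... then, for each present element {value} at index {k}:
    if (accumulator == TheHole) {
      // 8b: no initialValue yet; adopt the first present element.
      accumulator = value;
    } else {
      accumulator =
          Call(context, callbackfn, Undefined, accumulator, value, k, o);
    }
    // ... and after the loop:
    if (accumulator == TheHole) {
      ThrowTypeError(kReduceNoInitial, 'Array.prototype.reduce');
    }

The smiLen binding dropped just above is the related cleanup: the Smi cast of len is kept purely for its check-or-bailout effect, since the fast loop below compares k against len directly.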
for (let k: Smi = 0; k < len; k++) { @@ -122,7 +126,7 @@ namespace array { if (k >= fastOW.Get().length) goto Bailout(k, accumulator); const value: Object = fastOW.LoadElementNoHole(k) otherwise continue; - if (accumulator == Hole) { + if (accumulator == TheHole) { accumulator = value; } else { accumulator = Call( @@ -130,7 +134,7 @@ namespace array { fastOW.Get()); } } - if (accumulator == Hole) { + if (accumulator == TheHole) { ThrowTypeError(kReduceNoInitial, 'Array.prototype.reduce'); } return accumulator; @@ -138,12 +142,10 @@ namespace array { // https://tc39.github.io/ecma262/#sec-array.prototype.reduce transitioning javascript builtin - ArrayReduce(implicit context: Context)(receiver: Object, ...arguments): + ArrayReduce(js-implicit context: Context, receiver: Object)(...arguments): Object { try { - if (IsNullOrUndefined(receiver)) { - goto NullOrUndefinedError; - } + RequireObjectCoercible(receiver, 'Array.prototype.reduce'); // 1. Let O be ? ToObject(this value). const o: JSReceiver = ToObject_Inline(context, receiver); @@ -161,7 +163,8 @@ namespace array { // exception. (This case is handled at the end of // ArrayReduceLoopContinuation). - const initialValue: Object = arguments.length > 1 ? arguments[1] : Hole; + const initialValue: Object = + arguments.length > 1 ? arguments[1] : TheHole; try { return FastArrayReduce(o, len, callbackfn, initialValue) @@ -175,8 +178,5 @@ namespace array { label NoCallableError deferred { ThrowTypeError(kCalledNonCallable, arguments[0]); } - label NullOrUndefinedError deferred { - ThrowTypeError(kCalledOnNullOrUndefined, 'Array.prototype.reduce'); - } } } diff --git a/deps/v8/src/builtins/array-reverse.tq b/deps/v8/src/builtins/array-reverse.tq index f1ba8fddf7cafe..82d2e6b6058661 100644 --- a/deps/v8/src/builtins/array-reverse.tq +++ b/deps/v8/src/builtins/array-reverse.tq @@ -165,7 +165,7 @@ namespace array_reverse { // https://tc39.github.io/ecma262/#sec-array.prototype.reverse transitioning javascript builtin ArrayPrototypeReverse( - context: Context, receiver: Object, ...arguments): Object { + js-implicit context: Context, receiver: Object)(...arguments): Object { try { TryFastPackedArrayReverse(receiver) otherwise Baseline; return receiver; diff --git a/deps/v8/src/builtins/array-shift.tq b/deps/v8/src/builtins/array-shift.tq index 3c8c1491bb4dd0..4dd82d7b886d0a 100644 --- a/deps/v8/src/builtins/array-shift.tq +++ b/deps/v8/src/builtins/array-shift.tq @@ -103,7 +103,7 @@ namespace array_shift { // https://tc39.github.io/ecma262/#sec-array.prototype.shift transitioning javascript builtin ArrayPrototypeShift( - implicit context: Context)(receiver: Object, ...arguments): Object { + js-implicit context: Context, receiver: Object)(...arguments): Object { try { return TryFastArrayShift(receiver, arguments) otherwise Slow; } diff --git a/deps/v8/src/builtins/array-slice.tq b/deps/v8/src/builtins/array-slice.tq index 51623294082433..c3a6ac75cb0ec7 100644 --- a/deps/v8/src/builtins/array-slice.tq +++ b/deps/v8/src/builtins/array-slice.tq @@ -63,7 +63,7 @@ namespace array_slice { for (let current: Smi = start; current < to; ++current) { const e: Object = sloppyElements.objects[current + kSloppyArgumentsParameterMapStart]; - const newElement: Object = e != Hole ? + const newElement: Object = e != TheHole ? 
argumentsContext[UnsafeCast(e)] : unmappedElements.objects[current]; // It is safe to skip the write barrier here because resultElements was @@ -105,7 +105,6 @@ namespace array_slice { return ExtractFastJSArray(context, a, start, count); } case (a: JSArgumentsObjectWithLength): { - const nativeContext: NativeContext = LoadNativeContext(context); const map: Map = a.map; if (IsFastAliasedArgumentsMap(map)) { return HandleFastAliasedSloppyArgumentsSlice(context, a, start, count) @@ -123,8 +122,8 @@ namespace array_slice { // https://tc39.github.io/ecma262/#sec-array.prototype.slice transitioning javascript builtin - ArrayPrototypeSlice(context: Context, receiver: Object, ...arguments): - Object { + ArrayPrototypeSlice(js-implicit context: Context, receiver: Object)( + ...arguments): Object { // Handle array cloning case if the receiver is a fast array. if (arguments.length == 0) { typeswitch (receiver) { @@ -186,7 +185,7 @@ namespace array_slice { // 10. Repeat, while k < final while (k < final) { // a. Let Pk be ! ToString(k). - let pK: Number = k; + const pK: Number = k; // b. Let kPresent be ? HasProperty(O, Pk). const fromPresent: Boolean = HasProperty(o, pK); diff --git a/deps/v8/src/builtins/array-some.tq b/deps/v8/src/builtins/array-some.tq index f68ea4ac30be66..a30af4e47a42c4 100644 --- a/deps/v8/src/builtins/array-some.tq +++ b/deps/v8/src/builtins/array-some.tq @@ -4,8 +4,9 @@ namespace array { transitioning javascript builtin - ArraySomeLoopEagerDeoptContinuation(implicit context: Context)( - receiver: Object, callback: Object, thisArg: Object, initialK: Object, + ArraySomeLoopEagerDeoptContinuation( + js-implicit context: Context, receiver: Object)( + callback: Object, thisArg: Object, initialK: Object, length: Object): Object { // All continuation points in the optimized some implementation are // after the ToObject(O) call that ensures we are dealing with a @@ -25,9 +26,10 @@ namespace array { } transitioning javascript builtin - ArraySomeLoopLazyDeoptContinuation(implicit context: Context)( - receiver: Object, callback: Object, thisArg: Object, initialK: Object, - length: Object, result: Object): Object { + ArraySomeLoopLazyDeoptContinuation( + js-implicit context: Context, receiver: Object)( + callback: Object, thisArg: Object, initialK: Object, length: Object, + result: Object): Object { // All continuation points in the optimized some implementation are // after the ToObject(O) call that ensures we are dealing with a // JSReceiver. @@ -51,9 +53,9 @@ namespace array { } transitioning builtin ArraySomeLoopContinuation(implicit context: Context)( - receiver: JSReceiver, callbackfn: Callable, thisArg: Object, - array: Object, o: JSReceiver, initialK: Number, length: Number, - initialTo: Object): Object { + _receiver: JSReceiver, callbackfn: Callable, thisArg: Object, + _array: Object, o: JSReceiver, initialK: Number, length: Number, + _initialTo: Object): Object { // 5. Let k be 0. // 6. Repeat, while k < len for (let k: Number = initialK; k < length; k++) { @@ -88,7 +90,7 @@ namespace array { labels Bailout(Smi) { let k: Smi = 0; const smiLen = Cast(len) otherwise goto Bailout(k); - let fastO = Cast(o) otherwise goto Bailout(k); + const fastO = Cast(o) otherwise goto Bailout(k); let fastOW = NewFastJSArrayWitness(fastO); // Build a fast loop over the smi array. 
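Array.prototype.some now follows the same continuation scheme as forEach, map, and the reduce variants above. As far as these hunks show, the naming is consistent: an Eager continuation resumes before the callback for initialK has run, a Lazy continuation resumes after the callback has returned and receives its return value (result, or _result where it is ignored, as in forEach), and the PreLoop variants (reduce and reduceRight only) resume before the loop starts. Every continuation first re-establishes, with unchecked casts, the invariants the optimized code had already proved:

    // Safe because every continuation point sits after the ToObject(O)
    // call, so the receiver is known to be a JSReceiver.
    const jsreceiver = Cast<JSReceiver>(receiver) otherwise unreachable;
    const callbackfn = Cast<Callable>(callback) otherwise unreachable;
    const numberK = Cast<Number>(initialK) otherwise unreachable;
    const numberLength = Cast<Number>(length) otherwise unreachable;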
@@ -109,11 +111,10 @@ namespace array { // https://tc39.github.io/ecma262/#sec-array.prototype.some transitioning javascript builtin - ArraySome(implicit context: Context)(receiver: Object, ...arguments): Object { + ArraySome(js-implicit context: Context, receiver: Object)(...arguments): + Object { try { - if (IsNullOrUndefined(receiver)) { - goto NullOrUndefinedError; - } + RequireObjectCoercible(receiver, 'Array.prototype.some'); // 1. Let O be ? ToObject(this value). const o: JSReceiver = ToObject_Inline(context, receiver); @@ -143,8 +144,5 @@ namespace array { label TypeError deferred { ThrowTypeError(kCalledNonCallable, arguments[0]); } - label NullOrUndefinedError deferred { - ThrowTypeError(kCalledOnNullOrUndefined, 'Array.prototype.some'); - } } } diff --git a/deps/v8/src/builtins/array-splice.tq b/deps/v8/src/builtins/array-splice.tq index e24b51760c155e..3b65bb03d48bac 100644 --- a/deps/v8/src/builtins/array-splice.tq +++ b/deps/v8/src/builtins/array-splice.tq @@ -54,8 +54,7 @@ namespace array_splice { macro FastSplice(implicit context: Context)( args: Arguments, a: JSArray, length: Smi, newLength: Smi, - lengthDelta: Smi, actualStart: Smi, insertCount: Smi, - actualDeleteCount: Smi): void labels Bailout { + actualStart: Smi, insertCount: Smi, actualDeleteCount: Smi): void { // Make sure elements are writable. array::EnsureWriteableFastElements(a); @@ -77,7 +76,7 @@ namespace array_splice { UnsafeCast(elements), dstIndex, srcIndex, count); } else { // Grow. - let capacity: Smi = CalculateNewElementsCapacity(newLength); + const capacity: Smi = CalculateNewElementsCapacity(newLength); const newElements: FixedArrayType = Extract(elements, 0, actualStart, capacity); a.elements = newElements; @@ -168,12 +167,12 @@ namespace array_splice { if (IsFastSmiOrTaggedElementsKind(elementsKind)) { FastSplice( - args, a, length, newLength, lengthDelta, actualStart, insertCount, - actualDeleteCount) otherwise Bailout; + args, a, length, newLength, actualStart, insertCount, + actualDeleteCount); } else { FastSplice( - args, a, length, newLength, lengthDelta, actualStart, insertCount, - actualDeleteCount) otherwise Bailout; + args, a, length, newLength, actualStart, insertCount, + actualDeleteCount); } return deletedResult; @@ -301,8 +300,6 @@ namespace array_splice { context: Context, arguments: Arguments, o: JSReceiver, len: Number, actualStart: Number, insertCount: Smi, actualDeleteCount: Number): Object { - const affected: Number = len - actualStart - actualDeleteCount; - // 9. Let A be ? ArraySpeciesCreate(O, actualDeleteCount). const a: JSReceiver = ArraySpeciesCreate(context, o, actualDeleteCount); const itemCount: Number = insertCount; @@ -353,8 +350,8 @@ namespace array_splice { // https://tc39.github.io/ecma262/#sec-array.prototype.splice transitioning javascript builtin - ArrayPrototypeSplice(context: Context, receiver: Object, ...arguments): - Object { + ArrayPrototypeSplice(js-implicit context: Context, receiver: Object)( + ...arguments): Object { // 1. Let O be ? ToObject(this value). 
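FastSplice in array-splice.tq above loses both its lengthDelta parameter and its Bailout label: the delta is recomputable from newLength and length where needed, and once EnsureWriteableFastElements has run the macro has no remaining failure path, so the otherwise Bailout at its two call sites disappears as well. The call sites keep their elements-kind specialization; with the generic arguments written out (an inference from the two branches, not spelled out here), the calls read:

    if (IsFastSmiOrTaggedElementsKind(elementsKind)) {
      FastSplice<FixedArray, Object>(
          args, a, length, newLength, actualStart, insertCount,
          actualDeleteCount);
    } else {
      FastSplice<FixedDoubleArray, float64>(
          args, a, length, newLength, actualStart, insertCount,
          actualDeleteCount);
    }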
const o: JSReceiver = ToObject(context, receiver); diff --git a/deps/v8/src/builtins/array-unshift.tq b/deps/v8/src/builtins/array-unshift.tq index b2e746db470bab..e685d520cd963a 100644 --- a/deps/v8/src/builtins/array-unshift.tq +++ b/deps/v8/src/builtins/array-unshift.tq @@ -93,7 +93,7 @@ namespace array_unshift { // https://tc39.github.io/ecma262/#sec-array.prototype.unshift transitioning javascript builtin ArrayPrototypeUnshift( - context: Context, receiver: Object, ...arguments): Object { + js-implicit context: Context, receiver: Object)(...arguments): Object { try { TryFastArrayUnshift(context, receiver, arguments) otherwise Baseline; } diff --git a/deps/v8/src/builtins/array.tq b/deps/v8/src/builtins/array.tq index 9807db19c6d774..7e044e086b89b3 100644 --- a/deps/v8/src/builtins/array.tq +++ b/deps/v8/src/builtins/array.tq @@ -33,18 +33,19 @@ namespace array { } macro IsJSArray(implicit context: Context)(o: Object): bool { - try { - const array: JSArray = Cast(o) otherwise NotArray; - return true; - } - label NotArray { - return false; + typeswitch (o) { + case (JSArray): { + return true; + } + case (Object): { + return false; + } } } macro LoadElementOrUndefined(a: FixedArray, i: Smi): Object { const e: Object = a.objects[i]; - return e == Hole ? Undefined : e; + return e == TheHole ? Undefined : e; } macro LoadElementOrUndefined(a: FixedDoubleArray, i: Smi): NumberOrUndefined { @@ -62,26 +63,7 @@ namespace array { } macro StoreArrayHole(elements: FixedArray, k: Smi): void { - elements.objects[k] = Hole; - } - - macro CopyArrayElement( - elements: FixedArray, newElements: FixedArray, from: Smi, to: Smi): void { - const e: Object = elements.objects[from]; - newElements.objects[to] = e; - } - - macro CopyArrayElement( - elements: FixedDoubleArray, newElements: FixedDoubleArray, from: Smi, - to: Smi): void { - try { - const floatValue: float64 = LoadDoubleWithHoleCheck(elements, from) - otherwise FoundHole; - newElements.floats[to] = floatValue; - } - label FoundHole { - StoreArrayHole(newElements, to); - } + elements.objects[k] = TheHole; } extern macro SetPropertyLength(implicit context: Context)(Object, Number); diff --git a/deps/v8/src/builtins/base.tq b/deps/v8/src/builtins/base.tq index 76e1a486c815ab..4aa1d578374898 100644 --- a/deps/v8/src/builtins/base.tq +++ b/deps/v8/src/builtins/base.tq @@ -14,9 +14,11 @@ #include 'src/objects/js-generator.h' #include 'src/objects/js-promise.h' #include 'src/objects/js-regexp-string-iterator.h' -#include 'src/objects/module.h' +#include 'src/objects/js-weak-refs.h' #include 'src/objects/objects.h' +#include 'src/objects/source-text-module.h' #include 'src/objects/stack-frame-info.h' +#include 'src/objects/synthetic-module.h' #include 'src/objects/template-objects.h' type void; @@ -31,12 +33,16 @@ type PositiveSmi extends Smi; // The Smi value zero, which is often used as null for HeapObject types. type Zero extends PositiveSmi; +// A value with the size of Tagged which may contain arbitrary data. 
+type Uninitialized extends Tagged; + @abstract extern class HeapObject extends Tagged { map: Map; } type Object = Smi | HeapObject; + type int32 generates 'TNode' constexpr 'int32_t'; type uint32 generates 'TNode' constexpr 'uint32_t'; type int31 extends int32 @@ -84,32 +90,33 @@ extern class Oddball extends HeapObject { extern class HeapNumber extends HeapObject { value: float64; } type Number = Smi | HeapNumber; -type BigInt extends HeapObject generates 'TNode'; type Numeric = Number | BigInt; @abstract -@noVerifier +@generateCppClass extern class Name extends HeapObject { - hash_field: int32; + hash_field: uint32; } +@generateCppClass extern class Symbol extends Name { flags: int32; - name: Object; + name: Object; // The print name of a symbol, or undefined if none. } @abstract +@generateCppClass extern class String extends Name { - length: uint32; + length: int32; } +@generateCppClass extern class ConsString extends String { first: String; second: String; } @abstract -@noVerifier extern class ExternalString extends String { resource: RawPtr; resource_data: RawPtr; @@ -118,28 +125,37 @@ extern class ExternalString extends String { extern class ExternalOneByteString extends ExternalString {} extern class ExternalTwoByteString extends ExternalString {} -extern class InternalizedString extends String {} +@generateCppClass +extern class InternalizedString extends String { +} // TODO(v8:8983): Add declaration for variable-sized region. @abstract -@noVerifier +@generateCppClass extern class SeqString extends String { } -extern class SeqOneByteString extends SeqString {} -extern class SeqTwoByteString extends SeqString {} +@generateCppClass +extern class SeqOneByteString extends SeqString { +} +@generateCppClass +extern class SeqTwoByteString extends SeqString { +} +@generateCppClass extern class SlicedString extends String { parent: String; offset: Smi; } -extern class ThinString extends String { actual: String; } +@generateCppClass +extern class ThinString extends String { + actual: String; +} // The HeapNumber value NaN type NaN extends HeapNumber; @abstract -@noVerifier @generatePrint @generateCppClass extern class Struct extends HeapObject { @@ -169,7 +185,6 @@ type DirectString extends String; type RootIndex generates 'TNode' constexpr 'RootIndex'; @abstract -@noVerifier @generateCppClass extern class FixedArrayBase extends HeapObject { length: Smi; @@ -190,9 +205,7 @@ type LayoutDescriptor extends ByteArray type TransitionArray extends WeakFixedArray generates 'TNode'; -// InstanceType actually extends uint16, but a bunch of methods in -// CodeStubAssembler expect a TNode, so keeping it signed for now. -type InstanceType extends int16 constexpr 'InstanceType'; +type InstanceType extends uint16 constexpr 'InstanceType'; extern class Map extends HeapObject { instance_size_in_words: uint8; @@ -214,19 +227,21 @@ extern class Map extends HeapObject { @ifnot(V8_DOUBLE_FIELDS_UNBOXING) layout_descriptor: void; dependent_code: DependentCode; prototype_validity_cell: Smi | Cell; + // TODO(v8:9108): Misusing "weak" keyword; type should be + // Map | Weak | TransitionArray | PrototypeInfo | Smi. 
weak transitions_or_prototype_info: Map | TransitionArray | PrototypeInfo | Smi; } -type BytecodeArray extends FixedArrayBase; - @generatePrint +@generateCppClass extern class EnumCache extends Struct { keys: FixedArray; indices: FixedArray; } @generatePrint +@generateCppClass extern class SourcePositionTableWithFrameCache extends Struct { source_position_table: ByteArray; stack_frame_cache: Object; @@ -250,8 +265,7 @@ extern class DescriptorArray extends HeapObject { // than building the definition from C++. intrinsic %GetAllocationBaseSize(map: Map): intptr; intrinsic %Allocate(size: intptr): Class; -intrinsic %AllocateInternalClass(slotCount: constexpr intptr): - Class; +intrinsic %GetStructMap(instanceKind: constexpr InstanceType): Map; intrinsic %AddIndexedFieldSizeToObjectSize( baseSize: intptr, indexSize: T, fieldSize: int32): intptr { @@ -282,24 +296,35 @@ intrinsic } @abstract -@noVerifier extern class JSReceiver extends HeapObject { - properties_or_hash: FixedArrayBase | Smi; + properties_or_hash: FixedArrayBase | PropertyArray | Smi; } type Constructor extends JSReceiver; @abstract @dirtyInstantiatedAbstractClass +@generateCppClass extern class JSObject extends JSReceiver { - @noVerifier elements: FixedArrayBase; + // [elements]: The elements (properties with names that are integers). + // + // Elements can be in two general modes: fast and slow. Each mode + // corresponds to a set of object representations of elements that + // have something in common. + // + // In the fast mode elements is a FixedArray and so each element can be + // quickly accessed. The elements array can have one of several maps in this + // mode: fixed_array_map, fixed_double_array_map, + // sloppy_arguments_elements_map or fixed_cow_array_map (for copy-on-write + // arrays). In the latter case the elements array may be shared by a few + // objects and so before writing to any element the array must be copied. Use + // EnsureWritableFastElements in this case. + // + // In the slow mode the elements is either a NumberDictionary or a + // FixedArray parameter map for a (sloppy) arguments object. + elements: FixedArrayBase; } -macro NewJSObject( - map: Map, properties: FixedArrayBase | Smi, - elements: FixedArrayBase): JSObject { - return new JSObject{map, properties_or_hash: properties, elements}; -} macro NewJSObject(implicit context: Context)(): JSObject { const objectFunction: JSFunction = GetObjectFunction(); const map: Map = Cast(objectFunction.prototype_or_initial_map) @@ -328,19 +353,33 @@ macro GetDerivedMap(implicit context: Context)( } } +macro AllocateFastOrSlowJSObjectFromMap(implicit context: Context)(map: Map): + JSObject { + let properties = kEmptyFixedArray; + if (IsDictionaryMap(map)) { + properties = AllocateNameDictionary(kNameDictionaryInitialCapacity); + } + return AllocateJSObjectFromMap( + map, properties, kEmptyFixedArray, kNone, kWithSlackTracking); +} + extern class JSFunction extends JSObject { shared_function_info: SharedFunctionInfo; context: Context; feedback_cell: FeedbackCell; weak code: Code; + + // Space for the following field may or may not be allocated. @noVerifier weak prototype_or_initial_map: JSReceiver | Map; } +@generateCppClass extern class JSProxy extends JSReceiver { - target: Object; - handler: Object; + target: JSReceiver | Null; + handler: JSReceiver | Null; } +// Just a starting shape for JSObject; properties can move after initialization. 
@noVerifier extern class JSProxyRevocableResult extends JSObject { proxy: Object; @@ -358,21 +397,39 @@ macro NewJSProxyRevocableResult(implicit context: Context)( }; } -extern class JSGlobalProxy extends JSObject { native_context: Object; } +@generateCppClass +extern class JSGlobalProxy extends JSObject { + // [native_context]: the owner native context of this global proxy object. + // It is null value if this object is not used by any context. + native_context: Object; +} -extern class JSValue extends JSObject { value: Object; } +@generateCppClass +extern class JSPrimitiveWrapper extends JSObject { + value: Object; +} extern class JSArgumentsObject extends JSObject {} + +// Just a starting shape for JSObject; properties can move after initialization. @noVerifier @hasSameInstanceTypeAsParent extern class JSArgumentsObjectWithLength extends JSArgumentsObject { length: Object; } + +// Just a starting shape for JSObject; properties can move after initialization. @hasSameInstanceTypeAsParent extern class JSSloppyArgumentsObject extends JSArgumentsObjectWithLength { callee: Object; } +// Just a starting shape for JSObject; properties can move after initialization. +@hasSameInstanceTypeAsParent +@noVerifier +extern class JSStrictArgumentsObject extends JSArgumentsObjectWithLength { +} + extern class JSArrayIterator extends JSObject { iterated_object: JSReceiver; next_index: Number; @@ -405,20 +462,6 @@ macro NewJSArray(implicit context: Context)(): JSArray { }; } -struct HoleIterator { - Next(): Object labels NoMore() { - return Hole; - } -} - -macro NewJSArray(implicit context: Context)(map: Map, length: Smi): JSArray { - const map = GetFastPackedSmiElementsJSArrayMap(); - const i = HoleIterator{}; - const elements = new FixedArray{map, length, objects: ...i}; - return new - JSArray{map, properties_or_hash: kEmptyFixedArray, elements, length}; -} - // A HeapObject with a JSArray map, and either fast packed elements, or fast // holey elements when the global NoElementsProtector is not invalidated. 
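A recurring move in the base.tq hunks, above and below: fields typed as bare Object, or annotated @noVerifier, acquire precise union types so the generated verifiers have something to check. JSProxy above is representative:

    @generateCppClass
    extern class JSProxy extends JSReceiver {
      target: JSReceiver | Null;    // was: Object
      handler: JSReceiver | Null;   // was: Object
    }

The Null arm is load-bearing: revoking a proxy stores null into both slots, a state the old Object typing could not distinguish from any other value. The same tightening recurs below for WasmGlobalObject's buffers (| Undefined) and the StackFrameInfo string fields (String | Null | Undefined).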
transient type FastJSArray extends JSArray; @@ -441,18 +484,61 @@ transient type FastJSArrayForReadWithNoCustomIteration extends type NoSharedNameSentinel extends Smi; -type JSModuleNamespace extends JSObject; -type WeakArrayList extends HeapObject; +@generateCppClass +extern class CallHandlerInfo extends Struct { + callback: Foreign | Undefined; + js_callback: Foreign | Undefined; + data: Object; +} + +type ObjectHashTable extends FixedArray; @abstract +extern class Module extends HeapObject { + exports: ObjectHashTable; + hash: Smi; + status: Smi; + module_namespace: JSModuleNamespace | Undefined; + exception: Object; +} + +type SourceTextModuleInfo extends FixedArray; + +extern class SourceTextModule extends Module { + code: SharedFunctionInfo | JSFunction | + JSGeneratorObject | SourceTextModuleInfo; + regular_exports: FixedArray; + regular_imports: FixedArray; + requested_modules: FixedArray; + script: Script; + import_meta: TheHole | JSObject; + dfs_index: Smi; + dfs_ancestor_index: Smi; +} + +extern class SyntheticModule extends Module { + name: String; + export_names: FixedArray; + evaluation_steps: Foreign; +} + +@abstract +extern class JSModuleNamespace extends JSObject { + module: Module; +} + +@hasSameInstanceTypeAsParent @noVerifier +extern class TemplateList extends FixedArray { +} + +@abstract extern class JSWeakCollection extends JSObject { table: Object; } extern class JSWeakSet extends JSWeakCollection {} extern class JSWeakMap extends JSWeakCollection {} -@noVerifier extern class JSCollectionIterator extends JSObject { table: Object; index: Object; @@ -474,12 +560,20 @@ extern class JSMessageObject extends JSObject { error_level: Smi; } +extern class WeakArrayList extends HeapObject { + capacity: Smi; + length: Smi; + // TODO(v8:8983): declare variable-sized region for contained MaybeObject's + // objects[length]: MaybeObject; +} + extern class PrototypeInfo extends Struct { js_module_namespace: JSModuleNamespace | Undefined; prototype_users: WeakArrayList | Zero; registry_slot: Smi; validity_cell: Object; - @noVerifier object_create_map: Smi | WeakArrayList; + // TODO(v8:9108): Should be Weak | Undefined. + @noVerifier object_create_map: Map | Undefined; bit_field: Smi; } @@ -503,7 +597,7 @@ extern class Script extends Struct { extern class EmbedderDataArray extends HeapObject { length: Smi; } -type ScopeInfo extends Object generates 'TNode'; +type ScopeInfo extends HeapObject generates 'TNode'; extern class PreparseData extends HeapObject { // TODO(v8:8983): Add declaration for variable-sized region. @@ -527,16 +621,30 @@ extern class SharedFunctionInfo extends HeapObject { expected_nof_properties: uint16; function_token_offset: int16; flags: int32; + function_literal_id: int32; @if(V8_SFI_HAS_UNIQUE_ID) unique_id: int32; } extern class JSBoundFunction extends JSObject { - bound_target_function: JSReceiver; + bound_target_function: Callable; bound_this: Object; bound_arguments: FixedArray; } -type Callable = JSFunction | JSBoundFunction | JSProxy; +// Specialized types. The following three type definitions don't correspond to +// actual C++ classes, but have Is... methods that check additional constraints. + +// A Foreign object whose raw pointer is not allowed to be null. +type NonNullForeign extends Foreign; + +// A function built with InstantiateFunction for the public API. +type CallableApiObject extends HeapObject; + +// A JSProxy with the callable bit set. 
+type CallableJSProxy extends JSProxy; + +type Callable = + JSFunction | JSBoundFunction | CallableJSProxy | CallableApiObject; extern operator '.length_intptr' macro LoadAndUntagFixedArrayBaseLength( FixedArrayBase): intptr; @@ -547,7 +655,7 @@ type NumberDictionary extends HeapObject extern class FreeSpace extends HeapObject { size: Smi; - @noVerifier next: FreeSpace; + next: FreeSpace | Uninitialized; } // %RawDownCast should *never* be used anywhere in Torque code except for @@ -609,45 +717,12 @@ extern class JSArrayBufferView extends JSObject { } extern class JSTypedArray extends JSArrayBufferView { - AttachOffHeapBuffer(buffer: JSArrayBuffer, byteOffset: uintptr): void { - const basePointer: Smi = 0; - - // The max byteOffset is 8 * MaxSmi on the particular platform. 32 bit - // platforms are self-limiting, because we can't allocate an array bigger - // than our 32-bit arithmetic range anyway. 64 bit platforms could - // theoretically have an offset up to 2^35 - 1. - const backingStore = buffer.backing_store; - const externalPointer = backingStore + Convert(byteOffset); - - // Assert no overflow has occurred. Only assert if the mock array buffer - // allocator is NOT used. When the mock array buffer is used, impossibly - // large allocations are allowed that would erroneously cause an overflow - // and this assertion to fail. - assert( - IsMockArrayBufferAllocatorFlag() || - Convert(externalPointer) >= Convert(backingStore)); - - this.elements = kEmptyByteArray; - this.buffer = buffer; - this.external_pointer = externalPointer; - this.base_pointer = basePointer; - } - length: uintptr; external_pointer: RawPtr; base_pointer: ByteArray | Smi; } -@noVerifier -extern class JSAccessorPropertyDescriptor extends JSObject { - get: Object; - set: Object; - enumerable: Object; - configurable: Object; -} - @abstract -@noVerifier extern class JSCollection extends JSObject { table: Object; } @@ -681,14 +756,6 @@ extern class JSStringIterator extends JSObject { next_index: Smi; } -@noVerifier -extern class JSDataPropertyDescriptor extends JSObject { - value: Object; - writable: Object; - enumerable: Object; - configurable: Object; -} - @abstract extern class TemplateInfo extends Struct { tag: Object; @@ -722,7 +789,7 @@ extern class FunctionTemplateInfo extends TemplateInfo { function_template_rare_data: Object; shared_function_info: Object; flag: Smi; - @noVerifier length: Smi; + length: Smi; cached_property_name: Object; } @@ -749,8 +816,6 @@ type LanguageMode extends Smi constexpr 'LanguageMode'; type ExtractFixedArrayFlags generates 'TNode' constexpr 'CodeStubAssembler::ExtractFixedArrayFlags'; -type ParameterMode - generates 'TNode' constexpr 'ParameterMode'; type WriteBarrierMode generates 'TNode' constexpr 'WriteBarrierMode'; @@ -770,21 +835,21 @@ const UTF32: extern class Foreign extends HeapObject { foreign_address: RawPtr; } extern class InterceptorInfo extends Struct { - @noVerifier getter: Foreign | Zero; - @noVerifier setter: Foreign | Zero; - @noVerifier query: Foreign | Zero; - @noVerifier descriptor: Foreign | Zero; - @noVerifier deleter: Foreign | Zero; - @noVerifier enumerator: Foreign | Zero; - @noVerifier definer: Foreign | Zero; + getter: NonNullForeign | Zero | Undefined; + setter: NonNullForeign | Zero | Undefined; + query: NonNullForeign | Zero | Undefined; + descriptor: NonNullForeign | Zero | Undefined; + deleter: NonNullForeign | Zero | Undefined; + enumerator: NonNullForeign | Zero | Undefined; + definer: NonNullForeign | Zero | Undefined; data: Object; flags: Smi; } 
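Two of the specialized types declared near the top of this file earn their keep in this hunk. FreeSpace.next drops @noVerifier in favor of FreeSpace | Uninitialized, making "this slot may hold arbitrary tagged-sized bits" part of the type rather than a verifier suppression:

    extern class FreeSpace extends HeapObject {
      size: Smi;
      next: FreeSpace | Uninitialized;  // was: @noVerifier next: FreeSpace;
    }

Likewise the InterceptorInfo callbacks just above (and AccessCheckInfo and AccessorInfo below) move from unverified Foreign | Zero to unions over NonNullForeign, the Foreign whose raw pointer is checked to be non-null. Reading the arms from the declarations earlier in the file, and as an inference the patch does not state outright: a NonNullForeign is a live callback, Zero is the Smi conventionally used as a null for HeapObject slots, and Undefined marks an empty slot.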
extern class AccessCheckInfo extends Struct { - callback: Foreign | Zero; - named_interceptor: InterceptorInfo | Zero; - indexed_interceptor: InterceptorInfo | Zero; + callback: Foreign | Zero | Undefined; + named_interceptor: InterceptorInfo | Zero | Undefined; + indexed_interceptor: InterceptorInfo | Zero | Undefined; data: Object; } @@ -800,6 +865,9 @@ extern class Cell extends HeapObject { value: Object; } extern class DataHandler extends Struct { smi_handler: Smi | Code; validity_cell: Smi | Cell; + + // Space for the following fields may or may not be allocated. + // TODO(v8:9108): Misusing "weak" keyword; should be MaybeObject. @noVerifier weak data_1: Object; @noVerifier weak data_2: Object; @noVerifier weak data_3: Object; @@ -850,17 +918,22 @@ extern class StackFrameInfo extends Struct { column_number: Smi; promise_all_index: Smi; script_id: Smi; - script_name: Object; - script_name_or_source_url: Object; - function_name: Object; - wasm_module_name: Object; + script_name: String | Null | Undefined; + script_name_or_source_url: String | Null | Undefined; + function_name: String | Null | Undefined; + method_name: String | Null | Undefined; + type_name: String | Null | Undefined; + eval_origin: String | Null | Undefined; + wasm_module_name: String | Null | Undefined; flag: Smi; } +type FrameArray extends FixedArray; + extern class StackTraceFrame extends Struct { - frame_array: Object; + frame_array: FrameArray | Undefined; frame_index: Smi; - frame_info: Object; + frame_info: StackFrameInfo | Undefined; id: Smi; } @@ -876,9 +949,20 @@ extern class WasmExportedFunctionData extends Struct { instance: WasmInstanceObject; jump_table_offset: Smi; function_index: Smi; + // The remaining fields are for fast calling from C++. The contract is + // that they are lazily populated, and either all will be present or none. + c_wrapper_code: Object; + wasm_call_target: Smi; // Pseudo-smi: one-bit shift on all platforms. 
+ packed_args_size: Smi; } -extern class WasmJSFunctionData extends Struct { wrapper_code: Code; } +extern class WasmJSFunctionData extends Struct { + callable: JSReceiver; + wrapper_code: Code; + serialized_return_count: Smi; + serialized_parameter_count: Smi; + serialized_signature: ByteArray; // PodArray +} extern class WasmCapiFunctionData extends Struct { call_target: RawPtr; @@ -887,6 +971,16 @@ extern class WasmCapiFunctionData extends Struct { serialized_signature: ByteArray; // PodArray } +extern class WasmIndirectFunctionTable extends Struct { + size: uint32; + @if(TAGGED_SIZE_8_BYTES) optional_padding: uint32; + @ifnot(TAGGED_SIZE_8_BYTES) optional_padding: void; + sig_ids: RawPtr; + targets: RawPtr; + managed_native_allocations: Foreign | Undefined; + refs: FixedArray; +} + extern class WasmDebugInfo extends Struct { instance: WasmInstanceObject; interpreter_handle: Foreign | Undefined; @@ -947,9 +1041,9 @@ const kAllowLargeObjectAllocation: constexpr AllocationFlags generates 'CodeStubAssembler::kAllowLargeObjectAllocation'; const kWithSlackTracking: constexpr SlackTrackingMode - generates 'SlackTrackingMode::kWithSlackTracking'; + generates 'CodeStubAssembler::SlackTrackingMode::kWithSlackTracking'; const kNoSlackTracking: constexpr SlackTrackingMode - generates 'SlackTrackingMode::kNoSlackTracking'; + generates 'CodeStubAssembler::SlackTrackingMode::kNoSlackTracking'; const kFixedDoubleArrays: constexpr ExtractFixedArrayFlags generates 'CodeStubAssembler::ExtractFixedArrayFlag::kFixedDoubleArrays'; @@ -977,6 +1071,8 @@ const kCalledNonCallable: constexpr MessageTemplate generates 'MessageTemplate::kCalledNonCallable'; const kCalledOnNullOrUndefined: constexpr MessageTemplate generates 'MessageTemplate::kCalledOnNullOrUndefined'; +const kProtoObjectOrNull: constexpr MessageTemplate + generates 'MessageTemplate::kProtoObjectOrNull'; const kInvalidOffset: constexpr MessageTemplate generates 'MessageTemplate::kInvalidOffset'; const kInvalidTypedArrayLength: constexpr MessageTemplate @@ -1003,13 +1099,17 @@ const kSymbolToString: constexpr MessageTemplate generates 'MessageTemplate::kSymbolToString'; const kPropertyNotFunction: constexpr MessageTemplate generates 'MessageTemplate::kPropertyNotFunction'; +const kBigIntMaxLength: constexpr intptr + generates 'BigInt::kMaxLength'; +const kBigIntTooBig: constexpr MessageTemplate + generates 'MessageTemplate::kBigIntTooBig'; const kMaxArrayIndex: constexpr uint32 generates 'JSArray::kMaxArrayIndex'; const kArrayBufferMaxByteLength: constexpr uintptr generates 'JSArrayBuffer::kMaxByteLength'; -const V8_TYPED_ARRAY_MAX_SIZE_IN_HEAP: - constexpr int31 generates 'V8_TYPED_ARRAY_MAX_SIZE_IN_HEAP'; +const kMaxTypedArrayInHeap: + constexpr int31 generates 'JSTypedArray::kMaxSizeInHeap'; const kMaxSafeInteger: constexpr float64 generates 'kMaxSafeInteger'; const kSmiMaxValue: constexpr uintptr generates 'kSmiMaxValue'; const kSmiMax: uintptr = kSmiMaxValue; @@ -1054,7 +1154,13 @@ const kStrictReadOnlyProperty: constexpr MessageTemplate const kString: constexpr PrimitiveType generates 'PrimitiveType::kString'; -type Hole extends Oddball; +const kExternalPointerForOnHeapArray: constexpr RawPtr + generates 'JSTypedArray::ExternalPointerForOnHeapArray()'; + +const kNameDictionaryInitialCapacity: + constexpr int32 generates 'NameDictionary::kInitialCapacity'; + +type TheHole extends Oddball; type Null extends Oddball; type Undefined extends Oddball; type True extends Oddball; @@ -1064,7 +1170,7 @@ type Boolean = True | False; type 
NumberOrUndefined = Number | Undefined; -extern macro TheHoleConstant(): Hole; +extern macro TheHoleConstant(): TheHole; extern macro NullConstant(): Null; extern macro UndefinedConstant(): Undefined; extern macro TrueConstant(): True; @@ -1075,7 +1181,7 @@ extern macro EmptyStringConstant(): EmptyString; extern macro LengthStringConstant(): String; extern macro NanConstant(): NaN; -const Hole: Hole = TheHoleConstant(); +const TheHole: TheHole = TheHoleConstant(); const Null: Null = NullConstant(); const Undefined: Undefined = UndefinedConstant(); const True: True = TrueConstant(); @@ -1090,11 +1196,6 @@ const false: constexpr bool generates 'false'; const kStrict: constexpr LanguageMode generates 'LanguageMode::kStrict'; const kSloppy: constexpr LanguageMode generates 'LanguageMode::kSloppy'; -const SMI_PARAMETERS: constexpr ParameterMode - generates 'CodeStubAssembler::SMI_PARAMETERS'; -const INTPTR_PARAMETERS: constexpr ParameterMode - generates 'CodeStubAssembler::INTPTR_PARAMETERS'; - const SKIP_WRITE_BARRIER: constexpr WriteBarrierMode generates 'SKIP_WRITE_BARRIER'; const UNSAFE_SKIP_WRITE_BARRIER: @@ -1107,7 +1208,7 @@ extern class AsyncGeneratorRequest extends Struct { promise: JSPromise; } -extern class ModuleInfoEntry extends Struct { +extern class SourceTextModuleInfoEntry extends Struct { export_name: String | Undefined; local_name: String | Undefined; import_name: String | Undefined; @@ -1134,7 +1235,7 @@ extern class PromiseReaction extends Struct { extern class PromiseReactionJobTask extends Microtask { argument: Object; context: Context; - @noVerifier handler: Callable | Undefined; + handler: Callable | Undefined; promise_or_capability: JSPromise | PromiseCapability | Undefined; } @@ -1155,22 +1256,8 @@ extern class JSRegExp extends JSObject { flags: Smi | Undefined; } -@noVerifier -extern class JSIteratorResult extends JSObject { - value: Object; - done: Boolean; -} - -macro NewJSIteratorResult(implicit context: Context)( - value: Object, done: Boolean): JSIteratorResult { - return new JSIteratorResult{ - map: GetIteratorResultMap(), - properties_or_hash: kEmptyFixedArray, - elements: kEmptyFixedArray, - value, - done - }; -} +extern transitioning macro AllocateJSIteratorResult(implicit context: Context)( + Object, Boolean): JSObject; // Note: Although a condition for a FastJSRegExp is having a positive smi // lastIndex (see RegExpBuiltinsAssembler::BranchIfFastRegExp), it is possible @@ -1230,9 +1317,9 @@ extern class AccessorInfo extends Struct { name: Object; flags: Smi; expected_receiver_type: Object; - @noVerifier setter: Foreign | Zero; - @noVerifier getter: Foreign | Zero; - @noVerifier js_getter: Foreign | Zero; + setter: NonNullForeign | Zero; + getter: NonNullForeign | Zero; + js_getter: NonNullForeign | Zero; data: Object; } @@ -1277,7 +1364,7 @@ extern class FeedbackCell extends Struct { type AllocationSite extends Struct; extern class AllocationMemento extends Struct { - @noVerifier allocation_site: AllocationSite; + allocation_site: AllocationSite; } extern class WasmModuleObject extends JSObject { @@ -1303,8 +1390,8 @@ extern class WasmMemoryObject extends JSObject { } extern class WasmGlobalObject extends JSObject { - untagged_buffer: JSArrayBuffer; - tagged_buffer: FixedArray; + untagged_buffer: JSArrayBuffer | Undefined; + tagged_buffer: FixedArray | Undefined; offset: Smi; flags: Smi; } @@ -1314,10 +1401,6 @@ extern class WasmExceptionObject extends JSObject { exception_tag: HeapObject; } -@noVerifier -extern class WasmExceptionPackage extends 
JSReceiver { -} - type WasmExportedFunction extends JSFunction; extern class AsmWasmData extends Struct { @@ -1327,6 +1410,46 @@ extern class AsmWasmData extends Struct { uses_bitset: HeapNumber; } +extern class JSFinalizationGroup extends JSObject { + native_context: NativeContext; + cleanup: Object; + active_cells: Undefined | WeakCell; + cleared_cells: Undefined | WeakCell; + key_map: Object; + next: Undefined | JSFinalizationGroup; + flags: Smi; +} + +extern class JSFinalizationGroupCleanupIterator extends JSObject { + finalization_group: JSFinalizationGroup; +} + +extern class WeakCell extends HeapObject { + finalization_group: Undefined | JSFinalizationGroup; + target: Undefined | JSReceiver; + holdings: Object; + prev: Undefined | WeakCell; + next: Undefined | WeakCell; + key: Object; + key_list_prev: Undefined | WeakCell; + key_list_next: Undefined | WeakCell; +} + +extern class JSWeakRef extends JSObject { target: Undefined | JSReceiver; } + +extern class BytecodeArray extends FixedArrayBase { + // TODO(v8:8983): bytecode array object sizes vary based on their contents. + constant_pool: FixedArray; + handler_table: ByteArray; + source_position_table: Undefined | ByteArray | + SourcePositionTableWithFrameCache; + frame_size: int32; + parameter_size: int32; + incoming_new_target_or_generator_register: int32; + osr_nesting_level: int8; + bytecode_age: int8; +} + extern macro Is64(): constexpr bool; extern macro SelectBooleanConstant(bool): Boolean; @@ -1358,7 +1481,7 @@ extern transitioning builtin SetProperty(implicit context: Context)( extern transitioning builtin SetPropertyInLiteral(implicit context: Context)( Object, Object, Object); extern transitioning builtin DeleteProperty(implicit context: Context)( - Object, Object, LanguageMode); + Object, Object, LanguageMode): Object; extern transitioning builtin HasProperty(implicit context: Context)( Object, Object): Boolean; extern transitioning macro HasProperty_Inline(implicit context: Context)( @@ -1403,6 +1526,10 @@ extern macro ConstructWithTarget(implicit context: Context)( extern macro SpeciesConstructor(implicit context: Context)( Object, JSReceiver): JSReceiver; +extern macro ConstructorBuiltinsAssembler::IsDictionaryMap(Map): bool; +extern macro CodeStubAssembler::AllocateNameDictionary(constexpr int32): + NameDictionary; + extern builtin ToObject(Context, Object): JSReceiver; extern macro ToObject_Inline(Context, Object): JSReceiver; extern macro IsNullOrUndefined(Object): bool; @@ -1598,6 +1725,7 @@ extern operator '==' macro Word32Equal(bool, bool): bool; extern operator '!=' macro Word32NotEqual(bool, bool): bool; extern operator '+' macro Float64Add(float64, float64): float64; +extern operator '-' macro Float64Sub(float64, float64): float64; extern operator '+' macro NumberAdd(Number, Number): Number; extern operator '-' macro NumberSub(Number, Number): Number; @@ -1650,6 +1778,8 @@ extern macro TaggedIsNotSmi(Object): bool; extern macro TaggedIsPositiveSmi(Object): bool; extern macro IsValidPositiveSmi(intptr): bool; +extern macro IsInteger(HeapNumber): bool; + extern macro HeapObjectToJSDataView(HeapObject): JSDataView labels CastError; extern macro HeapObjectToJSProxy(HeapObject): JSProxy @@ -1713,7 +1843,7 @@ macro Cast(o: HeapObject): A labels CastError; Cast(o: HeapObject): HeapObject - labels CastError { +labels _CastError { return o; } @@ -1837,6 +1967,11 @@ Cast(o: HeapObject): HeapNumber goto CastError; } +Cast(o: HeapObject): BigInt labels CastError { + if (IsBigInt(o)) return %RawDownCast(o); + goto 
CastError; +} + Cast(o: HeapObject): JSRegExp labels CastError { if (IsJSRegExp(o)) return %RawDownCast(o); @@ -1849,9 +1984,9 @@ Cast(implicit context: Context)(o: HeapObject): Map goto CastError; } -Cast(o: HeapObject): JSValue +Cast(o: HeapObject): JSPrimitiveWrapper labels CastError { - if (IsJSValue(o)) return %RawDownCast(o); + if (IsJSPrimitiveWrapper(o)) return %RawDownCast(o); goto CastError; } @@ -1915,24 +2050,24 @@ Cast(implicit context: Context)(o: HeapObject): FastJSArrayForCopy labels CastError { if (IsArraySpeciesProtectorCellInvalid()) goto CastError; - const a: FastJSArray = Cast(o) otherwise CastError; - return %RawDownCast(o); + const a = Cast(o) otherwise CastError; + return %RawDownCast(a); } Cast(implicit context: Context)( o: HeapObject): FastJSArrayWithNoCustomIteration labels CastError { if (IsArrayIteratorProtectorCellInvalid()) goto CastError; - const a: FastJSArray = Cast(o) otherwise CastError; - return %RawDownCast(o); + const a = Cast(o) otherwise CastError; + return %RawDownCast(a); } Cast(implicit context: Context)( o: HeapObject): FastJSArrayForReadWithNoCustomIteration labels CastError { if (IsArrayIteratorProtectorCellInvalid()) goto CastError; - const a: FastJSArrayForRead = Cast(o) otherwise CastError; - return %RawDownCast(o); + const a = Cast(o) otherwise CastError; + return %RawDownCast(a); } Cast(implicit context: Context)(o: HeapObject): JSReceiver @@ -1990,7 +2125,7 @@ extern macro ChangeInt32ToIntPtr(int32): intptr; // Sign-extends. extern macro ChangeUint32ToWord(uint32): uintptr; // Doesn't sign-extend. extern macro LoadNativeContext(Context): NativeContext; extern macro TruncateFloat64ToFloat32(float64): float32; -extern macro TruncateHeapNumberValueToWord32(Number): int32; +extern macro TruncateHeapNumberValueToWord32(HeapNumber): int32; extern macro LoadJSArrayElementsMap(constexpr ElementsKind, Context): Map; extern macro LoadJSArrayElementsMap(ElementsKind, Context): Map; extern macro ChangeNonnegativeNumberToUintPtr(Number): uintptr; @@ -2007,13 +2142,14 @@ extern macro Float64Constant(constexpr float64): float64; extern macro SmiConstant(constexpr int31): Smi; extern macro SmiConstant(constexpr Smi): Smi; extern macro SmiConstant(constexpr MessageTemplate): Smi; +extern macro SmiConstant(constexpr LanguageMode): Smi; extern macro BoolConstant(constexpr bool): bool; extern macro StringConstant(constexpr string): String; -extern macro LanguageModeConstant(constexpr LanguageMode): LanguageMode; extern macro Int32Constant(constexpr ElementsKind): ElementsKind; extern macro IntPtrConstant(constexpr NativeContextSlot): NativeContextSlot; extern macro IntPtrConstant(constexpr ContextSlot): ContextSlot; extern macro IntPtrConstant(constexpr intptr): intptr; +extern macro PointerConstant(constexpr RawPtr): RawPtr; extern macro SingleCharacterStringConstant(constexpr string): String; extern macro BitcastWordToTaggedSigned(intptr): Smi; @@ -2126,6 +2262,9 @@ Convert(i: int32): Number { Convert(i: int32): intptr { return ChangeInt32ToIntPtr(i); } +Convert(i: uint32): intptr { + return Signed(ChangeUint32ToWord(i)); +} Convert(i: int32): Smi { return SmiFromInt32(i); } @@ -2333,10 +2472,6 @@ extern operator '.floats[]=' macro StoreFixedDoubleArrayElement( FixedDoubleArray, intptr, float64): void; extern operator '.floats[]=' macro StoreFixedDoubleArrayElementSmi( FixedDoubleArray, Smi, float64): void; -operator '.floats[]=' macro StoreFixedDoubleArrayElementSmi( - a: FixedDoubleArray, i: Smi, n: Number): void { - StoreFixedDoubleArrayElementSmi(a, 
i, Convert(n)); -} operator '[]=' macro StoreFixedDoubleArrayDirect( a: FixedDoubleArray, i: Smi, v: Number) { a.floats[i] = Convert(v); @@ -2418,7 +2553,7 @@ extern macro AllocateJSArray(constexpr ElementsKind, Map, Smi, Smi): JSArray; extern macro AllocateJSArray(Map, FixedArrayBase, Smi): JSArray; extern macro AllocateJSObjectFromMap(Map): JSObject; extern macro AllocateJSObjectFromMap( - Map, FixedArray, FixedArray, constexpr AllocationFlags, + Map, FixedArray | PropertyArray, FixedArray, constexpr AllocationFlags, constexpr SlackTrackingMode): JSObject; extern macro LoadDoubleWithHoleCheck(FixedDoubleArray, Smi): float64 @@ -2531,10 +2666,10 @@ LoadElementNoHole(implicit context: Context)( a: JSArray, index: Smi): Object labels IfHole { try { - let elements: FixedArray = + const elements: FixedArray = Cast(a.elements) otherwise Unexpected; - let e: Object = elements.objects[index]; - if (e == Hole) { + const e: Object = elements.objects[index]; + if (e == TheHole) { goto IfHole; } return e; @@ -2548,9 +2683,10 @@ LoadElementNoHole(implicit context: Context)( a: JSArray, index: Smi): Object labels IfHole { try { - let elements: FixedDoubleArray = + const elements: FixedDoubleArray = Cast(a.elements) otherwise Unexpected; - let e: float64 = LoadDoubleWithHoleCheck(elements, index) otherwise IfHole; + const e: float64 = + LoadDoubleWithHoleCheck(elements, index) otherwise IfHole; return AllocateHeapNumberWithValue(e); } label Unexpected { @@ -2594,7 +2730,7 @@ struct FastJSArrayWitness { } else { const elements = Cast(this.unstable.elements) otherwise unreachable; - StoreFixedArrayElement(elements, k, Hole); + StoreFixedArrayElement(elements, k, TheHole); } } @@ -2638,12 +2774,12 @@ struct FastJSArrayWitness { MoveElements(dst: intptr, src: intptr, length: intptr) { assert(this.arrayIsPushable); if (this.hasDoubles) { - let elements: FixedDoubleArray = + const elements: FixedDoubleArray = Cast(this.unstable.elements) otherwise unreachable; TorqueMoveElements(elements, dst, src, length); } else { - let elements: FixedArray = Cast(this.unstable.elements) + const elements: FixedArray = Cast(this.unstable.elements) otherwise unreachable; if (this.hasSmis) { TorqueMoveElementsSmi(elements, dst, src, length); @@ -2662,17 +2798,62 @@ struct FastJSArrayWitness { } macro NewFastJSArrayWitness(array: FastJSArray): FastJSArrayWitness { - let kind = array.map.elements_kind; + const kind = array.map.elements_kind; return FastJSArrayWitness{ stable: array, unstable: array, map: array.map, - hasDoubles: !IsElementsKindLessThanOrEqual(kind, HOLEY_ELEMENTS), + hasDoubles: IsDoubleElementsKind(kind), hasSmis: IsElementsKindLessThanOrEqual(kind, HOLEY_SMI_ELEMENTS), arrayIsPushable: false }; } +struct FastJSArrayForReadWitness { + Get(): FastJSArrayForRead { + return this.unstable; + } + + Recheck() labels CastError { + if (this.stable.map != this.map) goto CastError; + // We don't need to check elements kind or whether the prototype + // has changed away from the default JSArray prototype, because + // if the map remains the same then those properties hold. + // + // However, we have to make sure there are no elements in the + // prototype chain. 
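+    //
+    // Illustrative example (not from the original patch): user code such as
+    //   Array.prototype[0] = 'surprise';
+    // invalidates the NoElements protector, so the check below makes
+    // Recheck() bail out to CastError and callers leave the fast-array path.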
+ if (IsNoElementsProtectorCellInvalid()) goto CastError; + this.unstable = %RawDownCast(this.stable); + } + + LoadElementNoHole(implicit context: Context)(k: Smi): Object + labels FoundHole { + if (this.hasDoubles) { + return LoadElementNoHole(this.unstable, k) + otherwise FoundHole; + } else { + return LoadElementNoHole(this.unstable, k) + otherwise FoundHole; + } + } + + const stable: JSArray; + unstable: FastJSArrayForRead; + const map: Map; + const hasDoubles: bool; +} + +macro NewFastJSArrayForReadWitness(array: FastJSArrayForRead): + FastJSArrayForReadWitness { + const kind = array.map.elements_kind; + return FastJSArrayForReadWitness{ + stable: array, + unstable: array, + map: array.map, + hasDoubles: IsDoubleElementsKind(kind) + }; +} + extern macro TransitionElementsKind( JSObject, Map, constexpr ElementsKind, constexpr ElementsKind): void labels Bailout; @@ -2693,6 +2874,7 @@ extern macro IsJSReceiver(HeapObject): bool; extern macro TaggedIsCallable(Object): bool; extern macro IsDetachedBuffer(JSArrayBuffer): bool; extern macro IsHeapNumber(HeapObject): bool; +extern macro IsBigInt(HeapObject): bool; extern macro IsFixedArray(HeapObject): bool; extern macro IsName(HeapObject): bool; extern macro IsPrivateSymbol(HeapObject): bool; @@ -2702,7 +2884,7 @@ extern macro IsOddball(HeapObject): bool; extern macro IsSymbol(HeapObject): bool; extern macro IsJSArrayMap(Map): bool; extern macro IsExtensibleMap(Map): bool; -extern macro IsJSValue(HeapObject): bool; +extern macro IsJSPrimitiveWrapper(HeapObject): bool; extern macro IsCustomElementsReceiverInstanceType(int32): bool; extern macro Typeof(Object): Object; @@ -2713,7 +2895,7 @@ macro NumberIsNaN(number: Number): bool { return false; } case (hn: HeapNumber): { - let value: float64 = Convert(hn); + const value: float64 = Convert(hn); return value != value; } } @@ -2722,6 +2904,8 @@ macro NumberIsNaN(number: Number): bool { extern macro GotoIfForceSlowPath() labels Taken; extern macro BranchIfToBooleanIsTrue(Object): never labels Taken, NotTaken; +extern macro BranchIfToBooleanIsFalse(Object): never + labels Taken, NotTaken; macro ToBoolean(obj: Object): bool { if (BranchIfToBooleanIsTrue(obj)) { @@ -2731,13 +2915,24 @@ macro ToBoolean(obj: Object): bool { } } +@export +macro RequireObjectCoercible(implicit context: Context)( + value: Object, name: constexpr string): Object { + if (IsNullOrUndefined(value)) { + ThrowTypeError(kCalledOnNullOrUndefined, name); + } + return value; +} + +extern macro BranchIfSameValue(Object, Object): never labels Taken, NotTaken; + transitioning macro ToIndex(input: Object, context: Context): Number labels RangeError { if (input == Undefined) { return 0; } - let value: Number = ToInteger_Inline(context, input, kTruncateMinusZero); + const value: Number = ToInteger_Inline(context, input, kTruncateMinusZero); if (value < 0 || value > kMaxSafeInteger) { goto RangeError; } @@ -2824,19 +3019,6 @@ macro BranchIfFastJSArrayForRead(o: Object, context: Context): BranchIf(o) otherwise True, False; } -macro BranchIfNotFastJSArray(o: Object, context: Context): never - labels True, False { - BranchIfNot(o) otherwise True, False; -} - -macro BranchIfFastJSArrayForCopy(o: Object, context: Context): never - labels True, False { - // Long-term, it's likely not a good idea to have this slow-path test here, - // since it fundamentally breaks the type system. 
- GotoIfForceSlowPath() otherwise False; - BranchIf(o) otherwise True, False; -} - @export macro IsFastJSArrayWithNoCustomIteration(context: Context, o: Object): bool { return Is(o); @@ -2859,7 +3041,7 @@ namespace runtime { transitioning builtin FastCreateDataProperty(implicit context: Context)( receiver: JSReceiver, key: Object, value: Object): Object { try { - let array = Cast(receiver) otherwise Slow; + const array = Cast(receiver) otherwise Slow; const index: Smi = Cast(key) otherwise goto Slow; if (index < 0 || index > array.length) goto Slow; array::EnsureWriteableFastElements(array); @@ -2929,3 +3111,46 @@ transitioning macro ToStringImpl(context: Context, o: Object): String { } unreachable; } + +macro VerifiedUnreachable(): never { + StaticAssert(false); + unreachable; +} + +macro Float64IsSomeInfinity(value: float64): bool { + if (value == V8_INFINITY) { + return true; + } + return value == (Convert(0) - V8_INFINITY); +} + +@export +macro IsIntegerOrSomeInfinity(o: Object): bool { + typeswitch (o) { + case (Smi): { + return true; + } + case (hn: HeapNumber): { + if (Float64IsSomeInfinity(Convert(hn))) { + return true; + } + return IsInteger(hn); + } + case (Object): { + return false; + } + } +} + +builtin CheckNumberInRange(implicit context: Context)( + value: Number, min: Number, max: Number): Undefined { + if (IsIntegerOrSomeInfinity(value) && min <= value && value <= max) { + return Undefined; + } else { + Print('Range type assertion failed! (value/min/max)'); + Print(value); + Print(min); + Print(max); + unreachable; + } +} diff --git a/deps/v8/src/builtins/bigint.tq b/deps/v8/src/builtins/bigint.tq new file mode 100644 index 00000000000000..a1b1cb67809d84 --- /dev/null +++ b/deps/v8/src/builtins/bigint.tq @@ -0,0 +1,206 @@ +// Copyright 2019 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#include 'src/builtins/builtins-bigint-gen.h' + +// TODO(nicohartmann): Discuss whether types used by multiple builtins should be +// in global namespace +@noVerifier +extern class BigIntBase extends HeapObject generates 'TNode' { +} + +type BigInt extends BigIntBase; + +@noVerifier +@hasSameInstanceTypeAsParent +extern class MutableBigInt extends BigIntBase generates 'TNode' { +} + +Convert(i: MutableBigInt): BigInt { + assert(bigint::IsCanonicalized(i)); + return %RawDownCast(Convert(i)); +} + +namespace bigint { + + const kPositiveSign: uint32 = 0; + const kNegativeSign: uint32 = 1; + + extern macro BigIntBuiltinsAssembler::CppAbsoluteAddAndCanonicalize( + MutableBigInt, BigIntBase, BigIntBase): void; + extern macro BigIntBuiltinsAssembler::CppAbsoluteSubAndCanonicalize( + MutableBigInt, BigIntBase, BigIntBase): void; + extern macro BigIntBuiltinsAssembler::CppAbsoluteCompare( + BigIntBase, BigIntBase): int32; + + extern macro BigIntBuiltinsAssembler::ReadBigIntSign(BigIntBase): uint32; + extern macro BigIntBuiltinsAssembler::ReadBigIntLength(BigIntBase): intptr; + extern macro BigIntBuiltinsAssembler::WriteBigIntSignAndLength( + MutableBigInt, uint32, intptr): void; + + extern macro CodeStubAssembler::AllocateBigInt(intptr): MutableBigInt; + extern macro CodeStubAssembler::StoreBigIntDigit( + MutableBigInt, intptr, uintptr): void; + extern macro CodeStubAssembler::LoadBigIntDigit(BigIntBase, intptr): uintptr; + + @export // Silence unused warning. + // TODO(szuend): Remove @export once macros that are only used in + // asserts are no longer detected as unused. 
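+  // Note (illustrative, not from the original patch): canonical form means
+  // no leading zero digits. For example, 0n is encoded as length == 0 with
+  // kPositiveSign, and any nonzero BigInt must have a nonzero most
+  // significant digit, which is exactly what IsCanonicalized checks below.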
+  macro IsCanonicalized(bigint: BigIntBase): bool {
+    const length = ReadBigIntLength(bigint);
+
+    if (length == 0) {
+      return ReadBigIntSign(bigint) == kPositiveSign;
+    }
+
+    return LoadBigIntDigit(bigint, length - 1) != 0;
+  }
+
+  macro InvertSign(sign: uint32): uint32 {
+    return sign == kPositiveSign ? kNegativeSign : kPositiveSign;
+  }
+
+  macro AllocateEmptyBigIntNoThrow(implicit context: Context)(
+      sign: uint32, length: intptr): MutableBigInt labels BigIntTooBig {
+    if (length > kBigIntMaxLength) {
+      goto BigIntTooBig;
+    }
+    const result: MutableBigInt = AllocateBigInt(length);
+
+    WriteBigIntSignAndLength(result, sign, length);
+    return result;
+  }
+
+  macro AllocateEmptyBigInt(implicit context: Context)(
+      sign: uint32, length: intptr): MutableBigInt {
+    try {
+      return AllocateEmptyBigIntNoThrow(sign, length) otherwise BigIntTooBig;
+    }
+    label BigIntTooBig {
+      ThrowRangeError(kBigIntTooBig);
+    }
+  }
+
+  macro MutableBigIntAbsoluteCompare(x: BigIntBase, y: BigIntBase): int32 {
+    return CppAbsoluteCompare(x, y);
+  }
+
+  macro MutableBigIntAbsoluteSub(implicit context: Context)(
+      x: BigInt, y: BigInt, resultSign: uint32): BigInt {
+    const xlength = ReadBigIntLength(x);
+    const ylength = ReadBigIntLength(y);
+    const xsign = ReadBigIntSign(x);
+
+    assert(MutableBigIntAbsoluteCompare(x, y) >= 0);
+    if (xlength == 0) {
+      assert(ylength == 0);
+      return x;
+    }
+
+    if (ylength == 0) {
+      return resultSign == xsign ? x : BigIntUnaryMinus(x);
+    }
+
+    const result = AllocateEmptyBigInt(resultSign, xlength);
+    CppAbsoluteSubAndCanonicalize(result, x, y);
+    return Convert(result);
+  }
+
+  macro MutableBigIntAbsoluteAdd(implicit context: Context)(
+      xBigint: BigInt, yBigint: BigInt,
+      resultSign: uint32): BigInt labels BigIntTooBig {
+    let xlength = ReadBigIntLength(xBigint);
+    let ylength = ReadBigIntLength(yBigint);
+
+    let x = xBigint;
+    let y = yBigint;
+    if (xlength < ylength) {
+      // Swap x and y so that x is longer.
+      x = yBigint;
+      y = xBigint;
+      const tempLength = xlength;
+      xlength = ylength;
+      ylength = tempLength;
+    }
+
+    // case: 0n + 0n
+    if (xlength == 0) {
+      assert(ylength == 0);
+      return x;
+    }
+
+    // case: x + 0n
+    if (ylength == 0) {
+      return resultSign == ReadBigIntSign(x) ? x : BigIntUnaryMinus(x);
+    }
+
+    // case: x + y
+    const result = AllocateEmptyBigIntNoThrow(resultSign, xlength + 1)
+        otherwise BigIntTooBig;
+    CppAbsoluteAddAndCanonicalize(result, x, y);
+    return Convert(result);
+  }
+
+  macro BigIntAddImpl(implicit context: Context)(x: BigInt, y: BigInt): BigInt
+      labels BigIntTooBig {
+    const xsign = ReadBigIntSign(x);
+    const ysign = ReadBigIntSign(y);
+    if (xsign == ysign) {
+      // x + y == x + y
+      // -x + -y == -(x + y)
+      return MutableBigIntAbsoluteAdd(x, y, xsign) otherwise BigIntTooBig;
+    }
+
+    // x + -y == x - y == -(y - x)
+    // -x + y == y - x == -(x - y)
+    if (MutableBigIntAbsoluteCompare(x, y) >= 0) {
+      return MutableBigIntAbsoluteSub(x, y, xsign);
+    }
+    return MutableBigIntAbsoluteSub(y, x, InvertSign(xsign));
+  }
+
+  builtin BigIntAddNoThrow(implicit context: Context)(x: BigInt, y: BigInt):
+      Numeric {
+    try {
+      return BigIntAddImpl(x, y) otherwise BigIntTooBig;
+    }
+    label BigIntTooBig {
+      // Smi sentinel is used to signal the BigIntTooBig exception.
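+      // A minimal caller sketch (an assumed usage, not part of this patch):
+      //   const result = BigIntAddNoThrow(x, y);
+      //   if (TaggedIsSmi(result)) goto SlowThrowingPath;
+      // This works because a true BigInt result is never a Smi, so Smi 0 is
+      // unambiguous as a too-big signal.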
+ return Convert(0); + } + } + + builtin BigIntAdd(implicit context: Context)(xNum: Numeric, yNum: Numeric): + BigInt { + try { + const x = Cast(xNum) otherwise MixedTypes; + const y = Cast(yNum) otherwise MixedTypes; + + return BigIntAddImpl(x, y) otherwise BigIntTooBig; + } + label MixedTypes { + ThrowTypeError(kBigIntMixedTypes); + } + label BigIntTooBig { + ThrowRangeError(kBigIntTooBig); + } + } + + builtin BigIntUnaryMinus(implicit context: Context)(bigint: BigInt): BigInt { + const length = ReadBigIntLength(bigint); + + // There is no -0n. + if (length == 0) { + return bigint; + } + + const result = + AllocateEmptyBigInt(InvertSign(ReadBigIntSign(bigint)), length); + for (let i: intptr = 0; i < length; ++i) { + StoreBigIntDigit(result, i, LoadBigIntDigit(bigint, i)); + } + return Convert(result); + } + +} // namespace bigint diff --git a/deps/v8/src/builtins/boolean.tq b/deps/v8/src/builtins/boolean.tq index a41ef76d2138a2..25f9ebd3961add 100644 --- a/deps/v8/src/builtins/boolean.tq +++ b/deps/v8/src/builtins/boolean.tq @@ -3,39 +3,20 @@ // found in the LICENSE file. namespace boolean { - const kNameDictionaryInitialCapacity: - constexpr int32 generates 'NameDictionary::kInitialCapacity'; - - extern macro ConstructorBuiltinsAssembler::IsDictionaryMap(Map): bool; - extern macro CodeStubAssembler::AllocateNameDictionary(constexpr int32): - NameDictionary; - - // TODO(v8:9120): This is a workaround to get access to target and new.target - // in javascript builtins. Requires cleanup once this is fully supported by - // torque. - const NEW_TARGET_INDEX: - constexpr int32 generates 'Descriptor::kJSNewTarget'; - const TARGET_INDEX: constexpr int32 generates 'Descriptor::kJSTarget'; - extern macro Parameter(constexpr int32): Object; - javascript builtin - BooleanConstructor(context: Context, receiver: Object, ...arguments): Object { + BooleanConstructor( + js-implicit context: Context, receiver: Object, newTarget: Object, + target: JSFunction)(...arguments): Object { const value = SelectBooleanConstant(ToBoolean(arguments[0])); - const newTarget = Parameter(NEW_TARGET_INDEX); if (newTarget == Undefined) { return value; } - const target = UnsafeCast(Parameter(TARGET_INDEX)); const map = GetDerivedMap(target, UnsafeCast(newTarget)); - let properties = kEmptyFixedArray; - if (IsDictionaryMap(map)) { - properties = AllocateNameDictionary(kNameDictionaryInitialCapacity); - } - const obj = UnsafeCast(AllocateJSObjectFromMap( - map, properties, kEmptyFixedArray, kNone, kWithSlackTracking)); + const obj = + UnsafeCast(AllocateFastOrSlowJSObjectFromMap(map)); obj.value = value; return obj; } diff --git a/deps/v8/src/builtins/builtins-api.cc b/deps/v8/src/builtins/builtins-api.cc index 7ee879ab51d070..0c30e521541717 100644 --- a/deps/v8/src/builtins/builtins-api.cc +++ b/deps/v8/src/builtins/builtins-api.cc @@ -32,14 +32,16 @@ JSReceiver GetCompatibleReceiver(Isolate* isolate, FunctionTemplateInfo info, JSObject js_obj_receiver = JSObject::cast(receiver); FunctionTemplateInfo signature = FunctionTemplateInfo::cast(recv_type); - // Check the receiver. Fast path for receivers with no hidden prototypes. + // Check the receiver. 
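+  // (Illustrative note, not from the original patch: a receiver
+  // instantiated from the matching FunctionTemplate satisfies IsTemplateFor
+  // directly; only a JSGlobalProxy receiver needs the prototype hop below.)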
if (signature.IsTemplateFor(js_obj_receiver)) return receiver; - if (!js_obj_receiver.map().has_hidden_prototype()) return JSReceiver(); - for (PrototypeIterator iter(isolate, js_obj_receiver, kStartAtPrototype, - PrototypeIterator::END_AT_NON_HIDDEN); - !iter.IsAtEnd(); iter.Advance()) { - JSObject current = iter.GetCurrent(); - if (signature.IsTemplateFor(current)) return current; + + // The JSGlobalProxy might have a hidden prototype. + if (V8_UNLIKELY(js_obj_receiver.IsJSGlobalProxy())) { + HeapObject prototype = js_obj_receiver.map().prototype(); + if (!prototype.IsNull(isolate)) { + JSObject js_obj_prototype = JSObject::cast(prototype); + if (signature.IsTemplateFor(js_obj_prototype)) return js_obj_prototype; + } } return JSReceiver(); } diff --git a/deps/v8/src/builtins/builtins-arguments-gen.cc b/deps/v8/src/builtins/builtins-arguments-gen.cc index 6cc9fd9623ac81..d65d57cc79b079 100644 --- a/deps/v8/src/builtins/builtins-arguments-gen.cc +++ b/deps/v8/src/builtins/builtins-arguments-gen.cc @@ -266,7 +266,8 @@ Node* ArgumentsBuiltinsAssembler::EmitFastNewSloppyArguments(Node* context, var_list1, argument_offset, mapped_offset, [this, elements, ¤t_argument](Node* offset) { Increment(¤t_argument, kSystemPointerSize); - Node* arg = LoadBufferObject(current_argument.value(), 0); + Node* arg = LoadBufferObject( + UncheckedCast(current_argument.value()), 0); StoreNoWriteBarrier(MachineRepresentation::kTagged, elements, offset, arg); }, diff --git a/deps/v8/src/builtins/builtins-array-gen.cc b/deps/v8/src/builtins/builtins-array-gen.cc index 29bcae6feb97dc..07f74cb4298db9 100644 --- a/deps/v8/src/builtins/builtins-array-gen.cc +++ b/deps/v8/src/builtins/builtins-array-gen.cc @@ -227,7 +227,7 @@ ArrayBuiltinsAssembler::ArrayBuiltinsAssembler( VariableList list({&a_, &k_, &to_}, zone()); FastLoopBody body = [&](Node* index) { - GotoIf(IsDetachedBuffer(array_buffer), detached); + GotoIf(IsDetachedBuffer(CAST(array_buffer)), detached); TNode data_ptr = LoadJSTypedArrayBackingStore(typed_array); Node* value = LoadFixedTypedArrayElementAsTagged( data_ptr, index, source_elements_kind_, SMI_PARAMETERS); @@ -402,7 +402,7 @@ TF_BUILTIN(ArrayPrototypePush, CodeStubAssembler) { CodeStubArguments args(this, ChangeInt32ToIntPtr(argc)); TNode receiver = args.GetReceiver(); TNode array_receiver; - Node* kind = nullptr; + TNode kind; Label fast(this); BranchIfFastJSArray(receiver, context, &fast, &runtime); @@ -709,19 +709,19 @@ TF_BUILTIN(ArrayFrom, ArrayPopulatorAssembler) { iterator_assembler.GetIterator(context, items, iterator_method); TNode native_context = LoadNativeContext(context); - TNode fast_iterator_result_map = - LoadContextElement(native_context, Context::ITERATOR_RESULT_MAP_INDEX); + TNode fast_iterator_result_map = CAST( + LoadContextElement(native_context, Context::ITERATOR_RESULT_MAP_INDEX)); Goto(&loop); BIND(&loop); { // Loop while iterator is not done. - TNode next = iterator_assembler.IteratorStep( + TNode next = iterator_assembler.IteratorStep( context, iterator_record, &loop_done, fast_iterator_result_map); TVARIABLE(Object, value, - CAST(iterator_assembler.IteratorValue( - context, next, fast_iterator_result_map))); + iterator_assembler.IteratorValue(context, next, + fast_iterator_result_map)); // If a map_function is supplied then call it (using this_arg as // receiver), on the value returned from the iterator. 
Exceptions are @@ -2035,8 +2035,7 @@ void ArrayBuiltinsAssembler::CreateArrayDispatchSingleArgument( &normal_sequence); { // Make elements kind holey and update elements kind in the type info. - var_elements_kind = - Signed(Word32Or(var_elements_kind.value(), Int32Constant(1))); + var_elements_kind = Word32Or(var_elements_kind.value(), Int32Constant(1)); StoreObjectFieldNoWriteBarrier( allocation_site, AllocationSite::kTransitionInfoOrBoilerplateOffset, SmiOr(transition_info, SmiConstant(fast_elements_kind_holey_mask))); diff --git a/deps/v8/src/builtins/builtins-array.cc b/deps/v8/src/builtins/builtins-array.cc index e6ab965a7ed047..96c10ed0fd545e 100644 --- a/deps/v8/src/builtins/builtins-array.cc +++ b/deps/v8/src/builtins/builtins-array.cc @@ -970,8 +970,9 @@ void CollectElementIndices(Isolate* isolate, Handle object, } case FAST_STRING_WRAPPER_ELEMENTS: case SLOW_STRING_WRAPPER_ELEMENTS: { - DCHECK(object->IsJSValue()); - Handle js_value = Handle::cast(object); + DCHECK(object->IsJSPrimitiveWrapper()); + Handle js_value = + Handle::cast(object); DCHECK(js_value->value().IsString()); Handle string(String::cast(js_value->value()), isolate); uint32_t length = static_cast(string->length()); diff --git a/deps/v8/src/builtins/builtins-async-function-gen.cc b/deps/v8/src/builtins/builtins-async-function-gen.cc index 03df1aaaad16bb..a95365e4255c36 100644 --- a/deps/v8/src/builtins/builtins-async-function-gen.cc +++ b/deps/v8/src/builtins/builtins-async-function-gen.cc @@ -36,6 +36,21 @@ void AsyncFunctionBuiltinsAssembler::AsyncFunctionAwaitResumeClosure( TNode async_function_object = CAST(LoadContextElement(context, Context::EXTENSION_INDEX)); + // Push the promise for the {async_function_object} back onto the catch + // prediction stack to handle exceptions thrown after resuming from the + // await properly. + Label if_instrumentation(this, Label::kDeferred), + if_instrumentation_done(this); + Branch(IsDebugActive(), &if_instrumentation, &if_instrumentation_done); + BIND(&if_instrumentation); + { + TNode promise = LoadObjectField( + async_function_object, JSAsyncFunctionObject::kPromiseOffset); + CallRuntime(Runtime::kDebugAsyncFunctionResumed, context, promise); + Goto(&if_instrumentation_done); + } + BIND(&if_instrumentation_done); + // Inline version of GeneratorPrototypeNext / GeneratorPrototypeReturn with // unnecessary runtime checks removed. @@ -80,27 +95,19 @@ TF_BUILTIN(AsyncFunctionEnter, AsyncFunctionBuiltinsAssembler) { Signed(IntPtrAdd(WordSar(frame_size, IntPtrConstant(kTaggedSizeLog2)), formal_parameter_count)); - // Allocate space for the promise, the async function object - // and the register file. - TNode size = IntPtrAdd( - IntPtrConstant(JSPromise::kSizeWithEmbedderFields + - JSAsyncFunctionObject::kSize + FixedArray::kHeaderSize), - Signed(WordShl(parameters_and_register_length, - IntPtrConstant(kTaggedSizeLog2)))); - TNode base = AllocateInNewSpace(size); - - // Initialize the register file. - TNode parameters_and_registers = UncheckedCast( - InnerAllocate(base, JSAsyncFunctionObject::kSize + - JSPromise::kSizeWithEmbedderFields)); - StoreMapNoWriteBarrier(parameters_and_registers, RootIndex::kFixedArrayMap); - StoreObjectFieldNoWriteBarrier(parameters_and_registers, - FixedArray::kLengthOffset, - SmiFromIntPtr(parameters_and_register_length)); + // Allocate and initialize the register file. 
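+  // Worked example (illustrative): for a function with 2 formal parameters
+  // and a 5-register bytecode frame (frame_size == 5 * kTaggedSize),
+  // parameters_and_register_length is 5 + 2 == 7, so a 7-element
+  // FixedArray is allocated and filled with undefined below.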
+ TNode parameters_and_registers = + AllocateFixedArray(HOLEY_ELEMENTS, parameters_and_register_length, + INTPTR_PARAMETERS, kAllowLargeObjectAllocation); FillFixedArrayWithValue(HOLEY_ELEMENTS, parameters_and_registers, IntPtrConstant(0), parameters_and_register_length, RootIndex::kUndefinedValue); + // Allocate space for the promise, the async function object. + TNode size = IntPtrConstant(JSPromise::kSizeWithEmbedderFields + + JSAsyncFunctionObject::kSize); + TNode base = AllocateInNewSpace(size); + // Initialize the promise. TNode native_context = LoadNativeContext(context); TNode promise_function = diff --git a/deps/v8/src/builtins/builtins-bigint-gen.cc b/deps/v8/src/builtins/builtins-bigint-gen.cc index 8a752f2517ed86..d4818f0e010a9e 100644 --- a/deps/v8/src/builtins/builtins-bigint-gen.cc +++ b/deps/v8/src/builtins/builtins-bigint-gen.cc @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. +#include "src/builtins/builtins-bigint-gen.h" #include "src/builtins/builtins-utils-gen.h" #include "src/builtins/builtins.h" #include "src/codegen/code-stub-assembler.h" diff --git a/deps/v8/src/builtins/builtins-bigint-gen.h b/deps/v8/src/builtins/builtins-bigint-gen.h new file mode 100644 index 00000000000000..288418258bf894 --- /dev/null +++ b/deps/v8/src/builtins/builtins-bigint-gen.h @@ -0,0 +1,80 @@ +// Copyright 2019 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#ifndef V8_BUILTINS_BUILTINS_BIGINT_GEN_H_ +#define V8_BUILTINS_BUILTINS_BIGINT_GEN_H_ + +#include "src/codegen/code-stub-assembler.h" +#include "src/objects/bigint.h" + +namespace v8 { +namespace internal { + +class BigIntBuiltinsAssembler : public CodeStubAssembler { + public: + explicit BigIntBuiltinsAssembler(compiler::CodeAssemblerState* state) + : CodeStubAssembler(state) {} + + TNode ReadBigIntLength(TNode value) { + TNode bitfield = LoadBigIntBitfield(value); + return ChangeInt32ToIntPtr( + Signed(DecodeWord32(bitfield))); + } + + TNode ReadBigIntSign(TNode value) { + TNode bitfield = LoadBigIntBitfield(value); + return DecodeWord32(bitfield); + } + + void WriteBigIntSignAndLength(TNode bigint, TNode sign, + TNode length) { + STATIC_ASSERT(BigIntBase::SignBits::kShift == 0); + TNode bitfield = Unsigned( + Word32Or(Word32Shl(TruncateIntPtrToInt32(length), + Int32Constant(BigIntBase::LengthBits::kShift)), + Word32And(sign, Int32Constant(BigIntBase::SignBits::kMask)))); + StoreBigIntBitfield(bigint, bitfield); + } + + void CppAbsoluteAddAndCanonicalize(TNode result, TNode x, + TNode y) { + TNode mutable_big_int_absolute_add_and_canonicalize = + ExternalConstant( + ExternalReference:: + mutable_big_int_absolute_add_and_canonicalize_function()); + CallCFunction(mutable_big_int_absolute_add_and_canonicalize, + MachineType::AnyTagged(), + std::make_pair(MachineType::AnyTagged(), result), + std::make_pair(MachineType::AnyTagged(), x), + std::make_pair(MachineType::AnyTagged(), y)); + } + + void CppAbsoluteSubAndCanonicalize(TNode result, TNode x, + TNode y) { + TNode mutable_big_int_absolute_sub_and_canonicalize = + ExternalConstant( + ExternalReference:: + mutable_big_int_absolute_sub_and_canonicalize_function()); + CallCFunction(mutable_big_int_absolute_sub_and_canonicalize, + MachineType::AnyTagged(), + std::make_pair(MachineType::AnyTagged(), result), + std::make_pair(MachineType::AnyTagged(), x), + std::make_pair(MachineType::AnyTagged(), y)); + } + + 
TNode CppAbsoluteCompare(TNode x, TNode y) { + TNode mutable_big_int_absolute_compare = + ExternalConstant( + ExternalReference::mutable_big_int_absolute_compare_function()); + TNode result = UncheckedCast( + CallCFunction(mutable_big_int_absolute_compare, MachineType::Int32(), + std::make_pair(MachineType::AnyTagged(), x), + std::make_pair(MachineType::AnyTagged(), y))); + return result; + } +}; + +} // namespace internal +} // namespace v8 +#endif // V8_BUILTINS_BUILTINS_BIGINT_GEN_H_ diff --git a/deps/v8/src/builtins/builtins-bigint.cc b/deps/v8/src/builtins/builtins-bigint.cc index a8a847ef479b08..09d71a056275cb 100644 --- a/deps/v8/src/builtins/builtins-bigint.cc +++ b/deps/v8/src/builtins/builtins-bigint.cc @@ -80,10 +80,10 @@ MaybeHandle ThisBigIntValue(Isolate* isolate, Handle value, // 1. If Type(value) is BigInt, return value. if (value->IsBigInt()) return Handle::cast(value); // 2. If Type(value) is Object and value has a [[BigIntData]] internal slot: - if (value->IsJSValue()) { + if (value->IsJSPrimitiveWrapper()) { // 2a. Assert: value.[[BigIntData]] is a BigInt value. // 2b. Return value.[[BigIntData]]. - Object data = JSValue::cast(*value).value(); + Object data = JSPrimitiveWrapper::cast(*value).value(); if (data.IsBigInt()) return handle(BigInt::cast(data), isolate); } // 3. Throw a TypeError exception. diff --git a/deps/v8/src/builtins/builtins-boolean-gen.cc b/deps/v8/src/builtins/builtins-boolean-gen.cc index 30cf7ba0c1972f..74474a8918f15c 100644 --- a/deps/v8/src/builtins/builtins-boolean-gen.cc +++ b/deps/v8/src/builtins/builtins-boolean-gen.cc @@ -15,22 +15,23 @@ namespace internal { // ES6 #sec-boolean.prototype.tostring TF_BUILTIN(BooleanPrototypeToString, CodeStubAssembler) { - Node* context = Parameter(Descriptor::kContext); - Node* receiver = Parameter(Descriptor::kReceiver); + TNode context = CAST(Parameter(Descriptor::kContext)); + TNode receiver = CAST(Parameter(Descriptor::kReceiver)); - Node* value = ToThisValue(context, receiver, PrimitiveType::kBoolean, - "Boolean.prototype.toString"); - Node* result = LoadObjectField(value, Oddball::kToStringOffset); + TNode value = + CAST(ToThisValue(context, receiver, PrimitiveType::kBoolean, + "Boolean.prototype.toString")); + TNode result = CAST(LoadObjectField(value, Oddball::kToStringOffset)); Return(result); } // ES6 #sec-boolean.prototype.valueof TF_BUILTIN(BooleanPrototypeValueOf, CodeStubAssembler) { - Node* context = Parameter(Descriptor::kContext); - Node* receiver = Parameter(Descriptor::kReceiver); + TNode context = CAST(Parameter(Descriptor::kContext)); + TNode receiver = CAST(Parameter(Descriptor::kReceiver)); - Node* result = ToThisValue(context, receiver, PrimitiveType::kBoolean, - "Boolean.prototype.valueOf"); + TNode result = CAST(ToThisValue( + context, receiver, PrimitiveType::kBoolean, "Boolean.prototype.valueOf")); Return(result); } diff --git a/deps/v8/src/builtins/builtins-call-gen.cc b/deps/v8/src/builtins/builtins-call-gen.cc index 05142a8f079a3b..deb91dee246811 100644 --- a/deps/v8/src/builtins/builtins-call-gen.cc +++ b/deps/v8/src/builtins/builtins-call-gen.cc @@ -475,14 +475,13 @@ TNode CallOrConstructBuiltinsAssembler::GetCompatibleReceiver( BIND(&holder_next); { - // Continue with the hidden prototype of the {holder} if it - // has one, or throw an illegal invocation exception, since - // the receiver did not pass the {signature} check. 
+ // Continue with the hidden prototype of the {holder} if it is a + // JSGlobalProxy (the hidden prototype can either be null or a + // JSObject in that case), or throw an illegal invocation exception, + // since the receiver did not pass the {signature} check. TNode holder_map = LoadMap(holder); var_holder = LoadMapPrototype(holder_map); - GotoIf(IsSetWord32(LoadMapBitField2(holder_map), - Map::HasHiddenPrototypeBit::kMask), - &holder_loop); + GotoIf(IsJSGlobalProxyMap(holder_map), &holder_loop); ThrowTypeError(context, MessageTemplate::kIllegalInvocation); } } diff --git a/deps/v8/src/builtins/builtins-callsite.cc b/deps/v8/src/builtins/builtins-callsite.cc index d98eba4eeb8ffa..d1082291ef1414 100644 --- a/deps/v8/src/builtins/builtins-callsite.cc +++ b/deps/v8/src/builtins/builtins-callsite.cc @@ -8,6 +8,7 @@ #include "src/logging/counters.h" #include "src/objects/frame-array-inl.h" #include "src/objects/objects-inl.h" +#include "src/objects/stack-frame-info.h" namespace v8 { namespace internal { @@ -76,6 +77,9 @@ BUILTIN(CallSitePrototypeGetFunction) { StackFrameBase* frame = it.Frame(); if (frame->IsStrict()) return ReadOnlyRoots(isolate).undefined_value(); + + isolate->CountUsage(v8::Isolate::kCallSiteAPIGetFunctionSloppyCall); + return *frame->GetFunction(); } @@ -135,6 +139,9 @@ BUILTIN(CallSitePrototypeGetThis) { StackFrameBase* frame = it.Frame(); if (frame->IsStrict()) return ReadOnlyRoots(isolate).undefined_value(); + + isolate->CountUsage(v8::Isolate::kCallSiteAPIGetThisSloppyCall); + return *frame->GetReceiver(); } @@ -197,9 +204,9 @@ BUILTIN(CallSitePrototypeIsToplevel) { BUILTIN(CallSitePrototypeToString) { HandleScope scope(isolate); CHECK_CALLSITE(recv, "toString"); - FrameArrayIterator it(isolate, GetFrameArray(isolate, recv), - GetFrameIndex(isolate, recv)); - RETURN_RESULT_OR_FAILURE(isolate, it.Frame()->ToString()); + Handle frame = isolate->factory()->NewStackTraceFrame( + GetFrameArray(isolate, recv), GetFrameIndex(isolate, recv)); + RETURN_RESULT_OR_FAILURE(isolate, SerializeStackTraceFrame(isolate, frame)); } #undef CHECK_CALLSITE diff --git a/deps/v8/src/builtins/builtins-collections-gen.cc b/deps/v8/src/builtins/builtins-collections-gen.cc index b5a9851c7041c5..613e5f10ff2f17 100644 --- a/deps/v8/src/builtins/builtins-collections-gen.cc +++ b/deps/v8/src/builtins/builtins-collections-gen.cc @@ -66,19 +66,19 @@ class BaseCollectionsAssembler : public CodeStubAssembler { TNode iterable); // Constructs a collection instance. Choosing a fast path when possible. - TNode AllocateJSCollection(TNode context, - TNode constructor, - TNode new_target); + TNode AllocateJSCollection(TNode context, + TNode constructor, + TNode new_target); // Fast path for constructing a collection instance if the constructor // function has not been modified. - TNode AllocateJSCollectionFast(TNode constructor); + TNode AllocateJSCollectionFast(TNode constructor); // Fallback for constructing a collection instance if the constructor function // has been modified. - TNode AllocateJSCollectionSlow(TNode context, - TNode constructor, - TNode new_target); + TNode AllocateJSCollectionSlow(TNode context, + TNode constructor, + TNode new_target); // Allocates the backing store for a collection. 
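+  // (Illustrative note: the concrete table type depends on the variant;
+  // e.g. Map and Set use OrderedHashMap/OrderedHashSet backing stores. The
+  // weak-variant table type is assumed here, not spelled out in this hunk.)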
virtual TNode AllocateTable(Variant variant, TNode context, @@ -320,17 +320,17 @@ void BaseCollectionsAssembler::AddConstructorEntriesFromIterable( CSA_ASSERT(this, Word32BinaryNot(IsUndefined(iterator.object))); - TNode fast_iterator_result_map = - LoadContextElement(native_context, Context::ITERATOR_RESULT_MAP_INDEX); + TNode fast_iterator_result_map = CAST( + LoadContextElement(native_context, Context::ITERATOR_RESULT_MAP_INDEX)); TVARIABLE(Object, var_exception); Goto(&loop); BIND(&loop); { - TNode next = iterator_assembler.IteratorStep( + TNode next = iterator_assembler.IteratorStep( context, iterator, &exit, fast_iterator_result_map); - TNode next_value = CAST(iterator_assembler.IteratorValue( - context, next, fast_iterator_result_map)); + TNode next_value = iterator_assembler.IteratorValue( + context, next, fast_iterator_result_map); AddConstructorEntry(variant, context, collection, add_func, next_value, nullptr, &if_exception, &var_exception); Goto(&loop); @@ -367,33 +367,33 @@ void BaseCollectionsAssembler::GotoIfInitialAddFunctionModified( GetAddFunctionNameIndex(variant), if_modified); } -TNode BaseCollectionsAssembler::AllocateJSCollection( +TNode BaseCollectionsAssembler::AllocateJSCollection( TNode context, TNode constructor, - TNode new_target) { + TNode new_target) { TNode is_target_unmodified = WordEqual(constructor, new_target); - return Select(is_target_unmodified, - [=] { return AllocateJSCollectionFast(constructor); }, - [=] { - return AllocateJSCollectionSlow(context, constructor, - new_target); - }); + return Select( + is_target_unmodified, + [=] { return AllocateJSCollectionFast(constructor); }, + [=] { + return AllocateJSCollectionSlow(context, constructor, new_target); + }); } -TNode BaseCollectionsAssembler::AllocateJSCollectionFast( - TNode constructor) { +TNode BaseCollectionsAssembler::AllocateJSCollectionFast( + TNode constructor) { CSA_ASSERT(this, IsConstructorMap(LoadMap(constructor))); - TNode initial_map = - LoadObjectField(constructor, JSFunction::kPrototypeOrInitialMapOffset); - return CAST(AllocateJSObjectFromMap(initial_map)); + TNode initial_map = + CAST(LoadJSFunctionPrototypeOrInitialMap(constructor)); + return AllocateJSObjectFromMap(initial_map); } -TNode BaseCollectionsAssembler::AllocateJSCollectionSlow( +TNode BaseCollectionsAssembler::AllocateJSCollectionSlow( TNode context, TNode constructor, - TNode new_target) { + TNode new_target) { ConstructorBuiltinsAssembler constructor_assembler(this->state()); - return CAST(constructor_assembler.EmitFastNewObject(context, constructor, - new_target)); + return constructor_assembler.EmitFastNewObject(context, constructor, + new_target); } void BaseCollectionsAssembler::GenerateConstructor( @@ -408,7 +408,7 @@ void BaseCollectionsAssembler::GenerateConstructor( TNode native_context = LoadNativeContext(context); TNode collection = AllocateJSCollection( - context, GetConstructor(variant, native_context), new_target); + context, GetConstructor(variant, native_context), CAST(new_target)); AddConstructorEntries(variant, context, native_context, collection, iterable); Return(collection); diff --git a/deps/v8/src/builtins/builtins-console.cc b/deps/v8/src/builtins/builtins-console.cc index 973f1785d1ad62..9ab3566cecf32a 100644 --- a/deps/v8/src/builtins/builtins-console.cc +++ b/deps/v8/src/builtins/builtins-console.cc @@ -39,7 +39,8 @@ namespace internal { namespace { void ConsoleCall( - Isolate* isolate, internal::BuiltinArguments& args, + Isolate* isolate, + internal::BuiltinArguments& args, // 
NOLINT(runtime/references) void (debug::ConsoleDelegate::*func)(const v8::debug::ConsoleCallArguments&, const v8::debug::ConsoleContext&)) { CHECK(!isolate->has_pending_exception()); diff --git a/deps/v8/src/builtins/builtins-constructor-gen.cc b/deps/v8/src/builtins/builtins-constructor-gen.cc index a725f3c4a1e5f5..767e626432e681 100644 --- a/deps/v8/src/builtins/builtins-constructor-gen.cc +++ b/deps/v8/src/builtins/builtins-constructor-gen.cc @@ -147,44 +147,40 @@ TF_BUILTIN(FastNewClosure, ConstructorBuiltinsAssembler) { } TF_BUILTIN(FastNewObject, ConstructorBuiltinsAssembler) { - Node* context = Parameter(Descriptor::kContext); - Node* target = Parameter(Descriptor::kTarget); - Node* new_target = Parameter(Descriptor::kNewTarget); + TNode context = CAST(Parameter(Descriptor::kContext)); + TNode target = CAST(Parameter(Descriptor::kTarget)); + TNode new_target = CAST(Parameter(Descriptor::kNewTarget)); Label call_runtime(this); - Node* result = EmitFastNewObject(context, target, new_target, &call_runtime); + TNode result = + EmitFastNewObject(context, target, new_target, &call_runtime); Return(result); BIND(&call_runtime); TailCallRuntime(Runtime::kNewObject, context, target, new_target); } -Node* ConstructorBuiltinsAssembler::EmitFastNewObject(Node* context, - Node* target, - Node* new_target) { - VARIABLE(var_obj, MachineRepresentation::kTagged); +compiler::TNode ConstructorBuiltinsAssembler::EmitFastNewObject( + SloppyTNode context, SloppyTNode target, + SloppyTNode new_target) { + TVARIABLE(JSObject, var_obj); Label call_runtime(this), end(this); - Node* result = EmitFastNewObject(context, target, new_target, &call_runtime); - var_obj.Bind(result); + var_obj = EmitFastNewObject(context, target, new_target, &call_runtime); Goto(&end); BIND(&call_runtime); - var_obj.Bind(CallRuntime(Runtime::kNewObject, context, target, new_target)); + var_obj = CAST(CallRuntime(Runtime::kNewObject, context, target, new_target)); Goto(&end); BIND(&end); return var_obj.value(); } -Node* ConstructorBuiltinsAssembler::EmitFastNewObject(Node* context, - Node* target, - Node* new_target, - Label* call_runtime) { - CSA_ASSERT(this, HasInstanceType(target, JS_FUNCTION_TYPE)); - CSA_ASSERT(this, IsJSReceiver(new_target)); - +compiler::TNode ConstructorBuiltinsAssembler::EmitFastNewObject( + SloppyTNode context, SloppyTNode target, + SloppyTNode new_target, Label* call_runtime) { // Verify that the new target is a JSFunction. 
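+  // (Illustrative note: new.target need not be a JSFunction; for example
+  // Reflect.construct(fn, args, proxyNewTarget) can pass a JSProxy here,
+  // which is why the non-JSFunction case falls back to the runtime.)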
Label fast(this), end(this); GotoIf(HasInstanceType(new_target, JS_FUNCTION_TYPE), &fast); @@ -732,7 +728,7 @@ TF_BUILTIN(NumberConstructor, ConstructorBuiltinsAssembler) { TNode target = LoadTargetFromFrame(); Node* result = CallBuiltin(Builtins::kFastNewObject, context, target, new_target); - StoreObjectField(result, JSValue::kValueOffset, n_value); + StoreObjectField(result, JSPrimitiveWrapper::kValueOffset, n_value); args.PopAndReturn(result); } } @@ -798,7 +794,7 @@ TF_BUILTIN(StringConstructor, ConstructorBuiltinsAssembler) { Node* result = CallBuiltin(Builtins::kFastNewObject, context, target, new_target); - StoreObjectField(result, JSValue::kValueOffset, s_value); + StoreObjectField(result, JSPrimitiveWrapper::kValueOffset, s_value); args.PopAndReturn(result); } } diff --git a/deps/v8/src/builtins/builtins-constructor-gen.h b/deps/v8/src/builtins/builtins-constructor-gen.h index 9093a5a77bbbaf..9208506c79eced 100644 --- a/deps/v8/src/builtins/builtins-constructor-gen.h +++ b/deps/v8/src/builtins/builtins-constructor-gen.h @@ -31,10 +31,14 @@ class ConstructorBuiltinsAssembler : public CodeStubAssembler { Label* call_runtime); Node* EmitCreateEmptyObjectLiteral(Node* context); - Node* EmitFastNewObject(Node* context, Node* target, Node* new_target); - - Node* EmitFastNewObject(Node* context, Node* target, Node* new_target, - Label* call_runtime); + TNode EmitFastNewObject(SloppyTNode context, + SloppyTNode target, + SloppyTNode new_target); + + TNode EmitFastNewObject(SloppyTNode context, + SloppyTNode target, + SloppyTNode new_target, + Label* call_runtime); }; } // namespace internal diff --git a/deps/v8/src/builtins/builtins-conversion-gen.cc b/deps/v8/src/builtins/builtins-conversion-gen.cc index bc7e349ce1f09b..71a9cbf1452836 100644 --- a/deps/v8/src/builtins/builtins-conversion-gen.cc +++ b/deps/v8/src/builtins/builtins-conversion-gen.cc @@ -392,7 +392,8 @@ TF_BUILTIN(ToInteger_TruncateMinusZero, CodeStubAssembler) { // ES6 section 7.1.13 ToObject (argument) TF_BUILTIN(ToObject, CodeStubAssembler) { Label if_smi(this, Label::kDeferred), if_jsreceiver(this), - if_noconstructor(this, Label::kDeferred), if_wrapjsvalue(this); + if_noconstructor(this, Label::kDeferred), + if_wrapjs_primitive_wrapper(this); Node* context = Parameter(Descriptor::kContext); Node* object = Parameter(Descriptor::kArgument); @@ -411,27 +412,30 @@ TF_BUILTIN(ToObject, CodeStubAssembler) { IntPtrConstant(Map::kNoConstructorFunctionIndex)), &if_noconstructor); constructor_function_index_var.Bind(constructor_function_index); - Goto(&if_wrapjsvalue); + Goto(&if_wrapjs_primitive_wrapper); BIND(&if_smi); constructor_function_index_var.Bind( IntPtrConstant(Context::NUMBER_FUNCTION_INDEX)); - Goto(&if_wrapjsvalue); + Goto(&if_wrapjs_primitive_wrapper); - BIND(&if_wrapjsvalue); + BIND(&if_wrapjs_primitive_wrapper); TNode native_context = LoadNativeContext(context); Node* constructor = LoadContextElement( native_context, constructor_function_index_var.value()); Node* initial_map = LoadObjectField(constructor, JSFunction::kPrototypeOrInitialMapOffset); - Node* js_value = Allocate(JSValue::kSize); - StoreMapNoWriteBarrier(js_value, initial_map); - StoreObjectFieldRoot(js_value, JSValue::kPropertiesOrHashOffset, + Node* js_primitive_wrapper = Allocate(JSPrimitiveWrapper::kSize); + StoreMapNoWriteBarrier(js_primitive_wrapper, initial_map); + StoreObjectFieldRoot(js_primitive_wrapper, + JSPrimitiveWrapper::kPropertiesOrHashOffset, RootIndex::kEmptyFixedArray); - StoreObjectFieldRoot(js_value, JSObject::kElementsOffset, 
+ StoreObjectFieldRoot(js_primitive_wrapper, + JSPrimitiveWrapper::kElementsOffset, RootIndex::kEmptyFixedArray); - StoreObjectField(js_value, JSValue::kValueOffset, object); - Return(js_value); + StoreObjectField(js_primitive_wrapper, JSPrimitiveWrapper::kValueOffset, + object); + Return(js_primitive_wrapper); BIND(&if_noconstructor); ThrowTypeError(context, MessageTemplate::kUndefinedOrNullToObject, diff --git a/deps/v8/src/builtins/builtins-data-view-gen.h b/deps/v8/src/builtins/builtins-data-view-gen.h index eeb84f34dbfff4..d5c6571880fd4c 100644 --- a/deps/v8/src/builtins/builtins-data-view-gen.h +++ b/deps/v8/src/builtins/builtins-data-view-gen.h @@ -17,13 +17,13 @@ class DataViewBuiltinsAssembler : public CodeStubAssembler { explicit DataViewBuiltinsAssembler(compiler::CodeAssemblerState* state) : CodeStubAssembler(state) {} - TNode LoadUint8(TNode data_pointer, TNode offset) { - return UncheckedCast( + TNode LoadUint8(TNode data_pointer, TNode offset) { + return UncheckedCast( Load(MachineType::Uint8(), data_pointer, offset)); } - TNode LoadInt8(TNode data_pointer, TNode offset) { - return UncheckedCast( + TNode LoadInt8(TNode data_pointer, TNode offset) { + return UncheckedCast( Load(MachineType::Int8(), data_pointer, offset)); } diff --git a/deps/v8/src/builtins/builtins-definitions.h b/deps/v8/src/builtins/builtins-definitions.h index 3412edb89d0aae..23ab4a88ca14ff 100644 --- a/deps/v8/src/builtins/builtins-definitions.h +++ b/deps/v8/src/builtins/builtins-definitions.h @@ -103,8 +103,8 @@ namespace internal { \ /* String helpers */ \ TFC(StringCharAt, StringAt) \ - TFC(StringCodePointAtUTF16, StringAt) \ - TFC(StringCodePointAtUTF32, StringAt) \ + TFC(StringCodePointAt, StringAt) \ + TFC(StringFromCodePointAt, StringAtAsString) \ TFC(StringEqual, Compare) \ TFC(StringGreaterThan, Compare) \ TFC(StringGreaterThanOrEqual, Compare) \ @@ -170,7 +170,9 @@ namespace internal { \ /* Adapters for Turbofan into runtime */ \ TFC(AllocateInYoungGeneration, Allocate) \ + TFC(AllocateRegularInYoungGeneration, Allocate) \ TFC(AllocateInOldGeneration, Allocate) \ + TFC(AllocateRegularInOldGeneration, Allocate) \ \ /* TurboFan support builtins */ \ TFS(CopyFastSmiOrObjectElements, kObject) \ @@ -266,7 +268,7 @@ namespace internal { \ /* Abort */ \ TFC(Abort, Abort) \ - TFC(AbortJS, Abort) \ + TFC(AbortCSAAssert, Abort) \ \ /* Built-in functions for Javascript */ \ /* Special internal builtins */ \ @@ -726,16 +728,12 @@ namespace internal { CPP(ObjectGetOwnPropertyDescriptors) \ TFJ(ObjectGetOwnPropertyNames, 1, kReceiver, kObject) \ CPP(ObjectGetOwnPropertySymbols) \ - CPP(ObjectGetPrototypeOf) \ - CPP(ObjectSetPrototypeOf) \ TFJ(ObjectIs, 2, kReceiver, kLeft, kRight) \ - CPP(ObjectIsExtensible) \ CPP(ObjectIsFrozen) \ CPP(ObjectIsSealed) \ TFJ(ObjectKeys, 1, kReceiver, kObject) \ CPP(ObjectLookupGetter) \ CPP(ObjectLookupSetter) \ - CPP(ObjectPreventExtensions) \ /* ES6 #sec-object.prototype.tostring */ \ TFJ(ObjectPrototypeToString, 0, kReceiver) \ /* ES6 #sec-object.prototype.valueof */ \ @@ -823,16 +821,10 @@ namespace internal { ASM(ReflectApply, Dummy) \ ASM(ReflectConstruct, Dummy) \ CPP(ReflectDefineProperty) \ - CPP(ReflectDeleteProperty) \ - CPP(ReflectGet) \ CPP(ReflectGetOwnPropertyDescriptor) \ - CPP(ReflectGetPrototypeOf) \ TFJ(ReflectHas, 2, kReceiver, kTarget, kKey) \ - CPP(ReflectIsExtensible) \ CPP(ReflectOwnKeys) \ - CPP(ReflectPreventExtensions) \ CPP(ReflectSet) \ - CPP(ReflectSetPrototypeOf) \ \ /* RegExp */ \ CPP(RegExpCapture1Getter) \ @@ -1150,6 +1142,7 @@ namespace 
internal { ASM(StackCheck, Dummy) \ ASM(DoubleToI, Dummy) \ TFC(GetProperty, GetProperty) \ + TFS(GetPropertyWithReceiver, kObject, kKey, kReceiver, kOnNonExistent) \ TFS(SetProperty, kReceiver, kKey, kValue) \ TFS(SetPropertyInLiteral, kReceiver, kKey, kValue) \ ASM(MemCopyUint8Uint8, CCall) \ diff --git a/deps/v8/src/builtins/builtins-error.cc b/deps/v8/src/builtins/builtins-error.cc index e099baeb34f663..3bcc7356d42c10 100644 --- a/deps/v8/src/builtins/builtins-error.cc +++ b/deps/v8/src/builtins/builtins-error.cc @@ -31,10 +31,11 @@ BUILTIN(ErrorConstructor) { } RETURN_RESULT_OR_FAILURE( - isolate, ErrorUtils::Construct(isolate, args.target(), - Handle::cast(args.new_target()), - args.atOrUndefined(isolate, 1), mode, - caller, false)); + isolate, + ErrorUtils::Construct(isolate, args.target(), + Handle::cast(args.new_target()), + args.atOrUndefined(isolate, 1), mode, caller, + ErrorUtils::StackTraceCollection::kDetailed)); } // static diff --git a/deps/v8/src/builtins/builtins-global.cc b/deps/v8/src/builtins/builtins-global.cc index 53e974c4521908..137f7f34021509 100644 --- a/deps/v8/src/builtins/builtins-global.cc +++ b/deps/v8/src/builtins/builtins-global.cc @@ -86,17 +86,27 @@ BUILTIN(GlobalEval) { Handle x = args.atOrUndefined(isolate, 1); Handle target = args.target(); Handle target_global_proxy(target->global_proxy(), isolate); - if (!x->IsString()) return *x; if (!Builtins::AllowDynamicFunction(isolate, target, target_global_proxy)) { isolate->CountUsage(v8::Isolate::kFunctionConstructorReturnedUndefined); return ReadOnlyRoots(isolate).undefined_value(); } + + // Run embedder pre-checks before executing eval. If the argument is a + // non-String (or other object the embedder doesn't know to handle), then + // return it directly. + MaybeHandle source; + bool unhandled_object; + std::tie(source, unhandled_object) = + Compiler::ValidateDynamicCompilationSource( + isolate, handle(target->native_context(), isolate), x); + if (unhandled_object) return *x; + Handle function; ASSIGN_RETURN_FAILURE_ON_EXCEPTION( isolate, function, - Compiler::GetFunctionFromString(handle(target->native_context(), isolate), - Handle::cast(x), - NO_PARSE_RESTRICTION, kNoSourcePosition)); + Compiler::GetFunctionFromValidatedString( + handle(target->native_context(), isolate), source, + NO_PARSE_RESTRICTION, kNoSourcePosition)); RETURN_RESULT_OR_FAILURE( isolate, Execution::Call(isolate, function, target_global_proxy, 0, nullptr)); diff --git a/deps/v8/src/builtins/builtins-handler-gen.cc b/deps/v8/src/builtins/builtins-handler-gen.cc index d1b50f2cdcdf35..973356f569cb67 100644 --- a/deps/v8/src/builtins/builtins-handler-gen.cc +++ b/deps/v8/src/builtins/builtins-handler-gen.cc @@ -28,7 +28,8 @@ class HandlerBuiltinsAssembler : public CodeStubAssembler { // kind. Use with caution. This produces a *lot* of code. using ElementsKindSwitchCase = std::function; void DispatchByElementsKind(TNode elements_kind, - const ElementsKindSwitchCase& case_function); + const ElementsKindSwitchCase& case_function, + bool handle_typed_elements_kind); // Dispatches over all possible combinations of {from,to} elements kinds. 
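+  // (Illustrative example: a keyed store that must go from
+  // PACKED_SMI_ELEMENTS to HOLEY_DOUBLE_ELEMENTS first transitions the map
+  // and then performs the store, so the dispatcher emits one case per
+  // valid {from,to} pair.)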
using ElementsKindTransitionSwitchCase = @@ -48,7 +49,7 @@ TF_BUILTIN(LoadIC_StringLength, CodeStubAssembler) { TF_BUILTIN(LoadIC_StringWrapperLength, CodeStubAssembler) { Node* value = Parameter(Descriptor::kReceiver); - Node* string = LoadJSValueValue(value); + Node* string = LoadJSPrimitiveWrapperValue(value); Return(LoadStringLengthAsSmi(string)); } @@ -227,7 +228,7 @@ void HandlerBuiltinsAssembler::Generate_ElementsTransitionAndStore( [=, &miss](ElementsKind from_kind, ElementsKind to_kind) { TransitionElementsKind(receiver, map, from_kind, to_kind, &miss); EmitElementStore(receiver, key, value, to_kind, store_mode, &miss, - context); + context, nullptr); }); Return(value); } @@ -280,7 +281,8 @@ TF_BUILTIN(ElementsTransitionAndStore_NoTransitionHandleCOW, V(BIGINT64_ELEMENTS) void HandlerBuiltinsAssembler::DispatchByElementsKind( - TNode elements_kind, const ElementsKindSwitchCase& case_function) { + TNode elements_kind, const ElementsKindSwitchCase& case_function, + bool handle_typed_elements_kind) { Label next(this), if_unknown_type(this, Label::kDeferred); int32_t elements_kinds[] = { @@ -300,6 +302,8 @@ void HandlerBuiltinsAssembler::DispatchByElementsKind( }; STATIC_ASSERT(arraysize(elements_kinds) == arraysize(elements_kind_labels)); + // TODO(mythria): Do not emit cases for typed elements kind when + // handle_typed_elements is false to decrease the size of the jump table. Switch(elements_kind, &if_unknown_type, elements_kinds, elements_kind_labels, arraysize(elements_kinds)); @@ -310,6 +314,9 @@ void HandlerBuiltinsAssembler::DispatchByElementsKind( IsFrozenOrSealedElementsKindUnchecked(KIND)) { \ /* Disable support for frozen or sealed elements kinds. */ \ Unreachable(); \ + } else if (!handle_typed_elements_kind && \ + IsTypedArrayElementsKind(KIND)) { \ + Unreachable(); \ } else { \ case_function(KIND); \ Goto(&next); \ @@ -340,17 +347,26 @@ void HandlerBuiltinsAssembler::Generate_StoreFastElementIC( Label miss(this); + bool handle_typed_elements_kind = + store_mode == STANDARD_STORE || store_mode == STORE_IGNORE_OUT_OF_BOUNDS; + // For typed arrays maybe_converted_value contains the value obtained after + // calling ToNumber. We should pass the converted value to the runtime to + // avoid doing the user visible conversion again. + VARIABLE(maybe_converted_value, MachineRepresentation::kTagged, value); + maybe_converted_value.Bind(value); // TODO(v8:8481): Pass elements_kind in feedback vector slots. 
- DispatchByElementsKind(LoadElementsKind(receiver), - [=, &miss](ElementsKind elements_kind) { - EmitElementStore(receiver, key, value, elements_kind, - store_mode, &miss, context); - }); + DispatchByElementsKind( + LoadElementsKind(receiver), + [=, &miss, &maybe_converted_value](ElementsKind elements_kind) { + EmitElementStore(receiver, key, value, elements_kind, store_mode, &miss, + context, &maybe_converted_value); + }, + handle_typed_elements_kind); Return(value); BIND(&miss); - TailCallRuntime(Runtime::kKeyedStoreIC_Miss, context, value, slot, vector, - receiver, key); + TailCallRuntime(Runtime::kKeyedStoreIC_Miss, context, + maybe_converted_value.value(), slot, vector, receiver, key); } TF_BUILTIN(StoreFastElementIC_Standard, HandlerBuiltinsAssembler) { diff --git a/deps/v8/src/builtins/builtins-internal-gen.cc b/deps/v8/src/builtins/builtins-internal-gen.cc index baaadb722ad153..8d22767b587d94 100644 --- a/deps/v8/src/builtins/builtins-internal-gen.cc +++ b/deps/v8/src/builtins/builtins-internal-gen.cc @@ -614,8 +614,9 @@ class SetOrCopyDataPropertiesAssembler : public CodeStubAssembler { Label if_done(this), if_noelements(this), if_sourcenotjsobject(this, Label::kDeferred); - // JSValue wrappers for numbers don't have any enumerable own properties, - // so we can immediately skip the whole operation if {source} is a Smi. + // JSPrimitiveWrapper wrappers for numbers don't have any enumerable own + // properties, so we can immediately skip the whole operation if {source} is + // a Smi. GotoIf(TaggedIsSmi(source), &if_done); // Otherwise check if {source} is a proper JSObject, and if not, defer @@ -809,17 +810,49 @@ TF_BUILTIN(AdaptorWithBuiltinExitFrame, CodeStubAssembler) { TF_BUILTIN(AllocateInYoungGeneration, CodeStubAssembler) { TNode requested_size = UncheckedCast(Parameter(Descriptor::kRequestedSize)); + CSA_CHECK(this, IsValidPositiveSmi(requested_size)); + TNode allocation_flags = + SmiConstant(Smi::FromInt(AllocateDoubleAlignFlag::encode(false) | + AllowLargeObjectAllocationFlag::encode(true))); TailCallRuntime(Runtime::kAllocateInYoungGeneration, NoContextConstant(), - SmiFromIntPtr(requested_size)); + SmiFromIntPtr(requested_size), allocation_flags); +} + +TF_BUILTIN(AllocateRegularInYoungGeneration, CodeStubAssembler) { + TNode requested_size = + UncheckedCast(Parameter(Descriptor::kRequestedSize)); + CSA_CHECK(this, IsValidPositiveSmi(requested_size)); + + TNode allocation_flags = + SmiConstant(Smi::FromInt(AllocateDoubleAlignFlag::encode(false) | + AllowLargeObjectAllocationFlag::encode(false))); + TailCallRuntime(Runtime::kAllocateInYoungGeneration, NoContextConstant(), + SmiFromIntPtr(requested_size), allocation_flags); } TF_BUILTIN(AllocateInOldGeneration, CodeStubAssembler) { TNode requested_size = UncheckedCast(Parameter(Descriptor::kRequestedSize)); + CSA_CHECK(this, IsValidPositiveSmi(requested_size)); + + TNode runtime_flags = + SmiConstant(Smi::FromInt(AllocateDoubleAlignFlag::encode(false) | + AllowLargeObjectAllocationFlag::encode(true))); + TailCallRuntime(Runtime::kAllocateInOldGeneration, NoContextConstant(), + SmiFromIntPtr(requested_size), runtime_flags); +} + +TF_BUILTIN(AllocateRegularInOldGeneration, CodeStubAssembler) { + TNode requested_size = + UncheckedCast(Parameter(Descriptor::kRequestedSize)); + CSA_CHECK(this, IsValidPositiveSmi(requested_size)); + TNode runtime_flags = + SmiConstant(Smi::FromInt(AllocateDoubleAlignFlag::encode(false) | + AllowLargeObjectAllocationFlag::encode(false))); TailCallRuntime(Runtime::kAllocateInOldGeneration, 
NoContextConstant(), - SmiFromIntPtr(requested_size), SmiConstant(0)); + SmiFromIntPtr(requested_size), runtime_flags); } TF_BUILTIN(Abort, CodeStubAssembler) { @@ -827,9 +860,9 @@ TF_BUILTIN(Abort, CodeStubAssembler) { TailCallRuntime(Runtime::kAbort, NoContextConstant(), message_id); } -TF_BUILTIN(AbortJS, CodeStubAssembler) { +TF_BUILTIN(AbortCSAAssert, CodeStubAssembler) { TNode message = CAST(Parameter(Descriptor::kMessageOrMessageId)); - TailCallRuntime(Runtime::kAbortJS, NoContextConstant(), message); + TailCallRuntime(Runtime::kAbortCSAAssert, NoContextConstant(), message); } void Builtins::Generate_CEntry_Return1_DontSaveFPRegs_ArgvOnStack_NoBuiltinExit( @@ -907,6 +940,8 @@ TF_BUILTIN(GetProperty, CodeStubAssembler) { Node* object = Parameter(Descriptor::kObject); Node* key = Parameter(Descriptor::kKey); Node* context = Parameter(Descriptor::kContext); + // TODO(duongn): consider tailcalling to GetPropertyWithReceiver(object, + // object, key, OnNonExistent::kReturnUndefined). Label if_notfound(this), if_proxy(this, Label::kDeferred), if_slow(this, Label::kDeferred); @@ -932,7 +967,7 @@ TF_BUILTIN(GetProperty, CodeStubAssembler) { Goto(if_bailout); }; - TryPrototypeChainLookup(object, key, lookup_property_in_holder, + TryPrototypeChainLookup(object, object, key, lookup_property_in_holder, lookup_element_in_holder, &if_notfound, &if_slow, &if_proxy); @@ -955,6 +990,74 @@ TF_BUILTIN(GetProperty, CodeStubAssembler) { } } +// ES6 [[Get]] operation with Receiver. +TF_BUILTIN(GetPropertyWithReceiver, CodeStubAssembler) { + Node* object = Parameter(Descriptor::kObject); + Node* key = Parameter(Descriptor::kKey); + Node* context = Parameter(Descriptor::kContext); + Node* receiver = Parameter(Descriptor::kReceiver); + Node* on_non_existent = Parameter(Descriptor::kOnNonExistent); + Label if_notfound(this), if_proxy(this, Label::kDeferred), + if_slow(this, Label::kDeferred); + + CodeStubAssembler::LookupInHolder lookup_property_in_holder = + [=](Node* receiver, Node* holder, Node* holder_map, + Node* holder_instance_type, Node* unique_name, Label* next_holder, + Label* if_bailout) { + VARIABLE(var_value, MachineRepresentation::kTagged); + Label if_found(this); + TryGetOwnProperty(context, receiver, holder, holder_map, + holder_instance_type, unique_name, &if_found, + &var_value, next_holder, if_bailout); + BIND(&if_found); + Return(var_value.value()); + }; + + CodeStubAssembler::LookupInHolder lookup_element_in_holder = + [=](Node* receiver, Node* holder, Node* holder_map, + Node* holder_instance_type, Node* index, Label* next_holder, + Label* if_bailout) { + // Not supported yet. + Use(next_holder); + Goto(if_bailout); + }; + + TryPrototypeChainLookup(receiver, object, key, lookup_property_in_holder, + lookup_element_in_holder, &if_notfound, &if_slow, + &if_proxy); + + BIND(&if_notfound); + Label throw_reference_error(this); + GotoIf(WordEqual(on_non_existent, + SmiConstant(OnNonExistent::kThrowReferenceError)), + &throw_reference_error); + CSA_ASSERT(this, WordEqual(on_non_existent, + SmiConstant(OnNonExistent::kReturnUndefined))); + Return(UndefinedConstant()); + + BIND(&throw_reference_error); + Return(CallRuntime(Runtime::kThrowReferenceError, context, key)); + + BIND(&if_slow); + TailCallRuntime(Runtime::kGetPropertyWithReceiver, context, object, key, + receiver, on_non_existent); + + BIND(&if_proxy); + { + // Convert the {key} to a Name first. + Node* name = CallBuiltin(Builtins::kToName, context, key); + + // Proxy cannot handle private symbol so bailout. 
+ GotoIf(IsPrivateSymbol(name), &if_slow); + + // The {object} is a JSProxy instance, look up the {name} on it, passing + // {object} both as receiver and holder. If {name} is absent we can safely + // return undefined from here. + TailCallBuiltin(Builtins::kProxyGetProperty, context, object, name, + receiver, on_non_existent); + } +} + // ES6 [[Set]] operation. TF_BUILTIN(SetProperty, CodeStubAssembler) { TNode context = CAST(Parameter(Descriptor::kContext)); diff --git a/deps/v8/src/builtins/builtins-intl.cc b/deps/v8/src/builtins/builtins-intl.cc index 882afa3c32055e..ff8e96f4f512f0 100644 --- a/deps/v8/src/builtins/builtins-intl.cc +++ b/deps/v8/src/builtins/builtins-intl.cc @@ -276,15 +276,14 @@ Object LegacyFormatConstructor(BuiltinArguments args, Isolate* isolate, // 2. Let format be ? OrdinaryCreateFromConstructor(newTarget, // "%Prototype%", ...). - Handle obj; + Handle map; ASSIGN_RETURN_FAILURE_ON_EXCEPTION( - isolate, obj, - JSObject::New(target, new_target, Handle::null())); - Handle format = Handle::cast(obj); + isolate, map, JSFunction::GetDerivedMap(isolate, target, new_target)); // 3. Perform ? Initialize(Format, locales, options). - ASSIGN_RETURN_FAILURE_ON_EXCEPTION( - isolate, format, T::Initialize(isolate, format, locales, options)); + Handle format; + ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, format, + T::New(isolate, map, locales, options)); // 4. Let this be the this value. Handle receiver = args.receiver(); @@ -351,21 +350,17 @@ Object DisallowCallConstructor(BuiltinArguments args, Isolate* isolate, Handle target = args.target(); Handle new_target = Handle::cast(args.new_target()); - Handle obj; + Handle map; // 2. Let result be OrdinaryCreateFromConstructor(NewTarget, // "%Prototype%"). ASSIGN_RETURN_FAILURE_ON_EXCEPTION( - isolate, obj, - JSObject::New(target, new_target, Handle::null())); - Handle result = Handle::cast(obj); - result->set_flags(0); + isolate, map, JSFunction::GetDerivedMap(isolate, target, new_target)); Handle locales = args.atOrUndefined(isolate, 1); Handle options = args.atOrUndefined(isolate, 2); - // 3. Return Initialize(t, locales, options). - RETURN_RESULT_OR_FAILURE(isolate, - T::Initialize(isolate, result, locales, options)); + // 3. Return New(t, locales, options). + RETURN_RESULT_OR_FAILURE(isolate, T::New(isolate, map, locales, options)); } /** @@ -387,14 +382,11 @@ Object CallOrConstructConstructor(BuiltinArguments args, Isolate* isolate) { Handle locales = args.atOrUndefined(isolate, 1); Handle options = args.atOrUndefined(isolate, 2); - Handle obj; + Handle map; ASSIGN_RETURN_FAILURE_ON_EXCEPTION( - isolate, obj, - JSObject::New(target, new_target, Handle::null())); - Handle result = Handle::cast(obj); + isolate, map, JSFunction::GetDerivedMap(isolate, target, new_target)); - RETURN_RESULT_OR_FAILURE(isolate, - T::Initialize(isolate, result, locales, options)); + RETURN_RESULT_OR_FAILURE(isolate, T::New(isolate, map, locales, options)); } } // namespace @@ -591,12 +583,11 @@ MaybeHandle CreateLocale(Isolate* isolate, Handle constructor, Handle new_target, Handle tag, Handle options) { - Handle locale; + Handle map; // 6. Let locale be ? OrdinaryCreateFromConstructor(NewTarget, // %LocalePrototype%, internalSlotsList). ASSIGN_RETURN_ON_EXCEPTION( - isolate, locale, - JSObject::New(constructor, new_target, Handle::null()), + isolate, map, JSFunction::GetDerivedMap(isolate, constructor, new_target), JSLocale); // 7. If Type(tag) is not String or Object, throw a TypeError exception. 
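[Editorial aside: the Intl constructor hunks above all replace JSObject::New followed by T::Initialize with JSFunction::GetDerivedMap followed by T::New. The map derived from new.target is computed first, and allocation plus initialization then happen as one step, so a partially initialized formatter never becomes observable. A toy C++ sketch of that ordering, using hypothetical types rather than V8's API.]

#include <iostream>
#include <memory>
#include <stdexcept>
#include <string>

struct Map { std::string prototype; };

// Step 1: derive the map (shape + prototype) from new.target. This step can
// throw, and at that point nothing has been allocated yet.
Map GetDerivedMap(const std::string& new_target_prototype) {
  if (new_target_prototype.empty()) throw std::runtime_error("TypeError");
  return Map{new_target_prototype};
}

struct Formatter {
  Map map;
  std::string locale;
};

// Step 2: allocate and fully initialize in one step (the T::New pattern), so
// no half-initialized object escapes to user code.
std::unique_ptr<Formatter> New(Map map, std::string locale) {
  return std::make_unique<Formatter>(Formatter{std::move(map), std::move(locale)});
}

int main() {
  auto f = New(GetDerivedMap("NumberFormat.prototype"), "en-US");
  std::cout << f->map.prototype << " " << f->locale << "\n";
}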
@@ -628,8 +619,7 @@ MaybeHandle CreateLocale(Isolate* isolate, Object::ToObject(isolate, options), JSLocale); } - return JSLocale::Initialize(isolate, Handle::cast(locale), - locale_string, options_object); + return JSLocale::New(isolate, map, locale_string, options_object); } } // namespace diff --git a/deps/v8/src/builtins/builtins-iterator-gen.cc b/deps/v8/src/builtins/builtins-iterator-gen.cc index 0484501bfb23dd..b3d8e27dbc3a7d 100644 --- a/deps/v8/src/builtins/builtins-iterator-gen.cc +++ b/deps/v8/src/builtins/builtins-iterator-gen.cc @@ -75,7 +75,7 @@ IteratorRecord IteratorBuiltinsAssembler::GetIterator(Node* context, } } -TNode IteratorBuiltinsAssembler::IteratorStep( +TNode IteratorBuiltinsAssembler::IteratorStep( Node* context, const IteratorRecord& iterator, Label* if_done, Node* fast_iterator_result_map, Label* if_exception, Variable* exception) { DCHECK_NOT_NULL(if_done); @@ -125,23 +125,21 @@ TNode IteratorBuiltinsAssembler::IteratorStep( } BIND(&return_result); - return UncheckedCast(result); + return CAST(result); } -Node* IteratorBuiltinsAssembler::IteratorValue(Node* context, Node* result, - Node* fast_iterator_result_map, - Label* if_exception, - Variable* exception) { - CSA_ASSERT(this, IsJSReceiver(result)); - +TNode IteratorBuiltinsAssembler::IteratorValue( + TNode context, TNode result, + base::Optional> fast_iterator_result_map, Label* if_exception, + Variable* exception) { Label exit(this); - VARIABLE(var_value, MachineRepresentation::kTagged); - if (fast_iterator_result_map != nullptr) { + TVARIABLE(Object, var_value); + if (fast_iterator_result_map) { // Fast iterator result case: Label if_generic(this); Node* map = LoadMap(result); - GotoIfNot(WordEqual(map, fast_iterator_result_map), &if_generic); - var_value.Bind(LoadObjectField(result, JSIteratorResult::kValueOffset)); + GotoIfNot(WordEqual(map, *fast_iterator_result_map), &if_generic); + var_value = LoadObjectField(result, JSIteratorResult::kValueOffset); Goto(&exit); BIND(&if_generic); @@ -149,9 +147,10 @@ Node* IteratorBuiltinsAssembler::IteratorValue(Node* context, Node* result, // Generic iterator result case: { - Node* value = GetProperty(context, result, factory()->value_string()); + TNode value = + GetProperty(context, result, factory()->value_string()); GotoIfException(value, if_exception, exception); - var_value.Bind(value); + var_value = value; Goto(&exit); } @@ -217,10 +216,10 @@ TNode IteratorBuiltinsAssembler::IterableToList( BIND(&loop_start); { // a. Set next to ? IteratorStep(iteratorRecord). - TNode next = IteratorStep(context, iterator_record, &done); + TNode next = IteratorStep(context, iterator_record, &done); // b. If next is not false, then // i. Let nextValue be ? IteratorValue(next). - TNode next_value = CAST(IteratorValue(context, next)); + TNode next_value = IteratorValue(context, next); // ii. Append nextValue to the end of the List values. values.Push(next_value); Goto(&loop_start); diff --git a/deps/v8/src/builtins/builtins-iterator-gen.h b/deps/v8/src/builtins/builtins-iterator-gen.h index cf421dc5b79f83..db86c653857f52 100644 --- a/deps/v8/src/builtins/builtins-iterator-gen.h +++ b/deps/v8/src/builtins/builtins-iterator-gen.h @@ -32,18 +32,19 @@ class IteratorBuiltinsAssembler : public CodeStubAssembler { Variable* exception = nullptr); // https://tc39.github.io/ecma262/#sec-iteratorstep - // Returns `false` if the iterator is done, otherwise returns an - // iterator result. + // If the iterator is done, goto {if_done}, otherwise returns an iterator + // result. 
// `fast_iterator_result_map` refers to the map for the JSIteratorResult // object, loaded from the native context. - TNode IteratorStep(Node* context, const IteratorRecord& iterator, - Label* if_done, - Node* fast_iterator_result_map = nullptr, - Label* if_exception = nullptr, - Variable* exception = nullptr); - - TNode IteratorStep(Node* context, const IteratorRecord& iterator, - Node* fast_iterator_result_map, Label* if_done) { + TNode IteratorStep(Node* context, const IteratorRecord& iterator, + Label* if_done, + Node* fast_iterator_result_map = nullptr, + Label* if_exception = nullptr, + Variable* exception = nullptr); + + TNode IteratorStep(Node* context, const IteratorRecord& iterator, + Node* fast_iterator_result_map, + Label* if_done) { return IteratorStep(context, iterator, if_done, fast_iterator_result_map); } @@ -51,10 +52,10 @@ class IteratorBuiltinsAssembler : public CodeStubAssembler { // Return the `value` field from an iterator. // `fast_iterator_result_map` refers to the map for the JSIteratorResult // object, loaded from the native context. - Node* IteratorValue(Node* context, Node* result, - Node* fast_iterator_result_map = nullptr, - Label* if_exception = nullptr, - Variable* exception = nullptr); + TNode IteratorValue( + TNode context, TNode result, + base::Optional> fast_iterator_result_map = base::nullopt, + Label* if_exception = nullptr, Variable* exception = nullptr); // https://tc39.github.io/ecma262/#sec-iteratorclose void IteratorCloseOnException(Node* context, const IteratorRecord& iterator, diff --git a/deps/v8/src/builtins/builtins-math.cc b/deps/v8/src/builtins/builtins-math.cc index 6d3274a4a5a632..cce780ab9f6a25 100644 --- a/deps/v8/src/builtins/builtins-math.cc +++ b/deps/v8/src/builtins/builtins-math.cc @@ -20,7 +20,6 @@ BUILTIN(MathHypot) { if (length == 0) return Smi::kZero; DCHECK_LT(0, length); double max = 0; - bool one_arg_is_nan = false; std::vector abs_values; abs_values.reserve(length); for (int i = 0; i < length; i++) { @@ -28,29 +27,20 @@ BUILTIN(MathHypot) { ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, x, Object::ToNumber(isolate, x)); double abs_value = std::abs(x->Number()); - - if (std::isnan(abs_value)) { - one_arg_is_nan = true; - } else { - abs_values.push_back(abs_value); - if (max < abs_value) { - max = abs_value; - } + abs_values.push_back(abs_value); + // Use negation here to make sure that {max} is NaN + // in the end in case any of the arguments was NaN. + if (!(abs_value <= max)) { + max = abs_value; } } - if (max == V8_INFINITY) { - return *isolate->factory()->NewNumber(V8_INFINITY); - } - - if (one_arg_is_nan) { - return ReadOnlyRoots(isolate).nan_value(); - } - if (max == 0) { return Smi::kZero; + } else if (max == V8_INFINITY) { + return ReadOnlyRoots(isolate).infinity_value(); } - DCHECK_GT(max, 0); + DCHECK(!(max <= 0)); // Kahan summation to avoid rounding errors. // Normalize the numbers to the largest one to avoid overflow. 
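[Editorial aside on the MathHypot rewrite above: the negated comparison !(abs_value <= max) is taken both when abs_value is larger and when either operand is NaN, so {max} naturally ends up NaN if any argument was NaN, while the zero and infinity exits run before the summation. A standalone C++ sketch of the same ordering together with the normalized, Kahan-compensated accumulation the comments describe; a sketch of the idea, not V8's exact code.]

#include <cmath>
#include <iostream>
#include <vector>

double Hypot(const std::vector<double>& xs) {
  double max = 0;
  std::vector<double> abs_values;
  abs_values.reserve(xs.size());
  for (double x : xs) {
    double a = std::abs(x);
    abs_values.push_back(a);
    if (!(a <= max)) max = a;  // also taken when a is NaN: NaN sticks
  }
  if (max == 0) return 0;
  if (max == INFINITY) return INFINITY;
  // If max is NaN, the divisions below produce NaN and so does the result.
  double sum = 0, compensation = 0;
  for (double a : abs_values) {
    double n = a / max;               // normalize to avoid overflow
    double term = n * n - compensation;
    double t = sum + term;
    compensation = (t - sum) - term;  // Kahan correction term
    sum = t;
  }
  return std::sqrt(sum) * max;
}

int main() {
  std::cout << Hypot({3, 4}) << "\n";             // 5
  std::cout << Hypot({1e308, 1e308}) << "\n";     // finite, no overflow
  std::cout << Hypot({1, std::nan("")}) << "\n";  // nan
}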
diff --git a/deps/v8/src/builtins/builtins-number-gen.cc b/deps/v8/src/builtins/builtins-number-gen.cc index 5b3af79f006667..f5c4477c23df95 100644 --- a/deps/v8/src/builtins/builtins-number-gen.cc +++ b/deps/v8/src/builtins/builtins-number-gen.cc @@ -315,8 +315,8 @@ TF_BUILTIN(NumberParseInt, CodeStubAssembler) { // ES6 #sec-number.prototype.valueof TF_BUILTIN(NumberPrototypeValueOf, CodeStubAssembler) { - Node* context = Parameter(Descriptor::kContext); - Node* receiver = Parameter(Descriptor::kReceiver); + TNode context = CAST(Parameter(Descriptor::kContext)); + TNode receiver = CAST(Parameter(Descriptor::kReceiver)); Node* result = ToThisValue(context, receiver, PrimitiveType::kNumber, "Number.prototype.valueOf"); @@ -538,8 +538,8 @@ TF_BUILTIN(Add, AddStubAssembler) { BIND(&do_bigint_add); { - Return(CallRuntime(Runtime::kBigIntBinaryOp, context, var_left.value(), - var_right.value(), SmiConstant(Operation::kAdd))); + TailCallBuiltin(Builtins::kBigIntAdd, context, var_left.value(), + var_right.value()); } BIND(&do_double_add); @@ -996,8 +996,8 @@ TF_BUILTIN(Equal, CodeStubAssembler) { } TF_BUILTIN(StrictEqual, CodeStubAssembler) { - Node* lhs = Parameter(Descriptor::kLeft); - Node* rhs = Parameter(Descriptor::kRight); + TNode lhs = CAST(Parameter(Descriptor::kLeft)); + TNode rhs = CAST(Parameter(Descriptor::kRight)); Return(StrictEqual(lhs, rhs)); } diff --git a/deps/v8/src/builtins/builtins-number.cc b/deps/v8/src/builtins/builtins-number.cc index 929e686604953a..d2fb0ff74c3a02 100644 --- a/deps/v8/src/builtins/builtins-number.cc +++ b/deps/v8/src/builtins/builtins-number.cc @@ -25,8 +25,8 @@ BUILTIN(NumberPrototypeToExponential) { Handle fraction_digits = args.atOrUndefined(isolate, 1); // Unwrap the receiver {value}. - if (value->IsJSValue()) { - value = handle(Handle::cast(value)->value(), isolate); + if (value->IsJSPrimitiveWrapper()) { + value = handle(Handle::cast(value)->value(), isolate); } if (!value->IsNumber()) { THROW_NEW_ERROR_RETURN_FAILURE( @@ -70,8 +70,8 @@ BUILTIN(NumberPrototypeToFixed) { Handle fraction_digits = args.atOrUndefined(isolate, 1); // Unwrap the receiver {value}. - if (value->IsJSValue()) { - value = handle(Handle::cast(value)->value(), isolate); + if (value->IsJSPrimitiveWrapper()) { + value = handle(Handle::cast(value)->value(), isolate); } if (!value->IsNumber()) { THROW_NEW_ERROR_RETURN_FAILURE( @@ -117,8 +117,8 @@ BUILTIN(NumberPrototypeToLocaleString) { Handle value = args.at(0); // Unwrap the receiver {value}. - if (value->IsJSValue()) { - value = handle(Handle::cast(value)->value(), isolate); + if (value->IsJSPrimitiveWrapper()) { + value = handle(Handle::cast(value)->value(), isolate); } // 1. Let x be ? thisNumberValue(this value) if (!value->IsNumber()) { @@ -147,8 +147,8 @@ BUILTIN(NumberPrototypeToPrecision) { Handle precision = args.atOrUndefined(isolate, 1); // Unwrap the receiver {value}. - if (value->IsJSValue()) { - value = handle(Handle::cast(value)->value(), isolate); + if (value->IsJSPrimitiveWrapper()) { + value = handle(Handle::cast(value)->value(), isolate); } if (!value->IsNumber()) { THROW_NEW_ERROR_RETURN_FAILURE( @@ -192,8 +192,8 @@ BUILTIN(NumberPrototypeToString) { Handle radix = args.atOrUndefined(isolate, 1); // Unwrap the receiver {value}. 
- if (value->IsJSValue()) { - value = handle(Handle::cast(value)->value(), isolate); + if (value->IsJSPrimitiveWrapper()) { + value = handle(Handle::cast(value)->value(), isolate); } if (!value->IsNumber()) { THROW_NEW_ERROR_RETURN_FAILURE( diff --git a/deps/v8/src/builtins/builtins-object-gen.cc b/deps/v8/src/builtins/builtins-object-gen.cc index 314331d49854ba..8d59ee3bd107cf 100644 --- a/deps/v8/src/builtins/builtins-object-gen.cc +++ b/deps/v8/src/builtins/builtins-object-gen.cc @@ -65,8 +65,6 @@ class ObjectEntriesValuesBuiltinsAssembler : public ObjectBuiltinsAssembler { TNode IsPropertyKindData(TNode kind); - TNode HasHiddenPrototype(TNode map); - TNode LoadPropertyKind(TNode details) { return DecodeWord32(details); } @@ -185,12 +183,6 @@ TNode ObjectEntriesValuesBuiltinsAssembler::IsPropertyKindData( return Word32Equal(kind, Int32Constant(PropertyKind::kData)); } -TNode ObjectEntriesValuesBuiltinsAssembler::HasHiddenPrototype( - TNode map) { - TNode bit_field2 = Unsigned(LoadMapBitField2(map)); - return DecodeWord32(bit_field2); -} - void ObjectEntriesValuesBuiltinsAssembler::GetOwnValuesOrEntries( TNode context, TNode maybe_object, CollectType collect_type) { @@ -254,7 +246,6 @@ void ObjectEntriesValuesBuiltinsAssembler::GotoIfMapHasSlowProperties( TNode map, Label* if_slow) { GotoIf(IsStringWrapperElementsKind(map), if_slow); GotoIf(IsSpecialReceiverMap(map), if_slow); - GotoIf(HasHiddenPrototype(map), if_slow); GotoIf(IsDictionaryMap(map), if_slow); } @@ -602,9 +593,19 @@ TF_BUILTIN(ObjectGetOwnPropertyNames, ObjectBuiltinsAssembler) { if_fast(this), try_fast(this, Label::kDeferred), if_slow(this, Label::kDeferred), if_join(this); - // Check if the {object} has a usable enum cache. + // Take the slow path if the {object} IsCustomElementsReceiverInstanceType or + // has any elements. GotoIf(TaggedIsSmi(object), &if_slow); Node* object_map = LoadMap(object); + TNode instance_type = LoadMapInstanceType(object_map); + GotoIf(IsCustomElementsReceiverInstanceType(instance_type), &if_slow); + Node* object_elements = LoadElements(object); + GotoIf(IsEmptyFixedArray(object_elements), &if_empty_elements); + Branch(IsEmptySlowElementDictionary(object_elements), &if_empty_elements, + &if_slow); + + // Check if the {object} has a usable enum cache. + BIND(&if_empty_elements); Node* object_bit_field3 = LoadMapBitField3(object_map); Node* object_enum_length = DecodeWordFromWord32(object_bit_field3); @@ -612,15 +613,7 @@ TF_BUILTIN(ObjectGetOwnPropertyNames, ObjectBuiltinsAssembler) { WordEqual(object_enum_length, IntPtrConstant(kInvalidEnumCacheSentinel)), &try_fast); - // Ensure that the {object} doesn't have any elements. - CSA_ASSERT(this, IsJSObjectMap(object_map)); - Node* object_elements = LoadElements(object); - GotoIf(IsEmptyFixedArray(object_elements), &if_empty_elements); - Branch(IsEmptySlowElementDictionary(object_elements), &if_empty_elements, - &if_slow); - // Check whether all own properties are enumerable. - BIND(&if_empty_elements); Node* number_descriptors = DecodeWordFromWord32(object_bit_field3); GotoIfNot(WordEqual(object_enum_length, number_descriptors), &if_slow); @@ -728,11 +721,11 @@ TF_BUILTIN(ObjectPrototypeIsPrototypeOf, ObjectBuiltinsAssembler) { // invoke the ToObject builtin, which raises the appropriate error. 
// Otherwise we don't need to invoke ToObject, since {receiver} is // either already a JSReceiver, in which case ToObject is a no-op, - // or it's a Primitive and ToObject would allocate a fresh JSValue + // or it's a Primitive and ToObject would allocate a fresh JSPrimitiveWrapper // wrapper, which wouldn't be identical to any existing JSReceiver // found in the prototype chain of {value}, hence it will return // false no matter if we search for the Primitive {receiver} or - // a newly allocated JSValue wrapper for {receiver}. + // a newly allocated JSPrimitiveWrapper wrapper for {receiver}. GotoIf(IsNull(receiver), &if_receiverisnullorundefined); GotoIf(IsUndefined(receiver), &if_receiverisnullorundefined); @@ -794,7 +787,7 @@ TF_BUILTIN(ObjectToString, ObjectBuiltinsAssembler) { {JS_SPECIAL_API_OBJECT_TYPE, &if_apiobject}, {JS_PROXY_TYPE, &if_proxy}, {JS_ERROR_TYPE, &if_error}, - {JS_VALUE_TYPE, &if_value}}; + {JS_PRIMITIVE_WRAPPER_TYPE, &if_value}}; size_t const kNumCases = arraysize(kJumpTable); Label* case_labels[kNumCases]; int32_t case_values[kNumCases]; @@ -996,7 +989,7 @@ TF_BUILTIN(ObjectToString, ObjectBuiltinsAssembler) { if_value_is_bigint(this, Label::kDeferred), if_value_is_string(this, Label::kDeferred); - Node* receiver_value = LoadJSValueValue(receiver); + Node* receiver_value = LoadJSPrimitiveWrapperValue(receiver); // We need to start with the object to see if the value was a subclass // which might have interesting properties. var_holder.Bind(receiver); @@ -1346,10 +1339,15 @@ TF_BUILTIN(CreateGeneratorObject, ObjectBuiltinsAssembler) { StoreObjectFieldNoWriteBarrier( result, JSGeneratorObject::kParametersAndRegistersOffset, parameters_and_registers); + Node* resume_mode = SmiConstant(JSGeneratorObject::ResumeMode::kNext); + StoreObjectFieldNoWriteBarrier(result, JSGeneratorObject::kResumeModeOffset, + resume_mode); Node* executing = SmiConstant(JSGeneratorObject::kGeneratorExecuting); StoreObjectFieldNoWriteBarrier(result, JSGeneratorObject::kContinuationOffset, executing); - GotoIfNot(HasInstanceType(maybe_map, JS_ASYNC_GENERATOR_OBJECT_TYPE), &done); + GotoIfNot(InstanceTypeEqual(LoadMapInstanceType(maybe_map), + JS_ASYNC_GENERATOR_OBJECT_TYPE), + &done); StoreObjectFieldNoWriteBarrier( result, JSAsyncGeneratorObject::kIsAwaitingOffset, SmiConstant(0)); Goto(&done); diff --git a/deps/v8/src/builtins/builtins-object.cc b/deps/v8/src/builtins/builtins-object.cc index 59e4373f98b078..1ca5fffd8db556 100644 --- a/deps/v8/src/builtins/builtins-object.cc +++ b/deps/v8/src/builtins/builtins-object.cc @@ -5,7 +5,7 @@ #include "src/builtins/builtins-utils-inl.h" #include "src/builtins/builtins.h" #include "src/codegen/code-factory.h" -#include "src/execution/message-template.h" +#include "src/common/message-template.h" #include "src/heap/heap-inl.h" // For ToBoolean. TODO(jkummerow): Drop. #include "src/logging/counters.h" #include "src/objects/keys.h" @@ -218,52 +218,6 @@ BUILTIN(ObjectFreeze) { return *object; } -// ES section 19.1.2.9 Object.getPrototypeOf ( O ) -BUILTIN(ObjectGetPrototypeOf) { - HandleScope scope(isolate); - Handle object = args.atOrUndefined(isolate, 1); - - Handle receiver; - ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, receiver, - Object::ToObject(isolate, object)); - - RETURN_RESULT_OR_FAILURE(isolate, - JSReceiver::GetPrototype(isolate, receiver)); -} - -// ES6 section 19.1.2.21 Object.setPrototypeOf ( O, proto ) -BUILTIN(ObjectSetPrototypeOf) { - HandleScope scope(isolate); - - // 1. Let O be ? RequireObjectCoercible(O). 
- Handle object = args.atOrUndefined(isolate, 1); - if (object->IsNullOrUndefined(isolate)) { - THROW_NEW_ERROR_RETURN_FAILURE( - isolate, NewTypeError(MessageTemplate::kCalledOnNullOrUndefined, - isolate->factory()->NewStringFromAsciiChecked( - "Object.setPrototypeOf"))); - } - - // 2. If Type(proto) is neither Object nor Null, throw a TypeError exception. - Handle proto = args.atOrUndefined(isolate, 2); - if (!proto->IsNull(isolate) && !proto->IsJSReceiver()) { - THROW_NEW_ERROR_RETURN_FAILURE( - isolate, NewTypeError(MessageTemplate::kProtoObjectOrNull, proto)); - } - - // 3. If Type(O) is not Object, return O. - if (!object->IsJSReceiver()) return *object; - Handle receiver = Handle::cast(object); - - // 4. Let status be ? O.[[SetPrototypeOf]](proto). - // 5. If status is false, throw a TypeError exception. - MAYBE_RETURN(JSReceiver::SetPrototype(receiver, proto, true, kThrowOnError), - ReadOnlyRoots(isolate).exception()); - - // 6. Return O. - return *receiver; -} - // ES6 section B.2.2.1.1 get Object.prototype.__proto__ BUILTIN(ObjectPrototypeGetProto) { HandleScope scope(isolate); @@ -332,18 +286,6 @@ BUILTIN(ObjectGetOwnPropertySymbols) { return GetOwnPropertyKeys(isolate, args, SKIP_STRINGS); } -// ES6 section 19.1.2.11 Object.isExtensible ( O ) -BUILTIN(ObjectIsExtensible) { - HandleScope scope(isolate); - Handle object = args.atOrUndefined(isolate, 1); - Maybe result = - object->IsJSReceiver() - ? JSReceiver::IsExtensible(Handle::cast(object)) - : Just(false); - MAYBE_RETURN(result, ReadOnlyRoots(isolate).exception()); - return isolate->heap()->ToBoolean(result.FromJust()); -} - // ES6 section 19.1.2.12 Object.isFrozen ( O ) BUILTIN(ObjectIsFrozen) { HandleScope scope(isolate); @@ -403,18 +345,6 @@ BUILTIN(ObjectGetOwnPropertyDescriptors) { return *descriptors; } -// ES6 section 19.1.2.15 Object.preventExtensions ( O ) -BUILTIN(ObjectPreventExtensions) { - HandleScope scope(isolate); - Handle object = args.atOrUndefined(isolate, 1); - if (object->IsJSReceiver()) { - MAYBE_RETURN(JSReceiver::PreventExtensions(Handle::cast(object), - kThrowOnError), - ReadOnlyRoots(isolate).exception()); - } - return *object; -} - // ES6 section 19.1.2.17 Object.seal ( O ) BUILTIN(ObjectSeal) { HandleScope scope(isolate); diff --git a/deps/v8/src/builtins/builtins-promise-gen.cc b/deps/v8/src/builtins/builtins-promise-gen.cc index ad70fb1dd1a49e..1339e2dccd788e 100644 --- a/deps/v8/src/builtins/builtins-promise-gen.cc +++ b/deps/v8/src/builtins/builtins-promise-gen.cc @@ -2062,7 +2062,7 @@ Node* PromiseBuiltinsAssembler::PerformPromiseAll( // 5. Let _promiseResolve_ be ? Get(_constructor_, `"resolve"`). TNode resolve = GetProperty(native_context, constructor, factory()->resolve_string()); - GotoIfException(resolve, if_exception, var_exception); + GotoIfException(resolve, &close_iterator, var_exception); // 6. If IsCallable(_promiseResolve_) is *false*, throw a *TypeError* // exception. @@ -2077,9 +2077,9 @@ Node* PromiseBuiltinsAssembler::PerformPromiseAll( // Let next be IteratorStep(iteratorRecord.[[Iterator]]). // If next is an abrupt completion, set iteratorRecord.[[Done]] to true. // ReturnIfAbrupt(next). 
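[Editorial aside: the PerformPromiseAll hunk above reroutes a failed Get(constructor, "resolve") to close_iterator rather than straight to the rejection path, so the input iterator is closed before the promise is rejected, matching the spec's handling of abrupt completions. A standalone C++ sketch of that close-on-abrupt-completion discipline, with toy types standing in for the iterator record.]

#include <iostream>
#include <stdexcept>

struct Iterator {
  void Close() { std::cout << "iterator closed\n"; }
};

// Hypothetical lookup that can fail, like Get(constructor, "resolve").
double GetResolve(bool fail) {
  if (fail) throw std::runtime_error("getter threw");
  return 1.0;
}

void PerformAll(Iterator& it, bool fail) {
  try {
    (void)GetResolve(fail);
    // ... iterate the input and resolve each element ...
  } catch (...) {
    it.Close();  // close the iterator before propagating the rejection
    throw;
  }
}

int main() {
  Iterator it;
  try {
    PerformAll(it, /*fail=*/true);
  } catch (const std::exception& e) {
    std::cout << "rejected: " << e.what() << "\n";
  }
}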
- Node* const fast_iterator_result_map = - LoadContextElement(native_context, Context::ITERATOR_RESULT_MAP_INDEX); - Node* const next = iter_assembler.IteratorStep( + TNode const fast_iterator_result_map = CAST( + LoadContextElement(native_context, Context::ITERATOR_RESULT_MAP_INDEX)); + TNode const next = iter_assembler.IteratorStep( native_context, iterator, &done_loop, fast_iterator_result_map, if_exception, var_exception); @@ -2087,7 +2087,7 @@ Node* PromiseBuiltinsAssembler::PerformPromiseAll( // If nextValue is an abrupt completion, set iteratorRecord.[[Done]] to // true. // ReturnIfAbrupt(nextValue). - Node* const next_value = iter_assembler.IteratorValue( + TNode const next_value = iter_assembler.IteratorValue( native_context, next, fast_iterator_result_map, if_exception, var_exception); @@ -2148,7 +2148,7 @@ Node* PromiseBuiltinsAssembler::PerformPromiseAll( &if_slow); GotoIf(IsPromiseSpeciesProtectorCellInvalid(), &if_slow); GotoIf(TaggedIsSmi(next_value), &if_slow); - Node* const next_value_map = LoadMap(next_value); + Node* const next_value_map = LoadMap(CAST(next_value)); BranchIfPromiseThenLookupChainIntact(native_context, next_value_map, &if_fast, &if_slow); @@ -2526,8 +2526,7 @@ TF_BUILTIN(PromiseAllSettledResolveElementClosure, PromiseBuiltinsAssembler) { LoadContextElement(native_context, Context::OBJECT_FUNCTION_INDEX)); TNode object_function_map = Cast(LoadObjectField( object_function, JSFunction::kPrototypeOrInitialMapOffset)); - TNode obj = - Cast(AllocateJSObjectFromMap(object_function_map)); + TNode obj = AllocateJSObjectFromMap(object_function_map); // 10. Perform ! CreateDataProperty(obj, "status", "fulfilled"). CallBuiltin(Builtins::kFastCreateDataProperty, context, obj, @@ -2557,8 +2556,7 @@ TF_BUILTIN(PromiseAllSettledRejectElementClosure, PromiseBuiltinsAssembler) { LoadContextElement(native_context, Context::OBJECT_FUNCTION_INDEX)); TNode object_function_map = Cast(LoadObjectField( object_function, JSFunction::kPrototypeOrInitialMapOffset)); - TNode obj = - Cast(AllocateJSObjectFromMap(object_function_map)); + TNode obj = AllocateJSObjectFromMap(object_function_map); // 10. Perform ! CreateDataProperty(obj, "status", "rejected"). CallBuiltin(Builtins::kFastCreateDataProperty, context, obj, @@ -2579,7 +2577,7 @@ TF_BUILTIN(PromiseRace, PromiseBuiltinsAssembler) { VARIABLE(var_exception, MachineRepresentation::kTagged, TheHoleConstant()); Node* const receiver = Parameter(Descriptor::kReceiver); - Node* const context = Parameter(Descriptor::kContext); + TNode const context = CAST(Parameter(Descriptor::kContext)); ThrowIfNotJSReceiver(context, receiver, MessageTemplate::kCalledOnNonObject, "Promise.race"); @@ -2626,11 +2624,11 @@ TF_BUILTIN(PromiseRace, PromiseBuiltinsAssembler) { // 3. Let _promiseResolve_ be ? Get(_constructor_, `"resolve"`). TNode resolve = GetProperty(native_context, receiver, factory()->resolve_string()); - GotoIfException(resolve, &reject_promise, &var_exception); + GotoIfException(resolve, &close_iterator, &var_exception); // 4. If IsCallable(_promiseResolve_) is *false*, throw a *TypeError* // exception. 
- ThrowIfNotCallable(CAST(context), resolve, "resolve"); + ThrowIfNotCallable(context, resolve, "resolve"); var_promise_resolve_function = resolve; Goto(&loop); @@ -2638,13 +2636,13 @@ TF_BUILTIN(PromiseRace, PromiseBuiltinsAssembler) { BIND(&loop); { - Node* const fast_iterator_result_map = LoadContextElement( - native_context, Context::ITERATOR_RESULT_MAP_INDEX); + TNode const fast_iterator_result_map = CAST(LoadContextElement( + native_context, Context::ITERATOR_RESULT_MAP_INDEX)); // Let next be IteratorStep(iteratorRecord.[[Iterator]]). // If next is an abrupt completion, set iteratorRecord.[[Done]] to true. // ReturnIfAbrupt(next). - Node* const next = iter_assembler.IteratorStep( + TNode const next = iter_assembler.IteratorStep( context, iterator, &break_loop, fast_iterator_result_map, &reject_promise, &var_exception); @@ -2652,7 +2650,7 @@ TF_BUILTIN(PromiseRace, PromiseBuiltinsAssembler) { // If nextValue is an abrupt completion, set iteratorRecord.[[Done]] to // true. // ReturnIfAbrupt(nextValue). - Node* const next_value = + TNode const next_value = iter_assembler.IteratorValue(context, next, fast_iterator_result_map, &reject_promise, &var_exception); diff --git a/deps/v8/src/builtins/builtins-proxy-gen.cc b/deps/v8/src/builtins/builtins-proxy-gen.cc index a1a2f6308ffde2..948540ea5f1d1d 100644 --- a/deps/v8/src/builtins/builtins-proxy-gen.cc +++ b/deps/v8/src/builtins/builtins-proxy-gen.cc @@ -13,8 +13,9 @@ namespace v8 { namespace internal { -Node* ProxiesCodeStubAssembler::AllocateProxy(Node* target, Node* handler, - Node* context) { +compiler::TNode ProxiesCodeStubAssembler::AllocateProxy( + TNode context, TNode target, + TNode handler) { VARIABLE(map, MachineRepresentation::kTagged); Label callable_target(this), constructor_target(this), none_target(this), @@ -53,7 +54,7 @@ Node* ProxiesCodeStubAssembler::AllocateProxy(Node* target, Node* handler, StoreObjectFieldNoWriteBarrier(proxy, JSProxy::kTargetOffset, target); StoreObjectFieldNoWriteBarrier(proxy, JSProxy::kHandlerOffset, handler); - return proxy; + return CAST(proxy); } Node* ProxiesCodeStubAssembler::AllocateJSArrayForCodeStubArguments( @@ -121,8 +122,9 @@ Node* ProxiesCodeStubAssembler::CreateProxyRevokeFunctionContext( return context; } -Node* ProxiesCodeStubAssembler::AllocateProxyRevokeFunction(Node* proxy, - Node* context) { +compiler::TNode +ProxiesCodeStubAssembler::AllocateProxyRevokeFunction(TNode context, + TNode proxy) { Node* const native_context = LoadNativeContext(context); Node* const proxy_context = @@ -132,13 +134,8 @@ Node* ProxiesCodeStubAssembler::AllocateProxyRevokeFunction(Node* proxy, Node* const revoke_info = LoadContextElement(native_context, Context::PROXY_REVOKE_SHARED_FUN); - return AllocateFunctionWithMapAndContext(revoke_map, revoke_info, - proxy_context); -} - -Node* ProxiesCodeStubAssembler::GetProxyConstructorJSNewTarget() { - return CodeAssembler::Parameter(static_cast( - Builtin_ProxyConstructor_InterfaceDescriptor::kJSNewTarget)); + return CAST(AllocateFunctionWithMapAndContext(revoke_map, revoke_info, + proxy_context)); } TF_BUILTIN(CallProxy, ProxiesCodeStubAssembler) { @@ -262,9 +259,11 @@ TF_BUILTIN(ConstructProxy, ProxiesCodeStubAssembler) { { ThrowTypeError(context, MessageTemplate::kProxyRevoked, "construct"); } } -Node* ProxiesCodeStubAssembler::CheckGetSetTrapResult( - Node* context, Node* target, Node* proxy, Node* name, Node* trap_result, +void ProxiesCodeStubAssembler::CheckGetSetTrapResult( + TNode context, TNode target, TNode proxy, + TNode name, TNode trap_result, 
JSProxy::AccessKind access_kind) { + // TODO(mslekova): Think of a better name for the trap_result param. Node* map = LoadMap(target); VARIABLE(var_value, MachineRepresentation::kTagged); VARIABLE(var_details, MachineRepresentation::kWord32); @@ -273,7 +272,7 @@ Node* ProxiesCodeStubAssembler::CheckGetSetTrapResult( Label if_found_value(this), check_in_runtime(this, Label::kDeferred), check_passed(this); - GotoIfNot(IsUniqueNameNoIndex(CAST(name)), &check_in_runtime); + GotoIfNot(IsUniqueNameNoIndex(name), &check_in_runtime); Node* instance_type = LoadInstanceType(target); TryGetOwnProperty(context, target, target, map, instance_type, name, &if_found_value, &var_value, &var_details, &var_raw_value, @@ -366,12 +365,13 @@ Node* ProxiesCodeStubAssembler::CheckGetSetTrapResult( } BIND(&check_passed); - return trap_result; } } -Node* ProxiesCodeStubAssembler::CheckHasTrapResult(Node* context, Node* target, - Node* proxy, Node* name) { +void ProxiesCodeStubAssembler::CheckHasTrapResult(TNode context, + TNode target, + TNode proxy, + TNode name) { Node* target_map = LoadMap(target); VARIABLE(var_value, MachineRepresentation::kTagged); VARIABLE(var_details, MachineRepresentation::kWord32); @@ -383,7 +383,7 @@ Node* ProxiesCodeStubAssembler::CheckHasTrapResult(Node* context, Node* target, check_in_runtime(this, Label::kDeferred); // 9.a. Let targetDesc be ? target.[[GetOwnProperty]](P). - GotoIfNot(IsUniqueNameNoIndex(CAST(name)), &check_in_runtime); + GotoIfNot(IsUniqueNameNoIndex(name), &check_in_runtime); Node* instance_type = LoadInstanceType(target); TryGetOwnProperty(context, target, target, target_map, instance_type, name, &if_found_value, &var_value, &var_details, &var_raw_value, @@ -419,7 +419,64 @@ Node* ProxiesCodeStubAssembler::CheckHasTrapResult(Node* context, Node* target, } BIND(&check_passed); - return FalseConstant(); +} + +void ProxiesCodeStubAssembler::CheckDeleteTrapResult(TNode context, + TNode target, + TNode proxy, + TNode name) { + TNode target_map = LoadMap(target); + TVARIABLE(Object, var_value); + TVARIABLE(Uint32T, var_details); + TVARIABLE(Object, var_raw_value); + + Label if_found_value(this, Label::kDeferred), + throw_non_configurable(this, Label::kDeferred), + throw_non_extensible(this, Label::kDeferred), check_passed(this), + check_in_runtime(this, Label::kDeferred); + + // 10. Let targetDesc be ? target.[[GetOwnProperty]](P). + GotoIfNot(IsUniqueNameNoIndex(name), &check_in_runtime); + TNode instance_type = LoadInstanceType(target); + TryGetOwnProperty(context, target, target, target_map, instance_type, name, + &if_found_value, &var_value, &var_details, &var_raw_value, + &check_passed, &check_in_runtime, kReturnAccessorPair); + + // 11. If targetDesc is undefined, return true. + BIND(&if_found_value); + { + // 12. If targetDesc.[[Configurable]] is false, throw a TypeError exception. + TNode non_configurable = IsSetWord32( + var_details.value(), PropertyDetails::kAttributesDontDeleteMask); + GotoIf(non_configurable, &throw_non_configurable); + + // 13. Let extensibleTarget be ? IsExtensible(target). + TNode target_extensible = IsExtensibleMap(target_map); + + // 14. If extensibleTarget is false, throw a TypeError exception. 
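[Editorial aside: CheckDeleteTrapResult above validates a proxy's successful deleteProperty trap against the target, per the spec steps quoted in the comments: no own descriptor on the target means the delete is trivially fine, while a non-configurable property or a non-extensible target means a TypeError. A condensed C++ model of steps 10-14, with a hypothetical descriptor type.]

#include <iostream>
#include <optional>
#include <stdexcept>

struct PropertyDescriptor { bool configurable; };
struct Target {
  bool extensible;
  std::optional<PropertyDescriptor> own_desc;  // target.[[GetOwnProperty]](P)
};

void CheckDeleteTrapResult(const Target& target) {
  // 11. If targetDesc is undefined, return true.
  if (!target.own_desc) return;
  // 12. If targetDesc.[[Configurable]] is false, throw a TypeError exception.
  if (!target.own_desc->configurable)
    throw std::runtime_error("TypeError: property is non-configurable");
  // 13.-14. If extensibleTarget is false, throw a TypeError exception.
  if (!target.extensible)
    throw std::runtime_error("TypeError: target is non-extensible");
}

int main() {
  CheckDeleteTrapResult({true, std::nullopt});              // passes
  CheckDeleteTrapResult({true, PropertyDescriptor{true}});  // passes
  try {
    CheckDeleteTrapResult({true, PropertyDescriptor{false}});
  } catch (const std::exception& e) {
    std::cout << e.what() << "\n";
  }
}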
+ GotoIfNot(target_extensible, &throw_non_extensible); + Goto(&check_passed); + } + + BIND(&throw_non_configurable); + { + ThrowTypeError(context, + MessageTemplate::kProxyDeletePropertyNonConfigurable, name); + } + + BIND(&throw_non_extensible); + { + ThrowTypeError(context, MessageTemplate::kProxyDeletePropertyNonExtensible, + name); + } + + BIND(&check_in_runtime); + { + CallRuntime(Runtime::kCheckProxyDeleteTrapResult, context, name, target); + Goto(&check_passed); + } + + BIND(&check_passed); } } // namespace internal diff --git a/deps/v8/src/builtins/builtins-proxy-gen.h b/deps/v8/src/builtins/builtins-proxy-gen.h index fcaac7df6661ae..cb51faf57553fd 100644 --- a/deps/v8/src/builtins/builtins-proxy-gen.h +++ b/deps/v8/src/builtins/builtins-proxy-gen.h @@ -17,19 +17,21 @@ class ProxiesCodeStubAssembler : public CodeStubAssembler { explicit ProxiesCodeStubAssembler(compiler::CodeAssemblerState* state) : CodeStubAssembler(state) {} - Node* AllocateProxy(Node* target, Node* handler, Node* context); - Node* AllocateProxyRevokeFunction(Node* proxy, Node* context); + TNode AllocateProxy(TNode context, TNode target, + TNode handler); + TNode AllocateProxyRevokeFunction(TNode context, + TNode proxy); - // Get JSNewTarget parameter for ProxyConstructor builtin (Torque). - // TODO(v8:9120): Remove this once torque support exists - Node* GetProxyConstructorJSNewTarget(); + void CheckGetSetTrapResult(TNode context, TNode target, + TNode proxy, TNode name, + TNode trap_result, + JSProxy::AccessKind access_kind); - Node* CheckGetSetTrapResult(Node* context, Node* target, Node* proxy, - Node* name, Node* trap_result, - JSProxy::AccessKind access_kind); + void CheckHasTrapResult(TNode context, TNode target, + TNode proxy, TNode name); - Node* CheckHasTrapResult(Node* context, Node* target, Node* proxy, - Node* name); + void CheckDeleteTrapResult(TNode context, TNode target, + TNode proxy, TNode name); protected: enum ProxyRevokeFunctionContextSlot { @@ -37,9 +39,10 @@ class ProxiesCodeStubAssembler : public CodeStubAssembler { kProxyContextLength, }; - Node* AllocateJSArrayForCodeStubArguments(Node* context, - CodeStubArguments& args, Node* argc, - ParameterMode mode); + Node* AllocateJSArrayForCodeStubArguments( + Node* context, + CodeStubArguments& args, // NOLINT(runtime/references) + Node* argc, ParameterMode mode); private: Node* CreateProxyRevokeFunctionContext(Node* proxy, Node* native_context); diff --git a/deps/v8/src/builtins/builtins-reflect.cc b/deps/v8/src/builtins/builtins-reflect.cc index e998652dad3224..6151fcbd4761c4 100644 --- a/deps/v8/src/builtins/builtins-reflect.cc +++ b/deps/v8/src/builtins/builtins-reflect.cc @@ -46,53 +46,6 @@ BUILTIN(ReflectDefineProperty) { return *isolate->factory()->ToBoolean(result.FromJust()); } -// ES6 section 26.1.4 Reflect.deleteProperty -BUILTIN(ReflectDeleteProperty) { - HandleScope scope(isolate); - DCHECK_EQ(3, args.length()); - Handle target = args.at(1); - Handle key = args.at(2); - - if (!target->IsJSReceiver()) { - THROW_NEW_ERROR_RETURN_FAILURE( - isolate, NewTypeError(MessageTemplate::kCalledOnNonObject, - isolate->factory()->NewStringFromAsciiChecked( - "Reflect.deleteProperty"))); - } - - Handle name; - ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, name, - Object::ToName(isolate, key)); - - Maybe result = JSReceiver::DeletePropertyOrElement( - Handle::cast(target), name, LanguageMode::kSloppy); - MAYBE_RETURN(result, ReadOnlyRoots(isolate).exception()); - return *isolate->factory()->ToBoolean(result.FromJust()); -} - -// ES6 section 26.1.6 
Reflect.get -BUILTIN(ReflectGet) { - HandleScope scope(isolate); - Handle target = args.atOrUndefined(isolate, 1); - Handle key = args.atOrUndefined(isolate, 2); - Handle receiver = args.length() > 3 ? args.at(3) : target; - - if (!target->IsJSReceiver()) { - THROW_NEW_ERROR_RETURN_FAILURE( - isolate, NewTypeError(MessageTemplate::kCalledOnNonObject, - isolate->factory()->NewStringFromAsciiChecked( - "Reflect.get"))); - } - - Handle name; - ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, name, - Object::ToName(isolate, key)); - - RETURN_RESULT_OR_FAILURE( - isolate, Object::GetPropertyOrElement(receiver, name, - Handle::cast(target))); -} - // ES6 section 26.1.7 Reflect.getOwnPropertyDescriptor BUILTIN(ReflectGetOwnPropertyDescriptor) { HandleScope scope(isolate); @@ -119,42 +72,6 @@ BUILTIN(ReflectGetOwnPropertyDescriptor) { return *desc.ToObject(isolate); } -// ES6 section 26.1.8 Reflect.getPrototypeOf -BUILTIN(ReflectGetPrototypeOf) { - HandleScope scope(isolate); - DCHECK_EQ(2, args.length()); - Handle target = args.at(1); - - if (!target->IsJSReceiver()) { - THROW_NEW_ERROR_RETURN_FAILURE( - isolate, NewTypeError(MessageTemplate::kCalledOnNonObject, - isolate->factory()->NewStringFromAsciiChecked( - "Reflect.getPrototypeOf"))); - } - Handle receiver = Handle::cast(target); - RETURN_RESULT_OR_FAILURE(isolate, - JSReceiver::GetPrototype(isolate, receiver)); -} - -// ES6 section 26.1.10 Reflect.isExtensible -BUILTIN(ReflectIsExtensible) { - HandleScope scope(isolate); - DCHECK_EQ(2, args.length()); - Handle target = args.at(1); - - if (!target->IsJSReceiver()) { - THROW_NEW_ERROR_RETURN_FAILURE( - isolate, NewTypeError(MessageTemplate::kCalledOnNonObject, - isolate->factory()->NewStringFromAsciiChecked( - "Reflect.isExtensible"))); - } - - Maybe result = - JSReceiver::IsExtensible(Handle::cast(target)); - MAYBE_RETURN(result, ReadOnlyRoots(isolate).exception()); - return *isolate->factory()->ToBoolean(result.FromJust()); -} - // ES6 section 26.1.11 Reflect.ownKeys BUILTIN(ReflectOwnKeys) { HandleScope scope(isolate); @@ -177,25 +94,6 @@ BUILTIN(ReflectOwnKeys) { return *isolate->factory()->NewJSArrayWithElements(keys); } -// ES6 section 26.1.12 Reflect.preventExtensions -BUILTIN(ReflectPreventExtensions) { - HandleScope scope(isolate); - DCHECK_EQ(2, args.length()); - Handle target = args.at(1); - - if (!target->IsJSReceiver()) { - THROW_NEW_ERROR_RETURN_FAILURE( - isolate, NewTypeError(MessageTemplate::kCalledOnNonObject, - isolate->factory()->NewStringFromAsciiChecked( - "Reflect.preventExtensions"))); - } - - Maybe result = JSReceiver::PreventExtensions( - Handle::cast(target), kDontThrow); - MAYBE_RETURN(result, ReadOnlyRoots(isolate).exception()); - return *isolate->factory()->ToBoolean(result.FromJust()); -} - // ES6 section 26.1.13 Reflect.set BUILTIN(ReflectSet) { HandleScope scope(isolate); @@ -223,30 +121,5 @@ BUILTIN(ReflectSet) { return *isolate->factory()->ToBoolean(result.FromJust()); } -// ES6 section 26.1.14 Reflect.setPrototypeOf -BUILTIN(ReflectSetPrototypeOf) { - HandleScope scope(isolate); - DCHECK_EQ(3, args.length()); - Handle target = args.at(1); - Handle proto = args.at(2); - - if (!target->IsJSReceiver()) { - THROW_NEW_ERROR_RETURN_FAILURE( - isolate, NewTypeError(MessageTemplate::kCalledOnNonObject, - isolate->factory()->NewStringFromAsciiChecked( - "Reflect.setPrototypeOf"))); - } - - if (!proto->IsJSReceiver() && !proto->IsNull(isolate)) { - THROW_NEW_ERROR_RETURN_FAILURE( - isolate, NewTypeError(MessageTemplate::kProtoObjectOrNull, proto)); - } - - Maybe result = 
JSReceiver::SetPrototype( - Handle::cast(target), proto, true, kDontThrow); - MAYBE_RETURN(result, ReadOnlyRoots(isolate).exception()); - return *isolate->factory()->ToBoolean(result.FromJust()); -} - } // namespace internal } // namespace v8 diff --git a/deps/v8/src/builtins/builtins-regexp-gen.cc b/deps/v8/src/builtins/builtins-regexp-gen.cc index 51ee2796e678ef..d53518ff7ee094 100644 --- a/deps/v8/src/builtins/builtins-regexp-gen.cc +++ b/deps/v8/src/builtins/builtins-regexp-gen.cc @@ -15,7 +15,7 @@ #include "src/objects/js-regexp-string-iterator.h" #include "src/objects/js-regexp.h" #include "src/objects/regexp-match-info.h" -#include "src/regexp/regexp-macro-assembler.h" +#include "src/regexp/regexp.h" namespace v8 { namespace internal { @@ -94,12 +94,12 @@ TNode RegExpBuiltinsAssembler::RegExpCreate(TNode context, TNode pattern = Select( IsUndefined(maybe_string), [=] { return EmptyStringConstant(); }, [=] { return ToString_Inline(context, maybe_string); }); - TNode regexp = CAST(AllocateJSObjectFromMap(initial_map)); + TNode regexp = AllocateJSObjectFromMap(initial_map); return CallRuntime(Runtime::kRegExpInitializeAndCompile, context, regexp, pattern, flags); } -TNode RegExpBuiltinsAssembler::FastLoadLastIndex( +TNode RegExpBuiltinsAssembler::FastLoadLastIndexBeforeSmiCheck( TNode regexp) { // Load the in-object field. static const int field_offset = @@ -121,23 +121,27 @@ TNode RegExpBuiltinsAssembler::LoadLastIndex(TNode context, // The fast-path of StoreLastIndex when regexp is guaranteed to be an unmodified // JSRegExp instance. -void RegExpBuiltinsAssembler::FastStoreLastIndex(Node* regexp, Node* value) { +void RegExpBuiltinsAssembler::FastStoreLastIndex(TNode regexp, + TNode value) { // Store the in-object field. static const int field_offset = JSRegExp::kSize + JSRegExp::kLastIndexFieldIndex * kTaggedSize; StoreObjectField(regexp, field_offset, value); } -void RegExpBuiltinsAssembler::SlowStoreLastIndex(Node* context, Node* regexp, - Node* value) { - Node* const name = HeapConstant(isolate()->factory()->lastIndex_string()); - SetPropertyStrict(CAST(context), CAST(regexp), CAST(name), CAST(value)); +void RegExpBuiltinsAssembler::SlowStoreLastIndex(SloppyTNode context, + SloppyTNode regexp, + SloppyTNode value) { + TNode name = HeapConstant(isolate()->factory()->lastIndex_string()); + SetPropertyStrict(context, regexp, name, value); } -void RegExpBuiltinsAssembler::StoreLastIndex(Node* context, Node* regexp, - Node* value, bool is_fastpath) { +void RegExpBuiltinsAssembler::StoreLastIndex(TNode context, + TNode regexp, + TNode value, + bool is_fastpath) { if (is_fastpath) { - FastStoreLastIndex(regexp, value); + FastStoreLastIndex(CAST(regexp), CAST(value)); } else { SlowStoreLastIndex(context, regexp, value); } @@ -248,10 +252,10 @@ TNode RegExpBuiltinsAssembler::ConstructNewResultFromMatchInfo( TNode native_context = LoadNativeContext(context); TNode map = CAST(LoadContextElement( native_context, Context::SLOW_OBJECT_WITH_NULL_PROTOTYPE_MAP)); - TNode properties = AllocateNameDictionary(num_properties); + TNode properties = + AllocateNameDictionary(num_properties, kAllowLargeObjectAllocation); - TNode group_object = - CAST(AllocateJSObjectFromMap(map, properties)); + TNode group_object = AllocateJSObjectFromMap(map, properties); StoreObjectField(result, JSRegExpResult::kGroupsOffset, group_object); TVARIABLE(IntPtrT, var_i, IntPtrZero()); @@ -534,19 +538,18 @@ TNode RegExpBuiltinsAssembler::RegExpExecInternal( // We expect exactly one result since we force the called regexp to 
behave // as non-global. TNode int_result = ChangeInt32ToIntPtr(result); + GotoIf( + IntPtrEqual(int_result, IntPtrConstant(RegExp::kInternalRegExpSuccess)), + &if_success); + GotoIf( + IntPtrEqual(int_result, IntPtrConstant(RegExp::kInternalRegExpFailure)), + &if_failure); GotoIf(IntPtrEqual(int_result, - IntPtrConstant(NativeRegExpMacroAssembler::SUCCESS)), - &if_success); - GotoIf(IntPtrEqual(int_result, - IntPtrConstant(NativeRegExpMacroAssembler::FAILURE)), - &if_failure); - GotoIf(IntPtrEqual(int_result, - IntPtrConstant(NativeRegExpMacroAssembler::EXCEPTION)), + IntPtrConstant(RegExp::kInternalRegExpException)), &if_exception); - CSA_ASSERT(this, - IntPtrEqual(int_result, - IntPtrConstant(NativeRegExpMacroAssembler::RETRY))); + CSA_ASSERT(this, IntPtrEqual(int_result, + IntPtrConstant(RegExp::kInternalRegExpRetry))); Goto(&runtime); } @@ -755,7 +758,7 @@ RegExpBuiltinsAssembler::RegExpPrototypeExecBodyWithoutResult( GotoIfNot(should_update_last_index, &out); // Update the new last index from {match_indices}. - TNode new_lastindex = CAST(UnsafeLoadFixedArrayElement( + TNode new_lastindex = CAST(UnsafeLoadFixedArrayElement( CAST(match_indices), RegExpMatchInfo::kFirstCaptureIndex + 1)); StoreLastIndex(context, regexp, new_lastindex, is_fastpath); @@ -852,7 +855,7 @@ Node* RegExpBuiltinsAssembler::IsFastRegExpNoPrototype(Node* const context, // The smi check is required to omit ToLength(lastIndex) calls with possible // user-code execution on the fast path. - Node* const last_index = FastLoadLastIndex(CAST(object)); + TNode last_index = FastLoadLastIndexBeforeSmiCheck(CAST(object)); var_result.Bind(TaggedIsPositiveSmi(last_index)); Goto(&out); @@ -897,7 +900,7 @@ TNode RegExpBuiltinsAssembler::IsFastRegExpWithOriginalExec( BIND(&check_last_index); // The smi check is required to omit ToLength(lastIndex) calls with possible // user-code execution on the fast path. - TNode last_index = FastLoadLastIndex(object); + TNode last_index = FastLoadLastIndexBeforeSmiCheck(object); var_result = TaggedIsPositiveSmi(last_index); Goto(&out); @@ -925,9 +928,9 @@ void RegExpBuiltinsAssembler::BranchIfFastRegExp( // This should only be needed for String.p.(split||matchAll), but we are // conservative here. - GotoIf(IsRegExpSpeciesProtectorCellInvalid(), if_ismodified); + TNode native_context = LoadNativeContext(context); + GotoIf(IsRegExpSpeciesProtectorCellInvalid(native_context), if_ismodified); - Node* const native_context = LoadNativeContext(context); Node* const regexp_fun = LoadContextElement(native_context, Context::REGEXP_FUNCTION_INDEX); Node* const initial_map = @@ -954,7 +957,7 @@ void RegExpBuiltinsAssembler::BranchIfFastRegExp( // The smi check is required to omit ToLength(lastIndex) calls with possible // user-code execution on the fast path. - Node* const last_index = FastLoadLastIndex(CAST(object)); + TNode last_index = FastLoadLastIndexBeforeSmiCheck(CAST(object)); Branch(TaggedIsPositiveSmi(last_index), if_isunmodified, if_ismodified); } @@ -1012,7 +1015,7 @@ TF_BUILTIN(RegExpPrototypeExecSlow, RegExpBuiltinsAssembler) { // Fast path stub for ATOM regexps. String matching is done by StringIndexOf, // and {match_info} is updated on success. -// The slow path is implemented in RegExpImpl::AtomExec. +// The slow path is implemented in RegExp::AtomExec. 
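[Editorial aside: the RegExpExecInternal hunk above dispatches on the four internal exec outcomes that this patch moves onto RegExp (kInternalRegExpSuccess, kInternalRegExpFailure, kInternalRegExpException, kInternalRegExpRetry); retry falls back to the runtime and the match is attempted again. A rough standalone C++ sketch of that dispatch shape, with a toy engine in place of the generated matcher.]

#include <iostream>
#include <stdexcept>

enum class RegExpResult { kSuccess, kFailure, kException, kRetry };

// Toy engine: pretend the first attempt must be retried via the runtime.
RegExpResult Engine(int attempt) {
  return attempt == 0 ? RegExpResult::kRetry : RegExpResult::kSuccess;
}

bool Exec() {
  for (int attempt = 0;; ++attempt) {
    switch (Engine(attempt)) {
      case RegExpResult::kSuccess: return true;   // if_success
      case RegExpResult::kFailure: return false;  // if_failure
      case RegExpResult::kException:
        throw std::runtime_error("regexp stack overflow");  // if_exception
      case RegExpResult::kRetry:
        continue;  // runtime path: retry the match
    }
  }
}

int main() { std::cout << (Exec() ? "matched" : "no match") << "\n"; }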
TF_BUILTIN(RegExpExecAtom, RegExpBuiltinsAssembler) { TNode regexp = CAST(Parameter(Descriptor::kRegExp)); TNode subject_string = CAST(Parameter(Descriptor::kString)); @@ -1538,7 +1541,8 @@ TNode RegExpBuiltinsAssembler::FastFlagGetter(TNode regexp, JSRegExp::Flag flag) { TNode flags = CAST(LoadObjectField(regexp, JSRegExp::kFlagsOffset)); TNode mask = SmiConstant(flag); - return SmiToInt32(SmiShr(SmiAnd(flags, mask), JSRegExp::FlagShiftBits(flag))); + return SmiToInt32(SmiShr(SmiAnd(flags, mask), base::bits::CountTrailingZeros( + static_cast(flag)))); } // Load through the GetProperty stub. @@ -1807,10 +1811,9 @@ TF_BUILTIN(RegExpPrototypeTestFast, RegExpBuiltinsAssembler) { Return(FalseConstant()); } -Node* RegExpBuiltinsAssembler::AdvanceStringIndex(Node* const string, - Node* const index, - Node* const is_unicode, - bool is_fastpath) { +TNode RegExpBuiltinsAssembler::AdvanceStringIndex( + SloppyTNode string, SloppyTNode index, + SloppyTNode is_unicode, bool is_fastpath) { CSA_ASSERT(this, IsString(string)); CSA_ASSERT(this, IsNumberNormalized(index)); if (is_fastpath) CSA_ASSERT(this, TaggedIsPositiveSmi(index)); @@ -1818,8 +1821,8 @@ Node* RegExpBuiltinsAssembler::AdvanceStringIndex(Node* const string, // Default to last_index + 1. // TODO(pwong): Consider using TrySmiAdd for the fast path to reduce generated // code. - Node* const index_plus_one = NumberInc(index); - VARIABLE(var_result, MachineRepresentation::kTagged, index_plus_one); + TNode index_plus_one = NumberInc(index); + TVARIABLE(Number, var_result, index_plus_one); // Advancing the index has some subtle issues involving the distinction // between Smis and HeapNumbers. There's three cases: @@ -1846,10 +1849,10 @@ Node* RegExpBuiltinsAssembler::AdvanceStringIndex(Node* const string, BIND(&if_isunicode); { TNode const string_length = LoadStringLengthAsWord(string); - TNode untagged_plus_one = SmiUntag(index_plus_one); + TNode untagged_plus_one = SmiUntag(CAST(index_plus_one)); GotoIfNot(IntPtrLessThan(untagged_plus_one, string_length), &out); - Node* const lead = StringCharCodeAt(string, SmiUntag(index)); + Node* const lead = StringCharCodeAt(string, SmiUntag(CAST(index))); GotoIfNot(Word32Equal(Word32And(lead, Int32Constant(0xFC00)), Int32Constant(0xD800)), &out); @@ -1860,8 +1863,8 @@ Node* RegExpBuiltinsAssembler::AdvanceStringIndex(Node* const string, &out); // At a surrogate pair, return index + 2. - Node* const index_plus_two = NumberInc(index_plus_one); - var_result.Bind(index_plus_two); + TNode index_plus_two = NumberInc(index_plus_one); + var_result = index_plus_two; Goto(&out); } @@ -1870,31 +1873,30 @@ Node* RegExpBuiltinsAssembler::AdvanceStringIndex(Node* const string, return var_result.value(); } -void RegExpBuiltinsAssembler::RegExpPrototypeMatchBody(Node* const context, - Node* const regexp, +void RegExpBuiltinsAssembler::RegExpPrototypeMatchBody(TNode context, + TNode regexp, TNode string, const bool is_fastpath) { if (is_fastpath) CSA_ASSERT(this, IsFastRegExp(context, regexp)); Node* const is_global = - FlagGetter(CAST(context), CAST(regexp), JSRegExp::kGlobal, is_fastpath); + FlagGetter(context, regexp, JSRegExp::kGlobal, is_fastpath); Label if_isglobal(this), if_isnotglobal(this); Branch(is_global, &if_isglobal, &if_isnotglobal); BIND(&if_isnotglobal); { - Node* const result = - is_fastpath - ? RegExpPrototypeExecBody(CAST(context), CAST(regexp), string, true) - : RegExpExec(context, regexp, string); + Node* const result = is_fastpath ? 
RegExpPrototypeExecBody( + context, CAST(regexp), string, true) + : RegExpExec(context, regexp, string); Return(result); } BIND(&if_isglobal); { - Node* const is_unicode = FlagGetter(CAST(context), CAST(regexp), - JSRegExp::kUnicode, is_fastpath); + Node* const is_unicode = + FlagGetter(context, regexp, JSRegExp::kUnicode, is_fastpath); StoreLastIndex(context, regexp, SmiZero(), is_fastpath); @@ -1935,8 +1937,8 @@ void RegExpBuiltinsAssembler::RegExpPrototypeMatchBody(Node* const context, // On the fast path, grab the matching string from the raw match index // array. TNode match_indices = - RegExpPrototypeExecBodyWithoutResult(CAST(context), CAST(regexp), - string, &if_didnotmatch, true); + RegExpPrototypeExecBodyWithoutResult(context, CAST(regexp), string, + &if_didnotmatch, true); Label dosubstring(this), donotsubstring(this); Branch(var_atom.value(), &donotsubstring, &dosubstring); @@ -1988,15 +1990,14 @@ void RegExpBuiltinsAssembler::RegExpPrototypeMatchBody(Node* const context, TNode const match_length = LoadStringLengthAsSmi(match); GotoIfNot(SmiEqual(match_length, SmiZero()), &loop); - Node* last_index = - LoadLastIndex(CAST(context), CAST(regexp), is_fastpath); + Node* last_index = LoadLastIndex(context, regexp, is_fastpath); if (is_fastpath) { CSA_ASSERT(this, TaggedIsPositiveSmi(last_index)); } else { last_index = ToLength_Inline(context, last_index); } - Node* const new_last_index = + TNode new_last_index = AdvanceStringIndex(string, last_index, is_unicode, is_fastpath); if (is_fastpath) { @@ -2017,7 +2018,7 @@ void RegExpBuiltinsAssembler::RegExpPrototypeMatchBody(Node* const context, { // Wrap the match in a JSArray. - Node* const result = array.ToJSArray(CAST(context)); + Node* const result = array.ToJSArray(context); Return(result); } } @@ -2034,7 +2035,7 @@ TF_BUILTIN(RegExpPrototypeMatch, RegExpBuiltinsAssembler) { ThrowIfNotJSReceiver(context, maybe_receiver, MessageTemplate::kIncompatibleMethodReceiver, "RegExp.prototype.@@match"); - Node* const receiver = maybe_receiver; + TNode receiver = CAST(maybe_receiver); // Convert {maybe_string} to a String. TNode const string = ToString_Inline(context, maybe_string); @@ -2086,7 +2087,8 @@ void RegExpMatchAllAssembler::Generate(TNode context, // 7. Let lastIndex be ? ToLength(? Get(R, "lastIndex")). // 8. Perform ? Set(matcher, "lastIndex", lastIndex, true). - FastStoreLastIndex(var_matcher.value(), FastLoadLastIndex(fast_regexp)); + FastStoreLastIndex(CAST(var_matcher.value()), + FastLoadLastIndex(fast_regexp)); // 9. If flags contains "g", let global be true. // 10. Else, let global be false. @@ -2226,12 +2228,11 @@ TF_BUILTIN(RegExpMatchFast, RegExpBuiltinsAssembler) { } void RegExpBuiltinsAssembler::RegExpPrototypeSearchBodyFast( - Node* const context, Node* const regexp, Node* const string) { + TNode context, TNode regexp, TNode string) { CSA_ASSERT(this, IsFastRegExp(context, regexp)); - CSA_ASSERT(this, IsString(string)); // Grab the initial value of last index. - Node* const previous_last_index = FastLoadLastIndex(CAST(regexp)); + TNode previous_last_index = FastLoadLastIndex(regexp); // Ensure last index is 0. FastStoreLastIndex(regexp, SmiZero()); @@ -2239,7 +2240,7 @@ void RegExpBuiltinsAssembler::RegExpPrototypeSearchBodyFast( // Call exec. Label if_didnotmatch(this); TNode match_indices = RegExpPrototypeExecBodyWithoutResult( - CAST(context), CAST(regexp), CAST(string), &if_didnotmatch, true); + context, regexp, string, &if_didnotmatch, true); // Successful match. 
{ @@ -2839,16 +2840,14 @@ TF_BUILTIN(RegExpStringIteratorPrototypeNext, RegExpStringIteratorAssembler) { GotoIfNot(IsEmptyString(match_str), &return_result); // 1. Let thisIndex be ? ToLength(? Get(R, "lastIndex")). - TNode this_index = CAST(FastLoadLastIndex(CAST(iterating_regexp))); - CSA_ASSERT(this, TaggedIsSmi(this_index)); + TNode this_index = FastLoadLastIndex(CAST(iterating_regexp)); // 2. Let nextIndex be ! AdvanceStringIndex(S, thisIndex, fullUnicode). - TNode next_index = CAST(AdvanceStringIndex( - iterating_string, this_index, HasUnicodeFlag(flags), true)); - CSA_ASSERT(this, TaggedIsSmi(next_index)); + TNode next_index = AdvanceStringIndexFast( + iterating_string, this_index, HasUnicodeFlag(flags)); // 3. Perform ? Set(R, "lastIndex", nextIndex, true). - FastStoreLastIndex(iterating_regexp, next_index); + FastStoreLastIndex(CAST(iterating_regexp), next_index); // iii. Return ! CreateIterResultObject(match, false). Goto(&return_result); @@ -2866,8 +2865,8 @@ TF_BUILTIN(RegExpStringIteratorPrototypeNext, RegExpStringIteratorAssembler) { TNode this_index = ToLength_Inline(context, last_index); // 2. Let nextIndex be ! AdvanceStringIndex(S, thisIndex, fullUnicode). - TNode next_index = CAST(AdvanceStringIndex( - iterating_string, this_index, HasUnicodeFlag(flags), false)); + TNode next_index = AdvanceStringIndex( + iterating_string, this_index, HasUnicodeFlag(flags), false); // 3. Perform ? Set(R, "lastIndex", nextIndex, true). SlowStoreLastIndex(context, iterating_regexp, next_index); diff --git a/deps/v8/src/builtins/builtins-regexp-gen.h b/deps/v8/src/builtins/builtins-regexp-gen.h index 88c00095b9d112..3677314f195ead 100644 --- a/deps/v8/src/builtins/builtins-regexp-gen.h +++ b/deps/v8/src/builtins/builtins-regexp-gen.h @@ -7,7 +7,7 @@ #include "src/base/optional.h" #include "src/codegen/code-stub-assembler.h" -#include "src/execution/message-template.h" +#include "src/common/message-template.h" namespace v8 { namespace internal { @@ -42,15 +42,20 @@ class RegExpBuiltinsAssembler : public CodeStubAssembler { TNode context, TNode length, TNode index, TNode input, TNode* elements_out = nullptr); - TNode FastLoadLastIndex(TNode regexp); + TNode FastLoadLastIndexBeforeSmiCheck(TNode regexp); + TNode FastLoadLastIndex(TNode regexp) { + return CAST(FastLoadLastIndexBeforeSmiCheck(regexp)); + } TNode SlowLoadLastIndex(TNode context, TNode regexp); TNode LoadLastIndex(TNode context, TNode regexp, bool is_fastpath); - void FastStoreLastIndex(Node* regexp, Node* value); - void SlowStoreLastIndex(Node* context, Node* regexp, Node* value); - void StoreLastIndex(Node* context, Node* regexp, Node* value, - bool is_fastpath); + void FastStoreLastIndex(TNode regexp, TNode value); + void SlowStoreLastIndex(SloppyTNode context, + SloppyTNode regexp, + SloppyTNode value); + void StoreLastIndex(TNode context, TNode regexp, + TNode value, bool is_fastpath); // Loads {var_string_start} and {var_string_end} with the corresponding // offsets into the given {string_data}. 
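The AdvanceStringIndex rewrite above replaces the untyped Node* signature with typed TNode parameters while keeping the semantics unchanged: the index advances by one code unit by default, and by two only when the unicode flag is set and the index sits on a lead/trail surrogate pair. A minimal plain-C++ restatement of that rule over a raw UTF-16 buffer (a sketch, not the CodeStubAssembler code; the function name is reused only for clarity):

```cpp
#include <cstddef>
#include <string>

// Sketch of the index-advance rule, restated over a raw UTF-16 buffer.
std::size_t AdvanceStringIndex(const std::u16string& s, std::size_t index,
                               bool is_unicode) {
  std::size_t next = index + 1;  // Default: advance by one code unit.
  if (!is_unicode || next >= s.size()) return next;
  // 0xD800-0xDBFF is a lead surrogate, 0xDC00-0xDFFF a trail surrogate;
  // the 0xFC00 mask isolates those ranges, as in the builtin.
  if ((s[index] & 0xFC00) == 0xD800 && (s[next] & 0xFC00) == 0xDC00) {
    return index + 2;  // At a surrogate pair, skip both code units.
  }
  return next;
}
```

For a string starting with U+1D306 (one surrogate pair), index 0 advances to 2 under the unicode flag and to 1 without it, which is exactly the distinction the global-match and string-iterator loops above rely on.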
@@ -127,20 +132,23 @@ class RegExpBuiltinsAssembler : public CodeStubAssembler { Node* RegExpExec(Node* context, Node* regexp, Node* string); - Node* AdvanceStringIndex(Node* const string, Node* const index, - Node* const is_unicode, bool is_fastpath); + TNode AdvanceStringIndex(SloppyTNode string, + SloppyTNode index, + SloppyTNode is_unicode, + bool is_fastpath); - Node* AdvanceStringIndexFast(Node* const string, Node* const index, - Node* const is_unicode) { - return AdvanceStringIndex(string, index, is_unicode, true); + TNode AdvanceStringIndexFast(TNode string, TNode index, + TNode is_unicode) { + return CAST(AdvanceStringIndex(string, index, is_unicode, true)); } - void RegExpPrototypeMatchBody(Node* const context, Node* const regexp, + void RegExpPrototypeMatchBody(TNode context, TNode regexp, TNode const string, const bool is_fastpath); - void RegExpPrototypeSearchBodyFast(Node* const context, Node* const regexp, - Node* const string); + void RegExpPrototypeSearchBodyFast(TNode context, + TNode regexp, + TNode string); void RegExpPrototypeSearchBodySlow(Node* const context, Node* const regexp, Node* const string); diff --git a/deps/v8/src/builtins/builtins-regexp.cc b/deps/v8/src/builtins/builtins-regexp.cc index 3e0f7182c75285..e758782a997dd0 100644 --- a/deps/v8/src/builtins/builtins-regexp.cc +++ b/deps/v8/src/builtins/builtins-regexp.cc @@ -6,8 +6,8 @@ #include "src/builtins/builtins.h" #include "src/logging/counters.h" #include "src/objects/objects-inl.h" -#include "src/regexp/jsregexp.h" #include "src/regexp/regexp-utils.h" +#include "src/regexp/regexp.h" #include "src/strings/string-builder-inl.h" namespace v8 { diff --git a/deps/v8/src/builtins/builtins-string-gen.cc b/deps/v8/src/builtins/builtins-string-gen.cc index 5689b42619a95c..97dc8ca895b8ae 100644 --- a/deps/v8/src/builtins/builtins-string-gen.cc +++ b/deps/v8/src/builtins/builtins-string-gen.cc @@ -545,32 +545,33 @@ TF_BUILTIN(StringCharAt, StringBuiltinsAssembler) { Return(result); } -TF_BUILTIN(StringCodePointAtUTF16, StringBuiltinsAssembler) { +TF_BUILTIN(StringCodePointAt, StringBuiltinsAssembler) { Node* receiver = Parameter(Descriptor::kReceiver); Node* position = Parameter(Descriptor::kPosition); + // TODO(sigurds) Figure out if passing length as argument pays off. TNode length = LoadStringLengthAsWord(receiver); // Load the character code at the {position} from the {receiver}. TNode code = - LoadSurrogatePairAt(receiver, length, position, UnicodeEncoding::UTF16); + LoadSurrogatePairAt(receiver, length, position, UnicodeEncoding::UTF32); // And return it as TaggedSigned value. // TODO(turbofan): Allow builtins to return values untagged. TNode result = SmiFromInt32(code); Return(result); } -TF_BUILTIN(StringCodePointAtUTF32, StringBuiltinsAssembler) { - Node* receiver = Parameter(Descriptor::kReceiver); - Node* position = Parameter(Descriptor::kPosition); +TF_BUILTIN(StringFromCodePointAt, StringBuiltinsAssembler) { + TNode receiver = CAST(Parameter(Descriptor::kReceiver)); + TNode position = + UncheckedCast(Parameter(Descriptor::kPosition)); // TODO(sigurds) Figure out if passing length as argument pays off. TNode length = LoadStringLengthAsWord(receiver); // Load the character code at the {position} from the {receiver}. TNode code = - LoadSurrogatePairAt(receiver, length, position, UnicodeEncoding::UTF32); - // And return it as TaggedSigned value. - // TODO(turbofan): Allow builtins to return values untagged. 
- TNode result = SmiFromInt32(code); + LoadSurrogatePairAt(receiver, length, position, UnicodeEncoding::UTF16); + // Create a String from the UTF16 encoded code point + TNode result = StringFromSingleUTF16EncodedCodePoint(code); Return(result); } @@ -952,19 +953,6 @@ void StringIncludesIndexOfAssembler::Generate(SearchVariant variant, } } -void StringBuiltinsAssembler::RequireObjectCoercible(Node* const context, - Node* const value, - const char* method_name) { - Label out(this), throw_exception(this, Label::kDeferred); - Branch(IsNullOrUndefined(value), &throw_exception, &out); - - BIND(&throw_exception); - ThrowTypeError(context, MessageTemplate::kCalledOnNullOrUndefined, - method_name); - - BIND(&out); -} - void StringBuiltinsAssembler::MaybeCallFunctionAtSymbol( Node* const context, Node* const object, Node* const maybe_string, Handle symbol, DescriptorIndexAndName symbol_index, @@ -1072,10 +1060,10 @@ compiler::Node* StringBuiltinsAssembler::GetSubstitution( TF_BUILTIN(StringPrototypeReplace, StringBuiltinsAssembler) { Label out(this); - Node* const receiver = Parameter(Descriptor::kReceiver); + TNode receiver = CAST(Parameter(Descriptor::kReceiver)); Node* const search = Parameter(Descriptor::kSearch); Node* const replace = Parameter(Descriptor::kReplace); - Node* const context = Parameter(Descriptor::kContext); + TNode context = CAST(Parameter(Descriptor::kContext)); TNode const smi_zero = SmiConstant(0); @@ -1578,7 +1566,7 @@ TF_BUILTIN(StringPrototypeSplit, StringBuiltinsAssembler) { ChangeInt32ToIntPtr(Parameter(Descriptor::kJSActualArgumentsCount)); CodeStubArguments args(this, argc); - Node* const receiver = args.GetReceiver(); + TNode receiver = args.GetReceiver(); Node* const separator = args.GetOptionalArgumentValue(kSeparatorArg); Node* const limit = args.GetOptionalArgumentValue(kLimitArg); TNode context = CAST(Parameter(Descriptor::kContext)); @@ -1986,12 +1974,12 @@ TNode StringBuiltinsAssembler::LoadSurrogatePairAt( switch (encoding) { case UnicodeEncoding::UTF16: - var_result = Signed(Word32Or( + var_result = Word32Or( // Need to swap the order for big-endian platforms #if V8_TARGET_BIG_ENDIAN - Word32Shl(lead, Int32Constant(16)), trail)); + Word32Shl(lead, Int32Constant(16)), trail); #else - Word32Shl(trail, Int32Constant(16)), lead)); + Word32Shl(trail, Int32Constant(16)), lead); #endif break; @@ -2002,8 +1990,8 @@ TNode StringBuiltinsAssembler::LoadSurrogatePairAt( Int32Constant(0x10000 - (0xD800 << 10) - 0xDC00); // (lead << 10) + trail + SURROGATE_OFFSET - var_result = Signed(Int32Add(Word32Shl(lead, Int32Constant(10)), - Int32Add(trail, surrogate_offset))); + var_result = Int32Add(Word32Shl(lead, Int32Constant(10)), + Int32Add(trail, surrogate_offset)); break; } } diff --git a/deps/v8/src/builtins/builtins-string-gen.h b/deps/v8/src/builtins/builtins-string-gen.h index 92ebd3803b8bd5..679ce0e17fe8ee 100644 --- a/deps/v8/src/builtins/builtins-string-gen.h +++ b/deps/v8/src/builtins/builtins-string-gen.h @@ -76,9 +76,6 @@ class StringBuiltinsAssembler : public CodeStubAssembler { TNode subject_length, TNode limit_number); - void RequireObjectCoercible(Node* const context, Node* const value, - const char* method_name); - TNode SmiIsNegative(TNode value) { return SmiLessThan(value, SmiConstant(0)); } diff --git a/deps/v8/src/builtins/builtins-symbol-gen.cc b/deps/v8/src/builtins/builtins-symbol-gen.cc index 4e8c9f98502a4c..610a8baeb314f2 100644 --- a/deps/v8/src/builtins/builtins-symbol-gen.cc +++ b/deps/v8/src/builtins/builtins-symbol-gen.cc @@ -13,8 +13,8 @@ 
namespace internal { // ES #sec-symbol-objects // ES #sec-symbol.prototype.description TF_BUILTIN(SymbolPrototypeDescriptionGetter, CodeStubAssembler) { - Node* context = Parameter(Descriptor::kContext); - Node* receiver = Parameter(Descriptor::kReceiver); + TNode context = CAST(Parameter(Descriptor::kContext)); + TNode receiver = CAST(Parameter(Descriptor::kReceiver)); Node* value = ToThisValue(context, receiver, PrimitiveType::kSymbol, "Symbol.prototype.description"); @@ -24,8 +24,8 @@ TF_BUILTIN(SymbolPrototypeDescriptionGetter, CodeStubAssembler) { // ES6 #sec-symbol.prototype-@@toprimitive TF_BUILTIN(SymbolPrototypeToPrimitive, CodeStubAssembler) { - Node* context = Parameter(Descriptor::kContext); - Node* receiver = Parameter(Descriptor::kReceiver); + TNode context = CAST(Parameter(Descriptor::kContext)); + TNode receiver = CAST(Parameter(Descriptor::kReceiver)); Node* result = ToThisValue(context, receiver, PrimitiveType::kSymbol, "Symbol.prototype [ @@toPrimitive ]"); @@ -34,8 +34,8 @@ TF_BUILTIN(SymbolPrototypeToPrimitive, CodeStubAssembler) { // ES6 #sec-symbol.prototype.tostring TF_BUILTIN(SymbolPrototypeToString, CodeStubAssembler) { - Node* context = Parameter(Descriptor::kContext); - Node* receiver = Parameter(Descriptor::kReceiver); + TNode context = CAST(Parameter(Descriptor::kContext)); + TNode receiver = CAST(Parameter(Descriptor::kReceiver)); Node* value = ToThisValue(context, receiver, PrimitiveType::kSymbol, "Symbol.prototype.toString"); @@ -45,8 +45,8 @@ TF_BUILTIN(SymbolPrototypeToString, CodeStubAssembler) { // ES6 #sec-symbol.prototype.valueof TF_BUILTIN(SymbolPrototypeValueOf, CodeStubAssembler) { - Node* context = Parameter(Descriptor::kContext); - Node* receiver = Parameter(Descriptor::kReceiver); + TNode context = CAST(Parameter(Descriptor::kContext)); + TNode receiver = CAST(Parameter(Descriptor::kReceiver)); Node* result = ToThisValue(context, receiver, PrimitiveType::kSymbol, "Symbol.prototype.valueOf"); diff --git a/deps/v8/src/builtins/builtins-typed-array-gen.cc b/deps/v8/src/builtins/builtins-typed-array-gen.cc index 8484685a6a5912..857d33988f32f5 100644 --- a/deps/v8/src/builtins/builtins-typed-array-gen.cc +++ b/deps/v8/src/builtins/builtins-typed-array-gen.cc @@ -18,32 +18,12 @@ using compiler::Node; template using TNode = compiler::TNode; -// This is needed for gc_mole which will compile this file without the full set -// of GN defined macros. -#ifndef V8_TYPED_ARRAY_MAX_SIZE_IN_HEAP -#define V8_TYPED_ARRAY_MAX_SIZE_IN_HEAP 64 -#endif - // ----------------------------------------------------------------------------- // ES6 section 22.2 TypedArray Objects -// Setup the TypedArray which is under construction. -// - Set the length. -// - Set the byte_offset. -// - Set the byte_length. -// - Set EmbedderFields to 0. -void TypedArrayBuiltinsAssembler::SetupTypedArray(TNode holder, - TNode length, - TNode byte_offset, - TNode byte_length) { - StoreObjectFieldNoWriteBarrier(holder, JSTypedArray::kLengthOffset, length, - MachineType::PointerRepresentation()); - StoreObjectFieldNoWriteBarrier(holder, JSArrayBufferView::kByteOffsetOffset, - byte_offset, - MachineType::PointerRepresentation()); - StoreObjectFieldNoWriteBarrier(holder, JSArrayBufferView::kByteLengthOffset, - byte_length, - MachineType::PointerRepresentation()); +// Sets the embedder fields to 0 for a TypedArray which is under construction. 
+void TypedArrayBuiltinsAssembler::SetupTypedArrayEmbedderFields( + TNode holder) { for (int offset = JSTypedArray::kHeaderSize; offset < JSTypedArray::kSizeWithEmbedderFields; offset += kTaggedSize) { StoreObjectField(holder, offset, SmiConstant(0)); @@ -54,8 +34,7 @@ void TypedArrayBuiltinsAssembler::SetupTypedArray(TNode holder, // elements. // TODO(bmeurer,v8:4153): Rename this and maybe fix up the implementation a bit. TNode TypedArrayBuiltinsAssembler::AllocateEmptyOnHeapBuffer( - TNode context, TNode holder, - TNode byte_length) { + TNode context, TNode byte_length) { TNode native_context = LoadNativeContext(context); TNode map = CAST(LoadContextElement(native_context, Context::ARRAY_BUFFER_MAP_INDEX)); @@ -97,16 +76,6 @@ TNode TypedArrayBuiltinsAssembler::AllocateEmptyOnHeapBuffer( offset < JSArrayBuffer::kSizeWithEmbedderFields; offset += kTaggedSize) { StoreObjectFieldNoWriteBarrier(buffer, offset, SmiConstant(0)); } - - StoreObjectField(holder, JSTypedArray::kBufferOffset, buffer); - - TNode elements = AllocateByteArray(byte_length); - StoreObjectField(holder, JSTypedArray::kElementsOffset, elements); - StoreObjectField(holder, JSTypedArray::kBasePointerOffset, elements); - StoreObjectFieldNoWriteBarrier( - holder, JSTypedArray::kExternalPointerOffset, - PointerConstant(JSTypedArray::ExternalPointerForOnHeapArray()), - MachineType::PointerRepresentation()); return buffer; } @@ -200,13 +169,13 @@ TF_BUILTIN(TypedArrayPrototypeLength, TypedArrayBuiltinsAssembler) { Return(ChangeUintPtrToTagged(length)); } -TNode TypedArrayBuiltinsAssembler::IsUint8ElementsKind( +TNode TypedArrayBuiltinsAssembler::IsUint8ElementsKind( TNode kind) { return Word32Or(Word32Equal(kind, Int32Constant(UINT8_ELEMENTS)), Word32Equal(kind, Int32Constant(UINT8_CLAMPED_ELEMENTS))); } -TNode TypedArrayBuiltinsAssembler::IsBigInt64ElementsKind( +TNode TypedArrayBuiltinsAssembler::IsBigInt64ElementsKind( TNode kind) { return Word32Or(Word32Equal(kind, Int32Constant(BIGINT64_ELEMENTS)), Word32Equal(kind, Int32Constant(BIGUINT64_ELEMENTS))); @@ -228,7 +197,12 @@ TNode TypedArrayBuiltinsAssembler::GetTypedArrayElementSize( TorqueStructTypedArrayElementsInfo TypedArrayBuiltinsAssembler::GetTypedArrayElementsInfo( TNode typed_array) { - TNode elements_kind = LoadElementsKind(typed_array); + return GetTypedArrayElementsInfo(LoadMap(typed_array)); +} + +TorqueStructTypedArrayElementsInfo +TypedArrayBuiltinsAssembler::GetTypedArrayElementsInfo(TNode map) { + TNode elements_kind = LoadMapElementsKind(map); TVARIABLE(UintPtrT, var_size_log2); TVARIABLE(Map, var_map); ReadOnlyRoots roots(isolate()); @@ -294,10 +268,9 @@ TNode TypedArrayBuiltinsAssembler::GetBuffer( Label call_runtime(this), done(this); TVARIABLE(Object, var_result); - TNode buffer = LoadObjectField(array, JSTypedArray::kBufferOffset); + TNode buffer = LoadJSArrayBufferViewBuffer(array); GotoIf(IsDetachedBuffer(buffer), &call_runtime); - TNode backing_store = LoadObjectField( - CAST(buffer), JSArrayBuffer::kBackingStoreOffset); + TNode backing_store = LoadJSArrayBufferBackingStore(buffer); GotoIf(WordEqual(backing_store, IntPtrConstant(0)), &call_runtime); var_result = buffer; Goto(&done); @@ -327,10 +300,10 @@ void TypedArrayBuiltinsAssembler::SetTypedArraySource( TNode context, TNode source, TNode target, TNode offset, Label* call_runtime, Label* if_source_too_large) { - CSA_ASSERT(this, Word32BinaryNot(IsDetachedBuffer( - LoadObjectField(source, JSTypedArray::kBufferOffset)))); - CSA_ASSERT(this, Word32BinaryNot(IsDetachedBuffer( - 
LoadObjectField(target, JSTypedArray::kBufferOffset)))); + CSA_ASSERT(this, Word32BinaryNot( + IsDetachedBuffer(LoadJSArrayBufferViewBuffer(source)))); + CSA_ASSERT(this, Word32BinaryNot( + IsDetachedBuffer(LoadJSArrayBufferViewBuffer(target)))); CSA_ASSERT(this, IntPtrGreaterThanOrEqual(offset, IntPtrConstant(0))); CSA_ASSERT(this, IntPtrLessThanOrEqual(offset, IntPtrConstant(Smi::kMaxValue))); @@ -774,8 +747,8 @@ TF_BUILTIN(TypedArrayOf, TypedArrayBuiltinsAssembler) { // ToNumber/ToBigInt may execute JavaScript code, which could // detach the array's buffer. - Node* buffer = - LoadObjectField(new_typed_array, JSTypedArray::kBufferOffset); + TNode buffer = + LoadJSArrayBufferViewBuffer(new_typed_array); GotoIf(IsDetachedBuffer(buffer), &if_detached); // GC may move backing store in ToNumber, thus load backing @@ -997,8 +970,8 @@ TF_BUILTIN(TypedArrayFrom, TypedArrayBuiltinsAssembler) { // ToNumber/ToBigInt may execute JavaScript code, which could // detach the array's buffer. - Node* buffer = LoadObjectField(target_obj.value(), - JSTypedArray::kBufferOffset); + TNode buffer = + LoadJSArrayBufferViewBuffer(target_obj.value()); GotoIf(IsDetachedBuffer(buffer), &if_detached); // GC may move backing store in map_fn, thus load backing @@ -1027,7 +1000,5 @@ TF_BUILTIN(TypedArrayFrom, TypedArrayBuiltinsAssembler) { "%TypedArray%.from"); } -#undef V8_TYPED_ARRAY_MAX_SIZE_IN_HEAP - } // namespace internal } // namespace v8 diff --git a/deps/v8/src/builtins/builtins-typed-array-gen.h b/deps/v8/src/builtins/builtins-typed-array-gen.h index 6fb02a657c5431..d637bc9c6b6c9b 100644 --- a/deps/v8/src/builtins/builtins-typed-array-gen.h +++ b/deps/v8/src/builtins/builtins-typed-array-gen.h @@ -27,15 +27,12 @@ class TypedArrayBuiltinsAssembler : public CodeStubAssembler { const char* method_name, IterationKind iteration_kind); - void SetupTypedArray(TNode holder, TNode length, - TNode byte_offset, - TNode byte_length); + void SetupTypedArrayEmbedderFields(TNode holder); void AttachBuffer(TNode holder, TNode buffer, TNode map, TNode length, TNode byte_offset); TNode AllocateEmptyOnHeapBuffer(TNode context, - TNode holder, TNode byte_length); TNode LoadMapForType(TNode array); @@ -44,16 +41,17 @@ class TypedArrayBuiltinsAssembler : public CodeStubAssembler { TNode byte_offset); // Returns true if kind is either UINT8_ELEMENTS or UINT8_CLAMPED_ELEMENTS. - TNode IsUint8ElementsKind(TNode kind); + TNode IsUint8ElementsKind(TNode kind); // Returns true if kind is either BIGINT64_ELEMENTS or BIGUINT64_ELEMENTS. - TNode IsBigInt64ElementsKind(TNode kind); + TNode IsBigInt64ElementsKind(TNode kind); // Returns the byte size of an element for a TypedArray elements kind. TNode GetTypedArrayElementSize(TNode elements_kind); // Returns information (byte size and map) about a TypedArray's elements. ElementsInfo GetTypedArrayElementsInfo(TNode typed_array); + ElementsInfo GetTypedArrayElementsInfo(TNode map); TNode GetDefaultConstructor(TNode context, TNode exemplar); diff --git a/deps/v8/src/builtins/builtins-weak-refs.cc b/deps/v8/src/builtins/builtins-weak-refs.cc index 78f37c0cf5dfbd..18738d2c487703 100644 --- a/deps/v8/src/builtins/builtins-weak-refs.cc +++ b/deps/v8/src/builtins/builtins-weak-refs.cc @@ -48,14 +48,24 @@ BUILTIN(FinalizationGroupRegister) { HandleScope scope(isolate); const char* method_name = "FinalizationGroup.prototype.register"; + // 1. Let finalizationGroup be the this value. + // + // 2. If Type(finalizationGroup) is not Object, throw a TypeError + // exception. + // + // 4. 
If finalizationGroup does not have a [[Cells]] internal slot, + // throw a TypeError exception. CHECK_RECEIVER(JSFinalizationGroup, finalization_group, method_name); Handle target = args.atOrUndefined(isolate, 1); + + // 3. If Type(target) is not Object, throw a TypeError exception. if (!target->IsJSReceiver()) { THROW_NEW_ERROR_RETURN_FAILURE( isolate, NewTypeError(MessageTemplate::kWeakRefsRegisterTargetMustBeObject)); } + Handle holdings = args.atOrUndefined(isolate, 2); if (target->SameValue(*holdings)) { THROW_NEW_ERROR_RETURN_FAILURE( @@ -64,15 +74,21 @@ BUILTIN(FinalizationGroupRegister) { MessageTemplate::kWeakRefsRegisterTargetAndHoldingsMustNotBeSame)); } - Handle key = args.atOrUndefined(isolate, 3); - // TODO(marja, gsathya): Restrictions on "key" (e.g., does it need to be an - // object). + Handle unregister_token = args.atOrUndefined(isolate, 3); + // 5. If Type(unregisterToken) is not Object, + // a. If unregisterToken is not undefined, throw a TypeError exception. + if (!unregister_token->IsJSReceiver() && !unregister_token->IsUndefined()) { + THROW_NEW_ERROR_RETURN_FAILURE( + isolate, + NewTypeError(MessageTemplate::kWeakRefsUnregisterTokenMustBeObject, + unregister_token)); + } // TODO(marja): Realms. JSFinalizationGroup::Register(finalization_group, - Handle::cast(target), holdings, key, - isolate); + Handle::cast(target), holdings, + unregister_token, isolate); return ReadOnlyRoots(isolate).undefined_value(); } @@ -80,25 +96,63 @@ BUILTIN(FinalizationGroupUnregister) { HandleScope scope(isolate); const char* method_name = "FinalizationGroup.prototype.unregister"; + // 1. Let finalizationGroup be the this value. + // + // 2. If Type(finalizationGroup) is not Object, throw a TypeError + // exception. + // + // 3. If finalizationGroup does not have a [[Cells]] internal slot, + // throw a TypeError exception. CHECK_RECEIVER(JSFinalizationGroup, finalization_group, method_name); - Handle key = args.atOrUndefined(isolate, 1); - JSFinalizationGroup::Unregister(finalization_group, key, isolate); - return ReadOnlyRoots(isolate).undefined_value(); + Handle unregister_token = args.atOrUndefined(isolate, 1); + + // 4. If Type(unregisterToken) is not Object, throw a TypeError exception. + if (!unregister_token->IsJSReceiver()) { + THROW_NEW_ERROR_RETURN_FAILURE( + isolate, + NewTypeError(MessageTemplate::kWeakRefsUnregisterTokenMustBeObject, + unregister_token)); + } + + bool success = JSFinalizationGroup::Unregister( + finalization_group, Handle::cast(unregister_token), isolate); + + return *isolate->factory()->ToBoolean(success); } BUILTIN(FinalizationGroupCleanupSome) { HandleScope scope(isolate); const char* method_name = "FinalizationGroup.prototype.cleanupSome"; + // 1. Let finalizationGroup be the this value. + // + // 2. If Type(finalizationGroup) is not Object, throw a TypeError + // exception. + // + // 3. If finalizationGroup does not have a [[Cells]] internal slot, + // throw a TypeError exception. CHECK_RECEIVER(JSFinalizationGroup, finalization_group, method_name); - // TODO(marja, gsathya): Add missing "cleanup" callback. + Handle callback(finalization_group->cleanup(), isolate); + Handle callback_obj = args.atOrUndefined(isolate, 1); + + // 4. If callback is not undefined and IsCallable(callback) is + // false, throw a TypeError exception. 
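Per the step quoted above, cleanupSome lets a passed callback override the group's stored cleanup callback only when it is defined, and a defined but non-callable argument throws. A small sketch of that selection logic, with Value and ChooseCleanupCallback as hypothetical stand-ins for the handles involved:

```cpp
#include <stdexcept>

// Hypothetical stand-in for a JS value; only the two predicates the
// builtin consults are modeled here.
struct Value {
  bool is_undefined;
  bool is_callable;
};

// 4. If callback is not undefined and IsCallable(callback) is false,
//    throw a TypeError exception. Otherwise the explicit callback wins.
const Value& ChooseCleanupCallback(const Value& stored, const Value& arg) {
  if (!arg.is_undefined) {
    if (!arg.is_callable) {
      throw std::invalid_argument("cleanup callback must be callable");
    }
    return arg;
  }
  return stored;
}
```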
+ if (!callback_obj->IsUndefined(isolate)) { + if (!callback_obj->IsCallable()) { + THROW_NEW_ERROR_RETURN_FAILURE( + isolate, + NewTypeError(MessageTemplate::kWeakRefsCleanupMustBeCallable)); + } + callback = callback_obj; + } // Don't do set_scheduled_for_cleanup(false); we still have the microtask // scheduled and don't want to schedule another one in case the user never // executes microtasks. - JSFinalizationGroup::Cleanup(finalization_group, isolate); + JSFinalizationGroup::Cleanup(isolate, finalization_group, callback); + return ReadOnlyRoots(isolate).undefined_value(); } @@ -138,7 +192,7 @@ BUILTIN(WeakRefConstructor) { } Handle target_receiver = handle(JSReceiver::cast(*target_object), isolate); - isolate->heap()->AddKeepDuringJobTarget(target_receiver); + isolate->heap()->KeepDuringJob(target_receiver); // TODO(marja): Realms. @@ -158,9 +212,9 @@ BUILTIN(WeakRefDeref) { if (weak_ref->target().IsJSReceiver()) { Handle target = handle(JSReceiver::cast(weak_ref->target()), isolate); - // AddKeepDuringJobTarget might allocate and cause a GC, but it won't clear + // KeepDuringJob might allocate and cause a GC, but it won't clear // weak_ref since we hold a Handle to its target. - isolate->heap()->AddKeepDuringJobTarget(target); + isolate->heap()->KeepDuringJob(target); } else { DCHECK(weak_ref->target().IsUndefined(isolate)); } diff --git a/deps/v8/src/builtins/collections.tq b/deps/v8/src/builtins/collections.tq index eb95a7702368e3..b83906d109b2da 100644 --- a/deps/v8/src/builtins/collections.tq +++ b/deps/v8/src/builtins/collections.tq @@ -33,7 +33,7 @@ namespace collections { } } } - case (receiver: JSReceiver): { + case (JSReceiver): { goto MayHaveSideEffects; } case (o: Object): deferred { diff --git a/deps/v8/src/builtins/data-view.tq b/deps/v8/src/builtins/data-view.tq index 842e9527ee5903..62a0cc31c3c34a 100644 --- a/deps/v8/src/builtins/data-view.tq +++ b/deps/v8/src/builtins/data-view.tq @@ -74,16 +74,17 @@ namespace data_view { // ES6 section 24.2.4.1 get DataView.prototype.buffer javascript builtin DataViewPrototypeGetBuffer( - context: Context, receiver: Object, ...arguments): JSArrayBuffer { - let dataView: JSDataView = + js-implicit context: Context, + receiver: Object)(...arguments): JSArrayBuffer { + const dataView: JSDataView = ValidateDataView(context, receiver, 'get DataView.prototype.buffer'); return dataView.buffer; } // ES6 section 24.2.4.2 get DataView.prototype.byteLength javascript builtin DataViewPrototypeGetByteLength( - context: Context, receiver: Object, ...arguments): Number { - let dataView: JSDataView = ValidateDataView( + js-implicit context: Context, receiver: Object)(...arguments): Number { + const dataView: JSDataView = ValidateDataView( context, receiver, 'get DataView.prototype.byte_length'); if (WasNeutered(dataView)) { // TODO(bmeurer): According to the ES6 spec, we should throw a TypeError @@ -95,8 +96,8 @@ namespace data_view { // ES6 section 24.2.4.3 get DataView.prototype.byteOffset javascript builtin DataViewPrototypeGetByteOffset( - context: Context, receiver: Object, ...arguments): Number { - let dataView: JSDataView = ValidateDataView( + js-implicit context: Context, receiver: Object)(...arguments): Number { + const dataView: JSDataView = ValidateDataView( context, receiver, 'get DataView.prototype.byte_offset'); if (WasNeutered(dataView)) { // TODO(bmeurer): According to the ES6 spec, we should throw a TypeError @@ -128,7 +129,7 @@ namespace data_view { macro LoadDataView16( buffer: JSArrayBuffer, offset: uintptr, 
requestedLittleEndian: bool, signed: constexpr bool): Number { - let dataPointer: RawPtr = buffer.backing_store; + const dataPointer: RawPtr = buffer.backing_store; let b0: int32; let b1: int32; @@ -155,12 +156,12 @@ namespace data_view { macro LoadDataView32( buffer: JSArrayBuffer, offset: uintptr, requestedLittleEndian: bool, kind: constexpr ElementsKind): Number { - let dataPointer: RawPtr = buffer.backing_store; + const dataPointer: RawPtr = buffer.backing_store; - let b0: uint32 = LoadUint8(dataPointer, offset); - let b1: uint32 = LoadUint8(dataPointer, offset + 1); - let b2: uint32 = LoadUint8(dataPointer, offset + 2); - let b3: uint32 = LoadUint8(dataPointer, offset + 3); + const b0: uint32 = LoadUint8(dataPointer, offset); + const b1: uint32 = LoadUint8(dataPointer, offset + 1); + const b2: uint32 = LoadUint8(dataPointer, offset + 2); + const b3: uint32 = LoadUint8(dataPointer, offset + 3); let result: uint32; if (requestedLittleEndian) { @@ -174,7 +175,7 @@ namespace data_view { } else if constexpr (kind == UINT32_ELEMENTS) { return Convert(result); } else if constexpr (kind == FLOAT32_ELEMENTS) { - let floatRes: float64 = Convert(BitcastInt32ToFloat32(result)); + const floatRes: float64 = Convert(BitcastInt32ToFloat32(result)); return Convert(floatRes); } else { unreachable; @@ -184,16 +185,16 @@ namespace data_view { macro LoadDataViewFloat64( buffer: JSArrayBuffer, offset: uintptr, requestedLittleEndian: bool): Number { - let dataPointer: RawPtr = buffer.backing_store; - - let b0: uint32 = LoadUint8(dataPointer, offset); - let b1: uint32 = LoadUint8(dataPointer, offset + 1); - let b2: uint32 = LoadUint8(dataPointer, offset + 2); - let b3: uint32 = LoadUint8(dataPointer, offset + 3); - let b4: uint32 = LoadUint8(dataPointer, offset + 4); - let b5: uint32 = LoadUint8(dataPointer, offset + 5); - let b6: uint32 = LoadUint8(dataPointer, offset + 6); - let b7: uint32 = LoadUint8(dataPointer, offset + 7); + const dataPointer: RawPtr = buffer.backing_store; + + const b0: uint32 = LoadUint8(dataPointer, offset); + const b1: uint32 = LoadUint8(dataPointer, offset + 1); + const b2: uint32 = LoadUint8(dataPointer, offset + 2); + const b3: uint32 = LoadUint8(dataPointer, offset + 3); + const b4: uint32 = LoadUint8(dataPointer, offset + 4); + const b5: uint32 = LoadUint8(dataPointer, offset + 5); + const b6: uint32 = LoadUint8(dataPointer, offset + 6); + const b7: uint32 = LoadUint8(dataPointer, offset + 7); let lowWord: uint32; let highWord: uint32; @@ -212,74 +213,49 @@ namespace data_view { return Convert(result); } - extern macro AllocateBigInt(intptr): BigInt; - extern macro StoreBigIntBitfield(BigInt, uint32): void; - extern macro StoreBigIntDigit(BigInt, constexpr int31, uintptr): void; - extern macro DataViewBuiltinsAssembler::DataViewEncodeBigIntBits( - constexpr bool, constexpr int31): uint32; - - const kPositiveBigInt: constexpr bool = false; - const kNegativeBigInt: constexpr bool = true; const kZeroDigitBigInt: constexpr int31 = 0; const kOneDigitBigInt: constexpr int31 = 1; const kTwoDigitBigInt: constexpr int31 = 2; - macro CreateEmptyBigInt(isPositive: bool, length: constexpr int31): BigInt { - // Allocate a BigInt with the desired length (number of digits). - let result: BigInt = AllocateBigInt(length); - - // Write the desired sign and length to the BigInt bitfield. 
- if (isPositive) { - StoreBigIntBitfield( - result, DataViewEncodeBigIntBits(kPositiveBigInt, length)); - } else { - StoreBigIntBitfield( - result, DataViewEncodeBigIntBits(kNegativeBigInt, length)); - } - - return result; - } - // Create a BigInt on a 64-bit architecture from two 32-bit values. - macro MakeBigIntOn64Bit( + macro MakeBigIntOn64Bit(implicit context: Context)( lowWord: uint32, highWord: uint32, signed: constexpr bool): BigInt { // 0n is represented by a zero-length BigInt. if (lowWord == 0 && highWord == 0) { - return AllocateBigInt(kZeroDigitBigInt); + return Convert(bigint::AllocateBigInt(kZeroDigitBigInt)); } - let isPositive: bool = true; - let highPart: intptr = Signed(Convert(highWord)); - let lowPart: intptr = Signed(Convert(lowWord)); + let sign: uint32 = bigint::kPositiveSign; + const highPart: intptr = Signed(Convert(highWord)); + const lowPart: intptr = Signed(Convert(lowWord)); let rawValue: intptr = (highPart << 32) + lowPart; if constexpr (signed) { if (rawValue < 0) { - isPositive = false; + sign = bigint::kNegativeSign; // We have to store the absolute value of rawValue in the digit. rawValue = 0 - rawValue; } } // Allocate the BigInt and store the absolute value. - let result: BigInt = CreateEmptyBigInt(isPositive, kOneDigitBigInt); - - StoreBigIntDigit(result, 0, Unsigned(rawValue)); - - return result; + const result: MutableBigInt = + bigint::AllocateEmptyBigInt(sign, kOneDigitBigInt); + bigint::StoreBigIntDigit(result, 0, Unsigned(rawValue)); + return Convert(result); } // Create a BigInt on a 32-bit architecture from two 32-bit values. - macro MakeBigIntOn32Bit( + macro MakeBigIntOn32Bit(implicit context: Context)( lowWord: uint32, highWord: uint32, signed: constexpr bool): BigInt { // 0n is represented by a zero-length BigInt. if (lowWord == 0 && highWord == 0) { - return AllocateBigInt(kZeroDigitBigInt); + return Convert(bigint::AllocateBigInt(kZeroDigitBigInt)); } // On a 32-bit platform, we might need 1 or 2 digits to store the number. let needTwoDigits: bool = false; - let isPositive: bool = true; + let sign: uint32 = bigint::kPositiveSign; // We need to do some math on lowWord and highWord, // so Convert them to int32. @@ -293,7 +269,7 @@ namespace data_view { if constexpr (signed) { // If highPart < 0, the number is always negative. if (highPart < 0) { - isPositive = false; + sign = bigint::kNegativeSign; // We have to compute the absolute value by hand. // There will be a negative carry from the low word @@ -322,25 +298,23 @@ namespace data_view { } // Allocate the BigInt with the right sign and length. - let result: BigInt; + let result: MutableBigInt; if (needTwoDigits) { - result = CreateEmptyBigInt(isPositive, kTwoDigitBigInt); + result = bigint::AllocateEmptyBigInt(sign, kTwoDigitBigInt); } else { - result = CreateEmptyBigInt(isPositive, kOneDigitBigInt); + result = bigint::AllocateEmptyBigInt(sign, kOneDigitBigInt); } // Finally, write the digit(s) to the BigInt. 
- StoreBigIntDigit(result, 0, Unsigned(Convert(lowPart))); - + bigint::StoreBigIntDigit(result, 0, Unsigned(Convert(lowPart))); if (needTwoDigits) { - StoreBigIntDigit(result, 1, Unsigned(Convert(highPart))); + bigint::StoreBigIntDigit(result, 1, Unsigned(Convert(highPart))); } - - return result; + return Convert(result); } - macro MakeBigInt(lowWord: uint32, highWord: uint32, signed: constexpr bool): - BigInt { + macro MakeBigInt(implicit context: Context)( + lowWord: uint32, highWord: uint32, signed: constexpr bool): BigInt { // A BigInt digit has the platform word size, so we only need one digit // on 64-bit platforms but may need two on 32-bit. if constexpr (Is64()) { @@ -350,19 +324,19 @@ namespace data_view { } } - macro LoadDataViewBigInt( + macro LoadDataViewBigInt(implicit context: Context)( buffer: JSArrayBuffer, offset: uintptr, requestedLittleEndian: bool, signed: constexpr bool): BigInt { - let dataPointer: RawPtr = buffer.backing_store; - - let b0: uint32 = LoadUint8(dataPointer, offset); - let b1: uint32 = LoadUint8(dataPointer, offset + 1); - let b2: uint32 = LoadUint8(dataPointer, offset + 2); - let b3: uint32 = LoadUint8(dataPointer, offset + 3); - let b4: uint32 = LoadUint8(dataPointer, offset + 4); - let b5: uint32 = LoadUint8(dataPointer, offset + 5); - let b6: uint32 = LoadUint8(dataPointer, offset + 6); - let b7: uint32 = LoadUint8(dataPointer, offset + 7); + const dataPointer: RawPtr = buffer.backing_store; + + const b0: uint32 = LoadUint8(dataPointer, offset); + const b1: uint32 = LoadUint8(dataPointer, offset + 1); + const b2: uint32 = LoadUint8(dataPointer, offset + 2); + const b3: uint32 = LoadUint8(dataPointer, offset + 3); + const b4: uint32 = LoadUint8(dataPointer, offset + 4); + const b5: uint32 = LoadUint8(dataPointer, offset + 5); + const b6: uint32 = LoadUint8(dataPointer, offset + 6); + const b7: uint32 = LoadUint8(dataPointer, offset + 7); let lowWord: uint32; let highWord: uint32; @@ -385,7 +359,7 @@ namespace data_view { transitioning macro DataViewGet( context: Context, receiver: Object, offset: Object, requestedLittleEndian: Object, kind: constexpr ElementsKind): Numeric { - let dataView: JSDataView = + const dataView: JSDataView = ValidateDataView(context, receiver, MakeDataViewGetterNameString(kind)); let getIndex: Number; @@ -396,25 +370,25 @@ namespace data_view { ThrowRangeError(kInvalidDataViewAccessorOffset); } - let littleEndian: bool = ToBoolean(requestedLittleEndian); - let buffer: JSArrayBuffer = dataView.buffer; + const littleEndian: bool = ToBoolean(requestedLittleEndian); + const buffer: JSArrayBuffer = dataView.buffer; if (IsDetachedBuffer(buffer)) { ThrowTypeError(kDetachedOperation, MakeDataViewGetterNameString(kind)); } - let getIndexFloat: float64 = Convert(getIndex); - let getIndexWord: uintptr = Convert(getIndexFloat); + const getIndexFloat: float64 = Convert(getIndex); + const getIndexWord: uintptr = Convert(getIndexFloat); - let viewOffsetWord: uintptr = dataView.byte_offset; - let viewSizeFloat: float64 = Convert(dataView.byte_length); - let elementSizeFloat: float64 = DataViewElementSize(kind); + const viewOffsetWord: uintptr = dataView.byte_offset; + const viewSizeFloat: float64 = Convert(dataView.byte_length); + const elementSizeFloat: float64 = DataViewElementSize(kind); if (getIndexFloat + elementSizeFloat > viewSizeFloat) { ThrowRangeError(kInvalidDataViewAccessorOffset); } - let bufferIndex: uintptr = getIndexWord + viewOffsetWord; + const bufferIndex: uintptr = getIndexWord + viewOffsetWord; if constexpr (kind == 
UINT8_ELEMENTS) { return LoadDataView8(buffer, bufferIndex, false); @@ -442,84 +416,84 @@ namespace data_view { } transitioning javascript builtin DataViewPrototypeGetUint8( - context: Context, receiver: Object, ...arguments): Object { - let offset: Object = arguments.length > 0 ? arguments[0] : Undefined; + js-implicit context: Context, receiver: Object)(...arguments): Object { + const offset: Object = arguments.length > 0 ? arguments[0] : Undefined; return DataViewGet(context, receiver, offset, Undefined, UINT8_ELEMENTS); } transitioning javascript builtin DataViewPrototypeGetInt8( - context: Context, receiver: Object, ...arguments): Object { - let offset: Object = arguments.length > 0 ? arguments[0] : Undefined; + js-implicit context: Context, receiver: Object)(...arguments): Object { + const offset: Object = arguments.length > 0 ? arguments[0] : Undefined; return DataViewGet(context, receiver, offset, Undefined, INT8_ELEMENTS); } transitioning javascript builtin DataViewPrototypeGetUint16( - context: Context, receiver: Object, ...arguments): Object { - let offset: Object = arguments.length > 0 ? arguments[0] : Undefined; - let isLittleEndian: Object = + js-implicit context: Context, receiver: Object)(...arguments): Object { + const offset: Object = arguments.length > 0 ? arguments[0] : Undefined; + const isLittleEndian: Object = arguments.length > 1 ? arguments[1] : Undefined; return DataViewGet( context, receiver, offset, isLittleEndian, UINT16_ELEMENTS); } transitioning javascript builtin DataViewPrototypeGetInt16( - context: Context, receiver: Object, ...arguments): Object { - let offset: Object = arguments.length > 0 ? arguments[0] : Undefined; - let isLittleEndian: Object = + js-implicit context: Context, receiver: Object)(...arguments): Object { + const offset: Object = arguments.length > 0 ? arguments[0] : Undefined; + const isLittleEndian: Object = arguments.length > 1 ? arguments[1] : Undefined; return DataViewGet( context, receiver, offset, isLittleEndian, INT16_ELEMENTS); } transitioning javascript builtin DataViewPrototypeGetUint32( - context: Context, receiver: Object, ...arguments): Object { - let offset: Object = arguments.length > 0 ? arguments[0] : Undefined; - let isLittleEndian: Object = + js-implicit context: Context, receiver: Object)(...arguments): Object { + const offset: Object = arguments.length > 0 ? arguments[0] : Undefined; + const isLittleEndian: Object = arguments.length > 1 ? arguments[1] : Undefined; return DataViewGet( context, receiver, offset, isLittleEndian, UINT32_ELEMENTS); } transitioning javascript builtin DataViewPrototypeGetInt32( - context: Context, receiver: Object, ...arguments): Object { - let offset: Object = arguments.length > 0 ? arguments[0] : Undefined; - let isLittleEndian: Object = + js-implicit context: Context, receiver: Object)(...arguments): Object { + const offset: Object = arguments.length > 0 ? arguments[0] : Undefined; + const isLittleEndian: Object = arguments.length > 1 ? arguments[1] : Undefined; return DataViewGet( context, receiver, offset, isLittleEndian, INT32_ELEMENTS); } transitioning javascript builtin DataViewPrototypeGetFloat32( - context: Context, receiver: Object, ...arguments): Object { - let offset: Object = arguments.length > 0 ? arguments[0] : Undefined; - let isLittleEndian: Object = + js-implicit context: Context, receiver: Object)(...arguments): Object { + const offset: Object = arguments.length > 0 ? arguments[0] : Undefined; + const isLittleEndian: Object = arguments.length > 1 ? 
arguments[1] : Undefined; return DataViewGet( context, receiver, offset, isLittleEndian, FLOAT32_ELEMENTS); } transitioning javascript builtin DataViewPrototypeGetFloat64( - context: Context, receiver: Object, ...arguments): Object { - let offset: Object = arguments.length > 0 ? arguments[0] : Undefined; - let isLittleEndian: Object = + js-implicit context: Context, receiver: Object)(...arguments): Object { + const offset: Object = arguments.length > 0 ? arguments[0] : Undefined; + const isLittleEndian: Object = arguments.length > 1 ? arguments[1] : Undefined; return DataViewGet( context, receiver, offset, isLittleEndian, FLOAT64_ELEMENTS); } transitioning javascript builtin DataViewPrototypeGetBigUint64( - context: Context, receiver: Object, ...arguments): Object { - let offset: Object = arguments.length > 0 ? arguments[0] : Undefined; - let isLittleEndian: Object = + js-implicit context: Context, receiver: Object)(...arguments): Object { + const offset: Object = arguments.length > 0 ? arguments[0] : Undefined; + const isLittleEndian: Object = arguments.length > 1 ? arguments[1] : Undefined; return DataViewGet( context, receiver, offset, isLittleEndian, BIGUINT64_ELEMENTS); } transitioning javascript builtin DataViewPrototypeGetBigInt64( - context: Context, receiver: Object, ...arguments): Object { - let offset: Object = arguments.length > 0 ? arguments[0] : Undefined; - let isLittleEndian: Object = + js-implicit context: Context, receiver: Object)(...arguments): Object { + const offset: Object = arguments.length > 0 ? arguments[0] : Undefined; + const isLittleEndian: Object = arguments.length > 1 ? arguments[1] : Undefined; return DataViewGet( context, receiver, offset, isLittleEndian, BIGINT64_ELEMENTS); @@ -539,10 +513,10 @@ namespace data_view { macro StoreDataView16( buffer: JSArrayBuffer, offset: uintptr, value: uint32, requestedLittleEndian: bool) { - let dataPointer: RawPtr = buffer.backing_store; + const dataPointer: RawPtr = buffer.backing_store; - let b0: uint32 = value & 0xFF; - let b1: uint32 = (value >>> 8) & 0xFF; + const b0: uint32 = value & 0xFF; + const b1: uint32 = (value >>> 8) & 0xFF; if (requestedLittleEndian) { StoreWord8(dataPointer, offset, b0); @@ -556,12 +530,12 @@ namespace data_view { macro StoreDataView32( buffer: JSArrayBuffer, offset: uintptr, value: uint32, requestedLittleEndian: bool) { - let dataPointer: RawPtr = buffer.backing_store; + const dataPointer: RawPtr = buffer.backing_store; - let b0: uint32 = value & 0xFF; - let b1: uint32 = (value >>> 8) & 0xFF; - let b2: uint32 = (value >>> 16) & 0xFF; - let b3: uint32 = value >>> 24; // We don't need to mask here. + const b0: uint32 = value & 0xFF; + const b1: uint32 = (value >>> 8) & 0xFF; + const b2: uint32 = (value >>> 16) & 0xFF; + const b3: uint32 = value >>> 24; // We don't need to mask here. 
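StoreDataView32 above splits the value into bytes b0 (least significant) through b3 and writes them at ascending offsets in the requested order; the LoadDataView32 macro earlier performs the same assembly in reverse. The same store as a freestanding C++ sketch over a raw byte buffer (StoreUint32 is a hypothetical name, not a V8 API):

```cpp
#include <cstddef>
#include <cstdint>

// Write a 32-bit value at data[offset..offset+3] in the requested byte
// order, mirroring the b0..b3 decomposition in StoreDataView32.
void StoreUint32(uint8_t* data, std::size_t offset, uint32_t value,
                 bool little_endian) {
  const uint8_t bytes[4] = {
      static_cast<uint8_t>(value & 0xFF),          // b0
      static_cast<uint8_t>((value >> 8) & 0xFF),   // b1
      static_cast<uint8_t>((value >> 16) & 0xFF),  // b2
      static_cast<uint8_t>(value >> 24),           // b3: no mask needed
  };
  for (int i = 0; i < 4; ++i) {
    // Little-endian puts b0 at the lowest address; big-endian puts b3 there.
    data[offset + i] = little_endian ? bytes[i] : bytes[3 - i];
  }
}
```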
if (requestedLittleEndian) { StoreWord8(dataPointer, offset, b0); @@ -579,17 +553,17 @@ namespace data_view { macro StoreDataView64( buffer: JSArrayBuffer, offset: uintptr, lowWord: uint32, highWord: uint32, requestedLittleEndian: bool) { - let dataPointer: RawPtr = buffer.backing_store; + const dataPointer: RawPtr = buffer.backing_store; - let b0: uint32 = lowWord & 0xFF; - let b1: uint32 = (lowWord >>> 8) & 0xFF; - let b2: uint32 = (lowWord >>> 16) & 0xFF; - let b3: uint32 = lowWord >>> 24; + const b0: uint32 = lowWord & 0xFF; + const b1: uint32 = (lowWord >>> 8) & 0xFF; + const b2: uint32 = (lowWord >>> 16) & 0xFF; + const b3: uint32 = lowWord >>> 24; - let b4: uint32 = highWord & 0xFF; - let b5: uint32 = (highWord >>> 8) & 0xFF; - let b6: uint32 = (highWord >>> 16) & 0xFF; - let b7: uint32 = highWord >>> 24; + const b4: uint32 = highWord & 0xFF; + const b5: uint32 = (highWord >>> 8) & 0xFF; + const b6: uint32 = (highWord >>> 16) & 0xFF; + const b7: uint32 = highWord >>> 24; if (requestedLittleEndian) { StoreWord8(dataPointer, offset, b0); @@ -612,11 +586,10 @@ namespace data_view { } } - extern macro DataViewBuiltinsAssembler::DataViewDecodeBigIntLength(BigInt): - uint32; - extern macro DataViewBuiltinsAssembler::DataViewDecodeBigIntSign(BigInt): + extern macro DataViewBuiltinsAssembler::DataViewDecodeBigIntLength( + BigIntBase): uint32; + extern macro DataViewBuiltinsAssembler::DataViewDecodeBigIntSign(BigIntBase): uint32; - extern macro LoadBigIntDigit(BigInt, constexpr int31): uintptr; // We might get here a BigInt that is bigger than 64 bits, but we're only // interested in the 64 lowest ones. This means the lowest BigInt digit @@ -624,8 +597,8 @@ namespace data_view { macro StoreDataViewBigInt( buffer: JSArrayBuffer, offset: uintptr, bigIntValue: BigInt, requestedLittleEndian: bool) { - let length: uint32 = DataViewDecodeBigIntLength(bigIntValue); - let sign: uint32 = DataViewDecodeBigIntSign(bigIntValue); + const length: uint32 = DataViewDecodeBigIntLength(bigIntValue); + const sign: uint32 = DataViewDecodeBigIntSign(bigIntValue); // The 32-bit words that will hold the BigInt's value in // two's complement representation. @@ -636,13 +609,13 @@ namespace data_view { if (length != 0) { if constexpr (Is64()) { // There is always exactly 1 BigInt digit to load in this case. - let value: uintptr = LoadBigIntDigit(bigIntValue, 0); + const value: uintptr = bigint::LoadBigIntDigit(bigIntValue, 0); lowWord = Convert(value); // Truncates value to 32 bits. highWord = Convert(value >>> 32); } else { // There might be either 1 or 2 BigInt digits we need to load. - lowWord = Convert(LoadBigIntDigit(bigIntValue, 0)); + lowWord = Convert(bigint::LoadBigIntDigit(bigIntValue, 0)); if (length >= 2) { // Only load the second digit if there is one. 
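On 64-bit targets the single digit carries the whole 64-bit magnitude and is truncated and shifted into the two 32-bit words; on 32-bit targets the first digit and the optional second digit already are those words. A sketch of the 64-bit split and of the reassembly used on the MakeBigInt side (SplitDigit and JoinWords are hypothetical names):

```cpp
#include <cstdint>
#include <utility>

// Split one 64-bit BigInt digit into the two 32-bit words the DataView
// store path works with.
std::pair<uint32_t, uint32_t> SplitDigit(uint64_t digit) {
  uint32_t low_word = static_cast<uint32_t>(digit);  // Truncates to 32 bits.
  uint32_t high_word = static_cast<uint32_t>(digit >> 32);
  return {low_word, high_word};
}

// Reassemble the words into the raw 64-bit value (the MakeBigInt direction).
uint64_t JoinWords(uint32_t low_word, uint32_t high_word) {
  return (static_cast<uint64_t>(high_word) << 32) | low_word;
}
```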
- highWord = Convert(LoadBigIntDigit(bigIntValue, 1)); + highWord = Convert(bigint::LoadBigIntDigit(bigIntValue, 1)); } } } @@ -661,7 +634,7 @@ namespace data_view { transitioning macro DataViewSet( context: Context, receiver: Object, offset: Object, value: Object, requestedLittleEndian: Object, kind: constexpr ElementsKind): Object { - let dataView: JSDataView = + const dataView: JSDataView = ValidateDataView(context, receiver, MakeDataViewSetterNameString(kind)); let getIndex: Number; @@ -672,52 +645,52 @@ namespace data_view { ThrowRangeError(kInvalidDataViewAccessorOffset); } - let littleEndian: bool = ToBoolean(requestedLittleEndian); - let buffer: JSArrayBuffer = dataView.buffer; + const littleEndian: bool = ToBoolean(requestedLittleEndian); + const buffer: JSArrayBuffer = dataView.buffer; // According to ES6 section 24.2.1.2 SetViewValue, we must perform // the conversion before doing the bounds check. if constexpr (kind == BIGUINT64_ELEMENTS || kind == BIGINT64_ELEMENTS) { - let bigIntValue: BigInt = ToBigInt(context, value); + const bigIntValue: BigInt = ToBigInt(context, value); if (IsDetachedBuffer(buffer)) { ThrowTypeError(kDetachedOperation, MakeDataViewSetterNameString(kind)); } - let getIndexFloat: float64 = Convert(getIndex); - let getIndexWord: uintptr = Convert(getIndexFloat); + const getIndexFloat: float64 = Convert(getIndex); + const getIndexWord: uintptr = Convert(getIndexFloat); - let viewOffsetWord: uintptr = dataView.byte_offset; - let viewSizeFloat: float64 = Convert(dataView.byte_length); - let elementSizeFloat: float64 = DataViewElementSize(kind); + const viewOffsetWord: uintptr = dataView.byte_offset; + const viewSizeFloat: float64 = Convert(dataView.byte_length); + const elementSizeFloat: float64 = DataViewElementSize(kind); if (getIndexFloat + elementSizeFloat > viewSizeFloat) { ThrowRangeError(kInvalidDataViewAccessorOffset); } - let bufferIndex: uintptr = getIndexWord + viewOffsetWord; + const bufferIndex: uintptr = getIndexWord + viewOffsetWord; StoreDataViewBigInt(buffer, bufferIndex, bigIntValue, littleEndian); } else { - let numValue: Number = ToNumber(context, value); + const numValue: Number = ToNumber(context, value); if (IsDetachedBuffer(buffer)) { ThrowTypeError(kDetachedOperation, MakeDataViewSetterNameString(kind)); } - let getIndexFloat: float64 = Convert(getIndex); - let getIndexWord: uintptr = Convert(getIndexFloat); + const getIndexFloat: float64 = Convert(getIndex); + const getIndexWord: uintptr = Convert(getIndexFloat); - let viewOffsetWord: uintptr = dataView.byte_offset; - let viewSizeFloat: float64 = Convert(dataView.byte_length); - let elementSizeFloat: float64 = DataViewElementSize(kind); + const viewOffsetWord: uintptr = dataView.byte_offset; + const viewSizeFloat: float64 = Convert(dataView.byte_length); + const elementSizeFloat: float64 = DataViewElementSize(kind); if (getIndexFloat + elementSizeFloat > viewSizeFloat) { ThrowRangeError(kInvalidDataViewAccessorOffset); } - let bufferIndex: uintptr = getIndexWord + viewOffsetWord; + const bufferIndex: uintptr = getIndexWord + viewOffsetWord; - let doubleValue: float64 = ChangeNumberToFloat64(numValue); + const doubleValue: float64 = ChangeNumberToFloat64(numValue); if constexpr (kind == UINT8_ELEMENTS || kind == INT8_ELEMENTS) { StoreDataView8( @@ -731,13 +704,13 @@ namespace data_view { buffer, bufferIndex, TruncateFloat64ToWord32(doubleValue), littleEndian); } else if constexpr (kind == FLOAT32_ELEMENTS) { - let floatValue: float32 = TruncateFloat64ToFloat32(doubleValue); + const 
floatValue: float32 = TruncateFloat64ToFloat32(doubleValue); StoreDataView32( buffer, bufferIndex, BitcastFloat32ToInt32(floatValue), littleEndian); } else if constexpr (kind == FLOAT64_ELEMENTS) { - let lowWord: uint32 = Float64ExtractLowWord32(doubleValue); - let highWord: uint32 = Float64ExtractHighWord32(doubleValue); + const lowWord: uint32 = Float64ExtractLowWord32(doubleValue); + const highWord: uint32 = Float64ExtractHighWord32(doubleValue); StoreDataView64(buffer, bufferIndex, lowWord, highWord, littleEndian); } } @@ -745,96 +718,96 @@ namespace data_view { } transitioning javascript builtin DataViewPrototypeSetUint8( - context: Context, receiver: Object, ...arguments): Object { - let offset: Object = arguments.length > 0 ? arguments[0] : Undefined; - let value: Object = arguments.length > 1 ? arguments[1] : Undefined; + js-implicit context: Context, receiver: Object)(...arguments): Object { + const offset: Object = arguments.length > 0 ? arguments[0] : Undefined; + const value: Object = arguments.length > 1 ? arguments[1] : Undefined; return DataViewSet( context, receiver, offset, value, Undefined, UINT8_ELEMENTS); } transitioning javascript builtin DataViewPrototypeSetInt8( - context: Context, receiver: Object, ...arguments): Object { - let offset: Object = arguments.length > 0 ? arguments[0] : Undefined; - let value: Object = arguments.length > 1 ? arguments[1] : Undefined; + js-implicit context: Context, receiver: Object)(...arguments): Object { + const offset: Object = arguments.length > 0 ? arguments[0] : Undefined; + const value: Object = arguments.length > 1 ? arguments[1] : Undefined; return DataViewSet( context, receiver, offset, value, Undefined, INT8_ELEMENTS); } transitioning javascript builtin DataViewPrototypeSetUint16( - context: Context, receiver: Object, ...arguments): Object { - let offset: Object = arguments.length > 0 ? arguments[0] : Undefined; - let value: Object = arguments.length > 1 ? arguments[1] : Undefined; - let isLittleEndian: Object = + js-implicit context: Context, receiver: Object)(...arguments): Object { + const offset: Object = arguments.length > 0 ? arguments[0] : Undefined; + const value: Object = arguments.length > 1 ? arguments[1] : Undefined; + const isLittleEndian: Object = arguments.length > 2 ? arguments[2] : Undefined; return DataViewSet( context, receiver, offset, value, isLittleEndian, UINT16_ELEMENTS); } transitioning javascript builtin DataViewPrototypeSetInt16( - context: Context, receiver: Object, ...arguments): Object { - let offset: Object = arguments.length > 0 ? arguments[0] : Undefined; - let value: Object = arguments.length > 1 ? arguments[1] : Undefined; - let isLittleEndian: Object = + js-implicit context: Context, receiver: Object)(...arguments): Object { + const offset: Object = arguments.length > 0 ? arguments[0] : Undefined; + const value: Object = arguments.length > 1 ? arguments[1] : Undefined; + const isLittleEndian: Object = arguments.length > 2 ? arguments[2] : Undefined; return DataViewSet( context, receiver, offset, value, isLittleEndian, INT16_ELEMENTS); } transitioning javascript builtin DataViewPrototypeSetUint32( - context: Context, receiver: Object, ...arguments): Object { - let offset: Object = arguments.length > 0 ? arguments[0] : Undefined; - let value: Object = arguments.length > 1 ? arguments[1] : Undefined; - let isLittleEndian: Object = + js-implicit context: Context, receiver: Object)(...arguments): Object { + const offset: Object = arguments.length > 0 ? 
arguments[0] : Undefined; + const value: Object = arguments.length > 1 ? arguments[1] : Undefined; + const isLittleEndian: Object = arguments.length > 2 ? arguments[2] : Undefined; return DataViewSet( context, receiver, offset, value, isLittleEndian, UINT32_ELEMENTS); } transitioning javascript builtin DataViewPrototypeSetInt32( - context: Context, receiver: Object, ...arguments): Object { - let offset: Object = arguments.length > 0 ? arguments[0] : Undefined; - let value: Object = arguments.length > 1 ? arguments[1] : Undefined; - let isLittleEndian: Object = + js-implicit context: Context, receiver: Object)(...arguments): Object { + const offset: Object = arguments.length > 0 ? arguments[0] : Undefined; + const value: Object = arguments.length > 1 ? arguments[1] : Undefined; + const isLittleEndian: Object = arguments.length > 2 ? arguments[2] : Undefined; return DataViewSet( context, receiver, offset, value, isLittleEndian, INT32_ELEMENTS); } transitioning javascript builtin DataViewPrototypeSetFloat32( - context: Context, receiver: Object, ...arguments): Object { - let offset: Object = arguments.length > 0 ? arguments[0] : Undefined; - let value: Object = arguments.length > 1 ? arguments[1] : Undefined; - let isLittleEndian: Object = + js-implicit context: Context, receiver: Object)(...arguments): Object { + const offset: Object = arguments.length > 0 ? arguments[0] : Undefined; + const value: Object = arguments.length > 1 ? arguments[1] : Undefined; + const isLittleEndian: Object = arguments.length > 2 ? arguments[2] : Undefined; return DataViewSet( context, receiver, offset, value, isLittleEndian, FLOAT32_ELEMENTS); } transitioning javascript builtin DataViewPrototypeSetFloat64( - context: Context, receiver: Object, ...arguments): Object { - let offset: Object = arguments.length > 0 ? arguments[0] : Undefined; - let value: Object = arguments.length > 1 ? arguments[1] : Undefined; - let isLittleEndian: Object = + js-implicit context: Context, receiver: Object)(...arguments): Object { + const offset: Object = arguments.length > 0 ? arguments[0] : Undefined; + const value: Object = arguments.length > 1 ? arguments[1] : Undefined; + const isLittleEndian: Object = arguments.length > 2 ? arguments[2] : Undefined; return DataViewSet( context, receiver, offset, value, isLittleEndian, FLOAT64_ELEMENTS); } transitioning javascript builtin DataViewPrototypeSetBigUint64( - context: Context, receiver: Object, ...arguments): Object { - let offset: Object = arguments.length > 0 ? arguments[0] : Undefined; - let value: Object = arguments.length > 1 ? arguments[1] : Undefined; - let isLittleEndian: Object = + js-implicit context: Context, receiver: Object)(...arguments): Object { + const offset: Object = arguments.length > 0 ? arguments[0] : Undefined; + const value: Object = arguments.length > 1 ? arguments[1] : Undefined; + const isLittleEndian: Object = arguments.length > 2 ? arguments[2] : Undefined; return DataViewSet( context, receiver, offset, value, isLittleEndian, BIGUINT64_ELEMENTS); } transitioning javascript builtin DataViewPrototypeSetBigInt64( - context: Context, receiver: Object, ...arguments): Object { - let offset: Object = arguments.length > 0 ? arguments[0] : Undefined; - let value: Object = arguments.length > 1 ? arguments[1] : Undefined; - let isLittleEndian: Object = + js-implicit context: Context, receiver: Object)(...arguments): Object { + const offset: Object = arguments.length > 0 ? arguments[0] : Undefined; + const value: Object = arguments.length > 1 ? 
arguments[1] : Undefined; + const isLittleEndian: Object = arguments.length > 2 ? arguments[2] : Undefined; return DataViewSet( context, receiver, offset, value, isLittleEndian, BIGINT64_ELEMENTS); diff --git a/deps/v8/src/builtins/extras-utils.tq b/deps/v8/src/builtins/extras-utils.tq index 2b9b79739e0f3d..3675fda19165f4 100644 --- a/deps/v8/src/builtins/extras-utils.tq +++ b/deps/v8/src/builtins/extras-utils.tq @@ -8,17 +8,18 @@ namespace extras_utils { extern runtime PromiseStatus(Context, Object): Smi; javascript builtin ExtrasUtilsCreatePrivateSymbol( - context: Context, receiver: Object, ...arguments): HeapObject { + js-implicit context: Context, + receiver: Object)(...arguments): HeapObject { return CreatePrivateSymbol(context, arguments[0]); } javascript builtin ExtrasUtilsMarkPromiseAsHandled( - context: Context, receiver: Object, ...arguments): Undefined { + js-implicit context: Context, receiver: Object)(...arguments): Undefined { return PromiseMarkAsHandled(context, arguments[0]); } javascript builtin ExtrasUtilsPromiseState( - context: Context, receiver: Object, ...arguments): Smi { + js-implicit context: Context, receiver: Object)(...arguments): Smi { return PromiseStatus(context, arguments[0]); } } diff --git a/deps/v8/src/builtins/ia32/builtins-ia32.cc b/deps/v8/src/builtins/ia32/builtins-ia32.cc index 0d80c681fbf1c1..995be77f754ee0 100644 --- a/deps/v8/src/builtins/ia32/builtins-ia32.cc +++ b/deps/v8/src/builtins/ia32/builtins-ia32.cc @@ -1023,10 +1023,10 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) { // 8-bit fields next to each other, so we can optimize by writing a single // 16-bit store. These static asserts guard that our assumption is valid. STATIC_ASSERT(BytecodeArray::kBytecodeAgeOffset == - BytecodeArray::kOSRNestingLevelOffset + kCharSize); + BytecodeArray::kOsrNestingLevelOffset + kCharSize); STATIC_ASSERT(BytecodeArray::kNoAgeBytecodeAge == 0); __ mov_w(FieldOperand(kInterpreterBytecodeArrayRegister, - BytecodeArray::kOSRNestingLevelOffset), + BytecodeArray::kOsrNestingLevelOffset), Immediate(0)); // Push bytecode array. @@ -1534,6 +1534,15 @@ void Generate_ContinueToBuiltinHelper(MacroAssembler* masm, BuiltinContinuationFrameConstants::kFixedFrameSize), eax); } + + // Replace the builtin index Smi on the stack with the start address of the + // builtin loaded from the builtins table. The ret below will return to this + // address.
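+ // (LoadEntryFromBuiltinIndex performs that lookup: it untags the Smi index
+ // and loads the entry address from the isolate's builtin entry table.)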
+ int offset_to_builtin_index = allocatable_register_count * kSystemPointerSize; + __ mov(eax, Operand(esp, offset_to_builtin_index)); + __ LoadEntryFromBuiltinIndex(eax); + __ mov(Operand(esp, offset_to_builtin_index), eax); + for (int i = allocatable_register_count - 1; i >= 0; --i) { int code = config->GetAllocatableGeneralCode(i); __ pop(Register::from_code(code)); @@ -1549,7 +1558,6 @@ void Generate_ContinueToBuiltinHelper(MacroAssembler* masm, kSystemPointerSize; __ pop(Operand(esp, offsetToPC)); __ Drop(offsetToPC / kSystemPointerSize); - __ add(Operand(esp, 0), Immediate(Code::kHeaderSize - kHeapObjectTag)); __ ret(0); } } // namespace @@ -3012,23 +3020,28 @@ void CallApiFunctionAndReturn(MacroAssembler* masm, Register function_address, __ mov(esi, __ ExternalReferenceAsOperand(next_address, esi)); __ mov(edi, __ ExternalReferenceAsOperand(limit_address, edi)); - Label profiler_disabled; - Label end_profiler_check; + Label profiler_enabled, end_profiler_check; __ Move(eax, Immediate(ExternalReference::is_profiling_address(isolate))); __ cmpb(Operand(eax, 0), Immediate(0)); - __ j(zero, &profiler_disabled); + __ j(not_zero, &profiler_enabled); + __ Move(eax, Immediate(ExternalReference::address_of_runtime_stats_flag())); + __ cmp(Operand(eax, 0), Immediate(0)); + __ j(not_zero, &profiler_enabled); + { + // Call the api function directly. + __ mov(eax, function_address); + __ jmp(&end_profiler_check); + } + __ bind(&profiler_enabled); + { + // Additional parameter is the address of the actual getter function. + __ mov(thunk_last_arg, function_address); + __ Move(eax, Immediate(thunk_ref)); + } + __ bind(&end_profiler_check); - // Additional parameter is the address of the actual getter function. - __ mov(thunk_last_arg, function_address); // Call the api function. - __ Move(eax, Immediate(thunk_ref)); __ call(eax); - __ jmp(&end_profiler_check); - - __ bind(&profiler_disabled); - // Call the api function. - __ call(function_address); - __ bind(&end_profiler_check); Label prologue; // Load the value from ReturnValue @@ -3080,6 +3093,9 @@ void CallApiFunctionAndReturn(MacroAssembler* masm, Register function_address, __ CompareRoot(map, RootIndex::kHeapNumberMap); __ j(equal, &ok, Label::kNear); + __ CompareRoot(map, RootIndex::kBigIntMap); + __ j(equal, &ok, Label::kNear); + __ CompareRoot(return_value, RootIndex::kUndefinedValue); __ j(equal, &ok, Label::kNear); diff --git a/deps/v8/src/builtins/internal-coverage.tq b/deps/v8/src/builtins/internal-coverage.tq index 4e75c6d837eced..d96fa924ab0418 100644 --- a/deps/v8/src/builtins/internal-coverage.tq +++ b/deps/v8/src/builtins/internal-coverage.tq @@ -28,6 +28,8 @@ namespace internal_coverage { return UnsafeCast<CoverageInfo>(debugInfo.coverage_info); } + @export // Silence unused warning on release builds. SlotCount is only used + // in an assert. TODO(szuend): Remove once macros and asserts work. macro SlotCount(coverageInfo: CoverageInfo): Smi { assert(kFirstSlotIndex == 0); // Otherwise we'd have to consider it below.
assert(kFirstSlotIndex == (coverageInfo.length & kSlotIndexCountMask)); diff --git a/deps/v8/src/builtins/iterator.tq b/deps/v8/src/builtins/iterator.tq index 5c9439dfc7ffce..b770f1b6528378 100644 --- a/deps/v8/src/builtins/iterator.tq +++ b/deps/v8/src/builtins/iterator.tq @@ -20,16 +20,16 @@ namespace iterator { implicit context: Context)(Object): IteratorRecord; extern macro IteratorBuiltinsAssembler::IteratorStep( - implicit context: Context)(IteratorRecord): Object + implicit context: Context)(IteratorRecord): JSReceiver labels Done; extern macro IteratorBuiltinsAssembler::IteratorStep( - implicit context: Context)(IteratorRecord, Map): Object + implicit context: Context)(IteratorRecord, Map): JSReceiver labels Done; extern macro IteratorBuiltinsAssembler::IteratorValue( - implicit context: Context)(Object): Object; + implicit context: Context)(JSReceiver): Object; extern macro IteratorBuiltinsAssembler::IteratorValue( - implicit context: Context)(Object, Map): Object; + implicit context: Context)(JSReceiver, Map): Object; extern macro IteratorBuiltinsAssembler::IteratorCloseOnException( implicit context: Context)(IteratorRecord, Object): never; diff --git a/deps/v8/src/builtins/math.tq b/deps/v8/src/builtins/math.tq index 84dd1261fa07b8..df43b30efc4e0b 100644 --- a/deps/v8/src/builtins/math.tq +++ b/deps/v8/src/builtins/math.tq @@ -7,7 +7,7 @@ namespace math { extern macro Float64Acos(float64): float64; transitioning javascript builtin - MathAcos(context: Context, receiver: Object, x: Object): Number { + MathAcos(context: Context, _receiver: Object, x: Object): Number { const value = Convert<float64>(ToNumber_Inline(context, x)); return Convert<Number>(Float64Acos(value)); } @@ -16,7 +16,7 @@ namespace math { extern macro Float64Acosh(float64): float64; transitioning javascript builtin - MathAcosh(context: Context, receiver: Object, x: Object): Number { + MathAcosh(context: Context, _receiver: Object, x: Object): Number { const value = Convert<float64>(ToNumber_Inline(context, x)); return Convert<Number>(Float64Acosh(value)); } @@ -25,7 +25,7 @@ namespace math { extern macro Float64Asin(float64): float64; transitioning javascript builtin - MathAsin(context: Context, receiver: Object, x: Object): Number { + MathAsin(context: Context, _receiver: Object, x: Object): Number { const value = Convert<float64>(ToNumber_Inline(context, x)); return Convert<Number>(Float64Asin(value)); } @@ -34,7 +34,7 @@ namespace math { extern macro Float64Asinh(float64): float64; transitioning javascript builtin - MathAsinh(context: Context, receiver: Object, x: Object): Number { + MathAsinh(context: Context, _receiver: Object, x: Object): Number { const value = Convert<float64>(ToNumber_Inline(context, x)); return Convert<Number>(Float64Asinh(value)); } @@ -43,7 +43,7 @@ namespace math { extern macro Float64Atan(float64): float64; transitioning javascript builtin - MathAtan(context: Context, receiver: Object, x: Object): Number { + MathAtan(context: Context, _receiver: Object, x: Object): Number { const value = Convert<float64>(ToNumber_Inline(context, x)); return Convert<Number>(Float64Atan(value)); } @@ -52,7 +52,7 @@ namespace math { extern macro Float64Atan2(float64, float64): float64; transitioning javascript builtin - MathAtan2(context: Context, receiver: Object, y: Object, x: Object): Number { + MathAtan2(context: Context, _receiver: Object, y: Object, x: Object): Number { const yValue = Convert<float64>(ToNumber_Inline(context, y)); const xValue = Convert<float64>(ToNumber_Inline(context, x)); return Convert<Number>(Float64Atan2(yValue, xValue)); @@ -62,7 +62,7 @@ namespace math { extern macro
Float64Atanh(float64): float64; transitioning javascript builtin - MathAtanh(context: Context, receiver: Object, x: Object): Number { + MathAtanh(context: Context, _receiver: Object, x: Object): Number { const value = Convert<float64>(ToNumber_Inline(context, x)); return Convert<Number>(Float64Atanh(value)); } @@ -71,7 +71,7 @@ namespace math { extern macro Float64Cbrt(float64): float64; transitioning javascript builtin - MathCbrt(context: Context, receiver: Object, x: Object): Number { + MathCbrt(context: Context, _receiver: Object, x: Object): Number { const value = Convert<float64>(ToNumber_Inline(context, x)); return Convert<Number>(Float64Cbrt(value)); } @@ -80,7 +80,7 @@ namespace math { extern macro Word32Clz(int32): int32; transitioning javascript builtin - MathClz32(context: Context, receiver: Object, x: Object): Number { + MathClz32(context: Context, _receiver: Object, x: Object): Number { const num = ToNumber_Inline(context, x); let value: int32; @@ -100,7 +100,7 @@ namespace math { extern macro Float64Cos(float64): float64; transitioning javascript builtin - MathCos(context: Context, receiver: Object, x: Object): Number { + MathCos(context: Context, _receiver: Object, x: Object): Number { const value = Convert<float64>(ToNumber_Inline(context, x)); return Convert<Number>(Float64Cos(value)); } @@ -109,7 +109,7 @@ namespace math { extern macro Float64Cosh(float64): float64; transitioning javascript builtin - MathCosh(context: Context, receiver: Object, x: Object): Number { + MathCosh(context: Context, _receiver: Object, x: Object): Number { const value = Convert<float64>(ToNumber_Inline(context, x)); return Convert<Number>(Float64Cosh(value)); } @@ -118,7 +118,7 @@ namespace math { extern macro Float64Exp(float64): float64; transitioning javascript builtin - MathExp(context: Context, receiver: Object, x: Object): Number { + MathExp(context: Context, _receiver: Object, x: Object): Number { const value = Convert<float64>(ToNumber_Inline(context, x)); return Convert<Number>(Float64Exp(value)); } @@ -127,14 +127,14 @@ namespace math { extern macro Float64Expm1(float64): float64; transitioning javascript builtin - MathExpm1(context: Context, receiver: Object, x: Object): Number { + MathExpm1(context: Context, _receiver: Object, x: Object): Number { const value = Convert<float64>(ToNumber_Inline(context, x)); return Convert<Number>(Float64Expm1(value)); } // ES6 #sec-math.fround transitioning javascript builtin - MathFround(context: Context, receiver: Object, x: Object): Number { + MathFround(context: Context, _receiver: Object, x: Object): Number { const x32 = Convert<float32>(ToNumber_Inline(context, x)); const x64 = Convert<float64>(x32); return Convert<Number>(x64); @@ -144,7 +144,7 @@ namespace math { extern macro Float64Log(float64): float64; transitioning javascript builtin - MathLog(context: Context, receiver: Object, x: Object): Number { + MathLog(context: Context, _receiver: Object, x: Object): Number { const value = Convert<float64>(ToNumber_Inline(context, x)); return Convert<Number>(Float64Log(value)); } @@ -153,7 +153,7 @@ namespace math { extern macro Float64Log1p(float64): float64; transitioning javascript builtin - MathLog1p(context: Context, receiver: Object, x: Object): Number { + MathLog1p(context: Context, _receiver: Object, x: Object): Number { const value = Convert<float64>(ToNumber_Inline(context, x)); return Convert<Number>(Float64Log1p(value)); } @@ -162,7 +162,7 @@ namespace math { extern macro Float64Log10(float64): float64; transitioning javascript builtin - MathLog10(context: Context, receiver: Object, x: Object): Number { + MathLog10(context: Context, _receiver: Object, x: Object): Number { const value =
Convert<float64>(ToNumber_Inline(context, x)); return Convert<Number>(Float64Log10(value)); } @@ -171,7 +171,7 @@ namespace math { extern macro Float64Log2(float64): float64; transitioning javascript builtin - MathLog2(context: Context, receiver: Object, x: Object): Number { + MathLog2(context: Context, _receiver: Object, x: Object): Number { const value = Convert<float64>(ToNumber_Inline(context, x)); return Convert<Number>(Float64Log2(value)); } @@ -180,14 +180,14 @@ namespace math { extern macro Float64Sin(float64): float64; transitioning javascript builtin - MathSin(context: Context, receiver: Object, x: Object): Number { + MathSin(context: Context, _receiver: Object, x: Object): Number { const value = Convert<float64>(ToNumber_Inline(context, x)); return Convert<Number>(Float64Sin(value)); } // ES6 #sec-math.sign transitioning javascript builtin - MathSign(context: Context, receiver: Object, x: Object): Number { + MathSign(context: Context, _receiver: Object, x: Object): Number { const num = ToNumber_Inline(context, x); const value = Convert<float64>(num); @@ -204,7 +204,7 @@ namespace math { extern macro Float64Sinh(float64): float64; transitioning javascript builtin - MathSinh(context: Context, receiver: Object, x: Object): Number { + MathSinh(context: Context, _receiver: Object, x: Object): Number { const value = Convert<float64>(ToNumber_Inline(context, x)); return Convert<Number>(Float64Sinh(value)); } @@ -213,7 +213,7 @@ namespace math { extern macro Float64Sqrt(float64): float64; transitioning javascript builtin - MathSqrt(context: Context, receiver: Object, x: Object): Number { + MathSqrt(context: Context, _receiver: Object, x: Object): Number { const value = Convert<float64>(ToNumber_Inline(context, x)); return Convert<Number>(Float64Sqrt(value)); } @@ -222,7 +222,7 @@ namespace math { extern macro Float64Tan(float64): float64; transitioning javascript builtin - MathTan(context: Context, receiver: Object, x: Object): Number { + MathTan(context: Context, _receiver: Object, x: Object): Number { const value = Convert<float64>(ToNumber_Inline(context, x)); return Convert<Number>(Float64Tan(value)); } @@ -231,7 +231,7 @@ namespace math { extern macro Float64Tanh(float64): float64; transitioning javascript builtin - MathTanh(context: Context, receiver: Object, x: Object): Number { + MathTanh(context: Context, _receiver: Object, x: Object): Number { const value = Convert<float64>(ToNumber_Inline(context, x)); return Convert<Number>(Float64Tanh(value)); } diff --git a/deps/v8/src/builtins/mips/builtins-mips.cc b/deps/v8/src/builtins/mips/builtins-mips.cc index ec65c78ee9d65d..a359b2436f1818 100644 --- a/deps/v8/src/builtins/mips/builtins-mips.cc +++ b/deps/v8/src/builtins/mips/builtins-mips.cc @@ -62,7 +62,6 @@ void Builtins::Generate_InternalArrayConstructor(MacroAssembler* masm) { static void GenerateTailCallToReturnedCode(MacroAssembler* masm, Runtime::FunctionId function_id) { // ----------- S t a t e ------------- - // -- a0 : argument count (preserved for callee) // -- a1 : target function (preserved for callee) // -- a3 : new target (preserved for callee) // ----------------------------------- @@ -70,14 +69,12 @@ static void GenerateTailCallToReturnedCode(MacroAssembler* masm, FrameScope scope(masm, StackFrame::INTERNAL); // Push a copy of the target function and the new target. // Push function as parameter to the runtime call. - __ SmiTag(a0); - __ Push(a0, a1, a3, a1); + __ Push(a1, a3, a1); __ CallRuntime(function_id, 1); // Restore target function and new target.
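// (Note: a0, the argument count, is intentionally no longer saved and
// restored around the runtime call, as the Push/Pop changes below show.)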
- __ Pop(a0, a1, a3); - __ SmiUntag(a0); + __ Pop(a1, a3); } static_assert(kJavaScriptCallCodeStartRegister == a2, "ABI mismatch"); @@ -853,13 +850,11 @@ static void MaybeTailCallOptimizedCodeSlot(MacroAssembler* masm, Register scratch1, Register scratch2, Register scratch3) { // ----------- S t a t e ------------- - // -- a0 : argument count (preserved for callee if needed, and caller) // -- a3 : new target (preserved for callee if needed, and caller) // -- a1 : target function (preserved for callee if needed, and caller) // -- feedback vector (preserved for caller if needed) // ----------------------------------- - DCHECK( - !AreAliased(feedback_vector, a0, a1, a3, scratch1, scratch2, scratch3)); + DCHECK(!AreAliased(feedback_vector, a1, a3, scratch1, scratch2, scratch3)); Label optimized_code_slot_is_weak_ref, fallthrough; @@ -1035,17 +1030,18 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) { __ lw(feedback_vector, FieldMemOperand(closure, JSFunction::kFeedbackCellOffset)); __ lw(feedback_vector, FieldMemOperand(feedback_vector, Cell::kValueOffset)); + + Label push_stack_frame; + // Check if feedback vector is valid. If valid, check for optimized code + // and update invocation count. Otherwise, set up the stack frame. + __ lw(t0, FieldMemOperand(feedback_vector, HeapObject::kMapOffset)); + __ lhu(t0, FieldMemOperand(t0, Map::kInstanceTypeOffset)); + __ Branch(&push_stack_frame, ne, t0, Operand(FEEDBACK_VECTOR_TYPE)); + // Read off the optimized code slot in the feedback vector, and if there // is optimized code or an optimization marker, call that instead. MaybeTailCallOptimizedCodeSlot(masm, feedback_vector, t0, t3, t1); - // Open a frame scope to indicate that there is a frame on the stack. The - // MANUAL indicates that the scope shouldn't actually generate code to set up - // the frame (that is done below). - FrameScope frame_scope(masm, StackFrame::MANUAL); - __ PushStandardFrame(closure); - - // Increment invocation count for the function. __ lw(t0, FieldMemOperand(feedback_vector, FeedbackVector::kInvocationCountOffset)); @@ -1053,10 +1049,21 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) { __ sw(t0, FieldMemOperand(feedback_vector, FeedbackVector::kInvocationCountOffset)); - // Reset code age. - DCHECK_EQ(0, BytecodeArray::kNoAgeBytecodeAge); - __ sb(zero_reg, FieldMemOperand(kInterpreterBytecodeArrayRegister, - BytecodeArray::kBytecodeAgeOffset)); + // Open a frame scope to indicate that there is a frame on the stack. The + // MANUAL indicates that the scope shouldn't actually generate code to set up + // the frame (that is done below). + __ bind(&push_stack_frame); + FrameScope frame_scope(masm, StackFrame::MANUAL); + __ PushStandardFrame(closure); + + // Reset code age and the OSR arming. The OSR field and BytecodeAgeOffset are + // 8-bit fields next to each other, so we can optimize by writing a single + // 16-bit store. These static asserts guard that our assumption is valid. + STATIC_ASSERT(BytecodeArray::kBytecodeAgeOffset == + BytecodeArray::kOsrNestingLevelOffset + kCharSize); + STATIC_ASSERT(BytecodeArray::kNoAgeBytecodeAge == 0); + __ sh(zero_reg, FieldMemOperand(kInterpreterBytecodeArrayRegister, + BytecodeArray::kOsrNestingLevelOffset)); // Load initial bytecode offset.
__ li(kInterpreterBytecodeOffsetRegister, @@ -1464,11 +1471,13 @@ void Generate_ContinueToBuiltinHelper(MacroAssembler* masm, } __ lw(fp, MemOperand( sp, BuiltinContinuationFrameConstants::kFixedFrameSizeFromFp)); + // Load builtin index (stored as a Smi) and use it to get the builtin start + // address from the builtins table. __ Pop(t0); __ Addu(sp, sp, Operand(BuiltinContinuationFrameConstants::kFixedFrameSizeFromFp)); __ Pop(ra); - __ Addu(t0, t0, Operand(Code::kHeaderSize - kHeapObjectTag)); + __ LoadEntryFromBuiltinIndex(t0); __ Jump(t0); } } // namespace @@ -2559,7 +2568,7 @@ void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size, __ LoadRoot(t0, RootIndex::kTheHoleValue); // Cannot use check here as it attempts to generate call into runtime. __ Branch(&okay, eq, t0, Operand(a2)); - __ stop("Unexpected pending exception"); + __ stop(); __ bind(&okay); } @@ -2825,18 +2834,23 @@ void CallApiFunctionAndReturn(MacroAssembler* masm, Register function_address, DCHECK(function_address == a1 || function_address == a2); - Label profiler_disabled; - Label end_profiler_check; + Label profiler_enabled, end_profiler_check; __ li(t9, ExternalReference::is_profiling_address(isolate)); __ lb(t9, MemOperand(t9, 0)); - __ Branch(&profiler_disabled, eq, t9, Operand(zero_reg)); - - // Additional parameter is the address of the actual callback. - __ li(t9, thunk_ref); - __ jmp(&end_profiler_check); - - __ bind(&profiler_disabled); - __ mov(t9, function_address); + __ Branch(&profiler_enabled, ne, t9, Operand(zero_reg)); + __ li(t9, ExternalReference::address_of_runtime_stats_flag()); + __ lw(t9, MemOperand(t9, 0)); + __ Branch(&profiler_enabled, ne, t9, Operand(zero_reg)); + { + // Call the api function directly. + __ mov(t9, function_address); + __ Branch(&end_profiler_check); + } + __ bind(&profiler_enabled); + { + // Additional parameter is the address of the actual callback. + __ li(t9, thunk_ref); + } __ bind(&end_profiler_check); // Allocate HandleScope in callee-save registers. diff --git a/deps/v8/src/builtins/mips64/builtins-mips64.cc b/deps/v8/src/builtins/mips64/builtins-mips64.cc index 34a5774d656901..c5565b90de7a9d 100644 --- a/deps/v8/src/builtins/mips64/builtins-mips64.cc +++ b/deps/v8/src/builtins/mips64/builtins-mips64.cc @@ -62,7 +62,6 @@ void Builtins::Generate_InternalArrayConstructor(MacroAssembler* masm) { static void GenerateTailCallToReturnedCode(MacroAssembler* masm, Runtime::FunctionId function_id) { // ----------- S t a t e ------------- - // -- a0 : argument count (preserved for callee) // -- a1 : target function (preserved for callee) // -- a3 : new target (preserved for callee) // ----------------------------------- @@ -70,13 +69,11 @@ static void GenerateTailCallToReturnedCode(MacroAssembler* masm, FrameScope scope(masm, StackFrame::INTERNAL); // Push a copy of the function onto the stack. // Push a copy of the target function and the new target. - __ SmiTag(a0); - __ Push(a0, a1, a3, a1); + __ Push(a1, a3, a1); __ CallRuntime(function_id, 1); // Restore target function and new target. 
- __ Pop(a0, a1, a3); - __ SmiUntag(a0); + __ Pop(a1, a3); } static_assert(kJavaScriptCallCodeStartRegister == a2, "ABI mismatch"); @@ -870,13 +867,11 @@ static void MaybeTailCallOptimizedCodeSlot(MacroAssembler* masm, Register scratch1, Register scratch2, Register scratch3) { // ----------- S t a t e ------------- - // -- a0 : argument count (preserved for callee if needed, and caller) // -- a3 : new target (preserved for callee if needed, and caller) // -- a1 : target function (preserved for callee if needed, and caller) // -- feedback vector (preserved for caller if needed) // ----------------------------------- - DCHECK( - !AreAliased(feedback_vector, a0, a1, a3, scratch1, scratch2, scratch3)); + DCHECK(!AreAliased(feedback_vector, a1, a3, scratch1, scratch2, scratch3)); Label optimized_code_slot_is_weak_ref, fallthrough; @@ -1052,16 +1047,18 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) { __ Ld(feedback_vector, FieldMemOperand(closure, JSFunction::kFeedbackCellOffset)); __ Ld(feedback_vector, FieldMemOperand(feedback_vector, Cell::kValueOffset)); + + Label push_stack_frame; + // Check if feedback vector is valid. If valid, check for optimized code + // and update invocation count. Otherwise, set up the stack frame. + __ Ld(a4, FieldMemOperand(feedback_vector, HeapObject::kMapOffset)); + __ Lhu(a4, FieldMemOperand(a4, Map::kInstanceTypeOffset)); + __ Branch(&push_stack_frame, ne, a4, Operand(FEEDBACK_VECTOR_TYPE)); + // Read off the optimized code slot in the feedback vector, and if there // is optimized code or an optimization marker, call that instead. MaybeTailCallOptimizedCodeSlot(masm, feedback_vector, a4, t3, a5); - // Open a frame scope to indicate that there is a frame on the stack. The - // MANUAL indicates that the scope shouldn't actually generate code to set up - // the frame (that is done below). - FrameScope frame_scope(masm, StackFrame::MANUAL); - __ PushStandardFrame(closure); - // Increment invocation count for the function. __ Lw(a4, FieldMemOperand(feedback_vector, FeedbackVector::kInvocationCountOffset)); @@ -1069,10 +1066,21 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) { __ Sw(a4, FieldMemOperand(feedback_vector, FeedbackVector::kInvocationCountOffset)); - // Reset code age. - DCHECK_EQ(0, BytecodeArray::kNoAgeBytecodeAge); - __ sb(zero_reg, FieldMemOperand(kInterpreterBytecodeArrayRegister, - BytecodeArray::kBytecodeAgeOffset)); + // Open a frame scope to indicate that there is a frame on the stack. The + // MANUAL indicates that the scope shouldn't actually generate code to set up + // the frame (that is done below). + __ bind(&push_stack_frame); + FrameScope frame_scope(masm, StackFrame::MANUAL); + __ PushStandardFrame(closure); + + // Reset code age and the OSR arming. The OSR field and BytecodeAgeOffset are + // 8-bit fields next to each other, so we can optimize by writing a single + // 16-bit store. These static asserts guard that our assumption is valid. + STATIC_ASSERT(BytecodeArray::kBytecodeAgeOffset == + BytecodeArray::kOsrNestingLevelOffset + kCharSize); + STATIC_ASSERT(BytecodeArray::kNoAgeBytecodeAge == 0); + __ sh(zero_reg, FieldMemOperand(kInterpreterBytecodeArrayRegister, + BytecodeArray::kOsrNestingLevelOffset)); // Load initial bytecode offset.
__ li(kInterpreterBytecodeOffsetRegister, @@ -1479,11 +1487,13 @@ void Generate_ContinueToBuiltinHelper(MacroAssembler* masm, } __ Ld(fp, MemOperand( sp, BuiltinContinuationFrameConstants::kFixedFrameSizeFromFp)); + // Load builtin index (stored as a Smi) and use it to get the builtin start + // address from the builtins table. __ Pop(t0); __ Daddu(sp, sp, Operand(BuiltinContinuationFrameConstants::kFixedFrameSizeFromFp)); __ Pop(ra); - __ Daddu(t0, t0, Operand(Code::kHeaderSize - kHeapObjectTag)); + __ LoadEntryFromBuiltinIndex(t0); __ Jump(t0); } } // namespace @@ -2595,7 +2605,7 @@ void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size, __ LoadRoot(a4, RootIndex::kTheHoleValue); // Cannot use check here as it attempts to generate call into runtime. __ Branch(&okay, eq, a4, Operand(a2)); - __ stop("Unexpected pending exception"); + __ stop(); __ bind(&okay); } @@ -2864,18 +2874,24 @@ void CallApiFunctionAndReturn(MacroAssembler* masm, Register function_address, DCHECK(function_address == a1 || function_address == a2); - Label profiler_disabled; - Label end_profiler_check; + Label profiler_enabled, end_profiler_check; __ li(t9, ExternalReference::is_profiling_address(isolate)); __ Lb(t9, MemOperand(t9, 0)); - __ Branch(&profiler_disabled, eq, t9, Operand(zero_reg)); - - // Additional parameter is the address of the actual callback. - __ li(t9, thunk_ref); - __ jmp(&end_profiler_check); + __ Branch(&profiler_enabled, ne, t9, Operand(zero_reg)); + __ li(t9, ExternalReference::address_of_runtime_stats_flag()); + __ Lw(t9, MemOperand(t9, 0)); + __ Branch(&profiler_enabled, ne, t9, Operand(zero_reg)); + { + // Call the api function directly. + __ mov(t9, function_address); + __ Branch(&end_profiler_check); + } - __ bind(&profiler_disabled); - __ mov(t9, function_address); + __ bind(&profiler_enabled); + { + // Additional parameter is the address of the actual callback. + __ li(t9, thunk_ref); + } __ bind(&end_profiler_check); // Allocate HandleScope in callee-save registers. diff --git a/deps/v8/src/builtins/object-fromentries.tq b/deps/v8/src/builtins/object-fromentries.tq index 93851d4e11314d..32115e78eab250 100644 --- a/deps/v8/src/builtins/object-fromentries.tq +++ b/deps/v8/src/builtins/object-fromentries.tq @@ -33,8 +33,8 @@ namespace object { } transitioning javascript builtin - ObjectFromEntries(implicit context: Context)(receiver: Object, ...arguments): - Object { + ObjectFromEntries(js-implicit context: Context, receiver: Object)( + ...arguments): Object { const iterable: Object = arguments[0]; try { if (IsNullOrUndefined(iterable)) goto Throw; @@ -47,7 +47,8 @@ namespace object { try { assert(!IsNullOrUndefined(i.object)); while (true) { - const step: Object = iterator::IteratorStep(i, fastIteratorResultMap) + const step: JSReceiver = + iterator::IteratorStep(i, fastIteratorResultMap) otherwise return result; const iteratorValue: Object = iterator::IteratorValue(step, fastIteratorResultMap); diff --git a/deps/v8/src/builtins/object.tq b/deps/v8/src/builtins/object.tq new file mode 100644 index 00000000000000..6706a8f943399f --- /dev/null +++ b/deps/v8/src/builtins/object.tq @@ -0,0 +1,138 @@ +// Copyright 2019 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
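For reference, the observable JavaScript behaviour that the new object.tq wires up can be sketched in TypeScript (an illustrative demo, not part of the patch): ordinary objects take the runtime path, while proxies dispatch to their handler traps.

    // Illustrative only: the proxy-vs-ordinary dispatch that the Torque
    // macros below implement for Object.isExtensible and friends.
    const plain = {};
    const proxied = new Proxy({}, {
      isExtensible(target: object): boolean {
        console.log('isExtensible trap');
        return Reflect.isExtensible(target); // must agree with the target
      },
    });
    console.log(Object.isExtensible(plain));   // true, no trap involved
    console.log(Object.isExtensible(proxied)); // logs 'isExtensible trap', then true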
+ +namespace runtime { + extern transitioning runtime + ObjectIsExtensible(implicit context: Context)(Object): Object; + + extern transitioning runtime + JSReceiverPreventExtensionsThrow(implicit context: Context)(JSReceiver): + Object; + + extern transitioning runtime + JSReceiverPreventExtensionsDontThrow(implicit context: Context)(JSReceiver): + Object; + + extern transitioning runtime + JSReceiverGetPrototypeOf(implicit context: Context)(JSReceiver): Object; + + extern transitioning runtime + JSReceiverSetPrototypeOfThrow(implicit context: Context)(JSReceiver, Object): + Object; + + extern transitioning runtime + JSReceiverSetPrototypeOfDontThrow(implicit context: + Context)(JSReceiver, Object): Object; +} // namespace runtime + +namespace object { + transitioning macro + ObjectIsExtensible(implicit context: Context)(object: Object): Object { + const objectJSReceiver = Cast<JSReceiver>(object) otherwise return False; + const objectJSProxy = Cast<JSProxy>(objectJSReceiver) + otherwise return runtime::ObjectIsExtensible(objectJSReceiver); + return proxy::ProxyIsExtensible(objectJSProxy); + } + + transitioning macro + ObjectPreventExtensionsThrow(implicit context: Context)(object: Object): + Object { + const objectJSReceiver = Cast<JSReceiver>(object) otherwise return object; + const objectJSProxy = Cast<JSProxy>(objectJSReceiver) + otherwise return runtime::JSReceiverPreventExtensionsThrow( + objectJSReceiver); + proxy::ProxyPreventExtensions(objectJSProxy, True); + return objectJSReceiver; + } + + transitioning macro + ObjectPreventExtensionsDontThrow(implicit context: Context)(object: Object): + Object { + const objectJSReceiver = Cast<JSReceiver>(object) otherwise return False; + const objectJSProxy = Cast<JSProxy>(objectJSReceiver) + otherwise return runtime::JSReceiverPreventExtensionsDontThrow( + objectJSReceiver); + return proxy::ProxyPreventExtensions(objectJSProxy, False); + } + + transitioning macro + ObjectGetPrototypeOf(implicit context: Context)(object: Object): Object { + const objectJSReceiver: JSReceiver = ToObject_Inline(context, object); + return object::JSReceiverGetPrototypeOf(objectJSReceiver); + } + + transitioning macro + JSReceiverGetPrototypeOf(implicit context: Context)(object: JSReceiver): + Object { + const objectJSProxy = Cast<JSProxy>(object) + otherwise return runtime::JSReceiverGetPrototypeOf(object); + return proxy::ProxyGetPrototypeOf(objectJSProxy); + } + + transitioning macro + ObjectSetPrototypeOfThrow(implicit context: Context)( + object: Object, proto: Object): Object { + const objectJSReceiver = Cast<JSReceiver>(object) otherwise return object; + const objectJSProxy = Cast<JSProxy>(objectJSReceiver) + otherwise return runtime::JSReceiverSetPrototypeOfThrow( + objectJSReceiver, proto); + proxy::ProxySetPrototypeOf(objectJSProxy, proto, True); + return objectJSReceiver; + } + + transitioning macro + ObjectSetPrototypeOfDontThrow(implicit context: Context)( + object: Object, proto: Object): Object { + const objectJSReceiver = Cast<JSReceiver>(object) otherwise return False; + const objectJSProxy = Cast<JSProxy>(objectJSReceiver) + otherwise return runtime::JSReceiverSetPrototypeOfDontThrow( + objectJSReceiver, proto); + return proxy::ProxySetPrototypeOf(objectJSProxy, proto, False); + } +} // namespace object + +namespace object_isextensible { + // ES6 section 19.1.2.11 Object.isExtensible ( O ) + transitioning javascript builtin ObjectIsExtensible( + js-implicit context: Context)(_receiver: Object, object: Object): Object { + return object::ObjectIsExtensible(object); + } +} // namespace object_isextensible + +namespace object_preventextensions { + // ES6
section 19.1.2.15 Object.preventExtensions ( O ) + transitioning javascript builtin ObjectPreventExtensions( + js-implicit context: Context)(_receiver: Object, object: Object): Object { + return object::ObjectPreventExtensionsThrow(object); + } +} // namespace object_preventextensions + +namespace object_getprototypeof { + // ES6 section 19.1.2.9 Object.getPrototypeOf ( O ) + transitioning javascript builtin ObjectGetPrototypeOf( + js-implicit context: Context)(_receiver: Object, object: Object): Object { + return object::ObjectGetPrototypeOf(object); + } +} // namespace object_getprototypeof + +namespace object_setprototypeof { + // ES6 section 19.1.2.18 Object.setPrototypeOf ( O, proto ) + transitioning javascript builtin ObjectSetPrototypeOf( + js-implicit context: + Context)(_receiver: Object, object: Object, proto: Object): Object { + // 1. Set O to ? RequireObjectCoercible(O). + RequireObjectCoercible(object, 'Object.setPrototypeOf'); + + // 2. If Type(proto) is neither Object nor Null, throw a TypeError + // exception. + // 3. If Type(O) is not Object, return O. + // 4. Let status be ? O.[[SetPrototypeOf]](proto). + // 5. If status is false, throw a TypeError exception. + // 6. Return O. + if (proto == Null || Is<JSReceiver>(proto)) { + return object::ObjectSetPrototypeOfThrow(object, proto); + } + ThrowTypeError(kProtoObjectOrNull, proto); + } +} // namespace object_setprototypeof diff --git a/deps/v8/src/builtins/ppc/builtins-ppc.cc b/deps/v8/src/builtins/ppc/builtins-ppc.cc index e3c6ce6407f275..a42cb9bebd2824 100644 --- a/deps/v8/src/builtins/ppc/builtins-ppc.cc +++ b/deps/v8/src/builtins/ppc/builtins-ppc.cc @@ -60,24 +60,20 @@ void Builtins::Generate_InternalArrayConstructor(MacroAssembler* masm) { static void GenerateTailCallToReturnedCode(MacroAssembler* masm, Runtime::FunctionId function_id) { // ----------- S t a t e ------------- - // -- r3 : argument count (preserved for callee) // -- r4 : target function (preserved for callee) // -- r6 : new target (preserved for callee) // ----------------------------------- { FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL); - // Push the number of arguments to the callee. // Push a copy of the target function and the new target. // Push function as parameter to the runtime call. - __ SmiTag(r3); - __ Push(r3, r4, r6, r4); + __ Push(r4, r6, r4); __ CallRuntime(function_id, 1); __ mr(r5, r3); // Restore target function and new target.
- __ Pop(r3, r4, r6); - __ SmiUntag(r3); + __ Pop(r4, r6); } static_assert(kJavaScriptCallCodeStartRegister == r5, "ABI mismatch"); __ JumpCodeObject(r5); @@ -110,6 +106,8 @@ void Generate_JSBuiltinsConstructStubHelper(MacroAssembler* masm) { // -- sp[...]: constructor arguments // ----------------------------------- + Register scratch = r5; + Label stack_overflow; Generate_StackOverflowCheck(masm, r3, r8, &stack_overflow); @@ -141,13 +139,13 @@ void Generate_JSBuiltinsConstructStubHelper(MacroAssembler* masm) { // -- sp[2*kPointerSize]: context // ----------------------------------- __ beq(&no_args, cr0); - __ ShiftLeftImm(ip, r3, Operand(kPointerSizeLog2)); - __ sub(sp, sp, ip); + __ ShiftLeftImm(scratch, r3, Operand(kPointerSizeLog2)); + __ sub(sp, sp, scratch); __ mtctr(r3); __ bind(&loop); - __ subi(ip, ip, Operand(kPointerSize)); - __ LoadPX(r0, MemOperand(r7, ip)); - __ StorePX(r0, MemOperand(sp, ip)); + __ subi(scratch, scratch, Operand(kPointerSize)); + __ LoadPX(r0, MemOperand(r7, scratch)); + __ StorePX(r0, MemOperand(sp, scratch)); __ bdnz(&loop); __ bind(&no_args); @@ -300,13 +298,13 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) { // ----------------------------------- __ cmpi(r3, Operand::Zero()); __ beq(&no_args); - __ ShiftLeftImm(ip, r3, Operand(kPointerSizeLog2)); - __ sub(sp, sp, ip); + __ ShiftLeftImm(r9, r3, Operand(kPointerSizeLog2)); + __ sub(sp, sp, r9); __ mtctr(r3); __ bind(&loop); - __ subi(ip, ip, Operand(kPointerSize)); - __ LoadPX(r0, MemOperand(r7, ip)); - __ StorePX(r0, MemOperand(sp, ip)); + __ subi(r9, r9, Operand(kPointerSize)); + __ LoadPX(r0, MemOperand(r7, r9)); + __ StorePX(r0, MemOperand(sp, r9)); __ bdnz(&loop); __ bind(&no_args); @@ -416,12 +414,13 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) { // Flood function if we are stepping. Label prepare_step_in_if_stepping, prepare_step_in_suspended_generator; Label stepping_prepared; + Register scratch = r8; ExternalReference debug_hook = ExternalReference::debug_hook_on_function_call_address(masm->isolate()); - __ Move(ip, debug_hook); - __ LoadByte(ip, MemOperand(ip), r0); - __ extsb(ip, ip); - __ CmpSmiLiteral(ip, Smi::zero(), r0); + __ Move(scratch, debug_hook); + __ LoadByte(scratch, MemOperand(scratch), r0); + __ extsb(scratch, scratch); + __ CmpSmiLiteral(scratch, Smi::zero(), r0); __ bne(&prepare_step_in_if_stepping); // Flood function if we need to continue stepping in the suspended generator. @@ -429,9 +428,9 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) { ExternalReference debug_suspended_generator = ExternalReference::debug_suspended_generator_address(masm->isolate()); - __ Move(ip, debug_suspended_generator); - __ LoadP(ip, MemOperand(ip)); - __ cmp(ip, r4); + __ Move(scratch, debug_suspended_generator); + __ LoadP(scratch, MemOperand(scratch)); + __ cmp(scratch, r4); __ beq(&prepare_step_in_suspended_generator); __ bind(&stepping_prepared); @@ -442,8 +441,8 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) { __ blt(&stack_overflow); // Push receiver. 
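// (Here and throughout this file, the hard-coded ip register is replaced by
// an explicitly chosen scratch register.)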
- __ LoadP(ip, FieldMemOperand(r4, JSGeneratorObject::kReceiverOffset)); - __ Push(ip); + __ LoadP(scratch, FieldMemOperand(r4, JSGeneratorObject::kReceiverOffset)); + __ Push(scratch); // ----------- S t a t e ------------- // -- r4 : the JSGeneratorObject to resume @@ -470,8 +469,8 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) { __ mtctr(r6); __ bind(&loop); - __ LoadPU(ip, MemOperand(r9, kPointerSize)); - __ push(ip); + __ LoadPU(scratch, MemOperand(r9, kPointerSize)); + __ push(scratch); __ bdnz(&loop); __ bind(&done_loop); @@ -602,6 +601,7 @@ void Generate_JSEntryVariant(MacroAssembler* masm, StackFrame::Type type, __ LoadP(r0, MemOperand(r3)); __ push(r0); + Register scratch = r9; // Set up frame pointer for the frame to be pushed. __ addi(fp, sp, Operand(-EntryFrameConstants::kCallerFPOffset)); @@ -611,17 +611,17 @@ void Generate_JSEntryVariant(MacroAssembler* masm, StackFrame::Type type, ExternalReference::Create(IsolateAddressId::kJSEntrySPAddress, masm->isolate()); __ Move(r3, js_entry_sp); - __ LoadP(r9, MemOperand(r3)); - __ cmpi(r9, Operand::Zero()); + __ LoadP(scratch, MemOperand(r3)); + __ cmpi(scratch, Operand::Zero()); __ bne(&non_outermost_js); __ StoreP(fp, MemOperand(r3)); - __ mov(ip, Operand(StackFrame::OUTERMOST_JSENTRY_FRAME)); + __ mov(scratch, Operand(StackFrame::OUTERMOST_JSENTRY_FRAME)); Label cont; __ b(&cont); __ bind(&non_outermost_js); - __ mov(ip, Operand(StackFrame::INNER_JSENTRY_FRAME)); + __ mov(scratch, Operand(StackFrame::INNER_JSENTRY_FRAME)); __ bind(&cont); - __ push(ip); // frame-type + __ push(scratch); // frame-type // Jump to a faked try block that does the invoke, with a faked catch // block that sets the pending exception. @@ -642,12 +642,12 @@ void Generate_JSEntryVariant(MacroAssembler* masm, StackFrame::Type type, // field in the JSEnv and return a failure sentinel. Coming in here the // fp will be invalid because the PushStackHandler below sets it to 0 to // signal the existence of the JSEntry frame. - __ Move(ip, - ExternalReference::Create(IsolateAddressId::kPendingExceptionAddress, - masm->isolate())); + __ Move(scratch, + ExternalReference::Create( + IsolateAddressId::kPendingExceptionAddress, masm->isolate())); } - __ StoreP(r3, MemOperand(ip)); + __ StoreP(r3, MemOperand(scratch)); __ LoadRoot(r3, RootIndex::kException); __ b(&exit); @@ -679,16 +679,16 @@ void Generate_JSEntryVariant(MacroAssembler* masm, StackFrame::Type type, __ pop(r8); __ cmpi(r8, Operand(StackFrame::OUTERMOST_JSENTRY_FRAME)); __ bne(&non_outermost_js_2); - __ mov(r9, Operand::Zero()); + __ mov(scratch, Operand::Zero()); __ Move(r8, js_entry_sp); - __ StoreP(r9, MemOperand(r8)); + __ StoreP(scratch, MemOperand(r8)); __ bind(&non_outermost_js_2); // Restore the top frame descriptors from the stack. __ pop(r6); - __ Move(ip, ExternalReference::Create( - IsolateAddressId::kCEntryFPAddress, masm->isolate())); - __ StoreP(r6, MemOperand(ip)); + __ Move(scratch, ExternalReference::Create(IsolateAddressId::kCEntryFPAddress, + masm->isolate())); + __ StoreP(r6, MemOperand(scratch)); // Reset the stack to the callee saved registers. 
__ addi(sp, sp, Operand(-EntryFrameConstants::kCallerFPOffset)); @@ -894,13 +894,11 @@ static void MaybeTailCallOptimizedCodeSlot(MacroAssembler* masm, Register scratch1, Register scratch2, Register scratch3) { // ----------- S t a t e ------------- - // -- r0 : argument count (preserved for callee if needed, and caller) - // -- r3 : new target (preserved for callee if needed, and caller) - // -- r1 : target function (preserved for callee if needed, and caller) + // -- r6 : new target (preserved for callee if needed, and caller) + // -- r4 : target function (preserved for callee if needed, and caller) // -- feedback vector (preserved for caller if needed) // ----------------------------------- - DCHECK( - !AreAliased(feedback_vector, r3, r4, r6, scratch1, scratch2, scratch3)); + DCHECK(!AreAliased(feedback_vector, r4, r6, scratch1, scratch2, scratch3)); Label optimized_code_slot_is_weak_ref, fallthrough; @@ -1084,6 +1082,15 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) { FieldMemOperand(closure, JSFunction::kFeedbackCellOffset)); __ LoadP(feedback_vector, FieldMemOperand(feedback_vector, Cell::kValueOffset)); + + Label push_stack_frame; + // Check if feedback vector is valid. If valid, check for optimized code + // and update invocation count. Otherwise, set up the stack frame. + __ LoadP(r7, FieldMemOperand(feedback_vector, HeapObject::kMapOffset)); + __ LoadHalfWord(r7, FieldMemOperand(r7, Map::kInstanceTypeOffset)); + __ cmpi(r7, Operand(FEEDBACK_VECTOR_TYPE)); + __ bne(&push_stack_frame); + // Read off the optimized code slot in the feedback vector, and if there // is optimized code or an optimization marker, call that instead. MaybeTailCallOptimizedCodeSlot(masm, feedback_vector, r7, r9, r8); @@ -1102,6 +1109,9 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) { // Open a frame scope to indicate that there is a frame on the stack. The // MANUAL indicates that the scope shouldn't actually generate code to set up // the frame (that is done below). + + __ bind(&push_stack_frame); + FrameScope frame_scope(masm, StackFrame::MANUAL); __ PushStandardFrame(closure); @@ -1109,12 +1119,12 @@ // 8-bit fields next to each other, so we can optimize by writing a single // 16-bit store. These static asserts guard that our assumption is valid. STATIC_ASSERT(BytecodeArray::kBytecodeAgeOffset == - BytecodeArray::kOSRNestingLevelOffset + kCharSize); + BytecodeArray::kOsrNestingLevelOffset + kCharSize); STATIC_ASSERT(BytecodeArray::kNoAgeBytecodeAge == 0); __ li(r8, Operand(0)); __ StoreHalfWord(r8, FieldMemOperand(kInterpreterBytecodeArrayRegister, - BytecodeArray::kOSRNestingLevelOffset), + BytecodeArray::kOsrNestingLevelOffset), r0); // Load initial bytecode offset. @@ -1395,11 +1405,13 @@ static void Generate_InterpreterEnterBytecode(MacroAssembler* masm) { __ SmiUntag(kInterpreterBytecodeOffsetRegister); // Dispatch to the target bytecode.
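// (The current bytecode is loaded, scaled to a pointer-sized table index, and
// used to fetch the handler's code start address from the dispatch table.)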
+ UseScratchRegisterScope temps(masm); + Register scratch = temps.Acquire(); __ lbzx(ip, MemOperand(kInterpreterBytecodeArrayRegister, kInterpreterBytecodeOffsetRegister)); - __ ShiftLeftImm(ip, ip, Operand(kPointerSizeLog2)); + __ ShiftLeftImm(scratch, scratch, Operand(kPointerSizeLog2)); __ LoadPX(kJavaScriptCallCodeStartRegister, - MemOperand(kInterpreterDispatchTableRegister, ip)); + MemOperand(kInterpreterDispatchTableRegister, scratch)); __ Jump(kJavaScriptCallCodeStartRegister); } @@ -1526,13 +1538,17 @@ void Generate_ContinueToBuiltinHelper(MacroAssembler* masm, __ LoadP( fp, MemOperand(sp, BuiltinContinuationFrameConstants::kFixedFrameSizeFromFp)); - __ Pop(ip); + // Load builtin index (stored as a Smi) and use it to get the builtin start + // address from the builtins table. + UseScratchRegisterScope temps(masm); + Register builtin = temps.Acquire(); + __ Pop(builtin); __ addi(sp, sp, Operand(BuiltinContinuationFrameConstants::kFixedFrameSizeFromFp)); __ Pop(r0); __ mtlr(r0); - __ addi(ip, ip, Operand(Code::kHeaderSize - kHeapObjectTag)); - __ Jump(ip); + __ LoadEntryFromBuiltinIndex(builtin); + __ Jump(builtin); } } // namespace @@ -1702,14 +1718,15 @@ void Builtins::Generate_FunctionPrototypeCall(MacroAssembler* masm) { // r3: actual number of arguments // r4: callable { + Register scratch = r6; Label loop; // Calculate the copy start address (destination). Copy end address is sp. __ add(r5, sp, r5); __ mtctr(r3); __ bind(&loop); - __ LoadP(ip, MemOperand(r5, -kPointerSize)); - __ StoreP(ip, MemOperand(r5)); + __ LoadP(scratch, MemOperand(r5, -kPointerSize)); + __ StoreP(scratch, MemOperand(r5)); __ subi(r5, r5, Operand(kPointerSize)); __ bdnz(&loop); // Adjust the actual number of arguments and remove the top element @@ -1891,7 +1908,7 @@ void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm, // Check for stack overflow. Label stack_overflow; - Generate_StackOverflowCheck(masm, r7, ip, &stack_overflow); + Generate_StackOverflowCheck(masm, r7, scratch, &stack_overflow); // Push arguments onto the stack (thisArgument is already on the stack). { @@ -1902,12 +1919,12 @@ void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm, Operand(FixedArray::kHeaderSize - kHeapObjectTag - kPointerSize)); __ mtctr(r7); __ bind(&loop); - __ LoadPU(ip, MemOperand(r5, kPointerSize)); - __ CompareRoot(ip, RootIndex::kTheHoleValue); + __ LoadPU(scratch, MemOperand(r5, kPointerSize)); + __ CompareRoot(scratch, RootIndex::kTheHoleValue); __ bne(&skip); - __ LoadRoot(ip, RootIndex::kUndefinedValue); + __ LoadRoot(scratch, RootIndex::kUndefinedValue); __ bind(&skip); - __ push(ip); + __ push(scratch); __ bdnz(&loop); __ bind(&no_args); __ add(r3, r3, r7); @@ -1953,8 +1970,10 @@ void Builtins::Generate_CallOrConstructForwardVarargs(MacroAssembler* masm, // Check if we have an arguments adaptor frame below the function frame. 
Label arguments_adaptor, arguments_done; __ LoadP(r7, MemOperand(fp, StandardFrameConstants::kCallerFPOffset)); - __ LoadP(ip, MemOperand(r7, CommonFrameConstants::kContextOrFrameTypeOffset)); - __ cmpi(ip, Operand(StackFrame::TypeToMarker(StackFrame::ARGUMENTS_ADAPTOR))); + __ LoadP(scratch, + MemOperand(r7, CommonFrameConstants::kContextOrFrameTypeOffset)); + __ cmpi(scratch, + Operand(StackFrame::TypeToMarker(StackFrame::ARGUMENTS_ADAPTOR))); __ beq(&arguments_adaptor); { __ LoadP(r8, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset)); @@ -1988,9 +2007,9 @@ void Builtins::Generate_CallOrConstructForwardVarargs(MacroAssembler* masm, __ add(r3, r3, r8); __ bind(&loop); { - __ ShiftLeftImm(ip, r8, Operand(kPointerSizeLog2)); - __ LoadPX(ip, MemOperand(r7, ip)); - __ push(ip); + __ ShiftLeftImm(scratch, r8, Operand(kPointerSizeLog2)); + __ LoadPX(scratch, MemOperand(r7, scratch)); + __ push(scratch); __ subi(r8, r8, Operand(1)); __ cmpi(r8, Operand::Zero()); __ bne(&loop); @@ -2134,10 +2153,11 @@ void Generate_PushBoundArguments(MacroAssembler* masm) { // -- r7 : the number of [[BoundArguments]] // ----------------------------------- + Register scratch = r9; // Reserve stack space for the [[BoundArguments]]. { Label done; - __ mr(r9, sp); // preserve previous stack pointer + __ mr(scratch, sp); // preserve previous stack pointer __ ShiftLeftImm(r10, r7, Operand(kPointerSizeLog2)); __ sub(sp, sp, r10); // Check the stack for overflow. We are not trying to catch interruptions @@ -2146,7 +2166,7 @@ void Generate_PushBoundArguments(MacroAssembler* masm) { __ CompareRoot(sp, RootIndex::kRealStackLimit); __ bgt(&done); // Signed comparison. // Restore the stack pointer. - __ mr(sp, r9); + __ mr(sp, scratch); { FrameScope scope(masm, StackFrame::MANUAL); __ EnterFrame(StackFrame::INTERNAL); @@ -2166,7 +2186,7 @@ void Generate_PushBoundArguments(MacroAssembler* masm) { __ beq(&skip); __ mtctr(r3); __ bind(&loop); - __ LoadPX(r0, MemOperand(r9, r8)); + __ LoadPX(r0, MemOperand(scratch, r8)); __ StorePX(r0, MemOperand(sp, r8)); __ addi(r8, r8, Operand(kPointerSize)); __ bdnz(&loop); @@ -2201,9 +2221,9 @@ void Builtins::Generate_CallBoundFunctionImpl(MacroAssembler* masm) { __ AssertBoundFunction(r4); // Patch the receiver to [[BoundThis]]. - __ LoadP(ip, FieldMemOperand(r4, JSBoundFunction::kBoundThisOffset)); + __ LoadP(r6, FieldMemOperand(r4, JSBoundFunction::kBoundThisOffset)); __ ShiftLeftImm(r0, r3, Operand(kPointerSizeLog2)); - __ StorePX(ip, MemOperand(sp, r0)); + __ StorePX(r6, MemOperand(sp, r0)); // Push the [[BoundArguments]] onto the stack. Generate_PushBoundArguments(masm); @@ -2388,7 +2408,7 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) { __ cmpli(r5, Operand(SharedFunctionInfo::kDontAdaptArgumentsSentinel)); __ beq(&dont_adapt_arguments); __ LoadP(r7, FieldMemOperand(r4, JSFunction::kSharedFunctionInfoOffset)); - __ LoadP(r7, FieldMemOperand(r7, SharedFunctionInfo::kFlagsOffset)); + __ lwz(r7, FieldMemOperand(r7, SharedFunctionInfo::kFlagsOffset)); __ TestBitMask(r7, SharedFunctionInfo::IsSafeToSkipArgumentsAdaptorBit::kMask, r0); __ bne(&skip_adapt_arguments, cr0); @@ -2686,7 +2706,7 @@ void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size, __ CompareRoot(r6, RootIndex::kTheHoleValue); // Cannot use check here as it attempts to generate call into runtime. 
__ beq(&okay); - __ stop("Unexpected pending exception"); + __ stop(); __ bind(&okay); } @@ -2961,13 +2981,22 @@ static void CallApiFunctionAndReturn(MacroAssembler* masm, __ Move(scratch, thunk_ref); __ isel(eq, scratch, function_address, scratch); } else { - Label profiler_disabled; - Label end_profiler_check; - __ beq(&profiler_disabled); - __ Move(scratch, thunk_ref); - __ b(&end_profiler_check); - __ bind(&profiler_disabled); - __ mr(scratch, function_address); + Label profiler_enabled, end_profiler_check; + __ bne(&profiler_enabled); + __ Move(scratch, ExternalReference::address_of_runtime_stats_flag()); + __ lwz(scratch, MemOperand(scratch, 0)); + __ cmpi(scratch, Operand::Zero()); + __ bne(&profiler_enabled); + { + // Call the api function directly. + __ mr(scratch, function_address); + __ b(&end_profiler_check); + } + __ bind(&profiler_enabled); + { + // Additional parameter is the address of the actual callback. + __ Move(scratch, thunk_ref); + } __ bind(&end_profiler_check); } @@ -3264,6 +3293,8 @@ void Builtins::Generate_CallApiGetter(MacroAssembler* masm) { } void Builtins::Generate_DirectCEntry(MacroAssembler* masm) { + UseScratchRegisterScope temps(masm); + Register temp2 = temps.Acquire(); // Place the return address on the stack, making the call // GC safe. The RegExp backend also relies on this. __ mflr(r0); @@ -3271,11 +3302,11 @@ void Builtins::Generate_DirectCEntry(MacroAssembler* masm) { if (ABI_USES_FUNCTION_DESCRIPTORS && FLAG_embedded_builtins) { // AIX/PPC64BE Linux use a function descriptor; - __ LoadP(ToRegister(ABI_TOC_REGISTER), MemOperand(ip, kPointerSize)); - __ LoadP(ip, MemOperand(ip, 0)); // Instruction address + __ LoadP(ToRegister(ABI_TOC_REGISTER), MemOperand(temp2, kPointerSize)); + __ LoadP(temp2, MemOperand(temp2, 0)); // Instruction address } - __ Call(ip); // Call the C++ function. + __ Call(temp2); // Call the C++ function. __ LoadP(r0, MemOperand(sp, kStackFrameExtraParamSlot * kPointerSize)); __ mtlr(r0); __ blr(); diff --git a/deps/v8/src/builtins/proxy-constructor.tq b/deps/v8/src/builtins/proxy-constructor.tq index 178759b595cea8..ad60c20e2c2ea3 100644 --- a/deps/v8/src/builtins/proxy-constructor.tq +++ b/deps/v8/src/builtins/proxy-constructor.tq @@ -6,17 +6,14 @@ namespace proxy { - extern macro ProxiesCodeStubAssembler::GetProxyConstructorJSNewTarget(): - Object; - // ES #sec-proxy-constructor // https://tc39.github.io/ecma262/#sec-proxy-constructor transitioning javascript builtin - ProxyConstructor(implicit context: Context)( - receiver: Object, target: Object, handler: Object): JSProxy { + ProxyConstructor( + js-implicit context: Context, receiver: Object, + newTarget: Object)(target: Object, handler: Object): JSProxy { try { // 1. If NewTarget is undefined, throw a TypeError exception. - const newTarget: Object = GetProxyConstructorJSNewTarget(); if (newTarget == Undefined) { ThrowTypeError(kConstructorNotFunction, 'Proxy'); } diff --git a/deps/v8/src/builtins/proxy-delete-property.tq b/deps/v8/src/builtins/proxy-delete-property.tq new file mode 100644 index 00000000000000..759de766efbed1 --- /dev/null +++ b/deps/v8/src/builtins/proxy-delete-property.tq @@ -0,0 +1,67 @@ +// Copyright 2019 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
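The new file implements the proxy [[Delete]] trap in Torque. Its observable contract can be illustrated in TypeScript (a hypothetical demo, not part of the patch): a falsish trap result makes delete evaluate to false in sloppy mode and throw a TypeError in strict mode, which is what the languageMode check below encodes.

    // Illustrative only: the deleteProperty trap contract enforced below.
    const p = new Proxy({ x: 1 } as Record<string, number>, {
      deleteProperty(target, key): boolean {
        console.log(`deleteProperty trap for ${String(key)}`);
        return false; // refuse the deletion
      },
    });
    // Module code is strict, so the falsish trap result throws a TypeError;
    // in sloppy-mode code the same expression would simply evaluate to false.
    try {
      delete p.x;
    } catch (e) {
      console.log(e instanceof TypeError); // true
    }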
+ +#include 'src/builtins/builtins-proxy-gen.h' + +namespace proxy { + + // ES #sec-proxy-object-internal-methods-and-internal-slots-delete-p + // https://tc39.es/ecma262/#sec-proxy-object-internal-methods-and-internal-slots-delete-p + transitioning builtin + ProxyDeleteProperty(implicit context: Context)( + proxy: JSProxy, name: Name, languageMode: LanguageMode): Object { + const kTrapName: constexpr string = 'deleteProperty'; + // 1. Assert: IsPropertyKey(P) is true. + assert(TaggedIsNotSmi(name)); + assert(IsName(name)); + assert(!IsPrivateSymbol(name)); + + try { + // 2. Let handler be O.[[ProxyHandler]]. + // 3. If handler is null, throw a TypeError exception. + // 4. Assert: Type(handler) is Object. + assert(proxy.handler == Null || Is<JSReceiver>(proxy.handler)); + const handler = + Cast<JSReceiver>(proxy.handler) otherwise ThrowProxyHandlerRevoked; + + // 5. Let target be O.[[ProxyTarget]]. + const target = UnsafeCast<JSReceiver>(proxy.target); + + // 6. Let trap be ? GetMethod(handler, "deleteProperty"). + // 7. If trap is undefined, then (see 7.a below). + const trap: Callable = GetMethod(handler, kTrapName) + otherwise goto TrapUndefined(target); + + // 8. Let booleanTrapResult be ToBoolean(? Call(trap, handler, + // « target, P »)). + const trapResult = Call(context, trap, handler, target, name); + + // 9. If booleanTrapResult is false, return false. + if (BranchIfToBooleanIsFalse(trapResult)) { + if (languageMode == SmiConstant(kStrict)) { + ThrowTypeError(kProxyTrapReturnedFalsishFor, kTrapName, name); + } + return False; + } + + // 10. Let targetDesc be ? target.[[GetOwnProperty]](P). + // 11. If targetDesc is undefined, return true. + // 12. If targetDesc.[[Configurable]] is false, throw a TypeError + // exception. + // 13. Let extensibleTarget be ? IsExtensible(target). + // 14. If extensibleTarget is false, throw a TypeError exception. + CheckDeleteTrapResult(target, proxy, name); + + // 15. Return true. + return True; + } + label TrapUndefined(target: Object) { + // 7.a. Return ? target.[[Delete]](P). + return DeleteProperty(target, name, languageMode); + } + label ThrowProxyHandlerRevoked deferred { + ThrowTypeError(kProxyRevoked, kTrapName); + } + } +} diff --git a/deps/v8/src/builtins/proxy-get-property.tq b/deps/v8/src/builtins/proxy-get-property.tq index 0915a66d5f79ae..bac07f550c3eb9 100644 --- a/deps/v8/src/builtins/proxy-get-property.tq +++ b/deps/v8/src/builtins/proxy-get-property.tq @@ -6,9 +6,8 @@ namespace proxy { - extern transitioning runtime - GetPropertyWithReceiver(implicit context: Context)(Object, Name, Object, Smi): - Object; + extern transitioning builtin GetPropertyWithReceiver( + implicit context: Context)(Object, Name, Object, Smi): Object; // ES #sec-proxy-object-internal-methods-and-internal-slots-get-p-receiver // https://tc39.github.io/ecma262/#sec-proxy-object-internal-methods-and-internal-slots-get-p-receiver @@ -16,36 +15,38 @@ namespace proxy { ProxyGetProperty(implicit context: Context)( proxy: JSProxy, name: Name, receiverValue: Object, onNonExistent: Smi): Object { + PerformStackCheck(); // 1. Assert: IsPropertyKey(P) is true. assert(TaggedIsNotSmi(name)); assert(IsName(name)); assert(!IsPrivateSymbol(name)); // 2. Let handler be O.[[ProxyHandler]]. - const handler: Object = proxy.handler; - // 3. If handler is null, throw a TypeError exception. - if (handler == Null) { - ThrowTypeError(kProxyRevoked, 'get'); - } - // 4. Assert: Type(handler) is Object.
diff --git a/deps/v8/src/builtins/proxy-get-property.tq b/deps/v8/src/builtins/proxy-get-property.tq
index 0915a66d5f79ae..bac07f550c3eb9 100644
--- a/deps/v8/src/builtins/proxy-get-property.tq
+++ b/deps/v8/src/builtins/proxy-get-property.tq
@@ -6,9 +6,8 @@
 namespace proxy {

-  extern transitioning runtime
-  GetPropertyWithReceiver(implicit context: Context)(Object, Name, Object, Smi):
-      Object;
+  extern transitioning builtin GetPropertyWithReceiver(
+      implicit context: Context)(Object, Name, Object, Smi): Object;

   // ES #sec-proxy-object-internal-methods-and-internal-slots-get-p-receiver
   // https://tc39.github.io/ecma262/#sec-proxy-object-internal-methods-and-internal-slots-get-p-receiver
@@ -16,36 +15,38 @@ namespace proxy {
   ProxyGetProperty(implicit context: Context)(
       proxy: JSProxy, name: Name, receiverValue: Object,
       onNonExistent: Smi): Object {
+    PerformStackCheck();
     // 1. Assert: IsPropertyKey(P) is true.
     assert(TaggedIsNotSmi(name));
     assert(IsName(name));
     assert(!IsPrivateSymbol(name));

     // 2. Let handler be O.[[ProxyHandler]].
-    const handler: Object = proxy.handler;
-
-    // 3. If handler is null, throw a TypeError exception.
-    if (handler == Null) {
-      ThrowTypeError(kProxyRevoked, 'get');
-    }
-
-    // 4. Assert: Type(handler) is Object.
-    const handlerJSReceiver = UnsafeCast<JSReceiver>(handler);
+    let handler: JSReceiver;
+    typeswitch (proxy.handler) {
+      case (Null): {
+        ThrowTypeError(kProxyRevoked, 'get');
+      }
+      case (h: JSReceiver): {
+        handler = h;
+      }
+    }

     // 5. Let target be O.[[ProxyTarget]].
-    const target = proxy.target;
+    const target = Cast<JSReceiver>(proxy.target) otherwise unreachable;

     // 6. Let trap be ? GetMethod(handler, "get").
     // 7. If trap is undefined, then (see 7.a below).
     // 7.a. Return ? target.[[Get]](P, Receiver).
-    // TODO(mslekova): Introduce GetPropertyWithReceiver stub
-    const trap: Callable = GetMethod(handlerJSReceiver, 'get')
+    const trap: Callable = GetMethod(handler, 'get')
         otherwise return GetPropertyWithReceiver(
         target, name, receiverValue, onNonExistent);

     // 8. Let trapResult be ? Call(trap, handler, « target, P, Receiver »).
     const trapResult =
-        Call(context, trap, handlerJSReceiver, target, name, receiverValue);
+        Call(context, trap, handler, target, name, receiverValue);

     // 9. Let targetDesc be ? target.[[GetOwnProperty]](P).
     // 10. If targetDesc is not undefined and targetDesc.[[Configurable]] is
@@ -58,6 +59,7 @@ namespace proxy {
     // is undefined, then
     // i. If trapResult is not undefined, throw a TypeError exception.
     // 11. Return trapResult.
-    return CheckGetSetTrapResult(target, proxy, name, trapResult, kProxyGet);
+    CheckGetSetTrapResult(target, proxy, name, trapResult, kProxyGet);
+    return trapResult;
   }
 }
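
CheckGetSetTrapResult, now called for its side effect before returning `trapResult`, enforces the `get` invariant for non-configurable, non-writable data properties; a minimal TypeScript sketch of the observable behavior (illustrative):

const frozen = Object.freeze({ n: 1 });  // n: non-configurable, non-writable
const p = new Proxy(frozen, {
  get(): number {
    return 2;  // disagrees with the target's value
  },
});
try {
  p.n;  // steps 9-10: trap result must match, so this throws
} catch (e) {
  console.log(e instanceof TypeError);  // true
}
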
diff --git a/deps/v8/src/builtins/proxy-get-prototype-of.tq b/deps/v8/src/builtins/proxy-get-prototype-of.tq
new file mode 100644
index 00000000000000..2418eaf4230cb3
--- /dev/null
+++ b/deps/v8/src/builtins/proxy-get-prototype-of.tq
@@ -0,0 +1,70 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include 'src/builtins/builtins-proxy-gen.h'
+
+namespace proxy {
+
+  // ES #sec-proxy-object-internal-methods-and-internal-slots-getprototypeof
+  // https://tc39.github.io/ecma262/#sec-proxy-object-internal-methods-and-internal-slots-getprototypeof
+  transitioning builtin
+  ProxyGetPrototypeOf(implicit context: Context)(proxy: JSProxy): Object {
+    PerformStackCheck();
+    const kTrapName: constexpr string = 'getPrototypeOf';
+    try {
+      // 1. Let handler be O.[[ProxyHandler]].
+      // 2. If handler is null, throw a TypeError exception.
+      // 3. Assert: Type(handler) is Object.
+      assert(proxy.handler == Null || Is<JSReceiver>(proxy.handler));
+      const handler =
+          Cast<JSReceiver>(proxy.handler) otherwise ThrowProxyHandlerRevoked;
+
+      // 4. Let target be O.[[ProxyTarget]].
+      const target = proxy.target;
+
+      // 5. Let trap be ? GetMethod(handler, "getPrototypeOf").
+      // 6. If trap is undefined, then (see 6.a below).
+      const trap: Callable = GetMethod(handler, kTrapName)
+          otherwise goto TrapUndefined(target);
+
+      // 7. Let handlerProto be ? Call(trap, handler, « target »).
+      const handlerProto = Call(context, trap, handler, target);
+
+      // 8. If Type(handlerProto) is neither Object nor Null, throw a TypeError
+      // exception.
+      if (!Is<JSReceiver>(handlerProto)) {
+        goto ThrowProxyGetPrototypeOfInvalid;
+      }
+
+      // 9. Let extensibleTarget be ? IsExtensible(target).
+      // 10. If extensibleTarget is true, return handlerProto.
+      const extensibleTarget: Object = object::ObjectIsExtensible(target);
+      assert(extensibleTarget == True || extensibleTarget == False);
+      if (extensibleTarget == True) {
+        return handlerProto;
+      }
+
+      // 11. Let targetProto be ? target.[[GetPrototypeOf]]().
+      const targetProto = object::ObjectGetPrototypeOf(target);
+
+      // 12. If SameValue(handlerProto, targetProto) is false, throw a TypeError
+      // exception.
+      // 13. Return handlerProto.
+      if (BranchIfSameValue(targetProto, handlerProto)) {
+        return handlerProto;
+      }
+      ThrowTypeError(kProxyGetPrototypeOfNonExtensible);
+    }
+    label TrapUndefined(target: Object) {
+      // 6.a. Return ? target.[[GetPrototypeOf]]().
+      return object::ObjectGetPrototypeOf(target);
+    }
+    label ThrowProxyHandlerRevoked deferred {
+      ThrowTypeError(kProxyRevoked, kTrapName);
+    }
+    label ThrowProxyGetPrototypeOfInvalid deferred {
+      ThrowTypeError(kProxyGetPrototypeOfInvalid);
+    }
+  }
+}
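
The non-extensible path of ProxyGetPrototypeOf (steps 9-13 above) is observable as follows; a minimal TypeScript sketch (illustrative):

const frozen = Object.freeze({});
const p = new Proxy(frozen, {
  getPrototypeOf(): object | null {
    return { fake: true };  // differs from the real prototype
  },
});
try {
  Object.getPrototypeOf(p);  // SameValue check fails: TypeError
} catch (e) {
  console.log(e instanceof TypeError);  // true
}
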
diff --git a/deps/v8/src/builtins/proxy-has-property.tq b/deps/v8/src/builtins/proxy-has-property.tq
index ab3898a9c765ee..ee394c5d847d89 100644
--- a/deps/v8/src/builtins/proxy-has-property.tq
+++ b/deps/v8/src/builtins/proxy-has-property.tq
@@ -22,11 +22,12 @@ namespace proxy {
     // 2. Let handler be O.[[ProxyHandler]].
     // 3. If handler is null, throw a TypeError exception.
     // 4. Assert: Type(handler) is Object.
+    assert(proxy.handler == Null || Is<JSReceiver>(proxy.handler));
     const handler =
         Cast<JSReceiver>(proxy.handler) otherwise ThrowProxyHandlerRevoked;

     // 5. Let target be O.[[ProxyTarget]].
-    const target = proxy.target;
+    const target = Cast<JSReceiver>(proxy.target) otherwise unreachable;

     // 6. Let trap be ? GetMethod(handler, "has").
     // 7. If trap is undefined, then (see 7.a below).
@@ -42,7 +43,8 @@ namespace proxy {
     if (BranchIfToBooleanIsTrue(trapResult)) {
       return True;
     }
-    return CheckHasTrapResult(target, proxy, name);
+    CheckHasTrapResult(target, proxy, name);
+    return False;
   }
   label TrapUndefined(target: Object) {
     // 7.a. Return ? target.[[HasProperty]](P).
diff --git a/deps/v8/src/builtins/proxy-is-extensible.tq b/deps/v8/src/builtins/proxy-is-extensible.tq
new file mode 100644
index 00000000000000..82f4a5b955c297
--- /dev/null
+++ b/deps/v8/src/builtins/proxy-is-extensible.tq
@@ -0,0 +1,56 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include 'src/builtins/builtins-proxy-gen.h'
+
+namespace proxy {
+
+  // ES #sec-proxy-object-internal-methods-and-internal-slots-isextensible
+  // https://tc39.github.io/ecma262/#sec-proxy-object-internal-methods-and-internal-slots-isextensible
+  transitioning builtin ProxyIsExtensible(implicit context:
+                                              Context)(proxy: JSProxy): Object {
+    PerformStackCheck();
+    const kTrapName: constexpr string = 'isExtensible';
+    try {
+      // 1. Let handler be O.[[ProxyHandler]].
+      // 2. If handler is null, throw a TypeError exception.
+      // 3. Assert: Type(handler) is Object.
+      assert(proxy.handler == Null || Is<JSReceiver>(proxy.handler));
+      const handler =
+          Cast<JSReceiver>(proxy.handler) otherwise ThrowProxyHandlerRevoked;
+
+      // 4. Let target be O.[[ProxyTarget]].
+      const target = proxy.target;
+
+      // 5. Let trap be ? GetMethod(handler, "isExtensible").
+      // 6. If trap is undefined, then (see 6.a below).
+      const trap: Callable = GetMethod(handler, kTrapName)
+          otherwise goto TrapUndefined(target);
+
+      // 7. Let booleanTrapResult be ToBoolean(? Call(trap, handler, «
+      // target»)).
+      const trapResult = ToBoolean(Call(context, trap, handler, target));
+
+      // 8. Let targetResult be ? IsExtensible(target).
+      const targetResult: bool = ToBoolean(object::ObjectIsExtensible(target));
+
+      // 9. If SameValue(booleanTrapResult, targetResult) is false, throw a
+      // TypeError exception.
+      if (trapResult != targetResult) {
+        ThrowTypeError(
+            kProxyIsExtensibleInconsistent,
+            SelectBooleanConstant(targetResult));
+      }
+      // 10. Return booleanTrapResult.
+      return SelectBooleanConstant(trapResult);
+    }
+    label TrapUndefined(target: Object) {
+      // 6.a. Return ? IsExtensible(target).
+      return object::ObjectIsExtensible(target);
+    }
+    label ThrowProxyHandlerRevoked deferred {
+      ThrowTypeError(kProxyRevoked, kTrapName);
+    }
+  }
+}
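
The kProxyIsExtensibleInconsistent check above corresponds to the following observable behavior; a minimal TypeScript sketch (illustrative):

const t = {};  // extensible target
const p = new Proxy(t, {
  isExtensible(): boolean {
    return false;  // disagrees with IsExtensible(target)
  },
});
try {
  Object.isExtensible(p);  // step 9: SameValue check fails
} catch (e) {
  console.log(e instanceof TypeError);  // true
}
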
diff --git a/deps/v8/src/builtins/proxy-prevent-extensions.tq b/deps/v8/src/builtins/proxy-prevent-extensions.tq
new file mode 100644
index 00000000000000..6d5d2569fb8645
--- /dev/null
+++ b/deps/v8/src/builtins/proxy-prevent-extensions.tq
@@ -0,0 +1,66 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include 'src/builtins/builtins-proxy-gen.h'
+
+namespace proxy {
+
+  // ES #sec-proxy-object-internal-methods-and-internal-slots-preventextensions
+  // https://tc39.es/ecma262/#sec-proxy-object-internal-methods-and-internal-slots-preventextensions
+  transitioning builtin
+  ProxyPreventExtensions(implicit context: Context)(
+      proxy: JSProxy, doThrow: Boolean): Object {
+    PerformStackCheck();
+    const kTrapName: constexpr string = 'preventExtensions';
+    try {
+      // 1. Let handler be O.[[ProxyHandler]].
+      // 2. If handler is null, throw a TypeError exception.
+      // 3. Assert: Type(handler) is Object.
+      assert(proxy.handler == Null || Is<JSReceiver>(proxy.handler));
+      const handler =
+          Cast<JSReceiver>(proxy.handler) otherwise ThrowProxyHandlerRevoked;
+
+      // 4. Let target be O.[[ProxyTarget]].
+      const target = proxy.target;
+
+      // 5. Let trap be ? GetMethod(handler, "preventExtensions").
+      // 6. If trap is undefined, then (see 6.a below).
+      const trap: Callable = GetMethod(handler, kTrapName)
+          otherwise goto TrapUndefined(target);
+
+      // 7. Let booleanTrapResult be ToBoolean(? Call(trap, handler, «
+      // target»)).
+      const trapResult = Call(context, trap, handler, target);
+
+      // 8. If booleanTrapResult is true, then
+      // 8.a. Let extensibleTarget be ? IsExtensible(target).
+      // 8.b If extensibleTarget is true, throw a TypeError exception.
+      if (BranchIfToBooleanIsTrue(trapResult)) {
+        const extensibleTarget: Object = object::ObjectIsExtensible(target);
+        assert(extensibleTarget == True || extensibleTarget == False);
+        if (extensibleTarget == True) {
+          ThrowTypeError(kProxyPreventExtensionsExtensible);
+        }
+      } else {
+        if (doThrow == True) {
+          ThrowTypeError(kProxyTrapReturnedFalsish, kTrapName);
+        }
+        return False;
+      }
+
+      // 9. Return booleanTrapResult.
+      return True;
+    }
+    label TrapUndefined(target: Object) {
+      // 6.a. Return ? target.[[PreventExtensions]]().
+      if (doThrow == True) {
+        return object::ObjectPreventExtensionsThrow(target);
+      }
+      return object::ObjectPreventExtensionsDontThrow(target);
+    }
+    label ThrowProxyHandlerRevoked deferred {
+      ThrowTypeError(kProxyRevoked, kTrapName);
+    }
+  }
+}  // namespace proxy
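
ProxyPreventExtensions' `doThrow` parameter above distinguishes the Object and Reflect entry points; a minimal TypeScript sketch (illustrative):

const p = new Proxy({}, {
  preventExtensions(): boolean {
    return true;  // claims success while the target stays extensible
  },
});
try {
  Object.preventExtensions(p);  // step 8.b: TypeError
} catch (e) {
  console.log(e instanceof TypeError);  // true
}
// A falsish trap result takes the non-throwing path under Reflect.
const q = new Proxy({}, { preventExtensions: () => false });
console.log(Reflect.preventExtensions(q));  // false
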
diff --git a/deps/v8/src/builtins/proxy-revocable.tq b/deps/v8/src/builtins/proxy-revocable.tq
index 695f005c9bc76e..b09baab9cf1913 100644
--- a/deps/v8/src/builtins/proxy-revocable.tq
+++ b/deps/v8/src/builtins/proxy-revocable.tq
@@ -7,17 +7,13 @@
 namespace proxy {

   extern macro ProxiesCodeStubAssembler::AllocateProxyRevokeFunction(
-      Object, Object): JSFunction;
-  macro AllocateProxyRevokeFunction(implicit context: Context)(proxy: JSProxy):
-      JSFunction {
-    return AllocateProxyRevokeFunction(proxy, context);
-  }
+      implicit context: Context)(JSProxy): JSFunction;

   // Proxy.revocable(target, handler)
   // https://tc39.github.io/ecma262/#sec-proxy.revocable
   transitioning javascript builtin ProxyRevocable(
-      context: Context, receiver: Object, target: Object,
+      context: Context, _receiver: Object, target: Object,
       handler: Object): JSProxyRevocableResult {
     try {
       const targetJSReceiver =
diff --git a/deps/v8/src/builtins/proxy-revoke.tq b/deps/v8/src/builtins/proxy-revoke.tq
index 400f586b2159c1..d89b54077ae109 100644
--- a/deps/v8/src/builtins/proxy-revoke.tq
+++ b/deps/v8/src/builtins/proxy-revoke.tq
@@ -9,7 +9,7 @@ namespace proxy {
   // Proxy Revocation Functions
   // https://tc39.github.io/ecma262/#sec-proxy-revocation-functions
   transitioning javascript builtin
-  ProxyRevoke(implicit context: Context)(): Undefined {
+  ProxyRevoke(js-implicit context: Context)(): Undefined {
     // 1. Let p be F.[[RevocableProxy]].
     const proxyObject: Object = context[PROXY_SLOT];
diff --git a/deps/v8/src/builtins/proxy-set-property.tq b/deps/v8/src/builtins/proxy-set-property.tq
index 72181e08a824fc..d0411a8e894e9a 100644
--- a/deps/v8/src/builtins/proxy-set-property.tq
+++ b/deps/v8/src/builtins/proxy-set-property.tq
@@ -30,21 +30,20 @@ namespace proxy {
       return Undefined;
     }

-    // 2. Let handler be O.[[ProxyHandler]].
-    const handler: Object = proxy.handler;
-
     try {
+      // 2. Let handler be O.[[ProxyHandler]].
       // 3. If handler is null, throw a TypeError exception.
       // 4. Assert: Type(handler) is Object.
-      const handlerJSReceiver =
-          Cast<JSReceiver>(handler) otherwise ThrowProxyHandlerRevoked;
+      assert(proxy.handler == Null || Is<JSReceiver>(proxy.handler));
+      const handler =
+          Cast<JSReceiver>(proxy.handler) otherwise ThrowProxyHandlerRevoked;

       // 5. Let target be O.[[ProxyTarget]].
-      const target = proxy.target;
+      const target = UnsafeCast<JSReceiver>(proxy.target);

       // 6. Let trap be ? GetMethod(handler, "set").
       // 7. If trap is undefined, then (see 7.a below).
-      const trap: Callable = GetMethod(handlerJSReceiver, 'set')
+      const trap: Callable = GetMethod(handler, 'set')
           otherwise goto TrapUndefined(target);

       // 8. Let booleanTrapResult be ToBoolean(? Call(trap, handler,
@@ -61,11 +60,11 @@ namespace proxy {
       // i. If targetDesc.[[Set]] is undefined, throw a TypeError
       // exception.
       // 12. Return true.
-      const trapResult = Call(
-          context, trap, handlerJSReceiver, target, name, value, receiverValue);
+      const trapResult =
+          Call(context, trap, handler, target, name, value, receiverValue);
       if (BranchIfToBooleanIsTrue(trapResult)) {
-        return CheckGetSetTrapResult(
-            target, proxy, name, trapResult, kProxySet);
+        CheckGetSetTrapResult(target, proxy, name, value, kProxySet);
+        return value;
       }
       ThrowTypeErrorIfStrict(
           SmiConstant(kProxyTrapReturnedFalsishFor), 'set', name);
@@ -77,7 +76,6 @@ namespace proxy {
       return value;
     }
     label ThrowProxyHandlerRevoked deferred {
-      assert(handler == Null);
       ThrowTypeError(kProxyRevoked, 'set');
     }
   }
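
ProxySetProperty now returns `value` on success and routes a falsish trap result through ThrowTypeErrorIfStrict; a minimal TypeScript sketch of the two paths (illustrative, module code is strict):

const p: any = new Proxy({}, {
  set(): boolean {
    return false;  // falsish trap result
  },
});
console.log(Reflect.set(p, 'x', 1));  // false, no throw
try {
  p.x = 1;  // strict code throws
} catch (e) {
  console.log(e instanceof TypeError);  // true
}
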
diff --git a/deps/v8/src/builtins/proxy-set-prototype-of.tq b/deps/v8/src/builtins/proxy-set-prototype-of.tq
new file mode 100644
index 00000000000000..bbd99be4117eaa
--- /dev/null
+++ b/deps/v8/src/builtins/proxy-set-prototype-of.tq
@@ -0,0 +1,77 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include 'src/builtins/builtins-proxy-gen.h'
+
+namespace proxy {
+
+  // ES #sec-proxy-object-internal-methods-and-internal-slots-setprototypeof-v
+  // https://tc39.es/ecma262/#sec-proxy-object-internal-methods-and-internal-slots-setprototypeof-v
+  transitioning builtin
+  ProxySetPrototypeOf(implicit context: Context)(
+      proxy: JSProxy, proto: Object, doThrow: Boolean): Object {
+    PerformStackCheck();
+    const kTrapName: constexpr string = 'setPrototypeOf';
+    try {
+      // 1. Assert: Either Type(V) is Object or Type(V) is Null.
+      assert(proto == Null || Is<JSReceiver>(proto));
+
+      // 2. Let handler be O.[[ProxyHandler]].
+      // 3. If handler is null, throw a TypeError exception.
+      // 4. Assert: Type(handler) is Object.
+      assert(proxy.handler == Null || Is<JSReceiver>(proxy.handler));
+      const handler =
+          Cast<JSReceiver>(proxy.handler) otherwise ThrowProxyHandlerRevoked;
+
+      // 5. Let target be O.[[ProxyTarget]].
+      const target = proxy.target;
+
+      // 6. Let trap be ? GetMethod(handler, "setPrototypeOf").
+      // 7. If trap is undefined, then (see 7.a below).
+      const trap: Callable = GetMethod(handler, kTrapName)
+          otherwise goto TrapUndefined(target, proto);
+
+      // 8. Let booleanTrapResult be ToBoolean(? Call(trap, handler, « target, V
+      // »)).
+      const trapResult = Call(context, trap, handler, target, proto);
+
+      // 9. If booleanTrapResult is false, return false.
+      if (BranchIfToBooleanIsFalse(trapResult)) {
+        if (doThrow == True) {
+          ThrowTypeError(kProxyTrapReturnedFalsishFor, kTrapName);
+        }
+        return False;
+      }
+
+      // 10. Let extensibleTarget be ? IsExtensible(target).
+      // 11. If extensibleTarget is true, return true.
+      const extensibleTarget: Object = object::ObjectIsExtensible(target);
+      assert(extensibleTarget == True || extensibleTarget == False);
+      if (extensibleTarget == True) {
+        return True;
+      }
+
+      // 12. Let targetProto be ? target.[[GetPrototypeOf]]().
+      const targetProto = object::ObjectGetPrototypeOf(target);
+
+      // 13. If SameValue(V, targetProto) is false, throw a TypeError
+      // exception.
+      // 14. Return true.
+      if (BranchIfSameValue(proto, targetProto)) {
+        return True;
+      }
+      ThrowTypeError(kProxySetPrototypeOfNonExtensible);
+    }
+    label TrapUndefined(target: Object, proto: Object) {
+      // 7.a. Return ? target.[[SetPrototypeOf]]().
+      if (doThrow == True) {
+        return object::ObjectSetPrototypeOfThrow(target, proto);
+      }
+      return object::ObjectSetPrototypeOfDontThrow(target, proto);
+    }
+    label ThrowProxyHandlerRevoked deferred {
+      ThrowTypeError(kProxyRevoked, kTrapName);
+    }
+  }
+}
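
The non-extensible invariant in ProxySetPrototypeOf (steps 10-14 above) behaves as follows; a minimal TypeScript sketch (illustrative):

const frozen = Object.freeze({});  // prototype stays Object.prototype
const p = new Proxy(frozen, {
  setPrototypeOf(): boolean {
    return true;  // claims success
  },
});
// Same prototype: the SameValue check passes.
console.log(Reflect.setPrototypeOf(p, Object.getPrototypeOf(frozen)));  // true
try {
  Object.setPrototypeOf(p, null);  // different prototype: TypeError
} catch (e) {
  console.log(e instanceof TypeError);  // true
}
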
diff --git a/deps/v8/src/builtins/proxy.tq b/deps/v8/src/builtins/proxy.tq
index 16bba85292f898..d95def5d0e3874 100644
--- a/deps/v8/src/builtins/proxy.tq
+++ b/deps/v8/src/builtins/proxy.tq
@@ -7,25 +7,23 @@
 namespace proxy {

   extern macro ProxiesCodeStubAssembler::AllocateProxy(
-      JSReceiver, JSReceiver, Context): JSProxy;
-  macro AllocateProxy(implicit context: Context)(
-      target: JSReceiver, handler: JSReceiver): JSProxy {
-    return AllocateProxy(target, handler, context);
-  }
+      implicit context: Context)(JSReceiver, JSReceiver): JSProxy;

   macro IsRevokedProxy(implicit context: Context)(o: JSReceiver): bool {
     const proxy: JSProxy = Cast<JSProxy>(o) otherwise return false;
-    const handler: JSReceiver =
-        Cast<JSReceiver>(proxy.handler) otherwise return true;
+    Cast<JSReceiver>(proxy.handler) otherwise return true;
     return false;
   }

   extern transitioning macro ProxiesCodeStubAssembler::CheckGetSetTrapResult(
       implicit context:
-          Context)(Object, JSProxy, Name, Object, constexpr int31): Object;
+          Context)(JSReceiver, JSProxy, Name, Object, constexpr int31);
+
+  extern transitioning macro ProxiesCodeStubAssembler::CheckDeleteTrapResult(
+      implicit context: Context)(JSReceiver, JSProxy, Name);

   extern transitioning macro ProxiesCodeStubAssembler::CheckHasTrapResult(
-      implicit context: Context)(Object, JSProxy, Name): Object;
+      implicit context: Context)(JSReceiver, JSProxy, Name);

   const kProxyNonObject: constexpr MessageTemplate
       generates 'MessageTemplate::kProxyNonObject';
@@ -37,6 +35,20 @@ namespace proxy {
       generates 'MessageTemplate::kProxyTrapReturnedFalsishFor';
   const kProxyPrivate: constexpr MessageTemplate
       generates 'MessageTemplate::kProxyPrivate';
+  const kProxyIsExtensibleInconsistent: constexpr MessageTemplate
+      generates 'MessageTemplate::kProxyIsExtensibleInconsistent';
+  const kProxyPreventExtensionsExtensible: constexpr MessageTemplate
+      generates 'MessageTemplate::kProxyPreventExtensionsExtensible';
+  const kProxyTrapReturnedFalsish: constexpr MessageTemplate
+      generates 'MessageTemplate::kProxyTrapReturnedFalsish';
+  const kProxyGetPrototypeOfInvalid: constexpr MessageTemplate
+      generates 'MessageTemplate::kProxyGetPrototypeOfInvalid';
+  const kProxyGetPrototypeOfNonExtensible: constexpr MessageTemplate
+      generates 'MessageTemplate::kProxyGetPrototypeOfNonExtensible';
+  const kProxySetPrototypeOfNonExtensible: constexpr MessageTemplate
+      generates 'MessageTemplate::kProxySetPrototypeOfNonExtensible';
+  const kProxyDeletePropertyNonExtensible: constexpr MessageTemplate
+      generates 'MessageTemplate::kProxyDeletePropertyNonExtensible';

   const kProxyGet: constexpr int31
       generates 'JSProxy::AccessKind::kGet';
diff --git a/deps/v8/src/builtins/reflect.tq b/deps/v8/src/builtins/reflect.tq
new file mode 100644
index 00000000000000..4c25e8338f8883
--- /dev/null
+++ b/deps/v8/src/builtins/reflect.tq
@@ -0,0 +1,82 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+namespace reflect {
+
+  const kCalledOnNonObject: constexpr MessageTemplate
+      generates 'MessageTemplate::kCalledOnNonObject';
+
+  // ES6 section 26.1.10 Reflect.isExtensible
+  transitioning javascript builtin ReflectIsExtensible(
+      js-implicit context: Context)(_receiver: Object, object: Object): Object {
+    const objectJSReceiver = Cast<JSReceiver>(object)
+        otherwise ThrowTypeError(kCalledOnNonObject, 'Reflect.isExtensible');
+    return object::ObjectIsExtensible(objectJSReceiver);
+  }
+
+  // ES6 section 26.1.12 Reflect.preventExtensions
+  transitioning javascript builtin ReflectPreventExtensions(
+      js-implicit context: Context)(_receiver: Object, object: Object): Object {
+    const objectJSReceiver = Cast<JSReceiver>(object)
+        otherwise ThrowTypeError(kCalledOnNonObject, 'Reflect.preventExtensions');
+    return object::ObjectPreventExtensionsDontThrow(objectJSReceiver);
+  }
+
+  // ES6 section 26.1.8 Reflect.getPrototypeOf
+  transitioning javascript builtin ReflectGetPrototypeOf(
+      js-implicit context: Context)(_receiver: Object, object: Object): Object {
+    const objectJSReceiver = Cast<JSReceiver>(object)
+        otherwise ThrowTypeError(kCalledOnNonObject, 'Reflect.getPrototypeOf');
+    return object::JSReceiverGetPrototypeOf(objectJSReceiver);
+  }
+
+  // ES6 section 26.1.14 Reflect.setPrototypeOf
+  transitioning javascript builtin ReflectSetPrototypeOf(
+      js-implicit context:
+          Context)(_receiver: Object, object: Object, proto: Object): Object {
+    const objectJSReceiver = Cast<JSReceiver>(object)
+        otherwise ThrowTypeError(kCalledOnNonObject, 'Reflect.setPrototypeOf');
+    if (proto == Null || Is<JSReceiver>(proto)) {
+      return object::ObjectSetPrototypeOfDontThrow(objectJSReceiver, proto);
+    }
+    ThrowTypeError(kProtoObjectOrNull, proto);
+  }
+
+  extern transitioning builtin ToName(implicit context: Context)(Object): Name;
+  type OnNonExistent constexpr 'OnNonExistent';
+  const kReturnUndefined: constexpr OnNonExistent
+      generates 'OnNonExistent::kReturnUndefined';
+  extern macro SmiConstant(constexpr OnNonExistent): Smi;
+  extern transitioning builtin GetPropertyWithReceiver(
+      implicit context: Context)(Object, Name, Object, Smi): Object;
+
+  // ES6 section 26.1.6 Reflect.get
+  transitioning javascript builtin
+  ReflectGet(js-implicit context: Context)(...arguments): Object {
+    const length = arguments.length;
+    const object: Object = length > 0 ? arguments[0] : Undefined;
+    const objectJSReceiver = Cast<JSReceiver>(object)
+        otherwise ThrowTypeError(kCalledOnNonObject, 'Reflect.get');
+    const propertyKey: Object = length > 1 ? arguments[1] : Undefined;
+    const name: Name = ToName(propertyKey);
+    const receiver: Object = length > 2 ? arguments[2] : objectJSReceiver;
+    return GetPropertyWithReceiver(
+        objectJSReceiver, name, receiver, SmiConstant(kReturnUndefined));
+  }
+
+  // ES6 section 26.1.4 Reflect.deleteProperty
+  transitioning javascript builtin ReflectDeleteProperty(
+      js-implicit context:
+          Context)(_receiver: Object, object: Object, key: Object): Object {
+    const objectJSReceiver = Cast<JSReceiver>(object)
+        otherwise ThrowTypeError(kCalledOnNonObject, 'Reflect.deleteProperty');
+    const name: Name = ToName(key);
+    if (IsPrivateSymbol(name)) {
+      return DeleteProperty(objectJSReceiver, name, kSloppy);
+    }
+    const proxy = Cast<JSProxy>(objectJSReceiver)
+        otherwise return DeleteProperty(objectJSReceiver, name, kSloppy);
+    return proxy::ProxyDeleteProperty(proxy, name, kSloppy);
+  }
+}  // namespace reflect
diff --git a/deps/v8/src/builtins/regexp-replace.tq b/deps/v8/src/builtins/regexp-replace.tq
index 9b95f99f416de2..cb0038c6b61722 100644
--- a/deps/v8/src/builtins/regexp-replace.tq
+++ b/deps/v8/src/builtins/regexp-replace.tq
@@ -22,7 +22,7 @@ namespace regexp_replace {
       String, JSRegExp, Callable): String;

   extern macro
-  RegExpBuiltinsAssembler::AdvanceStringIndexFast(String, Number, bool): Smi;
+  RegExpBuiltinsAssembler::AdvanceStringIndexFast(String, Smi, bool): Smi;
   extern macro
   RegExpBuiltinsAssembler::RegExpPrototypeExecBodyWithoutResultFast(
       implicit context: Context)(JSReceiver, String):
@@ -72,8 +72,7 @@ namespace regexp_replace {

   transitioning macro
   RegExpReplaceCallableWithExplicitCaptures(implicit context: Context)(
-      matchesElements: FixedArray, matchesLength: intptr, string: String,
-      replaceFn: Callable) {
+      matchesElements: FixedArray, matchesLength: intptr, replaceFn: Callable) {
     for (let i: intptr = 0; i < matchesLength; i++) {
       const elArray =
           Cast<JSArray>(matchesElements.objects[i]) otherwise continue;
@@ -124,7 +123,7 @@ namespace regexp_replace {
           matchesElements, matchesLengthInt, string, replaceFn);
     } else {
       RegExpReplaceCallableWithExplicitCaptures(
-          matchesElements, matchesLengthInt, string, replaceFn);
+          matchesElements, matchesLengthInt, replaceFn);
     }

     return StringBuilderConcat(matches, matchesLength, string);
@@ -138,7 +137,7 @@ namespace regexp_replace {
     let result: String = kEmptyString;
     let lastMatchEnd: Smi = 0;
     let unicode: bool = false;
-    let replaceLength: Smi = replaceString.length_smi;
+    const replaceLength: Smi = replaceString.length_smi;
     const global: bool = regexp.global;

     if (global) {
@@ -209,7 +208,7 @@ namespace regexp_replace {
   }

   transitioning javascript builtin RegExpPrototypeReplace(
-      context: Context, receiver: Object, ...arguments): Object {
+      js-implicit context: Context, receiver: Object)(...arguments): Object {
     const methodName: constexpr string = 'RegExp.prototype.@@replace';

     // RegExpPrototypeReplace is a bit of a beast - a summary of dispatch logic:
diff --git a/deps/v8/src/builtins/s390/builtins-s390.cc b/deps/v8/src/builtins/s390/builtins-s390.cc
index bf8c0cb68a8bf0..854f31cece3cd8 100644
--- a/deps/v8/src/builtins/s390/builtins-s390.cc
+++ b/deps/v8/src/builtins/s390/builtins-s390.cc
@@ -60,24 +60,20 @@ void Builtins::Generate_InternalArrayConstructor(MacroAssembler* masm) {
 static void GenerateTailCallToReturnedCode(MacroAssembler* masm,
                                            Runtime::FunctionId function_id) {
   // ----------- S t a t e -------------
-  //  -- r2 : argument count (preserved for callee)
   //  -- r3 : target function (preserved for callee)
   //  -- r5 : new target (preserved for callee)
   // -----------------------------------
   {
     FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
-    // Push the number of
arguments to the callee. // Push a copy of the target function and the new target. // Push function as parameter to the runtime call. - __ SmiTag(r2); - __ Push(r2, r3, r5, r3); + __ Push(r3, r5, r3); __ CallRuntime(function_id, 1); __ LoadRR(r4, r2); // Restore target function and new target. - __ Pop(r2, r3, r5); - __ SmiUntag(r2); + __ Pop(r3, r5); } static_assert(kJavaScriptCallCodeStartRegister == r4, "ABI mismatch"); __ JumpCodeObject(r4); @@ -110,6 +106,7 @@ void Generate_JSBuiltinsConstructStubHelper(MacroAssembler* masm) { // -- sp[...]: constructor arguments // ----------------------------------- + Register scratch = r4; Label stack_overflow; Generate_StackOverflowCheck(masm, r2, r7, &stack_overflow); @@ -138,13 +135,13 @@ void Generate_JSBuiltinsConstructStubHelper(MacroAssembler* masm) { // sp[2]: number of arguments (smi-tagged) Label loop, no_args; __ beq(&no_args); - __ ShiftLeftP(ip, r2, Operand(kPointerSizeLog2)); - __ SubP(sp, sp, ip); + __ ShiftLeftP(scratch, r2, Operand(kPointerSizeLog2)); + __ SubP(sp, sp, scratch); __ LoadRR(r1, r2); __ bind(&loop); - __ lay(ip, MemOperand(ip, -kPointerSize)); - __ LoadP(r0, MemOperand(ip, r6)); - __ StoreP(r0, MemOperand(ip, sp)); + __ lay(scratch, MemOperand(scratch, -kPointerSize)); + __ LoadP(r0, MemOperand(scratch, r6)); + __ StoreP(r0, MemOperand(scratch, sp)); __ BranchOnCount(r1, &loop); __ bind(&no_args); @@ -159,15 +156,15 @@ void Generate_JSBuiltinsConstructStubHelper(MacroAssembler* masm) { // Restore context from the frame. __ LoadP(cp, MemOperand(fp, ConstructFrameConstants::kContextOffset)); // Restore smi-tagged arguments count from the frame. - __ LoadP(r3, MemOperand(fp, ConstructFrameConstants::kLengthOffset)); + __ LoadP(scratch, MemOperand(fp, ConstructFrameConstants::kLengthOffset)); // Leave construct frame. } // Remove caller arguments from the stack and return. STATIC_ASSERT(kSmiTagSize == 1 && kSmiTag == 0); - __ SmiToPtrArrayOffset(r3, r3); - __ AddP(sp, sp, r3); + __ SmiToPtrArrayOffset(scratch, scratch); + __ AddP(sp, sp, scratch); __ AddP(sp, sp, Operand(kPointerSize)); __ Ret(); @@ -296,13 +293,13 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) { __ ltgr(r2, r2); __ beq(&no_args); - __ ShiftLeftP(ip, r2, Operand(kPointerSizeLog2)); - __ SubP(sp, sp, ip); + __ ShiftLeftP(r8, r2, Operand(kPointerSizeLog2)); + __ SubP(sp, sp, r8); __ LoadRR(r1, r2); __ bind(&loop); - __ lay(ip, MemOperand(ip, -kPointerSize)); - __ LoadP(r0, MemOperand(ip, r6)); - __ StoreP(r0, MemOperand(ip, sp)); + __ lay(r8, MemOperand(r8, -kPointerSize)); + __ LoadP(r0, MemOperand(r8, r6)); + __ StoreP(r0, MemOperand(r8, sp)); __ BranchOnCount(r1, &loop); __ bind(&no_args); @@ -409,11 +406,13 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) { // Flood function if we are stepping. Label prepare_step_in_if_stepping, prepare_step_in_suspended_generator; Label stepping_prepared; + Register scratch = r7; + ExternalReference debug_hook = ExternalReference::debug_hook_on_function_call_address(masm->isolate()); - __ Move(ip, debug_hook); - __ LoadB(ip, MemOperand(ip)); - __ CmpSmiLiteral(ip, Smi::zero(), r0); + __ Move(scratch, debug_hook); + __ LoadB(scratch, MemOperand(scratch)); + __ CmpSmiLiteral(scratch, Smi::zero(), r0); __ bne(&prepare_step_in_if_stepping); // Flood function if we need to continue stepping in the suspended generator. 
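
(Stepping back to reflect.tq above: ReflectGet forwards its optional third argument into GetPropertyWithReceiver, which is what lets a getter run with a caller-chosen `this`. A minimal TypeScript sketch, illustrative only:)

const source = {
  get answer(): number {
    return (this as { base: number }).base + 2;
  },
};
// The third argument becomes the receiver seen by the getter.
console.log(Reflect.get(source, 'answer', { base: 40 }));  // 42
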
@@ -421,9 +420,9 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
   ExternalReference debug_suspended_generator =
       ExternalReference::debug_suspended_generator_address(masm->isolate());

-  __ Move(ip, debug_suspended_generator);
-  __ LoadP(ip, MemOperand(ip));
-  __ CmpP(ip, r3);
+  __ Move(scratch, debug_suspended_generator);
+  __ LoadP(scratch, MemOperand(scratch));
+  __ CmpP(scratch, r3);
   __ beq(&prepare_step_in_suspended_generator);
   __ bind(&stepping_prepared);

@@ -434,8 +433,8 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
   __ blt(&stack_overflow);

   // Push receiver.
-  __ LoadP(ip, FieldMemOperand(r3, JSGeneratorObject::kReceiverOffset));
-  __ Push(ip);
+  __ LoadP(scratch, FieldMemOperand(r3, JSGeneratorObject::kReceiverOffset));
+  __ Push(scratch);

   // ----------- S t a t e -------------
   //  -- r3    : the JSGeneratorObject to resume
@@ -626,6 +625,9 @@ void Generate_JSEntryVariant(MacroAssembler* masm, StackFrame::Type type,
                                  IsolateAddressId::kCEntryFPAddress, masm->isolate()));
   __ LoadP(r6, MemOperand(r6));
   __ StoreMultipleP(r6, r9, MemOperand(sp, kPointerSize));
+
+  Register scratch = r8;
+
   // Set up frame pointer for the frame to be pushed.
   // Need to add kPointerSize, because sp has one extra
   // frame already for the frame type being pushed later.
@@ -642,17 +644,17 @@ void Generate_JSEntryVariant(MacroAssembler* masm, StackFrame::Type type,
       ExternalReference::Create(IsolateAddressId::kJSEntrySPAddress,
                                 masm->isolate());
   __ Move(r7, js_entry_sp);
-  __ LoadAndTestP(r8, MemOperand(r7));
+  __ LoadAndTestP(scratch, MemOperand(r7));
   __ bne(&non_outermost_js, Label::kNear);
   __ StoreP(fp, MemOperand(r7));
-  __ Load(ip, Operand(StackFrame::OUTERMOST_JSENTRY_FRAME));
+  __ Load(scratch, Operand(StackFrame::OUTERMOST_JSENTRY_FRAME));
   Label cont;
   __ b(&cont, Label::kNear);
   __ bind(&non_outermost_js);
-  __ Load(ip, Operand(StackFrame::INNER_JSENTRY_FRAME));
+  __ Load(scratch, Operand(StackFrame::INNER_JSENTRY_FRAME));
   __ bind(&cont);
-  __ StoreP(ip, MemOperand(sp));  // frame-type
+  __ StoreP(scratch, MemOperand(sp));  // frame-type

   // Jump to a faked try block that does the invoke, with a faked catch
   // block that sets the pending exception.
@@ -668,10 +670,11 @@ void Generate_JSEntryVariant(MacroAssembler* masm, StackFrame::Type type,
   // field in the JSEnv and return a failure sentinel.  Coming in here the
   // fp will be invalid because the PushStackHandler below sets it to 0 to
   // signal the existence of the JSEntry frame.
-  __ Move(ip, ExternalReference::Create(
-                  IsolateAddressId::kPendingExceptionAddress, masm->isolate()));
+  __ Move(scratch,
+          ExternalReference::Create(IsolateAddressId::kPendingExceptionAddress,
+                                    masm->isolate()));

-  __ StoreP(r2, MemOperand(ip));
+  __ StoreP(r2, MemOperand(scratch));
   __ LoadRoot(r2, RootIndex::kException);
   __ b(&exit, Label::kNear);

@@ -704,16 +707,16 @@ void Generate_JSEntryVariant(MacroAssembler* masm, StackFrame::Type type,
   __ pop(r7);
   __ CmpP(r7, Operand(StackFrame::OUTERMOST_JSENTRY_FRAME));
   __ bne(&non_outermost_js_2, Label::kNear);
-  __ mov(r8, Operand::Zero());
+  __ mov(scratch, Operand::Zero());
   __ Move(r7, js_entry_sp);
-  __ StoreP(r8, MemOperand(r7));
+  __ StoreP(scratch, MemOperand(r7));
   __ bind(&non_outermost_js_2);

   // Restore the top frame descriptors from the stack.
   __ pop(r5);
-  __ Move(ip, ExternalReference::Create(
-                  IsolateAddressId::kCEntryFPAddress, masm->isolate()));
-  __ StoreP(r5, MemOperand(ip));
+  __ Move(scratch, ExternalReference::Create(IsolateAddressId::kCEntryFPAddress,
+                                             masm->isolate()));
+  __ StoreP(r5, MemOperand(scratch));

   // Reset the stack to the callee saved registers.
   __ lay(sp, MemOperand(sp, -EntryFrameConstants::kCallerFPOffset));
@@ -949,13 +952,11 @@ static void MaybeTailCallOptimizedCodeSlot(MacroAssembler* masm,
                                            Register scratch1, Register scratch2,
                                            Register scratch3) {
   // ----------- S t a t e -------------
-  //  -- r0 : argument count (preserved for callee if needed, and caller)
-  //  -- r3 : new target (preserved for callee if needed, and caller)
-  //  -- r1 : target function (preserved for callee if needed, and caller)
+  //  -- r5 : new target (preserved for callee if needed, and caller)
+  //  -- r3 : target function (preserved for callee if needed, and caller)
   //  -- feedback vector (preserved for caller if needed)
   // -----------------------------------
-  DCHECK(
-      !AreAliased(feedback_vector, r2, r3, r5, scratch1, scratch2, scratch3));
+  DCHECK(!AreAliased(feedback_vector, r3, r5, scratch1, scratch2, scratch3));

   Label optimized_code_slot_is_weak_ref, fallthrough;

@@ -1140,6 +1141,15 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
              FieldMemOperand(closure, JSFunction::kFeedbackCellOffset));
   __ LoadP(feedback_vector,
            FieldMemOperand(feedback_vector, Cell::kValueOffset));
+
+  Label push_stack_frame;
+  // Check if feedback vector is valid. If valid, check for optimized code
+  // and update invocation count. Otherwise, setup the stack frame.
+  __ LoadP(r6, FieldMemOperand(feedback_vector, HeapObject::kMapOffset));
+  __ LoadLogicalHalfWordP(r6, FieldMemOperand(r6, Map::kInstanceTypeOffset));
+  __ CmpP(r6, Operand(FEEDBACK_VECTOR_TYPE));
+  __ bne(&push_stack_frame);
+
   // Read off the optimized code slot in the feedback vector, and if there
   // is optimized code or an optimization marker, call that instead.
   MaybeTailCallOptimizedCodeSlot(masm, feedback_vector, r6, r8, r7);
@@ -1154,6 +1164,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
   // Open a frame scope to indicate that there is a frame on the stack.  The
   // MANUAL indicates that the scope shouldn't actually generate code to set up
   // the frame (that is done below).
+  __ bind(&push_stack_frame);
   FrameScope frame_scope(masm, StackFrame::MANUAL);
   __ PushStandardFrame(closure);

@@ -1161,12 +1172,12 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
   // 8-bit fields next to each other, so we could just optimize by writing a
   // 16-bit. These static asserts guard our assumption is valid.
   STATIC_ASSERT(BytecodeArray::kBytecodeAgeOffset ==
-                BytecodeArray::kOSRNestingLevelOffset + kCharSize);
+                BytecodeArray::kOsrNestingLevelOffset + kCharSize);
   STATIC_ASSERT(BytecodeArray::kNoAgeBytecodeAge == 0);
   __ lghi(r1, Operand(0));
   __ StoreHalfWord(r1,
                    FieldMemOperand(kInterpreterBytecodeArrayRegister,
-                                   BytecodeArray::kOSRNestingLevelOffset),
+                                   BytecodeArray::kOsrNestingLevelOffset),
                    r0);

   // Load the initial bytecode offset.
@@ -1447,11 +1458,13 @@ static void Generate_InterpreterEnterBytecode(MacroAssembler* masm) {
   __ SmiUntag(kInterpreterBytecodeOffsetRegister);

   // Dispatch to the target bytecode.
- __ LoadlB(ip, MemOperand(kInterpreterBytecodeArrayRegister, - kInterpreterBytecodeOffsetRegister)); - __ ShiftLeftP(ip, ip, Operand(kPointerSizeLog2)); + UseScratchRegisterScope temps(masm); + Register scratch = temps.Acquire(); + __ LoadlB(scratch, MemOperand(kInterpreterBytecodeArrayRegister, + kInterpreterBytecodeOffsetRegister)); + __ ShiftLeftP(scratch, scratch, Operand(kPointerSizeLog2)); __ LoadP(kJavaScriptCallCodeStartRegister, - MemOperand(kInterpreterDispatchTableRegister, ip)); + MemOperand(kInterpreterDispatchTableRegister, scratch)); __ Jump(kJavaScriptCallCodeStartRegister); } @@ -1578,13 +1591,17 @@ void Generate_ContinueToBuiltinHelper(MacroAssembler* masm, __ LoadP( fp, MemOperand(sp, BuiltinContinuationFrameConstants::kFixedFrameSizeFromFp)); - __ Pop(ip); + // Load builtin index (stored as a Smi) and use it to get the builtin start + // address from the builtins table. + UseScratchRegisterScope temps(masm); + Register builtin = temps.Acquire(); + __ Pop(builtin); __ AddP(sp, sp, Operand(BuiltinContinuationFrameConstants::kFixedFrameSizeFromFp)); __ Pop(r0); __ LoadRR(r14, r0); - __ AddP(ip, ip, Operand(Code::kHeaderSize - kHeapObjectTag)); - __ Jump(ip); + __ LoadEntryFromBuiltinIndex(builtin); + __ Jump(builtin); } } // namespace @@ -1745,13 +1762,14 @@ void Builtins::Generate_FunctionPrototypeCall(MacroAssembler* masm) { // r2: actual number of arguments // r3: callable { + Register scratch = r5; Label loop; // Calculate the copy start address (destination). Copy end address is sp. __ AddP(r4, sp, r4); __ bind(&loop); - __ LoadP(ip, MemOperand(r4, -kPointerSize)); - __ StoreP(ip, MemOperand(r4)); + __ LoadP(scratch, MemOperand(r4, -kPointerSize)); + __ StoreP(scratch, MemOperand(r4)); __ SubP(r4, Operand(kPointerSize)); __ CmpP(r4, sp); __ bne(&loop); @@ -1944,7 +1962,7 @@ void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm, // Check for stack overflow. Label stack_overflow; - Generate_StackOverflowCheck(masm, r6, ip, &stack_overflow); + Generate_StackOverflowCheck(masm, r6, scratch, &stack_overflow); // Push arguments onto the stack (thisArgument is already on the stack). { @@ -1955,13 +1973,13 @@ void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm, Operand(FixedArray::kHeaderSize - kHeapObjectTag - kPointerSize)); __ LoadRR(r1, r6); __ bind(&loop); - __ LoadP(ip, MemOperand(r4, kPointerSize)); + __ LoadP(scratch, MemOperand(r4, kPointerSize)); __ la(r4, MemOperand(r4, kPointerSize)); - __ CompareRoot(ip, RootIndex::kTheHoleValue); + __ CompareRoot(scratch, RootIndex::kTheHoleValue); __ bne(&skip, Label::kNear); - __ LoadRoot(ip, RootIndex::kUndefinedValue); + __ LoadRoot(scratch, RootIndex::kUndefinedValue); __ bind(&skip); - __ push(ip); + __ push(scratch); __ BranchOnCount(r1, &loop); __ bind(&no_args); __ AddP(r2, r2, r6); @@ -2007,8 +2025,10 @@ void Builtins::Generate_CallOrConstructForwardVarargs(MacroAssembler* masm, // Check if we have an arguments adaptor frame below the function frame. 
Label arguments_adaptor, arguments_done; __ LoadP(r6, MemOperand(fp, StandardFrameConstants::kCallerFPOffset)); - __ LoadP(ip, MemOperand(r6, CommonFrameConstants::kContextOrFrameTypeOffset)); - __ CmpP(ip, Operand(StackFrame::TypeToMarker(StackFrame::ARGUMENTS_ADAPTOR))); + __ LoadP(scratch, + MemOperand(r6, CommonFrameConstants::kContextOrFrameTypeOffset)); + __ CmpP(scratch, + Operand(StackFrame::TypeToMarker(StackFrame::ARGUMENTS_ADAPTOR))); __ beq(&arguments_adaptor); { __ LoadP(r7, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset)); @@ -2042,9 +2062,9 @@ void Builtins::Generate_CallOrConstructForwardVarargs(MacroAssembler* masm, __ AddP(r2, r2, r7); __ bind(&loop); { - __ ShiftLeftP(ip, r7, Operand(kPointerSizeLog2)); - __ LoadP(ip, MemOperand(r6, ip)); - __ push(ip); + __ ShiftLeftP(scratch, r7, Operand(kPointerSizeLog2)); + __ LoadP(scratch, MemOperand(r6, scratch)); + __ push(scratch); __ SubP(r7, r7, Operand(1)); __ CmpP(r7, Operand::Zero()); __ bne(&loop); @@ -2189,10 +2209,11 @@ void Generate_PushBoundArguments(MacroAssembler* masm) { // -- r6 : the number of [[BoundArguments]] // ----------------------------------- + Register scratch = r8; // Reserve stack space for the [[BoundArguments]]. { Label done; - __ LoadRR(r8, sp); // preserve previous stack pointer + __ LoadRR(scratch, sp); // preserve previous stack pointer __ ShiftLeftP(r9, r6, Operand(kPointerSizeLog2)); __ SubP(sp, sp, r9); // Check the stack for overflow. We are not trying to catch interruptions @@ -2201,7 +2222,7 @@ void Generate_PushBoundArguments(MacroAssembler* masm) { __ CompareRoot(sp, RootIndex::kRealStackLimit); __ bgt(&done); // Signed comparison. // Restore the stack pointer. - __ LoadRR(sp, r8); + __ LoadRR(sp, scratch); { FrameScope scope(masm, StackFrame::MANUAL); __ EnterFrame(StackFrame::INTERNAL); @@ -2221,7 +2242,7 @@ void Generate_PushBoundArguments(MacroAssembler* masm) { __ beq(&skip); __ LoadRR(r1, r2); __ bind(&loop); - __ LoadP(r0, MemOperand(r8, r7)); + __ LoadP(r0, MemOperand(scratch, r7)); __ StoreP(r0, MemOperand(sp, r7)); __ AddP(r7, r7, Operand(kPointerSize)); __ BranchOnCount(r1, &loop); @@ -2257,9 +2278,9 @@ void Builtins::Generate_CallBoundFunctionImpl(MacroAssembler* masm) { __ AssertBoundFunction(r3); // Patch the receiver to [[BoundThis]]. - __ LoadP(ip, FieldMemOperand(r3, JSBoundFunction::kBoundThisOffset)); + __ LoadP(r5, FieldMemOperand(r3, JSBoundFunction::kBoundThisOffset)); __ ShiftLeftP(r1, r2, Operand(kPointerSizeLog2)); - __ StoreP(ip, MemOperand(sp, r1)); + __ StoreP(r5, MemOperand(sp, r1)); // Push the [[BoundArguments]] onto the stack. Generate_PushBoundArguments(masm); @@ -2749,7 +2770,7 @@ void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size, __ CompareRoot(r1, RootIndex::kTheHoleValue); // Cannot use check here as it attempts to generate call into runtime. 
__ beq(&okay, Label::kNear); - __ stop("Unexpected pending exception"); + __ stop(); __ bind(&okay); } @@ -3000,13 +3021,22 @@ static void CallApiFunctionAndReturn(MacroAssembler* masm, __ LoadlB(scratch, MemOperand(scratch, 0)); __ CmpP(scratch, Operand::Zero()); - Label profiler_disabled; - Label end_profiler_check; - __ beq(&profiler_disabled, Label::kNear); - __ Move(scratch, thunk_ref); - __ b(&end_profiler_check, Label::kNear); - __ bind(&profiler_disabled); - __ LoadRR(scratch, function_address); + Label profiler_enabled, end_profiler_check; + __ bne(&profiler_enabled, Label::kNear); + __ Move(scratch, ExternalReference::address_of_runtime_stats_flag()); + __ LoadlW(scratch, MemOperand(scratch, 0)); + __ CmpP(scratch, Operand::Zero()); + __ bne(&profiler_enabled, Label::kNear); + { + // Call the api function directly. + __ LoadRR(scratch, function_address); + __ b(&end_profiler_check, Label::kNear); + } + __ bind(&profiler_enabled); + { + // Additional parameter is the address of the actual callback. + __ Move(scratch, thunk_ref); + } __ bind(&end_profiler_check); // Allocate HandleScope in callee-save registers. @@ -3304,7 +3334,7 @@ void Builtins::Generate_CallApiGetter(MacroAssembler* masm) { void Builtins::Generate_DirectCEntry(MacroAssembler* masm) { // Unused. - __ stop(0); + __ stop(); } #undef __ diff --git a/deps/v8/src/builtins/setup-builtins-internal.cc b/deps/v8/src/builtins/setup-builtins-internal.cc index e3403c601d6c0c..7188eb04a8c8bd 100644 --- a/deps/v8/src/builtins/setup-builtins-internal.cc +++ b/deps/v8/src/builtins/setup-builtins-internal.cc @@ -157,10 +157,7 @@ Code BuildWithCodeStubAssemblerJS(Isolate* isolate, int32_t builtin_index, // to code targets without dereferencing their handles. CanonicalHandleScope canonical(isolate); - SegmentSize segment_size = isolate->serializer_enabled() - ? SegmentSize::kLarge - : SegmentSize::kDefault; - Zone zone(isolate->allocator(), ZONE_NAME, segment_size); + Zone zone(isolate->allocator(), ZONE_NAME); const int argc_with_recv = (argc == SharedFunctionInfo::kDontAdaptArgumentsSentinel) ? 0 : argc + 1; compiler::CodeAssemblerState state( @@ -181,10 +178,7 @@ Code BuildWithCodeStubAssemblerCS(Isolate* isolate, int32_t builtin_index, // Canonicalize handles, so that we can share constant pool entries pointing // to code targets without dereferencing their handles. CanonicalHandleScope canonical(isolate); - SegmentSize segment_size = isolate->serializer_enabled() - ? SegmentSize::kLarge - : SegmentSize::kDefault; - Zone zone(isolate->allocator(), ZONE_NAME, segment_size); + Zone zone(isolate->allocator(), ZONE_NAME); // The interface descriptor with given key must be initialized at this point // and this construction just queries the details from the descriptors table. 
CallInterfaceDescriptor descriptor(interface_descriptor); @@ -232,9 +226,9 @@ void SetupIsolateDelegate::ReplacePlaceholders(Isolate* isolate) { RelocInfo::ModeMask(RelocInfo::FULL_EMBEDDED_OBJECT) | RelocInfo::ModeMask(RelocInfo::COMPRESSED_EMBEDDED_OBJECT) | RelocInfo::ModeMask(RelocInfo::RELATIVE_CODE_TARGET); - HeapIterator iterator(isolate->heap()); - for (HeapObject obj = iterator.next(); !obj.is_null(); - obj = iterator.next()) { + HeapObjectIterator iterator(isolate->heap()); + for (HeapObject obj = iterator.Next(); !obj.is_null(); + obj = iterator.Next()) { if (!obj.IsCode()) continue; Code code = Code::cast(obj); bool flush_icache = false; @@ -282,10 +276,6 @@ Code GenerateBytecodeHandler(Isolate* isolate, int builtin_index, } // namespace -#ifdef _MSC_VER -#pragma optimize( "", off ) -#endif - // static void SetupIsolateDelegate::SetupBuiltinsInternal(Isolate* isolate) { Builtins* builtins = isolate->builtins(); @@ -363,10 +353,5 @@ void SetupIsolateDelegate::SetupBuiltinsInternal(Isolate* isolate) { builtins->MarkInitialized(); } -#ifdef _MSC_VER -#pragma optimize( "", on ) -#endif - - } // namespace internal } // namespace v8 diff --git a/deps/v8/src/builtins/string-endswith.tq b/deps/v8/src/builtins/string-endswith.tq index 16405d4c1267cb..8b9fe84dfb759b 100644 --- a/deps/v8/src/builtins/string-endswith.tq +++ b/deps/v8/src/builtins/string-endswith.tq @@ -28,12 +28,13 @@ namespace string { // https://tc39.github.io/ecma262/#sec-string.prototype.endswith transitioning javascript builtin StringPrototypeEndsWith( - context: Context, receiver: Object, ...arguments): Boolean { + js-implicit context: Context, receiver: Object)(...arguments): Boolean { const searchString: Object = arguments[0]; const endPosition: Object = arguments[1]; + const kBuiltinName: constexpr string = 'String.prototype.endsWith'; // 1. Let O be ? RequireObjectCoercible(this value). - const object: Object = RequireObjectCoercible(receiver); + const object: Object = RequireObjectCoercible(receiver, kBuiltinName); // 2. Let S be ? ToString(O). const string: String = ToString_Inline(context, object); @@ -41,7 +42,7 @@ namespace string { // 3. Let isRegExp be ? IsRegExp(searchString). // 4. If isRegExp is true, throw a TypeError exception. if (IsRegExp(searchString)) { - ThrowTypeError(kFirstArgumentNotRegExp, 'String.prototype.endsWith'); + ThrowTypeError(kFirstArgumentNotRegExp, kBuiltinName); } // 5. Let searchStr be ? ToString(searchString). @@ -63,7 +64,7 @@ namespace string { const searchLength: Smi = searchStr.length_smi; // 10. Let start be end - searchLength. - let start = end - searchLength; + const start = end - searchLength; // 11. If start is less than 0, return false. 
if (start < 0) return False; diff --git a/deps/v8/src/builtins/string-html.tq b/deps/v8/src/builtins/string-html.tq index a2b162520666ec..80b5f778877bd4 100644 --- a/deps/v8/src/builtins/string-html.tq +++ b/deps/v8/src/builtins/string-html.tq @@ -22,22 +22,23 @@ namespace string_html { // https://tc39.github.io/ecma262/#sec-string.prototype.anchor transitioning javascript builtin StringPrototypeAnchor( - context: Context, receiver: Object, ...arguments): String { + js-implicit context: Context, receiver: Object)(...arguments): String { return CreateHTML( receiver, 'String.prototype.anchor', 'a', 'name', arguments[0]); } // https://tc39.github.io/ecma262/#sec-string.prototype.big transitioning javascript builtin - StringPrototypeBig(context: Context, receiver: Object, ...arguments): String { + StringPrototypeBig(js-implicit context: Context, receiver: Object)( + ...arguments): String { return CreateHTML( receiver, 'String.prototype.big', 'big', kEmptyString, kEmptyString); } // https://tc39.github.io/ecma262/#sec-string.prototype.blink transitioning javascript builtin - StringPrototypeBlink(context: Context, receiver: Object, ...arguments): - String { + StringPrototypeBlink(js-implicit context: Context, receiver: Object)( + ...arguments): String { return CreateHTML( receiver, 'String.prototype.blink', 'blink', kEmptyString, kEmptyString); @@ -45,56 +46,56 @@ namespace string_html { // https://tc39.github.io/ecma262/#sec-string.prototype.bold transitioning javascript builtin - StringPrototypeBold(context: Context, receiver: Object, ...arguments): - String { + StringPrototypeBold(js-implicit context: Context, receiver: Object)( + ...arguments): String { return CreateHTML( receiver, 'String.prototype.bold', 'b', kEmptyString, kEmptyString); } // https://tc39.github.io/ecma262/#sec-string.prototype.fontcolor transitioning javascript builtin - StringPrototypeFontcolor(context: Context, receiver: Object, ...arguments): - String { + StringPrototypeFontcolor(js-implicit context: Context, receiver: Object)( + ...arguments): String { return CreateHTML( receiver, 'String.prototype.fontcolor', 'font', 'color', arguments[0]); } // https://tc39.github.io/ecma262/#sec-string.prototype.fontsize transitioning javascript builtin - StringPrototypeFontsize(context: Context, receiver: Object, ...arguments): - String { + StringPrototypeFontsize(js-implicit context: Context, receiver: Object)( + ...arguments): String { return CreateHTML( receiver, 'String.prototype.fontsize', 'font', 'size', arguments[0]); } // https://tc39.github.io/ecma262/#sec-string.prototype.fixed transitioning javascript builtin - StringPrototypeFixed(context: Context, receiver: Object, ...arguments): - String { + StringPrototypeFixed(js-implicit context: Context, receiver: Object)( + ...arguments): String { return CreateHTML( receiver, 'String.prototype.fixed', 'tt', kEmptyString, kEmptyString); } // https://tc39.github.io/ecma262/#sec-string.prototype.italics transitioning javascript builtin - StringPrototypeItalics(context: Context, receiver: Object, ...arguments): - String { + StringPrototypeItalics(js-implicit context: Context, receiver: Object)( + ...arguments): String { return CreateHTML( receiver, 'String.prototype.italics', 'i', kEmptyString, kEmptyString); } // https://tc39.github.io/ecma262/#sec-string.prototype.link transitioning javascript builtin - StringPrototypeLink(context: Context, receiver: Object, ...arguments): - String { + StringPrototypeLink(js-implicit context: Context, receiver: Object)( + ...arguments): String { 
     return CreateHTML(
         receiver, 'String.prototype.link', 'a', 'href', arguments[0]);
   }

   // https://tc39.github.io/ecma262/#sec-string.prototype.small
   transitioning javascript builtin
-  StringPrototypeSmall(context: Context, receiver: Object, ...arguments):
-      String {
+  StringPrototypeSmall(js-implicit context: Context, receiver: Object)(
+      ...arguments): String {
     return CreateHTML(
         receiver, 'String.prototype.small', 'small', kEmptyString,
         kEmptyString);
@@ -102,8 +103,8 @@ namespace string_html {

   // https://tc39.github.io/ecma262/#sec-string.prototype.strike
   transitioning javascript builtin
-  StringPrototypeStrike(context: Context, receiver: Object, ...arguments):
-      String {
+  StringPrototypeStrike(js-implicit context: Context, receiver: Object)(
+      ...arguments): String {
     return CreateHTML(
         receiver, 'String.prototype.strike', 'strike', kEmptyString,
         kEmptyString);
@@ -111,14 +112,16 @@ namespace string_html {

   // https://tc39.github.io/ecma262/#sec-string.prototype.sub
   transitioning javascript builtin
-  StringPrototypeSub(context: Context, receiver: Object, ...arguments): String {
+  StringPrototypeSub(js-implicit context: Context, receiver: Object)(
+      ...arguments): String {
     return CreateHTML(
         receiver, 'String.prototype.sub', 'sub', kEmptyString, kEmptyString);
   }

   // https://tc39.github.io/ecma262/#sec-string.prototype.sup
   transitioning javascript builtin
-  StringPrototypeSup(context: Context, receiver: Object, ...arguments): String {
+  StringPrototypeSup(js-implicit context: Context, receiver: Object)(
+      ...arguments): String {
     return CreateHTML(
         receiver, 'String.prototype.sup', 'sup', kEmptyString, kEmptyString);
   }
diff --git a/deps/v8/src/builtins/string-iterator.tq b/deps/v8/src/builtins/string-iterator.tq
index f5c6099c255f4d..5b8f864661295a 100644
--- a/deps/v8/src/builtins/string-iterator.tq
+++ b/deps/v8/src/builtins/string-iterator.tq
@@ -17,7 +17,7 @@ namespace string_iterator {

   // ES6 #sec-string.prototype-@@iterator
   transitioning javascript builtin StringPrototypeIterator(
-      implicit context: Context)(receiver: Object): JSStringIterator {
+      js-implicit context: Context)(receiver: Object): JSStringIterator {
     const name: String =
         ToThisString(receiver, 'String.prototype[Symbol.iterator]');
     const index: Smi = 0;
@@ -26,7 +26,7 @@ namespace string_iterator {

   // ES6 #sec-%stringiteratorprototype%.next
   transitioning javascript builtin StringIteratorPrototypeNext(
-      implicit context: Context)(receiver: Object): JSIteratorResult {
+      js-implicit context: Context)(receiver: Object): JSObject {
     const iterator = Cast<JSStringIterator>(receiver) otherwise ThrowTypeError(
         kIncompatibleMethodReceiver, 'String Iterator.prototype.next',
         receiver);
@@ -34,13 +34,13 @@ namespace string_iterator {
     const position: intptr = SmiUntag(iterator.next_index);
     const length: intptr = string.length_intptr;
     if (position >= length) {
-      return NewJSIteratorResult(Undefined, True);
+      return AllocateJSIteratorResult(Undefined, True);
     }

     // Move to next codepoint.
const encoding = UTF16; const ch = string::LoadSurrogatePairAt(string, length, position, encoding); - const value: String = string::StringFromSingleCodePoint(ch, encoding); + const value: String = string::StringFromSingleUTF16EncodedCodePoint(ch); iterator.next_index = SmiTag(position + value.length_intptr); - return NewJSIteratorResult(value, False); + return AllocateJSIteratorResult(value, False); } } diff --git a/deps/v8/src/builtins/string-repeat.tq b/deps/v8/src/builtins/string-repeat.tq index f2590011ea2b07..0d9d4ee4982d50 100644 --- a/deps/v8/src/builtins/string-repeat.tq +++ b/deps/v8/src/builtins/string-repeat.tq @@ -28,7 +28,7 @@ namespace string_repeat { // https://tc39.github.io/ecma262/#sec-string.prototype.repeat transitioning javascript builtin StringPrototypeRepeat( - context: Context, receiver: Object, count: Object): String { + js-implicit context: Context, receiver: Object)(count: Object): String { // 1. Let O be ? RequireObjectCoercible(this value). // 2. Let S be ? ToString(O). const s: String = ToThisString(receiver, kBuiltinName); diff --git a/deps/v8/src/builtins/string-slice.tq b/deps/v8/src/builtins/string-slice.tq index 41eb38b0ad4aa0..b066fb76692ce7 100644 --- a/deps/v8/src/builtins/string-slice.tq +++ b/deps/v8/src/builtins/string-slice.tq @@ -9,7 +9,7 @@ namespace string_slice { // ES6 #sec-string.prototype.slice ( start, end ) // https://tc39.github.io/ecma262/#sec-string.prototype.slice transitioning javascript builtin StringPrototypeSlice( - implicit context: Context)(receiver: Object, ...arguments): String { + js-implicit context: Context, receiver: Object)(...arguments): String { // 1. Let O be ? RequireObjectCoercible(this value). // 2. Let S be ? ToString(O). const string: String = ToThisString(receiver, 'String.prototype.slice'); diff --git a/deps/v8/src/builtins/string-startswith.tq b/deps/v8/src/builtins/string-startswith.tq index 1f885a2afdd178..b03e67ecf5446c 100644 --- a/deps/v8/src/builtins/string-startswith.tq +++ b/deps/v8/src/builtins/string-startswith.tq @@ -8,23 +8,15 @@ namespace string { extern macro RegExpBuiltinsAssembler::IsRegExp(implicit context: Context)(Object): bool; - // TODO(ryzokuken): Add RequireObjectCoercible to base.tq and update callsites - macro RequireObjectCoercible(implicit context: Context)(argument: Object): - Object { - if (IsNullOrUndefined(argument)) { - ThrowTypeError(kCalledOnNullOrUndefined, 'String.prototype.startsWith'); - } - return argument; - } - // https://tc39.github.io/ecma262/#sec-string.prototype.startswith transitioning javascript builtin StringPrototypeStartsWith( - context: Context, receiver: Object, ...arguments): Boolean { + js-implicit context: Context, receiver: Object)(...arguments): Boolean { const searchString: Object = arguments[0]; const position: Object = arguments[1]; + const kBuiltinName: constexpr string = 'String.prototype.startsWith'; // 1. Let O be ? RequireObjectCoercible(this value). - const object: Object = RequireObjectCoercible(receiver); + const object: Object = RequireObjectCoercible(receiver, kBuiltinName); // 2. Let S be ? ToString(O). const string: String = ToString_Inline(context, object); @@ -32,7 +24,7 @@ namespace string { // 3. Let isRegExp be ? IsRegExp(searchString). // 4. If isRegExp is true, throw a TypeError exception. if (IsRegExp(searchString)) { - ThrowTypeError(kFirstArgumentNotRegExp, 'String.prototype.startsWith'); + ThrowTypeError(kFirstArgumentNotRegExp, kBuiltinName); } // 5. Let searchStr be ? ToString(searchString). 
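
Both StringPrototypeStartsWith and StringPrototypeEndsWith above share the kFirstArgumentNotRegExp rejection; a minimal TypeScript sketch of the observable behavior (illustrative):

try {
  'v8 7.7'.startsWith(/v8/ as unknown as string);  // steps 3-4: RegExp rejected
} catch (e) {
  console.log(e instanceof TypeError);  // true
}
console.log('v8 7.7'.startsWith('v8'));  // true
console.log('v8 7.7'.endsWith('7.7'));   // true
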
diff --git a/deps/v8/src/builtins/string-substring.tq b/deps/v8/src/builtins/string-substring.tq index f322eeed06dc7a..1fafb8af4367ca 100644 --- a/deps/v8/src/builtins/string-substring.tq +++ b/deps/v8/src/builtins/string-substring.tq @@ -28,7 +28,7 @@ namespace string_substring { // ES6 #sec-string.prototype.substring transitioning javascript builtin StringPrototypeSubstring( - implicit context: Context)(receiver: Object, ...arguments): String { + js-implicit context: Context, receiver: Object)(...arguments): String { // Check that {receiver} is coercible to Object and convert it to a String. const string: String = ToThisString(receiver, 'String.prototype.substring'); const length = string.length_smi; diff --git a/deps/v8/src/builtins/string.tq b/deps/v8/src/builtins/string.tq index 1e5a74eb49d02b..dbcc5799e1063d 100644 --- a/deps/v8/src/builtins/string.tq +++ b/deps/v8/src/builtins/string.tq @@ -7,20 +7,21 @@ namespace string { // ES6 #sec-string.prototype.tostring transitioning javascript builtin - StringPrototypeToString(implicit context: Context)(receiver: Object): Object { + StringPrototypeToString(js-implicit context: Context)(receiver: Object): + Object { return ToThisValue(receiver, kString, 'String.prototype.toString'); } // ES6 #sec-string.prototype.valueof transitioning javascript builtin - StringPrototypeValueOf(implicit context: Context)(receiver: Object): Object { + StringPrototypeValueOf(js-implicit context: Context)(receiver: Object): + Object { return ToThisValue(receiver, kString, 'String.prototype.valueOf'); } extern macro StringBuiltinsAssembler::LoadSurrogatePairAt( String, intptr, intptr, constexpr UnicodeEncoding): int32; - extern macro StringFromSingleCodePoint(int32, constexpr UnicodeEncoding): - String; + extern macro StringFromSingleUTF16EncodedCodePoint(int32): String; // This function assumes StringPrimitiveWithNoCustomIteration is true. transitioning builtin StringToList(implicit context: Context)(string: String): @@ -38,7 +39,7 @@ namespace string { let i: intptr = 0; while (i < stringLength) { const ch: int32 = LoadSurrogatePairAt(string, stringLength, i, encoding); - const value: String = StringFromSingleCodePoint(ch, encoding); + const value: String = StringFromSingleUTF16EncodedCodePoint(ch); elements[arrayLength] = value; // Increment and continue the loop. i = i + value.length_intptr; @@ -52,9 +53,9 @@ namespace string { } transitioning macro GenerateStringAt(implicit context: Context)( - receiver: Object, position: Object, methodName: constexpr string): - never labels IfInBounds(String, intptr, intptr), - IfOutOfBounds { + receiver: Object, position: Object, + methodName: constexpr string): never labels + IfInBounds(String, intptr, intptr), IfOutOfBounds { // Check that {receiver} is coercible to Object and convert it to a String. 
const string: String = ToThisString(receiver, methodName); // Convert the {position} to a Smi and check that it's in bounds of @@ -70,12 +71,13 @@ namespace string { // ES6 #sec-string.prototype.charat transitioning javascript builtin StringPrototypeCharAt( - implicit context: Context)(receiver: Object, position: Object): Object { + js-implicit context: Context, + receiver: Object)(position: Object): Object { try { GenerateStringAt(receiver, position, 'String.prototype.charAt') otherwise IfInBounds, IfOutOfBounds; } - label IfInBounds(string: String, index: intptr, length: intptr) { + label IfInBounds(string: String, index: intptr, _length: intptr) { const code: int32 = StringCharCodeAt(string, index); return StringFromSingleCharCode(code); } @@ -86,12 +88,13 @@ namespace string { // ES6 #sec-string.prototype.charcodeat transitioning javascript builtin StringPrototypeCharCodeAt( - implicit context: Context)(receiver: Object, position: Object): Object { + js-implicit context: Context, + receiver: Object)(position: Object): Object { try { GenerateStringAt(receiver, position, 'String.prototype.charCodeAt') otherwise IfInBounds, IfOutOfBounds; } - label IfInBounds(string: String, index: intptr, length: intptr) { + label IfInBounds(string: String, index: intptr, _length: intptr) { const code: int32 = StringCharCodeAt(string, index); return Convert(code); } @@ -102,7 +105,8 @@ namespace string { // ES6 #sec-string.prototype.codepointat transitioning javascript builtin StringPrototypeCodePointAt( - implicit context: Context)(receiver: Object, position: Object): Object { + js-implicit context: Context, + receiver: Object)(position: Object): Object { try { GenerateStringAt(receiver, position, 'String.prototype.codePointAt') otherwise IfInBounds, IfOutOfBounds; @@ -121,7 +125,7 @@ namespace string { // ES6 String.prototype.concat(...args) // ES6 #sec-string.prototype.concat transitioning javascript builtin StringPrototypeConcat( - implicit context: Context)(receiver: Object, ...arguments): Object { + js-implicit context: Context, receiver: Object)(...arguments): Object { // Check that {receiver} is coercible to Object and convert it to a String. 
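// For illustration, the shared IfInBounds/IfOutOfBounds labels map onto the
// three builtins' differing out-of-range results (a sketch, assuming standard
// ECMA-262 semantics):
//
//   'a'.charAt(5);               // '' (empty string)
//   'a'.charCodeAt(5);           // NaN
//   'a'.codePointAt(5);          // undefined
//   '\u{1D306}'.charCodeAt(0);   // 0xD834, the lead surrogate only
//   '\u{1D306}'.codePointAt(0);  // 0x1D306, the full code point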
let string: String = ToThisString(receiver, 'String.prototype.concat'); diff --git a/deps/v8/src/builtins/typed-array-createtypedarray.tq b/deps/v8/src/builtins/typed-array-createtypedarray.tq index a0d745b2f4deb8..f6ab289e12c166 100644 --- a/deps/v8/src/builtins/typed-array-createtypedarray.tq +++ b/deps/v8/src/builtins/typed-array-createtypedarray.tq @@ -8,30 +8,77 @@ namespace typed_array_createtypedarray { extern builtin IterableToListMayPreserveHoles(Context, Object, Callable): JSArray; - extern macro ConstructorBuiltinsAssembler::EmitFastNewObject( - implicit context: Context)(JSFunction, JSReceiver): JSTypedArray; extern macro TypedArrayBuiltinsAssembler::AllocateEmptyOnHeapBuffer( - implicit context: Context)(JSTypedArray, uintptr): JSArrayBuffer; + implicit context: Context)(uintptr): JSArrayBuffer; + extern macro CodeStubAssembler::AllocateByteArray(uintptr): ByteArray; extern macro TypedArrayBuiltinsAssembler::GetDefaultConstructor( implicit context: Context)(JSTypedArray): JSFunction; extern macro TypedArrayBuiltinsAssembler::IsSharedArrayBuffer(JSArrayBuffer): bool; - extern macro TypedArrayBuiltinsAssembler::SetupTypedArray( - JSTypedArray, uintptr, uintptr, uintptr): void; + extern macro TypedArrayBuiltinsAssembler::SetupTypedArrayEmbedderFields( + JSTypedArray): void; extern runtime ThrowInvalidTypedArrayAlignment(implicit context: Context)( Map, String): never; extern runtime TypedArrayCopyElements(Context, JSTypedArray, Object, Number): void; + transitioning macro AllocateTypedArray(implicit context: Context)( + isOnHeap: constexpr bool, map: Map, buffer: JSArrayBuffer, + byteOffset: uintptr, byteLength: uintptr, length: uintptr): JSTypedArray { + let elements: ByteArray; + let externalPointer: RawPtr; + let basePointer: ByteArray | Smi; + if constexpr (isOnHeap) { + elements = AllocateByteArray(byteLength); + basePointer = elements; + externalPointer = PointerConstant(kExternalPointerForOnHeapArray); + } else { + basePointer = Convert(0); + + // The max byteOffset is 8 * MaxSmi on the particular platform. 32 bit + // platforms are self-limiting, because we can't allocate an array bigger + // than our 32-bit arithmetic range anyway. 64 bit platforms could + // theoretically have an offset up to 2^35 - 1. + const backingStore: RawPtr = buffer.backing_store; + externalPointer = backingStore + Convert(byteOffset); + + // Assert no overflow has occurred. Only assert if the mock array buffer + // allocator is NOT used. When the mock array buffer is used, impossibly + // large allocations are allowed that would erroneously cause an overflow + // and this assertion to fail. + assert( + IsMockArrayBufferAllocatorFlag() || + Convert(externalPointer) >= Convert(backingStore)); + + elements = kEmptyByteArray; + } + + // We can't just build the new object with "new JSTypedArray" here because + // Torque doesn't know its full size including embedder fields, so use CSA + // for the allocation step. 
+ const typedArray = + UnsafeCast(AllocateFastOrSlowJSObjectFromMap(map)); + typedArray.elements = elements; + typedArray.buffer = buffer; + typedArray.byte_offset = byteOffset; + typedArray.byte_length = byteLength; + typedArray.length = length; + typedArray.external_pointer = externalPointer; + typedArray.base_pointer = basePointer; + SetupTypedArrayEmbedderFields(typedArray); + return typedArray; + } + transitioning macro TypedArrayInitialize(implicit context: Context)( - initialize: constexpr bool, typedArray: JSTypedArray, length: PositiveSmi, + initialize: constexpr bool, map: Map, length: PositiveSmi, elementsInfo: typed_array::TypedArrayElementsInfo, - bufferConstructor: JSReceiver): uintptr { + bufferConstructor: JSReceiver): JSTypedArray { const byteLength = elementsInfo.CalculateByteLength(length) otherwise ThrowRangeError(kInvalidArrayBufferLength); const byteLengthNum = Convert(byteLength); const defaultConstructor = GetArrayBufferFunction(); + const byteOffset: uintptr = 0; try { if (bufferConstructor != defaultConstructor) { @@ -39,14 +86,21 @@ namespace typed_array_createtypedarray { defaultConstructor, bufferConstructor, byteLengthNum)); } - if (byteLength > V8_TYPED_ARRAY_MAX_SIZE_IN_HEAP) goto AllocateOffHeap; + if (byteLength > kMaxTypedArrayInHeap) goto AllocateOffHeap; + + const buffer = AllocateEmptyOnHeapBuffer(byteLength); - AllocateEmptyOnHeapBuffer(typedArray, byteLength); + const isOnHeap: constexpr bool = true; + const typedArray = AllocateTypedArray( + isOnHeap, map, buffer, byteOffset, byteLength, + Convert(length)); if constexpr (initialize) { const backingStore = typedArray.data_ptr; typed_array::CallCMemset(backingStore, 0, byteLength); } + + return typedArray; } label AllocateOffHeap { if constexpr (initialize) { @@ -58,22 +112,18 @@ namespace typed_array_createtypedarray { } label AttachOffHeapBuffer(bufferObj: Object) { const buffer = Cast(bufferObj) otherwise unreachable; - const byteOffset: uintptr = 0; - typedArray.AttachOffHeapBuffer(buffer, byteOffset); + const isOnHeap: constexpr bool = false; + return AllocateTypedArray( + isOnHeap, map, buffer, byteOffset, byteLength, + Convert(length)); } - - const byteOffset: uintptr = 0; - SetupTypedArray( - typedArray, Convert(length), byteOffset, byteLength); - - return byteLength; } // 22.2.4.2 TypedArray ( length ) // ES #sec-typedarray-length transitioning macro ConstructByLength(implicit context: Context)( - typedArray: JSTypedArray, length: Object, - elementsInfo: typed_array::TypedArrayElementsInfo): void { + map: Map, length: Object, + elementsInfo: typed_array::TypedArrayElementsInfo): JSTypedArray { const convertedLength: Number = ToInteger_Inline(context, length, kTruncateMinusZero); // The maximum length of a TypedArray is MaxSmi(). 
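// For illustration: byteLength here is length * element size, and only small
// buffers stay on-heap. A sketch of the observable arithmetic (the exact
// on-heap threshold, kMaxTypedArrayInHeap, is an implementation detail):
//
//   new Int32Array(4).byteLength;    // 16 = 4 elements * 4 bytes
//   new Float64Array(3).byteLength;  // 24 = 3 elements * 8 bytes
//   new Uint8Array(1 << 30);         // large: takes the AllocateOffHeap path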
@@ -84,23 +134,22 @@ namespace typed_array_createtypedarray { otherwise ThrowRangeError(kInvalidTypedArrayLength, length); const defaultConstructor: Constructor = GetArrayBufferFunction(); const initialize: constexpr bool = true; - TypedArrayInitialize( - initialize, typedArray, positiveLength, elementsInfo, - defaultConstructor); + return TypedArrayInitialize( + initialize, map, positiveLength, elementsInfo, defaultConstructor); } // 22.2.4.4 TypedArray ( object ) // ES #sec-typedarray-object transitioning macro ConstructByArrayLike(implicit context: Context)( - typedArray: JSTypedArray, arrayLike: HeapObject, initialLength: Object, + map: Map, arrayLike: HeapObject, initialLength: Object, elementsInfo: typed_array::TypedArrayElementsInfo, - bufferConstructor: JSReceiver): void { + bufferConstructor: JSReceiver): JSTypedArray { // The caller has looked up length on arrayLike, which is observable. const length: PositiveSmi = ToSmiLength(initialLength) otherwise ThrowRangeError(kInvalidTypedArrayLength, initialLength); const initialize: constexpr bool = false; - const byteLength = TypedArrayInitialize( - initialize, typedArray, length, elementsInfo, bufferConstructor); + const typedArray = TypedArrayInitialize( + initialize, map, length, elementsInfo, bufferConstructor); try { const src: JSTypedArray = Cast(arrayLike) otherwise IfSlow; @@ -112,6 +161,7 @@ namespace typed_array_createtypedarray { goto IfSlow; } else if (length > 0) { + const byteLength = typedArray.byte_length; assert(byteLength <= kArrayBufferMaxByteLength); typed_array::CallCMemcpy(typedArray.data_ptr, src.data_ptr, byteLength); } @@ -121,13 +171,13 @@ namespace typed_array_createtypedarray { TypedArrayCopyElements(context, typedArray, arrayLike, length); } } + return typedArray; } // 22.2.4.4 TypedArray ( object ) // ES #sec-typedarray-object transitioning macro ConstructByIterable(implicit context: Context)( - typedArray: JSTypedArray, iterable: JSReceiver, iteratorFn: Callable, - elementsInfo: typed_array::TypedArrayElementsInfo): never + iterable: JSReceiver, iteratorFn: Callable): never labels IfConstructByArrayLike(HeapObject, Object, JSReceiver) { const array: JSArray = IterableToListMayPreserveHoles(context, iterable, iteratorFn); @@ -137,8 +187,7 @@ namespace typed_array_createtypedarray { // 22.2.4.3 TypedArray ( typedArray ) // ES #sec-typedarray-typedarray transitioning macro ConstructByTypedArray(implicit context: Context)( - typedArray: JSTypedArray, srcTypedArray: JSTypedArray, - elementsInfo: typed_array::TypedArrayElementsInfo): never + srcTypedArray: JSTypedArray): never labels IfConstructByArrayLike(HeapObject, Object, JSReceiver) { let bufferConstructor: JSReceiver = GetArrayBufferFunction(); const srcBuffer: JSArrayBuffer = srcTypedArray.buffer; @@ -161,8 +210,8 @@ namespace typed_array_createtypedarray { // 22.2.4.5 TypedArray ( buffer, byteOffset, length ) // ES #sec-typedarray-buffer-byteoffset-length transitioning macro ConstructByArrayBuffer(implicit context: Context)( - typedArray: JSTypedArray, buffer: JSArrayBuffer, byteOffset: Object, - length: Object, elementsInfo: typed_array::TypedArrayElementsInfo): void { + map: Map, buffer: JSArrayBuffer, byteOffset: Object, length: Object, + elementsInfo: typed_array::TypedArrayElementsInfo): JSTypedArray { try { let offset: uintptr = 0; if (byteOffset != Undefined) { @@ -224,12 +273,13 @@ namespace typed_array_createtypedarray { goto IfInvalidLength; } - SetupTypedArray( - typedArray, Convert(newLength), offset, newByteLength); - 
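// For illustration, the byteOffset checks in ConstructByArrayBuffer above are
// spec-observable (a sketch, assuming standard ECMA-262 semantics):
//
//   const buf = new ArrayBuffer(8);
//   new Int32Array(buf, 4);  // ok: one element at offset 4
//   new Int32Array(buf, 2);  // RangeError: offset not a multiple of 4
//   new Int32Array(new ArrayBuffer(9));
//   // RangeError: buffer length not a multiple of the element size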
typedArray.AttachOffHeapBuffer(buffer, offset); + const isOnHeap: constexpr bool = false; + return AllocateTypedArray( + isOnHeap, map, buffer, offset, newByteLength, + Convert(newLength)); } label IfInvalidAlignment(problemString: String) deferred { - ThrowInvalidTypedArrayAlignment(typedArray.map, problemString); + ThrowInvalidTypedArrayAlignment(map, problemString); } label IfInvalidByteLength deferred { ThrowRangeError(kInvalidArrayBufferLength); @@ -242,16 +292,15 @@ namespace typed_array_createtypedarray { } } - transitioning macro ConstructByJSReceiver(implicit context: Context)( - array: JSTypedArray, obj: JSReceiver, - elementsInfo: typed_array::TypedArrayElementsInfo): never + transitioning macro ConstructByJSReceiver(implicit context: + Context)(obj: JSReceiver): never labels IfConstructByArrayLike(HeapObject, Object, JSReceiver) { try { const iteratorMethod: Object = GetIteratorMethod(obj) otherwise IfIteratorUndefined; const iteratorFn: Callable = Cast(iteratorMethod) otherwise ThrowTypeError(kIteratorSymbolNonCallable); - ConstructByIterable(array, obj, iteratorFn, elementsInfo) + ConstructByIterable(obj, iteratorFn) otherwise IfConstructByArrayLike; } label IfIteratorUndefined { @@ -273,22 +322,12 @@ namespace typed_array_createtypedarray { assert(IsConstructor(target)); // 4. Let O be ? AllocateTypedArray(constructorName, NewTarget, // "%TypedArrayPrototype%"). - const array: JSTypedArray = EmitFastNewObject(target, newTarget); - // We need to set the byte_offset / byte_length to some sane values - // to keep the heap verifier happy. - // TODO(bmeurer, v8:4153): Fix this initialization to not use - // EmitFastNewObject, which causes the problem, since it puts - // Undefined into all slots of the object even though that - // doesn't make any sense for these fields. - array.byte_offset = 0; - array.byte_length = 0; - array.length = 0; - array.base_pointer = Convert(0); + const map = GetDerivedMap(target, newTarget); // 5. Let elementSize be the Number value of the Element Size value in Table // 56 for constructorName. const elementsInfo: typed_array::TypedArrayElementsInfo = - typed_array::GetTypedArrayElementsInfo(array); + typed_array::GetTypedArrayElementsInfo(map); try { typeswitch (arg1) { @@ -296,15 +335,13 @@ namespace typed_array_createtypedarray { goto IfConstructByLength(length); } case (buffer: JSArrayBuffer): { - ConstructByArrayBuffer(array, buffer, arg2, arg3, elementsInfo); + return ConstructByArrayBuffer(map, buffer, arg2, arg3, elementsInfo); } case (typedArray: JSTypedArray): { - ConstructByTypedArray(array, typedArray, elementsInfo) - otherwise IfConstructByArrayLike; + ConstructByTypedArray(typedArray) otherwise IfConstructByArrayLike; } case (obj: JSReceiver): { - ConstructByJSReceiver(array, obj, elementsInfo) - otherwise IfConstructByArrayLike; + ConstructByJSReceiver(obj) otherwise IfConstructByArrayLike; } // The first argument was a number or fell through and is treated as // a number. 
https://tc39.github.io/ecma262/#sec-typedarray-length @@ -314,14 +351,13 @@ namespace typed_array_createtypedarray { } } label IfConstructByLength(length: Object) { - ConstructByLength(array, length, elementsInfo); + return ConstructByLength(map, length, elementsInfo); } label IfConstructByArrayLike( arrayLike: HeapObject, length: Object, bufferConstructor: JSReceiver) { - ConstructByArrayLike( - array, arrayLike, length, elementsInfo, bufferConstructor); + return ConstructByArrayLike( + map, arrayLike, length, elementsInfo, bufferConstructor); } - return array; } transitioning macro TypedArraySpeciesCreate(implicit context: Context)( diff --git a/deps/v8/src/builtins/typed-array-every.tq b/deps/v8/src/builtins/typed-array-every.tq index 4f8804880e3649..221814cb79d706 100644 --- a/deps/v8/src/builtins/typed-array-every.tq +++ b/deps/v8/src/builtins/typed-array-every.tq @@ -29,8 +29,8 @@ namespace typed_array_every { // https://tc39.github.io/ecma262/#sec-%typedarray%.prototype.every transitioning javascript builtin - TypedArrayPrototypeEvery(implicit context: Context)( - receiver: Object, ...arguments): Object { + TypedArrayPrototypeEvery(js-implicit context: Context, receiver: Object)( + ...arguments): Object { // arguments[0] = callback // arguments[1] = thisArg try { diff --git a/deps/v8/src/builtins/typed-array-filter.tq b/deps/v8/src/builtins/typed-array-filter.tq index 9407c3a7afa52f..3937699c731ad3 100644 --- a/deps/v8/src/builtins/typed-array-filter.tq +++ b/deps/v8/src/builtins/typed-array-filter.tq @@ -10,7 +10,7 @@ namespace typed_array_filter { // https://tc39.github.io/ecma262/#sec-%typedarray%.prototype.filter transitioning javascript builtin TypedArrayPrototypeFilter( - context: Context, receiver: Object, ...arguments): Object { + js-implicit context: Context, receiver: Object)(...arguments): Object { // arguments[0] = callback // arguments[1] = thisArg try { diff --git a/deps/v8/src/builtins/typed-array-find.tq b/deps/v8/src/builtins/typed-array-find.tq index 3c331eb3bb5423..be1943ccf48ce9 100644 --- a/deps/v8/src/builtins/typed-array-find.tq +++ b/deps/v8/src/builtins/typed-array-find.tq @@ -29,8 +29,8 @@ namespace typed_array_find { // https://tc39.github.io/ecma262/#sec-%typedarray%.prototype.find transitioning javascript builtin - TypedArrayPrototypeFind(implicit context: - Context)(receiver: Object, ...arguments): Object { + TypedArrayPrototypeFind(js-implicit context: Context, receiver: Object)( + ...arguments): Object { // arguments[0] = callback // arguments[1] = thisArg try { diff --git a/deps/v8/src/builtins/typed-array-findindex.tq b/deps/v8/src/builtins/typed-array-findindex.tq index 05f112d0d59df0..a5ee7897d3c62d 100644 --- a/deps/v8/src/builtins/typed-array-findindex.tq +++ b/deps/v8/src/builtins/typed-array-findindex.tq @@ -29,8 +29,8 @@ namespace typed_array_findindex { // https://tc39.github.io/ecma262/#sec-%typedarray%.prototype.findIndex transitioning javascript builtin - TypedArrayPrototypeFindIndex(implicit context: Context)( - receiver: Object, ...arguments): Object { + TypedArrayPrototypeFindIndex(js-implicit context: Context, receiver: Object)( + ...arguments): Object { // arguments[0] = callback // arguments[1] = thisArg. 
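// For illustration, these %TypedArray% iteration methods mirror their Array
// counterparts but walk the attached buffer directly (a sketch, assuming
// standard ECMA-262 semantics):
//
//   const a = new Int8Array([1, 2, 3]);
//   a.every(x => x > 0);      // true
//   a.find(x => x > 1);       // 2
//   a.findIndex(x => x > 1);  // 1
//   a.filter(x => x > 1);     // Int8Array [2, 3], a fresh array via species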
try { diff --git a/deps/v8/src/builtins/typed-array-foreach.tq b/deps/v8/src/builtins/typed-array-foreach.tq index dbf1a121da2e5d..656a22e07d362a 100644 --- a/deps/v8/src/builtins/typed-array-foreach.tq +++ b/deps/v8/src/builtins/typed-array-foreach.tq @@ -25,8 +25,8 @@ namespace typed_array_foreach { // https://tc39.github.io/ecma262/#sec-%typedarray%.prototype.every transitioning javascript builtin - TypedArrayPrototypeForEach(implicit context: Context)( - receiver: Object, ...arguments): Object { + TypedArrayPrototypeForEach(js-implicit context: Context, receiver: Object)( + ...arguments): Object { // arguments[0] = callback // arguments[1] = this_arg. diff --git a/deps/v8/src/builtins/typed-array-reduce.tq b/deps/v8/src/builtins/typed-array-reduce.tq index 7af918a07b1edb..d69dc9a98d840a 100644 --- a/deps/v8/src/builtins/typed-array-reduce.tq +++ b/deps/v8/src/builtins/typed-array-reduce.tq @@ -19,7 +19,7 @@ namespace typed_array_reduce { // BUG(4895): We should throw on detached buffers rather than simply exit. witness.Recheck() otherwise break; const value: Object = witness.Load(k); - if (accumulator == Hole) { + if (accumulator == TheHole) { accumulator = value; } else { accumulator = Call( @@ -27,7 +27,7 @@ namespace typed_array_reduce { witness.GetStable()); } } - if (accumulator == Hole) { + if (accumulator == TheHole) { ThrowTypeError(kReduceNoInitial, kBuiltinName); } return accumulator; @@ -35,8 +35,8 @@ namespace typed_array_reduce { // https://tc39.github.io/ecma262/#sec-%typedarray%.prototype.reduce transitioning javascript builtin - TypedArrayPrototypeReduce(implicit context: Context)( - receiver: Object, ...arguments): Object { + TypedArrayPrototypeReduce(js-implicit context: Context, receiver: Object)( + ...arguments): Object { // arguments[0] = callback // arguments[1] = initialValue. try { @@ -45,7 +45,7 @@ namespace typed_array_reduce { const uarray = typed_array::EnsureAttached(array) otherwise IsDetached; const callbackfn = Cast(arguments[0]) otherwise NotCallable; - const initialValue = arguments.length >= 2 ? arguments[1] : Hole; + const initialValue = arguments.length >= 2 ? arguments[1] : TheHole; return ReduceAllElements(uarray, callbackfn, initialValue); } label NotCallable deferred { diff --git a/deps/v8/src/builtins/typed-array-reduceright.tq b/deps/v8/src/builtins/typed-array-reduceright.tq index 59ce7ff55b03a5..99a84401ed1c90 100644 --- a/deps/v8/src/builtins/typed-array-reduceright.tq +++ b/deps/v8/src/builtins/typed-array-reduceright.tq @@ -19,7 +19,7 @@ namespace typed_array_reduceright { // BUG(4895): We should throw on detached buffers rather than simply exit. witness.Recheck() otherwise break; const value: Object = witness.Load(k); - if (accumulator == Hole) { + if (accumulator == TheHole) { accumulator = value; } else { accumulator = Call( @@ -27,7 +27,7 @@ namespace typed_array_reduceright { witness.GetStable()); } } - if (accumulator == Hole) { + if (accumulator == TheHole) { ThrowTypeError(kReduceNoInitial, kBuiltinName); } return accumulator; @@ -35,8 +35,8 @@ namespace typed_array_reduceright { // https://tc39.github.io/ecma262/#sec-%typedarray%.prototype.reduceright transitioning javascript builtin - TypedArrayPrototypeReduceRight(implicit context: Context)( - receiver: Object, ...arguments): Object { + TypedArrayPrototypeReduceRight( + js-implicit context: Context, receiver: Object)(...arguments): Object { // arguments[0] = callback // arguments[1] = initialValue. 
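// For illustration: TheHole is the internal "no accumulator yet" sentinel and
// must never escape to JS, which is what the kReduceNoInitial check above
// guarantees. A sketch of the observable behavior, assuming standard ECMA-262
// semantics:
//
//   new Int8Array([]).reduce((a, b) => a + b);
//   // TypeError: Reduce of empty array with no initial value
//   new Int8Array([]).reduce((a, b) => a + b, 0);  // 0
//   new Int8Array([7]).reduce((a, b) => a + b);    // 7, first element used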
try { @@ -45,7 +45,7 @@ namespace typed_array_reduceright { const uarray = typed_array::EnsureAttached(array) otherwise IsDetached; const callbackfn = Cast(arguments[0]) otherwise NotCallable; - const initialValue = arguments.length >= 2 ? arguments[1] : Hole; + const initialValue = arguments.length >= 2 ? arguments[1] : TheHole; return ReduceRightAllElements(uarray, callbackfn, initialValue); } diff --git a/deps/v8/src/builtins/typed-array-slice.tq b/deps/v8/src/builtins/typed-array-slice.tq index f45654b71ec76d..c0087ae1be0276 100644 --- a/deps/v8/src/builtins/typed-array-slice.tq +++ b/deps/v8/src/builtins/typed-array-slice.tq @@ -53,7 +53,7 @@ namespace typed_array_slice { // https://tc39.github.io/ecma262/#sec-%typedarray%.prototype.slice transitioning javascript builtin TypedArrayPrototypeSlice( - context: Context, receiver: Object, ...arguments): Object { + js-implicit context: Context, receiver: Object)(...arguments): Object { // arguments[0] = start // arguments[1] = end diff --git a/deps/v8/src/builtins/typed-array-some.tq b/deps/v8/src/builtins/typed-array-some.tq index 991cad6b1b15b6..7056650fba824e 100644 --- a/deps/v8/src/builtins/typed-array-some.tq +++ b/deps/v8/src/builtins/typed-array-some.tq @@ -29,8 +29,8 @@ namespace typed_array_some { // https://tc39.github.io/ecma262/#sec-%typedarray%.prototype.some transitioning javascript builtin - TypedArrayPrototypeSome(implicit context: - Context)(receiver: Object, ...arguments): Object { + TypedArrayPrototypeSome(js-implicit context: Context, receiver: Object)( + ...arguments): Object { // arguments[0] = callback // arguments[1] = thisArg. try { diff --git a/deps/v8/src/builtins/typed-array-subarray.tq b/deps/v8/src/builtins/typed-array-subarray.tq index 54b945f44ea8f1..4f98123f823a19 100644 --- a/deps/v8/src/builtins/typed-array-subarray.tq +++ b/deps/v8/src/builtins/typed-array-subarray.tq @@ -5,7 +5,8 @@ namespace typed_array_subarray { // ES %TypedArray%.prototype.subarray transitioning javascript builtin TypedArrayPrototypeSubArray( - context: Context, receiver: Object, ...arguments): JSTypedArray { + js-implicit context: Context, + receiver: Object)(...arguments): JSTypedArray { const methodName: constexpr string = '%TypedArray%.prototype.subarray'; // 1. Let O be the this value. 
diff --git a/deps/v8/src/builtins/typed-array.tq b/deps/v8/src/builtins/typed-array.tq index 8f923947f1dd0b..d03c1a0be977e3 100644 --- a/deps/v8/src/builtins/typed-array.tq +++ b/deps/v8/src/builtins/typed-array.tq @@ -65,29 +65,18 @@ namespace typed_array { implicit context: Context)(JSTypedArray): JSArrayBuffer; extern macro TypedArrayBuiltinsAssembler::GetTypedArrayElementsInfo( JSTypedArray): TypedArrayElementsInfo; + extern macro TypedArrayBuiltinsAssembler::GetTypedArrayElementsInfo(Map): + TypedArrayElementsInfo; extern macro TypedArrayBuiltinsAssembler::IsBigInt64ElementsKind( ElementsKind): bool; extern macro LoadFixedTypedArrayElementAsTagged( - RawPtr, Smi, constexpr ElementsKind, constexpr ParameterMode): Object; + RawPtr, Smi, constexpr ElementsKind): Numeric; extern macro StoreJSTypedArrayElementFromTagged( - Context, JSTypedArray, Smi, Object, constexpr ElementsKind, - constexpr ParameterMode); + Context, JSTypedArray, Smi, Object, constexpr ElementsKind); type LoadFn = builtin(Context, JSTypedArray, Smi) => Object; type StoreFn = builtin(Context, JSTypedArray, Smi, Object) => Object; - // These UnsafeCast specializations are necessary becuase there is no - // way to definitively test whether an Object is a Torque function - // with a specific signature, and the default UnsafeCast implementation - // would try to check this through an assert(Is<>), so the test - // is bypassed in this specialization. - UnsafeCast(implicit context: Context)(o: Object): LoadFn { - return %RawDownCast(o); - } - UnsafeCast(implicit context: Context)(o: Object): StoreFn { - return %RawDownCast(o); - } - // AttachedJSTypedArray guards that the array's buffer is not detached. transient type AttachedJSTypedArray extends JSTypedArray; @@ -201,17 +190,16 @@ namespace typed_array { } builtin LoadFixedElement( - context: Context, array: JSTypedArray, index: Smi): Object { + _context: Context, array: JSTypedArray, index: Smi): Object { return LoadFixedTypedArrayElementAsTagged( - array.data_ptr, index, KindForArrayType(), SMI_PARAMETERS); + array.data_ptr, index, KindForArrayType()); } builtin StoreFixedElement( context: Context, typedArray: JSTypedArray, index: Smi, value: Object): Object { StoreJSTypedArrayElementFromTagged( - context, typedArray, index, value, KindForArrayType(), - SMI_PARAMETERS); + context, typedArray, index, value, KindForArrayType()); return Undefined; } @@ -288,7 +276,8 @@ namespace typed_array { // https://tc39.github.io/ecma262/#sec-%typedarray%.prototype.sort transitioning javascript builtin TypedArrayPrototypeSort( - context: Context, receiver: Object, ...arguments): JSTypedArray { + js-implicit context: Context, + receiver: Object)(...arguments): JSTypedArray { // 1. If comparefn is not undefined and IsCallable(comparefn) is false, // throw a TypeError exception. 
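// For illustration, the comparefn check at this step is spec-observable, and
// the elements-kind dispatch above is why stores are typed per array kind (a
// sketch, assuming standard ECMA-262 semantics):
//
//   new Int8Array(2).sort(42);     // TypeError: comparefn is not callable
//   new Int8Array(2).sort();       // ok: default ascending numeric sort
//   new BigInt64Array(1)[0] = 1;   // TypeError: cannot convert 1 to a BigInt
//   new BigInt64Array(1)[0] = 1n;  // ok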
const comparefnObj: Object = @@ -322,7 +311,7 @@ namespace typed_array { let loadfn: LoadFn; let storefn: StoreFn; - let elementsKind: ElementsKind = array.elements_kind; + const elementsKind: ElementsKind = array.elements_kind; if (IsElementsKindGreaterThan(elementsKind, UINT32_ELEMENTS)) { if (elementsKind == INT32_ELEMENTS) { diff --git a/deps/v8/src/builtins/x64/builtins-x64.cc b/deps/v8/src/builtins/x64/builtins-x64.cc index 5c09b3a8dedef4..f15c8ba29f251a 100644 --- a/deps/v8/src/builtins/x64/builtins-x64.cc +++ b/deps/v8/src/builtins/x64/builtins-x64.cc @@ -1109,10 +1109,10 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) { // 8-bit fields next to each other, so we could just optimize by writing a // 16-bit. These static asserts guard our assumption is valid. STATIC_ASSERT(BytecodeArray::kBytecodeAgeOffset == - BytecodeArray::kOSRNestingLevelOffset + kCharSize); + BytecodeArray::kOsrNestingLevelOffset + kCharSize); STATIC_ASSERT(BytecodeArray::kNoAgeBytecodeAge == 0); __ movw(FieldOperand(kInterpreterBytecodeArrayRegister, - BytecodeArray::kOSRNestingLevelOffset), + BytecodeArray::kOsrNestingLevelOffset), Immediate(0)); // Load initial bytecode offset. @@ -1562,7 +1562,15 @@ void Generate_ContinueToBuiltinHelper(MacroAssembler* masm, kSystemPointerSize; __ popq(Operand(rsp, offsetToPC)); __ Drop(offsetToPC / kSystemPointerSize); - __ addq(Operand(rsp, 0), Immediate(Code::kHeaderSize - kHeapObjectTag)); + + // Replace the builtin index Smi on the stack with the instruction start + // address of the builtin from the builtins table, and then Ret to this + // address + __ movq(kScratchRegister, Operand(rsp, 0)); + __ movq(kScratchRegister, + __ EntryFromBuiltinIndexAsOperand(kScratchRegister)); + __ movq(Operand(rsp, 0), kScratchRegister); + __ Ret(); } } // namespace @@ -3002,21 +3010,24 @@ void CallApiFunctionAndReturn(MacroAssembler* masm, Register function_address, __ movq(prev_limit_reg, Operand(base_reg, kLimitOffset)); __ addl(Operand(base_reg, kLevelOffset), Immediate(1)); - Label profiler_disabled; - Label end_profiler_check; + Label profiler_enabled, end_profiler_check; __ Move(rax, ExternalReference::is_profiling_address(isolate)); __ cmpb(Operand(rax, 0), Immediate(0)); - __ j(zero, &profiler_disabled); - - // Third parameter is the address of the actual getter function. - __ Move(thunk_last_arg, function_address); - __ Move(rax, thunk_ref); - __ jmp(&end_profiler_check); - - __ bind(&profiler_disabled); - // Call the api function! - __ Move(rax, function_address); - + __ j(not_zero, &profiler_enabled); + __ Move(rax, ExternalReference::address_of_runtime_stats_flag()); + __ cmpl(Operand(rax, 0), Immediate(0)); + __ j(not_zero, &profiler_enabled); + { + // Call the api function directly. + __ Move(rax, function_address); + __ jmp(&end_profiler_check); + } + __ bind(&profiler_enabled); + { + // Third parameter is the address of the actual getter function. + __ Move(thunk_last_arg, function_address); + __ Move(rax, thunk_ref); + } __ bind(&end_profiler_check); // Call the api function! 
@@ -3065,6 +3076,9 @@ void CallApiFunctionAndReturn(MacroAssembler* masm, Register function_address, __ CompareRoot(map, RootIndex::kHeapNumberMap); __ j(equal, &ok, Label::kNear); + __ CompareRoot(map, RootIndex::kBigIntMap); + __ j(equal, &ok, Label::kNear); + __ CompareRoot(return_value, RootIndex::kUndefinedValue); __ j(equal, &ok, Label::kNear); diff --git a/deps/v8/src/codegen/DEPS b/deps/v8/src/codegen/DEPS new file mode 100644 index 00000000000000..f3715e6ad01f2a --- /dev/null +++ b/deps/v8/src/codegen/DEPS @@ -0,0 +1,9 @@ +# Copyright 2019 the V8 project authors. All rights reserved. +# Use of this source code is governed by a BSD-style license that can be +# found in the LICENSE file. + +specific_include_rules = { + "external-reference.cc": [ + "+src/regexp/regexp-macro-assembler-arch.h", + ], +} diff --git a/deps/v8/src/codegen/OWNERS b/deps/v8/src/codegen/OWNERS index 345e80a16e29a9..feb2f62f7878ec 100644 --- a/deps/v8/src/codegen/OWNERS +++ b/deps/v8/src/codegen/OWNERS @@ -1,9 +1,12 @@ -ahaas@chromium.org +bbudge@chromium.org bmeurer@chromium.org clemensh@chromium.org +gdeepti@chromium.org +ishell@chromium.org jarin@chromium.org jgruber@chromium.org jkummerow@chromium.org +leszeks@chromium.org mslekova@chromium.org mstarzinger@chromium.org mvstanton@chromium.org @@ -11,3 +14,6 @@ neis@chromium.org rmcilroy@chromium.org sigurds@chromium.org tebbi@chromium.org +titzer@chromium.org + +# COMPONENT: Blink>JavaScript>Compiler diff --git a/deps/v8/src/codegen/arm/assembler-arm.cc b/deps/v8/src/codegen/arm/assembler-arm.cc index c8ef586fc15bb8..7ca49a3f9fc0ed 100644 --- a/deps/v8/src/codegen/arm/assembler-arm.cc +++ b/deps/v8/src/codegen/arm/assembler-arm.cc @@ -2210,7 +2210,7 @@ void Assembler::stm(BlockAddrMode am, Register base, RegList src, // Exception-generating instructions and debugging support. // Stops with a non-negative code less than kNumOfWatchedStops support // enabling/disabling and a counter feature. See simulator-arm.h . -void Assembler::stop(const char* msg, Condition cond, int32_t code) { +void Assembler::stop(Condition cond, int32_t code) { #ifndef __arm__ DCHECK_GE(code, kDefaultStopCode); { @@ -4827,12 +4827,13 @@ void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) { void Assembler::ConstantPoolAddEntry(int position, RelocInfo::Mode rmode, intptr_t value) { DCHECK(rmode != RelocInfo::CONST_POOL); - // We can share CODE_TARGETs because we don't patch the code objects anymore, - // and we make sure we emit only one reloc info for them (thus delta patching) - // will apply the delta only once. At the moment, we do not dedup code targets - // if they are wrapped in a heap object request (value == 0). + // We can share CODE_TARGETs and embedded objects, but we must make sure we + // only emit one reloc info for them (thus delta patching will apply the delta + // only once). At the moment, we do not deduplicate heap object request which + // are indicated by value == 0. 
bool sharing_ok = RelocInfo::IsShareableRelocMode(rmode) || - (rmode == RelocInfo::CODE_TARGET && value != 0); + (rmode == RelocInfo::CODE_TARGET && value != 0) || + (RelocInfo::IsEmbeddedObjectMode(rmode) && value != 0); DCHECK_LT(pending_32_bit_constants_.size(), kMaxNumPending32Constants); if (pending_32_bit_constants_.empty()) { first_const_pool_32_use_ = position; diff --git a/deps/v8/src/codegen/arm/assembler-arm.h b/deps/v8/src/codegen/arm/assembler-arm.h index 4db825fa971b3c..f383632f73aee8 100644 --- a/deps/v8/src/codegen/arm/assembler-arm.h +++ b/deps/v8/src/codegen/arm/assembler-arm.h @@ -625,8 +625,7 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase { void stm(BlockAddrMode am, Register base, RegList src, Condition cond = al); // Exception-generating instructions and debugging support - void stop(const char* msg, Condition cond = al, - int32_t code = kDefaultStopCode); + void stop(Condition cond = al, int32_t code = kDefaultStopCode); void bkpt(uint32_t imm16); // v5 and above void svc(uint32_t imm24, Condition cond = al); diff --git a/deps/v8/src/codegen/arm/macro-assembler-arm.cc b/deps/v8/src/codegen/arm/macro-assembler-arm.cc index bcda320f8be7de..ba334cd0b65af2 100644 --- a/deps/v8/src/codegen/arm/macro-assembler-arm.cc +++ b/deps/v8/src/codegen/arm/macro-assembler-arm.cc @@ -303,20 +303,24 @@ void TurboAssembler::Call(Handle code, RelocInfo::Mode rmode, Call(code.address(), rmode, cond, mode); } -void TurboAssembler::CallBuiltinPointer(Register builtin_pointer) { +void TurboAssembler::LoadEntryFromBuiltinIndex(Register builtin_index) { STATIC_ASSERT(kSystemPointerSize == 4); STATIC_ASSERT(kSmiShiftSize == 0); STATIC_ASSERT(kSmiTagSize == 1); STATIC_ASSERT(kSmiTag == 0); - // The builtin_pointer register contains the builtin index as a Smi. + // The builtin_index register contains the builtin index as a Smi. // Untagging is folded into the indexing operand below. - mov(builtin_pointer, - Operand(builtin_pointer, LSL, kSystemPointerSizeLog2 - kSmiTagSize)); - add(builtin_pointer, builtin_pointer, + mov(builtin_index, + Operand(builtin_index, LSL, kSystemPointerSizeLog2 - kSmiTagSize)); + add(builtin_index, builtin_index, Operand(IsolateData::builtin_entry_table_offset())); - ldr(builtin_pointer, MemOperand(kRootRegister, builtin_pointer)); - Call(builtin_pointer); + ldr(builtin_index, MemOperand(kRootRegister, builtin_index)); +} + +void TurboAssembler::CallBuiltinByIndex(Register builtin_index) { + LoadEntryFromBuiltinIndex(builtin_index); + Call(builtin_index); } void TurboAssembler::LoadCodeObjectEntry(Register destination, @@ -632,7 +636,7 @@ void MacroAssembler::RecordWriteField(Register object, int offset, add(scratch, object, Operand(offset - kHeapObjectTag)); tst(scratch, Operand(kPointerSize - 1)); b(eq, &ok); - stop("Unaligned cell in write barrier"); + stop(); bind(&ok); } @@ -1951,15 +1955,15 @@ void TurboAssembler::Check(Condition cond, AbortReason reason) { void TurboAssembler::Abort(AbortReason reason) { Label abort_start; bind(&abort_start); - const char* msg = GetAbortReason(reason); #ifdef DEBUG + const char* msg = GetAbortReason(reason); RecordComment("Abort message: "); RecordComment(msg); #endif // Avoid emitting call to builtin if requested. if (trap_on_abort()) { - stop(msg); + stop(); return; } @@ -2402,7 +2406,7 @@ void TurboAssembler::CallCFunctionHelper(Register function, b(eq, &alignment_as_expected); // Don't use Check here, as it will call Runtime_Abort possibly // re-entering here. 
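// For illustration, the untag-and-scale fold in LoadEntryFromBuiltinIndex
// above: on 32-bit ARM a Smi stores the value shifted left by kSmiTagSize (1)
// and entry-table slots are kSystemPointerSize (4) bytes, so a single LSL by
// kSystemPointerSizeLog2 - kSmiTagSize = 2 - 1 = 1 both untags and scales. A
// worked example, assuming those 32-bit constants:
//
//   builtin index 5  =>  Smi encoding 5 << 1 = 10
//   10 << 1 = 20 = 5 * 4  =>  correct byte offset into builtin_entry_table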
- stop("Unexpected alignment"); + stop(); bind(&alignment_as_expected); } } diff --git a/deps/v8/src/codegen/arm/macro-assembler-arm.h b/deps/v8/src/codegen/arm/macro-assembler-arm.h index 4f497dcea473bb..e4ce734f52a37f 100644 --- a/deps/v8/src/codegen/arm/macro-assembler-arm.h +++ b/deps/v8/src/codegen/arm/macro-assembler-arm.h @@ -300,7 +300,10 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase { bool check_constant_pool = true); void Call(Label* target); - void CallBuiltinPointer(Register builtin_pointer) override; + // Load the builtin given by the Smi in |builtin_index| into the same + // register. + void LoadEntryFromBuiltinIndex(Register builtin_index); + void CallBuiltinByIndex(Register builtin_index) override; void LoadCodeObjectEntry(Register destination, Register code_object) override; void CallCodeObject(Register code_object) override; diff --git a/deps/v8/src/codegen/arm64/assembler-arm64-inl.h b/deps/v8/src/codegen/arm64/assembler-arm64-inl.h index 5680d8b054f2e8..baae106c1c6ad8 100644 --- a/deps/v8/src/codegen/arm64/assembler-arm64-inl.h +++ b/deps/v8/src/codegen/arm64/assembler-arm64-inl.h @@ -5,6 +5,9 @@ #ifndef V8_CODEGEN_ARM64_ASSEMBLER_ARM64_INL_H_ #define V8_CODEGEN_ARM64_ASSEMBLER_ARM64_INL_H_ +#include + +#include "src/base/memory.h" #include "src/codegen/arm64/assembler-arm64.h" #include "src/codegen/assembler.h" #include "src/debug/debug.h" @@ -22,8 +25,9 @@ void RelocInfo::apply(intptr_t delta) { // On arm64 only internal references and immediate branches need extra work. if (RelocInfo::IsInternalReference(rmode_)) { // Absolute code pointer inside code object moves with the code object. - intptr_t* p = reinterpret_cast(pc_); - *p += delta; // Relocate entry. + intptr_t internal_ref = ReadUnalignedValue(pc_); + internal_ref += delta; // Relocate entry. + WriteUnalignedValue(pc_, internal_ref); } else { Instruction* instr = reinterpret_cast(pc_); if (instr->IsBranchAndLink() || instr->IsUnconditionalBranch()) { @@ -193,17 +197,16 @@ inline VRegister CPURegister::Q() const { // Default initializer is for int types template struct ImmediateInitializer { - static const bool kIsIntType = true; static inline RelocInfo::Mode rmode_for(T) { return RelocInfo::NONE; } static inline int64_t immediate_for(T t) { STATIC_ASSERT(sizeof(T) <= 8); + STATIC_ASSERT(std::is_integral::value || std::is_enum::value); return t; } }; template <> struct ImmediateInitializer { - static const bool kIsIntType = false; static inline RelocInfo::Mode rmode_for(Smi t) { return RelocInfo::NONE; } static inline int64_t immediate_for(Smi t) { return static_cast(t.ptr()); @@ -212,7 +215,6 @@ struct ImmediateInitializer { template <> struct ImmediateInitializer { - static const bool kIsIntType = false; static inline RelocInfo::Mode rmode_for(ExternalReference t) { return RelocInfo::EXTERNAL_REFERENCE; } @@ -222,8 +224,9 @@ struct ImmediateInitializer { }; template -Immediate::Immediate(Handle value) { - InitializeHandle(value); +Immediate::Immediate(Handle handle, RelocInfo::Mode mode) + : value_(static_cast(handle.address())), rmode_(mode) { + DCHECK(RelocInfo::IsEmbeddedObjectMode(mode)); } template @@ -234,13 +237,9 @@ Immediate::Immediate(T t) template Immediate::Immediate(T t, RelocInfo::Mode rmode) : value_(ImmediateInitializer::immediate_for(t)), rmode_(rmode) { - STATIC_ASSERT(ImmediateInitializer::kIsIntType); + STATIC_ASSERT(std::is_integral::value); } -// Operand. 
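// For context: ARM64 instructions are only 4-byte aligned, so an 8-byte
// internal reference embedded in the instruction stream can land at
// pc % 8 == 4, where *reinterpret_cast<intptr_t*>(pc) would be an unaligned
// access. A minimal sketch of what the Read/WriteUnalignedValue helpers do,
// assuming a memcpy-based implementation like src/base/memory.h:
//
//   template <typename V>
//   static inline V ReadUnalignedValue(Address p) {
//     V r;
//     memcpy(&r, reinterpret_cast<void*>(p), sizeof(V));  // alignment-safe
//     return r;
//   }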
-template <typename T>
-Operand::Operand(Handle<T> value) : immediate_(value), reg_(NoReg) {}
-
 template <typename T>
 Operand::Operand(T t) : immediate_(t), reg_(NoReg) {}

@@ -479,7 +478,7 @@ void Assembler::Unreachable() {

 Address Assembler::target_pointer_address_at(Address pc) {
   Instruction* instr = reinterpret_cast<Instruction*>(pc);
-  DCHECK(instr->IsLdrLiteralX());
+  DCHECK(instr->IsLdrLiteralX() || instr->IsLdrLiteralW());
   return reinterpret_cast<Address>
(instr->ImmPCOffsetTarget()); } @@ -494,6 +493,13 @@ Address Assembler::target_address_at(Address pc, Address constant_pool) { } } +Tagged_t Assembler::target_compressed_address_at(Address pc, + Address constant_pool) { + Instruction* instr = reinterpret_cast(pc); + CHECK(instr->IsLdrLiteralW()); + return Memory(target_pointer_address_at(pc)); +} + Handle Assembler::code_target_object_handle_at(Address pc) { Instruction* instr = reinterpret_cast(pc); if (instr->IsLdrLiteralX()) { @@ -502,14 +508,39 @@ Handle Assembler::code_target_object_handle_at(Address pc) { } else { DCHECK(instr->IsBranchAndLink() || instr->IsUnconditionalBranch()); DCHECK_EQ(instr->ImmPCOffset() % kInstrSize, 0); - return GetCodeTarget(instr->ImmPCOffset() >> kInstrSizeLog2); + return Handle::cast( + GetEmbeddedObject(instr->ImmPCOffset() >> kInstrSizeLog2)); } } -Handle Assembler::compressed_embedded_object_handle_at(Address pc) { +AssemblerBase::EmbeddedObjectIndex +Assembler::embedded_object_index_referenced_from(Address pc) { Instruction* instr = reinterpret_cast(pc); - CHECK(!instr->IsLdrLiteralX()); - return GetCompressedEmbeddedObject(ReadUnalignedValue(pc)); + if (instr->IsLdrLiteralX()) { + STATIC_ASSERT(sizeof(EmbeddedObjectIndex) == sizeof(intptr_t)); + return Memory(target_pointer_address_at(pc)); + } else { + DCHECK(instr->IsLdrLiteralW()); + return Memory(target_pointer_address_at(pc)); + } +} + +void Assembler::set_embedded_object_index_referenced_from( + Address pc, EmbeddedObjectIndex data) { + Instruction* instr = reinterpret_cast(pc); + if (instr->IsLdrLiteralX()) { + Memory(target_pointer_address_at(pc)) = data; + } else { + DCHECK(instr->IsLdrLiteralW()); + DCHECK(is_uint32(data)); + WriteUnalignedValue(target_pointer_address_at(pc), + static_cast(data)); + } +} + +Handle Assembler::target_object_handle_at(Address pc) { + return GetEmbeddedObject( + Assembler::embedded_object_index_referenced_from(pc)); } Address Assembler::runtime_entry_at(Address pc) { @@ -557,7 +588,7 @@ void Assembler::deserialization_set_special_target_at(Address location, void Assembler::deserialization_set_target_internal_reference_at( Address pc, Address target, RelocInfo::Mode mode) { - Memory
<Address>(pc) = target;
+  WriteUnalignedValue<Address>
(pc, target); } void Assembler::set_target_address_at(Address pc, Address constant_pool, @@ -585,12 +616,21 @@ void Assembler::set_target_address_at(Address pc, Address constant_pool, } } +void Assembler::set_target_compressed_address_at( + Address pc, Address constant_pool, Tagged_t target, + ICacheFlushMode icache_flush_mode) { + Instruction* instr = reinterpret_cast(pc); + CHECK(instr->IsLdrLiteralW()); + Memory(target_pointer_address_at(pc)) = target; +} + int RelocInfo::target_address_size() { if (IsCodedSpecially()) { return Assembler::kSpecialTargetSize; } else { - DCHECK(reinterpret_cast(pc_)->IsLdrLiteralX()); - return kSystemPointerSize; + Instruction* instr = reinterpret_cast(pc_); + DCHECK(instr->IsLdrLiteralX() || instr->IsLdrLiteralW()); + return instr->IsLdrLiteralW() ? kTaggedSize : kSystemPointerSize; } } @@ -629,19 +669,30 @@ Address RelocInfo::constant_pool_entry_address() { } HeapObject RelocInfo::target_object() { - DCHECK(IsCodeTarget(rmode_) || IsFullEmbeddedObject(rmode_)); - return HeapObject::cast( - Object(Assembler::target_address_at(pc_, constant_pool_))); + DCHECK(IsCodeTarget(rmode_) || IsEmbeddedObjectMode(rmode_)); + if (IsCompressedEmbeddedObject(rmode_)) { + return HeapObject::cast(Object(DecompressTaggedAny( + host_.address(), + Assembler::target_compressed_address_at(pc_, constant_pool_)))); + } else { + return HeapObject::cast( + Object(Assembler::target_address_at(pc_, constant_pool_))); + } } HeapObject RelocInfo::target_object_no_host(Isolate* isolate) { - return target_object(); + if (IsCompressedEmbeddedObject(rmode_)) { + return HeapObject::cast(Object(DecompressTaggedAny( + isolate, + Assembler::target_compressed_address_at(pc_, constant_pool_)))); + } else { + return target_object(); + } } Handle RelocInfo::target_object_handle(Assembler* origin) { - if (IsFullEmbeddedObject(rmode_)) { - return Handle(reinterpret_cast( - Assembler::target_address_at(pc_, constant_pool_))); + if (IsEmbeddedObjectMode(rmode_)) { + return origin->target_object_handle_at(pc_); } else { DCHECK(IsCodeTarget(rmode_)); return origin->code_target_object_handle_at(pc_); @@ -651,9 +702,15 @@ Handle RelocInfo::target_object_handle(Assembler* origin) { void RelocInfo::set_target_object(Heap* heap, HeapObject target, WriteBarrierMode write_barrier_mode, ICacheFlushMode icache_flush_mode) { - DCHECK(IsCodeTarget(rmode_) || IsFullEmbeddedObject(rmode_)); - Assembler::set_target_address_at(pc_, constant_pool_, target.ptr(), - icache_flush_mode); + DCHECK(IsCodeTarget(rmode_) || IsEmbeddedObjectMode(rmode_)); + if (IsCompressedEmbeddedObject(rmode_)) { + Assembler::set_target_compressed_address_at( + pc_, constant_pool_, CompressTagged(target.ptr()), icache_flush_mode); + } else { + DCHECK(IsFullEmbeddedObject(rmode_)); + Assembler::set_target_address_at(pc_, constant_pool_, target.ptr(), + icache_flush_mode); + } if (write_barrier_mode == UPDATE_WRITE_BARRIER && !host().is_null()) { WriteBarrierForCode(host(), this, target); } @@ -673,7 +730,7 @@ void RelocInfo::set_target_external_reference( Address RelocInfo::target_internal_reference() { DCHECK(rmode_ == INTERNAL_REFERENCE); - return Memory
<Address>(pc_);
+  return ReadUnalignedValue<Address>
(pc_); } Address RelocInfo::target_internal_reference_address() { @@ -701,11 +758,14 @@ Address RelocInfo::target_off_heap_target() { } void RelocInfo::WipeOut() { - DCHECK(IsFullEmbeddedObject(rmode_) || IsCodeTarget(rmode_) || + DCHECK(IsEmbeddedObjectMode(rmode_) || IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_) || IsExternalReference(rmode_) || IsInternalReference(rmode_) || IsOffHeapTarget(rmode_)); if (IsInternalReference(rmode_)) { - Memory
<Address>(pc_) = kNullAddress;
+    WriteUnalignedValue<Address>
(pc_, kNullAddress); + } else if (IsCompressedEmbeddedObject(rmode_)) { + Assembler::set_target_compressed_address_at(pc_, constant_pool_, + kNullAddress); } else { Assembler::set_target_address_at(pc_, constant_pool_, kNullAddress); } @@ -1025,9 +1085,7 @@ inline void Assembler::CheckBuffer() { if (pc_offset() >= next_veneer_pool_check_) { CheckVeneerPool(false, true); } - if (pc_offset() >= next_constant_pool_check_) { - CheckConstPool(false, true); - } + constpool_.MaybeCheck(); } } // namespace internal diff --git a/deps/v8/src/codegen/arm64/assembler-arm64.cc b/deps/v8/src/codegen/arm64/assembler-arm64.cc index 1806f82b461a5f..159e763ba26026 100644 --- a/deps/v8/src/codegen/arm64/assembler-arm64.cc +++ b/deps/v8/src/codegen/arm64/assembler-arm64.cc @@ -34,6 +34,7 @@ #include "src/base/cpu.h" #include "src/codegen/arm64/assembler-arm64-inl.h" #include "src/codegen/register-configuration.h" +#include "src/codegen/safepoint-table.h" #include "src/codegen/string-constants.h" #include "src/execution/frame-constants.h" @@ -283,11 +284,6 @@ bool AreConsecutive(const VRegister& reg1, const VRegister& reg2, return true; } -void Immediate::InitializeHandle(Handle handle) { - value_ = static_cast(handle.address()); - rmode_ = RelocInfo::FULL_EMBEDDED_OBJECT; -} - bool Operand::NeedsRelocation(const Assembler* assembler) const { RelocInfo::Mode rmode = immediate_.rmode(); @@ -298,167 +294,6 @@ bool Operand::NeedsRelocation(const Assembler* assembler) const { return !RelocInfo::IsNone(rmode); } -bool ConstPool::AddSharedEntry(SharedEntryMap& entry_map, uint64_t data, - int offset) { - auto existing = entry_map.find(data); - if (existing == entry_map.end()) { - entry_map[data] = static_cast(entries_.size()); - entries_.push_back(std::make_pair(data, std::vector(1, offset))); - return true; - } - int index = existing->second; - entries_[index].second.push_back(offset); - return false; -} - -// Constant Pool. -bool ConstPool::RecordEntry(intptr_t data, RelocInfo::Mode mode) { - DCHECK(mode != RelocInfo::CONST_POOL && mode != RelocInfo::VENEER_POOL && - mode != RelocInfo::DEOPT_SCRIPT_OFFSET && - mode != RelocInfo::DEOPT_INLINING_ID && - mode != RelocInfo::DEOPT_REASON && mode != RelocInfo::DEOPT_ID); - - bool write_reloc_info = true; - - uint64_t raw_data = static_cast(data); - int offset = assm_->pc_offset(); - if (IsEmpty()) { - first_use_ = offset; - } - - if (RelocInfo::IsShareableRelocMode(mode)) { - write_reloc_info = AddSharedEntry(shared_entries_, raw_data, offset); - } else if (mode == RelocInfo::CODE_TARGET && raw_data != 0) { - // A zero data value is a placeholder and must not be shared. - write_reloc_info = AddSharedEntry(handle_to_index_map_, raw_data, offset); - } else { - entries_.push_back(std::make_pair(raw_data, std::vector(1, offset))); - } - - if (EntryCount() > Assembler::kApproxMaxPoolEntryCount) { - // Request constant pool emission after the next instruction. - assm_->SetNextConstPoolCheckIn(1); - } - - return write_reloc_info; -} - -int ConstPool::DistanceToFirstUse() { - DCHECK_GE(first_use_, 0); - return assm_->pc_offset() - first_use_; -} - -int ConstPool::MaxPcOffset() { - // There are no pending entries in the pool so we can never get out of - // range. - if (IsEmpty()) return kMaxInt; - - // Entries are not necessarily emitted in the order they are added so in the - // worst case the first constant pool use will be accessing the last entry. 
- return first_use_ + kMaxLoadLiteralRange - WorstCaseSize(); -} - -int ConstPool::WorstCaseSize() { - if (IsEmpty()) return 0; - - // Max size prologue: - // b over - // ldr xzr, #pool_size - // blr xzr - // nop - // All entries are 64-bit for now. - return 4 * kInstrSize + EntryCount() * kSystemPointerSize; -} - -int ConstPool::SizeIfEmittedAtCurrentPc(bool require_jump) { - if (IsEmpty()) return 0; - - // Prologue is: - // b over ;; if require_jump - // ldr xzr, #pool_size - // blr xzr - // nop ;; if not 64-bit aligned - int prologue_size = require_jump ? kInstrSize : 0; - prologue_size += 2 * kInstrSize; - prologue_size += - IsAligned(assm_->pc_offset() + prologue_size, 8) ? 0 : kInstrSize; - - // All entries are 64-bit for now. - return prologue_size + EntryCount() * kSystemPointerSize; -} - -void ConstPool::Emit(bool require_jump) { - DCHECK(!assm_->is_const_pool_blocked()); - // Prevent recursive pool emission and protect from veneer pools. - Assembler::BlockPoolsScope block_pools(assm_); - - int size = SizeIfEmittedAtCurrentPc(require_jump); - Label size_check; - assm_->bind(&size_check); - - assm_->RecordConstPool(size); - // Emit the constant pool. It is preceded by an optional branch if - // require_jump and a header which will: - // 1) Encode the size of the constant pool, for use by the disassembler. - // 2) Terminate the program, to try to prevent execution from accidentally - // flowing into the constant pool. - // 3) align the pool entries to 64-bit. - // The header is therefore made of up to three arm64 instructions: - // ldr xzr, # - // blr xzr - // nop - // - // If executed, the header will likely segfault and lr will point to the - // instruction following the offending blr. - // TODO(all): Make the alignment part less fragile. Currently code is - // allocated as a byte array so there are no guarantees the alignment will - // be preserved on compaction. Currently it works as allocation seems to be - // 64-bit aligned. - - // Emit branch if required - Label after_pool; - if (require_jump) { - assm_->b(&after_pool); - } - - // Emit the header. - assm_->RecordComment("[ Constant Pool"); - EmitMarker(); - EmitGuard(); - assm_->Align(8); - - // Emit constant pool entries. - // TODO(all): currently each relocated constant is 64 bits, consider adding - // support for 32-bit entries. - EmitEntries(); - assm_->RecordComment("]"); - - if (after_pool.is_linked()) { - assm_->bind(&after_pool); - } - - DCHECK(assm_->SizeOfCodeGeneratedSince(&size_check) == - static_cast(size)); -} - -void ConstPool::Clear() { - shared_entries_.clear(); - handle_to_index_map_.clear(); - entries_.clear(); - first_use_ = -1; -} - -void ConstPool::EmitMarker() { - // A constant pool size is expressed in number of 32-bits words. - // Currently all entries are 64-bit. - // + 1 is for the crash guard. - // + 0/1 for alignment. - int word_count = - EntryCount() * 2 + 1 + (IsAligned(assm_->pc_offset(), 8) ? 
0 : 1); - assm_->Emit(LDR_x_lit | Assembler::ImmLLiteral(word_count) | - Assembler::Rt(xzr)); -} - MemOperand::PairResult MemOperand::AreConsistentForPair( const MemOperand& operandA, const MemOperand& operandB, int access_size_log2) { @@ -484,47 +319,18 @@ MemOperand::PairResult MemOperand::AreConsistentForPair( return kNotPair; } -void ConstPool::EmitGuard() { -#ifdef DEBUG - Instruction* instr = reinterpret_cast(assm_->pc()); - DCHECK(instr->preceding()->IsLdrLiteralX() && - instr->preceding()->Rt() == xzr.code()); -#endif - assm_->EmitPoolGuard(); -} - -void ConstPool::EmitEntries() { - DCHECK(IsAligned(assm_->pc_offset(), 8)); - - // Emit entries. - for (const auto& entry : entries_) { - for (const auto& pc : entry.second) { - Instruction* instr = assm_->InstructionAt(pc); - - // Instruction to patch must be 'ldr rd, [pc, #offset]' with offset == 0. - DCHECK(instr->IsLdrLiteral() && instr->ImmLLiteral() == 0); - instr->SetImmPCOffsetTarget(assm_->options(), assm_->pc()); - } - - assm_->dc64(entry.first); - } - Clear(); -} - // Assembler Assembler::Assembler(const AssemblerOptions& options, std::unique_ptr buffer) : AssemblerBase(options, std::move(buffer)), - constpool_(this), - unresolved_branches_() { - const_pool_blocked_nesting_ = 0; + unresolved_branches_(), + constpool_(this) { veneer_pool_blocked_nesting_ = 0; Reset(); } Assembler::~Assembler() { DCHECK(constpool_.IsEmpty()); - DCHECK_EQ(const_pool_blocked_nesting_, 0); DCHECK_EQ(veneer_pool_blocked_nesting_, 0); } @@ -533,7 +339,6 @@ void Assembler::AbortedCodeGeneration() { constpool_.Clear(); } void Assembler::Reset() { #ifdef DEBUG DCHECK((pc_ >= buffer_start_) && (pc_ < buffer_start_ + buffer_->size())); - DCHECK_EQ(const_pool_blocked_nesting_, 0); DCHECK_EQ(veneer_pool_blocked_nesting_, 0); DCHECK(unresolved_branches_.empty()); memset(buffer_start_, 0, pc_ - buffer_start_); @@ -541,9 +346,7 @@ void Assembler::Reset() { pc_ = buffer_start_; reloc_info_writer.Reposition(buffer_start_ + buffer_->size(), pc_); constpool_.Clear(); - next_constant_pool_check_ = 0; next_veneer_pool_check_ = kMaxInt; - no_const_pool_before_ = 0; } void Assembler::AllocateAndInstallRequestedHeapObjects(Isolate* isolate) { @@ -554,14 +357,16 @@ void Assembler::AllocateAndInstallRequestedHeapObjects(Isolate* isolate) { case HeapObjectRequest::kHeapNumber: { Handle object = isolate->factory()->NewHeapNumber( request.heap_number(), AllocationType::kOld); - set_target_address_at(pc, 0 /* unused */, object.address()); + EmbeddedObjectIndex index = AddEmbeddedObject(object); + set_embedded_object_index_referenced_from(pc, index); break; } case HeapObjectRequest::kStringConstant: { const StringConstantBase* str = request.string(); CHECK_NOT_NULL(str); - set_target_address_at(pc, 0 /* unused */, - str->AllocateStringConstant(isolate).address()); + EmbeddedObjectIndex index = + AddEmbeddedObject(str->AllocateStringConstant(isolate)); + set_embedded_object_index_referenced_from(pc, index); break; } } @@ -572,7 +377,7 @@ void Assembler::GetCode(Isolate* isolate, CodeDesc* desc, SafepointTableBuilder* safepoint_table_builder, int handler_table_offset) { // Emit constant pool if necessary. 
- CheckConstPool(true, false); + ForceConstantPoolEmissionWithoutJump(); DCHECK(constpool_.IsEmpty()); int code_comments_size = WriteCodeComments(); @@ -870,32 +675,6 @@ void Assembler::DeleteUnresolvedBranchInfoForLabel(Label* label) { } } -void Assembler::StartBlockConstPool() { - if (const_pool_blocked_nesting_++ == 0) { - // Prevent constant pool checks happening by setting the next check to - // the biggest possible offset. - next_constant_pool_check_ = kMaxInt; - } -} - -void Assembler::EndBlockConstPool() { - if (--const_pool_blocked_nesting_ == 0) { - // Check the constant pool hasn't been blocked for too long. - DCHECK(pc_offset() < constpool_.MaxPcOffset()); - // Two cases: - // * no_const_pool_before_ >= next_constant_pool_check_ and the emission is - // still blocked - // * no_const_pool_before_ < next_constant_pool_check_ and the next emit - // will trigger a check. - next_constant_pool_check_ = no_const_pool_before_; - } -} - -bool Assembler::is_const_pool_blocked() const { - return (const_pool_blocked_nesting_ > 0) || - (pc_offset() < no_const_pool_before_); -} - bool Assembler::IsConstantPoolAt(Instruction* instr) { // The constant pool marker is made of two instructions. These instructions // will never be emitted by the JIT, so checking for the first one is enough: @@ -1497,6 +1276,7 @@ Operand Operand::EmbeddedStringConstant(const StringConstantBase* str) { void Assembler::ldr(const CPURegister& rt, const Operand& operand) { if (operand.IsHeapObjectRequest()) { + BlockPoolsScope no_pool_before_ldr_of_heap_object_request(this); RequestHeapObject(operand.heap_object_request()); ldr(rt, operand.immediate_for_heap_object_request()); } else { @@ -1505,11 +1285,8 @@ void Assembler::ldr(const CPURegister& rt, const Operand& operand) { } void Assembler::ldr(const CPURegister& rt, const Immediate& imm) { - // Currently we only support 64-bit literals. - DCHECK(rt.Is64Bits()); - + BlockPoolsScope no_pool_before_ldr_pcrel_instr(this); RecordRelocInfo(imm.rmode(), imm.value()); - BlockConstPoolFor(1); // The load will be patched when the constpool is emitted, patching code // expect a load literal with offset 0. ldr_pcrel(rt, 0); @@ -3679,6 +3456,7 @@ void Assembler::dup(const VRegister& vd, const VRegister& vn, int vn_index) { } void Assembler::dcptr(Label* label) { + BlockPoolsScope no_pool_inbetween(this); RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE); if (label->is_bound()) { // The label is bound, so it does not need to be updated and the internal @@ -4471,8 +4249,10 @@ void Assembler::GrowBuffer() { // Relocate internal references. for (auto pos : internal_reference_positions_) { - intptr_t* p = reinterpret_cast(buffer_start_ + pos); - *p += pc_delta; + Address address = reinterpret_cast(buffer_start_) + pos; + intptr_t internal_ref = ReadUnalignedValue(address); + internal_ref += pc_delta; + WriteUnalignedValue(address, internal_ref); } // Pending relocation entries are also relative, no need to relocate. @@ -4492,17 +4272,31 @@ void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data, RelocInfo::IsConstPool(rmode) || RelocInfo::IsVeneerPool(rmode)); // These modes do not need an entry in the constant pool. } else if (constant_pool_mode == NEEDS_POOL_ENTRY) { - bool new_constpool_entry = constpool_.RecordEntry(data, rmode); - // Make sure the constant pool is not emitted in place of the next - // instruction for which we just recorded relocation info. 
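// The GrowBuffer() hunk above stops dereferencing a possibly misaligned
// intptr_t* and instead patches internal references through unaligned reads
// and writes. A self-contained sketch of that fixup, assuming absolute
// internal references live in the instruction stream and must be shifted by
// pc_delta whenever the buffer moves (ReadUnaligned/WriteUnaligned stand in
// for V8's base::ReadUnalignedValue/WriteUnalignedValue):
#include <cstdint>
#include <cstring>

template <typename T>
T ReadUnaligned(const uint8_t* p) {
  T v;
  std::memcpy(&v, p, sizeof(T));  // well-defined for any alignment
  return v;
}
template <typename T>
void WriteUnaligned(uint8_t* p, T v) { std::memcpy(p, &v, sizeof(T)); }

void RelocateInternalRefs(uint8_t* buffer, const int* positions, int count,
                          intptr_t pc_delta) {
  for (int i = 0; i < count; ++i) {
    intptr_t ref = ReadUnaligned<intptr_t>(buffer + positions[i]);
    WriteUnaligned<intptr_t>(buffer + positions[i], ref + pc_delta);
  }
}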
-    BlockConstPoolFor(1);
-    if (!new_constpool_entry) return;
+    if (RelocInfo::IsEmbeddedObjectMode(rmode)) {
+      Handle<HeapObject> handle(reinterpret_cast<Address*>(data));
+      data = AddEmbeddedObject(handle);
+    }
+    if (rmode == RelocInfo::COMPRESSED_EMBEDDED_OBJECT) {
+      if (constpool_.RecordEntry(static_cast<uint32_t>(data), rmode) ==
+          RelocInfoStatus::kMustOmitForDuplicate) {
+        return;
+      }
+    } else {
+      if (constpool_.RecordEntry(static_cast<uint64_t>(data), rmode) ==
+          RelocInfoStatus::kMustOmitForDuplicate) {
+        return;
+      }
+    }
   }

   // For modes that cannot use the constant pool, a different sequence of
   // instructions will be emitted by this function's caller.
   if (!ShouldRecordRelocInfo(rmode)) return;

+  // Callers should ensure that constant pool emission is blocked until the
+  // instruction the reloc info is associated with has been emitted.
+  DCHECK(constpool_.IsBlocked());
+
   // We do not try to reuse pool constants.
   RelocInfo rinfo(reinterpret_cast<Address>
(pc_), rmode, data, Code()); @@ -4511,103 +4305,127 @@ void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data, } void Assembler::near_jump(int offset, RelocInfo::Mode rmode) { + BlockPoolsScope no_pool_before_b_instr(this); if (!RelocInfo::IsNone(rmode)) RecordRelocInfo(rmode, offset, NO_POOL_ENTRY); b(offset); } void Assembler::near_call(int offset, RelocInfo::Mode rmode) { + BlockPoolsScope no_pool_before_bl_instr(this); if (!RelocInfo::IsNone(rmode)) RecordRelocInfo(rmode, offset, NO_POOL_ENTRY); bl(offset); } void Assembler::near_call(HeapObjectRequest request) { + BlockPoolsScope no_pool_before_bl_instr(this); RequestHeapObject(request); - int index = AddCodeTarget(Handle()); + EmbeddedObjectIndex index = AddEmbeddedObject(Handle()); RecordRelocInfo(RelocInfo::CODE_TARGET, index, NO_POOL_ENTRY); - bl(index); + DCHECK(is_int32(index)); + bl(static_cast(index)); } -void Assembler::BlockConstPoolFor(int instructions) { - int pc_limit = pc_offset() + instructions * kInstrSize; - if (no_const_pool_before_ < pc_limit) { - no_const_pool_before_ = pc_limit; - // Make sure the pool won't be blocked for too long. - DCHECK(pc_limit < constpool_.MaxPcOffset()); - } +// Constant Pool - if (next_constant_pool_check_ < no_const_pool_before_) { - next_constant_pool_check_ = no_const_pool_before_; - } +void ConstantPool::EmitPrologue(Alignment require_alignment) { + // Recorded constant pool size is expressed in number of 32-bits words, + // and includes prologue and alignment, but not the jump around the pool + // and the size of the marker itself. + const int marker_size = 1; + int word_count = + ComputeSize(Jump::kOmitted, require_alignment) / kInt32Size - marker_size; + assm_->Emit(LDR_x_lit | Assembler::ImmLLiteral(word_count) | + Assembler::Rt(xzr)); + assm_->EmitPoolGuard(); } -void Assembler::CheckConstPool(bool force_emit, bool require_jump) { - // Some short sequence of instruction mustn't be broken up by constant pool - // emission, such sequences are protected by calls to BlockConstPoolFor and - // BlockConstPoolScope. - if (is_const_pool_blocked()) { - // Something is wrong if emission is forced and blocked at the same time. - DCHECK(!force_emit); - return; - } +int ConstantPool::PrologueSize(Jump require_jump) const { + // Prologue is: + // b over ;; if require_jump + // ldr xzr, #pool_size + // blr xzr + int prologue_size = require_jump == Jump::kRequired ? kInstrSize : 0; + prologue_size += 2 * kInstrSize; + return prologue_size; +} - // There is nothing to do if there are no pending constant pool entries. - if (constpool_.IsEmpty()) { - // Calculate the offset of the next check. - SetNextConstPoolCheckIn(kCheckConstPoolInterval); - return; - } +void ConstantPool::SetLoadOffsetToConstPoolEntry(int load_offset, + Instruction* entry_offset, + const ConstantPoolKey& key) { + Instruction* instr = assm_->InstructionAt(load_offset); + // Instruction to patch must be 'ldr rd, [pc, #offset]' with offset == 0. + DCHECK(instr->IsLdrLiteral() && instr->ImmLLiteral() == 0); + instr->SetImmPCOffsetTarget(assm_->options(), entry_offset); +} - // We emit a constant pool when: - // * requested to do so by parameter force_emit (e.g. after each function). - // * the distance to the first instruction accessing the constant pool is - // kApproxMaxDistToConstPool or more. - // * the number of entries in the pool is kApproxMaxPoolEntryCount or more. 
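// The deleted lines that follow implemented exactly those criteria. Restated
// as a standalone predicate (the function name is ours; the thresholds are
// the old assembler-arm64.h constants):
constexpr int kApproxMaxDistToConstPoolSketch = 64 * 1024;  // 64 KB
constexpr int kApproxMaxPoolEntryCountSketch = 512;

bool ShouldEmitOldStylePool(bool force_emit, int dist_to_first_use,
                            int entry_count) {
  // Emit when forced (e.g. at the end of a function), when the oldest
  // pc-relative user is drifting toward its reach limit, or when the pool
  // has grown large enough that emitting now keeps entries near their users.
  return force_emit || dist_to_first_use >= kApproxMaxDistToConstPoolSketch ||
         entry_count >= kApproxMaxPoolEntryCountSketch;
}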
- int dist = constpool_.DistanceToFirstUse(); - int count = constpool_.EntryCount(); - if (!force_emit && (dist < kApproxMaxDistToConstPool) && - (count < kApproxMaxPoolEntryCount)) { +void ConstantPool::Check(Emission force_emit, Jump require_jump, + size_t margin) { + // Some short sequence of instruction must not be broken up by constant pool + // emission, such sequences are protected by a ConstPool::BlockScope. + if (IsBlocked()) { + // Something is wrong if emission is forced and blocked at the same time. + DCHECK_EQ(force_emit, Emission::kIfNeeded); return; } - // Emit veneers for branches that would go out of range during emission of the - // constant pool. - int worst_case_size = constpool_.WorstCaseSize(); - CheckVeneerPool(false, require_jump, kVeneerDistanceMargin + worst_case_size); + // We emit a constant pool only if : + // * it is not empty + // * emission is forced by parameter force_emit (e.g. at function end). + // * emission is mandatory or opportune according to {ShouldEmitNow}. + if (!IsEmpty() && (force_emit == Emission::kForced || + ShouldEmitNow(require_jump, margin))) { + // Emit veneers for branches that would go out of range during emission of + // the constant pool. + int worst_case_size = ComputeSize(Jump::kRequired, Alignment::kRequired); + assm_->CheckVeneerPool(false, require_jump == Jump::kRequired, + assm_->kVeneerDistanceMargin + worst_case_size + + static_cast(margin)); + + // Check that the code buffer is large enough before emitting the constant + // pool (this includes the gap to the relocation information). + int needed_space = worst_case_size + assm_->kGap; + while (assm_->buffer_space() <= needed_space) { + assm_->GrowBuffer(); + } - // Check that the code buffer is large enough before emitting the constant - // pool (this includes the gap to the relocation information). - int needed_space = worst_case_size + kGap + 1 * kInstrSize; - while (buffer_space() <= needed_space) { - GrowBuffer(); + EmitAndClear(require_jump); } - - Label size_check; - bind(&size_check); - constpool_.Emit(require_jump); - DCHECK(SizeOfCodeGeneratedSince(&size_check) <= - static_cast(worst_case_size)); - - // Since a constant pool was just emitted, move the check offset forward by + // Since a constant pool is (now) empty, move the check offset forward by // the standard interval. - SetNextConstPoolCheckIn(kCheckConstPoolInterval); + SetNextCheckIn(ConstantPool::kCheckInterval); } -bool Assembler::ShouldEmitVeneer(int max_reachable_pc, int margin) { +// Pool entries are accessed with pc relative load therefore this cannot be more +// than 1 * MB. Since constant pool emission checks are interval based, and we +// want to keep entries close to the code, we try to emit every 64KB. +const size_t ConstantPool::kMaxDistToPool32 = 1 * MB; +const size_t ConstantPool::kMaxDistToPool64 = 1 * MB; +const size_t ConstantPool::kCheckInterval = 128 * kInstrSize; +const size_t ConstantPool::kApproxDistToPool32 = 64 * KB; +const size_t ConstantPool::kApproxDistToPool64 = kApproxDistToPool32; + +const size_t ConstantPool::kOpportunityDistToPool32 = 64 * KB; +const size_t ConstantPool::kOpportunityDistToPool64 = 64 * KB; +const size_t ConstantPool::kApproxMaxEntryCount = 512; + +bool Assembler::ShouldEmitVeneer(int max_reachable_pc, size_t margin) { // Account for the branch around the veneers and the guard. 
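// An aside on the constants defined just above, before the body of
// ShouldEmitVeneer() continues below: the hard 1 MB bounds follow from the
// AArch64 LDR (literal) encoding, whose signed 19-bit offset counts 4-byte
// words, giving a reach of +/- 2^18 words. Checked arithmetic (names ours):
#include <cstddef>

constexpr int kImmLLiteralBitsSketch = 19;       // signed word-offset field
constexpr int kLoadLiteralScaleLog2Sketch = 2;   // words -> bytes
constexpr size_t kMaxLiteralReachSketch =
    (size_t{1} << (kImmLLiteralBitsSketch - 1)) << kLoadLiteralScaleLog2Sketch;

static_assert(kMaxLiteralReachSketch == size_t{1} * 1024 * 1024,
              "pc-relative literal loads reach +/- 1 MiB");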
  int protection_offset = 2 * kInstrSize;
-  return pc_offset() >
-         max_reachable_pc - margin - protection_offset -
-             static_cast<int>(unresolved_branches_.size() * kMaxVeneerCodeSize);
+  return static_cast<intptr_t>(pc_offset() + margin + protection_offset +
+                               unresolved_branches_.size() *
+                                   kMaxVeneerCodeSize) >= max_reachable_pc;
 }

 void Assembler::RecordVeneerPool(int location_offset, int size) {
+  Assembler::BlockPoolsScope block_pools(this, PoolEmissionCheck::kSkip);
   RelocInfo rinfo(reinterpret_cast<Address>
(buffer_start_) + location_offset, RelocInfo::VENEER_POOL, static_cast(size), Code()); reloc_info_writer.Write(&rinfo); } -void Assembler::EmitVeneers(bool force_emit, bool need_protection, int margin) { - BlockPoolsScope scope(this); +void Assembler::EmitVeneers(bool force_emit, bool need_protection, + size_t margin) { + BlockPoolsScope scope(this, PoolEmissionCheck::kSkip); RecordComment("[ Veneers"); // The exact size of the veneer pool must be recorded (see the comment at the @@ -4677,7 +4495,7 @@ void Assembler::EmitVeneers(bool force_emit, bool need_protection, int margin) { } void Assembler::CheckVeneerPool(bool force_emit, bool require_jump, - int margin) { + size_t margin) { // There is nothing to do if there are no pending veneer pool entries. if (unresolved_branches_.empty()) { DCHECK_EQ(next_veneer_pool_check_, kMaxInt); @@ -4713,6 +4531,7 @@ int Assembler::buffer_space() const { void Assembler::RecordConstPool(int size) { // We only need this for debugger support, to correctly compute offsets in the // code. + Assembler::BlockPoolsScope block_pools(this); RecordRelocInfo(RelocInfo::CONST_POOL, static_cast(size)); } diff --git a/deps/v8/src/codegen/arm64/assembler-arm64.h b/deps/v8/src/codegen/arm64/assembler-arm64.h index 04cd4222417f5a..6a6bf633c13ec6 100644 --- a/deps/v8/src/codegen/arm64/assembler-arm64.h +++ b/deps/v8/src/codegen/arm64/assembler-arm64.h @@ -35,7 +35,8 @@ class SafepointTableBuilder; class Immediate { public: template - inline explicit Immediate(Handle handle); + inline explicit Immediate( + Handle handle, RelocInfo::Mode mode = RelocInfo::FULL_EMBEDDED_OBJECT); // This is allowed to be an implicit constructor because Immediate is // a wrapper class that doesn't normally perform any type conversion. @@ -49,8 +50,6 @@ class Immediate { RelocInfo::Mode rmode() const { return rmode_; } private: - V8_EXPORT_PRIVATE void InitializeHandle(Handle value); - int64_t value_; RelocInfo::Mode rmode_; }; @@ -85,9 +84,6 @@ class Operand { inline HeapObjectRequest heap_object_request() const; inline Immediate immediate_for_heap_object_request() const; - template - inline explicit Operand(Handle handle); - // Implicit constructor for all int types, ExternalReference, and Smi. template inline Operand(T t); // NOLINT(runtime/explicit) @@ -174,60 +170,6 @@ class MemOperand { unsigned shift_amount_; }; -class ConstPool { - public: - explicit ConstPool(Assembler* assm) : assm_(assm), first_use_(-1) {} - // Returns true when we need to write RelocInfo and false when we do not. - bool RecordEntry(intptr_t data, RelocInfo::Mode mode); - int EntryCount() const { return static_cast(entries_.size()); } - bool IsEmpty() const { return entries_.empty(); } - // Distance in bytes between the current pc and the first instruction - // using the pool. If there are no pending entries return kMaxInt. - int DistanceToFirstUse(); - // Offset after which instructions using the pool will be out of range. - int MaxPcOffset(); - // Maximum size the constant pool can be with current entries. It always - // includes alignment padding and branch over. - int WorstCaseSize(); - // Size in bytes of the literal pool *if* it is emitted at the current - // pc. The size will include the branch over the pool if it was requested. - int SizeIfEmittedAtCurrentPc(bool require_jump); - // Emit the literal pool at the current pc with a branch over the pool if - // requested. - void Emit(bool require_jump); - // Discard any pending pool entries. 
- void Clear(); - - private: - void EmitMarker(); - void EmitGuard(); - void EmitEntries(); - - using SharedEntryMap = std::map; - // Adds a shared entry to entries_, using 'entry_map' to determine whether we - // already track this entry. Returns true if this is the first time we add - // this entry, false otherwise. - bool AddSharedEntry(SharedEntryMap& entry_map, uint64_t data, int offset); - - Assembler* assm_; - // Keep track of the first instruction requiring a constant pool entry - // since the previous constant pool was emitted. - int first_use_; - - // Map of data to index in entries_ for shared entries. - SharedEntryMap shared_entries_; - - // Map of address of handle to index in entries_. We need to keep track of - // code targets separately from other shared entries, as they can be - // relocated. - SharedEntryMap handle_to_index_map_; - - // Values, pc offset(s) of entries. Use a vector to preserve the order of - // insertion, as the serializer expects code target RelocInfo to point to - // constant pool addresses in an ascending order. - std::vector > > entries_; -}; - // ----------------------------------------------------------------------------- // Assembler. @@ -312,15 +254,26 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase { // Read/Modify the code target address in the branch/call instruction at pc. // The isolate argument is unused (and may be nullptr) when skipping flushing. inline static Address target_address_at(Address pc, Address constant_pool); + + // Read/Modify the code target address in the branch/call instruction at pc. + inline static Tagged_t target_compressed_address_at(Address pc, + Address constant_pool); inline static void set_target_address_at( Address pc, Address constant_pool, Address target, ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED); + inline static void set_target_compressed_address_at( + Address pc, Address constant_pool, Tagged_t target, + ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED); + // Returns the handle for the code object called at 'pc'. // This might need to be temporarily encoded as an offset into code_targets_. inline Handle code_target_object_handle_at(Address pc); - - inline Handle compressed_embedded_object_handle_at(Address pc); + inline EmbeddedObjectIndex embedded_object_index_referenced_from(Address pc); + inline void set_embedded_object_index_referenced_from( + Address p, EmbeddedObjectIndex index); + // Returns the handle for the heap object referenced at 'pc'. + inline Handle target_object_handle_at(Address pc); // Returns the target address for a runtime function for the call encoded // at 'pc'. @@ -371,16 +324,6 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase { return SizeOfCodeGeneratedSince(label) / kInstrSize; } - // Prevent contant pool emission until EndBlockConstPool is called. - // Call to this function can be nested but must be followed by an equal - // number of calls to EndBlockConstpool. - void StartBlockConstPool(); - - // Resume constant pool emission. Need to be called as many time as - // StartBlockConstPool to have an effect. - void EndBlockConstPool(); - - bool is_const_pool_blocked() const; static bool IsConstantPoolAt(Instruction* instr); static int ConstantPoolSizeAt(Instruction* instr); // See Assembler::CheckConstPool for more info. @@ -399,16 +342,6 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase { return veneer_pool_blocked_nesting_ > 0; } - // Block/resume emission of constant pools and veneer pools. 
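// The ConstPool internals deleted above paired a lookup map with a plain
// vector: the map deduplicates by value, while the vector preserves
// insertion order, since the serializer expects pool RelocInfo in ascending
// address order. A stripped-down sketch of that shape (PoolSketch is ours):
#include <cstdint>
#include <map>
#include <utility>
#include <vector>

struct PoolSketch {
  std::map<uint64_t, int> shared;  // value -> index into entries
  std::vector<std::pair<uint64_t, std::vector<int>>> entries;  // in order

  // Returns true the first time a value is seen (RelocInfo must be written);
  // false when an existing entry merely gains another patch site.
  bool Record(uint64_t data, int pc_offset) {
    auto it = shared.find(data);
    if (it != shared.end()) {
      entries[it->second].second.push_back(pc_offset);
      return false;
    }
    shared[data] = static_cast<int>(entries.size());
    entries.push_back({data, {pc_offset}});
    return true;
  }
};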
- void StartBlockPools() { - StartBlockConstPool(); - StartBlockVeneerPool(); - } - void EndBlockPools() { - EndBlockConstPool(); - EndBlockVeneerPool(); - } - // Record a deoptimization reason that can be used by a log or cpu profiler. // Use --trace-deopt to enable. void RecordDeoptReason(DeoptimizeReason reason, SourcePosition position, @@ -2120,8 +2053,6 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase { // Code generation helpers -------------------------------------------------- - bool IsConstPoolEmpty() const { return constpool_.IsEmpty(); } - Instruction* pc() const { return Instruction::Cast(pc_); } Instruction* InstructionAt(ptrdiff_t offset) const { @@ -2405,31 +2336,26 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase { // FP register type. inline static Instr FPType(VRegister fd); - // Class for scoping postponing the constant pool generation. - class BlockConstPoolScope { - public: - explicit BlockConstPoolScope(Assembler* assem) : assem_(assem) { - assem_->StartBlockConstPool(); - } - ~BlockConstPoolScope() { assem_->EndBlockConstPool(); } - - private: - Assembler* assem_; - - DISALLOW_IMPLICIT_CONSTRUCTORS(BlockConstPoolScope); - }; - // Unused on this architecture. void MaybeEmitOutOfLineConstantPool() {} - // Check if is time to emit a constant pool. - void CheckConstPool(bool force_emit, bool require_jump); + void ForceConstantPoolEmissionWithoutJump() { + constpool_.Check(Emission::kForced, Jump::kOmitted); + } + void ForceConstantPoolEmissionWithJump() { + constpool_.Check(Emission::kForced, Jump::kRequired); + } + // Check if the const pool needs to be emitted while pretending that {margin} + // more bytes of instructions have already been emitted. + void EmitConstPoolWithJumpIfNeeded(size_t margin = 0) { + constpool_.Check(Emission::kIfNeeded, Jump::kRequired, margin); + } // Returns true if we should emit a veneer as soon as possible for a branch // which can at most reach to specified pc. bool ShouldEmitVeneer(int max_reachable_pc, - int margin = kVeneerDistanceMargin); - bool ShouldEmitVeneers(int margin = kVeneerDistanceMargin) { + size_t margin = kVeneerDistanceMargin); + bool ShouldEmitVeneers(size_t margin = kVeneerDistanceMargin) { return ShouldEmitVeneer(unresolved_branches_first_limit(), margin); } @@ -2443,23 +2369,34 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase { // If need_protection is true, the veneers are protected by a branch jumping // over the code. void EmitVeneers(bool force_emit, bool need_protection, - int margin = kVeneerDistanceMargin); + size_t margin = kVeneerDistanceMargin); void EmitVeneersGuard() { EmitPoolGuard(); } // Checks whether veneers need to be emitted at this point. // If force_emit is set, a veneer is generated for *all* unresolved branches. void CheckVeneerPool(bool force_emit, bool require_jump, - int margin = kVeneerDistanceMargin); + size_t margin = kVeneerDistanceMargin); + + using BlockConstPoolScope = ConstantPool::BlockScope; class BlockPoolsScope { public: - explicit BlockPoolsScope(Assembler* assem) : assem_(assem) { - assem_->StartBlockPools(); + // Block veneer and constant pool. Emits pools if necessary to ensure that + // {margin} more bytes can be emitted without triggering pool emission. 
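// The constructor below realizes that contract in two phases. A generic
// sketch of the idea (PoolBlockerSketch is ours, not V8's API): first flush
// any pool that {margin} more bytes could trigger, then hold a nesting count
// so a fixed-size instruction sequence can never be split by pool emission.
struct PoolBlockerSketch {
  struct Pools {
    int nesting = 0;
    void EmitIfNeededWithin(int /*margin*/) {
      // Flush pending pools here if emitting {margin} bytes would push a
      // pending entry or branch past its reachable range.
    }
  };
  Pools* pools_;
  explicit PoolBlockerSketch(Pools* pools, int margin = 0) : pools_(pools) {
    pools_->EmitIfNeededWithin(margin);  // phase 1: make room up front
    ++pools_->nesting;                   // phase 2: block emission
  }
  ~PoolBlockerSketch() { --pools_->nesting; }
  PoolBlockerSketch(const PoolBlockerSketch&) = delete;
  PoolBlockerSketch& operator=(const PoolBlockerSketch&) = delete;
};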
+ explicit BlockPoolsScope(Assembler* assem, size_t margin = 0) + : assem_(assem), block_const_pool_(assem, margin) { + assem_->CheckVeneerPool(false, true, margin); + assem_->StartBlockVeneerPool(); + } + + BlockPoolsScope(Assembler* assem, PoolEmissionCheck check) + : assem_(assem), block_const_pool_(assem, check) { + assem_->StartBlockVeneerPool(); } - ~BlockPoolsScope() { assem_->EndBlockPools(); } + ~BlockPoolsScope() { assem_->EndBlockVeneerPool(); } private: Assembler* assem_; - + BlockConstPoolScope block_const_pool_; DISALLOW_IMPLICIT_CONSTRUCTORS(BlockPoolsScope); }; @@ -2622,15 +2559,6 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase { // Verify that a label's link chain is intact. void CheckLabelLinkChain(Label const* label); - // Postpone the generation of the constant pool for the specified number of - // instructions. - void BlockConstPoolFor(int instructions); - - // Set how far from current pc the next constant pool check will be. - void SetNextConstPoolCheckIn(int instructions) { - next_constant_pool_check_ = pc_offset() + instructions * kInstrSize; - } - // Emit the instruction at pc_. void Emit(Instr instruction) { STATIC_ASSERT(sizeof(*pc_) == 1); @@ -2658,40 +2586,6 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase { void CheckBufferSpace(); void CheckBuffer(); - // Pc offset of the next constant pool check. - int next_constant_pool_check_; - - // Constant pool generation - // Pools are emitted in the instruction stream. They are emitted when: - // * the distance to the first use is above a pre-defined distance or - // * the numbers of entries in the pool is above a pre-defined size or - // * code generation is finished - // If a pool needs to be emitted before code generation is finished a branch - // over the emitted pool will be inserted. - - // Constants in the pool may be addresses of functions that gets relocated; - // if so, a relocation info entry is associated to the constant pool entry. - - // Repeated checking whether the constant pool should be emitted is rather - // expensive. By default we only check again once a number of instructions - // has been generated. That also means that the sizing of the buffers is not - // an exact science, and that we rely on some slop to not overrun buffers. - static constexpr int kCheckConstPoolInterval = 128; - - // Distance to first use after a which a pool will be emitted. Pool entries - // are accessed with pc relative load therefore this cannot be more than - // 1 * MB. Since constant pool emission checks are interval based this value - // is an approximation. - static constexpr int kApproxMaxDistToConstPool = 64 * KB; - - // Number of pool entries after which a pool will be emitted. Since constant - // pool emission checks are interval based this value is an approximation. - static constexpr int kApproxMaxPoolEntryCount = 512; - - // Emission of the constant pool may be blocked in some code sequences. - int const_pool_blocked_nesting_; // Block emission if this is not zero. - int no_const_pool_before_; // Block emission before this pc offset. - // Emission of the veneer pools may be blocked in some code sequences. int veneer_pool_blocked_nesting_; // Block emission if this is not zero. @@ -2705,16 +2599,6 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase { // are already bound. 
std::deque internal_reference_positions_; - // Relocation info records are also used during code generation as temporary - // containers for constants and code target addresses until they are emitted - // to the constant pool. These pending relocation info records are temporarily - // stored in a separate buffer until a constant pool is emitted. - // If every instruction in a long sequence is accessing the pool, we need one - // pending relocation entry per instruction. - - // The pending constant pool. - ConstPool constpool_; - protected: // Code generation // The relocation writer's position is at least kGap bytes below the end of @@ -2727,17 +2611,18 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase { public: #ifdef DEBUG // Functions used for testing. - int GetConstantPoolEntriesSizeForTesting() const { + size_t GetConstantPoolEntriesSizeForTesting() const { // Do not include branch over the pool. - return constpool_.EntryCount() * kSystemPointerSize; + return constpool_.Entry32Count() * kInt32Size + + constpool_.Entry64Count() * kInt64Size; } - static constexpr int GetCheckConstPoolIntervalForTesting() { - return kCheckConstPoolInterval; + static size_t GetCheckConstPoolIntervalForTesting() { + return ConstantPool::kCheckInterval; } - static constexpr int GetApproxMaxDistToConstPoolForTesting() { - return kApproxMaxDistToConstPool; + static size_t GetApproxMaxDistToConstPoolForTesting() { + return ConstantPool::kApproxDistToPool64; } #endif @@ -2779,7 +2664,7 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase { DCHECK(!unresolved_branches_.empty()); return unresolved_branches_.begin()->first; } - // This is similar to next_constant_pool_check_ and helps reduce the overhead + // This PC-offset of the next veneer pool check helps reduce the overhead // of checking for veneer pools. // It is maintained to the closest unresolved branch limit minus the maximum // veneer margin (or kMaxInt if there are no unresolved branches). @@ -2804,8 +2689,11 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase { int WriteCodeComments(); + // The pending constant pool. + ConstantPool constpool_; + friend class EnsureSpace; - friend class ConstPool; + friend class ConstantPool; }; class PatchingAssembler : public Assembler { @@ -2822,19 +2710,12 @@ class PatchingAssembler : public Assembler { PatchingAssembler(const AssemblerOptions& options, byte* start, unsigned count) : Assembler(options, - ExternalAssemblerBuffer(start, count * kInstrSize + kGap)) { - // Block constant pool emission. - StartBlockPools(); - } + ExternalAssemblerBuffer(start, count * kInstrSize + kGap)), + block_constant_pool_emission_scope(this) {} ~PatchingAssembler() { - // Const pool should still be blocked. - DCHECK(is_const_pool_blocked()); - EndBlockPools(); // Verify we have generated the number of instruction we expected. DCHECK_EQ(pc_offset() + kGap, buffer_->size()); - // Verify no relocation information has been emitted. - DCHECK(IsConstPoolEmpty()); } // See definition of PatchAdrFar() for details. 
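// In the next hunk, EnsureSpace gains a BlockPoolsScope member that is
// initialized before the constructor body runs, so pools are checked and
// blocked before CheckBufferSpace() may grow the buffer. C++ member
// initialization order is what sequences the two steps; a minimal sketch
// (all names ours):
struct AssemblerishSketch {
  void CheckBufferSpace() { /* grow the buffer if less than kGap remains */ }
};
struct BlockScopeSketch {
  explicit BlockScopeSketch(AssemblerishSketch*) {
    // Check pools against the upcoming emission, then block them.
  }
};
struct EnsureSpaceSketch {
  // Declared first, hence constructed before the ctor body executes.
  BlockScopeSketch block_;
  explicit EnsureSpaceSketch(AssemblerishSketch* a) : block_(a) {
    a->CheckBufferSpace();  // runs with pools already blocked
  }
};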
@@ -2842,11 +2723,19 @@ class PatchingAssembler : public Assembler { static constexpr int kAdrFarPatchableNInstrs = kAdrFarPatchableNNops + 2; void PatchAdrFar(int64_t target_offset); void PatchSubSp(uint32_t immediate); + + private: + BlockPoolsScope block_constant_pool_emission_scope; }; class EnsureSpace { public: - explicit EnsureSpace(Assembler* assembler) { assembler->CheckBufferSpace(); } + explicit EnsureSpace(Assembler* assembler) : block_pools_scope_(assembler) { + assembler->CheckBufferSpace(); + } + + private: + Assembler::BlockPoolsScope block_pools_scope_; }; } // namespace internal diff --git a/deps/v8/src/codegen/arm64/constants-arm64.h b/deps/v8/src/codegen/arm64/constants-arm64.h index eb3fb3a6be36ee..a1e962452b7cb2 100644 --- a/deps/v8/src/codegen/arm64/constants-arm64.h +++ b/deps/v8/src/codegen/arm64/constants-arm64.h @@ -32,8 +32,8 @@ constexpr size_t kMaxPCRelativeCodeRangeInMB = 128; constexpr uint8_t kInstrSize = 4; constexpr uint8_t kInstrSizeLog2 = 2; -constexpr size_t kLoadLiteralScaleLog2 = 2; -constexpr size_t kMaxLoadLiteralRange = 1 * MB; +constexpr uint8_t kLoadLiteralScaleLog2 = 2; +constexpr int kMaxLoadLiteralRange = 1 * MB; const int kNumberOfRegisters = 32; const int kNumberOfVRegisters = 32; diff --git a/deps/v8/src/codegen/arm64/cpu-arm64.cc b/deps/v8/src/codegen/arm64/cpu-arm64.cc index e0ab5899141072..32bcc6f268ea10 100644 --- a/deps/v8/src/codegen/arm64/cpu-arm64.cc +++ b/deps/v8/src/codegen/arm64/cpu-arm64.cc @@ -15,7 +15,7 @@ namespace internal { class CacheLineSizes { public: CacheLineSizes() { -#if !defined(V8_HOST_ARCH_ARM64) || defined(V8_OS_WIN) +#if !defined(V8_HOST_ARCH_ARM64) || defined(V8_OS_WIN) || defined(__APPLE__) cache_type_register_ = 0; #else // Copy the content of the cache type register to a core register. diff --git a/deps/v8/src/codegen/arm64/decoder-arm64.h b/deps/v8/src/codegen/arm64/decoder-arm64.h index 3d113eb8366b5e..7621c516ce79bd 100644 --- a/deps/v8/src/codegen/arm64/decoder-arm64.h +++ b/deps/v8/src/codegen/arm64/decoder-arm64.h @@ -95,7 +95,7 @@ class V8_EXPORT_PRIVATE DecoderVisitor { }; // A visitor that dispatches to a list of visitors. 
-class DispatchingDecoderVisitor : public DecoderVisitor { +class V8_EXPORT_PRIVATE DispatchingDecoderVisitor : public DecoderVisitor { public: DispatchingDecoderVisitor() {} virtual ~DispatchingDecoderVisitor() {} diff --git a/deps/v8/src/codegen/arm64/instructions-arm64.h b/deps/v8/src/codegen/arm64/instructions-arm64.h index 5c3cf687e75ab3..a73c3feed74396 100644 --- a/deps/v8/src/codegen/arm64/instructions-arm64.h +++ b/deps/v8/src/codegen/arm64/instructions-arm64.h @@ -203,6 +203,7 @@ class Instruction { } bool IsLdrLiteralX() const { return Mask(LoadLiteralMask) == LDR_x_lit; } + bool IsLdrLiteralW() const { return Mask(LoadLiteralMask) == LDR_w_lit; } bool IsPCRelAddressing() const { return Mask(PCRelAddressingFMask) == PCRelAddressingFixed; diff --git a/deps/v8/src/codegen/arm64/macro-assembler-arm64.cc b/deps/v8/src/codegen/arm64/macro-assembler-arm64.cc index aab9fc79a2c2c2..792a8637f698d3 100644 --- a/deps/v8/src/codegen/arm64/macro-assembler-arm64.cc +++ b/deps/v8/src/codegen/arm64/macro-assembler-arm64.cc @@ -291,8 +291,7 @@ void TurboAssembler::Mov(const Register& rd, const Operand& operand, ExternalReference reference = bit_cast(addr); IndirectLoadExternalReference(rd, reference); return; - } else if (operand.ImmediateRMode() == - RelocInfo::FULL_EMBEDDED_OBJECT) { + } else if (RelocInfo::IsEmbeddedObjectMode(operand.ImmediateRMode())) { Handle x( reinterpret_cast(operand.ImmediateValue())); IndirectLoadConstant(rd, x); @@ -1866,7 +1865,9 @@ void TurboAssembler::Jump(Handle code, RelocInfo::Mode rmode, } if (CanUseNearCallOrJump(rmode)) { - JumpHelper(static_cast(AddCodeTarget(code)), rmode, cond); + EmbeddedObjectIndex index = AddEmbeddedObject(code); + DCHECK(is_int32(index)); + JumpHelper(static_cast(index), rmode, cond); } else { Jump(code.address(), rmode, cond); } @@ -1912,7 +1913,9 @@ void TurboAssembler::Call(Handle code, RelocInfo::Mode rmode) { } if (CanUseNearCallOrJump(rmode)) { - near_call(AddCodeTarget(code), rmode); + EmbeddedObjectIndex index = AddEmbeddedObject(code); + DCHECK(is_int32(index)); + near_call(static_cast(index), rmode); } else { IndirectCall(code.address(), rmode); } @@ -1925,24 +1928,27 @@ void TurboAssembler::Call(ExternalReference target) { Call(temp); } -void TurboAssembler::CallBuiltinPointer(Register builtin_pointer) { +void TurboAssembler::LoadEntryFromBuiltinIndex(Register builtin_index) { STATIC_ASSERT(kSystemPointerSize == 8); STATIC_ASSERT(kSmiTagSize == 1); STATIC_ASSERT(kSmiTag == 0); - // The builtin_pointer register contains the builtin index as a Smi. + // The builtin_index register contains the builtin index as a Smi. // Untagging is folded into the indexing operand below. 
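// Restated in plain C++ before the two encodings below (assuming 64-bit
// system pointers, kSystemPointerSizeLog2 == 3; the helper and its names
// are ours): untagging the Smi and scaling by the 8-byte entry size fold
// into a single shift in either direction.
#include <cstdint>

constexpr int kSystemPointerSizeLog2Sketch = 3;

int64_t BuiltinEntryTableOffset(int64_t smi_encoded_index, int smi_shift) {
  // smi_shift is 1 for 31-bit Smis (tag bit only) and 32 for 32-bit Smis.
  if (smi_shift <= kSystemPointerSizeLog2Sketch) {
    // Untag and scale with one left shift: index << (3 - smi_shift).
    return smi_encoded_index << (kSystemPointerSizeLog2Sketch - smi_shift);
  }
  // Untag (>> smi_shift) and scale (<< 3) with one right shift.
  return smi_encoded_index >> (smi_shift - kSystemPointerSizeLog2Sketch);
}
// E.g. with 32-bit Smis, builtin 5 arrives as (5LL << 32) and maps to
// (5LL << 32) >> 29 == 40, i.e. 5 * 8 bytes into the entry table.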
#if defined(V8_COMPRESS_POINTERS) || defined(V8_31BIT_SMIS_ON_64BIT_ARCH) STATIC_ASSERT(kSmiShiftSize == 0); - Lsl(builtin_pointer, builtin_pointer, kSystemPointerSizeLog2 - kSmiShift); + Lsl(builtin_index, builtin_index, kSystemPointerSizeLog2 - kSmiShift); #else STATIC_ASSERT(kSmiShiftSize == 31); - Asr(builtin_pointer, builtin_pointer, kSmiShift - kSystemPointerSizeLog2); + Asr(builtin_index, builtin_index, kSmiShift - kSystemPointerSizeLog2); #endif - Add(builtin_pointer, builtin_pointer, - IsolateData::builtin_entry_table_offset()); - Ldr(builtin_pointer, MemOperand(kRootRegister, builtin_pointer)); - Call(builtin_pointer); + Add(builtin_index, builtin_index, IsolateData::builtin_entry_table_offset()); + Ldr(builtin_index, MemOperand(kRootRegister, builtin_index)); +} + +void TurboAssembler::CallBuiltinByIndex(Register builtin_index) { + LoadEntryFromBuiltinIndex(builtin_index); + Call(builtin_index); } void TurboAssembler::LoadCodeObjectEntry(Register destination, @@ -2723,7 +2729,7 @@ void TurboAssembler::DecompressAnyTagged(const Register& destination, const MemOperand& field_operand) { RecordComment("[ DecompressAnyTagged"); Ldrsw(destination, field_operand); - if (kUseBranchlessPtrDecompression) { + if (kUseBranchlessPtrDecompressionInGeneratedCode) { UseScratchRegisterScope temps(this); // Branchlessly compute |masked_root|: // masked_root = HAS_SMI_TAG(destination) ? 0 : kRootRegister; @@ -2747,7 +2753,7 @@ void TurboAssembler::DecompressAnyTagged(const Register& destination, void TurboAssembler::DecompressAnyTagged(const Register& destination, const Register& source) { RecordComment("[ DecompressAnyTagged"); - if (kUseBranchlessPtrDecompression) { + if (kUseBranchlessPtrDecompressionInGeneratedCode) { UseScratchRegisterScope temps(this); // Branchlessly compute |masked_root|: // masked_root = HAS_SMI_TAG(destination) ? 0 : kRootRegister; diff --git a/deps/v8/src/codegen/arm64/macro-assembler-arm64.h b/deps/v8/src/codegen/arm64/macro-assembler-arm64.h index f217c3c586afa7..d4e9c3055b0989 100644 --- a/deps/v8/src/codegen/arm64/macro-assembler-arm64.h +++ b/deps/v8/src/codegen/arm64/macro-assembler-arm64.h @@ -852,7 +852,10 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase { // Generate an indirect call (for when a direct call's range is not adequate). void IndirectCall(Address target, RelocInfo::Mode rmode); - void CallBuiltinPointer(Register builtin_pointer) override; + // Load the builtin given by the Smi in |builtin_index| into the same + // register. + void LoadEntryFromBuiltinIndex(Register builtin_index); + void CallBuiltinByIndex(Register builtin_index) override; void LoadCodeObjectEntry(Register destination, Register code_object) override; void CallCodeObject(Register code_object) override; @@ -1920,17 +1923,15 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler { class InstructionAccurateScope { public: explicit InstructionAccurateScope(TurboAssembler* tasm, size_t count = 0) - : tasm_(tasm) + : tasm_(tasm), + block_pool_(tasm, count * kInstrSize) #ifdef DEBUG , size_(count * kInstrSize) #endif { - // Before blocking the const pool, see if it needs to be emitted. 
- tasm_->CheckConstPool(false, true); - tasm_->CheckVeneerPool(false, true); - - tasm_->StartBlockPools(); + tasm_->CheckVeneerPool(false, true, count * kInstrSize); + tasm_->StartBlockVeneerPool(); #ifdef DEBUG if (count != 0) { tasm_->bind(&start_); @@ -1941,7 +1942,7 @@ class InstructionAccurateScope { } ~InstructionAccurateScope() { - tasm_->EndBlockPools(); + tasm_->EndBlockVeneerPool(); #ifdef DEBUG if (start_.is_bound()) { DCHECK(tasm_->SizeOfCodeGeneratedSince(&start_) == size_); @@ -1952,6 +1953,7 @@ class InstructionAccurateScope { private: TurboAssembler* tasm_; + TurboAssembler::BlockConstPoolScope block_pool_; #ifdef DEBUG size_t size_; Label start_; @@ -1979,7 +1981,7 @@ class UseScratchRegisterScope { DCHECK_EQ(availablefp_->type(), CPURegister::kVRegister); } - ~UseScratchRegisterScope(); + V8_EXPORT_PRIVATE ~UseScratchRegisterScope(); // Take a register from the appropriate temps list. It will be returned // automatically when the scope ends. @@ -1993,10 +1995,11 @@ class UseScratchRegisterScope { } Register AcquireSameSizeAs(const Register& reg); - VRegister AcquireSameSizeAs(const VRegister& reg); + V8_EXPORT_PRIVATE VRegister AcquireSameSizeAs(const VRegister& reg); private: - static CPURegister AcquireNextAvailable(CPURegList* available); + V8_EXPORT_PRIVATE static CPURegister AcquireNextAvailable( + CPURegList* available); // Available scratch registers. CPURegList* available_; // kRegister diff --git a/deps/v8/src/codegen/arm64/register-arm64.h b/deps/v8/src/codegen/arm64/register-arm64.h index b429786aa95050..741866dfd64b5e 100644 --- a/deps/v8/src/codegen/arm64/register-arm64.h +++ b/deps/v8/src/codegen/arm64/register-arm64.h @@ -559,8 +559,6 @@ using Simd128Register = VRegister; // Lists of registers. class V8_EXPORT_PRIVATE CPURegList { public: - CPURegList() = default; - template explicit CPURegList(CPURegister reg0, CPURegisters... regs) : list_(CPURegister::ListOf(reg0, regs...)), diff --git a/deps/v8/src/codegen/assembler.cc b/deps/v8/src/codegen/assembler.cc index 687ae98bfe92f8..498afb03206432 100644 --- a/deps/v8/src/codegen/assembler.cc +++ b/deps/v8/src/codegen/assembler.cc @@ -64,8 +64,8 @@ AssemblerOptions AssemblerOptions::Default( // might be run on real hardware. options.enable_simulator_code = !serializer; #endif - options.inline_offheap_trampolines = - FLAG_embedded_builtins && !serializer && !generating_embedded_builtin; + options.inline_offheap_trampolines &= + !serializer && !generating_embedded_builtin; #if V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_ARM64 const base::AddressRegion& code_range = isolate->heap()->memory_allocator()->code_range(); @@ -226,23 +226,33 @@ int AssemblerBase::AddCodeTarget(Handle target) { } } -int AssemblerBase::AddCompressedEmbeddedObject(Handle object) { - int current = static_cast(compressed_embedded_objects_.size()); - compressed_embedded_objects_.push_back(object); - return current; +Handle AssemblerBase::GetCodeTarget(intptr_t code_target_index) const { + DCHECK_LT(static_cast(code_target_index), code_targets_.size()); + return code_targets_[code_target_index]; } -Handle AssemblerBase::GetCompressedEmbeddedObject( - intptr_t index) const { - DCHECK_LT(static_cast(index), compressed_embedded_objects_.size()); - return compressed_embedded_objects_[index]; +AssemblerBase::EmbeddedObjectIndex AssemblerBase::AddEmbeddedObject( + Handle object) { + EmbeddedObjectIndex current = embedded_objects_.size(); + // Do not deduplicate invalid handles, they are to heap object requests. 
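// The body continuing below deduplicates by handle location and leaves null
// handles unique. A self-contained sketch of that table (HandleSketch and
// friends are illustrative; location-based hashing stands in for V8's
// Handle::hash / Handle::equal_to):
#include <cstddef>
#include <functional>
#include <unordered_map>
#include <vector>

struct HandleSketch {
  int** location = nullptr;  // stable slot; the slot's contents may move
  bool is_null() const { return location == nullptr; }
};
struct LocationHash {
  size_t operator()(const HandleSketch& h) const {
    return std::hash<int**>()(h.location);  // hash the slot, not the object
  }
};
struct LocationEq {
  bool operator()(const HandleSketch& a, const HandleSketch& b) const {
    return a.location == b.location;
  }
};
struct EmbeddedTableSketch {
  std::vector<HandleSketch> objects;
  std::unordered_map<HandleSketch, size_t, LocationHash, LocationEq> map;
  size_t Add(HandleSketch h) {
    size_t index = objects.size();
    if (!h.is_null()) {  // null placeholders (pending requests) stay unique
      auto it = map.find(h);
      if (it != map.end()) return it->second;
      map[h] = index;
    }
    objects.push_back(h);
    return index;
  }
};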
+ if (!object.is_null()) { + auto entry = embedded_objects_map_.find(object); + if (entry != embedded_objects_map_.end()) { + return entry->second; + } + embedded_objects_map_[object] = current; + } + embedded_objects_.push_back(object); + return current; } -Handle AssemblerBase::GetCodeTarget(intptr_t code_target_index) const { - DCHECK_LT(static_cast(code_target_index), code_targets_.size()); - return code_targets_[code_target_index]; +Handle AssemblerBase::GetEmbeddedObject( + EmbeddedObjectIndex index) const { + DCHECK_LT(index, embedded_objects_.size()); + return embedded_objects_[index]; } + int Assembler::WriteCodeComments() { if (!FLAG_code_comments || code_comments_writer_.entry_count() == 0) return 0; int offset = pc_offset(); diff --git a/deps/v8/src/codegen/assembler.h b/deps/v8/src/codegen/assembler.h index eae5d53a4fca5a..98639583d8119f 100644 --- a/deps/v8/src/codegen/assembler.h +++ b/deps/v8/src/codegen/assembler.h @@ -36,7 +36,9 @@ #define V8_CODEGEN_ASSEMBLER_H_ #include +#include +#include "src/base/memory.h" #include "src/codegen/code-comments.h" #include "src/codegen/cpu-features.h" #include "src/codegen/external-reference.h" @@ -55,6 +57,10 @@ class ApiFunction; namespace internal { +using base::Memory; +using base::ReadUnalignedValue; +using base::WriteUnalignedValue; + // Forward declarations. class EmbeddedData; class InstructionStream; @@ -155,7 +161,7 @@ struct V8_EXPORT_PRIVATE AssemblerOptions { bool isolate_independent_code = false; // Enables the use of isolate-independent builtins through an off-heap // trampoline. (macro assembler feature). - bool inline_offheap_trampolines = false; + bool inline_offheap_trampolines = FLAG_embedded_builtins; // On some platforms, all code is within a given range in the process, // and the start of this range is configured here. Address code_range_start = 0; @@ -272,8 +278,11 @@ class V8_EXPORT_PRIVATE AssemblerBase : public Malloced { int AddCodeTarget(Handle target); Handle GetCodeTarget(intptr_t code_target_index) const; - int AddCompressedEmbeddedObject(Handle object); - Handle GetCompressedEmbeddedObject(intptr_t index) const; + // Add 'object' to the {embedded_objects_} vector and return the index at + // which it is stored. + using EmbeddedObjectIndex = size_t; + EmbeddedObjectIndex AddEmbeddedObject(Handle object); + Handle GetEmbeddedObject(EmbeddedObjectIndex index) const; // The buffer into which code and relocation info are generated. std::unique_ptr buffer_; @@ -321,12 +330,18 @@ class V8_EXPORT_PRIVATE AssemblerBase : public Malloced { // the code handle in the vector instead. std::vector> code_targets_; - // When pointer compression is enabled, we need to store indexes to this - // table in the code until we are ready to copy the code and embed the real - // object pointers. We don't need to do the same thing for non-compressed - // embedded objects, because we've got enough space (kPointerSize) in the - // code stream to just embed the address of the object handle. - std::vector> compressed_embedded_objects_; + // If an assembler needs a small number to refer to a heap object handle + // (for example, because there are only 32bit available on a 64bit arch), the + // assembler adds the object into this vector using AddEmbeddedObject, and + // may then refer to the heap object using the handle's index in this vector. + std::vector> embedded_objects_; + + // Embedded objects are deduplicated based on handle location. 
This is a + // compromise that is almost as effective as deduplication based on actual + // heap object addresses maintains GC safety. + std::unordered_map, EmbeddedObjectIndex, + Handle::hash, Handle::equal_to> + embedded_objects_map_; const AssemblerOptions options_; uint64_t enabled_cpu_features_; diff --git a/deps/v8/src/codegen/code-stub-assembler.cc b/deps/v8/src/codegen/code-stub-assembler.cc index d967d84874d848..390746c27dcd8f 100644 --- a/deps/v8/src/codegen/code-stub-assembler.cc +++ b/deps/v8/src/codegen/code-stub-assembler.cc @@ -63,57 +63,27 @@ void CodeStubAssembler::HandleBreakOnNode() { void CodeStubAssembler::Assert(const BranchGenerator& branch, const char* message, const char* file, int line, - Node* extra_node1, const char* extra_node1_name, - Node* extra_node2, const char* extra_node2_name, - Node* extra_node3, const char* extra_node3_name, - Node* extra_node4, const char* extra_node4_name, - Node* extra_node5, - const char* extra_node5_name) { + std::initializer_list extra_nodes) { #if defined(DEBUG) if (FLAG_debug_code) { - Check(branch, message, file, line, extra_node1, extra_node1_name, - extra_node2, extra_node2_name, extra_node3, extra_node3_name, - extra_node4, extra_node4_name, extra_node5, extra_node5_name); + Check(branch, message, file, line, extra_nodes); } #endif } void CodeStubAssembler::Assert(const NodeGenerator& condition_body, const char* message, const char* file, int line, - Node* extra_node1, const char* extra_node1_name, - Node* extra_node2, const char* extra_node2_name, - Node* extra_node3, const char* extra_node3_name, - Node* extra_node4, const char* extra_node4_name, - Node* extra_node5, - const char* extra_node5_name) { + std::initializer_list extra_nodes) { #if defined(DEBUG) if (FLAG_debug_code) { - Check(condition_body, message, file, line, extra_node1, extra_node1_name, - extra_node2, extra_node2_name, extra_node3, extra_node3_name, - extra_node4, extra_node4_name, extra_node5, extra_node5_name); + Check(condition_body, message, file, line, extra_nodes); } #endif } -#ifdef DEBUG -namespace { -void MaybePrintNodeWithName(CodeStubAssembler* csa, Node* node, - const char* node_name) { - if (node != nullptr) { - csa->CallRuntime(Runtime::kPrintWithNameForAssert, csa->SmiConstant(0), - csa->StringConstant(node_name), node); - } -} -} // namespace -#endif - void CodeStubAssembler::Check(const BranchGenerator& branch, const char* message, const char* file, int line, - Node* extra_node1, const char* extra_node1_name, - Node* extra_node2, const char* extra_node2_name, - Node* extra_node3, const char* extra_node3_name, - Node* extra_node4, const char* extra_node4_name, - Node* extra_node5, const char* extra_node5_name) { + std::initializer_list extra_nodes) { Label ok(this); Label not_ok(this, Label::kDeferred); if (message != nullptr && FLAG_code_comments) { @@ -124,9 +94,7 @@ void CodeStubAssembler::Check(const BranchGenerator& branch, branch(&ok, ¬_ok); BIND(¬_ok); - FailAssert(message, file, line, extra_node1, extra_node1_name, extra_node2, - extra_node2_name, extra_node3, extra_node3_name, extra_node4, - extra_node4_name, extra_node5, extra_node5_name); + FailAssert(message, file, line, extra_nodes); BIND(&ok); Comment("] Assert"); @@ -134,20 +102,14 @@ void CodeStubAssembler::Check(const BranchGenerator& branch, void CodeStubAssembler::Check(const NodeGenerator& condition_body, const char* message, const char* file, int line, - Node* extra_node1, const char* extra_node1_name, - Node* extra_node2, const char* extra_node2_name, - Node* 
extra_node3, const char* extra_node3_name, - Node* extra_node4, const char* extra_node4_name, - Node* extra_node5, const char* extra_node5_name) { + std::initializer_list extra_nodes) { BranchGenerator branch = [=](Label* ok, Label* not_ok) { Node* condition = condition_body(); DCHECK_NOT_NULL(condition); Branch(condition, ok, not_ok); }; - Check(branch, message, file, line, extra_node1, extra_node1_name, extra_node2, - extra_node2_name, extra_node3, extra_node3_name, extra_node4, - extra_node4_name, extra_node5, extra_node5_name); + Check(branch, message, file, line, extra_nodes); } void CodeStubAssembler::FastCheck(TNode condition) { @@ -162,31 +124,25 @@ void CodeStubAssembler::FastCheck(TNode condition) { } void CodeStubAssembler::FailAssert( - const char* message, const char* file, int line, Node* extra_node1, - const char* extra_node1_name, Node* extra_node2, - const char* extra_node2_name, Node* extra_node3, - const char* extra_node3_name, Node* extra_node4, - const char* extra_node4_name, Node* extra_node5, - const char* extra_node5_name) { + const char* message, const char* file, int line, + std::initializer_list extra_nodes) { DCHECK_NOT_NULL(message); EmbeddedVector chars; if (file != nullptr) { - SNPrintF(chars, "CSA_ASSERT failed: %s [%s:%d]\n", message, file, line); - } else { - SNPrintF(chars, "CSA_ASSERT failed: %s\n", message); + SNPrintF(chars, "%s [%s:%d]", message, file, line); + message = chars.begin(); } - Node* message_node = StringConstant(chars.begin()); + Node* message_node = StringConstant(message); #ifdef DEBUG // Only print the extra nodes in debug builds. - MaybePrintNodeWithName(this, extra_node1, extra_node1_name); - MaybePrintNodeWithName(this, extra_node2, extra_node2_name); - MaybePrintNodeWithName(this, extra_node3, extra_node3_name); - MaybePrintNodeWithName(this, extra_node4, extra_node4_name); - MaybePrintNodeWithName(this, extra_node5, extra_node5_name); + for (auto& node : extra_nodes) { + CallRuntime(Runtime::kPrintWithNameForAssert, SmiConstant(0), + StringConstant(node.second), node.first); + } #endif - DebugAbort(message_node); + AbortCSAAssert(message_node); Unreachable(); } @@ -567,7 +523,7 @@ TNode CodeStubAssembler::Float64Trunc(SloppyTNode x) { TNode CodeStubAssembler::IsValidSmi(TNode smi) { if (SmiValuesAre31Bits() && kSystemPointerSize == kInt64Size) { // Check that the Smi value is properly sign-extended. 
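// Stepping back to the Assert/Check refactor earlier in these hunks: five
// optional (node, name) parameter pairs collapse into a single
// std::initializer_list, so call sites pass exactly as many annotated values
// as they have. A plain-C++ sketch of that shape (names ours; int stands in
// for Node*):
#include <cstdio>
#include <initializer_list>
#include <utility>

using ExtraValueSketch = std::pair<int, const char*>;

void FailWithSketch(const char* message,
                    std::initializer_list<ExtraValueSketch> extras = {}) {
  std::printf("assert failed: %s\n", message);
  for (const auto& e : extras) {  // prints however many were supplied
    std::printf("  %s = %d\n", e.second, e.first);
  }
}
// Usage: FailWithSketch("in bounds", {{index, "index"}, {len, "length"}});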
- TNode value = Signed(BitcastTaggedToWord(smi)); + TNode value = Signed(BitcastTaggedSignedToWord(smi)); return WordEqual(value, ChangeInt32ToIntPtr(TruncateIntPtrToInt32(value))); } return Int32TrueConstant(); @@ -611,7 +567,8 @@ TNode CodeStubAssembler::SmiUntag(SloppyTNode value) { if (ToIntPtrConstant(value, constant_value)) { return IntPtrConstant(constant_value >> (kSmiShiftSize + kSmiTagSize)); } - return Signed(WordSar(BitcastTaggedToWord(value), SmiShiftBitsConstant())); + return Signed( + WordSar(BitcastTaggedSignedToWord(value), SmiShiftBitsConstant())); } TNode CodeStubAssembler::SmiToInt32(SloppyTNode value) { @@ -660,13 +617,14 @@ TNode CodeStubAssembler::TryInt32Mul(TNode a, TNode b, TNode CodeStubAssembler::TrySmiAdd(TNode lhs, TNode rhs, Label* if_overflow) { if (SmiValuesAre32Bits()) { - return BitcastWordToTaggedSigned(TryIntPtrAdd( - BitcastTaggedToWord(lhs), BitcastTaggedToWord(rhs), if_overflow)); + return BitcastWordToTaggedSigned( + TryIntPtrAdd(BitcastTaggedSignedToWord(lhs), + BitcastTaggedSignedToWord(rhs), if_overflow)); } else { DCHECK(SmiValuesAre31Bits()); - TNode> pair = - Int32AddWithOverflow(TruncateIntPtrToInt32(BitcastTaggedToWord(lhs)), - TruncateIntPtrToInt32(BitcastTaggedToWord(rhs))); + TNode> pair = Int32AddWithOverflow( + TruncateIntPtrToInt32(BitcastTaggedSignedToWord(lhs)), + TruncateIntPtrToInt32(BitcastTaggedSignedToWord(rhs))); TNode overflow = Projection<1>(pair); GotoIf(overflow, if_overflow); TNode result = Projection<0>(pair); @@ -678,16 +636,16 @@ TNode CodeStubAssembler::TrySmiSub(TNode lhs, TNode rhs, Label* if_overflow) { if (SmiValuesAre32Bits()) { TNode> pair = IntPtrSubWithOverflow( - BitcastTaggedToWord(lhs), BitcastTaggedToWord(rhs)); + BitcastTaggedSignedToWord(lhs), BitcastTaggedSignedToWord(rhs)); TNode overflow = Projection<1>(pair); GotoIf(overflow, if_overflow); TNode result = Projection<0>(pair); return BitcastWordToTaggedSigned(result); } else { DCHECK(SmiValuesAre31Bits()); - TNode> pair = - Int32SubWithOverflow(TruncateIntPtrToInt32(BitcastTaggedToWord(lhs)), - TruncateIntPtrToInt32(BitcastTaggedToWord(rhs))); + TNode> pair = Int32SubWithOverflow( + TruncateIntPtrToInt32(BitcastTaggedSignedToWord(lhs)), + TruncateIntPtrToInt32(BitcastTaggedSignedToWord(rhs))); TNode overflow = Projection<1>(pair); GotoIf(overflow, if_overflow); TNode result = Projection<0>(pair); @@ -933,7 +891,7 @@ TNode CodeStubAssembler::TrySmiDiv(TNode dividend, TNode divisor, BIND(&divisor_is_not_minus_one); TNode untagged_result = Int32Div(untagged_dividend, untagged_divisor); - TNode truncated = Signed(Int32Mul(untagged_result, untagged_divisor)); + TNode truncated = Int32Mul(untagged_result, untagged_divisor); // Do floating point division if the remainder is not 0. GotoIf(Word32NotEqual(untagged_dividend, truncated), bailout); @@ -973,9 +931,12 @@ TNode CodeStubAssembler::TaggedIsSmi(TNode a) { } TNode CodeStubAssembler::TaggedIsNotSmi(SloppyTNode a) { - return WordNotEqual( - WordAnd(BitcastTaggedToWord(a), IntPtrConstant(kSmiTagMask)), - IntPtrConstant(0)); + // Although BitcastTaggedSignedToWord is generally unsafe on HeapObjects, we + // can nonetheless use it to inspect the Smi tag. The assumption here is that + // the GC will not exchange Smis for HeapObjects or vice-versa. 
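// That tag-bit argument in plain integers (constants as in V8's tagging
// scheme; the function name is ours): every tagged value carries its kind in
// the lowest bit, so the test never touches the upper payload bits, which is
// why a Smi-typed bitcast is tolerable on an arbitrary tagged value here.
#include <cstdint>

constexpr intptr_t kSmiTagSketch = 0;
constexpr intptr_t kSmiTagMaskSketch = 1;  // kSmiTagSize == 1

bool TaggedIsSmiSketch(intptr_t tagged) {
  return (tagged & kSmiTagMaskSketch) == kSmiTagSketch;
}
// With 32-bit Smis, 7 is stored as (7LL << 32): low bit 0. Heap object
// pointers are offset by the heap-object tag and end in 1.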
+ TNode a_bitcast = BitcastTaggedSignedToWord(UncheckedCast(a)); + return WordNotEqual(WordAnd(a_bitcast, IntPtrConstant(kSmiTagMask)), + IntPtrConstant(0)); } TNode CodeStubAssembler::TaggedIsPositiveSmi(SloppyTNode a) { @@ -1031,7 +992,7 @@ void CodeStubAssembler::BranchIfPrototypesHaveNoElements( TNode prototype_instance_type = LoadMapInstanceType(prototype_map); // Pessimistically assume elements if a Proxy, Special API Object, - // or JSValue wrapper is found on the prototype chain. After this + // or JSPrimitiveWrapper wrapper is found on the prototype chain. After this // instance type check, it's not necessary to check for interceptors or // access checks. Label if_custom(this, Label::kDeferred), if_notcustom(this); @@ -1040,11 +1001,12 @@ void CodeStubAssembler::BranchIfPrototypesHaveNoElements( BIND(&if_custom); { - // For string JSValue wrappers we still support the checks as long - // as they wrap the empty string. - GotoIfNot(InstanceTypeEqual(prototype_instance_type, JS_VALUE_TYPE), - possibly_elements); - Node* prototype_value = LoadJSValueValue(prototype); + // For string JSPrimitiveWrapper wrappers we still support the checks as + // long as they wrap the empty string. + GotoIfNot( + InstanceTypeEqual(prototype_instance_type, JS_PRIMITIVE_WRAPPER_TYPE), + possibly_elements); + Node* prototype_value = LoadJSPrimitiveWrapperValue(prototype); Branch(IsEmptyString(prototype_value), &if_notcustom, possibly_elements); } @@ -1121,20 +1083,23 @@ TNode CodeStubAssembler::AllocateRaw(TNode size_in_bytes, Label runtime_call(this, Label::kDeferred), no_runtime_call(this), out(this); bool needs_double_alignment = flags & kDoubleAlignment; + bool allow_large_object_allocation = flags & kAllowLargeObjectAllocation; - if (flags & kAllowLargeObjectAllocation) { + if (allow_large_object_allocation) { Label next(this); GotoIf(IsRegularHeapObjectSize(size_in_bytes), &next); + TNode runtime_flags = SmiConstant(Smi::FromInt( + AllocateDoubleAlignFlag::encode(needs_double_alignment) | + AllowLargeObjectAllocationFlag::encode(allow_large_object_allocation))); if (FLAG_young_generation_large_objects) { - result = CallRuntime(Runtime::kAllocateInYoungGeneration, - NoContextConstant(), SmiTag(size_in_bytes)); + result = + CallRuntime(Runtime::kAllocateInYoungGeneration, NoContextConstant(), + SmiTag(size_in_bytes), runtime_flags); } else { - TNode alignment_flag = SmiConstant(Smi::FromInt( - AllocateDoubleAlignFlag::encode(needs_double_alignment))); result = CallRuntime(Runtime::kAllocateInOldGeneration, NoContextConstant(), - SmiTag(size_in_bytes), alignment_flag); + SmiTag(size_in_bytes), runtime_flags); } Goto(&out); @@ -1161,15 +1126,17 @@ TNode CodeStubAssembler::AllocateRaw(TNode size_in_bytes, BIND(&runtime_call); { + TNode runtime_flags = SmiConstant(Smi::FromInt( + AllocateDoubleAlignFlag::encode(needs_double_alignment) | + AllowLargeObjectAllocationFlag::encode(allow_large_object_allocation))); if (flags & kPretenured) { - TNode runtime_flags = SmiConstant(Smi::FromInt( - AllocateDoubleAlignFlag::encode(needs_double_alignment))); result = CallRuntime(Runtime::kAllocateInOldGeneration, NoContextConstant(), SmiTag(size_in_bytes), runtime_flags); } else { - result = CallRuntime(Runtime::kAllocateInYoungGeneration, - NoContextConstant(), SmiTag(size_in_bytes)); + result = + CallRuntime(Runtime::kAllocateInYoungGeneration, NoContextConstant(), + SmiTag(size_in_bytes), runtime_flags); } Goto(&out); } @@ -1394,14 +1361,15 @@ Node* CodeStubAssembler::LoadBufferObject(Node* buffer, int offset, 
Node* CodeStubAssembler::LoadObjectField(SloppyTNode object, int offset, MachineType type) { CSA_ASSERT(this, IsStrong(object)); - return Load(type, object, IntPtrConstant(offset - kHeapObjectTag)); + return LoadFromObject(type, object, IntPtrConstant(offset - kHeapObjectTag)); } Node* CodeStubAssembler::LoadObjectField(SloppyTNode object, SloppyTNode offset, MachineType type) { CSA_ASSERT(this, IsStrong(object)); - return Load(type, object, IntPtrSub(offset, IntPtrConstant(kHeapObjectTag))); + return LoadFromObject(type, object, + IntPtrSub(offset, IntPtrConstant(kHeapObjectTag))); } TNode CodeStubAssembler::LoadAndUntagObjectField( @@ -1469,12 +1437,18 @@ TNode CodeStubAssembler::LoadHeapNumberValue( object, HeapNumber::kValueOffset, MachineType::Float64())); } +TNode CodeStubAssembler::GetStructMap(InstanceType instance_type) { + Handle map_handle(Map::GetStructMap(isolate(), instance_type), + isolate()); + return HeapConstant(map_handle); +} + TNode CodeStubAssembler::LoadMap(SloppyTNode object) { return UncheckedCast(LoadObjectField(object, HeapObject::kMapOffset, MachineType::TaggedPointer())); } -TNode CodeStubAssembler::LoadInstanceType( +TNode CodeStubAssembler::LoadInstanceType( SloppyTNode object) { return LoadMapInstanceType(LoadMap(object)); } @@ -1591,8 +1565,8 @@ TNode CodeStubAssembler::LoadMapBitField3(SloppyTNode map) { LoadObjectField(map, Map::kBitField3Offset, MachineType::Uint32())); } -TNode CodeStubAssembler::LoadMapInstanceType(SloppyTNode map) { - return UncheckedCast( +TNode CodeStubAssembler::LoadMapInstanceType(SloppyTNode map) { + return UncheckedCast( LoadObjectField(map, Map::kInstanceTypeOffset, MachineType::Uint16())); } @@ -1700,12 +1674,10 @@ TNode CodeStubAssembler::LoadMapBackPointer(SloppyTNode map) { TNode CodeStubAssembler::EnsureOnlyHasSimpleProperties( TNode map, TNode instance_type, Label* bailout) { - // This check can have false positives, since it applies to any JSValueType. + // This check can have false positives, since it applies to any + // JSPrimitiveWrapper type. 
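// The runtime_flags values built in the AllocateRaw hunk above pack two
// booleans into one Smi using V8's BitField encode/decode pattern. A minimal
// standalone version of that encoder (BitFieldSketch and the two flag
// aliases are ours):
#include <cstdint>

template <typename T, int kShift, int kSize>
struct BitFieldSketch {
  static constexpr uint32_t kMask = ((uint32_t{1} << kSize) - 1) << kShift;
  static constexpr uint32_t encode(T value) {
    return static_cast<uint32_t>(value) << kShift;
  }
  static constexpr T decode(uint32_t packed) {
    return static_cast<T>((packed & kMask) >> kShift);
  }
};

using DoubleAlignFlagSketch = BitFieldSketch<bool, 0, 1>;
using AllowLargeObjectsFlagSketch = BitFieldSketch<bool, 1, 1>;

static_assert(
    DoubleAlignFlagSketch::decode(DoubleAlignFlagSketch::encode(true) |
                                  AllowLargeObjectsFlagSketch::encode(true)),
    "each flag decodes independently of the other");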
GotoIf(IsCustomElementsReceiverInstanceType(instance_type), bailout); - GotoIf(IsSetWord32(LoadMapBitField2(map), Map::HasHiddenPrototypeBit::kMask), - bailout); - TNode bit_field3 = LoadMapBitField3(map); GotoIf(IsSetWord32(bit_field3, Map::IsDictionaryMapBit::kMask), bailout); @@ -1810,9 +1782,9 @@ Node* CodeStubAssembler::PointerToSeqStringData(Node* seq_string) { IntPtrConstant(SeqOneByteString::kHeaderSize - kHeapObjectTag)); } -Node* CodeStubAssembler::LoadJSValueValue(Node* object) { - CSA_ASSERT(this, IsJSValue(object)); - return LoadObjectField(object, JSValue::kValueOffset); +Node* CodeStubAssembler::LoadJSPrimitiveWrapperValue(Node* object) { + CSA_ASSERT(this, IsJSPrimitiveWrapper(object)); + return LoadObjectField(object, JSPrimitiveWrapper::kValueOffset); } void CodeStubAssembler::DispatchMaybeObject(TNode maybe_object, @@ -1941,11 +1913,13 @@ TNode CodeStubAssembler::LoadArrayLength( return LoadAndUntagWeakFixedArrayLength(array); } -template -TNode CodeStubAssembler::LoadArrayElement( - TNode array, int array_header_size, Node* index_node, - int additional_offset, ParameterMode parameter_mode, - LoadSensitivity needs_poisoning) { +template +TNode CodeStubAssembler::LoadArrayElement(TNode array, + int array_header_size, + Node* index_node, + int additional_offset, + ParameterMode parameter_mode, + LoadSensitivity needs_poisoning) { CSA_ASSERT(this, IntPtrGreaterThanOrEqual( ParameterToIntPtr(index_node, parameter_mode), IntPtrConstant(0))); @@ -1955,8 +1929,13 @@ TNode CodeStubAssembler::LoadArrayElement( parameter_mode, header_size); CSA_ASSERT(this, IsOffsetInBounds(offset, LoadArrayLength(array), array_header_size)); - return UncheckedCast( - Load(MachineType::AnyTagged(), array, offset, needs_poisoning)); + constexpr MachineType machine_type = MachineTypeOf::value; + // TODO(gsps): Remove the Load case once LoadFromObject supports poisoning + if (needs_poisoning == LoadSensitivity::kSafe) { + return UncheckedCast(LoadFromObject(machine_type, array, offset)); + } else { + return UncheckedCast(Load(machine_type, array, offset, needs_poisoning)); + } } template TNode @@ -2046,8 +2025,8 @@ TNode CodeStubAssembler::LoadJSTypedArrayBackingStore( IntPtrAdd(external_pointer, BitcastTaggedToWord(base_pointer))); } -Node* CodeStubAssembler::LoadFixedBigInt64ArrayElementAsTagged( - Node* data_pointer, Node* offset) { +TNode CodeStubAssembler::LoadFixedBigInt64ArrayElementAsTagged( + SloppyTNode data_pointer, SloppyTNode offset) { if (Is64()) { TNode value = UncheckedCast( Load(MachineType::IntPtr(), data_pointer, offset)); @@ -2059,13 +2038,15 @@ Node* CodeStubAssembler::LoadFixedBigInt64ArrayElementAsTagged( Load(MachineType::UintPtr(), data_pointer, offset)); TNode low = UncheckedCast( Load(MachineType::UintPtr(), data_pointer, - Int32Add(offset, Int32Constant(kSystemPointerSize)))); + Int32Add(TruncateIntPtrToInt32(offset), + Int32Constant(kSystemPointerSize)))); #else TNode low = UncheckedCast( Load(MachineType::UintPtr(), data_pointer, offset)); TNode high = UncheckedCast( Load(MachineType::UintPtr(), data_pointer, - Int32Add(offset, Int32Constant(kSystemPointerSize)))); + Int32Add(TruncateIntPtrToInt32(offset), + Int32Constant(kSystemPointerSize)))); #endif return BigIntFromInt32Pair(low, high); } @@ -2176,8 +2157,9 @@ TNode CodeStubAssembler::BigIntFromInt64(TNode value) { return var_result.value(); } -Node* CodeStubAssembler::LoadFixedBigUint64ArrayElementAsTagged( - Node* data_pointer, Node* offset) { +compiler::TNode 
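// On 32-bit targets, LoadFixedBigInt64ArrayElementAsTagged reads the element
// as two pointer-sized words and pairs them, with the high/low order chosen
// by target endianness; the fix above also truncates the intptr offset before
// the Int32Add. A sketch (not V8 code) of the pairing, assuming a
// little-endian host, which corresponds to the #else branch:

#include <cassert>
#include <cstddef>
#include <cstdint>
#include <cstring>

int64_t LoadInt64AsTwoWords(const uint8_t* data, size_t offset) {
  uint32_t low, high;
  std::memcpy(&low, data + offset, sizeof(low));        // first 32-bit word
  std::memcpy(&high, data + offset + sizeof(uint32_t),  // second 32-bit word
              sizeof(high));
  return static_cast<int64_t>((static_cast<uint64_t>(high) << 32) | low);
}

int main() {
  int64_t value = -0x123456789ALL;
  uint8_t buffer[8];
  std::memcpy(buffer, &value, sizeof(value));
  assert(LoadInt64AsTwoWords(buffer, 0) == value);  // little-endian assumed
}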
+CodeStubAssembler::LoadFixedBigUint64ArrayElementAsTagged( + SloppyTNode data_pointer, SloppyTNode offset) { Label if_zero(this), done(this); if (Is64()) { TNode value = UncheckedCast( @@ -2190,13 +2172,15 @@ Node* CodeStubAssembler::LoadFixedBigUint64ArrayElementAsTagged( Load(MachineType::UintPtr(), data_pointer, offset)); TNode low = UncheckedCast( Load(MachineType::UintPtr(), data_pointer, - Int32Add(offset, Int32Constant(kSystemPointerSize)))); + Int32Add(TruncateIntPtrToInt32(offset), + Int32Constant(kSystemPointerSize)))); #else TNode low = UncheckedCast( Load(MachineType::UintPtr(), data_pointer, offset)); TNode high = UncheckedCast( Load(MachineType::UintPtr(), data_pointer, - Int32Add(offset, Int32Constant(kSystemPointerSize)))); + Int32Add(TruncateIntPtrToInt32(offset), + Int32Constant(kSystemPointerSize)))); #endif return BigIntFromUint32Pair(low, high); } @@ -2244,10 +2228,10 @@ TNode CodeStubAssembler::BigIntFromUint64(TNode value) { return var_result.value(); } -Node* CodeStubAssembler::LoadFixedTypedArrayElementAsTagged( - Node* data_pointer, Node* index_node, ElementsKind elements_kind, +TNode CodeStubAssembler::LoadFixedTypedArrayElementAsTagged( + TNode data_pointer, Node* index_node, ElementsKind elements_kind, ParameterMode parameter_mode) { - Node* offset = + TNode offset = ElementOffsetFromIndex(index_node, elements_kind, parameter_mode, 0); switch (elements_kind) { case UINT8_ELEMENTS: /* fall through */ @@ -2281,7 +2265,8 @@ Node* CodeStubAssembler::LoadFixedTypedArrayElementAsTagged( } TNode CodeStubAssembler::LoadFixedTypedArrayElementAsTagged( - TNode data_pointer, TNode index, TNode elements_kind) { + TNode data_pointer, TNode index, + TNode elements_kind) { TVARIABLE(Numeric, var_result); Label done(this), if_unknown_type(this, Label::kDeferred); int32_t elements_kinds[] = { @@ -2307,12 +2292,12 @@ TNode CodeStubAssembler::LoadFixedTypedArrayElementAsTagged( BIND(&if_unknown_type); Unreachable(); -#define TYPED_ARRAY_CASE(Type, type, TYPE, ctype) \ - BIND(&if_##type##array); \ - { \ - var_result = CAST(LoadFixedTypedArrayElementAsTagged( \ - data_pointer, index, TYPE##_ELEMENTS, SMI_PARAMETERS)); \ - Goto(&done); \ +#define TYPED_ARRAY_CASE(Type, type, TYPE, ctype) \ + BIND(&if_##type##array); \ + { \ + var_result = LoadFixedTypedArrayElementAsTagged( \ + data_pointer, index, TYPE##_ELEMENTS, SMI_PARAMETERS); \ + Goto(&done); \ } TYPED_ARRAYS(TYPED_ARRAY_CASE) #undef TYPED_ARRAY_CASE @@ -2323,8 +2308,7 @@ TNode CodeStubAssembler::LoadFixedTypedArrayElementAsTagged( void CodeStubAssembler::StoreJSTypedArrayElementFromTagged( TNode context, TNode typed_array, - TNode index_node, TNode value, ElementsKind elements_kind, - ParameterMode parameter_mode) { + TNode index_node, TNode value, ElementsKind elements_kind) { TNode data_pointer = LoadJSTypedArrayBackingStore(typed_array); switch (elements_kind) { case UINT8_ELEMENTS: @@ -2333,26 +2317,26 @@ void CodeStubAssembler::StoreJSTypedArrayElementFromTagged( case UINT16_ELEMENTS: case INT16_ELEMENTS: StoreElement(data_pointer, elements_kind, index_node, - SmiToInt32(CAST(value)), parameter_mode); + SmiToInt32(CAST(value)), SMI_PARAMETERS); break; case UINT32_ELEMENTS: case INT32_ELEMENTS: StoreElement(data_pointer, elements_kind, index_node, - TruncateTaggedToWord32(context, value), parameter_mode); + TruncateTaggedToWord32(context, value), SMI_PARAMETERS); break; case FLOAT32_ELEMENTS: StoreElement(data_pointer, elements_kind, index_node, TruncateFloat64ToFloat32(LoadHeapNumberValue(CAST(value))), - 
parameter_mode); + SMI_PARAMETERS); break; case FLOAT64_ELEMENTS: StoreElement(data_pointer, elements_kind, index_node, - LoadHeapNumberValue(CAST(value)), parameter_mode); + LoadHeapNumberValue(CAST(value)), SMI_PARAMETERS); break; case BIGUINT64_ELEMENTS: case BIGINT64_ELEMENTS: StoreElement(data_pointer, elements_kind, index_node, - UncheckedCast(value), parameter_mode); + UncheckedCast(value), SMI_PARAMETERS); break; default: UNREACHABLE(); @@ -2925,15 +2909,12 @@ TNode CodeStubAssembler::EnsureArrayPushable(TNode map, // Disallow pushing onto prototypes. It might be the JSArray prototype. // Disallow pushing onto non-extensible objects. Comment("Disallow pushing onto prototypes"); - Node* bit_field2 = LoadMapBitField2(map); - int mask = Map::IsPrototypeMapBit::kMask | Map::IsExtensibleBit::kMask; - Node* test = Word32And(bit_field2, Int32Constant(mask)); - GotoIf(Word32NotEqual(test, Int32Constant(Map::IsExtensibleBit::kMask)), - bailout); + GotoIfNot(IsExtensibleNonPrototypeMap(map), bailout); EnsureArrayLengthWritable(map, bailout); - TNode kind = DecodeWord32(bit_field2); + TNode kind = + DecodeWord32(LoadMapBitField2(map)); return Signed(kind); } @@ -3022,7 +3003,7 @@ void CodeStubAssembler::TryStoreArrayElement(ElementsKind kind, GotoIfNotNumber(value, bailout); } if (IsDoubleElementsKind(kind)) { - value = ChangeNumberToFloat64(value); + value = ChangeNumberToFloat64(CAST(value)); } StoreElement(elements, kind, index, value, mode); } @@ -3131,14 +3112,10 @@ TNode CodeStubAssembler::AllocateBigInt(TNode length) { } TNode CodeStubAssembler::AllocateRawBigInt(TNode length) { - // This is currently used only for 64-bit wide BigInts. If more general - // applicability is required, a large-object check must be added. - CSA_ASSERT(this, UintPtrLessThan(length, IntPtrConstant(3))); - TNode size = IntPtrAdd(IntPtrConstant(BigInt::kHeaderSize), Signed(WordShl(length, kSystemPointerSizeLog2))); - Node* raw_result = Allocate(size, kNone); + Node* raw_result = Allocate(size, kAllowLargeObjectAllocation); StoreMapNoWriteBarrier(raw_result, RootIndex::kBigIntMap); if (FIELD_SIZE(BigInt::kOptionalPaddingOffset) != 0) { DCHECK_EQ(4, FIELD_SIZE(BigInt::kOptionalPaddingOffset)); @@ -3155,11 +3132,26 @@ void CodeStubAssembler::StoreBigIntBitfield(TNode bigint, MachineRepresentation::kWord32); } -void CodeStubAssembler::StoreBigIntDigit(TNode bigint, int digit_index, +void CodeStubAssembler::StoreBigIntDigit(TNode bigint, + intptr_t digit_index, TNode digit) { + CHECK_LE(0, digit_index); + CHECK_LT(digit_index, BigInt::kMaxLength); StoreObjectFieldNoWriteBarrier( - bigint, BigInt::kDigitsOffset + digit_index * kSystemPointerSize, digit, - UintPtrT::kMachineRepresentation); + bigint, + BigInt::kDigitsOffset + + static_cast(digit_index) * kSystemPointerSize, + digit, UintPtrT::kMachineRepresentation); +} + +void CodeStubAssembler::StoreBigIntDigit(TNode bigint, + TNode digit_index, + TNode digit) { + TNode offset = + IntPtrAdd(IntPtrConstant(BigInt::kDigitsOffset), + IntPtrMul(digit_index, IntPtrConstant(kSystemPointerSize))); + StoreObjectFieldNoWriteBarrier(bigint, offset, digit, + UintPtrT::kMachineRepresentation); } TNode CodeStubAssembler::LoadBigIntBitfield(TNode bigint) { @@ -3168,10 +3160,23 @@ TNode CodeStubAssembler::LoadBigIntBitfield(TNode bigint) { } TNode CodeStubAssembler::LoadBigIntDigit(TNode bigint, - int digit_index) { - return UncheckedCast(LoadObjectField( - bigint, BigInt::kDigitsOffset + digit_index * kSystemPointerSize, - MachineType::UintPtr())); + intptr_t digit_index) { + 
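// EnsureArrayPushable now delegates its map check to
// IsExtensibleNonPrototypeMap: AND with a mask covering both bits, then
// compare against the expected pattern, so a single branch answers
// "extensible and not a prototype map". A sketch (not V8 code) with invented
// bit positions:

#include <cassert>
#include <cstdint>

constexpr uint32_t kIsExtensibleBit = 1 << 2;    // illustrative positions,
constexpr uint32_t kIsPrototypeMapBit = 1 << 3;  // not V8's real bit layout

bool IsExtensibleNonPrototype(uint32_t bit_field) {
  constexpr uint32_t kMask = kIsExtensibleBit | kIsPrototypeMapBit;
  constexpr uint32_t kExpected = kIsExtensibleBit;
  // One AND plus one compare tests both bits at once.
  return (bit_field & kMask) == kExpected;
}

int main() {
  assert(IsExtensibleNonPrototype(kIsExtensibleBit));
  assert(!IsExtensibleNonPrototype(kIsExtensibleBit | kIsPrototypeMapBit));
  assert(!IsExtensibleNonPrototype(0));
}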
CHECK_LE(0, digit_index); + CHECK_LT(digit_index, BigInt::kMaxLength); + return UncheckedCast( + LoadObjectField(bigint, + BigInt::kDigitsOffset + + static_cast(digit_index) * kSystemPointerSize, + MachineType::UintPtr())); +} + +TNode CodeStubAssembler::LoadBigIntDigit(TNode bigint, + TNode digit_index) { + TNode offset = + IntPtrAdd(IntPtrConstant(BigInt::kDigitsOffset), + IntPtrMul(digit_index, IntPtrConstant(kSystemPointerSize))); + return UncheckedCast( + LoadObjectField(bigint, offset, MachineType::UintPtr())); } TNode CodeStubAssembler::AllocateByteArray(TNode length, @@ -3440,16 +3445,16 @@ TNode CodeStubAssembler::AllocateNameDictionary( } TNode CodeStubAssembler::AllocateNameDictionary( - TNode at_least_space_for) { + TNode at_least_space_for, AllocationFlags flags) { CSA_ASSERT(this, UintPtrLessThanOrEqual( at_least_space_for, IntPtrConstant(NameDictionary::kMaxCapacity))); TNode capacity = HashTableComputeCapacity(at_least_space_for); - return AllocateNameDictionaryWithCapacity(capacity); + return AllocateNameDictionaryWithCapacity(capacity, flags); } TNode CodeStubAssembler::AllocateNameDictionaryWithCapacity( - TNode capacity) { + TNode capacity, AllocationFlags flags) { CSA_ASSERT(this, WordIsPowerOfTwo(capacity)); CSA_ASSERT(this, IntPtrGreaterThan(capacity, IntPtrConstant(0))); TNode length = EntryToIndex(capacity); @@ -3457,39 +3462,51 @@ TNode CodeStubAssembler::AllocateNameDictionaryWithCapacity( TimesTaggedSize(length), IntPtrConstant(NameDictionary::kHeaderSize)); TNode result = - UncheckedCast(AllocateInNewSpace(store_size)); - Comment("Initialize NameDictionary"); + UncheckedCast(Allocate(store_size, flags)); + // Initialize FixedArray fields. - DCHECK(RootsTable::IsImmortalImmovable(RootIndex::kNameDictionaryMap)); - StoreMapNoWriteBarrier(result, RootIndex::kNameDictionaryMap); - StoreObjectFieldNoWriteBarrier(result, FixedArray::kLengthOffset, - SmiFromIntPtr(length)); + { + DCHECK(RootsTable::IsImmortalImmovable(RootIndex::kNameDictionaryMap)); + StoreMapNoWriteBarrier(result, RootIndex::kNameDictionaryMap); + StoreObjectFieldNoWriteBarrier(result, FixedArray::kLengthOffset, + SmiFromIntPtr(length)); + } + // Initialized HashTable fields. - TNode zero = SmiConstant(0); - StoreFixedArrayElement(result, NameDictionary::kNumberOfElementsIndex, zero, - SKIP_WRITE_BARRIER); - StoreFixedArrayElement(result, NameDictionary::kNumberOfDeletedElementsIndex, - zero, SKIP_WRITE_BARRIER); - StoreFixedArrayElement(result, NameDictionary::kCapacityIndex, - SmiTag(capacity), SKIP_WRITE_BARRIER); - // Initialize Dictionary fields. - TNode filler = UndefinedConstant(); - StoreFixedArrayElement(result, NameDictionary::kNextEnumerationIndexIndex, - SmiConstant(PropertyDetails::kInitialIndex), - SKIP_WRITE_BARRIER); - StoreFixedArrayElement(result, NameDictionary::kObjectHashIndex, - SmiConstant(PropertyArray::kNoHashSentinel), - SKIP_WRITE_BARRIER); + { + TNode zero = SmiConstant(0); + StoreFixedArrayElement(result, NameDictionary::kNumberOfElementsIndex, zero, + SKIP_WRITE_BARRIER); + StoreFixedArrayElement(result, + NameDictionary::kNumberOfDeletedElementsIndex, zero, + SKIP_WRITE_BARRIER); + StoreFixedArrayElement(result, NameDictionary::kCapacityIndex, + SmiTag(capacity), SKIP_WRITE_BARRIER); + // Initialize Dictionary fields. 
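// Both LoadBigIntDigit/StoreBigIntDigit overloads above address digit i at
// kDigitsOffset + i * kSystemPointerSize: the constant-index overload folds
// the product at C++ compile time (guarded by the CHECKs), while the TNode
// overload emits IntPtrAdd/IntPtrMul. A sketch of the arithmetic; the header
// size and pointer size below are illustrative assumptions, not V8's layout:

#include <cassert>
#include <cstdint>

constexpr intptr_t kDigitsOffset = 16;      // illustrative header size
constexpr intptr_t kSystemPointerSize = 8;  // 64-bit platform assumed

intptr_t DigitOffset(intptr_t digit_index) {
  return kDigitsOffset + digit_index * kSystemPointerSize;
}

int main() {
  assert(DigitOffset(0) == 16);
  assert(DigitOffset(3) == 40);
}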
+ StoreFixedArrayElement(result, NameDictionary::kNextEnumerationIndexIndex, + SmiConstant(PropertyDetails::kInitialIndex), + SKIP_WRITE_BARRIER); + StoreFixedArrayElement(result, NameDictionary::kObjectHashIndex, + SmiConstant(PropertyArray::kNoHashSentinel), + SKIP_WRITE_BARRIER); + } // Initialize NameDictionary elements. - TNode result_word = BitcastTaggedToWord(result); - TNode start_address = IntPtrAdd( - result_word, IntPtrConstant(NameDictionary::OffsetOfElementAt( - NameDictionary::kElementsStartIndex) - - kHeapObjectTag)); - TNode end_address = IntPtrAdd( - result_word, IntPtrSub(store_size, IntPtrConstant(kHeapObjectTag))); - StoreFieldsNoWriteBarrier(start_address, end_address, filler); + { + TNode result_word = BitcastTaggedToWord(result); + TNode start_address = IntPtrAdd( + result_word, IntPtrConstant(NameDictionary::OffsetOfElementAt( + NameDictionary::kElementsStartIndex) - + kHeapObjectTag)); + TNode end_address = IntPtrAdd( + result_word, IntPtrSub(store_size, IntPtrConstant(kHeapObjectTag))); + + TNode filler = UndefinedConstant(); + DCHECK(RootsTable::IsImmortalImmovable(RootIndex::kUndefinedValue)); + + StoreFieldsNoWriteBarrier(start_address, end_address, filler); + } + return result; } @@ -3605,6 +3622,17 @@ TNode CodeStubAssembler::AllocateSmallOrderedHashTable( StoreMapNoWriteBarrier(table_obj, small_ordered_hash_map); TNode table = UncheckedCast(table_obj); + { + // This store overlaps with the header fields stored below. + // Since it happens first, it effectively still just zero-initializes the + // padding. + constexpr int offset = + RoundDown(CollectionType::PaddingOffset()); + STATIC_ASSERT(offset + kTaggedSize == CollectionType::PaddingOffset() + + CollectionType::PaddingSize()); + StoreObjectFieldNoWriteBarrier(table, offset, SmiConstant(0)); + } + // Initialize the SmallOrderedHashTable fields. 
StoreObjectByteNoWriteBarrier( table, CollectionType::NumberOfBucketsOffset(), @@ -3748,8 +3776,9 @@ void CodeStubAssembler::InitializeStructBody(Node* object, Node* map, StoreFieldsNoWriteBarrier(start_address, end_address, filler); } -Node* CodeStubAssembler::AllocateJSObjectFromMap( - Node* map, Node* properties, Node* elements, AllocationFlags flags, +TNode CodeStubAssembler::AllocateJSObjectFromMap( + SloppyTNode map, SloppyTNode properties, + SloppyTNode elements, AllocationFlags flags, SlackTrackingMode slack_tracking_mode) { CSA_ASSERT(this, IsMap(map)); CSA_ASSERT(this, Word32BinaryNot(IsJSFunctionMap(map))); @@ -3761,7 +3790,7 @@ Node* CodeStubAssembler::AllocateJSObjectFromMap( StoreMapNoWriteBarrier(object, map); InitializeJSObjectFromMap(object, map, instance_size, properties, elements, slack_tracking_mode); - return object; + return CAST(object); } void CodeStubAssembler::InitializeJSObjectFromMap( @@ -5508,7 +5537,7 @@ void CodeStubAssembler::TaggedToWord32OrBigIntImpl( } BIND(&is_heap_number); - var_word32->Bind(TruncateHeapNumberValueToWord32(value)); + var_word32->Bind(TruncateHeapNumberValueToWord32(CAST(value))); CombineFeedback(var_feedback, BinaryOperationFeedback::kNumber); Goto(if_number); @@ -5521,9 +5550,10 @@ void CodeStubAssembler::TaggedToWord32OrBigIntImpl( } } -Node* CodeStubAssembler::TruncateHeapNumberValueToWord32(Node* object) { +TNode CodeStubAssembler::TruncateHeapNumberValueToWord32( + TNode object) { Node* value = LoadHeapNumberValue(object); - return TruncateFloat64ToWord32(value); + return Signed(TruncateFloat64ToWord32(value)); } void CodeStubAssembler::TryHeapNumberToSmi(TNode number, @@ -5731,10 +5761,7 @@ TNode CodeStubAssembler::ChangeNumberToUint32(TNode value) { return var_result.value(); } -TNode CodeStubAssembler::ChangeNumberToFloat64( - SloppyTNode value) { - // TODO(tebbi): Remove assert once argument is TNode instead of SloppyTNode. - CSA_SLOW_ASSERT(this, IsNumber(value)); +TNode CodeStubAssembler::ChangeNumberToFloat64(TNode value) { TVARIABLE(Float64T, result); Label smi(this); Label done(this, &result); @@ -5795,43 +5822,43 @@ TNode CodeStubAssembler::TimesDoubleSize(SloppyTNode value) { return WordShl(value, kDoubleSizeLog2); } -Node* CodeStubAssembler::ToThisValue(Node* context, Node* value, - PrimitiveType primitive_type, - char const* method_name) { - // We might need to loop once due to JSValue unboxing. - VARIABLE(var_value, MachineRepresentation::kTagged, value); +TNode CodeStubAssembler::ToThisValue(TNode context, + TNode value, + PrimitiveType primitive_type, + char const* method_name) { + // We might need to loop once due to JSPrimitiveWrapper unboxing. + TVARIABLE(Object, var_value, value); Label loop(this, &var_value), done_loop(this), done_throw(this, Label::kDeferred); Goto(&loop); BIND(&loop); { - // Load the current {value}. - value = var_value.value(); - // Check if the {value} is a Smi or a HeapObject. - GotoIf(TaggedIsSmi(value), (primitive_type == PrimitiveType::kNumber) - ? &done_loop - : &done_throw); + GotoIf( + TaggedIsSmi(var_value.value()), + (primitive_type == PrimitiveType::kNumber) ? &done_loop : &done_throw); + + TNode value = CAST(var_value.value()); // Load the map of the {value}. - Node* value_map = LoadMap(value); + TNode value_map = LoadMap(value); // Load the instance type of the {value}. - Node* value_instance_type = LoadMapInstanceType(value_map); + TNode value_instance_type = LoadMapInstanceType(value_map); - // Check if {value} is a JSValue. 
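// ToThisValue above loops because the receiver may be a JSPrimitiveWrapper
// whose payload must be unwrapped before the primitive-type check. A toy
// model of that loop (not V8 code: Value and Wrapper are invented stand-ins,
// and the real code dispatches on instance types rather than std::variant):

#include <cassert>
#include <string>
#include <variant>

struct Wrapper;
using Value = std::variant<double, std::string, const Wrapper*>;
struct Wrapper {
  Value wrapped;
};

const std::string* ToThisString(const Value* v) {
  for (;;) {
    if (auto* s = std::get_if<std::string>(v)) return s;  // done_loop
    if (auto* w = std::get_if<const Wrapper*>(v)) {
      v = &(*w)->wrapped;  // unwrap and re-check, like the CSA loop
      continue;
    }
    return nullptr;  // done_throw: wrong primitive type
  }
}

int main() {
  Wrapper inner{std::string("hello")};
  Wrapper outer{&inner};  // the toy allows nesting to exercise the loop
  Value v{&outer};
  assert(*ToThisString(&v) == "hello");
  Value num{1.0};
  assert(ToThisString(&num) == nullptr);
}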
- Label if_valueisvalue(this, Label::kDeferred), if_valueisnotvalue(this); - Branch(InstanceTypeEqual(value_instance_type, JS_VALUE_TYPE), - &if_valueisvalue, &if_valueisnotvalue); + // Check if {value} is a JSPrimitiveWrapper. + Label if_valueiswrapper(this, Label::kDeferred), if_valueisnotwrapper(this); + Branch(InstanceTypeEqual(value_instance_type, JS_PRIMITIVE_WRAPPER_TYPE), + &if_valueiswrapper, &if_valueisnotwrapper); - BIND(&if_valueisvalue); + BIND(&if_valueiswrapper); { // Load the actual value from the {value}. - var_value.Bind(LoadObjectField(value, JSValue::kValueOffset)); + var_value = LoadObjectField(value, JSPrimitiveWrapper::kValueOffset); Goto(&loop); } - BIND(&if_valueisnotvalue); + BIND(&if_valueisnotwrapper); { switch (primitive_type) { case PrimitiveType::kBoolean: @@ -5988,13 +6015,12 @@ TNode CodeStubAssembler::InstanceTypeEqual( TNode CodeStubAssembler::IsDictionaryMap(SloppyTNode map) { CSA_SLOW_ASSERT(this, IsMap(map)); - Node* bit_field3 = LoadMapBitField3(map); - return IsSetWord32(bit_field3); + return IsSetWord32(LoadMapBitField3(map)); } TNode CodeStubAssembler::IsExtensibleMap(SloppyTNode map) { CSA_ASSERT(this, IsMap(map)); - return IsSetWord32(LoadMapBitField2(map)); + return IsSetWord32(LoadMapBitField3(map)); } TNode CodeStubAssembler::IsFrozenOrSealedElementsKindMap( @@ -6007,7 +6033,7 @@ TNode CodeStubAssembler::IsFrozenOrSealedElementsKindMap( TNode CodeStubAssembler::IsExtensibleNonPrototypeMap(TNode map) { int kMask = Map::IsExtensibleBit::kMask | Map::IsPrototypeMapBit::kMask; int kExpected = Map::IsExtensibleBit::kMask; - return Word32Equal(Word32And(LoadMapBitField2(map), Int32Constant(kMask)), + return Word32Equal(Word32And(LoadMapBitField3(map), Int32Constant(kMask)), Int32Constant(kExpected)); } @@ -6072,10 +6098,13 @@ TNode CodeStubAssembler::IsTypedArraySpeciesProtectorCellInvalid() { return WordEqual(cell_value, invalid); } -TNode CodeStubAssembler::IsRegExpSpeciesProtectorCellInvalid() { - Node* invalid = SmiConstant(Isolate::kProtectorInvalid); - Node* cell = LoadRoot(RootIndex::kRegExpSpeciesProtector); - Node* cell_value = LoadObjectField(cell, PropertyCell::kValueOffset); +TNode CodeStubAssembler::IsRegExpSpeciesProtectorCellInvalid( + TNode native_context) { + CSA_ASSERT(this, IsNativeContext(native_context)); + TNode cell = CAST(LoadContextElement( + native_context, Context::REGEXP_SPECIES_PROTECTOR_INDEX)); + TNode cell_value = LoadObjectField(cell, PropertyCell::kValueOffset); + TNode invalid = SmiConstant(Isolate::kProtectorInvalid); return WordEqual(cell_value, invalid); } @@ -6270,6 +6299,15 @@ TNode CodeStubAssembler::IsJSGlobalProxyInstanceType( return InstanceTypeEqual(instance_type, JS_GLOBAL_PROXY_TYPE); } +TNode CodeStubAssembler::IsJSGlobalProxyMap(SloppyTNode map) { + return IsJSGlobalProxyInstanceType(LoadMapInstanceType(map)); +} + +TNode CodeStubAssembler::IsJSGlobalProxy( + SloppyTNode object) { + return IsJSGlobalProxyMap(LoadMap(object)); +} + TNode CodeStubAssembler::IsJSObjectInstanceType( SloppyTNode instance_type) { STATIC_ASSERT(LAST_JS_OBJECT_TYPE == LAST_TYPE); @@ -6304,26 +6342,22 @@ TNode CodeStubAssembler::IsJSStringIterator( return HasInstanceType(object, JS_STRING_ITERATOR_TYPE); } -TNode CodeStubAssembler::IsJSGlobalProxy( - SloppyTNode object) { - return HasInstanceType(object, JS_GLOBAL_PROXY_TYPE); -} - TNode CodeStubAssembler::IsMap(SloppyTNode map) { return IsMetaMap(LoadMap(map)); } -TNode CodeStubAssembler::IsJSValueInstanceType( +TNode CodeStubAssembler::IsJSPrimitiveWrapperInstanceType( 
SloppyTNode instance_type) { - return InstanceTypeEqual(instance_type, JS_VALUE_TYPE); + return InstanceTypeEqual(instance_type, JS_PRIMITIVE_WRAPPER_TYPE); } -TNode CodeStubAssembler::IsJSValue(SloppyTNode object) { - return IsJSValueMap(LoadMap(object)); +TNode CodeStubAssembler::IsJSPrimitiveWrapper( + SloppyTNode object) { + return IsJSPrimitiveWrapperMap(LoadMap(object)); } -TNode CodeStubAssembler::IsJSValueMap(SloppyTNode map) { - return IsJSValueInstanceType(LoadMapInstanceType(map)); +TNode CodeStubAssembler::IsJSPrimitiveWrapperMap(SloppyTNode map) { + return IsJSPrimitiveWrapperInstanceType(LoadMapInstanceType(map)); } TNode CodeStubAssembler::IsJSArrayInstanceType( @@ -6420,7 +6454,7 @@ TNode CodeStubAssembler::IsFixedArrayWithKind( if (IsDoubleElementsKind(kind)) { return IsFixedDoubleArray(object); } else { - DCHECK(IsSmiOrObjectElementsKind(kind)); + DCHECK(IsSmiOrObjectElementsKind(kind) || IsSealedElementsKind(kind)); return IsFixedArraySubclass(object); } } @@ -6562,6 +6596,11 @@ TNode CodeStubAssembler::IsPrivateSymbol( [=] { return Int32FalseConstant(); }); } +TNode CodeStubAssembler::IsPrivateName(SloppyTNode symbol) { + TNode flags = LoadObjectField(symbol, Symbol::kFlagsOffset); + return IsSetWord32(flags); +} + TNode CodeStubAssembler::IsNativeContext( SloppyTNode object) { return WordEqual(LoadMap(object), LoadRoot(RootIndex::kNativeContextMap)); @@ -6769,7 +6808,7 @@ TNode CodeStubAssembler::IsHeapNumberUint32(TNode number) { IsHeapNumberPositive(number), [=] { TNode value = LoadHeapNumberValue(number); - TNode int_value = Unsigned(TruncateFloat64ToWord32(value)); + TNode int_value = TruncateFloat64ToWord32(value); return Float64Equal(value, ChangeUint32ToFloat64(int_value)); }, [=] { return Int32FalseConstant(); }); @@ -7423,8 +7462,8 @@ TNode CodeStubAssembler::StringAdd(Node* context, TNode left, return result.value(); } -TNode CodeStubAssembler::StringFromSingleCodePoint( - TNode codepoint, UnicodeEncoding encoding) { +TNode CodeStubAssembler::StringFromSingleUTF16EncodedCodePoint( + TNode codepoint) { VARIABLE(var_result, MachineRepresentation::kTagged, EmptyStringConstant()); Label if_isword16(this), if_isword32(this), return_result(this); @@ -7440,27 +7479,6 @@ TNode CodeStubAssembler::StringFromSingleCodePoint( BIND(&if_isword32); { - switch (encoding) { - case UnicodeEncoding::UTF16: - break; - case UnicodeEncoding::UTF32: { - // Convert UTF32 to UTF16 code units, and store as a 32 bit word. - Node* lead_offset = Int32Constant(0xD800 - (0x10000 >> 10)); - - // lead = (codepoint >> 10) + LEAD_OFFSET - Node* lead = - Int32Add(Word32Shr(codepoint, Int32Constant(10)), lead_offset); - - // trail = (codepoint & 0x3FF) + 0xDC00; - Node* trail = Int32Add(Word32And(codepoint, Int32Constant(0x3FF)), - Int32Constant(0xDC00)); - - // codpoint = (trail << 16) | lead; - codepoint = Signed(Word32Or(Word32Shl(trail, Int32Constant(16)), lead)); - break; - } - } - Node* value = AllocateSeqTwoByteString(2); StoreNoWriteBarrier( MachineRepresentation::kWord32, value, @@ -7513,7 +7531,7 @@ TNode CodeStubAssembler::NumberToString(TNode input) { // contains two elements (number and string) for each cache entry. // TODO(ishell): cleanup mask handling. 
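// The UTF32 branch deleted from StringFromSingleUTF16EncodedCodePoint carried
// the standard code-point-to-surrogate-pair conversion; callers must now pass
// UTF-16 code units themselves. For reference, a standalone sketch of exactly
// the arithmetic the removed lines performed:

#include <cassert>
#include <cstdint>

// Splits a supplementary-plane code point (>= 0x10000) into a UTF-16
// lead/trail surrogate pair.
void EncodeSurrogatePair(uint32_t codepoint, uint16_t* lead, uint16_t* trail) {
  const uint32_t kLeadOffset = 0xD800 - (0x10000 >> 10);
  *lead = static_cast<uint16_t>((codepoint >> 10) + kLeadOffset);
  *trail = static_cast<uint16_t>((codepoint & 0x3FF) + 0xDC00);
}

int main() {
  uint16_t lead, trail;
  EncodeSurrogatePair(0x1F600, &lead, &trail);  // U+1F600 GRINNING FACE
  assert(lead == 0xD83D && trail == 0xDE00);
  // The removed code then packed both units into one 32-bit word:
  uint32_t packed = (static_cast<uint32_t>(trail) << 16) | lead;
  assert(packed == 0xDE00D83D);
}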
Node* mask = - BitcastTaggedToWord(LoadFixedArrayBaseLength(number_string_cache)); + BitcastTaggedSignedToWord(LoadFixedArrayBaseLength(number_string_cache)); TNode one = IntPtrConstant(1); mask = IntPtrSub(mask, one); @@ -7560,8 +7578,8 @@ TNode CodeStubAssembler::NumberToString(TNode input) { BIND(&if_smi); { // Load the smi key, make sure it matches the smi we're looking for. - Node* smi_index = BitcastWordToTagged( - WordAnd(WordShl(BitcastTaggedToWord(smi_input.value()), one), mask)); + Node* smi_index = BitcastWordToTagged(WordAnd( + WordShl(BitcastTaggedSignedToWord(smi_input.value()), one), mask)); Node* smi_key = UnsafeLoadFixedArrayElement(CAST(number_string_cache), smi_index, 0, SMI_PARAMETERS); GotoIf(WordNotEqual(smi_key, smi_input.value()), &runtime); @@ -8333,40 +8351,41 @@ TNode CodeStubAssembler::EntryToIndex(TNode entry, field_index)); } -TNode CodeStubAssembler::LoadDescriptorArrayElement( - TNode object, Node* index, int additional_offset) { - return LoadArrayElement(object, DescriptorArray::kHeaderSize, index, - additional_offset); +template +TNode CodeStubAssembler::LoadDescriptorArrayElement( + TNode object, TNode index, + int additional_offset) { + return LoadArrayElement( + object, DescriptorArray::kHeaderSize, index, additional_offset); } TNode CodeStubAssembler::LoadKeyByKeyIndex( TNode container, TNode key_index) { - return CAST(LoadDescriptorArrayElement(container, key_index, 0)); + return CAST(LoadDescriptorArrayElement(container, key_index, 0)); } TNode CodeStubAssembler::LoadDetailsByKeyIndex( TNode container, TNode key_index) { - const int kKeyToDetails = - DescriptorArray::ToDetailsIndex(0) - DescriptorArray::ToKeyIndex(0); - return Unsigned( - LoadAndUntagToWord32ArrayElement(container, DescriptorArray::kHeaderSize, - key_index, kKeyToDetails * kTaggedSize)); + const int kKeyToDetailsOffset = + DescriptorArray::kEntryDetailsOffset - DescriptorArray::kEntryKeyOffset; + return Unsigned(LoadAndUntagToWord32ArrayElement( + container, DescriptorArray::kHeaderSize, key_index, kKeyToDetailsOffset)); } TNode CodeStubAssembler::LoadValueByKeyIndex( TNode container, TNode key_index) { - const int kKeyToValue = - DescriptorArray::ToValueIndex(0) - DescriptorArray::ToKeyIndex(0); - return CAST(LoadDescriptorArrayElement(container, key_index, - kKeyToValue * kTaggedSize)); + const int kKeyToValueOffset = + DescriptorArray::kEntryValueOffset - DescriptorArray::kEntryKeyOffset; + return LoadDescriptorArrayElement(container, key_index, + kKeyToValueOffset); } TNode CodeStubAssembler::LoadFieldTypeByKeyIndex( TNode container, TNode key_index) { - const int kKeyToValue = - DescriptorArray::ToValueIndex(0) - DescriptorArray::ToKeyIndex(0); - return LoadDescriptorArrayElement(container, key_index, - kKeyToValue * kTaggedSize); + const int kKeyToValueOffset = + DescriptorArray::kEntryValueOffset - DescriptorArray::kEntryKeyOffset; + return LoadDescriptorArrayElement(container, key_index, + kKeyToValueOffset); } TNode CodeStubAssembler::DescriptorEntryToIndex( @@ -8377,14 +8396,14 @@ TNode CodeStubAssembler::DescriptorEntryToIndex( TNode CodeStubAssembler::LoadKeyByDescriptorEntry( TNode container, TNode descriptor_entry) { - return CAST(LoadDescriptorArrayElement( + return CAST(LoadDescriptorArrayElement( container, DescriptorEntryToIndex(descriptor_entry), DescriptorArray::ToKeyIndex(0) * kTaggedSize)); } TNode CodeStubAssembler::LoadKeyByDescriptorEntry( TNode container, int descriptor_entry) { - return CAST(LoadDescriptorArrayElement( + return 
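// NumberToString probes a cache of (number, string) pairs stored flat in one
// FixedArray: the Smi key is shifted left one slot (each entry occupies two
// slots) and masked by capacity - 1. A simplified model (not V8 code) that
// keeps the pairs as structs, so the shift is implicit in the slot type:

#include <cassert>
#include <cstdint>
#include <string>
#include <vector>

struct Cache {
  struct Slot {
    int64_t key;
    std::string value;
  };
  std::vector<Slot> slots;  // size is a power of two

  // Masking by (capacity - 1) picks the entry, like (smi << 1) & mask on the
  // flattened array; a mismatched key means a cache miss.
  const std::string* Lookup(int64_t smi) const {
    size_t index = static_cast<size_t>(smi) & (slots.size() - 1);
    const Slot& s = slots[index];
    return s.key == smi ? &s.value : nullptr;
  }
};

int main() {
  Cache cache{std::vector<Cache::Slot>(8)};
  cache.slots[42 & 7] = {42, "42"};
  assert(*cache.Lookup(42) == "42");
  assert(cache.Lookup(43) == nullptr);
}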
CAST(LoadDescriptorArrayElement( container, IntPtrConstant(0), DescriptorArray::ToKeyIndex(descriptor_entry) * kTaggedSize)); } @@ -8406,14 +8425,14 @@ TNode CodeStubAssembler::LoadDetailsByDescriptorEntry( TNode CodeStubAssembler::LoadValueByDescriptorEntry( TNode container, int descriptor_entry) { - return CAST(LoadDescriptorArrayElement( + return LoadDescriptorArrayElement( container, IntPtrConstant(0), - DescriptorArray::ToValueIndex(descriptor_entry) * kTaggedSize)); + DescriptorArray::ToValueIndex(descriptor_entry) * kTaggedSize); } TNode CodeStubAssembler::LoadFieldTypeByDescriptorEntry( TNode container, TNode descriptor_entry) { - return LoadDescriptorArrayElement( + return LoadDescriptorArrayElement( container, DescriptorEntryToIndex(descriptor_entry), DescriptorArray::ToValueIndex(0) * kTaggedSize); } @@ -9503,15 +9522,15 @@ TNode CodeStubAssembler::CallGetterIfAccessor( Node* accessor_info = value; CSA_ASSERT(this, IsAccessorInfo(value)); CSA_ASSERT(this, TaggedIsNotSmi(receiver)); - Label if_array(this), if_function(this), if_value(this); + Label if_array(this), if_function(this), if_wrapper(this); // Dispatch based on {receiver} instance type. Node* receiver_map = LoadMap(receiver); Node* receiver_instance_type = LoadMapInstanceType(receiver_map); GotoIf(IsJSArrayInstanceType(receiver_instance_type), &if_array); GotoIf(IsJSFunctionInstanceType(receiver_instance_type), &if_function); - Branch(IsJSValueInstanceType(receiver_instance_type), &if_value, - if_bailout); + Branch(IsJSPrimitiveWrapperInstanceType(receiver_instance_type), + &if_wrapper, if_bailout); // JSArray AccessorInfo case. BIND(&if_array); @@ -9538,14 +9557,15 @@ TNode CodeStubAssembler::CallGetterIfAccessor( Goto(&done); } - // JSValue AccessorInfo case. - BIND(&if_value); + // JSPrimitiveWrapper AccessorInfo case. + BIND(&if_wrapper); { - // We only deal with the "length" accessor on JSValue string wrappers. + // We only deal with the "length" accessor on JSPrimitiveWrapper string + // wrappers. GotoIfNot(IsLengthString( LoadObjectField(accessor_info, AccessorInfo::kNameOffset)), if_bailout); - Node* receiver_value = LoadJSValueValue(receiver); + Node* receiver_value = LoadJSPrimitiveWrapperValue(receiver); GotoIfNot(TaggedIsNotSmi(receiver_value), if_bailout); GotoIfNot(IsString(receiver_value), if_bailout); var_value.Bind(LoadStringLengthAsSmi(receiver_value)); @@ -9646,8 +9666,9 @@ void CodeStubAssembler::TryLookupElement(Node* object, Node* map, // clang-format off int32_t values[] = { // Handled by {if_isobjectorsmi}. - PACKED_SMI_ELEMENTS, HOLEY_SMI_ELEMENTS, PACKED_ELEMENTS, - HOLEY_ELEMENTS, + PACKED_SMI_ELEMENTS, HOLEY_SMI_ELEMENTS, PACKED_ELEMENTS, HOLEY_ELEMENTS, + PACKED_SEALED_ELEMENTS, HOLEY_SEALED_ELEMENTS, PACKED_FROZEN_ELEMENTS, + HOLEY_FROZEN_ELEMENTS, // Handled by {if_isdouble}. PACKED_DOUBLE_ELEMENTS, HOLEY_DOUBLE_ELEMENTS, // Handled by {if_isdictionary}. 
@@ -9673,7 +9694,8 @@ void CodeStubAssembler::TryLookupElement(Node* object, Node* map, }; Label* labels[] = { &if_isobjectorsmi, &if_isobjectorsmi, &if_isobjectorsmi, - &if_isobjectorsmi, + &if_isobjectorsmi, &if_isobjectorsmi, &if_isobjectorsmi, + &if_isobjectorsmi, &if_isobjectorsmi, &if_isdouble, &if_isdouble, &if_isdictionary, &if_isfaststringwrapper, @@ -9731,8 +9753,8 @@ void CodeStubAssembler::TryLookupElement(Node* object, Node* map, } BIND(&if_isfaststringwrapper); { - CSA_ASSERT(this, HasInstanceType(object, JS_VALUE_TYPE)); - Node* string = LoadJSValueValue(object); + CSA_ASSERT(this, HasInstanceType(object, JS_PRIMITIVE_WRAPPER_TYPE)); + Node* string = LoadJSPrimitiveWrapperValue(object); CSA_ASSERT(this, IsString(string)); Node* length = LoadStringLengthAsWord(string); GotoIf(UintPtrLessThan(intptr_index, length), if_found); @@ -9740,8 +9762,8 @@ void CodeStubAssembler::TryLookupElement(Node* object, Node* map, } BIND(&if_isslowstringwrapper); { - CSA_ASSERT(this, HasInstanceType(object, JS_VALUE_TYPE)); - Node* string = LoadJSValueValue(object); + CSA_ASSERT(this, HasInstanceType(object, JS_PRIMITIVE_WRAPPER_TYPE)); + Node* string = LoadJSPrimitiveWrapperValue(object); CSA_ASSERT(this, IsString(string)); Node* length = LoadStringLengthAsWord(string); GotoIf(UintPtrLessThan(intptr_index, length), if_found); @@ -9749,7 +9771,7 @@ void CodeStubAssembler::TryLookupElement(Node* object, Node* map, } BIND(&if_typedarray); { - Node* buffer = LoadObjectField(object, JSArrayBufferView::kBufferOffset); + TNode buffer = LoadJSArrayBufferViewBuffer(CAST(object)); GotoIf(IsDetachedBuffer(buffer), if_absent); TNode length = LoadJSTypedArrayLength(CAST(object)); @@ -9794,15 +9816,15 @@ void CodeStubAssembler::BranchIfMaybeSpecialIndex(TNode name_string, } void CodeStubAssembler::TryPrototypeChainLookup( - Node* receiver, Node* key, const LookupInHolder& lookup_property_in_holder, + Node* receiver, Node* object, Node* key, + const LookupInHolder& lookup_property_in_holder, const LookupInHolder& lookup_element_in_holder, Label* if_end, Label* if_bailout, Label* if_proxy) { // Ensure receiver is JSReceiver, otherwise bailout. 
- Label if_objectisnotsmi(this); - Branch(TaggedIsSmi(receiver), if_bailout, &if_objectisnotsmi); - BIND(&if_objectisnotsmi); + GotoIf(TaggedIsSmi(receiver), if_bailout); + CSA_ASSERT(this, TaggedIsNotSmi(object)); - Node* map = LoadMap(receiver); + Node* map = LoadMap(object); Node* instance_type = LoadMapInstanceType(map); { Label if_objectisreceiver(this); @@ -9812,9 +9834,7 @@ void CodeStubAssembler::TryPrototypeChainLookup( if_bailout); BIND(&if_objectisreceiver); - if (if_proxy) { - GotoIf(InstanceTypeEqual(instance_type, JS_PROXY_TYPE), if_proxy); - } + GotoIf(InstanceTypeEqual(instance_type, JS_PROXY_TYPE), if_proxy); } VARIABLE(var_index, MachineType::PointerRepresentation()); @@ -9826,7 +9846,7 @@ void CodeStubAssembler::TryPrototypeChainLookup( BIND(&if_iskeyunique); { - VARIABLE(var_holder, MachineRepresentation::kTagged, receiver); + VARIABLE(var_holder, MachineRepresentation::kTagged, object); VARIABLE(var_holder_map, MachineRepresentation::kTagged, map); VARIABLE(var_holder_instance_type, MachineRepresentation::kWord32, instance_type); @@ -9872,7 +9892,7 @@ void CodeStubAssembler::TryPrototypeChainLookup( } BIND(&if_keyisindex); { - VARIABLE(var_holder, MachineRepresentation::kTagged, receiver); + VARIABLE(var_holder, MachineRepresentation::kTagged, object); VARIABLE(var_holder_map, MachineRepresentation::kTagged, map); VARIABLE(var_holder_instance_type, MachineRepresentation::kWord32, instance_type); @@ -10049,7 +10069,7 @@ TNode CodeStubAssembler::ElementOffsetFromIndex(Node* index_node, Smi smi_index; constant_index = ToSmiConstant(index_node, &smi_index); if (constant_index) index = smi_index.value(); - index_node = BitcastTaggedToWord(index_node); + index_node = BitcastTaggedSignedToWord(index_node); } else { DCHECK(mode == INTPTR_PARAMETERS); constant_index = ToIntPtrConstant(index_node, index); @@ -10594,7 +10614,8 @@ void CodeStubAssembler::BigIntToRawBytes(TNode bigint, void CodeStubAssembler::EmitElementStore(Node* object, Node* key, Node* value, ElementsKind elements_kind, KeyedAccessStoreMode store_mode, - Label* bailout, Node* context) { + Label* bailout, Node* context, + Variable* maybe_converted_value) { CSA_ASSERT(this, Word32BinaryNot(IsJSProxy(object))); Node* elements = LoadElements(object); @@ -10610,12 +10631,12 @@ void CodeStubAssembler::EmitElementStore(Node* object, Node* key, Node* value, TNode intptr_key = TryToIntptr(key, bailout); if (IsTypedArrayElementsKind(elements_kind)) { - Label done(this); + Label done(this), update_value_and_bailout(this, Label::kDeferred); // IntegerIndexedElementSet converts value to a Number/BigInt prior to the // bounds check. - value = PrepareValueForWriteToTypedArray(CAST(value), elements_kind, - CAST(context)); + Node* converted_value = PrepareValueForWriteToTypedArray( + CAST(value), elements_kind, CAST(context)); // There must be no allocations between the buffer load and // and the actual store to backing store, because GC may decide that @@ -10623,8 +10644,12 @@ void CodeStubAssembler::EmitElementStore(Node* object, Node* key, Node* value, // TODO(ishell): introduce DisallowHeapAllocationCode scope here. // Check if buffer has been detached. - Node* buffer = LoadObjectField(object, JSArrayBufferView::kBufferOffset); - GotoIf(IsDetachedBuffer(buffer), bailout); + TNode buffer = LoadJSArrayBufferViewBuffer(CAST(object)); + if (maybe_converted_value) { + GotoIf(IsDetachedBuffer(buffer), &update_value_and_bailout); + } else { + GotoIf(IsDetachedBuffer(buffer), bailout); + } // Bounds check. 
TNode length = LoadJSTypedArrayLength(CAST(object)); @@ -10633,27 +10658,88 @@ void CodeStubAssembler::EmitElementStore(Node* object, Node* key, Node* value, // Skip the store if we write beyond the length or // to a property with a negative integer index. GotoIfNot(UintPtrLessThan(intptr_key, length), &done); - } else if (store_mode == STANDARD_STORE) { - GotoIfNot(UintPtrLessThan(intptr_key, length), bailout); } else { - // This case is produced due to the dispatched call in - // ElementsTransitionAndStore and StoreFastElement. - // TODO(jgruber): Avoid generating unsupported combinations to save code - // size. - DebugBreak(); + DCHECK_EQ(store_mode, STANDARD_STORE); + GotoIfNot(UintPtrLessThan(intptr_key, length), &update_value_and_bailout); } TNode backing_store = LoadJSTypedArrayBackingStore(CAST(object)); - StoreElement(backing_store, elements_kind, intptr_key, value, + StoreElement(backing_store, elements_kind, intptr_key, converted_value, parameter_mode); Goto(&done); + BIND(&update_value_and_bailout); + // We already prepared the incoming value for storing into a typed array. + // This might involve calling ToNumber in some cases. We shouldn't call + // ToNumber again in the runtime so pass the converted value to the runtime. + // The prepared value is an untagged value. Convert it to a tagged value + // to pass it to runtime. It is not possible to do the detached buffer check + // before we prepare the value, since ToNumber can detach the ArrayBuffer. + // The spec specifies the order of these operations. + if (maybe_converted_value != nullptr) { + switch (elements_kind) { + case UINT8_ELEMENTS: + case INT8_ELEMENTS: + case UINT16_ELEMENTS: + case INT16_ELEMENTS: + case UINT8_CLAMPED_ELEMENTS: + maybe_converted_value->Bind(SmiFromInt32(converted_value)); + break; + case UINT32_ELEMENTS: + maybe_converted_value->Bind(ChangeUint32ToTagged(converted_value)); + break; + case INT32_ELEMENTS: + maybe_converted_value->Bind(ChangeInt32ToTagged(converted_value)); + break; + case FLOAT32_ELEMENTS: { + Label dont_allocate_heap_number(this), end(this); + GotoIf(TaggedIsSmi(value), &dont_allocate_heap_number); + GotoIf(IsHeapNumber(value), &dont_allocate_heap_number); + { + maybe_converted_value->Bind(AllocateHeapNumberWithValue( + ChangeFloat32ToFloat64(converted_value))); + Goto(&end); + } + BIND(&dont_allocate_heap_number); + { + maybe_converted_value->Bind(value); + Goto(&end); + } + BIND(&end); + break; + } + case FLOAT64_ELEMENTS: { + Label dont_allocate_heap_number(this), end(this); + GotoIf(TaggedIsSmi(value), &dont_allocate_heap_number); + GotoIf(IsHeapNumber(value), &dont_allocate_heap_number); + { + maybe_converted_value->Bind( + AllocateHeapNumberWithValue(converted_value)); + Goto(&end); + } + BIND(&dont_allocate_heap_number); + { + maybe_converted_value->Bind(value); + Goto(&end); + } + BIND(&end); + break; + } + case BIGINT64_ELEMENTS: + case BIGUINT64_ELEMENTS: + maybe_converted_value->Bind(converted_value); + break; + default: + UNREACHABLE(); + } + } + Goto(bailout); + BIND(&done); return; } - DCHECK( - IsFastElementsKind(elements_kind) || - IsInRange(elements_kind, PACKED_SEALED_ELEMENTS, HOLEY_SEALED_ELEMENTS)); + DCHECK(IsFastElementsKind(elements_kind) || + IsSealedElementsKind(elements_kind)); Node* length = SelectImpl( IsJSArray(object), [=]() { return LoadJSArrayLength(object); }, @@ -10670,18 +10756,24 @@ void CodeStubAssembler::EmitElementStore(Node* object, Node* key, Node* value, value = TryTaggedToFloat64(value, bailout); } - if (IsGrowStoreMode(store_mode) && - 
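// Two details above deserve spelling out. First, the value is converted
// before the detached-buffer check because ToNumber can itself detach the
// buffer, so on bailout the already-converted value is re-tagged and handed
// to the runtime instead of converting twice. Second, the bounds test is a
// single unsigned compare, so negative keys fail together with too-large
// ones. A sketch (not V8 code) of that unsigned-compare trick:

#include <cassert>
#include <cstdint>

// Mirrors UintPtrLessThan(intptr_key, length): a negative key reinterprets
// as a huge unsigned value, so "key < 0" and "key >= length" are rejected by
// the same comparison.
bool InBounds(intptr_t key, uintptr_t length) {
  return static_cast<uintptr_t>(key) < length;
}

int main() {
  assert(InBounds(0, 10));
  assert(InBounds(9, 10));
  assert(!InBounds(10, 10));
  assert(!InBounds(-1, 10));  // -1 wraps to UINTPTR_MAX
}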
!(IsInRange(elements_kind, PACKED_SEALED_ELEMENTS, - HOLEY_SEALED_ELEMENTS))) { + if (IsGrowStoreMode(store_mode) && !IsSealedElementsKind(elements_kind)) { elements = CheckForCapacityGrow(object, elements, elements_kind, length, intptr_key, parameter_mode, bailout); } else { GotoIfNot(UintPtrLessThan(intptr_key, length), bailout); } + // Cannot store to a hole in holey sealed elements so bailout. + if (elements_kind == HOLEY_SEALED_ELEMENTS) { + TNode target_value = + LoadFixedArrayElement(CAST(elements), intptr_key); + GotoIf(IsTheHole(target_value), bailout); + } + // If we didn't grow {elements}, it might still be COW, in which case we // copy it now. - if (!IsSmiOrObjectElementsKind(elements_kind)) { + if (!(IsSmiOrObjectElementsKind(elements_kind) || + IsSealedElementsKind(elements_kind))) { CSA_ASSERT(this, Word32BinaryNot(IsFixedCOWArrayMap(LoadMap(elements)))); } else if (IsCOWHandlingStoreMode(store_mode)) { elements = CopyElementsOnWrite(object, elements, elements_kind, length, @@ -10925,7 +11017,8 @@ TNode CodeStubAssembler::CreateAllocationSiteInFeedbackVector( // Link the object to the allocation site list TNode site_list = ExternalConstant( ExternalReference::allocation_sites_list_address(isolate())); - TNode next_site = CAST(LoadBufferObject(site_list, 0)); + TNode next_site = + LoadBufferObject(ReinterpretCast(site_list), 0); // TODO(mvstanton): This is a store to a weak pointer, which we may want to // mark as such in order to skip the write barrier, once we have a unified @@ -12155,8 +12248,9 @@ Node* CodeStubAssembler::Equal(Node* left, Node* right, Node* context, return result.value(); } -Node* CodeStubAssembler::StrictEqual(Node* lhs, Node* rhs, - Variable* var_type_feedback) { +TNode CodeStubAssembler::StrictEqual(SloppyTNode lhs, + SloppyTNode rhs, + Variable* var_type_feedback) { // Pseudo-code for the algorithm below: // // if (lhs == rhs) { @@ -12208,7 +12302,7 @@ Node* CodeStubAssembler::StrictEqual(Node* lhs, Node* rhs, Label if_equal(this), if_notequal(this), if_not_equivalent_types(this), end(this); - VARIABLE(result, MachineRepresentation::kTagged); + TVARIABLE(Oddball, result); OverwriteFeedback(var_type_feedback, CompareOperationFeedback::kNone); @@ -12235,7 +12329,7 @@ Node* CodeStubAssembler::StrictEqual(Node* lhs, Node* rhs, BIND(&if_lhsisnotsmi); { // Load the map of {lhs}. - Node* lhs_map = LoadMap(lhs); + TNode lhs_map = LoadMap(CAST(lhs)); // Check if {lhs} is a HeapNumber. Label if_lhsisnumber(this), if_lhsisnotnumber(this); @@ -12250,8 +12344,8 @@ Node* CodeStubAssembler::StrictEqual(Node* lhs, Node* rhs, BIND(&if_rhsissmi); { // Convert {lhs} and {rhs} to floating point values. - Node* lhs_value = LoadHeapNumberValue(lhs); - Node* rhs_value = SmiToFloat64(rhs); + Node* lhs_value = LoadHeapNumberValue(CAST(lhs)); + Node* rhs_value = SmiToFloat64(CAST(rhs)); CombineFeedback(var_type_feedback, CompareOperationFeedback::kNumber); @@ -12261,8 +12355,9 @@ Node* CodeStubAssembler::StrictEqual(Node* lhs, Node* rhs, BIND(&if_rhsisnotsmi); { + TNode rhs_ho = CAST(rhs); // Load the map of {rhs}. - Node* rhs_map = LoadMap(rhs); + TNode rhs_map = LoadMap(rhs_ho); // Check if {rhs} is also a HeapNumber. Label if_rhsisnumber(this), if_rhsisnotnumber(this); @@ -12271,8 +12366,8 @@ Node* CodeStubAssembler::StrictEqual(Node* lhs, Node* rhs, BIND(&if_rhsisnumber); { // Convert {lhs} and {rhs} to floating point values. 
- Node* lhs_value = LoadHeapNumberValue(lhs); - Node* rhs_value = LoadHeapNumberValue(rhs); + Node* lhs_value = LoadHeapNumberValue(CAST(lhs)); + Node* rhs_value = LoadHeapNumberValue(CAST(rhs)); CombineFeedback(var_type_feedback, CompareOperationFeedback::kNumber); @@ -12308,7 +12403,7 @@ Node* CodeStubAssembler::StrictEqual(Node* lhs, Node* rhs, BIND(&if_lhsisstring); { // Load the instance type of {rhs}. - Node* rhs_instance_type = LoadInstanceType(rhs); + Node* rhs_instance_type = LoadInstanceType(CAST(rhs)); // Check if {rhs} is also a String. Label if_rhsisstring(this, Label::kDeferred), @@ -12325,8 +12420,8 @@ Node* CodeStubAssembler::StrictEqual(Node* lhs, Node* rhs, CollectFeedbackForString(rhs_instance_type); var_type_feedback->Bind(SmiOr(lhs_feedback, rhs_feedback)); } - result.Bind(CallBuiltin(Builtins::kStringEqual, - NoContextConstant(), lhs, rhs)); + result = CAST(CallBuiltin(Builtins::kStringEqual, + NoContextConstant(), lhs, rhs)); Goto(&end); } @@ -12344,7 +12439,7 @@ Node* CodeStubAssembler::StrictEqual(Node* lhs, Node* rhs, BIND(&if_lhsisbigint); { // Load the instance type of {rhs}. - Node* rhs_instance_type = LoadInstanceType(rhs); + TNode rhs_instance_type = LoadInstanceType(CAST(rhs)); // Check if {rhs} is also a BigInt. Label if_rhsisbigint(this, Label::kDeferred), @@ -12356,8 +12451,8 @@ Node* CodeStubAssembler::StrictEqual(Node* lhs, Node* rhs, { CombineFeedback(var_type_feedback, CompareOperationFeedback::kBigInt); - result.Bind(CallRuntime(Runtime::kBigIntEqualToBigInt, - NoContextConstant(), lhs, rhs)); + result = CAST(CallRuntime(Runtime::kBigIntEqualToBigInt, + NoContextConstant(), lhs, rhs)); Goto(&end); } @@ -12368,8 +12463,8 @@ Node* CodeStubAssembler::StrictEqual(Node* lhs, Node* rhs, BIND(&if_lhsisnotbigint); if (var_type_feedback != nullptr) { // Load the instance type of {rhs}. - Node* rhs_map = LoadMap(rhs); - Node* rhs_instance_type = LoadMapInstanceType(rhs_map); + TNode rhs_map = LoadMap(CAST(rhs)); + TNode rhs_instance_type = LoadMapInstanceType(rhs_map); Label if_lhsissymbol(this), if_lhsisreceiver(this), if_lhsisoddball(this); @@ -12442,7 +12537,7 @@ Node* CodeStubAssembler::StrictEqual(Node* lhs, Node* rhs, BIND(&if_rhsisnotsmi); { // Load the map of the {rhs}. - Node* rhs_map = LoadMap(rhs); + TNode rhs_map = LoadMap(CAST(rhs)); // The {rhs} could be a HeapNumber with the same value as {lhs}. Label if_rhsisnumber(this), if_rhsisnotnumber(this); @@ -12451,8 +12546,8 @@ Node* CodeStubAssembler::StrictEqual(Node* lhs, Node* rhs, BIND(&if_rhsisnumber); { // Convert {lhs} and {rhs} to floating point values. 
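// A toy rendering (not V8 code) of the strict-equality shape described by the
// pseudo-code comment above StrictEqual: identical values are equal unless
// NaN, and otherwise only same-type comparisons can succeed, by numeric
// value, string contents, or object identity. BigInts, oddballs, and feedback
// collection are omitted, and the variant type is an invented stand-in:

#include <cassert>
#include <cmath>
#include <string>
#include <variant>

using JSValue = std::variant<double, std::string, const void*>;

bool StrictEqual(const JSValue& lhs, const JSValue& rhs) {
  if (lhs.index() != rhs.index()) return false;  // different types
  if (auto* a = std::get_if<double>(&lhs)) {
    return *a == std::get<double>(rhs);  // NaN == NaN is false, as required
  }
  if (auto* a = std::get_if<std::string>(&lhs)) {
    return *a == std::get<std::string>(rhs);  // contents, like StringEqual
  }
  return std::get<const void*>(lhs) == std::get<const void*>(rhs);  // identity
}

int main() {
  assert(StrictEqual(JSValue(1.0), JSValue(1.0)));
  assert(!StrictEqual(JSValue(std::nan("")), JSValue(std::nan(""))));
  assert(StrictEqual(JSValue(std::string("a")), JSValue(std::string("a"))));
  int dummy;
  assert(StrictEqual(JSValue(&dummy), JSValue(&dummy)));
}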
- Node* lhs_value = SmiToFloat64(lhs); - Node* rhs_value = LoadHeapNumberValue(rhs); + TNode lhs_value = SmiToFloat64(CAST(lhs)); + TNode rhs_value = LoadHeapNumberValue(CAST(rhs)); CombineFeedback(var_type_feedback, CompareOperationFeedback::kNumber); @@ -12468,7 +12563,7 @@ Node* CodeStubAssembler::StrictEqual(Node* lhs, Node* rhs, BIND(&if_equal); { - result.Bind(TrueConstant()); + result = TrueConstant(); Goto(&end); } @@ -12480,7 +12575,7 @@ Node* CodeStubAssembler::StrictEqual(Node* lhs, Node* rhs, BIND(&if_notequal); { - result.Bind(FalseConstant()); + result = FalseConstant(); Goto(&end); } @@ -12636,7 +12731,7 @@ TNode CodeStubAssembler::HasProperty(SloppyTNode context, &return_true, &return_false, next_holder, if_bailout); }; - TryPrototypeChainLookup(object, key, lookup_property_in_holder, + TryPrototypeChainLookup(object, object, key, lookup_property_in_holder, lookup_element_in_holder, &return_false, &call_runtime, &if_proxy); @@ -13114,8 +13209,9 @@ TNode CodeStubAssembler::CreateArrayIterator( return CAST(iterator); } -Node* CodeStubAssembler::AllocateJSIteratorResult(Node* context, Node* value, - Node* done) { +TNode CodeStubAssembler::AllocateJSIteratorResult( + SloppyTNode context, SloppyTNode value, + SloppyTNode done) { CSA_ASSERT(this, IsBoolean(done)); Node* native_context = LoadNativeContext(context); Node* map = @@ -13128,7 +13224,7 @@ Node* CodeStubAssembler::AllocateJSIteratorResult(Node* context, Node* value, RootIndex::kEmptyFixedArray); StoreObjectFieldNoWriteBarrier(result, JSIteratorResult::kValueOffset, value); StoreObjectFieldNoWriteBarrier(result, JSIteratorResult::kDoneOffset, done); - return result; + return CAST(result); } Node* CodeStubAssembler::AllocateJSIteratorResultForEntry(Node* context, @@ -13174,9 +13270,8 @@ TNode CodeStubAssembler::ArraySpeciesCreate(TNode context, return Construct(context, constructor, len); } -Node* CodeStubAssembler::IsDetachedBuffer(Node* buffer) { - CSA_ASSERT(this, HasInstanceType(buffer, JS_ARRAY_BUFFER_TYPE)); - TNode buffer_bit_field = LoadJSArrayBufferBitField(CAST(buffer)); +TNode CodeStubAssembler::IsDetachedBuffer(TNode buffer) { + TNode buffer_bit_field = LoadJSArrayBufferBitField(buffer); return IsSetWord32(buffer_bit_field); } @@ -13367,7 +13462,8 @@ void CodeStubArguments::PopAndReturn(Node* value) { value); } -Node* CodeStubAssembler::IsFastElementsKind(Node* elements_kind) { +TNode CodeStubAssembler::IsFastElementsKind( + TNode elements_kind) { STATIC_ASSERT(FIRST_ELEMENTS_KIND == FIRST_FAST_ELEMENTS_KIND); return Uint32LessThanOrEqual(elements_kind, Int32Constant(LAST_FAST_ELEMENTS_KIND)); @@ -13382,7 +13478,8 @@ TNode CodeStubAssembler::IsDoubleElementsKind( Int32Constant(PACKED_DOUBLE_ELEMENTS / 2)); } -Node* CodeStubAssembler::IsFastSmiOrTaggedElementsKind(Node* elements_kind) { +TNode CodeStubAssembler::IsFastSmiOrTaggedElementsKind( + TNode elements_kind) { STATIC_ASSERT(FIRST_ELEMENTS_KIND == FIRST_FAST_ELEMENTS_KIND); STATIC_ASSERT(PACKED_DOUBLE_ELEMENTS > TERMINAL_FAST_ELEMENTS_KIND); STATIC_ASSERT(HOLEY_DOUBLE_ELEMENTS > TERMINAL_FAST_ELEMENTS_KIND); @@ -13390,12 +13487,14 @@ Node* CodeStubAssembler::IsFastSmiOrTaggedElementsKind(Node* elements_kind) { Int32Constant(TERMINAL_FAST_ELEMENTS_KIND)); } -Node* CodeStubAssembler::IsFastSmiElementsKind(Node* elements_kind) { +TNode CodeStubAssembler::IsFastSmiElementsKind( + SloppyTNode elements_kind) { return Uint32LessThanOrEqual(elements_kind, Int32Constant(HOLEY_SMI_ELEMENTS)); } -Node* CodeStubAssembler::IsHoleyFastElementsKind(Node* elements_kind) 
{ +TNode CodeStubAssembler::IsHoleyFastElementsKind( + TNode elements_kind) { CSA_ASSERT(this, IsFastElementsKind(elements_kind)); STATIC_ASSERT(HOLEY_SMI_ELEMENTS == (PACKED_SMI_ELEMENTS | 1)); @@ -13404,7 +13503,8 @@ Node* CodeStubAssembler::IsHoleyFastElementsKind(Node* elements_kind) { return IsSetWord32(elements_kind, 1); } -Node* CodeStubAssembler::IsHoleyFastElementsKindForRead(Node* elements_kind) { +TNode CodeStubAssembler::IsHoleyFastElementsKindForRead( + TNode elements_kind) { CSA_ASSERT(this, Uint32LessThanOrEqual(elements_kind, Int32Constant(LAST_FROZEN_ELEMENTS_KIND))); @@ -13417,8 +13517,8 @@ Node* CodeStubAssembler::IsHoleyFastElementsKindForRead(Node* elements_kind) { return IsSetWord32(elements_kind, 1); } -Node* CodeStubAssembler::IsElementsKindGreaterThan( - Node* target_kind, ElementsKind reference_kind) { +TNode CodeStubAssembler::IsElementsKindGreaterThan( + TNode target_kind, ElementsKind reference_kind) { return Int32GreaterThan(target_kind, Int32Constant(reference_kind)); } @@ -13442,14 +13542,6 @@ Node* CodeStubAssembler::IsDebugActive() { return Word32NotEqual(is_debug_active, Int32Constant(0)); } -TNode CodeStubAssembler::IsRuntimeCallStatsEnabled() { - STATIC_ASSERT(sizeof(TracingFlags::runtime_stats) == kInt32Size); - TNode flag_value = UncheckedCast(Load( - MachineType::Int32(), - ExternalConstant(ExternalReference::address_of_runtime_stats_flag()))); - return Word32NotEqual(flag_value, Int32Constant(0)); -} - Node* CodeStubAssembler::IsPromiseHookEnabled() { Node* const promise_hook = Load( MachineType::Pointer(), @@ -13494,8 +13586,9 @@ TNode CodeStubAssembler::LoadBuiltin(TNode builtin_id) { int const kSmiShiftBits = kSmiShiftSize + kSmiTagSize; int index_shift = kSystemPointerSizeLog2 - kSmiShiftBits; TNode table_index = - index_shift >= 0 ? WordShl(BitcastTaggedToWord(builtin_id), index_shift) - : WordSar(BitcastTaggedToWord(builtin_id), -index_shift); + index_shift >= 0 + ? 
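// The elements-kind predicates above compile down to integer tricks: fast
// kinds occupy a prefix of the numbering, so one unsigned compare tests the
// whole range, and each holey kind is its packed sibling plus one, so
// holey-ness is a single low-bit test (which the STATIC_ASSERTs pin down). A
// sketch with illustrative constants; V8's real values follow the same
// pattern but may differ in detail:

#include <cassert>
#include <cstdint>

enum ElementsKind : uint32_t {
  PACKED_SMI_ELEMENTS = 0,
  HOLEY_SMI_ELEMENTS = 1,
  PACKED_ELEMENTS = 2,
  HOLEY_ELEMENTS = 3,
  PACKED_DOUBLE_ELEMENTS = 4,
  HOLEY_DOUBLE_ELEMENTS = 5,
  LAST_FAST_ELEMENTS_KIND = HOLEY_DOUBLE_ELEMENTS,
  DICTIONARY_ELEMENTS = 6,
};

// One unsigned compare covers the whole fast range (kinds start at 0).
bool IsFastElementsKind(uint32_t kind) {
  return kind <= LAST_FAST_ELEMENTS_KIND;
}

// Packed/holey pairs share all bits but the lowest.
bool IsHoleyFastElementsKind(uint32_t kind) { return (kind & 1) != 0; }

int main() {
  assert(IsFastElementsKind(HOLEY_ELEMENTS));
  assert(!IsFastElementsKind(DICTIONARY_ELEMENTS));
  assert(IsHoleyFastElementsKind(HOLEY_DOUBLE_ELEMENTS));
  assert(!IsHoleyFastElementsKind(PACKED_ELEMENTS));
}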
WordShl(BitcastTaggedSignedToWord(builtin_id), index_shift) + : WordSar(BitcastTaggedSignedToWord(builtin_id), -index_shift); return CAST( Load(MachineType::TaggedPointer(), @@ -13637,18 +13730,6 @@ Node* CodeStubAssembler::AllocateFunctionWithMapAndContext(Node* map, return fun; } -Node* CodeStubAssembler::MarkerIsFrameType(Node* marker_or_function, - StackFrame::Type frame_type) { - return WordEqual(marker_or_function, - IntPtrConstant(StackFrame::TypeToMarker(frame_type))); -} - -Node* CodeStubAssembler::MarkerIsNotFrameType(Node* marker_or_function, - StackFrame::Type frame_type) { - return WordNotEqual(marker_or_function, - IntPtrConstant(StackFrame::TypeToMarker(frame_type))); -} - void CodeStubAssembler::CheckPrototypeEnumCache(Node* receiver, Node* receiver_map, Label* if_fast, @@ -13923,7 +14004,7 @@ void CodeStubAssembler::GotoIfInitialPrototypePropertiesModified( if (i == 0) { combined_details = details; } else { - combined_details = Unsigned(Word32And(combined_details, details)); + combined_details = Word32And(combined_details, details); } } diff --git a/deps/v8/src/codegen/code-stub-assembler.h b/deps/v8/src/codegen/code-stub-assembler.h index 207eb509e11cb4..00a84c39265650 100644 --- a/deps/v8/src/codegen/code-stub-assembler.h +++ b/deps/v8/src/codegen/code-stub-assembler.h @@ -10,9 +10,8 @@ #include "src/base/macros.h" #include "src/codegen/bailout-reason.h" #include "src/common/globals.h" +#include "src/common/message-template.h" #include "src/compiler/code-assembler.h" -#include "src/execution/frames.h" -#include "src/execution/message-template.h" #include "src/objects/arguments.h" #include "src/objects/bigint.h" #include "src/objects/objects.h" @@ -39,7 +38,6 @@ enum class PrimitiveType { kBoolean, kNumber, kString, kSymbol }; PromiseSpeciesProtector) \ V(TypedArraySpeciesProtector, typed_array_species_protector, \ TypedArraySpeciesProtector) \ - V(RegExpSpeciesProtector, regexp_species_protector, RegExpSpeciesProtector) #define HEAP_IMMUTABLE_IMMOVABLE_OBJECT_LIST(V) \ V(AccessorInfoMap, accessor_info_map, AccessorInfoMap) \ @@ -111,59 +109,45 @@ enum class PrimitiveType { kBoolean, kNumber, kString, kSymbol }; #endif #ifdef DEBUG -// Add stringified versions to the given values, except the first. That is, -// transform -// x, a, b, c, d, e, f -// to -// a, "a", b, "b", c, "c", d, "d", e, "e", f, "f" -// -// __VA_ARGS__ is ignored to allow the caller to pass through too many -// parameters, and the first element is ignored to support having no extra -// values without empty __VA_ARGS__ (which cause all sorts of problems with -// extra commas). -#define CSA_ASSERT_STRINGIFY_EXTRA_VALUES_5(_, v1, v2, v3, v4, v5, ...) \ - v1, #v1, v2, #v2, v3, #v3, v4, #v4, v5, #v5 - -// Stringify the given variable number of arguments. The arguments are trimmed -// to 5 if there are too many, and padded with nullptr if there are not enough. -#define CSA_ASSERT_STRINGIFY_EXTRA_VALUES(...) \ - CSA_ASSERT_STRINGIFY_EXTRA_VALUES_5(__VA_ARGS__, nullptr, nullptr, nullptr, \ - nullptr, nullptr) - -#define CSA_ASSERT_GET_FIRST(x, ...) (x) -#define CSA_ASSERT_GET_FIRST_STR(x, ...) #x +// CSA_ASSERT_ARGS generates an +// std::initializer_list from __VA_ARGS__. It +// currently supports between 0 and 2 arguments. + +// clang-format off +#define CSA_ASSERT_0_ARGS(...) {} +#define CSA_ASSERT_1_ARG(a, ...) {{a, #a}} +#define CSA_ASSERT_2_ARGS(a, b, ...) {{a, #a}, {b, #b}} +// clang-format on +#define SWITCH_CSA_ASSERT_ARGS(dummy, a, b, FUNC, ...) FUNC(a, b) +#define CSA_ASSERT_ARGS(...) 
\ + SWITCH_CSA_ASSERT_ARGS(dummy, ##__VA_ARGS__, CSA_ASSERT_2_ARGS, \ + CSA_ASSERT_1_ARG, CSA_ASSERT_0_ARGS) // CSA_ASSERT(csa, , ) -// We have to jump through some hoops to allow to be -// empty. -#define CSA_ASSERT(csa, ...) \ - (csa)->Assert( \ - [&]() -> compiler::Node* { \ - return implicit_cast>( \ - EXPAND(CSA_ASSERT_GET_FIRST(__VA_ARGS__))); \ - }, \ - EXPAND(CSA_ASSERT_GET_FIRST_STR(__VA_ARGS__)), __FILE__, __LINE__, \ - CSA_ASSERT_STRINGIFY_EXTRA_VALUES(__VA_ARGS__)) +#define CSA_ASSERT(csa, condition_node, ...) \ + (csa)->Assert( \ + [&]() -> compiler::Node* { \ + return implicit_cast>(condition_node); \ + }, \ + #condition_node, __FILE__, __LINE__, CSA_ASSERT_ARGS(__VA_ARGS__)) // CSA_ASSERT_BRANCH(csa, [](Label* ok, Label* not_ok) {...}, // ) -#define CSA_ASSERT_BRANCH(csa, ...) \ - (csa)->Assert(EXPAND(CSA_ASSERT_GET_FIRST(__VA_ARGS__)), \ - EXPAND(CSA_ASSERT_GET_FIRST_STR(__VA_ARGS__)), __FILE__, \ - __LINE__, CSA_ASSERT_STRINGIFY_EXTRA_VALUES(__VA_ARGS__)) - -#define CSA_ASSERT_JS_ARGC_OP(csa, Op, op, expected) \ - (csa)->Assert( \ - [&]() -> compiler::Node* { \ - compiler::Node* const argc = \ - (csa)->Parameter(Descriptor::kJSActualArgumentsCount); \ - return (csa)->Op(argc, (csa)->Int32Constant(expected)); \ - }, \ - "argc " #op " " #expected, __FILE__, __LINE__, \ - SmiFromInt32((csa)->Parameter(Descriptor::kJSActualArgumentsCount)), \ - "argc") +#define CSA_ASSERT_BRANCH(csa, gen, ...) \ + (csa)->Assert(gen, #gen, __FILE__, __LINE__, CSA_ASSERT_ARGS(__VA_ARGS__)) + +#define CSA_ASSERT_JS_ARGC_OP(csa, Op, op, expected) \ + (csa)->Assert( \ + [&]() -> compiler::Node* { \ + compiler::Node* const argc = \ + (csa)->Parameter(Descriptor::kJSActualArgumentsCount); \ + return (csa)->Op(argc, (csa)->Int32Constant(expected)); \ + }, \ + "argc " #op " " #expected, __FILE__, __LINE__, \ + {{SmiFromInt32((csa)->Parameter(Descriptor::kJSActualArgumentsCount)), \ + "argc"}}) #define CSA_ASSERT_JS_ARGC_EQ(csa, expected) \ CSA_ASSERT_JS_ARGC_OP(csa, Word32Equal, ==, expected) @@ -490,21 +474,21 @@ class V8_EXPORT_PRIVATE CodeStubAssembler TNode SmiToInt32(SloppyTNode value); // Smi operations. 
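// The rewritten CSA_ASSERT machinery counts its extra values by sliding
// __VA_ARGS__ across a list of handler macros. A minimal compilable sketch of
// the same dispatch (handler names invented; like the V8 header, it relies on
// the GNU ##__VA_ARGS__ extension to swallow the comma when the argument list
// is empty):

#include <cstdio>

#define ARGS_0(...) 0
#define ARGS_1(a, ...) 1
#define ARGS_2(a, b, ...) 2
// With 0, 1, or 2 extra arguments, a different handler lands in the FUNC slot.
#define SWITCH_ARGS(dummy, a, b, FUNC, ...) FUNC(a, b)
#define COUNT_ARGS(...) \
  SWITCH_ARGS(dummy, ##__VA_ARGS__, ARGS_2, ARGS_1, ARGS_0)

int main() {
  std::printf("%d %d %d\n", COUNT_ARGS(), COUNT_ARGS(x), COUNT_ARGS(x, y));
  // prints: 0 1 2
}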
-#define SMI_ARITHMETIC_BINOP(SmiOpName, IntPtrOpName, Int32OpName) \ - TNode SmiOpName(TNode a, TNode b) { \ - if (SmiValuesAre32Bits()) { \ - return BitcastWordToTaggedSigned( \ - IntPtrOpName(BitcastTaggedToWord(a), BitcastTaggedToWord(b))); \ - } else { \ - DCHECK(SmiValuesAre31Bits()); \ - if (kSystemPointerSize == kInt64Size) { \ - CSA_ASSERT(this, IsValidSmi(a)); \ - CSA_ASSERT(this, IsValidSmi(b)); \ - } \ - return BitcastWordToTaggedSigned(ChangeInt32ToIntPtr( \ - Int32OpName(TruncateIntPtrToInt32(BitcastTaggedToWord(a)), \ - TruncateIntPtrToInt32(BitcastTaggedToWord(b))))); \ - } \ +#define SMI_ARITHMETIC_BINOP(SmiOpName, IntPtrOpName, Int32OpName) \ + TNode SmiOpName(TNode a, TNode b) { \ + if (SmiValuesAre32Bits()) { \ + return BitcastWordToTaggedSigned(IntPtrOpName( \ + BitcastTaggedSignedToWord(a), BitcastTaggedSignedToWord(b))); \ + } else { \ + DCHECK(SmiValuesAre31Bits()); \ + if (kSystemPointerSize == kInt64Size) { \ + CSA_ASSERT(this, IsValidSmi(a)); \ + CSA_ASSERT(this, IsValidSmi(b)); \ + } \ + return BitcastWordToTaggedSigned(ChangeInt32ToIntPtr( \ + Int32OpName(TruncateIntPtrToInt32(BitcastTaggedSignedToWord(a)), \ + TruncateIntPtrToInt32(BitcastTaggedSignedToWord(b))))); \ + } \ } SMI_ARITHMETIC_BINOP(SmiAdd, IntPtrAdd, Int32Add) SMI_ARITHMETIC_BINOP(SmiSub, IntPtrSub, Int32Sub) @@ -523,19 +507,20 @@ class V8_EXPORT_PRIVATE CodeStubAssembler TNode TrySmiSub(TNode a, TNode b, Label* if_overflow); TNode SmiShl(TNode a, int shift) { - return BitcastWordToTaggedSigned(WordShl(BitcastTaggedToWord(a), shift)); + return BitcastWordToTaggedSigned( + WordShl(BitcastTaggedSignedToWord(a), shift)); } TNode SmiShr(TNode a, int shift) { return BitcastWordToTaggedSigned( - WordAnd(WordShr(BitcastTaggedToWord(a), shift), - BitcastTaggedToWord(SmiConstant(-1)))); + WordAnd(WordShr(BitcastTaggedSignedToWord(a), shift), + BitcastTaggedSignedToWord(SmiConstant(-1)))); } TNode SmiSar(TNode a, int shift) { return BitcastWordToTaggedSigned( - WordAnd(WordSar(BitcastTaggedToWord(a), shift), - BitcastTaggedToWord(SmiConstant(-1)))); + WordAnd(WordSar(BitcastTaggedSignedToWord(a), shift), + BitcastTaggedSignedToWord(SmiConstant(-1)))); } Node* WordOrSmiShl(Node* a, int shift, ParameterMode mode) { @@ -556,19 +541,20 @@ class V8_EXPORT_PRIVATE CodeStubAssembler } } -#define SMI_COMPARISON_OP(SmiOpName, IntPtrOpName, Int32OpName) \ - TNode SmiOpName(TNode a, TNode b) { \ - if (SmiValuesAre32Bits()) { \ - return IntPtrOpName(BitcastTaggedToWord(a), BitcastTaggedToWord(b)); \ - } else { \ - DCHECK(SmiValuesAre31Bits()); \ - if (kSystemPointerSize == kInt64Size) { \ - CSA_ASSERT(this, IsValidSmi(a)); \ - CSA_ASSERT(this, IsValidSmi(b)); \ - } \ - return Int32OpName(TruncateIntPtrToInt32(BitcastTaggedToWord(a)), \ - TruncateIntPtrToInt32(BitcastTaggedToWord(b))); \ - } \ +#define SMI_COMPARISON_OP(SmiOpName, IntPtrOpName, Int32OpName) \ + TNode SmiOpName(TNode a, TNode b) { \ + if (SmiValuesAre32Bits()) { \ + return IntPtrOpName(BitcastTaggedSignedToWord(a), \ + BitcastTaggedSignedToWord(b)); \ + } else { \ + DCHECK(SmiValuesAre31Bits()); \ + if (kSystemPointerSize == kInt64Size) { \ + CSA_ASSERT(this, IsValidSmi(a)); \ + CSA_ASSERT(this, IsValidSmi(b)); \ + } \ + return Int32OpName(TruncateIntPtrToInt32(BitcastTaggedSignedToWord(a)), \ + TruncateIntPtrToInt32(BitcastTaggedSignedToWord(b))); \ + } \ } SMI_COMPARISON_OP(SmiEqual, WordEqual, Word32Equal) SMI_COMPARISON_OP(SmiNotEqual, WordNotEqual, Word32NotEqual) @@ -626,43 +612,22 @@ class V8_EXPORT_PRIVATE CodeStubAssembler using BranchGenerator 
= std::function; using NodeGenerator = std::function; - - void Assert(const BranchGenerator& branch, const char* message = nullptr, - const char* file = nullptr, int line = 0, - Node* extra_node1 = nullptr, const char* extra_node1_name = "", - Node* extra_node2 = nullptr, const char* extra_node2_name = "", - Node* extra_node3 = nullptr, const char* extra_node3_name = "", - Node* extra_node4 = nullptr, const char* extra_node4_name = "", - Node* extra_node5 = nullptr, const char* extra_node5_name = ""); - void Assert(const NodeGenerator& condition_body, - const char* message = nullptr, const char* file = nullptr, - int line = 0, Node* extra_node1 = nullptr, - const char* extra_node1_name = "", Node* extra_node2 = nullptr, - const char* extra_node2_name = "", Node* extra_node3 = nullptr, - const char* extra_node3_name = "", Node* extra_node4 = nullptr, - const char* extra_node4_name = "", Node* extra_node5 = nullptr, - const char* extra_node5_name = ""); - void Check(const BranchGenerator& branch, const char* message = nullptr, - const char* file = nullptr, int line = 0, - Node* extra_node1 = nullptr, const char* extra_node1_name = "", - Node* extra_node2 = nullptr, const char* extra_node2_name = "", - Node* extra_node3 = nullptr, const char* extra_node3_name = "", - Node* extra_node4 = nullptr, const char* extra_node4_name = "", - Node* extra_node5 = nullptr, const char* extra_node5_name = ""); - void Check(const NodeGenerator& condition_body, const char* message = nullptr, - const char* file = nullptr, int line = 0, - Node* extra_node1 = nullptr, const char* extra_node1_name = "", - Node* extra_node2 = nullptr, const char* extra_node2_name = "", - Node* extra_node3 = nullptr, const char* extra_node3_name = "", - Node* extra_node4 = nullptr, const char* extra_node4_name = "", - Node* extra_node5 = nullptr, const char* extra_node5_name = ""); - void FailAssert( - const char* message = nullptr, const char* file = nullptr, int line = 0, - Node* extra_node1 = nullptr, const char* extra_node1_name = "", - Node* extra_node2 = nullptr, const char* extra_node2_name = "", - Node* extra_node3 = nullptr, const char* extra_node3_name = "", - Node* extra_node4 = nullptr, const char* extra_node4_name = "", - Node* extra_node5 = nullptr, const char* extra_node5_name = ""); + using ExtraNode = std::pair; + + void Assert(const BranchGenerator& branch, const char* message, + const char* file, int line, + std::initializer_list extra_nodes = {}); + void Assert(const NodeGenerator& condition_body, const char* message, + const char* file, int line, + std::initializer_list extra_nodes = {}); + void Check(const BranchGenerator& branch, const char* message, + const char* file, int line, + std::initializer_list extra_nodes = {}); + void Check(const NodeGenerator& condition_body, const char* message, + const char* file, int line, + std::initializer_list extra_nodes = {}); + void FailAssert(const char* message, const char* file, int line, + std::initializer_list extra_nodes = {}); void FastCheck(TNode condition); @@ -794,6 +759,12 @@ class V8_EXPORT_PRIVATE CodeStubAssembler // otherwise goes to {if_false}. void BranchIfToBooleanIsTrue(Node* value, Label* if_true, Label* if_false); + // Branches to {if_false} if ToBoolean applied to {value} yields false, + // otherwise goes to {if_true}. 
+ void BranchIfToBooleanIsFalse(Node* value, Label* if_false, Label* if_true) { + BranchIfToBooleanIsTrue(value, if_true, if_false); + } + void BranchIfJSReceiver(Node* object, Label* if_true, Label* if_false); // Branches to {if_true} when --force-slow-path flag has been passed. @@ -811,8 +782,10 @@ class V8_EXPORT_PRIVATE CodeStubAssembler MachineType type = MachineType::AnyTagged()); // Load an object pointer from a buffer that isn't in the heap. - Node* LoadBufferObject(Node* buffer, int offset, - MachineType type = MachineType::AnyTagged()); + Node* LoadBufferObject(Node* buffer, int offset, MachineType type); + TNode LoadBufferObject(TNode buffer, int offset) { + return CAST(LoadBufferObject(buffer, offset, MachineType::AnyTagged())); + } TNode LoadBufferPointer(TNode buffer, int offset) { return UncheckedCast( LoadBufferObject(buffer, offset, MachineType::Pointer())); @@ -887,15 +860,19 @@ class V8_EXPORT_PRIVATE CodeStubAssembler std::is_convertible, TNode>::value, int>::type = 0> TNode LoadReference(Reference reference) { - return CAST(LoadFromObject(MachineTypeOf::value, reference.object, - reference.offset)); + TNode offset = + IntPtrSub(reference.offset, IntPtrConstant(kHeapObjectTag)); + return CAST( + LoadFromObject(MachineTypeOf::value, reference.object, offset)); } template , TNode>::value, int>::type = 0> TNode LoadReference(Reference reference) { - return UncheckedCast(LoadFromObject(MachineTypeOf::value, - reference.object, reference.offset)); + TNode offset = + IntPtrSub(reference.offset, IntPtrConstant(kHeapObjectTag)); + return UncheckedCast( + LoadFromObject(MachineTypeOf::value, reference.object, offset)); } template , TNode>::value, @@ -908,15 +885,18 @@ class V8_EXPORT_PRIVATE CodeStubAssembler } else if (std::is_same::value) { write_barrier = StoreToObjectWriteBarrier::kMap; } - StoreToObject(rep, reference.object, reference.offset, value, - write_barrier); + TNode offset = + IntPtrSub(reference.offset, IntPtrConstant(kHeapObjectTag)); + StoreToObject(rep, reference.object, offset, value, write_barrier); } template , TNode>::value, int>::type = 0> void StoreReference(Reference reference, TNode value) { - StoreToObject(MachineRepresentationOf::value, reference.object, - reference.offset, value, StoreToObjectWriteBarrier::kNone); + TNode offset = + IntPtrSub(reference.offset, IntPtrConstant(kHeapObjectTag)); + StoreToObject(MachineRepresentationOf::value, reference.object, offset, + value, StoreToObjectWriteBarrier::kNone); } // Tag a smi and store it. @@ -927,7 +907,7 @@ class V8_EXPORT_PRIVATE CodeStubAssembler // Load the Map of a HeapObject. TNode LoadMap(SloppyTNode object); // Load the instance type of a HeapObject. - TNode LoadInstanceType(SloppyTNode object); + TNode LoadInstanceType(SloppyTNode object); // Compare the instance type of the object against the provided one. TNode HasInstanceType(SloppyTNode object, InstanceType type); @@ -967,7 +947,7 @@ class V8_EXPORT_PRIVATE CodeStubAssembler // Load bit field 3 of a map. TNode LoadMapBitField3(SloppyTNode map); // Load the instance type of a map. - TNode LoadMapInstanceType(SloppyTNode map); + TNode LoadMapInstanceType(SloppyTNode map); // Load the ElementsKind of a map. TNode LoadMapElementsKind(SloppyTNode map); TNode LoadElementsKind(SloppyTNode object); @@ -1023,8 +1003,8 @@ class V8_EXPORT_PRIVATE CodeStubAssembler TNode LoadStringLengthAsWord32(SloppyTNode string); // Loads a pointer to the sequential String char array.
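The IntPtrSub(reference.offset, IntPtrConstant(kHeapObjectTag)) pattern repeated in the hunks above corrects for V8's pointer tagging. A hedged arithmetic sketch of why the tag is subtracted exactly once (kHeapObjectTag is 1 in V8; the addresses and offset are made up):

    #include <cassert>
    constexpr long kHeapObjectTag = 1;  // V8 heap pointers keep bit 0 set
    int main() {
      long raw_start = 0x1000;                       // untagged object start
      long tagged_ptr = raw_start + kHeapObjectTag;  // pointer as held on-heap
      long field_offset = 12;                        // offset from object start
      // Subtracting the tag from the offset cancels the tag bit carried by
      // the pointer, so the raw access lands on start + offset:
      long address = tagged_ptr + (field_offset - kHeapObjectTag);
      assert(address == raw_start + field_offset);
    }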
Node* PointerToSeqStringData(Node* seq_string); - // Load value field of a JSValue object. - Node* LoadJSValueValue(Node* object); + // Load value field of a JSPrimitiveWrapper object. + Node* LoadJSPrimitiveWrapperValue(Node* object); // Figures out whether the value of maybe_object is: // - a SMI (jump to "if_smi", "extracted" will be the SMI value) @@ -1076,8 +1056,8 @@ class V8_EXPORT_PRIVATE CodeStubAssembler // Array is any array-like type that has a fixed header followed by // tagged elements. - template - TNode LoadArrayElement( + template + TNode LoadArrayElement( TNode array, int array_header_size, Node* index, int additional_offset = 0, ParameterMode parameter_mode = INTPTR_PARAMETERS, @@ -1232,15 +1212,23 @@ class V8_EXPORT_PRIVATE CodeStubAssembler TNode LoadDoubleWithHoleCheck( SloppyTNode base, SloppyTNode offset, Label* if_hole, MachineType machine_type = MachineType::Float64()); - Node* LoadFixedTypedArrayElementAsTagged( - Node* data_pointer, Node* index_node, ElementsKind elements_kind, + TNode LoadFixedTypedArrayElementAsTagged( + TNode data_pointer, Node* index_node, ElementsKind elements_kind, ParameterMode parameter_mode = INTPTR_PARAMETERS); TNode LoadFixedTypedArrayElementAsTagged( - TNode data_pointer, TNode index, TNode elements_kind); + TNode data_pointer, TNode index_node, + ElementsKind elements_kind) { + return LoadFixedTypedArrayElementAsTagged(data_pointer, index_node, + elements_kind, SMI_PARAMETERS); + } + TNode LoadFixedTypedArrayElementAsTagged( + TNode data_pointer, TNode index, + TNode elements_kind); // Parts of the above, factored out for readability: - Node* LoadFixedBigInt64ArrayElementAsTagged(Node* data_pointer, Node* offset); - Node* LoadFixedBigUint64ArrayElementAsTagged(Node* data_pointer, - Node* offset); + TNode LoadFixedBigInt64ArrayElementAsTagged( + SloppyTNode data_pointer, SloppyTNode offset); + TNode LoadFixedBigUint64ArrayElementAsTagged( + SloppyTNode data_pointer, SloppyTNode offset); // 64-bit platforms only: TNode BigIntFromInt64(TNode value); TNode BigIntFromUint64(TNode value); @@ -1250,10 +1238,9 @@ class V8_EXPORT_PRIVATE CodeStubAssembler void StoreJSTypedArrayElementFromTagged(TNode context, TNode typed_array, - TNode index_node, + TNode index_node, TNode value, - ElementsKind elements_kind, - ParameterMode parameter_mode); + ElementsKind elements_kind); // Context manipulation TNode LoadContextElement(SloppyTNode context, @@ -1534,10 +1521,15 @@ class V8_EXPORT_PRIVATE CodeStubAssembler // Like above, but allowing custom bitfield initialization. TNode AllocateRawBigInt(TNode length); void StoreBigIntBitfield(TNode bigint, TNode bitfield); - void StoreBigIntDigit(TNode bigint, int digit_index, + void StoreBigIntDigit(TNode bigint, intptr_t digit_index, + TNode digit); + void StoreBigIntDigit(TNode bigint, TNode digit_index, TNode digit); + TNode LoadBigIntBitfield(TNode bigint); - TNode LoadBigIntDigit(TNode bigint, int digit_index); + TNode LoadBigIntDigit(TNode bigint, intptr_t digit_index); + TNode LoadBigIntDigit(TNode bigint, + TNode digit_index); // Allocate a ByteArray with the given length. 
TNode AllocateByteArray(TNode length, @@ -1573,9 +1565,9 @@ class V8_EXPORT_PRIVATE CodeStubAssembler TNode AllocateNameDictionary(int at_least_space_for); TNode AllocateNameDictionary( - TNode at_least_space_for); + TNode at_least_space_for, AllocationFlags = kNone); TNode AllocateNameDictionaryWithCapacity( - TNode capacity); + TNode capacity, AllocationFlags = kNone); TNode CopyNameDictionary(TNode dictionary, Label* large_object_fallback); @@ -1604,9 +1596,9 @@ class V8_EXPORT_PRIVATE CodeStubAssembler void InitializeStructBody(Node* object, Node* map, Node* size, int start_offset = Struct::kHeaderSize); - Node* AllocateJSObjectFromMap( - Node* map, Node* properties = nullptr, Node* elements = nullptr, - AllocationFlags flags = kNone, + TNode AllocateJSObjectFromMap( + SloppyTNode map, SloppyTNode properties = nullptr, + SloppyTNode elements = nullptr, AllocationFlags flags = kNone, SlackTrackingMode slack_tracking_mode = kNoSlackTracking); void InitializeJSObjectFromMap( @@ -1696,6 +1688,8 @@ class V8_EXPORT_PRIVATE CodeStubAssembler fixed_array_map); } + TNode GetStructMap(InstanceType instance_type); + TNode AllocateUninitializedFixedArray(intptr_t capacity) { return UncheckedCast(AllocateFixedArray( PACKED_ELEMENTS, IntPtrConstant(capacity), AllocationFlag::kNone)); @@ -1745,7 +1739,9 @@ class V8_EXPORT_PRIVATE CodeStubAssembler TNode object, IterationKind mode); - Node* AllocateJSIteratorResult(Node* context, Node* value, Node* done); + TNode AllocateJSIteratorResult(SloppyTNode context, + SloppyTNode value, + SloppyTNode done); Node* AllocateJSIteratorResultForEntry(Node* context, Node* key, Node* value); TNode ArraySpeciesCreate(TNode context, @@ -1934,6 +1930,15 @@ class V8_EXPORT_PRIVATE CodeStubAssembler SMI_PARAMETERS); } + TNode ExtractFixedArray( + TNode source, TNode first, TNode count, + TNode capacity, + ExtractFixedArrayFlags extract_flags = + ExtractFixedArrayFlag::kAllFixedArrays) { + return CAST(ExtractFixedArray(source, first, count, capacity, extract_flags, + INTPTR_PARAMETERS)); + } + // Copy a portion of an existing FixedArray or FixedDoubleArray into a new // FixedArray, including special appropriate handling for COW arrays. // * |source| is either a FixedArray or FixedDoubleArray from which to copy @@ -2043,6 +2048,10 @@ class V8_EXPORT_PRIVATE CodeStubAssembler TNode CalculateNewElementsCapacity(TNode old_capacity) { return CAST(CalculateNewElementsCapacity(old_capacity, SMI_PARAMETERS)); } + TNode CalculateNewElementsCapacity(TNode old_capacity) { + return UncheckedCast( + CalculateNewElementsCapacity(old_capacity, INTPTR_PARAMETERS)); + } // Tries to grow the |elements| array of given |object| to store the |key| // or bails out if the growing gap is too big. Returns new elements. @@ -2086,19 +2095,21 @@ class V8_EXPORT_PRIVATE CodeStubAssembler Label* if_bigint, Variable* var_bigint, Variable* var_feedback); // Truncate the floating point value of a HeapNumber to an Int32. - Node* TruncateHeapNumberValueToWord32(Node* object); + TNode TruncateHeapNumberValueToWord32(TNode object); // Conversions. 
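Before the conversion helpers that follow, a hedged sketch of the checks a TryFloat64ToSmi-style conversion has to make before tagging. This assumes 31-bit Smi values; the helper name and bounds below are illustrative, not V8's actual code:

    #include <cassert>
    #include <cmath>
    #include <cstdint>
    #include <optional>
    std::optional<int32_t> TryFloat64ToSmi31(double v) {
      constexpr int32_t kSmiMin = -(1 << 30);  // assumes SmiValuesAre31Bits()
      constexpr int32_t kSmiMax = (1 << 30) - 1;
      if (std::nearbyint(v) != v) return std::nullopt;       // not integral (or NaN)
      if (v == 0.0 && std::signbit(v)) return std::nullopt;  // -0 needs a HeapNumber
      if (v < kSmiMin || v > kSmiMax) return std::nullopt;   // outside Smi range
      return static_cast<int32_t>(v);
    }
    int main() {
      assert(TryFloat64ToSmi31(7.0).value() == 7);
      assert(!TryFloat64ToSmi31(0.5) && !TryFloat64ToSmi31(-0.0));
    }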
- void TryHeapNumberToSmi(TNode number, TVariable& output, + void TryHeapNumberToSmi(TNode number, + TVariable& output, // NOLINT(runtime/references) Label* if_smi); - void TryFloat64ToSmi(TNode number, TVariable& output, + void TryFloat64ToSmi(TNode number, + TVariable& output, // NOLINT(runtime/references) Label* if_smi); TNode ChangeFloat64ToTagged(SloppyTNode value); TNode ChangeInt32ToTagged(SloppyTNode value); TNode ChangeUint32ToTagged(SloppyTNode value); TNode ChangeUintPtrToTagged(TNode value); TNode ChangeNumberToUint32(TNode value); - TNode ChangeNumberToFloat64(SloppyTNode value); + TNode ChangeNumberToFloat64(TNode value); TNode TryNumberToUintPtr(TNode value, Label* if_negative); TNode ChangeNonnegativeNumberToUintPtr(TNode value) { return TryNumberToUintPtr(value, nullptr); @@ -2145,10 +2156,11 @@ class V8_EXPORT_PRIVATE CodeStubAssembler } // Throws a TypeError for {method_name} if {value} is neither of the given - // {primitive_type} nor a JSValue wrapping a value of {primitive_type}, or - // returns the {value} (or wrapped value) otherwise. - Node* ToThisValue(Node* context, Node* value, PrimitiveType primitive_type, - char const* method_name); + // {primitive_type} nor a JSPrimitiveWrapper wrapping a value of + // {primitive_type}, or returns the {value} (or wrapped value) otherwise. + TNode ToThisValue(TNode context, TNode value, + PrimitiveType primitive_type, + char const* method_name); // Throws a TypeError for {method_name} if {value} is not of the given // instance type. Returns {value}'s map. @@ -2231,6 +2243,7 @@ class V8_EXPORT_PRIVATE CodeStubAssembler TNode IsJSFunction(SloppyTNode object); TNode IsJSGeneratorObject(SloppyTNode object); TNode IsJSGlobalProxyInstanceType(SloppyTNode instance_type); + TNode IsJSGlobalProxyMap(SloppyTNode map); TNode IsJSGlobalProxy(SloppyTNode object); TNode IsJSObjectInstanceType(SloppyTNode instance_type); TNode IsJSObjectMap(SloppyTNode map); @@ -2246,9 +2259,10 @@ class V8_EXPORT_PRIVATE CodeStubAssembler TNode IsJSTypedArrayInstanceType(SloppyTNode instance_type); TNode IsJSTypedArrayMap(SloppyTNode map); TNode IsJSTypedArray(SloppyTNode object); - TNode IsJSValueInstanceType(SloppyTNode instance_type); - TNode IsJSValueMap(SloppyTNode map); - TNode IsJSValue(SloppyTNode object); + TNode IsJSPrimitiveWrapperInstanceType( + SloppyTNode instance_type); + TNode IsJSPrimitiveWrapperMap(SloppyTNode map); + TNode IsJSPrimitiveWrapper(SloppyTNode object); TNode IsMap(SloppyTNode object); TNode IsMutableHeapNumber(SloppyTNode object); TNode IsName(SloppyTNode object); @@ -2260,6 +2274,7 @@ class V8_EXPORT_PRIVATE CodeStubAssembler TNode IsOneByteStringInstanceType(SloppyTNode instance_type); TNode IsPrimitiveInstanceType(SloppyTNode instance_type); TNode IsPrivateSymbol(SloppyTNode object); + TNode IsPrivateName(SloppyTNode symbol); TNode IsPromiseCapability(SloppyTNode object); TNode IsPropertyArray(SloppyTNode object); TNode IsPropertyCell(SloppyTNode object); @@ -2305,7 +2320,8 @@ class V8_EXPORT_PRIVATE CodeStubAssembler TNode IsPromiseThenProtectorCellInvalid(); TNode IsArraySpeciesProtectorCellInvalid(); TNode IsTypedArraySpeciesProtectorCellInvalid(); - TNode IsRegExpSpeciesProtectorCellInvalid(); + TNode IsRegExpSpeciesProtectorCellInvalid( + TNode native_context); TNode IsPromiseSpeciesProtectorCellInvalid(); TNode IsMockArrayBufferAllocatorFlag() { @@ -2355,7 +2371,7 @@ class V8_EXPORT_PRIVATE CodeStubAssembler return Word32Equal(a, b); } bool ElementsKindEqual(ElementsKind a, ElementsKind b) { return a == b; } - Node* 
IsFastElementsKind(Node* elements_kind); + TNode IsFastElementsKind(TNode elements_kind); bool IsFastElementsKind(ElementsKind kind) { return v8::internal::IsFastElementsKind(kind); } @@ -2366,12 +2382,12 @@ class V8_EXPORT_PRIVATE CodeStubAssembler bool IsDoubleElementsKind(ElementsKind kind) { return v8::internal::IsDoubleElementsKind(kind); } - Node* IsFastSmiOrTaggedElementsKind(Node* elements_kind); - Node* IsFastSmiElementsKind(Node* elements_kind); - Node* IsHoleyFastElementsKind(Node* elements_kind); - Node* IsHoleyFastElementsKindForRead(Node* elements_kind); - Node* IsElementsKindGreaterThan(Node* target_kind, - ElementsKind reference_kind); + TNode IsFastSmiOrTaggedElementsKind(TNode elements_kind); + TNode IsFastSmiElementsKind(SloppyTNode elements_kind); + TNode IsHoleyFastElementsKind(TNode elements_kind); + TNode IsHoleyFastElementsKindForRead(TNode elements_kind); + TNode IsElementsKindGreaterThan(TNode target_kind, + ElementsKind reference_kind); TNode IsElementsKindLessThanOrEqual(TNode target_kind, ElementsKind reference_kind); // Check if reference_kind_a <= target_kind <= reference_kind_b @@ -2413,8 +2429,7 @@ class V8_EXPORT_PRIVATE CodeStubAssembler Node* DerefIndirectString(TNode string, TNode instance_type, Label* cannot_deref); - TNode StringFromSingleCodePoint(TNode codepoint, - UnicodeEncoding encoding); + TNode StringFromSingleUTF16EncodedCodePoint(TNode codepoint); // Type conversion helpers. enum class BigIntHandling { kConvertToNumber, kThrow }; @@ -2578,7 +2593,7 @@ class V8_EXPORT_PRIVATE CodeStubAssembler TNode IsSetSmi(SloppyTNode smi, int untagged_mask) { intptr_t mask_word = bit_cast(Smi::FromInt(untagged_mask)); return WordNotEqual( - WordAnd(BitcastTaggedToWord(smi), IntPtrConstant(mask_word)), + WordAnd(BitcastTaggedSignedToWord(smi), IntPtrConstant(mask_word)), IntPtrConstant(0)); } @@ -2950,11 +2965,11 @@ class V8_EXPORT_PRIVATE CodeStubAssembler // If it can't handle the case {receiver}/{key} case then the control goes // to {if_bailout}. // If {if_proxy} is nullptr, proxies go to if_bailout. - void TryPrototypeChainLookup(Node* receiver, Node* key, + void TryPrototypeChainLookup(Node* receiver, Node* object, Node* key, const LookupInHolder& lookup_property_in_holder, const LookupInHolder& lookup_element_in_holder, Label* if_end, Label* if_bailout, - Label* if_proxy = nullptr); + Label* if_proxy); // Instanceof helpers. 
// Returns true if {object} has {prototype} somewhere in its prototype @@ -3055,7 +3070,8 @@ class V8_EXPORT_PRIVATE CodeStubAssembler void EmitElementStore(Node* object, Node* key, Node* value, ElementsKind elements_kind, KeyedAccessStoreMode store_mode, Label* bailout, - Node* context); + Node* context, + Variable* maybe_converted_value = nullptr); Node* CheckForCapacityGrow(Node* object, Node* elements, ElementsKind kind, Node* length, Node* key, ParameterMode mode, @@ -3204,8 +3220,8 @@ class V8_EXPORT_PRIVATE CodeStubAssembler Node* Equal(Node* lhs, Node* rhs, Node* context, Variable* var_type_feedback = nullptr); - Node* StrictEqual(Node* lhs, Node* rhs, - Variable* var_type_feedback = nullptr); + TNode StrictEqual(SloppyTNode lhs, SloppyTNode rhs, + Variable* var_type_feedback = nullptr); // ECMA#sec-samevalue // Similar to StrictEqual except that NaNs are treated as equal and minus zero @@ -3248,13 +3264,11 @@ class V8_EXPORT_PRIVATE CodeStubAssembler // Debug helpers Node* IsDebugActive(); - TNode IsRuntimeCallStatsEnabled(); - // JSArrayBuffer helpers TNode LoadJSArrayBufferBitField(TNode array_buffer); TNode LoadJSArrayBufferBackingStore( TNode array_buffer); - Node* IsDetachedBuffer(Node* buffer); + TNode IsDetachedBuffer(TNode buffer); void ThrowIfArrayBufferIsDetached(SloppyTNode context, TNode array_buffer, const char* method_name); @@ -3301,12 +3315,6 @@ class V8_EXPORT_PRIVATE CodeStubAssembler Node* IsPromiseHookEnabledOrHasAsyncEventDelegate(); Node* IsPromiseHookEnabledOrDebugIsActiveOrHasAsyncEventDelegate(); - // Helpers for StackFrame markers. - Node* MarkerIsFrameType(Node* marker_or_function, - StackFrame::Type frame_type); - Node* MarkerIsNotFrameType(Node* marker_or_function, - StackFrame::Type frame_type); - // for..in helpers void CheckPrototypeEnumCache(Node* receiver, Node* receiver_map, Label* if_fast, Label* if_slow); @@ -3589,9 +3597,10 @@ class V8_EXPORT_PRIVATE CodeStubAssembler private: // Low-level accessors for Descriptor arrays.
- TNode LoadDescriptorArrayElement(TNode object, - Node* index, - int additional_offset = 0); + template + TNode LoadDescriptorArrayElement(TNode object, + TNode index, + int additional_offset); }; class V8_EXPORT_PRIVATE CodeStubArguments { diff --git a/deps/v8/src/codegen/compiler.cc b/deps/v8/src/codegen/compiler.cc index 5197dd3a2fc752..906eb0f0ca2d5e 100644 --- a/deps/v8/src/codegen/compiler.cc +++ b/deps/v8/src/codegen/compiler.cc @@ -15,8 +15,10 @@ #include "src/codegen/assembler-inl.h" #include "src/codegen/compilation-cache.h" #include "src/codegen/optimized-compilation-info.h" +#include "src/codegen/pending-optimization-table.h" #include "src/codegen/unoptimized-compilation-info.h" #include "src/common/globals.h" +#include "src/common/message-template.h" #include "src/compiler-dispatcher/compiler-dispatcher.h" #include "src/compiler-dispatcher/optimizing-compile-dispatcher.h" #include "src/compiler/pipeline.h" @@ -24,7 +26,6 @@ #include "src/debug/liveedit.h" #include "src/execution/frames-inl.h" #include "src/execution/isolate-inl.h" -#include "src/execution/message-template.h" #include "src/execution/runtime-profiler.h" #include "src/execution/vm-state-inl.h" #include "src/heap/heap-inl.h" @@ -319,6 +320,8 @@ void OptimizedCompilationJob::RecordCompilationStats(CompilationMode mode, counters->turbofan_optimize_total_foreground()->AddSample( static_cast(time_foreground.InMicroseconds())); } + counters->turbofan_ticks()->AddSample(static_cast( + compilation_info()->tick_counter().CurrentTicks() / 1000)); } } @@ -593,6 +596,12 @@ MaybeHandle GenerateUnoptimizedCodeForToplevel( return MaybeHandle(); } + if (FLAG_stress_lazy_source_positions) { + // Collect source positions immediately to try and flush out bytecode + // mismatches. + SharedFunctionInfo::EnsureSourcePositionsAvailable(isolate, shared_info); + } + if (shared_info.is_identical_to(top_level)) { // Ensure that the top level function is retained. *is_compiled_scope = shared_info->is_compiled_scope(); @@ -797,18 +806,10 @@ MaybeHandle GetOptimizedCode(Handle function, return MaybeHandle(); } - // If code was pending optimization for testing, delete remove the strong root - // that was preventing the bytecode from being flushed between marking and - // optimization. - if (!isolate->heap()->pending_optimize_for_test_bytecode().IsUndefined()) { - Handle table = - handle(ObjectHashTable::cast( - isolate->heap()->pending_optimize_for_test_bytecode()), - isolate); - bool was_present; - table = table->Remove(isolate, table, handle(function->shared(), isolate), - &was_present); - isolate->heap()->SetPendingOptimizeForTestBytecode(*table); + // If code was pending optimization for testing, delete the entry + // from the table that was preventing the bytecode from being flushed. + if (V8_UNLIKELY(FLAG_testing_d8_test_runner)) { + PendingOptimizationTable::FunctionWasOptimized(isolate, function); } Handle cached_code; @@ -1346,6 +1347,13 @@ bool Compiler::Compile(Handle shared_info, DCHECK(!isolate->has_pending_exception()); *is_compiled_scope = shared_info->is_compiled_scope(); DCHECK(is_compiled_scope->is_compiled()); + + if (FLAG_stress_lazy_source_positions) { + // Collect source positions immediately to try and flush out bytecode + // mismatches.
+ SharedFunctionInfo::EnsureSourcePositionsAvailable(isolate, shared_info); + } + return true; } @@ -1599,33 +1607,103 @@ MaybeHandle Compiler::GetFunctionFromEval( return result; } -bool Compiler::CodeGenerationFromStringsAllowed(Isolate* isolate, - Handle context, - Handle source) { +// Check whether embedder allows code generation in this context. +// (via v8::Isolate::SetAllowCodeGenerationFromStringsCallback) +bool CodeGenerationFromStringsAllowed(Isolate* isolate, Handle context, + Handle source) { DCHECK(context->allow_code_gen_from_strings().IsFalse(isolate)); - // Check with callback if set. + DCHECK(isolate->allow_code_gen_callback()); + + // Callback set. Let it decide if code generation is allowed. + VMState state(isolate); + RuntimeCallTimerScope timer( + isolate, RuntimeCallCounterId::kCodeGenerationFromStringsCallbacks); AllowCodeGenerationFromStringsCallback callback = isolate->allow_code_gen_callback(); - if (callback == nullptr) { - // No callback set and code generation disallowed. - return false; - } else { - // Callback set. Let it decide if code generation is allowed. - VMState state(isolate); - return callback(v8::Utils::ToLocal(context), v8::Utils::ToLocal(source)); + return callback(v8::Utils::ToLocal(context), v8::Utils::ToLocal(source)); +} + +// Check whether embedder allows code generation in this context. +// (via v8::Isolate::SetModifyCodeGenerationFromStringsCallback) +bool ModifyCodeGenerationFromStrings(Isolate* isolate, Handle context, + Handle* source) { + DCHECK(context->allow_code_gen_from_strings().IsFalse(isolate)); + DCHECK(isolate->modify_code_gen_callback()); + DCHECK(source); + + // Callback set. Run it, and use the return value as source, or block + // execution if it's not set. + VMState state(isolate); + ModifyCodeGenerationFromStringsCallback modify_callback = + isolate->modify_code_gen_callback(); + RuntimeCallTimerScope timer( + isolate, RuntimeCallCounterId::kCodeGenerationFromStringsCallbacks); + MaybeLocal modified_source = + modify_callback(v8::Utils::ToLocal(context), v8::Utils::ToLocal(*source)); + if (modified_source.IsEmpty()) return false; + + // Use the new source (which might be the same as the old source) and return. + *source = Utils::OpenHandle(*modified_source.ToLocalChecked(), false); + return true; +} + +// Run embedder-mandated checks before generating code from a string. +// +// Returns a string to be used for compilation, or a flag that an object type +// was encountered that is neither a string, nor something the embedder knows +// how to handle. +// +// Returns: (assuming: std::tie(source, unknown_object)) +// - !source.is_null(): compilation allowed, source contains the source string. +// - unknown_object is true: compilation allowed, but we don't know how to +// deal with source_object. +// - source.is_null() && !unknown_object: compilation should be blocked. +// +// - !source.is_null() and unknown_object can't be true at the same time. +std::pair, bool> Compiler::ValidateDynamicCompilationSource( + Isolate* isolate, Handle context, + Handle source_object) { + Handle source; + if (source_object->IsString()) source = Handle::cast(source_object); + + // Check if the context unconditionally allows code gen from strings. + // allow_code_gen_from_strings can be many things, so we'll always check + // against the 'false' literal, so that e.g. undefined and 'true' are treated + // the same.
+ if (!context->allow_code_gen_from_strings().IsFalse(isolate)) { + return {source, !source_object->IsString()}; + } + + // Check if the context allows code generation for this string. + // allow_code_gen_callback only allows proper strings. + // (I.e., let allow_code_gen_callback decide, if it has been set.) + if (isolate->allow_code_gen_callback()) { + if (source_object->IsString() && + CodeGenerationFromStringsAllowed(isolate, context, source)) { + return {source, !source_object->IsString()}; + } + } + + // Check if the context wants to block or modify this source object. + // Double-check that we really have a string now. + // (Let modify_code_gen_callback decide, if it's been set.) + if (isolate->modify_code_gen_callback()) { + if (ModifyCodeGenerationFromStrings(isolate, context, &source_object) && + source_object->IsString()) + return {Handle::cast(source_object), false}; } + + return {MaybeHandle(), !source_object->IsString()}; } -MaybeHandle Compiler::GetFunctionFromString( - Handle context, Handle source, +MaybeHandle Compiler::GetFunctionFromValidatedString( + Handle context, MaybeHandle source, ParseRestriction restriction, int parameters_end_pos) { Isolate* const isolate = context->GetIsolate(); Handle native_context(context->native_context(), isolate); - // Check if native context allows code generation from - // strings. Throw an exception if it doesn't. - if (native_context->allow_code_gen_from_strings().IsFalse(isolate) && - !CodeGenerationFromStringsAllowed(isolate, native_context, source)) { + // Raise an EvalError if we did not receive a string. + if (source.is_null()) { Handle error_message = native_context->ErrorMessageForCodeGenerationFromStrings(); THROW_NEW_ERROR( @@ -1639,9 +1717,20 @@ MaybeHandle Compiler::GetFunctionFromString( int eval_position = kNoSourcePosition; Handle outer_info( native_context->empty_function().shared(), isolate); - return Compiler::GetFunctionFromEval( - source, outer_info, native_context, LanguageMode::kSloppy, restriction, - parameters_end_pos, eval_scope_position, eval_position); + return Compiler::GetFunctionFromEval(source.ToHandleChecked(), outer_info, + native_context, LanguageMode::kSloppy, + restriction, parameters_end_pos, + eval_scope_position, eval_position); +} + +MaybeHandle Compiler::GetFunctionFromString( + Handle context, Handle source, + ParseRestriction restriction, int parameters_end_pos) { + Isolate* const isolate = context->GetIsolate(); + Handle native_context(context->native_context(), isolate); + return GetFunctionFromValidatedString( + context, ValidateDynamicCompilationSource(isolate, context, source).first, + restriction, parameters_end_pos); } namespace { diff --git a/deps/v8/src/codegen/compiler.h b/deps/v8/src/codegen/compiler.h index a5987063737c9e..836f7381233b3a 100644 --- a/deps/v8/src/codegen/compiler.h +++ b/deps/v8/src/codegen/compiler.h @@ -132,17 +132,22 @@ class V8_EXPORT_PRIVATE Compiler : public AllStatic { v8::ScriptCompiler::CompileOptions compile_options, v8::ScriptCompiler::NoCacheReason no_cache_reason); - // Returns true if the embedder permits compiling the given source string in - // the given context. - static bool CodeGenerationFromStringsAllowed(Isolate* isolate, - Handle context, - Handle source); - // Create a (bound) function for a String source within a context for eval. 
V8_WARN_UNUSED_RESULT static MaybeHandle GetFunctionFromString( - Handle context, Handle source, + Handle context, Handle source, ParseRestriction restriction, int parameters_end_pos); + // Decompose GetFunctionFromString into two functions, to allow callers to + // deal separately with the case of an object not handled by the embedder. + V8_WARN_UNUSED_RESULT static std::pair, bool> + ValidateDynamicCompilationSource(Isolate* isolate, Handle context, + Handle source_object); + V8_WARN_UNUSED_RESULT static MaybeHandle + GetFunctionFromValidatedString(Handle context, + MaybeHandle source, + ParseRestriction restriction, + int parameters_end_pos); + // Create a shared function info object for a String source. static MaybeHandle GetSharedFunctionInfoForScript( Isolate* isolate, Handle source, diff --git a/deps/v8/src/codegen/constant-pool.cc b/deps/v8/src/codegen/constant-pool.cc index 613a142f243618..6816c5b7ad580b 100644 --- a/deps/v8/src/codegen/constant-pool.cc +++ b/deps/v8/src/codegen/constant-pool.cc @@ -3,6 +3,7 @@ // found in the LICENSE file. #include "src/codegen/constant-pool.h" +#include "src/codegen/assembler-arch.h" #include "src/codegen/assembler-inl.h" namespace v8 { namespace internal { @@ -210,5 +211,253 @@ int ConstantPoolBuilder::Emit(Assembler* assm) { #endif // defined(V8_TARGET_ARCH_PPC) +#if defined(V8_TARGET_ARCH_ARM64) + +// Constant Pool. + +ConstantPool::ConstantPool(Assembler* assm) : assm_(assm) {} +ConstantPool::~ConstantPool() { DCHECK_EQ(blocked_nesting_, 0); } + +RelocInfoStatus ConstantPool::RecordEntry(uint32_t data, + RelocInfo::Mode rmode) { + ConstantPoolKey key(data, rmode); + CHECK(key.is_value32()); + return RecordKey(std::move(key), assm_->pc_offset()); +} + +RelocInfoStatus ConstantPool::RecordEntry(uint64_t data, + RelocInfo::Mode rmode) { + ConstantPoolKey key(data, rmode); + CHECK(!key.is_value32()); + return RecordKey(std::move(key), assm_->pc_offset()); +} + +RelocInfoStatus ConstantPool::RecordKey(ConstantPoolKey key, int offset) { + RelocInfoStatus write_reloc_info = GetRelocInfoStatusFor(key); + if (write_reloc_info == RelocInfoStatus::kMustRecord) { + if (key.is_value32()) { + if (entry32_count_ == 0) first_use_32_ = offset; + ++entry32_count_; + } else { + if (entry64_count_ == 0) first_use_64_ = offset; + ++entry64_count_; + } + } + entries_.insert(std::make_pair(key, offset)); + + if (Entry32Count() + Entry64Count() > ConstantPool::kApproxMaxEntryCount) { + // Request constant pool emission after the next instruction. + SetNextCheckIn(1); + } + + return write_reloc_info; +} + +RelocInfoStatus ConstantPool::GetRelocInfoStatusFor( + const ConstantPoolKey& key) { + if (key.AllowsDeduplication()) { + auto existing = entries_.find(key); + if (existing != entries_.end()) { + return RelocInfoStatus::kMustOmitForDuplicate; + } + } + return RelocInfoStatus::kMustRecord; +} + +void ConstantPool::EmitAndClear(Jump require_jump) { + DCHECK(!IsBlocked()); + // Prevent recursive pool emission. + Assembler::BlockPoolsScope block_pools(assm_, PoolEmissionCheck::kSkip); + Alignment require_alignment = + IsAlignmentRequiredIfEmittedAt(require_jump, assm_->pc_offset()); + int size = ComputeSize(require_jump, require_alignment); + Label size_check; + assm_->bind(&size_check); + assm_->RecordConstPool(size); + + // Emit the constant pool. It is preceded by an optional branch if + // {require_jump} and a header which will: + // 1) Encode the size of the constant pool, for use by the disassembler.
+ // 2) Terminate the program, to try to prevent execution from accidentally + // flowing into the constant pool. + // 3) Align the 64-bit pool entries to 64 bits. + // TODO(all): Make the alignment part less fragile. Currently code is + // allocated as a byte array so there are no guarantees the alignment will + // be preserved on compaction. Currently it works as allocation seems to be + // 64-bit aligned. + + Label after_pool; + if (require_jump == Jump::kRequired) assm_->b(&after_pool); + + assm_->RecordComment("[ Constant Pool"); + EmitPrologue(require_alignment); + if (require_alignment == Alignment::kRequired) assm_->Align(kInt64Size); + EmitEntries(); + assm_->RecordComment("]"); + + if (after_pool.is_linked()) assm_->bind(&after_pool); + + DCHECK_EQ(assm_->SizeOfCodeGeneratedSince(&size_check), size); + Clear(); +} + +void ConstantPool::Clear() { + entries_.clear(); + first_use_32_ = -1; + first_use_64_ = -1; + entry32_count_ = 0; + entry64_count_ = 0; + next_check_ = 0; +} + +void ConstantPool::StartBlock() { + if (blocked_nesting_ == 0) { + // Prevent constant pool checks from happening by setting the next check to + // the biggest possible offset. + next_check_ = kMaxInt; + } + ++blocked_nesting_; +} + +void ConstantPool::EndBlock() { + --blocked_nesting_; + if (blocked_nesting_ == 0) { + DCHECK(IsInImmRangeIfEmittedAt(assm_->pc_offset())); + // Make sure a check happens quickly after getting unblocked. + next_check_ = 0; + } +} + +bool ConstantPool::IsBlocked() const { return blocked_nesting_ > 0; } + +void ConstantPool::SetNextCheckIn(size_t instructions) { + next_check_ = + assm_->pc_offset() + static_cast(instructions * kInstrSize); +} + +void ConstantPool::EmitEntries() { + for (auto iter = entries_.begin(); iter != entries_.end();) { + DCHECK(iter->first.is_value32() || IsAligned(assm_->pc_offset(), 8)); + auto range = entries_.equal_range(iter->first); + bool shared = iter->first.AllowsDeduplication(); + for (auto it = range.first; it != range.second; ++it) { + SetLoadOffsetToConstPoolEntry(it->second, assm_->pc(), it->first); + if (!shared) Emit(it->first); + } + if (shared) Emit(iter->first); + iter = range.second; + } +} + +void ConstantPool::Emit(const ConstantPoolKey& key) { + if (key.is_value32()) { + assm_->dd(key.value32()); + } else { + assm_->dq(key.value64()); + } +} + +bool ConstantPool::ShouldEmitNow(Jump require_jump, size_t margin) const { + if (IsEmpty()) return false; + if (Entry32Count() + Entry64Count() > ConstantPool::kApproxMaxEntryCount) { + return true; + } + // We compute {dist32/64}, i.e. the distance from the first instruction + // accessing a 32bit/64bit entry in the constant pool to any of the + // 32bit/64bit constant pool entries, respectively. This is required because + // we do not guarantee that entries are emitted in order of reference, i.e. it + // is possible that the entry with the earliest reference is emitted last. + // The constant pool should be emitted if any of the following is true: + // (A) {dist32/64} will be out of range at the next check in. + // (B) Emission can be done behind an unconditional branch and {dist32/64} + // exceeds {kOpportunityDist*}. + // (C) {dist32/64} exceeds the desired approximate distance to the pool.
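The implementation of these three conditions follows in the hunk below. As a standalone illustration, here is a hedged sketch of the 64-bit half of the check; every constant is made up for the example, and the real per-architecture values live elsewhere in the pool implementation:

    #include <cstddef>
    #include <cstdio>
    constexpr size_t kCheckInterval = 128;             // illustrative only
    constexpr size_t kMaxDistToPool64 = 1u << 21;      // illustrative only
    constexpr size_t kApproxDistToPool64 = 64 * 1024;  // illustrative only
    bool ShouldEmit64(size_t pool_end_64, size_t first_use_64,
                      bool jump_omitted, size_t opportunity_dist) {
      size_t dist64 = pool_end_64 - first_use_64;
      bool too_late = dist64 + 2 * kCheckInterval >= kMaxDistToPool64;  // (A)
      bool opportune = jump_omitted && dist64 >= opportunity_dist;      // (B)
      bool too_far = dist64 >= kApproxDistToPool64;                     // (C)
      return too_late || opportune || too_far;
    }
    int main() {
      // dist64 = 70000 - 100 exceeds the approximate distance, so (C) fires.
      std::printf("%d\n", ShouldEmit64(70000, 100, false, 32 * 1024));  // 1
    }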
+ int worst_case_size = ComputeSize(Jump::kRequired, Alignment::kRequired); + size_t pool_end_32 = assm_->pc_offset() + margin + worst_case_size; + size_t pool_end_64 = pool_end_32 - Entry32Count() * kInt32Size; + if (Entry64Count() != 0) { + // The 64-bit constants are always emitted before the 32-bit constants, so + // we subtract the size of the 32-bit constants from {size}. + size_t dist64 = pool_end_64 - first_use_64_; + bool next_check_too_late = dist64 + 2 * kCheckInterval >= kMaxDistToPool64; + bool opportune_emission_without_jump = + require_jump == Jump::kOmitted && (dist64 >= kOpportunityDistToPool64); + bool approximate_distance_exceeded = dist64 >= kApproxDistToPool64; + if (next_check_too_late || opportune_emission_without_jump || + approximate_distance_exceeded) { + return true; + } + } + if (Entry32Count() != 0) { + size_t dist32 = pool_end_32 - first_use_32_; + bool next_check_too_late = dist32 + 2 * kCheckInterval >= kMaxDistToPool32; + bool opportune_emission_without_jump = + require_jump == Jump::kOmitted && (dist32 >= kOpportunityDistToPool32); + bool approximate_distance_exceeded = dist32 >= kApproxDistToPool32; + if (next_check_too_late || opportune_emission_without_jump || + approximate_distance_exceeded) { + return true; + } + } + return false; +} + +int ConstantPool::ComputeSize(Jump require_jump, + Alignment require_alignment) const { + int size_up_to_marker = PrologueSize(require_jump); + int alignment = require_alignment == Alignment::kRequired ? kInstrSize : 0; + size_t size_after_marker = + Entry32Count() * kInt32Size + alignment + Entry64Count() * kInt64Size; + return size_up_to_marker + static_cast(size_after_marker); +} + +Alignment ConstantPool::IsAlignmentRequiredIfEmittedAt(Jump require_jump, + int pc_offset) const { + int size_up_to_marker = PrologueSize(require_jump); + if (Entry64Count() != 0 && + !IsAligned(pc_offset + size_up_to_marker, kInt64Size)) { + return Alignment::kRequired; + } + return Alignment::kOmitted; +} + +bool ConstantPool::IsInImmRangeIfEmittedAt(int pc_offset) { + // Check that all entries are in range if the pool is emitted at {pc_offset}. + // This ignores kPcLoadDelta (conservatively, since all offsets are positive), + // and over-estimates the last entry's address with the pool's end. 
+ Alignment require_alignment = + IsAlignmentRequiredIfEmittedAt(Jump::kRequired, pc_offset); + size_t pool_end_32 = + pc_offset + ComputeSize(Jump::kRequired, require_alignment); + size_t pool_end_64 = pool_end_32 - Entry32Count() * kInt32Size; + bool entries_in_range_32 = + Entry32Count() == 0 || (pool_end_32 < first_use_32_ + kMaxDistToPool32); + bool entries_in_range_64 = + Entry64Count() == 0 || (pool_end_64 < first_use_64_ + kMaxDistToPool64); + return entries_in_range_32 && entries_in_range_64; +} + +ConstantPool::BlockScope::BlockScope(Assembler* assm, size_t margin) + : pool_(&assm->constpool_) { + pool_->assm_->EmitConstPoolWithJumpIfNeeded(margin); + pool_->StartBlock(); +} + +ConstantPool::BlockScope::BlockScope(Assembler* assm, PoolEmissionCheck check) + : pool_(&assm->constpool_) { + DCHECK_EQ(check, PoolEmissionCheck::kSkip); + pool_->StartBlock(); +} + +ConstantPool::BlockScope::~BlockScope() { pool_->EndBlock(); } + +void ConstantPool::MaybeCheck() { + if (assm_->pc_offset() >= next_check_) { + Check(Emission::kIfNeeded, Jump::kRequired); + } +} + +#endif // defined(V8_TARGET_ARCH_ARM64) + } // namespace internal } // namespace v8 diff --git a/deps/v8/src/codegen/constant-pool.h b/deps/v8/src/codegen/constant-pool.h index 4399f6fc1fe53d..d07452336b4e40 100644 --- a/deps/v8/src/codegen/constant-pool.h +++ b/deps/v8/src/codegen/constant-pool.h @@ -15,6 +15,8 @@ namespace v8 { namespace internal { +class Instruction; + // ----------------------------------------------------------------------------- // Constant pool support @@ -136,8 +138,9 @@ class ConstantPoolBuilder { inline Label* EmittedPosition() { return &emitted_label_; } private: - ConstantPoolEntry::Access AddEntry(ConstantPoolEntry& entry, - ConstantPoolEntry::Type type); + ConstantPoolEntry::Access AddEntry( + ConstantPoolEntry& entry, // NOLINT(runtime/references) + ConstantPoolEntry::Type type); void EmitSharedEntries(Assembler* assm, ConstantPoolEntry::Type type); void EmitGroup(Assembler* assm, ConstantPoolEntry::Access access, ConstantPoolEntry::Type type); @@ -161,6 +164,189 @@ class ConstantPoolBuilder { #endif // defined(V8_TARGET_ARCH_PPC) +#if defined(V8_TARGET_ARCH_ARM64) + +class ConstantPoolKey { + public: + explicit ConstantPoolKey(uint64_t value, + RelocInfo::Mode rmode = RelocInfo::NONE) + : is_value32_(false), value64_(value), rmode_(rmode) {} + + explicit ConstantPoolKey(uint32_t value, + RelocInfo::Mode rmode = RelocInfo::NONE) + : is_value32_(true), value32_(value), rmode_(rmode) {} + + uint64_t value64() const { + CHECK(!is_value32_); + return value64_; + } + uint32_t value32() const { + CHECK(is_value32_); + return value32_; + } + + bool is_value32() const { return is_value32_; } + RelocInfo::Mode rmode() const { return rmode_; } + + bool AllowsDeduplication() const { + DCHECK(rmode_ != RelocInfo::CONST_POOL && + rmode_ != RelocInfo::VENEER_POOL && + rmode_ != RelocInfo::DEOPT_SCRIPT_OFFSET && + rmode_ != RelocInfo::DEOPT_INLINING_ID && + rmode_ != RelocInfo::DEOPT_REASON && rmode_ != RelocInfo::DEOPT_ID); + // CODE_TARGETs can be shared because they aren't patched anymore, + // and we make sure we emit only one reloc info for them (thus delta + // patching will apply the delta only once). At the moment, we do not dedup + // code targets if they are wrapped in a heap object request (value == 0). + bool is_sharable_code_target = + rmode_ == RelocInfo::CODE_TARGET && + (is_value32() ?
(value32() != 0) : (value64() != 0)); + bool is_sharable_embedded_object = RelocInfo::IsEmbeddedObjectMode(rmode_); + return RelocInfo::IsShareableRelocMode(rmode_) || is_sharable_code_target || + is_sharable_embedded_object; + } + + private: + bool is_value32_; + union { + uint64_t value64_; + uint32_t value32_; + }; + RelocInfo::Mode rmode_; +}; + +// Order for pool entries. 64-bit entries go first. +inline bool operator<(const ConstantPoolKey& a, const ConstantPoolKey& b) { + if (a.is_value32() < b.is_value32()) return true; + if (a.is_value32() > b.is_value32()) return false; + if (a.rmode() < b.rmode()) return true; + if (a.rmode() > b.rmode()) return false; + if (a.is_value32()) return a.value32() < b.value32(); + return a.value64() < b.value64(); +} + +inline bool operator==(const ConstantPoolKey& a, const ConstantPoolKey& b) { + if (a.rmode() != b.rmode() || a.is_value32() != b.is_value32()) { + return false; + } + if (a.is_value32()) return a.value32() == b.value32(); + return a.value64() == b.value64(); +} + +// Constant pool generation +enum class Jump { kOmitted, kRequired }; +enum class Emission { kIfNeeded, kForced }; +enum class Alignment { kOmitted, kRequired }; +enum class RelocInfoStatus { kMustRecord, kMustOmitForDuplicate }; +enum class PoolEmissionCheck { kSkip }; + +// Pools are emitted in the instruction stream, preferably after unconditional +// jumps or after returns from functions (in dead code locations). +// If a long code sequence does not contain unconditional jumps, it is +// necessary to emit the constant pool before the pool gets too far from the +// location it is accessed from. In this case, we emit a jump over the emitted +// constant pool. +// Constants in the pool may be addresses of functions that get relocated; +// if so, a relocation info entry is associated with the constant pool entry. +class ConstantPool { + public: + explicit ConstantPool(Assembler* assm); + ~ConstantPool(); + + // Returns true when we need to write RelocInfo and false when we do not. + RelocInfoStatus RecordEntry(uint32_t data, RelocInfo::Mode rmode); + RelocInfoStatus RecordEntry(uint64_t data, RelocInfo::Mode rmode); + + size_t Entry32Count() const { return entry32_count_; } + size_t Entry64Count() const { return entry64_count_; } + bool IsEmpty() const { return entries_.empty(); } + // Check if pool will be out of range at {pc_offset}. + bool IsInImmRangeIfEmittedAt(int pc_offset); + // Size in bytes of the constant pool. Depending on parameters, the size will + // include the branch over the pool and alignment padding. + int ComputeSize(Jump require_jump, Alignment require_alignment) const; + + // Emit the pool at the current pc with a branch over the pool if requested. + void EmitAndClear(Jump require); + bool ShouldEmitNow(Jump require_jump, size_t margin = 0) const; + V8_EXPORT_PRIVATE void Check(Emission force_emission, Jump require_jump, + size_t margin = 0); + + V8_EXPORT_PRIVATE void MaybeCheck(); + void Clear(); + + // Constant pool emission can be blocked temporarily. + bool IsBlocked() const; + + // Repeated checking whether the constant pool should be emitted is expensive; + // so we only check once a number of instructions have been generated. + void SetNextCheckIn(size_t instructions); + + // Class for postponing constant pool generation for the length of a scope.
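The BlockScope declared just below is a standard RAII guard. A hedged standalone sketch of the nesting-counter pattern it relies on (names simplified; the real class also pre-emits the pool to honor the requested margin):

    #include <cassert>
    struct Pool {
      int blocked_nesting = 0;
      bool IsBlocked() const { return blocked_nesting > 0; }
    };
    class BlockScope {
     public:
      explicit BlockScope(Pool* pool) : pool_(pool) { ++pool_->blocked_nesting; }
      ~BlockScope() { --pool_->blocked_nesting; }
     private:
      Pool* pool_;
    };
    int main() {
      Pool pool;
      {
        BlockScope scope(&pool);
        assert(pool.IsBlocked());  // no pool emission inside the scope
      }
      assert(!pool.IsBlocked());   // checks resume once the scope closes
    }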
+ class V8_EXPORT_PRIVATE BlockScope { + public: + // BlockScope immediately emits the pool if necessary to ensure that + // during the block scope at least {margin} bytes can be emitted without + // pool emission becoming necessary. + explicit BlockScope(Assembler* pool, size_t margin = 0); + BlockScope(Assembler* pool, PoolEmissionCheck); + ~BlockScope(); + + private: + ConstantPool* pool_; + DISALLOW_IMPLICIT_CONSTRUCTORS(BlockScope); + }; + + // Hard limit for the const pool which must not be exceeded. + static const size_t kMaxDistToPool32; + static const size_t kMaxDistToPool64; + // Approximate distance where the pool should be emitted. + static const size_t kApproxDistToPool32; + V8_EXPORT_PRIVATE static const size_t kApproxDistToPool64; + // Approximate distance where the pool may be emitted if + // no jump is required (due to a recent unconditional jump). + static const size_t kOpportunityDistToPool32; + static const size_t kOpportunityDistToPool64; + // PC distance between constant pool checks. + V8_EXPORT_PRIVATE static const size_t kCheckInterval; + // Number of entries in the pool which trigger a check. + static const size_t kApproxMaxEntryCount; + + private: + void StartBlock(); + void EndBlock(); + + void EmitEntries(); + void EmitPrologue(Alignment require_alignment); + int PrologueSize(Jump require_jump) const; + RelocInfoStatus RecordKey(ConstantPoolKey key, int offset); + RelocInfoStatus GetRelocInfoStatusFor(const ConstantPoolKey& key); + void Emit(const ConstantPoolKey& key); + void SetLoadOffsetToConstPoolEntry(int load_offset, Instruction* entry_offset, + const ConstantPoolKey& key); + Alignment IsAlignmentRequiredIfEmittedAt(Jump require_jump, + int pc_offset) const; + + Assembler* assm_; + // Keep track of the first instruction requiring a constant pool entry + // since the previous constant pool was emitted. + int first_use_32_ = -1; + int first_use_64_ = -1; + // We sort not according to insertion order, but since we do not insert + // addresses (for heap objects we insert an index which is created in + // increasing order), the order is deterministic. We map each entry to the + // pc offset of the load. We use a multimap because we need to record the + // pc offset of each load of the same constant so that the immediate of the + // loads can be back-patched when the pool is emitted. + std::multimap entries_; + size_t entry32_count_ = 0; + size_t entry64_count_ = 0; + int next_check_ = 0; + int blocked_nesting_ = 0; +}; + +#endif // defined(V8_TARGET_ARCH_ARM64) + } // namespace internal } // namespace v8 diff --git a/deps/v8/src/codegen/cpu-features.h b/deps/v8/src/codegen/cpu-features.h index b2f792e339c6f6..dae9992c57f6c1 100644 --- a/deps/v8/src/codegen/cpu-features.h +++ b/deps/v8/src/codegen/cpu-features.h @@ -14,6 +14,7 @@ namespace internal { // CPU feature flags. enum CpuFeature { // x86 + SSE4_2, SSE4_1, SSSE3, SSE3, diff --git a/deps/v8/src/codegen/external-reference.cc b/deps/v8/src/codegen/external-reference.cc index 5538f361f076c1..c0774079311122 100644 --- a/deps/v8/src/codegen/external-reference.cc +++ b/deps/v8/src/codegen/external-reference.cc @@ -26,31 +26,11 @@ #include "src/logging/log.h" #include "src/numbers/math-random.h" #include "src/objects/objects-inl.h" +#include "src/regexp/regexp-macro-assembler-arch.h" #include "src/regexp/regexp-stack.h" #include "src/strings/string-search.h" #include "src/wasm/wasm-external-refs.h" -// Include native regexp-macro-assembler.
-#if V8_TARGET_ARCH_IA32 -#include "src/regexp/ia32/regexp-macro-assembler-ia32.h" // NOLINT -#elif V8_TARGET_ARCH_X64 -#include "src/regexp/x64/regexp-macro-assembler-x64.h" // NOLINT -#elif V8_TARGET_ARCH_ARM64 -#include "src/regexp/arm64/regexp-macro-assembler-arm64.h" // NOLINT -#elif V8_TARGET_ARCH_ARM -#include "src/regexp/arm/regexp-macro-assembler-arm.h" // NOLINT -#elif V8_TARGET_ARCH_PPC -#include "src/regexp/ppc/regexp-macro-assembler-ppc.h" // NOLINT -#elif V8_TARGET_ARCH_MIPS -#include "src/regexp/mips/regexp-macro-assembler-mips.h" // NOLINT -#elif V8_TARGET_ARCH_MIPS64 -#include "src/regexp/mips64/regexp-macro-assembler-mips64.h" // NOLINT -#elif V8_TARGET_ARCH_S390 -#include "src/regexp/s390/regexp-macro-assembler-s390.h" // NOLINT -#else // Unknown architecture. -#error "Unknown architecture." -#endif // Target architecture. - #ifdef V8_INTL_SUPPORT #include "src/objects/intl-objects.h" #endif // V8_INTL_SUPPORT @@ -671,6 +651,15 @@ static Address LexicographicCompareWrapper(Isolate* isolate, Address smi_x, FUNCTION_REFERENCE(smi_lexicographic_compare_function, LexicographicCompareWrapper) +FUNCTION_REFERENCE(mutable_big_int_absolute_add_and_canonicalize_function, + MutableBigInt_AbsoluteAddAndCanonicalize) + +FUNCTION_REFERENCE(mutable_big_int_absolute_compare_function, + MutableBigInt_AbsoluteCompare) + +FUNCTION_REFERENCE(mutable_big_int_absolute_sub_and_canonicalize_function, + MutableBigInt_AbsoluteSubAndCanonicalize) + FUNCTION_REFERENCE(check_object_type, CheckObjectType) #ifdef V8_INTL_SUPPORT @@ -786,6 +775,12 @@ ExternalReference ExternalReference::fast_c_call_caller_pc_address( isolate->isolate_data()->fast_c_call_caller_pc_address()); } +ExternalReference ExternalReference::stack_is_iterable_address( + Isolate* isolate) { + return ExternalReference( + isolate->isolate_data()->stack_is_iterable_address()); +} + FUNCTION_REFERENCE(call_enqueue_microtask_function, MicrotaskQueue::CallEnqueueMicrotask) diff --git a/deps/v8/src/codegen/external-reference.h b/deps/v8/src/codegen/external-reference.h index 4c83a9b33af35d..b663ae1621e953 100644 --- a/deps/v8/src/codegen/external-reference.h +++ b/deps/v8/src/codegen/external-reference.h @@ -72,6 +72,7 @@ class StatsCounter; "IsolateData::fast_c_call_caller_fp_address") \ V(fast_c_call_caller_pc_address, \ "IsolateData::fast_c_call_caller_pc_address") \ + V(stack_is_iterable_address, "IsolateData::stack_is_iterable_address") \ V(address_of_regexp_stack_limit, "RegExpStack::limit_address()") \ V(address_of_regexp_stack_memory_address, "RegExpStack::memory_address()") \ V(address_of_regexp_stack_memory_size, "RegExpStack::memory_size()") \ @@ -149,6 +150,12 @@ class StatsCounter; V(libc_memmove_function, "libc_memmove") \ V(libc_memset_function, "libc_memset") \ V(mod_two_doubles_operation, "mod_two_doubles") \ + V(mutable_big_int_absolute_add_and_canonicalize_function, \ + "MutableBigInt_AbsoluteAddAndCanonicalize") \ + V(mutable_big_int_absolute_compare_function, \ + "MutableBigInt_AbsoluteCompare") \ + V(mutable_big_int_absolute_sub_and_canonicalize_function, \ + "MutableBigInt_AbsoluteSubAndCanonicalize") \ V(new_deoptimizer_function, "Deoptimizer::New()") \ V(orderedhashmap_gethash_raw, "orderedhashmap_gethash_raw") \ V(printf_function, "printf") \ diff --git a/deps/v8/src/codegen/handler-table.cc b/deps/v8/src/codegen/handler-table.cc index 12a05e1fbacd8e..4f94746ea58f45 100644 --- a/deps/v8/src/codegen/handler-table.cc +++ b/deps/v8/src/codegen/handler-table.cc @@ -15,31 +15,41 @@ namespace internal { 
 HandlerTable::HandlerTable(Code code)
     : HandlerTable(code.InstructionStart() + code.handler_table_offset(),
-                   code.handler_table_size()) {}
+                   code.handler_table_size(), kReturnAddressBasedEncoding) {}

 HandlerTable::HandlerTable(BytecodeArray bytecode_array)
     : HandlerTable(bytecode_array.handler_table()) {}

 HandlerTable::HandlerTable(ByteArray byte_array)
-    : number_of_entries_(byte_array.length() / kRangeEntrySize /
-                         sizeof(int32_t)),
-#ifdef DEBUG
-      mode_(kRangeBasedEncoding),
-#endif
-      raw_encoded_data_(
-          reinterpret_cast<Address>(byte_array.GetDataStartAddress())) {
-  DCHECK_EQ(0, byte_array.length() % (kRangeEntrySize * sizeof(int32_t)));
-}
+    : HandlerTable(reinterpret_cast<Address>
(byte_array.GetDataStartAddress()), + byte_array.length(), kRangeBasedEncoding) {} -HandlerTable::HandlerTable(Address handler_table, int handler_table_size) - : number_of_entries_(handler_table_size / kReturnEntrySize / +HandlerTable::HandlerTable(Address handler_table, int handler_table_size, + EncodingMode encoding_mode) + : number_of_entries_(handler_table_size / EntrySizeFromMode(encoding_mode) / sizeof(int32_t)), #ifdef DEBUG - mode_(kReturnAddressBasedEncoding), + mode_(encoding_mode), #endif raw_encoded_data_(handler_table) { + // Check padding. static_assert(4 < kReturnEntrySize * sizeof(int32_t), "allowed padding"); - DCHECK_GE(4, handler_table_size % (kReturnEntrySize * sizeof(int32_t))); + // For return address encoding, maximum padding is 4; otherwise, there should + // be no padding. + DCHECK_GE(kReturnAddressBasedEncoding == encoding_mode ? 4 : 0, + handler_table_size % + (EntrySizeFromMode(encoding_mode) * sizeof(int32_t))); +} + +// static +int HandlerTable::EntrySizeFromMode(EncodingMode mode) { + switch (mode) { + case kReturnAddressBasedEncoding: + return kReturnEntrySize; + case kRangeBasedEncoding: + return kRangeEntrySize; + } + UNREACHABLE(); } int HandlerTable::GetRangeStart(int index) const { diff --git a/deps/v8/src/codegen/handler-table.h b/deps/v8/src/codegen/handler-table.h index eaa062873b40b0..362412525d8a24 100644 --- a/deps/v8/src/codegen/handler-table.h +++ b/deps/v8/src/codegen/handler-table.h @@ -45,11 +45,14 @@ class V8_EXPORT_PRIVATE HandlerTable { // async/await handling in the debugger can take place. }; + enum EncodingMode { kRangeBasedEncoding, kReturnAddressBasedEncoding }; + // Constructors for the various encodings. explicit HandlerTable(Code code); explicit HandlerTable(ByteArray byte_array); explicit HandlerTable(BytecodeArray bytecode_array); - explicit HandlerTable(Address handler_table, int handler_table_size); + HandlerTable(Address handler_table, int handler_table_size, + EncodingMode encoding_mode); // Getters for handler table based on ranges. int GetRangeStart(int index) const; @@ -88,11 +91,12 @@ class V8_EXPORT_PRIVATE HandlerTable { #endif private: - enum EncodingMode { kRangeBasedEncoding, kReturnAddressBasedEncoding }; - // Getters for handler table based on ranges. CatchPrediction GetRangePrediction(int index) const; + // Gets entry size based on mode. + static int EntrySizeFromMode(EncodingMode mode); + // Getters for handler table based on return addresses. 
int GetReturnOffset(int index) const; int GetReturnHandler(int index) const; diff --git a/deps/v8/src/codegen/ia32/assembler-ia32.cc b/deps/v8/src/codegen/ia32/assembler-ia32.cc index 99d38890e351f0..aefcab7299c7c8 100644 --- a/deps/v8/src/codegen/ia32/assembler-ia32.cc +++ b/deps/v8/src/codegen/ia32/assembler-ia32.cc @@ -756,6 +756,13 @@ void Assembler::cmpxchg8b(Operand dst) { emit_operand(ecx, dst); } +void Assembler::mfence() { + EnsureSpace ensure_space(this); + EMIT(0x0F); + EMIT(0xAE); + EMIT(0xF0); +} + void Assembler::lfence() { EnsureSpace ensure_space(this); EMIT(0x0F); diff --git a/deps/v8/src/codegen/ia32/assembler-ia32.h b/deps/v8/src/codegen/ia32/assembler-ia32.h index d2dcb0f34848b0..2423f73bdbe9b8 100644 --- a/deps/v8/src/codegen/ia32/assembler-ia32.h +++ b/deps/v8/src/codegen/ia32/assembler-ia32.h @@ -542,6 +542,7 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase { void cmpxchg8b(Operand dst); // Memory Fence + void mfence(); void lfence(); void pause(); diff --git a/deps/v8/src/codegen/ia32/macro-assembler-ia32.cc b/deps/v8/src/codegen/ia32/macro-assembler-ia32.cc index 6a0be9386e1702..f6f0153e54c02c 100644 --- a/deps/v8/src/codegen/ia32/macro-assembler-ia32.cc +++ b/deps/v8/src/codegen/ia32/macro-assembler-ia32.cc @@ -1887,20 +1887,24 @@ void TurboAssembler::Call(Handle code_object, RelocInfo::Mode rmode) { call(code_object, rmode); } -void TurboAssembler::CallBuiltinPointer(Register builtin_pointer) { +void TurboAssembler::LoadEntryFromBuiltinIndex(Register builtin_index) { STATIC_ASSERT(kSystemPointerSize == 4); STATIC_ASSERT(kSmiShiftSize == 0); STATIC_ASSERT(kSmiTagSize == 1); STATIC_ASSERT(kSmiTag == 0); - // The builtin_pointer register contains the builtin index as a Smi. + // The builtin_index register contains the builtin index as a Smi. // Untagging is folded into the indexing operand below (we use // times_half_system_pointer_size instead of times_system_pointer_size since // smis are already shifted by one). - mov(builtin_pointer, - Operand(kRootRegister, builtin_pointer, times_half_system_pointer_size, + mov(builtin_index, + Operand(kRootRegister, builtin_index, times_half_system_pointer_size, IsolateData::builtin_entry_table_offset())); - call(builtin_pointer); +} + +void TurboAssembler::CallBuiltinByIndex(Register builtin_index) { + LoadEntryFromBuiltinIndex(builtin_index); + call(builtin_index); } void TurboAssembler::LoadCodeObjectEntry(Register destination, diff --git a/deps/v8/src/codegen/ia32/macro-assembler-ia32.h b/deps/v8/src/codegen/ia32/macro-assembler-ia32.h index 345ae815af66ff..9b13e87447920f 100644 --- a/deps/v8/src/codegen/ia32/macro-assembler-ia32.h +++ b/deps/v8/src/codegen/ia32/macro-assembler-ia32.h @@ -87,7 +87,10 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase { void Call(Label* target) { call(target); } void Call(Handle code_object, RelocInfo::Mode rmode); - void CallBuiltinPointer(Register builtin_pointer) override; + // Load the builtin given by the Smi in |builtin_index| into the same + // register. 
+ void LoadEntryFromBuiltinIndex(Register builtin_index); + void CallBuiltinByIndex(Register builtin_index) override; void LoadCodeObjectEntry(Register destination, Register code_object) override; void CallCodeObject(Register code_object) override; diff --git a/deps/v8/src/codegen/interface-descriptors.cc b/deps/v8/src/codegen/interface-descriptors.cc index f8f874359b6d12..5934c80a7d218b 100644 --- a/deps/v8/src/codegen/interface-descriptors.cc +++ b/deps/v8/src/codegen/interface-descriptors.cc @@ -252,6 +252,11 @@ void StringAtDescriptor::InitializePlatformSpecific( DefaultInitializePlatformSpecific(data, kParameterCount); } +void StringAtAsStringDescriptor::InitializePlatformSpecific( + CallInterfaceDescriptorData* data) { + DefaultInitializePlatformSpecific(data, kParameterCount); +} + void StringSubstringDescriptor::InitializePlatformSpecific( CallInterfaceDescriptorData* data) { DefaultInitializePlatformSpecific(data, kParameterCount); diff --git a/deps/v8/src/codegen/interface-descriptors.h b/deps/v8/src/codegen/interface-descriptors.h index d166b477d80868..f6c1adfe47fe5e 100644 --- a/deps/v8/src/codegen/interface-descriptors.h +++ b/deps/v8/src/codegen/interface-descriptors.h @@ -74,6 +74,7 @@ namespace internal { V(StoreTransition) \ V(StoreWithVector) \ V(StringAt) \ + V(StringAtAsString) \ V(StringSubstring) \ V(TypeConversion) \ V(TypeConversionStackParameter) \ @@ -969,6 +970,17 @@ class StringAtDescriptor final : public CallInterfaceDescriptor { DECLARE_DESCRIPTOR(StringAtDescriptor, CallInterfaceDescriptor) }; +class StringAtAsStringDescriptor final : public CallInterfaceDescriptor { + public: + DEFINE_PARAMETERS(kReceiver, kPosition) + // TODO(turbofan): Return untagged value here. + DEFINE_RESULT_AND_PARAMETER_TYPES( + MachineType::TaggedPointer(), // result string + MachineType::AnyTagged(), // kReceiver + MachineType::IntPtr()) // kPosition + DECLARE_DESCRIPTOR(StringAtAsStringDescriptor, CallInterfaceDescriptor) +}; + class StringSubstringDescriptor final : public CallInterfaceDescriptor { public: DEFINE_PARAMETERS(kString, kFrom, kTo) diff --git a/deps/v8/src/codegen/label.h b/deps/v8/src/codegen/label.h index 430958d1906495..f45f1e62d74a2d 100644 --- a/deps/v8/src/codegen/label.h +++ b/deps/v8/src/codegen/label.h @@ -99,7 +99,7 @@ class Label { friend class Assembler; friend class Displacement; - friend class RegExpMacroAssemblerIrregexp; + friend class RegExpBytecodeGenerator; // Disallow copy construction and assignment, but allow move construction and // move assignment on selected platforms (see above). 
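The CallBuiltinPointer -> CallBuiltinByIndex refactor running through the hunks above splits the old call into LoadEntryFromBuiltinIndex (compute the entry address from the Smi-tagged builtin index) followed by a plain call. A minimal standalone sketch of that address computation, assuming the 32-bit Smi layout asserted in the ia32 version (kSmiTag == 0, kSmiTagSize == 1, kSmiShiftSize == 0); the entry_table base is a hypothetical stand-in for the isolate's builtin entry table:

#include <cassert>
#include <cstdint>

// Sketch only. With this Smi scheme a value is stored shifted left by one
// bit, so untagging (>> 1) and pointer scaling (* 4) combine into a single
// * 2; this is why the ia32 assembly above can index the table with
// times_half_system_pointer_size instead of untagging first.
constexpr uintptr_t kSystemPointerSize = 4;

uintptr_t EntryAddressForBuiltinIndex(uintptr_t entry_table,  // hypothetical
                                      uint32_t smi_index) {
  return entry_table + (smi_index >> 1) * kSystemPointerSize;
}

int main() {
  const uintptr_t table = 0x1000;  // pretend the builtin entry table is here
  const uint32_t smi = 7u << 1;    // Smi-tagged builtin index 7
  assert(EntryAddressForBuiltinIndex(table, smi) == 0x1000 + 7 * 4);
  return 0;
}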
diff --git a/deps/v8/src/codegen/mips/assembler-mips.cc b/deps/v8/src/codegen/mips/assembler-mips.cc index d6337aefb61385..423da2fb65f778 100644 --- a/deps/v8/src/codegen/mips/assembler-mips.cc +++ b/deps/v8/src/codegen/mips/assembler-mips.cc @@ -39,6 +39,7 @@ #include "src/base/bits.h" #include "src/base/cpu.h" #include "src/codegen/mips/assembler-mips-inl.h" +#include "src/codegen/safepoint-table.h" #include "src/codegen/string-constants.h" #include "src/deoptimizer/deoptimizer.h" #include "src/objects/heap-number-inl.h" @@ -2211,7 +2212,7 @@ void Assembler::break_(uint32_t code, bool break_as_stop) { emit(break_instr); } -void Assembler::stop(const char* msg, uint32_t code) { +void Assembler::stop(uint32_t code) { DCHECK_GT(code, kMaxWatchpointCode); DCHECK_LE(code, kMaxStopCode); #if V8_HOST_ARCH_MIPS diff --git a/deps/v8/src/codegen/mips/assembler-mips.h b/deps/v8/src/codegen/mips/assembler-mips.h index 640e11cf1aee86..86a07ab06e9ac2 100644 --- a/deps/v8/src/codegen/mips/assembler-mips.h +++ b/deps/v8/src/codegen/mips/assembler-mips.h @@ -558,7 +558,7 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase { // Break / Trap instructions. void break_(uint32_t code, bool break_as_stop = false); - void stop(const char* msg, uint32_t code = kMaxStopCode); + void stop(uint32_t code = kMaxStopCode); void tge(Register rs, Register rt, uint16_t code); void tgeu(Register rs, Register rt, uint16_t code); void tlt(Register rs, Register rt, uint16_t code); @@ -1478,11 +1478,13 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase { static bool IsAddImmediate(Instr instr); static Instr SetAddImmediateOffset(Instr instr, int16_t offset); static uint32_t CreateTargetAddress(Instr instr_lui, Instr instr_jic); - static void UnpackTargetAddress(uint32_t address, int16_t& lui_offset, - int16_t& jic_offset); - static void UnpackTargetAddressUnsigned(uint32_t address, - uint32_t& lui_offset, - uint32_t& jic_offset); + static void UnpackTargetAddress( + uint32_t address, int16_t& lui_offset, // NOLINT(runtime/references) + int16_t& jic_offset); // NOLINT(runtime/references) + static void UnpackTargetAddressUnsigned( + uint32_t address, + uint32_t& lui_offset, // NOLINT(runtime/references) + uint32_t& jic_offset); // NOLINT(runtime/references) static bool IsAndImmediate(Instr instr); static bool IsEmittedConstant(Instr instr); @@ -1513,7 +1515,7 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase { // Helper function for memory load/store using base register and offset. 
void AdjustBaseAndOffset( - MemOperand& src, + MemOperand& src, // NOLINT(runtime/references) OffsetAccessType access_type = OffsetAccessType::SINGLE_ACCESS, int second_access_add_to_offset = 4); diff --git a/deps/v8/src/codegen/mips/macro-assembler-mips.cc b/deps/v8/src/codegen/mips/macro-assembler-mips.cc index 483b7e895bb247..79373c1b5be197 100644 --- a/deps/v8/src/codegen/mips/macro-assembler-mips.cc +++ b/deps/v8/src/codegen/mips/macro-assembler-mips.cc @@ -189,7 +189,7 @@ void MacroAssembler::RecordWriteField(Register object, int offset, Label ok; And(t8, dst, Operand(kPointerSize - 1)); Branch(&ok, eq, t8, Operand(zero_reg)); - stop("Unaligned cell in write barrier"); + stop(); bind(&ok); } @@ -3974,18 +3974,22 @@ void TurboAssembler::Call(Handle code, RelocInfo::Mode rmode, Call(code.address(), rmode, cond, rs, rt, bd); } -void TurboAssembler::CallBuiltinPointer(Register builtin_pointer) { +void TurboAssembler::LoadEntryFromBuiltinIndex(Register builtin_index) { STATIC_ASSERT(kSystemPointerSize == 4); STATIC_ASSERT(kSmiShiftSize == 0); STATIC_ASSERT(kSmiTagSize == 1); STATIC_ASSERT(kSmiTag == 0); - // The builtin_pointer register contains the builtin index as a Smi. - SmiUntag(builtin_pointer, builtin_pointer); - Lsa(builtin_pointer, kRootRegister, builtin_pointer, kSystemPointerSizeLog2); - lw(builtin_pointer, - MemOperand(builtin_pointer, IsolateData::builtin_entry_table_offset())); - Call(builtin_pointer); + // The builtin_index register contains the builtin index as a Smi. + SmiUntag(builtin_index, builtin_index); + Lsa(builtin_index, kRootRegister, builtin_index, kSystemPointerSizeLog2); + lw(builtin_index, + MemOperand(builtin_index, IsolateData::builtin_entry_table_offset())); +} + +void TurboAssembler::CallBuiltinByIndex(Register builtin_index) { + LoadEntryFromBuiltinIndex(builtin_index); + Call(builtin_index); } void TurboAssembler::StoreReturnAddressAndCall(Register target) { @@ -4111,6 +4115,11 @@ void MacroAssembler::Swap(Register reg1, Register reg2, Register scratch) { void TurboAssembler::Call(Label* target) { BranchAndLink(target); } +void TurboAssembler::LoadAddress(Register dst, Label* target) { + uint32_t address = jump_address(target); + li(dst, address); +} + void TurboAssembler::Push(Handle handle) { UseScratchRegisterScope temps(this); Register scratch = temps.Acquire(); @@ -4694,15 +4703,15 @@ void TurboAssembler::Check(Condition cc, AbortReason reason, Register rs, void TurboAssembler::Abort(AbortReason reason) { Label abort_start; bind(&abort_start); - const char* msg = GetAbortReason(reason); #ifdef DEBUG + const char* msg = GetAbortReason(reason); RecordComment("Abort message: "); RecordComment(msg); #endif // Avoid emitting call to builtin if requested. if (trap_on_abort()) { - stop(msg); + stop(); return; } @@ -4938,7 +4947,7 @@ void MacroAssembler::AssertStackIsAligned() { andi(scratch, sp, frame_alignment_mask); Branch(&alignment_as_expected, eq, scratch, Operand(zero_reg)); // Don't use Check here, as it will call Runtime_Abort re-entering here. - stop("Unexpected stack alignment"); + stop(); bind(&alignment_as_expected); } } @@ -5352,7 +5361,7 @@ void TurboAssembler::CallCFunctionHelper(Register function_base, Branch(&alignment_as_expected, eq, scratch, Operand(zero_reg)); // Don't use Check here, as it will call Runtime_Abort possibly // re-entering here. 
- stop("Unexpected alignment in CallCFunction"); + stop(); bind(&alignment_as_expected); } } diff --git a/deps/v8/src/codegen/mips/macro-assembler-mips.h b/deps/v8/src/codegen/mips/macro-assembler-mips.h index f394e01769e7f8..3dfc7bfbad1987 100644 --- a/deps/v8/src/codegen/mips/macro-assembler-mips.h +++ b/deps/v8/src/codegen/mips/macro-assembler-mips.h @@ -212,8 +212,12 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase { void Call(Handle code, RelocInfo::Mode rmode = RelocInfo::CODE_TARGET, COND_ARGS); void Call(Label* target); + void LoadAddress(Register dst, Label* target); - void CallBuiltinPointer(Register builtin_pointer) override; + // Load the builtin given by the Smi in |builtin_index| into the same + // register. + void LoadEntryFromBuiltinIndex(Register builtin_index); + void CallBuiltinByIndex(Register builtin_index) override; void LoadCodeObjectEntry(Register destination, Register code_object) override { @@ -841,9 +845,12 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase { void BranchShortMSA(MSABranchDF df, Label* target, MSABranchCondition cond, MSARegister wt, BranchDelaySlot bd = PROTECT); - bool CalculateOffset(Label* L, int32_t& offset, OffsetSize bits); - bool CalculateOffset(Label* L, int32_t& offset, OffsetSize bits, - Register& scratch, const Operand& rt); + bool CalculateOffset(Label* L, int32_t& offset, // NOLINT(runtime/references) + OffsetSize bits); + bool CalculateOffset(Label* L, int32_t& offset, // NOLINT(runtime/references) + OffsetSize bits, + Register& scratch, // NOLINT(runtime/references) + const Operand& rt); void BranchShortHelperR6(int32_t offset, Label* L); void BranchShortHelper(int16_t offset, Label* L, BranchDelaySlot bdslot); diff --git a/deps/v8/src/codegen/mips64/assembler-mips64.cc b/deps/v8/src/codegen/mips64/assembler-mips64.cc index cb8e3dd7d1ef14..801faf6306d861 100644 --- a/deps/v8/src/codegen/mips64/assembler-mips64.cc +++ b/deps/v8/src/codegen/mips64/assembler-mips64.cc @@ -38,6 +38,7 @@ #include "src/base/cpu.h" #include "src/codegen/mips64/assembler-mips64-inl.h" +#include "src/codegen/safepoint-table.h" #include "src/codegen/string-constants.h" #include "src/deoptimizer/deoptimizer.h" #include "src/objects/heap-number-inl.h" @@ -2344,7 +2345,7 @@ void Assembler::break_(uint32_t code, bool break_as_stop) { emit(break_instr); } -void Assembler::stop(const char* msg, uint32_t code) { +void Assembler::stop(uint32_t code) { DCHECK_GT(code, kMaxWatchpointCode); DCHECK_LE(code, kMaxStopCode); #if defined(V8_HOST_ARCH_MIPS) || defined(V8_HOST_ARCH_MIPS64) diff --git a/deps/v8/src/codegen/mips64/assembler-mips64.h b/deps/v8/src/codegen/mips64/assembler-mips64.h index c7c027eef713e9..a22ddf0e7d2cc1 100644 --- a/deps/v8/src/codegen/mips64/assembler-mips64.h +++ b/deps/v8/src/codegen/mips64/assembler-mips64.h @@ -601,7 +601,7 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase { // Break / Trap instructions. void break_(uint32_t code, bool break_as_stop = false); - void stop(const char* msg, uint32_t code = kMaxStopCode); + void stop(uint32_t code = kMaxStopCode); void tge(Register rs, Register rt, uint16_t code); void tgeu(Register rs, Register rt, uint16_t code); void tlt(Register rs, Register rt, uint16_t code); @@ -1560,7 +1560,7 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase { // Helper function for memory load/store using base register and offset. 
void AdjustBaseAndOffset( - MemOperand& src, + MemOperand& src, // NOLINT(runtime/references) OffsetAccessType access_type = OffsetAccessType::SINGLE_ACCESS, int second_access_add_to_offset = 4); diff --git a/deps/v8/src/codegen/mips64/macro-assembler-mips64.cc b/deps/v8/src/codegen/mips64/macro-assembler-mips64.cc index 65c0b592ebad85..97e5af1fa8e5bf 100644 --- a/deps/v8/src/codegen/mips64/macro-assembler-mips64.cc +++ b/deps/v8/src/codegen/mips64/macro-assembler-mips64.cc @@ -187,7 +187,7 @@ void MacroAssembler::RecordWriteField(Register object, int offset, Label ok; And(t8, dst, Operand(kPointerSize - 1)); Branch(&ok, eq, t8, Operand(zero_reg)); - stop("Unaligned cell in write barrier"); + stop(); bind(&ok); } @@ -4274,18 +4274,22 @@ void TurboAssembler::Call(Handle code, RelocInfo::Mode rmode, Call(code.address(), rmode, cond, rs, rt, bd); } -void TurboAssembler::CallBuiltinPointer(Register builtin_pointer) { +void TurboAssembler::LoadEntryFromBuiltinIndex(Register builtin_index) { STATIC_ASSERT(kSystemPointerSize == 8); STATIC_ASSERT(kSmiShiftSize == 31); STATIC_ASSERT(kSmiTagSize == 1); STATIC_ASSERT(kSmiTag == 0); - // The builtin_pointer register contains the builtin index as a Smi. - SmiUntag(builtin_pointer, builtin_pointer); - Dlsa(builtin_pointer, kRootRegister, builtin_pointer, kSystemPointerSizeLog2); - Ld(builtin_pointer, - MemOperand(builtin_pointer, IsolateData::builtin_entry_table_offset())); - Call(builtin_pointer); + // The builtin_index register contains the builtin index as a Smi. + SmiUntag(builtin_index, builtin_index); + Dlsa(builtin_index, kRootRegister, builtin_index, kSystemPointerSizeLog2); + Ld(builtin_index, + MemOperand(builtin_index, IsolateData::builtin_entry_table_offset())); +} + +void TurboAssembler::CallBuiltinByIndex(Register builtin_index) { + LoadEntryFromBuiltinIndex(builtin_index); + Call(builtin_index); } void TurboAssembler::StoreReturnAddressAndCall(Register target) { @@ -4433,6 +4437,11 @@ void MacroAssembler::Swap(Register reg1, Register reg2, Register scratch) { void TurboAssembler::Call(Label* target) { BranchAndLink(target); } +void TurboAssembler::LoadAddress(Register dst, Label* target) { + uint64_t address = jump_address(target); + li(dst, address); +} + void TurboAssembler::Push(Smi smi) { UseScratchRegisterScope temps(this); Register scratch = temps.Acquire(); @@ -5026,15 +5035,15 @@ void TurboAssembler::Check(Condition cc, AbortReason reason, Register rs, void TurboAssembler::Abort(AbortReason reason) { Label abort_start; bind(&abort_start); - const char* msg = GetAbortReason(reason); #ifdef DEBUG + const char* msg = GetAbortReason(reason); RecordComment("Abort message: "); RecordComment(msg); #endif // Avoid emitting call to builtin if requested. if (trap_on_abort()) { - stop(msg); + stop(); return; } @@ -5273,7 +5282,7 @@ void MacroAssembler::AssertStackIsAligned() { Branch(&alignment_as_expected, eq, scratch, Operand(zero_reg)); } // Don't use Check here, as it will call Runtime_Abort re-entering here. - stop("Unexpected stack alignment"); + stop(); bind(&alignment_as_expected); } } @@ -5698,7 +5707,7 @@ void TurboAssembler::CallCFunctionHelper(Register function, } // Don't use Check here, as it will call Runtime_Abort possibly // re-entering here. 
- stop("Unexpected alignment in CallCFunction"); + stop(); bind(&alignment_as_expected); } } diff --git a/deps/v8/src/codegen/mips64/macro-assembler-mips64.h b/deps/v8/src/codegen/mips64/macro-assembler-mips64.h index d0f9b7f5bc5007..eb62bec0e82395 100644 --- a/deps/v8/src/codegen/mips64/macro-assembler-mips64.h +++ b/deps/v8/src/codegen/mips64/macro-assembler-mips64.h @@ -234,8 +234,12 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase { void Call(Handle code, RelocInfo::Mode rmode = RelocInfo::CODE_TARGET, COND_ARGS); void Call(Label* target); + void LoadAddress(Register dst, Label* target); - void CallBuiltinPointer(Register builtin_pointer) override; + // Load the builtin given by the Smi in |builtin_index| into the same + // register. + void LoadEntryFromBuiltinIndex(Register builtin_index); + void CallBuiltinByIndex(Register builtin_index) override; void LoadCodeObjectEntry(Register destination, Register code_object) override { @@ -845,9 +849,12 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase { void CallCFunctionHelper(Register function, int num_reg_arguments, int num_double_arguments); - bool CalculateOffset(Label* L, int32_t& offset, OffsetSize bits); - bool CalculateOffset(Label* L, int32_t& offset, OffsetSize bits, - Register& scratch, const Operand& rt); + bool CalculateOffset(Label* L, int32_t& offset, // NOLINT(runtime/references) + OffsetSize bits); + bool CalculateOffset(Label* L, int32_t& offset, // NOLINT(runtime/references) + OffsetSize bits, + Register& scratch, // NOLINT(runtime/references) + const Operand& rt); void BranchShortHelperR6(int32_t offset, Label* L); void BranchShortHelper(int16_t offset, Label* L, BranchDelaySlot bdslot); diff --git a/deps/v8/src/codegen/optimized-compilation-info.cc b/deps/v8/src/codegen/optimized-compilation-info.cc index 596d5c261ee197..f3582d868af0e1 100644 --- a/deps/v8/src/codegen/optimized-compilation-info.cc +++ b/deps/v8/src/codegen/optimized-compilation-info.cc @@ -75,9 +75,15 @@ void OptimizedCompilationInfo::ConfigureFlags() { break; case Code::BYTECODE_HANDLER: SetFlag(kCalledWithCodeStartRegister); + if (FLAG_turbo_splitting) { + MarkAsSplittingEnabled(); + } break; case Code::BUILTIN: case Code::STUB: + if (FLAG_turbo_splitting) { + MarkAsSplittingEnabled(); + } #if ENABLE_GDB_JIT_INTERFACE && DEBUG MarkAsSourcePositionsEnabled(); #endif // ENABLE_GDB_JIT_INTERFACE && DEBUG @@ -177,6 +183,8 @@ StackFrame::Type OptimizedCompilationInfo::GetOutputStackFrameType() const { return StackFrame::WASM_TO_JS; case Code::WASM_INTERPRETER_ENTRY: return StackFrame::WASM_INTERPRETER_ENTRY; + case Code::C_WASM_ENTRY: + return StackFrame::C_WASM_ENTRY; default: UNIMPLEMENTED(); return StackFrame::NONE; @@ -206,7 +214,7 @@ bool OptimizedCompilationInfo::has_native_context() const { return !closure().is_null() && !closure()->native_context().is_null(); } -Context OptimizedCompilationInfo::native_context() const { +NativeContext OptimizedCompilationInfo::native_context() const { DCHECK(has_native_context()); return closure()->native_context(); } @@ -234,6 +242,8 @@ void OptimizedCompilationInfo::SetTracingFlags(bool passes_filter) { if (FLAG_trace_turbo) SetFlag(kTraceTurboJson); if (FLAG_trace_turbo_graph) SetFlag(kTraceTurboGraph); if (FLAG_trace_turbo_scheduled) SetFlag(kTraceTurboScheduled); + if (FLAG_trace_turbo_alloc) SetFlag(kTraceTurboAllocation); + if (FLAG_trace_heap_broker) SetFlag(kTraceHeapBroker); } OptimizedCompilationInfo::InlinedFunctionHolder::InlinedFunctionHolder( diff --git 
a/deps/v8/src/codegen/optimized-compilation-info.h b/deps/v8/src/codegen/optimized-compilation-info.h index eca3a8fa3236d5..624517283e3e2c 100644 --- a/deps/v8/src/codegen/optimized-compilation-info.h +++ b/deps/v8/src/codegen/optimized-compilation-info.h @@ -9,6 +9,7 @@ #include "src/codegen/bailout-reason.h" #include "src/codegen/source-position-table.h" +#include "src/codegen/tick-counter.h" #include "src/common/globals.h" #include "src/execution/frames.h" #include "src/handles/handles.h" @@ -60,9 +61,11 @@ class V8_EXPORT_PRIVATE OptimizedCompilationInfo final { kTraceTurboJson = 1 << 14, kTraceTurboGraph = 1 << 15, kTraceTurboScheduled = 1 << 16, - kWasmRuntimeExceptionSupport = 1 << 17, - kTurboControlFlowAwareAllocation = 1 << 18, - kTurboPreprocessRanges = 1 << 19 + kTraceTurboAllocation = 1 << 17, + kTraceHeapBroker = 1 << 18, + kWasmRuntimeExceptionSupport = 1 << 19, + kTurboControlFlowAwareAllocation = 1 << 20, + kTurboPreprocessRanges = 1 << 21 }; // Construct a compilation info for optimized compilation. @@ -189,10 +192,16 @@ class V8_EXPORT_PRIVATE OptimizedCompilationInfo final { bool trace_turbo_graph_enabled() const { return GetFlag(kTraceTurboGraph); } + bool trace_turbo_allocation_enabled() const { + return GetFlag(kTraceTurboAllocation); + } + bool trace_turbo_scheduled_enabled() const { return GetFlag(kTraceTurboScheduled); } + bool trace_heap_broker_enabled() const { return GetFlag(kTraceHeapBroker); } + // Code getters and setters. void SetCode(Handle code) { code_ = code; } @@ -204,7 +213,7 @@ class V8_EXPORT_PRIVATE OptimizedCompilationInfo final { Context context() const; bool has_native_context() const; - Context native_context() const; + NativeContext native_context() const; bool has_global_object() const; JSGlobalObject global_object() const; @@ -281,6 +290,8 @@ class V8_EXPORT_PRIVATE OptimizedCompilationInfo final { std::unique_ptr ToTracedValue(); + TickCounter& tick_counter() { return tick_counter_; } + private: OptimizedCompilationInfo(Code::Kind code_kind, Zone* zone); void ConfigureFlags(); @@ -333,6 +344,8 @@ class V8_EXPORT_PRIVATE OptimizedCompilationInfo final { Vector debug_name_; std::unique_ptr trace_turbo_filename_; + TickCounter tick_counter_; + DISALLOW_COPY_AND_ASSIGN(OptimizedCompilationInfo); }; diff --git a/deps/v8/src/codegen/pending-optimization-table.cc b/deps/v8/src/codegen/pending-optimization-table.cc new file mode 100644 index 00000000000000..9e33de7918cda2 --- /dev/null +++ b/deps/v8/src/codegen/pending-optimization-table.cc @@ -0,0 +1,97 @@ +// Copyright 2019 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#include "src/codegen/pending-optimization-table.h" + +#include "src/execution/isolate-inl.h" +#include "src/heap/heap-inl.h" +#include "src/objects/hash-table.h" +#include "src/objects/js-objects.h" + +namespace v8 { +namespace internal { + +enum class FunctionStatus { kPrepareForOptimize, kMarkForOptimize }; + +void PendingOptimizationTable::PreparedForOptimization( + Isolate* isolate, Handle function) { + DCHECK(FLAG_testing_d8_test_runner); + + Handle table = + isolate->heap()->pending_optimize_for_test_bytecode().IsUndefined() + ? 
ObjectHashTable::New(isolate, 1)
+          : handle(ObjectHashTable::cast(
+                       isolate->heap()->pending_optimize_for_test_bytecode()),
+                   isolate);
+  Handle<Tuple2> tuple = isolate->factory()->NewTuple2(
+      handle(function->shared().GetBytecodeArray(), isolate),
+      handle(
+          Smi::FromInt(static_cast<int>(FunctionStatus::kPrepareForOptimize)),
+          isolate),
+      AllocationType::kYoung);
+  table =
+      ObjectHashTable::Put(table, handle(function->shared(), isolate), tuple);
+  isolate->heap()->SetPendingOptimizeForTestBytecode(*table);
+}
+
+void PendingOptimizationTable::MarkedForOptimization(
+    Isolate* isolate, Handle<JSFunction> function) {
+  DCHECK(FLAG_testing_d8_test_runner);
+
+  Handle<Object> table =
+      handle(isolate->heap()->pending_optimize_for_test_bytecode(), isolate);
+  Handle<Object> entry =
+      table->IsUndefined()
+          ? handle(ReadOnlyRoots(isolate).the_hole_value(), isolate)
+          : handle(Handle<ObjectHashTable>::cast(table)->Lookup(
+                       handle(function->shared(), isolate)),
+                   isolate);
+  if (entry->IsTheHole()) {
+    PrintF("Error: Function ");
+    function->ShortPrint();
+    PrintF(
+        " should be prepared for optimization with "
+        "%%PrepareFunctionForOptimize before "
+        "%%OptimizeFunctionOnNextCall / %%OptimizeOSR ");
+    UNREACHABLE();
+  }
+
+  DCHECK(entry->IsTuple2());
+  Handle<Tuple2>::cast(entry)->set_value2(
+      Smi::FromInt(static_cast<int>(FunctionStatus::kMarkForOptimize)));
+  table = ObjectHashTable::Put(Handle<ObjectHashTable>::cast(table),
+                               handle(function->shared(), isolate), entry);
+  isolate->heap()->SetPendingOptimizeForTestBytecode(*table);
+}
+
+void PendingOptimizationTable::FunctionWasOptimized(
+    Isolate* isolate, Handle<JSFunction> function) {
+  DCHECK(FLAG_testing_d8_test_runner);
+
+  if (isolate->heap()->pending_optimize_for_test_bytecode().IsUndefined()) {
+    return;
+  }
+
+  Handle<ObjectHashTable> table =
+      handle(ObjectHashTable::cast(
+                 isolate->heap()->pending_optimize_for_test_bytecode()),
+             isolate);
+  Handle<Object> value(table->Lookup(handle(function->shared(), isolate)),
+                       isolate);
+  // Remove only if we have already seen %OptimizeFunctionOnNextCall. If it is
+  // optimized for other reasons, still keep holding the bytecode since we may
+  // optimize it later.
+  if (!value->IsTheHole() &&
+      Smi::cast(Handle<Tuple2>::cast(value)->value2()).value() ==
+          static_cast<int>(FunctionStatus::kMarkForOptimize)) {
+    bool was_present;
+    table = table->Remove(isolate, table, handle(function->shared(), isolate),
+                          &was_present);
+    DCHECK(was_present);
+    isolate->heap()->SetPendingOptimizeForTestBytecode(*table);
+  }
+}
+
+}  // namespace internal
+}  // namespace v8
diff --git a/deps/v8/src/codegen/pending-optimization-table.h b/deps/v8/src/codegen/pending-optimization-table.h
new file mode 100644
index 00000000000000..2a2782d17a67cd
--- /dev/null
+++ b/deps/v8/src/codegen/pending-optimization-table.h
@@ -0,0 +1,44 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_CODEGEN_PENDING_OPTIMIZATION_TABLE_H_
+#define V8_CODEGEN_PENDING_OPTIMIZATION_TABLE_H_
+
+#include "src/common/globals.h"
+
+namespace v8 {
+namespace internal {
+
+// This class adds the functionality to properly test the optimized code. This
+// is only for use in tests. All these functions should only be called when
+// testing_d8_test_runner is set.
+class PendingOptimizationTable {
+ public:
+  // This function should be called before we mark the function for
+  // optimization. Calling this function ensures that |function| is compiled
+  // and has a feedback vector allocated. This also holds on to the bytecode
+  // strongly in the pending optimization table, preventing the bytecode from
+  // being flushed.
+  static void PreparedForOptimization(Isolate* isolate,
+                                      Handle<JSFunction> function);
+
+  // This function should be called when the function is marked for
+  // optimization via the intrinsics. This will update the state of the
+  // bytecode array in the pending optimization table, so that the entry can
+  // be removed once the function is optimized. If the function is already
+  // optimized, it removes the entry from the table.
+  static void MarkedForOptimization(Isolate* isolate,
+                                    Handle<JSFunction> function);
+
+  // This function should be called once the function is optimized. If there
+  // is an entry in the pending optimization table and it is marked for
+  // removal, then this function removes the entry from the pending
+  // optimization table.
+  static void FunctionWasOptimized(Isolate* isolate,
+                                   Handle<JSFunction> function);
+};
+
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_CODEGEN_PENDING_OPTIMIZATION_TABLE_H_
diff --git a/deps/v8/src/codegen/ppc/assembler-ppc.cc b/deps/v8/src/codegen/ppc/assembler-ppc.cc
index 3241f821f9e4b2..2a638af0705055 100644
--- a/deps/v8/src/codegen/ppc/assembler-ppc.cc
+++ b/deps/v8/src/codegen/ppc/assembler-ppc.cc
@@ -224,6 +224,7 @@ void Assembler::AllocateAndInstallRequestedHeapObjects(Isolate* isolate) {
 Assembler::Assembler(const AssemblerOptions& options,
                      std::unique_ptr<AssemblerBuffer> buffer)
     : AssemblerBase(options, std::move(buffer)),
+      scratch_register_list_(ip.bit()),
       constant_pool_builder_(kLoadPtrMaxReachBits, kLoadDoubleMaxReachBits) {
   reloc_info_writer.Reposition(buffer_start_ + buffer_->size(), pc_);
@@ -1490,8 +1491,7 @@ void Assembler::mtfprwa(DoubleRegister dst, Register src) {
 // Exception-generating instructions and debugging support.
 // Stops with a non-negative code less than kNumOfWatchedStops support
 // enabling/disabling and a counter feature. See simulator-ppc.h.
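The stop() signature change in the next hunk drops the message string; what remains is the numeric stop code described in the comment above. A rough sketch of the watched-stop convention, with illustrative constants (the real kNumOfWatchedStops and kMaxStopCode values live in the simulator headers, not here):

#include <cassert>
#include <cstdint>

// Illustrative limits only; see simulator-ppc.h / simulator-s390.h for the
// actual values.
constexpr uint32_t kNumOfWatchedStops = 32;
constexpr uint32_t kMaxStopCode = 127;

// Stops with a code below kNumOfWatchedStops can be enabled, disabled, and
// counted by the simulator; higher codes are plain unconditional stops.
bool IsWatchedStop(uint32_t code) {
  assert(code <= kMaxStopCode);
  return code < kNumOfWatchedStops;
}

int main() {
  assert(IsWatchedStop(3));     // low code: the simulator can watch it
  assert(!IsWatchedStop(100));  // high code: always stops
  return 0;
}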
-void Assembler::stop(const char* msg, Condition cond, int32_t code, - CRegister cr) { +void Assembler::stop(Condition cond, int32_t code, CRegister cr) { if (cond != al) { Label skip; b(NegateCondition(cond), &skip, cr); @@ -1948,6 +1948,24 @@ PatchingAssembler::~PatchingAssembler() { DCHECK_EQ(reloc_info_writer.pos(), buffer_start_ + buffer_->size()); } +UseScratchRegisterScope::UseScratchRegisterScope(Assembler* assembler) + : assembler_(assembler), + old_available_(*assembler->GetScratchRegisterList()) {} + +UseScratchRegisterScope::~UseScratchRegisterScope() { + *assembler_->GetScratchRegisterList() = old_available_; +} + +Register UseScratchRegisterScope::Acquire() { + RegList* available = assembler_->GetScratchRegisterList(); + DCHECK_NOT_NULL(available); + DCHECK_NE(*available, 0); + int index = static_cast(base::bits::CountTrailingZeros32(*available)); + Register reg = Register::from_code(index); + *available &= ~reg.bit(); + return reg; +} + } // namespace internal } // namespace v8 diff --git a/deps/v8/src/codegen/ppc/assembler-ppc.h b/deps/v8/src/codegen/ppc/assembler-ppc.h index 2c4225849f5712..dee264a75c06bb 100644 --- a/deps/v8/src/codegen/ppc/assembler-ppc.h +++ b/deps/v8/src/codegen/ppc/assembler-ppc.h @@ -437,6 +437,7 @@ class Assembler : public AssemblerBase { PPC_XX3_OPCODE_LIST(DECLARE_PPC_XX3_INSTRUCTIONS) #undef DECLARE_PPC_XX3_INSTRUCTIONS + RegList* GetScratchRegisterList() { return &scratch_register_list_; } // --------------------------------------------------------------------------- // Code generation @@ -841,8 +842,8 @@ class Assembler : public AssemblerBase { void function_descriptor(); // Exception-generating instructions and debugging support - void stop(const char* msg, Condition cond = al, - int32_t code = kDefaultStopCode, CRegister cr = cr7); + void stop(Condition cond = al, int32_t code = kDefaultStopCode, + CRegister cr = cr7); void bkpt(uint32_t imm16); // v5 and above @@ -1182,6 +1183,9 @@ class Assembler : public AssemblerBase { static constexpr int kMaxRelocSize = RelocInfoWriter::kMaxSize; std::vector relocations_; + // Scratch registers available for use by the Assembler. + RegList scratch_register_list_; + // The bound position, before this we cannot do instruction elimination. int last_bound_pos_; // Optimizable cmpi information. @@ -1297,6 +1301,7 @@ class Assembler : public AssemblerBase { friend class RelocInfo; friend class BlockTrampolinePoolScope; friend class EnsureSpace; + friend class UseScratchRegisterScope; }; class EnsureSpace { @@ -1311,6 +1316,24 @@ class PatchingAssembler : public Assembler { ~PatchingAssembler(); }; +class V8_EXPORT_PRIVATE UseScratchRegisterScope { + public: + explicit UseScratchRegisterScope(Assembler* assembler); + ~UseScratchRegisterScope(); + + Register Acquire(); + + // Check if we have registers available to acquire. + bool CanAcquire() const { return *assembler_->GetScratchRegisterList() != 0; } + + private: + friend class Assembler; + friend class TurboAssembler; + + Assembler* assembler_; + RegList old_available_; +}; + } // namespace internal } // namespace v8 diff --git a/deps/v8/src/codegen/ppc/code-stubs-ppc.cc b/deps/v8/src/codegen/ppc/code-stubs-ppc.cc deleted file mode 100644 index 937c7456623282..00000000000000 --- a/deps/v8/src/codegen/ppc/code-stubs-ppc.cc +++ /dev/null @@ -1,28 +0,0 @@ -// Copyright 2014 the V8 project authors. All rights reserved. -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. 
- -#if V8_TARGET_ARCH_PPC - -#include "src/api/api-arguments-inl.h" -#include "src/base/bits.h" -#include "src/code-stubs.h" -#include "src/codegen/assembler-inl.h" -#include "src/codegen/macro-assembler.h" -#include "src/execution/frame-constants.h" -#include "src/execution/frames.h" -#include "src/execution/isolate.h" -#include "src/ic/ic.h" -#include "src/ic/stub-cache.h" -#include "src/init/bootstrapper.h" -#include "src/numbers/double.h" -#include "src/objects/api-callbacks.h" -#include "src/regexp/jsregexp.h" -#include "src/regexp/regexp-macro-assembler.h" -#include "src/runtime/runtime.h" - -namespace v8 { -namespace internal {} // namespace internal -} // namespace v8 - -#endif // V8_TARGET_ARCH_PPC diff --git a/deps/v8/src/codegen/ppc/macro-assembler-ppc.cc b/deps/v8/src/codegen/ppc/macro-assembler-ppc.cc index 62f0fde3b8e2ee..8ab3e5b83b1866 100644 --- a/deps/v8/src/codegen/ppc/macro-assembler-ppc.cc +++ b/deps/v8/src/codegen/ppc/macro-assembler-ppc.cc @@ -419,7 +419,7 @@ void MacroAssembler::RecordWriteField(Register object, int offset, Label ok; andi(r0, dst, Operand(kPointerSize - 1)); beq(&ok, cr0); - stop("Unaligned cell in write barrier"); + stop(); bind(&ok); } @@ -1721,15 +1721,15 @@ void TurboAssembler::Check(Condition cond, AbortReason reason, CRegister cr) { void TurboAssembler::Abort(AbortReason reason) { Label abort_start; bind(&abort_start); - const char* msg = GetAbortReason(reason); #ifdef DEBUG + const char* msg = GetAbortReason(reason); RecordComment("Abort message: "); RecordComment(msg); #endif // Avoid emitting call to builtin if requested. if (trap_on_abort()) { - stop(msg); + stop(); return; } @@ -2454,27 +2454,24 @@ void TurboAssembler::LoadP(Register dst, const MemOperand& mem, Register scratch) { DCHECK_EQ(mem.rb(), no_reg); int offset = mem.offset(); + int misaligned = (offset & 3); + int adj = (offset & 3) - 4; + int alignedOffset = (offset & ~3) + 4; - if (!is_int16(offset)) { + if (!is_int16(offset) || (misaligned && !is_int16(alignedOffset))) { /* cannot use d-form */ - DCHECK_NE(scratch, no_reg); mov(scratch, Operand(offset)); LoadPX(dst, MemOperand(mem.ra(), scratch)); } else { -#if V8_TARGET_ARCH_PPC64 - int misaligned = (offset & 3); if (misaligned) { // adjust base to conform to offset alignment requirements // Todo: enhance to use scratch if dst is unsuitable - DCHECK(dst != r0); - addi(dst, mem.ra(), Operand((offset & 3) - 4)); - ld(dst, MemOperand(dst, (offset & ~3) + 4)); + DCHECK_NE(dst, r0); + addi(dst, mem.ra(), Operand(adj)); + ld(dst, MemOperand(dst, alignedOffset)); } else { ld(dst, mem); } -#else - lwz(dst, mem); -#endif } } @@ -2934,20 +2931,24 @@ void TurboAssembler::JumpIfLessThan(Register x, int32_t y, Label* dest) { blt(dest); } -void TurboAssembler::CallBuiltinPointer(Register builtin_pointer) { +void TurboAssembler::LoadEntryFromBuiltinIndex(Register builtin_index) { STATIC_ASSERT(kSystemPointerSize == 8); STATIC_ASSERT(kSmiShiftSize == 31); STATIC_ASSERT(kSmiTagSize == 1); STATIC_ASSERT(kSmiTag == 0); - // The builtin_pointer register contains the builtin index as a Smi. + // The builtin_index register contains the builtin index as a Smi. // Untagging is folded into the indexing operand below. 
- ShiftRightArithImm(builtin_pointer, builtin_pointer, + ShiftRightArithImm(builtin_index, builtin_index, kSmiShift - kSystemPointerSizeLog2); - addi(builtin_pointer, builtin_pointer, + addi(builtin_index, builtin_index, Operand(IsolateData::builtin_entry_table_offset())); - LoadPX(builtin_pointer, MemOperand(kRootRegister, builtin_pointer)); - Call(builtin_pointer); + LoadPX(builtin_index, MemOperand(kRootRegister, builtin_index)); +} + +void TurboAssembler::CallBuiltinByIndex(Register builtin_index) { + LoadEntryFromBuiltinIndex(builtin_index); + Call(builtin_index); } void TurboAssembler::LoadCodeObjectEntry(Register destination, diff --git a/deps/v8/src/codegen/ppc/macro-assembler-ppc.h b/deps/v8/src/codegen/ppc/macro-assembler-ppc.h index ae24ef9a55bc07..6249c405e3aa11 100644 --- a/deps/v8/src/codegen/ppc/macro-assembler-ppc.h +++ b/deps/v8/src/codegen/ppc/macro-assembler-ppc.h @@ -408,11 +408,14 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase { Condition cond = al); void Call(Label* target); + // Load the builtin given by the Smi in |builtin_index| into the same + // register. + void LoadEntryFromBuiltinIndex(Register builtin_index); void LoadCodeObjectEntry(Register destination, Register code_object) override; void CallCodeObject(Register code_object) override; void JumpCodeObject(Register code_object) override; - void CallBuiltinPointer(Register builtin_pointer) override; + void CallBuiltinByIndex(Register builtin_index) override; void CallForDeoptimization(Address target, int deopt_id); // Emit code to discard a non-negative number of pointer-sized elements diff --git a/deps/v8/src/codegen/s390/assembler-s390.cc b/deps/v8/src/codegen/s390/assembler-s390.cc index dbfdc9a32a0289..6776626a23a3d4 100644 --- a/deps/v8/src/codegen/s390/assembler-s390.cc +++ b/deps/v8/src/codegen/s390/assembler-s390.cc @@ -351,7 +351,8 @@ void Assembler::AllocateAndInstallRequestedHeapObjects(Isolate* isolate) { Assembler::Assembler(const AssemblerOptions& options, std::unique_ptr buffer) - : AssemblerBase(options, std::move(buffer)) { + : AssemblerBase(options, std::move(buffer)), + scratch_register_list_(ip.bit()) { reloc_info_writer.Reposition(buffer_start_ + buffer_->size(), pc_); last_bound_pos_ = 0; relocations_.reserve(128); @@ -636,8 +637,7 @@ void Assembler::branchOnCond(Condition c, int branch_offset, bool is_bound) { // Exception-generating instructions and debugging support. // Stops with a non-negative code less than kNumOfWatchedStops support // enabling/disabling and a counter feature. See simulator-s390.h . 
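Both the ppc and s390 assemblers above gain a scratch_register_list_ (seeded with ip) and a UseScratchRegisterScope that hands registers out of it and restores the list on destruction. A standalone sketch of that bitmask bookkeeping, assuming bit 12 stands in for ip and using a GCC/Clang builtin in place of base::bits::CountTrailingZeros32:

#include <cassert>
#include <cstdint>

using RegList = uint32_t;  // one bit per register, as in the scopes above

// Acquire() takes the lowest available register and clears its bit.
int AcquireLowest(RegList* available) {
  assert(*available != 0);
  int index = __builtin_ctz(*available);  // CountTrailingZeros32 stand-in
  *available &= *available - 1;           // clear the claimed bit
  return index;
}

int main() {
  RegList scratch = 1u << 12;     // assume ip maps to bit 12 for this sketch
  const RegList saved = scratch;  // what the scope's constructor remembers
  assert(AcquireLowest(&scratch) == 12);
  assert(scratch == 0);           // CanAcquire() would now report false
  scratch = saved;                // what the scope's destructor restores
  return 0;
}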
-void Assembler::stop(const char* msg, Condition cond, int32_t code, - CRegister cr) { +void Assembler::stop(Condition cond, int32_t code, CRegister cr) { if (cond != al) { Label skip; b(NegateCondition(cond), &skip, Label::kNear); @@ -831,6 +831,23 @@ void Assembler::EmitRelocations() { } } +UseScratchRegisterScope::UseScratchRegisterScope(Assembler* assembler) + : assembler_(assembler), + old_available_(*assembler->GetScratchRegisterList()) {} + +UseScratchRegisterScope::~UseScratchRegisterScope() { + *assembler_->GetScratchRegisterList() = old_available_; +} + +Register UseScratchRegisterScope::Acquire() { + RegList* available = assembler_->GetScratchRegisterList(); + DCHECK_NOT_NULL(available); + DCHECK_NE(*available, 0); + int index = static_cast(base::bits::CountTrailingZeros32(*available)); + Register reg = Register::from_code(index); + *available &= ~reg.bit(); + return reg; +} } // namespace internal } // namespace v8 #endif // V8_TARGET_ARCH_S390 diff --git a/deps/v8/src/codegen/s390/assembler-s390.h b/deps/v8/src/codegen/s390/assembler-s390.h index e22c037a312855..0653e79b67cf20 100644 --- a/deps/v8/src/codegen/s390/assembler-s390.h +++ b/deps/v8/src/codegen/s390/assembler-s390.h @@ -307,7 +307,6 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase { // in the code, so the serializer should not step forwards in memory after // a target is resolved and written. static constexpr int kSpecialTargetSize = 0; - // Number of bytes for instructions used to store pointer sized constant. #if V8_TARGET_ARCH_S390X static constexpr int kBytesForPtrConstant = 12; // IIHF + IILF @@ -315,6 +314,8 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase { static constexpr int kBytesForPtrConstant = 6; // IILF #endif + RegList* GetScratchRegisterList() { return &scratch_register_list_; } + // --------------------------------------------------------------------------- // Code generation @@ -1261,8 +1262,8 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase { void larl(Register r, Label* l); // Exception-generating instructions and debugging support - void stop(const char* msg, Condition cond = al, - int32_t code = kDefaultStopCode, CRegister cr = cr7); + void stop(Condition cond = al, int32_t code = kDefaultStopCode, + CRegister cr = cr7); void bkpt(uint32_t imm16); // v5 and above @@ -1376,6 +1377,9 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase { RelocInfoWriter reloc_info_writer; std::vector relocations_; + // Scratch registers available for use by the Assembler. + RegList scratch_register_list_; + // The bound position, before this we cannot do instruction elimination. int last_bound_pos_; @@ -1455,6 +1459,7 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase { friend class RegExpMacroAssemblerS390; friend class RelocInfo; friend class EnsureSpace; + friend class UseScratchRegisterScope; }; class EnsureSpace { @@ -1462,6 +1467,24 @@ class EnsureSpace { explicit EnsureSpace(Assembler* assembler) { assembler->CheckBuffer(); } }; +class V8_EXPORT_PRIVATE UseScratchRegisterScope { + public: + explicit UseScratchRegisterScope(Assembler* assembler); + ~UseScratchRegisterScope(); + + Register Acquire(); + + // Check if we have registers available to acquire. 
+ bool CanAcquire() const { return *assembler_->GetScratchRegisterList() != 0; } + + private: + friend class Assembler; + friend class TurboAssembler; + + Assembler* assembler_; + RegList old_available_; +}; + } // namespace internal } // namespace v8 diff --git a/deps/v8/src/codegen/s390/code-stubs-s390.cc b/deps/v8/src/codegen/s390/code-stubs-s390.cc deleted file mode 100644 index f85c3099439024..00000000000000 --- a/deps/v8/src/codegen/s390/code-stubs-s390.cc +++ /dev/null @@ -1,27 +0,0 @@ -// Copyright 2014 the V8 project authors. All rights reserved. -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -#if V8_TARGET_ARCH_S390 - -#include "src/api/api-arguments-inl.h" -#include "src/base/bits.h" -#include "src/code-stubs.h" -#include "src/codegen/assembler-inl.h" -#include "src/codegen/macro-assembler.h" -#include "src/execution/frame-constants.h" -#include "src/execution/frames.h" -#include "src/execution/isolate.h" -#include "src/ic/ic.h" -#include "src/ic/stub-cache.h" -#include "src/init/bootstrapper.h" -#include "src/objects/api-callbacks.h" -#include "src/regexp/jsregexp.h" -#include "src/regexp/regexp-macro-assembler.h" -#include "src/runtime/runtime.h" - -namespace v8 { -namespace internal {} // namespace internal -} // namespace v8 - -#endif // V8_TARGET_ARCH_S390 diff --git a/deps/v8/src/codegen/s390/macro-assembler-s390.cc b/deps/v8/src/codegen/s390/macro-assembler-s390.cc index ff94fa839e1d0f..f6c2314a84b8b8 100644 --- a/deps/v8/src/codegen/s390/macro-assembler-s390.cc +++ b/deps/v8/src/codegen/s390/macro-assembler-s390.cc @@ -440,7 +440,7 @@ void MacroAssembler::RecordWriteField(Register object, int offset, Label ok; AndP(r0, dst, Operand(kPointerSize - 1)); beq(&ok, Label::kNear); - stop("Unaligned cell in write barrier"); + stop(); bind(&ok); } @@ -1670,15 +1670,15 @@ void TurboAssembler::Check(Condition cond, AbortReason reason, CRegister cr) { void TurboAssembler::Abort(AbortReason reason) { Label abort_start; bind(&abort_start); - const char* msg = GetAbortReason(reason); #ifdef DEBUG + const char* msg = GetAbortReason(reason); RecordComment("Abort message: "); RecordComment(msg); #endif // Avoid emitting call to builtin if requested. if (trap_on_abort()) { - stop(msg); + stop(); return; } @@ -4332,20 +4332,24 @@ void TurboAssembler::JumpIfLessThan(Register x, int32_t y, Label* dest) { blt(dest); } -void TurboAssembler::CallBuiltinPointer(Register builtin_pointer) { +void TurboAssembler::LoadEntryFromBuiltinIndex(Register builtin_index) { STATIC_ASSERT(kSystemPointerSize == 8); STATIC_ASSERT(kSmiShiftSize == 31); STATIC_ASSERT(kSmiTagSize == 1); STATIC_ASSERT(kSmiTag == 0); - // The builtin_pointer register contains the builtin index as a Smi. + // The builtin_index register contains the builtin index as a Smi. // Untagging is folded into the indexing operand below. 
-  ShiftRightArithP(builtin_pointer, builtin_pointer,
+  ShiftRightArithP(builtin_index, builtin_index,
                    Operand(kSmiShift - kSystemPointerSizeLog2));
-  AddP(builtin_pointer, builtin_pointer,
+  AddP(builtin_index, builtin_index,
        Operand(IsolateData::builtin_entry_table_offset()));
-  LoadP(builtin_pointer, MemOperand(kRootRegister, builtin_pointer));
-  Call(builtin_pointer);
+  LoadP(builtin_index, MemOperand(kRootRegister, builtin_index));
+}
+
+void TurboAssembler::CallBuiltinByIndex(Register builtin_index) {
+  LoadEntryFromBuiltinIndex(builtin_index);
+  Call(builtin_index);
 }

 void TurboAssembler::LoadCodeObjectEntry(Register destination,
diff --git a/deps/v8/src/codegen/s390/macro-assembler-s390.h b/deps/v8/src/codegen/s390/macro-assembler-s390.h
index ba870874c88db0..52f668d1755a2f 100644
--- a/deps/v8/src/codegen/s390/macro-assembler-s390.h
+++ b/deps/v8/src/codegen/s390/macro-assembler-s390.h
@@ -166,11 +166,14 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {

   void Call(Label* target);

+  // Load the builtin given by the Smi in |builtin_index| into the same
+  // register.
+  void LoadEntryFromBuiltinIndex(Register builtin_index);
   void LoadCodeObjectEntry(Register destination,
                            Register code_object) override;
   void CallCodeObject(Register code_object) override;
   void JumpCodeObject(Register code_object) override;
-  void CallBuiltinPointer(Register builtin_pointer) override;
+  void CallBuiltinByIndex(Register builtin_index) override;

   // Register move. May do nothing if the registers are identical.
   void Move(Register dst, Smi smi) { LoadSmiLiteral(dst, smi); }
diff --git a/deps/v8/src/codegen/safepoint-table.h b/deps/v8/src/codegen/safepoint-table.h
index 066f0123fc7ccb..fccce1a7a69122 100644
--- a/deps/v8/src/codegen/safepoint-table.h
+++ b/deps/v8/src/codegen/safepoint-table.h
@@ -5,8 +5,8 @@
 #ifndef V8_CODEGEN_SAFEPOINT_TABLE_H_
 #define V8_CODEGEN_SAFEPOINT_TABLE_H_

+#include "src/base/memory.h"
 #include "src/common/assert-scope.h"
-#include "src/common/v8memory.h"
 #include "src/utils/allocation.h"
 #include "src/utils/utils.h"
 #include "src/zone/zone-chunk-list.h"
@@ -76,22 +76,23 @@ class SafepointTable {

   unsigned GetPcOffset(unsigned index) const {
     DCHECK(index < length_);
-    return Memory<uint32_t>(GetPcOffsetLocation(index));
+    return base::Memory<uint32_t>(GetPcOffsetLocation(index));
   }

   int GetTrampolinePcOffset(unsigned index) const {
     DCHECK(index < length_);
-    return Memory<int>(GetTrampolineLocation(index));
+    return base::Memory<int>(GetTrampolineLocation(index));
   }

   unsigned find_return_pc(unsigned pc_offset);

   SafepointEntry GetEntry(unsigned index) const {
     DCHECK(index < length_);
-    unsigned deopt_index = Memory<uint32_t>(GetEncodedInfoLocation(index));
-    uint8_t* bits = &Memory<uint8_t>(entries_ + (index * entry_size_));
+    unsigned deopt_index =
+        base::Memory<uint32_t>(GetEncodedInfoLocation(index));
+    uint8_t* bits = &base::Memory<uint8_t>(entries_ + (index * entry_size_));
     int trampoline_pc =
-        has_deopt_ ? Memory<int>(GetTrampolineLocation(index)) : -1;
+        has_deopt_ ? base::Memory<int>(GetTrampolineLocation(index)) : -1;
     return SafepointEntry(deopt_index, bits, trampoline_pc);
   }

diff --git a/deps/v8/src/codegen/source-position-table.cc b/deps/v8/src/codegen/source-position-table.cc
index 6c0aa36b27673a..e10cc075714e62 100644
--- a/deps/v8/src/codegen/source-position-table.cc
+++ b/deps/v8/src/codegen/source-position-table.cc
@@ -31,7 +31,7 @@ class MoreBit : public BitField8<bool, 7, 1> {};
 class ValueBits : public BitField8<unsigned, 0, 7> {};

 // Helper: Add the offsets from 'other' to 'value'. Also set is_statement.
-void AddAndSetEntry(PositionTableEntry& value,
+void AddAndSetEntry(PositionTableEntry& value,  // NOLINT(runtime/references)
                     const PositionTableEntry& other) {
   value.code_offset += other.code_offset;
   value.source_position += other.source_position;
@@ -39,7 +39,7 @@ void AddAndSetEntry(PositionTableEntry& value,
 }

 // Helper: Subtract the offsets from 'other' from 'value'.
-void SubtractFromEntry(PositionTableEntry& value,
+void SubtractFromEntry(PositionTableEntry& value,  // NOLINT(runtime/references)
                        const PositionTableEntry& other) {
   value.code_offset -= other.code_offset;
   value.source_position -= other.source_position;
@@ -47,7 +47,8 @@ void SubtractFromEntry(PositionTableEntry& value,
 }

 // Helper: Encode an integer.
 template <typename T>
-void EncodeInt(std::vector<byte>& bytes, T value) {
+void EncodeInt(std::vector<byte>& bytes,  // NOLINT(runtime/references)
+               T value) {
   using unsigned_type = typename std::make_unsigned<T>::type;
   // Zig-zag encoding.
   static const int kShift = sizeof(T) * kBitsPerByte - 1;
@@ -65,7 +66,8 @@ void EncodeInt(std::vector<byte>& bytes, T value) {
 }

 // Encode a PositionTableEntry.
-void EncodeEntry(std::vector<byte>& bytes, const PositionTableEntry& entry) {
+void EncodeEntry(std::vector<byte>& bytes,  // NOLINT(runtime/references)
+                 const PositionTableEntry& entry) {
   // We only accept ascending code offsets.
   DCHECK_GE(entry.code_offset, 0);
   // Since code_offset is not negative, we use sign to encode is_statement.
@@ -113,8 +115,9 @@ Vector<const byte> VectorFromByteArray(ByteArray byte_array) {
 }

 #ifdef ENABLE_SLOW_DCHECKS
-void CheckTableEquals(std::vector<PositionTableEntry>& raw_entries,
-                      SourcePositionTableIterator& encoded) {
+void CheckTableEquals(
+    std::vector<PositionTableEntry>& raw_entries,  // NOLINT(runtime/references)
+    SourcePositionTableIterator& encoded) {  // NOLINT(runtime/references)
   // Brute force testing: Record all positions and decode
   // the entire table to verify they are identical.
   auto raw = raw_entries.begin();
diff --git a/deps/v8/src/codegen/tick-counter.cc b/deps/v8/src/codegen/tick-counter.cc
new file mode 100644
index 00000000000000..2e72ae0e864ddb
--- /dev/null
+++ b/deps/v8/src/codegen/tick-counter.cc
@@ -0,0 +1,23 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/codegen/tick-counter.h"
+
+#include "src/base/logging.h"
+#include "src/base/macros.h"
+
+namespace v8 {
+namespace internal {
+
+void TickCounter::DoTick() {
+  ++ticks_;
+  // Magical number to detect performance bugs or compiler divergence.
+  // Selected as being roughly 10x of what's typically needed.
+  constexpr size_t kMaxTicks = 100000000;
+  USE(kMaxTicks);
+  DCHECK_LT(ticks_, kMaxTicks);
+}
+
+}  // namespace internal
+}  // namespace v8
diff --git a/deps/v8/src/codegen/tick-counter.h b/deps/v8/src/codegen/tick-counter.h
new file mode 100644
index 00000000000000..8d6c966bb05075
--- /dev/null
+++ b/deps/v8/src/codegen/tick-counter.h
@@ -0,0 +1,28 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_CODEGEN_TICK_COUNTER_H_
+#define V8_CODEGEN_TICK_COUNTER_H_
+
+#include <cstddef>
+
+namespace v8 {
+namespace internal {
+
+// A deterministic correlate of time, used to detect performance or
+// divergence bugs in Turbofan. DoTick() should be called frequently
+// throughout the compilation.
+class TickCounter {
+ public:
+  void DoTick();
+  size_t CurrentTicks() const { return ticks_; }
+
+ private:
+  size_t ticks_ = 0;
+};
+
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_CODEGEN_TICK_COUNTER_H_
diff --git a/deps/v8/src/codegen/turbo-assembler.h b/deps/v8/src/codegen/turbo-assembler.h
index afdef22fe7fbd2..2f058eda19aa7a 100644
--- a/deps/v8/src/codegen/turbo-assembler.h
+++ b/deps/v8/src/codegen/turbo-assembler.h
@@ -50,9 +50,9 @@ class V8_EXPORT_PRIVATE TurboAssemblerBase : public Assembler {
   void set_has_frame(bool v) { has_frame_ = v; }
   bool has_frame() const { return has_frame_; }

-  // Calls the given builtin. If builtins are embedded, the trampoline Code
-  // object on the heap is not used.
-  virtual void CallBuiltinPointer(Register builtin_pointer) = 0;
+  // Calls the builtin given by the Smi in |builtin_index|. If builtins are
+  // embedded, the trampoline Code object on the heap is not used.
+  virtual void CallBuiltinByIndex(Register builtin_index) = 0;

   // Calls/jumps to the given Code object. If builtins are embedded, the
   // trampoline Code object on the heap is not used.
diff --git a/deps/v8/src/codegen/x64/assembler-x64-inl.h b/deps/v8/src/codegen/x64/assembler-x64-inl.h
index 67cf648c04f03d..f5d0c0ffcf528c 100644
--- a/deps/v8/src/codegen/x64/assembler-x64-inl.h
+++ b/deps/v8/src/codegen/x64/assembler-x64-inl.h
@@ -8,7 +8,7 @@
 #include "src/codegen/x64/assembler-x64.h"

 #include "src/base/cpu.h"
-#include "src/common/v8memory.h"
+#include "src/base/memory.h"
 #include "src/debug/debug.h"
 #include "src/objects/objects-inl.h"

@@ -246,7 +246,7 @@ Handle<Code> Assembler::code_target_object_handle_at(Address pc) {
 }

 Handle<HeapObject> Assembler::compressed_embedded_object_handle_at(Address pc) {
-  return GetCompressedEmbeddedObject(ReadUnalignedValue<Tagged_t>(pc));
+  return GetEmbeddedObject(ReadUnalignedValue<Tagged_t>(pc));
 }

 Address Assembler::runtime_entry_at(Address pc) {
diff --git a/deps/v8/src/codegen/x64/assembler-x64.cc b/deps/v8/src/codegen/x64/assembler-x64.cc
index 3236b0f52c7398..1d28f1d45dd304 100644
--- a/deps/v8/src/codegen/x64/assembler-x64.cc
+++ b/deps/v8/src/codegen/x64/assembler-x64.cc
@@ -78,6 +78,7 @@ void CpuFeatures::ProbeImpl(bool cross_compile) {
   // Only use statically determined features for cross compile (snapshot).
   if (cross_compile) return;

+  if (cpu.has_sse42() && FLAG_enable_sse4_2) supported_ |= 1u << SSE4_2;
   if (cpu.has_sse41() && FLAG_enable_sse4_1) {
     supported_ |= 1u << SSE4_1;
     supported_ |= 1u << SSSE3;
@@ -1257,6 +1258,13 @@ void Assembler::emit_cmpxchg(Operand dst, Register src, int size) {
   emit_operand(src, dst);
 }

+void Assembler::mfence() {
+  EnsureSpace ensure_space(this);
+  emit(0x0F);
+  emit(0xAE);
+  emit(0xF0);
+}
+
 void Assembler::lfence() {
   EnsureSpace ensure_space(this);
   emit(0x0F);
@@ -1512,19 +1520,20 @@ void Assembler::j(Condition cc, Handle<Code> target, RelocInfo::Mode rmode) {
   emitl(code_target_index);
 }

-void Assembler::jmp_rel(int offset) {
+void Assembler::jmp_rel(int32_t offset) {
   EnsureSpace ensure_space(this);
-  const int short_size = sizeof(int8_t);
-  const int long_size = sizeof(int32_t);
-  --offset;  // This is how jumps are specified on x64.
-  if (is_int8(offset - short_size) && !predictable_code_size()) {
-    // 1110 1011 #8-bit disp.
+  // The offset is encoded relative to the next instruction.
+ constexpr int32_t kShortJmpDisplacement = 1 + sizeof(int8_t); + constexpr int32_t kNearJmpDisplacement = 1 + sizeof(int32_t); + DCHECK_LE(std::numeric_limits::min() + kNearJmpDisplacement, offset); + if (is_int8(offset - kShortJmpDisplacement) && !predictable_code_size()) { + // 0xEB #8-bit disp. emit(0xEB); - emit((offset - short_size) & 0xFF); + emit(offset - kShortJmpDisplacement); } else { - // 1110 1001 #32-bit disp. + // 0xE9 #32-bit disp. emit(0xE9); - emitl(offset - long_size); + emitl(offset - kNearJmpDisplacement); } } @@ -2005,84 +2014,37 @@ void Assembler::emit_not(Operand dst, int size) { } void Assembler::Nop(int n) { + DCHECK_LE(0, n); // The recommended muti-byte sequences of NOP instructions from the Intel 64 // and IA-32 Architectures Software Developer's Manual. // - // Length Assembly Byte Sequence - // 2 bytes 66 NOP 66 90H - // 3 bytes NOP DWORD ptr [EAX] 0F 1F 00H - // 4 bytes NOP DWORD ptr [EAX + 00H] 0F 1F 40 00H - // 5 bytes NOP DWORD ptr [EAX + EAX*1 + 00H] 0F 1F 44 00 00H - // 6 bytes 66 NOP DWORD ptr [EAX + EAX*1 + 00H] 66 0F 1F 44 00 00H - // 7 bytes NOP DWORD ptr [EAX + 00000000H] 0F 1F 80 00 00 00 00H - // 8 bytes NOP DWORD ptr [EAX + EAX*1 + 00000000H] 0F 1F 84 00 00 00 00 00H - // 9 bytes 66 NOP DWORD ptr [EAX + EAX*1 + 66 0F 1F 84 00 00 00 00 - // 00000000H] 00H - - EnsureSpace ensure_space(this); - while (n > 0) { - switch (n) { - case 2: - emit(0x66); - V8_FALLTHROUGH; - case 1: - emit(0x90); - return; - case 3: - emit(0x0F); - emit(0x1F); - emit(0x00); - return; - case 4: - emit(0x0F); - emit(0x1F); - emit(0x40); - emit(0x00); - return; - case 6: - emit(0x66); - V8_FALLTHROUGH; - case 5: - emit(0x0F); - emit(0x1F); - emit(0x44); - emit(0x00); - emit(0x00); - return; - case 7: - emit(0x0F); - emit(0x1F); - emit(0x80); - emit(0x00); - emit(0x00); - emit(0x00); - emit(0x00); - return; - default: - case 11: - emit(0x66); - n--; - V8_FALLTHROUGH; - case 10: - emit(0x66); - n--; - V8_FALLTHROUGH; - case 9: - emit(0x66); - n--; - V8_FALLTHROUGH; - case 8: - emit(0x0F); - emit(0x1F); - emit(0x84); - emit(0x00); - emit(0x00); - emit(0x00); - emit(0x00); - emit(0x00); - n -= 8; - } - } + // Len Assembly Byte Sequence + // 2 66 NOP 66 90H + // 3 NOP DWORD ptr [EAX] 0F 1F 00H + // 4 NOP DWORD ptr [EAX + 00H] 0F 1F 40 00H + // 5 NOP DWORD ptr [EAX + EAX*1 + 00H] 0F 1F 44 00 00H + // 6 66 NOP DWORD ptr [EAX + EAX*1 + 00H] 66 0F 1F 44 00 00H + // 7 NOP DWORD ptr [EAX + 00000000H] 0F 1F 80 00 00 00 00H + // 8 NOP DWORD ptr [EAX + EAX*1 + 00000000H] 0F 1F 84 00 00 00 00 00H + // 9 66 NOP DWORD ptr [EAX + EAX*1 + 00000000H] 66 0F 1F 84 00 00 00 00 00H + + constexpr const char* kNopSequences = + "\x66\x90" // length 1 (@1) / 2 (@0) + "\x0F\x1F\x00" // length 3 (@2) + "\x0F\x1F\x40\x00" // length 4 (@5) + "\x66\x0F\x1F\x44\x00\x00" // length 5 (@10) / 6 (@9) + "\x0F\x1F\x80\x00\x00\x00\x00" // length 7 (@15) + "\x66\x0F\x1F\x84\x00\x00\x00\x00\x00"; // length 8 (@23) / 9 (@22) + constexpr int8_t kNopOffsets[10] = {0, 1, 0, 2, 5, 10, 9, 15, 23, 22}; + + do { + EnsureSpace ensure_space(this); + int nop_bytes = std::min(n, 9); + const char* sequence = kNopSequences + kNopOffsets[nop_bytes]; + memcpy(pc_, sequence, nop_bytes); + pc_ += nop_bytes; + n -= nop_bytes; + } while (n); } void Assembler::popq(Register dst) { @@ -2883,6 +2845,18 @@ void Assembler::movd(Register dst, XMMRegister src) { } void Assembler::movq(XMMRegister dst, Register src) { + // Mixing AVX and non-AVX is expensive, catch those cases + DCHECK(!IsEnabled(AVX)); + EnsureSpace ensure_space(this); + 
emit(0x66); + emit_rex_64(dst, src); + emit(0x0F); + emit(0x6E); + emit_sse_operand(dst, src); +} + +void Assembler::movq(XMMRegister dst, Operand src) { + // Mixing AVX and non-AVX is expensive, catch those cases DCHECK(!IsEnabled(AVX)); EnsureSpace ensure_space(this); emit(0x66); @@ -2893,6 +2867,7 @@ void Assembler::movq(XMMRegister dst, Register src) { } void Assembler::movq(Register dst, XMMRegister src) { + // Mixing AVX and non-AVX is expensive, catch those cases DCHECK(!IsEnabled(AVX)); EnsureSpace ensure_space(this); emit(0x66); @@ -2903,6 +2878,7 @@ void Assembler::movq(Register dst, XMMRegister src) { } void Assembler::movq(XMMRegister dst, XMMRegister src) { + // Mixing AVX and non-AVX is expensive, catch those cases DCHECK(!IsEnabled(AVX)); EnsureSpace ensure_space(this); if (dst.low_bits() == 4) { @@ -3068,6 +3044,42 @@ void Assembler::pextrd(Operand dst, XMMRegister src, int8_t imm8) { emit(imm8); } +void Assembler::pextrq(Register dst, XMMRegister src, int8_t imm8) { + DCHECK(IsEnabled(SSE4_1)); + EnsureSpace ensure_space(this); + emit(0x66); + emit_rex_64(src, dst); + emit(0x0F); + emit(0x3A); + emit(0x16); + emit_sse_operand(src, dst); + emit(imm8); +} + +void Assembler::pinsrq(XMMRegister dst, Register src, int8_t imm8) { + DCHECK(IsEnabled(SSE4_1)); + EnsureSpace ensure_space(this); + emit(0x66); + emit_rex_64(dst, src); + emit(0x0F); + emit(0x3A); + emit(0x22); + emit_sse_operand(dst, src); + emit(imm8); +} + +void Assembler::pinsrq(XMMRegister dst, Operand src, int8_t imm8) { + DCHECK(IsEnabled(SSE4_1)); + EnsureSpace ensure_space(this); + emit(0x66); + emit_rex_64(dst, src); + emit(0x0F); + emit(0x3A); + emit(0x22); + emit_sse_operand(dst, src); + emit(imm8); +} + void Assembler::pinsrd(XMMRegister dst, Register src, int8_t imm8) { DCHECK(IsEnabled(SSE4_1)); EnsureSpace ensure_space(this); @@ -4135,6 +4147,22 @@ void Assembler::vmovq(Register dst, XMMRegister src) { emit_sse_operand(src, dst); } +void Assembler::vmovdqu(XMMRegister dst, Operand src) { + DCHECK(IsEnabled(AVX)); + EnsureSpace ensure_space(this); + emit_vex_prefix(dst, xmm0, src, kL128, kF3, k0F, kWIG); + emit(0x6F); + emit_sse_operand(dst, src); +} + +void Assembler::vmovdqu(Operand src, XMMRegister dst) { + DCHECK(IsEnabled(AVX)); + EnsureSpace ensure_space(this); + emit_vex_prefix(dst, xmm0, src, kL128, kF3, k0F, kWIG); + emit(0x7F); + emit_sse_operand(dst, src); +} + void Assembler::vinstr(byte op, XMMRegister dst, XMMRegister src1, XMMRegister src2, SIMDPrefix pp, LeadingOpcode m, VexW w) { @@ -4654,6 +4682,30 @@ void Assembler::sse4_instr(XMMRegister dst, Operand src, byte prefix, emit_sse_operand(dst, src); } +void Assembler::sse4_2_instr(XMMRegister dst, XMMRegister src, byte prefix, + byte escape1, byte escape2, byte opcode) { + DCHECK(IsEnabled(SSE4_2)); + EnsureSpace ensure_space(this); + emit(prefix); + emit_optional_rex_32(dst, src); + emit(escape1); + emit(escape2); + emit(opcode); + emit_sse_operand(dst, src); +} + +void Assembler::sse4_2_instr(XMMRegister dst, Operand src, byte prefix, + byte escape1, byte escape2, byte opcode) { + DCHECK(IsEnabled(SSE4_2)); + EnsureSpace ensure_space(this); + emit(prefix); + emit_optional_rex_32(dst, src); + emit(escape1); + emit(escape2); + emit(opcode); + emit_sse_operand(dst, src); +} + void Assembler::lddqu(XMMRegister dst, Operand src) { DCHECK(IsEnabled(SSE3)); EnsureSpace ensure_space(this); diff --git a/deps/v8/src/codegen/x64/assembler-x64.h b/deps/v8/src/codegen/x64/assembler-x64.h index dc6acb67f4fcfa..acb4fce82c1ab3 100644 --- 
a/deps/v8/src/codegen/x64/assembler-x64.h +++ b/deps/v8/src/codegen/x64/assembler-x64.h @@ -952,6 +952,23 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase { SSE4_INSTRUCTION_LIST(DECLARE_SSE4_INSTRUCTION) #undef DECLARE_SSE4_INSTRUCTION + // SSE4.2 + void sse4_2_instr(XMMRegister dst, XMMRegister src, byte prefix, byte escape1, + byte escape2, byte opcode); + void sse4_2_instr(XMMRegister dst, Operand src, byte prefix, byte escape1, + byte escape2, byte opcode); +#define DECLARE_SSE4_2_INSTRUCTION(instruction, prefix, escape1, escape2, \ + opcode) \ + void instruction(XMMRegister dst, XMMRegister src) { \ + sse4_2_instr(dst, src, 0x##prefix, 0x##escape1, 0x##escape2, 0x##opcode); \ + } \ + void instruction(XMMRegister dst, Operand src) { \ + sse4_2_instr(dst, src, 0x##prefix, 0x##escape1, 0x##escape2, 0x##opcode); \ + } + + SSE4_2_INSTRUCTION_LIST(DECLARE_SSE4_2_INSTRUCTION) +#undef DECLARE_SSE4_2_INSTRUCTION + #define DECLARE_SSE34_AVX_INSTRUCTION(instruction, prefix, escape1, escape2, \ opcode) \ void v##instruction(XMMRegister dst, XMMRegister src1, XMMRegister src2) { \ @@ -969,6 +986,7 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase { void movd(XMMRegister dst, Operand src); void movd(Register dst, XMMRegister src); void movq(XMMRegister dst, Register src); + void movq(XMMRegister dst, Operand src); void movq(Register dst, XMMRegister src); void movq(XMMRegister dst, XMMRegister src); @@ -1068,12 +1086,15 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase { void pextrw(Operand dst, XMMRegister src, int8_t imm8); void pextrd(Register dst, XMMRegister src, int8_t imm8); void pextrd(Operand dst, XMMRegister src, int8_t imm8); + void pextrq(Register dst, XMMRegister src, int8_t imm8); void pinsrb(XMMRegister dst, Register src, int8_t imm8); void pinsrb(XMMRegister dst, Operand src, int8_t imm8); void pinsrw(XMMRegister dst, Register src, int8_t imm8); void pinsrw(XMMRegister dst, Operand src, int8_t imm8); void pinsrd(XMMRegister dst, Register src, int8_t imm8); void pinsrd(XMMRegister dst, Operand src, int8_t imm8); + void pinsrq(XMMRegister dst, Register src, int8_t imm8); + void pinsrq(XMMRegister dst, Operand src, int8_t imm8); void roundss(XMMRegister dst, XMMRegister src, RoundingMode mode); void roundsd(XMMRegister dst, XMMRegister src, RoundingMode mode); @@ -1284,6 +1305,8 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase { } void vmovsd(XMMRegister dst, Operand src) { vsd(0x10, dst, xmm0, src); } void vmovsd(Operand dst, XMMRegister src) { vsd(0x11, src, xmm0, dst); } + void vmovdqu(XMMRegister dst, Operand src); + void vmovdqu(Operand dst, XMMRegister src); #define AVX_SP_3(instr, opcode) \ AVX_S_3(instr, opcode) \ @@ -1723,6 +1746,7 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase { void rorxl(Register dst, Register src, byte imm8); void rorxl(Register dst, Operand src, byte imm8); + void mfence(); void lfence(); void pause(); diff --git a/deps/v8/src/codegen/x64/constants-x64.h b/deps/v8/src/codegen/x64/constants-x64.h index 0e43b05034899a..775abecd9fd355 100644 --- a/deps/v8/src/codegen/x64/constants-x64.h +++ b/deps/v8/src/codegen/x64/constants-x64.h @@ -12,7 +12,8 @@ namespace internal { // Actual value of root register is offset from the root array's start // to take advantage of negative displacement values. // TODO(sigurds): Choose best value. -constexpr int kRootRegisterBias = 128; +// TODO(ishell): Choose best value for ptr-compr. +constexpr int kRootRegisterBias = kSystemPointerSize == kTaggedSize ? 
128 : 0; constexpr size_t kMaxPCRelativeCodeRangeInMB = 2048; } // namespace internal diff --git a/deps/v8/src/codegen/x64/macro-assembler-x64.cc b/deps/v8/src/codegen/x64/macro-assembler-x64.cc index 493c7110098f66..f13811b1aec361 100644 --- a/deps/v8/src/codegen/x64/macro-assembler-x64.cc +++ b/deps/v8/src/codegen/x64/macro-assembler-x64.cc @@ -317,15 +317,14 @@ void TurboAssembler::DecompressTaggedPointer(Register destination, void TurboAssembler::DecompressRegisterAnyTagged(Register destination, Register scratch) { - if (kUseBranchlessPtrDecompression) { + if (kUseBranchlessPtrDecompressionInGeneratedCode) { // Branchlessly compute |masked_root|: // masked_root = HAS_SMI_TAG(destination) ? 0 : kRootRegister; STATIC_ASSERT((kSmiTagSize == 1) && (kSmiTag < 32)); Register masked_root = scratch; - movl(masked_root, destination); - andl(masked_root, Immediate(kSmiTagMask)); - negq(masked_root); - andq(masked_root, kRootRegister); + xorq(masked_root, masked_root); + Condition smi = CheckSmi(destination); + cmovq(NegateCondition(smi), masked_root, kRootRegister); // Now this add operation will either leave the value unchanged if it is // a smi or add the isolate root if it is a heap object. addq(destination, masked_root); @@ -917,7 +916,7 @@ void TurboAssembler::Cvtqui2ss(XMMRegister dst, Register src) { orq(kScratchRegister, Immediate(1)); bind(&msb_not_set); Cvtqsi2ss(dst, kScratchRegister); - addss(dst, dst); + Addss(dst, dst); bind(&done); } @@ -941,7 +940,7 @@ void TurboAssembler::Cvtqui2sd(XMMRegister dst, Register src) { orq(kScratchRegister, Immediate(1)); bind(&msb_not_set); Cvtqsi2sd(dst, kScratchRegister); - addsd(dst, dst); + Addsd(dst, dst); bind(&done); } @@ -1042,11 +1041,11 @@ void ConvertFloatToUint64(TurboAssembler* tasm, Register dst, // and convert it again to see if it is within the uint64 range. if (is_double) { tasm->Move(kScratchDoubleReg, -9223372036854775808.0); - tasm->addsd(kScratchDoubleReg, src); + tasm->Addsd(kScratchDoubleReg, src); tasm->Cvttsd2siq(dst, kScratchDoubleReg); } else { tasm->Move(kScratchDoubleReg, -9223372036854775808.0f); - tasm->addss(kScratchDoubleReg, src); + tasm->Addss(kScratchDoubleReg, src); tasm->Cvttss2siq(dst, kScratchDoubleReg); } tasm->testq(dst, dst); @@ -1468,8 +1467,9 @@ void TurboAssembler::Move(Register result, Handle object, } } if (RelocInfo::IsCompressedEmbeddedObject(rmode)) { - int compressed_embedded_object_index = AddCompressedEmbeddedObject(object); - movl(result, Immediate(compressed_embedded_object_index, rmode)); + EmbeddedObjectIndex index = AddEmbeddedObject(object); + DCHECK(is_uint32(index)); + movl(result, Immediate(static_cast(index), rmode)); } else { DCHECK(RelocInfo::IsFullEmbeddedObject(rmode)); movq(result, Immediate64(object.address(), rmode)); @@ -1607,29 +1607,33 @@ void TurboAssembler::Call(Handle code_object, RelocInfo::Mode rmode) { call(code_object, rmode); } -void TurboAssembler::CallBuiltinPointer(Register builtin_pointer) { +Operand TurboAssembler::EntryFromBuiltinIndexAsOperand(Register builtin_index) { #if defined(V8_COMPRESS_POINTERS) || defined(V8_31BIT_SMIS_ON_64BIT_ARCH) STATIC_ASSERT(kSmiShiftSize == 0); STATIC_ASSERT(kSmiTagSize == 1); STATIC_ASSERT(kSmiTag == 0); - // The builtin_pointer register contains the builtin index as a Smi. + // The builtin_index register contains the builtin index as a Smi. // Untagging is folded into the indexing operand below (we use times_4 instead // of times_8 since smis are already shifted by one). 
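The operand computed just below folds the Smi untag into x64 addressing: with 31-bit Smis on a 64-bit target, the register already holds index * 2, so scaling by times_4 rather than times_8 lands on the correct 8-byte entry without a separate shift. A sketch of the equivalent pointer arithmetic; BuiltinEntry and the tiny table are invented for illustration:

    #include <cassert>
    #include <cstdint>

    // With kSmiTagSize == 1 and kSmiShiftSize == 0, a Smi-tagged index is just
    // 2 * index. For an 8-byte-per-entry table:
    //   entry = base + (2 * index) * 4  ==  base + index * 8
    // so the untag never needs its own instruction.
    uint64_t BuiltinEntry(const uint64_t* table, uint64_t smi_index) {
      const char* base = reinterpret_cast<const char*>(table);
      return *reinterpret_cast<const uint64_t*>(base + smi_index * 4);
    }

    int main() {
      uint64_t table[3] = {111, 222, 333};
      assert(BuiltinEntry(table, 2 * 1) == 222);  // Smi-tagged index 1
      assert(BuiltinEntry(table, 2 * 2) == 333);  // Smi-tagged index 2
      return 0;
    }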
- Call(Operand(kRootRegister, builtin_pointer, times_4, - IsolateData::builtin_entry_table_offset())); + return Operand(kRootRegister, builtin_index, times_4, + IsolateData::builtin_entry_table_offset()); #else // defined(V8_COMPRESS_POINTERS) || defined(V8_31BIT_SMIS_ON_64BIT_ARCH) STATIC_ASSERT(kSmiShiftSize == 31); STATIC_ASSERT(kSmiTagSize == 1); STATIC_ASSERT(kSmiTag == 0); - // The builtin_pointer register contains the builtin index as a Smi. - SmiUntag(builtin_pointer, builtin_pointer); - Call(Operand(kRootRegister, builtin_pointer, times_8, - IsolateData::builtin_entry_table_offset())); + // The builtin_index register contains the builtin index as a Smi. + SmiUntag(builtin_index, builtin_index); + return Operand(kRootRegister, builtin_index, times_8, + IsolateData::builtin_entry_table_offset()); #endif // defined(V8_COMPRESS_POINTERS) || defined(V8_31BIT_SMIS_ON_64BIT_ARCH) } +void TurboAssembler::CallBuiltinByIndex(Register builtin_index) { + Call(EntryFromBuiltinIndexAsOperand(builtin_index)); +} + void TurboAssembler::LoadCodeObjectEntry(Register destination, Register code_object) { // Code objects are called differently depending on whether we are generating @@ -1767,6 +1771,46 @@ void TurboAssembler::Pinsrd(XMMRegister dst, Operand src, int8_t imm8) { } } +void TurboAssembler::Psllq(XMMRegister dst, byte imm8) { + if (CpuFeatures::IsSupported(AVX)) { + CpuFeatureScope scope(this, AVX); + vpsllq(dst, dst, imm8); + } else { + DCHECK(!IsEnabled(AVX)); + psllq(dst, imm8); + } +} + +void TurboAssembler::Psrlq(XMMRegister dst, byte imm8) { + if (CpuFeatures::IsSupported(AVX)) { + CpuFeatureScope scope(this, AVX); + vpsrlq(dst, dst, imm8); + } else { + DCHECK(!IsEnabled(AVX)); + psrlq(dst, imm8); + } +} + +void TurboAssembler::Pslld(XMMRegister dst, byte imm8) { + if (CpuFeatures::IsSupported(AVX)) { + CpuFeatureScope scope(this, AVX); + vpslld(dst, dst, imm8); + } else { + DCHECK(!IsEnabled(AVX)); + pslld(dst, imm8); + } +} + +void TurboAssembler::Psrld(XMMRegister dst, byte imm8) { + if (CpuFeatures::IsSupported(AVX)) { + CpuFeatureScope scope(this, AVX); + vpsrld(dst, dst, imm8); + } else { + DCHECK(!IsEnabled(AVX)); + psrld(dst, imm8); + } +} + void TurboAssembler::Lzcntl(Register dst, Register src) { if (CpuFeatures::IsSupported(LZCNT)) { CpuFeatureScope scope(this, LZCNT); diff --git a/deps/v8/src/codegen/x64/macro-assembler-x64.h b/deps/v8/src/codegen/x64/macro-assembler-x64.h index a5b8e60ec53a2b..139690bb8df9b0 100644 --- a/deps/v8/src/codegen/x64/macro-assembler-x64.h +++ b/deps/v8/src/codegen/x64/macro-assembler-x64.h @@ -80,7 +80,7 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase { template struct AvxHelper { Assembler* assm; - // Call an method where the AVX version expects the dst argument to be + // Call a method where the AVX version expects the dst argument to be // duplicated. template @@ -93,7 +93,7 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase { } } - // Call an method where the AVX version expects no duplicated dst argument. + // Call a method where the AVX version expects no duplicated dst argument. template void emit(Dst dst, Args... 
args) { @@ -127,11 +127,9 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase { AVX_OP(Movmskpd, movmskpd) AVX_OP(Movss, movss) AVX_OP(Movsd, movsd) + AVX_OP(Movdqu, movdqu) AVX_OP(Pcmpeqd, pcmpeqd) - AVX_OP(Pslld, pslld) - AVX_OP(Psllq, psllq) - AVX_OP(Psrld, psrld) - AVX_OP(Psrlq, psrlq) + AVX_OP(Addss, addss) AVX_OP(Addsd, addsd) AVX_OP(Mulsd, mulsd) AVX_OP(Andps, andps) @@ -344,7 +342,8 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase { void Call(ExternalReference ext); void Call(Label* target) { call(target); } - void CallBuiltinPointer(Register builtin_pointer) override; + Operand EntryFromBuiltinIndexAsOperand(Register builtin_index); + void CallBuiltinByIndex(Register builtin_index) override; void LoadCodeObjectEntry(Register destination, Register code_object) override; void CallCodeObject(Register code_object) override; @@ -368,6 +367,11 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase { void Pinsrd(XMMRegister dst, Register src, int8_t imm8); void Pinsrd(XMMRegister dst, Operand src, int8_t imm8); + void Psllq(XMMRegister dst, byte imm8); + void Psrlq(XMMRegister dst, byte imm8); + void Pslld(XMMRegister dst, byte imm8); + void Psrld(XMMRegister dst, byte imm8); + void CompareRoot(Register with, RootIndex index); void CompareRoot(Operand with, RootIndex index); diff --git a/deps/v8/src/codegen/x64/sse-instr.h b/deps/v8/src/codegen/x64/sse-instr.h index ee20483cfeff2c..56618d20e0efbf 100644 --- a/deps/v8/src/codegen/x64/sse-instr.h +++ b/deps/v8/src/codegen/x64/sse-instr.h @@ -21,6 +21,7 @@ V(paddb, 66, 0F, FC) \ V(paddw, 66, 0F, FD) \ V(paddd, 66, 0F, FE) \ + V(paddq, 66, 0F, D4) \ V(paddsb, 66, 0F, EC) \ V(paddsw, 66, 0F, ED) \ V(paddusb, 66, 0F, DC) \ @@ -46,6 +47,7 @@ V(psubb, 66, 0F, F8) \ V(psubw, 66, 0F, F9) \ V(psubd, 66, 0F, FA) \ + V(psubq, 66, 0F, FB) \ V(psubsb, 66, 0F, E8) \ V(psubsw, 66, 0F, E9) \ V(psubusb, 66, 0F, D8) \ @@ -66,6 +68,7 @@ V(psignd, 66, 0F, 38, 0A) #define SSE4_INSTRUCTION_LIST(V) \ + V(pcmpeqq, 66, 0F, 38, 29) \ V(ptest, 66, 0F, 38, 17) \ V(pmovsxbw, 66, 0F, 38, 20) \ V(pmovsxwd, 66, 0F, 38, 23) \ @@ -82,4 +85,6 @@ V(pmaxud, 66, 0F, 38, 3F) \ V(pmulld, 66, 0F, 38, 40) +#define SSE4_2_INSTRUCTION_LIST(V) V(pcmpgtq, 66, 0F, 38, 37) + #endif // V8_CODEGEN_X64_SSE_INSTR_H_ diff --git a/deps/v8/src/common/OWNERS b/deps/v8/src/common/OWNERS new file mode 100644 index 00000000000000..3f9de7e204c675 --- /dev/null +++ b/deps/v8/src/common/OWNERS @@ -0,0 +1,3 @@ +file://COMMON_OWNERS + +# COMPONENT: Blink>JavaScript diff --git a/deps/v8/src/common/globals.h b/deps/v8/src/common/globals.h index 5d4b957e84fc01..8d1bf5dfcc1e72 100644 --- a/deps/v8/src/common/globals.h +++ b/deps/v8/src/common/globals.h @@ -212,15 +212,6 @@ constexpr size_t kReservedCodeRangePages = 0; STATIC_ASSERT(kSystemPointerSize == (1 << kSystemPointerSizeLog2)); -// This macro is used for declaring and defining HeapObject getter methods that -// are a bit more efficient for the pointer compression case than the default -// parameterless getters because isolate root doesn't have to be computed from -// arbitrary field address but it comes "for free" instead. -// These alternatives are always defined (in order to avoid #ifdef mess but -// are not supposed to be used when pointer compression is not enabled. 
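Stepping back to the sse-instr.h hunks above: those V(...) lists are X-macros, expanded once per use site, and DECLARE_SSE4_2_INSTRUCTION turns each row into a pair of assembler methods. A reduced sketch of the pattern; DemoAssembler and its emit4 are invented, and V8's real macro additionally token-pastes 0x onto bare hex digits:

    #include <cstdio>

    // Each entry: V(mnemonic, prefix, escape1, escape2, opcode).
    #define DEMO_INSTRUCTION_LIST(V) \
      V(pcmpeqq, 0x66, 0x0F, 0x38, 0x29) \
      V(pcmpgtq, 0x66, 0x0F, 0x38, 0x37)

    struct DemoAssembler {
      void emit4(int a, int b, int c, int d) {
        std::printf("%02X %02X %02X %02X\n", a, b, c, d);
      }
      // One expansion of the list defines one emitter method per instruction.
    #define DECLARE_DEMO(name, prefix, escape1, escape2, opcode) \
      void name() { emit4(prefix, escape1, escape2, opcode); }
      DEMO_INSTRUCTION_LIST(DECLARE_DEMO)
    #undef DECLARE_DEMO
    };

    int main() {
      DemoAssembler a;
      a.pcmpeqq();  // prints 66 0F 38 29
      a.pcmpgtq();  // prints 66 0F 38 37
      return 0;
    }

Keeping the byte sequences in one list means the plain SSE emitters, the AVX wrappers, and the disassembler can all expand the same table and never drift apart.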
-#define ROOT_VALUE isolate_for_root -#define ROOT_PARAM Isolate* const ROOT_VALUE - #ifdef V8_COMPRESS_POINTERS static_assert( kSystemPointerSize == kInt64Size, @@ -234,11 +225,6 @@ constexpr int kTaggedSizeLog2 = 2; using Tagged_t = int32_t; using AtomicTagged_t = base::Atomic32; -#define DEFINE_ROOT_VALUE(isolate) ROOT_PARAM = isolate -#define WITH_ROOT_PARAM(...) ROOT_PARAM, ##__VA_ARGS__ -#define WITH_ROOT_VALUE(...) ROOT_VALUE, ##__VA_ARGS__ -#define WITH_ROOT(isolate_for_root, ...) isolate_for_root, ##__VA_ARGS__ - #else constexpr int kTaggedSize = kSystemPointerSize; @@ -249,16 +235,12 @@ constexpr int kTaggedSizeLog2 = kSystemPointerSizeLog2; using Tagged_t = Address; using AtomicTagged_t = base::AtomicWord; -#define DEFINE_ROOT_VALUE(isolate) -#define WITH_ROOT_PARAM(...) __VA_ARGS__ -#define WITH_ROOT_VALUE(...) __VA_ARGS__ -#define WITH_ROOT(isolate_for_root, ...) __VA_ARGS__ - #endif // V8_COMPRESS_POINTERS // Defines whether the branchless or branchful implementation of pointer // decompression should be used. -constexpr bool kUseBranchlessPtrDecompression = true; +constexpr bool kUseBranchlessPtrDecompressionInRuntime = false; +constexpr bool kUseBranchlessPtrDecompressionInGeneratedCode = false; STATIC_ASSERT(kTaggedSize == (1 << kTaggedSizeLog2)); STATIC_ASSERT((kTaggedSize == 8) == TAGGED_SIZE_8_BYTES); @@ -667,7 +649,6 @@ struct SlotTraits; template <> struct SlotTraits { using TObjectSlot = FullObjectSlot; - using TMapWordSlot = FullObjectSlot; using TMaybeObjectSlot = FullMaybeObjectSlot; using THeapObjectSlot = FullHeapObjectSlot; }; @@ -678,12 +659,10 @@ template <> struct SlotTraits { #ifdef V8_COMPRESS_POINTERS using TObjectSlot = CompressedObjectSlot; - using TMapWordSlot = CompressedMapWordSlot; using TMaybeObjectSlot = CompressedMaybeObjectSlot; using THeapObjectSlot = CompressedHeapObjectSlot; #else using TObjectSlot = FullObjectSlot; - using TMapWordSlot = FullObjectSlot; using TMaybeObjectSlot = FullMaybeObjectSlot; using THeapObjectSlot = FullHeapObjectSlot; #endif @@ -693,10 +672,6 @@ struct SlotTraits { // holding Object value (smi or strong heap object). using ObjectSlot = SlotTraits::TObjectSlot; -// An MapWordSlot instance describes a kTaggedSize-sized on-heap field ("slot") -// holding HeapObject (strong heap object) value or a forwarding pointer. -using MapWordSlot = SlotTraits::TMapWordSlot; - // A MaybeObjectSlot instance describes a kTaggedSize-sized on-heap field // ("slot") holding MaybeObject (smi or weak heap object or strong heap object). using MaybeObjectSlot = SlotTraits::TMaybeObjectSlot; @@ -1193,7 +1168,7 @@ enum InitializationFlag : uint8_t { kNeedsInitialization, kCreatedInitialized }; enum MaybeAssignedFlag : uint8_t { kNotAssigned, kMaybeAssigned }; -enum ParseErrorType { kSyntaxError = 0, kReferenceError = 1 }; +enum RequiresBrandCheckFlag : uint8_t { kNoBrandCheck, kRequiresBrandCheck }; enum class InterpreterPushArgsMode : unsigned { kArrayFunction, @@ -1554,6 +1529,12 @@ constexpr int kFunctionLiteralIdTopLevel = 0; constexpr int kSmallOrderedHashSetMinCapacity = 4; constexpr int kSmallOrderedHashMapMinCapacity = 4; +// Opaque data type for identifying stack frames. Used extensively +// by the debugger. +// ID_MIN_VALUE and ID_MAX_VALUE are specified to ensure that enumeration type +// has correct value range (see Issue 830 for more details). 
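The StackFrameId enum declared just below relies on a standard C++ trick: for an enum without a fixed underlying type, the valid value range is derived from the enumerators, so adding kMinInt/kMaxInt members guarantees that any int can round-trip through the enum. A minimal illustration; FrameId and FrameIdFromInt are invented names:

    #include <climits>

    // Without the MIN/MAX enumerators, the implementation chooses a value
    // range just big enough for the listed enumerators, and converting an
    // arbitrary int into the enum would not be guaranteed to round-trip.
    enum FrameId {
      FRAME_ID_MIN = INT_MIN,  // pins the lower bound of the enum's range
      FRAME_ID_MAX = INT_MAX,  // pins the upper bound
      FRAME_NO_ID = 0
    };

    FrameId FrameIdFromInt(int raw) { return static_cast<FrameId>(raw); }

    int main() {
      FrameId id = FrameIdFromInt(123456);  // any int is now representable
      return id == FRAME_NO_ID ? 1 : 0;
    }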
+enum StackFrameId { ID_MIN_VALUE = kMinInt, ID_MAX_VALUE = kMaxInt, NO_ID = 0 }; + } // namespace internal } // namespace v8 diff --git a/deps/v8/src/execution/message-template.h b/deps/v8/src/common/message-template.h similarity index 98% rename from deps/v8/src/execution/message-template.h rename to deps/v8/src/common/message-template.h index ae88aa4411c814..fedbfa5a100497 100644 --- a/deps/v8/src/execution/message-template.h +++ b/deps/v8/src/common/message-template.h @@ -2,8 +2,8 @@ // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. -#ifndef V8_EXECUTION_MESSAGE_TEMPLATE_H_ -#define V8_EXECUTION_MESSAGE_TEMPLATE_H_ +#ifndef V8_COMMON_MESSAGE_TEMPLATE_H_ +#define V8_COMMON_MESSAGE_TEMPLATE_H_ #include "src/base/logging.h" @@ -90,6 +90,7 @@ namespace internal { T(ImmutablePrototypeSet, \ "Immutable prototype object '%' cannot have their prototype set") \ T(ImportCallNotNewExpression, "Cannot use new with import") \ + T(ImportOutsideModule, "Cannot use import statement outside a module") \ T(ImportMetaOutsideModule, "Cannot use 'import.meta' outside a module") \ T(ImportMissingSpecifier, "import() requires a specifier") \ T(IncompatibleMethodReceiver, "Method % called on incompatible receiver %") \ @@ -415,6 +416,7 @@ namespace internal { "Read of private field % from an object which did not contain the field") \ T(InvalidPrivateFieldWrite, \ "Write of private field % to an object which did not contain the field") \ + T(InvalidPrivateMethodWrite, "Private method '%' is not writable") \ T(JsonParseUnexpectedEOS, "Unexpected end of JSON input") \ T(JsonParseUnexpectedToken, "Unexpected token % in JSON at position %") \ T(JsonParseUnexpectedTokenNumber, "Unexpected number in JSON at position %") \ @@ -495,7 +497,7 @@ namespace internal { T(UnexpectedSuper, "'super' keyword unexpected here") \ T(UnexpectedNewTarget, "new.target expression is not allowed here") \ T(UnexpectedTemplateString, "Unexpected template string") \ - T(UnexpectedToken, "Unexpected token %") \ + T(UnexpectedToken, "Unexpected token '%'") \ T(UnexpectedTokenUnaryExponentiation, \ "Unary operator used immediately before exponentiation expression. 
" \ "Parenthesis must be used to disambiguate operator precedence") \ @@ -562,6 +564,8 @@ namespace internal { T(TraceEventPhaseError, "Trace event phase must be a number.") \ T(TraceEventIDError, "Trace event id must be a number.") \ /* Weak refs */ \ + T(WeakRefsUnregisterTokenMustBeObject, \ + "unregisterToken ('%') must be an object") \ T(WeakRefsCleanupMustBeCallable, \ "FinalizationGroup: cleanup must be callable") \ T(WeakRefsRegisterTargetMustBeObject, \ @@ -576,16 +580,16 @@ enum class MessageTemplate { #define TEMPLATE(NAME, STRING) k##NAME, MESSAGE_TEMPLATES(TEMPLATE) #undef TEMPLATE - kLastMessage + kMessageCount }; inline MessageTemplate MessageTemplateFromInt(int message_id) { - DCHECK_LE(0, message_id); - DCHECK_LT(message_id, static_cast(MessageTemplate::kLastMessage)); + DCHECK_LT(static_cast(message_id), + static_cast(MessageTemplate::kMessageCount)); return static_cast(message_id); } } // namespace internal } // namespace v8 -#endif // V8_EXECUTION_MESSAGE_TEMPLATE_H_ +#endif // V8_COMMON_MESSAGE_TEMPLATE_H_ diff --git a/deps/v8/src/common/ptr-compr-inl.h b/deps/v8/src/common/ptr-compr-inl.h index fd0f97e904a2c0..00a79bb29107ff 100644 --- a/deps/v8/src/common/ptr-compr-inl.h +++ b/deps/v8/src/common/ptr-compr-inl.h @@ -25,8 +25,12 @@ V8_INLINE Address GetIsolateRoot(TOnHeapAddress on_heap_addr); template <> V8_INLINE Address GetIsolateRoot
<Address>(Address on_heap_addr) {
+  // We subtract 1 here in order to let the compiler generate addition of 32-bit
+  // signed constant instead of 64-bit constant (the problem is that 2Gb looks
+  // like a negative 32-bit value). It's correct because we will never use
+  // leftmost address of V8 heap as |on_heap_addr|.
   return RoundDown<kPtrComprIsolateRootAlignment>(on_heap_addr +
-                                                  kPtrComprIsolateRootBias);
+                                                  kPtrComprIsolateRootBias - 1);
 }
 
 template <>
@@ -34,17 +38,10 @@ V8_INLINE Address GetIsolateRoot<Isolate*>(Isolate* isolate) {
   return isolate->isolate_root();
 }
 
-template <>
-V8_INLINE Address GetIsolateRoot<const Isolate*>(const Isolate* isolate) {
-  return isolate->isolate_root();
-}
-
 // Decompresses smi value.
 V8_INLINE Address DecompressTaggedSigned(Tagged_t raw_value) {
-  // Current compression scheme requires |raw_value| to be sign-extended
-  // from int32_t to intptr_t.
-  intptr_t value = static_cast<intptr_t>(static_cast<int32_t>(raw_value));
-  return static_cast<Address>(value);
+  // For runtime code the upper 32-bits of the Smi value do not matter.
+  return static_cast<Address>(raw_value);
 }
 
 // Decompresses weak or strong heap object pointer or forwarding pointer,
@@ -63,18 +60,18 @@
 template <typename TOnHeapAddress>
 V8_INLINE Address DecompressTaggedAny(TOnHeapAddress on_heap_addr,
                                       Tagged_t raw_value) {
-  // Current compression scheme requires |raw_value| to be sign-extended
-  // from int32_t to intptr_t.
-  intptr_t value = static_cast<intptr_t>(static_cast<int32_t>(raw_value));
-  if (kUseBranchlessPtrDecompression) {
+  if (kUseBranchlessPtrDecompressionInRuntime) {
+    // Current compression scheme requires |raw_value| to be sign-extended
+    // from int32_t to intptr_t.
+    intptr_t value = static_cast<intptr_t>(static_cast<int32_t>(raw_value));
     // |root_mask| is 0 if the |value| was a smi or -1 otherwise.
     Address root_mask = static_cast<Address>(-(value & kSmiTagMask));
     Address root_or_zero = root_mask & GetIsolateRoot(on_heap_addr);
    return root_or_zero + static_cast<Address>(value);
   } else {
-    return HAS_SMI_TAG(value)
-               ? static_cast<Address>(value)
-               : (GetIsolateRoot(on_heap_addr) + static_cast<Address>
(value)); + return HAS_SMI_TAG(raw_value) + ? DecompressTaggedSigned(raw_value) + : DecompressTaggedPointer(on_heap_addr, raw_value); } } diff --git a/deps/v8/src/compiler-dispatcher/OWNERS b/deps/v8/src/compiler-dispatcher/OWNERS new file mode 100644 index 00000000000000..9664a4857ca4e8 --- /dev/null +++ b/deps/v8/src/compiler-dispatcher/OWNERS @@ -0,0 +1,7 @@ +ahaas@chromium.org +jkummerow@chromium.org +leszeks@chromium.org +mstarzinger@chromium.org +rmcilroy@chromium.org + +# COMPONENT: Blink>JavaScript>Compiler diff --git a/deps/v8/src/compiler/OWNERS b/deps/v8/src/compiler/OWNERS index 39beced3f3c21b..50e2af71290003 100644 --- a/deps/v8/src/compiler/OWNERS +++ b/deps/v8/src/compiler/OWNERS @@ -1,5 +1,3 @@ -set noparent - bmeurer@chromium.org jarin@chromium.org mstarzinger@chromium.org @@ -19,6 +17,7 @@ per-file wasm-*=gdeepti@chromium.org per-file int64-lowering.*=ahaas@chromium.org -per-file simd-scalar-lowering.*=aseemgarg@chromium.org +per-file simd-scalar-lowering.*=bbudge@chromium.org +per-file simd-scalar-lowering.*=gdeepti@chromium.org # COMPONENT: Blink>JavaScript>Compiler diff --git a/deps/v8/src/compiler/STYLE b/deps/v8/src/compiler/STYLE deleted file mode 100644 index ae41e3f989feaf..00000000000000 --- a/deps/v8/src/compiler/STYLE +++ /dev/null @@ -1,29 +0,0 @@ -Compiler Coding Style -===================== - -Coding style for the TurboFan compiler generally follows the Google C++ Style -Guide and the Chromium Coding Style. The notes below are usually just extensions -beyond what the Google style guide already says. If this document doesn't -mention a rule, follow the Google C++ style. - - -TODOs ------ -We use the following convention for putting TODOs into the code: - - * A TODO(turbofan) implies a performance improvement opportunity. - * A TODO(name) implies an incomplete implementation. - - -Use of C++11 auto keyword -------------------------- -Use auto to avoid type names that are just clutter. 
Continue to use manifest -type declarations when it helps readability, and never use auto for anything -but local variables, in particular auto should only be used where it is obvious -from context what the type is: - - for (auto block : x->blocks()) // clearly a Block of some kind - for (auto instr : x->instructions()) // clearly an Instruction of some kind - - for (auto b : x->predecessors()) // less clear, better to make it explicit - for (BasicBlock* b : x->predecessors()) // now clear diff --git a/deps/v8/src/compiler/access-builder.cc b/deps/v8/src/compiler/access-builder.cc index 726a81a465c34a..a369de48859ef9 100644 --- a/deps/v8/src/compiler/access-builder.cc +++ b/deps/v8/src/compiler/access-builder.cc @@ -14,9 +14,9 @@ #include "src/objects/heap-number.h" #include "src/objects/js-collection.h" #include "src/objects/js-generator.h" -#include "src/objects/module.h" #include "src/objects/objects-inl.h" #include "src/objects/ordered-hash-table.h" +#include "src/objects/source-text-module.h" namespace v8 { namespace internal { @@ -71,6 +71,26 @@ FieldAccess AccessBuilder::ForBigIntBitfield() { return access; } +// static +FieldAccess AccessBuilder::ForBigIntOptionalPadding() { + DCHECK_EQ(FIELD_SIZE(BigInt::kOptionalPaddingOffset), 4); + FieldAccess access = { + kTaggedBase, BigInt::kOptionalPaddingOffset, MaybeHandle(), + MaybeHandle(), TypeCache::Get()->kInt32, MachineType::Uint32(), + kNoWriteBarrier}; + return access; +} + +// static +FieldAccess AccessBuilder::ForBigIntLeastSignificantDigit64() { + DCHECK_EQ(BigInt::SizeFor(1) - BigInt::SizeFor(0), 8); + FieldAccess access = { + kTaggedBase, BigInt::kDigitsOffset, MaybeHandle(), + MaybeHandle(), TypeCache::Get()->kBigUint64, MachineType::Uint64(), + kNoWriteBarrier}; + return access; +} + // static FieldAccess AccessBuilder::ForJSObjectPropertiesOrHash() { FieldAccess access = { @@ -626,7 +646,7 @@ FieldAccess AccessBuilder::ForMapPrototype() { // static FieldAccess AccessBuilder::ForModuleRegularExports() { FieldAccess access = { - kTaggedBase, Module::kRegularExportsOffset, + kTaggedBase, SourceTextModule::kRegularExportsOffset, Handle(), MaybeHandle(), Type::OtherInternal(), MachineType::TypeCompressedTaggedPointer(), kPointerWriteBarrier}; @@ -636,7 +656,7 @@ FieldAccess AccessBuilder::ForModuleRegularExports() { // static FieldAccess AccessBuilder::ForModuleRegularImports() { FieldAccess access = { - kTaggedBase, Module::kRegularImportsOffset, + kTaggedBase, SourceTextModule::kRegularImportsOffset, Handle(), MaybeHandle(), Type::OtherInternal(), MachineType::TypeCompressedTaggedPointer(), kPointerWriteBarrier}; @@ -847,7 +867,7 @@ FieldAccess AccessBuilder::ForJSStringIteratorIndex() { // static FieldAccess AccessBuilder::ForValue() { FieldAccess access = { - kTaggedBase, JSValue::kValueOffset, + kTaggedBase, JSPrimitiveWrapper::kValueOffset, Handle(), MaybeHandle(), Type::NonInternal(), MachineType::TypeCompressedTagged(), kFullWriteBarrier}; diff --git a/deps/v8/src/compiler/access-builder.h b/deps/v8/src/compiler/access-builder.h index e38c487b1a7dde..e3a17fe257d405 100644 --- a/deps/v8/src/compiler/access-builder.h +++ b/deps/v8/src/compiler/access-builder.h @@ -42,6 +42,15 @@ class V8_EXPORT_PRIVATE AccessBuilder final // Provides access to BigInt's bit field. static FieldAccess ForBigIntBitfield(); + // Provides access to BigInt's 32 bit padding that is placed after the + // bitfield on 64 bit architectures without pointer compression. Do not use + // this on 32 bit architectures. 
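The padding accessor declared right after this comment exists because of ordinary struct layout rules: a 4-byte bitfield followed by 8-byte digits forces a 4-byte alignment hole on 64-bit targets. A standalone sketch; BigIntLike is an invented stand-in, not V8's real object layout:

    #include <cstddef>
    #include <cstdint>

    // On common 64-bit ABIs, uint64_t is 8-aligned, so a 4-byte header field
    // is followed by a 4-byte hole before the digits. That hole is the
    // "optional padding" the accessor exposes.
    struct BigIntLike {
      uint32_t bitfield;   // e.g. sign + digit count
      // 4 bytes of implicit padding here on 64-bit targets.
      uint64_t digits[1];  // least significant digit first
    };

    static_assert(sizeof(void*) != 8 || offsetof(BigIntLike, digits) == 8,
                  "64-bit layout: header, hole, then 8-aligned digits");

    int main() { return 0; }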
+ static FieldAccess ForBigIntOptionalPadding(); + + // Provides access to BigInt's least significant digit on 64 bit + // architectures. Do not use this on 32 bit architectures. + static FieldAccess ForBigIntLeastSignificantDigit64(); + // Provides access to JSObject::properties() field. static FieldAccess ForJSObjectPropertiesOrHash(); @@ -263,7 +272,7 @@ class V8_EXPORT_PRIVATE AccessBuilder final // Provides access to JSStringIterator::index() field. static FieldAccess ForJSStringIteratorIndex(); - // Provides access to JSValue::value() field. + // Provides access to JSPrimitiveWrapper::value() field. static FieldAccess ForValue(); // Provides access to Cell::value() field. diff --git a/deps/v8/src/compiler/access-info.cc b/deps/v8/src/compiler/access-info.cc index 713484f7348c85..6fc9e8214e5b49 100644 --- a/deps/v8/src/compiler/access-info.cc +++ b/deps/v8/src/compiler/access-info.cc @@ -8,6 +8,7 @@ #include "src/builtins/accessors.h" #include "src/compiler/compilation-dependencies.h" +#include "src/compiler/compilation-dependency.h" #include "src/compiler/type-cache.h" #include "src/ic/call-optimization.h" #include "src/logging/counters.h" @@ -78,7 +79,7 @@ PropertyAccessInfo PropertyAccessInfo::NotFound(Zone* zone, // static PropertyAccessInfo PropertyAccessInfo::DataField( Zone* zone, Handle receiver_map, - ZoneVector&& dependencies, + ZoneVector&& dependencies, FieldIndex field_index, Representation field_representation, Type field_type, MaybeHandle field_map, MaybeHandle holder, MaybeHandle transition_map) { @@ -90,7 +91,7 @@ PropertyAccessInfo PropertyAccessInfo::DataField( // static PropertyAccessInfo PropertyAccessInfo::DataConstant( Zone* zone, Handle receiver_map, - ZoneVector&& dependencies, + ZoneVector&& dependencies, FieldIndex field_index, Representation field_representation, Type field_type, MaybeHandle field_map, MaybeHandle holder, MaybeHandle transition_map) { @@ -156,8 +157,7 @@ PropertyAccessInfo::PropertyAccessInfo( FieldIndex field_index, Representation field_representation, Type field_type, MaybeHandle field_map, ZoneVector>&& receiver_maps, - ZoneVector&& - unrecorded_dependencies) + ZoneVector&& unrecorded_dependencies) : kind_(kind), receiver_maps_(receiver_maps), unrecorded_dependencies_(std::move(unrecorded_dependencies)), @@ -258,11 +258,6 @@ bool PropertyAccessInfo::Merge(PropertyAccessInfo const* that, } } -Handle PropertyAccessInfo::export_cell() const { - DCHECK_EQ(kModuleExport, kind_); - return Handle::cast(constant_); -} - AccessInfoFactory::AccessInfoFactory(JSHeapBroker* broker, CompilationDependencies* dependencies, Zone* zone) @@ -336,11 +331,10 @@ PropertyAccessInfo AccessInfoFactory::ComputeDataFieldAccessInfo( Type field_type = Type::NonInternal(); MaybeHandle field_map; MapRef map_ref(broker(), map); - ZoneVector - unrecorded_dependencies(zone()); + ZoneVector unrecorded_dependencies(zone()); + map_ref.SerializeOwnDescriptor(descriptor); if (details_representation.IsSmi()) { field_type = Type::SignedSmall(); - map_ref.SerializeOwnDescriptor(descriptor); unrecorded_dependencies.push_back( dependencies()->FieldRepresentationDependencyOffTheRecord(map_ref, descriptor)); @@ -360,19 +354,23 @@ PropertyAccessInfo AccessInfoFactory::ComputeDataFieldAccessInfo( // The field type was cleared by the GC, so we don't know anything // about the contents now. 
} - map_ref.SerializeOwnDescriptor(descriptor); unrecorded_dependencies.push_back( dependencies()->FieldRepresentationDependencyOffTheRecord(map_ref, descriptor)); if (descriptors_field_type->IsClass()) { - unrecorded_dependencies.push_back( - dependencies()->FieldTypeDependencyOffTheRecord(map_ref, descriptor)); // Remember the field map, and try to infer a useful type. Handle map(descriptors_field_type->AsClass(), isolate()); field_type = Type::For(MapRef(broker(), map)); field_map = MaybeHandle(map); } + } else { + CHECK(details_representation.IsTagged()); } + // TODO(turbofan): We may want to do this only depending on the use + // of the access info. + unrecorded_dependencies.push_back( + dependencies()->FieldTypeDependencyOffTheRecord(map_ref, descriptor)); + PropertyConstness constness; if (details.IsReadOnly() && !details.IsConfigurable()) { constness = PropertyConstness::kConst; @@ -445,9 +443,6 @@ PropertyAccessInfo AccessInfoFactory::ComputeAccessorDescriptorAccessInfo( DCHECK_IMPLIES(lookup == CallOptimization::kHolderIsReceiver, holder.is_null()); DCHECK_IMPLIES(lookup == CallOptimization::kHolderFound, !holder.is_null()); - if (V8_UNLIKELY(TracingFlags::is_runtime_stats_enabled())) { - return PropertyAccessInfo::Invalid(zone()); - } } if (access_mode == AccessMode::kLoad) { Handle cached_property_name; @@ -569,7 +564,7 @@ PropertyAccessInfo AccessInfoFactory::ComputePropertyAccessInfo( if (map_prototype->map().is_deprecated()) { // Try to migrate the prototype object so we don't embed the deprecated // map into the optimized code. - JSObject::TryMigrateInstance(map_prototype); + JSObject::TryMigrateInstance(isolate(), map_prototype); } map = handle(map_prototype->map(), isolate()); holder = map_prototype; @@ -611,8 +606,7 @@ void AccessInfoFactory::ComputePropertyAccessInfos( void PropertyAccessInfo::RecordDependencies( CompilationDependencies* dependencies) { - for (CompilationDependencies::Dependency const* d : - unrecorded_dependencies_) { + for (CompilationDependency const* d : unrecorded_dependencies_) { dependencies->RecordDependency(d); } unrecorded_dependencies_.clear(); @@ -648,6 +642,8 @@ void AccessInfoFactory::MergePropertyAccessInfos( CHECK(!result->empty()); } +Isolate* AccessInfoFactory::isolate() const { return broker()->isolate(); } + namespace { Maybe GeneralizeElementsKind(ElementsKind this_kind, @@ -760,8 +756,7 @@ PropertyAccessInfo AccessInfoFactory::LookupTransition( Type field_type = Type::NonInternal(); MaybeHandle field_map; MapRef transition_map_ref(broker(), transition_map); - ZoneVector - unrecorded_dependencies(zone()); + ZoneVector unrecorded_dependencies(zone()); if (details_representation.IsSmi()) { field_type = Type::SignedSmall(); transition_map_ref.SerializeOwnDescriptor(number); @@ -796,6 +791,7 @@ PropertyAccessInfo AccessInfoFactory::LookupTransition( unrecorded_dependencies.push_back( dependencies()->TransitionDependencyOffTheRecord( MapRef(broker(), transition_map))); + transition_map_ref.SerializeBackPointer(); // For BuildPropertyStore. // Transitioning stores *may* store to const fields. The resulting // DataConstant access infos can be distinguished from later, i.e. redundant, // stores to the same constant field by the presence of a transition map. 
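A note on the "OffTheRecord" dependencies appearing throughout this file: they follow a collect-then-commit pattern. Each access info accumulates its dependencies privately, and only a later RecordDependencies call publishes them, so speculatively computed access infos that get discarded never register anything globally. A generic sketch of that shape; all class names here are invented:

    #include <vector>

    struct Dependency { /* a fact that must hold for the generated code */ };

    class DependencyLog {
     public:
      void Record(const Dependency* d) { recorded_.push_back(d); }
     private:
      std::vector<const Dependency*> recorded_;
    };

    class AccessInfoLike {
     public:
      // Collect "off the record": nothing is globally registered yet, so an
      // access info that is computed but then discarded costs nothing.
      void AddOffTheRecord(const Dependency* d) { unrecorded_.push_back(d); }

      // Commit: called only once the access info is actually used for codegen.
      void RecordDependencies(DependencyLog* log) {
        for (const Dependency* d : unrecorded_) log->Record(d);
        unrecorded_.clear();
      }

     private:
      std::vector<const Dependency*> unrecorded_;
    };

    int main() {
      DependencyLog log;
      AccessInfoLike info;
      Dependency d;
      info.AddOffTheRecord(&d);       // speculative: not yet visible globally
      info.RecordDependencies(&log);  // commit point
      return 0;
    }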
diff --git a/deps/v8/src/compiler/access-info.h b/deps/v8/src/compiler/access-info.h index 3499069fc44fc6..4c7c3611df685c 100644 --- a/deps/v8/src/compiler/access-info.h +++ b/deps/v8/src/compiler/access-info.h @@ -8,7 +8,6 @@ #include #include "src/codegen/machine-type.h" -#include "src/compiler/compilation-dependencies.h" #include "src/compiler/types.h" #include "src/objects/feedback-vector.h" #include "src/objects/field-index.h" @@ -25,8 +24,10 @@ class Factory; namespace compiler { // Forward declarations. +class CompilationDependencies; +class CompilationDependency; class ElementAccessFeedback; -class Type; +class JSHeapBroker; class TypeCache; std::ostream& operator<<(std::ostream&, AccessMode); @@ -74,16 +75,14 @@ class PropertyAccessInfo final { MaybeHandle holder); static PropertyAccessInfo DataField( Zone* zone, Handle receiver_map, - ZoneVector&& - unrecorded_dependencies, + ZoneVector&& unrecorded_dependencies, FieldIndex field_index, Representation field_representation, Type field_type, MaybeHandle field_map = MaybeHandle(), MaybeHandle holder = MaybeHandle(), MaybeHandle transition_map = MaybeHandle()); static PropertyAccessInfo DataConstant( Zone* zone, Handle receiver_map, - ZoneVector&& - unrecorded_dependencies, + ZoneVector&& unrecorded_dependencies, FieldIndex field_index, Representation field_representation, Type field_type, MaybeHandle field_map, MaybeHandle holder, MaybeHandle transition_map = MaybeHandle()); @@ -113,9 +112,9 @@ class PropertyAccessInfo final { Kind kind() const { return kind_; } MaybeHandle holder() const { - // This CHECK tries to protect against using the access info without - // recording its dependencies first. - CHECK(unrecorded_dependencies_.empty()); + // TODO(neis): There was a CHECK here that tries to protect against + // using the access info without recording its dependencies first. + // Find a more suitable place for it. 
return holder_; } MaybeHandle transition_map() const { return transition_map_; } @@ -127,7 +126,6 @@ class PropertyAccessInfo final { ZoneVector> const& receiver_maps() const { return receiver_maps_; } - Handle export_cell() const; private: explicit PropertyAccessInfo(Zone* zone); @@ -136,17 +134,16 @@ class PropertyAccessInfo final { PropertyAccessInfo(Zone* zone, Kind kind, MaybeHandle holder, Handle constant, ZoneVector>&& receiver_maps); - PropertyAccessInfo( - Kind kind, MaybeHandle holder, MaybeHandle transition_map, - FieldIndex field_index, Representation field_representation, - Type field_type, MaybeHandle field_map, - ZoneVector>&& receiver_maps, - ZoneVector&& dependencies); + PropertyAccessInfo(Kind kind, MaybeHandle holder, + MaybeHandle transition_map, FieldIndex field_index, + Representation field_representation, Type field_type, + MaybeHandle field_map, + ZoneVector>&& receiver_maps, + ZoneVector&& dependencies); Kind kind_; ZoneVector> receiver_maps_; - ZoneVector - unrecorded_dependencies_; + ZoneVector unrecorded_dependencies_; Handle constant_; MaybeHandle transition_map_; MaybeHandle holder_; @@ -215,7 +212,7 @@ class AccessInfoFactory final { CompilationDependencies* dependencies() const { return dependencies_; } JSHeapBroker* broker() const { return broker_; } - Isolate* isolate() const { return broker()->isolate(); } + Isolate* isolate() const; Zone* zone() const { return zone_; } JSHeapBroker* const broker_; diff --git a/deps/v8/src/compiler/add-type-assertions-reducer.cc b/deps/v8/src/compiler/add-type-assertions-reducer.cc new file mode 100644 index 00000000000000..59d2fe68203ed7 --- /dev/null +++ b/deps/v8/src/compiler/add-type-assertions-reducer.cc @@ -0,0 +1,51 @@ +// Copyright 2019 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#include "src/compiler/add-type-assertions-reducer.h" + +#include "src/compiler/node-properties.h" + +namespace v8 { +namespace internal { +namespace compiler { + +AddTypeAssertionsReducer::AddTypeAssertionsReducer(Editor* editor, + JSGraph* jsgraph, Zone* zone) + : AdvancedReducer(editor), + jsgraph_(jsgraph), + visited_(jsgraph->graph()->NodeCount(), zone) {} + +AddTypeAssertionsReducer::~AddTypeAssertionsReducer() = default; + +Reduction AddTypeAssertionsReducer::Reduce(Node* node) { + if (node->opcode() == IrOpcode::kAssertType || + node->opcode() == IrOpcode::kPhi || !NodeProperties::IsTyped(node) || + visited_.Get(node)) { + return NoChange(); + } + visited_.Set(node, true); + + Type type = NodeProperties::GetType(node); + if (!type.IsRange()) { + return NoChange(); + } + + Node* assertion = graph()->NewNode(simplified()->AssertType(type), node); + NodeProperties::SetType(assertion, type); + + for (Edge edge : node->use_edges()) { + Node* const user = edge.from(); + DCHECK(!user->IsDead()); + if (NodeProperties::IsValueEdge(edge) && user != assertion) { + edge.UpdateTo(assertion); + Revisit(user); + } + } + + return NoChange(); +} + +} // namespace compiler +} // namespace internal +} // namespace v8 diff --git a/deps/v8/src/compiler/add-type-assertions-reducer.h b/deps/v8/src/compiler/add-type-assertions-reducer.h new file mode 100644 index 00000000000000..36add040e1ce7c --- /dev/null +++ b/deps/v8/src/compiler/add-type-assertions-reducer.h @@ -0,0 +1,45 @@ +// Copyright 2019 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
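Before the header continues: the Reduce method in the .cc file above inserts an AssertType node and redirects every other value use of the original node to it. Stripped of TurboFan's Node/Edge machinery, the rewiring step looks roughly like this; the toy Node type is invented, and TurboFan's UpdateTo/Revisit are only mimicked:

    #include <vector>

    struct Node {
      const char* op;
      std::vector<Node*> inputs;  // value inputs
    };

    // Insert `assertion` between `node` and all of its current users, except
    // the assertion itself (otherwise we would form a cycle).
    void InsertAssertion(Node* node, Node* assertion,
                         std::vector<Node*>& all_nodes) {
      for (Node* user : all_nodes) {
        if (user == assertion) continue;
        for (Node*& input : user->inputs) {
          if (input == node) input = assertion;  // UpdateTo, in TurboFan terms
        }
      }
    }

    int main() {
      Node value{"value", {}};
      Node use{"use", {&value}};
      Node assertion{"AssertType", {&value}};
      std::vector<Node*> nodes{&value, &use, &assertion};
      InsertAssertion(&value, &assertion, nodes);
      return use.inputs[0] == &assertion ? 0 : 1;
    }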
+ +#ifndef V8_COMPILER_ADD_TYPE_ASSERTIONS_REDUCER_H_ +#define V8_COMPILER_ADD_TYPE_ASSERTIONS_REDUCER_H_ + +#include "src/common/globals.h" +#include "src/compiler/graph-reducer.h" +#include "src/compiler/js-graph.h" +#include "src/compiler/node-aux-data.h" +#include "src/compiler/simplified-operator.h" + +namespace v8 { +namespace internal { + +namespace compiler { + +class V8_EXPORT_PRIVATE AddTypeAssertionsReducer final + : public NON_EXPORTED_BASE(AdvancedReducer) { + public: + AddTypeAssertionsReducer(Editor* editor, JSGraph* jsgraph, Zone* zone); + ~AddTypeAssertionsReducer() final; + + const char* reducer_name() const override { + return "AddTypeAssertionsReducer"; + } + + Reduction Reduce(Node* node) final; + + private: + JSGraph* const jsgraph_; + NodeAuxData visited_; + + Graph* graph() { return jsgraph_->graph(); } + SimplifiedOperatorBuilder* simplified() { return jsgraph_->simplified(); } + + DISALLOW_COPY_AND_ASSIGN(AddTypeAssertionsReducer); +}; + +} // namespace compiler +} // namespace internal +} // namespace v8 + +#endif // V8_COMPILER_ADD_TYPE_ASSERTIONS_REDUCER_H_ diff --git a/deps/v8/src/compiler/backend/arm/code-generator-arm.cc b/deps/v8/src/compiler/backend/arm/code-generator-arm.cc index d93053c64b8fb9..88a9c52a3339f5 100644 --- a/deps/v8/src/compiler/backend/arm/code-generator-arm.cc +++ b/deps/v8/src/compiler/backend/arm/code-generator-arm.cc @@ -130,6 +130,7 @@ class ArmOperandConverter final : public InstructionOperandConverter { return Operand::EmbeddedStringConstant( constant.ToDelayedStringConstant()); case Constant::kInt64: + case Constant::kCompressedHeapObject: case Constant::kHeapObject: // TODO(dcarney): loading RPO constants on arm. case Constant::kRpoNumber: @@ -308,9 +309,9 @@ Condition FlagsConditionToCondition(FlagsCondition condition) { UNREACHABLE(); } -void EmitWordLoadPoisoningIfNeeded(CodeGenerator* codegen, - InstructionCode opcode, - ArmOperandConverter& i) { +void EmitWordLoadPoisoningIfNeeded( + CodeGenerator* codegen, InstructionCode opcode, + ArmOperandConverter& i) { // NOLINT(runtime/references) const MemoryAccessMode access_mode = static_cast(MiscField::decode(opcode)); if (access_mode == kMemoryAccessPoisoned) { @@ -319,9 +320,10 @@ void EmitWordLoadPoisoningIfNeeded(CodeGenerator* codegen, } } -void ComputePoisonedAddressForLoad(CodeGenerator* codegen, - InstructionCode opcode, - ArmOperandConverter& i, Register address) { +void ComputePoisonedAddressForLoad( + CodeGenerator* codegen, InstructionCode opcode, + ArmOperandConverter& i, // NOLINT(runtime/references) + Register address) { DCHECK_EQ(kMemoryAccessPoisoned, static_cast(MiscField::decode(opcode))); switch (AddressingModeField::decode(opcode)) { @@ -711,8 +713,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( } case kArchCallBuiltinPointer: { DCHECK(!instr->InputAt(0)->IsImmediate()); - Register builtin_pointer = i.InputRegister(0); - __ CallBuiltinPointer(builtin_pointer); + Register builtin_index = i.InputRegister(0); + __ CallBuiltinByIndex(builtin_index); RecordCallPosition(instr); frame_access_state()->ClearSPDelta(); break; @@ -879,23 +881,21 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( AssembleArchTableSwitch(instr); DCHECK_EQ(LeaveCC, i.OutputSBit()); break; - case kArchDebugAbort: + case kArchAbortCSAAssert: DCHECK(i.InputRegister(0) == r1); - if (!frame_access_state()->has_frame()) { + { // We don't actually want to generate a pile of code for this, so just // claim there is a stack frame, without generating one. 
FrameScope scope(tasm(), StackFrame::NONE); - __ Call(isolate()->builtins()->builtin_handle(Builtins::kAbortJS), - RelocInfo::CODE_TARGET); - } else { - __ Call(isolate()->builtins()->builtin_handle(Builtins::kAbortJS), - RelocInfo::CODE_TARGET); + __ Call( + isolate()->builtins()->builtin_handle(Builtins::kAbortCSAAssert), + RelocInfo::CODE_TARGET); } - __ stop("kArchDebugAbort"); + __ stop(); unwinding_info_writer_.MarkBlockWillExit(); break; case kArchDebugBreak: - __ stop("kArchDebugBreak"); + __ stop(); break; case kArchComment: __ RecordComment(reinterpret_cast(i.InputInt32(0))); @@ -1752,6 +1752,10 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( } break; } + case kArmDmbIsh: { + __ dmb(ISH); + break; + } case kArmDsbIsb: { __ dsb(SY); __ isb(SY); @@ -2588,6 +2592,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( __ vpmax(NeonU32, scratch, src.low(), src.high()); __ vpmax(NeonU32, scratch, scratch, scratch); __ ExtractLane(i.OutputRegister(), scratch, NeonS32, 0); + __ cmp(i.OutputRegister(), Operand(0)); + __ mov(i.OutputRegister(), Operand(1), LeaveCC, ne); break; } case kArmS1x4AllTrue: { @@ -2597,6 +2603,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( __ vpmin(NeonU32, scratch, src.low(), src.high()); __ vpmin(NeonU32, scratch, scratch, scratch); __ ExtractLane(i.OutputRegister(), scratch, NeonS32, 0); + __ cmp(i.OutputRegister(), Operand(0)); + __ mov(i.OutputRegister(), Operand(1), LeaveCC, ne); break; } case kArmS1x8AnyTrue: { @@ -2607,6 +2615,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( __ vpmax(NeonU16, scratch, scratch, scratch); __ vpmax(NeonU16, scratch, scratch, scratch); __ ExtractLane(i.OutputRegister(), scratch, NeonS16, 0); + __ cmp(i.OutputRegister(), Operand(0)); + __ mov(i.OutputRegister(), Operand(1), LeaveCC, ne); break; } case kArmS1x8AllTrue: { @@ -2617,6 +2627,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( __ vpmin(NeonU16, scratch, scratch, scratch); __ vpmin(NeonU16, scratch, scratch, scratch); __ ExtractLane(i.OutputRegister(), scratch, NeonS16, 0); + __ cmp(i.OutputRegister(), Operand(0)); + __ mov(i.OutputRegister(), Operand(1), LeaveCC, ne); break; } case kArmS1x16AnyTrue: { @@ -2631,6 +2643,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( // kDoubleRegZero is not changed, since it is 0. 
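The cmp/mov pairs being added to each of these S1x* cases (the diff resumes below) share one purpose: vpmax/vpmin reductions leave "some nonzero lane pattern" in the output register, while wasm's any_true/all_true must produce exactly 0 or 1. The scalar equivalent of the added normalization, as a hedged sketch:

    #include <cstdint>

    // After a lane-wise max (any_true) or min (all_true) reduction, `reduced`
    // is either 0 or an arbitrary nonzero lane pattern such as 0xFFFFFFFF.
    int32_t NormalizeToBool(uint32_t reduced) {
      return reduced != 0 ? 1 : 0;  // what cmp #0 plus conditional move #1 do
    }

    int32_t AnyTrue4(const uint32_t lanes[4]) {
      uint32_t m = lanes[0];
      for (int i = 1; i < 4; ++i) m = m > lanes[i] ? m : lanes[i];  // vpmax
      return NormalizeToBool(m);
    }

    int32_t AllTrue4(const uint32_t lanes[4]) {
      uint32_t m = lanes[0];
      for (int i = 1; i < 4; ++i) m = m < lanes[i] ? m : lanes[i];  // vpmin
      return NormalizeToBool(m);
    }

    int main() {
      uint32_t v[4] = {0, 0xFFFFFFFFu, 0, 0};
      return (AnyTrue4(v) == 1 && AllTrue4(v) == 0) ? 0 : 1;
    }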
__ vtst(Neon32, q_scratch, q_scratch, q_scratch); __ ExtractLane(i.OutputRegister(), d_scratch, NeonS32, 0); + __ cmp(i.OutputRegister(), Operand(0)); + __ mov(i.OutputRegister(), Operand(1), LeaveCC, ne); break; } case kArmS1x16AllTrue: { @@ -2642,6 +2656,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( __ vpmin(NeonU8, scratch, scratch, scratch); __ vpmin(NeonU8, scratch, scratch, scratch); __ ExtractLane(i.OutputRegister(), scratch, NeonS8, 0); + __ cmp(i.OutputRegister(), Operand(0)); + __ mov(i.OutputRegister(), Operand(1), LeaveCC, ne); break; } case kWord32AtomicLoadInt8: @@ -2901,7 +2917,7 @@ void CodeGenerator::AssembleArchTrap(Instruction* instr, new (gen_->zone()) ReferenceMap(gen_->zone()); gen_->RecordSafepoint(reference_map, Safepoint::kNoLazyDeopt); if (FLAG_debug_code) { - __ stop(GetAbortReason(AbortReason::kUnexpectedReturnFromWasmTrap)); + __ stop(); } } } @@ -2993,8 +3009,14 @@ void CodeGenerator::AssembleConstructFrame() { auto call_descriptor = linkage()->GetIncomingDescriptor(); if (frame_access_state()->has_frame()) { if (call_descriptor->IsCFunctionCall()) { - __ Push(lr, fp); - __ mov(fp, sp); + if (info()->GetOutputStackFrameType() == StackFrame::C_WASM_ENTRY) { + __ StubPrologue(StackFrame::C_WASM_ENTRY); + // Reserve stack space for saving the c_entry_fp later. + __ AllocateStackSpace(kSystemPointerSize); + } else { + __ Push(lr, fp); + __ mov(fp, sp); + } } else if (call_descriptor->IsJSFunctionCall()) { __ Prologue(); if (call_descriptor->PushArgumentCount()) { @@ -3025,8 +3047,8 @@ void CodeGenerator::AssembleConstructFrame() { unwinding_info_writer_.MarkFrameConstructed(__ pc_offset()); } - int required_slots = frame()->GetTotalFrameSlotCount() - - call_descriptor->CalculateFixedFrameSize(); + int required_slots = + frame()->GetTotalFrameSlotCount() - frame()->GetFixedSlotCount(); if (info()->is_osr()) { // TurboFan OSR-compiled functions cannot be entered directly. 
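On the barriers added in this area (kArmDmbIsh above, mfence in the x64 assembler earlier in this patch, and the VisitMemoryBarrier selector hook below): these are the per-architecture lowerings of a full memory fence. For comparison, portable C++ expresses the same request with a sequentially consistent fence, which mainstream compilers lower to dmb ish on ARM and mfence (or a locked RMW) on x64:

    #include <atomic>

    int data = 0;
    std::atomic<int> flag{0};

    void Publish() {
      data = 42;
      std::atomic_thread_fence(std::memory_order_seq_cst);  // dmb ish / mfence
      flag.store(1, std::memory_order_relaxed);
    }

    int TryConsume() {
      if (flag.load(std::memory_order_relaxed) == 0) return -1;
      std::atomic_thread_fence(std::memory_order_seq_cst);  // pairs with above
      return data;
    }

    int main() {
      Publish();
      return TryConsume() == 42 ? 0 : 1;
    }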
@@ -3074,7 +3096,7 @@ void CodeGenerator::AssembleConstructFrame() { ReferenceMap* reference_map = new (zone()) ReferenceMap(zone()); RecordSafepoint(reference_map, Safepoint::kNoLazyDeopt); if (FLAG_debug_code) { - __ stop(GetAbortReason(AbortReason::kUnexpectedReturnFromThrow)); + __ stop(); } __ bind(&done); diff --git a/deps/v8/src/compiler/backend/arm/instruction-codes-arm.h b/deps/v8/src/compiler/backend/arm/instruction-codes-arm.h index 722502edc7802b..165ca39f9d3620 100644 --- a/deps/v8/src/compiler/backend/arm/instruction-codes-arm.h +++ b/deps/v8/src/compiler/backend/arm/instruction-codes-arm.h @@ -126,6 +126,7 @@ namespace compiler { V(ArmPush) \ V(ArmPoke) \ V(ArmPeek) \ + V(ArmDmbIsh) \ V(ArmDsbIsb) \ V(ArmF32x4Splat) \ V(ArmF32x4ExtractLane) \ diff --git a/deps/v8/src/compiler/backend/arm/instruction-scheduler-arm.cc b/deps/v8/src/compiler/backend/arm/instruction-scheduler-arm.cc index 211abd85b8cd66..41d7b4055fce5a 100644 --- a/deps/v8/src/compiler/backend/arm/instruction-scheduler-arm.cc +++ b/deps/v8/src/compiler/backend/arm/instruction-scheduler-arm.cc @@ -275,6 +275,7 @@ int InstructionScheduler::GetTargetInstructionFlags( case kArmStr: case kArmPush: case kArmPoke: + case kArmDmbIsh: case kArmDsbIsb: case kArmWord32AtomicPairStore: case kArmWord32AtomicPairAdd: diff --git a/deps/v8/src/compiler/backend/arm/instruction-selector-arm.cc b/deps/v8/src/compiler/backend/arm/instruction-selector-arm.cc index 678d75ae5eaa73..06aba4491ac737 100644 --- a/deps/v8/src/compiler/backend/arm/instruction-selector-arm.cc +++ b/deps/v8/src/compiler/backend/arm/instruction-selector-arm.cc @@ -441,9 +441,9 @@ void InstructionSelector::VisitStackSlot(Node* node) { sequence()->AddImmediate(Constant(slot)), 0, nullptr); } -void InstructionSelector::VisitDebugAbort(Node* node) { +void InstructionSelector::VisitAbortCSAAssert(Node* node) { ArmOperandGenerator g(this); - Emit(kArchDebugAbort, g.NoOutput(), g.UseFixed(node->InputAt(0), r1)); + Emit(kArchAbortCSAAssert, g.NoOutput(), g.UseFixed(node->InputAt(0), r1)); } void InstructionSelector::VisitLoad(Node* node) { @@ -2020,6 +2020,11 @@ void InstructionSelector::VisitFloat64InsertHighWord32(Node* node) { g.UseRegister(right)); } +void InstructionSelector::VisitMemoryBarrier(Node* node) { + ArmOperandGenerator g(this); + Emit(kArmDmbIsh, g.NoOutput()); +} + void InstructionSelector::VisitWord32AtomicLoad(Node* node) { LoadRepresentation load_rep = LoadRepresentationOf(node->op()); ArmOperandGenerator g(this); diff --git a/deps/v8/src/compiler/backend/arm64/code-generator-arm64.cc b/deps/v8/src/compiler/backend/arm64/code-generator-arm64.cc index 53864ad2e95f47..c71a63cc3d96e5 100644 --- a/deps/v8/src/compiler/backend/arm64/code-generator-arm64.cc +++ b/deps/v8/src/compiler/backend/arm64/code-generator-arm64.cc @@ -224,6 +224,7 @@ class Arm64OperandConverter final : public InstructionOperandConverter { return Operand(Operand::EmbeddedNumber(constant.ToFloat64().value())); case Constant::kExternalReference: return Operand(constant.ToExternalReference()); + case Constant::kCompressedHeapObject: // Fall through. 
case Constant::kHeapObject: return Operand(constant.ToHeapObject()); case Constant::kDelayedStringConstant: @@ -375,9 +376,9 @@ Condition FlagsConditionToCondition(FlagsCondition condition) { UNREACHABLE(); } -void EmitWordLoadPoisoningIfNeeded(CodeGenerator* codegen, - InstructionCode opcode, Instruction* instr, - Arm64OperandConverter& i) { +void EmitWordLoadPoisoningIfNeeded( + CodeGenerator* codegen, InstructionCode opcode, Instruction* instr, + Arm64OperandConverter& i) { // NOLINT(runtime/references) const MemoryAccessMode access_mode = static_cast(MiscField::decode(opcode)); if (access_mode == kMemoryAccessPoisoned) { @@ -621,8 +622,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( } case kArchCallBuiltinPointer: { DCHECK(!instr->InputAt(0)->IsImmediate()); - Register builtin_pointer = i.InputRegister(0); - __ CallBuiltinPointer(builtin_pointer); + Register builtin_index = i.InputRegister(0); + __ CallBuiltinByIndex(builtin_index); RecordCallPosition(instr); frame_access_state()->ClearSPDelta(); break; @@ -793,19 +794,17 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( case kArchLookupSwitch: AssembleArchLookupSwitch(instr); break; - case kArchDebugAbort: + case kArchAbortCSAAssert: DCHECK(i.InputRegister(0).is(x1)); - if (!frame_access_state()->has_frame()) { + { // We don't actually want to generate a pile of code for this, so just // claim there is a stack frame, without generating one. FrameScope scope(tasm(), StackFrame::NONE); - __ Call(isolate()->builtins()->builtin_handle(Builtins::kAbortJS), - RelocInfo::CODE_TARGET); - } else { - __ Call(isolate()->builtins()->builtin_handle(Builtins::kAbortJS), - RelocInfo::CODE_TARGET); + __ Call( + isolate()->builtins()->builtin_handle(Builtins::kAbortCSAAssert), + RelocInfo::CODE_TARGET); } - __ Debug("kArchDebugAbort", 0, BREAK); + __ Debug("kArchAbortCSAAssert", 0, BREAK); unwinding_info_writer_.MarkBlockWillExit(); break; case kArchDebugBreak: @@ -867,9 +866,6 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( this, object, offset, value, mode, DetermineStubCallMode(), &unwinding_info_writer_); __ StoreTaggedField(value, MemOperand(object, offset)); - if (COMPRESS_POINTERS_BOOL) { - __ DecompressTaggedPointer(object, object); - } __ CheckPageFlag(object, MemoryChunk::kPointersFromHereAreInterestingMask, eq, ool->entry()); __ Bind(ool->exit()); @@ -1629,6 +1625,9 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( case kArm64StrCompressTagged: __ StoreTaggedField(i.InputOrZeroRegister64(0), i.MemoryOperand(1)); break; + case kArm64DmbIsh: + __ Dmb(InnerShareable, BarrierAll); + break; case kArm64DsbIsb: __ Dsb(FullSystem, BarrierAll); __ Isb(); @@ -2200,6 +2199,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( VRegister temp = scope.AcquireV(format); \ __ Instr(temp, i.InputSimd128Register(0).V##FORMAT()); \ __ Umov(i.OutputRegister32(), temp, 0); \ + __ Cmp(i.OutputRegister32(), 0); \ + __ Cset(i.OutputRegister32(), ne); \ break; \ } SIMD_REDUCE_OP_CASE(kArm64S1x4AnyTrue, Umaxv, kFormatS, 4S); @@ -2399,12 +2400,14 @@ void CodeGenerator::AssembleArchTableSwitch(Instruction* instr) { __ Adr(temp, &table); __ Add(temp, temp, Operand(input, UXTW, 2)); __ Br(temp); - __ StartBlockPools(); - __ Bind(&table); - for (size_t index = 0; index < case_count; ++index) { - __ B(GetLabel(i.InputRpo(index + 2))); + { + TurboAssembler::BlockPoolsScope block_pools(tasm(), + case_count * kInstrSize); + __ Bind(&table); + for (size_t index = 
0; index < case_count; ++index) { + __ B(GetLabel(i.InputRpo(index + 2))); + } } - __ EndBlockPools(); } void CodeGenerator::FinishFrame(Frame* frame) { @@ -2437,8 +2440,8 @@ void CodeGenerator::AssembleConstructFrame() { // The frame has been previously padded in CodeGenerator::FinishFrame(). DCHECK_EQ(frame()->GetTotalFrameSlotCount() % 2, 0); - int required_slots = frame()->GetTotalFrameSlotCount() - - call_descriptor->CalculateFixedFrameSize(); + int required_slots = + frame()->GetTotalFrameSlotCount() - frame()->GetFixedSlotCount(); CPURegList saves = CPURegList(CPURegister::kRegister, kXRegSizeInBits, call_descriptor->CalleeSavedRegisters()); @@ -2577,7 +2580,17 @@ void CodeGenerator::AssembleConstructFrame() { MemOperand(fp, WasmCompiledFrameConstants::kWasmInstanceOffset)); } break; case CallDescriptor::kCallAddress: + if (info()->GetOutputStackFrameType() == StackFrame::C_WASM_ENTRY) { + required_slots += 2; // marker + saved c_entry_fp. + } __ Claim(required_slots); + if (info()->GetOutputStackFrameType() == StackFrame::C_WASM_ENTRY) { + UseScratchRegisterScope temps(tasm()); + Register scratch = temps.AcquireX(); + __ Mov(scratch, StackFrame::TypeToMarker(StackFrame::C_WASM_ENTRY)); + __ Str(scratch, + MemOperand(fp, TypedFrameConstants::kFrameTypeOffset)); + } break; default: UNREACHABLE(); @@ -2654,7 +2667,7 @@ void CodeGenerator::AssembleReturn(InstructionOperand* pop) { __ Ret(); } -void CodeGenerator::FinishCode() { __ CheckConstPool(true, false); } +void CodeGenerator::FinishCode() { __ ForceConstantPoolEmissionWithoutJump(); } void CodeGenerator::AssembleMove(InstructionOperand* source, InstructionOperand* destination) { @@ -2669,6 +2682,18 @@ void CodeGenerator::AssembleMove(InstructionOperand* source, } else { __ Mov(dst, src_object); } + } else if (src.type() == Constant::kCompressedHeapObject) { + Handle src_object = src.ToHeapObject(); + RootIndex index; + if (IsMaterializableFromRoot(src_object, &index)) { + __ LoadRoot(dst, index); + } else { + // TODO(v8:8977): Even though this mov happens on 32 bits (Note the + // .W()) and we are passing along the RelocInfo, we still haven't made + // the address embedded in the code-stream actually be compressed. 
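The TODO above concerns constant materialization under pointer compression. As a rough mental model, assuming the commonly described V8 scheme where a compressed tagged value is the low 32 bits of the full word and decompression rebases it on the isolate root (an assumption for illustration, not the authoritative layout):

    #include <cstdint>

    // Compression keeps the low 32 bits, hence the 32-bit Mov to dst.W().
    uint32_t Compress(uint64_t tagged_pointer) {
      return static_cast<uint32_t>(tagged_pointer);
    }

    // Decompression adds the 32-bit offset back onto the isolate root.
    uint64_t Decompress(uint64_t isolate_root, uint32_t compressed) {
      return isolate_root + compressed;
    }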
+ __ Mov(dst.W(), + Immediate(src_object, RelocInfo::COMPRESSED_EMBEDDED_OBJECT)); + } } else { __ Mov(dst, g.ToImmediate(source)); } diff --git a/deps/v8/src/compiler/backend/arm64/instruction-codes-arm64.h b/deps/v8/src/compiler/backend/arm64/instruction-codes-arm64.h index 4b7b01711172fc..1c4c0e333542c5 100644 --- a/deps/v8/src/compiler/backend/arm64/instruction-codes-arm64.h +++ b/deps/v8/src/compiler/backend/arm64/instruction-codes-arm64.h @@ -171,6 +171,7 @@ namespace compiler { V(Arm64CompressSigned) \ V(Arm64CompressPointer) \ V(Arm64CompressAny) \ + V(Arm64DmbIsh) \ V(Arm64DsbIsb) \ V(Arm64F32x4Splat) \ V(Arm64F32x4ExtractLane) \ diff --git a/deps/v8/src/compiler/backend/arm64/instruction-scheduler-arm64.cc b/deps/v8/src/compiler/backend/arm64/instruction-scheduler-arm64.cc index 502b9d7d82601d..8344887ec2feda 100644 --- a/deps/v8/src/compiler/backend/arm64/instruction-scheduler-arm64.cc +++ b/deps/v8/src/compiler/backend/arm64/instruction-scheduler-arm64.cc @@ -319,6 +319,7 @@ int InstructionScheduler::GetTargetInstructionFlags( case kArm64StrW: case kArm64Str: case kArm64StrCompressTagged: + case kArm64DmbIsh: case kArm64DsbIsb: return kHasSideEffect; diff --git a/deps/v8/src/compiler/backend/arm64/instruction-selector-arm64.cc b/deps/v8/src/compiler/backend/arm64/instruction-selector-arm64.cc index 69d82b49933ab0..a953e35a669ffb 100644 --- a/deps/v8/src/compiler/backend/arm64/instruction-selector-arm64.cc +++ b/deps/v8/src/compiler/backend/arm64/instruction-selector-arm64.cc @@ -535,9 +535,9 @@ void InstructionSelector::VisitStackSlot(Node* node) { sequence()->AddImmediate(Constant(slot)), 0, nullptr); } -void InstructionSelector::VisitDebugAbort(Node* node) { +void InstructionSelector::VisitAbortCSAAssert(Node* node) { Arm64OperandGenerator g(this); - Emit(kArchDebugAbort, g.NoOutput(), g.UseFixed(node->InputAt(0), x1)); + Emit(kArchAbortCSAAssert, g.NoOutput(), g.UseFixed(node->InputAt(0), x1)); } void EmitLoad(InstructionSelector* selector, Node* node, InstructionCode opcode, @@ -676,10 +676,11 @@ void InstructionSelector::VisitStore(Node* node) { InstructionOperand inputs[3]; size_t input_count = 0; inputs[input_count++] = g.UseUniqueRegister(base); - // OutOfLineRecordWrite uses the index in an arithmetic instruction, so we - // must check kArithmeticImm as well as kLoadStoreImm64. - if (g.CanBeImmediate(index, kArithmeticImm) && - g.CanBeImmediate(index, kLoadStoreImm64)) { + // OutOfLineRecordWrite uses the index in an add or sub instruction, but we + // can trust the assembler to generate extra instructions if the index does + // not fit into add or sub. So here only check the immediate for a store. + if (g.CanBeImmediate(index, COMPRESS_POINTERS_BOOL ? kLoadStoreImm32 + : kLoadStoreImm64)) { inputs[input_count++] = g.UseImmediate(index); addressing_mode = kMode_MRI; } else { @@ -1599,7 +1600,7 @@ void InstructionSelector::VisitChangeUint32ToUint64(Node* node) { // 32-bit operations will write their result in a W register (implicitly // clearing the top 32-bit of the corresponding X register) so the // zero-extension is a no-op. 
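The comment above is the fact exploited by the EmitIdentity changes that follow: an arm64 instruction writing a W register implicitly zeroes the top half of the corresponding X register, so ChangeUint32ToUint64 needs no code at all. In portable terms (illustrative helper, not V8 code):

    #include <cstdint>

    // What the eliminated kArchNop/move would have computed: on arm64 the
    // 64-bit register already holds exactly this value after any 32-bit op.
    uint64_t ChangeUint32ToUint64(uint32_t value) {
      return static_cast<uint64_t>(value);  // zero-extension is free
    }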
- Emit(kArchNop, g.DefineSameAsFirst(node), g.Use(value)); + EmitIdentity(node); return; } case IrOpcode::kLoad: { @@ -1610,7 +1611,7 @@ void InstructionSelector::VisitChangeUint32ToUint64(Node* node) { case MachineRepresentation::kWord8: case MachineRepresentation::kWord16: case MachineRepresentation::kWord32: - Emit(kArchNop, g.DefineSameAsFirst(node), g.Use(value)); + EmitIdentity(node); return; default: break; @@ -1646,29 +1647,75 @@ void InstructionSelector::VisitChangeTaggedSignedToCompressedSigned( void InstructionSelector::VisitChangeCompressedToTagged(Node* node) { Arm64OperandGenerator g(this); Node* const value = node->InputAt(0); - Emit(kArm64DecompressAny, g.DefineAsRegister(node), g.UseRegister(value)); + if ((value->opcode() == IrOpcode::kLoad || + value->opcode() == IrOpcode::kPoisonedLoad) && + CanCover(node, value)) { + DCHECK_EQ(LoadRepresentationOf(value->op()).representation(), + MachineRepresentation::kCompressed); + InstructionCode opcode = kArm64LdrDecompressAnyTagged; + if (value->opcode() == IrOpcode::kPoisonedLoad) { + CHECK_NE(poisoning_level_, PoisoningMitigationLevel::kDontPoison); + opcode |= MiscField::encode(kMemoryAccessPoisoned); + } + ImmediateMode immediate_mode = kLoadStoreImm32; + MachineRepresentation rep = MachineRepresentation::kCompressed; + EmitLoad(this, value, opcode, immediate_mode, rep, node); + } else { + Emit(kArm64DecompressAny, g.DefineAsRegister(node), g.UseRegister(value)); + } } void InstructionSelector::VisitChangeCompressedPointerToTaggedPointer( Node* node) { Arm64OperandGenerator g(this); Node* const value = node->InputAt(0); - Emit(kArm64DecompressPointer, g.DefineAsRegister(node), g.UseRegister(value)); + if ((value->opcode() == IrOpcode::kLoad || + value->opcode() == IrOpcode::kPoisonedLoad) && + CanCover(node, value)) { + DCHECK_EQ(LoadRepresentationOf(value->op()).representation(), + MachineRepresentation::kCompressedPointer); + InstructionCode opcode = kArm64LdrDecompressTaggedPointer; + if (value->opcode() == IrOpcode::kPoisonedLoad) { + CHECK_NE(poisoning_level_, PoisoningMitigationLevel::kDontPoison); + opcode |= MiscField::encode(kMemoryAccessPoisoned); + } + ImmediateMode immediate_mode = kLoadStoreImm32; + MachineRepresentation rep = MachineRepresentation::kCompressedPointer; + EmitLoad(this, value, opcode, immediate_mode, rep, node); + } else { + Emit(kArm64DecompressPointer, g.DefineAsRegister(node), + g.UseRegister(value)); + } } void InstructionSelector::VisitChangeCompressedSignedToTaggedSigned( Node* node) { Arm64OperandGenerator g(this); Node* const value = node->InputAt(0); - Emit(kArm64DecompressSigned, g.DefineAsRegister(node), g.UseRegister(value)); + if ((value->opcode() == IrOpcode::kLoad || + value->opcode() == IrOpcode::kPoisonedLoad) && + CanCover(node, value)) { + DCHECK_EQ(LoadRepresentationOf(value->op()).representation(), + MachineRepresentation::kCompressedSigned); + InstructionCode opcode = kArm64LdrDecompressTaggedSigned; + if (value->opcode() == IrOpcode::kPoisonedLoad) { + CHECK_NE(poisoning_level_, PoisoningMitigationLevel::kDontPoison); + opcode |= MiscField::encode(kMemoryAccessPoisoned); + } + ImmediateMode immediate_mode = kLoadStoreImm32; + MachineRepresentation rep = MachineRepresentation::kCompressedSigned; + EmitLoad(this, value, opcode, immediate_mode, rep, node); + } else { + Emit(kArm64DecompressSigned, g.DefineAsRegister(node), + g.UseRegister(value)); + } } void InstructionSelector::VisitTruncateInt64ToInt32(Node* node) { Arm64OperandGenerator g(this); - Node* value = 
node->InputAt(0); // The top 32 bits in the 64-bit register will be undefined, and // must not be used by a dependent node. - Emit(kArchNop, g.DefineSameAsFirst(node), g.UseRegister(value)); + EmitIdentity(node); } void InstructionSelector::VisitFloat64Mod(Node* node) { @@ -2451,7 +2498,7 @@ void InstructionSelector::VisitSwitch(Node* node, const SwitchInfo& sw) { size_t table_time_cost = 3; size_t lookup_space_cost = 3 + 2 * sw.case_count(); size_t lookup_time_cost = sw.case_count(); - if (sw.case_count() > 0 && + if (sw.case_count() > 4 && table_space_cost + 3 * table_time_cost <= lookup_space_cost + 3 * lookup_time_cost && sw.min_value() > std::numeric_limits::min() && @@ -2755,6 +2802,11 @@ void InstructionSelector::VisitFloat64Mul(Node* node) { return VisitRRR(this, kArm64Float64Mul, node); } +void InstructionSelector::VisitMemoryBarrier(Node* node) { + Arm64OperandGenerator g(this); + Emit(kArm64DmbIsh, g.NoOutput()); +} + void InstructionSelector::VisitWord32AtomicLoad(Node* node) { LoadRepresentation load_rep = LoadRepresentationOf(node->op()); ArchOpcode opcode = kArchNop; diff --git a/deps/v8/src/compiler/backend/code-generator.cc b/deps/v8/src/compiler/backend/code-generator.cc index bb83a8497bbe3e..9ce92dadaa9469 100644 --- a/deps/v8/src/compiler/backend/code-generator.cc +++ b/deps/v8/src/compiler/backend/code-generator.cc @@ -1210,6 +1210,10 @@ void CodeGenerator::AddTranslationForOperand(Translation* translation, DCHECK_EQ(MachineRepresentation::kTagged, type.representation()); literal = DeoptimizationLiteral(constant.ToHeapObject()); break; + case Constant::kCompressedHeapObject: + DCHECK_EQ(MachineRepresentation::kCompressed, type.representation()); + literal = DeoptimizationLiteral(constant.ToHeapObject()); + break; case Constant::kDelayedStringConstant: DCHECK_EQ(MachineRepresentation::kTagged, type.representation()); literal = DeoptimizationLiteral(constant.ToDelayedStringConstant()); diff --git a/deps/v8/src/compiler/backend/ia32/code-generator-ia32.cc b/deps/v8/src/compiler/backend/ia32/code-generator-ia32.cc index 0e61c22cbbcd51..ed4be7a47cb296 100644 --- a/deps/v8/src/compiler/backend/ia32/code-generator-ia32.cc +++ b/deps/v8/src/compiler/backend/ia32/code-generator-ia32.cc @@ -81,6 +81,8 @@ class IA32OperandConverter : public InstructionOperandConverter { return Immediate(constant.ToExternalReference()); case Constant::kHeapObject: return Immediate(constant.ToHeapObject()); + case Constant::kCompressedHeapObject: + break; case Constant::kDelayedStringConstant: return Immediate::EmbeddedStringConstant( constant.ToDelayedStringConstant()); @@ -462,6 +464,19 @@ class OutOfLineRecordWrite final : public OutOfLineCode { __ opcode(i.OutputSimd128Register(), i.InputOperand(1), imm); \ } +#define ASSEMBLE_SIMD_ALL_TRUE(opcode) \ + do { \ + Register dst = i.OutputRegister(); \ + Operand src = i.InputOperand(0); \ + Register tmp = i.TempRegister(0); \ + __ mov(tmp, Immediate(1)); \ + __ xor_(dst, dst); \ + __ Pxor(kScratchDoubleReg, kScratchDoubleReg); \ + __ opcode(kScratchDoubleReg, src); \ + __ Ptest(kScratchDoubleReg, kScratchDoubleReg); \ + __ cmov(zero, dst, tmp); \ + } while (false) + void CodeGenerator::AssembleDeconstructFrame() { __ mov(esp, ebp); __ pop(ebp); @@ -674,8 +689,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( } case kArchCallBuiltinPointer: { DCHECK(!HasImmediateInput(instr, 0)); - Register builtin_pointer = i.InputRegister(0); - __ CallBuiltinPointer(builtin_pointer); + Register builtin_index = i.InputRegister(0); + __ 
CallBuiltinByIndex(builtin_index); RecordCallPosition(instr); frame_access_state()->ClearSPDelta(); break; @@ -870,17 +885,15 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( case kArchComment: __ RecordComment(reinterpret_cast(i.InputInt32(0))); break; - case kArchDebugAbort: + case kArchAbortCSAAssert: DCHECK(i.InputRegister(0) == edx); - if (!frame_access_state()->has_frame()) { + { // We don't actually want to generate a pile of code for this, so just // claim there is a stack frame, without generating one. FrameScope scope(tasm(), StackFrame::NONE); - __ Call(isolate()->builtins()->builtin_handle(Builtins::kAbortJS), - RelocInfo::CODE_TARGET); - } else { - __ Call(isolate()->builtins()->builtin_handle(Builtins::kAbortJS), - RelocInfo::CODE_TARGET); + __ Call( + isolate()->builtins()->builtin_handle(Builtins::kAbortCSAAssert), + RelocInfo::CODE_TARGET); } __ int3(); break; @@ -1204,7 +1217,10 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( case kArchWordPoisonOnSpeculation: // TODO(860429): Remove remaining poisoning infrastructure on ia32. UNREACHABLE(); - case kLFence: + case kIA32MFence: + __ mfence(); + break; + case kIA32LFence: __ lfence(); break; case kSSEFloat32Cmp: @@ -3663,18 +3679,18 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( __ cmov(zero, dst, tmp); break; } + // Need to split up all the different lane structures because the + // comparison instruction used matters, e.g. given 0xff00, pcmpeqb returns + // 0x0011, pcmpeqw returns 0x0000, ptest will set ZF to 0 and 1 + // respectively. case kIA32S1x4AllTrue: + ASSEMBLE_SIMD_ALL_TRUE(Pcmpeqd); + break; case kIA32S1x8AllTrue: + ASSEMBLE_SIMD_ALL_TRUE(pcmpeqw); + break; case kIA32S1x16AllTrue: { - Register dst = i.OutputRegister(); - Operand src = i.InputOperand(0); - Register tmp = i.TempRegister(0); - __ mov(tmp, Immediate(1)); - __ xor_(dst, dst); - __ Pcmpeqd(kScratchDoubleReg, kScratchDoubleReg); - __ Pxor(kScratchDoubleReg, src); - __ Ptest(kScratchDoubleReg, kScratchDoubleReg); - __ cmov(zero, dst, tmp); + ASSEMBLE_SIMD_ALL_TRUE(pcmpeqb); break; } case kIA32StackCheck: { @@ -4224,6 +4240,11 @@ void CodeGenerator::AssembleConstructFrame() { if (call_descriptor->IsCFunctionCall()) { __ push(ebp); __ mov(ebp, esp); + if (info()->GetOutputStackFrameType() == StackFrame::C_WASM_ENTRY) { + __ Push(Immediate(StackFrame::TypeToMarker(StackFrame::C_WASM_ENTRY))); + // Reserve stack space for saving the c_entry_fp later. + __ AllocateStackSpace(kSystemPointerSize); + } } else if (call_descriptor->IsJSFunctionCall()) { __ Prologue(); if (call_descriptor->PushArgumentCount()) { @@ -4254,8 +4275,8 @@ void CodeGenerator::AssembleConstructFrame() { } } - int required_slots = frame()->GetTotalFrameSlotCount() - - call_descriptor->CalculateFixedFrameSize(); + int required_slots = + frame()->GetTotalFrameSlotCount() - frame()->GetFixedSlotCount(); if (info()->is_osr()) { // TurboFan OSR-compiled functions cannot be entered directly. 
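The ASSEMBLE_SIMD_ALL_TRUE uses above pick the comparison width to match the lane width, for the reason given in the comment: a 16-bit lane such as 0xff00 is non-zero, yet contains a zero byte. A scalar model of the S1x8 case (illustrative helper, not V8 code):

    #include <cstdint>
    #include <cstring>

    // kIA32S1x8AllTrue modeled on 16-bit lanes: pcmpeqw-against-zero plus
    // ptest answers "is any 16-bit lane zero?"; byte-wise pcmpeqb would
    // wrongly report the non-zero lane 0xff00 as containing a zero.
    bool AllTrue16x8(const uint8_t bytes[16]) {
      for (int i = 0; i < 16; i += 2) {
        uint16_t lane;
        std::memcpy(&lane, bytes + i, sizeof(lane));
        if (lane == 0) return false;
      }
      return true;
    }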
@@ -4629,6 +4650,7 @@ void CodeGenerator::AssembleJumpTable(Label** targets, size_t target_count) { #undef ASSEMBLE_MOVX #undef ASSEMBLE_SIMD_PUNPCK_SHUFFLE #undef ASSEMBLE_SIMD_IMM_SHUFFLE +#undef ASSEMBLE_SIMD_ALL_TRUE } // namespace compiler } // namespace internal diff --git a/deps/v8/src/compiler/backend/ia32/instruction-codes-ia32.h b/deps/v8/src/compiler/backend/ia32/instruction-codes-ia32.h index 60ed1cc29cdd33..56dea82fe2c29a 100644 --- a/deps/v8/src/compiler/backend/ia32/instruction-codes-ia32.h +++ b/deps/v8/src/compiler/backend/ia32/instruction-codes-ia32.h @@ -44,7 +44,8 @@ namespace compiler { V(IA32Tzcnt) \ V(IA32Popcnt) \ V(IA32Bswap) \ - V(LFence) \ + V(IA32MFence) \ + V(IA32LFence) \ V(SSEFloat32Cmp) \ V(SSEFloat32Add) \ V(SSEFloat32Sub) \ diff --git a/deps/v8/src/compiler/backend/ia32/instruction-scheduler-ia32.cc b/deps/v8/src/compiler/backend/ia32/instruction-scheduler-ia32.cc index f2d5cc0d179d8f..15f69b991c7288 100644 --- a/deps/v8/src/compiler/backend/ia32/instruction-scheduler-ia32.cc +++ b/deps/v8/src/compiler/backend/ia32/instruction-scheduler-ia32.cc @@ -365,7 +365,8 @@ int InstructionScheduler::GetTargetInstructionFlags( case kIA32PushFloat64: case kIA32PushSimd128: case kIA32Poke: - case kLFence: + case kIA32MFence: + case kIA32LFence: return kHasSideEffect; case kIA32Word32AtomicPairLoad: diff --git a/deps/v8/src/compiler/backend/ia32/instruction-selector-ia32.cc b/deps/v8/src/compiler/backend/ia32/instruction-selector-ia32.cc index f81b88823e594c..e1fc66b4ba4843 100644 --- a/deps/v8/src/compiler/backend/ia32/instruction-selector-ia32.cc +++ b/deps/v8/src/compiler/backend/ia32/instruction-selector-ia32.cc @@ -272,9 +272,9 @@ void InstructionSelector::VisitStackSlot(Node* node) { sequence()->AddImmediate(Constant(slot)), 0, nullptr); } -void InstructionSelector::VisitDebugAbort(Node* node) { +void InstructionSelector::VisitAbortCSAAssert(Node* node) { IA32OperandGenerator g(this); - Emit(kArchDebugAbort, g.NoOutput(), g.UseFixed(node->InputAt(0), edx)); + Emit(kArchAbortCSAAssert, g.NoOutput(), g.UseFixed(node->InputAt(0), edx)); } void InstructionSelector::VisitLoad(Node* node) { @@ -1593,6 +1593,11 @@ void InstructionSelector::VisitFloat64SilenceNaN(Node* node) { g.UseRegister(node->InputAt(0))); } +void InstructionSelector::VisitMemoryBarrier(Node* node) { + IA32OperandGenerator g(this); + Emit(kIA32MFence, g.NoOutput()); +} + void InstructionSelector::VisitWord32AtomicLoad(Node* node) { LoadRepresentation load_rep = LoadRepresentationOf(node->op()); DCHECK(load_rep.representation() == MachineRepresentation::kWord8 || diff --git a/deps/v8/src/compiler/backend/instruction-codes.h b/deps/v8/src/compiler/backend/instruction-codes.h index 068164b57e4eba..1085de2196f8cf 100644 --- a/deps/v8/src/compiler/backend/instruction-codes.h +++ b/deps/v8/src/compiler/backend/instruction-codes.h @@ -82,7 +82,7 @@ inline RecordWriteMode WriteBarrierKindToRecordWriteMode( V(ArchLookupSwitch) \ V(ArchTableSwitch) \ V(ArchNop) \ - V(ArchDebugAbort) \ + V(ArchAbortCSAAssert) \ V(ArchDebugBreak) \ V(ArchComment) \ V(ArchThrowTerminator) \ diff --git a/deps/v8/src/compiler/backend/instruction-scheduler.cc b/deps/v8/src/compiler/backend/instruction-scheduler.cc index b0637c175df927..538af71bb469f3 100644 --- a/deps/v8/src/compiler/backend/instruction-scheduler.cc +++ b/deps/v8/src/compiler/backend/instruction-scheduler.cc @@ -298,7 +298,7 @@ int InstructionScheduler::GetInstructionFlags(const Instruction* instr) const { case kArchTailCallCodeObject: case kArchTailCallAddress: case 
kArchTailCallWasm: - case kArchDebugAbort: + case kArchAbortCSAAssert: case kArchDebugBreak: return kHasSideEffect; diff --git a/deps/v8/src/compiler/backend/instruction-selector-impl.h b/deps/v8/src/compiler/backend/instruction-selector-impl.h index 21edc2f503853d..a3f62e7ba40c45 100644 --- a/deps/v8/src/compiler/backend/instruction-selector-impl.h +++ b/deps/v8/src/compiler/backend/instruction-selector-impl.h @@ -29,8 +29,8 @@ inline bool operator<(const CaseInfo& l, const CaseInfo& r) { // Helper struct containing data about a table or lookup switch. class SwitchInfo { public: - SwitchInfo(ZoneVector& cases, int32_t min_value, int32_t max_value, - BasicBlock* default_branch) + SwitchInfo(ZoneVector& cases, // NOLINT(runtime/references) + int32_t min_value, int32_t max_value, BasicBlock* default_branch) : cases_(cases), min_value_(min_value), max_value_(max_value), @@ -109,13 +109,9 @@ class OperandGenerator { } InstructionOperand DefineAsConstant(Node* node) { - return DefineAsConstant(node, ToConstant(node)); - } - - InstructionOperand DefineAsConstant(Node* node, Constant constant) { selector()->MarkAsDefined(node); int virtual_register = GetVReg(node); - sequence()->AddConstant(virtual_register, constant); + sequence()->AddConstant(virtual_register, ToConstant(node)); return ConstantOperand(virtual_register); } @@ -326,6 +322,8 @@ class OperandGenerator { } case IrOpcode::kHeapConstant: return Constant(HeapConstantOf(node->op())); + case IrOpcode::kCompressedHeapConstant: + return Constant(HeapConstantOf(node->op()), true); case IrOpcode::kDelayedStringConstant: return Constant(StringConstantBaseOf(node->op())); case IrOpcode::kDeadValue: { diff --git a/deps/v8/src/compiler/backend/instruction-selector.cc b/deps/v8/src/compiler/backend/instruction-selector.cc index 2b748a188b9d34..11ba9104059453 100644 --- a/deps/v8/src/compiler/backend/instruction-selector.cc +++ b/deps/v8/src/compiler/backend/instruction-selector.cc @@ -8,6 +8,7 @@ #include "src/base/adapters.h" #include "src/codegen/assembler-inl.h" +#include "src/codegen/tick-counter.h" #include "src/compiler/backend/instruction-selector-impl.h" #include "src/compiler/compiler-source-position-table.h" #include "src/compiler/node-matchers.h" @@ -24,7 +25,7 @@ InstructionSelector::InstructionSelector( Zone* zone, size_t node_count, Linkage* linkage, InstructionSequence* sequence, Schedule* schedule, SourcePositionTable* source_positions, Frame* frame, - EnableSwitchJumpTable enable_switch_jump_table, + EnableSwitchJumpTable enable_switch_jump_table, TickCounter* tick_counter, SourcePositionMode source_position_mode, Features features, EnableScheduling enable_scheduling, EnableRootsRelativeAddressing enable_roots_relative_addressing, @@ -54,7 +55,8 @@ InstructionSelector::InstructionSelector( frame_(frame), instruction_selection_failed_(false), instr_origins_(sequence->zone()), - trace_turbo_(trace_turbo) { + trace_turbo_(trace_turbo), + tick_counter_(tick_counter) { instructions_.reserve(node_count); continuation_inputs_.reserve(5); continuation_outputs_.reserve(2); @@ -1078,7 +1080,8 @@ void InstructionSelector::VisitBlock(BasicBlock* block) { node->opcode() == IrOpcode::kCall || node->opcode() == IrOpcode::kCallWithCallerSavedRegisters || node->opcode() == IrOpcode::kProtectedLoad || - node->opcode() == IrOpcode::kProtectedStore) { + node->opcode() == IrOpcode::kProtectedStore || + node->opcode() == IrOpcode::kMemoryBarrier) { ++effect_level; } } @@ -1251,6 +1254,7 @@ void InstructionSelector::MarkPairProjectionsAsWord32(Node* 
node) { } void InstructionSelector::VisitNode(Node* node) { + tick_counter_->DoTick(); DCHECK_NOT_NULL(schedule()->block(node)); // should only use scheduled nodes. switch (node->opcode()) { case IrOpcode::kStart: @@ -1301,6 +1305,8 @@ void InstructionSelector::VisitNode(Node* node) { return MarkAsFloat64(node), VisitConstant(node); case IrOpcode::kHeapConstant: return MarkAsReference(node), VisitConstant(node); + case IrOpcode::kCompressedHeapConstant: + return MarkAsCompressed(node), VisitConstant(node); case IrOpcode::kNumberConstant: { double value = OpParameter(node->op()); if (!IsSmiDouble(value)) MarkAsReference(node); @@ -1324,8 +1330,8 @@ void InstructionSelector::VisitNode(Node* node) { case IrOpcode::kStateValues: case IrOpcode::kObjectState: return; - case IrOpcode::kDebugAbort: - VisitDebugAbort(node); + case IrOpcode::kAbortCSAAssert: + VisitAbortCSAAssert(node); return; case IrOpcode::kDebugBreak: VisitDebugBreak(node); @@ -1474,6 +1480,7 @@ void InstructionSelector::VisitNode(Node* node) { case IrOpcode::kUint64Mod: return MarkAsWord64(node), VisitUint64Mod(node); case IrOpcode::kBitcastTaggedToWord: + case IrOpcode::kBitcastTaggedSignedToWord: return MarkAsRepresentation(MachineType::PointerRepresentation(), node), VisitBitcastTaggedToWord(node); case IrOpcode::kBitcastWordToTagged: @@ -1734,6 +1741,8 @@ void InstructionSelector::VisitNode(Node* node) { MarkAsWord32(node); MarkPairProjectionsAsWord32(node); return VisitWord32PairSar(node); + case IrOpcode::kMemoryBarrier: + return VisitMemoryBarrier(node); case IrOpcode::kWord32AtomicLoad: { LoadRepresentation type = LoadRepresentationOf(node->op()); MarkAsRepresentation(type.representation(), node); @@ -1808,6 +1817,24 @@ void InstructionSelector::VisitNode(Node* node) { case IrOpcode::kUnsafePointerAdd: MarkAsRepresentation(MachineType::PointerRepresentation(), node); return VisitUnsafePointerAdd(node); + case IrOpcode::kF64x2Splat: + return MarkAsSimd128(node), VisitF64x2Splat(node); + case IrOpcode::kF64x2ExtractLane: + return MarkAsFloat64(node), VisitF64x2ExtractLane(node); + case IrOpcode::kF64x2ReplaceLane: + return MarkAsSimd128(node), VisitF64x2ReplaceLane(node); + case IrOpcode::kF64x2Abs: + return MarkAsSimd128(node), VisitF64x2Abs(node); + case IrOpcode::kF64x2Neg: + return MarkAsSimd128(node), VisitF64x2Neg(node); + case IrOpcode::kF64x2Eq: + return MarkAsSimd128(node), VisitF64x2Eq(node); + case IrOpcode::kF64x2Ne: + return MarkAsSimd128(node), VisitF64x2Ne(node); + case IrOpcode::kF64x2Lt: + return MarkAsSimd128(node), VisitF64x2Lt(node); + case IrOpcode::kF64x2Le: + return MarkAsSimd128(node), VisitF64x2Le(node); case IrOpcode::kF32x4Splat: return MarkAsSimd128(node), VisitF32x4Splat(node); case IrOpcode::kF32x4ExtractLane: @@ -1846,6 +1873,38 @@ void InstructionSelector::VisitNode(Node* node) { return MarkAsSimd128(node), VisitF32x4Lt(node); case IrOpcode::kF32x4Le: return MarkAsSimd128(node), VisitF32x4Le(node); + case IrOpcode::kI64x2Splat: + return MarkAsSimd128(node), VisitI64x2Splat(node); + case IrOpcode::kI64x2ExtractLane: + return MarkAsWord64(node), VisitI64x2ExtractLane(node); + case IrOpcode::kI64x2ReplaceLane: + return MarkAsSimd128(node), VisitI64x2ReplaceLane(node); + case IrOpcode::kI64x2Neg: + return MarkAsSimd128(node), VisitI64x2Neg(node); + case IrOpcode::kI64x2Shl: + return MarkAsSimd128(node), VisitI64x2Shl(node); + case IrOpcode::kI64x2ShrS: + return MarkAsSimd128(node), VisitI64x2ShrS(node); + case IrOpcode::kI64x2Add: + return MarkAsSimd128(node), VisitI64x2Add(node); + case 
IrOpcode::kI64x2Sub: + return MarkAsSimd128(node), VisitI64x2Sub(node); + case IrOpcode::kI64x2Mul: + return MarkAsSimd128(node), VisitI64x2Mul(node); + case IrOpcode::kI64x2Eq: + return MarkAsSimd128(node), VisitI64x2Eq(node); + case IrOpcode::kI64x2Ne: + return MarkAsSimd128(node), VisitI64x2Ne(node); + case IrOpcode::kI64x2GtS: + return MarkAsSimd128(node), VisitI64x2GtS(node); + case IrOpcode::kI64x2GeS: + return MarkAsSimd128(node), VisitI64x2GeS(node); + case IrOpcode::kI64x2ShrU: + return MarkAsSimd128(node), VisitI64x2ShrU(node); + case IrOpcode::kI64x2GtU: + return MarkAsSimd128(node), VisitI64x2GtU(node); + case IrOpcode::kI64x2GeU: + return MarkAsSimd128(node), VisitI64x2GeU(node); case IrOpcode::kI32x4Splat: return MarkAsSimd128(node), VisitI32x4Splat(node); case IrOpcode::kI32x4ExtractLane: @@ -2028,6 +2087,10 @@ void InstructionSelector::VisitNode(Node* node) { return MarkAsSimd128(node), VisitS128Select(node); case IrOpcode::kS8x16Shuffle: return MarkAsSimd128(node), VisitS8x16Shuffle(node); + case IrOpcode::kS1x2AnyTrue: + return MarkAsWord32(node), VisitS1x2AnyTrue(node); + case IrOpcode::kS1x2AllTrue: + return MarkAsWord32(node), VisitS1x2AllTrue(node); case IrOpcode::kS1x4AnyTrue: return MarkAsWord32(node), VisitS1x4AnyTrue(node); case IrOpcode::kS1x4AllTrue: @@ -2489,6 +2552,36 @@ void InstructionSelector::VisitWord64AtomicCompareExchange(Node* node) { #endif // !V8_TARGET_ARCH_X64 && !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_PPC // !V8_TARGET_ARCH_MIPS64 && !V8_TARGET_ARCH_S390 +#if !V8_TARGET_ARCH_X64 +void InstructionSelector::VisitF64x2Splat(Node* node) { UNIMPLEMENTED(); } +void InstructionSelector::VisitF64x2ExtractLane(Node* node) { UNIMPLEMENTED(); } +void InstructionSelector::VisitF64x2ReplaceLane(Node* node) { UNIMPLEMENTED(); } +void InstructionSelector::VisitF64x2Abs(Node* node) { UNIMPLEMENTED(); } +void InstructionSelector::VisitF64x2Neg(Node* node) { UNIMPLEMENTED(); } +void InstructionSelector::VisitF64x2Eq(Node* node) { UNIMPLEMENTED(); } +void InstructionSelector::VisitF64x2Ne(Node* node) { UNIMPLEMENTED(); } +void InstructionSelector::VisitF64x2Lt(Node* node) { UNIMPLEMENTED(); } +void InstructionSelector::VisitF64x2Le(Node* node) { UNIMPLEMENTED(); } +void InstructionSelector::VisitI64x2Splat(Node* node) { UNIMPLEMENTED(); } +void InstructionSelector::VisitI64x2ExtractLane(Node* node) { UNIMPLEMENTED(); } +void InstructionSelector::VisitI64x2ReplaceLane(Node* node) { UNIMPLEMENTED(); } +void InstructionSelector::VisitI64x2Neg(Node* node) { UNIMPLEMENTED(); } +void InstructionSelector::VisitS1x2AnyTrue(Node* node) { UNIMPLEMENTED(); } +void InstructionSelector::VisitS1x2AllTrue(Node* node) { UNIMPLEMENTED(); } +void InstructionSelector::VisitI64x2Shl(Node* node) { UNIMPLEMENTED(); } +void InstructionSelector::VisitI64x2ShrS(Node* node) { UNIMPLEMENTED(); } +void InstructionSelector::VisitI64x2Add(Node* node) { UNIMPLEMENTED(); } +void InstructionSelector::VisitI64x2Sub(Node* node) { UNIMPLEMENTED(); } +void InstructionSelector::VisitI64x2Mul(Node* node) { UNIMPLEMENTED(); } +void InstructionSelector::VisitI64x2Eq(Node* node) { UNIMPLEMENTED(); } +void InstructionSelector::VisitI64x2Ne(Node* node) { UNIMPLEMENTED(); } +void InstructionSelector::VisitI64x2GtS(Node* node) { UNIMPLEMENTED(); } +void InstructionSelector::VisitI64x2GeS(Node* node) { UNIMPLEMENTED(); } +void InstructionSelector::VisitI64x2ShrU(Node* node) { UNIMPLEMENTED(); } +void InstructionSelector::VisitI64x2GtU(Node* node) { UNIMPLEMENTED(); } +void 
InstructionSelector::VisitI64x2GeU(Node* node) { UNIMPLEMENTED(); } +#endif // !V8_TARGET_ARCH_X64 + void InstructionSelector::VisitFinishRegion(Node* node) { EmitIdentity(node); } void InstructionSelector::VisitParameter(Node* node) { @@ -2962,7 +3055,7 @@ void InstructionSelector::CanonicalizeShuffle(bool inputs_equal, void InstructionSelector::CanonicalizeShuffle(Node* node, uint8_t* shuffle, bool* is_swizzle) { // Get raw shuffle indices. - memcpy(shuffle, OpParameter(node->op()), kSimd128Size); + memcpy(shuffle, S8x16ShuffleOf(node->op()), kSimd128Size); bool needs_swap; bool inputs_equal = GetVirtualRegister(node->InputAt(0)) == GetVirtualRegister(node->InputAt(1)); diff --git a/deps/v8/src/compiler/backend/instruction-selector.h b/deps/v8/src/compiler/backend/instruction-selector.h index 4f6b1c5971d8fe..16f88bb5167462 100644 --- a/deps/v8/src/compiler/backend/instruction-selector.h +++ b/deps/v8/src/compiler/backend/instruction-selector.h @@ -19,6 +19,9 @@ namespace v8 { namespace internal { + +class TickCounter; + namespace compiler { // Forward declarations. @@ -266,7 +269,7 @@ class V8_EXPORT_PRIVATE InstructionSelector final { Zone* zone, size_t node_count, Linkage* linkage, InstructionSequence* sequence, Schedule* schedule, SourcePositionTable* source_positions, Frame* frame, - EnableSwitchJumpTable enable_switch_jump_table, + EnableSwitchJumpTable enable_switch_jump_table, TickCounter* tick_counter, SourcePositionMode source_position_mode = kCallSourcePositions, Features features = SupportedFeatures(), EnableScheduling enable_scheduling = FLAG_turbo_instruction_scheduling @@ -496,11 +499,15 @@ class V8_EXPORT_PRIVATE InstructionSelector final { VectorSlotPair const& feedback, Node* frame_state); - void EmitTableSwitch(const SwitchInfo& sw, InstructionOperand& index_operand); - void EmitLookupSwitch(const SwitchInfo& sw, - InstructionOperand& value_operand); - void EmitBinarySearchSwitch(const SwitchInfo& sw, - InstructionOperand& value_operand); + void EmitTableSwitch( + const SwitchInfo& sw, + InstructionOperand& index_operand); // NOLINT(runtime/references) + void EmitLookupSwitch( + const SwitchInfo& sw, + InstructionOperand& value_operand); // NOLINT(runtime/references) + void EmitBinarySearchSwitch( + const SwitchInfo& sw, + InstructionOperand& value_operand); // NOLINT(runtime/references) void TryRename(InstructionOperand* op); int GetRename(int virtual_register); @@ -604,6 +611,8 @@ class V8_EXPORT_PRIVATE InstructionSelector final { MACHINE_SIMD_OP_LIST(DECLARE_GENERATOR) #undef DECLARE_GENERATOR + // Visit the load node with a value and opcode to replace with. 
+ void VisitLoad(Node* node, Node* value, InstructionCode opcode); void VisitFinishRegion(Node* node); void VisitParameter(Node* node); void VisitIfException(Node* node); @@ -772,6 +781,7 @@ class V8_EXPORT_PRIVATE InstructionSelector final { bool instruction_selection_failed_; ZoneVector> instr_origins_; EnableTraceTurboJson trace_turbo_; + TickCounter* const tick_counter_; }; } // namespace compiler diff --git a/deps/v8/src/compiler/backend/instruction.cc b/deps/v8/src/compiler/backend/instruction.cc index c52dca61a1a237..09c7fe22c5f03e 100644 --- a/deps/v8/src/compiler/backend/instruction.cc +++ b/deps/v8/src/compiler/backend/instruction.cc @@ -530,7 +530,7 @@ Constant::Constant(RelocatablePtrConstantInfo info) { } Handle Constant::ToHeapObject() const { - DCHECK_EQ(kHeapObject, type()); + DCHECK(kHeapObject == type() || kCompressedHeapObject == type()); Handle value( reinterpret_cast(static_cast(value_))); return value; @@ -561,7 +561,8 @@ std::ostream& operator<<(std::ostream& os, const Constant& constant) { return os << constant.ToFloat64().value(); case Constant::kExternalReference: return os << constant.ToExternalReference().address(); - case Constant::kHeapObject: + case Constant::kHeapObject: // Fall through. + case Constant::kCompressedHeapObject: return os << Brief(*constant.ToHeapObject()); case Constant::kRpoNumber: return os << "RPO" << constant.ToRpoNumber().ToInt(); diff --git a/deps/v8/src/compiler/backend/instruction.h b/deps/v8/src/compiler/backend/instruction.h index 61875a1a17a1dc..9b322040551df4 100644 --- a/deps/v8/src/compiler/backend/instruction.h +++ b/deps/v8/src/compiler/backend/instruction.h @@ -1007,6 +1007,7 @@ class V8_EXPORT_PRIVATE Constant final { kFloat32, kFloat64, kExternalReference, + kCompressedHeapObject, kHeapObject, kRpoNumber, kDelayedStringConstant @@ -1018,8 +1019,9 @@ class V8_EXPORT_PRIVATE Constant final { explicit Constant(double v) : type_(kFloat64), value_(bit_cast(v)) {} explicit Constant(ExternalReference ref) : type_(kExternalReference), value_(bit_cast(ref.address())) {} - explicit Constant(Handle obj) - : type_(kHeapObject), value_(bit_cast(obj)) {} + explicit Constant(Handle obj, bool is_compressed = false) + : type_(is_compressed ? kCompressedHeapObject : kHeapObject), + value_(bit_cast(obj)) {} explicit Constant(RpoNumber rpo) : type_(kRpoNumber), value_(rpo.ToInt()) {} explicit Constant(const StringConstantBase* str) : type_(kDelayedStringConstant), value_(bit_cast(str)) {} diff --git a/deps/v8/src/compiler/backend/jump-threading.h b/deps/v8/src/compiler/backend/jump-threading.h index e23dd453598e5e..ce60ebcb2e3423 100644 --- a/deps/v8/src/compiler/backend/jump-threading.h +++ b/deps/v8/src/compiler/backend/jump-threading.h @@ -17,14 +17,17 @@ class V8_EXPORT_PRIVATE JumpThreading { public: // Compute the forwarding map of basic blocks to their ultimate destination. // Returns {true} if there is at least one block that is forwarded. - static bool ComputeForwarding(Zone* local_zone, ZoneVector& result, - InstructionSequence* code, bool frame_at_start); + static bool ComputeForwarding( + Zone* local_zone, + ZoneVector& result, // NOLINT(runtime/references) + InstructionSequence* code, bool frame_at_start); // Rewrite the instructions to forward jumps and branches. // May also negate some branches. 
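For context on the two jump-threading entry points whose signatures are annotated above: ComputeForwarding collapses chains of blocks that do nothing but jump. A compact sketch of that computation, with the walk bounded so a jump cycle cannot loop forever; the data layout here is invented for illustration:

    #include <vector>

    // jump_target[b] is the sole successor of block b if b is a bare jump,
    // or -1 if b has real work. result[b] is b's ultimate destination.
    std::vector<int> ComputeForwarding(const std::vector<int>& jump_target) {
      std::vector<int> result(jump_target.size());
      for (size_t b = 0; b < jump_target.size(); ++b) {
        int target = static_cast<int>(b);
        for (size_t step = 0; step < jump_target.size(); ++step) {
          if (jump_target[target] < 0) break;  // not a bare jump: stop here
          target = jump_target[target];
        }
        result[b] = target;
      }
      return result;
    }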
- static void ApplyForwarding(Zone* local_zone, - ZoneVector& forwarding, - InstructionSequence* code); + static void ApplyForwarding( + Zone* local_zone, + ZoneVector& forwarding, // NOLINT(runtime/references) + InstructionSequence* code); }; } // namespace compiler diff --git a/deps/v8/src/compiler/backend/live-range-separator.cc b/deps/v8/src/compiler/backend/live-range-separator.cc index 6ed04160450789..0a0aadfad143e0 100644 --- a/deps/v8/src/compiler/backend/live-range-separator.cc +++ b/deps/v8/src/compiler/backend/live-range-separator.cc @@ -9,15 +9,16 @@ namespace v8 { namespace internal { namespace compiler { -#define TRACE(...) \ - do { \ - if (FLAG_trace_alloc) PrintF(__VA_ARGS__); \ +#define TRACE_COND(cond, ...) \ + do { \ + if (cond) PrintF(__VA_ARGS__); \ } while (false) namespace { void CreateSplinter(TopLevelLiveRange* range, RegisterAllocationData* data, - LifetimePosition first_cut, LifetimePosition last_cut) { + LifetimePosition first_cut, LifetimePosition last_cut, + bool trace_alloc) { DCHECK(!range->IsSplinter()); // We can ignore ranges that live solely in deferred blocks. // If a range ends right at the end of a deferred block, it is marked by @@ -49,9 +50,10 @@ void CreateSplinter(TopLevelLiveRange* range, RegisterAllocationData* data, range->SetSplinter(splinter); } Zone* zone = data->allocation_zone(); - TRACE("creating splinter %d for range %d between %d and %d\n", - range->splinter()->vreg(), range->vreg(), start.ToInstructionIndex(), - end.ToInstructionIndex()); + TRACE_COND(trace_alloc, + "creating splinter %d for range %d between %d and %d\n", + range->splinter()->vreg(), range->vreg(), + start.ToInstructionIndex(), end.ToInstructionIndex()); range->Splinter(start, end, zone); } } @@ -102,7 +104,8 @@ void SplinterLiveRange(TopLevelLiveRange* range, RegisterAllocationData* data) { current_block->last_instruction_index()); } else { if (first_cut.IsValid()) { - CreateSplinter(range, data, first_cut, last_cut); + CreateSplinter(range, data, first_cut, last_cut, + data->is_trace_alloc()); first_cut = LifetimePosition::Invalid(); last_cut = LifetimePosition::Invalid(); } @@ -116,7 +119,8 @@ void SplinterLiveRange(TopLevelLiveRange* range, RegisterAllocationData* data) { // have to connect blocks anyway, so we can also splinter to the end of the // block, too. if (first_cut.IsValid()) { - CreateSplinter(range, data, first_cut, interval_end); + CreateSplinter(range, data, first_cut, interval_end, + data->is_trace_alloc()); first_cut = LifetimePosition::Invalid(); last_cut = LifetimePosition::Invalid(); } @@ -186,7 +190,7 @@ void LiveRangeMerger::Merge() { } } -#undef TRACE +#undef TRACE_COND } // namespace compiler } // namespace internal diff --git a/deps/v8/src/compiler/backend/mips/code-generator-mips.cc b/deps/v8/src/compiler/backend/mips/code-generator-mips.cc index 1f793868211010..5cec4a8a16beff 100644 --- a/deps/v8/src/compiler/backend/mips/code-generator-mips.cc +++ b/deps/v8/src/compiler/backend/mips/code-generator-mips.cc @@ -80,6 +80,7 @@ class MipsOperandConverter final : public InstructionOperandConverter { return Operand::EmbeddedNumber(constant.ToFloat64().value()); case Constant::kInt64: case Constant::kExternalReference: + case Constant::kCompressedHeapObject: case Constant::kHeapObject: // TODO(plind): Maybe we should handle ExtRef & HeapObj here? // maybe not done on arm due to const pool ?? 
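The TRACE to TRACE_COND rewrite above threads the tracing decision through as data (data->is_trace_alloc()) instead of consulting a global flag at every call site. A minimal sketch of the same pattern, outside V8:

    #include <cstdio>

    // Same shape as the TRACE_COND macro introduced above: the caller
    // supplies the condition, so tracing can be enabled per allocation run.
    #define TRACE_COND(cond, ...)             \
      do {                                    \
        if (cond) std::printf(__VA_ARGS__);   \
      } while (false)

    void CreateSplinterExample(bool trace_alloc, int vreg) {
      TRACE_COND(trace_alloc, "creating splinter for range %d\n", vreg);
    }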
@@ -264,8 +265,9 @@ Condition FlagsConditionToConditionTst(FlagsCondition condition) { UNREACHABLE(); } -FPUCondition FlagsConditionToConditionCmpFPU(bool& predicate, - FlagsCondition condition) { +FPUCondition FlagsConditionToConditionCmpFPU( + bool& predicate, // NOLINT(runtime/references) + FlagsCondition condition) { switch (condition) { case kEqual: predicate = true; @@ -301,9 +303,9 @@ FPUCondition FlagsConditionToConditionCmpFPU(bool& predicate, << "\""; \ UNIMPLEMENTED(); -void EmitWordLoadPoisoningIfNeeded(CodeGenerator* codegen, - InstructionCode opcode, Instruction* instr, - MipsOperandConverter& i) { +void EmitWordLoadPoisoningIfNeeded( + CodeGenerator* codegen, InstructionCode opcode, Instruction* instr, + MipsOperandConverter& i) { // NOLINT(runtime/references) const MemoryAccessMode access_mode = static_cast(MiscField::decode(opcode)); if (access_mode == kMemoryAccessPoisoned) { @@ -662,8 +664,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( } case kArchCallBuiltinPointer: { DCHECK(!instr->InputAt(0)->IsImmediate()); - Register builtin_pointer = i.InputRegister(0); - __ CallBuiltinPointer(builtin_pointer); + Register builtin_index = i.InputRegister(0); + __ CallBuiltinByIndex(builtin_index); RecordCallPosition(instr); frame_access_state()->ClearSPDelta(); break; @@ -778,6 +780,13 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( break; case kArchCallCFunction: { int const num_parameters = MiscField::decode(instr->opcode()); + Label return_location; + if (linkage()->GetIncomingDescriptor()->IsWasmCapiFunction()) { + // Put the return address in a stack slot. + __ LoadAddress(kScratchReg, &return_location); + __ sw(kScratchReg, + MemOperand(fp, WasmExitFrameConstants::kCallingPCOffset)); + } if (instr->InputAt(0)->IsImmediate()) { ExternalReference ref = i.InputExternalReference(0); __ CallCFunction(ref, num_parameters); @@ -785,6 +794,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( Register func = i.InputRegister(0); __ CallCFunction(func, num_parameters); } + __ bind(&return_location); + RecordSafepoint(instr->reference_map(), Safepoint::kNoLazyDeopt); frame_access_state()->SetFrameAccessToDefault(); // Ideally, we should decrement SP delta to match the change of stack // pointer in CallCFunction. However, for certain architectures (e.g. @@ -816,22 +827,20 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( case kArchTableSwitch: AssembleArchTableSwitch(instr); break; - case kArchDebugAbort: + case kArchAbortCSAAssert: DCHECK(i.InputRegister(0) == a0); - if (!frame_access_state()->has_frame()) { + { // We don't actually want to generate a pile of code for this, so just // claim there is a stack frame, without generating one. 
FrameScope scope(tasm(), StackFrame::NONE); - __ Call(isolate()->builtins()->builtin_handle(Builtins::kAbortJS), - RelocInfo::CODE_TARGET); - } else { - __ Call(isolate()->builtins()->builtin_handle(Builtins::kAbortJS), - RelocInfo::CODE_TARGET); + __ Call( + isolate()->builtins()->builtin_handle(Builtins::kAbortCSAAssert), + RelocInfo::CODE_TARGET); } - __ stop("kArchDebugAbort"); + __ stop(); break; case kArchDebugBreak: - __ stop("kArchDebugBreak"); + __ stop(); break; case kArchComment: __ RecordComment(reinterpret_cast(i.InputInt32(0))); @@ -1611,6 +1620,10 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( __ Usdc1(ft, i.MemoryOperand(), kScratchReg); break; } + case kMipsSync: { + __ sync(); + break; + } case kMipsPush: if (instr->InputAt(0)->IsFPRegister()) { LocationOperand* op = LocationOperand::cast(instr->InputAt(0)); @@ -3157,7 +3170,7 @@ void CodeGenerator::AssembleArchTrap(Instruction* instr, new (gen_->zone()) ReferenceMap(gen_->zone()); gen_->RecordSafepoint(reference_map, Safepoint::kNoLazyDeopt); if (FLAG_debug_code) { - __ stop(GetAbortReason(AbortReason::kUnexpectedReturnFromWasmTrap)); + __ stop(); } } } @@ -3376,8 +3389,14 @@ void CodeGenerator::AssembleConstructFrame() { auto call_descriptor = linkage()->GetIncomingDescriptor(); if (frame_access_state()->has_frame()) { if (call_descriptor->IsCFunctionCall()) { - __ Push(ra, fp); - __ mov(fp, sp); + if (info()->GetOutputStackFrameType() == StackFrame::C_WASM_ENTRY) { + __ StubPrologue(StackFrame::C_WASM_ENTRY); + // Reserve stack space for saving the c_entry_fp later. + __ Subu(sp, sp, Operand(kSystemPointerSize)); + } else { + __ Push(ra, fp); + __ mov(fp, sp); + } } else if (call_descriptor->IsJSFunctionCall()) { __ Prologue(); if (call_descriptor->PushArgumentCount()) { @@ -3387,7 +3406,8 @@ void CodeGenerator::AssembleConstructFrame() { __ StubPrologue(info()->GetOutputStackFrameType()); if (call_descriptor->IsWasmFunctionCall()) { __ Push(kWasmInstanceRegister); - } else if (call_descriptor->IsWasmImportWrapper()) { + } else if (call_descriptor->IsWasmImportWrapper() || + call_descriptor->IsWasmCapiFunction()) { // WASM import wrappers are passed a tuple in the place of the instance. // Unpack the tuple into the instance and the target callable. // This must be done here in the codegen because it cannot be expressed @@ -3397,12 +3417,16 @@ void CodeGenerator::AssembleConstructFrame() { __ lw(kWasmInstanceRegister, FieldMemOperand(kWasmInstanceRegister, Tuple2::kValue1Offset)); __ Push(kWasmInstanceRegister); + if (call_descriptor->IsWasmCapiFunction()) { + // Reserve space for saving the PC later. + __ Subu(sp, sp, Operand(kSystemPointerSize)); + } } } } - int required_slots = frame()->GetTotalFrameSlotCount() - - call_descriptor->CalculateFixedFrameSize(); + int required_slots = + frame()->GetTotalFrameSlotCount() - frame()->GetFixedSlotCount(); if (info()->is_osr()) { // TurboFan OSR-compiled functions cannot be entered directly. @@ -3564,6 +3588,8 @@ void CodeGenerator::AssembleMove(InstructionOperand* source, } break; } + case Constant::kCompressedHeapObject: + UNREACHABLE(); case Constant::kRpoNumber: UNREACHABLE(); // TODO(titzer): loading RPO numbers on mips. 
break; diff --git a/deps/v8/src/compiler/backend/mips/instruction-codes-mips.h b/deps/v8/src/compiler/backend/mips/instruction-codes-mips.h index ba64e594293cf3..44e53ac044e13d 100644 --- a/deps/v8/src/compiler/backend/mips/instruction-codes-mips.h +++ b/deps/v8/src/compiler/backend/mips/instruction-codes-mips.h @@ -134,6 +134,7 @@ namespace compiler { V(MipsStackClaim) \ V(MipsSeb) \ V(MipsSeh) \ + V(MipsSync) \ V(MipsS128Zero) \ V(MipsI32x4Splat) \ V(MipsI32x4ExtractLane) \ diff --git a/deps/v8/src/compiler/backend/mips/instruction-scheduler-mips.cc b/deps/v8/src/compiler/backend/mips/instruction-scheduler-mips.cc index 26a3e808cc6e3d..92ab3f93443c65 100644 --- a/deps/v8/src/compiler/backend/mips/instruction-scheduler-mips.cc +++ b/deps/v8/src/compiler/backend/mips/instruction-scheduler-mips.cc @@ -284,6 +284,7 @@ int InstructionScheduler::GetTargetInstructionFlags( case kMipsUsh: case kMipsUsw: case kMipsUswc1: + case kMipsSync: case kMipsWord32AtomicPairStore: case kMipsWord32AtomicPairAdd: case kMipsWord32AtomicPairSub: @@ -1352,7 +1353,7 @@ int InstructionScheduler::GetInstructionLatency(const Instruction* instr) { return AssembleArchLookupSwitchLatency((instr->InputCount() - 2) / 2); case kArchTableSwitch: return AssembleArchTableSwitchLatency(); - case kArchDebugAbort: + case kArchAbortCSAAssert: return CallLatency() + 1; case kArchComment: case kArchDeoptimize: diff --git a/deps/v8/src/compiler/backend/mips/instruction-selector-mips.cc b/deps/v8/src/compiler/backend/mips/instruction-selector-mips.cc index 0c7299d4514dc2..452e92a174989e 100644 --- a/deps/v8/src/compiler/backend/mips/instruction-selector-mips.cc +++ b/deps/v8/src/compiler/backend/mips/instruction-selector-mips.cc @@ -274,9 +274,9 @@ void InstructionSelector::VisitStackSlot(Node* node) { sequence()->AddImmediate(Constant(alignment)), 0, nullptr); } -void InstructionSelector::VisitDebugAbort(Node* node) { +void InstructionSelector::VisitAbortCSAAssert(Node* node) { MipsOperandGenerator g(this); - Emit(kArchDebugAbort, g.NoOutput(), g.UseFixed(node->InputAt(0), a0)); + Emit(kArchAbortCSAAssert, g.NoOutput(), g.UseFixed(node->InputAt(0), a0)); } void InstructionSelector::VisitLoad(Node* node) { @@ -1775,6 +1775,11 @@ void InstructionSelector::VisitFloat64SilenceNaN(Node* node) { arraysize(temps), temps); } +void InstructionSelector::VisitMemoryBarrier(Node* node) { + MipsOperandGenerator g(this); + Emit(kMipsSync, g.NoOutput()); +} + void InstructionSelector::VisitWord32AtomicLoad(Node* node) { LoadRepresentation load_rep = LoadRepresentationOf(node->op()); MipsOperandGenerator g(this); diff --git a/deps/v8/src/compiler/backend/mips64/code-generator-mips64.cc b/deps/v8/src/compiler/backend/mips64/code-generator-mips64.cc index 5cd9bc54eb4ff8..f746b52df67bf6 100644 --- a/deps/v8/src/compiler/backend/mips64/code-generator-mips64.cc +++ b/deps/v8/src/compiler/backend/mips64/code-generator-mips64.cc @@ -82,6 +82,7 @@ class MipsOperandConverter final : public InstructionOperandConverter { case Constant::kFloat64: return Operand::EmbeddedNumber(constant.ToFloat64().value()); case Constant::kExternalReference: + case Constant::kCompressedHeapObject: case Constant::kHeapObject: // TODO(plind): Maybe we should handle ExtRef & HeapObj here? // maybe not done on arm due to const pool ?? 
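Taken together, the kMemoryBarrier lowerings in this patch map one machine node to each architecture's full fence: dmb ish on arm/arm64, mfence on ia32, and sync on mips/mips64. The portable C++ analogue of the guarantee the emitted instruction provides is a sequentially consistent fence:

    #include <atomic>

    // Rough equivalent of the code generated for a kMemoryBarrier node.
    void MemoryBarrier() {
      std::atomic_thread_fence(std::memory_order_seq_cst);
    }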
@@ -277,8 +278,9 @@ Condition FlagsConditionToConditionOvf(FlagsCondition condition) { UNREACHABLE(); } -FPUCondition FlagsConditionToConditionCmpFPU(bool& predicate, - FlagsCondition condition) { +FPUCondition FlagsConditionToConditionCmpFPU( + bool& predicate, // NOLINT(runtime/references) + FlagsCondition condition) { switch (condition) { case kEqual: predicate = true; @@ -309,9 +311,9 @@ FPUCondition FlagsConditionToConditionCmpFPU(bool& predicate, UNREACHABLE(); } -void EmitWordLoadPoisoningIfNeeded(CodeGenerator* codegen, - InstructionCode opcode, Instruction* instr, - MipsOperandConverter& i) { +void EmitWordLoadPoisoningIfNeeded( + CodeGenerator* codegen, InstructionCode opcode, Instruction* instr, + MipsOperandConverter& i) { // NOLINT(runtime/references) const MemoryAccessMode access_mode = static_cast(MiscField::decode(opcode)); if (access_mode == kMemoryAccessPoisoned) { @@ -634,8 +636,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( } case kArchCallBuiltinPointer: { DCHECK(!instr->InputAt(0)->IsImmediate()); - Register builtin_pointer = i.InputRegister(0); - __ CallBuiltinPointer(builtin_pointer); + Register builtin_index = i.InputRegister(0); + __ CallBuiltinByIndex(builtin_index); RecordCallPosition(instr); frame_access_state()->ClearSPDelta(); break; @@ -756,6 +758,13 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( break; case kArchCallCFunction: { int const num_parameters = MiscField::decode(instr->opcode()); + Label return_location; + if (linkage()->GetIncomingDescriptor()->IsWasmCapiFunction()) { + // Put the return address in a stack slot. + __ LoadAddress(kScratchReg, &return_location); + __ sd(kScratchReg, + MemOperand(fp, WasmExitFrameConstants::kCallingPCOffset)); + } if (instr->InputAt(0)->IsImmediate()) { ExternalReference ref = i.InputExternalReference(0); __ CallCFunction(ref, num_parameters); @@ -763,6 +772,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( Register func = i.InputRegister(0); __ CallCFunction(func, num_parameters); } + __ bind(&return_location); + RecordSafepoint(instr->reference_map(), Safepoint::kNoLazyDeopt); frame_access_state()->SetFrameAccessToDefault(); // Ideally, we should decrement SP delta to match the change of stack // pointer in CallCFunction. However, for certain architectures (e.g. @@ -794,22 +805,20 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( case kArchTableSwitch: AssembleArchTableSwitch(instr); break; - case kArchDebugAbort: + case kArchAbortCSAAssert: DCHECK(i.InputRegister(0) == a0); - if (!frame_access_state()->has_frame()) { + { // We don't actually want to generate a pile of code for this, so just // claim there is a stack frame, without generating one. 
FrameScope scope(tasm(), StackFrame::NONE); - __ Call(isolate()->builtins()->builtin_handle(Builtins::kAbortJS), - RelocInfo::CODE_TARGET); - } else { - __ Call(isolate()->builtins()->builtin_handle(Builtins::kAbortJS), - RelocInfo::CODE_TARGET); + __ Call( + isolate()->builtins()->builtin_handle(Builtins::kAbortCSAAssert), + RelocInfo::CODE_TARGET); } - __ stop("kArchDebugAbort"); + __ stop(); break; case kArchDebugBreak: - __ stop("kArchDebugBreak"); + __ stop(); break; case kArchComment: __ RecordComment(reinterpret_cast(i.InputInt64(0))); @@ -1786,6 +1795,10 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( __ Usdc1(ft, i.MemoryOperand(), kScratchReg); break; } + case kMips64Sync: { + __ sync(); + break; + } case kMips64Push: if (instr->InputAt(0)->IsFPRegister()) { __ Sdc1(i.InputDoubleRegister(0), MemOperand(sp, -kDoubleSize)); @@ -3304,7 +3317,7 @@ void CodeGenerator::AssembleArchTrap(Instruction* instr, new (gen_->zone()) ReferenceMap(gen_->zone()); gen_->RecordSafepoint(reference_map, Safepoint::kNoLazyDeopt); if (FLAG_debug_code) { - __ stop(GetAbortReason(AbortReason::kUnexpectedReturnFromWasmTrap)); + __ stop(); } } } @@ -3535,8 +3548,14 @@ void CodeGenerator::AssembleConstructFrame() { if (frame_access_state()->has_frame()) { if (call_descriptor->IsCFunctionCall()) { - __ Push(ra, fp); - __ mov(fp, sp); + if (info()->GetOutputStackFrameType() == StackFrame::C_WASM_ENTRY) { + __ StubPrologue(StackFrame::C_WASM_ENTRY); + // Reserve stack space for saving the c_entry_fp later. + __ Dsubu(sp, sp, Operand(kSystemPointerSize)); + } else { + __ Push(ra, fp); + __ mov(fp, sp); + } } else if (call_descriptor->IsJSFunctionCall()) { __ Prologue(); if (call_descriptor->PushArgumentCount()) { @@ -3546,7 +3565,8 @@ void CodeGenerator::AssembleConstructFrame() { __ StubPrologue(info()->GetOutputStackFrameType()); if (call_descriptor->IsWasmFunctionCall()) { __ Push(kWasmInstanceRegister); - } else if (call_descriptor->IsWasmImportWrapper()) { + } else if (call_descriptor->IsWasmImportWrapper() || + call_descriptor->IsWasmCapiFunction()) { // WASM import wrappers are passed a tuple in the place of the instance. // Unpack the tuple into the instance and the target callable. // This must be done here in the codegen because it cannot be expressed @@ -3556,12 +3576,16 @@ void CodeGenerator::AssembleConstructFrame() { __ ld(kWasmInstanceRegister, FieldMemOperand(kWasmInstanceRegister, Tuple2::kValue1Offset)); __ Push(kWasmInstanceRegister); + if (call_descriptor->IsWasmCapiFunction()) { + // Reserve space for saving the PC later. + __ Dsubu(sp, sp, Operand(kSystemPointerSize)); + } } } } - int required_slots = frame()->GetTotalFrameSlotCount() - - call_descriptor->CalculateFixedFrameSize(); + int required_slots = + frame()->GetTotalFrameSlotCount() - frame()->GetFixedSlotCount(); if (info()->is_osr()) { // TurboFan OSR-compiled functions cannot be entered directly. @@ -3723,6 +3747,8 @@ void CodeGenerator::AssembleMove(InstructionOperand* source, } break; } + case Constant::kCompressedHeapObject: + UNREACHABLE(); case Constant::kRpoNumber: UNREACHABLE(); // TODO(titzer): loading RPO numbers on mips64. 
   break;
diff --git a/deps/v8/src/compiler/backend/mips64/instruction-codes-mips64.h b/deps/v8/src/compiler/backend/mips64/instruction-codes-mips64.h
index 24f01b1af1f730..e375ee8d07dfd5 100644
--- a/deps/v8/src/compiler/backend/mips64/instruction-codes-mips64.h
+++ b/deps/v8/src/compiler/backend/mips64/instruction-codes-mips64.h
@@ -163,6 +163,7 @@ namespace compiler {
   V(Mips64StackClaim) \
   V(Mips64Seb) \
   V(Mips64Seh) \
+  V(Mips64Sync) \
   V(Mips64AssertEqual) \
   V(Mips64S128Zero) \
   V(Mips64I32x4Splat) \
diff --git a/deps/v8/src/compiler/backend/mips64/instruction-scheduler-mips64.cc b/deps/v8/src/compiler/backend/mips64/instruction-scheduler-mips64.cc
index 499a3da05ae9dc..4dcafe41977a15 100644
--- a/deps/v8/src/compiler/backend/mips64/instruction-scheduler-mips64.cc
+++ b/deps/v8/src/compiler/backend/mips64/instruction-scheduler-mips64.cc
@@ -318,6 +318,7 @@ int InstructionScheduler::GetTargetInstructionFlags(
     case kMips64Ush:
     case kMips64Usw:
     case kMips64Uswc1:
+    case kMips64Sync:
     case kMips64Word64AtomicStoreWord8:
     case kMips64Word64AtomicStoreWord16:
     case kMips64Word64AtomicStoreWord32:
@@ -1263,7 +1264,7 @@ int InstructionScheduler::GetInstructionLatency(const Instruction* instr) {
       return AssembleArchLookupSwitchLatency(instr);
     case kArchTableSwitch:
       return AssembleArchTableSwitchLatency();
-    case kArchDebugAbort:
+    case kArchAbortCSAAssert:
       return CallLatency() + 1;
     case kArchDebugBreak:
       return 1;
diff --git a/deps/v8/src/compiler/backend/mips64/instruction-selector-mips64.cc b/deps/v8/src/compiler/backend/mips64/instruction-selector-mips64.cc
index 9768a7da9b90b0..95f11ebed1cd00 100644
--- a/deps/v8/src/compiler/backend/mips64/instruction-selector-mips64.cc
+++ b/deps/v8/src/compiler/backend/mips64/instruction-selector-mips64.cc
@@ -334,9 +334,9 @@ void InstructionSelector::VisitStackSlot(Node* node) {
        sequence()->AddImmediate(Constant(alignment)), 0, nullptr);
 }

-void InstructionSelector::VisitDebugAbort(Node* node) {
+void InstructionSelector::VisitAbortCSAAssert(Node* node) {
   Mips64OperandGenerator g(this);
-  Emit(kArchDebugAbort, g.NoOutput(), g.UseFixed(node->InputAt(0), a0));
+  Emit(kArchAbortCSAAssert, g.NoOutput(), g.UseFixed(node->InputAt(0), a0));
 }

 void EmitLoad(InstructionSelector* selector, Node* node, InstructionCode opcode,
@@ -1946,7 +1946,17 @@ void VisitWord32Compare(InstructionSelector* selector, Node* node,
   // in those cases. Unfortunately, the solution is not complete because
   // it might skip cases where Word32 full compare is needed, so
   // basically it is a hack.
+  // When calling a host function in the simulator, if the function returns
+  // an int32 value, the simulator does not sign-extend it to int64, because
+  // the simulator cannot know whether the function returns an int32 or an
+  // int64. So we need to do a full word32 compare in this case.
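// A self-contained illustration of the comment above, under the stated
// assumption that a simulated host call can return a 32-bit value whose
// upper 32 bits are not sign-extended (plain C++, not V8 simulator code):
#include <cstdint>
bool OptimizedCompare64(uint64_t a, uint64_t b) {
  return a == b;  // trusts bits 32..63 to mirror bit 31
}
bool FullWord32Compare(uint64_t a, uint64_t b) {
  return static_cast<int32_t>(a) == static_cast<int32_t>(b);
}
// With a == 0x00000000FFFFFFFF (an int32 -1 that was not sign-extended) and
// b == 0xFFFFFFFFFFFFFFFF (-1 properly sign-extended), the 64-bit compare
// answers "not equal" while the word32 compare correctly answers "equal",
// which is why the simulator build below also forces a full compare for
// IrOpcode::kCall inputs.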
+#ifndef USE_SIMULATOR
   if (IsNodeUnsigned(node->InputAt(0)) != IsNodeUnsigned(node->InputAt(1))) {
+#else
+  if (IsNodeUnsigned(node->InputAt(0)) != IsNodeUnsigned(node->InputAt(1)) ||
+      node->InputAt(0)->opcode() == IrOpcode::kCall ||
+      node->InputAt(1)->opcode() == IrOpcode::kCall) {
+#endif
     VisitFullWord32Compare(selector, node, kMips64Cmp, cont);
   } else {
     VisitOptimizedWord32Compare(selector, node, kMips64Cmp, cont);
@@ -2398,6 +2408,11 @@ void InstructionSelector::VisitFloat64InsertHighWord32(Node* node) {
        g.UseRegister(left), g.UseRegister(right));
 }

+void InstructionSelector::VisitMemoryBarrier(Node* node) {
+  Mips64OperandGenerator g(this);
+  Emit(kMips64Sync, g.NoOutput());
+}
+
 void InstructionSelector::VisitWord32AtomicLoad(Node* node) {
   LoadRepresentation load_rep = LoadRepresentationOf(node->op());
   ArchOpcode opcode = kArchNop;
diff --git a/deps/v8/src/compiler/backend/ppc/code-generator-ppc.cc b/deps/v8/src/compiler/backend/ppc/code-generator-ppc.cc
index 30605df270d458..5289812cb5f280 100644
--- a/deps/v8/src/compiler/backend/ppc/code-generator-ppc.cc
+++ b/deps/v8/src/compiler/backend/ppc/code-generator-ppc.cc
@@ -79,6 +79,7 @@ class PPCOperandConverter final : public InstructionOperandConverter {
       case Constant::kDelayedStringConstant:
         return Operand::EmbeddedStringConstant(
             constant.ToDelayedStringConstant());
+      case Constant::kCompressedHeapObject:
       case Constant::kHeapObject:
       case Constant::kRpoNumber:
         break;
@@ -262,8 +263,9 @@ Condition FlagsConditionToCondition(FlagsCondition condition, ArchOpcode op) {
   UNREACHABLE();
 }

-void EmitWordLoadPoisoningIfNeeded(CodeGenerator* codegen, Instruction* instr,
-                                   PPCOperandConverter& i) {
+void EmitWordLoadPoisoningIfNeeded(
+    CodeGenerator* codegen, Instruction* instr,
+    PPCOperandConverter& i) {  // NOLINT(runtime/references)
   const MemoryAccessMode access_mode =
       static_cast<MemoryAccessMode>(MiscField::decode(instr->opcode()));
   if (access_mode == kMemoryAccessPoisoned) {
@@ -877,8 +879,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
     }
     case kArchCallBuiltinPointer: {
       DCHECK(!instr->InputAt(0)->IsImmediate());
-      Register builtin_pointer = i.InputRegister(0);
-      __ CallBuiltinPointer(builtin_pointer);
+      Register builtin_index = i.InputRegister(0);
+      __ CallBuiltinByIndex(builtin_index);
       RecordCallPosition(instr);
       frame_access_state()->ClearSPDelta();
       break;
@@ -1019,6 +1021,18 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
       break;
     case kArchCallCFunction: {
       int const num_parameters = MiscField::decode(instr->opcode());
+      Label start_call;
+      bool isWasmCapiFunction =
+          linkage()->GetIncomingDescriptor()->IsWasmCapiFunction();
+      constexpr int offset = 12;
+      if (isWasmCapiFunction) {
+        __ mflr(kScratchReg);
+        __ bind(&start_call);
+        __ LoadPC(r0);
+        __ addi(r0, r0, Operand(offset));
+        __ StoreP(r0, MemOperand(fp, WasmExitFrameConstants::kCallingPCOffset));
+        __ mtlr(r0);
+      }
       if (instr->InputAt(0)->IsImmediate()) {
         ExternalReference ref = i.InputExternalReference(0);
         __ CallCFunction(ref, num_parameters);
@@ -1026,6 +1040,15 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
         Register func = i.InputRegister(0);
         __ CallCFunction(func, num_parameters);
       }
+      // TODO(miladfar): In the above block, r0 must be populated with the
+      // strictly-correct PC, which is the return address at this spot. The
+      // offset is set to 12 right now, which is counted from where we are
+      // binding to the label and ends at this spot. If it fails, replace it
+      // with the correct offset suggested.
More info on f5ab7d3. + if (isWasmCapiFunction) + CHECK_EQ(offset, __ SizeOfCodeGeneratedSince(&start_call)); + + RecordSafepoint(instr->reference_map(), Safepoint::kNoLazyDeopt); frame_access_state()->SetFrameAccessToDefault(); // Ideally, we should decrement SP delta to match the change of stack // pointer in CallCFunction. However, for certain architectures (e.g. @@ -1060,22 +1083,20 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( AssembleArchTableSwitch(instr); DCHECK_EQ(LeaveRC, i.OutputRCBit()); break; - case kArchDebugAbort: + case kArchAbortCSAAssert: DCHECK(i.InputRegister(0) == r4); - if (!frame_access_state()->has_frame()) { + { // We don't actually want to generate a pile of code for this, so just // claim there is a stack frame, without generating one. FrameScope scope(tasm(), StackFrame::NONE); - __ Call(isolate()->builtins()->builtin_handle(Builtins::kAbortJS), - RelocInfo::CODE_TARGET); - } else { - __ Call(isolate()->builtins()->builtin_handle(Builtins::kAbortJS), - RelocInfo::CODE_TARGET); + __ Call( + isolate()->builtins()->builtin_handle(Builtins::kAbortCSAAssert), + RelocInfo::CODE_TARGET); } - __ stop("kArchDebugAbort"); + __ stop(); break; case kArchDebugBreak: - __ stop("kArchDebugBreak"); + __ stop(); break; case kArchNop: case kArchThrowTerminator: @@ -1174,6 +1195,10 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( } break; } + case kPPC_Sync: { + __ sync(); + break; + } case kPPC_And: if (HasRegisterInput(instr, 1)) { __ and_(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1), @@ -2150,7 +2175,7 @@ void CodeGenerator::AssembleArchTrap(Instruction* instr, new (gen_->zone()) ReferenceMap(gen_->zone()); gen_->RecordSafepoint(reference_map, Safepoint::kNoLazyDeopt); if (FLAG_debug_code) { - __ stop(GetAbortReason(AbortReason::kUnexpectedReturnFromWasmTrap)); + __ stop(); } } } @@ -2304,14 +2329,20 @@ void CodeGenerator::AssembleConstructFrame() { auto call_descriptor = linkage()->GetIncomingDescriptor(); if (frame_access_state()->has_frame()) { if (call_descriptor->IsCFunctionCall()) { - __ mflr(r0); - if (FLAG_enable_embedded_constant_pool) { - __ Push(r0, fp, kConstantPoolRegister); - // Adjust FP to point to saved FP. - __ subi(fp, sp, Operand(StandardFrameConstants::kConstantPoolOffset)); + if (info()->GetOutputStackFrameType() == StackFrame::C_WASM_ENTRY) { + __ StubPrologue(StackFrame::C_WASM_ENTRY); + // Reserve stack space for saving the c_entry_fp later. + __ addi(sp, sp, Operand(-kSystemPointerSize)); } else { - __ Push(r0, fp); - __ mr(fp, sp); + __ mflr(r0); + if (FLAG_enable_embedded_constant_pool) { + __ Push(r0, fp, kConstantPoolRegister); + // Adjust FP to point to saved FP. + __ subi(fp, sp, Operand(StandardFrameConstants::kConstantPoolOffset)); + } else { + __ Push(r0, fp); + __ mr(fp, sp); + } } } else if (call_descriptor->IsJSFunctionCall()) { __ Prologue(); @@ -2325,7 +2356,8 @@ void CodeGenerator::AssembleConstructFrame() { __ StubPrologue(type); if (call_descriptor->IsWasmFunctionCall()) { __ Push(kWasmInstanceRegister); - } else if (call_descriptor->IsWasmImportWrapper()) { + } else if (call_descriptor->IsWasmImportWrapper() || + call_descriptor->IsWasmCapiFunction()) { // WASM import wrappers are passed a tuple in the place of the instance. // Unpack the tuple into the instance and the target callable. 
// This must be done here in the codegen because it cannot be expressed @@ -2335,12 +2367,16 @@ void CodeGenerator::AssembleConstructFrame() { __ LoadP(kWasmInstanceRegister, FieldMemOperand(kWasmInstanceRegister, Tuple2::kValue1Offset)); __ Push(kWasmInstanceRegister); + if (call_descriptor->IsWasmCapiFunction()) { + // Reserve space for saving the PC later. + __ addi(sp, sp, Operand(-kSystemPointerSize)); + } } } } - int required_slots = frame()->GetTotalFrameSlotCount() - - call_descriptor->CalculateFixedFrameSize(); + int required_slots = + frame()->GetTotalFrameSlotCount() - frame()->GetFixedSlotCount(); if (info()->is_osr()) { // TurboFan OSR-compiled functions cannot be entered directly. __ Abort(AbortReason::kShouldNotDirectlyEnterOsrFunction); @@ -2389,7 +2425,7 @@ void CodeGenerator::AssembleConstructFrame() { ReferenceMap* reference_map = new (zone()) ReferenceMap(zone()); RecordSafepoint(reference_map, Safepoint::kNoLazyDeopt); if (FLAG_debug_code) { - __ stop(GetAbortReason(AbortReason::kUnexpectedReturnFromThrow)); + __ stop(); } __ bind(&done); @@ -2554,6 +2590,8 @@ void CodeGenerator::AssembleMove(InstructionOperand* source, } break; } + case Constant::kCompressedHeapObject: + UNREACHABLE(); case Constant::kRpoNumber: UNREACHABLE(); // TODO(dcarney): loading RPO constants on PPC. break; diff --git a/deps/v8/src/compiler/backend/ppc/instruction-codes-ppc.h b/deps/v8/src/compiler/backend/ppc/instruction-codes-ppc.h index a34a09b7969606..f37529bd884eaf 100644 --- a/deps/v8/src/compiler/backend/ppc/instruction-codes-ppc.h +++ b/deps/v8/src/compiler/backend/ppc/instruction-codes-ppc.h @@ -13,6 +13,7 @@ namespace compiler { // Most opcodes specify a single instruction. #define TARGET_ARCH_OPCODE_LIST(V) \ V(PPC_Peek) \ + V(PPC_Sync) \ V(PPC_And) \ V(PPC_AndComplement) \ V(PPC_Or) \ diff --git a/deps/v8/src/compiler/backend/ppc/instruction-scheduler-ppc.cc b/deps/v8/src/compiler/backend/ppc/instruction-scheduler-ppc.cc index e5f7d7e45a405c..61c2d2be3bd5a7 100644 --- a/deps/v8/src/compiler/backend/ppc/instruction-scheduler-ppc.cc +++ b/deps/v8/src/compiler/backend/ppc/instruction-scheduler-ppc.cc @@ -143,6 +143,7 @@ int InstructionScheduler::GetTargetInstructionFlags( case kPPC_Push: case kPPC_PushFrame: case kPPC_StoreToStackSlot: + case kPPC_Sync: return kHasSideEffect; case kPPC_AtomicStoreUint8: diff --git a/deps/v8/src/compiler/backend/ppc/instruction-selector-ppc.cc b/deps/v8/src/compiler/backend/ppc/instruction-selector-ppc.cc index bb503763c216fa..bfc77b9412a890 100644 --- a/deps/v8/src/compiler/backend/ppc/instruction-selector-ppc.cc +++ b/deps/v8/src/compiler/backend/ppc/instruction-selector-ppc.cc @@ -173,9 +173,9 @@ void InstructionSelector::VisitStackSlot(Node* node) { sequence()->AddImmediate(Constant(slot)), 0, nullptr); } -void InstructionSelector::VisitDebugAbort(Node* node) { +void InstructionSelector::VisitAbortCSAAssert(Node* node) { PPCOperandGenerator g(this); - Emit(kArchDebugAbort, g.NoOutput(), g.UseFixed(node->InputAt(0), r4)); + Emit(kArchAbortCSAAssert, g.NoOutput(), g.UseFixed(node->InputAt(0), r4)); } void InstructionSelector::VisitLoad(Node* node) { @@ -1853,6 +1853,11 @@ void InstructionSelector::VisitFloat64InsertHighWord32(Node* node) { g.UseRegister(left), g.UseRegister(right)); } +void InstructionSelector::VisitMemoryBarrier(Node* node) { + PPCOperandGenerator g(this); + Emit(kPPC_Sync, g.NoOutput()); +} + void InstructionSelector::VisitWord32AtomicLoad(Node* node) { VisitLoad(node); } void InstructionSelector::VisitWord64AtomicLoad(Node* node) { 
VisitLoad(node); } diff --git a/deps/v8/src/compiler/backend/register-allocator.cc b/deps/v8/src/compiler/backend/register-allocator.cc index 57ea2c1a26636e..44701f8159385c 100644 --- a/deps/v8/src/compiler/backend/register-allocator.cc +++ b/deps/v8/src/compiler/backend/register-allocator.cc @@ -9,6 +9,7 @@ #include "src/base/adapters.h" #include "src/base/small-vector.h" #include "src/codegen/assembler-inl.h" +#include "src/codegen/tick-counter.h" #include "src/compiler/linkage.h" #include "src/strings/string-stream.h" #include "src/utils/vector.h" @@ -17,11 +18,13 @@ namespace v8 { namespace internal { namespace compiler { -#define TRACE(...) \ - do { \ - if (FLAG_trace_alloc) PrintF(__VA_ARGS__); \ +#define TRACE_COND(cond, ...) \ + do { \ + if (cond) PrintF(__VA_ARGS__); \ } while (false) +#define TRACE(...) TRACE_COND(data()->is_trace_alloc(), __VA_ARGS__) + namespace { static constexpr int kFloat32Bit = @@ -1119,8 +1122,9 @@ void TopLevelLiveRange::Verify() const { } } -void TopLevelLiveRange::ShortenTo(LifetimePosition start) { - TRACE("Shorten live range %d to [%d\n", vreg(), start.value()); +void TopLevelLiveRange::ShortenTo(LifetimePosition start, bool trace_alloc) { + TRACE_COND(trace_alloc, "Shorten live range %d to [%d\n", vreg(), + start.value()); DCHECK_NOT_NULL(first_interval_); DCHECK(first_interval_->start() <= start); DCHECK(start < first_interval_->end()); @@ -1128,9 +1132,10 @@ void TopLevelLiveRange::ShortenTo(LifetimePosition start) { } void TopLevelLiveRange::EnsureInterval(LifetimePosition start, - LifetimePosition end, Zone* zone) { - TRACE("Ensure live range %d in interval [%d %d[\n", vreg(), start.value(), - end.value()); + LifetimePosition end, Zone* zone, + bool trace_alloc) { + TRACE_COND(trace_alloc, "Ensure live range %d in interval [%d %d[\n", vreg(), + start.value(), end.value()); LifetimePosition new_end = end; while (first_interval_ != nullptr && first_interval_->start() <= end) { if (first_interval_->end() > end) { @@ -1148,9 +1153,10 @@ void TopLevelLiveRange::EnsureInterval(LifetimePosition start, } void TopLevelLiveRange::AddUseInterval(LifetimePosition start, - LifetimePosition end, Zone* zone) { - TRACE("Add to live range %d interval [%d %d[\n", vreg(), start.value(), - end.value()); + LifetimePosition end, Zone* zone, + bool trace_alloc) { + TRACE_COND(trace_alloc, "Add to live range %d interval [%d %d[\n", vreg(), + start.value(), end.value()); if (first_interval_ == nullptr) { UseInterval* interval = new (zone) UseInterval(start, end); first_interval_ = interval; @@ -1173,9 +1179,10 @@ void TopLevelLiveRange::AddUseInterval(LifetimePosition start, } } -void TopLevelLiveRange::AddUsePosition(UsePosition* use_pos) { +void TopLevelLiveRange::AddUsePosition(UsePosition* use_pos, bool trace_alloc) { LifetimePosition pos = use_pos->pos(); - TRACE("Add to live range %d use position %d\n", vreg(), pos.value()); + TRACE_COND(trace_alloc, "Add to live range %d use position %d\n", vreg(), + pos.value()); UsePosition* prev_hint = nullptr; UsePosition* prev = nullptr; UsePosition* current = first_pos_; @@ -1309,13 +1316,8 @@ void LinearScanAllocator::PrintRangeRow(std::ostream& os, if (range->spilled()) { prefix = snprintf(buffer, max_prefix_length, "|%s", kind_string); } else { - const char* reg_name; - if (range->assigned_register() == kUnassignedRegister) { - reg_name = "???"; - } else { - reg_name = RegisterName(range->assigned_register()); - } - prefix = snprintf(buffer, max_prefix_length, "|%s", reg_name); + prefix = snprintf(buffer, 
max_prefix_length, "|%s", + RegisterName(range->assigned_register())); } os << buffer; position += std::min(prefix, max_prefix_length - 1); @@ -1469,7 +1471,7 @@ void RegisterAllocationData::PhiMapValue::CommitAssignment( RegisterAllocationData::RegisterAllocationData( const RegisterConfiguration* config, Zone* zone, Frame* frame, InstructionSequence* code, RegisterAllocationFlags flags, - const char* debug_name) + TickCounter* tick_counter, const char* debug_name) : allocation_zone_(zone), frame_(frame), code_(code), @@ -1496,7 +1498,8 @@ RegisterAllocationData::RegisterAllocationData( preassigned_slot_ranges_(zone), spill_state_(code->InstructionBlockCount(), ZoneVector(zone), zone), - flags_(flags) { + flags_(flags), + tick_counter_(tick_counter) { if (!kSimpleFPAliasing) { fixed_float_live_ranges_.resize( kNumberOfFixedRangesPerRegister * this->config()->num_float_registers(), @@ -1815,6 +1818,7 @@ InstructionOperand* ConstraintBuilder::AllocateFixed( void ConstraintBuilder::MeetRegisterConstraints() { for (InstructionBlock* block : code()->instruction_blocks()) { + data_->tick_counter()->DoTick(); MeetRegisterConstraints(block); } } @@ -1973,14 +1977,6 @@ void ConstraintBuilder::MeetConstraintsBefore(int instr_index) { second->reference_map(), &gap_move->source()}; data()->delayed_references().push_back(delayed_reference); } - } else if (!code()->IsReference(input_vreg) && - code()->IsReference(output_vreg)) { - // The input is assumed to immediately have a tagged representation, - // before the pointer map can be used. I.e. the pointer map at the - // instruction will include the output operand (whose value at the - // beginning of the instruction is equal to the input operand). If - // this is not desired, then the pointer map at this instruction needs - // to be adjusted manually. } } } @@ -1988,6 +1984,7 @@ void ConstraintBuilder::MeetConstraintsBefore(int instr_index) { void ConstraintBuilder::ResolvePhis() { // Process the blocks in reverse order. for (InstructionBlock* block : base::Reversed(code()->instruction_blocks())) { + data_->tick_counter()->DoTick(); ResolvePhis(block); } } @@ -2071,7 +2068,8 @@ void LiveRangeBuilder::AddInitialIntervals(const InstructionBlock* block, while (!iterator.Done()) { int operand_index = iterator.Current(); TopLevelLiveRange* range = data()->GetOrCreateLiveRangeFor(operand_index); - range->AddUseInterval(start, end, allocation_zone()); + range->AddUseInterval(start, end, allocation_zone(), + data()->is_trace_alloc()); iterator.Advance(); } } @@ -2192,16 +2190,18 @@ UsePosition* LiveRangeBuilder::Define(LifetimePosition position, if (range->IsEmpty() || range->Start() > position) { // Can happen if there is a definition without use. 
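// The tick_counter_ threaded through the allocator above is just a progress
// counter: long-running loops call DoTick() once per unit of work so the
// embedder can detect runaway compilations. A hedged stand-in for the real
// src/codegen/tick-counter.h (illustrative only; the actual class may
// differ):
#include <cstddef>
class TickCounterSketch {
 public:
  void DoTick() { ++ticks_; }
  std::size_t ticks() const { return ticks_; }

 private:
  std::size_t ticks_ = 0;
};
// Usage mirroring the loops above, one tick per block or live range:
//   for (InstructionBlock* block : blocks) { counter.DoTick(); ... }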
- range->AddUseInterval(position, position.NextStart(), allocation_zone()); - range->AddUsePosition(NewUsePosition(position.NextStart())); + range->AddUseInterval(position, position.NextStart(), allocation_zone(), + data()->is_trace_alloc()); + range->AddUsePosition(NewUsePosition(position.NextStart()), + data()->is_trace_alloc()); } else { - range->ShortenTo(position); + range->ShortenTo(position, data()->is_trace_alloc()); } if (!operand->IsUnallocated()) return nullptr; UnallocatedOperand* unalloc_operand = UnallocatedOperand::cast(operand); UsePosition* use_pos = NewUsePosition(position, unalloc_operand, hint, hint_type); - range->AddUsePosition(use_pos); + range->AddUsePosition(use_pos, data()->is_trace_alloc()); return use_pos; } @@ -2216,9 +2216,10 @@ UsePosition* LiveRangeBuilder::Use(LifetimePosition block_start, if (operand->IsUnallocated()) { UnallocatedOperand* unalloc_operand = UnallocatedOperand::cast(operand); use_pos = NewUsePosition(position, unalloc_operand, hint, hint_type); - range->AddUsePosition(use_pos); + range->AddUsePosition(use_pos, data()->is_trace_alloc()); } - range->AddUseInterval(block_start, position, allocation_zone()); + range->AddUseInterval(block_start, position, allocation_zone(), + data()->is_trace_alloc()); return use_pos; } @@ -2279,7 +2280,7 @@ void LiveRangeBuilder::ProcessInstructions(const InstructionBlock* block, int code = config()->GetAllocatableGeneralCode(i); TopLevelLiveRange* range = FixedLiveRangeFor(code, spill_mode); range->AddUseInterval(curr_position, curr_position.End(), - allocation_zone()); + allocation_zone(), data()->is_trace_alloc()); } } @@ -2291,7 +2292,7 @@ void LiveRangeBuilder::ProcessInstructions(const InstructionBlock* block, TopLevelLiveRange* range = FixedFPLiveRangeFor( code, MachineRepresentation::kFloat64, spill_mode); range->AddUseInterval(curr_position, curr_position.End(), - allocation_zone()); + allocation_zone(), data()->is_trace_alloc()); } // Clobber fixed float registers on archs with non-simple aliasing. if (!kSimpleFPAliasing) { @@ -2304,7 +2305,7 @@ void LiveRangeBuilder::ProcessInstructions(const InstructionBlock* block, TopLevelLiveRange* range = FixedFPLiveRangeFor( code, MachineRepresentation::kFloat32, spill_mode); range->AddUseInterval(curr_position, curr_position.End(), - allocation_zone()); + allocation_zone(), data()->is_trace_alloc()); } } if (fixed_simd128_live_ranges) { @@ -2314,7 +2315,7 @@ void LiveRangeBuilder::ProcessInstructions(const InstructionBlock* block, TopLevelLiveRange* range = FixedFPLiveRangeFor( code, MachineRepresentation::kSimd128, spill_mode); range->AddUseInterval(curr_position, curr_position.End(), - allocation_zone()); + allocation_zone(), data()->is_trace_alloc()); } } } @@ -2574,7 +2575,8 @@ void LiveRangeBuilder::ProcessLoopHeader(const InstructionBlock* block, while (!iterator.Done()) { int operand_index = iterator.Current(); TopLevelLiveRange* range = data()->GetOrCreateLiveRangeFor(operand_index); - range->EnsureInterval(start, end, allocation_zone()); + range->EnsureInterval(start, end, allocation_zone(), + data()->is_trace_alloc()); iterator.Advance(); } // Insert all values into the live in sets of all blocks in the loop. @@ -2588,6 +2590,7 @@ void LiveRangeBuilder::BuildLiveRanges() { // Process the blocks in reverse order. 
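// The TRACE -> TRACE_COND rewrite above replaces a process-wide
// FLAG_trace_alloc test with a bool read once (data()->is_trace_alloc()) and
// passed down, making tracing a per-allocation decision. A minimal sketch of
// the same macro shape, assuming nothing beyond <cstdio>:
#include <cstdio>
#define TRACE_COND_SKETCH(cond, ...)    \
  do {                                  \
    if (cond) std::printf(__VA_ARGS__); \
  } while (false)

void DemoTrace(bool trace_alloc, int vreg) {
  TRACE_COND_SKETCH(trace_alloc, "Add to live range %d\n", vreg);
}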
for (int block_id = code()->InstructionBlockCount() - 1; block_id >= 0; --block_id) { + data_->tick_counter()->DoTick(); InstructionBlock* block = code()->InstructionBlockAt(RpoNumber::FromInt(block_id)); BitVector* live = ComputeLiveOut(block, data()); @@ -2607,6 +2610,7 @@ void LiveRangeBuilder::BuildLiveRanges() { // Postprocess the ranges. const size_t live_ranges_size = data()->live_ranges().size(); for (TopLevelLiveRange* range : data()->live_ranges()) { + data_->tick_counter()->DoTick(); CHECK_EQ(live_ranges_size, data()->live_ranges().size()); // TODO(neis): crbug.com/831822 if (range == nullptr) continue; @@ -2773,7 +2777,7 @@ void BundleBuilder::BuildBundles() { LiveRangeBundle* input_bundle = input_range->get_bundle(); if (input_bundle != nullptr) { TRACE("Merge\n"); - if (out->TryMerge(input_bundle)) + if (out->TryMerge(input_bundle, data()->is_trace_alloc())) TRACE("Merged %d and %d to %d\n", phi->virtual_register(), input, out->id()); } else { @@ -2798,7 +2802,7 @@ bool LiveRangeBundle::TryAddRange(LiveRange* range) { InsertUses(range->first_interval()); return true; } -bool LiveRangeBundle::TryMerge(LiveRangeBundle* other) { +bool LiveRangeBundle::TryMerge(LiveRangeBundle* other, bool trace_alloc) { if (other == this) return true; auto iter1 = uses_.begin(); @@ -2810,8 +2814,8 @@ bool LiveRangeBundle::TryMerge(LiveRangeBundle* other) { } else if (iter2->start > iter1->end) { ++iter1; } else { - TRACE("No merge %d:%d %d:%d\n", iter1->start, iter1->end, iter2->start, - iter2->end); + TRACE_COND(trace_alloc, "No merge %d:%d %d:%d\n", iter1->start, + iter1->end, iter2->start, iter2->end); return false; } } @@ -3042,6 +3046,7 @@ void RegisterAllocator::Spill(LiveRange* range, SpillMode spill_mode) { } const char* RegisterAllocator::RegisterName(int register_code) const { + if (register_code == kUnassignedRegister) return "unassigned"; return mode() == GENERAL_REGISTERS ? i::RegisterName(Register::from_code(register_code)) : i::RegisterName(DoubleRegister::from_code(register_code)); @@ -3408,7 +3413,7 @@ void LinearScanAllocator::ComputeStateFromManyPredecessors( to_be_live->emplace(val.first, reg); TRACE("Reset %d as live due vote %zu in %s\n", val.first->TopLevel()->vreg(), val.second.count, - reg == kUnassignedRegister ? "unassigned" : RegisterName(reg)); + RegisterName(reg)); } } }; @@ -3477,6 +3482,8 @@ void LinearScanAllocator::UpdateDeferredFixedRanges(SpillMode spill_mode, RegisterName(other->assigned_register())); LiveRange* split_off = other->SplitAt(next_start, data()->allocation_zone()); + // Try to get the same register after the deferred block. + split_off->set_controlflow_hint(other->assigned_register()); DCHECK_NE(split_off, other); AddToUnhandled(split_off); update_caches(other); @@ -3574,7 +3581,7 @@ void LinearScanAllocator::AllocateRegisters() { SplitAndSpillRangesDefinedByMemoryOperand(); data()->ResetSpillState(); - if (FLAG_trace_alloc) { + if (data()->is_trace_alloc()) { PrintRangeOverview(std::cout); } @@ -3642,6 +3649,7 @@ void LinearScanAllocator::AllocateRegisters() { while (!unhandled_live_ranges().empty() || (data()->is_turbo_control_flow_aware_allocation() && last_block < max_blocks)) { + data()->tick_counter()->DoTick(); LiveRange* current = unhandled_live_ranges().empty() ? 
nullptr : *unhandled_live_ranges().begin(); @@ -3824,7 +3832,7 @@ void LinearScanAllocator::AllocateRegisters() { ProcessCurrentRange(current, spill_mode); } - if (FLAG_trace_alloc) { + if (data()->is_trace_alloc()) { PrintRangeOverview(std::cout); } } @@ -4557,6 +4565,14 @@ void LinearScanAllocator::SpillBetweenUntil(LiveRange* range, LiveRange* third_part = SplitBetween(second_part, split_start, third_part_end); + if (GetInstructionBlock(data()->code(), second_part->Start()) + ->IsDeferred()) { + // Try to use the same register as before. + TRACE("Setting control flow hint for %d:%d to %s\n", + third_part->TopLevel()->vreg(), third_part->relative_id(), + RegisterName(range->controlflow_hint())); + third_part->set_controlflow_hint(range->controlflow_hint()); + } AddToUnhandled(third_part); // This can happen, even if we checked for start < end above, as we fiddle @@ -4601,6 +4617,7 @@ OperandAssigner::OperandAssigner(RegisterAllocationData* data) : data_(data) {} void OperandAssigner::DecideSpillingMode() { if (data()->is_turbo_control_flow_aware_allocation()) { for (auto range : data()->live_ranges()) { + data()->tick_counter()->DoTick(); int max_blocks = data()->code()->InstructionBlockCount(); if (range != nullptr && range->IsSpilledOnlyInDeferredBlocks(data())) { // If the range is spilled only in deferred blocks and starts in @@ -4629,6 +4646,7 @@ void OperandAssigner::DecideSpillingMode() { void OperandAssigner::AssignSpillSlots() { for (auto range : data()->live_ranges()) { + data()->tick_counter()->DoTick(); if (range != nullptr && range->get_bundle() != nullptr) { range->get_bundle()->MergeSpillRanges(); } @@ -4636,6 +4654,7 @@ void OperandAssigner::AssignSpillSlots() { ZoneVector& spill_ranges = data()->spill_ranges(); // Merge disjoint spill ranges for (size_t i = 0; i < spill_ranges.size(); ++i) { + data()->tick_counter()->DoTick(); SpillRange* range = spill_ranges[i]; if (range == nullptr) continue; if (range->IsEmpty()) continue; @@ -4648,6 +4667,7 @@ void OperandAssigner::AssignSpillSlots() { } // Allocate slots for the merged spill ranges. for (SpillRange* range : spill_ranges) { + data()->tick_counter()->DoTick(); if (range == nullptr || range->IsEmpty()) continue; // Allocate a new operand referring to the spill slot. 
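// "Merge disjoint spill ranges" above lets two values that are never live at
// the same time share one spill slot. A simplified sketch of the overlap
// test involved (V8's spill-range merging also checks representation and
// byte width, which this deliberately omits):
struct UseIntervalSketch {
  int start;
  int end;  // half-open: [start, end)
};
bool AreDisjoint(UseIntervalSketch a, UseIntervalSketch b) {
  return a.end <= b.start || b.end <= a.start;
}
// AreDisjoint({0, 10}, {10, 20}) -> true: the ranges may share a slot.
// AreDisjoint({0, 12}, {10, 20}) -> false: they need distinct slots.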
if (!range->HasSlot()) { @@ -4660,6 +4680,7 @@ void OperandAssigner::AssignSpillSlots() { void OperandAssigner::CommitAssignment() { const size_t live_ranges_size = data()->live_ranges().size(); for (TopLevelLiveRange* top_range : data()->live_ranges()) { + data()->tick_counter()->DoTick(); CHECK_EQ(live_ranges_size, data()->live_ranges().size()); // TODO(neis): crbug.com/831822 if (top_range == nullptr || top_range->IsEmpty()) continue; @@ -4859,6 +4880,7 @@ void LiveRangeConnector::ResolveControlFlow(Zone* local_zone) { BitVector* live = live_in_sets[block->rpo_number().ToInt()]; BitVector::Iterator iterator(live); while (!iterator.Done()) { + data()->tick_counter()->DoTick(); int vreg = iterator.Current(); LiveRangeBoundArray* array = finder.ArrayFor(vreg); for (const RpoNumber& pred : block->predecessors()) { @@ -5130,6 +5152,7 @@ void LiveRangeConnector::CommitSpillsInDeferredBlocks( } #undef TRACE +#undef TRACE_COND } // namespace compiler } // namespace internal diff --git a/deps/v8/src/compiler/backend/register-allocator.h b/deps/v8/src/compiler/backend/register-allocator.h index 8929fb2ee6870f..55f8a8dd1f608a 100644 --- a/deps/v8/src/compiler/backend/register-allocator.h +++ b/deps/v8/src/compiler/backend/register-allocator.h @@ -16,6 +16,9 @@ namespace v8 { namespace internal { + +class TickCounter; + namespace compiler { static const int32_t kUnassignedRegister = RegisterConfiguration::kMaxRegisters; @@ -175,7 +178,8 @@ std::ostream& operator<<(std::ostream& os, const LifetimePosition pos); enum class RegisterAllocationFlag : unsigned { kTurboControlFlowAwareAllocation = 1 << 0, - kTurboPreprocessRanges = 1 << 1 + kTurboPreprocessRanges = 1 << 1, + kTraceAllocation = 1 << 2 }; using RegisterAllocationFlags = base::Flags; @@ -198,6 +202,10 @@ class RegisterAllocationData final : public ZoneObject { return flags_ & RegisterAllocationFlag::kTurboPreprocessRanges; } + bool is_trace_alloc() { + return flags_ & RegisterAllocationFlag::kTraceAllocation; + } + static constexpr int kNumberOfFixedRangesPerRegister = 2; class PhiMapValue : public ZoneObject { @@ -238,6 +246,7 @@ class RegisterAllocationData final : public ZoneObject { Zone* allocation_zone, Frame* frame, InstructionSequence* code, RegisterAllocationFlags flags, + TickCounter* tick_counter, const char* debug_name = nullptr); const ZoneVector& live_ranges() const { @@ -328,6 +337,8 @@ class RegisterAllocationData final : public ZoneObject { void ResetSpillState() { spill_state_.clear(); } + TickCounter* tick_counter() { return tick_counter_; } + private: int GetNextLiveRangeId(); @@ -354,6 +365,7 @@ class RegisterAllocationData final : public ZoneObject { RangesWithPreassignedSlots preassigned_slot_ranges_; ZoneVector> spill_state_; RegisterAllocationFlags flags_; + TickCounter* const tick_counter_; DISALLOW_COPY_AND_ASSIGN(RegisterAllocationData); }; @@ -741,7 +753,7 @@ class LiveRangeBundle : public ZoneObject { : ranges_(zone), uses_(zone), id_(id) {} bool TryAddRange(LiveRange* range); - bool TryMerge(LiveRangeBundle* other); + bool TryMerge(LiveRangeBundle* other, bool trace_alloc); ZoneSet ranges_; ZoneSet uses_; @@ -785,12 +797,14 @@ class V8_EXPORT_PRIVATE TopLevelLiveRange final : public LiveRange { SlotUseKind slot_use_kind() const { return HasSlotUseField::decode(bits_); } // Add a new interval or a new use position to this live range. 
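// The declarations below grow a bool trace_alloc parameter; its ultimate
// source is the new kTraceAllocation bit above, which is_trace_alloc() tests
// with a plain mask. A self-contained sketch of that flag pattern
// (illustrative names, not the base::Flags template V8 actually uses):
enum FlagBitsSketch : unsigned {
  kControlFlowAwareBit = 1u << 0,
  kPreprocessRangesBit = 1u << 1,
  kTraceAllocationBit = 1u << 2,
};
inline bool IsTraceAlloc(unsigned flags) {
  return (flags & kTraceAllocationBit) != 0;
}
// IsTraceAlloc(kPreprocessRangesBit | kTraceAllocationBit) -> true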
-  void EnsureInterval(LifetimePosition start, LifetimePosition end, Zone* zone);
-  void AddUseInterval(LifetimePosition start, LifetimePosition end, Zone* zone);
-  void AddUsePosition(UsePosition* pos);
+  void EnsureInterval(LifetimePosition start, LifetimePosition end, Zone* zone,
+                      bool trace_alloc);
+  void AddUseInterval(LifetimePosition start, LifetimePosition end, Zone* zone,
+                      bool trace_alloc);
+  void AddUsePosition(UsePosition* pos, bool trace_alloc);

   // Shorten the most recently added interval by setting a new start.
-  void ShortenTo(LifetimePosition start);
+  void ShortenTo(LifetimePosition start, bool trace_alloc);

   // Detaches between start and end, and attributes the resulting range to
   // result.
@@ -1279,11 +1293,13 @@ class LinearScanAllocator final : public RegisterAllocator {
                          RangeWithRegister::Equals>;

   void MaybeUndoPreviousSplit(LiveRange* range);
-  void SpillNotLiveRanges(RangeWithRegisterSet& to_be_live,
-                          LifetimePosition position, SpillMode spill_mode);
+  void SpillNotLiveRanges(
+      RangeWithRegisterSet& to_be_live,  // NOLINT(runtime/references)
+      LifetimePosition position, SpillMode spill_mode);
   LiveRange* AssignRegisterOnReload(LiveRange* range, int reg);
-  void ReloadLiveRanges(RangeWithRegisterSet& to_be_live,
-                        LifetimePosition position);
+  void ReloadLiveRanges(
+      RangeWithRegisterSet& to_be_live,  // NOLINT(runtime/references)
+      LifetimePosition position);

   void UpdateDeferredFixedRanges(SpillMode spill_mode, InstructionBlock* block);
   bool BlockIsDeferredOrImmediatePredecessorIsNotDeferred(
diff --git a/deps/v8/src/compiler/backend/s390/code-generator-s390.cc b/deps/v8/src/compiler/backend/s390/code-generator-s390.cc
index 595800268d8fcc..6457b7c8b44493 100644
--- a/deps/v8/src/compiler/backend/s390/code-generator-s390.cc
+++ b/deps/v8/src/compiler/backend/s390/code-generator-s390.cc
@@ -73,6 +73,7 @@ class S390OperandConverter final : public InstructionOperandConverter {
       case Constant::kDelayedStringConstant:
         return Operand::EmbeddedStringConstant(
             constant.ToDelayedStringConstant());
+      case Constant::kCompressedHeapObject:
       case Constant::kHeapObject:
       case Constant::kRpoNumber:
         break;
@@ -1245,8 +1246,9 @@ void AdjustStackPointerForTailCall(
   }
 }

-void EmitWordLoadPoisoningIfNeeded(CodeGenerator* codegen, Instruction* instr,
-                                   S390OperandConverter& i) {
+void EmitWordLoadPoisoningIfNeeded(
+    CodeGenerator* codegen, Instruction* instr,
+    S390OperandConverter& i) {  // NOLINT(runtime/references)
   const MemoryAccessMode access_mode =
       static_cast<MemoryAccessMode>(MiscField::decode(instr->opcode()));
   if (access_mode == kMemoryAccessPoisoned) {
@@ -1380,8 +1382,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
     }
     case kArchCallBuiltinPointer: {
       DCHECK(!instr->InputAt(0)->IsImmediate());
-      Register builtin_pointer = i.InputRegister(0);
-      __ CallBuiltinPointer(builtin_pointer);
+      Register builtin_index = i.InputRegister(0);
+      __ CallBuiltinByIndex(builtin_index);
       RecordCallPosition(instr);
       frame_access_state()->ClearSPDelta();
       break;
@@ -1509,6 +1511,13 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
       break;
     case kArchCallCFunction: {
       int const num_parameters = MiscField::decode(instr->opcode());
+      Label return_location;
+      // Put the return address in a stack slot.
+      if (linkage()->GetIncomingDescriptor()->IsWasmCapiFunction()) {
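// What the sequence below accomplishes (descriptive note; MIPS64 uses
// LoadAddress and PPC a LoadPC/offset-12 dance for the same purpose): larl,
// Load Address Relative Long, materializes the PC-relative address of
// return_location (the point just after the C call), and StoreP parks it at
// kCallingPCOffset in the exit frame, so a stack walker running during the
// C call can attribute the frame to this Wasm C-API call site. The
// RecordSafepoint at the bind further down pairs with this.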
+ __ larl(r0, &return_location); + __ StoreP(r0, MemOperand(fp, WasmExitFrameConstants::kCallingPCOffset)); + } if (instr->InputAt(0)->IsImmediate()) { ExternalReference ref = i.InputExternalReference(0); __ CallCFunction(ref, num_parameters); @@ -1516,6 +1525,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( Register func = i.InputRegister(0); __ CallCFunction(func, num_parameters); } + __ bind(&return_location); + RecordSafepoint(instr->reference_map(), Safepoint::kNoLazyDeopt); frame_access_state()->SetFrameAccessToDefault(); // Ideally, we should decrement SP delta to match the change of stack // pointer in CallCFunction. However, for certain architectures (e.g. @@ -1547,22 +1558,20 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( case kArchTableSwitch: AssembleArchTableSwitch(instr); break; - case kArchDebugAbort: + case kArchAbortCSAAssert: DCHECK(i.InputRegister(0) == r3); - if (!frame_access_state()->has_frame()) { + { // We don't actually want to generate a pile of code for this, so just // claim there is a stack frame, without generating one. FrameScope scope(tasm(), StackFrame::NONE); - __ Call(isolate()->builtins()->builtin_handle(Builtins::kAbortJS), - RelocInfo::CODE_TARGET); - } else { - __ Call(isolate()->builtins()->builtin_handle(Builtins::kAbortJS), - RelocInfo::CODE_TARGET); + __ Call( + isolate()->builtins()->builtin_handle(Builtins::kAbortCSAAssert), + RelocInfo::CODE_TARGET); } - __ stop("kArchDebugAbort"); + __ stop(); break; case kArchDebugBreak: - __ stop("kArchDebugBreak"); + __ stop(); break; case kArchNop: case kArchThrowTerminator: @@ -2891,7 +2900,7 @@ void CodeGenerator::AssembleArchTrap(Instruction* instr, new (gen_->zone()) ReferenceMap(gen_->zone()); gen_->RecordSafepoint(reference_map, Safepoint::kNoLazyDeopt); if (FLAG_debug_code) { - __ stop(GetAbortReason(AbortReason::kUnexpectedReturnFromWasmTrap)); + __ stop(); } } } @@ -3014,8 +3023,14 @@ void CodeGenerator::AssembleConstructFrame() { if (frame_access_state()->has_frame()) { if (call_descriptor->IsCFunctionCall()) { - __ Push(r14, fp); - __ LoadRR(fp, sp); + if (info()->GetOutputStackFrameType() == StackFrame::C_WASM_ENTRY) { + __ StubPrologue(StackFrame::C_WASM_ENTRY); + // Reserve stack space for saving the c_entry_fp later. + __ lay(sp, MemOperand(sp, -kSystemPointerSize)); + } else { + __ Push(r14, fp); + __ LoadRR(fp, sp); + } } else if (call_descriptor->IsJSFunctionCall()) { __ Prologue(ip); if (call_descriptor->PushArgumentCount()) { @@ -3028,7 +3043,8 @@ void CodeGenerator::AssembleConstructFrame() { __ StubPrologue(type); if (call_descriptor->IsWasmFunctionCall()) { __ Push(kWasmInstanceRegister); - } else if (call_descriptor->IsWasmImportWrapper()) { + } else if (call_descriptor->IsWasmImportWrapper() || + call_descriptor->IsWasmCapiFunction()) { // WASM import wrappers are passed a tuple in the place of the instance. // Unpack the tuple into the instance and the target callable. // This must be done here in the codegen because it cannot be expressed @@ -3038,12 +3054,16 @@ void CodeGenerator::AssembleConstructFrame() { __ LoadP(kWasmInstanceRegister, FieldMemOperand(kWasmInstanceRegister, Tuple2::kValue1Offset)); __ Push(kWasmInstanceRegister); + if (call_descriptor->IsWasmCapiFunction()) { + // Reserve space for saving the PC later. 
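// Layout note for the slot reserved below (illustrative, not to scale):
// after StubPrologue and the pushed kWasmInstanceRegister, one extra
// pointer-sized slot is left under sp so the kArchCallCFunction code above
// has a fixed home (kCallingPCOffset) for the calling PC:
//   fp -> [ frame marker / saved state ]
//         [ wasm instance              ]
//   sp -> [ reserved: calling PC       ]   // sp -= kSystemPointerSize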
+ __ lay(sp, MemOperand(sp, -kSystemPointerSize)); + } } } } - int required_slots = frame()->GetTotalFrameSlotCount() - - call_descriptor->CalculateFixedFrameSize(); + int required_slots = + frame()->GetTotalFrameSlotCount() - frame()->GetFixedSlotCount(); if (info()->is_osr()) { // TurboFan OSR-compiled functions cannot be entered directly. __ Abort(AbortReason::kShouldNotDirectlyEnterOsrFunction); @@ -3089,7 +3109,7 @@ void CodeGenerator::AssembleConstructFrame() { ReferenceMap* reference_map = new (zone()) ReferenceMap(zone()); RecordSafepoint(reference_map, Safepoint::kNoLazyDeopt); if (FLAG_debug_code) { - __ stop(GetAbortReason(AbortReason::kUnexpectedReturnFromThrow)); + __ stop(); } __ bind(&done); @@ -3247,6 +3267,9 @@ void CodeGenerator::AssembleMove(InstructionOperand* source, } break; } + case Constant::kCompressedHeapObject: + UNREACHABLE(); + break; case Constant::kRpoNumber: UNREACHABLE(); // TODO(dcarney): loading RPO constants on S390. break; diff --git a/deps/v8/src/compiler/backend/s390/instruction-selector-s390.cc b/deps/v8/src/compiler/backend/s390/instruction-selector-s390.cc index d982605efc30d9..99d3b0fa0f0acf 100644 --- a/deps/v8/src/compiler/backend/s390/instruction-selector-s390.cc +++ b/deps/v8/src/compiler/backend/s390/instruction-selector-s390.cc @@ -447,11 +447,13 @@ void VisitTryTruncateDouble(InstructionSelector* selector, ArchOpcode opcode, #endif template -void GenerateRightOperands(InstructionSelector* selector, Node* node, - Node* right, InstructionCode& opcode, - OperandModes& operand_mode, - InstructionOperand* inputs, size_t& input_count, - CanCombineWithLoad canCombineWithLoad) { +void GenerateRightOperands( + InstructionSelector* selector, Node* node, Node* right, + InstructionCode& opcode, // NOLINT(runtime/references) + OperandModes& operand_mode, // NOLINT(runtime/references) + InstructionOperand* inputs, + size_t& input_count, // NOLINT(runtime/references) + CanCombineWithLoad canCombineWithLoad) { S390OperandGenerator g(selector); if ((operand_mode & OperandMode::kAllowImmediate) && @@ -491,11 +493,13 @@ void GenerateRightOperands(InstructionSelector* selector, Node* node, } template -void GenerateBinOpOperands(InstructionSelector* selector, Node* node, - Node* left, Node* right, InstructionCode& opcode, - OperandModes& operand_mode, - InstructionOperand* inputs, size_t& input_count, - CanCombineWithLoad canCombineWithLoad) { +void GenerateBinOpOperands( + InstructionSelector* selector, Node* node, Node* left, Node* right, + InstructionCode& opcode, // NOLINT(runtime/references) + OperandModes& operand_mode, // NOLINT(runtime/references) + InstructionOperand* inputs, + size_t& input_count, // NOLINT(runtime/references) + CanCombineWithLoad canCombineWithLoad) { S390OperandGenerator g(selector); // left is always register InstructionOperand const left_input = g.UseRegister(left); @@ -686,9 +690,9 @@ void InstructionSelector::VisitStackSlot(Node* node) { sequence()->AddImmediate(Constant(slot)), 0, nullptr); } -void InstructionSelector::VisitDebugAbort(Node* node) { +void InstructionSelector::VisitAbortCSAAssert(Node* node) { S390OperandGenerator g(this); - Emit(kArchDebugAbort, g.NoOutput(), g.UseFixed(node->InputAt(0), r3)); + Emit(kArchAbortCSAAssert, g.NoOutput(), g.UseFixed(node->InputAt(0), r3)); } void InstructionSelector::VisitLoad(Node* node) { @@ -2194,6 +2198,11 @@ void InstructionSelector::EmitPrepareArguments( } } +void InstructionSelector::VisitMemoryBarrier(Node* node) { + S390OperandGenerator g(this); + Emit(kArchNop, 
g.NoOutput()); +} + bool InstructionSelector::IsTailCallAddressImmediate() { return false; } int InstructionSelector::GetTempsCountForTailCallFromJSFunction() { return 3; } diff --git a/deps/v8/src/compiler/backend/unwinding-info-writer.h b/deps/v8/src/compiler/backend/unwinding-info-writer.h index 590a839a06f27a..d3a52b34b7712a 100644 --- a/deps/v8/src/compiler/backend/unwinding-info-writer.h +++ b/deps/v8/src/compiler/backend/unwinding-info-writer.h @@ -23,6 +23,7 @@ namespace v8 { namespace internal { class EhFrameWriter; +class Zone; namespace compiler { diff --git a/deps/v8/src/compiler/backend/x64/code-generator-x64.cc b/deps/v8/src/compiler/backend/x64/code-generator-x64.cc index c6667292fc3a04..a108edeff0e592 100644 --- a/deps/v8/src/compiler/backend/x64/code-generator-x64.cc +++ b/deps/v8/src/compiler/backend/x64/code-generator-x64.cc @@ -349,7 +349,8 @@ class WasmProtectedInstructionTrap final : public WasmOutOfLineTrap { void EmitOOLTrapIfNeeded(Zone* zone, CodeGenerator* codegen, InstructionCode opcode, Instruction* instr, - X64OperandConverter& i, int pc) { + X64OperandConverter& i, // NOLINT(runtime/references) + int pc) { const MemoryAccessMode access_mode = static_cast(MiscField::decode(opcode)); if (access_mode == kMemoryAccessProtected) { @@ -357,9 +358,9 @@ void EmitOOLTrapIfNeeded(Zone* zone, CodeGenerator* codegen, } } -void EmitWordLoadPoisoningIfNeeded(CodeGenerator* codegen, - InstructionCode opcode, Instruction* instr, - X64OperandConverter& i) { +void EmitWordLoadPoisoningIfNeeded( + CodeGenerator* codegen, InstructionCode opcode, Instruction* instr, + X64OperandConverter& i) { // NOLINT(runtime/references) const MemoryAccessMode access_mode = static_cast(MiscField::decode(opcode)); if (access_mode == kMemoryAccessPoisoned) { @@ -575,6 +576,19 @@ void EmitWordLoadPoisoningIfNeeded(CodeGenerator* codegen, __ opcode(i.OutputSimd128Register(), i.InputSimd128Register(1), imm); \ } while (false) +#define ASSEMBLE_SIMD_ALL_TRUE(opcode) \ + do { \ + CpuFeatureScope sse_scope(tasm(), SSE4_1); \ + Register dst = i.OutputRegister(); \ + Register tmp = i.TempRegister(0); \ + __ movq(tmp, Immediate(1)); \ + __ xorq(dst, dst); \ + __ pxor(kScratchDoubleReg, kScratchDoubleReg); \ + __ opcode(kScratchDoubleReg, i.InputSimd128Register(0)); \ + __ ptest(kScratchDoubleReg, kScratchDoubleReg); \ + __ cmovq(zero, dst, tmp); \ + } while (false) + void CodeGenerator::AssembleDeconstructFrame() { unwinding_info_writer_.MarkFrameDeconstructed(__ pc_offset()); __ movq(rsp, rbp); @@ -752,8 +766,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( } case kArchCallBuiltinPointer: { DCHECK(!HasImmediateInput(instr, 0)); - Register builtin_pointer = i.InputRegister(0); - __ CallBuiltinPointer(builtin_pointer); + Register builtin_index = i.InputRegister(0); + __ CallBuiltinByIndex(builtin_index); RecordCallPosition(instr); frame_access_state()->ClearSPDelta(); break; @@ -952,17 +966,15 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( case kArchComment: __ RecordComment(reinterpret_cast(i.InputInt64(0))); break; - case kArchDebugAbort: + case kArchAbortCSAAssert: DCHECK(i.InputRegister(0) == rdx); - if (!frame_access_state()->has_frame()) { + { // We don't actually want to generate a pile of code for this, so just // claim there is a stack frame, without generating one. 
FrameScope scope(tasm(), StackFrame::NONE); - __ Call(isolate()->builtins()->builtin_handle(Builtins::kAbortJS), - RelocInfo::CODE_TARGET); - } else { - __ Call(isolate()->builtins()->builtin_handle(Builtins::kAbortJS), - RelocInfo::CODE_TARGET); + __ Call( + isolate()->builtins()->builtin_handle(Builtins::kAbortCSAAssert), + RelocInfo::CODE_TARGET); } __ int3(); unwinding_info_writer_.MarkBlockWillExit(); @@ -1029,9 +1041,6 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( OutOfLineRecordWrite(this, object, operand, value, scratch0, scratch1, mode, DetermineStubCallMode()); __ StoreTaggedField(operand, value); - if (COMPRESS_POINTERS_BOOL) { - __ DecompressTaggedPointer(object, object); - } __ CheckPageFlag(object, scratch0, MemoryChunk::kPointersFromHereAreInterestingMask, not_zero, ool->entry()); @@ -1042,7 +1051,10 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( DCHECK_EQ(i.OutputRegister(), i.InputRegister(0)); __ andq(i.InputRegister(0), kSpeculationPoisonRegister); break; - case kLFence: + case kX64MFence: + __ mfence(); + break; + case kX64LFence: __ lfence(); break; case kArchStackSlot: { @@ -1309,16 +1321,16 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( break; case kSSEFloat32Abs: { // TODO(bmeurer): Use RIP relative 128-bit constants. - __ pcmpeqd(kScratchDoubleReg, kScratchDoubleReg); - __ psrlq(kScratchDoubleReg, 33); - __ andps(i.OutputDoubleRegister(), kScratchDoubleReg); + __ Pcmpeqd(kScratchDoubleReg, kScratchDoubleReg); + __ Psrlq(kScratchDoubleReg, 33); + __ Andps(i.OutputDoubleRegister(), kScratchDoubleReg); break; } case kSSEFloat32Neg: { // TODO(bmeurer): Use RIP relative 128-bit constants. - __ pcmpeqd(kScratchDoubleReg, kScratchDoubleReg); - __ psllq(kScratchDoubleReg, 31); - __ xorps(i.OutputDoubleRegister(), kScratchDoubleReg); + __ Pcmpeqd(kScratchDoubleReg, kScratchDoubleReg); + __ Psllq(kScratchDoubleReg, 31); + __ Xorps(i.OutputDoubleRegister(), kScratchDoubleReg); break; } case kSSEFloat32Sqrt: @@ -1517,18 +1529,20 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( __ bind(ool->exit()); break; } + case kX64F64x2Abs: case kSSEFloat64Abs: { // TODO(bmeurer): Use RIP relative 128-bit constants. - __ pcmpeqd(kScratchDoubleReg, kScratchDoubleReg); - __ psrlq(kScratchDoubleReg, 1); - __ andpd(i.OutputDoubleRegister(), kScratchDoubleReg); + __ Pcmpeqd(kScratchDoubleReg, kScratchDoubleReg); + __ Psrlq(kScratchDoubleReg, 1); + __ Andpd(i.OutputDoubleRegister(), kScratchDoubleReg); break; } + case kX64F64x2Neg: case kSSEFloat64Neg: { // TODO(bmeurer): Use RIP relative 128-bit constants. 
- __ pcmpeqd(kScratchDoubleReg, kScratchDoubleReg); - __ psllq(kScratchDoubleReg, 63); - __ xorpd(i.OutputDoubleRegister(), kScratchDoubleReg); + __ Pcmpeqd(kScratchDoubleReg, kScratchDoubleReg); + __ Psllq(kScratchDoubleReg, 63); + __ Xorpd(i.OutputDoubleRegister(), kScratchDoubleReg); break; } case kSSEFloat64Sqrt: @@ -1944,16 +1958,19 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( case kX64MovqDecompressTaggedSigned: { CHECK(instr->HasOutput()); __ DecompressTaggedSigned(i.OutputRegister(), i.MemoryOperand()); + EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i); break; } case kX64MovqDecompressTaggedPointer: { CHECK(instr->HasOutput()); __ DecompressTaggedPointer(i.OutputRegister(), i.MemoryOperand()); + EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i); break; } case kX64MovqDecompressAnyTagged: { CHECK(instr->HasOutput()); __ DecompressAnyTagged(i.OutputRegister(), i.MemoryOperand()); + EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i); break; } case kX64MovqCompressTagged: { @@ -1970,16 +1987,19 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( case kX64DecompressSigned: { CHECK(instr->HasOutput()); ASSEMBLE_MOVX(DecompressTaggedSigned); + EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i); break; } case kX64DecompressPointer: { CHECK(instr->HasOutput()); ASSEMBLE_MOVX(DecompressTaggedPointer); + EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i); break; } case kX64DecompressAny: { CHECK(instr->HasOutput()); ASSEMBLE_MOVX(DecompressAnyTagged); + EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i); break; } case kX64CompressSigned: // Fall through. @@ -2006,11 +2026,11 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( case kX64Movss: EmitOOLTrapIfNeeded(zone(), this, opcode, instr, i, __ pc_offset()); if (instr->HasOutput()) { - __ movss(i.OutputDoubleRegister(), i.MemoryOperand()); + __ Movss(i.OutputDoubleRegister(), i.MemoryOperand()); } else { size_t index = 0; Operand operand = i.MemoryOperand(&index); - __ movss(operand, i.InputDoubleRegister(index)); + __ Movss(operand, i.InputDoubleRegister(index)); } break; case kX64Movsd: { @@ -2039,11 +2059,11 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( CpuFeatureScope sse_scope(tasm(), SSSE3); EmitOOLTrapIfNeeded(zone(), this, opcode, instr, i, __ pc_offset()); if (instr->HasOutput()) { - __ movdqu(i.OutputSimd128Register(), i.MemoryOperand()); + __ Movdqu(i.OutputSimd128Register(), i.MemoryOperand()); } else { size_t index = 0; Operand operand = i.MemoryOperand(&index); - __ movdqu(operand, i.InputSimd128Register(index)); + __ Movdqu(operand, i.InputSimd128Register(index)); } break; } @@ -2065,7 +2085,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( if (instr->InputAt(0)->IsRegister()) { __ Movd(i.OutputDoubleRegister(), i.InputRegister(0)); } else { - __ movss(i.OutputDoubleRegister(), i.InputOperand(0)); + __ Movss(i.OutputDoubleRegister(), i.InputOperand(0)); } break; case kX64BitcastLD: @@ -2235,6 +2255,51 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( } break; } + case kX64F64x2Splat: { + XMMRegister dst = i.OutputSimd128Register(); + if (instr->InputAt(0)->IsFPRegister()) { + __ pshufd(dst, i.InputDoubleRegister(0), 0x44); + } else { + __ pshufd(dst, i.InputOperand(0), 0x44); + } + break; + } + case kX64F64x2ReplaceLane: { + CpuFeatureScope sse_scope(tasm(), SSE4_1); + if (instr->InputAt(2)->IsFPRegister()) { + __ movq(kScratchRegister, i.InputDoubleRegister(2)); + __ 
pinsrq(i.OutputSimd128Register(), kScratchRegister, i.InputInt8(1)); + } else { + __ pinsrq(i.OutputSimd128Register(), i.InputOperand(2), i.InputInt8(1)); + } + break; + } + case kX64F64x2ExtractLane: { + CpuFeatureScope sse_scope(tasm(), SSE4_1); + __ pextrq(kScratchRegister, i.InputSimd128Register(0), i.InputInt8(1)); + __ movq(i.OutputDoubleRegister(), kScratchRegister); + break; + } + case kX64F64x2Eq: { + DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0)); + __ cmpeqpd(i.OutputSimd128Register(), i.InputSimd128Register(1)); + break; + } + case kX64F64x2Ne: { + DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0)); + __ cmpneqpd(i.OutputSimd128Register(), i.InputSimd128Register(1)); + break; + } + case kX64F64x2Lt: { + DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0)); + __ cmpltpd(i.OutputSimd128Register(), i.InputSimd128Register(1)); + break; + } + case kX64F64x2Le: { + DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0)); + __ cmplepd(i.OutputSimd128Register(), i.InputSimd128Register(1)); + break; + } // TODO(gdeepti): Get rid of redundant moves for F32x4Splat/Extract below case kX64F32x4Splat: { XMMRegister dst = i.OutputSimd128Register(); @@ -2400,6 +2465,171 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( __ cmpleps(i.OutputSimd128Register(), i.InputSimd128Register(1)); break; } + case kX64I64x2Splat: { + XMMRegister dst = i.OutputSimd128Register(); + if (instr->InputAt(0)->IsRegister()) { + __ movq(dst, i.InputRegister(0)); + } else { + __ movq(dst, i.InputOperand(0)); + } + __ pshufd(dst, dst, 0x44); + break; + } + case kX64I64x2ExtractLane: { + CpuFeatureScope sse_scope(tasm(), SSE4_1); + __ pextrq(i.OutputRegister(), i.InputSimd128Register(0), i.InputInt8(1)); + break; + } + case kX64I64x2ReplaceLane: { + CpuFeatureScope sse_scope(tasm(), SSE4_1); + if (instr->InputAt(2)->IsRegister()) { + __ pinsrq(i.OutputSimd128Register(), i.InputRegister(2), + i.InputInt8(1)); + } else { + __ pinsrq(i.OutputSimd128Register(), i.InputOperand(2), i.InputInt8(1)); + } + break; + } + case kX64I64x2Neg: { + XMMRegister dst = i.OutputSimd128Register(); + XMMRegister src = i.InputSimd128Register(0); + if (dst == src) { + __ movapd(kScratchDoubleReg, src); + src = kScratchDoubleReg; + } + __ pxor(dst, dst); + __ psubq(dst, src); + break; + } + case kX64I64x2Shl: { + __ psllq(i.OutputSimd128Register(), i.InputInt8(1)); + break; + } + case kX64I64x2ShrS: { + // TODO(zhin): there is vpsraq but requires AVX512 + CpuFeatureScope sse_scope(tasm(), SSE4_1); + // ShrS on each quadword one at a time + XMMRegister dst = i.OutputSimd128Register(); + XMMRegister src = i.InputSimd128Register(0); + + // lower quadword + __ pextrq(kScratchRegister, src, 0x0); + __ sarq(kScratchRegister, Immediate(i.InputInt8(1))); + __ pinsrq(dst, kScratchRegister, 0x0); + + // upper quadword + __ pextrq(kScratchRegister, src, 0x1); + __ sarq(kScratchRegister, Immediate(i.InputInt8(1))); + __ pinsrq(dst, kScratchRegister, 0x1); + break; + } + case kX64I64x2Add: { + DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0)); + __ paddq(i.OutputSimd128Register(), i.InputSimd128Register(1)); + break; + } + case kX64I64x2Sub: { + DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0)); + __ psubq(i.OutputSimd128Register(), i.InputSimd128Register(1)); + break; + } + case kX64I64x2Mul: { + DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0)); + CpuFeatureScope sse_scope(tasm(), SSE4_1); + XMMRegister left = i.InputSimd128Register(0); + 
XMMRegister right = i.InputSimd128Register(1); + XMMRegister tmp1 = i.ToSimd128Register(instr->TempAt(0)); + XMMRegister tmp2 = i.ToSimd128Register(instr->TempAt(1)); + + __ movaps(tmp1, left); + __ movaps(tmp2, right); + + // Multiply high dword of each qword of left with right. + __ psrlq(tmp1, 32); + __ pmuludq(tmp1, right); + + // Multiply high dword of each qword of right with left. + __ psrlq(tmp2, 32); + __ pmuludq(tmp2, left); + + __ paddq(tmp2, tmp1); + __ psllq(tmp2, 32); + + __ pmuludq(left, right); + __ paddq(left, tmp2); // left == dst + break; + } + case kX64I64x2Eq: { + DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0)); + CpuFeatureScope sse_scope(tasm(), SSE4_1); + __ pcmpeqq(i.OutputSimd128Register(), i.InputSimd128Register(1)); + break; + } + case kX64I64x2Ne: { + DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0)); + CpuFeatureScope sse_scope(tasm(), SSE4_1); + __ pcmpeqq(i.OutputSimd128Register(), i.InputSimd128Register(1)); + __ pcmpeqq(kScratchDoubleReg, kScratchDoubleReg); + __ pxor(i.OutputSimd128Register(), kScratchDoubleReg); + break; + } + case kX64I64x2GtS: { + DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0)); + CpuFeatureScope sse_scope(tasm(), SSE4_2); + __ pcmpgtq(i.OutputSimd128Register(), i.InputSimd128Register(1)); + break; + } + case kX64I64x2GeS: { + DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0)); + CpuFeatureScope sse_scope(tasm(), SSE4_2); + XMMRegister dst = i.OutputSimd128Register(); + XMMRegister src = i.InputSimd128Register(1); + XMMRegister tmp = i.ToSimd128Register(instr->TempAt(0)); + + __ movaps(tmp, src); + __ pcmpgtq(tmp, dst); + __ pcmpeqd(dst, dst); + __ pxor(dst, tmp); + break; + } + case kX64I64x2ShrU: { + __ psrlq(i.OutputSimd128Register(), i.InputInt8(1)); + break; + } + case kX64I64x2GtU: { + DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0)); + CpuFeatureScope sse_scope(tasm(), SSE4_2); + XMMRegister dst = i.OutputSimd128Register(); + XMMRegister src = i.InputSimd128Register(1); + XMMRegister tmp = i.ToSimd128Register(instr->TempAt(0)); + + __ pcmpeqd(kScratchDoubleReg, kScratchDoubleReg); + __ psllq(kScratchDoubleReg, 63); + + __ movaps(tmp, src); + __ pxor(tmp, kScratchDoubleReg); + __ pxor(dst, kScratchDoubleReg); + __ pcmpgtq(dst, tmp); + break; + } + case kX64I64x2GeU: { + DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0)); + CpuFeatureScope sse_scope(tasm(), SSE4_2); + XMMRegister dst = i.OutputSimd128Register(); + XMMRegister src = i.InputSimd128Register(1); + XMMRegister tmp = i.ToSimd128Register(instr->TempAt(0)); + + __ pcmpeqd(kScratchDoubleReg, kScratchDoubleReg); + __ psllq(kScratchDoubleReg, 63); + + __ movaps(tmp, src); + __ pxor(dst, kScratchDoubleReg); + __ pxor(tmp, kScratchDoubleReg); + __ pcmpgtq(tmp, dst); + __ pcmpeqd(dst, dst); + __ pxor(dst, tmp); + break; + } case kX64I32x4Splat: { XMMRegister dst = i.OutputSimd128Register(); if (instr->InputAt(0)->IsRegister()) { @@ -3297,6 +3527,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( __ por(dst, kScratchDoubleReg); break; } + case kX64S1x2AnyTrue: case kX64S1x4AnyTrue: case kX64S1x8AnyTrue: case kX64S1x16AnyTrue: { @@ -3310,19 +3541,24 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( __ cmovq(zero, dst, tmp); break; } - case kX64S1x4AllTrue: - case kX64S1x8AllTrue: + // Need to split up all the different lane structures because the + // comparison instruction used matters, e.g. 
given 0xff00, pcmpeqb returns + // 0x0011, pcmpeqw returns 0x0000, ptest will set ZF to 0 and 1 + // respectively. + case kX64S1x2AllTrue: { + ASSEMBLE_SIMD_ALL_TRUE(pcmpeqq); + break; + } + case kX64S1x4AllTrue: { + ASSEMBLE_SIMD_ALL_TRUE(pcmpeqd); + break; + } + case kX64S1x8AllTrue: { + ASSEMBLE_SIMD_ALL_TRUE(pcmpeqw); + break; + } case kX64S1x16AllTrue: { - CpuFeatureScope sse_scope(tasm(), SSE4_1); - Register dst = i.OutputRegister(); - XMMRegister src = i.InputSimd128Register(0); - Register tmp = i.TempRegister(0); - __ movq(tmp, Immediate(1)); - __ xorq(dst, dst); - __ pcmpeqd(kScratchDoubleReg, kScratchDoubleReg); - __ pxor(kScratchDoubleReg, src); - __ ptest(kScratchDoubleReg, kScratchDoubleReg); - __ cmovq(zero, dst, tmp); + ASSEMBLE_SIMD_ALL_TRUE(pcmpeqb); break; } case kX64StackCheck: @@ -3507,6 +3743,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( #undef ASSEMBLE_SIMD_IMM_INSTR #undef ASSEMBLE_SIMD_PUNPCK_SHUFFLE #undef ASSEMBLE_SIMD_IMM_SHUFFLE +#undef ASSEMBLE_SIMD_ALL_TRUE namespace { @@ -3734,6 +3971,11 @@ void CodeGenerator::AssembleConstructFrame() { if (call_descriptor->IsCFunctionCall()) { __ pushq(rbp); __ movq(rbp, rsp); + if (info()->GetOutputStackFrameType() == StackFrame::C_WASM_ENTRY) { + __ Push(Immediate(StackFrame::TypeToMarker(StackFrame::C_WASM_ENTRY))); + // Reserve stack space for saving the c_entry_fp later. + __ AllocateStackSpace(kSystemPointerSize); + } } else if (call_descriptor->IsJSFunctionCall()) { __ Prologue(); if (call_descriptor->PushArgumentCount()) { @@ -3765,8 +4007,8 @@ void CodeGenerator::AssembleConstructFrame() { unwinding_info_writer_.MarkFrameConstructed(pc_base); } - int required_slots = frame()->GetTotalFrameSlotCount() - - call_descriptor->CalculateFixedFrameSize(); + int required_slots = + frame()->GetTotalFrameSlotCount() - frame()->GetFixedSlotCount(); if (info()->is_osr()) { // TurboFan OSR-compiled functions cannot be entered directly. 
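The kX64I64x2Mul sequence earlier in this hunk synthesizes a packed 64x64->64-bit multiply out of 32x32->64-bit `pmuludq` products, since SSE has no packed 64-bit multiply (`vpmullq` needs AVX-512DQ). Per lane it relies on the identity: writing a = a_hi*2^32 + a_lo and b = b_hi*2^32 + b_lo, the low 64 bits of a*b equal ((a_hi*b_lo + a_lo*b_hi) << 32) + a_lo*b_lo, mod 2^64. A minimal scalar sketch of that identity (illustration only, not V8 code):

```cpp
#include <cassert>
#include <cstdint>

// Scalar model of one lane of kX64I64x2Mul: the two cross products of the
// 32-bit halves (tmp1/tmp2 after psrlq + pmuludq), summed and shifted up
// (paddq + psllq), plus the low product (the final pmuludq + paddq).
uint64_t MulLikeI64x2(uint64_t a, uint64_t b) {
  uint64_t a_lo = a & 0xFFFFFFFFu, a_hi = a >> 32;
  uint64_t b_lo = b & 0xFFFFFFFFu, b_hi = b >> 32;
  uint64_t cross = a_hi * b_lo + a_lo * b_hi;  // overflow here is shifted out
  return (cross << 32) + a_lo * b_lo;
}

int main() {
  assert(MulLikeI64x2(0x123456789ABCDEF0ull, 0x0FEDCBA987654321ull) ==
         0x123456789ABCDEF0ull * 0x0FEDCBA987654321ull);
  assert(MulLikeI64x2(~0ull, 2) == ~0ull * 2);  // wraps mod 2^64, like paddq
  return 0;
}
```

This decomposition is also why VisitI64x2Mul (later in this patch) requests two SIMD temporaries and unique registers for both inputs: the sequence clobbers the temps while `left` and `right` are both still live.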
@@ -3835,7 +4077,7 @@ void CodeGenerator::AssembleConstructFrame() { int slot_idx = 0; for (int i = 0; i < XMMRegister::kNumRegisters; i++) { if (!((1 << i) & saves_fp)) continue; - __ movdqu(Operand(rsp, kQuadWordSize * slot_idx), + __ Movdqu(Operand(rsp, kQuadWordSize * slot_idx), XMMRegister::from_code(i)); slot_idx++; } @@ -3877,7 +4119,7 @@ void CodeGenerator::AssembleReturn(InstructionOperand* pop) { int slot_idx = 0; for (int i = 0; i < XMMRegister::kNumRegisters; i++) { if (!((1 << i) & saves_fp)) continue; - __ movdqu(XMMRegister::from_code(i), + __ Movdqu(XMMRegister::from_code(i), Operand(rsp, kQuadWordSize * slot_idx)); slot_idx++; } @@ -3970,6 +4212,16 @@ void CodeGenerator::AssembleMove(InstructionOperand* source, } break; } + case Constant::kCompressedHeapObject: { + Handle src_object = src.ToHeapObject(); + RootIndex index; + if (IsMaterializableFromRoot(src_object, &index)) { + __ LoadRoot(dst, index); + } else { + __ Move(dst, src_object, RelocInfo::COMPRESSED_EMBEDDED_OBJECT); + } + break; + } case Constant::kDelayedStringConstant: { const StringConstantBase* src_constant = src.ToDelayedStringConstant(); __ MoveStringConstant(dst, src_constant); diff --git a/deps/v8/src/compiler/backend/x64/instruction-codes-x64.h b/deps/v8/src/compiler/backend/x64/instruction-codes-x64.h index 57ef26dbd70d75..d6ac3f43dfaa88 100644 --- a/deps/v8/src/compiler/backend/x64/instruction-codes-x64.h +++ b/deps/v8/src/compiler/backend/x64/instruction-codes-x64.h @@ -58,7 +58,8 @@ namespace compiler { V(X64Popcnt32) \ V(X64Bswap) \ V(X64Bswap32) \ - V(LFence) \ + V(X64MFence) \ + V(X64LFence) \ V(SSEFloat32Cmp) \ V(SSEFloat32Add) \ V(SSEFloat32Sub) \ @@ -158,6 +159,15 @@ namespace compiler { V(X64Poke) \ V(X64Peek) \ V(X64StackCheck) \ + V(X64F64x2Splat) \ + V(X64F64x2ExtractLane) \ + V(X64F64x2ReplaceLane) \ + V(X64F64x2Abs) \ + V(X64F64x2Neg) \ + V(X64F64x2Eq) \ + V(X64F64x2Ne) \ + V(X64F64x2Lt) \ + V(X64F64x2Le) \ V(X64F32x4Splat) \ V(X64F32x4ExtractLane) \ V(X64F32x4ReplaceLane) \ @@ -177,6 +187,22 @@ namespace compiler { V(X64F32x4Ne) \ V(X64F32x4Lt) \ V(X64F32x4Le) \ + V(X64I64x2Splat) \ + V(X64I64x2ExtractLane) \ + V(X64I64x2ReplaceLane) \ + V(X64I64x2Neg) \ + V(X64I64x2Shl) \ + V(X64I64x2ShrS) \ + V(X64I64x2Add) \ + V(X64I64x2Sub) \ + V(X64I64x2Mul) \ + V(X64I64x2Eq) \ + V(X64I64x2Ne) \ + V(X64I64x2GtS) \ + V(X64I64x2GeS) \ + V(X64I64x2ShrU) \ + V(X64I64x2GtU) \ + V(X64I64x2GeU) \ V(X64I32x4Splat) \ V(X64I32x4ExtractLane) \ V(X64I32x4ReplaceLane) \ @@ -293,6 +319,8 @@ namespace compiler { V(X64S8x8Reverse) \ V(X64S8x4Reverse) \ V(X64S8x2Reverse) \ + V(X64S1x2AnyTrue) \ + V(X64S1x2AllTrue) \ V(X64S1x4AnyTrue) \ V(X64S1x4AllTrue) \ V(X64S1x8AnyTrue) \ diff --git a/deps/v8/src/compiler/backend/x64/instruction-scheduler-x64.cc b/deps/v8/src/compiler/backend/x64/instruction-scheduler-x64.cc index 9d48e9175a6c36..6389ef2e503f73 100644 --- a/deps/v8/src/compiler/backend/x64/instruction-scheduler-x64.cc +++ b/deps/v8/src/compiler/backend/x64/instruction-scheduler-x64.cc @@ -124,6 +124,15 @@ int InstructionScheduler::GetTargetInstructionFlags( case kX64Lea: case kX64Dec32: case kX64Inc32: + case kX64F64x2Splat: + case kX64F64x2ExtractLane: + case kX64F64x2ReplaceLane: + case kX64F64x2Abs: + case kX64F64x2Neg: + case kX64F64x2Eq: + case kX64F64x2Ne: + case kX64F64x2Lt: + case kX64F64x2Le: case kX64F32x4Splat: case kX64F32x4ExtractLane: case kX64F32x4ReplaceLane: @@ -143,6 +152,22 @@ int InstructionScheduler::GetTargetInstructionFlags( case kX64F32x4Ne: case kX64F32x4Lt: case kX64F32x4Le: + case 
kX64I64x2Splat: + case kX64I64x2ExtractLane: + case kX64I64x2ReplaceLane: + case kX64I64x2Neg: + case kX64I64x2Shl: + case kX64I64x2ShrS: + case kX64I64x2Add: + case kX64I64x2Sub: + case kX64I64x2Mul: + case kX64I64x2Eq: + case kX64I64x2Ne: + case kX64I64x2GtS: + case kX64I64x2GeS: + case kX64I64x2ShrU: + case kX64I64x2GtU: + case kX64I64x2GeU: case kX64I32x4Splat: case kX64I32x4ExtractLane: case kX64I32x4ReplaceLane: @@ -233,6 +258,8 @@ int InstructionScheduler::GetTargetInstructionFlags( case kX64S128Not: case kX64S128Select: case kX64S128Zero: + case kX64S1x2AnyTrue: + case kX64S1x2AllTrue: case kX64S1x4AnyTrue: case kX64S1x4AllTrue: case kX64S1x8AnyTrue: @@ -327,7 +354,8 @@ int InstructionScheduler::GetTargetInstructionFlags( case kX64Poke: return kHasSideEffect; - case kLFence: + case kX64MFence: + case kX64LFence: return kHasSideEffect; case kX64Word64AtomicLoadUint8: diff --git a/deps/v8/src/compiler/backend/x64/instruction-selector-x64.cc b/deps/v8/src/compiler/backend/x64/instruction-selector-x64.cc index a20590b8d3ceb4..a4908fb846167b 100644 --- a/deps/v8/src/compiler/backend/x64/instruction-selector-x64.cc +++ b/deps/v8/src/compiler/backend/x64/instruction-selector-x64.cc @@ -309,21 +309,19 @@ void InstructionSelector::VisitStackSlot(Node* node) { sequence()->AddImmediate(Constant(slot)), 0, nullptr); } -void InstructionSelector::VisitDebugAbort(Node* node) { +void InstructionSelector::VisitAbortCSAAssert(Node* node) { X64OperandGenerator g(this); - Emit(kArchDebugAbort, g.NoOutput(), g.UseFixed(node->InputAt(0), rdx)); + Emit(kArchAbortCSAAssert, g.NoOutput(), g.UseFixed(node->InputAt(0), rdx)); } -void InstructionSelector::VisitLoad(Node* node) { - LoadRepresentation load_rep = LoadRepresentationOf(node->op()); +void InstructionSelector::VisitLoad(Node* node, Node* value, + InstructionCode opcode) { X64OperandGenerator g(this); - - ArchOpcode opcode = GetLoadOpcode(load_rep); InstructionOperand outputs[] = {g.DefineAsRegister(node)}; InstructionOperand inputs[3]; size_t input_count = 0; AddressingMode mode = - g.GetEffectiveAddressMemoryOperand(node, inputs, &input_count); + g.GetEffectiveAddressMemoryOperand(value, inputs, &input_count); InstructionCode code = opcode | AddressingModeField::encode(mode); if (node->opcode() == IrOpcode::kProtectedLoad) { code |= MiscField::encode(kMemoryAccessProtected); @@ -334,6 +332,11 @@ void InstructionSelector::VisitLoad(Node* node) { Emit(code, 1, outputs, input_count, inputs); } +void InstructionSelector::VisitLoad(Node* node) { + LoadRepresentation load_rep = LoadRepresentationOf(node->op()); + VisitLoad(node, node, GetLoadOpcode(load_rep)); +} + void InstructionSelector::VisitPoisonedLoad(Node* node) { VisitLoad(node); } void InstructionSelector::VisitProtectedLoad(Node* node) { VisitLoad(node); } @@ -898,7 +901,8 @@ void InstructionSelector::VisitInt32Sub(Node* node) { // Omit truncation and turn subtractions of constant values into immediate // "leal" instructions by negating the value. Emit(kX64Lea32 | AddressingModeField::encode(kMode_MRI), - g.DefineAsRegister(node), int64_input, g.TempImmediate(-imm)); + g.DefineAsRegister(node), int64_input, + g.TempImmediate(base::NegateWithWraparound(imm))); } return; } @@ -907,9 +911,9 @@ void InstructionSelector::VisitInt32Sub(Node* node) { if (m.left().Is(0)) { Emit(kX64Neg32, g.DefineSameAsFirst(node), g.UseRegister(m.right().node())); } else if (m.right().Is(0)) { - // TODO(jarin): We should be able to use {EmitIdentity} here - // (https://crbug.com/v8/7947). 
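In the VisitInt32Sub change above, the negated immediate now goes through `base::NegateWithWraparound(imm)` instead of plain `-imm`: negating INT32_MIN is signed overflow, which is undefined behavior in C++, while the `leal` trick only needs the two's-complement bit pattern. A sketch of what such a helper does, assuming (as in V8's base/overflowing-math.h) it detours through the unsigned type:

```cpp
#include <cstdint>
#include <type_traits>

// Wraparound negation: negate in the corresponding unsigned type, where
// arithmetic is well defined mod 2^N, then convert back. For INT32_MIN the
// result is INT32_MIN again -- exactly the displacement "leal" should see.
template <typename T>
T NegateWithWraparound(T value) {
  static_assert(std::is_signed<T>::value, "requires a signed integer type");
  using U = typename std::make_unsigned<T>::type;
  return static_cast<T>(static_cast<U>(0) - static_cast<U>(value));
}

// NegateWithWraparound(int32_t{-5}) == 5;
// NegateWithWraparound(INT32_MIN) == INT32_MIN, with no UB on the way.
```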
- Emit(kArchNop, g.DefineSameAsFirst(node), g.Use(m.left().node())); + // {EmitIdentity} reuses the virtual register of the first input + // for the output. This is exactly what we want here. + EmitIdentity(node); } else if (m.right().HasValue() && g.CanBeImmediate(m.right().node())) { // Turn subtractions of constant values into immediate "leal" instructions // by negating the value. @@ -1254,23 +1258,47 @@ void InstructionSelector::VisitChangeTaggedSignedToCompressedSigned( } void InstructionSelector::VisitChangeCompressedToTagged(Node* node) { - X64OperandGenerator g(this); Node* const value = node->InputAt(0); - Emit(kX64DecompressAny, g.DefineAsRegister(node), g.Use(value)); + if ((value->opcode() == IrOpcode::kLoad || + value->opcode() == IrOpcode::kPoisonedLoad) && + CanCover(node, value)) { + DCHECK_EQ(LoadRepresentationOf(value->op()).representation(), + MachineRepresentation::kCompressed); + VisitLoad(node, value, kX64MovqDecompressAnyTagged); + } else { + X64OperandGenerator g(this); + Emit(kX64DecompressAny, g.DefineAsRegister(node), g.Use(value)); + } } void InstructionSelector::VisitChangeCompressedPointerToTaggedPointer( Node* node) { - X64OperandGenerator g(this); Node* const value = node->InputAt(0); - Emit(kX64DecompressPointer, g.DefineAsRegister(node), g.Use(value)); + if ((value->opcode() == IrOpcode::kLoad || + value->opcode() == IrOpcode::kPoisonedLoad) && + CanCover(node, value)) { + DCHECK_EQ(LoadRepresentationOf(value->op()).representation(), + MachineRepresentation::kCompressedPointer); + VisitLoad(node, value, kX64MovqDecompressTaggedPointer); + } else { + X64OperandGenerator g(this); + Emit(kX64DecompressPointer, g.DefineAsRegister(node), g.Use(value)); + } } void InstructionSelector::VisitChangeCompressedSignedToTaggedSigned( Node* node) { - X64OperandGenerator g(this); Node* const value = node->InputAt(0); - Emit(kX64DecompressSigned, g.DefineAsRegister(node), g.Use(value)); + if ((value->opcode() == IrOpcode::kLoad || + value->opcode() == IrOpcode::kPoisonedLoad) && + CanCover(node, value)) { + DCHECK_EQ(LoadRepresentationOf(value->op()).representation(), + MachineRepresentation::kCompressedSigned); + VisitLoad(node, value, kX64MovqDecompressTaggedSigned); + } else { + X64OperandGenerator g(this); + Emit(kX64DecompressSigned, g.DefineAsRegister(node), g.Use(value)); + } } namespace { @@ -2343,6 +2371,11 @@ void InstructionSelector::VisitFloat64SilenceNaN(Node* node) { g.UseRegister(node->InputAt(0))); } +void InstructionSelector::VisitMemoryBarrier(Node* node) { + X64OperandGenerator g(this); + Emit(kX64MFence, g.NoOutput()); +} + void InstructionSelector::VisitWord32AtomicLoad(Node* node) { LoadRepresentation load_rep = LoadRepresentationOf(node->op()); DCHECK(load_rep.representation() == MachineRepresentation::kWord8 || @@ -2545,12 +2578,18 @@ VISIT_ATOMIC_BINOP(Xor) #undef VISIT_ATOMIC_BINOP #define SIMD_TYPES(V) \ + V(F64x2) \ V(F32x4) \ + V(I64x2) \ V(I32x4) \ V(I16x8) \ V(I8x16) #define SIMD_BINOP_LIST(V) \ + V(F64x2Eq) \ + V(F64x2Ne) \ + V(F64x2Lt) \ + V(F64x2Le) \ V(F32x4Add) \ V(F32x4AddHoriz) \ V(F32x4Sub) \ @@ -2561,6 +2600,11 @@ VISIT_ATOMIC_BINOP(Xor) V(F32x4Ne) \ V(F32x4Lt) \ V(F32x4Le) \ + V(I64x2Add) \ + V(I64x2Sub) \ + V(I64x2Eq) \ + V(I64x2Ne) \ + V(I64x2GtS) \ V(I32x4Add) \ V(I32x4AddHoriz) \ V(I32x4Sub) \ @@ -2615,12 +2659,18 @@ VISIT_ATOMIC_BINOP(Xor) V(S128Or) \ V(S128Xor) +#define SIMD_BINOP_ONE_TEMP_LIST(V) \ + V(I64x2GeS) \ + V(I64x2GtU) \ + V(I64x2GeU) + #define SIMD_UNOP_LIST(V) \ V(F32x4SConvertI32x4) \ V(F32x4Abs) \ V(F32x4Neg) \ 
V(F32x4RecipApprox) \ V(F32x4RecipSqrtApprox) \ + V(I64x2Neg) \ V(I32x4SConvertI16x8Low) \ V(I32x4SConvertI16x8High) \ V(I32x4Neg) \ @@ -2635,6 +2685,9 @@ VISIT_ATOMIC_BINOP(Xor) V(S128Not) #define SIMD_SHIFT_OPCODES(V) \ + V(I64x2Shl) \ + V(I64x2ShrS) \ + V(I64x2ShrU) \ V(I32x4Shl) \ V(I32x4ShrS) \ V(I32x4ShrU) \ @@ -2646,11 +2699,13 @@ VISIT_ATOMIC_BINOP(Xor) V(I8x16ShrU) #define SIMD_ANYTRUE_LIST(V) \ + V(S1x2AnyTrue) \ V(S1x4AnyTrue) \ V(S1x8AnyTrue) \ V(S1x16AnyTrue) #define SIMD_ALLTRUE_LIST(V) \ + V(S1x2AllTrue) \ V(S1x4AllTrue) \ V(S1x8AllTrue) \ V(S1x16AllTrue) @@ -2721,6 +2776,18 @@ SIMD_BINOP_LIST(VISIT_SIMD_BINOP) #undef VISIT_SIMD_BINOP #undef SIMD_BINOP_LIST +#define VISIT_SIMD_BINOP_ONE_TEMP(Opcode) \ + void InstructionSelector::Visit##Opcode(Node* node) { \ + X64OperandGenerator g(this); \ + InstructionOperand temps[] = {g.TempSimd128Register()}; \ + Emit(kX64##Opcode, g.DefineSameAsFirst(node), \ + g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1)), \ + arraysize(temps), temps); \ + } +SIMD_BINOP_ONE_TEMP_LIST(VISIT_SIMD_BINOP_ONE_TEMP) +#undef VISIT_SIMD_BINOP_ONE_TEMP +#undef SIMD_BINOP_ONE_TEMP_LIST + #define VISIT_SIMD_ANYTRUE(Opcode) \ void InstructionSelector::Visit##Opcode(Node* node) { \ X64OperandGenerator g(this); \ @@ -2751,12 +2818,33 @@ void InstructionSelector::VisitS128Select(Node* node) { g.UseRegister(node->InputAt(2))); } +void InstructionSelector::VisitF64x2Abs(Node* node) { + X64OperandGenerator g(this); + Emit(kX64F64x2Abs, g.DefineSameAsFirst(node), + g.UseRegister(node->InputAt(0))); +} + +void InstructionSelector::VisitF64x2Neg(Node* node) { + X64OperandGenerator g(this); + Emit(kX64F64x2Neg, g.DefineSameAsFirst(node), + g.UseRegister(node->InputAt(0))); +} + void InstructionSelector::VisitF32x4UConvertI32x4(Node* node) { X64OperandGenerator g(this); Emit(kX64F32x4UConvertI32x4, g.DefineSameAsFirst(node), g.UseRegister(node->InputAt(0))); } +void InstructionSelector::VisitI64x2Mul(Node* node) { + X64OperandGenerator g(this); + InstructionOperand temps[] = {g.TempSimd128Register(), + g.TempSimd128Register()}; + Emit(kX64I64x2Mul, g.DefineSameAsFirst(node), + g.UseUniqueRegister(node->InputAt(0)), + g.UseUniqueRegister(node->InputAt(1)), arraysize(temps), temps); +} + void InstructionSelector::VisitI32x4SConvertF32x4(Node* node) { X64OperandGenerator g(this); Emit(kX64I32x4SConvertF32x4, g.DefineSameAsFirst(node), diff --git a/deps/v8/src/compiler/bytecode-analysis.cc b/deps/v8/src/compiler/bytecode-analysis.cc index 9c23cd460ab742..b44bec5fc88737 100644 --- a/deps/v8/src/compiler/bytecode-analysis.cc +++ b/deps/v8/src/compiler/bytecode-analysis.cc @@ -79,22 +79,28 @@ ResumeJumpTarget ResumeJumpTarget::AtLoopHeader(int loop_header_offset, } BytecodeAnalysis::BytecodeAnalysis(Handle bytecode_array, - Zone* zone, bool do_liveness_analysis) + Zone* zone, BailoutId osr_bailout_id, + bool analyze_liveness) : bytecode_array_(bytecode_array), - do_liveness_analysis_(do_liveness_analysis), zone_(zone), + osr_bailout_id_(osr_bailout_id), + analyze_liveness_(analyze_liveness), loop_stack_(zone), loop_end_index_queue_(zone), resume_jump_targets_(zone), end_to_header_(zone), header_to_info_(zone), osr_entry_point_(-1), - liveness_map_(bytecode_array->length(), zone) {} + liveness_map_(bytecode_array->length(), zone) { + Analyze(); +} namespace { -void UpdateInLiveness(Bytecode bytecode, BytecodeLivenessState& in_liveness, - const interpreter::BytecodeArrayAccessor& accessor) { +void UpdateInLiveness( + Bytecode bytecode, + BytecodeLivenessState& 
in_liveness, // NOLINT(runtime/references) + const interpreter::BytecodeArrayAccessor& accessor) { int num_operands = Bytecodes::NumberOfOperands(bytecode); const OperandType* operand_types = Bytecodes::GetOperandTypes(bytecode); @@ -201,12 +207,14 @@ void UpdateInLiveness(Bytecode bytecode, BytecodeLivenessState& in_liveness, } } -void UpdateOutLiveness(Bytecode bytecode, BytecodeLivenessState& out_liveness, - BytecodeLivenessState* next_bytecode_in_liveness, - const interpreter::BytecodeArrayAccessor& accessor, - const BytecodeLivenessMap& liveness_map) { +void UpdateOutLiveness( + Bytecode bytecode, + BytecodeLivenessState& out_liveness, // NOLINT(runtime/references) + BytecodeLivenessState* next_bytecode_in_liveness, + const interpreter::BytecodeArrayAccessor& accessor, + Handle bytecode_array, + const BytecodeLivenessMap& liveness_map) { int current_offset = accessor.current_offset(); - const Handle& bytecode_array = accessor.bytecode_array(); // Special case Suspend and Resume to just pass through liveness. if (bytecode == Bytecode::kSuspendGenerator || @@ -261,20 +269,24 @@ void UpdateOutLiveness(Bytecode bytecode, BytecodeLivenessState& out_liveness, } } -void UpdateLiveness(Bytecode bytecode, BytecodeLiveness& liveness, +void UpdateLiveness(Bytecode bytecode, + BytecodeLiveness& liveness, // NOLINT(runtime/references) BytecodeLivenessState** next_bytecode_in_liveness, const interpreter::BytecodeArrayAccessor& accessor, + Handle bytecode_array, const BytecodeLivenessMap& liveness_map) { UpdateOutLiveness(bytecode, *liveness.out, *next_bytecode_in_liveness, - accessor, liveness_map); + accessor, bytecode_array, liveness_map); liveness.in->CopyFrom(*liveness.out); UpdateInLiveness(bytecode, *liveness.in, accessor); *next_bytecode_in_liveness = liveness.in; } -void UpdateAssignments(Bytecode bytecode, BytecodeLoopAssignments& assignments, - const interpreter::BytecodeArrayAccessor& accessor) { +void UpdateAssignments( + Bytecode bytecode, + BytecodeLoopAssignments& assignments, // NOLINT(runtime/references) + const interpreter::BytecodeArrayAccessor& accessor) { int num_operands = Bytecodes::NumberOfOperands(bytecode); const OperandType* operand_types = Bytecodes::GetOperandTypes(bytecode); @@ -307,15 +319,13 @@ void UpdateAssignments(Bytecode bytecode, BytecodeLoopAssignments& assignments, } // namespace -void BytecodeAnalysis::Analyze(BailoutId osr_bailout_id) { +void BytecodeAnalysis::Analyze() { loop_stack_.push({-1, nullptr}); BytecodeLivenessState* next_bytecode_in_liveness = nullptr; - - bool is_osr = !osr_bailout_id.IsNone(); - int osr_loop_end_offset = is_osr ? osr_bailout_id.ToInt() : -1; - int generator_switch_index = -1; + int osr_loop_end_offset = osr_bailout_id_.ToInt(); + DCHECK_EQ(osr_loop_end_offset < 0, osr_bailout_id_.IsNone()); interpreter::BytecodeArrayRandomIterator iterator(bytecode_array(), zone()); for (iterator.GoToEnd(); iterator.IsValid(); --iterator) { @@ -337,14 +347,14 @@ void BytecodeAnalysis::Analyze(BailoutId osr_bailout_id) { if (current_offset == osr_loop_end_offset) { osr_entry_point_ = loop_header; } else if (current_offset < osr_loop_end_offset) { - // Check we've found the osr_entry_point if we've gone past the + // Assert that we've found the osr_entry_point if we've gone past the // osr_loop_end_offset. Note, we are iterating the bytecode in reverse, - // so the less than in the check is correct. - DCHECK_NE(-1, osr_entry_point_); + // so the less-than in the above condition is correct. 
+ DCHECK_LE(0, osr_entry_point_); } // Save the index so that we can do another pass later. - if (do_liveness_analysis_) { + if (analyze_liveness_) { loop_end_index_queue_.push_back(iterator.current_index()); } } else if (loop_stack_.size() > 1) { @@ -357,8 +367,8 @@ void BytecodeAnalysis::Analyze(BailoutId osr_bailout_id) { // information we currently have. UpdateAssignments(bytecode, current_loop_info->assignments(), iterator); - // Update suspend counts for this loop, though only if not OSR. - if (!is_osr && bytecode == Bytecode::kSuspendGenerator) { + // Update suspend counts for this loop. + if (bytecode == Bytecode::kSuspendGenerator) { int suspend_id = iterator.GetUnsignedImmediateOperand(3); int resume_offset = current_offset + iterator.current_bytecode_size(); current_loop_info->AddResumeTarget( @@ -412,7 +422,7 @@ void BytecodeAnalysis::Analyze(BailoutId osr_bailout_id) { } } } - } else if (!is_osr && bytecode == Bytecode::kSuspendGenerator) { + } else if (bytecode == Bytecode::kSuspendGenerator) { // If we're not in a loop, we still need to look for suspends. // TODO(leszeks): It would be nice to de-duplicate this with the in-loop // case @@ -422,11 +432,11 @@ void BytecodeAnalysis::Analyze(BailoutId osr_bailout_id) { ResumeJumpTarget::Leaf(suspend_id, resume_offset)); } - if (do_liveness_analysis_) { + if (analyze_liveness_) { BytecodeLiveness& liveness = liveness_map_.InitializeLiveness( current_offset, bytecode_array()->register_count(), zone()); UpdateLiveness(bytecode, liveness, &next_bytecode_in_liveness, iterator, - liveness_map_); + bytecode_array(), liveness_map_); } } @@ -435,7 +445,7 @@ void BytecodeAnalysis::Analyze(BailoutId osr_bailout_id) { DCHECK(ResumeJumpTargetsAreValid()); - if (!do_liveness_analysis_) return; + if (!analyze_liveness_) return; // At this point, every bytecode has a valid in and out liveness, except for // propagating liveness across back edges (i.e. JumpLoop). Subsequent liveness @@ -489,12 +499,13 @@ void BytecodeAnalysis::Analyze(BailoutId osr_bailout_id) { BytecodeLiveness& liveness = liveness_map_.GetLiveness(current_offset); UpdateLiveness(bytecode, liveness, &next_bytecode_in_liveness, iterator, - liveness_map_); + bytecode_array(), liveness_map_); } // Now we are at the loop header. Since the in-liveness of the header // can't change, we need only to update the out-liveness. UpdateOutLiveness(iterator.current_bytecode(), *header_liveness.out, - next_bytecode_in_liveness, iterator, liveness_map_); + next_bytecode_in_liveness, iterator, bytecode_array(), + liveness_map_); } // Process the generator switch statement separately, once the loops are done. 
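The liveness code above is a standard backward dataflow pass: bytecodes are walked in reverse, each bytecode's out-liveness is taken from its successors, and its in-liveness is the out-liveness minus written registers plus read ones; loop bodies then get one extra pass so that the JumpLoop back edge can feed the header's out-liveness. A toy straight-line version of the per-bytecode update (one bit per register; jumps, back edges, and the accumulator omitted):

```cpp
#include <bitset>
#include <cstddef>
#include <vector>

// Toy model of UpdateInLiveness/UpdateOutLiveness for straight-line code.
// Real V8 additionally unions liveness across jump targets and exception
// handlers, and revisits loop bodies once more for JumpLoop back edges.
struct ToyBytecode {
  std::bitset<8> reads, writes;
};

std::vector<std::bitset<8>> ComputeInLiveness(
    const std::vector<ToyBytecode>& code) {
  std::vector<std::bitset<8>> in(code.size());
  std::bitset<8> next_in;  // in-liveness of the next bytecode (empty at end)
  for (size_t j = code.size(); j-- > 0;) {
    std::bitset<8> out = next_in;                     // out = successor's in
    in[j] = (out & ~code[j].writes) | code[j].reads;  // kill writes, gen reads
    next_in = in[j];
  }
  return in;
}
```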
@@ -533,12 +544,12 @@ void BytecodeAnalysis::Analyze(BailoutId osr_bailout_id) { DCHECK_NE(bytecode, Bytecode::kJumpLoop); UpdateLiveness(bytecode, liveness, &next_bytecode_in_liveness, iterator, - liveness_map_); + bytecode_array(), liveness_map_); } } } - DCHECK(do_liveness_analysis_); + DCHECK(analyze_liveness_); if (FLAG_trace_environment_liveness) { StdoutStream of; PrintLivenessTo(of); @@ -610,14 +621,14 @@ const LoopInfo& BytecodeAnalysis::GetLoopInfoFor(int header_offset) const { const BytecodeLivenessState* BytecodeAnalysis::GetInLivenessFor( int offset) const { - if (!do_liveness_analysis_) return nullptr; + if (!analyze_liveness_) return nullptr; return liveness_map_.GetInLiveness(offset); } const BytecodeLivenessState* BytecodeAnalysis::GetOutLivenessFor( int offset) const { - if (!do_liveness_analysis_) return nullptr; + if (!analyze_liveness_) return nullptr; return liveness_map_.GetOutLiveness(offset); } @@ -662,9 +673,8 @@ bool BytecodeAnalysis::ResumeJumpTargetsAreValid() { } // If the iterator is invalid, we've reached the end without finding the - // generator switch. Similarly, if we are OSR-ing, we're not resuming, so we - // need no jump targets. So, ensure there are no jump targets and exit. - if (!iterator.IsValid() || HasOsrEntryPoint()) { + // generator switch. So, ensure there are no jump targets and exit. + if (!iterator.IsValid()) { // Check top-level. if (!resume_jump_targets().empty()) { PrintF(stderr, @@ -758,14 +768,14 @@ bool BytecodeAnalysis::ResumeJumpTargetLeavesResolveSuspendIds( valid = false; } else { // Make sure we're resuming to a Resume bytecode - interpreter::BytecodeArrayAccessor assessor(bytecode_array(), + interpreter::BytecodeArrayAccessor accessor(bytecode_array(), target.target_offset()); - if (assessor.current_bytecode() != Bytecode::kResumeGenerator) { + if (accessor.current_bytecode() != Bytecode::kResumeGenerator) { PrintF(stderr, "Expected resume target for id %d, offset %d, to be " "ResumeGenerator, but found %s\n", target.suspend_id(), target.target_offset(), - Bytecodes::ToString(assessor.current_bytecode())); + Bytecodes::ToString(accessor.current_bytecode())); valid = false; } @@ -820,7 +830,7 @@ bool BytecodeAnalysis::LivenessIsValid() { previous_liveness.CopyFrom(*liveness.out); UpdateOutLiveness(bytecode, *liveness.out, next_bytecode_in_liveness, - iterator, liveness_map_); + iterator, bytecode_array(), liveness_map_); // UpdateOutLiveness skips kJumpLoop, so we update it manually. if (bytecode == Bytecode::kJumpLoop) { int target_offset = iterator.GetJumpTargetOffset(); diff --git a/deps/v8/src/compiler/bytecode-analysis.h b/deps/v8/src/compiler/bytecode-analysis.h index 53f86ca3063c27..32c5168466e7da 100644 --- a/deps/v8/src/compiler/bytecode-analysis.h +++ b/deps/v8/src/compiler/bytecode-analysis.h @@ -92,18 +92,14 @@ struct V8_EXPORT_PRIVATE LoopInfo { ZoneVector resume_jump_targets_; }; -class V8_EXPORT_PRIVATE BytecodeAnalysis { +// Analyze the bytecodes to find the loop ranges, loop nesting, loop assignments +// and liveness. NOTE: The broker/serializer relies on the fact that an +// analysis for OSR (osr_bailout_id is not None) subsumes an analysis for +// non-OSR (osr_bailout_id is None). +class V8_EXPORT_PRIVATE BytecodeAnalysis : public ZoneObject { public: BytecodeAnalysis(Handle bytecode_array, Zone* zone, - bool do_liveness_analysis); - - // Analyze the bytecodes to find the loop ranges, loop nesting, loop - // assignments and liveness, under the assumption that there is an OSR bailout - // at {osr_bailout_id}. 
- // - // No other methods in this class return valid information until this has been - // called. - void Analyze(BailoutId osr_bailout_id); + BailoutId osr_bailout_id, bool analyze_liveness); // Return true if the given offset is a loop header bool IsLoopHeader(int offset) const; @@ -118,23 +114,30 @@ class V8_EXPORT_PRIVATE BytecodeAnalysis { return resume_jump_targets_; } - // True if the current analysis has an OSR entry point. - bool HasOsrEntryPoint() const { return osr_entry_point_ != -1; } - - int osr_entry_point() const { return osr_entry_point_; } - - // Gets the in-liveness for the bytecode at {offset}. + // Gets the in-/out-liveness for the bytecode at {offset}. const BytecodeLivenessState* GetInLivenessFor(int offset) const; - - // Gets the out-liveness for the bytecode at {offset}. const BytecodeLivenessState* GetOutLivenessFor(int offset) const; + // In the case of OSR, the analysis also computes the (bytecode offset of the) + // OSR entry point from the {osr_bailout_id} that was given to the + // constructor. + int osr_entry_point() const { + CHECK_LE(0, osr_entry_point_); + return osr_entry_point_; + } + // Return the osr_bailout_id (for verification purposes). + BailoutId osr_bailout_id() const { return osr_bailout_id_; } + + // Return whether liveness analysis was performed (for verification purposes). + bool liveness_analyzed() const { return analyze_liveness_; } + private: struct LoopStackEntry { int header_offset; LoopInfo* loop_info; }; + void Analyze(); void PushLoop(int loop_header, int loop_end); #if DEBUG @@ -153,17 +156,15 @@ class V8_EXPORT_PRIVATE BytecodeAnalysis { std::ostream& PrintLivenessTo(std::ostream& os) const; Handle const bytecode_array_; - bool const do_liveness_analysis_; Zone* const zone_; - + BailoutId const osr_bailout_id_; + bool const analyze_liveness_; ZoneStack loop_stack_; ZoneVector loop_end_index_queue_; ZoneVector resume_jump_targets_; - ZoneMap end_to_header_; ZoneMap header_to_info_; int osr_entry_point_; - BytecodeLivenessMap liveness_map_; DISALLOW_COPY_AND_ASSIGN(BytecodeAnalysis); diff --git a/deps/v8/src/compiler/bytecode-graph-builder.cc b/deps/v8/src/compiler/bytecode-graph-builder.cc index 0ab8f85670c349..7c7144632074dd 100644 --- a/deps/v8/src/compiler/bytecode-graph-builder.cc +++ b/deps/v8/src/compiler/bytecode-graph-builder.cc @@ -6,9 +6,11 @@ #include "src/ast/ast.h" #include "src/codegen/source-position-table.h" +#include "src/codegen/tick-counter.h" #include "src/compiler/access-builder.h" #include "src/compiler/bytecode-analysis.h" #include "src/compiler/compiler-source-position-table.h" +#include "src/compiler/js-heap-broker.h" #include "src/compiler/linkage.h" #include "src/compiler/node-matchers.h" #include "src/compiler/operator-properties.h" @@ -32,14 +34,15 @@ namespace compiler { class BytecodeGraphBuilder { public: BytecodeGraphBuilder(JSHeapBroker* broker, Zone* local_zone, - Handle bytecode_array, - Handle shared, - Handle feedback_vector, - BailoutId osr_offset, JSGraph* jsgraph, + BytecodeArrayRef bytecode_array, + SharedFunctionInfoRef shared, + FeedbackVectorRef feedback_vector, BailoutId osr_offset, + JSGraph* jsgraph, CallFrequency const& invocation_frequency, SourcePositionTable* source_positions, - Handle native_context, int inlining_id, - BytecodeGraphBuilderFlags flags); + NativeContextRef native_context, int inlining_id, + BytecodeGraphBuilderFlags flags, + TickCounter* tick_counter); // Creates a graph by visiting bytecodes. 
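Net effect of the BytecodeAnalysis changes above: analysis now runs inside the constructor, so a constructed object is always fully populated, and `osr_entry_point()` CHECKs that an entry was actually found. A hedged before/after sketch of a call site (argument names as in the header above; `bytecode_array`, `zone`, `osr_bailout_id`, and `offset` assumed in scope):

```cpp
// Before this patch: two-phase, and easy to misuse by querying too early.
//   BytecodeAnalysis analysis(bytecode_array, zone, /*do_liveness=*/true);
//   analysis.Analyze(osr_bailout_id);

// After: the bailout id is a constructor argument and Analyze() is private.
BytecodeAnalysis analysis(bytecode_array, zone, osr_bailout_id,
                          /*analyze_liveness=*/true);
if (analysis.IsLoopHeader(offset)) {
  const LoopInfo& loop = analysis.GetLoopInfoFor(offset);
  // ... use loop.parent_offset(), loop.assignments(), ...
}
// Only valid when osr_bailout_id was not BailoutId::None():
// int osr_entry = analysis.osr_entry_point();
```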
void CreateGraph(); @@ -318,12 +321,8 @@ class BytecodeGraphBuilder { return jsgraph_->simplified(); } Zone* local_zone() const { return local_zone_; } - const Handle& bytecode_array() const { - return bytecode_array_; - } - const Handle& feedback_vector() const { - return feedback_vector_; - } + const BytecodeArrayRef bytecode_array() const { return bytecode_array_; } + FeedbackVectorRef feedback_vector() const { return feedback_vector_; } const JSTypeHintLowering& type_hint_lowering() const { return type_hint_lowering_; } @@ -332,7 +331,7 @@ class BytecodeGraphBuilder { } SourcePositionTableIterator& source_position_iterator() { - return source_position_iterator_; + return *source_position_iterator_.get(); } interpreter::BytecodeArrayIterator& bytecode_iterator() { @@ -343,8 +342,6 @@ class BytecodeGraphBuilder { return bytecode_analysis_; } - void RunBytecodeAnalysis() { bytecode_analysis_.Analyze(osr_offset_); } - int currently_peeled_loop_offset() const { return currently_peeled_loop_offset_; } @@ -368,9 +365,9 @@ class BytecodeGraphBuilder { needs_eager_checkpoint_ = value; } - Handle shared_info() const { return shared_info_; } + SharedFunctionInfoRef shared_info() const { return shared_info_; } - Handle native_context() const { return native_context_; } + NativeContextRef native_context() const { return native_context_; } JSHeapBroker* broker() const { return broker_; } @@ -382,15 +379,15 @@ class BytecodeGraphBuilder { Zone* const local_zone_; JSGraph* const jsgraph_; CallFrequency const invocation_frequency_; - Handle const bytecode_array_; - Handle const feedback_vector_; + BytecodeArrayRef const bytecode_array_; + FeedbackVectorRef feedback_vector_; JSTypeHintLowering const type_hint_lowering_; const FrameStateFunctionInfo* const frame_state_function_info_; - SourcePositionTableIterator source_position_iterator_; + std::unique_ptr source_position_iterator_; interpreter::BytecodeArrayIterator bytecode_iterator_; - BytecodeAnalysis bytecode_analysis_; + BytecodeAnalysis const& bytecode_analysis_; Environment* environment_; - BailoutId const osr_offset_; + bool const osr_; int currently_peeled_loop_offset_; bool skip_next_stack_check_; @@ -434,10 +431,12 @@ class BytecodeGraphBuilder { SourcePosition const start_position_; - Handle const shared_info_; + SharedFunctionInfoRef const shared_info_; // The native context for which we optimize. 
- Handle const native_context_; + NativeContextRef const native_context_; + + TickCounter* const tick_counter_; static int const kBinaryOperationHintIndex = 1; static int const kCountOperationHintIndex = 0; @@ -938,13 +937,12 @@ Node* BytecodeGraphBuilder::Environment::Checkpoint( } BytecodeGraphBuilder::BytecodeGraphBuilder( - JSHeapBroker* broker, Zone* local_zone, - Handle bytecode_array, - Handle shared_info, - Handle feedback_vector, BailoutId osr_offset, - JSGraph* jsgraph, CallFrequency const& invocation_frequency, - SourcePositionTable* source_positions, Handle native_context, - int inlining_id, BytecodeGraphBuilderFlags flags) + JSHeapBroker* broker, Zone* local_zone, BytecodeArrayRef bytecode_array, + SharedFunctionInfoRef shared_info, FeedbackVectorRef feedback_vector, + BailoutId osr_offset, JSGraph* jsgraph, + CallFrequency const& invocation_frequency, + SourcePositionTable* source_positions, NativeContextRef native_context, + int inlining_id, BytecodeGraphBuilderFlags flags, TickCounter* tick_counter) : broker_(broker), local_zone_(local_zone), jsgraph_(jsgraph), @@ -952,22 +950,22 @@ BytecodeGraphBuilder::BytecodeGraphBuilder( bytecode_array_(bytecode_array), feedback_vector_(feedback_vector), type_hint_lowering_( - jsgraph, feedback_vector, + jsgraph, feedback_vector.object(), (flags & BytecodeGraphBuilderFlag::kBailoutOnUninitialized) ? JSTypeHintLowering::kBailoutOnUninitialized : JSTypeHintLowering::kNoFlags), frame_state_function_info_(common()->CreateFrameStateFunctionInfo( FrameStateType::kInterpretedFunction, - bytecode_array->parameter_count(), bytecode_array->register_count(), - shared_info)), - source_position_iterator_( - handle(bytecode_array->SourcePositionTableIfCollected(), isolate())), - bytecode_iterator_(bytecode_array), - bytecode_analysis_( - bytecode_array, local_zone, - flags & BytecodeGraphBuilderFlag::kAnalyzeEnvironmentLiveness), + bytecode_array.parameter_count(), bytecode_array.register_count(), + shared_info.object())), + bytecode_iterator_( + base::make_unique(bytecode_array)), + bytecode_analysis_(broker_->GetBytecodeAnalysis( + bytecode_array.object(), osr_offset, + flags & BytecodeGraphBuilderFlag::kAnalyzeEnvironmentLiveness, + !FLAG_concurrent_inlining)), environment_(nullptr), - osr_offset_(osr_offset), + osr_(!osr_offset.IsNone()), currently_peeled_loop_offset_(-1), skip_next_stack_check_(flags & BytecodeGraphBuilderFlag::kSkipFirstStackCheck), @@ -981,9 +979,23 @@ BytecodeGraphBuilder::BytecodeGraphBuilder( exit_controls_(local_zone), state_values_cache_(jsgraph), source_positions_(source_positions), - start_position_(shared_info->StartPosition(), inlining_id), + start_position_(shared_info.StartPosition(), inlining_id), shared_info_(shared_info), - native_context_(native_context) {} + native_context_(native_context), + tick_counter_(tick_counter) { + if (FLAG_concurrent_inlining) { + // With concurrent inlining on, the source position address doesn't change + // because it's been copied from the heap. + source_position_iterator_ = base::make_unique( + Vector(bytecode_array.source_positions_address(), + bytecode_array.source_positions_size())); + } else { + // Otherwise, we need to access the table through a handle. 
+ source_position_iterator_ = base::make_unique( + handle(bytecode_array.object()->SourcePositionTableIfCollected(), + isolate())); + } +} Node* BytecodeGraphBuilder::GetFunctionClosure() { if (!function_closure_.is_set()) { @@ -997,33 +1009,30 @@ Node* BytecodeGraphBuilder::GetFunctionClosure() { Node* BytecodeGraphBuilder::BuildLoadNativeContextField(int index) { Node* result = NewNode(javascript()->LoadContext(0, index, true)); - NodeProperties::ReplaceContextInput( - result, jsgraph()->HeapConstant(native_context())); + NodeProperties::ReplaceContextInput(result, + jsgraph()->Constant(native_context())); return result; } VectorSlotPair BytecodeGraphBuilder::CreateVectorSlotPair(int slot_id) { FeedbackSlot slot = FeedbackVector::ToSlot(slot_id); - FeedbackNexus nexus(feedback_vector(), slot); - return VectorSlotPair(feedback_vector(), slot, nexus.ic_state()); + FeedbackNexus nexus(feedback_vector().object(), slot); + return VectorSlotPair(feedback_vector().object(), slot, nexus.ic_state()); } void BytecodeGraphBuilder::CreateGraph() { - BytecodeArrayRef bytecode_array_ref(broker(), bytecode_array()); - SourcePositionTable::Scope pos_scope(source_positions_, start_position_); // Set up the basic structure of the graph. Outputs for {Start} are the formal // parameters (including the receiver) plus new target, number of arguments, // context and closure. - int actual_parameter_count = bytecode_array_ref.parameter_count() + 4; + int actual_parameter_count = bytecode_array().parameter_count() + 4; graph()->SetStart(graph()->NewNode(common()->Start(actual_parameter_count))); - Environment env( - this, bytecode_array_ref.register_count(), - bytecode_array_ref.parameter_count(), - bytecode_array_ref.incoming_new_target_or_generator_register(), - graph()->start()); + Environment env(this, bytecode_array().register_count(), + bytecode_array().parameter_count(), + bytecode_array().incoming_new_target_or_generator_register(), + graph()->start()); set_environment(&env); VisitBytecodes(); @@ -1112,19 +1121,17 @@ class BytecodeGraphBuilder::OsrIteratorState { void ProcessOsrPrelude() { ZoneVector outer_loop_offsets(graph_builder_->local_zone()); - BytecodeAnalysis const& bytecode_analysis = - graph_builder_->bytecode_analysis(); - int osr_offset = bytecode_analysis.osr_entry_point(); + int osr_entry = graph_builder_->bytecode_analysis().osr_entry_point(); // We find here the outermost loop which contains the OSR loop. - int outermost_loop_offset = osr_offset; - while ((outermost_loop_offset = - bytecode_analysis.GetLoopInfoFor(outermost_loop_offset) - .parent_offset()) != -1) { + int outermost_loop_offset = osr_entry; + while ((outermost_loop_offset = graph_builder_->bytecode_analysis() + .GetLoopInfoFor(outermost_loop_offset) + .parent_offset()) != -1) { outer_loop_offsets.push_back(outermost_loop_offset); } outermost_loop_offset = - outer_loop_offsets.empty() ? osr_offset : outer_loop_offsets.back(); + outer_loop_offsets.empty() ? osr_entry : outer_loop_offsets.back(); graph_builder_->AdvanceIteratorsTo(outermost_loop_offset); // We save some iterators states at the offsets of the loop headers of the @@ -1142,14 +1149,16 @@ class BytecodeGraphBuilder::OsrIteratorState { } // Finishing by advancing to the OSR entry - graph_builder_->AdvanceIteratorsTo(osr_offset); + graph_builder_->AdvanceIteratorsTo(osr_entry); // Enters all remaining exception handler which end before the OSR loop // so that on next call of VisitSingleBytecode they will get popped from // the exception handlers stack. 
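`ProcessOsrPrelude` above finds every loop enclosing the OSR entry by chasing `parent_offset()` until it returns -1, then advances the iterators to the outermost such header so per-loop iterator snapshots can be saved on the way in. The shape of that walk in isolation (the lookup struct is a hypothetical stand-in for `bytecode_analysis().GetLoopInfoFor()`):

```cpp
#include <map>
#include <vector>

// Minimal stand-in for bytecode_analysis().GetLoopInfoFor(x).parent_offset():
// maps a loop-header offset to its parent header offset, -1 at top level.
struct LoopTree {
  std::map<int, int> parent;
  int ParentOffset(int header) const {
    auto it = parent.find(header);
    return it == parent.end() ? -1 : it->second;
  }
};

// Collect the headers of all loops enclosing osr_entry; as in the V8 code,
// back() ends up being the outermost loop (or the vector stays empty when
// the OSR loop is already top-level).
std::vector<int> EnclosingLoopHeaders(const LoopTree& loops, int osr_entry) {
  std::vector<int> outer;
  for (int off = loops.ParentOffset(osr_entry); off != -1;
       off = loops.ParentOffset(off)) {
    outer.push_back(off);
  }
  return outer;
}
```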
- graph_builder_->ExitThenEnterExceptionHandlers(osr_offset); + graph_builder_->ExitThenEnterExceptionHandlers(osr_entry); graph_builder_->set_currently_peeled_loop_offset( - bytecode_analysis.GetLoopInfoFor(osr_offset).parent_offset()); + graph_builder_->bytecode_analysis() + .GetLoopInfoFor(osr_entry) + .parent_offset()); } void RestoreState(int target_offset, int new_parent_offset) { @@ -1198,8 +1207,8 @@ void BytecodeGraphBuilder::RemoveMergeEnvironmentsBeforeOffset( void BytecodeGraphBuilder::AdvanceToOsrEntryAndPeelLoops() { OsrIteratorState iterator_states(this); iterator_states.ProcessOsrPrelude(); - int osr_offset = bytecode_analysis().osr_entry_point(); - DCHECK_EQ(bytecode_iterator().current_offset(), osr_offset); + int osr_entry = bytecode_analysis().osr_entry_point(); + DCHECK_EQ(bytecode_iterator().current_offset(), osr_entry); environment()->FillWithOsrValues(); @@ -1217,7 +1226,7 @@ void BytecodeGraphBuilder::AdvanceToOsrEntryAndPeelLoops() { // parent loop entirely, and so on. int current_parent_offset = - bytecode_analysis().GetLoopInfoFor(osr_offset).parent_offset(); + bytecode_analysis().GetLoopInfoFor(osr_entry).parent_offset(); while (current_parent_offset != -1) { const LoopInfo& current_parent_loop = bytecode_analysis().GetLoopInfoFor(current_parent_offset); @@ -1261,6 +1270,7 @@ void BytecodeGraphBuilder::AdvanceToOsrEntryAndPeelLoops() { } void BytecodeGraphBuilder::VisitSingleBytecode() { + tick_counter_->DoTick(); int current_offset = bytecode_iterator().current_offset(); UpdateSourcePosition(current_offset); ExitThenEnterExceptionHandlers(current_offset); @@ -1289,14 +1299,12 @@ void BytecodeGraphBuilder::VisitSingleBytecode() { } void BytecodeGraphBuilder::VisitBytecodes() { - RunBytecodeAnalysis(); - if (!bytecode_analysis().resume_jump_targets().empty()) { environment()->BindGeneratorState( jsgraph()->SmiConstant(JSGeneratorObject::kGeneratorExecuting)); } - if (bytecode_analysis().HasOsrEntryPoint()) { + if (osr_) { // We peel the OSR loop and any outer loop containing it except that we // leave the nodes corresponding to the whole outermost loop (including // the last copies of the loops it contains) to be generated by the normal @@ -1333,7 +1341,7 @@ void BytecodeGraphBuilder::VisitLdaSmi() { void BytecodeGraphBuilder::VisitLdaConstant() { Node* node = jsgraph()->Constant( - handle(bytecode_iterator().GetConstantForIndexOperand(0), isolate())); + bytecode_iterator().GetConstantForIndexOperand(0, isolate())); environment()->BindAccumulator(node); } @@ -1383,15 +1391,16 @@ Node* BytecodeGraphBuilder::BuildLoadGlobal(Handle name, uint32_t feedback_slot_index, TypeofMode typeof_mode) { VectorSlotPair feedback = CreateVectorSlotPair(feedback_slot_index); - DCHECK(IsLoadGlobalICKind(feedback_vector()->GetKind(feedback.slot()))); + DCHECK( + IsLoadGlobalICKind(feedback_vector().object()->GetKind(feedback.slot()))); const Operator* op = javascript()->LoadGlobal(name, feedback, typeof_mode); return NewNode(op); } void BytecodeGraphBuilder::VisitLdaGlobal() { PrepareEagerCheckpoint(); - Handle name( - Name::cast(bytecode_iterator().GetConstantForIndexOperand(0)), isolate()); + Handle name = Handle::cast( + bytecode_iterator().GetConstantForIndexOperand(0, isolate())); uint32_t feedback_slot_index = bytecode_iterator().GetIndexOperand(1); Node* node = BuildLoadGlobal(name, feedback_slot_index, TypeofMode::NOT_INSIDE_TYPEOF); @@ -1400,8 +1409,8 @@ void BytecodeGraphBuilder::VisitLdaGlobal() { void BytecodeGraphBuilder::VisitLdaGlobalInsideTypeof() { 
PrepareEagerCheckpoint(); - Handle name( - Name::cast(bytecode_iterator().GetConstantForIndexOperand(0)), isolate()); + Handle name = Handle::cast( + bytecode_iterator().GetConstantForIndexOperand(0, isolate())); uint32_t feedback_slot_index = bytecode_iterator().GetIndexOperand(1); Node* node = BuildLoadGlobal(name, feedback_slot_index, TypeofMode::INSIDE_TYPEOF); @@ -1410,8 +1419,8 @@ void BytecodeGraphBuilder::VisitLdaGlobalInsideTypeof() { void BytecodeGraphBuilder::VisitStaGlobal() { PrepareEagerCheckpoint(); - Handle name( - Name::cast(bytecode_iterator().GetConstantForIndexOperand(0)), isolate()); + Handle name = Handle::cast( + bytecode_iterator().GetConstantForIndexOperand(0, isolate())); VectorSlotPair feedback = CreateVectorSlotPair(bytecode_iterator().GetIndexOperand(1)); Node* value = environment()->LookupAccumulator(); @@ -1537,7 +1546,7 @@ void BytecodeGraphBuilder::VisitStaCurrentContextSlot() { void BytecodeGraphBuilder::BuildLdaLookupSlot(TypeofMode typeof_mode) { PrepareEagerCheckpoint(); Node* name = jsgraph()->Constant( - handle(bytecode_iterator().GetConstantForIndexOperand(0), isolate())); + bytecode_iterator().GetConstantForIndexOperand(0, isolate())); const Operator* op = javascript()->CallRuntime(typeof_mode == TypeofMode::NOT_INSIDE_TYPEOF ? Runtime::kLoadLookupSlot @@ -1622,7 +1631,7 @@ void BytecodeGraphBuilder::BuildLdaLookupContextSlot(TypeofMode typeof_mode) { set_environment(slow_environment); { Node* name = jsgraph()->Constant( - handle(bytecode_iterator().GetConstantForIndexOperand(0), isolate())); + bytecode_iterator().GetConstantForIndexOperand(0, isolate())); const Operator* op = javascript()->CallRuntime(typeof_mode == TypeofMode::NOT_INSIDE_TYPEOF @@ -1657,9 +1666,8 @@ void BytecodeGraphBuilder::BuildLdaLookupGlobalSlot(TypeofMode typeof_mode) { // Fast path, do a global load. 
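The mechanical change repeated throughout this file: `GetConstantForIndexOperand` now takes the isolate and returns a handle itself, so call sites shrink from "cast the raw Object, wrap it in a fresh handle" to a plain handle downcast. Schematically, with the template arguments written out:

```cpp
// Before: the accessor returned a raw Object; every caller re-wrapped it.
Handle<Name> name(
    Name::cast(bytecode_iterator().GetConstantForIndexOperand(0)), isolate());

// After: the accessor allocates the handle, callers only downcast.
Handle<Name> name = Handle<Name>::cast(
    bytecode_iterator().GetConstantForIndexOperand(0, isolate()));
```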
{ PrepareEagerCheckpoint(); - Handle name( - Name::cast(bytecode_iterator().GetConstantForIndexOperand(0)), - isolate()); + Handle name = Handle::cast( + bytecode_iterator().GetConstantForIndexOperand(0, isolate())); uint32_t feedback_slot_index = bytecode_iterator().GetIndexOperand(1); Node* node = BuildLoadGlobal(name, feedback_slot_index, typeof_mode); environment()->BindAccumulator(node, Environment::kAttachFrameState); @@ -1675,7 +1683,7 @@ void BytecodeGraphBuilder::BuildLdaLookupGlobalSlot(TypeofMode typeof_mode) { set_environment(slow_environment); { Node* name = jsgraph()->Constant( - handle(bytecode_iterator().GetConstantForIndexOperand(0), isolate())); + bytecode_iterator().GetConstantForIndexOperand(0, isolate())); const Operator* op = javascript()->CallRuntime(typeof_mode == TypeofMode::NOT_INSIDE_TYPEOF @@ -1705,7 +1713,7 @@ void BytecodeGraphBuilder::VisitStaLookupSlot() { PrepareEagerCheckpoint(); Node* value = environment()->LookupAccumulator(); Node* name = jsgraph()->Constant( - handle(bytecode_iterator().GetConstantForIndexOperand(0), isolate())); + bytecode_iterator().GetConstantForIndexOperand(0, isolate())); int bytecode_flags = bytecode_iterator().GetFlagOperand(1); LanguageMode language_mode = static_cast( interpreter::StoreLookupSlotFlags::LanguageModeBit::decode( @@ -1729,8 +1737,8 @@ void BytecodeGraphBuilder::VisitLdaNamedProperty() { PrepareEagerCheckpoint(); Node* object = environment()->LookupRegister(bytecode_iterator().GetRegisterOperand(0)); - Handle name( - Name::cast(bytecode_iterator().GetConstantForIndexOperand(1)), isolate()); + Handle name = Handle::cast( + bytecode_iterator().GetConstantForIndexOperand(1, isolate())); VectorSlotPair feedback = CreateVectorSlotPair(bytecode_iterator().GetIndexOperand(2)); const Operator* op = javascript()->LoadNamed(name, feedback); @@ -1753,8 +1761,8 @@ void BytecodeGraphBuilder::VisitLdaNamedPropertyNoFeedback() { PrepareEagerCheckpoint(); Node* object = environment()->LookupRegister(bytecode_iterator().GetRegisterOperand(0)); - Handle name( - Name::cast(bytecode_iterator().GetConstantForIndexOperand(1)), isolate()); + Handle name = Handle::cast( + bytecode_iterator().GetConstantForIndexOperand(1, isolate())); const Operator* op = javascript()->LoadNamed(name, VectorSlotPair()); Node* node = NewNode(op, object); environment()->BindAccumulator(node, Environment::kAttachFrameState); @@ -1788,8 +1796,8 @@ void BytecodeGraphBuilder::BuildNamedStore(StoreMode store_mode) { Node* value = environment()->LookupAccumulator(); Node* object = environment()->LookupRegister(bytecode_iterator().GetRegisterOperand(0)); - Handle name( - Name::cast(bytecode_iterator().GetConstantForIndexOperand(1)), isolate()); + Handle name = Handle::cast( + bytecode_iterator().GetConstantForIndexOperand(1, isolate())); VectorSlotPair feedback = CreateVectorSlotPair(bytecode_iterator().GetIndexOperand(2)); @@ -1828,8 +1836,8 @@ void BytecodeGraphBuilder::VisitStaNamedPropertyNoFeedback() { Node* value = environment()->LookupAccumulator(); Node* object = environment()->LookupRegister(bytecode_iterator().GetRegisterOperand(0)); - Handle name( - Name::cast(bytecode_iterator().GetConstantForIndexOperand(1)), isolate()); + Handle name = Handle::cast( + bytecode_iterator().GetConstantForIndexOperand(1, isolate())); LanguageMode language_mode = static_cast(bytecode_iterator().GetFlagOperand(2)); const Operator* op = @@ -1902,10 +1910,8 @@ void BytecodeGraphBuilder::VisitPopContext() { } void BytecodeGraphBuilder::VisitCreateClosure() { - Handle 
shared_info( - SharedFunctionInfo::cast( - bytecode_iterator().GetConstantForIndexOperand(0)), - isolate()); + Handle shared_info = Handle::cast( + bytecode_iterator().GetConstantForIndexOperand(0, isolate())); AllocationType allocation = interpreter::CreateClosureFlags::PretenuredBit::decode( bytecode_iterator().GetFlagOperand(2)) @@ -1913,7 +1919,7 @@ void BytecodeGraphBuilder::VisitCreateClosure() { : AllocationType::kYoung; const Operator* op = javascript()->CreateClosure( shared_info, - feedback_vector()->GetClosureFeedbackCell( + feedback_vector().object()->GetClosureFeedbackCell( bytecode_iterator().GetIndexOperand(1)), handle(jsgraph()->isolate()->builtins()->builtin(Builtins::kCompileLazy), isolate()), @@ -1923,9 +1929,8 @@ void BytecodeGraphBuilder::VisitCreateClosure() { } void BytecodeGraphBuilder::VisitCreateBlockContext() { - Handle scope_info( - ScopeInfo::cast(bytecode_iterator().GetConstantForIndexOperand(0)), - isolate()); + Handle scope_info = Handle::cast( + bytecode_iterator().GetConstantForIndexOperand(0, isolate())); const Operator* op = javascript()->CreateBlockContext(scope_info); Node* context = NewNode(op); @@ -1933,9 +1938,8 @@ void BytecodeGraphBuilder::VisitCreateBlockContext() { } void BytecodeGraphBuilder::VisitCreateFunctionContext() { - Handle scope_info( - ScopeInfo::cast(bytecode_iterator().GetConstantForIndexOperand(0)), - isolate()); + Handle scope_info = Handle::cast( + bytecode_iterator().GetConstantForIndexOperand(0, isolate())); uint32_t slots = bytecode_iterator().GetUnsignedImmediateOperand(1); const Operator* op = javascript()->CreateFunctionContext(scope_info, slots, FUNCTION_SCOPE); @@ -1944,9 +1948,8 @@ void BytecodeGraphBuilder::VisitCreateFunctionContext() { } void BytecodeGraphBuilder::VisitCreateEvalContext() { - Handle scope_info( - ScopeInfo::cast(bytecode_iterator().GetConstantForIndexOperand(0)), - isolate()); + Handle scope_info = Handle::cast( + bytecode_iterator().GetConstantForIndexOperand(0, isolate())); uint32_t slots = bytecode_iterator().GetUnsignedImmediateOperand(1); const Operator* op = javascript()->CreateFunctionContext(scope_info, slots, EVAL_SCOPE); @@ -1957,9 +1960,8 @@ void BytecodeGraphBuilder::VisitCreateEvalContext() { void BytecodeGraphBuilder::VisitCreateCatchContext() { interpreter::Register reg = bytecode_iterator().GetRegisterOperand(0); Node* exception = environment()->LookupRegister(reg); - Handle scope_info( - ScopeInfo::cast(bytecode_iterator().GetConstantForIndexOperand(1)), - isolate()); + Handle scope_info = Handle::cast( + bytecode_iterator().GetConstantForIndexOperand(1, isolate())); const Operator* op = javascript()->CreateCatchContext(scope_info); Node* context = NewNode(op, exception); @@ -1969,9 +1971,8 @@ void BytecodeGraphBuilder::VisitCreateCatchContext() { void BytecodeGraphBuilder::VisitCreateWithContext() { Node* object = environment()->LookupRegister(bytecode_iterator().GetRegisterOperand(0)); - Handle scope_info( - ScopeInfo::cast(bytecode_iterator().GetConstantForIndexOperand(1)), - isolate()); + Handle scope_info = Handle::cast( + bytecode_iterator().GetConstantForIndexOperand(1, isolate())); const Operator* op = javascript()->CreateWithContext(scope_info); Node* context = NewNode(op, object); @@ -1997,9 +1998,8 @@ void BytecodeGraphBuilder::VisitCreateRestParameter() { } void BytecodeGraphBuilder::VisitCreateRegExpLiteral() { - Handle constant_pattern( - String::cast(bytecode_iterator().GetConstantForIndexOperand(0)), - isolate()); + Handle constant_pattern = Handle::cast( + 
bytecode_iterator().GetConstantForIndexOperand(0, isolate())); int const slot_id = bytecode_iterator().GetIndexOperand(1); VectorSlotPair pair = CreateVectorSlotPair(slot_id); int literal_flags = bytecode_iterator().GetFlagOperand(2); @@ -2009,10 +2009,9 @@ void BytecodeGraphBuilder::VisitCreateRegExpLiteral() { } void BytecodeGraphBuilder::VisitCreateArrayLiteral() { - Handle array_boilerplate_description( - ArrayBoilerplateDescription::cast( - bytecode_iterator().GetConstantForIndexOperand(0)), - isolate()); + Handle array_boilerplate_description = + Handle::cast( + bytecode_iterator().GetConstantForIndexOperand(0, isolate())); int const slot_id = bytecode_iterator().GetIndexOperand(1); VectorSlotPair pair = CreateVectorSlotPair(slot_id); int bytecode_flags = bytecode_iterator().GetFlagOperand(2); @@ -2046,10 +2045,9 @@ void BytecodeGraphBuilder::VisitCreateArrayFromIterable() { } void BytecodeGraphBuilder::VisitCreateObjectLiteral() { - Handle constant_properties( - ObjectBoilerplateDescription::cast( - bytecode_iterator().GetConstantForIndexOperand(0)), - isolate()); + Handle constant_properties = + Handle::cast( + bytecode_iterator().GetConstantForIndexOperand(0, isolate())); int const slot_id = bytecode_iterator().GetIndexOperand(1); VectorSlotPair pair = CreateVectorSlotPair(slot_id); int bytecode_flags = bytecode_iterator().GetFlagOperand(2); @@ -2082,29 +2080,13 @@ void BytecodeGraphBuilder::VisitCloneObject() { } void BytecodeGraphBuilder::VisitGetTemplateObject() { - Handle description( - TemplateObjectDescription::cast( - bytecode_iterator().GetConstantForIndexOperand(0)), - isolate()); + DisallowHeapAccessIf no_heap_access(FLAG_concurrent_inlining); FeedbackSlot slot = bytecode_iterator().GetSlotOperand(1); - FeedbackNexus nexus(feedback_vector(), slot); - - Handle cached_value; - if (nexus.GetFeedback() == MaybeObject::FromSmi(Smi::zero())) { - // It's not observable when the template object is created, so we - // can just create it eagerly during graph building and bake in - // the JSArray constant here. 
- cached_value = TemplateObjectDescription::GetTemplateObject( - isolate(), native_context(), description, shared_info(), slot.ToInt()); - nexus.vector().Set(slot, *cached_value); - } else { - cached_value = - handle(JSArray::cast(nexus.GetFeedback()->GetHeapObjectAssumeStrong()), - isolate()); - } - - Node* template_object = jsgraph()->HeapConstant(cached_value); - environment()->BindAccumulator(template_object); + ObjectRef description( + broker(), bytecode_iterator().GetConstantForIndexOperand(0, isolate())); + JSArrayRef template_object = + shared_info().GetTemplateObject(description, feedback_vector(), slot); + environment()->BindAccumulator(jsgraph()->Constant(template_object)); } Node* const* BytecodeGraphBuilder::GetCallArgumentsFromRegisters( @@ -2587,7 +2569,7 @@ void BytecodeGraphBuilder::VisitThrowReferenceErrorIfHole() { Node* check_for_hole = NewNode(simplified()->ReferenceEqual(), accumulator, jsgraph()->TheHoleConstant()); Node* name = jsgraph()->Constant( - handle(bytecode_iterator().GetConstantForIndexOperand(0), isolate())); + bytecode_iterator().GetConstantForIndexOperand(0, isolate())); BuildHoleCheckAndThrow(check_for_hole, Runtime::kThrowAccessedUninitializedVariable, name); } @@ -2658,7 +2640,7 @@ void BytecodeGraphBuilder::BuildBinaryOp(const Operator* op) { BinaryOperationHint BytecodeGraphBuilder::GetBinaryOperationHint( int operand_index) { FeedbackSlot slot = bytecode_iterator().GetSlotOperand(operand_index); - FeedbackNexus nexus(feedback_vector(), slot); + FeedbackNexus nexus(feedback_vector().object(), slot); return nexus.GetBinaryOperationFeedback(); } @@ -2666,14 +2648,14 @@ BinaryOperationHint BytecodeGraphBuilder::GetBinaryOperationHint( // feedback. CompareOperationHint BytecodeGraphBuilder::GetCompareOperationHint() { FeedbackSlot slot = bytecode_iterator().GetSlotOperand(1); - FeedbackNexus nexus(feedback_vector(), slot); + FeedbackNexus nexus(feedback_vector().object(), slot); return nexus.GetCompareOperationFeedback(); } // Helper function to create for-in mode from the recorded type feedback. ForInMode BytecodeGraphBuilder::GetForInMode(int operand_index) { FeedbackSlot slot = bytecode_iterator().GetSlotOperand(operand_index); - FeedbackNexus nexus(feedback_vector(), slot); + FeedbackNexus nexus(feedback_vector().object(), slot); switch (nexus.GetForInFeedback()) { case ForInHint::kNone: case ForInHint::kEnumCacheKeysAndIndices: @@ -2688,7 +2670,8 @@ ForInMode BytecodeGraphBuilder::GetForInMode(int operand_index) { CallFrequency BytecodeGraphBuilder::ComputeCallFrequency(int slot_id) const { if (invocation_frequency_.IsUnknown()) return CallFrequency(); - FeedbackNexus nexus(feedback_vector(), FeedbackVector::ToSlot(slot_id)); + FeedbackNexus nexus(feedback_vector().object(), + FeedbackVector::ToSlot(slot_id)); float feedback_frequency = nexus.ComputeCallFrequency(); if (feedback_frequency == 0.0f) { // This is to prevent multiplying zero and infinity. 
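The early return above on `feedback_frequency == 0.0f` is a floating-point guard, not an optimization: the comment implies `invocation_frequency_` can be infinite, and under IEEE 754 the product 0 x inf is NaN, which would then poison every downstream frequency comparison. A two-line demonstration:

```cpp
#include <cmath>
#include <limits>

int main() {
  float feedback = 0.0f;
  float invocation = std::numeric_limits<float>::infinity();
  float product = feedback * invocation;  // IEEE 754: 0 * inf == NaN
  return std::isnan(product) ? 0 : 1;     // returns 0: the product is NaN
}
```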
@@ -2699,7 +2682,8 @@ CallFrequency BytecodeGraphBuilder::ComputeCallFrequency(int slot_id) const { } SpeculationMode BytecodeGraphBuilder::GetSpeculationMode(int slot_id) const { - FeedbackNexus nexus(feedback_vector(), FeedbackVector::ToSlot(slot_id)); + FeedbackNexus nexus(feedback_vector().object(), + FeedbackVector::ToSlot(slot_id)); return nexus.GetSpeculationMode(); } @@ -3301,8 +3285,7 @@ void BytecodeGraphBuilder::VisitSuspendGenerator() { CHECK_EQ(0, first_reg.index()); int register_count = static_cast(bytecode_iterator().GetRegisterCountOperand(2)); - int parameter_count_without_receiver = - bytecode_array()->parameter_count() - 1; + int parameter_count_without_receiver = bytecode_array().parameter_count() - 1; Node* suspend_id = jsgraph()->SmiConstant( bytecode_iterator().GetUnsignedImmediateOperand(3)); @@ -3442,8 +3425,7 @@ void BytecodeGraphBuilder::VisitResumeGenerator() { const BytecodeLivenessState* liveness = bytecode_analysis().GetOutLivenessFor( bytecode_iterator().current_offset()); - int parameter_count_without_receiver = - bytecode_array()->parameter_count() - 1; + int parameter_count_without_receiver = bytecode_array().parameter_count() - 1; // Mapping between registers and array indices must match that used in // InterpreterAssembler::ExportParametersAndRegisterFile. @@ -3836,7 +3818,10 @@ Node** BytecodeGraphBuilder::EnsureInputBufferSize(int size) { } void BytecodeGraphBuilder::ExitThenEnterExceptionHandlers(int current_offset) { - HandlerTable table(*bytecode_array()); + DisallowHeapAllocation no_allocation; + HandlerTable table(bytecode_array().handler_table_address(), + bytecode_array().handler_table_size(), + HandlerTable::kRangeBasedEncoding); // Potentially exit exception handlers. while (!exception_handlers_.empty()) { @@ -3890,7 +3875,7 @@ Node* BytecodeGraphBuilder::MakeNode(const Operator* op, int value_input_count, if (has_context) { *current_input++ = OperatorProperties::NeedsExactContext(op) ? environment()->Context() - : jsgraph()->HeapConstant(native_context()); + : jsgraph()->Constant(native_context()); } if (has_frame_state) { // The frame state will be inserted later. 
Here we misuse the {Dead} node @@ -4037,12 +4022,19 @@ void BuildGraphFromBytecode(JSHeapBroker* broker, Zone* local_zone, BailoutId osr_offset, JSGraph* jsgraph, CallFrequency const& invocation_frequency, SourcePositionTable* source_positions, - Handle native_context, int inlining_id, - BytecodeGraphBuilderFlags flags) { - BytecodeGraphBuilder builder(broker, local_zone, bytecode_array, shared, - feedback_vector, osr_offset, jsgraph, - invocation_frequency, source_positions, - native_context, inlining_id, flags); + Handle native_context, + int inlining_id, BytecodeGraphBuilderFlags flags, + TickCounter* tick_counter) { + BytecodeArrayRef bytecode_array_ref(broker, bytecode_array); + DCHECK(bytecode_array_ref.IsSerializedForCompilation()); + FeedbackVectorRef feedback_vector_ref(broker, feedback_vector); + SharedFunctionInfoRef shared_ref(broker, shared); + DCHECK(shared_ref.IsSerializedForCompilation(feedback_vector_ref)); + NativeContextRef native_context_ref(broker, native_context); + BytecodeGraphBuilder builder( + broker, local_zone, bytecode_array_ref, shared_ref, feedback_vector_ref, + osr_offset, jsgraph, invocation_frequency, source_positions, + native_context_ref, inlining_id, flags, tick_counter); builder.CreateGraph(); } diff --git a/deps/v8/src/compiler/bytecode-graph-builder.h b/deps/v8/src/compiler/bytecode-graph-builder.h index b9504a60868920..682569778f6990 100644 --- a/deps/v8/src/compiler/bytecode-graph-builder.h +++ b/deps/v8/src/compiler/bytecode-graph-builder.h @@ -11,6 +11,9 @@ #include "src/handles/handles.h" namespace v8 { + +class TickCounter; + namespace internal { class BytecodeArray; @@ -25,6 +28,9 @@ class SourcePositionTable; enum class BytecodeGraphBuilderFlag : uint8_t { kSkipFirstStackCheck = 1 << 0, + // TODO(neis): Remove liveness flag here when concurrent inlining is always + // on, because then the serializer will be the only place where we perform + // bytecode analysis. kAnalyzeEnvironmentLiveness = 1 << 1, kBailoutOnUninitialized = 1 << 2, }; @@ -39,8 +45,9 @@ void BuildGraphFromBytecode(JSHeapBroker* broker, Zone* local_zone, BailoutId osr_offset, JSGraph* jsgraph, CallFrequency const& invocation_frequency, SourcePositionTable* source_positions, - Handle native_context, int inlining_id, - BytecodeGraphBuilderFlags flags); + Handle native_context, + int inlining_id, BytecodeGraphBuilderFlags flags, + TickCounter* tick_counter); } // namespace compiler } // namespace internal diff --git a/deps/v8/src/compiler/code-assembler.cc b/deps/v8/src/compiler/code-assembler.cc index d8a01d6308f2d2..af0ba98ffd159b 100644 --- a/deps/v8/src/compiler/code-assembler.cc +++ b/deps/v8/src/compiler/code-assembler.cc @@ -226,8 +226,12 @@ void CodeAssembler::GenerateCheckMaybeObjectIsObject(Node* node, IntPtrConstant(kHeapObjectTagMask)), IntPtrConstant(kWeakHeapObjectTag)), &ok); - Node* message_node = StringConstant(location); - DebugAbort(message_node); + EmbeddedVector message; + SNPrintF(message, "no Object: %s", location); + Node* message_node = StringConstant(message.begin()); + // This somewhat misuses the AbortCSAAssert runtime function. This will print + // "abort: CSA_ASSERT failed: ", which is good enough. 
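Just above, the check's failure message is formatted into a fixed-size stack buffer before being turned into a constant string node. The same format-into-buffer pattern in standalone form (snprintf standing in for V8's SNPrintF; the buffer size here is arbitrary):

    #include <cstdio>
    // Format a diagnostic into a fixed-size stack buffer; snprintf truncates
    // instead of overflowing when the location string is too long.
    void FormatNoObjectMessage(const char* location) {
      char message[256];
      snprintf(message, sizeof(message), "no Object: %s", location);
    }
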
+ AbortCSAAssert(message_node); Unreachable(); Bind(&ok); } @@ -409,8 +413,8 @@ void CodeAssembler::ReturnRaw(Node* value) { return raw_assembler()->Return(value); } -void CodeAssembler::DebugAbort(Node* message) { - raw_assembler()->DebugAbort(message); +void CodeAssembler::AbortCSAAssert(Node* message) { + raw_assembler()->AbortCSAAssert(message); } void CodeAssembler::DebugBreak() { raw_assembler()->DebugBreak(); } @@ -441,16 +445,16 @@ void CodeAssembler::Bind(Label* label, AssemblerDebugInfo debug_info) { } #endif // DEBUG -Node* CodeAssembler::LoadFramePointer() { - return raw_assembler()->LoadFramePointer(); +TNode CodeAssembler::LoadFramePointer() { + return UncheckedCast(raw_assembler()->LoadFramePointer()); } -Node* CodeAssembler::LoadParentFramePointer() { - return raw_assembler()->LoadParentFramePointer(); +TNode CodeAssembler::LoadParentFramePointer() { + return UncheckedCast(raw_assembler()->LoadParentFramePointer()); } -Node* CodeAssembler::LoadStackPointer() { - return raw_assembler()->LoadStackPointer(); +TNode CodeAssembler::LoadStackPointer() { + return UncheckedCast(raw_assembler()->LoadStackPointer()); } TNode CodeAssembler::TaggedPoisonOnSpeculation( @@ -1140,14 +1144,6 @@ Node* CodeAssembler::Retain(Node* value) { return raw_assembler()->Retain(value); } -Node* CodeAssembler::ChangeTaggedToCompressed(Node* tagged) { - return raw_assembler()->ChangeTaggedToCompressed(tagged); -} - -Node* CodeAssembler::ChangeCompressedToTagged(Node* compressed) { - return raw_assembler()->ChangeCompressedToTagged(compressed); -} - Node* CodeAssembler::Projection(int index, Node* value) { DCHECK_LT(index, value->op()->ValueOutputCount()); return raw_assembler()->Projection(index, value); diff --git a/deps/v8/src/compiler/code-assembler.h b/deps/v8/src/compiler/code-assembler.h index 0f7ae640828ab4..cc432214aa1063 100644 --- a/deps/v8/src/compiler/code-assembler.h +++ b/deps/v8/src/compiler/code-assembler.h @@ -73,6 +73,9 @@ class PromiseReactionJobTask; class PromiseRejectReactionJobTask; class WasmDebugInfo; class Zone; +#define MAKE_FORWARD_DECLARATION(V, NAME, Name, name) class Name; +TORQUE_STRUCT_LIST_GENERATOR(MAKE_FORWARD_DECLARATION, UNUSED) +#undef MAKE_FORWARD_DECLARATION template class Signature; @@ -107,13 +110,13 @@ struct Uint32T : Word32T { struct Int16T : Int32T { static constexpr MachineType kMachineType = MachineType::Int16(); }; -struct Uint16T : Uint32T { +struct Uint16T : Uint32T, Int32T { static constexpr MachineType kMachineType = MachineType::Uint16(); }; struct Int8T : Int16T { static constexpr MachineType kMachineType = MachineType::Int8(); }; -struct Uint8T : Uint16T { +struct Uint8T : Uint16T, Int16T { static constexpr MachineType kMachineType = MachineType::Uint8(); }; @@ -147,6 +150,12 @@ struct Float64T : UntaggedT { static constexpr MachineType kMachineType = MachineType::Float64(); }; +#ifdef V8_COMPRESS_POINTERS +using TaggedT = Int32T; +#else +using TaggedT = IntPtrT; +#endif + // Result of a comparison operation. 
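The TaggedT alias a few lines up is a compile-time width switch: under pointer compression a tagged value occupies 32 bits, otherwise it is pointer-sized. The same pattern in a freestanding sketch (macro and alias names invented here):

    #include <cstdint>
    #ifdef SKETCH_COMPRESS_POINTERS
    using TaggedWord = int32_t;   // compressed tagged values fit in 32 bits
    #else
    using TaggedWord = intptr_t;  // otherwise tagged values are pointer-sized
    #endif
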
struct BoolT : Word32T {}; @@ -329,6 +338,7 @@ class WasmExceptionObject; class WasmExceptionTag; class WasmExportedFunctionData; class WasmGlobalObject; +class WasmIndirectFunctionTable; class WasmJSFunctionData; class WasmMemoryObject; class WasmModuleObject; @@ -413,6 +423,10 @@ struct types_have_common_values { static const bool value = is_subtype::value || is_subtype::value; }; template +struct types_have_common_values { + static const bool value = types_have_common_values::value; +}; +template struct types_have_common_values { static const bool value = types_have_common_values::value; }; @@ -611,14 +625,15 @@ TNode Float64Add(TNode a, TNode b); V(Float64Sqrt, Float64T, Float64T) \ V(Float64Tan, Float64T, Float64T) \ V(Float64Tanh, Float64T, Float64T) \ - V(Float64ExtractLowWord32, Word32T, Float64T) \ - V(Float64ExtractHighWord32, Word32T, Float64T) \ + V(Float64ExtractLowWord32, Uint32T, Float64T) \ + V(Float64ExtractHighWord32, Uint32T, Float64T) \ V(BitcastTaggedToWord, IntPtrT, Object) \ + V(BitcastTaggedSignedToWord, IntPtrT, Smi) \ V(BitcastMaybeObjectToWord, IntPtrT, MaybeObject) \ V(BitcastWordToTagged, Object, WordT) \ V(BitcastWordToTaggedSigned, Smi, WordT) \ V(TruncateFloat64ToFloat32, Float32T, Float64T) \ - V(TruncateFloat64ToWord32, Word32T, Float64T) \ + V(TruncateFloat64ToWord32, Uint32T, Float64T) \ V(TruncateInt64ToInt32, Int32T, Int64T) \ V(ChangeFloat32ToFloat64, Float64T, Float32T) \ V(ChangeFloat64ToUint32, Uint32T, Float64T) \ @@ -628,7 +643,7 @@ TNode Float64Add(TNode a, TNode b); V(ChangeUint32ToFloat64, Float64T, Word32T) \ V(ChangeUint32ToUint64, Uint64T, Word32T) \ V(BitcastInt32ToFloat32, Float32T, Word32T) \ - V(BitcastFloat32ToInt32, Word32T, Float32T) \ + V(BitcastFloat32ToInt32, Uint32T, Float32T) \ V(RoundFloat64ToInt32, Int32T, Float64T) \ V(RoundInt32ToFloat32, Int32T, Float32T) \ V(Float64SilenceNaN, Float64T, Float64T) \ @@ -840,10 +855,13 @@ class V8_EXPORT_PRIVATE CodeAssembler { // TODO(jkummerow): The style guide wants pointers for output parameters. // https://google.github.io/styleguide/cppguide.html#Output_Parameters - bool ToInt32Constant(Node* node, int32_t& out_value); - bool ToInt64Constant(Node* node, int64_t& out_value); + bool ToInt32Constant(Node* node, + int32_t& out_value); // NOLINT(runtime/references) + bool ToInt64Constant(Node* node, + int64_t& out_value); // NOLINT(runtime/references) bool ToSmiConstant(Node* node, Smi* out_value); - bool ToIntPtrConstant(Node* node, intptr_t& out_value); + bool ToIntPtrConstant(Node* node, + intptr_t& out_value); // NOLINT(runtime/references) bool IsUndefinedConstant(TNode node); bool IsNullConstant(TNode node); @@ -872,7 +890,7 @@ class V8_EXPORT_PRIVATE CodeAssembler { void ReturnRaw(Node* value); - void DebugAbort(Node* message); + void AbortCSAAssert(Node* message); void DebugBreak(); void Unreachable(); void Comment(const char* msg) { @@ -938,11 +956,11 @@ class V8_EXPORT_PRIVATE CodeAssembler { Label** case_labels, size_t case_count); // Access to the frame pointer - Node* LoadFramePointer(); - Node* LoadParentFramePointer(); + TNode LoadFramePointer(); + TNode LoadParentFramePointer(); // Access to the stack pointer - Node* LoadStackPointer(); + TNode LoadStackPointer(); // Poison |value| on speculative paths. 
TNode TaggedPoisonOnSpeculation(SloppyTNode value); @@ -1047,20 +1065,60 @@ class V8_EXPORT_PRIVATE CodeAssembler { CODE_ASSEMBLER_BINARY_OP_LIST(DECLARE_CODE_ASSEMBLER_BINARY_OP) #undef DECLARE_CODE_ASSEMBLER_BINARY_OP - TNode WordShr(TNode left, TNode right) { - return UncheckedCast( + TNode WordShr(TNode left, TNode right) { + return Unsigned( WordShr(static_cast(left), static_cast(right))); } TNode WordSar(TNode left, TNode right) { - return UncheckedCast( - WordSar(static_cast(left), static_cast(right))); + return Signed(WordSar(static_cast(left), static_cast(right))); + } + TNode WordShl(TNode left, TNode right) { + return Signed(WordShl(static_cast(left), static_cast(right))); + } + TNode WordShl(TNode left, TNode right) { + return Unsigned( + WordShl(static_cast(left), static_cast(right))); + } + + TNode Word32Shl(TNode left, TNode right) { + return Signed( + Word32Shl(static_cast(left), static_cast(right))); + } + TNode Word32Shl(TNode left, TNode right) { + return Unsigned( + Word32Shl(static_cast(left), static_cast(right))); + } + TNode Word32Shr(TNode left, TNode right) { + return Unsigned( + Word32Shr(static_cast(left), static_cast(right))); } TNode WordAnd(TNode left, TNode right) { - return UncheckedCast( + return Signed(WordAnd(static_cast(left), static_cast(right))); + } + TNode WordAnd(TNode left, TNode right) { + return Unsigned( WordAnd(static_cast(left), static_cast(right))); } + TNode Word32And(TNode left, TNode right) { + return Signed( + Word32And(static_cast(left), static_cast(right))); + } + TNode Word32And(TNode left, TNode right) { + return Unsigned( + Word32And(static_cast(left), static_cast(right))); + } + + TNode Word32Or(TNode left, TNode right) { + return Signed( + Word32Or(static_cast(left), static_cast(right))); + } + TNode Word32Or(TNode left, TNode right) { + return Unsigned( + Word32Or(static_cast(left), static_cast(right))); + } + template ::value && @@ -1106,6 +1164,15 @@ class V8_EXPORT_PRIVATE CodeAssembler { TNode Word64NotEqual(SloppyTNode left, SloppyTNode right); + TNode Word32Or(TNode left, TNode right) { + return UncheckedCast( + Word32Or(static_cast(left), static_cast(right))); + } + TNode Word32And(TNode left, TNode right) { + return UncheckedCast( + Word32And(static_cast(left), static_cast(right))); + } + TNode Int32Add(TNode left, TNode right) { return Signed( Int32Add(static_cast(left), static_cast(right))); @@ -1116,6 +1183,16 @@ class V8_EXPORT_PRIVATE CodeAssembler { Int32Add(static_cast(left), static_cast(right))); } + TNode Int32Sub(TNode left, TNode right) { + return Signed( + Int32Sub(static_cast(left), static_cast(right))); + } + + TNode Int32Mul(TNode left, TNode right) { + return Signed( + Int32Mul(static_cast(left), static_cast(right))); + } + TNode IntPtrAdd(SloppyTNode left, SloppyTNode right); TNode IntPtrDiv(TNode left, TNode right); TNode IntPtrSub(SloppyTNode left, SloppyTNode right); @@ -1195,6 +1272,12 @@ class V8_EXPORT_PRIVATE CodeAssembler { CODE_ASSEMBLER_UNARY_OP_LIST(DECLARE_CODE_ASSEMBLER_UNARY_OP) #undef DECLARE_CODE_ASSEMBLER_UNARY_OP + template + TNode BitcastTaggedToWord(TNode node) { + static_assert(sizeof(Dummy) < 0, + "Should use BitcastTaggedSignedToWord instead."); + } + // Changes a double to an inptr_t for pointer arithmetic outside of Smi range. // Assumes that the double can be exactly represented as an int. 
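The BitcastTaggedToWord template above is a deliberately uninstantiable overload: sizeof(Dummy) < 0 is always false, but because the condition depends on the template parameter, the static_assert only fires if the template is actually instantiated, turning a misuse on Smi inputs into a readable compile error. The idiom in isolation:

    // The "poisoned overload" idiom: compiles fine until someone calls it.
    template <typename T>
    void UseTheSignedVariantInstead(T) {
      static_assert(sizeof(T) < 0, "Use the Signed variant instead.");
    }
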
TNode ChangeFloat64ToUintPtr(SloppyTNode value); @@ -1217,10 +1300,6 @@ class V8_EXPORT_PRIVATE CodeAssembler { // Projections Node* Projection(int index, Node* value); - // Pointer compression and decompression. - Node* ChangeTaggedToCompressed(Node* tagged); - Node* ChangeCompressedToTagged(Node* compressed); - template TNode>::type> Projection(TNode> value) { diff --git a/deps/v8/src/compiler/common-operator-reducer.cc b/deps/v8/src/compiler/common-operator-reducer.cc index fa727748f6cfd7..5dd765527fdf42 100644 --- a/deps/v8/src/compiler/common-operator-reducer.cc +++ b/deps/v8/src/compiler/common-operator-reducer.cc @@ -337,9 +337,9 @@ Reduction CommonOperatorReducer::ReduceReturn(Node* node) { // End // Now the effect input to the {Return} node can be either an {EffectPhi} - // hanging off the same {Merge}, or the {Merge} node is only connected to - // the {Return} and the {Phi}, in which case we know that the effect input - // must somehow dominate all merged branches. + // hanging off the same {Merge}, or the effect chain doesn't depend on the + // {Phi} or the {Merge}, in which case we know that the effect input must + // somehow dominate all merged branches. Node::Inputs control_inputs = control->inputs(); Node::Inputs value_inputs = value->inputs(); @@ -347,7 +347,7 @@ Reduction CommonOperatorReducer::ReduceReturn(Node* node) { DCHECK_EQ(control_inputs.count(), value_inputs.count() - 1); DCHECK_EQ(IrOpcode::kEnd, graph()->end()->opcode()); DCHECK_NE(0, graph()->end()->InputCount()); - if (control->OwnedBy(node, value)) { + if (control->OwnedBy(node, value) && value->OwnedBy(node)) { for (int i = 0; i < control_inputs.count(); ++i) { // Create a new {Return} and connect it to {end}. We don't need to mark // {end} as revisit, because we mark {node} as {Dead} below, which was diff --git a/deps/v8/src/compiler/common-operator.cc b/deps/v8/src/compiler/common-operator.cc index 45e558f6096102..0ef6402264bfac 100644 --- a/deps/v8/src/compiler/common-operator.cc +++ b/deps/v8/src/compiler/common-operator.cc @@ -1216,8 +1216,18 @@ const Operator* CommonOperatorBuilder::HeapConstant( value); // parameter } +const Operator* CommonOperatorBuilder::CompressedHeapConstant( + const Handle& value) { + return new (zone()) Operator1>( // -- + IrOpcode::kCompressedHeapConstant, Operator::kPure, // opcode + "CompressedHeapConstant", // name + 0, 0, 0, 1, 0, 0, // counts + value); // parameter +} + Handle HeapConstantOf(const Operator* op) { - DCHECK_EQ(IrOpcode::kHeapConstant, op->opcode()); + DCHECK(IrOpcode::kHeapConstant == op->opcode() || + IrOpcode::kCompressedHeapConstant == op->opcode()); return OpParameter>(op); } diff --git a/deps/v8/src/compiler/common-operator.h b/deps/v8/src/compiler/common-operator.h index 43a689b5c27004..9f634e72ec27a9 100644 --- a/deps/v8/src/compiler/common-operator.h +++ b/deps/v8/src/compiler/common-operator.h @@ -499,6 +499,7 @@ class V8_EXPORT_PRIVATE CommonOperatorBuilder final const Operator* NumberConstant(volatile double); const Operator* PointerConstant(intptr_t); const Operator* HeapConstant(const Handle&); + const Operator* CompressedHeapConstant(const Handle&); const Operator* ObjectId(uint32_t); const Operator* RelocatableInt32Constant(int32_t value, diff --git a/deps/v8/src/compiler/compilation-dependencies.cc b/deps/v8/src/compiler/compilation-dependencies.cc index f0bb797b68fe18..673f4a341be8ce 100644 --- a/deps/v8/src/compiler/compilation-dependencies.cc +++ b/deps/v8/src/compiler/compilation-dependencies.cc @@ -4,6 +4,7 @@ #include 
"src/compiler/compilation-dependencies.h" +#include "src/compiler/compilation-dependency.h" #include "src/handles/handles-inl.h" #include "src/objects/allocation-site-inl.h" #include "src/objects/objects-inl.h" @@ -17,18 +18,7 @@ CompilationDependencies::CompilationDependencies(JSHeapBroker* broker, Zone* zone) : zone_(zone), broker_(broker), dependencies_(zone) {} -class CompilationDependencies::Dependency : public ZoneObject { - public: - virtual bool IsValid() const = 0; - virtual void PrepareInstall() const {} - virtual void Install(const MaybeObjectHandle& code) const = 0; - -#ifdef DEBUG - virtual bool IsPretenureModeDependency() const { return false; } -#endif -}; - -class InitialMapDependency final : public CompilationDependencies::Dependency { +class InitialMapDependency final : public CompilationDependency { public: // TODO(neis): Once the concurrent compiler frontend is always-on, we no // longer need to explicitly store the initial map. @@ -56,8 +46,7 @@ class InitialMapDependency final : public CompilationDependencies::Dependency { MapRef initial_map_; }; -class PrototypePropertyDependency final - : public CompilationDependencies::Dependency { +class PrototypePropertyDependency final : public CompilationDependency { public: // TODO(neis): Once the concurrent compiler frontend is always-on, we no // longer need to explicitly store the prototype. @@ -96,7 +85,7 @@ class PrototypePropertyDependency final ObjectRef prototype_; }; -class StableMapDependency final : public CompilationDependencies::Dependency { +class StableMapDependency final : public CompilationDependency { public: explicit StableMapDependency(const MapRef& map) : map_(map) { DCHECK(map_.is_stable()); @@ -114,7 +103,7 @@ class StableMapDependency final : public CompilationDependencies::Dependency { MapRef map_; }; -class TransitionDependency final : public CompilationDependencies::Dependency { +class TransitionDependency final : public CompilationDependency { public: explicit TransitionDependency(const MapRef& map) : map_(map) { DCHECK(!map_.is_deprecated()); @@ -132,8 +121,7 @@ class TransitionDependency final : public CompilationDependencies::Dependency { MapRef map_; }; -class PretenureModeDependency final - : public CompilationDependencies::Dependency { +class PretenureModeDependency final : public CompilationDependency { public: // TODO(neis): Once the concurrent compiler frontend is always-on, we no // longer need to explicitly store the mode. @@ -163,8 +151,7 @@ class PretenureModeDependency final AllocationType allocation_; }; -class FieldRepresentationDependency final - : public CompilationDependencies::Dependency { +class FieldRepresentationDependency final : public CompilationDependency { public: // TODO(neis): Once the concurrent compiler frontend is always-on, we no // longer need to explicitly store the representation. @@ -197,7 +184,7 @@ class FieldRepresentationDependency final Representation representation_; }; -class FieldTypeDependency final : public CompilationDependencies::Dependency { +class FieldTypeDependency final : public CompilationDependency { public: // TODO(neis): Once the concurrent compiler frontend is always-on, we no // longer need to explicitly store the type. 
@@ -227,8 +214,7 @@ class FieldTypeDependency final : public CompilationDependencies::Dependency { ObjectRef type_; }; -class FieldConstnessDependency final - : public CompilationDependencies::Dependency { +class FieldConstnessDependency final : public CompilationDependency { public: FieldConstnessDependency(const MapRef& owner, int descriptor) : owner_(owner), descriptor_(descriptor) { @@ -255,8 +241,7 @@ class FieldConstnessDependency final int descriptor_; }; -class GlobalPropertyDependency final - : public CompilationDependencies::Dependency { +class GlobalPropertyDependency final : public CompilationDependency { public: // TODO(neis): Once the concurrent compiler frontend is always-on, we no // longer need to explicitly store the type and the read_only flag. @@ -294,7 +279,7 @@ class GlobalPropertyDependency final bool read_only_; }; -class ProtectorDependency final : public CompilationDependencies::Dependency { +class ProtectorDependency final : public CompilationDependency { public: explicit ProtectorDependency(const PropertyCellRef& cell) : cell_(cell) { DCHECK_EQ(cell_.value().AsSmi(), Isolate::kProtectorValid); @@ -315,8 +300,7 @@ class ProtectorDependency final : public CompilationDependencies::Dependency { PropertyCellRef cell_; }; -class ElementsKindDependency final - : public CompilationDependencies::Dependency { +class ElementsKindDependency final : public CompilationDependency { public: // TODO(neis): Once the concurrent compiler frontend is always-on, we no // longer need to explicitly store the elements kind. @@ -349,7 +333,7 @@ class ElementsKindDependency final }; class InitialMapInstanceSizePredictionDependency final - : public CompilationDependencies::Dependency { + : public CompilationDependency { public: InitialMapInstanceSizePredictionDependency(const JSFunctionRef& function, int instance_size) @@ -380,7 +364,8 @@ class InitialMapInstanceSizePredictionDependency final int instance_size_; }; -void CompilationDependencies::RecordDependency(Dependency const* dependency) { +void CompilationDependencies::RecordDependency( + CompilationDependency const* dependency) { if (dependency != nullptr) dependencies_.push_front(dependency); } @@ -565,6 +550,11 @@ namespace { // This function expects to never see a JSProxy. void DependOnStablePrototypeChain(CompilationDependencies* deps, MapRef map, base::Optional last_prototype) { + // TODO(neis): Remove heap access (SerializePrototype call). 
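The Allow* objects introduced just below are RAII scope guards: constructing one lifts a restriction for the duration of the enclosing scope, and the destructor restores it, so the permission cannot leak past the block. The pattern in standalone form (a sketch, not the actual PerThreadAssertScope machinery):

    // Minimal RAII permission scope: IsAllowed() is true only while at least
    // one guard object is alive on the current thread.
    class AllowSomethingScope {
     public:
      AllowSomethingScope() { ++depth_; }
      ~AllowSomethingScope() { --depth_; }
      static bool IsAllowed() { return depth_ > 0; }
     private:
      static thread_local int depth_;
    };
    thread_local int AllowSomethingScope::depth_ = 0;
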
+ AllowCodeDependencyChange dependency_change_; + AllowHandleAllocation handle_allocation_; + AllowHandleDereference handle_dereference_; + AllowHeapAllocation heap_allocation_; while (true) { map.SerializePrototype(); HeapObjectRef proto = map.prototype(); @@ -635,7 +625,7 @@ CompilationDependencies::DependOnInitialMapInstanceSizePrediction( return SlackTrackingPrediction(initial_map, instance_size); } -CompilationDependencies::Dependency const* +CompilationDependency const* CompilationDependencies::TransitionDependencyOffTheRecord( const MapRef& target_map) const { if (target_map.CanBeDeprecated()) { @@ -646,7 +636,7 @@ CompilationDependencies::TransitionDependencyOffTheRecord( } } -CompilationDependencies::Dependency const* +CompilationDependency const* CompilationDependencies::FieldRepresentationDependencyOffTheRecord( const MapRef& map, int descriptor) const { MapRef owner = map.FindFieldOwner(descriptor); @@ -657,7 +647,7 @@ CompilationDependencies::FieldRepresentationDependencyOffTheRecord( details.representation()); } -CompilationDependencies::Dependency const* +CompilationDependency const* CompilationDependencies::FieldTypeDependencyOffTheRecord(const MapRef& map, int descriptor) const { MapRef owner = map.FindFieldOwner(descriptor); diff --git a/deps/v8/src/compiler/compilation-dependencies.h b/deps/v8/src/compiler/compilation-dependencies.h index 37a2bc3a28c66b..cb6cea0685f29e 100644 --- a/deps/v8/src/compiler/compilation-dependencies.h +++ b/deps/v8/src/compiler/compilation-dependencies.h @@ -25,6 +25,8 @@ class SlackTrackingPrediction { int inobject_property_count_; }; +class CompilationDependency; + // Collects and installs dependencies of the code that is being generated. class V8_EXPORT_PRIVATE CompilationDependencies : public ZoneObject { public: @@ -113,14 +115,13 @@ class V8_EXPORT_PRIVATE CompilationDependencies : public ZoneObject { // DependOnTransition(map); // is equivalent to: // RecordDependency(TransitionDependencyOffTheRecord(map)); - class Dependency; - void RecordDependency(Dependency const* dependency); - Dependency const* TransitionDependencyOffTheRecord( + void RecordDependency(CompilationDependency const* dependency); + CompilationDependency const* TransitionDependencyOffTheRecord( const MapRef& target_map) const; - Dependency const* FieldRepresentationDependencyOffTheRecord( + CompilationDependency const* FieldRepresentationDependencyOffTheRecord( + const MapRef& map, int descriptor) const; + CompilationDependency const* FieldTypeDependencyOffTheRecord( const MapRef& map, int descriptor) const; - Dependency const* FieldTypeDependencyOffTheRecord(const MapRef& map, - int descriptor) const; // Exposed only for testing purposes. bool AreValid() const; @@ -128,7 +129,7 @@ class V8_EXPORT_PRIVATE CompilationDependencies : public ZoneObject { private: Zone* const zone_; JSHeapBroker* const broker_; - ZoneForwardList dependencies_; + ZoneForwardList dependencies_; }; } // namespace compiler diff --git a/deps/v8/src/compiler/compilation-dependency.h b/deps/v8/src/compiler/compilation-dependency.h new file mode 100644 index 00000000000000..e5726a0ddb8dc4 --- /dev/null +++ b/deps/v8/src/compiler/compilation-dependency.h @@ -0,0 +1,32 @@ +// Copyright 2019 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
+ +#ifndef V8_COMPILER_COMPILATION_DEPENDENCY_H_ +#define V8_COMPILER_COMPILATION_DEPENDENCY_H_ + +#include "src/zone/zone.h" + +namespace v8 { +namespace internal { + +class MaybeObjectHandle; + +namespace compiler { + +class CompilationDependency : public ZoneObject { + public: + virtual bool IsValid() const = 0; + virtual void PrepareInstall() const {} + virtual void Install(const MaybeObjectHandle& code) const = 0; + +#ifdef DEBUG + virtual bool IsPretenureModeDependency() const { return false; } +#endif +}; + +} // namespace compiler +} // namespace internal +} // namespace v8 + +#endif // V8_COMPILER_COMPILATION_DEPENDENCY_H_ diff --git a/deps/v8/src/compiler/control-flow-optimizer.cc b/deps/v8/src/compiler/control-flow-optimizer.cc index 7177a6069da26c..600db1d160847a 100644 --- a/deps/v8/src/compiler/control-flow-optimizer.cc +++ b/deps/v8/src/compiler/control-flow-optimizer.cc @@ -4,6 +4,7 @@ #include "src/compiler/control-flow-optimizer.h" +#include "src/codegen/tick-counter.h" #include "src/compiler/common-operator.h" #include "src/compiler/graph.h" #include "src/compiler/node-matchers.h" @@ -16,18 +17,20 @@ namespace compiler { ControlFlowOptimizer::ControlFlowOptimizer(Graph* graph, CommonOperatorBuilder* common, MachineOperatorBuilder* machine, + TickCounter* tick_counter, Zone* zone) : graph_(graph), common_(common), machine_(machine), queue_(zone), queued_(graph, 2), - zone_(zone) {} - + zone_(zone), + tick_counter_(tick_counter) {} void ControlFlowOptimizer::Optimize() { Enqueue(graph()->start()); while (!queue_.empty()) { + tick_counter_->DoTick(); Node* node = queue_.front(); queue_.pop(); if (node->IsDead()) continue; diff --git a/deps/v8/src/compiler/control-flow-optimizer.h b/deps/v8/src/compiler/control-flow-optimizer.h index 0a688a7c3926d5..07fc9e6fc2c3cd 100644 --- a/deps/v8/src/compiler/control-flow-optimizer.h +++ b/deps/v8/src/compiler/control-flow-optimizer.h @@ -11,6 +11,9 @@ namespace v8 { namespace internal { + +class TickCounter; + namespace compiler { // Forward declarations. @@ -22,7 +25,8 @@ class Node; class V8_EXPORT_PRIVATE ControlFlowOptimizer final { public: ControlFlowOptimizer(Graph* graph, CommonOperatorBuilder* common, - MachineOperatorBuilder* machine, Zone* zone); + MachineOperatorBuilder* machine, + TickCounter* tick_counter, Zone* zone); void Optimize(); @@ -45,6 +49,7 @@ class V8_EXPORT_PRIVATE ControlFlowOptimizer final { ZoneQueue queue_; NodeMarker queued_; Zone* const zone_; + TickCounter* const tick_counter_; DISALLOW_COPY_AND_ASSIGN(ControlFlowOptimizer); }; diff --git a/deps/v8/src/compiler/csa-load-elimination.cc b/deps/v8/src/compiler/csa-load-elimination.cc new file mode 100644 index 00000000000000..620d98019fd18d --- /dev/null +++ b/deps/v8/src/compiler/csa-load-elimination.cc @@ -0,0 +1,336 @@ +// Copyright 2019 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
+ +#include "src/compiler/csa-load-elimination.h" + +#include "src/compiler/common-operator.h" +#include "src/compiler/node-matchers.h" +#include "src/compiler/node-properties.h" +#include "src/compiler/simplified-operator.h" + +namespace v8 { +namespace internal { +namespace compiler { + +Reduction CsaLoadElimination::Reduce(Node* node) { + if (FLAG_trace_turbo_load_elimination) { + if (node->op()->EffectInputCount() > 0) { + PrintF(" visit #%d:%s", node->id(), node->op()->mnemonic()); + if (node->op()->ValueInputCount() > 0) { + PrintF("("); + for (int i = 0; i < node->op()->ValueInputCount(); ++i) { + if (i > 0) PrintF(", "); + Node* const value = NodeProperties::GetValueInput(node, i); + PrintF("#%d:%s", value->id(), value->op()->mnemonic()); + } + PrintF(")"); + } + PrintF("\n"); + for (int i = 0; i < node->op()->EffectInputCount(); ++i) { + Node* const effect = NodeProperties::GetEffectInput(node, i); + if (AbstractState const* const state = node_states_.Get(effect)) { + PrintF(" state[%i]: #%d:%s\n", i, effect->id(), + effect->op()->mnemonic()); + state->Print(); + } else { + PrintF(" no state[%i]: #%d:%s\n", i, effect->id(), + effect->op()->mnemonic()); + } + } + } + } + switch (node->opcode()) { + case IrOpcode::kLoadFromObject: + return ReduceLoadFromObject(node, ObjectAccessOf(node->op())); + case IrOpcode::kStoreToObject: + return ReduceStoreToObject(node, ObjectAccessOf(node->op())); + case IrOpcode::kDebugBreak: + case IrOpcode::kAbortCSAAssert: + // Avoid changing optimizations in the presence of debug instructions. + return PropagateInputState(node); + case IrOpcode::kCall: + return ReduceCall(node); + case IrOpcode::kEffectPhi: + return ReduceEffectPhi(node); + case IrOpcode::kDead: + break; + case IrOpcode::kStart: + return ReduceStart(node); + default: + return ReduceOtherNode(node); + } + return NoChange(); +} + +namespace CsaLoadEliminationHelpers { + +bool IsCompatible(MachineRepresentation r1, MachineRepresentation r2) { + if (r1 == r2) return true; + return IsAnyCompressedTagged(r1) && IsAnyCompressedTagged(r2); +} + +bool ObjectMayAlias(Node* a, Node* b) { + if (a != b) { + if (b->opcode() == IrOpcode::kAllocate) { + std::swap(a, b); + } + if (a->opcode() == IrOpcode::kAllocate) { + switch (b->opcode()) { + case IrOpcode::kAllocate: + case IrOpcode::kHeapConstant: + case IrOpcode::kParameter: + return false; + default: + break; + } + } + } + return true; +} + +bool OffsetMayAlias(Node* offset1, MachineRepresentation repr1, Node* offset2, + MachineRepresentation repr2) { + IntPtrMatcher matcher1(offset1); + IntPtrMatcher matcher2(offset2); + // If either of the offsets is variable, accesses may alias + if (!matcher1.HasValue() || !matcher2.HasValue()) { + return true; + } + // Otherwise, we return whether accesses overlap + intptr_t start1 = matcher1.Value(); + intptr_t end1 = start1 + ElementSizeInBytes(repr1); + intptr_t start2 = matcher2.Value(); + intptr_t end2 = start2 + ElementSizeInBytes(repr2); + return !(end1 <= start2 || end2 <= start1); +} + +} // namespace CsaLoadEliminationHelpers + +namespace Helpers = CsaLoadEliminationHelpers; + +void CsaLoadElimination::AbstractState::Merge(AbstractState const* that, + Zone* zone) { + FieldInfo empty_info; + for (std::pair entry : field_infos_) { + if (that->field_infos_.Get(entry.first) != entry.second) { + field_infos_.Set(entry.first, empty_info); + } + } +} + +CsaLoadElimination::AbstractState const* +CsaLoadElimination::AbstractState::KillField(Node* kill_object, + Node* kill_offset, + MachineRepresentation 
kill_repr,
+                                              Zone* zone) const {
+  FieldInfo empty_info;
+  AbstractState* that = new (zone) AbstractState(*this);
+  for (std::pair<Field, FieldInfo> entry : that->field_infos_) {
+    Field field = entry.first;
+    MachineRepresentation field_repr = entry.second.representation;
+    if (Helpers::OffsetMayAlias(kill_offset, kill_repr, field.second,
+                                field_repr) &&
+        Helpers::ObjectMayAlias(kill_object, field.first)) {
+      that->field_infos_.Set(field, empty_info);
+    }
+  }
+  return that;
+}
+
+CsaLoadElimination::AbstractState const*
+CsaLoadElimination::AbstractState::AddField(Node* object, Node* offset,
+                                            CsaLoadElimination::FieldInfo info,
+                                            Zone* zone) const {
+  AbstractState* that = new (zone) AbstractState(*this);
+  that->field_infos_.Set({object, offset}, info);
+  return that;
+}
+
+CsaLoadElimination::FieldInfo CsaLoadElimination::AbstractState::Lookup(
+    Node* object, Node* offset) const {
+  if (object->IsDead()) {
+    return {};
+  }
+  return field_infos_.Get({object, offset});
+}
+
+void CsaLoadElimination::AbstractState::Print() const {
+  for (std::pair<Field, FieldInfo> entry : field_infos_) {
+    Field field = entry.first;
+    Node* object = field.first;
+    Node* offset = field.second;
+    FieldInfo info = entry.second;
+    PrintF("    #%d+#%d:%s -> #%d:%s [repr=%s]\n", object->id(), offset->id(),
+           object->op()->mnemonic(), info.value->id(),
+           info.value->op()->mnemonic(),
+           MachineReprToString(info.representation));
+  }
+}
+
+Reduction CsaLoadElimination::ReduceLoadFromObject(Node* node,
+                                                   ObjectAccess const& access) {
+  Node* object = NodeProperties::GetValueInput(node, 0);
+  Node* offset = NodeProperties::GetValueInput(node, 1);
+  Node* effect = NodeProperties::GetEffectInput(node);
+  AbstractState const* state = node_states_.Get(effect);
+  if (state == nullptr) return NoChange();
+
+  MachineRepresentation representation = access.machine_type.representation();
+  FieldInfo lookup_result = state->Lookup(object, offset);
+  if (!lookup_result.IsEmpty()) {
+    // Make sure we don't reuse values that were recorded with a different
+    // representation or resurrect dead {replacement} nodes.
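KillField above invalidates a tracked field exactly when Helpers::OffsetMayAlias and Helpers::ObjectMayAlias both hold; for constant offsets, OffsetMayAlias reduces to a byte-range disjointness test. That arithmetic in isolation, with a worked example:

    #include <cstdint>
    // Two accesses [start, start + size) may alias iff their ranges overlap.
    bool RangesOverlap(intptr_t start1, int size1, intptr_t start2, int size2) {
      intptr_t end1 = start1 + size1;
      intptr_t end2 = start2 + size2;
      return !(end1 <= start2 || end2 <= start1);
    }
    // RangesOverlap(8, 4, 12, 4) == false: a 4-byte store at offset 8 leaves
    // a 4-byte load at offset 12 intact. RangesOverlap(8, 4, 10, 4) == true.
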
+ Node* replacement = lookup_result.value; + if (Helpers::IsCompatible(representation, lookup_result.representation) && + !replacement->IsDead()) { + ReplaceWithValue(node, replacement, effect); + return Replace(replacement); + } + } + FieldInfo info(node, representation); + state = state->AddField(object, offset, info, zone()); + + return UpdateState(node, state); +} + +Reduction CsaLoadElimination::ReduceStoreToObject(Node* node, + ObjectAccess const& access) { + Node* object = NodeProperties::GetValueInput(node, 0); + Node* offset = NodeProperties::GetValueInput(node, 1); + Node* value = NodeProperties::GetValueInput(node, 2); + Node* effect = NodeProperties::GetEffectInput(node); + AbstractState const* state = node_states_.Get(effect); + if (state == nullptr) return NoChange(); + + FieldInfo info(value, access.machine_type.representation()); + state = state->KillField(object, offset, info.representation, zone()); + state = state->AddField(object, offset, info, zone()); + + return UpdateState(node, state); +} + +Reduction CsaLoadElimination::ReduceEffectPhi(Node* node) { + Node* const effect0 = NodeProperties::GetEffectInput(node, 0); + Node* const control = NodeProperties::GetControlInput(node); + AbstractState const* state0 = node_states_.Get(effect0); + if (state0 == nullptr) return NoChange(); + if (control->opcode() == IrOpcode::kLoop) { + // Here we rely on having only reducible loops: + // The loop entry edge always dominates the header, so we can just take + // the state from the first input, and compute the loop state based on it. + AbstractState const* state = ComputeLoopState(node, state0); + return UpdateState(node, state); + } + DCHECK_EQ(IrOpcode::kMerge, control->opcode()); + + // Shortcut for the case when we do not know anything about some input. + int const input_count = node->op()->EffectInputCount(); + for (int i = 1; i < input_count; ++i) { + Node* const effect = NodeProperties::GetEffectInput(node, i); + if (node_states_.Get(effect) == nullptr) return NoChange(); + } + + // Make a copy of the first input's state and merge with the state + // from other inputs. + AbstractState* state = new (zone()) AbstractState(*state0); + for (int i = 1; i < input_count; ++i) { + Node* const input = NodeProperties::GetEffectInput(node, i); + state->Merge(node_states_.Get(input), zone()); + } + return UpdateState(node, state); +} + +Reduction CsaLoadElimination::ReduceStart(Node* node) { + return UpdateState(node, empty_state()); +} + +Reduction CsaLoadElimination::ReduceCall(Node* node) { + Node* value = NodeProperties::GetValueInput(node, 0); + ExternalReferenceMatcher m(value); + if (m.Is(ExternalReference::check_object_type())) { + return PropagateInputState(node); + } + return ReduceOtherNode(node); +} + +Reduction CsaLoadElimination::ReduceOtherNode(Node* node) { + if (node->op()->EffectInputCount() == 1) { + if (node->op()->EffectOutputCount() == 1) { + Node* const effect = NodeProperties::GetEffectInput(node); + AbstractState const* state = node_states_.Get(effect); + // If we do not know anything about the predecessor, do not propagate + // just yet because we will have to recompute anyway once we compute + // the predecessor. + if (state == nullptr) return NoChange(); + // Check if this {node} has some uncontrolled side effects. 
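ReduceEffectPhi above merges predecessor states by intersection: a tracked (object, offset) fact survives only if every incoming state agrees on it (AbstractState::Merge clears disagreeing entries to an empty FieldInfo rather than erasing them, but the effect is the same). The rule over a plain std::map, as a sketch:

    #include <map>
    // Keep only the entries on which both abstract states agree.
    template <typename K, typename V>
    std::map<K, V> IntersectStates(const std::map<K, V>& a,
                                   const std::map<K, V>& b) {
      std::map<K, V> out;
      for (const auto& entry : a) {
        auto it = b.find(entry.first);
        if (it != b.end() && it->second == entry.second) out.insert(entry);
      }
      return out;
    }
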
+ if (!node->op()->HasProperty(Operator::kNoWrite)) { + state = empty_state(); + } + return UpdateState(node, state); + } else { + return NoChange(); + } + } + DCHECK_EQ(0, node->op()->EffectInputCount()); + DCHECK_EQ(0, node->op()->EffectOutputCount()); + return NoChange(); +} + +Reduction CsaLoadElimination::UpdateState(Node* node, + AbstractState const* state) { + AbstractState const* original = node_states_.Get(node); + // Only signal that the {node} has Changed, if the information about {state} + // has changed wrt. the {original}. + if (state != original) { + if (original == nullptr || !state->Equals(original)) { + node_states_.Set(node, state); + return Changed(node); + } + } + return NoChange(); +} + +Reduction CsaLoadElimination::PropagateInputState(Node* node) { + Node* const effect = NodeProperties::GetEffectInput(node); + AbstractState const* state = node_states_.Get(effect); + if (state == nullptr) return NoChange(); + return UpdateState(node, state); +} + +CsaLoadElimination::AbstractState const* CsaLoadElimination::ComputeLoopState( + Node* node, AbstractState const* state) const { + DCHECK_EQ(node->opcode(), IrOpcode::kEffectPhi); + Node* const control = NodeProperties::GetControlInput(node); + ZoneQueue queue(zone()); + ZoneSet visited(zone()); + visited.insert(node); + for (int i = 1; i < control->InputCount(); ++i) { + queue.push(node->InputAt(i)); + } + while (!queue.empty()) { + Node* const current = queue.front(); + queue.pop(); + if (visited.insert(current).second) { + if (!current->op()->HasProperty(Operator::kNoWrite)) { + return empty_state(); + } + for (int i = 0; i < current->op()->EffectInputCount(); ++i) { + queue.push(NodeProperties::GetEffectInput(current, i)); + } + } + } + return state; +} + +CommonOperatorBuilder* CsaLoadElimination::common() const { + return jsgraph()->common(); +} + +Graph* CsaLoadElimination::graph() const { return jsgraph()->graph(); } + +Isolate* CsaLoadElimination::isolate() const { return jsgraph()->isolate(); } + +} // namespace compiler +} // namespace internal +} // namespace v8 diff --git a/deps/v8/src/compiler/csa-load-elimination.h b/deps/v8/src/compiler/csa-load-elimination.h new file mode 100644 index 00000000000000..9460858d046a7b --- /dev/null +++ b/deps/v8/src/compiler/csa-load-elimination.h @@ -0,0 +1,118 @@ +// Copyright 2019 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#ifndef V8_COMPILER_CSA_LOAD_ELIMINATION_H_ +#define V8_COMPILER_CSA_LOAD_ELIMINATION_H_ + +#include "src/base/compiler-specific.h" +#include "src/codegen/machine-type.h" +#include "src/common/globals.h" +#include "src/compiler/graph-reducer.h" +#include "src/compiler/js-graph.h" +#include "src/compiler/node-aux-data.h" +#include "src/compiler/persistent-map.h" +#include "src/handles/maybe-handles.h" +#include "src/zone/zone-handle-set.h" + +namespace v8 { +namespace internal { + +namespace compiler { + +// Forward declarations. 
+class CommonOperatorBuilder; +struct ObjectAccess; +class Graph; +class JSGraph; + +class V8_EXPORT_PRIVATE CsaLoadElimination final + : public NON_EXPORTED_BASE(AdvancedReducer) { + public: + CsaLoadElimination(Editor* editor, JSGraph* jsgraph, Zone* zone) + : AdvancedReducer(editor), + empty_state_(zone), + node_states_(jsgraph->graph()->NodeCount(), zone), + jsgraph_(jsgraph), + zone_(zone) {} + ~CsaLoadElimination() final = default; + + const char* reducer_name() const override { return "CsaLoadElimination"; } + + Reduction Reduce(Node* node) final; + + private: + struct FieldInfo { + FieldInfo() = default; + FieldInfo(Node* value, MachineRepresentation representation) + : value(value), representation(representation) {} + + bool operator==(const FieldInfo& other) const { + return value == other.value && representation == other.representation; + } + + bool operator!=(const FieldInfo& other) const { return !(*this == other); } + + bool IsEmpty() const { return value == nullptr; } + + Node* value = nullptr; + MachineRepresentation representation = MachineRepresentation::kNone; + }; + + class AbstractState final : public ZoneObject { + public: + explicit AbstractState(Zone* zone) : field_infos_(zone) {} + + bool Equals(AbstractState const* that) const { + return field_infos_ == that->field_infos_; + } + void Merge(AbstractState const* that, Zone* zone); + + AbstractState const* KillField(Node* object, Node* offset, + MachineRepresentation repr, + Zone* zone) const; + AbstractState const* AddField(Node* object, Node* offset, FieldInfo info, + Zone* zone) const; + FieldInfo Lookup(Node* object, Node* offset) const; + + void Print() const; + + private: + using Field = std::pair; + using FieldInfos = PersistentMap; + FieldInfos field_infos_; + }; + + Reduction ReduceLoadFromObject(Node* node, ObjectAccess const& access); + Reduction ReduceStoreToObject(Node* node, ObjectAccess const& access); + Reduction ReduceEffectPhi(Node* node); + Reduction ReduceStart(Node* node); + Reduction ReduceCall(Node* node); + Reduction ReduceOtherNode(Node* node); + + Reduction UpdateState(Node* node, AbstractState const* state); + Reduction PropagateInputState(Node* node); + + AbstractState const* ComputeLoopState(Node* node, + AbstractState const* state) const; + + CommonOperatorBuilder* common() const; + Isolate* isolate() const; + Graph* graph() const; + JSGraph* jsgraph() const { return jsgraph_; } + Zone* zone() const { return zone_; } + AbstractState const* empty_state() const { return &empty_state_; } + + AbstractState const empty_state_; + NodeAuxData node_states_; + JSGraph* const jsgraph_; + Zone* zone_; + + DISALLOW_COPY_AND_ASSIGN(CsaLoadElimination); +}; + +} // namespace compiler +} // namespace internal +} // namespace v8 + +#endif // V8_COMPILER_CSA_LOAD_ELIMINATION_H_ diff --git a/deps/v8/src/compiler/decompression-elimination.cc b/deps/v8/src/compiler/decompression-elimination.cc index e69e61fac5e61b..537744652b9686 100644 --- a/deps/v8/src/compiler/decompression-elimination.cc +++ b/deps/v8/src/compiler/decompression-elimination.cc @@ -21,10 +21,8 @@ bool DecompressionElimination::IsReducibleConstantOpcode( IrOpcode::Value opcode) { switch (opcode) { case IrOpcode::kInt64Constant: - return true; - // TODO(v8:8977): Disabling HeapConstant until CompressedHeapConstant - // exists, since it breaks with verify CSA on. 
case IrOpcode::kHeapConstant: + return true; default: return false; } @@ -55,13 +53,8 @@ Node* DecompressionElimination::GetCompressedConstant(Node* constant) { static_cast(OpParameter(constant->op())))); break; case IrOpcode::kHeapConstant: - // TODO(v8:8977): The HeapConstant remains as 64 bits. This does not - // affect the comparison and it will still work correctly. However, we are - // introducing a 64 bit value in the stream where a 32 bit one will - // suffice. Currently there is no "CompressedHeapConstant", and - // introducing a new opcode and handling it correctly throught the - // pipeline seems that it will involve quite a bit of work. - return constant; + return graph()->NewNode( + common()->CompressedHeapConstant(HeapConstantOf(constant->op()))); default: UNREACHABLE(); } @@ -84,6 +77,21 @@ Reduction DecompressionElimination::ReduceCompress(Node* node) { } } +Reduction DecompressionElimination::ReduceDecompress(Node* node) { + DCHECK(IrOpcode::IsDecompressOpcode(node->opcode())); + + DCHECK_EQ(node->InputCount(), 1); + Node* input_node = node->InputAt(0); + IrOpcode::Value input_opcode = input_node->opcode(); + if (IrOpcode::IsCompressOpcode(input_opcode)) { + DCHECK(IsValidDecompress(input_opcode, node->opcode())); + DCHECK_EQ(input_node->InputCount(), 1); + return Replace(input_node->InputAt(0)); + } else { + return NoChange(); + } +} + Reduction DecompressionElimination::ReducePhi(Node* node) { DCHECK_EQ(node->opcode(), IrOpcode::kPhi); @@ -138,7 +146,10 @@ Reduction DecompressionElimination::ReducePhi(Node* node) { // Add a decompress after the Phi. To do this, we need to replace the Phi with // "Phi <- Decompress". - return Replace(graph()->NewNode(op, node)); + Node* decompress = graph()->NewNode(op, node); + ReplaceWithValue(node, decompress); + decompress->ReplaceInput(0, node); + return Changed(node); } Reduction DecompressionElimination::ReduceTypedStateValues(Node* node) { @@ -201,6 +212,10 @@ Reduction DecompressionElimination::Reduce(Node* node) { case IrOpcode::kChangeTaggedSignedToCompressedSigned: case IrOpcode::kChangeTaggedPointerToCompressedPointer: return ReduceCompress(node); + case IrOpcode::kChangeCompressedToTagged: + case IrOpcode::kChangeCompressedSignedToTaggedSigned: + case IrOpcode::kChangeCompressedPointerToTaggedPointer: + return ReduceDecompress(node); case IrOpcode::kPhi: return ReducePhi(node); case IrOpcode::kTypedStateValues: diff --git a/deps/v8/src/compiler/decompression-elimination.h b/deps/v8/src/compiler/decompression-elimination.h index c850f064a96639..85a6c98aa0bbb5 100644 --- a/deps/v8/src/compiler/decompression-elimination.h +++ b/deps/v8/src/compiler/decompression-elimination.h @@ -38,7 +38,7 @@ class V8_EXPORT_PRIVATE DecompressionElimination final // elimination. bool IsReducibleConstantOpcode(IrOpcode::Value opcode); - // Get the new 32 bit node constant given the 64 bit one + // Get the new 32 bit node constant given the 64 bit one. Node* GetCompressedConstant(Node* constant); // Removes direct Decompressions & Compressions, going from @@ -48,6 +48,9 @@ class V8_EXPORT_PRIVATE DecompressionElimination final // Can be used for Any, Signed, and Pointer compressions. Reduction ReduceCompress(Node* node); + // Removes direct Compressions & Decompressions, analogously to ReduceCompress + Reduction ReduceDecompress(Node* node); + // Replaces Phi's input decompressions with their input node, if and only if // all of the Phi's inputs are Decompress nodes. 
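ReduceCompress and ReduceDecompress rely on compression and decompression being exact inverses, so adjacent Compress/Decompress pairs cancel to the original node. A toy model of why the round trip is lossless (V8's actual scheme differs in detail, e.g. in how the base is materialized):

    #include <cstdint>
    // Toy pointer compression: keep the low 32 bits within a 4GB cage and
    // re-attach the cage base on decompression.
    uint32_t Compress(uint64_t tagged) { return static_cast<uint32_t>(tagged); }
    uint64_t Decompress(uint64_t base, uint32_t compressed) {
      return base + compressed;
    }
    // If base is 4GB-aligned and p lies inside [base, base + 4GB), then
    // Decompress(base, Compress(p)) == p.
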
Reduction ReducePhi(Node* node); diff --git a/deps/v8/src/compiler/diamond.h b/deps/v8/src/compiler/diamond.h index cc6ca954f3d92e..cac1b1726b4ad9 100644 --- a/deps/v8/src/compiler/diamond.h +++ b/deps/v8/src/compiler/diamond.h @@ -33,13 +33,13 @@ struct Diamond { } // Place {this} after {that} in control flow order. - void Chain(Diamond& that) { branch->ReplaceInput(1, that.merge); } + void Chain(Diamond const& that) { branch->ReplaceInput(1, that.merge); } // Place {this} after {that} in control flow order. void Chain(Node* that) { branch->ReplaceInput(1, that); } // Nest {this} into either the if_true or if_false branch of {that}. - void Nest(Diamond& that, bool if_true) { + void Nest(Diamond const& that, bool if_true) { if (if_true) { branch->ReplaceInput(1, that.if_true); that.merge->ReplaceInput(0, merge); diff --git a/deps/v8/src/compiler/effect-control-linearizer.cc b/deps/v8/src/compiler/effect-control-linearizer.cc index ced078a17899a6..788638fe68b8f4 100644 --- a/deps/v8/src/compiler/effect-control-linearizer.cc +++ b/deps/v8/src/compiler/effect-control-linearizer.cc @@ -17,6 +17,7 @@ #include "src/compiler/node-properties.h" #include "src/compiler/node.h" #include "src/compiler/schedule.h" +#include "src/execution/frames.h" #include "src/heap/factory-inl.h" #include "src/objects/heap-number.h" #include "src/objects/oddball.h" @@ -51,6 +52,7 @@ class EffectControlLinearizer { bool TryWireInStateEffect(Node* node, Node* frame_state, Node** effect, Node** control); Node* LowerChangeBitToTagged(Node* node); + Node* LowerChangeInt31ToCompressedSigned(Node* node); Node* LowerChangeInt31ToTaggedSigned(Node* node); Node* LowerChangeInt32ToTagged(Node* node); Node* LowerChangeInt64ToTagged(Node* node); @@ -58,6 +60,7 @@ class EffectControlLinearizer { Node* LowerChangeUint64ToTagged(Node* node); Node* LowerChangeFloat64ToTagged(Node* node); Node* LowerChangeFloat64ToTaggedPointer(Node* node); + Node* LowerChangeCompressedSignedToInt32(Node* node); Node* LowerChangeTaggedSignedToInt32(Node* node); Node* LowerChangeTaggedSignedToInt64(Node* node); Node* LowerChangeTaggedToBit(Node* node); @@ -75,6 +78,7 @@ class EffectControlLinearizer { Node* LowerCheckReceiver(Node* node, Node* frame_state); Node* LowerCheckReceiverOrNullOrUndefined(Node* node, Node* frame_state); Node* LowerCheckString(Node* node, Node* frame_state); + Node* LowerCheckBigInt(Node* node, Node* frame_state); Node* LowerCheckSymbol(Node* node, Node* frame_state); void LowerCheckIf(Node* node, Node* frame_state); Node* LowerCheckedInt32Add(Node* node, Node* frame_state); @@ -84,6 +88,7 @@ class EffectControlLinearizer { Node* LowerCheckedUint32Div(Node* node, Node* frame_state); Node* LowerCheckedUint32Mod(Node* node, Node* frame_state); Node* LowerCheckedInt32Mul(Node* node, Node* frame_state); + Node* LowerCheckedInt32ToCompressedSigned(Node* node, Node* frame_state); Node* LowerCheckedInt32ToTaggedSigned(Node* node, Node* frame_state); Node* LowerCheckedInt64ToInt32(Node* node, Node* frame_state); Node* LowerCheckedInt64ToTaggedSigned(Node* node, Node* frame_state); @@ -101,6 +106,9 @@ class EffectControlLinearizer { Node* LowerCheckedTaggedToFloat64(Node* node, Node* frame_state); Node* LowerCheckedTaggedToTaggedSigned(Node* node, Node* frame_state); Node* LowerCheckedTaggedToTaggedPointer(Node* node, Node* frame_state); + Node* LowerBigIntAsUintN(Node* node, Node* frame_state); + Node* LowerChangeUint64ToBigInt(Node* node); + Node* LowerTruncateBigIntToUint64(Node* node); Node* 
LowerCheckedCompressedToTaggedSigned(Node* node, Node* frame_state); Node* LowerCheckedCompressedToTaggedPointer(Node* node, Node* frame_state); Node* LowerCheckedTaggedToCompressedSigned(Node* node, Node* frame_state); @@ -150,17 +158,20 @@ class EffectControlLinearizer { Node* LowerStringConcat(Node* node); Node* LowerStringToNumber(Node* node); Node* LowerStringCharCodeAt(Node* node); - Node* LowerStringCodePointAt(Node* node, UnicodeEncoding encoding); + Node* LowerStringCodePointAt(Node* node); Node* LowerStringToLowerCaseIntl(Node* node); Node* LowerStringToUpperCaseIntl(Node* node); Node* LowerStringFromSingleCharCode(Node* node); Node* LowerStringFromSingleCodePoint(Node* node); Node* LowerStringIndexOf(Node* node); Node* LowerStringSubstring(Node* node); + Node* LowerStringFromCodePointAt(Node* node); Node* LowerStringLength(Node* node); Node* LowerStringEqual(Node* node); Node* LowerStringLessThan(Node* node); Node* LowerStringLessThanOrEqual(Node* node); + Node* LowerBigIntAdd(Node* node, Node* frame_state); + Node* LowerBigIntNegate(Node* node); Node* LowerCheckFloat64Hole(Node* node, Node* frame_state); Node* LowerCheckNotTaggedHole(Node* node, Node* frame_state); Node* LowerConvertTaggedHoleToUndefined(Node* node); @@ -186,6 +197,7 @@ class EffectControlLinearizer { void LowerTransitionAndStoreNumberElement(Node* node); void LowerTransitionAndStoreNonNumberElement(Node* node); void LowerRuntimeAbort(Node* node); + Node* LowerAssertType(Node* node); Node* LowerConvertReceiver(Node* node); Node* LowerDateNow(Node* node); @@ -214,6 +226,7 @@ class EffectControlLinearizer { Node* LowerStringComparison(Callable const& callable, Node* node); Node* IsElementsKindGreaterThan(Node* kind, ElementsKind reference_kind); + Node* ChangeInt32ToCompressedSmi(Node* value); Node* ChangeInt32ToSmi(Node* value); Node* ChangeInt32ToIntPtr(Node* value); Node* ChangeInt64ToSmi(Node* value); @@ -222,6 +235,7 @@ class EffectControlLinearizer { Node* ChangeUint32ToUintPtr(Node* value); Node* ChangeUint32ToSmi(Node* value); Node* ChangeSmiToIntPtr(Node* value); + Node* ChangeCompressedSmiToInt32(Node* value); Node* ChangeSmiToInt32(Node* value); Node* ChangeSmiToInt64(Node* value); Node* ObjectIsSmi(Node* value); @@ -827,6 +841,9 @@ bool EffectControlLinearizer::TryWireInStateEffect(Node* node, case IrOpcode::kChangeBitToTagged: result = LowerChangeBitToTagged(node); break; + case IrOpcode::kChangeInt31ToCompressedSigned: + result = LowerChangeInt31ToCompressedSigned(node); + break; case IrOpcode::kChangeInt31ToTaggedSigned: result = LowerChangeInt31ToTaggedSigned(node); break; @@ -848,6 +865,9 @@ bool EffectControlLinearizer::TryWireInStateEffect(Node* node, case IrOpcode::kChangeFloat64ToTaggedPointer: result = LowerChangeFloat64ToTaggedPointer(node); break; + case IrOpcode::kChangeCompressedSignedToInt32: + result = LowerChangeCompressedSignedToInt32(node); + break; case IrOpcode::kChangeTaggedSignedToInt32: result = LowerChangeTaggedSignedToInt32(node); break; @@ -911,6 +931,9 @@ bool EffectControlLinearizer::TryWireInStateEffect(Node* node, case IrOpcode::kCheckString: result = LowerCheckString(node, frame_state); break; + case IrOpcode::kCheckBigInt: + result = LowerCheckBigInt(node, frame_state); + break; case IrOpcode::kCheckInternalizedString: result = LowerCheckInternalizedString(node, frame_state); break; @@ -938,6 +961,9 @@ bool EffectControlLinearizer::TryWireInStateEffect(Node* node, case IrOpcode::kCheckedInt32Mul: result = LowerCheckedInt32Mul(node, frame_state); break; + case 
IrOpcode::kCheckedInt32ToCompressedSigned: + result = LowerCheckedInt32ToCompressedSigned(node, frame_state); + break; case IrOpcode::kCheckedInt32ToTaggedSigned: result = LowerCheckedInt32ToTaggedSigned(node, frame_state); break; @@ -993,6 +1019,15 @@ bool EffectControlLinearizer::TryWireInStateEffect(Node* node, case IrOpcode::kCheckedTaggedToTaggedPointer: result = LowerCheckedTaggedToTaggedPointer(node, frame_state); break; + case IrOpcode::kBigIntAsUintN: + result = LowerBigIntAsUintN(node, frame_state); + break; + case IrOpcode::kChangeUint64ToBigInt: + result = LowerChangeUint64ToBigInt(node); + break; + case IrOpcode::kTruncateBigIntToUint64: + result = LowerTruncateBigIntToUint64(node); + break; case IrOpcode::kCheckedCompressedToTaggedSigned: result = LowerCheckedCompressedToTaggedSigned(node, frame_state); break; @@ -1110,6 +1145,9 @@ bool EffectControlLinearizer::TryWireInStateEffect(Node* node, case IrOpcode::kStringIndexOf: result = LowerStringIndexOf(node); break; + case IrOpcode::kStringFromCodePointAt: + result = LowerStringFromCodePointAt(node); + break; case IrOpcode::kStringLength: result = LowerStringLength(node); break; @@ -1120,7 +1158,7 @@ bool EffectControlLinearizer::TryWireInStateEffect(Node* node, result = LowerStringCharCodeAt(node); break; case IrOpcode::kStringCodePointAt: - result = LowerStringCodePointAt(node, UnicodeEncodingOf(node->op())); + result = LowerStringCodePointAt(node); break; case IrOpcode::kStringToLowerCaseIntl: result = LowerStringToLowerCaseIntl(node); @@ -1140,6 +1178,12 @@ bool EffectControlLinearizer::TryWireInStateEffect(Node* node, case IrOpcode::kStringLessThanOrEqual: result = LowerStringLessThanOrEqual(node); break; + case IrOpcode::kBigIntAdd: + result = LowerBigIntAdd(node, frame_state); + break; + case IrOpcode::kBigIntNegate: + result = LowerBigIntNegate(node); + break; case IrOpcode::kNumberIsFloat64Hole: result = LowerNumberIsFloat64Hole(node); break; @@ -1233,6 +1277,9 @@ bool EffectControlLinearizer::TryWireInStateEffect(Node* node, case IrOpcode::kRuntimeAbort: LowerRuntimeAbort(node); break; + case IrOpcode::kAssertType: + result = LowerAssertType(node); + break; case IrOpcode::kConvertReceiver: result = LowerConvertReceiver(node); break; @@ -1357,6 +1404,11 @@ Node* EffectControlLinearizer::LowerChangeBitToTagged(Node* node) { return done.PhiAt(0); } +Node* EffectControlLinearizer::LowerChangeInt31ToCompressedSigned(Node* node) { + Node* value = node->InputAt(0); + return ChangeInt32ToCompressedSmi(value); +} + Node* EffectControlLinearizer::LowerChangeInt31ToTaggedSigned(Node* node) { Node* value = node->InputAt(0); return ChangeInt32ToSmi(value); @@ -1461,6 +1513,11 @@ Node* EffectControlLinearizer::LowerChangeTaggedSignedToInt32(Node* node) { return ChangeSmiToInt32(value); } +Node* EffectControlLinearizer::LowerChangeCompressedSignedToInt32(Node* node) { + Node* value = node->InputAt(0); + return ChangeCompressedSmiToInt32(value); +} + Node* EffectControlLinearizer::LowerChangeTaggedSignedToInt64(Node* node) { Node* value = node->InputAt(0); return ChangeSmiToInt64(value); @@ -1684,8 +1741,7 @@ Node* EffectControlLinearizer::LowerChangeTaggedToCompressedSigned(Node* node) { STATIC_ASSERT(HeapNumber::kValueOffset == Oddball::kToNumberRawOffset); Node* vfalse = __ LoadField(AccessBuilder::ForHeapNumberValue(), value); vfalse = __ ChangeFloat64ToInt32(vfalse); - vfalse = ChangeInt32ToSmi(vfalse); - vfalse = __ ChangeTaggedSignedToCompressedSigned(vfalse); + vfalse = ChangeInt32ToCompressedSmi(vfalse); __ Goto(&done, 
vfalse); __ Bind(&done); @@ -2283,6 +2339,19 @@ Node* EffectControlLinearizer::LowerCheckedInt32Mul(Node* node, return value; } +Node* EffectControlLinearizer::LowerCheckedInt32ToCompressedSigned( + Node* node, Node* frame_state) { + DCHECK(SmiValuesAre31Bits()); + Node* value = node->InputAt(0); + const CheckParameters& params = CheckParametersOf(node->op()); + + Node* add = __ Int32AddWithOverflow(value, value); + Node* check = __ Projection(1, add); + __ DeoptimizeIf(DeoptimizeReason::kLostPrecision, params.feedback(), check, + frame_state); + return __ Projection(0, add); +} + Node* EffectControlLinearizer::LowerCheckedInt32ToTaggedSigned( Node* node, Node* frame_state) { DCHECK(SmiValuesAre31Bits()); @@ -2651,6 +2720,121 @@ Node* EffectControlLinearizer::LowerCheckedTaggedToTaggedPointer( return value; } +Node* EffectControlLinearizer::LowerCheckBigInt(Node* node, Node* frame_state) { + Node* value = node->InputAt(0); + const CheckParameters& params = CheckParametersOf(node->op()); + + // Check for Smi. + Node* smi_check = ObjectIsSmi(value); + __ DeoptimizeIf(DeoptimizeReason::kSmi, params.feedback(), smi_check, + frame_state); + + // Check for BigInt. + Node* value_map = __ LoadField(AccessBuilder::ForMap(), value); + Node* bi_check = __ WordEqual(value_map, __ BigIntMapConstant()); + __ DeoptimizeIfNot(DeoptimizeReason::kWrongInstanceType, params.feedback(), + bi_check, frame_state); + + return value; +} + +Node* EffectControlLinearizer::LowerBigIntAsUintN(Node* node, + Node* frame_state) { + DCHECK(machine()->Is64()); + + const int bits = OpParameter<int>(node->op()); + DCHECK(0 <= bits && bits <= 64); + + if (bits == 64) { + // Reduce to nop. + return node->InputAt(0); + } else { + const uint64_t msk = (1ULL << bits) - 1ULL; + return __ Word64And(node->InputAt(0), __ Int64Constant(msk)); + } +} + +Node* EffectControlLinearizer::LowerChangeUint64ToBigInt(Node* node) { + DCHECK(machine()->Is64()); + + Node* value = node->InputAt(0); + Node* map = jsgraph()->HeapConstant(factory()->bigint_map()); + // BigInts with value 0 must be of size 0 (canonical form). + auto if_zerodigits = __ MakeLabel(); + auto if_onedigit = __ MakeLabel(); + auto done = __ MakeLabel(MachineRepresentation::kTagged); + + __ GotoIf(__ Word64Equal(value, __ IntPtrConstant(0)), &if_zerodigits); + __ Goto(&if_onedigit); + + __ Bind(&if_onedigit); + { + Node* result = __ Allocate(AllocationType::kYoung, + __ IntPtrConstant(BigInt::SizeFor(1))); + const auto bitfield = BigInt::LengthBits::update(0, 1); + __ StoreField(AccessBuilder::ForMap(), result, map); + __ StoreField(AccessBuilder::ForBigIntBitfield(), result, + __ IntPtrConstant(bitfield)); + // BigInts have no padding on 64 bit architectures with pointer compression. + if (BigInt::HasOptionalPadding()) { + __ StoreField(AccessBuilder::ForBigIntOptionalPadding(), result, + __ IntPtrConstant(0)); + } + __ StoreField(AccessBuilder::ForBigIntLeastSignificantDigit64(), result, + value); + __ Goto(&done, result); + } + + __ Bind(&if_zerodigits); + { + Node* result = __ Allocate(AllocationType::kYoung, + __ IntPtrConstant(BigInt::SizeFor(0))); + const auto bitfield = BigInt::LengthBits::update(0, 0); + __ StoreField(AccessBuilder::ForMap(), result, map); + __ StoreField(AccessBuilder::ForBigIntBitfield(), result, + __ IntPtrConstant(bitfield)); + // BigInts have no padding on 64 bit architectures with pointer compression.
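A note on the LowerCheckedInt32ToCompressedSigned lowering above: tagging a 31-bit Smi is a left shift by one, and {value} + {value} computes exactly that shift while the overflow projection doubles as the range check, so a single Int32AddWithOverflow both tags and validates. A standalone sketch of the same trick, using the GCC/Clang overflow builtin and hypothetical helper names, not V8 code:

#include <cstdint>
#include <optional>

// Returns the Smi-tagged form of {value} (value << 1), or nullopt when the
// value does not fit a 31-bit Smi, the analogue of the deopt above.
std::optional<int32_t> CheckedInt32ToSmi(int32_t value) {
  int32_t tagged;
  // value + value equals value << 1, and the overflow flag of the addition
  // doubles as the 31-bit range check.
  if (__builtin_add_overflow(value, value, &tagged)) return std::nullopt;
  return tagged;
}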
+ if (BigInt::HasOptionalPadding()) { + __ StoreField(AccessBuilder::ForBigIntOptionalPadding(), result, + __ IntPtrConstant(0)); + } + __ Goto(&done, result); + } + + __ Bind(&done); + return done.PhiAt(0); +} + +Node* EffectControlLinearizer::LowerTruncateBigIntToUint64(Node* node) { + DCHECK(machine()->Is64()); + + auto done = __ MakeLabel(MachineRepresentation::kWord64); + auto if_neg = __ MakeLabel(); + auto if_not_zero = __ MakeLabel(); + + Node* value = node->InputAt(0); + + Node* bitfield = __ LoadField(AccessBuilder::ForBigIntBitfield(), value); + __ GotoIfNot(__ Word32Equal(bitfield, __ Int32Constant(0)), &if_not_zero); + __ Goto(&done, __ Int64Constant(0)); + + __ Bind(&if_not_zero); + { + Node* lsd = + __ LoadField(AccessBuilder::ForBigIntLeastSignificantDigit64(), value); + Node* sign = + __ Word32And(bitfield, __ Int32Constant(BigInt::SignBits::kMask)); + __ GotoIf(__ Word32Equal(sign, __ Int32Constant(1)), &if_neg); + __ Goto(&done, lsd); + + __ Bind(&if_neg); + __ Goto(&done, __ Int64Sub(__ Int64Constant(0), lsd)); + } + + __ Bind(&done); + return done.PhiAt(0); +} + Node* EffectControlLinearizer::LowerCheckedCompressedToTaggedSigned( Node* node, Node* frame_state) { Node* value = node->InputAt(0); @@ -3726,16 +3910,12 @@ Node* EffectControlLinearizer::LowerStringCharCodeAt(Node* node) { return loop_done.PhiAt(0); } -Node* EffectControlLinearizer::LowerStringCodePointAt( - Node* node, UnicodeEncoding encoding) { +Node* EffectControlLinearizer::LowerStringCodePointAt(Node* node) { Node* receiver = node->InputAt(0); Node* position = node->InputAt(1); - Builtins::Name builtin = encoding == UnicodeEncoding::UTF16 - ? Builtins::kStringCodePointAtUTF16 - : Builtins::kStringCodePointAtUTF32; - - Callable const callable = Builtins::CallableFor(isolate(), builtin); + Callable const callable = + Builtins::CallableFor(isolate(), Builtins::kStringCodePointAt); Operator::Properties properties = Operator::kNoThrow | Operator::kNoWrite; CallDescriptor::Flags flags = CallDescriptor::kNoFlags; auto call_descriptor = Linkage::GetStubCallDescriptor( @@ -3968,31 +4148,23 @@ Node* EffectControlLinearizer::LowerStringFromSingleCodePoint(Node* node) { __ Bind(&if_not_single_code); // Generate surrogate pair string { - switch (UnicodeEncodingOf(node->op())) { - case UnicodeEncoding::UTF16: - break; + // Convert UTF32 to UTF16 code units, and store as a 32 bit word. + Node* lead_offset = __ Int32Constant(0xD800 - (0x10000 >> 10)); - case UnicodeEncoding::UTF32: { - // Convert UTF32 to UTF16 code units, and store as a 32 bit word. 
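LowerTruncateBigIntToUint64 above decodes the sign-and-magnitude BigInt representation: a zero bitfield is the canonical zero, and a set sign bit means the least significant digit is negated (0 - digit), which is exactly truncation modulo 2^64. A toy model of that arithmetic, with a hypothetical struct standing in for the real object layout:

#include <cstdint>

// Toy stand-in for a BigInt with at most one 64-bit digit.
struct TinyBigInt {
  bool negative;
  uint64_t magnitude;  // least significant digit; 0 encodes the BigInt 0
};

// Truncation to uint64 modulo 2^64, mirroring the lowering above: unsigned
// negation 0 - m is exactly 2^64 - m.
uint64_t TruncateToUint64(const TinyBigInt& x) {
  if (x.magnitude == 0) return 0;
  return x.negative ? 0 - x.magnitude : x.magnitude;
}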
- Node* lead_offset = __ Int32Constant(0xD800 - (0x10000 >> 10)); + // lead = (codepoint >> 10) + LEAD_OFFSET + Node* lead = + __ Int32Add(__ Word32Shr(code, __ Int32Constant(10)), lead_offset); - // lead = (codepoint >> 10) + LEAD_OFFSET - Node* lead = - __ Int32Add(__ Word32Shr(code, __ Int32Constant(10)), lead_offset); + // trail = (codepoint & 0x3FF) + 0xDC00; + Node* trail = __ Int32Add(__ Word32And(code, __ Int32Constant(0x3FF)), + __ Int32Constant(0xDC00)); - // trail = (codepoint & 0x3FF) + 0xDC00; - Node* trail = __ Int32Add(__ Word32And(code, __ Int32Constant(0x3FF)), - __ Int32Constant(0xDC00)); - - // codpoint = (trail << 16) | lead; + // codepoint = (trail << 16) | lead; #if V8_TARGET_BIG_ENDIAN - code = __ Word32Or(__ Word32Shl(lead, __ Int32Constant(16)), trail); + code = __ Word32Or(__ Word32Shl(lead, __ Int32Constant(16)), trail); #else - code = __ Word32Or(__ Word32Shl(trail, __ Int32Constant(16)), lead); + code = __ Word32Or(__ Word32Shl(trail, __ Int32Constant(16)), lead); #endif - break; - } - } // Allocate a new SeqTwoByteString for {code}. Node* vfalse0 = @@ -4032,6 +4204,21 @@ Node* EffectControlLinearizer::LowerStringIndexOf(Node* node) { search_string, position, __ NoContextConstant()); } +Node* EffectControlLinearizer::LowerStringFromCodePointAt(Node* node) { + Node* string = node->InputAt(0); + Node* index = node->InputAt(1); + + Callable callable = + Builtins::CallableFor(isolate(), Builtins::kStringFromCodePointAt); + Operator::Properties properties = Operator::kEliminatable; + CallDescriptor::Flags flags = CallDescriptor::kNoFlags; + auto call_descriptor = Linkage::GetStubCallDescriptor( + graph()->zone(), callable.descriptor(), + callable.descriptor().GetStackParameterCount(), flags, properties); + return __ Call(call_descriptor, __ HeapConstant(callable.code()), string, + index, __ NoContextConstant()); +} + Node* EffectControlLinearizer::LowerStringLength(Node* node) { Node* subject = node->InputAt(0); @@ -4083,6 +4270,41 @@ Node* EffectControlLinearizer::LowerStringLessThanOrEqual(Node* node) { Builtins::CallableFor(isolate(), Builtins::kStringLessThanOrEqual), node); } +Node* EffectControlLinearizer::LowerBigIntAdd(Node* node, Node* frame_state) { + Node* lhs = node->InputAt(0); + Node* rhs = node->InputAt(1); + + Callable const callable = + Builtins::CallableFor(isolate(), Builtins::kBigIntAddNoThrow); + auto call_descriptor = Linkage::GetStubCallDescriptor( + graph()->zone(), callable.descriptor(), + callable.descriptor().GetStackParameterCount(), CallDescriptor::kNoFlags, + Operator::kFoldable | Operator::kNoThrow); + Node* value = + __ Call(call_descriptor, jsgraph()->HeapConstant(callable.code()), lhs, + rhs, __ NoContextConstant()); + + // Check for exception sentinel: Smi is returned to signal BigIntTooBig.
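The surrogate-pair arithmetic in LowerStringFromSingleCodePoint above is the standard UTF-32 to UTF-16 split: lead = (cp >> 10) + (0xD800 - (0x10000 >> 10)) and trail = (cp & 0x3FF) + 0xDC00, with the two 16-bit units then packed into one 32-bit word in memory order. A standalone check of those formulas:

#include <cassert>
#include <cstdint>

// Splits a supplementary-plane code point into a UTF-16 surrogate pair,
// using the same folded lead offset as the lowering above.
void EncodeSurrogatePair(uint32_t cp, uint16_t* lead, uint16_t* trail) {
  const uint32_t kLeadOffset = 0xD800 - (0x10000 >> 10);
  *lead = static_cast<uint16_t>((cp >> 10) + kLeadOffset);
  *trail = static_cast<uint16_t>((cp & 0x3FF) + 0xDC00);
}

int main() {
  uint16_t lead, trail;
  EncodeSurrogatePair(0x1F600, &lead, &trail);  // U+1F600
  assert(lead == 0xD83D && trail == 0xDE00);
}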
+ __ DeoptimizeIf(DeoptimizeReason::kBigIntTooBig, VectorSlotPair{}, + ObjectIsSmi(value), frame_state); + + return value; +} + +Node* EffectControlLinearizer::LowerBigIntNegate(Node* node) { + Callable const callable = + Builtins::CallableFor(isolate(), Builtins::kBigIntUnaryMinus); + auto call_descriptor = Linkage::GetStubCallDescriptor( + graph()->zone(), callable.descriptor(), + callable.descriptor().GetStackParameterCount(), CallDescriptor::kNoFlags, + Operator::kFoldable | Operator::kNoThrow); + Node* value = + __ Call(call_descriptor, jsgraph()->HeapConstant(callable.code()), + node->InputAt(0), __ NoContextConstant()); + + return value; +} + Node* EffectControlLinearizer::LowerCheckFloat64Hole(Node* node, Node* frame_state) { // If we reach this point w/o eliminating the {node} that's marked @@ -4256,6 +4478,11 @@ Node* EffectControlLinearizer::ChangeIntPtrToInt32(Node* value) { return value; } +Node* EffectControlLinearizer::ChangeInt32ToCompressedSmi(Node* value) { + CHECK(machine()->Is64() && SmiValuesAre31Bits()); + return __ Word32Shl(value, SmiShiftBitsConstant()); +} + Node* EffectControlLinearizer::ChangeInt32ToSmi(Node* value) { // Do shift on 32bit values if Smis are stored in the lower word. if (machine()->Is64() && SmiValuesAre31Bits()) { @@ -4305,6 +4532,11 @@ Node* EffectControlLinearizer::ChangeSmiToInt32(Node* value) { return ChangeSmiToIntPtr(value); } +Node* EffectControlLinearizer::ChangeCompressedSmiToInt32(Node* value) { + CHECK(machine()->Is64() && SmiValuesAre31Bits()); + return __ Word32Sar(value, SmiShiftBitsConstant()); +} + Node* EffectControlLinearizer::ChangeSmiToInt64(Node* value) { CHECK(machine()->Is64()); return ChangeSmiToIntPtr(value); } @@ -5163,6 +5395,30 @@ void EffectControlLinearizer::LowerRuntimeAbort(Node* node) { __ Int32Constant(1), __ NoContextConstant()); } +Node* EffectControlLinearizer::LowerAssertType(Node* node) { + DCHECK_EQ(node->opcode(), IrOpcode::kAssertType); + Type type = OpParameter<Type>(node->op()); + DCHECK(type.IsRange()); + auto range = type.AsRange(); + + Node* const input = node->InputAt(0); + Node* const min = __ NumberConstant(range->Min()); + Node* const max = __ NumberConstant(range->Max()); + + { + Callable const callable = + Builtins::CallableFor(isolate(), Builtins::kCheckNumberInRange); + Operator::Properties const properties = node->op()->properties(); + CallDescriptor::Flags const flags = CallDescriptor::kNoFlags; + auto call_descriptor = Linkage::GetStubCallDescriptor( + graph()->zone(), callable.descriptor(), + callable.descriptor().GetStackParameterCount(), flags, properties); + __ Call(call_descriptor, __ HeapConstant(callable.code()), input, min, max, + __ NoContextConstant()); + return input; + } +} + Node* EffectControlLinearizer::LowerConvertReceiver(Node* node) { ConvertReceiverMode const mode = ConvertReceiverModeOf(node->op()); Node* value = node->InputAt(0); @@ -5187,7 +5443,7 @@ Node* EffectControlLinearizer::LowerConvertReceiver(Node* node) { __ GotoIf(check, &convert_to_object); __ Goto(&done_convert, value); - // Wrap the primitive {value} into a JSValue. + // Wrap the primitive {value} into a JSPrimitiveWrapper. __ Bind(&convert_to_object); Operator::Properties properties = Operator::kEliminatable; Callable callable = Builtins::CallableFor(isolate(), Builtins::kToObject); @@ -5220,7 +5476,7 @@ Node* EffectControlLinearizer::LowerConvertReceiver(Node* node) { __ GotoIf(check, &convert_to_object); __ Goto(&done_convert, value); - // Wrap the primitive {value} into a JSValue.
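ChangeInt32ToCompressedSmi and ChangeCompressedSmiToInt32 above reduce Smi tagging to a 32-bit shift pair once pointers are compressed and Smis are 31 bits: shift left to tag (the tag bit stays zero), arithmetic shift right to untag. A self-contained round trip, assuming a Smi shift of one bit as on those configurations:

#include <cassert>
#include <cstdint>

// 31-bit Smis under pointer compression: one tag bit, tag value 0.
constexpr int kSmiShiftBits = 1;

int32_t TagSmi(int32_t value) {
  // Shift through uint32_t so the left shift is well defined pre-C++20.
  return static_cast<int32_t>(static_cast<uint32_t>(value) << kSmiShiftBits);
}

int32_t UntagSmi(int32_t smi) {
  return smi >> kSmiShiftBits;  // arithmetic shift preserves the sign
}

int main() {
  for (int32_t v : {0, 1, -1, 0x3FFFFFFF, -0x40000000}) {
    assert(UntagSmi(TagSmi(v)) == v);  // round-trips for all 31-bit values
    assert((TagSmi(v) & 1) == 0);      // tag bit stays clear
  }
}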
+ // Wrap the primitive {value} into a JSPrimitiveWrapper. __ Bind(&convert_to_object); __ GotoIf(__ WordEqual(value, __ UndefinedConstant()), &convert_global_proxy); diff --git a/deps/v8/src/compiler/escape-analysis.cc b/deps/v8/src/compiler/escape-analysis.cc index dc0db4d780eb91..aee0121384ac61 100644 --- a/deps/v8/src/compiler/escape-analysis.cc +++ b/deps/v8/src/compiler/escape-analysis.cc @@ -4,6 +4,7 @@ #include "src/compiler/escape-analysis.h" +#include "src/codegen/tick-counter.h" #include "src/compiler/linkage.h" #include "src/compiler/node-matchers.h" #include "src/compiler/operator-properties.h" @@ -153,6 +154,7 @@ class VariableTracker { ZoneVector<Node*> buffer_; EffectGraphReducer* reducer_; int next_variable_ = 0; + TickCounter* const tick_counter_; DISALLOW_COPY_AND_ASSIGN(VariableTracker); }; @@ -279,12 +281,14 @@ class EscapeAnalysisTracker : public ZoneObject { }; EffectGraphReducer::EffectGraphReducer( - Graph* graph, std::function<void(Node*, Reduction*)> reduce, Zone* zone) + Graph* graph, std::function<void(Node*, Reduction*)> reduce, + TickCounter* tick_counter, Zone* zone) : graph_(graph), state_(graph, kNumStates), revisit_(zone), stack_(zone), - reduce_(std::move(reduce)) {} + reduce_(std::move(reduce)), + tick_counter_(tick_counter) {} void EffectGraphReducer::ReduceFrom(Node* node) { // Perform DFS and eagerly trigger revisitation as soon as possible. @@ -293,6 +297,7 @@ void EffectGraphReducer::ReduceFrom(Node* node) { DCHECK(stack_.empty()); stack_.push({node, 0}); while (!stack_.empty()) { + tick_counter_->DoTick(); Node* current = stack_.top().node; int& input_index = stack_.top().input_index; if (input_index < current->InputCount()) { @@ -357,7 +362,8 @@ VariableTracker::VariableTracker(JSGraph* graph, EffectGraphReducer* reducer, graph_(graph), table_(zone, State(zone)), buffer_(zone), - reducer_(reducer) {} + reducer_(reducer), + tick_counter_(reducer->tick_counter()) {} VariableTracker::Scope::Scope(VariableTracker* states, Node* node, Reduction* reduction) @@ -406,6 +412,7 @@ VariableTracker::State VariableTracker::MergeInputs(Node* effect_phi) { State first_input = table_.Get(NodeProperties::GetEffectInput(effect_phi, 0)); State result = first_input; for (std::pair<Variable, Node*> var_value : first_input) { + tick_counter_->DoTick(); if (Node* value = var_value.second) { Variable var = var_value.first; TRACE("var %i:\n", var.id_); @@ -441,10 +448,12 @@ VariableTracker::State VariableTracker::MergeInputs(Node* effect_phi) { // [old_value] cannot originate from the inputs. Thus [old_value] // must have been created by a previous reduction of this [effect_phi]. for (int i = 0; i < arity; ++i) { - NodeProperties::ReplaceValueInput( - old_value, buffer_[i] ? buffer_[i] : graph_->Dead(), i); - // This change cannot affect the rest of the reducer, so there is no - // need to trigger additional revisitations. + Node* old_input = NodeProperties::GetValueInput(old_value, i); + Node* new_input = buffer_[i] ? buffer_[i] : graph_->Dead(); + if (old_input != new_input) { + NodeProperties::ReplaceValueInput(old_value, new_input, i); + reducer_->Revisit(old_value); + } } result.Set(var, old_value); } else { @@ -701,21 +710,19 @@ void ReduceNode(const Operator* op, EscapeAnalysisTracker::Scope* current, } else if (right_object && !right_object->HasEscaped()) { replacement = jsgraph->FalseConstant(); } - if (replacement) { - // TODO(tebbi) This is a workaround for uninhabited types. If we - // replaced a value of uninhabited type with a constant, we would - // widen the type of the node.
This could produce inconsistent - types (which might confuse representation selection). We get - // around this by refusing to constant-fold and escape-analyze - // if the type is not inhabited. - if (!NodeProperties::GetType(left).IsNone() && - !NodeProperties::GetType(right).IsNone()) { - current->SetReplacement(replacement); - } else { - current->SetEscaped(left); - current->SetEscaped(right); - } + // TODO(tebbi) This is a workaround for uninhabited types. If we + // replaced a value of uninhabited type with a constant, we would + // widen the type of the node. This could produce inconsistent + // types (which might confuse representation selection). We get + // around this by refusing to constant-fold and escape-analyze + // if the type is not inhabited. + if (replacement && !NodeProperties::GetType(left).IsNone() && + !NodeProperties::GetType(right).IsNone()) { + current->SetReplacement(replacement); + break; } + current->SetEscaped(left); + current->SetEscaped(right); break; } case IrOpcode::kCheckMaps: { @@ -817,11 +824,12 @@ void EscapeAnalysis::Reduce(Node* node, Reduction* reduction) { ReduceNode(op, &current, jsgraph()); } -EscapeAnalysis::EscapeAnalysis(JSGraph* jsgraph, Zone* zone) +EscapeAnalysis::EscapeAnalysis(JSGraph* jsgraph, TickCounter* tick_counter, + Zone* zone) : EffectGraphReducer( jsgraph->graph(), [this](Node* node, Reduction* reduction) { Reduce(node, reduction); }, - zone), + tick_counter, zone), tracker_(new (zone) EscapeAnalysisTracker(jsgraph, this, zone)), jsgraph_(jsgraph) {} diff --git a/deps/v8/src/compiler/escape-analysis.h b/deps/v8/src/compiler/escape-analysis.h index c3dcd2f74d6e72..0fbc7d0bdd2e99 100644 --- a/deps/v8/src/compiler/escape-analysis.h +++ b/deps/v8/src/compiler/escape-analysis.h @@ -14,6 +14,9 @@ namespace v8 { namespace internal { + +class TickCounter; + namespace compiler { class CommonOperatorBuilder; @@ -38,7 +41,8 @@ class EffectGraphReducer { }; EffectGraphReducer(Graph* graph, - std::function<void(Node*, Reduction*)> reduce, Zone* zone); + std::function<void(Node*, Reduction*)> reduce, + TickCounter* tick_counter, Zone* zone); void ReduceGraph() { ReduceFrom(graph_->end()); } @@ -56,6 +60,8 @@ class EffectGraphReducer { bool Complete() { return stack_.empty() && revisit_.empty(); } + TickCounter* tick_counter() const { return tick_counter_; } + private: struct NodeState { Node* node; @@ -69,6 +75,7 @@ class EffectGraphReducer { ZoneStack<Node*> revisit_; ZoneStack<NodeState> stack_; std::function<void(Node*, Reduction*)> reduce_; + TickCounter* const tick_counter_; }; // A variable is an abstract storage location, which is lowered to SSA values @@ -164,7 +171,7 @@ class EscapeAnalysisResult { class V8_EXPORT_PRIVATE EscapeAnalysis final : public NON_EXPORTED_BASE(EffectGraphReducer) { public: - EscapeAnalysis(JSGraph* jsgraph, Zone* zone); + EscapeAnalysis(JSGraph* jsgraph, TickCounter* tick_counter, Zone* zone); EscapeAnalysisResult analysis_result() { DCHECK(Complete()); diff --git a/deps/v8/src/compiler/graph-assembler.cc b/deps/v8/src/compiler/graph-assembler.cc index cc9dbd9dfdb9da..50f29d968bfdf6 100644 --- a/deps/v8/src/compiler/graph-assembler.cc +++ b/deps/v8/src/compiler/graph-assembler.cc @@ -52,6 +52,9 @@ Node* GraphAssembler::HeapConstant(Handle<HeapObject> object) { return jsgraph()->HeapConstant(object); } +Node* GraphAssembler::NumberConstant(double value) { + return jsgraph()->Constant(value); +} Node* GraphAssembler::ExternalConstant(ExternalReference ref) { return jsgraph()->ExternalConstant(ref); } @@ -221,6 +224,12 @@ Node* GraphAssembler::BitcastTaggedToWord(Node* value) { current_effect_, current_control_); }
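The MergeInputs change in escape-analysis.cc above replaces a phi input only when it actually differs and, since such a change can now affect other reductions, re-queues the phi instead of assuming no revisitation is needed. The pattern in isolation, on hypothetical minimal node types:

#include <queue>
#include <vector>

// Hypothetical minimal node: value inputs only.
struct Node { std::vector<Node*> inputs; };

// Rewrite the phi's inputs from {buffer}, falling back to {dead} for gaps,
// and schedule a revisit only if something actually changed: no needless
// re-reduction, but no missed change either.
void ReplacePhiInputs(Node* phi, const std::vector<Node*>& buffer, Node* dead,
                      std::queue<Node*>* revisit) {
  for (size_t i = 0; i < phi->inputs.size(); ++i) {
    Node* new_input = buffer[i] ? buffer[i] : dead;
    if (phi->inputs[i] != new_input) {
      phi->inputs[i] = new_input;
      revisit->push(phi);
    }
  }
}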
+Node* GraphAssembler::BitcastTaggedSignedToWord(Node* value) { + return current_effect_ = + graph()->NewNode(machine()->BitcastTaggedSignedToWord(), value, + current_effect_, current_control_); +} + Node* GraphAssembler::Word32PoisonOnSpeculation(Node* value) { return current_effect_ = graph()->NewNode(machine()->Word32PoisonOnSpeculation(), value, diff --git a/deps/v8/src/compiler/graph-assembler.h b/deps/v8/src/compiler/graph-assembler.h index 74b885b7887f56..e2c0005d15741f 100644 --- a/deps/v8/src/compiler/graph-assembler.h +++ b/deps/v8/src/compiler/graph-assembler.h @@ -57,6 +57,7 @@ namespace compiler { V(Word32Shr) \ V(Word32Shl) \ V(Word32Sar) \ + V(Word64And) \ V(IntAdd) \ V(IntSub) \ V(IntMul) \ @@ -71,6 +72,7 @@ namespace compiler { V(Uint64LessThan) \ V(Uint64LessThanOrEqual) \ V(Int32LessThan) \ + V(Int64Sub) \ V(Float64Add) \ V(Float64Sub) \ V(Float64Div) \ @@ -93,22 +95,24 @@ namespace compiler { V(Uint32Mod) \ V(Uint32Div) -#define JSGRAPH_SINGLETON_CONSTANT_LIST(V) \ - V(TrueConstant) \ - V(FalseConstant) \ - V(NullConstant) \ - V(BigIntMapConstant) \ - V(BooleanMapConstant) \ - V(HeapNumberMapConstant) \ - V(NoContextConstant) \ - V(EmptyStringConstant) \ - V(UndefinedConstant) \ - V(TheHoleConstant) \ - V(FixedArrayMapConstant) \ - V(FixedDoubleArrayMapConstant) \ - V(ToNumberBuiltinConstant) \ - V(AllocateInYoungGenerationStubConstant) \ - V(AllocateInOldGenerationStubConstant) +#define JSGRAPH_SINGLETON_CONSTANT_LIST(V) \ + V(TrueConstant) \ + V(FalseConstant) \ + V(NullConstant) \ + V(BigIntMapConstant) \ + V(BooleanMapConstant) \ + V(HeapNumberMapConstant) \ + V(NoContextConstant) \ + V(EmptyStringConstant) \ + V(UndefinedConstant) \ + V(TheHoleConstant) \ + V(FixedArrayMapConstant) \ + V(FixedDoubleArrayMapConstant) \ + V(ToNumberBuiltinConstant) \ + V(AllocateInYoungGenerationStubConstant) \ + V(AllocateRegularInYoungGenerationStubConstant) \ + V(AllocateInOldGenerationStubConstant) \ + V(AllocateRegularInOldGenerationStubConstant) class GraphAssembler; @@ -196,6 +200,7 @@ class GraphAssembler { Node* Float64Constant(double value); Node* Projection(int index, Node* value); Node* HeapConstant(Handle object); + Node* NumberConstant(double value); Node* CEntryStubConstant(int result_size); Node* ExternalConstant(ExternalReference ref); @@ -225,6 +230,7 @@ class GraphAssembler { Node* ToNumber(Node* value); Node* BitcastWordToTagged(Node* value); Node* BitcastTaggedToWord(Node* value); + Node* BitcastTaggedSignedToWord(Node* value); Node* Allocate(AllocationType allocation, Node* size); Node* LoadField(FieldAccess const&, Node* object); Node* LoadElement(ElementAccess const&, Node* object, Node* index); diff --git a/deps/v8/src/compiler/graph-reducer.cc b/deps/v8/src/compiler/graph-reducer.cc index fafa322d87cb81..9a0dea6b260fe1 100644 --- a/deps/v8/src/compiler/graph-reducer.cc +++ b/deps/v8/src/compiler/graph-reducer.cc @@ -5,10 +5,11 @@ #include #include -#include "src/compiler/graph.h" +#include "src/codegen/tick-counter.h" #include "src/compiler/graph-reducer.h" -#include "src/compiler/node.h" +#include "src/compiler/graph.h" #include "src/compiler/node-properties.h" +#include "src/compiler/node.h" #include "src/compiler/verifier.h" namespace v8 { @@ -25,13 +26,15 @@ enum class GraphReducer::State : uint8_t { void Reducer::Finalize() {} -GraphReducer::GraphReducer(Zone* zone, Graph* graph, Node* dead) +GraphReducer::GraphReducer(Zone* zone, Graph* graph, TickCounter* tick_counter, + Node* dead) : graph_(graph), dead_(dead), state_(graph, 4), reducers_(zone), 
revisit_(zone), - stack_(zone) { + stack_(zone), + tick_counter_(tick_counter) { if (dead != nullptr) { NodeProperties::SetType(dead_, Type::None()); } @@ -82,6 +85,7 @@ Reduction GraphReducer::Reduce(Node* const node) { auto skip = reducers_.end(); for (auto i = reducers_.begin(); i != reducers_.end();) { if (i != skip) { + tick_counter_->DoTick(); Reduction reduction = (*i)->Reduce(node); if (!reduction.Changed()) { // No change from this reducer. diff --git a/deps/v8/src/compiler/graph-reducer.h b/deps/v8/src/compiler/graph-reducer.h index 3bb20a462514ea..bbcc67b0748cd3 100644 --- a/deps/v8/src/compiler/graph-reducer.h +++ b/deps/v8/src/compiler/graph-reducer.h @@ -12,13 +12,15 @@ namespace v8 { namespace internal { + +class TickCounter; + namespace compiler { // Forward declarations. class Graph; class Node; - // NodeIds are identifying numbers for nodes that can be used to index auxiliary // out-of-line data associated with each node. using NodeId = uint32_t; @@ -129,7 +131,8 @@ class AdvancedReducer : public Reducer { class V8_EXPORT_PRIVATE GraphReducer : public NON_EXPORTED_BASE(AdvancedReducer::Editor) { public: - GraphReducer(Zone* zone, Graph* graph, Node* dead = nullptr); + GraphReducer(Zone* zone, Graph* graph, TickCounter* tick_counter, + Node* dead = nullptr); ~GraphReducer() override; Graph* graph() const { return graph_; } @@ -181,6 +184,7 @@ class V8_EXPORT_PRIVATE GraphReducer ZoneVector<Reducer*> reducers_; ZoneQueue<Node*> revisit_; ZoneStack<NodeState> stack_; + TickCounter* const tick_counter_; DISALLOW_COPY_AND_ASSIGN(GraphReducer); }; diff --git a/deps/v8/src/compiler/heap-refs.h b/deps/v8/src/compiler/heap-refs.h new file mode 100644 index 00000000000000..5547039fa63c5f --- /dev/null +++ b/deps/v8/src/compiler/heap-refs.h @@ -0,0 +1,906 @@ +// Copyright 2019 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#ifndef V8_COMPILER_HEAP_REFS_H_ +#define V8_COMPILER_HEAP_REFS_H_ + +#include "src/base/optional.h" +#include "src/ic/call-optimization.h" +#include "src/objects/elements-kind.h" +#include "src/objects/feedback-vector.h" +#include "src/objects/instance-type.h" + +namespace v8 { +namespace internal { + +class BytecodeArray; +class CallHandlerInfo; +class FixedDoubleArray; +class FunctionTemplateInfo; +class HeapNumber; +class InternalizedString; +class JSBoundFunction; +class JSDataView; +class JSGlobalProxy; +class JSRegExp; +class JSTypedArray; +class NativeContext; +class ScriptContextTable; +class VectorSlotPair; + +namespace compiler { + +// Whether we are loading a property or storing to a property. +// For a store during literal creation, do not walk up the prototype chain. +enum class AccessMode { kLoad, kStore, kStoreInLiteral, kHas }; + +enum class OddballType : uint8_t { + kNone, // Not an Oddball. + kBoolean, // True or False. + kUndefined, + kNull, + kHole, + kUninitialized, + kOther // Oddball, but none of the above. +}; + +// This list is sorted such that subtypes appear before their supertypes. +// DO NOT VIOLATE THIS PROPERTY!
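The list that follows is an X-macro: each client defines V locally and expands the list once per ref type, which is how the forward declarations and the Is/As member declarations further down are all generated from this single list. A tiny standalone example of the pattern, with illustrative names only:

#include <iostream>

// One list, many expansions: forward declarations and a printer come from
// the same macro, so the set of types lives in exactly one place.
#define SHAPE_LIST(V) V(Circle) V(Square) V(Triangle)

#define FORWARD_DECL(Name) class Name;
SHAPE_LIST(FORWARD_DECL)
#undef FORWARD_DECL

#define PRINT_NAME(Name) std::cout << #Name << '\n';
int main() { SHAPE_LIST(PRINT_NAME) }
#undef PRINT_NAME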
+#define HEAP_BROKER_OBJECT_LIST(V) \ + /* Subtypes of JSObject */ \ + V(JSArray) \ + V(JSBoundFunction) \ + V(JSDataView) \ + V(JSFunction) \ + V(JSGlobalProxy) \ + V(JSRegExp) \ + V(JSTypedArray) \ + /* Subtypes of Context */ \ + V(NativeContext) \ + /* Subtypes of FixedArray */ \ + V(Context) \ + V(ScopeInfo) \ + V(ScriptContextTable) \ + /* Subtypes of FixedArrayBase */ \ + V(BytecodeArray) \ + V(FixedArray) \ + V(FixedDoubleArray) \ + /* Subtypes of Name */ \ + V(InternalizedString) \ + V(String) \ + V(Symbol) \ + /* Subtypes of HeapObject */ \ + V(AllocationSite) \ + V(BigInt) \ + V(CallHandlerInfo) \ + V(Cell) \ + V(Code) \ + V(DescriptorArray) \ + V(FeedbackCell) \ + V(FeedbackVector) \ + V(FixedArrayBase) \ + V(FunctionTemplateInfo) \ + V(HeapNumber) \ + V(JSObject) \ + V(Map) \ + V(MutableHeapNumber) \ + V(Name) \ + V(PropertyCell) \ + V(SharedFunctionInfo) \ + V(SourceTextModule) \ + /* Subtypes of Object */ \ + V(HeapObject) + +class CompilationDependencies; +class JSHeapBroker; +class ObjectData; +class PerIsolateCompilerCache; +class PropertyAccessInfo; +#define FORWARD_DECL(Name) class Name##Ref; +HEAP_BROKER_OBJECT_LIST(FORWARD_DECL) +#undef FORWARD_DECL + +class V8_EXPORT_PRIVATE ObjectRef { + public: + ObjectRef(JSHeapBroker* broker, Handle object); + ObjectRef(JSHeapBroker* broker, ObjectData* data) + : data_(data), broker_(broker) { + CHECK_NOT_NULL(data_); + } + + Handle object() const; + + bool equals(const ObjectRef& other) const; + + bool IsSmi() const; + int AsSmi() const; + +#define HEAP_IS_METHOD_DECL(Name) bool Is##Name() const; + HEAP_BROKER_OBJECT_LIST(HEAP_IS_METHOD_DECL) +#undef HEAP_IS_METHOD_DECL + +#define HEAP_AS_METHOD_DECL(Name) Name##Ref As##Name() const; + HEAP_BROKER_OBJECT_LIST(HEAP_AS_METHOD_DECL) +#undef HEAP_AS_METHOD_DECL + + bool IsNullOrUndefined() const; + + bool BooleanValue() const; + Maybe OddballToNumber() const; + + // Return the element at key {index} if {index} is known to be an own data + // property of the object that is non-writable and non-configurable. + base::Optional GetOwnConstantElement(uint32_t index, + bool serialize = false) const; + + Isolate* isolate() const; + + struct Hash { + size_t operator()(const ObjectRef& ref) const { + return base::hash_combine(ref.object().address()); + } + }; + struct Equal { + bool operator()(const ObjectRef& lhs, const ObjectRef& rhs) const { + return lhs.equals(rhs); + } + }; + + protected: + JSHeapBroker* broker() const; + ObjectData* data() const; + ObjectData* data_; // Should be used only by object() getters. + + private: + friend class FunctionTemplateInfoRef; + friend class JSArrayData; + friend class JSGlobalProxyRef; + friend class JSGlobalProxyData; + friend class JSObjectData; + friend class StringData; + + friend std::ostream& operator<<(std::ostream& os, const ObjectRef& ref); + + JSHeapBroker* broker_; +}; + +// Temporary class that carries information from a Map. We'd like to remove +// this class and use MapRef instead, but we can't as long as we support the +// kDisabled broker mode. That's because obtaining the MapRef via +// HeapObjectRef::map() requires a HandleScope when the broker is disabled. +// During OptimizeGraph we generally don't have a HandleScope, however. There +// are two places where we therefore use GetHeapObjectType() instead. Both that +// function and this class should eventually be removed. 
+class HeapObjectType { + public: + enum Flag : uint8_t { kUndetectable = 1 << 0, kCallable = 1 << 1 }; + + using Flags = base::Flags; + + HeapObjectType(InstanceType instance_type, Flags flags, + OddballType oddball_type) + : instance_type_(instance_type), + oddball_type_(oddball_type), + flags_(flags) { + DCHECK_EQ(instance_type == ODDBALL_TYPE, + oddball_type != OddballType::kNone); + } + + OddballType oddball_type() const { return oddball_type_; } + InstanceType instance_type() const { return instance_type_; } + Flags flags() const { return flags_; } + + bool is_callable() const { return flags_ & kCallable; } + bool is_undetectable() const { return flags_ & kUndetectable; } + + private: + InstanceType const instance_type_; + OddballType const oddball_type_; + Flags const flags_; +}; + +class HeapObjectRef : public ObjectRef { + public: + using ObjectRef::ObjectRef; + Handle object() const; + + MapRef map() const; + + // See the comment on the HeapObjectType class. + HeapObjectType GetHeapObjectType() const; +}; + +class PropertyCellRef : public HeapObjectRef { + public: + using HeapObjectRef::HeapObjectRef; + Handle object() const; + + PropertyDetails property_details() const; + + void Serialize(); + ObjectRef value() const; +}; + +class JSObjectRef : public HeapObjectRef { + public: + using HeapObjectRef::HeapObjectRef; + Handle object() const; + + uint64_t RawFastDoublePropertyAsBitsAt(FieldIndex index) const; + double RawFastDoublePropertyAt(FieldIndex index) const; + ObjectRef RawFastPropertyAt(FieldIndex index) const; + + // Return the value of the property identified by the field {index} + // if {index} is known to be an own data property of the object. + base::Optional GetOwnProperty(Representation field_representation, + FieldIndex index, + bool serialize = false) const; + + FixedArrayBaseRef elements() const; + void SerializeElements(); + void EnsureElementsTenured(); + ElementsKind GetElementsKind() const; + + void SerializeObjectCreateMap(); + base::Optional GetObjectCreateMap() const; +}; + +class JSDataViewRef : public JSObjectRef { + public: + using JSObjectRef::JSObjectRef; + Handle object() const; + + size_t byte_length() const; + size_t byte_offset() const; +}; + +class JSBoundFunctionRef : public JSObjectRef { + public: + using JSObjectRef::JSObjectRef; + Handle object() const; + + void Serialize(); + + // The following are available only after calling Serialize(). + ObjectRef bound_target_function() const; + ObjectRef bound_this() const; + FixedArrayRef bound_arguments() const; +}; + +class V8_EXPORT_PRIVATE JSFunctionRef : public JSObjectRef { + public: + using JSObjectRef::JSObjectRef; + Handle object() const; + + bool has_feedback_vector() const; + bool has_initial_map() const; + bool has_prototype() const; + bool PrototypeRequiresRuntimeLookup() const; + + void Serialize(); + bool serialized() const; + + // The following are available only after calling Serialize(). 
+ ObjectRef prototype() const; + MapRef initial_map() const; + ContextRef context() const; + NativeContextRef native_context() const; + SharedFunctionInfoRef shared() const; + FeedbackVectorRef feedback_vector() const; + int InitialMapInstanceSizeWithMinSlack() const; + + bool IsSerializedForCompilation() const; +}; + +class JSRegExpRef : public JSObjectRef { + public: + using JSObjectRef::JSObjectRef; + Handle object() const; + + ObjectRef raw_properties_or_hash() const; + ObjectRef data() const; + ObjectRef source() const; + ObjectRef flags() const; + ObjectRef last_index() const; +}; + +class HeapNumberRef : public HeapObjectRef { + public: + using HeapObjectRef::HeapObjectRef; + Handle object() const; + + double value() const; +}; + +class MutableHeapNumberRef : public HeapObjectRef { + public: + using HeapObjectRef::HeapObjectRef; + Handle object() const; + + double value() const; +}; + +class ContextRef : public HeapObjectRef { + public: + using HeapObjectRef::HeapObjectRef; + Handle object() const; + + // {previous} decrements {depth} by 1 for each previous link successfully + // followed. If {depth} != 0 on function return, then it only got + // partway to the desired depth. If {serialize} is true, then + // {previous} will cache its findings. + ContextRef previous(size_t* depth, bool serialize = false) const; + + // Only returns a value if the index is valid for this ContextRef. + base::Optional get(int index, bool serialize = false) const; + + // We only serialize the ScopeInfo if certain Promise + // builtins are called. + void SerializeScopeInfo(); + base::Optional scope_info() const; +}; + +#define BROKER_COMPULSORY_NATIVE_CONTEXT_FIELDS(V) \ + V(JSFunction, array_function) \ + V(JSFunction, boolean_function) \ + V(JSFunction, bigint_function) \ + V(JSFunction, number_function) \ + V(JSFunction, object_function) \ + V(JSFunction, promise_function) \ + V(JSFunction, promise_then) \ + V(JSFunction, string_function) \ + V(JSFunction, symbol_function) \ + V(JSGlobalProxy, global_proxy_object) \ + V(JSObject, promise_prototype) \ + V(Map, bound_function_with_constructor_map) \ + V(Map, bound_function_without_constructor_map) \ + V(Map, fast_aliased_arguments_map) \ + V(Map, initial_array_iterator_map) \ + V(Map, initial_string_iterator_map) \ + V(Map, iterator_result_map) \ + V(Map, js_array_holey_double_elements_map) \ + V(Map, js_array_holey_elements_map) \ + V(Map, js_array_holey_smi_elements_map) \ + V(Map, js_array_packed_double_elements_map) \ + V(Map, js_array_packed_elements_map) \ + V(Map, js_array_packed_smi_elements_map) \ + V(Map, sloppy_arguments_map) \ + V(Map, slow_object_with_null_prototype_map) \ + V(Map, strict_arguments_map) \ + V(ScriptContextTable, script_context_table) \ + V(SharedFunctionInfo, promise_capability_default_reject_shared_fun) \ + V(SharedFunctionInfo, promise_catch_finally_shared_fun) \ + V(SharedFunctionInfo, promise_then_finally_shared_fun) \ + V(SharedFunctionInfo, promise_capability_default_resolve_shared_fun) + +// Those are set by Bootstrapper::ExportFromRuntime, which may not yet have +// happened when Turbofan is invoked via --always-opt. 
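ContextRef::previous above walks the context chain with a budget: it follows up to *depth previous links, decrementing the counter per successful hop, so the caller can tell from the remaining depth whether the requested depth was reached. A sketch of that contract on a toy context chain (hypothetical types, ignoring the serialization aspect):

#include <cassert>
#include <cstddef>

struct Context { Context* previous = nullptr; };

// Walks up to *depth previous links; on return, *depth holds the hops that
// could not be taken (0 means the requested depth was fully reached).
Context* Previous(Context* ctx, size_t* depth) {
  while (*depth > 0 && ctx->previous != nullptr) {
    ctx = ctx->previous;
    --*depth;
  }
  return ctx;
}

int main() {
  Context a, b, c;  // chain: c -> b -> a
  c.previous = &b;
  b.previous = &a;
  size_t depth = 5;
  assert(Previous(&c, &depth) == &a && depth == 3);  // only 2 hops existed
}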
+#define BROKER_OPTIONAL_NATIVE_CONTEXT_FIELDS(V) \ + V(Map, async_function_object_map) \ + V(Map, map_key_iterator_map) \ + V(Map, map_key_value_iterator_map) \ + V(Map, map_value_iterator_map) \ + V(JSFunction, regexp_exec_function) \ + V(Map, set_key_value_iterator_map) \ + V(Map, set_value_iterator_map) + +#define BROKER_NATIVE_CONTEXT_FIELDS(V) \ + BROKER_COMPULSORY_NATIVE_CONTEXT_FIELDS(V) \ + BROKER_OPTIONAL_NATIVE_CONTEXT_FIELDS(V) + +class NativeContextRef : public ContextRef { + public: + using ContextRef::ContextRef; + Handle object() const; + + void Serialize(); + +#define DECL_ACCESSOR(type, name) type##Ref name() const; + BROKER_NATIVE_CONTEXT_FIELDS(DECL_ACCESSOR) +#undef DECL_ACCESSOR + + ScopeInfoRef scope_info() const; + MapRef GetFunctionMapFromIndex(int index) const; + MapRef GetInitialJSArrayMap(ElementsKind kind) const; + base::Optional GetConstructorFunction(const MapRef& map) const; +}; + +class NameRef : public HeapObjectRef { + public: + using HeapObjectRef::HeapObjectRef; + Handle object() const; + + bool IsUniqueName() const; +}; + +class ScriptContextTableRef : public HeapObjectRef { + public: + using HeapObjectRef::HeapObjectRef; + Handle object() const; + + struct LookupResult { + ContextRef context; + bool immutable; + int index; + }; + + base::Optional lookup(const NameRef& name) const; +}; + +class DescriptorArrayRef : public HeapObjectRef { + public: + using HeapObjectRef::HeapObjectRef; + Handle object() const; +}; + +class FeedbackCellRef : public HeapObjectRef { + public: + using HeapObjectRef::HeapObjectRef; + Handle object() const; + + HeapObjectRef value() const; +}; + +class FeedbackVectorRef : public HeapObjectRef { + public: + using HeapObjectRef::HeapObjectRef; + Handle object() const; + + ObjectRef get(FeedbackSlot slot) const; + + void SerializeSlots(); +}; + +class CallHandlerInfoRef : public HeapObjectRef { + public: + using HeapObjectRef::HeapObjectRef; + Handle object() const; + + Address callback() const; + + void Serialize(); + ObjectRef data() const; +}; + +class AllocationSiteRef : public HeapObjectRef { + public: + using HeapObjectRef::HeapObjectRef; + Handle object() const; + + bool PointsToLiteral() const; + AllocationType GetAllocationType() const; + ObjectRef nested_site() const; + + // {IsFastLiteral} determines whether the given array or object literal + // boilerplate satisfies all limits to be considered for fast deep-copying + // and computes the total size of all objects that are part of the graph. + // + // If PointsToLiteral() is false, then IsFastLiteral() is also false. + bool IsFastLiteral() const; + // We only serialize boilerplate if IsFastLiteral is true. 
+ base::Optional boilerplate() const; + + ElementsKind GetElementsKind() const; + bool CanInlineCall() const; +}; + +class BigIntRef : public HeapObjectRef { + public: + using HeapObjectRef::HeapObjectRef; + Handle object() const; + + uint64_t AsUint64() const; +}; + +class V8_EXPORT_PRIVATE MapRef : public HeapObjectRef { + public: + using HeapObjectRef::HeapObjectRef; + Handle object() const; + + int instance_size() const; + InstanceType instance_type() const; + int GetInObjectProperties() const; + int GetInObjectPropertiesStartInWords() const; + int NumberOfOwnDescriptors() const; + int GetInObjectPropertyOffset(int index) const; + int constructor_function_index() const; + int NextFreePropertyIndex() const; + int UnusedPropertyFields() const; + ElementsKind elements_kind() const; + bool is_stable() const; + bool is_extensible() const; + bool is_constructor() const; + bool has_prototype_slot() const; + bool is_access_check_needed() const; + bool is_deprecated() const; + bool CanBeDeprecated() const; + bool CanTransition() const; + bool IsInobjectSlackTrackingInProgress() const; + bool is_dictionary_map() const; + bool IsFixedCowArrayMap() const; + bool IsPrimitiveMap() const; + bool is_undetectable() const; + bool is_callable() const; + bool has_indexed_interceptor() const; + bool is_migration_target() const; + bool supports_fast_array_iteration() const; + bool supports_fast_array_resize() const; + bool IsMapOfCurrentGlobalProxy() const; + + OddballType oddball_type() const; + +#define DEF_TESTER(Type, ...) bool Is##Type##Map() const; + INSTANCE_TYPE_CHECKERS(DEF_TESTER) +#undef DEF_TESTER + + void SerializeBackPointer(); + HeapObjectRef GetBackPointer() const; + + void SerializePrototype(); + bool serialized_prototype() const; + HeapObjectRef prototype() const; + + void SerializeForElementLoad(); + + void SerializeForElementStore(); + bool HasOnlyStablePrototypesWithFastElements( + ZoneVector* prototype_maps); + + // Concerning the underlying instance_descriptors: + void SerializeOwnDescriptors(); + void SerializeOwnDescriptor(int descriptor_index); + MapRef FindFieldOwner(int descriptor_index) const; + PropertyDetails GetPropertyDetails(int descriptor_index) const; + NameRef GetPropertyKey(int descriptor_index) const; + FieldIndex GetFieldIndexFor(int descriptor_index) const; + ObjectRef GetFieldType(int descriptor_index) const; + bool IsUnboxedDoubleField(int descriptor_index) const; + + // Available after calling JSFunctionRef::Serialize on a function that has + // this map as initial map. + ObjectRef GetConstructor() const; + base::Optional AsElementsKind(ElementsKind kind) const; +}; + +struct HolderLookupResult { + HolderLookupResult(CallOptimization::HolderLookup lookup_ = + CallOptimization::kHolderNotFound, + base::Optional holder_ = base::nullopt) + : lookup(lookup_), holder(holder_) {} + CallOptimization::HolderLookup lookup; + base::Optional holder; +}; + +class FunctionTemplateInfoRef : public HeapObjectRef { + public: + using HeapObjectRef::HeapObjectRef; + Handle object() const; + + bool is_signature_undefined() const; + bool accept_any_receiver() const; + // The following returns true if the CallHandlerInfo is present. 
+ bool has_call_code() const; + + void SerializeCallCode(); + base::Optional call_code() const; + + HolderLookupResult LookupHolderOfExpectedType(MapRef receiver_map, + bool serialize); +}; + +class FixedArrayBaseRef : public HeapObjectRef { + public: + using HeapObjectRef::HeapObjectRef; + Handle object() const; + + int length() const; +}; + +class FixedArrayRef : public FixedArrayBaseRef { + public: + using FixedArrayBaseRef::FixedArrayBaseRef; + Handle object() const; + + ObjectRef get(int i) const; +}; + +class FixedDoubleArrayRef : public FixedArrayBaseRef { + public: + using FixedArrayBaseRef::FixedArrayBaseRef; + Handle object() const; + + double get_scalar(int i) const; + bool is_the_hole(int i) const; +}; + +class BytecodeArrayRef : public FixedArrayBaseRef { + public: + using FixedArrayBaseRef::FixedArrayBaseRef; + Handle object() const; + + int register_count() const; + int parameter_count() const; + interpreter::Register incoming_new_target_or_generator_register() const; + + // Bytecode access methods. + uint8_t get(int index) const; + Address GetFirstBytecodeAddress() const; + + // Source position table. + const byte* source_positions_address() const; + int source_positions_size() const; + + // Constant pool access. + Handle GetConstantAtIndex(int index) const; + bool IsConstantAtIndexSmi(int index) const; + Smi GetConstantAtIndexAsSmi(int index) const; + + // Exception handler table. + Address handler_table_address() const; + int handler_table_size() const; + + bool IsSerializedForCompilation() const; + void SerializeForCompilation(); +}; + +class JSArrayRef : public JSObjectRef { + public: + using JSObjectRef::JSObjectRef; + Handle object() const; + + ObjectRef length() const; + + // Return the element at key {index} if the array has a copy-on-write elements + // storage and {index} is known to be an own data property. + base::Optional GetOwnCowElement(uint32_t index, + bool serialize = false) const; +}; + +class ScopeInfoRef : public HeapObjectRef { + public: + using HeapObjectRef::HeapObjectRef; + Handle object() const; + + int ContextLength() const; +}; + +#define BROKER_SFI_FIELDS(V) \ + V(int, internal_formal_parameter_count) \ + V(bool, has_duplicate_parameters) \ + V(int, function_map_index) \ + V(FunctionKind, kind) \ + V(LanguageMode, language_mode) \ + V(bool, native) \ + V(bool, HasBreakInfo) \ + V(bool, HasBuiltinId) \ + V(bool, construct_as_builtin) \ + V(bool, HasBytecodeArray) \ + V(bool, is_safe_to_skip_arguments_adaptor) \ + V(bool, IsInlineable) \ + V(int, StartPosition) \ + V(bool, is_compiled) + +class V8_EXPORT_PRIVATE SharedFunctionInfoRef : public HeapObjectRef { + public: + using HeapObjectRef::HeapObjectRef; + Handle object() const; + + int builtin_id() const; + BytecodeArrayRef GetBytecodeArray() const; + +#define DECL_ACCESSOR(type, name) type name() const; + BROKER_SFI_FIELDS(DECL_ACCESSOR) +#undef DECL_ACCESSOR + + bool IsSerializedForCompilation(FeedbackVectorRef feedback) const; + void SetSerializedForCompilation(FeedbackVectorRef feedback); + + // Template objects may not be created at compilation time. This method + // wraps the retrieval of the template object and creates it if + // necessary. 
+ JSArrayRef GetTemplateObject(ObjectRef description, FeedbackVectorRef vector, + FeedbackSlot slot, bool serialize = false); + + void SerializeFunctionTemplateInfo(); + base::Optional function_template_info() const; +}; + +class StringRef : public NameRef { + public: + using NameRef::NameRef; + Handle object() const; + + int length() const; + uint16_t GetFirstChar(); + base::Optional ToNumber(); + bool IsSeqString() const; + bool IsExternalString() const; +}; + +class SymbolRef : public NameRef { + public: + using NameRef::NameRef; + Handle object() const; +}; + +class JSTypedArrayRef : public JSObjectRef { + public: + using JSObjectRef::JSObjectRef; + Handle object() const; + + bool is_on_heap() const; + size_t length() const; + void* external_pointer() const; + + void Serialize(); + bool serialized() const; + + HeapObjectRef buffer() const; +}; + +class SourceTextModuleRef : public HeapObjectRef { + public: + using HeapObjectRef::HeapObjectRef; + Handle object() const; + + void Serialize(); + + CellRef GetCell(int cell_index) const; +}; + +class CellRef : public HeapObjectRef { + public: + using HeapObjectRef::HeapObjectRef; + Handle object() const; + + ObjectRef value() const; +}; + +class JSGlobalProxyRef : public JSObjectRef { + public: + using JSObjectRef::JSObjectRef; + Handle object() const; + + // If {serialize} is false: + // If the property is known to exist as a property cell (on the global + // object), return that property cell. Otherwise (not known to exist as a + // property cell or known not to exist as a property cell) return nothing. + // If {serialize} is true: + // Like above but potentially access the heap and serialize the necessary + // information. + base::Optional GetPropertyCell(NameRef const& name, + bool serialize = false) const; +}; + +class CodeRef : public HeapObjectRef { + public: + using HeapObjectRef::HeapObjectRef; + Handle object() const; +}; + +class InternalizedStringRef : public StringRef { + public: + using StringRef::StringRef; + Handle object() const; +}; + +class ElementAccessFeedback; +class NamedAccessFeedback; + +class ProcessedFeedback : public ZoneObject { + public: + enum Kind { kInsufficient, kGlobalAccess, kNamedAccess, kElementAccess }; + Kind kind() const { return kind_; } + + ElementAccessFeedback const* AsElementAccess() const; + NamedAccessFeedback const* AsNamedAccess() const; + + protected: + explicit ProcessedFeedback(Kind kind) : kind_(kind) {} + + private: + Kind const kind_; +}; + +class InsufficientFeedback final : public ProcessedFeedback { + public: + InsufficientFeedback(); +}; + +class GlobalAccessFeedback : public ProcessedFeedback { + public: + explicit GlobalAccessFeedback(PropertyCellRef cell); + GlobalAccessFeedback(ContextRef script_context, int slot_index, + bool immutable); + + bool IsPropertyCell() const; + PropertyCellRef property_cell() const; + + bool IsScriptContextSlot() const { return !IsPropertyCell(); } + ContextRef script_context() const; + int slot_index() const; + bool immutable() const; + + base::Optional GetConstantHint() const; + + private: + ObjectRef const cell_or_context_; + int const index_and_immutable_; +}; + +class KeyedAccessMode { + public: + static KeyedAccessMode FromNexus(FeedbackNexus const& nexus); + + AccessMode access_mode() const; + bool IsLoad() const; + bool IsStore() const; + KeyedAccessLoadMode load_mode() const; + KeyedAccessStoreMode store_mode() const; + + private: + AccessMode const access_mode_; + union LoadStoreMode { + LoadStoreMode(KeyedAccessLoadMode load_mode); + 
LoadStoreMode(KeyedAccessStoreMode store_mode); + KeyedAccessLoadMode load_mode; + KeyedAccessStoreMode store_mode; + } const load_store_mode_; + + KeyedAccessMode(AccessMode access_mode, KeyedAccessLoadMode load_mode); + KeyedAccessMode(AccessMode access_mode, KeyedAccessStoreMode store_mode); +}; + +class ElementAccessFeedback : public ProcessedFeedback { + public: + ElementAccessFeedback(Zone* zone, KeyedAccessMode const& keyed_mode); + + // No transition sources appear in {receiver_maps}. + // All transition targets appear in {receiver_maps}. + ZoneVector<Handle<Map>> receiver_maps; + ZoneVector<std::pair<Handle<Map>, Handle<Map>>> transitions; + + KeyedAccessMode const keyed_mode; + + class MapIterator { + public: + bool done() const; + void advance(); + MapRef current() const; + + private: + friend class ElementAccessFeedback; + + explicit MapIterator(ElementAccessFeedback const& processed, + JSHeapBroker* broker); + + ElementAccessFeedback const& processed_; + JSHeapBroker* const broker_; + size_t index_ = 0; + }; + + // Iterator over all maps: first {receiver_maps}, then transition sources. + MapIterator all_maps(JSHeapBroker* broker) const; +}; + +class NamedAccessFeedback : public ProcessedFeedback { + public: + NamedAccessFeedback(NameRef const& name, + ZoneVector<PropertyAccessInfo> const& access_infos); + + NameRef const& name() const { return name_; } + ZoneVector<PropertyAccessInfo> const& access_infos() const { + return access_infos_; + } + + private: + NameRef const name_; + ZoneVector<PropertyAccessInfo> const access_infos_; +}; + +} // namespace compiler +} // namespace internal +} // namespace v8 + +#endif // V8_COMPILER_HEAP_REFS_H_ diff --git a/deps/v8/src/compiler/int64-lowering.cc b/deps/v8/src/compiler/int64-lowering.cc index 3430b6b3393bf8..eda866e5f2ad04 100644 --- a/deps/v8/src/compiler/int64-lowering.cc +++ b/deps/v8/src/compiler/int64-lowering.cc @@ -97,7 +97,10 @@ int GetReturnCountAfterLowering(CallDescriptor* call_descriptor) { int GetParameterIndexAfterLowering( Signature<MachineRepresentation>* signature, int old_index) { int result = old_index; - for (int i = 0; i < old_index; i++) { + // Be robust towards special indexes (>= param count).
+ int max_to_check = + std::min(old_index, static_cast<int>(signature->parameter_count())); + for (int i = 0; i < max_to_check; i++) { if (signature->GetParam(i) == MachineRepresentation::kWord64) { result++; } @@ -142,16 +145,16 @@ int Int64Lowering::GetParameterCountAfterLowering( signature, static_cast<int>(signature->parameter_count())); } -void Int64Lowering::GetIndexNodes(Node* index, Node*& index_low, - Node*& index_high) { +void Int64Lowering::GetIndexNodes(Node* index, Node** index_low, + Node** index_high) { #if defined(V8_TARGET_LITTLE_ENDIAN) - index_low = index; - index_high = graph()->NewNode(machine()->Int32Add(), index, - graph()->NewNode(common()->Int32Constant(4))); + *index_low = index; + *index_high = graph()->NewNode(machine()->Int32Add(), index, + graph()->NewNode(common()->Int32Constant(4))); #elif defined(V8_TARGET_BIG_ENDIAN) - index_low = graph()->NewNode(machine()->Int32Add(), index, - graph()->NewNode(common()->Int32Constant(4))); - index_high = index; + *index_low = graph()->NewNode(machine()->Int32Add(), index, - graph()->NewNode(common()->Int32Constant(4))); + *index_high = index; #endif } @@ -182,7 +185,7 @@ void Int64Lowering::LowerNode(Node* node) { Node* index = node->InputAt(1); Node* index_low; Node* index_high; - GetIndexNodes(index, index_low, index_high); + GetIndexNodes(index, &index_low, &index_high); const Operator* load_op; if (node->opcode() == IrOpcode::kLoad) { @@ -232,7 +235,7 @@ void Int64Lowering::LowerNode(Node* node) { Node* index = node->InputAt(1); Node* index_low; Node* index_high; - GetIndexNodes(index, index_low, index_high); + GetIndexNodes(index, &index_low, &index_high); Node* value = node->InputAt(2); DCHECK(HasReplacementLow(value)); DCHECK(HasReplacementHigh(value)); @@ -291,12 +294,6 @@ void Int64Lowering::LowerNode(Node* node) { // changes. if (GetParameterCountAfterLowering(signature()) != param_count) { int old_index = ParameterIndexOf(node->op()); - // Prevent special lowering of wasm's instance or JS - // context/closure parameters. - if (old_index <= 0 || old_index > param_count) { - DefaultLowering(node); - break; - } // Adjust old_index to be compliant with the signature. --old_index; int new_index = GetParameterIndexAfterLowering(signature(), old_index); + ++new_index; NodeProperties::ChangeOp(node, common()->Parameter(new_index)); + if (old_index < 0 || old_index >= param_count) { + // Special parameters (JS closure/context) don't have kWord64 + // representation anyway.
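GetParameterIndexAfterLowering above maps a pre-lowering parameter index to its post-lowering position by counting the earlier kWord64 parameters, each of which is split into a (low, high) pair; the new clamp keeps the loop from reading past the signature for special indexes. The mapping in isolation, with a plain vector standing in for the Signature class:

#include <algorithm>
#include <vector>

enum class Rep { kWord32, kWord64 };

// Index of parameter {old_index} after every kWord64 parameter before it
// has been split into a (low, high) pair of kWord32 parameters.
int IndexAfterLowering(const std::vector<Rep>& params, int old_index) {
  int result = old_index;
  // Clamp, like the patch, so special indexes beyond the signature are safe.
  int max_to_check = std::min(old_index, static_cast<int>(params.size()));
  for (int i = 0; i < max_to_check; i++) {
    if (params[i] == Rep::kWord64) result++;
  }
  return result;
}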
+ break; + } + if (signature()->GetParam(old_index) == MachineRepresentation::kWord64) { Node* high_node = graph()->NewNode(common()->Parameter(new_index + 1), diff --git a/deps/v8/src/compiler/int64-lowering.h b/deps/v8/src/compiler/int64-lowering.h index b0838057719d64..9c77cf41a33137 100644 --- a/deps/v8/src/compiler/int64-lowering.h +++ b/deps/v8/src/compiler/int64-lowering.h @@ -59,7 +59,7 @@ class V8_EXPORT_PRIVATE Int64Lowering { bool HasReplacementHigh(Node* node); Node* GetReplacementHigh(Node* node); void PreparePhiReplacement(Node* phi); - void GetIndexNodes(Node* index, Node*& index_low, Node*& index_high); + void GetIndexNodes(Node* index, Node** index_low, Node** index_high); void ReplaceNodeWithProjections(Node* node); void LowerMemoryBaseAndIndex(Node* node); diff --git a/deps/v8/src/compiler/js-call-reducer.cc b/deps/v8/src/compiler/js-call-reducer.cc index d58331c85e3fb5..8128f899497192 100644 --- a/deps/v8/src/compiler/js-call-reducer.cc +++ b/deps/v8/src/compiler/js-call-reducer.cc @@ -179,6 +179,100 @@ Reduction JSCallReducer::ReduceMathMinMax(Node* node, const Operator* op, return Replace(value); } +// ES section #sec-math.hypot Math.hypot ( value1, value2, ...values ) +Reduction JSCallReducer::ReduceMathHypot(Node* node) { + CallParameters const& p = CallParametersOf(node->op()); + if (p.speculation_mode() == SpeculationMode::kDisallowSpeculation) { + return NoChange(); + } + if (node->op()->ValueInputCount() < 3) { + Node* value = jsgraph()->ZeroConstant(); + ReplaceWithValue(node, value); + return Replace(value); + } + Node* effect = NodeProperties::GetEffectInput(node); + Node* control = NodeProperties::GetControlInput(node); + NodeVector values(graph()->zone()); + + Node* max = effect = + graph()->NewNode(simplified()->SpeculativeToNumber( + NumberOperationHint::kNumberOrOddball, p.feedback()), + NodeProperties::GetValueInput(node, 2), effect, control); + max = graph()->NewNode(simplified()->NumberAbs(), max); + values.push_back(max); + for (int i = 3; i < node->op()->ValueInputCount(); ++i) { + Node* input = effect = graph()->NewNode( + simplified()->SpeculativeToNumber(NumberOperationHint::kNumberOrOddball, + p.feedback()), + NodeProperties::GetValueInput(node, i), effect, control); + input = graph()->NewNode(simplified()->NumberAbs(), input); + values.push_back(input); + + // Make sure {max} is NaN in the end in case any argument was NaN. + max = graph()->NewNode( + common()->Select(MachineRepresentation::kTagged), + graph()->NewNode(simplified()->NumberLessThanOrEqual(), input, max), + max, input); + } + + Node* check0 = graph()->NewNode(simplified()->NumberEqual(), max, + jsgraph()->ZeroConstant()); + Node* branch0 = + graph()->NewNode(common()->Branch(BranchHint::kFalse), check0, control); + + Node* if_true0 = graph()->NewNode(common()->IfTrue(), branch0); + Node* vtrue0 = jsgraph()->ZeroConstant(); + + Node* if_false0 = graph()->NewNode(common()->IfFalse(), branch0); + Node* vfalse0; + { + Node* check1 = graph()->NewNode(simplified()->NumberEqual(), max, + jsgraph()->Constant(V8_INFINITY)); + Node* branch1 = graph()->NewNode(common()->Branch(BranchHint::kFalse), + check1, if_false0); + + Node* if_true1 = graph()->NewNode(common()->IfTrue(), branch1); + Node* vtrue1 = jsgraph()->Constant(V8_INFINITY); + + Node* if_false1 = graph()->NewNode(common()->IfFalse(), branch1); + Node* vfalse1; + { + // Kahan summation to avoid rounding errors. + // Normalize the numbers to the largest one to avoid overflow. 
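The loop that follows builds compensated (Kahan) summation over the squared inputs, after normalizing by the maximum so that squaring cannot overflow. The same algorithm in plain C++, omitting the zero and infinity special cases handled by the branches above:

#include <algorithm>
#include <cmath>
#include <vector>

// hypot over many values: normalize by the largest magnitude so squaring
// cannot overflow, then sum with Kahan compensation, as in the graph above.
double Hypot(const std::vector<double>& values) {
  double max = 0;
  for (double v : values) max = std::max(max, std::fabs(v));
  if (max == 0) return 0;
  double sum = 0;
  double compensation = 0;
  for (double v : values) {
    double n = v / max;
    double summand = n * n - compensation;
    double preliminary = sum + summand;
    compensation = (preliminary - sum) - summand;
    sum = preliminary;
  }
  return std::sqrt(sum) * max;  // e.g. Hypot({3, 4}) == 5
}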
+ Node* sum = jsgraph()->ZeroConstant(); + Node* compensation = jsgraph()->ZeroConstant(); + for (Node* value : values) { + Node* n = graph()->NewNode(simplified()->NumberDivide(), value, max); + Node* summand = graph()->NewNode( + simplified()->NumberSubtract(), + graph()->NewNode(simplified()->NumberMultiply(), n, n), + compensation); + Node* preliminary = + graph()->NewNode(simplified()->NumberAdd(), sum, summand); + compensation = graph()->NewNode( + simplified()->NumberSubtract(), + graph()->NewNode(simplified()->NumberSubtract(), preliminary, sum), + summand); + sum = preliminary; + } + vfalse1 = graph()->NewNode( + simplified()->NumberMultiply(), + graph()->NewNode(simplified()->NumberSqrt(), sum), max); + } + + if_false0 = graph()->NewNode(common()->Merge(2), if_true1, if_false1); + vfalse0 = graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, 2), + vtrue1, vfalse1, if_false0); + } + + control = graph()->NewNode(common()->Merge(2), if_true0, if_false0); + Node* value = + graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, 2), vtrue0, + vfalse0, control); + ReplaceWithValue(node, value, effect, control); + return Replace(value); +} + Reduction JSCallReducer::Reduce(Node* node) { switch (node->opcode()) { case IrOpcode::kJSConstruct: @@ -274,6 +368,8 @@ Reduction JSCallReducer::ReduceObjectConstructor(Node* node) { // ES6 section 19.2.3.1 Function.prototype.apply ( thisArg, argArray ) Reduction JSCallReducer::ReduceFunctionPrototypeApply(Node* node) { + DisallowHeapAccessIf no_heap_access(FLAG_concurrent_inlining); + DCHECK_EQ(IrOpcode::kJSCall, node->opcode()); CallParameters const& p = CallParametersOf(node->op()); size_t arity = p.arity(); @@ -381,9 +477,17 @@ Reduction JSCallReducer::ReduceFunctionPrototypeApply(Node* node) { } } // Change {node} to the new {JSCall} operator. + // TODO(mslekova): Since this introduces a Call that will get optimized by + // the JSCallReducer, we basically might have to do all the serialization + // that we do for that here as well. The only difference is that here we + // disable speculation (cf. the empty VectorSlotPair above), causing the + // JSCallReducer to do much less work. We should revisit this later. NodeProperties::ChangeOp( node, javascript()->Call(arity, p.frequency(), VectorSlotPair(), convert_mode)); + // TODO(mslekova): Remove once ReduceJSCall is brokerized. + AllowHandleDereference allow_handle_dereference; + AllowHandleAllocation allow_handle_allocation; // Try to further reduce the JSCall {node}. Reduction const reduction = ReduceJSCall(node); return reduction.Changed() ?
reduction : Changed(node); @@ -496,6 +600,8 @@ Reduction JSCallReducer::ReduceFunctionPrototypeBind(Node* node) { // ES6 section 19.2.3.3 Function.prototype.call (thisArg, ...args) Reduction JSCallReducer::ReduceFunctionPrototypeCall(Node* node) { + DisallowHeapAccessIf no_heap_access(FLAG_concurrent_inlining); + DCHECK_EQ(IrOpcode::kJSCall, node->opcode()); CallParameters const& p = CallParametersOf(node->op()); Node* target = NodeProperties::GetValueInput(node, 0); @@ -508,6 +614,10 @@ Reduction JSCallReducer::ReduceFunctionPrototypeCall(Node* node) { HeapObjectMatcher m(target); if (m.HasValue()) { JSFunctionRef function = m.Ref(broker()).AsJSFunction(); + if (FLAG_concurrent_inlining && !function.serialized()) { + TRACE_BROKER_MISSING(broker(), "Serialize call on function " << function); + return NoChange(); + } context = jsgraph()->Constant(function.context()); } else { context = effect = graph()->NewNode( @@ -537,6 +647,9 @@ Reduction JSCallReducer::ReduceFunctionPrototypeCall(Node* node) { NodeProperties::ChangeOp( node, javascript()->Call(arity, p.frequency(), VectorSlotPair(), convert_mode)); + // TODO(mslekova): Remove once ReduceJSCall is brokerized. + AllowHandleDereference allow_handle_dereference; + AllowHandleAllocation allow_handle_allocation; // Try to further reduce the JSCall {node}. Reduction const reduction = ReduceJSCall(node); return reduction.Changed() ? reduction : Changed(node); @@ -588,7 +701,6 @@ Reduction JSCallReducer::ReduceObjectGetPrototype(Node* node, Node* object) { MapRef object_map(broker(), object_maps[i]); object_map.SerializePrototype(); if (IsSpecialReceiverInstanceType(object_map.instance_type()) || - object_map.has_hidden_prototype() || !object_map.prototype().equals(candidate_prototype)) { // We exclude special receivers, like JSProxy or API objects that // might require access checks here; we also don't want to deal @@ -1002,27 +1114,28 @@ bool CanInlineArrayIteratingBuiltin(JSHeapBroker* broker, return true; } -bool CanInlineArrayResizingBuiltin(JSHeapBroker* broker, - MapHandles const& receiver_maps, - ElementsKind* kind_return, - bool builtin_is_push = false) { +bool CanInlineArrayResizingBuiltin( + JSHeapBroker* broker, MapHandles const& receiver_maps, + std::vector<ElementsKind>& kinds, // NOLINT(runtime/references) + bool builtin_is_push = false) { DCHECK_NE(0, receiver_maps.size()); - *kind_return = MapRef(broker, receiver_maps[0]).elements_kind(); for (auto receiver_map : receiver_maps) { MapRef map(broker, receiver_map); if (!map.supports_fast_array_resize()) return false; - if (builtin_is_push) { - if (!UnionElementsKindUptoPackedness(kind_return, map.elements_kind())) { - return false; - } - } else { - // TODO(turbofan): We should also handle fast holey double elements once - // we got the hole NaN mess sorted out in TurboFan/V8. - if (map.elements_kind() == HOLEY_DOUBLE_ELEMENTS || - !UnionElementsKindUptoSize(kind_return, map.elements_kind())) { - return false; + // TODO(turbofan): We should also handle fast holey double elements once + // we got the hole NaN mess sorted out in TurboFan/V8.
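The collection loop that follows deduplicates the receiver maps' elements kinds: two kinds are merged when they differ at most in packedness, and the stored representative is widened to the holey variant when needed. A simplified standalone model of that logic (the reduced Kind enum and the helper names are illustrative stand-ins for V8's ElementsKind machinery and UnionElementsKindUptoPackedness, under a simplified reading of their semantics):

#include <cstddef>
#include <vector>

enum class Kind {
  kPackedSmi, kHoleySmi, kPackedDouble, kHoleyDouble, kPackedObject, kHoleyObject
};

// Succeed when |*a| and |b| are in the same family (Smi, double, object),
// widening the representative to the holey variant if |b| is holey.
bool UnionUptoPackedness(Kind* a, Kind b) {
  auto family = [](Kind k) {
    switch (k) {
      case Kind::kPackedSmi:
      case Kind::kHoleySmi:
        return 0;
      case Kind::kPackedDouble:
      case Kind::kHoleyDouble:
        return 1;
      default:
        return 2;
    }
  };
  auto is_holey = [](Kind k) {
    return k == Kind::kHoleySmi || k == Kind::kHoleyDouble ||
           k == Kind::kHoleyObject;
  };
  if (family(*a) != family(b)) return false;
  if (is_holey(b)) *a = b;  // widen to the holey representative
  return true;
}

// Mirrors the collection loop below: keep one representative kind per family.
std::vector<Kind> CollectKinds(const std::vector<Kind>& map_kinds) {
  std::vector<Kind> kinds;
  for (Kind current : map_kinds) {
    size_t i = 0;
    while (i < kinds.size() && !UnionUptoPackedness(&kinds[i], current)) i++;
    if (i == kinds.size()) kinds.push_back(current);
  }
  return kinds;
}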
+ if (map.elements_kind() == HOLEY_DOUBLE_ELEMENTS && !builtin_is_push) { + return false; + } + ElementsKind current_kind = map.elements_kind(); + auto kind_ptr = kinds.data(); + size_t i; + for (i = 0; i < kinds.size(); i++, kind_ptr++) { + if (UnionElementsKindUptoPackedness(kind_ptr, current_kind)) { + break; } } + if (i == kinds.size()) kinds.push_back(current_kind); } return true; } @@ -2735,6 +2848,8 @@ Reduction JSCallReducer::ReduceArraySome(Node* node, Reduction JSCallReducer::ReduceCallApiFunction( Node* node, const SharedFunctionInfoRef& shared) { + DisallowHeapAccessIf no_heap_access(FLAG_concurrent_inlining); + DCHECK_EQ(IrOpcode::kJSCall, node->opcode()); CallParameters const& p = CallParametersOf(node->op()); int const argc = static_cast<int>(p.arity()) - 2; @@ -2750,78 +2865,21 @@ Reduction JSCallReducer::ReduceCallApiFunction( Node* context = NodeProperties::GetContextInput(node); Node* frame_state = NodeProperties::GetFrameStateInput(node); - // See if we can optimize this API call to {shared}. - Handle<FunctionTemplateInfo> function_template_info( - FunctionTemplateInfo::cast(shared.object()->function_data()), isolate()); - CallOptimization call_optimization(isolate(), function_template_info); - if (!call_optimization.is_simple_api_call()) return NoChange(); - - // Try to infer the {receiver} maps from the graph. - MapInference inference(broker(), receiver, effect); - if (inference.HaveMaps()) { - MapHandles const& receiver_maps = inference.GetMaps(); - - // Check that all {receiver_maps} are actually JSReceiver maps and - // that the {function_template_info} accepts them without access - // checks (even if "access check needed" is set for {receiver}). - // - // Note that we don't need to know the concrete {receiver} maps here, - // meaning it's fine if the {receiver_maps} are unreliable, and we also - // don't need to install any stability dependencies, since the only - // relevant information regarding the {receiver} is the Map::constructor - // field on the root map (which is different from the JavaScript exposed - // "constructor" property) and that field cannot change. - // - // So if we know that {receiver} had a certain constructor at some point - // in the past (i.e. it had a certain map), then this constructor is going - // to be the same later, since this information cannot change with map - // transitions. - // - // The same is true for the instance type, e.g. we still know that the - // instance type is JSObject even if that information is unreliable, and - // the "access check needed" bit, which also cannot change later. - for (Handle<Map> map : receiver_maps) { - MapRef receiver_map(broker(), map); - if (!receiver_map.IsJSReceiverMap() || - (receiver_map.is_access_check_needed() && - !function_template_info->accept_any_receiver())) { - return inference.NoChange(); - } - } - - // See if we can constant-fold the compatible receiver checks.
- CallOptimization::HolderLookup lookup; - Handle<JSObject> api_holder = - call_optimization.LookupHolderOfExpectedType(receiver_maps[0], &lookup); - if (lookup == CallOptimization::kHolderNotFound) - return inference.NoChange(); - for (size_t i = 1; i < receiver_maps.size(); ++i) { - CallOptimization::HolderLookup lookupi; - Handle<JSObject> holderi = call_optimization.LookupHolderOfExpectedType( - receiver_maps[i], &lookupi); - if (lookup != lookupi) return inference.NoChange(); - if (!api_holder.is_identical_to(holderi)) return inference.NoChange(); - } + if (!shared.function_template_info().has_value()) { + TRACE_BROKER_MISSING( + broker(), "FunctionTemplateInfo for function with SFI " << shared); + return NoChange(); + } - if (p.speculation_mode() == SpeculationMode::kDisallowSpeculation && - !inference.RelyOnMapsViaStability(dependencies())) { - // We were not able to make the receiver maps reliable without map checks - // but doing map checks would lead to deopt loops, so give up. - return inference.NoChange(); - } + // See if we can optimize this API call to {shared}. + FunctionTemplateInfoRef function_template_info( + shared.function_template_info().value()); - // TODO(neis): The maps were used in a way that does not actually require - // map checks or stability dependencies. - inference.RelyOnMapsPreferStability(dependencies(), jsgraph(), &effect, - control, p.feedback()); + if (!function_template_info.has_call_code()) return NoChange(); - // Determine the appropriate holder for the {lookup}. - holder = lookup == CallOptimization::kHolderFound - ? jsgraph()->HeapConstant(api_holder) - : receiver; - } else if (function_template_info->accept_any_receiver() && - function_template_info->signature().IsUndefined(isolate())) { - // We haven't found any {receiver_maps}, but we might still be able to + if (function_template_info.accept_any_receiver() && + function_template_info.is_signature_undefined()) { + // We might be able to // optimize the API call depending on the {function_template_info}. // If the API function accepts any kind of {receiver}, we only need to // ensure that the {receiver} is actually a JSReceiver at this point, @@ -2840,51 +2898,127 @@ Reduction JSCallReducer::ReduceCallApiFunction( graph()->NewNode(simplified()->ConvertReceiver(p.convert_mode()), receiver, global_proxy, effect, control); } else { - // We don't have enough information to eliminate the access check - // and/or the compatible receiver check, so use the generic builtin - // that does those checks dynamically. This is still significantly - // faster than the generic call sequence. - Builtins::Name builtin_name = - !function_template_info->accept_any_receiver() - ? (function_template_info->signature().IsUndefined(isolate()) - ? Builtins::kCallFunctionTemplate_CheckAccess - : Builtins:: - kCallFunctionTemplate_CheckAccessAndCompatibleReceiver) - : Builtins::kCallFunctionTemplate_CheckCompatibleReceiver; - - // The CallFunctionTemplate builtin requires the {receiver} to be - // an actual JSReceiver, so make sure we do the proper conversion - // first if necessary. - receiver = holder = effect = - graph()->NewNode(simplified()->ConvertReceiver(p.convert_mode()), - receiver, global_proxy, effect, control); + // Try to infer the {receiver} maps from the graph. + MapInference inference(broker(), receiver, effect); + if (inference.HaveMaps()) { + MapHandles const& receiver_maps = inference.GetMaps(); + MapRef first_receiver_map(broker(), receiver_maps[0]); + + // See if we can constant-fold the compatible receiver checks.
+ HolderLookupResult api_holder = + function_template_info.LookupHolderOfExpectedType(first_receiver_map, + false); + if (api_holder.lookup == CallOptimization::kHolderNotFound) + return inference.NoChange(); - Callable callable = Builtins::CallableFor(isolate(), builtin_name); - auto call_descriptor = Linkage::GetStubCallDescriptor( - graph()->zone(), callable.descriptor(), - argc + 1 /* implicit receiver */, CallDescriptor::kNeedsFrameState); - node->InsertInput(graph()->zone(), 0, - jsgraph()->HeapConstant(callable.code())); - node->ReplaceInput(1, jsgraph()->HeapConstant(function_template_info)); - node->InsertInput(graph()->zone(), 2, jsgraph()->Constant(argc)); - node->ReplaceInput(3, receiver); // Update receiver input. - node->ReplaceInput(6 + argc, effect); // Update effect input. - NodeProperties::ChangeOp(node, common()->Call(call_descriptor)); - return Changed(node); + // Check that all {receiver_maps} are actually JSReceiver maps and + // that the {function_template_info} accepts them without access + // checks (even if "access check needed" is set for {receiver}). + // + // Note that we don't need to know the concrete {receiver} maps here, + // meaning it's fine if the {receiver_maps} are unreliable, and we also + // don't need to install any stability dependencies, since the only + // relevant information regarding the {receiver} is the Map::constructor + // field on the root map (which is different from the JavaScript exposed + // "constructor" property) and that field cannot change. + // + // So if we know that {receiver} had a certain constructor at some point + // in the past (i.e. it had a certain map), then this constructor is going + // to be the same later, since this information cannot change with map + // transitions. + // + // The same is true for the instance type, e.g. we still know that the + // instance type is JSObject even if that information is unreliable, and + // the "access check needed" bit, which also cannot change later. + CHECK(first_receiver_map.IsJSReceiverMap()); + CHECK(!first_receiver_map.is_access_check_needed() || + function_template_info.accept_any_receiver()); + + for (size_t i = 1; i < receiver_maps.size(); ++i) { + MapRef receiver_map(broker(), receiver_maps[i]); + HolderLookupResult holder_i = + function_template_info.LookupHolderOfExpectedType(receiver_map, + false); + + if (api_holder.lookup != holder_i.lookup) return inference.NoChange(); + if (!(api_holder.holder.has_value() && holder_i.holder.has_value())) + return inference.NoChange(); + if (!api_holder.holder->equals(*holder_i.holder)) + return inference.NoChange(); + + CHECK(receiver_map.IsJSReceiverMap()); + CHECK(!receiver_map.is_access_check_needed() || + function_template_info.accept_any_receiver()); + } + + if (p.speculation_mode() == SpeculationMode::kDisallowSpeculation && + !inference.RelyOnMapsViaStability(dependencies())) { + // We were not able to make the receiver maps reliable without map + // checks but doing map checks would lead to deopt loops, so give up. + return inference.NoChange(); + } + + // TODO(neis): The maps were used in a way that does not actually require + // map checks or stability dependencies. + inference.RelyOnMapsPreferStability(dependencies(), jsgraph(), &effect, + control, p.feedback()); + + // Determine the appropriate holder for the {lookup}. + holder = api_holder.lookup == CallOptimization::kHolderFound + ? 
jsgraph()->Constant(*api_holder.holder) + : receiver; + } else { + // We don't have enough information to eliminate the access check + // and/or the compatible receiver check, so use the generic builtin + // that does those checks dynamically. This is still significantly + // faster than the generic call sequence. + Builtins::Name builtin_name; + if (function_template_info.accept_any_receiver()) { + builtin_name = Builtins::kCallFunctionTemplate_CheckCompatibleReceiver; + } else if (function_template_info.is_signature_undefined()) { + builtin_name = Builtins::kCallFunctionTemplate_CheckAccess; + } else { + builtin_name = + Builtins::kCallFunctionTemplate_CheckAccessAndCompatibleReceiver; + } + + // The CallFunctionTemplate builtin requires the {receiver} to be + // an actual JSReceiver, so make sure we do the proper conversion + // first if necessary. + receiver = holder = effect = + graph()->NewNode(simplified()->ConvertReceiver(p.convert_mode()), + receiver, global_proxy, effect, control); + + Callable callable = Builtins::CallableFor(isolate(), builtin_name); + auto call_descriptor = Linkage::GetStubCallDescriptor( + graph()->zone(), callable.descriptor(), + argc + 1 /* implicit receiver */, CallDescriptor::kNeedsFrameState); + node->InsertInput(graph()->zone(), 0, + jsgraph()->HeapConstant(callable.code())); + node->ReplaceInput(1, jsgraph()->Constant(function_template_info)); + node->InsertInput(graph()->zone(), 2, jsgraph()->Constant(argc)); + node->ReplaceInput(3, receiver); // Update receiver input. + node->ReplaceInput(6 + argc, effect); // Update effect input. + NodeProperties::ChangeOp(node, common()->Call(call_descriptor)); + return Changed(node); + } } // TODO(turbofan): Consider introducing a JSCallApiCallback operator for // this and lower it during JSGenericLowering, and unify this with the // JSNativeContextSpecialization::InlineApiCall method a bit. - Handle<CallHandlerInfo> call_handler_info( - CallHandlerInfo::cast(function_template_info->call_code()), isolate()); - Handle<Object> data(call_handler_info->data(), isolate()); + if (!function_template_info.call_code().has_value()) { + TRACE_BROKER_MISSING(broker(), "call code for function template info " + << function_template_info); + return NoChange(); + } + CallHandlerInfoRef call_handler_info = *function_template_info.call_code(); Callable call_api_callback = CodeFactory::CallApiCallback(isolate()); CallInterfaceDescriptor cid = call_api_callback.descriptor(); auto call_descriptor = Linkage::GetStubCallDescriptor( graph()->zone(), cid, argc + 1 /* implicit receiver */, CallDescriptor::kNeedsFrameState); - ApiFunction api_function(v8::ToCData<Address>
(call_handler_info->callback())); + ApiFunction api_function(call_handler_info.callback()); ExternalReference function_reference = ExternalReference::Create( &api_function, ExternalReference::DIRECT_API_CALL); @@ -2895,7 +3029,8 @@ Reduction JSCallReducer::ReduceCallApiFunction( jsgraph()->HeapConstant(call_api_callback.code())); node->ReplaceInput(1, jsgraph()->ExternalConstant(function_reference)); node->InsertInput(graph()->zone(), 2, jsgraph()->Constant(argc)); - node->InsertInput(graph()->zone(), 3, jsgraph()->Constant(data)); + node->InsertInput(graph()->zone(), 3, + jsgraph()->Constant(call_handler_info.data())); node->InsertInput(graph()->zone(), 4, holder); node->ReplaceInput(5, receiver); // Update receiver input. node->ReplaceInput(7 + argc, continuation_frame_state); @@ -3495,6 +3630,8 @@ Reduction JSCallReducer::ReduceJSCall(Node* node, return ReduceMathUnary(node, simplified()->NumberFloor()); case Builtins::kMathFround: return ReduceMathUnary(node, simplified()->NumberFround()); + case Builtins::kMathHypot: + return ReduceMathHypot(node); case Builtins::kMathLog: return ReduceMathUnary(node, simplified()->NumberLog()); case Builtins::kMathLog1p: @@ -3563,8 +3700,8 @@ Reduction JSCallReducer::ReduceJSCall(Node* node, return ReduceStringPrototypeStringAt(simplified()->StringCharCodeAt(), node); case Builtins::kStringPrototypeCodePointAt: - return ReduceStringPrototypeStringAt( - simplified()->StringCodePointAt(UnicodeEncoding::UTF32), node); + return ReduceStringPrototypeStringAt(simplified()->StringCodePointAt(), + node); case Builtins::kStringPrototypeSubstring: return ReduceStringPrototypeSubstring(node); case Builtins::kStringPrototypeSlice: @@ -3642,18 +3779,23 @@ Reduction JSCallReducer::ReduceJSCall(Node* node, return ReduceDateNow(node); case Builtins::kNumberConstructor: return ReduceNumberConstructor(node); + case Builtins::kBigIntAsUintN: + return ReduceBigIntAsUintN(node); default: break; } - if (!TracingFlags::is_runtime_stats_enabled() && - shared.object()->IsApiFunction()) { + if (shared.object()->IsApiFunction()) { return ReduceCallApiFunction(node, shared); } return NoChange(); } Reduction JSCallReducer::ReduceJSCallWithArrayLike(Node* node) { + // TODO(mslekova): Remove once ReduceJSCallWithArrayLike is brokerized. 
+ AllowHandleDereference allow_handle_dereference; + AllowHandleAllocation allow_handle_allocation; + DCHECK_EQ(IrOpcode::kJSCallWithArrayLike, node->opcode()); CallFrequency frequency = CallFrequencyOf(node->op()); VectorSlotPair feedback; @@ -4250,6 +4392,52 @@ Reduction JSCallReducer::ReduceSoftDeoptimize(Node* node, return Changed(node); } +Node* JSCallReducer::LoadReceiverElementsKind(Node* receiver, Node** effect, + Node** control) { + Node* receiver_map = *effect = + graph()->NewNode(simplified()->LoadField(AccessBuilder::ForMap()), + receiver, *effect, *control); + Node* receiver_bit_field2 = *effect = graph()->NewNode( + simplified()->LoadField(AccessBuilder::ForMapBitField2()), receiver_map, + *effect, *control); + Node* receiver_elements_kind = graph()->NewNode( + simplified()->NumberShiftRightLogical(), + graph()->NewNode(simplified()->NumberBitwiseAnd(), receiver_bit_field2, + jsgraph()->Constant(Map::ElementsKindBits::kMask)), + jsgraph()->Constant(Map::ElementsKindBits::kShift)); + return receiver_elements_kind; +} + +void JSCallReducer::CheckIfElementsKind(Node* receiver_elements_kind, + ElementsKind kind, Node* control, + Node** if_true, Node** if_false) { + Node* is_packed_kind = + graph()->NewNode(simplified()->NumberEqual(), receiver_elements_kind, + jsgraph()->Constant(GetPackedElementsKind(kind))); + Node* packed_branch = + graph()->NewNode(common()->Branch(), is_packed_kind, control); + Node* if_packed = graph()->NewNode(common()->IfTrue(), packed_branch); + + if (IsHoleyElementsKind(kind)) { + Node* if_not_packed = graph()->NewNode(common()->IfFalse(), packed_branch); + Node* is_holey_kind = + graph()->NewNode(simplified()->NumberEqual(), receiver_elements_kind, + jsgraph()->Constant(GetHoleyElementsKind(kind))); + Node* holey_branch = + graph()->NewNode(common()->Branch(), is_holey_kind, if_not_packed); + Node* if_holey = graph()->NewNode(common()->IfTrue(), holey_branch); + + Node* if_not_packed_not_holey = + graph()->NewNode(common()->IfFalse(), holey_branch); + + *if_true = graph()->NewNode(common()->Merge(2), if_packed, if_holey); + *if_false = if_not_packed_not_holey; + } else { + *if_true = if_packed; + *if_false = graph()->NewNode(common()->IfFalse(), packed_branch); + } +} + // ES6 section 22.1.3.18 Array.prototype.push ( ) Reduction JSCallReducer::ReduceArrayPrototypePush(Node* node) { DCHECK_EQ(IrOpcode::kJSCall, node->opcode()); @@ -4267,81 +4455,121 @@ Reduction JSCallReducer::ReduceArrayPrototypePush(Node* node) { if (!inference.HaveMaps()) return NoChange(); MapHandles const& receiver_maps = inference.GetMaps(); - ElementsKind kind; - if (!CanInlineArrayResizingBuiltin(broker(), receiver_maps, &kind, true)) { + std::vector<ElementsKind> kinds; + if (!CanInlineArrayResizingBuiltin(broker(), receiver_maps, kinds, true)) { return inference.NoChange(); } if (!dependencies()->DependOnNoElementsProtector()) UNREACHABLE(); inference.RelyOnMapsPreferStability(dependencies(), jsgraph(), &effect, control, p.feedback()); - // Collect the value inputs to push. - std::vector<Node*> values(num_values); - for (int i = 0; i < num_values; ++i) { - values[i] = NodeProperties::GetValueInput(node, 2 + i); - } - - for (auto& value : values) { - if (IsSmiElementsKind(kind)) { - value = effect = graph()->NewNode(simplified()->CheckSmi(p.feedback()), - value, effect, control); - } else if (IsDoubleElementsKind(kind)) { - value = effect = graph()->NewNode(simplified()->CheckNumber(p.feedback()), - value, effect, control); - // Make sure we do not store signaling NaNs into double arrays.
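The NumberSilenceNaN node that reappears just below exists because fast double arrays reserve one specific NaN bit pattern to represent the hole, so any other NaN payload must be canonicalized before it is stored. A scalar sketch of the guarantee it provides (the function name is illustrative; the authoritative hole encoding lives in V8's internals):

#include <cmath>
#include <limits>

// Canonicalize NaNs before a store into a fast double backing store: no
// stored value may alias the bit pattern reserved for "the hole".
double SilenceNaN(double value) {
  return std::isnan(value) ? std::numeric_limits<double>::quiet_NaN() : value;
}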
- value = graph()->NewNode(simplified()->NumberSilenceNaN(), value); + std::vector<Node*> controls_to_merge; + std::vector<Node*> effects_to_merge; + std::vector<Node*> values_to_merge; + Node* return_value = jsgraph()->UndefinedConstant(); + + Node* receiver_elements_kind = + LoadReceiverElementsKind(receiver, &effect, &control); + Node* next_control = control; + Node* next_effect = effect; + for (size_t i = 0; i < kinds.size(); i++) { + ElementsKind kind = kinds[i]; + control = next_control; + effect = next_effect; + // We do not need branch for the last elements kind. + if (i != kinds.size() - 1) { + CheckIfElementsKind(receiver_elements_kind, kind, control, &control, + &next_control); } - } - // Load the "length" property of the {receiver}. - Node* length = effect = graph()->NewNode( - simplified()->LoadField(AccessBuilder::ForJSArrayLength(kind)), receiver, - effect, control); - Node* value = length; + // Collect the value inputs to push. + std::vector<Node*> values(num_values); + for (int i = 0; i < num_values; ++i) { + values[i] = NodeProperties::GetValueInput(node, 2 + i); + } - // Check if we have any {values} to push. - if (num_values > 0) { - // Compute the resulting "length" of the {receiver}. - Node* new_length = value = graph()->NewNode( - simplified()->NumberAdd(), length, jsgraph()->Constant(num_values)); + for (auto& value : values) { + if (IsSmiElementsKind(kind)) { + value = effect = graph()->NewNode(simplified()->CheckSmi(p.feedback()), + value, effect, control); + } else if (IsDoubleElementsKind(kind)) { + value = effect = graph()->NewNode( + simplified()->CheckNumber(p.feedback()), value, effect, control); + // Make sure we do not store signaling NaNs into double arrays. + value = graph()->NewNode(simplified()->NumberSilenceNaN(), value); + } + } - // Load the elements backing store of the {receiver}. - Node* elements = effect = graph()->NewNode( - simplified()->LoadField(AccessBuilder::ForJSObjectElements()), receiver, - effect, control); - Node* elements_length = effect = graph()->NewNode( - simplified()->LoadField(AccessBuilder::ForFixedArrayLength()), elements, - effect, control); + // Load the "length" property of the {receiver}. + Node* length = effect = graph()->NewNode( + simplified()->LoadField(AccessBuilder::ForJSArrayLength(kind)), + receiver, effect, control); + return_value = length; - GrowFastElementsMode mode = - IsDoubleElementsKind(kind) ? GrowFastElementsMode::kDoubleElements - : GrowFastElementsMode::kSmiOrObjectElements; - elements = effect = graph()->NewNode( - simplified()->MaybeGrowFastElements(mode, p.feedback()), receiver, - elements, - graph()->NewNode(simplified()->NumberAdd(), length, - jsgraph()->Constant(num_values - 1)), - elements_length, effect, control); - - // Update the JSArray::length field. Since this is observable, - // there must be no other check after this. - effect = graph()->NewNode( - simplified()->StoreField(AccessBuilder::ForJSArrayLength(kind)), - receiver, new_length, effect, control); + // Check if we have any {values} to push. + if (num_values > 0) { + // Compute the resulting "length" of the {receiver}. + Node* new_length = return_value = graph()->NewNode( + simplified()->NumberAdd(), length, jsgraph()->Constant(num_values)); - // Append the {values} to the {elements}. - for (int i = 0; i < num_values; ++i) { - Node* value = values[i]; - Node* index = graph()->NewNode(simplified()->NumberAdd(), length, - jsgraph()->Constant(i)); + // Load the elements backing store of the {receiver}.
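In scalar form, the push fast path being built here is: read the length, grow the backing store if needed, publish the new length (the one observable step), then write each value at its final index. A minimal sketch under simplified assumptions (a std::vector stands in for the elements fixed array, lengths are doubles as in the Number-typed graph, and the helper name is illustrative):

#include <cstddef>
#include <vector>

double PushSketch(std::vector<double>& elements, double* length,
                  const std::vector<double>& values) {
  double old_length = *length;
  double new_length = old_length + static_cast<double>(values.size());
  if (elements.size() < static_cast<size_t>(new_length)) {
    elements.resize(static_cast<size_t>(new_length));  // MaybeGrowFastElements
  }
  *length = new_length;  // the observable JSArray::length update comes first
  for (size_t i = 0; i < values.size(); ++i) {
    elements[static_cast<size_t>(old_length) + i] = values[i];
  }
  return new_length;  // Array.prototype.push returns the new length
}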
+ Node* elements = effect = graph()->NewNode( + simplified()->LoadField(AccessBuilder::ForJSObjectElements()), + receiver, effect, control); + Node* elements_length = effect = graph()->NewNode( + simplified()->LoadField(AccessBuilder::ForFixedArrayLength()), + elements, effect, control); + + GrowFastElementsMode mode = + IsDoubleElementsKind(kind) + ? GrowFastElementsMode::kDoubleElements + : GrowFastElementsMode::kSmiOrObjectElements; + elements = effect = graph()->NewNode( + simplified()->MaybeGrowFastElements(mode, p.feedback()), receiver, + elements, + graph()->NewNode(simplified()->NumberAdd(), length, + jsgraph()->Constant(num_values - 1)), + elements_length, effect, control); + + // Update the JSArray::length field. Since this is observable, + // there must be no other check after this. effect = graph()->NewNode( - simplified()->StoreElement(AccessBuilder::ForFixedArrayElement(kind)), - elements, index, value, effect, control); + simplified()->StoreField(AccessBuilder::ForJSArrayLength(kind)), + receiver, new_length, effect, control); + + // Append the {values} to the {elements}. + for (int i = 0; i < num_values; ++i) { + Node* value = values[i]; + Node* index = graph()->NewNode(simplified()->NumberAdd(), length, + jsgraph()->Constant(i)); + effect = + graph()->NewNode(simplified()->StoreElement( + AccessBuilder::ForFixedArrayElement(kind)), + elements, index, value, effect, control); + } } + + controls_to_merge.push_back(control); + effects_to_merge.push_back(effect); + values_to_merge.push_back(return_value); } - ReplaceWithValue(node, value, effect, control); - return Replace(value); + if (controls_to_merge.size() > 1) { + int const count = static_cast<int>(controls_to_merge.size()); + + control = graph()->NewNode(common()->Merge(count), count, + &controls_to_merge.front()); + effects_to_merge.push_back(control); + effect = graph()->NewNode(common()->EffectPhi(count), count + 1, + &effects_to_merge.front()); + values_to_merge.push_back(control); + return_value = + graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, count), + count + 1, &values_to_merge.front()); + } + + ReplaceWithValue(node, return_value, effect, control); + return Replace(return_value); } // ES6 section 22.1.3.17 Array.prototype.pop ( ) @@ -4360,79 +4588,117 @@ Reduction JSCallReducer::ReduceArrayPrototypePop(Node* node) { if (!inference.HaveMaps()) return NoChange(); MapHandles const& receiver_maps = inference.GetMaps(); - ElementsKind kind; - if (!CanInlineArrayResizingBuiltin(broker(), receiver_maps, &kind)) { + std::vector<ElementsKind> kinds; + if (!CanInlineArrayResizingBuiltin(broker(), receiver_maps, kinds)) { return inference.NoChange(); } if (!dependencies()->DependOnNoElementsProtector()) UNREACHABLE(); inference.RelyOnMapsPreferStability(dependencies(), jsgraph(), &effect, control, p.feedback()); - // Load the "length" property of the {receiver}. - Node* length = effect = graph()->NewNode( - simplified()->LoadField(AccessBuilder::ForJSArrayLength(kind)), receiver, - effect, control); + std::vector<Node*> controls_to_merge; + std::vector<Node*> effects_to_merge; + std::vector<Node*> values_to_merge; + Node* value = jsgraph()->UndefinedConstant(); + + Node* receiver_elements_kind = + LoadReceiverElementsKind(receiver, &effect, &control); + Node* next_control = control; + Node* next_effect = effect; + for (size_t i = 0; i < kinds.size(); i++) { + ElementsKind kind = kinds[i]; + control = next_control; + effect = next_effect; + // We do not need branch for the last elements kind.
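The per-kind loop above (and its twins in push and shift) unrolls into a chain of guarded blocks: each collected kind tests the receiver's elements kind and, on a match, runs a body specialized for that kind, while the final kind runs unguarded because the collected kinds are exhaustive for the inferred receiver maps; the per-kind exits are then re-merged with Merge/EffectPhi/Phi nodes. A scalar model of that control shape (KindModel and the integer codes are illustrative; CheckIfElementsKind likewise accepts both the packed and the holey encoding of a holey representative):

#include <cstddef>
#include <functional>
#include <vector>

struct KindModel {
  int packed_code;
  int holey_code;
  bool is_holey;  // representative was widened to the holey variant
};

bool Matches(const KindModel& k, int receiver_kind) {
  return receiver_kind == k.packed_code ||
         (k.is_holey && receiver_kind == k.holey_code);
}

void DispatchOnKinds(int receiver_kind, const std::vector<KindModel>& kinds,
                     const std::function<void(const KindModel&)>& body) {
  for (size_t i = 0; i < kinds.size(); i++) {
    // The last kind needs no guard, exactly as in the reducer.
    if (i == kinds.size() - 1 || Matches(kinds[i], receiver_kind)) {
      body(kinds[i]);
      return;
    }
  }
}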
+ if (i != kinds.size() - 1) { + CheckIfElementsKind(receiver_elements_kind, kind, control, &control, + &next_control); + } - // Check if the {receiver} has any elements. - Node* check = graph()->NewNode(simplified()->NumberEqual(), length, - jsgraph()->ZeroConstant()); - Node* branch = - graph()->NewNode(common()->Branch(BranchHint::kFalse), check, control); + // Load the "length" property of the {receiver}. + Node* length = effect = graph()->NewNode( + simplified()->LoadField(AccessBuilder::ForJSArrayLength(kind)), + receiver, effect, control); - Node* if_true = graph()->NewNode(common()->IfTrue(), branch); - Node* etrue = effect; - Node* vtrue = jsgraph()->UndefinedConstant(); + // Check if the {receiver} has any elements. + Node* check = graph()->NewNode(simplified()->NumberEqual(), length, + jsgraph()->ZeroConstant()); + Node* branch = + graph()->NewNode(common()->Branch(BranchHint::kFalse), check, control); - Node* if_false = graph()->NewNode(common()->IfFalse(), branch); - Node* efalse = effect; - Node* vfalse; - { - // TODO(tebbi): We should trim the backing store if the capacity is too - // big, as implemented in elements.cc:ElementsAccessorBase::SetLengthImpl. - - // Load the elements backing store from the {receiver}. - Node* elements = efalse = graph()->NewNode( - simplified()->LoadField(AccessBuilder::ForJSObjectElements()), receiver, - efalse, if_false); - - // Ensure that we aren't popping from a copy-on-write backing store. - if (IsSmiOrObjectElementsKind(kind)) { - elements = efalse = - graph()->NewNode(simplified()->EnsureWritableFastElements(), receiver, - elements, efalse, if_false); - } + Node* if_true = graph()->NewNode(common()->IfTrue(), branch); + Node* etrue = effect; + Node* vtrue = jsgraph()->UndefinedConstant(); - // Compute the new {length}. - length = graph()->NewNode(simplified()->NumberSubtract(), length, - jsgraph()->OneConstant()); + Node* if_false = graph()->NewNode(common()->IfFalse(), branch); + Node* efalse = effect; + Node* vfalse; + { + // TODO(tebbi): We should trim the backing store if the capacity is too + // big, as implemented in elements.cc:ElementsAccessorBase::SetLengthImpl. + + // Load the elements backing store from the {receiver}. + Node* elements = efalse = graph()->NewNode( + simplified()->LoadField(AccessBuilder::ForJSObjectElements()), + receiver, efalse, if_false); + + // Ensure that we aren't popping from a copy-on-write backing store. + if (IsSmiOrObjectElementsKind(kind)) { + elements = efalse = + graph()->NewNode(simplified()->EnsureWritableFastElements(), + receiver, elements, efalse, if_false); + } + + // Compute the new {length}. + length = graph()->NewNode(simplified()->NumberSubtract(), length, + jsgraph()->OneConstant()); - // Store the new {length} to the {receiver}. - efalse = graph()->NewNode( - simplified()->StoreField(AccessBuilder::ForJSArrayLength(kind)), - receiver, length, efalse, if_false); + // Store the new {length} to the {receiver}. + efalse = graph()->NewNode( + simplified()->StoreField(AccessBuilder::ForJSArrayLength(kind)), + receiver, length, efalse, if_false); + + // Load the last entry from the {elements}. + vfalse = efalse = graph()->NewNode( + simplified()->LoadElement(AccessBuilder::ForFixedArrayElement(kind)), + elements, length, efalse, if_false); + + // Store a hole to the element we just removed from the {receiver}. 
+ efalse = graph()->NewNode( + simplified()->StoreElement( + AccessBuilder::ForFixedArrayElement(GetHoleyElementsKind(kind))), + elements, length, jsgraph()->TheHoleConstant(), efalse, if_false); + } + + control = graph()->NewNode(common()->Merge(2), if_true, if_false); + effect = graph()->NewNode(common()->EffectPhi(2), etrue, efalse, control); + value = graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, 2), + vtrue, vfalse, control); - // Load the last entry from the {elements}. - vfalse = efalse = graph()->NewNode( - simplified()->LoadElement(AccessBuilder::ForFixedArrayElement(kind)), - elements, length, efalse, if_false); + // Convert the hole to undefined. Do this last, so that we can optimize + // conversion operator via some smart strength reduction in many cases. + if (IsHoleyElementsKind(kind)) { + value = + graph()->NewNode(simplified()->ConvertTaggedHoleToUndefined(), value); + } - // Store a hole to the element we just removed from the {receiver}. - efalse = graph()->NewNode( - simplified()->StoreElement( - AccessBuilder::ForFixedArrayElement(GetHoleyElementsKind(kind))), - elements, length, jsgraph()->TheHoleConstant(), efalse, if_false); + controls_to_merge.push_back(control); + effects_to_merge.push_back(effect); + values_to_merge.push_back(value); } - control = graph()->NewNode(common()->Merge(2), if_true, if_false); - effect = graph()->NewNode(common()->EffectPhi(2), etrue, efalse, control); - Node* value = graph()->NewNode( - common()->Phi(MachineRepresentation::kTagged, 2), vtrue, vfalse, control); + if (controls_to_merge.size() > 1) { + int const count = static_cast<int>(controls_to_merge.size()); - // Convert the hole to undefined. Do this last, so that we can optimize - // conversion operator via some smart strength reduction in many cases. - if (IsHoleyElementsKind(kind)) { + control = graph()->NewNode(common()->Merge(count), count, + &controls_to_merge.front()); + effects_to_merge.push_back(control); + effect = graph()->NewNode(common()->EffectPhi(count), count + 1, + &effects_to_merge.front()); + values_to_merge.push_back(control); value = - graph()->NewNode(simplified()->ConvertTaggedHoleToUndefined(), value); + graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, count), + count + 1, &values_to_merge.front()); } ReplaceWithValue(node, value, effect, control); @@ -4458,151 +4724,172 @@ Reduction JSCallReducer::ReduceArrayPrototypeShift(Node* node) { if (!inference.HaveMaps()) return NoChange(); MapHandles const& receiver_maps = inference.GetMaps(); - ElementsKind kind; - if (!CanInlineArrayResizingBuiltin(broker(), receiver_maps, &kind)) { + std::vector<ElementsKind> kinds; + if (!CanInlineArrayResizingBuiltin(broker(), receiver_maps, kinds)) { return inference.NoChange(); } if (!dependencies()->DependOnNoElementsProtector()) UNREACHABLE(); inference.RelyOnMapsPreferStability(dependencies(), jsgraph(), &effect, control, p.feedback()); - // Load length of the {receiver}.
- Node* length = effect = graph()->NewNode( - simplified()->LoadField(AccessBuilder::ForJSArrayLength(kind)), receiver, - effect, control); + std::vector<Node*> controls_to_merge; + std::vector<Node*> effects_to_merge; + std::vector<Node*> values_to_merge; + Node* value = jsgraph()->UndefinedConstant(); + + Node* receiver_elements_kind = + LoadReceiverElementsKind(receiver, &effect, &control); + Node* next_control = control; + Node* next_effect = effect; + for (size_t i = 0; i < kinds.size(); i++) { + ElementsKind kind = kinds[i]; + control = next_control; + effect = next_effect; + // We do not need branch for the last elements kind. + if (i != kinds.size() - 1) { + CheckIfElementsKind(receiver_elements_kind, kind, control, &control, + &next_control); + } - // Return undefined if {receiver} has no elements. - Node* check0 = graph()->NewNode(simplified()->NumberEqual(), length, - jsgraph()->ZeroConstant()); - Node* branch0 = - graph()->NewNode(common()->Branch(BranchHint::kFalse), check0, control); + // Load length of the {receiver}. + Node* length = effect = graph()->NewNode( + simplified()->LoadField(AccessBuilder::ForJSArrayLength(kind)), + receiver, effect, control); - Node* if_true0 = graph()->NewNode(common()->IfTrue(), branch0); - Node* etrue0 = effect; - Node* vtrue0 = jsgraph()->UndefinedConstant(); + // Return undefined if {receiver} has no elements. + Node* check0 = graph()->NewNode(simplified()->NumberEqual(), length, + jsgraph()->ZeroConstant()); + Node* branch0 = + graph()->NewNode(common()->Branch(BranchHint::kFalse), check0, control); - Node* if_false0 = graph()->NewNode(common()->IfFalse(), branch0); - Node* efalse0 = effect; - Node* vfalse0; - { - // Check if we should take the fast-path. - Node* check1 = - graph()->NewNode(simplified()->NumberLessThanOrEqual(), length, - jsgraph()->Constant(JSArray::kMaxCopyElements)); - Node* branch1 = graph()->NewNode(common()->Branch(BranchHint::kTrue), - check1, if_false0); + Node* if_true0 = graph()->NewNode(common()->IfTrue(), branch0); + Node* etrue0 = effect; + Node* vtrue0 = jsgraph()->UndefinedConstant(); - Node* if_true1 = graph()->NewNode(common()->IfTrue(), branch1); - Node* etrue1 = efalse0; - Node* vtrue1; + Node* if_false0 = graph()->NewNode(common()->IfFalse(), branch0); + Node* efalse0 = effect; + Node* vfalse0; { - Node* elements = etrue1 = graph()->NewNode( - simplified()->LoadField(AccessBuilder::ForJSObjectElements()), - receiver, etrue1, if_true1); - - // Load the first element here, which we return below. - vtrue1 = etrue1 = graph()->NewNode( - simplified()->LoadElement(AccessBuilder::ForFixedArrayElement(kind)), - elements, jsgraph()->ZeroConstant(), etrue1, if_true1); + // Check if we should take the fast-path. + Node* check1 = + graph()->NewNode(simplified()->NumberLessThanOrEqual(), length, + jsgraph()->Constant(JSArray::kMaxCopyElements)); + Node* branch1 = graph()->NewNode(common()->Branch(BranchHint::kTrue), + check1, if_false0); + + Node* if_true1 = graph()->NewNode(common()->IfTrue(), branch1); + Node* etrue1 = efalse0; + Node* vtrue1; + { + Node* elements = etrue1 = graph()->NewNode( + simplified()->LoadField(AccessBuilder::ForJSObjectElements()), + receiver, etrue1, if_true1); - // Ensure that we aren't shifting a copy-on-write backing store. - if (IsSmiOrObjectElementsKind(kind)) { - elements = etrue1 = - graph()->NewNode(simplified()->EnsureWritableFastElements(), - receiver, elements, etrue1, if_true1); - } + // Load the first element here, which we return below.
+ vtrue1 = etrue1 = graph()->NewNode( + simplified()->LoadElement( + AccessBuilder::ForFixedArrayElement(kind)), + elements, jsgraph()->ZeroConstant(), etrue1, if_true1); + + // Ensure that we aren't shifting a copy-on-write backing store. + if (IsSmiOrObjectElementsKind(kind)) { + elements = etrue1 = + graph()->NewNode(simplified()->EnsureWritableFastElements(), + receiver, elements, etrue1, if_true1); + } - // Shift the remaining {elements} by one towards the start. - Node* loop = graph()->NewNode(common()->Loop(2), if_true1, if_true1); - Node* eloop = - graph()->NewNode(common()->EffectPhi(2), etrue1, etrue1, loop); - Node* terminate = graph()->NewNode(common()->Terminate(), eloop, loop); - NodeProperties::MergeControlToEnd(graph(), common(), terminate); - Node* index = graph()->NewNode( - common()->Phi(MachineRepresentation::kTagged, 2), - jsgraph()->OneConstant(), - jsgraph()->Constant(JSArray::kMaxCopyElements - 1), loop); + // Shift the remaining {elements} by one towards the start. + Node* loop = graph()->NewNode(common()->Loop(2), if_true1, if_true1); + Node* eloop = + graph()->NewNode(common()->EffectPhi(2), etrue1, etrue1, loop); + Node* terminate = graph()->NewNode(common()->Terminate(), eloop, loop); + NodeProperties::MergeControlToEnd(graph(), common(), terminate); + Node* index = graph()->NewNode( + common()->Phi(MachineRepresentation::kTagged, 2), + jsgraph()->OneConstant(), + jsgraph()->Constant(JSArray::kMaxCopyElements - 1), loop); - { - Node* check2 = - graph()->NewNode(simplified()->NumberLessThan(), index, length); - Node* branch2 = graph()->NewNode(common()->Branch(), check2, loop); + { + Node* check2 = + graph()->NewNode(simplified()->NumberLessThan(), index, length); + Node* branch2 = graph()->NewNode(common()->Branch(), check2, loop); - if_true1 = graph()->NewNode(common()->IfFalse(), branch2); - etrue1 = eloop; + if_true1 = graph()->NewNode(common()->IfFalse(), branch2); + etrue1 = eloop; - Node* control = graph()->NewNode(common()->IfTrue(), branch2); - Node* effect = etrue1; + Node* control = graph()->NewNode(common()->IfTrue(), branch2); + Node* effect = etrue1; - ElementAccess const access = AccessBuilder::ForFixedArrayElement(kind); - Node* value = effect = - graph()->NewNode(simplified()->LoadElement(access), elements, index, - effect, control); - effect = - graph()->NewNode(simplified()->StoreElement(access), elements, - graph()->NewNode(simplified()->NumberSubtract(), - index, jsgraph()->OneConstant()), - value, effect, control); - - loop->ReplaceInput(1, control); - eloop->ReplaceInput(1, effect); - index->ReplaceInput(1, - graph()->NewNode(simplified()->NumberAdd(), index, - jsgraph()->OneConstant())); - } + ElementAccess const access = + AccessBuilder::ForFixedArrayElement(kind); + Node* value = effect = + graph()->NewNode(simplified()->LoadElement(access), elements, + index, effect, control); + effect = graph()->NewNode( + simplified()->StoreElement(access), elements, + graph()->NewNode(simplified()->NumberSubtract(), index, + jsgraph()->OneConstant()), + value, effect, control); + + loop->ReplaceInput(1, control); + eloop->ReplaceInput(1, effect); + index->ReplaceInput(1, + graph()->NewNode(simplified()->NumberAdd(), index, + jsgraph()->OneConstant())); + } - // Compute the new {length}. - length = graph()->NewNode(simplified()->NumberSubtract(), length, - jsgraph()->OneConstant()); + // Compute the new {length}. 
+ length = graph()->NewNode(simplified()->NumberSubtract(), length, + jsgraph()->OneConstant()); - // Store the new {length} to the {receiver}. - etrue1 = graph()->NewNode( - simplified()->StoreField(AccessBuilder::ForJSArrayLength(kind)), - receiver, length, etrue1, if_true1); + // Store the new {length} to the {receiver}. + etrue1 = graph()->NewNode( + simplified()->StoreField(AccessBuilder::ForJSArrayLength(kind)), + receiver, length, etrue1, if_true1); - // Store a hole to the element we just removed from the {receiver}. - etrue1 = graph()->NewNode( - simplified()->StoreElement( - AccessBuilder::ForFixedArrayElement(GetHoleyElementsKind(kind))), - elements, length, jsgraph()->TheHoleConstant(), etrue1, if_true1); - } + // Store a hole to the element we just removed from the {receiver}. + etrue1 = graph()->NewNode( + simplified()->StoreElement(AccessBuilder::ForFixedArrayElement( + GetHoleyElementsKind(kind))), + elements, length, jsgraph()->TheHoleConstant(), etrue1, if_true1); + } - Node* if_false1 = graph()->NewNode(common()->IfFalse(), branch1); - Node* efalse1 = efalse0; - Node* vfalse1; - { - // Call the generic C++ implementation. - const int builtin_index = Builtins::kArrayShift; - auto call_descriptor = Linkage::GetCEntryStubCallDescriptor( - graph()->zone(), 1, BuiltinArguments::kNumExtraArgsWithReceiver, - Builtins::name(builtin_index), node->op()->properties(), - CallDescriptor::kNeedsFrameState); - Node* stub_code = - jsgraph()->CEntryStubConstant(1, kDontSaveFPRegs, kArgvOnStack, true); - Address builtin_entry = Builtins::CppEntryOf(builtin_index); - Node* entry = - jsgraph()->ExternalConstant(ExternalReference::Create(builtin_entry)); - Node* argc = - jsgraph()->Constant(BuiltinArguments::kNumExtraArgsWithReceiver); - if_false1 = efalse1 = vfalse1 = - graph()->NewNode(common()->Call(call_descriptor), stub_code, receiver, - jsgraph()->PaddingConstant(), argc, target, - jsgraph()->UndefinedConstant(), entry, argc, context, - frame_state, efalse1, if_false1); - } + Node* if_false1 = graph()->NewNode(common()->IfFalse(), branch1); + Node* efalse1 = efalse0; + Node* vfalse1; + { + // Call the generic C++ implementation. 
+ const int builtin_index = Builtins::kArrayShift; + auto call_descriptor = Linkage::GetCEntryStubCallDescriptor( + graph()->zone(), 1, BuiltinArguments::kNumExtraArgsWithReceiver, + Builtins::name(builtin_index), node->op()->properties(), + CallDescriptor::kNeedsFrameState); + Node* stub_code = jsgraph()->CEntryStubConstant(1, kDontSaveFPRegs, + kArgvOnStack, true); + Address builtin_entry = Builtins::CppEntryOf(builtin_index); + Node* entry = jsgraph()->ExternalConstant( + ExternalReference::Create(builtin_entry)); + Node* argc = + jsgraph()->Constant(BuiltinArguments::kNumExtraArgsWithReceiver); + if_false1 = efalse1 = vfalse1 = + graph()->NewNode(common()->Call(call_descriptor), stub_code, + receiver, jsgraph()->PaddingConstant(), argc, + target, jsgraph()->UndefinedConstant(), entry, + argc, context, frame_state, efalse1, if_false1); + } - if_false0 = graph()->NewNode(common()->Merge(2), if_true1, if_false1); - efalse0 = - graph()->NewNode(common()->EffectPhi(2), etrue1, efalse1, if_false0); - vfalse0 = graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, 2), - vtrue1, vfalse1, if_false0); + if_false0 = graph()->NewNode(common()->Merge(2), if_true1, if_false1); + efalse0 = + graph()->NewNode(common()->EffectPhi(2), etrue1, efalse1, if_false0); + vfalse0 = + graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, 2), + vtrue1, vfalse1, if_false0); } control = graph()->NewNode(common()->Merge(2), if_true0, if_false0); effect = graph()->NewNode(common()->EffectPhi(2), etrue0, efalse0, control); - Node* value = - graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, 2), - vtrue0, vfalse0, control); + value = graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, 2), + vtrue0, vfalse0, control); // Convert the hole to undefined. Do this last, so that we can optimize // conversion operator via some smart strength reduction in many cases. 
@@ -4611,8 +4898,27 @@ Reduction JSCallReducer::ReduceArrayPrototypeShift(Node* node) { graph()->NewNode(simplified()->ConvertTaggedHoleToUndefined(), value); } - ReplaceWithValue(node, value, effect, control); - return Replace(value); + controls_to_merge.push_back(control); + effects_to_merge.push_back(effect); + values_to_merge.push_back(value); + } + + if (controls_to_merge.size() > 1) { + int const count = static_cast<int>(controls_to_merge.size()); + + control = graph()->NewNode(common()->Merge(count), count, + &controls_to_merge.front()); + effects_to_merge.push_back(control); + effect = graph()->NewNode(common()->EffectPhi(count), count + 1, + &effects_to_merge.front()); + values_to_merge.push_back(control); + value = + graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, count), + count + 1, &values_to_merge.front()); + } + + ReplaceWithValue(node, value, effect, control); + return Replace(value); } // ES6 section 22.1.3.23 Array.prototype.slice ( ) @@ -5230,8 +5536,8 @@ Reduction JSCallReducer::ReduceStringFromCodePoint(Node* node) { graph()->NewNode(simplified()->CheckBounds(p.feedback()), input, jsgraph()->Constant(0x10FFFF + 1), effect, control); - Node* value = graph()->NewNode( - simplified()->StringFromSingleCodePoint(UnicodeEncoding::UTF32), input); + Node* value = + graph()->NewNode(simplified()->StringFromSingleCodePoint(), input); ReplaceWithValue(node, value, effect); return Replace(value); } @@ -5287,12 +5593,8 @@ Reduction JSCallReducer::ReduceStringIteratorPrototypeNext(Node* node) { Node* vtrue0; { done_true = jsgraph()->FalseConstant(); - Node* codepoint = etrue0 = graph()->NewNode( - simplified()->StringCodePointAt(UnicodeEncoding::UTF16), string, index, - etrue0, if_true0); - vtrue0 = graph()->NewNode( - simplified()->StringFromSingleCodePoint(UnicodeEncoding::UTF16), - codepoint); + vtrue0 = etrue0 = graph()->NewNode(simplified()->StringFromCodePointAt(), + string, index, etrue0, if_true0); // Update iterator.[[NextIndex]] Node* char_length = graph()->NewNode(simplified()->StringLength(), vtrue0); @@ -5396,6 +5698,8 @@ Node* JSCallReducer::CreateArtificialFrameState( } Reduction JSCallReducer::ReducePromiseConstructor(Node* node) { + DisallowHeapAccessIf no_heap_access(FLAG_concurrent_inlining); + DCHECK_EQ(IrOpcode::kJSConstruct, node->opcode()); ConstructParameters const& p = ConstructParametersOf(node->op()); int arity = static_cast<int>(p.arity() - 2); @@ -5404,7 +5708,6 @@ Reduction JSCallReducer::ReducePromiseConstructor(Node* node) { Node* target = NodeProperties::GetValueInput(node, 0); Node* executor = NodeProperties::GetValueInput(node, 1); Node* new_target = NodeProperties::GetValueInput(node, arity + 1); - Node* context = NodeProperties::GetContextInput(node); Node* outer_frame_state = NodeProperties::GetFrameStateInput(node); Node* effect = NodeProperties::GetEffectInput(node); @@ -5459,7 +5762,7 @@ Reduction JSCallReducer::ReducePromiseConstructor(Node* node) { // Allocate a promise context for the closures below. Node* promise_context = effect = graph()->NewNode( javascript()->CreateFunctionContext( - handle(native_context().object()->scope_info(), isolate()), + native_context().scope_info().object(), PromiseBuiltins::kPromiseContextLength - Context::MIN_CONTEXT_SLOTS, FUNCTION_SCOPE), context, effect, control); @@ -5477,21 +5780,13 @@ Reduction JSCallReducer::ReducePromiseConstructor(Node* node) { promise_context, jsgraph()->TrueConstant(), effect, control); // Allocate the closure for the resolve case.
- SharedFunctionInfoRef resolve_shared = - native_context().promise_capability_default_resolve_shared_fun(); - Node* resolve = effect = graph()->NewNode( - javascript()->CreateClosure( - resolve_shared.object(), factory()->many_closures_cell(), - handle(resolve_shared.object()->GetCode(), isolate())), + Node* resolve = effect = CreateClosureFromBuiltinSharedFunctionInfo( + native_context().promise_capability_default_resolve_shared_fun(), promise_context, effect, control); // Allocate the closure for the reject case. - SharedFunctionInfoRef reject_shared = - native_context().promise_capability_default_reject_shared_fun(); - Node* reject = effect = graph()->NewNode( - javascript()->CreateClosure( - reject_shared.object(), factory()->many_closures_cell(), - handle(reject_shared.object()->GetCode(), isolate())), + Node* reject = effect = CreateClosureFromBuiltinSharedFunctionInfo( + native_context().promise_capability_default_reject_shared_fun(), promise_context, effect, control); const std::vector<Node*> checkpoint_parameters_continuation( @@ -5624,6 +5919,30 @@ Reduction JSCallReducer::ReducePromiseInternalResolve(Node* node) { return Replace(value); } +bool JSCallReducer::DoPromiseChecks(MapInference* inference) { + if (!inference->HaveMaps()) return false; + MapHandles const& receiver_maps = inference->GetMaps(); + + // Check whether all {receiver_maps} are JSPromise maps and + // have the initial Promise.prototype as their [[Prototype]]. + for (Handle<Map> map : receiver_maps) { + MapRef receiver_map(broker(), map); + if (!receiver_map.IsJSPromiseMap()) return false; + if (!FLAG_concurrent_inlining) { + receiver_map.SerializePrototype(); + } else if (!receiver_map.serialized_prototype()) { + TRACE_BROKER_MISSING(broker(), "prototype for map " << receiver_map); + return false; + } + if (!receiver_map.prototype().equals( + native_context().promise_prototype())) { + return false; + } + } + + return true; +} + // ES section #sec-promise.prototype.catch Reduction JSCallReducer::ReducePromisePrototypeCatch(Node* node) { DCHECK_EQ(IrOpcode::kJSCall, node->opcode()); @@ -5637,20 +5956,7 @@ Reduction JSCallReducer::ReducePromisePrototypeCatch(Node* node) { Node* control = NodeProperties::GetControlInput(node); MapInference inference(broker(), receiver, effect); - if (!inference.HaveMaps()) return NoChange(); - MapHandles const& receiver_maps = inference.GetMaps(); - - // Check whether all {receiver_maps} are JSPromise maps and - // have the initial Promise.prototype as their [[Prototype]]. - for (Handle<Map> map : receiver_maps) { - MapRef receiver_map(broker(), map); - if (!receiver_map.IsJSPromiseMap()) return inference.NoChange(); - receiver_map.SerializePrototype(); - if (!receiver_map.prototype().equals( - native_context().promise_prototype())) { - return inference.NoChange(); - } - } + if (!DoPromiseChecks(&inference)) return inference.NoChange(); if (!dependencies()->DependOnPromiseThenProtector()) return inference.NoChange(); @@ -5675,8 +5981,21 @@ Reduction JSCallReducer::ReducePromisePrototypeCatch(Node* node) { return reduction.Changed() ?
reduction : Changed(node); } +Node* JSCallReducer::CreateClosureFromBuiltinSharedFunctionInfo( + SharedFunctionInfoRef shared, Node* context, Node* effect, Node* control) { + DCHECK(shared.HasBuiltinId()); + Callable const callable = Builtins::CallableFor( + isolate(), static_cast<Builtins::Name>(shared.builtin_id())); + return graph()->NewNode( + javascript()->CreateClosure( + shared.object(), factory()->many_closures_cell(), callable.code()), + context, effect, control); +} + // ES section #sec-promise.prototype.finally Reduction JSCallReducer::ReducePromisePrototypeFinally(Node* node) { + DisallowHeapAccessIf no_heap_access(FLAG_concurrent_inlining); + DCHECK_EQ(IrOpcode::kJSCall, node->opcode()); CallParameters const& p = CallParametersOf(node->op()); int arity = static_cast<int>(p.arity() - 2); @@ -5690,21 +6009,9 @@ Reduction JSCallReducer::ReducePromisePrototypeFinally(Node* node) { } MapInference inference(broker(), receiver, effect); - if (!inference.HaveMaps()) return NoChange(); + if (!DoPromiseChecks(&inference)) return inference.NoChange(); MapHandles const& receiver_maps = inference.GetMaps(); - // Check whether all {receiver_maps} are JSPromise maps and - // have the initial Promise.prototype as their [[Prototype]]. - for (Handle<Map> map : receiver_maps) { - MapRef receiver_map(broker(), map); - if (!receiver_map.IsJSPromiseMap()) return inference.NoChange(); - receiver_map.SerializePrototype(); - if (!receiver_map.prototype().equals( - native_context().promise_prototype())) { - return inference.NoChange(); - } - } - if (!dependencies()->DependOnPromiseHookProtector()) return inference.NoChange(); if (!dependencies()->DependOnPromiseThenProtector()) @@ -5730,13 +6037,13 @@ Reduction JSCallReducer::ReducePromisePrototypeFinally(Node* node) { jsgraph()->Constant(native_context().promise_function()); // Allocate shared context for the closures below. - context = etrue = graph()->NewNode( - javascript()->CreateFunctionContext( - handle(native_context().object()->scope_info(), isolate()), - PromiseBuiltins::kPromiseFinallyContextLength - - Context::MIN_CONTEXT_SLOTS, - FUNCTION_SCOPE), - context, etrue, if_true); + context = etrue = + graph()->NewNode(javascript()->CreateFunctionContext( + native_context().scope_info().object(), + PromiseBuiltins::kPromiseFinallyContextLength - + Context::MIN_CONTEXT_SLOTS, + FUNCTION_SCOPE), + context, etrue, if_true); etrue = graph()->NewNode( simplified()->StoreField( AccessBuilder::ForContextSlot(PromiseBuiltins::kOnFinallySlot)), @@ -5747,22 +6054,14 @@ Reduction JSCallReducer::ReducePromisePrototypeFinally(Node* node) { context, constructor, etrue, if_true); // Allocate the closure for the reject case. - SharedFunctionInfoRef catch_finally = - native_context().promise_catch_finally_shared_fun(); - catch_true = etrue = graph()->NewNode( - javascript()->CreateClosure( - catch_finally.object(), factory()->many_closures_cell(), - handle(catch_finally.object()->GetCode(), isolate())), - context, etrue, if_true); + catch_true = etrue = CreateClosureFromBuiltinSharedFunctionInfo( + native_context().promise_catch_finally_shared_fun(), context, etrue, + if_true); // Allocate the closure for the fulfill case.
- SharedFunctionInfoRef then_finally = - native_context().promise_then_finally_shared_fun(); - then_true = etrue = graph()->NewNode( - javascript()->CreateClosure( - then_finally.object(), factory()->many_closures_cell(), - handle(then_finally.object()->GetCode(), isolate())), - context, etrue, if_true); + then_true = etrue = CreateClosureFromBuiltinSharedFunctionInfo( + native_context().promise_then_finally_shared_fun(), context, etrue, + if_true); } Node* if_false = graph()->NewNode(common()->IfFalse(), branch); @@ -5810,6 +6109,8 @@ Reduction JSCallReducer::ReducePromisePrototypeFinally(Node* node) { } Reduction JSCallReducer::ReducePromisePrototypeThen(Node* node) { + DisallowHeapAccessIf no_heap_access(FLAG_concurrent_inlining); + DCHECK_EQ(IrOpcode::kJSCall, node->opcode()); CallParameters const& p = CallParametersOf(node->op()); if (p.speculation_mode() == SpeculationMode::kDisallowSpeculation) { @@ -5829,20 +6130,7 @@ Reduction JSCallReducer::ReducePromisePrototypeThen(Node* node) { Node* frame_state = NodeProperties::GetFrameStateInput(node); MapInference inference(broker(), receiver, effect); - if (!inference.HaveMaps()) return NoChange(); - MapHandles const& receiver_maps = inference.GetMaps(); - - // Check whether all {receiver_maps} are JSPromise maps and - // have the initial Promise.prototype as their [[Prototype]]. - for (Handle<Map> map : receiver_maps) { - MapRef receiver_map(broker(), map); - if (!receiver_map.IsJSPromiseMap()) return inference.NoChange(); - receiver_map.SerializePrototype(); - if (!receiver_map.prototype().equals( - native_context().promise_prototype())) { - return inference.NoChange(); - } - } + if (!DoPromiseChecks(&inference)) return inference.NoChange(); if (!dependencies()->DependOnPromiseHookProtector()) return inference.NoChange(); @@ -5889,6 +6177,8 @@ // ES section #sec-promise.resolve Reduction JSCallReducer::ReducePromiseResolveTrampoline(Node* node) { + DisallowHeapAccessIf no_heap_access(FLAG_concurrent_inlining); + DCHECK_EQ(IrOpcode::kJSCall, node->opcode()); Node* receiver = NodeProperties::GetValueInput(node, 1); Node* value = node->op()->ValueInputCount() > 2 @@ -6828,8 +7118,11 @@ Reduction JSCallReducer::ReduceNumberParseInt(Node* node) { } Reduction JSCallReducer::ReduceRegExpPrototypeTest(Node* node) { + DisallowHeapAccessIf disallow_heap_access(FLAG_concurrent_inlining); + if (FLAG_force_slow_path) return NoChange(); if (node->op()->ValueInputCount() < 3) return NoChange(); + CallParameters const& p = CallParametersOf(node->op()); if (p.speculation_mode() == SpeculationMode::kDisallowSpeculation) { return NoChange(); } @@ -6846,13 +7139,24 @@ } MapHandles const& regexp_maps = inference.GetMaps(); - // Compute property access info for "exec" on {resolution}. ZoneVector<PropertyAccessInfo> access_infos(graph()->zone()); AccessInfoFactory access_info_factory(broker(), dependencies(), graph()->zone()); - access_info_factory.ComputePropertyAccessInfos( - MapHandles(regexp_maps.begin(), regexp_maps.end()), - factory()->exec_string(), AccessMode::kLoad, &access_infos); + if (!FLAG_concurrent_inlining) { + // Compute property access info for "exec" on {resolution}. + access_info_factory.ComputePropertyAccessInfos( + MapHandles(regexp_maps.begin(), regexp_maps.end()), + factory()->exec_string(), AccessMode::kLoad, &access_infos); + } else { + // Obtain precomputed access infos from the broker.
+    for (auto map : regexp_maps) {
+      MapRef map_ref(broker(), map);
+      PropertyAccessInfo access_info =
+          broker()->GetAccessInfoForLoadingExec(map_ref);
+      access_infos.push_back(access_info);
+    }
+  }
+
   PropertyAccessInfo ai_exec =
       access_info_factory.FinalizePropertyAccessInfosAsOne(access_infos,
                                                            AccessMode::kLoad);
@@ -6864,34 +7168,24 @@
 
   // Do not reduce if the exec method is not on the prototype chain.
   if (!ai_exec.holder().ToHandle(&holder)) return inference.NoChange();
 
+  JSObjectRef holder_ref(broker(), holder);
+
   // Bail out if the exec method is not the original one.
-  Handle<Object> constant = JSObject::FastPropertyAt(
-      holder, ai_exec.field_representation(), ai_exec.field_index());
-  if (!constant.is_identical_to(isolate()->regexp_exec_function())) {
+  base::Optional<ObjectRef> constant = holder_ref.GetOwnProperty(
+      ai_exec.field_representation(), ai_exec.field_index());
+  if (!constant.has_value() ||
+      !constant->equals(native_context().regexp_exec_function())) {
     return inference.NoChange();
   }
 
-  // Protect the exec method change in the holder.
-  Handle<Object> exec_on_proto;
-  MapRef holder_map(broker(), handle(holder->map(), isolate()));
-  Handle<DescriptorArray> descriptors(
-      holder_map.object()->instance_descriptors(), isolate());
-  int descriptor_index =
-      descriptors->Search(*(factory()->exec_string()), *holder_map.object());
-  CHECK_NE(descriptor_index, DescriptorArray::kNotFound);
-  holder_map.SerializeOwnDescriptors();
-  dependencies()->DependOnFieldType(holder_map, descriptor_index);
-  } else {
-    return inference.NoChange();
-  }
-
-  // Add proper dependencies on the {regexp}s [[Prototype]]s.
-  Handle<JSObject> holder;
-  if (ai_exec.holder().ToHandle(&holder)) {
+  // Add proper dependencies on the {regexp}s [[Prototype]]s.
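The prototype-chain dependencies are installed just below. As for the exec check rewritten above: JSObjectRef::GetOwnProperty returns an empty optional when the property was never serialized, and the reduction bails out both on that miss and on a mismatch. A sketch of the contract, using std::optional in place of base::Optional<ObjectRef>:

#include <optional>

using ObjectId = int;  // stand-in for an ObjectRef identity

// Returns the cached own property if known, std::nullopt otherwise.
std::optional<ObjectId> GetOwnProperty(bool serialized, ObjectId value) {
  return serialized ? std::optional<ObjectId>(value) : std::nullopt;
}

// Bail out (return false) when the property is unknown or is not the
// original RegExp.prototype.exec.
bool IsOriginalExec(bool serialized, ObjectId value, ObjectId regexp_exec) {
  std::optional<ObjectId> constant = GetOwnProperty(serialized, value);
  return constant.has_value() && *constant == regexp_exec;
}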
dependencies()->DependOnStablePrototypeChains( ai_exec.receiver_maps(), kStartAtPrototype, JSObjectRef(broker(), holder)); + } else { + return inference.NoChange(); } + inference.RelyOnMapsPreferStability(dependencies(), jsgraph(), &effect, control, p.feedback()); @@ -6955,12 +7249,47 @@ Reduction JSCallReducer::ReduceNumberConstructor(Node* node) { return Changed(node); } +Reduction JSCallReducer::ReduceBigIntAsUintN(Node* node) { + if (!jsgraph()->machine()->Is64()) { + return NoChange(); + } + + CallParameters const& p = CallParametersOf(node->op()); + if (p.speculation_mode() == SpeculationMode::kDisallowSpeculation) { + return NoChange(); + } + if (node->op()->ValueInputCount() < 3) { + return NoChange(); + } + + Node* effect = NodeProperties::GetEffectInput(node); + Node* control = NodeProperties::GetControlInput(node); + Node* bits = NodeProperties::GetValueInput(node, 2); + Node* value = NodeProperties::GetValueInput(node, 3); + + NumberMatcher matcher(bits); + if (matcher.IsInteger() && matcher.IsInRange(0, 64)) { + const int bits_value = static_cast(matcher.Value()); + value = effect = graph()->NewNode(simplified()->CheckBigInt(p.feedback()), + value, effect, control); + value = graph()->NewNode(simplified()->BigIntAsUintN(bits_value), value); + ReplaceWithValue(node, value, effect); + return Replace(value); + } + + return NoChange(); +} + Graph* JSCallReducer::graph() const { return jsgraph()->graph(); } Isolate* JSCallReducer::isolate() const { return jsgraph()->isolate(); } Factory* JSCallReducer::factory() const { return isolate()->factory(); } +NativeContextRef JSCallReducer::native_context() const { + return broker()->native_context(); +} + CommonOperatorBuilder* JSCallReducer::common() const { return jsgraph()->common(); } diff --git a/deps/v8/src/compiler/js-call-reducer.h b/deps/v8/src/compiler/js-call-reducer.h index 02821ebb0dc4cb..bf3676c5b2202f 100644 --- a/deps/v8/src/compiler/js-call-reducer.h +++ b/deps/v8/src/compiler/js-call-reducer.h @@ -29,6 +29,7 @@ struct FieldAccess; class JSGraph; class JSHeapBroker; class JSOperatorBuilder; +class MapInference; class NodeProperties; class SimplifiedOperatorBuilder; @@ -155,6 +156,7 @@ class V8_EXPORT_PRIVATE JSCallReducer final : public AdvancedReducer { Reduction ReduceMathImul(Node* node); Reduction ReduceMathClz32(Node* node); Reduction ReduceMathMinMax(Node* node, const Operator* op, Node* empty_value); + Reduction ReduceMathHypot(Node* node); Reduction ReduceNumberIsFinite(Node* node); Reduction ReduceNumberIsInteger(Node* node); @@ -190,6 +192,15 @@ class V8_EXPORT_PRIVATE JSCallReducer final : public AdvancedReducer { Reduction ReduceNumberParseInt(Node* node); Reduction ReduceNumberConstructor(Node* node); + Reduction ReduceBigIntAsUintN(Node* node); + + // Helper to verify promise receiver maps are as expected. + // On bailout from a reduction, be sure to return inference.NoChange(). + bool DoPromiseChecks(MapInference* inference); + + Node* CreateClosureFromBuiltinSharedFunctionInfo(SharedFunctionInfoRef shared, + Node* context, Node* effect, + Node* control); // Returns the updated {to} node, and updates control and effect along the // way. 
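Earlier in this hunk, ReduceBigIntAsUintN only fires on 64-bit targets when the bits operand is a known integer in [0, 64]; the emitted BigIntAsUintN(bits) operator then amounts to truncation modulo 2^bits. The same arithmetic on a raw 64-bit value, as a sketch:

#include <cstdint>

// BigInt.asUintN(bits, x) keeps the low `bits` bits of x, i.e. x mod 2^bits.
uint64_t AsUintN(int bits, uint64_t value) {
  if (bits >= 64) return value;                // full width: identity
  return value & ((uint64_t{1} << bits) - 1);  // mask off the high bits
}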
@@ -231,12 +242,16 @@ class V8_EXPORT_PRIVATE JSCallReducer final : public AdvancedReducer { const SharedFunctionInfoRef& shared, Node* context = nullptr); + void CheckIfElementsKind(Node* receiver_elements_kind, ElementsKind kind, + Node* control, Node** if_true, Node** if_false); + Node* LoadReceiverElementsKind(Node* receiver, Node** effect, Node** control); + Graph* graph() const; JSGraph* jsgraph() const { return jsgraph_; } JSHeapBroker* broker() const { return broker_; } Isolate* isolate() const; Factory* factory() const; - NativeContextRef native_context() const { return broker()->native_context(); } + NativeContextRef native_context() const; CommonOperatorBuilder* common() const; JSOperatorBuilder* javascript() const; SimplifiedOperatorBuilder* simplified() const; diff --git a/deps/v8/src/compiler/js-context-specialization.cc b/deps/v8/src/compiler/js-context-specialization.cc index dea6d7fc2b62a6..035e8b7ceb9392 100644 --- a/deps/v8/src/compiler/js-context-specialization.cc +++ b/deps/v8/src/compiler/js-context-specialization.cc @@ -6,6 +6,7 @@ #include "src/compiler/common-operator.h" #include "src/compiler/js-graph.h" +#include "src/compiler/js-heap-broker.h" #include "src/compiler/js-operator.h" #include "src/compiler/linkage.h" #include "src/compiler/node-matchers.h" @@ -144,9 +145,10 @@ Reduction JSContextSpecialization::ReduceJSLoadContext(Node* node) { // Now walk up the concrete context chain for the remaining depth. ContextRef concrete = maybe_concrete.value(); - concrete.SerializeContextChain(); // TODO(neis): Remove later. - for (; depth > 0; --depth) { - concrete = concrete.previous(); + concrete = concrete.previous(&depth); + if (depth > 0) { + TRACE_BROKER_MISSING(broker(), "previous value for context " << concrete); + return SimplifyJSLoadContext(node, jsgraph()->Constant(concrete), depth); } if (!access.immutable()) { @@ -157,8 +159,6 @@ Reduction JSContextSpecialization::ReduceJSLoadContext(Node* node) { // This will hold the final value, if we can figure it out. base::Optional maybe_value; - - concrete.SerializeSlot(static_cast(access.index())); maybe_value = concrete.get(static_cast(access.index())); if (maybe_value.has_value() && !maybe_value->IsSmi()) { // Even though the context slot is immutable, the context might have escaped @@ -174,6 +174,9 @@ Reduction JSContextSpecialization::ReduceJSLoadContext(Node* node) { } if (!maybe_value.has_value()) { + TRACE_BROKER_MISSING(broker(), "slot value " << access.index() + << " for context " + << concrete); return SimplifyJSLoadContext(node, jsgraph()->Constant(concrete), depth); } @@ -207,9 +210,10 @@ Reduction JSContextSpecialization::ReduceJSStoreContext(Node* node) { // Now walk up the concrete context chain for the remaining depth. ContextRef concrete = maybe_concrete.value(); - concrete.SerializeContextChain(); // TODO(neis): Remove later. 
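The store-context path below gets the same treatment as the load path above: instead of eagerly serializing the whole chain, the broker walks as many previous links as it has data for, decrementing the requested depth along the way, and the reducer handles any leftover depth with a TRACE_BROKER_MISSING fallback. A standalone model of that in/out-depth walk (names illustrative):

#include <cstddef>

struct Context {
  Context* previous = nullptr;
};

// Walk up at most *depth links; on return, *depth holds the depth that could
// not be consumed (non-zero means the chain was not fully known).
Context* Previous(Context* c, size_t* depth) {
  while (*depth > 0 && c->previous != nullptr) {
    c = c->previous;
    --(*depth);
  }
  return c;
}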
- for (; depth > 0; --depth) { - concrete = concrete.previous(); + concrete = concrete.previous(&depth); + if (depth > 0) { + TRACE_BROKER_MISSING(broker(), "previous value for context " << concrete); + return SimplifyJSStoreContext(node, jsgraph()->Constant(concrete), depth); } return SimplifyJSStoreContext(node, jsgraph()->Constant(concrete), depth); diff --git a/deps/v8/src/compiler/js-create-lowering.cc b/deps/v8/src/compiler/js-create-lowering.cc index 8fc8dd1308cfb7..4e69db6b9bca6c 100644 --- a/deps/v8/src/compiler/js-create-lowering.cc +++ b/deps/v8/src/compiler/js-create-lowering.cc @@ -837,7 +837,7 @@ Reduction JSCreateLowering::ReduceJSCreateCollectionIterator(Node* node) { simplified()->LoadField(AccessBuilder::ForJSCollectionTable()), iterated_object, effect, control); - // Create the JSArrayIterator result. + // Create the JSCollectionIterator result. AllocationBuilder a(jsgraph(), effect, control); a.Allocate(JSCollectionIterator::kSize, AllocationType::kYoung, Type::OtherObject()); diff --git a/deps/v8/src/compiler/js-graph.cc b/deps/v8/src/compiler/js-graph.cc index a3805ec125620d..43a4beadeeb754 100644 --- a/deps/v8/src/compiler/js-graph.cc +++ b/deps/v8/src/compiler/js-graph.cc @@ -128,9 +128,17 @@ void JSGraph::GetCachedNodes(NodeVector* nodes) { DEFINE_GETTER(AllocateInYoungGenerationStubConstant, HeapConstant(BUILTIN_CODE(isolate(), AllocateInYoungGeneration))) +DEFINE_GETTER(AllocateRegularInYoungGenerationStubConstant, + HeapConstant(BUILTIN_CODE(isolate(), + AllocateRegularInYoungGeneration))) + DEFINE_GETTER(AllocateInOldGenerationStubConstant, HeapConstant(BUILTIN_CODE(isolate(), AllocateInOldGeneration))) +DEFINE_GETTER(AllocateRegularInOldGenerationStubConstant, + HeapConstant(BUILTIN_CODE(isolate(), + AllocateRegularInOldGeneration))) + DEFINE_GETTER(ArrayConstructorStubConstant, HeapConstant(BUILTIN_CODE(isolate(), ArrayConstructorImpl))) diff --git a/deps/v8/src/compiler/js-graph.h b/deps/v8/src/compiler/js-graph.h index b5c80515ad4513..ec36c26034b1ba 100644 --- a/deps/v8/src/compiler/js-graph.h +++ b/deps/v8/src/compiler/js-graph.h @@ -80,31 +80,33 @@ class V8_EXPORT_PRIVATE JSGraph : public MachineGraph { void GetCachedNodes(NodeVector* nodes); // Cached global nodes. 
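The cached-node list follows. Each entry in it gets a DEFINE_GETTER-style accessor that materializes its HeapConstant node once and then hands out the same node on every later request; the two new AllocateRegular stubs simply join that scheme. A schematic of the define-once caching, with stand-in types:

#include <memory>
#include <string>

struct Node {
  std::string label;
};

class GraphConstants {
 public:
  Node* AllocateInYoungGenerationStubConstant() {
    if (!allocate_young_) {
      allocate_young_ =
          std::make_unique<Node>(Node{"AllocateInYoungGeneration"});
    }
    return allocate_young_.get();  // same node for every caller
  }

 private:
  std::unique_ptr<Node> allocate_young_;
};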
-#define CACHED_GLOBAL_LIST(V) \ - V(AllocateInYoungGenerationStubConstant) \ - V(AllocateInOldGenerationStubConstant) \ - V(ArrayConstructorStubConstant) \ - V(BigIntMapConstant) \ - V(BooleanMapConstant) \ - V(ToNumberBuiltinConstant) \ - V(EmptyFixedArrayConstant) \ - V(EmptyStringConstant) \ - V(FixedArrayMapConstant) \ - V(PropertyArrayMapConstant) \ - V(FixedDoubleArrayMapConstant) \ - V(HeapNumberMapConstant) \ - V(OptimizedOutConstant) \ - V(StaleRegisterConstant) \ - V(UndefinedConstant) \ - V(TheHoleConstant) \ - V(TrueConstant) \ - V(FalseConstant) \ - V(NullConstant) \ - V(ZeroConstant) \ - V(OneConstant) \ - V(NaNConstant) \ - V(MinusOneConstant) \ - V(EmptyStateValues) \ +#define CACHED_GLOBAL_LIST(V) \ + V(AllocateInYoungGenerationStubConstant) \ + V(AllocateRegularInYoungGenerationStubConstant) \ + V(AllocateInOldGenerationStubConstant) \ + V(AllocateRegularInOldGenerationStubConstant) \ + V(ArrayConstructorStubConstant) \ + V(BigIntMapConstant) \ + V(BooleanMapConstant) \ + V(ToNumberBuiltinConstant) \ + V(EmptyFixedArrayConstant) \ + V(EmptyStringConstant) \ + V(FixedArrayMapConstant) \ + V(PropertyArrayMapConstant) \ + V(FixedDoubleArrayMapConstant) \ + V(HeapNumberMapConstant) \ + V(OptimizedOutConstant) \ + V(StaleRegisterConstant) \ + V(UndefinedConstant) \ + V(TheHoleConstant) \ + V(TrueConstant) \ + V(FalseConstant) \ + V(NullConstant) \ + V(ZeroConstant) \ + V(OneConstant) \ + V(NaNConstant) \ + V(MinusOneConstant) \ + V(EmptyStateValues) \ V(SingleDeadTypedStateValues) // Cached global node accessor methods. diff --git a/deps/v8/src/compiler/js-heap-broker.cc b/deps/v8/src/compiler/js-heap-broker.cc index 86250e9d1fec1a..c79c793ae69a63 100644 --- a/deps/v8/src/compiler/js-heap-broker.cc +++ b/deps/v8/src/compiler/js-heap-broker.cc @@ -3,6 +3,7 @@ // found in the LICENSE file. #include "src/compiler/js-heap-broker.h" +#include "src/compiler/heap-refs.h" #ifdef ENABLE_SLOW_DCHECKS #include @@ -12,6 +13,7 @@ #include "src/ast/modules.h" #include "src/codegen/code-factory.h" #include "src/compiler/access-info.h" +#include "src/compiler/bytecode-analysis.h" #include "src/compiler/graph-reducer.h" #include "src/compiler/per-isolate-compiler-cache.h" #include "src/compiler/vector-slot-pair.h" @@ -26,6 +28,7 @@ #include "src/objects/js-regexp-inl.h" #include "src/objects/module-inl.h" #include "src/objects/objects-inl.h" +#include "src/objects/template-objects-inl.h" #include "src/objects/templates.h" #include "src/utils/boxed-float.h" #include "src/utils/utils.h" @@ -121,17 +124,31 @@ class PropertyCellData : public HeapObjectData { ObjectData* value_ = nullptr; }; +// TODO(mslekova): Once we have real-world usage data, we might want to +// reimplement this as sorted vector instead, to reduce the memory overhead. 
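The KnownReceiversMap typedef follows. Its TODO contemplates replacing the ZoneMap with a sorted vector, trading pointer-heavy tree nodes for one contiguous allocation with binary-search lookups. A sketch of that alternative container:

#include <algorithm>
#include <utility>
#include <vector>

template <typename K, typename V>
class SortedVectorMap {
 public:
  void Insert(K key, V value) {
    auto it = std::lower_bound(
        data_.begin(), data_.end(), key,
        [](const std::pair<K, V>& p, const K& k) { return p.first < k; });
    data_.insert(it, {std::move(key), std::move(value)});
  }
  const V* Find(const K& key) const {
    auto it = std::lower_bound(
        data_.begin(), data_.end(), key,
        [](const std::pair<K, V>& p, const K& k) { return p.first < k; });
    return (it != data_.end() && it->first == key) ? &it->second : nullptr;
  }

 private:
  std::vector<std::pair<K, V>> data_;  // kept sorted by key
};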
+typedef ZoneMap KnownReceiversMap; + class FunctionTemplateInfoData : public HeapObjectData { public: FunctionTemplateInfoData(JSHeapBroker* broker, ObjectData** storage, Handle object); - void Serialize(JSHeapBroker* broker); - ObjectData* call_code() const { return call_code_; } + bool is_signature_undefined() const { return is_signature_undefined_; } + bool accept_any_receiver() const { return accept_any_receiver_; } + bool has_call_code() const { return has_call_code_; } + + void SerializeCallCode(JSHeapBroker* broker); + CallHandlerInfoData* call_code() const { return call_code_; } + KnownReceiversMap& known_receivers() { return known_receivers_; } private: - bool serialized_ = false; - ObjectData* call_code_ = nullptr; + bool serialized_call_code_ = false; + CallHandlerInfoData* call_code_ = nullptr; + bool is_signature_undefined_ = false; + bool accept_any_receiver_ = false; + bool has_call_code_ = false; + + KnownReceiversMap known_receivers_; }; class CallHandlerInfoData : public HeapObjectData { @@ -154,7 +171,16 @@ class CallHandlerInfoData : public HeapObjectData { FunctionTemplateInfoData::FunctionTemplateInfoData( JSHeapBroker* broker, ObjectData** storage, Handle object) - : HeapObjectData(broker, storage, object) {} + : HeapObjectData(broker, storage, object), + known_receivers_(broker->zone()) { + auto function_template_info = Handle::cast(object); + is_signature_undefined_ = + function_template_info->signature().IsUndefined(broker->isolate()); + accept_any_receiver_ = function_template_info->accept_any_receiver(); + + CallOptimization call_optimization(broker->isolate(), object); + has_call_code_ = call_optimization.is_simple_api_call(); +} CallHandlerInfoData::CallHandlerInfoData(JSHeapBroker* broker, ObjectData** storage, @@ -181,18 +207,17 @@ void PropertyCellData::Serialize(JSHeapBroker* broker) { value_ = broker->GetOrCreateData(cell->value()); } -void FunctionTemplateInfoData::Serialize(JSHeapBroker* broker) { - if (serialized_) return; - serialized_ = true; +void FunctionTemplateInfoData::SerializeCallCode(JSHeapBroker* broker) { + if (serialized_call_code_) return; + serialized_call_code_ = true; - TraceScope tracer(broker, this, "FunctionTemplateInfoData::Serialize"); + TraceScope tracer(broker, this, + "FunctionTemplateInfoData::SerializeCallCode"); auto function_template_info = Handle::cast(object()); DCHECK_NULL(call_code_); - call_code_ = broker->GetOrCreateData(function_template_info->call_code()); - - if (call_code_->IsCallHandlerInfo()) { - call_code_->AsCallHandlerInfo()->Serialize(broker); - } + call_code_ = broker->GetOrCreateData(function_template_info->call_code()) + ->AsCallHandlerInfo(); + call_code_->Serialize(broker); } void CallHandlerInfoData::Serialize(JSHeapBroker* broker) { @@ -231,6 +256,12 @@ class JSObjectField { uint64_t number_bits_ = 0; }; +struct FieldIndexHasher { + size_t operator()(FieldIndex field_index) const { + return field_index.index(); + } +}; + class JSObjectData : public HeapObjectData { public: JSObjectData(JSHeapBroker* broker, ObjectData** storage, @@ -253,12 +284,15 @@ class JSObjectData : public HeapObjectData { ObjectData* GetOwnConstantElement(JSHeapBroker* broker, uint32_t index, bool serialize); + ObjectData* GetOwnProperty(JSHeapBroker* broker, + Representation representation, + FieldIndex field_index, bool serialize); // This method is only used to assert our invariants. 
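Note how the FunctionTemplateInfoData constructor above snapshots cheap facts (whether the signature is undefined, the receiver policy, whether call code exists) while the heap is still accessible, so that later queries from the compiler are heap-free. The pattern in isolation, with stand-in types:

struct HeapTemplateInfo {  // stands in for the on-heap object
  bool signature_is_undefined;
  bool accept_any_receiver;
};

class TemplateInfoData {
 public:
  explicit TemplateInfoData(const HeapTemplateInfo& heap)
      : is_signature_undefined_(heap.signature_is_undefined),
        accept_any_receiver_(heap.accept_any_receiver) {}

  // Heap-free accessors, safe on a background compiler thread.
  bool is_signature_undefined() const { return is_signature_undefined_; }
  bool accept_any_receiver() const { return accept_any_receiver_; }

 private:
  const bool is_signature_undefined_;
  const bool accept_any_receiver_;
};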
bool cow_or_empty_elements_tenured() const; private: - void SerializeRecursive(JSHeapBroker* broker, int max_depths); + void SerializeRecursiveAsBoilerplate(JSHeapBroker* broker, int max_depths); FixedArrayBaseData* elements_ = nullptr; bool cow_or_empty_elements_tenured_ = false; @@ -277,6 +311,12 @@ class JSObjectData : public HeapObjectData { // non-configurable, or (2) are known not to (possibly they don't exist at // all). In case (2), the second pair component is nullptr. ZoneVector> own_constant_elements_; + // Properties that either: + // (1) are known to exist directly on the object, or + // (2) are known not to (possibly they don't exist at all). + // In case (2), the second pair component is nullptr. + // For simplicity, this may in theory overlap with inobject_fields_. + ZoneUnorderedMap own_properties_; }; void JSObjectData::SerializeObjectCreateMap(JSHeapBroker* broker) { @@ -312,6 +352,15 @@ base::Optional GetOwnElementFromHeap(JSHeapBroker* broker, } return base::nullopt; } + +ObjectRef GetOwnPropertyFromHeap(JSHeapBroker* broker, + Handle receiver, + Representation representation, + FieldIndex field_index) { + Handle constant = + JSObject::FastPropertyAt(receiver, representation, field_index); + return ObjectRef(broker, constant); +} } // namespace ObjectData* JSObjectData::GetOwnConstantElement(JSHeapBroker* broker, @@ -333,6 +382,27 @@ ObjectData* JSObjectData::GetOwnConstantElement(JSHeapBroker* broker, return result; } +ObjectData* JSObjectData::GetOwnProperty(JSHeapBroker* broker, + Representation representation, + FieldIndex field_index, + bool serialize) { + auto p = own_properties_.find(field_index); + if (p != own_properties_.end()) return p->second; + + if (!serialize) { + TRACE_MISSING(broker, "knowledge about property with index " + << field_index.property_index() << " on " + << this); + return nullptr; + } + + ObjectRef property = GetOwnPropertyFromHeap( + broker, Handle::cast(object()), representation, field_index); + ObjectData* result(property.data()); + own_properties_.insert(std::make_pair(field_index, result)); + return result; +} + class JSTypedArrayData : public JSObjectData { public: JSTypedArrayData(JSHeapBroker* broker, ObjectData** storage, @@ -503,24 +573,18 @@ class ContextData : public HeapObjectData { public: ContextData(JSHeapBroker* broker, ObjectData** storage, Handle object); - void SerializeContextChain(JSHeapBroker* broker); - ContextData* previous() const { - CHECK(serialized_context_chain_); - return previous_; - } + // {previous} will return the closest valid context possible to desired + // {depth}, decrementing {depth} for each previous link successfully followed. + // If {serialize} is true, it will serialize contexts along the way. + ContextData* previous(JSHeapBroker* broker, size_t* depth, bool serialize); - void SerializeSlot(JSHeapBroker* broker, int index); - - ObjectData* GetSlot(int index) { - auto search = slots_.find(index); - CHECK(search != slots_.end()); - return search->second; - } + // Returns nullptr if the slot index isn't valid or wasn't serialized + // (unless {serialize} is true). 
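The GetSlot declaration follows; the comment above describes the broker's serialize-on-demand protocol in miniature: cached data wins, a serializing caller may materialize missing data, and everyone else gets nullptr and records a miss. A compact model:

#include <map>
#include <string>

class ContextSlots {
 public:
  // Returns nullptr when the slot is unknown and serialization is not
  // allowed; the caller then records a broker miss.
  const std::string* GetSlot(int index, bool serialize) {
    if (index < 0) return nullptr;
    auto it = slots_.find(index);
    if (it != slots_.end()) return &it->second;
    if (!serialize) return nullptr;
    auto inserted = slots_.emplace(index, ReadSlotFromHeap(index));
    return &inserted.first->second;
  }

 private:
  std::string ReadSlotFromHeap(int index) {  // placeholder for the heap read
    return "slot#" + std::to_string(index);
  }
  std::map<int, std::string> slots_;
};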
+ ObjectData* GetSlot(JSHeapBroker* broker, int index, bool serialize); private: ZoneMap slots_; - bool serialized_context_chain_ = false; ContextData* previous_ = nullptr; }; @@ -528,28 +592,46 @@ ContextData::ContextData(JSHeapBroker* broker, ObjectData** storage, Handle object) : HeapObjectData(broker, storage, object), slots_(broker->zone()) {} -void ContextData::SerializeContextChain(JSHeapBroker* broker) { - if (serialized_context_chain_) return; - serialized_context_chain_ = true; +ContextData* ContextData::previous(JSHeapBroker* broker, size_t* depth, + bool serialize) { + if (*depth == 0) return this; - TraceScope tracer(broker, this, "ContextData::SerializeContextChain"); - Handle context = Handle::cast(object()); + if (serialize && previous_ == nullptr) { + TraceScope tracer(broker, this, "ContextData::previous"); + Handle context = Handle::cast(object()); + Object prev = context->unchecked_previous(); + if (prev.IsContext()) { + previous_ = broker->GetOrCreateData(prev)->AsContext(); + } + } - DCHECK_NULL(previous_); - // Context::previous DCHECK-fails when called on the native context. - if (!context->IsNativeContext()) { - previous_ = broker->GetOrCreateData(context->previous())->AsContext(); - previous_->SerializeContextChain(broker); + if (previous_ != nullptr) { + *depth = *depth - 1; + return previous_->previous(broker, depth, serialize); } + return this; } -void ContextData::SerializeSlot(JSHeapBroker* broker, int index) { - TraceScope tracer(broker, this, "ContextData::SerializeSlot"); - TRACE(broker, "Serializing script context slot " << index); - Handle context = Handle::cast(object()); - CHECK(index >= 0 && index < context->length()); - ObjectData* odata = broker->GetOrCreateData(context->get(index)); - slots_.insert(std::make_pair(index, odata)); +ObjectData* ContextData::GetSlot(JSHeapBroker* broker, int index, + bool serialize) { + CHECK_GE(index, 0); + auto search = slots_.find(index); + if (search != slots_.end()) { + return search->second; + } + + if (serialize) { + Handle context = Handle::cast(object()); + if (index < context->length()) { + TraceScope tracer(broker, this, "ContextData::GetSlot"); + TRACE(broker, "Serializing context slot " << index); + ObjectData* odata = broker->GetOrCreateData(context->get(index)); + slots_.insert(std::make_pair(index, odata)); + return odata; + } + } + + return nullptr; } class NativeContextData : public ContextData { @@ -564,6 +646,11 @@ class NativeContextData : public ContextData { return function_maps_; } + ScopeInfoData* scope_info() const { + CHECK(serialized_); + return scope_info_; + } + NativeContextData(JSHeapBroker* broker, ObjectData** storage, Handle object); void Serialize(JSHeapBroker* broker); @@ -574,6 +661,7 @@ class NativeContextData : public ContextData { BROKER_NATIVE_CONTEXT_FIELDS(DECL_MEMBER) #undef DECL_MEMBER ZoneVector function_maps_; + ScopeInfoData* scope_info_ = nullptr; }; class NameData : public HeapObjectData { @@ -674,14 +762,15 @@ bool IsFastLiteralHelper(Handle boilerplate, int max_depth, DCHECK_GE(max_depth, 0); DCHECK_GE(*max_properties, 0); + Isolate* const isolate = boilerplate->GetIsolate(); + // Make sure the boilerplate map is not deprecated. - if (!JSObject::TryMigrateInstance(boilerplate)) return false; + if (!JSObject::TryMigrateInstance(isolate, boilerplate)) return false; // Check for too deep nesting. if (max_depth == 0) return false; // Check the elements. 
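The element checks continue below. Overall, IsFastLiteralHelper enforces two budgets while walking a boilerplate: a per-branch nesting depth and a property count shared across the whole literal. A stripped-down version of that recursion shape (stand-in types, not V8's):

#include <vector>

struct Boilerplate {
  std::vector<Boilerplate> properties;
};

bool IsFastLiteral(const Boilerplate& b, int max_depth, int* max_properties) {
  if (max_depth == 0) return false;  // nested too deeply
  for (const Boilerplate& p : b.properties) {
    if (--(*max_properties) < 0) return false;  // literal too large overall
    if (!IsFastLiteral(p, max_depth - 1, max_properties)) return false;
  }
  return true;
}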
- Isolate* const isolate = boilerplate->GetIsolate(); Handle elements(boilerplate->elements(), isolate); if (elements->length() > 0 && elements->map() != ReadOnlyRoots(isolate).fixed_cow_array_map()) { @@ -780,6 +869,18 @@ class AllocationSiteData : public HeapObjectData { bool serialized_boilerplate_ = false; }; +class BigIntData : public HeapObjectData { + public: + BigIntData(JSHeapBroker* broker, ObjectData** storage, Handle object) + : HeapObjectData(broker, storage, object), + as_uint64_(object->AsUint64(nullptr)) {} + + uint64_t AsUint64() const { return as_uint64_; } + + private: + const uint64_t as_uint64_; +}; + // Only used in JSNativeContextSpecialization. class ScriptContextTableData : public HeapObjectData { public: @@ -1215,7 +1316,8 @@ JSObjectData::JSObjectData(JSHeapBroker* broker, ObjectData** storage, Handle object) : HeapObjectData(broker, storage, object), inobject_fields_(broker->zone()), - own_constant_elements_(broker->zone()) {} + own_constant_elements_(broker->zone()), + own_properties_(broker->zone()) {} FixedArrayData::FixedArrayData(JSHeapBroker* broker, ObjectData** storage, Handle object) @@ -1282,18 +1384,106 @@ class BytecodeArrayData : public FixedArrayBaseData { return incoming_new_target_or_generator_register_; } + uint8_t get(int index) const { + DCHECK(is_serialized_for_compilation_); + return bytecodes_[index]; + } + + Address GetFirstBytecodeAddress() const { + return reinterpret_cast
(bytecodes_.data()); + } + + Handle GetConstantAtIndex(int index, Isolate* isolate) const { + return constant_pool_[index]->object(); + } + + bool IsConstantAtIndexSmi(int index) const { + return constant_pool_[index]->is_smi(); + } + + Smi GetConstantAtIndexAsSmi(int index) const { + return *(Handle::cast(constant_pool_[index]->object())); + } + + bool IsSerializedForCompilation() const { + return is_serialized_for_compilation_; + } + + void SerializeForCompilation(JSHeapBroker* broker) { + if (is_serialized_for_compilation_) return; + + Handle bytecode_array = + Handle::cast(object()); + + DCHECK(bytecodes_.empty()); + bytecodes_.reserve(bytecode_array->length()); + for (int i = 0; i < bytecode_array->length(); i++) { + bytecodes_.push_back(bytecode_array->get(i)); + } + + DCHECK(constant_pool_.empty()); + Handle constant_pool(bytecode_array->constant_pool(), + broker->isolate()); + constant_pool_.reserve(constant_pool->length()); + for (int i = 0; i < constant_pool->length(); i++) { + constant_pool_.push_back(broker->GetOrCreateData(constant_pool->get(i))); + } + + Handle source_position_table( + bytecode_array->SourcePositionTableIfCollected(), broker->isolate()); + source_positions_.reserve(source_position_table->length()); + for (int i = 0; i < source_position_table->length(); i++) { + source_positions_.push_back(source_position_table->get(i)); + } + + Handle handlers(bytecode_array->handler_table(), + broker->isolate()); + handler_table_.reserve(handlers->length()); + for (int i = 0; i < handlers->length(); i++) { + handler_table_.push_back(handlers->get(i)); + } + + is_serialized_for_compilation_ = true; + } + + const byte* source_positions_address() const { + return source_positions_.data(); + } + + size_t source_positions_size() const { return source_positions_.size(); } + + Address handler_table_address() const { + CHECK(is_serialized_for_compilation_); + return reinterpret_cast
(handler_table_.data()); + } + + int handler_table_size() const { + CHECK(is_serialized_for_compilation_); + return static_cast(handler_table_.size()); + } + BytecodeArrayData(JSHeapBroker* broker, ObjectData** storage, Handle object) : FixedArrayBaseData(broker, storage, object), register_count_(object->register_count()), parameter_count_(object->parameter_count()), incoming_new_target_or_generator_register_( - object->incoming_new_target_or_generator_register()) {} + object->incoming_new_target_or_generator_register()), + bytecodes_(broker->zone()), + source_positions_(broker->zone()), + handler_table_(broker->zone()), + constant_pool_(broker->zone()) {} private: int const register_count_; int const parameter_count_; interpreter::Register const incoming_new_target_or_generator_register_; + + bool is_serialized_for_compilation_ = false; + ZoneVector bytecodes_; + ZoneVector source_positions_; + ZoneVector handler_table_; + ZoneVector constant_pool_; }; class JSArrayData : public JSObjectData { @@ -1377,6 +1567,22 @@ class SharedFunctionInfoData : public HeapObjectData { void SetSerializedForCompilation(JSHeapBroker* broker, FeedbackVectorRef feedback); bool IsSerializedForCompilation(FeedbackVectorRef feedback) const; + void SerializeFunctionTemplateInfo(JSHeapBroker* broker); + FunctionTemplateInfoData* function_template_info() const { + return function_template_info_; + } + JSArrayData* GetTemplateObject(FeedbackSlot slot) const { + auto lookup_it = template_objects_.find(slot.ToInt()); + if (lookup_it != template_objects_.cend()) { + return lookup_it->second; + } + return nullptr; + } + void SetTemplateObject(FeedbackSlot slot, JSArrayData* object) { + CHECK( + template_objects_.insert(std::make_pair(slot.ToInt(), object)).second); + } + #define DECL_ACCESSOR(type, name) \ type name() const { return name##_; } BROKER_SFI_FIELDS(DECL_ACCESSOR) @@ -1391,6 +1597,8 @@ class SharedFunctionInfoData : public HeapObjectData { #define DECL_MEMBER(type, name) type const name##_; BROKER_SFI_FIELDS(DECL_MEMBER) #undef DECL_MEMBER + FunctionTemplateInfoData* function_template_info_; + ZoneMap template_objects_; }; SharedFunctionInfoData::SharedFunctionInfoData( @@ -1408,7 +1616,9 @@ SharedFunctionInfoData::SharedFunctionInfoData( #define INIT_MEMBER(type, name) , name##_(object->name()) BROKER_SFI_FIELDS(INIT_MEMBER) #undef INIT_MEMBER -{ + , + function_template_info_(nullptr), + template_objects_(broker->zone()) { DCHECK_EQ(HasBuiltinId_, builtin_id_ != Builtins::kNoBuiltinId); DCHECK_EQ(HasBytecodeArray_, GetBytecodeArray_ != nullptr); } @@ -1420,15 +1630,28 @@ void SharedFunctionInfoData::SetSerializedForCompilation( << " as serialized for compilation"); } +void SharedFunctionInfoData::SerializeFunctionTemplateInfo( + JSHeapBroker* broker) { + if (function_template_info_) return; + + function_template_info_ = + broker + ->GetOrCreateData(handle( + Handle::cast(object())->function_data(), + broker->isolate())) + ->AsFunctionTemplateInfo(); +} + bool SharedFunctionInfoData::IsSerializedForCompilation( FeedbackVectorRef feedback) const { return serialized_for_compilation_.find(feedback.object()) != serialized_for_compilation_.end(); } -class ModuleData : public HeapObjectData { +class SourceTextModuleData : public HeapObjectData { public: - ModuleData(JSHeapBroker* broker, ObjectData** storage, Handle object); + SourceTextModuleData(JSHeapBroker* broker, ObjectData** storage, + Handle object); void Serialize(JSHeapBroker* broker); CellData* GetCell(int cell_index) const; @@ -1439,35 +1662,36 @@ 
class ModuleData : public HeapObjectData { ZoneVector exports_; }; -ModuleData::ModuleData(JSHeapBroker* broker, ObjectData** storage, - Handle object) +SourceTextModuleData::SourceTextModuleData(JSHeapBroker* broker, + ObjectData** storage, + Handle object) : HeapObjectData(broker, storage, object), imports_(broker->zone()), exports_(broker->zone()) {} -CellData* ModuleData::GetCell(int cell_index) const { +CellData* SourceTextModuleData::GetCell(int cell_index) const { CHECK(serialized_); CellData* cell; - switch (ModuleDescriptor::GetCellIndexKind(cell_index)) { - case ModuleDescriptor::kImport: - cell = imports_.at(Module::ImportIndex(cell_index)); + switch (SourceTextModuleDescriptor::GetCellIndexKind(cell_index)) { + case SourceTextModuleDescriptor::kImport: + cell = imports_.at(SourceTextModule::ImportIndex(cell_index)); break; - case ModuleDescriptor::kExport: - cell = exports_.at(Module::ExportIndex(cell_index)); + case SourceTextModuleDescriptor::kExport: + cell = exports_.at(SourceTextModule::ExportIndex(cell_index)); break; - case ModuleDescriptor::kInvalid: + case SourceTextModuleDescriptor::kInvalid: UNREACHABLE(); } CHECK_NOT_NULL(cell); return cell; } -void ModuleData::Serialize(JSHeapBroker* broker) { +void SourceTextModuleData::Serialize(JSHeapBroker* broker) { if (serialized_) return; serialized_ = true; - TraceScope tracer(broker, this, "ModuleData::Serialize"); - Handle module = Handle::cast(object()); + TraceScope tracer(broker, this, "SourceTextModuleData::Serialize"); + Handle module = Handle::cast(object()); // TODO(neis): We could be smarter and only serialize the cells we care about. // TODO(neis): Define a helper for serializing a FixedArray into a ZoneVector. @@ -1614,7 +1838,7 @@ bool JSObjectData::cow_or_empty_elements_tenured() const { FixedArrayBaseData* JSObjectData::elements() const { return elements_; } void JSObjectData::SerializeAsBoilerplate(JSHeapBroker* broker) { - SerializeRecursive(broker, kMaxFastLiteralDepth); + SerializeRecursiveAsBoilerplate(broker, kMaxFastLiteralDepth); } void JSObjectData::SerializeElements(JSHeapBroker* broker) { @@ -1717,11 +1941,13 @@ void MapData::SerializeOwnDescriptor(JSHeapBroker* broker, << contents.size() << " total)"); } -void JSObjectData::SerializeRecursive(JSHeapBroker* broker, int depth) { +void JSObjectData::SerializeRecursiveAsBoilerplate(JSHeapBroker* broker, + int depth) { if (serialized_as_boilerplate_) return; serialized_as_boilerplate_ = true; - TraceScope tracer(broker, this, "JSObjectData::SerializeRecursive"); + TraceScope tracer(broker, this, + "JSObjectData::SerializeRecursiveAsBoilerplate"); Handle boilerplate = Handle::cast(object()); // We only serialize boilerplates that pass the IsInlinableFastLiteral @@ -1767,7 +1993,8 @@ void JSObjectData::SerializeRecursive(JSHeapBroker* broker, int depth) { Handle value(fast_elements->get(i), isolate); if (value->IsJSObject()) { ObjectData* value_data = broker->GetOrCreateData(value); - value_data->AsJSObject()->SerializeRecursive(broker, depth - 1); + value_data->AsJSObject()->SerializeRecursiveAsBoilerplate(broker, + depth - 1); } } } else { @@ -1802,9 +2029,22 @@ void JSObjectData::SerializeRecursive(JSHeapBroker* broker, int depth) { } else { Handle value(boilerplate->RawFastPropertyAt(field_index), isolate); + // In case of unboxed double fields we use a sentinel NaN value to mark + // uninitialized fields. A boilerplate value with such a field may migrate + // from its unboxed double to a tagged representation. 
In the process the + // raw double is converted to a heap number. The sentinel value carries no + // special meaning when it occurs in a heap number, so we would like to + // recover the uninitialized value. + // We check for the sentinel here, specifically, since migrations might + // have been triggered as part of boilerplate serialization. + if (value->IsHeapNumber() && + HeapNumber::cast(*value).value_as_bits() == kHoleNanInt64) { + value = isolate->factory()->uninitialized_value(); + } ObjectData* value_data = broker->GetOrCreateData(value); if (value->IsJSObject()) { - value_data->AsJSObject()->SerializeRecursive(broker, depth - 1); + value_data->AsJSObject()->SerializeRecursiveAsBoilerplate(broker, + depth - 1); } inobject_fields_.push_back(JSObjectField{value_data}); } @@ -1839,35 +2079,50 @@ bool ObjectRef::equals(const ObjectRef& other) const { Isolate* ObjectRef::isolate() const { return broker()->isolate(); } -ContextRef ContextRef::previous() const { +ContextRef ContextRef::previous(size_t* depth, bool serialize) const { + DCHECK_NOT_NULL(depth); if (broker()->mode() == JSHeapBroker::kDisabled) { AllowHandleAllocation handle_allocation; AllowHandleDereference handle_dereference; - return ContextRef(broker(), - handle(object()->previous(), broker()->isolate())); + Context current = *object(); + while (*depth != 0 && current.unchecked_previous().IsContext()) { + current = Context::cast(current.unchecked_previous()); + (*depth)--; + } + return ContextRef(broker(), handle(current, broker()->isolate())); } - return ContextRef(broker(), data()->AsContext()->previous()); + ContextData* current = this->data()->AsContext(); + return ContextRef(broker(), current->previous(broker(), depth, serialize)); } -// Not needed for TypedLowering. -ObjectRef ContextRef::get(int index) const { +base::Optional ContextRef::get(int index, bool serialize) const { if (broker()->mode() == JSHeapBroker::kDisabled) { AllowHandleAllocation handle_allocation; AllowHandleDereference handle_dereference; Handle value(object()->get(index), broker()->isolate()); return ObjectRef(broker(), value); } - return ObjectRef(broker(), data()->AsContext()->GetSlot(index)); + ObjectData* optional_slot = + data()->AsContext()->GetSlot(broker(), index, serialize); + if (optional_slot != nullptr) { + return ObjectRef(broker(), optional_slot); + } + return base::nullopt; } -JSHeapBroker::JSHeapBroker(Isolate* isolate, Zone* broker_zone) +JSHeapBroker::JSHeapBroker(Isolate* isolate, Zone* broker_zone, + bool tracing_enabled) : isolate_(isolate), broker_zone_(broker_zone), current_zone_(broker_zone), refs_(new (zone()) RefsMap(kMinimalRefsBucketCount, AddressMatcher(), zone())), array_and_object_prototypes_(zone()), - feedback_(zone()) { + tracing_enabled_(tracing_enabled), + feedback_(zone()), + bytecode_analyses_(zone()), + ais_for_loading_then_(zone()), + ais_for_loading_exec_(zone()) { // Note that this initialization of the refs_ pointer with the minimal // initial capacity is redundant in the normal use case (concurrent // compilation enabled, standard objects to be serialized), as the map @@ -1939,7 +2194,9 @@ void JSHeapBroker::SerializeShareableObjects() { { Builtins::Name builtins[] = { Builtins::kAllocateInYoungGeneration, + Builtins::kAllocateRegularInYoungGeneration, Builtins::kAllocateInOldGeneration, + Builtins::kAllocateRegularInOldGeneration, Builtins::kArgumentsAdaptorTrampoline, Builtins::kArrayConstructorImpl, Builtins::kCallFunctionForwardVarargs, @@ -2400,6 +2657,11 @@ bool 
AllocationSiteRef::IsFastLiteral() const { return data()->AsAllocationSite()->IsFastLiteral(); } +void JSObjectRef::SerializeElements() { + CHECK_EQ(broker()->mode(), JSHeapBroker::kSerializing); + data()->AsJSObject()->SerializeElements(broker()); +} + void JSObjectRef::EnsureElementsTenured() { if (broker()->mode() == JSHeapBroker::kDisabled) { AllowHandleAllocation allow_handle_allocation; @@ -2553,6 +2815,95 @@ double FixedDoubleArrayRef::get_scalar(int i) const { return data()->AsFixedDoubleArray()->Get(i).get_scalar(); } +uint8_t BytecodeArrayRef::get(int index) const { + if (broker()->mode() == JSHeapBroker::kDisabled) { + AllowHandleAllocation handle_allocation; + AllowHandleDereference allow_handle_dereference; + return object()->get(index); + } + return data()->AsBytecodeArray()->get(index); +} + +Address BytecodeArrayRef::GetFirstBytecodeAddress() const { + if (broker()->mode() == JSHeapBroker::kDisabled) { + AllowHandleAllocation handle_allocation; + AllowHandleDereference allow_handle_dereference; + return object()->GetFirstBytecodeAddress(); + } + return data()->AsBytecodeArray()->GetFirstBytecodeAddress(); +} + +Handle BytecodeArrayRef::GetConstantAtIndex(int index) const { + if (broker()->mode() == JSHeapBroker::kDisabled) { + AllowHandleAllocation handle_allocation; + AllowHandleDereference allow_handle_dereference; + return handle(object()->constant_pool().get(index), broker()->isolate()); + } + return data()->AsBytecodeArray()->GetConstantAtIndex(index, + broker()->isolate()); +} + +bool BytecodeArrayRef::IsConstantAtIndexSmi(int index) const { + if (broker()->mode() == JSHeapBroker::kDisabled) { + AllowHandleAllocation handle_allocation; + AllowHandleDereference allow_handle_dereference; + return object()->constant_pool().get(index).IsSmi(); + } + return data()->AsBytecodeArray()->IsConstantAtIndexSmi(index); +} + +Smi BytecodeArrayRef::GetConstantAtIndexAsSmi(int index) const { + if (broker()->mode() == JSHeapBroker::kDisabled) { + AllowHandleAllocation handle_allocation; + AllowHandleDereference allow_handle_dereference; + return Smi::cast(object()->constant_pool().get(index)); + } + return data()->AsBytecodeArray()->GetConstantAtIndexAsSmi(index); +} + +bool BytecodeArrayRef::IsSerializedForCompilation() const { + if (broker()->mode() == JSHeapBroker::kDisabled) return true; + return data()->AsBytecodeArray()->IsSerializedForCompilation(); +} + +void BytecodeArrayRef::SerializeForCompilation() { + if (broker()->mode() == JSHeapBroker::kDisabled) return; + data()->AsBytecodeArray()->SerializeForCompilation(broker()); +} + +const byte* BytecodeArrayRef::source_positions_address() const { + if (broker()->mode() == JSHeapBroker::kDisabled) { + AllowHandleDereference allow_handle_dereference; + return object()->SourcePositionTableIfCollected().GetDataStartAddress(); + } + return data()->AsBytecodeArray()->source_positions_address(); +} + +int BytecodeArrayRef::source_positions_size() const { + if (broker()->mode() == JSHeapBroker::kDisabled) { + AllowHandleDereference allow_handle_dereference; + return object()->SourcePositionTableIfCollected().length(); + } + return static_cast(data()->AsBytecodeArray()->source_positions_size()); +} + +Address BytecodeArrayRef::handler_table_address() const { + if (broker()->mode() == JSHeapBroker::kDisabled) { + AllowHandleDereference allow_handle_dereference; + return reinterpret_cast
( + object()->handler_table().GetDataStartAddress()); + } + return data()->AsBytecodeArray()->handler_table_address(); +} + +int BytecodeArrayRef::handler_table_size() const { + if (broker()->mode() == JSHeapBroker::kDisabled) { + AllowHandleDereference allow_handle_dereference; + return object()->handler_table().length(); + } + return data()->AsBytecodeArray()->handler_table_size(); +} + #define IF_BROKER_DISABLED_ACCESS_HANDLE_C(holder, name) \ if (broker()->mode() == JSHeapBroker::kDisabled) { \ AllowHandleAllocation handle_allocation; \ @@ -2630,15 +2981,13 @@ BIMODAL_ACCESSOR_C(JSTypedArray, size_t, length) BIMODAL_ACCESSOR(JSTypedArray, HeapObject, buffer) BIMODAL_ACCESSOR_B(Map, bit_field2, elements_kind, Map::ElementsKindBits) -BIMODAL_ACCESSOR_B(Map, bit_field2, is_extensible, Map::IsExtensibleBit) -BIMODAL_ACCESSOR_B(Map, bit_field2, has_hidden_prototype, - Map::HasHiddenPrototypeBit) -BIMODAL_ACCESSOR_B(Map, bit_field3, is_deprecated, Map::IsDeprecatedBit) BIMODAL_ACCESSOR_B(Map, bit_field3, is_dictionary_map, Map::IsDictionaryMapBit) +BIMODAL_ACCESSOR_B(Map, bit_field3, is_deprecated, Map::IsDeprecatedBit) BIMODAL_ACCESSOR_B(Map, bit_field3, NumberOfOwnDescriptors, Map::NumberOfOwnDescriptorsBits) BIMODAL_ACCESSOR_B(Map, bit_field3, is_migration_target, Map::IsMigrationTargetBit) +BIMODAL_ACCESSOR_B(Map, bit_field3, is_extensible, Map::IsExtensibleBit) BIMODAL_ACCESSOR_B(Map, bit_field, has_prototype_slot, Map::HasPrototypeSlotBit) BIMODAL_ACCESSOR_B(Map, bit_field, is_access_check_needed, Map::IsAccessCheckNeededBit) @@ -2663,7 +3012,109 @@ BROKER_NATIVE_CONTEXT_FIELDS(DEF_NATIVE_CONTEXT_ACCESSOR) BIMODAL_ACCESSOR(PropertyCell, Object, value) BIMODAL_ACCESSOR_C(PropertyCell, PropertyDetails, property_details) -BIMODAL_ACCESSOR(FunctionTemplateInfo, Object, call_code) +base::Optional FunctionTemplateInfoRef::call_code() const { + if (broker()->mode() == JSHeapBroker::kDisabled) { + return CallHandlerInfoRef( + broker(), handle(object()->call_code(), broker()->isolate())); + } + CallHandlerInfoData* call_code = + data()->AsFunctionTemplateInfo()->call_code(); + if (!call_code) return base::nullopt; + return CallHandlerInfoRef(broker(), call_code); +} + +bool FunctionTemplateInfoRef::is_signature_undefined() const { + if (broker()->mode() == JSHeapBroker::kDisabled) { + AllowHandleDereference allow_handle_dereference; + AllowHandleAllocation allow_handle_allocation; + + return object()->signature().IsUndefined(broker()->isolate()); + } + return data()->AsFunctionTemplateInfo()->is_signature_undefined(); +} + +bool FunctionTemplateInfoRef::has_call_code() const { + if (broker()->mode() == JSHeapBroker::kDisabled) { + AllowHandleDereference allow_handle_dereference; + AllowHandleAllocation allow_handle_allocation; + + CallOptimization call_optimization(broker()->isolate(), object()); + return call_optimization.is_simple_api_call(); + } + return data()->AsFunctionTemplateInfo()->has_call_code(); +} + +BIMODAL_ACCESSOR_C(FunctionTemplateInfo, bool, accept_any_receiver) + +HolderLookupResult FunctionTemplateInfoRef::LookupHolderOfExpectedType( + MapRef receiver_map, bool serialize) { + const HolderLookupResult not_found; + + if (broker()->mode() == JSHeapBroker::kDisabled) { + AllowHandleDereference allow_handle_dereference; + AllowHandleAllocation allow_handle_allocation; + + CallOptimization call_optimization(broker()->isolate(), object()); + Handle receiver_map_ref(receiver_map.object()); + if (!receiver_map_ref->IsJSReceiverMap() || + (receiver_map_ref->is_access_check_needed() && 
+ !object()->accept_any_receiver())) { + return not_found; + } + + HolderLookupResult result; + Handle holder = call_optimization.LookupHolderOfExpectedType( + receiver_map_ref, &result.lookup); + + switch (result.lookup) { + case CallOptimization::kHolderFound: + result.holder = JSObjectRef(broker(), holder); + break; + default: + DCHECK_EQ(result.holder, base::nullopt); + break; + } + return result; + } + + FunctionTemplateInfoData* fti_data = data()->AsFunctionTemplateInfo(); + KnownReceiversMap::iterator lookup_it = + fti_data->known_receivers().find(receiver_map.data()->AsMap()); + if (lookup_it != fti_data->known_receivers().cend()) { + return lookup_it->second; + } + if (!serialize) { + TRACE_BROKER_MISSING(broker(), + "holder for receiver with map " << receiver_map); + return not_found; + } + if (!receiver_map.IsJSReceiverMap() || + (receiver_map.is_access_check_needed() && !accept_any_receiver())) { + fti_data->known_receivers().insert( + {receiver_map.data()->AsMap(), not_found}); + return not_found; + } + + HolderLookupResult result; + CallOptimization call_optimization(broker()->isolate(), object()); + Handle holder = call_optimization.LookupHolderOfExpectedType( + receiver_map.object(), &result.lookup); + + switch (result.lookup) { + case CallOptimization::kHolderFound: { + result.holder = JSObjectRef(broker(), holder); + fti_data->known_receivers().insert( + {receiver_map.data()->AsMap(), result}); + break; + } + default: { + DCHECK_EQ(result.holder, base::nullopt); + fti_data->known_receivers().insert( + {receiver_map.data()->AsMap(), result}); + } + } + return result; +} BIMODAL_ACCESSOR(CallHandlerInfo, Object, data) @@ -2746,11 +3197,21 @@ bool StringRef::IsSeqString() const { return data()->AsString()->is_seq_string(); } +ScopeInfoRef NativeContextRef::scope_info() const { + if (broker()->mode() == JSHeapBroker::kDisabled) { + AllowHandleAllocation handle_allocation; + AllowHandleDereference handle_dereference; + return ScopeInfoRef(broker(), + handle(object()->scope_info(), broker()->isolate())); + } + return ScopeInfoRef(broker(), data()->AsNativeContext()->scope_info()); +} + MapRef NativeContextRef::GetFunctionMapFromIndex(int index) const { DCHECK_GE(index, Context::FIRST_FUNCTION_MAP_INDEX); DCHECK_LE(index, Context::LAST_FUNCTION_MAP_INDEX); if (broker()->mode() == JSHeapBroker::kDisabled) { - return get(index).AsMap(); + return get(index).value().AsMap(); } return MapRef(broker(), data()->AsNativeContext()->function_maps().at( index - Context::FIRST_FUNCTION_MAP_INDEX)); @@ -2853,6 +3314,19 @@ base::Optional ObjectRef::GetOwnConstantElement( return ObjectRef(broker(), element); } +base::Optional JSObjectRef::GetOwnProperty( + Representation field_representation, FieldIndex index, + bool serialize) const { + if (broker()->mode() == JSHeapBroker::kDisabled) { + return GetOwnPropertyFromHeap(broker(), Handle::cast(object()), + field_representation, index); + } + ObjectData* property = data()->AsJSObject()->GetOwnProperty( + broker(), field_representation, index, serialize); + if (property == nullptr) return base::nullopt; + return ObjectRef(broker(), property); +} + base::Optional JSArrayRef::GetOwnCowElement(uint32_t index, bool serialize) const { if (broker()->mode() == JSHeapBroker::kDisabled) { @@ -2884,14 +3358,19 @@ double MutableHeapNumberRef::value() const { return data()->AsMutableHeapNumber()->value(); } -CellRef ModuleRef::GetCell(int cell_index) const { +uint64_t BigIntRef::AsUint64() const { + IF_BROKER_DISABLED_ACCESS_HANDLE_C(BigInt, AsUint64); + 
return data()->AsBigInt()->AsUint64(); +} + +CellRef SourceTextModuleRef::GetCell(int cell_index) const { if (broker()->mode() == JSHeapBroker::kDisabled) { AllowHandleAllocation handle_allocation; AllowHandleDereference allow_handle_dereference; return CellRef(broker(), handle(object()->GetCell(cell_index), broker()->isolate())); } - return CellRef(broker(), data()->AsModule()->GetCell(cell_index)); + return CellRef(broker(), data()->AsSourceTextModule()->GetCell(cell_index)); } ObjectRef::ObjectRef(JSHeapBroker* broker, Handle object) @@ -3108,6 +3587,8 @@ void NativeContextData::Serialize(JSHeapBroker* broker) { for (int i = first; i <= last; ++i) { function_maps_.push_back(broker->GetOrCreateData(context->get(i))->AsMap()); } + + scope_info_ = broker->GetOrCreateData(context->scope_info())->AsScopeInfo(); } void JSFunctionRef::Serialize() { @@ -3133,6 +3614,46 @@ bool JSFunctionRef::IsSerializedForCompilation() const { shared().IsSerializedForCompilation(feedback_vector()); } +JSArrayRef SharedFunctionInfoRef::GetTemplateObject(ObjectRef description, + FeedbackVectorRef vector, + FeedbackSlot slot, + bool serialize) { + // Look in the feedback vector for the array. A Smi indicates that it's + // not yet cached here. + ObjectRef candidate = vector.get(slot); + if (!candidate.IsSmi()) { + return candidate.AsJSArray(); + } + + if (broker()->mode() == JSHeapBroker::kDisabled) { + AllowHandleAllocation handle_allocation; + AllowHandleDereference allow_handle_dereference; + Handle tod = + Handle::cast(description.object()); + Handle template_object = + TemplateObjectDescription::GetTemplateObject( + broker()->isolate(), broker()->native_context().object(), tod, + object(), slot.ToInt()); + return JSArrayRef(broker(), template_object); + } + + JSArrayData* array = data()->AsSharedFunctionInfo()->GetTemplateObject(slot); + if (array != nullptr) return JSArrayRef(broker(), array); + + CHECK(serialize); + CHECK(broker()->SerializingAllowed()); + + Handle tod = + Handle::cast(description.object()); + Handle template_object = + TemplateObjectDescription::GetTemplateObject( + broker()->isolate(), broker()->native_context().object(), tod, + object(), slot.ToInt()); + array = broker()->GetOrCreateData(template_object)->AsJSArray(); + data()->AsSharedFunctionInfo()->SetTemplateObject(slot, array); + return JSArrayRef(broker(), array); +} + void SharedFunctionInfoRef::SetSerializedForCompilation( FeedbackVectorRef feedback) { CHECK_EQ(broker()->mode(), JSHeapBroker::kSerializing); @@ -3140,9 +3661,27 @@ void SharedFunctionInfoRef::SetSerializedForCompilation( feedback); } +void SharedFunctionInfoRef::SerializeFunctionTemplateInfo() { + CHECK_EQ(broker()->mode(), JSHeapBroker::kSerializing); + + data()->AsSharedFunctionInfo()->SerializeFunctionTemplateInfo(broker()); +} + +base::Optional +SharedFunctionInfoRef::function_template_info() const { + if (broker()->mode() == JSHeapBroker::kDisabled) { + return FunctionTemplateInfoRef( + broker(), handle(object()->function_data(), broker()->isolate())); + } + FunctionTemplateInfoData* function_template_info = + data()->AsSharedFunctionInfo()->function_template_info(); + if (!function_template_info) return base::nullopt; + return FunctionTemplateInfoRef(broker(), function_template_info); +} + bool SharedFunctionInfoRef::IsSerializedForCompilation( FeedbackVectorRef feedback) const { - CHECK_NE(broker()->mode(), JSHeapBroker::kDisabled); + if (broker()->mode() == JSHeapBroker::kDisabled) return true; return 
data()->AsSharedFunctionInfo()->IsSerializedForCompilation(feedback); } @@ -3181,22 +3720,10 @@ bool MapRef::serialized_prototype() const { return data()->AsMap()->serialized_prototype(); } -void ModuleRef::Serialize() { +void SourceTextModuleRef::Serialize() { if (broker()->mode() == JSHeapBroker::kDisabled) return; CHECK_EQ(broker()->mode(), JSHeapBroker::kSerializing); - data()->AsModule()->Serialize(broker()); -} - -void ContextRef::SerializeContextChain() { - if (broker()->mode() == JSHeapBroker::kDisabled) return; - CHECK_EQ(broker()->mode(), JSHeapBroker::kSerializing); - data()->AsContext()->SerializeContextChain(broker()); -} - -void ContextRef::SerializeSlot(int index) { - if (broker()->mode() == JSHeapBroker::kDisabled) return; - CHECK_EQ(broker()->mode(), JSHeapBroker::kSerializing); - data()->AsContext()->SerializeSlot(broker(), index); + data()->AsSourceTextModule()->Serialize(broker()); } void NativeContextRef::Serialize() { @@ -3228,10 +3755,10 @@ void PropertyCellRef::Serialize() { data()->AsPropertyCell()->Serialize(broker()); } -void FunctionTemplateInfoRef::Serialize() { +void FunctionTemplateInfoRef::SerializeCallCode() { if (broker()->mode() == JSHeapBroker::kDisabled) return; CHECK_EQ(broker()->mode(), JSHeapBroker::kSerializing); - data()->AsFunctionTemplateInfo()->Serialize(broker()); + data()->AsFunctionTemplateInfo()->SerializeCallCode(broker()); } base::Optional JSGlobalProxyRef::GetPropertyCell( @@ -3307,10 +3834,67 @@ base::Optional GlobalAccessFeedback::GetConstantHint() const { return {}; } -ElementAccessFeedback::ElementAccessFeedback(Zone* zone) +KeyedAccessMode KeyedAccessMode::FromNexus(FeedbackNexus const& nexus) { + if (IsKeyedLoadICKind(nexus.kind())) { + return KeyedAccessMode(AccessMode::kLoad, nexus.GetKeyedAccessLoadMode()); + } + if (IsKeyedHasICKind(nexus.kind())) { + return KeyedAccessMode(AccessMode::kHas, nexus.GetKeyedAccessLoadMode()); + } + if (IsKeyedStoreICKind(nexus.kind())) { + return KeyedAccessMode(AccessMode::kStore, nexus.GetKeyedAccessStoreMode()); + } + if (IsStoreInArrayLiteralICKind(nexus.kind())) { + return KeyedAccessMode(AccessMode::kStoreInLiteral, + nexus.GetKeyedAccessStoreMode()); + } + UNREACHABLE(); +} + +AccessMode KeyedAccessMode::access_mode() const { return access_mode_; } + +bool KeyedAccessMode::IsLoad() const { + return access_mode_ == AccessMode::kLoad || access_mode_ == AccessMode::kHas; +} +bool KeyedAccessMode::IsStore() const { + return access_mode_ == AccessMode::kStore || + access_mode_ == AccessMode::kStoreInLiteral; +} + +KeyedAccessLoadMode KeyedAccessMode::load_mode() const { + CHECK(IsLoad()); + return load_store_mode_.load_mode; +} + +KeyedAccessStoreMode KeyedAccessMode::store_mode() const { + CHECK(IsStore()); + return load_store_mode_.store_mode; +} + +KeyedAccessMode::LoadStoreMode::LoadStoreMode(KeyedAccessLoadMode load_mode) + : load_mode(load_mode) {} +KeyedAccessMode::LoadStoreMode::LoadStoreMode(KeyedAccessStoreMode store_mode) + : store_mode(store_mode) {} + +KeyedAccessMode::KeyedAccessMode(AccessMode access_mode, + KeyedAccessLoadMode load_mode) + : access_mode_(access_mode), load_store_mode_(load_mode) { + CHECK(!IsStore()); + CHECK(IsLoad()); +} +KeyedAccessMode::KeyedAccessMode(AccessMode access_mode, + KeyedAccessStoreMode store_mode) + : access_mode_(access_mode), load_store_mode_(store_mode) { + CHECK(!IsLoad()); + CHECK(IsStore()); +} + +ElementAccessFeedback::ElementAccessFeedback(Zone* zone, + KeyedAccessMode const& keyed_mode) : ProcessedFeedback(kElementAccess), 
receiver_maps(zone), - transitions(zone) {} + transitions(zone), + keyed_mode(keyed_mode) {} ElementAccessFeedback::MapIterator::MapIterator( ElementAccessFeedback const& processed, JSHeapBroker* broker) @@ -3383,7 +3967,7 @@ GlobalAccessFeedback const* JSHeapBroker::GetGlobalAccessFeedback( } ElementAccessFeedback const* JSHeapBroker::ProcessFeedbackMapsForElementAccess( - MapHandles const& maps) { + MapHandles const& maps, KeyedAccessMode const& keyed_mode) { DCHECK(!maps.empty()); // Collect possible transition targets. @@ -3397,7 +3981,8 @@ ElementAccessFeedback const* JSHeapBroker::ProcessFeedbackMapsForElementAccess( } } - ElementAccessFeedback* result = new (zone()) ElementAccessFeedback(zone()); + ElementAccessFeedback* result = + new (zone()) ElementAccessFeedback(zone(), keyed_mode); // Separate the actual receiver maps and the possible transition sources. for (Handle map : maps) { @@ -3464,7 +4049,7 @@ GlobalAccessFeedback const* JSHeapBroker::ProcessFeedbackForGlobalAccess( } ContextRef context_ref(this, context); if (immutable) { - context_ref.SerializeSlot(context_slot_index); + context_ref.get(context_slot_index, true); } return new (zone()) GlobalAccessFeedback(context_ref, context_slot_index, immutable); @@ -3489,6 +4074,54 @@ base::Optional JSHeapBroker::GetNameFeedback( return NameRef(this, handle(raw_name, isolate())); } +PropertyAccessInfo JSHeapBroker::GetAccessInfoForLoadingThen(MapRef map) { + auto access_info = ais_for_loading_then_.find(map); + if (access_info == ais_for_loading_then_.end()) { + TRACE_BROKER_MISSING( + this, "access info for reducing JSResolvePromise with map " << map); + return PropertyAccessInfo::Invalid(zone()); + } + return access_info->second; +} + +void JSHeapBroker::CreateAccessInfoForLoadingThen( + MapRef map, CompilationDependencies* dependencies) { + auto access_info = ais_for_loading_then_.find(map); + if (access_info == ais_for_loading_then_.end()) { + AccessInfoFactory access_info_factory(this, dependencies, zone()); + Handle then_string = isolate()->factory()->then_string(); + ais_for_loading_then_.insert( + std::make_pair(map, access_info_factory.ComputePropertyAccessInfo( + map.object(), then_string, AccessMode::kLoad))); + } +} + +PropertyAccessInfo JSHeapBroker::GetAccessInfoForLoadingExec(MapRef map) { + auto access_info = ais_for_loading_exec_.find(map); + if (access_info == ais_for_loading_exec_.end()) { + TRACE_BROKER_MISSING(this, + "access info for property 'exec' on map " << map); + return PropertyAccessInfo::Invalid(zone()); + } + return access_info->second; +} + +PropertyAccessInfo const& JSHeapBroker::CreateAccessInfoForLoadingExec( + MapRef map, CompilationDependencies* dependencies) { + auto access_info = ais_for_loading_exec_.find(map); + if (access_info != ais_for_loading_exec_.end()) { + return access_info->second; + } + + ZoneVector access_infos(zone()); + AccessInfoFactory access_info_factory(this, dependencies, zone()); + PropertyAccessInfo ai_exec = access_info_factory.ComputePropertyAccessInfo( + map.object(), isolate()->factory()->exec_string(), AccessMode::kLoad); + + auto inserted_ai = ais_for_loading_exec_.insert(std::make_pair(map, ai_exec)); + return inserted_ai.first->second; +} + ElementAccessFeedback const* ProcessedFeedback::AsElementAccess() const { CHECK_EQ(kElementAccess, kind()); return static_cast(this); @@ -3499,6 +4132,66 @@ NamedAccessFeedback const* ProcessedFeedback::AsNamedAccess() const { return static_cast(this); } +BytecodeAnalysis const& JSHeapBroker::GetBytecodeAnalysis( + Handle 
+BytecodeAnalysis const& JSHeapBroker::GetBytecodeAnalysis(
+    Handle<BytecodeArray> bytecode_array, BailoutId osr_bailout_id,
+    bool analyze_liveness, bool serialize) {
+  ObjectData* bytecode_array_data = GetData(bytecode_array);
+  CHECK_NOT_NULL(bytecode_array_data);
+
+  auto it = bytecode_analyses_.find(bytecode_array_data);
+  if (it != bytecode_analyses_.end()) {
+    // Bytecode analysis can be run for OSR or for non-OSR. In the rare case
+    // where we optimize for OSR and consider the top-level function itself for
+    // inlining (because of recursion), we need both the OSR and the non-OSR
+    // analysis. Fortunately, the only difference between the two lies in
+    // whether the OSR entry offset gets computed (from the OSR bailout id).
+    // Hence it's okay to reuse the OSR-version when asked for the non-OSR
+    // version, such that we need to store at most one analysis result per
+    // bytecode array.
+    CHECK_IMPLIES(osr_bailout_id != it->second->osr_bailout_id(),
+                  osr_bailout_id.IsNone());
+    CHECK_EQ(analyze_liveness, it->second->liveness_analyzed());
+    return *it->second;
+  }
+
+  CHECK(serialize);
+  BytecodeAnalysis* analysis = new (zone()) BytecodeAnalysis(
+      bytecode_array, zone(), osr_bailout_id, analyze_liveness);
+  DCHECK_EQ(analysis->osr_bailout_id(), osr_bailout_id);
+  bytecode_analyses_[bytecode_array_data] = analysis;
+  return *analysis;
+}
+
+OffHeapBytecodeArray::OffHeapBytecodeArray(BytecodeArrayRef bytecode_array)
+    : array_(bytecode_array) {}
+
+int OffHeapBytecodeArray::length() const { return array_.length(); }
+
+int OffHeapBytecodeArray::parameter_count() const {
+  return array_.parameter_count();
+}
+
+uint8_t OffHeapBytecodeArray::get(int index) const { return array_.get(index); }
+
+void OffHeapBytecodeArray::set(int index, uint8_t value) { UNREACHABLE(); }
+
+Address OffHeapBytecodeArray::GetFirstBytecodeAddress() const {
+  return array_.GetFirstBytecodeAddress();
+}
+
+Handle<Object> OffHeapBytecodeArray::GetConstantAtIndex(
+    int index, Isolate* isolate) const {
+  return array_.GetConstantAtIndex(index);
+}
+
+bool OffHeapBytecodeArray::IsConstantAtIndexSmi(int index) const {
+  return array_.IsConstantAtIndexSmi(index);
+}
+
+Smi OffHeapBytecodeArray::GetConstantAtIndexAsSmi(int index) const {
+  return array_.GetConstantAtIndexAsSmi(index);
+}
+
 #undef BIMODAL_ACCESSOR
 #undef BIMODAL_ACCESSOR_B
 #undef BIMODAL_ACCESSOR_C
diff --git a/deps/v8/src/compiler/js-heap-broker.h b/deps/v8/src/compiler/js-heap-broker.h
index 2c4cc766bced00..ffc10d2b93a023 100644
--- a/deps/v8/src/compiler/js-heap-broker.h
+++ b/deps/v8/src/compiler/js-heap-broker.h
@@ -8,796 +8,24 @@
 #include "src/base/compiler-specific.h"
 #include "src/base/optional.h"
 #include "src/common/globals.h"
+#include "src/compiler/access-info.h"
 #include "src/compiler/refs-map.h"
 #include "src/handles/handles.h"
+#include "src/interpreter/bytecode-array-accessor.h"
 #include "src/objects/feedback-vector.h"
 #include "src/objects/function-kind.h"
-#include "src/objects/instance-type.h"
 #include "src/objects/objects.h"
 #include "src/utils/ostreams.h"
 #include "src/zone/zone-containers.h"
 
 namespace v8 {
 namespace internal {
-
-class BytecodeArray;
-class CallHandlerInfo;
-class FixedDoubleArray;
-class FunctionTemplateInfo;
-class HeapNumber;
-class InternalizedString;
-class JSBoundFunction;
-class JSDataView;
-class JSGlobalProxy;
-class JSRegExp;
-class JSTypedArray;
-class NativeContext;
-class ScriptContextTable;
-class VectorSlotPair;
-
 namespace compiler {
 
-// Whether we are loading a property or storing to a property.
-// For a store during literal creation, do not walk up the prototype chain.
-enum class AccessMode { kLoad, kStore, kStoreInLiteral, kHas }; - -enum class OddballType : uint8_t { - kNone, // Not an Oddball. - kBoolean, // True or False. - kUndefined, - kNull, - kHole, - kUninitialized, - kOther // Oddball, but none of the above. -}; - -// This list is sorted such that subtypes appear before their supertypes. -// DO NOT VIOLATE THIS PROPERTY! -#define HEAP_BROKER_OBJECT_LIST(V) \ - /* Subtypes of JSObject */ \ - V(JSArray) \ - V(JSBoundFunction) \ - V(JSDataView) \ - V(JSFunction) \ - V(JSGlobalProxy) \ - V(JSRegExp) \ - V(JSTypedArray) \ - /* Subtypes of Context */ \ - V(NativeContext) \ - /* Subtypes of FixedArray */ \ - V(Context) \ - V(ScopeInfo) \ - V(ScriptContextTable) \ - /* Subtypes of FixedArrayBase */ \ - V(BytecodeArray) \ - V(FixedArray) \ - V(FixedDoubleArray) \ - /* Subtypes of Name */ \ - V(InternalizedString) \ - V(String) \ - V(Symbol) \ - /* Subtypes of HeapObject */ \ - V(AllocationSite) \ - V(CallHandlerInfo) \ - V(Cell) \ - V(Code) \ - V(DescriptorArray) \ - V(FeedbackCell) \ - V(FeedbackVector) \ - V(FixedArrayBase) \ - V(FunctionTemplateInfo) \ - V(HeapNumber) \ - V(JSObject) \ - V(Map) \ - V(Module) \ - V(MutableHeapNumber) \ - V(Name) \ - V(PropertyCell) \ - V(SharedFunctionInfo) \ - /* Subtypes of Object */ \ - V(HeapObject) - -class CompilationDependencies; -class JSHeapBroker; -class ObjectData; -class PerIsolateCompilerCache; -class PropertyAccessInfo; -#define FORWARD_DECL(Name) class Name##Ref; -HEAP_BROKER_OBJECT_LIST(FORWARD_DECL) -#undef FORWARD_DECL - -class V8_EXPORT_PRIVATE ObjectRef { - public: - ObjectRef(JSHeapBroker* broker, Handle object); - ObjectRef(JSHeapBroker* broker, ObjectData* data) - : data_(data), broker_(broker) { - CHECK_NOT_NULL(data_); - } - - Handle object() const; - - bool equals(const ObjectRef& other) const; - - bool IsSmi() const; - int AsSmi() const; - -#define HEAP_IS_METHOD_DECL(Name) bool Is##Name() const; - HEAP_BROKER_OBJECT_LIST(HEAP_IS_METHOD_DECL) -#undef HEAP_IS_METHOD_DECL - -#define HEAP_AS_METHOD_DECL(Name) Name##Ref As##Name() const; - HEAP_BROKER_OBJECT_LIST(HEAP_AS_METHOD_DECL) -#undef HEAP_AS_METHOD_DECL - - bool IsNullOrUndefined() const; - - bool BooleanValue() const; - Maybe OddballToNumber() const; - - // Return the element at key {index} if {index} is known to be an own data - // property of the object that is non-writable and non-configurable. - base::Optional GetOwnConstantElement(uint32_t index, - bool serialize = false) const; - - Isolate* isolate() const; - - protected: - JSHeapBroker* broker() const; - ObjectData* data() const; - ObjectData* data_; // Should be used only by object() getters. - - private: - friend class JSArrayData; - friend class JSGlobalProxyRef; - friend class JSGlobalProxyData; - friend class JSObjectData; - friend class StringData; - - friend std::ostream& operator<<(std::ostream& os, const ObjectRef& ref); - - JSHeapBroker* broker_; -}; - +class BytecodeAnalysis; +class ObjectRef; std::ostream& operator<<(std::ostream& os, const ObjectRef& ref); -// Temporary class that carries information from a Map. We'd like to remove -// this class and use MapRef instead, but we can't as long as we support the -// kDisabled broker mode. That's because obtaining the MapRef via -// HeapObjectRef::map() requires a HandleScope when the broker is disabled. -// During OptimizeGraph we generally don't have a HandleScope, however. There -// are two places where we therefore use GetHeapObjectType() instead. Both that -// function and this class should eventually be removed. 
-class HeapObjectType { - public: - enum Flag : uint8_t { kUndetectable = 1 << 0, kCallable = 1 << 1 }; - - using Flags = base::Flags; - - HeapObjectType(InstanceType instance_type, Flags flags, - OddballType oddball_type) - : instance_type_(instance_type), - oddball_type_(oddball_type), - flags_(flags) { - DCHECK_EQ(instance_type == ODDBALL_TYPE, - oddball_type != OddballType::kNone); - } - - OddballType oddball_type() const { return oddball_type_; } - InstanceType instance_type() const { return instance_type_; } - Flags flags() const { return flags_; } - - bool is_callable() const { return flags_ & kCallable; } - bool is_undetectable() const { return flags_ & kUndetectable; } - - private: - InstanceType const instance_type_; - OddballType const oddball_type_; - Flags const flags_; -}; - -class HeapObjectRef : public ObjectRef { - public: - using ObjectRef::ObjectRef; - Handle object() const; - - MapRef map() const; - - // See the comment on the HeapObjectType class. - HeapObjectType GetHeapObjectType() const; -}; - -class PropertyCellRef : public HeapObjectRef { - public: - using HeapObjectRef::HeapObjectRef; - Handle object() const; - - PropertyDetails property_details() const; - - void Serialize(); - ObjectRef value() const; -}; - -class JSObjectRef : public HeapObjectRef { - public: - using HeapObjectRef::HeapObjectRef; - Handle object() const; - - uint64_t RawFastDoublePropertyAsBitsAt(FieldIndex index) const; - double RawFastDoublePropertyAt(FieldIndex index) const; - ObjectRef RawFastPropertyAt(FieldIndex index) const; - - FixedArrayBaseRef elements() const; - void EnsureElementsTenured(); - ElementsKind GetElementsKind() const; - - void SerializeObjectCreateMap(); - base::Optional GetObjectCreateMap() const; -}; - -class JSDataViewRef : public JSObjectRef { - public: - using JSObjectRef::JSObjectRef; - Handle object() const; - - size_t byte_length() const; - size_t byte_offset() const; -}; - -class JSBoundFunctionRef : public JSObjectRef { - public: - using JSObjectRef::JSObjectRef; - Handle object() const; - - void Serialize(); - - // The following are available only after calling Serialize(). - ObjectRef bound_target_function() const; - ObjectRef bound_this() const; - FixedArrayRef bound_arguments() const; -}; - -class V8_EXPORT_PRIVATE JSFunctionRef : public JSObjectRef { - public: - using JSObjectRef::JSObjectRef; - Handle object() const; - - bool has_feedback_vector() const; - bool has_initial_map() const; - bool has_prototype() const; - bool PrototypeRequiresRuntimeLookup() const; - - void Serialize(); - bool serialized() const; - - // The following are available only after calling Serialize(). 
- ObjectRef prototype() const; - MapRef initial_map() const; - ContextRef context() const; - NativeContextRef native_context() const; - SharedFunctionInfoRef shared() const; - FeedbackVectorRef feedback_vector() const; - int InitialMapInstanceSizeWithMinSlack() const; - - bool IsSerializedForCompilation() const; -}; - -class JSRegExpRef : public JSObjectRef { - public: - using JSObjectRef::JSObjectRef; - Handle object() const; - - ObjectRef raw_properties_or_hash() const; - ObjectRef data() const; - ObjectRef source() const; - ObjectRef flags() const; - ObjectRef last_index() const; -}; - -class HeapNumberRef : public HeapObjectRef { - public: - using HeapObjectRef::HeapObjectRef; - Handle object() const; - - double value() const; -}; - -class MutableHeapNumberRef : public HeapObjectRef { - public: - using HeapObjectRef::HeapObjectRef; - Handle object() const; - - double value() const; -}; - -class ContextRef : public HeapObjectRef { - public: - using HeapObjectRef::HeapObjectRef; - Handle object() const; - - void SerializeContextChain(); - ContextRef previous() const; - - void SerializeSlot(int index); - ObjectRef get(int index) const; -}; - -#define BROKER_COMPULSORY_NATIVE_CONTEXT_FIELDS(V) \ - V(JSFunction, array_function) \ - V(JSFunction, boolean_function) \ - V(JSFunction, bigint_function) \ - V(JSFunction, number_function) \ - V(JSFunction, object_function) \ - V(JSFunction, promise_function) \ - V(JSFunction, promise_then) \ - V(JSFunction, string_function) \ - V(JSFunction, symbol_function) \ - V(JSGlobalProxy, global_proxy_object) \ - V(JSObject, promise_prototype) \ - V(Map, bound_function_with_constructor_map) \ - V(Map, bound_function_without_constructor_map) \ - V(Map, fast_aliased_arguments_map) \ - V(Map, initial_array_iterator_map) \ - V(Map, initial_string_iterator_map) \ - V(Map, iterator_result_map) \ - V(Map, js_array_holey_double_elements_map) \ - V(Map, js_array_holey_elements_map) \ - V(Map, js_array_holey_smi_elements_map) \ - V(Map, js_array_packed_double_elements_map) \ - V(Map, js_array_packed_elements_map) \ - V(Map, js_array_packed_smi_elements_map) \ - V(Map, sloppy_arguments_map) \ - V(Map, slow_object_with_null_prototype_map) \ - V(Map, strict_arguments_map) \ - V(ScriptContextTable, script_context_table) \ - V(SharedFunctionInfo, promise_capability_default_reject_shared_fun) \ - V(SharedFunctionInfo, promise_catch_finally_shared_fun) \ - V(SharedFunctionInfo, promise_then_finally_shared_fun) \ - V(SharedFunctionInfo, promise_capability_default_resolve_shared_fun) - -// Those are set by Bootstrapper::ExportFromRuntime, which may not yet have -// happened when Turbofan is invoked via --always-opt. 
-#define BROKER_OPTIONAL_NATIVE_CONTEXT_FIELDS(V) \ - V(Map, async_function_object_map) \ - V(Map, map_key_iterator_map) \ - V(Map, map_key_value_iterator_map) \ - V(Map, map_value_iterator_map) \ - V(Map, set_key_value_iterator_map) \ - V(Map, set_value_iterator_map) - -#define BROKER_NATIVE_CONTEXT_FIELDS(V) \ - BROKER_COMPULSORY_NATIVE_CONTEXT_FIELDS(V) \ - BROKER_OPTIONAL_NATIVE_CONTEXT_FIELDS(V) - -class NativeContextRef : public ContextRef { - public: - using ContextRef::ContextRef; - Handle object() const; - - void Serialize(); - -#define DECL_ACCESSOR(type, name) type##Ref name() const; - BROKER_NATIVE_CONTEXT_FIELDS(DECL_ACCESSOR) -#undef DECL_ACCESSOR - - MapRef GetFunctionMapFromIndex(int index) const; - MapRef GetInitialJSArrayMap(ElementsKind kind) const; - base::Optional GetConstructorFunction(const MapRef& map) const; -}; - -class NameRef : public HeapObjectRef { - public: - using HeapObjectRef::HeapObjectRef; - Handle object() const; - - bool IsUniqueName() const; -}; - -class ScriptContextTableRef : public HeapObjectRef { - public: - using HeapObjectRef::HeapObjectRef; - Handle object() const; - - struct LookupResult { - ContextRef context; - bool immutable; - int index; - }; - - base::Optional lookup(const NameRef& name) const; -}; - -class DescriptorArrayRef : public HeapObjectRef { - public: - using HeapObjectRef::HeapObjectRef; - Handle object() const; -}; - -class FeedbackCellRef : public HeapObjectRef { - public: - using HeapObjectRef::HeapObjectRef; - Handle object() const; - - HeapObjectRef value() const; -}; - -class FeedbackVectorRef : public HeapObjectRef { - public: - using HeapObjectRef::HeapObjectRef; - Handle object() const; - - ObjectRef get(FeedbackSlot slot) const; - - void SerializeSlots(); -}; - -class FunctionTemplateInfoRef : public HeapObjectRef { - public: - using HeapObjectRef::HeapObjectRef; - Handle object() const; - - void Serialize(); - ObjectRef call_code() const; -}; - -class CallHandlerInfoRef : public HeapObjectRef { - public: - using HeapObjectRef::HeapObjectRef; - Handle object() const; - - Address callback() const; - - void Serialize(); - ObjectRef data() const; -}; - -class AllocationSiteRef : public HeapObjectRef { - public: - using HeapObjectRef::HeapObjectRef; - Handle object() const; - - bool PointsToLiteral() const; - AllocationType GetAllocationType() const; - ObjectRef nested_site() const; - - // {IsFastLiteral} determines whether the given array or object literal - // boilerplate satisfies all limits to be considered for fast deep-copying - // and computes the total size of all objects that are part of the graph. - // - // If PointsToLiteral() is false, then IsFastLiteral() is also false. - bool IsFastLiteral() const; - // We only serialize boilerplate if IsFastLiteral is true. 
- base::Optional boilerplate() const; - - ElementsKind GetElementsKind() const; - bool CanInlineCall() const; -}; - -class V8_EXPORT_PRIVATE MapRef : public HeapObjectRef { - public: - using HeapObjectRef::HeapObjectRef; - Handle object() const; - - int instance_size() const; - InstanceType instance_type() const; - int GetInObjectProperties() const; - int GetInObjectPropertiesStartInWords() const; - int NumberOfOwnDescriptors() const; - int GetInObjectPropertyOffset(int index) const; - int constructor_function_index() const; - int NextFreePropertyIndex() const; - int UnusedPropertyFields() const; - ElementsKind elements_kind() const; - bool is_stable() const; - bool is_extensible() const; - bool is_constructor() const; - bool has_prototype_slot() const; - bool is_access_check_needed() const; - bool is_deprecated() const; - bool CanBeDeprecated() const; - bool CanTransition() const; - bool IsInobjectSlackTrackingInProgress() const; - bool is_dictionary_map() const; - bool IsFixedCowArrayMap() const; - bool IsPrimitiveMap() const; - bool is_undetectable() const; - bool is_callable() const; - bool has_indexed_interceptor() const; - bool has_hidden_prototype() const; - bool is_migration_target() const; - bool supports_fast_array_iteration() const; - bool supports_fast_array_resize() const; - bool IsMapOfCurrentGlobalProxy() const; - - OddballType oddball_type() const; - -#define DEF_TESTER(Type, ...) bool Is##Type##Map() const; - INSTANCE_TYPE_CHECKERS(DEF_TESTER) -#undef DEF_TESTER - - void SerializeBackPointer(); - HeapObjectRef GetBackPointer() const; - - void SerializePrototype(); - bool serialized_prototype() const; - HeapObjectRef prototype() const; - - void SerializeForElementLoad(); - - void SerializeForElementStore(); - bool HasOnlyStablePrototypesWithFastElements( - ZoneVector* prototype_maps); - - // Concerning the underlying instance_descriptors: - void SerializeOwnDescriptors(); - void SerializeOwnDescriptor(int descriptor_index); - MapRef FindFieldOwner(int descriptor_index) const; - PropertyDetails GetPropertyDetails(int descriptor_index) const; - NameRef GetPropertyKey(int descriptor_index) const; - FieldIndex GetFieldIndexFor(int descriptor_index) const; - ObjectRef GetFieldType(int descriptor_index) const; - bool IsUnboxedDoubleField(int descriptor_index) const; - - // Available after calling JSFunctionRef::Serialize on a function that has - // this map as initial map. 
- ObjectRef GetConstructor() const; - base::Optional AsElementsKind(ElementsKind kind) const; -}; - -class FixedArrayBaseRef : public HeapObjectRef { - public: - using HeapObjectRef::HeapObjectRef; - Handle object() const; - - int length() const; -}; - -class FixedArrayRef : public FixedArrayBaseRef { - public: - using FixedArrayBaseRef::FixedArrayBaseRef; - Handle object() const; - - ObjectRef get(int i) const; -}; - -class FixedDoubleArrayRef : public FixedArrayBaseRef { - public: - using FixedArrayBaseRef::FixedArrayBaseRef; - Handle object() const; - - double get_scalar(int i) const; - bool is_the_hole(int i) const; -}; - -class BytecodeArrayRef : public FixedArrayBaseRef { - public: - using FixedArrayBaseRef::FixedArrayBaseRef; - Handle object() const; - - int register_count() const; - int parameter_count() const; - interpreter::Register incoming_new_target_or_generator_register() const; -}; - -class JSArrayRef : public JSObjectRef { - public: - using JSObjectRef::JSObjectRef; - Handle object() const; - - ObjectRef length() const; - - // Return the element at key {index} if the array has a copy-on-write elements - // storage and {index} is known to be an own data property. - base::Optional GetOwnCowElement(uint32_t index, - bool serialize = false) const; -}; - -class ScopeInfoRef : public HeapObjectRef { - public: - using HeapObjectRef::HeapObjectRef; - Handle object() const; - - int ContextLength() const; -}; - -#define BROKER_SFI_FIELDS(V) \ - V(int, internal_formal_parameter_count) \ - V(bool, has_duplicate_parameters) \ - V(int, function_map_index) \ - V(FunctionKind, kind) \ - V(LanguageMode, language_mode) \ - V(bool, native) \ - V(bool, HasBreakInfo) \ - V(bool, HasBuiltinId) \ - V(bool, construct_as_builtin) \ - V(bool, HasBytecodeArray) \ - V(bool, is_safe_to_skip_arguments_adaptor) \ - V(bool, IsInlineable) \ - V(bool, is_compiled) - -class V8_EXPORT_PRIVATE SharedFunctionInfoRef : public HeapObjectRef { - public: - using HeapObjectRef::HeapObjectRef; - Handle object() const; - - int builtin_id() const; - BytecodeArrayRef GetBytecodeArray() const; - -#define DECL_ACCESSOR(type, name) type name() const; - BROKER_SFI_FIELDS(DECL_ACCESSOR) -#undef DECL_ACCESSOR - - bool IsSerializedForCompilation(FeedbackVectorRef feedback) const; - void SetSerializedForCompilation(FeedbackVectorRef feedback); -}; - -class StringRef : public NameRef { - public: - using NameRef::NameRef; - Handle object() const; - - int length() const; - uint16_t GetFirstChar(); - base::Optional ToNumber(); - bool IsSeqString() const; - bool IsExternalString() const; -}; - -class SymbolRef : public NameRef { - public: - using NameRef::NameRef; - Handle object() const; -}; - -class JSTypedArrayRef : public JSObjectRef { - public: - using JSObjectRef::JSObjectRef; - Handle object() const; - - bool is_on_heap() const; - size_t length() const; - void* external_pointer() const; - - void Serialize(); - bool serialized() const; - - HeapObjectRef buffer() const; -}; - -class ModuleRef : public HeapObjectRef { - public: - using HeapObjectRef::HeapObjectRef; - Handle object() const; - - void Serialize(); - - CellRef GetCell(int cell_index) const; -}; - -class CellRef : public HeapObjectRef { - public: - using HeapObjectRef::HeapObjectRef; - Handle object() const; - - ObjectRef value() const; -}; - -class JSGlobalProxyRef : public JSObjectRef { - public: - using JSObjectRef::JSObjectRef; - Handle object() const; - - // If {serialize} is false: - // If the property is known to exist as a property cell (on the global - // 
object), return that property cell. Otherwise (not known to exist as a
-  //     property cell or known not to exist as a property cell) return nothing.
-  //   If {serialize} is true:
-  //     Like above but potentially access the heap and serialize the necessary
-  //     information.
-  base::Optional<PropertyCellRef> GetPropertyCell(NameRef const& name,
-                                                  bool serialize = false) const;
-};
-
-class CodeRef : public HeapObjectRef {
- public:
-  using HeapObjectRef::HeapObjectRef;
-  Handle<Code> object() const;
-};
-
-class InternalizedStringRef : public StringRef {
- public:
-  using StringRef::StringRef;
-  Handle<InternalizedString> object() const;
-};
-
-class ElementAccessFeedback;
-class NamedAccessFeedback;
-
-class ProcessedFeedback : public ZoneObject {
- public:
-  enum Kind { kInsufficient, kGlobalAccess, kNamedAccess, kElementAccess };
-  Kind kind() const { return kind_; }
-
-  ElementAccessFeedback const* AsElementAccess() const;
-  NamedAccessFeedback const* AsNamedAccess() const;
-
- protected:
-  explicit ProcessedFeedback(Kind kind) : kind_(kind) {}
-
- private:
-  Kind const kind_;
-};
-
-class InsufficientFeedback final : public ProcessedFeedback {
- public:
-  InsufficientFeedback();
-};
-
-class GlobalAccessFeedback : public ProcessedFeedback {
- public:
-  explicit GlobalAccessFeedback(PropertyCellRef cell);
-  GlobalAccessFeedback(ContextRef script_context, int slot_index,
-                       bool immutable);
-
-  bool IsPropertyCell() const;
-  PropertyCellRef property_cell() const;
-
-  bool IsScriptContextSlot() const { return !IsPropertyCell(); }
-  ContextRef script_context() const;
-  int slot_index() const;
-  bool immutable() const;
-
-  base::Optional<ObjectRef> GetConstantHint() const;
-
- private:
-  ObjectRef const cell_or_context_;
-  int const index_and_immutable_;
-};
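The ProcessedFeedback hierarchy being moved out of this header keys a small closed set of subclasses by a Kind enum; the As* accessors CHECK the tag and then static_cast, which avoids RTTI. A self-contained miniature of that kind-tagged downcast (illustrative names, not V8 API):

#include <cassert>

class ElementFeedback;

class Feedback {
 public:
  enum Kind { kNamed, kElement };
  Kind kind() const { return kind_; }

  ElementFeedback const* AsElement() const;  // checked downcast, see below

 protected:
  explicit Feedback(Kind kind) : kind_(kind) {}

 private:
  Kind const kind_;
};

class ElementFeedback : public Feedback {
 public:
  ElementFeedback() : Feedback(kElement) {}
};

// The tag check makes the static_cast safe without RTTI: every object
// constructed with kElement is, by construction, an ElementFeedback.
ElementFeedback const* Feedback::AsElement() const {
  assert(kind_ == kElement);
  return static_cast<ElementFeedback const*>(this);
}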
-class ElementAccessFeedback : public ProcessedFeedback {
- public:
-  explicit ElementAccessFeedback(Zone* zone);
-
-  // No transition sources appear in {receiver_maps}.
-  // All transition targets appear in {receiver_maps}.
-  ZoneVector<Handle<Map>> receiver_maps;
-  ZoneVector<std::pair<Handle<Map>, Handle<Map>>> transitions;
-
-  class MapIterator {
-   public:
-    bool done() const;
-    void advance();
-    MapRef current() const;
-
-   private:
-    friend class ElementAccessFeedback;
-
-    explicit MapIterator(ElementAccessFeedback const& processed,
-                         JSHeapBroker* broker);
-
-    ElementAccessFeedback const& processed_;
-    JSHeapBroker* const broker_;
-    size_t index_ = 0;
-  };
-
-  // Iterator over all maps: first {receiver_maps}, then transition sources.
-  MapIterator all_maps(JSHeapBroker* broker) const;
-};
-
-class NamedAccessFeedback : public ProcessedFeedback {
- public:
-  NamedAccessFeedback(NameRef const& name,
-                      ZoneVector<PropertyAccessInfo> const& access_infos);
-
-  NameRef const& name() const { return name_; }
-  ZoneVector<PropertyAccessInfo> const& access_infos() const {
-    return access_infos_;
-  }
-
- private:
-  NameRef const name_;
-  ZoneVector<PropertyAccessInfo> const access_infos_;
-};
-
 struct FeedbackSource {
   FeedbackSource(Handle<FeedbackVector> vector_, FeedbackSlot slot_)
       : vector(vector_), slot(slot_) {}
@@ -821,26 +49,28 @@ struct FeedbackSource {
   };
 };
 
-#define TRACE_BROKER(broker, x)                                       \
-  do {                                                                \
-    if (FLAG_trace_heap_broker_verbose) broker->Trace() << x << '\n'; \
+#define TRACE_BROKER(broker, x)                                      \
+  do {                                                               \
+    if (broker->tracing_enabled() && FLAG_trace_heap_broker_verbose) \
+      broker->Trace() << x << '\n';                                  \
   } while (false)
 
 #define TRACE_BROKER_MISSING(broker, x)                             \
   do {                                                              \
-    if (FLAG_trace_heap_broker)                                     \
+    if (broker->tracing_enabled())                                  \
       broker->Trace() << __FUNCTION__ << ": missing " << x << '\n'; \
   } while (false)
 
 class V8_EXPORT_PRIVATE JSHeapBroker {
  public:
-  JSHeapBroker(Isolate* isolate, Zone* broker_zone);
+  JSHeapBroker(Isolate* isolate, Zone* broker_zone, bool tracing_enabled);
 
   void SetNativeContextRef();
   void SerializeStandardObjects();
 
   Isolate* isolate() const { return isolate_; }
   Zone* zone() const { return current_zone_; }
+  bool tracing_enabled() const { return tracing_enabled_; }
   NativeContextRef native_context() const { return native_context_.value(); }
   PerIsolateCompilerCache* compiler_cache() const { return compiler_cache_; }
 
@@ -875,12 +105,25 @@ class V8_EXPORT_PRIVATE JSHeapBroker {
   // TODO(neis): Move these into serializer when we're always in the background.
   ElementAccessFeedback const* ProcessFeedbackMapsForElementAccess(
-      MapHandles const& maps);
+      MapHandles const& maps, KeyedAccessMode const& keyed_mode);
   GlobalAccessFeedback const* ProcessFeedbackForGlobalAccess(
       FeedbackSource const& source);
 
+  BytecodeAnalysis const& GetBytecodeAnalysis(
+      Handle<BytecodeArray> bytecode_array, BailoutId osr_offset,
+      bool analyze_liveness, bool serialize);
+
   base::Optional<NameRef> GetNameFeedback(FeedbackNexus const& nexus);
 
+  // If there is no result stored for {map}, we return an Invalid
+  // PropertyAccessInfo.
+  PropertyAccessInfo GetAccessInfoForLoadingThen(MapRef map);
+  void CreateAccessInfoForLoadingThen(MapRef map,
+                                      CompilationDependencies* dependencies);
+  PropertyAccessInfo GetAccessInfoForLoadingExec(MapRef map);
+  PropertyAccessInfo const& CreateAccessInfoForLoadingExec(
+      MapRef map, CompilationDependencies* dependencies);
+
   std::ostream& Trace();
   void IncrementTracingIndentation();
   void DecrementTracingIndentation();
@@ -902,12 +145,19 @@ class V8_EXPORT_PRIVATE JSHeapBroker {
                    Handle<JSObject>::equal_to>
       array_and_object_prototypes_;
   BrokerMode mode_ = kDisabled;
+  bool const tracing_enabled_;
   StdoutStream trace_out_;
   unsigned trace_indentation_ = 0;
   PerIsolateCompilerCache* compiler_cache_;
   ZoneUnorderedMap<FeedbackSource, ProcessedFeedback const*,
                    FeedbackSource::Hash, FeedbackSource::Equal>
       feedback_;
+  ZoneUnorderedMap<ObjectData*, BytecodeAnalysis*> bytecode_analyses_;
+  typedef ZoneUnorderedMap<MapRef, PropertyAccessInfo, ObjectRef::Hash,
+                           ObjectRef::Equal>
+      MapToAccessInfos;
+  MapToAccessInfos ais_for_loading_then_;
+  MapToAccessInfos ais_for_loading_exec_;
 
   static const size_t kMinimalRefsBucketCount = 8;     // must be power of 2
   static const size_t kInitialRefsBucketCount = 1024;  // must be power of 2
@@ -948,6 +198,23 @@ Reduction NoChangeBecauseOfMissingData(JSHeapBroker* broker,
 // compilation is finished.
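Both TRACE_BROKER macros above wrap their bodies in do { ... } while (false) so an expansion behaves as exactly one statement, and both are now gated on broker->tracing_enabled() before consulting the flags. A minimal sketch of why the wrapper matters (TRACE_EXAMPLE and Demo are illustrative, not part of the patch):

#include <iostream>

// Without the do/while(false) wrapper, a macro expanding to a bare
// if-statement would steal a following `else` from the enclosing code.
#define TRACE_EXAMPLE(enabled, x)          \
  do {                                     \
    if (enabled) std::cout << x << '\n';   \
  } while (false)

void Demo(bool condition) {
  if (condition)
    TRACE_EXAMPLE(true, "taken");      // expands to exactly one statement,
  else
    TRACE_EXAMPLE(true, "not taken");  // so this else still binds to `condition`
}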
bool CanInlineElementAccess(MapRef const& map); +class OffHeapBytecodeArray final : public interpreter::AbstractBytecodeArray { + public: + explicit OffHeapBytecodeArray(BytecodeArrayRef bytecode_array); + + int length() const override; + int parameter_count() const override; + uint8_t get(int index) const override; + void set(int index, uint8_t value) override; + Address GetFirstBytecodeAddress() const override; + Handle GetConstantAtIndex(int index, Isolate* isolate) const override; + bool IsConstantAtIndexSmi(int index) const override; + Smi GetConstantAtIndexAsSmi(int index) const override; + + private: + BytecodeArrayRef array_; +}; + } // namespace compiler } // namespace internal } // namespace v8 diff --git a/deps/v8/src/compiler/js-heap-copy-reducer.cc b/deps/v8/src/compiler/js-heap-copy-reducer.cc index cc48ae80cbc026..7e7c9e3a0e1e52 100644 --- a/deps/v8/src/compiler/js-heap-copy-reducer.cc +++ b/deps/v8/src/compiler/js-heap-copy-reducer.cc @@ -30,8 +30,7 @@ Reduction JSHeapCopyReducer::Reduce(Node* node) { ObjectRef object(broker(), HeapConstantOf(node->op())); if (object.IsJSFunction()) object.AsJSFunction().Serialize(); if (object.IsJSObject()) object.AsJSObject().SerializeObjectCreateMap(); - if (object.IsModule()) object.AsModule().Serialize(); - if (object.IsContext()) object.AsContext().SerializeContextChain(); + if (object.IsSourceTextModule()) object.AsSourceTextModule().Serialize(); break; } case IrOpcode::kJSCreateArray: { diff --git a/deps/v8/src/compiler/js-inlining-heuristic.cc b/deps/v8/src/compiler/js-inlining-heuristic.cc index f78635b1397560..e11d6b59a30349 100644 --- a/deps/v8/src/compiler/js-inlining-heuristic.cc +++ b/deps/v8/src/compiler/js-inlining-heuristic.cc @@ -7,6 +7,7 @@ #include "src/codegen/optimized-compilation-info.h" #include "src/compiler/common-operator.h" #include "src/compiler/compiler-source-position-table.h" +#include "src/compiler/js-heap-broker.h" #include "src/compiler/node-matchers.h" #include "src/compiler/simplified-operator.h" #include "src/objects/objects-inl.h" @@ -21,15 +22,9 @@ namespace compiler { } while (false) namespace { - -bool IsSmallInlineFunction(BytecodeArrayRef bytecode) { - // Forcibly inline small functions. - if (bytecode.length() <= FLAG_max_inlined_bytecode_size_small) { - return true; - } - return false; +bool IsSmall(BytecodeArrayRef bytecode) { + return bytecode.length() <= FLAG_max_inlined_bytecode_size_small; } - } // namespace JSInliningHeuristic::Candidate JSInliningHeuristic::CollectFunctions( @@ -65,7 +60,7 @@ JSInliningHeuristic::Candidate JSInliningHeuristic::CollectFunctions( out.functions[n] = m.Ref(broker()).AsJSFunction(); JSFunctionRef function = out.functions[n].value(); if (function.IsSerializedForCompilation()) { - out.bytecode[n] = function.shared().GetBytecodeArray(), isolate(); + out.bytecode[n] = function.shared().GetBytecodeArray(); } } out.num_functions = value_input_count; @@ -91,6 +86,11 @@ Reduction JSInliningHeuristic::Reduce(Node* node) { if (!IrOpcode::IsInlineeOpcode(node->opcode())) return NoChange(); + if (total_inlined_bytecode_size_ >= FLAG_max_inlined_bytecode_size_absolute && + mode_ != kStressInlining) { + return NoChange(); + } + // Check if we already saw that {node} before, and if so, just skip it. 
if (seen_.find(node->id()) != seen_.end()) return NoChange(); seen_.insert(node->id()); @@ -107,7 +107,7 @@ Reduction JSInliningHeuristic::Reduce(Node* node) { return NoChange(); } - bool can_inline = false, force_inline_small = true; + bool can_inline_candidate = false, candidate_is_small = true; candidate.total_size = 0; Node* frame_state = NodeProperties::GetFrameStateInput(node); FrameStateInfo const& frame_info = FrameStateInfoOf(frame_state->op()); @@ -155,15 +155,12 @@ Reduction JSInliningHeuristic::Reduce(Node* node) { // serialized. BytecodeArrayRef bytecode = candidate.bytecode[i].value(); if (candidate.can_inline_function[i]) { - can_inline = true; + can_inline_candidate = true; candidate.total_size += bytecode.length(); } - // We don't force inline small functions if any of them is not inlineable. - if (!IsSmallInlineFunction(bytecode)) { - force_inline_small = false; - } + candidate_is_small = candidate_is_small && IsSmall(bytecode); } - if (!can_inline) return NoChange(); + if (!can_inline_candidate) return NoChange(); // Gather feedback on how often this call site has been hit before. if (node->opcode() == IrOpcode::kJSCall) { @@ -195,9 +192,8 @@ Reduction JSInliningHeuristic::Reduce(Node* node) { } // Forcibly inline small functions here. In the case of polymorphic inlining - // force_inline_small is set only when all functions are small. - if (force_inline_small && - cumulative_count_ < FLAG_max_inlined_bytecode_size_absolute) { + // candidate_is_small is set only when all functions are small. + if (candidate_is_small) { TRACE("Inlining small function(s) at call site #%d:%s\n", node->id(), node->op()->mnemonic()); return InlineCandidate(candidate, true); @@ -221,21 +217,24 @@ void JSInliningHeuristic::Finalize() { Candidate candidate = *i; candidates_.erase(i); + // Make sure we don't try to inline dead candidate nodes. + if (candidate.node->IsDead()) { + continue; + } + // Make sure we have some extra budget left, so that any small functions // exposed by this function would be given a chance to inline. double size_of_candidate = candidate.total_size * FLAG_reserve_inline_budget_scale_factor; - int total_size = cumulative_count_ + static_cast(size_of_candidate); + int total_size = + total_inlined_bytecode_size_ + static_cast(size_of_candidate); if (total_size > FLAG_max_inlined_bytecode_size_cumulative) { // Try if any smaller functions are available to inline. continue; } - // Make sure we don't try to inline dead candidate nodes. - if (!candidate.node->IsDead()) { - Reduction const reduction = InlineCandidate(candidate, false); - if (reduction.Changed()) return; - } + Reduction const reduction = InlineCandidate(candidate, false); + if (reduction.Changed()) return; } } @@ -630,7 +629,7 @@ Reduction JSInliningHeuristic::InlineCandidate(Candidate const& candidate, if (num_calls == 1) { Reduction const reduction = inliner_.ReduceJSCall(node); if (reduction.Changed()) { - cumulative_count_ += candidate.bytecode[0].value().length(); + total_inlined_bytecode_size_ += candidate.bytecode[0].value().length(); } return reduction; } @@ -688,20 +687,19 @@ Reduction JSInliningHeuristic::InlineCandidate(Candidate const& candidate, ReplaceWithValue(node, value, effect, control); // Inline the individual, cloned call sites. 
- for (int i = 0; i < num_calls; ++i) { - Node* node = calls[i]; + for (int i = 0; i < num_calls && total_inlined_bytecode_size_ < + FLAG_max_inlined_bytecode_size_absolute; + ++i) { if (candidate.can_inline_function[i] && - (small_function || - cumulative_count_ < FLAG_max_inlined_bytecode_size_cumulative)) { + (small_function || total_inlined_bytecode_size_ < + FLAG_max_inlined_bytecode_size_cumulative)) { + Node* node = calls[i]; Reduction const reduction = inliner_.ReduceJSCall(node); if (reduction.Changed()) { + total_inlined_bytecode_size_ += candidate.bytecode[i]->length(); // Killing the call node is not strictly necessary, but it is safer to // make sure we do not resurrect the node. node->Kill(); - // Small functions don't count towards the budget. - if (!small_function) { - cumulative_count_ += candidate.bytecode[i]->length(); - } } } } diff --git a/deps/v8/src/compiler/js-inlining-heuristic.h b/deps/v8/src/compiler/js-inlining-heuristic.h index 99ad258c31e0dc..b143e9b67fd846 100644 --- a/deps/v8/src/compiler/js-inlining-heuristic.h +++ b/deps/v8/src/compiler/js-inlining-heuristic.h @@ -97,7 +97,7 @@ class JSInliningHeuristic final : public AdvancedReducer { SourcePositionTable* source_positions_; JSGraph* const jsgraph_; JSHeapBroker* const broker_; - int cumulative_count_ = 0; + int total_inlined_bytecode_size_ = 0; }; } // namespace compiler diff --git a/deps/v8/src/compiler/js-inlining.cc b/deps/v8/src/compiler/js-inlining.cc index e43e710da779e5..91cbea2346a37b 100644 --- a/deps/v8/src/compiler/js-inlining.cc +++ b/deps/v8/src/compiler/js-inlining.cc @@ -7,11 +7,13 @@ #include "src/ast/ast.h" #include "src/codegen/compiler.h" #include "src/codegen/optimized-compilation-info.h" +#include "src/codegen/tick-counter.h" #include "src/compiler/all-nodes.h" #include "src/compiler/bytecode-graph-builder.h" #include "src/compiler/common-operator.h" #include "src/compiler/compiler-source-position-table.h" #include "src/compiler/graph-reducer.h" +#include "src/compiler/js-heap-broker.h" #include "src/compiler/js-operator.h" #include "src/compiler/node-matchers.h" #include "src/compiler/node-properties.h" @@ -466,14 +468,13 @@ Reduction JSInliner::ReduceJSCall(Node* node) { AllowHandleAllocation allow_handle_alloc; AllowHeapAllocation allow_heap_alloc; AllowCodeDependencyChange allow_code_dep_change; - Handle native_context = - handle(info_->native_context(), isolate()); - - BuildGraphFromBytecode(broker(), zone(), bytecode_array.object(), - shared_info.value().object(), - feedback_vector.object(), BailoutId::None(), - jsgraph(), call.frequency(), source_positions_, - native_context, inlining_id, flags); + CallFrequency frequency = call.frequency(); + Handle native_context(info_->native_context(), isolate()); + BuildGraphFromBytecode( + broker(), zone(), bytecode_array.object(), + shared_info.value().object(), feedback_vector.object(), + BailoutId::None(), jsgraph(), frequency, source_positions_, + native_context, inlining_id, flags, &info_->tick_counter()); } // Extract the inlinee start/end nodes. 
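The js-inlining-heuristic hunks above rename cumulative_count_ to total_inlined_bytecode_size_ and enforce two limits: Reduce() bails out once an absolute cap is reached, and Finalize() skips a candidate (trying smaller ones instead) when the scaled reserve would exceed the cumulative cap. A compact sketch of that greedy budgeting, where the constants and function are stand-ins for the FLAG_max_inlined_bytecode_size_* values rather than V8 API:

#include <vector>

constexpr int kAbsoluteCap = 5000;    // hard stop, like the check in Reduce()
constexpr int kCumulativeCap = 1000;  // per-graph budget, like Finalize()

int InlineGreedily(const std::vector<int>& candidate_sizes) {
  int total = 0;
  int inlined = 0;
  for (int size : candidate_sizes) {
    if (total >= kAbsoluteCap) break;             // stop inlining entirely
    if (total + size > kCumulativeCap) continue;  // skip; a smaller one may fit
    total += size;
    ++inlined;
  }
  return inlined;
}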
diff --git a/deps/v8/src/compiler/js-inlining.h b/deps/v8/src/compiler/js-inlining.h index 94a9e71b2e27d0..f50f7b591d2559 100644 --- a/deps/v8/src/compiler/js-inlining.h +++ b/deps/v8/src/compiler/js-inlining.h @@ -59,7 +59,8 @@ class JSInliner final : public AdvancedReducer { SourcePositionTable* const source_positions_; base::Optional DetermineCallTarget(Node* node); - FeedbackVectorRef DetermineCallContext(Node* node, Node*& context_out); + FeedbackVectorRef DetermineCallContext( + Node* node, Node*& context_out); // NOLINT(runtime/references) Node* CreateArtificialFrameState(Node* node, Node* outer_frame_state, int parameter_count, BailoutId bailout_id, diff --git a/deps/v8/src/compiler/js-native-context-specialization.cc b/deps/v8/src/compiler/js-native-context-specialization.cc index 312ab38f517380..7d742a5f326f10 100644 --- a/deps/v8/src/compiler/js-native-context-specialization.cc +++ b/deps/v8/src/compiler/js-native-context-specialization.cc @@ -33,12 +33,6 @@ namespace v8 { namespace internal { namespace compiler { -// This is needed for gc_mole which will compile this file without the full set -// of GN defined macros. -#ifndef V8_TYPED_ARRAY_MAX_SIZE_IN_HEAP -#define V8_TYPED_ARRAY_MAX_SIZE_IN_HEAP 64 -#endif - namespace { bool HasNumberMaps(JSHeapBroker* broker, ZoneVector> const& maps) { @@ -513,8 +507,8 @@ JSNativeContextSpecialization::InferHasInPrototypeChain( Node* receiver, Node* effect, Handle prototype) { ZoneHandleSet receiver_maps; NodeProperties::InferReceiverMapsResult result = - NodeProperties::InferReceiverMaps(broker(), receiver, effect, - &receiver_maps); + NodeProperties::InferReceiverMapsUnsafe(broker(), receiver, effect, + &receiver_maps); if (result == NodeProperties::kNoReceiverMaps) return kMayBeInPrototypeChain; // Try to determine either that all of the {receiver_maps} have the given @@ -686,6 +680,7 @@ Reduction JSNativeContextSpecialization::ReduceJSPromiseResolve(Node* node) { // ES section #sec-promise-resolve-functions Reduction JSNativeContextSpecialization::ReduceJSResolvePromise(Node* node) { + DisallowHeapAccessIf no_heap_access(FLAG_concurrent_inlining); DCHECK_EQ(IrOpcode::kJSResolvePromise, node->opcode()); Node* promise = NodeProperties::GetValueInput(node, 0); Node* resolution = NodeProperties::GetValueInput(node, 1); @@ -702,9 +697,17 @@ Reduction JSNativeContextSpecialization::ReduceJSResolvePromise(Node* node) { ZoneVector access_infos(graph()->zone()); AccessInfoFactory access_info_factory(broker(), dependencies(), graph()->zone()); - access_info_factory.ComputePropertyAccessInfos( - resolution_maps, factory()->then_string(), AccessMode::kLoad, - &access_infos); + if (!FLAG_concurrent_inlining) { + access_info_factory.ComputePropertyAccessInfos( + resolution_maps, factory()->then_string(), AccessMode::kLoad, + &access_infos); + } else { + // Obtain pre-computed access infos from the broker. 
+ for (auto map : resolution_maps) { + MapRef map_ref(broker(), map); + access_infos.push_back(broker()->GetAccessInfoForLoadingThen(map_ref)); + } + } PropertyAccessInfo access_info = access_info_factory.FinalizePropertyAccessInfosAsOne(access_infos, AccessMode::kLoad); @@ -975,9 +978,8 @@ Reduction JSNativeContextSpecialization::ReduceGlobalAccess( } Reduction JSNativeContextSpecialization::ReduceJSLoadGlobal(Node* node) { - DCHECK_EQ(IrOpcode::kJSLoadGlobal, node->opcode()); DisallowHeapAccessIf no_heap_acess(FLAG_concurrent_inlining); - + DCHECK_EQ(IrOpcode::kJSLoadGlobal, node->opcode()); LoadGlobalParameters const& p = LoadGlobalParametersOf(node->op()); if (!p.feedback().IsValid()) return NoChange(); FeedbackSource source(p.feedback()); @@ -1007,9 +1009,8 @@ Reduction JSNativeContextSpecialization::ReduceJSLoadGlobal(Node* node) { } Reduction JSNativeContextSpecialization::ReduceJSStoreGlobal(Node* node) { - DCHECK_EQ(IrOpcode::kJSStoreGlobal, node->opcode()); DisallowHeapAccessIf no_heap_acess(FLAG_concurrent_inlining); - + DCHECK_EQ(IrOpcode::kJSStoreGlobal, node->opcode()); Node* value = NodeProperties::GetValueInput(node, 0); StoreGlobalParameters const& p = StoreGlobalParametersOf(node->op()); @@ -1298,7 +1299,7 @@ Reduction JSNativeContextSpecialization::ReduceNamedAccess( } Reduction JSNativeContextSpecialization::ReduceNamedAccessFromNexus( - Node* node, Node* value, FeedbackNexus const& nexus, NameRef const& name, + Node* node, Node* value, FeedbackSource const& source, NameRef const& name, AccessMode access_mode) { DCHECK(node->opcode() == IrOpcode::kJSLoadNamed || node->opcode() == IrOpcode::kJSStoreNamed || @@ -1312,11 +1313,11 @@ Reduction JSNativeContextSpecialization::ReduceNamedAccessFromNexus( return ReduceGlobalAccess(node, nullptr, value, name, access_mode); } - return ReducePropertyAccessUsingProcessedFeedback(node, nullptr, name, value, - nexus, access_mode); + return ReducePropertyAccess(node, nullptr, name, value, source, access_mode); } Reduction JSNativeContextSpecialization::ReduceJSLoadNamed(Node* node) { + DisallowHeapAccessIf no_heap_access(FLAG_concurrent_inlining); DCHECK_EQ(IrOpcode::kJSLoadNamed, node->opcode()); NamedAccess const& p = NamedAccessOf(node->op()); Node* const receiver = NodeProperties::GetValueInput(node, 0); @@ -1355,56 +1356,47 @@ Reduction JSNativeContextSpecialization::ReduceJSLoadNamed(Node* node) { } } - // Extract receiver maps from the load IC using the FeedbackNexus. if (!p.feedback().IsValid()) return NoChange(); - FeedbackNexus nexus(p.feedback().vector(), p.feedback().slot()); - - // Try to lower the named access based on the {receiver_maps}. - return ReduceNamedAccessFromNexus(node, jsgraph()->Dead(), nexus, name, + return ReduceNamedAccessFromNexus(node, jsgraph()->Dead(), + FeedbackSource(p.feedback()), name, AccessMode::kLoad); } Reduction JSNativeContextSpecialization::ReduceJSStoreNamed(Node* node) { + DisallowHeapAccessIf no_heap_access(FLAG_concurrent_inlining); DCHECK_EQ(IrOpcode::kJSStoreNamed, node->opcode()); NamedAccess const& p = NamedAccessOf(node->op()); Node* const value = NodeProperties::GetValueInput(node, 1); - // Extract receiver maps from the store IC using the FeedbackNexus. if (!p.feedback().IsValid()) return NoChange(); - FeedbackNexus nexus(p.feedback().vector(), p.feedback().slot()); - - // Try to lower the named access based on the {receiver_maps}. 
- return ReduceNamedAccessFromNexus( - node, value, nexus, NameRef(broker(), p.name()), AccessMode::kStore); + return ReduceNamedAccessFromNexus(node, value, FeedbackSource(p.feedback()), + NameRef(broker(), p.name()), + AccessMode::kStore); } Reduction JSNativeContextSpecialization::ReduceJSStoreNamedOwn(Node* node) { + DisallowHeapAccessIf no_heap_access(FLAG_concurrent_inlining); DCHECK_EQ(IrOpcode::kJSStoreNamedOwn, node->opcode()); StoreNamedOwnParameters const& p = StoreNamedOwnParametersOf(node->op()); Node* const value = NodeProperties::GetValueInput(node, 1); - // Extract receiver maps from the IC using the FeedbackNexus. if (!p.feedback().IsValid()) return NoChange(); - FeedbackNexus nexus(p.feedback().vector(), p.feedback().slot()); - - // Try to lower the creation of a named property based on the {receiver_maps}. - return ReduceNamedAccessFromNexus(node, value, nexus, + return ReduceNamedAccessFromNexus(node, value, FeedbackSource(p.feedback()), NameRef(broker(), p.name()), AccessMode::kStoreInLiteral); } Reduction JSNativeContextSpecialization::ReduceElementAccessOnString( - Node* node, Node* index, Node* value, AccessMode access_mode, - KeyedAccessLoadMode load_mode) { + Node* node, Node* index, Node* value, KeyedAccessMode const& keyed_mode) { Node* receiver = NodeProperties::GetValueInput(node, 0); Node* effect = NodeProperties::GetEffectInput(node); Node* control = NodeProperties::GetControlInput(node); // Strings are immutable in JavaScript. - if (access_mode == AccessMode::kStore) return NoChange(); + if (keyed_mode.access_mode() == AccessMode::kStore) return NoChange(); // `in` cannot be used on strings. - if (access_mode == AccessMode::kHas) return NoChange(); + if (keyed_mode.access_mode() == AccessMode::kHas) return NoChange(); // Ensure that the {receiver} is actually a String. receiver = effect = graph()->NewNode( @@ -1416,7 +1408,7 @@ Reduction JSNativeContextSpecialization::ReduceElementAccessOnString( // Load the single character string from {receiver} or yield undefined // if the {index} is out of bounds (depending on the {load_mode}). 
value = BuildIndexedStringLoad(receiver, index, length, &effect, &control, - load_mode); + keyed_mode.load_mode()); ReplaceWithValue(node, value, effect, control); return Replace(value); @@ -1437,24 +1429,31 @@ base::Optional GetTypedArrayConstant(JSHeapBroker* broker, Reduction JSNativeContextSpecialization::ReduceElementAccess( Node* node, Node* index, Node* value, - ElementAccessFeedback const& processed, AccessMode access_mode, - KeyedAccessLoadMode load_mode, KeyedAccessStoreMode store_mode) { + ElementAccessFeedback const& processed) { DisallowHeapAccessIf no_heap_access(FLAG_concurrent_inlining); - DCHECK(node->opcode() == IrOpcode::kJSLoadProperty || node->opcode() == IrOpcode::kJSStoreProperty || node->opcode() == IrOpcode::kJSStoreInArrayLiteral || node->opcode() == IrOpcode::kJSHasProperty); + Node* receiver = NodeProperties::GetValueInput(node, 0); Node* effect = NodeProperties::GetEffectInput(node); Node* control = NodeProperties::GetControlInput(node); Node* frame_state = NodeProperties::FindFrameStateBefore(node, jsgraph()->Dead()); + AccessMode access_mode = processed.keyed_mode.access_mode(); + if ((access_mode == AccessMode::kLoad || access_mode == AccessMode::kHas) && + receiver->opcode() == IrOpcode::kHeapConstant) { + Reduction reduction = ReduceKeyedLoadFromHeapConstant( + node, index, access_mode, processed.keyed_mode.load_mode()); + if (reduction.Changed()) return reduction; + } + if (HasOnlyStringMaps(broker(), processed.receiver_maps)) { DCHECK(processed.transitions.empty()); - return ReduceElementAccessOnString(node, index, value, access_mode, - load_mode); + return ReduceElementAccessOnString(node, index, value, + processed.keyed_mode); } // Compute element access infos for the receiver maps. @@ -1485,7 +1484,7 @@ Reduction JSNativeContextSpecialization::ReduceElementAccess( // then we need to check that all prototypes have stable maps with // fast elements (and we need to guard against changes to that below). if ((IsHoleyOrDictionaryElementsKind(receiver_map.elements_kind()) || - IsGrowStoreMode(store_mode)) && + IsGrowStoreMode(processed.keyed_mode.store_mode())) && !receiver_map.HasOnlyStablePrototypesWithFastElements( &prototype_maps)) { return NoChange(); @@ -1558,7 +1557,7 @@ Reduction JSNativeContextSpecialization::ReduceElementAccess( // Access the actual element. ValueEffectControl continuation = BuildElementAccess(receiver, index, value, effect, control, access_info, - access_mode, load_mode, store_mode); + processed.keyed_mode); value = continuation.value(); effect = continuation.effect(); control = continuation.control(); @@ -1591,7 +1590,7 @@ Reduction JSNativeContextSpecialization::ReduceElementAccess( ? ElementsTransition::kFastTransition : ElementsTransition::kSlowTransition, transition_source.object(), transition_target.object())), - receiver, effect, control); + receiver, this_effect, this_control); } // Perform map check(s) on {receiver}. @@ -1623,9 +1622,9 @@ Reduction JSNativeContextSpecialization::ReduceElementAccess( } // Access the actual element. 
- ValueEffectControl continuation = BuildElementAccess( - this_receiver, this_index, this_value, this_effect, this_control, - access_info, access_mode, load_mode, store_mode); + ValueEffectControl continuation = + BuildElementAccess(this_receiver, this_index, this_value, this_effect, + this_control, access_info, processed.keyed_mode); values.push_back(continuation.value()); effects.push_back(continuation.effect()); controls.push_back(continuation.control()); @@ -1659,7 +1658,7 @@ Reduction JSNativeContextSpecialization::ReduceElementAccess( } Reduction JSNativeContextSpecialization::ReduceKeyedLoadFromHeapConstant( - Node* node, Node* key, FeedbackNexus const& nexus, AccessMode access_mode, + Node* node, Node* key, AccessMode access_mode, KeyedAccessLoadMode load_mode) { DCHECK(node->opcode() == IrOpcode::kJSLoadProperty || node->opcode() == IrOpcode::kJSHasProperty); @@ -1715,54 +1714,24 @@ Reduction JSNativeContextSpecialization::ReduceKeyedLoadFromHeapConstant( // accesses using the known length, which doesn't change. if (receiver_ref.IsString()) { DCHECK_NE(access_mode, AccessMode::kHas); - // We can only assume that the {index} is a valid array index if the - // IC is in element access mode and not MEGAMORPHIC, otherwise there's - // no guard for the bounds check below. - if (nexus.ic_state() != MEGAMORPHIC && nexus.GetKeyType() == ELEMENT) { - // Ensure that {key} is less than {receiver} length. - Node* length = jsgraph()->Constant(receiver_ref.AsString().length()); - - // Load the single character string from {receiver} or yield - // undefined if the {key} is out of bounds (depending on the - // {load_mode}). - Node* value = BuildIndexedStringLoad(receiver, key, length, &effect, - &control, load_mode); - ReplaceWithValue(node, value, effect, control); - return Replace(value); - } - } + // Ensure that {key} is less than {receiver} length. + Node* length = jsgraph()->Constant(receiver_ref.AsString().length()); - return NoChange(); -} - -Reduction JSNativeContextSpecialization::ReduceKeyedAccess( - Node* node, Node* key, Node* value, FeedbackNexus const& nexus, - AccessMode access_mode, KeyedAccessLoadMode load_mode, - KeyedAccessStoreMode store_mode) { - DCHECK(node->opcode() == IrOpcode::kJSLoadProperty || - node->opcode() == IrOpcode::kJSStoreProperty || - node->opcode() == IrOpcode::kJSStoreInArrayLiteral || - node->opcode() == IrOpcode::kJSHasProperty); - - Node* receiver = NodeProperties::GetValueInput(node, 0); - - if ((access_mode == AccessMode::kLoad || access_mode == AccessMode::kHas) && - receiver->opcode() == IrOpcode::kHeapConstant) { - Reduction reduction = ReduceKeyedLoadFromHeapConstant( - node, key, nexus, access_mode, load_mode); - if (reduction.Changed()) return reduction; + // Load the single character string from {receiver} or yield + // undefined if the {key} is out of bounds (depending on the + // {load_mode}). 
+ Node* value = BuildIndexedStringLoad(receiver, key, length, &effect, + &control, load_mode); + ReplaceWithValue(node, value, effect, control); + return Replace(value); } - return ReducePropertyAccessUsingProcessedFeedback(node, key, base::nullopt, - value, nexus, access_mode, - load_mode, store_mode); + return NoChange(); } -Reduction -JSNativeContextSpecialization::ReducePropertyAccessUsingProcessedFeedback( +Reduction JSNativeContextSpecialization::ReducePropertyAccess( Node* node, Node* key, base::Optional static_name, Node* value, - FeedbackNexus const& nexus, AccessMode access_mode, - KeyedAccessLoadMode load_mode, KeyedAccessStoreMode store_mode) { + FeedbackSource const& source, AccessMode access_mode) { DCHECK_EQ(key == nullptr, static_name.has_value()); DCHECK(node->opcode() == IrOpcode::kJSLoadProperty || node->opcode() == IrOpcode::kJSStoreProperty || @@ -1777,11 +1746,12 @@ JSNativeContextSpecialization::ReducePropertyAccessUsingProcessedFeedback( ProcessedFeedback const* processed = nullptr; if (FLAG_concurrent_inlining) { - processed = broker()->GetFeedback(FeedbackSource(nexus)); + processed = broker()->GetFeedback(source); // TODO(neis): Infer maps from the graph and consolidate with feedback/hints // and filter impossible candidates based on inferred root map. } else { // TODO(neis): Try to unify this with the similar code in the serializer. + FeedbackNexus nexus(source.vector, source.slot); if (nexus.ic_state() == UNINITIALIZED) { processed = new (zone()) InsufficientFeedback(); } else { @@ -1801,8 +1771,8 @@ JSNativeContextSpecialization::ReducePropertyAccessUsingProcessedFeedback( processed = new (zone()) NamedAccessFeedback(*name, access_infos); } else if (nexus.GetKeyType() == ELEMENT && MEGAMORPHIC != nexus.ic_state()) { - processed = - broker()->ProcessFeedbackMapsForElementAccess(receiver_maps); + processed = broker()->ProcessFeedbackMapsForElementAccess( + receiver_maps, KeyedAccessMode::FromNexus(nexus)); } } } @@ -1818,9 +1788,10 @@ JSNativeContextSpecialization::ReducePropertyAccessUsingProcessedFeedback( return ReduceNamedAccess(node, value, *processed->AsNamedAccess(), access_mode, key); case ProcessedFeedback::kElementAccess: + CHECK_EQ(processed->AsElementAccess()->keyed_mode.access_mode(), + access_mode); return ReduceElementAccess(node, key, value, - *processed->AsElementAccess(), access_mode, - load_mode, store_mode); + *processed->AsElementAccess()); case ProcessedFeedback::kGlobalAccess: UNREACHABLE(); } @@ -1846,21 +1817,15 @@ Reduction JSNativeContextSpecialization::ReduceSoftDeoptimize( } Reduction JSNativeContextSpecialization::ReduceJSHasProperty(Node* node) { + DisallowHeapAccessIf no_heap_access(FLAG_concurrent_inlining); DCHECK_EQ(IrOpcode::kJSHasProperty, node->opcode()); PropertyAccess const& p = PropertyAccessOf(node->op()); Node* key = NodeProperties::GetValueInput(node, 1); Node* value = jsgraph()->Dead(); - // Extract receiver maps from the has property IC using the FeedbackNexus. if (!p.feedback().IsValid()) return NoChange(); - FeedbackNexus nexus(p.feedback().vector(), p.feedback().slot()); - - // Extract the keyed access load mode from the keyed load IC. - KeyedAccessLoadMode load_mode = nexus.GetKeyedAccessLoadMode(); - - // Try to lower the keyed access based on the {nexus}. 
- return ReduceKeyedAccess(node, key, value, nexus, AccessMode::kHas, load_mode, - STANDARD_STORE); + return ReducePropertyAccess(node, key, base::nullopt, value, + FeedbackSource(p.feedback()), AccessMode::kHas); } Reduction JSNativeContextSpecialization::ReduceJSLoadPropertyWithEnumeratedKey( @@ -1970,6 +1935,7 @@ Reduction JSNativeContextSpecialization::ReduceJSLoadPropertyWithEnumeratedKey( } Reduction JSNativeContextSpecialization::ReduceJSLoadProperty(Node* node) { + DisallowHeapAccessIf no_heap_acess(FLAG_concurrent_inlining); DCHECK_EQ(IrOpcode::kJSLoadProperty, node->opcode()); PropertyAccess const& p = PropertyAccessOf(node->op()); Node* name = NodeProperties::GetValueInput(node, 1); @@ -1979,62 +1945,49 @@ Reduction JSNativeContextSpecialization::ReduceJSLoadProperty(Node* node) { if (reduction.Changed()) return reduction; } - // Extract receiver maps from the keyed load IC using the FeedbackNexus. if (!p.feedback().IsValid()) return NoChange(); - FeedbackNexus nexus(p.feedback().vector(), p.feedback().slot()); - - // Extract the keyed access load mode from the keyed load IC. - KeyedAccessLoadMode load_mode = nexus.GetKeyedAccessLoadMode(); - - // Try to lower the keyed access based on the {nexus}. Node* value = jsgraph()->Dead(); - return ReduceKeyedAccess(node, name, value, nexus, AccessMode::kLoad, - load_mode, STANDARD_STORE); + return ReducePropertyAccess(node, name, base::nullopt, value, + FeedbackSource(p.feedback()), AccessMode::kLoad); } Reduction JSNativeContextSpecialization::ReduceJSStoreProperty(Node* node) { + DisallowHeapAccessIf no_heap_access(FLAG_concurrent_inlining); DCHECK_EQ(IrOpcode::kJSStoreProperty, node->opcode()); PropertyAccess const& p = PropertyAccessOf(node->op()); Node* const key = NodeProperties::GetValueInput(node, 1); Node* const value = NodeProperties::GetValueInput(node, 2); - // Extract receiver maps from the keyed store IC using the FeedbackNexus. if (!p.feedback().IsValid()) return NoChange(); - FeedbackNexus nexus(p.feedback().vector(), p.feedback().slot()); - - // Extract the keyed access store mode from the keyed store IC. - KeyedAccessStoreMode store_mode = nexus.GetKeyedAccessStoreMode(); - - // Try to lower the keyed access based on the {nexus}. - return ReduceKeyedAccess(node, key, value, nexus, AccessMode::kStore, - STANDARD_LOAD, store_mode); + return ReducePropertyAccess(node, key, base::nullopt, value, + FeedbackSource(p.feedback()), AccessMode::kStore); } Node* JSNativeContextSpecialization::InlinePropertyGetterCall( Node* receiver, Node* context, Node* frame_state, Node** effect, Node** control, ZoneVector* if_exceptions, PropertyAccessInfo const& access_info) { - Node* target = jsgraph()->Constant(access_info.constant()); + ObjectRef constant(broker(), access_info.constant()); + Node* target = jsgraph()->Constant(constant); FrameStateInfo const& frame_info = FrameStateInfoOf(frame_state->op()); // Introduce the call to the getter function. Node* value; - ObjectRef constant(broker(), access_info.constant()); if (constant.IsJSFunction()) { value = *effect = *control = graph()->NewNode( jsgraph()->javascript()->Call(2, CallFrequency(), VectorSlotPair(), ConvertReceiverMode::kNotNullOrUndefined), target, receiver, context, frame_state, *effect, *control); } else { - auto function_template_info = constant.AsFunctionTemplateInfo(); - function_template_info.Serialize(); - Node* holder = - access_info.holder().is_null() - ? 
receiver - : jsgraph()->Constant(access_info.holder().ToHandleChecked()); + Node* holder = access_info.holder().is_null() + ? receiver + : jsgraph()->Constant(ObjectRef( + broker(), access_info.holder().ToHandleChecked())); SharedFunctionInfoRef shared_info( broker(), frame_info.shared_info().ToHandleChecked()); - value = InlineApiCall(receiver, holder, frame_state, nullptr, effect, - control, shared_info, function_template_info); + + value = + InlineApiCall(receiver, holder, frame_state, nullptr, effect, control, + shared_info, constant.AsFunctionTemplateInfo()); } // Remember to rewire the IfException edge if this is inside a try-block. if (if_exceptions != nullptr) { @@ -2052,26 +2005,24 @@ void JSNativeContextSpecialization::InlinePropertySetterCall( Node* receiver, Node* value, Node* context, Node* frame_state, Node** effect, Node** control, ZoneVector<Node*>* if_exceptions, PropertyAccessInfo const& access_info) { - Node* target = jsgraph()->Constant(access_info.constant()); + ObjectRef constant(broker(), access_info.constant()); + Node* target = jsgraph()->Constant(constant); FrameStateInfo const& frame_info = FrameStateInfoOf(frame_state->op()); // Introduce the call to the setter function. - ObjectRef constant(broker(), access_info.constant()); if (constant.IsJSFunction()) { *effect = *control = graph()->NewNode( jsgraph()->javascript()->Call(3, CallFrequency(), VectorSlotPair(), ConvertReceiverMode::kNotNullOrUndefined), target, receiver, value, context, frame_state, *effect, *control); } else { - auto function_template_info = constant.AsFunctionTemplateInfo(); - function_template_info.Serialize(); - Node* holder = - access_info.holder().is_null() - ? receiver - : jsgraph()->Constant(access_info.holder().ToHandleChecked()); + Node* holder = access_info.holder().is_null() + ? receiver + : jsgraph()->Constant(ObjectRef( + broker(), access_info.holder().ToHandleChecked())); SharedFunctionInfoRef shared_info( broker(), frame_info.shared_info().ToHandleChecked()); InlineApiCall(receiver, holder, frame_state, value, effect, control, - shared_info, function_template_info); + shared_info, constant.AsFunctionTemplateInfo()); } // Remember to rewire the IfException edge if this is inside a try-block. if (if_exceptions != nullptr) { @@ -2088,8 +2039,16 @@ Node* JSNativeContextSpecialization::InlineApiCall( Node* receiver, Node* holder, Node* frame_state, Node* value, Node** effect, Node** control, SharedFunctionInfoRef const& shared_info, FunctionTemplateInfoRef const& function_template_info) { - auto call_handler_info = - function_template_info.call_code().AsCallHandlerInfo(); + if (!function_template_info.has_call_code()) { + return nullptr; + } + + if (!function_template_info.call_code().has_value()) { + TRACE_BROKER_MISSING(broker(), "call code for function template info " + << function_template_info); + return nullptr; + } + + CallHandlerInfoRef call_handler_info = *function_template_info.call_code(); // Only setters have a value. int const argc = value == nullptr ? 
0 : 1; @@ -2151,7 +2110,8 @@ JSNativeContextSpecialization::BuildPropertyLoad( value = InlinePropertyGetterCall(receiver, context, frame_state, &effect, &control, if_exceptions, access_info); } else if (access_info.IsModuleExport()) { - Node* cell = jsgraph()->Constant(access_info.export_cell()); + Node* cell = jsgraph()->Constant( + ObjectRef(broker(), access_info.constant()).AsCell()); value = effect = graph()->NewNode(simplified()->LoadField(AccessBuilder::ForCellValue()), cell, effect, control); @@ -2382,7 +2342,6 @@ JSNativeContextSpecialization::BuildPropertyStore( // Check if we need to grow the properties backing store // with this transitioning store. MapRef transition_map_ref(broker(), transition_map); - transition_map_ref.SerializeBackPointer(); MapRef original_map = transition_map_ref.GetBackPointer().AsMap(); if (original_map.UnusedPropertyFields() == 0) { DCHECK(!field_index.is_inobject()); @@ -2404,7 +2363,7 @@ JSNativeContextSpecialization::BuildPropertyStore( common()->BeginRegion(RegionObservability::kObservable), effect); effect = graph()->NewNode( simplified()->StoreField(AccessBuilder::ForMap()), receiver, - jsgraph()->Constant(transition_map), effect, control); + jsgraph()->Constant(transition_map_ref), effect, control); effect = graph()->NewNode(simplified()->StoreField(field_access), storage, value, effect, control); effect = graph()->NewNode(common()->FinishRegion(), @@ -2495,21 +2454,16 @@ Reduction JSNativeContextSpecialization::ReduceJSStoreDataPropertyInLiteral( Reduction JSNativeContextSpecialization::ReduceJSStoreInArrayLiteral( Node* node) { + DisallowHeapAccessIf no_heap_access(FLAG_concurrent_inlining); DCHECK_EQ(IrOpcode::kJSStoreInArrayLiteral, node->opcode()); FeedbackParameter const& p = FeedbackParameterOf(node->op()); Node* const index = NodeProperties::GetValueInput(node, 1); Node* const value = NodeProperties::GetValueInput(node, 2); - // Extract receiver maps from the keyed store IC using the FeedbackNexus. if (!p.feedback().IsValid()) return NoChange(); - FeedbackNexus nexus(p.feedback().vector(), p.feedback().slot()); - - // Extract the keyed access store mode from the keyed store IC. - KeyedAccessStoreMode store_mode = nexus.GetKeyedAccessStoreMode(); - - return ReduceKeyedAccess(node, index, value, nexus, - AccessMode::kStoreInLiteral, STANDARD_LOAD, - store_mode); + return ReducePropertyAccess(node, index, base::nullopt, value, + FeedbackSource(p.feedback()), + AccessMode::kStoreInLiteral); } Reduction JSNativeContextSpecialization::ReduceJSToObject(Node* node) { @@ -2546,8 +2500,7 @@ ExternalArrayType GetArrayTypeFromElementsKind(ElementsKind kind) { JSNativeContextSpecialization::ValueEffectControl JSNativeContextSpecialization::BuildElementAccess( Node* receiver, Node* index, Node* value, Node* effect, Node* control, - ElementAccessInfo const& access_info, AccessMode access_mode, - KeyedAccessLoadMode load_mode, KeyedAccessStoreMode store_mode) { + ElementAccessInfo const& access_info, KeyedAccessMode const& keyed_mode) { // TODO(bmeurer): We currently specialize based on elements kind. We should // also be able to properly support strings and other JSObjects here. ElementsKind elements_kind = access_info.elements_kind(); @@ -2583,7 +2536,7 @@ JSNativeContextSpecialization::BuildElementAccess( // for Chrome. Node and Electron both set this limit to 0. Setting // the base to Smi zero here allows the EffectControlLinearizer to // optimize away the tricky part of the access later. 
- if (V8_TYPED_ARRAY_MAX_SIZE_IN_HEAP == 0) { + if (JSTypedArray::kMaxSizeInHeap == 0) { base_pointer = jsgraph()->ZeroConstant(); } else { base_pointer = effect = @@ -2629,8 +2582,10 @@ JSNativeContextSpecialization::BuildElementAccess( buffer_or_receiver = buffer; } - if (load_mode == LOAD_IGNORE_OUT_OF_BOUNDS || - store_mode == STORE_IGNORE_OUT_OF_BOUNDS) { + if ((keyed_mode.IsLoad() && + keyed_mode.load_mode() == LOAD_IGNORE_OUT_OF_BOUNDS) || + (keyed_mode.IsStore() && + keyed_mode.store_mode() == STORE_IGNORE_OUT_OF_BOUNDS)) { // Only check that the {index} is in SignedSmall range. We do the actual // bounds check below and just skip the property access if it's out of // bounds for the {receiver}. @@ -2651,10 +2606,10 @@ JSNativeContextSpecialization::BuildElementAccess( // Access the actual element. ExternalArrayType external_array_type = GetArrayTypeFromElementsKind(elements_kind); - switch (access_mode) { + switch (keyed_mode.access_mode()) { case AccessMode::kLoad: { // Check if we can return undefined for out-of-bounds loads. - if (load_mode == LOAD_IGNORE_OUT_OF_BOUNDS) { + if (keyed_mode.load_mode() == LOAD_IGNORE_OUT_OF_BOUNDS) { Node* check = graph()->NewNode(simplified()->NumberLessThan(), index, length); Node* branch = graph()->NewNode( @@ -2716,7 +2671,7 @@ JSNativeContextSpecialization::BuildElementAccess( } // Check if we can skip the out-of-bounds store. - if (store_mode == STORE_IGNORE_OUT_OF_BOUNDS) { + if (keyed_mode.store_mode() == STORE_IGNORE_OUT_OF_BOUNDS) { Node* check = graph()->NewNode(simplified()->NumberLessThan(), index, length); Node* branch = graph()->NewNode(common()->Branch(BranchHint::kTrue), @@ -2766,9 +2721,9 @@ JSNativeContextSpecialization::BuildElementAccess( // Don't try to store to a copy-on-write backing store (unless supported by // the store mode). - if (access_mode == AccessMode::kStore && + if (keyed_mode.access_mode() == AccessMode::kStore && IsSmiOrObjectElementsKind(elements_kind) && - !IsCOWHandlingStoreMode(store_mode)) { + !IsCOWHandlingStoreMode(keyed_mode.store_mode())) { effect = graph()->NewNode( simplified()->CheckMaps( CheckMapsFlag::kNone, @@ -2791,11 +2746,10 @@ JSNativeContextSpecialization::BuildElementAccess( elements, effect, control); // Check if we might need to grow the {elements} backing store. - if (IsGrowStoreMode(store_mode)) { + if (keyed_mode.IsStore() && IsGrowStoreMode(keyed_mode.store_mode())) { // For growing stores we validate the {index} below. - DCHECK(access_mode == AccessMode::kStore || - access_mode == AccessMode::kStoreInLiteral); - } else if (load_mode == LOAD_IGNORE_OUT_OF_BOUNDS && + } else if (keyed_mode.IsLoad() && + keyed_mode.load_mode() == LOAD_IGNORE_OUT_OF_BOUNDS && CanTreatHoleAsUndefined(receiver_maps)) { // Check that the {index} is a valid array index, we do the actual // bounds check below and just skip the store below if it's out of @@ -2826,7 +2780,7 @@ JSNativeContextSpecialization::BuildElementAccess( kFullWriteBarrier, LoadSensitivity::kCritical}; // Access the actual element. - if (access_mode == AccessMode::kLoad) { + if (keyed_mode.access_mode() == AccessMode::kLoad) { // Compute the real element access type, which includes the hole in case // of holey backing stores. if (IsHoleyElementsKind(elements_kind)) { @@ -2839,7 +2793,7 @@ JSNativeContextSpecialization::BuildElementAccess( } // Check if we can return undefined for out-of-bounds loads. 
- if (load_mode == LOAD_IGNORE_OUT_OF_BOUNDS && + if (keyed_mode.load_mode() == LOAD_IGNORE_OUT_OF_BOUNDS && CanTreatHoleAsUndefined(receiver_maps)) { Node* check = graph()->NewNode(simplified()->NumberLessThan(), index, length); @@ -2923,7 +2877,7 @@ JSNativeContextSpecialization::BuildElementAccess( effect, control); } } - } else if (access_mode == AccessMode::kHas) { + } else if (keyed_mode.access_mode() == AccessMode::kHas) { // For packed arrays with NoElementsProtector valid, a bounds check // is equivalent to HasProperty. value = effect = graph()->NewNode(simplified()->SpeculativeNumberLessThan( @@ -2996,8 +2950,9 @@ JSNativeContextSpecialization::BuildElementAccess( vtrue, vfalse, control); } } else { - DCHECK(access_mode == AccessMode::kStore || - access_mode == AccessMode::kStoreInLiteral); + DCHECK(keyed_mode.access_mode() == AccessMode::kStore || + keyed_mode.access_mode() == AccessMode::kStoreInLiteral); + if (IsSmiElementsKind(elements_kind)) { value = effect = graph()->NewNode( simplified()->CheckSmi(VectorSlotPair()), value, effect, control); @@ -3011,11 +2966,11 @@ JSNativeContextSpecialization::BuildElementAccess( // Ensure that copy-on-write backing store is writable. if (IsSmiOrObjectElementsKind(elements_kind) && - store_mode == STORE_HANDLE_COW) { + keyed_mode.store_mode() == STORE_HANDLE_COW) { elements = effect = graph()->NewNode(simplified()->EnsureWritableFastElements(), receiver, elements, effect, control); - } else if (IsGrowStoreMode(store_mode)) { + } else if (IsGrowStoreMode(keyed_mode.store_mode())) { // Determine the length of the {elements} backing store. Node* elements_length = effect = graph()->NewNode( simplified()->LoadField(AccessBuilder::ForFixedArrayLength()), @@ -3053,7 +3008,7 @@ JSNativeContextSpecialization::BuildElementAccess( // If we didn't grow {elements}, it might still be COW, in which case we // copy it now.
if (IsSmiOrObjectElementsKind(elements_kind) && - store_mode == STORE_AND_GROW_HANDLE_COW) { + keyed_mode.store_mode() == STORE_AND_GROW_HANDLE_COW) { elements = effect = graph()->NewNode(simplified()->EnsureWritableFastElements(), receiver, elements, effect, control); @@ -3295,7 +3250,8 @@ bool JSNativeContextSpecialization::InferReceiverMaps( Node* receiver, Node* effect, MapHandles* receiver_maps) { ZoneHandleSet<Map> maps; NodeProperties::InferReceiverMapsResult result = - NodeProperties::InferReceiverMaps(broker(), receiver, effect, &maps); + NodeProperties::InferReceiverMapsUnsafe(broker(), receiver, effect, + &maps); if (result == NodeProperties::kReliableReceiverMaps) { for (size_t i = 0; i < maps.size(); ++i) { receiver_maps->push_back(maps[i]); @@ -3357,8 +3313,6 @@ SimplifiedOperatorBuilder* JSNativeContextSpecialization::simplified() const { return jsgraph()->simplified(); } -#undef V8_TYPED_ARRAY_MAX_SIZE_IN_HEAP - } // namespace compiler } // namespace internal } // namespace v8 diff --git a/deps/v8/src/compiler/js-native-context-specialization.h b/deps/v8/src/compiler/js-native-context-specialization.h index 7de2639966ee49..8510c76bfc3d59 100644 --- a/deps/v8/src/compiler/js-native-context-specialization.h +++ b/deps/v8/src/compiler/js-native-context-specialization.h @@ -7,6 +7,7 @@ #include "src/base/flags.h" #include "src/compiler/graph-reducer.h" +#include "src/compiler/js-heap-broker.h" #include "src/deoptimizer/deoptimize-reason.h" #include "src/objects/map.h" @@ -93,24 +94,15 @@ class V8_EXPORT_PRIVATE JSNativeContextSpecialization final Reduction ReduceJSToObject(Node* node); Reduction ReduceElementAccess(Node* node, Node* index, Node* value, - ElementAccessFeedback const& processed, - AccessMode access_mode, - KeyedAccessLoadMode load_mode, - KeyedAccessStoreMode store_mode); + ElementAccessFeedback const& processed); // In the case of non-keyed (named) accesses, pass the name as {static_name} // and use {nullptr} for {key} (load/store modes are irrelevant).
- Reduction ReducePropertyAccessUsingProcessedFeedback( - Node* node, Node* key, base::Optional<NameRef> static_name, Node* value, - FeedbackNexus const& nexus, AccessMode access_mode, - KeyedAccessLoadMode load_mode = STANDARD_LOAD, - KeyedAccessStoreMode store_mode = STANDARD_STORE); - Reduction ReduceKeyedAccess(Node* node, Node* key, Node* value, - FeedbackNexus const& nexus, - AccessMode access_mode, - KeyedAccessLoadMode load_mode, - KeyedAccessStoreMode store_mode); + Reduction ReducePropertyAccess(Node* node, Node* key, + base::Optional<NameRef> static_name, + Node* value, FeedbackSource const& source, + AccessMode access_mode); Reduction ReduceNamedAccessFromNexus(Node* node, Node* value, - FeedbackNexus const& nexus, + FeedbackSource const& source, NameRef const& name, AccessMode access_mode); Reduction ReduceNamedAccess(Node* node, Node* value, @@ -123,12 +115,10 @@ class V8_EXPORT_PRIVATE JSNativeContextSpecialization final NameRef const& name, AccessMode access_mode, Node* key, PropertyCellRef const& property_cell); Reduction ReduceKeyedLoadFromHeapConstant(Node* node, Node* key, - FeedbackNexus const& nexus, AccessMode access_mode, KeyedAccessLoadMode load_mode); Reduction ReduceElementAccessOnString(Node* node, Node* index, Node* value, - AccessMode access_mode, - KeyedAccessLoadMode load_mode); + KeyedAccessMode const& keyed_mode); Reduction ReduceSoftDeoptimize(Node* node, DeoptimizeReason reason); Reduction ReduceJSToString(Node* node); @@ -197,10 +187,11 @@ class V8_EXPORT_PRIVATE JSNativeContextSpecialization final FunctionTemplateInfoRef const& function_template_info); // Construct the appropriate subgraph for element access. - ValueEffectControl BuildElementAccess( - Node* receiver, Node* index, Node* value, Node* effect, Node* control, - ElementAccessInfo const& access_info, AccessMode access_mode, - KeyedAccessLoadMode load_mode, KeyedAccessStoreMode store_mode); + ValueEffectControl BuildElementAccess(Node* receiver, Node* index, + Node* value, Node* effect, + Node* control, + ElementAccessInfo const& access_info, + KeyedAccessMode const& keyed_mode); // Construct appropriate subgraph to load from a String.
Node* BuildIndexedStringLoad(Node* receiver, Node* index, Node* length, diff --git a/deps/v8/src/compiler/js-operator.cc b/deps/v8/src/compiler/js-operator.cc index a779790b8df31a..e0f97922b2ced0 100644 --- a/deps/v8/src/compiler/js-operator.cc +++ b/deps/v8/src/compiler/js-operator.cc @@ -17,7 +17,7 @@ namespace v8 { namespace internal { namespace compiler { -std::ostream& operator<<(std::ostream& os, CallFrequency f) { +std::ostream& operator<<(std::ostream& os, CallFrequency const& f) { if (f.IsUnknown()) return os << "unknown"; return os << f.value(); } @@ -28,7 +28,6 @@ CallFrequency CallFrequencyOf(Operator const* op) { return OpParameter<CallFrequency>(op); } - std::ostream& operator<<(std::ostream& os, ConstructForwardVarargsParameters const& p) { return os << p.arity() << ", " << p.start_index(); } @@ -843,7 +842,8 @@ const Operator* JSOperatorBuilder::Call(size_t arity, parameters); // parameter } -const Operator* JSOperatorBuilder::CallWithArrayLike(CallFrequency frequency) { +const Operator* JSOperatorBuilder::CallWithArrayLike( + CallFrequency const& frequency) { return new (zone()) Operator1<CallFrequency>( // -- IrOpcode::kJSCallWithArrayLike, Operator::kNoProperties, // opcode "JSCallWithArrayLike", // name @@ -899,8 +899,10 @@ const Operator* JSOperatorBuilder::ConstructForwardVarargs( parameters); // parameter } +// Note: frequency is taken by reference to work around a GCC bug +// on AIX (v8:8193). const Operator* JSOperatorBuilder::Construct(uint32_t arity, - CallFrequency frequency, + CallFrequency const& frequency, VectorSlotPair const& feedback) { ConstructParameters parameters(arity, frequency, feedback); return new (zone()) Operator1<ConstructParameters>( // -- @@ -911,7 +913,7 @@ const Operator* JSOperatorBuilder::Construct(uint32_t arity, } const Operator* JSOperatorBuilder::ConstructWithArrayLike( - CallFrequency frequency) { + CallFrequency const& frequency) { return new (zone()) Operator1<CallFrequency>( // -- IrOpcode::kJSConstructWithArrayLike, // opcode Operator::kNoProperties, // properties @@ -921,7 +923,8 @@ const Operator* JSOperatorBuilder::ConstructWithArrayLike( } const Operator* JSOperatorBuilder::ConstructWithSpread( - uint32_t arity, CallFrequency frequency, VectorSlotPair const& feedback) { + uint32_t arity, CallFrequency const& frequency, + VectorSlotPair const& feedback) { ConstructParameters parameters(arity, frequency, feedback); return new (zone()) Operator1<ConstructParameters>( // -- IrOpcode::kJSConstructWithSpread, Operator::kNoProperties, // opcode diff --git a/deps/v8/src/compiler/js-operator.h b/deps/v8/src/compiler/js-operator.h index 0f315b1cb56dac..e7d9acb152acfe 100644 --- a/deps/v8/src/compiler/js-operator.h +++ b/deps/v8/src/compiler/js-operator.h @@ -48,7 +48,7 @@ class CallFrequency final { } bool operator!=(CallFrequency const& that) const { return !(*this == that); } - friend size_t hash_value(CallFrequency f) { + friend size_t hash_value(CallFrequency const& f) { return bit_cast<uint32_t>(f.value_); } @@ -58,7 +58,7 @@ class CallFrequency final { float value_; }; -std::ostream& operator<<(std::ostream&, CallFrequency); +std::ostream& operator<<(std::ostream&, CallFrequency const&); CallFrequency CallFrequencyOf(Operator const* op) V8_WARN_UNUSED_RESULT; @@ -101,7 +101,7 @@ ConstructForwardVarargsParameters const& ConstructForwardVarargsParametersOf( // used as a parameter by JSConstruct and JSConstructWithSpread operators.
class ConstructParameters final { public: - ConstructParameters(uint32_t arity, CallFrequency frequency, + ConstructParameters(uint32_t arity, CallFrequency const& frequency, VectorSlotPair const& feedback) : arity_(arity), frequency_(frequency), feedback_(feedback) {} @@ -757,7 +757,7 @@ class V8_EXPORT_PRIVATE JSOperatorBuilder final VectorSlotPair const& feedback = VectorSlotPair(), ConvertReceiverMode convert_mode = ConvertReceiverMode::kAny, SpeculationMode speculation_mode = SpeculationMode::kDisallowSpeculation); - const Operator* CallWithArrayLike(CallFrequency frequency); + const Operator* CallWithArrayLike(CallFrequency const& frequency); const Operator* CallWithSpread( uint32_t arity, CallFrequency const& frequency = CallFrequency(), VectorSlotPair const& feedback = VectorSlotPair(), @@ -768,11 +768,11 @@ class V8_EXPORT_PRIVATE JSOperatorBuilder final const Operator* ConstructForwardVarargs(size_t arity, uint32_t start_index); const Operator* Construct(uint32_t arity, - CallFrequency frequency = CallFrequency(), + CallFrequency const& frequency = CallFrequency(), VectorSlotPair const& feedback = VectorSlotPair()); - const Operator* ConstructWithArrayLike(CallFrequency frequency); + const Operator* ConstructWithArrayLike(CallFrequency const& frequency); const Operator* ConstructWithSpread( - uint32_t arity, CallFrequency frequency = CallFrequency(), + uint32_t arity, CallFrequency const& frequency = CallFrequency(), VectorSlotPair const& feedback = VectorSlotPair()); const Operator* LoadProperty(VectorSlotPair const& feedback); diff --git a/deps/v8/src/compiler/js-type-hint-lowering.cc b/deps/v8/src/compiler/js-type-hint-lowering.cc index 9d882e823835d2..f3696bcc4887f4 100644 --- a/deps/v8/src/compiler/js-type-hint-lowering.cc +++ b/deps/v8/src/compiler/js-type-hint-lowering.cc @@ -44,6 +44,25 @@ bool BinaryOperationHintToNumberOperationHint( return false; } +bool BinaryOperationHintToBigIntOperationHint( + BinaryOperationHint binop_hint, BigIntOperationHint* bigint_hint) { + switch (binop_hint) { + case BinaryOperationHint::kSignedSmall: + case BinaryOperationHint::kSignedSmallInputs: + case BinaryOperationHint::kSigned32: + case BinaryOperationHint::kNumber: + case BinaryOperationHint::kNumberOrOddball: + case BinaryOperationHint::kAny: + case BinaryOperationHint::kNone: + case BinaryOperationHint::kString: + return false; + case BinaryOperationHint::kBigInt: + *bigint_hint = BigIntOperationHint::kBigInt; + return true; + } + UNREACHABLE(); +} + } // namespace class JSSpeculativeBinopBuilder final { @@ -74,6 +93,11 @@ class JSSpeculativeBinopBuilder final { hint); } + bool GetBinaryBigIntOperationHint(BigIntOperationHint* hint) { + return BinaryOperationHintToBigIntOperationHint(GetBinaryOperationHint(), + hint); + } + bool GetCompareNumberOperationHint(NumberOperationHint* hint) { switch (GetCompareOperationHint()) { case CompareOperationHint::kSignedSmall: @@ -138,6 +162,16 @@ class JSSpeculativeBinopBuilder final { UNREACHABLE(); } + const Operator* SpeculativeBigIntOp(BigIntOperationHint hint) { + switch (op_->opcode()) { + case IrOpcode::kJSAdd: + return simplified()->SpeculativeBigIntAdd(hint); + default: + break; + } + UNREACHABLE(); + } + const Operator* SpeculativeCompareOp(NumberOperationHint hint) { switch (op_->opcode()) { case IrOpcode::kJSEqual: @@ -179,6 +213,16 @@ class JSSpeculativeBinopBuilder final { return nullptr; } + Node* TryBuildBigIntBinop() { + BigIntOperationHint hint; + if (GetBinaryBigIntOperationHint(&hint)) { + const Operator* op = 
SpeculativeBigIntOp(hint); + Node* node = BuildSpeculativeOperation(op); + return node; + } + return nullptr; + } + Node* TryBuildNumberCompare() { NumberOperationHint hint; if (GetCompareNumberOperationHint(&hint)) { @@ -264,6 +308,15 @@ JSTypeHintLowering::LoweringResult JSTypeHintLowering::ReduceUnaryOperation( operand, jsgraph()->SmiConstant(-1), effect, control, slot); node = b.TryBuildNumberBinop(); + if (!node) { + FeedbackNexus nexus(feedback_vector(), slot); + if (nexus.GetBinaryOperationFeedback() == + BinaryOperationHint::kBigInt) { + const Operator* op = jsgraph()->simplified()->SpeculativeBigIntNegate( + BigIntOperationHint::kBigInt); + node = jsgraph()->graph()->NewNode(op, operand, effect, control); + } + } break; } default: @@ -345,6 +398,11 @@ JSTypeHintLowering::LoweringResult JSTypeHintLowering::ReduceBinaryOperation( if (Node* node = b.TryBuildNumberBinop()) { return LoweringResult::SideEffectFree(node, node, control); } + if (op->opcode() == IrOpcode::kJSAdd) { + if (Node* node = b.TryBuildBigIntBinop()) { + return LoweringResult::SideEffectFree(node, node, control); + } + } break; } case IrOpcode::kJSExponentiate: { diff --git a/deps/v8/src/compiler/js-type-hint-lowering.h b/deps/v8/src/compiler/js-type-hint-lowering.h index 7164a0b708a23d..a74c0193558734 100644 --- a/deps/v8/src/compiler/js-type-hint-lowering.h +++ b/deps/v8/src/compiler/js-type-hint-lowering.h @@ -153,7 +153,8 @@ class JSTypeHintLowering { private: friend class JSSpeculativeBinopBuilder; - Node* TryBuildSoftDeopt(FeedbackNexus& nexus, Node* effect, Node* control, + Node* TryBuildSoftDeopt(FeedbackNexus& nexus, // NOLINT(runtime/references) + Node* effect, Node* control, DeoptimizeReason reason) const; JSGraph* jsgraph() const { return jsgraph_; } diff --git a/deps/v8/src/compiler/js-typed-lowering.cc b/deps/v8/src/compiler/js-typed-lowering.cc index ba50b7579234c3..3190fc993056c1 100644 --- a/deps/v8/src/compiler/js-typed-lowering.cc +++ b/deps/v8/src/compiler/js-typed-lowering.cc @@ -10,6 +10,7 @@ #include "src/compiler/access-builder.h" #include "src/compiler/allocation-builder.h" #include "src/compiler/js-graph.h" +#include "src/compiler/js-heap-broker.h" #include "src/compiler/linkage.h" #include "src/compiler/node-matchers.h" #include "src/compiler/node-properties.h" @@ -1364,20 +1365,21 @@ Node* JSTypedLowering::BuildGetModuleCell(Node* node) { Type module_type = NodeProperties::GetType(module); if (module_type.IsHeapConstant()) { - ModuleRef module_constant = module_type.AsHeapConstant()->Ref().AsModule(); + SourceTextModuleRef module_constant = + module_type.AsHeapConstant()->Ref().AsSourceTextModule(); CellRef cell_constant = module_constant.GetCell(cell_index); return jsgraph()->Constant(cell_constant); } FieldAccess field_access; int index; - if (ModuleDescriptor::GetCellIndexKind(cell_index) == - ModuleDescriptor::kExport) { + if (SourceTextModuleDescriptor::GetCellIndexKind(cell_index) == + SourceTextModuleDescriptor::kExport) { field_access = AccessBuilder::ForModuleRegularExports(); index = cell_index - 1; } else { - DCHECK_EQ(ModuleDescriptor::GetCellIndexKind(cell_index), - ModuleDescriptor::kImport); + DCHECK_EQ(SourceTextModuleDescriptor::GetCellIndexKind(cell_index), + SourceTextModuleDescriptor::kImport); field_access = AccessBuilder::ForModuleRegularImports(); index = -cell_index - 1; } @@ -1408,9 +1410,9 @@ Reduction JSTypedLowering::ReduceJSStoreModule(Node* node) { Node* effect = NodeProperties::GetEffectInput(node); Node* control = NodeProperties::GetControlInput(node);
Node* value = NodeProperties::GetValueInput(node, 1); - DCHECK_EQ( - ModuleDescriptor::GetCellIndexKind(OpParameter<int32_t>(node->op())), - ModuleDescriptor::kExport); + DCHECK_EQ(SourceTextModuleDescriptor::GetCellIndexKind( + OpParameter<int32_t>(node->op())), + SourceTextModuleDescriptor::kExport); Node* cell = BuildGetModuleCell(node); if (cell->op()->EffectOutputCount() > 0) effect = cell; diff --git a/deps/v8/src/compiler/linkage.cc b/deps/v8/src/compiler/linkage.cc index 8bb47b43e9f26c..1d88a27a5f758e 100644 --- a/deps/v8/src/compiler/linkage.cc +++ b/deps/v8/src/compiler/linkage.cc @@ -137,13 +137,19 @@ bool CallDescriptor::CanTailCall(const Node* node) const { return HasSameReturnLocationsAs(CallDescriptorOf(node->op())); } -int CallDescriptor::CalculateFixedFrameSize() const { +// TODO(jkummerow, sigurds): Arguably frame size calculation should be +// keyed on code/frame type, not on CallDescriptor kind. Think about a +// good way to organize this logic. +int CallDescriptor::CalculateFixedFrameSize(Code::Kind code_kind) const { switch (kind_) { case kCallJSFunction: return PushArgumentCount() ? OptimizedBuiltinFrameConstants::kFixedSlotCount : StandardFrameConstants::kFixedSlotCount; case kCallAddress: + if (code_kind == Code::C_WASM_ENTRY) { + return CWasmEntryFrameConstants::kFixedSlotCount; + } return CommonFrameConstants::kFixedSlotCountAboveFp + CommonFrameConstants::kCPSlotCount; case kCallCodeObject: diff --git a/deps/v8/src/compiler/linkage.h b/deps/v8/src/compiler/linkage.h index e4fa6f9f207b46..05eb0e7d11732f 100644 --- a/deps/v8/src/compiler/linkage.h +++ b/deps/v8/src/compiler/linkage.h @@ -325,7 +325,7 @@ class V8_EXPORT_PRIVATE CallDescriptor final bool CanTailCall(const Node* call) const; - int CalculateFixedFrameSize() const; + int CalculateFixedFrameSize(Code::Kind code_kind) const; RegList AllocatableRegisters() const { return allocatable_registers_; } diff --git a/deps/v8/src/compiler/load-elimination.cc b/deps/v8/src/compiler/load-elimination.cc index c42bfd839a4ac5..f9998723f387da 100644 --- a/deps/v8/src/compiler/load-elimination.cc +++ b/deps/v8/src/compiler/load-elimination.cc @@ -419,14 +419,15 @@ bool LoadElimination::AbstractState::Equals(AbstractState const* that) const { } void LoadElimination::AbstractState::FieldsMerge( - AbstractFields& this_fields, AbstractFields const& that_fields, + AbstractFields* this_fields, AbstractFields const& that_fields, Zone* zone) { - for (size_t i = 0; i < this_fields.size(); ++i) { - if (this_fields[i]) { + for (size_t i = 0; i < this_fields->size(); ++i) { + AbstractField const*& this_field = (*this_fields)[i]; + if (this_field) { if (that_fields[i]) { - this_fields[i] = this_fields[i]->Merge(that_fields[i], zone); + this_field = this_field->Merge(that_fields[i], zone); } else { - this_fields[i] = nullptr; + this_field = nullptr; } } } @@ -442,8 +443,8 @@ void LoadElimination::AbstractState::Merge(AbstractState const* that, } // Merge the information we have about the fields. - FieldsMerge(this->fields_, that->fields_, zone); - FieldsMerge(this->const_fields_, that->const_fields_, zone); + FieldsMerge(&this->fields_, that->fields_, zone); + FieldsMerge(&this->const_fields_, that->const_fields_, zone); // Merge the information we have about the maps.
if (this->maps_) { @@ -923,20 +924,23 @@ Reduction LoadElimination::ReduceStoreField(Node* node, FieldInfo const* lookup_result = state->LookupField(object, field_index, constness); - if (lookup_result && constness == PropertyConstness::kMutable) { + if (lookup_result && (constness == PropertyConstness::kMutable || + V8_ENABLE_DOUBLE_CONST_STORE_CHECK_BOOL)) { // At runtime, we should never encounter // - any store replacing existing info with a different, incompatible // representation, nor // - two consecutive const stores. // However, we may see such code statically, so we guard against // executing it by emitting Unreachable. - // TODO(gsps): Re-enable the double const store check once we have - // identified other FieldAccesses that should be marked mutable - // instead of const (cf. JSCreateLowering::AllocateFastLiteral). + // TODO(gsps): Re-enable the double const store check even for + // non-debug builds once we have identified other FieldAccesses + // that should be marked mutable instead of const + // (cf. JSCreateLowering::AllocateFastLiteral). bool incompatible_representation = !lookup_result->name.is_null() && !IsCompatible(representation, lookup_result->representation); - if (incompatible_representation) { + if (incompatible_representation || + constness == PropertyConstness::kConst) { Node* control = NodeProperties::GetControlInput(node); Node* unreachable = graph()->NewNode(common()->Unreachable(), effect, control); diff --git a/deps/v8/src/compiler/load-elimination.h b/deps/v8/src/compiler/load-elimination.h index 7658d013652979..4ad1fa64a201ef 100644 --- a/deps/v8/src/compiler/load-elimination.h +++ b/deps/v8/src/compiler/load-elimination.h @@ -233,7 +233,7 @@ class V8_EXPORT_PRIVATE LoadElimination final bool FieldsEquals(AbstractFields const& this_fields, AbstractFields const& that_fields) const; - void FieldsMerge(AbstractFields& this_fields, + void FieldsMerge(AbstractFields* this_fields, AbstractFields const& that_fields, Zone* zone); AbstractElements const* elements_ = nullptr; diff --git a/deps/v8/src/compiler/loop-analysis.cc b/deps/v8/src/compiler/loop-analysis.cc index d6b88b13f5a73b..41d50549b3154b 100644 --- a/deps/v8/src/compiler/loop-analysis.cc +++ b/deps/v8/src/compiler/loop-analysis.cc @@ -4,6 +4,7 @@ #include "src/compiler/loop-analysis.h" +#include "src/codegen/tick-counter.h" #include "src/compiler/graph.h" #include "src/compiler/node-marker.h" #include "src/compiler/node-properties.h" @@ -12,6 +13,9 @@ namespace v8 { namespace internal { + +class TickCounter; + namespace compiler { #define OFFSET(x) ((x)&0x1F) @@ -51,7 +55,8 @@ struct TempLoopInfo { // marks on edges into/out-of the loop header nodes. 
class LoopFinderImpl { public: - LoopFinderImpl(Graph* graph, LoopTree* loop_tree, Zone* zone) + LoopFinderImpl(Graph* graph, LoopTree* loop_tree, TickCounter* tick_counter, + Zone* zone) : zone_(zone), end_(graph->end()), queue_(zone), @@ -63,7 +68,8 @@ class LoopFinderImpl { loops_found_(0), width_(0), backward_(nullptr), - forward_(nullptr) {} + forward_(nullptr), + tick_counter_(tick_counter) {} void Run() { PropagateBackward(); @@ -116,6 +122,7 @@ class LoopFinderImpl { int width_; uint32_t* backward_; uint32_t* forward_; + TickCounter* const tick_counter_; int num_nodes() { return static_cast<int>(loop_tree_->node_to_loop_num_.size()); } @@ -183,6 +190,7 @@ class LoopFinderImpl { Queue(end_); while (!queue_.empty()) { + tick_counter_->DoTick(); Node* node = queue_.front(); info(node); queue_.pop_front(); @@ -301,6 +309,7 @@ class LoopFinderImpl { } // Propagate forward on paths that were backward reachable from backedges. while (!queue_.empty()) { + tick_counter_->DoTick(); Node* node = queue_.front(); queue_.pop_front(); queued_.Set(node, false); @@ -512,11 +521,11 @@ class LoopFinderImpl { } }; - -LoopTree* LoopFinder::BuildLoopTree(Graph* graph, Zone* zone) { +LoopTree* LoopFinder::BuildLoopTree(Graph* graph, TickCounter* tick_counter, + Zone* zone) { LoopTree* loop_tree = new (graph->zone()) LoopTree(graph->NodeCount(), graph->zone()); - LoopFinderImpl finder(graph, loop_tree, zone); + LoopFinderImpl finder(graph, loop_tree, tick_counter, zone); finder.Run(); if (FLAG_trace_turbo_loop) { finder.Print(); @@ -524,7 +533,6 @@ LoopTree* LoopFinder::BuildLoopTree(Graph* graph, Zone* zone) { return loop_tree; } - Node* LoopTree::HeaderNode(Loop* loop) { Node* first = *HeaderNodes(loop).begin(); if (first->opcode() == IrOpcode::kLoop) return first; diff --git a/deps/v8/src/compiler/loop-analysis.h b/deps/v8/src/compiler/loop-analysis.h index 620a9554e08edd..043833a54ca099 100644 --- a/deps/v8/src/compiler/loop-analysis.h +++ b/deps/v8/src/compiler/loop-analysis.h @@ -13,6 +13,9 @@ namespace v8 { namespace internal { + +class TickCounter; + namespace compiler { // TODO(titzer): don't assume entry edges have a particular index. @@ -156,7 +159,8 @@ class LoopTree : public ZoneObject { class V8_EXPORT_PRIVATE LoopFinder { public: // Build a loop tree for the entire graph.
- static LoopTree* BuildLoopTree(Graph* graph, Zone* temp_zone); + static LoopTree* BuildLoopTree(Graph* graph, TickCounter* tick_counter, + Zone* temp_zone); }; diff --git a/deps/v8/src/compiler/machine-graph-verifier.cc b/deps/v8/src/compiler/machine-graph-verifier.cc index f8e78b216953bb..80205f80b64685 100644 --- a/deps/v8/src/compiler/machine-graph-verifier.cc +++ b/deps/v8/src/compiler/machine-graph-verifier.cc @@ -240,6 +240,7 @@ class MachineRepresentationInferrer { MachineType::PointerRepresentation(); break; case IrOpcode::kBitcastTaggedToWord: + case IrOpcode::kBitcastTaggedSignedToWord: representation_vector_[node->id()] = MachineType::PointerRepresentation(); break; @@ -428,6 +429,7 @@ class MachineRepresentationChecker { MachineRepresentation::kWord64); break; case IrOpcode::kBitcastTaggedToWord: + case IrOpcode::kBitcastTaggedSignedToWord: case IrOpcode::kTaggedPoisonOnSpeculation: CheckValueInputIsTagged(node, 0); break; @@ -556,7 +558,7 @@ class MachineRepresentationChecker { case IrOpcode::kParameter: case IrOpcode::kProjection: break; - case IrOpcode::kDebugAbort: + case IrOpcode::kAbortCSAAssert: CheckValueInputIsTagged(node, 0); break; case IrOpcode::kLoad: @@ -700,6 +702,7 @@ class MachineRepresentationChecker { case IrOpcode::kThrow: case IrOpcode::kTypedStateValues: case IrOpcode::kFrameState: + case IrOpcode::kStaticAssert: break; default: if (node->op()->ValueInputCount() != 0) { @@ -748,6 +751,11 @@ class MachineRepresentationChecker { case MachineRepresentation::kCompressedPointer: case MachineRepresentation::kCompressedSigned: return; + case MachineRepresentation::kNone: + if (input->opcode() == IrOpcode::kCompressedHeapConstant) { + return; + } + break; default: break; } @@ -851,6 +859,9 @@ class MachineRepresentationChecker { case MachineRepresentation::kCompressedPointer: return; case MachineRepresentation::kNone: { + if (input->opcode() == IrOpcode::kCompressedHeapConstant) { + return; + } std::ostringstream str; str << "TypeError: node #" << input->id() << ":" << *input->op() << " is untyped."; diff --git a/deps/v8/src/compiler/machine-operator-reducer.cc b/deps/v8/src/compiler/machine-operator-reducer.cc index a6a8e87cf462ca..f720c2908461ba 100644 --- a/deps/v8/src/compiler/machine-operator-reducer.cc +++ b/deps/v8/src/compiler/machine-operator-reducer.cc @@ -710,7 +710,8 @@ Reduction MachineOperatorReducer::Reduce(Node* node) { return ReduceFloat64Compare(node); case IrOpcode::kFloat64RoundDown: return ReduceFloat64RoundDown(node); - case IrOpcode::kBitcastTaggedToWord: { + case IrOpcode::kBitcastTaggedToWord: + case IrOpcode::kBitcastTaggedSignedToWord: { NodeMatcher m(node->InputAt(0)); if (m.IsBitcastWordToTaggedSigned()) { RelaxEffectsAndControls(node); diff --git a/deps/v8/src/compiler/machine-operator.cc b/deps/v8/src/compiler/machine-operator.cc index d2ddedc8fa1708..f447861aca758b 100644 --- a/deps/v8/src/compiler/machine-operator.cc +++ b/deps/v8/src/compiler/machine-operator.cc @@ -140,6 +140,7 @@ MachineType AtomicOpType(Operator const* op) { V(Word64Clz, Operator::kNoProperties, 1, 0, 1) \ V(Word32ReverseBytes, Operator::kNoProperties, 1, 0, 1) \ V(Word64ReverseBytes, Operator::kNoProperties, 1, 0, 1) \ + V(BitcastTaggedSignedToWord, Operator::kNoProperties, 1, 0, 1) \ V(BitcastWordToTaggedSigned, Operator::kNoProperties, 1, 0, 1) \ V(TruncateFloat64ToWord32, Operator::kNoProperties, 1, 0, 1) \ V(ChangeFloat32ToFloat64, Operator::kNoProperties, 1, 0, 1) \ @@ -244,6 +245,13 @@ MachineType AtomicOpType(Operator const* op) { V(Word32PairShl, 
Operator::kNoProperties, 3, 0, 2) \ V(Word32PairShr, Operator::kNoProperties, 3, 0, 2) \ V(Word32PairSar, Operator::kNoProperties, 3, 0, 2) \ + V(F64x2Splat, Operator::kNoProperties, 1, 0, 1) \ + V(F64x2Abs, Operator::kNoProperties, 1, 0, 1) \ + V(F64x2Neg, Operator::kNoProperties, 1, 0, 1) \ + V(F64x2Eq, Operator::kCommutative, 2, 0, 1) \ + V(F64x2Ne, Operator::kCommutative, 2, 0, 1) \ + V(F64x2Lt, Operator::kNoProperties, 2, 0, 1) \ + V(F64x2Le, Operator::kNoProperties, 2, 0, 1) \ V(F32x4Splat, Operator::kNoProperties, 1, 0, 1) \ V(F32x4SConvertI32x4, Operator::kNoProperties, 1, 0, 1) \ V(F32x4UConvertI32x4, Operator::kNoProperties, 1, 0, 1) \ @@ -261,6 +269,17 @@ MachineType AtomicOpType(Operator const* op) { V(F32x4Ne, Operator::kCommutative, 2, 0, 1) \ V(F32x4Lt, Operator::kNoProperties, 2, 0, 1) \ V(F32x4Le, Operator::kNoProperties, 2, 0, 1) \ + V(I64x2Splat, Operator::kNoProperties, 1, 0, 1) \ + V(I64x2Neg, Operator::kNoProperties, 1, 0, 1) \ + V(I64x2Add, Operator::kCommutative, 2, 0, 1) \ + V(I64x2Sub, Operator::kNoProperties, 2, 0, 1) \ + V(I64x2Mul, Operator::kCommutative, 2, 0, 1) \ + V(I64x2Eq, Operator::kCommutative, 2, 0, 1) \ + V(I64x2Ne, Operator::kCommutative, 2, 0, 1) \ + V(I64x2GtS, Operator::kNoProperties, 2, 0, 1) \ + V(I64x2GeS, Operator::kNoProperties, 2, 0, 1) \ + V(I64x2GtU, Operator::kNoProperties, 2, 0, 1) \ + V(I64x2GeU, Operator::kNoProperties, 2, 0, 1) \ V(I32x4Splat, Operator::kNoProperties, 1, 0, 1) \ V(I32x4SConvertF32x4, Operator::kNoProperties, 1, 0, 1) \ V(I32x4SConvertI16x8Low, Operator::kNoProperties, 1, 0, 1) \ @@ -338,6 +357,8 @@ MachineType AtomicOpType(Operator const* op) { V(S128Xor, Operator::kAssociative | Operator::kCommutative, 2, 0, 1) \ V(S128Not, Operator::kNoProperties, 1, 0, 1) \ V(S128Select, Operator::kNoProperties, 3, 0, 1) \ + V(S1x2AnyTrue, Operator::kNoProperties, 1, 0, 1) \ + V(S1x2AllTrue, Operator::kNoProperties, 1, 0, 1) \ V(S1x4AnyTrue, Operator::kNoProperties, 1, 0, 1) \ V(S1x4AllTrue, Operator::kNoProperties, 1, 0, 1) \ V(S1x8AnyTrue, Operator::kNoProperties, 1, 0, 1) \ @@ -439,12 +460,15 @@ MachineType AtomicOpType(Operator const* op) { V(Exchange) #define SIMD_LANE_OP_LIST(V) \ + V(F64x2, 2) \ V(F32x4, 4) \ + V(I64x2, 2) \ V(I32x4, 4) \ V(I16x8, 8) \ V(I8x16, 16) #define SIMD_FORMAT_LIST(V) \ + V(64x2, 64) \ V(32x4, 32) \ V(16x8, 16) \ V(8x16, 8) @@ -754,6 +778,14 @@ struct MachineOperatorGlobalCache { }; Word32AtomicPairCompareExchangeOperator kWord32AtomicPairCompareExchange; + struct MemoryBarrierOperator : public Operator { + MemoryBarrierOperator() + : Operator(IrOpcode::kMemoryBarrier, + Operator::kNoDeopt | Operator::kNoThrow, "MemoryBarrier", 0, + 1, 1, 0, 1, 0) {} + }; + MemoryBarrierOperator kMemoryBarrier; + // The {BitcastWordToTagged} operator must not be marked as pure (especially // not idempotent), because otherwise the splitting logic in the Scheduler // might decide to split these operators, thus potentially creating live @@ -807,12 +839,12 @@ struct MachineOperatorGlobalCache { }; Word64PoisonOnSpeculation kWord64PoisonOnSpeculation; - struct DebugAbortOperator : public Operator { - DebugAbortOperator() - : Operator(IrOpcode::kDebugAbort, Operator::kNoThrow, "DebugAbort", 1, - 1, 1, 0, 1, 0) {} + struct AbortCSAAssertOperator : public Operator { + AbortCSAAssertOperator() + : Operator(IrOpcode::kAbortCSAAssert, Operator::kNoThrow, + "AbortCSAAssert", 1, 1, 1, 0, 1, 0) {} }; - DebugAbortOperator kDebugAbort; + AbortCSAAssertOperator kAbortCSAAssert; struct DebugBreakOperator : public Operator { 
DebugBreakOperator() @@ -1005,8 +1037,8 @@ const Operator* MachineOperatorBuilder::BitcastMaybeObjectToWord() { return &cache_.kBitcastMaybeObjectToWord; } -const Operator* MachineOperatorBuilder::DebugAbort() { - return &cache_.kDebugAbort; +const Operator* MachineOperatorBuilder::AbortCSAAssert() { + return &cache_.kAbortCSAAssert; } const Operator* MachineOperatorBuilder::DebugBreak() { @@ -1017,6 +1049,10 @@ const Operator* MachineOperatorBuilder::Comment(const char* msg) { return new (zone_) CommentOperator(msg); } +const Operator* MachineOperatorBuilder::MemBarrier() { + return &cache_.kMemoryBarrier; +} + const Operator* MachineOperatorBuilder::Word32AtomicLoad( LoadRepresentation rep) { #define LOAD(Type) \ @@ -1300,6 +1336,11 @@ const Operator* MachineOperatorBuilder::S8x16Shuffle( 2, 0, 0, 1, 0, 0, array); } +const uint8_t* S8x16ShuffleOf(Operator const* op) { + DCHECK_EQ(IrOpcode::kS8x16Shuffle, op->opcode()); + return OpParameter<uint8_t*>(op); +} + #undef PURE_BINARY_OP_LIST_32 #undef PURE_BINARY_OP_LIST_64 #undef MACHINE_PURE_OP_LIST diff --git a/deps/v8/src/compiler/machine-operator.h b/deps/v8/src/compiler/machine-operator.h index 8b1250dd30553a..0f8130120693f9 100644 --- a/deps/v8/src/compiler/machine-operator.h +++ b/deps/v8/src/compiler/machine-operator.h @@ -112,6 +112,9 @@ MachineRepresentation AtomicStoreRepresentationOf(Operator const* op) MachineType AtomicOpType(Operator const* op) V8_WARN_UNUSED_RESULT; +V8_EXPORT_PRIVATE const uint8_t* S8x16ShuffleOf(Operator const* op) + V8_WARN_UNUSED_RESULT; + // Interface for building machine-level operators. These operators are // machine-level but machine-independent and thus define a language suitable // for generating code to run on architectures such as ia32, x64, arm, etc. @@ -216,7 +219,7 @@ class V8_EXPORT_PRIVATE MachineOperatorBuilder final AlignmentRequirements::FullUnalignedAccessSupport()); const Operator* Comment(const char* msg); - const Operator* DebugAbort(); + const Operator* AbortCSAAssert(); const Operator* DebugBreak(); const Operator* UnsafePointerAdd(); @@ -295,9 +298,12 @@ class V8_EXPORT_PRIVATE MachineOperatorBuilder final const Operator* Uint64LessThanOrEqual(); const Operator* Uint64Mod(); - // This operator reinterprets the bits of a tagged pointer as word. + // This operator reinterprets the bits of a tagged pointer as a word. const Operator* BitcastTaggedToWord(); + // This operator reinterprets the bits of a Smi as a word. + const Operator* BitcastTaggedSignedToWord(); + // This operator reinterprets the bits of a tagged MaybeObject pointer as // a word. const Operator* BitcastMaybeObjectToWord(); @@ -462,6 +468,16 @@ class V8_EXPORT_PRIVATE MachineOperatorBuilder final const Operator* Float64SilenceNaN(); // SIMD operators.
+ const Operator* F64x2Splat(); + const Operator* F64x2Abs(); + const Operator* F64x2Neg(); + const Operator* F64x2ExtractLane(int32_t); + const Operator* F64x2ReplaceLane(int32_t); + const Operator* F64x2Eq(); + const Operator* F64x2Ne(); + const Operator* F64x2Lt(); + const Operator* F64x2Le(); + const Operator* F32x4Splat(); const Operator* F32x4ExtractLane(int32_t); const Operator* F32x4ReplaceLane(int32_t); @@ -483,6 +499,23 @@ class V8_EXPORT_PRIVATE MachineOperatorBuilder final const Operator* F32x4Lt(); const Operator* F32x4Le(); + const Operator* I64x2Splat(); + const Operator* I64x2ExtractLane(int32_t); + const Operator* I64x2ReplaceLane(int32_t); + const Operator* I64x2Neg(); + const Operator* I64x2Shl(int32_t); + const Operator* I64x2ShrS(int32_t); + const Operator* I64x2Add(); + const Operator* I64x2Sub(); + const Operator* I64x2Mul(); + const Operator* I64x2Eq(); + const Operator* I64x2Ne(); + const Operator* I64x2GtS(); + const Operator* I64x2GeS(); + const Operator* I64x2ShrU(int32_t); + const Operator* I64x2GtU(); + const Operator* I64x2GeU(); + const Operator* I32x4Splat(); const Operator* I32x4ExtractLane(int32_t); const Operator* I32x4ReplaceLane(int32_t); @@ -585,6 +618,8 @@ class V8_EXPORT_PRIVATE MachineOperatorBuilder final const Operator* S8x16Shuffle(const uint8_t shuffle[16]); + const Operator* S1x2AnyTrue(); + const Operator* S1x2AllTrue(); const Operator* S1x4AnyTrue(); const Operator* S1x4AllTrue(); const Operator* S1x8AnyTrue(); @@ -620,6 +655,9 @@ class V8_EXPORT_PRIVATE MachineOperatorBuilder final const Operator* LoadFramePointer(); const Operator* LoadParentFramePointer(); + // Memory barrier. + const Operator* MemBarrier(); + // atomic-load [base + index] const Operator* Word32AtomicLoad(LoadRepresentation rep); // atomic-load [base + index] diff --git a/deps/v8/src/compiler/map-inference.cc b/deps/v8/src/compiler/map-inference.cc index f43ba0d15536f8..07ac95b4f7a539 100644 --- a/deps/v8/src/compiler/map-inference.cc +++ b/deps/v8/src/compiler/map-inference.cc @@ -19,7 +19,7 @@ MapInference::MapInference(JSHeapBroker* broker, Node* object, Node* effect) : broker_(broker), object_(object) { ZoneHandleSet<Map> maps; auto result = - NodeProperties::InferReceiverMaps(broker_, object_, effect, &maps); + NodeProperties::InferReceiverMapsUnsafe(broker_, object_, effect, &maps); maps_.insert(maps_.end(), maps.begin(), maps.end()); maps_state_ = (result == NodeProperties::kUnreliableReceiverMaps) ? kUnreliableDontNeedGuard @@ -65,21 +65,25 @@ bool MapInference::AllOfInstanceTypes(std::function<bool(InstanceType)> f) { bool MapInference::AllOfInstanceTypesUnsafe( std::function<bool(InstanceType)> f) const { - // TODO(neis): Brokerize the MapInference.
- AllowHandleDereference allow_handle_deref; CHECK(HaveMaps()); - return std::all_of(maps_.begin(), maps_.end(), - [f](Handle<Map> map) { return f(map->instance_type()); }); + auto instance_type = [this, f](Handle<Map> map) { + MapRef map_ref(broker_, map); + return f(map_ref.instance_type()); + }; + return std::all_of(maps_.begin(), maps_.end(), instance_type); } bool MapInference::AnyOfInstanceTypesUnsafe( std::function<bool(InstanceType)> f) const { - AllowHandleDereference allow_handle_deref; CHECK(HaveMaps()); - return std::any_of(maps_.begin(), maps_.end(), - [f](Handle<Map> map) { return f(map->instance_type()); }); + auto instance_type = [this, f](Handle<Map> map) { + MapRef map_ref(broker_, map); + return f(map_ref.instance_type()); + }; + + return std::any_of(maps_.begin(), maps_.end(), instance_type); } MapHandles const& MapInference::GetMaps() { @@ -122,7 +126,10 @@ bool MapInference::RelyOnMapsHelper(CompilationDependencies* dependencies, const VectorSlotPair& feedback) { if (Safe()) return true; - auto is_stable = [](Handle<Map> map) { return map->is_stable(); }; + auto is_stable = [this](Handle<Map> map) { + MapRef map_ref(broker_, map); + return map_ref.is_stable(); + }; if (dependencies != nullptr && std::all_of(maps_.cbegin(), maps_.cend(), is_stable)) { for (Handle<Map> map : maps_) { diff --git a/deps/v8/src/compiler/memory-optimizer.cc b/deps/v8/src/compiler/memory-optimizer.cc index 29cbb4d26c2e6d..368c060c1d90e8 100644 --- a/deps/v8/src/compiler/memory-optimizer.cc +++ b/deps/v8/src/compiler/memory-optimizer.cc @@ -5,6 +5,7 @@ #include "src/compiler/memory-optimizer.h" #include "src/codegen/interface-descriptors.h" +#include "src/codegen/tick-counter.h" #include "src/compiler/js-graph.h" #include "src/compiler/linkage.h" #include "src/compiler/node-matchers.h" @@ -20,7 +21,8 @@ namespace compiler { MemoryOptimizer::MemoryOptimizer(JSGraph* jsgraph, Zone* zone, PoisoningMitigationLevel poisoning_level, AllocationFolding allocation_folding, - const char* function_debug_name) + const char* function_debug_name, + TickCounter* tick_counter) : jsgraph_(jsgraph), empty_state_(AllocationState::Empty(zone)), pending_(zone), @@ -29,7 +31,8 @@ MemoryOptimizer::MemoryOptimizer(JSGraph* jsgraph, Zone* zone, graph_assembler_(jsgraph, nullptr, nullptr, zone), poisoning_level_(poisoning_level), allocation_folding_(allocation_folding), - function_debug_name_(function_debug_name) {} + function_debug_name_(function_debug_name), + tick_counter_(tick_counter) {} void MemoryOptimizer::Optimize() { EnqueueUses(graph()->start(), empty_state()); @@ -99,7 +102,7 @@ bool CanAllocate(const Node* node) { case IrOpcode::kBitcastTaggedToWord: case IrOpcode::kBitcastWordToTagged: case IrOpcode::kComment: - case IrOpcode::kDebugAbort: + case IrOpcode::kAbortCSAAssert: case IrOpcode::kDebugBreak: case IrOpcode::kDeoptimizeIf: case IrOpcode::kDeoptimizeUnless: @@ -108,6 +111,7 @@ bool CanAllocate(const Node* node) { case IrOpcode::kLoad: case IrOpcode::kLoadElement: case IrOpcode::kLoadField: + case IrOpcode::kLoadFromObject: case IrOpcode::kPoisonedLoad: case IrOpcode::kProtectedLoad: case IrOpcode::kProtectedStore: @@ -118,6 +122,7 @@ bool CanAllocate(const Node* node) { case IrOpcode::kStore: case IrOpcode::kStoreElement: case IrOpcode::kStoreField: + case IrOpcode::kStoreToObject: case IrOpcode::kTaggedPoisonOnSpeculation: case IrOpcode::kUnalignedLoad: case IrOpcode::kUnalignedStore: @@ -214,6 +219,7 @@ Node* EffectPhiForPhi(Node* phi) { } // namespace void MemoryOptimizer::VisitNode(Node* node, AllocationState const* state) { + 
tick_counter_->DoTick(); DCHECK(!node->IsDead()); DCHECK_LT(0, node->op()->EffectInputCount()); switch (node->opcode()) { @@ -296,6 +302,21 @@ void MemoryOptimizer::VisitAllocateRaw(Node* node, } } + Node* allocate_builtin; + if (allocation_type == AllocationType::kYoung) { + if (allocation.allow_large_objects() == AllowLargeObjects::kTrue) { + allocate_builtin = __ AllocateInYoungGenerationStubConstant(); + } else { + allocate_builtin = __ AllocateRegularInYoungGenerationStubConstant(); + } + } else { + if (allocation.allow_large_objects() == AllowLargeObjects::kTrue) { + allocate_builtin = __ AllocateInOldGenerationStubConstant(); + } else { + allocate_builtin = __ AllocateRegularInOldGenerationStubConstant(); + } + } + // Determine the top/limit addresses. Node* top_address = __ ExternalConstant( allocation_type == AllocationType::kYoung @@ -371,11 +392,6 @@ void MemoryOptimizer::VisitAllocateRaw(Node* node, __ Bind(&call_runtime); { - Node* target = allocation_type == AllocationType::kYoung - ? __ - AllocateInYoungGenerationStubConstant() - : __ - AllocateInOldGenerationStubConstant(); if (!allocate_operator_.is_set()) { auto descriptor = AllocateDescriptor{}; auto call_descriptor = Linkage::GetStubCallDescriptor( @@ -384,7 +400,7 @@ void MemoryOptimizer::VisitAllocateRaw(Node* node, allocate_operator_.set(common()->Call(call_descriptor)); } Node* vfalse = __ BitcastTaggedToWord( - __ Call(allocate_operator_.get(), target, size)); + __ Call(allocate_operator_.get(), allocate_builtin, size)); vfalse = __ IntSub(vfalse, __ IntPtrConstant(kHeapObjectTag)); __ Goto(&done, vfalse); } @@ -434,11 +450,6 @@ void MemoryOptimizer::VisitAllocateRaw(Node* node, __ IntAdd(top, __ IntPtrConstant(kHeapObjectTag)))); __ Bind(&call_runtime); - Node* target = allocation_type == AllocationType::kYoung - ? 
__ - AllocateInYoungGenerationStubConstant() - : __ - AllocateInOldGenerationStubConstant(); if (!allocate_operator_.is_set()) { auto descriptor = AllocateDescriptor{}; auto call_descriptor = Linkage::GetStubCallDescriptor( @@ -446,7 +457,7 @@ void MemoryOptimizer::VisitAllocateRaw(Node* node, CallDescriptor::kCanUseRoots, Operator::kNoThrow); allocate_operator_.set(common()->Call(call_descriptor)); } - __ Goto(&done, __ Call(allocate_operator_.get(), target, size)); + __ Goto(&done, __ Call(allocate_operator_.get(), allocate_builtin, size)); __ Bind(&done); value = done.PhiAt(0); @@ -483,8 +494,6 @@ void MemoryOptimizer::VisitLoadFromObject(Node* node, AllocationState const* state) { DCHECK_EQ(IrOpcode::kLoadFromObject, node->opcode()); ObjectAccess const& access = ObjectAccessOf(node->op()); - Node* offset = node->InputAt(1); - node->ReplaceInput(1, __ IntSub(offset, __ IntPtrConstant(kHeapObjectTag))); NodeProperties::ChangeOp(node, machine()->Load(access.machine_type)); EnqueueUses(node, state); } @@ -494,9 +503,7 @@ void MemoryOptimizer::VisitStoreToObject(Node* node, DCHECK_EQ(IrOpcode::kStoreToObject, node->opcode()); ObjectAccess const& access = ObjectAccessOf(node->op()); Node* object = node->InputAt(0); - Node* offset = node->InputAt(1); Node* value = node->InputAt(2); - node->ReplaceInput(1, __ IntSub(offset, __ IntPtrConstant(kHeapObjectTag))); WriteBarrierKind write_barrier_kind = ComputeWriteBarrierKind( node, object, value, state, access.write_barrier_kind); NodeProperties::ChangeOp( diff --git a/deps/v8/src/compiler/memory-optimizer.h b/deps/v8/src/compiler/memory-optimizer.h index cbefcb67de4fb7..71f33fa3d7d7df 100644 --- a/deps/v8/src/compiler/memory-optimizer.h +++ b/deps/v8/src/compiler/memory-optimizer.h @@ -10,6 +10,9 @@ namespace v8 { namespace internal { + +class TickCounter; + namespace compiler { // Forward declarations. 
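The TickCounter changes in this patch (LoopFinderImpl above, MemoryOptimizer here) all follow one pattern: the counter for the current compilation job is passed into the phase's constructor, stored as a TickCounter* const member, and bumped via DoTick() once per worklist iteration so that progress of a long-running phase can be observed from outside. The following standalone C++ sketch illustrates the pattern; it assumes a simplified TickCounter rather than the real one in src/codegen/tick-counter.h, and ExamplePhase is an invented stand-in for a compiler phase.

#include <cstddef>
#include <deque>

// Illustrative stand-in for v8::internal::TickCounter: one counter per
// compilation job, bumped once per unit of work.
class TickCounter {
 public:
  void DoTick() { ++ticks_; }
  size_t CurrentTicks() const { return ticks_; }

 private:
  size_t ticks_ = 0;
};

// A phase mirrors LoopFinderImpl/MemoryOptimizer: it receives the counter
// in its constructor and ticks it in its main worklist loop.
class ExamplePhase {
 public:
  explicit ExamplePhase(TickCounter* tick_counter)
      : tick_counter_(tick_counter) {}

  void Run(std::deque<int> worklist) {
    while (!worklist.empty()) {
      tick_counter_->DoTick();  // one tick per processed work item
      worklist.pop_front();
    }
  }

 private:
  TickCounter* const tick_counter_;
};

int main() {
  TickCounter ticks;
  ExamplePhase phase(&ticks);
  phase.Run({1, 2, 3});
  return ticks.CurrentTicks() == 3 ? 0 : 1;  // three items, three ticks
}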
@@ -36,7 +39,7 @@ class MemoryOptimizer final { MemoryOptimizer(JSGraph* jsgraph, Zone* zone, PoisoningMitigationLevel poisoning_level, AllocationFolding allocation_folding, - const char* function_debug_name); + const char* function_debug_name, TickCounter* tick_counter); ~MemoryOptimizer() = default; void Optimize(); @@ -158,6 +161,7 @@ class MemoryOptimizer final { PoisoningMitigationLevel poisoning_level_; AllocationFolding allocation_folding_; const char* function_debug_name_; + TickCounter* const tick_counter_; DISALLOW_IMPLICIT_CONSTRUCTORS(MemoryOptimizer); }; diff --git a/deps/v8/src/compiler/node-properties.cc b/deps/v8/src/compiler/node-properties.cc index d6528c553a149c..1e00ec00f48a29 100644 --- a/deps/v8/src/compiler/node-properties.cc +++ b/deps/v8/src/compiler/node-properties.cc @@ -5,6 +5,7 @@ #include "src/compiler/node-properties.h" #include "src/compiler/common-operator.h" #include "src/compiler/graph.h" +#include "src/compiler/js-heap-broker.h" #include "src/compiler/js-operator.h" #include "src/compiler/linkage.h" #include "src/compiler/map-inference.h" @@ -392,7 +393,7 @@ base::Optional<MapRef> NodeProperties::GetJSCreateMap(JSHeapBroker* broker, } // static -NodeProperties::InferReceiverMapsResult NodeProperties::InferReceiverMaps( +NodeProperties::InferReceiverMapsResult NodeProperties::InferReceiverMapsUnsafe( JSHeapBroker* broker, Node* receiver, Node* effect, ZoneHandleSet<Map>* maps_return) { HeapObjectMatcher m(receiver); diff --git a/deps/v8/src/compiler/node-properties.h b/deps/v8/src/compiler/node-properties.h index 4a23b6781d9b8c..a660fe70220670 100644 --- a/deps/v8/src/compiler/node-properties.h +++ b/deps/v8/src/compiler/node-properties.h @@ -151,7 +151,8 @@ class V8_EXPORT_PRIVATE NodeProperties final { kReliableReceiverMaps, // Receiver maps can be trusted. kUnreliableReceiverMaps // Receiver maps might have changed (side-effect). }; - static InferReceiverMapsResult InferReceiverMaps( + // DO NOT USE InferReceiverMapsUnsafe IN NEW CODE. Use MapInference instead. + static InferReceiverMapsResult InferReceiverMapsUnsafe( JSHeapBroker* broker, Node* receiver, Node* effect, ZoneHandleSet<Map>* maps_return); diff --git a/deps/v8/src/compiler/node.cc b/deps/v8/src/compiler/node.cc index 50cfdf62486bf4..7688379e9f317d 100644 --- a/deps/v8/src/compiler/node.cc +++ b/deps/v8/src/compiler/node.cc @@ -303,7 +303,13 @@ void Node::Print() const { void Node::Print(std::ostream& os) const { os << *this << std::endl; for (Node* input : this->inputs()) { - os << " " << *input << std::endl; + os << " "; + if (input) { + os << *input; + } else { + os << "(NULL)"; + } + os << std::endl; } } diff --git a/deps/v8/src/compiler/opcodes.h b/deps/v8/src/compiler/opcodes.h index 9ac8ec581f31b3..d621e23e3a3c42 100644 --- a/deps/v8/src/compiler/opcodes.h +++ b/deps/v8/src/compiler/opcodes.h @@ -45,6 +45,7 @@ V(NumberConstant) \ V(PointerConstant) \ V(HeapConstant) \ + V(CompressedHeapConstant) \ V(RelocatableInt32Constant) \ V(RelocatableInt64Constant) @@ -231,6 +232,7 @@ // Opcodes for VirtualMachine-level operators.
#define SIMPLIFIED_CHANGE_OP_LIST(V) \ + V(ChangeCompressedSignedToInt32) \ V(ChangeTaggedSignedToInt32) \ V(ChangeTaggedSignedToInt64) \ V(ChangeTaggedToInt32) \ @@ -240,6 +242,7 @@ V(ChangeTaggedToTaggedSigned) \ V(ChangeCompressedToTaggedSigned) \ V(ChangeTaggedToCompressedSigned) \ + V(ChangeInt31ToCompressedSigned) \ V(ChangeInt31ToTaggedSigned) \ V(ChangeInt32ToTagged) \ V(ChangeInt64ToTagged) \ @@ -249,6 +252,8 @@ V(ChangeFloat64ToTaggedPointer) \ V(ChangeTaggedToBit) \ V(ChangeBitToTagged) \ + V(ChangeUint64ToBigInt) \ + V(TruncateBigIntToUint64) \ V(TruncateTaggedToWord32) \ V(TruncateTaggedToFloat64) \ V(TruncateTaggedToBit) \ @@ -262,6 +267,7 @@ V(CheckedUint32Div) \ V(CheckedUint32Mod) \ V(CheckedInt32Mul) \ + V(CheckedInt32ToCompressedSigned) \ V(CheckedInt32ToTaggedSigned) \ V(CheckedInt64ToInt32) \ V(CheckedInt64ToTaggedSigned) \ @@ -318,6 +324,8 @@ V(NumberMin) \ V(NumberPow) +#define SIMPLIFIED_BIGINT_BINOP_LIST(V) V(BigIntAdd) + #define SIMPLIFIED_SPECULATIVE_NUMBER_BINOP_LIST(V) \ V(SpeculativeNumberAdd) \ V(SpeculativeNumberSubtract) \ @@ -369,6 +377,11 @@ V(NumberToUint8Clamped) \ V(NumberSilenceNaN) +#define SIMPLIFIED_BIGINT_UNOP_LIST(V) \ + V(BigIntAsUintN) \ + V(BigIntNegate) \ + V(CheckBigInt) + #define SIMPLIFIED_SPECULATIVE_NUMBER_UNOP_LIST(V) V(SpeculativeToNumber) #define SIMPLIFIED_OTHER_OP_LIST(V) \ @@ -382,6 +395,7 @@ V(StringCodePointAt) \ V(StringFromSingleCharCode) \ V(StringFromSingleCodePoint) \ + V(StringFromCodePointAt) \ V(StringIndexOf) \ V(StringLength) \ V(StringToLowerCaseIntl) \ @@ -461,16 +475,24 @@ V(FindOrderedHashMapEntryForInt32Key) \ V(PoisonIndex) \ V(RuntimeAbort) \ + V(AssertType) \ V(DateNow) +#define SIMPLIFIED_SPECULATIVE_BIGINT_BINOP_LIST(V) V(SpeculativeBigIntAdd) +#define SIMPLIFIED_SPECULATIVE_BIGINT_UNOP_LIST(V) V(SpeculativeBigIntNegate) + #define SIMPLIFIED_OP_LIST(V) \ SIMPLIFIED_CHANGE_OP_LIST(V) \ SIMPLIFIED_CHECKED_OP_LIST(V) \ SIMPLIFIED_COMPARE_BINOP_LIST(V) \ SIMPLIFIED_NUMBER_BINOP_LIST(V) \ + SIMPLIFIED_BIGINT_BINOP_LIST(V) \ SIMPLIFIED_SPECULATIVE_NUMBER_BINOP_LIST(V) \ SIMPLIFIED_NUMBER_UNOP_LIST(V) \ + SIMPLIFIED_BIGINT_UNOP_LIST(V) \ SIMPLIFIED_SPECULATIVE_NUMBER_UNOP_LIST(V) \ + SIMPLIFIED_SPECULATIVE_BIGINT_UNOP_LIST(V) \ + SIMPLIFIED_SPECULATIVE_BIGINT_BINOP_LIST(V) \ SIMPLIFIED_OTHER_OP_LIST(V) // Opcodes for Machine-level operators. 
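These opcode tables are consumed through the usual X-macro idiom: each consumer defines V to stamp out one declaration, case label, or table entry per opcode, exactly as operation-typer.h does with DECLARE_METHOD in a later hunk. A self-contained illustration of the technique (list and names hypothetical):

    #include <cstdio>

    // A miniature stand-in for lists like SIMPLIFIED_BIGINT_BINOP_LIST.
    #define DEMO_OP_LIST(V) \
      V(BigIntAdd)          \
      V(BigIntNegate)

    // Consumer 1: declare one visitor per entry.
    #define DECLARE_METHOD(Name) void Visit##Name();
    DEMO_OP_LIST(DECLARE_METHOD)
    #undef DECLARE_METHOD

    // Consumer 2: enumerate the entries at runtime.
    int main() {
    #define PRINT_NAME(Name) std::printf("%s\n", #Name);
      DEMO_OP_LIST(PRINT_NAME)
    #undef PRINT_NAME
    }

Adding an opcode in exactly one place (the list) then propagates to every consumer, which is why this patch can introduce the BigInt and SIMD operators purely by editing these tables.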
@@ -616,7 +638,7 @@ MACHINE_FLOAT64_BINOP_LIST(V) \ MACHINE_FLOAT64_UNOP_LIST(V) \ MACHINE_WORD64_ATOMIC_OP_LIST(V) \ - V(DebugAbort) \ + V(AbortCSAAssert) \ V(DebugBreak) \ V(Comment) \ V(Load) \ @@ -631,6 +653,7 @@ V(Word64ReverseBytes) \ V(Int64AbsWithOverflow) \ V(BitcastTaggedToWord) \ + V(BitcastTaggedSignedToWord) \ V(BitcastWordToTagged) \ V(BitcastWordToTaggedSigned) \ V(TruncateFloat64ToWord32) \ @@ -692,6 +715,7 @@ V(Word32PairSar) \ V(ProtectedLoad) \ V(ProtectedStore) \ + V(MemoryBarrier) \ V(Word32AtomicLoad) \ V(Word32AtomicStore) \ V(Word32AtomicExchange) \ @@ -718,6 +742,15 @@ V(UnsafePointerAdd) #define MACHINE_SIMD_OP_LIST(V) \ + V(F64x2Splat) \ + V(F64x2ExtractLane) \ + V(F64x2ReplaceLane) \ + V(F64x2Abs) \ + V(F64x2Neg) \ + V(F64x2Eq) \ + V(F64x2Ne) \ + V(F64x2Lt) \ + V(F64x2Le) \ V(F32x4Splat) \ V(F32x4ExtractLane) \ V(F32x4ReplaceLane) \ @@ -739,6 +772,22 @@ V(F32x4Le) \ V(F32x4Gt) \ V(F32x4Ge) \ + V(I64x2Splat) \ + V(I64x2ExtractLane) \ + V(I64x2ReplaceLane) \ + V(I64x2Neg) \ + V(I64x2Shl) \ + V(I64x2ShrS) \ + V(I64x2Add) \ + V(I64x2Sub) \ + V(I64x2Mul) \ + V(I64x2Eq) \ + V(I64x2Ne) \ + V(I64x2GtS) \ + V(I64x2GeS) \ + V(I64x2ShrU) \ + V(I64x2GtU) \ + V(I64x2GeU) \ V(I32x4Splat) \ V(I32x4ExtractLane) \ V(I32x4ReplaceLane) \ @@ -844,6 +893,8 @@ V(S128Xor) \ V(S128Select) \ V(S8x16Shuffle) \ + V(S1x2AnyTrue) \ + V(S1x2AllTrue) \ V(S1x4AnyTrue) \ V(S1x4AllTrue) \ V(S1x8AnyTrue) \ diff --git a/deps/v8/src/compiler/operation-typer.cc b/deps/v8/src/compiler/operation-typer.cc index 475623f76b3781..8cb991ceb73c5c 100644 --- a/deps/v8/src/compiler/operation-typer.cc +++ b/deps/v8/src/compiler/operation-typer.cc @@ -5,6 +5,7 @@ #include "src/compiler/operation-typer.h" #include "src/compiler/common-operator.h" +#include "src/compiler/js-heap-broker.h" #include "src/compiler/type-cache.h" #include "src/compiler/types.h" #include "src/execution/isolate.h" @@ -259,7 +260,8 @@ Type OperationTyper::ConvertReceiver(Type type) { type = Type::Intersect(type, Type::Receiver(), zone()); if (maybe_primitive) { // ConvertReceiver maps null and undefined to the JSGlobalProxy of the - // target function, and all other primitives are wrapped into a JSValue. + // target function, and all other primitives are wrapped into a + // JSPrimitiveWrapper. 
type = Type::Union(type, Type::OtherObject(), zone()); } return type; @@ -577,6 +579,13 @@ Type OperationTyper::NumberSilenceNaN(Type type) { return type; } +Type OperationTyper::BigIntAsUintN(Type type) { + DCHECK(type.Is(Type::BigInt())); + return Type::BigInt(); +} + +Type OperationTyper::CheckBigInt(Type type) { return Type::BigInt(); } + Type OperationTyper::NumberAdd(Type lhs, Type rhs) { DCHECK(lhs.Is(Type::Number())); DCHECK(rhs.Is(Type::Number())); @@ -1111,6 +1120,26 @@ SPECULATIVE_NUMBER_BINOP(NumberShiftRight) SPECULATIVE_NUMBER_BINOP(NumberShiftRightLogical) #undef SPECULATIVE_NUMBER_BINOP +Type OperationTyper::BigIntAdd(Type lhs, Type rhs) { + if (lhs.IsNone() || rhs.IsNone()) return Type::None(); + return Type::BigInt(); +} + +Type OperationTyper::BigIntNegate(Type type) { + if (type.IsNone()) return type; + return Type::BigInt(); +} + +Type OperationTyper::SpeculativeBigIntAdd(Type lhs, Type rhs) { + if (lhs.IsNone() || rhs.IsNone()) return Type::None(); + return Type::BigInt(); +} + +Type OperationTyper::SpeculativeBigIntNegate(Type type) { + if (type.IsNone()) return type; + return Type::BigInt(); +} + Type OperationTyper::SpeculativeToNumber(Type type) { return ToNumber(Type::Intersect(type, Type::NumberOrOddball(), zone())); } diff --git a/deps/v8/src/compiler/operation-typer.h b/deps/v8/src/compiler/operation-typer.h index a905662ad17bdb..728e297a1b218b 100644 --- a/deps/v8/src/compiler/operation-typer.h +++ b/deps/v8/src/compiler/operation-typer.h @@ -43,14 +43,18 @@ class V8_EXPORT_PRIVATE OperationTyper { // Unary operators. #define DECLARE_METHOD(Name) Type Name(Type type); SIMPLIFIED_NUMBER_UNOP_LIST(DECLARE_METHOD) + SIMPLIFIED_BIGINT_UNOP_LIST(DECLARE_METHOD) SIMPLIFIED_SPECULATIVE_NUMBER_UNOP_LIST(DECLARE_METHOD) + SIMPLIFIED_SPECULATIVE_BIGINT_UNOP_LIST(DECLARE_METHOD) DECLARE_METHOD(ConvertReceiver) #undef DECLARE_METHOD -// Number binary operators. +// Numeric binary operators. #define DECLARE_METHOD(Name) Type Name(Type lhs, Type rhs); SIMPLIFIED_NUMBER_BINOP_LIST(DECLARE_METHOD) + SIMPLIFIED_BIGINT_BINOP_LIST(DECLARE_METHOD) SIMPLIFIED_SPECULATIVE_NUMBER_BINOP_LIST(DECLARE_METHOD) + SIMPLIFIED_SPECULATIVE_BIGINT_BINOP_LIST(DECLARE_METHOD) #undef DECLARE_METHOD // Comparison operators. 
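The BigInt typing rules added above all share one deliberately coarse shape: Type::None() (the bottom element of the type lattice) propagates so that dead inputs keep their uses dead, and every other input simply widens to Type::BigInt(). Restated as a sketch for clarity (not code from this patch):

    // Shared shape of the BigIntAdd/BigIntNegate/SpeculativeBigInt* rules.
    Type TypeBigIntBinop(Type lhs, Type rhs) {
      if (lhs.IsNone() || rhs.IsNone()) return Type::None();  // dead input
      return Type::BigInt();  // no per-value tracking for BigInts yet
    }

Unlike the Number operators, there is no range or constant tracking here; precision for BigInt operations is left for later work.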
diff --git a/deps/v8/src/compiler/pipeline.cc b/deps/v8/src/compiler/pipeline.cc index e771cef1230836..eb060b71e1fcd3 100644 --- a/deps/v8/src/compiler/pipeline.cc +++ b/deps/v8/src/compiler/pipeline.cc @@ -16,6 +16,7 @@ #include "src/codegen/compiler.h" #include "src/codegen/optimized-compilation-info.h" #include "src/codegen/register-configuration.h" +#include "src/compiler/add-type-assertions-reducer.h" #include "src/compiler/backend/code-generator.h" #include "src/compiler/backend/frame-elider.h" #include "src/compiler/backend/instruction-selector.h" @@ -34,6 +35,7 @@ #include "src/compiler/compiler-source-position-table.h" #include "src/compiler/constant-folding-reducer.h" #include "src/compiler/control-flow-optimizer.h" +#include "src/compiler/csa-load-elimination.h" #include "src/compiler/dead-code-elimination.h" #include "src/compiler/decompression-elimination.h" #include "src/compiler/effect-control-linearizer.h" @@ -114,7 +116,8 @@ class PipelineData { instruction_zone_(instruction_zone_scope_.zone()), codegen_zone_scope_(zone_stats_, ZONE_NAME), codegen_zone_(codegen_zone_scope_.zone()), - broker_(new JSHeapBroker(isolate_, info_->zone())), + broker_(new JSHeapBroker(isolate_, info_->zone(), + info_->trace_heap_broker_enabled())), register_allocation_zone_scope_(zone_stats_, ZONE_NAME), register_allocation_zone_(register_allocation_zone_scope_.zone()), assembler_options_(AssemblerOptions::Default(isolate)) { @@ -266,7 +269,7 @@ class PipelineData { JSOperatorBuilder* javascript() const { return javascript_; } JSGraph* jsgraph() const { return jsgraph_; } MachineGraph* mcgraph() const { return mcgraph_; } - Handle<Context> native_context() const { + Handle<NativeContext> native_context() const { return handle(info()->native_context(), isolate()); } Handle<JSGlobalObject> global_object() const { @@ -324,7 +327,8 @@ class PipelineData { Typer* CreateTyper() { DCHECK_NULL(typer_); - typer_ = new Typer(broker(), typer_flags_, graph()); + typer_ = + new Typer(broker(), typer_flags_, graph(), &info()->tick_counter()); return typer_; } @@ -397,7 +401,8 @@ class PipelineData { DCHECK_NULL(frame_); int fixed_frame_size = 0; if (call_descriptor != nullptr) { - fixed_frame_size = call_descriptor->CalculateFixedFrameSize(); + fixed_frame_size = + call_descriptor->CalculateFixedFrameSize(info()->code_kind()); } frame_ = new (codegen_zone()) Frame(fixed_frame_size); } @@ -408,7 +413,8 @@ class PipelineData { DCHECK_NULL(register_allocation_data_); register_allocation_data_ = new (register_allocation_zone()) RegisterAllocationData(config, register_allocation_zone(), frame(), - sequence(), flags, debug_name()); + sequence(), flags, &info()->tick_counter(), + debug_name()); } void InitializeOsrHelper() { @@ -1040,6 +1046,119 @@ void PipelineCompilationJob::RegisterWeakObjectsInOptimizedCode( code->set_can_have_weak_objects(true); } +class WasmHeapStubCompilationJob final : public OptimizedCompilationJob { + public: + WasmHeapStubCompilationJob(Isolate* isolate, CallDescriptor* call_descriptor, + std::unique_ptr<Zone> zone, Graph* graph, + Code::Kind kind, + std::unique_ptr<char[]> debug_name, + const AssemblerOptions& options, + SourcePositionTable* source_positions) + // Note that the OptimizedCompilationInfo is not initialized at the time + // we pass it to the CompilationJob constructor, but it is not + // dereferenced there.
+ : OptimizedCompilationJob(isolate->stack_guard()->real_climit(), &info_, "TurboFan"), + debug_name_(std::move(debug_name)), + info_(CStrVector(debug_name_.get()), graph->zone(), kind), + call_descriptor_(call_descriptor), + zone_stats_(isolate->allocator()), + zone_(std::move(zone)), + graph_(graph), + data_(&zone_stats_, &info_, isolate, graph_, nullptr, source_positions, + new (zone_.get()) NodeOriginTable(graph_), nullptr, options), + pipeline_(&data_) {} + + ~WasmHeapStubCompilationJob() = default; + + protected: + Status PrepareJobImpl(Isolate* isolate) final; + Status ExecuteJobImpl() final; + Status FinalizeJobImpl(Isolate* isolate) final; + + private: + std::unique_ptr<char[]> debug_name_; + OptimizedCompilationInfo info_; + CallDescriptor* call_descriptor_; + ZoneStats zone_stats_; + std::unique_ptr<Zone> zone_; + Graph* graph_; + PipelineData data_; + PipelineImpl pipeline_; + + DISALLOW_COPY_AND_ASSIGN(WasmHeapStubCompilationJob); +}; + +// static +std::unique_ptr<OptimizedCompilationJob> +Pipeline::NewWasmHeapStubCompilationJob(Isolate* isolate, + CallDescriptor* call_descriptor, + std::unique_ptr<Zone> zone, + Graph* graph, Code::Kind kind, + std::unique_ptr<char[]> debug_name, + const AssemblerOptions& options, + SourcePositionTable* source_positions) { + return base::make_unique<WasmHeapStubCompilationJob>( + isolate, call_descriptor, std::move(zone), graph, kind, + std::move(debug_name), options, source_positions); +} + +CompilationJob::Status WasmHeapStubCompilationJob::PrepareJobImpl( + Isolate* isolate) { + std::unique_ptr<PipelineStatistics> pipeline_statistics; + if (FLAG_turbo_stats || FLAG_turbo_stats_nvp) { + pipeline_statistics.reset(new PipelineStatistics( + &info_, isolate->GetTurboStatistics(), &zone_stats_)); + pipeline_statistics->BeginPhaseKind("V8.WasmStubCodegen"); + } + if (info_.trace_turbo_json_enabled() || info_.trace_turbo_graph_enabled()) { + CodeTracer::Scope tracing_scope(data_.GetCodeTracer()); + OFStream os(tracing_scope.file()); + os << "---------------------------------------------------\n" + << "Begin compiling method " << info_.GetDebugName().get() + << " using TurboFan" << std::endl; + } + if (info_.trace_turbo_graph_enabled()) { // Simple textual RPO. + StdoutStream{} << "-- wasm stub " << Code::Kind2String(info_.code_kind()) + << " graph -- " << std::endl + << AsRPO(*data_.graph()); + } + + if (info_.trace_turbo_json_enabled()) { + TurboJsonFile json_of(&info_, std::ios_base::trunc); + json_of << "{\"function\":\"" << info_.GetDebugName().get() + << "\", \"source\":\"\",\n\"phases\":["; + } + pipeline_.RunPrintAndVerify("V8.WasmMachineCode", true); + return CompilationJob::SUCCEEDED; +} + +CompilationJob::Status WasmHeapStubCompilationJob::ExecuteJobImpl() { + pipeline_.ComputeScheduledGraph(); + if (pipeline_.SelectInstructionsAndAssemble(call_descriptor_)) { + return CompilationJob::SUCCEEDED; + } + return CompilationJob::FAILED; +} + +CompilationJob::Status WasmHeapStubCompilationJob::FinalizeJobImpl( + Isolate* isolate) { + Handle<Code> code; + if (pipeline_.FinalizeCode(call_descriptor_).ToHandle(&code) && + pipeline_.CommitDependencies(code)) { + info_.SetCode(code); +#ifdef ENABLE_DISASSEMBLER + if (FLAG_print_opt_code) { + CodeTracer::Scope tracing_scope(isolate->GetCodeTracer()); + OFStream os(tracing_scope.file()); + code->Disassemble(compilation_info()->GetDebugName().get(), os); + } +#endif + return SUCCEEDED; + } + return FAILED; +} + template <typename Phase, typename... Args> void PipelineImpl::Run(Args&&...
args) { PipelineRunScope scope(this->data_, Phase::phase_name()); @@ -1065,7 +1184,7 @@ struct GraphBuilderPhase { handle(data->info()->closure()->feedback_vector(), data->isolate()), data->info()->osr_offset(), data->jsgraph(), frequency, data->source_positions(), data->native_context(), - SourcePosition::kNotInlined, flags); + SourcePosition::kNotInlined, flags, &data->info()->tick_counter()); } }; @@ -1102,7 +1221,7 @@ struct InliningPhase { void Run(PipelineData* data, Zone* temp_zone) { Isolate* isolate = data->isolate(); OptimizedCompilationInfo* info = data->info(); - GraphReducer graph_reducer(temp_zone, data->graph(), + GraphReducer graph_reducer(temp_zone, data->graph(), &info->tick_counter(), data->jsgraph()->Dead()); DeadCodeElimination dead_code_elimination(&graph_reducer, data->graph(), data->common(), temp_zone); @@ -1196,6 +1315,7 @@ struct UntyperPhase { } GraphReducer graph_reducer(temp_zone, data->graph(), + &data->info()->tick_counter(), data->jsgraph()->Dead()); RemoveTypeReducer remove_type_reducer; AddReducer(data, &graph_reducer, &remove_type_reducer); @@ -1216,6 +1336,7 @@ struct CopyMetadataForConcurrentCompilePhase { void Run(PipelineData* data, Zone* temp_zone) { GraphReducer graph_reducer(temp_zone, data->graph(), + &data->info()->tick_counter(), data->jsgraph()->Dead()); JSHeapCopyReducer heap_copy_reducer(data->broker()); AddReducer(data, &graph_reducer, &heap_copy_reducer); @@ -1242,13 +1363,13 @@ struct SerializationPhase { if (data->info()->is_source_positions_enabled()) { flags |= SerializerForBackgroundCompilationFlag::kCollectSourcePositions; } - if (data->info()->is_osr()) { - flags |= SerializerForBackgroundCompilationFlag::kOsr; + if (data->info()->is_analyze_environment_liveness()) { + flags |= + SerializerForBackgroundCompilationFlag::kAnalyzeEnvironmentLiveness; } - SerializerForBackgroundCompilation serializer( - data->broker(), data->dependencies(), temp_zone, - data->info()->closure(), flags); - serializer.Run(); + RunSerializerForBackgroundCompilation(data->broker(), data->dependencies(), + temp_zone, data->info()->closure(), + flags, data->info()->osr_offset()); } }; @@ -1257,6 +1378,7 @@ struct TypedLoweringPhase { void Run(PipelineData* data, Zone* temp_zone) { GraphReducer graph_reducer(temp_zone, data->graph(), + &data->info()->tick_counter(), data->jsgraph()->Dead()); DeadCodeElimination dead_code_elimination(&graph_reducer, data->graph(), data->common(), temp_zone); @@ -1292,9 +1414,12 @@ struct EscapeAnalysisPhase { static const char* phase_name() { return "V8.TFEscapeAnalysis"; } void Run(PipelineData* data, Zone* temp_zone) { - EscapeAnalysis escape_analysis(data->jsgraph(), temp_zone); + EscapeAnalysis escape_analysis(data->jsgraph(), + &data->info()->tick_counter(), temp_zone); escape_analysis.ReduceGraph(); - GraphReducer reducer(temp_zone, data->graph(), data->jsgraph()->Dead()); + GraphReducer reducer(temp_zone, data->graph(), + &data->info()->tick_counter(), + data->jsgraph()->Dead()); EscapeAnalysisReducer escape_reducer(&reducer, data->jsgraph(), escape_analysis.analysis_result(), temp_zone); @@ -1305,13 +1430,28 @@ struct EscapeAnalysisPhase { } }; +struct TypeAssertionsPhase { + static const char* phase_name() { return "V8.TFTypeAssertions"; } + + void Run(PipelineData* data, Zone* temp_zone) { + GraphReducer graph_reducer(temp_zone, data->graph(), + &data->info()->tick_counter(), + data->jsgraph()->Dead()); + AddTypeAssertionsReducer type_assertions(&graph_reducer, data->jsgraph(), + temp_zone); + AddReducer(data, 
&graph_reducer, &type_assertions); + graph_reducer.ReduceGraph(); + } +}; + struct SimplifiedLoweringPhase { static const char* phase_name() { return "V8.TFSimplifiedLowering"; } void Run(PipelineData* data, Zone* temp_zone) { SimplifiedLowering lowering(data->jsgraph(), data->broker(), temp_zone, data->source_positions(), data->node_origins(), - data->info()->GetPoisoningMitigationLevel()); + data->info()->GetPoisoningMitigationLevel(), + &data->info()->tick_counter()); lowering.LowerAllNodes(); } }; @@ -1325,8 +1465,8 @@ struct LoopPeelingPhase { data->jsgraph()->GetCachedNodes(&roots); trimmer.TrimGraph(roots.begin(), roots.end()); - LoopTree* loop_tree = - LoopFinder::BuildLoopTree(data->jsgraph()->graph(), temp_zone); + LoopTree* loop_tree = LoopFinder::BuildLoopTree( + data->jsgraph()->graph(), &data->info()->tick_counter(), temp_zone); LoopPeeler(data->graph(), data->common(), loop_tree, temp_zone, data->source_positions(), data->node_origins()) .PeelInnerLoopsOfTree(); @@ -1346,6 +1486,7 @@ struct GenericLoweringPhase { void Run(PipelineData* data, Zone* temp_zone) { GraphReducer graph_reducer(temp_zone, data->graph(), + &data->info()->tick_counter(), data->jsgraph()->Dead()); JSGenericLowering generic_lowering(data->jsgraph(), &graph_reducer); AddReducer(data, &graph_reducer, &generic_lowering); @@ -1358,6 +1499,7 @@ struct EarlyOptimizationPhase { void Run(PipelineData* data, Zone* temp_zone) { GraphReducer graph_reducer(temp_zone, data->graph(), + &data->info()->tick_counter(), data->jsgraph()->Dead()); DeadCodeElimination dead_code_elimination(&graph_reducer, data->graph(), data->common(), temp_zone); @@ -1384,7 +1526,8 @@ struct ControlFlowOptimizationPhase { void Run(PipelineData* data, Zone* temp_zone) { ControlFlowOptimizer optimizer(data->graph(), data->common(), - data->machine(), temp_zone); + data->machine(), + &data->info()->tick_counter(), temp_zone); optimizer.Optimize(); } }; @@ -1406,8 +1549,9 @@ struct EffectControlLinearizationPhase { // fix the effect and control flow for nodes with low-level side // effects (such as changing representation to tagged or // 'floating' allocation regions.) - Schedule* schedule = Scheduler::ComputeSchedule(temp_zone, data->graph(), - Scheduler::kTempSchedule); + Schedule* schedule = Scheduler::ComputeSchedule( + temp_zone, data->graph(), Scheduler::kTempSchedule, + &data->info()->tick_counter()); if (FLAG_turbo_verify) ScheduleVerifier::Run(schedule); TraceSchedule(data->info(), data, schedule, "effect linearization schedule"); @@ -1433,6 +1577,7 @@ struct EffectControlLinearizationPhase { // doing a common operator reducer and dead code elimination just before // it, to eliminate conditional deopts with a constant condition. 
GraphReducer graph_reducer(temp_zone, data->graph(), + &data->info()->tick_counter(), data->jsgraph()->Dead()); DeadCodeElimination dead_code_elimination(&graph_reducer, data->graph(), data->common(), temp_zone); @@ -1455,7 +1600,8 @@ struct StoreStoreEliminationPhase { data->jsgraph()->GetCachedNodes(&roots); trimmer.TrimGraph(roots.begin(), roots.end()); - StoreStoreElimination::Run(data->jsgraph(), temp_zone); + StoreStoreElimination::Run(data->jsgraph(), &data->info()->tick_counter(), + temp_zone); } }; @@ -1464,6 +1610,7 @@ struct LoadEliminationPhase { void Run(PipelineData* data, Zone* temp_zone) { GraphReducer graph_reducer(temp_zone, data->graph(), + &data->info()->tick_counter(), data->jsgraph()->Dead()); BranchElimination branch_condition_elimination(&graph_reducer, data->jsgraph(), temp_zone); @@ -1513,7 +1660,7 @@ struct MemoryOptimizationPhase { data->info()->is_allocation_folding_enabled() ? MemoryOptimizer::AllocationFolding::kDoAllocationFolding : MemoryOptimizer::AllocationFolding::kDontAllocationFolding, - data->debug_name()); + data->debug_name(), &data->info()->tick_counter()); optimizer.Optimize(); } }; @@ -1523,6 +1670,7 @@ struct LateOptimizationPhase { void Run(PipelineData* data, Zone* temp_zone) { GraphReducer graph_reducer(temp_zone, data->graph(), + &data->info()->tick_counter(), data->jsgraph()->Dead()); BranchElimination branch_condition_elimination(&graph_reducer, data->jsgraph(), temp_zone); @@ -1555,6 +1703,7 @@ struct MachineOperatorOptimizationPhase { void Run(PipelineData* data, Zone* temp_zone) { GraphReducer graph_reducer(temp_zone, data->graph(), + &data->info()->tick_counter(), data->jsgraph()->Dead()); ValueNumberingReducer value_numbering(temp_zone, data->graph()->zone()); MachineOperatorReducer machine_reducer(&graph_reducer, data->jsgraph()); @@ -1565,11 +1714,38 @@ struct MachineOperatorOptimizationPhase { } }; +struct CsaEarlyOptimizationPhase { + static const char* phase_name() { return "V8.CSAEarlyOptimization"; } + + void Run(PipelineData* data, Zone* temp_zone) { + GraphReducer graph_reducer(temp_zone, data->graph(), + &data->info()->tick_counter(), + data->jsgraph()->Dead()); + BranchElimination branch_condition_elimination(&graph_reducer, + data->jsgraph(), temp_zone); + DeadCodeElimination dead_code_elimination(&graph_reducer, data->graph(), + data->common(), temp_zone); + CommonOperatorReducer common_reducer(&graph_reducer, data->graph(), + data->broker(), data->common(), + data->machine(), temp_zone); + ValueNumberingReducer value_numbering(temp_zone, data->graph()->zone()); + CsaLoadElimination load_elimination(&graph_reducer, data->jsgraph(), + temp_zone); + AddReducer(data, &graph_reducer, &branch_condition_elimination); + AddReducer(data, &graph_reducer, &dead_code_elimination); + AddReducer(data, &graph_reducer, &common_reducer); + AddReducer(data, &graph_reducer, &value_numbering); + AddReducer(data, &graph_reducer, &load_elimination); + graph_reducer.ReduceGraph(); + } +}; + struct CsaOptimizationPhase { static const char* phase_name() { return "V8.CSAOptimization"; } void Run(PipelineData* data, Zone* temp_zone) { GraphReducer graph_reducer(temp_zone, data->graph(), + &data->info()->tick_counter(), data->jsgraph()->Dead()); BranchElimination branch_condition_elimination(&graph_reducer, data->jsgraph(), temp_zone); @@ -1621,9 +1797,10 @@ struct ComputeSchedulePhase { void Run(PipelineData* data, Zone* temp_zone) { Schedule* schedule = Scheduler::ComputeSchedule( - temp_zone, data->graph(), data->info()->is_splitting_enabled() - 
? Scheduler::kSplitNodes - : Scheduler::kNoFlags); + temp_zone, data->graph(), + data->info()->is_splitting_enabled() ? Scheduler::kSplitNodes + : Scheduler::kNoFlags, + &data->info()->tick_counter()); if (FLAG_turbo_verify) ScheduleVerifier::Run(schedule); data->set_schedule(schedule); } @@ -1671,6 +1848,7 @@ struct InstructionSelectionPhase { data->info()->switch_jump_table_enabled() ? InstructionSelector::kEnableSwitchJumpTable : InstructionSelector::kDisableSwitchJumpTable, + &data->info()->tick_counter(), data->info()->is_source_positions_enabled() ? InstructionSelector::kAllSourcePositions : InstructionSelector::kCallSourcePositions, @@ -1920,7 +2098,8 @@ struct PrintGraphPhase { Schedule* schedule = data->schedule(); if (schedule == nullptr) { schedule = Scheduler::ComputeSchedule(temp_zone, data->graph(), - Scheduler::kNoFlags); + Scheduler::kNoFlags, + &info->tick_counter()); } AllowHandleDereference allow_deref; @@ -2089,6 +2268,11 @@ bool PipelineImpl::OptimizeGraph(Linkage* linkage) { RunPrintAndVerify(EscapeAnalysisPhase::phase_name()); } + if (FLAG_assert_types) { + Run<TypeAssertionsPhase>(); + RunPrintAndVerify(TypeAssertionsPhase::phase_name()); + } + // Perform simplified lowering. This has to run w/o the Typer decorator, // because we cannot compute meaningful types anyways, and the computed types // might even conflict with the representation/truncation logic. @@ -2201,6 +2385,9 @@ MaybeHandle<Code> Pipeline::GenerateCodeForCodeStub( pipeline.Run<PrintGraphPhase>("V8.TFMachineCode"); } + pipeline.Run<CsaEarlyOptimizationPhase>(); + pipeline.RunPrintAndVerify(CsaEarlyOptimizationPhase::phase_name(), true); + // Optimize memory access and allocation operations. pipeline.Run<MemoryOptimizationPhase>(); pipeline.RunPrintAndVerify(MemoryOptimizationPhase::phase_name(), true); @@ -2330,58 +2517,6 @@ wasm::WasmCompilationResult Pipeline::GenerateCodeForWasmNativeStub( return result; } -// static -MaybeHandle<Code> Pipeline::GenerateCodeForWasmHeapStub( - Isolate* isolate, CallDescriptor* call_descriptor, Graph* graph, - Code::Kind kind, const char* debug_name, const AssemblerOptions& options, - SourcePositionTable* source_positions) { - OptimizedCompilationInfo info(CStrVector(debug_name), graph->zone(), kind); - // Construct a pipeline for scheduling and code generation. - ZoneStats zone_stats(isolate->allocator()); - NodeOriginTable* node_positions = new (graph->zone()) NodeOriginTable(graph); - PipelineData data(&zone_stats, &info, isolate, graph, nullptr, - source_positions, node_positions, nullptr, options); - std::unique_ptr<PipelineStatistics> pipeline_statistics; - if (FLAG_turbo_stats || FLAG_turbo_stats_nvp) { - pipeline_statistics.reset(new PipelineStatistics( - &info, isolate->GetTurboStatistics(), &zone_stats)); - pipeline_statistics->BeginPhaseKind("V8.WasmStubCodegen"); - } - - PipelineImpl pipeline(&data); - - if (info.trace_turbo_json_enabled() || - info.trace_turbo_graph_enabled()) { - CodeTracer::Scope tracing_scope(data.GetCodeTracer()); - OFStream os(tracing_scope.file()); - os << "---------------------------------------------------\n" - << "Begin compiling method " << info.GetDebugName().get() - << " using TurboFan" << std::endl; - } - - if (info.trace_turbo_graph_enabled()) { // Simple textual RPO.
- StdoutStream{} << "-- wasm stub " << Code::Kind2String(kind) << " graph -- " - << std::endl - << AsRPO(*graph); - } - - if (info.trace_turbo_json_enabled()) { - TurboJsonFile json_of(&info, std::ios_base::trunc); - json_of << "{\"function\":\"" << info.GetDebugName().get() - << "\", \"source\":\"\",\n\"phases\":["; - } - - pipeline.RunPrintAndVerify("V8.WasmMachineCode", true); - pipeline.ComputeScheduledGraph(); - - Handle<Code> code; - if (pipeline.GenerateCode(call_descriptor).ToHandle(&code) && - pipeline.CommitDependencies(code)) { - return code; - } - return MaybeHandle<Code>(); -} - // static MaybeHandle<Code> Pipeline::GenerateCodeForTesting( OptimizedCompilationInfo* info, Isolate* isolate, @@ -2449,11 +2584,11 @@ MaybeHandle<Code> Pipeline::GenerateCodeForTesting( } // static -OptimizedCompilationJob* Pipeline::NewCompilationJob( +std::unique_ptr<OptimizedCompilationJob> Pipeline::NewCompilationJob( Isolate* isolate, Handle<JSFunction> function, bool has_script) { Handle<SharedFunctionInfo> shared = handle(function->shared(), function->GetIsolate()); - return new PipelineCompilationJob(isolate, shared, function); + return base::make_unique<PipelineCompilationJob>(isolate, shared, function); } // static @@ -2490,13 +2625,14 @@ void Pipeline::GenerateCodeForWasmFunction( pipeline.RunPrintAndVerify("V8.WasmMachineCode", true); data.BeginPhaseKind("V8.WasmOptimization"); - const bool is_asm_js = module->origin == wasm::kAsmJsOrigin; + const bool is_asm_js = is_asmjs_module(module); if (FLAG_turbo_splitting && !is_asm_js) { data.info()->MarkAsSplittingEnabled(); } if (FLAG_wasm_opt || is_asm_js) { PipelineRunScope scope(&data, "V8.WasmFullOptimization"); GraphReducer graph_reducer(scope.zone(), data.graph(), + &data.info()->tick_counter(), data.mcgraph()->Dead()); DeadCodeElimination dead_code_elimination(&graph_reducer, data.graph(), data.common(), scope.zone()); @@ -2515,6 +2651,7 @@ void Pipeline::GenerateCodeForWasmFunction( } else { PipelineRunScope scope(&data, "V8.WasmBaseOptimization"); GraphReducer graph_reducer(scope.zone(), data.graph(), + &data.info()->tick_counter(), data.mcgraph()->Dead()); ValueNumberingReducer value_numbering(scope.zone(), data.graph()->zone()); AddReducer(&data, &graph_reducer, &value_numbering); @@ -2870,8 +3007,9 @@ bool PipelineImpl::SelectInstructionsAndAssemble( } MaybeHandle<Code> PipelineImpl::GenerateCode(CallDescriptor* call_descriptor) { - if (!SelectInstructionsAndAssemble(call_descriptor)) + if (!SelectInstructionsAndAssemble(call_descriptor)) { return MaybeHandle<Code>(); + } return FinalizeCode(); } @@ -2928,6 +3066,9 @@ void PipelineImpl::AllocateRegisters(const RegisterConfiguration* config, if (data->info()->is_turbo_preprocess_ranges()) { flags |= RegisterAllocationFlag::kTurboPreprocessRanges; } + if (data->info()->trace_turbo_allocation_enabled()) { + flags |= RegisterAllocationFlag::kTraceAllocation; + } data->InitializeRegisterAllocationData(config, call_descriptor, flags); if (info()->is_osr()) data->osr_helper()->SetupFrame(data->frame()); diff --git a/deps/v8/src/compiler/pipeline.h b/deps/v8/src/compiler/pipeline.h index 7f9a242d9872f4..6898faaad0d432 100644 --- a/deps/v8/src/compiler/pipeline.h +++ b/deps/v8/src/compiler/pipeline.h @@ -41,9 +41,8 @@ class SourcePositionTable; class Pipeline : public AllStatic { public: // Returns a new compilation job for the given JavaScript function.
- static OptimizedCompilationJob* NewCompilationJob(Isolate* isolate, - Handle<JSFunction> function, - bool has_script); + static std::unique_ptr<OptimizedCompilationJob> NewCompilationJob( + Isolate* isolate, Handle<JSFunction> function, bool has_script); // Run the pipeline for the WebAssembly compilation info. static void GenerateCodeForWasmFunction( @@ -60,11 +59,11 @@ class Pipeline : public AllStatic { const char* debug_name, const AssemblerOptions& assembler_options, SourcePositionTable* source_positions = nullptr); - // Run the pipeline on a machine graph and generate code. - static MaybeHandle<Code> GenerateCodeForWasmHeapStub( - Isolate* isolate, CallDescriptor* call_descriptor, Graph* graph, - Code::Kind kind, const char* debug_name, - const AssemblerOptions& assembler_options, + // Returns a new compilation job for a wasm heap stub. + static std::unique_ptr<OptimizedCompilationJob> NewWasmHeapStubCompilationJob( + Isolate* isolate, CallDescriptor* call_descriptor, + std::unique_ptr<Zone> zone, Graph* graph, Code::Kind kind, + std::unique_ptr<char[]> debug_name, const AssemblerOptions& options, SourcePositionTable* source_positions = nullptr); // Run the pipeline on a machine graph and generate code. diff --git a/deps/v8/src/compiler/property-access-builder.cc b/deps/v8/src/compiler/property-access-builder.cc index dafd481797a671..99a06ef874a289 100644 --- a/deps/v8/src/compiler/property-access-builder.cc +++ b/deps/v8/src/compiler/property-access-builder.cc @@ -127,7 +127,7 @@ Node* PropertyAccessBuilder::ResolveHolder( PropertyAccessInfo const& access_info, Node* receiver) { Handle<JSObject> holder; if (access_info.holder().ToHandle(&holder)) { - return jsgraph()->Constant(holder); + return jsgraph()->Constant(ObjectRef(broker(), holder)); } return receiver; } @@ -151,7 +151,16 @@ MachineRepresentation PropertyAccessBuilder::ConvertRepresentation( Node* PropertyAccessBuilder::TryBuildLoadConstantDataField( NameRef const& name, PropertyAccessInfo const& access_info, Node* receiver) { + // TODO(neis): Eliminate FastPropertyAt call below by doing the lookup during + // access info computation. Requires extra care in the case where the + // receiver is the holder. + AllowCodeDependencyChange dependency_change_; + AllowHandleAllocation handle_allocation_; + AllowHandleDereference handle_dereference_; + AllowHeapAllocation heap_allocation_; + if (!access_info.IsDataConstant()) return nullptr; + // First, determine if we have a constant holder to load from. Handle<JSObject> holder; // If {access_info} has a holder, just use it. @@ -165,7 +174,7 @@ Node* PropertyAccessBuilder::TryBuildLoadConstantDataField( MapRef receiver_map = m.Ref(broker()).map(); if (std::find_if(access_info.receiver_maps().begin(), access_info.receiver_maps().end(), [&](Handle<Map> map) { - return map.address() == receiver_map.object().address(); + return map.equals(receiver_map.object()); }) == access_info.receiver_maps().end()) { // The map of the receiver is not in the feedback, let us bail out.
return nullptr; diff --git a/deps/v8/src/compiler/raw-machine-assembler.cc b/deps/v8/src/compiler/raw-machine-assembler.cc index dc1edc710d1f2f..277c89c932e92f 100644 --- a/deps/v8/src/compiler/raw-machine-assembler.cc +++ b/deps/v8/src/compiler/raw-machine-assembler.cc @@ -556,8 +556,8 @@ void RawMachineAssembler::PopAndReturn(Node* pop, Node* v1, Node* v2, Node* v3, current_block_ = nullptr; } -void RawMachineAssembler::DebugAbort(Node* message) { - AddNode(machine()->DebugAbort(), message); +void RawMachineAssembler::AbortCSAAssert(Node* message) { + AddNode(machine()->AbortCSAAssert(), message); } void RawMachineAssembler::DebugBreak() { AddNode(machine()->DebugBreak()); } diff --git a/deps/v8/src/compiler/raw-machine-assembler.h b/deps/v8/src/compiler/raw-machine-assembler.h index 67326ac7307b5f..890c38c5515af7 100644 --- a/deps/v8/src/compiler/raw-machine-assembler.h +++ b/deps/v8/src/compiler/raw-machine-assembler.h @@ -732,6 +732,9 @@ class V8_EXPORT_PRIVATE RawMachineAssembler { Node* BitcastTaggedToWord(Node* a) { return AddNode(machine()->BitcastTaggedToWord(), a); } + Node* BitcastTaggedSignedToWord(Node* a) { + return AddNode(machine()->BitcastTaggedSignedToWord(), a); + } Node* BitcastMaybeObjectToWord(Node* a) { return AddNode(machine()->BitcastMaybeObjectToWord(), a); } @@ -1016,7 +1019,7 @@ class V8_EXPORT_PRIVATE RawMachineAssembler { void PopAndReturn(Node* pop, Node* v1, Node* v2, Node* v3, Node* v4); void Bind(RawMachineLabel* label); void Deoptimize(Node* state); - void DebugAbort(Node* message); + void AbortCSAAssert(Node* message); void DebugBreak(); void Unreachable(); void Comment(const std::string& msg); diff --git a/deps/v8/src/compiler/redundancy-elimination.cc b/deps/v8/src/compiler/redundancy-elimination.cc index 0822e47bbab046..9b401bcf43f786 100644 --- a/deps/v8/src/compiler/redundancy-elimination.cc +++ b/deps/v8/src/compiler/redundancy-elimination.cc @@ -19,6 +19,7 @@ RedundancyElimination::~RedundancyElimination() = default; Reduction RedundancyElimination::Reduce(Node* node) { if (node_checks_.Get(node)) return NoChange(); switch (node->opcode()) { + case IrOpcode::kCheckBigInt: case IrOpcode::kCheckBounds: case IrOpcode::kCheckEqualsInternalizedString: case IrOpcode::kCheckEqualsSymbol: @@ -147,7 +148,9 @@ bool CheckSubsumes(Node const* a, Node const* b) { case IrOpcode::kCheckSmi: case IrOpcode::kCheckString: case IrOpcode::kCheckNumber: + case IrOpcode::kCheckBigInt: break; + case IrOpcode::kCheckedInt32ToCompressedSigned: case IrOpcode::kCheckedInt32ToTaggedSigned: case IrOpcode::kCheckedInt64ToInt32: case IrOpcode::kCheckedInt64ToTaggedSigned: diff --git a/deps/v8/src/compiler/representation-change.cc b/deps/v8/src/compiler/representation-change.cc index cebd87e73d6473..7a4577b799a0e7 100644 --- a/deps/v8/src/compiler/representation-change.cc +++ b/deps/v8/src/compiler/representation-change.cc @@ -8,6 +8,7 @@ #include "src/base/bits.h" #include "src/codegen/code-factory.h" +#include "src/compiler/js-heap-broker.h" #include "src/compiler/machine-operator.h" #include "src/compiler/node-matchers.h" #include "src/compiler/type-cache.h" @@ -25,12 +26,14 @@ const char* Truncation::description() const { return "truncate-to-bool"; case TruncationKind::kWord32: return "truncate-to-word32"; - case TruncationKind::kFloat64: + case TruncationKind::kWord64: + return "truncate-to-word64"; + case TruncationKind::kOddballAndBigIntToNumber: switch (identify_zeros()) { case kIdentifyZeros: - return "truncate-to-float64 (identify zeros)"; + return 
"truncate-oddball&bigint-to-number (identify zeros)"; case kDistinguishZeros: - return "truncate-to-float64 (distinguish zeros)"; + return "truncate-oddball&bigint-to-number (distinguish zeros)"; } case TruncationKind::kAny: switch (identify_zeros()) { @@ -45,22 +48,25 @@ const char* Truncation::description() const { // Partial order for truncations: // -// kAny <-------+ -// ^ | -// | | -// kFloat64 | -// ^ | -// / | -// kWord32 kBool -// ^ ^ -// \ / -// \ / -// \ / -// \ / -// \ / -// kNone +// kAny <-------+ +// ^ | +// | | +// kOddballAndBigIntToNumber | +// ^ | +// / | +// kWord64 | +// ^ | +// | | +// kWord32 kBool +// ^ ^ +// \ / +// \ / +// \ / +// \ / +// \ / +// kNone // -// TODO(jarin) We might consider making kBool < kFloat64. +// TODO(jarin) We might consider making kBool < kOddballAndBigIntToNumber. // static Truncation::TruncationKind Truncation::Generalize(TruncationKind rep1, @@ -68,9 +74,9 @@ Truncation::TruncationKind Truncation::Generalize(TruncationKind rep1, if (LessGeneral(rep1, rep2)) return rep2; if (LessGeneral(rep2, rep1)) return rep1; // Handle the generalization of float64-representable values. - if (LessGeneral(rep1, TruncationKind::kFloat64) && - LessGeneral(rep2, TruncationKind::kFloat64)) { - return TruncationKind::kFloat64; + if (LessGeneral(rep1, TruncationKind::kOddballAndBigIntToNumber) && + LessGeneral(rep2, TruncationKind::kOddballAndBigIntToNumber)) { + return TruncationKind::kOddballAndBigIntToNumber; } // Handle the generalization of any-representable values. if (LessGeneral(rep1, TruncationKind::kAny) && @@ -101,9 +107,16 @@ bool Truncation::LessGeneral(TruncationKind rep1, TruncationKind rep2) { return rep2 == TruncationKind::kBool || rep2 == TruncationKind::kAny; case TruncationKind::kWord32: return rep2 == TruncationKind::kWord32 || - rep2 == TruncationKind::kFloat64 || rep2 == TruncationKind::kAny; - case TruncationKind::kFloat64: - return rep2 == TruncationKind::kFloat64 || rep2 == TruncationKind::kAny; + rep2 == TruncationKind::kWord64 || + rep2 == TruncationKind::kOddballAndBigIntToNumber || + rep2 == TruncationKind::kAny; + case TruncationKind::kWord64: + return rep2 == TruncationKind::kWord64 || + rep2 == TruncationKind::kOddballAndBigIntToNumber || + rep2 == TruncationKind::kAny; + case TruncationKind::kOddballAndBigIntToNumber: + return rep2 == TruncationKind::kOddballAndBigIntToNumber || + rep2 == TruncationKind::kAny; case TruncationKind::kAny: return rep2 == TruncationKind::kAny; } @@ -125,10 +138,11 @@ bool IsWord(MachineRepresentation rep) { } // namespace -RepresentationChanger::RepresentationChanger(JSGraph* jsgraph, Isolate* isolate) +RepresentationChanger::RepresentationChanger(JSGraph* jsgraph, + JSHeapBroker* broker) : cache_(TypeCache::Get()), jsgraph_(jsgraph), - isolate_(isolate), + broker_(broker), testing_type_errors_(false), type_error_(false) {} @@ -169,7 +183,8 @@ Node* RepresentationChanger::GetRepresentationFor( use_node, use_info); case MachineRepresentation::kTaggedPointer: DCHECK(use_info.type_check() == TypeCheckKind::kNone || - use_info.type_check() == TypeCheckKind::kHeapObject); + use_info.type_check() == TypeCheckKind::kHeapObject || + use_info.type_check() == TypeCheckKind::kBigInt); return GetTaggedPointerRepresentationFor(node, output_rep, output_type, use_node, use_info); case MachineRepresentation::kTagged: @@ -207,7 +222,8 @@ Node* RepresentationChanger::GetRepresentationFor( use_info); case MachineRepresentation::kWord64: DCHECK(use_info.type_check() == TypeCheckKind::kNone || - use_info.type_check() 
== TypeCheckKind::kSigned64); + use_info.type_check() == TypeCheckKind::kSigned64 || + use_info.type_check() == TypeCheckKind::kBigInt); return GetWord64RepresentationFor(node, output_rep, output_type, use_node, use_info); case MachineRepresentation::kSimd128: @@ -418,6 +434,8 @@ Node* RepresentationChanger::GetTaggedPointerRepresentationFor( op = machine()->ChangeInt64ToFloat64(); node = jsgraph()->graph()->NewNode(op, node); op = simplified()->ChangeFloat64ToTaggedPointer(); + } else if (output_type.Is(Type::BigInt())) { + op = simplified()->ChangeUint64ToBigInt(); } else { return TypeError(node, output_rep, output_type, MachineRepresentation::kTaggedPointer); @@ -447,16 +465,37 @@ Node* RepresentationChanger::GetTaggedPointerRepresentationFor( // TODO(turbofan): Consider adding a Bailout operator that just deopts // for TaggedSigned output representation. op = simplified()->CheckedTaggedToTaggedPointer(use_info.feedback()); + } else if (IsAnyTagged(output_rep) && + (use_info.type_check() == TypeCheckKind::kBigInt || + output_type.Is(Type::BigInt()))) { + if (output_type.Is(Type::BigInt())) { + return node; + } + op = simplified()->CheckBigInt(use_info.feedback()); } else if (output_rep == MachineRepresentation::kCompressedPointer) { + if (use_info.type_check() == TypeCheckKind::kBigInt && + !output_type.Is(Type::BigInt())) { + node = InsertChangeCompressedToTagged(node); + op = simplified()->CheckBigInt(use_info.feedback()); + } else { + op = machine()->ChangeCompressedPointerToTaggedPointer(); + } + } else if (output_rep == MachineRepresentation::kCompressed && + output_type.Is(Type::BigInt())) { op = machine()->ChangeCompressedPointerToTaggedPointer(); + } else if (output_rep == MachineRepresentation::kCompressed && + use_info.type_check() == TypeCheckKind::kBigInt) { + node = InsertChangeCompressedToTagged(node); + op = simplified()->CheckBigInt(use_info.feedback()); } else if (CanBeCompressedSigned(output_rep) && use_info.type_check() == TypeCheckKind::kHeapObject) { if (!output_type.Maybe(Type::SignedSmall())) { op = machine()->ChangeCompressedPointerToTaggedPointer(); + } else { + // TODO(turbofan): Consider adding a Bailout operator that just deopts + // for CompressedSigned output representation. + op = simplified()->CheckedCompressedToTaggedPointer(use_info.feedback()); } - // TODO(turbofan): Consider adding a Bailout operator that just deopts - // for CompressedSigned output representation. - op = simplified()->CheckedCompressedToTaggedPointer(use_info.feedback()); } else { return TypeError(node, output_rep, output_type, MachineRepresentation::kTaggedPointer); @@ -535,6 +574,9 @@ Node* RepresentationChanger::GetTaggedRepresentationFor( } else if (output_type.Is(cache_->kSafeInteger)) { // int64 -> tagged op = simplified()->ChangeInt64ToTagged(); + } else if (output_type.Is(Type::BigInt())) { + // uint64 -> BigInt + op = simplified()->ChangeUint64ToBigInt(); } else { return TypeError(node, output_rep, output_type, MachineRepresentation::kTagged); @@ -560,7 +602,7 @@ Node* RepresentationChanger::GetTaggedRepresentationFor( op = simplified()->ChangeUint32ToTagged(); } else if (output_type.Is(Type::Number()) || (output_type.Is(Type::NumberOrOddball()) && - truncation.IsUsedAsFloat64())) { + truncation.TruncatesOddballAndBigIntToNumber())) { op = simplified()->ChangeFloat64ToTagged( output_type.Maybe(Type::MinusZero()) ? 
CheckForMinusZeroMode::kCheckForMinusZero @@ -569,7 +611,11 @@ Node* RepresentationChanger::GetTaggedRepresentationFor( return TypeError(node, output_rep, output_type, MachineRepresentation::kTagged); } - } else if (IsAnyCompressed(output_rep)) { + } else if (output_rep == MachineRepresentation::kCompressedSigned) { + op = machine()->ChangeCompressedSignedToTaggedSigned(); + } else if (output_rep == MachineRepresentation::kCompressedPointer) { + op = machine()->ChangeCompressedPointerToTaggedPointer(); + } else if (output_rep == MachineRepresentation::kCompressed) { op = machine()->ChangeCompressedToTagged(); } else { return TypeError(node, output_rep, output_type, @@ -606,9 +652,20 @@ Node* RepresentationChanger::GetCompressedSignedRepresentationFor( use_node, use_info); op = machine()->ChangeTaggedSignedToCompressedSigned(); } else if (IsWord(output_rep)) { - node = GetTaggedSignedRepresentationFor(node, output_rep, output_type, - use_node, use_info); - op = machine()->ChangeTaggedSignedToCompressedSigned(); + if (output_type.Is(Type::Signed31())) { + op = simplified()->ChangeInt31ToCompressedSigned(); + } else if (output_type.Is(Type::Signed32())) { + if (use_info.type_check() == TypeCheckKind::kSignedSmall) { + op = simplified()->CheckedInt32ToCompressedSigned(use_info.feedback()); + } else { + return TypeError(node, output_rep, output_type, + MachineRepresentation::kCompressedSigned); + } + } else { + node = GetTaggedSignedRepresentationFor(node, output_rep, output_type, + use_node, use_info); + op = machine()->ChangeTaggedSignedToCompressedSigned(); + } } else if (output_rep == MachineRepresentation::kWord64) { node = GetTaggedSignedRepresentationFor(node, output_rep, output_type, use_node, use_info); @@ -645,10 +702,11 @@ Node* RepresentationChanger::GetCompressedPointerRepresentationFor( use_info.type_check() == TypeCheckKind::kHeapObject) { if (!output_type.Maybe(Type::SignedSmall())) { op = machine()->ChangeTaggedPointerToCompressedPointer(); + } else { + // TODO(turbofan): Consider adding a Bailout operator that just deopts + // for TaggedSigned output representation. + op = simplified()->CheckedTaggedToCompressedPointer(use_info.feedback()); } - // TODO(turbofan): Consider adding a Bailout operator that just deopts - // for TaggedSigned output representation. - op = simplified()->CheckedTaggedToCompressedPointer(use_info.feedback()); } else if (output_rep == MachineRepresentation::kBit) { // TODO(v8:8977): specialize here and below node = GetTaggedPointerRepresentationFor(node, output_rep, output_type, @@ -810,11 +868,14 @@ Node* RepresentationChanger::GetFloat64RepresentationFor( Node* use_node, UseInfo use_info) { NumberMatcher m(node); if (m.HasValue()) { + // BigInts are not used as number constants. + DCHECK(use_info.type_check() != TypeCheckKind::kBigInt); switch (use_info.type_check()) { case TypeCheckKind::kNone: case TypeCheckKind::kNumber: case TypeCheckKind::kNumberOrOddball: return jsgraph()->Float64Constant(m.Value()); + case TypeCheckKind::kBigInt: case TypeCheckKind::kHeapObject: case TypeCheckKind::kSigned32: case TypeCheckKind::kSigned64: @@ -843,9 +904,7 @@ Node* RepresentationChanger::GetFloat64RepresentationFor( } } else if (output_rep == MachineRepresentation::kBit) { CHECK(output_type.Is(Type::Boolean())); - // TODO(tebbi): TypeCheckKind::kNumberOrOddball should imply Float64 - // truncation, since this exactly means that we treat Oddballs as Numbers. 
- if (use_info.truncation().IsUsedAsFloat64() || + if (use_info.truncation().TruncatesOddballAndBigIntToNumber() || use_info.type_check() == TypeCheckKind::kNumberOrOddball) { op = machine()->ChangeUint32ToFloat64(); } else { @@ -867,7 +926,7 @@ Node* RepresentationChanger::GetFloat64RepresentationFor( } else if (output_type.Is(Type::Number())) { op = simplified()->ChangeTaggedToFloat64(); } else if ((output_type.Is(Type::NumberOrOddball()) && - use_info.truncation().IsUsedAsFloat64()) || + use_info.truncation().TruncatesOddballAndBigIntToNumber()) || output_type.Is(Type::NumberOrHole())) { // JavaScript 'null' is an Oddball that results in +0 when truncated to // Number. In a context like -0 == null, which must evaluate to false, @@ -1063,11 +1122,15 @@ Node* RepresentationChanger::GetWord32RepresentationFor( output_type, use_node, use_info); } else if (output_rep == MachineRepresentation::kCompressedSigned) { // TODO(v8:8977): Specialise here - op = machine()->ChangeCompressedSignedToTaggedSigned(); - node = jsgraph()->graph()->NewNode(op, node); - return GetWord32RepresentationFor(node, - MachineRepresentation::kTaggedSigned, - output_type, use_node, use_info); + if (output_type.Is(Type::SignedSmall())) { + op = simplified()->ChangeCompressedSignedToInt32(); + } else { + op = machine()->ChangeCompressedSignedToTaggedSigned(); + node = jsgraph()->graph()->NewNode(op, node); + return GetWord32RepresentationFor(node, + MachineRepresentation::kTaggedSigned, + output_type, use_node, use_info); + } } else if (output_rep == MachineRepresentation::kCompressedPointer) { // TODO(v8:8977): Specialise here op = machine()->ChangeCompressedPointerToTaggedPointer(); @@ -1252,6 +1315,15 @@ Node* RepresentationChanger::GetWord64RepresentationFor( } break; } + case IrOpcode::kHeapConstant: { + HeapObjectMatcher m(node); + if (m.HasValue() && m.Ref(broker_).IsBigInt()) { + auto bigint = m.Ref(broker_).AsBigInt(); + return jsgraph()->Int64Constant( + static_cast<int64_t>(bigint.AsUint64())); + } + break; + } default: break; } @@ -1272,9 +1344,15 @@ Node* RepresentationChanger::GetWord64RepresentationFor( jsgraph()->common()->DeadValue(MachineRepresentation::kWord64), unreachable); } else if (IsWord(output_rep)) { - if (output_type.Is(Type::Unsigned32())) { + if (output_type.Is(Type::Unsigned32OrMinusZero())) { + // uint32 -> uint64 + CHECK_IMPLIES(output_type.Maybe(Type::MinusZero()), + use_info.truncation().IdentifiesZeroAndMinusZero()); op = machine()->ChangeUint32ToUint64(); - } else if (output_type.Is(Type::Signed32())) { + } else if (output_type.Is(Type::Signed32OrMinusZero())) { + // int32 -> int64 + CHECK_IMPLIES(output_type.Maybe(Type::MinusZero()), + use_info.truncation().IdentifiesZeroAndMinusZero()); op = machine()->ChangeInt32ToInt64(); } else { return TypeError(node, output_rep, output_type, @@ -1323,6 +1401,13 @@ Node* RepresentationChanger::GetWord64RepresentationFor( return TypeError(node, output_rep, output_type, MachineRepresentation::kWord64); } + } else if (IsAnyTagged(output_rep) && + use_info.truncation().IsUsedAsWord64() && + (use_info.type_check() == TypeCheckKind::kBigInt || + output_type.Is(Type::BigInt()))) { + node = GetTaggedPointerRepresentationFor(node, output_rep, output_type, + use_node, use_info); + op = simplified()->TruncateBigIntToUint64(); } else if (CanBeTaggedPointer(output_rep)) { if (output_type.Is(cache_->kInt64)) { op = simplified()->ChangeTaggedToInt64(); @@ -1656,6 +1741,13 @@ Node* RepresentationChanger::InsertTruncateInt64ToInt32(Node* node) { return
jsgraph()->graph()->NewNode(machine()->TruncateInt64ToInt32(), node); } +Node* RepresentationChanger::InsertChangeCompressedToTagged(Node* node) { + return jsgraph()->graph()->NewNode(machine()->ChangeCompressedToTagged(), + node); +} + +Isolate* RepresentationChanger::isolate() const { return broker_->isolate(); } + } // namespace compiler } // namespace internal } // namespace v8 diff --git a/deps/v8/src/compiler/representation-change.h b/deps/v8/src/compiler/representation-change.h index e8bb3f12ac577f..d3386676032b7a 100644 --- a/deps/v8/src/compiler/representation-change.h +++ b/deps/v8/src/compiler/representation-change.h @@ -29,8 +29,13 @@ class Truncation final { static Truncation Word32() { return Truncation(TruncationKind::kWord32, kIdentifyZeros); } - static Truncation Float64(IdentifyZeros identify_zeros = kDistinguishZeros) { - return Truncation(TruncationKind::kFloat64, identify_zeros); + static Truncation Word64() { + return Truncation(TruncationKind::kWord64, kIdentifyZeros); + } + static Truncation OddballAndBigIntToNumber( + IdentifyZeros identify_zeros = kDistinguishZeros) { + return Truncation(TruncationKind::kOddballAndBigIntToNumber, + identify_zeros); } static Truncation Any(IdentifyZeros identify_zeros = kDistinguishZeros) { return Truncation(TruncationKind::kAny, identify_zeros); @@ -50,8 +55,11 @@ class Truncation final { bool IsUsedAsWord32() const { return LessGeneral(kind_, TruncationKind::kWord32); } - bool IsUsedAsFloat64() const { - return LessGeneral(kind_, TruncationKind::kFloat64); + bool IsUsedAsWord64() const { + return LessGeneral(kind_, TruncationKind::kWord64); + } + bool TruncatesOddballAndBigIntToNumber() const { + return LessGeneral(kind_, TruncationKind::kOddballAndBigIntToNumber); } bool IdentifiesUndefinedAndZero() { return LessGeneral(kind_, TruncationKind::kWord32) || @@ -81,13 +89,15 @@ class Truncation final { kNone, kBool, kWord32, - kFloat64, + kWord64, + kOddballAndBigIntToNumber, kAny }; explicit Truncation(TruncationKind kind, IdentifyZeros identify_zeros) : kind_(kind), identify_zeros_(identify_zeros) { - DCHECK(kind == TruncationKind::kAny || kind == TruncationKind::kFloat64 || + DCHECK(kind == TruncationKind::kAny || + kind == TruncationKind::kOddballAndBigIntToNumber || identify_zeros == kIdentifyZeros); } TruncationKind kind() const { return kind_; } @@ -109,7 +119,8 @@ enum class TypeCheckKind : uint8_t { kSigned64, kNumber, kNumberOrOddball, - kHeapObject + kHeapObject, + kBigInt, }; inline std::ostream& operator<<(std::ostream& os, TypeCheckKind type_check) { @@ -128,6 +139,8 @@ inline std::ostream& operator<<(std::ostream& os, TypeCheckKind type_check) { return os << "NumberOrOddball"; case TypeCheckKind::kHeapObject: return os << "HeapObject"; + case TypeCheckKind::kBigInt: + return os << "BigInt"; } UNREACHABLE(); } @@ -160,6 +173,13 @@ class UseInfo { static UseInfo TruncatingWord32() { return UseInfo(MachineRepresentation::kWord32, Truncation::Word32()); } + static UseInfo TruncatingWord64() { + return UseInfo(MachineRepresentation::kWord64, Truncation::Word64()); + } + static UseInfo CheckedBigIntTruncatingWord64(const VectorSlotPair& feedback) { + return UseInfo(MachineRepresentation::kWord64, Truncation::Word64(), + TypeCheckKind::kBigInt, feedback); + } static UseInfo Word64() { return UseInfo(MachineRepresentation::kWord64, Truncation::Any()); } @@ -175,7 +195,7 @@ class UseInfo { static UseInfo TruncatingFloat64( IdentifyZeros identify_zeros = kDistinguishZeros) { return UseInfo(MachineRepresentation::kFloat64, - 
Truncation::Float64(identify_zeros)); + Truncation::OddballAndBigIntToNumber(identify_zeros)); } static UseInfo AnyTagged() { return UseInfo(MachineRepresentation::kTagged, Truncation::Any()); } @@ -203,6 +223,12 @@ class UseInfo { return UseInfo(MachineRepresentation::kTaggedPointer, Truncation::Any(), TypeCheckKind::kHeapObject, feedback); } + + static UseInfo CheckedBigIntAsTaggedPointer(const VectorSlotPair& feedback) { + return UseInfo(MachineRepresentation::kTaggedPointer, Truncation::Any(), + TypeCheckKind::kBigInt, feedback); + } + static UseInfo CheckedSignedSmallAsTaggedSigned( const VectorSlotPair& feedback, IdentifyZeros identify_zeros = kDistinguishZeros) { @@ -240,8 +266,6 @@ class UseInfo { } static UseInfo CheckedNumberOrOddballAsFloat64( IdentifyZeros identify_zeros, const VectorSlotPair& feedback) { - // TODO(tebbi): We should use Float64 truncation here, since this exactly - // means that we treat Oddballs as Numbers. return UseInfo(MachineRepresentation::kFloat64, Truncation::Any(identify_zeros), TypeCheckKind::kNumberOrOddball, feedback); @@ -287,7 +311,7 @@ class UseInfo { // Eagerly folds any representation changes for constants. class V8_EXPORT_PRIVATE RepresentationChanger final { public: - RepresentationChanger(JSGraph* jsgraph, Isolate* isolate); + RepresentationChanger(JSGraph* jsgraph, JSHeapBroker* broker); // Changes representation from {output_type} to {use_rep}. The {truncation} // parameter is only used for sanity checking - if the changer cannot figure @@ -317,7 +341,7 @@ class V8_EXPORT_PRIVATE RepresentationChanger final { private: TypeCache const* cache_; JSGraph* jsgraph_; - Isolate* isolate_; + JSHeapBroker* broker_; friend class RepresentationChangerTester; // accesses the below fields. @@ -371,12 +395,13 @@ class V8_EXPORT_PRIVATE RepresentationChanger final { Node* InsertChangeTaggedSignedToInt32(Node* node); Node* InsertChangeTaggedToFloat64(Node* node); Node* InsertChangeUint32ToFloat64(Node* node); + Node* InsertChangeCompressedToTagged(Node* node); Node* InsertConversion(Node* node, const Operator* op, Node* use_node); Node* InsertTruncateInt64ToInt32(Node* node); Node* InsertUnconditionalDeopt(Node* node, DeoptimizeReason reason); JSGraph* jsgraph() const { return jsgraph_; } - Isolate* isolate() const { return isolate_; } + Isolate* isolate() const; Factory* factory() const { return isolate()->factory(); } SimplifiedOperatorBuilder* simplified() { return jsgraph()->simplified(); } MachineOperatorBuilder* machine() { return jsgraph()->machine(); } diff --git a/deps/v8/src/compiler/scheduler.cc b/deps/v8/src/compiler/scheduler.cc index b57162f7f5a193..25919bb3b3a35f 100644 --- a/deps/v8/src/compiler/scheduler.cc +++ b/deps/v8/src/compiler/scheduler.cc @@ -7,6 +7,7 @@ #include <iomanip> #include "src/base/adapters.h" +#include "src/codegen/tick-counter.h" #include "src/compiler/common-operator.h" #include "src/compiler/control-equivalence.h" #include "src/compiler/graph.h" @@ -26,7 +27,7 @@ namespace compiler { } while (false) Scheduler::Scheduler(Zone* zone, Graph* graph, Schedule* schedule, Flags flags, - size_t node_count_hint) + size_t node_count_hint, TickCounter* tick_counter) : zone_(zone), graph_(graph), schedule_(schedule), @@ -34,12 +35,14 @@ Scheduler::Scheduler(Zone* zone, Graph* graph, Schedule* schedule, Flags flags, scheduled_nodes_(zone), schedule_root_nodes_(zone), schedule_queue_(zone), - node_data_(zone) { + node_data_(zone), + tick_counter_(tick_counter) { node_data_.reserve(node_count_hint); node_data_.resize(graph->NodeCount(),
DefaultSchedulerData()); } -Schedule* Scheduler::ComputeSchedule(Zone* zone, Graph* graph, Flags flags) { +Schedule* Scheduler::ComputeSchedule(Zone* zone, Graph* graph, Flags flags, + TickCounter* tick_counter) { Zone* schedule_zone = (flags & Scheduler::kTempSchedule) ? zone : graph->zone(); @@ -50,7 +53,8 @@ Schedule* Scheduler::ComputeSchedule(Zone* zone, Graph* graph, Flags flags) { Schedule* schedule = new (schedule_zone) Schedule(schedule_zone, node_count_hint); - Scheduler scheduler(zone, graph, schedule, flags, node_count_hint); + Scheduler scheduler(zone, graph, schedule, flags, node_count_hint, + tick_counter); scheduler.BuildCFG(); scheduler.ComputeSpecialRPONumbering(); @@ -65,7 +69,6 @@ Schedule* Scheduler::ComputeSchedule(Zone* zone, Graph* graph, Flags flags) { return schedule; } - Scheduler::SchedulerData Scheduler::DefaultSchedulerData() { SchedulerData def = {schedule_->start(), 0, kUnknown}; return def; @@ -258,6 +261,7 @@ class CFGBuilder : public ZoneObject { Queue(scheduler_->graph_->end()); while (!queue_.empty()) { // Breadth-first backwards traversal. + scheduler_->tick_counter_->DoTick(); Node* node = queue_.front(); queue_.pop(); int max = NodeProperties::PastControlIndex(node); @@ -283,6 +287,7 @@ class CFGBuilder : public ZoneObject { component_end_ = schedule_->block(exit); scheduler_->equivalence_->Run(exit); while (!queue_.empty()) { // Breadth-first backwards traversal. + scheduler_->tick_counter_->DoTick(); Node* node = queue_.front(); queue_.pop(); @@ -728,11 +733,10 @@ class SpecialRPONumberer : public ZoneObject { } }; - int Push(ZoneVector& stack, int depth, - BasicBlock* child, int unvisited) { + int Push(int depth, BasicBlock* child, int unvisited) { if (child->rpo_number() == unvisited) { - stack[depth].block = child; - stack[depth].index = 0; + stack_[depth].block = child; + stack_[depth].index = 0; child->set_rpo_number(kBlockOnStack); return depth + 1; } @@ -780,7 +784,7 @@ class SpecialRPONumberer : public ZoneObject { DCHECK_LT(previous_block_count_, schedule_->BasicBlockCount()); stack_.resize(schedule_->BasicBlockCount() - previous_block_count_); previous_block_count_ = schedule_->BasicBlockCount(); - int stack_depth = Push(stack_, 0, entry, kBlockUnvisited1); + int stack_depth = Push(0, entry, kBlockUnvisited1); int num_loops = static_cast(loops_.size()); while (stack_depth > 0) { @@ -802,7 +806,7 @@ class SpecialRPONumberer : public ZoneObject { } else { // Push the successor onto the stack. DCHECK_EQ(kBlockUnvisited1, succ->rpo_number()); - stack_depth = Push(stack_, stack_depth, succ, kBlockUnvisited1); + stack_depth = Push(stack_depth, succ, kBlockUnvisited1); } } else { // Finished with all successors; pop the stack and add the block. @@ -827,7 +831,7 @@ class SpecialRPONumberer : public ZoneObject { // edges that lead out of loops. Visits each block once, but linking loop // sections together is linear in the loop size, so overall is // O(|B| + max(loop_depth) * max(|loop|)) - stack_depth = Push(stack_, 0, entry, kBlockUnvisited2); + stack_depth = Push(0, entry, kBlockUnvisited2); while (stack_depth > 0) { SpecialRPOStackFrame* frame = &stack_[stack_depth - 1]; BasicBlock* block = frame->block; @@ -874,7 +878,7 @@ class SpecialRPONumberer : public ZoneObject { loop->AddOutgoing(zone_, succ); } else { // Push the successor onto the stack. - stack_depth = Push(stack_, stack_depth, succ, kBlockUnvisited2); + stack_depth = Push(stack_depth, succ, kBlockUnvisited2); if (HasLoopNumber(succ)) { // Push the inner loop onto the loop stack. 
DCHECK(GetLoopNumber(succ) < num_loops); @@ -958,8 +962,9 @@ class SpecialRPONumberer : public ZoneObject { } // Computes loop membership from the backedges of the control flow graph. - void ComputeLoopInfo(ZoneVector& queue, - size_t num_loops, ZoneVector* backedges) { + void ComputeLoopInfo( + ZoneVector& queue, // NOLINT(runtime/references) + size_t num_loops, ZoneVector* backedges) { // Extend existing loop membership vectors. for (LoopInfo& loop : loops_) { loop.members->Resize(static_cast(schedule_->BasicBlockCount()), @@ -1234,6 +1239,7 @@ void Scheduler::PrepareUses() { visited[node->id()] = true; stack.push(node->input_edges().begin()); while (!stack.empty()) { + tick_counter_->DoTick(); Edge edge = *stack.top(); Node* node = edge.to(); if (visited[node->id()]) { @@ -1262,6 +1268,7 @@ class ScheduleEarlyNodeVisitor { for (Node* const root : *roots) { queue_.push(root); while (!queue_.empty()) { + scheduler_->tick_counter_->DoTick(); VisitNode(queue_.front()); queue_.pop(); } @@ -1388,6 +1395,7 @@ class ScheduleLateNodeVisitor { queue->push(node); do { + scheduler_->tick_counter_->DoTick(); Node* const node = queue->front(); queue->pop(); VisitNode(node); diff --git a/deps/v8/src/compiler/scheduler.h b/deps/v8/src/compiler/scheduler.h index bd2f2780ddec9f..3d1fa40025b10a 100644 --- a/deps/v8/src/compiler/scheduler.h +++ b/deps/v8/src/compiler/scheduler.h @@ -15,6 +15,9 @@ namespace v8 { namespace internal { + +class TickCounter; + namespace compiler { // Forward declarations. @@ -23,7 +26,6 @@ class ControlEquivalence; class Graph; class SpecialRPONumberer; - // Computes a schedule from a graph, placing nodes into basic blocks and // ordering the basic blocks in the special RPO order. class V8_EXPORT_PRIVATE Scheduler { @@ -34,7 +36,8 @@ class V8_EXPORT_PRIVATE Scheduler { // The complete scheduling algorithm. Creates a new schedule and places all // nodes from the graph into it. - static Schedule* ComputeSchedule(Zone* temp_zone, Graph* graph, Flags flags); + static Schedule* ComputeSchedule(Zone* temp_zone, Graph* graph, Flags flags, + TickCounter* tick_counter); // Compute the RPO of blocks in an existing schedule. static BasicBlockVector* ComputeSpecialRPO(Zone* zone, Schedule* schedule); @@ -78,9 +81,10 @@ class V8_EXPORT_PRIVATE Scheduler { CFGBuilder* control_flow_builder_; // Builds basic blocks for controls. SpecialRPONumberer* special_rpo_; // Special RPO numbering of blocks. ControlEquivalence* equivalence_; // Control dependence equivalence. 
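// [Editorial sketch -- illustrative only, not part of the upstream patch.]
// The hunks above thread a TickCounter through every Scheduler phase so that
// long graph traversals can be metered. The real class lives in
// src/codegen/tick-counter.h; this sketch assumes only the DoTick() call
// sites visible in the diff.
#if 0  // sketch
class TickCounterSketch {
 public:
  void DoTick() { ++ticks_; }             // one tick per unit of work
  size_t ticks() const { return ticks_; }

 private:
  size_t ticks_ = 0;
};

// Usage, mirroring the instrumented worklist loops above:
//   while (!queue_.empty()) {
//     tick_counter_->DoTick();  // meter the breadth-first traversal
//     ...
//   }
#endif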
+ TickCounter* const tick_counter_; Scheduler(Zone* zone, Graph* graph, Schedule* schedule, Flags flags, - size_t node_count_hint_); + size_t node_count_hint_, TickCounter* tick_counter); inline SchedulerData DefaultSchedulerData(); inline SchedulerData* GetData(Node* node); diff --git a/deps/v8/src/compiler/serializer-for-background-compilation.cc b/deps/v8/src/compiler/serializer-for-background-compilation.cc index ecbd9cc0309947..5597850b0612c4 100644 --- a/deps/v8/src/compiler/serializer-for-background-compilation.cc +++ b/deps/v8/src/compiler/serializer-for-background-compilation.cc @@ -6,30 +6,495 @@ #include +#include "src/base/optional.h" +#include "src/compiler/access-info.h" +#include "src/compiler/bytecode-analysis.h" +#include "src/compiler/compilation-dependencies.h" #include "src/compiler/js-heap-broker.h" #include "src/compiler/vector-slot-pair.h" #include "src/handles/handles-inl.h" +#include "src/ic/call-optimization.h" #include "src/interpreter/bytecode-array-iterator.h" #include "src/objects/code.h" +#include "src/objects/js-array-inl.h" +#include "src/objects/js-regexp-inl.h" #include "src/objects/shared-function-info-inl.h" +#include "src/zone/zone-containers.h" #include "src/zone/zone.h" namespace v8 { namespace internal { namespace compiler { +#define CLEAR_ENVIRONMENT_LIST(V) \ + V(CallRuntimeForPair) \ + V(Debugger) \ + V(ResumeGenerator) \ + V(SuspendGenerator) + +#define KILL_ENVIRONMENT_LIST(V) \ + V(Abort) \ + V(ReThrow) \ + V(Throw) + +#define CLEAR_ACCUMULATOR_LIST(V) \ + V(Add) \ + V(AddSmi) \ + V(BitwiseAnd) \ + V(BitwiseAndSmi) \ + V(BitwiseNot) \ + V(BitwiseOr) \ + V(BitwiseOrSmi) \ + V(BitwiseXor) \ + V(BitwiseXorSmi) \ + V(CallRuntime) \ + V(CloneObject) \ + V(CreateArrayFromIterable) \ + V(CreateArrayLiteral) \ + V(CreateEmptyArrayLiteral) \ + V(CreateEmptyObjectLiteral) \ + V(CreateMappedArguments) \ + V(CreateObjectLiteral) \ + V(CreateRegExpLiteral) \ + V(CreateRestParameter) \ + V(CreateUnmappedArguments) \ + V(Dec) \ + V(DeletePropertySloppy) \ + V(DeletePropertyStrict) \ + V(Div) \ + V(DivSmi) \ + V(Exp) \ + V(ExpSmi) \ + V(ForInContinue) \ + V(ForInEnumerate) \ + V(ForInNext) \ + V(ForInStep) \ + V(Inc) \ + V(LdaLookupSlot) \ + V(LdaLookupSlotInsideTypeof) \ + V(LogicalNot) \ + V(Mod) \ + V(ModSmi) \ + V(Mul) \ + V(MulSmi) \ + V(Negate) \ + V(SetPendingMessage) \ + V(ShiftLeft) \ + V(ShiftLeftSmi) \ + V(ShiftRight) \ + V(ShiftRightLogical) \ + V(ShiftRightLogicalSmi) \ + V(ShiftRightSmi) \ + V(StaLookupSlot) \ + V(Sub) \ + V(SubSmi) \ + V(TestEqual) \ + V(TestEqualStrict) \ + V(TestGreaterThan) \ + V(TestGreaterThanOrEqual) \ + V(TestInstanceOf) \ + V(TestLessThan) \ + V(TestLessThanOrEqual) \ + V(TestNull) \ + V(TestReferenceEqual) \ + V(TestTypeOf) \ + V(TestUndefined) \ + V(TestUndetectable) \ + V(ToBooleanLogicalNot) \ + V(ToName) \ + V(ToNumber) \ + V(ToNumeric) \ + V(ToString) \ + V(TypeOf) + +#define UNCONDITIONAL_JUMPS_LIST(V) \ + V(Jump) \ + V(JumpConstant) \ + V(JumpLoop) + +#define CONDITIONAL_JUMPS_LIST(V) \ + V(JumpIfFalse) \ + V(JumpIfFalseConstant) \ + V(JumpIfJSReceiver) \ + V(JumpIfJSReceiverConstant) \ + V(JumpIfNotNull) \ + V(JumpIfNotNullConstant) \ + V(JumpIfNotUndefined) \ + V(JumpIfNotUndefinedConstant) \ + V(JumpIfNull) \ + V(JumpIfNullConstant) \ + V(JumpIfToBooleanFalse) \ + V(JumpIfToBooleanFalseConstant) \ + V(JumpIfToBooleanTrue) \ + V(JumpIfToBooleanTrueConstant) \ + V(JumpIfTrue) \ + V(JumpIfTrueConstant) \ + V(JumpIfUndefined) \ + V(JumpIfUndefinedConstant) + +#define IGNORED_BYTECODE_LIST(V) \ + V(CallNoFeedback) 
\
+  V(IncBlockCounter)               \
+  V(LdaNamedPropertyNoFeedback)    \
+  V(StackCheck)                    \
+  V(StaNamedPropertyNoFeedback)    \
+  V(ThrowReferenceErrorIfHole)     \
+  V(ThrowSuperAlreadyCalledIfNotHole) \
+  V(ThrowSuperNotCalledIfHole)
+
+#define UNREACHABLE_BYTECODE_LIST(V) \
+  V(ExtraWide)                       \
+  V(Illegal)                         \
+  V(Wide)
+
+#define SUPPORTED_BYTECODE_LIST(V)  \
+  V(CallAnyReceiver)                \
+  V(CallJSRuntime)                  \
+  V(CallProperty)                   \
+  V(CallProperty0)                  \
+  V(CallProperty1)                  \
+  V(CallProperty2)                  \
+  V(CallUndefinedReceiver)          \
+  V(CallUndefinedReceiver0)         \
+  V(CallUndefinedReceiver1)         \
+  V(CallUndefinedReceiver2)         \
+  V(CallWithSpread)                 \
+  V(Construct)                      \
+  V(ConstructWithSpread)            \
+  V(CreateBlockContext)             \
+  V(CreateCatchContext)             \
+  V(CreateClosure)                  \
+  V(CreateEvalContext)              \
+  V(CreateFunctionContext)          \
+  V(CreateWithContext)              \
+  V(GetSuperConstructor)            \
+  V(GetTemplateObject)              \
+  V(InvokeIntrinsic)                \
+  V(LdaConstant)                    \
+  V(LdaContextSlot)                 \
+  V(LdaCurrentContextSlot)          \
+  V(LdaImmutableContextSlot)        \
+  V(LdaImmutableCurrentContextSlot) \
+  V(LdaModuleVariable)              \
+  V(LdaFalse)                       \
+  V(LdaGlobal)                      \
+  V(LdaGlobalInsideTypeof)          \
+  V(LdaKeyedProperty)               \
+  V(LdaLookupContextSlot)           \
+  V(LdaLookupContextSlotInsideTypeof) \
+  V(LdaLookupGlobalSlot)            \
+  V(LdaLookupGlobalSlotInsideTypeof) \
+  V(LdaNamedProperty)               \
+  V(LdaNull)                        \
+  V(Ldar)                           \
+  V(LdaSmi)                         \
+  V(LdaTheHole)                     \
+  V(LdaTrue)                        \
+  V(LdaUndefined)                   \
+  V(LdaZero)                        \
+  V(Mov)                            \
+  V(PopContext)                     \
+  V(PushContext)                    \
+  V(Return)                         \
+  V(StaContextSlot)                 \
+  V(StaCurrentContextSlot)          \
+  V(StaGlobal)                      \
+  V(StaInArrayLiteral)              \
+  V(StaKeyedProperty)               \
+  V(StaModuleVariable)              \
+  V(StaNamedOwnProperty)            \
+  V(StaNamedProperty)               \
+  V(Star)                           \
+  V(SwitchOnGeneratorState)         \
+  V(SwitchOnSmiNoFeedback)          \
+  V(TestIn)                         \
+  CLEAR_ACCUMULATOR_LIST(V)         \
+  CLEAR_ENVIRONMENT_LIST(V)         \
+  CONDITIONAL_JUMPS_LIST(V)         \
+  IGNORED_BYTECODE_LIST(V)          \
+  KILL_ENVIRONMENT_LIST(V)          \
+  UNCONDITIONAL_JUMPS_LIST(V)       \
+  UNREACHABLE_BYTECODE_LIST(V)
+
+template <typename T>
+struct HandleComparator {
+  bool operator()(const Handle<T>& lhs, const Handle<T>& rhs) const {
+    return lhs.address() < rhs.address();
+  }
+};
+
+struct VirtualContext {
+  unsigned int distance;
+  Handle<Context> context;
+
+  VirtualContext(unsigned int distance_in, Handle<Context> context_in)
+      : distance(distance_in), context(context_in) {
+    CHECK_GT(distance, 0);
+  }
+  bool operator<(const VirtualContext& other) const {
+    return HandleComparator<Context>()(context, other.context) &&
+           distance < other.distance;
+  }
+};
+
+class FunctionBlueprint;
+using ConstantsSet = ZoneSet<Handle<Object>, HandleComparator<Object>>;
+using VirtualContextsSet = ZoneSet<VirtualContext>;
+using MapsSet = ZoneSet<Handle<Map>, HandleComparator<Map>>;
+using BlueprintsSet = ZoneSet<FunctionBlueprint>;
+
+class Hints {
+ public:
+  explicit Hints(Zone* zone);
+
+  const ConstantsSet& constants() const;
+  const MapsSet& maps() const;
+  const BlueprintsSet& function_blueprints() const;
+  const VirtualContextsSet& virtual_contexts() const;
+
+  void AddConstant(Handle<Object> constant);
+  void AddMap(Handle<Map> map);
+  void AddFunctionBlueprint(FunctionBlueprint function_blueprint);
+  void AddVirtualContext(VirtualContext virtual_context);
+
+  void Add(const Hints& other);
+
+  void Clear();
+  bool IsEmpty() const;
+
+#ifdef ENABLE_SLOW_DCHECKS
+  bool Includes(Hints const& other) const;
+  bool Equals(Hints const& other) const;
+#endif
+
+ private:
+  VirtualContextsSet virtual_contexts_;
+  ConstantsSet constants_;
+  MapsSet maps_;
+  BlueprintsSet function_blueprints_;
+};
+
+using HintsVector = ZoneVector<Hints>;
+
+class FunctionBlueprint {
+ public:
+  FunctionBlueprint(Handle<JSFunction> function, Isolate* isolate, Zone* zone);
+
+  FunctionBlueprint(Handle<SharedFunctionInfo> shared,
+                    Handle<FeedbackVector> feedback_vector,
+                    const Hints& context_hints);
+
+  Handle<SharedFunctionInfo> shared() const { return shared_; }
+  Handle<FeedbackVector> feedback_vector() const { return feedback_vector_; }
+  const Hints& context_hints() const { return context_hints_; }
+
+  bool operator<(const FunctionBlueprint& other) const {
+    // A feedback vector is never used for more than one SFI, so it can
+    // be used for strict ordering of blueprints.
+    DCHECK_IMPLIES(feedback_vector_.equals(other.feedback_vector_),
+                   shared_.equals(other.shared_));
+    return HandleComparator<FeedbackVector>()(feedback_vector_,
+                                              other.feedback_vector_);
+  }
+
+ private:
+  Handle<SharedFunctionInfo> shared_;
+  Handle<FeedbackVector> feedback_vector_;
+  Hints context_hints_;
+};
+
+class CompilationSubject {
+ public:
+  explicit CompilationSubject(FunctionBlueprint blueprint)
+      : blueprint_(blueprint) {}
+
+  // The zone parameter is to correctly initialize the blueprint,
+  // which contains zone-allocated context information.
+  CompilationSubject(Handle<JSFunction> closure, Isolate* isolate, Zone* zone);
+
+  const FunctionBlueprint& blueprint() const { return blueprint_; }
+  MaybeHandle<JSFunction> closure() const { return closure_; }
+
+ private:
+  FunctionBlueprint blueprint_;
+  MaybeHandle<JSFunction> closure_;
+};
+
+// The SerializerForBackgroundCompilation makes sure that the relevant function
+// data such as bytecode, SharedFunctionInfo and FeedbackVector, used by later
+// optimizations in the compiler, is copied to the heap broker.
+class SerializerForBackgroundCompilation {
+ public:
+  SerializerForBackgroundCompilation(
+      JSHeapBroker* broker, CompilationDependencies* dependencies, Zone* zone,
+      Handle<JSFunction> closure,
+      SerializerForBackgroundCompilationFlags flags, BailoutId osr_offset);
+  Hints Run();  // NOTE: Returns empty for an already-serialized function.
+
+  class Environment;
+
+ private:
+  SerializerForBackgroundCompilation(
+      JSHeapBroker* broker, CompilationDependencies* dependencies, Zone* zone,
+      CompilationSubject function, base::Optional<Hints> new_target,
+      const HintsVector& arguments,
+      SerializerForBackgroundCompilationFlags flags);
+
+  bool BailoutOnUninitialized(FeedbackSlot slot);
+
+  void TraverseBytecode();
+
+#define DECLARE_VISIT_BYTECODE(name, ...)
\ + void Visit##name(interpreter::BytecodeArrayIterator* iterator); + SUPPORTED_BYTECODE_LIST(DECLARE_VISIT_BYTECODE) +#undef DECLARE_VISIT_BYTECODE + + void ProcessCallOrConstruct(Hints callee, base::Optional new_target, + const HintsVector& arguments, FeedbackSlot slot, + bool with_spread = false); + void ProcessCallVarArgs(interpreter::BytecodeArrayIterator* iterator, + ConvertReceiverMode receiver_mode, + bool with_spread = false); + void ProcessApiCall(Handle target, + const HintsVector& arguments); + void ProcessReceiverMapForApiCall( + FunctionTemplateInfoRef& target, // NOLINT(runtime/references) + Handle receiver); + void ProcessBuiltinCall(Handle target, + const HintsVector& arguments); + + void ProcessJump(interpreter::BytecodeArrayIterator* iterator); + + void ProcessKeyedPropertyAccess(Hints const& receiver, Hints const& key, + FeedbackSlot slot, AccessMode mode); + void ProcessNamedPropertyAccess(interpreter::BytecodeArrayIterator* iterator, + AccessMode mode); + void ProcessNamedPropertyAccess(Hints const& receiver, NameRef const& name, + FeedbackSlot slot, AccessMode mode); + void ProcessMapHintsForPromises(Hints const& receiver_hints); + void ProcessHintsForPromiseResolve(Hints const& resolution_hints); + void ProcessHintsForRegExpTest(Hints const& regexp_hints); + PropertyAccessInfo ProcessMapForRegExpTest(MapRef map); + void ProcessHintsForFunctionCall(Hints const& target_hints); + + GlobalAccessFeedback const* ProcessFeedbackForGlobalAccess(FeedbackSlot slot); + NamedAccessFeedback const* ProcessFeedbackMapsForNamedAccess( + const MapHandles& maps, AccessMode mode, NameRef const& name); + ElementAccessFeedback const* ProcessFeedbackMapsForElementAccess( + const MapHandles& maps, AccessMode mode, + KeyedAccessMode const& keyed_mode); + void ProcessFeedbackForPropertyAccess(FeedbackSlot slot, AccessMode mode, + base::Optional static_name); + void ProcessMapForNamedPropertyAccess(MapRef const& map, NameRef const& name); + + void ProcessCreateContext(); + enum ContextProcessingMode { + kIgnoreSlot, + kSerializeSlot, + kSerializeSlotAndAddToAccumulator + }; + + void ProcessContextAccess(const Hints& context_hints, int slot, int depth, + ContextProcessingMode mode); + void ProcessImmutableLoad(ContextRef& context, // NOLINT(runtime/references) + int slot, ContextProcessingMode mode); + void ProcessLdaLookupGlobalSlot(interpreter::BytecodeArrayIterator* iterator); + void ProcessLdaLookupContextSlot( + interpreter::BytecodeArrayIterator* iterator); + + // Performs extension lookups for [0, depth) like + // BytecodeGraphBuilder::CheckContextExtensions(). + void ProcessCheckContextExtensions(int depth); + + Hints RunChildSerializer(CompilationSubject function, + base::Optional new_target, + const HintsVector& arguments, bool with_spread); + + // When (forward-)branching bytecodes are encountered, e.g. a conditional + // jump, we call ContributeToJumpTargetEnvironment to "remember" the current + // environment, associated with the jump target offset. When serialization + // eventually reaches that offset, we call IncorporateJumpTargetEnvironment to + // merge that environment back into whatever is the current environment then. + // Note: Since there may be multiple jumps to the same target, + // ContributeToJumpTargetEnvironment may actually do a merge as well. 
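// [Editorial sketch -- illustrative only, not part of the upstream patch.]
// The comment above describes the forward-jump bookkeeping. Reduced to its
// essence, with a toy Env whose Merge() unions hint sets (all names here are
// hypothetical), the two operations behave like this:
#if 0  // sketch
struct Env {
  std::set<int> hints;
  void Merge(const Env& other) {
    hints.insert(other.hints.begin(), other.hints.end());
  }
};

std::map<int, Env> jump_target_envs;  // keyed by bytecode offset

// At a forward jump: stash a copy of the current environment, or merge into
// an existing stash if another jump already targets the same offset.
void ContributeToTarget(int target_offset, const Env& current) {
  auto it = jump_target_envs.find(target_offset);
  if (it == jump_target_envs.end()) {
    jump_target_envs.emplace(target_offset, current);
  } else {
    it->second.Merge(current);
  }
}

// When serialization reaches the offset: fold the stash back in and drop it.
void IncorporateTarget(int target_offset, Env* current) {
  auto it = jump_target_envs.find(target_offset);
  if (it != jump_target_envs.end()) {
    current->Merge(it->second);
    jump_target_envs.erase(it);
  }
}
#endif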
+ void ContributeToJumpTargetEnvironment(int target_offset); + void IncorporateJumpTargetEnvironment(int target_offset); + + Handle bytecode_array() const; + BytecodeAnalysis const& GetBytecodeAnalysis(bool serialize); + + JSHeapBroker* broker() const { return broker_; } + CompilationDependencies* dependencies() const { return dependencies_; } + Zone* zone() const { return zone_; } + Environment* environment() const { return environment_; } + SerializerForBackgroundCompilationFlags flags() const { return flags_; } + BailoutId osr_offset() const { return osr_offset_; } + + JSHeapBroker* const broker_; + CompilationDependencies* const dependencies_; + Zone* const zone_; + Environment* const environment_; + ZoneUnorderedMap jump_target_environments_; + SerializerForBackgroundCompilationFlags const flags_; + BailoutId const osr_offset_; +}; + +void RunSerializerForBackgroundCompilation( + JSHeapBroker* broker, CompilationDependencies* dependencies, Zone* zone, + Handle closure, SerializerForBackgroundCompilationFlags flags, + BailoutId osr_offset) { + SerializerForBackgroundCompilation serializer(broker, dependencies, zone, + closure, flags, osr_offset); + serializer.Run(); +} + using BytecodeArrayIterator = interpreter::BytecodeArrayIterator; +FunctionBlueprint::FunctionBlueprint(Handle shared, + Handle feedback_vector, + const Hints& context_hints) + : shared_(shared), + feedback_vector_(feedback_vector), + context_hints_(context_hints) {} + +FunctionBlueprint::FunctionBlueprint(Handle function, + Isolate* isolate, Zone* zone) + : shared_(handle(function->shared(), isolate)), + feedback_vector_(handle(function->feedback_vector(), isolate)), + context_hints_(zone) { + context_hints_.AddConstant(handle(function->context(), isolate)); +} + CompilationSubject::CompilationSubject(Handle closure, - Isolate* isolate) - : blueprint_{handle(closure->shared(), isolate), - handle(closure->feedback_vector(), isolate)}, - closure_(closure) { + Isolate* isolate, Zone* zone) + : blueprint_(closure, isolate, zone), closure_(closure) { CHECK(closure->has_feedback_vector()); } Hints::Hints(Zone* zone) - : constants_(zone), maps_(zone), function_blueprints_(zone) {} + : virtual_contexts_(zone), + constants_(zone), + maps_(zone), + function_blueprints_(zone) {} + +#ifdef ENABLE_SLOW_DCHECKS +namespace { +template +bool SetIncludes(ZoneSet const& lhs, + ZoneSet const& rhs) { + return std::all_of(rhs.cbegin(), rhs.cend(), + [&](K const& x) { return lhs.find(x) != lhs.cend(); }); +} +} // namespace +bool Hints::Includes(Hints const& other) const { + return SetIncludes(constants(), other.constants()) && + SetIncludes(function_blueprints(), other.function_blueprints()) && + SetIncludes(maps(), other.maps()); +} +bool Hints::Equals(Hints const& other) const { + return this->Includes(other) && other.Includes(*this); +} +#endif const ConstantsSet& Hints::constants() const { return constants_; } @@ -39,6 +504,14 @@ const BlueprintsSet& Hints::function_blueprints() const { return function_blueprints_; } +const VirtualContextsSet& Hints::virtual_contexts() const { + return virtual_contexts_; +} + +void Hints::AddVirtualContext(VirtualContext virtual_context) { + virtual_contexts_.insert(virtual_context); +} + void Hints::AddConstant(Handle constant) { constants_.insert(constant); } @@ -53,16 +526,29 @@ void Hints::Add(const Hints& other) { for (auto x : other.constants()) AddConstant(x); for (auto x : other.maps()) AddMap(x); for (auto x : other.function_blueprints()) AddFunctionBlueprint(x); + for (auto x : 
other.virtual_contexts()) AddVirtualContext(x); } bool Hints::IsEmpty() const { - return constants().empty() && maps().empty() && function_blueprints().empty(); + return constants().empty() && maps().empty() && + function_blueprints().empty() && virtual_contexts().empty(); } +std::ostream& operator<<(std::ostream& out, + const VirtualContext& virtual_context) { + out << "Distance " << virtual_context.distance << " from " + << Brief(*virtual_context.context) << std::endl; + return out; +} + +std::ostream& operator<<(std::ostream& out, const Hints& hints); + std::ostream& operator<<(std::ostream& out, const FunctionBlueprint& blueprint) { - out << Brief(*blueprint.shared) << std::endl; - out << Brief(*blueprint.feedback_vector) << std::endl; + out << Brief(*blueprint.shared()) << std::endl; + out << Brief(*blueprint.feedback_vector()) << std::endl; + !blueprint.context_hints().IsEmpty() && out << blueprint.context_hints() + << "):" << std::endl; return out; } @@ -76,10 +562,14 @@ std::ostream& operator<<(std::ostream& out, const Hints& hints) { for (FunctionBlueprint const& blueprint : hints.function_blueprints()) { out << " blueprint " << blueprint << std::endl; } + for (VirtualContext const& virtual_context : hints.virtual_contexts()) { + out << " virtual context " << virtual_context << std::endl; + } return out; } void Hints::Clear() { + virtual_contexts_.clear(); constants_.clear(); maps_.clear(); function_blueprints_.clear(); @@ -92,50 +582,53 @@ class SerializerForBackgroundCompilation::Environment : public ZoneObject { Environment(Zone* zone, Isolate* isolate, CompilationSubject function, base::Optional new_target, const HintsVector& arguments); - bool IsDead() const { return environment_hints_.empty(); } + bool IsDead() const { return ephemeral_hints_.empty(); } void Kill() { DCHECK(!IsDead()); - environment_hints_.clear(); + ephemeral_hints_.clear(); DCHECK(IsDead()); } void Revive() { DCHECK(IsDead()); - environment_hints_.resize(environment_hints_size(), Hints(zone())); + ephemeral_hints_.resize(ephemeral_hints_size(), Hints(zone())); DCHECK(!IsDead()); } - // When control flow bytecodes are encountered, e.g. a conditional jump, - // the current environment needs to be stashed together with the target jump - // address. Later, when this target bytecode is handled, the stashed - // environment will be merged into the current one. + // Merge {other} into {this} environment (leaving {other} unmodified). 
void Merge(Environment* other); FunctionBlueprint function() const { return function_; } + Hints const& closure_hints() const { return closure_hints_; } + Hints const& current_context_hints() const { return current_context_hints_; } + Hints& current_context_hints() { return current_context_hints_; } + Hints const& return_value_hints() const { return return_value_hints_; } + Hints& return_value_hints() { return return_value_hints_; } + Hints& accumulator_hints() { - CHECK_LT(accumulator_index(), environment_hints_.size()); - return environment_hints_[accumulator_index()]; + CHECK_LT(accumulator_index(), ephemeral_hints_.size()); + return ephemeral_hints_[accumulator_index()]; } + Hints& register_hints(interpreter::Register reg) { + if (reg.is_function_closure()) return closure_hints_; + if (reg.is_current_context()) return current_context_hints_; int local_index = RegisterToLocalIndex(reg); - CHECK_LT(local_index, environment_hints_.size()); - return environment_hints_[local_index]; + CHECK_LT(local_index, ephemeral_hints_.size()); + return ephemeral_hints_[local_index]; } - Hints& return_value_hints() { return return_value_hints_; } - // Clears all hints except those for the return value and the closure. + // Clears all hints except those for the context, return value, and the + // closure. void ClearEphemeralHints() { - DCHECK_EQ(environment_hints_.size(), function_closure_index() + 1); - for (int i = 0; i < function_closure_index(); ++i) { - environment_hints_[i].Clear(); - } + for (auto& hints : ephemeral_hints_) hints.Clear(); } // Appends the hints for the given register range to {dst} (in order). void ExportRegisterHints(interpreter::Register first, size_t count, - HintsVector& dst); + HintsVector& dst); // NOLINT(runtime/references) private: friend std::ostream& operator<<(std::ostream& out, const Environment& env); @@ -153,34 +646,39 @@ class SerializerForBackgroundCompilation::Environment : public ZoneObject { int const parameter_count_; int const register_count_; - // environment_hints_ contains hints for the contents of the registers, + Hints closure_hints_; + Hints current_context_hints_; + Hints return_value_hints_; + + // ephemeral_hints_ contains hints for the contents of the registers, // the accumulator and the parameters. The layout is as follows: - // [ parameters | registers | accumulator | context | closure ] + // [ parameters | registers | accumulator ] // The first parameter is the receiver. 
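// [Editorial sketch -- illustrative only, not part of the upstream patch.]
// Given the layout comment above, the index arithmetic behind
// accumulator_index() and RegisterToLocalIndex() is simply:
//
//   ephemeral_hints_:  [ a0 .. a(P-1) | r0 .. r(R-1) | accumulator ]
//
// for parameter_count P (a0 being the receiver) and register_count R:
#if 0  // sketch
constexpr int RegisterIndex(int P, int r) { return P + r; }  // 0 <= r < R
constexpr int AccumulatorIndex(int P, int R) { return P + R; }
constexpr int EphemeralHintsSize(int P, int R) { return P + R + 1; }
static_assert(EphemeralHintsSize(2, 3) == 6, "receiver+a1 | r0..r2 | acc");
#endif
// Context and closure hints now live in dedicated members rather than in
// this vector, which is what the rename to "ephemeral" reflects.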
- HintsVector environment_hints_; + HintsVector ephemeral_hints_; int accumulator_index() const { return parameter_count() + register_count(); } - int current_context_index() const { return accumulator_index() + 1; } - int function_closure_index() const { return current_context_index() + 1; } - int environment_hints_size() const { return function_closure_index() + 1; } - - Hints return_value_hints_; + int ephemeral_hints_size() const { return accumulator_index() + 1; } }; SerializerForBackgroundCompilation::Environment::Environment( Zone* zone, CompilationSubject function) : zone_(zone), function_(function.blueprint()), - parameter_count_(function_.shared->GetBytecodeArray().parameter_count()), - register_count_(function_.shared->GetBytecodeArray().register_count()), - environment_hints_(environment_hints_size(), Hints(zone), zone), - return_value_hints_(zone) { + parameter_count_( + function_.shared()->GetBytecodeArray().parameter_count()), + register_count_(function_.shared()->GetBytecodeArray().register_count()), + closure_hints_(zone), + current_context_hints_(zone), + return_value_hints_(zone), + ephemeral_hints_(ephemeral_hints_size(), Hints(zone), zone) { Handle closure; if (function.closure().ToHandle(&closure)) { - environment_hints_[function_closure_index()].AddConstant(closure); + closure_hints_.AddConstant(closure); } else { - environment_hints_[function_closure_index()].AddFunctionBlueprint( - function.blueprint()); + closure_hints_.AddFunctionBlueprint(function.blueprint()); } + + // Consume blueprint context hint information. + current_context_hints().Add(function.blueprint().context_hints()); } SerializerForBackgroundCompilation::Environment::Environment( @@ -191,18 +689,19 @@ SerializerForBackgroundCompilation::Environment::Environment( // the parameter_count. size_t param_count = static_cast(parameter_count()); for (size_t i = 0; i < std::min(arguments.size(), param_count); ++i) { - environment_hints_[i] = arguments[i]; + ephemeral_hints_[i] = arguments[i]; } // Pad the rest with "undefined". 
Hints undefined_hint(zone); undefined_hint.AddConstant(isolate->factory()->undefined_value()); for (size_t i = arguments.size(); i < param_count; ++i) { - environment_hints_[i] = undefined_hint; + ephemeral_hints_[i] = undefined_hint; } interpreter::Register new_target_reg = - function_.shared->GetBytecodeArray() + function_.shared() + ->GetBytecodeArray() .incoming_new_target_or_generator_register(); if (new_target_reg.is_valid()) { DCHECK(register_hints(new_target_reg).IsEmpty()); @@ -219,16 +718,20 @@ void SerializerForBackgroundCompilation::Environment::Merge( CHECK_EQ(parameter_count(), other->parameter_count()); CHECK_EQ(register_count(), other->register_count()); + SLOW_DCHECK(closure_hints_.Equals(other->closure_hints_)); + if (IsDead()) { - environment_hints_ = other->environment_hints_; + ephemeral_hints_ = other->ephemeral_hints_; + SLOW_DCHECK(return_value_hints_.Includes(other->return_value_hints_)); CHECK(!IsDead()); return; } - CHECK_EQ(environment_hints_.size(), other->environment_hints_.size()); - for (size_t i = 0; i < environment_hints_.size(); ++i) { - environment_hints_[i].Add(other->environment_hints_[i]); + CHECK_EQ(ephemeral_hints_.size(), other->ephemeral_hints_.size()); + for (size_t i = 0; i < ephemeral_hints_.size(); ++i) { + ephemeral_hints_[i].Add(other->ephemeral_hints_[i]); } + return_value_hints_.Add(other->return_value_hints_); } @@ -236,42 +739,39 @@ std::ostream& operator<<( std::ostream& out, const SerializerForBackgroundCompilation::Environment& env) { std::ostringstream output_stream; + output_stream << "Function "; + env.function_.shared()->Name().Print(output_stream); - for (size_t i = 0; i << env.parameter_count(); ++i) { - Hints const& hints = env.environment_hints_[i]; - if (!hints.IsEmpty()) { - output_stream << "Hints for a" << i << ":\n" << hints; - } - } - for (size_t i = 0; i << env.register_count(); ++i) { - Hints const& hints = env.environment_hints_[env.parameter_count() + i]; - if (!hints.IsEmpty()) { - output_stream << "Hints for r" << i << ":\n" << hints; - } - } - { - Hints const& hints = env.environment_hints_[env.accumulator_index()]; - if (!hints.IsEmpty()) { - output_stream << "Hints for :\n" << hints; + if (env.IsDead()) { + output_stream << "dead\n"; + } else { + output_stream << "alive\n"; + for (int i = 0; i < static_cast(env.ephemeral_hints_.size()); ++i) { + Hints const& hints = env.ephemeral_hints_[i]; + if (!hints.IsEmpty()) { + if (i < env.parameter_count()) { + output_stream << "Hints for a" << i << ":\n"; + } else if (i < env.parameter_count() + env.register_count()) { + int local_register = i - env.parameter_count(); + output_stream << "Hints for r" << local_register << ":\n"; + } else if (i == env.accumulator_index()) { + output_stream << "Hints for :\n"; + } else { + UNREACHABLE(); + } + output_stream << hints; + } } } - { - Hints const& hints = env.environment_hints_[env.function_closure_index()]; - if (!hints.IsEmpty()) { - output_stream << "Hints for :\n" << hints; - } + + if (!env.closure_hints().IsEmpty()) { + output_stream << "Hints for :\n" << env.closure_hints(); } - { - Hints const& hints = env.environment_hints_[env.current_context_index()]; - if (!hints.IsEmpty()) { - output_stream << "Hints for :\n" << hints; - } + if (!env.current_context_hints().IsEmpty()) { + output_stream << "Hints for :\n" << env.current_context_hints(); } - { - Hints const& hints = env.return_value_hints_; - if (!hints.IsEmpty()) { - output_stream << "Hints for {return value}:\n" << hints; - } + if 
(!env.return_value_hints().IsEmpty()) { + output_stream << "Hints for {return value}:\n" << env.return_value_hints(); } out << output_stream.str(); @@ -280,25 +780,26 @@ std::ostream& operator<<( int SerializerForBackgroundCompilation::Environment::RegisterToLocalIndex( interpreter::Register reg) const { - // TODO(mslekova): We also want to gather hints for the context. - if (reg.is_current_context()) return current_context_index(); - if (reg.is_function_closure()) return function_closure_index(); if (reg.is_parameter()) { return reg.ToParameterIndex(parameter_count()); } else { + DCHECK(!reg.is_function_closure()); return parameter_count() + reg.index(); } } SerializerForBackgroundCompilation::SerializerForBackgroundCompilation( JSHeapBroker* broker, CompilationDependencies* dependencies, Zone* zone, - Handle closure, SerializerForBackgroundCompilationFlags flags) + Handle closure, SerializerForBackgroundCompilationFlags flags, + BailoutId osr_offset) : broker_(broker), dependencies_(dependencies), zone_(zone), - environment_(new (zone) Environment(zone, {closure, broker_->isolate()})), - stashed_environments_(zone), - flags_(flags) { + environment_(new (zone) Environment( + zone, CompilationSubject(closure, broker_->isolate(), zone))), + jump_target_environments_(zone), + flags_(flags), + osr_offset_(osr_offset) { JSFunctionRef(broker, closure).Serialize(); } @@ -311,9 +812,9 @@ SerializerForBackgroundCompilation::SerializerForBackgroundCompilation( zone_(zone), environment_(new (zone) Environment(zone, broker_->isolate(), function, new_target, arguments)), - stashed_environments_(zone), - flags_(flags) { - DCHECK(!(flags_ & SerializerForBackgroundCompilationFlag::kOsr)); + jump_target_environments_(zone), + flags_(flags), + osr_offset_(BailoutId::None()) { TraceScope tracer( broker_, this, "SerializerForBackgroundCompilation::SerializerForBackgroundCompilation"); @@ -331,12 +832,12 @@ bool SerializerForBackgroundCompilation::BailoutOnUninitialized( SerializerForBackgroundCompilationFlag::kBailoutOnUninitialized)) { return false; } - if (flags() & SerializerForBackgroundCompilationFlag::kOsr) { + if (!osr_offset().IsNone()) { // Exclude OSR from this optimization because we might end up skipping the // OSR entry point. TODO(neis): Support OSR? 
     return false;
   }
-  FeedbackNexus nexus(environment()->function().feedback_vector, slot);
+  FeedbackNexus nexus(environment()->function().feedback_vector(), slot);
   if (!slot.IsInvalid() && nexus.IsUninitialized()) {
     FeedbackSource source(nexus);
     if (broker()->HasFeedback(source)) {
@@ -354,9 +855,9 @@ Hints SerializerForBackgroundCompilation::Run() {
   TraceScope tracer(broker(), this, "SerializerForBackgroundCompilation::Run");
-  SharedFunctionInfoRef shared(broker(), environment()->function().shared);
-  FeedbackVectorRef feedback_vector(broker(),
-                                    environment()->function().feedback_vector);
+  SharedFunctionInfoRef shared(broker(), environment()->function().shared());
+  FeedbackVectorRef feedback_vector(
+      broker(), environment()->function().feedback_vector());
   if (shared.IsSerializedForCompilation(feedback_vector)) {
     TRACE_BROKER(broker(), "Already ran serializer for SharedFunctionInfo "
                                << Brief(*shared.object())
@@ -382,9 +883,10 @@
 class ExceptionHandlerMatcher {
  public:
   explicit ExceptionHandlerMatcher(
-      BytecodeArrayIterator const& bytecode_iterator)
+      BytecodeArrayIterator const& bytecode_iterator,
+      Handle<BytecodeArray> bytecode_array)
       : bytecode_iterator_(bytecode_iterator) {
-    HandlerTable table(*bytecode_iterator_.bytecode_array());
+    HandlerTable table(*bytecode_array);
     for (int i = 0, n = table.NumberOfRangeEntries(); i < n; ++i) {
       handlers_.insert(table.GetRangeHandler(i));
     }
@@ -407,30 +909,53 @@ class ExceptionHandlerMatcher {
   std::set<int>::const_iterator handlers_iterator_;
 };
 
+Handle<BytecodeArray> SerializerForBackgroundCompilation::bytecode_array()
+    const {
+  return handle(environment()->function().shared()->GetBytecodeArray(),
+                broker()->isolate());
+}
+
+BytecodeAnalysis const& SerializerForBackgroundCompilation::GetBytecodeAnalysis(
+    bool serialize) {
+  return broker()->GetBytecodeAnalysis(
+      bytecode_array(), osr_offset(),
+      flags() &
+          SerializerForBackgroundCompilationFlag::kAnalyzeEnvironmentLiveness,
+      serialize);
+}
+
 void SerializerForBackgroundCompilation::TraverseBytecode() {
-  BytecodeArrayRef bytecode_array(
-      broker(), handle(environment()->function().shared->GetBytecodeArray(),
-                       broker()->isolate()));
-  BytecodeArrayIterator iterator(bytecode_array.object());
-  ExceptionHandlerMatcher handler_matcher(iterator);
+  BytecodeAnalysis const& bytecode_analysis = GetBytecodeAnalysis(true);
+  BytecodeArrayRef(broker(), bytecode_array()).SerializeForCompilation();
+
+  BytecodeArrayIterator iterator(bytecode_array());
+  ExceptionHandlerMatcher handler_matcher(iterator, bytecode_array());
   for (; !iterator.done(); iterator.Advance()) {
-    MergeAfterJump(&iterator);
+    int const current_offset = iterator.current_offset();
+    IncorporateJumpTargetEnvironment(current_offset);
+
+    TRACE_BROKER(broker(),
+                 "Handling bytecode: " << current_offset << " "
                                        << iterator.current_bytecode());
+    TRACE_BROKER(broker(), "Current environment: " << *environment());
     if (environment()->IsDead()) {
-      if (iterator.current_bytecode() ==
-              interpreter::Bytecode::kResumeGenerator ||
-          handler_matcher.CurrentBytecodeIsExceptionHandlerStart()) {
+      if (handler_matcher.CurrentBytecodeIsExceptionHandlerStart()) {
         environment()->Revive();
       } else {
         continue;  // Skip this bytecode since TF won't generate code for it.
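// [Editorial sketch -- illustrative only, not part of the upstream patch.]
// The loop above skips bytecodes while the abstract environment is dead and
// revives it only at exception-handler entry points (generator resume points
// are now handled via resume jump targets instead). Schematically:
#if 0  // sketch
for (const Bytecode& bc : code) {
  if (dead) {                      // environment()->IsDead()
    if (bc.is_handler_start) {
      dead = false;                // environment()->Revive()
    } else {
      continue;                    // no code will be generated for bc
    }
  }
  // ...visit bc; Throw/ReThrow/Abort etc. set dead = true...
}
#endif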
} } - TRACE_BROKER(broker(), - "Handling bytecode: " << iterator.current_offset() << " " - << iterator.current_bytecode()); - TRACE_BROKER(broker(), "Current environment:\n" << *environment()); + if (bytecode_analysis.IsLoopHeader(current_offset)) { + // Graph builder might insert jumps to resume targets in the loop body. + LoopInfo const& loop_info = + bytecode_analysis.GetLoopInfoFor(current_offset); + for (const auto& target : loop_info.resume_jump_targets()) { + ContributeToJumpTargetEnvironment(target.target_offset()); + } + } switch (iterator.current_bytecode()) { #define DEFINE_BYTECODE_CASE(name) \ @@ -447,21 +972,6 @@ void SerializerForBackgroundCompilation::TraverseBytecode() { } } -void SerializerForBackgroundCompilation::VisitIllegal( - BytecodeArrayIterator* iterator) { - UNREACHABLE(); -} - -void SerializerForBackgroundCompilation::VisitWide( - BytecodeArrayIterator* iterator) { - UNREACHABLE(); -} - -void SerializerForBackgroundCompilation::VisitExtraWide( - BytecodeArrayIterator* iterator) { - UNREACHABLE(); -} - void SerializerForBackgroundCompilation::VisitGetSuperConstructor( BytecodeArrayIterator* iterator) { interpreter::Register dst = iterator->GetRegisterOperand(0); @@ -480,6 +990,20 @@ void SerializerForBackgroundCompilation::VisitGetSuperConstructor( } } +void SerializerForBackgroundCompilation::VisitGetTemplateObject( + BytecodeArrayIterator* iterator) { + ObjectRef description( + broker(), iterator->GetConstantForIndexOperand(0, broker()->isolate())); + FeedbackSlot slot = iterator->GetSlotOperand(1); + FeedbackVectorRef feedback_vector( + broker(), environment()->function().feedback_vector()); + SharedFunctionInfoRef shared(broker(), environment()->function().shared()); + JSArrayRef template_object = + shared.GetTemplateObject(description, feedback_vector, slot, true); + environment()->accumulator_hints().Clear(); + environment()->accumulator_hints().AddConstant(template_object.object()); +} + void SerializerForBackgroundCompilation::VisitLdaTrue( BytecodeArrayIterator* iterator) { environment()->accumulator_hints().Clear(); @@ -529,11 +1053,171 @@ void SerializerForBackgroundCompilation::VisitLdaSmi( Smi::FromInt(iterator->GetImmediateOperand(0)), broker()->isolate())); } +void SerializerForBackgroundCompilation::VisitInvokeIntrinsic( + BytecodeArrayIterator* iterator) { + Runtime::FunctionId functionId = iterator->GetIntrinsicIdOperand(0); + // For JSNativeContextSpecialization::ReduceJSAsyncFunctionResolve and + // JSNativeContextSpecialization::ReduceJSResolvePromise. + if (functionId == Runtime::kInlineAsyncFunctionResolve) { + interpreter::Register first_reg = iterator->GetRegisterOperand(1); + size_t reg_count = iterator->GetRegisterCountOperand(2); + CHECK_EQ(reg_count, 3); + HintsVector arguments(zone()); + environment()->ExportRegisterHints(first_reg, reg_count, arguments); + Hints const& resolution_hints = arguments[1]; // The resolution object. 
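// [Editorial sketch -- illustrative only, not part of the upstream patch.]
// VisitInvokeIntrinsic above models exactly one intrinsic; every other
// intrinsic conservatively clears the ephemeral hints. The dispatch shape:
#if 0  // sketch
switch (intrinsic_id) {
  case kInlineAsyncFunctionResolve:
    ProcessHintsForPromiseResolve(arguments[1]);  // the resolution object
    accumulator_hints.Clear();
    break;
  default:
    ClearEphemeralHints();  // safe fallback: assume nothing is known
}
#endif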
+ ProcessHintsForPromiseResolve(resolution_hints); + environment()->accumulator_hints().Clear(); + return; + } + environment()->ClearEphemeralHints(); +} + void SerializerForBackgroundCompilation::VisitLdaConstant( BytecodeArrayIterator* iterator) { environment()->accumulator_hints().Clear(); environment()->accumulator_hints().AddConstant( - handle(iterator->GetConstantForIndexOperand(0), broker()->isolate())); + iterator->GetConstantForIndexOperand(0, broker()->isolate())); +} + +void SerializerForBackgroundCompilation::VisitPushContext( + BytecodeArrayIterator* iterator) { + // Transfer current context hints to the destination register hints. + Hints& current_context_hints = environment()->current_context_hints(); + Hints& saved_context_hints = + environment()->register_hints(iterator->GetRegisterOperand(0)); + saved_context_hints.Clear(); + saved_context_hints.Add(current_context_hints); + + // New Context is in the accumulator. Put those hints into the current context + // register hints. + current_context_hints.Clear(); + current_context_hints.Add(environment()->accumulator_hints()); +} + +void SerializerForBackgroundCompilation::VisitPopContext( + BytecodeArrayIterator* iterator) { + // Replace current context hints with hints given in the argument register. + Hints& new_context_hints = + environment()->register_hints(iterator->GetRegisterOperand(0)); + environment()->current_context_hints().Clear(); + environment()->current_context_hints().Add(new_context_hints); +} + +void SerializerForBackgroundCompilation::ProcessImmutableLoad( + ContextRef& context_ref, int slot, ContextProcessingMode mode) { + DCHECK(mode == kSerializeSlot || mode == kSerializeSlotAndAddToAccumulator); + base::Optional slot_value = context_ref.get(slot, true); + + // Also, put the object into the constant hints for the accumulator. + if (mode == kSerializeSlotAndAddToAccumulator && slot_value.has_value()) { + environment()->accumulator_hints().AddConstant(slot_value.value().object()); + } +} + +void SerializerForBackgroundCompilation::ProcessContextAccess( + const Hints& context_hints, int slot, int depth, + ContextProcessingMode mode) { + // This function is for JSContextSpecialization::ReduceJSLoadContext and + // ReduceJSStoreContext. Those reductions attempt to eliminate as many + // loads as possible by making use of constant Context objects. In the + // case of an immutable load, ReduceJSLoadContext even attempts to load + // the value at {slot}, replacing the load with a constant. + for (auto x : context_hints.constants()) { + if (x->IsContext()) { + // Walk this context to the given depth and serialize the slot found. 
+ ContextRef context_ref(broker(), x); + size_t remaining_depth = depth; + context_ref = context_ref.previous(&remaining_depth, true); + if (remaining_depth == 0 && mode != kIgnoreSlot) { + ProcessImmutableLoad(context_ref, slot, mode); + } + } + } + for (auto x : context_hints.virtual_contexts()) { + if (x.distance <= static_cast(depth)) { + ContextRef context_ref(broker(), x.context); + size_t remaining_depth = depth - x.distance; + context_ref = context_ref.previous(&remaining_depth, true); + if (remaining_depth == 0 && mode != kIgnoreSlot) { + ProcessImmutableLoad(context_ref, slot, mode); + } + } + } +} + +void SerializerForBackgroundCompilation::VisitLdaContextSlot( + BytecodeArrayIterator* iterator) { + Hints& context_hints = + environment()->register_hints(iterator->GetRegisterOperand(0)); + const int slot = iterator->GetIndexOperand(1); + const int depth = iterator->GetUnsignedImmediateOperand(2); + environment()->accumulator_hints().Clear(); + ProcessContextAccess(context_hints, slot, depth, kIgnoreSlot); +} + +void SerializerForBackgroundCompilation::VisitLdaCurrentContextSlot( + BytecodeArrayIterator* iterator) { + const int slot = iterator->GetIndexOperand(0); + const int depth = 0; + Hints& context_hints = environment()->current_context_hints(); + environment()->accumulator_hints().Clear(); + ProcessContextAccess(context_hints, slot, depth, kIgnoreSlot); +} + +void SerializerForBackgroundCompilation::VisitLdaImmutableContextSlot( + BytecodeArrayIterator* iterator) { + const int slot = iterator->GetIndexOperand(1); + const int depth = iterator->GetUnsignedImmediateOperand(2); + Hints& context_hints = + environment()->register_hints(iterator->GetRegisterOperand(0)); + environment()->accumulator_hints().Clear(); + ProcessContextAccess(context_hints, slot, depth, + kSerializeSlotAndAddToAccumulator); +} + +void SerializerForBackgroundCompilation::VisitLdaImmutableCurrentContextSlot( + BytecodeArrayIterator* iterator) { + const int slot = iterator->GetIndexOperand(0); + const int depth = 0; + Hints& context_hints = environment()->current_context_hints(); + environment()->accumulator_hints().Clear(); + ProcessContextAccess(context_hints, slot, depth, + kSerializeSlotAndAddToAccumulator); +} + +void SerializerForBackgroundCompilation::VisitLdaModuleVariable( + BytecodeArrayIterator* iterator) { + const int depth = iterator->GetUnsignedImmediateOperand(1); + + // TODO(mvstanton): If we have a constant module, should we serialize the + // cell as well? Then we could put the value in the accumulator. 
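// [Editorial sketch -- illustrative only, not part of the upstream patch.]
// ProcessContextAccess above walks a statically-known context chain: follow
// previous() links until the requested depth is used up, and only a walk
// that lands exactly on depth 0 may serialize the slot. With plain pointers:
#if 0  // sketch
struct Ctx { Ctx* previous; };

// Returns the context 'depth' hops up, or nullptr if the chain is shorter.
Ctx* WalkUp(Ctx* c, int depth) {
  while (c != nullptr && depth > 0) {
    c = c->previous;
    --depth;
  }
  return depth == 0 ? c : nullptr;
}
#endif
// For a virtual context known to sit 'distance' hops above a constant, the
// walk starts at that constant with the depth reduced by 'distance', as the
// second loop above shows.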
+ environment()->accumulator_hints().Clear(); + ProcessContextAccess(environment()->current_context_hints(), + Context::EXTENSION_INDEX, depth, kSerializeSlot); +} + +void SerializerForBackgroundCompilation::VisitStaModuleVariable( + BytecodeArrayIterator* iterator) { + const int depth = iterator->GetUnsignedImmediateOperand(1); + ProcessContextAccess(environment()->current_context_hints(), + Context::EXTENSION_INDEX, depth, kSerializeSlot); +} + +void SerializerForBackgroundCompilation::VisitStaContextSlot( + BytecodeArrayIterator* iterator) { + const int slot = iterator->GetIndexOperand(1); + const int depth = iterator->GetUnsignedImmediateOperand(2); + Hints& register_hints = + environment()->register_hints(iterator->GetRegisterOperand(0)); + ProcessContextAccess(register_hints, slot, depth, kIgnoreSlot); +} + +void SerializerForBackgroundCompilation::VisitStaCurrentContextSlot( + BytecodeArrayIterator* iterator) { + const int slot = iterator->GetIndexOperand(0); + const int depth = 0; + Hints& context_hints = environment()->current_context_hints(); + ProcessContextAccess(context_hints, slot, depth, kIgnoreSlot); } void SerializerForBackgroundCompilation::VisitLdar( @@ -558,14 +1242,60 @@ void SerializerForBackgroundCompilation::VisitMov( environment()->register_hints(dst).Add(environment()->register_hints(src)); } +void SerializerForBackgroundCompilation::VisitCreateFunctionContext( + BytecodeArrayIterator* iterator) { + ProcessCreateContext(); +} + +void SerializerForBackgroundCompilation::VisitCreateBlockContext( + BytecodeArrayIterator* iterator) { + ProcessCreateContext(); +} + +void SerializerForBackgroundCompilation::VisitCreateEvalContext( + BytecodeArrayIterator* iterator) { + ProcessCreateContext(); +} + +void SerializerForBackgroundCompilation::VisitCreateWithContext( + BytecodeArrayIterator* iterator) { + ProcessCreateContext(); +} + +void SerializerForBackgroundCompilation::VisitCreateCatchContext( + BytecodeArrayIterator* iterator) { + ProcessCreateContext(); +} + +void SerializerForBackgroundCompilation::ProcessCreateContext() { + Hints& accumulator_hints = environment()->accumulator_hints(); + accumulator_hints.Clear(); + Hints& current_context_hints = environment()->current_context_hints(); + + // For each constant context, we must create a virtual context from + // it of distance one. + for (auto x : current_context_hints.constants()) { + if (x->IsContext()) { + Handle as_context(Handle::cast(x)); + accumulator_hints.AddVirtualContext(VirtualContext(1, as_context)); + } + } + + // For each virtual context, we must create a virtual context from + // it of distance {existing distance} + 1. 
+ for (auto x : current_context_hints.virtual_contexts()) { + accumulator_hints.AddVirtualContext( + VirtualContext(x.distance + 1, x.context)); + } +} + void SerializerForBackgroundCompilation::VisitCreateClosure( BytecodeArrayIterator* iterator) { - Handle shared( - SharedFunctionInfo::cast(iterator->GetConstantForIndexOperand(0)), - broker()->isolate()); + Handle shared = Handle::cast( + iterator->GetConstantForIndexOperand(0, broker()->isolate())); Handle feedback_cell = - environment()->function().feedback_vector->GetClosureFeedbackCell( + environment()->function().feedback_vector()->GetClosureFeedbackCell( iterator->GetIndexOperand(1)); FeedbackCellRef feedback_cell_ref(broker(), feedback_cell); Handle cell_value(feedback_cell->value(), broker()->isolate()); @@ -573,8 +1303,13 @@ void SerializerForBackgroundCompilation::VisitCreateClosure( environment()->accumulator_hints().Clear(); if (cell_value->IsFeedbackVector()) { - environment()->accumulator_hints().AddFunctionBlueprint( - {shared, Handle::cast(cell_value)}); + // Gather the context hints from the current context register hint + // structure. + FunctionBlueprint blueprint(shared, + Handle::cast(cell_value), + environment()->current_context_hints()); + + environment()->accumulator_hints().AddFunctionBlueprint(blueprint); } } @@ -685,6 +1420,16 @@ void SerializerForBackgroundCompilation::VisitCallWithSpread( ProcessCallVarArgs(iterator, ConvertReceiverMode::kAny, true); } +void SerializerForBackgroundCompilation::VisitCallJSRuntime( + BytecodeArrayIterator* iterator) { + environment()->accumulator_hints().Clear(); + + // BytecodeGraphBuilder::VisitCallJSRuntime needs the {runtime_index} + // slot in the native context to be serialized. + const int runtime_index = iterator->GetNativeContextIndexOperand(0); + broker()->native_context().get(runtime_index, true); +} + Hints SerializerForBackgroundCompilation::RunChildSerializer( CompilationSubject function, base::Optional new_target, const HintsVector& arguments, bool with_spread) { @@ -700,14 +1445,14 @@ Hints SerializerForBackgroundCompilation::RunChildSerializer( padded.pop_back(); // Remove the spread element. // Fill the rest with empty hints. padded.resize( - function.blueprint().shared->GetBytecodeArray().parameter_count(), + function.blueprint().shared()->GetBytecodeArray().parameter_count(), Hints(zone())); return RunChildSerializer(function, new_target, padded, false); } SerializerForBackgroundCompilation child_serializer( broker(), dependencies(), zone(), function, new_target, arguments, - flags().without(SerializerForBackgroundCompilationFlag::kOsr)); + flags()); return child_serializer.Run(); } @@ -734,7 +1479,7 @@ void SerializerForBackgroundCompilation::ProcessCallOrConstruct( // Incorporate feedback into hints. base::Optional feedback = GetHeapObjectFeedback( - broker(), environment()->function().feedback_vector, slot); + broker(), environment()->function().feedback_vector(), slot); if (feedback.has_value() && feedback->map().is_callable()) { if (new_target.has_value()) { // Construct; feedback is new_target, which often is also the callee. 
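// [Editorial sketch -- illustrative only, not part of the upstream patch.]
// The hunk below rewrites the per-callee loop of ProcessCallOrConstruct:
// API functions and builtins get dedicated serialization, and only callees
// that are inlineable and carry feedback recurse into a child serializer.
// The dispatch, with hypothetical predicates:
#if 0  // sketch
for (const Callee& c : callee_hints) {
  if (c.is_api_function) {
    ProcessApiCall(c);           // never inlineable
  } else if (c.has_builtin_id) {
    ProcessBuiltinCall(c);       // never inlineable
  }
  if (c.is_inlineable && c.has_feedback_vector) {
    accumulator_hints.Add(RunChildSerializer(c));
  }
}
#endif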
@@ -752,15 +1497,37 @@ void SerializerForBackgroundCompilation::ProcessCallOrConstruct( if (!hint->IsJSFunction()) continue; Handle function = Handle::cast(hint); - if (!function->shared().IsInlineable() || !function->has_feedback_vector()) - continue; + JSFunctionRef(broker(), function).Serialize(); + + Handle shared(function->shared(), broker()->isolate()); + + if (shared->IsApiFunction()) { + ProcessApiCall(shared, arguments); + DCHECK(!shared->IsInlineable()); + } else if (shared->HasBuiltinId()) { + ProcessBuiltinCall(shared, arguments); + DCHECK(!shared->IsInlineable()); + } + + if (!shared->IsInlineable() || !function->has_feedback_vector()) continue; environment()->accumulator_hints().Add(RunChildSerializer( - {function, broker()->isolate()}, new_target, arguments, with_spread)); + CompilationSubject(function, broker()->isolate(), zone()), new_target, + arguments, with_spread)); } for (auto hint : callee.function_blueprints()) { - if (!hint.shared->IsInlineable()) continue; + Handle shared = hint.shared(); + + if (shared->IsApiFunction()) { + ProcessApiCall(shared, arguments); + DCHECK(!shared->IsInlineable()); + } else if (shared->HasBuiltinId()) { + ProcessBuiltinCall(shared, arguments); + DCHECK(!shared->IsInlineable()); + } + + if (!shared->IsInlineable()) continue; environment()->accumulator_hints().Add(RunChildSerializer( CompilationSubject(hint), new_target, arguments, with_spread)); } @@ -788,22 +1555,222 @@ void SerializerForBackgroundCompilation::ProcessCallVarArgs( ProcessCallOrConstruct(callee, base::nullopt, arguments, slot); } -void SerializerForBackgroundCompilation::ProcessJump( - interpreter::BytecodeArrayIterator* iterator) { - int jump_target = iterator->GetJumpTargetOffset(); - int current_offset = iterator->current_offset(); - if (current_offset >= jump_target) return; +void SerializerForBackgroundCompilation::ProcessApiCall( + Handle target, const HintsVector& arguments) { + FunctionTemplateInfoRef target_template_info( + broker(), handle(target->function_data(), broker()->isolate())); + if (!target_template_info.has_call_code()) return; + + target_template_info.SerializeCallCode(); + + SharedFunctionInfoRef target_ref(broker(), target); + target_ref.SerializeFunctionTemplateInfo(); + + if (target_template_info.accept_any_receiver() && + target_template_info.is_signature_undefined()) + return; - stashed_environments_[jump_target] = new (zone()) Environment(*environment()); + CHECK_GE(arguments.size(), 1); + Hints const& receiver_hints = arguments[0]; + for (auto hint : receiver_hints.constants()) { + if (hint->IsUndefined()) { + // The receiver is the global proxy. 
+ Handle global_proxy = + broker()->native_context().global_proxy_object().object(); + ProcessReceiverMapForApiCall( + target_template_info, + handle(global_proxy->map(), broker()->isolate())); + continue; + } + + if (!hint->IsJSReceiver()) continue; + Handle receiver(Handle::cast(hint)); + + ProcessReceiverMapForApiCall(target_template_info, + handle(receiver->map(), broker()->isolate())); + } + + for (auto receiver_map : receiver_hints.maps()) { + ProcessReceiverMapForApiCall(target_template_info, receiver_map); + } } -void SerializerForBackgroundCompilation::MergeAfterJump( +void SerializerForBackgroundCompilation::ProcessReceiverMapForApiCall( + FunctionTemplateInfoRef& target, Handle receiver) { + if (receiver->is_access_check_needed()) { + return; + } + + MapRef receiver_map(broker(), receiver); + TRACE_BROKER(broker(), "Serializing holder for target:" << target); + + target.LookupHolderOfExpectedType(receiver_map, true); +} + +void SerializerForBackgroundCompilation::ProcessBuiltinCall( + Handle target, const HintsVector& arguments) { + DCHECK(target->HasBuiltinId()); + const int builtin_id = target->builtin_id(); + const char* name = Builtins::name(builtin_id); + TRACE_BROKER(broker(), "Serializing for call to builtin " << name); + switch (builtin_id) { + case Builtins::kPromisePrototypeCatch: { + // For JSCallReducer::ReducePromisePrototypeCatch. + CHECK_GE(arguments.size(), 1); + ProcessMapHintsForPromises(arguments[0]); + break; + } + case Builtins::kPromisePrototypeFinally: { + // For JSCallReducer::ReducePromisePrototypeFinally. + CHECK_GE(arguments.size(), 1); + ProcessMapHintsForPromises(arguments[0]); + break; + } + case Builtins::kPromisePrototypeThen: { + // For JSCallReducer::ReducePromisePrototypeThen. + CHECK_GE(arguments.size(), 1); + ProcessMapHintsForPromises(arguments[0]); + break; + } + case Builtins::kPromiseResolveTrampoline: + // For JSCallReducer::ReducePromiseInternalResolve and + // JSNativeContextSpecialization::ReduceJSResolvePromise. + if (arguments.size() >= 2) { + Hints const& resolution_hints = arguments[1]; + ProcessHintsForPromiseResolve(resolution_hints); + } + break; + case Builtins::kPromiseInternalResolve: + // For JSCallReducer::ReducePromiseInternalResolve and + // JSNativeContextSpecialization::ReduceJSResolvePromise. + if (arguments.size() >= 3) { + Hints const& resolution_hints = arguments[2]; + ProcessHintsForPromiseResolve(resolution_hints); + } + break; + case Builtins::kRegExpPrototypeTest: { + // For JSCallReducer::ReduceRegExpPrototypeTest. 
+
+void SerializerForBackgroundCompilation::ProcessBuiltinCall(
+    Handle<SharedFunctionInfo> target, const HintsVector& arguments) {
+  DCHECK(target->HasBuiltinId());
+  const int builtin_id = target->builtin_id();
+  const char* name = Builtins::name(builtin_id);
+  TRACE_BROKER(broker(), "Serializing for call to builtin " << name);
+  switch (builtin_id) {
+    case Builtins::kPromisePrototypeCatch: {
+      // For JSCallReducer::ReducePromisePrototypeCatch.
+      CHECK_GE(arguments.size(), 1);
+      ProcessMapHintsForPromises(arguments[0]);
+      break;
+    }
+    case Builtins::kPromisePrototypeFinally: {
+      // For JSCallReducer::ReducePromisePrototypeFinally.
+      CHECK_GE(arguments.size(), 1);
+      ProcessMapHintsForPromises(arguments[0]);
+      break;
+    }
+    case Builtins::kPromisePrototypeThen: {
+      // For JSCallReducer::ReducePromisePrototypeThen.
+      CHECK_GE(arguments.size(), 1);
+      ProcessMapHintsForPromises(arguments[0]);
+      break;
+    }
+    case Builtins::kPromiseResolveTrampoline:
+      // For JSCallReducer::ReducePromiseInternalResolve and
+      // JSNativeContextSpecialization::ReduceJSResolvePromise.
+      if (arguments.size() >= 2) {
+        Hints const& resolution_hints = arguments[1];
+        ProcessHintsForPromiseResolve(resolution_hints);
+      }
+      break;
+    case Builtins::kPromiseInternalResolve:
+      // For JSCallReducer::ReducePromiseInternalResolve and
+      // JSNativeContextSpecialization::ReduceJSResolvePromise.
+      if (arguments.size() >= 3) {
+        Hints const& resolution_hints = arguments[2];
+        ProcessHintsForPromiseResolve(resolution_hints);
+      }
+      break;
+    case Builtins::kRegExpPrototypeTest: {
+      // For JSCallReducer::ReduceRegExpPrototypeTest.
+      if (arguments.size() >= 1) {
+        Hints const& regexp_hints = arguments[0];
+        ProcessHintsForRegExpTest(regexp_hints);
+      }
+      break;
+    }
+    case Builtins::kFunctionPrototypeCall:
+      if (arguments.size() >= 1) {
+        Hints const& target_hints = arguments[0];
+        ProcessHintsForFunctionCall(target_hints);
+      }
+      break;
+    default:
+      break;
+  }
+}
+
+void SerializerForBackgroundCompilation::ProcessHintsForPromiseResolve(
+    Hints const& resolution_hints) {
+  auto processMap = [&](Handle<Map> map) {
+    broker()->CreateAccessInfoForLoadingThen(MapRef(broker(), map),
+                                             dependencies());
+  };
+
+  for (auto hint : resolution_hints.constants()) {
+    if (!hint->IsJSReceiver()) continue;
+    Handle<JSReceiver> receiver(Handle<JSReceiver>::cast(hint));
+    processMap(handle(receiver->map(), broker()->isolate()));
+  }
+  for (auto map_hint : resolution_hints.maps()) {
+    processMap(map_hint);
+  }
+}
+
+void SerializerForBackgroundCompilation::ProcessMapHintsForPromises(
+    Hints const& receiver_hints) {
+  // We need to serialize the prototypes on each receiver map.
+  for (auto constant : receiver_hints.constants()) {
+    if (!constant->IsJSPromise()) continue;
+    Handle<Map> map(Handle<HeapObject>::cast(constant)->map(),
+                    broker()->isolate());
+    MapRef(broker(), map).SerializePrototype();
+  }
+  for (auto map : receiver_hints.maps()) {
+    if (!map->IsJSPromiseMap()) continue;
+    MapRef(broker(), map).SerializePrototype();
+  }
+}
+
+PropertyAccessInfo SerializerForBackgroundCompilation::ProcessMapForRegExpTest(
+    MapRef map) {
+  PropertyAccessInfo ai_exec =
+      broker()->CreateAccessInfoForLoadingExec(map, dependencies());
+
+  Handle<JSObject> holder;
+  if (ai_exec.IsDataConstant() && ai_exec.holder().ToHandle(&holder)) {
+    // The property is on the prototype chain.
+    JSObjectRef holder_ref(broker(), holder);
+    holder_ref.GetOwnProperty(ai_exec.field_representation(),
+                              ai_exec.field_index(), true);
+  }
+  return ai_exec;
+}
+
+void SerializerForBackgroundCompilation::ProcessHintsForRegExpTest(
+    Hints const& regexp_hints) {
+  for (auto hint : regexp_hints.constants()) {
+    if (!hint->IsJSRegExp()) continue;
+    Handle<JSRegExp> regexp(Handle<JSRegExp>::cast(hint));
+    Handle<Map> regexp_map(regexp->map(), broker()->isolate());
+    PropertyAccessInfo ai_exec =
+        ProcessMapForRegExpTest(MapRef(broker(), regexp_map));
+    Handle<JSObject> holder;
+    if (ai_exec.IsDataConstant() && !ai_exec.holder().ToHandle(&holder)) {
+      // The property is on the object itself.
+      JSObjectRef holder_ref(broker(), regexp);
+      holder_ref.GetOwnProperty(ai_exec.field_representation(),
+                                ai_exec.field_index(), true);
+    }
+  }
+
+  for (auto map : regexp_hints.maps()) {
+    if (!map->IsJSRegExpMap()) continue;
+    ProcessMapForRegExpTest(MapRef(broker(), map));
+  }
+}
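// Sketch of the holder convention used by ai_exec in the two RegExp helpers
// above: the access info carries an optional holder, absent when the 'exec'
// property sits on the receiver itself and present when it sits on the
// prototype chain. std::optional stands in for V8's MaybeHandle here.
#include <iostream>
#include <optional>
#include <string>

struct ToyAccessInfo {
  bool is_data_constant = false;
  std::optional<std::string> holder;  // empty => property on the receiver
};

void SerializeExecHolder(const ToyAccessInfo& info,
                         const std::string& receiver) {
  if (!info.is_data_constant) return;
  // value_or picks the prototype-chain holder when present, else the receiver.
  std::cout << "serialize property on " << info.holder.value_or(receiver)
            << "\n";
}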
+
+void SerializerForBackgroundCompilation::ProcessHintsForFunctionCall(
+    Hints const& target_hints) {
+  for (auto constant : target_hints.constants()) {
+    if (!constant->IsJSFunction()) continue;
+    JSFunctionRef func(broker(), constant);
+    func.Serialize();
+  }
+}
+
+void SerializerForBackgroundCompilation::ContributeToJumpTargetEnvironment(
+    int target_offset) {
+  auto it = jump_target_environments_.find(target_offset);
+  if (it == jump_target_environments_.end()) {
+    jump_target_environments_[target_offset] =
+        new (zone()) Environment(*environment());
+  } else {
+    it->second->Merge(environment());
+  }
+}
+
+void SerializerForBackgroundCompilation::IncorporateJumpTargetEnvironment(
+    int target_offset) {
+  auto it = jump_target_environments_.find(target_offset);
+  if (it != jump_target_environments_.end()) {
+    environment()->Merge(it->second);
+    jump_target_environments_.erase(it);
+  }
+}
+
+void SerializerForBackgroundCompilation::ProcessJump(
     interpreter::BytecodeArrayIterator* iterator) {
-  int current_offset = iterator->current_offset();
-  auto stash = stashed_environments_.find(current_offset);
-  if (stash != stashed_environments_.end()) {
-    environment()->Merge(stash->second);
-    stashed_environments_.erase(stash);
+  int jump_target = iterator->GetJumpTargetOffset();
+  if (iterator->current_offset() < jump_target) {
+    ContributeToJumpTargetEnvironment(jump_target);
   }
 }
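// Sketch of the jump-target bookkeeping that replaces the old one-shot
// stashed_environments_ above: forward jumps *contribute* (clone or merge)
// into a per-offset environment, and reaching that offset *incorporates*
// (merge and erase) it, so several predecessors accumulate into one merged
// state. std::map and a trivial environment replace the zone-allocated
// originals; nothing here is V8 API.
#include <map>
#include <set>

struct ToyEnvironment {
  std::set<int> hints;
  void Merge(const ToyEnvironment& other) {
    hints.insert(other.hints.begin(), other.hints.end());
  }
};

class ToySerializer {
 public:
  void ContributeToJumpTarget(int offset) {
    auto it = jump_target_environments_.find(offset);
    if (it == jump_target_environments_.end()) {
      jump_target_environments_[offset] = environment_;  // first contribution
    } else {
      it->second.Merge(environment_);  // merge with earlier contributions
    }
  }
  void IncorporateJumpTarget(int offset) {
    auto it = jump_target_environments_.find(offset);
    if (it != jump_target_environments_.end()) {
      environment_.Merge(it->second);
      jump_target_environments_.erase(it);  // consumed exactly once
    }
  }

 private:
  ToyEnvironment environment_;
  std::map<int, ToyEnvironment> jump_target_environments_;
};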
@@ -813,10 +1780,25 @@ void SerializerForBackgroundCompilation::VisitReturn(
   environment()->ClearEphemeralHints();
 }
 
+void SerializerForBackgroundCompilation::VisitSwitchOnSmiNoFeedback(
+    interpreter::BytecodeArrayIterator* iterator) {
+  interpreter::JumpTableTargetOffsets targets =
+      iterator->GetJumpTableTargetOffsets();
+  for (const auto& target : targets) {
+    ContributeToJumpTargetEnvironment(target.target_offset);
+  }
+}
+
+void SerializerForBackgroundCompilation::VisitSwitchOnGeneratorState(
+    interpreter::BytecodeArrayIterator* iterator) {
+  for (const auto& target : GetBytecodeAnalysis(false).resume_jump_targets()) {
+    ContributeToJumpTargetEnvironment(target.target_offset());
+  }
+}
+
 void SerializerForBackgroundCompilation::Environment::ExportRegisterHints(
     interpreter::Register first, size_t count, HintsVector& dst) {
-  dst.resize(dst.size() + count, Hints(zone()));
-  int reg_base = first.index();
+  const int reg_base = first.index();
   for (int i = 0; i < static_cast<int>(count); ++i) {
     dst.push_back(register_hints(interpreter::Register(reg_base + i)));
   }
@@ -856,8 +1838,8 @@ GlobalAccessFeedback const*
 SerializerForBackgroundCompilation::ProcessFeedbackForGlobalAccess(
     FeedbackSlot slot) {
   if (slot.IsInvalid()) return nullptr;
-  if (environment()->function().feedback_vector.is_null()) return nullptr;
-  FeedbackSource source(environment()->function().feedback_vector, slot);
+  if (environment()->function().feedback_vector().is_null()) return nullptr;
+  FeedbackSource source(environment()->function().feedback_vector(), slot);
 
   if (broker()->HasFeedback(source)) {
     return broker()->GetGlobalAccessFeedback(source);
@@ -889,14 +1871,31 @@ void SerializerForBackgroundCompilation::VisitLdaGlobalInsideTypeof(
   VisitLdaGlobal(iterator);
 }
 
-void SerializerForBackgroundCompilation::VisitLdaLookupGlobalSlot(
+void SerializerForBackgroundCompilation::ProcessCheckContextExtensions(
+    int depth) {
+  // For BytecodeGraphBuilder::CheckContextExtensions.
+  Hints& context_hints = environment()->current_context_hints();
+  for (int i = 0; i < depth; i++) {
+    ProcessContextAccess(context_hints, Context::EXTENSION_INDEX, i,
+                         kSerializeSlot);
+  }
+}
+
+void SerializerForBackgroundCompilation::ProcessLdaLookupGlobalSlot(
     BytecodeArrayIterator* iterator) {
+  ProcessCheckContextExtensions(iterator->GetUnsignedImmediateOperand(2));
+  // TODO(neis): BytecodeGraphBuilder may insert a JSLoadGlobal.
   VisitLdaGlobal(iterator);
 }
 
+void SerializerForBackgroundCompilation::VisitLdaLookupGlobalSlot(
+    BytecodeArrayIterator* iterator) {
+  ProcessLdaLookupGlobalSlot(iterator);
+}
+
 void SerializerForBackgroundCompilation::VisitLdaLookupGlobalSlotInsideTypeof(
     BytecodeArrayIterator* iterator) {
-  VisitLdaGlobal(iterator);
+  ProcessLdaLookupGlobalSlot(iterator);
 }
 
 void SerializerForBackgroundCompilation::VisitStaGlobal(
@@ -905,6 +1904,26 @@ void SerializerForBackgroundCompilation::VisitStaGlobal(
   ProcessFeedbackForGlobalAccess(slot);
 }
 
+void SerializerForBackgroundCompilation::ProcessLdaLookupContextSlot(
+    BytecodeArrayIterator* iterator) {
+  const int slot_index = iterator->GetIndexOperand(1);
+  const int depth = iterator->GetUnsignedImmediateOperand(2);
+  ProcessCheckContextExtensions(depth);
+  Hints& context_hints = environment()->current_context_hints();
+  environment()->accumulator_hints().Clear();
+  ProcessContextAccess(context_hints, slot_index, depth, kIgnoreSlot);
+}
+
+void SerializerForBackgroundCompilation::VisitLdaLookupContextSlot(
+    BytecodeArrayIterator* iterator) {
+  ProcessLdaLookupContextSlot(iterator);
+}
+
+void SerializerForBackgroundCompilation::VisitLdaLookupContextSlotInsideTypeof(
+    BytecodeArrayIterator* iterator) {
+  ProcessLdaLookupContextSlot(iterator);
+}
+
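// Sketch of ProcessCheckContextExtensions above: a lookup that skips `depth`
// scopes must check the extension slot of every intervening context, so the
// serializer touches Context::EXTENSION_INDEX at each level before the slot
// access itself. A toy context chain as a singly linked list (illustrative
// only):
struct ToyContext {
  bool has_extension = false;
  const ToyContext* previous = nullptr;
};

bool AnyContextExtensionUpTo(const ToyContext* context, int depth) {
  for (int i = 0; i < depth && context != nullptr;
       ++i, context = context->previous) {
    if (context->has_extension) return true;  // cf. EXTENSION_INDEX access
  }
  return false;
}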
 namespace {
 template <class MapContainer>
 MapHandles GetRelevantReceiverMaps(Isolate* isolate, MapContainer const& maps) {
@@ -922,9 +1941,10 @@ MapHandles GetRelevantReceiverMaps(Isolate* isolate, MapContainer const& maps) {
 
 ElementAccessFeedback const*
 SerializerForBackgroundCompilation::ProcessFeedbackMapsForElementAccess(
-    const MapHandles& maps, AccessMode mode) {
+    const MapHandles& maps, AccessMode mode,
+    KeyedAccessMode const& keyed_mode) {
   ElementAccessFeedback const* result =
-      broker()->ProcessFeedbackMapsForElementAccess(maps);
+      broker()->ProcessFeedbackMapsForElementAccess(maps, keyed_mode);
   for (ElementAccessFeedback::MapIterator it = result->all_maps(broker());
        !it.done(); it.advance()) {
     switch (mode) {
@@ -952,9 +1972,34 @@ SerializerForBackgroundCompilation::ProcessFeedbackMapsForNamedAccess(
     ProcessMapForNamedPropertyAccess(map_ref, name);
     AccessInfoFactory access_info_factory(broker(), dependencies(),
                                           broker()->zone());
-    access_infos.push_back(access_info_factory.ComputePropertyAccessInfo(
+    PropertyAccessInfo info(access_info_factory.ComputePropertyAccessInfo(
         map, name.object(), mode));
+    access_infos.push_back(info);
+
+    // TODO(turbofan): We want to take receiver hints into account as well,
+    // not only the feedback maps.
+    // For JSNativeContextSpecialization::InlinePropertySetterCall
+    // and InlinePropertyGetterCall.
+    if (info.IsAccessorConstant() && !info.constant().is_null()) {
+      if (info.constant()->IsJSFunction()) {
+        // For JSCallReducer::ReduceCallApiFunction.
+        Handle<SharedFunctionInfo> sfi(
+            handle(Handle<JSFunction>::cast(info.constant())->shared(),
+                   broker()->isolate()));
+        if (sfi->IsApiFunction()) {
+          FunctionTemplateInfoRef fti_ref(
+              broker(), handle(sfi->get_api_func_data(), broker()->isolate()));
+          if (fti_ref.has_call_code()) fti_ref.SerializeCallCode();
+          ProcessReceiverMapForApiCall(fti_ref, map);
+        }
+      } else {
+        FunctionTemplateInfoRef fti_ref(
+            broker(), Handle<FunctionTemplateInfo>::cast(info.constant()));
+        if (fti_ref.has_call_code()) fti_ref.SerializeCallCode();
+      }
+    }
   }
+  DCHECK(!access_infos.empty());
   return new (broker()->zone()) NamedAccessFeedback(name, access_infos);
 }
@@ -962,9 +2007,9 @@ SerializerForBackgroundCompilation::ProcessFeedbackMapsForNamedAccess(
 void SerializerForBackgroundCompilation::ProcessFeedbackForPropertyAccess(
     FeedbackSlot slot, AccessMode mode, base::Optional<NameRef> static_name) {
   if (slot.IsInvalid()) return;
-  if (environment()->function().feedback_vector.is_null()) return;
+  if (environment()->function().feedback_vector().is_null()) return;
 
-  FeedbackNexus nexus(environment()->function().feedback_vector, slot);
+  FeedbackNexus nexus(environment()->function().feedback_vector(), slot);
   FeedbackSource source(nexus);
   if (broker()->HasFeedback(source)) return;
 
@@ -992,8 +2037,10 @@ void SerializerForBackgroundCompilation::ProcessFeedbackForPropertyAccess(
       static_name.has_value() ? static_name : broker()->GetNameFeedback(nexus);
   if (name.has_value()) {
     processed = ProcessFeedbackMapsForNamedAccess(maps, mode, *name);
-  } else if (nexus.GetKeyType() == ELEMENT && nexus.ic_state() != MEGAMORPHIC) {
-    processed = ProcessFeedbackMapsForElementAccess(maps, mode);
+  } else if (nexus.GetKeyType() == ELEMENT) {
+    DCHECK_NE(nexus.ic_state(), MEGAMORPHIC);
+    processed = ProcessFeedbackMapsForElementAccess(
+        maps, mode, KeyedAccessMode::FromNexus(nexus));
   }
   broker()->SetFeedback(source, processed);
 }
@@ -1087,8 +2134,8 @@ void SerializerForBackgroundCompilation::ProcessNamedPropertyAccess(
     BytecodeArrayIterator* iterator, AccessMode mode) {
   Hints const& receiver =
       environment()->register_hints(iterator->GetRegisterOperand(0));
-  Handle<Name> name(Name::cast(iterator->GetConstantForIndexOperand(1)),
-                    broker()->isolate());
+  Handle<Name> name = Handle<Name>::cast(
+      iterator->GetConstantForIndexOperand(1, broker()->isolate()));
   FeedbackSlot slot = iterator->GetSlotOperand(2);
   ProcessNamedPropertyAccess(receiver, NameRef(broker(), name), slot, mode);
 }
@@ -1176,6 +2223,31 @@ UNCONDITIONAL_JUMPS_LIST(DEFINE_UNCONDITIONAL_JUMP)
 IGNORED_BYTECODE_LIST(DEFINE_IGNORE)
 #undef DEFINE_IGNORE
 
+#define DEFINE_UNREACHABLE(name, ...)                     \
+  void SerializerForBackgroundCompilation::Visit##name(   \
+      BytecodeArrayIterator* iterator) {                  \
+    UNREACHABLE();                                        \
+  }
+UNREACHABLE_BYTECODE_LIST(DEFINE_UNREACHABLE)
+#undef DEFINE_UNREACHABLE
+
+#define DEFINE_KILL(name, ...)
\ + void SerializerForBackgroundCompilation::Visit##name( \ + BytecodeArrayIterator* iterator) { \ + environment()->Kill(); \ + } +KILL_ENVIRONMENT_LIST(DEFINE_KILL) +#undef DEFINE_KILL + +#undef CLEAR_ENVIRONMENT_LIST +#undef KILL_ENVIRONMENT_LIST +#undef CLEAR_ACCUMULATOR_LIST +#undef UNCONDITIONAL_JUMPS_LIST +#undef CONDITIONAL_JUMPS_LIST +#undef IGNORED_BYTECODE_LIST +#undef UNREACHABLE_BYTECODE_LIST +#undef SUPPORTED_BYTECODE_LIST + } // namespace compiler } // namespace internal } // namespace v8 diff --git a/deps/v8/src/compiler/serializer-for-background-compilation.h b/deps/v8/src/compiler/serializer-for-background-compilation.h index 0ee37ef280074e..881ed61a555231 100644 --- a/deps/v8/src/compiler/serializer-for-background-compilation.h +++ b/deps/v8/src/compiler/serializer-for-background-compilation.h @@ -5,346 +5,31 @@ #ifndef V8_COMPILER_SERIALIZER_FOR_BACKGROUND_COMPILATION_H_ #define V8_COMPILER_SERIALIZER_FOR_BACKGROUND_COMPILATION_H_ -#include "src/base/optional.h" -#include "src/compiler/access-info.h" -#include "src/utils/utils.h" #include "src/handles/handles.h" -#include "src/handles/maybe-handles.h" -#include "src/zone/zone-containers.h" namespace v8 { namespace internal { -namespace interpreter { -class BytecodeArrayIterator; -} // namespace interpreter - -class BytecodeArray; -class FeedbackVector; -class LookupIterator; -class NativeContext; -class ScriptContextTable; -class SharedFunctionInfo; -class SourcePositionTableIterator; +class BailoutId; class Zone; namespace compiler { -#define CLEAR_ENVIRONMENT_LIST(V) \ - V(Abort) \ - V(CallRuntime) \ - V(CallRuntimeForPair) \ - V(CreateBlockContext) \ - V(CreateEvalContext) \ - V(CreateFunctionContext) \ - V(Debugger) \ - V(PopContext) \ - V(PushContext) \ - V(ResumeGenerator) \ - V(ReThrow) \ - V(StaContextSlot) \ - V(StaCurrentContextSlot) \ - V(SuspendGenerator) \ - V(SwitchOnGeneratorState) \ - V(Throw) - -#define CLEAR_ACCUMULATOR_LIST(V) \ - V(Add) \ - V(AddSmi) \ - V(BitwiseAnd) \ - V(BitwiseAndSmi) \ - V(BitwiseNot) \ - V(BitwiseOr) \ - V(BitwiseOrSmi) \ - V(BitwiseXor) \ - V(BitwiseXorSmi) \ - V(CloneObject) \ - V(CreateArrayFromIterable) \ - V(CreateArrayLiteral) \ - V(CreateEmptyArrayLiteral) \ - V(CreateEmptyObjectLiteral) \ - V(CreateMappedArguments) \ - V(CreateObjectLiteral) \ - V(CreateRestParameter) \ - V(CreateUnmappedArguments) \ - V(Dec) \ - V(DeletePropertySloppy) \ - V(DeletePropertyStrict) \ - V(Div) \ - V(DivSmi) \ - V(Exp) \ - V(ExpSmi) \ - V(ForInContinue) \ - V(ForInEnumerate) \ - V(ForInNext) \ - V(ForInStep) \ - V(GetTemplateObject) \ - V(Inc) \ - V(LdaContextSlot) \ - V(LdaCurrentContextSlot) \ - V(LdaImmutableContextSlot) \ - V(LdaImmutableCurrentContextSlot) \ - V(LogicalNot) \ - V(Mod) \ - V(ModSmi) \ - V(Mul) \ - V(MulSmi) \ - V(Negate) \ - V(SetPendingMessage) \ - V(ShiftLeft) \ - V(ShiftLeftSmi) \ - V(ShiftRight) \ - V(ShiftRightLogical) \ - V(ShiftRightLogicalSmi) \ - V(ShiftRightSmi) \ - V(Sub) \ - V(SubSmi) \ - V(TestEqual) \ - V(TestEqualStrict) \ - V(TestGreaterThan) \ - V(TestGreaterThanOrEqual) \ - V(TestInstanceOf) \ - V(TestLessThan) \ - V(TestLessThanOrEqual) \ - V(TestNull) \ - V(TestReferenceEqual) \ - V(TestTypeOf) \ - V(TestUndefined) \ - V(TestUndetectable) \ - V(ToBooleanLogicalNot) \ - V(ToName) \ - V(ToNumber) \ - V(ToNumeric) \ - V(ToString) \ - V(TypeOf) - -#define UNCONDITIONAL_JUMPS_LIST(V) \ - V(Jump) \ - V(JumpConstant) \ - V(JumpLoop) - -#define CONDITIONAL_JUMPS_LIST(V) \ - V(JumpIfFalse) \ - V(JumpIfFalseConstant) \ - V(JumpIfJSReceiver) \ - 
V(JumpIfJSReceiverConstant) \ - V(JumpIfNotNull) \ - V(JumpIfNotNullConstant) \ - V(JumpIfNotUndefined) \ - V(JumpIfNotUndefinedConstant) \ - V(JumpIfNull) \ - V(JumpIfNullConstant) \ - V(JumpIfToBooleanFalse) \ - V(JumpIfToBooleanFalseConstant) \ - V(JumpIfToBooleanTrue) \ - V(JumpIfToBooleanTrueConstant) \ - V(JumpIfTrue) \ - V(JumpIfTrueConstant) \ - V(JumpIfUndefined) \ - V(JumpIfUndefinedConstant) - -#define IGNORED_BYTECODE_LIST(V) \ - V(CallNoFeedback) \ - V(LdaNamedPropertyNoFeedback) \ - V(StackCheck) \ - V(StaNamedPropertyNoFeedback) \ - V(ThrowReferenceErrorIfHole) \ - V(ThrowSuperAlreadyCalledIfNotHole) \ - V(ThrowSuperNotCalledIfHole) - -#define SUPPORTED_BYTECODE_LIST(V) \ - V(CallAnyReceiver) \ - V(CallProperty) \ - V(CallProperty0) \ - V(CallProperty1) \ - V(CallProperty2) \ - V(CallUndefinedReceiver) \ - V(CallUndefinedReceiver0) \ - V(CallUndefinedReceiver1) \ - V(CallUndefinedReceiver2) \ - V(CallWithSpread) \ - V(Construct) \ - V(ConstructWithSpread) \ - V(CreateClosure) \ - V(ExtraWide) \ - V(GetSuperConstructor) \ - V(Illegal) \ - V(LdaConstant) \ - V(LdaFalse) \ - V(LdaGlobal) \ - V(LdaGlobalInsideTypeof) \ - V(LdaKeyedProperty) \ - V(LdaLookupGlobalSlot) \ - V(LdaLookupGlobalSlotInsideTypeof) \ - V(LdaNamedProperty) \ - V(LdaNull) \ - V(Ldar) \ - V(LdaSmi) \ - V(LdaTheHole) \ - V(LdaTrue) \ - V(LdaUndefined) \ - V(LdaZero) \ - V(Mov) \ - V(Return) \ - V(StaGlobal) \ - V(StaInArrayLiteral) \ - V(StaKeyedProperty) \ - V(StaNamedOwnProperty) \ - V(StaNamedProperty) \ - V(Star) \ - V(TestIn) \ - V(Wide) \ - CLEAR_ENVIRONMENT_LIST(V) \ - CLEAR_ACCUMULATOR_LIST(V) \ - CONDITIONAL_JUMPS_LIST(V) \ - UNCONDITIONAL_JUMPS_LIST(V) \ - IGNORED_BYTECODE_LIST(V) - +class CompilationDependencies; class JSHeapBroker; -template -struct HandleComparator { - bool operator()(const Handle& lhs, const Handle& rhs) const { - return lhs.address() < rhs.address(); - } -}; - -struct FunctionBlueprint { - Handle shared; - Handle feedback_vector; - - bool operator<(const FunctionBlueprint& other) const { - // A feedback vector is never used for more than one SFI, so it can - // be used for strict ordering of blueprints. 
- DCHECK_IMPLIES(feedback_vector.equals(other.feedback_vector), - shared.equals(other.shared)); - return HandleComparator()(feedback_vector, - other.feedback_vector); - } -}; - -class CompilationSubject { - public: - explicit CompilationSubject(FunctionBlueprint blueprint) - : blueprint_(blueprint) {} - CompilationSubject(Handle closure, Isolate* isolate); - - FunctionBlueprint blueprint() const { return blueprint_; } - MaybeHandle closure() const { return closure_; } - - private: - FunctionBlueprint blueprint_; - MaybeHandle closure_; -}; - -using ConstantsSet = ZoneSet, HandleComparator>; -using MapsSet = ZoneSet, HandleComparator>; -using BlueprintsSet = ZoneSet; - -class Hints { - public: - explicit Hints(Zone* zone); - - const ConstantsSet& constants() const; - const MapsSet& maps() const; - const BlueprintsSet& function_blueprints() const; - - void AddConstant(Handle constant); - void AddMap(Handle map); - void AddFunctionBlueprint(FunctionBlueprint function_blueprint); - - void Add(const Hints& other); - - void Clear(); - bool IsEmpty() const; - - private: - ConstantsSet constants_; - MapsSet maps_; - BlueprintsSet function_blueprints_; -}; -using HintsVector = ZoneVector; - enum class SerializerForBackgroundCompilationFlag : uint8_t { kBailoutOnUninitialized = 1 << 0, kCollectSourcePositions = 1 << 1, - kOsr = 1 << 2, + kAnalyzeEnvironmentLiveness = 1 << 2, }; using SerializerForBackgroundCompilationFlags = base::Flags; -// The SerializerForBackgroundCompilation makes sure that the relevant function -// data such as bytecode, SharedFunctionInfo and FeedbackVector, used by later -// optimizations in the compiler, is copied to the heap broker. -class SerializerForBackgroundCompilation { - public: - SerializerForBackgroundCompilation( - JSHeapBroker* broker, CompilationDependencies* dependencies, Zone* zone, - Handle closure, - SerializerForBackgroundCompilationFlags flags); - Hints Run(); // NOTE: Returns empty for an already-serialized function. - - class Environment; - - private: - SerializerForBackgroundCompilation( - JSHeapBroker* broker, CompilationDependencies* dependencies, Zone* zone, - CompilationSubject function, base::Optional new_target, - const HintsVector& arguments, - SerializerForBackgroundCompilationFlags flags); - - bool BailoutOnUninitialized(FeedbackSlot slot); - - void TraverseBytecode(); - -#define DECLARE_VISIT_BYTECODE(name, ...) 
\ - void Visit##name(interpreter::BytecodeArrayIterator* iterator); - SUPPORTED_BYTECODE_LIST(DECLARE_VISIT_BYTECODE) -#undef DECLARE_VISIT_BYTECODE - - void ProcessCallOrConstruct(Hints callee, base::Optional new_target, - const HintsVector& arguments, FeedbackSlot slot, - bool with_spread = false); - void ProcessCallVarArgs(interpreter::BytecodeArrayIterator* iterator, - ConvertReceiverMode receiver_mode, - bool with_spread = false); - - void ProcessJump(interpreter::BytecodeArrayIterator* iterator); - void MergeAfterJump(interpreter::BytecodeArrayIterator* iterator); - - void ProcessKeyedPropertyAccess(Hints const& receiver, Hints const& key, - FeedbackSlot slot, AccessMode mode); - void ProcessNamedPropertyAccess(interpreter::BytecodeArrayIterator* iterator, - AccessMode mode); - void ProcessNamedPropertyAccess(Hints const& receiver, NameRef const& name, - FeedbackSlot slot, AccessMode mode); - - GlobalAccessFeedback const* ProcessFeedbackForGlobalAccess(FeedbackSlot slot); - NamedAccessFeedback const* ProcessFeedbackMapsForNamedAccess( - const MapHandles& maps, AccessMode mode, NameRef const& name); - ElementAccessFeedback const* ProcessFeedbackMapsForElementAccess( - const MapHandles& maps, AccessMode mode); - void ProcessFeedbackForPropertyAccess(FeedbackSlot slot, AccessMode mode, - base::Optional static_name); - void ProcessMapForNamedPropertyAccess(MapRef const& map, NameRef const& name); - - Hints RunChildSerializer(CompilationSubject function, - base::Optional new_target, - const HintsVector& arguments, bool with_spread); - - JSHeapBroker* broker() const { return broker_; } - CompilationDependencies* dependencies() const { return dependencies_; } - Zone* zone() const { return zone_; } - Environment* environment() const { return environment_; } - SerializerForBackgroundCompilationFlags flags() const { return flags_; } - - JSHeapBroker* const broker_; - CompilationDependencies* const dependencies_; - Zone* const zone_; - Environment* const environment_; - ZoneUnorderedMap stashed_environments_; - SerializerForBackgroundCompilationFlags const flags_; -}; +void RunSerializerForBackgroundCompilation( + JSHeapBroker* broker, CompilationDependencies* dependencies, Zone* zone, + Handle closure, SerializerForBackgroundCompilationFlags flags, + BailoutId osr_offset); } // namespace compiler } // namespace internal diff --git a/deps/v8/src/compiler/simd-scalar-lowering.cc b/deps/v8/src/compiler/simd-scalar-lowering.cc index cab398c1606df3..6deba2b00291c1 100644 --- a/deps/v8/src/compiler/simd-scalar-lowering.cc +++ b/deps/v8/src/compiler/simd-scalar-lowering.cc @@ -16,6 +16,7 @@ namespace internal { namespace compiler { namespace { +static const int kNumLanes64 = 2; static const int kNumLanes32 = 4; static const int kNumLanes16 = 8; static const int kNumLanes8 = 16; @@ -76,6 +77,8 @@ void SimdScalarLowering::LowerGraph() { } } +#define FOREACH_INT64X2_OPCODE(V) V(I64x2Splat) + #define FOREACH_INT32X4_OPCODE(V) \ V(I32x4Splat) \ V(I32x4ExtractLane) \ @@ -119,6 +122,8 @@ void SimdScalarLowering::LowerGraph() { V(S1x16AnyTrue) \ V(S1x16AllTrue) +#define FOREACH_FLOAT64X2_OPCODE(V) V(F64x2Splat) + #define FOREACH_FLOAT32X4_OPCODE(V) \ V(F32x4Splat) \ V(F32x4ExtractLane) \ @@ -208,8 +213,12 @@ void SimdScalarLowering::LowerGraph() { MachineType SimdScalarLowering::MachineTypeFrom(SimdType simdType) { switch (simdType) { + case SimdType::kFloat64x2: + return MachineType::Float64(); case SimdType::kFloat32x4: return MachineType::Float32(); + case SimdType::kInt64x2: + return 
MachineType::Int64(); case SimdType::kInt32x4: return MachineType::Int32(); case SimdType::kInt16x8: @@ -223,6 +232,14 @@ MachineType SimdScalarLowering::MachineTypeFrom(SimdType simdType) { void SimdScalarLowering::SetLoweredType(Node* node, Node* output) { switch (node->opcode()) { #define CASE_STMT(name) case IrOpcode::k##name: + FOREACH_FLOAT64X2_OPCODE(CASE_STMT) { + replacements_[node->id()].type = SimdType::kFloat64x2; + break; + } + FOREACH_INT64X2_OPCODE(CASE_STMT) { + replacements_[node->id()].type = SimdType::kInt64x2; + break; + } FOREACH_INT32X4_OPCODE(CASE_STMT) case IrOpcode::kReturn: case IrOpcode::kParameter: @@ -326,7 +343,9 @@ static int GetReturnCountAfterLoweringSimd128( int SimdScalarLowering::NumLanes(SimdType type) { int num_lanes = 0; - if (type == SimdType::kFloat32x4 || type == SimdType::kInt32x4) { + if (type == SimdType::kFloat64x2 || type == SimdType::kInt64x2) { + num_lanes = kNumLanes64; + } else if (type == SimdType::kFloat32x4 || type == SimdType::kInt32x4) { num_lanes = kNumLanes32; } else if (type == SimdType::kInt16x8) { num_lanes = kNumLanes16; @@ -1198,7 +1217,7 @@ void SimdScalarLowering::LowerNode(Node* node) { } F32X4_UNOP_CASE(Abs) F32X4_UNOP_CASE(Neg) -#undef F32x4_UNOP_CASE +#undef F32X4_UNOP_CASE case IrOpcode::kF32x4RecipApprox: case IrOpcode::kF32x4RecipSqrtApprox: { DCHECK_EQ(1, node->InputCount()); @@ -1223,8 +1242,10 @@ void SimdScalarLowering::LowerNode(Node* node) { LowerUnaryOp(node, SimdType::kInt32x4, machine()->RoundUint32ToFloat32()); break; } - case IrOpcode::kI32x4Splat: + case IrOpcode::kF64x2Splat: case IrOpcode::kF32x4Splat: + case IrOpcode::kI64x2Splat: + case IrOpcode::kI32x4Splat: case IrOpcode::kI16x8Splat: case IrOpcode::kI8x16Splat: { Node** rep_node = zone()->NewArray(num_lanes); @@ -1347,7 +1368,7 @@ void SimdScalarLowering::LowerNode(Node* node) { } case IrOpcode::kS8x16Shuffle: { DCHECK_EQ(2, node->InputCount()); - const uint8_t* shuffle = OpParameter(node->op()); + const uint8_t* shuffle = S8x16ShuffleOf(node->op()); Node** rep_left = GetReplacementsWithType(node->InputAt(0), rep_type); Node** rep_right = GetReplacementsWithType(node->InputAt(1), rep_type); Node** rep_node = zone()->NewArray(16); diff --git a/deps/v8/src/compiler/simd-scalar-lowering.h b/deps/v8/src/compiler/simd-scalar-lowering.h index 01ea195bdc8bf7..76723fcc7726bc 100644 --- a/deps/v8/src/compiler/simd-scalar-lowering.h +++ b/deps/v8/src/compiler/simd-scalar-lowering.h @@ -32,7 +32,14 @@ class SimdScalarLowering { private: enum class State : uint8_t { kUnvisited, kOnStack, kVisited }; - enum class SimdType : uint8_t { kFloat32x4, kInt32x4, kInt16x8, kInt8x16 }; + enum class SimdType : uint8_t { + kFloat64x2, + kFloat32x4, + kInt64x2, + kInt32x4, + kInt16x8, + kInt8x16 + }; #if defined(V8_TARGET_BIG_ENDIAN) static constexpr int kLaneOffsets[16] = {15, 14, 13, 12, 11, 10, 9, 8, diff --git a/deps/v8/src/compiler/simplified-lowering.cc b/deps/v8/src/compiler/simplified-lowering.cc index 8bc0e7af7b16b8..b028a76bb0d8ea 100644 --- a/deps/v8/src/compiler/simplified-lowering.cc +++ b/deps/v8/src/compiler/simplified-lowering.cc @@ -8,6 +8,7 @@ #include "src/base/bits.h" #include "src/codegen/code-factory.h" +#include "src/codegen/tick-counter.h" #include "src/compiler/access-builder.h" #include "src/compiler/common-operator.h" #include "src/compiler/compiler-source-position-table.h" @@ -22,8 +23,8 @@ #include "src/compiler/simplified-operator.h" #include "src/compiler/type-cache.h" #include "src/numbers/conversions-inl.h" -#include "src/utils/address-map.h" 
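// Before moving on to simplified-lowering.cc: a hedged, standalone sketch of
// the lane arithmetic the SimdScalarLowering hunks above rely on. Every SIMD
// value is a 128-bit vector, so the lane count is 128 divided by the lane
// width, which is where the new kNumLanes64 == 2 for F64x2/I64x2 comes from.
// The enum mirrors SimdType in spirit only.
enum class ToySimdType { kFloat64x2, kFloat32x4, kInt64x2, kInt32x4,
                         kInt16x8, kInt8x16 };

constexpr int ToyNumLanes(ToySimdType type) {
  switch (type) {
    case ToySimdType::kFloat64x2:
    case ToySimdType::kInt64x2:
      return 2;   // 128 / 64
    case ToySimdType::kFloat32x4:
    case ToySimdType::kInt32x4:
      return 4;   // 128 / 32
    case ToySimdType::kInt16x8:
      return 8;   // 128 / 16
    case ToySimdType::kInt8x16:
      return 16;  // 128 / 8
  }
  return 0;
}

static_assert(ToyNumLanes(ToySimdType::kInt64x2) == 2,
              "two 64-bit lanes per 128-bit vector");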
#include "src/objects/objects.h" +#include "src/utils/address-map.h" namespace v8 { namespace internal { @@ -279,7 +280,8 @@ class RepresentationSelector { RepresentationSelector(JSGraph* jsgraph, JSHeapBroker* broker, Zone* zone, RepresentationChanger* changer, SourcePositionTable* source_positions, - NodeOriginTable* node_origins) + NodeOriginTable* node_origins, + TickCounter* tick_counter) : jsgraph_(jsgraph), zone_(zone), count_(jsgraph->graph()->NodeCount()), @@ -296,7 +298,8 @@ class RepresentationSelector { source_positions_(source_positions), node_origins_(node_origins), type_cache_(TypeCache::Get()), - op_typer_(broker, graph_zone()) { + op_typer_(broker, graph_zone()), + tick_counter_(tick_counter) { } // Forward propagation of types from type feedback. @@ -444,6 +447,7 @@ class RepresentationSelector { break; \ } SIMPLIFIED_SPECULATIVE_NUMBER_BINOP_LIST(DECLARE_CASE) + SIMPLIFIED_SPECULATIVE_BIGINT_BINOP_LIST(DECLARE_CASE) #undef DECLARE_CASE #define DECLARE_CASE(Name) \ @@ -747,21 +751,32 @@ class RepresentationSelector { !GetUpperBound(node->InputAt(1)).Maybe(type); } + void ChangeToDeadValue(Node* node, Node* effect, Node* control) { + DCHECK(TypeOf(node).IsNone()); + // If the node is unreachable, insert an Unreachable node and mark the + // value dead. + // TODO(jarin,tebbi) Find a way to unify/merge this insertion with + // InsertUnreachableIfNecessary. + Node* unreachable = effect = + graph()->NewNode(jsgraph_->common()->Unreachable(), effect, control); + const Operator* dead_value = + jsgraph_->common()->DeadValue(GetInfo(node)->representation()); + node->ReplaceInput(0, unreachable); + node->TrimInputCount(dead_value->ValueInputCount()); + ReplaceEffectControlUses(node, effect, control); + NodeProperties::ChangeOp(node, dead_value); + } + void ChangeToPureOp(Node* node, const Operator* new_op) { DCHECK(new_op->HasProperty(Operator::kPure)); + DCHECK_EQ(new_op->ValueInputCount(), node->op()->ValueInputCount()); if (node->op()->EffectInputCount() > 0) { DCHECK_LT(0, node->op()->ControlInputCount()); Node* control = NodeProperties::GetControlInput(node); Node* effect = NodeProperties::GetEffectInput(node); if (TypeOf(node).IsNone()) { - // If the node is unreachable, insert an Unreachable node and mark the - // value dead. - // TODO(jarin,tebbi) Find a way to unify/merge this insertion with - // InsertUnreachableIfNecessary. - Node* unreachable = effect = graph()->NewNode( - jsgraph_->common()->Unreachable(), effect, control); - new_op = jsgraph_->common()->DeadValue(GetInfo(node)->representation()); - node->ReplaceInput(0, unreachable); + ChangeToDeadValue(node, effect, control); + return; } // Rewire the effect and control chains. 
node->TrimInputCount(new_op->ValueInputCount()); @@ -772,6 +787,30 @@ class RepresentationSelector { NodeProperties::ChangeOp(node, new_op); } + void ChangeUnaryToPureBinaryOp(Node* node, const Operator* new_op, + int new_input_index, Node* new_input) { + DCHECK(new_op->HasProperty(Operator::kPure)); + DCHECK_EQ(new_op->ValueInputCount(), 2); + DCHECK_EQ(node->op()->ValueInputCount(), 1); + DCHECK_LE(0, new_input_index); + DCHECK_LE(new_input_index, 1); + if (node->op()->EffectInputCount() > 0) { + DCHECK_LT(0, node->op()->ControlInputCount()); + Node* control = NodeProperties::GetControlInput(node); + Node* effect = NodeProperties::GetEffectInput(node); + if (TypeOf(node).IsNone()) { + ChangeToDeadValue(node, effect, control); + return; + } + node->TrimInputCount(node->op()->ValueInputCount()); + ReplaceEffectControlUses(node, effect, control); + } else { + DCHECK_EQ(0, node->op()->ControlInputCount()); + } + node->InsertInput(jsgraph_->zone(), new_input_index, new_input); + NodeProperties::ChangeOp(node, new_op); + } + // Converts input {index} of {node} according to given UseInfo {use}, // assuming the type of the input is {input_type}. If {input_type} is null, // it takes the input from the input node {TypeOf(node->InputAt(index))}. @@ -804,6 +843,10 @@ class RepresentationSelector { } void ProcessInput(Node* node, int index, UseInfo use) { + DCHECK_IMPLIES(use.type_check() != TypeCheckKind::kNone, + !node->op()->HasProperty(Operator::kNoDeopt) && + node->op()->EffectInputCount() > 0); + switch (phase_) { case PROPAGATE: EnqueueInput(node, index, use); @@ -958,7 +1001,8 @@ class RepresentationSelector { return MachineRepresentation::kWord32; } else if (type.Is(Type::Boolean())) { return MachineRepresentation::kBit; - } else if (type.Is(Type::NumberOrOddball()) && use.IsUsedAsFloat64()) { + } else if (type.Is(Type::NumberOrOddball()) && + use.TruncatesOddballAndBigIntToNumber()) { return MachineRepresentation::kFloat64; } else if (type.Is(Type::Union(Type::SignedSmall(), Type::NaN(), zone()))) { // TODO(turbofan): For Phis that return either NaN or some Smi, it's @@ -968,6 +1012,8 @@ class RepresentationSelector { return MachineRepresentation::kTagged; } else if (type.Is(Type::Number())) { return MachineRepresentation::kFloat64; + } else if (type.Is(Type::BigInt()) && use.IsUsedAsWord64()) { + return MachineRepresentation::kWord64; } else if (type.Is(Type::ExternalPointer())) { return MachineType::PointerRepresentation(); } @@ -1109,8 +1155,11 @@ class RepresentationSelector { if (IsAnyCompressed(rep)) { return MachineType::AnyCompressed(); } - // Word64 representation is only valid for safe integer values. if (rep == MachineRepresentation::kWord64) { + if (type.Is(Type::BigInt())) { + return MachineType::AnyTagged(); + } + DCHECK(type.Is(TypeCache::Get()->kSafeInteger)); return MachineType(rep, MachineSemantic::kInt64); } @@ -1126,7 +1175,17 @@ class RepresentationSelector { void VisitStateValues(Node* node) { if (propagate()) { for (int i = 0; i < node->InputCount(); i++) { - EnqueueInput(node, i, UseInfo::Any()); + // When lowering 64 bit BigInts to Word64 representation, we have to + // make sure they are rematerialized before deoptimization. By + // propagating a AnyTagged use, the RepresentationChanger is going to + // insert the necessary conversions. + // TODO(nicohartmann): Remove, once the deoptimizer can rematerialize + // truncated BigInts. 
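// Sketch of the state-values policy introduced below this comment block:
// frame-state inputs that may be truncated BigInts are forced back to a
// tagged representation so the deoptimizer can rematerialize them, while all
// other inputs keep the unconstrained Any use. The enum and predicate are
// illustrative stand-ins for UseInfo and the BigInt type check.
#include <vector>

enum class ToyUseInfo { kAny, kAnyTagged };

std::vector<ToyUseInfo> ChooseStateValueUses(
    const std::vector<bool>& input_is_bigint) {
  std::vector<ToyUseInfo> uses;
  uses.reserve(input_is_bigint.size());
  for (bool is_bigint : input_is_bigint) {
    uses.push_back(is_bigint ? ToyUseInfo::kAnyTagged : ToyUseInfo::kAny);
  }
  return uses;
}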
+ if (TypeOf(node->InputAt(i)).Is(Type::BigInt())) { + EnqueueInput(node, i, UseInfo::AnyTagged()); + } else { + EnqueueInput(node, i, UseInfo::Any()); + } } } else if (lower()) { Zone* zone = jsgraph_->zone(); @@ -1135,6 +1194,12 @@ class RepresentationSelector { ZoneVector(node->InputCount(), zone); for (int i = 0; i < node->InputCount(); i++) { Node* input = node->InputAt(i); + // TODO(nicohartmann): Remove, once the deoptimizer can rematerialize + // truncated BigInts. + if (TypeOf(input).Is(Type::BigInt())) { + ProcessInput(node, i, UseInfo::AnyTagged()); + } + (*types)[i] = DeoptMachineTypeOf(GetInfo(input)->representation(), TypeOf(input)); } @@ -1621,6 +1686,8 @@ class RepresentationSelector { // Depending on the operator, propagate new usage info to the inputs. void VisitNode(Node* node, Truncation truncation, SimplifiedLowering* lowering) { + tick_counter_->DoTick(); + // Unconditionally eliminate unused pure nodes (only relevant if there's // a pure operation in between two effectful ones, where the last one // is unused). @@ -1715,13 +1782,15 @@ class RepresentationSelector { case IrOpcode::kJSToNumber: case IrOpcode::kJSToNumberConvertBigInt: case IrOpcode::kJSToNumeric: { + DCHECK(NodeProperties::GetType(node).Is(Type::Union( + Type::BigInt(), Type::NumberOrOddball(), graph()->zone()))); VisitInputs(node); // TODO(bmeurer): Optimize somewhat based on input type? if (truncation.IsUsedAsWord32()) { SetOutput(node, MachineRepresentation::kWord32); if (lower()) lowering->DoJSToNumberOrNumericTruncatesToWord32(node, this); - } else if (truncation.IsUsedAsFloat64()) { + } else if (truncation.TruncatesOddballAndBigIntToNumber()) { SetOutput(node, MachineRepresentation::kFloat64); if (lower()) lowering->DoJSToNumberOrNumericTruncatesToFloat64(node, this); @@ -2461,6 +2530,20 @@ class RepresentationSelector { } return; } + case IrOpcode::kCheckBigInt: { + if (InputIs(node, Type::BigInt())) { + VisitNoop(node, truncation); + } else { + VisitUnop(node, UseInfo::AnyTagged(), + MachineRepresentation::kTaggedPointer); + } + return; + } + case IrOpcode::kBigIntAsUintN: { + ProcessInput(node, 0, UseInfo::TruncatingWord64()); + SetOutput(node, MachineRepresentation::kWord64, Type::BigInt()); + return; + } case IrOpcode::kNumberAcos: case IrOpcode::kNumberAcosh: case IrOpcode::kNumberAsin: @@ -2621,6 +2704,43 @@ class RepresentationSelector { SetOutput(node, MachineRepresentation::kTaggedPointer); return; } + case IrOpcode::kSpeculativeBigIntAdd: { + if (truncation.IsUsedAsWord64()) { + VisitBinop(node, + UseInfo::CheckedBigIntTruncatingWord64(VectorSlotPair{}), + MachineRepresentation::kWord64); + if (lower()) { + ChangeToPureOp(node, lowering->machine()->Int64Add()); + } + } else { + VisitBinop(node, + UseInfo::CheckedBigIntAsTaggedPointer(VectorSlotPair{}), + MachineRepresentation::kTaggedPointer); + if (lower()) { + NodeProperties::ChangeOp(node, lowering->simplified()->BigIntAdd()); + } + } + return; + } + case IrOpcode::kSpeculativeBigIntNegate: { + if (truncation.IsUsedAsWord64()) { + VisitUnop(node, + UseInfo::CheckedBigIntTruncatingWord64(VectorSlotPair{}), + MachineRepresentation::kWord64); + if (lower()) { + ChangeUnaryToPureBinaryOp(node, lowering->machine()->Int64Sub(), 0, + jsgraph_->Int64Constant(0)); + } + } else { + VisitUnop(node, + UseInfo::CheckedBigIntAsTaggedPointer(VectorSlotPair{}), + MachineRepresentation::kTaggedPointer); + if (lower()) { + ChangeToPureOp(node, lowering->simplified()->BigIntNegate()); + } + } + return; + } case IrOpcode::kStringConcat: { // 
TODO(turbofan): We currently depend on having this first length input // to make sure that the overflow check is properly scheduled before the @@ -2657,6 +2777,10 @@ class RepresentationSelector { MachineRepresentation::kTaggedPointer); return; } + case IrOpcode::kStringFromCodePointAt: { + return VisitBinop(node, UseInfo::AnyTagged(), UseInfo::Word(), + MachineRepresentation::kTaggedPointer); + } case IrOpcode::kStringIndexOf: { ProcessInput(node, 0, UseInfo::AnyTagged()); ProcessInput(node, 1, UseInfo::AnyTagged()); @@ -2983,7 +3107,7 @@ class RepresentationSelector { simplified()->PlainPrimitiveToWord32()); } } - } else if (truncation.IsUsedAsFloat64()) { + } else if (truncation.TruncatesOddballAndBigIntToNumber()) { if (InputIs(node, Type::NumberOrOddball())) { VisitUnop(node, UseInfo::TruncatingFloat64(), MachineRepresentation::kFloat64); @@ -3236,7 +3360,7 @@ class RepresentationSelector { // identifies NaN and undefined, we can just pass along // the {truncation} and completely wipe the {node}. if (truncation.IsUnused()) return VisitUnused(node); - if (truncation.IsUsedAsFloat64()) { + if (truncation.TruncatesOddballAndBigIntToNumber()) { VisitUnop(node, UseInfo::TruncatingFloat64(), MachineRepresentation::kFloat64); if (lower()) DeferReplacement(node, node->InputAt(0)); @@ -3263,7 +3387,7 @@ class RepresentationSelector { MachineRepresentation::kWord32); if (lower()) DeferReplacement(node, node->InputAt(0)); } else if (InputIs(node, Type::NumberOrOddball()) && - truncation.IsUsedAsFloat64()) { + truncation.TruncatesOddballAndBigIntToNumber()) { // Propagate the Float64 truncation. VisitUnop(node, UseInfo::TruncatingFloat64(), MachineRepresentation::kFloat64); @@ -3431,6 +3555,9 @@ class RepresentationSelector { return SetOutput(node, MachineRepresentation::kNone); case IrOpcode::kStaticAssert: return VisitUnop(node, UseInfo::Any(), MachineRepresentation::kTagged); + case IrOpcode::kAssertType: + return VisitUnop(node, UseInfo::AnyTagged(), + MachineRepresentation::kTagged); default: FATAL( "Representation inference: unsupported opcode %i (%s), node #%i\n.", @@ -3534,6 +3661,7 @@ class RepresentationSelector { NodeOriginTable* node_origins_; TypeCache const* type_cache_; OperationTyper op_typer_; // helper for the feedback typer + TickCounter* const tick_counter_; NodeInfo* GetInfo(Node* node) { DCHECK(node->id() < count_); @@ -3547,19 +3675,22 @@ SimplifiedLowering::SimplifiedLowering(JSGraph* jsgraph, JSHeapBroker* broker, Zone* zone, SourcePositionTable* source_positions, NodeOriginTable* node_origins, - PoisoningMitigationLevel poisoning_level) + PoisoningMitigationLevel poisoning_level, + TickCounter* tick_counter) : jsgraph_(jsgraph), broker_(broker), zone_(zone), type_cache_(TypeCache::Get()), source_positions_(source_positions), node_origins_(node_origins), - poisoning_level_(poisoning_level) {} + poisoning_level_(poisoning_level), + tick_counter_(tick_counter) {} void SimplifiedLowering::LowerAllNodes() { - RepresentationChanger changer(jsgraph(), jsgraph()->isolate()); + RepresentationChanger changer(jsgraph(), broker_); RepresentationSelector selector(jsgraph(), broker_, zone_, &changer, - source_positions_, node_origins_); + source_positions_, node_origins_, + tick_counter_); selector.Run(this); } diff --git a/deps/v8/src/compiler/simplified-lowering.h b/deps/v8/src/compiler/simplified-lowering.h index e434af9d4f11b2..414e3588d72c59 100644 --- a/deps/v8/src/compiler/simplified-lowering.h +++ b/deps/v8/src/compiler/simplified-lowering.h @@ -12,6 +12,9 @@ namespace v8 { 
namespace internal { + +class TickCounter; + namespace compiler { // Forward declarations. @@ -26,7 +29,8 @@ class V8_EXPORT_PRIVATE SimplifiedLowering final { SimplifiedLowering(JSGraph* jsgraph, JSHeapBroker* broker, Zone* zone, SourcePositionTable* source_position, NodeOriginTable* node_origins, - PoisoningMitigationLevel poisoning_level); + PoisoningMitigationLevel poisoning_level, + TickCounter* tick_counter); ~SimplifiedLowering() = default; void LowerAllNodes(); @@ -67,6 +71,8 @@ class V8_EXPORT_PRIVATE SimplifiedLowering final { PoisoningMitigationLevel poisoning_level_; + TickCounter* const tick_counter_; + Node* Float64Round(Node* const node); Node* Float64Sign(Node* const node); Node* Int32Abs(Node* const node); diff --git a/deps/v8/src/compiler/simplified-operator.cc b/deps/v8/src/compiler/simplified-operator.cc index ed3cfa861751cc..4f83635422eb9b 100644 --- a/deps/v8/src/compiler/simplified-operator.cc +++ b/deps/v8/src/compiler/simplified-operator.cc @@ -492,6 +492,18 @@ Handle FastMapParameterOf(const Operator* op) { return Handle::null(); } +std::ostream& operator<<(std::ostream& os, BigIntOperationHint hint) { + switch (hint) { + case BigIntOperationHint::kBigInt: + return os << "BigInt"; + } + UNREACHABLE(); +} + +size_t hash_value(BigIntOperationHint hint) { + return static_cast(hint); +} + std::ostream& operator<<(std::ostream& os, NumberOperationHint hint) { switch (hint) { case NumberOperationHint::kSignedSmall: @@ -585,12 +597,6 @@ Type AllocateTypeOf(const Operator* op) { return AllocateParametersOf(op).type(); } -UnicodeEncoding UnicodeEncodingOf(const Operator* op) { - DCHECK(op->opcode() == IrOpcode::kStringFromSingleCodePoint || - op->opcode() == IrOpcode::kStringCodePointAt); - return OpParameter(op); -} - AbortReason AbortReasonOf(const Operator* op) { DCHECK_EQ(IrOpcode::kRuntimeAbort, op->opcode()); return static_cast(OpParameter(op)); @@ -702,9 +708,11 @@ bool operator==(CheckMinusZeroParameters const& lhs, V(NumberToUint32, Operator::kNoProperties, 1, 0) \ V(NumberToUint8Clamped, Operator::kNoProperties, 1, 0) \ V(NumberSilenceNaN, Operator::kNoProperties, 1, 0) \ + V(BigIntNegate, Operator::kNoProperties, 1, 0) \ V(StringConcat, Operator::kNoProperties, 3, 0) \ V(StringToNumber, Operator::kNoProperties, 1, 0) \ V(StringFromSingleCharCode, Operator::kNoProperties, 1, 0) \ + V(StringFromSingleCodePoint, Operator::kNoProperties, 1, 0) \ V(StringIndexOf, Operator::kNoProperties, 3, 0) \ V(StringLength, Operator::kNoProperties, 1, 0) \ V(StringToLowerCaseIntl, Operator::kNoProperties, 1, 0) \ @@ -713,6 +721,7 @@ bool operator==(CheckMinusZeroParameters const& lhs, V(PlainPrimitiveToNumber, Operator::kNoProperties, 1, 0) \ V(PlainPrimitiveToWord32, Operator::kNoProperties, 1, 0) \ V(PlainPrimitiveToFloat64, Operator::kNoProperties, 1, 0) \ + V(ChangeCompressedSignedToInt32, Operator::kNoProperties, 1, 0) \ V(ChangeTaggedSignedToInt32, Operator::kNoProperties, 1, 0) \ V(ChangeTaggedSignedToInt64, Operator::kNoProperties, 1, 0) \ V(ChangeTaggedToInt32, Operator::kNoProperties, 1, 0) \ @@ -723,6 +732,7 @@ bool operator==(CheckMinusZeroParameters const& lhs, V(ChangeCompressedToTaggedSigned, Operator::kNoProperties, 1, 0) \ V(ChangeTaggedToCompressedSigned, Operator::kNoProperties, 1, 0) \ V(ChangeFloat64ToTaggedPointer, Operator::kNoProperties, 1, 0) \ + V(ChangeInt31ToCompressedSigned, Operator::kNoProperties, 1, 0) \ V(ChangeInt31ToTaggedSigned, Operator::kNoProperties, 1, 0) \ V(ChangeInt32ToTagged, Operator::kNoProperties, 1, 0) \ V(ChangeInt64ToTagged, 
Operator::kNoProperties, 1, 0) \ @@ -730,6 +740,8 @@ bool operator==(CheckMinusZeroParameters const& lhs, V(ChangeUint64ToTagged, Operator::kNoProperties, 1, 0) \ V(ChangeTaggedToBit, Operator::kNoProperties, 1, 0) \ V(ChangeBitToTagged, Operator::kNoProperties, 1, 0) \ + V(TruncateBigIntToUint64, Operator::kNoProperties, 1, 0) \ + V(ChangeUint64ToBigInt, Operator::kNoProperties, 1, 0) \ V(TruncateTaggedToBit, Operator::kNoProperties, 1, 0) \ V(TruncateTaggedPointerToBit, Operator::kNoProperties, 1, 0) \ V(TruncateTaggedToWord32, Operator::kNoProperties, 1, 0) \ @@ -769,9 +781,12 @@ bool operator==(CheckMinusZeroParameters const& lhs, V(NewConsString, Operator::kNoProperties, 3, 0) \ V(PoisonIndex, Operator::kNoProperties, 1, 0) -#define EFFECT_DEPENDENT_OP_LIST(V) \ - V(StringCharCodeAt, Operator::kNoProperties, 2, 1) \ - V(StringSubstring, Operator::kNoProperties, 3, 1) \ +#define EFFECT_DEPENDENT_OP_LIST(V) \ + V(BigIntAdd, Operator::kNoProperties, 2, 1) \ + V(StringCharCodeAt, Operator::kNoProperties, 2, 1) \ + V(StringCodePointAt, Operator::kNoProperties, 2, 1) \ + V(StringFromCodePointAt, Operator::kNoProperties, 2, 1) \ + V(StringSubstring, Operator::kNoProperties, 3, 1) \ V(DateNow, Operator::kNoProperties, 0, 1) #define SPECULATIVE_NUMBER_BINOP_LIST(V) \ @@ -801,6 +816,8 @@ bool operator==(CheckMinusZeroParameters const& lhs, V(CheckNumber, 1, 1) \ V(CheckSmi, 1, 1) \ V(CheckString, 1, 1) \ + V(CheckBigInt, 1, 1) \ + V(CheckedInt32ToCompressedSigned, 1, 1) \ V(CheckedInt32ToTaggedSigned, 1, 1) \ V(CheckedInt64ToInt32, 1, 1) \ V(CheckedInt64ToTaggedSigned, 1, 1) \ @@ -895,32 +912,6 @@ struct SimplifiedOperatorGlobalCache final { DEOPTIMIZE_REASON_LIST(CHECK_IF) #undef CHECK_IF - template - struct StringCodePointAtOperator final : public Operator1 { - StringCodePointAtOperator() - : Operator1(IrOpcode::kStringCodePointAt, - Operator::kFoldable | Operator::kNoThrow, - "StringCodePointAt", 2, 1, 1, 1, 1, 0, - kEncoding) {} - }; - StringCodePointAtOperator - kStringCodePointAtOperatorUTF16; - StringCodePointAtOperator - kStringCodePointAtOperatorUTF32; - - template - struct StringFromSingleCodePointOperator final - : public Operator1 { - StringFromSingleCodePointOperator() - : Operator1( - IrOpcode::kStringFromSingleCodePoint, Operator::kPure, - "StringFromSingleCodePoint", 1, 0, 0, 1, 0, 0, kEncoding) {} - }; - StringFromSingleCodePointOperator - kStringFromSingleCodePointOperatorUTF16; - StringFromSingleCodePointOperator - kStringFromSingleCodePointOperatorUTF32; - struct FindOrderedHashMapEntryOperator final : public Operator { FindOrderedHashMapEntryOperator() : Operator(IrOpcode::kFindOrderedHashMapEntry, Operator::kEliminatable, @@ -1236,6 +1227,20 @@ const Operator* SimplifiedOperatorBuilder::RuntimeAbort(AbortReason reason) { static_cast(reason)); // parameter } +const Operator* SimplifiedOperatorBuilder::BigIntAsUintN(int bits) { + CHECK(0 <= bits && bits <= 64); + + return new (zone()) Operator1(IrOpcode::kBigIntAsUintN, Operator::kPure, + "BigIntAsUintN", 1, 0, 0, 1, 0, 0, bits); +} + +const Operator* SimplifiedOperatorBuilder::AssertType(Type type) { + DCHECK(type.IsRange()); + return new (zone()) Operator1(IrOpcode::kAssertType, + Operator::kNoThrow | Operator::kNoDeopt, + "AssertType", 1, 0, 0, 1, 0, 0, type); +} + const Operator* SimplifiedOperatorBuilder::CheckIf( DeoptimizeReason reason, const VectorSlotPair& feedback) { if (!feedback.IsValid()) { @@ -1433,6 +1438,21 @@ const Operator* SimplifiedOperatorBuilder::CheckFloat64Hole( CheckFloat64HoleParameters(mode, 
feedback)); } +const Operator* SimplifiedOperatorBuilder::SpeculativeBigIntAdd( + BigIntOperationHint hint) { + return new (zone()) Operator1( + IrOpcode::kSpeculativeBigIntAdd, Operator::kFoldable | Operator::kNoThrow, + "SpeculativeBigIntAdd", 2, 1, 1, 1, 1, 0, hint); +} + +const Operator* SimplifiedOperatorBuilder::SpeculativeBigIntNegate( + BigIntOperationHint hint) { + return new (zone()) Operator1( + IrOpcode::kSpeculativeBigIntNegate, + Operator::kFoldable | Operator::kNoThrow, "SpeculativeBigIntNegate", 1, 1, + 1, 1, 1, 0, hint); +} + const Operator* SimplifiedOperatorBuilder::SpeculativeToNumber( NumberOperationHint hint, const VectorSlotPair& feedback) { if (!feedback.IsValid()) { @@ -1655,28 +1675,6 @@ const Operator* SimplifiedOperatorBuilder::AllocateRaw( AllocateParameters(type, allocation, allow_large_objects)); } -const Operator* SimplifiedOperatorBuilder::StringCodePointAt( - UnicodeEncoding encoding) { - switch (encoding) { - case UnicodeEncoding::UTF16: - return &cache_.kStringCodePointAtOperatorUTF16; - case UnicodeEncoding::UTF32: - return &cache_.kStringCodePointAtOperatorUTF32; - } - UNREACHABLE(); -} - -const Operator* SimplifiedOperatorBuilder::StringFromSingleCodePoint( - UnicodeEncoding encoding) { - switch (encoding) { - case UnicodeEncoding::UTF16: - return &cache_.kStringFromSingleCodePointOperatorUTF16; - case UnicodeEncoding::UTF32: - return &cache_.kStringFromSingleCodePointOperatorUTF32; - } - UNREACHABLE(); -} - #define SPECULATIVE_NUMBER_BINOP(Name) \ const Operator* SimplifiedOperatorBuilder::Name(NumberOperationHint hint) { \ switch (hint) { \ diff --git a/deps/v8/src/compiler/simplified-operator.h b/deps/v8/src/compiler/simplified-operator.h index d93544c5cd5869..bdac796adfff69 100644 --- a/deps/v8/src/compiler/simplified-operator.h +++ b/deps/v8/src/compiler/simplified-operator.h @@ -475,10 +475,15 @@ enum class NumberOperationHint : uint8_t { kNumberOrOddball, // Inputs were Number or Oddball, output was Number. 
}; +enum class BigIntOperationHint : uint8_t { + kBigInt, +}; + size_t hash_value(NumberOperationHint); +size_t hash_value(BigIntOperationHint); V8_EXPORT_PRIVATE std::ostream& operator<<(std::ostream&, NumberOperationHint); - +V8_EXPORT_PRIVATE std::ostream& operator<<(std::ostream&, BigIntOperationHint); V8_EXPORT_PRIVATE NumberOperationHint NumberOperationHintOf(const Operator* op) V8_WARN_UNUSED_RESULT; @@ -634,6 +639,9 @@ class V8_EXPORT_PRIVATE SimplifiedOperatorBuilder final const Operator* NumberSilenceNaN(); + const Operator* BigIntAdd(); + const Operator* BigIntNegate(); + const Operator* SpeculativeSafeIntegerAdd(NumberOperationHint hint); const Operator* SpeculativeSafeIntegerSubtract(NumberOperationHint hint); @@ -653,6 +661,10 @@ class V8_EXPORT_PRIVATE SimplifiedOperatorBuilder final const Operator* SpeculativeNumberLessThanOrEqual(NumberOperationHint hint); const Operator* SpeculativeNumberEqual(NumberOperationHint hint); + const Operator* SpeculativeBigIntAdd(BigIntOperationHint hint); + const Operator* SpeculativeBigIntNegate(BigIntOperationHint hint); + const Operator* BigIntAsUintN(int bits); + const Operator* ReferenceEqual(); const Operator* SameValue(); const Operator* SameValueNumbersOnly(); @@ -666,9 +678,10 @@ class V8_EXPORT_PRIVATE SimplifiedOperatorBuilder final const Operator* StringLessThan(); const Operator* StringLessThanOrEqual(); const Operator* StringCharCodeAt(); - const Operator* StringCodePointAt(UnicodeEncoding encoding); + const Operator* StringCodePointAt(); const Operator* StringFromSingleCharCode(); - const Operator* StringFromSingleCodePoint(UnicodeEncoding encoding); + const Operator* StringFromSingleCodePoint(); + const Operator* StringFromCodePointAt(); const Operator* StringIndexOf(); const Operator* StringLength(); const Operator* StringToLowerCaseIntl(); @@ -686,6 +699,7 @@ class V8_EXPORT_PRIVATE SimplifiedOperatorBuilder final const Operator* PlainPrimitiveToWord32(); const Operator* PlainPrimitiveToFloat64(); + const Operator* ChangeCompressedSignedToInt32(); const Operator* ChangeTaggedSignedToInt32(); const Operator* ChangeTaggedSignedToInt64(); const Operator* ChangeTaggedToInt32(); @@ -695,6 +709,7 @@ class V8_EXPORT_PRIVATE SimplifiedOperatorBuilder final const Operator* ChangeTaggedToTaggedSigned(); const Operator* ChangeCompressedToTaggedSigned(); const Operator* ChangeTaggedToCompressedSigned(); + const Operator* ChangeInt31ToCompressedSigned(); const Operator* ChangeInt31ToTaggedSigned(); const Operator* ChangeInt32ToTagged(); const Operator* ChangeInt64ToTagged(); @@ -704,6 +719,8 @@ class V8_EXPORT_PRIVATE SimplifiedOperatorBuilder final const Operator* ChangeFloat64ToTaggedPointer(); const Operator* ChangeTaggedToBit(); const Operator* ChangeBitToTagged(); + const Operator* TruncateBigIntToUint64(); + const Operator* ChangeUint64ToBigInt(); const Operator* TruncateTaggedToWord32(); const Operator* TruncateTaggedToFloat64(); const Operator* TruncateTaggedToBit(); @@ -740,6 +757,8 @@ class V8_EXPORT_PRIVATE SimplifiedOperatorBuilder final const Operator* CheckedInt32Mod(); const Operator* CheckedInt32Mul(CheckForMinusZeroMode); const Operator* CheckedInt32Sub(); + const Operator* CheckedInt32ToCompressedSigned( + const VectorSlotPair& feedback); const Operator* CheckedInt32ToTaggedSigned(const VectorSlotPair& feedback); const Operator* CheckedInt64ToInt32(const VectorSlotPair& feedback); const Operator* CheckedInt64ToTaggedSigned(const VectorSlotPair& feedback); @@ -752,6 +771,7 @@ class V8_EXPORT_PRIVATE 
SimplifiedOperatorBuilder final const VectorSlotPair& feedback); const Operator* CheckedTaggedToTaggedPointer(const VectorSlotPair& feedback); const Operator* CheckedTaggedToTaggedSigned(const VectorSlotPair& feedback); + const Operator* CheckBigInt(const VectorSlotPair& feedback); const Operator* CheckedCompressedToTaggedPointer( const VectorSlotPair& feedback); const Operator* CheckedCompressedToTaggedSigned( @@ -874,6 +894,9 @@ class V8_EXPORT_PRIVATE SimplifiedOperatorBuilder final // Abort (for terminating execution on internal error). const Operator* RuntimeAbort(AbortReason reason); + // Abort if the value input does not inhabit the given type + const Operator* AssertType(Type type); + const Operator* DateNow(); private: diff --git a/deps/v8/src/compiler/state-values-utils.cc b/deps/v8/src/compiler/state-values-utils.cc index c00613c232902b..2bb5a0a4b532ca 100644 --- a/deps/v8/src/compiler/state-values-utils.cc +++ b/deps/v8/src/compiler/state-values-utils.cc @@ -329,9 +329,7 @@ void StateValuesAccess::iterator::Pop() { current_depth_--; } - -bool StateValuesAccess::iterator::done() { return current_depth_ < 0; } - +bool StateValuesAccess::iterator::done() const { return current_depth_ < 0; } void StateValuesAccess::iterator::Advance() { Top()->Advance(); @@ -392,14 +390,12 @@ MachineType StateValuesAccess::iterator::type() { } } - -bool StateValuesAccess::iterator::operator!=(iterator& other) { +bool StateValuesAccess::iterator::operator!=(iterator const& other) { // We only allow comparison with end(). CHECK(other.done()); return !done(); } - StateValuesAccess::iterator& StateValuesAccess::iterator::operator++() { Advance(); return *this; diff --git a/deps/v8/src/compiler/state-values-utils.h b/deps/v8/src/compiler/state-values-utils.h index 00ec3bb351282c..0ff5d218f1815e 100644 --- a/deps/v8/src/compiler/state-values-utils.h +++ b/deps/v8/src/compiler/state-values-utils.h @@ -92,7 +92,7 @@ class V8_EXPORT_PRIVATE StateValuesAccess { class V8_EXPORT_PRIVATE iterator { public: // Bare minimum of operators needed for range iteration. 
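// The operator!= / done() const-corrections in the state-values-utils hunks
// nearby follow the usual sentinel pattern for forward-only walks: inequality
// is only defined against end(), so it reduces to "am I done yet". A minimal
// self-contained iterator with the same shape (a plain vector stands in for
// the state-values tree):
#include <cassert>
#include <cstddef>
#include <utility>
#include <vector>

class ToyAccess {
 public:
  explicit ToyAccess(std::vector<int> values) : values_(std::move(values)) {}

  class iterator {
   public:
    bool done() const { return index_ >= values_->size(); }
    bool operator!=(iterator const& other) const {
      assert(other.done());  // comparison is only allowed against end()
      return !done();
    }
    iterator& operator++() {
      ++index_;
      return *this;
    }
    int operator*() const { return (*values_)[index_]; }

   private:
    friend class ToyAccess;
    iterator(const std::vector<int>* values, size_t index)
        : values_(values), index_(index) {}
    const std::vector<int>* values_;
    size_t index_;
  };

  iterator begin() const { return iterator(&values_, 0); }
  iterator end() const { return iterator(&values_, values_.size()); }

 private:
  std::vector<int> values_;
};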
- bool operator!=(iterator& other); + bool operator!=(iterator const& other); iterator& operator++(); TypedNode operator*(); @@ -104,7 +104,7 @@ class V8_EXPORT_PRIVATE StateValuesAccess { Node* node(); MachineType type(); - bool done(); + bool done() const; void Advance(); void EnsureValid(); diff --git a/deps/v8/src/compiler/store-store-elimination.cc b/deps/v8/src/compiler/store-store-elimination.cc index 13d8199745af49..b71bcd7e669fb8 100644 --- a/deps/v8/src/compiler/store-store-elimination.cc +++ b/deps/v8/src/compiler/store-store-elimination.cc @@ -6,6 +6,7 @@ #include "src/compiler/store-store-elimination.h" +#include "src/codegen/tick-counter.h" #include "src/compiler/all-nodes.h" #include "src/compiler/js-graph.h" #include "src/compiler/node-properties.h" @@ -129,7 +130,8 @@ namespace { class RedundantStoreFinder final { public: - RedundantStoreFinder(JSGraph* js_graph, Zone* temp_zone); + RedundantStoreFinder(JSGraph* js_graph, TickCounter* tick_counter, + Zone* temp_zone); void Find(); @@ -157,6 +159,7 @@ class RedundantStoreFinder final { ZoneSet& to_remove() { return to_remove_; } JSGraph* const jsgraph_; + TickCounter* const tick_counter_; Zone* const temp_zone_; ZoneStack revisit_; @@ -199,6 +202,7 @@ void RedundantStoreFinder::Find() { Visit(jsgraph()->graph()->end()); while (!revisit_.empty()) { + tick_counter_->DoTick(); Node* next = revisit_.top(); revisit_.pop(); DCHECK_LT(next->id(), in_revisit_.size()); @@ -230,9 +234,10 @@ bool RedundantStoreFinder::HasBeenVisited(Node* node) { return !unobservable_for_id(node->id()).IsUnvisited(); } -void StoreStoreElimination::Run(JSGraph* js_graph, Zone* temp_zone) { +void StoreStoreElimination::Run(JSGraph* js_graph, TickCounter* tick_counter, + Zone* temp_zone) { // Find superfluous nodes - RedundantStoreFinder finder(js_graph, temp_zone); + RedundantStoreFinder finder(js_graph, tick_counter, temp_zone); finder.Find(); // Remove superfluous nodes @@ -336,8 +341,11 @@ bool RedundantStoreFinder::CannotObserveStoreField(Node* node) { } // Initialize unobservable_ with js_graph->graph->NodeCount() empty sets. 
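// Sketch of the TickCounter threading added across this file (and the other
// compiler phases in this patch): long-running fixpoint loops call DoTick()
// once per iteration so a counter owned by the compilation job can observe
// progress on otherwise unbounded work. The counter and worklist below are
// toys, not the V8 classes.
#include <deque>
#include <iostream>
#include <stack>

class ToyTickCounter {
 public:
  void DoTick() { ++ticks_; }
  int ticks() const { return ticks_; }

 private:
  int ticks_ = 0;
};

void RunFixpoint(std::stack<int> revisit, ToyTickCounter* tick_counter) {
  while (!revisit.empty()) {
    tick_counter->DoTick();  // cf. RedundantStoreFinder::Find
    revisit.pop();           // the visit step would go here
  }
}

int main() {
  ToyTickCounter counter;
  RunFixpoint(std::stack<int>(std::deque<int>{1, 2, 3}), &counter);
  std::cout << counter.ticks() << "\n";  // prints 3
}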
-RedundantStoreFinder::RedundantStoreFinder(JSGraph* js_graph, Zone* temp_zone) +RedundantStoreFinder::RedundantStoreFinder(JSGraph* js_graph, + TickCounter* tick_counter, + Zone* temp_zone) : jsgraph_(js_graph), + tick_counter_(tick_counter), temp_zone_(temp_zone), revisit_(temp_zone), in_revisit_(js_graph->graph()->NodeCount(), temp_zone), diff --git a/deps/v8/src/compiler/store-store-elimination.h b/deps/v8/src/compiler/store-store-elimination.h index cda7591fccf10f..646640a3104fa7 100644 --- a/deps/v8/src/compiler/store-store-elimination.h +++ b/deps/v8/src/compiler/store-store-elimination.h @@ -11,11 +11,15 @@ namespace v8 { namespace internal { + +class TickCounter; + namespace compiler { class StoreStoreElimination final { public: - static void Run(JSGraph* js_graph, Zone* temp_zone); + static void Run(JSGraph* js_graph, TickCounter* tick_counter, + Zone* temp_zone); }; } // namespace compiler diff --git a/deps/v8/src/compiler/typer.cc b/deps/v8/src/compiler/typer.cc index 4cf2c38bdb24c9..5dbbad3dcd64bd 100644 --- a/deps/v8/src/compiler/typer.cc +++ b/deps/v8/src/compiler/typer.cc @@ -7,6 +7,7 @@ #include #include "src/base/flags.h" +#include "src/codegen/tick-counter.h" #include "src/compiler/common-operator.h" #include "src/compiler/graph-reducer.h" #include "src/compiler/js-operator.h" @@ -33,13 +34,15 @@ class Typer::Decorator final : public GraphDecorator { Typer* const typer_; }; -Typer::Typer(JSHeapBroker* broker, Flags flags, Graph* graph) +Typer::Typer(JSHeapBroker* broker, Flags flags, Graph* graph, + TickCounter* tick_counter) : flags_(flags), graph_(graph), decorator_(nullptr), cache_(TypeCache::Get()), broker_(broker), - operation_typer_(broker, zone()) { + operation_typer_(broker, zone()), + tick_counter_(tick_counter) { singleton_false_ = operation_typer_.singleton_false(); singleton_true_ = operation_typer_.singleton_true(); @@ -47,7 +50,6 @@ Typer::Typer(JSHeapBroker* broker, Flags flags, Graph* graph) graph_->AddDecorator(decorator_); } - Typer::~Typer() { graph_->RemoveDecorator(decorator_); } @@ -91,14 +93,18 @@ class Typer::Visitor : public Reducer { case IrOpcode::k##x: \ return UpdateType(node, TypeBinaryOp(node, x)); SIMPLIFIED_NUMBER_BINOP_LIST(DECLARE_CASE) + SIMPLIFIED_BIGINT_BINOP_LIST(DECLARE_CASE) SIMPLIFIED_SPECULATIVE_NUMBER_BINOP_LIST(DECLARE_CASE) + SIMPLIFIED_SPECULATIVE_BIGINT_BINOP_LIST(DECLARE_CASE) #undef DECLARE_CASE #define DECLARE_CASE(x) \ case IrOpcode::k##x: \ return UpdateType(node, TypeUnaryOp(node, x)); SIMPLIFIED_NUMBER_UNOP_LIST(DECLARE_CASE) + SIMPLIFIED_BIGINT_UNOP_LIST(DECLARE_CASE) SIMPLIFIED_SPECULATIVE_NUMBER_UNOP_LIST(DECLARE_CASE) + SIMPLIFIED_SPECULATIVE_BIGINT_UNOP_LIST(DECLARE_CASE) #undef DECLARE_CASE #define DECLARE_CASE(x) case IrOpcode::k##x: @@ -157,14 +163,18 @@ class Typer::Visitor : public Reducer { case IrOpcode::k##x: \ return TypeBinaryOp(node, x); SIMPLIFIED_NUMBER_BINOP_LIST(DECLARE_CASE) + SIMPLIFIED_BIGINT_BINOP_LIST(DECLARE_CASE) SIMPLIFIED_SPECULATIVE_NUMBER_BINOP_LIST(DECLARE_CASE) + SIMPLIFIED_SPECULATIVE_BIGINT_BINOP_LIST(DECLARE_CASE) #undef DECLARE_CASE #define DECLARE_CASE(x) \ case IrOpcode::k##x: \ return TypeUnaryOp(node, x); SIMPLIFIED_NUMBER_UNOP_LIST(DECLARE_CASE) + SIMPLIFIED_BIGINT_UNOP_LIST(DECLARE_CASE) SIMPLIFIED_SPECULATIVE_NUMBER_UNOP_LIST(DECLARE_CASE) + SIMPLIFIED_SPECULATIVE_BIGINT_UNOP_LIST(DECLARE_CASE) #undef DECLARE_CASE #define DECLARE_CASE(x) case IrOpcode::k##x: @@ -276,14 +286,18 @@ class Typer::Visitor : public Reducer { return t->operation_typer_.Name(type); \ } 
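The {SIMPLIFIED_BIGINT_*_LIST} additions in the typer rely on V8's X-macro style: a single operation list expands into switch cases and typer methods alike, so extending the list extends every expansion site at once. An illustrative, non-V8 reduction of the technique (the list and types are invented):

#define MY_BIGINT_BINOP_LIST(V) \
  V(BigIntAdd)                  \
  V(BigIntSubtract)

enum class Op {
#define DECLARE_ENUM(Name) k##Name,
  MY_BIGINT_BINOP_LIST(DECLARE_ENUM)
#undef DECLARE_ENUM
};

struct MiniTyper {
  // One method per listed op; the body is a placeholder, not real typing.
#define DECLARE_METHOD(Name) \
  static int Name(int lhs, int rhs) { return lhs + rhs; }
  MY_BIGINT_BINOP_LIST(DECLARE_METHOD)
#undef DECLARE_METHOD

  static int Dispatch(Op op, int lhs, int rhs) {
    switch (op) {
#define DECLARE_CASE(Name) \
  case Op::k##Name:        \
    return Name(lhs, rhs);
      MY_BIGINT_BINOP_LIST(DECLARE_CASE)
#undef DECLARE_CASE
    }
    return 0;  // unreachable if the list is exhaustive
  }
};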
SIMPLIFIED_NUMBER_UNOP_LIST(DECLARE_METHOD) + SIMPLIFIED_BIGINT_UNOP_LIST(DECLARE_METHOD) SIMPLIFIED_SPECULATIVE_NUMBER_UNOP_LIST(DECLARE_METHOD) + SIMPLIFIED_SPECULATIVE_BIGINT_UNOP_LIST(DECLARE_METHOD) #undef DECLARE_METHOD #define DECLARE_METHOD(Name) \ static Type Name(Type lhs, Type rhs, Typer* t) { \ return t->operation_typer_.Name(lhs, rhs); \ } SIMPLIFIED_NUMBER_BINOP_LIST(DECLARE_METHOD) + SIMPLIFIED_BIGINT_BINOP_LIST(DECLARE_METHOD) SIMPLIFIED_SPECULATIVE_NUMBER_BINOP_LIST(DECLARE_METHOD) + SIMPLIFIED_SPECULATIVE_BIGINT_BINOP_LIST(DECLARE_METHOD) #undef DECLARE_METHOD static Type ObjectIsArrayBufferView(Type, Typer*); @@ -410,7 +424,7 @@ void Typer::Run(const NodeVector& roots, induction_vars->ChangeToInductionVariablePhis(); } Visitor visitor(this, induction_vars); - GraphReducer graph_reducer(zone(), graph()); + GraphReducer graph_reducer(zone(), graph(), tick_counter_); graph_reducer.AddReducer(&visitor); for (Node* const root : roots) graph_reducer.ReduceNode(root); graph_reducer.ReduceGraph(); @@ -798,6 +812,8 @@ Type Typer::Visitor::TypeHeapConstant(Node* node) { return TypeConstant(HeapConstantOf(node->op())); } +Type Typer::Visitor::TypeCompressedHeapConstant(Node* node) { UNREACHABLE(); } + Type Typer::Visitor::TypeExternalConstant(Node* node) { return Type::ExternalPointer(); } @@ -2060,6 +2076,10 @@ Type Typer::Visitor::TypeStringFromSingleCodePoint(Node* node) { return TypeUnaryOp(node, StringFromSingleCodePointTyper); } +Type Typer::Visitor::TypeStringFromCodePointAt(Node* node) { + return Type::String(); +} + Type Typer::Visitor::TypeStringIndexOf(Node* node) { return Type::Range(-1.0, String::kMaxLength, zone()); } @@ -2336,6 +2356,8 @@ Type Typer::Visitor::TypeFindOrderedHashMapEntryForInt32Key(Node* node) { Type Typer::Visitor::TypeRuntimeAbort(Node* node) { UNREACHABLE(); } +Type Typer::Visitor::TypeAssertType(Node* node) { UNREACHABLE(); } + // Heap constants. Type Typer::Visitor::TypeConstant(Handle value) { diff --git a/deps/v8/src/compiler/typer.h b/deps/v8/src/compiler/typer.h index fa87d81f1e05d6..305470d72421e8 100644 --- a/deps/v8/src/compiler/typer.h +++ b/deps/v8/src/compiler/typer.h @@ -11,6 +11,9 @@ namespace v8 { namespace internal { + +class TickCounter; + namespace compiler { // Forward declarations. 
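As in several headers in this patch, typer.h only gains a forward declaration of {TickCounter}: the class is used purely through a pointer, so the header avoids including src/codegen/tick-counter.h and its includers stay decoupled. A sketch of the pattern with invented class names:

// typer_like.h -- only a pointer member is needed, so forward-declare.
namespace v8 {
namespace internal {

class TickCounter;  // full definition is included only by the .cc file

namespace compiler {

class TyperLike {
 public:
  explicit TyperLike(TickCounter* tick_counter)
      : tick_counter_(tick_counter) {}

 private:
  TickCounter* const tick_counter_;  // pointer: no complete type required
};

}  // namespace compiler
}  // namespace internal
}  // namespace v8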
@@ -25,7 +28,8 @@ class V8_EXPORT_PRIVATE Typer { }; using Flags = base::Flags; - Typer(JSHeapBroker* broker, Flags flags, Graph* graph); + Typer(JSHeapBroker* broker, Flags flags, Graph* graph, + TickCounter* tick_counter); ~Typer(); void Run(); @@ -49,6 +53,7 @@ class V8_EXPORT_PRIVATE Typer { TypeCache const* cache_; JSHeapBroker* broker_; OperationTyper operation_typer_; + TickCounter* const tick_counter_; Type singleton_false_; Type singleton_true_; diff --git a/deps/v8/src/compiler/types.cc b/deps/v8/src/compiler/types.cc index edf07a4ffd99a5..d4267a75fe0f58 100644 --- a/deps/v8/src/compiler/types.cc +++ b/deps/v8/src/compiler/types.cc @@ -6,9 +6,10 @@ #include "src/compiler/types.h" -#include "src/utils/ostreams.h" #include "src/handles/handles-inl.h" +#include "src/objects/instance-type.h" #include "src/objects/objects-inl.h" +#include "src/utils/ostreams.h" namespace v8 { namespace internal { @@ -202,7 +203,7 @@ Type::bitset BitsetType::Lub(const MapRefLike& map) { return kOtherObject; case JS_ARRAY_TYPE: return kArray; - case JS_VALUE_TYPE: + case JS_PRIMITIVE_WRAPPER_TYPE: case JS_MESSAGE_OBJECT_TYPE: case JS_DATE_TYPE: #ifdef V8_INTL_SUPPORT @@ -312,8 +313,9 @@ Type::bitset BitsetType::Lub(const MapRefLike& map) { case SCRIPT_TYPE: case CODE_TYPE: case PROPERTY_CELL_TYPE: - case MODULE_TYPE: - case MODULE_INFO_ENTRY_TYPE: + case SOURCE_TEXT_MODULE_TYPE: + case SOURCE_TEXT_MODULE_INFO_ENTRY_TYPE: + case SYNTHETIC_MODULE_TYPE: case CELL_TYPE: case PREPARSE_DATA_TYPE: case UNCOMPILED_DATA_WITHOUT_PREPARSE_DATA_TYPE: @@ -349,6 +351,7 @@ Type::bitset BitsetType::Lub(const MapRefLike& map) { case ENUM_CACHE_TYPE: case SOURCE_POSITION_TABLE_WITH_FRAME_CACHE_TYPE: case WASM_CAPI_FUNCTION_DATA_TYPE: + case WASM_INDIRECT_FUNCTION_TABLE_TYPE: case WASM_DEBUG_INFO_TYPE: case WASM_EXCEPTION_TAG_TYPE: case WASM_EXPORTED_FUNCTION_DATA_TYPE: @@ -363,6 +366,9 @@ Type::bitset BitsetType::Lub(const MapRefLike& map) { case PROMISE_REJECT_REACTION_JOB_TASK_TYPE: case PROMISE_RESOLVE_THENABLE_JOB_TASK_TYPE: case FINALIZATION_GROUP_CLEANUP_JOB_TASK_TYPE: +#define MAKE_TORQUE_CLASS_TYPE(V) case V: + TORQUE_DEFINED_INSTANCE_TYPES(MAKE_TORQUE_CLASS_TYPE) +#undef MAKE_TORQUE_CLASS_TYPE UNREACHABLE(); } UNREACHABLE(); diff --git a/deps/v8/src/compiler/types.h b/deps/v8/src/compiler/types.h index 21aaab50362a0d..0dc1aa77b06d13 100644 --- a/deps/v8/src/compiler/types.h +++ b/deps/v8/src/compiler/types.h @@ -7,7 +7,7 @@ #include "src/base/compiler-specific.h" #include "src/common/globals.h" -#include "src/compiler/js-heap-broker.h" +#include "src/compiler/heap-refs.h" #include "src/handles/handles.h" #include "src/numbers/conversions.h" #include "src/objects/objects.h" @@ -220,6 +220,7 @@ namespace compiler { INTERNAL_BITSET_TYPE_LIST(V) \ PROPER_BITSET_TYPE_LIST(V) +class JSHeapBroker; class HeapConstantType; class OtherNumberConstantType; class TupleType; diff --git a/deps/v8/src/compiler/verifier.cc b/deps/v8/src/compiler/verifier.cc index 3f1b2e9f13f11e..d3d4d54ea25485 100644 --- a/deps/v8/src/compiler/verifier.cc +++ b/deps/v8/src/compiler/verifier.cc @@ -431,6 +431,7 @@ void Verifier::Visitor::Check(Node* node, const AllNodes& all) { CheckTypeIs(node, Type::Number()); break; case IrOpcode::kHeapConstant: + case IrOpcode::kCompressedHeapConstant: // Constants have no inputs. CHECK_EQ(0, input_count); // Type is anything. 
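The long instance-type switch in {BitsetType::Lub} is deliberately exhaustive: internal-only kinds (now including every Torque-defined instance type via the macro expansion above) funnel into UNREACHABLE(), so introducing a new instance type forces an explicit decision here. A toy version of the idiom, with made-up types and bitset values:

#include <cstdlib>

enum class InstanceType { kHeapNumber, kString, kCell, kPropertyCell };

[[noreturn]] inline void Unreachable() { std::abort(); }

unsigned Lub(InstanceType type) {
  switch (type) {
    case InstanceType::kHeapNumber:
      return 0x1;  // hypothetical kNumber bitset
    case InstanceType::kString:
      return 0x2;  // hypothetical kString bitset
    // Internal-only kinds: the type system must never see these, so
    // reaching them is a bug, exactly like the cases listed above.
    case InstanceType::kCell:
    case InstanceType::kPropertyCell:
      Unreachable();
  }
  Unreachable();  // keeps the function well-formed for all enum values
}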
@@ -933,7 +934,7 @@ void Verifier::Visitor::Check(Node* node, const AllNodes& all) { break; case IrOpcode::kComment: - case IrOpcode::kDebugAbort: + case IrOpcode::kAbortCSAAssert: case IrOpcode::kDebugBreak: case IrOpcode::kRetain: case IrOpcode::kUnsafePointerAdd: @@ -975,6 +976,25 @@ void Verifier::Visitor::Check(Node* node, const AllNodes& all) { case IrOpcode::kSpeculativeNumberLessThanOrEqual: CheckTypeIs(node, Type::Boolean()); break; + case IrOpcode::kSpeculativeBigIntAdd: + CheckTypeIs(node, Type::BigInt()); + break; + case IrOpcode::kSpeculativeBigIntNegate: + CheckTypeIs(node, Type::BigInt()); + break; + case IrOpcode::kBigIntAsUintN: + CheckValueInputIs(node, 0, Type::BigInt()); + CheckTypeIs(node, Type::BigInt()); + break; + case IrOpcode::kBigIntAdd: + CheckValueInputIs(node, 0, Type::BigInt()); + CheckValueInputIs(node, 1, Type::BigInt()); + CheckTypeIs(node, Type::BigInt()); + break; + case IrOpcode::kBigIntNegate: + CheckValueInputIs(node, 0, Type::BigInt()); + CheckTypeIs(node, Type::BigInt()); + break; case IrOpcode::kNumberAdd: case IrOpcode::kNumberSubtract: case IrOpcode::kNumberMultiply: @@ -1156,6 +1176,12 @@ void Verifier::Visitor::Check(Node* node, const AllNodes& all) { CheckValueInputIs(node, 0, Type::Number()); CheckTypeIs(node, Type::String()); break; + case IrOpcode::kStringFromCodePointAt: + // (String, Unsigned32) -> UnsignedSmall + CheckValueInputIs(node, 0, Type::String()); + CheckValueInputIs(node, 1, Type::Unsigned32()); + CheckTypeIs(node, Type::String()); + break; case IrOpcode::kStringIndexOf: // (String, String, SignedSmall) -> SignedSmall CheckValueInputIs(node, 0, Type::String()); @@ -1306,6 +1332,7 @@ void Verifier::Visitor::Check(Node* node, const AllNodes& all) { CheckNotTyped(node); break; + case IrOpcode::kChangeCompressedSignedToInt32: case IrOpcode::kChangeTaggedSignedToInt32: { // Signed32 /\ Tagged -> Signed32 /\ UntaggedInt32 // TODO(neis): Activate once ChangeRepresentation works in typer. @@ -1360,6 +1387,7 @@ void Verifier::Visitor::Check(Node* node, const AllNodes& all) { // CheckTypeIs(node, to)); break; } + case IrOpcode::kChangeInt31ToCompressedSigned: case IrOpcode::kChangeInt31ToTaggedSigned: { // Signed31 /\ UntaggedInt32 -> Signed31 /\ Tagged // TODO(neis): Activate once ChangeRepresentation works in typer. 
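The verifier additions follow one template per opcode: assert the type of each value input, then assert the node's own type. A toy rendering with a flat type enum instead of V8's bitset lattice (the real {CheckTypeIs} checks subtyping, not equality):

#include <cassert>
#include <cstddef>
#include <vector>

enum class Type { kAny, kBigInt, kString, kUnsigned32 };

struct Node {
  Type type;
  std::vector<Node*> inputs;
};

void CheckValueInputIs(const Node* node, size_t i, Type expected) {
  assert(i < node->inputs.size());
  assert(node->inputs[i]->type == expected);
}

void CheckTypeIs(const Node* node, Type expected) {
  assert(node->type == expected);
}

// BigIntAdd: (BigInt, BigInt) -> BigInt, as in the hunk above.
void VerifyBigIntAdd(const Node* node) {
  CheckValueInputIs(node, 0, Type::kBigInt);
  CheckValueInputIs(node, 1, Type::kBigInt);
  CheckTypeIs(node, Type::kBigInt);
}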
@@ -1429,6 +1457,14 @@ void Verifier::Visitor::Check(Node* node, const AllNodes& all) { // CheckTypeIs(node, to)); break; } + case IrOpcode::kTruncateBigIntToUint64: + CheckValueInputIs(node, 0, Type::BigInt()); + CheckTypeIs(node, Type::BigInt()); + break; + case IrOpcode::kChangeUint64ToBigInt: + CheckValueInputIs(node, 0, Type::BigInt()); + CheckTypeIs(node, Type::BigInt()); + break; case IrOpcode::kTruncateTaggedToBit: case IrOpcode::kTruncateTaggedPointerToBit: break; @@ -1498,6 +1534,7 @@ void Verifier::Visitor::Check(Node* node, const AllNodes& all) { case IrOpcode::kCheckedUint32Div: case IrOpcode::kCheckedUint32Mod: case IrOpcode::kCheckedInt32Mul: + case IrOpcode::kCheckedInt32ToCompressedSigned: case IrOpcode::kCheckedInt32ToTaggedSigned: case IrOpcode::kCheckedInt64ToInt32: case IrOpcode::kCheckedInt64ToTaggedSigned: @@ -1520,6 +1557,7 @@ void Verifier::Visitor::Check(Node* node, const AllNodes& all) { case IrOpcode::kCheckedTaggedToCompressedSigned: case IrOpcode::kCheckedTaggedToCompressedPointer: case IrOpcode::kCheckedTruncateTaggedToWord32: + case IrOpcode::kAssertType: break; case IrOpcode::kCheckFloat64Hole: @@ -1619,6 +1657,10 @@ void Verifier::Visitor::Check(Node* node, const AllNodes& all) { CHECK_EQ(0, value_count); CheckTypeIs(node, Type::Number()); break; + case IrOpcode::kCheckBigInt: + CheckValueInputIs(node, 0, Type::Any()); + CheckTypeIs(node, Type::BigInt()); + break; // Machine operators // ----------------------- @@ -1755,6 +1797,7 @@ void Verifier::Visitor::Check(Node* node, const AllNodes& all) { case IrOpcode::kBitcastInt32ToFloat32: case IrOpcode::kBitcastInt64ToFloat64: case IrOpcode::kBitcastTaggedToWord: + case IrOpcode::kBitcastTaggedSignedToWord: case IrOpcode::kBitcastWordToTagged: case IrOpcode::kBitcastWordToTaggedSigned: case IrOpcode::kChangeInt32ToInt64: @@ -1800,6 +1843,7 @@ void Verifier::Visitor::Check(Node* node, const AllNodes& all) { case IrOpcode::kLoadParentFramePointer: case IrOpcode::kUnalignedLoad: case IrOpcode::kUnalignedStore: + case IrOpcode::kMemoryBarrier: case IrOpcode::kWord32AtomicLoad: case IrOpcode::kWord32AtomicStore: case IrOpcode::kWord32AtomicExchange: diff --git a/deps/v8/src/compiler/wasm-compiler.cc b/deps/v8/src/compiler/wasm-compiler.cc index 3396214e5894b3..2da7177ece2d54 100644 --- a/deps/v8/src/compiler/wasm-compiler.cc +++ b/deps/v8/src/compiler/wasm-compiler.cc @@ -14,6 +14,7 @@ #include "src/codegen/assembler-inl.h" #include "src/codegen/assembler.h" #include "src/codegen/code-factory.h" +#include "src/codegen/compiler.h" #include "src/codegen/interface-descriptors.h" #include "src/codegen/optimized-compilation-info.h" #include "src/compiler/backend/code-generator.h" @@ -276,8 +277,9 @@ Node* WasmGraphBuilder::EffectPhi(unsigned count, Node** effects, } Node* WasmGraphBuilder::RefNull() { - return LOAD_INSTANCE_FIELD(NullValue, - MachineType::TypeCompressedTaggedPointer()); + Node* isolate_root = LOAD_INSTANCE_FIELD(IsolateRoot, MachineType::Pointer()); + return LOAD_TAGGED_POINTER( + isolate_root, IsolateData::root_slot_offset(RootIndex::kNullValue)); } Node* WasmGraphBuilder::RefFunc(uint32_t function_index) { @@ -2195,8 +2197,8 @@ Node* WasmGraphBuilder::Throw(uint32_t exception_index, graph()->NewNode(m->I32x4ExtractLane(3), value)); break; case wasm::kWasmAnyRef: - case wasm::kWasmAnyFunc: - case wasm::kWasmExceptRef: + case wasm::kWasmFuncRef: + case wasm::kWasmExnRef: STORE_FIXED_ARRAY_SLOT_ANY(values_array, index, value); ++index; break; @@ -2334,8 +2336,8 @@ Node** 
WasmGraphBuilder::GetExceptionValues( BuildDecodeException32BitValue(values_array, &index)); break; case wasm::kWasmAnyRef: - case wasm::kWasmAnyFunc: - case wasm::kWasmExceptRef: + case wasm::kWasmFuncRef: + case wasm::kWasmExnRef: value = LOAD_FIXED_ARRAY_SLOT_ANY(values_array, index); ++index; break; @@ -2853,25 +2855,69 @@ Node* WasmGraphBuilder::CallDirect(uint32_t index, Node** args, Node*** rets, Node* WasmGraphBuilder::CallIndirect(uint32_t table_index, uint32_t sig_index, Node** args, Node*** rets, wasm::WasmCodePosition position) { - if (table_index == 0) { - return BuildIndirectCall(sig_index, args, rets, position, kCallContinues); - } return BuildIndirectCall(table_index, sig_index, args, rets, position, kCallContinues); } -Node* WasmGraphBuilder::BuildIndirectCall(uint32_t sig_index, Node** args, +void WasmGraphBuilder::LoadIndirectFunctionTable(uint32_t table_index, + Node** ift_size, + Node** ift_sig_ids, + Node** ift_targets, + Node** ift_instances) { + if (table_index == 0) { + *ift_size = + LOAD_INSTANCE_FIELD(IndirectFunctionTableSize, MachineType::Uint32()); + *ift_sig_ids = LOAD_INSTANCE_FIELD(IndirectFunctionTableSigIds, + MachineType::Pointer()); + *ift_targets = LOAD_INSTANCE_FIELD(IndirectFunctionTableTargets, + MachineType::Pointer()); + *ift_instances = LOAD_INSTANCE_FIELD( + IndirectFunctionTableRefs, MachineType::TypeCompressedTaggedPointer()); + return; + } + + Node* ift_tables = LOAD_INSTANCE_FIELD( + IndirectFunctionTables, MachineType::TypeCompressedTaggedPointer()); + Node* ift_table = LOAD_FIXED_ARRAY_SLOT_ANY(ift_tables, table_index); + + *ift_size = LOAD_RAW( + ift_table, + wasm::ObjectAccess::ToTagged(WasmIndirectFunctionTable::kSizeOffset), + MachineType::Int32()); + + *ift_sig_ids = LOAD_RAW( + ift_table, + wasm::ObjectAccess::ToTagged(WasmIndirectFunctionTable::kSigIdsOffset), + MachineType::Pointer()); + + *ift_targets = LOAD_RAW( + ift_table, + wasm::ObjectAccess::ToTagged(WasmIndirectFunctionTable::kTargetsOffset), + MachineType::Pointer()); + + *ift_instances = LOAD_RAW( + ift_table, + wasm::ObjectAccess::ToTagged(WasmIndirectFunctionTable::kRefsOffset), + MachineType::TypeCompressedTaggedPointer()); +} + +Node* WasmGraphBuilder::BuildIndirectCall(uint32_t table_index, + uint32_t sig_index, Node** args, Node*** rets, wasm::WasmCodePosition position, IsReturnCall continuation) { DCHECK_NOT_NULL(args[0]); DCHECK_NOT_NULL(env_); - // Assume only one table for now. - wasm::FunctionSig* sig = env_->module->signatures[sig_index]; + // First we have to load the table. + Node* ift_size; + Node* ift_sig_ids; + Node* ift_targets; + Node* ift_instances; + LoadIndirectFunctionTable(table_index, &ift_size, &ift_sig_ids, &ift_targets, + &ift_instances); - Node* ift_size = - LOAD_INSTANCE_FIELD(IndirectFunctionTableSize, MachineType::Uint32()); + wasm::FunctionSig* sig = env_->module->signatures[sig_index]; MachineOperatorBuilder* machine = mcgraph()->machine(); Node* key = args[0]; @@ -2894,9 +2940,6 @@ Node* WasmGraphBuilder::BuildIndirectCall(uint32_t sig_index, Node** args, } // Load signature from the table and check. 
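{BuildIndirectCall} now loads the dispatch table per table index, but the call sequence keeps its classic shape: bounds check, signature check, target load. The same logic as straight-line C++ (the graph builder emits nodes and traps rather than throwing; the struct below is a simplification of {WasmIndirectFunctionTable}):

#include <cstdint>
#include <stdexcept>

struct IndirectFunctionTable {
  uint32_t size;
  const int32_t* sig_ids;    // canonical signature id per slot
  const uintptr_t* targets;  // code entry address per slot
};

uintptr_t ResolveIndirectCall(const IndirectFunctionTable& table,
                              uint32_t key, int32_t expected_sig_id) {
  if (key >= table.size)
    throw std::runtime_error("trap: kTrapFuncInvalid");      // bounds check
  if (table.sig_ids[key] != expected_sig_id)
    throw std::runtime_error("trap: kTrapFuncSigMismatch");  // sig check
  return table.targets[key];                                 // target load
}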
- Node* ift_sig_ids = - LOAD_INSTANCE_FIELD(IndirectFunctionTableSigIds, MachineType::Pointer()); - int32_t expected_sig_id = env_->module->signature_ids[sig_index]; Node* int32_scaled_key = Uint32ToUintptr( graph()->NewNode(machine->Word32Shl(), key, Int32Constant(2))); @@ -2909,11 +2952,6 @@ Node* WasmGraphBuilder::BuildIndirectCall(uint32_t sig_index, Node** args, TrapIfFalse(wasm::kTrapFuncSigMismatch, sig_match, position); - Node* ift_targets = - LOAD_INSTANCE_FIELD(IndirectFunctionTableTargets, MachineType::Pointer()); - Node* ift_instances = LOAD_INSTANCE_FIELD( - IndirectFunctionTableRefs, MachineType::TypeCompressedTaggedPointer()); - Node* tagged_scaled_key; if (kTaggedSize == kInt32Size) { tagged_scaled_key = int32_scaled_key; @@ -2955,48 +2993,6 @@ Node* WasmGraphBuilder::BuildIndirectCall(uint32_t sig_index, Node** args, } } -Node* WasmGraphBuilder::BuildIndirectCall(uint32_t table_index, - uint32_t sig_index, Node** args, - Node*** rets, - wasm::WasmCodePosition position, - IsReturnCall continuation) { - DCHECK_NOT_NULL(args[0]); - Node* entry_index = args[0]; - DCHECK_NOT_NULL(env_); - BoundsCheckTable(table_index, entry_index, position, wasm::kTrapFuncInvalid, - nullptr); - - DCHECK(Smi::IsValid(table_index)); - DCHECK(Smi::IsValid(sig_index)); - Node* runtime_args[]{ - graph()->NewNode(mcgraph()->common()->NumberConstant(table_index)), - BuildChangeUint31ToSmi(entry_index), - graph()->NewNode(mcgraph()->common()->NumberConstant(sig_index))}; - - Node* target_instance = BuildCallToRuntime( - Runtime::kWasmIndirectCallCheckSignatureAndGetTargetInstance, - runtime_args, arraysize(runtime_args)); - - // We reuse the runtime_args array here, even though we only need the first - // two arguments. - Node* call_target = BuildCallToRuntime( - Runtime::kWasmIndirectCallGetTargetAddress, runtime_args, 2); - - wasm::FunctionSig* sig = env_->module->signatures[sig_index]; - args[0] = call_target; - const UseRetpoline use_retpoline = - untrusted_code_mitigations_ ? kRetpoline : kNoRetpoline; - - switch (continuation) { - case kCallContinues: - return BuildWasmCall(sig, args, rets, position, target_instance, - use_retpoline); - case kReturnCall: - return BuildWasmReturnCall(sig, args, position, target_instance, - use_retpoline); - } -} - Node* WasmGraphBuilder::ReturnCall(uint32_t index, Node** args, wasm::WasmCodePosition position) { DCHECK_NULL(args[0]); @@ -3019,9 +3015,6 @@ Node* WasmGraphBuilder::ReturnCall(uint32_t index, Node** args, Node* WasmGraphBuilder::ReturnCallIndirect(uint32_t table_index, uint32_t sig_index, Node** args, wasm::WasmCodePosition position) { - if (table_index == 0) { - return BuildIndirectCall(sig_index, args, nullptr, position, kReturnCall); - } return BuildIndirectCall(table_index, sig_index, args, nullptr, position, kReturnCall); } @@ -3324,13 +3317,6 @@ Node* WasmGraphBuilder::CurrentMemoryPages() { return result; } -Node* WasmGraphBuilder::BuildLoadBuiltinFromInstance(int builtin_index) { - DCHECK(Builtins::IsBuiltinId(builtin_index)); - Node* isolate_root = LOAD_INSTANCE_FIELD(IsolateRoot, MachineType::Pointer()); - return LOAD_TAGGED_POINTER(isolate_root, - IsolateData::builtin_slot_offset(builtin_index)); -} - // Only call this function for code which is not reused across instantiations, // as we do not patch the embedded js_context. 
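Several loads in this file now go through the isolate root instead of embedding heap constants: the generated code stays isolate-independent because only statically known offsets from the instance's {IsolateRoot} are baked in. A loose sketch of the addressing scheme; both offsets below are invented for illustration and are not V8's layout:

#include <cstdint>

constexpr intptr_t kNullValueRootOffset = 0x40;  // hypothetical root slot
constexpr intptr_t kBuiltinTableOffset = 0x200;  // hypothetical table base

// Equivalent of LOAD_TAGGED_POINTER(isolate_root, root_slot_offset(...)).
uintptr_t LoadNullValueRoot(uintptr_t isolate_root) {
  return *reinterpret_cast<const uintptr_t*>(isolate_root +
                                             kNullValueRootOffset);
}

// Equivalent of loading builtin_slot_offset(builtin_index).
uintptr_t LoadBuiltin(uintptr_t isolate_root, int builtin_index) {
  intptr_t offset = kBuiltinTableOffset +
                    static_cast<intptr_t>(builtin_index * sizeof(uintptr_t));
  return *reinterpret_cast<const uintptr_t*>(isolate_root + offset);
}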
Node* WasmGraphBuilder::BuildCallToRuntimeWithContext( @@ -3492,7 +3478,7 @@ void WasmGraphBuilder::GetTableBaseAndOffset(uint32_t table_index, wasm::ObjectAccess::ElementOffsetInTaggedFixedArray(0))); } -Node* WasmGraphBuilder::GetTable(uint32_t table_index, Node* index, +Node* WasmGraphBuilder::TableGet(uint32_t table_index, Node* index, wasm::WasmCodePosition position) { if (env_->module->tables[table_index].type == wasm::kWasmAnyRef) { Node* base = nullptr; @@ -3501,7 +3487,7 @@ Node* WasmGraphBuilder::GetTable(uint32_t table_index, Node* index, return LOAD_RAW_NODE_OFFSET(base, offset, MachineType::TypeCompressedTagged()); } - // We access anyfunc tables through runtime calls. + // We access funcref tables through runtime calls. WasmTableGetDescriptor interface_descriptor; auto call_descriptor = Linkage::GetStubCallDescriptor( mcgraph()->zone(), // zone @@ -3521,7 +3507,7 @@ Node* WasmGraphBuilder::GetTable(uint32_t table_index, Node* index, Effect(), Control()))); } -Node* WasmGraphBuilder::SetTable(uint32_t table_index, Node* index, Node* val, +Node* WasmGraphBuilder::TableSet(uint32_t table_index, Node* index, Node* val, wasm::WasmCodePosition position) { if (env_->module->tables[table_index].type == wasm::kWasmAnyRef) { Node* base = nullptr; @@ -3530,7 +3516,7 @@ Node* WasmGraphBuilder::SetTable(uint32_t table_index, Node* index, Node* val, return STORE_RAW_NODE_OFFSET( base, offset, val, MachineRepresentation::kTagged, kFullWriteBarrier); } else { - // We access anyfunc tables through runtime calls. + // We access funcref tables through runtime calls. WasmTableSetDescriptor interface_descriptor; auto call_descriptor = Linkage::GetStubCallDescriptor( mcgraph()->zone(), // zone @@ -4000,6 +3986,30 @@ Node* WasmGraphBuilder::S128Zero() { Node* WasmGraphBuilder::SimdOp(wasm::WasmOpcode opcode, Node* const* inputs) { has_simd_ = true; switch (opcode) { + case wasm::kExprF64x2Splat: + return graph()->NewNode(mcgraph()->machine()->F64x2Splat(), inputs[0]); + case wasm::kExprF64x2Abs: + return graph()->NewNode(mcgraph()->machine()->F64x2Abs(), inputs[0]); + case wasm::kExprF64x2Neg: + return graph()->NewNode(mcgraph()->machine()->F64x2Neg(), inputs[0]); + case wasm::kExprF64x2Eq: + return graph()->NewNode(mcgraph()->machine()->F64x2Eq(), inputs[0], + inputs[1]); + case wasm::kExprF64x2Ne: + return graph()->NewNode(mcgraph()->machine()->F64x2Ne(), inputs[0], + inputs[1]); + case wasm::kExprF64x2Lt: + return graph()->NewNode(mcgraph()->machine()->F64x2Lt(), inputs[0], + inputs[1]); + case wasm::kExprF64x2Le: + return graph()->NewNode(mcgraph()->machine()->F64x2Le(), inputs[0], + inputs[1]); + case wasm::kExprF64x2Gt: + return graph()->NewNode(mcgraph()->machine()->F64x2Lt(), inputs[1], + inputs[0]); + case wasm::kExprF64x2Ge: + return graph()->NewNode(mcgraph()->machine()->F64x2Le(), inputs[1], + inputs[0]); case wasm::kExprF32x4Splat: return graph()->NewNode(mcgraph()->machine()->F32x4Splat(), inputs[0]); case wasm::kExprF32x4SConvertI32x4: @@ -4054,6 +4064,49 @@ Node* WasmGraphBuilder::SimdOp(wasm::WasmOpcode opcode, Node* const* inputs) { case wasm::kExprF32x4Ge: return graph()->NewNode(mcgraph()->machine()->F32x4Le(), inputs[1], inputs[0]); + case wasm::kExprI64x2Splat: + return graph()->NewNode(mcgraph()->machine()->I64x2Splat(), inputs[0]); + case wasm::kExprI64x2Neg: + return graph()->NewNode(mcgraph()->machine()->I64x2Neg(), inputs[0]); + case wasm::kExprI64x2Add: + return graph()->NewNode(mcgraph()->machine()->I64x2Add(), inputs[0], + inputs[1]); + case wasm::kExprI64x2Sub: + 
return graph()->NewNode(mcgraph()->machine()->I64x2Sub(), inputs[0], + inputs[1]); + case wasm::kExprI64x2Mul: + return graph()->NewNode(mcgraph()->machine()->I64x2Mul(), inputs[0], + inputs[1]); + case wasm::kExprI64x2Eq: + return graph()->NewNode(mcgraph()->machine()->I64x2Eq(), inputs[0], + inputs[1]); + case wasm::kExprI64x2Ne: + return graph()->NewNode(mcgraph()->machine()->I64x2Ne(), inputs[0], + inputs[1]); + case wasm::kExprI64x2LtS: + return graph()->NewNode(mcgraph()->machine()->I64x2GtS(), inputs[1], + inputs[0]); + case wasm::kExprI64x2LeS: + return graph()->NewNode(mcgraph()->machine()->I64x2GeS(), inputs[1], + inputs[0]); + case wasm::kExprI64x2GtS: + return graph()->NewNode(mcgraph()->machine()->I64x2GtS(), inputs[0], + inputs[1]); + case wasm::kExprI64x2GeS: + return graph()->NewNode(mcgraph()->machine()->I64x2GeS(), inputs[0], + inputs[1]); + case wasm::kExprI64x2LtU: + return graph()->NewNode(mcgraph()->machine()->I64x2GtU(), inputs[1], + inputs[0]); + case wasm::kExprI64x2LeU: + return graph()->NewNode(mcgraph()->machine()->I64x2GeU(), inputs[1], + inputs[0]); + case wasm::kExprI64x2GtU: + return graph()->NewNode(mcgraph()->machine()->I64x2GtU(), inputs[0], + inputs[1]); + case wasm::kExprI64x2GeU: + return graph()->NewNode(mcgraph()->machine()->I64x2GeU(), inputs[0], + inputs[1]); case wasm::kExprI32x4Splat: return graph()->NewNode(mcgraph()->machine()->I32x4Splat(), inputs[0]); case wasm::kExprI32x4SConvertF32x4: @@ -4305,6 +4358,10 @@ Node* WasmGraphBuilder::SimdOp(wasm::WasmOpcode opcode, Node* const* inputs) { case wasm::kExprS128Select: return graph()->NewNode(mcgraph()->machine()->S128Select(), inputs[2], inputs[0], inputs[1]); + case wasm::kExprS1x2AnyTrue: + return graph()->NewNode(mcgraph()->machine()->S1x2AnyTrue(), inputs[0]); + case wasm::kExprS1x2AllTrue: + return graph()->NewNode(mcgraph()->machine()->S1x2AllTrue(), inputs[0]); case wasm::kExprS1x4AnyTrue: return graph()->NewNode(mcgraph()->machine()->S1x4AnyTrue(), inputs[0]); case wasm::kExprS1x4AllTrue: @@ -4326,12 +4383,24 @@ Node* WasmGraphBuilder::SimdLaneOp(wasm::WasmOpcode opcode, uint8_t lane, Node* const* inputs) { has_simd_ = true; switch (opcode) { + case wasm::kExprF64x2ExtractLane: + return graph()->NewNode(mcgraph()->machine()->F64x2ExtractLane(lane), + inputs[0]); + case wasm::kExprF64x2ReplaceLane: + return graph()->NewNode(mcgraph()->machine()->F64x2ReplaceLane(lane), + inputs[0], inputs[1]); case wasm::kExprF32x4ExtractLane: return graph()->NewNode(mcgraph()->machine()->F32x4ExtractLane(lane), inputs[0]); case wasm::kExprF32x4ReplaceLane: return graph()->NewNode(mcgraph()->machine()->F32x4ReplaceLane(lane), inputs[0], inputs[1]); + case wasm::kExprI64x2ExtractLane: + return graph()->NewNode(mcgraph()->machine()->I64x2ExtractLane(lane), + inputs[0]); + case wasm::kExprI64x2ReplaceLane: + return graph()->NewNode(mcgraph()->machine()->I64x2ReplaceLane(lane), + inputs[0], inputs[1]); case wasm::kExprI32x4ExtractLane: return graph()->NewNode(mcgraph()->machine()->I32x4ExtractLane(lane), inputs[0]); @@ -4359,6 +4428,14 @@ Node* WasmGraphBuilder::SimdShiftOp(wasm::WasmOpcode opcode, uint8_t shift, Node* const* inputs) { has_simd_ = true; switch (opcode) { + case wasm::kExprI64x2Shl: + return graph()->NewNode(mcgraph()->machine()->I64x2Shl(shift), inputs[0]); + case wasm::kExprI64x2ShrS: + return graph()->NewNode(mcgraph()->machine()->I64x2ShrS(shift), + inputs[0]); + case wasm::kExprI64x2ShrU: + return graph()->NewNode(mcgraph()->machine()->I64x2ShrU(shift), + inputs[0]); case 
wasm::kExprI32x4Shl: return graph()->NewNode(mcgraph()->machine()->I32x4Shl(shift), inputs[0]); case wasm::kExprI32x4ShrS: @@ -4612,6 +4689,11 @@ Node* WasmGraphBuilder::AtomicOp(wasm::WasmOpcode opcode, Node* const* inputs, return SetEffect(node); } +Node* WasmGraphBuilder::AtomicFence() { + return SetEffect(graph()->NewNode(mcgraph()->machine()->MemBarrier(), + Effect(), Control())); +} + #undef ATOMIC_BINOP_LIST #undef ATOMIC_CMP_EXCHG_LIST #undef ATOMIC_LOAD_LIST @@ -4636,8 +4718,19 @@ Node* WasmGraphBuilder::MemoryInit(uint32_t data_segment_index, Node* dst, Node* src, Node* size, wasm::WasmCodePosition position) { CheckDataSegmentIsPassiveAndNotDropped(data_segment_index, position); - Node* dst_fail = BoundsCheckMemRange(&dst, &size, position); auto m = mcgraph()->machine(); + auto common = mcgraph()->common(); + Node* size_null_check = + graph()->NewNode(m->Word32Equal(), size, mcgraph()->Int32Constant(0)); + Node* size_null_branch = graph()->NewNode(common->Branch(BranchHint::kFalse), + size_null_check, Control()); + + Node* size_null_etrue = Effect(); + Node* size_null_if_false = + graph()->NewNode(common->IfFalse(), size_null_branch); + SetControl(size_null_if_false); + + Node* dst_fail = BoundsCheckMemRange(&dst, &size, position); Node* seg_index = Uint32Constant(data_segment_index); Node* src_fail; @@ -4679,9 +4772,16 @@ Node* WasmGraphBuilder::MemoryInit(uint32_t data_segment_index, Node* dst, MachineType::Uint32()}; MachineSignature sig(0, 3, sig_types); BuildCCall(&sig, function, dst, src, size); - return TrapIfTrue(wasm::kTrapMemOutOfBounds, - graph()->NewNode(m->Word32Or(), dst_fail, src_fail), - position); + TrapIfTrue(wasm::kTrapMemOutOfBounds, + graph()->NewNode(m->Word32Or(), dst_fail, src_fail), position); + Node* size_null_if_true = + graph()->NewNode(common->IfTrue(), size_null_branch); + + Node* merge = SetControl( + graph()->NewNode(common->Merge(2), size_null_if_true, Control())); + SetEffect( + graph()->NewNode(common->EffectPhi(2), size_null_etrue, Effect(), merge)); + return merge; } Node* WasmGraphBuilder::DataDrop(uint32_t data_segment_index, @@ -4699,16 +4799,19 @@ Node* WasmGraphBuilder::DataDrop(uint32_t data_segment_index, Node* WasmGraphBuilder::MemoryCopy(Node* dst, Node* src, Node* size, wasm::WasmCodePosition position) { auto m = mcgraph()->machine(); - // The data must be copied backward if the regions overlap and src < dst. The - // regions overlap if {src + size > dst && dst + size > src}. Since we already - // test that {src < dst}, we know that {dst + size > src}, so this simplifies - // to just {src + size > dst}. That sum can overflow, but if we subtract - // {size} from both sides of the inequality we get the equivalent test - // {size > dst - src}. - Node* copy_backward = graph()->NewNode( - m->Word32And(), graph()->NewNode(m->Uint32LessThan(), src, dst), - graph()->NewNode(m->Uint32LessThan(), - graph()->NewNode(m->Int32Sub(), dst, src), size)); + auto common = mcgraph()->common(); + // If size == 0, then memory.copy is a no-op. + Node* size_null_check = + graph()->NewNode(m->Word32Equal(), size, mcgraph()->Int32Constant(0)); + Node* size_null_branch = graph()->NewNode(common->Branch(BranchHint::kFalse), + size_null_check, Control()); + + Node* size_null_etrue = Effect(); + Node* size_null_if_false = + graph()->NewNode(common->IfFalse(), size_null_branch); + SetControl(size_null_if_false); + // The data must be copied backward if src < dst. 
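The new guard in {MemoryInit} (and in {MemoryCopy} and {MemoryFill} below) exists because a zero-length operation must be a no-op rather than a trap, so the bounds checks move onto the not-taken side of a size == 0 branch; in the graph this is the Branch/IfFalse/IfTrue/Merge/EffectPhi diamond built above. The same semantics as plain C++ (a sketch; the real bounds checks are overflow-safe, which this simplification glosses over):

#include <cstdint>
#include <cstring>
#include <stdexcept>

void MemoryCopy(uint8_t* mem, uint64_t mem_size, uint64_t dst, uint64_t src,
                uint64_t size) {
  if (size == 0) return;  // BranchHint::kFalse: the no-op case is unlikely
  if (dst + size > mem_size || src + size > mem_size)
    throw std::runtime_error("trap: kTrapMemOutOfBounds");
  // memmove copies backward when src < dst and the ranges overlap, which is
  // the case the copy_backward computation below feeds into the C helper.
  std::memmove(mem + dst, mem + src, size);
}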
+ Node* copy_backward = graph()->NewNode(m->Uint32LessThan(), src, dst); Node* dst_fail = BoundsCheckMemRange(&dst, &size, position); @@ -4728,13 +4831,32 @@ Node* WasmGraphBuilder::MemoryCopy(Node* dst, Node* src, Node* size, MachineType::Uint32()}; MachineSignature sig(0, 3, sig_types); BuildCCall(&sig, function, dst, src, size); - return TrapIfTrue(wasm::kTrapMemOutOfBounds, - graph()->NewNode(m->Word32Or(), dst_fail, src_fail), - position); + TrapIfTrue(wasm::kTrapMemOutOfBounds, + graph()->NewNode(m->Word32Or(), dst_fail, src_fail), position); + Node* size_null_if_true = + graph()->NewNode(common->IfTrue(), size_null_branch); + + Node* merge = SetControl( + graph()->NewNode(common->Merge(2), size_null_if_true, Control())); + SetEffect( + graph()->NewNode(common->EffectPhi(2), size_null_etrue, Effect(), merge)); + return merge; } Node* WasmGraphBuilder::MemoryFill(Node* dst, Node* value, Node* size, wasm::WasmCodePosition position) { + auto machine = mcgraph()->machine(); + auto common = mcgraph()->common(); + // If size == 0, then memory.fill is a no-op. + Node* size_null_check = graph()->NewNode(machine->Word32Equal(), size, + mcgraph()->Int32Constant(0)); + Node* size_null_branch = graph()->NewNode(common->Branch(BranchHint::kFalse), + size_null_check, Control()); + + Node* size_null_etrue = Effect(); + Node* size_null_if_false = + graph()->NewNode(common->IfFalse(), size_null_branch); + SetControl(size_null_if_false); Node* fail = BoundsCheckMemRange(&dst, &size, position); Node* function = graph()->NewNode(mcgraph()->common()->ExternalConstant( ExternalReference::wasm_memory_fill())); @@ -4742,7 +4864,15 @@ Node* WasmGraphBuilder::MemoryFill(Node* dst, Node* value, Node* size, MachineType::Uint32()}; MachineSignature sig(0, 3, sig_types); BuildCCall(&sig, function, dst, value, size); - return TrapIfTrue(wasm::kTrapMemOutOfBounds, fail, position); + TrapIfTrue(wasm::kTrapMemOutOfBounds, fail, position); + Node* size_null_if_true = + graph()->NewNode(common->IfTrue(), size_null_branch); + + Node* merge = SetControl( + graph()->NewNode(common->Merge(2), size_null_if_true, Control())); + SetEffect( + graph()->NewNode(common->EffectPhi(2), size_null_etrue, Effect(), merge)); + return merge; } Node* WasmGraphBuilder::CheckElemSegmentIsPassiveAndNotDropped( @@ -4789,13 +4919,13 @@ Node* WasmGraphBuilder::ElemDrop(uint32_t elem_segment_index, mcgraph()->Int32Constant(1), Effect(), Control())); } -Node* WasmGraphBuilder::TableCopy(uint32_t table_src_index, - uint32_t table_dst_index, Node* dst, +Node* WasmGraphBuilder::TableCopy(uint32_t table_dst_index, + uint32_t table_src_index, Node* dst, Node* src, Node* size, wasm::WasmCodePosition position) { Node* args[] = { - graph()->NewNode(mcgraph()->common()->NumberConstant(table_src_index)), graph()->NewNode(mcgraph()->common()->NumberConstant(table_dst_index)), + graph()->NewNode(mcgraph()->common()->NumberConstant(table_src_index)), BuildConvertUint32ToSmiWithSaturation(dst, FLAG_wasm_max_table_size), BuildConvertUint32ToSmiWithSaturation(src, FLAG_wasm_max_table_size), BuildConvertUint32ToSmiWithSaturation(size, FLAG_wasm_max_table_size)}; @@ -4878,28 +5008,6 @@ void WasmGraphBuilder::RemoveBytecodePositionDecorator() { } namespace { -bool must_record_function_compilation(Isolate* isolate) { - return isolate->logger()->is_listening_to_code_events() || - isolate->is_profiling(); -} - -PRINTF_FORMAT(4, 5) -void RecordFunctionCompilation(CodeEventListener::LogEventsAndTags tag, - Isolate* isolate, Handle<Code> code, - const char* format, ...)
{ - DCHECK(must_record_function_compilation(isolate)); - - ScopedVector buffer(128); - va_list arguments; - va_start(arguments, format); - int len = VSNPrintF(buffer, format, arguments); - CHECK_LT(0, len); - va_end(arguments); - Handle name_str = - isolate->factory()->NewStringFromAsciiChecked(buffer.begin()); - PROFILE(isolate, CodeCreateEvent(tag, AbstractCode::cast(*code), *name_str)); -} - class WasmWrapperGraphBuilder : public WasmGraphBuilder { public: WasmWrapperGraphBuilder(Zone* zone, JSGraph* jsgraph, wasm::FunctionSig* sig, @@ -4914,12 +5022,12 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder { Node* BuildAllocateHeapNumberWithValue(Node* value, Node* control) { MachineOperatorBuilder* machine = mcgraph()->machine(); CommonOperatorBuilder* common = mcgraph()->common(); - Node* target = (stub_mode_ == StubCallMode::kCallWasmRuntimeStub) - ? mcgraph()->RelocatableIntPtrConstant( - wasm::WasmCode::kWasmAllocateHeapNumber, - RelocInfo::WASM_STUB_CALL) - : jsgraph()->HeapConstant( - BUILTIN_CODE(isolate_, AllocateHeapNumber)); + Node* target = + (stub_mode_ == StubCallMode::kCallWasmRuntimeStub) + ? mcgraph()->RelocatableIntPtrConstant( + wasm::WasmCode::kWasmAllocateHeapNumber, + RelocInfo::WASM_STUB_CALL) + : BuildLoadBuiltinFromInstance(Builtins::kAllocateHeapNumber); if (!allocate_heap_number_operator_.is_set()) { auto call_descriptor = Linkage::GetStubCallDescriptor( mcgraph()->zone(), AllocateHeapNumberDescriptor(), 0, @@ -4956,6 +5064,34 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder { return mcgraph()->IntPtrConstant(HeapNumber::kValueOffset - kHeapObjectTag); } + Node* BuildLoadUndefinedValueFromInstance() { + if (undefined_value_node_ == nullptr) { + Node* isolate_root = graph()->NewNode( + mcgraph()->machine()->Load(MachineType::Pointer()), + instance_node_.get(), + mcgraph()->Int32Constant(WASM_INSTANCE_OBJECT_OFFSET(IsolateRoot)), + graph()->start(), graph()->start()); + undefined_value_node_ = InsertDecompressionIfNeeded( + MachineType::TypeCompressedTaggedPointer(), + graph()->NewNode( + mcgraph()->machine()->Load( + MachineType::TypeCompressedTaggedPointer()), + isolate_root, + mcgraph()->Int32Constant( + IsolateData::root_slot_offset(RootIndex::kUndefinedValue)), + isolate_root, graph()->start())); + } + return undefined_value_node_.get(); + } + + Node* BuildLoadBuiltinFromInstance(int builtin_index) { + DCHECK(Builtins::IsBuiltinId(builtin_index)); + Node* isolate_root = + LOAD_INSTANCE_FIELD(IsolateRoot, MachineType::Pointer()); + return LOAD_TAGGED_POINTER(isolate_root, + IsolateData::builtin_slot_offset(builtin_index)); + } + Node* BuildChangeInt32ToTagged(Node* value) { MachineOperatorBuilder* machine = mcgraph()->machine(); CommonOperatorBuilder* common = mcgraph()->common(); @@ -5096,7 +5232,7 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder { (stub_mode_ == StubCallMode::kCallWasmRuntimeStub) ? 
mcgraph()->RelocatableIntPtrConstant( wasm::WasmCode::kWasmToNumber, RelocInfo::WASM_STUB_CALL) - : jsgraph()->HeapConstant(BUILTIN_CODE(isolate_, ToNumber)); + : BuildLoadBuiltinFromInstance(Builtins::kToNumber); Node* result = SetEffect( graph()->NewNode(mcgraph()->common()->Call(call_descriptor), stub_code, @@ -5126,8 +5262,7 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder { SetControl(is_heap_object.if_true); Node* orig_effect = Effect(); - Node* undefined_node = LOAD_INSTANCE_FIELD( - UndefinedValue, MachineType::TypeCompressedTaggedPointer()); + Node* undefined_node = BuildLoadUndefinedValueFromInstance(); Node* check_undefined = graph()->NewNode(machine->WordEqual(), value, undefined_node); Node* effect_tagged = Effect(); @@ -5173,8 +5308,8 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder { case wasm::kWasmF64: return BuildChangeFloat64ToTagged(node); case wasm::kWasmAnyRef: - case wasm::kWasmAnyFunc: - case wasm::kWasmExceptRef: + case wasm::kWasmFuncRef: + case wasm::kWasmExnRef: return node; default: UNREACHABLE(); @@ -5196,7 +5331,7 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder { (stub_mode_ == StubCallMode::kCallWasmRuntimeStub) ? mcgraph()->RelocatableIntPtrConstant( wasm::WasmCode::kWasmI64ToBigInt, RelocInfo::WASM_STUB_CALL) - : jsgraph()->HeapConstant(BUILTIN_CODE(isolate_, I64ToBigInt)); + : BuildLoadBuiltinFromInstance(Builtins::kI64ToBigInt); return SetEffect( SetControl(graph()->NewNode(mcgraph()->common()->Call(call_descriptor), @@ -5218,7 +5353,7 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder { (stub_mode_ == StubCallMode::kCallWasmRuntimeStub) ? mcgraph()->RelocatableIntPtrConstant( wasm::WasmCode::kWasmBigIntToI64, RelocInfo::WASM_STUB_CALL) - : jsgraph()->HeapConstant(BUILTIN_CODE(isolate_, BigIntToI64)); + : BuildLoadBuiltinFromInstance(Builtins::kBigIntToI64); return SetEffect(SetControl( graph()->NewNode(mcgraph()->common()->Call(call_descriptor), target, @@ -5228,15 +5363,15 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder { Node* FromJS(Node* node, Node* js_context, wasm::ValueType type) { DCHECK_NE(wasm::kWasmStmt, type); - // The parameter is of type anyref or except_ref, we take it as is. - if (type == wasm::kWasmAnyRef || type == wasm::kWasmExceptRef) { + // The parameter is of type anyref or exnref, we take it as is. + if (type == wasm::kWasmAnyRef || type == wasm::kWasmExnRef) { return node; } - if (type == wasm::kWasmAnyFunc) { + if (type == wasm::kWasmFuncRef) { Node* check = BuildChangeSmiToInt32(SetEffect(BuildCallToRuntimeWithContext( - Runtime::kWasmIsValidAnyFuncValue, js_context, &node, 1, effect_, + Runtime::kWasmIsValidFuncRefValue, js_context, &node, 1, effect_, Control()))); Diamond type_check(graph(), mcgraph()->common(), check, @@ -5471,8 +5606,7 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder { // The callable is passed as the last parameter, after WASM arguments. Node* callable_node = Param(wasm_count + 1); - Node* undefined_node = LOAD_INSTANCE_FIELD( - UndefinedValue, MachineType::TypeCompressedTaggedPointer()); + Node* undefined_node = BuildLoadUndefinedValueFromInstance(); Node* call = nullptr; bool sloppy_receiver = true; @@ -5811,22 +5945,26 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder { } void BuildCWasmEntry() { - // Build the start and the JS parameter nodes. - SetEffect(SetControl(Start(CWasmEntryParameters::kNumParameters + 5))); + // +1 offset for first parameter index being -1. 
+ SetEffect(SetControl(Start(CWasmEntryParameters::kNumParameters + 1))); - // Create parameter nodes (offset by 1 for the receiver parameter). - Node* code_entry = Param(CWasmEntryParameters::kCodeEntry + 1); - Node* object_ref_node = Param(CWasmEntryParameters::kObjectRef + 1); - Node* arg_buffer = Param(CWasmEntryParameters::kArgumentsBuffer + 1); + Node* code_entry = Param(CWasmEntryParameters::kCodeEntry); + Node* object_ref = Param(CWasmEntryParameters::kObjectRef); + Node* arg_buffer = Param(CWasmEntryParameters::kArgumentsBuffer); + Node* c_entry_fp = Param(CWasmEntryParameters::kCEntryFp); + + Node* fp_value = graph()->NewNode(mcgraph()->machine()->LoadFramePointer()); + STORE_RAW(fp_value, TypedFrameConstants::kFirstPushedFrameValueOffset, + c_entry_fp, MachineType::PointerRepresentation(), + kNoWriteBarrier); int wasm_arg_count = static_cast(sig_->parameter_count()); - int arg_count = - wasm_arg_count + 4; // code, object_ref_node, control, effect + int arg_count = wasm_arg_count + 4; // code, object_ref, control, effect Node** args = Buffer(arg_count); int pos = 0; args[pos++] = code_entry; - args[pos++] = object_ref_node; + args[pos++] = object_ref; int offset = 0; for (wasm::ValueType type : sig_->parameters()) { @@ -5847,26 +5985,43 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder { Node* call = SetEffect(graph()->NewNode( mcgraph()->common()->Call(call_descriptor), arg_count, args)); - // Store the return value. - DCHECK_GE(1, sig_->return_count()); - if (sig_->return_count() == 1) { + Node* if_success = graph()->NewNode(mcgraph()->common()->IfSuccess(), call); + Node* if_exception = + graph()->NewNode(mcgraph()->common()->IfException(), call, call); + + // Handle exception: return it. + SetControl(if_exception); + Return(if_exception); + + // Handle success: store the return value(s). + SetControl(if_success); + pos = 0; + offset = 0; + for (wasm::ValueType type : sig_->returns()) { StoreRepresentation store_rep( - wasm::ValueTypes::MachineRepresentationFor(sig_->GetReturn()), - kNoWriteBarrier); + wasm::ValueTypes::MachineRepresentationFor(type), kNoWriteBarrier); + Node* value = sig_->return_count() == 1 + ? 
call + : graph()->NewNode(mcgraph()->common()->Projection(pos), + call, Control()); SetEffect(graph()->NewNode(mcgraph()->machine()->Store(store_rep), - arg_buffer, Int32Constant(0), call, Effect(), - Control())); + arg_buffer, Int32Constant(offset), value, + Effect(), Control())); + offset += wasm::ValueTypes::ElementSizeInBytes(type); + pos++; } + Return(jsgraph()->SmiConstant(0)); if (mcgraph()->machine()->Is32() && ContainsInt64(sig_)) { MachineRepresentation sig_reps[] = { - MachineRepresentation::kWord32, // return value - MachineRepresentation::kTagged, // receiver - MachineRepresentation::kTagged, // arg0 (code) - MachineRepresentation::kTagged // arg1 (buffer) + MachineType::PointerRepresentation(), // return value + MachineType::PointerRepresentation(), // target + MachineRepresentation::kTagged, // object_ref + MachineType::PointerRepresentation(), // argv + MachineType::PointerRepresentation() // c_entry_fp }; - Signature c_entry_sig(1, 2, sig_reps); + Signature c_entry_sig(1, 4, sig_reps); Int64Lowering r(mcgraph()->graph(), mcgraph()->machine(), mcgraph()->common(), mcgraph()->zone(), &c_entry_sig); r.LowerGraph(); @@ -5879,6 +6034,7 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder { Isolate* const isolate_; JSGraph* jsgraph_; StubCallMode stub_mode_; + SetOncePointer undefined_value_node_; SetOncePointer allocate_heap_number_operator_; wasm::WasmFeatures enabled_features_; }; @@ -5901,27 +6057,25 @@ void AppendSignature(char* buffer, size_t max_name_len, } // namespace -MaybeHandle CompileJSToWasmWrapper(Isolate* isolate, - wasm::FunctionSig* sig, - bool is_import) { - TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.wasm"), - "CompileJSToWasmWrapper"); +std::unique_ptr NewJSToWasmCompilationJob( + Isolate* isolate, wasm::FunctionSig* sig, bool is_import) { //---------------------------------------------------------------------------- // Create the Graph. //---------------------------------------------------------------------------- - Zone zone(isolate->allocator(), ZONE_NAME); - Graph graph(&zone); - CommonOperatorBuilder common(&zone); + std::unique_ptr zone = + base::make_unique(isolate->allocator(), ZONE_NAME); + Graph* graph = new (zone.get()) Graph(zone.get()); + CommonOperatorBuilder common(zone.get()); MachineOperatorBuilder machine( - &zone, MachineType::PointerRepresentation(), + zone.get(), MachineType::PointerRepresentation(), InstructionSelector::SupportedMachineOperatorFlags(), InstructionSelector::AlignmentRequirements()); - JSGraph jsgraph(isolate, &graph, &common, nullptr, nullptr, &machine); + JSGraph jsgraph(isolate, graph, &common, nullptr, nullptr, &machine); Node* control = nullptr; Node* effect = nullptr; - WasmWrapperGraphBuilder builder(&zone, &jsgraph, sig, nullptr, + WasmWrapperGraphBuilder builder(zone.get(), &jsgraph, sig, nullptr, StubCallMode::kCallCodeObject, wasm::WasmFeaturesFromIsolate(isolate)); builder.set_control_ptr(&control); @@ -5929,73 +6083,66 @@ MaybeHandle CompileJSToWasmWrapper(Isolate* isolate, builder.BuildJSToWasmWrapper(is_import); //---------------------------------------------------------------------------- - // Run the compilation pipeline. + // Create the compilation job. 
//---------------------------------------------------------------------------- static constexpr size_t kMaxNameLen = 128; - char debug_name[kMaxNameLen] = "js_to_wasm:"; - AppendSignature(debug_name, kMaxNameLen, sig); + auto debug_name = std::unique_ptr(new char[kMaxNameLen]); + memcpy(debug_name.get(), "js_to_wasm:", 12); + AppendSignature(debug_name.get(), kMaxNameLen, sig); - // Schedule and compile to machine code. int params = static_cast(sig->parameter_count()); CallDescriptor* incoming = Linkage::GetJSCallDescriptor( - &zone, false, params + 1, CallDescriptor::kNoFlags); + zone.get(), false, params + 1, CallDescriptor::kNoFlags); - MaybeHandle maybe_code = Pipeline::GenerateCodeForWasmHeapStub( - isolate, incoming, &graph, Code::JS_TO_WASM_FUNCTION, debug_name, - WasmAssemblerOptions()); - Handle code; - if (!maybe_code.ToHandle(&code)) { - return maybe_code; - } -#ifdef ENABLE_DISASSEMBLER - if (FLAG_print_opt_code) { - CodeTracer::Scope tracing_scope(isolate->GetCodeTracer()); - OFStream os(tracing_scope.file()); - code->Disassemble(debug_name, os); - } -#endif - - if (must_record_function_compilation(isolate)) { - RecordFunctionCompilation(CodeEventListener::STUB_TAG, isolate, code, "%s", - debug_name); - } - - return code; + return Pipeline::NewWasmHeapStubCompilationJob( + isolate, incoming, std::move(zone), graph, Code::JS_TO_WASM_FUNCTION, + std::move(debug_name), WasmAssemblerOptions()); } -WasmImportCallKind GetWasmImportCallKind(Handle target, - wasm::FunctionSig* expected_sig, - bool has_bigint_feature) { - if (WasmExportedFunction::IsWasmExportedFunction(*target)) { - auto imported_function = WasmExportedFunction::cast(*target); - auto func_index = imported_function.function_index(); - auto module = imported_function.instance().module(); +std::pair> ResolveWasmImportCall( + Handle callable, wasm::FunctionSig* expected_sig, + bool has_bigint_feature) { + if (WasmExportedFunction::IsWasmExportedFunction(*callable)) { + auto imported_function = Handle::cast(callable); + auto func_index = imported_function->function_index(); + auto module = imported_function->instance().module(); wasm::FunctionSig* imported_sig = module->functions[func_index].sig; if (*imported_sig != *expected_sig) { - return WasmImportCallKind::kLinkError; + return std::make_pair(WasmImportCallKind::kLinkError, callable); } - if (static_cast(func_index) < module->num_imported_functions) { - // TODO(wasm): this redirects all imported-reexported functions - // through the call builtin. Fall through to JS function cases below? - return WasmImportCallKind::kUseCallBuiltin; + if (static_cast(func_index) >= module->num_imported_functions) { + return std::make_pair(WasmImportCallKind::kWasmToWasm, callable); } - return WasmImportCallKind::kWasmToWasm; - } - if (WasmCapiFunction::IsWasmCapiFunction(*target)) { - WasmCapiFunction capi_function = WasmCapiFunction::cast(*target); - if (!capi_function.IsSignatureEqual(expected_sig)) { - return WasmImportCallKind::kLinkError; + Isolate* isolate = callable->GetIsolate(); + // Resolve the short-cut to the underlying callable and continue. 
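{ResolveWasmImportCall} replaces {GetWasmImportCallKind} and now also returns the callable to use: wrappers such as re-exported wasm functions or {WasmJSFunction}s are peeled to their underlying target so the import system caches wrappers for the real callee. A shape-only sketch with an invented class hierarchy:

#include <memory>
#include <utility>

enum class ImportCallKind { kWasmToWasm, kJSFunction, kUseCallBuiltin };

struct Callable {
  virtual ~Callable() = default;
};
struct WasmFunction : Callable {};
struct ReExportedWasm : Callable {
  std::shared_ptr<Callable> inner;  // the short-cut target
};
struct JSFunction : Callable {};

std::pair<ImportCallKind, std::shared_ptr<Callable>> ResolveImportCall(
    std::shared_ptr<Callable> callable) {
  if (auto re = std::dynamic_pointer_cast<ReExportedWasm>(callable)) {
    // Resolve the short-cut to the underlying callable and continue,
    // as the code below does for imported-reexported functions.
    callable = re->inner;
  }
  if (std::dynamic_pointer_cast<WasmFunction>(callable))
    return {ImportCallKind::kWasmToWasm, callable};
  if (std::dynamic_pointer_cast<JSFunction>(callable))
    return {ImportCallKind::kJSFunction, callable};
  return {ImportCallKind::kUseCallBuiltin, callable};
}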
+ Handle instance(imported_function->instance(), isolate); + ImportedFunctionEntry entry(instance, func_index); + callable = handle(entry.callable(), isolate); + } + if (WasmJSFunction::IsWasmJSFunction(*callable)) { + auto js_function = Handle::cast(callable); + if (!js_function->MatchesSignature(expected_sig)) { + return std::make_pair(WasmImportCallKind::kLinkError, callable); + } + Isolate* isolate = callable->GetIsolate(); + // Resolve the short-cut to the underlying callable and continue. + callable = handle(js_function->GetCallable(), isolate); + } + if (WasmCapiFunction::IsWasmCapiFunction(*callable)) { + auto capi_function = Handle::cast(callable); + if (!capi_function->IsSignatureEqual(expected_sig)) { + return std::make_pair(WasmImportCallKind::kLinkError, callable); } - return WasmImportCallKind::kWasmToCapi; + return std::make_pair(WasmImportCallKind::kWasmToCapi, callable); } // Assuming we are calling to JS, check whether this would be a runtime error. if (!wasm::IsJSCompatibleSignature(expected_sig, has_bigint_feature)) { - return WasmImportCallKind::kRuntimeTypeError; + return std::make_pair(WasmImportCallKind::kRuntimeTypeError, callable); } // For JavaScript calls, determine whether the target has an arity match // and whether it has a sloppy receiver. - if (target->IsJSFunction()) { - Handle function = Handle::cast(target); + if (callable->IsJSFunction()) { + Handle function = Handle::cast(callable); SharedFunctionInfo shared = function->shared(); // Check for math intrinsics. @@ -6004,7 +6151,9 @@ WasmImportCallKind GetWasmImportCallKind(Handle target, wasm::FunctionSig* sig = wasm::WasmOpcodes::Signature(wasm::kExpr##name); \ if (!sig) sig = wasm::WasmOpcodes::AsmjsSignature(wasm::kExpr##name); \ DCHECK_NOT_NULL(sig); \ - if (*expected_sig == *sig) return WasmImportCallKind::k##name; \ + if (*expected_sig == *sig) { \ + return std::make_pair(WasmImportCallKind::k##name, callable); \ + } \ } #define COMPARE_SIG_FOR_BUILTIN_F64(name) \ case Builtins::kMath##name: \ @@ -6051,19 +6200,23 @@ WasmImportCallKind GetWasmImportCallKind(Handle target, if (IsClassConstructor(shared.kind())) { // Class constructor will throw anyway. - return WasmImportCallKind::kUseCallBuiltin; + return std::make_pair(WasmImportCallKind::kUseCallBuiltin, callable); } bool sloppy = is_sloppy(shared.language_mode()) && !shared.native(); if (shared.internal_formal_parameter_count() == expected_sig->parameter_count()) { - return sloppy ? WasmImportCallKind::kJSFunctionArityMatchSloppy - : WasmImportCallKind::kJSFunctionArityMatch; + return std::make_pair( + sloppy ? WasmImportCallKind::kJSFunctionArityMatchSloppy + : WasmImportCallKind::kJSFunctionArityMatch, + callable); } - return sloppy ? WasmImportCallKind::kJSFunctionArityMismatchSloppy - : WasmImportCallKind::kJSFunctionArityMismatch; + return std::make_pair( + sloppy ? WasmImportCallKind::kJSFunctionArityMismatchSloppy + : WasmImportCallKind::kJSFunctionArityMismatch, + callable); } // Unknown case. Use the call builtin. 
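For plain JS callees the kind is picked by arity and receiver mode, as above: an exact parameter-count match allows a fast wrapper, and sloppy-mode functions that are not native need the sloppy receiver. Distilled into a small classifier (names are illustrative, not V8 API):

#include <cstddef>

enum class ImportCallKind {
  kJSFunctionArityMatch,
  kJSFunctionArityMatchSloppy,
  kJSFunctionArityMismatch,
  kJSFunctionArityMismatchSloppy,
};

ImportCallKind ClassifyJSImport(size_t declared_params, size_t wasm_params,
                                bool sloppy_mode, bool is_native) {
  bool sloppy_receiver = sloppy_mode && !is_native;
  if (declared_params == wasm_params) {
    return sloppy_receiver ? ImportCallKind::kJSFunctionArityMatchSloppy
                           : ImportCallKind::kJSFunctionArityMatch;
  }
  return sloppy_receiver ? ImportCallKind::kJSFunctionArityMismatchSloppy
                         : ImportCallKind::kJSFunctionArityMismatch;
}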
- return WasmImportCallKind::kUseCallBuiltin; + return std::make_pair(WasmImportCallKind::kUseCallBuiltin, callable); } wasm::WasmOpcode GetMathIntrinsicOpcode(WasmImportCallKind kind, @@ -6103,10 +6256,9 @@ wasm::WasmOpcode GetMathIntrinsicOpcode(WasmImportCallKind kind, #undef CASE } -wasm::WasmCode* CompileWasmMathIntrinsic(wasm::WasmEngine* wasm_engine, - wasm::NativeModule* native_module, - WasmImportCallKind kind, - wasm::FunctionSig* sig) { +wasm::WasmCompilationResult CompileWasmMathIntrinsic( + wasm::WasmEngine* wasm_engine, WasmImportCallKind kind, + wasm::FunctionSig* sig) { DCHECK_EQ(1, sig->return_count()); TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.wasm"), @@ -6125,7 +6277,7 @@ wasm::WasmCode* CompileWasmMathIntrinsic(wasm::WasmEngine* wasm_engine, InstructionSelector::AlignmentRequirements())); wasm::CompilationEnv env( - native_module->module(), wasm::UseTrapHandler::kNoTrapHandler, + nullptr, wasm::UseTrapHandler::kNoTrapHandler, wasm::RuntimeExceptionSupport::kNoRuntimeExceptionSupport, wasm::kAllWasmFeatures, wasm::LowerSimd::kNoLowerSimd); @@ -6167,21 +6319,12 @@ wasm::WasmCode* CompileWasmMathIntrinsic(wasm::WasmEngine* wasm_engine, wasm_engine, call_descriptor, mcgraph, Code::WASM_FUNCTION, wasm::WasmCode::kFunction, debug_name, WasmStubAssemblerOptions(), source_positions); - std::unique_ptr wasm_code = native_module->AddCode( - wasm::WasmCode::kAnonymousFuncIndex, result.code_desc, - result.frame_slot_count, result.tagged_parameter_slots, - std::move(result.protected_instructions), - std::move(result.source_positions), wasm::WasmCode::kFunction, - wasm::ExecutionTier::kNone); - // TODO(titzer): add counters for math intrinsic code size / allocation - return native_module->PublishCode(std::move(wasm_code)); + return result; } -wasm::WasmCode* CompileWasmImportCallWrapper(wasm::WasmEngine* wasm_engine, - wasm::NativeModule* native_module, - WasmImportCallKind kind, - wasm::FunctionSig* sig, - bool source_positions) { +wasm::WasmCompilationResult CompileWasmImportCallWrapper( + wasm::WasmEngine* wasm_engine, wasm::CompilationEnv* env, + WasmImportCallKind kind, wasm::FunctionSig* sig, bool source_positions) { DCHECK_NE(WasmImportCallKind::kLinkError, kind); DCHECK_NE(WasmImportCallKind::kWasmToWasm, kind); @@ -6189,7 +6332,7 @@ wasm::WasmCode* CompileWasmImportCallWrapper(wasm::WasmEngine* wasm_engine, if (FLAG_wasm_math_intrinsics && kind >= WasmImportCallKind::kFirstMathIntrinsic && kind <= WasmImportCallKind::kLastMathIntrinsic) { - return CompileWasmMathIntrinsic(wasm_engine, native_module, kind, sig); + return CompileWasmMathIntrinsic(wasm_engine, kind, sig); } TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.wasm"), @@ -6214,7 +6357,7 @@ wasm::WasmCode* CompileWasmImportCallWrapper(wasm::WasmEngine* wasm_engine, WasmWrapperGraphBuilder builder(&zone, &jsgraph, sig, source_position_table, StubCallMode::kCallWasmRuntimeStub, - native_module->enabled_features()); + env->enabled_features); builder.set_control_ptr(&control); builder.set_effect_ptr(&effect); builder.BuildWasmImportCallWrapper(kind); @@ -6232,13 +6375,8 @@ wasm::WasmCode* CompileWasmImportCallWrapper(wasm::WasmEngine* wasm_engine, wasm_engine, incoming, &jsgraph, Code::WASM_TO_JS_FUNCTION, wasm::WasmCode::kWasmToJsWrapper, func_name, WasmStubAssemblerOptions(), source_position_table); - std::unique_ptr wasm_code = native_module->AddCode( - wasm::WasmCode::kAnonymousFuncIndex, result.code_desc, - result.frame_slot_count, result.tagged_parameter_slots, - std::move(result.protected_instructions), - 
-      std::move(result.source_positions), wasm::WasmCode::kWasmToJsWrapper,
-      wasm::ExecutionTier::kNone);
-  return native_module->PublishCode(std::move(wasm_code));
+  result.kind = wasm::WasmCompilationResult::kWasmToJsWrapper;
+  return result;
 }

 wasm::WasmCode* CompileWasmCapiCallWrapper(wasm::WasmEngine* wasm_engine,
@@ -6290,9 +6428,8 @@ wasm::WasmCode* CompileWasmCapiCallWrapper(wasm::WasmEngine* wasm_engine,
       wasm::WasmCode::kWasmToCapiWrapper, debug_name,
       WasmStubAssemblerOptions(), source_positions);
   std::unique_ptr<wasm::WasmCode> wasm_code = native_module->AddCode(
-      wasm::WasmCode::kAnonymousFuncIndex, result.code_desc,
-      result.frame_slot_count, result.tagged_parameter_slots,
-      std::move(result.protected_instructions),
+      wasm::kAnonymousFuncIndex, result.code_desc, result.frame_slot_count,
+      result.tagged_parameter_slots, std::move(result.protected_instructions),
       std::move(result.source_positions), wasm::WasmCode::kWasmToCapiWrapper,
       wasm::ExecutionTier::kNone);
   return native_module->PublishCode(std::move(wasm_code));
@@ -6338,24 +6475,26 @@ wasm::WasmCompilationResult CompileWasmInterpreterEntry(
       wasm::WasmCode::kInterpreterEntry, func_name.begin(),
       WasmStubAssemblerOptions());
   result.result_tier = wasm::ExecutionTier::kInterpreter;
+  result.kind = wasm::WasmCompilationResult::kInterpreterEntry;
   return result;
 }

 MaybeHandle<Code> CompileCWasmEntry(Isolate* isolate, wasm::FunctionSig* sig) {
-  Zone zone(isolate->allocator(), ZONE_NAME);
-  Graph graph(&zone);
-  CommonOperatorBuilder common(&zone);
+  std::unique_ptr<Zone> zone =
+      base::make_unique<Zone>(isolate->allocator(), ZONE_NAME);
+  Graph* graph = new (zone.get()) Graph(zone.get());
+  CommonOperatorBuilder common(zone.get());
   MachineOperatorBuilder machine(
-      &zone, MachineType::PointerRepresentation(),
+      zone.get(), MachineType::PointerRepresentation(),
       InstructionSelector::SupportedMachineOperatorFlags(),
       InstructionSelector::AlignmentRequirements());
-  JSGraph jsgraph(isolate, &graph, &common, nullptr, nullptr, &machine);
+  JSGraph jsgraph(isolate, graph, &common, nullptr, nullptr, &machine);

   Node* control = nullptr;
   Node* effect = nullptr;

-  WasmWrapperGraphBuilder builder(&zone, &jsgraph, sig, nullptr,
+  WasmWrapperGraphBuilder builder(zone.get(), &jsgraph, sig, nullptr,
                                   StubCallMode::kCallCodeObject,
                                   wasm::WasmFeaturesFromIsolate(isolate));
   builder.set_control_ptr(&control);
@@ -6363,29 +6502,36 @@ MaybeHandle<Code> CompileCWasmEntry(Isolate* isolate, wasm::FunctionSig* sig) {
   builder.BuildCWasmEntry();

   // Schedule and compile to machine code.
-  CallDescriptor* incoming = Linkage::GetJSCallDescriptor(
-      &zone, false, CWasmEntryParameters::kNumParameters + 1,
-      CallDescriptor::kNoFlags);
+  MachineType sig_types[] = {MachineType::Pointer(),    // return
+                             MachineType::Pointer(),    // target
+                             MachineType::AnyTagged(),  // object_ref
+                             MachineType::Pointer(),    // argv
+                             MachineType::Pointer()};   // c_entry_fp
+  MachineSignature incoming_sig(1, 4, sig_types);
+  // Traps need the root register, for TailCallRuntimeWithCEntry to call
+  // Runtime::kThrowWasmError.
+  bool initialize_root_flag = true;
+  CallDescriptor* incoming = Linkage::GetSimplifiedCDescriptor(
+      zone.get(), &incoming_sig, initialize_root_flag);

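The Zone moves from the stack to the heap in the hunk above because its ownership is about to be handed to the compilation job created below, which can outlive this function's frame. A reduced sketch of that ownership transfer, with std::unique_ptr and plain structs standing in for V8's Zone and job types (Arena, StubCompilationJob, MakeJob are illustrative names):

    #include <memory>
    #include <utility>

    struct Arena {};  // stands in for Zone: owns all graph allocations

    class StubCompilationJob {
     public:
      explicit StubCompilationJob(std::unique_ptr<Arena> arena)
          : arena_(std::move(arena)) {}

     private:
      std::unique_ptr<Arena> arena_;  // graph memory lives as long as the job
    };

    StubCompilationJob MakeJob() {
      auto arena = std::make_unique<Arena>();
      // ... build the compiler graph inside *arena here ...
      return StubCompilationJob(std::move(arena));  // job takes ownership
    }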
   // Build a name in the form "c-wasm-entry:<sig>".
   static constexpr size_t kMaxNameLen = 128;
-  char debug_name[kMaxNameLen] = "c-wasm-entry:";
-  AppendSignature(debug_name, kMaxNameLen, sig);
-
-  MaybeHandle<Code> maybe_code = Pipeline::GenerateCodeForWasmHeapStub(
-      isolate, incoming, &graph, Code::C_WASM_ENTRY, debug_name,
-      AssemblerOptions::Default(isolate));
-  Handle<Code> code;
-  if (!maybe_code.ToHandle(&code)) {
-    return maybe_code;
-  }
-#ifdef ENABLE_DISASSEMBLER
-  if (FLAG_print_opt_code) {
-    CodeTracer::Scope tracing_scope(isolate->GetCodeTracer());
-    OFStream os(tracing_scope.file());
-    code->Disassemble(debug_name, os);
-  }
-#endif
+  auto debug_name = std::unique_ptr<char[]>(new char[kMaxNameLen]);
+  memcpy(debug_name.get(), "c-wasm-entry:", 14);
+  AppendSignature(debug_name.get(), kMaxNameLen, sig);
+
+  // Run the compilation job synchronously.
+  std::unique_ptr<OptimizedCompilationJob> job(
+      Pipeline::NewWasmHeapStubCompilationJob(
+          isolate, incoming, std::move(zone), graph, Code::C_WASM_ENTRY,
+          std::move(debug_name), AssemblerOptions::Default(isolate)));
+
+  if (job->PrepareJob(isolate) == CompilationJob::FAILED ||
+      job->ExecuteJob() == CompilationJob::FAILED ||
+      job->FinalizeJob(isolate) == CompilationJob::FAILED) {
+    return {};
+  }
+  Handle<Code> code = job->compilation_info()->code();
   return code;
 }

diff --git a/deps/v8/src/compiler/wasm-compiler.h b/deps/v8/src/compiler/wasm-compiler.h
index 460d0d2f1b1bbe..315733c396d70c 100644
--- a/deps/v8/src/compiler/wasm-compiler.h
+++ b/deps/v8/src/compiler/wasm-compiler.h
@@ -6,6 +6,7 @@
 #define V8_COMPILER_WASM_COMPILER_H_

 #include <memory>
+#include <utility>

 // Clients of this interface shouldn't depend on lots of compiler internals.
 // Do not include anything from src/compiler here!
@@ -20,6 +21,7 @@
 namespace v8 {
 namespace internal {
 struct AssemblerOptions;
+class OptimizedCompilationJob;

 namespace compiler {
 // Forward declarations for some compiler data structures.
@@ -103,13 +105,23 @@ enum class WasmImportCallKind : uint8_t {
   kUseCallBuiltin
 };

-V8_EXPORT_PRIVATE WasmImportCallKind
-GetWasmImportCallKind(Handle<JSReceiver> callable, wasm::FunctionSig* sig,
+// TODO(wasm): There should be only one import kind for sloppy and strict in
+// order to reduce wrapper cache misses. The mode can be checked at runtime
+// instead.
+constexpr WasmImportCallKind kDefaultImportCallKind =
+    WasmImportCallKind::kJSFunctionArityMatchSloppy;
+
+// Resolves which import call wrapper is required for the given JS callable.
+// Returns the kind of wrapper needed and the ultimate target callable. Note
+// that some callables (e.g. a {WasmExportedFunction} or {WasmJSFunction}) just
+// wrap another target, which is why the ultimate target is returned as well.
+V8_EXPORT_PRIVATE std::pair<WasmImportCallKind, Handle<JSReceiver>>
+ResolveWasmImportCall(Handle<JSReceiver> callable, wasm::FunctionSig* sig,
                       bool has_bigint_feature);

 // Compiles an import call wrapper, which allows WASM to call imports.
-V8_EXPORT_PRIVATE wasm::WasmCode* CompileWasmImportCallWrapper(
-    wasm::WasmEngine*, wasm::NativeModule*, WasmImportCallKind,
+V8_EXPORT_PRIVATE wasm::WasmCompilationResult CompileWasmImportCallWrapper(
+    wasm::WasmEngine*, wasm::CompilationEnv* env, WasmImportCallKind,
     wasm::FunctionSig*, bool source_positions);

 // Compiles a host call wrapper, which allows WASM to call host functions.
@@ -117,11 +129,9 @@ wasm::WasmCode* CompileWasmCapiCallWrapper(wasm::WasmEngine*,
                                            wasm::NativeModule*,
                                            wasm::FunctionSig*, Address address);

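CompileCWasmEntry above now drives the heap-stub job through V8's usual three compilation phases back to back. The protocol, reduced to a standalone sketch (Status, Job, and RunJobSynchronously are illustrative stand-ins, not V8's API):

    // Prepare and Finalize touch the heap and must run on the main thread;
    // Execute is the heavy part and, in general, may run on a worker thread.
    enum class Status { kSucceeded, kFailed };

    struct Job {
      Status PrepareJob() { return Status::kSucceeded; }
      Status ExecuteJob() { return Status::kSucceeded; }
      Status FinalizeJob() { return Status::kSucceeded; }
    };

    // Run all three phases synchronously, failing fast like the code above.
    bool RunJobSynchronously(Job* job) {
      return job->PrepareJob() != Status::kFailed &&
             job->ExecuteJob() != Status::kFailed &&
             job->FinalizeJob() != Status::kFailed;
    }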
-// Creates a code object calling a wasm function with the given signature,
-// callable from JS.
-V8_EXPORT_PRIVATE MaybeHandle<Code> CompileJSToWasmWrapper(Isolate*,
-                                                           wasm::FunctionSig*,
-                                                           bool is_import);
+// Returns an OptimizedCompilationJob object for a JS to Wasm wrapper.
+std::unique_ptr<OptimizedCompilationJob> NewJSToWasmCompilationJob(
+    Isolate* isolate, wasm::FunctionSig* sig, bool is_import);

 // Compiles a stub that redirects a call to a wasm function to the wasm
 // interpreter. It's ABI compatible with the compiled wasm function.
@@ -133,13 +143,13 @@ enum CWasmEntryParameters {
   kCodeEntry,
   kObjectRef,
   kArgumentsBuffer,
+  kCEntryFp,
   // marker:
   kNumParameters
 };

-// Compiles a stub with JS linkage, taking parameters as described by
-// {CWasmEntryParameters}. It loads the wasm parameters from the argument
-// buffer and calls the wasm function given as first parameter.
+// Compiles a stub with C++ linkage, to be called from Execution::CallWasm,
+// which knows how to feed it its parameters.
 MaybeHandle<Code> CompileCWasmEntry(Isolate* isolate, wasm::FunctionSig* sig);

 // Values from the instance object are cached between WASM-level function calls.
@@ -280,9 +290,9 @@ class WasmGraphBuilder {
   Node* GetGlobal(uint32_t index);
   Node* SetGlobal(uint32_t index, Node* val);
-  Node* GetTable(uint32_t table_index, Node* index,
+  Node* TableGet(uint32_t table_index, Node* index,
                  wasm::WasmCodePosition position);
-  Node* SetTable(uint32_t table_index, Node* index, Node* val,
+  Node* TableSet(uint32_t table_index, Node* index, Node* val,
                  wasm::WasmCodePosition position);
   //-----------------------------------------------------------------------
   // Operations that concern the linear memory.
@@ -377,6 +387,7 @@ class WasmGraphBuilder {
   Node* AtomicOp(wasm::WasmOpcode opcode, Node* const* inputs,
                  uint32_t alignment, uint32_t offset,
                  wasm::WasmCodePosition position);
+  Node* AtomicFence();

   // Returns a pointer to the dropped_data_segments array. Traps if the data
   // segment is active or has been dropped.
@@ -395,7 +406,7 @@ class WasmGraphBuilder {
   Node* TableInit(uint32_t table_index, uint32_t elem_segment_index, Node* dst,
                   Node* src, Node* size, wasm::WasmCodePosition position);
   Node* ElemDrop(uint32_t elem_segment_index, wasm::WasmCodePosition position);
-  Node* TableCopy(uint32_t table_src_index, uint32_t table_dst_index, Node* dst,
+  Node* TableCopy(uint32_t table_dst_index, uint32_t table_src_index, Node* dst,
                   Node* src, Node* size, wasm::WasmCodePosition position);
   Node* TableGrow(uint32_t table_index, Node* value, Node* delta);
   Node* TableSize(uint32_t table_index);
@@ -485,10 +496,10 @@ class WasmGraphBuilder {
   Node* BuildCallNode(wasm::FunctionSig* sig, Node** args,
                       wasm::WasmCodePosition position, Node* instance_node,
                       const Operator* op);
-  // Special implementation for CallIndirect for table 0.
-  Node* BuildIndirectCall(uint32_t sig_index, Node** args, Node*** rets,
-                          wasm::WasmCodePosition position,
-                          IsReturnCall continuation);
+  // Helper function for {BuildIndirectCall}.
+  void LoadIndirectFunctionTable(uint32_t table_index, Node** ift_size,
+                                 Node** ift_sig_ids, Node** ift_targets,
+                                 Node** ift_instances);
   Node* BuildIndirectCall(uint32_t table_index, uint32_t sig_index, Node** args,
                           Node*** rets, wasm::WasmCodePosition position,
                           IsReturnCall continuation);
@@ -591,8 +602,6 @@ class WasmGraphBuilder {
     return buf;
   }

-  Node* BuildLoadBuiltinFromInstance(int builtin_index);
-
   //-----------------------------------------------------------------------
   // Operations involving the CEntry, a dependency we want to remove
   // to get off the GC heap.
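One detail worth noting in the header changes above: kCEntryFp is inserted before the kNumParameters marker, so every count derived from the enum updates automatically. The pattern in isolation (illustrative enum, not the V8 declaration):

    enum CWasmEntryParams {
      kCodeEntry,
      kObjectRef,
      kArgumentsBuffer,
      kCEntryFp,   // new parameters slot in before the marker
      kNumParams   // marker: always last, equals the parameter count
    };
    static_assert(kNumParams == 4, "the marker tracks additions for free");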
diff --git a/deps/v8/src/d8/d8.cc b/deps/v8/src/d8/d8.cc
index a29c596909e0a6..6656ab608dc56a 100644
--- a/deps/v8/src/d8/d8.cc
+++ b/deps/v8/src/d8/d8.cc
@@ -48,6 +48,10 @@
 #include "src/utils/utils.h"
 #include "src/wasm/wasm-engine.h"

+#ifdef V8_USE_PERFETTO
+#include "perfetto/tracing.h"
+#endif  // V8_USE_PERFETTO
+
 #ifdef V8_INTL_SUPPORT
 #include "unicode/locid.h"
 #endif  // V8_INTL_SUPPORT
@@ -247,15 +251,7 @@ namespace tracing {

 namespace {

-// String options that can be used to initialize TraceOptions.
-const char kRecordUntilFull[] = "record-until-full";
-const char kRecordContinuously[] = "record-continuously";
-const char kRecordAsMuchAsPossible[] = "record-as-much-as-possible";
-
-const char kRecordModeParam[] = "record_mode";
-const char kEnableSystraceParam[] = "enable_systrace";
-const char kEnableArgumentFilterParam[] = "enable_argument_filter";
-const char kIncludedCategoriesParam[] = "included_categories";
+static constexpr char kIncludedCategoriesParam[] = "included_categories";

 class TraceConfigParser {
  public:
@@ -273,30 +269,11 @@ class TraceConfigParser {
     Local<Value> result = JSON::Parse(context, source).ToLocalChecked();
     Local<v8::Object> trace_config_object = Local<v8::Object>::Cast(result);

-    trace_config->SetTraceRecordMode(
-        GetTraceRecordMode(isolate, context, trace_config_object));
-    if (GetBoolean(isolate, context, trace_config_object,
-                   kEnableSystraceParam)) {
-      trace_config->EnableSystrace();
-    }
-    if (GetBoolean(isolate, context, trace_config_object,
-                   kEnableArgumentFilterParam)) {
-      trace_config->EnableArgumentFilter();
-    }
     UpdateIncludedCategoriesList(isolate, context, trace_config_object,
                                  trace_config);
   }

  private:
-  static bool GetBoolean(v8::Isolate* isolate, Local<Context> context,
-                         Local<v8::Object> object, const char* property) {
-    Local<Value> value = GetValue(isolate, context, object, property);
-    if (value->IsNumber()) {
-      return value->BooleanValue(isolate);
-    }
-    return false;
-  }
-
   static int UpdateIncludedCategoriesList(
       v8::Isolate* isolate, Local<Context> context, Local<v8::Object> object,
       platform::tracing::TraceConfig* trace_config) {
@@ -316,23 +293,6 @@ class TraceConfigParser {
     }
     return 0;
   }
-
-  static platform::tracing::TraceRecordMode GetTraceRecordMode(
-      v8::Isolate* isolate, Local<Context> context, Local<v8::Object> object) {
-    Local<Value> value = GetValue(isolate, context, object, kRecordModeParam);
-    if (value->IsString()) {
-      Local<String> v8_string = value->ToString(context).ToLocalChecked();
-      String::Utf8Value str(isolate, v8_string);
-      if (strcmp(kRecordUntilFull, *str) == 0) {
-        return platform::tracing::TraceRecordMode::RECORD_UNTIL_FULL;
-      } else if (strcmp(kRecordContinuously, *str) == 0) {
-        return platform::tracing::TraceRecordMode::RECORD_CONTINUOUSLY;
-      } else if (strcmp(kRecordAsMuchAsPossible, *str) == 0) {
-        return platform::tracing::TraceRecordMode::RECORD_AS_MUCH_AS_POSSIBLE;
-      }
-    }
-    return platform::tracing::TraceRecordMode::RECORD_UNTIL_FULL;
-  }
 };

 }  // namespace
@@ -1927,7 +1887,7 @@ static void PrintNonErrorsMessageCallback(Local<Message> message,
   auto ToCString = [](const v8::String::Utf8Value& value) {
     return *value ? *value : "<string conversion failed>";
   };
-  Isolate* isolate = Isolate::GetCurrent();
+  Isolate* isolate = message->GetIsolate();
   v8::String::Utf8Value msg(isolate, message->Get());
   const char* msg_string = ToCString(msg);
   // Print (filename):(line number): (message).
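After the TraceConfigParser simplification above, d8's trace-config parser honors only the included_categories list; record_mode, enable_systrace, and enable_argument_filter entries in the config are ignored. A hypothetical config file that still takes effect (category names are examples):

    {
      "included_categories": ["v8", "disabled-by-default-v8.compile"]
    }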
@@ -2001,20 +1961,20 @@ int LineFromOffset(Local<debug::Script> script, int offset) {
   return location.GetLineNumber();
 }

-void WriteLcovDataForRange(std::vector<uint32_t>& lines, int start_line,
+void WriteLcovDataForRange(std::vector<uint32_t>* lines, int start_line,
                            int end_line, uint32_t count) {
   // Ensure space in the array.
-  lines.resize(std::max(static_cast<size_t>(end_line + 1), lines.size()), 0);
+  lines->resize(std::max(static_cast<size_t>(end_line + 1), lines->size()), 0);
   // Boundary lines could be shared between two functions with different
   // invocation counts. Take the maximum.
-  lines[start_line] = std::max(lines[start_line], count);
-  lines[end_line] = std::max(lines[end_line], count);
+  (*lines)[start_line] = std::max((*lines)[start_line], count);
+  (*lines)[end_line] = std::max((*lines)[end_line], count);
   // Invocation counts for non-boundary lines are overwritten.
-  for (int k = start_line + 1; k < end_line; k++) lines[k] = count;
+  for (int k = start_line + 1; k < end_line; k++) (*lines)[k] = count;
 }

 void WriteLcovDataForNamedRange(std::ostream& sink,
-                                std::vector<uint32_t>& lines,
+                                std::vector<uint32_t>* lines,
                                 const std::string& name, int start_line,
                                 int end_line, uint32_t count) {
   WriteLcovDataForRange(lines, start_line, end_line, count);
@@ -2064,7 +2024,7 @@ void Shell::WriteLcovData(v8::Isolate* isolate, const char* file) {
         name_stream << start.GetColumnNumber() << ">";
       }

-      WriteLcovDataForNamedRange(sink, lines, name_stream.str(), start_line,
+      WriteLcovDataForNamedRange(sink, &lines, name_stream.str(), start_line,
                                  end_line, count);
     }

@@ -2074,7 +2034,7 @@ void Shell::WriteLcovData(v8::Isolate* isolate, const char* file) {
       int start_line = LineFromOffset(script, block_data.StartOffset());
       int end_line = LineFromOffset(script, block_data.EndOffset() - 1);
       uint32_t count = block_data.Count();
-      WriteLcovDataForRange(lines, start_line, end_line, count);
+      WriteLcovDataForRange(&lines, start_line, end_line, count);
     }
   }
   // Write per-line coverage. LCOV uses 1-based line numbers.
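The boundary-line semantics of the patched helper (now taking a pointer instead of a non-const reference, per style guide) are easy to see with a toy run. Assuming two functions share line 3, the shared line keeps the larger count; WriteRange below is a standalone copy of the same logic:

    #include <algorithm>
    #include <cstdint>
    #include <iostream>
    #include <vector>

    // Same shape as the helper above, reproduced standalone for the example.
    void WriteRange(std::vector<uint32_t>* lines, int start, int end,
                    uint32_t count) {
      lines->resize(std::max(static_cast<size_t>(end + 1), lines->size()), 0);
      (*lines)[start] = std::max((*lines)[start], count);
      (*lines)[end] = std::max((*lines)[end], count);
      for (int k = start + 1; k < end; k++) (*lines)[k] = count;
    }

    int main() {
      std::vector<uint32_t> lines;
      WriteRange(&lines, 0, 3, 5);    // first function: lines 0-3, hit 5 times
      WriteRange(&lines, 3, 6, 2);    // second function starts on line 3
      std::cout << lines[3] << "\n";  // prints 5: boundary keeps the maximum
      return 0;
    }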
@@ -3350,24 +3310,25 @@ int Shell::Main(int argc, char* argv[]) {
   std::unique_ptr<platform::tracing::TracingController> tracing;
   std::ofstream trace_file;
-#ifdef V8_USE_PERFETTO
-  std::ofstream perfetto_trace_file;
-#endif  // V8_USE_PERFETTO
   if (options.trace_enabled && !i::FLAG_verify_predictable) {
     tracing = base::make_unique<platform::tracing::TracingController>();
-
     trace_file.open(options.trace_path ? options.trace_path : "v8_trace.json");
     DCHECK(trace_file.good());
+
+#ifdef V8_USE_PERFETTO
+    // Set up the in-process backend that the tracing controller will connect
+    // to.
+    perfetto::TracingInitArgs init_args;
+    init_args.backends = perfetto::BackendType::kInProcessBackend;
+    perfetto::Tracing::Initialize(init_args);
+
+    tracing->InitializeForPerfetto(&trace_file);
+#else
     platform::tracing::TraceBuffer* trace_buffer =
         platform::tracing::TraceBuffer::CreateTraceBufferRingBuffer(
             platform::tracing::TraceBuffer::kRingBufferChunks,
             platform::tracing::TraceWriter::CreateJSONTraceWriter(trace_file));
     tracing->Initialize(trace_buffer);
-
-#ifdef V8_USE_PERFETTO
-    perfetto_trace_file.open("v8_perfetto_trace.json");
-    DCHECK(trace_file.good());
-    tracing->InitializeForPerfetto(&perfetto_trace_file);
 #endif  // V8_USE_PERFETTO
   }

diff --git a/deps/v8/src/date/OWNERS b/deps/v8/src/date/OWNERS
index fc4aa8d5acf2f4..6edeeae0ea188a 100644
--- a/deps/v8/src/date/OWNERS
+++ b/deps/v8/src/date/OWNERS
@@ -1,3 +1,6 @@
 ishell@chromium.org
 jshin@chromium.org
 ulan@chromium.org
+verwaest@chromium.org
+
+# COMPONENT: Blink>JavaScript>Runtime
diff --git a/deps/v8/src/debug/OWNERS b/deps/v8/src/debug/OWNERS
index 46b472480d1a79..220aa1ce26b153 100644
--- a/deps/v8/src/debug/OWNERS
+++ b/deps/v8/src/debug/OWNERS
@@ -1,5 +1,3 @@
-set noparent
-
 bmeurer@chromium.org
 jgruber@chromium.org
 mvstanton@chromium.org
diff --git a/deps/v8/src/debug/debug-coverage.cc b/deps/v8/src/debug/debug-coverage.cc
index 24aa617524d011..15aad1fcc25556 100644
--- a/deps/v8/src/debug/debug-coverage.cc
+++ b/deps/v8/src/debug/debug-coverage.cc
@@ -61,7 +61,8 @@ bool CompareCoverageBlock(const CoverageBlock& a, const CoverageBlock& b) {
   return a.start < b.start;
 }

-void SortBlockData(std::vector<CoverageBlock>& v) {
+void SortBlockData(
+    std::vector<CoverageBlock>& v) {  // NOLINT(runtime/references)
   // Sort according to the block nesting structure.
   std::sort(v.begin(), v.end(), CompareCoverageBlock);
 }
@@ -507,9 +508,9 @@ void CollectAndMaybeResetCounts(Isolate* isolate,
                  ->feedback_vectors_for_profiling_tools()
                  ->IsArrayList());
       DCHECK_EQ(v8::debug::CoverageMode::kBestEffort, coverage_mode);
-      HeapIterator heap_iterator(isolate->heap());
-      for (HeapObject current_obj = heap_iterator.next();
-           !current_obj.is_null(); current_obj = heap_iterator.next()) {
+      HeapObjectIterator heap_iterator(isolate->heap());
+      for (HeapObject current_obj = heap_iterator.Next();
+           !current_obj.is_null(); current_obj = heap_iterator.Next()) {
         if (!current_obj.IsJSFunction()) continue;
         JSFunction func = JSFunction::cast(current_obj);
         SharedFunctionInfo shared = func.shared();
@@ -714,9 +715,9 @@ void Coverage::SelectMode(Isolate* isolate, debug::CoverageMode mode) {
       std::vector<Handle<JSFunction>> funcs_needing_feedback_vector;
       {
-        HeapIterator heap_iterator(isolate->heap());
-        for (HeapObject o = heap_iterator.next(); !o.is_null();
-             o = heap_iterator.next()) {
+        HeapObjectIterator heap_iterator(isolate->heap());
+        for (HeapObject o = heap_iterator.Next(); !o.is_null();
+             o = heap_iterator.Next()) {
           if (o.IsJSFunction()) {
             JSFunction func = JSFunction::cast(o);
             if (func.has_closure_feedback_cell_array()) {
diff --git a/deps/v8/src/debug/debug-evaluate.cc b/deps/v8/src/debug/debug-evaluate.cc
index 65e62f2aac8c75..0d8a7b2c7e67d4 100644
--- a/deps/v8/src/debug/debug-evaluate.cc
+++ b/deps/v8/src/debug/debug-evaluate.cc
@@ -51,7 +51,7 @@ MaybeHandle<Object> DebugEvaluate::Global(Isolate* isolate,
 }

 MaybeHandle<Object> DebugEvaluate::Local(Isolate* isolate,
-                                         StackFrame::Id frame_id,
+                                         StackFrameId frame_id,
                                          int inlined_jsframe_index,
                                          Handle<String> source,
                                          bool throw_on_side_effect) {
@@ -312,6 +312,7 @@ bool IntrinsicHasNoSideEffect(Runtime::FunctionId id) {
     V(ObjectValuesSkipFastPath)         \
     V(ObjectGetOwnPropertyNames)        \
     V(ObjectGetOwnPropertyNamesTryFast) \
+    V(ObjectIsExtensible)               \
     V(RegExpInitializeAndCompile)       \
     V(StackGuard)                       \
     V(StringAdd)                        \
@@ -771,6 +772,8 @@ DebugInfo::SideEffectState BuiltinGetSideEffectState(Builtins::Name id) {
     case Builtins::kStrictPoisonPillThrower:
     case Builtins::kAllocateInYoungGeneration:
     case Builtins::kAllocateInOldGeneration:
+    case Builtins::kAllocateRegularInYoungGeneration:
+    case Builtins::kAllocateRegularInOldGeneration:
       return DebugInfo::kHasNoSideEffect;

     // Set builtins.
@@ -904,7 +907,7 @@ static bool TransitivelyCalledBuiltinHasNoSideEffect(Builtins::Name caller,
   switch (callee) {
       // Transitively called Builtins:
     case Builtins::kAbort:
-    case Builtins::kAbortJS:
+    case Builtins::kAbortCSAAssert:
     case Builtins::kAdaptorWithBuiltinExitFrame:
     case Builtins::kArrayConstructorImpl:
     case Builtins::kArrayEveryLoopContinuation:
@@ -959,6 +962,8 @@ static bool TransitivelyCalledBuiltinHasNoSideEffect(Builtins::Name caller,
     case Builtins::kOrdinaryToPrimitive_String:
     case Builtins::kParseInt:
     case Builtins::kProxyHasProperty:
+    case Builtins::kProxyIsExtensible:
+    case Builtins::kProxyGetPrototypeOf:
     case Builtins::kRecordWrite:
     case Builtins::kStringAdd_CheckNone:
     case Builtins::kStringEqual:
diff --git a/deps/v8/src/debug/debug-evaluate.h b/deps/v8/src/debug/debug-evaluate.h
index 50817691d72b96..7819892050261e 100644
--- a/deps/v8/src/debug/debug-evaluate.h
+++ b/deps/v8/src/debug/debug-evaluate.h
@@ -7,8 +7,11 @@

 #include <vector>

+#include "src/common/globals.h"
 #include "src/debug/debug-frames.h"
 #include "src/debug/debug-scopes.h"
+#include "src/debug/debug.h"
+#include "src/execution/frames.h"
 #include "src/objects/objects.h"
 #include "src/objects/shared-function-info.h"
 #include "src/objects/string-table.h"
@@ -28,7 +31,7 @@ class DebugEvaluate : public AllStatic {
   //  - Parameters and stack-allocated locals need to be materialized. Altered
   //    values need to be written back to the stack afterwards.
   //  - The arguments object needs to be materialized.
-  static MaybeHandle<Object> Local(Isolate* isolate, StackFrame::Id frame_id,
+  static MaybeHandle<Object> Local(Isolate* isolate, StackFrameId frame_id,
                                    int inlined_jsframe_index,
                                    Handle<String> source,
                                    bool throw_on_side_effect);
diff --git a/deps/v8/src/debug/debug-frames.cc b/deps/v8/src/debug/debug-frames.cc
index a6ee31738dc3fc..4fe062b277a382 100644
--- a/deps/v8/src/debug/debug-frames.cc
+++ b/deps/v8/src/debug/debug-frames.cc
@@ -52,10 +52,13 @@ FrameInspector::FrameInspector(StandardFrame* frame, int inlined_frame_index,
   }
 }

-// NOLINTNEXTLINE
-FrameInspector::~FrameInspector() {
-  // Destructor needs to be defined in the .cc file, because it instantiates
-  // std::unique_ptr destructors but the types are not known in the header.
+// Destructor needs to be defined in the .cc file, because it instantiates
+// std::unique_ptr destructors but the types are not known in the header.
+FrameInspector::~FrameInspector() = default;
+
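The destructor change above is the standard fix for std::unique_ptr members of forward-declared types: ~unique_ptr<T> requires a complete T, so the destructor has to be emitted in a translation unit where the definition is visible. The pattern in isolation (Widget and Holder are illustrative names):

    // holder.h
    #include <memory>
    class Widget;  // forward declaration is enough for the member itself

    class Holder {
     public:
      Holder();
      ~Holder();  // declared only; defining it here would need Widget complete
     private:
      std::unique_ptr<Widget> widget_;
    };

    // holder.cc
    // #include "widget.h"           // Widget is complete here...
    // Holder::~Holder() = default;  // ...so unique_ptr's deleter can compile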
+JavaScriptFrame* FrameInspector::javascript_frame() {
+  return frame_->is_arguments_adaptor() ? ArgumentsAdaptorFrame::cast(frame_)
+                                        : JavaScriptFrame::cast(frame_);
 }

 int FrameInspector::GetParametersCount() {
@@ -90,8 +93,10 @@ bool FrameInspector::ParameterIsShadowedByContextLocal(
   VariableMode mode;
   InitializationFlag init_flag;
   MaybeAssignedFlag maybe_assigned_flag;
+  RequiresBrandCheckFlag requires_brand_check;
   return ScopeInfo::ContextSlotIndex(*info, *parameter_name, &mode, &init_flag,
-                                     &maybe_assigned_flag) != -1;
+                                     &maybe_assigned_flag,
+                                     &requires_brand_check) != -1;
 }

 RedirectActiveFunctions::RedirectActiveFunctions(SharedFunctionInfo shared,
diff --git a/deps/v8/src/debug/debug-frames.h b/deps/v8/src/debug/debug-frames.h
index 5ee4f8b61f472c..274d10030af516 100644
--- a/deps/v8/src/debug/debug-frames.h
+++ b/deps/v8/src/debug/debug-frames.h
@@ -6,7 +6,6 @@
 #define V8_DEBUG_DEBUG_FRAMES_H_

 #include "src/deoptimizer/deoptimizer.h"
-#include "src/execution/frames.h"
 #include "src/execution/isolate.h"
 #include "src/execution/v8threads.h"
 #include "src/objects/objects.h"
@@ -15,12 +14,15 @@
 namespace v8 {
 namespace internal {

+class JavaScriptFrame;
+class StandardFrame;
+
 class FrameInspector {
  public:
   FrameInspector(StandardFrame* frame, int inlined_frame_index,
                  Isolate* isolate);

-  ~FrameInspector();  // NOLINT (modernize-use-equals-default)
+  ~FrameInspector();

   int GetParametersCount();
   Handle<JSFunction> GetFunction() const { return function_; }
@@ -37,10 +39,7 @@ class FrameInspector {
   bool IsWasm();
   bool IsJavaScript();

-  inline JavaScriptFrame* javascript_frame() {
-    return frame_->is_arguments_adaptor() ? ArgumentsAdaptorFrame::cast(frame_)
-                                          : JavaScriptFrame::cast(frame_);
-  }
+  JavaScriptFrame* javascript_frame();

   int inlined_frame_index() const { return inlined_frame_index_; }
diff --git a/deps/v8/src/debug/debug-interface.h b/deps/v8/src/debug/debug-interface.h
index 79222371f966cb..59bc6d08632e75 100644
--- a/deps/v8/src/debug/debug-interface.h
+++ b/deps/v8/src/debug/debug-interface.h
@@ -164,8 +164,9 @@ class WasmScript : public Script {
   uint32_t GetFunctionHash(int function_index);
 };

-V8_EXPORT_PRIVATE void GetLoadedScripts(Isolate* isolate,
-                                        PersistentValueVector