diff --git a/.gitattributes b/.gitattributes index fa5162684b8dc8..9f7d9377eaf426 100644 --- a/.gitattributes +++ b/.gitattributes @@ -1,2 +1,3 @@ test/fixtures/* -text vcbuild.bat text eol=crlf +tools/msvs/find_python.cmd text eol=crlf diff --git a/.mailmap b/.mailmap index a314fe408f0455..2320600463c1cb 100644 --- a/.mailmap +++ b/.mailmap @@ -218,6 +218,8 @@ Kelsey Breseman Ke Ding Khaidi Chu XadillaX Khaidi Chu +Kimberly Wilber +Kimberly Wilber Kiyoshi Nomo kysnm Koichi Kobayashi Kostiantyn Wandalen @@ -255,7 +257,6 @@ Matthew Turner Maurice Hayward maurice_hayward Michael Bernstein Michael Dawson -Michael Wilber Michaël Zasso Michael-Rainabba Richardson rainabba Michał Gołębiowski-Owczarek diff --git a/.travis.yml b/.travis.yml index 88f0c6cdbd2887..e3cf371831765a 100644 --- a/.travis.yml +++ b/.travis.yml @@ -6,9 +6,13 @@ x-ccache-setup-steps: &ccache-setup-steps os: linux language: cpp +# Currently this file can only support one PYTHON_VERSION. +# To experiment with Python 3, comment out Python 2.7 and uncomment one of the Python 3 versions. env: global: - PYTHON_VERSION="2.7.15" + # - PYTHON_VERSION="3.6.7" + # - PYTHON_VERSION="3.7.1" jobs: include: - stage: "Compile" @@ -87,31 +91,3 @@ jobs: - if [ "${TRAVIS_PULL_REQUEST}" != "false" ]; then bash -x tools/lint-pr-commit-message.sh ${TRAVIS_PULL_REQUEST}; fi - - - name: "Python 3 is EXPERIMENTAL (Py36)" - language: node_js - node_js: "node" - install: - - pyenv global 3.6.7 - - python3.6 -m pip install --upgrade pip - - make lint-py-build - script: - - NODE=$(which node) make lint lint-py - - python3.6 ./configure.py - - NODE=$(which node) make test - - - name: "Python 3 is EXPERIMENTAL (Py37)" - language: node_js - node_js: "node" - install: - - pyenv global 3.7.1 - - python3.7 -m pip install --upgrade pip - - make lint-py-build - script: - - NODE=$(which node) make lint lint-py - - python3.7 ./configure.py - - NODE=$(which node) make test - - allow_failures: - - name: "Python 3 is EXPERIMENTAL (Py36)" - - name: "Python 3 is EXPERIMENTAL (Py37)" diff --git a/AUTHORS b/AUTHORS index f1d2517b554a22..6d3ab499f822a2 100644 --- a/AUTHORS +++ b/AUTHORS @@ -141,7 +141,7 @@ Daniel Gröber Travis Swicegood Oleg Slobodskoi Jeremy Martin -Michael Wilber +Kimberly Wilber Sean Braithwaite Anders Conbere Devin Torres diff --git a/BUILDING.md b/BUILDING.md index c56be2ad54e9cb..7fed3f9bdd084d 100644 --- a/BUILDING.md +++ b/BUILDING.md @@ -116,7 +116,7 @@ platforms. This is true regardless of entries in the table below. | Windows | arm64 | >= Windows 10 | Experimental | | | macOS | x64 | >= 10.11 | Tier 1 | | | SmartOS | x64 | >= 18 | Tier 2 | | -| AIX | ppc64be >=power7 | >= 7.1 TL05 | Tier 2 | | +| AIX | ppc64be >=power7 | >= 7.2 TL02 | Tier 2 | | | FreeBSD | x64 | >= 11 | Experimental | Downgraded as of Node.js 12 | 1: GCC 6 is not provided on the base platform, users will @@ -243,8 +243,8 @@ transition before the year-end deadline. * `gcc` and `g++` >= 6.3 or newer, or * GNU Make 3.81 or newer * Python (see note above) - * Python 2.7 - * Python 3.5, 3.6, and 3.7 are experimental. + * Python 2.7 + * Python 3.5, 3.6, and 3.7 are experimental. Installation via Linux package manager can be achieved with: @@ -259,8 +259,8 @@ FreeBSD and OpenBSD users may also need to install `libexecinfo`. * Xcode Command Line Tools >= 8 for macOS * Python (see note above) - * Python 2.7 - * Python 3.5, 3.6, and 3.7 are experimental. + * Python 2.7 + * Python 3.5, 3.6, and 3.7 are experimental. 
macOS users can install the `Xcode Command Line Tools` by running `xcode-select --install`. Alternatively, if you already have the full Xcode @@ -501,16 +501,16 @@ $ backtrace [Build Tools](https://www.visualstudio.com/downloads/#build-tools-for-visual-studio-2017), with the default optional components. * Basic Unix tools required for some tests, - [Git for Windows](http://git-scm.com/download/win) includes Git Bash + [Git for Windows](https://git-scm.com/download/win) includes Git Bash and tools which can be included in the global `PATH`. -* The [NetWide Assembler](http://www.nasm.us/), for OpenSSL assembler modules. +* The [NetWide Assembler](https://www.nasm.us/), for OpenSSL assembler modules. If not installed in the default location, it needs to be manually added to `PATH`. A build with the `openssl-no-asm` option does not need this, nor does a build targeting ARM64 Windows. Optional requirements to build the MSI installer package: -* The [WiX Toolset v3.11](http://wixtoolset.org/releases/) and the +* The [WiX Toolset v3.11](https://wixtoolset.org/releases/) and the [Wix Toolset Visual Studio 2017 Extension](https://marketplace.visualstudio.com/items?itemName=RobMensching.WixToolsetVisualStudio2017Extension). Optional requirements for compiling for Windows 10 on ARM (ARM64): @@ -527,7 +527,7 @@ Optional requirements for compiling for Windows 10 on ARM (ARM64): ##### Option 2: Automated install with Boxstarter -A [Boxstarter](http://boxstarter.org/) script can be used for easy setup of +A [Boxstarter](https://boxstarter.org/) script can be used for easy setup of Windows systems with all the required prerequisites for Node.js development. This script will install the following [Chocolatey](https://chocolatey.org/) packages: @@ -541,8 +541,8 @@ packages: * [NetWide Assembler](https://chocolatey.org/packages/nasm) To install Node.js prerequisites using -[Boxstarter WebLauncher](http://boxstarter.org/WebLauncher), open - +[Boxstarter WebLauncher](https://boxstarter.org/WebLauncher), open + with Internet Explorer or Edge browser on the target machine. Alternatively, you can use PowerShell. Run those commands from an elevated @@ -550,7 +550,7 @@ PowerShell terminal: ```powershell Set-ExecutionPolicy Unrestricted -Force -iex ((New-Object System.Net.WebClient).DownloadString('http://boxstarter.org/bootstrapper.ps1')) +iex ((New-Object System.Net.WebClient).DownloadString('https://boxstarter.org/bootstrapper.ps1')) get-boxstarter -Force Install-BoxstarterPackage https://raw.githubusercontent.com/nodejs/node/master/tools/bootstrap/windows_boxstarter -DisableReboots ``` diff --git a/CHANGELOG.md b/CHANGELOG.md index a80f38766bb4f6..c6b3fd29f2cf48 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -28,7 +28,8 @@ release. -12.10.0
+12.11.0
+12.10.0
12.9.1
12.9.0
12.8.1
diff --git a/COLLABORATOR_GUIDE.md b/COLLABORATOR_GUIDE.md index d8e0d22fd8f705..2796565670a1ff 100644 --- a/COLLABORATOR_GUIDE.md +++ b/COLLABORATOR_GUIDE.md @@ -3,35 +3,35 @@ ## Contents * [Issues and Pull Requests](#issues-and-pull-requests) - - [Welcoming First-Time Contributors](#welcoming-first-time-contributors) - - [Closing Issues and Pull Requests](#closing-issues-and-pull-requests) - - [Author ready pull requests](#author-ready-pull-requests) - - [Handling own pull requests](#handling-own-pull-requests) + * [Welcoming First-Time Contributors](#welcoming-first-time-contributors) + * [Closing Issues and Pull Requests](#closing-issues-and-pull-requests) + * [Author ready pull requests](#author-ready-pull-requests) + * [Handling own pull requests](#handling-own-pull-requests) * [Accepting Modifications](#accepting-modifications) - - [Code Reviews](#code-reviews) - - [Consensus Seeking](#consensus-seeking) - - [Waiting for Approvals](#waiting-for-approvals) - - [Testing and CI](#testing-and-ci) - - [Useful CI Jobs](#useful-ci-jobs) - - [Internal vs. Public API](#internal-vs-public-api) - - [Breaking Changes](#breaking-changes) - - [Breaking Changes and Deprecations](#breaking-changes-and-deprecations) - - [Breaking Changes to Internal Elements](#breaking-changes-to-internal-elements) - - [Unintended Breaking Changes](#unintended-breaking-changes) - - [Reverting commits](#reverting-commits) - - [Introducing New Modules](#introducing-new-modules) - - [Additions to N-API](#additions-to-n-api) - - [Deprecations](#deprecations) - - [Involving the TSC](#involving-the-tsc) + * [Code Reviews](#code-reviews) + * [Consensus Seeking](#consensus-seeking) + * [Waiting for Approvals](#waiting-for-approvals) + * [Testing and CI](#testing-and-ci) + * [Useful CI Jobs](#useful-ci-jobs) + * [Internal vs. Public API](#internal-vs-public-api) + * [Breaking Changes](#breaking-changes) + * [Breaking Changes and Deprecations](#breaking-changes-and-deprecations) + * [Breaking Changes to Internal Elements](#breaking-changes-to-internal-elements) + * [Unintended Breaking Changes](#unintended-breaking-changes) + * [Reverting commits](#reverting-commits) + * [Introducing New Modules](#introducing-new-modules) + * [Additions to N-API](#additions-to-n-api) + * [Deprecations](#deprecations) + * [Involving the TSC](#involving-the-tsc) * [Landing Pull Requests](#landing-pull-requests) - - [Using `git-node`](#using-git-node) - - [Technical HOWTO](#technical-howto) - - [Troubleshooting](#troubleshooting) - - [I Made a Mistake](#i-made-a-mistake) - - [Long Term Support](#long-term-support) - - [What is LTS?](#what-is-lts) - - [How are LTS Branches Managed?](#how-are-lts-branches-managed) - - [How can I help?](#how-can-i-help) + * [Using `git-node`](#using-git-node) + * [Technical HOWTO](#technical-howto) + * [Troubleshooting](#troubleshooting) + * [I Made a Mistake](#i-made-a-mistake) + * [Long Term Support](#long-term-support) + * [What is LTS?](#what-is-lts) + * [How are LTS Branches Managed?](#how-are-lts-branches-managed) + * [How can I help?](#how-can-i-help) * [Who to CC in the issue tracker](#who-to-cc-in-the-issue-tracker) This document explains how Collaborators manage the Node.js project. @@ -371,10 +371,10 @@ deprecation level of an API. Collaborators may opt to elevate pull requests or issues to the [TSC][]. 
Do this if a pull request or issue: -- is labeled `semver-major`, or -- has a significant impact on the codebase, or -- is controversial, or -- is at an impasse among Collaborators who are participating in the discussion. +* is labeled `semver-major`, or +* has a significant impact on the codebase, or +* is controversial, or +* is at an impasse among Collaborators who are participating in the discussion. @-mention the `@nodejs/tsc` GitHub team if you want to elevate an issue to the [TSC][]. Do not use the GitHub UI on the right-hand side to assign to @@ -542,9 +542,7 @@ Save the file and close the editor. When prompted, enter a new commit message for that commit. This is an opportunity to fix commit messages. * The commit message text must conform to the [commit message guidelines][]. - - -* Change the original commit message to include metadata. (The +* Change the original commit message to include metadata. (The [`git node metadata`][git-node-metadata] command can generate the metadata for you.) diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 1a3babf8405ea2..b9d1f2cef604b9 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -3,7 +3,7 @@ * [Code of Conduct](#code-of-conduct) * [Issues](#issues) * [Pull Requests](#pull-requests) -* [Developer's Certificate of Origin 1.1](#developers-certificate-of-origin-11) +* [Developer's Certificate of Origin 1.1](#developers-certificate-of-origin) ## [Code of Conduct](./doc/guides/contributing/coc.md) diff --git a/CPP_STYLE_GUIDE.md b/CPP_STYLE_GUIDE.md index 07733db96ca89e..30a2ec99fa087b 100644 --- a/CPP_STYLE_GUIDE.md +++ b/CPP_STYLE_GUIDE.md @@ -36,6 +36,7 @@ features and idioms, as well as have some specific guidelines for the use of runtime features. Coding guidelines are based on the following guides (highest priority first): + 1. This document 2. The [Google C++ Style Guide][] 3. The ISO [C++ Core Guidelines][] @@ -190,8 +191,8 @@ class FancyContainer { ### Memory allocation -- `Malloc()`, `Calloc()`, etc. from `util.h` abort in Out-of-Memory situations -- `UncheckedMalloc()`, etc. return `nullptr` in OOM situations +* `Malloc()`, `Calloc()`, etc. from `util.h` abort in Out-of-Memory situations +* `UncheckedMalloc()`, etc. 
return `nullptr` in OOM situations ### Use `nullptr` instead of `NULL` or `0` @@ -277,13 +278,14 @@ data[0] = 12345; ### Type casting -- Use `static_cast` if casting is required, and it is valid -- Use `reinterpret_cast` only when it is necessary -- Avoid C-style casts (`(type)value`) -- `dynamic_cast` does not work because Node.js is built without +* Use `static_cast` if casting is required, and it is valid +* Use `reinterpret_cast` only when it is necessary +* Avoid C-style casts (`(type)value`) +* `dynamic_cast` does not work because Node.js is built without [Run Time Type Information][] Further reading: + * [ES.48]: Avoid casts * [ES.49]: If you must use a cast, use a named cast diff --git a/GOVERNANCE.md b/GOVERNANCE.md index 9e678855932e1d..c8560291164f36 100644 --- a/GOVERNANCE.md +++ b/GOVERNANCE.md @@ -2,13 +2,13 @@ -- [Collaborators](#collaborators) - - [Collaborator Activities](#collaborator-activities) -- [Technical Steering Committee](#technical-steering-committee) - - [TSC Meetings](#tsc-meetings) -- [Collaborator Nominations](#collaborator-nominations) - - [Onboarding](#onboarding) -- [Consensus Seeking Process](#consensus-seeking-process) +* [Collaborators](#collaborators) + * [Collaborator Activities](#collaborator-activities) +* [Technical Steering Committee](#technical-steering-committee) + * [TSC Meetings](#tsc-meetings) +* [Collaborator Nominations](#collaborator-nominations) + * [Onboarding](#onboarding) +* [Consensus Seeking Process](#consensus-seeking-process) diff --git a/Makefile b/Makefile index 33d43798f52617..5898541735289d 100644 --- a/Makefile +++ b/Makefile @@ -146,7 +146,9 @@ out/Makefile: config.gypi common.gypi node.gyp \ tools/v8_gypfiles/inspector.gypi tools/v8_gypfiles/v8.gyp $(PYTHON) tools/gyp_node.py -f make -config.gypi: configure configure.py +# node_version.h is listed because the N-API version is taken from there +# and included in config.gypi +config.gypi: configure configure.py src/node_version.h @if [ -x config.status ]; then \ ./config.status; \ else \ @@ -240,17 +242,15 @@ coverage-build-js: .PHONY: coverage-test coverage-test: coverage-build - $(RM) out/$(BUILDTYPE)/obj.target/node/gen/*.gcda $(RM) out/$(BUILDTYPE)/obj.target/node/src/*.gcda - $(RM) out/$(BUILDTYPE)/obj.target/node/src/tracing/*.gcda - $(RM) out/$(BUILDTYPE)/obj.target/node_lib/gen/*.gcda + $(RM) out/$(BUILDTYPE)/obj.target/node/src/*/*.gcda $(RM) out/$(BUILDTYPE)/obj.target/node_lib/src/*.gcda - $(RM) out/$(BUILDTYPE)/obj.target/node_lib/src/tracing/*.gcda + $(RM) out/$(BUILDTYPE)/obj.target/node_lib/src/*/*.gcda -NODE_V8_COVERAGE=out/$(BUILDTYPE)/.coverage \ TEST_CI_ARGS="$(TEST_CI_ARGS) --type=coverage" $(MAKE) $(COVTESTS) $(MAKE) coverage-report-js - -(cd out && "../gcovr/scripts/gcovr" --gcov-exclude='.*deps' \ - --gcov-exclude='.*usr' -v -r Release/obj.target \ + -(cd out && "../gcovr/scripts/gcovr" \ + --gcov-exclude='.*\b(deps|usr|out|cctest)\b' -v -r Release/obj.target \ --html --html-detail -o ../coverage/cxxcoverage.html \ --gcov-executable="$(GCOV)") @echo -n "Javascript coverage %: " @@ -335,7 +335,7 @@ test-cov: all $(MAKE) build-addons $(MAKE) build-js-native-api-tests $(MAKE) build-node-api-tests - # $(MAKE) cctest + $(MAKE) cctest CI_SKIP_TESTS=$(COV_SKIP_TESTS) $(MAKE) jstest test-parallel: all @@ -1312,9 +1312,9 @@ else endif ifeq ($(V),1) - CPPLINT_QUIET = +CPPLINT_QUIET = else - CPPLINT_QUIET = --quiet +CPPLINT_QUIET = --quiet endif .PHONY: lint-cpp # Lints the C++ code with cpplint.py and check-imports.py. 
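The coverage hunk above replaces the two previous `--gcov-exclude` flags (`'.*deps'`, `'.*usr'`) with the single pattern `'.*\b(deps|usr|out|cctest)\b'`. The sketch below illustrates what that pattern filters; the sample paths are hypothetical, and gcovr itself evaluates the pattern as a Python regex, though `\b` and alternation behave the same way in both engines.

```js
// Which coverage paths does the consolidated exclusion pattern drop?
// (Sample paths are illustrative, not taken from a real build tree.)
const exclude = /.*\b(deps|usr|out|cctest)\b/;

const samples = [
  'Release/obj.target/deps/v8/src/api.cc',    // excluded: contains "deps"
  'Release/obj.target/cctest/node_test.cc',   // excluded: contains "cctest"
  'Release/obj.target/node/src/node_file.cc', // kept: no excluded component
];

for (const p of samples) {
  console.log(`${p} -> ${exclude.test(p) ? 'excluded' : 'kept'}`);
}
```

Besides being shorter, the single pattern also screens out data from `out` and `cctest`, which the two old flags did not cover.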
diff --git a/README.md b/README.md index 9ae8f214e6af90..8bc15d76e1d10c 100644 --- a/README.md +++ b/README.md @@ -154,6 +154,8 @@ For information about the governance of the Node.js project, see **Anna Henningsen** <anna@addaleax.net> (she/her) * [apapirovski](https://github.com/apapirovski) - **Anatoli Papirovski** <apapirovski@mac.com> (he/him) +* [BethGriggs](https://github.com/BethGriggs) - +**Beth Griggs** <Bethany.Griggs@uk.ibm.com> (she/her) * [ChALkeR](https://github.com/ChALkeR) - **Сковорода Никита Андреевич** <chalkerx@gmail.com> (he/him) * [cjihrig](https://github.com/cjihrig) - @@ -184,6 +186,8 @@ For information about the governance of the Node.js project, see **Michaël Zasso** <targos@protonmail.com> (he/him) * [thefourtheye](https://github.com/thefourtheye) - **Sakthipriyan Vairamani** <thechargingvolcano@gmail.com> (he/him) +* [tniessen](https://github.com/tniessen) - +**Tobias Nießen** <tniessen@tnie.de> * [Trott](https://github.com/Trott) - **Rich Trott** <rtrott@gmail.com> (he/him) diff --git a/SECURITY.md b/SECURITY.md index 5f1e3e2cc7d563..a82c5f48ceac12 100644 --- a/SECURITY.md +++ b/SECURITY.md @@ -14,22 +14,22 @@ nonetheless. ## Public disclosure preferred -- [#14519](https://github.com/nodejs/node/issues/14519): _Internal domain +* [#14519](https://github.com/nodejs/node/issues/14519): _Internal domain function can be used to cause segfaults_. Requires the ability to execute arbitrary JavaScript code. That is already the highest level of privilege possible. ## Private disclosure preferred -- [CVE-2016-7099](https://nodejs.org/en/blog/vulnerability/september-2016-security-releases/): +* [CVE-2016-7099](https://nodejs.org/en/blog/vulnerability/september-2016-security-releases/): _Fix invalid wildcard certificate validation check_. This was a high-severity defect. It caused Node.js TLS clients to accept invalid wildcard certificates. -- [#5507](https://github.com/nodejs/node/pull/5507): _Fix a defect that makes +* [#5507](https://github.com/nodejs/node/pull/5507): _Fix a defect that makes the CacheBleed Attack possible_. Many, though not all, OpenSSL vulnerabilities in the TLS/SSL protocols also affect Node.js. -- [CVE-2016-2216](https://nodejs.org/en/blog/vulnerability/february-2016-security-releases/): +* [CVE-2016-2216](https://nodejs.org/en/blog/vulnerability/february-2016-security-releases/): _Fix defects in HTTP header parsing for requests and responses that can allow response splitting_. This was a remotely-exploitable defect in the Node.js HTTP implementation. diff --git a/benchmark/process/bench-env.js b/benchmark/process/bench-env.js index a332d3cbd61895..5df521cc958389 100644 --- a/benchmark/process/bench-env.js +++ b/benchmark/process/bench-env.js @@ -3,15 +3,55 @@ const common = require('../common'); const bench = common.createBenchmark(main, { - n: [1e5], + n: [1e6], + operation: ['get', 'set', 'enumerate', 'query', 'delete'] }); -function main({ n }) { - bench.start(); - for (var i = 0; i < n; i++) { - // Access every item in object to process values. - Object.keys(process.env); +function main({ n, operation }) { + switch (operation) { + case 'get': + bench.start(); + for (let i = 0; i < n; i++) { + process.env.PATH; + } + bench.end(n); + break; + case 'set': + bench.start(); + for (let i = 0; i < n; i++) { + process.env.DUMMY = 'hello, world'; + } + bench.end(n); + break; + case 'enumerate': + // First, normalize process.env so that benchmark results are comparable. 
+ for (const key of Object.keys(process.env)) + delete process.env[key]; + for (let i = 0; i < 64; i++) + process.env[Math.random()] = Math.random(); + + n /= 10; // Enumeration is comparatively heavy. + bench.start(); + for (let i = 0; i < n; i++) { + // Access every item in object to process values. + Object.keys(process.env); + } + bench.end(n); + break; + case 'query': + bench.start(); + for (let i = 0; i < n; i++) { + 'PATH' in process.env; + } + bench.end(n); + break; + case 'delete': + bench.start(); + for (let i = 0; i < n; i++) { + delete process.env.DUMMY; + } + bench.end(n); + break; } - bench.end(n); } diff --git a/common.gypi b/common.gypi index b86e5e05d7df9a..6501f78796fd17 100644 --- a/common.gypi +++ b/common.gypi @@ -38,7 +38,7 @@ # Reset this number to 0 on major V8 upgrades. # Increment by one for each non-official patch applied to deps/v8. - 'v8_embedder_string': '-node.16', + 'v8_embedder_string': '-node.12', ##### V8 defaults for Node.js ##### diff --git a/configure.py b/configure.py index ce01a14566e65a..473bbfeb294b34 100755 --- a/configure.py +++ b/configure.py @@ -724,7 +724,7 @@ def get_nasm_version(asm): def get_llvm_version(cc): return get_version_helper( - cc, r"(^(?:FreeBSD )?clang version|based on LLVM) ([3-9]\.[0-9]+)") + cc, r"(^(?:FreeBSD )?clang version|based on LLVM) ([0-9]+\.[0-9]+)") def get_xcode_version(cc): return get_version_helper( diff --git a/deps/npm/.mailmap b/deps/npm/.mailmap index a14e7b8287bc8e..725a59da655908 100644 --- a/deps/npm/.mailmap +++ b/deps/npm/.mailmap @@ -2,13 +2,14 @@ Alex K. Wolfe Andrew Bradley Andrew Lunny Arlo Breault -Ashley Williams Ashley Williams +Ashley Williams Benjamin Coe Benjamin Coe Brian White Cedric Nelson Charlie Robbins +Claudia Hernández Dalmais Maxence Danila Gerasimov Dave Galbraith @@ -22,22 +23,23 @@ Evan Lucas Evan Lucas Faiq Raza Forbes Lindesay -Forrest L Norvell +Forrest L Norvell +Forrest L Norvell Gabriel Barros Geoff Flarity Gregers Gram Rygg Ifeanyi Oraelosi -Isaac Z. Schlueter -Isaac Z. Schlueter -isaacs +isaacs +isaacs +isaacs Jake Verbaten James Sanders James Treworgy Jason Smith Jed Fox +Jonas Weber Joshua Bennett Joshua Bennett -Jonas Weber Julien Meddah Kat Marchán Kevin Lorenz @@ -51,12 +53,12 @@ Max Goodman Maxim Bogushevich Maximilian Antoni Michael Hayes -Nicolas Morel Misha Kaletsky +Nicolas Morel Olivier Melcher Ra'Shaun Stovall -Rebecca Turner Rebecca Turner +Rebecca Turner Ryan Emery Sam Mikes Sreenivas Alapati diff --git a/deps/npm/.travis.yml b/deps/npm/.travis.yml index ef949a0d454d96..1d2e3120c4e845 100644 --- a/deps/npm/.travis.yml +++ b/deps/npm/.travis.yml @@ -1,37 +1,53 @@ sudo: true # need to declare the language as well as the matrix below language: node_js + +os: + - linux + +node_js: + - 12 + - 10 + - 8 + +env: "DEPLOY_VERSION=testing" + # having top-level `env:` adds a phantom build # https://github.com/travis-ci/travis-ci/issues/4681 #env: DEPLOY_VERSION=testing matrix: include: - # LTS is our most important target + # Run the sudotest, but only on Linux - node_js: "12" # DEPLOY_VERSION is used to set the couchapp setup mode for test/tap/registry.js # only gather coverage info for LTS env: DEPLOY_VERSION=testing COVERALLS_REPO_TOKEN="$COVERALLS_OPTIONAL_TOKEN" script: # run the sudo tests, with coverage enabled - - "sudo PATH=$PATH $(which node) . run tap -- \"test/tap/*.js\" --coverage" - - "unset COVERALLS_REPO_TOKEN ; node . run tap -- \"test/broken-under-*/*.js\"" - # previous LTS is next most important + - "sudo PATH=$PATH $(which node) . 
run tap -- \"test/tap/*.js\" --coverage --timeout 600" + + # also run standard and license checking - node_js: "10" - env: DEPLOY_VERSION=testing script: - - "npx standard" - - "node . run licenses" - - "node . run tap -- \"test/tap/*.js\" \"test/broken-under-nyc/*.js\"" - - node_js: "8" - env: DEPLOY_VERSION=testing + - "npx standard" + - "node . run licenses" + + # separate out node 6 so we can turn off caching, because that + # always breaks for some reason. - node_js: "6" - env: DEPLOY_VERSION=testing + cache: false + env: "DEPLOY_VERSION=testing" + + # only run one test on Windows, because it's hella slow + - node_js: "12" + os: "windows" + env: "DEPLOY_VERSION=testing" + notifications: slack: npm-inc:kRqQjto7YbINqHPb1X6nS3g8 -cache: - directories: - - node_modules/.cache + install: - "node . install" + script: - - "node . run tap -- \"test/tap/*.js\" \"test/broken-under-nyc/*.js\"" + - "node . run tap -- \"test/tap/*.js\" -t600 -Rclassic -c" diff --git a/deps/npm/AUTHORS b/deps/npm/AUTHORS index 69e158001e92f0..bed059c08e193b 100644 --- a/deps/npm/AUTHORS +++ b/deps/npm/AUTHORS @@ -1,5 +1,4 @@ # Authors sorted by whether or not they're me -Isaac Z. Schlueter isaacs Steve Steiner Mikeal Rogers @@ -644,11 +643,16 @@ Florian Keller Sreeram Jayan --get raywu0123 -isaacs Iván Reinoso García Roy Marples Robert James Gabriel John Firebaugh Kitten King -claudiahdz +Claudia Hernández Artem Sapegin +Márton Salomváry +gall0ws +Olivier Chevet +Maël Nison +Sara Ahbabou +Gareth Jones diff --git a/deps/npm/CHANGELOG.md b/deps/npm/CHANGELOG.md index e843fcaef659ad..a9b73d7a230d18 100644 --- a/deps/npm/CHANGELOG.md +++ b/deps/npm/CHANGELOG.md @@ -1,3 +1,190 @@ +## 6.11.3 (2019-09-03): + +Fix npm ci regressions and npm outdated depth. + +### BUG FIXES + +* [`235ed1d28`](https://github.com/npm/cli/commit/235ed1d2838ef302bb995e183980209d16c51b9b) + [#239](https://github.com/npm/cli/pull/239) + Don't override user specified depth in outdated + Restores ability to update packages using `--depth` as suggested by `npm audit`. + ([@G-Rath](https://github.com/G-Rath)) +* [`1fafb5151`](https://github.com/npm/cli/commit/1fafb51513466cd793866b576dfea9a8963a3335) + [#242](https://github.com/npm/cli/pull/242) + [npm.community#9586](https://npm.community/t/6-11-1-some-dependencies-are-no-longer-being-installed/9586/4) + Revert "install: do not descend into directory deps' child modules" + ([@isaacs](https://github.com/isaacs)) +* [`cebf542e6`](https://github.com/npm/cli/commit/cebf542e61dcabdd2bd3b876272bf8eebf7d01cc) + [#243](https://github.com/npm/cli/pull/243) + [npm.community#9720](https://npm.community/t/6-11-2-npm-ci-installs-package-with-wrong-permissions/9720) + ci: pass appropriate configs for file/dir modes + ([@isaacs](https://github.com/isaacs)) + +### DEPENDENCIES + +* [`e5fbb7ed1`](https://github.com/npm/cli/commit/e5fbb7ed1fc7ef5c6ca4790e2d0dc441e0ac1596) + `read-cmd-shim@1.0.4` + ([@claudiahdz](https://github.com/claudiahdz)) +* [`23ce65616`](https://github.com/npm/cli/commit/23ce65616c550647c586f7babc3c2f60115af2aa) + `npm-pick-manifest@3.0.2` + ([@claudiahdz](https://github.com/claudiahdz)) + +## 6.11.2 (2019-08-22): + +Fix a recent Windows regression, and two long-standing Windows bugs. Also, +get CI running on Windows, so these things are less likely in the future. 
+ +### DEPENDENCIES + +* [`9778a1b87`](https://github.com/npm/cli/commit/9778a1b878aaa817af6e99385e7683c2a389570d) + `cmd-shim@3.0.3`: Fix regression where shims fail to preserve exit code + ([@isaacs](https://github.com/isaacs)) +* [`bf93e91d8`](https://github.com/npm/cli/commit/bf93e91d879c816a055d5913e6e4210d7299f299) + `npm-package-arg@6.1.1`: Properly handle git+file: urls on Windows when a + drive letter is included. ([@isaacs](https://github.com/isaacs)) + +### BUGFIXES + +* [`6cc4cc66f`](https://github.com/npm/cli/commit/6cc4cc66f1fb050dc4113e35cab59197fd48e04a) + escape args properly on Windows Bash Despite being bash, Node.js running + on windows git mingw bash still executes child processes using cmd.exe. + As a result, arguments in this environment need to be escaped in the + style of cmd.exe, not bash. ([@isaacs](https://github.com/isaacs)) + +### TESTS + +* [`291aba7b8`](https://github.com/npm/cli/commit/291aba7b821e247b96240b1ec037310ead69a594) + make tests pass on Windows ([@isaacs](https://github.com/isaacs)) +* [`fea3a023a`](https://github.com/npm/cli/commit/fea3a023a80863f32a5f97f5132401b1a16161b8) + travis: run tests on Windows as well + ([@isaacs](https://github.com/isaacs)) + +## 6.11.1 (2019-08-20): + +Fix a regression for windows command shim syntax. + +* [`37db29647`](https://github.com/npm/cli/commit/37db2964710c80003604b7e3c1527d17be7ed3d0) + `cmd-shim@3.0.2` ([@isaacs](https://github.com/isaacs)) + +## v6.11.0 (2019-08-20): + +A few meaty bugfixes, and introducing `peerDependenciesMeta`. + +### FEATURES + +* [`a12341088`](https://github.com/npm/cli/commit/a12341088820c0e7ef6c1c0db3c657f0c2b3943e) + [#224](https://github.com/npm/cli/pull/224) Implements + peerDependenciesMeta ([@arcanis](https://github.com/arcanis)) +* [`2f3b79bba`](https://github.com/npm/cli/commit/2f3b79bbad820fd4a398aa494b19f79b7fd520a1) + [#234](https://github.com/npm/cli/pull/234) add new forbidden 403 error + code ([@claudiahdz](https://github.com/claudiahdz)) + +### BUGFIXES + +* [`24acc9fc8`](https://github.com/npm/cli/commit/24acc9fc89d99d87cc66206c6c6f7cdc82fbf763) + and + [`45772af0d`](https://github.com/npm/cli/commit/45772af0ddca54b658cb2ba2182eec26d0a4729d) + [#217](https://github.com/npm/cli/pull/217) + [npm.community#8863](https://npm.community/t/installing-the-same-module-under-multiple-relative-paths-fails-on-linux/8863) + [npm.community#9327](https://npm.community/t/reinstall-breaks-after-npm-update-to-6-10-2/9327,) + do not descend into directory deps' child modules, fix shrinkwrap files + that inappropriately list child nodes of symlink packages + ([@isaacs](https://github.com/isaacs) and + [@salomvary](https://github.com/salomvary)) +* [`50cfe113d`](https://github.com/npm/cli/commit/50cfe113da5fcc59c1d99b0dcf1050ace45803c7) + [#229](https://github.com/npm/cli/pull/229) fixed typo in semver doc + ([@gall0ws](https://github.com/gall0ws)) +* [`e8fb2a1bd`](https://github.com/npm/cli/commit/e8fb2a1bd9785e0092e9926f4fd65ad431e38452) + [#231](https://github.com/npm/cli/pull/231) Fix spelling mistakes in + CHANGELOG-3.md ([@XhmikosR](https://github.com/XhmikosR)) +* [`769d2e057`](https://github.com/npm/cli/commit/769d2e057daf5a2cbfe0ce86f02550e59825a691) + [npm/uid-number#7](https://github.com/npm/uid-number/issues/7) Better + error on invalid `--user`/`--group` configs. This addresses the issue + when people fail to install binary packages on Docker and other + environments where there is no 'nobody' user. 
+ ([@isaacs](https://github.com/isaacs)) +* [`8b43c9624`](https://github.com/npm/cli/commit/8b43c962498c8e2707527e4fca442d7a4fa51595) + [nodejs/node#28987](https://github.com/nodejs/node/issues/28987) + [npm.community#6032](https://npm.community/t/npm-ci-doesnt-respect-npmrc-variables/6032) + [npm.community#6658](https://npm.community/t/npm-ci-doesnt-fill-anymore-the-process-env-npm-config-cache-variable-on-post-install-scripts/6658) + [npm.community#6069](https://npm.community/t/npm-ci-does-not-compile-native-dependencies-according-to-npmrc-configuration/6069) + [npm.community#9323](https://npm.community/t/npm-6-9-x-not-passing-environment-to-node-gyp-regression-from-6-4-x/9323/2) + Fix the regression where random config values in a .npmrc file are not + passed to lifecycle scripts, breaking build processes which rely on them. + ([@isaacs](https://github.com/isaacs)) +* [`8b85eaa47`](https://github.com/npm/cli/commit/8b85eaa47da3abaacc90fe23162a68cc6e1f0404) + save files with inferred ownership rather than relying on `SUDO_UID` and + `SUDO_GID`. ([@isaacs](https://github.com/isaacs)) +* [`b7f6e5f02`](https://github.com/npm/cli/commit/b7f6e5f0285515087b4614d81db17206524c0fdb) + Infer ownership of shrinkwrap files + ([@isaacs](https://github.com/isaacs)) +* [`54b095d77`](https://github.com/npm/cli/commit/54b095d77b3b131622b3cf4cb5c689aa2dd10b6b) + [#235](https://github.com/npm/cli/pull/235) Add spec to dist-tag remove + function ([@theberbie](https://github.com/theberbie)) + +### DEPENDENCIES + +* [`dc8f9e52f`](https://github.com/npm/cli/commit/dc8f9e52f0bb107c0a6b20cc0c97cbc3b056c1b3) + `pacote@9.5.7`: Infer the ownership of all unpacked files in + `node_modules`, so that we never have user-owned files in root-owned + folders, or root-owned files in user-owned folders. 
+ ([@isaacs](https://github.com/isaacs)) +* [`bb33940c3`](https://github.com/npm/cli/commit/bb33940c32aad61704084e61ebd1bd8e7cacccc8) + `cmd-shim@3.0.0`: + * [`9c93ac3`](https://github.com/npm/cmd-shim/commit/9c93ac39e95b0d6ae852e842e4c5dba5e19687c2) + [#2](https://github.com/npm/cmd-shim/pull/2) + [npm#3380](https://github.com/npm/npm/issues/3380) Handle environment + variables properly ([@basbossink](https://github.com/basbossink)) + * [`2d277f8`](https://github.com/npm/cmd-shim/commit/2d277f8e84d45401747b0b9470058f168b974ad5) + [#25](https://github.com/npm/cmd-shim/pull/25) + [#36](https://github.com/npm/cmd-shim/pull/36) + [#35](https://github.com/npm/cmd-shim/pull/35) Fix 'no shebang' case by + always providing `$basedir` in shell script + ([@igorklopov](https://github.com/igorklopov)) + * [`adaf20b`](https://github.com/npm/cmd-shim/commit/adaf20b7fa2c09c2111a2506c6a3e53ed0831f88) + [#26](https://github.com/npm/cmd-shim/pull/26) Fix `$*` causing an + error when arguments contain parentheses + ([@satazor](https://github.com/satazor)) + * [`49f0c13`](https://github.com/npm/cmd-shim/commit/49f0c1318fd384e0031c3fd43801f0e22e1e555f) + [#30](https://github.com/npm/cmd-shim/pull/30) Fix paths for MSYS/MINGW + bash ([@dscho](https://github.com/dscho)) + * [`51a8af3`](https://github.com/npm/cmd-shim/commit/51a8af30990cb072cb30d67fc1b564b14746bba9) + [#34](https://github.com/npm/cmd-shim/pull/34) Add proper support for + PowerShell ([@ExE-Boss](https://github.com/ExE-Boss)) + * [`4c37e04`](https://github.com/npm/cmd-shim/commit/4c37e048dee672237e8962fdffca28e20e9f976d) + [#10](https://github.com/npm/cmd-shim/issues/10) Work around quoted + batch file names ([@isaacs](https://github.com/isaacs)) +* [`a4e279544`](https://github.com/npm/cli/commit/a4e279544f7983e0adff1e475e3760f1ea85825a) + `npm-lifecycle@3.1.3` ([@isaacs](https://github.com/isaacs)): + * fail properly if `uid-number` raises an error +* [`7086a1809`](https://github.com/npm/cli/commit/7086a1809bbfda9be81344b3949c7d3ac687ffc4) + `libcipm@4.0.3` ([@isaacs](https://github.com/isaacs)) +* [`8845141f9`](https://github.com/npm/cli/commit/8845141f9d7827dae572c8cf26f2c775db905bd3) + `read-package-json@2.1.0` ([@isaacs](https://github.com/isaacs)) +* [`51c028215`](https://github.com/npm/cli/commit/51c02821575d80035ebe853492d110db11a7d1b9) + `bin-links@1.1.3` ([@isaacs](https://github.com/isaacs)) +* [`534a5548c`](https://github.com/npm/cli/commit/534a5548c9ebd59f0dd90e9ccca148ed8946efa6) + `read-cmd-shim@1.0.3` ([@isaacs](https://github.com/isaacs)) +* [`3038f2fd5`](https://github.com/npm/cli/commit/3038f2fd5b1d7dd886ee72798241d8943690f508) + `gentle-fs@2.2.1` ([@isaacs](https://github.com/isaacs)) +* [`a609a1648`](https://github.com/npm/cli/commit/a609a16489f76791697d270b499fd4949ab1f8c3) + `graceful-fs@4.2.2` ([@isaacs](https://github.com/isaacs)) +* [`f0346f754`](https://github.com/npm/cli/commit/f0346f75490619a81b310bfc18646ae5ae2e0ea4) + `cacache@12.0.3` ([@isaacs](https://github.com/isaacs)) +* [`ca9c615c8`](https://github.com/npm/cli/commit/ca9c615c8cff5c7db125735eb09f84d912d18694) + `npm-pick-manifest@3.0.0` ([@isaacs](https://github.com/isaacs)) +* [`b417affbf`](https://github.com/npm/cli/commit/b417affbf7133dc7687fd809e4956a43eae3438a) + `pacote@9.5.8` ([@isaacs](https://github.com/isaacs)) + +### TESTS + +* [`b6df0913c`](https://github.com/npm/cli/commit/b6df0913ca73246f1fa6cfa0e81e34ba5f2b6204) + [#228](https://github.com/npm/cli/pull/228) Proper handing of + /usr/bin/node lifecycle-path test + 
([@olivr70](https://github.com/olivr70)) +* [`aaf98e88c`](https://github.com/npm/cli/commit/aaf98e88c78fd6c850d0a3d3ee2f61c02f63bc8c) + `npm-registry-mock@1.3.0` ([@isaacs](https://github.com/isaacs)) + ## v6.10.3 (2019-08-06): ### BUGFIXES diff --git a/deps/npm/changelogs/CHANGELOG-3.md b/deps/npm/changelogs/CHANGELOG-3.md index a652eb378ebcb9..c13159a5c9dcb2 100644 --- a/deps/npm/changelogs/CHANGELOG-3.md +++ b/deps/npm/changelogs/CHANGELOG-3.md @@ -2864,7 +2864,7 @@ Historically, if you used a pre-release version of Node.js, you would get dozens and dozens of warnings when EVERY engine check failed across all of your modules, because `>= 0.10.0` doesn't match prereleases. -You might find this stream of redundent warnings undesirable. I do. +You might find this stream of redundant warnings undesirable. I do. We've moved this into a SINGLE warning you'll get about using a pre-release version of Node.js and now suppress those other warnings. @@ -3524,7 +3524,7 @@ it would just refuse to install anything. (We fixed that in [#10338](https://github.com/npm/npm/pull/10338) Updating a module could result in the module stealing some of its dependencies from the top level, potentially breaking other modules or - resulting in many redundent installations. This bug was first introduced + resulting in many redundant installations. This bug was first introduced by [`971fd47a`](https://github.com/npm/npm/commit/971fd47a). ([@iarna](https://github.com/iarna)) * [`5653366`](https://github.com/npm/npm/commit/5653366) diff --git a/deps/npm/doc/misc/semver.md b/deps/npm/doc/misc/semver.md index 1a93f022b332bd..92c6381b7fe850 100644 --- a/deps/npm/doc/misc/semver.md +++ b/deps/npm/doc/misc/semver.md @@ -398,14 +398,15 @@ range, use the `satisfies(version, range)` function. * `coerce(version)`: Coerces a string to semver if possible -This aims to provide a very forgiving translation of a non-semver -string to semver. It looks for the first digit in a string, and -consumes all remaining characters which satisfy at least a partial semver -(e.g., `1`, `1.2`, `1.2.3`) up to the max permitted length (256 characters). -Longer versions are simply truncated (`4.6.3.9.2-alpha2` becomes `4.6.3`). -All surrounding text is simply ignored (`v3.4 replaces v3.3.1` becomes `3.4.0`). -Only text which lacks digits will fail coercion (`version one` is not valid). -The maximum length for any semver component considered for coercion is 16 characters; -longer components will be ignored (`10000000000000000.4.7.4` becomes `4.7.4`). -The maximum value for any semver component is `Integer.MAX_SAFE_INTEGER || (2**53 - 1)`; -higher value components are invalid (`9999999999999999.4.7.4` is likely invalid). +This aims to provide a very forgiving translation of a non-semver string to +semver. It looks for the first digit in a string, and consumes all +remaining characters which satisfy at least a partial semver (e.g., `1`, +`1.2`, `1.2.3`) up to the max permitted length (256 characters). Longer +versions are simply truncated (`4.6.3.9.2-alpha2` becomes `4.6.3`). All +surrounding text is simply ignored (`v3.4 replaces v3.3.1` becomes +`3.4.0`). Only text which lacks digits will fail coercion (`version one` +is not valid). The maximum length for any semver component considered for +coercion is 16 characters; longer components will be ignored +(`10000000000000000.4.7.4` becomes `4.7.4`). 
The maximum value for any
+semver component is `Number.MAX_SAFE_INTEGER || (2**53 - 1)`; higher value
+components are invalid (`9999999999999999.4.7.4` is likely invalid).
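The coercion rules above can be exercised directly. A minimal sketch, assuming the standalone `semver` package from the npm registry; every expected output restates an example already given in this text:

```js
const semver = require('semver');

// Each expected value restates an example from the coerce() text above.
console.log(semver.coerce('4.6.3.9.2-alpha2').version);        // '4.6.3'  (longer versions truncated)
console.log(semver.coerce('v3.4 replaces v3.3.1').version);    // '3.4.0'  (surrounding text ignored)
console.log(semver.coerce('version one'));                     // null     (no digits, coercion fails)
console.log(semver.coerce('10000000000000000.4.7.4').version); // '4.7.4'  (>16-char component ignored)
```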

[The remaining hunks touch only the regenerated npm HTML documentation under deps/npm/html/doc/ (README.html plus the cli/, files/, and misc/ pages). Rendered here as HTML entity residue, they carry no substantive changes: each one refreshes the generated page footer and bumps the embedded version string from npm@6.10.3 to npm@6.11.3.]

    Team Admins create teams

           - + diff --git a/deps/npm/html/doc/misc/npm-registry.html b/deps/npm/html/doc/misc/npm-registry.html index 143e02baa899c6..38533b6b0254b6 100644 --- a/deps/npm/html/doc/misc/npm-registry.html +++ b/deps/npm/html/doc/misc/npm-registry.html @@ -96,5 +96,5 @@

    SEE ALSO

           - + diff --git a/deps/npm/html/doc/misc/npm-scope.html b/deps/npm/html/doc/misc/npm-scope.html index f1bfa455f881f6..56a46857b09386 100644 --- a/deps/npm/html/doc/misc/npm-scope.html +++ b/deps/npm/html/doc/misc/npm-scope.html @@ -93,5 +93,5 @@

    SEE ALSO

           - + diff --git a/deps/npm/html/doc/misc/npm-scripts.html b/deps/npm/html/doc/misc/npm-scripts.html index 7c5d621c1e6cd0..837a7550cc14f9 100644 --- a/deps/npm/html/doc/misc/npm-scripts.html +++ b/deps/npm/html/doc/misc/npm-scripts.html @@ -234,5 +234,5 @@

    SEE ALSO

           - + diff --git a/deps/npm/html/doc/misc/removing-npm.html b/deps/npm/html/doc/misc/removing-npm.html index 28b384181e7896..15122763670260 100644 --- a/deps/npm/html/doc/misc/removing-npm.html +++ b/deps/npm/html/doc/misc/removing-npm.html @@ -52,5 +52,5 @@

    SEE ALSO

           - + diff --git a/deps/npm/html/doc/misc/semver.html b/deps/npm/html/doc/misc/semver.html index 4bf18c5d808829..e0ec19652e88ef 100644 --- a/deps/npm/html/doc/misc/semver.html +++ b/deps/npm/html/doc/misc/semver.html @@ -349,17 +349,18 @@

    Coercion

    • coerce(version): Coerces a string to semver if possible
    -

    This aims to provide a very forgiving translation of a non-semver -string to semver. It looks for the first digit in a string, and -consumes all remaining characters which satisfy at least a partial semver -(e.g., 1, 1.2, 1.2.3) up to the max permitted length (256 characters). -Longer versions are simply truncated (4.6.3.9.2-alpha2 becomes 4.6.3). -All surrounding text is simply ignored (v3.4 replaces v3.3.1 becomes 3.4.0). -Only text which lacks digits will fail coercion (version one is not valid). -The maximum length for any semver component considered for coercion is 16 characters; -longer components will be ignored (10000000000000000.4.7.4 becomes 4.7.4). -The maximum value for any semver component is Integer.MAX_SAFE_INTEGER || (2**53 - 1); -higher value components are invalid (9999999999999999.4.7.4 is likely invalid).

    +

    This aims to provide a very forgiving translation of a non-semver string to +semver. It looks for the first digit in a string, and consumes all +remaining characters which satisfy at least a partial semver (e.g., 1, +1.2, 1.2.3) up to the max permitted length (256 characters). Longer +versions are simply truncated (4.6.3.9.2-alpha2 becomes 4.6.3). All +surrounding text is simply ignored (v3.4 replaces v3.3.1 becomes +3.4.0). Only text which lacks digits will fail coercion (version one +is not valid). The maximum length for any semver component considered for +coercion is 16 characters; longer components will be ignored +(10000000000000000.4.7.4 becomes 4.7.4). The maximum value for any +semver component is Number.MAX_SAFE_INTEGER || (2**53 - 1); higher value +components are invalid (9999999999999999.4.7.4 is likely invalid).

    @@ -372,5 +373,5 @@

    Coercion

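The semver.html hunk summarized above (and the matching semver.7 hunk later in this diff) rewraps the documentation for coerce(). A quick sketch of the behavior that text describes, using the semver package — the inputs are taken straight from the doc's own examples:

    const semver = require('semver')

    // coerce() returns a SemVer object, or null when no digits are found.
    semver.coerce('v3.4 replaces v3.3.1').version    // '3.4.0' (surrounding text ignored)
    semver.coerce('4.6.3.9.2-alpha2').version        // '4.6.3' (longer versions truncated)
    semver.coerce('10000000000000000.4.7.4').version // '4.7.4' (component over 16 chars ignored)
    semver.coerce('version one')                     // null    (no digits, coercion fails)
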
           - + diff --git a/deps/npm/lib/ci.js b/deps/npm/lib/ci.js index 1fbb28b570f6fa..309ad2f784ec00 100644 --- a/deps/npm/lib/ci.js +++ b/deps/npm/lib/ci.js @@ -1,8 +1,9 @@ 'use strict' +const npm = require('./npm.js') const Installer = require('libcipm') -const npmConfig = require('./config/figgy-config.js') -const npmlog = require('npmlog') +const log = require('npmlog') +const path = require('path') ci.usage = 'npm ci' @@ -10,8 +11,33 @@ ci.completion = (cb) => cb(null, []) module.exports = ci function ci (args, cb) { - return new Installer(npmConfig({ log: npmlog })).run().then(details => { - npmlog.disableProgress() + const opts = { + // Add some non-npm-config opts by hand. + cache: path.join(npm.config.get('cache'), '_cacache'), + // NOTE: npm has some magic logic around color distinct from the config + // value, so we have to override it here + color: !!npm.color, + hashAlgorithm: 'sha1', + includeDeprecated: false, + log, + 'npm-session': npm.session, + 'project-scope': npm.projectScope, + refer: npm.referer, + dmode: npm.modes.exec, + fmode: npm.modes.file, + umask: npm.modes.umask, + npmVersion: npm.version, + tmp: npm.tmp + } + + for (const key in npm.config.list[0]) { + if (key !== 'log') { + opts[key] = npm.config.list[0][key] + } + } + + return new Installer(opts).run().then(details => { + log.disableProgress() console.log(`added ${details.pkgCount} packages in ${ details.runTime / 1000 }s`) diff --git a/deps/npm/lib/config.js b/deps/npm/lib/config.js index 0d4161d3b53e85..5f9819879be23d 100644 --- a/deps/npm/lib/config.js +++ b/deps/npm/lib/config.js @@ -11,7 +11,7 @@ var ini = require('ini') var editor = require('editor') var os = require('os') var path = require('path') -var mkdirp = require('mkdirp') +var mkdirp = require('gentle-fs').mkdir var umask = require('./utils/umask') var usage = require('./utils/usage') var output = require('./utils/output') diff --git a/deps/npm/lib/config/core.js b/deps/npm/lib/config/core.js index b9851f98d0e0c7..36420b3450163d 100644 --- a/deps/npm/lib/config/core.js +++ b/deps/npm/lib/config/core.js @@ -8,7 +8,7 @@ var path = require('path') var nopt = require('nopt') var ini = require('ini') var Umask = configDefs.Umask -var mkdirp = require('mkdirp') +var mkdirp = require('gentle-fs').mkdir var umask = require('../utils/umask') var isWindows = require('../utils/is-windows.js') @@ -31,10 +31,8 @@ enumerable: true }) exports.validate = validate -var myUid = process.env.SUDO_UID !== undefined - ? process.env.SUDO_UID : (process.getuid && process.getuid()) -var myGid = process.env.SUDO_GID !== undefined - ? 
process.env.SUDO_GID : (process.getgid && process.getgid()) +var myUid = process.getuid && process.getuid() +var myGid = process.getgid && process.getgid() var loading = false var loadCbs = [] @@ -218,7 +216,6 @@ function Conf (base) { Conf.prototype.loadPrefix = require('./load-prefix.js') Conf.prototype.loadCAFile = require('./load-cafile.js') -Conf.prototype.loadUid = require('./load-uid.js') Conf.prototype.setUser = require('./set-user.js') Conf.prototype.getCredentialsByURI = require('./get-credentials-by-uri.js') Conf.prototype.setCredentialsByURI = require('./set-credentials-by-uri.js') @@ -227,11 +224,8 @@ Conf.prototype.clearCredentialsByURI = require('./clear-credentials-by-uri.js') Conf.prototype.loadExtras = function (cb) { this.setUser(function (er) { if (er) return cb(er) - this.loadUid(function (er) { - if (er) return cb(er) - // Without prefix, nothing will ever work - mkdirp(this.prefix, cb) - }.bind(this)) + // Without prefix, nothing will ever work + mkdirp(this.prefix, cb) }.bind(this)) } @@ -287,15 +281,21 @@ Conf.prototype.save = function (where, cb) { done(null) }) } else { - mkdirp(path.dirname(target.path), function (er) { + // we don't have to use inferOwner here, because gentle-fs will + // mkdir with the correctly inferred ownership. Just preserve it. + const dir = path.dirname(target.path) + mkdirp(dir, function (er) { if (er) return then(er) - fs.writeFile(target.path, data, 'utf8', function (er) { + fs.stat(dir, (er, st) => { if (er) return then(er) - if (where === 'user' && myUid && myGid) { - fs.chown(target.path, +myUid, +myGid, then) - } else { - then() - } + fs.writeFile(target.path, data, 'utf8', function (er) { + if (er) return then(er) + if (myUid === 0 && (myUid !== st.uid || myGid !== st.gid)) { + fs.chown(target.path, st.uid, st.gid, then) + } else { + then() + } + }) }) }) } diff --git a/deps/npm/lib/config/figgy-config.js b/deps/npm/lib/config/figgy-config.js index 9e9ca0ba561efb..d704d1502cb447 100644 --- a/deps/npm/lib/config/figgy-config.js +++ b/deps/npm/lib/config/figgy-config.js @@ -9,7 +9,7 @@ const npm = require('../npm.js') const pack = require('../pack.js') const path = require('path') -const npmSession = crypto.randomBytes(8).toString('hex') +const npmSession = npm.session = crypto.randomBytes(8).toString('hex') log.verbose('npm-session', npmSession) const SCOPE_REGISTRY_REGEX = /@.*:registry$/gi diff --git a/deps/npm/lib/config/load-uid.js b/deps/npm/lib/config/load-uid.js deleted file mode 100644 index 859eac7494bc7e..00000000000000 --- a/deps/npm/lib/config/load-uid.js +++ /dev/null @@ -1,15 +0,0 @@ -module.exports = loadUid - -var getUid = require('uid-number') - -// Call in the context of a npmconf object - -function loadUid (cb) { - // if we're not in unsafe-perm mode, then figure out who - // to run stuff as. 
Do this first, to support `npm update npm -g` - if (!this.get('unsafe-perm')) { - getUid(this.get('user'), this.get('group'), cb) - } else { - process.nextTick(cb) - } -} diff --git a/deps/npm/lib/config/set-user.js b/deps/npm/lib/config/set-user.js index 14cc21d2ebd998..570a1f54e2757f 100644 --- a/deps/npm/lib/config/set-user.js +++ b/deps/npm/lib/config/set-user.js @@ -3,7 +3,7 @@ module.exports = setUser var assert = require('assert') var path = require('path') var fs = require('fs') -var mkdirp = require('mkdirp') +var mkdirp = require('gentle-fs').mkdir function setUser (cb) { var defaultConf = this.root diff --git a/deps/npm/lib/dist-tag.js b/deps/npm/lib/dist-tag.js index 176e61221eef0e..64bb97b61a4c6f 100644 --- a/deps/npm/lib/dist-tag.js +++ b/deps/npm/lib/dist-tag.js @@ -119,7 +119,8 @@ function remove (spec, tag, opts) { delete tags[tag] const url = `/-/package/${spec.escapedName}/dist-tags/${encodeURIComponent(tag)}` const reqOpts = opts.concat({ - method: 'DELETE' + method: 'DELETE', + spec }) return otplease(reqOpts, reqOpts => regFetch(url, reqOpts)).then(() => { output(`-${tag}: ${spec.name}@${version}`) diff --git a/deps/npm/lib/explore.js b/deps/npm/lib/explore.js index 826a527fa7ef35..0c9930f8e4ca75 100644 --- a/deps/npm/lib/explore.js +++ b/deps/npm/lib/explore.js @@ -9,10 +9,11 @@ var npm = require('./npm.js') var spawn = require('./utils/spawn') var path = require('path') var fs = require('graceful-fs') -var isWindowsShell = require('./utils/is-windows-shell.js') +var isWindows = require('./utils/is-windows.js') var escapeExecPath = require('./utils/escape-exec-path.js') var escapeArg = require('./utils/escape-arg.js') var output = require('./utils/output.js') +var log = require('npmlog') function explore (args, cb) { if (args.length < 1 || !args[0]) return cb(explore.usage) @@ -23,7 +24,7 @@ function explore (args, cb) { var shellArgs = [] if (args) { - if (isWindowsShell) { + if (isWindows) { var execCmd = escapeExecPath(args.shift()) var execArgs = [execCmd].concat(args.map(escapeArg)) opts.windowsVerbatimArguments = true @@ -49,6 +50,7 @@ function explore (args, cb) { ) } + log.silly('explore', {sh, shellArgs, opts}) var shell = spawn(sh, shellArgs, opts) shell.on('close', function (er) { // only fail if non-interactive. 
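The config/core.js hunk above drops the SUDO_UID/SUDO_GID lookup (and load-uid.js entirely) and instead preserves the ownership of the directory it writes into, chowning only when running as root; the shrinkwrap.js and cacache hunks later in this diff apply the same rule. A minimal sketch of that pattern — the helper name is illustrative, the logic mirrors the hunk:

    const fs = require('fs')
    const path = require('path')

    // Write a file, then make it match the owner of its directory.
    // Only root needs (or is able) to chown; any other uid already
    // owns what it writes.
    function writePreservingOwner (target, data, cb) {
      const myUid = process.getuid && process.getuid()
      const myGid = process.getgid && process.getgid()
      fs.stat(path.dirname(target), (er, st) => {
        if (er) return cb(er)
        fs.writeFile(target, data, 'utf8', (er) => {
          if (er) return cb(er)
          if (myUid === 0 && (myUid !== st.uid || myGid !== st.gid)) {
            fs.chown(target, st.uid, st.gid, cb)
          } else {
            cb(null)
          }
        })
      })
    }
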
diff --git a/deps/npm/lib/install.js b/deps/npm/lib/install.js index d2f705e1d1abd9..8cc6d16bdd1698 100644 --- a/deps/npm/lib/install.js +++ b/deps/npm/lib/install.js @@ -104,7 +104,7 @@ var readPackageJson = require('read-package-json') var chain = require('slide').chain var asyncMap = require('slide').asyncMap var archy = require('archy') -var mkdirp = require('mkdirp') +var mkdirp = require('gentle-fs').mkdir var rimraf = require('rimraf') var iferr = require('iferr') var validate = require('aproba') diff --git a/deps/npm/lib/install/action/extract.js b/deps/npm/lib/install/action/extract.js index 32a4f4e004ad7a..585580edd29b78 100644 --- a/deps/npm/lib/install/action/extract.js +++ b/deps/npm/lib/install/action/extract.js @@ -5,7 +5,7 @@ const BB = require('bluebird') const figgyPudding = require('figgy-pudding') const stat = BB.promisify(require('graceful-fs').stat) const gentlyRm = BB.promisify(require('../../utils/gently-rm.js')) -const mkdirp = BB.promisify(require('mkdirp')) +const mkdirp = BB.promisify(require('gentle-fs').mkdir) const moduleName = require('../../utils/module-name.js') const moduleStagingPath = require('../module-staging-path.js') const move = require('../../utils/move.js') diff --git a/deps/npm/lib/install/action/finalize.js b/deps/npm/lib/install/action/finalize.js index e46f1b9d833966..1e53c189d210e6 100644 --- a/deps/npm/lib/install/action/finalize.js +++ b/deps/npm/lib/install/action/finalize.js @@ -3,7 +3,7 @@ const path = require('path') const fs = require('graceful-fs') const Bluebird = require('bluebird') const rimraf = Bluebird.promisify(require('rimraf')) -const mkdirp = Bluebird.promisify(require('mkdirp')) +const mkdirp = Bluebird.promisify(require('gentle-fs').mkdir) const lstat = Bluebird.promisify(fs.lstat) const readdir = Bluebird.promisify(fs.readdir) const symlink = Bluebird.promisify(fs.symlink) diff --git a/deps/npm/lib/install/action/move.js b/deps/npm/lib/install/action/move.js index 00d58a15923176..8a956f59d6d908 100644 --- a/deps/npm/lib/install/action/move.js +++ b/deps/npm/lib/install/action/move.js @@ -4,7 +4,7 @@ var path = require('path') var chain = require('slide').chain var iferr = require('iferr') var rimraf = require('rimraf') -var mkdirp = require('mkdirp') +var mkdirp = require('gentle-fs').mkdir var rmStuff = require('../../unbuild.js').rmStuff var lifecycle = require('../../utils/lifecycle.js') var move = require('../../utils/move.js') diff --git a/deps/npm/lib/install/action/remove.js b/deps/npm/lib/install/action/remove.js index a852d10c5fd84f..f7182d596bed01 100644 --- a/deps/npm/lib/install/action/remove.js +++ b/deps/npm/lib/install/action/remove.js @@ -3,7 +3,7 @@ var path = require('path') var fs = require('graceful-fs') var rimraf = require('rimraf') var asyncMap = require('slide').asyncMap -var mkdirp = require('mkdirp') +var mkdirp = require('gentle-fs').mkdir var npm = require('../../npm.js') var andIgnoreErrors = require('../and-ignore-errors.js') var move = require('../../utils/move.js') diff --git a/deps/npm/lib/install/deps.js b/deps/npm/lib/install/deps.js index 3fe370140adc3d..bfc94ae5048607 100644 --- a/deps/npm/lib/install/deps.js +++ b/deps/npm/lib/install/deps.js @@ -711,6 +711,12 @@ function resolveWithNewModule (pkg, tree, log, next) { }) } +var isOptionalPeerDep = exports.isOptionalPeerDep = function (tree, pkgname) { + if (!tree.package.peerDependenciesMeta) return + if (!tree.package.peerDependenciesMeta[pkgname]) return + return !!tree.package.peerDependenciesMeta[pkgname].optional +} + var 
validatePeerDeps = exports.validatePeerDeps = function (tree, onInvalid) { if (!tree.package.peerDependencies) return Object.keys(tree.package.peerDependencies).forEach(function (pkgname) { @@ -719,7 +725,7 @@ var validatePeerDeps = exports.validatePeerDeps = function (tree, onInvalid) { var spec = npa.resolve(pkgname, version) } catch (e) {} var match = spec && findRequirement(tree.parent || tree, pkgname, spec) - if (!match) onInvalid(tree, pkgname, version) + if (!match && !isOptionalPeerDep(tree, pkgname)) onInvalid(tree, pkgname, version) }) } diff --git a/deps/npm/lib/install/inflate-shrinkwrap.js b/deps/npm/lib/install/inflate-shrinkwrap.js index 5da9418bbdb7ad..b0b71ef6b1323b 100644 --- a/deps/npm/lib/install/inflate-shrinkwrap.js +++ b/deps/npm/lib/install/inflate-shrinkwrap.js @@ -167,7 +167,7 @@ function makeFakeChild (name, topPath, tree, sw, requested) { } const child = createChild({ package: pkg, - loaded: false, + loaded: true, parent: tree, children: [], fromShrinkwrap: requested, diff --git a/deps/npm/lib/outdated.js b/deps/npm/lib/outdated.js index bb4c346f9a6547..5b84ae35587c80 100644 --- a/deps/npm/lib/outdated.js +++ b/deps/npm/lib/outdated.js @@ -91,7 +91,7 @@ function outdated (args, silent, cb) { var dir = path.resolve(npm.dir, '..') // default depth for `outdated` is 0 (cf. `ls`) - if (opts.depth) opts = opts.concat({depth: 0}) + if (opts.depth === Infinity) opts = opts.concat({depth: 0}) readPackageTree(dir, andComputeMetadata(function (er, tree) { if (!tree) return cb(er) @@ -421,7 +421,7 @@ function shouldUpdate (args, tree, dep, has, req, depth, pkgpath, opts, cb, type var l = pickManifest(d, 'latest') var m = pickManifest(d, req) } catch (er) { - if (er.code === 'ETARGET') { + if (er.code === 'ETARGET' || er.code === 'E403') { return skip(er) } else { return skip() diff --git a/deps/npm/lib/search/all-package-metadata.js b/deps/npm/lib/search/all-package-metadata.js index a006dadaddf3b1..388b4f61f04968 100644 --- a/deps/npm/lib/search/all-package-metadata.js +++ b/deps/npm/lib/search/all-package-metadata.js @@ -9,7 +9,7 @@ const figgyPudding = require('figgy-pudding') const fs = require('graceful-fs') const JSONStream = require('JSONStream') const log = require('npmlog') -const mkdir = BB.promisify(require('mkdirp')) +const mkdir = BB.promisify(require('gentle-fs').mkdir) const ms = require('mississippi') const npmFetch = require('libnpm/fetch') const path = require('path') diff --git a/deps/npm/lib/shrinkwrap.js b/deps/npm/lib/shrinkwrap.js index 35e063d447956c..0a3f53546ca87d 100644 --- a/deps/npm/lib/shrinkwrap.js +++ b/deps/npm/lib/shrinkwrap.js @@ -25,6 +25,13 @@ const writeFileAtomic = require('write-file-atomic') const unixFormatPath = require('./utils/unix-format-path.js') const isRegistry = require('./utils/is-registry.js') +const { chown } = require('fs') +const inferOwner = require('infer-owner') +const selfOwner = { + uid: process.getuid && process.getuid(), + gid: process.getgid && process.getgid() +} + const PKGLOCK = 'package-lock.json' const SHRINKWRAP = 'npm-shrinkwrap.json' const PKGLOCK_VERSION = npm.lockfileVersion @@ -217,13 +224,19 @@ function save (dir, pkginfo, opts, cb) { log.verbose('shrinkwrap', `skipping write for ${path.basename(info.path)} because there were no changes.`) cb(null, pkginfo) } else { - writeFileAtomic(info.path, swdata, (err) => { - if (err) return cb(err) - if (opts.silent) return cb(null, pkginfo) - if (!shrinkwrap && !lockfile) { - log.notice('', `created a lockfile as ${path.basename(info.path)}. 
You should commit this file.`) - } - cb(null, pkginfo) + inferOwner(info.path).then(owner => { + writeFileAtomic(info.path, swdata, (err) => { + if (err) return cb(err) + if (opts.silent) return cb(null, pkginfo) + if (!shrinkwrap && !lockfile) { + log.notice('', `created a lockfile as ${path.basename(info.path)}. You should commit this file.`) + } + if (selfOwner.uid === 0 && (selfOwner.uid !== owner.uid || selfOwner.gid !== owner.gid)) { + chown(info.path, owner.uid, owner.gid, er => cb(er, pkginfo)) + } else { + cb(null, pkginfo) + } + }) }) } } diff --git a/deps/npm/lib/utils/cache-file.js b/deps/npm/lib/utils/cache-file.js index 77df7d4e09361d..7b3136b2e2db93 100644 --- a/deps/npm/lib/utils/cache-file.js +++ b/deps/npm/lib/utils/cache-file.js @@ -1,3 +1,4 @@ +// XXX use infer-owner or gentle-fs.mkdir here const npm = require('../npm.js') const path = require('path') const chownr = require('chownr') diff --git a/deps/npm/lib/utils/correct-mkdir.js b/deps/npm/lib/utils/correct-mkdir.js index 2558de66f5b6fa..57368e946e37f5 100644 --- a/deps/npm/lib/utils/correct-mkdir.js +++ b/deps/npm/lib/utils/correct-mkdir.js @@ -1,3 +1,4 @@ +// XXX this can probably be replaced with gentle-fs.mkdir everywhere it's used const chownr = require('chownr') const inflight = require('inflight') const log = require('npmlog') diff --git a/deps/npm/lib/utils/error-message.js b/deps/npm/lib/utils/error-message.js index ea8b05938c108e..5ddfb37682a2e6 100644 --- a/deps/npm/lib/utils/error-message.js +++ b/deps/npm/lib/utils/error-message.js @@ -72,6 +72,20 @@ function errorMessage (er) { } break + case 'EUIDLOOKUP': + short.push(['lifecycle', er.message]) + detail.push([ + '', + [ + '', + 'Failed to look up the user/group for running scripts.', + '', + 'Try again with a different --user or --group settings, or', + 'run with --unsafe-perm to execute scripts as root.' + ].join('\n') + ]) + break + case 'ELIFECYCLE': short.push(['', er.message]) detail.push([ @@ -313,6 +327,18 @@ function errorMessage (er) { detail.push(['notarget', msg.join('\n')]) break + case 'E403': + short.push(['403', er.message]) + msg = [ + 'In most cases, you or one of your dependencies are requesting', + 'a package version that is forbidden by your security policy.' + ] + if (er.parent) { + msg.push("\nIt was specified as a dependency of '" + er.parent + "'\n") + } + detail.push(['403', msg.join('\n')]) + break + case 'ENOTSUP': if (er.required) { short.push(['notsup', er.message]) diff --git a/deps/npm/lib/utils/escape-arg.js b/deps/npm/lib/utils/escape-arg.js index d12ee5edf58205..114abaadaa0906 100644 --- a/deps/npm/lib/utils/escape-arg.js +++ b/deps/npm/lib/utils/escape-arg.js @@ -1,6 +1,6 @@ 'use strict' var path = require('path') -var isWindowsShell = require('./is-windows-shell.js') +var isWindows = require('./is-windows.js') /* Escape the name of an executable suitable for passing to the system shell. @@ -15,7 +15,7 @@ any single quotes in the filename. 
module.exports = escapify function escapify (str) { - if (isWindowsShell) { + if (isWindows) { return '"' + path.normalize(str) + '"' } else { if (/[^-_.~/\w]/.test(str)) { diff --git a/deps/npm/lib/utils/escape-exec-path.js b/deps/npm/lib/utils/escape-exec-path.js index bf94886efa331a..42b64934867dd1 100644 --- a/deps/npm/lib/utils/escape-exec-path.js +++ b/deps/npm/lib/utils/escape-exec-path.js @@ -1,6 +1,6 @@ 'use strict' var path = require('path') -var isWindowsShell = require('./is-windows-shell.js') +var isWindows = require('./is-windows.js') /* Escape the name of an executable suitable for passing to the system shell. @@ -20,7 +20,7 @@ function windowsQuotes (str) { } function escapify (str) { - if (isWindowsShell) { + if (isWindows) { return path.normalize(str).split(/\\/).map(windowsQuotes).join('\\') } else if (/[^-_.~/\w]/.test(str)) { return "'" + str.replace(/'/g, "'\"'\"'") + "'" diff --git a/deps/npm/man/man1/npm-README.1 b/deps/npm/man/man1/npm-README.1 index 24c77377d71fbb..e3b606e205f340 100644 --- a/deps/npm/man/man1/npm-README.1 +++ b/deps/npm/man/man1/npm-README.1 @@ -1,4 +1,4 @@ -.TH "NPM" "1" "August 2019" "" "" +.TH "NPM" "1" "September 2019" "" "" .SH "NAME" \fBnpm\fR \- a JavaScript package manager .P diff --git a/deps/npm/man/man1/npm-access.1 b/deps/npm/man/man1/npm-access.1 index 086af617ba7e3d..7d5a38cba3955a 100644 --- a/deps/npm/man/man1/npm-access.1 +++ b/deps/npm/man/man1/npm-access.1 @@ -1,4 +1,4 @@ -.TH "NPM\-ACCESS" "1" "August 2019" "" "" +.TH "NPM\-ACCESS" "1" "September 2019" "" "" .SH "NAME" \fBnpm-access\fR \- Set access level on published packages .SH SYNOPSIS diff --git a/deps/npm/man/man1/npm-adduser.1 b/deps/npm/man/man1/npm-adduser.1 index d2f8a22b3eaab5..53305de7d4b65d 100644 --- a/deps/npm/man/man1/npm-adduser.1 +++ b/deps/npm/man/man1/npm-adduser.1 @@ -1,4 +1,4 @@ -.TH "NPM\-ADDUSER" "1" "August 2019" "" "" +.TH "NPM\-ADDUSER" "1" "September 2019" "" "" .SH "NAME" \fBnpm-adduser\fR \- Add a registry user account .SH SYNOPSIS diff --git a/deps/npm/man/man1/npm-audit.1 b/deps/npm/man/man1/npm-audit.1 index 506efa44fba3e3..f76e1327ed7972 100644 --- a/deps/npm/man/man1/npm-audit.1 +++ b/deps/npm/man/man1/npm-audit.1 @@ -1,4 +1,4 @@ -.TH "NPM\-AUDIT" "1" "August 2019" "" "" +.TH "NPM\-AUDIT" "1" "September 2019" "" "" .SH "NAME" \fBnpm-audit\fR \- Run a security audit .SH SYNOPSIS diff --git a/deps/npm/man/man1/npm-bin.1 b/deps/npm/man/man1/npm-bin.1 index ebfa2bf1d0f3ae..7551a4c5231a27 100644 --- a/deps/npm/man/man1/npm-bin.1 +++ b/deps/npm/man/man1/npm-bin.1 @@ -1,4 +1,4 @@ -.TH "NPM\-BIN" "1" "August 2019" "" "" +.TH "NPM\-BIN" "1" "September 2019" "" "" .SH "NAME" \fBnpm-bin\fR \- Display npm bin folder .SH SYNOPSIS diff --git a/deps/npm/man/man1/npm-bugs.1 b/deps/npm/man/man1/npm-bugs.1 index 7fcdbdc330f130..d3b5d180de5b46 100644 --- a/deps/npm/man/man1/npm-bugs.1 +++ b/deps/npm/man/man1/npm-bugs.1 @@ -1,4 +1,4 @@ -.TH "NPM\-BUGS" "1" "August 2019" "" "" +.TH "NPM\-BUGS" "1" "September 2019" "" "" .SH "NAME" \fBnpm-bugs\fR \- Bugs for a package in a web browser maybe .SH SYNOPSIS diff --git a/deps/npm/man/man1/npm-build.1 b/deps/npm/man/man1/npm-build.1 index 0628a87f292bcf..76a6f01d8617b1 100644 --- a/deps/npm/man/man1/npm-build.1 +++ b/deps/npm/man/man1/npm-build.1 @@ -1,4 +1,4 @@ -.TH "NPM\-BUILD" "1" "August 2019" "" "" +.TH "NPM\-BUILD" "1" "September 2019" "" "" .SH "NAME" \fBnpm-build\fR \- Build a package .SH SYNOPSIS diff --git a/deps/npm/man/man1/npm-bundle.1 b/deps/npm/man/man1/npm-bundle.1 index 197db97914c3d1..815844ac6f648f 
100644 --- a/deps/npm/man/man1/npm-bundle.1 +++ b/deps/npm/man/man1/npm-bundle.1 @@ -1,4 +1,4 @@ -.TH "NPM\-BUNDLE" "1" "August 2019" "" "" +.TH "NPM\-BUNDLE" "1" "September 2019" "" "" .SH "NAME" \fBnpm-bundle\fR \- REMOVED .SH DESCRIPTION diff --git a/deps/npm/man/man1/npm-cache.1 b/deps/npm/man/man1/npm-cache.1 index 214b9642e09a50..828211c2891421 100644 --- a/deps/npm/man/man1/npm-cache.1 +++ b/deps/npm/man/man1/npm-cache.1 @@ -1,4 +1,4 @@ -.TH "NPM\-CACHE" "1" "August 2019" "" "" +.TH "NPM\-CACHE" "1" "September 2019" "" "" .SH "NAME" \fBnpm-cache\fR \- Manipulates packages cache .SH SYNOPSIS diff --git a/deps/npm/man/man1/npm-ci.1 b/deps/npm/man/man1/npm-ci.1 index 7aaf83c2ae70e4..5f85fc32f4bf90 100644 --- a/deps/npm/man/man1/npm-ci.1 +++ b/deps/npm/man/man1/npm-ci.1 @@ -1,4 +1,4 @@ -.TH "NPM\-CI" "1" "August 2019" "" "" +.TH "NPM\-CI" "1" "September 2019" "" "" .SH "NAME" \fBnpm-ci\fR \- Install a project with a clean slate .SH SYNOPSIS diff --git a/deps/npm/man/man1/npm-completion.1 b/deps/npm/man/man1/npm-completion.1 index 1f84f35d0d5258..45f47bb253c0a2 100644 --- a/deps/npm/man/man1/npm-completion.1 +++ b/deps/npm/man/man1/npm-completion.1 @@ -1,4 +1,4 @@ -.TH "NPM\-COMPLETION" "1" "August 2019" "" "" +.TH "NPM\-COMPLETION" "1" "September 2019" "" "" .SH "NAME" \fBnpm-completion\fR \- Tab Completion for npm .SH SYNOPSIS diff --git a/deps/npm/man/man1/npm-config.1 b/deps/npm/man/man1/npm-config.1 index 17ce589a5aa8f9..05a3a97ad307e9 100644 --- a/deps/npm/man/man1/npm-config.1 +++ b/deps/npm/man/man1/npm-config.1 @@ -1,4 +1,4 @@ -.TH "NPM\-CONFIG" "1" "August 2019" "" "" +.TH "NPM\-CONFIG" "1" "September 2019" "" "" .SH "NAME" \fBnpm-config\fR \- Manage the npm configuration files .SH SYNOPSIS diff --git a/deps/npm/man/man1/npm-dedupe.1 b/deps/npm/man/man1/npm-dedupe.1 index 837ae674b57f4c..f03630bd42001c 100644 --- a/deps/npm/man/man1/npm-dedupe.1 +++ b/deps/npm/man/man1/npm-dedupe.1 @@ -1,4 +1,4 @@ -.TH "NPM\-DEDUPE" "1" "August 2019" "" "" +.TH "NPM\-DEDUPE" "1" "September 2019" "" "" .SH "NAME" \fBnpm-dedupe\fR \- Reduce duplication .SH SYNOPSIS diff --git a/deps/npm/man/man1/npm-deprecate.1 b/deps/npm/man/man1/npm-deprecate.1 index f91bfa352f9270..e5cc62760ebc8b 100644 --- a/deps/npm/man/man1/npm-deprecate.1 +++ b/deps/npm/man/man1/npm-deprecate.1 @@ -1,4 +1,4 @@ -.TH "NPM\-DEPRECATE" "1" "August 2019" "" "" +.TH "NPM\-DEPRECATE" "1" "September 2019" "" "" .SH "NAME" \fBnpm-deprecate\fR \- Deprecate a version of a package .SH SYNOPSIS diff --git a/deps/npm/man/man1/npm-dist-tag.1 b/deps/npm/man/man1/npm-dist-tag.1 index dcceff66532b63..d66ebaa831d1e5 100644 --- a/deps/npm/man/man1/npm-dist-tag.1 +++ b/deps/npm/man/man1/npm-dist-tag.1 @@ -1,4 +1,4 @@ -.TH "NPM\-DIST\-TAG" "1" "August 2019" "" "" +.TH "NPM\-DIST\-TAG" "1" "September 2019" "" "" .SH "NAME" \fBnpm-dist-tag\fR \- Modify package distribution tags .SH SYNOPSIS diff --git a/deps/npm/man/man1/npm-docs.1 b/deps/npm/man/man1/npm-docs.1 index 8a92f7e53a14ce..c7e958f5e6e043 100644 --- a/deps/npm/man/man1/npm-docs.1 +++ b/deps/npm/man/man1/npm-docs.1 @@ -1,4 +1,4 @@ -.TH "NPM\-DOCS" "1" "August 2019" "" "" +.TH "NPM\-DOCS" "1" "September 2019" "" "" .SH "NAME" \fBnpm-docs\fR \- Docs for a package in a web browser maybe .SH SYNOPSIS diff --git a/deps/npm/man/man1/npm-doctor.1 b/deps/npm/man/man1/npm-doctor.1 index f399ff32f3a454..b4c773badb1184 100644 --- a/deps/npm/man/man1/npm-doctor.1 +++ b/deps/npm/man/man1/npm-doctor.1 @@ -1,4 +1,4 @@ -.TH "NPM\-DOCTOR" "1" "August 2019" "" "" +.TH "NPM\-DOCTOR" "1" "September 2019" "" 
"" .SH "NAME" \fBnpm-doctor\fR \- Check your environments .SH SYNOPSIS diff --git a/deps/npm/man/man1/npm-edit.1 b/deps/npm/man/man1/npm-edit.1 index 7255b82f29cee3..5894265c71e13a 100644 --- a/deps/npm/man/man1/npm-edit.1 +++ b/deps/npm/man/man1/npm-edit.1 @@ -1,4 +1,4 @@ -.TH "NPM\-EDIT" "1" "August 2019" "" "" +.TH "NPM\-EDIT" "1" "September 2019" "" "" .SH "NAME" \fBnpm-edit\fR \- Edit an installed package .SH SYNOPSIS diff --git a/deps/npm/man/man1/npm-explore.1 b/deps/npm/man/man1/npm-explore.1 index a20c8ba88db5c7..460c809ddc93b5 100644 --- a/deps/npm/man/man1/npm-explore.1 +++ b/deps/npm/man/man1/npm-explore.1 @@ -1,4 +1,4 @@ -.TH "NPM\-EXPLORE" "1" "August 2019" "" "" +.TH "NPM\-EXPLORE" "1" "September 2019" "" "" .SH "NAME" \fBnpm-explore\fR \- Browse an installed package .SH SYNOPSIS diff --git a/deps/npm/man/man1/npm-help-search.1 b/deps/npm/man/man1/npm-help-search.1 index 8799d2471facd9..04dd3e30bf9530 100644 --- a/deps/npm/man/man1/npm-help-search.1 +++ b/deps/npm/man/man1/npm-help-search.1 @@ -1,4 +1,4 @@ -.TH "NPM\-HELP\-SEARCH" "1" "August 2019" "" "" +.TH "NPM\-HELP\-SEARCH" "1" "September 2019" "" "" .SH "NAME" \fBnpm-help-search\fR \- Search npm help documentation .SH SYNOPSIS diff --git a/deps/npm/man/man1/npm-help.1 b/deps/npm/man/man1/npm-help.1 index 34bf6220720668..d053ae01955b83 100644 --- a/deps/npm/man/man1/npm-help.1 +++ b/deps/npm/man/man1/npm-help.1 @@ -1,4 +1,4 @@ -.TH "NPM\-HELP" "1" "August 2019" "" "" +.TH "NPM\-HELP" "1" "September 2019" "" "" .SH "NAME" \fBnpm-help\fR \- Get help on npm .SH SYNOPSIS diff --git a/deps/npm/man/man1/npm-hook.1 b/deps/npm/man/man1/npm-hook.1 index 8bbe5db39500a7..27766b1851d89a 100644 --- a/deps/npm/man/man1/npm-hook.1 +++ b/deps/npm/man/man1/npm-hook.1 @@ -1,4 +1,4 @@ -.TH "NPM\-HOOK" "1" "August 2019" "" "" +.TH "NPM\-HOOK" "1" "September 2019" "" "" .SH "NAME" \fBnpm-hook\fR \- Manage registry hooks .SH SYNOPSIS diff --git a/deps/npm/man/man1/npm-init.1 b/deps/npm/man/man1/npm-init.1 index 1dcb2f99bd2e08..f72d45f2d1a761 100644 --- a/deps/npm/man/man1/npm-init.1 +++ b/deps/npm/man/man1/npm-init.1 @@ -1,4 +1,4 @@ -.TH "NPM\-INIT" "1" "August 2019" "" "" +.TH "NPM\-INIT" "1" "September 2019" "" "" .SH "NAME" \fBnpm-init\fR \- create a package\.json file .SH SYNOPSIS diff --git a/deps/npm/man/man1/npm-install-ci-test.1 b/deps/npm/man/man1/npm-install-ci-test.1 index 7c1f6bd4f0eda0..7da3d736fdc1e6 100644 --- a/deps/npm/man/man1/npm-install-ci-test.1 +++ b/deps/npm/man/man1/npm-install-ci-test.1 @@ -1,4 +1,4 @@ -.TH "NPM" "" "August 2019" "" "" +.TH "NPM" "" "September 2019" "" "" .SH "NAME" \fBnpm\fR .SH SYNOPSIS diff --git a/deps/npm/man/man1/npm-install-test.1 b/deps/npm/man/man1/npm-install-test.1 index bdc45d73e139d0..29ec3ab330d13c 100644 --- a/deps/npm/man/man1/npm-install-test.1 +++ b/deps/npm/man/man1/npm-install-test.1 @@ -1,4 +1,4 @@ -.TH "NPM" "" "August 2019" "" "" +.TH "NPM" "" "September 2019" "" "" .SH "NAME" \fBnpm\fR .SH SYNOPSIS diff --git a/deps/npm/man/man1/npm-install.1 b/deps/npm/man/man1/npm-install.1 index 66084041656fe5..4a024784135417 100644 --- a/deps/npm/man/man1/npm-install.1 +++ b/deps/npm/man/man1/npm-install.1 @@ -1,4 +1,4 @@ -.TH "NPM\-INSTALL" "1" "August 2019" "" "" +.TH "NPM\-INSTALL" "1" "September 2019" "" "" .SH "NAME" \fBnpm-install\fR \- Install a package .SH SYNOPSIS diff --git a/deps/npm/man/man1/npm-link.1 b/deps/npm/man/man1/npm-link.1 index a819438ff4bc73..7d4146fc4445a2 100644 --- a/deps/npm/man/man1/npm-link.1 +++ b/deps/npm/man/man1/npm-link.1 @@ -1,4 +1,4 @@ -.TH "NPM\-LINK" 
"1" "August 2019" "" "" +.TH "NPM\-LINK" "1" "September 2019" "" "" .SH "NAME" \fBnpm-link\fR \- Symlink a package folder .SH SYNOPSIS diff --git a/deps/npm/man/man1/npm-logout.1 b/deps/npm/man/man1/npm-logout.1 index ed7ce8cba23343..427b837ed4b565 100644 --- a/deps/npm/man/man1/npm-logout.1 +++ b/deps/npm/man/man1/npm-logout.1 @@ -1,4 +1,4 @@ -.TH "NPM\-LOGOUT" "1" "August 2019" "" "" +.TH "NPM\-LOGOUT" "1" "September 2019" "" "" .SH "NAME" \fBnpm-logout\fR \- Log out of the registry .SH SYNOPSIS diff --git a/deps/npm/man/man1/npm-ls.1 b/deps/npm/man/man1/npm-ls.1 index 724bf7d5d60a79..d5b40d68a8356c 100644 --- a/deps/npm/man/man1/npm-ls.1 +++ b/deps/npm/man/man1/npm-ls.1 @@ -1,4 +1,4 @@ -.TH "NPM\-LS" "1" "August 2019" "" "" +.TH "NPM\-LS" "1" "September 2019" "" "" .SH "NAME" \fBnpm-ls\fR \- List installed packages .SH SYNOPSIS @@ -22,7 +22,7 @@ For example, running \fBnpm ls promzard\fP in npm's source tree will show: .P .RS 2 .nf -npm@6.10.3 /path/to/npm +npm@6.11.3 /path/to/npm └─┬ init\-package\-json@0\.0\.4 └── promzard@0\.1\.5 .fi diff --git a/deps/npm/man/man1/npm-org.1 b/deps/npm/man/man1/npm-org.1 index baf9a7ba654c01..136f31d6a2c071 100644 --- a/deps/npm/man/man1/npm-org.1 +++ b/deps/npm/man/man1/npm-org.1 @@ -1,4 +1,4 @@ -.TH "NPM\-ORG" "1" "August 2019" "" "" +.TH "NPM\-ORG" "1" "September 2019" "" "" .SH "NAME" \fBnpm-org\fR \- Manage orgs .SH SYNOPSIS diff --git a/deps/npm/man/man1/npm-outdated.1 b/deps/npm/man/man1/npm-outdated.1 index 5cc8f309844a76..5a3e3990c62be0 100644 --- a/deps/npm/man/man1/npm-outdated.1 +++ b/deps/npm/man/man1/npm-outdated.1 @@ -1,4 +1,4 @@ -.TH "NPM\-OUTDATED" "1" "August 2019" "" "" +.TH "NPM\-OUTDATED" "1" "September 2019" "" "" .SH "NAME" \fBnpm-outdated\fR \- Check for outdated packages .SH SYNOPSIS diff --git a/deps/npm/man/man1/npm-owner.1 b/deps/npm/man/man1/npm-owner.1 index 8f92de7bbb607d..7ae1d55f5a49b5 100644 --- a/deps/npm/man/man1/npm-owner.1 +++ b/deps/npm/man/man1/npm-owner.1 @@ -1,4 +1,4 @@ -.TH "NPM\-OWNER" "1" "August 2019" "" "" +.TH "NPM\-OWNER" "1" "September 2019" "" "" .SH "NAME" \fBnpm-owner\fR \- Manage package owners .SH SYNOPSIS diff --git a/deps/npm/man/man1/npm-pack.1 b/deps/npm/man/man1/npm-pack.1 index 1dd150611d87c0..00ec1c124d18e2 100644 --- a/deps/npm/man/man1/npm-pack.1 +++ b/deps/npm/man/man1/npm-pack.1 @@ -1,4 +1,4 @@ -.TH "NPM\-PACK" "1" "August 2019" "" "" +.TH "NPM\-PACK" "1" "September 2019" "" "" .SH "NAME" \fBnpm-pack\fR \- Create a tarball from a package .SH SYNOPSIS diff --git a/deps/npm/man/man1/npm-ping.1 b/deps/npm/man/man1/npm-ping.1 index 15768ab2af81f8..7f3d03192efa41 100644 --- a/deps/npm/man/man1/npm-ping.1 +++ b/deps/npm/man/man1/npm-ping.1 @@ -1,4 +1,4 @@ -.TH "NPM\-PING" "1" "August 2019" "" "" +.TH "NPM\-PING" "1" "September 2019" "" "" .SH "NAME" \fBnpm-ping\fR \- Ping npm registry .SH SYNOPSIS diff --git a/deps/npm/man/man1/npm-prefix.1 b/deps/npm/man/man1/npm-prefix.1 index 687f735748a6d5..9d42676f4413e5 100644 --- a/deps/npm/man/man1/npm-prefix.1 +++ b/deps/npm/man/man1/npm-prefix.1 @@ -1,4 +1,4 @@ -.TH "NPM\-PREFIX" "1" "August 2019" "" "" +.TH "NPM\-PREFIX" "1" "September 2019" "" "" .SH "NAME" \fBnpm-prefix\fR \- Display prefix .SH SYNOPSIS diff --git a/deps/npm/man/man1/npm-profile.1 b/deps/npm/man/man1/npm-profile.1 index e4766b14348c2a..ee7e4814bbddd0 100644 --- a/deps/npm/man/man1/npm-profile.1 +++ b/deps/npm/man/man1/npm-profile.1 @@ -1,4 +1,4 @@ -.TH "NPM\-PROFILE" "1" "August 2019" "" "" +.TH "NPM\-PROFILE" "1" "September 2019" "" "" .SH "NAME" \fBnpm-profile\fR \- Change 
settings on your registry profile .SH SYNOPSIS diff --git a/deps/npm/man/man1/npm-prune.1 b/deps/npm/man/man1/npm-prune.1 index c5bf949e67873b..4202f13befe6ad 100644 --- a/deps/npm/man/man1/npm-prune.1 +++ b/deps/npm/man/man1/npm-prune.1 @@ -1,4 +1,4 @@ -.TH "NPM\-PRUNE" "1" "August 2019" "" "" +.TH "NPM\-PRUNE" "1" "September 2019" "" "" .SH "NAME" \fBnpm-prune\fR \- Remove extraneous packages .SH SYNOPSIS diff --git a/deps/npm/man/man1/npm-publish.1 b/deps/npm/man/man1/npm-publish.1 index 35671949999d1c..1d49b7f1977634 100644 --- a/deps/npm/man/man1/npm-publish.1 +++ b/deps/npm/man/man1/npm-publish.1 @@ -1,4 +1,4 @@ -.TH "NPM\-PUBLISH" "1" "August 2019" "" "" +.TH "NPM\-PUBLISH" "1" "September 2019" "" "" .SH "NAME" \fBnpm-publish\fR \- Publish a package .SH SYNOPSIS diff --git a/deps/npm/man/man1/npm-rebuild.1 b/deps/npm/man/man1/npm-rebuild.1 index c5883986e173f3..c41139e5b6bacc 100644 --- a/deps/npm/man/man1/npm-rebuild.1 +++ b/deps/npm/man/man1/npm-rebuild.1 @@ -1,4 +1,4 @@ -.TH "NPM\-REBUILD" "1" "August 2019" "" "" +.TH "NPM\-REBUILD" "1" "September 2019" "" "" .SH "NAME" \fBnpm-rebuild\fR \- Rebuild a package .SH SYNOPSIS diff --git a/deps/npm/man/man1/npm-repo.1 b/deps/npm/man/man1/npm-repo.1 index 6c86f43073ef58..80862abd7aa3f2 100644 --- a/deps/npm/man/man1/npm-repo.1 +++ b/deps/npm/man/man1/npm-repo.1 @@ -1,4 +1,4 @@ -.TH "NPM\-REPO" "1" "August 2019" "" "" +.TH "NPM\-REPO" "1" "September 2019" "" "" .SH "NAME" \fBnpm-repo\fR \- Open package repository page in the browser .SH SYNOPSIS diff --git a/deps/npm/man/man1/npm-restart.1 b/deps/npm/man/man1/npm-restart.1 index 6b885c370692b2..f90bff8e5b07cf 100644 --- a/deps/npm/man/man1/npm-restart.1 +++ b/deps/npm/man/man1/npm-restart.1 @@ -1,4 +1,4 @@ -.TH "NPM\-RESTART" "1" "August 2019" "" "" +.TH "NPM\-RESTART" "1" "September 2019" "" "" .SH "NAME" \fBnpm-restart\fR \- Restart a package .SH SYNOPSIS diff --git a/deps/npm/man/man1/npm-root.1 b/deps/npm/man/man1/npm-root.1 index 4dcc4da07e9cb4..737a848c4c33ea 100644 --- a/deps/npm/man/man1/npm-root.1 +++ b/deps/npm/man/man1/npm-root.1 @@ -1,4 +1,4 @@ -.TH "NPM\-ROOT" "1" "August 2019" "" "" +.TH "NPM\-ROOT" "1" "September 2019" "" "" .SH "NAME" \fBnpm-root\fR \- Display npm root .SH SYNOPSIS diff --git a/deps/npm/man/man1/npm-run-script.1 b/deps/npm/man/man1/npm-run-script.1 index 727789b2382c0e..181870c5337c6c 100644 --- a/deps/npm/man/man1/npm-run-script.1 +++ b/deps/npm/man/man1/npm-run-script.1 @@ -1,4 +1,4 @@ -.TH "NPM\-RUN\-SCRIPT" "1" "August 2019" "" "" +.TH "NPM\-RUN\-SCRIPT" "1" "September 2019" "" "" .SH "NAME" \fBnpm-run-script\fR \- Run arbitrary package scripts .SH SYNOPSIS diff --git a/deps/npm/man/man1/npm-search.1 b/deps/npm/man/man1/npm-search.1 index 9beb7e736cb055..9c7117320a1621 100644 --- a/deps/npm/man/man1/npm-search.1 +++ b/deps/npm/man/man1/npm-search.1 @@ -1,4 +1,4 @@ -.TH "NPM\-SEARCH" "1" "August 2019" "" "" +.TH "NPM\-SEARCH" "1" "September 2019" "" "" .SH "NAME" \fBnpm-search\fR \- Search for packages .SH SYNOPSIS diff --git a/deps/npm/man/man1/npm-shrinkwrap.1 b/deps/npm/man/man1/npm-shrinkwrap.1 index 320b42f6ab6311..b62ab91b7fe6bc 100644 --- a/deps/npm/man/man1/npm-shrinkwrap.1 +++ b/deps/npm/man/man1/npm-shrinkwrap.1 @@ -1,4 +1,4 @@ -.TH "NPM\-SHRINKWRAP" "1" "August 2019" "" "" +.TH "NPM\-SHRINKWRAP" "1" "September 2019" "" "" .SH "NAME" \fBnpm-shrinkwrap\fR \- Lock down dependency versions for publication .SH SYNOPSIS diff --git a/deps/npm/man/man1/npm-star.1 b/deps/npm/man/man1/npm-star.1 index 5c8868ee46aa8a..ef6a28976645cf 100644 --- 
a/deps/npm/man/man1/npm-star.1 +++ b/deps/npm/man/man1/npm-star.1 @@ -1,4 +1,4 @@ -.TH "NPM\-STAR" "1" "August 2019" "" "" +.TH "NPM\-STAR" "1" "September 2019" "" "" .SH "NAME" \fBnpm-star\fR \- Mark your favorite packages .SH SYNOPSIS diff --git a/deps/npm/man/man1/npm-stars.1 b/deps/npm/man/man1/npm-stars.1 index 597dd61872437e..ce833c45a20444 100644 --- a/deps/npm/man/man1/npm-stars.1 +++ b/deps/npm/man/man1/npm-stars.1 @@ -1,4 +1,4 @@ -.TH "NPM\-STARS" "1" "August 2019" "" "" +.TH "NPM\-STARS" "1" "September 2019" "" "" .SH "NAME" \fBnpm-stars\fR \- View packages marked as favorites .SH SYNOPSIS diff --git a/deps/npm/man/man1/npm-start.1 b/deps/npm/man/man1/npm-start.1 index 01beb52ff68944..b530cd09622d59 100644 --- a/deps/npm/man/man1/npm-start.1 +++ b/deps/npm/man/man1/npm-start.1 @@ -1,4 +1,4 @@ -.TH "NPM\-START" "1" "August 2019" "" "" +.TH "NPM\-START" "1" "September 2019" "" "" .SH "NAME" \fBnpm-start\fR \- Start a package .SH SYNOPSIS diff --git a/deps/npm/man/man1/npm-stop.1 b/deps/npm/man/man1/npm-stop.1 index 0559706aedc739..50c5421a93a33e 100644 --- a/deps/npm/man/man1/npm-stop.1 +++ b/deps/npm/man/man1/npm-stop.1 @@ -1,4 +1,4 @@ -.TH "NPM\-STOP" "1" "August 2019" "" "" +.TH "NPM\-STOP" "1" "September 2019" "" "" .SH "NAME" \fBnpm-stop\fR \- Stop a package .SH SYNOPSIS diff --git a/deps/npm/man/man1/npm-team.1 b/deps/npm/man/man1/npm-team.1 index 53661c74509c15..7ff899eec89061 100644 --- a/deps/npm/man/man1/npm-team.1 +++ b/deps/npm/man/man1/npm-team.1 @@ -1,4 +1,4 @@ -.TH "NPM\-TEAM" "1" "August 2019" "" "" +.TH "NPM\-TEAM" "1" "September 2019" "" "" .SH "NAME" \fBnpm-team\fR \- Manage organization teams and team memberships .SH SYNOPSIS diff --git a/deps/npm/man/man1/npm-test.1 b/deps/npm/man/man1/npm-test.1 index 7bd09730e274cc..cda0dc7bcc48a2 100644 --- a/deps/npm/man/man1/npm-test.1 +++ b/deps/npm/man/man1/npm-test.1 @@ -1,4 +1,4 @@ -.TH "NPM\-TEST" "1" "August 2019" "" "" +.TH "NPM\-TEST" "1" "September 2019" "" "" .SH "NAME" \fBnpm-test\fR \- Test a package .SH SYNOPSIS diff --git a/deps/npm/man/man1/npm-token.1 b/deps/npm/man/man1/npm-token.1 index ee36d1a3aabbc1..82338976e58644 100644 --- a/deps/npm/man/man1/npm-token.1 +++ b/deps/npm/man/man1/npm-token.1 @@ -1,4 +1,4 @@ -.TH "NPM\-TOKEN" "1" "August 2019" "" "" +.TH "NPM\-TOKEN" "1" "September 2019" "" "" .SH "NAME" \fBnpm-token\fR \- Manage your authentication tokens .SH SYNOPSIS diff --git a/deps/npm/man/man1/npm-uninstall.1 b/deps/npm/man/man1/npm-uninstall.1 index cda784e9564113..3d9ebb93543262 100644 --- a/deps/npm/man/man1/npm-uninstall.1 +++ b/deps/npm/man/man1/npm-uninstall.1 @@ -1,4 +1,4 @@ -.TH "NPM\-UNINSTALL" "1" "August 2019" "" "" +.TH "NPM\-UNINSTALL" "1" "September 2019" "" "" .SH "NAME" \fBnpm-uninstall\fR \- Remove a package .SH SYNOPSIS diff --git a/deps/npm/man/man1/npm-unpublish.1 b/deps/npm/man/man1/npm-unpublish.1 index 7e23a53fffb103..c3eda750e6fbd0 100644 --- a/deps/npm/man/man1/npm-unpublish.1 +++ b/deps/npm/man/man1/npm-unpublish.1 @@ -1,4 +1,4 @@ -.TH "NPM\-UNPUBLISH" "1" "August 2019" "" "" +.TH "NPM\-UNPUBLISH" "1" "September 2019" "" "" .SH "NAME" \fBnpm-unpublish\fR \- Remove a package from the registry .SH SYNOPSIS diff --git a/deps/npm/man/man1/npm-update.1 b/deps/npm/man/man1/npm-update.1 index e84da4f2cf6f67..6f66409bc968d1 100644 --- a/deps/npm/man/man1/npm-update.1 +++ b/deps/npm/man/man1/npm-update.1 @@ -1,4 +1,4 @@ -.TH "NPM\-UPDATE" "1" "August 2019" "" "" +.TH "NPM\-UPDATE" "1" "September 2019" "" "" .SH "NAME" \fBnpm-update\fR \- Update a package .SH SYNOPSIS diff --git 
a/deps/npm/man/man1/npm-version.1 b/deps/npm/man/man1/npm-version.1 index 2865c678009b58..69b18d13ebe16c 100644 --- a/deps/npm/man/man1/npm-version.1 +++ b/deps/npm/man/man1/npm-version.1 @@ -1,4 +1,4 @@ -.TH "NPM\-VERSION" "1" "August 2019" "" "" +.TH "NPM\-VERSION" "1" "September 2019" "" "" .SH "NAME" \fBnpm-version\fR \- Bump a package version .SH SYNOPSIS diff --git a/deps/npm/man/man1/npm-view.1 b/deps/npm/man/man1/npm-view.1 index 3319199395d619..044c0ab0b7a109 100644 --- a/deps/npm/man/man1/npm-view.1 +++ b/deps/npm/man/man1/npm-view.1 @@ -1,4 +1,4 @@ -.TH "NPM\-VIEW" "1" "August 2019" "" "" +.TH "NPM\-VIEW" "1" "September 2019" "" "" .SH "NAME" \fBnpm-view\fR \- View registry info .SH SYNOPSIS diff --git a/deps/npm/man/man1/npm-whoami.1 b/deps/npm/man/man1/npm-whoami.1 index 587ae5384df943..45523202efe4a9 100644 --- a/deps/npm/man/man1/npm-whoami.1 +++ b/deps/npm/man/man1/npm-whoami.1 @@ -1,4 +1,4 @@ -.TH "NPM\-WHOAMI" "1" "August 2019" "" "" +.TH "NPM\-WHOAMI" "1" "September 2019" "" "" .SH "NAME" \fBnpm-whoami\fR \- Display npm username .SH SYNOPSIS diff --git a/deps/npm/man/man1/npm.1 b/deps/npm/man/man1/npm.1 index 4566a986b7c736..594995af1ca5da 100644 --- a/deps/npm/man/man1/npm.1 +++ b/deps/npm/man/man1/npm.1 @@ -1,4 +1,4 @@ -.TH "NPM" "1" "August 2019" "" "" +.TH "NPM" "1" "September 2019" "" "" .SH "NAME" \fBnpm\fR \- javascript package manager .SH SYNOPSIS @@ -10,7 +10,7 @@ npm [args] .RE .SH VERSION .P -6.10.3 +6.11.3 .SH DESCRIPTION .P npm is the package manager for the Node JavaScript platform\. It puts diff --git a/deps/npm/man/man5/npm-folders.5 b/deps/npm/man/man5/npm-folders.5 index ad8a0f30ceee63..d2dd3ce2b42f67 100644 --- a/deps/npm/man/man5/npm-folders.5 +++ b/deps/npm/man/man5/npm-folders.5 @@ -1,4 +1,4 @@ -.TH "NPM\-FOLDERS" "5" "August 2019" "" "" +.TH "NPM\-FOLDERS" "5" "September 2019" "" "" .SH "NAME" \fBnpm-folders\fR \- Folder Structures Used by npm .SH DESCRIPTION diff --git a/deps/npm/man/man5/npm-global.5 b/deps/npm/man/man5/npm-global.5 index ad8a0f30ceee63..d2dd3ce2b42f67 100644 --- a/deps/npm/man/man5/npm-global.5 +++ b/deps/npm/man/man5/npm-global.5 @@ -1,4 +1,4 @@ -.TH "NPM\-FOLDERS" "5" "August 2019" "" "" +.TH "NPM\-FOLDERS" "5" "September 2019" "" "" .SH "NAME" \fBnpm-folders\fR \- Folder Structures Used by npm .SH DESCRIPTION diff --git a/deps/npm/man/man5/npm-json.5 b/deps/npm/man/man5/npm-json.5 index 22756f7e4c6a2e..3e3ec76bf6df6d 100644 --- a/deps/npm/man/man5/npm-json.5 +++ b/deps/npm/man/man5/npm-json.5 @@ -1,4 +1,4 @@ -.TH "PACKAGE\.JSON" "5" "August 2019" "" "" +.TH "PACKAGE\.JSON" "5" "September 2019" "" "" .SH "NAME" \fBpackage.json\fR \- Specifics of npm's package\.json handling .SH DESCRIPTION diff --git a/deps/npm/man/man5/npm-package-locks.5 b/deps/npm/man/man5/npm-package-locks.5 index fe4f1382f6289a..364641e2b8f121 100644 --- a/deps/npm/man/man5/npm-package-locks.5 +++ b/deps/npm/man/man5/npm-package-locks.5 @@ -1,4 +1,4 @@ -.TH "NPM\-PACKAGE\-LOCKS" "5" "August 2019" "" "" +.TH "NPM\-PACKAGE\-LOCKS" "5" "September 2019" "" "" .SH "NAME" \fBnpm-package-locks\fR \- An explanation of npm lockfiles .SH DESCRIPTION diff --git a/deps/npm/man/man5/npm-shrinkwrap.json.5 b/deps/npm/man/man5/npm-shrinkwrap.json.5 index be25c5cebd83b0..56e03cb3425a3d 100644 --- a/deps/npm/man/man5/npm-shrinkwrap.json.5 +++ b/deps/npm/man/man5/npm-shrinkwrap.json.5 @@ -1,4 +1,4 @@ -.TH "NPM\-SHRINKWRAP\.JSON" "5" "August 2019" "" "" +.TH "NPM\-SHRINKWRAP\.JSON" "5" "September 2019" "" "" .SH "NAME" \fBnpm-shrinkwrap.json\fR \- A publishable lockfile .SH 
DESCRIPTION diff --git a/deps/npm/man/man5/npmrc.5 b/deps/npm/man/man5/npmrc.5 index 449e39e806e167..1a64713d30395a 100644 --- a/deps/npm/man/man5/npmrc.5 +++ b/deps/npm/man/man5/npmrc.5 @@ -1,4 +1,4 @@ -.TH "NPMRC" "5" "August 2019" "" "" +.TH "NPMRC" "5" "September 2019" "" "" .SH "NAME" \fBnpmrc\fR \- The npm config files .SH DESCRIPTION diff --git a/deps/npm/man/man5/package-lock.json.5 b/deps/npm/man/man5/package-lock.json.5 index 4cc5228e57adc2..85cfc15c904ff5 100644 --- a/deps/npm/man/man5/package-lock.json.5 +++ b/deps/npm/man/man5/package-lock.json.5 @@ -1,4 +1,4 @@ -.TH "PACKAGE\-LOCK\.JSON" "5" "August 2019" "" "" +.TH "PACKAGE\-LOCK\.JSON" "5" "September 2019" "" "" .SH "NAME" \fBpackage-lock.json\fR \- A manifestation of the manifest .SH DESCRIPTION diff --git a/deps/npm/man/man5/package.json.5 b/deps/npm/man/man5/package.json.5 index 22756f7e4c6a2e..3e3ec76bf6df6d 100644 --- a/deps/npm/man/man5/package.json.5 +++ b/deps/npm/man/man5/package.json.5 @@ -1,4 +1,4 @@ -.TH "PACKAGE\.JSON" "5" "August 2019" "" "" +.TH "PACKAGE\.JSON" "5" "September 2019" "" "" .SH "NAME" \fBpackage.json\fR \- Specifics of npm's package\.json handling .SH DESCRIPTION diff --git a/deps/npm/man/man7/npm-coding-style.7 b/deps/npm/man/man7/npm-coding-style.7 index 28c425d4fef36a..b80c7ed0df7b2c 100644 --- a/deps/npm/man/man7/npm-coding-style.7 +++ b/deps/npm/man/man7/npm-coding-style.7 @@ -1,4 +1,4 @@ -.TH "NPM\-CODING\-STYLE" "7" "August 2019" "" "" +.TH "NPM\-CODING\-STYLE" "7" "September 2019" "" "" .SH "NAME" \fBnpm-coding-style\fR \- npm's "funny" coding style .SH DESCRIPTION diff --git a/deps/npm/man/man7/npm-config.7 b/deps/npm/man/man7/npm-config.7 index f6c878ea79f514..f9410eedbfdf92 100644 --- a/deps/npm/man/man7/npm-config.7 +++ b/deps/npm/man/man7/npm-config.7 @@ -1,4 +1,4 @@ -.TH "NPM\-CONFIG" "7" "August 2019" "" "" +.TH "NPM\-CONFIG" "7" "September 2019" "" "" .SH "NAME" \fBnpm-config\fR \- More than you probably want to know about npm configuration .SH DESCRIPTION diff --git a/deps/npm/man/man7/npm-developers.7 b/deps/npm/man/man7/npm-developers.7 index 25c057843cb3c8..990c130e8bc873 100644 --- a/deps/npm/man/man7/npm-developers.7 +++ b/deps/npm/man/man7/npm-developers.7 @@ -1,4 +1,4 @@ -.TH "NPM\-DEVELOPERS" "7" "August 2019" "" "" +.TH "NPM\-DEVELOPERS" "7" "September 2019" "" "" .SH "NAME" \fBnpm-developers\fR \- Developer Guide .SH DESCRIPTION diff --git a/deps/npm/man/man7/npm-disputes.7 b/deps/npm/man/man7/npm-disputes.7 index 1de4bc90c487b3..8a954937a0a9ce 100644 --- a/deps/npm/man/man7/npm-disputes.7 +++ b/deps/npm/man/man7/npm-disputes.7 @@ -1,4 +1,4 @@ -.TH "NPM\-DISPUTES" "7" "August 2019" "" "" +.TH "NPM\-DISPUTES" "7" "September 2019" "" "" .SH "NAME" \fBnpm-disputes\fR \- Handling Module Name Disputes .P diff --git a/deps/npm/man/man7/npm-index.7 b/deps/npm/man/man7/npm-index.7 index 359529beba0411..1a8db20410ddd5 100644 --- a/deps/npm/man/man7/npm-index.7 +++ b/deps/npm/man/man7/npm-index.7 @@ -1,4 +1,4 @@ -.TH "NPM\-INDEX" "7" "August 2019" "" "" +.TH "NPM\-INDEX" "7" "September 2019" "" "" .SH "NAME" \fBnpm-index\fR \- Index of all npm documentation .SS npm help README diff --git a/deps/npm/man/man7/npm-orgs.7 b/deps/npm/man/man7/npm-orgs.7 index c01f4e6ab7a107..1954b726bf5486 100644 --- a/deps/npm/man/man7/npm-orgs.7 +++ b/deps/npm/man/man7/npm-orgs.7 @@ -1,4 +1,4 @@ -.TH "NPM\-ORGS" "7" "August 2019" "" "" +.TH "NPM\-ORGS" "7" "September 2019" "" "" .SH "NAME" \fBnpm-orgs\fR \- Working with Teams & Orgs .SH DESCRIPTION diff --git a/deps/npm/man/man7/npm-registry.7 
b/deps/npm/man/man7/npm-registry.7 index 1799bf9428e43a..840ebde1a9c83a 100644 --- a/deps/npm/man/man7/npm-registry.7 +++ b/deps/npm/man/man7/npm-registry.7 @@ -1,4 +1,4 @@ -.TH "NPM\-REGISTRY" "7" "August 2019" "" "" +.TH "NPM\-REGISTRY" "7" "September 2019" "" "" .SH "NAME" \fBnpm-registry\fR \- The JavaScript Package Registry .SH DESCRIPTION diff --git a/deps/npm/man/man7/npm-scope.7 b/deps/npm/man/man7/npm-scope.7 index ff246438dca24f..e4acb4c6be2cb7 100644 --- a/deps/npm/man/man7/npm-scope.7 +++ b/deps/npm/man/man7/npm-scope.7 @@ -1,4 +1,4 @@ -.TH "NPM\-SCOPE" "7" "August 2019" "" "" +.TH "NPM\-SCOPE" "7" "September 2019" "" "" .SH "NAME" \fBnpm-scope\fR \- Scoped packages .SH DESCRIPTION diff --git a/deps/npm/man/man7/npm-scripts.7 b/deps/npm/man/man7/npm-scripts.7 index ba1d29c3f33705..8a55172256f212 100644 --- a/deps/npm/man/man7/npm-scripts.7 +++ b/deps/npm/man/man7/npm-scripts.7 @@ -1,4 +1,4 @@ -.TH "NPM\-SCRIPTS" "7" "August 2019" "" "" +.TH "NPM\-SCRIPTS" "7" "September 2019" "" "" .SH "NAME" \fBnpm-scripts\fR \- How npm handles the "scripts" field .SH DESCRIPTION diff --git a/deps/npm/man/man7/removing-npm.7 b/deps/npm/man/man7/removing-npm.7 index a33a8426fb71d0..4eaba8b5c95904 100644 --- a/deps/npm/man/man7/removing-npm.7 +++ b/deps/npm/man/man7/removing-npm.7 @@ -1,4 +1,4 @@ -.TH "NPM\-REMOVAL" "1" "August 2019" "" "" +.TH "NPM\-REMOVAL" "1" "September 2019" "" "" .SH "NAME" \fBnpm-removal\fR \- Cleaning the Slate .SH SYNOPSIS diff --git a/deps/npm/man/man7/semver.7 b/deps/npm/man/man7/semver.7 index 8978c25958afc3..778eb1baf4fd2b 100644 --- a/deps/npm/man/man7/semver.7 +++ b/deps/npm/man/man7/semver.7 @@ -1,4 +1,4 @@ -.TH "SEMVER" "7" "August 2019" "" "" +.TH "SEMVER" "7" "September 2019" "" "" .SH "NAME" \fBsemver\fR \- The semantic versioner for npm .SH Install @@ -496,15 +496,16 @@ range, use the \fBsatisfies(version, range)\fP function\. .RE .P -This aims to provide a very forgiving translation of a non\-semver -string to semver\. It looks for the first digit in a string, and -consumes all remaining characters which satisfy at least a partial semver -(e\.g\., \fB1\fP, \fB1\.2\fP, \fB1\.2\.3\fP) up to the max permitted length (256 characters)\. -Longer versions are simply truncated (\fB4\.6\.3\.9\.2\-alpha2\fP becomes \fB4\.6\.3\fP)\. -All surrounding text is simply ignored (\fBv3\.4 replaces v3\.3\.1\fP becomes \fB3\.4\.0\fP)\. -Only text which lacks digits will fail coercion (\fBversion one\fP is not valid)\. -The maximum length for any semver component considered for coercion is 16 characters; -longer components will be ignored (\fB10000000000000000\.4\.7\.4\fP becomes \fB4\.7\.4\fP)\. -The maximum value for any semver component is \fBInteger\.MAX_SAFE_INTEGER || (2**53 \- 1)\fP; -higher value components are invalid (\fB9999999999999999\.4\.7\.4\fP is likely invalid)\. +This aims to provide a very forgiving translation of a non\-semver string to +semver\. It looks for the first digit in a string, and consumes all +remaining characters which satisfy at least a partial semver (e\.g\., \fB1\fP, +\fB1\.2\fP, \fB1\.2\.3\fP) up to the max permitted length (256 characters)\. Longer +versions are simply truncated (\fB4\.6\.3\.9\.2\-alpha2\fP becomes \fB4\.6\.3\fP)\. All +surrounding text is simply ignored (\fBv3\.4 replaces v3\.3\.1\fP becomes +\fB3\.4\.0\fP)\. Only text which lacks digits will fail coercion (\fBversion one\fP +is not valid)\. 
The maximum length for any semver component considered for +coercion is 16 characters; longer components will be ignored +(\fB10000000000000000\.4\.7\.4\fP becomes \fB4\.7\.4\fP)\. The maximum value for any +semver component is \fBNumber\.MAX_SAFE_INTEGER || (2**53 \- 1)\fP; higher value +components are invalid (\fB9999999999999999\.4\.7\.4\fP is likely invalid)\. diff --git a/deps/npm/node_modules/bin-links/CHANGELOG.md b/deps/npm/node_modules/bin-links/CHANGELOG.md index fb6bd0bd1e7b0e..e5293027887bb3 100644 --- a/deps/npm/node_modules/bin-links/CHANGELOG.md +++ b/deps/npm/node_modules/bin-links/CHANGELOG.md @@ -2,6 +2,11 @@ All notable changes to this project will be documented in this file. See [standard-version](https://github.com/conventional-changelog/standard-version) for commit guidelines. + +## [1.1.3](https://github.com/npm/bin-links/compare/v1.1.2...v1.1.3) (2019-08-14) + + + ## [1.1.2](https://github.com/npm/bin-links/compare/v1.1.1...v1.1.2) (2018-03-22) diff --git a/deps/npm/node_modules/bin-links/package.json b/deps/npm/node_modules/bin-links/package.json index 0a1af20fdaec46..e14be1e6926dfa 100644 --- a/deps/npm/node_modules/bin-links/package.json +++ b/deps/npm/node_modules/bin-links/package.json @@ -1,54 +1,53 @@ { - "_args": [ - [ - "bin-links@1.1.2", - "/Users/rebecca/code/npm" - ] - ], - "_from": "bin-links@1.1.2", - "_id": "bin-links@1.1.2", + "_from": "bin-links@1.1.3", + "_id": "bin-links@1.1.3", "_inBundle": false, - "_integrity": "sha512-8eEHVgYP03nILphilltWjeIjMbKyJo3wvp9K816pHbhP301ismzw15mxAAEVQ/USUwcP++1uNrbERbp8lOA6Fg==", + "_integrity": "sha512-TEwmH4PHU/D009stP+fkkazMJgkBNCv60z01lQ/Mn8E6+ThHoD03svMnBVuCowwXo2nP2qKyKZxKxp58OHRzxw==", "_location": "/bin-links", "_phantomChildren": {}, "_requested": { "type": "version", "registry": true, - "raw": "bin-links@1.1.2", + "raw": "bin-links@1.1.3", "name": "bin-links", "escapedName": "bin-links", - "rawSpec": "1.1.2", + "rawSpec": "1.1.3", "saveSpec": null, - "fetchSpec": "1.1.2" + "fetchSpec": "1.1.3" }, "_requiredBy": [ + "#USER", "/", - "/libcipm" + "/libcipm", + "/libnpm" ], - "_resolved": "https://registry.npmjs.org/bin-links/-/bin-links-1.1.2.tgz", - "_spec": "1.1.2", - "_where": "/Users/rebecca/code/npm", + "_resolved": "https://registry.npmjs.org/bin-links/-/bin-links-1.1.3.tgz", + "_shasum": "702fd59552703727313bc624bdbc4c0d3431c2ca", + "_spec": "bin-links@1.1.3", + "_where": "/Users/isaacs/dev/npm/cli", "author": { "name": "Mike Sherov" }, "bugs": { "url": "https://github.com/npm/bin-links/issues" }, + "bundleDependencies": false, "dependencies": { - "bluebird": "^3.5.0", - "cmd-shim": "^2.0.2", - "gentle-fs": "^2.0.0", - "graceful-fs": "^4.1.11", + "bluebird": "^3.5.3", + "cmd-shim": "^3.0.0", + "gentle-fs": "^2.0.1", + "graceful-fs": "^4.1.15", "write-file-atomic": "^2.3.0" }, + "deprecated": false, "description": "JavaScript package binary linker", "devDependencies": { "mkdirp": "^0.5.1", - "nyc": "^11.1.0", - "rimraf": "^2.6.2", + "nyc": "^13.1.0", + "rimraf": "^2.6.3", "standard": "^10.0.3", - "standard-version": "^4.2.0", - "tap": "^10.7.2", + "standard-version": "^4.4.0", + "tap": "^12.1.3", "weallbehave": "^1.2.0", "weallcontribute": "^1.0.8" }, @@ -77,5 +76,5 @@ "update-coc": "weallbehave -o . && git add CODE_OF_CONDUCT.md && git commit -m 'docs(coc): updated CODE_OF_CONDUCT.md'", "update-contrib": "weallcontribute -o . 
&& git add CONTRIBUTING.md && git commit -m 'docs(contributing): updated CONTRIBUTING.md'" }, - "version": "1.1.2" + "version": "1.1.3" } diff --git a/deps/npm/node_modules/cacache/CHANGELOG.md b/deps/npm/node_modules/cacache/CHANGELOG.md index de84bdbf132169..f67fbc8b4ec9ee 100644 --- a/deps/npm/node_modules/cacache/CHANGELOG.md +++ b/deps/npm/node_modules/cacache/CHANGELOG.md @@ -2,6 +2,15 @@ All notable changes to this project will be documented in this file. See [standard-version](https://github.com/conventional-changelog/standard-version) for commit guidelines. +### [12.0.3](https://github.com/npm/cacache/compare/v12.0.2...v12.0.3) (2019-08-19) + + +### Bug Fixes + +* do not chown if not running as root ([2d80af9](https://github.com/npm/cacache/commit/2d80af9)) + + + ### [12.0.2](https://github.com/npm/cacache/compare/v12.0.1...v12.0.2) (2019-07-19) diff --git a/deps/npm/node_modules/cacache/lib/util/fix-owner.js b/deps/npm/node_modules/cacache/lib/util/fix-owner.js index c91c709d9586b6..f5c33db5f0adc2 100644 --- a/deps/npm/node_modules/cacache/lib/util/fix-owner.js +++ b/deps/npm/node_modules/cacache/lib/util/fix-owner.js @@ -37,9 +37,15 @@ function fixOwner (cache, filepath) { // This platform doesn't need ownership fixing return BB.resolve() } + + getSelf() + if (self.uid !== 0) { + // almost certainly can't chown anyway + return BB.resolve() + } + return BB.resolve(inferOwner(cache)).then(owner => { const { uid, gid } = owner - getSelf() // No need to override if it's already what we used. if (self.uid === uid && self.gid === gid) { diff --git a/deps/npm/node_modules/cacache/package.json b/deps/npm/node_modules/cacache/package.json index 1df72c2d7db6f1..aa20092ccc9a14 100644 --- a/deps/npm/node_modules/cacache/package.json +++ b/deps/npm/node_modules/cacache/package.json @@ -1,19 +1,19 @@ { - "_from": "cacache@12.0.2", - "_id": "cacache@12.0.2", + "_from": "cacache@12.0.3", + "_id": "cacache@12.0.3", "_inBundle": false, - "_integrity": "sha512-ifKgxH2CKhJEg6tNdAwziu6Q33EvuG26tYcda6PT3WKisZcYDXsnEdnRv67Po3yCzFfaSoMjGZzJyD2c3DT1dg==", + "_integrity": "sha512-kqdmfXEGFepesTuROHMs3MpFLWrPkSSpRqOw80RCflZXy/khxaArvFrQ7uJxSUduzAufc6G0g1VUCOZXxWavPw==", "_location": "/cacache", "_phantomChildren": {}, "_requested": { "type": "version", "registry": true, - "raw": "cacache@12.0.2", + "raw": "cacache@12.0.3", "name": "cacache", "escapedName": "cacache", - "rawSpec": "12.0.2", + "rawSpec": "12.0.3", "saveSpec": null, - "fetchSpec": "12.0.2" + "fetchSpec": "12.0.3" }, "_requiredBy": [ "#USER", @@ -21,9 +21,9 @@ "/make-fetch-happen", "/pacote" ], - "_resolved": "https://registry.npmjs.org/cacache/-/cacache-12.0.2.tgz", - "_shasum": "8db03205e36089a3df6954c66ce92541441ac46c", - "_spec": "cacache@12.0.2", + "_resolved": "https://registry.npmjs.org/cacache/-/cacache-12.0.3.tgz", + "_shasum": "be99abba4e1bf5df461cd5a2c1071fc432573390", + "_spec": "cacache@12.0.3", "_where": "/Users/isaacs/dev/npm/cli", "author": { "name": "Kat Marchán", @@ -125,5 +125,5 @@ "update-coc": "weallbehave -o . && git add CODE_OF_CONDUCT.md && git commit -m 'docs(coc): updated CODE_OF_CONDUCT.md'", "update-contrib": "weallcontribute -o . 
&& git add CONTRIBUTING.md && git commit -m 'docs(contributing): updated CONTRIBUTING.md'" }, - "version": "12.0.2" + "version": "12.0.3" } diff --git a/deps/npm/node_modules/cmd-shim/.npmignore b/deps/npm/node_modules/cmd-shim/.npmignore deleted file mode 100644 index 699b5d4f136936..00000000000000 --- a/deps/npm/node_modules/cmd-shim/.npmignore +++ /dev/null @@ -1,16 +0,0 @@ -lib-cov -*.seed -*.log -*.csv -*.dat -*.out -*.pid -*.gz - -pids -logs -results - -npm-debug.log - -node_modules diff --git a/deps/npm/node_modules/cmd-shim/.travis.yml b/deps/npm/node_modules/cmd-shim/.travis.yml deleted file mode 100644 index 2ca91f28954d72..00000000000000 --- a/deps/npm/node_modules/cmd-shim/.travis.yml +++ /dev/null @@ -1,4 +0,0 @@ -language: node_js -node_js: - - "0.10" - - "0.8" \ No newline at end of file diff --git a/deps/npm/node_modules/cmd-shim/LICENSE b/deps/npm/node_modules/cmd-shim/LICENSE index 0c44ae716db8f3..20a47625409237 100644 --- a/deps/npm/node_modules/cmd-shim/LICENSE +++ b/deps/npm/node_modules/cmd-shim/LICENSE @@ -1,27 +1,15 @@ -Copyright (c) Isaac Z. Schlueter ("Author") -All rights reserved. +The ISC License -The BSD License +Copyright (c) npm, Inc. and Contributors -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions -are met: +Permission to use, copy, modify, and/or distribute this software for any +purpose with or without fee is hereby granted, provided that the above +copyright notice and this permission notice appear in all copies. -1. Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - -2. Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in the - documentation and/or other materials provided with the distribution. - -THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND -ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR -PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS -BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR -CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF -SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR -BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, -WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE -OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN -IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES +WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF +MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR +ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES +WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN +ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR +IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. diff --git a/deps/npm/node_modules/cmd-shim/README.md b/deps/npm/node_modules/cmd-shim/README.md index ff6745f8e5970d..2b8b2f468f2c3f 100644 --- a/deps/npm/node_modules/cmd-shim/README.md +++ b/deps/npm/node_modules/cmd-shim/README.md @@ -5,8 +5,8 @@ since symlinks are not suitable for this purpose there. On Unix systems, you should use a symbolic link instead. 
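
For orientation while reading the rewritten `index.js` further down in this patch, here is a minimal, hedged sketch of the callback API it implements; the paths are hypothetical placeholders, but the `(from, to, cb)` signature, the `ifExists` variant, and the three output files all appear in the diff below:

```js
// Sketch of the cmd-shim 3.x API (paths are made up for illustration).
var cmdShim = require('cmd-shim')

cmdShim('/pkg/bin/cli.js', '/pkg/node_modules/.bin/cli', function (er) {
  if (er) throw er
  // Three sibling shims now exist:
  //   /pkg/node_modules/.bin/cli       sh wrapper (Cygwin/MinGW/MSYS aware)
  //   /pkg/node_modules/.bin/cli.cmd   cmd.exe batch wrapper
  //   /pkg/node_modules/.bin/cli.ps1   PowerShell wrapper, new in 3.x
})

// cmdShim.ifExists(from, to, cb) is identical, except that it succeeds
// silently when `from` does not exist.
```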
-[![Build Status](https://img.shields.io/travis/ForbesLindesay/cmd-shim/master.svg)](https://travis-ci.org/ForbesLindesay/cmd-shim) -[![Dependency Status](https://img.shields.io/david/ForbesLindesay/cmd-shim.svg)](https://david-dm.org/ForbesLindesay/cmd-shim) +[![Build Status](https://img.shields.io/travis/npm/cmd-shim/master.svg)](https://travis-ci.org/npm/cmd-shim) +[![Dependency Status](https://img.shields.io/david/npm/cmd-shim.svg)](https://david-dm.org/npm/cmd-shim) [![NPM version](https://img.shields.io/npm/v/cmd-shim.svg)](https://www.npmjs.com/package/cmd-shim) ## Installation diff --git a/deps/npm/node_modules/cmd-shim/index.js b/deps/npm/node_modules/cmd-shim/index.js index 9f22e103a5fe87..010bd38d398d22 100644 --- a/deps/npm/node_modules/cmd-shim/index.js +++ b/deps/npm/node_modules/cmd-shim/index.js @@ -6,7 +6,7 @@ // "#! " // // Write a binroot/pkg.bin + ".cmd" file that has this line in it: -// @ %~dp0 %* +// @ %dp0% %* module.exports = cmdShim cmdShim.ifExists = cmdShimIfExists @@ -15,7 +15,8 @@ var fs = require("graceful-fs") var mkdir = require("mkdirp") , path = require("path") - , shebangExpr = /^#\!\s*(?:\/usr\/bin\/env)?\s*([^ \t]+)(.*)$/ + , toBatchSyntax = require("./lib/to-batch-syntax") + , shebangExpr = /^#\!\s*(?:\/usr\/bin\/env)?\s*([^ \t]+=[^ \t]+\s+)*\s*([^ \t]+)(.*)$/ function cmdShimIfExists (from, to, cb) { fs.stat(from, function (er) { @@ -42,9 +43,10 @@ function cmdShim (from, to, cb) { } function cmdShim_ (from, to, cb) { - var then = times(2, next, cb) + var then = times(3, next, cb) rm(to, then) rm(to + ".cmd", then) + rm(to + ".ps1", then) function next(er) { writeShim(from, to, cb) @@ -60,63 +62,99 @@ function writeShim (from, to, cb) { if (er) return cb(er) fs.readFile(from, "utf8", function (er, data) { - if (er) return writeShim_(from, to, null, null, cb) + if (er) return writeShim_(from, to, null, null, null, cb) var firstLine = data.trim().split(/\r*\n/)[0] , shebang = firstLine.match(shebangExpr) - if (!shebang) return writeShim_(from, to, null, null, cb) - var prog = shebang[1] - , args = shebang[2] || "" - return writeShim_(from, to, prog, args, cb) + if (!shebang) return writeShim_(from, to, null, null, null, cb) + var vars = shebang[1] || "" + , prog = shebang[2] + , args = shebang[3] || "" + return writeShim_(from, to, prog, args, vars, cb) }) }) } -function writeShim_ (from, to, prog, args, cb) { + +function writeShim_ (from, to, prog, args, variables, cb) { var shTarget = path.relative(path.dirname(to), from) , target = shTarget.split("/").join("\\") , longProg , shProg = prog && prog.split("\\").join("/") , shLongProg + , pwshProg = shProg && "\"" + shProg + "$exe\"" + , pwshLongProg shTarget = shTarget.split("\\").join("/") args = args || "" + variables = variables || "" if (!prog) { - prog = "\"%~dp0\\" + target + "\"" + prog = "\"%dp0%\\" + target + "\"" shProg = "\"$basedir/" + shTarget + "\"" + pwshProg = shProg args = "" target = "" shTarget = "" } else { - longProg = "\"%~dp0\\" + prog + ".exe\"" + longProg = "\"%dp0%\\" + prog + ".exe\"" shLongProg = "\"$basedir/" + prog + "\"" - target = "\"%~dp0\\" + target + "\"" + pwshLongProg = "\"$basedir/" + prog + "$exe\"" + target = "\"%dp0%\\" + target + "\"" shTarget = "\"$basedir/" + shTarget + "\"" } - // @IF EXIST "%~dp0\node.exe" ( - // "%~dp0\node.exe" "%~dp0\.\node_modules\npm\bin\npm-cli.js" %* + // @SETLOCAL + // @CALL :find_dp0 + // + // @IF EXIST "%dp0%\node.exe" ( + // @SET "_prog=%dp0%\node.exe" // ) ELSE ( - // SETLOCAL - // SET PATHEXT=%PATHEXT:;.JS;=;% - // node 
"%~dp0\.\node_modules\npm\bin\npm-cli.js" %* + // @SET "_prog=node" + // @SET PATHEXT=%PATHEXT:;.JS;=;% // ) + // + // "%_prog%" "%dp0%\.\node_modules\npm\bin\npm-cli.js" %* + // @ENDLOCAL + // @EXIT /b %errorlevel% + // + // :find_dp0 + // SET dp0=%~dp0 + // EXIT /b + // + // Subroutine trick to fix https://github.com/npm/cmd-shim/issues/10 + var head = '@ECHO off\r\n' + + 'SETLOCAL\r\n' + + 'CALL :find_dp0\r\n' + var foot = 'ENDLOCAL\r\n' + + 'EXIT /b %errorlevel%\r\n' + + ':find_dp0\r\n' + + 'SET dp0=%~dp0\r\n' + + 'EXIT /b\r\n' + var cmd if (longProg) { - cmd = "@IF EXIST " + longProg + " (\r\n" - + " " + longProg + " " + args + " " + target + " %*\r\n" + shLongProg = shLongProg.trim(); + args = args.trim(); + var variableDeclarationsAsBatch = toBatchSyntax.convertToSetCommands(variables) + cmd = head + + variableDeclarationsAsBatch + + "\r\n" + + "IF EXIST " + longProg + " (\r\n" + + " SET \"_prog=" + longProg.replace(/(^")|("$)/g, '') + "\"\r\n" + ") ELSE (\r\n" - + " @SETLOCAL\r\n" - + " @SET PATHEXT=%PATHEXT:;.JS;=;%\r\n" - + " " + prog + " " + args + " " + target + " %*\r\n" - + ")" + + " SET \"_prog=" + prog.replace(/(^")|("$)/g, '') + "\"\r\n" + + " SET PATHEXT=%PATHEXT:;.JS;=;%\r\n" + + ")\r\n" + + "\r\n" + + "\"%_prog%\" " + args + " " + target + " %*\r\n" + + foot } else { - cmd = "@" + prog + " " + args + " " + target + " %*\r\n" + cmd = head + prog + " " + args + " " + target + " %*\r\n" + foot } // #!/bin/sh // basedir=`dirname "$0"` // // case `uname` in - // *CYGWIN*) basedir=`cygpath -w "$basedir"`;; + // *CYGWIN*|*MINGW*|*MSYS*) basedir=`cygpath -w "$basedir"`;; // esac // // if [ -x "$basedir/node.exe" ]; then @@ -130,30 +168,76 @@ function writeShim_ (from, to, prog, args, cb) { var sh = "#!/bin/sh\n" - if (shLongProg) { - sh = sh - + "basedir=$(dirname \"$(echo \"$0\" | sed -e 's,\\\\,/,g')\")\n" - + "\n" - + "case `uname` in\n" - + " *CYGWIN*) basedir=`cygpath -w \"$basedir\"`;;\n" - + "esac\n" - + "\n" + sh = sh + + "basedir=$(dirname \"$(echo \"$0\" | sed -e 's,\\\\,/,g')\")\n" + + "\n" + + "case `uname` in\n" + + " *CYGWIN*|*MINGW*|*MSYS*) basedir=`cygpath -w \"$basedir\"`;;\n" + + "esac\n" + + "\n" + if (shLongProg) { sh = sh + "if [ -x "+shLongProg+" ]; then\n" - + " " + shLongProg + " " + args + " " + shTarget + " \"$@\"\n" + + " " + variables + shLongProg + " " + args + " " + shTarget + " \"$@\"\n" + " ret=$?\n" + "else \n" - + " " + shProg + " " + args + " " + shTarget + " \"$@\"\n" + + " " + variables + shProg + " " + args + " " + shTarget + " \"$@\"\n" + " ret=$?\n" + "fi\n" + "exit $ret\n" } else { - sh = shProg + " " + args + " " + shTarget + " \"$@\"\n" + sh = sh + + shProg + " " + args + " " + shTarget + " \"$@\"\n" + "exit $?\n" } - var then = times(2, next, cb) + // #!/usr/bin/env pwsh + // $basedir=Split-Path $MyInvocation.MyCommand.Definition -Parent + // + // $ret=0 + // $exe = "" + // if ($PSVersionTable.PSVersion -lt "6.0" -or $IsWindows) { + // # Fix case when both the Windows and Linux builds of Node + // # are installed in the same directory + // $exe = ".exe" + // } + // if (Test-Path "$basedir/node") { + // & "$basedir/node$exe" "$basedir/node_modules/npm/bin/npm-cli.js" $args + // $ret=$LASTEXITCODE + // } else { + // & "node$exe" "$basedir/node_modules/npm/bin/npm-cli.js" $args + // $ret=$LASTEXITCODE + // } + // exit $ret + var pwsh = "#!/usr/bin/env pwsh\n" + + "$basedir=Split-Path $MyInvocation.MyCommand.Definition -Parent\n" + + "\n" + + "$exe=\"\"\n" + + "if ($PSVersionTable.PSVersion -lt \"6.0\" -or $IsWindows) {\n" + + " # Fix 
case when both the Windows and Linux builds of Node\n" + + " # are installed in the same directory\n" + + " $exe=\".exe\"\n" + + "}\n" + if (shLongProg) { + pwsh = pwsh + + "$ret=0\n" + + "if (Test-Path " + pwshLongProg + ") {\n" + + " & " + pwshLongProg + " " + args + " " + shTarget + " $args\n" + + " $ret=$LASTEXITCODE\n" + + "} else {\n" + + " & " + pwshProg + " " + args + " " + shTarget + " $args\n" + + " $ret=$LASTEXITCODE\n" + + "}\n" + + "exit $ret\n" + } else { + pwsh = pwsh + + "& " + pwshProg + " " + args + " " + shTarget + " $args\n" + + "exit $LASTEXITCODE\n" + } + + var then = times(3, next, cb) + fs.writeFile(to + ".ps1", pwsh, "utf8", then) fs.writeFile(to + ".cmd", cmd, "utf8", then) fs.writeFile(to, sh, "utf8", then) function next () { @@ -162,9 +246,10 @@ function writeShim_ (from, to, prog, args, cb) { } function chmodShim (to, cb) { - var then = times(2, cb, cb) - fs.chmod(to, 0755, then) - fs.chmod(to + ".cmd", 0755, then) + var then = times(3, cb, cb) + fs.chmod(to, "0755", then) + fs.chmod(to + ".cmd", "0755", then) + fs.chmod(to + ".ps1", "0755", then) } function times(n, ok, cb) { diff --git a/deps/npm/node_modules/cmd-shim/lib/to-batch-syntax.js b/deps/npm/node_modules/cmd-shim/lib/to-batch-syntax.js new file mode 100644 index 00000000000000..59d242c071efe9 --- /dev/null +++ b/deps/npm/node_modules/cmd-shim/lib/to-batch-syntax.js @@ -0,0 +1,49 @@ +exports.replaceDollarWithPercentPair = replaceDollarWithPercentPair +exports.convertToSetCommand = convertToSetCommand +exports.convertToSetCommands = convertToSetCommands + +function convertToSetCommand(key, value) { + var line = "" + key = key || "" + key = key.trim() + value = value || "" + value = value.trim() + if(key && value && value.length > 0) { + line = "@SET " + key + "=" + replaceDollarWithPercentPair(value) + "\r\n" + } + return line +} + +function extractVariableValuePairs(declarations) { + var pairs = {} + declarations.map(function(declaration) { + var split = declaration.split("=") + pairs[split[0]]=split[1] + }) + return pairs +} + +function convertToSetCommands(variableString) { + var variableValuePairs = extractVariableValuePairs(variableString.split(" ")) + var variableDeclarationsAsBatch = "" + Object.keys(variableValuePairs).forEach(function (key) { + variableDeclarationsAsBatch += convertToSetCommand(key, variableValuePairs[key]) + }) + return variableDeclarationsAsBatch +} + +function replaceDollarWithPercentPair(value) { + var dollarExpressions = /\$\{?([^\$@#\?\- \t{}:]+)\}?/g + var result = "" + var startIndex = 0 + do { + var match = dollarExpressions.exec(value) + if(match) { + var betweenMatches = value.substring(startIndex, match.index) || "" + result += betweenMatches + "%" + match[1] + "%" + startIndex = dollarExpressions.lastIndex + } + } while (dollarExpressions.lastIndex > 0) + result += value.substr(startIndex) + return result +} diff --git a/deps/npm/node_modules/cmd-shim/package.json b/deps/npm/node_modules/cmd-shim/package.json index 97e73e46fe133e..43a7b36a8fdd0c 100644 --- a/deps/npm/node_modules/cmd-shim/package.json +++ b/deps/npm/node_modules/cmd-shim/package.json @@ -1,54 +1,60 @@ { - "_args": [ - [ - "cmd-shim@2.0.2", - "/Users/rebecca/code/npm" - ] - ], - "_from": "cmd-shim@2.0.2", - "_id": "cmd-shim@2.0.2", + "_from": "cmd-shim@3.0.3", + "_id": "cmd-shim@3.0.3", "_inBundle": false, - "_integrity": "sha1-b8vamUg6j9FdfTChlspp1oii79s=", + "_integrity": "sha512-DtGg+0xiFhQIntSBRzL2fRQBnmtAVwXIDo4Qq46HPpObYquxMaZS4sb82U9nH91qJrlosC1wa9gwr0QyL/HypA==", "_location": "/cmd-shim", 
"_phantomChildren": {}, "_requested": { "type": "version", "registry": true, - "raw": "cmd-shim@2.0.2", + "raw": "cmd-shim@3.0.3", "name": "cmd-shim", "escapedName": "cmd-shim", - "rawSpec": "2.0.2", + "rawSpec": "3.0.3", "saveSpec": null, - "fetchSpec": "2.0.2" + "fetchSpec": "3.0.3" }, "_requiredBy": [ + "#USER", "/", "/bin-links" ], - "_resolved": "https://registry.npmjs.org/cmd-shim/-/cmd-shim-2.0.2.tgz", - "_spec": "2.0.2", - "_where": "/Users/rebecca/code/npm", + "_resolved": "https://registry.npmjs.org/cmd-shim/-/cmd-shim-3.0.3.tgz", + "_shasum": "2c35238d3df37d98ecdd7d5f6b8dc6b21cadc7cb", + "_spec": "cmd-shim@3.0.3", + "_where": "/Users/isaacs/dev/npm/cli", "bugs": { - "url": "https://github.com/ForbesLindesay/cmd-shim/issues" + "url": "https://github.com/npm/cmd-shim/issues" }, + "bundleDependencies": false, "dependencies": { "graceful-fs": "^4.1.2", "mkdirp": "~0.5.0" }, + "deprecated": false, "description": "Used in npm for command line application support", "devDependencies": { "rimraf": "~2.2.8", - "tap": "~0.4.11" + "tap": "^12.7.0" }, - "homepage": "https://github.com/ForbesLindesay/cmd-shim#readme", - "license": "BSD-2-Clause", + "files": [ + "index.js", + "lib" + ], + "homepage": "https://github.com/npm/cmd-shim#readme", + "license": "ISC", "name": "cmd-shim", "repository": { "type": "git", - "url": "git+https://github.com/ForbesLindesay/cmd-shim.git" + "url": "git+https://github.com/npm/cmd-shim.git" }, "scripts": { - "test": "tap test/*.js" + "postpublish": "git push origin --follow-tags", + "postversion": "npm publish", + "preversion": "npm test", + "snap": "TAP_SNAPSHOT=1 tap test/*.js --100", + "test": "tap test/*.js --100" }, - "version": "2.0.2" + "version": "3.0.3" } diff --git a/deps/npm/node_modules/cmd-shim/test/00-setup.js b/deps/npm/node_modules/cmd-shim/test/00-setup.js deleted file mode 100644 index 04ec2b256b135a..00000000000000 --- a/deps/npm/node_modules/cmd-shim/test/00-setup.js +++ /dev/null @@ -1,34 +0,0 @@ -var test = require('tap').test -var mkdirp = require('mkdirp') -var fs = require('fs') -var path = require('path') -var fixtures = path.resolve(__dirname, 'fixtures') - -var froms = { - 'from.exe': 'exe', - 'from.env': '#!/usr/bin/env node\nconsole.log(/hi/)\n', - 'from.env.args': '#!/usr/bin/env node --expose_gc\ngc()\n', - 'from.sh': '#!/usr/bin/sh\necho hi\n', - 'from.sh.args': '#!/usr/bin/sh -x\necho hi\n' -} - -var cmdShim = require('../') - -test('create fixture', function (t) { - mkdirp(fixtures, function (er) { - if (er) - throw er - t.pass('made dir') - Object.keys(froms).forEach(function (f) { - t.test('write ' + f, function (t) { - fs.writeFile(path.resolve(fixtures, f), froms[f], function (er) { - if (er) - throw er - t.pass('wrote ' + f) - t.end() - }) - }) - }) - t.end() - }) -}) diff --git a/deps/npm/node_modules/cmd-shim/test/basic.js b/deps/npm/node_modules/cmd-shim/test/basic.js deleted file mode 100755 index 09823158b865ab..00000000000000 --- a/deps/npm/node_modules/cmd-shim/test/basic.js +++ /dev/null @@ -1,175 +0,0 @@ -var test = require('tap').test -var mkdirp = require('mkdirp') -var fs = require('fs') -var path = require('path') -var fixtures = path.resolve(__dirname, 'fixtures') - -var cmdShim = require('../') - -test('no shebang', function (t) { - var from = path.resolve(fixtures, 'from.exe') - var to = path.resolve(fixtures, 'exe.shim') - cmdShim(from, to, function(er) { - if (er) - throw er - t.equal(fs.readFileSync(to, 'utf8'), - "\"$basedir/from.exe\" \"$@\"\nexit $?\n") - t.equal(fs.readFileSync(to + '.cmd', 'utf8'), 
- "@\"%~dp0\\from.exe\" %*\r\n") - t.end() - }) -}) - -test('env shebang', function (t) { - var from = path.resolve(fixtures, 'from.env') - var to = path.resolve(fixtures, 'env.shim') - cmdShim(from, to, function(er) { - if (er) - throw er - console.error('%j', fs.readFileSync(to, 'utf8')) - console.error('%j', fs.readFileSync(to + '.cmd', 'utf8')) - - t.equal(fs.readFileSync(to, 'utf8'), - "#!/bin/sh"+ - "\nbasedir=$(dirname \"$(echo \"$0\" | sed -e 's,\\\\,/,g')\")"+ - "\n"+ - "\ncase `uname` in"+ - "\n *CYGWIN*) basedir=`cygpath -w \"$basedir\"`;;"+ - "\nesac"+ - "\n"+ - "\nif [ -x \"$basedir/node\" ]; then"+ - "\n \"$basedir/node\" \"$basedir/from.env\" \"$@\""+ - "\n ret=$?"+ - "\nelse "+ - "\n node \"$basedir/from.env\" \"$@\""+ - "\n ret=$?"+ - "\nfi"+ - "\nexit $ret"+ - "\n") - t.equal(fs.readFileSync(to + '.cmd', 'utf8'), - "@IF EXIST \"%~dp0\\node.exe\" (\r"+ - "\n \"%~dp0\\node.exe\" \"%~dp0\\from.env\" %*\r"+ - "\n) ELSE (\r"+ - "\n @SETLOCAL\r"+ - "\n @SET PATHEXT=%PATHEXT:;.JS;=;%\r"+ - "\n node \"%~dp0\\from.env\" %*\r"+ - "\n)") - t.end() - }) -}) - -test('env shebang with args', function (t) { - var from = path.resolve(fixtures, 'from.env.args') - var to = path.resolve(fixtures, 'env.args.shim') - cmdShim(from, to, function(er) { - if (er) - throw er - console.error('%j', fs.readFileSync(to, 'utf8')) - console.error('%j', fs.readFileSync(to + '.cmd', 'utf8')) - - t.equal(fs.readFileSync(to, 'utf8'), - "#!/bin/sh"+ - "\nbasedir=$(dirname \"$(echo \"$0\" | sed -e 's,\\\\,/,g')\")"+ - "\n"+ - "\ncase `uname` in"+ - "\n *CYGWIN*) basedir=`cygpath -w \"$basedir\"`;;"+ - "\nesac"+ - "\n"+ - "\nif [ -x \"$basedir/node\" ]; then"+ - "\n \"$basedir/node\" --expose_gc \"$basedir/from.env.args\" \"$@\""+ - "\n ret=$?"+ - "\nelse "+ - "\n node --expose_gc \"$basedir/from.env.args\" \"$@\""+ - "\n ret=$?"+ - "\nfi"+ - "\nexit $ret"+ - "\n") - t.equal(fs.readFileSync(to + '.cmd', 'utf8'), - "@IF EXIST \"%~dp0\\node.exe\" (\r"+ - "\n \"%~dp0\\node.exe\" --expose_gc \"%~dp0\\from.env.args\" %*\r"+ - "\n) ELSE (\r"+ - "\n @SETLOCAL\r"+ - "\n @SET PATHEXT=%PATHEXT:;.JS;=;%\r"+ - "\n node --expose_gc \"%~dp0\\from.env.args\" %*\r"+ - "\n)") - t.end() - }) -}) - -test('explicit shebang', function (t) { - var from = path.resolve(fixtures, 'from.sh') - var to = path.resolve(fixtures, 'sh.shim') - cmdShim(from, to, function(er) { - if (er) - throw er - console.error('%j', fs.readFileSync(to, 'utf8')) - console.error('%j', fs.readFileSync(to + '.cmd', 'utf8')) - - t.equal(fs.readFileSync(to, 'utf8'), - "#!/bin/sh" + - "\nbasedir=$(dirname \"$(echo \"$0\" | sed -e 's,\\\\,/,g')\")" + - "\n" + - "\ncase `uname` in" + - "\n *CYGWIN*) basedir=`cygpath -w \"$basedir\"`;;" + - "\nesac" + - "\n" + - "\nif [ -x \"$basedir//usr/bin/sh\" ]; then" + - "\n \"$basedir//usr/bin/sh\" \"$basedir/from.sh\" \"$@\"" + - "\n ret=$?" + - "\nelse " + - "\n /usr/bin/sh \"$basedir/from.sh\" \"$@\"" + - "\n ret=$?" 
+ - "\nfi" + - "\nexit $ret" + - "\n") - - t.equal(fs.readFileSync(to + '.cmd', 'utf8'), - "@IF EXIST \"%~dp0\\/usr/bin/sh.exe\" (\r" + - "\n \"%~dp0\\/usr/bin/sh.exe\" \"%~dp0\\from.sh\" %*\r" + - "\n) ELSE (\r" + - "\n @SETLOCAL\r"+ - "\n @SET PATHEXT=%PATHEXT:;.JS;=;%\r"+ - "\n /usr/bin/sh \"%~dp0\\from.sh\" %*\r" + - "\n)") - t.end() - }) -}) - -test('explicit shebang with args', function (t) { - var from = path.resolve(fixtures, 'from.sh.args') - var to = path.resolve(fixtures, 'sh.args.shim') - cmdShim(from, to, function(er) { - if (er) - throw er - console.error('%j', fs.readFileSync(to, 'utf8')) - console.error('%j', fs.readFileSync(to + '.cmd', 'utf8')) - - t.equal(fs.readFileSync(to, 'utf8'), - "#!/bin/sh" + - "\nbasedir=$(dirname \"$(echo \"$0\" | sed -e 's,\\\\,/,g')\")" + - "\n" + - "\ncase `uname` in" + - "\n *CYGWIN*) basedir=`cygpath -w \"$basedir\"`;;" + - "\nesac" + - "\n" + - "\nif [ -x \"$basedir//usr/bin/sh\" ]; then" + - "\n \"$basedir//usr/bin/sh\" -x \"$basedir/from.sh.args\" \"$@\"" + - "\n ret=$?" + - "\nelse " + - "\n /usr/bin/sh -x \"$basedir/from.sh.args\" \"$@\"" + - "\n ret=$?" + - "\nfi" + - "\nexit $ret" + - "\n") - - t.equal(fs.readFileSync(to + '.cmd', 'utf8'), - "@IF EXIST \"%~dp0\\/usr/bin/sh.exe\" (\r" + - "\n \"%~dp0\\/usr/bin/sh.exe\" -x \"%~dp0\\from.sh.args\" %*\r" + - "\n) ELSE (\r" + - "\n @SETLOCAL\r"+ - "\n @SET PATHEXT=%PATHEXT:;.JS;=;%\r"+ - "\n /usr/bin/sh -x \"%~dp0\\from.sh.args\" %*\r" + - "\n)") - t.end() - }) -}) diff --git a/deps/npm/node_modules/cmd-shim/test/zz-cleanup.js b/deps/npm/node_modules/cmd-shim/test/zz-cleanup.js deleted file mode 100644 index 9425031001cb08..00000000000000 --- a/deps/npm/node_modules/cmd-shim/test/zz-cleanup.js +++ /dev/null @@ -1,13 +0,0 @@ -var test = require('tap').test -var path = require('path') -var fixtures = path.resolve(__dirname, 'fixtures') -var rimraf = require('rimraf') - -test('cleanup', function(t) { - rimraf(fixtures, function(er) { - if (er) - throw er - t.pass('cleaned up') - t.end() - }) -}) diff --git a/deps/npm/node_modules/debug/.eslintrc b/deps/npm/node_modules/debug/.eslintrc deleted file mode 100644 index 146371edbe3251..00000000000000 --- a/deps/npm/node_modules/debug/.eslintrc +++ /dev/null @@ -1,14 +0,0 @@ -{ - "env": { - "browser": true, - "node": true - }, - "globals": { - "chrome": true - }, - "rules": { - "no-console": 0, - "no-empty": [1, { "allowEmptyCatch": true }] - }, - "extends": "eslint:recommended" -} diff --git a/deps/npm/node_modules/define-properties/.eslintrc b/deps/npm/node_modules/define-properties/.eslintrc deleted file mode 100644 index db992d7a90c021..00000000000000 --- a/deps/npm/node_modules/define-properties/.eslintrc +++ /dev/null @@ -1,12 +0,0 @@ -{ - "root": true, - - "extends": "@ljharb", - - "rules": { - "id-length": [2, { "min": 1, "max": 35 }], - "max-lines-per-function": [2, 100], - "max-params": [2, 4], - "max-statements": [2, 13] - } -} diff --git a/deps/npm/node_modules/es-abstract/.eslintrc b/deps/npm/node_modules/es-abstract/.eslintrc deleted file mode 100644 index 9478bfa1d345c2..00000000000000 --- a/deps/npm/node_modules/es-abstract/.eslintrc +++ /dev/null @@ -1,23 +0,0 @@ -{ - "root": true, - - "extends": "@ljharb", - - "rules": { - "array-bracket-newline": 0, - "array-element-newline": 0, - "complexity": 0, - "eqeqeq": [2, "allow-null"], - "func-name-matching": 0, - "id-length": [2, { "min": 1, "max": 30 }], - "max-lines": [2, 700], - "max-params": [2, 4], - "max-statements": [2, 24], - "max-statements-per-line": [2, { "max": 2 }], - 
"no-magic-numbers": 0, - "new-cap": 0, - "no-extra-parens": 1, - "operator-linebreak": [2, "before"], - "sort-keys": 0 - } -} diff --git a/deps/npm/node_modules/es-abstract/test/.eslintrc b/deps/npm/node_modules/es-abstract/test/.eslintrc deleted file mode 100644 index 904e833b4c25e5..00000000000000 --- a/deps/npm/node_modules/es-abstract/test/.eslintrc +++ /dev/null @@ -1,11 +0,0 @@ -{ - "rules": { - "id-length": 0, - "max-lines": 0, - "max-statements-per-line": [2, { "max": 3 }], - "max-nested-callbacks": [2, 3], - "max-statements": 0, - "no-implicit-coercion": [1], - "no-invalid-this": [1] - } -} diff --git a/deps/npm/node_modules/es-to-primitive/.eslintrc b/deps/npm/node_modules/es-to-primitive/.eslintrc deleted file mode 100644 index 09e0c6c26c6dc2..00000000000000 --- a/deps/npm/node_modules/es-to-primitive/.eslintrc +++ /dev/null @@ -1,14 +0,0 @@ -{ - "root": true, - - "extends": "@ljharb", - - "rules": { - "complexity": [2, 14], - "func-name-matching": 0, - "id-length": [2, { "min": 1, "max": 24, "properties": "never" }], - "max-lines-per-function": [2, { "max": 68 }], - "max-statements": [2, 20], - "new-cap": [2, { "capIsNewExceptions": ["GetMethod"] }] - } -} diff --git a/deps/npm/node_modules/es-to-primitive/test/.eslintrc b/deps/npm/node_modules/es-to-primitive/test/.eslintrc deleted file mode 100644 index 9beb88c75250a8..00000000000000 --- a/deps/npm/node_modules/es-to-primitive/test/.eslintrc +++ /dev/null @@ -1,9 +0,0 @@ -{ - "rules": { - "array-bracket-newline": 0, - "array-element-newline": 0, - "max-statements-per-line": [2, { "max": 3 }], - "no-magic-numbers": [0], - "sort-keys": [0] - } -} diff --git a/deps/npm/node_modules/extend/.eslintrc b/deps/npm/node_modules/extend/.eslintrc deleted file mode 100644 index a34cf2831b7c38..00000000000000 --- a/deps/npm/node_modules/extend/.eslintrc +++ /dev/null @@ -1,17 +0,0 @@ -{ - "root": true, - - "extends": "@ljharb", - - "rules": { - "complexity": [2, 20], - "eqeqeq": [2, "allow-null"], - "func-name-matching": [1], - "max-depth": [1, 4], - "max-statements": [2, 26], - "no-extra-parens": [1], - "no-magic-numbers": [0], - "no-restricted-syntax": [2, "BreakStatement", "ContinueStatement", "DebuggerStatement", "LabeledStatement", "WithStatement"], - "sort-keys": [0], - } -} diff --git a/deps/npm/node_modules/function-bind/.eslintrc b/deps/npm/node_modules/function-bind/.eslintrc deleted file mode 100644 index 9b33d8edffcc82..00000000000000 --- a/deps/npm/node_modules/function-bind/.eslintrc +++ /dev/null @@ -1,15 +0,0 @@ -{ - "root": true, - - "extends": "@ljharb", - - "rules": { - "func-name-matching": 0, - "indent": [2, 4], - "max-nested-callbacks": [2, 3], - "max-params": [2, 3], - "max-statements": [2, 20], - "no-new-func": [1], - "strict": [0] - } -} diff --git a/deps/npm/node_modules/function-bind/test/.eslintrc b/deps/npm/node_modules/function-bind/test/.eslintrc deleted file mode 100644 index 8a56d5b72fb008..00000000000000 --- a/deps/npm/node_modules/function-bind/test/.eslintrc +++ /dev/null @@ -1,9 +0,0 @@ -{ - "rules": { - "array-bracket-newline": 0, - "array-element-newline": 0, - "max-statements-per-line": [2, { "max": 2 }], - "no-invalid-this": 0, - "no-magic-numbers": 0, - } -} diff --git a/deps/npm/node_modules/gentle-fs/CHANGELOG.md b/deps/npm/node_modules/gentle-fs/CHANGELOG.md index e9bb23d98b891b..38fc91cba587d9 100644 --- a/deps/npm/node_modules/gentle-fs/CHANGELOG.md +++ b/deps/npm/node_modules/gentle-fs/CHANGELOG.md @@ -2,6 +2,41 @@ All notable changes to this project will be documented in this file. 
See [standard-version](https://github.com/conventional-changelog/standard-version) for commit guidelines. + +## [2.2.1](https://github.com/npm/gentle-fs/compare/v2.2.0...v2.2.1) (2019-08-15) + + +### Bug Fixes + +* **link:** properly detect that we should chown the link ([1c69beb](https://github.com/npm/gentle-fs/commit/1c69beb)) + + + + +# [2.2.0](https://github.com/npm/gentle-fs/compare/v2.1.0...v2.2.0) (2019-08-14) + + +### Bug Fixes + +* don't chown if we didn't make any dirs ([c4df8a8](https://github.com/npm/gentle-fs/commit/c4df8a8)) + + +### Features + +* export mkdir method ([4891c09](https://github.com/npm/gentle-fs/commit/4891c09)) + + + + +# [2.1.0](https://github.com/npm/gentle-fs/compare/v2.0.1...v2.1.0) (2019-08-14) + + +### Features + +* infer ownership of created dirs and links ([0dd2879](https://github.com/npm/gentle-fs/commit/0dd2879)) + + + ## [2.0.1](https://github.com/npm/gentle-fs/compare/v2.0.0...v2.0.1) (2017-11-28) diff --git a/deps/npm/node_modules/gentle-fs/index.js b/deps/npm/node_modules/gentle-fs/index.js index 2828fdb2bd318a..9807ed9d8580be 100644 --- a/deps/npm/node_modules/gentle-fs/index.js +++ b/deps/npm/node_modules/gentle-fs/index.js @@ -2,9 +2,11 @@ const rm = require('./lib/rm.js') const link = require('./lib/link.js') +const mkdir = require('./lib/mkdir.js') exports = module.exports = { rm: rm, link: link.link, - linkIfExists: link.linkIfExists + linkIfExists: link.linkIfExists, + mkdir: mkdir } diff --git a/deps/npm/node_modules/gentle-fs/lib/chown.js b/deps/npm/node_modules/gentle-fs/lib/chown.js new file mode 100644 index 00000000000000..5921e56345769e --- /dev/null +++ b/deps/npm/node_modules/gentle-fs/lib/chown.js @@ -0,0 +1,24 @@ +'use strict' + +// A module for chowning things we just created, to preserve +// ownership of new links and directories. + +const chownr = require('chownr') + +const selfOwner = { + uid: process.getuid && process.getuid(), + gid: process.getgid && process.getgid() +} + +module.exports = (path, uid, gid, cb) => { + if (selfOwner.uid !== 0 || + uid === undefined || gid === undefined || + (selfOwner.uid === uid && selfOwner.gid === gid)) { + // don't need to, or can't chown anyway, so just leave it. + // this also handles platforms where process.getuid is undefined + return cb() + } + chownr(path, uid, gid, cb) +} + +module.exports.selfOwner = selfOwner diff --git a/deps/npm/node_modules/gentle-fs/lib/link.js b/deps/npm/node_modules/gentle-fs/lib/link.js index 246d801479a11c..4623e7e82cf882 100644 --- a/deps/npm/node_modules/gentle-fs/lib/link.js +++ b/deps/npm/node_modules/gentle-fs/lib/link.js @@ -3,8 +3,10 @@ const path = require('path') const fs = require('graceful-fs') const chain = require('slide').chain -const mkdir = require('mkdirp') +const mkdir = require('./mkdir.js') const rm = require('./rm.js') +const inferOwner = require('infer-owner') +const chown = require('./chown.js') exports = module.exports = { link: link, @@ -53,14 +55,20 @@ function link (from, to, opts, cb) { var relativeTarget = path.relative(opts.base, absTarget) var target = opts.absolute ? 
absTarget : relativeTarget - chain( - [ - [ensureFromIsNotSource, absTarget, to], - [fs, 'stat', absTarget], - [rm, to, opts], - [mkdir, path.dirname(to)], - [fs, 'symlink', target, to, 'junction'] - ], - cb - ) + const tasks = [ + [ensureFromIsNotSource, absTarget, to], + [fs, 'stat', absTarget], + [rm, to, opts], + [mkdir, path.dirname(to)], + [fs, 'symlink', target, to, 'junction'] + ] + + if (chown.selfOwner.uid !== 0) { + chain(tasks, cb) + } else { + inferOwner(to).then(owner => { + tasks.push([chown, to, owner.uid, owner.gid]) + chain(tasks, cb) + }) + } } diff --git a/deps/npm/node_modules/gentle-fs/lib/mkdir.js b/deps/npm/node_modules/gentle-fs/lib/mkdir.js new file mode 100644 index 00000000000000..5b419959716bdb --- /dev/null +++ b/deps/npm/node_modules/gentle-fs/lib/mkdir.js @@ -0,0 +1,22 @@ +'use strict' + +const mkdirp = require('mkdirp') +const inferOwner = require('infer-owner') +const chown = require('./chown.js') + +module.exports = (path, cb) => { + // don't bother chowning if we can't anyway + if (process.platform === 'win32' || chown.selfOwner.uid !== 0) { + return mkdirp(path, cb) + } + + inferOwner(path).then(owner => { + mkdirp(path, (er, made) => { + if (er || !made) { + cb(er, made) + } else { + chown(made || path, owner.uid, owner.gid, cb) + } + }) + }, cb) +} diff --git a/deps/npm/node_modules/gentle-fs/package.json b/deps/npm/node_modules/gentle-fs/package.json index 55bc6bd40eca1b..bf4867c08d328a 100644 --- a/deps/npm/node_modules/gentle-fs/package.json +++ b/deps/npm/node_modules/gentle-fs/package.json @@ -1,49 +1,49 @@ { - "_args": [ - [ - "gentle-fs@2.0.1", - "/Users/rebecca/code/npm" - ] - ], - "_from": "gentle-fs@2.0.1", - "_id": "gentle-fs@2.0.1", + "_from": "gentle-fs@2.2.1", + "_id": "gentle-fs@2.2.1", "_inBundle": false, - "_integrity": "sha512-cEng5+3fuARewXktTEGbwsktcldA+YsnUEaXZwcK/3pjSE1X9ObnTs+/8rYf8s+RnIcQm2D5x3rwpN7Zom8Bew==", + "_integrity": "sha512-e7dRgUM5fsS+7wm2oggZpgcRx6sEvJHXujPH5RzgQ1ziQY4+HuVBYsnUzJwJ+C7mjOJN27DjiFy1TaL+TNltow==", "_location": "/gentle-fs", "_phantomChildren": {}, "_requested": { "type": "version", "registry": true, - "raw": "gentle-fs@2.0.1", + "raw": "gentle-fs@2.2.1", "name": "gentle-fs", "escapedName": "gentle-fs", - "rawSpec": "2.0.1", + "rawSpec": "2.2.1", "saveSpec": null, - "fetchSpec": "2.0.1" + "fetchSpec": "2.2.1" }, "_requiredBy": [ + "#USER", "/", "/bin-links" ], - "_resolved": "https://registry.npmjs.org/gentle-fs/-/gentle-fs-2.0.1.tgz", - "_spec": "2.0.1", - "_where": "/Users/rebecca/code/npm", + "_resolved": "https://registry.npmjs.org/gentle-fs/-/gentle-fs-2.2.1.tgz", + "_shasum": "1f38df4b4ead685566257201fd526de401ebb215", + "_spec": "gentle-fs@2.2.1", + "_where": "/Users/isaacs/dev/npm/cli", "author": { "name": "Mike Sherov" }, "bugs": { "url": "https://github.com/npm/gentle-fs/issues" }, + "bundleDependencies": false, "dependencies": { "aproba": "^1.1.2", + "chownr": "^1.1.2", "fs-vacuum": "^1.2.10", "graceful-fs": "^4.1.11", "iferr": "^0.1.5", + "infer-owner": "^1.0.4", "mkdirp": "^0.5.1", "path-is-inside": "^1.0.2", "read-cmd-shim": "^1.0.1", "slide": "^1.1.6" }, + "deprecated": false, "description": "Gentle Filesystem operations", "devDependencies": { "dezalgo": "^1.0.3", @@ -81,5 +81,5 @@ "update-coc": "weallbehave -o . && git add CODE_OF_CONDUCT.md && git commit -m 'docs(coc): updated CODE_OF_CONDUCT.md'", "update-contrib": "weallcontribute -o . 
&& git add CONTRIBUTING.md && git commit -m 'docs(contributing): updated CONTRIBUTING.md'" }, - "version": "2.0.1" + "version": "2.2.1" } diff --git a/deps/npm/node_modules/graceful-fs/graceful-fs.js b/deps/npm/node_modules/graceful-fs/graceful-fs.js index ac206757e63c5a..3e1a9eb4432f36 100644 --- a/deps/npm/node_modules/graceful-fs/graceful-fs.js +++ b/deps/npm/node_modules/graceful-fs/graceful-fs.js @@ -3,10 +3,22 @@ var polyfills = require('./polyfills.js') var legacy = require('./legacy-streams.js') var clone = require('./clone.js') -var queue = [] - var util = require('util') +/* istanbul ignore next - node 0.x polyfill */ +var gracefulQueue +var previousSymbol + +/* istanbul ignore else - node 0.x polyfill */ +if (typeof Symbol === 'function' && typeof Symbol.for === 'function') { + gracefulQueue = Symbol.for('graceful-fs.queue') + // This is used in testing by future versions + previousSymbol = Symbol.for('graceful-fs.previous') +} else { + gracefulQueue = '___graceful-fs.queue' + previousSymbol = '___graceful-fs.previous' +} + function noop () {} var debug = noop @@ -19,11 +31,58 @@ else if (/\bgfs4\b/i.test(process.env.NODE_DEBUG || '')) console.error(m) } -if (/\bgfs4\b/i.test(process.env.NODE_DEBUG || '')) { - process.on('exit', function() { - debug(queue) - require('assert').equal(queue.length, 0) +// Once time initialization +if (!global[gracefulQueue]) { + // This queue can be shared by multiple loaded instances + var queue = [] + Object.defineProperty(global, gracefulQueue, { + get: function() { + return queue + } }) + + // Patch fs.close/closeSync to shared queue version, because we need + // to retry() whenever a close happens *anywhere* in the program. + // This is essential when multiple graceful-fs instances are + // in play at the same time. + fs.close = (function (fs$close) { + function close (fd, cb) { + return fs$close.call(fs, fd, function (err) { + // This function uses the graceful-fs shared queue + if (!err) { + retry() + } + + if (typeof cb === 'function') + cb.apply(this, arguments) + }) + } + + Object.defineProperty(close, previousSymbol, { + value: fs$close + }) + return close + })(fs.close) + + fs.closeSync = (function (fs$closeSync) { + function closeSync (fd) { + // This function uses the graceful-fs shared queue + fs$closeSync.apply(fs, arguments) + retry() + } + + Object.defineProperty(closeSync, previousSymbol, { + value: fs$closeSync + }) + return closeSync + })(fs.closeSync) + + if (/\bgfs4\b/i.test(process.env.NODE_DEBUG || '')) { + process.on('exit', function() { + debug(global[gracefulQueue]) + require('assert').equal(global[gracefulQueue].length, 0) + }) + } } module.exports = patch(clone(fs)) @@ -32,45 +91,11 @@ if (process.env.TEST_GRACEFUL_FS_GLOBAL_PATCH && !fs.__patched) { fs.__patched = true; } -// Always patch fs.close/closeSync, because we want to -// retry() whenever a close happens *anywhere* in the program. -// This is essential when multiple graceful-fs instances are -// in play at the same time. -module.exports.close = (function (fs$close) { return function (fd, cb) { - return fs$close.call(fs, fd, function (err) { - if (!err) - retry() - - if (typeof cb === 'function') - cb.apply(this, arguments) - }) -}})(fs.close) - -module.exports.closeSync = (function (fs$closeSync) { return function (fd) { - // Note that graceful-fs also retries when fs.closeSync() fails. - // Looks like a bug to me, although it's probably a harmless one. 
- var rval = fs$closeSync.apply(fs, arguments) - retry() - return rval -}})(fs.closeSync) - -// Only patch fs once, otherwise we'll run into a memory leak if -// graceful-fs is loaded multiple times, such as in test environments that -// reset the loaded modules between tests. -// We look for the string `graceful-fs` from the comment above. This -// way we are not adding any extra properties and it will detect if older -// versions of graceful-fs are installed. -if (!/\bgraceful-fs\b/.test(fs.closeSync.toString())) { - fs.closeSync = module.exports.closeSync; - fs.close = module.exports.close; -} - function patch (fs) { // Everything that references the open() function needs to be in here polyfills(fs) fs.gracefulify = patch - fs.FileReadStream = ReadStream; // Legacy name. - fs.FileWriteStream = WriteStream; // Legacy name. + fs.createReadStream = createReadStream fs.createWriteStream = createWriteStream var fs$readFile = fs.readFile @@ -187,8 +212,48 @@ function patch (fs) { WriteStream.prototype.open = WriteStream$open } - fs.ReadStream = ReadStream - fs.WriteStream = WriteStream + Object.defineProperty(fs, 'ReadStream', { + get: function () { + return ReadStream + }, + set: function (val) { + ReadStream = val + }, + enumerable: true, + configurable: true + }) + Object.defineProperty(fs, 'WriteStream', { + get: function () { + return WriteStream + }, + set: function (val) { + WriteStream = val + }, + enumerable: true, + configurable: true + }) + + // legacy names + Object.defineProperty(fs, 'FileReadStream', { + get: function () { + return ReadStream + }, + set: function (val) { + ReadStream = val + }, + enumerable: true, + configurable: true + }) + Object.defineProperty(fs, 'FileWriteStream', { + get: function () { + return WriteStream + }, + set: function (val) { + WriteStream = val + }, + enumerable: true, + configurable: true + }) function ReadStream (path, options) { if (this instanceof ReadStream) @@ -234,11 +299,11 @@ function patch (fs) { } function createReadStream (path, options) { - return new ReadStream(path, options) + return new fs.ReadStream(path, options) } function createWriteStream (path, options) { - return new WriteStream(path, options) + return new fs.WriteStream(path, options) } var fs$open = fs.open @@ -267,11 +332,11 @@ function patch (fs) { function enqueue (elem) { debug('ENQUEUE', elem[0].name, elem[1]) - queue.push(elem) + global[gracefulQueue].push(elem) } function retry () { - var elem = queue.shift() + var elem = global[gracefulQueue].shift() if (elem) { debug('RETRY', elem[0].name, elem[1]) elem[0].apply(null, elem[1]) diff --git a/deps/npm/node_modules/graceful-fs/package.json b/deps/npm/node_modules/graceful-fs/package.json index eabcee1a8b3461..e5dff90aea7ce5 100644 --- a/deps/npm/node_modules/graceful-fs/package.json +++ b/deps/npm/node_modules/graceful-fs/package.json @@ -1,19 +1,19 @@ { - "_from": "graceful-fs@^4.1.15", - "_id": "graceful-fs@4.2.0", + "_from": "graceful-fs@4.2.2", + "_id": "graceful-fs@4.2.2", "_inBundle": false, - "_integrity": "sha512-jpSvDPV4Cq/bgtpndIWbI5hmYxhQGHPC4d4cqBPb4DLniCfhJokdXhwhaDuLBGLQdvvRum/UiX6ECVIPvDXqdg==", + "_integrity": "sha512-IItsdsea19BoLC7ELy13q1iJFNmd7ofZH5+X/pJr90/nRoPEX0DJo1dHDbgtYWOhJhcCgMDTOw84RZ72q6lB+Q==", "_location": "/graceful-fs", "_phantomChildren": {}, "_requested": { - "type": "range", + "type": "version", "registry": true, - "raw": "graceful-fs@^4.1.15", + "raw": "graceful-fs@4.2.2", "name": "graceful-fs", "escapedName": "graceful-fs", - "rawSpec": "^4.1.15", + "rawSpec": "4.2.2", "saveSpec": 
null, - "fetchSpec": "^4.1.15" + "fetchSpec": "4.2.2" }, "_requiredBy": [ "#USER", @@ -26,7 +26,6 @@ "/flat-cache", "/fs-vacuum", "/fs-write-stream-atomic", - "/fstream", "/gentle-fs", "/libcipm", "/load-json-file", @@ -42,9 +41,9 @@ "/test-exclude/load-json-file", "/write-file-atomic" ], - "_resolved": "https://registry.npmjs.org/graceful-fs/-/graceful-fs-4.2.0.tgz", - "_shasum": "8d8fdc73977cb04104721cb53666c1ca64cd328b", - "_spec": "graceful-fs@^4.1.15", + "_resolved": "https://registry.npmjs.org/graceful-fs/-/graceful-fs-4.2.2.tgz", + "_shasum": "6f0952605d0140c1cfdb138ed005775b92d67b02", + "_spec": "graceful-fs@4.2.2", "_where": "/Users/isaacs/dev/npm/cli", "bugs": { "url": "https://github.com/isaacs/node-graceful-fs/issues" @@ -99,5 +98,5 @@ "preversion": "npm test", "test": "node test.js | tap -" }, - "version": "4.2.0" + "version": "4.2.2" } diff --git a/deps/npm/node_modules/graceful-fs/polyfills.js b/deps/npm/node_modules/graceful-fs/polyfills.js index ab692016c90b04..a5808d23f132e2 100644 --- a/deps/npm/node_modules/graceful-fs/polyfills.js +++ b/deps/npm/node_modules/graceful-fs/polyfills.js @@ -115,20 +115,26 @@ function patch (fs) { } // if read() returns EAGAIN, then just try it again. - fs.read = (function (fs$read) { return function (fd, buffer, offset, length, position, callback_) { - var callback - if (callback_ && typeof callback_ === 'function') { - var eagCounter = 0 - callback = function (er, _, __) { - if (er && er.code === 'EAGAIN' && eagCounter < 10) { - eagCounter ++ - return fs$read.call(fs, fd, buffer, offset, length, position, callback) + fs.read = (function (fs$read) { + function read (fd, buffer, offset, length, position, callback_) { + var callback + if (callback_ && typeof callback_ === 'function') { + var eagCounter = 0 + callback = function (er, _, __) { + if (er && er.code === 'EAGAIN' && eagCounter < 10) { + eagCounter ++ + return fs$read.call(fs, fd, buffer, offset, length, position, callback) + } + callback_.apply(this, arguments) } - callback_.apply(this, arguments) } + return fs$read.call(fs, fd, buffer, offset, length, position, callback) } - return fs$read.call(fs, fd, buffer, offset, length, position, callback) - }})(fs.read) + + // This ensures `util.promisify` works as it does for native `fs.read`. 
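      // A sketch of why the next line works (hedged aside, not part of the
      // patch itself): util.promisify looks up the util.promisify.custom
      // symbol as an ordinary property get, and property gets walk the
      // prototype chain, so pointing the wrapper's prototype at the original
      // fs.read lets promisify find the native custom implementation, which
      // resolves to { bytesRead, buffer }:
      //
      //   const util = require('util')
      //   const readAsync = util.promisify(fs.read)
      //   readAsync(fd, Buffer.alloc(16), 0, 16, null)
      //     .then(({ bytesRead, buffer }) => { /* ... */ })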
+ read.__proto__ = fs$read + return read + })(fs.read) fs.readSync = (function (fs$readSync) { return function (fd, buffer, offset, length, position) { var eagCounter = 0 diff --git a/deps/npm/node_modules/has-symbols/.eslintrc b/deps/npm/node_modules/has-symbols/.eslintrc deleted file mode 100644 index f78f6f181f67dc..00000000000000 --- a/deps/npm/node_modules/has-symbols/.eslintrc +++ /dev/null @@ -1,10 +0,0 @@ -{ - "root": true, - - "extends": "@ljharb", - - "rules": { - "max-statements-per-line": [2, { "max": 2 }], - "no-magic-numbers": 0 - } -} diff --git a/deps/npm/node_modules/is-callable/.eslintrc b/deps/npm/node_modules/is-callable/.eslintrc deleted file mode 100644 index db619b50ce084d..00000000000000 --- a/deps/npm/node_modules/is-callable/.eslintrc +++ /dev/null @@ -1,11 +0,0 @@ -{ - "root": true, - - "extends": "@ljharb", - - "rules": { - "id-length": 0, - "max-statements": [2, 12], - "max-statements-per-line": [2, { "max": 2 }] - } -} diff --git a/deps/npm/node_modules/is-date-object/.eslintrc b/deps/npm/node_modules/is-date-object/.eslintrc deleted file mode 100644 index 1228f975c99738..00000000000000 --- a/deps/npm/node_modules/is-date-object/.eslintrc +++ /dev/null @@ -1,9 +0,0 @@ -{ - "root": true, - - "extends": "@ljharb", - - "rules": { - "max-statements": [2, 12] - } -} diff --git a/deps/npm/node_modules/is-regex/.eslintrc b/deps/npm/node_modules/is-regex/.eslintrc deleted file mode 100644 index fbb8e9de537b9e..00000000000000 --- a/deps/npm/node_modules/is-regex/.eslintrc +++ /dev/null @@ -1,9 +0,0 @@ -{ - "root": true, - - "extends": "@ljharb", - - "rules": { - "id-length": [1] - } -} diff --git a/deps/npm/node_modules/is-symbol/.eslintrc b/deps/npm/node_modules/is-symbol/.eslintrc deleted file mode 100644 index 5f511fd05f5435..00000000000000 --- a/deps/npm/node_modules/is-symbol/.eslintrc +++ /dev/null @@ -1,9 +0,0 @@ -{ - "root": true, - - "extends": "@ljharb", - - "rules": { - "max-statements": [2, 14] - } -} diff --git a/deps/npm/node_modules/is-symbol/test/.eslintrc b/deps/npm/node_modules/is-symbol/test/.eslintrc deleted file mode 100644 index 1ac0d47b38544d..00000000000000 --- a/deps/npm/node_modules/is-symbol/test/.eslintrc +++ /dev/null @@ -1,7 +0,0 @@ -{ - "rules": { - "max-statements-per-line": [2, { "max": 2 }], - "no-restricted-properties": 0, - "symbol-description": 0, - } -} diff --git a/deps/npm/node_modules/isstream/.jshintrc b/deps/npm/node_modules/isstream/.jshintrc deleted file mode 100644 index c8ef3ca4097f82..00000000000000 --- a/deps/npm/node_modules/isstream/.jshintrc +++ /dev/null @@ -1,59 +0,0 @@ -{ - "predef": [ ] - , "bitwise": false - , "camelcase": false - , "curly": false - , "eqeqeq": false - , "forin": false - , "immed": false - , "latedef": false - , "noarg": true - , "noempty": true - , "nonew": true - , "plusplus": false - , "quotmark": true - , "regexp": false - , "undef": true - , "unused": true - , "strict": false - , "trailing": true - , "maxlen": 120 - , "asi": true - , "boss": true - , "debug": true - , "eqnull": true - , "esnext": true - , "evil": true - , "expr": true - , "funcscope": false - , "globalstrict": false - , "iterator": false - , "lastsemic": true - , "laxbreak": true - , "laxcomma": true - , "loopfunc": true - , "multistr": false - , "onecase": false - , "proto": false - , "regexdash": false - , "scripturl": true - , "smarttabs": false - , "shadow": false - , "sub": true - , "supernew": false - , "validthis": true - , "browser": true - , "couch": false - , "devel": false - , "dojo": false - , "mootools": false - 
, "node": true - , "nonstandard": true - , "prototypejs": false - , "rhino": false - , "worker": true - , "wsh": false - , "nomen": false - , "onevar": false - , "passfail": false -} \ No newline at end of file diff --git a/deps/npm/node_modules/libcipm/CHANGELOG.md b/deps/npm/node_modules/libcipm/CHANGELOG.md index 5d72f4c362a3d7..26cf2d224cfb07 100644 --- a/deps/npm/node_modules/libcipm/CHANGELOG.md +++ b/deps/npm/node_modules/libcipm/CHANGELOG.md @@ -2,16 +2,41 @@ All notable changes to this project will be documented in this file. See [standard-version](https://github.com/conventional-changelog/standard-version) for commit guidelines. + +## [4.0.3](https://github.com/npm/libcipm/compare/v4.0.2...v4.0.3) (2019-08-12) + + +### Bug Fixes + +* do not pass opts.log to lifecycle ([46b2101](https://github.com/npm/libcipm/commit/46b2101)) + + + + +## [4.0.2](https://github.com/npm/libcipm/compare/v4.0.1...v4.0.2) (2019-08-12) + + + + +## [4.0.1](https://github.com/npm/libcipm/compare/v4.0.0...v4.0.1) (2019-08-12) + + +### Bug Fixes + +* respect and retain all configs passed in ([20b7372](https://github.com/npm/libcipm/commit/20b7372)) + + + -# [4.0.0](https://github.com/zkat/cipm/compare/v3.0.3...v4.0.0) (2019-07-10) +# [4.0.0](https://github.com/npm/libcipm/compare/v3.0.3...v4.0.0) (2019-07-10) -* npm-lifecycle@3.0.0 ([84b8d7e](https://github.com/zkat/cipm/commit/84b8d7e)) +* npm-lifecycle@3.0.0 ([84b8d7e](https://github.com/npm/libcipm/commit/84b8d7e)) ### Bug Fixes -* **lifecycle:** remove warning from bluebird ([#59](https://github.com/zkat/cipm/issues/59)) ([7af39e6](https://github.com/zkat/cipm/commit/7af39e6)), closes [#58](https://github.com/zkat/cipm/issues/58) +* **lifecycle:** remove warning from bluebird ([#59](https://github.com/npm/libcipm/issues/59)) ([7af39e6](https://github.com/npm/libcipm/commit/7af39e6)), closes [#58](https://github.com/npm/libcipm/issues/58) ### BREAKING CHANGES @@ -21,42 +46,42 @@ All notable changes to this project will be documented in this file. 
See [standa -## [3.0.3](https://github.com/zkat/cipm/compare/v3.0.2...v3.0.3) (2019-01-22) +## [3.0.3](https://github.com/npm/libcipm/compare/v3.0.2...v3.0.3) (2019-01-22) ### Bug Fixes -* **scripts:** pass in opts.dir directly ([018df27](https://github.com/zkat/cipm/commit/018df27)) +* **scripts:** pass in opts.dir directly ([018df27](https://github.com/npm/libcipm/commit/018df27)) -## [3.0.2](https://github.com/zkat/cipm/compare/v3.0.1...v3.0.2) (2018-08-31) +## [3.0.2](https://github.com/npm/libcipm/compare/v3.0.1...v3.0.2) (2018-08-31) ### Bug Fixes -* **worker:** missed a spot ([4371558](https://github.com/zkat/cipm/commit/4371558)) +* **worker:** missed a spot ([4371558](https://github.com/npm/libcipm/commit/4371558)) -## [3.0.1](https://github.com/zkat/cipm/compare/v3.0.0...v3.0.1) (2018-08-31) +## [3.0.1](https://github.com/npm/libcipm/compare/v3.0.0...v3.0.1) (2018-08-31) ### Bug Fixes -* **workers:** disable workers for now ([64db490](https://github.com/zkat/cipm/commit/64db490)) +* **workers:** disable workers for now ([64db490](https://github.com/npm/libcipm/commit/64db490)) -# [3.0.0](https://github.com/zkat/cipm/compare/v2.0.2...v3.0.0) (2018-08-31) +# [3.0.0](https://github.com/npm/libcipm/compare/v2.0.2...v3.0.0) (2018-08-31) ### Features -* **config:** switch to modern, figgy-pudding configuration ([#57](https://github.com/zkat/cipm/issues/57)) ([161f6b2](https://github.com/zkat/cipm/commit/161f6b2)) +* **config:** switch to modern, figgy-pudding configuration ([#57](https://github.com/npm/libcipm/issues/57)) ([161f6b2](https://github.com/npm/libcipm/commit/161f6b2)) ### BREAKING CHANGES @@ -66,32 +91,32 @@ All notable changes to this project will be documented in this file. See [standa -## [2.0.2](https://github.com/zkat/cipm/compare/v2.0.1...v2.0.2) (2018-08-10) +## [2.0.2](https://github.com/npm/libcipm/compare/v2.0.1...v2.0.2) (2018-08-10) ### Bug Fixes -* **child:** only override dirPacker if opts.dirPacker is defined ([#55](https://github.com/zkat/cipm/issues/55)) ([13ab2f0](https://github.com/zkat/cipm/commit/13ab2f0)) +* **child:** only override dirPacker if opts.dirPacker is defined ([#55](https://github.com/npm/libcipm/issues/55)) ([13ab2f0](https://github.com/npm/libcipm/commit/13ab2f0)) -## [2.0.1](https://github.com/zkat/cipm/compare/v2.0.0...v2.0.1) (2018-07-27) +## [2.0.1](https://github.com/npm/libcipm/compare/v2.0.0...v2.0.1) (2018-07-27) ### Bug Fixes -* **deps:** move mkdirp to prod deps ([6878f39](https://github.com/zkat/cipm/commit/6878f39)) +* **deps:** move mkdirp to prod deps ([6878f39](https://github.com/npm/libcipm/commit/6878f39)) -# [2.0.0](https://github.com/zkat/cipm/compare/v1.6.3...v2.0.0) (2018-05-24) +# [2.0.0](https://github.com/npm/libcipm/compare/v1.6.3...v2.0.0) (2018-05-24) ### meta -* update node version support ([694b4d3](https://github.com/zkat/cipm/commit/694b4d3)) +* update node version support ([694b4d3](https://github.com/npm/libcipm/commit/694b4d3)) ### BREAKING CHANGES @@ -101,173 +126,173 @@ All notable changes to this project will be documented in this file. 
See [standa -## [1.6.3](https://github.com/zkat/cipm/compare/v1.6.2...v1.6.3) (2018-05-24) +## [1.6.3](https://github.com/npm/libcipm/compare/v1.6.2...v1.6.3) (2018-05-24) -## [1.6.2](https://github.com/zkat/cipm/compare/v1.6.1...v1.6.2) (2018-04-08) +## [1.6.2](https://github.com/npm/libcipm/compare/v1.6.1...v1.6.2) (2018-04-08) ### Bug Fixes -* **lifecycle:** detect binding.gyp for default install lifecycle ([#46](https://github.com/zkat/cipm/issues/46)) ([9149631](https://github.com/zkat/cipm/commit/9149631)), closes [#45](https://github.com/zkat/cipm/issues/45) +* **lifecycle:** detect binding.gyp for default install lifecycle ([#46](https://github.com/npm/libcipm/issues/46)) ([9149631](https://github.com/npm/libcipm/commit/9149631)), closes [#45](https://github.com/npm/libcipm/issues/45) -## [1.6.1](https://github.com/zkat/cipm/compare/v1.6.0...v1.6.1) (2018-03-13) +## [1.6.1](https://github.com/npm/libcipm/compare/v1.6.0...v1.6.1) (2018-03-13) ### Bug Fixes -* **bin:** Set non-zero exit code on error ([#41](https://github.com/zkat/cipm/issues/41)) ([54d0106](https://github.com/zkat/cipm/commit/54d0106)) -* **lifecycle:** defer to lifecycle’s internal logic as to whether or not to execute a run-script ([#42](https://github.com/zkat/cipm/issues/42)) ([7f27a52](https://github.com/zkat/cipm/commit/7f27a52)), closes [npm/npm#19258](https://github.com/npm/npm/issues/19258) -* **prefix:** don't reference prefix before computing it ([#40](https://github.com/zkat/cipm/issues/40)) ([08ed1cc](https://github.com/zkat/cipm/commit/08ed1cc)) -* **prefix:** Resolve to promise when passing --prefix to npm ci ([#43](https://github.com/zkat/cipm/issues/43)) ([401d466](https://github.com/zkat/cipm/commit/401d466)) +* **bin:** Set non-zero exit code on error ([#41](https://github.com/npm/libcipm/issues/41)) ([54d0106](https://github.com/npm/libcipm/commit/54d0106)) +* **lifecycle:** defer to lifecycle’s internal logic as to whether or not to execute a run-script ([#42](https://github.com/npm/libcipm/issues/42)) ([7f27a52](https://github.com/npm/libcipm/commit/7f27a52)), closes [npm/npm#19258](https://github.com/npm/npm/issues/19258) +* **prefix:** don't reference prefix before computing it ([#40](https://github.com/npm/libcipm/issues/40)) ([08ed1cc](https://github.com/npm/libcipm/commit/08ed1cc)) +* **prefix:** Resolve to promise when passing --prefix to npm ci ([#43](https://github.com/npm/libcipm/issues/43)) ([401d466](https://github.com/npm/libcipm/commit/401d466)) -# [1.6.0](https://github.com/zkat/cipm/compare/v1.5.1...v1.6.0) (2018-03-01) +# [1.6.0](https://github.com/npm/libcipm/compare/v1.5.1...v1.6.0) (2018-03-01) ### Bug Fixes -* **bin:** cli.js was being excluded ([d62668e](https://github.com/zkat/cipm/commit/d62668e)) +* **bin:** cli.js was being excluded ([d62668e](https://github.com/npm/libcipm/commit/d62668e)) ### Features -* **libcipm:** working standalone cipm release! ([a3383fd](https://github.com/zkat/cipm/commit/a3383fd)) +* **libcipm:** working standalone cipm release! 
([a3383fd](https://github.com/npm/libcipm/commit/a3383fd)) -## [1.5.1](https://github.com/zkat/cipm/compare/v1.5.0...v1.5.1) (2018-03-01) +## [1.5.1](https://github.com/npm/libcipm/compare/v1.5.0...v1.5.1) (2018-03-01) ### Bug Fixes -* **_from:** do not add _from to directory deps ([7405360](https://github.com/zkat/cipm/commit/7405360)) +* **_from:** do not add _from to directory deps ([7405360](https://github.com/npm/libcipm/commit/7405360)) -# [1.5.0](https://github.com/zkat/cipm/compare/v1.4.1...v1.5.0) (2018-03-01) +# [1.5.0](https://github.com/npm/libcipm/compare/v1.4.1...v1.5.0) (2018-03-01) ### Bug Fixes -* **errors:** handle aggregate errors better ([6239499](https://github.com/zkat/cipm/commit/6239499)) +* **errors:** handle aggregate errors better ([6239499](https://github.com/npm/libcipm/commit/6239499)) ### Features -* **logger:** rudimentary progress bar update ([c5d9dc7](https://github.com/zkat/cipm/commit/c5d9dc7)) +* **logger:** rudimentary progress bar update ([c5d9dc7](https://github.com/npm/libcipm/commit/c5d9dc7)) -## [1.4.1](https://github.com/zkat/cipm/compare/v1.4.0...v1.4.1) (2018-02-27) +## [1.4.1](https://github.com/npm/libcipm/compare/v1.4.0...v1.4.1) (2018-02-27) ### Bug Fixes -* **buildTree:** linking in parallel causes hoist-clobbering ([5ffbc0e](https://github.com/zkat/cipm/commit/5ffbc0e)), closes [#39](https://github.com/zkat/cipm/issues/39) -* **buildTree:** use checkDepEnv here too ([41a4634](https://github.com/zkat/cipm/commit/41a4634)) -* **perf:** split up updateJson and buildTree ([df5aba0](https://github.com/zkat/cipm/commit/df5aba0)) -* **perf:** stop using the readPackageJson version to update packages ([8da3d5a](https://github.com/zkat/cipm/commit/8da3d5a)) +* **buildTree:** linking in parallel causes hoist-clobbering ([5ffbc0e](https://github.com/npm/libcipm/commit/5ffbc0e)), closes [#39](https://github.com/npm/libcipm/issues/39) +* **buildTree:** use checkDepEnv here too ([41a4634](https://github.com/npm/libcipm/commit/41a4634)) +* **perf:** split up updateJson and buildTree ([df5aba0](https://github.com/npm/libcipm/commit/df5aba0)) +* **perf:** stop using the readPackageJson version to update packages ([8da3d5a](https://github.com/npm/libcipm/commit/8da3d5a)) -# [1.4.0](https://github.com/zkat/cipm/compare/v1.3.3...v1.4.0) (2018-02-21) +# [1.4.0](https://github.com/npm/libcipm/compare/v1.3.3...v1.4.0) (2018-02-21) ### Features -* **extract:** add support for --only and --also ([ad143ae](https://github.com/zkat/cipm/commit/ad143ae)) +* **extract:** add support for --only and --also ([ad143ae](https://github.com/npm/libcipm/commit/ad143ae)) -## [1.3.3](https://github.com/zkat/cipm/compare/v1.3.2...v1.3.3) (2018-02-21) +## [1.3.3](https://github.com/npm/libcipm/compare/v1.3.2...v1.3.3) (2018-02-21) ### Bug Fixes -* **extract:** stop extracting deps before parent :\ ([c6847dc](https://github.com/zkat/cipm/commit/c6847dc)) +* **extract:** stop extracting deps before parent :\ ([c6847dc](https://github.com/npm/libcipm/commit/c6847dc)) -## [1.3.2](https://github.com/zkat/cipm/compare/v1.3.1...v1.3.2) (2018-02-15) +## [1.3.2](https://github.com/npm/libcipm/compare/v1.3.1...v1.3.2) (2018-02-15) -## [1.3.1](https://github.com/zkat/cipm/compare/v1.3.0...v1.3.1) (2018-02-15) +## [1.3.1](https://github.com/npm/libcipm/compare/v1.3.0...v1.3.1) (2018-02-15) -# [1.3.0](https://github.com/zkat/cipm/compare/v1.2.0...v1.3.0) (2018-02-13) +# [1.3.0](https://github.com/npm/libcipm/compare/v1.2.0...v1.3.0) (2018-02-13) ### Features -* **extract:** link directory 
deps and install missing bundle deps ([8334e9e](https://github.com/zkat/cipm/commit/8334e9e)) +* **extract:** link directory deps and install missing bundle deps ([8334e9e](https://github.com/npm/libcipm/commit/8334e9e)) -# [1.2.0](https://github.com/zkat/cipm/compare/v1.1.2...v1.2.0) (2018-02-07) +# [1.2.0](https://github.com/npm/libcipm/compare/v1.1.2...v1.2.0) (2018-02-07) ### Features -* **metadata:** add _resolved, _integrity, and _from on install ([36642dc](https://github.com/zkat/cipm/commit/36642dc)) +* **metadata:** add _resolved, _integrity, and _from on install ([36642dc](https://github.com/npm/libcipm/commit/36642dc)) -## [1.1.2](https://github.com/zkat/cipm/compare/v1.1.1...v1.1.2) (2018-01-19) +## [1.1.2](https://github.com/npm/libcipm/compare/v1.1.1...v1.1.2) (2018-01-19) -## [1.1.1](https://github.com/zkat/cipm/compare/v1.1.0...v1.1.1) (2018-01-19) +## [1.1.1](https://github.com/npm/libcipm/compare/v1.1.0...v1.1.1) (2018-01-19) -# [1.1.0](https://github.com/zkat/cipm/compare/v1.0.1...v1.1.0) (2018-01-07) +# [1.1.0](https://github.com/npm/libcipm/compare/v1.0.1...v1.1.0) (2018-01-07) ### Features -* **log:** add some helpful log output ([f443f03](https://github.com/zkat/cipm/commit/f443f03)) +* **log:** add some helpful log output ([f443f03](https://github.com/npm/libcipm/commit/f443f03)) -## [1.0.1](https://github.com/zkat/cipm/compare/v1.0.0...v1.0.1) (2018-01-07) +## [1.0.1](https://github.com/npm/libcipm/compare/v1.0.0...v1.0.1) (2018-01-07) ### Bug Fixes -* **deps:** added protoduck to pkgjson ([ecbe719](https://github.com/zkat/cipm/commit/ecbe719)) +* **deps:** added protoduck to pkgjson ([ecbe719](https://github.com/npm/libcipm/commit/ecbe719)) -# [1.0.0](https://github.com/zkat/cipm/compare/v0.9.1...v1.0.0) (2018-01-07) +# [1.0.0](https://github.com/npm/libcipm/compare/v0.9.1...v1.0.0) (2018-01-07) ### Features -* **cli:** splitting off CLI into a separate tool ([cff65c1](https://github.com/zkat/cipm/commit/cff65c1)) +* **cli:** splitting off CLI into a separate tool ([cff65c1](https://github.com/npm/libcipm/commit/cff65c1)) ### BREAKING CHANGES @@ -277,173 +302,173 @@ All notable changes to this project will be documented in this file. 
See [standa -## [0.9.1](https://github.com/zkat/cipm/compare/v0.9.0...v0.9.1) (2018-01-07) +## [0.9.1](https://github.com/npm/libcipm/compare/v0.9.0...v0.9.1) (2018-01-07) ### Bug Fixes -* **prefix:** oops @ prefix ([cc5adac](https://github.com/zkat/cipm/commit/cc5adac)) +* **prefix:** oops @ prefix ([cc5adac](https://github.com/npm/libcipm/commit/cc5adac)) -# [0.9.0](https://github.com/zkat/cipm/compare/v0.8.0...v0.9.0) (2018-01-07) +# [0.9.0](https://github.com/npm/libcipm/compare/v0.8.0...v0.9.0) (2018-01-07) ### Bug Fixes -* **package:** add pacote to bundleDependencies ([#36](https://github.com/zkat/cipm/issues/36)) ([a69742e](https://github.com/zkat/cipm/commit/a69742e)) +* **package:** add pacote to bundleDependencies ([#36](https://github.com/npm/libcipm/issues/36)) ([a69742e](https://github.com/npm/libcipm/commit/a69742e)) ### Features -* **config:** allow injection of npm configs ([#35](https://github.com/zkat/cipm/issues/35)) ([1f5694b](https://github.com/zkat/cipm/commit/1f5694b)) +* **config:** allow injection of npm configs ([#35](https://github.com/npm/libcipm/issues/35)) ([1f5694b](https://github.com/npm/libcipm/commit/1f5694b)) -# [0.8.0](https://github.com/zkat/cipm/compare/v0.7.2...v0.8.0) (2017-11-28) +# [0.8.0](https://github.com/npm/libcipm/compare/v0.7.2...v0.8.0) (2017-11-28) ### Features -* **gyp:** new npm-lifecycle[@2](https://github.com/2) with included node-gyp ([a4ed938](https://github.com/zkat/cipm/commit/a4ed938)) +* **gyp:** new npm-lifecycle[@2](https://github.com/2) with included node-gyp ([a4ed938](https://github.com/npm/libcipm/commit/a4ed938)) -## [0.7.2](https://github.com/zkat/cipm/compare/v0.7.1...v0.7.2) (2017-10-13) +## [0.7.2](https://github.com/npm/libcipm/compare/v0.7.1...v0.7.2) (2017-10-13) ### Bug Fixes -* **extract:** idk why this was breaking. Seriously. ([433a2be](https://github.com/zkat/cipm/commit/433a2be)) -* **tree:** pass through a custom Promise to logiTree ([2d29efb](https://github.com/zkat/cipm/commit/2d29efb)) +* **extract:** idk why this was breaking. Seriously. ([433a2be](https://github.com/npm/libcipm/commit/433a2be)) +* **tree:** pass through a custom Promise to logiTree ([2d29efb](https://github.com/npm/libcipm/commit/2d29efb)) ### Performance Improvements -* zoomzoom. Even more concurrency! ([db9c2e0](https://github.com/zkat/cipm/commit/db9c2e0)) +* zoomzoom. Even more concurrency! 
([db9c2e0](https://github.com/npm/libcipm/commit/db9c2e0)) -## [0.7.1](https://github.com/zkat/cipm/compare/v0.7.0...v0.7.1) (2017-10-13) +## [0.7.1](https://github.com/npm/libcipm/compare/v0.7.0...v0.7.1) (2017-10-13) ### Bug Fixes -* **scripts:** separate extract and build and fix ordering ([eb072a5](https://github.com/zkat/cipm/commit/eb072a5)) +* **scripts:** separate extract and build and fix ordering ([eb072a5](https://github.com/npm/libcipm/commit/eb072a5)) -# [0.7.0](https://github.com/zkat/cipm/compare/v0.6.0...v0.7.0) (2017-10-12) +# [0.7.0](https://github.com/npm/libcipm/compare/v0.6.0...v0.7.0) (2017-10-12) ### Bug Fixes -* **lockfile:** npm-shrinkwrap takes precedence over package-lock (#28) ([3b98fb3](https://github.com/zkat/cipm/commit/3b98fb3)) +* **lockfile:** npm-shrinkwrap takes precedence over package-lock (#28) ([3b98fb3](https://github.com/npm/libcipm/commit/3b98fb3)) ### Features -* **optional:** ignore failed optional deps (#27) ([a654629](https://github.com/zkat/cipm/commit/a654629)) +* **optional:** ignore failed optional deps (#27) ([a654629](https://github.com/npm/libcipm/commit/a654629)) -# [0.6.0](https://github.com/zkat/cipm/compare/v0.5.1...v0.6.0) (2017-10-09) +# [0.6.0](https://github.com/npm/libcipm/compare/v0.5.1...v0.6.0) (2017-10-09) ### Features -* **scripts:** run prepare and prepublish scripts in the root (#26) ([e0e35a3](https://github.com/zkat/cipm/commit/e0e35a3)) +* **scripts:** run prepare and prepublish scripts in the root (#26) ([e0e35a3](https://github.com/npm/libcipm/commit/e0e35a3)) -## [0.5.1](https://github.com/zkat/cipm/compare/v0.5.0...v0.5.1) (2017-10-09) +## [0.5.1](https://github.com/npm/libcipm/compare/v0.5.0...v0.5.1) (2017-10-09) -# [0.5.0](https://github.com/zkat/cipm/compare/v0.4.0...v0.5.0) (2017-10-09) +# [0.5.0](https://github.com/npm/libcipm/compare/v0.4.0...v0.5.0) (2017-10-09) ### Bug Fixes -* **output:** npm does not punctuate this ([e7ba976](https://github.com/zkat/cipm/commit/e7ba976)) -* **shutdown:** make sure workers close ([7ab57d0](https://github.com/zkat/cipm/commit/7ab57d0)) +* **output:** npm does not punctuate this ([e7ba976](https://github.com/npm/libcipm/commit/e7ba976)) +* **shutdown:** make sure workers close ([7ab57d0](https://github.com/npm/libcipm/commit/7ab57d0)) ### Features -* **bin:** link bins and run scripts (#25) ([fab74bf](https://github.com/zkat/cipm/commit/fab74bf)) -* **lifecycle:** run scripts in dep order (#23) ([68ecfac](https://github.com/zkat/cipm/commit/68ecfac)) +* **bin:** link bins and run scripts (#25) ([fab74bf](https://github.com/npm/libcipm/commit/fab74bf)) +* **lifecycle:** run scripts in dep order (#23) ([68ecfac](https://github.com/npm/libcipm/commit/68ecfac)) -# [0.4.0](https://github.com/zkat/cipm/compare/v0.3.2...v0.4.0) (2017-10-04) +# [0.4.0](https://github.com/npm/libcipm/compare/v0.3.2...v0.4.0) (2017-10-04) ### Features -* **opts:** support full range of relevant CLI opts (#19) ([6f2bd51](https://github.com/zkat/cipm/commit/6f2bd51)) +* **opts:** support full range of relevant CLI opts (#19) ([6f2bd51](https://github.com/npm/libcipm/commit/6f2bd51)) -## [0.3.2](https://github.com/zkat/cipm/compare/v0.3.1...v0.3.2) (2017-09-06) +## [0.3.2](https://github.com/npm/libcipm/compare/v0.3.1...v0.3.2) (2017-09-06) ### Bug Fixes -* **bin:** make cli executable by default (#13) ([14a9a5f](https://github.com/zkat/cipm/commit/14a9a5f)) -* **config:** use npm.cmd on win32 and fix tests (#12) ([d912d16](https://github.com/zkat/cipm/commit/d912d16)), closes 
[#12](https://github.com/zkat/cipm/issues/12) -* **json:** strip BOM when reading JSON files (#8) ([2529149](https://github.com/zkat/cipm/commit/2529149)) +* **bin:** make cli executable by default (#13) ([14a9a5f](https://github.com/npm/libcipm/commit/14a9a5f)) +* **config:** use npm.cmd on win32 and fix tests (#12) ([d912d16](https://github.com/npm/libcipm/commit/d912d16)), closes [#12](https://github.com/npm/libcipm/issues/12) +* **json:** strip BOM when reading JSON files (#8) ([2529149](https://github.com/npm/libcipm/commit/2529149)) -## [0.3.1](https://github.com/zkat/cipm/compare/v0.3.0...v0.3.1) (2017-09-05) +## [0.3.1](https://github.com/npm/libcipm/compare/v0.3.0...v0.3.1) (2017-09-05) -# [0.3.0](https://github.com/zkat/cipm/compare/v0.2.0...v0.3.0) (2017-09-05) +# [0.3.0](https://github.com/npm/libcipm/compare/v0.2.0...v0.3.0) (2017-09-05) ### Features -* **lockfile:** verify that lockfile matches package.json (#5) ([f631203](https://github.com/zkat/cipm/commit/f631203)) -* **scripts:** support --ignore-scripts option (#9) ([213ca02](https://github.com/zkat/cipm/commit/213ca02)) +* **lockfile:** verify that lockfile matches package.json (#5) ([f631203](https://github.com/npm/libcipm/commit/f631203)) +* **scripts:** support --ignore-scripts option (#9) ([213ca02](https://github.com/npm/libcipm/commit/213ca02)) -# [0.2.0](https://github.com/zkat/cipm/compare/v0.1.1...v0.2.0) (2017-09-01) +# [0.2.0](https://github.com/npm/libcipm/compare/v0.1.1...v0.2.0) (2017-09-01) ### Bug Fixes -* **main:** default --prefix ([ff06a31](https://github.com/zkat/cipm/commit/ff06a31)) +* **main:** default --prefix ([ff06a31](https://github.com/npm/libcipm/commit/ff06a31)) ### Features -* **lifecycle:** actually run lifecycle scripts correctly ([7f8933e](https://github.com/zkat/cipm/commit/7f8933e)) +* **lifecycle:** actually run lifecycle scripts correctly ([7f8933e](https://github.com/npm/libcipm/commit/7f8933e)) -## [0.1.1](https://github.com/zkat/cipm/compare/v0.1.0...v0.1.1) (2017-08-30) +## [0.1.1](https://github.com/npm/libcipm/compare/v0.1.0...v0.1.1) (2017-08-30) ### Bug Fixes -* **files:** oops. forgot to include new files in tarball ([1ee85c9](https://github.com/zkat/cipm/commit/1ee85c9)) +* **files:** oops. forgot to include new files in tarball ([1ee85c9](https://github.com/npm/libcipm/commit/1ee85c9)) @@ -453,13 +478,13 @@ All notable changes to this project will be documented in this file. 
See [standa ### Bug Fixes -* **config:** pipe stdout ([08e6af8](https://github.com/zkat/cipm/commit/08e6af8)) -* **extract:** make sure to extract properly ([9643583](https://github.com/zkat/cipm/commit/9643583)) -* **license:** switch to MIT ([0d10d0d](https://github.com/zkat/cipm/commit/0d10d0d)) +* **config:** pipe stdout ([08e6af8](https://github.com/npm/libcipm/commit/08e6af8)) +* **extract:** make sure to extract properly ([9643583](https://github.com/npm/libcipm/commit/9643583)) +* **license:** switch to MIT ([0d10d0d](https://github.com/npm/libcipm/commit/0d10d0d)) ### Features -* **impl:** rough prototype ([2970e43](https://github.com/zkat/cipm/commit/2970e43)) -* **lifecycle:** Run lifecycle events, implement prefix option, add unit tests (#1) ([d6629be](https://github.com/zkat/cipm/commit/d6629be)), closes [#1](https://github.com/zkat/cipm/issues/1) -* **opts:** add usage string and --help ([efcc48d](https://github.com/zkat/cipm/commit/efcc48d)) +* **impl:** rough prototype ([2970e43](https://github.com/npm/libcipm/commit/2970e43)) +* **lifecycle:** Run lifecycle events, implement prefix option, add unit tests (#1) ([d6629be](https://github.com/npm/libcipm/commit/d6629be)), closes [#1](https://github.com/npm/libcipm/issues/1) +* **opts:** add usage string and --help ([efcc48d](https://github.com/npm/libcipm/commit/efcc48d)) diff --git a/deps/npm/node_modules/libcipm/LICENSE.md b/deps/npm/node_modules/libcipm/LICENSE.md index 409d7d5a9c911c..2ed9c0311c9974 100644 --- a/deps/npm/node_modules/libcipm/LICENSE.md +++ b/deps/npm/node_modules/libcipm/LICENSE.md @@ -1,7 +1,19 @@ -Copyright 2017 Kat Marchán and Contributors +Copyright npm, Inc., Kat Marchán, and Contributors -Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: +Permission is hereby granted, free of charge, to any person obtaining a +copy of this software and associated documentation files (the "Software"), +to deal in the Software without restriction, including without limitation +the rights to use, copy, modify, merge, publish, distribute, sublicense, +and/or sell copies of the Software, and to permit persons to whom the +Software is furnished to do so, subject to the following conditions: -The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +DEALINGS IN THE SOFTWARE. diff --git a/deps/npm/node_modules/libcipm/README.md b/deps/npm/node_modules/libcipm/README.md index 00f9b81619bdbb..3dd9cbe149220a 100644 --- a/deps/npm/node_modules/libcipm/README.md +++ b/deps/npm/node_modules/libcipm/README.md @@ -1,6 +1,6 @@ -[![npm](https://img.shields.io/npm/v/libcipm.svg)](https://npm.im/libcipm) [![license](https://img.shields.io/npm/l/libcipm.svg)](https://npm.im/libcipm) [![Travis](https://img.shields.io/travis/zkat/cipm.svg)](https://travis-ci.org/zkat/cipm) [![AppVeyor](https://ci.appveyor.com/api/projects/status/github/zkat/cipm?svg=true)](https://ci.appveyor.com/project/zkat/cipm) [![Coverage Status](https://coveralls.io/repos/github/zkat/cipm/badge.svg?branch=latest)](https://coveralls.io/github/zkat/cipm?branch=latest) +[![npm](https://img.shields.io/npm/v/libcipm.svg)](https://npm.im/libcipm) [![license](https://img.shields.io/npm/l/libcipm.svg)](https://npm.im/libcipm) [![Travis](https://img.shields.io/travis/npm/libcipm.svg)](https://travis-ci.org/npm/libcipm) [![AppVeyor](https://ci.appveyor.com/api/projects/status/github/npm/libcipm?svg=true)](https://ci.appveyor.com/project/npm/libcipm) [![Coverage Status](https://coveralls.io/repos/github/npm/libcipm/badge.svg?branch=latest)](https://coveralls.io/github/npm/libcipm?branch=latest) -[`libcipm`](https://github.com/zkat/cipm) installs npm projects in a way that's +[`libcipm`](https://github.com/npm/libcipm) installs npm projects in a way that's optimized for continuous integration/deployment/etc scenarios. It gives up the ability to build its own trees or install packages individually, as well as other user-oriented features, in exchange for speed, and being more strict diff --git a/deps/npm/node_modules/libcipm/index.js b/deps/npm/node_modules/libcipm/index.js index ebc05385e19732..7c9b59f62d44df 100644 --- a/deps/npm/node_modules/libcipm/index.js +++ b/deps/npm/node_modules/libcipm/index.js @@ -21,25 +21,6 @@ const statAsync = BB.promisify(fs.stat) const symlinkAsync = BB.promisify(fs.symlink) const writeFileAsync = BB.promisify(fs.writeFile) -const CipmOpts = figgyPudding({ - also: {}, - dev: 'development', - development: {}, - dirPacker: {}, - force: {}, - global: {}, - ignoreScripts: 'ignore-scripts', - 'ignore-scripts': {}, - log: {}, - loglevel: {}, - only: {}, - prefix: {}, - prod: 'production', - production: {}, - Promise: { default: () => BB }, - umask: {} -}) - const LifecycleOpts = figgyPudding({ config: {}, 'script-shell': {}, @@ -59,7 +40,7 @@ const LifecycleOpts = figgyPudding({ class Installer { constructor (opts) { - this.opts = CipmOpts(opts) + this.opts = opts // Stats this.startTime = Date.now() @@ -389,7 +370,7 @@ class Installer { pkg, stage, pkgPath, LifecycleOpts(this.opts).concat({ // TODO: can be removed once npm-lifecycle is updated to modern // config practices. 
- config: this.opts, + config: Object.assign({}, this.opts, { log: null }), dir: this.prefix })) ).tap(() => { this.timings.scripts += Date.now() - start }) diff --git a/deps/npm/node_modules/libcipm/package.json b/deps/npm/node_modules/libcipm/package.json index d2dde91e8bd473..f520bd6fbc3db8 100644 --- a/deps/npm/node_modules/libcipm/package.json +++ b/deps/npm/node_modules/libcipm/package.json @@ -1,34 +1,34 @@ { - "_from": "libcipm@4.0.0", - "_id": "libcipm@4.0.0", + "_from": "libcipm@4.0.3", + "_id": "libcipm@4.0.3", "_inBundle": false, - "_integrity": "sha512-5IIamvUIqWYjfNscYdirKisXyaTMw7Mf7yuGrjHH2isz7xBZDCUOIdujZxNk2g6lBBs8AGxYW6lHpNnnt92bww==", + "_integrity": "sha512-nuIxNtqA+kIkwUiNM/nZ0yPyR7NkSUov6g6mCfFPkYylO1dEovZBL+NZ3axdouS2UOTa8GdnJ7/meSc1/0AIGw==", "_location": "/libcipm", "_phantomChildren": {}, "_requested": { "type": "version", "registry": true, - "raw": "libcipm@4.0.0", + "raw": "libcipm@4.0.3", "name": "libcipm", "escapedName": "libcipm", - "rawSpec": "4.0.0", + "rawSpec": "4.0.3", "saveSpec": null, - "fetchSpec": "4.0.0" + "fetchSpec": "4.0.3" }, "_requiredBy": [ "#USER", "/" ], - "_resolved": "https://registry.npmjs.org/libcipm/-/libcipm-4.0.0.tgz", - "_shasum": "30053bee09b0b1f4df855137d631a6d27f5d59de", - "_spec": "libcipm@4.0.0", + "_resolved": "https://registry.npmjs.org/libcipm/-/libcipm-4.0.3.tgz", + "_shasum": "6a6db4a6e040e56f4af18bb1d664e05e8eb23a39", + "_spec": "libcipm@4.0.3", "_where": "/Users/isaacs/dev/npm/cli", "author": { "name": "Kat Marchán", "email": "kzm@sykosomatic.org" }, "bugs": { - "url": "https://github.com/zkat/cipm/issues" + "url": "https://github.com/npm/libcipm/issues" }, "bundleDependencies": false, "config": { @@ -73,7 +73,7 @@ "*.js", "lib" ], - "homepage": "https://github.com/zkat/cipm#readme", + "homepage": "https://github.com/npm/libcipm#readme", "keywords": [ "npm", "package manager", @@ -85,7 +85,7 @@ "name": "libcipm", "repository": { "type": "git", - "url": "git+https://github.com/zkat/cipm.git" + "url": "git+https://github.com/npm/libcipm.git" }, "scripts": { "postrelease": "npm publish && git push --follow-tags", @@ -96,5 +96,5 @@ "update-coc": "weallbehave -o . && git add CODE_OF_CONDUCT.md && git commit -m 'docs(coc): updated CODE_OF_CONDUCT.md'", "update-contrib": "weallcontribute -o . 
&& git add CONTRIBUTING.md && git commit -m 'docs(contributing): updated CONTRIBUTING.md'"
   },
-  "version": "4.0.0"
+  "version": "4.0.3"
 }
diff --git a/deps/npm/node_modules/mute-stream/.nyc_output/33508.json b/deps/npm/node_modules/mute-stream/.nyc_output/33508.json
deleted file mode 100644
index 9e26dfeeb6e641..00000000000000
--- a/deps/npm/node_modules/mute-stream/.nyc_output/33508.json
+++ /dev/null
@@ -1 +0,0 @@
-{}
\ No newline at end of file
diff --git a/deps/npm/node_modules/mute-stream/.nyc_output/33510.json b/deps/npm/node_modules/mute-stream/.nyc_output/33510.json
deleted file mode 100644
index 1d04442328beff..00000000000000
--- a/deps/npm/node_modules/mute-stream/.nyc_output/33510.json
+++ /dev/null
@@ -1 +0,0 @@
-[… ~6 kB of minified nyc coverage data for ./mute.js (statement, branch, and function maps) elided …]
\ No newline at end of file
diff --git a/deps/npm/node_modules/node-gyp/.jshintrc b/deps/npm/node_modules/node-gyp/.jshintrc
deleted file mode 100644
index 52475ba2e6c155..00000000000000
--- a/deps/npm/node_modules/node-gyp/.jshintrc
+++ /dev/null
@@ -1,7 +0,0 @@
-{
-  "asi": true,
-  "laxcomma": true,
-  "es5": true,
-  "node": true,
-  "strict": false
-}
diff --git a/deps/npm/node_modules/normalize-package-data/node_modules/resolve/.eslintrc b/deps/npm/node_modules/normalize-package-data/node_modules/resolve/.eslintrc
deleted file mode 100644
index 8b5748ab63b58d..00000000000000
--- a/deps/npm/node_modules/normalize-package-data/node_modules/resolve/.eslintrc
+++ /dev/null
@@ -1,31 +0,0 @@
-{
-    "extends": "@ljharb",
-    "root": true,
-    "rules": {
-        "array-bracket-newline": 0,
-        "array-element-newline": 0,
-        "indent": [2, 4],
-        "strict": 0,
-        "complexity": 0,
-        "consistent-return": 0,
-        "curly": 0,
-        "dot-notation": [2, { "allowKeywords": true }],
-        "func-name-matching": 0,
-        "func-style": 0,
-        "global-require": 0,
-        "id-length": [2, { "min": 1, "max": 30 }],
-        "max-lines-per-function": 0,
-        "max-nested-callbacks": 0,
-        "max-params": 0,
-        "max-statements-per-line": [2, { "max": 2 }],
-        "max-statements": 0,
-        "no-magic-numbers": 0,
-        "no-console": 0,
-        "no-shadow": 0,
-        "no-unused-vars": [2, { "vars": "all", "args": "none" }],
-        "no-use-before-define": 0,
-        "object-curly-newline": 0,
-        "operator-linebreak": [2, "before"],
-        "sort-keys": 0,
-    }
-}
diff --git a/deps/npm/node_modules/normalize-package-data/node_modules/resolve/test/.eslintrc 
b/deps/npm/node_modules/normalize-package-data/node_modules/resolve/test/.eslintrc deleted file mode 100644 index ddd262df503c72..00000000000000 --- a/deps/npm/node_modules/normalize-package-data/node_modules/resolve/test/.eslintrc +++ /dev/null @@ -1,5 +0,0 @@ -{ - "rules": { - "max-lines": 0 - } -} diff --git a/deps/npm/node_modules/npm-lifecycle/CHANGELOG.md b/deps/npm/node_modules/npm-lifecycle/CHANGELOG.md index 1a8c916bf3d39d..c5449670d5b966 100644 --- a/deps/npm/node_modules/npm-lifecycle/CHANGELOG.md +++ b/deps/npm/node_modules/npm-lifecycle/CHANGELOG.md @@ -2,6 +2,16 @@ All notable changes to this project will be documented in this file. See [standard-version](https://github.com/conventional-changelog/standard-version) for commit guidelines. + +## [3.1.3](https://github.com/npm/lifecycle/compare/v3.1.2...v3.1.3) (2019-08-12) + + +### Bug Fixes + +* fail properly if uid-number raises an error ([e0e1b62](https://github.com/npm/lifecycle/commit/e0e1b62)) + + + ## [3.1.2](https://github.com/npm/lifecycle/compare/v3.1.1...v3.1.2) (2019-07-22) diff --git a/deps/npm/node_modules/npm-lifecycle/index.js b/deps/npm/node_modules/npm-lifecycle/index.js index 0972870b18c972..f775155d5eb669 100644 --- a/deps/npm/node_modules/npm-lifecycle/index.js +++ b/deps/npm/node_modules/npm-lifecycle/index.js @@ -265,6 +265,12 @@ function runCmd (note, cmd, pkg, env, stage, wd, opts, cb) { runCmd_(cmd, pkg, env, wd, opts, stage, unsafe, 0, 0, cb) } else { uidNumber(user, group, function (er, uid, gid) { + if (er) { + er.code = 'EUIDLOOKUP' + opts.log.resume() + process.nextTick(dequeue) + return cb(er) + } runCmd_(cmd, pkg, env, wd, opts, stage, unsafe, uid, gid, cb) }) } diff --git a/deps/npm/node_modules/npm-lifecycle/package.json b/deps/npm/node_modules/npm-lifecycle/package.json index 80cbae3a5fb4fc..5eed875c7b7eb2 100644 --- a/deps/npm/node_modules/npm-lifecycle/package.json +++ b/deps/npm/node_modules/npm-lifecycle/package.json @@ -1,19 +1,19 @@ { - "_from": "npm-lifecycle@3.1.2", - "_id": "npm-lifecycle@3.1.2", + "_from": "npm-lifecycle@3.1.3", + "_id": "npm-lifecycle@3.1.3", "_inBundle": false, - "_integrity": "sha512-nhfOcoTHrW1lJJlM2o77vTE2RWR4YOVyj7YzmY0y5itsMjEuoJHteio/ez0BliENEPsNxIUQgwhyEW9dShj3Ww==", + "_integrity": "sha512-M0QmmqbEHBXxDrmc6X3+eKjW9+F7Edg1ENau92WkYw1sox6wojHzEZJIRm1ItljEiaigZlKL8mXni/4ylAy1Dg==", "_location": "/npm-lifecycle", "_phantomChildren": {}, "_requested": { "type": "version", "registry": true, - "raw": "npm-lifecycle@3.1.2", + "raw": "npm-lifecycle@3.1.3", "name": "npm-lifecycle", "escapedName": "npm-lifecycle", - "rawSpec": "3.1.2", + "rawSpec": "3.1.3", "saveSpec": null, - "fetchSpec": "3.1.2" + "fetchSpec": "3.1.3" }, "_requiredBy": [ "#USER", @@ -21,9 +21,9 @@ "/libcipm", "/libnpm" ], - "_resolved": "https://registry.npmjs.org/npm-lifecycle/-/npm-lifecycle-3.1.2.tgz", - "_shasum": "06f2253ea3b9e122ce3e55e3496670a810afcc84", - "_spec": "npm-lifecycle@3.1.2", + "_resolved": "https://registry.npmjs.org/npm-lifecycle/-/npm-lifecycle-3.1.3.tgz", + "_shasum": "09e9b0b6686e85fd53bab82364386222d97a3730", + "_spec": "npm-lifecycle@3.1.3", "_where": "/Users/isaacs/dev/npm/cli", "author": { "name": "Mike Sherov" @@ -82,5 +82,5 @@ "update-coc": "weallbehave -o . && git add CODE_OF_CONDUCT.md && git commit -m 'docs(coc): updated CODE_OF_CONDUCT.md'", "update-contrib": "weallcontribute -o . 
&& git add CONTRIBUTING.md && git commit -m 'docs(contributing): updated CONTRIBUTING.md'" }, - "version": "3.1.2" + "version": "3.1.3" } diff --git a/deps/npm/node_modules/npm-package-arg/CHANGELOG.md b/deps/npm/node_modules/npm-package-arg/CHANGELOG.md index 83e5763f4ffec7..1b3431acced775 100644 --- a/deps/npm/node_modules/npm-package-arg/CHANGELOG.md +++ b/deps/npm/node_modules/npm-package-arg/CHANGELOG.md @@ -2,6 +2,16 @@ All notable changes to this project will be documented in this file. See [standard-version](https://github.com/conventional-changelog/standard-version) for commit guidelines. + +## [6.1.1](https://github.com/npm/npm-package-arg/compare/v6.1.0...v6.1.1) (2019-08-21) + + +### Bug Fixes + +* preserve drive letter on windows git file:// urls ([3909203](https://github.com/npm/npm-package-arg/commit/3909203)) + + + # [6.1.0](https://github.com/npm/npm-package-arg/compare/v6.0.0...v6.1.0) (2018-04-10) diff --git a/deps/npm/node_modules/npm-package-arg/npa.js b/deps/npm/node_modules/npm-package-arg/npa.js index 4d56237a570b98..bf2c17cfd513fb 100644 --- a/deps/npm/node_modules/npm-package-arg/npa.js +++ b/deps/npm/node_modules/npm-package-arg/npa.js @@ -6,7 +6,11 @@ module.exports.Result = Result let url let HostedGit let semver -let path +let path_ +function path () { + if (!path_) path_ = require('path') + return path_ +} let validatePackageName let osenv @@ -109,7 +113,6 @@ function Result (opts) { this.gitCommittish = opts.gitCommittish this.hosted = opts.hosted } -Result.prototype = {} Result.prototype.setName = function (name) { if (!validatePackageName) validatePackageName = require('validate-npm-package-name') @@ -152,8 +155,7 @@ const isAbsolutePath = /^[/]|^[A-Za-z]:/ function resolvePath (where, spec) { if (isAbsolutePath.test(spec)) return spec - if (!path) path = require('path') - return path.resolve(where, spec) + return path().resolve(where, spec) } function isAbsolute (dir) { @@ -180,8 +182,7 @@ function fromFile (res, where) { if (isAbsolute(spec)) { res.saveSpec = 'file:' + spec } else { - if (!path) path = require('path') - res.saveSpec = 'file:' + path.relative(where, res.fetchSpec) + res.saveSpec = 'file:' + path().relative(where, res.fetchSpec) } } return res @@ -238,6 +239,11 @@ function fromURL (res) { } else { setGitCommittish(res, urlparse.hash != null ? 
urlparse.hash.slice(1) : '') urlparse.protocol = urlparse.protocol.replace(/^git[+]/, '') + if (urlparse.protocol === 'file:' && /^git\+file:\/\/[a-z]:/i.test(res.rawSpec)) { + // keep the drive letter : on windows file paths + urlparse.host += ':' + urlparse.hostname += ':' + } delete urlparse.hash res.fetchSpec = url.format(urlparse) } diff --git a/deps/npm/node_modules/npm-package-arg/package.json b/deps/npm/node_modules/npm-package-arg/package.json index 7bd5efeb43f4ba..7d978a4d48a5c0 100644 --- a/deps/npm/node_modules/npm-package-arg/package.json +++ b/deps/npm/node_modules/npm-package-arg/package.json @@ -1,41 +1,38 @@ { - "_args": [ - [ - "npm-package-arg@6.1.0", - "/Users/rebecca/code/npm" - ] - ], - "_from": "npm-package-arg@6.1.0", - "_id": "npm-package-arg@6.1.0", + "_from": "npm-package-arg@6.1.1", + "_id": "npm-package-arg@6.1.1", "_inBundle": false, - "_integrity": "sha512-zYbhP2k9DbJhA0Z3HKUePUgdB1x7MfIfKssC+WLPFMKTBZKpZh5m13PgexJjCq6KW7j17r0jHWcCpxEqnnncSA==", + "_integrity": "sha512-qBpssaL3IOZWi5vEKUKW0cO7kzLeT+EQO9W8RsLOZf76KF9E/K9+wH0C7t06HXPpaH8WH5xF1MExLuCwbTqRUg==", "_location": "/npm-package-arg", "_phantomChildren": {}, "_requested": { "type": "version", "registry": true, - "raw": "npm-package-arg@6.1.0", + "raw": "npm-package-arg@6.1.1", "name": "npm-package-arg", "escapedName": "npm-package-arg", - "rawSpec": "6.1.0", + "rawSpec": "6.1.1", "saveSpec": null, - "fetchSpec": "6.1.0" + "fetchSpec": "6.1.1" }, "_requiredBy": [ + "#USER", "/", "/init-package-json", "/libcipm", - "/libnpmhook/npm-registry-fetch", + "/libnpm", + "/libnpmaccess", + "/libnpmpublish", "/libnpx", "/lock-verify", "/npm-pick-manifest", - "/npm-registry-client", "/npm-registry-fetch", "/pacote" ], - "_resolved": "https://registry.npmjs.org/npm-package-arg/-/npm-package-arg-6.1.0.tgz", - "_spec": "6.1.0", - "_where": "/Users/rebecca/code/npm", + "_resolved": "https://registry.npmjs.org/npm-package-arg/-/npm-package-arg-6.1.1.tgz", + "_shasum": "02168cb0a49a2b75bf988a28698de7b529df5cb7", + "_spec": "npm-package-arg@6.1.1", + "_where": "/Users/isaacs/dev/npm/cli", "author": { "name": "Isaac Z. Schlueter", "email": "i@izs.me", @@ -44,17 +41,19 @@ "bugs": { "url": "https://github.com/npm/npm-package-arg/issues" }, + "bundleDependencies": false, "dependencies": { - "hosted-git-info": "^2.6.0", + "hosted-git-info": "^2.7.1", "osenv": "^0.1.5", - "semver": "^5.5.0", + "semver": "^5.6.0", "validate-npm-package-name": "^3.0.0" }, + "deprecated": false, "description": "Parse the things that can be arguments to `npm install`", "devDependencies": { "standard": "^11.0.1", - "standard-version": "^4.3.0", - "tap": "^11.1.3", + "standard-version": "^4.4.0", + "tap": "^12.5.0", "weallbehave": "^1.2.0", "weallcontribute": "^1.0.8" }, @@ -77,9 +76,9 @@ "prerelease": "npm t", "pretest": "standard", "release": "standard-version -s", - "test": "tap -J --coverage test/*.js", + "test": "tap --100 -J --coverage test/*.js", "update-coc": "weallbehave -o . && git add CODE_OF_CONDUCT.md && git commit -m 'docs(coc): updated CODE_OF_CONDUCT.md'", "update-contrib": "weallcontribute -o . 
&& git add CONTRIBUTING.md && git commit -m 'docs(contributing): updated CONTRIBUTING.md'" }, - "version": "6.1.0" + "version": "6.1.1" } diff --git a/deps/npm/node_modules/npm-pick-manifest/CHANGELOG.md b/deps/npm/node_modules/npm-pick-manifest/CHANGELOG.md index 2112665f7572eb..c594ba140f72b1 100644 --- a/deps/npm/node_modules/npm-pick-manifest/CHANGELOG.md +++ b/deps/npm/node_modules/npm-pick-manifest/CHANGELOG.md @@ -2,73 +2,107 @@ All notable changes to this project will be documented in this file. See [standard-version](https://github.com/conventional-changelog/standard-version) for commit guidelines. + +## [3.0.2](https://github.com/npm/npm-pick-manifest/compare/v3.0.1...v3.0.2) (2019-08-30) + + + + +## [3.0.1](https://github.com/npm/npm-pick-manifest/compare/v3.0.0...v3.0.1) (2019-08-28) + + +### Bug Fixes + +* throw 403 for forbidden major/minor versions ([003286e](https://github.com/npm/npm-pick-manifest/commit/003286e)), closes [#2](https://github.com/npm/npm-pick-manifest/issues/2) + + + + +# [3.0.0](https://github.com/npm/npm-pick-manifest/compare/v2.2.3...v3.0.0) (2019-08-20) + + +### Features + +* throw forbidden error when package is blocked by policy ([ad2a962](https://github.com/npm/npm-pick-manifest/commit/ad2a962)), closes [#1](https://github.com/npm/npm-pick-manifest/issues/1) + + +### BREAKING CHANGES + +* This adds a new error code when package versions are +blocked. + +PR-URL: https://github.com/npm/npm-pick-manifest/pull/1 +Credit: @claudiahdz + + + -## [2.2.3](https://github.com/zkat/npm-pick-manifest/compare/v2.2.2...v2.2.3) (2018-10-31) +## [2.2.3](https://github.com/npm/npm-pick-manifest/compare/v2.2.2...v2.2.3) (2018-10-31) ### Bug Fixes -* **enjoyBy:** rework semantics for enjoyBy again ([5e89b62](https://github.com/zkat/npm-pick-manifest/commit/5e89b62)) +* **enjoyBy:** rework semantics for enjoyBy again ([5e89b62](https://github.com/npm/npm-pick-manifest/commit/5e89b62)) -## [2.2.2](https://github.com/zkat/npm-pick-manifest/compare/v2.2.1...v2.2.2) (2018-10-31) +## [2.2.2](https://github.com/npm/npm-pick-manifest/compare/v2.2.1...v2.2.2) (2018-10-31) ### Bug Fixes -* **enjoyBy:** rework semantics for enjoyBy ([5684f45](https://github.com/zkat/npm-pick-manifest/commit/5684f45)) +* **enjoyBy:** rework semantics for enjoyBy ([5684f45](https://github.com/npm/npm-pick-manifest/commit/5684f45)) -## [2.2.1](https://github.com/zkat/npm-pick-manifest/compare/v2.2.0...v2.2.1) (2018-10-30) +## [2.2.1](https://github.com/npm/npm-pick-manifest/compare/v2.2.0...v2.2.1) (2018-10-30) -# [2.2.0](https://github.com/zkat/npm-pick-manifest/compare/v2.1.0...v2.2.0) (2018-10-30) +# [2.2.0](https://github.com/npm/npm-pick-manifest/compare/v2.1.0...v2.2.0) (2018-10-30) ### Bug Fixes -* **audit:** npm audit fix --force ([d5ae6c4](https://github.com/zkat/npm-pick-manifest/commit/d5ae6c4)) +* **audit:** npm audit fix --force ([d5ae6c4](https://github.com/npm/npm-pick-manifest/commit/d5ae6c4)) ### Features -* **enjoyBy:** add opts.enjoyBy option to filter versions by date ([0b8a790](https://github.com/zkat/npm-pick-manifest/commit/0b8a790)) +* **enjoyBy:** add opts.enjoyBy option to filter versions by date ([0b8a790](https://github.com/npm/npm-pick-manifest/commit/0b8a790)) -# [2.1.0](https://github.com/zkat/npm-pick-manifest/compare/v2.0.1...v2.1.0) (2017-10-18) +# [2.1.0](https://github.com/npm/npm-pick-manifest/compare/v2.0.1...v2.1.0) (2017-10-18) ### Features -* **selection:** allow manually disabling deprecation skipping 
([0d239d3](https://github.com/zkat/npm-pick-manifest/commit/0d239d3)) +* **selection:** allow manually disabling deprecation skipping ([0d239d3](https://github.com/npm/npm-pick-manifest/commit/0d239d3)) -## [2.0.1](https://github.com/zkat/npm-pick-manifest/compare/v2.0.0...v2.0.1) (2017-10-18) +## [2.0.1](https://github.com/npm/npm-pick-manifest/compare/v2.0.0...v2.0.1) (2017-10-18) -# [2.0.0](https://github.com/zkat/npm-pick-manifest/compare/v1.0.4...v2.0.0) (2017-10-03) +# [2.0.0](https://github.com/npm/npm-pick-manifest/compare/v1.0.4...v2.0.0) (2017-10-03) ### Bug Fixes -* **license:** relicense project according to npm policy (#3) ([ed743a0](https://github.com/zkat/npm-pick-manifest/commit/ed743a0)) +* **license:** relicense project according to npm policy (#3) ([ed743a0](https://github.com/npm/npm-pick-manifest/commit/ed743a0)) ### Features -* **selection:** Avoid matching deprecated packages if possible ([3fc6c3a](https://github.com/zkat/npm-pick-manifest/commit/3fc6c3a)) +* **selection:** Avoid matching deprecated packages if possible ([3fc6c3a](https://github.com/npm/npm-pick-manifest/commit/3fc6c3a)) ### BREAKING CHANGES @@ -79,43 +113,43 @@ All notable changes to this project will be documented in this file. See [standa -## [1.0.4](https://github.com/zkat/npm-pick-manifest/compare/v1.0.3...v1.0.4) (2017-06-29) +## [1.0.4](https://github.com/npm/npm-pick-manifest/compare/v1.0.3...v1.0.4) (2017-06-29) ### Bug Fixes -* **npa:** bump npa version for bugfixes ([7cdaca7](https://github.com/zkat/npm-pick-manifest/commit/7cdaca7)) -* **semver:** use loose semver parsing for *all* ops ([bbc0daa](https://github.com/zkat/npm-pick-manifest/commit/bbc0daa)) +* **npa:** bump npa version for bugfixes ([7cdaca7](https://github.com/npm/npm-pick-manifest/commit/7cdaca7)) +* **semver:** use loose semver parsing for *all* ops ([bbc0daa](https://github.com/npm/npm-pick-manifest/commit/bbc0daa)) -## [1.0.3](https://github.com/zkat/npm-pick-manifest/compare/v1.0.2...v1.0.3) (2017-05-04) +## [1.0.3](https://github.com/npm/npm-pick-manifest/compare/v1.0.2...v1.0.3) (2017-05-04) ### Bug Fixes -* **semver:** use semver.clean() instead ([f4133b5](https://github.com/zkat/npm-pick-manifest/commit/f4133b5)) +* **semver:** use semver.clean() instead ([f4133b5](https://github.com/npm/npm-pick-manifest/commit/f4133b5)) -## [1.0.2](https://github.com/zkat/npm-pick-manifest/compare/v1.0.1...v1.0.2) (2017-05-04) +## [1.0.2](https://github.com/npm/npm-pick-manifest/compare/v1.0.1...v1.0.2) (2017-05-04) ### Bug Fixes -* **picker:** spaces in `wanted` prevented match ([97a7d0a](https://github.com/zkat/npm-pick-manifest/commit/97a7d0a)) +* **picker:** spaces in `wanted` prevented match ([97a7d0a](https://github.com/npm/npm-pick-manifest/commit/97a7d0a)) -## [1.0.1](https://github.com/zkat/npm-pick-manifest/compare/v1.0.0...v1.0.1) (2017-04-24) +## [1.0.1](https://github.com/npm/npm-pick-manifest/compare/v1.0.0...v1.0.1) (2017-04-24) ### Bug Fixes -* **deps:** forgot to add semver ([1876f4f](https://github.com/zkat/npm-pick-manifest/commit/1876f4f)) +* **deps:** forgot to add semver ([1876f4f](https://github.com/npm/npm-pick-manifest/commit/1876f4f)) @@ -125,7 +159,7 @@ All notable changes to this project will be documented in this file. See [standa ### Features -* **api:** initial implementation. ([b086912](https://github.com/zkat/npm-pick-manifest/commit/b086912)) +* **api:** initial implementation. 
([b086912](https://github.com/npm/npm-pick-manifest/commit/b086912)) ### BREAKING CHANGES diff --git a/deps/npm/node_modules/npm-pick-manifest/README.md b/deps/npm/node_modules/npm-pick-manifest/README.md index a9a027bfcb4600..d32d47af1997b6 100644 --- a/deps/npm/node_modules/npm-pick-manifest/README.md +++ b/deps/npm/node_modules/npm-pick-manifest/README.md @@ -1,6 +1,6 @@ -# npm-pick-manifest [![npm version](https://img.shields.io/npm/v/npm-pick-manifest.svg)](https://npm.im/npm-pick-manifest) [![license](https://img.shields.io/npm/l/npm-pick-manifest.svg)](https://npm.im/npm-pick-manifest) [![Travis](https://img.shields.io/travis/zkat/npm-pick-manifest.svg)](https://travis-ci.org/zkat/npm-pick-manifest) [![AppVeyor](https://ci.appveyor.com/api/projects/status/github/zkat/npm-pick-manifest?svg=true)](https://ci.appveyor.com/project/zkat/npm-pick-manifest) [![Coverage Status](https://coveralls.io/repos/github/zkat/npm-pick-manifest/badge.svg?branch=latest)](https://coveralls.io/github/zkat/npm-pick-manifest?branch=latest) +# npm-pick-manifest [![npm version](https://img.shields.io/npm/v/npm-pick-manifest.svg)](https://npm.im/npm-pick-manifest) [![license](https://img.shields.io/npm/l/npm-pick-manifest.svg)](https://npm.im/npm-pick-manifest) [![Travis](https://img.shields.io/travis/npm/npm-pick-manifest.svg)](https://travis-ci.org/npm/npm-pick-manifest) [![AppVeyor](https://ci.appveyor.com/api/projects/status/github/npm/npm-pick-manifest?svg=true)](https://ci.appveyor.com/project/npm/npm-pick-manifest) [![Coverage Status](https://coveralls.io/repos/github/npm/npm-pick-manifest/badge.svg?branch=latest)](https://coveralls.io/github/npm/npm-pick-manifest?branch=latest) -[`npm-pick-manifest`](https://github.com/zkat/npm-pick-manifest) is a standalone +[`npm-pick-manifest`](https://github.com/npm/npm-pick-manifest) is a standalone implementation of [npm](https://npmjs.com)'s semver range resolution algorithm. ## Install diff --git a/deps/npm/node_modules/npm-pick-manifest/index.js b/deps/npm/node_modules/npm-pick-manifest/index.js index d9a8373e57f142..9eb2d82d100245 100644 --- a/deps/npm/node_modules/npm-pick-manifest/index.js +++ b/deps/npm/node_modules/npm-pick-manifest/index.js @@ -23,6 +23,9 @@ function pickManifest (packument, wanted, opts) { const versions = Object.keys(packument.versions || {}).filter(v => { return semver.valid(v, true) }) + const policyRestrictions = packument.policyRestrictions + const restrictedVersions = policyRestrictions + ? Object.keys(policyRestrictions.versions) : [] function enjoyableBy (v) { return !time || ( @@ -32,7 +35,7 @@ function pickManifest (packument, wanted, opts) { let err - if (!versions.length) { + if (!versions.length && !restrictedVersions.length) { err = new Error(`No valid versions available for ${packument.name}`) err.code = 'ENOVERSIONS' err.name = packument.name @@ -93,21 +96,33 @@ function pickManifest (packument, wanted, opts) { target = stillFresh[0] } + if (!target && restrictedVersions) { + target = semver.maxSatisfying(restrictedVersions, wanted, true) + } + const manifest = ( target && packument.versions[target] ) if (!manifest) { - err = new Error( - `No matching version found for ${packument.name}@${wanted}${ - opts.enjoyBy - ? ` with an Enjoy By date of ${ - new Date(opts.enjoyBy).toLocaleString() - }. 
Maybe try a different date?` - : '' - }` - ) - err.code = 'ETARGET' + // Check if target is forbidden + const isForbidden = target && policyRestrictions && policyRestrictions.versions[target] + const pckg = `${packument.name}@${wanted}${ + opts.enjoyBy + ? ` with an Enjoy By date of ${ + new Date(opts.enjoyBy).toLocaleString() + }. Maybe try a different date?` + : '' + }` + + if (isForbidden) { + err = new Error(`Could not download ${pckg} due to policy violations.\n${policyRestrictions.message}\n`) + err.code = 'E403' + } else { + err = new Error(`No matching version found for ${pckg}.`) + err.code = 'ETARGET' + } + err.name = packument.name err.type = type err.wanted = wanted diff --git a/deps/npm/node_modules/npm-pick-manifest/package.json b/deps/npm/node_modules/npm-pick-manifest/package.json index a80c76d372d015..5adbc269574858 100644 --- a/deps/npm/node_modules/npm-pick-manifest/package.json +++ b/deps/npm/node_modules/npm-pick-manifest/package.json @@ -1,34 +1,35 @@ { - "_from": "npm-pick-manifest@2.2.3", - "_id": "npm-pick-manifest@2.2.3", + "_from": "npm-pick-manifest@3.0.2", + "_id": "npm-pick-manifest@3.0.2", "_inBundle": false, - "_integrity": "sha512-+IluBC5K201+gRU85vFlUwX3PFShZAbAgDNp2ewJdWMVSppdo/Zih0ul2Ecky/X7b51J7LrrUAP+XOmOCvYZqA==", + "_integrity": "sha512-wNprTNg+X5nf+tDi+hbjdHhM4bX+mKqv6XmPh7B5eG+QY9VARfQPfCEH013H5GqfNj6ee8Ij2fg8yk0mzps1Vw==", "_location": "/npm-pick-manifest", "_phantomChildren": {}, "_requested": { "type": "version", "registry": true, - "raw": "npm-pick-manifest@2.2.3", + "raw": "npm-pick-manifest@3.0.2", "name": "npm-pick-manifest", "escapedName": "npm-pick-manifest", - "rawSpec": "2.2.3", + "rawSpec": "3.0.2", "saveSpec": null, - "fetchSpec": "2.2.3" + "fetchSpec": "3.0.2" }, "_requiredBy": [ "#USER", - "/" + "/", + "/pacote" ], - "_resolved": "https://registry.npmjs.org/npm-pick-manifest/-/npm-pick-manifest-2.2.3.tgz", - "_shasum": "32111d2a9562638bb2c8f2bf27f7f3092c8fae40", - "_spec": "npm-pick-manifest@2.2.3", - "_where": "/Users/aeschright/code/cli", + "_resolved": "https://registry.npmjs.org/npm-pick-manifest/-/npm-pick-manifest-3.0.2.tgz", + "_shasum": "f4d9e5fd4be2153e5f4e5f9b7be8dc419a99abb7", + "_spec": "npm-pick-manifest@3.0.2", + "_where": "/Users/claudiahdz/npm/cli", "author": { "name": "Kat Marchán", "email": "kzm@sykosomatic.org" }, "bugs": { - "url": "https://github.com/zkat/npm-pick-manifest/issues" + "url": "https://github.com/npm/npm-pick-manifest/issues" }, "bundleDependencies": false, "config": { @@ -57,7 +58,7 @@ "files": [ "*.js" ], - "homepage": "https://github.com/zkat/npm-pick-manifest#readme", + "homepage": "https://github.com/npm/npm-pick-manifest#readme", "keywords": [ "npm", "semver", @@ -68,7 +69,7 @@ "name": "npm-pick-manifest", "repository": { "type": "git", - "url": "git+https://github.com/zkat/npm-pick-manifest.git" + "url": "git+https://github.com/npm/npm-pick-manifest.git" }, "scripts": { "postrelease": "npm publish && git push --follow-tags", @@ -79,5 +80,5 @@ "update-coc": "weallbehave -o . && git add CODE_OF_CONDUCT.md && git commit -m 'docs(coc): updated CODE_OF_CONDUCT.md'", "update-contrib": "weallcontribute -o . 
&& git add CONTRIBUTING.md && git commit -m 'docs(contributing): updated CONTRIBUTING.md'" }, - "version": "2.2.3" + "version": "3.0.2" } diff --git a/deps/npm/node_modules/object-keys/.eslintrc b/deps/npm/node_modules/object-keys/.eslintrc deleted file mode 100644 index 9a8d5b0e95e361..00000000000000 --- a/deps/npm/node_modules/object-keys/.eslintrc +++ /dev/null @@ -1,17 +0,0 @@ -{ - "root": true, - - "extends": "@ljharb", - - "rules": { - "complexity": [2, 23], - "id-length": [2, { "min": 1, "max": 40 }], - "max-params": [2, 3], - "max-statements": [2, 23], - "max-statements-per-line": [2, { "max": 2 }], - "no-extra-parens": [1], - "no-invalid-this": [1], - "no-restricted-syntax": [2, "BreakStatement", "ContinueStatement", "LabeledStatement", "WithStatement"], - "operator-linebreak": [2, "after"] - } -} diff --git a/deps/npm/node_modules/object.getownpropertydescriptors/.eslintrc b/deps/npm/node_modules/object.getownpropertydescriptors/.eslintrc deleted file mode 100644 index 97ada3121bac03..00000000000000 --- a/deps/npm/node_modules/object.getownpropertydescriptors/.eslintrc +++ /dev/null @@ -1,10 +0,0 @@ -{ - "root": true, - - "extends": "@ljharb", - - "rules": { - "id-length": [2, { "min": 1, "max": 30 }], - "new-cap": [2, { "capIsNewExceptions": ["IsCallable", "RequireObjectCoercible", "ToObject"] }] - } -} diff --git a/deps/npm/node_modules/object.getownpropertydescriptors/test/.eslintrc b/deps/npm/node_modules/object.getownpropertydescriptors/test/.eslintrc deleted file mode 100644 index e9ca97953e783d..00000000000000 --- a/deps/npm/node_modules/object.getownpropertydescriptors/test/.eslintrc +++ /dev/null @@ -1,8 +0,0 @@ -{ - "rules": { - "max-nested-callbacks": [2, 3], - "max-statements": [2, 15], - "max-statements-per-line": [2, { "max": 2 }], - "no-invalid-this": [1] - } -} diff --git a/deps/npm/node_modules/pacote/CHANGELOG.md b/deps/npm/node_modules/pacote/CHANGELOG.md index 8559760b3eb395..4ed92beb68213d 100644 --- a/deps/npm/node_modules/pacote/CHANGELOG.md +++ b/deps/npm/node_modules/pacote/CHANGELOG.md @@ -2,6 +2,43 @@ All notable changes to this project will be documented in this file. See [standard-version](https://github.com/conventional-changelog/standard-version) for commit guidelines. 
+ +## [9.5.8](https://github.com/npm/pacote/compare/v9.5.7...v9.5.8) (2019-08-20) + + + + +## [9.5.7](https://github.com/npm/pacote/compare/v9.5.6...v9.5.7) (2019-08-19) + + +### Bug Fixes + +* do not try to chown if not running as root ([bbc5da3](https://github.com/npm/pacote/commit/bbc5da3)) + + + + +## [9.5.6](https://github.com/npm/pacote/compare/v9.5.5...v9.5.6) (2019-08-15) + + +### Bug Fixes + +* **extract:** chown properly when more than one directory is made ([5161828](https://github.com/npm/pacote/commit/5161828)) + + + + +## [9.5.5](https://github.com/npm/pacote/compare/v9.5.4...v9.5.5) (2019-08-12) + + +### Bug Fixes + +* don't pass uid/gid to cacache ([0a0c73c](https://github.com/npm/pacote/commit/0a0c73c)) +* Infer owner of all unpacked files ([f12e7ef](https://github.com/npm/pacote/commit/f12e7ef)) +* invalid arg detection in extract() ([b4dc363](https://github.com/npm/pacote/commit/b4dc363)), closes [#5](https://github.com/npm/pacote/issues/5) [#6](https://github.com/npm/pacote/issues/6) + + + ## [9.5.4](https://github.com/npm/pacote/compare/v9.5.3...v9.5.4) (2019-07-16) diff --git a/deps/npm/node_modules/pacote/LICENSE b/deps/npm/node_modules/pacote/LICENSE index ab41caa64b86cf..841ef53a26dff6 100644 --- a/deps/npm/node_modules/pacote/LICENSE +++ b/deps/npm/node_modules/pacote/LICENSE @@ -1,5 +1,5 @@ The MIT License (MIT) -Copyright (c) 2017 Kat Marchán +Copyright (c) Kat Marchán, npm, Inc., and Contributors Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal diff --git a/deps/npm/node_modules/pacote/extract.js b/deps/npm/node_modules/pacote/extract.js index f49a54420242ae..6ed0b18aaae1a4 100644 --- a/deps/npm/node_modules/pacote/extract.js +++ b/deps/npm/node_modules/pacote/extract.js @@ -10,41 +10,60 @@ const optCheck = require('./lib/util/opt-check.js') const path = require('path') const rimraf = BB.promisify(require('rimraf')) const withTarballStream = require('./lib/with-tarball-stream.js') +const inferOwner = require('infer-owner') +const chown = BB.promisify(require('chownr')) const truncateAsync = BB.promisify(fs.truncate) const readFileAsync = BB.promisify(fs.readFile) const appendFileAsync = BB.promisify(fs.appendFile) +// you used to call me on my... +const selfOwner = process.getuid ? 
{ + uid: process.getuid(), + gid: process.getgid() +} : { + uid: undefined, + gid: undefined +} + module.exports = extract function extract (spec, dest, opts) { opts = optCheck(opts) spec = npa(spec, opts.where) + if (spec.type === 'git' && !opts.cache) { + throw new TypeError('Extracting git packages requires a cache folder') + } + if (typeof dest !== 'string') { + throw new TypeError('Extract requires a destination') + } const startTime = Date.now() - - return withTarballStream(spec, opts, stream => { - return tryExtract(spec, stream, dest, opts) - }) - .then(() => { - if (!opts.resolved) { - const pjson = path.join(dest, 'package.json') - return readFileAsync(pjson, 'utf8') - .then(str => truncateAsync(pjson) - .then(() => appendFileAsync(pjson, str.replace( - /}\s*$/, - `\n,"_resolved": ${ - JSON.stringify(opts.resolved || '') - }\n,"_integrity": ${ - JSON.stringify(opts.integrity || '') - }\n,"_from": ${ - JSON.stringify(spec.toString()) - }\n}` - )))) - } + return inferOwner(dest).then(({ uid, gid }) => { + opts = opts.concat({ uid, gid }) + return withTarballStream(spec, opts, stream => { + return tryExtract(spec, stream, dest, opts) }) - .then(() => opts.log.silly( - 'extract', - `${spec} extracted to ${dest} (${Date.now() - startTime}ms)` - )) + .then(() => { + if (!opts.resolved) { + const pjson = path.join(dest, 'package.json') + return readFileAsync(pjson, 'utf8') + .then(str => truncateAsync(pjson) + .then(() => appendFileAsync(pjson, str.replace( + /}\s*$/, + `\n,"_resolved": ${ + JSON.stringify(opts.resolved || '') + }\n,"_integrity": ${ + JSON.stringify(opts.integrity || '') + }\n,"_from": ${ + JSON.stringify(spec.toString()) + }\n}` + )))) + } + }) + .then(() => opts.log.silly( + 'extract', + `${spec} extracted to ${dest} (${Date.now() - startTime}ms)` + )) + }) } function tryExtract (spec, tarStream, dest, opts) { @@ -53,6 +72,15 @@ function tryExtract (spec, tarStream, dest, opts) { rimraf(dest) .then(() => mkdirp(dest)) + .then((made) => { + // respect the current ownership of unpack targets + // but don't try to chown if we're not root. 
+ if (selfOwner.uid === 0 && + typeof selfOwner.gid === 'number' && + selfOwner.uid !== opts.uid && selfOwner.gid !== opts.gid) { + return chown(made || dest, opts.uid, opts.gid) + } + }) .then(() => { const xtractor = extractStream(spec, dest, opts) xtractor.on('error', reject) diff --git a/deps/npm/node_modules/pacote/lib/fetchers/file.js b/deps/npm/node_modules/pacote/lib/fetchers/file.js index 7348a7ec634c1d..a58e329130987f 100644 --- a/deps/npm/node_modules/pacote/lib/fetchers/file.js +++ b/deps/npm/node_modules/pacote/lib/fetchers/file.js @@ -60,8 +60,6 @@ Fetcher.impl(fetchFile, { : (pipe( fs.createReadStream(src), cacache.put.stream(opts.cache, `pacote:tarball:${src}`, { - uid: opts.uid, - gid: opts.gid, integrity: opts.integrity }).on('integrity', d => { integrity = d }) )) diff --git a/deps/npm/node_modules/pacote/lib/fetchers/registry/manifest.js b/deps/npm/node_modules/pacote/lib/fetchers/registry/manifest.js index d29ec71c3375e3..00deb13af236de 100644 --- a/deps/npm/node_modules/pacote/lib/fetchers/registry/manifest.js +++ b/deps/npm/node_modules/pacote/lib/fetchers/registry/manifest.js @@ -27,7 +27,7 @@ function getManifest (spec, opts) { includeDeprecated: opts.includeDeprecated }) } catch (err) { - if (err.code === 'ETARGET' && packument._cached && !opts.offline) { + if ((err.code === 'ETARGET' || err.code === 'E403') && packument._cached && !opts.offline) { opts.log.silly( 'registry:manifest', `no matching version for ${spec.name}@${spec.fetchSpec} in the cache. Forcing revalidation.` diff --git a/deps/npm/node_modules/pacote/package.json b/deps/npm/node_modules/pacote/package.json index 215e5dce843f12..58826586a35207 100644 --- a/deps/npm/node_modules/pacote/package.json +++ b/deps/npm/node_modules/pacote/package.json @@ -1,8 +1,8 @@ { - "_from": "pacote@9.5.4", - "_id": "pacote@9.5.4", + "_from": "pacote@9.5.8", + "_id": "pacote@9.5.8", "_inBundle": false, - "_integrity": "sha512-nWr0ari6E+apbdoN0hToTKZElO5h4y8DGFa2pyNA5GQIdcP0imC96bA0bbPw1gpeguVIiUgHHaAlq/6xfPp8Qw==", + "_integrity": "sha512-0Tl8Oi/K0Lo4MZmH0/6IsT3gpGf9eEAznLXEQPKgPq7FscnbUOyopnVpwXlnQdIbCUaojWy1Wd7VMyqfVsRrIw==", "_location": "/pacote", "_phantomChildren": { "safe-buffer": "5.1.2", @@ -11,12 +11,12 @@ "_requested": { "type": "version", "registry": true, - "raw": "pacote@9.5.4", + "raw": "pacote@9.5.8", "name": "pacote", "escapedName": "pacote", - "rawSpec": "9.5.4", + "rawSpec": "9.5.8", "saveSpec": null, - "fetchSpec": "9.5.4" + "fetchSpec": "9.5.8" }, "_requiredBy": [ "#USER", @@ -24,9 +24,9 @@ "/libcipm", "/libnpm" ], - "_resolved": "https://registry.npmjs.org/pacote/-/pacote-9.5.4.tgz", - "_shasum": "8baa26f3d1326d13dc2fe0fe84040a364ae30aad", - "_spec": "pacote@9.5.4", + "_resolved": "https://registry.npmjs.org/pacote/-/pacote-9.5.8.tgz", + "_shasum": "23480efdc4fa74515855c9ecf39cf64078f99786", + "_spec": "pacote@9.5.8", "_where": "/Users/isaacs/dev/npm/cli", "author": { "name": "Kat Marchán", @@ -48,10 +48,12 @@ ], "dependencies": { "bluebird": "^3.5.3", - "cacache": "^12.0.0", + "cacache": "^12.0.2", + "chownr": "^1.1.2", "figgy-pudding": "^3.5.1", "get-stream": "^4.1.0", "glob": "^7.1.3", + "infer-owner": "^1.0.4", "lru-cache": "^5.1.1", "make-fetch-happen": "^5.0.0", "minimatch": "^3.0.4", @@ -61,7 +63,7 @@ "normalize-package-data": "^2.4.0", "npm-package-arg": "^6.1.0", "npm-packlist": "^1.1.12", - "npm-pick-manifest": "^2.2.3", + "npm-pick-manifest": "^3.0.0", "npm-registry-fetch": "^4.0.0", "osenv": "^0.1.5", "promise-inflight": "^1.0.1", @@ -71,7 +73,7 @@ "safe-buffer": "^5.1.2", "semver": 
"^5.6.0", "ssri": "^6.0.1", - "tar": "^4.4.8", + "tar": "^4.4.10", "unique-filename": "^1.1.1", "which": "^1.3.1" }, @@ -117,5 +119,5 @@ "update-coc": "weallbehave -o . && git add CODE_OF_CONDUCT.md && git commit -m 'docs(coc): updated CODE_OF_CONDUCT.md'", "update-contrib": "weallcontribute -o . && git add CONTRIBUTING.md && git commit -m 'docs(contributing): updated CONTRIBUTING.md'" }, - "version": "9.5.4" + "version": "9.5.8" } diff --git a/deps/npm/node_modules/psl/.eslintrc b/deps/npm/node_modules/psl/.eslintrc deleted file mode 100644 index ebbcf70bf8531a..00000000000000 --- a/deps/npm/node_modules/psl/.eslintrc +++ /dev/null @@ -1,11 +0,0 @@ -{ - "rules": { - "indent": [ 2, 2 ], - "padding-line-between-statements": "off", - "hapi/hapi-no-var": false - }, - "extends": "hapi", - "env": { - "mocha": true - } -} diff --git a/deps/npm/node_modules/qs/.eslintrc b/deps/npm/node_modules/qs/.eslintrc deleted file mode 100644 index b7a87b93dfd730..00000000000000 --- a/deps/npm/node_modules/qs/.eslintrc +++ /dev/null @@ -1,19 +0,0 @@ -{ - "root": true, - - "extends": "@ljharb", - - "rules": { - "complexity": 0, - "consistent-return": 1, - "func-name-matching": 0, - "id-length": [2, { "min": 1, "max": 25, "properties": "never" }], - "indent": [2, 4], - "max-params": [2, 12], - "max-statements": [2, 45], - "no-continue": 1, - "no-magic-numbers": 0, - "no-restricted-syntax": [2, "BreakStatement", "DebuggerStatement", "ForInStatement", "LabeledStatement", "WithStatement"], - "operator-linebreak": [2, "before"], - } -} diff --git a/deps/npm/node_modules/qs/test/.eslintrc b/deps/npm/node_modules/qs/test/.eslintrc deleted file mode 100644 index 20175d64d9dbf7..00000000000000 --- a/deps/npm/node_modules/qs/test/.eslintrc +++ /dev/null @@ -1,15 +0,0 @@ -{ - "rules": { - "array-bracket-newline": 0, - "array-element-newline": 0, - "consistent-return": 2, - "max-lines": 0, - "max-nested-callbacks": [2, 3], - "max-statements": 0, - "no-buffer-constructor": 0, - "no-extend-native": 0, - "no-magic-numbers": 0, - "object-curly-newline": 0, - "sort-keys": 0 - } -} diff --git a/deps/npm/node_modules/read-cmd-shim/.npmignore b/deps/npm/node_modules/read-cmd-shim/.npmignore deleted file mode 100644 index ac50549639a976..00000000000000 --- a/deps/npm/node_modules/read-cmd-shim/.npmignore +++ /dev/null @@ -1,3 +0,0 @@ -.#* -*~ -node_modules diff --git a/deps/npm/node_modules/read-cmd-shim/LICENSE b/deps/npm/node_modules/read-cmd-shim/LICENSE new file mode 100644 index 00000000000000..2a4982dc40cb69 --- /dev/null +++ b/deps/npm/node_modules/read-cmd-shim/LICENSE @@ -0,0 +1,13 @@ +Copyright (c) 2015, Rebecca Turner + +Permission to use, copy, modify, and/or distribute this software for any +purpose with or without fee is hereby granted, provided that the above +copyright notice and this permission notice appear in all copies. + +THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES +WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF +MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR +ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES +WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN +ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF +OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
diff --git a/deps/npm/node_modules/read-cmd-shim/README.md b/deps/npm/node_modules/read-cmd-shim/README.md index 2f0b5f6cc671e0..b139ea7ff638c4 100644 --- a/deps/npm/node_modules/read-cmd-shim/README.md +++ b/deps/npm/node_modules/read-cmd-shim/README.md @@ -14,6 +14,7 @@ readCmdShim('/path/to/shim.cmd', function (er, destination) { }) var destination = readCmdShim.sync('/path/to/shim.cmd') +``` ### readCmdShim(path, callback) diff --git a/deps/npm/node_modules/read-cmd-shim/index.js b/deps/npm/node_modules/read-cmd-shim/index.js index 6a2265449af268..f565ababe4e035 100644 --- a/deps/npm/node_modules/read-cmd-shim/index.js +++ b/deps/npm/node_modules/read-cmd-shim/index.js @@ -10,7 +10,7 @@ function extractPath (path, cmdshimContents) { } function extractPathFromCmd (cmdshimContents) { - var matches = cmdshimContents.match(/"%~dp0\\([^"]+?)"\s+%[*]/) + var matches = cmdshimContents.match(/"%(?:~dp0|dp0%)\\([^"]+?)"\s+%[*]/) return matches && matches[1] } diff --git a/deps/npm/node_modules/read-cmd-shim/package.json b/deps/npm/node_modules/read-cmd-shim/package.json index 8df095488f91b9..df2fe27e7b1705 100644 --- a/deps/npm/node_modules/read-cmd-shim/package.json +++ b/deps/npm/node_modules/read-cmd-shim/package.json @@ -1,33 +1,29 @@ { - "_args": [ - [ - "read-cmd-shim@1.0.1", - "/Users/rebecca/code/npm" - ] - ], - "_from": "read-cmd-shim@1.0.1", - "_id": "read-cmd-shim@1.0.1", + "_from": "read-cmd-shim@1.0.4", + "_id": "read-cmd-shim@1.0.4", "_inBundle": false, - "_integrity": "sha1-LV0Vd4ajfAVdIgd8MsU/gynpHHs=", + "_integrity": "sha512-Pqpl3qJ/QdOIjRYA0q5DND/gLvGOfpIz/fYVDGYpOXfW/lFrIttmLsBnd6IkyK10+JHU9zhsaudfvrQTBB9YFQ==", "_location": "/read-cmd-shim", "_phantomChildren": {}, "_requested": { "type": "version", "registry": true, - "raw": "read-cmd-shim@1.0.1", + "raw": "read-cmd-shim@1.0.4", "name": "read-cmd-shim", "escapedName": "read-cmd-shim", - "rawSpec": "1.0.1", + "rawSpec": "1.0.4", "saveSpec": null, - "fetchSpec": "1.0.1" + "fetchSpec": "1.0.4" }, "_requiredBy": [ + "#USER", "/", "/gentle-fs" ], - "_resolved": "https://registry.npmjs.org/read-cmd-shim/-/read-cmd-shim-1.0.1.tgz", - "_spec": "1.0.1", - "_where": "/Users/rebecca/code/npm", + "_resolved": "https://registry.npmjs.org/read-cmd-shim/-/read-cmd-shim-1.0.4.tgz", + "_shasum": "b4a53d43376211b45243f0072b6e603a8e37640d", + "_spec": "read-cmd-shim@1.0.4", + "_where": "/Users/claudiahdz/npm/cli", "author": { "name": "Rebecca Turner", "email": "me@re-becca.org", @@ -36,16 +32,21 @@ "bugs": { "url": "https://github.com/npm/read-cmd-shim/issues" }, + "bundleDependencies": false, "dependencies": { "graceful-fs": "^4.1.2" }, + "deprecated": false, "description": "Figure out what a cmd-shim is pointing at. 
This acts as the equivalent of fs.readlink.", "devDependencies": { - "cmd-shim": "^2.0.1", + "cmd-shim": "^3.0.0", "rimraf": "^2.4.3", "standard": "^5.2.2", - "tap": "^1.4.1" + "tap": "^12.7.0" }, + "files": [ + "index.js" + ], "homepage": "https://github.com/npm/read-cmd-shim#readme", "license": "ISC", "main": "index.js", @@ -55,7 +56,8 @@ "url": "git+https://github.com/npm/read-cmd-shim.git" }, "scripts": { - "test": "standard && tap test/*.js" + "pretest": "standard", + "test": "tap test/*.js --100" }, - "version": "1.0.1" + "version": "1.0.4" } diff --git a/deps/npm/node_modules/read-cmd-shim/test/integration.js b/deps/npm/node_modules/read-cmd-shim/test/integration.js deleted file mode 100644 index 269f964727cd6b..00000000000000 --- a/deps/npm/node_modules/read-cmd-shim/test/integration.js +++ /dev/null @@ -1,139 +0,0 @@ -'use strict' -var path = require('path') -var fs = require('graceful-fs') -var test = require('tap').test -var rimraf = require('rimraf') -var cmdShim = require('cmd-shim') -var readCmdShim = require('../index.js') -var workDir = path.join(__dirname, path.basename(__filename, '.js')) -var testShbang = path.join(workDir, 'test-shbang') -var testShbangCmd = testShbang + '.cmd' -var testShim = path.join(workDir, 'test') -var testShimCmd = testShim + '.cmd' - -test('setup', function (t) { - rimraf.sync(workDir) - fs.mkdirSync(workDir) - fs.writeFileSync(testShbang + '.js', '#!/usr/bin/env node\ntrue') - cmdShim(__filename, testShim, function (er) { - t.error(er) - cmdShim(testShbang + '.js', testShbang, function (er) { - t.error(er) - t.done() - }) - }) -}) - -test('async-read-no-shbang', function (t) { - t.plan(2) - readCmdShim(testShimCmd, function (er, dest) { - t.error(er) - t.is(dest, '..\\basic.js') - t.done() - }) -}) - -test('sync-read-no-shbang', function (t) { - t.plan(1) - var dest = readCmdShim.sync(testShimCmd) - t.is(dest, '..\\basic.js') - t.done() -}) - -test('async-read-shbang', function (t) { - t.plan(2) - readCmdShim(testShbangCmd, function (er, dest) { - t.error(er) - t.is(dest, 'test-shbang.js') - t.done() - }) -}) - -test('sync-read-shbang', function (t) { - t.plan(1) - var dest = readCmdShim.sync(testShbangCmd) - t.is(dest, 'test-shbang.js') - t.done() -}) - -test('async-read-no-shbang-cygwin', function (t) { - t.plan(2) - readCmdShim(testShim, function (er, dest) { - t.error(er) - t.is(dest, '../basic.js') - t.done() - }) -}) - -test('sync-read-no-shbang-cygwin', function (t) { - t.plan(1) - var dest = readCmdShim.sync(testShim) - t.is(dest, '../basic.js') - t.done() -}) - -test('async-read-shbang-cygwin', function (t) { - t.plan(2) - readCmdShim(testShbang, function (er, dest) { - t.error(er) - t.is(dest, 'test-shbang.js') - t.done() - }) -}) - -test('sync-read-shbang-cygwin', function (t) { - t.plan(1) - var dest = readCmdShim.sync(testShbang) - t.is(dest, 'test-shbang.js') - t.done() -}) - -test('async-read-dir', function (t) { - t.plan(2) - readCmdShim(workDir, function (er) { - t.ok(er) - t.is(er.code, 'EISDIR', "cmd-shims can't be directories") - t.done() - }) -}) - -test('sync-read-dir', function (t) { - t.plan(1) - t.throws(function () { readCmdShim.sync(workDir) }, "cmd-shims can't be directories") - t.done() -}) - -test('async-read-not-there', function (t) { - t.plan(2) - readCmdShim('/path/to/nowhere', function (er, dest) { - t.ok(er, 'missing files throw errors') - t.is(er.code, 'ENOENT', "cmd-shim file doesn't exist") - t.done() - }) -}) - -test('sync-read-not-there', function (t) { - t.plan(1) - t.throws(function () { 
readCmdShim.sync('/path/to/nowhere') }, "cmd-shim file doesn't exist") - t.done() -}) - -test('async-read-not-shim', function (t) { - t.plan(2) - readCmdShim(__filename, function (er, dest) { - t.ok(er) - t.is(er.code, 'ENOTASHIM', 'shim file specified is not a shim') - t.done() - }) -}) - -test('sync-read-not-shim', function (t) { - t.plan(1) - t.throws(function () { readCmdShim.sync(__filename) }, 'shim file specified is not a shim') - t.done() -}) - -test('cleanup', function (t) { - rimraf.sync(workDir) - t.done() -}) diff --git a/deps/npm/node_modules/read-package-json/CHANGELOG.md b/deps/npm/node_modules/read-package-json/CHANGELOG.md index 25e9896cece35d..b21f94d72792bc 100644 --- a/deps/npm/node_modules/read-package-json/CHANGELOG.md +++ b/deps/npm/node_modules/read-package-json/CHANGELOG.md @@ -2,6 +2,16 @@ All notable changes to this project will be documented in this file. See [standard-version](https://github.com/conventional-changelog/standard-version) for commit guidelines. + +# [2.1.0](https://github.com/npm/read-package-json/compare/v2.0.13...v2.1.0) (2019-08-13) + + +### Features + +* support bundleDependencies: true ([76f6f42](https://github.com/npm/read-package-json/commit/76f6f42)) + + + ## [2.0.13](https://github.com/npm/read-package-json/compare/v2.0.12...v2.0.13) (2018-03-08) diff --git a/deps/npm/node_modules/read-package-json/package.json b/deps/npm/node_modules/read-package-json/package.json index d4b3405c7a6af5..5c597498814bd3 100644 --- a/deps/npm/node_modules/read-package-json/package.json +++ b/deps/npm/node_modules/read-package-json/package.json @@ -1,36 +1,33 @@ { - "_args": [ - [ - "read-package-json@2.0.13", - "/Users/rebecca/code/npm" - ] - ], - "_from": "read-package-json@2.0.13", - "_id": "read-package-json@2.0.13", + "_from": "read-package-json@2.1.0", + "_id": "read-package-json@2.1.0", "_inBundle": false, - "_integrity": "sha512-/1dZ7TRZvGrYqE0UAfN6qQb5GYBsNcqS1C0tNK601CFOJmtHI7NIGXwetEPU/OtoFHZL3hDxm4rolFFVE9Bnmg==", + "_integrity": "sha512-KLhu8M1ZZNkMcrq1+0UJbR8Dii8KZUqB0Sha4mOx/bknfKI/fyrQVrG/YIt2UOtG667sD8+ee4EXMM91W9dC+A==", "_location": "/read-package-json", "_phantomChildren": {}, "_requested": { "type": "version", "registry": true, - "raw": "read-package-json@2.0.13", + "raw": "read-package-json@2.1.0", "name": "read-package-json", "escapedName": "read-package-json", - "rawSpec": "2.0.13", + "rawSpec": "2.1.0", "saveSpec": null, - "fetchSpec": "2.0.13" + "fetchSpec": "2.1.0" }, "_requiredBy": [ + "#USER", "/", "/init-package-json", "/libcipm", + "/libnpm", "/read-installed", "/read-package-tree" ], - "_resolved": "https://registry.npmjs.org/read-package-json/-/read-package-json-2.0.13.tgz", - "_spec": "2.0.13", - "_where": "/Users/rebecca/code/npm", + "_resolved": "https://registry.npmjs.org/read-package-json/-/read-package-json-2.1.0.tgz", + "_shasum": "e3d42e6c35ea5ae820d9a03ab0c7291217fc51d5", + "_spec": "read-package-json@2.1.0", + "_where": "/Users/isaacs/dev/npm/cli", "author": { "name": "Isaac Z. 
Schlueter", "email": "i@izs.me", @@ -39,6 +36,7 @@ "bugs": { "url": "https://github.com/npm/read-package-json/issues" }, + "bundleDependencies": false, "dependencies": { "glob": "^7.1.1", "graceful-fs": "^4.1.2", @@ -46,6 +44,7 @@ "normalize-package-data": "^2.0.0", "slash": "^1.0.0" }, + "deprecated": false, "description": "The thing npm uses to read package.json files with semantics and defaults and validation", "devDependencies": { "standard": "^11.0.0", @@ -73,5 +72,5 @@ "release": "standard-version -s", "test": "tap --nyc-arg=--all --coverage test/*.js" }, - "version": "2.0.13" + "version": "2.1.0" } diff --git a/deps/npm/node_modules/read-package-json/read-json.js b/deps/npm/node_modules/read-package-json/read-json.js index 243af8e271d3f2..9f8f81bd1cadac 100644 --- a/deps/npm/node_modules/read-package-json/read-json.js +++ b/deps/npm/node_modules/read-package-json/read-json.js @@ -17,6 +17,7 @@ module.exports = readJson // put more stuff on here to customize. readJson.extraSet = [ + bundleDependencies, gypfile, serverjs, scriptpath, @@ -324,6 +325,23 @@ function bins_ (file, data, bins, cb) { return cb(null, data) } +function bundleDependencies (file, data, cb) { + var bd = 'bundleDependencies' + var bdd = 'bundledDependencies' + // normalize key name + if (data[bdd] !== undefined) { + if (data[bd] === undefined) data[bd] = data[bdd] + delete data[bdd] + } + if (data[bd] === false) delete data[bd] + else if (data[bd] === true) { + data[bd] = Object.keys(data.dependencies || {}) + } else if (data[bd] !== undefined && !Array.isArray(data[bd])) { + delete data[bd] + } + return cb(null, data) +} + function githead (file, data, cb) { if (data.gitHead) return cb(null, data) var dir = path.dirname(file) @@ -385,17 +403,16 @@ function checkBinReferences_ (file, data, warn, cb) { keys.forEach(function (key) { var dirName = path.dirname(file) var relName = data.bin[key] - try { - var binPath = path.resolve(dirName, relName) - fs.stat(binPath, (err) => handleExists(relName, !err)) - } catch (error) { - if (error.message === 'Arguments to path.resolve must be strings' || error.message.indexOf('Path must be a string') === 0) { - warn('Bin filename for ' + key + ' is not a string: ' + util.inspect(relName)) - handleExists(relName, true) - } else { - cb(error) - } + if (typeof relName !== 'string') { + var msg = 'Bin filename for ' + key + + ' is not a string: ' + util.inspect(relName) + warn(msg) + delete data.bin[key] + handleExists(relName, true) + return } + var binPath = path.resolve(dirName, relName) + fs.stat(binPath, (err) => handleExists(relName, !err)) }) } diff --git a/deps/npm/node_modules/require-directory/.jshintrc b/deps/npm/node_modules/require-directory/.jshintrc deleted file mode 100644 index e14e4dcbd2975e..00000000000000 --- a/deps/npm/node_modules/require-directory/.jshintrc +++ /dev/null @@ -1,67 +0,0 @@ -{ - "maxerr" : 50, - "bitwise" : true, - "camelcase" : true, - "curly" : true, - "eqeqeq" : true, - "forin" : true, - "immed" : true, - "indent" : 2, - "latedef" : true, - "newcap" : true, - "noarg" : true, - "noempty" : true, - "nonew" : true, - "plusplus" : true, - "quotmark" : true, - "undef" : true, - "unused" : true, - "strict" : true, - "trailing" : true, - "maxparams" : false, - "maxdepth" : false, - "maxstatements" : false, - "maxcomplexity" : false, - "maxlen" : false, - "asi" : false, - "boss" : false, - "debug" : false, - "eqnull" : true, - "es5" : false, - "esnext" : false, - "moz" : false, - "evil" : false, - "expr" : true, - "funcscope" : true, - "globalstrict" 
: true, - "iterator" : true, - "lastsemic" : false, - "laxbreak" : false, - "laxcomma" : false, - "loopfunc" : false, - "multistr" : false, - "proto" : false, - "scripturl" : false, - "smarttabs" : false, - "shadow" : false, - "sub" : false, - "supernew" : false, - "validthis" : false, - "browser" : true, - "couch" : false, - "devel" : true, - "dojo" : false, - "jquery" : false, - "mootools" : false, - "node" : true, - "nonstandard" : false, - "prototypejs" : false, - "rhino" : false, - "worker" : false, - "wsh" : false, - "yui" : false, - "nomen" : true, - "onevar" : true, - "passfail" : false, - "white" : true -} diff --git a/deps/npm/node_modules/semver/README.md b/deps/npm/node_modules/semver/README.md index e5ccececf4808f..f8dfa5a0df5fc4 100644 --- a/deps/npm/node_modules/semver/README.md +++ b/deps/npm/node_modules/semver/README.md @@ -398,14 +398,15 @@ range, use the `satisfies(version, range)` function. * `coerce(version)`: Coerces a string to semver if possible -This aims to provide a very forgiving translation of a non-semver -string to semver. It looks for the first digit in a string, and -consumes all remaining characters which satisfy at least a partial semver -(e.g., `1`, `1.2`, `1.2.3`) up to the max permitted length (256 characters). -Longer versions are simply truncated (`4.6.3.9.2-alpha2` becomes `4.6.3`). -All surrounding text is simply ignored (`v3.4 replaces v3.3.1` becomes `3.4.0`). -Only text which lacks digits will fail coercion (`version one` is not valid). -The maximum length for any semver component considered for coercion is 16 characters; -longer components will be ignored (`10000000000000000.4.7.4` becomes `4.7.4`). -The maximum value for any semver component is `Integer.MAX_SAFE_INTEGER || (2**53 - 1)`; -higher value components are invalid (`9999999999999999.4.7.4` is likely invalid). +This aims to provide a very forgiving translation of a non-semver string to +semver. It looks for the first digit in a string, and consumes all +remaining characters which satisfy at least a partial semver (e.g., `1`, +`1.2`, `1.2.3`) up to the max permitted length (256 characters). Longer +versions are simply truncated (`4.6.3.9.2-alpha2` becomes `4.6.3`). All +surrounding text is simply ignored (`v3.4 replaces v3.3.1` becomes +`3.4.0`). Only text which lacks digits will fail coercion (`version one` +is not valid). The maximum length for any semver component considered for +coercion is 16 characters; longer components will be ignored +(`10000000000000000.4.7.4` becomes `4.7.4`). The maximum value for any +semver component is `Number.MAX_SAFE_INTEGER || (2**53 - 1)`; higher value +components are invalid (`9999999999999999.4.7.4` is likely invalid). 
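Each rule in the paragraph above can be checked directly against the `semver` package, for example:

```js
const semver = require('semver')

console.log(semver.coerce('v3.4 replaces v3.3.1').version)    // '3.4.0'  — surrounding text ignored
console.log(semver.coerce('4.6.3.9.2-alpha2').version)        // '4.6.3'  — extra components truncated
console.log(semver.coerce('10000000000000000.4.7.4').version) // '4.7.4'  — 17-char component skipped
console.log(semver.coerce('version one'))                     // null     — no digits, coercion fails
```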
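Further up, `read-package-json@2.1.0` starts honoring `bundleDependencies: true` and normalizes the legacy `bundledDependencies` spelling: the old key is renamed, `false` is dropped, `true` expands to every key of `dependencies`, and any other non-array value is discarded. The following condensed restatement of the new normalizer is illustrative only; `normalizeBundled` and the sample manifest are not part of the library's API:

```js
// Mirrors the bundleDependencies() normalizer added in read-json.js above.
function normalizeBundled (pkg) {
  // normalize the legacy key name first
  if (pkg.bundledDependencies !== undefined) {
    if (pkg.bundleDependencies === undefined) {
      pkg.bundleDependencies = pkg.bundledDependencies
    }
    delete pkg.bundledDependencies
  }
  if (pkg.bundleDependencies === false) {
    delete pkg.bundleDependencies
  } else if (pkg.bundleDependencies === true) {
    // `true` means "bundle everything in dependencies"
    pkg.bundleDependencies = Object.keys(pkg.dependencies || {})
  } else if (pkg.bundleDependencies !== undefined &&
             !Array.isArray(pkg.bundleDependencies)) {
    delete pkg.bundleDependencies
  }
  return pkg
}

console.log(normalizeBundled({
  dependencies: { abbrev: '^1.1.1', once: '^1.4.0' },
  bundleDependencies: true
}).bundleDependencies) // [ 'abbrev', 'once' ]
```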
diff --git a/deps/npm/node_modules/semver/package.json b/deps/npm/node_modules/semver/package.json index 6673fdeb17329e..7c0576b7fe0a81 100644 --- a/deps/npm/node_modules/semver/package.json +++ b/deps/npm/node_modules/semver/package.json @@ -1,19 +1,19 @@ { - "_from": "semver@5.7.0", - "_id": "semver@5.7.0", + "_from": "semver@5.7.1", + "_id": "semver@5.7.1", "_inBundle": false, - "_integrity": "sha512-Ya52jSX2u7QKghxeoFGpLwCtGlt7j0oY9DYb5apt9nPlJ42ID+ulTXESnt/qAQcoSERyZ5sl3LDIOw0nAn/5DA==", + "_integrity": "sha512-sauaDf/PZdVgrLTNYHRtpXa1iRiKcaebiKQ1BJdpQlWH2lCvexQdX55snPFyK7QzpudqbCI0qXFfOasHdyNDGQ==", "_location": "/semver", "_phantomChildren": {}, "_requested": { "type": "version", "registry": true, - "raw": "semver@5.7.0", + "raw": "semver@5.7.1", "name": "semver", "escapedName": "semver", - "rawSpec": "5.7.0", + "rawSpec": "5.7.1", "saveSpec": null, - "fetchSpec": "5.7.0" + "fetchSpec": "5.7.1" }, "_requiredBy": [ "#USER", @@ -39,9 +39,9 @@ "/read-installed", "/semver-diff" ], - "_resolved": "https://registry.npmjs.org/semver/-/semver-5.7.0.tgz", - "_shasum": "790a7cf6fea5459bac96110b29b60412dc8ff96b", - "_spec": "semver@5.7.0", + "_resolved": "https://registry.npmjs.org/semver/-/semver-5.7.1.tgz", + "_shasum": "a954f931aeba508d307bbf069eff0c01c96116f7", + "_spec": "semver@5.7.1", "_where": "/Users/isaacs/dev/npm/cli", "bin": { "semver": "./bin/semver" @@ -77,5 +77,5 @@ "tap": { "check-coverage": true }, - "version": "5.7.0" + "version": "5.7.1" } diff --git a/deps/npm/node_modules/through2/.nyc_output/28dcebf0-9c4a-47bb-900e-93fcef37bbe2.json b/deps/npm/node_modules/through2/.nyc_output/28dcebf0-9c4a-47bb-900e-93fcef37bbe2.json deleted file mode 100644 index 78548b9a4a5f03..00000000000000 --- a/deps/npm/node_modules/through2/.nyc_output/28dcebf0-9c4a-47bb-900e-93fcef37bbe2.json +++ /dev/null @@ -1 +0,0 @@ 
-{"/Users/rvagg/git/through2/through2.js":{"path":"/Users/rvagg/git/through2/through2.js","statementMap":{"0":{"start":{"line":1,"column":16},"end":{"line":1,"column":52}},"1":{"start":{"line":2,"column":16},"end":{"line":2,"column":40}},"2":{"start":{"line":5,"column":2},"end":{"line":5,"column":28}},"3":{"start":{"line":6,"column":2},"end":{"line":6,"column":25}},"4":{"start":{"line":9,"column":0},"end":{"line":9,"column":41}},"5":{"start":{"line":11,"column":0},"end":{"line":21,"column":1}},"6":{"start":{"line":12,"column":2},"end":{"line":12,"column":29}},"7":{"start":{"line":12,"column":23},"end":{"line":12,"column":29}},"8":{"start":{"line":13,"column":2},"end":{"line":13,"column":24}},"9":{"start":{"line":15,"column":13},"end":{"line":15,"column":17}},"10":{"start":{"line":16,"column":2},"end":{"line":20,"column":4}},"11":{"start":{"line":17,"column":4},"end":{"line":18,"column":29}},"12":{"start":{"line":18,"column":6},"end":{"line":18,"column":29}},"13":{"start":{"line":19,"column":4},"end":{"line":19,"column":22}},"14":{"start":{"line":25,"column":2},"end":{"line":25,"column":23}},"15":{"start":{"line":32,"column":2},"end":{"line":46,"column":3}},"16":{"start":{"line":33,"column":4},"end":{"line":37,"column":5}},"17":{"start":{"line":34,"column":6},"end":{"line":34,"column":27}},"18":{"start":{"line":35,"column":6},"end":{"line":35,"column":25}},"19":{"start":{"line":36,"column":6},"end":{"line":36,"column":20}},"20":{"start":{"line":39,"column":4},"end":{"line":40,"column":22}},"21":{"start":{"line":40,"column":6},"end":{"line":40,"column":22}},"22":{"start":{"line":42,"column":4},"end":{"line":43,"column":18}},"23":{"start":{"line":43,"column":6},"end":{"line":43,"column":18}},"24":{"start":{"line":45,"column":4},"end":{"line":45,"column":47}},"25":{"start":{"line":51,"column":0},"end":{"line":60,"column":2}},"26":{"start":{"line":52,"column":11},"end":{"line":52,"column":44}},"27":{"start":{"line":54,"column":2},"end":{"line":54,"column":27}},"28":{"start":{"line":56,"column":2},"end":{"line":57,"column":21}},"29":{"start":{"line":57,"column":4},"end":{"line":57,"column":21}},"30":{"start":{"line":59,"column":2},"end":{"line":59,"column":11}},"31":{"start":{"line":65,"column":0},"end":{"line":83,"column":2}},"32":{"start":{"line":67,"column":4},"end":{"line":68,"column":35}},"33":{"start":{"line":68,"column":6},"end":{"line":68,"column":35}},"34":{"start":{"line":70,"column":4},"end":{"line":70,"column":55}},"35":{"start":{"line":72,"column":4},"end":{"line":72,"column":49}},"36":{"start":{"line":75,"column":2},"end":{"line":75,"column":42}},"37":{"start":{"line":77,"column":2},"end":{"line":77,"column":43}},"38":{"start":{"line":79,"column":2},"end":{"line":80,"column":37}},"39":{"start":{"line":80,"column":4},"end":{"line":80,"column":37}},"40":{"start":{"line":82,"column":2},"end":{"line":82,"column":17}},"41":{"start":{"line":86,"column":0},"end":{"line":95,"column":2}},"42":{"start":{"line":87,"column":11},"end":{"line":87,"column":100}},"43":{"start":{"line":89,"column":2},"end":{"line":89,"column":27}},"44":{"start":{"line":91,"column":2},"end":{"line":92,"column":21}},"45":{"start":{"line":92,"column":4},"end":{"line":92,"column":21}},"46":{"start":{"line":94,"column":2},"end":{"line":94,"column":11}}},"fnMap":{"0":{"name":"DestroyableTransform","decl":{"start":{"line":4,"column":9},"end":{"line":4,"column":29}},"loc":{"start":{"line":4,"column":36},"end":{"line":7,"column":1}},"line":4},"1":{"name":"(anonymous_1)","decl":{"start":{"line":11,"column":41},"end":{"line":1
1,"column":42}},"loc":{"start":{"line":11,"column":55},"end":{"line":21,"column":1}},"line":11},"2":{"name":"(anonymous_2)","decl":{"start":{"line":16,"column":19},"end":{"line":16,"column":20}},"loc":{"start":{"line":16,"column":30},"end":{"line":20,"column":3}},"line":16},"3":{"name":"noop","decl":{"start":{"line":24,"column":9},"end":{"line":24,"column":13}},"loc":{"start":{"line":24,"column":37},"end":{"line":26,"column":1}},"line":24},"4":{"name":"through2","decl":{"start":{"line":31,"column":9},"end":{"line":31,"column":17}},"loc":{"start":{"line":31,"column":30},"end":{"line":47,"column":1}},"line":31},"5":{"name":"(anonymous_5)","decl":{"start":{"line":32,"column":9},"end":{"line":32,"column":10}},"loc":{"start":{"line":32,"column":46},"end":{"line":46,"column":3}},"line":32},"6":{"name":"(anonymous_6)","decl":{"start":{"line":51,"column":26},"end":{"line":51,"column":27}},"loc":{"start":{"line":51,"column":63},"end":{"line":60,"column":1}},"line":51},"7":{"name":"(anonymous_7)","decl":{"start":{"line":65,"column":31},"end":{"line":65,"column":32}},"loc":{"start":{"line":65,"column":68},"end":{"line":83,"column":1}},"line":65},"8":{"name":"Through2","decl":{"start":{"line":66,"column":11},"end":{"line":66,"column":19}},"loc":{"start":{"line":66,"column":31},"end":{"line":73,"column":3}},"line":66},"9":{"name":"(anonymous_9)","decl":{"start":{"line":86,"column":30},"end":{"line":86,"column":31}},"loc":{"start":{"line":86,"column":67},"end":{"line":95,"column":1}},"line":86}},"branchMap":{"0":{"loc":{"start":{"line":12,"column":2},"end":{"line":12,"column":29}},"type":"if","locations":[{"start":{"line":12,"column":2},"end":{"line":12,"column":29}},{"start":{"line":12,"column":2},"end":{"line":12,"column":29}}],"line":12},"1":{"loc":{"start":{"line":17,"column":4},"end":{"line":18,"column":29}},"type":"if","locations":[{"start":{"line":17,"column":4},"end":{"line":18,"column":29}},{"start":{"line":17,"column":4},"end":{"line":18,"column":29}}],"line":17},"2":{"loc":{"start":{"line":33,"column":4},"end":{"line":37,"column":5}},"type":"if","locations":[{"start":{"line":33,"column":4},"end":{"line":37,"column":5}},{"start":{"line":33,"column":4},"end":{"line":37,"column":5}}],"line":33},"3":{"loc":{"start":{"line":39,"column":4},"end":{"line":40,"column":22}},"type":"if","locations":[{"start":{"line":39,"column":4},"end":{"line":40,"column":22}},{"start":{"line":39,"column":4},"end":{"line":40,"column":22}}],"line":39},"4":{"loc":{"start":{"line":42,"column":4},"end":{"line":43,"column":18}},"type":"if","locations":[{"start":{"line":42,"column":4},"end":{"line":43,"column":18}},{"start":{"line":42,"column":4},"end":{"line":43,"column":18}}],"line":42},"5":{"loc":{"start":{"line":56,"column":2},"end":{"line":57,"column":21}},"type":"if","locations":[{"start":{"line":56,"column":2},"end":{"line":57,"column":21}},{"start":{"line":56,"column":2},"end":{"line":57,"column":21}}],"line":56},"6":{"loc":{"start":{"line":67,"column":4},"end":{"line":68,"column":35}},"type":"if","locations":[{"start":{"line":67,"column":4},"end":{"line":68,"column":35}},{"start":{"line":67,"column":4},"end":{"line":68,"column":35}}],"line":67},"7":{"loc":{"start":{"line":79,"column":2},"end":{"line":80,"column":37}},"type":"if","locations":[{"start":{"line":79,"column":2},"end":{"line":80,"column":37}},{"start":{"line":79,"column":2},"end":{"line":80,"column":37}}],"line":79},"8":{"loc":{"start":{"line":91,"column":2},"end":{"line":92,"column":21}},"type":"if","locations":[{"start":{"line":91,"column":2},"end":{"lin
e":92,"column":21}},{"start":{"line":91,"column":2},"end":{"line":92,"column":21}}],"line":91}},"s":{"0":1,"1":1,"2":16,"3":16,"4":1,"5":1,"6":3,"7":1,"8":2,"9":2,"10":2,"11":2,"12":0,"13":2,"14":0,"15":3,"16":15,"17":8,"18":8,"19":8,"20":15,"21":2,"22":15,"23":14,"24":15,"25":1,"26":6,"27":6,"28":6,"29":1,"30":6,"31":1,"32":16,"33":7,"34":9,"35":9,"36":8,"37":8,"38":8,"39":0,"40":8,"41":1,"42":1,"43":1,"44":1,"45":0,"46":1},"f":{"0":16,"1":3,"2":2,"3":0,"4":3,"5":15,"6":6,"7":8,"8":16,"9":1},"b":{"0":[1,2],"1":[0,2],"2":[8,7],"3":[2,13],"4":[14,1],"5":[1,5],"6":[7,9],"7":[0,8],"8":[0,1]},"_coverageSchema":"43e27e138ebf9cfc5966b082cf9a028302ed4184","hash":"0e6d9c139041e468222a5271346b1d4b44840ea4","contentHash":"e8d9fa1f16cd3fbb5bfdcbe9002323f0"}} \ No newline at end of file diff --git a/deps/npm/node_modules/validate-npm-package-name/.nyc_output/aa4ee25ac41a9c3c7ee37ce965e6d1ac.json b/deps/npm/node_modules/validate-npm-package-name/.nyc_output/aa4ee25ac41a9c3c7ee37ce965e6d1ac.json deleted file mode 100644 index 8540780a8efbe5..00000000000000 --- a/deps/npm/node_modules/validate-npm-package-name/.nyc_output/aa4ee25ac41a9c3c7ee37ce965e6d1ac.json +++ /dev/null @@ -1 +0,0 @@ -{"/Users/chris/projects/npm/validate-npm-package-name/index.js":{"path":"/Users/chris/projects/npm/validate-npm-package-name/index.js","statementMap":{"0":{"start":{"line":3,"column":27},"end":{"line":3,"column":68}},"1":{"start":{"line":4,"column":15},"end":{"line":4,"column":34}},"2":{"start":{"line":5,"column":16},"end":{"line":8,"column":1}},"3":{"start":{"line":10,"column":15},"end":{"line":91,"column":1}},"4":{"start":{"line":11,"column":17},"end":{"line":11,"column":19}},"5":{"start":{"line":12,"column":15},"end":{"line":12,"column":17}},"6":{"start":{"line":14,"column":2},"end":{"line":17,"column":3}},"7":{"start":{"line":15,"column":4},"end":{"line":15,"column":38}},"8":{"start":{"line":16,"column":4},"end":{"line":16,"column":33}},"9":{"start":{"line":19,"column":2},"end":{"line":22,"column":3}},"10":{"start":{"line":20,"column":4},"end":{"line":20,"column":43}},"11":{"start":{"line":21,"column":4},"end":{"line":21,"column":33}},"12":{"start":{"line":24,"column":2},"end":{"line":27,"column":3}},"13":{"start":{"line":25,"column":4},"end":{"line":25,"column":40}},"14":{"start":{"line":26,"column":4},"end":{"line":26,"column":33}},"15":{"start":{"line":29,"column":2},"end":{"line":31,"column":3}},"16":{"start":{"line":30,"column":4},"end":{"line":30,"column":56}},"17":{"start":{"line":33,"column":2},"end":{"line":35,"column":3}},"18":{"start":{"line":34,"column":4},"end":{"line":34,"column":50}},"19":{"start":{"line":37,"column":2},"end":{"line":39,"column":3}},"20":{"start":{"line":38,"column":4},"end":{"line":38,"column":55}},"21":{"start":{"line":41,"column":2},"end":{"line":43,"column":3}},"22":{"start":{"line":42,"column":4},"end":{"line":42,"column":65}},"23":{"start":{"line":46,"column":2},"end":{"line":50,"column":4}},"24":{"start":{"line":47,"column":4},"end":{"line":49,"column":5}},"25":{"start":{"line":48,"column":6},"end":{"line":48,"column":61}},"26":{"start":{"line":55,"column":2},"end":{"line":59,"column":4}},"27":{"start":{"line":56,"column":4},"end":{"line":58,"column":5}},"28":{"start":{"line":57,"column":6},"end":{"line":57,"column":55}},"29":{"start":{"line":63,"column":2},"end":{"line":65,"column":3}},"30":{"start":{"line":64,"column":4},"end":{"line":64,"column":72}},"31":{"start":{"line":68,"column":2},"end":{"line":70,"column":3}},"32":{"start":{"line":69,"column":4},"end":{"line":69,"column
":63}},"33":{"start":{"line":72,"column":2},"end":{"line":74,"column":3}},"34":{"start":{"line":73,"column":4},"end":{"line":73,"column":78}},"35":{"start":{"line":76,"column":2},"end":{"line":88,"column":3}},"36":{"start":{"line":78,"column":20},"end":{"line":78,"column":52}},"37":{"start":{"line":79,"column":4},"end":{"line":85,"column":5}},"38":{"start":{"line":80,"column":17},"end":{"line":80,"column":29}},"39":{"start":{"line":81,"column":16},"end":{"line":81,"column":28}},"40":{"start":{"line":82,"column":6},"end":{"line":84,"column":7}},"41":{"start":{"line":83,"column":8},"end":{"line":83,"column":37}},"42":{"start":{"line":87,"column":4},"end":{"line":87,"column":64}},"43":{"start":{"line":90,"column":2},"end":{"line":90,"column":31}},"44":{"start":{"line":93,"column":0},"end":{"line":93,"column":52}},"45":{"start":{"line":95,"column":11},"end":{"line":105,"column":1}},"46":{"start":{"line":96,"column":15},"end":{"line":101,"column":3}},"47":{"start":{"line":102,"column":2},"end":{"line":102,"column":53}},"48":{"start":{"line":102,"column":31},"end":{"line":102,"column":53}},"49":{"start":{"line":103,"column":2},"end":{"line":103,"column":49}},"50":{"start":{"line":103,"column":29},"end":{"line":103,"column":49}},"51":{"start":{"line":104,"column":2},"end":{"line":104,"column":15}}},"fnMap":{"0":{"name":"(anonymous_0)","decl":{"start":{"line":10,"column":32},"end":{"line":10,"column":33}},"loc":{"start":{"line":10,"column":48},"end":{"line":91,"column":1}}},"1":{"name":"(anonymous_1)","decl":{"start":{"line":46,"column":20},"end":{"line":46,"column":21}},"loc":{"start":{"line":46,"column":47},"end":{"line":50,"column":3}}},"2":{"name":"(anonymous_2)","decl":{"start":{"line":55,"column":19},"end":{"line":55,"column":20}},"loc":{"start":{"line":55,"column":38},"end":{"line":59,"column":3}}},"3":{"name":"(anonymous_3)","decl":{"start":{"line":95,"column":11},"end":{"line":95,"column":12}},"loc":{"start":{"line":95,"column":39},"end":{"line":105,"column":1}}}},"branchMap":{"0":{"loc":{"start":{"line":14,"column":2},"end":{"line":17,"column":3}},"type":"if","locations":[{"start":{"line":14,"column":2},"end":{"line":17,"column":3}},{"start":{"line":14,"column":2},"end":{"line":17,"column":3}}]},"1":{"loc":{"start":{"line":19,"column":2},"end":{"line":22,"column":3}},"type":"if","locations":[{"start":{"line":19,"column":2},"end":{"line":22,"column":3}},{"start":{"line":19,"column":2},"end":{"line":22,"column":3}}]},"2":{"loc":{"start":{"line":24,"column":2},"end":{"line":27,"column":3}},"type":"if","locations":[{"start":{"line":24,"column":2},"end":{"line":27,"column":3}},{"start":{"line":24,"column":2},"end":{"line":27,"column":3}}]},"3":{"loc":{"start":{"line":29,"column":2},"end":{"line":31,"column":3}},"type":"if","locations":[{"start":{"line":29,"column":2},"end":{"line":31,"column":3}},{"start":{"line":29,"column":2},"end":{"line":31,"column":3}}]},"4":{"loc":{"start":{"line":33,"column":2},"end":{"line":35,"column":3}},"type":"if","locations":[{"start":{"line":33,"column":2},"end":{"line":35,"column":3}},{"start":{"line":33,"column":2},"end":{"line":35,"column":3}}]},"5":{"loc":{"start":{"line":37,"column":2},"end":{"line":39,"column":3}},"type":"if","locations":[{"start":{"line":37,"column":2},"end":{"line":39,"column":3}},{"start":{"line":37,"column":2},"end":{"line":39,"column":3}}]},"6":{"loc":{"start":{"line":41,"column":2},"end":{"line":43,"column":3}},"type":"if","locations":[{"start":{"line":41,"column":2},"end":{"line":43,"column":3}},{"start":{"line":41,"column":2},"end":
{"line":43,"column":3}}]},"7":{"loc":{"start":{"line":47,"column":4},"end":{"line":49,"column":5}},"type":"if","locations":[{"start":{"line":47,"column":4},"end":{"line":49,"column":5}},{"start":{"line":47,"column":4},"end":{"line":49,"column":5}}]},"8":{"loc":{"start":{"line":56,"column":4},"end":{"line":58,"column":5}},"type":"if","locations":[{"start":{"line":56,"column":4},"end":{"line":58,"column":5}},{"start":{"line":56,"column":4},"end":{"line":58,"column":5}}]},"9":{"loc":{"start":{"line":63,"column":2},"end":{"line":65,"column":3}},"type":"if","locations":[{"start":{"line":63,"column":2},"end":{"line":65,"column":3}},{"start":{"line":63,"column":2},"end":{"line":65,"column":3}}]},"10":{"loc":{"start":{"line":68,"column":2},"end":{"line":70,"column":3}},"type":"if","locations":[{"start":{"line":68,"column":2},"end":{"line":70,"column":3}},{"start":{"line":68,"column":2},"end":{"line":70,"column":3}}]},"11":{"loc":{"start":{"line":72,"column":2},"end":{"line":74,"column":3}},"type":"if","locations":[{"start":{"line":72,"column":2},"end":{"line":74,"column":3}},{"start":{"line":72,"column":2},"end":{"line":74,"column":3}}]},"12":{"loc":{"start":{"line":76,"column":2},"end":{"line":88,"column":3}},"type":"if","locations":[{"start":{"line":76,"column":2},"end":{"line":88,"column":3}},{"start":{"line":76,"column":2},"end":{"line":88,"column":3}}]},"13":{"loc":{"start":{"line":79,"column":4},"end":{"line":85,"column":5}},"type":"if","locations":[{"start":{"line":79,"column":4},"end":{"line":85,"column":5}},{"start":{"line":79,"column":4},"end":{"line":85,"column":5}}]},"14":{"loc":{"start":{"line":82,"column":6},"end":{"line":84,"column":7}},"type":"if","locations":[{"start":{"line":82,"column":6},"end":{"line":84,"column":7}},{"start":{"line":82,"column":6},"end":{"line":84,"column":7}}]},"15":{"loc":{"start":{"line":82,"column":10},"end":{"line":82,"column":78}},"type":"binary-expr","locations":[{"start":{"line":82,"column":10},"end":{"line":82,"column":43}},{"start":{"line":82,"column":47},"end":{"line":82,"column":78}}]},"16":{"loc":{"start":{"line":97,"column":25},"end":{"line":97,"column":69}},"type":"binary-expr","locations":[{"start":{"line":97,"column":25},"end":{"line":97,"column":44}},{"start":{"line":97,"column":48},"end":{"line":97,"column":69}}]},"17":{"loc":{"start":{"line":102,"column":2},"end":{"line":102,"column":53}},"type":"if","locations":[{"start":{"line":102,"column":2},"end":{"line":102,"column":53}},{"start":{"line":102,"column":2},"end":{"line":102,"column":53}}]},"18":{"loc":{"start":{"line":103,"column":2},"end":{"line":103,"column":49}},"type":"if","locations":[{"start":{"line":103,"column":2},"end":{"line":103,"column":49}},{"start":{"line":103,"column":2},"end":{"line":103,"column":49}}]}},"s":{"0":1,"1":1,"2":1,"3":1,"4":22,"5":22,"6":22,"7":0,"8":0,"9":22,"10":0,"11":0,"12":22,"13":0,"14":0,"15":22,"16":2,"17":22,"18":1,"19":22,"20":1,"21":22,"22":2,"23":22,"24":44,"25":2,"26":22,"27":726,"28":1,"29":22,"30":1,"31":22,"32":1,"33":22,"34":2,"35":22,"36":6,"37":6,"38":5,"39":5,"40":5,"41":2,"42":4,"43":20,"44":1,"45":1,"46":22,"47":22,"48":17,"49":22,"50":12,"51":22},"f":{"0":22,"1":44,"2":726,"3":22},"b":{"0":[0,22],"1":[0,22],"2":[0,22],"3":[2,20],"4":[1,21],"5":[1,21],"6":[2,20],"7":[2,42],"8":[1,725],"9":[1,21],"10":[1,21],"11":[2,20],"12":[6,16],"13":[5,1],"14":[2,3],"15":[5,2],"16":[22,12],"17":[17,5],"18":[12,10]},"_coverageSchema":"332fd63041d2c1bcb487cc26dd0d5f7d97098a6c","hash":"7a01205bc1fdf589bdf194d23f1405400131fa00","contentHash":"8b2210ff664ca
b8b0916540357b1d2f9_10.1.2"}} \ No newline at end of file diff --git a/deps/npm/node_modules/validate-npm-package-name/.nyc_output/bb918173e62b9517f55b630902d07ef4.json b/deps/npm/node_modules/validate-npm-package-name/.nyc_output/bb918173e62b9517f55b630902d07ef4.json deleted file mode 100644 index 9e26dfeeb6e641..00000000000000 --- a/deps/npm/node_modules/validate-npm-package-name/.nyc_output/bb918173e62b9517f55b630902d07ef4.json +++ /dev/null @@ -1 +0,0 @@ -{} \ No newline at end of file diff --git a/deps/npm/node_modules/xtend/.jshintrc b/deps/npm/node_modules/xtend/.jshintrc deleted file mode 100644 index 77887b5f0f2efc..00000000000000 --- a/deps/npm/node_modules/xtend/.jshintrc +++ /dev/null @@ -1,30 +0,0 @@ -{ - "maxdepth": 4, - "maxstatements": 200, - "maxcomplexity": 12, - "maxlen": 80, - "maxparams": 5, - - "curly": true, - "eqeqeq": true, - "immed": true, - "latedef": false, - "noarg": true, - "noempty": true, - "nonew": true, - "undef": true, - "unused": "vars", - "trailing": true, - - "quotmark": true, - "expr": true, - "asi": true, - - "browser": false, - "esnext": true, - "devel": false, - "node": false, - "nonstandard": false, - - "predef": ["require", "module", "__dirname", "__filename"] -} diff --git a/deps/npm/package.json b/deps/npm/package.json index 58631e96bb75d1..9b64923c418bd3 100644 --- a/deps/npm/package.json +++ b/deps/npm/package.json @@ -1,5 +1,5 @@ { - "version": "6.10.3", + "version": "6.11.3", "name": "npm", "description": "a package manager for JavaScript", "keywords": [ @@ -39,16 +39,16 @@ "ansistyles": "~0.1.3", "aproba": "^2.0.0", "archy": "~1.0.0", - "bin-links": "^1.1.2", + "bin-links": "^1.1.3", "bluebird": "^3.5.5", "byte-size": "^5.0.1", - "cacache": "^12.0.2", + "cacache": "^12.0.3", "call-limit": "^1.1.1", "chownr": "^1.1.2", "ci-info": "^2.0.0", "cli-columns": "^3.1.2", "cli-table3": "^0.5.1", - "cmd-shim": "~2.0.2", + "cmd-shim": "^3.0.3", "columnify": "~1.5.4", "config-chain": "^1.1.12", "detect-indent": "~5.0.0", @@ -59,9 +59,9 @@ "find-npm-prefix": "^1.0.2", "fs-vacuum": "~1.2.10", "fs-write-stream-atomic": "~1.0.10", - "gentle-fs": "^2.0.1", + "gentle-fs": "^2.2.1", "glob": "^7.1.4", - "graceful-fs": "^4.2.0", + "graceful-fs": "^4.2.2", "has-unicode": "~2.0.1", "hosted-git-info": "^2.8.2", "iferr": "^1.0.2", @@ -73,7 +73,7 @@ "is-cidr": "^3.0.0", "json-parse-better-errors": "^1.0.2", "lazy-property": "~1.0.0", - "libcipm": "^4.0.0", + "libcipm": "^4.0.3", "libnpm": "^3.0.1", "libnpmaccess": "^3.0.2", "libnpmhook": "^5.0.3", @@ -99,10 +99,10 @@ "npm-audit-report": "^1.3.2", "npm-cache-filename": "~1.0.2", "npm-install-checks": "~3.0.0", - "npm-lifecycle": "^3.1.2", - "npm-package-arg": "^6.1.0", + "npm-lifecycle": "^3.1.3", + "npm-package-arg": "^6.1.1", "npm-packlist": "^1.4.4", - "npm-pick-manifest": "^2.2.3", + "npm-pick-manifest": "^3.0.2", "npm-profile": "^4.0.2", "npm-registry-fetch": "^4.0.0", "npm-user-validate": "~1.0.0", @@ -110,16 +110,16 @@ "once": "~1.4.0", "opener": "^1.5.1", "osenv": "^0.1.5", - "pacote": "^9.5.4", + "pacote": "^9.5.8", "path-is-inside": "~1.0.2", "promise-inflight": "~1.0.1", "qrcode-terminal": "^0.12.0", "query-string": "^6.8.2", "qw": "~1.0.1", "read": "~1.0.7", - "read-cmd-shim": "~1.0.1", + "read-cmd-shim": "^1.0.4", "read-installed": "~4.0.3", - "read-package-json": "^2.0.13", + "read-package-json": "^2.1.0", "read-package-tree": "^5.3.1", "readable-stream": "^3.4.0", "readdir-scoped-modules": "^1.1.0", @@ -127,7 +127,7 @@ "retry": "^0.12.0", "rimraf": "^2.6.3", "safe-buffer": "^5.1.2", - "semver": 
"^5.7.0", + "semver": "^5.7.1", "sha": "^3.0.0", "slide": "~1.1.6", "sorted-object": "~2.0.1", @@ -281,7 +281,7 @@ "marked": "^0.6.3", "marked-man": "^0.6.0", "npm-registry-couchapp": "^2.7.3", - "npm-registry-mock": "^1.2.1", + "npm-registry-mock": "^1.3.0", "require-inject": "^1.4.4", "sprintf-js": "^1.1.2", "standard": "^11.0.1", @@ -302,9 +302,9 @@ "sudotest": "sudo npm run tap -- \"test/tap/*.js\"", "sudotest:nocleanup": "sudo NO_TEST_CLEANUP=1 npm run tap -- \"test/tap/*.js\"", "posttest": "rimraf test/npm_cache*", - "test-coverage": "npm run tap-cover -- \"test/tap/*.js\" \"test/network/*.js\" \"test/broken-under-*/*.js\"", - "test-tap": "npm run tap -- \"test/tap/*.js\" \"test/network/*.js\" \"test/broken-under-*/*.js\"", - "test-node": "tap --timeout 240 \"test/tap/*.js\" \"test/network/*.js\" \"test/broken-under-nyc*/*.js\"" + "test-coverage": "npm run tap-cover -- \"test/tap/*.js\" \"test/network/*.js\"", + "test-tap": "npm run tap -- \"test/tap/*.js\" \"test/network/*.js\"", + "test-node": "tap --timeout 240 \"test/tap/*.js\" \"test/network/*.js\"" }, "license": "Artistic-2.0" } diff --git a/deps/npm/scripts/update-authors.sh b/deps/npm/scripts/update-authors.sh index 75a6e549b16dd5..ac7b99137a9874 100755 --- a/deps/npm/scripts/update-authors.sh +++ b/deps/npm/scripts/update-authors.sh @@ -1,6 +1,6 @@ #!/bin/sh -git log --reverse --format='%aN <%aE>' | perl -wnE ' +git log --use-mailmap --reverse --format='%aN <%aE>' | perl -wnE ' BEGIN { say "# Authors sorted by whether or not they\x27re me"; } diff --git a/deps/npm/test/common-tap.js b/deps/npm/test/common-tap.js index 44b68d719de1a6..83a61f4bdbefa9 100644 --- a/deps/npm/test/common-tap.js +++ b/deps/npm/test/common-tap.js @@ -7,6 +7,11 @@ var readCmdShim = require('read-cmd-shim') var isWindows = require('../lib/utils/is-windows.js') var Bluebird = require('bluebird') +if (isWindows) { + var PATH = process.env.PATH ? 'PATH' : 'Path' + process.env[PATH] += ';C:\\Program Files\\Git\\mingw64\\libexec\\git-core' +} + // remove any git envs so that we don't mess with the main repo // when running git subprocesses in tests Object.keys(process.env).filter(k => /^GIT/.test(k)).forEach( @@ -103,6 +108,7 @@ ourenv.npm_config_globalconfig = exports.npm_config_globalconfig = configCommon. ourenv.npm_config_global_style = 'false' ourenv.npm_config_legacy_bundling = 'false' ourenv.npm_config_fetch_retries = '0' +ourenv.npm_config_update_notifier = 'false' ourenv.random_env_var = 'foo' // suppress warnings about using a prerelease version of node ourenv.npm_config_node_version = process.version.replace(/-.*$/, '') @@ -179,7 +185,7 @@ exports.makeGitRepo = function (params, cb) { var added = params.added || ['package.json'] var message = params.message || 'stub repo' - var opts = { cwd: root, env: { PATH: process.env.PATH } } + var opts = { cwd: root, env: { PATH: process.env.PATH || process.env.Path } } var commands = [ git.chainableExec(['init'], opts), git.chainableExec(['config', 'user.name', user], opts), diff --git a/deps/npm/test/fixtures/config/userconfig-with-gc b/deps/npm/test/fixtures/config/userconfig-with-gc index cf774bb883d966..b00d5195bd8361 100644 --- a/deps/npm/test/fixtures/config/userconfig-with-gc +++ b/deps/npm/test/fixtures/config/userconfig-with-gc @@ -1,4 +1,4 @@ -globalconfig = /Users/isaacs/dev/npm/cli/test/fixtures/config/globalconfig +globalconfig = /Users/claudiahdz/npm/cli/test/fixtures/config/globalconfig email = i@izs.me env-thing = ${random_env_var} init.author.name = Isaac Z. 
Schlueter diff --git a/deps/npm/test/tap/aliases.js b/deps/npm/test/tap/aliases.js index 0a0a9dfd953b1f..21a68ac50f7a58 100644 --- a/deps/npm/test/tap/aliases.js +++ b/deps/npm/test/tap/aliases.js @@ -116,18 +116,18 @@ test('installs an npm: protocol alias package', t => { t.comment(stdout) t.comment(stderr) const parsed = JSON.parse(stdout) - t.deepEqual(parsed, { + t.match(parsed, { foo: { current: '1.2.3', wanted: '1.2.4', latest: '1.2.4', - location: 'node_modules/foo' + location: /node_modules[/\\]foo/ }, bar: { current: 'npm:foo@1.2.3', wanted: 'npm:foo@1.2.4', latest: 'npm:foo@1.2.4', - location: 'node_modules/bar' + location: /node_modules[/\\]bar/ } }, 'both regular and aliased dependency reported') return common.npm([ diff --git a/deps/npm/test/tap/anon-cli-metrics.js b/deps/npm/test/tap/anon-cli-metrics.js index cb1f878a4cb0aa..729d9e607a4a02 100644 --- a/deps/npm/test/tap/anon-cli-metrics.js +++ b/deps/npm/test/tap/anon-cli-metrics.js @@ -54,7 +54,7 @@ var fixture = new Tacks(Dir({ name: 'slow', version: '1.0.0', scripts: { - preinstall: "node -e 'setTimeout(function(){}, 500)'" + preinstall: 'node -e "setTimeout(function(){}, 500)"' } }) }), diff --git a/deps/npm/test/tap/ci-permissions.js b/deps/npm/test/tap/ci-permissions.js new file mode 100644 index 00000000000000..c73d464236540e --- /dev/null +++ b/deps/npm/test/tap/ci-permissions.js @@ -0,0 +1,53 @@ +const t = require('tap') +const tar = require('tar') +const common = require('../common-tap.js') +const pkg = common.pkg +const rimraf = require('rimraf') +const { writeFileSync, statSync, chmodSync } = require('fs') +const { resolve } = require('path') +const mkdirp = require('mkdirp') + +t.test('setup', t => { + mkdirp.sync(resolve(pkg, 'package')) + const pj = resolve(pkg, 'package', 'package.json') + writeFileSync(pj, JSON.stringify({ + name: 'foo', + version: '1.2.3' + })) + chmodSync(pj, 0o640) + tar.c({ + sync: true, + file: resolve(pkg, 'foo.tgz'), + gzip: true, + cwd: pkg + }, ['package']) + writeFileSync(resolve(pkg, 'package.json'), JSON.stringify({ + name: 'root', + version: '1.2.3', + dependencies: { + foo: 'file:foo.tgz' + } + })) + t.end() +}) + +t.test('run install to generate package-lock', t => + common.npm(['install'], { cwd: pkg }).then(([code]) => t.equal(code, 0))) + +t.test('remove node_modules', t => rimraf(resolve(pkg, 'node_modules'), t.end)) + +t.test('run ci and check modes', t => + common.npm(['ci'], { cwd: pkg, stdio: 'inherit' }).then(([code]) => { + t.equal(code, 0) + const file = resolve(pkg, 'node_modules', 'foo', 'package.json') + // bitwise AND against 0o705 so that we can detect whether + // the file is world-readable. + // Typical unix systems would leave the file 0o644 + // Travis-ci and some other Linux systems will be 0o664 + // Windows is 0o666 + // The regression this is detecting (ie, the default in the tarball) + // leaves the file as 0o640. 
+ // Bitwise-AND 0o705 should always result in 0o604, and never 0o600 + const mode = statSync(file).mode & 0o705 + t.equal(mode, 0o604) + })) diff --git a/deps/npm/test/tap/ci.js b/deps/npm/test/tap/ci.js index 9150f26efeedce..3f3e251d03f1d7 100644 --- a/deps/npm/test/tap/ci.js +++ b/deps/npm/test/tap/ci.js @@ -19,6 +19,9 @@ const EXEC_OPTS = { cwd: testDir } const PKG = { name: 'top', version: '1.2.3', + scripts: { + install: 'node -p process.env.npm_config_foo' + }, dependencies: { optimist: '0.6.0', clean: '2.1.6' @@ -77,6 +80,7 @@ test('basic installation', (t) => { .then(() => fixture.create(testDir)) .then(() => common.npm([ 'ci', + '--foo=asdf', '--registry', common.registry, '--loglevel', 'warn' ], EXEC_OPTS)) @@ -88,7 +92,7 @@ test('basic installation', (t) => { t.equal(stderr.trim(), '', 'no output on stderr') t.match( stdout.trim(), - /^added 6 packages in \d+(?:\.\d+)?s$/, + /\nasdf\nadded 6 packages in \d+(?:\.\d+)?s$/, 'no warnings on stderr, and final output has right number of packages' ) return fs.readdirAsync(path.join(testDir, 'node_modules')) @@ -144,6 +148,7 @@ test('supports npm-shrinkwrap.json as well', (t) => { .then(() => fixture.create(testDir)) .then(() => common.npm([ 'ci', + '--foo=asdf', '--registry', common.registry, '--loglevel', 'warn' ], EXEC_OPTS)) @@ -155,7 +160,7 @@ test('supports npm-shrinkwrap.json as well', (t) => { t.equal(stderr.trim(), '', 'no output on stderr') t.match( stdout.trim(), - /^added 6 packages in \d+(?:\.\d+)?s$/, + /\nasdf\nadded 6 packages in \d+(?:\.\d+)?s$/, 'no warnings on stderr, and final output has right number of packages' ) }) @@ -194,6 +199,7 @@ test('removes existing node_modules/ before installing', (t) => { .then(() => fixture.create(testDir)) .then(() => common.npm([ 'ci', + '--foo=asdf', '--registry', common.registry, '--loglevel', 'warn' ], EXEC_OPTS)) @@ -232,6 +238,7 @@ test('errors if package-lock.json missing', (t) => { .then(() => fixture.create(testDir)) .then(() => common.npm([ 'ci', + '--foo=asdf', '--registry', common.registry, '--loglevel', 'warn' ], EXEC_OPTS)) @@ -268,6 +275,7 @@ test('errors if package-lock.json invalid', (t) => { .then(() => fixture.create(testDir)) .then(() => common.npm([ 'ci', + '--foo=asdf', '--registry', common.registry, '--loglevel', 'warn' ], EXEC_OPTS)) diff --git a/deps/npm/test/tap/correct-mkdir.js b/deps/npm/test/tap/correct-mkdir.js index 5c2e9771dfc083..30907d725ca08a 100644 --- a/deps/npm/test/tap/correct-mkdir.js +++ b/deps/npm/test/tap/correct-mkdir.js @@ -1,10 +1,16 @@ /* eslint-disable camelcase */ -var test = require('tap').test +var t = require('tap') +var test = t.test var assert = require('assert') var requireInject = require('require-inject') const common = require('../common-tap.js') var cache_dir = common.pkg +if (process.platform === 'win32') { + t.plan(0, 'windows does not use correct-mkdir behavior') + process.exit(0) +} + test('correct-mkdir: no race conditions', function (t) { var mock_fs = {} var did_hook = false diff --git a/deps/npm/test/tap/dist-tag.js b/deps/npm/test/tap/dist-tag.js index 0567964d35a230..a5cce5d2a0f93c 100644 --- a/deps/npm/test/tap/dist-tag.js +++ b/deps/npm/test/tap/dist-tag.js @@ -46,6 +46,12 @@ function mocks (server) { server.delete('/-/package/@scoped%2fanother/dist-tags/c') .reply(200, { c: '7.7.7' }) + // using a scoped registry + server.get('/-/package/@scoped%2ffoo/dist-tags') + .reply(200, { latest: '2.0.0', a: '0.0.2', b: '0.6.0' }) + server.delete('/-/package/@scoped%2ffoo/dist-tags/b') + .reply(200, { b: '0.6.0' }) + // rm 
server.get('/-/package/@scoped%2fanother/dist-tags') .reply(200, { latest: '4.0.0' }) @@ -232,6 +238,31 @@ test('npm dist-tags rm @scoped/another nonexistent', function (t) { ) }) +test('npm dist-tags rm with registry assigned to scope', function (t) { + fs.writeFileSync(path.resolve(pkg, '.npmrc'), ` +@scoped:registry=${common.registry} +${common.registry.replace(/^https?:/, '')}:_authToken=taken +`) + + common.npm( + [ + 'dist-tags', + 'rm', '@scoped/foo', 'b', + '--loglevel', 'silent', + '--userconfig', path.resolve(pkg, '.npmrc') + ], + { cwd: pkg }, + function (er, code, stdout, stderr) { + t.ifError(er, 'npm access') + t.notOk(code, 'exited OK') + t.notOk(stderr, 'no error output') + t.equal(stdout, '-b: @scoped/foo@0.6.0\n') + + t.end() + } + ) +}) + test('cleanup', function (t) { t.pass('cleaned up') rimraf.sync(pkg) diff --git a/deps/npm/test/tap/ignore-install-link.js b/deps/npm/test/tap/ignore-install-link.js index 7b3b1be3b9d5ee..a2caa23dfdb561 100644 --- a/deps/npm/test/tap/ignore-install-link.js +++ b/deps/npm/test/tap/ignore-install-link.js @@ -1,5 +1,5 @@ if (process.platform === 'win32') { - console.log('ok - symlinks are weird on windows, skip this test') + require('tap').plan(0, 'symlinks are weird on windows, skip this test') process.exit(0) } var common = require('../common-tap.js') diff --git a/deps/npm/test/tap/install-from-local-multipath.js b/deps/npm/test/tap/install-from-local-multipath.js new file mode 100644 index 00000000000000..83dbdadde9e55e --- /dev/null +++ b/deps/npm/test/tap/install-from-local-multipath.js @@ -0,0 +1,182 @@ +var fs = require('graceful-fs') +var path = require('path') + +var mkdirp = require('mkdirp') +var osenv = require('osenv') +var rimraf = require('rimraf') +var test = require('tap').test + +var common = require('../common-tap') + +var root = common.pkg +// Allow running this test on older commits (useful for bisecting) +if (!root) { + var main = require.main.filename + root = path.resolve(path.dirname(main), path.basename(main, '.js')) +} +var pkg = path.join(root, 'parent') + +var EXEC_OPTS = { cwd: pkg } + +var parent = { + name: 'parent', + version: '0.0.0', + dependencies: { + 'child-1-1': 'file:../children/child-1-1', + 'child-1-2': 'file:../children/child-1-2', + 'child-2': 'file:../children/child-2' + } +} + +var parentLock = { + 'name': 'parent', + 'version': '1.0.0', + 'lockfileVersion': 1, + 'requires': true, + 'dependencies': { + 'child-1-1': { + 'version': 'file:../children/child-1-1', + 'requires': { + 'child-2': 'file:../children/child-2' + } + }, + 'child-1-2': { + 'version': 'file:../children/child-1-2', + 'requires': { + 'child-1-1': 'file:../children/child-1-1', + 'child-2': 'file:../children/child-2' + } + }, + 'child-2': { + 'version': 'file:../children/child-2' + } + } +} + +var child11 = { + name: 'parent', + version: '0.0.0', + 'dependencies': { + 'child-2': 'file:../child-2' + } +} + +var child11Lock = { + 'name': 'child-1-1', + 'version': '1.0.0', + 'lockfileVersion': 1, + 'requires': true, + 'dependencies': { + 'child-2': { + 'version': 'file:../child-2' + } + } +} + +var child12 = { + 'name': 'child-1-2', + 'version': '1.0.0', + 'dependencies': { + 'child-1-1': 'file:../child-1-1', + 'child-2': 'file:../child-2' + } +} + +var child12Lock = { + 'name': 'child-1-2', + 'version': '1.0.0', + 'lockfileVersion': 1, + 'requires': true, + 'dependencies': { + 'child-1-1': { + 'version': 'file:../child-1-1', + 'requires': { + 'child-2': 'file:../child-2' + } + }, + 'child-2': { + 'version': 'file:../child-2' + } + } 
+} + +var child2 = { + 'name': 'child-2', + 'version': '1.0.0', + 'dependencies': {} +} + +var child2Lock = { + 'name': 'child-2', + 'version': '1.0.0', + 'lockfileVersion': 1, + 'requires': true, + 'dependencies': {} +} + +test('setup', function (t) { + rimraf.sync(pkg) + mkdirp.sync(pkg) + fs.writeFileSync( + path.join(pkg, 'package.json'), + JSON.stringify(parent, null, 2) + ) + + fs.writeFileSync( + path.join(pkg, 'package-lock.json'), + JSON.stringify(parentLock, null, 2) + ) + + mkdirp.sync(path.join(root, 'children', 'child-1-1')) + fs.writeFileSync( + path.join(root, 'children', 'child-1-1', 'package.json'), + JSON.stringify(child11, null, 2) + ) + fs.writeFileSync( + path.join(root, 'children', 'child-1-1', 'package-lock.json'), + JSON.stringify(child11Lock, null, 2) + ) + + mkdirp.sync(path.join(root, 'children', 'child-1-2')) + fs.writeFileSync( + path.join(root, 'children', 'child-1-2', 'package.json'), + JSON.stringify(child12, null, 2) + ) + fs.writeFileSync( + path.join(root, 'children', 'child-1-2', 'package-lock.json'), + JSON.stringify(child12Lock, null, 2) + ) + + mkdirp.sync(path.join(root, 'children', 'child-2')) + fs.writeFileSync( + path.join(root, 'children', 'child-2', 'package.json'), + JSON.stringify(child2, null, 2) + ) + fs.writeFileSync( + path.join(root, 'children', 'child-2', 'package-lock.json'), + JSON.stringify(child2Lock, null, 2) + ) + + process.chdir(pkg) + t.end() +}) + +test('\'npm install\' should install local packages', function (t) { + common.npm( + [ + 'install', '.' + ], + EXEC_OPTS, + function (err, code) { + t.ifError(err, 'error should not exist') + t.notOk(code, 'npm install exited with code 0') + t.end() + } + ) +}) + +test('cleanup', function (t) { + process.chdir(osenv.tmpdir()) + rimraf.sync(root) + t.end() +}) diff --git a/deps/npm/test/tap/install-link-metadeps-locally.js b/deps/npm/test/tap/install-link-metadeps-locally.js new file mode 100644 index 00000000000000..136fd46d10bbf0 --- /dev/null +++ b/deps/npm/test/tap/install-link-metadeps-locally.js @@ -0,0 +1,52 @@ +// XXX Remove in npm v7, when this is no longer how we do things +const t = require('tap') +const common = require('../common-tap.js') +const pkg = common.pkg +const mkdirp = require('mkdirp') +const { writeFileSync, statSync } = require('fs') +const { resolve } = require('path') +const mr = require('npm-registry-mock') +const rimraf = require('rimraf') + +t.test('setup', t => { + mkdirp.sync(resolve(pkg, 'node_modules')) + mkdirp.sync(resolve(pkg, 'foo')) + writeFileSync(resolve(pkg, 'foo', 'package.json'), JSON.stringify({ + name: 'foo', + version: '1.2.3', + dependencies: { + underscore: '*' + } + })) + + writeFileSync(resolve(pkg, 'package.json'), JSON.stringify({ + name: 'root', + version: '1.2.3', + dependencies: { + foo: 'file:foo' + } + })) + + mr({ port: common.port }, (er, s) => { + if (er) { + throw er + } + t.parent.teardown(() => s.close()) + t.end() + }) +}) + +t.test('initial install to create package-lock', + t => common.npm(['install', '--registry', common.registry], { cwd: pkg }) + .then(([code]) => t.equal(code, 0, 'command worked'))) + +t.test('remove node_modules', t => + rimraf(resolve(pkg, 'node_modules'), t.end)) + +t.test('install again from package-lock', t => + common.npm(['install', '--registry', common.registry], { cwd: pkg }) + .then(([code]) => { + t.equal(code, 0, 'command worked') + const underscore = resolve(pkg, 'node_modules', 'underscore') + t.equal(statSync(underscore).isDirectory(), true, 'underscore installed') + })) diff --git 
a/deps/npm/test/tap/install-link-metadeps-subfolders.js b/deps/npm/test/tap/install-link-metadeps-subfolders.js new file mode 100644 index 00000000000000..7544c8a4ebe841 --- /dev/null +++ b/deps/npm/test/tap/install-link-metadeps-subfolders.js @@ -0,0 +1,68 @@ +const t = require('tap') +const common = require('../common-tap.js') +const mkdirp = require('mkdirp') +const { writeFileSync, readFileSync } = require('fs') +const { resolve } = require('path') +const pkg = common.pkg +const app = resolve(pkg, 'app') +const lib = resolve(pkg, 'lib') +const moda = resolve(lib, 'module-a') +const modb = resolve(lib, 'module-b') + +const rimraf = require('rimraf') + +t.test('setup', t => { + mkdirp.sync(app) + mkdirp.sync(moda) + mkdirp.sync(modb) + + writeFileSync(resolve(app, 'package.json'), JSON.stringify({ + name: 'app', + version: '1.2.3', + dependencies: { + moda: 'file:../lib/module-a' + } + })) + + writeFileSync(resolve(moda, 'package.json'), JSON.stringify({ + name: 'moda', + version: '1.2.3', + dependencies: { + modb: 'file:../module-b' + } + })) + + writeFileSync(resolve(modb, 'package.json'), JSON.stringify({ + name: 'modb', + version: '1.2.3' + })) + + t.end() +}) + +t.test('initial install to create package-lock', + t => common.npm(['install'], { cwd: app }) + .then(([code]) => t.equal(code, 0, 'command worked'))) + +t.test('remove node_modules', t => + rimraf(resolve(pkg, 'node_modules'), t.end)) + +t.test('install again from package-lock', t => + common.npm(['install'], { cwd: app }) + .then(([code]) => { + t.equal(code, 0, 'command worked') + // verify that module-b is linked under module-a + const depPkg = resolve( + app, + 'node_modules', + 'moda', + 'node_modules', + 'modb', + 'package.json' + ) + const data = JSON.parse(readFileSync(depPkg, 'utf8')) + t.strictSame(data, { + name: 'modb', + version: '1.2.3' + }) + })) diff --git a/deps/npm/test/tap/install-link-scripts.js b/deps/npm/test/tap/install-link-scripts.js index bff4dd5ef0106e..3553e6377370aa 100644 --- a/deps/npm/test/tap/install-link-scripts.js +++ b/deps/npm/test/tap/install-link-scripts.js @@ -1,3 +1,7 @@ +if (process.platform === 'win32') { + require('tap').plan(0, 'links are weird on windows, skip this') + process.exit(0) +} var fs = require('graceful-fs') var path = require('path') diff --git a/deps/npm/test/tap/lifecycle-INIT_CWD.js b/deps/npm/test/tap/lifecycle-INIT_CWD.js index e035cf86d40337..3e9c1c8257f460 100644 --- a/deps/npm/test/tap/lifecycle-INIT_CWD.js +++ b/deps/npm/test/tap/lifecycle-INIT_CWD.js @@ -15,7 +15,7 @@ var json = { name: 'init-cwd', version: '1.0.0', scripts: { - initcwd: 'echo "$INIT_CWD"' + initcwd: process.platform === 'win32' ? 
'echo %INIT_CWD%' : 'echo "$INIT_CWD"' } } diff --git a/deps/npm/test/broken-under-nyc-and-travis/lifecycle-path.js b/deps/npm/test/tap/lifecycle-path.js similarity index 93% rename from deps/npm/test/broken-under-nyc-and-travis/lifecycle-path.js rename to deps/npm/test/tap/lifecycle-path.js index 6209319b412f65..70fb8391971912 100644 --- a/deps/npm/test/broken-under-nyc-and-travis/lifecycle-path.js +++ b/deps/npm/test/tap/lifecycle-path.js @@ -2,14 +2,14 @@ var fs = require('fs') var path = require('path') var mkdirp = require('mkdirp') -var osenv = require('osenv') var rimraf = require('rimraf') +var which = require('which') var test = require('tap').test var common = require('../common-tap.js') var isWindows = require('../../lib/utils/is-windows.js') -var pkg = path.resolve(__dirname, 'lifecycle-path') +var pkg = common.pkg var PATH if (isWindows) { @@ -21,9 +21,10 @@ if (isWindows) { PATH = '/bin:/usr/bin' } +var systemNode = which.sync('node', { nothrow: true, path: PATH }) +// the path to the system wide node (null if none) + test('setup', function (t) { - cleanup() - mkdirp.sync(pkg) fs.writeFileSync( path.join(pkg, 'package.json'), JSON.stringify({}, null, 2) @@ -183,6 +184,12 @@ function checkPath (testconfig, t) { 'The node binary used for scripts is.*' + process.execPath.replace(/[/\\]/g, '.')) t.match(stderr, regex, 'reports the current binary vs conflicting') + } else if (systemNode !== null) { + var regexSystemNode = new RegExp( + 'The node binary used for scripts is.*' + + systemNode.replace(/[/\\]/g, '.') + ) + t.match(stderr, regexSystemNode, 'reports the system binary vs conflicting') } else { t.match(stderr, /there is no node binary in the current PATH/, 'informs user that there is no node binary in PATH') } @@ -201,13 +208,3 @@ function checkPath (testconfig, t) { t.end() }) } - -test('cleanup', function (t) { - cleanup() - t.end() -}) - -function cleanup () { - process.chdir(osenv.tmpdir()) - rimraf.sync(pkg) -} diff --git a/deps/npm/test/tap/ls-l-depth-0.js b/deps/npm/test/tap/ls-l-depth-0.js index 8459bd52032cbf..b2516c9fa24496 100644 --- a/deps/npm/test/tap/ls-l-depth-0.js +++ b/deps/npm/test/tap/ls-l-depth-0.js @@ -67,10 +67,12 @@ test('#6311: npm ll --depth=0 duplicates listing', function (t) { if (err) throw err t.notOk(code, 'npm install exited cleanly') t.is(stderr, '', 'npm install ran silently') - t.equal( + t.match( stdout.trim(), - 'add\tunderscore\t1.5.1\tnode_modules/underscore\t\t\n' + - 'add\tglock\t1.8.7\tnode_modules/glock', + new RegExp( + '^add\tunderscore\t1[.]5[.]1\tnode_modules[\\\\/]underscore\t\t[\n]' + + 'add\tglock\t1[.]8[.]7\tnode_modules[\\\\/]glock$' + ), 'got expected install output' ) diff --git a/deps/npm/test/tap/outdated-depth.js b/deps/npm/test/tap/outdated-depth.js index 5cf7c7edac9c4b..8e272e60027b16 100644 --- a/deps/npm/test/tap/outdated-depth.js +++ b/deps/npm/test/tap/outdated-depth.js @@ -47,6 +47,7 @@ test('outdated depth zero', function (t) { mr({ port: common.port }, function (er, s) { npm.load( { + depth: 0, loglevel: 'silent', registry: common.registry }, @@ -54,12 +55,24 @@ test('outdated depth zero', function (t) { npm.install('.', function (er) { if (er) throw new Error(er) npm.outdated(function (err, d) { - t.ifError(err, 'npm outdated ran without error') + if (err) { + throw err + } t.is(process.exitCode, 1, 'exit code set to 1') process.exitCode = 0 t.deepEqual(d[0], expected) - s.close() - t.end() + t.equal(d.length, 1) + npm.config.set('depth', 1) + npm.outdated(function (err, d) { + t.equal(d.length, 2) + if 
(err) { + throw err + } + t.is(process.exitCode, 1, 'exit code set to 1') + process.exitCode = 0 + s.close() + t.end() + }) }) }) } diff --git a/deps/npm/test/tap/outdated-long.js b/deps/npm/test/tap/outdated-long.js index 0a338815983eb8..8cd2ceadb96fc9 100644 --- a/deps/npm/test/tap/outdated-long.js +++ b/deps/npm/test/tap/outdated-long.js @@ -79,6 +79,7 @@ test('it should not throw', function (t) { t.is(process.exitCode, 1, 'exit code set to 1') process.exitCode = 0 console.log = originalLog + output[0] = output[0].replace(/\\/g, '/') t.same(output, expOut) t.same(d, expData) diff --git a/deps/npm/test/tap/prepublish-only.js b/deps/npm/test/tap/prepublish-only.js index 57af26038a7bd6..56881494d23814 100644 --- a/deps/npm/test/tap/prepublish-only.js +++ b/deps/npm/test/tap/prepublish-only.js @@ -17,7 +17,6 @@ var tmpdir = join(pkg, 'tmp') var env = { 'npm_config_cache': cachedir, 'npm_config_tmp': tmpdir, - 'npm_config_prefix': pkg, 'npm_config_global': 'false' } @@ -64,7 +63,6 @@ var fixture = new Tacks(Dir({ })) test('setup', function (t) { - cleanup() fixture.create(pkg) mr({port: common.port, throwOnUnmatched: true}, function (err, s) { t.ifError(err, 'registry mocked successfully') @@ -131,12 +129,7 @@ test('test', function (t) { }) test('cleanup', function (t) { - cleanup() server.close() t.pass('cleaned up') t.end() }) - -function cleanup () { - fixture.remove(pkg) -} diff --git a/deps/npm/test/tap/prune.js b/deps/npm/test/tap/prune.js index 936ee3a91b6b81..ce2a300b1a5378 100644 --- a/deps/npm/test/tap/prune.js +++ b/deps/npm/test/tap/prune.js @@ -104,7 +104,7 @@ test('production: npm prune', function (t) { ], EXEC_OPTS, function (err, code, stdout) { if (err) throw err t.notOk(code, 'exit ok') - t.equal(stdout.trim(), 'remove\tmkdirp\t0.3.5\tnode_modules/mkdirp') + t.equal(stdout.trim().replace(/\\/g, '/'), 'remove\tmkdirp\t0.3.5\tnode_modules/mkdirp') t.end() }) }) diff --git a/deps/npm/test/tap/shared-linked.js b/deps/npm/test/tap/shared-linked.js index cbdbcf66c1612a..517be4699d9738 100644 --- a/deps/npm/test/tap/shared-linked.js +++ b/deps/npm/test/tap/shared-linked.js @@ -133,7 +133,7 @@ test('shared-linked', function (t) { common.npm(config.concat(['install', '--dry-run', '--parseable']), options, function (err, code, stdout, stderr) { if (err) throw err t.is(code, 0) - var got = stdout.trim().replace(/\s+\n/g, '\n') + var got = stdout.trim().replace(/\s+\n/g, '\n').replace(/\\/g, '/') var expected = 'add\tminimist\t0.0.5\tnode_modules/minimist\n' + 'add\twordwrap\t0.0.2\tnode_modules/wordwrap\n' + diff --git a/deps/npm/test/tap/shrinkwrap-lifecycle-cwd.js b/deps/npm/test/tap/shrinkwrap-lifecycle-cwd.js index f8927df5a29698..78f40f52976285 100644 --- a/deps/npm/test/tap/shrinkwrap-lifecycle-cwd.js +++ b/deps/npm/test/tap/shrinkwrap-lifecycle-cwd.js @@ -12,7 +12,6 @@ var testdir = path.join(basedir, 'testdir') var cachedir = common.cache var globaldir = path.join(basedir, 'global') var tmpdir = path.join(basedir, 'tmp') -var escapeArg = require('../../lib/utils/escape-arg.js') var conf = { cwd: testdir, @@ -39,8 +38,8 @@ var fixture = new Tacks(Dir({ // add this to the end of the command to preserve the debug log: // || mv npm-debug.log real-debug.log // removed for windows compat reasons - abc: escapeArg(common.nodeBin) + ' ' + escapeArg(common.bin) + ' shrinkwrap', - shrinkwrap: escapeArg(common.nodeBin) + ' scripts/shrinkwrap.js' + abc: 'node ' + JSON.stringify(common.bin) + ' shrinkwrap', + shrinkwrap: 'node scripts/shrinkwrap.js' } }), scripts: Dir({ diff --git 
a/deps/npm/test/tap/shrinkwrap-save-with-existing-dev-deps.js b/deps/npm/test/tap/shrinkwrap-save-with-existing-dev-deps.js index fcbbeeffddf4d3..8a3f449fa88653 100644 --- a/deps/npm/test/tap/shrinkwrap-save-with-existing-dev-deps.js +++ b/deps/npm/test/tap/shrinkwrap-save-with-existing-dev-deps.js @@ -18,7 +18,13 @@ var example_pkg = path.join(example, 'package.json') var installed = path.join(example, 'node_modules', 'installed') var installed_pkg = path.join(installed, 'package.json') -var EXEC_OPTS = { cwd: example } +// Ignore max listeners warnings until that gets fixed +var env = Object.keys(process.env).reduce((set, key) => { + if (!set[key]) set[key] = process.env[key] + return set +}, { NODE_NO_WARNINGS: '1' }) + +var EXEC_OPTS = { cwd: example, env: env } var installme_pkg_json = { name: 'installme', diff --git a/deps/npm/test/broken-under-nyc-and-travis/whoami.js b/deps/npm/test/tap/whoami.js similarity index 88% rename from deps/npm/test/broken-under-nyc-and-travis/whoami.js rename to deps/npm/test/tap/whoami.js index a5668b121059c0..9f4bf4266b74cd 100644 --- a/deps/npm/test/broken-under-nyc-and-travis/whoami.js +++ b/deps/npm/test/tap/whoami.js @@ -9,14 +9,14 @@ var rimraf = require('rimraf') var opts = { cwd: __dirname } -var FIXTURE_PATH = path.resolve(__dirname, 'fixture_npmrc') +var FIXTURE_PATH = path.resolve(common.pkg, 'fixture_npmrc') test('npm whoami with basic auth', function (t) { var s = '//registry.lvh.me/:username = wombat\n' + '//registry.lvh.me/:_password = YmFkIHBhc3N3b3Jk\n' + '//registry.lvh.me/:email = lindsay@wdu.org.au\n' fs.writeFileSync(FIXTURE_PATH, s, 'ascii') - fs.chmodSync(FIXTURE_PATH, '0444') + fs.chmodSync(FIXTURE_PATH, 0o644) common.npm( [ @@ -31,17 +31,16 @@ test('npm whoami with basic auth', function (t) { t.equal(stderr, '', 'got nothing on stderr') t.equal(code, 0, 'exit ok') t.equal(stdout, 'wombat\n', 'got username') - rimraf.sync(FIXTURE_PATH) t.end() } ) }) -test('npm whoami with bearer auth', { timeout: 2 * 1000 }, function (t) { +test('npm whoami with bearer auth', { timeout: 6000 }, function (t) { var s = '//localhost:' + common.port + '/:_authToken = wombat-developers-union\n' fs.writeFileSync(FIXTURE_PATH, s, 'ascii') - fs.chmodSync(FIXTURE_PATH, '0444') + fs.chmodSync(FIXTURE_PATH, 0o644) function verify (req, res) { t.equal(req.method, 'GET') diff --git a/deps/uv/AUTHORS b/deps/uv/AUTHORS index 5d5866d3ff2405..8c3d342d33e6f8 100644 --- a/deps/uv/AUTHORS +++ b/deps/uv/AUTHORS @@ -396,3 +396,10 @@ Nan Xiao Ben Davies Nhan Khong Crunkle +Tomas Krizek +Konstantin Podsvirov +seny +Vladimir Karnushin +MaYuming +Eneas U de Queiroz +Daniel Hahler diff --git a/deps/uv/CMakeLists.txt b/deps/uv/CMakeLists.txt index bf7990f745fee2..6f18f3397d0129 100644 --- a/deps/uv/CMakeLists.txt +++ b/deps/uv/CMakeLists.txt @@ -1,5 +1,5 @@ # TODO: determine CMAKE_SYSTEM_NAME on OS/390. Currently assumes "OS/390". 
-cmake_minimum_required(VERSION 3.0) +cmake_minimum_required(VERSION 2.8.12) project(libuv) if(MSVC) @@ -116,6 +116,7 @@ set(uv_test_sources test/test-shutdown-eof.c test/test-shutdown-twice.c test/test-signal-multiple-loops.c + test/test-signal-pending-on-close.c test/test-signal.c test/test-socket-buffer-size.c test/test-spawn.c @@ -127,6 +128,7 @@ set(uv_test_sources test/test-tcp-close-accept.c test/test-tcp-close-while-connecting.c test/test-tcp-close.c + test/test-tcp-close-reset.c test/test-tcp-connect-error-after-write.c test/test-tcp-connect-error.c test/test-tcp-connect-timeout.c @@ -317,6 +319,7 @@ if(CMAKE_SYSTEM_NAME STREQUAL "OS/390") list(APPEND uv_defines _OPEN_MSGQ_EXT) list(APPEND uv_defines _OPEN_SYS_FILE_EXT) list(APPEND uv_defines _OPEN_SYS_IF_EXT) + list(APPEND uv_defines _OPEN_SYS_SOCK_EXT3) list(APPEND uv_defines _OPEN_SYS_SOCK_IPV6) list(APPEND uv_defines _UNIX03_SOURCE) list(APPEND uv_defines _UNIX03_THREADS) @@ -340,15 +343,17 @@ if(APPLE OR CMAKE_SYSTEM_NAME MATCHES "DragonFly|FreeBSD|Linux|NetBSD|OpenBSD") endif() add_library(uv SHARED ${uv_sources}) -target_compile_definitions(uv PRIVATE ${uv_defines} BUILDING_UV_SHARED=1) +target_compile_definitions(uv + INTERFACE USING_UV_SHARED=1 + PRIVATE ${uv_defines} BUILDING_UV_SHARED=1) target_compile_options(uv PRIVATE ${uv_cflags}) -target_include_directories(uv PRIVATE include src) +target_include_directories(uv PUBLIC include PRIVATE src) target_link_libraries(uv ${uv_libraries}) add_library(uv_a STATIC ${uv_sources}) target_compile_definitions(uv_a PRIVATE ${uv_defines}) target_compile_options(uv_a PRIVATE ${uv_cflags}) -target_include_directories(uv_a PRIVATE include src) +target_include_directories(uv_a PUBLIC include PRIVATE src) target_link_libraries(uv_a ${uv_libraries}) option(libuv_buildtests "Build the unit tests when BUILD_TESTING is enabled." ON) @@ -360,7 +365,6 @@ if(BUILD_TESTING AND libuv_buildtests) target_compile_definitions(uv_run_tests PRIVATE ${uv_defines} USING_UV_SHARED=1) target_compile_options(uv_run_tests PRIVATE ${uv_cflags}) - target_include_directories(uv_run_tests PRIVATE include) target_link_libraries(uv_run_tests uv ${uv_test_libraries}) add_test(NAME uv_test COMMAND uv_run_tests @@ -368,7 +372,6 @@ if(BUILD_TESTING AND libuv_buildtests) add_executable(uv_run_tests_a ${uv_test_sources}) target_compile_definitions(uv_run_tests_a PRIVATE ${uv_defines}) target_compile_options(uv_run_tests_a PRIVATE ${uv_cflags}) - target_include_directories(uv_run_tests_a PRIVATE include) target_link_libraries(uv_run_tests_a uv_a ${uv_test_libraries}) add_test(NAME uv_test_a COMMAND uv_run_tests_a @@ -383,6 +386,10 @@ if(UNIX) endforeach(x) file(STRINGS configure.ac configure_ac REGEX ^AC_INIT) string(REGEX MATCH [0-9]+[.][0-9]+[.][0-9]+ PACKAGE_VERSION "${configure_ac}") + string(REGEX MATCH ^[0-9]+ UV_VERSION_MAJOR "${PACKAGE_VERSION}") + # The version in the filename is mirroring the behaviour of autotools. 
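Two of the CMake changes here matter to embedders: `include/` becomes a PUBLIC include directory on both targets, and the shared `uv` library now exports `USING_UV_SHARED=1` to its dependents, which selects the dllimport decorator on Windows (the `uv.h` hunk further down adds an `#error` when both defines are present). A condensed sketch of the decorator logic those defines drive; the authoritative version lives in `include/uv.h`:

```c
/* Condensed sketch of libuv's import/export decorators; see include/uv.h. */
#if defined(BUILDING_UV_SHARED) && defined(USING_UV_SHARED)
# error "Define either BUILDING_UV_SHARED or USING_UV_SHARED, not both."
#endif

#ifdef _WIN32
# if defined(BUILDING_UV_SHARED)
#  define UV_EXTERN __declspec(dllexport)  /* compiling uv.dll itself */
# elif defined(USING_UV_SHARED)
#  define UV_EXTERN __declspec(dllimport)  /* linking against uv.dll */
# else
#  define UV_EXTERN                        /* static build: no decorator */
# endif
#else
# define UV_EXTERN __attribute__((visibility("default")))
#endif
```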
+ set_target_properties(uv PROPERTIES VERSION ${UV_VERSION_MAJOR}.0.0 + SOVERSION ${UV_VERSION_MAJOR}) set(includedir ${CMAKE_INSTALL_PREFIX}/${CMAKE_INSTALL_INCLUDEDIR}) set(libdir ${CMAKE_INSTALL_PREFIX}/${CMAKE_INSTALL_LIBDIR}) set(prefix ${CMAKE_INSTALL_PREFIX}) diff --git a/deps/uv/ChangeLog b/deps/uv/ChangeLog index d440b5507719c5..093579de8e4985 100644 --- a/deps/uv/ChangeLog +++ b/deps/uv/ChangeLog @@ -1,3 +1,78 @@ +2019.09.10, Version 1.32.0 (Stable), 697bea87b3a0b0e9b4e5ff86b39d1dedb70ee46d + +Changes since version 1.31.0: + +* misc: enable stalebot (Saúl Ibarra Corretgé) + +* win: map ERROR_ENVVAR_NOT_FOUND to UV_ENOENT (cjihrig) + +* win: use L'\0' as UTF-16 null terminator (cjihrig) + +* win: support retrieving empty env variables (cjihrig) + +* unix,stream: fix returned error codes (Santiago Gimeno) + +* test: fix typo in DYLD_LIBRARY_PATH (Ben Noordhuis) + +* unix,signal: keep handle active if pending signal (Santiago Gimeno) + +* openbsd: fix uv_cpu_info (Santiago Gimeno) + +* src: move uv_free_cpu_info to uv-common.c (Santiago Gimeno) + +* tcp: add uv_tcp_close_reset method (Santiago Gimeno) + +* test: fix udp-multicast-join tests (Santiago Gimeno) + +* test: remove assertion in fs_statfs test (cjihrig) + +* doc: clarify uv_buf_t usage in uv_alloc_cb (Tomas Krizek) + +* win: fix typo in preprocessor expression (Konstantin Podsvirov) + +* timer: fix uv_timer_start on closing timer (seny) + +* udp: add source-specific multicast support (Vladimir Karnushin) + +* udp: fix error return values (Santiago Gimeno) + +* udp: drop IPV6_SSM_SUPPORT macro (Santiago Gimeno) + +* udp: fix uv__udp_set_source_membership6 (Santiago Gimeno) + +* udp: use sockaddr_storage instead of union (Santiago Gimeno) + +* build,zos: add _OPEN_SYS_SOCK_EXT3 flag (Santiago Gimeno) + +* test: add specific source multicast tests (Santiago Gimeno) + +* include: map EILSEQ error code (cjihrig) + +* win, tty: improve SIGWINCH performance (Bartosz Sosnowski) + +* build: fix ios build error (MaYuming) + +* aix: replace ECONNRESET with EOF if already closed (Milad Farazmand) + +* build: add cmake library VERSION, SOVERSION (Eneas U de Queiroz) + +* build: make include/ public in CMakeLists.txt (Ben Noordhuis) + +* build: export USING_UV_SHARED=1 to cmake deps (Ben Noordhuis) + +* build: cmake_minimum_required(VERSION 2.8.12) (Daniel Hahler) + +* aix: Fix broken cmpxchgi() XL C++ specialization. 
(Andrew Paprocki) + +* test: fix -Wsign-compare warning (Ben Noordhuis) + +* unix: simplify open(O_CLOEXEC) feature detection (Ben Noordhuis) + +* unix: fix UV_FS_O_DIRECT definition on Linux (Joran Dirk Greef) + +* doc: uv_handle_t documentation suggestion (Daniel Bevenius) + + 2019.08.10, Version 1.31.0 (Stable), 0a6771cee4c15184c924bfe9d397bdd0c3b206ba Changes since version 1.30.1: diff --git a/deps/uv/Makefile.am b/deps/uv/Makefile.am index 6b11c9349ce9dc..099b0efb084343 100644 --- a/deps/uv/Makefile.am +++ b/deps/uv/Makefile.am @@ -248,6 +248,7 @@ test_run_tests_SOURCES = test/blackhole-server.c \ test/test-shutdown-eof.c \ test/test-shutdown-twice.c \ test/test-signal-multiple-loops.c \ + test/test-signal-pending-on-close.c \ test/test-signal.c \ test/test-socket-buffer-size.c \ test/test-spawn.c \ @@ -259,6 +260,7 @@ test_run_tests_SOURCES = test/blackhole-server.c \ test/test-tcp-close-accept.c \ test/test-tcp-close-while-connecting.c \ test/test-tcp-close.c \ + test/test-tcp-close-reset.c \ test/test-tcp-create-socket-early.c \ test/test-tcp-connect-error-after-write.c \ test/test-tcp-connect-error.c \ @@ -517,6 +519,7 @@ libuv_la_CFLAGS += -D_UNIX03_THREADS \ -D_XOPEN_SOURCE_EXTENDED \ -D_ALL_SOURCE \ -D_LARGE_TIME_API \ + -D_OPEN_SYS_SOCK_EXT3 \ -D_OPEN_SYS_SOCK_IPV6 \ -D_OPEN_SYS_FILE_EXT \ -DUV_PLATFORM_SEM_T=int \ diff --git a/deps/uv/README.md b/deps/uv/README.md index b55c3a9238a1d1..f9daaa1cea153c 100644 --- a/deps/uv/README.md +++ b/deps/uv/README.md @@ -401,6 +401,8 @@ Check the [SUPPORTED_PLATFORMS file](SUPPORTED_PLATFORMS.md). ### AIX Notes +AIX compilation using IBM XL C/C++ requires version 12.1 or greater. + AIX support for filesystem events requires the non-default IBM `bos.ahafs` package to be installed. This package provides the AIX Event Infrastructure that is detected by `autoconf`. diff --git a/deps/uv/configure.ac b/deps/uv/configure.ac index c5e29fef849e4e..b503e538298dd9 100644 --- a/deps/uv/configure.ac +++ b/deps/uv/configure.ac @@ -13,7 +13,7 @@ # OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. AC_PREREQ(2.57) -AC_INIT([libuv], [1.31.0], [https://github.com/libuv/libuv/issues]) +AC_INIT([libuv], [1.32.0], [https://github.com/libuv/libuv/issues]) AC_CONFIG_MACRO_DIR([m4]) m4_include([m4/libuv-extra-automake-flags.m4]) m4_include([m4/as_case.m4]) diff --git a/deps/uv/docs/src/handle.rst b/deps/uv/docs/src/handle.rst index 544794db06c956..0a25bfa8b27dd1 100644 --- a/deps/uv/docs/src/handle.rst +++ b/deps/uv/docs/src/handle.rst @@ -60,6 +60,9 @@ Data types a ``UV_ENOBUFS`` error will be triggered in the :c:type:`uv_udp_recv_cb` or the :c:type:`uv_read_cb` callback. + Each buffer is used only once and the user is responsible for freeing it in the + :c:type:`uv_udp_recv_cb` or the :c:type:`uv_read_cb` callback. + A suggested size (65536 at the moment in most cases) is provided, but it's just an indication, not related in any way to the pending data to be read. The user is free to allocate the amount of memory they decide. @@ -87,7 +90,7 @@ Public members .. c:member:: uv_loop_t* uv_handle_t.loop - Pointer to the :c:type:`uv_loop_t` where the handle is running on. Readonly. + Pointer to the :c:type:`uv_loop_t` the handle is running on. Readonly. .. 
c:member:: uv_handle_type uv_handle_t.type diff --git a/deps/uv/docs/src/signal.rst b/deps/uv/docs/src/signal.rst index f5a809ab0bb73d..eeadb95b0a47d4 100644 --- a/deps/uv/docs/src/signal.rst +++ b/deps/uv/docs/src/signal.rst @@ -20,6 +20,15 @@ Reception of some signals is emulated: program is given approximately 10 seconds to perform cleanup. After that Windows will unconditionally terminate it. +* SIGWINCH is raised whenever libuv detects that the console has been + resized. When a libuv app is running under a console emulator, or when a + 32-bit libuv app is running on 64-bit system, SIGWINCH will be emulated. In + such cases SIGWINCH signals may not always be delivered in a timely manner. + For a writable :c:type:`uv_tty_t` handle libuv will only detect size changes + when the cursor is moved. When a readable :c:type:`uv_tty_t` handle is used, + resizing of the console buffer will be detected only if the handle is in raw + mode and is being read. + * Watchers for other signals can be successfully created, but these signals are never received. These signals are: `SIGILL`, `SIGABRT`, `SIGFPE`, `SIGSEGV`, `SIGTERM` and `SIGKILL.` @@ -28,6 +37,8 @@ Reception of some signals is emulated: not detected by libuv; these will not trigger a signal watcher. .. versionchanged:: 1.15.0 SIGWINCH support on Windows was improved. +.. versionchanged:: 1.31.0 32-bit libuv SIGWINCH support on 64-bit Windows was + rolled back to old implementation. Unix notes ---------- diff --git a/deps/uv/docs/src/stream.rst b/deps/uv/docs/src/stream.rst index 6a704367b1b361..2ccb59b51cb432 100644 --- a/deps/uv/docs/src/stream.rst +++ b/deps/uv/docs/src/stream.rst @@ -50,8 +50,8 @@ Data types from the stream again is undefined. The callee is responsible for freeing the buffer, libuv does not reuse it. - The buffer may be a null buffer (where buf->base=NULL and buf->len=0) on - error. + The buffer may be a null buffer (where `buf->base` == NULL and `buf->len` == 0) + on error. .. c:type:: void (*uv_write_cb)(uv_write_t* req, int status) diff --git a/deps/uv/docs/src/tcp.rst b/deps/uv/docs/src/tcp.rst index d20a6362af94d5..bcb163ea0f0356 100644 --- a/deps/uv/docs/src/tcp.rst +++ b/deps/uv/docs/src/tcp.rst @@ -113,3 +113,13 @@ API mapping .. seealso:: The :c:type:`uv_stream_t` API functions also apply. + +.. c:function:: int uv_tcp_close_reset(uv_tcp_t* handle, uv_close_cb close_cb) + + Resets a TCP connection by sending a RST packet. This is accomplished by + setting the `SO_LINGER` socket option with a linger interval of zero and + then calling :c:func:`uv_close`. + Due to some platform inconsistencies, mixing of :c:func:`uv_shutdown` and + :c:func:`uv_tcp_close_reset` calls is not allowed. + + .. versionadded:: 1.32.0 diff --git a/deps/uv/docs/src/udp.rst b/deps/uv/docs/src/udp.rst index f3de53fbab0568..53b1fea4933aae 100644 --- a/deps/uv/docs/src/udp.rst +++ b/deps/uv/docs/src/udp.rst @@ -56,16 +56,19 @@ Data types * `handle`: UDP handle * `nread`: Number of bytes that have been received. - 0 if there is no more data to read. You may discard or repurpose - the read buffer. Note that 0 may also mean that an empty datagram - was received (in this case `addr` is not NULL). < 0 if a transmission - error was detected. + 0 if there is no more data to read. Note that 0 may also mean that an + empty datagram was received (in this case `addr` is not NULL). < 0 if + a transmission error was detected. * `buf`: :c:type:`uv_buf_t` with the received data. * `addr`: ``struct sockaddr*`` containing the address of the sender. 
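For readers skimming past the new `uv_tcp_close_reset()` section above: it replaces the usual shutdown-then-close sequence when the peer should see an RST rather than an orderly FIN. A minimal usage sketch, assuming an established `uv_tcp_t`; the error handling is illustrative:

```c
/* Sketch: abort a TCP connection with an RST via uv_tcp_close_reset()
 * (new in libuv 1.32.0). */
#include <stdio.h>
#include <uv.h>

static void on_close(uv_handle_t* handle) {
  /* The handle's memory may be reclaimed here. */
}

void abort_connection(uv_tcp_t* client) {
  int r = uv_tcp_close_reset(client, on_close);
  if (r != 0) {
    /* Fails with UV_EINVAL if uv_shutdown() was already issued; mixing
     * the two calls is disallowed, per the documentation above. */
    fprintf(stderr, "uv_tcp_close_reset: %s\n", uv_strerror(r));
  }
}
```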
Can be NULL. Valid for the duration of the callback only. * `flags`: One or more or'ed UV_UDP_* constants. Right now only ``UV_UDP_PARTIAL`` is used. + The callee is responsible for freeing the buffer, libuv does not reuse it. + The buffer may be a null buffer (where `buf->base` == NULL and `buf->len` == 0) + on error. + .. note:: The receive callback will be called with `nread` == 0 and `addr` == NULL when there is nothing to read, and with `nread` == 0 and `addr` != NULL when an empty UDP packet is @@ -219,6 +222,25 @@ API :returns: 0 on success, or an error code < 0 on failure. +.. c:function:: int uv_udp_set_source_membership(uv_udp_t* handle, const char* multicast_addr, const char* interface_addr, const char* source_addr, uv_membership membership) + + Set membership for a source-specific multicast group. + + :param handle: UDP handle. Should have been initialized with + :c:func:`uv_udp_init`. + + :param multicast_addr: Multicast address to set membership for. + + :param interface_addr: Interface address. + + :param source_addr: Source address. + + :param membership: Should be ``UV_JOIN_GROUP`` or ``UV_LEAVE_GROUP``. + + :returns: 0 on success, or an error code < 0 on failure. + + .. versionadded:: 1.32.0 + .. c:function:: int uv_udp_set_multicast_loop(uv_udp_t* handle, int on) Set IP multicast loop flag. Makes multicast packets loop back to diff --git a/deps/uv/include/uv.h b/deps/uv/include/uv.h index f71767b6e9fab5..ee45bcaefce1d3 100644 --- a/deps/uv/include/uv.h +++ b/deps/uv/include/uv.h @@ -27,6 +27,10 @@ extern "C" { #endif +#if defined(BUILDING_UV_SHARED) && defined(USING_UV_SHARED) +#error "Define either BUILDING_UV_SHARED or USING_UV_SHARED, not both." +#endif + #ifdef _WIN32 /* Windows - set up dll import/export decorators. */ # if defined(BUILDING_UV_SHARED) @@ -143,6 +147,7 @@ extern "C" { XX(EREMOTEIO, "remote I/O error") \ XX(ENOTTY, "inappropriate ioctl for device") \ XX(EFTYPE, "inappropriate file type or format") \ + XX(EILSEQ, "illegal byte sequence") \ #define UV_HANDLE_TYPE_MAP(XX) \ XX(ASYNC, async) \ @@ -559,6 +564,7 @@ UV_EXTERN int uv_tcp_getsockname(const uv_tcp_t* handle, UV_EXTERN int uv_tcp_getpeername(const uv_tcp_t* handle, struct sockaddr* name, int* namelen); +UV_EXTERN int uv_tcp_close_reset(uv_tcp_t* handle, uv_close_cb close_cb); UV_EXTERN int uv_tcp_connect(uv_connect_t* req, uv_tcp_t* handle, const struct sockaddr* addr, @@ -645,6 +651,11 @@ UV_EXTERN int uv_udp_set_membership(uv_udp_t* handle, const char* multicast_addr, const char* interface_addr, uv_membership membership); +UV_EXTERN int uv_udp_set_source_membership(uv_udp_t* handle, + const char* multicast_addr, + const char* interface_addr, + const char* source_addr, + uv_membership membership); UV_EXTERN int uv_udp_set_multicast_loop(uv_udp_t* handle, int on); UV_EXTERN int uv_udp_set_multicast_ttl(uv_udp_t* handle, int ttl); UV_EXTERN int uv_udp_set_multicast_interface(uv_udp_t* handle, diff --git a/deps/uv/include/uv/errno.h b/deps/uv/include/uv/errno.h index 8eeb95de31b065..165fd11c376a1c 100644 --- a/deps/uv/include/uv/errno.h +++ b/deps/uv/include/uv/errno.h @@ -439,5 +439,10 @@ # define UV__EFTYPE (-4028) #endif +#if defined(EILSEQ) && !defined(_WIN32) +# define UV__EILSEQ UV__ERR(EILSEQ) +#else +# define UV__EILSEQ (-4027) +#endif #endif /* UV_ERRNO_H_ */ diff --git a/deps/uv/include/uv/unix.h b/deps/uv/include/uv/unix.h index 9080352d31dfc0..3a131638f77606 100644 --- a/deps/uv/include/uv/unix.h +++ b/deps/uv/include/uv/unix.h @@ -405,11 +405,25 @@ typedef struct { #else # define 
UV_FS_O_CREAT 0 #endif -#if defined(O_DIRECT) + +#if defined(__linux__) && defined(__arm__) +# define UV_FS_O_DIRECT 0x10000 +#elif defined(__linux__) && defined(__m68k__) +# define UV_FS_O_DIRECT 0x10000 +#elif defined(__linux__) && defined(__mips__) +# define UV_FS_O_DIRECT 0x08000 +#elif defined(__linux__) && defined(__powerpc__) +# define UV_FS_O_DIRECT 0x20000 +#elif defined(__linux__) && defined(__s390x__) +# define UV_FS_O_DIRECT 0x04000 +#elif defined(__linux__) && defined(__x86_64__) +# define UV_FS_O_DIRECT 0x04000 +#elif defined(O_DIRECT) # define UV_FS_O_DIRECT O_DIRECT #else # define UV_FS_O_DIRECT 0 #endif + #if defined(O_DIRECTORY) # define UV_FS_O_DIRECTORY O_DIRECTORY #else diff --git a/deps/uv/include/uv/version.h b/deps/uv/include/uv/version.h index 37a6a445b8d0da..928647b8200502 100644 --- a/deps/uv/include/uv/version.h +++ b/deps/uv/include/uv/version.h @@ -31,7 +31,7 @@ */ #define UV_VERSION_MAJOR 1 -#define UV_VERSION_MINOR 31 +#define UV_VERSION_MINOR 32 #define UV_VERSION_PATCH 0 #define UV_VERSION_IS_RELEASE 1 #define UV_VERSION_SUFFIX "" diff --git a/deps/uv/src/timer.c b/deps/uv/src/timer.c index dd78bcbad9a986..8fce7f6472f9fd 100644 --- a/deps/uv/src/timer.c +++ b/deps/uv/src/timer.c @@ -74,7 +74,7 @@ int uv_timer_start(uv_timer_t* handle, uint64_t repeat) { uint64_t clamped_timeout; - if (cb == NULL) + if (uv__is_closing(handle) || cb == NULL) return UV_EINVAL; if (uv__is_active(handle)) diff --git a/deps/uv/src/unix/aix-common.c b/deps/uv/src/unix/aix-common.c index 63ac16a03431e6..b9d313c0c5d7cb 100644 --- a/deps/uv/src/unix/aix-common.c +++ b/deps/uv/src/unix/aix-common.c @@ -155,16 +155,6 @@ int uv_exepath(char* buffer, size_t* size) { } } -void uv_free_cpu_info(uv_cpu_info_t* cpu_infos, int count) { - int i; - - for (i = 0; i < count; ++i) { - uv__free(cpu_infos[i].model); - } - - uv__free(cpu_infos); -} - int uv_interface_addresses(uv_interface_address_t** addresses, int* count) { uv_interface_address_t* address; diff --git a/deps/uv/src/unix/atomic-ops.h b/deps/uv/src/unix/atomic-ops.h index 541a6c864882a0..bc37c0d45d159c 100644 --- a/deps/uv/src/unix/atomic-ops.h +++ b/deps/uv/src/unix/atomic-ops.h @@ -36,10 +36,6 @@ UV_UNUSED(static int cmpxchgi(int* ptr, int oldval, int newval)) { : "r" (newval), "0" (oldval) : "memory"); return out; -#elif defined(_AIX) && defined(__xlC__) - const int out = (*(volatile int*) ptr); - __compare_and_swap(ptr, &oldval, newval); - return out; #elif defined(__MVS__) unsigned int op4; if (__plo_CSST(ptr, (unsigned int*) &oldval, newval, diff --git a/deps/uv/src/unix/core.c b/deps/uv/src/unix/core.c index f4b94e30cc0049..366c43c2ab0843 100644 --- a/deps/uv/src/unix/core.c +++ b/deps/uv/src/unix/core.c @@ -30,7 +30,7 @@ #include #include #include -#include +#include /* O_CLOEXEC */ #include #include #include @@ -49,17 +49,19 @@ # include #endif -#ifdef __APPLE__ +#if defined(__APPLE__) +# include +# endif /* defined(__APPLE__) */ + + +#if defined(__APPLE__) && !TARGET_OS_IPHONE # include # include /* _NSGetExecutablePath */ -# include -# if defined(O_CLOEXEC) -# define UV__O_CLOEXEC O_CLOEXEC -# endif # define environ (*_NSGetEnviron()) -#else +#else /* defined(__APPLE__) && !TARGET_OS_IPHONE */ extern char** environ; -#endif +#endif /* !(defined(__APPLE__) && !TARGET_OS_IPHONE) */ + #if defined(__DragonFly__) || \ defined(__FreeBSD__) || \ @@ -68,7 +70,6 @@ extern char** environ; # include # include # include -# define UV__O_CLOEXEC O_CLOEXEC # if defined(__FreeBSD__) && __FreeBSD__ >= 10 # define uv__accept4 accept4 # 
endif @@ -1000,24 +1001,17 @@ int uv_getrusage(uv_rusage_t* rusage) { int uv__open_cloexec(const char* path, int flags) { - int err; +#if defined(O_CLOEXEC) int fd; -#if defined(UV__O_CLOEXEC) - static int no_cloexec; - - if (!no_cloexec) { - fd = open(path, flags | UV__O_CLOEXEC); - if (fd != -1) - return fd; - - if (errno != EINVAL) - return UV__ERR(errno); + fd = open(path, flags | O_CLOEXEC); + if (fd == -1) + return UV__ERR(errno); - /* O_CLOEXEC not supported. */ - no_cloexec = 1; - } -#endif + return fd; +#else /* O_CLOEXEC */ + int err; + int fd; fd = open(path, flags); if (fd == -1) @@ -1030,6 +1024,7 @@ int uv__open_cloexec(const char* path, int flags) { } return fd; +#endif /* O_CLOEXEC */ } @@ -1051,7 +1046,7 @@ int uv__dup2_cloexec(int oldfd, int newfd) { static int no_dup3; if (!no_dup3) { do - r = uv__dup3(oldfd, newfd, UV__O_CLOEXEC); + r = uv__dup3(oldfd, newfd, O_CLOEXEC); while (r == -1 && errno == EBUSY); if (r != -1) return r; diff --git a/deps/uv/src/unix/cygwin.c b/deps/uv/src/unix/cygwin.c index 6b5cfb7ba5b817..169958d55f2ed0 100644 --- a/deps/uv/src/unix/cygwin.c +++ b/deps/uv/src/unix/cygwin.c @@ -48,11 +48,6 @@ int uv_cpu_info(uv_cpu_info_t** cpu_infos, int* count) { return UV_ENOSYS; } -void uv_free_cpu_info(uv_cpu_info_t* cpu_infos, int count) { - (void)cpu_infos; - (void)count; -} - uint64_t uv_get_constrained_memory(void) { return 0; /* Memory constraints are unknown. */ } diff --git a/deps/uv/src/unix/darwin.c b/deps/uv/src/unix/darwin.c index e4cd8ff7e0cf99..5cf03aea0b4054 100644 --- a/deps/uv/src/unix/darwin.c +++ b/deps/uv/src/unix/darwin.c @@ -223,14 +223,3 @@ int uv_cpu_info(uv_cpu_info_t** cpu_infos, int* count) { return 0; } - - -void uv_free_cpu_info(uv_cpu_info_t* cpu_infos, int count) { - int i; - - for (i = 0; i < count; i++) { - uv__free(cpu_infos[i].model); - } - - uv__free(cpu_infos); -} diff --git a/deps/uv/src/unix/freebsd.c b/deps/uv/src/unix/freebsd.c index 7de88d6a52faf6..d0b7d8e9d11f95 100644 --- a/deps/uv/src/unix/freebsd.c +++ b/deps/uv/src/unix/freebsd.c @@ -288,14 +288,3 @@ int uv_cpu_info(uv_cpu_info_t** cpu_infos, int* count) { uv__free(cp_times); return 0; } - - -void uv_free_cpu_info(uv_cpu_info_t* cpu_infos, int count) { - int i; - - for (i = 0; i < count; i++) { - uv__free(cpu_infos[i].model); - } - - uv__free(cpu_infos); -} diff --git a/deps/uv/src/unix/fs.c b/deps/uv/src/unix/fs.c index fc80d00d5c563e..fd3dd4c287e9ba 100644 --- a/deps/uv/src/unix/fs.c +++ b/deps/uv/src/unix/fs.c @@ -255,20 +255,10 @@ static ssize_t uv__fs_mkdtemp(uv_fs_t* req) { static ssize_t uv__fs_open(uv_fs_t* req) { - static int no_cloexec_support; - int r; - - /* Try O_CLOEXEC before entering locks */ - if (no_cloexec_support == 0) { #ifdef O_CLOEXEC - r = open(req->path, req->flags | O_CLOEXEC, req->mode); - if (r >= 0) - return r; - if (errno != EINVAL) - return r; - no_cloexec_support = 1; -#endif /* O_CLOEXEC */ - } + return open(req->path, req->flags | O_CLOEXEC, req->mode); +#else /* O_CLOEXEC */ + int r; if (req->cb != NULL) uv_rwlock_rdlock(&req->loop->cloexec_lock); @@ -289,6 +279,7 @@ static ssize_t uv__fs_open(uv_fs_t* req) { uv_rwlock_rdunlock(&req->loop->cloexec_lock); return r; +#endif /* O_CLOEXEC */ } diff --git a/deps/uv/src/unix/haiku.c b/deps/uv/src/unix/haiku.c index 7708851c2a5fa9..cf17d836b4c7e8 100644 --- a/deps/uv/src/unix/haiku.c +++ b/deps/uv/src/unix/haiku.c @@ -165,12 +165,3 @@ int uv_cpu_info(uv_cpu_info_t** cpu_infos, int* count) { return 0; } - -void uv_free_cpu_info(uv_cpu_info_t* cpu_infos, int count) { - int i; - - for 
(i = 0; i < count; i++) - uv__free(cpu_infos[i].model); - - uv__free(cpu_infos); -} diff --git a/deps/uv/src/unix/linux-core.c b/deps/uv/src/unix/linux-core.c index b539beb86ae576..433e201fe19dbf 100644 --- a/deps/uv/src/unix/linux-core.c +++ b/deps/uv/src/unix/linux-core.c @@ -812,16 +812,6 @@ static uint64_t read_cpufreq(unsigned int cpunum) { } -void uv_free_cpu_info(uv_cpu_info_t* cpu_infos, int count) { - int i; - - for (i = 0; i < count; i++) { - uv__free(cpu_infos[i].model); - } - - uv__free(cpu_infos); -} - static int uv__ifaddr_exclude(struct ifaddrs *ent, int exclude_type) { if (!((ent->ifa_flags & IFF_UP) && (ent->ifa_flags & IFF_RUNNING))) return 1; diff --git a/deps/uv/src/unix/netbsd.c b/deps/uv/src/unix/netbsd.c index c649bb375f32a1..cfe2c6a49dc3c7 100644 --- a/deps/uv/src/unix/netbsd.c +++ b/deps/uv/src/unix/netbsd.c @@ -234,14 +234,3 @@ int uv_cpu_info(uv_cpu_info_t** cpu_infos, int* count) { uv__free(cp_times); return 0; } - - -void uv_free_cpu_info(uv_cpu_info_t* cpu_infos, int count) { - int i; - - for (i = 0; i < count; i++) { - uv__free(cpu_infos[i].model); - } - - uv__free(cpu_infos); -} diff --git a/deps/uv/src/unix/openbsd.c b/deps/uv/src/unix/openbsd.c index b5cdc80c3e92fe..1f5228dc13fd7a 100644 --- a/deps/uv/src/unix/openbsd.c +++ b/deps/uv/src/unix/openbsd.c @@ -202,14 +202,13 @@ int uv_cpu_info(uv_cpu_info_t** cpu_infos, int* count) { if (!(*cpu_infos)) return UV_ENOMEM; + i = 0; *count = numcpus; which[1] = HW_CPUSPEED; size = sizeof(cpuspeed); - if (sysctl(which, 2, &cpuspeed, &size, NULL, 0)) { - uv__free(*cpu_infos); - return UV__ERR(errno); - } + if (sysctl(which, 2, &cpuspeed, &size, NULL, 0)) + goto error; size = sizeof(info); which[0] = CTL_KERN; @@ -217,10 +216,8 @@ int uv_cpu_info(uv_cpu_info_t** cpu_infos, int* count) { for (i = 0; i < numcpus; i++) { which[2] = i; size = sizeof(info); - if (sysctl(which, 3, &info, &size, NULL, 0)) { - uv__free(*cpu_infos); - return UV__ERR(errno); - } + if (sysctl(which, 3, &info, &size, NULL, 0)) + goto error; cpu_info = &(*cpu_infos)[i]; @@ -235,15 +232,13 @@ int uv_cpu_info(uv_cpu_info_t** cpu_infos, int* count) { } return 0; -} +error: + *count = 0; + for (j = 0; j < i; j++) + uv__free((*cpu_infos)[j].model); -void uv_free_cpu_info(uv_cpu_info_t* cpu_infos, int count) { - int i; - - for (i = 0; i < count; i++) { - uv__free(cpu_infos[i].model); - } - - uv__free(cpu_infos); + uv__free(*cpu_infos); + *cpu_infos = NULL; + return UV__ERR(errno); } diff --git a/deps/uv/src/unix/os390.c b/deps/uv/src/unix/os390.c index 273ded7ca5e81c..a7305006c1756d 100644 --- a/deps/uv/src/unix/os390.c +++ b/deps/uv/src/unix/os390.c @@ -433,13 +433,6 @@ int uv_cpu_info(uv_cpu_info_t** cpu_infos, int* count) { } -void uv_free_cpu_info(uv_cpu_info_t* cpu_infos, int count) { - for (int i = 0; i < count; ++i) - uv__free(cpu_infos[i].model); - uv__free(cpu_infos); -} - - static int uv__interface_addresses_v6(uv_interface_address_t** addresses, int* count) { uv_interface_address_t* address; diff --git a/deps/uv/src/unix/signal.c b/deps/uv/src/unix/signal.c index 5e89ded2d842ce..3a257f04b57807 100644 --- a/deps/uv/src/unix/signal.c +++ b/deps/uv/src/unix/signal.c @@ -477,9 +477,11 @@ static void uv__signal_event(uv_loop_t* loop, * yet dispatched, the uv__finish_close was deferred. Make close pending * now if this has happened. 
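The string of per-platform `uv_free_cpu_info()` deletions through this stretch of the diff all point at one shared implementation added to `src/uv-common.c` further below; the caller-facing contract is unchanged. A small sketch of the pairing, using only the public API:

```c
/* Sketch: uv_cpu_info() allocates the array and each model string;
 * uv_free_cpu_info() (now one shared implementation) releases both. */
#include <stdio.h>
#include <uv.h>

int main(void) {
  uv_cpu_info_t* cpus;
  int count;
  int i;

  int r = uv_cpu_info(&cpus, &count);
  if (r != 0) {
    fprintf(stderr, "uv_cpu_info: %s\n", uv_strerror(r));
    return 1;
  }
  for (i = 0; i < count; i++)
    printf("cpu %d: %s @ %d MHz\n", i, cpus[i].model, cpus[i].speed);
  uv_free_cpu_info(cpus, count);
  return 0;
}
```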
*/ - if ((handle->flags & UV_HANDLE_CLOSING) && - (handle->caught_signals == handle->dispatched_signals)) { - uv__make_close_pending((uv_handle_t*) handle); + if (handle->caught_signals == handle->dispatched_signals) { + if (handle->signum == 0) + uv__handle_stop(handle); + if (handle->flags & UV_HANDLE_CLOSING) + uv__make_close_pending((uv_handle_t*) handle); } } @@ -569,5 +571,6 @@ static void uv__signal_stop(uv_signal_t* handle) { uv__signal_unlock_and_unblock(&saved_sigmask); handle->signum = 0; - uv__handle_stop(handle); + if (handle->caught_signals == handle->dispatched_signals) + uv__handle_stop(handle); } diff --git a/deps/uv/src/unix/stream.c b/deps/uv/src/unix/stream.c index 9de01e3c78403e..78ce8e84870e8e 100644 --- a/deps/uv/src/unix/stream.c +++ b/deps/uv/src/unix/stream.c @@ -1180,6 +1180,10 @@ static void uv__read(uv_stream_t* stream) { } else if (errno == ECONNRESET && stream->type == UV_NAMED_PIPE) { uv__stream_eof(stream, &buf); return; +#elif defined(_AIX) + } else if (errno == ECONNRESET && (stream->flags & UV_DISCONNECT)) { + uv__stream_eof(stream, &buf); + return; #endif } else { /* Error. User should call uv_close(). */ @@ -1403,7 +1407,7 @@ int uv_write2(uv_write_t* req, return UV_EBADF; if (!(stream->flags & UV_HANDLE_WRITABLE)) - return -EPIPE; + return UV_EPIPE; if (send_handle) { if (stream->type != UV_NAMED_PIPE || !((uv_pipe_t*)stream)->ipc) @@ -1557,7 +1561,7 @@ int uv_read_start(uv_stream_t* stream, return UV_EINVAL; if (!(stream->flags & UV_HANDLE_READABLE)) - return -ENOTCONN; + return UV_ENOTCONN; /* The UV_HANDLE_READING flag is irrelevant of the state of the tcp - it just * expresses the desired state of the user. diff --git a/deps/uv/src/unix/sunos.c b/deps/uv/src/unix/sunos.c index f323d1defdef98..180cc84651db37 100644 --- a/deps/uv/src/unix/sunos.c +++ b/deps/uv/src/unix/sunos.c @@ -696,16 +696,6 @@ int uv_cpu_info(uv_cpu_info_t** cpu_infos, int* count) { } -void uv_free_cpu_info(uv_cpu_info_t* cpu_infos, int count) { - int i; - - for (i = 0; i < count; i++) { - uv__free(cpu_infos[i].model); - } - - uv__free(cpu_infos); -} - #ifdef SUNOS_NO_IFADDRS int uv_interface_addresses(uv_interface_address_t** addresses, int* count) { *count = 0; diff --git a/deps/uv/src/unix/tcp.c b/deps/uv/src/unix/tcp.c index 8cedcd6027be52..fa660f1381315e 100644 --- a/deps/uv/src/unix/tcp.c +++ b/deps/uv/src/unix/tcp.c @@ -308,6 +308,23 @@ int uv_tcp_getpeername(const uv_tcp_t* handle, } +int uv_tcp_close_reset(uv_tcp_t* handle, uv_close_cb close_cb) { + int fd; + struct linger l = { 1, 0 }; + + /* Disallow setting SO_LINGER to zero due to some platform inconsistencies */ + if (handle->flags & UV_HANDLE_SHUTTING) + return UV_EINVAL; + + fd = uv__stream_fd(handle); + if (0 != setsockopt(fd, SOL_SOCKET, SO_LINGER, &l, sizeof(l))) + return UV__ERR(errno); + + uv_close((uv_handle_t*) handle, close_cb); + return 0; +} + + int uv_tcp_listen(uv_tcp_t* tcp, int backlog, uv_connection_cb cb) { static int single_accept = -1; unsigned long flags; diff --git a/deps/uv/src/unix/udp.c b/deps/uv/src/unix/udp.c index b578e7bc1037ef..dba8eff8382edd 100644 --- a/deps/uv/src/unix/udp.c +++ b/deps/uv/src/unix/udp.c @@ -659,6 +659,98 @@ static int uv__udp_set_membership6(uv_udp_t* handle, } +static int uv__udp_set_source_membership4(uv_udp_t* handle, + const struct sockaddr_in* multicast_addr, + const char* interface_addr, + const struct sockaddr_in* source_addr, + uv_membership membership) { + struct ip_mreq_source mreq; + int optname; + int err; + + err = uv__udp_maybe_deferred_bind(handle, 
AF_INET, UV_UDP_REUSEADDR); + if (err) + return err; + + memset(&mreq, 0, sizeof(mreq)); + + if (interface_addr != NULL) { + err = uv_inet_pton(AF_INET, interface_addr, &mreq.imr_interface.s_addr); + if (err) + return err; + } else { + mreq.imr_interface.s_addr = htonl(INADDR_ANY); + } + + mreq.imr_multiaddr.s_addr = multicast_addr->sin_addr.s_addr; + mreq.imr_sourceaddr.s_addr = source_addr->sin_addr.s_addr; + + if (membership == UV_JOIN_GROUP) + optname = IP_ADD_SOURCE_MEMBERSHIP; + else if (membership == UV_LEAVE_GROUP) + optname = IP_DROP_SOURCE_MEMBERSHIP; + else + return UV_EINVAL; + + if (setsockopt(handle->io_watcher.fd, + IPPROTO_IP, + optname, + &mreq, + sizeof(mreq))) { + return UV__ERR(errno); + } + + return 0; +} + + +static int uv__udp_set_source_membership6(uv_udp_t* handle, + const struct sockaddr_in6* multicast_addr, + const char* interface_addr, + const struct sockaddr_in6* source_addr, + uv_membership membership) { + struct group_source_req mreq; + struct sockaddr_in6 addr6; + int optname; + int err; + + err = uv__udp_maybe_deferred_bind(handle, AF_INET6, UV_UDP_REUSEADDR); + if (err) + return err; + + memset(&mreq, 0, sizeof(mreq)); + + if (interface_addr != NULL) { + err = uv_ip6_addr(interface_addr, 0, &addr6); + if (err) + return err; + mreq.gsr_interface = addr6.sin6_scope_id; + } else { + mreq.gsr_interface = 0; + } + + memcpy(&mreq.gsr_group, multicast_addr, sizeof(mreq.gsr_group)); + memcpy(&mreq.gsr_source, source_addr, sizeof(mreq.gsr_source)); + + if (membership == UV_JOIN_GROUP) + optname = MCAST_JOIN_SOURCE_GROUP; + else if (membership == UV_LEAVE_GROUP) + optname = MCAST_LEAVE_SOURCE_GROUP; + else + return UV_EINVAL; + + if (setsockopt(handle->io_watcher.fd, + IPPROTO_IPV6, + optname, + &mreq, + sizeof(mreq))) { + return UV__ERR(errno); + } + + return 0; +} + + int uv_udp_init_ex(uv_loop_t* loop, uv_udp_t* handle, unsigned int flags) { int domain; int err; @@ -748,6 +840,51 @@ int uv_udp_set_membership(uv_udp_t* handle, } } + +int uv_udp_set_source_membership(uv_udp_t* handle, + const char* multicast_addr, + const char* interface_addr, + const char* source_addr, + uv_membership membership) { + int err; + struct sockaddr_storage mcast_addr; + struct sockaddr_in* mcast_addr4; + struct sockaddr_in6* mcast_addr6; + struct sockaddr_storage src_addr; + struct sockaddr_in* src_addr4; + struct sockaddr_in6* src_addr6; + + mcast_addr4 = (struct sockaddr_in*)&mcast_addr; + mcast_addr6 = (struct sockaddr_in6*)&mcast_addr; + src_addr4 = (struct sockaddr_in*)&src_addr; + src_addr6 = (struct sockaddr_in6*)&src_addr; + + err = uv_ip4_addr(multicast_addr, 0, mcast_addr4); + if (err) { + err = uv_ip6_addr(multicast_addr, 0, mcast_addr6); + if (err) + return err; + err = uv_ip6_addr(source_addr, 0, src_addr6); + if (err) + return err; + return uv__udp_set_source_membership6(handle, + mcast_addr6, + interface_addr, + src_addr6, + membership); + } + + err = uv_ip4_addr(source_addr, 0, src_addr4); + if (err) + return err; + return uv__udp_set_source_membership4(handle, + mcast_addr4, + interface_addr, + src_addr4, + membership); +} + + static int uv__setsockopt(uv_udp_t* handle, int option4, int option6, diff --git a/deps/uv/src/uv-common.c b/deps/uv/src/uv-common.c index d1a5e2fbe6b77e..70db53ab04dacf 100644 --- a/deps/uv/src/uv-common.c +++ b/deps/uv/src/uv-common.c @@ -797,3 +797,13 @@ void uv_os_free_environ(uv_env_item_t* envitems, int count) { uv__free(envitems); } + + +void uv_free_cpu_info(uv_cpu_info_t* cpu_infos, int count) { + int i; + + for (i = 0; i < count; i++) + 
uv__free(cpu_infos[i].model); + + uv__free(cpu_infos); +} diff --git a/deps/uv/src/win/error.c b/deps/uv/src/win/error.c index 24924ba81ef3b2..32ac5e596fea82 100644 --- a/deps/uv/src/win/error.c +++ b/deps/uv/src/win/error.c @@ -132,6 +132,7 @@ int uv_translate_sys_error(int sys_errno) { case WSAENOBUFS: return UV_ENOBUFS; case ERROR_BAD_PATHNAME: return UV_ENOENT; case ERROR_DIRECTORY: return UV_ENOENT; + case ERROR_ENVVAR_NOT_FOUND: return UV_ENOENT; case ERROR_FILE_NOT_FOUND: return UV_ENOENT; case ERROR_INVALID_NAME: return UV_ENOENT; case ERROR_INVALID_DRIVE: return UV_ENOENT; diff --git a/deps/uv/src/win/stream.c b/deps/uv/src/win/stream.c index 7656627e902da0..46a0709a38e3bd 100644 --- a/deps/uv/src/win/stream.c +++ b/deps/uv/src/win/stream.c @@ -198,8 +198,10 @@ int uv_try_write(uv_stream_t* stream, int uv_shutdown(uv_shutdown_t* req, uv_stream_t* handle, uv_shutdown_cb cb) { uv_loop_t* loop = handle->loop; - if (!(handle->flags & UV_HANDLE_WRITABLE)) { - return UV_EPIPE; + if (!(handle->flags & UV_HANDLE_WRITABLE) || + handle->flags & UV_HANDLE_SHUTTING || + uv__is_closing(handle)) { + return UV_ENOTCONN; } UV_REQ_INIT(req, UV_SHUTDOWN); @@ -207,6 +209,7 @@ int uv_shutdown(uv_shutdown_t* req, uv_stream_t* handle, uv_shutdown_cb cb) { req->cb = cb; handle->flags &= ~UV_HANDLE_WRITABLE; + handle->flags |= UV_HANDLE_SHUTTING; handle->stream.conn.shutdown_req = req; handle->reqs_pending++; REGISTER_HANDLE_REQ(loop, handle, req); diff --git a/deps/uv/src/win/tcp.c b/deps/uv/src/win/tcp.c index f2cb5271b8d77d..81e48136a3b9ef 100644 --- a/deps/uv/src/win/tcp.c +++ b/deps/uv/src/win/tcp.c @@ -549,6 +549,21 @@ static void uv_tcp_queue_read(uv_loop_t* loop, uv_tcp_t* handle) { } +int uv_tcp_close_reset(uv_tcp_t* handle, uv_close_cb close_cb) { + struct linger l = { 1, 0 }; + + /* Disallow setting SO_LINGER to zero due to some platform inconsistencies */ + if (handle->flags & UV_HANDLE_SHUTTING) + return UV_EINVAL; + + if (0 != setsockopt(handle->socket, SOL_SOCKET, SO_LINGER, &l, sizeof(l))) + return uv_translate_sys_error(WSAGetLastError()); + + uv_close((uv_handle_t*) handle, close_cb); + return 0; +} + + int uv_tcp_listen(uv_tcp_t* handle, int backlog, uv_connection_cb cb) { unsigned int i, simultaneous_accepts; uv_tcp_accept_t* req; diff --git a/deps/uv/src/win/tty.c b/deps/uv/src/win/tty.c index 07436dc804f0f1..8f84bcd0e45544 100644 --- a/deps/uv/src/win/tty.c +++ b/deps/uv/src/win/tty.c @@ -120,6 +120,8 @@ static int uv_tty_virtual_width = -1; static HANDLE uv__tty_console_handle = INVALID_HANDLE_VALUE; static int uv__tty_console_height = -1; static int uv__tty_console_width = -1; +static HANDLE uv__tty_console_resized = INVALID_HANDLE_VALUE; +static uv_mutex_t uv__tty_console_resize_mutex; static DWORD WINAPI uv__tty_console_resize_message_loop_thread(void* param); static void CALLBACK uv__tty_console_resize_event(HWINEVENTHOOK hWinEventHook, @@ -129,6 +131,8 @@ static void CALLBACK uv__tty_console_resize_event(HWINEVENTHOOK hWinEventHook, LONG idChild, DWORD dwEventThread, DWORD dwmsEventTime); +static DWORD WINAPI uv__tty_console_resize_watcher_thread(void* param); +static void uv__tty_console_signal_resize(void); /* We use a semaphore rather than a mutex or critical section because in some cases (uv__cancel_read_console) we need take the lock in the main thread and @@ -168,6 +172,7 @@ void uv_console_init(void) { QueueUserWorkItem(uv__tty_console_resize_message_loop_thread, NULL, WT_EXECUTELONGFUNCTION); + uv_mutex_init(&uv__tty_console_resize_mutex); } } @@ -728,6 +733,12 @@ void 
uv_process_tty_read_raw_req(uv_loop_t* loop, uv_tty_t* handle, } records_left--; + /* We might not be subscribed to EVENT_CONSOLE_LAYOUT or we might be + * running under some TTY emulator that does not send those events. */ + if (handle->tty.rd.last_input_record.EventType == WINDOW_BUFFER_SIZE_EVENT) { + uv__tty_console_signal_resize(); + } + /* Ignore other events that are not key events. */ if (handle->tty.rd.last_input_record.EventType != KEY_EVENT) { continue; @@ -2299,15 +2310,24 @@ static DWORD WINAPI uv__tty_console_resize_message_loop_thread(void* param) { sizeof(conhost_pid), NULL); - if (!NT_SUCCESS(status)) + if (!NT_SUCCESS(status)) { /* We couldn't retrieve our console host process, probably because this * is a 32-bit process running on 64-bit Windows. Fall back to receiving - * console events from all processes. */ - conhost_pid = 0; + * console events from the input stream only. */ + return 0; + } /* Ensure the PID is a multiple of 4, which is required by SetWinEventHook */ conhost_pid &= ~(ULONG_PTR)0x3; + uv__tty_console_resized = CreateEvent(NULL, TRUE, FALSE, NULL); + if (uv__tty_console_resized == NULL) + return 0; + if (QueueUserWorkItem(uv__tty_console_resize_watcher_thread, + NULL, + WT_EXECUTELONGFUNCTION) == 0) + return 0; + if (!pSetWinEventHook(EVENT_CONSOLE_LAYOUT, EVENT_CONSOLE_LAYOUT, NULL, @@ -2331,6 +2351,20 @@ static void CALLBACK uv__tty_console_resize_event(HWINEVENTHOOK hWinEventHook, LONG idChild, DWORD dwEventThread, DWORD dwmsEventTime) { + SetEvent(uv__tty_console_resized); +} + +static DWORD WINAPI uv__tty_console_resize_watcher_thread(void* param) { + for (;;) { + /* Make sure not to overwhelm the system with resize events */ + Sleep(33); + WaitForSingleObject(uv__tty_console_resized, INFINITE); + uv__tty_console_signal_resize(); + ResetEvent(uv__tty_console_resized); + } +} + +static void uv__tty_console_signal_resize(void) { CONSOLE_SCREEN_BUFFER_INFO sb_info; int width, height; @@ -2340,9 +2374,13 @@ static void CALLBACK uv__tty_console_resize_event(HWINEVENTHOOK hWinEventHook, width = sb_info.dwSize.X; height = sb_info.srWindow.Bottom - sb_info.srWindow.Top + 1; + uv_mutex_lock(&uv__tty_console_resize_mutex); if (width != uv__tty_console_width || height != uv__tty_console_height) { uv__tty_console_width = width; uv__tty_console_height = height; + uv_mutex_unlock(&uv__tty_console_resize_mutex); uv__signal_dispatch(SIGWINCH); + } else { + uv_mutex_unlock(&uv__tty_console_resize_mutex); } } diff --git a/deps/uv/src/win/udp.c b/deps/uv/src/win/udp.c index 8aeeab3b4628c3..39fc34d3bfcd94 100644 --- a/deps/uv/src/win/udp.c +++ b/deps/uv/src/win/udp.c @@ -702,6 +702,112 @@ int uv__udp_set_membership6(uv_udp_t* handle, } +static int uv__udp_set_source_membership4(uv_udp_t* handle, + const struct sockaddr_in* multicast_addr, + const char* interface_addr, + const struct sockaddr_in* source_addr, + uv_membership membership) { + struct ip_mreq_source mreq; + int optname; + int err; + + if (handle->flags & UV_HANDLE_IPV6) + return UV_EINVAL; + + /* If the socket is unbound, bind to inaddr_any. 
*/ + err = uv_udp_maybe_bind(handle, + (const struct sockaddr*) &uv_addr_ip4_any_, + sizeof(uv_addr_ip4_any_), + UV_UDP_REUSEADDR); + if (err) + return uv_translate_sys_error(err); + + memset(&mreq, 0, sizeof(mreq)); + + if (interface_addr != NULL) { + err = uv_inet_pton(AF_INET, interface_addr, &mreq.imr_interface.s_addr); + if (err) + return err; + } else { + mreq.imr_interface.s_addr = htonl(INADDR_ANY); + } + + mreq.imr_multiaddr.s_addr = multicast_addr->sin_addr.s_addr; + mreq.imr_sourceaddr.s_addr = source_addr->sin_addr.s_addr; + + if (membership == UV_JOIN_GROUP) + optname = IP_ADD_SOURCE_MEMBERSHIP; + else if (membership == UV_LEAVE_GROUP) + optname = IP_DROP_SOURCE_MEMBERSHIP; + else + return UV_EINVAL; + + if (setsockopt(handle->socket, + IPPROTO_IP, + optname, + (char*) &mreq, + sizeof(mreq)) == SOCKET_ERROR) { + return uv_translate_sys_error(WSAGetLastError()); + } + + return 0; +} + + +int uv__udp_set_source_membership6(uv_udp_t* handle, + const struct sockaddr_in6* multicast_addr, + const char* interface_addr, + const struct sockaddr_in6* source_addr, + uv_membership membership) { + struct group_source_req mreq; + struct sockaddr_in6 addr6; + int optname; + int err; + + if ((handle->flags & UV_HANDLE_BOUND) && !(handle->flags & UV_HANDLE_IPV6)) + return UV_EINVAL; + + err = uv_udp_maybe_bind(handle, + (const struct sockaddr*) &uv_addr_ip6_any_, + sizeof(uv_addr_ip6_any_), + UV_UDP_REUSEADDR); + + if (err) + return uv_translate_sys_error(err); + + memset(&mreq, 0, sizeof(mreq)); + + if (interface_addr != NULL) { + err = uv_ip6_addr(interface_addr, 0, &addr6); + if (err) + return err; + mreq.gsr_interface = addr6.sin6_scope_id; + } else { + mreq.gsr_interface = 0; + } + + memcpy(&mreq.gsr_group, multicast_addr, sizeof(mreq.gsr_group)); + memcpy(&mreq.gsr_source, source_addr, sizeof(mreq.gsr_source)); + + if (membership == UV_JOIN_GROUP) + optname = MCAST_JOIN_SOURCE_GROUP; + else if (membership == UV_LEAVE_GROUP) + optname = MCAST_LEAVE_SOURCE_GROUP; + else + return UV_EINVAL; + + if (setsockopt(handle->socket, + IPPROTO_IPV6, + optname, + (char*) &mreq, + sizeof(mreq)) == SOCKET_ERROR) { + return uv_translate_sys_error(WSAGetLastError()); + } + + return 0; +} + + int uv_udp_set_membership(uv_udp_t* handle, const char* multicast_addr, const char* interface_addr, @@ -718,6 +824,50 @@ int uv_udp_set_membership(uv_udp_t* handle, } +int uv_udp_set_source_membership(uv_udp_t* handle, + const char* multicast_addr, + const char* interface_addr, + const char* source_addr, + uv_membership membership) { + int err; + struct sockaddr_storage mcast_addr; + struct sockaddr_in* mcast_addr4; + struct sockaddr_in6* mcast_addr6; + struct sockaddr_storage src_addr; + struct sockaddr_in* src_addr4; + struct sockaddr_in6* src_addr6; + + mcast_addr4 = (struct sockaddr_in*)&mcast_addr; + mcast_addr6 = (struct sockaddr_in6*)&mcast_addr; + src_addr4 = (struct sockaddr_in*)&src_addr; + src_addr6 = (struct sockaddr_in6*)&src_addr; + + err = uv_ip4_addr(multicast_addr, 0, mcast_addr4); + if (err) { + err = uv_ip6_addr(multicast_addr, 0, mcast_addr6); + if (err) + return err; + err = uv_ip6_addr(source_addr, 0, src_addr6); + if (err) + return err; + return uv__udp_set_source_membership6(handle, + mcast_addr6, + interface_addr, + src_addr6, + membership); + } + + err = uv_ip4_addr(source_addr, 0, src_addr4); + if (err) + return err; + return uv__udp_set_source_membership4(handle, + mcast_addr4, + interface_addr, + src_addr4, + membership); +} + + int uv_udp_set_multicast_interface(uv_udp_t* handle, const 
char* interface_addr) { struct sockaddr_storage addr_st; struct sockaddr_in* addr4; diff --git a/deps/uv/src/win/util.c b/deps/uv/src/win/util.c index 359a16aed4bb27..8849d041bf0283 100644 --- a/deps/uv/src/win/util.c +++ b/deps/uv/src/win/util.c @@ -721,17 +721,6 @@ int uv_cpu_info(uv_cpu_info_t** cpu_infos_ptr, int* cpu_count_ptr) { } -void uv_free_cpu_info(uv_cpu_info_t* cpu_infos, int count) { - int i; - - for (i = 0; i < count; i++) { - uv__free(cpu_infos[i].model); - } - - uv__free(cpu_infos); -} - - static int is_windows_version_or_greater(DWORD os_major, DWORD os_minor, WORD service_pack_major, @@ -1325,7 +1314,7 @@ int uv__convert_utf8_to_utf16(const char* utf8, int utf8len, WCHAR** utf16) { return uv_translate_sys_error(GetLastError()); } - (*utf16)[bufsize] = '\0'; + (*utf16)[bufsize] = L'\0'; return 0; } @@ -1481,17 +1470,15 @@ int uv_os_getenv(const char* name, char* buffer, size_t* size) { if (r != 0) return r; + SetLastError(ERROR_SUCCESS); len = GetEnvironmentVariableW(name_w, var, MAX_ENV_VAR_LENGTH); uv__free(name_w); assert(len < MAX_ENV_VAR_LENGTH); /* len does not include the null */ if (len == 0) { r = GetLastError(); - - if (r == ERROR_ENVVAR_NOT_FOUND) - return UV_ENOENT; - - return uv_translate_sys_error(r); + if (r != ERROR_SUCCESS) + return uv_translate_sys_error(r); } /* Check how much space we need */ diff --git a/deps/uv/src/win/winapi.h b/deps/uv/src/win/winapi.h index 203393c2e3af03..322a212dd73c19 100644 --- a/deps/uv/src/win/winapi.h +++ b/deps/uv/src/win/winapi.h @@ -4109,7 +4109,7 @@ #endif /* from winternl.h */ -#if !defined(__UNICODE_STRING_DEFINED) && defined(__MINGW32_) +#if !defined(__UNICODE_STRING_DEFINED) && defined(__MINGW32__) #define __UNICODE_STRING_DEFINED #endif typedef struct _UNICODE_STRING { diff --git a/deps/uv/test/test-env-vars.c b/deps/uv/test/test-env-vars.c index d7abb4249561a4..3814699356db55 100644 --- a/deps/uv/test/test-env-vars.c +++ b/deps/uv/test/test-env-vars.c @@ -88,6 +88,15 @@ TEST_IMPL(env_vars) { r = uv_os_unsetenv(name); ASSERT(r == 0); + /* Setting an environment variable to the empty string does not delete it. */ + r = uv_os_setenv(name, ""); + ASSERT(r == 0); + size = BUF_SIZE; + r = uv_os_getenv(name, buf, &size); + ASSERT(r == 0); + ASSERT(size == 0); + ASSERT(strlen(buf) == 0); + /* Check getting all env variables. */ r = uv_os_setenv(name, "123456789"); ASSERT(r == 0); diff --git a/deps/uv/test/test-fs.c b/deps/uv/test/test-fs.c index 0d92b0d3a0d319..9326c6bc2753f0 100644 --- a/deps/uv/test/test-fs.c +++ b/deps/uv/test/test-fs.c @@ -354,7 +354,7 @@ static void statfs_cb(uv_fs_t* req) { ASSERT(stats->f_files == 0); ASSERT(stats->f_ffree == 0); #else - ASSERT(stats->f_files > 0); + /* There is no assertion for stats->f_files that makes sense, so ignore it. 
*/ ASSERT(stats->f_ffree <= stats->f_files); #endif uv_fs_req_cleanup(req); diff --git a/deps/uv/test/test-list.h b/deps/uv/test/test-list.h index 6eb8ecadc7870f..b6066f27276dc6 100644 --- a/deps/uv/test/test-list.h +++ b/deps/uv/test/test-list.h @@ -112,6 +112,10 @@ TEST_DECLARE (tcp_connect_error_fault) TEST_DECLARE (tcp_connect_timeout) TEST_DECLARE (tcp_close_while_connecting) TEST_DECLARE (tcp_close) +TEST_DECLARE (tcp_close_reset_accepted) +TEST_DECLARE (tcp_close_reset_accepted_after_shutdown) +TEST_DECLARE (tcp_close_reset_client) +TEST_DECLARE (tcp_close_reset_client_after_shutdown) TEST_DECLARE (tcp_create_early) TEST_DECLARE (tcp_create_early_bad_bind) TEST_DECLARE (tcp_create_early_bad_domain) @@ -193,6 +197,7 @@ TEST_DECLARE (timer_huge_timeout) TEST_DECLARE (timer_huge_repeat) TEST_DECLARE (timer_run_once) TEST_DECLARE (timer_from_check) +TEST_DECLARE (timer_is_closing) TEST_DECLARE (timer_null_callback) TEST_DECLARE (timer_early_check) TEST_DECLARE (idle_starvation) @@ -445,6 +450,7 @@ TEST_DECLARE (we_get_signals) TEST_DECLARE (we_get_signal_one_shot) TEST_DECLARE (we_get_signals_mixed) TEST_DECLARE (signal_multiple_loops) +TEST_DECLARE (signal_pending_on_close) TEST_DECLARE (closed_fd_events) #endif #ifdef __APPLE__ @@ -623,6 +629,10 @@ TASK_LIST_START TEST_ENTRY (tcp_connect_timeout) TEST_ENTRY (tcp_close_while_connecting) TEST_ENTRY (tcp_close) + TEST_ENTRY (tcp_close_reset_accepted) + TEST_ENTRY (tcp_close_reset_accepted_after_shutdown) + TEST_ENTRY (tcp_close_reset_client) + TEST_ENTRY (tcp_close_reset_client_after_shutdown) TEST_ENTRY (tcp_create_early) TEST_ENTRY (tcp_create_early_bad_bind) TEST_ENTRY (tcp_create_early_bad_domain) @@ -720,6 +730,7 @@ TASK_LIST_START TEST_ENTRY (timer_huge_repeat) TEST_ENTRY (timer_run_once) TEST_ENTRY (timer_from_check) + TEST_ENTRY (timer_is_closing) TEST_ENTRY (timer_null_callback) TEST_ENTRY (timer_early_check) @@ -886,6 +897,7 @@ TASK_LIST_START TEST_ENTRY (we_get_signal_one_shot) TEST_ENTRY (we_get_signals_mixed) TEST_ENTRY (signal_multiple_loops) + TEST_ENTRY (signal_pending_on_close) TEST_ENTRY (closed_fd_events) #endif diff --git a/deps/uv/test/test-signal-pending-on-close.c b/deps/uv/test/test-signal-pending-on-close.c new file mode 100644 index 00000000000000..bf8d2793d51f01 --- /dev/null +++ b/deps/uv/test/test-signal-pending-on-close.c @@ -0,0 +1,94 @@ +/* Copyright libuv project contributors. All rights reserved. +* +* Permission is hereby granted, free of charge, to any person obtaining a copy +* of this software and associated documentation files (the "Software"), to +* deal in the Software without restriction, including without limitation the +* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or +* sell copies of the Software, and to permit persons to whom the Software is +* furnished to do so, subject to the following conditions: +* +* The above copyright notice and this permission notice shall be included in +* all copies or substantial portions of the Software. +* +* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +* IN THE SOFTWARE. 
+*/ +#ifndef _WIN32 + +#include "uv.h" +#include "task.h" + +#include <string.h> +#include <unistd.h> + +static uv_loop_t loop; +static uv_signal_t signal_hdl; +static uv_pipe_t pipe_hdl; +static uv_write_t write_req; +static char* buf; +static int close_cb_called; + + +static void signal_cb(uv_signal_t* signal, int signum) { + ASSERT(0); +} + +static void close_cb(uv_handle_t *handle) { + close_cb_called++; +} + + +static void write_cb(uv_write_t* req, int status) { + ASSERT(req != NULL); + ASSERT(status == UV_EPIPE); + free(buf); + uv_close((uv_handle_t *) &pipe_hdl, close_cb); + uv_close((uv_handle_t *) &signal_hdl, close_cb); +} + + +TEST_IMPL(signal_pending_on_close) { + int pipefds[2]; + uv_buf_t buffer; + int r; + + ASSERT(0 == uv_loop_init(&loop)); + + ASSERT(0 == uv_signal_init(&loop, &signal_hdl)); + + ASSERT(0 == uv_signal_start(&signal_hdl, signal_cb, SIGPIPE)); + + ASSERT(0 == pipe(pipefds)); + + ASSERT(0 == uv_pipe_init(&loop, &pipe_hdl, 0)); + + ASSERT(0 == uv_pipe_open(&pipe_hdl, pipefds[1])); + + /* Write data large enough that it needs a loop iteration */ + buf = malloc(1<<24); + ASSERT(buf != NULL); + memset(buf, '.', 1<<24); + buffer = uv_buf_init(buf, 1<<24); + + r = uv_write(&write_req, (uv_stream_t *) &pipe_hdl, &buffer, 1, write_cb); + ASSERT(0 == r); + + /* Cause a SIGPIPE on write in the next iteration */ + close(pipefds[0]); + + ASSERT(0 == uv_run(&loop, UV_RUN_DEFAULT)); + + ASSERT(0 == uv_loop_close(&loop)); + + ASSERT(2 == close_cb_called); + + MAKE_VALGRIND_HAPPY(); + return 0; +} + +#endif \ No newline at end of file diff --git a/deps/uv/test/test-spawn.c b/deps/uv/test/test-spawn.c index fec610bfdef97e..be9e2539aa6562 100644 --- a/deps/uv/test/test-spawn.c +++ b/deps/uv/test/test-spawn.c @@ -240,7 +240,7 @@ TEST_IMPL(spawn_empty_env) { * in the environment, but of course that doesn't work with * the empty environment that we're testing here. */ - if (NULL != getenv("DYLD_LIBARY_PATH") || + if (NULL != getenv("DYLD_LIBRARY_PATH") || NULL != getenv("LD_LIBRARY_PATH")) { RETURN_SKIP("doesn't work with DYLD_LIBRARY_PATH/LD_LIBRARY_PATH"); } diff --git a/deps/uv/test/test-tcp-bind-error.c b/deps/uv/test/test-tcp-bind-error.c index 1456d081ae6374..f95efd9f0c8900 100644 --- a/deps/uv/test/test-tcp-bind-error.c +++ b/deps/uv/test/test-tcp-bind-error.c @@ -239,11 +239,7 @@ TEST_IMPL(tcp_bind_writable_flags) { r = uv_write(&write_req, (uv_stream_t*) &server, &buf, 1, NULL); ASSERT(r == UV_EPIPE); r = uv_shutdown(&shutdown_req, (uv_stream_t*) &server, NULL); -#ifdef _WIN32 - ASSERT(r == UV_EPIPE); -#else ASSERT(r == UV_ENOTCONN); -#endif r = uv_read_start((uv_stream_t*) &server, NULL, NULL); ASSERT(r == UV_ENOTCONN); diff --git a/deps/uv/test/test-tcp-close-reset.c b/deps/uv/test/test-tcp-close-reset.c new file mode 100644 index 00000000000000..7ca55c4c7f984b --- /dev/null +++ b/deps/uv/test/test-tcp-close-reset.c @@ -0,0 +1,290 @@ +/* Copyright libuv project contributors. All rights reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to + * deal in the Software without restriction, including without limitation the + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. + */ + +#include "uv.h" +#include "task.h" + +#include <errno.h> +#include <string.h> /* memset */ + +static uv_loop_t* loop; +static uv_tcp_t tcp_server; +static uv_tcp_t tcp_client; +static uv_tcp_t tcp_accepted; +static uv_connect_t connect_req; +static uv_shutdown_t shutdown_req; +static uv_write_t write_reqs[4]; + +static int client_close; +static int shutdown_before_close; + +static int write_cb_called; +static int close_cb_called; +static int shutdown_cb_called; + +static void connect_cb(uv_connect_t* req, int status); +static void write_cb(uv_write_t* req, int status); +static void close_cb(uv_handle_t* handle); +static void shutdown_cb(uv_shutdown_t* req, int status); + +static int read_size; + + +static void do_write(uv_tcp_t* handle) { + uv_buf_t buf; + unsigned i; + int r; + + buf = uv_buf_init("PING", 4); + for (i = 0; i < ARRAY_SIZE(write_reqs); i++) { + r = uv_write(&write_reqs[i], (uv_stream_t*) handle, &buf, 1, write_cb); + ASSERT(r == 0); + } +} + + +static void do_close(uv_tcp_t* handle) { + if (shutdown_before_close == 1) { + ASSERT(0 == uv_shutdown(&shutdown_req, (uv_stream_t*) handle, shutdown_cb)); + ASSERT(UV_EINVAL == uv_tcp_close_reset(handle, close_cb)); + } else { + ASSERT(0 == uv_tcp_close_reset(handle, close_cb)); + ASSERT(UV_ENOTCONN == uv_shutdown(&shutdown_req, (uv_stream_t*) handle, shutdown_cb)); + } + + uv_close((uv_handle_t*) &tcp_server, NULL); +} + +static void alloc_cb(uv_handle_t* handle, size_t size, uv_buf_t* buf) { + static char slab[1024]; + buf->base = slab; + buf->len = sizeof(slab); +} + +static void read_cb2(uv_stream_t* stream, ssize_t nread, const uv_buf_t* buf) { + ASSERT((uv_tcp_t*)stream == &tcp_client); + if (nread == UV_EOF) + uv_close((uv_handle_t*) stream, NULL); +} + + +static void connect_cb(uv_connect_t* conn_req, int status) { + ASSERT(conn_req == &connect_req); + uv_read_start((uv_stream_t*) &tcp_client, alloc_cb, read_cb2); + do_write(&tcp_client); + if (client_close) + do_close(&tcp_client); +} + + +static void write_cb(uv_write_t* req, int status) { + /* Write callbacks should run before the close callback */ + ASSERT(close_cb_called == 0); + ASSERT(req->handle == (uv_stream_t*)&tcp_client); + write_cb_called++; +} + + +static void close_cb(uv_handle_t* handle) { + if (client_close) + ASSERT(handle == (uv_handle_t*) &tcp_client); + else + ASSERT(handle == (uv_handle_t*) &tcp_accepted); + + close_cb_called++; +} + +static void shutdown_cb(uv_shutdown_t* req, int status) { + if (client_close) + ASSERT(req->handle == (uv_stream_t*) &tcp_client); + else + ASSERT(req->handle == (uv_stream_t*) &tcp_accepted); + + shutdown_cb_called++; +} + + +static void read_cb(uv_stream_t* stream, ssize_t nread, const uv_buf_t* buf) { + ASSERT((uv_tcp_t*)stream == &tcp_accepted); + if (nread < 0) { + uv_close((uv_handle_t*) stream, NULL); + } else { + read_size += nread; + if (read_size == 16 && client_close == 0) + do_close(&tcp_accepted); + } +} + + +static void connection_cb(uv_stream_t* server, int status) { + ASSERT(status == 0); + + ASSERT(0 == uv_tcp_init(loop, 
&tcp_accepted)); + ASSERT(0 == uv_accept(server, (uv_stream_t*) &tcp_accepted)); + + uv_read_start((uv_stream_t*) &tcp_accepted, alloc_cb, read_cb); +} + + +static void start_server(uv_loop_t* loop, uv_tcp_t* handle) { + struct sockaddr_in addr; + int r; + + ASSERT(0 == uv_ip4_addr("127.0.0.1", TEST_PORT, &addr)); + + r = uv_tcp_init(loop, handle); + ASSERT(r == 0); + + r = uv_tcp_bind(handle, (const struct sockaddr*) &addr, 0); + ASSERT(r == 0); + + r = uv_listen((uv_stream_t*)handle, 128, connection_cb); + ASSERT(r == 0); +} + + +static void do_connect(uv_loop_t* loop, uv_tcp_t* tcp_client) { + struct sockaddr_in addr; + int r; + + ASSERT(0 == uv_ip4_addr("127.0.0.1", TEST_PORT, &addr)); + + r = uv_tcp_init(loop, tcp_client); + ASSERT(r == 0); + + r = uv_tcp_connect(&connect_req, + tcp_client, + (const struct sockaddr*) &addr, + connect_cb); + ASSERT(r == 0); +} + + +/* Check that pending write requests have their callbacks + * invoked when the handle is closed. + */ +TEST_IMPL(tcp_close_reset_client) { + int r; + + loop = uv_default_loop(); + + start_server(loop, &tcp_server); + + client_close = 1; + shutdown_before_close = 0; + + do_connect(loop, &tcp_client); + + ASSERT(write_cb_called == 0); + ASSERT(close_cb_called == 0); + ASSERT(shutdown_cb_called == 0); + + r = uv_run(loop, UV_RUN_DEFAULT); + ASSERT(r == 0); + + ASSERT(write_cb_called == 4); + ASSERT(close_cb_called == 1); + ASSERT(shutdown_cb_called == 0); + + MAKE_VALGRIND_HAPPY(); + return 0; +} + +TEST_IMPL(tcp_close_reset_client_after_shutdown) { + int r; + + loop = uv_default_loop(); + + start_server(loop, &tcp_server); + + client_close = 1; + shutdown_before_close = 1; + + do_connect(loop, &tcp_client); + + ASSERT(write_cb_called == 0); + ASSERT(close_cb_called == 0); + ASSERT(shutdown_cb_called == 0); + + r = uv_run(loop, UV_RUN_DEFAULT); + ASSERT(r == 0); + + ASSERT(write_cb_called == 4); + ASSERT(close_cb_called == 0); + ASSERT(shutdown_cb_called == 1); + + MAKE_VALGRIND_HAPPY(); + return 0; +} + +TEST_IMPL(tcp_close_reset_accepted) { + int r; + + loop = uv_default_loop(); + + start_server(loop, &tcp_server); + + client_close = 0; + shutdown_before_close = 0; + + do_connect(loop, &tcp_client); + + ASSERT(write_cb_called == 0); + ASSERT(close_cb_called == 0); + ASSERT(shutdown_cb_called == 0); + + r = uv_run(loop, UV_RUN_DEFAULT); + ASSERT(r == 0); + + ASSERT(write_cb_called == 4); + ASSERT(close_cb_called == 1); + ASSERT(shutdown_cb_called == 0); + + MAKE_VALGRIND_HAPPY(); + return 0; +} + +TEST_IMPL(tcp_close_reset_accepted_after_shutdown) { + int r; + + loop = uv_default_loop(); + + start_server(loop, &tcp_server); + + client_close = 0; + shutdown_before_close = 1; + + do_connect(loop, &tcp_client); + + ASSERT(write_cb_called == 0); + ASSERT(close_cb_called == 0); + ASSERT(shutdown_cb_called == 0); + + r = uv_run(loop, UV_RUN_DEFAULT); + ASSERT(r == 0); + + ASSERT(write_cb_called == 4); + ASSERT(close_cb_called == 0); + ASSERT(shutdown_cb_called == 1); + + MAKE_VALGRIND_HAPPY(); + return 0; +} diff --git a/deps/uv/test/test-timer.c b/deps/uv/test/test-timer.c index 080a73005ee3c2..c667da00ec3af8 100644 --- a/deps/uv/test/test-timer.c +++ b/deps/uv/test/test-timer.c @@ -292,6 +292,19 @@ TEST_IMPL(timer_run_once) { } +TEST_IMPL(timer_is_closing) { + uv_timer_t handle; + + ASSERT(0 == uv_timer_init(uv_default_loop(), &handle)); + uv_close((uv_handle_t *)&handle, NULL); + + ASSERT(UV_EINVAL == uv_timer_start(&handle, never_cb, 100, 100)); + + MAKE_VALGRIND_HAPPY(); + return 0; +} + + TEST_IMPL(timer_null_callback) { 
uv_timer_t handle; diff --git a/deps/uv/test/test-udp-multicast-join.c b/deps/uv/test/test-udp-multicast-join.c index 053d2f791498f7..9ee80e44e7c24a 100644 --- a/deps/uv/test/test-udp-multicast-join.c +++ b/deps/uv/test/test-udp-multicast-join.c @@ -29,8 +29,12 @@ #define CHECK_HANDLE(handle) \ ASSERT((uv_udp_t*)(handle) == &server || (uv_udp_t*)(handle) == &client) +#define MULTICAST_ADDR "239.255.0.1" + static uv_udp_t server; static uv_udp_t client; +static uv_udp_send_t req; +static uv_udp_send_t req_ss; static int cl_recv_cb_called; @@ -62,7 +66,26 @@ static void sv_send_cb(uv_udp_send_t* req, int status) { sv_send_cb_called++; - uv_close((uv_handle_t*) req->handle, close_cb); + if (sv_send_cb_called == 2) + uv_close((uv_handle_t*) req->handle, close_cb); +} + + +static int do_send(uv_udp_send_t* send_req) { + uv_buf_t buf; + struct sockaddr_in addr; + + buf = uv_buf_init("PING", 4); + + ASSERT(0 == uv_ip4_addr(MULTICAST_ADDR, TEST_PORT, &addr)); + + /* client sends "PING" */ + return uv_udp_send(send_req, + &client, + &buf, + 1, + (const struct sockaddr*) &addr, + sv_send_cb); } @@ -74,8 +97,6 @@ static void cl_recv_cb(uv_udp_t* handle, CHECK_HANDLE(handle); ASSERT(flags == 0); - cl_recv_cb_called++; - if (nread < 0) { ASSERT(0 && "unexpected error"); } @@ -90,18 +111,35 @@ static void cl_recv_cb(uv_udp_t* handle, ASSERT(nread == 4); ASSERT(!memcmp("PING", buf->base, nread)); - /* we are done with the client handle, we can close it */ - uv_close((uv_handle_t*) &client, close_cb); + cl_recv_cb_called++; + + if (cl_recv_cb_called == 2) { + /* we are done with the server handle, we can close it */ + uv_close((uv_handle_t*) &server, close_cb); + } else { + int r; + char source_addr[64]; + + r = uv_ip4_name((const struct sockaddr_in*)addr, source_addr, sizeof(source_addr)); + ASSERT(r == 0); + + r = uv_udp_set_membership(&server, MULTICAST_ADDR, NULL, UV_LEAVE_GROUP); + ASSERT(r == 0); + + r = uv_udp_set_source_membership(&server, MULTICAST_ADDR, NULL, source_addr, UV_JOIN_GROUP); + ASSERT(r == 0); + + r = do_send(&req_ss); + ASSERT(r == 0); + } } TEST_IMPL(udp_multicast_join) { int r; - uv_udp_send_t req; - uv_buf_t buf; struct sockaddr_in addr; - ASSERT(0 == uv_ip4_addr("127.0.0.1", TEST_PORT, &addr)); + ASSERT(0 == uv_ip4_addr("0.0.0.0", TEST_PORT, &addr)); r = uv_udp_init(uv_default_loop(), &server); ASSERT(r == 0); @@ -110,27 +148,19 @@ TEST_IMPL(udp_multicast_join) { ASSERT(r == 0); /* bind to the desired port */ - r = uv_udp_bind(&client, (const struct sockaddr*) &addr, 0); + r = uv_udp_bind(&server, (const struct sockaddr*) &addr, 0); ASSERT(r == 0); /* join the multicast channel */ - r = uv_udp_set_membership(&client, "239.255.0.1", NULL, UV_JOIN_GROUP); + r = uv_udp_set_membership(&server, MULTICAST_ADDR, NULL, UV_JOIN_GROUP); if (r == UV_ENODEV) RETURN_SKIP("No multicast support."); ASSERT(r == 0); - r = uv_udp_recv_start(&client, alloc_cb, cl_recv_cb); + r = uv_udp_recv_start(&server, alloc_cb, cl_recv_cb); ASSERT(r == 0); - buf = uv_buf_init("PING", 4); - - /* server sends "PING" */ - r = uv_udp_send(&req, - &server, - &buf, - 1, - (const struct sockaddr*) &addr, - sv_send_cb); + r = do_send(&req); ASSERT(r == 0); ASSERT(close_cb_called == 0); @@ -140,8 +170,8 @@ TEST_IMPL(udp_multicast_join) { /* run the loop till all events are processed */ uv_run(uv_default_loop(), UV_RUN_DEFAULT); - ASSERT(cl_recv_cb_called == 1); - ASSERT(sv_send_cb_called == 1); + ASSERT(cl_recv_cb_called == 2); + ASSERT(sv_send_cb_called == 2); ASSERT(close_cb_called == 2); MAKE_VALGRIND_HAPPY(); diff 
--git a/deps/uv/test/test-udp-multicast-join6.c b/deps/uv/test/test-udp-multicast-join6.c index bda5e20ea70403..edcd371b2c22c0 100644 --- a/deps/uv/test/test-udp-multicast-join6.c +++ b/deps/uv/test/test-udp-multicast-join6.c @@ -30,8 +30,23 @@ #define CHECK_HANDLE(handle) \ ASSERT((uv_udp_t*)(handle) == &server || (uv_udp_t*)(handle) == &client) +#if defined(__APPLE__) || \ + defined(_AIX) || \ + defined(__MVS__) || \ + defined(__FreeBSD_kernel__) || \ + defined(__NetBSD__) || \ + defined(__OpenBSD__) + #define MULTICAST_ADDR "ff02::1%lo0" + #define INTERFACE_ADDR "::1%lo0" +#else + #define MULTICAST_ADDR "ff02::1" + #define INTERFACE_ADDR NULL +#endif + static uv_udp_t server; static uv_udp_t client; +static uv_udp_send_t req; +static uv_udp_send_t req_ss; static int cl_recv_cb_called; @@ -63,7 +78,26 @@ static void sv_send_cb(uv_udp_send_t* req, int status) { sv_send_cb_called++; - uv_close((uv_handle_t*) req->handle, close_cb); + if (sv_send_cb_called == 2) + uv_close((uv_handle_t*) req->handle, close_cb); +} + + +static int do_send(uv_udp_send_t* send_req) { + uv_buf_t buf; + struct sockaddr_in6 addr; + + buf = uv_buf_init("PING", 4); + + ASSERT(0 == uv_ip6_addr(MULTICAST_ADDR, TEST_PORT, &addr)); + + /* client sends "PING" */ + return uv_udp_send(send_req, + &client, + &buf, + 1, + (const struct sockaddr*) &addr, + sv_send_cb); } @@ -75,8 +109,6 @@ static void cl_recv_cb(uv_udp_t* handle, CHECK_HANDLE(handle); ASSERT(flags == 0); - cl_recv_cb_called++; - if (nread < 0) { ASSERT(0 && "unexpected error"); } @@ -91,21 +123,57 @@ static void cl_recv_cb(uv_udp_t* handle, ASSERT(nread == 4); ASSERT(!memcmp("PING", buf->base, nread)); - /* we are done with the client handle, we can close it */ - uv_close((uv_handle_t*) &client, close_cb); + cl_recv_cb_called++; + + if (cl_recv_cb_called == 2) { + /* we are done with the server handle, we can close it */ + uv_close((uv_handle_t*) &server, close_cb); + } else { + int r; + char source_addr[64]; + + r = uv_ip6_name((const struct sockaddr_in6*)addr, source_addr, sizeof(source_addr)); + ASSERT(r == 0); + + r = uv_udp_set_membership(&server, MULTICAST_ADDR, INTERFACE_ADDR, UV_LEAVE_GROUP); + ASSERT(r == 0); + + r = uv_udp_set_source_membership(&server, MULTICAST_ADDR, INTERFACE_ADDR, source_addr, UV_JOIN_GROUP); + ASSERT(r == 0); + + r = do_send(&req_ss); + ASSERT(r == 0); + } +} + + +static int can_ipv6_external(void) { + uv_interface_address_t* addr; + int supported; + int count; + int i; + + if (uv_interface_addresses(&addr, &count)) + return 0; /* Assume no IPv6 support on failure. 
*/ + + supported = 0; + for (i = 0; supported == 0 && i < count; i += 1) + supported = (AF_INET6 == addr[i].address.address6.sin6_family && + !addr[i].is_internal); + + uv_free_interface_addresses(addr, count); + return supported; } TEST_IMPL(udp_multicast_join6) { int r; - uv_udp_send_t req; - uv_buf_t buf; struct sockaddr_in6 addr; - if (!can_ipv6()) - RETURN_SKIP("IPv6 not supported"); + if (!can_ipv6_external()) + RETURN_SKIP("No external IPv6 interface available"); - ASSERT(0 == uv_ip6_addr("::1", TEST_PORT, &addr)); + ASSERT(0 == uv_ip6_addr("::", TEST_PORT, &addr)); r = uv_udp_init(uv_default_loop(), &server); ASSERT(r == 0); @@ -114,20 +182,10 @@ TEST_IMPL(udp_multicast_join6) { ASSERT(r == 0); /* bind to the desired port */ - r = uv_udp_bind(&client, (const struct sockaddr*) &addr, 0); + r = uv_udp_bind(&server, (const struct sockaddr*) &addr, 0); ASSERT(r == 0); - /* join the multicast channel */ -#if defined(__APPLE__) || \ - defined(_AIX) || \ - defined(__MVS__) || \ - defined(__FreeBSD_kernel__) || \ - defined(__NetBSD__) || \ - defined(__OpenBSD__) - r = uv_udp_set_membership(&client, "ff02::1", "::1%lo0", UV_JOIN_GROUP); -#else - r = uv_udp_set_membership(&client, "ff02::1", NULL, UV_JOIN_GROUP); -#endif + r = uv_udp_set_membership(&server, MULTICAST_ADDR, INTERFACE_ADDR, UV_JOIN_GROUP); if (r == UV_ENODEV) { MAKE_VALGRIND_HAPPY(); RETURN_SKIP("No ipv6 multicast route"); @@ -135,18 +193,10 @@ TEST_IMPL(udp_multicast_join6) { ASSERT(r == 0); - r = uv_udp_recv_start(&client, alloc_cb, cl_recv_cb); + r = uv_udp_recv_start(&server, alloc_cb, cl_recv_cb); ASSERT(r == 0); - buf = uv_buf_init("PING", 4); - - /* server sends "PING" */ - r = uv_udp_send(&req, - &server, - &buf, - 1, - (const struct sockaddr*) &addr, - sv_send_cb); + r = do_send(&req); ASSERT(r == 0); ASSERT(close_cb_called == 0); @@ -156,8 +206,8 @@ TEST_IMPL(udp_multicast_join6) { /* run the loop till all events are processed */ uv_run(uv_default_loop(), UV_RUN_DEFAULT); - ASSERT(cl_recv_cb_called == 1); - ASSERT(sv_send_cb_called == 1); + ASSERT(cl_recv_cb_called == 2); + ASSERT(sv_send_cb_called == 2); ASSERT(close_cb_called == 2); MAKE_VALGRIND_HAPPY(); diff --git a/deps/uv/test/test.gyp b/deps/uv/test/test.gyp index 6158a2b8b6601f..60792ad6ebbb57 100644 --- a/deps/uv/test/test.gyp +++ b/deps/uv/test/test.gyp @@ -99,6 +99,7 @@ 'test-shutdown-twice.c', 'test-signal.c', 'test-signal-multiple-loops.c', + 'test-signal-pending-on-close.c', 'test-socket-buffer-size.c', 'test-spawn.c', 'test-strscpy.c', @@ -108,6 +109,7 @@ 'test-tcp-bind6-error.c', 'test-tcp-close.c', 'test-tcp-close-accept.c', + 'test-tcp-close-reset.c', 'test-tcp-close-while-connecting.c', 'test-tcp-create-socket-early.c', 'test-tcp-connect-error-after-write.c', diff --git a/deps/uv/uv.gyp b/deps/uv/uv.gyp index 46de9b769e76e4..75a6d9781995ae 100644 --- a/deps/uv/uv.gyp +++ b/deps/uv/uv.gyp @@ -20,6 +20,7 @@ '_UNIX03_SOURCE', '_UNIX03_WITHDRAWN', '_OPEN_SYS_IF_EXT', + '_OPEN_SYS_SOCK_EXT3', '_OPEN_SYS_SOCK_IPV6', '_OPEN_MSGQ_EXT', '_XOPEN_SOURCE_EXTENDED', diff --git a/deps/v8/.flake8 b/deps/v8/.flake8 new file mode 100644 index 00000000000000..c58d00ca051b81 --- /dev/null +++ b/deps/v8/.flake8 @@ -0,0 +1,11 @@ +[flake8] +ignore = E111,E114,E310 # We use 2-space indentation +exclude = + ./third_party/, # third-party code + ./build/, # third-party code + ./buildtools/, # third-party code + ./tools/swarming_client/, # third-party code + ./test/wasm-js/, # third-party code + ./test/wasm-js/data/, # third-party code + ./test/test262/data/, # third-party 
code + ./test/test262/harness/, # third-party code diff --git a/deps/v8/.gitignore b/deps/v8/.gitignore index 6350393ebf376e..ce47fa36103f45 100644 --- a/deps/v8/.gitignore +++ b/deps/v8/.gitignore @@ -26,7 +26,6 @@ .ccls-cache .cpplint-cache .cproject -.d8_history .gclient_entries .gdb_history .landmines @@ -39,8 +38,7 @@ /build /buildtools /check-header-includes -/hydrogen.cfg -/obj +/Default/ /out /out.gn /perf.data @@ -72,6 +70,7 @@ /third_party/googletest/src/googletest/include/gtest/* !/third_party/googletest/src/googletest/include/gtest/gtest_prod.h !/third_party/v8 +!/third_party/wasm-api /tools/clang /tools/gcmole/gcmole-tools /tools/gcmole/gcmole-tools.tar.gz @@ -83,6 +82,9 @@ /tools/oom_dump/oom_dump /tools/oom_dump/oom_dump.o /tools/swarming_client +/tools/turbolizer/build +/tools/turbolizer/.rpt2_cache +/tools/turbolizer/deploy /tools/visual_studio/Debug /tools/visual_studio/Release /v8.log.ll @@ -94,23 +96,15 @@ GTAGS TAGS bsuite compile_commands.json -!/test/mjsunit/d8 -d8_g gccauses gcsuspects gtags.files -shell -shell_g +node_modules tags turbo*.cfg turbo*.dot turbo*.json v8.ignition_dispatches_table.json -/Default/ -node_modules -tools/turbolizer/build -tools/turbolizer/.rpt2_cache -tools/turbolizer/deploy !/third_party/jinja2 !/third_party/markupsafe diff --git a/deps/v8/AUTHORS b/deps/v8/AUTHORS index 5a8628b4cb2a77..827d124b0dcf0d 100644 --- a/deps/v8/AUTHORS +++ b/deps/v8/AUTHORS @@ -128,17 +128,20 @@ Matthew Sporleder Maxim Mazurok Maxim Mossienko Michael Lutz +Michael Mclaughlin Michael Smith Michaël Zasso Mike Gilbert Mike Pennisi Mikhail Gusarov Milton Chiang +Mu Tao Myeong-bo Shim Nicolas Antonius Ernst Leopold Maria Kaiser Niklas Hambüchen Noj Vek Oleksandr Chekhovskyi +Oliver Dunk Paolo Giarrusso Patrick Gansterer Peng Fei @@ -160,6 +163,7 @@ Rob Wu Robert Meijer Robert Mustacchi Robert Nagy +Ross Kirsling Ruben Bridgewater Ryan Dahl Sakthipriyan Vairamani (thefourtheye) @@ -168,6 +172,7 @@ Sandro Santilli Sanjoy Das Seo Sanghyeon Shawn Anastasio +Shawn Presser Stefan Penner Sylvestre Ledru Taketoshi Aono diff --git a/deps/v8/BUILD.gn b/deps/v8/BUILD.gn index 8640517ae5c23d..efca4a626f1633 100644 --- a/deps/v8/BUILD.gn +++ b/deps/v8/BUILD.gn @@ -225,7 +225,7 @@ if (v8_enable_snapshot_native_code_counters == "") { v8_enable_snapshot_native_code_counters = v8_enable_debugging_features } if (v8_enable_shared_ro_heap == "") { - v8_enable_shared_ro_heap = v8_enable_lite_mode + v8_enable_shared_ro_heap = !v8_enable_pointer_compression && v8_use_snapshot } if (v8_enable_fast_torque == "") { v8_enable_fast_torque = v8_enable_fast_mksnapshot @@ -242,6 +242,8 @@ assert(!v8_enable_lite_mode || v8_use_snapshot, assert( !v8_enable_pointer_compression || !v8_enable_shared_ro_heap, "Pointer compression is not supported with shared read-only heap enabled") +assert(v8_use_snapshot || !v8_enable_shared_ro_heap, + "Shared read-only heap requires snapshot") v8_random_seed = "314159265" v8_toolset_for_shell = "host" @@ -408,6 +410,7 @@ config("features") { if (v8_enable_test_features) { defines += [ "V8_ENABLE_ALLOCATION_TIMEOUT" ] defines += [ "V8_ENABLE_FORCE_SLOW_PATH" ] + defines += [ "V8_ENABLE_DOUBLE_CONST_STORE_CHECK" ] } if (v8_enable_i18n_support) { defines += [ "V8_INTL_SUPPORT" ] @@ -940,6 +943,7 @@ torque_files = [ "src/builtins/array-unshift.tq", "src/builtins/array.tq", "src/builtins/base.tq", + "src/builtins/bigint.tq", "src/builtins/boolean.tq", "src/builtins/collections.tq", "src/builtins/data-view.tq", @@ -950,13 +954,20 @@ torque_files = [ 
"src/builtins/iterator.tq", "src/builtins/math.tq", "src/builtins/object-fromentries.tq", + "src/builtins/object.tq", "src/builtins/proxy-constructor.tq", + "src/builtins/proxy-delete-property.tq", "src/builtins/proxy-get-property.tq", + "src/builtins/proxy-get-prototype-of.tq", "src/builtins/proxy-has-property.tq", + "src/builtins/proxy-is-extensible.tq", + "src/builtins/proxy-prevent-extensions.tq", "src/builtins/proxy-revocable.tq", "src/builtins/proxy-revoke.tq", "src/builtins/proxy-set-property.tq", + "src/builtins/proxy-set-prototype-of.tq", "src/builtins/proxy.tq", + "src/builtins/reflect.tq", "src/builtins/regexp-replace.tq", "src/builtins/regexp.tq", "src/builtins/string.tq", @@ -988,57 +999,6 @@ if (!v8_enable_i18n_support) { torque_files -= [ "src/objects/intl-objects.tq" ] } -torque_namespaces = [ - "arguments", - "array", - "array-copywithin", - "array-filter", - "array-find", - "array-findindex", - "array-foreach", - "array-join", - "array-map", - "array-of", - "array-reverse", - "array-shift", - "array-slice", - "array-splice", - "array-unshift", - "array-lastindexof", - "base", - "boolean", - "collections", - "data-view", - "extras-utils", - "growable-fixed-array", - "internal-coverage", - "iterator", - "math", - "object", - "proxy", - "regexp", - "regexp-replace", - "string", - "string-html", - "string-iterator", - "string-repeat", - "string-slice", - "string-substring", - "test", - "typed-array", - "typed-array-createtypedarray", - "typed-array-every", - "typed-array-filter", - "typed-array-find", - "typed-array-findindex", - "typed-array-foreach", - "typed-array-reduce", - "typed-array-reduceright", - "typed-array-slice", - "typed-array-some", - "typed-array-subarray", -] - action("run_torque") { visibility = [ ":*", @@ -1066,11 +1026,13 @@ action("run_torque") { "$target_gen_dir/torque-generated/exported-macros-assembler-tq.cc", "$target_gen_dir/torque-generated/exported-macros-assembler-tq.h", "$target_gen_dir/torque-generated/csa-types-tq.h", + "$target_gen_dir/torque-generated/instance-types-tq.h", ] - foreach(namespace, torque_namespaces) { + foreach(file, torque_files) { + filetq = string_replace(file, ".tq", "-tq-csa") outputs += [ - "$target_gen_dir/torque-generated/builtins-$namespace-gen-tq.cc", - "$target_gen_dir/torque-generated/builtins-$namespace-gen-tq.h", + "$target_gen_dir/torque-generated/$filetq.cc", + "$target_gen_dir/torque-generated/$filetq.h", ] } @@ -1080,11 +1042,10 @@ action("run_torque") { root_build_dir), "-o", rebase_path("$target_gen_dir/torque-generated", root_build_dir), + "-v8-root", + rebase_path(".", root_build_dir), ] - - foreach(file, torque_files) { - args += [ rebase_path(file, root_build_dir) ] - } + args += torque_files } group("v8_maybe_icu") { @@ -1112,10 +1073,11 @@ v8_source_set("torque_generated_initializers") { "$target_gen_dir/torque-generated/exported-macros-assembler-tq.cc", "$target_gen_dir/torque-generated/exported-macros-assembler-tq.h", ] - foreach(namespace, torque_namespaces) { + foreach(file, torque_files) { + filetq = string_replace(file, ".tq", "-tq-csa") sources += [ - "$target_gen_dir/torque-generated/builtins-$namespace-gen-tq.cc", - "$target_gen_dir/torque-generated/builtins-$namespace-gen-tq.h", + "$target_gen_dir/torque-generated/$filetq.cc", + "$target_gen_dir/torque-generated/$filetq.h", ] } @@ -1515,6 +1477,7 @@ v8_source_set("v8_initializers") { "src/builtins/builtins-async-generator-gen.cc", "src/builtins/builtins-async-iterator-gen.cc", "src/builtins/builtins-bigint-gen.cc", + 
"src/builtins/builtins-bigint-gen.h", "src/builtins/builtins-boolean-gen.cc", "src/builtins/builtins-call-gen.cc", "src/builtins/builtins-call-gen.h", @@ -1776,6 +1739,8 @@ v8_compiler_sources = [ "src/compiler/control-equivalence.h", "src/compiler/control-flow-optimizer.cc", "src/compiler/control-flow-optimizer.h", + "src/compiler/csa-load-elimination.cc", + "src/compiler/csa-load-elimination.h", "src/compiler/dead-code-elimination.cc", "src/compiler/dead-code-elimination.h", "src/compiler/decompression-elimination.cc", @@ -1913,6 +1878,8 @@ v8_compiler_sources = [ "src/compiler/state-values-utils.h", "src/compiler/store-store-elimination.cc", "src/compiler/store-store-elimination.h", + "src/compiler/add-type-assertions-reducer.cc", + "src/compiler/add-type-assertions-reducer.h", "src/compiler/type-cache.cc", "src/compiler/type-cache.h", "src/compiler/type-narrowing-reducer.cc", @@ -2123,6 +2090,8 @@ v8_source_set("v8_base_without_compiler") { "src/codegen/macro-assembler.h", "src/codegen/optimized-compilation-info.cc", "src/codegen/optimized-compilation-info.h", + "src/codegen/pending-optimization-table.cc", + "src/codegen/pending-optimization-table.h", "src/codegen/register-arch.h", "src/codegen/register-configuration.cc", "src/codegen/register-configuration.h", @@ -2139,6 +2108,8 @@ v8_source_set("v8_base_without_compiler") { "src/codegen/source-position.h", "src/codegen/string-constants.cc", "src/codegen/string-constants.h", + "src/codegen/tick-counter.cc", + "src/codegen/tick-counter.h", "src/codegen/turbo-assembler.cc", "src/codegen/turbo-assembler.h", "src/codegen/unoptimized-compilation-info.cc", @@ -2148,7 +2119,6 @@ v8_source_set("v8_base_without_compiler") { "src/common/checks.h", "src/common/ptr-compr-inl.h", "src/common/ptr-compr.h", - "src/common/v8memory.h", "src/compiler-dispatcher/compiler-dispatcher.cc", "src/compiler-dispatcher/compiler-dispatcher.h", "src/compiler-dispatcher/optimizing-compile-dispatcher.cc", @@ -2212,8 +2182,11 @@ v8_source_set("v8_base_without_compiler") { "src/execution/frames.h", "src/execution/futex-emulation.cc", "src/execution/futex-emulation.h", + "src/execution/interrupts-scope.cc", + "src/execution/interrupts-scope.h", "src/execution/isolate-data.h", "src/execution/isolate-inl.h", + "src/execution/isolate-utils.h", "src/execution/isolate.cc", "src/execution/isolate.h", "src/execution/message-template.h", @@ -2226,6 +2199,8 @@ v8_source_set("v8_base_without_compiler") { "src/execution/simulator-base.cc", "src/execution/simulator-base.h", "src/execution/simulator.h", + "src/execution/stack-guard.cc", + "src/execution/stack-guard.h", "src/execution/thread-id.cc", "src/execution/thread-id.h", "src/execution/thread-local-top.cc", @@ -2234,6 +2209,8 @@ v8_source_set("v8_base_without_compiler") { "src/execution/v8threads.h", "src/execution/vm-state-inl.h", "src/execution/vm-state.h", + "src/extensions/cputracemark-extension.cc", + "src/extensions/cputracemark-extension.h", "src/extensions/externalize-string-extension.cc", "src/extensions/externalize-string-extension.h", "src/extensions/free-buffer-extension.cc", @@ -2262,6 +2239,8 @@ v8_source_set("v8_base_without_compiler") { "src/heap/array-buffer-tracker.cc", "src/heap/array-buffer-tracker.h", "src/heap/barrier.h", + "src/heap/basic-memory-chunk.cc", + "src/heap/basic-memory-chunk.h", "src/heap/code-stats.cc", "src/heap/code-stats.h", "src/heap/combined-heap.cc", @@ -2308,6 +2287,7 @@ v8_source_set("v8_base_without_compiler") { "src/heap/objects-visiting-inl.h", "src/heap/objects-visiting.cc", 
"src/heap/objects-visiting.h", + "src/heap/read-only-heap-inl.h", "src/heap/read-only-heap.cc", "src/heap/read-only-heap.h", "src/heap/remembered-set.h", @@ -2623,6 +2603,8 @@ v8_source_set("v8_base_without_compiler") { "src/objects/slots-atomic-inl.h", "src/objects/slots-inl.h", "src/objects/slots.h", + "src/objects/source-text-module.cc", + "src/objects/source-text-module.h", "src/objects/stack-frame-info-inl.h", "src/objects/stack-frame-info.cc", "src/objects/stack-frame-info.h", @@ -2635,6 +2617,10 @@ v8_source_set("v8_base_without_compiler") { "src/objects/string.h", "src/objects/struct-inl.h", "src/objects/struct.h", + "src/objects/synthetic-module.cc", + "src/objects/synthetic-module.h", + "src/objects/tagged-field-inl.h", + "src/objects/tagged-field.h", "src/objects/tagged-impl-inl.h", "src/objects/tagged-impl.cc", "src/objects/tagged-impl.h", @@ -2709,23 +2695,27 @@ v8_source_set("v8_base_without_compiler") { "src/profiler/tick-sample.h", "src/profiler/tracing-cpu-profiler.cc", "src/profiler/tracing-cpu-profiler.h", - "src/regexp/bytecodes-irregexp.h", - "src/regexp/interpreter-irregexp.cc", - "src/regexp/interpreter-irregexp.h", - "src/regexp/jsregexp-inl.h", - "src/regexp/jsregexp.cc", - "src/regexp/jsregexp.h", "src/regexp/property-sequences.cc", "src/regexp/property-sequences.h", "src/regexp/regexp-ast.cc", "src/regexp/regexp-ast.h", - "src/regexp/regexp-macro-assembler-irregexp-inl.h", - "src/regexp/regexp-macro-assembler-irregexp.cc", - "src/regexp/regexp-macro-assembler-irregexp.h", + "src/regexp/regexp-bytecode-generator-inl.h", + "src/regexp/regexp-bytecode-generator.cc", + "src/regexp/regexp-bytecode-generator.h", + "src/regexp/regexp-bytecodes.h", + "src/regexp/regexp-compiler-tonode.cc", + "src/regexp/regexp-compiler.cc", + "src/regexp/regexp-compiler.h", + "src/regexp/regexp-dotprinter.cc", + "src/regexp/regexp-dotprinter.h", + "src/regexp/regexp-interpreter.cc", + "src/regexp/regexp-interpreter.h", + "src/regexp/regexp-macro-assembler-arch.h", "src/regexp/regexp-macro-assembler-tracer.cc", "src/regexp/regexp-macro-assembler-tracer.h", "src/regexp/regexp-macro-assembler.cc", "src/regexp/regexp-macro-assembler.h", + "src/regexp/regexp-nodes.h", "src/regexp/regexp-parser.cc", "src/regexp/regexp-parser.h", "src/regexp/regexp-special-case.h", @@ -2733,6 +2723,8 @@ v8_source_set("v8_base_without_compiler") { "src/regexp/regexp-stack.h", "src/regexp/regexp-utils.cc", "src/regexp/regexp-utils.h", + "src/regexp/regexp.cc", + "src/regexp/regexp.h", "src/roots/roots-inl.h", "src/roots/roots.cc", "src/roots/roots.h", @@ -2866,8 +2858,6 @@ v8_source_set("v8_base_without_compiler") { "src/utils/ostreams.cc", "src/utils/ostreams.h", "src/utils/pointer-with-payload.h", - "src/utils/splay-tree-inl.h", - "src/utils/splay-tree.h", "src/utils/utils-inl.h", "src/utils/utils.cc", "src/utils/utils.h", @@ -2889,7 +2879,6 @@ v8_source_set("v8_base_without_compiler") { "src/wasm/function-compiler.h", "src/wasm/graph-builder-interface.cc", "src/wasm/graph-builder-interface.h", - "src/wasm/js-to-wasm-wrapper-cache.h", "src/wasm/jump-table-assembler.cc", "src/wasm/jump-table-assembler.h", "src/wasm/leb-helper.h", @@ -2909,6 +2898,7 @@ v8_source_set("v8_base_without_compiler") { "src/wasm/streaming-decoder.cc", "src/wasm/streaming-decoder.h", "src/wasm/value-type.h", + "src/wasm/wasm-arguments.h", "src/wasm/wasm-code-manager.cc", "src/wasm/wasm-code-manager.h", "src/wasm/wasm-constants.h", @@ -2956,7 +2946,6 @@ v8_source_set("v8_base_without_compiler") { "src/zone/zone-list-inl.h", 
"src/zone/zone-segment.cc", "src/zone/zone-segment.h", - "src/zone/zone-splay-tree.h", "src/zone/zone.cc", "src/zone/zone.h", ] @@ -3348,6 +3337,7 @@ v8_source_set("torque_base") { "src/torque/declarations.h", "src/torque/earley-parser.cc", "src/torque/earley-parser.h", + "src/torque/global-context.cc", "src/torque/global-context.h", "src/torque/implementation-visitor.cc", "src/torque/implementation-visitor.h", @@ -3379,6 +3369,9 @@ v8_source_set("torque_base") { ":v8_libbase", ] + # The use of exceptions for Torque in violation of the Chromium style-guide + # is justified by the fact that it is only used from the non-essential + # language server and can be removed anytime if it causes problems. configs = [ ":internal_config", "//build/config/compiler:exceptions", @@ -3421,6 +3414,9 @@ v8_source_set("torque_ls_base") { ":torque_base", ] + # The use of exceptions for Torque in violation of the Chromium style-guide + # is justified by the fact that it is only used from the non-essential + # language server and can be removed anytime if it causes problems. configs = [ ":internal_config", "//build/config/compiler:exceptions", @@ -3476,7 +3472,9 @@ v8_component("v8_libbase") { "src/base/list.h", "src/base/logging.cc", "src/base/logging.h", + "src/base/lsan.h", "src/base/macros.h", + "src/base/memory.h", "src/base/once.cc", "src/base/once.h", "src/base/optional.h", @@ -3506,6 +3504,8 @@ v8_component("v8_libbase") { "src/base/type-traits.h", "src/base/utils/random-number-generator.cc", "src/base/utils/random-number-generator.h", + "src/base/vlq-base64.cc", + "src/base/vlq-base64.h", ] configs = [ ":internal_config_base" ] @@ -3671,21 +3671,15 @@ v8_component("v8_libplatform") { sources += [ "src/libplatform/tracing/json-trace-event-listener.cc", "src/libplatform/tracing/json-trace-event-listener.h", - "src/libplatform/tracing/perfetto-consumer.cc", - "src/libplatform/tracing/perfetto-consumer.h", - "src/libplatform/tracing/perfetto-producer.cc", - "src/libplatform/tracing/perfetto-producer.h", - "src/libplatform/tracing/perfetto-shared-memory.cc", - "src/libplatform/tracing/perfetto-shared-memory.h", - "src/libplatform/tracing/perfetto-tasks.cc", - "src/libplatform/tracing/perfetto-tasks.h", - "src/libplatform/tracing/perfetto-tracing-controller.cc", - "src/libplatform/tracing/perfetto-tracing-controller.h", + "src/libplatform/tracing/trace-event-listener.cc", "src/libplatform/tracing/trace-event-listener.h", ] deps += [ - "//third_party/perfetto:libperfetto", + "//third_party/perfetto/protos/perfetto/trace:lite", "//third_party/perfetto/protos/perfetto/trace/chrome:minimal_complete_lite", + "//third_party/perfetto/protos/perfetto/trace/chrome:zero", + "//third_party/perfetto/src/tracing:client_api", + "//third_party/perfetto/src/tracing:platform_posix", ] } } @@ -3846,6 +3840,9 @@ if (current_toolchain == v8_snapshot_toolchain) { "//build/win:default_exe_manifest", ] + # The use of exceptions for Torque in violation of the Chromium style-guide + # is justified by the fact that it is only used from the non-essential + # language server and can be removed anytime if it causes problems. configs = [ ":internal_config", "//build/config/compiler:exceptions", @@ -3876,6 +3873,9 @@ v8_executable("torque-language-server") { "//build/win:default_exe_manifest", ] + # The use of exceptions for Torque in violation of the Chromium style-guide + # is justified by the fact that it is only used from the non-essential + # language server and can be removed anytime if it causes problems. 
configs = [ ":internal_config", "//build/config/compiler:exceptions", @@ -3892,48 +3892,51 @@ v8_executable("torque-language-server") { } } -if (current_toolchain == v8_generator_toolchain) { - v8_executable("gen-regexp-special-case") { - visibility = [ ":*" ] # Only targets in this file can depend on this. +if (v8_enable_i18n_support) { + if (current_toolchain == v8_generator_toolchain) { + v8_executable("gen-regexp-special-case") { + visibility = [ ":*" ] # Only targets in this file can depend on this. - sources = [ - "src/regexp/gen-regexp-special-case.cc", - ] + sources = [ + "src/regexp/gen-regexp-special-case.cc", + ] - deps = [ - ":v8_libbase", - "//build/win:default_exe_manifest", - "//third_party/icu", - ] + deps = [ + ":v8_libbase", + "//build/win:default_exe_manifest", + "//third_party/icu", + ] - configs = [ ":internal_config" ] + configs = [ ":internal_config" ] + } } -} -action("run_gen-regexp-special-case") { - visibility = [ ":*" ] # Only targets in this file can depend on this. + action("run_gen-regexp-special-case") { + visibility = [ ":*" ] # Only targets in this file can depend on this. - script = "tools/run.py" + script = "tools/run.py" - sources = v8_extra_library_files + sources = v8_extra_library_files - deps = [ - ":gen-regexp-special-case($v8_generator_toolchain)", - ] + deps = [ + ":gen-regexp-special-case($v8_generator_toolchain)", + ] - output_file = "$target_gen_dir/src/regexp/special-case.cc" + output_file = "$target_gen_dir/src/regexp/special-case.cc" - outputs = [ - output_file, - ] + outputs = [ + output_file, + ] - args = [ - "./" + rebase_path( - get_label_info(":gen-regexp-special-case($v8_generator_toolchain)", - "root_out_dir") + "/gen-regexp-special-case", - root_build_dir), - rebase_path(output_file, root_build_dir), - ] + args = [ + "./" + rebase_path( + get_label_info( + ":gen-regexp-special-case($v8_generator_toolchain)", + "root_out_dir") + "/gen-regexp-special-case", + root_build_dir), + rebase_path(output_file, root_build_dir), + ] + } } ############################################################################### @@ -4146,6 +4149,10 @@ v8_executable("d8") { if (v8_enable_vtunejit) { deps += [ "src/third_party/vtune:v8_vtune" ] } + + if (v8_use_perfetto) { + deps += [ "//third_party/perfetto/include/perfetto/tracing" ] + } } v8_executable("v8_hello_world") { @@ -4451,7 +4458,6 @@ group("v8_generated_cc_files") { ":js2c_extras", ":run_torque", "src/inspector:v8_generated_cc_files", - "test/cctest:v8_generated_cc_files", ] } diff --git a/deps/v8/ChangeLog b/deps/v8/ChangeLog index c21ac1176032d5..27afc18a5117cd 100644 --- a/deps/v8/ChangeLog +++ b/deps/v8/ChangeLog @@ -1,3 +1,1538 @@ +2019-07-23: Version 7.7.299 + + Performance and stability improvements on all platforms. + + +2019-07-23: Version 7.7.298 + + Performance and stability improvements on all platforms. + + +2019-07-23: Version 7.7.297 + + Performance and stability improvements on all platforms. + + +2019-07-23: Version 7.7.296 + + Performance and stability improvements on all platforms. + + +2019-07-22: Version 7.7.295 + + Performance and stability improvements on all platforms. + + +2019-07-22: Version 7.7.294 + + Performance and stability improvements on all platforms. + + +2019-07-22: Version 7.7.293 + + Performance and stability improvements on all platforms. + + +2019-07-22: Version 7.7.292 + + Performance and stability improvements on all platforms. + + +2019-07-22: Version 7.7.291 + + Performance and stability improvements on all platforms. 
+ + +2019-07-21: Version 7.7.290 + + Performance and stability improvements on all platforms. + + +2019-07-20: Version 7.7.289 + + Performance and stability improvements on all platforms. + + +2019-07-20: Version 7.7.288 + + Performance and stability improvements on all platforms. + + +2019-07-19: Version 7.7.287 + + Performance and stability improvements on all platforms. + + +2019-07-19: Version 7.7.286 + + Performance and stability improvements on all platforms. + + +2019-07-19: Version 7.7.285 + + Performance and stability improvements on all platforms. + + +2019-07-19: Version 7.7.284 + + Performance and stability improvements on all platforms. + + +2019-07-19: Version 7.7.283 + + Performance and stability improvements on all platforms. + + +2019-07-19: Version 7.7.282 + + Performance and stability improvements on all platforms. + + +2019-07-19: Version 7.7.281 + + Performance and stability improvements on all platforms. + + +2019-07-19: Version 7.7.280 + + Performance and stability improvements on all platforms. + + +2019-07-19: Version 7.7.279 + + Performance and stability improvements on all platforms. + + +2019-07-19: Version 7.7.278 + + Performance and stability improvements on all platforms. + + +2019-07-19: Version 7.7.277 + + Performance and stability improvements on all platforms. + + +2019-07-18: Version 7.7.276 + + Performance and stability improvements on all platforms. + + +2019-07-18: Version 7.7.275 + + Performance and stability improvements on all platforms. + + +2019-07-18: Version 7.7.274 + + Performance and stability improvements on all platforms. + + +2019-07-18: Version 7.7.273 + + Performance and stability improvements on all platforms. + + +2019-07-18: Version 7.7.272 + + Performance and stability improvements on all platforms. + + +2019-07-18: Version 7.7.271 + + Performance and stability improvements on all platforms. + + +2019-07-18: Version 7.7.270 + + Performance and stability improvements on all platforms. + + +2019-07-18: Version 7.7.269 + + Performance and stability improvements on all platforms. + + +2019-07-17: Version 7.7.268 + + Performance and stability improvements on all platforms. + + +2019-07-17: Version 7.7.267 + + Performance and stability improvements on all platforms. + + +2019-07-17: Version 7.7.266 + + Performance and stability improvements on all platforms. + + +2019-07-17: Version 7.7.265 + + Performance and stability improvements on all platforms. + + +2019-07-17: Version 7.7.264 + + Performance and stability improvements on all platforms. + + +2019-07-17: Version 7.7.263 + + Performance and stability improvements on all platforms. + + +2019-07-17: Version 7.7.262 + + Performance and stability improvements on all platforms. + + +2019-07-17: Version 7.7.261 + + Performance and stability improvements on all platforms. + + +2019-07-17: Version 7.7.260 + + Performance and stability improvements on all platforms. + + +2019-07-17: Version 7.7.259 + + Performance and stability improvements on all platforms. + + +2019-07-17: Version 7.7.258 + + Performance and stability improvements on all platforms. + + +2019-07-17: Version 7.7.257 + + Performance and stability improvements on all platforms. + + +2019-07-17: Version 7.7.256 + + Performance and stability improvements on all platforms. + + +2019-07-17: Version 7.7.255 + + Performance and stability improvements on all platforms. + + +2019-07-16: Version 7.7.254 + + Performance and stability improvements on all platforms. 
+ + +2019-07-16: Version 7.7.253 + + Performance and stability improvements on all platforms. + + +2019-07-16: Version 7.7.252 + + Performance and stability improvements on all platforms. + + +2019-07-16: Version 7.7.251 + + Performance and stability improvements on all platforms. + + +2019-07-16: Version 7.7.250 + + Performance and stability improvements on all platforms. + + +2019-07-16: Version 7.7.249 + + Performance and stability improvements on all platforms. + + +2019-07-16: Version 7.7.248 + + Performance and stability improvements on all platforms. + + +2019-07-16: Version 7.7.247 + + Performance and stability improvements on all platforms. + + +2019-07-16: Version 7.7.246 + + Performance and stability improvements on all platforms. + + +2019-07-16: Version 7.7.245 + + Performance and stability improvements on all platforms. + + +2019-07-16: Version 7.7.244 + + Performance and stability improvements on all platforms. + + +2019-07-16: Version 7.7.243 + + Performance and stability improvements on all platforms. + + +2019-07-16: Version 7.7.242 + + Performance and stability improvements on all platforms. + + +2019-07-15: Version 7.7.241 + + Performance and stability improvements on all platforms. + + +2019-07-15: Version 7.7.240 + + Performance and stability improvements on all platforms. + + +2019-07-15: Version 7.7.239 + + Performance and stability improvements on all platforms. + + +2019-07-15: Version 7.7.238 + + Performance and stability improvements on all platforms. + + +2019-07-15: Version 7.7.237 + + Performance and stability improvements on all platforms. + + +2019-07-15: Version 7.7.236 + + Performance and stability improvements on all platforms. + + +2019-07-15: Version 7.7.235 + + Performance and stability improvements on all platforms. + + +2019-07-15: Version 7.7.234 + + Performance and stability improvements on all platforms. + + +2019-07-15: Version 7.7.233 + + Performance and stability improvements on all platforms. + + +2019-07-15: Version 7.7.232 + + Performance and stability improvements on all platforms. + + +2019-07-15: Version 7.7.231 + + Performance and stability improvements on all platforms. + + +2019-07-14: Version 7.7.230 + + Performance and stability improvements on all platforms. + + +2019-07-14: Version 7.7.229 + + Performance and stability improvements on all platforms. + + +2019-07-13: Version 7.7.228 + + Performance and stability improvements on all platforms. + + +2019-07-13: Version 7.7.227 + + Performance and stability improvements on all platforms. + + +2019-07-13: Version 7.7.226 + + Performance and stability improvements on all platforms. + + +2019-07-12: Version 7.7.225 + + Performance and stability improvements on all platforms. + + +2019-07-12: Version 7.7.224 + + Performance and stability improvements on all platforms. + + +2019-07-12: Version 7.7.223 + + Performance and stability improvements on all platforms. + + +2019-07-12: Version 7.7.222 + + Performance and stability improvements on all platforms. + + +2019-07-12: Version 7.7.221 + + Performance and stability improvements on all platforms. + + +2019-07-12: Version 7.7.220 + + Performance and stability improvements on all platforms. + + +2019-07-12: Version 7.7.219 + + Performance and stability improvements on all platforms. + + +2019-07-12: Version 7.7.218 + + Performance and stability improvements on all platforms. + + +2019-07-11: Version 7.7.217 + + Performance and stability improvements on all platforms. 
+ + +2019-07-11: Version 7.7.216 + + Performance and stability improvements on all platforms. + + +2019-07-11: Version 7.7.215 + + Performance and stability improvements on all platforms. + + +2019-07-10: Version 7.7.214 + + Performance and stability improvements on all platforms. + + +2019-07-09: Version 7.7.213 + + Performance and stability improvements on all platforms. + + +2019-07-09: Version 7.7.212 + + Performance and stability improvements on all platforms. + + +2019-07-09: Version 7.7.211 + + Performance and stability improvements on all platforms. + + +2019-07-09: Version 7.7.210 + + Performance and stability improvements on all platforms. + + +2019-07-09: Version 7.7.209 + + Performance and stability improvements on all platforms. + + +2019-07-09: Version 7.7.208 + + Performance and stability improvements on all platforms. + + +2019-07-09: Version 7.7.207 + + Performance and stability improvements on all platforms. + + +2019-07-09: Version 7.7.206 + + Performance and stability improvements on all platforms. + + +2019-07-08: Version 7.7.205 + + Performance and stability improvements on all platforms. + + +2019-07-08: Version 7.7.204 + + Performance and stability improvements on all platforms. + + +2019-07-08: Version 7.7.203 + + Performance and stability improvements on all platforms. + + +2019-07-08: Version 7.7.202 + + Performance and stability improvements on all platforms. + + +2019-07-08: Version 7.7.201 + + Performance and stability improvements on all platforms. + + +2019-07-08: Version 7.7.200 + + Performance and stability improvements on all platforms. + + +2019-07-08: Version 7.7.199 + + Performance and stability improvements on all platforms. + + +2019-07-08: Version 7.7.198 + + Performance and stability improvements on all platforms. + + +2019-07-08: Version 7.7.197 + + Performance and stability improvements on all platforms. + + +2019-07-08: Version 7.7.196 + + Performance and stability improvements on all platforms. + + +2019-07-08: Version 7.7.195 + + Performance and stability improvements on all platforms. + + +2019-07-08: Version 7.7.194 + + Performance and stability improvements on all platforms. + + +2019-07-08: Version 7.7.193 + + Performance and stability improvements on all platforms. + + +2019-07-06: Version 7.7.192 + + Performance and stability improvements on all platforms. + + +2019-07-06: Version 7.7.191 + + Performance and stability improvements on all platforms. + + +2019-07-05: Version 7.7.190 + + Performance and stability improvements on all platforms. + + +2019-07-05: Version 7.7.189 + + Performance and stability improvements on all platforms. + + +2019-07-05: Version 7.7.188 + + Performance and stability improvements on all platforms. + + +2019-07-05: Version 7.7.187 + + Performance and stability improvements on all platforms. + + +2019-07-05: Version 7.7.186 + + Performance and stability improvements on all platforms. + + +2019-07-03: Version 7.7.185 + + Performance and stability improvements on all platforms. + + +2019-07-03: Version 7.7.184 + + Performance and stability improvements on all platforms. + + +2019-07-03: Version 7.7.183 + + Performance and stability improvements on all platforms. + + +2019-07-03: Version 7.7.182 + + Performance and stability improvements on all platforms. + + +2019-07-03: Version 7.7.181 + + Performance and stability improvements on all platforms. + + +2019-07-03: Version 7.7.180 + + Performance and stability improvements on all platforms. 
+ + +2019-07-02: Version 7.7.179 + + Performance and stability improvements on all platforms. + + +2019-07-02: Version 7.7.178 + + Performance and stability improvements on all platforms. + + +2019-07-02: Version 7.7.177 + + Performance and stability improvements on all platforms. + + +2019-07-02: Version 7.7.176 + + Performance and stability improvements on all platforms. + + +2019-07-02: Version 7.7.175 + + Performance and stability improvements on all platforms. + + +2019-07-02: Version 7.7.174 + + Performance and stability improvements on all platforms. + + +2019-07-02: Version 7.7.173 + + Performance and stability improvements on all platforms. + + +2019-07-02: Version 7.7.172 + + Performance and stability improvements on all platforms. + + +2019-07-01: Version 7.7.171 + + Performance and stability improvements on all platforms. + + +2019-07-01: Version 7.7.170 + + Performance and stability improvements on all platforms. + + +2019-07-01: Version 7.7.169 + + Performance and stability improvements on all platforms. + + +2019-07-01: Version 7.7.168 + + Performance and stability improvements on all platforms. + + +2019-07-01: Version 7.7.167 + + Performance and stability improvements on all platforms. + + +2019-06-28: Version 7.7.166 + + Performance and stability improvements on all platforms. + + +2019-06-28: Version 7.7.165 + + Performance and stability improvements on all platforms. + + +2019-06-28: Version 7.7.164 + + Performance and stability improvements on all platforms. + + +2019-06-27: Version 7.7.163 + + Performance and stability improvements on all platforms. + + +2019-06-27: Version 7.7.162 + + Performance and stability improvements on all platforms. + + +2019-06-27: Version 7.7.161 + + Performance and stability improvements on all platforms. + + +2019-06-27: Version 7.7.160 + + Performance and stability improvements on all platforms. + + +2019-06-27: Version 7.7.159 + + Performance and stability improvements on all platforms. + + +2019-06-27: Version 7.7.158 + + Performance and stability improvements on all platforms. + + +2019-06-27: Version 7.7.157 + + Performance and stability improvements on all platforms. + + +2019-06-27: Version 7.7.156 + + Performance and stability improvements on all platforms. + + +2019-06-27: Version 7.7.155 + + Performance and stability improvements on all platforms. + + +2019-06-27: Version 7.7.154 + + Performance and stability improvements on all platforms. + + +2019-06-26: Version 7.7.153 + + Performance and stability improvements on all platforms. + + +2019-06-26: Version 7.7.152 + + Performance and stability improvements on all platforms. + + +2019-06-26: Version 7.7.151 + + Performance and stability improvements on all platforms. + + +2019-06-26: Version 7.7.150 + + Performance and stability improvements on all platforms. + + +2019-06-25: Version 7.7.149 + + Performance and stability improvements on all platforms. + + +2019-06-25: Version 7.7.148 + + Performance and stability improvements on all platforms. + + +2019-06-25: Version 7.7.147 + + Performance and stability improvements on all platforms. + + +2019-06-25: Version 7.7.146 + + Performance and stability improvements on all platforms. + + +2019-06-25: Version 7.7.145 + + Performance and stability improvements on all platforms. + + +2019-06-25: Version 7.7.144 + + Performance and stability improvements on all platforms. + + +2019-06-25: Version 7.7.143 + + Performance and stability improvements on all platforms. 
+ + +2019-06-25: Version 7.7.142 + + Performance and stability improvements on all platforms. + + +2019-06-25: Version 7.7.141 + + Performance and stability improvements on all platforms. + + +2019-06-24: Version 7.7.140 + + Performance and stability improvements on all platforms. + + +2019-06-24: Version 7.7.139 + + Performance and stability improvements on all platforms. + + +2019-06-24: Version 7.7.138 + + Performance and stability improvements on all platforms. + + +2019-06-24: Version 7.7.137 + + Performance and stability improvements on all platforms. + + +2019-06-24: Version 7.7.136 + + Performance and stability improvements on all platforms. + + +2019-06-23: Version 7.7.135 + + Performance and stability improvements on all platforms. + + +2019-06-23: Version 7.7.134 + + Performance and stability improvements on all platforms. + + +2019-06-22: Version 7.7.133 + + Performance and stability improvements on all platforms. + + +2019-06-21: Version 7.7.132 + + Performance and stability improvements on all platforms. + + +2019-06-21: Version 7.7.131 + + Performance and stability improvements on all platforms. + + +2019-06-21: Version 7.7.130 + + Performance and stability improvements on all platforms. + + +2019-06-21: Version 7.7.129 + + Performance and stability improvements on all platforms. + + +2019-06-21: Version 7.7.128 + + Performance and stability improvements on all platforms. + + +2019-06-21: Version 7.7.127 + + Performance and stability improvements on all platforms. + + +2019-06-21: Version 7.7.126 + + Performance and stability improvements on all platforms. + + +2019-06-21: Version 7.7.125 + + Performance and stability improvements on all platforms. + + +2019-06-20: Version 7.7.124 + + Performance and stability improvements on all platforms. + + +2019-06-20: Version 7.7.123 + + Performance and stability improvements on all platforms. + + +2019-06-20: Version 7.7.122 + + Performance and stability improvements on all platforms. + + +2019-06-20: Version 7.7.121 + + Performance and stability improvements on all platforms. + + +2019-06-20: Version 7.7.120 + + Performance and stability improvements on all platforms. + + +2019-06-19: Version 7.7.119 + + Performance and stability improvements on all platforms. + + +2019-06-19: Version 7.7.118 + + Performance and stability improvements on all platforms. + + +2019-06-19: Version 7.7.117 + + Performance and stability improvements on all platforms. + + +2019-06-19: Version 7.7.116 + + Performance and stability improvements on all platforms. + + +2019-06-19: Version 7.7.115 + + Performance and stability improvements on all platforms. + + +2019-06-19: Version 7.7.114 + + Performance and stability improvements on all platforms. + + +2019-06-19: Version 7.7.113 + + Performance and stability improvements on all platforms. + + +2019-06-19: Version 7.7.112 + + Performance and stability improvements on all platforms. + + +2019-06-19: Version 7.7.111 + + Performance and stability improvements on all platforms. + + +2019-06-19: Version 7.7.110 + + Performance and stability improvements on all platforms. + + +2019-06-19: Version 7.7.109 + + Performance and stability improvements on all platforms. + + +2019-06-19: Version 7.7.108 + + Performance and stability improvements on all platforms. + + +2019-06-19: Version 7.7.107 + + Performance and stability improvements on all platforms. + + +2019-06-18: Version 7.7.106 + + Performance and stability improvements on all platforms. 
+ + +2019-06-18: Version 7.7.105 + + Performance and stability improvements on all platforms. + + +2019-06-18: Version 7.7.104 + + Performance and stability improvements on all platforms. + + +2019-06-18: Version 7.7.103 + + Performance and stability improvements on all platforms. + + +2019-06-18: Version 7.7.102 + + Performance and stability improvements on all platforms. + + +2019-06-18: Version 7.7.101 + + Performance and stability improvements on all platforms. + + +2019-06-18: Version 7.7.100 + + Performance and stability improvements on all platforms. + + +2019-06-18: Version 7.7.99 + + Performance and stability improvements on all platforms. + + +2019-06-18: Version 7.7.98 + + Performance and stability improvements on all platforms. + + +2019-06-18: Version 7.7.97 + + Performance and stability improvements on all platforms. + + +2019-06-18: Version 7.7.96 + + Performance and stability improvements on all platforms. + + +2019-06-18: Version 7.7.95 + + Performance and stability improvements on all platforms. + + +2019-06-17: Version 7.7.94 + + Performance and stability improvements on all platforms. + + +2019-06-17: Version 7.7.93 + + Performance and stability improvements on all platforms. + + +2019-06-17: Version 7.7.92 + + Performance and stability improvements on all platforms. + + +2019-06-17: Version 7.7.91 + + Performance and stability improvements on all platforms. + + +2019-06-17: Version 7.7.90 + + Performance and stability improvements on all platforms. + + +2019-06-17: Version 7.7.89 + + Performance and stability improvements on all platforms. + + +2019-06-17: Version 7.7.88 + + Performance and stability improvements on all platforms. + + +2019-06-17: Version 7.7.87 + + Performance and stability improvements on all platforms. + + +2019-06-16: Version 7.7.86 + + Performance and stability improvements on all platforms. + + +2019-06-16: Version 7.7.85 + + Performance and stability improvements on all platforms. + + +2019-06-14: Version 7.7.84 + + Performance and stability improvements on all platforms. + + +2019-06-14: Version 7.7.83 + + Performance and stability improvements on all platforms. + + +2019-06-14: Version 7.7.82 + + Performance and stability improvements on all platforms. + + +2019-06-14: Version 7.7.81 + + Performance and stability improvements on all platforms. + + +2019-06-14: Version 7.7.80 + + Performance and stability improvements on all platforms. + + +2019-06-14: Version 7.7.79 + + Performance and stability improvements on all platforms. + + +2019-06-14: Version 7.7.78 + + Performance and stability improvements on all platforms. + + +2019-06-14: Version 7.7.77 + + Performance and stability improvements on all platforms. + + +2019-06-14: Version 7.7.76 + + Performance and stability improvements on all platforms. + + +2019-06-14: Version 7.7.75 + + Performance and stability improvements on all platforms. + + +2019-06-14: Version 7.7.74 + + Performance and stability improvements on all platforms. + + +2019-06-14: Version 7.7.73 + + Performance and stability improvements on all platforms. + + +2019-06-13: Version 7.7.72 + + Performance and stability improvements on all platforms. + + +2019-06-13: Version 7.7.71 + + Performance and stability improvements on all platforms. + + +2019-06-13: Version 7.7.70 + + Performance and stability improvements on all platforms. + + +2019-06-13: Version 7.7.69 + + Performance and stability improvements on all platforms. + + +2019-06-13: Version 7.7.68 + + Performance and stability improvements on all platforms. 
+ + +2019-06-13: Version 7.7.67 + + Performance and stability improvements on all platforms. + + +2019-06-13: Version 7.7.66 + + Performance and stability improvements on all platforms. + + +2019-06-13: Version 7.7.65 + + Performance and stability improvements on all platforms. + + +2019-06-13: Version 7.7.64 + + Performance and stability improvements on all platforms. + + +2019-06-13: Version 7.7.63 + + Performance and stability improvements on all platforms. + + +2019-06-13: Version 7.7.62 + + Performance and stability improvements on all platforms. + + +2019-06-12: Version 7.7.61 + + Performance and stability improvements on all platforms. + + +2019-06-12: Version 7.7.60 + + Performance and stability improvements on all platforms. + + +2019-06-12: Version 7.7.59 + + Performance and stability improvements on all platforms. + + +2019-06-12: Version 7.7.58 + + Performance and stability improvements on all platforms. + + +2019-06-12: Version 7.7.57 + + Performance and stability improvements on all platforms. + + +2019-06-12: Version 7.7.56 + + Performance and stability improvements on all platforms. + + +2019-06-12: Version 7.7.55 + + Performance and stability improvements on all platforms. + + +2019-06-12: Version 7.7.54 + + Performance and stability improvements on all platforms. + + +2019-06-12: Version 7.7.53 + + Performance and stability improvements on all platforms. + + +2019-06-12: Version 7.7.52 + + Performance and stability improvements on all platforms. + + +2019-06-12: Version 7.7.51 + + Performance and stability improvements on all platforms. + + +2019-06-11: Version 7.7.50 + + Performance and stability improvements on all platforms. + + +2019-06-11: Version 7.7.49 + + Performance and stability improvements on all platforms. + + +2019-06-11: Version 7.7.48 + + Performance and stability improvements on all platforms. + + +2019-06-11: Version 7.7.47 + + Performance and stability improvements on all platforms. + + +2019-06-11: Version 7.7.46 + + Performance and stability improvements on all platforms. + + +2019-06-11: Version 7.7.45 + + Performance and stability improvements on all platforms. + + +2019-06-11: Version 7.7.44 + + Performance and stability improvements on all platforms. + + +2019-06-11: Version 7.7.43 + + Performance and stability improvements on all platforms. + + +2019-06-11: Version 7.7.42 + + Performance and stability improvements on all platforms. + + +2019-06-11: Version 7.7.41 + + Performance and stability improvements on all platforms. + + +2019-06-10: Version 7.7.40 + + Performance and stability improvements on all platforms. + + +2019-06-10: Version 7.7.39 + + Performance and stability improvements on all platforms. + + +2019-06-10: Version 7.7.38 + + Performance and stability improvements on all platforms. + + +2019-06-10: Version 7.7.37 + + Performance and stability improvements on all platforms. + + +2019-06-09: Version 7.7.36 + + Performance and stability improvements on all platforms. + + +2019-06-09: Version 7.7.35 + + Performance and stability improvements on all platforms. + + +2019-06-09: Version 7.7.34 + + Performance and stability improvements on all platforms. + + +2019-06-08: Version 7.7.33 + + Performance and stability improvements on all platforms. + + +2019-06-08: Version 7.7.32 + + Performance and stability improvements on all platforms. + + +2019-06-08: Version 7.7.31 + + Performance and stability improvements on all platforms. + + +2019-06-08: Version 7.7.30 + + Performance and stability improvements on all platforms. 
+ + +2019-06-07: Version 7.7.29 + + Performance and stability improvements on all platforms. + + +2019-06-07: Version 7.7.28 + + Performance and stability improvements on all platforms. + + +2019-06-07: Version 7.7.27 + + Performance and stability improvements on all platforms. + + +2019-06-07: Version 7.7.26 + + Performance and stability improvements on all platforms. + + +2019-06-07: Version 7.7.25 + + Performance and stability improvements on all platforms. + + +2019-06-07: Version 7.7.24 + + Performance and stability improvements on all platforms. + + +2019-06-05: Version 7.7.23 + + Performance and stability improvements on all platforms. + + +2019-06-05: Version 7.7.22 + + Performance and stability improvements on all platforms. + + +2019-06-05: Version 7.7.21 + + Performance and stability improvements on all platforms. + + +2019-06-05: Version 7.7.20 + + Performance and stability improvements on all platforms. + + +2019-06-05: Version 7.7.19 + + Performance and stability improvements on all platforms. + + +2019-06-05: Version 7.7.18 + + Performance and stability improvements on all platforms. + + +2019-06-04: Version 7.7.17 + + Performance and stability improvements on all platforms. + + +2019-06-04: Version 7.7.16 + + Performance and stability improvements on all platforms. + + +2019-06-04: Version 7.7.15 + + Performance and stability improvements on all platforms. + + +2019-06-04: Version 7.7.14 + + Performance and stability improvements on all platforms. + + +2019-06-04: Version 7.7.13 + + Performance and stability improvements on all platforms. + + +2019-06-03: Version 7.7.12 + + Performance and stability improvements on all platforms. + + +2019-06-03: Version 7.7.11 + + Performance and stability improvements on all platforms. + + +2019-06-03: Version 7.7.10 + + Performance and stability improvements on all platforms. + + +2019-06-03: Version 7.7.9 + + Performance and stability improvements on all platforms. + + +2019-06-03: Version 7.7.8 + + Performance and stability improvements on all platforms. + + +2019-06-03: Version 7.7.7 + + Performance and stability improvements on all platforms. + + +2019-06-03: Version 7.7.6 + + Performance and stability improvements on all platforms. + + +2019-06-03: Version 7.7.5 + + Performance and stability improvements on all platforms. + + +2019-06-03: Version 7.7.4 + + Performance and stability improvements on all platforms. + + +2019-06-03: Version 7.7.3 + + Performance and stability improvements on all platforms. + + +2019-06-03: Version 7.7.2 + + Performance and stability improvements on all platforms. + + +2019-05-31: Version 7.7.1 + + Performance and stability improvements on all platforms. + + +2019-05-29: Version 7.6.311 + + Performance and stability improvements on all platforms. + + +2019-05-29: Version 7.6.310 + + Performance and stability improvements on all platforms. + + +2019-05-29: Version 7.6.309 + + Performance and stability improvements on all platforms. + + +2019-05-29: Version 7.6.308 + + Performance and stability improvements on all platforms. + + +2019-05-28: Version 7.6.307 + + Performance and stability improvements on all platforms. + + +2019-05-28: Version 7.6.306 + + Performance and stability improvements on all platforms. + + +2019-05-28: Version 7.6.305 + + Performance and stability improvements on all platforms. + + +2019-05-28: Version 7.6.304 + + Performance and stability improvements on all platforms. + + 2019-05-28: Version 7.6.303 Performance and stability improvements on all platforms. 
diff --git a/deps/v8/DEPS b/deps/v8/DEPS index bca59b724f292a..986264356f99cc 100644 --- a/deps/v8/DEPS +++ b/deps/v8/DEPS @@ -12,10 +12,10 @@ vars = { 'check_v8_header_includes': False, # GN CIPD package version. - 'gn_version': 'git_revision:81ee1967d3fcbc829bac1c005c3da59739c88df9', + 'gn_version': 'git_revision:972ed755f8e6d31cae9ba15fcd08136ae1a7886f', # luci-go CIPD package version. - 'luci_go': 'git_revision:25958d48e89e980e2a97daeddc977fb5e2e1fb8c', + 'luci_go': 'git_revision:7d11fd9e66407c49cb6c8546a2ae45ea993a240c', # Three lines of non-changing comments so that # the commit queue can handle CLs rolling android_sdk_build-tools_version @@ -24,11 +24,11 @@ vars = { # Three lines of non-changing comments so that # the commit queue can handle CLs rolling android_sdk_emulator_version # and whatever else without interference from each other. - 'android_sdk_emulator_version': 'ki7EDQRAiZAUYlnTWR1XmI6cJTk65fJ-DNZUU1zrtS8C', + 'android_sdk_emulator_version': 'xhyuoquVvBTcJelgRjMKZeoBVSQRjB7pLVJPt5C9saIC', # Three lines of non-changing comments so that # the commit queue can handle CLs rolling android_sdk_extras_version # and whatever else without interference from each other. - 'android_sdk_extras_version': 'iIwhhDox5E-mHgwUhCz8JACWQCpUjdqt5KTY9VLugKQC', + 'android_sdk_extras_version': 'ppQ4TnqDvBHQ3lXx5KPq97egzF5X2FFyOrVHkGmiTMQC', # Three lines of non-changing comments so that # the commit queue can handle CLs rolling android_sdk_patcher_version # and whatever else without interference from each other. @@ -36,7 +36,7 @@ vars = { # Three lines of non-changing comments so that # the commit queue can handle CLs rolling android_sdk_platform-tools_version # and whatever else without interference from each other. - 'android_sdk_platform-tools_version': '4Y2Cb2LGzoc-qt-oIUIlhySotJaKeE3ELFedSVe6Uk8C', + 'android_sdk_platform-tools_version': 'MSnxgXN7IurL-MQs1RrTkSFSb8Xd1UtZjLArI8Ty1FgC', # Three lines of non-changing comments so that # the commit queue can handle CLs rolling android_sdk_platforms_version # and whatever else without interference from each other. 
@@ -57,15 +57,15 @@ vars = { deps = { 'v8/build': - Var('chromium_url') + '/chromium/src/build.git' + '@' + '4cebfa34c79bcfbce6a3f55d1b4f7628bb70ea8a', + Var('chromium_url') + '/chromium/src/build.git' + '@' + '1e5d7d692f816af8136c738b79fe9e8dde8057f6', 'v8/third_party/depot_tools': - Var('chromium_url') + '/chromium/tools/depot_tools.git' + '@' + '26af0d34d281440ad0dc6d2e43fe60f32ef62da0', + Var('chromium_url') + '/chromium/tools/depot_tools.git' + '@' + 'ee7b9dda90e409fb92031d511151debe5db7db9f', 'v8/third_party/icu': - Var('chromium_url') + '/chromium/deps/icu.git' + '@' + '64e5d7d43a1ff205e3787ab6150bbc1a1837332b', + Var('chromium_url') + '/chromium/deps/icu.git' + '@' + 'fd97d4326fac6da84452b2d5fe75ff0949368dab', 'v8/third_party/instrumented_libraries': - Var('chromium_url') + '/chromium/src/third_party/instrumented_libraries.git' + '@' + 'a959e4f0cb643003f2d75d179cede449979e3e77', + Var('chromium_url') + '/chromium/src/third_party/instrumented_libraries.git' + '@' + 'b1c3ca20848c117eb935b02c25d441f03e6fbc5e', 'v8/buildtools': - Var('chromium_url') + '/chromium/src/buildtools.git' + '@' + '0218c0f9ac9fdba00e5c27b5aca94d3a64c74f34', + Var('chromium_url') + '/chromium/src/buildtools.git' + '@' + '67b293ca1316d06f7f00160ce35c92b8849a9dc9', 'v8/buildtools/clang_format/script': Var('chromium_url') + '/chromium/llvm-project/cfe/tools/clang-format.git' + '@' + '96636aa0e9f047f17447f2d45a094d0b59ed7917', 'v8/buildtools/linux64': { @@ -110,11 +110,6 @@ deps = { 'url': Var('chromium_url') + '/android_ndk.git' + '@' + '4e2cea441bfd43f0863d14f57b1e1844260b9884', 'condition': 'checkout_android', }, - # This is deprecated. - 'v8/third_party/android_tools': { - 'url': Var('chromium_url') + '/android_tools.git' + '@' + '347a7c8078a009e98995985b7ab6ec6b35696dea', - 'condition': 'checkout_android', - }, 'v8/third_party/android_sdk/public': { 'packages': [ { @@ -158,7 +153,7 @@ deps = { 'dep_type': 'cipd', }, 'v8/third_party/catapult': { - 'url': Var('chromium_url') + '/catapult.git' + '@' + 'a7b33124672f301cebe0ca94a67ca7d0362e3d6a', + 'url': Var('chromium_url') + '/catapult.git' + '@' + '53913cecb11a3ef993f6496b9110964e2e2aeec3', 'condition': 'checkout_android', }, 'v8/third_party/colorama/src': { @@ -166,23 +161,23 @@ deps = { 'condition': 'checkout_android', }, 'v8/third_party/fuchsia-sdk': { - 'url': Var('chromium_url') + '/chromium/src/third_party/fuchsia-sdk.git' + '@' + 'ae68779f84fc36bd88ba4fe0ff78ed9ea3c91d73', + 'url': Var('chromium_url') + '/chromium/src/third_party/fuchsia-sdk.git' + '@' + '5fd29151cf35c0813c33cc368a7c78389e3f5caa', 'condition': 'checkout_fuchsia', }, 'v8/third_party/googletest/src': - Var('chromium_url') + '/external/github.com/google/googletest.git' + '@' + 'f71fb4f9a912ec945401cc49a287a759b6131026', + Var('chromium_url') + '/external/github.com/google/googletest.git' + '@' + '6077f444da944d96d311d358d761164261f1cdd0', 'v8/third_party/jinja2': Var('chromium_url') + '/chromium/src/third_party/jinja2.git' + '@' + 'b41863e42637544c2941b574c7877d3e1f663e25', 'v8/third_party/markupsafe': Var('chromium_url') + '/chromium/src/third_party/markupsafe.git' + '@' + '8f45f5cfa0009d2a70589bcda0349b8cb2b72783', 'v8/tools/swarming_client': - Var('chromium_url') + '/infra/luci/client-py.git' + '@' + '779c4f0f8488c64587b75dbb001d18c3c0c4cda9', + Var('chromium_url') + '/infra/luci/client-py.git' + '@' + '96f125709acfd0b48fc1e5dae7d6ea42291726ac', 'v8/test/benchmarks/data': Var('chromium_url') + '/v8/deps/third_party/benchmarks.git' + '@' + '05d7188267b4560491ff9155c5ee13e207ecd65f', 
'v8/test/mozilla/data': Var('chromium_url') + '/v8/deps/third_party/mozilla-tests.git' + '@' + 'f6c578a10ea707b1a8ab0b88943fe5115ce2b9be', 'v8/test/test262/data': - Var('chromium_url') + '/external/github.com/tc39/test262.git' + '@' + 'a9abd418ccc7999b00b8c7df60b25620a7d3c541', + Var('chromium_url') + '/external/github.com/tc39/test262.git' + '@' + '26a2268436f28f64c4539d9aab9ebd0f0b7c99c5', 'v8/test/test262/harness': Var('chromium_url') + '/external/github.com/test262-utils/test262-harness-py.git' + '@' + '4555345a943d0c99a9461182705543fb171dda4b', 'v8/third_party/qemu-linux-x64': { @@ -206,7 +201,7 @@ deps = { 'dep_type': 'cipd', }, 'v8/tools/clang': - Var('chromium_url') + '/chromium/src/tools/clang.git' + '@' + 'fe8ba88894e4b3927d3cd9e24274a0f1a688cf71', + Var('chromium_url') + '/chromium/src/tools/clang.git' + '@' + 'f485a21a9cb05494161d97d545c3b29447610ffb', 'v8/tools/luci-go': { 'packages': [ { @@ -236,15 +231,12 @@ deps = { 'dep_type': 'cipd', }, 'v8/test/wasm-js/data': - Var('chromium_url') + '/external/github.com/WebAssembly/spec.git' + '@' + 'bc7d3006bbda0de5031c2a1b9266a62fa7895019', + Var('chromium_url') + '/external/github.com/WebAssembly/spec.git' + '@' + '1a411f713d9850ce7da24719aba5bb80c535f562', 'v8/third_party/perfetto': - Var('android_url') + '/platform/external/perfetto.git' + '@' + '10c98fe0cfae669f71610d97e9da94260a6da173', + Var('android_url') + '/platform/external/perfetto.git' + '@' + '0e8281399fd854de13461f2c1c9f2fb0b8e9c3ae', 'v8/third_party/protobuf': Var('chromium_url') + '/external/github.com/google/protobuf'+ '@' + 'b68a347f56137b4b1a746e8c7438495a6ac1bd91', } -recursedeps = [ - 'v8/third_party/android_tools', -] include_rules = [ # Everybody can use some things. diff --git a/deps/v8/INTL_OWNERS b/deps/v8/INTL_OWNERS new file mode 100644 index 00000000000000..dbe6f3b7b54292 --- /dev/null +++ b/deps/v8/INTL_OWNERS @@ -0,0 +1,3 @@ +cira@chromium.org +mnita@google.com +jshin@chromium.org diff --git a/deps/v8/OWNERS b/deps/v8/OWNERS index c428ba6d0bbd7b..be360966665b38 100644 --- a/deps/v8/OWNERS +++ b/deps/v8/OWNERS @@ -2,20 +2,20 @@ # Disagreement among owners should be escalated to eng reviewers. file://ENG_REVIEW_OWNERS -# TODO(9247) remove this. -file://COMMON_OWNERS - per-file .clang-format=file://INFRA_OWNERS per-file .clang-tidy=file://INFRA_OWNERS per-file .editorconfig=file://INFRA_OWNERS +per-file .flake8=file://INFRA_OWNERS per-file .git-blame-ignore-revs=file://INFRA_OWNERS per-file .gitattributes=file://INFRA_OWNERS per-file .gitignore=file://INFRA_OWNERS per-file .gn=file://INFRA_OWNERS per-file .vpython=file://INFRA_OWNERS per-file .ycm_extra_conf.py=file://INFRA_OWNERS -per-file BUILD.gn=file://INFRA_OWNERS +per-file BUILD.gn=file://COMMON_OWNERS per-file DEPS=file://INFRA_OWNERS +# For Test262 rolls. +per-file DEPS=mathias@chromium.org per-file PRESUBMIT=file://INFRA_OWNERS per-file codereview.settings=file://INFRA_OWNERS diff --git a/deps/v8/test/wasm-api-tests/OWNERS b/deps/v8/benchmarks/OWNERS similarity index 100% rename from deps/v8/test/wasm-api-tests/OWNERS rename to deps/v8/benchmarks/OWNERS diff --git a/deps/v8/gni/proto_library.gni b/deps/v8/gni/proto_library.gni index cf581ed46e4dfe..b16d8f93bd8fcd 100644 --- a/deps/v8/gni/proto_library.gni +++ b/deps/v8/gni/proto_library.gni @@ -187,7 +187,10 @@ template("proto_library") { "visibility", ]) + # Exclude the config.descriptor file which is an output for some reason. 
+    set_sources_assignment_filter([ "*.descriptor" ])
     sources = get_target_outputs(":$action_name")
+    set_sources_assignment_filter(sources_assignment_filter)
 
     # configs -= [ "//gn/standalone:extra_warnings" ]
     if (defined(invoker.extra_configs)) {
diff --git a/deps/v8/gni/v8.gni b/deps/v8/gni/v8.gni
index 506b8428ee3217..e55c4cf3468460 100644
--- a/deps/v8/gni/v8.gni
+++ b/deps/v8/gni/v8.gni
@@ -92,7 +92,7 @@ if (v8_enable_backtrace == "") {
 # subdirectories.
 v8_path_prefix = get_path_info("../", "abspath")
 
-v8_inspector_js_protocol = v8_path_prefix + "/src/inspector/js_protocol.pdl"
+v8_inspector_js_protocol = v8_path_prefix + "/include/js_protocol.pdl"
 
 ###############################################################################
 # Templates
diff --git a/deps/v8/include/APIDesign.md b/deps/v8/include/APIDesign.md
index 8830fff7d1897a..fe42c8ed5da36e 100644
--- a/deps/v8/include/APIDesign.md
+++ b/deps/v8/include/APIDesign.md
@@ -67,3 +67,6 @@ which in turn guarantees long-term stability of the API.
 # The V8 inspector
 
 All debugging capabilities of V8 should be exposed via the inspector protocol.
+The exceptions to this are the profiling features exposed via v8-profiler.h.
+Changes to the inspector protocol need to ensure backwards compatibility and
+a commitment to maintain them.
diff --git a/deps/v8/include/OWNERS b/deps/v8/include/OWNERS
index 7953cfe133a2c0..7ffbf74ce94d90 100644
--- a/deps/v8/include/OWNERS
+++ b/deps/v8/include/OWNERS
@@ -1,16 +1,17 @@
-set noparent
-
 adamk@chromium.org
 danno@chromium.org
 ulan@chromium.org
 yangguo@chromium.org
 
-per-file v8-internal.h=file://OWNERS
+per-file *DEPS=file://COMMON_OWNERS
+per-file v8-internal.h=file://COMMON_OWNERS
 per-file v8-inspector.h=dgozman@chromium.org
 per-file v8-inspector.h=pfeldman@chromium.org
 per-file v8-inspector.h=kozyatinskiy@chromium.org
 per-file v8-inspector-protocol.h=dgozman@chromium.org
 per-file v8-inspector-protocol.h=pfeldman@chromium.org
 per-file v8-inspector-protocol.h=kozyatinskiy@chromium.org
+per-file js_protocol.pdl=dgozman@chromium.org
+per-file js_protocol.pdl=pfeldman@chromium.org
 
 # COMPONENT: Blink>JavaScript>API
diff --git a/deps/v8/src/inspector/js_protocol-1.2.json b/deps/v8/include/js_protocol-1.2.json
similarity index 100%
rename from deps/v8/src/inspector/js_protocol-1.2.json
rename to deps/v8/include/js_protocol-1.2.json
diff --git a/deps/v8/src/inspector/js_protocol-1.3.json b/deps/v8/include/js_protocol-1.3.json
similarity index 100%
rename from deps/v8/src/inspector/js_protocol-1.3.json
rename to deps/v8/include/js_protocol-1.3.json
diff --git a/deps/v8/src/inspector/js_protocol.pdl b/deps/v8/include/js_protocol.pdl
similarity index 100%
rename from deps/v8/src/inspector/js_protocol.pdl
rename to deps/v8/include/js_protocol.pdl
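Editor's note: the v8-tracing.h hunk below swaps V8's bespoke PerfettoTracingController for a perfetto::TracingSession owned by the controller. The embedder-facing TracingController surface is unchanged on the default (non-Perfetto) path; as a point of reference, here is a minimal sketch of routing trace events to a JSON stream, assuming the long-standing libplatform API and an illustrative `out.json` path:

```cpp
#include <fstream>

#include "libplatform/v8-tracing.h"

// Sketch: write V8 trace events to a JSON file with the default (non-Perfetto)
// libplatform backend. Buffer sizing and the output path are illustrative.
int main() {
  namespace vt = v8::platform::tracing;
  std::ofstream stream("out.json");  // illustrative output file
  auto* controller = new vt::TracingController();
  controller->Initialize(vt::TraceBuffer::CreateTraceBufferRingBuffer(
      vt::TraceBuffer::kRingBufferChunks,
      vt::TraceWriter::CreateJSONTraceWriter(stream)));
  controller->StartTracing(vt::TraceConfig::CreateDefaultTraceConfig());
  // ... hand the controller to the platform (e.g. via NewDefaultPlatform,
  // which takes ownership) and run JavaScript; events land in the buffer ...
  controller->StopTracing();
  return 0;
}
```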
diff --git a/deps/v8/include/libplatform/v8-tracing.h b/deps/v8/include/libplatform/v8-tracing.h
index ccdca0a8c5cfc6..e7cd8bfcdb66d0 100644
--- a/deps/v8/include/libplatform/v8-tracing.h
+++ b/deps/v8/include/libplatform/v8-tracing.h
@@ -14,6 +14,10 @@
 #include "libplatform/libplatform-export.h"
 #include "v8-platform.h"  // NOLINT(build/include)
 
+namespace perfetto {
+class TracingSession;
+}
+
 namespace v8 {
 
 namespace base {
@@ -23,8 +27,8 @@ class Mutex;
 namespace platform {
 namespace tracing {
 
-class PerfettoTracingController;
 class TraceEventListener;
+class JSONTraceEventListener;
 
 const int kTraceMaxNumArgs = 2;
 
@@ -292,11 +296,10 @@ class V8_PLATFORM_EXPORT TracingController
   std::unordered_set<v8::TracingController::TraceStateObserver*> observers_;
   std::atomic_bool recording_{false};
 #ifdef V8_USE_PERFETTO
-  std::atomic_bool perfetto_recording_{false};
-  std::unique_ptr<PerfettoTracingController> perfetto_tracing_controller_;
   std::ostream* output_stream_ = nullptr;
-  std::unique_ptr<TraceEventListener> json_listener_;
+  std::unique_ptr<JSONTraceEventListener> json_listener_;
   TraceEventListener* listener_for_testing_ = nullptr;
+  std::unique_ptr<perfetto::TracingSession> tracing_session_;
 #endif
 
   // Disallow copy and assign
diff --git a/deps/v8/include/v8-inspector.h b/deps/v8/include/v8-inspector.h
index b96a6e29ac0cb6..cfa2aaba96d12e 100644
--- a/deps/v8/include/v8-inspector.h
+++ b/deps/v8/include/v8-inspector.h
@@ -109,6 +109,8 @@ class V8_EXPORT V8StackTrace {
   virtual ~V8StackTrace() = default;
   virtual std::unique_ptr<protocol::Runtime::API::StackTrace>
       buildInspectorObject() const = 0;
+  virtual std::unique_ptr<protocol::Runtime::API::StackTrace>
+      buildInspectorObject(int maxAsyncDepth) const = 0;
   virtual std::unique_ptr<StringBuffer> toString() const = 0;
 
   // Safe to pass between threads, drops async chain.
@@ -130,10 +132,6 @@ class V8_EXPORT V8InspectorSession {
   // Dispatching protocol messages.
   static bool canDispatchMethod(const StringView& method);
   virtual void dispatchProtocolMessage(const StringView& message) = 0;
-  virtual V8_DEPRECATED("Use state() instead",
-                        std::unique_ptr<StringBuffer> stateJSON()) {
-    return nullptr;
-  }
   virtual std::vector<uint8_t> state() = 0;
   virtual std::vector<std::unique_ptr<protocol::Schema::API::Domain>>
       supportedDomains() = 0;
diff --git a/deps/v8/include/v8-platform.h b/deps/v8/include/v8-platform.h
index b707fafc49229a..7e43b0d9db4a9d 100644
--- a/deps/v8/include/v8-platform.h
+++ b/deps/v8/include/v8-platform.h
@@ -439,6 +439,14 @@ class Platform {
    */
   virtual void DumpWithoutCrashing() {}
 
+  /**
+   * Lets the embedder add crash keys.
+   */
+  virtual void AddCrashKey(int id, const char* name, uintptr_t value) {
+    // "noop" is a valid implementation if the embedder doesn't care to log
+    // additional data for crashes.
+  }
+
  protected:
   /**
    * Default implementation of current wall-clock time in milliseconds
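Editor's note: Platform::AddCrashKey above defaults to a no-op, so embedders opt in by overriding it. A minimal sketch of such an override, assuming a hypothetical RecordCrashKey hook in the embedder's crash reporter:

```cpp
#include <cstdint>

#include "v8-platform.h"

// Hypothetical embedder hook standing in for a real crash reporter.
void RecordCrashKey(int id, const char* name, uintptr_t value);

// Sketch: forward V8's crash keys (e.g. isolate addresses) to the reporter.
class PlatformWithCrashKeys : public v8::Platform {
 public:
  void AddCrashKey(int id, const char* name, uintptr_t value) override {
    RecordCrashKey(id, name, value);
  }
  // The remaining pure-virtual Platform methods still need definitions,
  // typically by delegating to a platform from NewDefaultPlatform().
};
```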
diff --git a/deps/v8/include/v8-version.h b/deps/v8/include/v8-version.h
index e9c5c339f280d8..0406f65b08e684 100644
--- a/deps/v8/include/v8-version.h
+++ b/deps/v8/include/v8-version.h
@@ -9,9 +9,9 @@
 // NOTE these macros are used by some of the tool scripts and the build
 // system so their names cannot be changed without changing the scripts.
 #define V8_MAJOR_VERSION 7
-#define V8_MINOR_VERSION 6
-#define V8_BUILD_NUMBER 303
-#define V8_PATCH_LEVEL 29
+#define V8_MINOR_VERSION 7
+#define V8_BUILD_NUMBER 299
+#define V8_PATCH_LEVEL 11
 
 // Use 1 for candidates and 0 otherwise.
 // (Boolean macro values are not supported by all preprocessors.)
diff --git a/deps/v8/include/v8.h b/deps/v8/include/v8.h
index 3b73ae6413a44d..f3fbdc696294e4 100644
--- a/deps/v8/include/v8.h
+++ b/deps/v8/include/v8.h
@@ -1359,6 +1359,37 @@ class V8_EXPORT Module {
    * kEvaluated or kErrored.
    */
   Local<UnboundModuleScript> GetUnboundModuleScript();
+
+  /*
+   * Callback defined in the embedder. This is responsible for setting
+   * the module's exported values with calls to SetSyntheticModuleExport().
+   * The callback must return a Value to indicate success (where no
+   * exception was thrown) and return an empty MaybeLocal to indicate failure
+   * (where an exception was thrown).
+   */
+  typedef MaybeLocal<Value> (*SyntheticModuleEvaluationSteps)(
+      Local<Context> context, Local<Module> module);
+
+  /**
+   * Creates a new SyntheticModule with the specified export names, where
+   * evaluation_steps will be executed upon module evaluation.
+   * export_names must not contain duplicates.
+   * module_name is used solely for logging/debugging and doesn't affect module
+   * behavior.
+   */
+  static Local<Module> CreateSyntheticModule(
+      Isolate* isolate, Local<String> module_name,
+      const std::vector<Local<String>>& export_names,
+      SyntheticModuleEvaluationSteps evaluation_steps);
+
+  /**
+   * Set this module's exported value for the name export_name to the specified
+   * export_value. This method must be called only on Modules created via
+   * CreateSyntheticModule. export_name must be one of the export_names that
+   * were passed in that CreateSyntheticModule call.
+   */
+  void SetSyntheticModuleExport(Local<String> export_name,
+                                Local<Value> export_value);
 };
 
 /**
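Editor's note: the synthetic-module API above is new in this V8 roll. For orientation, a hedged sketch of how an embedder might create a module exporting a single name; the module and export names are illustrative, only the API shapes come from the header:

```cpp
#include <vector>

#include "v8.h"

// Sketch: a synthetic module exporting one name, "answer".
v8::MaybeLocal<v8::Value> EvaluationSteps(v8::Local<v8::Context> context,
                                          v8::Local<v8::Module> module) {
  v8::Isolate* isolate = context->GetIsolate();
  module->SetSyntheticModuleExport(
      v8::String::NewFromUtf8(isolate, "answer", v8::NewStringType::kNormal)
          .ToLocalChecked(),
      v8::Integer::New(isolate, 42));
  return v8::True(isolate);  // returning any Value signals success
}

v8::Local<v8::Module> MakeAnswerModule(v8::Isolate* isolate) {
  std::vector<v8::Local<v8::String>> exports = {
      v8::String::NewFromUtf8(isolate, "answer", v8::NewStringType::kNormal)
          .ToLocalChecked()};
  return v8::Module::CreateSyntheticModule(
      isolate,
      v8::String::NewFromUtf8(isolate, "answer-module",
                              v8::NewStringType::kNormal)
          .ToLocalChecked(),
      exports, EvaluationSteps);
}
```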
@@ -3420,7 +3451,8 @@ class V8_EXPORT Object : public Value {
   //
   // Returns true on success.
   V8_WARN_UNUSED_RESULT Maybe<bool> DefineProperty(
-      Local<Context> context, Local<Name> key, PropertyDescriptor& descriptor);
+      Local<Context> context, Local<Name> key,
+      PropertyDescriptor& descriptor);  // NOLINT(runtime/references)
 
   V8_DEPRECATED("Use maybe version", Local<Value> Get(Local<Value> key));
   V8_WARN_UNUSED_RESULT MaybeLocal<Value> Get(Local<Context> context,
@@ -5437,6 +5469,8 @@ class V8_EXPORT RegExp : public Object {
     kDotAll = 1 << 5,
   };
 
+  static constexpr int kFlagCount = 6;
+
   /**
    * Creates a regular expression from the given pattern string and
    * the flags bit field. May throw a JavaScript exception as
@@ -6552,27 +6586,86 @@ class V8_EXPORT ResourceConstraints {
   void ConfigureDefaults(uint64_t physical_memory,
                          uint64_t virtual_memory_limit);
 
-  // Returns the max semi-space size in KB.
-  size_t max_semi_space_size_in_kb() const {
-    return max_semi_space_size_in_kb_;
+  /**
+   * The address beyond which the VM's stack may not grow.
+   */
+  uint32_t* stack_limit() const { return stack_limit_; }
+  void set_stack_limit(uint32_t* value) { stack_limit_ = value; }
+
+  /**
+   * The amount of virtual memory reserved for generated code. This is relevant
+   * for 64-bit architectures that rely on code range for calls in code.
+   */
+  size_t code_range_size_in_bytes() const {
+    return code_range_size_ * kMB;
+  }
+  void set_code_range_size_in_bytes(size_t limit) {
+    code_range_size_ = limit / kMB;
   }
 
-  // Sets the max semi-space size in KB.
-  void set_max_semi_space_size_in_kb(size_t limit_in_kb) {
-    max_semi_space_size_in_kb_ = limit_in_kb;
+  /**
+   * The maximum size of the old generation.
+   * When the old generation approaches this limit, V8 will perform a series of
+   * garbage collections and invoke the NearHeapLimitCallback.
+   * If the garbage collections do not help and the callback does not
+   * increase the limit, then V8 will crash with V8::FatalProcessOutOfMemory.
+   */
+  size_t max_old_generation_size_in_bytes() const {
+    return max_old_space_size_ * kMB;
+  }
+  void set_max_old_generation_size_in_bytes(size_t limit) {
+    max_old_space_size_ = limit / kMB;
   }
 
-  size_t max_old_space_size() const { return max_old_space_size_; }
-  void set_max_old_space_size(size_t limit_in_mb) {
-    max_old_space_size_ = limit_in_mb;
+  /**
+   * The maximum size of the young generation, which consists of two semi-spaces
+   * and a large object space. This affects the frequency of Scavenge garbage
+   * collections and should typically be much smaller than the old generation.
+   */
+  size_t max_young_generation_size_in_bytes() const;
+  void set_max_young_generation_size_in_bytes(size_t limit);
+
+  size_t initial_old_generation_size_in_bytes() const {
+    return 0;
   }
-  uint32_t* stack_limit() const { return stack_limit_; }
-  // Sets an address beyond which the VM's stack may not grow.
-  void set_stack_limit(uint32_t* value) { stack_limit_ = value; }
-  size_t code_range_size() const { return code_range_size_; }
-  void set_code_range_size(size_t limit_in_mb) {
+  void set_initial_old_generation_size_in_bytes(size_t initial_size) {
+    // Not available on Node 12.
+  }
+
+  size_t initial_young_generation_size_in_bytes() const {
+    return 0;
+  }
+  void set_initial_young_generation_size_in_bytes(size_t initial_size) {
+    // Not available on Node 12.
+  }
+
+  /**
+   * Deprecated functions. Do not use in new code.
+   */
+  V8_DEPRECATE_SOON("Use code_range_size_in_bytes.",
+                    size_t code_range_size() const) {
+    return code_range_size_;
+  }
+  V8_DEPRECATE_SOON("Use set_code_range_size_in_bytes.",
+                    void set_code_range_size(size_t limit_in_mb)) {
     code_range_size_ = limit_in_mb;
   }
+  V8_DEPRECATE_SOON("Use max_young_generation_size_in_bytes.",
+                    size_t max_semi_space_size_in_kb() const) {
+    return max_semi_space_size_in_kb_;
+  }
+  V8_DEPRECATE_SOON("Use set_max_young_generation_size_in_bytes.",
+                    void set_max_semi_space_size_in_kb(size_t limit_in_kb)) {
+    max_semi_space_size_in_kb_ = limit_in_kb;
+  }
+  V8_DEPRECATE_SOON("Use max_old_generation_size_in_bytes.",
+                    size_t max_old_space_size() const) {
+    return max_old_space_size_;
+  }
+  V8_DEPRECATE_SOON("Use set_max_old_generation_size_in_bytes.",
+                    void set_max_old_space_size(size_t limit_in_mb)) {
+    max_old_space_size_ = limit_in_mb;
+  }
   V8_DEPRECATE_SOON("Zone does not pool memory any more.",
                     size_t max_zone_pool_size() const) {
     return max_zone_pool_size_;
@@ -6583,14 +6676,16 @@ class V8_EXPORT ResourceConstraints {
   }
 
  private:
+  static constexpr size_t kMB = 1048576u;
+
   // max_semi_space_size_ is in KB
-  size_t max_semi_space_size_in_kb_;
+  size_t max_semi_space_size_in_kb_ = 0;
 
   // The remaining limits are in MB
-  size_t max_old_space_size_;
-  uint32_t* stack_limit_;
-  size_t code_range_size_;
-  size_t max_zone_pool_size_;
+  size_t max_old_space_size_ = 0;
+  uint32_t* stack_limit_ = nullptr;
+  size_t code_range_size_ = 0;
+  size_t max_zone_pool_size_ = 0;
 };
 
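Editor's note: with ResourceConstraints moving to byte-based accessors, embedders set heap limits at isolate creation roughly as follows; the 256 MB / 16 MB figures are arbitrary illustrations:

```cpp
#include "v8.h"

// Sketch: cap the heap with the new byte-based setters rather than the
// deprecated MB/KB ones. The concrete limits are illustrative only.
v8::Isolate* NewConstrainedIsolate(v8::ArrayBuffer::Allocator* allocator) {
  v8::Isolate::CreateParams params;
  params.array_buffer_allocator = allocator;
  params.constraints.set_max_old_generation_size_in_bytes(256u * 1024 * 1024);
  params.constraints.set_max_young_generation_size_in_bytes(16u * 1024 * 1024);
  return v8::Isolate::New(params);
}
```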
@@ -6892,6 +6987,8 @@ typedef void (*FailedAccessCheckCallback)(Local<Object> target,
  */
 typedef bool (*AllowCodeGenerationFromStringsCallback)(Local<Context> context,
                                                        Local<String> source);
+typedef MaybeLocal<String> (*ModifyCodeGenerationFromStringsCallback)(
+    Local<Context> context, Local<String> source);
 
 // --- WebAssembly compilation callbacks ---
 typedef bool (*ExtensionCallback)(const FunctionCallbackInfo<Value>&);
@@ -7352,12 +7449,13 @@ class V8_EXPORT EmbedderHeapTracer {
   void GarbageCollectionForTesting(EmbedderStackState stack_state);
 
   /*
-   * Called by the embedder to signal newly allocated memory. Not bound to
-   * tracing phases. Embedders should trade off when increments are reported as
-   * V8 may consult global heuristics on whether to trigger garbage collection
-   * on this change.
+   * Called by the embedder to signal newly allocated or freed memory. Not
+   * bound to tracing phases. Embedders should trade off how often increments
+   * are reported, as V8 may consult global heuristics on whether to trigger
+   * garbage collection on this change.
    */
   void IncreaseAllocatedSize(size_t bytes);
+  void DecreaseAllocatedSize(size_t bytes);
 
   /*
    * Returns the v8::Isolate this tracer is attached to and |nullptr| if it
@@ -7685,6 +7783,8 @@ class V8_EXPORT Isolate {
     kRegExpMatchIsFalseishOnJSRegExp = 73,
     kDateGetTimezoneOffset = 74,
     kStringNormalize = 75,
+    kCallSiteAPIGetFunctionSloppyCall = 76,
+    kCallSiteAPIGetThisSloppyCall = 77,
 
     // If you add new values here, you'll also need to update Chromium's:
     // web_feature.mojom, UseCounterCallback.cpp, and enums.xml. V8 changes to
@@ -8489,6 +8589,8 @@ class V8_EXPORT Isolate {
    */
   void SetAllowCodeGenerationFromStringsCallback(
       AllowCodeGenerationFromStringsCallback callback);
+  void SetModifyCodeGenerationFromStringsCallback(
+      ModifyCodeGenerationFromStringsCallback callback);
 
   /**
    * Set the callback to invoke to check if wasm code generation should
@@ -9524,6 +9626,15 @@ class V8_EXPORT Context {
   template <class T>
   V8_INLINE MaybeLocal<T> GetDataFromSnapshotOnce(size_t index);
 
+  /**
+   * If callback is set, abort any attempt to execute JavaScript in this
+   * context, call the specified callback, and throw an exception.
+   * To unset abort, pass nullptr as callback.
+   */
+  typedef void (*AbortScriptExecutionCallback)(Isolate* isolate,
+                                               Local<Context> context);
+  void SetAbortScriptExecution(AbortScriptExecutionCallback callback);
+
   /**
    * Stack-allocated class which sets the execution context for all
    * operations executed within a local scope.
diff --git a/deps/v8/include/v8config.h b/deps/v8/include/v8config.h
index 5ec0480cf5c0c2..7bd2938225bc74 100644
--- a/deps/v8/include/v8config.h
+++ b/deps/v8/include/v8config.h
@@ -353,6 +353,12 @@
 #define V8_WARN_UNUSED_RESULT /* NOT SUPPORTED */
 #endif
 
+#if defined(BUILDING_V8_SHARED) && defined(USING_V8_SHARED)
+#error Inconsistent build configuration: To build the V8 shared library \
+set BUILDING_V8_SHARED, to include its headers for linking against the \
+V8 shared library set USING_V8_SHARED.
+#endif
+
 #ifdef V8_OS_WIN
 
 // Setup for Windows DLL export/import. When building the V8 DLL the
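Editor's note: Context::SetAbortScriptExecution above lets an embedder block further JavaScript execution in a context, e.g. during worker termination. A minimal sketch against the declaration shown; the function names are illustrative:

```cpp
#include "v8.h"

// Called by V8 instead of running a script once aborting is enabled; V8 also
// throws an exception in the context.
void OnScriptAttempted(v8::Isolate* isolate, v8::Local<v8::Context> context) {
  // e.g. log that a terminated worker tried to run more JavaScript
}

void ForbidFurtherScripts(v8::Local<v8::Context> context) {
  context->SetAbortScriptExecution(OnScriptAttempted);
}

// Passing nullptr unsets the callback and re-enables execution:
//   context->SetAbortScriptExecution(nullptr);
```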
diff --git a/deps/v8/infra/mb/gn_isolate_map.pyl b/deps/v8/infra/mb/gn_isolate_map.pyl
index 05b147d503f000..110b36c500bbd7 100644
--- a/deps/v8/infra/mb/gn_isolate_map.pyl
+++ b/deps/v8/infra/mb/gn_isolate_map.pyl
@@ -31,6 +31,10 @@
     "label": "//test:v8_d8_default",
     "type": "script",
   },
+  "generate-bytecode-expectations": {
+    "label": "//test/cctest:generate-bytecode-expectations",
+    "type": "script",
+  },
   "mjsunit": {
     "label": "//test/mjsunit:v8_mjsunit",
     "type": "script",
diff --git a/deps/v8/infra/mb/mb_config.pyl b/deps/v8/infra/mb/mb_config.pyl
index 354415ef438ccf..d5d192fb20ca94 100644
--- a/deps/v8/infra/mb/mb_config.pyl
+++ b/deps/v8/infra/mb/mb_config.pyl
@@ -95,6 +95,8 @@
       'V8 iOS - sim': 'release_x64_ios_simulator',
       'V8 Linux64 - debug - perfetto - builder': 'debug_x64_perfetto',
       'V8 Linux64 - pointer compression': 'release_x64_pointer_compression',
+      'V8 Linux64 - pointer compression without dchecks':
+          'release_x64_pointer_compression_without_dchecks',
       'V8 Linux64 - arm64 - sim - pointer compression - builder':
           'release_simulate_arm64_pointer_compression',
       'V8 Linux - noembed': 'release_x86_noembed',
@@ -201,6 +203,7 @@
       'v8_linux_verify_csa_rel_ng': 'release_x86_verify_csa',
       'v8_linux_nodcheck_rel_ng': 'release_x86_minimal_symbols',
       'v8_linux_dbg_ng': 'debug_x86_trybot',
+      'v8_linux_noi18n_compile_dbg': 'debug_x86_no_i18n',
      'v8_linux_noi18n_rel_ng': 'release_x86_no_i18n_trybot',
       'v8_linux_gc_stress_dbg': 'debug_x86_trybot',
       'v8_linux_nosnap_rel': 'release_x86_no_snap_trybot',
@@ -458,6 +461,8 @@
     'release_x64_pointer_compression': [
       'release_bot', 'x64', 'dcheck_always_on', 'v8_enable_slow_dchecks',
       'v8_enable_pointer_compression'],
+    'release_x64_pointer_compression_without_dchecks': [
+      'release_bot', 'x64', 'v8_enable_pointer_compression'],
     'release_x64_trybot': [
      'release_trybot', 'x64'],
     'release_x64_test_features_trybot': [
@@ -491,7 +496,7 @@
     'debug_x64_fuchsia': [
       'debug_bot', 'x64', 'fuchsia'],
     'debug_x64_gcc': [
-      'debug_bot', 'x64', 'gcc'],
+      'debug_bot', 'x64', 'gcc', 'v8_check_header_includes'],
     'debug_x64_header_includes': [
       'debug_bot', 'x64', 'v8_check_header_includes'],
     'debug_x64_jumbo': [
@@ -535,9 +540,10 @@
     'release_x86_noembed_trybot': [
       'release_trybot', 'x86', 'v8_no_enable_embedded_builtins'],
     'release_x86_gcc': [
-      'release_bot', 'x86', 'gcc'],
+      'release_bot', 'x86', 'gcc', 'v8_check_header_includes'],
     'release_x86_gcc_minimal_symbols': [
-      'release_bot', 'x86', 'gcc', 'minimal_symbols'],
+      'release_bot', 'x86', 'gcc', 'minimal_symbols',
+      'v8_check_header_includes'],
     'release_x86_gcmole': [
       'release_bot', 'x86', 'gcmole'],
     'release_x86_gcmole_trybot': [
diff --git a/deps/v8/infra/testing/PRESUBMIT.py b/deps/v8/infra/testing/PRESUBMIT.py
index b8e059724e0805..f1a64707b9cb1c 100644
--- a/deps/v8/infra/testing/PRESUBMIT.py
+++ b/deps/v8/infra/testing/PRESUBMIT.py
@@ -33,7 +33,9 @@
 ]
 
 # This is not an exhaustive list. It only reflects what we currently use. If
-# there's need to specify a different property, just add it here.
+# there's need to specify a different property, add it here and update the
+# properties passed to swarming in:
+# //build/scripts/slave/recipe_modules/v8/testing.py.
SUPPORTED_SWARMING_TASK_ATTRS = [ 'expiration', 'hard_timeout', diff --git a/deps/v8/infra/testing/builders.pyl b/deps/v8/infra/testing/builders.pyl index 0d39ea31f75512..13a73f3e94cd90 100644 --- a/deps/v8/infra/testing/builders.pyl +++ b/deps/v8/infra/testing/builders.pyl @@ -51,7 +51,7 @@ 'v8_linux_dbg_ng_triggered': { 'swarming_dimensions' : { 'cpu': 'x86-64-avx2', - 'os': 'Ubuntu-14.04', + 'os': 'Ubuntu-16.04', }, 'tests': [ {'name': 'benchmarks'}, @@ -67,7 +67,7 @@ }, 'v8_linux_gc_stress_dbg': { 'swarming_dimensions' : { - 'os': 'Ubuntu-14.04', + 'os': 'Ubuntu-16.04', }, 'tests': [ {'name': 'mjsunit', 'variant': 'slow_path', 'test_args': ['--gc-stress'], 'shards': 2}, @@ -85,7 +85,7 @@ 'v8_linux_nodcheck_rel_ng_triggered': { 'swarming_dimensions' : { 'cpu': 'x86-64-avx2', - 'os': 'Ubuntu-14.04', + 'os': 'Ubuntu-16.04', }, 'tests': [ {'name': 'benchmarks'}, @@ -100,7 +100,7 @@ }, 'v8_linux_noembed_rel_ng_triggered': { 'swarming_dimensions' : { - 'os': 'Ubuntu-14.04', + 'os': 'Ubuntu-16.04', }, 'tests': [ {'name': 'v8testing', 'shards': 2}, @@ -108,7 +108,7 @@ }, 'v8_linux_noi18n_rel_ng_triggered': { 'swarming_dimensions' : { - 'os': 'Ubuntu-14.04', + 'os': 'Ubuntu-16.04', }, 'tests': [ {'name': 'mozilla', 'variant': 'default'}, @@ -118,7 +118,7 @@ }, 'v8_linux_nosnap_rel': { 'swarming_dimensions' : { - 'os': 'Ubuntu-14.04', + 'os': 'Ubuntu-16.04', }, 'tests': [ {'name': 'v8testing', 'variant': 'default', 'shards': 6}, @@ -135,7 +135,7 @@ 'v8_linux_rel_ng_triggered': { 'swarming_dimensions' : { 'cpu': 'x86-64-avx2', - 'os': 'Ubuntu-14.04', + 'os': 'Ubuntu-16.04', }, 'tests': [ {'name': 'benchmarks'}, @@ -155,7 +155,7 @@ 'v8_linux_optional_rel_ng_triggered': { 'swarming_dimensions' : { 'cpu': 'x86-64-avx2', - 'os': 'Ubuntu-14.04', + 'os': 'Ubuntu-16.04', }, 'tests': [ # Code serializer. @@ -210,7 +210,7 @@ }, 'v8_linux_verify_csa_rel_ng_triggered': { 'swarming_dimensions' : { - 'os': 'Ubuntu-14.04', + 'os': 'Ubuntu-16.04', }, 'tests': [ {'name': 'v8testing', 'shards': 2}, @@ -220,7 +220,7 @@ # Linux32 with arm simulators 'v8_linux_arm_dbg': { 'swarming_dimensions' : { - 'os': 'Ubuntu-14.04', + 'os': 'Ubuntu-16.04', }, 'tests': [ {'name': 'mjsunit_sp_frame_access'}, @@ -233,7 +233,7 @@ }, 'v8_linux_arm_lite_rel_ng_triggered': { 'swarming_dimensions' : { - 'os': 'Ubuntu-14.04', + 'os': 'Ubuntu-16.04', }, 'tests': [ {'name': 'v8testing', 'variant': 'default', 'shards': 4}, @@ -241,7 +241,7 @@ }, 'v8_linux_arm_rel_ng_triggered': { 'swarming_dimensions' : { - 'os': 'Ubuntu-14.04', + 'os': 'Ubuntu-16.04', }, 'tests': [ {'name': 'mjsunit_sp_frame_access', 'shards': 2}, @@ -256,7 +256,7 @@ # Linux64 'v8_linux64_asan_rel_ng_triggered': { 'swarming_dimensions' : { - 'os': 'Ubuntu-14.04', + 'os': 'Ubuntu-16.04', }, 'tests': [ {'name': 'test262_variants', 'shards': 7}, @@ -267,7 +267,7 @@ }, 'v8_linux64_cfi_rel_ng_triggered': { 'swarming_dimensions' : { - 'os': 'Ubuntu-14.04', + 'os': 'Ubuntu-16.04', }, 'tests': [ {'name': 'benchmarks'}, @@ -280,7 +280,7 @@ 'v8_linux64_dbg_ng_triggered': { 'swarming_dimensions' : { 'cpu': 'x86-64-avx2', - 'os': 'Ubuntu-14.04', + 'os': 'Ubuntu-16.04', }, 'tests': [ {'name': 'benchmarks'}, @@ -297,7 +297,7 @@ }, 'v8_linux64_gc_stress_custom_snapshot_dbg_ng_triggered': { 'swarming_dimensions' : { - 'os': 'Ubuntu-14.04', + 'os': 'Ubuntu-16.04', }, 'tests': [ { @@ -309,7 +309,7 @@ }, 'v8_linux64_fyi_rel_ng_triggered': { 'swarming_dimensions' : { - 'os': 'Ubuntu-14.04', + 'os': 'Ubuntu-16.04', }, 'tests': [ # Stress sampling. 
@@ -322,7 +322,7 @@ }, 'v8_linux64_msan_rel': { 'swarming_dimensions' : { - 'os': 'Ubuntu-14.04', + 'os': 'Ubuntu-16.04', }, 'tests': [ {'name': 'test262', 'shards': 2}, @@ -332,23 +332,28 @@ 'v8_linux64_nodcheck_rel_ng_triggered': { 'swarming_dimensions' : { 'cpu': 'x86-64-avx2', - 'os': 'Ubuntu-14.04', + 'os': 'Ubuntu-16.04', }, 'tests': [ {'name': 'benchmarks'}, + {'name': 'benchmarks', 'variant': 'assert_types'}, {'name': 'benchmarks', 'variant': 'extra'}, + {'name': 'check-bytecode-baseline'}, {'name': 'mozilla'}, + {'name': 'mozilla', 'variant': 'assert_types'}, {'name': 'mozilla', 'variant': 'extra'}, {'name': 'perf_integration'}, {'name': 'test262_variants', 'shards': 2}, + {'name': 'test262_variants', 'variant': 'assert_types', 'shards': 2}, {'name': 'test262_variants', 'variant': 'extra', 'shards': 2}, {'name': 'v8testing', 'shards': 2}, + {'name': 'v8testing', 'variant': 'assert_types'}, {'name': 'v8testing', 'variant': 'extra'}, ], }, 'v8_linux64_perfetto_dbg_ng_triggered': { 'swarming_dimensions' : { - 'os': 'Ubuntu-14.04', + 'os': 'Ubuntu-16.04', }, 'tests': [ {'name': 'v8testing', 'shards': 3}, @@ -356,7 +361,7 @@ }, 'v8_linux64_pointer_compression_rel_ng_triggered': { 'swarming_dimensions' : { - 'os': 'Ubuntu-14.04', + 'os': 'Ubuntu-16.04', }, 'tests': [ {'name': 'v8testing', 'shards': 3}, @@ -365,7 +370,7 @@ 'v8_linux64_rel_ng_triggered': { 'swarming_dimensions' : { 'cpu': 'x86-64-avx2', - 'os': 'Ubuntu-14.04', + 'os': 'Ubuntu-16.04', }, 'tests': [ # TODO(machenbach): Add benchmarks. @@ -386,7 +391,7 @@ 'v8_linux64_rel_xg': { 'swarming_dimensions' : { 'cpu': 'x86-64-avx2', - 'os': 'Ubuntu-14.04', + 'os': 'Ubuntu-16.04', }, 'tests': [ {'name': 'v8initializers'}, @@ -395,7 +400,7 @@ }, 'v8_linux64_sanitizer_coverage_rel': { 'swarming_dimensions' : { - 'os': 'Ubuntu-14.04', + 'os': 'Ubuntu-16.04', }, 'tests': [ {'name': 'v8testing', 'shards': 3}, @@ -403,7 +408,7 @@ }, 'v8_linux64_tsan_rel': { 'swarming_dimensions' : { - 'os': 'Ubuntu-14.04', + 'os': 'Ubuntu-16.04', }, 'tests': [ {'name': 'benchmarks'}, @@ -416,7 +421,7 @@ }, 'v8_linux64_tsan_isolates_rel_ng_triggered': { 'swarming_dimensions' : { - 'os': 'Ubuntu-14.04', + 'os': 'Ubuntu-16.04', }, 'tests': [ {'name': 'v8testing', 'test_args': ['--isolates'], 'shards': 7}, @@ -424,7 +429,7 @@ }, 'v8_linux64_ubsan_rel_ng_triggered': { 'swarming_dimensions' : { - 'os': 'Ubuntu-14.04', + 'os': 'Ubuntu-16.04', }, 'tests': [ {'name': 'v8testing', 'shards': 2}, @@ -432,7 +437,7 @@ }, 'v8_linux64_verify_csa_rel_ng_triggered': { 'swarming_dimensions' : { - 'os': 'Ubuntu-14.04', + 'os': 'Ubuntu-16.04', }, 'tests': [ {'name': 'v8testing', 'shards': 2}, @@ -442,7 +447,7 @@ # Linux64 with arm64 simulators 'v8_linux_arm64_dbg': { 'swarming_dimensions' : { - 'os': 'Ubuntu-14.04', + 'os': 'Ubuntu-16.04', }, 'tests': [ {'name': 'mjsunit_sp_frame_access'}, @@ -455,7 +460,7 @@ }, 'v8_linux_arm64_gc_stress_dbg': { 'swarming_dimensions' : { - 'os': 'Ubuntu-14.04', + 'os': 'Ubuntu-16.04', }, 'tests': [ {'name': 'd8testing', 'test_args': ['--gc-stress'], 'shards': 10}, @@ -463,7 +468,7 @@ }, 'v8_linux_arm64_rel_ng_triggered': { 'swarming_dimensions' : { - 'os': 'Ubuntu-14.04', + 'os': 'Ubuntu-16.04', }, 'tests': [ {'name': 'mjsunit_sp_frame_access', 'shards': 2}, @@ -476,7 +481,7 @@ }, 'v8_linux64_arm64_pointer_compression_rel_ng_triggered': { 'swarming_dimensions' : { - 'os': 'Ubuntu-14.04', + 'os': 'Ubuntu-16.04', }, 'swarming_task_attrs': { 'expiration': 14400, @@ -632,7 +637,7 @@ # Main. 
'V8 Fuzzer': { 'swarming_dimensions' : { - 'os': 'Ubuntu-14.04', + 'os': 'Ubuntu-16.04', }, 'swarming_task_attrs': { 'expiration': 14400, @@ -646,7 +651,7 @@ 'V8 Linux': { 'swarming_dimensions': { 'cpu': 'x86-64-avx2', - 'os': 'Ubuntu-14.04', + 'os': 'Ubuntu-16.04', }, 'tests': [ {'name': 'benchmarks'}, @@ -688,7 +693,7 @@ }, 'V8 Linux - arm64 - sim - MSAN': { 'swarming_dimensions': { - 'os': 'Ubuntu-14.04', + 'os': 'Ubuntu-16.04', }, 'tests': [ {'name': 'test262', 'shards': 3}, @@ -698,7 +703,7 @@ 'V8 Linux - debug': { 'swarming_dimensions': { 'cpu': 'x86-64-avx2', - 'os': 'Ubuntu-14.04', + 'os': 'Ubuntu-16.04', }, 'tests': [ {'name': 'benchmarks'}, @@ -759,7 +764,7 @@ }, 'V8 Linux - noembed': { 'swarming_dimensions': { - 'os': 'Ubuntu-14.04', + 'os': 'Ubuntu-16.04', }, 'tests': [ {'name': 'v8testing'}, @@ -767,7 +772,7 @@ }, 'V8 Linux - noembed - debug': { 'swarming_dimensions': { - 'os': 'Ubuntu-14.04', + 'os': 'Ubuntu-16.04', }, 'tests': [ {'name': 'v8testing', 'shards': 3}, @@ -775,7 +780,7 @@ }, 'V8 Linux - full debug': { 'swarming_dimensions': { - 'os': 'Ubuntu-14.04', + 'os': 'Ubuntu-16.04', }, 'swarming_task_attrs': { 'expiration': 14400, @@ -788,7 +793,7 @@ }, 'V8 Linux - gc stress': { 'swarming_dimensions': { - 'os': 'Ubuntu-14.04', + 'os': 'Ubuntu-16.04', }, 'tests': [ { @@ -806,7 +811,7 @@ }, 'V8 Linux - noi18n - debug': { 'swarming_dimensions': { - 'os': 'Ubuntu-14.04', + 'os': 'Ubuntu-16.04', }, 'tests': [ {'name': 'mozilla', 'variant': 'default'}, @@ -816,7 +821,7 @@ }, 'V8 Linux - nosnap': { 'swarming_dimensions': { - 'os': 'Ubuntu-14.04', + 'os': 'Ubuntu-16.04', }, 'swarming_task_attrs': { 'expiration': 14400, @@ -831,7 +836,7 @@ }, 'V8 Linux - nosnap - debug': { 'swarming_dimensions': { - 'os': 'Ubuntu-14.04', + 'os': 'Ubuntu-16.04', }, 'swarming_task_attrs': { 'expiration': 14400, @@ -844,7 +849,7 @@ }, 'V8 Linux - predictable': { 'swarming_dimensions': { - 'os': 'Ubuntu-14.04', + 'os': 'Ubuntu-16.04', }, 'tests': [ {'name': 'benchmarks'}, @@ -854,7 +859,7 @@ }, 'V8 Linux - shared': { 'swarming_dimensions': { - 'os': 'Ubuntu-14.04', + 'os': 'Ubuntu-16.04', }, 'tests': [ {'name': 'mozilla'}, @@ -864,7 +869,7 @@ }, 'V8 Linux - verify csa': { 'swarming_dimensions': { - 'os': 'Ubuntu-14.04', + 'os': 'Ubuntu-16.04', }, 'tests': [ {'name': 'v8testing'}, @@ -881,20 +886,25 @@ 'V8 Linux64': { 'swarming_dimensions': { 'cpu': 'x86-64-avx2', - 'os': 'Ubuntu-14.04', + 'os': 'Ubuntu-16.04', }, 'tests': [ {'name': 'benchmarks'}, + {'name': 'benchmarks', 'variant': 'assert_types'}, {'name': 'benchmarks', 'variant': 'extra'}, + {'name': 'check-bytecode-baseline'}, {'name': 'mjsunit_sp_frame_access'}, {'name': 'mozilla'}, + {'name': 'mozilla', 'variant': 'assert_types'}, {'name': 'mozilla', 'variant': 'extra'}, {'name': 'optimize_for_size'}, {'name': 'perf_integration'}, {'name': 'test262_variants', 'shards': 2}, + {'name': 'test262_variants', 'variant': 'assert_types'}, {'name': 'test262_variants', 'variant': 'extra'}, {'name': 'v8initializers'}, {'name': 'v8testing'}, + {'name': 'v8testing', 'variant': 'assert_types'}, {'name': 'v8testing', 'variant': 'extra'}, {'name': 'v8testing', 'variant': 'minor_mc', 'shards': 1}, # Noavx. 
@@ -917,7 +927,7 @@ }, 'V8 Linux64 - cfi': { 'swarming_dimensions' : { - 'os': 'Ubuntu-14.04', + 'os': 'Ubuntu-16.04', }, 'tests': [ {'name': 'benchmarks'}, @@ -929,7 +939,7 @@ }, 'V8 Linux64 - custom snapshot - debug': { 'swarming_dimensions' : { - 'os': 'Ubuntu-14.04', + 'os': 'Ubuntu-16.04', }, 'tests': [ {'name': 'mjsunit', 'test_args': ['--no-harness']}, @@ -938,7 +948,7 @@ 'V8 Linux64 - debug': { 'swarming_dimensions': { 'cpu': 'x86-64-avx2', - 'os': 'Ubuntu-14.04', + 'os': 'Ubuntu-16.04', }, 'tests': [ {'name': 'benchmarks'}, @@ -974,7 +984,7 @@ }, 'V8 Linux64 - debug - fyi': { 'swarming_dimensions' : { - 'os': 'Ubuntu-14.04', + 'os': 'Ubuntu-16.04', }, 'tests': [ # Infra staging. @@ -986,7 +996,7 @@ }, 'V8 Linux64 - debug - perfetto': { 'swarming_dimensions' : { - 'os': 'Ubuntu-14.04', + 'os': 'Ubuntu-16.04', }, 'swarming_task_attrs': { 'expiration': 14400, @@ -999,7 +1009,7 @@ }, 'V8 Linux64 - fyi': { 'swarming_dimensions' : { - 'os': 'Ubuntu-14.04', + 'os': 'Ubuntu-16.04', }, 'tests': [ # Infra staging. @@ -1011,7 +1021,7 @@ }, 'V8 Linux64 - gcov coverage': { 'swarming_dimensions' : { - 'os': 'Ubuntu-14.04', + 'os': 'Ubuntu-16.04', }, 'tests': [ {'name': 'v8testing'}, @@ -1019,7 +1029,7 @@ }, 'V8 Linux64 - internal snapshot': { 'swarming_dimensions' : { - 'os': 'Ubuntu-14.04', + 'os': 'Ubuntu-16.04', }, 'tests': [ {'name': 'v8testing'}, @@ -1027,7 +1037,7 @@ }, 'V8 Linux64 - pointer compression': { 'swarming_dimensions' : { - 'os': 'Ubuntu-14.04', + 'os': 'Ubuntu-16.04', }, 'tests': [ {'name': 'v8testing', 'shards': 2}, @@ -1035,7 +1045,7 @@ }, 'V8 Linux64 - shared': { 'swarming_dimensions' : { - 'os': 'Ubuntu-14.04', + 'os': 'Ubuntu-16.04', }, 'tests': [ {'name': 'mozilla'}, @@ -1045,7 +1055,7 @@ }, 'V8 Linux64 - verify csa': { 'swarming_dimensions' : { - 'os': 'Ubuntu-14.04', + 'os': 'Ubuntu-16.04', }, 'tests': [ {'name': 'v8testing'}, @@ -1053,7 +1063,7 @@ }, 'V8 Linux64 ASAN': { 'swarming_dimensions' : { - 'os': 'Ubuntu-14.04', + 'os': 'Ubuntu-16.04', }, 'tests': [ {'name': 'test262_variants', 'shards': 5}, @@ -1064,7 +1074,7 @@ }, 'V8 Linux64 GC Stress - custom snapshot': { 'swarming_dimensions' : { - 'os': 'Ubuntu-14.04', + 'os': 'Ubuntu-16.04', }, 'tests': [ { @@ -1076,7 +1086,7 @@ }, 'V8 Linux64 TSAN': { 'swarming_dimensions' : { - 'os': 'Ubuntu-14.04', + 'os': 'Ubuntu-16.04', }, 'tests': [ {'name': 'benchmarks'}, @@ -1089,7 +1099,7 @@ }, 'V8 Linux64 TSAN - concurrent marking': { 'swarming_dimensions' : { - 'os': 'Ubuntu-14.04', + 'os': 'Ubuntu-16.04', }, 'swarming_task_attrs': { 'expiration': 14400, @@ -1119,7 +1129,7 @@ }, 'V8 Linux64 TSAN - isolates': { 'swarming_dimensions' : { - 'os': 'Ubuntu-14.04', + 'os': 'Ubuntu-16.04', }, 'tests': [ {'name': 'v8testing', 'test_args': ['--isolates'], 'shards': 7}, @@ -1127,7 +1137,7 @@ }, 'V8 Linux64 UBSan': { 'swarming_dimensions' : { - 'os': 'Ubuntu-14.04', + 'os': 'Ubuntu-16.04', }, 'tests': [ {'name': 'mozilla'}, @@ -1156,8 +1166,8 @@ 'tests': [ {'name': 'mozilla'}, {'name': 'test262', 'shards': 2}, - {'name': 'v8testing', 'shards': 3}, - {'name': 'v8testing', 'variant': 'extra'}, + {'name': 'v8testing', 'shards': 4}, + {'name': 'v8testing', 'variant': 'extra', 'shards': 2}, ], }, 'V8 Mac64 ASAN': { @@ -1266,74 +1276,44 @@ }, 'V8 Arm': { 'swarming_dimensions': { - 'cores': '2', - 'cpu': 'armv7l', - 'os': 'Ubuntu-14.04', + 'cores': '8', + 'cpu': 'armv7l-32-ODROID-XU4', + 'os': 'Ubuntu-16.04', }, 'swarming_task_attrs': { 'expiration': 21600, 'hard_timeout': 5400, }, 'tests': [ - {'name': 'benchmarks'}, - {'name': 
'optimize_for_size'}, - {'name': 'v8testing', 'shards': 2}, # Odroid. { 'name': 'benchmarks', 'suffix': 'ODROID', - 'swarming_dimensions': { - 'cores': '8', - 'os': 'Ubuntu-16.04', - 'cpu': 'armv7l-32-ODROID-XU4', - }, # Less parallelism to prevent OOMs in benchmarks. 'test_args': ['-j2'], }, { 'name': 'optimize_for_size', 'suffix': 'ODROID', - 'swarming_dimensions': { - 'cores': '8', - 'os': 'Ubuntu-16.04', - 'cpu': 'armv7l-32-ODROID-XU4', - } }, { 'name': 'v8testing', 'suffix': 'ODROID', 'shards': 2, - 'swarming_dimensions': { - 'cores': '8', - 'os': 'Ubuntu-16.04', - 'cpu': 'armv7l-32-ODROID-XU4', - } }, ], }, 'V8 Arm - debug': { 'swarming_dimensions': { - 'cores': '2', - 'cpu': 'armv7l', - 'os': 'Ubuntu-14.04', + 'cores': '8', + 'cpu': 'armv7l-32-ODROID-XU4', + 'os': 'Ubuntu-16.04', }, 'swarming_task_attrs': { 'expiration': 21600, 'hard_timeout': 3600, }, 'tests': [ - { - 'name': 'optimize_for_size', - 'variant': 'default', - 'test_args': ['--extra-flags=--verify-heap-skip-remembered-set'], - 'shards': 2 - }, - { - 'name': 'v8testing', - 'variant': 'default', - 'test_args': ['--extra-flags=--verify-heap-skip-remembered-set'], - 'shards': 3 - }, # Odroid. { 'name': 'optimize_for_size', @@ -1341,11 +1321,6 @@ 'variant': 'default', 'test_args': ['--extra-flags=--verify-heap-skip-remembered-set'], 'shards': 2, - 'swarming_dimensions': { - 'cores': '8', - 'os': 'Ubuntu-16.04', - 'cpu': 'armv7l-32-ODROID-XU4', - } }, { 'name': 'v8testing', @@ -1353,48 +1328,32 @@ 'variant': 'default', 'test_args': ['--extra-flags=--verify-heap-skip-remembered-set'], 'shards': 3, - 'swarming_dimensions': { - 'cores': '8', - 'os': 'Ubuntu-16.04', - 'cpu': 'armv7l-32-ODROID-XU4', - } }, ], }, 'V8 Arm GC Stress': { 'swarming_dimensions': { - 'cores': '2', - 'cpu': 'armv7l', - 'os': 'Ubuntu-14.04', + 'cores': '8', + 'cpu': 'armv7l-32-ODROID-XU4', + 'os': 'Ubuntu-16.04', }, 'swarming_task_attrs': { 'expiration': 21600, 'hard_timeout': 7200, }, 'tests': [ - { - 'name': 'd8testing', - 'variant': 'default', - 'test_args': ['--gc-stress', '--extra-flags=--verify-heap-skip-remembered-set'], - 'shards': 3 - }, { 'name': 'd8testing', 'suffix': 'ODROID', 'variant': 'default', 'test_args': ['--gc-stress', '--extra-flags=--verify-heap-skip-remembered-set'], 'shards': 3, - 'swarming_dimensions': { - 'cores': '8', - 'os': 'Ubuntu-16.04', - 'cpu': 'armv7l-32-ODROID-XU4', - } }, ], }, 'V8 Linux - arm - sim': { 'swarming_dimensions': { - 'os': 'Ubuntu-14.04', + 'os': 'Ubuntu-16.04', }, 'tests': [ {'name': 'mjsunit_sp_frame_access'}, @@ -1433,7 +1392,7 @@ }, 'V8 Linux - arm - sim - debug': { 'swarming_dimensions': { - 'os': 'Ubuntu-14.04', + 'os': 'Ubuntu-16.04', }, 'tests': [ {'name': 'mjsunit_sp_frame_access'}, @@ -1446,12 +1405,14 @@ { 'name': 'mozilla', 'suffix': 'armv8-a', - 'test_args': ['--extra-flags', '--enable-armv8'] + 'test_args': ['--extra-flags', '--enable-armv8'], + 'shards': 2, }, { 'name': 'test262', 'suffix': 'armv8-a', - 'test_args': ['--extra-flags', '--enable-armv8'] + 'test_args': ['--extra-flags', '--enable-armv8'], + 'shards': 2, }, { 'name': 'v8testing', @@ -1483,7 +1444,7 @@ }, 'V8 Linux - arm - sim - lite': { 'swarming_dimensions': { - 'os': 'Ubuntu-14.04', + 'os': 'Ubuntu-16.04', }, 'tests': [ {'name': 'v8testing', 'variant': 'default', 'shards': 2}, @@ -1491,7 +1452,7 @@ }, 'V8 Linux - arm - sim - lite - debug': { 'swarming_dimensions': { - 'os': 'Ubuntu-14.04', + 'os': 'Ubuntu-16.04', }, 'tests': [ {'name': 'v8testing', 'variant': 'default', 'shards': 4}, @@ -1499,7 +1460,7 @@ }, 'V8 Linux - arm64 - 
sim': { 'swarming_dimensions': { - 'os': 'Ubuntu-14.04', + 'os': 'Ubuntu-16.04', }, 'tests': [ {'name': 'mjsunit_sp_frame_access'}, @@ -1512,7 +1473,7 @@ }, 'V8 Linux - arm64 - sim - debug': { 'swarming_dimensions': { - 'os': 'Ubuntu-14.04', + 'os': 'Ubuntu-16.04', }, # TODO(machenbach): Remove longer timeout when this builder scales better. 'swarming_task_attrs': { @@ -1529,7 +1490,7 @@ }, 'V8 Linux - arm64 - sim - gc stress': { 'swarming_dimensions': { - 'os': 'Ubuntu-14.04', + 'os': 'Ubuntu-16.04', }, 'swarming_task_attrs': { 'expiration': 14400, @@ -1546,7 +1507,7 @@ }, 'V8 Linux - mips64el - sim': { 'swarming_dimensions': { - 'os': 'Ubuntu-14.04', + 'os': 'Ubuntu-16.04', }, 'swarming_task_attrs': { 'expiration': 14400, @@ -1560,7 +1521,7 @@ }, 'V8 Linux - mipsel - sim': { 'swarming_dimensions': { - 'os': 'Ubuntu-14.04', + 'os': 'Ubuntu-16.04', }, 'swarming_task_attrs': { 'expiration': 14400, @@ -1574,7 +1535,7 @@ }, 'V8 Linux - ppc64 - sim': { 'swarming_dimensions': { - 'os': 'Ubuntu-14.04', + 'os': 'Ubuntu-16.04', }, 'swarming_task_attrs': { 'expiration': 14400, @@ -1587,7 +1548,7 @@ }, 'V8 Linux - s390x - sim': { 'swarming_dimensions': { - 'os': 'Ubuntu-14.04', + 'os': 'Ubuntu-16.04', }, 'swarming_task_attrs': { 'expiration': 14400, @@ -1600,7 +1561,7 @@ }, 'V8 Linux64 - arm64 - sim - pointer compression': { 'swarming_dimensions': { - 'os': 'Ubuntu-14.04', + 'os': 'Ubuntu-16.04', }, 'swarming_task_attrs': { 'expiration': 14400, @@ -1628,7 +1589,7 @@ # Clusterfuzz. 'V8 NumFuzz': { 'swarming_dimensions': { - 'os': 'Ubuntu-14.04', + 'os': 'Ubuntu-16.04', }, 'swarming_task_attrs': { 'expiration': 13800, @@ -1645,7 +1606,7 @@ }, 'V8 NumFuzz - TSAN': { 'swarming_dimensions': { - 'os': 'Ubuntu-14.04', + 'os': 'Ubuntu-16.04', }, 'swarming_task_attrs': { 'expiration': 13800, @@ -1693,7 +1654,7 @@ }, 'V8 NumFuzz - debug': { 'swarming_dimensions': { - 'os': 'Ubuntu-14.04', + 'os': 'Ubuntu-16.04', }, 'swarming_task_attrs': { 'expiration': 13800, @@ -1750,7 +1711,7 @@ # Branches. 
'V8 Linux - beta branch': { 'swarming_dimensions': { - 'os': 'Ubuntu-14.04', + 'os': 'Ubuntu-16.04', }, 'tests': [ {'name': 'mozilla'}, @@ -1760,7 +1721,7 @@ }, 'V8 Linux - beta branch - debug': { 'swarming_dimensions': { - 'os': 'Ubuntu-14.04', + 'os': 'Ubuntu-16.04', }, 'tests': [ {'name': 'mozilla'}, @@ -1770,7 +1731,7 @@ }, 'V8 Linux - stable branch': { 'swarming_dimensions': { - 'os': 'Ubuntu-14.04', + 'os': 'Ubuntu-16.04', }, 'tests': [ {'name': 'mozilla'}, @@ -1780,7 +1741,7 @@ }, 'V8 Linux - stable branch - debug': { 'swarming_dimensions': { - 'os': 'Ubuntu-14.04', + 'os': 'Ubuntu-16.04', }, 'tests': [ {'name': 'mozilla'}, @@ -1790,7 +1751,7 @@ }, 'V8 Linux64 - beta branch': { 'swarming_dimensions': { - 'os': 'Ubuntu-14.04', + 'os': 'Ubuntu-16.04', }, 'tests': [ {'name': 'mozilla'}, @@ -1800,7 +1761,7 @@ }, 'V8 Linux64 - beta branch - debug': { 'swarming_dimensions': { - 'os': 'Ubuntu-14.04', + 'os': 'Ubuntu-16.04', }, 'tests': [ {'name': 'mozilla'}, @@ -1810,7 +1771,7 @@ }, 'V8 Linux64 - stable branch': { 'swarming_dimensions': { - 'os': 'Ubuntu-14.04', + 'os': 'Ubuntu-16.04', }, 'tests': [ {'name': 'mozilla'}, @@ -1820,7 +1781,7 @@ }, 'V8 Linux64 - stable branch - debug': { 'swarming_dimensions': { - 'os': 'Ubuntu-14.04', + 'os': 'Ubuntu-16.04', }, 'tests': [ {'name': 'mozilla'}, @@ -1830,7 +1791,7 @@ }, 'V8 arm - sim - beta branch': { 'swarming_dimensions': { - 'os': 'Ubuntu-14.04', + 'os': 'Ubuntu-16.04', }, 'tests': [ {'name': 'mozilla'}, @@ -1840,7 +1801,7 @@ }, 'V8 arm - sim - beta branch - debug': { 'swarming_dimensions': { - 'os': 'Ubuntu-14.04', + 'os': 'Ubuntu-16.04', }, 'tests': [ {'name': 'mozilla'}, @@ -1850,7 +1811,7 @@ }, 'V8 arm - sim - stable branch': { 'swarming_dimensions': { - 'os': 'Ubuntu-14.04', + 'os': 'Ubuntu-16.04', }, 'tests': [ {'name': 'mozilla'}, @@ -1860,7 +1821,7 @@ }, 'V8 arm - sim - stable branch - debug': { 'swarming_dimensions': { - 'os': 'Ubuntu-14.04', + 'os': 'Ubuntu-16.04', }, 'tests': [ {'name': 'mozilla'}, @@ -1870,7 +1831,7 @@ }, 'V8 mips64el - sim - beta branch': { 'swarming_dimensions': { - 'os': 'Ubuntu-14.04', + 'os': 'Ubuntu-16.04', }, 'tests': [ {'name': 'unittests'}, @@ -1878,7 +1839,7 @@ }, 'V8 mips64el - sim - stable branch': { 'swarming_dimensions': { - 'os': 'Ubuntu-14.04', + 'os': 'Ubuntu-16.04', }, 'tests': [ {'name': 'unittests'}, @@ -1886,7 +1847,7 @@ }, 'V8 mipsel - sim - beta branch': { 'swarming_dimensions': { - 'os': 'Ubuntu-14.04', + 'os': 'Ubuntu-16.04', }, 'tests': [ {'name': 'v8testing', 'shards': 4}, @@ -1894,7 +1855,7 @@ }, 'V8 mipsel - sim - stable branch': { 'swarming_dimensions': { - 'os': 'Ubuntu-14.04', + 'os': 'Ubuntu-16.04', }, 'tests': [ {'name': 'v8testing', 'shards': 4}, @@ -1902,7 +1863,7 @@ }, 'V8 ppc64 - sim - beta branch': { 'swarming_dimensions': { - 'os': 'Ubuntu-14.04', + 'os': 'Ubuntu-16.04', }, 'tests': [ {'name': 'unittests'}, @@ -1910,7 +1871,7 @@ }, 'V8 ppc64 - sim - stable branch': { 'swarming_dimensions': { - 'os': 'Ubuntu-14.04', + 'os': 'Ubuntu-16.04', }, 'tests': [ {'name': 'unittests'}, @@ -1918,7 +1879,7 @@ }, 'V8 s390x - sim - beta branch': { 'swarming_dimensions': { - 'os': 'Ubuntu-14.04', + 'os': 'Ubuntu-16.04', }, 'tests': [ {'name': 'unittests'}, @@ -1926,7 +1887,7 @@ }, 'V8 s390x - sim - stable branch': { 'swarming_dimensions': { - 'os': 'Ubuntu-14.04', + 'os': 'Ubuntu-16.04', }, 'tests': [ {'name': 'unittests'}, diff --git a/deps/v8/samples/process.cc b/deps/v8/samples/process.cc index 9af1c0b23b7894..e4f6fd9cee733d 100644 --- a/deps/v8/samples/process.cc +++ 
b/deps/v8/samples/process.cc @@ -676,19 +676,17 @@ StringHttpRequest kSampleRequests[kSampleSize] = { StringHttpRequest("/", "localhost", "yahoo.com", "firefox") }; - -bool ProcessEntries(v8::Platform* platform, HttpRequestProcessor* processor, - int count, StringHttpRequest* reqs) { +bool ProcessEntries(v8::Isolate* isolate, v8::Platform* platform, + HttpRequestProcessor* processor, int count, + StringHttpRequest* reqs) { for (int i = 0; i < count; i++) { bool result = processor->Process(&reqs[i]); - while (v8::platform::PumpMessageLoop(platform, Isolate::GetCurrent())) - continue; + while (v8::platform::PumpMessageLoop(platform, isolate)) continue; if (!result) return false; } return true; } - void PrintMap(map* m) { for (map::iterator i = m->begin(); i != m->end(); i++) { pair entry = *i; @@ -727,7 +725,9 @@ int main(int argc, char* argv[]) { fprintf(stderr, "Error initializing processor.\n"); return 1; } - if (!ProcessEntries(platform.get(), &processor, kSampleSize, kSampleRequests)) + if (!ProcessEntries(isolate, platform.get(), &processor, kSampleSize, + kSampleRequests)) { return 1; + } PrintMap(&output); } diff --git a/deps/v8/src/DEPS b/deps/v8/src/DEPS index d24e647b24157d..1ae6a569e70e0f 100644 --- a/deps/v8/src/DEPS +++ b/deps/v8/src/DEPS @@ -16,6 +16,7 @@ include_rules = [ "+src/heap/heap-inl.h", "+src/heap/heap-write-barrier-inl.h", "+src/heap/heap-write-barrier.h", + "+src/heap/read-only-heap-inl.h", "+src/heap/read-only-heap.h", "-src/inspector", "-src/interpreter", @@ -29,6 +30,10 @@ include_rules = [ "+src/interpreter/interpreter.h", "+src/interpreter/interpreter-generator.h", "+src/interpreter/setup-interpreter.h", + "-src/regexp", + "+src/regexp/regexp.h", + "+src/regexp/regexp-stack.h", + "+src/regexp/regexp-utils.h", "-src/trap-handler", "+src/trap-handler/handler-inside-posix.h", "+src/trap-handler/handler-inside-win.h", @@ -44,5 +49,6 @@ specific_include_rules = { "d8\.cc": [ "+include/libplatform/libplatform.h", "+include/libplatform/v8-tracing.h", + "+perfetto/tracing.h" ], } diff --git a/deps/v8/src/OWNERS b/deps/v8/src/OWNERS index abad5274c880f8..c6881f232117b2 100644 --- a/deps/v8/src/OWNERS +++ b/deps/v8/src/OWNERS @@ -1,9 +1,5 @@ -per-file intl.*=cira@chromium.org -per-file intl.*=mnita@google.com -per-file intl.*=jshin@chromium.org -per-file typing-asm.*=aseemgarg@chromium.org -per-file objects-body-descriptors*=hpayer@chromium.org -per-file objects-body-descriptors*=mlippautz@chromium.org -per-file objects-body-descriptors*=ulan@chromium.org +per-file *DEPS=file://COMMON_OWNERS +per-file intl-*=file://INTL_OWNERS +per-file *-intl*=file://INTL_OWNERS # COMPONENT: Blink>JavaScript diff --git a/deps/v8/src/api/OWNERS b/deps/v8/src/api/OWNERS new file mode 100644 index 00000000000000..ce6fb20af84d38 --- /dev/null +++ b/deps/v8/src/api/OWNERS @@ -0,0 +1,11 @@ +file://include/OWNERS +clemensh@chromium.org +ishell@chromium.org +jkummerow@chromium.org +leszeks@chromium.org +mlippautz@chromium.org +mslekova@chromium.org +mstarzinger@chromium.org +verwaest@chromium.org + +# COMPONENT: Blink>JavaScript>API diff --git a/deps/v8/src/api/api-natives.cc b/deps/v8/src/api/api-natives.cc index c22b7c47f9cf4b..cd380d3cda1aa2 100644 --- a/deps/v8/src/api/api-natives.cc +++ b/deps/v8/src/api/api-natives.cc @@ -5,8 +5,8 @@ #include "src/api/api-natives.h" #include "src/api/api-inl.h" +#include "src/common/message-template.h" #include "src/execution/isolate-inl.h" -#include "src/execution/message-template.h" #include "src/objects/api-callbacks.h" #include 
"src/objects/hash-table-inl.h" #include "src/objects/lookup.h" @@ -39,7 +39,6 @@ class InvokeScope { MaybeHandle InstantiateObject(Isolate* isolate, Handle data, Handle new_target, - bool is_hidden_prototype, bool is_prototype); MaybeHandle InstantiateFunction( @@ -54,7 +53,7 @@ MaybeHandle Instantiate( isolate, Handle::cast(data), maybe_name); } else if (data->IsObjectTemplateInfo()) { return InstantiateObject(isolate, Handle::cast(data), - Handle(), false, false); + Handle(), false); } else { return data; } @@ -129,7 +128,7 @@ void DisableAccessChecks(Isolate* isolate, Handle object) { // Copy map so it won't interfere constructor's initial map. Handle new_map = Map::Copy(isolate, old_map, "DisableAccessChecks"); new_map->set_is_access_check_needed(false); - JSObject::MigrateToMap(Handle::cast(object), new_map); + JSObject::MigrateToMap(isolate, Handle::cast(object), new_map); } void EnableAccessChecks(Isolate* isolate, Handle object) { @@ -138,7 +137,7 @@ void EnableAccessChecks(Isolate* isolate, Handle object) { Handle new_map = Map::Copy(isolate, old_map, "EnableAccessChecks"); new_map->set_is_access_check_needed(true); new_map->set_may_have_interesting_symbols(true); - JSObject::MigrateToMap(object, new_map); + JSObject::MigrateToMap(isolate, object, new_map); } class AccessCheckDisableScope { @@ -178,8 +177,7 @@ Object GetIntrinsic(Isolate* isolate, v8::Intrinsic intrinsic) { template MaybeHandle ConfigureInstance(Isolate* isolate, Handle obj, - Handle data, - bool is_hidden_prototype) { + Handle data) { HandleScope scope(isolate); // Disable access checks while instantiating the object. AccessCheckDisableScope access_check_scope(isolate, obj); @@ -246,11 +244,10 @@ MaybeHandle ConfigureInstance(Isolate* isolate, Handle obj, } else { auto getter = handle(properties->get(i++), isolate); auto setter = handle(properties->get(i++), isolate); - RETURN_ON_EXCEPTION( - isolate, - DefineAccessorProperty(isolate, obj, name, getter, setter, - attributes, is_hidden_prototype), - JSObject); + RETURN_ON_EXCEPTION(isolate, + DefineAccessorProperty(isolate, obj, name, getter, + setter, attributes, false), + JSObject); } } else { // Intrinsic data property --- Get appropriate value from the current @@ -364,7 +361,6 @@ bool IsSimpleInstantiation(Isolate* isolate, ObjectTemplateInfo info, MaybeHandle InstantiateObject(Isolate* isolate, Handle info, Handle new_target, - bool is_hidden_prototype, bool is_prototype) { Handle constructor; int serial_number = Smi::ToInt(info->serial_number()); @@ -413,8 +409,7 @@ MaybeHandle InstantiateObject(Isolate* isolate, if (is_prototype) JSObject::OptimizeAsPrototype(object); ASSIGN_RETURN_ON_EXCEPTION( - isolate, result, - ConfigureInstance(isolate, object, info, is_hidden_prototype), JSObject); + isolate, result, ConfigureInstance(isolate, object, info), JSObject); if (info->immutable_proto()) { JSObject::SetImmutableProto(object); } @@ -486,7 +481,7 @@ MaybeHandle InstantiateFunction(Isolate* isolate, InstantiateObject( isolate, handle(ObjectTemplateInfo::cast(prototype_templ), isolate), - Handle(), false, true), + Handle(), true), JSFunction); } Object parent = data->GetParentTemplate(); @@ -514,8 +509,7 @@ MaybeHandle InstantiateFunction(Isolate* isolate, CacheTemplateInstantiation(isolate, serial_number, CachingMode::kUnlimited, function); } - MaybeHandle result = - ConfigureInstance(isolate, function, data, false); + MaybeHandle result = ConfigureInstance(isolate, function, data); if (result.is_null()) { // Uncache on error. 
if (serial_number) { @@ -560,8 +554,7 @@ MaybeHandle ApiNatives::InstantiateObject( Isolate* isolate, Handle data, Handle new_target) { InvokeScope invoke_scope(isolate); - return ::v8::internal::InstantiateObject(isolate, data, new_target, false, - false); + return ::v8::internal::InstantiateObject(isolate, data, new_target, false); } MaybeHandle ApiNatives::InstantiateRemoteObject( diff --git a/deps/v8/src/api/api.cc b/deps/v8/src/api/api.cc index 0965e23632e3b1..a18aeeda8711f2 100644 --- a/deps/v8/src/api/api.cc +++ b/deps/v8/src/api/api.cc @@ -447,7 +447,7 @@ void i::V8::FatalProcessOutOfMemory(i::Isolate* isolate, const char* location, heap_stats.end_marker = &end_marker; if (isolate->heap()->HasBeenSetUp()) { // BUG(1718): Don't use the take_snapshot since we don't support - // HeapIterator here without doing a special GC. + // HeapObjectIterator here without doing a special GC. isolate->heap()->RecordStats(&heap_stats, false); char* first_newline = strchr(last_few_messages, '\n'); if (first_newline == nullptr || first_newline[1] == '\0') @@ -764,9 +764,9 @@ StartupData SnapshotCreator::CreateBlob( std::vector> sfis_to_clear; { // Heap allocation is disallowed within this scope. - i::HeapIterator heap_iterator(isolate->heap()); - for (i::HeapObject current_obj = heap_iterator.next(); - !current_obj.is_null(); current_obj = heap_iterator.next()) { + i::HeapObjectIterator heap_iterator(isolate->heap()); + for (i::HeapObject current_obj = heap_iterator.Next(); + !current_obj.is_null(); current_obj = heap_iterator.Next()) { if (current_obj.IsSharedFunctionInfo()) { i::SharedFunctionInfo shared = i::SharedFunctionInfo::cast(current_obj); @@ -810,17 +810,19 @@ StartupData SnapshotCreator::CreateBlob( i::SerializedHandleChecker handle_checker(isolate, &contexts); CHECK(handle_checker.CheckGlobalAndEternalHandles()); - i::HeapIterator heap_iterator(isolate->heap()); - for (i::HeapObject current_obj = heap_iterator.next(); !current_obj.is_null(); - current_obj = heap_iterator.next()) { + i::HeapObjectIterator heap_iterator(isolate->heap()); + for (i::HeapObject current_obj = heap_iterator.Next(); !current_obj.is_null(); + current_obj = heap_iterator.Next()) { if (current_obj.IsJSFunction()) { i::JSFunction fun = i::JSFunction::cast(current_obj); // Complete in-object slack tracking for all functions. fun.CompleteInobjectSlackTrackingIfActive(); + fun.ResetIfBytecodeFlushed(); + // Also, clear out feedback vectors, or any optimized code. 
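Both SnapshotCreator::CreateBlob hunks move from the old i::HeapIterator with a lower-case next() to the renamed i::HeapObjectIterator with a capitalized Next(). A minimal sketch of the new iteration idiom, assuming an isolate whose heap is set up:

    // Next() yields each live object and a null HeapObject once exhausted.
    i::HeapObjectIterator iterator(isolate->heap());
    for (i::HeapObject obj = iterator.Next(); !obj.is_null();
         obj = iterator.Next()) {
      if (obj.IsSharedFunctionInfo()) {
        // Inspect or reset per-function state here.
      }
    }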
- if (!fun.raw_feedback_cell().value().IsUndefined()) { + if (fun.IsOptimized() || fun.IsInterpreted()) { fun.raw_feedback_cell().set_value( i::ReadOnlyRoots(isolate).undefined_value()); fun.set_code(isolate->builtins()->builtin(i::Builtins::kCompileLazy)); @@ -959,42 +961,32 @@ Extension::Extension(const char* name, const char* source, int dep_count, CHECK(source != nullptr || source_length_ == 0); } -ResourceConstraints::ResourceConstraints() - : max_semi_space_size_in_kb_(0), - max_old_space_size_(0), - stack_limit_(nullptr), - code_range_size_(0), - max_zone_pool_size_(0) {} +ResourceConstraints::ResourceConstraints() {} void ResourceConstraints::ConfigureDefaults(uint64_t physical_memory, uint64_t virtual_memory_limit) { - set_max_semi_space_size_in_kb( - i::Heap::ComputeMaxSemiSpaceSize(physical_memory)); - set_max_old_space_size(i::Heap::ComputeMaxOldGenerationSize(physical_memory)); + size_t heap_size = i::Heap::HeapSizeFromPhysicalMemory(physical_memory); + size_t young_generation, old_generation; + i::Heap::GenerationSizesFromHeapSize(heap_size, &young_generation, + &old_generation); + set_max_young_generation_size_in_bytes(young_generation); + set_max_old_generation_size_in_bytes(old_generation); if (virtual_memory_limit > 0 && i::kRequiresCodeRange) { - // Reserve no more than 1/8 of the memory for the code range, but at most - // kMaximalCodeRangeSize. - set_code_range_size( - i::Min(i::kMaximalCodeRangeSize / i::MB, - static_cast((virtual_memory_limit >> 3) / i::MB))); + set_code_range_size_in_bytes( + i::Min(i::kMaximalCodeRangeSize, + static_cast(virtual_memory_limit / 8))); } } -void SetResourceConstraints(i::Isolate* isolate, - const ResourceConstraints& constraints) { - size_t semi_space_size = constraints.max_semi_space_size_in_kb(); - size_t old_space_size = constraints.max_old_space_size(); - size_t code_range_size = constraints.code_range_size(); - if (semi_space_size != 0 || old_space_size != 0 || code_range_size != 0) { - isolate->heap()->ConfigureHeap(semi_space_size, old_space_size, - code_range_size); - } +size_t ResourceConstraints::max_young_generation_size_in_bytes() const { + return i::Heap::YoungGenerationSizeFromSemiSpaceSize( + max_semi_space_size_in_kb_ * i::KB); +} - if (constraints.stack_limit() != nullptr) { - uintptr_t limit = reinterpret_cast(constraints.stack_limit()); - isolate->stack_guard()->SetStackLimit(limit); - } +void ResourceConstraints::set_max_young_generation_size_in_bytes(size_t limit) { + max_semi_space_size_in_kb_ = + i::Heap::SemiSpaceSizeFromYoungGenerationSize(limit) / i::KB; } i::Address* V8::GlobalizeReference(i::Isolate* isolate, i::Address* obj) { @@ -1370,29 +1362,28 @@ static Local ObjectTemplateNew( bool do_not_cache); Local FunctionTemplate::PrototypeTemplate() { - i::Isolate* i_isolate = Utils::OpenHandle(this)->GetIsolate(); + auto self = Utils::OpenHandle(this); + i::Isolate* i_isolate = self->GetIsolate(); ENTER_V8_NO_SCRIPT_NO_EXCEPTION(i_isolate); - i::Handle result(Utils::OpenHandle(this)->GetPrototypeTemplate(), - i_isolate); + i::Handle result(self->GetPrototypeTemplate(), i_isolate); if (result->IsUndefined(i_isolate)) { // Do not cache prototype objects. 
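The ResourceConstraints rework above drops the semi-space/old-space knobs in favor of byte-denominated young- and old-generation limits. A sketch of how an embedder might use the new accessors; the concrete sizes are illustrative only, and passing the constraints through Isolate::CreateParams is the standard embedder flow rather than something shown in this hunk:

    v8::ResourceConstraints constraints;
    // Derive defaults from machine parameters, then cap the young generation.
    constraints.ConfigureDefaults(/*physical_memory=*/512ull * 1024 * 1024,
                                  /*virtual_memory_limit=*/0);
    constraints.set_max_young_generation_size_in_bytes(16 * 1024 * 1024);
    // Isolate::Initialize (further down in this diff) consumes these via
    // heap()->ConfigureHeap(params.constraints).
    v8::Isolate::CreateParams params;
    params.constraints = constraints;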
result = Utils::OpenHandle( *ObjectTemplateNew(i_isolate, Local(), true)); - i::FunctionTemplateInfo::SetPrototypeTemplate( - i_isolate, Utils::OpenHandle(this), result); + i::FunctionTemplateInfo::SetPrototypeTemplate(i_isolate, self, result); } return ToApiHandle(result); } void FunctionTemplate::SetPrototypeProviderTemplate( Local prototype_provider) { - i::Isolate* i_isolate = Utils::OpenHandle(this)->GetIsolate(); + auto self = Utils::OpenHandle(this); + i::Isolate* i_isolate = self->GetIsolate(); ENTER_V8_NO_SCRIPT_NO_EXCEPTION(i_isolate); i::Handle result = Utils::OpenHandle(*prototype_provider); - auto info = Utils::OpenHandle(this); - CHECK(info->GetPrototypeTemplate().IsUndefined(i_isolate)); - CHECK(info->GetParentTemplate().IsUndefined(i_isolate)); - i::FunctionTemplateInfo::SetPrototypeProviderTemplate(i_isolate, info, + CHECK(self->GetPrototypeTemplate().IsUndefined(i_isolate)); + CHECK(self->GetParentTemplate().IsUndefined(i_isolate)); + i::FunctionTemplateInfo::SetPrototypeProviderTemplate(i_isolate, self, result); } @@ -1421,17 +1412,21 @@ static Local FunctionTemplateNew( i::FUNCTION_TEMPLATE_INFO_TYPE, i::AllocationType::kOld); i::Handle obj = i::Handle::cast(struct_obj); - InitializeFunctionTemplate(obj); - obj->set_do_not_cache(do_not_cache); - int next_serial_number = i::FunctionTemplateInfo::kInvalidSerialNumber; - if (!do_not_cache) { - next_serial_number = isolate->heap()->GetNextTemplateSerialNumber(); + { + // Disallow GC until all fields of obj have acceptable types. + i::DisallowHeapAllocation no_gc; + InitializeFunctionTemplate(obj); + obj->set_length(length); + obj->set_do_not_cache(do_not_cache); + int next_serial_number = i::FunctionTemplateInfo::kInvalidSerialNumber; + if (!do_not_cache) { + next_serial_number = isolate->heap()->GetNextTemplateSerialNumber(); + } + obj->set_serial_number(i::Smi::FromInt(next_serial_number)); } - obj->set_serial_number(i::Smi::FromInt(next_serial_number)); if (callback != nullptr) { Utils::ToLocal(obj)->SetCallHandler(callback, data, side_effect_type); } - obj->set_length(length); obj->set_undetectable(false); obj->set_needs_access_check(false); obj->set_accept_any_receiver(true); @@ -2005,9 +2000,10 @@ bool ObjectTemplate::IsImmutableProto() { } void ObjectTemplate::SetImmutableProto() { - i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate(); + auto self = Utils::OpenHandle(this); + i::Isolate* isolate = self->GetIsolate(); ENTER_V8_NO_SCRIPT_NO_EXCEPTION(isolate); - Utils::OpenHandle(this)->set_immutable_proto(true); + self->set_immutable_proto(true); } // --- S c r i p t s --- @@ -2227,29 +2223,40 @@ Local Module::GetException() const { int Module::GetModuleRequestsLength() const { i::Handle self = Utils::OpenHandle(this); - return self->info().module_requests().length(); + if (self->IsSyntheticModule()) return 0; + return i::Handle::cast(self) + ->info() + .module_requests() + .length(); } Local Module::GetModuleRequest(int i) const { CHECK_GE(i, 0); i::Handle self = Utils::OpenHandle(this); + CHECK(self->IsSourceTextModule()); i::Isolate* isolate = self->GetIsolate(); - i::Handle module_requests(self->info().module_requests(), - isolate); + i::Handle module_requests( + i::Handle::cast(self)->info().module_requests(), + isolate); CHECK_LT(i, module_requests->length()); return ToApiHandle(i::handle(module_requests->get(i), isolate)); } Location Module::GetModuleRequestLocation(int i) const { CHECK_GE(i, 0); - i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate(); - i::HandleScope scope(isolate); i::Handle 
self = Utils::OpenHandle(this); + i::Isolate* isolate = self->GetIsolate(); + i::HandleScope scope(isolate); + CHECK(self->IsSourceTextModule()); i::Handle module_request_positions( - self->info().module_request_positions(), isolate); + i::Handle::cast(self) + ->info() + .module_request_positions(), + isolate); CHECK_LT(i, module_request_positions->length()); int position = i::Smi::ToInt(module_request_positions->get(i)); - i::Handle script(self->script(), isolate); + i::Handle script( + i::Handle::cast(self)->script(), isolate); i::Script::PositionInfo info; i::Script::GetPositionInfo(script, position, &info, i::Script::WITH_OFFSET); return v8::Location(info.line, info.column); @@ -2270,8 +2277,10 @@ Local Module::GetUnboundModuleScript() { GetStatus() < kEvaluating, "v8::Module::GetUnboundScript", "v8::Module::GetUnboundScript must be used on an unevaluated module"); i::Handle self = Utils::OpenHandle(this); + CHECK(self->IsSourceTextModule()); return ToApiHandle(i::Handle( - self->GetSharedFunctionInfo(), self->GetIsolate())); + i::Handle::cast(self)->GetSharedFunctionInfo(), + self->GetIsolate())); } int Module::GetIdentityHash() const { return Utils::OpenHandle(this)->hash(); } @@ -2306,6 +2315,37 @@ MaybeLocal Module::Evaluate(Local context) { RETURN_ESCAPED(result); } +Local Module::CreateSyntheticModule( + Isolate* isolate, Local module_name, + const std::vector>& export_names, + v8::Module::SyntheticModuleEvaluationSteps evaluation_steps) { + auto i_isolate = reinterpret_cast(isolate); + i::Handle i_module_name = Utils::OpenHandle(*module_name); + i::Handle i_export_names = i_isolate->factory()->NewFixedArray( + static_cast(export_names.size())); + for (int i = 0; i < i_export_names->length(); ++i) { + i::Handle str = Utils::OpenHandle(*export_names[i]); + i_export_names->set(i, *str); + } + return v8::Utils::ToLocal( + i::Handle(i_isolate->factory()->NewSyntheticModule( + i_module_name, i_export_names, evaluation_steps))); +} + +void Module::SetSyntheticModuleExport(Local export_name, + Local export_value) { + i::Handle i_export_name = Utils::OpenHandle(*export_name); + i::Handle i_export_value = Utils::OpenHandle(*export_value); + i::Handle self = Utils::OpenHandle(this); + Utils::ApiCheck(self->IsSyntheticModule(), + "v8::Module::SetSyntheticModuleExport", + "v8::Module::SetSyntheticModuleExport must only be called on " + "a SyntheticModule"); + i::SyntheticModule::SetExport(self->GetIsolate(), + i::Handle::cast(self), + i_export_name, i_export_value); +} + namespace { i::Compiler::ScriptDetails GetScriptDetails( @@ -2416,7 +2456,7 @@ MaybeLocal ScriptCompiler::CompileModule( if (!maybe.ToLocal(&unbound)) return MaybeLocal(); i::Handle shared = Utils::OpenHandle(*unbound); - return ToApiHandle(i_isolate->factory()->NewModule(shared)); + return ToApiHandle(i_isolate->factory()->NewSourceTextModule(shared)); } namespace { @@ -2760,11 +2800,12 @@ void v8::TryCatch::SetCaptureMessage(bool value) { capture_message_ = value; } // --- M e s s a g e --- Local Message::Get() const { - i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate(); + auto self = Utils::OpenHandle(this); + i::Isolate* isolate = self->GetIsolate(); ENTER_V8_NO_SCRIPT_NO_EXCEPTION(isolate); EscapableHandleScope scope(reinterpret_cast(isolate)); - i::Handle obj = Utils::OpenHandle(this); - i::Handle raw_result = i::MessageHandler::GetMessage(isolate, obj); + i::Handle raw_result = + i::MessageHandler::GetMessage(isolate, self); Local result = Utils::ToLocal(raw_result); return scope.Escape(result); } @@ 
-2775,10 +2816,10 @@ v8::Isolate* Message::GetIsolate() const { } ScriptOrigin Message::GetScriptOrigin() const { - i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate(); + auto self = Utils::OpenHandle(this); + i::Isolate* isolate = self->GetIsolate(); ENTER_V8_NO_SCRIPT_NO_EXCEPTION(isolate); - auto message = i::Handle::cast(Utils::OpenHandle(this)); - i::Handle script(message->script(), isolate); + i::Handle script(self->script(), isolate); return GetScriptOriginForScript(isolate, script); } @@ -2787,11 +2828,11 @@ v8::Local Message::GetScriptResourceName() const { } v8::Local Message::GetStackTrace() const { - i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate(); + auto self = Utils::OpenHandle(this); + i::Isolate* isolate = self->GetIsolate(); ENTER_V8_NO_SCRIPT_NO_EXCEPTION(isolate); EscapableHandleScope scope(reinterpret_cast(isolate)); - auto message = i::Handle::cast(Utils::OpenHandle(this)); - i::Handle stackFramesObj(message->stack_frames(), isolate); + i::Handle stackFramesObj(self->stack_frames(), isolate); if (!stackFramesObj->IsFixedArray()) return v8::Local(); auto stackTrace = i::Handle::cast(stackFramesObj); return scope.Escape(Utils::StackTraceToLocal(stackTrace)); @@ -2860,18 +2901,17 @@ Maybe Message::GetEndColumn(Local context) const { } bool Message::IsSharedCrossOrigin() const { - i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate(); + auto self = Utils::OpenHandle(this); + i::Isolate* isolate = self->GetIsolate(); ENTER_V8_NO_SCRIPT_NO_EXCEPTION(isolate); - return Utils::OpenHandle(this) - ->script() - .origin_options() - .IsSharedCrossOrigin(); + return self->script().origin_options().IsSharedCrossOrigin(); } bool Message::IsOpaque() const { - i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate(); + auto self = Utils::OpenHandle(this); + i::Isolate* isolate = self->GetIsolate(); ENTER_V8_NO_SCRIPT_NO_EXCEPTION(isolate); - return Utils::OpenHandle(this)->script().origin_options().IsOpaque(); + return self->script().origin_options().IsOpaque(); } MaybeLocal Message::GetSourceLine(Local context) const { @@ -2918,11 +2958,11 @@ Local StackTrace::CurrentStackTrace(Isolate* isolate, // --- S t a c k F r a m e --- int StackFrame::GetLineNumber() const { - return i::StackTraceFrame::GetLineNumber(Utils::OpenHandle(this)); + return i::StackTraceFrame::GetOneBasedLineNumber(Utils::OpenHandle(this)); } int StackFrame::GetColumn() const { - return i::StackTraceFrame::GetColumnNumber(Utils::OpenHandle(this)); + return i::StackTraceFrame::GetOneBasedColumnNumber(Utils::OpenHandle(this)); } int StackFrame::GetScriptId() const { @@ -2930,30 +2970,31 @@ int StackFrame::GetScriptId() const { } Local StackFrame::GetScriptName() const { - i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate(); + auto self = Utils::OpenHandle(this); + i::Isolate* isolate = self->GetIsolate(); EscapableHandleScope scope(reinterpret_cast(isolate)); - i::Handle name = - i::StackTraceFrame::GetFileName(Utils::OpenHandle(this)); + i::Handle name = i::StackTraceFrame::GetFileName(self); return name->IsString() ? 
scope.Escape(Local::Cast(Utils::ToLocal(name))) : Local(); } Local StackFrame::GetScriptNameOrSourceURL() const { - i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate(); + auto self = Utils::OpenHandle(this); + i::Isolate* isolate = self->GetIsolate(); EscapableHandleScope scope(reinterpret_cast(isolate)); i::Handle name = - i::StackTraceFrame::GetScriptNameOrSourceUrl(Utils::OpenHandle(this)); + i::StackTraceFrame::GetScriptNameOrSourceUrl(self); return name->IsString() ? scope.Escape(Local::Cast(Utils::ToLocal(name))) : Local(); } Local StackFrame::GetFunctionName() const { - i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate(); + auto self = Utils::OpenHandle(this); + i::Isolate* isolate = self->GetIsolate(); EscapableHandleScope scope(reinterpret_cast(isolate)); - i::Handle name = - i::StackTraceFrame::GetFunctionName(Utils::OpenHandle(this)); + i::Handle name = i::StackTraceFrame::GetFunctionName(self); return name->IsString() ? scope.Escape(Local::Cast(Utils::ToLocal(name))) : Local(); @@ -3567,8 +3608,7 @@ MaybeLocal Value::ToUint32(Local context) const { } i::Isolate* i::IsolateFromNeverReadOnlySpaceObject(i::Address obj) { - return i::NeverReadOnlySpaceObject::GetIsolate( - i::HeapObject::cast(i::Object(obj))); + return i::GetIsolateFromWritableObject(i::HeapObject::cast(i::Object(obj))); } bool i::ShouldThrowOnError(i::Isolate* isolate) { @@ -4227,8 +4267,8 @@ MaybeLocal v8::Object::GetOwnPropertyDescriptor(Local context, } Local v8::Object::GetPrototype() { - auto isolate = Utils::OpenHandle(this)->GetIsolate(); auto self = Utils::OpenHandle(this); + auto isolate = self->GetIsolate(); i::PrototypeIterator iter(isolate, self); return Utils::ToLocal(i::PrototypeIterator::GetCurrent(iter)); } @@ -4480,10 +4520,10 @@ void Object::SetAccessorProperty(Local name, Local getter, AccessControl settings) { // TODO(verwaest): Remove |settings|. DCHECK_EQ(v8::DEFAULT, settings); - i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate(); + auto self = Utils::OpenHandle(this); + i::Isolate* isolate = self->GetIsolate(); ENTER_V8_NO_SCRIPT_NO_EXCEPTION(isolate); i::HandleScope scope(isolate); - auto self = Utils::OpenHandle(this); if (!self->IsJSObject()) return; i::Handle getter_i = v8::Utils::OpenHandle(*getter); i::Handle setter_i = v8::Utils::OpenHandle(*setter, true); @@ -4693,9 +4733,9 @@ Local v8::Object::CreationContext() { int v8::Object::GetIdentityHash() { i::DisallowHeapAllocation no_gc; - auto isolate = Utils::OpenHandle(this)->GetIsolate(); - i::HandleScope scope(isolate); auto self = Utils::OpenHandle(this); + auto isolate = self->GetIsolate(); + i::HandleScope scope(isolate); return self->GetOrCreateIdentityHash(isolate).value(); } @@ -4881,9 +4921,9 @@ Local Function::GetDebugName() const { } Local Function::GetDisplayName() const { - i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate(); - ENTER_V8_NO_SCRIPT_NO_EXCEPTION(isolate); auto self = Utils::OpenHandle(this); + i::Isolate* isolate = self->GetIsolate(); + ENTER_V8_NO_SCRIPT_NO_EXCEPTION(isolate); if (!self->IsJSFunction()) { return ToApiHandle(isolate->factory()->undefined_value()); } @@ -5414,20 +5454,15 @@ Local Symbol::Name() const { i::Handle sym = Utils::OpenHandle(this); i::Isolate* isolate; - if (!i::GetIsolateFromWritableObject(*sym, &isolate)) { - // If the Symbol is in RO_SPACE, then its name must be too. Since RO_SPACE - // objects are immovable we can use the Handle(Address*) constructor with - // the address of the name field in the Symbol object without needing an - // isolate. 
-#ifdef V8_COMPRESS_POINTERS - // Compressed fields can't serve as handle locations. - // TODO(ishell): get Isolate as a parameter. - isolate = i::Isolate::Current(); -#else + if (!i::GetIsolateFromHeapObject(*sym, &isolate)) { + // Symbol is in RO_SPACE, which means that its name is also in RO_SPACE. + // Since RO_SPACE objects are immovable we can use the Handle(Address*) + // constructor with the address of the name field in the Symbol object + // without needing an isolate. + DCHECK(!COMPRESS_POINTERS_BOOL); i::Handle ro_name(reinterpret_cast( sym->GetFieldAddress(i::Symbol::kNameOffset))); return Utils::ToLocal(ro_name); -#endif } i::Handle name(sym->name(), isolate); @@ -5973,6 +6008,19 @@ void Context::SetErrorMessageForCodeGenerationFromStrings(Local error) { context->set_error_message_for_code_gen_from_strings(*error_handle); } +void Context::SetAbortScriptExecution( + Context::AbortScriptExecutionCallback callback) { + i::Handle context = Utils::OpenHandle(this); + i::Isolate* isolate = context->GetIsolate(); + if (callback == nullptr) { + context->set_script_execution_callback( + i::ReadOnlyRoots(isolate).undefined_value()); + } else { + SET_FIELD_WRAPPED(isolate, context, set_script_execution_callback, + callback); + } +} + namespace { i::Address* GetSerializedDataFromFixedArray(i::Isolate* isolate, i::FixedArray list, size_t index) { @@ -6304,8 +6352,7 @@ bool v8::String::MakeExternal(v8::String::ExternalStringResource* resource) { // It is safe to call GetIsolateFromWritableHeapObject because // SupportsExternalization already checked that the object is writable. - i::Isolate* isolate; - i::GetIsolateFromWritableObject(obj, &isolate); + i::Isolate* isolate = i::GetIsolateFromWritableObject(obj); ENTER_V8_NO_SCRIPT_NO_EXCEPTION(isolate); CHECK(resource && resource->data()); @@ -6332,8 +6379,7 @@ bool v8::String::MakeExternal( // It is safe to call GetIsolateFromWritableHeapObject because // SupportsExternalization already checked that the object is writable. 
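Context::SetAbortScriptExecution, added a few hunks up, lets an embedder install a per-context hook that aborts further script execution; storing nullptr resets the slot to the undefined sentinel. A hedged sketch, assuming the callback signature void(Isolate*, Local<Context>) declared alongside this API in v8.h (the header is not part of this diff):

    void OnAbortScriptExecution(v8::Isolate* isolate,
                                v8::Local<v8::Context> context) {
      // Tear down embedder state tied to this context, log, etc.
    }

    // Install the hook, then later clear it again.
    context->SetAbortScriptExecution(OnAbortScriptExecution);
    context->SetAbortScriptExecution(nullptr);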
- i::Isolate* isolate; - i::GetIsolateFromWritableObject(obj, &isolate); + i::Isolate* isolate = i::GetIsolateFromWritableObject(obj); ENTER_V8_NO_SCRIPT_NO_EXCEPTION(isolate); CHECK(resource && resource->data()); @@ -6450,10 +6496,11 @@ Local v8::NumberObject::New(Isolate* isolate, double value) { double v8::NumberObject::ValueOf() const { i::Handle obj = Utils::OpenHandle(this); - i::Handle jsvalue = i::Handle::cast(obj); - i::Isolate* isolate = jsvalue->GetIsolate(); + i::Handle js_primitive_wrapper = + i::Handle::cast(obj); + i::Isolate* isolate = js_primitive_wrapper->GetIsolate(); LOG_API(isolate, NumberObject, NumberValue); - return jsvalue->value().Number(); + return js_primitive_wrapper->value().Number(); } Local v8::BigIntObject::New(Isolate* isolate, int64_t value) { @@ -6468,11 +6515,12 @@ Local v8::BigIntObject::New(Isolate* isolate, int64_t value) { Local v8::BigIntObject::ValueOf() const { i::Handle obj = Utils::OpenHandle(this); - i::Handle jsvalue = i::Handle::cast(obj); - i::Isolate* isolate = jsvalue->GetIsolate(); + i::Handle js_primitive_wrapper = + i::Handle::cast(obj); + i::Isolate* isolate = js_primitive_wrapper->GetIsolate(); LOG_API(isolate, BigIntObject, BigIntValue); - return Utils::ToLocal( - i::Handle(i::BigInt::cast(jsvalue->value()), isolate)); + return Utils::ToLocal(i::Handle( + i::BigInt::cast(js_primitive_wrapper->value()), isolate)); } Local v8::BooleanObject::New(Isolate* isolate, bool value) { @@ -6490,10 +6538,11 @@ Local v8::BooleanObject::New(Isolate* isolate, bool value) { bool v8::BooleanObject::ValueOf() const { i::Handle obj = Utils::OpenHandle(this); - i::Handle jsvalue = i::Handle::cast(obj); - i::Isolate* isolate = jsvalue->GetIsolate(); + i::Handle js_primitive_wrapper = + i::Handle::cast(obj); + i::Isolate* isolate = js_primitive_wrapper->GetIsolate(); LOG_API(isolate, BooleanObject, BooleanValue); - return jsvalue->value().IsTrue(isolate); + return js_primitive_wrapper->value().IsTrue(isolate); } Local v8::StringObject::New(Isolate* v8_isolate, @@ -6509,11 +6558,12 @@ Local v8::StringObject::New(Isolate* v8_isolate, Local v8::StringObject::ValueOf() const { i::Handle obj = Utils::OpenHandle(this); - i::Handle jsvalue = i::Handle::cast(obj); - i::Isolate* isolate = jsvalue->GetIsolate(); + i::Handle js_primitive_wrapper = + i::Handle::cast(obj); + i::Isolate* isolate = js_primitive_wrapper->GetIsolate(); LOG_API(isolate, StringObject, StringValue); - return Utils::ToLocal( - i::Handle(i::String::cast(jsvalue->value()), isolate)); + return Utils::ToLocal(i::Handle( + i::String::cast(js_primitive_wrapper->value()), isolate)); } Local v8::SymbolObject::New(Isolate* isolate, Local value) { @@ -6528,11 +6578,12 @@ Local v8::SymbolObject::New(Isolate* isolate, Local value) { Local v8::SymbolObject::ValueOf() const { i::Handle obj = Utils::OpenHandle(this); - i::Handle jsvalue = i::Handle::cast(obj); - i::Isolate* isolate = jsvalue->GetIsolate(); + i::Handle js_primitive_wrapper = + i::Handle::cast(obj); + i::Isolate* isolate = js_primitive_wrapper->GetIsolate(); LOG_API(isolate, SymbolObject, SymbolValue); - return Utils::ToLocal( - i::Handle(i::Symbol::cast(jsvalue->value()), isolate)); + return Utils::ToLocal(i::Handle( + i::Symbol::cast(js_primitive_wrapper->value()), isolate)); } MaybeLocal v8::Date::New(Local context, double time) { @@ -7936,7 +7987,12 @@ void Isolate::Initialize(Isolate* isolate, i_isolate->set_api_external_references(params.external_references); i_isolate->set_allow_atomics_wait(params.allow_atomics_wait); - 
SetResourceConstraints(i_isolate, params.constraints); + i_isolate->heap()->ConfigureHeap(params.constraints); + if (params.constraints.stack_limit() != nullptr) { + uintptr_t limit = + reinterpret_cast(params.constraints.stack_limit()); + i_isolate->stack_guard()->SetStackLimit(limit); + } // TODO(jochen): Once we got rid of Isolate::Current(), we can remove this. Isolate::Scope isolate_scope(isolate); if (!i::Snapshot::Initialize(i_isolate)) { @@ -8388,9 +8444,9 @@ void Isolate::LowMemoryNotification() { i::GarbageCollectionReason::kLowMemoryNotification); } { - i::HeapIterator iterator(isolate->heap()); - for (i::HeapObject obj = iterator.next(); !obj.is_null(); - obj = iterator.next()) { + i::HeapObjectIterator iterator(isolate->heap()); + for (i::HeapObject obj = iterator.Next(); !obj.is_null(); + obj = iterator.Next()) { if (obj.IsAbstractCode()) { i::AbstractCode::cast(obj).DropStackFrameCache(); } @@ -8401,9 +8457,14 @@ void Isolate::LowMemoryNotification() { int Isolate::ContextDisposedNotification(bool dependant_context) { i::Isolate* isolate = reinterpret_cast(this); if (!dependant_context) { - // We left the current context, we can abort all WebAssembly compilations on - // that isolate. - isolate->wasm_engine()->DeleteCompileJobsOnIsolate(isolate); + if (!isolate->context().is_null()) { + // We left the current context, we can abort all WebAssembly compilations + // of that context. + // A handle scope for the native context. + i::HandleScope handle_scope(isolate); + isolate->wasm_engine()->DeleteCompileJobsOnContext( + isolate->native_context()); + } } // TODO(ahaas): move other non-heap activity out of the heap call. return isolate->heap()->NotifyContextDisposed(dependant_context); @@ -8505,6 +8566,9 @@ CALLBACK_SETTER(FatalErrorHandler, FatalErrorCallback, exception_behavior) CALLBACK_SETTER(OOMErrorHandler, OOMErrorCallback, oom_behavior) CALLBACK_SETTER(AllowCodeGenerationFromStringsCallback, AllowCodeGenerationFromStringsCallback, allow_code_gen_callback) +CALLBACK_SETTER(ModifyCodeGenerationFromStringsCallback, + ModifyCodeGenerationFromStringsCallback, + modify_code_gen_callback) CALLBACK_SETTER(AllowWasmCodeGenerationCallback, AllowWasmCodeGenerationCallback, allow_wasm_code_gen_callback) @@ -8931,9 +8995,9 @@ std::vector debug::Script::LineEnds() const { } MaybeLocal debug::Script::Name() const { - i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate(); - i::HandleScope handle_scope(isolate); i::Handle script = Utils::OpenHandle(this); + i::Isolate* isolate = script->GetIsolate(); + i::HandleScope handle_scope(isolate); i::Handle value(script->name(), isolate); if (!value->IsString()) return MaybeLocal(); return Utils::ToLocal( @@ -8941,9 +9005,9 @@ MaybeLocal debug::Script::Name() const { } MaybeLocal debug::Script::SourceURL() const { - i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate(); - i::HandleScope handle_scope(isolate); i::Handle script = Utils::OpenHandle(this); + i::Isolate* isolate = script->GetIsolate(); + i::HandleScope handle_scope(isolate); i::Handle value(script->source_url(), isolate); if (!value->IsString()) return MaybeLocal(); return Utils::ToLocal( @@ -8951,9 +9015,9 @@ MaybeLocal debug::Script::SourceURL() const { } MaybeLocal debug::Script::SourceMappingURL() const { - i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate(); - i::HandleScope handle_scope(isolate); i::Handle script = Utils::OpenHandle(this); + i::Isolate* isolate = script->GetIsolate(); + i::HandleScope handle_scope(isolate); i::Handle 
value(script->source_mapping_url(), isolate); if (!value->IsString()) return MaybeLocal(); return Utils::ToLocal( @@ -8961,18 +9025,18 @@ MaybeLocal debug::Script::SourceMappingURL() const { } Maybe debug::Script::ContextId() const { - i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate(); - i::HandleScope handle_scope(isolate); i::Handle script = Utils::OpenHandle(this); + i::Isolate* isolate = script->GetIsolate(); + i::HandleScope handle_scope(isolate); i::Object value = script->context_data(); if (value.IsSmi()) return Just(i::Smi::ToInt(value)); return Nothing(); } MaybeLocal debug::Script::Source() const { - i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate(); - i::HandleScope handle_scope(isolate); i::Handle script = Utils::OpenHandle(this); + i::Isolate* isolate = script->GetIsolate(); + i::HandleScope handle_scope(isolate); i::Handle value(script->source(), isolate); if (!value->IsString()) return MaybeLocal(); return Utils::ToLocal( @@ -10273,6 +10337,17 @@ void EmbedderHeapTracer::IncreaseAllocatedSize(size_t bytes) { } } +void EmbedderHeapTracer::DecreaseAllocatedSize(size_t bytes) { + if (isolate_) { + i::LocalEmbedderHeapTracer* const tracer = + reinterpret_cast(isolate_) + ->heap() + ->local_embedder_heap_tracer(); + DCHECK_NOT_NULL(tracer); + tracer->DecreaseAllocatedSize(bytes); + } +} + void EmbedderHeapTracer::RegisterEmbedderReference( const TracedGlobal& ref) { if (ref.IsEmpty()) return; @@ -10462,8 +10537,7 @@ void InvokeAccessorGetterCallback( void InvokeFunctionCallback(const v8::FunctionCallbackInfo& info, v8::FunctionCallback callback) { Isolate* isolate = reinterpret_cast(info.GetIsolate()); - RuntimeCallTimerScope timer(isolate, - RuntimeCallCounterId::kInvokeFunctionCallback); + RuntimeCallTimerScope timer(isolate, RuntimeCallCounterId::kFunctionCallback); Address callback_address = reinterpret_cast
    (callback); VMState state(isolate); ExternalCallbackScope call_scope(isolate, callback_address); diff --git a/deps/v8/src/api/api.h b/deps/v8/src/api/api.h index e041a5daf086b3..6135a7dfc62024 100644 --- a/deps/v8/src/api/api.h +++ b/deps/v8/src/api/api.h @@ -14,9 +14,9 @@ #include "src/objects/js-generator.h" #include "src/objects/js-promise.h" #include "src/objects/js-proxy.h" -#include "src/objects/module.h" #include "src/objects/objects.h" #include "src/objects/shared-function-info.h" +#include "src/objects/source-text-module.h" #include "src/utils/detachable-vector.h" #include "src/objects/templates.h" diff --git a/deps/v8/src/asmjs/OWNERS b/deps/v8/src/asmjs/OWNERS index d4103ae0c1be91..08f39f8d6a2df3 100644 --- a/deps/v8/src/asmjs/OWNERS +++ b/deps/v8/src/asmjs/OWNERS @@ -1,5 +1,3 @@ -set noparent - ahaas@chromium.org clemensh@chromium.org mstarzinger@chromium.org diff --git a/deps/v8/src/asmjs/asm-js.cc b/deps/v8/src/asmjs/asm-js.cc index 5a38eeef361fba..7433b6a12cbb72 100644 --- a/deps/v8/src/asmjs/asm-js.cc +++ b/deps/v8/src/asmjs/asm-js.cc @@ -12,9 +12,9 @@ #include "src/codegen/compiler.h" #include "src/codegen/unoptimized-compilation-info.h" #include "src/common/assert-scope.h" +#include "src/common/message-template.h" #include "src/execution/execution.h" #include "src/execution/isolate.h" -#include "src/execution/message-template.h" #include "src/handles/handles.h" #include "src/heap/factory.h" #include "src/logging/counters.h" @@ -249,9 +249,9 @@ UnoptimizedCompilationJob::Status AsmJsCompilationJob::ExecuteJobImpl() { return FAILED; } module_ = new (compile_zone) wasm::ZoneBuffer(compile_zone); - parser.module_builder()->WriteTo(*module_); + parser.module_builder()->WriteTo(module_); asm_offsets_ = new (compile_zone) wasm::ZoneBuffer(compile_zone); - parser.module_builder()->WriteAsmJsOffsetTable(*asm_offsets_); + parser.module_builder()->WriteAsmJsOffsetTable(asm_offsets_); stdlib_uses_ = *parser.stdlib_uses(); size_t compile_zone_size = @@ -287,7 +287,7 @@ UnoptimizedCompilationJob::Status AsmJsCompilationJob::FinalizeJobImpl( isolate, &thrower, wasm::ModuleWireBytes(module_->begin(), module_->end()), Vector(asm_offsets_->begin(), asm_offsets_->size()), - uses_bitset) + uses_bitset, shared_info->language_mode()) .ToHandleChecked(); DCHECK(!thrower.error()); compile_time_ = compile_timer.Elapsed().InMillisecondsF(); @@ -319,10 +319,10 @@ void AsmJsCompilationJob::RecordHistograms(Isolate* isolate) { translation_throughput); } -UnoptimizedCompilationJob* AsmJs::NewCompilationJob( +std::unique_ptr AsmJs::NewCompilationJob( ParseInfo* parse_info, FunctionLiteral* literal, AccountingAllocator* allocator) { - return new AsmJsCompilationJob(parse_info, literal, allocator); + return base::make_unique(parse_info, literal, allocator); } namespace { diff --git a/deps/v8/src/asmjs/asm-js.h b/deps/v8/src/asmjs/asm-js.h index 46dd3f2e34bb68..3e714cba7a67ed 100644 --- a/deps/v8/src/asmjs/asm-js.h +++ b/deps/v8/src/asmjs/asm-js.h @@ -23,7 +23,7 @@ class UnoptimizedCompilationJob; // Interface to compile and instantiate for asm.js modules. 
class AsmJs { public: - static UnoptimizedCompilationJob* NewCompilationJob( + static std::unique_ptr NewCompilationJob( ParseInfo* parse_info, FunctionLiteral* literal, AccountingAllocator* allocator); static MaybeHandle InstantiateAsmWasm(Isolate* isolate, diff --git a/deps/v8/src/asmjs/asm-parser.cc b/deps/v8/src/asmjs/asm-parser.cc index 3d290a1fe1a93b..6ac39dc89ccf31 100644 --- a/deps/v8/src/asmjs/asm-parser.cc +++ b/deps/v8/src/asmjs/asm-parser.cc @@ -253,7 +253,7 @@ void AsmJsParser::DeclareGlobal(VarInfo* info, bool mutable_variable, const WasmInitExpr& init) { info->kind = VarKind::kGlobal; info->type = type; - info->index = module_builder_->AddGlobal(vtype, false, true, init); + info->index = module_builder_->AddGlobal(vtype, true, init); info->mutable_variable = mutable_variable; } @@ -385,7 +385,8 @@ void AsmJsParser::ValidateModule() { module_builder_->MarkStartFunction(start); for (auto& global_import : global_imports_) { uint32_t import_index = module_builder_->AddGlobalImport( - global_import.import_name, global_import.value_type); + global_import.import_name, global_import.value_type, + false /* mutability */); start->EmitWithI32V(kExprGetGlobal, import_index); start->EmitWithI32V(kExprSetGlobal, VarIndex(global_import.var_info)); } @@ -754,7 +755,7 @@ void AsmJsParser::ValidateFunction() { // Record start of the function, used as position for the stack check. current_function_builder_->SetAsmFunctionStartPosition(scanner_.Position()); - CachedVector params(cached_asm_type_p_vectors_); + CachedVector params(&cached_asm_type_p_vectors_); ValidateFunctionParams(¶ms); // Check against limit on number of parameters. @@ -762,7 +763,7 @@ void AsmJsParser::ValidateFunction() { FAIL("Number of parameters exceeds internal limit"); } - CachedVector locals(cached_valuetype_vectors_); + CachedVector locals(&cached_valuetype_vectors_); ValidateFunctionLocals(params.size(), &locals); function_temp_locals_offset_ = static_cast( @@ -837,7 +838,7 @@ void AsmJsParser::ValidateFunctionParams(ZoneVector* params) { scanner_.EnterLocalScope(); EXPECT_TOKEN('('); CachedVector function_parameters( - cached_token_t_vectors_); + &cached_token_t_vectors_); while (!failed_ && !Peek(')')) { if (!scanner_.IsLocal()) { FAIL("Expected parameter name"); @@ -969,7 +970,8 @@ void AsmJsParser::ValidateFunctionLocals(size_t param_count, if (negate) { dvalue = -dvalue; } - current_function_builder_->EmitF32Const(dvalue); + float fvalue = DoubleToFloat32(dvalue); + current_function_builder_->EmitF32Const(fvalue); current_function_builder_->EmitSetLocal(info->index); } else if (CheckForUnsigned(&uvalue)) { if (uvalue > 0x7FFFFFFF) { @@ -1314,7 +1316,7 @@ void AsmJsParser::SwitchStatement() { Begin(pending_label_); pending_label_ = 0; // TODO(bradnelson): Make less weird. - CachedVector cases(cached_int_vectors_); + CachedVector cases(&cached_int_vectors_); GatherCases(&cases); EXPECT_TOKEN('{'); size_t count = cases.size() + 1; @@ -2108,7 +2110,11 @@ AsmType* AsmJsParser::ValidateCall() { // need to match the information stored at this point. base::Optional tmp; if (Check('[')) { - RECURSEn(EqualityExpression()); + AsmType* index = nullptr; + RECURSEn(index = EqualityExpression()); + if (!index->IsA(AsmType::Intish())) { + FAILn("Expected intish index"); + } EXPECT_TOKENn('&'); uint32_t mask = 0; if (!CheckForUnsigned(&mask)) { @@ -2161,8 +2167,8 @@ AsmType* AsmJsParser::ValidateCall() { } // Parse argument list and gather types. 
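The asm-parser.cc and asm-parser.h hunks below switch CachedVector to take its backing CachedVectors cache by pointer rather than by mutable reference, making the aliasing explicit at every call site. The new shape, as used throughout this file:

    // Call sites now pass the cache's address instead of a reference.
    CachedVector<AsmType*> params(&cached_asm_type_p_vectors_);
    CachedVector<ValueType> locals(&cached_valuetype_vectors_);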
- CachedVector param_types(cached_asm_type_p_vectors_); - CachedVector param_specific_types(cached_asm_type_p_vectors_); + CachedVector param_types(&cached_asm_type_p_vectors_); + CachedVector param_specific_types(&cached_asm_type_p_vectors_); EXPECT_TOKENn('('); while (!failed_ && !Peek(')')) { AsmType* t; diff --git a/deps/v8/src/asmjs/asm-parser.h b/deps/v8/src/asmjs/asm-parser.h index 8740cdad1198db..c7bf30c29e56cb 100644 --- a/deps/v8/src/asmjs/asm-parser.h +++ b/deps/v8/src/asmjs/asm-parser.h @@ -154,9 +154,9 @@ class AsmJsParser { template class CachedVector final : public ZoneVector { public: - explicit CachedVector(CachedVectors& cache) - : ZoneVector(cache.zone()), cache_(&cache) { - cache.fill(this); + explicit CachedVector(CachedVectors* cache) + : ZoneVector(cache->zone()), cache_(cache) { + cache->fill(this); } ~CachedVector() { cache_->reuse(this); } diff --git a/deps/v8/src/ast/OWNERS b/deps/v8/src/ast/OWNERS index e95afc8afa05fa..e6daa80ec97fe7 100644 --- a/deps/v8/src/ast/OWNERS +++ b/deps/v8/src/ast/OWNERS @@ -1,5 +1,3 @@ -set noparent - adamk@chromium.org bmeurer@chromium.org gsathya@chromium.org diff --git a/deps/v8/src/ast/ast.cc b/deps/v8/src/ast/ast.cc index a930a374b8eaa1..9987eb28449a21 100644 --- a/deps/v8/src/ast/ast.cc +++ b/deps/v8/src/ast/ast.cc @@ -49,8 +49,6 @@ static const char* NameForNativeContextIntrinsicIndex(uint32_t idx) { return "UnknownIntrinsicIndex"; } -void AstNode::Print() { Print(Isolate::Current()); } - void AstNode::Print(Isolate* isolate) { AllowHandleDereference allow_deref; AstPrinter::PrintOut(isolate, this); @@ -132,6 +130,10 @@ bool Expression::ToBooleanIsFalse() const { return IsLiteral() && AsLiteral()->ToBooleanIsFalse(); } +bool Expression::IsPrivateName() const { + return IsVariableProxy() && AsVariableProxy()->IsPrivateName(); +} + bool Expression::IsValidReferenceExpression() const { return IsProperty() || (IsVariableProxy() && AsVariableProxy()->IsValidReferenceExpression()); @@ -176,7 +178,7 @@ void VariableProxy::BindTo(Variable* var) { set_var(var); set_is_resolved(); var->set_is_used(); - if (is_assigned()) var->set_maybe_assigned(); + if (is_assigned()) var->SetMaybeAssigned(); } Assignment::Assignment(NodeType node_type, Token::Value op, Expression* target, @@ -601,8 +603,8 @@ void ArrayLiteral::BuildBoilerplateDescription(Isolate* isolate) { boilerplate_value = handle(Smi::kZero, isolate); } - kind = GetMoreGeneralElementsKind(kind, - boilerplate_value->OptimalElementsKind()); + kind = GetMoreGeneralElementsKind( + kind, boilerplate_value->OptimalElementsKind(isolate)); fixed_array->set(array_index, *boilerplate_value); } @@ -832,6 +834,9 @@ Call::CallType Call::GetCallType() const { Property* property = expression()->AsProperty(); if (property != nullptr) { + if (property->IsPrivateReference()) { + return PRIVATE_CALL; + } bool is_super = property->IsSuperAccess(); if (property->key()->IsPropertyName()) { return is_super ? NAMED_SUPER_PROPERTY_CALL : NAMED_PROPERTY_CALL; diff --git a/deps/v8/src/ast/ast.h b/deps/v8/src/ast/ast.h index 27d298c88ea186..bd52d1b2c04065 100644 --- a/deps/v8/src/ast/ast.h +++ b/deps/v8/src/ast/ast.h @@ -147,7 +147,6 @@ class AstNode: public ZoneObject { int position() const { return position_; } #ifdef DEBUG - void Print(); void Print(Isolate* isolate); #endif // DEBUG @@ -205,6 +204,9 @@ class Expression : public AstNode { // True iff the expression is a valid reference expression. bool IsValidReferenceExpression() const; + // True iff the expression is a private name. 
+ bool IsPrivateName() const; + // Helpers for ToBoolean conversion. bool ToBooleanIsTrue() const; bool ToBooleanIsFalse() const; @@ -1421,32 +1423,6 @@ class ObjectLiteral final : public AggregateLiteral { : public BitField {}; }; - -// A map from property names to getter/setter pairs allocated in the zone. -class AccessorTable - : public base::TemplateHashMap { - public: - explicit AccessorTable(Zone* zone) - : base::TemplateHashMap( - Literal::Match, ZoneAllocationPolicy(zone)), - zone_(zone) {} - - Iterator lookup(Literal* literal) { - Iterator it = find(literal, true, ZoneAllocationPolicy(zone_)); - if (it->second == nullptr) { - it->second = new (zone_) ObjectLiteral::Accessors(); - } - return it; - } - - private: - Zone* zone_; -}; - - // An array literal has a literals object that is used // for minimizing the work when constructing it at runtime. class ArrayLiteral final : public AggregateLiteral { @@ -1533,7 +1509,7 @@ class VariableProxy final : public Expression { void set_is_assigned() { bit_field_ = IsAssignedField::update(bit_field_, true); if (is_resolved()) { - var()->set_maybe_assigned(); + var()->SetMaybeAssigned(); } } @@ -1635,11 +1611,12 @@ class VariableProxy final : public Expression { // Otherwise, the assignment is to a non-property (a global, a local slot, a // parameter slot, or a destructuring pattern). enum AssignType { - NON_PROPERTY, - NAMED_PROPERTY, - KEYED_PROPERTY, - NAMED_SUPER_PROPERTY, - KEYED_SUPER_PROPERTY + NON_PROPERTY, // destructuring + NAMED_PROPERTY, // obj.key + KEYED_PROPERTY, // obj[key] + NAMED_SUPER_PROPERTY, // super.key + KEYED_SUPER_PROPERTY, // super[key] + PRIVATE_METHOD // obj.#key: #key is a private method }; class Property final : public Expression { @@ -1650,10 +1627,19 @@ class Property final : public Expression { Expression* key() const { return key_; } bool IsSuperAccess() { return obj()->IsSuperPropertyReference(); } + bool IsPrivateReference() const { return key()->IsPrivateName(); } // Returns the properties assign type. static AssignType GetAssignType(Property* property) { if (property == nullptr) return NON_PROPERTY; + if (property->IsPrivateReference()) { + DCHECK(!property->IsSuperAccess()); + VariableProxy* proxy = property->key()->AsVariableProxy(); + DCHECK_NOT_NULL(proxy); + Variable* var = proxy->var(); + // Use KEYED_PROPERTY for private fields. + return var->requires_brand_check() ? PRIVATE_METHOD : KEYED_PROPERTY; + } bool super_access = property->IsSuperAccess(); return (property->key()->IsPropertyName()) ? (super_access ? NAMED_SUPER_PROPERTY : NAMED_PROPERTY) @@ -1715,6 +1701,7 @@ class Call final : public Expression { KEYED_PROPERTY_CALL, NAMED_SUPER_PROPERTY_CALL, KEYED_SUPER_PROPERTY_CALL, + PRIVATE_CALL, SUPER_CALL, RESOLVED_PROPERTY_CALL, OTHER_CALL diff --git a/deps/v8/src/ast/modules.cc b/deps/v8/src/ast/modules.cc index 5e9bbc6332e40a..261b72c352a55d 100644 --- a/deps/v8/src/ast/modules.cc +++ b/deps/v8/src/ast/modules.cc @@ -12,7 +12,7 @@ namespace v8 { namespace internal { -bool ModuleDescriptor::AstRawStringComparer::operator()( +bool SourceTextModuleDescriptor::AstRawStringComparer::operator()( const AstRawString* lhs, const AstRawString* rhs) const { // Fast path for equal pointers: a pointer is not strictly less than itself. 
if (lhs == rhs) return false; @@ -27,12 +27,10 @@ bool ModuleDescriptor::AstRawStringComparer::operator()( return memcmp(lhs->raw_data(), rhs->raw_data(), lhs->byte_length()) < 0; } -void ModuleDescriptor::AddImport(const AstRawString* import_name, - const AstRawString* local_name, - const AstRawString* module_request, - const Scanner::Location loc, - const Scanner::Location specifier_loc, - Zone* zone) { +void SourceTextModuleDescriptor::AddImport( + const AstRawString* import_name, const AstRawString* local_name, + const AstRawString* module_request, const Scanner::Location loc, + const Scanner::Location specifier_loc, Zone* zone) { Entry* entry = new (zone) Entry(loc); entry->local_name = local_name; entry->import_name = import_name; @@ -40,38 +38,34 @@ void ModuleDescriptor::AddImport(const AstRawString* import_name, AddRegularImport(entry); } -void ModuleDescriptor::AddStarImport(const AstRawString* local_name, - const AstRawString* module_request, - const Scanner::Location loc, - const Scanner::Location specifier_loc, - Zone* zone) { +void SourceTextModuleDescriptor::AddStarImport( + const AstRawString* local_name, const AstRawString* module_request, + const Scanner::Location loc, const Scanner::Location specifier_loc, + Zone* zone) { Entry* entry = new (zone) Entry(loc); entry->local_name = local_name; entry->module_request = AddModuleRequest(module_request, specifier_loc); AddNamespaceImport(entry, zone); } -void ModuleDescriptor::AddEmptyImport(const AstRawString* module_request, - const Scanner::Location specifier_loc) { +void SourceTextModuleDescriptor::AddEmptyImport( + const AstRawString* module_request, const Scanner::Location specifier_loc) { AddModuleRequest(module_request, specifier_loc); } - -void ModuleDescriptor::AddExport( - const AstRawString* local_name, const AstRawString* export_name, - Scanner::Location loc, Zone* zone) { +void SourceTextModuleDescriptor::AddExport(const AstRawString* local_name, + const AstRawString* export_name, + Scanner::Location loc, Zone* zone) { Entry* entry = new (zone) Entry(loc); entry->export_name = export_name; entry->local_name = local_name; AddRegularExport(entry); } -void ModuleDescriptor::AddExport(const AstRawString* import_name, - const AstRawString* export_name, - const AstRawString* module_request, - const Scanner::Location loc, - const Scanner::Location specifier_loc, - Zone* zone) { +void SourceTextModuleDescriptor::AddExport( + const AstRawString* import_name, const AstRawString* export_name, + const AstRawString* module_request, const Scanner::Location loc, + const Scanner::Location specifier_loc, Zone* zone) { DCHECK_NOT_NULL(import_name); DCHECK_NOT_NULL(export_name); Entry* entry = new (zone) Entry(loc); @@ -81,10 +75,9 @@ void ModuleDescriptor::AddExport(const AstRawString* import_name, AddSpecialExport(entry, zone); } -void ModuleDescriptor::AddStarExport(const AstRawString* module_request, - const Scanner::Location loc, - const Scanner::Location specifier_loc, - Zone* zone) { +void SourceTextModuleDescriptor::AddStarExport( + const AstRawString* module_request, const Scanner::Location loc, + const Scanner::Location specifier_loc, Zone* zone) { Entry* entry = new (zone) Entry(loc); entry->module_request = AddModuleRequest(module_request, specifier_loc); AddSpecialExport(entry, zone); @@ -98,24 +91,25 @@ Handle ToStringOrUndefined(Isolate* isolate, const AstRawString* s) { } } // namespace -Handle ModuleDescriptor::Entry::Serialize( +Handle SourceTextModuleDescriptor::Entry::Serialize( Isolate* isolate) const { 
CHECK(Smi::IsValid(module_request)); // TODO(neis): Check earlier? - return ModuleInfoEntry::New( + return SourceTextModuleInfoEntry::New( isolate, ToStringOrUndefined(isolate, export_name), ToStringOrUndefined(isolate, local_name), ToStringOrUndefined(isolate, import_name), module_request, cell_index, location.beg_pos, location.end_pos); } -Handle ModuleDescriptor::SerializeRegularExports(Isolate* isolate, - Zone* zone) const { +Handle SourceTextModuleDescriptor::SerializeRegularExports( + Isolate* isolate, Zone* zone) const { // We serialize regular exports in a way that lets us later iterate over their // local names and for each local name immediately access all its export // names. (Regular exports have neither import name nor module request.) ZoneVector> data( - ModuleInfo::kRegularExportLength * regular_exports_.size(), zone); + SourceTextModuleInfo::kRegularExportLength * regular_exports_.size(), + zone); int index = 0; for (auto it = regular_exports_.begin(); it != regular_exports_.end();) { @@ -130,12 +124,13 @@ Handle ModuleDescriptor::SerializeRegularExports(Isolate* isolate, } while (next != regular_exports_.end() && next->first == it->first); Handle export_names = isolate->factory()->NewFixedArray(count); - data[index + ModuleInfo::kRegularExportLocalNameOffset] = + data[index + SourceTextModuleInfo::kRegularExportLocalNameOffset] = it->second->local_name->string(); - data[index + ModuleInfo::kRegularExportCellIndexOffset] = + data[index + SourceTextModuleInfo::kRegularExportCellIndexOffset] = handle(Smi::FromInt(it->second->cell_index), isolate); - data[index + ModuleInfo::kRegularExportExportNamesOffset] = export_names; - index += ModuleInfo::kRegularExportLength; + data[index + SourceTextModuleInfo::kRegularExportExportNamesOffset] = + export_names; + index += SourceTextModuleInfo::kRegularExportLength; // Collect the export names. 
int i = 0; @@ -159,7 +154,7 @@ Handle ModuleDescriptor::SerializeRegularExports(Isolate* isolate, return result; } -void ModuleDescriptor::MakeIndirectExportsExplicit(Zone* zone) { +void SourceTextModuleDescriptor::MakeIndirectExportsExplicit(Zone* zone) { for (auto it = regular_exports_.begin(); it != regular_exports_.end();) { Entry* entry = it->second; DCHECK_NOT_NULL(entry->local_name); @@ -191,14 +186,14 @@ void ModuleDescriptor::MakeIndirectExportsExplicit(Zone* zone) { } } -ModuleDescriptor::CellIndexKind ModuleDescriptor::GetCellIndexKind( - int cell_index) { +SourceTextModuleDescriptor::CellIndexKind +SourceTextModuleDescriptor::GetCellIndexKind(int cell_index) { if (cell_index > 0) return kExport; if (cell_index < 0) return kImport; return kInvalid; } -void ModuleDescriptor::AssignCellIndices() { +void SourceTextModuleDescriptor::AssignCellIndices() { int export_index = 1; for (auto it = regular_exports_.begin(); it != regular_exports_.end();) { auto current_key = it->first; @@ -230,10 +225,11 @@ void ModuleDescriptor::AssignCellIndices() { namespace { -const ModuleDescriptor::Entry* BetterDuplicate( - const ModuleDescriptor::Entry* candidate, - ZoneMap& export_names, - const ModuleDescriptor::Entry* current_duplicate) { +const SourceTextModuleDescriptor::Entry* BetterDuplicate( + const SourceTextModuleDescriptor::Entry* candidate, + ZoneMap& + export_names, + const SourceTextModuleDescriptor::Entry* current_duplicate) { DCHECK_NOT_NULL(candidate->export_name); DCHECK(candidate->location.IsValid()); auto insert_result = @@ -249,11 +245,11 @@ const ModuleDescriptor::Entry* BetterDuplicate( } // namespace -const ModuleDescriptor::Entry* ModuleDescriptor::FindDuplicateExport( - Zone* zone) const { - const ModuleDescriptor::Entry* duplicate = nullptr; - ZoneMap export_names( - zone); +const SourceTextModuleDescriptor::Entry* +SourceTextModuleDescriptor::FindDuplicateExport(Zone* zone) const { + const SourceTextModuleDescriptor::Entry* duplicate = nullptr; + ZoneMap + export_names(zone); for (const auto& elem : regular_exports_) { duplicate = BetterDuplicate(elem.second, export_names, duplicate); } @@ -264,9 +260,9 @@ const ModuleDescriptor::Entry* ModuleDescriptor::FindDuplicateExport( return duplicate; } -bool ModuleDescriptor::Validate(ModuleScope* module_scope, - PendingCompilationErrorHandler* error_handler, - Zone* zone) { +bool SourceTextModuleDescriptor::Validate( + ModuleScope* module_scope, PendingCompilationErrorHandler* error_handler, + Zone* zone) { DCHECK_EQ(this, module_scope->module()); DCHECK_NOT_NULL(error_handler); diff --git a/deps/v8/src/ast/modules.h b/deps/v8/src/ast/modules.h index c3aa2bd0ada21f..4921d41932e31b 100644 --- a/deps/v8/src/ast/modules.h +++ b/deps/v8/src/ast/modules.h @@ -13,13 +13,13 @@ namespace internal { class AstRawString; -class ModuleInfo; -class ModuleInfoEntry; +class SourceTextModuleInfo; +class SourceTextModuleInfoEntry; class PendingCompilationErrorHandler; -class ModuleDescriptor : public ZoneObject { +class SourceTextModuleDescriptor : public ZoneObject { public: - explicit ModuleDescriptor(Zone* zone) + explicit SourceTextModuleDescriptor(Zone* zone) : module_requests_(zone), special_exports_(zone), namespace_imports_(zone), @@ -84,9 +84,9 @@ class ModuleDescriptor : public ZoneObject { const AstRawString* import_name; // The module_request value records the order in which modules are - // requested. It also functions as an index into the ModuleInfo's array of - // module specifiers and into the Module's array of requested modules. 
A - // negative value means no module request. + // requested. It also functions as an index into the SourceTextModuleInfo's + // array of module specifiers and into the Module's array of requested + // modules. A negative value means no module request. int module_request; // Import/export entries that are associated with a MODULE-allocated @@ -107,7 +107,7 @@ class ModuleDescriptor : public ZoneObject { module_request(-1), cell_index(0) {} - Handle Serialize(Isolate* isolate) const; + Handle Serialize(Isolate* isolate) const; }; enum CellIndexKind { kInvalid, kExport, kImport }; diff --git a/deps/v8/src/ast/prettyprinter.cc b/deps/v8/src/ast/prettyprinter.cc index eca091d61ff80e..c0fe3baff398bc 100644 --- a/deps/v8/src/ast/prettyprinter.cc +++ b/deps/v8/src/ast/prettyprinter.cc @@ -1278,14 +1278,24 @@ void AstPrinter::VisitProperty(Property* node) { IndentedScope indent(this, buf.begin(), node->position()); Visit(node->obj()); - AssignType property_kind = Property::GetAssignType(node); - if (property_kind == NAMED_PROPERTY || - property_kind == NAMED_SUPER_PROPERTY) { - PrintLiteralIndented("NAME", node->key()->AsLiteral(), false); - } else { - DCHECK(property_kind == KEYED_PROPERTY || - property_kind == KEYED_SUPER_PROPERTY); - PrintIndentedVisit("KEY", node->key()); + AssignType type = Property::GetAssignType(node); + switch (type) { + case NAMED_PROPERTY: + case NAMED_SUPER_PROPERTY: { + PrintLiteralIndented("NAME", node->key()->AsLiteral(), false); + break; + } + case PRIVATE_METHOD: { + PrintIndentedVisit("PRIVATE_METHOD", node->key()); + break; + } + case KEYED_PROPERTY: + case KEYED_SUPER_PROPERTY: { + PrintIndentedVisit("KEY", node->key()); + break; + } + case NON_PROPERTY: + UNREACHABLE(); } } diff --git a/deps/v8/src/ast/scopes.cc b/deps/v8/src/ast/scopes.cc index e45303c64b577b..237d98ec6047f2 100644 --- a/deps/v8/src/ast/scopes.cc +++ b/deps/v8/src/ast/scopes.cc @@ -9,7 +9,7 @@ #include "src/ast/ast.h" #include "src/base/optional.h" #include "src/builtins/accessors.h" -#include "src/execution/message-template.h" +#include "src/common/message-template.h" #include "src/init/bootstrapper.h" #include "src/logging/counters.h" #include "src/objects/module-inl.h" @@ -40,6 +40,7 @@ Variable* VariableMap::Declare(Zone* zone, Scope* scope, VariableKind kind, InitializationFlag initialization_flag, MaybeAssignedFlag maybe_assigned_flag, + RequiresBrandCheckFlag requires_brand_check, bool* was_added) { // AstRawStrings are unambiguous, i.e., the same string is always represented // by the same AstRawString*. @@ -51,8 +52,9 @@ Variable* VariableMap::Declare(Zone* zone, Scope* scope, if (*was_added) { // The variable has not been declared yet -> insert it. 
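As the comment above notes, `VariableMap::Declare` only constructs a fresh `Variable` when the hash-map lookup actually inserted a new entry, reporting that through the `was_added` out-parameter. The standard-library analogue of this insert-or-find pattern, for illustration only (V8 uses its own zone-allocated hash map, not `std::unordered_map`):

```cpp
#include <iostream>
#include <string>
#include <unordered_map>

struct Variable {
  std::string name;
  bool maybe_assigned = false;
};

Variable* Declare(std::unordered_map<std::string, Variable>& map,
                  const std::string& name, bool* was_added) {
  // try_emplace constructs the value only if the key was absent.
  auto [it, inserted] = map.try_emplace(name, Variable{name});
  *was_added = inserted;
  return &it->second;
}

int main() {
  std::unordered_map<std::string, Variable> map;
  bool was_added;
  Declare(map, "x", &was_added);
  std::cout << was_added << '\n';  // 1: first declaration inserted an entry
  Declare(map, "x", &was_added);
  std::cout << was_added << '\n';  // 0: redeclaration found the old entry
}
```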
DCHECK_EQ(name, p->key); - Variable* variable = new (zone) Variable( - scope, name, mode, kind, initialization_flag, maybe_assigned_flag); + Variable* variable = + new (zone) Variable(scope, name, mode, kind, initialization_flag, + maybe_assigned_flag, requires_brand_check); p->value = variable; } return reinterpret_cast(p->value); @@ -128,7 +130,7 @@ ModuleScope::ModuleScope(DeclarationScope* script_scope, AstValueFactory* avfactory) : DeclarationScope(avfactory->zone(), script_scope, MODULE_SCOPE, kModule), module_descriptor_(new (avfactory->zone()) - ModuleDescriptor(avfactory->zone())) { + SourceTextModuleDescriptor(avfactory->zone())) { set_language_mode(LanguageMode::kStrict); DeclareThis(avfactory); } @@ -262,7 +264,6 @@ void Scope::SetDefaults() { is_debug_evaluate_scope_ = false; inner_scope_calls_eval_ = false; - force_context_allocation_ = false; force_context_allocation_for_parameters_ = false; is_declaration_scope_ = false; @@ -506,8 +507,9 @@ void DeclarationScope::HoistSloppyBlockFunctions(AstNodeFactory* factory) { DCHECK(is_being_lazily_parsed_); bool was_added; Variable* var = DeclareVariableName(name, VariableMode::kVar, &was_added); - if (sloppy_block_function->init() == Token::ASSIGN) - var->set_maybe_assigned(); + if (sloppy_block_function->init() == Token::ASSIGN) { + var->SetMaybeAssigned(); + } } } } @@ -785,11 +787,13 @@ Variable* Scope::LookupInScopeInfo(const AstRawString* name, Scope* cache) { VariableMode mode; InitializationFlag init_flag; MaybeAssignedFlag maybe_assigned_flag; + RequiresBrandCheckFlag requires_brand_check = kNoBrandCheck; { location = VariableLocation::CONTEXT; index = ScopeInfo::ContextSlotIndex(*scope_info_, name_handle, &mode, - &init_flag, &maybe_assigned_flag); + &init_flag, &maybe_assigned_flag, + &requires_brand_check); found = index >= 0; } @@ -814,9 +818,9 @@ Variable* Scope::LookupInScopeInfo(const AstRawString* name, Scope* cache) { } bool was_added; - Variable* var = - cache->variables_.Declare(zone(), this, name, mode, NORMAL_VARIABLE, - init_flag, maybe_assigned_flag, &was_added); + Variable* var = cache->variables_.Declare( + zone(), this, name, mode, NORMAL_VARIABLE, init_flag, maybe_assigned_flag, + requires_brand_check, &was_added); DCHECK(was_added); var->AllocateTo(location, index); return var; @@ -889,7 +893,7 @@ Variable* Scope::DeclareLocal(const AstRawString* name, VariableMode mode, // assigned because they might be accessed by a lazily parsed top-level // function, which, for efficiency, we preparse without variable tracking. if (is_script_scope() || is_module_scope()) { - if (mode != VariableMode::kConst) var->set_maybe_assigned(); + if (mode != VariableMode::kConst) var->SetMaybeAssigned(); var->set_is_used(); } @@ -938,7 +942,7 @@ Variable* Scope::DeclareVariable( DCHECK(*was_added); } } else { - var->set_maybe_assigned(); + var->SetMaybeAssigned(); if (V8_UNLIKELY(IsLexicalVariableMode(mode) || IsLexicalVariableMode(var->mode()))) { // The name was declared in this scope before; check for conflicting @@ -1009,7 +1013,7 @@ Variable* Scope::DeclareVariableName(const AstRawString* name, } // Sloppy block function redefinition. 
} - var->set_maybe_assigned(); + var->SetMaybeAssigned(); } var->set_is_used(); return var; @@ -1040,7 +1044,7 @@ Variable* DeclarationScope::DeclareDynamicGlobal(const AstRawString* name, bool was_added; return cache->variables_.Declare( zone(), this, name, VariableMode::kDynamicGlobal, kind, - kCreatedInitialized, kNotAssigned, &was_added); + kCreatedInitialized, kNotAssigned, kNoBrandCheck, &was_added); // TODO(neis): Mark variable as maybe-assigned? } @@ -1063,7 +1067,7 @@ Variable* Scope::NewTemporary(const AstRawString* name, Variable* var = new (zone()) Variable(scope, name, VariableMode::kTemporary, NORMAL_VARIABLE, kCreatedInitialized); scope->AddLocal(var); - if (maybe_assigned == kMaybeAssigned) var->set_maybe_assigned(); + if (maybe_assigned == kMaybeAssigned) var->SetMaybeAssigned(); return var; } @@ -1401,7 +1405,7 @@ void Scope::AnalyzePartially(DeclarationScope* max_outer_scope, } } else { var->set_is_used(); - if (proxy->is_assigned()) var->set_maybe_assigned(); + if (proxy->is_assigned()) var->SetMaybeAssigned(); } } @@ -1592,6 +1596,10 @@ void PrintVar(int indent, Variable* var) { if (comma) PrintF(", "); PrintF("hole initialization elided"); } + if (var->requires_brand_check()) { + if (comma) PrintF(", "); + PrintF("requires brand check"); + } PrintF("\n"); } @@ -1766,9 +1774,9 @@ Variable* Scope::NonLocal(const AstRawString* name, VariableMode mode) { // Declare a new non-local. DCHECK(IsDynamicVariableMode(mode)); bool was_added; - Variable* var = - variables_.Declare(zone(), this, name, mode, NORMAL_VARIABLE, - kCreatedInitialized, kNotAssigned, &was_added); + Variable* var = variables_.Declare(zone(), this, name, mode, NORMAL_VARIABLE, + kCreatedInitialized, kNotAssigned, + kNoBrandCheck, &was_added); // Allocate it by giving it a dynamic lookup. var->AllocateTo(VariableLocation::LOOKUP, -1); return var; @@ -1879,11 +1887,14 @@ Variable* Scope::LookupWith(VariableProxy* proxy, Scope* scope, DCHECK(!scope->already_resolved_); var->set_is_used(); var->ForceContextAllocation(); - if (proxy->is_assigned()) var->set_maybe_assigned(); + if (proxy->is_assigned()) var->SetMaybeAssigned(); } if (entry_point != nullptr) entry_point->variables_.Remove(var); Scope* target = entry_point == nullptr ? scope : entry_point; - return target->NonLocal(proxy->raw_name(), VariableMode::kDynamic); + Variable* dynamic = + target->NonLocal(proxy->raw_name(), VariableMode::kDynamic); + dynamic->set_local_if_not_shadowed(var); + return dynamic; } Variable* Scope::LookupSloppyEval(VariableProxy* proxy, Scope* scope, @@ -1912,7 +1923,7 @@ Variable* Scope::LookupSloppyEval(VariableProxy* proxy, Scope* scope, // script scope are always dynamic. if (var->IsGlobalObjectProperty()) { Scope* target = entry_point == nullptr ? 
scope : entry_point; - return target->NonLocal(proxy->raw_name(), VariableMode::kDynamicGlobal); + var = target->NonLocal(proxy->raw_name(), VariableMode::kDynamicGlobal); } if (var->is_dynamic()) return var; @@ -2010,7 +2021,7 @@ void Scope::ResolvePreparsedVariable(VariableProxy* proxy, Scope* scope, var->set_is_used(); if (!var->is_dynamic()) { var->ForceContextAllocation(); - if (proxy->is_assigned()) var->set_maybe_assigned(); + if (proxy->is_assigned()) var->SetMaybeAssigned(); return; } } @@ -2054,7 +2065,7 @@ bool Scope::MustAllocate(Variable* var) { if (!var->raw_name()->IsEmpty() && (inner_scope_calls_eval_ || is_catch_scope() || is_script_scope())) { var->set_is_used(); - if (inner_scope_calls_eval_) var->set_maybe_assigned(); + if (inner_scope_calls_eval_) var->SetMaybeAssigned(); } DCHECK(!var->has_forced_context_allocation() || var->is_used()); // Global variables do not need to be allocated. @@ -2124,7 +2135,7 @@ void DeclarationScope::AllocateParameterLocals() { DCHECK_EQ(this, var->scope()); if (has_mapped_arguments) { var->set_is_used(); - var->set_maybe_assigned(); + var->SetMaybeAssigned(); var->ForceContextAllocation(); } AllocateParameter(var, i); @@ -2315,12 +2326,13 @@ int Scope::ContextLocalCount() const { (is_function_var_in_context ? 1 : 0); } -Variable* ClassScope::DeclarePrivateName(const AstRawString* name, - bool* was_added) { +Variable* ClassScope::DeclarePrivateName( + const AstRawString* name, RequiresBrandCheckFlag requires_brand_check, + bool* was_added) { Variable* result = EnsureRareData()->private_name_map.Declare( zone(), this, name, VariableMode::kConst, NORMAL_VARIABLE, InitializationFlag::kNeedsInitialization, - MaybeAssignedFlag::kMaybeAssigned, was_added); + MaybeAssignedFlag::kMaybeAssigned, requires_brand_check, was_added); if (*was_added) { locals_.Add(result); } @@ -2404,8 +2416,10 @@ Variable* ClassScope::LookupPrivateNameInScopeInfo(const AstRawString* name) { VariableMode mode; InitializationFlag init_flag; MaybeAssignedFlag maybe_assigned_flag; - int index = ScopeInfo::ContextSlotIndex(*scope_info_, name_handle, &mode, - &init_flag, &maybe_assigned_flag); + RequiresBrandCheckFlag requires_brand_check; + int index = + ScopeInfo::ContextSlotIndex(*scope_info_, name_handle, &mode, &init_flag, + &maybe_assigned_flag, &requires_brand_check); if (index < 0) { return nullptr; } @@ -2417,7 +2431,7 @@ Variable* ClassScope::LookupPrivateNameInScopeInfo(const AstRawString* name) { // Add the found private name to the map to speed up subsequent // lookups for the same name. 
bool was_added; - Variable* var = DeclarePrivateName(name, &was_added); + Variable* var = DeclarePrivateName(name, requires_brand_check, &was_added); DCHECK(was_added); var->AllocateTo(VariableLocation::CONTEXT, index); return var; @@ -2454,8 +2468,7 @@ bool ClassScope::ResolvePrivateNames(ParseInfo* info) { Scanner::Location loc = proxy->location(); info->pending_error_handler()->ReportMessageAt( loc.beg_pos, loc.end_pos, - MessageTemplate::kInvalidPrivateFieldResolution, proxy->raw_name(), - kSyntaxError); + MessageTemplate::kInvalidPrivateFieldResolution, proxy->raw_name()); return false; } else { var->set_is_used(); diff --git a/deps/v8/src/ast/scopes.h b/deps/v8/src/ast/scopes.h index 1feaad2a9041b5..932d5c70b937b8 100644 --- a/deps/v8/src/ast/scopes.h +++ b/deps/v8/src/ast/scopes.h @@ -41,7 +41,9 @@ class VariableMap : public ZoneHashMap { Variable* Declare(Zone* zone, Scope* scope, const AstRawString* name, VariableMode mode, VariableKind kind, InitializationFlag initialization_flag, - MaybeAssignedFlag maybe_assigned_flag, bool* was_added); + MaybeAssignedFlag maybe_assigned_flag, + RequiresBrandCheckFlag requires_brand_check, + bool* was_added); V8_EXPORT_PRIVATE Variable* Lookup(const AstRawString* name); void Remove(Variable* var); @@ -556,7 +558,7 @@ class V8_EXPORT_PRIVATE Scope : public NON_EXPORTED_BASE(ZoneObject) { MaybeAssignedFlag maybe_assigned_flag, bool* was_added) { Variable* result = variables_.Declare(zone, this, name, mode, kind, initialization_flag, - maybe_assigned_flag, was_added); + maybe_assigned_flag, kNoBrandCheck, was_added); if (*was_added) locals_.Add(result); return result; } @@ -712,7 +714,6 @@ class V8_EXPORT_PRIVATE Scope : public NON_EXPORTED_BASE(ZoneObject) { // True if one of the inner scopes or the scope itself calls eval. bool inner_scope_calls_eval_ : 1; - bool force_context_allocation_ : 1; bool force_context_allocation_for_parameters_ : 1; // True if it holds 'var' declarations. @@ -1155,14 +1156,14 @@ class ModuleScope final : public DeclarationScope { AstValueFactory* avfactory); // Returns nullptr in a deserialized scope. - ModuleDescriptor* module() const { return module_descriptor_; } + SourceTextModuleDescriptor* module() const { return module_descriptor_; } // Set MODULE as VariableLocation for all variables that will live in a // module's export table. void AllocateModuleVariables(); private: - ModuleDescriptor* const module_descriptor_; + SourceTextModuleDescriptor* const module_descriptor_; }; class V8_EXPORT_PRIVATE ClassScope : public Scope { @@ -1174,7 +1175,9 @@ class V8_EXPORT_PRIVATE ClassScope : public Scope { // Declare a private name in the private name map and add it to the // local variables of this scope. 
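`RequiresBrandCheckFlag` is threaded through `Declare` here and stored, in the variables.h hunk further below, as one more `BitField16` slot packed next to `MaybeAssignedFlagField`, so the new flag costs no extra storage per `Variable`. The underlying trick, sketched with plain shifts and masks (bit positions are illustrative; V8's real `BitField` template also provides `kNext` chaining and static bounds checks):

```cpp
#include <cstdint>

// A field of `size` bits starting at bit `shift` inside a uint16_t word.
template <typename T, int shift, int size>
struct BitField16 {
  static constexpr uint16_t kMask = ((1u << size) - 1) << shift;
  static constexpr uint16_t encode(T value) {
    return static_cast<uint16_t>(static_cast<uint16_t>(value) << shift);
  }
  static constexpr T decode(uint16_t bits) {
    return static_cast<T>((bits & kMask) >> shift);
  }
  static constexpr uint16_t update(uint16_t bits, T value) {
    return static_cast<uint16_t>((bits & ~kMask) | encode(value));
  }
};

enum MaybeAssignedFlag : uint16_t { kNotAssigned, kMaybeAssigned };
enum RequiresBrandCheckFlag : uint16_t { kNoBrandCheck, kRequiresBrandCheck };

// Adjacent one-bit fields packed into the same 16-bit word.
using MaybeAssignedField = BitField16<MaybeAssignedFlag, 0, 1>;
using RequiresBrandCheckField = BitField16<RequiresBrandCheckFlag, 1, 1>;

static_assert(RequiresBrandCheckField::decode(RequiresBrandCheckField::encode(
                  kRequiresBrandCheck)) == kRequiresBrandCheck);
```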
- Variable* DeclarePrivateName(const AstRawString* name, bool* was_added); + Variable* DeclarePrivateName(const AstRawString* name, + RequiresBrandCheckFlag requires_brand_check, + bool* was_added); void AddUnresolvedPrivateName(VariableProxy* proxy); diff --git a/deps/v8/src/ast/source-range-ast-visitor.cc b/deps/v8/src/ast/source-range-ast-visitor.cc index d171e30587584f..2fcf151999ace0 100644 --- a/deps/v8/src/ast/source-range-ast-visitor.cc +++ b/deps/v8/src/ast/source-range-ast-visitor.cc @@ -25,6 +25,14 @@ void SourceRangeAstVisitor::VisitBlock(Block* stmt) { } } +void SourceRangeAstVisitor::VisitSwitchStatement(SwitchStatement* stmt) { + AstTraversalVisitor::VisitSwitchStatement(stmt); + ZonePtrList* clauses = stmt->cases(); + for (CaseClause* clause : *clauses) { + MaybeRemoveLastContinuationRange(clause->statements()); + } +} + void SourceRangeAstVisitor::VisitFunctionLiteral(FunctionLiteral* expr) { AstTraversalVisitor::VisitFunctionLiteral(expr); ZonePtrList* stmts = expr->body(); diff --git a/deps/v8/src/ast/source-range-ast-visitor.h b/deps/v8/src/ast/source-range-ast-visitor.h index 4ea36a947f58e6..4ba5feb2d299f9 100644 --- a/deps/v8/src/ast/source-range-ast-visitor.h +++ b/deps/v8/src/ast/source-range-ast-visitor.h @@ -34,6 +34,7 @@ class SourceRangeAstVisitor final friend class AstTraversalVisitor; void VisitBlock(Block* stmt); + void VisitSwitchStatement(SwitchStatement* stmt); void VisitFunctionLiteral(FunctionLiteral* expr); bool VisitNode(AstNode* node); diff --git a/deps/v8/src/ast/variables.h b/deps/v8/src/ast/variables.h index df40fee754073a..7805fa20c8c8f6 100644 --- a/deps/v8/src/ast/variables.h +++ b/deps/v8/src/ast/variables.h @@ -21,7 +21,8 @@ class Variable final : public ZoneObject { public: Variable(Scope* scope, const AstRawString* name, VariableMode mode, VariableKind kind, InitializationFlag initialization_flag, - MaybeAssignedFlag maybe_assigned_flag = kNotAssigned) + MaybeAssignedFlag maybe_assigned_flag = kNotAssigned, + RequiresBrandCheckFlag requires_brand_check = kNoBrandCheck) : scope_(scope), name_(name), local_if_not_shadowed_(nullptr), @@ -31,6 +32,7 @@ class Variable final : public ZoneObject { bit_field_(MaybeAssignedFlagField::encode(maybe_assigned_flag) | InitializationFlagField::encode(initialization_flag) | VariableModeField::encode(mode) | + RequiresBrandCheckField::encode(requires_brand_check) | IsUsedField::encode(false) | ForceContextAllocationField::encode(false) | ForceHoleInitializationField::encode(false) | @@ -69,8 +71,31 @@ class Variable final : public ZoneObject { MaybeAssignedFlag maybe_assigned() const { return MaybeAssignedFlagField::decode(bit_field_); } - void set_maybe_assigned() { - bit_field_ = MaybeAssignedFlagField::update(bit_field_, kMaybeAssigned); + void SetMaybeAssigned() { + // If this variable is dynamically shadowing another variable, then that + // variable could also be assigned (in the non-shadowing case). + if (has_local_if_not_shadowed()) { + // Avoid repeatedly marking the same tree of variables by only recursing + // when this variable's maybe_assigned status actually changes. 
+ if (!maybe_assigned()) { + local_if_not_shadowed()->SetMaybeAssigned(); + } + DCHECK(local_if_not_shadowed()->maybe_assigned()); + } + set_maybe_assigned(); + } + + RequiresBrandCheckFlag get_requires_brand_check_flag() const { + return RequiresBrandCheckField::decode(bit_field_); + } + + bool requires_brand_check() const { + return get_requires_brand_check_flag() == kRequiresBrandCheck; + } + + void set_requires_brand_check() { + bit_field_ = + RequiresBrandCheckField::update(bit_field_, kRequiresBrandCheck); } int initializer_position() { return initializer_position_; } @@ -143,11 +168,16 @@ class Variable final : public ZoneObject { } Variable* local_if_not_shadowed() const { - DCHECK(mode() == VariableMode::kDynamicLocal && - local_if_not_shadowed_ != nullptr); + DCHECK((mode() == VariableMode::kDynamicLocal || + mode() == VariableMode::kDynamic) && + has_local_if_not_shadowed()); return local_if_not_shadowed_; } + bool has_local_if_not_shadowed() const { + return local_if_not_shadowed_ != nullptr; + } + void set_local_if_not_shadowed(Variable* local) { local_if_not_shadowed_ = local; } @@ -200,15 +230,19 @@ class Variable final : public ZoneObject { const AstRawString* name_; // If this field is set, this variable references the stored locally bound - // variable, but it might be shadowed by variable bindings introduced by - // sloppy 'eval' calls between the reference scope (inclusive) and the - // binding scope (exclusive). + // variable, but it might be shadowed by variable bindings introduced by with + // blocks or sloppy 'eval' calls between the reference scope (inclusive) and + // the binding scope (exclusive). Variable* local_if_not_shadowed_; Variable* next_; int index_; int initializer_position_; uint16_t bit_field_; + void set_maybe_assigned() { + bit_field_ = MaybeAssignedFlagField::update(bit_field_, kMaybeAssigned); + } + class VariableModeField : public BitField16 {}; class VariableKindField : public BitField16 {}; @@ -225,6 +259,9 @@ class Variable final : public ZoneObject { class MaybeAssignedFlagField : public BitField16 {}; + class RequiresBrandCheckField + : public BitField16 {}; Variable** next() { return &next_; } friend List; friend base::ThreadedListTraits; diff --git a/deps/v8/src/base/adapters.h b/deps/v8/src/base/adapters.h index 92c500085d1ed2..f684b52ccb6dc0 100644 --- a/deps/v8/src/base/adapters.h +++ b/deps/v8/src/base/adapters.h @@ -45,7 +45,7 @@ class ReversedAdapter { // // iterates through v from back to front // } template -ReversedAdapter Reversed(T& t) { +ReversedAdapter Reversed(T&& t) { return ReversedAdapter(t); } diff --git a/deps/v8/src/base/lsan.h b/deps/v8/src/base/lsan.h new file mode 100644 index 00000000000000..fd9bbd21c1b818 --- /dev/null +++ b/deps/v8/src/base/lsan.h @@ -0,0 +1,29 @@ +// Copyright 2019 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +// LeakSanitizer support. + +#ifndef V8_BASE_LSAN_H_ +#define V8_BASE_LSAN_H_ + +#include + +// There is no compile time flag for LSan, to enable this whenever ASan is +// enabled. Note that LSan can be used as part of ASan with 'detect_leaks=1'. +// On windows, LSan is not implemented yet, so disable it there. 
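The new `src/base/lsan.h` provides `LSAN_IGNORE_OBJECT`, which expands to `__lsan_ignore_object` when ASan (and therefore LSan) is available and to a pointer-type check otherwise, as the conditional definition just below shows. A hypothetical usage sketch for an intentionally immortal allocation:

```cpp
#include "src/base/lsan.h"

struct Tables { int data[1024]; };

Tables* GetProcessGlobalTables() {
  // Allocated once and deliberately never freed; tell LeakSanitizer not to
  // report it. In a non-ASan build the macro merely type-checks the pointer.
  static Tables* tables = [] {
    Tables* t = new Tables();
    LSAN_IGNORE_OBJECT(t);
    return t;
  }();
  return tables;
}
```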
+#if defined(V8_USE_ADDRESS_SANITIZER) && !defined(V8_OS_WIN) + +#include + +#define LSAN_IGNORE_OBJECT(ptr) __lsan_ignore_object(ptr) + +#else // defined(V8_USE_ADDRESS_SANITIZER) && !defined(V8_OS_WIN) + +#define LSAN_IGNORE_OBJECT(ptr) \ + static_assert(std::is_convertible::value, \ + "LSAN_IGNORE_OBJECT can only be used with pointer types") + +#endif // defined(V8_USE_ADDRESS_SANITIZER) && !defined(V8_OS_WIN) + +#endif // V8_BASE_LSAN_H_ diff --git a/deps/v8/src/common/v8memory.h b/deps/v8/src/base/memory.h similarity index 79% rename from deps/v8/src/common/v8memory.h rename to deps/v8/src/base/memory.h index 02ba2de8481334..087f67291d201d 100644 --- a/deps/v8/src/common/v8memory.h +++ b/deps/v8/src/base/memory.h @@ -2,14 +2,16 @@ // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. -#ifndef V8_COMMON_V8MEMORY_H_ -#define V8_COMMON_V8MEMORY_H_ +#ifndef V8_BASE_MEMORY_H_ +#define V8_BASE_MEMORY_H_ #include "src/base/macros.h" -#include "src/common/globals.h" namespace v8 { -namespace internal { +namespace base { + +using Address = uintptr_t; +using byte = uint8_t; // Memory provides an interface to 'raw' memory. It encapsulates the casts // that typically are needed when incompatible pointer types are used. @@ -39,22 +41,6 @@ static inline void WriteUnalignedValue(Address p, V value) { memcpy(reinterpret_cast(p), &value, sizeof(V)); } -static inline uint16_t ReadUnalignedUInt16(Address p) { - return ReadUnalignedValue(p); -} - -static inline void WriteUnalignedUInt16(Address p, uint16_t value) { - WriteUnalignedValue(p, value); -} - -static inline uint32_t ReadUnalignedUInt32(Address p) { - return ReadUnalignedValue(p); -} - -static inline void WriteUnalignedUInt32(Address p, uint32_t value) { - WriteUnalignedValue(p, value); -} - template static inline V ReadLittleEndianValue(Address p) { #if defined(V8_TARGET_LITTLE_ENDIAN) @@ -93,7 +79,7 @@ static inline void WriteLittleEndianValue(V* p, V value) { WriteLittleEndianValue(reinterpret_cast
    (p), value); } -} // namespace internal +} // namespace base } // namespace v8 -#endif // V8_COMMON_V8MEMORY_H_ +#endif // V8_BASE_MEMORY_H_ diff --git a/deps/v8/src/base/platform/OWNERS b/deps/v8/src/base/platform/OWNERS index 7f64f4dedb8102..bf5455c9afaa8a 100644 --- a/deps/v8/src/base/platform/OWNERS +++ b/deps/v8/src/base/platform/OWNERS @@ -1,5 +1,3 @@ -set noparent - hpayer@chromium.org mlippautz@chromium.org ulan@chromium.org diff --git a/deps/v8/src/base/platform/platform-fuchsia.cc b/deps/v8/src/base/platform/platform-fuchsia.cc index 11499f572cfd93..fa175c39177aea 100644 --- a/deps/v8/src/base/platform/platform-fuchsia.cc +++ b/deps/v8/src/base/platform/platform-fuchsia.cc @@ -48,7 +48,7 @@ void* OS::Allocate(void* address, size_t size, size_t alignment, size_t request_size = size + (alignment - page_size); zx_handle_t vmo; - if (zx_vmo_create(request_size, ZX_VMO_NON_RESIZABLE, &vmo) != ZX_OK) { + if (zx_vmo_create(request_size, 0, &vmo) != ZX_OK) { return nullptr; } static const char kVirtualMemoryName[] = "v8-virtualmem"; @@ -152,7 +152,7 @@ int OS::GetUserTime(uint32_t* secs, uint32_t* usecs) { const auto kMicrosPerSecond = 1000000ULL; zx_time_t nanos_since_thread_started; zx_status_t status = - zx_clock_get_new(ZX_CLOCK_THREAD, &nanos_since_thread_started); + zx_clock_get(ZX_CLOCK_THREAD, &nanos_since_thread_started); if (status != ZX_OK) { return -1; } diff --git a/deps/v8/src/base/platform/platform-posix.cc b/deps/v8/src/base/platform/platform-posix.cc index 7f4ce192dbcfed..6da83d7e0208a3 100644 --- a/deps/v8/src/base/platform/platform-posix.cc +++ b/deps/v8/src/base/platform/platform-posix.cc @@ -199,6 +199,12 @@ void* OS::GetRandomMmapAddr() { MutexGuard guard(rng_mutex.Pointer()); GetPlatformRandomNumberGenerator()->NextBytes(&raw_addr, sizeof(raw_addr)); } +#if defined(__APPLE__) +#if V8_TARGET_ARCH_ARM64 + DCHECK_EQ(1 << 14, AllocatePageSize()); + raw_addr = RoundDown(raw_addr, 1 << 14); +#endif +#endif #if defined(V8_USE_ADDRESS_SANITIZER) || defined(MEMORY_SANITIZER) || \ defined(THREAD_SANITIZER) || defined(LEAK_SANITIZER) // If random hint addresses interfere with address ranges hard coded in diff --git a/deps/v8/src/base/small-vector.h b/deps/v8/src/base/small-vector.h index bb024ca87ead36..b11dfb86b446dc 100644 --- a/deps/v8/src/base/small-vector.h +++ b/deps/v8/src/base/small-vector.h @@ -88,22 +88,29 @@ class SmallVector { DCHECK_NE(0, size()); return end_[-1]; } + const T& back() const { + DCHECK_NE(0, size()); + return end_[-1]; + } T& operator[](size_t index) { DCHECK_GT(size(), index); return begin_[index]; } - const T& operator[](size_t index) const { + const T& at(size_t index) const { DCHECK_GT(size(), index); return begin_[index]; } + const T& operator[](size_t index) const { return at(index); } + template void emplace_back(Args&&... args) { - if (V8_UNLIKELY(end_ == end_of_storage_)) Grow(); - new (end_) T(std::forward(args)...); - ++end_; + T* end = end_; + if (V8_UNLIKELY(end == end_of_storage_)) end = Grow(); + new (end) T(std::forward(args)...); + end_ = end + 1; } void pop_back(size_t count = 1) { @@ -135,7 +142,12 @@ class SmallVector { typename std::aligned_storage::type inline_storage_; - void Grow(size_t min_capacity = 0) { + // Grows the backing store by a factor of two. Returns the new end of the used + // storage (this reduces binary size). + V8_NOINLINE T* Grow() { return Grow(0); } + + // Grows the backing store by a factor of two, and at least to {min_capacity}. 
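The `SmallVector` change above shrinks the inlined `emplace_back` fast path: the cold growth code moves behind `V8_NOINLINE` `Grow` helpers that return the new end of the used storage, so each call site inlines only a capacity compare and a pointer bump. A reduced sketch of that shape (a fixed `int` element type and a local `NOINLINE` macro stand in for V8's templates and macros):

```cpp
#include <cstddef>
#include <cstdlib>
#include <cstring>

#if defined(_MSC_VER)
#define NOINLINE __declspec(noinline)
#else
#define NOINLINE __attribute__((noinline))
#endif

class IntVector {
 public:
  ~IntVector() { std::free(heap_); }

  // Hot path: one branch, one store, one increment after inlining.
  void push_back(int v) {
    int* end = end_;
    if (end == end_of_storage_) end = Grow();  // cold, outlined
    *end = v;
    end_ = end + 1;
  }

  size_t size() const { return end_ - begin_; }

 private:
  // Cold path: doubles capacity and returns the new end of used storage.
  NOINLINE int* Grow() {
    size_t in_use = end_ - begin_;
    size_t new_cap = capacity_ ? 2 * capacity_ : 8;
    int* fresh = static_cast<int*>(std::malloc(new_cap * sizeof(int)));
    if (in_use) std::memcpy(fresh, begin_, in_use * sizeof(int));
    std::free(heap_);
    heap_ = begin_ = fresh;
    end_ = fresh + in_use;
    end_of_storage_ = fresh + new_cap;
    capacity_ = new_cap;
    return end_;
  }

  int* heap_ = nullptr;
  int* begin_ = nullptr;
  int* end_ = nullptr;
  int* end_of_storage_ = nullptr;
  size_t capacity_ = 0;
};
```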
+ V8_NOINLINE T* Grow(size_t min_capacity) { size_t in_use = end_ - begin_; size_t new_capacity = base::bits::RoundUpToPowerOfTwo(std::max(min_capacity, 2 * capacity())); @@ -145,6 +157,7 @@ class SmallVector { begin_ = new_storage; end_ = new_storage + in_use; end_of_storage_ = new_storage + new_capacity; + return end_; } bool is_big() const { return begin_ != inline_storage_begin(); } diff --git a/deps/v8/src/base/vlq-base64.cc b/deps/v8/src/base/vlq-base64.cc new file mode 100644 index 00000000000000..62e63ac87261d4 --- /dev/null +++ b/deps/v8/src/base/vlq-base64.cc @@ -0,0 +1,58 @@ +// Copyright 2019 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#include +#include + +#include "src/base/logging.h" +#include "src/base/vlq-base64.h" + +namespace v8 { +namespace base { + +namespace { +constexpr int8_t kCharToDigit[] = { + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, 0x3e, -1, -1, -1, 0x3f, + 0x34, 0x35, 0x36, 0x37, 0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, -1, -1, + -1, -1, -1, -1, -1, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, + 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, 0x10, 0x11, 0x12, + 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, 0x19, -1, -1, -1, -1, -1, + -1, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, 0x20, 0x21, 0x22, 0x23, 0x24, + 0x25, 0x26, 0x27, 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, 0x30, + 0x31, 0x32, 0x33, -1, -1, -1, -1, -1}; + +constexpr uint32_t kContinueShift = 5; +constexpr uint32_t kContinueMask = 1 << kContinueShift; +constexpr uint32_t kDataMask = kContinueMask - 1; + +int8_t charToDigitDecode(uint8_t c) { return c < 128u ? kCharToDigit[c] : -1; } +} // namespace + +int8_t charToDigitDecodeForTesting(uint8_t c) { return charToDigitDecode(c); } + +int32_t VLQBase64Decode(const char* start, size_t sz, size_t* pos) { + uint32_t res = 0; + uint64_t shift = 0; + int32_t digit; + + do { + if (*pos >= sz) { + return std::numeric_limits::min(); + } + digit = static_cast(charToDigitDecode(start[*pos])); + bool is_last_byte = (shift + kContinueShift >= 32); + if (digit == -1 || (is_last_byte && (digit >> 2) != 0)) { + return std::numeric_limits::min(); + } + res += (digit & kDataMask) << shift; + shift += kContinueShift; + (*pos)++; + } while (digit & kContinueMask); + return (res & 1) ? -static_cast(res >> 1) : (res >> 1); +} +} // namespace base +} // namespace v8 diff --git a/deps/v8/src/base/vlq-base64.h b/deps/v8/src/base/vlq-base64.h new file mode 100644 index 00000000000000..5d8633798bcf30 --- /dev/null +++ b/deps/v8/src/base/vlq-base64.h @@ -0,0 +1,23 @@ +// Copyright 2019 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#ifndef V8_BASE_VLQ_BASE64_H_ +#define V8_BASE_VLQ_BASE64_H_ + +#include + +#include "src/base/base-export.h" + +namespace v8 { +namespace base { +V8_BASE_EXPORT int8_t charToDigitDecodeForTesting(uint8_t c); + +// Decodes a VLQ-Base64-encoded string into 32bit digits. A valid return value +// is within [-2^31+1, 2^31-1]. This function returns -2^31 +// (std::numeric_limits::min()) when bad input s is passed. 
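In the decoder above, each Base64 character carries six bits: the low five are payload (`kDataMask`), bit five is the continuation flag (`kContinueMask`), and once the groups are reassembled the lowest result bit encodes the sign. A worked decode of the two-character input "uD", tracing the algorithm shown above by hand:

```cpp
#include <cassert>
#include <cstddef>
#include "src/base/vlq-base64.h"

int main() {
  // 'u' is Base64 digit 46 = 0b101110: continuation bit set, data = 0b01110.
  // 'D' is Base64 digit 3  = 0b000011: no continuation,     data = 0b00011.
  // Reassembled: res = 14 + (3 << 5) = 110; LSB 0 => positive, value = 110 >> 1.
  size_t pos = 0;
  assert(v8::base::VLQBase64Decode("uD", 2, &pos) == 55);
  assert(pos == 2);  // Both characters were consumed.
}
```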
+V8_BASE_EXPORT int32_t VLQBase64Decode(const char* start, size_t sz, + size_t* pos); +} // namespace base +} // namespace v8 +#endif // V8_BASE_VLQ_BASE64_H_ diff --git a/deps/v8/src/builtins/OWNERS b/deps/v8/src/builtins/OWNERS new file mode 100644 index 00000000000000..450423f87850ba --- /dev/null +++ b/deps/v8/src/builtins/OWNERS @@ -0,0 +1,3 @@ +file://COMMON_OWNERS + +# COMPONENT: Blink>JavaScript>Runtime diff --git a/deps/v8/src/builtins/accessors.cc b/deps/v8/src/builtins/accessors.cc index 25d37d73b4a646..ea6308622da13b 100644 --- a/deps/v8/src/builtins/accessors.cc +++ b/deps/v8/src/builtins/accessors.cc @@ -287,7 +287,8 @@ void Accessors::StringLengthGetter( if (!value.IsString()) { // Not a string value. That means that we either got a String wrapper or // a Value with a String wrapper in its prototype chain. - value = JSValue::cast(*Utils::OpenHandle(*info.Holder())).value(); + value = + JSPrimitiveWrapper::cast(*Utils::OpenHandle(*info.Holder())).value(); } Object result = Smi::FromInt(String::cast(value).length()); info.GetReturnValue().Set(Utils::ToLocal(Handle(result, isolate))); @@ -305,7 +306,7 @@ Handle Accessors::MakeStringLengthInfo(Isolate* isolate) { static Handle GetFunctionPrototype(Isolate* isolate, Handle function) { if (!function->has_prototype()) { - Handle proto = isolate->factory()->NewFunctionPrototype(function); + Handle proto = isolate->factory()->NewFunctionPrototype(function); JSFunction::SetPrototype(function, proto); } return Handle(function->prototype(), isolate); diff --git a/deps/v8/src/builtins/arguments.tq b/deps/v8/src/builtins/arguments.tq index add66917c03689..6df5f801a3945a 100644 --- a/deps/v8/src/builtins/arguments.tq +++ b/deps/v8/src/builtins/arguments.tq @@ -34,13 +34,13 @@ namespace arguments { @export macro GetArgumentsFrameAndCount(implicit context: Context)(f: JSFunction): ArgumentsInfo { - let frame: Frame = LoadParentFramePointer(); + const frame: Frame = LoadParentFramePointer(); assert(frame.function == f); const shared: SharedFunctionInfo = f.shared_function_info; const formalParameterCount: bint = Convert(Convert(shared.formal_parameter_count)); - let argumentCount: bint = formalParameterCount; + const argumentCount: bint = formalParameterCount; const adaptor: ArgumentsAdaptorFrame = Cast(frame.caller) diff --git a/deps/v8/src/builtins/arm/builtins-arm.cc b/deps/v8/src/builtins/arm/builtins-arm.cc index 54c16932fa3a83..9b9956b0fbba0a 100644 --- a/deps/v8/src/builtins/arm/builtins-arm.cc +++ b/deps/v8/src/builtins/arm/builtins-arm.cc @@ -1093,11 +1093,11 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) { // 8-bit fields next to each other, so we could just optimize by writing a // 16-bit. These static asserts guard our assumption is valid. STATIC_ASSERT(BytecodeArray::kBytecodeAgeOffset == - BytecodeArray::kOSRNestingLevelOffset + kCharSize); + BytecodeArray::kOsrNestingLevelOffset + kCharSize); STATIC_ASSERT(BytecodeArray::kNoAgeBytecodeAge == 0); __ mov(r9, Operand(0)); __ strh(r9, FieldMemOperand(kInterpreterBytecodeArrayRegister, - BytecodeArray::kOSRNestingLevelOffset)); + BytecodeArray::kOsrNestingLevelOffset)); // Load the initial bytecode offset. __ mov(kInterpreterBytecodeOffsetRegister, @@ -1509,13 +1509,16 @@ void Generate_ContinueToBuiltinHelper(MacroAssembler* masm, __ ldr(fp, MemOperand( sp, BuiltinContinuationFrameConstants::kFixedFrameSizeFromFp)); + // Load builtin index (stored as a Smi) and use it to get the builtin start + // address from the builtins table. 
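Per the comment above, the continuation frame now stores the builtin *index* as a Smi rather than a code object; on return the trampoline pops it, untags it, and loads the entry address from the isolate's builtins table via `LoadEntryFromBuiltinIndex`. In C++ terms the dispatch amounts to the following sketch (the one-bit Smi tag shown is the 32-bit layout, and the table contents are fake):

```cpp
#include <cstdint>

using Address = std::uintptr_t;

// Smi encoding with a one-bit tag: value << 1, low bit 0 marks "small int".
constexpr std::intptr_t SmiTag(std::intptr_t value) { return value << 1; }
constexpr std::intptr_t SmiUntag(std::intptr_t tagged) { return tagged >> 1; }

Address entry_table[2] = {0x1000, 0x2000};  // builtins table (fake addresses)

// What the trampoline does after popping the saved Smi from the frame.
Address EntryFromBuiltinIndex(std::intptr_t tagged_index) {
  return entry_table[SmiUntag(tagged_index)];
}

static_assert(SmiUntag(SmiTag(1)) == 1);
```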
UseScratchRegisterScope temps(masm); - Register scratch = temps.Acquire(); - __ Pop(scratch); + Register builtin = temps.Acquire(); + __ Pop(builtin); __ add(sp, sp, Operand(BuiltinContinuationFrameConstants::kFixedFrameSizeFromFp)); __ Pop(lr); - __ add(pc, scratch, Operand(Code::kHeaderSize - kHeapObjectTag)); + __ LoadEntryFromBuiltinIndex(builtin); + __ bx(builtin); } } // namespace @@ -2577,7 +2580,7 @@ void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size, __ tst(sp, Operand(frame_alignment_mask)); __ b(eq, &alignment_as_expected); // Don't use Check here, as it will call Runtime_Abort re-entering here. - __ stop("Unexpected alignment"); + __ stop(); __ bind(&alignment_as_expected); } } @@ -2606,7 +2609,7 @@ void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size, __ CompareRoot(r3, RootIndex::kTheHoleValue); // Cannot use check here as it attempts to generate call into runtime. __ b(eq, &okay); - __ stop("Unexpected pending exception"); + __ stop(); __ bind(&okay); } @@ -2835,19 +2838,25 @@ void CallApiFunctionAndReturn(MacroAssembler* masm, Register function_address, DCHECK(function_address == r1 || function_address == r2); - Label profiler_disabled; - Label end_profiler_check; + Label profiler_enabled, end_profiler_check; __ Move(r9, ExternalReference::is_profiling_address(isolate)); __ ldrb(r9, MemOperand(r9, 0)); __ cmp(r9, Operand(0)); - __ b(eq, &profiler_disabled); - - // Additional parameter is the address of the actual callback. - __ Move(r3, thunk_ref); - __ jmp(&end_profiler_check); - - __ bind(&profiler_disabled); - __ Move(r3, function_address); + __ b(ne, &profiler_enabled); + __ Move(r9, ExternalReference::address_of_runtime_stats_flag()); + __ ldr(r9, MemOperand(r9, 0)); + __ cmp(r9, Operand(0)); + __ b(ne, &profiler_enabled); + { + // Call the api function directly. + __ Move(r3, function_address); + __ b(&end_profiler_check); + } + __ bind(&profiler_enabled); + { + // Additional parameter is the address of the actual callback. + __ Move(r3, thunk_ref); + } __ bind(&end_profiler_check); // Allocate HandleScope in callee-save registers. diff --git a/deps/v8/src/builtins/arm64/builtins-arm64.cc b/deps/v8/src/builtins/arm64/builtins-arm64.cc index f81a1955eeb474..bcee8f0b5dcbbe 100644 --- a/deps/v8/src/builtins/arm64/builtins-arm64.cc +++ b/deps/v8/src/builtins/arm64/builtins-arm64.cc @@ -1201,10 +1201,10 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) { // 8-bit fields next to each other, so we could just optimize by writing a // 16-bit. These static asserts guard our assumption is valid. STATIC_ASSERT(BytecodeArray::kBytecodeAgeOffset == - BytecodeArray::kOSRNestingLevelOffset + kCharSize); + BytecodeArray::kOsrNestingLevelOffset + kCharSize); STATIC_ASSERT(BytecodeArray::kNoAgeBytecodeAge == 0); __ Strh(wzr, FieldMemOperand(kInterpreterBytecodeArrayRegister, - BytecodeArray::kOSRNestingLevelOffset)); + BytecodeArray::kOsrNestingLevelOffset)); // Load the initial bytecode offset. __ Mov(kInterpreterBytecodeOffsetRegister, @@ -1683,18 +1683,20 @@ void Generate_ContinueToBuiltinHelper(MacroAssembler* masm, if (java_script_builtin) __ SmiUntag(kJavaScriptCallArgCountRegister); - // Load builtin object. + // Load builtin index (stored as a Smi) and use it to get the builtin start + // address from the builtins table. 
UseScratchRegisterScope temps(masm); Register builtin = temps.AcquireX(); - __ Ldr(builtin, - MemOperand(fp, BuiltinContinuationFrameConstants::kBuiltinOffset)); + __ Ldr( + builtin, + MemOperand(fp, BuiltinContinuationFrameConstants::kBuiltinIndexOffset)); // Restore fp, lr. __ Mov(sp, fp); __ Pop(fp, lr); - // Call builtin. - __ JumpCodeObject(builtin); + __ LoadEntryFromBuiltinIndex(builtin); + __ Jump(builtin); } } // namespace @@ -3400,16 +3402,23 @@ void CallApiFunctionAndReturn(MacroAssembler* masm, Register function_address, DCHECK(function_address.is(x1) || function_address.is(x2)); - Label profiler_disabled; - Label end_profiler_check; + Label profiler_enabled, end_profiler_check; __ Mov(x10, ExternalReference::is_profiling_address(isolate)); __ Ldrb(w10, MemOperand(x10)); - __ Cbz(w10, &profiler_disabled); - __ Mov(x3, thunk_ref); - __ B(&end_profiler_check); - - __ Bind(&profiler_disabled); - __ Mov(x3, function_address); + __ Cbnz(w10, &profiler_enabled); + __ Mov(x10, ExternalReference::address_of_runtime_stats_flag()); + __ Ldrsw(w10, MemOperand(x10)); + __ Cbnz(w10, &profiler_enabled); + { + // Call the api function directly. + __ Mov(x3, function_address); + __ B(&end_profiler_check); + } + __ Bind(&profiler_enabled); + { + // Additional parameter is the address of the actual callback. + __ Mov(x3, thunk_ref); + } __ Bind(&end_profiler_check); // Save the callee-save registers we are going to use. diff --git a/deps/v8/src/builtins/array-copywithin.tq b/deps/v8/src/builtins/array-copywithin.tq index bfc95a28bf46df..94d871e8f74c13 100644 --- a/deps/v8/src/builtins/array-copywithin.tq +++ b/deps/v8/src/builtins/array-copywithin.tq @@ -9,7 +9,7 @@ namespace array_copywithin { // https://tc39.github.io/ecma262/#sec-array.prototype.copyWithin transitioning javascript builtin ArrayPrototypeCopyWithin( - context: Context, receiver: Object, ...arguments): Object { + js-implicit context: Context, receiver: Object)(...arguments): Object { // 1. Let O be ? ToObject(this value). const object: JSReceiver = ToObject_Inline(context, receiver); diff --git a/deps/v8/src/builtins/array-every.tq b/deps/v8/src/builtins/array-every.tq index 245b07556cba3a..3451cd769b92e7 100644 --- a/deps/v8/src/builtins/array-every.tq +++ b/deps/v8/src/builtins/array-every.tq @@ -4,8 +4,9 @@ namespace array { transitioning javascript builtin - ArrayEveryLoopEagerDeoptContinuation(implicit context: Context)( - receiver: Object, callback: Object, thisArg: Object, initialK: Object, + ArrayEveryLoopEagerDeoptContinuation( + js-implicit context: Context, receiver: Object)( + callback: Object, thisArg: Object, initialK: Object, length: Object): Object { // All continuation points in the optimized every implementation are // after the ToObject(O) call that ensures we are dealing with a @@ -25,9 +26,10 @@ namespace array { } transitioning javascript builtin - ArrayEveryLoopLazyDeoptContinuation(implicit context: Context)( - receiver: Object, callback: Object, thisArg: Object, initialK: Object, - length: Object, result: Object): Object { + ArrayEveryLoopLazyDeoptContinuation( + js-implicit context: Context, receiver: Object)( + callback: Object, thisArg: Object, initialK: Object, length: Object, + result: Object): Object { // All continuation points in the optimized every implementation are // after the ToObject(O) call that ensures we are dealing with a // JSReceiver. 
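The Torque builtins above split each array iteration into an entry builtin plus a "loop continuation" that optimized code can deopt into at any index `k`; the continuation re-establishes only invariants guaranteed at its entry points (here, that the receiver is already a `JSReceiver`, because every continuation point sits after the `ToObject` call). The control-flow shape, reduced to plain C++ with an `every`-style predicate loop (names hypothetical):

```cpp
#include <cstddef>
#include <iostream>
#include <vector>

using Predicate = bool (*)(int element, size_t index);

// The "continuation": resumable from any index k, with all state explicit.
bool EveryLoopContinuation(const std::vector<int>& o, Predicate fn,
                           size_t initial_k, size_t length) {
  for (size_t k = initial_k; k < length; k++) {
    if (!fn(o[k], k)) return false;  // early exit, like Array.prototype.every
  }
  return true;
}

// The entry point validates its arguments once, then enters the loop at k = 0.
bool Every(const std::vector<int>& o, Predicate fn) {
  return EveryLoopContinuation(o, fn, 0, o.size());
}

int main() {
  std::vector<int> v = {2, 4, 6};
  std::cout << Every(v, [](int e, size_t) { return e % 2 == 0; }) << '\n';  // 1
  // An optimized caller that bailed out at k = 1 would resume like this:
  std::cout << EveryLoopContinuation(v, [](int e, size_t) { return e > 3; }, 1,
                                     v.size())
            << '\n';  // 1
}
```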
@@ -51,9 +53,9 @@ namespace array { } transitioning builtin ArrayEveryLoopContinuation(implicit context: Context)( - receiver: JSReceiver, callbackfn: Callable, thisArg: Object, - array: Object, o: JSReceiver, initialK: Number, length: Number, - initialTo: Object): Object { + _receiver: JSReceiver, callbackfn: Callable, thisArg: Object, + _array: Object, o: JSReceiver, initialK: Number, length: Number, + _initialTo: Object): Object { // 5. Let k be 0. // 6. Repeat, while k < len for (let k: Number = initialK; k < length; k++) { @@ -88,7 +90,7 @@ namespace array { labels Bailout(Smi) { let k: Smi = 0; const smiLen = Cast(len) otherwise goto Bailout(k); - let fastO: FastJSArray = Cast(o) otherwise goto Bailout(k); + const fastO: FastJSArray = Cast(o) otherwise goto Bailout(k); let fastOW = NewFastJSArrayWitness(fastO); // Build a fast loop over the smi array. @@ -109,12 +111,10 @@ namespace array { // https://tc39.github.io/ecma262/#sec-array.prototype.every transitioning javascript builtin - ArrayEvery(implicit context: Context)(receiver: Object, ...arguments): + ArrayEvery(js-implicit context: Context, receiver: Object)(...arguments): Object { try { - if (IsNullOrUndefined(receiver)) { - goto NullOrUndefinedError; - } + RequireObjectCoercible(receiver, 'Array.prototype.every'); // 1. Let O be ? ToObject(this value). const o: JSReceiver = ToObject_Inline(context, receiver); @@ -144,8 +144,5 @@ namespace array { label TypeError deferred { ThrowTypeError(kCalledNonCallable, arguments[0]); } - label NullOrUndefinedError deferred { - ThrowTypeError(kCalledOnNullOrUndefined, 'Array.prototype.every'); - } } } diff --git a/deps/v8/src/builtins/array-filter.tq b/deps/v8/src/builtins/array-filter.tq index 4bf175a787aefe..9acd0d04ee3cd7 100644 --- a/deps/v8/src/builtins/array-filter.tq +++ b/deps/v8/src/builtins/array-filter.tq @@ -4,9 +4,10 @@ namespace array_filter { transitioning javascript builtin - ArrayFilterLoopEagerDeoptContinuation(implicit context: Context)( - receiver: Object, callback: Object, thisArg: Object, array: Object, - initialK: Object, length: Object, initialTo: Object): Object { + ArrayFilterLoopEagerDeoptContinuation( + js-implicit context: Context, receiver: Object)( + callback: Object, thisArg: Object, array: Object, initialK: Object, + length: Object, initialTo: Object): Object { // All continuation points in the optimized filter implementation are // after the ToObject(O) call that ensures we are dealing with a // JSReceiver. @@ -27,9 +28,10 @@ namespace array_filter { } transitioning javascript builtin - ArrayFilterLoopLazyDeoptContinuation(implicit context: Context)( - receiver: Object, callback: Object, thisArg: Object, array: Object, - initialK: Object, length: Object, valueK: Object, initialTo: Object, + ArrayFilterLoopLazyDeoptContinuation( + js-implicit context: Context, receiver: Object)( + callback: Object, thisArg: Object, array: Object, initialK: Object, + length: Object, valueK: Object, initialTo: Object, result: Object): Object { // All continuation points in the optimized filter implementation are // after the ToObject(O) call that ensures we are dealing with a @@ -42,9 +44,9 @@ namespace array_filter { const numberLength = Cast(length) otherwise unreachable; // This custom lazy deopt point is right after the callback. filter() needs - // to pick up at the next step, which is setting the callback result in - // the output array. After incrementing k and to, we can glide into the loop - // continuation builtin. 
+ // to pick up at the next step, which is setting the callback + // result in the output array. After incrementing k and to, we can glide + // into the loop continuation builtin. if (ToBoolean(result)) { FastCreateDataProperty(outputArray, numberTo, valueK); numberTo = numberTo + 1; @@ -58,7 +60,7 @@ namespace array_filter { } transitioning builtin ArrayFilterLoopContinuation(implicit context: Context)( - receiver: JSReceiver, callbackfn: Callable, thisArg: Object, + _receiver: JSReceiver, callbackfn: Callable, thisArg: Object, array: JSReceiver, o: JSReceiver, initialK: Number, length: Number, initialTo: Number): Object { let to: Number = initialTo; @@ -145,12 +147,10 @@ namespace array_filter { // https://tc39.github.io/ecma262/#sec-array.prototype.filter transitioning javascript builtin - ArrayFilter(implicit context: Context)(receiver: Object, ...arguments): + ArrayFilter(js-implicit context: Context, receiver: Object)(...arguments): Object { try { - if (IsNullOrUndefined(receiver)) { - goto NullOrUndefinedError; - } + RequireObjectCoercible(receiver, 'Array.prototype.filter'); // 1. Let O be ? ToObject(this value). const o: JSReceiver = ToObject_Inline(context, receiver); @@ -199,8 +199,5 @@ namespace array_filter { label TypeError deferred { ThrowTypeError(kCalledNonCallable, arguments[0]); } - label NullOrUndefinedError deferred { - ThrowTypeError(kCalledOnNullOrUndefined, 'Array.prototype.filter'); - } } } diff --git a/deps/v8/src/builtins/array-find.tq b/deps/v8/src/builtins/array-find.tq index 28223e4c492bdc..ef54dd4666ef72 100644 --- a/deps/v8/src/builtins/array-find.tq +++ b/deps/v8/src/builtins/array-find.tq @@ -4,8 +4,9 @@ namespace array_find { transitioning javascript builtin - ArrayFindLoopEagerDeoptContinuation(implicit context: Context)( - receiver: Object, callback: Object, thisArg: Object, initialK: Object, + ArrayFindLoopEagerDeoptContinuation( + js-implicit context: Context, receiver: Object)( + callback: Object, thisArg: Object, initialK: Object, length: Object): Object { // All continuation points in the optimized find implementation are // after the ToObject(O) call that ensures we are dealing with a @@ -24,9 +25,10 @@ namespace array_find { } transitioning javascript builtin - ArrayFindLoopLazyDeoptContinuation(implicit context: Context)( - receiver: Object, callback: Object, thisArg: Object, initialK: Object, - length: Object, result: Object): Object { + ArrayFindLoopLazyDeoptContinuation( + js-implicit context: Context, receiver: Object)( + _callback: Object, _thisArg: Object, _initialK: Object, _length: Object, + _result: Object): Object { // This deopt continuation point is never actually called, it just // exists to make stack traces correct from a ThrowTypeError if the // callback was found to be non-callable. @@ -37,15 +39,16 @@ namespace array_find { // happens right after the callback and it's returned value must be handled // before iteration continues. 
transitioning javascript builtin - ArrayFindLoopAfterCallbackLazyDeoptContinuation(implicit context: Context)( - receiver: Object, callback: Object, thisArg: Object, initialK: Object, - length: Object, foundValue: Object, isFound: Object): Object { + ArrayFindLoopAfterCallbackLazyDeoptContinuation( + js-implicit context: Context, receiver: Object)( + callback: Object, thisArg: Object, initialK: Object, length: Object, + foundValue: Object, isFound: Object): Object { // All continuation points in the optimized find implementation are // after the ToObject(O) call that ensures we are dealing with a // JSReceiver. const jsreceiver = Cast(receiver) otherwise unreachable; const callbackfn = Cast(callback) otherwise unreachable; - let numberK = Cast(initialK) otherwise unreachable; + const numberK = Cast(initialK) otherwise unreachable; const numberLength = Cast(length) otherwise unreachable; // This custom lazy deopt point is right after the callback. find() needs @@ -62,7 +65,7 @@ namespace array_find { } transitioning builtin ArrayFindLoopContinuation(implicit context: Context)( - receiver: JSReceiver, callbackfn: Callable, thisArg: Object, + _receiver: JSReceiver, callbackfn: Callable, thisArg: Object, o: JSReceiver, initialK: Number, length: Number): Object { // 5. Let k be 0. // 6. Repeat, while k < len @@ -116,12 +119,10 @@ namespace array_find { // https://tc39.github.io/ecma262/#sec-array.prototype.find transitioning javascript builtin - ArrayPrototypeFind(implicit context: Context)(receiver: Object, ...arguments): - Object { + ArrayPrototypeFind(js-implicit context: Context, receiver: Object)( + ...arguments): Object { try { - if (IsNullOrUndefined(receiver)) { - goto NullOrUndefinedError; - } + RequireObjectCoercible(receiver, 'Array.prototype.find'); // 1. Let O be ? ToObject(this value). const o: JSReceiver = ToObject_Inline(context, receiver); @@ -151,8 +152,5 @@ namespace array_find { label NotCallableError deferred { ThrowTypeError(kCalledNonCallable, arguments[0]); } - label NullOrUndefinedError deferred { - ThrowTypeError(kCalledOnNullOrUndefined, 'Array.prototype.find'); - } } } diff --git a/deps/v8/src/builtins/array-findindex.tq b/deps/v8/src/builtins/array-findindex.tq index 00d8378dfa6979..5a8bb85fbadd4c 100644 --- a/deps/v8/src/builtins/array-findindex.tq +++ b/deps/v8/src/builtins/array-findindex.tq @@ -4,8 +4,9 @@ namespace array_findindex { transitioning javascript builtin - ArrayFindIndexLoopEagerDeoptContinuation(implicit context: Context)( - receiver: Object, callback: Object, thisArg: Object, initialK: Object, + ArrayFindIndexLoopEagerDeoptContinuation( + js-implicit context: Context, receiver: Object)( + callback: Object, thisArg: Object, initialK: Object, length: Object): Object { // All continuation points in the optimized findIndex implementation are // after the ToObject(O) call that ensures we are dealing with a @@ -24,9 +25,10 @@ namespace array_findindex { } transitioning javascript builtin - ArrayFindIndexLoopLazyDeoptContinuation(implicit context: Context)( - receiver: Object, callback: Object, thisArg: Object, initialK: Object, - length: Object, result: Object): Object { + ArrayFindIndexLoopLazyDeoptContinuation( + js-implicit context: Context, receiver: Object)( + _callback: Object, _thisArg: Object, _initialK: Object, _length: Object, + _result: Object): Object { // This deopt continuation point is never actually called, it just // exists to make stack traces correct from a ThrowTypeError if the // callback was found to be non-callable. 
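In `array-every.tq`, `array-filter.tq`, and `array-find.tq` above (and in the remaining `Array.prototype` builtins below), the open-coded `IsNullOrUndefined(receiver)` check and its deferred `NullOrUndefinedError` label are replaced by a single `RequireObjectCoercible(receiver, name)` call. Behaviorally this is the ECMAScript abstract operation of the same name; a small TypeScript sketch, with the error-message wording assumed rather than quoted from V8:

```ts
// Throws for null/undefined receivers, passes everything else through.
function requireObjectCoercible<T>(value: T, method: string): NonNullable<T> {
  if (value === null || value === undefined) {
    throw new TypeError(`${method} called on null or undefined`);
  }
  return value as NonNullable<T>;
}

// Mirrors the Torque call sites, e.g.:
// requireObjectCoercible(receiver, 'Array.prototype.every');
```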
@@ -37,16 +39,16 @@ namespace array_findindex { // happens right after the callback and it's returned value must be handled // before iteration continues. transitioning javascript builtin - ArrayFindIndexLoopAfterCallbackLazyDeoptContinuation(implicit context: - Context)( - receiver: Object, callback: Object, thisArg: Object, initialK: Object, - length: Object, foundValue: Object, isFound: Object): Object { + ArrayFindIndexLoopAfterCallbackLazyDeoptContinuation( + js-implicit context: Context, receiver: Object)( + callback: Object, thisArg: Object, initialK: Object, length: Object, + foundValue: Object, isFound: Object): Object { // All continuation points in the optimized findIndex implementation are // after the ToObject(O) call that ensures we are dealing with a // JSReceiver. const jsreceiver = Cast(receiver) otherwise unreachable; const callbackfn = Cast(callback) otherwise unreachable; - let numberK = Cast(initialK) otherwise unreachable; + const numberK = Cast(initialK) otherwise unreachable; const numberLength = Cast(length) otherwise unreachable; // This custom lazy deopt point is right after the callback. find() needs @@ -64,7 +66,7 @@ namespace array_findindex { transitioning builtin ArrayFindIndexLoopContinuation(implicit context: Context)( - receiver: JSReceiver, callbackfn: Callable, thisArg: Object, + _receiver: JSReceiver, callbackfn: Callable, thisArg: Object, o: JSReceiver, initialK: Number, length: Number): Number { // 5. Let k be 0. // 6. Repeat, while k < len @@ -118,12 +120,10 @@ namespace array_findindex { // https://tc39.github.io/ecma262/#sec-array.prototype.findIndex transitioning javascript builtin - ArrayPrototypeFindIndex(implicit context: - Context)(receiver: Object, ...arguments): Object { + ArrayPrototypeFindIndex(js-implicit context: Context, receiver: Object)( + ...arguments): Object { try { - if (IsNullOrUndefined(receiver)) { - goto NullOrUndefinedError; - } + RequireObjectCoercible(receiver, 'Array.prototype.findIndex'); // 1. Let O be ? ToObject(this value). 
const o: JSReceiver = ToObject_Inline(context, receiver); @@ -154,8 +154,5 @@ namespace array_findindex { label NotCallableError deferred { ThrowTypeError(kCalledNonCallable, arguments[0]); } - label NullOrUndefinedError deferred { - ThrowTypeError(kCalledOnNullOrUndefined, 'Array.prototype.findIndex'); - } } } diff --git a/deps/v8/src/builtins/array-foreach.tq b/deps/v8/src/builtins/array-foreach.tq index d362e95950dc62..f52d944291ec7f 100644 --- a/deps/v8/src/builtins/array-foreach.tq +++ b/deps/v8/src/builtins/array-foreach.tq @@ -4,8 +4,9 @@ namespace array_foreach { transitioning javascript builtin - ArrayForEachLoopEagerDeoptContinuation(implicit context: Context)( - receiver: Object, callback: Object, thisArg: Object, initialK: Object, + ArrayForEachLoopEagerDeoptContinuation( + js-implicit context: Context, receiver: Object)( + callback: Object, thisArg: Object, initialK: Object, length: Object): Object { // All continuation points in the optimized forEach implemntation are // after the ToObject(O) call that ensures we are dealing with a @@ -21,9 +22,10 @@ namespace array_foreach { } transitioning javascript builtin - ArrayForEachLoopLazyDeoptContinuation(implicit context: Context)( - receiver: Object, callback: Object, thisArg: Object, initialK: Object, - length: Object, result: Object): Object { + ArrayForEachLoopLazyDeoptContinuation( + js-implicit context: Context, receiver: Object)( + callback: Object, thisArg: Object, initialK: Object, length: Object, + _result: Object): Object { // All continuation points in the optimized forEach implemntation are // after the ToObject(O) call that ensures we are dealing with a // JSReceiver. @@ -38,9 +40,9 @@ namespace array_foreach { } transitioning builtin ArrayForEachLoopContinuation(implicit context: Context)( - receiver: JSReceiver, callbackfn: Callable, thisArg: Object, - array: Object, o: JSReceiver, initialK: Number, len: Number, - to: Object): Object { + _receiver: JSReceiver, callbackfn: Callable, thisArg: Object, + _array: Object, o: JSReceiver, initialK: Number, len: Number, + _to: Object): Object { // variables {array} and {to} are ignored. // 5. Let k be 0. @@ -72,7 +74,7 @@ namespace array_foreach { labels Bailout(Smi) { let k: Smi = 0; const smiLen = Cast(len) otherwise goto Bailout(k); - let fastO = Cast(o) otherwise goto Bailout(k); + const fastO = Cast(o) otherwise goto Bailout(k); let fastOW = NewFastJSArrayWitness(fastO); // Build a fast loop over the smi array. @@ -90,11 +92,10 @@ namespace array_foreach { // https://tc39.github.io/ecma262/#sec-array.prototype.foreach transitioning javascript builtin - ArrayForEach(context: Context, receiver: Object, ...arguments): Object { + ArrayForEach(js-implicit context: Context, receiver: Object)(...arguments): + Object { try { - if (IsNullOrUndefined(receiver)) { - goto NullOrUndefinedError; - } + RequireObjectCoercible(receiver, 'Array.prototype.forEach'); // 1. Let O be ? ToObject(this value). 
const o: JSReceiver = ToObject_Inline(context, receiver); @@ -127,8 +128,5 @@ namespace array_foreach { label TypeError deferred { ThrowTypeError(kCalledNonCallable, arguments[0]); } - label NullOrUndefinedError deferred { - ThrowTypeError(kCalledOnNullOrUndefined, 'Array.prototype.forEach'); - } } } diff --git a/deps/v8/src/builtins/array-join.tq b/deps/v8/src/builtins/array-join.tq index 72e1a3661ecb33..c04233b22244ec 100644 --- a/deps/v8/src/builtins/array-join.tq +++ b/deps/v8/src/builtins/array-join.tq @@ -37,7 +37,7 @@ namespace array_join { const array: JSArray = UnsafeCast(receiver); const fixedArray: FixedArray = UnsafeCast(array.elements); const element: Object = fixedArray.objects[UnsafeCast(k)]; - return element == Hole ? kEmptyString : element; + return element == TheHole ? kEmptyString : element; } LoadJoinElement( @@ -56,7 +56,7 @@ namespace array_join { assert(!IsDetachedBuffer(typedArray.buffer)); return typed_array::LoadFixedTypedArrayElementAsTagged( typedArray.data_ptr, UnsafeCast(k), - typed_array::KindForArrayType(), SMI_PARAMETERS); + typed_array::KindForArrayType()); } transitioning builtin ConvertToLocaleString( @@ -103,8 +103,8 @@ namespace array_join { } CannotUseSameArrayAccessor(implicit context: Context)( - loadFn: LoadJoinElementFn, receiver: JSReceiver, initialMap: Map, - initialLen: Number): never + _loadFn: LoadJoinElementFn, receiver: JSReceiver, _initialMap: Map, + _initialLen: Number): never labels Cannot, Can { const typedArray: JSTypedArray = UnsafeCast(receiver); if (IsDetachedBuffer(typedArray.buffer)) goto Cannot; @@ -246,7 +246,7 @@ namespace array_join { case (nofSeparators: Number): { return StringRepeat(context, sep, nofSeparators); } - case (obj: Object): { + case (Object): { unreachable; } } @@ -448,7 +448,7 @@ namespace array_join { const previouslyVisited: Object = stack.objects[i]; // Add `receiver` to the first open slot - if (previouslyVisited == Hole) { + if (previouslyVisited == TheHole) { stack.objects[i] = receiver; return True; } @@ -473,7 +473,7 @@ namespace array_join { try { const stack: FixedArray = LoadJoinStack() otherwise IfUninitialized; - if (stack.objects[0] == Hole) { + if (stack.objects[0] == TheHole) { stack.objects[0] = receiver; } else if (JoinStackPush(stack, receiver) == False) deferred { @@ -504,7 +504,7 @@ namespace array_join { SetJoinStack(newStack); } else { - stack.objects[i] = Hole; + stack.objects[i] = TheHole; } return Undefined; } @@ -521,7 +521,7 @@ namespace array_join { // Builtin call was not nested (receiver is the first entry) and // did not contain other nested arrays that expanded the stack. if (stack.objects[0] == receiver && len == kMinJoinStackSize) { - StoreFixedArrayElement(stack, 0, Hole, SKIP_WRITE_BARRIER); + StoreFixedArrayElement(stack, 0, TheHole, SKIP_WRITE_BARRIER); } else deferred { JoinStackPop(stack, receiver); @@ -535,7 +535,7 @@ namespace array_join { sepObj: Object, locales: Object, options: Object): Object { // 3. If separator is undefined, let sep be the single-element String ",". // 4. Else, let sep be ? ToString(separator). - let sep: String = + const sep: String = sepObj == Undefined ? 
',' : ToString_Inline(context, sepObj); // If the receiver is not empty and not already being joined, continue with @@ -557,7 +557,8 @@ namespace array_join { // https://tc39.github.io/ecma262/#sec-array.prototype.join transitioning javascript builtin - ArrayPrototypeJoin(context: Context, receiver: Object, ...arguments): Object { + ArrayPrototypeJoin(js-implicit context: Context, receiver: Object)( + ...arguments): Object { const separator: Object = arguments[0]; // 1. Let O be ? ToObject(this value). @@ -566,8 +567,8 @@ namespace array_join { // 2. Let len be ? ToLength(? Get(O, "length")). const len: Number = GetLengthProperty(o); - // Only handle valid array lengths. Although the spec allows larger values, - // this matches historical V8 behavior. + // Only handle valid array lengths. Although the spec allows larger + // values, this matches historical V8 behavior. if (len > kMaxArrayIndex + 1) ThrowTypeError(kInvalidArrayLength); return CycleProtectedArrayJoin( @@ -576,7 +577,7 @@ namespace array_join { // https://tc39.github.io/ecma262/#sec-array.prototype.tolocalestring transitioning javascript builtin ArrayPrototypeToLocaleString( - context: Context, receiver: Object, ...arguments): Object { + js-implicit context: Context, receiver: Object)(...arguments): Object { const locales: Object = arguments[0]; const options: Object = arguments[1]; @@ -586,8 +587,8 @@ namespace array_join { // 2. Let len be ? ToLength(? Get(O, "length")). const len: Number = GetLengthProperty(o); - // Only handle valid array lengths. Although the spec allows larger values, - // this matches historical V8 behavior. + // Only handle valid array lengths. Although the spec allows larger + // values, this matches historical V8 behavior. if (len > kMaxArrayIndex + 1) ThrowTypeError(kInvalidArrayLength); return CycleProtectedArrayJoin( @@ -596,7 +597,7 @@ namespace array_join { // https://tc39.github.io/ecma262/#sec-array.prototype.tostring transitioning javascript builtin ArrayPrototypeToString( - context: Context, receiver: Object, ...arguments): Object { + js-implicit context: Context, receiver: Object)(...arguments): Object { // 1. Let array be ? ToObject(this value). 
const array: JSReceiver = ToObject_Inline(context, receiver); @@ -617,7 +618,7 @@ namespace array_join { // https://tc39.github.io/ecma262/#sec-%typedarray%.prototype.join transitioning javascript builtin TypedArrayPrototypeJoin( - context: Context, receiver: Object, ...arguments): Object { + js-implicit context: Context, receiver: Object)(...arguments): Object { const separator: Object = arguments[0]; // Spec: ValidateTypedArray is applied to the this value prior to evaluating @@ -632,7 +633,7 @@ namespace array_join { // https://tc39.github.io/ecma262/#sec-%typedarray%.prototype.tolocalestring transitioning javascript builtin TypedArrayPrototypeToLocaleString( - context: Context, receiver: Object, ...arguments): Object { + js-implicit context: Context, receiver: Object)(...arguments): Object { const locales: Object = arguments[0]; const options: Object = arguments[1]; diff --git a/deps/v8/src/builtins/array-lastindexof.tq b/deps/v8/src/builtins/array-lastindexof.tq index d6213157dc1eff..5ebc451e435117 100644 --- a/deps/v8/src/builtins/array-lastindexof.tq +++ b/deps/v8/src/builtins/array-lastindexof.tq @@ -12,7 +12,7 @@ namespace array_lastindexof { labels IfHole { const elements: FixedArray = UnsafeCast(elements); const element: Object = elements.objects[index]; - if (element == Hole) goto IfHole; + if (element == TheHole) goto IfHole; return element; } @@ -131,7 +131,7 @@ namespace array_lastindexof { // https://tc39.github.io/ecma262/#sec-array.prototype.lastIndexOf transitioning javascript builtin ArrayPrototypeLastIndexOf( - context: Context, receiver: Object, ...arguments): Object { + js-implicit context: Context, receiver: Object)(...arguments): Object { // 1. Let O be ? ToObject(this value). const object: JSReceiver = ToObject_Inline(context, receiver); diff --git a/deps/v8/src/builtins/array-map.tq b/deps/v8/src/builtins/array-map.tq index 7546f1cd00542d..dda569c68236b3 100644 --- a/deps/v8/src/builtins/array-map.tq +++ b/deps/v8/src/builtins/array-map.tq @@ -4,9 +4,10 @@ namespace array_map { transitioning javascript builtin - ArrayMapLoopEagerDeoptContinuation(implicit context: Context)( - receiver: Object, callback: Object, thisArg: Object, array: Object, - initialK: Object, length: Object): Object { + ArrayMapLoopEagerDeoptContinuation( + js-implicit context: Context, receiver: Object)( + callback: Object, thisArg: Object, array: Object, initialK: Object, + length: Object): Object { // All continuation points in the optimized filter implementation are // after the ToObject(O) call that ensures we are dealing with a // JSReceiver. @@ -26,9 +27,10 @@ namespace array_map { } transitioning javascript builtin - ArrayMapLoopLazyDeoptContinuation(implicit context: Context)( - receiver: Object, callback: Object, thisArg: Object, array: Object, - initialK: Object, length: Object, result: Object): Object { + ArrayMapLoopLazyDeoptContinuation( + js-implicit context: Context, receiver: Object)( + callback: Object, thisArg: Object, array: Object, initialK: Object, + length: Object, result: Object): Object { // All continuation points in the optimized filter implementation are // after the ToObject(O) call that ensures we are dealing with a // JSReceiver. @@ -55,7 +57,7 @@ namespace array_map { } transitioning builtin ArrayMapLoopContinuation(implicit context: Context)( - receiver: JSReceiver, callbackfn: Callable, thisArg: Object, + _receiver: JSReceiver, callbackfn: Callable, thisArg: Object, array: JSReceiver, o: JSReceiver, initialK: Number, length: Number): Object { // 6. Let k be 0. 
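The `array-join.tq` hunks above also rename the `Hole` sentinel to `TheHole` throughout the join stack, the structure that guards `Array.prototype.join` against cyclic arrays. An illustrative TypeScript model of that mechanism, with the hole slot modeled as `undefined` and the receiver-already-present case inferred from the `JoinStackPush(...) == False` caller (names are illustrative, not the Torque ones):

```ts
const joinStack: (object | undefined)[] = [];

// Record that `receiver` is being joined; false signals a cycle.
function joinStackPush(receiver: object): boolean {
  for (let i = 0; i < joinStack.length; i++) {
    if (joinStack[i] === undefined) {   // first open (hole) slot
      joinStack[i] = receiver;
      return true;
    }
    if (joinStack[i] === receiver) {    // re-entrant join: cycle detected
      return false;
    }
  }
  joinStack.push(receiver);
  return true;
}

// Reopen the slot once the join completes (stores the hole back).
function joinStackPop(receiver: object): void {
  const i = joinStack.indexOf(receiver);
  if (i >= 0) joinStack[i] = undefined;
}
```

A cyclic join then simply contributes the empty string for the nested occurrence instead of recursing forever.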
@@ -94,7 +96,7 @@ namespace array_map { } CreateJSArray(implicit context: Context)(validLength: Smi): JSArray { - let length: Smi = this.fixedArray.length; + const length: Smi = this.fixedArray.length; assert(validLength <= length); let kind: ElementsKind = PACKED_SMI_ELEMENTS; if (!this.onlySmis) { @@ -114,7 +116,7 @@ namespace array_map { kind = FastHoleyElementsKind(kind); } - let map: Map = LoadJSArrayElementsMap(kind, LoadNativeContext(context)); + const map: Map = LoadJSArrayElementsMap(kind, LoadNativeContext(context)); let a: JSArray; if (IsDoubleElementsKind(kind)) { @@ -130,7 +132,7 @@ namespace array_map { elements.floats[i] = Convert(n); } case (h: HeapObject): { - assert(h == Hole); + assert(h == TheHole); } } } @@ -182,11 +184,11 @@ namespace array_map { } transitioning macro FastArrayMap(implicit context: Context)( - fastO: FastJSArray, len: Smi, callbackfn: Callable, + fastO: FastJSArrayForRead, len: Smi, callbackfn: Callable, thisArg: Object): JSArray labels Bailout(JSArray, Smi) { let k: Smi = 0; - let fastOW = NewFastJSArrayWitness(fastO); + let fastOW = NewFastJSArrayForReadWitness(fastO); let vector = NewVector(len); // Build a fast loop over the smi array. @@ -220,24 +222,12 @@ namespace array_map { return vector.CreateJSArray(len); } - // Bails out if the slow path needs to be taken. - // It's useful to structure it this way, because the consequences of - // using the slow path on species creation are interesting to the caller. - macro FastMapSpeciesCreate(implicit context: Context)( - receiver: JSReceiver, length: Number): JSArray labels Bailout { - if (IsArraySpeciesProtectorCellInvalid()) goto Bailout; - const o = Cast(receiver) otherwise Bailout; - const smiLength = Cast(length) otherwise Bailout; - const newMap: Map = - LoadJSArrayElementsMap(PACKED_SMI_ELEMENTS, LoadNativeContext(context)); - return AllocateJSArray(PACKED_SMI_ELEMENTS, newMap, smiLength, smiLength); - } - // https://tc39.github.io/ecma262/#sec-array.prototype.map transitioning javascript builtin - ArrayMap(implicit context: Context)(receiver: Object, ...arguments): Object { + ArrayMap(js-implicit context: Context, receiver: Object)(...arguments): + Object { try { - if (IsNullOrUndefined(receiver)) goto NullOrUndefinedError; + RequireObjectCoercible(receiver, 'Array.prototype.map'); // 1. Let O be ? ToObject(this value). const o: JSReceiver = ToObject_Inline(context, receiver); @@ -258,7 +248,7 @@ namespace array_map { try { // 5. Let A be ? ArraySpeciesCreate(O, len). if (IsArraySpeciesProtectorCellInvalid()) goto SlowSpeciesCreate; - const o: FastJSArray = Cast(receiver) + const o: FastJSArrayForRead = Cast(receiver) otherwise SlowSpeciesCreate; const smiLength: Smi = Cast(len) otherwise SlowSpeciesCreate; @@ -279,8 +269,5 @@ namespace array_map { label TypeError deferred { ThrowTypeError(kCalledNonCallable, arguments[0]); } - label NullOrUndefinedError deferred { - ThrowTypeError(kCalledOnNullOrUndefined, 'Array.prototype.map'); - } } } diff --git a/deps/v8/src/builtins/array-of.tq b/deps/v8/src/builtins/array-of.tq index 76123207fd34b0..72933186257231 100644 --- a/deps/v8/src/builtins/array-of.tq +++ b/deps/v8/src/builtins/array-of.tq @@ -5,7 +5,8 @@ namespace array_of { // https://tc39.github.io/ecma262/#sec-array.of transitioning javascript builtin - ArrayOf(implicit context: Context)(receiver: Object, ...arguments): Object { + ArrayOf(js-implicit context: Context, receiver: Object)(...arguments): + Object { // 1. Let len be the actual number of arguments passed to this function. 
const len: Smi = Convert(arguments.length); @@ -35,7 +36,7 @@ namespace array_of { // 7. Repeat, while k < len while (k < len) { // a. Let kValue be items[k]. - let kValue: Object = items[Convert(k)]; + const kValue: Object = items[Convert(k)]; // b. Let Pk be ! ToString(k). // c. Perform ? CreateDataPropertyOrThrow(A, Pk, kValue). diff --git a/deps/v8/src/builtins/array-reduce-right.tq b/deps/v8/src/builtins/array-reduce-right.tq index 33661c38d106c1..b1aa71b85b4623 100644 --- a/deps/v8/src/builtins/array-reduce-right.tq +++ b/deps/v8/src/builtins/array-reduce-right.tq @@ -4,8 +4,9 @@ namespace array { transitioning javascript builtin - ArrayReduceRightPreLoopEagerDeoptContinuation(implicit context: Context)( - receiver: Object, callback: Object, length: Object): Object { + ArrayReduceRightPreLoopEagerDeoptContinuation( + js-implicit context: Context, + receiver: Object)(callback: Object, length: Object): Object { // All continuation points in the optimized every implementation are // after the ToObject(O) call that ensures we are dealing with a // JSReceiver. @@ -21,12 +22,13 @@ namespace array { // the hole. The continuation stub will search for the initial non-hole // element, rightly throwing an exception if not found. return ArrayReduceRightLoopContinuation( - jsreceiver, callbackfn, Hole, jsreceiver, 0, numberLength); + jsreceiver, callbackfn, TheHole, jsreceiver, 0, numberLength); } transitioning javascript builtin - ArrayReduceRightLoopEagerDeoptContinuation(implicit context: Context)( - receiver: Object, callback: Object, initialK: Object, length: Object, + ArrayReduceRightLoopEagerDeoptContinuation( + js-implicit context: Context, receiver: Object)( + callback: Object, initialK: Object, length: Object, accumulator: Object): Object { // All continuation points in the optimized every implementation are // after the ToObject(O) call that ensures we are dealing with a @@ -45,27 +47,28 @@ namespace array { } transitioning javascript builtin - ArrayReduceRightLoopLazyDeoptContinuation(implicit context: Context)( - receiver: Object, callback: Object, initialK: Object, length: Object, + ArrayReduceRightLoopLazyDeoptContinuation( + js-implicit context: Context, receiver: Object)( + callback: Object, initialK: Object, length: Object, result: Object): Object { // All continuation points in the optimized every implementation are // after the ToObject(O) call that ensures we are dealing with a // JSReceiver. const jsreceiver = Cast(receiver) otherwise unreachable; const callbackfn = Cast(callback) otherwise unreachable; - let numberK = Cast(initialK) otherwise unreachable; + const numberK = Cast(initialK) otherwise unreachable; const numberLength = Cast(length) otherwise unreachable; // The accumulator is the result from the callback call which just occured. - let r = ArrayReduceRightLoopContinuation( + const r = ArrayReduceRightLoopContinuation( jsreceiver, callbackfn, result, jsreceiver, numberK, numberLength); return r; } transitioning builtin ArrayReduceRightLoopContinuation(implicit context: Context)( - receiver: JSReceiver, callbackfn: Callable, initialAccumulator: Object, - o: JSReceiver, initialK: Number, length: Number): Object { + _receiver: JSReceiver, callbackfn: Callable, initialAccumulator: Object, + o: JSReceiver, initialK: Number, _length: Number): Object { let accumulator = initialAccumulator; // 8b and 9. Repeat, while k >= 0 @@ -82,7 +85,7 @@ namespace array { // 8b iii and 9c i. Let kValue be ? Get(O, Pk). 
const value: Object = GetProperty(o, k); - if (accumulator == Hole) { + if (accumulator == TheHole) { // 8b iii 1. accumulator = value; } else { @@ -99,7 +102,7 @@ namespace array { // 8c. if kPresent is false, throw a TypeError exception. // If the accumulator is discovered with the sentinel hole value, // this means kPresent is false. - if (accumulator == Hole) { + if (accumulator == TheHole) { ThrowTypeError(kReduceNoInitial, 'Array.prototype.reduceRight'); } return accumulator; @@ -111,9 +114,9 @@ namespace array { labels Bailout(Number, Object) { let accumulator = initialAccumulator; const smiLen = Cast(len) otherwise goto Bailout(len - 1, accumulator); - let fastO = - Cast(o) otherwise goto Bailout(len - 1, accumulator); - let fastOW = NewFastJSArrayWitness(fastO); + const fastO = Cast(o) + otherwise goto Bailout(len - 1, accumulator); + let fastOW = NewFastJSArrayForReadWitness(fastO); // Build a fast loop over the array. for (let k: Smi = smiLen - 1; k >= 0; k--) { @@ -123,7 +126,7 @@ namespace array { if (k >= fastOW.Get().length) goto Bailout(k, accumulator); const value: Object = fastOW.LoadElementNoHole(k) otherwise continue; - if (accumulator == Hole) { + if (accumulator == TheHole) { accumulator = value; } else { accumulator = Call( @@ -131,7 +134,7 @@ namespace array { fastOW.Get()); } } - if (accumulator == Hole) { + if (accumulator == TheHole) { ThrowTypeError(kReduceNoInitial, 'Array.prototype.reduceRight'); } return accumulator; @@ -139,12 +142,10 @@ namespace array { // https://tc39.github.io/ecma262/#sec-array.prototype.reduceRight transitioning javascript builtin - ArrayReduceRight(implicit context: Context)(receiver: Object, ...arguments): - Object { + ArrayReduceRight(js-implicit context: Context, receiver: Object)( + ...arguments): Object { try { - if (IsNullOrUndefined(receiver)) { - goto NullOrUndefinedError; - } + RequireObjectCoercible(receiver, 'Array.prototype.reduceRight'); // 1. Let O be ? ToObject(this value). const o: JSReceiver = ToObject_Inline(context, receiver); @@ -162,7 +163,8 @@ namespace array { // exception. (This case is handled at the end of // ArrayReduceRightLoopContinuation). - const initialValue: Object = arguments.length > 1 ? arguments[1] : Hole; + const initialValue: Object = + arguments.length > 1 ? arguments[1] : TheHole; try { return FastArrayReduceRight(o, len, callbackfn, initialValue) @@ -176,8 +178,5 @@ namespace array { label NoCallableError deferred { ThrowTypeError(kCalledNonCallable, arguments[0]); } - label NullOrUndefinedError deferred { - ThrowTypeError(kCalledOnNullOrUndefined, 'Array.prototype.reduceRight'); - } } } diff --git a/deps/v8/src/builtins/array-reduce.tq b/deps/v8/src/builtins/array-reduce.tq index 67a112fd418878..a5f6feb9ccedf3 100644 --- a/deps/v8/src/builtins/array-reduce.tq +++ b/deps/v8/src/builtins/array-reduce.tq @@ -4,8 +4,9 @@ namespace array { transitioning javascript builtin - ArrayReducePreLoopEagerDeoptContinuation(implicit context: Context)( - receiver: Object, callback: Object, length: Object): Object { + ArrayReducePreLoopEagerDeoptContinuation( + js-implicit context: Context, + receiver: Object)(callback: Object, length: Object): Object { // All continuation points in the optimized every implementation are // after the ToObject(O) call that ensures we are dealing with a // JSReceiver. @@ -21,12 +22,13 @@ namespace array { // the hole. The continuation stub will search for the initial non-hole // element, rightly throwing an exception if not found. 
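Both `ArrayReduce` and `ArrayReduceRight` above use the (now renamed) `TheHole` value as a private sentinel meaning "no initial accumulator yet": the pre-loop continuation seeds the accumulator with it, the first present element replaces it, and reaching the end of the loop with the sentinel still in place raises `kReduceNoInitial`. A TypeScript sketch of that pattern, using a local symbol in place of `TheHole` and ignoring array holes for brevity:

```ts
const theHole = Symbol('hole');  // stand-in for V8's TheHole sentinel

function reduceSketch<T>(
    arr: T[], fn: (acc: T, value: T) => T, ...initial: [T] | []): T {
  // Seed with the sentinel when no initial value was passed.
  let acc: T | typeof theHole = initial.length === 1 ? initial[0] : theHole;
  for (const value of arr) {
    acc = acc === theHole ? value : fn(acc, value);
  }
  if (acc === theHole) {
    // Loop finished without ever finding an element: kReduceNoInitial.
    throw new TypeError('Reduce of empty array with no initial value');
  }
  return acc;
}
```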
return ArrayReduceLoopContinuation( - jsreceiver, callbackfn, Hole, jsreceiver, 0, numberLength); + jsreceiver, callbackfn, TheHole, jsreceiver, 0, numberLength); } transitioning javascript builtin - ArrayReduceLoopEagerDeoptContinuation(implicit context: Context)( - receiver: Object, callback: Object, initialK: Object, length: Object, + ArrayReduceLoopEagerDeoptContinuation( + js-implicit context: Context, receiver: Object)( + callback: Object, initialK: Object, length: Object, accumulator: Object): Object { // All continuation points in the optimized every implementation are // after the ToObject(O) call that ensures we are dealing with a @@ -45,25 +47,26 @@ namespace array { } transitioning javascript builtin - ArrayReduceLoopLazyDeoptContinuation(implicit context: Context)( - receiver: Object, callback: Object, initialK: Object, length: Object, + ArrayReduceLoopLazyDeoptContinuation( + js-implicit context: Context, receiver: Object)( + callback: Object, initialK: Object, length: Object, result: Object): Object { // All continuation points in the optimized every implementation are // after the ToObject(O) call that ensures we are dealing with a // JSReceiver. const jsreceiver = Cast(receiver) otherwise unreachable; const callbackfn = Cast(callback) otherwise unreachable; - let numberK = Cast(initialK) otherwise unreachable; + const numberK = Cast(initialK) otherwise unreachable; const numberLength = Cast(length) otherwise unreachable; // The accumulator is the result from the callback call which just occured. - let r = ArrayReduceLoopContinuation( + const r = ArrayReduceLoopContinuation( jsreceiver, callbackfn, result, jsreceiver, numberK, numberLength); return r; } transitioning builtin ArrayReduceLoopContinuation(implicit context: Context)( - receiver: JSReceiver, callbackfn: Callable, initialAccumulator: Object, + _receiver: JSReceiver, callbackfn: Callable, initialAccumulator: Object, o: JSReceiver, initialK: Number, length: Number): Object { let accumulator = initialAccumulator; @@ -81,7 +84,7 @@ namespace array { // 6c. i. Let kValue be ? Get(O, Pk). const value: Object = GetProperty(o, k); - if (accumulator == Hole) { + if (accumulator == TheHole) { // 8b. accumulator = value; } else { @@ -98,7 +101,7 @@ namespace array { // 8c. if kPresent is false, throw a TypeError exception. // If the accumulator is discovered with the sentinel hole value, // this means kPresent is false. - if (accumulator == Hole) { + if (accumulator == TheHole) { ThrowTypeError(kReduceNoInitial, 'Array.prototype.reduce'); } return accumulator; @@ -110,9 +113,10 @@ namespace array { labels Bailout(Number, Object) { const k = 0; let accumulator = initialAccumulator; - const smiLen = Cast(len) otherwise goto Bailout(k, accumulator); - let fastO = Cast(o) otherwise goto Bailout(k, accumulator); - let fastOW = NewFastJSArrayWitness(fastO); + Cast(len) otherwise goto Bailout(k, accumulator); + const fastO = + Cast(o) otherwise goto Bailout(k, accumulator); + let fastOW = NewFastJSArrayForReadWitness(fastO); // Build a fast loop over the array. 
for (let k: Smi = 0; k < len; k++) { @@ -122,7 +126,7 @@ namespace array { if (k >= fastOW.Get().length) goto Bailout(k, accumulator); const value: Object = fastOW.LoadElementNoHole(k) otherwise continue; - if (accumulator == Hole) { + if (accumulator == TheHole) { accumulator = value; } else { accumulator = Call( @@ -130,7 +134,7 @@ namespace array { fastOW.Get()); } } - if (accumulator == Hole) { + if (accumulator == TheHole) { ThrowTypeError(kReduceNoInitial, 'Array.prototype.reduce'); } return accumulator; @@ -138,12 +142,10 @@ namespace array { // https://tc39.github.io/ecma262/#sec-array.prototype.reduce transitioning javascript builtin - ArrayReduce(implicit context: Context)(receiver: Object, ...arguments): + ArrayReduce(js-implicit context: Context, receiver: Object)(...arguments): Object { try { - if (IsNullOrUndefined(receiver)) { - goto NullOrUndefinedError; - } + RequireObjectCoercible(receiver, 'Array.prototype.reduce'); // 1. Let O be ? ToObject(this value). const o: JSReceiver = ToObject_Inline(context, receiver); @@ -161,7 +163,8 @@ namespace array { // exception. (This case is handled at the end of // ArrayReduceLoopContinuation). - const initialValue: Object = arguments.length > 1 ? arguments[1] : Hole; + const initialValue: Object = + arguments.length > 1 ? arguments[1] : TheHole; try { return FastArrayReduce(o, len, callbackfn, initialValue) @@ -175,8 +178,5 @@ namespace array { label NoCallableError deferred { ThrowTypeError(kCalledNonCallable, arguments[0]); } - label NullOrUndefinedError deferred { - ThrowTypeError(kCalledOnNullOrUndefined, 'Array.prototype.reduce'); - } } } diff --git a/deps/v8/src/builtins/array-reverse.tq b/deps/v8/src/builtins/array-reverse.tq index f1ba8fddf7cafe..82d2e6b6058661 100644 --- a/deps/v8/src/builtins/array-reverse.tq +++ b/deps/v8/src/builtins/array-reverse.tq @@ -165,7 +165,7 @@ namespace array_reverse { // https://tc39.github.io/ecma262/#sec-array.prototype.reverse transitioning javascript builtin ArrayPrototypeReverse( - context: Context, receiver: Object, ...arguments): Object { + js-implicit context: Context, receiver: Object)(...arguments): Object { try { TryFastPackedArrayReverse(receiver) otherwise Baseline; return receiver; diff --git a/deps/v8/src/builtins/array-shift.tq b/deps/v8/src/builtins/array-shift.tq index 3c8c1491bb4dd0..4dd82d7b886d0a 100644 --- a/deps/v8/src/builtins/array-shift.tq +++ b/deps/v8/src/builtins/array-shift.tq @@ -103,7 +103,7 @@ namespace array_shift { // https://tc39.github.io/ecma262/#sec-array.prototype.shift transitioning javascript builtin ArrayPrototypeShift( - implicit context: Context)(receiver: Object, ...arguments): Object { + js-implicit context: Context, receiver: Object)(...arguments): Object { try { return TryFastArrayShift(receiver, arguments) otherwise Slow; } diff --git a/deps/v8/src/builtins/array-slice.tq b/deps/v8/src/builtins/array-slice.tq index 51623294082433..c3a6ac75cb0ec7 100644 --- a/deps/v8/src/builtins/array-slice.tq +++ b/deps/v8/src/builtins/array-slice.tq @@ -63,7 +63,7 @@ namespace array_slice { for (let current: Smi = start; current < to; ++current) { const e: Object = sloppyElements.objects[current + kSloppyArgumentsParameterMapStart]; - const newElement: Object = e != Hole ? + const newElement: Object = e != TheHole ? 
argumentsContext[UnsafeCast(e)] : unmappedElements.objects[current]; // It is safe to skip the write barrier here because resultElements was @@ -105,7 +105,6 @@ namespace array_slice { return ExtractFastJSArray(context, a, start, count); } case (a: JSArgumentsObjectWithLength): { - const nativeContext: NativeContext = LoadNativeContext(context); const map: Map = a.map; if (IsFastAliasedArgumentsMap(map)) { return HandleFastAliasedSloppyArgumentsSlice(context, a, start, count) @@ -123,8 +122,8 @@ namespace array_slice { // https://tc39.github.io/ecma262/#sec-array.prototype.slice transitioning javascript builtin - ArrayPrototypeSlice(context: Context, receiver: Object, ...arguments): - Object { + ArrayPrototypeSlice(js-implicit context: Context, receiver: Object)( + ...arguments): Object { // Handle array cloning case if the receiver is a fast array. if (arguments.length == 0) { typeswitch (receiver) { @@ -186,7 +185,7 @@ namespace array_slice { // 10. Repeat, while k < final while (k < final) { // a. Let Pk be ! ToString(k). - let pK: Number = k; + const pK: Number = k; // b. Let kPresent be ? HasProperty(O, Pk). const fromPresent: Boolean = HasProperty(o, pK); diff --git a/deps/v8/src/builtins/array-some.tq b/deps/v8/src/builtins/array-some.tq index f68ea4ac30be66..a30af4e47a42c4 100644 --- a/deps/v8/src/builtins/array-some.tq +++ b/deps/v8/src/builtins/array-some.tq @@ -4,8 +4,9 @@ namespace array { transitioning javascript builtin - ArraySomeLoopEagerDeoptContinuation(implicit context: Context)( - receiver: Object, callback: Object, thisArg: Object, initialK: Object, + ArraySomeLoopEagerDeoptContinuation( + js-implicit context: Context, receiver: Object)( + callback: Object, thisArg: Object, initialK: Object, length: Object): Object { // All continuation points in the optimized some implementation are // after the ToObject(O) call that ensures we are dealing with a @@ -25,9 +26,10 @@ namespace array { } transitioning javascript builtin - ArraySomeLoopLazyDeoptContinuation(implicit context: Context)( - receiver: Object, callback: Object, thisArg: Object, initialK: Object, - length: Object, result: Object): Object { + ArraySomeLoopLazyDeoptContinuation( + js-implicit context: Context, receiver: Object)( + callback: Object, thisArg: Object, initialK: Object, length: Object, + result: Object): Object { // All continuation points in the optimized some implementation are // after the ToObject(O) call that ensures we are dealing with a // JSReceiver. @@ -51,9 +53,9 @@ namespace array { } transitioning builtin ArraySomeLoopContinuation(implicit context: Context)( - receiver: JSReceiver, callbackfn: Callable, thisArg: Object, - array: Object, o: JSReceiver, initialK: Number, length: Number, - initialTo: Object): Object { + _receiver: JSReceiver, callbackfn: Callable, thisArg: Object, + _array: Object, o: JSReceiver, initialK: Number, length: Number, + _initialTo: Object): Object { // 5. Let k be 0. // 6. Repeat, while k < len for (let k: Number = initialK; k < length; k++) { @@ -88,7 +90,7 @@ namespace array { labels Bailout(Smi) { let k: Smi = 0; const smiLen = Cast(len) otherwise goto Bailout(k); - let fastO = Cast(o) otherwise goto Bailout(k); + const fastO = Cast(o) otherwise goto Bailout(k); let fastOW = NewFastJSArrayWitness(fastO); // Build a fast loop over the smi array. 
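The `FastArraySome`/`FastArrayEvery` loops here (like `FastArrayForEach` and the reduce fast paths above) share one shape: cast the receiver to a fast array or bail out immediately, then re-validate the witness on every iteration and bail out with the current index so the generic continuation resumes exactly where the fast loop stopped. A schematic TypeScript rendering of that contract, with all names illustrative:

```ts
// Fast path with a bailout that hands off the current index `k`.
function someFast<T>(
    arr: T[], pred: (value: T) => boolean,
    stillFast: (arr: T[]) => boolean,
    someSlow: (arr: T[], pred: (value: T) => boolean, from: number) => boolean,
): boolean {
  for (let k = 0; k < arr.length; k++) {
    if (!stillFast(arr)) {
      // Bailout(k): continue in the generic, spec-compliant loop without
      // re-visiting elements 0..k-1.
      return someSlow(arr, pred, k);
    }
    if (pred(arr[k])) return true;
  }
  return false;
}
```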
@@ -109,11 +111,10 @@ namespace array { // https://tc39.github.io/ecma262/#sec-array.prototype.some transitioning javascript builtin - ArraySome(implicit context: Context)(receiver: Object, ...arguments): Object { + ArraySome(js-implicit context: Context, receiver: Object)(...arguments): + Object { try { - if (IsNullOrUndefined(receiver)) { - goto NullOrUndefinedError; - } + RequireObjectCoercible(receiver, 'Array.prototype.some'); // 1. Let O be ? ToObject(this value). const o: JSReceiver = ToObject_Inline(context, receiver); @@ -143,8 +144,5 @@ namespace array { label TypeError deferred { ThrowTypeError(kCalledNonCallable, arguments[0]); } - label NullOrUndefinedError deferred { - ThrowTypeError(kCalledOnNullOrUndefined, 'Array.prototype.some'); - } } } diff --git a/deps/v8/src/builtins/array-splice.tq b/deps/v8/src/builtins/array-splice.tq index e24b51760c155e..3b65bb03d48bac 100644 --- a/deps/v8/src/builtins/array-splice.tq +++ b/deps/v8/src/builtins/array-splice.tq @@ -54,8 +54,7 @@ namespace array_splice { macro FastSplice(implicit context: Context)( args: Arguments, a: JSArray, length: Smi, newLength: Smi, - lengthDelta: Smi, actualStart: Smi, insertCount: Smi, - actualDeleteCount: Smi): void labels Bailout { + actualStart: Smi, insertCount: Smi, actualDeleteCount: Smi): void { // Make sure elements are writable. array::EnsureWriteableFastElements(a); @@ -77,7 +76,7 @@ namespace array_splice { UnsafeCast(elements), dstIndex, srcIndex, count); } else { // Grow. - let capacity: Smi = CalculateNewElementsCapacity(newLength); + const capacity: Smi = CalculateNewElementsCapacity(newLength); const newElements: FixedArrayType = Extract(elements, 0, actualStart, capacity); a.elements = newElements; @@ -168,12 +167,12 @@ namespace array_splice { if (IsFastSmiOrTaggedElementsKind(elementsKind)) { FastSplice( - args, a, length, newLength, lengthDelta, actualStart, insertCount, - actualDeleteCount) otherwise Bailout; + args, a, length, newLength, actualStart, insertCount, + actualDeleteCount); } else { FastSplice( - args, a, length, newLength, lengthDelta, actualStart, insertCount, - actualDeleteCount) otherwise Bailout; + args, a, length, newLength, actualStart, insertCount, + actualDeleteCount); } return deletedResult; @@ -301,8 +300,6 @@ namespace array_splice { context: Context, arguments: Arguments, o: JSReceiver, len: Number, actualStart: Number, insertCount: Smi, actualDeleteCount: Number): Object { - const affected: Number = len - actualStart - actualDeleteCount; - // 9. Let A be ? ArraySpeciesCreate(O, actualDeleteCount). const a: JSReceiver = ArraySpeciesCreate(context, o, actualDeleteCount); const itemCount: Number = insertCount; @@ -353,8 +350,8 @@ namespace array_splice { // https://tc39.github.io/ecma262/#sec-array.prototype.splice transitioning javascript builtin - ArrayPrototypeSplice(context: Context, receiver: Object, ...arguments): - Object { + ArrayPrototypeSplice(js-implicit context: Context, receiver: Object)( + ...arguments): Object { // 1. Let O be ? ToObject(this value). 
const o: JSReceiver = ToObject(context, receiver); diff --git a/deps/v8/src/builtins/array-unshift.tq b/deps/v8/src/builtins/array-unshift.tq index b2e746db470bab..e685d520cd963a 100644 --- a/deps/v8/src/builtins/array-unshift.tq +++ b/deps/v8/src/builtins/array-unshift.tq @@ -93,7 +93,7 @@ namespace array_unshift { // https://tc39.github.io/ecma262/#sec-array.prototype.unshift transitioning javascript builtin ArrayPrototypeUnshift( - context: Context, receiver: Object, ...arguments): Object { + js-implicit context: Context, receiver: Object)(...arguments): Object { try { TryFastArrayUnshift(context, receiver, arguments) otherwise Baseline; } diff --git a/deps/v8/src/builtins/array.tq b/deps/v8/src/builtins/array.tq index 9807db19c6d774..7e044e086b89b3 100644 --- a/deps/v8/src/builtins/array.tq +++ b/deps/v8/src/builtins/array.tq @@ -33,18 +33,19 @@ namespace array { } macro IsJSArray(implicit context: Context)(o: Object): bool { - try { - const array: JSArray = Cast(o) otherwise NotArray; - return true; - } - label NotArray { - return false; + typeswitch (o) { + case (JSArray): { + return true; + } + case (Object): { + return false; + } } } macro LoadElementOrUndefined(a: FixedArray, i: Smi): Object { const e: Object = a.objects[i]; - return e == Hole ? Undefined : e; + return e == TheHole ? Undefined : e; } macro LoadElementOrUndefined(a: FixedDoubleArray, i: Smi): NumberOrUndefined { @@ -62,26 +63,7 @@ namespace array { } macro StoreArrayHole(elements: FixedArray, k: Smi): void { - elements.objects[k] = Hole; - } - - macro CopyArrayElement( - elements: FixedArray, newElements: FixedArray, from: Smi, to: Smi): void { - const e: Object = elements.objects[from]; - newElements.objects[to] = e; - } - - macro CopyArrayElement( - elements: FixedDoubleArray, newElements: FixedDoubleArray, from: Smi, - to: Smi): void { - try { - const floatValue: float64 = LoadDoubleWithHoleCheck(elements, from) - otherwise FoundHole; - newElements.floats[to] = floatValue; - } - label FoundHole { - StoreArrayHole(newElements, to); - } + elements.objects[k] = TheHole; } extern macro SetPropertyLength(implicit context: Context)(Object, Number); diff --git a/deps/v8/src/builtins/base.tq b/deps/v8/src/builtins/base.tq index 76e1a486c815ab..07af1f441f8060 100644 --- a/deps/v8/src/builtins/base.tq +++ b/deps/v8/src/builtins/base.tq @@ -14,9 +14,11 @@ #include 'src/objects/js-generator.h' #include 'src/objects/js-promise.h' #include 'src/objects/js-regexp-string-iterator.h' -#include 'src/objects/module.h' +#include 'src/objects/js-weak-refs.h' #include 'src/objects/objects.h' +#include 'src/objects/source-text-module.h' #include 'src/objects/stack-frame-info.h' +#include 'src/objects/synthetic-module.h' #include 'src/objects/template-objects.h' type void; @@ -31,12 +33,16 @@ type PositiveSmi extends Smi; // The Smi value zero, which is often used as null for HeapObject types. type Zero extends PositiveSmi; +// A value with the size of Tagged which may contain arbitrary data. 
+type Uninitialized extends Tagged; + @abstract extern class HeapObject extends Tagged { map: Map; } type Object = Smi | HeapObject; + type int32 generates 'TNode' constexpr 'int32_t'; type uint32 generates 'TNode' constexpr 'uint32_t'; type int31 extends int32 @@ -84,32 +90,33 @@ extern class Oddball extends HeapObject { extern class HeapNumber extends HeapObject { value: float64; } type Number = Smi | HeapNumber; -type BigInt extends HeapObject generates 'TNode'; type Numeric = Number | BigInt; @abstract -@noVerifier +@generateCppClass extern class Name extends HeapObject { - hash_field: int32; + hash_field: uint32; } +@generateCppClass extern class Symbol extends Name { flags: int32; - name: Object; + name: Object; // The print name of a symbol, or undefined if none. } @abstract +@generateCppClass extern class String extends Name { - length: uint32; + length: int32; } +@generateCppClass extern class ConsString extends String { first: String; second: String; } @abstract -@noVerifier extern class ExternalString extends String { resource: RawPtr; resource_data: RawPtr; @@ -118,28 +125,37 @@ extern class ExternalString extends String { extern class ExternalOneByteString extends ExternalString {} extern class ExternalTwoByteString extends ExternalString {} -extern class InternalizedString extends String {} +@generateCppClass +extern class InternalizedString extends String { +} // TODO(v8:8983): Add declaration for variable-sized region. @abstract -@noVerifier +@generateCppClass extern class SeqString extends String { } -extern class SeqOneByteString extends SeqString {} -extern class SeqTwoByteString extends SeqString {} +@generateCppClass +extern class SeqOneByteString extends SeqString { +} +@generateCppClass +extern class SeqTwoByteString extends SeqString { +} +@generateCppClass extern class SlicedString extends String { parent: String; offset: Smi; } -extern class ThinString extends String { actual: String; } +@generateCppClass +extern class ThinString extends String { + actual: String; +} // The HeapNumber value NaN type NaN extends HeapNumber; @abstract -@noVerifier @generatePrint @generateCppClass extern class Struct extends HeapObject { @@ -169,7 +185,6 @@ type DirectString extends String; type RootIndex generates 'TNode' constexpr 'RootIndex'; @abstract -@noVerifier @generateCppClass extern class FixedArrayBase extends HeapObject { length: Smi; @@ -190,9 +205,7 @@ type LayoutDescriptor extends ByteArray type TransitionArray extends WeakFixedArray generates 'TNode'; -// InstanceType actually extends uint16, but a bunch of methods in -// CodeStubAssembler expect a TNode, so keeping it signed for now. -type InstanceType extends int16 constexpr 'InstanceType'; +type InstanceType extends uint16 constexpr 'InstanceType'; extern class Map extends HeapObject { instance_size_in_words: uint8; @@ -214,19 +227,21 @@ extern class Map extends HeapObject { @ifnot(V8_DOUBLE_FIELDS_UNBOXING) layout_descriptor: void; dependent_code: DependentCode; prototype_validity_cell: Smi | Cell; + // TODO(v8:9108): Misusing "weak" keyword; type should be + // Map | Weak | TransitionArray | PrototypeInfo | Smi. 
weak transitions_or_prototype_info: Map | TransitionArray | PrototypeInfo | Smi; } -type BytecodeArray extends FixedArrayBase; - @generatePrint +@generateCppClass extern class EnumCache extends Struct { keys: FixedArray; indices: FixedArray; } @generatePrint +@generateCppClass extern class SourcePositionTableWithFrameCache extends Struct { source_position_table: ByteArray; stack_frame_cache: Object; @@ -250,8 +265,7 @@ extern class DescriptorArray extends HeapObject { // than building the definition from C++. intrinsic %GetAllocationBaseSize(map: Map): intptr; intrinsic %Allocate(size: intptr): Class; -intrinsic %AllocateInternalClass(slotCount: constexpr intptr): - Class; +intrinsic %GetStructMap(instanceKind: constexpr InstanceType): Map; intrinsic %AddIndexedFieldSizeToObjectSize( baseSize: intptr, indexSize: T, fieldSize: int32): intptr { @@ -282,24 +296,35 @@ intrinsic } @abstract -@noVerifier extern class JSReceiver extends HeapObject { - properties_or_hash: FixedArrayBase | Smi; + properties_or_hash: FixedArrayBase | PropertyArray | Smi; } type Constructor extends JSReceiver; @abstract @dirtyInstantiatedAbstractClass +@generateCppClass extern class JSObject extends JSReceiver { - @noVerifier elements: FixedArrayBase; + // [elements]: The elements (properties with names that are integers). + // + // Elements can be in two general modes: fast and slow. Each mode + // corresponds to a set of object representations of elements that + // have something in common. + // + // In the fast mode elements is a FixedArray and so each element can be + // quickly accessed. The elements array can have one of several maps in this + // mode: fixed_array_map, fixed_double_array_map, + // sloppy_arguments_elements_map or fixed_cow_array_map (for copy-on-write + // arrays). In the latter case the elements array may be shared by a few + // objects and so before writing to any element the array must be copied. Use + // EnsureWritableFastElements in this case. + // + // In the slow mode the elements is either a NumberDictionary or a + // FixedArray parameter map for a (sloppy) arguments object. 
+ elements: FixedArrayBase; } -macro NewJSObject( - map: Map, properties: FixedArrayBase | Smi, - elements: FixedArrayBase): JSObject { - return new JSObject{map, properties_or_hash: properties, elements}; -} macro NewJSObject(implicit context: Context)(): JSObject { const objectFunction: JSFunction = GetObjectFunction(); const map: Map = Cast(objectFunction.prototype_or_initial_map) @@ -311,10 +336,16 @@ macro NewJSObject(implicit context: Context)(): JSObject { }; } +extern macro HasPrototypeSlot(JSFunction): bool; + macro GetDerivedMap(implicit context: Context)( target: JSFunction, newTarget: JSReceiver): Map { try { const constructor = Cast(newTarget) otherwise SlowPath; + if (!HasPrototypeSlot(constructor)) { + goto SlowPath; + } + assert(IsConstructor(constructor)); const map = Cast(constructor.prototype_or_initial_map) otherwise SlowPath; if (LoadConstructorOrBackPointer(map) != target) { @@ -328,19 +359,33 @@ macro GetDerivedMap(implicit context: Context)( } } +macro AllocateFastOrSlowJSObjectFromMap(implicit context: Context)(map: Map): + JSObject { + let properties = kEmptyFixedArray; + if (IsDictionaryMap(map)) { + properties = AllocateNameDictionary(kNameDictionaryInitialCapacity); + } + return AllocateJSObjectFromMap( + map, properties, kEmptyFixedArray, kNone, kWithSlackTracking); +} + extern class JSFunction extends JSObject { shared_function_info: SharedFunctionInfo; context: Context; feedback_cell: FeedbackCell; weak code: Code; + + // Space for the following field may or may not be allocated. @noVerifier weak prototype_or_initial_map: JSReceiver | Map; } +@generateCppClass extern class JSProxy extends JSReceiver { - target: Object; - handler: Object; + target: JSReceiver | Null; + handler: JSReceiver | Null; } +// Just a starting shape for JSObject; properties can move after initialization. @noVerifier extern class JSProxyRevocableResult extends JSObject { proxy: Object; @@ -358,21 +403,39 @@ macro NewJSProxyRevocableResult(implicit context: Context)( }; } -extern class JSGlobalProxy extends JSObject { native_context: Object; } +@generateCppClass +extern class JSGlobalProxy extends JSObject { + // [native_context]: the owner native context of this global proxy object. + // It is null value if this object is not used by any context. + native_context: Object; +} -extern class JSValue extends JSObject { value: Object; } +@generateCppClass +extern class JSPrimitiveWrapper extends JSObject { + value: Object; +} extern class JSArgumentsObject extends JSObject {} + +// Just a starting shape for JSObject; properties can move after initialization. @noVerifier @hasSameInstanceTypeAsParent extern class JSArgumentsObjectWithLength extends JSArgumentsObject { length: Object; } + +// Just a starting shape for JSObject; properties can move after initialization. @hasSameInstanceTypeAsParent extern class JSSloppyArgumentsObject extends JSArgumentsObjectWithLength { callee: Object; } +// Just a starting shape for JSObject; properties can move after initialization. 
+@hasSameInstanceTypeAsParent +@noVerifier +extern class JSStrictArgumentsObject extends JSArgumentsObjectWithLength { +} + extern class JSArrayIterator extends JSObject { iterated_object: JSReceiver; next_index: Number; @@ -405,20 +468,6 @@ macro NewJSArray(implicit context: Context)(): JSArray { }; } -struct HoleIterator { - Next(): Object labels NoMore() { - return Hole; - } -} - -macro NewJSArray(implicit context: Context)(map: Map, length: Smi): JSArray { - const map = GetFastPackedSmiElementsJSArrayMap(); - const i = HoleIterator{}; - const elements = new FixedArray{map, length, objects: ...i}; - return new - JSArray{map, properties_or_hash: kEmptyFixedArray, elements, length}; -} - // A HeapObject with a JSArray map, and either fast packed elements, or fast // holey elements when the global NoElementsProtector is not invalidated. transient type FastJSArray extends JSArray; @@ -441,18 +490,61 @@ transient type FastJSArrayForReadWithNoCustomIteration extends type NoSharedNameSentinel extends Smi; -type JSModuleNamespace extends JSObject; -type WeakArrayList extends HeapObject; +@generateCppClass +extern class CallHandlerInfo extends Struct { + callback: Foreign | Undefined; + js_callback: Foreign | Undefined; + data: Object; +} + +type ObjectHashTable extends FixedArray; @abstract +extern class Module extends HeapObject { + exports: ObjectHashTable; + hash: Smi; + status: Smi; + module_namespace: JSModuleNamespace | Undefined; + exception: Object; +} + +type SourceTextModuleInfo extends FixedArray; + +extern class SourceTextModule extends Module { + code: SharedFunctionInfo | JSFunction | + JSGeneratorObject | SourceTextModuleInfo; + regular_exports: FixedArray; + regular_imports: FixedArray; + requested_modules: FixedArray; + script: Script; + import_meta: TheHole | JSObject; + dfs_index: Smi; + dfs_ancestor_index: Smi; +} + +extern class SyntheticModule extends Module { + name: String; + export_names: FixedArray; + evaluation_steps: Foreign; +} + +@abstract +extern class JSModuleNamespace extends JSObject { + module: Module; +} + +@hasSameInstanceTypeAsParent @noVerifier +extern class TemplateList extends FixedArray { +} + +@abstract extern class JSWeakCollection extends JSObject { table: Object; } extern class JSWeakSet extends JSWeakCollection {} extern class JSWeakMap extends JSWeakCollection {} -@noVerifier extern class JSCollectionIterator extends JSObject { table: Object; index: Object; @@ -474,12 +566,20 @@ extern class JSMessageObject extends JSObject { error_level: Smi; } +extern class WeakArrayList extends HeapObject { + capacity: Smi; + length: Smi; + // TODO(v8:8983): declare variable-sized region for contained MaybeObject's + // objects[length]: MaybeObject; +} + extern class PrototypeInfo extends Struct { js_module_namespace: JSModuleNamespace | Undefined; prototype_users: WeakArrayList | Zero; registry_slot: Smi; validity_cell: Object; - @noVerifier object_create_map: Smi | WeakArrayList; + // TODO(v8:9108): Should be Weak | Undefined. + @noVerifier object_create_map: Map | Undefined; bit_field: Smi; } @@ -503,7 +603,7 @@ extern class Script extends Struct { extern class EmbedderDataArray extends HeapObject { length: Smi; } -type ScopeInfo extends Object generates 'TNode'; +type ScopeInfo extends HeapObject generates 'TNode'; extern class PreparseData extends HeapObject { // TODO(v8:8983): Add declaration for variable-sized region. 
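The `base.tq` changes above replace the opaque `type Module` forward declaration with explicit class layouts: an abstract `Module` carrying the state common to both flavors, specialized by `SourceTextModule` (compiled from script source) and `SyntheticModule` (defined through the embedder API). A rough TypeScript mirror of that hierarchy, with field types as simplified stand-ins for the Torque ones:

```ts
// Common module state (exports table, instantiation status, etc.).
abstract class Module {
  exports!: Map<string, unknown>;          // ObjectHashTable in Torque
  hash!: number;
  status!: number;
  moduleNamespace: object | undefined;     // JSModuleNamespace | Undefined
  exception: unknown;
}

// A module compiled from source text.
class SourceTextModule extends Module {
  regularExports!: unknown[];
  regularImports!: unknown[];
  requestedModules!: unknown[];
  dfsIndex!: number;                       // cycle-detection bookkeeping
  dfsAncestorIndex!: number;
}

// A module whose exports are defined by embedder-supplied steps.
class SyntheticModule extends Module {
  name!: string;
  exportNames!: string[];
  evaluationSteps!: unknown;               // Foreign (C++ callback) in Torque
}
```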
@@ -527,16 +627,30 @@ extern class SharedFunctionInfo extends HeapObject { expected_nof_properties: uint16; function_token_offset: int16; flags: int32; + function_literal_id: int32; @if(V8_SFI_HAS_UNIQUE_ID) unique_id: int32; } extern class JSBoundFunction extends JSObject { - bound_target_function: JSReceiver; + bound_target_function: Callable; bound_this: Object; bound_arguments: FixedArray; } -type Callable = JSFunction | JSBoundFunction | JSProxy; +// Specialized types. The following three type definitions don't correspond to +// actual C++ classes, but have Is... methods that check additional constraints. + +// A Foreign object whose raw pointer is not allowed to be null. +type NonNullForeign extends Foreign; + +// A function built with InstantiateFunction for the public API. +type CallableApiObject extends HeapObject; + +// A JSProxy with the callable bit set. +type CallableJSProxy extends JSProxy; + +type Callable = + JSFunction | JSBoundFunction | CallableJSProxy | CallableApiObject; extern operator '.length_intptr' macro LoadAndUntagFixedArrayBaseLength( FixedArrayBase): intptr; @@ -547,7 +661,7 @@ type NumberDictionary extends HeapObject extern class FreeSpace extends HeapObject { size: Smi; - @noVerifier next: FreeSpace; + next: FreeSpace | Uninitialized; } // %RawDownCast should *never* be used anywhere in Torque code except for @@ -609,45 +723,12 @@ extern class JSArrayBufferView extends JSObject { } extern class JSTypedArray extends JSArrayBufferView { - AttachOffHeapBuffer(buffer: JSArrayBuffer, byteOffset: uintptr): void { - const basePointer: Smi = 0; - - // The max byteOffset is 8 * MaxSmi on the particular platform. 32 bit - // platforms are self-limiting, because we can't allocate an array bigger - // than our 32-bit arithmetic range anyway. 64 bit platforms could - // theoretically have an offset up to 2^35 - 1. - const backingStore = buffer.backing_store; - const externalPointer = backingStore + Convert(byteOffset); - - // Assert no overflow has occurred. Only assert if the mock array buffer - // allocator is NOT used. When the mock array buffer is used, impossibly - // large allocations are allowed that would erroneously cause an overflow - // and this assertion to fail. 
- assert( - IsMockArrayBufferAllocatorFlag() || - Convert(externalPointer) >= Convert(backingStore)); - - this.elements = kEmptyByteArray; - this.buffer = buffer; - this.external_pointer = externalPointer; - this.base_pointer = basePointer; - } - length: uintptr; external_pointer: RawPtr; base_pointer: ByteArray | Smi; } -@noVerifier -extern class JSAccessorPropertyDescriptor extends JSObject { - get: Object; - set: Object; - enumerable: Object; - configurable: Object; -} - @abstract -@noVerifier extern class JSCollection extends JSObject { table: Object; } @@ -681,14 +762,6 @@ extern class JSStringIterator extends JSObject { next_index: Smi; } -@noVerifier -extern class JSDataPropertyDescriptor extends JSObject { - value: Object; - writable: Object; - enumerable: Object; - configurable: Object; -} - @abstract extern class TemplateInfo extends Struct { tag: Object; @@ -722,7 +795,7 @@ extern class FunctionTemplateInfo extends TemplateInfo { function_template_rare_data: Object; shared_function_info: Object; flag: Smi; - @noVerifier length: Smi; + length: Smi; cached_property_name: Object; } @@ -749,8 +822,6 @@ type LanguageMode extends Smi constexpr 'LanguageMode'; type ExtractFixedArrayFlags generates 'TNode' constexpr 'CodeStubAssembler::ExtractFixedArrayFlags'; -type ParameterMode - generates 'TNode' constexpr 'ParameterMode'; type WriteBarrierMode generates 'TNode' constexpr 'WriteBarrierMode'; @@ -770,21 +841,21 @@ const UTF32: extern class Foreign extends HeapObject { foreign_address: RawPtr; } extern class InterceptorInfo extends Struct { - @noVerifier getter: Foreign | Zero; - @noVerifier setter: Foreign | Zero; - @noVerifier query: Foreign | Zero; - @noVerifier descriptor: Foreign | Zero; - @noVerifier deleter: Foreign | Zero; - @noVerifier enumerator: Foreign | Zero; - @noVerifier definer: Foreign | Zero; + getter: NonNullForeign | Zero | Undefined; + setter: NonNullForeign | Zero | Undefined; + query: NonNullForeign | Zero | Undefined; + descriptor: NonNullForeign | Zero | Undefined; + deleter: NonNullForeign | Zero | Undefined; + enumerator: NonNullForeign | Zero | Undefined; + definer: NonNullForeign | Zero | Undefined; data: Object; flags: Smi; } extern class AccessCheckInfo extends Struct { - callback: Foreign | Zero; - named_interceptor: InterceptorInfo | Zero; - indexed_interceptor: InterceptorInfo | Zero; + callback: Foreign | Zero | Undefined; + named_interceptor: InterceptorInfo | Zero | Undefined; + indexed_interceptor: InterceptorInfo | Zero | Undefined; data: Object; } @@ -800,6 +871,9 @@ extern class Cell extends HeapObject { value: Object; } extern class DataHandler extends Struct { smi_handler: Smi | Code; validity_cell: Smi | Cell; + + // Space for the following fields may or may not be allocated. + // TODO(v8:9108): Misusing "weak" keyword; should be MaybeObject. 
@noVerifier weak data_1: Object; @noVerifier weak data_2: Object; @noVerifier weak data_3: Object; @@ -850,17 +924,22 @@ extern class StackFrameInfo extends Struct { column_number: Smi; promise_all_index: Smi; script_id: Smi; - script_name: Object; - script_name_or_source_url: Object; - function_name: Object; - wasm_module_name: Object; + script_name: String | Null | Undefined; + script_name_or_source_url: String | Null | Undefined; + function_name: String | Null | Undefined; + method_name: String | Null | Undefined; + type_name: String | Null | Undefined; + eval_origin: String | Null | Undefined; + wasm_module_name: String | Null | Undefined; flag: Smi; } +type FrameArray extends FixedArray; + extern class StackTraceFrame extends Struct { - frame_array: Object; + frame_array: FrameArray | Undefined; frame_index: Smi; - frame_info: Object; + frame_info: StackFrameInfo | Undefined; id: Smi; } @@ -876,9 +955,20 @@ extern class WasmExportedFunctionData extends Struct { instance: WasmInstanceObject; jump_table_offset: Smi; function_index: Smi; + // The remaining fields are for fast calling from C++. The contract is + // that they are lazily populated, and either all will be present or none. + c_wrapper_code: Object; + wasm_call_target: Smi; // Pseudo-smi: one-bit shift on all platforms. + packed_args_size: Smi; } -extern class WasmJSFunctionData extends Struct { wrapper_code: Code; } +extern class WasmJSFunctionData extends Struct { + callable: JSReceiver; + wrapper_code: Code; + serialized_return_count: Smi; + serialized_parameter_count: Smi; + serialized_signature: ByteArray; // PodArray +} extern class WasmCapiFunctionData extends Struct { call_target: RawPtr; @@ -887,6 +977,16 @@ extern class WasmCapiFunctionData extends Struct { serialized_signature: ByteArray; // PodArray } +extern class WasmIndirectFunctionTable extends Struct { + size: uint32; + @if(TAGGED_SIZE_8_BYTES) optional_padding: uint32; + @ifnot(TAGGED_SIZE_8_BYTES) optional_padding: void; + sig_ids: RawPtr; + targets: RawPtr; + managed_native_allocations: Foreign | Undefined; + refs: FixedArray; +} + extern class WasmDebugInfo extends Struct { instance: WasmInstanceObject; interpreter_handle: Foreign | Undefined; @@ -947,9 +1047,9 @@ const kAllowLargeObjectAllocation: constexpr AllocationFlags generates 'CodeStubAssembler::kAllowLargeObjectAllocation'; const kWithSlackTracking: constexpr SlackTrackingMode - generates 'SlackTrackingMode::kWithSlackTracking'; + generates 'CodeStubAssembler::SlackTrackingMode::kWithSlackTracking'; const kNoSlackTracking: constexpr SlackTrackingMode - generates 'SlackTrackingMode::kNoSlackTracking'; + generates 'CodeStubAssembler::SlackTrackingMode::kNoSlackTracking'; const kFixedDoubleArrays: constexpr ExtractFixedArrayFlags generates 'CodeStubAssembler::ExtractFixedArrayFlag::kFixedDoubleArrays'; @@ -977,6 +1077,8 @@ const kCalledNonCallable: constexpr MessageTemplate generates 'MessageTemplate::kCalledNonCallable'; const kCalledOnNullOrUndefined: constexpr MessageTemplate generates 'MessageTemplate::kCalledOnNullOrUndefined'; +const kProtoObjectOrNull: constexpr MessageTemplate + generates 'MessageTemplate::kProtoObjectOrNull'; const kInvalidOffset: constexpr MessageTemplate generates 'MessageTemplate::kInvalidOffset'; const kInvalidTypedArrayLength: constexpr MessageTemplate @@ -1003,13 +1105,17 @@ const kSymbolToString: constexpr MessageTemplate generates 'MessageTemplate::kSymbolToString'; const kPropertyNotFunction: constexpr MessageTemplate generates 
'MessageTemplate::kPropertyNotFunction'; +const kBigIntMaxLength: constexpr intptr + generates 'BigInt::kMaxLength'; +const kBigIntTooBig: constexpr MessageTemplate + generates 'MessageTemplate::kBigIntTooBig'; const kMaxArrayIndex: constexpr uint32 generates 'JSArray::kMaxArrayIndex'; const kArrayBufferMaxByteLength: constexpr uintptr generates 'JSArrayBuffer::kMaxByteLength'; -const V8_TYPED_ARRAY_MAX_SIZE_IN_HEAP: - constexpr int31 generates 'V8_TYPED_ARRAY_MAX_SIZE_IN_HEAP'; +const kMaxTypedArrayInHeap: + constexpr int31 generates 'JSTypedArray::kMaxSizeInHeap'; const kMaxSafeInteger: constexpr float64 generates 'kMaxSafeInteger'; const kSmiMaxValue: constexpr uintptr generates 'kSmiMaxValue'; const kSmiMax: uintptr = kSmiMaxValue; @@ -1054,7 +1160,13 @@ const kStrictReadOnlyProperty: constexpr MessageTemplate const kString: constexpr PrimitiveType generates 'PrimitiveType::kString'; -type Hole extends Oddball; +const kExternalPointerForOnHeapArray: constexpr RawPtr + generates 'JSTypedArray::ExternalPointerForOnHeapArray()'; + +const kNameDictionaryInitialCapacity: + constexpr int32 generates 'NameDictionary::kInitialCapacity'; + +type TheHole extends Oddball; type Null extends Oddball; type Undefined extends Oddball; type True extends Oddball; @@ -1064,7 +1176,7 @@ type Boolean = True | False; type NumberOrUndefined = Number | Undefined; -extern macro TheHoleConstant(): Hole; +extern macro TheHoleConstant(): TheHole; extern macro NullConstant(): Null; extern macro UndefinedConstant(): Undefined; extern macro TrueConstant(): True; @@ -1075,7 +1187,7 @@ extern macro EmptyStringConstant(): EmptyString; extern macro LengthStringConstant(): String; extern macro NanConstant(): NaN; -const Hole: Hole = TheHoleConstant(); +const TheHole: TheHole = TheHoleConstant(); const Null: Null = NullConstant(); const Undefined: Undefined = UndefinedConstant(); const True: True = TrueConstant(); @@ -1090,11 +1202,6 @@ const false: constexpr bool generates 'false'; const kStrict: constexpr LanguageMode generates 'LanguageMode::kStrict'; const kSloppy: constexpr LanguageMode generates 'LanguageMode::kSloppy'; -const SMI_PARAMETERS: constexpr ParameterMode - generates 'CodeStubAssembler::SMI_PARAMETERS'; -const INTPTR_PARAMETERS: constexpr ParameterMode - generates 'CodeStubAssembler::INTPTR_PARAMETERS'; - const SKIP_WRITE_BARRIER: constexpr WriteBarrierMode generates 'SKIP_WRITE_BARRIER'; const UNSAFE_SKIP_WRITE_BARRIER: @@ -1107,7 +1214,7 @@ extern class AsyncGeneratorRequest extends Struct { promise: JSPromise; } -extern class ModuleInfoEntry extends Struct { +extern class SourceTextModuleInfoEntry extends Struct { export_name: String | Undefined; local_name: String | Undefined; import_name: String | Undefined; @@ -1134,7 +1241,7 @@ extern class PromiseReaction extends Struct { extern class PromiseReactionJobTask extends Microtask { argument: Object; context: Context; - @noVerifier handler: Callable | Undefined; + handler: Callable | Undefined; promise_or_capability: JSPromise | PromiseCapability | Undefined; } @@ -1155,22 +1262,8 @@ extern class JSRegExp extends JSObject { flags: Smi | Undefined; } -@noVerifier -extern class JSIteratorResult extends JSObject { - value: Object; - done: Boolean; -} - -macro NewJSIteratorResult(implicit context: Context)( - value: Object, done: Boolean): JSIteratorResult { - return new JSIteratorResult{ - map: GetIteratorResultMap(), - properties_or_hash: kEmptyFixedArray, - elements: kEmptyFixedArray, - value, - done - }; -} +extern transitioning macro 
AllocateJSIteratorResult(implicit context: Context)( + Object, Boolean): JSObject; // Note: Although a condition for a FastJSRegExp is having a positive smi // lastIndex (see RegExpBuiltinsAssembler::BranchIfFastRegExp), it is possible @@ -1230,9 +1323,9 @@ extern class AccessorInfo extends Struct { name: Object; flags: Smi; expected_receiver_type: Object; - @noVerifier setter: Foreign | Zero; - @noVerifier getter: Foreign | Zero; - @noVerifier js_getter: Foreign | Zero; + setter: NonNullForeign | Zero; + getter: NonNullForeign | Zero; + js_getter: NonNullForeign | Zero; data: Object; } @@ -1277,7 +1370,7 @@ extern class FeedbackCell extends Struct { type AllocationSite extends Struct; extern class AllocationMemento extends Struct { - @noVerifier allocation_site: AllocationSite; + allocation_site: AllocationSite; } extern class WasmModuleObject extends JSObject { @@ -1303,8 +1396,8 @@ extern class WasmMemoryObject extends JSObject { } extern class WasmGlobalObject extends JSObject { - untagged_buffer: JSArrayBuffer; - tagged_buffer: FixedArray; + untagged_buffer: JSArrayBuffer | Undefined; + tagged_buffer: FixedArray | Undefined; offset: Smi; flags: Smi; } @@ -1314,10 +1407,6 @@ extern class WasmExceptionObject extends JSObject { exception_tag: HeapObject; } -@noVerifier -extern class WasmExceptionPackage extends JSReceiver { -} - type WasmExportedFunction extends JSFunction; extern class AsmWasmData extends Struct { @@ -1327,6 +1416,46 @@ extern class AsmWasmData extends Struct { uses_bitset: HeapNumber; } +extern class JSFinalizationGroup extends JSObject { + native_context: NativeContext; + cleanup: Object; + active_cells: Undefined | WeakCell; + cleared_cells: Undefined | WeakCell; + key_map: Object; + next: Undefined | JSFinalizationGroup; + flags: Smi; +} + +extern class JSFinalizationGroupCleanupIterator extends JSObject { + finalization_group: JSFinalizationGroup; +} + +extern class WeakCell extends HeapObject { + finalization_group: Undefined | JSFinalizationGroup; + target: Undefined | JSReceiver; + holdings: Object; + prev: Undefined | WeakCell; + next: Undefined | WeakCell; + key: Object; + key_list_prev: Undefined | WeakCell; + key_list_next: Undefined | WeakCell; +} + +extern class JSWeakRef extends JSObject { target: Undefined | JSReceiver; } + +extern class BytecodeArray extends FixedArrayBase { + // TODO(v8:8983): bytecode array object sizes vary based on their contents. 
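+  // Editorial gloss (assumption, not upstream text): the fixed header
+  // fields declared below are followed in memory by the variable-length
+  // bytecode stream itself, which Torque cannot yet describe; hence the
+  // TODO above about object sizes varying with contents.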
+ constant_pool: FixedArray; + handler_table: ByteArray; + source_position_table: Undefined | ByteArray | + SourcePositionTableWithFrameCache; + frame_size: int32; + parameter_size: int32; + incoming_new_target_or_generator_register: int32; + osr_nesting_level: int8; + bytecode_age: int8; +} + extern macro Is64(): constexpr bool; extern macro SelectBooleanConstant(bool): Boolean; @@ -1358,7 +1487,7 @@ extern transitioning builtin SetProperty(implicit context: Context)( extern transitioning builtin SetPropertyInLiteral(implicit context: Context)( Object, Object, Object); extern transitioning builtin DeleteProperty(implicit context: Context)( - Object, Object, LanguageMode); + Object, Object, LanguageMode): Object; extern transitioning builtin HasProperty(implicit context: Context)( Object, Object): Boolean; extern transitioning macro HasProperty_Inline(implicit context: Context)( @@ -1403,6 +1532,10 @@ extern macro ConstructWithTarget(implicit context: Context)( extern macro SpeciesConstructor(implicit context: Context)( Object, JSReceiver): JSReceiver; +extern macro ConstructorBuiltinsAssembler::IsDictionaryMap(Map): bool; +extern macro CodeStubAssembler::AllocateNameDictionary(constexpr int32): + NameDictionary; + extern builtin ToObject(Context, Object): JSReceiver; extern macro ToObject_Inline(Context, Object): JSReceiver; extern macro IsNullOrUndefined(Object): bool; @@ -1598,6 +1731,7 @@ extern operator '==' macro Word32Equal(bool, bool): bool; extern operator '!=' macro Word32NotEqual(bool, bool): bool; extern operator '+' macro Float64Add(float64, float64): float64; +extern operator '-' macro Float64Sub(float64, float64): float64; extern operator '+' macro NumberAdd(Number, Number): Number; extern operator '-' macro NumberSub(Number, Number): Number; @@ -1650,6 +1784,8 @@ extern macro TaggedIsNotSmi(Object): bool; extern macro TaggedIsPositiveSmi(Object): bool; extern macro IsValidPositiveSmi(intptr): bool; +extern macro IsInteger(HeapNumber): bool; + extern macro HeapObjectToJSDataView(HeapObject): JSDataView labels CastError; extern macro HeapObjectToJSProxy(HeapObject): JSProxy @@ -1713,7 +1849,7 @@ macro Cast(o: HeapObject): A labels CastError; Cast(o: HeapObject): HeapObject - labels CastError { +labels _CastError { return o; } @@ -1837,6 +1973,11 @@ Cast(o: HeapObject): HeapNumber goto CastError; } +Cast(o: HeapObject): BigInt labels CastError { + if (IsBigInt(o)) return %RawDownCast(o); + goto CastError; +} + Cast(o: HeapObject): JSRegExp labels CastError { if (IsJSRegExp(o)) return %RawDownCast(o); @@ -1849,9 +1990,9 @@ Cast(implicit context: Context)(o: HeapObject): Map goto CastError; } -Cast(o: HeapObject): JSValue +Cast(o: HeapObject): JSPrimitiveWrapper labels CastError { - if (IsJSValue(o)) return %RawDownCast(o); + if (IsJSPrimitiveWrapper(o)) return %RawDownCast(o); goto CastError; } @@ -1915,24 +2056,24 @@ Cast(implicit context: Context)(o: HeapObject): FastJSArrayForCopy labels CastError { if (IsArraySpeciesProtectorCellInvalid()) goto CastError; - const a: FastJSArray = Cast(o) otherwise CastError; - return %RawDownCast(o); + const a = Cast(o) otherwise CastError; + return %RawDownCast(a); } Cast(implicit context: Context)( o: HeapObject): FastJSArrayWithNoCustomIteration labels CastError { if (IsArrayIteratorProtectorCellInvalid()) goto CastError; - const a: FastJSArray = Cast(o) otherwise CastError; - return %RawDownCast(o); + const a = Cast(o) otherwise CastError; + return %RawDownCast(a); } Cast(implicit context: Context)( o: HeapObject): 
FastJSArrayForReadWithNoCustomIteration labels CastError { if (IsArrayIteratorProtectorCellInvalid()) goto CastError; - const a: FastJSArrayForRead = Cast(o) otherwise CastError; - return %RawDownCast(o); + const a = Cast(o) otherwise CastError; + return %RawDownCast(a); } Cast(implicit context: Context)(o: HeapObject): JSReceiver @@ -1990,7 +2131,7 @@ extern macro ChangeInt32ToIntPtr(int32): intptr; // Sign-extends. extern macro ChangeUint32ToWord(uint32): uintptr; // Doesn't sign-extend. extern macro LoadNativeContext(Context): NativeContext; extern macro TruncateFloat64ToFloat32(float64): float32; -extern macro TruncateHeapNumberValueToWord32(Number): int32; +extern macro TruncateHeapNumberValueToWord32(HeapNumber): int32; extern macro LoadJSArrayElementsMap(constexpr ElementsKind, Context): Map; extern macro LoadJSArrayElementsMap(ElementsKind, Context): Map; extern macro ChangeNonnegativeNumberToUintPtr(Number): uintptr; @@ -2007,13 +2148,14 @@ extern macro Float64Constant(constexpr float64): float64; extern macro SmiConstant(constexpr int31): Smi; extern macro SmiConstant(constexpr Smi): Smi; extern macro SmiConstant(constexpr MessageTemplate): Smi; +extern macro SmiConstant(constexpr LanguageMode): Smi; extern macro BoolConstant(constexpr bool): bool; extern macro StringConstant(constexpr string): String; -extern macro LanguageModeConstant(constexpr LanguageMode): LanguageMode; extern macro Int32Constant(constexpr ElementsKind): ElementsKind; extern macro IntPtrConstant(constexpr NativeContextSlot): NativeContextSlot; extern macro IntPtrConstant(constexpr ContextSlot): ContextSlot; extern macro IntPtrConstant(constexpr intptr): intptr; +extern macro PointerConstant(constexpr RawPtr): RawPtr; extern macro SingleCharacterStringConstant(constexpr string): String; extern macro BitcastWordToTaggedSigned(intptr): Smi; @@ -2126,6 +2268,9 @@ Convert(i: int32): Number { Convert(i: int32): intptr { return ChangeInt32ToIntPtr(i); } +Convert(i: uint32): intptr { + return Signed(ChangeUint32ToWord(i)); +} Convert(i: int32): Smi { return SmiFromInt32(i); } @@ -2333,10 +2478,6 @@ extern operator '.floats[]=' macro StoreFixedDoubleArrayElement( FixedDoubleArray, intptr, float64): void; extern operator '.floats[]=' macro StoreFixedDoubleArrayElementSmi( FixedDoubleArray, Smi, float64): void; -operator '.floats[]=' macro StoreFixedDoubleArrayElementSmi( - a: FixedDoubleArray, i: Smi, n: Number): void { - StoreFixedDoubleArrayElementSmi(a, i, Convert(n)); -} operator '[]=' macro StoreFixedDoubleArrayDirect( a: FixedDoubleArray, i: Smi, v: Number) { a.floats[i] = Convert(v); @@ -2418,7 +2559,7 @@ extern macro AllocateJSArray(constexpr ElementsKind, Map, Smi, Smi): JSArray; extern macro AllocateJSArray(Map, FixedArrayBase, Smi): JSArray; extern macro AllocateJSObjectFromMap(Map): JSObject; extern macro AllocateJSObjectFromMap( - Map, FixedArray, FixedArray, constexpr AllocationFlags, + Map, FixedArray | PropertyArray, FixedArray, constexpr AllocationFlags, constexpr SlackTrackingMode): JSObject; extern macro LoadDoubleWithHoleCheck(FixedDoubleArray, Smi): float64 @@ -2531,10 +2672,10 @@ LoadElementNoHole(implicit context: Context)( a: JSArray, index: Smi): Object labels IfHole { try { - let elements: FixedArray = + const elements: FixedArray = Cast(a.elements) otherwise Unexpected; - let e: Object = elements.objects[index]; - if (e == Hole) { + const e: Object = elements.objects[index]; + if (e == TheHole) { goto IfHole; } return e; @@ -2548,9 +2689,10 @@ LoadElementNoHole(implicit context: Context)( a: 
JSArray, index: Smi): Object labels IfHole { try { - let elements: FixedDoubleArray = + const elements: FixedDoubleArray = Cast(a.elements) otherwise Unexpected; - let e: float64 = LoadDoubleWithHoleCheck(elements, index) otherwise IfHole; + const e: float64 = + LoadDoubleWithHoleCheck(elements, index) otherwise IfHole; return AllocateHeapNumberWithValue(e); } label Unexpected { @@ -2594,7 +2736,7 @@ struct FastJSArrayWitness { } else { const elements = Cast(this.unstable.elements) otherwise unreachable; - StoreFixedArrayElement(elements, k, Hole); + StoreFixedArrayElement(elements, k, TheHole); } } @@ -2638,12 +2780,12 @@ struct FastJSArrayWitness { MoveElements(dst: intptr, src: intptr, length: intptr) { assert(this.arrayIsPushable); if (this.hasDoubles) { - let elements: FixedDoubleArray = + const elements: FixedDoubleArray = Cast(this.unstable.elements) otherwise unreachable; TorqueMoveElements(elements, dst, src, length); } else { - let elements: FixedArray = Cast(this.unstable.elements) + const elements: FixedArray = Cast(this.unstable.elements) otherwise unreachable; if (this.hasSmis) { TorqueMoveElementsSmi(elements, dst, src, length); @@ -2662,17 +2804,62 @@ struct FastJSArrayWitness { } macro NewFastJSArrayWitness(array: FastJSArray): FastJSArrayWitness { - let kind = array.map.elements_kind; + const kind = array.map.elements_kind; return FastJSArrayWitness{ stable: array, unstable: array, map: array.map, - hasDoubles: !IsElementsKindLessThanOrEqual(kind, HOLEY_ELEMENTS), + hasDoubles: IsDoubleElementsKind(kind), hasSmis: IsElementsKindLessThanOrEqual(kind, HOLEY_SMI_ELEMENTS), arrayIsPushable: false }; } +struct FastJSArrayForReadWitness { + Get(): FastJSArrayForRead { + return this.unstable; + } + + Recheck() labels CastError { + if (this.stable.map != this.map) goto CastError; + // We don't need to check elements kind or whether the prototype + // has changed away from the default JSArray prototype, because + // if the map remains the same then those properties hold. + // + // However, we have to make sure there are no elements in the + // prototype chain. 
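+    // Editorial gloss (hedged): the protector cell consulted below is
+    // understood to be invalidated once elements appear on the relevant
+    // prototypes, so this single check is assumed to cover the whole
+    // prototype chain.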
+ if (IsNoElementsProtectorCellInvalid()) goto CastError; + this.unstable = %RawDownCast(this.stable); + } + + LoadElementNoHole(implicit context: Context)(k: Smi): Object + labels FoundHole { + if (this.hasDoubles) { + return LoadElementNoHole(this.unstable, k) + otherwise FoundHole; + } else { + return LoadElementNoHole(this.unstable, k) + otherwise FoundHole; + } + } + + const stable: JSArray; + unstable: FastJSArrayForRead; + const map: Map; + const hasDoubles: bool; +} + +macro NewFastJSArrayForReadWitness(array: FastJSArrayForRead): + FastJSArrayForReadWitness { + const kind = array.map.elements_kind; + return FastJSArrayForReadWitness{ + stable: array, + unstable: array, + map: array.map, + hasDoubles: IsDoubleElementsKind(kind) + }; +} + extern macro TransitionElementsKind( JSObject, Map, constexpr ElementsKind, constexpr ElementsKind): void labels Bailout; @@ -2693,6 +2880,7 @@ extern macro IsJSReceiver(HeapObject): bool; extern macro TaggedIsCallable(Object): bool; extern macro IsDetachedBuffer(JSArrayBuffer): bool; extern macro IsHeapNumber(HeapObject): bool; +extern macro IsBigInt(HeapObject): bool; extern macro IsFixedArray(HeapObject): bool; extern macro IsName(HeapObject): bool; extern macro IsPrivateSymbol(HeapObject): bool; @@ -2702,7 +2890,7 @@ extern macro IsOddball(HeapObject): bool; extern macro IsSymbol(HeapObject): bool; extern macro IsJSArrayMap(Map): bool; extern macro IsExtensibleMap(Map): bool; -extern macro IsJSValue(HeapObject): bool; +extern macro IsJSPrimitiveWrapper(HeapObject): bool; extern macro IsCustomElementsReceiverInstanceType(int32): bool; extern macro Typeof(Object): Object; @@ -2713,7 +2901,7 @@ macro NumberIsNaN(number: Number): bool { return false; } case (hn: HeapNumber): { - let value: float64 = Convert(hn); + const value: float64 = Convert(hn); return value != value; } } @@ -2722,6 +2910,8 @@ macro NumberIsNaN(number: Number): bool { extern macro GotoIfForceSlowPath() labels Taken; extern macro BranchIfToBooleanIsTrue(Object): never labels Taken, NotTaken; +extern macro BranchIfToBooleanIsFalse(Object): never + labels Taken, NotTaken; macro ToBoolean(obj: Object): bool { if (BranchIfToBooleanIsTrue(obj)) { @@ -2731,13 +2921,24 @@ macro ToBoolean(obj: Object): bool { } } +@export +macro RequireObjectCoercible(implicit context: Context)( + value: Object, name: constexpr string): Object { + if (IsNullOrUndefined(value)) { + ThrowTypeError(kCalledOnNullOrUndefined, name); + } + return value; +} + +extern macro BranchIfSameValue(Object, Object): never labels Taken, NotTaken; + transitioning macro ToIndex(input: Object, context: Context): Number labels RangeError { if (input == Undefined) { return 0; } - let value: Number = ToInteger_Inline(context, input, kTruncateMinusZero); + const value: Number = ToInteger_Inline(context, input, kTruncateMinusZero); if (value < 0 || value > kMaxSafeInteger) { goto RangeError; } @@ -2824,19 +3025,6 @@ macro BranchIfFastJSArrayForRead(o: Object, context: Context): BranchIf(o) otherwise True, False; } -macro BranchIfNotFastJSArray(o: Object, context: Context): never - labels True, False { - BranchIfNot(o) otherwise True, False; -} - -macro BranchIfFastJSArrayForCopy(o: Object, context: Context): never - labels True, False { - // Long-term, it's likely not a good idea to have this slow-path test here, - // since it fundamentally breaks the type system. 
- GotoIfForceSlowPath() otherwise False; - BranchIf(o) otherwise True, False; -} - @export macro IsFastJSArrayWithNoCustomIteration(context: Context, o: Object): bool { return Is(o); @@ -2859,7 +3047,7 @@ namespace runtime { transitioning builtin FastCreateDataProperty(implicit context: Context)( receiver: JSReceiver, key: Object, value: Object): Object { try { - let array = Cast(receiver) otherwise Slow; + const array = Cast(receiver) otherwise Slow; const index: Smi = Cast(key) otherwise goto Slow; if (index < 0 || index > array.length) goto Slow; array::EnsureWriteableFastElements(array); @@ -2929,3 +3117,46 @@ transitioning macro ToStringImpl(context: Context, o: Object): String { } unreachable; } + +macro VerifiedUnreachable(): never { + StaticAssert(false); + unreachable; +} + +macro Float64IsSomeInfinity(value: float64): bool { + if (value == V8_INFINITY) { + return true; + } + return value == (Convert(0) - V8_INFINITY); +} + +@export +macro IsIntegerOrSomeInfinity(o: Object): bool { + typeswitch (o) { + case (Smi): { + return true; + } + case (hn: HeapNumber): { + if (Float64IsSomeInfinity(Convert(hn))) { + return true; + } + return IsInteger(hn); + } + case (Object): { + return false; + } + } +} + +builtin CheckNumberInRange(implicit context: Context)( + value: Number, min: Number, max: Number): Undefined { + if (IsIntegerOrSomeInfinity(value) && min <= value && value <= max) { + return Undefined; + } else { + Print('Range type assertion failed! (value/min/max)'); + Print(value); + Print(min); + Print(max); + unreachable; + } +} diff --git a/deps/v8/src/builtins/bigint.tq b/deps/v8/src/builtins/bigint.tq new file mode 100644 index 00000000000000..a1b1cb67809d84 --- /dev/null +++ b/deps/v8/src/builtins/bigint.tq @@ -0,0 +1,206 @@ +// Copyright 2019 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#include 'src/builtins/builtins-bigint-gen.h' + +// TODO(nicohartmann): Discuss whether types used by multiple builtins should be +// in global namespace +@noVerifier +extern class BigIntBase extends HeapObject generates 'TNode' { +} + +type BigInt extends BigIntBase; + +@noVerifier +@hasSameInstanceTypeAsParent +extern class MutableBigInt extends BigIntBase generates 'TNode' { +} + +Convert(i: MutableBigInt): BigInt { + assert(bigint::IsCanonicalized(i)); + return %RawDownCast(Convert(i)); +} + +namespace bigint { + + const kPositiveSign: uint32 = 0; + const kNegativeSign: uint32 = 1; + + extern macro BigIntBuiltinsAssembler::CppAbsoluteAddAndCanonicalize( + MutableBigInt, BigIntBase, BigIntBase): void; + extern macro BigIntBuiltinsAssembler::CppAbsoluteSubAndCanonicalize( + MutableBigInt, BigIntBase, BigIntBase): void; + extern macro BigIntBuiltinsAssembler::CppAbsoluteCompare( + BigIntBase, BigIntBase): int32; + + extern macro BigIntBuiltinsAssembler::ReadBigIntSign(BigIntBase): uint32; + extern macro BigIntBuiltinsAssembler::ReadBigIntLength(BigIntBase): intptr; + extern macro BigIntBuiltinsAssembler::WriteBigIntSignAndLength( + MutableBigInt, uint32, intptr): void; + + extern macro CodeStubAssembler::AllocateBigInt(intptr): MutableBigInt; + extern macro CodeStubAssembler::StoreBigIntDigit( + MutableBigInt, intptr, uintptr): void; + extern macro CodeStubAssembler::LoadBigIntDigit(BigIntBase, intptr): uintptr; + + @export // Silence unused warning. + // TODO(szuend): Remove @export once macros that are only used in + // asserts are no longer detected as unused. 
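+  // Editorial gloss (assumption): "canonicalized" here means the most
+  // significant digit is non-zero and the zero BigInt carries the positive
+  // sign, which is exactly what the macro below checks. Under that reading,
+  // for example, a two-digit value whose top digit is 0 is non-canonical.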
+  macro IsCanonicalized(bigint: BigIntBase): bool {
+    const length = ReadBigIntLength(bigint);
+
+    if (length == 0) {
+      return ReadBigIntSign(bigint) == kPositiveSign;
+    }
+
+    return LoadBigIntDigit(bigint, length - 1) != 0;
+  }
+
+  macro InvertSign(sign: uint32): uint32 {
+    return sign == kPositiveSign ? kNegativeSign : kPositiveSign;
+  }
+
+  macro AllocateEmptyBigIntNoThrow(implicit context: Context)(
+      sign: uint32, length: intptr): MutableBigInt labels BigIntTooBig {
+    if (length > kBigIntMaxLength) {
+      goto BigIntTooBig;
+    }
+    const result: MutableBigInt = AllocateBigInt(length);
+
+    WriteBigIntSignAndLength(result, sign, length);
+    return result;
+  }
+
+  macro AllocateEmptyBigInt(implicit context: Context)(
+      sign: uint32, length: intptr): MutableBigInt {
+    try {
+      return AllocateEmptyBigIntNoThrow(sign, length) otherwise BigIntTooBig;
+    }
+    label BigIntTooBig {
+      ThrowRangeError(kBigIntTooBig);
+    }
+  }
+
+  macro MutableBigIntAbsoluteCompare(x: BigIntBase, y: BigIntBase): int32 {
+    return CppAbsoluteCompare(x, y);
+  }
+
+  macro MutableBigIntAbsoluteSub(implicit context: Context)(
+      x: BigInt, y: BigInt, resultSign: uint32): BigInt {
+    const xlength = ReadBigIntLength(x);
+    const ylength = ReadBigIntLength(y);
+    const xsign = ReadBigIntSign(x);
+
+    assert(MutableBigIntAbsoluteCompare(x, y) >= 0);
+    if (xlength == 0) {
+      assert(ylength == 0);
+      return x;
+    }
+
+    if (ylength == 0) {
+      return resultSign == xsign ? x : BigIntUnaryMinus(x);
+    }
+
+    const result = AllocateEmptyBigInt(resultSign, xlength);
+    CppAbsoluteSubAndCanonicalize(result, x, y);
+    return Convert(result);
+  }
+
+  macro MutableBigIntAbsoluteAdd(implicit context: Context)(
+      xBigint: BigInt, yBigint: BigInt,
+      resultSign: uint32): BigInt labels BigIntTooBig {
+    let xlength = ReadBigIntLength(xBigint);
+    let ylength = ReadBigIntLength(yBigint);
+
+    let x = xBigint;
+    let y = yBigint;
+    if (xlength < ylength) {
+      // Swap x and y so that x is longer.
+      x = yBigint;
+      y = xBigint;
+      const tempLength = xlength;
+      xlength = ylength;
+      ylength = tempLength;
+    }
+
+    // case: 0n + 0n
+    if (xlength == 0) {
+      assert(ylength == 0);
+      return x;
+    }
+
+    // case: x + 0n
+    if (ylength == 0) {
+      return resultSign == ReadBigIntSign(x) ? x : BigIntUnaryMinus(x);
+    }
+
+    // case: x + y
+    const result = AllocateEmptyBigIntNoThrow(resultSign, xlength + 1)
+        otherwise BigIntTooBig;
+    CppAbsoluteAddAndCanonicalize(result, x, y);
+    return Convert(result);
+  }
+
+  macro BigIntAddImpl(implicit context: Context)(x: BigInt, y: BigInt): BigInt
+      labels BigIntTooBig {
+    const xsign = ReadBigIntSign(x);
+    const ysign = ReadBigIntSign(y);
+    if (xsign == ysign) {
+      // x + y == x + y
+      // -x + -y == -(x + y)
+      return MutableBigIntAbsoluteAdd(x, y, xsign) otherwise BigIntTooBig;
+    }
+
+    // x + -y == x - y == -(y - x)
+    // -x + y == y - x == -(x - y)
+    if (MutableBigIntAbsoluteCompare(x, y) >= 0) {
+      return MutableBigIntAbsoluteSub(x, y, xsign);
+    }
+    return MutableBigIntAbsoluteSub(y, x, InvertSign(xsign));
+  }
+
+  builtin BigIntAddNoThrow(implicit context: Context)(x: BigInt, y: BigInt):
+      Numeric {
+    try {
+      return BigIntAddImpl(x, y) otherwise BigIntTooBig;
+    }
+    label BigIntTooBig {
+      // Smi sentinel is used to signal BigIntTooBig exception.
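+      // Editorial gloss (assumption about the calling convention): callers
+      // are expected to distinguish this Smi sentinel from a BigInt result,
+      // e.g. with a TaggedIsSmi check, and raise the RangeError themselves.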
+ return Convert(0); + } + } + + builtin BigIntAdd(implicit context: Context)(xNum: Numeric, yNum: Numeric): + BigInt { + try { + const x = Cast(xNum) otherwise MixedTypes; + const y = Cast(yNum) otherwise MixedTypes; + + return BigIntAddImpl(x, y) otherwise BigIntTooBig; + } + label MixedTypes { + ThrowTypeError(kBigIntMixedTypes); + } + label BigIntTooBig { + ThrowRangeError(kBigIntTooBig); + } + } + + builtin BigIntUnaryMinus(implicit context: Context)(bigint: BigInt): BigInt { + const length = ReadBigIntLength(bigint); + + // There is no -0n. + if (length == 0) { + return bigint; + } + + const result = + AllocateEmptyBigInt(InvertSign(ReadBigIntSign(bigint)), length); + for (let i: intptr = 0; i < length; ++i) { + StoreBigIntDigit(result, i, LoadBigIntDigit(bigint, i)); + } + return Convert(result); + } + +} // namespace bigint diff --git a/deps/v8/src/builtins/boolean.tq b/deps/v8/src/builtins/boolean.tq index a41ef76d2138a2..25f9ebd3961add 100644 --- a/deps/v8/src/builtins/boolean.tq +++ b/deps/v8/src/builtins/boolean.tq @@ -3,39 +3,20 @@ // found in the LICENSE file. namespace boolean { - const kNameDictionaryInitialCapacity: - constexpr int32 generates 'NameDictionary::kInitialCapacity'; - - extern macro ConstructorBuiltinsAssembler::IsDictionaryMap(Map): bool; - extern macro CodeStubAssembler::AllocateNameDictionary(constexpr int32): - NameDictionary; - - // TODO(v8:9120): This is a workaround to get access to target and new.target - // in javascript builtins. Requires cleanup once this is fully supported by - // torque. - const NEW_TARGET_INDEX: - constexpr int32 generates 'Descriptor::kJSNewTarget'; - const TARGET_INDEX: constexpr int32 generates 'Descriptor::kJSTarget'; - extern macro Parameter(constexpr int32): Object; - javascript builtin - BooleanConstructor(context: Context, receiver: Object, ...arguments): Object { + BooleanConstructor( + js-implicit context: Context, receiver: Object, newTarget: Object, + target: JSFunction)(...arguments): Object { const value = SelectBooleanConstant(ToBoolean(arguments[0])); - const newTarget = Parameter(NEW_TARGET_INDEX); if (newTarget == Undefined) { return value; } - const target = UnsafeCast(Parameter(TARGET_INDEX)); const map = GetDerivedMap(target, UnsafeCast(newTarget)); - let properties = kEmptyFixedArray; - if (IsDictionaryMap(map)) { - properties = AllocateNameDictionary(kNameDictionaryInitialCapacity); - } - const obj = UnsafeCast(AllocateJSObjectFromMap( - map, properties, kEmptyFixedArray, kNone, kWithSlackTracking)); + const obj = + UnsafeCast(AllocateFastOrSlowJSObjectFromMap(map)); obj.value = value; return obj; } diff --git a/deps/v8/src/builtins/builtins-api.cc b/deps/v8/src/builtins/builtins-api.cc index 7ee879ab51d070..0c30e521541717 100644 --- a/deps/v8/src/builtins/builtins-api.cc +++ b/deps/v8/src/builtins/builtins-api.cc @@ -32,14 +32,16 @@ JSReceiver GetCompatibleReceiver(Isolate* isolate, FunctionTemplateInfo info, JSObject js_obj_receiver = JSObject::cast(receiver); FunctionTemplateInfo signature = FunctionTemplateInfo::cast(recv_type); - // Check the receiver. Fast path for receivers with no hidden prototypes. + // Check the receiver. 
if (signature.IsTemplateFor(js_obj_receiver)) return receiver; - if (!js_obj_receiver.map().has_hidden_prototype()) return JSReceiver(); - for (PrototypeIterator iter(isolate, js_obj_receiver, kStartAtPrototype, - PrototypeIterator::END_AT_NON_HIDDEN); - !iter.IsAtEnd(); iter.Advance()) { - JSObject current = iter.GetCurrent(); - if (signature.IsTemplateFor(current)) return current; + + // The JSGlobalProxy might have a hidden prototype. + if (V8_UNLIKELY(js_obj_receiver.IsJSGlobalProxy())) { + HeapObject prototype = js_obj_receiver.map().prototype(); + if (!prototype.IsNull(isolate)) { + JSObject js_obj_prototype = JSObject::cast(prototype); + if (signature.IsTemplateFor(js_obj_prototype)) return js_obj_prototype; + } } return JSReceiver(); } diff --git a/deps/v8/src/builtins/builtins-arguments-gen.cc b/deps/v8/src/builtins/builtins-arguments-gen.cc index 6cc9fd9623ac81..d65d57cc79b079 100644 --- a/deps/v8/src/builtins/builtins-arguments-gen.cc +++ b/deps/v8/src/builtins/builtins-arguments-gen.cc @@ -266,7 +266,8 @@ Node* ArgumentsBuiltinsAssembler::EmitFastNewSloppyArguments(Node* context, var_list1, argument_offset, mapped_offset, [this, elements, ¤t_argument](Node* offset) { Increment(¤t_argument, kSystemPointerSize); - Node* arg = LoadBufferObject(current_argument.value(), 0); + Node* arg = LoadBufferObject( + UncheckedCast(current_argument.value()), 0); StoreNoWriteBarrier(MachineRepresentation::kTagged, elements, offset, arg); }, diff --git a/deps/v8/src/builtins/builtins-array-gen.cc b/deps/v8/src/builtins/builtins-array-gen.cc index 29bcae6feb97dc..07f74cb4298db9 100644 --- a/deps/v8/src/builtins/builtins-array-gen.cc +++ b/deps/v8/src/builtins/builtins-array-gen.cc @@ -227,7 +227,7 @@ ArrayBuiltinsAssembler::ArrayBuiltinsAssembler( VariableList list({&a_, &k_, &to_}, zone()); FastLoopBody body = [&](Node* index) { - GotoIf(IsDetachedBuffer(array_buffer), detached); + GotoIf(IsDetachedBuffer(CAST(array_buffer)), detached); TNode data_ptr = LoadJSTypedArrayBackingStore(typed_array); Node* value = LoadFixedTypedArrayElementAsTagged( data_ptr, index, source_elements_kind_, SMI_PARAMETERS); @@ -402,7 +402,7 @@ TF_BUILTIN(ArrayPrototypePush, CodeStubAssembler) { CodeStubArguments args(this, ChangeInt32ToIntPtr(argc)); TNode receiver = args.GetReceiver(); TNode array_receiver; - Node* kind = nullptr; + TNode kind; Label fast(this); BranchIfFastJSArray(receiver, context, &fast, &runtime); @@ -709,19 +709,19 @@ TF_BUILTIN(ArrayFrom, ArrayPopulatorAssembler) { iterator_assembler.GetIterator(context, items, iterator_method); TNode native_context = LoadNativeContext(context); - TNode fast_iterator_result_map = - LoadContextElement(native_context, Context::ITERATOR_RESULT_MAP_INDEX); + TNode fast_iterator_result_map = CAST( + LoadContextElement(native_context, Context::ITERATOR_RESULT_MAP_INDEX)); Goto(&loop); BIND(&loop); { // Loop while iterator is not done. - TNode next = iterator_assembler.IteratorStep( + TNode next = iterator_assembler.IteratorStep( context, iterator_record, &loop_done, fast_iterator_result_map); TVARIABLE(Object, value, - CAST(iterator_assembler.IteratorValue( - context, next, fast_iterator_result_map))); + iterator_assembler.IteratorValue(context, next, + fast_iterator_result_map)); // If a map_function is supplied then call it (using this_arg as // receiver), on the value returned from the iterator. 
Exceptions are @@ -2035,8 +2035,7 @@ void ArrayBuiltinsAssembler::CreateArrayDispatchSingleArgument( &normal_sequence); { // Make elements kind holey and update elements kind in the type info. - var_elements_kind = - Signed(Word32Or(var_elements_kind.value(), Int32Constant(1))); + var_elements_kind = Word32Or(var_elements_kind.value(), Int32Constant(1)); StoreObjectFieldNoWriteBarrier( allocation_site, AllocationSite::kTransitionInfoOrBoilerplateOffset, SmiOr(transition_info, SmiConstant(fast_elements_kind_holey_mask))); diff --git a/deps/v8/src/builtins/builtins-array.cc b/deps/v8/src/builtins/builtins-array.cc index e6ab965a7ed047..96c10ed0fd545e 100644 --- a/deps/v8/src/builtins/builtins-array.cc +++ b/deps/v8/src/builtins/builtins-array.cc @@ -970,8 +970,9 @@ void CollectElementIndices(Isolate* isolate, Handle object, } case FAST_STRING_WRAPPER_ELEMENTS: case SLOW_STRING_WRAPPER_ELEMENTS: { - DCHECK(object->IsJSValue()); - Handle js_value = Handle::cast(object); + DCHECK(object->IsJSPrimitiveWrapper()); + Handle js_value = + Handle::cast(object); DCHECK(js_value->value().IsString()); Handle string(String::cast(js_value->value()), isolate); uint32_t length = static_cast(string->length()); diff --git a/deps/v8/src/builtins/builtins-async-function-gen.cc b/deps/v8/src/builtins/builtins-async-function-gen.cc index 03df1aaaad16bb..a95365e4255c36 100644 --- a/deps/v8/src/builtins/builtins-async-function-gen.cc +++ b/deps/v8/src/builtins/builtins-async-function-gen.cc @@ -36,6 +36,21 @@ void AsyncFunctionBuiltinsAssembler::AsyncFunctionAwaitResumeClosure( TNode async_function_object = CAST(LoadContextElement(context, Context::EXTENSION_INDEX)); + // Push the promise for the {async_function_object} back onto the catch + // prediction stack to handle exceptions thrown after resuming from the + // await properly. + Label if_instrumentation(this, Label::kDeferred), + if_instrumentation_done(this); + Branch(IsDebugActive(), &if_instrumentation, &if_instrumentation_done); + BIND(&if_instrumentation); + { + TNode promise = LoadObjectField( + async_function_object, JSAsyncFunctionObject::kPromiseOffset); + CallRuntime(Runtime::kDebugAsyncFunctionResumed, context, promise); + Goto(&if_instrumentation_done); + } + BIND(&if_instrumentation_done); + // Inline version of GeneratorPrototypeNext / GeneratorPrototypeReturn with // unnecessary runtime checks removed. @@ -80,27 +95,19 @@ TF_BUILTIN(AsyncFunctionEnter, AsyncFunctionBuiltinsAssembler) { Signed(IntPtrAdd(WordSar(frame_size, IntPtrConstant(kTaggedSizeLog2)), formal_parameter_count)); - // Allocate space for the promise, the async function object - // and the register file. - TNode size = IntPtrAdd( - IntPtrConstant(JSPromise::kSizeWithEmbedderFields + - JSAsyncFunctionObject::kSize + FixedArray::kHeaderSize), - Signed(WordShl(parameters_and_register_length, - IntPtrConstant(kTaggedSizeLog2)))); - TNode base = AllocateInNewSpace(size); - - // Initialize the register file. - TNode parameters_and_registers = UncheckedCast( - InnerAllocate(base, JSAsyncFunctionObject::kSize + - JSPromise::kSizeWithEmbedderFields)); - StoreMapNoWriteBarrier(parameters_and_registers, RootIndex::kFixedArrayMap); - StoreObjectFieldNoWriteBarrier(parameters_and_registers, - FixedArray::kLengthOffset, - SmiFromIntPtr(parameters_and_register_length)); + // Allocate and initialize the register file. 
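+  // Editorial gloss (assumption): kAllowLargeObjectAllocation is passed
+  // below because a function with enough parameters and registers can
+  // exceed the regular new-space object size limit.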
+ TNode parameters_and_registers = + AllocateFixedArray(HOLEY_ELEMENTS, parameters_and_register_length, + INTPTR_PARAMETERS, kAllowLargeObjectAllocation); FillFixedArrayWithValue(HOLEY_ELEMENTS, parameters_and_registers, IntPtrConstant(0), parameters_and_register_length, RootIndex::kUndefinedValue); + // Allocate space for the promise, the async function object. + TNode size = IntPtrConstant(JSPromise::kSizeWithEmbedderFields + + JSAsyncFunctionObject::kSize); + TNode base = AllocateInNewSpace(size); + // Initialize the promise. TNode native_context = LoadNativeContext(context); TNode promise_function = diff --git a/deps/v8/src/builtins/builtins-bigint-gen.cc b/deps/v8/src/builtins/builtins-bigint-gen.cc index 8a752f2517ed86..d4818f0e010a9e 100644 --- a/deps/v8/src/builtins/builtins-bigint-gen.cc +++ b/deps/v8/src/builtins/builtins-bigint-gen.cc @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. +#include "src/builtins/builtins-bigint-gen.h" #include "src/builtins/builtins-utils-gen.h" #include "src/builtins/builtins.h" #include "src/codegen/code-stub-assembler.h" diff --git a/deps/v8/src/builtins/builtins-bigint-gen.h b/deps/v8/src/builtins/builtins-bigint-gen.h new file mode 100644 index 00000000000000..288418258bf894 --- /dev/null +++ b/deps/v8/src/builtins/builtins-bigint-gen.h @@ -0,0 +1,80 @@ +// Copyright 2019 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#ifndef V8_BUILTINS_BUILTINS_BIGINT_GEN_H_ +#define V8_BUILTINS_BUILTINS_BIGINT_GEN_H_ + +#include "src/codegen/code-stub-assembler.h" +#include "src/objects/bigint.h" + +namespace v8 { +namespace internal { + +class BigIntBuiltinsAssembler : public CodeStubAssembler { + public: + explicit BigIntBuiltinsAssembler(compiler::CodeAssemblerState* state) + : CodeStubAssembler(state) {} + + TNode ReadBigIntLength(TNode value) { + TNode bitfield = LoadBigIntBitfield(value); + return ChangeInt32ToIntPtr( + Signed(DecodeWord32(bitfield))); + } + + TNode ReadBigIntSign(TNode value) { + TNode bitfield = LoadBigIntBitfield(value); + return DecodeWord32(bitfield); + } + + void WriteBigIntSignAndLength(TNode bigint, TNode sign, + TNode length) { + STATIC_ASSERT(BigIntBase::SignBits::kShift == 0); + TNode bitfield = Unsigned( + Word32Or(Word32Shl(TruncateIntPtrToInt32(length), + Int32Constant(BigIntBase::LengthBits::kShift)), + Word32And(sign, Int32Constant(BigIntBase::SignBits::kMask)))); + StoreBigIntBitfield(bigint, bitfield); + } + + void CppAbsoluteAddAndCanonicalize(TNode result, TNode x, + TNode y) { + TNode mutable_big_int_absolute_add_and_canonicalize = + ExternalConstant( + ExternalReference:: + mutable_big_int_absolute_add_and_canonicalize_function()); + CallCFunction(mutable_big_int_absolute_add_and_canonicalize, + MachineType::AnyTagged(), + std::make_pair(MachineType::AnyTagged(), result), + std::make_pair(MachineType::AnyTagged(), x), + std::make_pair(MachineType::AnyTagged(), y)); + } + + void CppAbsoluteSubAndCanonicalize(TNode result, TNode x, + TNode y) { + TNode mutable_big_int_absolute_sub_and_canonicalize = + ExternalConstant( + ExternalReference:: + mutable_big_int_absolute_sub_and_canonicalize_function()); + CallCFunction(mutable_big_int_absolute_sub_and_canonicalize, + MachineType::AnyTagged(), + std::make_pair(MachineType::AnyTagged(), result), + std::make_pair(MachineType::AnyTagged(), x), + std::make_pair(MachineType::AnyTagged(), y)); + } + + 
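+  // Editorial note (hedged): like the two helpers above, the comparison
+  // below is routed through an ExternalReference so the digit-wise work
+  // stays in C++ rather than being re-implemented in the code-stub
+  // assembler.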
TNode CppAbsoluteCompare(TNode x, TNode y) { + TNode mutable_big_int_absolute_compare = + ExternalConstant( + ExternalReference::mutable_big_int_absolute_compare_function()); + TNode result = UncheckedCast( + CallCFunction(mutable_big_int_absolute_compare, MachineType::Int32(), + std::make_pair(MachineType::AnyTagged(), x), + std::make_pair(MachineType::AnyTagged(), y))); + return result; + } +}; + +} // namespace internal +} // namespace v8 +#endif // V8_BUILTINS_BUILTINS_BIGINT_GEN_H_ diff --git a/deps/v8/src/builtins/builtins-bigint.cc b/deps/v8/src/builtins/builtins-bigint.cc index a8a847ef479b08..09d71a056275cb 100644 --- a/deps/v8/src/builtins/builtins-bigint.cc +++ b/deps/v8/src/builtins/builtins-bigint.cc @@ -80,10 +80,10 @@ MaybeHandle ThisBigIntValue(Isolate* isolate, Handle value, // 1. If Type(value) is BigInt, return value. if (value->IsBigInt()) return Handle::cast(value); // 2. If Type(value) is Object and value has a [[BigIntData]] internal slot: - if (value->IsJSValue()) { + if (value->IsJSPrimitiveWrapper()) { // 2a. Assert: value.[[BigIntData]] is a BigInt value. // 2b. Return value.[[BigIntData]]. - Object data = JSValue::cast(*value).value(); + Object data = JSPrimitiveWrapper::cast(*value).value(); if (data.IsBigInt()) return handle(BigInt::cast(data), isolate); } // 3. Throw a TypeError exception. diff --git a/deps/v8/src/builtins/builtins-boolean-gen.cc b/deps/v8/src/builtins/builtins-boolean-gen.cc index 30cf7ba0c1972f..74474a8918f15c 100644 --- a/deps/v8/src/builtins/builtins-boolean-gen.cc +++ b/deps/v8/src/builtins/builtins-boolean-gen.cc @@ -15,22 +15,23 @@ namespace internal { // ES6 #sec-boolean.prototype.tostring TF_BUILTIN(BooleanPrototypeToString, CodeStubAssembler) { - Node* context = Parameter(Descriptor::kContext); - Node* receiver = Parameter(Descriptor::kReceiver); + TNode context = CAST(Parameter(Descriptor::kContext)); + TNode receiver = CAST(Parameter(Descriptor::kReceiver)); - Node* value = ToThisValue(context, receiver, PrimitiveType::kBoolean, - "Boolean.prototype.toString"); - Node* result = LoadObjectField(value, Oddball::kToStringOffset); + TNode value = + CAST(ToThisValue(context, receiver, PrimitiveType::kBoolean, + "Boolean.prototype.toString")); + TNode result = CAST(LoadObjectField(value, Oddball::kToStringOffset)); Return(result); } // ES6 #sec-boolean.prototype.valueof TF_BUILTIN(BooleanPrototypeValueOf, CodeStubAssembler) { - Node* context = Parameter(Descriptor::kContext); - Node* receiver = Parameter(Descriptor::kReceiver); + TNode context = CAST(Parameter(Descriptor::kContext)); + TNode receiver = CAST(Parameter(Descriptor::kReceiver)); - Node* result = ToThisValue(context, receiver, PrimitiveType::kBoolean, - "Boolean.prototype.valueOf"); + TNode result = CAST(ToThisValue( + context, receiver, PrimitiveType::kBoolean, "Boolean.prototype.valueOf")); Return(result); } diff --git a/deps/v8/src/builtins/builtins-call-gen.cc b/deps/v8/src/builtins/builtins-call-gen.cc index 05142a8f079a3b..deb91dee246811 100644 --- a/deps/v8/src/builtins/builtins-call-gen.cc +++ b/deps/v8/src/builtins/builtins-call-gen.cc @@ -475,14 +475,13 @@ TNode CallOrConstructBuiltinsAssembler::GetCompatibleReceiver( BIND(&holder_next); { - // Continue with the hidden prototype of the {holder} if it - // has one, or throw an illegal invocation exception, since - // the receiver did not pass the {signature} check. 
+ // Continue with the hidden prototype of the {holder} if it is a + // JSGlobalProxy (the hidden prototype can either be null or a + // JSObject in that case), or throw an illegal invocation exception, + // since the receiver did not pass the {signature} check. TNode holder_map = LoadMap(holder); var_holder = LoadMapPrototype(holder_map); - GotoIf(IsSetWord32(LoadMapBitField2(holder_map), - Map::HasHiddenPrototypeBit::kMask), - &holder_loop); + GotoIf(IsJSGlobalProxyMap(holder_map), &holder_loop); ThrowTypeError(context, MessageTemplate::kIllegalInvocation); } } diff --git a/deps/v8/src/builtins/builtins-callsite.cc b/deps/v8/src/builtins/builtins-callsite.cc index d98eba4eeb8ffa..d1082291ef1414 100644 --- a/deps/v8/src/builtins/builtins-callsite.cc +++ b/deps/v8/src/builtins/builtins-callsite.cc @@ -8,6 +8,7 @@ #include "src/logging/counters.h" #include "src/objects/frame-array-inl.h" #include "src/objects/objects-inl.h" +#include "src/objects/stack-frame-info.h" namespace v8 { namespace internal { @@ -76,6 +77,9 @@ BUILTIN(CallSitePrototypeGetFunction) { StackFrameBase* frame = it.Frame(); if (frame->IsStrict()) return ReadOnlyRoots(isolate).undefined_value(); + + isolate->CountUsage(v8::Isolate::kCallSiteAPIGetFunctionSloppyCall); + return *frame->GetFunction(); } @@ -135,6 +139,9 @@ BUILTIN(CallSitePrototypeGetThis) { StackFrameBase* frame = it.Frame(); if (frame->IsStrict()) return ReadOnlyRoots(isolate).undefined_value(); + + isolate->CountUsage(v8::Isolate::kCallSiteAPIGetThisSloppyCall); + return *frame->GetReceiver(); } @@ -197,9 +204,9 @@ BUILTIN(CallSitePrototypeIsToplevel) { BUILTIN(CallSitePrototypeToString) { HandleScope scope(isolate); CHECK_CALLSITE(recv, "toString"); - FrameArrayIterator it(isolate, GetFrameArray(isolate, recv), - GetFrameIndex(isolate, recv)); - RETURN_RESULT_OR_FAILURE(isolate, it.Frame()->ToString()); + Handle frame = isolate->factory()->NewStackTraceFrame( + GetFrameArray(isolate, recv), GetFrameIndex(isolate, recv)); + RETURN_RESULT_OR_FAILURE(isolate, SerializeStackTraceFrame(isolate, frame)); } #undef CHECK_CALLSITE diff --git a/deps/v8/src/builtins/builtins-collections-gen.cc b/deps/v8/src/builtins/builtins-collections-gen.cc index b5a9851c7041c5..613e5f10ff2f17 100644 --- a/deps/v8/src/builtins/builtins-collections-gen.cc +++ b/deps/v8/src/builtins/builtins-collections-gen.cc @@ -66,19 +66,19 @@ class BaseCollectionsAssembler : public CodeStubAssembler { TNode iterable); // Constructs a collection instance. Choosing a fast path when possible. - TNode AllocateJSCollection(TNode context, - TNode constructor, - TNode new_target); + TNode AllocateJSCollection(TNode context, + TNode constructor, + TNode new_target); // Fast path for constructing a collection instance if the constructor // function has not been modified. - TNode AllocateJSCollectionFast(TNode constructor); + TNode AllocateJSCollectionFast(TNode constructor); // Fallback for constructing a collection instance if the constructor function // has been modified. - TNode AllocateJSCollectionSlow(TNode context, - TNode constructor, - TNode new_target); + TNode AllocateJSCollectionSlow(TNode context, + TNode constructor, + TNode new_target); // Allocates the backing store for a collection. 
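  // Editorial gloss (assumption): each concrete collection assembler is
  // expected to override this with its table-specific allocation, e.g. an
  // OrderedHashMap for Map and an OrderedHashSet for Set.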
virtual TNode AllocateTable(Variant variant, TNode context, @@ -320,17 +320,17 @@ void BaseCollectionsAssembler::AddConstructorEntriesFromIterable( CSA_ASSERT(this, Word32BinaryNot(IsUndefined(iterator.object))); - TNode fast_iterator_result_map = - LoadContextElement(native_context, Context::ITERATOR_RESULT_MAP_INDEX); + TNode fast_iterator_result_map = CAST( + LoadContextElement(native_context, Context::ITERATOR_RESULT_MAP_INDEX)); TVARIABLE(Object, var_exception); Goto(&loop); BIND(&loop); { - TNode next = iterator_assembler.IteratorStep( + TNode next = iterator_assembler.IteratorStep( context, iterator, &exit, fast_iterator_result_map); - TNode next_value = CAST(iterator_assembler.IteratorValue( - context, next, fast_iterator_result_map)); + TNode next_value = iterator_assembler.IteratorValue( + context, next, fast_iterator_result_map); AddConstructorEntry(variant, context, collection, add_func, next_value, nullptr, &if_exception, &var_exception); Goto(&loop); @@ -367,33 +367,33 @@ void BaseCollectionsAssembler::GotoIfInitialAddFunctionModified( GetAddFunctionNameIndex(variant), if_modified); } -TNode BaseCollectionsAssembler::AllocateJSCollection( +TNode BaseCollectionsAssembler::AllocateJSCollection( TNode context, TNode constructor, - TNode new_target) { + TNode new_target) { TNode is_target_unmodified = WordEqual(constructor, new_target); - return Select(is_target_unmodified, - [=] { return AllocateJSCollectionFast(constructor); }, - [=] { - return AllocateJSCollectionSlow(context, constructor, - new_target); - }); + return Select( + is_target_unmodified, + [=] { return AllocateJSCollectionFast(constructor); }, + [=] { + return AllocateJSCollectionSlow(context, constructor, new_target); + }); } -TNode BaseCollectionsAssembler::AllocateJSCollectionFast( - TNode constructor) { +TNode BaseCollectionsAssembler::AllocateJSCollectionFast( + TNode constructor) { CSA_ASSERT(this, IsConstructorMap(LoadMap(constructor))); - TNode initial_map = - LoadObjectField(constructor, JSFunction::kPrototypeOrInitialMapOffset); - return CAST(AllocateJSObjectFromMap(initial_map)); + TNode initial_map = + CAST(LoadJSFunctionPrototypeOrInitialMap(constructor)); + return AllocateJSObjectFromMap(initial_map); } -TNode BaseCollectionsAssembler::AllocateJSCollectionSlow( +TNode BaseCollectionsAssembler::AllocateJSCollectionSlow( TNode context, TNode constructor, - TNode new_target) { + TNode new_target) { ConstructorBuiltinsAssembler constructor_assembler(this->state()); - return CAST(constructor_assembler.EmitFastNewObject(context, constructor, - new_target)); + return constructor_assembler.EmitFastNewObject(context, constructor, + new_target); } void BaseCollectionsAssembler::GenerateConstructor( @@ -408,7 +408,7 @@ void BaseCollectionsAssembler::GenerateConstructor( TNode native_context = LoadNativeContext(context); TNode collection = AllocateJSCollection( - context, GetConstructor(variant, native_context), new_target); + context, GetConstructor(variant, native_context), CAST(new_target)); AddConstructorEntries(variant, context, native_context, collection, iterable); Return(collection); diff --git a/deps/v8/src/builtins/builtins-console.cc b/deps/v8/src/builtins/builtins-console.cc index 973f1785d1ad62..28c9261ed41fdc 100644 --- a/deps/v8/src/builtins/builtins-console.cc +++ b/deps/v8/src/builtins/builtins-console.cc @@ -39,13 +39,30 @@ namespace internal { namespace { void ConsoleCall( - Isolate* isolate, internal::BuiltinArguments& args, + Isolate* isolate, + internal::BuiltinArguments& args, // 
NOLINT(runtime/references) void (debug::ConsoleDelegate::*func)(const v8::debug::ConsoleCallArguments&, const v8::debug::ConsoleContext&)) { CHECK(!isolate->has_pending_exception()); CHECK(!isolate->has_scheduled_exception()); if (!isolate->console_delegate()) return; HandleScope scope(isolate); + + // Access check. The current context has to match the context of all + // arguments, otherwise the inspector might leak objects across contexts. + Handle context = handle(isolate->context(), isolate); + for (int i = 0; i < args.length(); ++i) { + Handle argument = args.at(i); + if (!argument->IsJSObject()) continue; + + Handle argument_obj = Handle::cast(argument); + if (argument->IsAccessCheckNeeded(isolate) && + !isolate->MayAccess(context, argument_obj)) { + isolate->ReportFailedAccessCheck(argument_obj); + return; + } + } + debug::ConsoleCallArguments wrapper(args); Handle context_id_obj = JSObject::GetDataProperty( args.target(), isolate->factory()->console_context_id_symbol()); diff --git a/deps/v8/src/builtins/builtins-constructor-gen.cc b/deps/v8/src/builtins/builtins-constructor-gen.cc index a725f3c4a1e5f5..767e626432e681 100644 --- a/deps/v8/src/builtins/builtins-constructor-gen.cc +++ b/deps/v8/src/builtins/builtins-constructor-gen.cc @@ -147,44 +147,40 @@ TF_BUILTIN(FastNewClosure, ConstructorBuiltinsAssembler) { } TF_BUILTIN(FastNewObject, ConstructorBuiltinsAssembler) { - Node* context = Parameter(Descriptor::kContext); - Node* target = Parameter(Descriptor::kTarget); - Node* new_target = Parameter(Descriptor::kNewTarget); + TNode context = CAST(Parameter(Descriptor::kContext)); + TNode target = CAST(Parameter(Descriptor::kTarget)); + TNode new_target = CAST(Parameter(Descriptor::kNewTarget)); Label call_runtime(this); - Node* result = EmitFastNewObject(context, target, new_target, &call_runtime); + TNode result = + EmitFastNewObject(context, target, new_target, &call_runtime); Return(result); BIND(&call_runtime); TailCallRuntime(Runtime::kNewObject, context, target, new_target); } -Node* ConstructorBuiltinsAssembler::EmitFastNewObject(Node* context, - Node* target, - Node* new_target) { - VARIABLE(var_obj, MachineRepresentation::kTagged); +compiler::TNode ConstructorBuiltinsAssembler::EmitFastNewObject( + SloppyTNode context, SloppyTNode target, + SloppyTNode new_target) { + TVARIABLE(JSObject, var_obj); Label call_runtime(this), end(this); - Node* result = EmitFastNewObject(context, target, new_target, &call_runtime); - var_obj.Bind(result); + var_obj = EmitFastNewObject(context, target, new_target, &call_runtime); Goto(&end); BIND(&call_runtime); - var_obj.Bind(CallRuntime(Runtime::kNewObject, context, target, new_target)); + var_obj = CAST(CallRuntime(Runtime::kNewObject, context, target, new_target)); Goto(&end); BIND(&end); return var_obj.value(); } -Node* ConstructorBuiltinsAssembler::EmitFastNewObject(Node* context, - Node* target, - Node* new_target, - Label* call_runtime) { - CSA_ASSERT(this, HasInstanceType(target, JS_FUNCTION_TYPE)); - CSA_ASSERT(this, IsJSReceiver(new_target)); - +compiler::TNode ConstructorBuiltinsAssembler::EmitFastNewObject( + SloppyTNode context, SloppyTNode target, + SloppyTNode new_target, Label* call_runtime) { // Verify that the new target is a JSFunction. 
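The access check added to ConsoleCall above keeps the inspector from leaking objects across contexts: every argument that carries a security check must be reachable from the calling context, or the whole console call is silently dropped. A reduced C++ sketch of that guard; the types and the MayAccess policy are invented for illustration:

```cpp
#include <vector>

struct Context { int id; };                         // invented
struct Argument { bool needs_check; int ctx_id; };  // invented

// Stand-in for Isolate::MayAccess; the real policy consults security tokens.
static bool MayAccess(const Context& current, const Argument& a) {
  return a.ctx_id == current.id;
}

// Returns false (the builtin would report a failed access check and bail out)
// if any argument that requires checking is not accessible from the current
// context; only then are the arguments handed to the console delegate.
bool ConsoleArgumentsAccessible(const Context& current,
                                const std::vector<Argument>& args) {
  for (const Argument& a : args) {
    if (a.needs_check && !MayAccess(current, a)) return false;
  }
  return true;
}
```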
Label fast(this), end(this); GotoIf(HasInstanceType(new_target, JS_FUNCTION_TYPE), &fast); @@ -732,7 +728,7 @@ TF_BUILTIN(NumberConstructor, ConstructorBuiltinsAssembler) { TNode target = LoadTargetFromFrame(); Node* result = CallBuiltin(Builtins::kFastNewObject, context, target, new_target); - StoreObjectField(result, JSValue::kValueOffset, n_value); + StoreObjectField(result, JSPrimitiveWrapper::kValueOffset, n_value); args.PopAndReturn(result); } } @@ -798,7 +794,7 @@ TF_BUILTIN(StringConstructor, ConstructorBuiltinsAssembler) { Node* result = CallBuiltin(Builtins::kFastNewObject, context, target, new_target); - StoreObjectField(result, JSValue::kValueOffset, s_value); + StoreObjectField(result, JSPrimitiveWrapper::kValueOffset, s_value); args.PopAndReturn(result); } } diff --git a/deps/v8/src/builtins/builtins-constructor-gen.h b/deps/v8/src/builtins/builtins-constructor-gen.h index 9093a5a77bbbaf..9208506c79eced 100644 --- a/deps/v8/src/builtins/builtins-constructor-gen.h +++ b/deps/v8/src/builtins/builtins-constructor-gen.h @@ -31,10 +31,14 @@ class ConstructorBuiltinsAssembler : public CodeStubAssembler { Label* call_runtime); Node* EmitCreateEmptyObjectLiteral(Node* context); - Node* EmitFastNewObject(Node* context, Node* target, Node* new_target); - - Node* EmitFastNewObject(Node* context, Node* target, Node* new_target, - Label* call_runtime); + TNode EmitFastNewObject(SloppyTNode context, + SloppyTNode target, + SloppyTNode new_target); + + TNode EmitFastNewObject(SloppyTNode context, + SloppyTNode target, + SloppyTNode new_target, + Label* call_runtime); }; } // namespace internal diff --git a/deps/v8/src/builtins/builtins-conversion-gen.cc b/deps/v8/src/builtins/builtins-conversion-gen.cc index bc7e349ce1f09b..71a9cbf1452836 100644 --- a/deps/v8/src/builtins/builtins-conversion-gen.cc +++ b/deps/v8/src/builtins/builtins-conversion-gen.cc @@ -392,7 +392,8 @@ TF_BUILTIN(ToInteger_TruncateMinusZero, CodeStubAssembler) { // ES6 section 7.1.13 ToObject (argument) TF_BUILTIN(ToObject, CodeStubAssembler) { Label if_smi(this, Label::kDeferred), if_jsreceiver(this), - if_noconstructor(this, Label::kDeferred), if_wrapjsvalue(this); + if_noconstructor(this, Label::kDeferred), + if_wrapjs_primitive_wrapper(this); Node* context = Parameter(Descriptor::kContext); Node* object = Parameter(Descriptor::kArgument); @@ -411,27 +412,30 @@ TF_BUILTIN(ToObject, CodeStubAssembler) { IntPtrConstant(Map::kNoConstructorFunctionIndex)), &if_noconstructor); constructor_function_index_var.Bind(constructor_function_index); - Goto(&if_wrapjsvalue); + Goto(&if_wrapjs_primitive_wrapper); BIND(&if_smi); constructor_function_index_var.Bind( IntPtrConstant(Context::NUMBER_FUNCTION_INDEX)); - Goto(&if_wrapjsvalue); + Goto(&if_wrapjs_primitive_wrapper); - BIND(&if_wrapjsvalue); + BIND(&if_wrapjs_primitive_wrapper); TNode native_context = LoadNativeContext(context); Node* constructor = LoadContextElement( native_context, constructor_function_index_var.value()); Node* initial_map = LoadObjectField(constructor, JSFunction::kPrototypeOrInitialMapOffset); - Node* js_value = Allocate(JSValue::kSize); - StoreMapNoWriteBarrier(js_value, initial_map); - StoreObjectFieldRoot(js_value, JSValue::kPropertiesOrHashOffset, + Node* js_primitive_wrapper = Allocate(JSPrimitiveWrapper::kSize); + StoreMapNoWriteBarrier(js_primitive_wrapper, initial_map); + StoreObjectFieldRoot(js_primitive_wrapper, + JSPrimitiveWrapper::kPropertiesOrHashOffset, RootIndex::kEmptyFixedArray); - StoreObjectFieldRoot(js_value, JSObject::kElementsOffset, 
+ StoreObjectFieldRoot(js_primitive_wrapper, + JSPrimitiveWrapper::kElementsOffset, RootIndex::kEmptyFixedArray); - StoreObjectField(js_value, JSValue::kValueOffset, object); - Return(js_value); + StoreObjectField(js_primitive_wrapper, JSPrimitiveWrapper::kValueOffset, + object); + Return(js_primitive_wrapper); BIND(&if_noconstructor); ThrowTypeError(context, MessageTemplate::kUndefinedOrNullToObject, diff --git a/deps/v8/src/builtins/builtins-data-view-gen.h b/deps/v8/src/builtins/builtins-data-view-gen.h index eeb84f34dbfff4..d5c6571880fd4c 100644 --- a/deps/v8/src/builtins/builtins-data-view-gen.h +++ b/deps/v8/src/builtins/builtins-data-view-gen.h @@ -17,13 +17,13 @@ class DataViewBuiltinsAssembler : public CodeStubAssembler { explicit DataViewBuiltinsAssembler(compiler::CodeAssemblerState* state) : CodeStubAssembler(state) {} - TNode LoadUint8(TNode data_pointer, TNode offset) { - return UncheckedCast( + TNode LoadUint8(TNode data_pointer, TNode offset) { + return UncheckedCast( Load(MachineType::Uint8(), data_pointer, offset)); } - TNode LoadInt8(TNode data_pointer, TNode offset) { - return UncheckedCast( + TNode LoadInt8(TNode data_pointer, TNode offset) { + return UncheckedCast( Load(MachineType::Int8(), data_pointer, offset)); } diff --git a/deps/v8/src/builtins/builtins-definitions.h b/deps/v8/src/builtins/builtins-definitions.h index 3412edb89d0aae..23ab4a88ca14ff 100644 --- a/deps/v8/src/builtins/builtins-definitions.h +++ b/deps/v8/src/builtins/builtins-definitions.h @@ -103,8 +103,8 @@ namespace internal { \ /* String helpers */ \ TFC(StringCharAt, StringAt) \ - TFC(StringCodePointAtUTF16, StringAt) \ - TFC(StringCodePointAtUTF32, StringAt) \ + TFC(StringCodePointAt, StringAt) \ + TFC(StringFromCodePointAt, StringAtAsString) \ TFC(StringEqual, Compare) \ TFC(StringGreaterThan, Compare) \ TFC(StringGreaterThanOrEqual, Compare) \ @@ -170,7 +170,9 @@ namespace internal { \ /* Adapters for Turbofan into runtime */ \ TFC(AllocateInYoungGeneration, Allocate) \ + TFC(AllocateRegularInYoungGeneration, Allocate) \ TFC(AllocateInOldGeneration, Allocate) \ + TFC(AllocateRegularInOldGeneration, Allocate) \ \ /* TurboFan support builtins */ \ TFS(CopyFastSmiOrObjectElements, kObject) \ @@ -266,7 +268,7 @@ namespace internal { \ /* Abort */ \ TFC(Abort, Abort) \ - TFC(AbortJS, Abort) \ + TFC(AbortCSAAssert, Abort) \ \ /* Built-in functions for Javascript */ \ /* Special internal builtins */ \ @@ -726,16 +728,12 @@ namespace internal { CPP(ObjectGetOwnPropertyDescriptors) \ TFJ(ObjectGetOwnPropertyNames, 1, kReceiver, kObject) \ CPP(ObjectGetOwnPropertySymbols) \ - CPP(ObjectGetPrototypeOf) \ - CPP(ObjectSetPrototypeOf) \ TFJ(ObjectIs, 2, kReceiver, kLeft, kRight) \ - CPP(ObjectIsExtensible) \ CPP(ObjectIsFrozen) \ CPP(ObjectIsSealed) \ TFJ(ObjectKeys, 1, kReceiver, kObject) \ CPP(ObjectLookupGetter) \ CPP(ObjectLookupSetter) \ - CPP(ObjectPreventExtensions) \ /* ES6 #sec-object.prototype.tostring */ \ TFJ(ObjectPrototypeToString, 0, kReceiver) \ /* ES6 #sec-object.prototype.valueof */ \ @@ -823,16 +821,10 @@ namespace internal { ASM(ReflectApply, Dummy) \ ASM(ReflectConstruct, Dummy) \ CPP(ReflectDefineProperty) \ - CPP(ReflectDeleteProperty) \ - CPP(ReflectGet) \ CPP(ReflectGetOwnPropertyDescriptor) \ - CPP(ReflectGetPrototypeOf) \ TFJ(ReflectHas, 2, kReceiver, kTarget, kKey) \ - CPP(ReflectIsExtensible) \ CPP(ReflectOwnKeys) \ - CPP(ReflectPreventExtensions) \ CPP(ReflectSet) \ - CPP(ReflectSetPrototypeOf) \ \ /* RegExp */ \ CPP(RegExpCapture1Getter) \ @@ -1150,6 +1142,7 @@ namespace 
internal { ASM(StackCheck, Dummy) \ ASM(DoubleToI, Dummy) \ TFC(GetProperty, GetProperty) \ + TFS(GetPropertyWithReceiver, kObject, kKey, kReceiver, kOnNonExistent) \ TFS(SetProperty, kReceiver, kKey, kValue) \ TFS(SetPropertyInLiteral, kReceiver, kKey, kValue) \ ASM(MemCopyUint8Uint8, CCall) \ diff --git a/deps/v8/src/builtins/builtins-error.cc b/deps/v8/src/builtins/builtins-error.cc index e099baeb34f663..3bcc7356d42c10 100644 --- a/deps/v8/src/builtins/builtins-error.cc +++ b/deps/v8/src/builtins/builtins-error.cc @@ -31,10 +31,11 @@ BUILTIN(ErrorConstructor) { } RETURN_RESULT_OR_FAILURE( - isolate, ErrorUtils::Construct(isolate, args.target(), - Handle::cast(args.new_target()), - args.atOrUndefined(isolate, 1), mode, - caller, false)); + isolate, + ErrorUtils::Construct(isolate, args.target(), + Handle::cast(args.new_target()), + args.atOrUndefined(isolate, 1), mode, caller, + ErrorUtils::StackTraceCollection::kDetailed)); } // static diff --git a/deps/v8/src/builtins/builtins-global.cc b/deps/v8/src/builtins/builtins-global.cc index 53e974c4521908..137f7f34021509 100644 --- a/deps/v8/src/builtins/builtins-global.cc +++ b/deps/v8/src/builtins/builtins-global.cc @@ -86,17 +86,27 @@ BUILTIN(GlobalEval) { Handle x = args.atOrUndefined(isolate, 1); Handle target = args.target(); Handle target_global_proxy(target->global_proxy(), isolate); - if (!x->IsString()) return *x; if (!Builtins::AllowDynamicFunction(isolate, target, target_global_proxy)) { isolate->CountUsage(v8::Isolate::kFunctionConstructorReturnedUndefined); return ReadOnlyRoots(isolate).undefined_value(); } + + // Run embedder pre-checks before executing eval. If the argument is a + // non-String (or other object the embedder doesn't know to handle), then + // return it directly. + MaybeHandle source; + bool unhandled_object; + std::tie(source, unhandled_object) = + Compiler::ValidateDynamicCompilationSource( + isolate, handle(target->native_context(), isolate), x); + if (unhandled_object) return *x; + Handle function; ASSIGN_RETURN_FAILURE_ON_EXCEPTION( isolate, function, - Compiler::GetFunctionFromString(handle(target->native_context(), isolate), - Handle::cast(x), - NO_PARSE_RESTRICTION, kNoSourcePosition)); + Compiler::GetFunctionFromValidatedString( + handle(target->native_context(), isolate), source, + NO_PARSE_RESTRICTION, kNoSourcePosition)); RETURN_RESULT_OR_FAILURE( isolate, Execution::Call(isolate, function, target_global_proxy, 0, nullptr)); diff --git a/deps/v8/src/builtins/builtins-handler-gen.cc b/deps/v8/src/builtins/builtins-handler-gen.cc index d1b50f2cdcdf35..973356f569cb67 100644 --- a/deps/v8/src/builtins/builtins-handler-gen.cc +++ b/deps/v8/src/builtins/builtins-handler-gen.cc @@ -28,7 +28,8 @@ class HandlerBuiltinsAssembler : public CodeStubAssembler { // kind. Use with caution. This produces a *lot* of code. using ElementsKindSwitchCase = std::function; void DispatchByElementsKind(TNode elements_kind, - const ElementsKindSwitchCase& case_function); + const ElementsKindSwitchCase& case_function, + bool handle_typed_elements_kind); // Dispatches over all possible combinations of {from,to} elements kinds. 
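In the GlobalEval change above, the old `if (!x->IsString()) return *x;` shortcut moves into Compiler::ValidateDynamicCompilationSource, which also lets an embedder accept non-string compilation sources; values the embedder does not handle are still returned unchanged, preserving `eval(123) === 123`. A rough C++17 sketch of that two-step flow, with invented types rather than the Compiler API:

```cpp
#include <optional>
#include <string>
#include <tuple>

struct Value {  // invented stand-in for a JS value
  bool is_string;
  std::string payload;
};

// Mirrors ValidateDynamicCompilationSource: either a usable source, or a
// flag saying "return the argument to the caller untouched".
std::tuple<std::optional<std::string>, bool> ValidateSource(const Value& v) {
  if (!v.is_string) return {std::nullopt, /*unhandled_object=*/true};
  return {v.payload, /*unhandled_object=*/false};
}

Value GlobalEval(const Value& x) {
  auto [source, unhandled] = ValidateSource(x);
  if (unhandled) return x;  // eval of an unhandled value is the identity
  // ... compile *source and execute it against the global proxy ...
  return Value{true, *source};
}
```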
using ElementsKindTransitionSwitchCase = @@ -48,7 +49,7 @@ TF_BUILTIN(LoadIC_StringLength, CodeStubAssembler) { TF_BUILTIN(LoadIC_StringWrapperLength, CodeStubAssembler) { Node* value = Parameter(Descriptor::kReceiver); - Node* string = LoadJSValueValue(value); + Node* string = LoadJSPrimitiveWrapperValue(value); Return(LoadStringLengthAsSmi(string)); } @@ -227,7 +228,7 @@ void HandlerBuiltinsAssembler::Generate_ElementsTransitionAndStore( [=, &miss](ElementsKind from_kind, ElementsKind to_kind) { TransitionElementsKind(receiver, map, from_kind, to_kind, &miss); EmitElementStore(receiver, key, value, to_kind, store_mode, &miss, - context); + context, nullptr); }); Return(value); } @@ -280,7 +281,8 @@ TF_BUILTIN(ElementsTransitionAndStore_NoTransitionHandleCOW, V(BIGINT64_ELEMENTS) void HandlerBuiltinsAssembler::DispatchByElementsKind( - TNode elements_kind, const ElementsKindSwitchCase& case_function) { + TNode elements_kind, const ElementsKindSwitchCase& case_function, + bool handle_typed_elements_kind) { Label next(this), if_unknown_type(this, Label::kDeferred); int32_t elements_kinds[] = { @@ -300,6 +302,8 @@ void HandlerBuiltinsAssembler::DispatchByElementsKind( }; STATIC_ASSERT(arraysize(elements_kinds) == arraysize(elements_kind_labels)); + // TODO(mythria): Do not emit cases for typed elements kind when + // handle_typed_elements is false to decrease the size of the jump table. Switch(elements_kind, &if_unknown_type, elements_kinds, elements_kind_labels, arraysize(elements_kinds)); @@ -310,6 +314,9 @@ void HandlerBuiltinsAssembler::DispatchByElementsKind( IsFrozenOrSealedElementsKindUnchecked(KIND)) { \ /* Disable support for frozen or sealed elements kinds. */ \ Unreachable(); \ + } else if (!handle_typed_elements_kind && \ + IsTypedArrayElementsKind(KIND)) { \ + Unreachable(); \ } else { \ case_function(KIND); \ Goto(&next); \ @@ -340,17 +347,26 @@ void HandlerBuiltinsAssembler::Generate_StoreFastElementIC( Label miss(this); + bool handle_typed_elements_kind = + store_mode == STANDARD_STORE || store_mode == STORE_IGNORE_OUT_OF_BOUNDS; + // For typed arrays maybe_converted_value contains the value obtained after + // calling ToNumber. We should pass the converted value to the runtime to + // avoid doing the user visible conversion again. + VARIABLE(maybe_converted_value, MachineRepresentation::kTagged, value); + maybe_converted_value.Bind(value); // TODO(v8:8481): Pass elements_kind in feedback vector slots. 
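DispatchByElementsKind above instantiates the case function once per elements kind and switches over the kind, so each branch is specialized at compile time; the new `handle_typed_elements_kind` flag marks typed-array kinds unreachable when the store mode cannot reach them (the TODO notes those cases could later be dropped from the jump table entirely). A scalar C++ analogue of that shape, with an invented enum:

```cpp
#include <cstdlib>

enum class ElementsKind { kPackedSmi, kPackedDouble, kDictionary, kUint8 };

// Each case is its own specialized branch, as in the CSA jump table; typed
// kinds become unreachable when the caller promises they cannot occur.
int DispatchByElementsKind(ElementsKind kind, bool handle_typed_kinds) {
  switch (kind) {
    case ElementsKind::kPackedSmi:    return 1;
    case ElementsKind::kPackedDouble: return 2;
    case ElementsKind::kDictionary:   return 3;
    case ElementsKind::kUint8:
      if (!handle_typed_kinds) std::abort();  // Unreachable() in the CSA
      return 4;
  }
  std::abort();  // unknown kind
}
```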
- DispatchByElementsKind(LoadElementsKind(receiver), - [=, &miss](ElementsKind elements_kind) { - EmitElementStore(receiver, key, value, elements_kind, - store_mode, &miss, context); - }); + DispatchByElementsKind( + LoadElementsKind(receiver), + [=, &miss, &maybe_converted_value](ElementsKind elements_kind) { + EmitElementStore(receiver, key, value, elements_kind, store_mode, &miss, + context, &maybe_converted_value); + }, + handle_typed_elements_kind); Return(value); BIND(&miss); - TailCallRuntime(Runtime::kKeyedStoreIC_Miss, context, value, slot, vector, - receiver, key); + TailCallRuntime(Runtime::kKeyedStoreIC_Miss, context, + maybe_converted_value.value(), slot, vector, receiver, key); } TF_BUILTIN(StoreFastElementIC_Standard, HandlerBuiltinsAssembler) { diff --git a/deps/v8/src/builtins/builtins-internal-gen.cc b/deps/v8/src/builtins/builtins-internal-gen.cc index baaadb722ad153..8d22767b587d94 100644 --- a/deps/v8/src/builtins/builtins-internal-gen.cc +++ b/deps/v8/src/builtins/builtins-internal-gen.cc @@ -614,8 +614,9 @@ class SetOrCopyDataPropertiesAssembler : public CodeStubAssembler { Label if_done(this), if_noelements(this), if_sourcenotjsobject(this, Label::kDeferred); - // JSValue wrappers for numbers don't have any enumerable own properties, - // so we can immediately skip the whole operation if {source} is a Smi. + // JSPrimitiveWrapper wrappers for numbers don't have any enumerable own + // properties, so we can immediately skip the whole operation if {source} is + // a Smi. GotoIf(TaggedIsSmi(source), &if_done); // Otherwise check if {source} is a proper JSObject, and if not, defer @@ -809,17 +810,49 @@ TF_BUILTIN(AdaptorWithBuiltinExitFrame, CodeStubAssembler) { TF_BUILTIN(AllocateInYoungGeneration, CodeStubAssembler) { TNode requested_size = UncheckedCast(Parameter(Descriptor::kRequestedSize)); + CSA_CHECK(this, IsValidPositiveSmi(requested_size)); + TNode allocation_flags = + SmiConstant(Smi::FromInt(AllocateDoubleAlignFlag::encode(false) | + AllowLargeObjectAllocationFlag::encode(true))); TailCallRuntime(Runtime::kAllocateInYoungGeneration, NoContextConstant(), - SmiFromIntPtr(requested_size)); + SmiFromIntPtr(requested_size), allocation_flags); +} + +TF_BUILTIN(AllocateRegularInYoungGeneration, CodeStubAssembler) { + TNode requested_size = + UncheckedCast(Parameter(Descriptor::kRequestedSize)); + CSA_CHECK(this, IsValidPositiveSmi(requested_size)); + + TNode allocation_flags = + SmiConstant(Smi::FromInt(AllocateDoubleAlignFlag::encode(false) | + AllowLargeObjectAllocationFlag::encode(false))); + TailCallRuntime(Runtime::kAllocateInYoungGeneration, NoContextConstant(), + SmiFromIntPtr(requested_size), allocation_flags); } TF_BUILTIN(AllocateInOldGeneration, CodeStubAssembler) { TNode requested_size = UncheckedCast(Parameter(Descriptor::kRequestedSize)); + CSA_CHECK(this, IsValidPositiveSmi(requested_size)); + + TNode runtime_flags = + SmiConstant(Smi::FromInt(AllocateDoubleAlignFlag::encode(false) | + AllowLargeObjectAllocationFlag::encode(true))); + TailCallRuntime(Runtime::kAllocateInOldGeneration, NoContextConstant(), + SmiFromIntPtr(requested_size), runtime_flags); +} + +TF_BUILTIN(AllocateRegularInOldGeneration, CodeStubAssembler) { + TNode requested_size = + UncheckedCast(Parameter(Descriptor::kRequestedSize)); + CSA_CHECK(this, IsValidPositiveSmi(requested_size)); + TNode runtime_flags = + SmiConstant(Smi::FromInt(AllocateDoubleAlignFlag::encode(false) | + AllowLargeObjectAllocationFlag::encode(false))); TailCallRuntime(Runtime::kAllocateInOldGeneration, 
NoContextConstant(), - SmiFromIntPtr(requested_size), SmiConstant(0)); + SmiFromIntPtr(requested_size), runtime_flags); } TF_BUILTIN(Abort, CodeStubAssembler) { @@ -827,9 +860,9 @@ TF_BUILTIN(Abort, CodeStubAssembler) { TailCallRuntime(Runtime::kAbort, NoContextConstant(), message_id); } -TF_BUILTIN(AbortJS, CodeStubAssembler) { +TF_BUILTIN(AbortCSAAssert, CodeStubAssembler) { TNode message = CAST(Parameter(Descriptor::kMessageOrMessageId)); - TailCallRuntime(Runtime::kAbortJS, NoContextConstant(), message); + TailCallRuntime(Runtime::kAbortCSAAssert, NoContextConstant(), message); } void Builtins::Generate_CEntry_Return1_DontSaveFPRegs_ArgvOnStack_NoBuiltinExit( @@ -907,6 +940,8 @@ TF_BUILTIN(GetProperty, CodeStubAssembler) { Node* object = Parameter(Descriptor::kObject); Node* key = Parameter(Descriptor::kKey); Node* context = Parameter(Descriptor::kContext); + // TODO(duongn): consider tailcalling to GetPropertyWithReceiver(object, + // object, key, OnNonExistent::kReturnUndefined). Label if_notfound(this), if_proxy(this, Label::kDeferred), if_slow(this, Label::kDeferred); @@ -932,7 +967,7 @@ TF_BUILTIN(GetProperty, CodeStubAssembler) { Goto(if_bailout); }; - TryPrototypeChainLookup(object, key, lookup_property_in_holder, + TryPrototypeChainLookup(object, object, key, lookup_property_in_holder, lookup_element_in_holder, &if_notfound, &if_slow, &if_proxy); @@ -955,6 +990,74 @@ TF_BUILTIN(GetProperty, CodeStubAssembler) { } } +// ES6 [[Get]] operation with Receiver. +TF_BUILTIN(GetPropertyWithReceiver, CodeStubAssembler) { + Node* object = Parameter(Descriptor::kObject); + Node* key = Parameter(Descriptor::kKey); + Node* context = Parameter(Descriptor::kContext); + Node* receiver = Parameter(Descriptor::kReceiver); + Node* on_non_existent = Parameter(Descriptor::kOnNonExistent); + Label if_notfound(this), if_proxy(this, Label::kDeferred), + if_slow(this, Label::kDeferred); + + CodeStubAssembler::LookupInHolder lookup_property_in_holder = + [=](Node* receiver, Node* holder, Node* holder_map, + Node* holder_instance_type, Node* unique_name, Label* next_holder, + Label* if_bailout) { + VARIABLE(var_value, MachineRepresentation::kTagged); + Label if_found(this); + TryGetOwnProperty(context, receiver, holder, holder_map, + holder_instance_type, unique_name, &if_found, + &var_value, next_holder, if_bailout); + BIND(&if_found); + Return(var_value.value()); + }; + + CodeStubAssembler::LookupInHolder lookup_element_in_holder = + [=](Node* receiver, Node* holder, Node* holder_map, + Node* holder_instance_type, Node* index, Label* next_holder, + Label* if_bailout) { + // Not supported yet. + Use(next_holder); + Goto(if_bailout); + }; + + TryPrototypeChainLookup(receiver, object, key, lookup_property_in_holder, + lookup_element_in_holder, &if_notfound, &if_slow, + &if_proxy); + + BIND(&if_notfound); + Label throw_reference_error(this); + GotoIf(WordEqual(on_non_existent, + SmiConstant(OnNonExistent::kThrowReferenceError)), + &throw_reference_error); + CSA_ASSERT(this, WordEqual(on_non_existent, + SmiConstant(OnNonExistent::kReturnUndefined))); + Return(UndefinedConstant()); + + BIND(&throw_reference_error); + Return(CallRuntime(Runtime::kThrowReferenceError, context, key)); + + BIND(&if_slow); + TailCallRuntime(Runtime::kGetPropertyWithReceiver, context, object, key, + receiver, on_non_existent); + + BIND(&if_proxy); + { + // Convert the {key} to a Name first. + Node* name = CallBuiltin(Builtins::kToName, context, key); + + // Proxy cannot handle private symbol so bailout. 
+ GotoIf(IsPrivateSymbol(name), &if_slow); + + // The {object} is a JSProxy instance, look up the {name} on it, passing + // {object} both as receiver and holder. If {name} is absent we can safely + // return undefined from here. + TailCallBuiltin(Builtins::kProxyGetProperty, context, object, name, + receiver, on_non_existent); + } +} + // ES6 [[Set]] operation. TF_BUILTIN(SetProperty, CodeStubAssembler) { TNode context = CAST(Parameter(Descriptor::kContext)); diff --git a/deps/v8/src/builtins/builtins-intl.cc b/deps/v8/src/builtins/builtins-intl.cc index 882afa3c32055e..ff8e96f4f512f0 100644 --- a/deps/v8/src/builtins/builtins-intl.cc +++ b/deps/v8/src/builtins/builtins-intl.cc @@ -276,15 +276,14 @@ Object LegacyFormatConstructor(BuiltinArguments args, Isolate* isolate, // 2. Let format be ? OrdinaryCreateFromConstructor(newTarget, // "%Prototype%", ...). - Handle obj; + Handle map; ASSIGN_RETURN_FAILURE_ON_EXCEPTION( - isolate, obj, - JSObject::New(target, new_target, Handle::null())); - Handle format = Handle::cast(obj); + isolate, map, JSFunction::GetDerivedMap(isolate, target, new_target)); // 3. Perform ? Initialize(Format, locales, options). - ASSIGN_RETURN_FAILURE_ON_EXCEPTION( - isolate, format, T::Initialize(isolate, format, locales, options)); + Handle format; + ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, format, + T::New(isolate, map, locales, options)); // 4. Let this be the this value. Handle receiver = args.receiver(); @@ -351,21 +350,17 @@ Object DisallowCallConstructor(BuiltinArguments args, Isolate* isolate, Handle target = args.target(); Handle new_target = Handle::cast(args.new_target()); - Handle obj; + Handle map; // 2. Let result be OrdinaryCreateFromConstructor(NewTarget, // "%Prototype%"). ASSIGN_RETURN_FAILURE_ON_EXCEPTION( - isolate, obj, - JSObject::New(target, new_target, Handle::null())); - Handle result = Handle::cast(obj); - result->set_flags(0); + isolate, map, JSFunction::GetDerivedMap(isolate, target, new_target)); Handle locales = args.atOrUndefined(isolate, 1); Handle options = args.atOrUndefined(isolate, 2); - // 3. Return Initialize(t, locales, options). - RETURN_RESULT_OR_FAILURE(isolate, - T::Initialize(isolate, result, locales, options)); + // 3. Return New(t, locales, options). + RETURN_RESULT_OR_FAILURE(isolate, T::New(isolate, map, locales, options)); } /** @@ -387,14 +382,11 @@ Object CallOrConstructConstructor(BuiltinArguments args, Isolate* isolate) { Handle locales = args.atOrUndefined(isolate, 1); Handle options = args.atOrUndefined(isolate, 2); - Handle obj; + Handle map; ASSIGN_RETURN_FAILURE_ON_EXCEPTION( - isolate, obj, - JSObject::New(target, new_target, Handle::null())); - Handle result = Handle::cast(obj); + isolate, map, JSFunction::GetDerivedMap(isolate, target, new_target)); - RETURN_RESULT_OR_FAILURE(isolate, - T::Initialize(isolate, result, locales, options)); + RETURN_RESULT_OR_FAILURE(isolate, T::New(isolate, map, locales, options)); } } // namespace @@ -591,12 +583,11 @@ MaybeHandle CreateLocale(Isolate* isolate, Handle constructor, Handle new_target, Handle tag, Handle options) { - Handle locale; + Handle map; // 6. Let locale be ? OrdinaryCreateFromConstructor(NewTarget, // %LocalePrototype%, internalSlotsList). ASSIGN_RETURN_ON_EXCEPTION( - isolate, locale, - JSObject::New(constructor, new_target, Handle::null()), + isolate, map, JSFunction::GetDerivedMap(isolate, constructor, new_target), JSLocale); // 7. If Type(tag) is not String or Object, throw a TypeError exception. 
@@ -628,8 +619,7 @@ MaybeHandle CreateLocale(Isolate* isolate, Object::ToObject(isolate, options), JSLocale); } - return JSLocale::Initialize(isolate, Handle::cast(locale), - locale_string, options_object); + return JSLocale::New(isolate, map, locale_string, options_object); } } // namespace diff --git a/deps/v8/src/builtins/builtins-iterator-gen.cc b/deps/v8/src/builtins/builtins-iterator-gen.cc index 0484501bfb23dd..b3d8e27dbc3a7d 100644 --- a/deps/v8/src/builtins/builtins-iterator-gen.cc +++ b/deps/v8/src/builtins/builtins-iterator-gen.cc @@ -75,7 +75,7 @@ IteratorRecord IteratorBuiltinsAssembler::GetIterator(Node* context, } } -TNode IteratorBuiltinsAssembler::IteratorStep( +TNode IteratorBuiltinsAssembler::IteratorStep( Node* context, const IteratorRecord& iterator, Label* if_done, Node* fast_iterator_result_map, Label* if_exception, Variable* exception) { DCHECK_NOT_NULL(if_done); @@ -125,23 +125,21 @@ TNode IteratorBuiltinsAssembler::IteratorStep( } BIND(&return_result); - return UncheckedCast(result); + return CAST(result); } -Node* IteratorBuiltinsAssembler::IteratorValue(Node* context, Node* result, - Node* fast_iterator_result_map, - Label* if_exception, - Variable* exception) { - CSA_ASSERT(this, IsJSReceiver(result)); - +TNode IteratorBuiltinsAssembler::IteratorValue( + TNode context, TNode result, + base::Optional> fast_iterator_result_map, Label* if_exception, + Variable* exception) { Label exit(this); - VARIABLE(var_value, MachineRepresentation::kTagged); - if (fast_iterator_result_map != nullptr) { + TVARIABLE(Object, var_value); + if (fast_iterator_result_map) { // Fast iterator result case: Label if_generic(this); Node* map = LoadMap(result); - GotoIfNot(WordEqual(map, fast_iterator_result_map), &if_generic); - var_value.Bind(LoadObjectField(result, JSIteratorResult::kValueOffset)); + GotoIfNot(WordEqual(map, *fast_iterator_result_map), &if_generic); + var_value = LoadObjectField(result, JSIteratorResult::kValueOffset); Goto(&exit); BIND(&if_generic); @@ -149,9 +147,10 @@ Node* IteratorBuiltinsAssembler::IteratorValue(Node* context, Node* result, // Generic iterator result case: { - Node* value = GetProperty(context, result, factory()->value_string()); + TNode value = + GetProperty(context, result, factory()->value_string()); GotoIfException(value, if_exception, exception); - var_value.Bind(value); + var_value = value; Goto(&exit); } @@ -217,10 +216,10 @@ TNode IteratorBuiltinsAssembler::IterableToList( BIND(&loop_start); { // a. Set next to ? IteratorStep(iteratorRecord). - TNode next = IteratorStep(context, iterator_record, &done); + TNode next = IteratorStep(context, iterator_record, &done); // b. If next is not false, then // i. Let nextValue be ? IteratorValue(next). - TNode next_value = CAST(IteratorValue(context, next)); + TNode next_value = IteratorValue(context, next); // ii. Append nextValue to the end of the List values. values.Push(next_value); Goto(&loop_start); diff --git a/deps/v8/src/builtins/builtins-iterator-gen.h b/deps/v8/src/builtins/builtins-iterator-gen.h index cf421dc5b79f83..db86c653857f52 100644 --- a/deps/v8/src/builtins/builtins-iterator-gen.h +++ b/deps/v8/src/builtins/builtins-iterator-gen.h @@ -32,18 +32,19 @@ class IteratorBuiltinsAssembler : public CodeStubAssembler { Variable* exception = nullptr); // https://tc39.github.io/ecma262/#sec-iteratorstep - // Returns `false` if the iterator is done, otherwise returns an - // iterator result. + // If the iterator is done, goto {if_done}, otherwise returns an iterator + // result. 
   // `fast_iterator_result_map` refers to the map for the JSIteratorResult
   // object, loaded from the native context.
-  TNode<Object> IteratorStep(Node* context, const IteratorRecord& iterator,
-                             Label* if_done,
-                             Node* fast_iterator_result_map = nullptr,
-                             Label* if_exception = nullptr,
-                             Variable* exception = nullptr);
-
-  TNode<Object> IteratorStep(Node* context, const IteratorRecord& iterator,
-                             Node* fast_iterator_result_map, Label* if_done) {
+  TNode<JSReceiver> IteratorStep(Node* context, const IteratorRecord& iterator,
+                                 Label* if_done,
+                                 Node* fast_iterator_result_map = nullptr,
+                                 Label* if_exception = nullptr,
+                                 Variable* exception = nullptr);
+
+  TNode<JSReceiver> IteratorStep(Node* context, const IteratorRecord& iterator,
+                                 Node* fast_iterator_result_map,
+                                 Label* if_done) {
     return IteratorStep(context, iterator, if_done, fast_iterator_result_map);
   }
 
@@ -51,10 +52,10 @@ class IteratorBuiltinsAssembler : public CodeStubAssembler {
   // Return the `value` field from an iterator.
   // `fast_iterator_result_map` refers to the map for the JSIteratorResult
   // object, loaded from the native context.
-  Node* IteratorValue(Node* context, Node* result,
-                      Node* fast_iterator_result_map = nullptr,
-                      Label* if_exception = nullptr,
-                      Variable* exception = nullptr);
+  TNode<Object> IteratorValue(
+      TNode<Context> context, TNode<JSReceiver> result,
+      base::Optional<TNode<Map>> fast_iterator_result_map = base::nullopt,
+      Label* if_exception = nullptr, Variable* exception = nullptr);
 
   // https://tc39.github.io/ecma262/#sec-iteratorclose
   void IteratorCloseOnException(Node* context, const IteratorRecord& iterator,
diff --git a/deps/v8/src/builtins/builtins-math.cc b/deps/v8/src/builtins/builtins-math.cc
index 6d3274a4a5a632..cce780ab9f6a25 100644
--- a/deps/v8/src/builtins/builtins-math.cc
+++ b/deps/v8/src/builtins/builtins-math.cc
@@ -20,7 +20,6 @@ BUILTIN(MathHypot) {
   if (length == 0) return Smi::kZero;
   DCHECK_LT(0, length);
   double max = 0;
-  bool one_arg_is_nan = false;
   std::vector<double> abs_values;
   abs_values.reserve(length);
   for (int i = 0; i < length; i++) {
@@ -28,29 +27,20 @@ BUILTIN(MathHypot) {
     ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, x,
                                        Object::ToNumber(isolate, x));
     double abs_value = std::abs(x->Number());
-
-    if (std::isnan(abs_value)) {
-      one_arg_is_nan = true;
-    } else {
-      abs_values.push_back(abs_value);
-      if (max < abs_value) {
-        max = abs_value;
-      }
+    abs_values.push_back(abs_value);
+    // Use negation here to make sure that {max} is NaN
+    // in the end in case any of the arguments was NaN.
+    if (!(abs_value <= max)) {
+      max = abs_value;
     }
   }
 
-  if (max == V8_INFINITY) {
-    return *isolate->factory()->NewNumber(V8_INFINITY);
-  }
-
-  if (one_arg_is_nan) {
-    return ReadOnlyRoots(isolate).nan_value();
-  }
-
   if (max == 0) {
     return Smi::kZero;
+  } else if (max == V8_INFINITY) {
+    return ReadOnlyRoots(isolate).infinity_value();
   }
-  DCHECK_GT(max, 0);
+  DCHECK(!(max <= 0));
 
   // Kahan summation to avoid rounding errors.
   // Normalize the numbers to the largest one to avoid overflow.
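The rewritten MathHypot drops the `one_arg_is_nan` flag: `!(abs_value <= max)` is true both when `abs_value` exceeds `max` and when either operand is NaN, and every value, NaN included, is pushed into `abs_values`, so a NaN poisons the compensated sum while an infinity that ends up in `max` short-circuits to +∞ first. A standalone C++ version of the same structure, including the normalization and Kahan summation named in the trailing comments; this is an illustration of the algorithm, not V8's code:

```cpp
#include <cmath>
#include <vector>

double Hypot(const std::vector<double>& xs) {
  double max = 0;
  std::vector<double> abs_values;
  abs_values.reserve(xs.size());
  for (double x : xs) {
    double a = std::fabs(x);
    abs_values.push_back(a);
    if (!(a <= max)) max = a;  // NaN-propagating maximum, no extra flag
  }
  if (max == 0) return 0;                // all arguments were zero
  if (max == INFINITY) return INFINITY;  // an infinity dominates
  // Kahan-sum (a / max)^2: normalizing by {max} avoids overflow, the
  // compensation term limits rounding error.
  double sum = 0, compensation = 0;
  for (double a : abs_values) {
    double t = a / max;
    double term = t * t - compensation;
    double next = sum + term;
    compensation = (next - sum) - term;
    sum = next;
  }
  return std::sqrt(sum) * max;  // NaN inputs surface here as NaN
}
```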
diff --git a/deps/v8/src/builtins/builtins-number-gen.cc b/deps/v8/src/builtins/builtins-number-gen.cc index 5b3af79f006667..f5c4477c23df95 100644 --- a/deps/v8/src/builtins/builtins-number-gen.cc +++ b/deps/v8/src/builtins/builtins-number-gen.cc @@ -315,8 +315,8 @@ TF_BUILTIN(NumberParseInt, CodeStubAssembler) { // ES6 #sec-number.prototype.valueof TF_BUILTIN(NumberPrototypeValueOf, CodeStubAssembler) { - Node* context = Parameter(Descriptor::kContext); - Node* receiver = Parameter(Descriptor::kReceiver); + TNode context = CAST(Parameter(Descriptor::kContext)); + TNode receiver = CAST(Parameter(Descriptor::kReceiver)); Node* result = ToThisValue(context, receiver, PrimitiveType::kNumber, "Number.prototype.valueOf"); @@ -538,8 +538,8 @@ TF_BUILTIN(Add, AddStubAssembler) { BIND(&do_bigint_add); { - Return(CallRuntime(Runtime::kBigIntBinaryOp, context, var_left.value(), - var_right.value(), SmiConstant(Operation::kAdd))); + TailCallBuiltin(Builtins::kBigIntAdd, context, var_left.value(), + var_right.value()); } BIND(&do_double_add); @@ -996,8 +996,8 @@ TF_BUILTIN(Equal, CodeStubAssembler) { } TF_BUILTIN(StrictEqual, CodeStubAssembler) { - Node* lhs = Parameter(Descriptor::kLeft); - Node* rhs = Parameter(Descriptor::kRight); + TNode lhs = CAST(Parameter(Descriptor::kLeft)); + TNode rhs = CAST(Parameter(Descriptor::kRight)); Return(StrictEqual(lhs, rhs)); } diff --git a/deps/v8/src/builtins/builtins-number.cc b/deps/v8/src/builtins/builtins-number.cc index 929e686604953a..d2fb0ff74c3a02 100644 --- a/deps/v8/src/builtins/builtins-number.cc +++ b/deps/v8/src/builtins/builtins-number.cc @@ -25,8 +25,8 @@ BUILTIN(NumberPrototypeToExponential) { Handle fraction_digits = args.atOrUndefined(isolate, 1); // Unwrap the receiver {value}. - if (value->IsJSValue()) { - value = handle(Handle::cast(value)->value(), isolate); + if (value->IsJSPrimitiveWrapper()) { + value = handle(Handle::cast(value)->value(), isolate); } if (!value->IsNumber()) { THROW_NEW_ERROR_RETURN_FAILURE( @@ -70,8 +70,8 @@ BUILTIN(NumberPrototypeToFixed) { Handle fraction_digits = args.atOrUndefined(isolate, 1); // Unwrap the receiver {value}. - if (value->IsJSValue()) { - value = handle(Handle::cast(value)->value(), isolate); + if (value->IsJSPrimitiveWrapper()) { + value = handle(Handle::cast(value)->value(), isolate); } if (!value->IsNumber()) { THROW_NEW_ERROR_RETURN_FAILURE( @@ -117,8 +117,8 @@ BUILTIN(NumberPrototypeToLocaleString) { Handle value = args.at(0); // Unwrap the receiver {value}. - if (value->IsJSValue()) { - value = handle(Handle::cast(value)->value(), isolate); + if (value->IsJSPrimitiveWrapper()) { + value = handle(Handle::cast(value)->value(), isolate); } // 1. Let x be ? thisNumberValue(this value) if (!value->IsNumber()) { @@ -147,8 +147,8 @@ BUILTIN(NumberPrototypeToPrecision) { Handle precision = args.atOrUndefined(isolate, 1); // Unwrap the receiver {value}. - if (value->IsJSValue()) { - value = handle(Handle::cast(value)->value(), isolate); + if (value->IsJSPrimitiveWrapper()) { + value = handle(Handle::cast(value)->value(), isolate); } if (!value->IsNumber()) { THROW_NEW_ERROR_RETURN_FAILURE( @@ -192,8 +192,8 @@ BUILTIN(NumberPrototypeToString) { Handle radix = args.atOrUndefined(isolate, 1); // Unwrap the receiver {value}. 
- if (value->IsJSValue()) { - value = handle(Handle::cast(value)->value(), isolate); + if (value->IsJSPrimitiveWrapper()) { + value = handle(Handle::cast(value)->value(), isolate); } if (!value->IsNumber()) { THROW_NEW_ERROR_RETURN_FAILURE( diff --git a/deps/v8/src/builtins/builtins-object-gen.cc b/deps/v8/src/builtins/builtins-object-gen.cc index 314331d49854ba..8d59ee3bd107cf 100644 --- a/deps/v8/src/builtins/builtins-object-gen.cc +++ b/deps/v8/src/builtins/builtins-object-gen.cc @@ -65,8 +65,6 @@ class ObjectEntriesValuesBuiltinsAssembler : public ObjectBuiltinsAssembler { TNode IsPropertyKindData(TNode kind); - TNode HasHiddenPrototype(TNode map); - TNode LoadPropertyKind(TNode details) { return DecodeWord32(details); } @@ -185,12 +183,6 @@ TNode ObjectEntriesValuesBuiltinsAssembler::IsPropertyKindData( return Word32Equal(kind, Int32Constant(PropertyKind::kData)); } -TNode ObjectEntriesValuesBuiltinsAssembler::HasHiddenPrototype( - TNode map) { - TNode bit_field2 = Unsigned(LoadMapBitField2(map)); - return DecodeWord32(bit_field2); -} - void ObjectEntriesValuesBuiltinsAssembler::GetOwnValuesOrEntries( TNode context, TNode maybe_object, CollectType collect_type) { @@ -254,7 +246,6 @@ void ObjectEntriesValuesBuiltinsAssembler::GotoIfMapHasSlowProperties( TNode map, Label* if_slow) { GotoIf(IsStringWrapperElementsKind(map), if_slow); GotoIf(IsSpecialReceiverMap(map), if_slow); - GotoIf(HasHiddenPrototype(map), if_slow); GotoIf(IsDictionaryMap(map), if_slow); } @@ -602,9 +593,19 @@ TF_BUILTIN(ObjectGetOwnPropertyNames, ObjectBuiltinsAssembler) { if_fast(this), try_fast(this, Label::kDeferred), if_slow(this, Label::kDeferred), if_join(this); - // Check if the {object} has a usable enum cache. + // Take the slow path if the {object} IsCustomElementsReceiverInstanceType or + // has any elements. GotoIf(TaggedIsSmi(object), &if_slow); Node* object_map = LoadMap(object); + TNode instance_type = LoadMapInstanceType(object_map); + GotoIf(IsCustomElementsReceiverInstanceType(instance_type), &if_slow); + Node* object_elements = LoadElements(object); + GotoIf(IsEmptyFixedArray(object_elements), &if_empty_elements); + Branch(IsEmptySlowElementDictionary(object_elements), &if_empty_elements, + &if_slow); + + // Check if the {object} has a usable enum cache. + BIND(&if_empty_elements); Node* object_bit_field3 = LoadMapBitField3(object_map); Node* object_enum_length = DecodeWordFromWord32(object_bit_field3); @@ -612,15 +613,7 @@ TF_BUILTIN(ObjectGetOwnPropertyNames, ObjectBuiltinsAssembler) { WordEqual(object_enum_length, IntPtrConstant(kInvalidEnumCacheSentinel)), &try_fast); - // Ensure that the {object} doesn't have any elements. - CSA_ASSERT(this, IsJSObjectMap(object_map)); - Node* object_elements = LoadElements(object); - GotoIf(IsEmptyFixedArray(object_elements), &if_empty_elements); - Branch(IsEmptySlowElementDictionary(object_elements), &if_empty_elements, - &if_slow); - // Check whether all own properties are enumerable. - BIND(&if_empty_elements); Node* number_descriptors = DecodeWordFromWord32(object_bit_field3); GotoIfNot(WordEqual(object_enum_length, number_descriptors), &if_slow); @@ -728,11 +721,11 @@ TF_BUILTIN(ObjectPrototypeIsPrototypeOf, ObjectBuiltinsAssembler) { // invoke the ToObject builtin, which raises the appropriate error. 
// Otherwise we don't need to invoke ToObject, since {receiver} is // either already a JSReceiver, in which case ToObject is a no-op, - // or it's a Primitive and ToObject would allocate a fresh JSValue + // or it's a Primitive and ToObject would allocate a fresh JSPrimitiveWrapper // wrapper, which wouldn't be identical to any existing JSReceiver // found in the prototype chain of {value}, hence it will return // false no matter if we search for the Primitive {receiver} or - // a newly allocated JSValue wrapper for {receiver}. + // a newly allocated JSPrimitiveWrapper wrapper for {receiver}. GotoIf(IsNull(receiver), &if_receiverisnullorundefined); GotoIf(IsUndefined(receiver), &if_receiverisnullorundefined); @@ -794,7 +787,7 @@ TF_BUILTIN(ObjectToString, ObjectBuiltinsAssembler) { {JS_SPECIAL_API_OBJECT_TYPE, &if_apiobject}, {JS_PROXY_TYPE, &if_proxy}, {JS_ERROR_TYPE, &if_error}, - {JS_VALUE_TYPE, &if_value}}; + {JS_PRIMITIVE_WRAPPER_TYPE, &if_value}}; size_t const kNumCases = arraysize(kJumpTable); Label* case_labels[kNumCases]; int32_t case_values[kNumCases]; @@ -996,7 +989,7 @@ TF_BUILTIN(ObjectToString, ObjectBuiltinsAssembler) { if_value_is_bigint(this, Label::kDeferred), if_value_is_string(this, Label::kDeferred); - Node* receiver_value = LoadJSValueValue(receiver); + Node* receiver_value = LoadJSPrimitiveWrapperValue(receiver); // We need to start with the object to see if the value was a subclass // which might have interesting properties. var_holder.Bind(receiver); @@ -1346,10 +1339,15 @@ TF_BUILTIN(CreateGeneratorObject, ObjectBuiltinsAssembler) { StoreObjectFieldNoWriteBarrier( result, JSGeneratorObject::kParametersAndRegistersOffset, parameters_and_registers); + Node* resume_mode = SmiConstant(JSGeneratorObject::ResumeMode::kNext); + StoreObjectFieldNoWriteBarrier(result, JSGeneratorObject::kResumeModeOffset, + resume_mode); Node* executing = SmiConstant(JSGeneratorObject::kGeneratorExecuting); StoreObjectFieldNoWriteBarrier(result, JSGeneratorObject::kContinuationOffset, executing); - GotoIfNot(HasInstanceType(maybe_map, JS_ASYNC_GENERATOR_OBJECT_TYPE), &done); + GotoIfNot(InstanceTypeEqual(LoadMapInstanceType(maybe_map), + JS_ASYNC_GENERATOR_OBJECT_TYPE), + &done); StoreObjectFieldNoWriteBarrier( result, JSAsyncGeneratorObject::kIsAwaitingOffset, SmiConstant(0)); Goto(&done); diff --git a/deps/v8/src/builtins/builtins-object.cc b/deps/v8/src/builtins/builtins-object.cc index 59e4373f98b078..1ca5fffd8db556 100644 --- a/deps/v8/src/builtins/builtins-object.cc +++ b/deps/v8/src/builtins/builtins-object.cc @@ -5,7 +5,7 @@ #include "src/builtins/builtins-utils-inl.h" #include "src/builtins/builtins.h" #include "src/codegen/code-factory.h" -#include "src/execution/message-template.h" +#include "src/common/message-template.h" #include "src/heap/heap-inl.h" // For ToBoolean. TODO(jkummerow): Drop. #include "src/logging/counters.h" #include "src/objects/keys.h" @@ -218,52 +218,6 @@ BUILTIN(ObjectFreeze) { return *object; } -// ES section 19.1.2.9 Object.getPrototypeOf ( O ) -BUILTIN(ObjectGetPrototypeOf) { - HandleScope scope(isolate); - Handle object = args.atOrUndefined(isolate, 1); - - Handle receiver; - ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, receiver, - Object::ToObject(isolate, object)); - - RETURN_RESULT_OR_FAILURE(isolate, - JSReceiver::GetPrototype(isolate, receiver)); -} - -// ES6 section 19.1.2.21 Object.setPrototypeOf ( O, proto ) -BUILTIN(ObjectSetPrototypeOf) { - HandleScope scope(isolate); - - // 1. Let O be ? RequireObjectCoercible(O). 
- Handle object = args.atOrUndefined(isolate, 1); - if (object->IsNullOrUndefined(isolate)) { - THROW_NEW_ERROR_RETURN_FAILURE( - isolate, NewTypeError(MessageTemplate::kCalledOnNullOrUndefined, - isolate->factory()->NewStringFromAsciiChecked( - "Object.setPrototypeOf"))); - } - - // 2. If Type(proto) is neither Object nor Null, throw a TypeError exception. - Handle proto = args.atOrUndefined(isolate, 2); - if (!proto->IsNull(isolate) && !proto->IsJSReceiver()) { - THROW_NEW_ERROR_RETURN_FAILURE( - isolate, NewTypeError(MessageTemplate::kProtoObjectOrNull, proto)); - } - - // 3. If Type(O) is not Object, return O. - if (!object->IsJSReceiver()) return *object; - Handle receiver = Handle::cast(object); - - // 4. Let status be ? O.[[SetPrototypeOf]](proto). - // 5. If status is false, throw a TypeError exception. - MAYBE_RETURN(JSReceiver::SetPrototype(receiver, proto, true, kThrowOnError), - ReadOnlyRoots(isolate).exception()); - - // 6. Return O. - return *receiver; -} - // ES6 section B.2.2.1.1 get Object.prototype.__proto__ BUILTIN(ObjectPrototypeGetProto) { HandleScope scope(isolate); @@ -332,18 +286,6 @@ BUILTIN(ObjectGetOwnPropertySymbols) { return GetOwnPropertyKeys(isolate, args, SKIP_STRINGS); } -// ES6 section 19.1.2.11 Object.isExtensible ( O ) -BUILTIN(ObjectIsExtensible) { - HandleScope scope(isolate); - Handle object = args.atOrUndefined(isolate, 1); - Maybe result = - object->IsJSReceiver() - ? JSReceiver::IsExtensible(Handle::cast(object)) - : Just(false); - MAYBE_RETURN(result, ReadOnlyRoots(isolate).exception()); - return isolate->heap()->ToBoolean(result.FromJust()); -} - // ES6 section 19.1.2.12 Object.isFrozen ( O ) BUILTIN(ObjectIsFrozen) { HandleScope scope(isolate); @@ -403,18 +345,6 @@ BUILTIN(ObjectGetOwnPropertyDescriptors) { return *descriptors; } -// ES6 section 19.1.2.15 Object.preventExtensions ( O ) -BUILTIN(ObjectPreventExtensions) { - HandleScope scope(isolate); - Handle object = args.atOrUndefined(isolate, 1); - if (object->IsJSReceiver()) { - MAYBE_RETURN(JSReceiver::PreventExtensions(Handle::cast(object), - kThrowOnError), - ReadOnlyRoots(isolate).exception()); - } - return *object; -} - // ES6 section 19.1.2.17 Object.seal ( O ) BUILTIN(ObjectSeal) { HandleScope scope(isolate); diff --git a/deps/v8/src/builtins/builtins-promise-gen.cc b/deps/v8/src/builtins/builtins-promise-gen.cc index ad70fb1dd1a49e..1339e2dccd788e 100644 --- a/deps/v8/src/builtins/builtins-promise-gen.cc +++ b/deps/v8/src/builtins/builtins-promise-gen.cc @@ -2062,7 +2062,7 @@ Node* PromiseBuiltinsAssembler::PerformPromiseAll( // 5. Let _promiseResolve_ be ? Get(_constructor_, `"resolve"`). TNode resolve = GetProperty(native_context, constructor, factory()->resolve_string()); - GotoIfException(resolve, if_exception, var_exception); + GotoIfException(resolve, &close_iterator, var_exception); // 6. If IsCallable(_promiseResolve_) is *false*, throw a *TypeError* // exception. @@ -2077,9 +2077,9 @@ Node* PromiseBuiltinsAssembler::PerformPromiseAll( // Let next be IteratorStep(iteratorRecord.[[Iterator]]). // If next is an abrupt completion, set iteratorRecord.[[Done]] to true. // ReturnIfAbrupt(next). 
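The PerformPromiseAll hunk above retargets the exception edge of the `Get(constructor, "resolve")` lookup from the generic rejection path to close_iterator: per the spec, an abrupt completion while the iterator is still live must run IteratorClose before the failure propagates (the IteratorStep comments continue below). The same control flow as a plain C++ sketch with invented types:

```cpp
#include <stdexcept>

struct Iterator {  // invented
  bool closed = false;
  void Close() { closed = true; }  // stand-in for IteratorClose
};

int GetResolve(bool throws) {
  if (throws) throw std::runtime_error("Get(constructor, \"resolve\") threw");
  return 42;  // stand-in for the resolve function
}

int PerformPromiseAll(Iterator& iterator, bool lookup_throws) {
  try {
    return GetResolve(lookup_throws);  // step 5 in the spec comments
  } catch (...) {
    iterator.Close();  // previously skipped; now runs before rethrowing
    throw;
  }
}
```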
- Node* const fast_iterator_result_map = - LoadContextElement(native_context, Context::ITERATOR_RESULT_MAP_INDEX); - Node* const next = iter_assembler.IteratorStep( + TNode const fast_iterator_result_map = CAST( + LoadContextElement(native_context, Context::ITERATOR_RESULT_MAP_INDEX)); + TNode const next = iter_assembler.IteratorStep( native_context, iterator, &done_loop, fast_iterator_result_map, if_exception, var_exception); @@ -2087,7 +2087,7 @@ Node* PromiseBuiltinsAssembler::PerformPromiseAll( // If nextValue is an abrupt completion, set iteratorRecord.[[Done]] to // true. // ReturnIfAbrupt(nextValue). - Node* const next_value = iter_assembler.IteratorValue( + TNode const next_value = iter_assembler.IteratorValue( native_context, next, fast_iterator_result_map, if_exception, var_exception); @@ -2148,7 +2148,7 @@ Node* PromiseBuiltinsAssembler::PerformPromiseAll( &if_slow); GotoIf(IsPromiseSpeciesProtectorCellInvalid(), &if_slow); GotoIf(TaggedIsSmi(next_value), &if_slow); - Node* const next_value_map = LoadMap(next_value); + Node* const next_value_map = LoadMap(CAST(next_value)); BranchIfPromiseThenLookupChainIntact(native_context, next_value_map, &if_fast, &if_slow); @@ -2526,8 +2526,7 @@ TF_BUILTIN(PromiseAllSettledResolveElementClosure, PromiseBuiltinsAssembler) { LoadContextElement(native_context, Context::OBJECT_FUNCTION_INDEX)); TNode object_function_map = Cast(LoadObjectField( object_function, JSFunction::kPrototypeOrInitialMapOffset)); - TNode obj = - Cast(AllocateJSObjectFromMap(object_function_map)); + TNode obj = AllocateJSObjectFromMap(object_function_map); // 10. Perform ! CreateDataProperty(obj, "status", "fulfilled"). CallBuiltin(Builtins::kFastCreateDataProperty, context, obj, @@ -2557,8 +2556,7 @@ TF_BUILTIN(PromiseAllSettledRejectElementClosure, PromiseBuiltinsAssembler) { LoadContextElement(native_context, Context::OBJECT_FUNCTION_INDEX)); TNode object_function_map = Cast(LoadObjectField( object_function, JSFunction::kPrototypeOrInitialMapOffset)); - TNode obj = - Cast(AllocateJSObjectFromMap(object_function_map)); + TNode obj = AllocateJSObjectFromMap(object_function_map); // 10. Perform ! CreateDataProperty(obj, "status", "rejected"). CallBuiltin(Builtins::kFastCreateDataProperty, context, obj, @@ -2579,7 +2577,7 @@ TF_BUILTIN(PromiseRace, PromiseBuiltinsAssembler) { VARIABLE(var_exception, MachineRepresentation::kTagged, TheHoleConstant()); Node* const receiver = Parameter(Descriptor::kReceiver); - Node* const context = Parameter(Descriptor::kContext); + TNode const context = CAST(Parameter(Descriptor::kContext)); ThrowIfNotJSReceiver(context, receiver, MessageTemplate::kCalledOnNonObject, "Promise.race"); @@ -2626,11 +2624,11 @@ TF_BUILTIN(PromiseRace, PromiseBuiltinsAssembler) { // 3. Let _promiseResolve_ be ? Get(_constructor_, `"resolve"`). TNode resolve = GetProperty(native_context, receiver, factory()->resolve_string()); - GotoIfException(resolve, &reject_promise, &var_exception); + GotoIfException(resolve, &close_iterator, &var_exception); // 4. If IsCallable(_promiseResolve_) is *false*, throw a *TypeError* // exception. 
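Promise.race above now reads `constructor.resolve` once, before entering the loop, caches it in var_promise_resolve_function, and checks callability a single time (that check continues just below); each iterated value then reuses the cached function instead of re-fetching the property. A sketch of the hoisting with invented types:

```cpp
#include <functional>
#include <stdexcept>
#include <vector>

using ResolveFn = std::function<int(int)>;  // invented stand-in

int Race(const ResolveFn& resolve, const std::vector<int>& values) {
  // Fetched and validated once, mirroring var_promise_resolve_function.
  if (!resolve) throw std::runtime_error("TypeError: resolve is not callable");
  int winner = 0;
  for (int v : values) {
    winner = resolve(v);  // no per-iteration property lookup
    break;                // race: the first settled value wins (sketch)
  }
  return winner;
}
```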
- ThrowIfNotCallable(CAST(context), resolve, "resolve"); + ThrowIfNotCallable(context, resolve, "resolve"); var_promise_resolve_function = resolve; Goto(&loop); @@ -2638,13 +2636,13 @@ TF_BUILTIN(PromiseRace, PromiseBuiltinsAssembler) { BIND(&loop); { - Node* const fast_iterator_result_map = LoadContextElement( - native_context, Context::ITERATOR_RESULT_MAP_INDEX); + TNode const fast_iterator_result_map = CAST(LoadContextElement( + native_context, Context::ITERATOR_RESULT_MAP_INDEX)); // Let next be IteratorStep(iteratorRecord.[[Iterator]]). // If next is an abrupt completion, set iteratorRecord.[[Done]] to true. // ReturnIfAbrupt(next). - Node* const next = iter_assembler.IteratorStep( + TNode const next = iter_assembler.IteratorStep( context, iterator, &break_loop, fast_iterator_result_map, &reject_promise, &var_exception); @@ -2652,7 +2650,7 @@ TF_BUILTIN(PromiseRace, PromiseBuiltinsAssembler) { // If nextValue is an abrupt completion, set iteratorRecord.[[Done]] to // true. // ReturnIfAbrupt(nextValue). - Node* const next_value = + TNode const next_value = iter_assembler.IteratorValue(context, next, fast_iterator_result_map, &reject_promise, &var_exception); diff --git a/deps/v8/src/builtins/builtins-proxy-gen.cc b/deps/v8/src/builtins/builtins-proxy-gen.cc index a1a2f6308ffde2..948540ea5f1d1d 100644 --- a/deps/v8/src/builtins/builtins-proxy-gen.cc +++ b/deps/v8/src/builtins/builtins-proxy-gen.cc @@ -13,8 +13,9 @@ namespace v8 { namespace internal { -Node* ProxiesCodeStubAssembler::AllocateProxy(Node* target, Node* handler, - Node* context) { +compiler::TNode ProxiesCodeStubAssembler::AllocateProxy( + TNode context, TNode target, + TNode handler) { VARIABLE(map, MachineRepresentation::kTagged); Label callable_target(this), constructor_target(this), none_target(this), @@ -53,7 +54,7 @@ Node* ProxiesCodeStubAssembler::AllocateProxy(Node* target, Node* handler, StoreObjectFieldNoWriteBarrier(proxy, JSProxy::kTargetOffset, target); StoreObjectFieldNoWriteBarrier(proxy, JSProxy::kHandlerOffset, handler); - return proxy; + return CAST(proxy); } Node* ProxiesCodeStubAssembler::AllocateJSArrayForCodeStubArguments( @@ -121,8 +122,9 @@ Node* ProxiesCodeStubAssembler::CreateProxyRevokeFunctionContext( return context; } -Node* ProxiesCodeStubAssembler::AllocateProxyRevokeFunction(Node* proxy, - Node* context) { +compiler::TNode +ProxiesCodeStubAssembler::AllocateProxyRevokeFunction(TNode context, + TNode proxy) { Node* const native_context = LoadNativeContext(context); Node* const proxy_context = @@ -132,13 +134,8 @@ Node* ProxiesCodeStubAssembler::AllocateProxyRevokeFunction(Node* proxy, Node* const revoke_info = LoadContextElement(native_context, Context::PROXY_REVOKE_SHARED_FUN); - return AllocateFunctionWithMapAndContext(revoke_map, revoke_info, - proxy_context); -} - -Node* ProxiesCodeStubAssembler::GetProxyConstructorJSNewTarget() { - return CodeAssembler::Parameter(static_cast( - Builtin_ProxyConstructor_InterfaceDescriptor::kJSNewTarget)); + return CAST(AllocateFunctionWithMapAndContext(revoke_map, revoke_info, + proxy_context)); } TF_BUILTIN(CallProxy, ProxiesCodeStubAssembler) { @@ -262,9 +259,11 @@ TF_BUILTIN(ConstructProxy, ProxiesCodeStubAssembler) { { ThrowTypeError(context, MessageTemplate::kProxyRevoked, "construct"); } } -Node* ProxiesCodeStubAssembler::CheckGetSetTrapResult( - Node* context, Node* target, Node* proxy, Node* name, Node* trap_result, +void ProxiesCodeStubAssembler::CheckGetSetTrapResult( + TNode context, TNode target, TNode proxy, + TNode name, TNode trap_result, 
    JSProxy::AccessKind access_kind) {
+  // TODO(mslekova): Think of a better name for the trap_result param.
   Node* map = LoadMap(target);
   VARIABLE(var_value, MachineRepresentation::kTagged);
   VARIABLE(var_details, MachineRepresentation::kWord32);
@@ -273,7 +272,7 @@ Node* ProxiesCodeStubAssembler::CheckGetSetTrapResult(
   Label if_found_value(this), check_in_runtime(this, Label::kDeferred),
       check_passed(this);
 
-  GotoIfNot(IsUniqueNameNoIndex(CAST(name)), &check_in_runtime);
+  GotoIfNot(IsUniqueNameNoIndex(name), &check_in_runtime);
   Node* instance_type = LoadInstanceType(target);
   TryGetOwnProperty(context, target, target, map, instance_type, name,
                     &if_found_value, &var_value, &var_details, &var_raw_value,
@@ -366,12 +365,13 @@ Node* ProxiesCodeStubAssembler::CheckGetSetTrapResult(
     }
 
     BIND(&check_passed);
-    return trap_result;
   }
 }
 
-Node* ProxiesCodeStubAssembler::CheckHasTrapResult(Node* context, Node* target,
-                                                   Node* proxy, Node* name) {
+void ProxiesCodeStubAssembler::CheckHasTrapResult(TNode<Context> context,
+                                                  TNode<JSReceiver> target,
+                                                  TNode<JSProxy> proxy,
+                                                  TNode<Name> name) {
   Node* target_map = LoadMap(target);
   VARIABLE(var_value, MachineRepresentation::kTagged);
   VARIABLE(var_details, MachineRepresentation::kWord32);
@@ -383,7 +383,7 @@ Node* ProxiesCodeStubAssembler::CheckHasTrapResult(Node* context, Node* target,
       check_in_runtime(this, Label::kDeferred);
 
   // 9.a. Let targetDesc be ? target.[[GetOwnProperty]](P).
-  GotoIfNot(IsUniqueNameNoIndex(CAST(name)), &check_in_runtime);
+  GotoIfNot(IsUniqueNameNoIndex(name), &check_in_runtime);
   Node* instance_type = LoadInstanceType(target);
   TryGetOwnProperty(context, target, target, target_map, instance_type, name,
                     &if_found_value, &var_value, &var_details, &var_raw_value,
@@ -419,7 +419,64 @@ Node* ProxiesCodeStubAssembler::CheckHasTrapResult(Node* context, Node* target,
   }
 
   BIND(&check_passed);
-  return FalseConstant();
+}
+
+void ProxiesCodeStubAssembler::CheckDeleteTrapResult(TNode<Context> context,
+                                                     TNode<JSReceiver> target,
+                                                     TNode<JSProxy> proxy,
+                                                     TNode<Name> name) {
+  TNode<Map> target_map = LoadMap(target);
+  TVARIABLE(Object, var_value);
+  TVARIABLE(Uint32T, var_details);
+  TVARIABLE(Object, var_raw_value);
+
+  Label if_found_value(this, Label::kDeferred),
+      throw_non_configurable(this, Label::kDeferred),
+      throw_non_extensible(this, Label::kDeferred), check_passed(this),
+      check_in_runtime(this, Label::kDeferred);
+
+  // 10. Let targetDesc be ? target.[[GetOwnProperty]](P).
+  GotoIfNot(IsUniqueNameNoIndex(name), &check_in_runtime);
+  TNode<Int32T> instance_type = LoadInstanceType(target);
+  TryGetOwnProperty(context, target, target, target_map, instance_type, name,
+                    &if_found_value, &var_value, &var_details, &var_raw_value,
+                    &check_passed, &check_in_runtime, kReturnAccessorPair);
+
+  // 11. If targetDesc is undefined, return true.
+  BIND(&if_found_value);
+  {
+    // 12. If targetDesc.[[Configurable]] is false, throw a TypeError exception.
+    TNode<BoolT> non_configurable = IsSetWord32(
+        var_details.value(), PropertyDetails::kAttributesDontDeleteMask);
+    GotoIf(non_configurable, &throw_non_configurable);
+
+    // 13. Let extensibleTarget be ? IsExtensible(target).
+    TNode<BoolT> target_extensible = IsExtensibleMap(target_map);
+
+    // 14. If extensibleTarget is false, throw a TypeError exception.
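CheckDeleteTrapResult above enforces the [[Delete]] invariants cited in its step comments: when the `deleteProperty` trap returns true but the target still owns the property, that property must be configurable (step 12) and the target must be extensible (steps 13 and 14). The same validation in plain C++, with an invented descriptor type; the function body continues below:

```cpp
#include <optional>
#include <stdexcept>

struct Descriptor { bool configurable; };  // invented

// Throws exactly when the proxy's "deleteProperty" trap result must be
// rejected; no own descriptor (step 11) means the trap result stands.
void CheckDeleteTrapResult(const std::optional<Descriptor>& target_desc,
                           bool target_extensible) {
  if (!target_desc.has_value()) return;  // step 11
  if (!target_desc->configurable) {      // step 12
    throw std::runtime_error("TypeError: property is non-configurable");
  }
  if (!target_extensible) {              // steps 13-14
    throw std::runtime_error("TypeError: proxy target is not extensible");
  }
}
```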
+ GotoIfNot(target_extensible, &throw_non_extensible); + Goto(&check_passed); + } + + BIND(&throw_non_configurable); + { + ThrowTypeError(context, + MessageTemplate::kProxyDeletePropertyNonConfigurable, name); + } + + BIND(&throw_non_extensible); + { + ThrowTypeError(context, MessageTemplate::kProxyDeletePropertyNonExtensible, + name); + } + + BIND(&check_in_runtime); + { + CallRuntime(Runtime::kCheckProxyDeleteTrapResult, context, name, target); + Goto(&check_passed); + } + + BIND(&check_passed); } } // namespace internal diff --git a/deps/v8/src/builtins/builtins-proxy-gen.h b/deps/v8/src/builtins/builtins-proxy-gen.h index fcaac7df6661ae..cb51faf57553fd 100644 --- a/deps/v8/src/builtins/builtins-proxy-gen.h +++ b/deps/v8/src/builtins/builtins-proxy-gen.h @@ -17,19 +17,21 @@ class ProxiesCodeStubAssembler : public CodeStubAssembler { explicit ProxiesCodeStubAssembler(compiler::CodeAssemblerState* state) : CodeStubAssembler(state) {} - Node* AllocateProxy(Node* target, Node* handler, Node* context); - Node* AllocateProxyRevokeFunction(Node* proxy, Node* context); + TNode AllocateProxy(TNode context, TNode target, + TNode handler); + TNode AllocateProxyRevokeFunction(TNode context, + TNode proxy); - // Get JSNewTarget parameter for ProxyConstructor builtin (Torque). - // TODO(v8:9120): Remove this once torque support exists - Node* GetProxyConstructorJSNewTarget(); + void CheckGetSetTrapResult(TNode context, TNode target, + TNode proxy, TNode name, + TNode trap_result, + JSProxy::AccessKind access_kind); - Node* CheckGetSetTrapResult(Node* context, Node* target, Node* proxy, - Node* name, Node* trap_result, - JSProxy::AccessKind access_kind); + void CheckHasTrapResult(TNode context, TNode target, + TNode proxy, TNode name); - Node* CheckHasTrapResult(Node* context, Node* target, Node* proxy, - Node* name); + void CheckDeleteTrapResult(TNode context, TNode target, + TNode proxy, TNode name); protected: enum ProxyRevokeFunctionContextSlot { @@ -37,9 +39,10 @@ class ProxiesCodeStubAssembler : public CodeStubAssembler { kProxyContextLength, }; - Node* AllocateJSArrayForCodeStubArguments(Node* context, - CodeStubArguments& args, Node* argc, - ParameterMode mode); + Node* AllocateJSArrayForCodeStubArguments( + Node* context, + CodeStubArguments& args, // NOLINT(runtime/references) + Node* argc, ParameterMode mode); private: Node* CreateProxyRevokeFunctionContext(Node* proxy, Node* native_context); diff --git a/deps/v8/src/builtins/builtins-reflect.cc b/deps/v8/src/builtins/builtins-reflect.cc index e998652dad3224..6151fcbd4761c4 100644 --- a/deps/v8/src/builtins/builtins-reflect.cc +++ b/deps/v8/src/builtins/builtins-reflect.cc @@ -46,53 +46,6 @@ BUILTIN(ReflectDefineProperty) { return *isolate->factory()->ToBoolean(result.FromJust()); } -// ES6 section 26.1.4 Reflect.deleteProperty -BUILTIN(ReflectDeleteProperty) { - HandleScope scope(isolate); - DCHECK_EQ(3, args.length()); - Handle target = args.at(1); - Handle key = args.at(2); - - if (!target->IsJSReceiver()) { - THROW_NEW_ERROR_RETURN_FAILURE( - isolate, NewTypeError(MessageTemplate::kCalledOnNonObject, - isolate->factory()->NewStringFromAsciiChecked( - "Reflect.deleteProperty"))); - } - - Handle name; - ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, name, - Object::ToName(isolate, key)); - - Maybe result = JSReceiver::DeletePropertyOrElement( - Handle::cast(target), name, LanguageMode::kSloppy); - MAYBE_RETURN(result, ReadOnlyRoots(isolate).exception()); - return *isolate->factory()->ToBoolean(result.FromJust()); -} - -// ES6 section 26.1.6 
Reflect.get -BUILTIN(ReflectGet) { - HandleScope scope(isolate); - Handle target = args.atOrUndefined(isolate, 1); - Handle key = args.atOrUndefined(isolate, 2); - Handle receiver = args.length() > 3 ? args.at(3) : target; - - if (!target->IsJSReceiver()) { - THROW_NEW_ERROR_RETURN_FAILURE( - isolate, NewTypeError(MessageTemplate::kCalledOnNonObject, - isolate->factory()->NewStringFromAsciiChecked( - "Reflect.get"))); - } - - Handle name; - ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, name, - Object::ToName(isolate, key)); - - RETURN_RESULT_OR_FAILURE( - isolate, Object::GetPropertyOrElement(receiver, name, - Handle::cast(target))); -} - // ES6 section 26.1.7 Reflect.getOwnPropertyDescriptor BUILTIN(ReflectGetOwnPropertyDescriptor) { HandleScope scope(isolate); @@ -119,42 +72,6 @@ BUILTIN(ReflectGetOwnPropertyDescriptor) { return *desc.ToObject(isolate); } -// ES6 section 26.1.8 Reflect.getPrototypeOf -BUILTIN(ReflectGetPrototypeOf) { - HandleScope scope(isolate); - DCHECK_EQ(2, args.length()); - Handle target = args.at(1); - - if (!target->IsJSReceiver()) { - THROW_NEW_ERROR_RETURN_FAILURE( - isolate, NewTypeError(MessageTemplate::kCalledOnNonObject, - isolate->factory()->NewStringFromAsciiChecked( - "Reflect.getPrototypeOf"))); - } - Handle receiver = Handle::cast(target); - RETURN_RESULT_OR_FAILURE(isolate, - JSReceiver::GetPrototype(isolate, receiver)); -} - -// ES6 section 26.1.10 Reflect.isExtensible -BUILTIN(ReflectIsExtensible) { - HandleScope scope(isolate); - DCHECK_EQ(2, args.length()); - Handle target = args.at(1); - - if (!target->IsJSReceiver()) { - THROW_NEW_ERROR_RETURN_FAILURE( - isolate, NewTypeError(MessageTemplate::kCalledOnNonObject, - isolate->factory()->NewStringFromAsciiChecked( - "Reflect.isExtensible"))); - } - - Maybe result = - JSReceiver::IsExtensible(Handle::cast(target)); - MAYBE_RETURN(result, ReadOnlyRoots(isolate).exception()); - return *isolate->factory()->ToBoolean(result.FromJust()); -} - // ES6 section 26.1.11 Reflect.ownKeys BUILTIN(ReflectOwnKeys) { HandleScope scope(isolate); @@ -177,25 +94,6 @@ BUILTIN(ReflectOwnKeys) { return *isolate->factory()->NewJSArrayWithElements(keys); } -// ES6 section 26.1.12 Reflect.preventExtensions -BUILTIN(ReflectPreventExtensions) { - HandleScope scope(isolate); - DCHECK_EQ(2, args.length()); - Handle target = args.at(1); - - if (!target->IsJSReceiver()) { - THROW_NEW_ERROR_RETURN_FAILURE( - isolate, NewTypeError(MessageTemplate::kCalledOnNonObject, - isolate->factory()->NewStringFromAsciiChecked( - "Reflect.preventExtensions"))); - } - - Maybe result = JSReceiver::PreventExtensions( - Handle::cast(target), kDontThrow); - MAYBE_RETURN(result, ReadOnlyRoots(isolate).exception()); - return *isolate->factory()->ToBoolean(result.FromJust()); -} - // ES6 section 26.1.13 Reflect.set BUILTIN(ReflectSet) { HandleScope scope(isolate); @@ -223,30 +121,5 @@ BUILTIN(ReflectSet) { return *isolate->factory()->ToBoolean(result.FromJust()); } -// ES6 section 26.1.14 Reflect.setPrototypeOf -BUILTIN(ReflectSetPrototypeOf) { - HandleScope scope(isolate); - DCHECK_EQ(3, args.length()); - Handle target = args.at(1); - Handle proto = args.at(2); - - if (!target->IsJSReceiver()) { - THROW_NEW_ERROR_RETURN_FAILURE( - isolate, NewTypeError(MessageTemplate::kCalledOnNonObject, - isolate->factory()->NewStringFromAsciiChecked( - "Reflect.setPrototypeOf"))); - } - - if (!proto->IsJSReceiver() && !proto->IsNull(isolate)) { - THROW_NEW_ERROR_RETURN_FAILURE( - isolate, NewTypeError(MessageTemplate::kProtoObjectOrNull, proto)); - } - - Maybe result = 
JSReceiver::SetPrototype( - Handle::cast(target), proto, true, kDontThrow); - MAYBE_RETURN(result, ReadOnlyRoots(isolate).exception()); - return *isolate->factory()->ToBoolean(result.FromJust()); -} - } // namespace internal } // namespace v8 diff --git a/deps/v8/src/builtins/builtins-regexp-gen.cc b/deps/v8/src/builtins/builtins-regexp-gen.cc index 51ee2796e678ef..d53518ff7ee094 100644 --- a/deps/v8/src/builtins/builtins-regexp-gen.cc +++ b/deps/v8/src/builtins/builtins-regexp-gen.cc @@ -15,7 +15,7 @@ #include "src/objects/js-regexp-string-iterator.h" #include "src/objects/js-regexp.h" #include "src/objects/regexp-match-info.h" -#include "src/regexp/regexp-macro-assembler.h" +#include "src/regexp/regexp.h" namespace v8 { namespace internal { @@ -94,12 +94,12 @@ TNode RegExpBuiltinsAssembler::RegExpCreate(TNode context, TNode pattern = Select( IsUndefined(maybe_string), [=] { return EmptyStringConstant(); }, [=] { return ToString_Inline(context, maybe_string); }); - TNode regexp = CAST(AllocateJSObjectFromMap(initial_map)); + TNode regexp = AllocateJSObjectFromMap(initial_map); return CallRuntime(Runtime::kRegExpInitializeAndCompile, context, regexp, pattern, flags); } -TNode RegExpBuiltinsAssembler::FastLoadLastIndex( +TNode RegExpBuiltinsAssembler::FastLoadLastIndexBeforeSmiCheck( TNode regexp) { // Load the in-object field. static const int field_offset = @@ -121,23 +121,27 @@ TNode RegExpBuiltinsAssembler::LoadLastIndex(TNode context, // The fast-path of StoreLastIndex when regexp is guaranteed to be an unmodified // JSRegExp instance. -void RegExpBuiltinsAssembler::FastStoreLastIndex(Node* regexp, Node* value) { +void RegExpBuiltinsAssembler::FastStoreLastIndex(TNode regexp, + TNode value) { // Store the in-object field. static const int field_offset = JSRegExp::kSize + JSRegExp::kLastIndexFieldIndex * kTaggedSize; StoreObjectField(regexp, field_offset, value); } -void RegExpBuiltinsAssembler::SlowStoreLastIndex(Node* context, Node* regexp, - Node* value) { - Node* const name = HeapConstant(isolate()->factory()->lastIndex_string()); - SetPropertyStrict(CAST(context), CAST(regexp), CAST(name), CAST(value)); +void RegExpBuiltinsAssembler::SlowStoreLastIndex(SloppyTNode context, + SloppyTNode regexp, + SloppyTNode value) { + TNode name = HeapConstant(isolate()->factory()->lastIndex_string()); + SetPropertyStrict(context, regexp, name, value); } -void RegExpBuiltinsAssembler::StoreLastIndex(Node* context, Node* regexp, - Node* value, bool is_fastpath) { +void RegExpBuiltinsAssembler::StoreLastIndex(TNode context, + TNode regexp, + TNode value, + bool is_fastpath) { if (is_fastpath) { - FastStoreLastIndex(regexp, value); + FastStoreLastIndex(CAST(regexp), CAST(value)); } else { SlowStoreLastIndex(context, regexp, value); } @@ -248,10 +252,10 @@ TNode RegExpBuiltinsAssembler::ConstructNewResultFromMatchInfo( TNode native_context = LoadNativeContext(context); TNode map = CAST(LoadContextElement( native_context, Context::SLOW_OBJECT_WITH_NULL_PROTOTYPE_MAP)); - TNode properties = AllocateNameDictionary(num_properties); + TNode properties = + AllocateNameDictionary(num_properties, kAllowLargeObjectAllocation); - TNode group_object = - CAST(AllocateJSObjectFromMap(map, properties)); + TNode group_object = AllocateJSObjectFromMap(map, properties); StoreObjectField(result, JSRegExpResult::kGroupsOffset, group_object); TVARIABLE(IntPtrT, var_i, IntPtrZero()); @@ -534,19 +538,18 @@ TNode RegExpBuiltinsAssembler::RegExpExecInternal( // We expect exactly one result since we force the called regexp to 
behave // as non-global. TNode int_result = ChangeInt32ToIntPtr(result); + GotoIf( + IntPtrEqual(int_result, IntPtrConstant(RegExp::kInternalRegExpSuccess)), + &if_success); + GotoIf( + IntPtrEqual(int_result, IntPtrConstant(RegExp::kInternalRegExpFailure)), + &if_failure); GotoIf(IntPtrEqual(int_result, - IntPtrConstant(NativeRegExpMacroAssembler::SUCCESS)), - &if_success); - GotoIf(IntPtrEqual(int_result, - IntPtrConstant(NativeRegExpMacroAssembler::FAILURE)), - &if_failure); - GotoIf(IntPtrEqual(int_result, - IntPtrConstant(NativeRegExpMacroAssembler::EXCEPTION)), + IntPtrConstant(RegExp::kInternalRegExpException)), &if_exception); - CSA_ASSERT(this, - IntPtrEqual(int_result, - IntPtrConstant(NativeRegExpMacroAssembler::RETRY))); + CSA_ASSERT(this, IntPtrEqual(int_result, + IntPtrConstant(RegExp::kInternalRegExpRetry))); Goto(&runtime); } @@ -755,7 +758,7 @@ RegExpBuiltinsAssembler::RegExpPrototypeExecBodyWithoutResult( GotoIfNot(should_update_last_index, &out); // Update the new last index from {match_indices}. - TNode new_lastindex = CAST(UnsafeLoadFixedArrayElement( + TNode new_lastindex = CAST(UnsafeLoadFixedArrayElement( CAST(match_indices), RegExpMatchInfo::kFirstCaptureIndex + 1)); StoreLastIndex(context, regexp, new_lastindex, is_fastpath); @@ -852,7 +855,7 @@ Node* RegExpBuiltinsAssembler::IsFastRegExpNoPrototype(Node* const context, // The smi check is required to omit ToLength(lastIndex) calls with possible // user-code execution on the fast path. - Node* const last_index = FastLoadLastIndex(CAST(object)); + TNode last_index = FastLoadLastIndexBeforeSmiCheck(CAST(object)); var_result.Bind(TaggedIsPositiveSmi(last_index)); Goto(&out); @@ -897,7 +900,7 @@ TNode RegExpBuiltinsAssembler::IsFastRegExpWithOriginalExec( BIND(&check_last_index); // The smi check is required to omit ToLength(lastIndex) calls with possible // user-code execution on the fast path. - TNode last_index = FastLoadLastIndex(object); + TNode last_index = FastLoadLastIndexBeforeSmiCheck(object); var_result = TaggedIsPositiveSmi(last_index); Goto(&out); @@ -925,9 +928,9 @@ void RegExpBuiltinsAssembler::BranchIfFastRegExp( // This should only be needed for String.p.(split||matchAll), but we are // conservative here. - GotoIf(IsRegExpSpeciesProtectorCellInvalid(), if_ismodified); + TNode native_context = LoadNativeContext(context); + GotoIf(IsRegExpSpeciesProtectorCellInvalid(native_context), if_ismodified); - Node* const native_context = LoadNativeContext(context); Node* const regexp_fun = LoadContextElement(native_context, Context::REGEXP_FUNCTION_INDEX); Node* const initial_map = @@ -954,7 +957,7 @@ void RegExpBuiltinsAssembler::BranchIfFastRegExp( // The smi check is required to omit ToLength(lastIndex) calls with possible // user-code execution on the fast path. - Node* const last_index = FastLoadLastIndex(CAST(object)); + TNode last_index = FastLoadLastIndexBeforeSmiCheck(CAST(object)); Branch(TaggedIsPositiveSmi(last_index), if_isunmodified, if_ismodified); } @@ -1012,7 +1015,7 @@ TF_BUILTIN(RegExpPrototypeExecSlow, RegExpBuiltinsAssembler) { // Fast path stub for ATOM regexps. String matching is done by StringIndexOf, // and {match_info} is updated on success. -// The slow path is implemented in RegExpImpl::AtomExec. +// The slow path is implemented in RegExp::AtomExec. 
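The hunks above swap the NativeRegExpMacroAssembler::SUCCESS/FAILURE/EXCEPTION constants for RegExp::kInternalRegExp* and retype the lastIndex load/store helpers; none of this changes what JavaScript observes. A minimal sketch of the lastIndex contract those helpers implement, not part of this patch:

// exec() on a global regexp reads lastIndex, matches, and writes it back.
const re = /o/g;
const s = 'foo';
console.log(re.exec(s).index, re.lastIndex);  // 1 2
console.log(re.exec(s).index, re.lastIndex);  // 2 3
console.log(re.exec(s), re.lastIndex);        // null 0 (failure resets it)

// A non-Smi lastIndex falls off the fast path; the slow path applies
// ToLength to whatever value was stored before matching.
re.lastIndex = '1';
console.log(re.exec(s).index);                // 1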
TF_BUILTIN(RegExpExecAtom, RegExpBuiltinsAssembler) { TNode regexp = CAST(Parameter(Descriptor::kRegExp)); TNode subject_string = CAST(Parameter(Descriptor::kString)); @@ -1538,7 +1541,8 @@ TNode RegExpBuiltinsAssembler::FastFlagGetter(TNode regexp, JSRegExp::Flag flag) { TNode flags = CAST(LoadObjectField(regexp, JSRegExp::kFlagsOffset)); TNode mask = SmiConstant(flag); - return SmiToInt32(SmiShr(SmiAnd(flags, mask), JSRegExp::FlagShiftBits(flag))); + return SmiToInt32(SmiShr(SmiAnd(flags, mask), base::bits::CountTrailingZeros( + static_cast(flag)))); } // Load through the GetProperty stub. @@ -1807,10 +1811,9 @@ TF_BUILTIN(RegExpPrototypeTestFast, RegExpBuiltinsAssembler) { Return(FalseConstant()); } -Node* RegExpBuiltinsAssembler::AdvanceStringIndex(Node* const string, - Node* const index, - Node* const is_unicode, - bool is_fastpath) { +TNode RegExpBuiltinsAssembler::AdvanceStringIndex( + SloppyTNode string, SloppyTNode index, + SloppyTNode is_unicode, bool is_fastpath) { CSA_ASSERT(this, IsString(string)); CSA_ASSERT(this, IsNumberNormalized(index)); if (is_fastpath) CSA_ASSERT(this, TaggedIsPositiveSmi(index)); @@ -1818,8 +1821,8 @@ Node* RegExpBuiltinsAssembler::AdvanceStringIndex(Node* const string, // Default to last_index + 1. // TODO(pwong): Consider using TrySmiAdd for the fast path to reduce generated // code. - Node* const index_plus_one = NumberInc(index); - VARIABLE(var_result, MachineRepresentation::kTagged, index_plus_one); + TNode index_plus_one = NumberInc(index); + TVARIABLE(Number, var_result, index_plus_one); // Advancing the index has some subtle issues involving the distinction // between Smis and HeapNumbers. There's three cases: @@ -1846,10 +1849,10 @@ Node* RegExpBuiltinsAssembler::AdvanceStringIndex(Node* const string, BIND(&if_isunicode); { TNode const string_length = LoadStringLengthAsWord(string); - TNode untagged_plus_one = SmiUntag(index_plus_one); + TNode untagged_plus_one = SmiUntag(CAST(index_plus_one)); GotoIfNot(IntPtrLessThan(untagged_plus_one, string_length), &out); - Node* const lead = StringCharCodeAt(string, SmiUntag(index)); + Node* const lead = StringCharCodeAt(string, SmiUntag(CAST(index))); GotoIfNot(Word32Equal(Word32And(lead, Int32Constant(0xFC00)), Int32Constant(0xD800)), &out); @@ -1860,8 +1863,8 @@ Node* RegExpBuiltinsAssembler::AdvanceStringIndex(Node* const string, &out); // At a surrogate pair, return index + 2. - Node* const index_plus_two = NumberInc(index_plus_one); - var_result.Bind(index_plus_two); + TNode index_plus_two = NumberInc(index_plus_one); + var_result = index_plus_two; Goto(&out); } @@ -1870,31 +1873,30 @@ Node* RegExpBuiltinsAssembler::AdvanceStringIndex(Node* const string, return var_result.value(); } -void RegExpBuiltinsAssembler::RegExpPrototypeMatchBody(Node* const context, - Node* const regexp, +void RegExpBuiltinsAssembler::RegExpPrototypeMatchBody(TNode context, + TNode regexp, TNode string, const bool is_fastpath) { if (is_fastpath) CSA_ASSERT(this, IsFastRegExp(context, regexp)); Node* const is_global = - FlagGetter(CAST(context), CAST(regexp), JSRegExp::kGlobal, is_fastpath); + FlagGetter(context, regexp, JSRegExp::kGlobal, is_fastpath); Label if_isglobal(this), if_isnotglobal(this); Branch(is_global, &if_isglobal, &if_isnotglobal); BIND(&if_isnotglobal); { - Node* const result = - is_fastpath - ? RegExpPrototypeExecBody(CAST(context), CAST(regexp), string, true) - : RegExpExec(context, regexp, string); + Node* const result = is_fastpath ? 
RegExpPrototypeExecBody( + context, CAST(regexp), string, true) + : RegExpExec(context, regexp, string); Return(result); } BIND(&if_isglobal); { - Node* const is_unicode = FlagGetter(CAST(context), CAST(regexp), - JSRegExp::kUnicode, is_fastpath); + Node* const is_unicode = + FlagGetter(context, regexp, JSRegExp::kUnicode, is_fastpath); StoreLastIndex(context, regexp, SmiZero(), is_fastpath); @@ -1935,8 +1937,8 @@ void RegExpBuiltinsAssembler::RegExpPrototypeMatchBody(Node* const context, // On the fast path, grab the matching string from the raw match index // array. TNode match_indices = - RegExpPrototypeExecBodyWithoutResult(CAST(context), CAST(regexp), - string, &if_didnotmatch, true); + RegExpPrototypeExecBodyWithoutResult(context, CAST(regexp), string, + &if_didnotmatch, true); Label dosubstring(this), donotsubstring(this); Branch(var_atom.value(), &donotsubstring, &dosubstring); @@ -1988,15 +1990,14 @@ void RegExpBuiltinsAssembler::RegExpPrototypeMatchBody(Node* const context, TNode const match_length = LoadStringLengthAsSmi(match); GotoIfNot(SmiEqual(match_length, SmiZero()), &loop); - Node* last_index = - LoadLastIndex(CAST(context), CAST(regexp), is_fastpath); + Node* last_index = LoadLastIndex(context, regexp, is_fastpath); if (is_fastpath) { CSA_ASSERT(this, TaggedIsPositiveSmi(last_index)); } else { last_index = ToLength_Inline(context, last_index); } - Node* const new_last_index = + TNode new_last_index = AdvanceStringIndex(string, last_index, is_unicode, is_fastpath); if (is_fastpath) { @@ -2017,7 +2018,7 @@ void RegExpBuiltinsAssembler::RegExpPrototypeMatchBody(Node* const context, { // Wrap the match in a JSArray. - Node* const result = array.ToJSArray(CAST(context)); + Node* const result = array.ToJSArray(context); Return(result); } } @@ -2034,7 +2035,7 @@ TF_BUILTIN(RegExpPrototypeMatch, RegExpBuiltinsAssembler) { ThrowIfNotJSReceiver(context, maybe_receiver, MessageTemplate::kIncompatibleMethodReceiver, "RegExp.prototype.@@match"); - Node* const receiver = maybe_receiver; + TNode receiver = CAST(maybe_receiver); // Convert {maybe_string} to a String. TNode const string = ToString_Inline(context, maybe_string); @@ -2086,7 +2087,8 @@ void RegExpMatchAllAssembler::Generate(TNode context, // 7. Let lastIndex be ? ToLength(? Get(R, "lastIndex")). // 8. Perform ? Set(matcher, "lastIndex", lastIndex, true). - FastStoreLastIndex(var_matcher.value(), FastLoadLastIndex(fast_regexp)); + FastStoreLastIndex(CAST(var_matcher.value()), + FastLoadLastIndex(fast_regexp)); // 9. If flags contains "g", let global be true. // 10. Else, let global be false. @@ -2226,12 +2228,11 @@ TF_BUILTIN(RegExpMatchFast, RegExpBuiltinsAssembler) { } void RegExpBuiltinsAssembler::RegExpPrototypeSearchBodyFast( - Node* const context, Node* const regexp, Node* const string) { + TNode context, TNode regexp, TNode string) { CSA_ASSERT(this, IsFastRegExp(context, regexp)); - CSA_ASSERT(this, IsString(string)); // Grab the initial value of last index. - Node* const previous_last_index = FastLoadLastIndex(CAST(regexp)); + TNode previous_last_index = FastLoadLastIndex(regexp); // Ensure last index is 0. FastStoreLastIndex(regexp, SmiZero()); @@ -2239,7 +2240,7 @@ void RegExpBuiltinsAssembler::RegExpPrototypeSearchBodyFast( // Call exec. Label if_didnotmatch(this); TNode match_indices = RegExpPrototypeExecBodyWithoutResult( - CAST(context), CAST(regexp), CAST(string), &if_didnotmatch, true); + context, regexp, string, &if_didnotmatch, true); // Successful match. 
{ @@ -2839,16 +2840,14 @@ TF_BUILTIN(RegExpStringIteratorPrototypeNext, RegExpStringIteratorAssembler) { GotoIfNot(IsEmptyString(match_str), &return_result); // 1. Let thisIndex be ? ToLength(? Get(R, "lastIndex")). - TNode this_index = CAST(FastLoadLastIndex(CAST(iterating_regexp))); - CSA_ASSERT(this, TaggedIsSmi(this_index)); + TNode this_index = FastLoadLastIndex(CAST(iterating_regexp)); // 2. Let nextIndex be ! AdvanceStringIndex(S, thisIndex, fullUnicode). - TNode next_index = CAST(AdvanceStringIndex( - iterating_string, this_index, HasUnicodeFlag(flags), true)); - CSA_ASSERT(this, TaggedIsSmi(next_index)); + TNode next_index = AdvanceStringIndexFast( + iterating_string, this_index, HasUnicodeFlag(flags)); // 3. Perform ? Set(R, "lastIndex", nextIndex, true). - FastStoreLastIndex(iterating_regexp, next_index); + FastStoreLastIndex(CAST(iterating_regexp), next_index); // iii. Return ! CreateIterResultObject(match, false). Goto(&return_result); @@ -2866,8 +2865,8 @@ TF_BUILTIN(RegExpStringIteratorPrototypeNext, RegExpStringIteratorAssembler) { TNode this_index = ToLength_Inline(context, last_index); // 2. Let nextIndex be ! AdvanceStringIndex(S, thisIndex, fullUnicode). - TNode next_index = CAST(AdvanceStringIndex( - iterating_string, this_index, HasUnicodeFlag(flags), false)); + TNode next_index = AdvanceStringIndex( + iterating_string, this_index, HasUnicodeFlag(flags), false); // 3. Perform ? Set(R, "lastIndex", nextIndex, true). SlowStoreLastIndex(context, iterating_regexp, next_index); diff --git a/deps/v8/src/builtins/builtins-regexp-gen.h b/deps/v8/src/builtins/builtins-regexp-gen.h index 88c00095b9d112..3677314f195ead 100644 --- a/deps/v8/src/builtins/builtins-regexp-gen.h +++ b/deps/v8/src/builtins/builtins-regexp-gen.h @@ -7,7 +7,7 @@ #include "src/base/optional.h" #include "src/codegen/code-stub-assembler.h" -#include "src/execution/message-template.h" +#include "src/common/message-template.h" namespace v8 { namespace internal { @@ -42,15 +42,20 @@ class RegExpBuiltinsAssembler : public CodeStubAssembler { TNode context, TNode length, TNode index, TNode input, TNode* elements_out = nullptr); - TNode FastLoadLastIndex(TNode regexp); + TNode FastLoadLastIndexBeforeSmiCheck(TNode regexp); + TNode FastLoadLastIndex(TNode regexp) { + return CAST(FastLoadLastIndexBeforeSmiCheck(regexp)); + } TNode SlowLoadLastIndex(TNode context, TNode regexp); TNode LoadLastIndex(TNode context, TNode regexp, bool is_fastpath); - void FastStoreLastIndex(Node* regexp, Node* value); - void SlowStoreLastIndex(Node* context, Node* regexp, Node* value); - void StoreLastIndex(Node* context, Node* regexp, Node* value, - bool is_fastpath); + void FastStoreLastIndex(TNode regexp, TNode value); + void SlowStoreLastIndex(SloppyTNode context, + SloppyTNode regexp, + SloppyTNode value); + void StoreLastIndex(TNode context, TNode regexp, + TNode value, bool is_fastpath); // Loads {var_string_start} and {var_string_end} with the corresponding // offsets into the given {string_data}. 
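The retyped AdvanceStringIndex above, and the AdvanceStringIndexFast wrapper now used by the RegExp string iterator, encode how zero-length matches move forward: one code unit normally, two when the /u flag is set and the current position is the lead surrogate of a pair. A small JavaScript illustration of that rule, not part of this patch:

// Zero-length global matches advance one code unit at a time, or one
// full code point when the regexp has the /u flag.
const s = 'a\u{1D356}b';  // U+1D356 occupies two UTF-16 code units
console.log([...s.matchAll(/(?:)/g)].map(m => m.index));   // [0, 1, 2, 3, 4]
console.log([...s.matchAll(/(?:)/gu)].map(m => m.index));  // [0, 1, 3, 4]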
@@ -127,20 +132,23 @@ class RegExpBuiltinsAssembler : public CodeStubAssembler { Node* RegExpExec(Node* context, Node* regexp, Node* string); - Node* AdvanceStringIndex(Node* const string, Node* const index, - Node* const is_unicode, bool is_fastpath); + TNode AdvanceStringIndex(SloppyTNode string, + SloppyTNode index, + SloppyTNode is_unicode, + bool is_fastpath); - Node* AdvanceStringIndexFast(Node* const string, Node* const index, - Node* const is_unicode) { - return AdvanceStringIndex(string, index, is_unicode, true); + TNode AdvanceStringIndexFast(TNode string, TNode index, + TNode is_unicode) { + return CAST(AdvanceStringIndex(string, index, is_unicode, true)); } - void RegExpPrototypeMatchBody(Node* const context, Node* const regexp, + void RegExpPrototypeMatchBody(TNode context, TNode regexp, TNode const string, const bool is_fastpath); - void RegExpPrototypeSearchBodyFast(Node* const context, Node* const regexp, - Node* const string); + void RegExpPrototypeSearchBodyFast(TNode context, + TNode regexp, + TNode string); void RegExpPrototypeSearchBodySlow(Node* const context, Node* const regexp, Node* const string); diff --git a/deps/v8/src/builtins/builtins-regexp.cc b/deps/v8/src/builtins/builtins-regexp.cc index 3e0f7182c75285..e758782a997dd0 100644 --- a/deps/v8/src/builtins/builtins-regexp.cc +++ b/deps/v8/src/builtins/builtins-regexp.cc @@ -6,8 +6,8 @@ #include "src/builtins/builtins.h" #include "src/logging/counters.h" #include "src/objects/objects-inl.h" -#include "src/regexp/jsregexp.h" #include "src/regexp/regexp-utils.h" +#include "src/regexp/regexp.h" #include "src/strings/string-builder-inl.h" namespace v8 { diff --git a/deps/v8/src/builtins/builtins-string-gen.cc b/deps/v8/src/builtins/builtins-string-gen.cc index 5689b42619a95c..97dc8ca895b8ae 100644 --- a/deps/v8/src/builtins/builtins-string-gen.cc +++ b/deps/v8/src/builtins/builtins-string-gen.cc @@ -545,32 +545,33 @@ TF_BUILTIN(StringCharAt, StringBuiltinsAssembler) { Return(result); } -TF_BUILTIN(StringCodePointAtUTF16, StringBuiltinsAssembler) { +TF_BUILTIN(StringCodePointAt, StringBuiltinsAssembler) { Node* receiver = Parameter(Descriptor::kReceiver); Node* position = Parameter(Descriptor::kPosition); + // TODO(sigurds) Figure out if passing length as argument pays off. TNode length = LoadStringLengthAsWord(receiver); // Load the character code at the {position} from the {receiver}. TNode code = - LoadSurrogatePairAt(receiver, length, position, UnicodeEncoding::UTF16); + LoadSurrogatePairAt(receiver, length, position, UnicodeEncoding::UTF32); // And return it as TaggedSigned value. // TODO(turbofan): Allow builtins to return values untagged. TNode result = SmiFromInt32(code); Return(result); } -TF_BUILTIN(StringCodePointAtUTF32, StringBuiltinsAssembler) { - Node* receiver = Parameter(Descriptor::kReceiver); - Node* position = Parameter(Descriptor::kPosition); +TF_BUILTIN(StringFromCodePointAt, StringBuiltinsAssembler) { + TNode receiver = CAST(Parameter(Descriptor::kReceiver)); + TNode position = + UncheckedCast(Parameter(Descriptor::kPosition)); // TODO(sigurds) Figure out if passing length as argument pays off. TNode length = LoadStringLengthAsWord(receiver); // Load the character code at the {position} from the {receiver}. TNode code = - LoadSurrogatePairAt(receiver, length, position, UnicodeEncoding::UTF32); - // And return it as TaggedSigned value. - // TODO(turbofan): Allow builtins to return values untagged. 
- TNode result = SmiFromInt32(code); + LoadSurrogatePairAt(receiver, length, position, UnicodeEncoding::UTF16); + // Create a String from the UTF16 encoded code point + TNode result = StringFromSingleUTF16EncodedCodePoint(code); Return(result); } @@ -952,19 +953,6 @@ void StringIncludesIndexOfAssembler::Generate(SearchVariant variant, } } -void StringBuiltinsAssembler::RequireObjectCoercible(Node* const context, - Node* const value, - const char* method_name) { - Label out(this), throw_exception(this, Label::kDeferred); - Branch(IsNullOrUndefined(value), &throw_exception, &out); - - BIND(&throw_exception); - ThrowTypeError(context, MessageTemplate::kCalledOnNullOrUndefined, - method_name); - - BIND(&out); -} - void StringBuiltinsAssembler::MaybeCallFunctionAtSymbol( Node* const context, Node* const object, Node* const maybe_string, Handle symbol, DescriptorIndexAndName symbol_index, @@ -1072,10 +1060,10 @@ compiler::Node* StringBuiltinsAssembler::GetSubstitution( TF_BUILTIN(StringPrototypeReplace, StringBuiltinsAssembler) { Label out(this); - Node* const receiver = Parameter(Descriptor::kReceiver); + TNode receiver = CAST(Parameter(Descriptor::kReceiver)); Node* const search = Parameter(Descriptor::kSearch); Node* const replace = Parameter(Descriptor::kReplace); - Node* const context = Parameter(Descriptor::kContext); + TNode context = CAST(Parameter(Descriptor::kContext)); TNode const smi_zero = SmiConstant(0); @@ -1578,7 +1566,7 @@ TF_BUILTIN(StringPrototypeSplit, StringBuiltinsAssembler) { ChangeInt32ToIntPtr(Parameter(Descriptor::kJSActualArgumentsCount)); CodeStubArguments args(this, argc); - Node* const receiver = args.GetReceiver(); + TNode receiver = args.GetReceiver(); Node* const separator = args.GetOptionalArgumentValue(kSeparatorArg); Node* const limit = args.GetOptionalArgumentValue(kLimitArg); TNode context = CAST(Parameter(Descriptor::kContext)); @@ -1986,12 +1974,12 @@ TNode StringBuiltinsAssembler::LoadSurrogatePairAt( switch (encoding) { case UnicodeEncoding::UTF16: - var_result = Signed(Word32Or( + var_result = Word32Or( // Need to swap the order for big-endian platforms #if V8_TARGET_BIG_ENDIAN - Word32Shl(lead, Int32Constant(16)), trail)); + Word32Shl(lead, Int32Constant(16)), trail); #else - Word32Shl(trail, Int32Constant(16)), lead)); + Word32Shl(trail, Int32Constant(16)), lead); #endif break; @@ -2002,8 +1990,8 @@ TNode StringBuiltinsAssembler::LoadSurrogatePairAt( Int32Constant(0x10000 - (0xD800 << 10) - 0xDC00); // (lead << 10) + trail + SURROGATE_OFFSET - var_result = Signed(Int32Add(Word32Shl(lead, Int32Constant(10)), - Int32Add(trail, surrogate_offset))); + var_result = Int32Add(Word32Shl(lead, Int32Constant(10)), + Int32Add(trail, surrogate_offset)); break; } } diff --git a/deps/v8/src/builtins/builtins-string-gen.h b/deps/v8/src/builtins/builtins-string-gen.h index 92ebd3803b8bd5..679ce0e17fe8ee 100644 --- a/deps/v8/src/builtins/builtins-string-gen.h +++ b/deps/v8/src/builtins/builtins-string-gen.h @@ -76,9 +76,6 @@ class StringBuiltinsAssembler : public CodeStubAssembler { TNode subject_length, TNode limit_number); - void RequireObjectCoercible(Node* const context, Node* const value, - const char* method_name); - TNode SmiIsNegative(TNode value) { return SmiLessThan(value, SmiConstant(0)); } diff --git a/deps/v8/src/builtins/builtins-symbol-gen.cc b/deps/v8/src/builtins/builtins-symbol-gen.cc index 4e8c9f98502a4c..610a8baeb314f2 100644 --- a/deps/v8/src/builtins/builtins-symbol-gen.cc +++ b/deps/v8/src/builtins/builtins-symbol-gen.cc @@ -13,8 +13,8 @@ 
namespace internal { // ES #sec-symbol-objects // ES #sec-symbol.prototype.description TF_BUILTIN(SymbolPrototypeDescriptionGetter, CodeStubAssembler) { - Node* context = Parameter(Descriptor::kContext); - Node* receiver = Parameter(Descriptor::kReceiver); + TNode context = CAST(Parameter(Descriptor::kContext)); + TNode receiver = CAST(Parameter(Descriptor::kReceiver)); Node* value = ToThisValue(context, receiver, PrimitiveType::kSymbol, "Symbol.prototype.description"); @@ -24,8 +24,8 @@ TF_BUILTIN(SymbolPrototypeDescriptionGetter, CodeStubAssembler) { // ES6 #sec-symbol.prototype-@@toprimitive TF_BUILTIN(SymbolPrototypeToPrimitive, CodeStubAssembler) { - Node* context = Parameter(Descriptor::kContext); - Node* receiver = Parameter(Descriptor::kReceiver); + TNode context = CAST(Parameter(Descriptor::kContext)); + TNode receiver = CAST(Parameter(Descriptor::kReceiver)); Node* result = ToThisValue(context, receiver, PrimitiveType::kSymbol, "Symbol.prototype [ @@toPrimitive ]"); @@ -34,8 +34,8 @@ TF_BUILTIN(SymbolPrototypeToPrimitive, CodeStubAssembler) { // ES6 #sec-symbol.prototype.tostring TF_BUILTIN(SymbolPrototypeToString, CodeStubAssembler) { - Node* context = Parameter(Descriptor::kContext); - Node* receiver = Parameter(Descriptor::kReceiver); + TNode context = CAST(Parameter(Descriptor::kContext)); + TNode receiver = CAST(Parameter(Descriptor::kReceiver)); Node* value = ToThisValue(context, receiver, PrimitiveType::kSymbol, "Symbol.prototype.toString"); @@ -45,8 +45,8 @@ TF_BUILTIN(SymbolPrototypeToString, CodeStubAssembler) { // ES6 #sec-symbol.prototype.valueof TF_BUILTIN(SymbolPrototypeValueOf, CodeStubAssembler) { - Node* context = Parameter(Descriptor::kContext); - Node* receiver = Parameter(Descriptor::kReceiver); + TNode context = CAST(Parameter(Descriptor::kContext)); + TNode receiver = CAST(Parameter(Descriptor::kReceiver)); Node* result = ToThisValue(context, receiver, PrimitiveType::kSymbol, "Symbol.prototype.valueOf"); diff --git a/deps/v8/src/builtins/builtins-typed-array-gen.cc b/deps/v8/src/builtins/builtins-typed-array-gen.cc index 8484685a6a5912..857d33988f32f5 100644 --- a/deps/v8/src/builtins/builtins-typed-array-gen.cc +++ b/deps/v8/src/builtins/builtins-typed-array-gen.cc @@ -18,32 +18,12 @@ using compiler::Node; template using TNode = compiler::TNode; -// This is needed for gc_mole which will compile this file without the full set -// of GN defined macros. -#ifndef V8_TYPED_ARRAY_MAX_SIZE_IN_HEAP -#define V8_TYPED_ARRAY_MAX_SIZE_IN_HEAP 64 -#endif - // ----------------------------------------------------------------------------- // ES6 section 22.2 TypedArray Objects -// Setup the TypedArray which is under construction. -// - Set the length. -// - Set the byte_offset. -// - Set the byte_length. -// - Set EmbedderFields to 0. -void TypedArrayBuiltinsAssembler::SetupTypedArray(TNode holder, - TNode length, - TNode byte_offset, - TNode byte_length) { - StoreObjectFieldNoWriteBarrier(holder, JSTypedArray::kLengthOffset, length, - MachineType::PointerRepresentation()); - StoreObjectFieldNoWriteBarrier(holder, JSArrayBufferView::kByteOffsetOffset, - byte_offset, - MachineType::PointerRepresentation()); - StoreObjectFieldNoWriteBarrier(holder, JSArrayBufferView::kByteLengthOffset, - byte_length, - MachineType::PointerRepresentation()); +// Sets the embedder fields to 0 for a TypedArray which is under construction. 
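The Symbol.prototype hunks above only CAST the context and receiver to typed nodes; ToThisValue still unwraps Symbol wrapper objects and rejects every other receiver. Sketched from JavaScript, not part of this patch:

// ToThisValue accepts Symbol primitives and Symbol wrapper objects alike,
// and throws a TypeError for any other receiver.
const sym = Symbol('tag');
console.log(sym.description);           // 'tag'
console.log(Object(sym).toString());    // 'Symbol(tag)'
try {
  Symbol.prototype.toString.call('x');  // receiver is not a Symbol
} catch (e) {
  console.log(e instanceof TypeError);  // true
}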
+void TypedArrayBuiltinsAssembler::SetupTypedArrayEmbedderFields( + TNode holder) { for (int offset = JSTypedArray::kHeaderSize; offset < JSTypedArray::kSizeWithEmbedderFields; offset += kTaggedSize) { StoreObjectField(holder, offset, SmiConstant(0)); @@ -54,8 +34,7 @@ void TypedArrayBuiltinsAssembler::SetupTypedArray(TNode holder, // elements. // TODO(bmeurer,v8:4153): Rename this and maybe fix up the implementation a bit. TNode TypedArrayBuiltinsAssembler::AllocateEmptyOnHeapBuffer( - TNode context, TNode holder, - TNode byte_length) { + TNode context, TNode byte_length) { TNode native_context = LoadNativeContext(context); TNode map = CAST(LoadContextElement(native_context, Context::ARRAY_BUFFER_MAP_INDEX)); @@ -97,16 +76,6 @@ TNode TypedArrayBuiltinsAssembler::AllocateEmptyOnHeapBuffer( offset < JSArrayBuffer::kSizeWithEmbedderFields; offset += kTaggedSize) { StoreObjectFieldNoWriteBarrier(buffer, offset, SmiConstant(0)); } - - StoreObjectField(holder, JSTypedArray::kBufferOffset, buffer); - - TNode elements = AllocateByteArray(byte_length); - StoreObjectField(holder, JSTypedArray::kElementsOffset, elements); - StoreObjectField(holder, JSTypedArray::kBasePointerOffset, elements); - StoreObjectFieldNoWriteBarrier( - holder, JSTypedArray::kExternalPointerOffset, - PointerConstant(JSTypedArray::ExternalPointerForOnHeapArray()), - MachineType::PointerRepresentation()); return buffer; } @@ -200,13 +169,13 @@ TF_BUILTIN(TypedArrayPrototypeLength, TypedArrayBuiltinsAssembler) { Return(ChangeUintPtrToTagged(length)); } -TNode TypedArrayBuiltinsAssembler::IsUint8ElementsKind( +TNode TypedArrayBuiltinsAssembler::IsUint8ElementsKind( TNode kind) { return Word32Or(Word32Equal(kind, Int32Constant(UINT8_ELEMENTS)), Word32Equal(kind, Int32Constant(UINT8_CLAMPED_ELEMENTS))); } -TNode TypedArrayBuiltinsAssembler::IsBigInt64ElementsKind( +TNode TypedArrayBuiltinsAssembler::IsBigInt64ElementsKind( TNode kind) { return Word32Or(Word32Equal(kind, Int32Constant(BIGINT64_ELEMENTS)), Word32Equal(kind, Int32Constant(BIGUINT64_ELEMENTS))); @@ -228,7 +197,12 @@ TNode TypedArrayBuiltinsAssembler::GetTypedArrayElementSize( TorqueStructTypedArrayElementsInfo TypedArrayBuiltinsAssembler::GetTypedArrayElementsInfo( TNode typed_array) { - TNode elements_kind = LoadElementsKind(typed_array); + return GetTypedArrayElementsInfo(LoadMap(typed_array)); +} + +TorqueStructTypedArrayElementsInfo +TypedArrayBuiltinsAssembler::GetTypedArrayElementsInfo(TNode map) { + TNode elements_kind = LoadMapElementsKind(map); TVARIABLE(UintPtrT, var_size_log2); TVARIABLE(Map, var_map); ReadOnlyRoots roots(isolate()); @@ -294,10 +268,9 @@ TNode TypedArrayBuiltinsAssembler::GetBuffer( Label call_runtime(this), done(this); TVARIABLE(Object, var_result); - TNode buffer = LoadObjectField(array, JSTypedArray::kBufferOffset); + TNode buffer = LoadJSArrayBufferViewBuffer(array); GotoIf(IsDetachedBuffer(buffer), &call_runtime); - TNode backing_store = LoadObjectField( - CAST(buffer), JSArrayBuffer::kBackingStoreOffset); + TNode backing_store = LoadJSArrayBufferBackingStore(buffer); GotoIf(WordEqual(backing_store, IntPtrConstant(0)), &call_runtime); var_result = buffer; Goto(&done); @@ -327,10 +300,10 @@ void TypedArrayBuiltinsAssembler::SetTypedArraySource( TNode context, TNode source, TNode target, TNode offset, Label* call_runtime, Label* if_source_too_large) { - CSA_ASSERT(this, Word32BinaryNot(IsDetachedBuffer( - LoadObjectField(source, JSTypedArray::kBufferOffset)))); - CSA_ASSERT(this, Word32BinaryNot(IsDetachedBuffer( - 
LoadObjectField(target, JSTypedArray::kBufferOffset)))); + CSA_ASSERT(this, Word32BinaryNot( + IsDetachedBuffer(LoadJSArrayBufferViewBuffer(source)))); + CSA_ASSERT(this, Word32BinaryNot( + IsDetachedBuffer(LoadJSArrayBufferViewBuffer(target)))); CSA_ASSERT(this, IntPtrGreaterThanOrEqual(offset, IntPtrConstant(0))); CSA_ASSERT(this, IntPtrLessThanOrEqual(offset, IntPtrConstant(Smi::kMaxValue))); @@ -774,8 +747,8 @@ TF_BUILTIN(TypedArrayOf, TypedArrayBuiltinsAssembler) { // ToNumber/ToBigInt may execute JavaScript code, which could // detach the array's buffer. - Node* buffer = - LoadObjectField(new_typed_array, JSTypedArray::kBufferOffset); + TNode buffer = + LoadJSArrayBufferViewBuffer(new_typed_array); GotoIf(IsDetachedBuffer(buffer), &if_detached); // GC may move backing store in ToNumber, thus load backing @@ -997,8 +970,8 @@ TF_BUILTIN(TypedArrayFrom, TypedArrayBuiltinsAssembler) { // ToNumber/ToBigInt may execute JavaScript code, which could // detach the array's buffer. - Node* buffer = LoadObjectField(target_obj.value(), - JSTypedArray::kBufferOffset); + TNode buffer = + LoadJSArrayBufferViewBuffer(target_obj.value()); GotoIf(IsDetachedBuffer(buffer), &if_detached); // GC may move backing store in map_fn, thus load backing @@ -1027,7 +1000,5 @@ TF_BUILTIN(TypedArrayFrom, TypedArrayBuiltinsAssembler) { "%TypedArray%.from"); } -#undef V8_TYPED_ARRAY_MAX_SIZE_IN_HEAP - } // namespace internal } // namespace v8 diff --git a/deps/v8/src/builtins/builtins-typed-array-gen.h b/deps/v8/src/builtins/builtins-typed-array-gen.h index 6fb02a657c5431..d637bc9c6b6c9b 100644 --- a/deps/v8/src/builtins/builtins-typed-array-gen.h +++ b/deps/v8/src/builtins/builtins-typed-array-gen.h @@ -27,15 +27,12 @@ class TypedArrayBuiltinsAssembler : public CodeStubAssembler { const char* method_name, IterationKind iteration_kind); - void SetupTypedArray(TNode holder, TNode length, - TNode byte_offset, - TNode byte_length); + void SetupTypedArrayEmbedderFields(TNode holder); void AttachBuffer(TNode holder, TNode buffer, TNode map, TNode length, TNode byte_offset); TNode AllocateEmptyOnHeapBuffer(TNode context, - TNode holder, TNode byte_length); TNode LoadMapForType(TNode array); @@ -44,16 +41,17 @@ class TypedArrayBuiltinsAssembler : public CodeStubAssembler { TNode byte_offset); // Returns true if kind is either UINT8_ELEMENTS or UINT8_CLAMPED_ELEMENTS. - TNode IsUint8ElementsKind(TNode kind); + TNode IsUint8ElementsKind(TNode kind); // Returns true if kind is either BIGINT64_ELEMENTS or BIGUINT64_ELEMENTS. - TNode IsBigInt64ElementsKind(TNode kind); + TNode IsBigInt64ElementsKind(TNode kind); // Returns the byte size of an element for a TypedArray elements kind. TNode GetTypedArrayElementSize(TNode elements_kind); // Returns information (byte size and map) about a TypedArray's elements. ElementsInfo GetTypedArrayElementsInfo(TNode typed_array); + ElementsInfo GetTypedArrayElementsInfo(TNode map); TNode GetDefaultConstructor(TNode context, TNode exemplar); diff --git a/deps/v8/src/builtins/builtins-weak-refs.cc b/deps/v8/src/builtins/builtins-weak-refs.cc index 78f37c0cf5dfbd..18738d2c487703 100644 --- a/deps/v8/src/builtins/builtins-weak-refs.cc +++ b/deps/v8/src/builtins/builtins-weak-refs.cc @@ -48,14 +48,24 @@ BUILTIN(FinalizationGroupRegister) { HandleScope scope(isolate); const char* method_name = "FinalizationGroup.prototype.register"; + // 1. Let finalizationGroup be the this value. + // + // 2. If Type(finalizationGroup) is not Object, throw a TypeError + // exception. + // + // 4. 
If finalizationGroup does not have a [[Cells]] internal slot, + // throw a TypeError exception. CHECK_RECEIVER(JSFinalizationGroup, finalization_group, method_name); Handle target = args.atOrUndefined(isolate, 1); + + // 3. If Type(target) is not Object, throw a TypeError exception. if (!target->IsJSReceiver()) { THROW_NEW_ERROR_RETURN_FAILURE( isolate, NewTypeError(MessageTemplate::kWeakRefsRegisterTargetMustBeObject)); } + Handle holdings = args.atOrUndefined(isolate, 2); if (target->SameValue(*holdings)) { THROW_NEW_ERROR_RETURN_FAILURE( @@ -64,15 +74,21 @@ BUILTIN(FinalizationGroupRegister) { MessageTemplate::kWeakRefsRegisterTargetAndHoldingsMustNotBeSame)); } - Handle key = args.atOrUndefined(isolate, 3); - // TODO(marja, gsathya): Restrictions on "key" (e.g., does it need to be an - // object). + Handle unregister_token = args.atOrUndefined(isolate, 3); + // 5. If Type(unregisterToken) is not Object, + // a. If unregisterToken is not undefined, throw a TypeError exception. + if (!unregister_token->IsJSReceiver() && !unregister_token->IsUndefined()) { + THROW_NEW_ERROR_RETURN_FAILURE( + isolate, + NewTypeError(MessageTemplate::kWeakRefsUnregisterTokenMustBeObject, + unregister_token)); + } // TODO(marja): Realms. JSFinalizationGroup::Register(finalization_group, - Handle::cast(target), holdings, key, - isolate); + Handle::cast(target), holdings, + unregister_token, isolate); return ReadOnlyRoots(isolate).undefined_value(); } @@ -80,25 +96,63 @@ BUILTIN(FinalizationGroupUnregister) { HandleScope scope(isolate); const char* method_name = "FinalizationGroup.prototype.unregister"; + // 1. Let finalizationGroup be the this value. + // + // 2. If Type(finalizationGroup) is not Object, throw a TypeError + // exception. + // + // 3. If finalizationGroup does not have a [[Cells]] internal slot, + // throw a TypeError exception. CHECK_RECEIVER(JSFinalizationGroup, finalization_group, method_name); - Handle key = args.atOrUndefined(isolate, 1); - JSFinalizationGroup::Unregister(finalization_group, key, isolate); - return ReadOnlyRoots(isolate).undefined_value(); + Handle unregister_token = args.atOrUndefined(isolate, 1); + + // 4. If Type(unregisterToken) is not Object, throw a TypeError exception. + if (!unregister_token->IsJSReceiver()) { + THROW_NEW_ERROR_RETURN_FAILURE( + isolate, + NewTypeError(MessageTemplate::kWeakRefsUnregisterTokenMustBeObject, + unregister_token)); + } + + bool success = JSFinalizationGroup::Unregister( + finalization_group, Handle::cast(unregister_token), isolate); + + return *isolate->factory()->ToBoolean(success); } BUILTIN(FinalizationGroupCleanupSome) { HandleScope scope(isolate); const char* method_name = "FinalizationGroup.prototype.cleanupSome"; + // 1. Let finalizationGroup be the this value. + // + // 2. If Type(finalizationGroup) is not Object, throw a TypeError + // exception. + // + // 3. If finalizationGroup does not have a [[Cells]] internal slot, + // throw a TypeError exception. CHECK_RECEIVER(JSFinalizationGroup, finalization_group, method_name); - // TODO(marja, gsathya): Add missing "cleanup" callback. + Handle callback(finalization_group->cleanup(), isolate); + Handle callback_obj = args.atOrUndefined(isolate, 1); + + // 4. If callback is not undefined and IsCallable(callback) is + // false, throw a TypeError exception. 
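The spec-step comments and checks added above give register/unregister their final shape: the unregister token must be an object when present, and unregister now reports whether any cell was actually removed. A JavaScript sketch of that behavior, not part of this patch; at the time of this change the API shipped behind the --harmony-weak-refs flag and was later renamed FinalizationRegistry:

// Run with: node --harmony-weak-refs demo.js
const fg = new FinalizationGroup(holdings => { /* cleanup */ });
const token = {};
fg.register({}, 'held value', token);  // token must be an object if given
console.log(fg.unregister(token));     // true: one cell was removed
console.log(fg.unregister(token));     // false: nothing left to remove
try {
  fg.register({}, 'held value', 42);   // neither undefined nor an object
} catch (e) {
  console.log(e instanceof TypeError); // true
}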
+ if (!callback_obj->IsUndefined(isolate)) { + if (!callback_obj->IsCallable()) { + THROW_NEW_ERROR_RETURN_FAILURE( + isolate, + NewTypeError(MessageTemplate::kWeakRefsCleanupMustBeCallable)); + } + callback = callback_obj; + } // Don't do set_scheduled_for_cleanup(false); we still have the microtask // scheduled and don't want to schedule another one in case the user never // executes microtasks. - JSFinalizationGroup::Cleanup(finalization_group, isolate); + JSFinalizationGroup::Cleanup(isolate, finalization_group, callback); + return ReadOnlyRoots(isolate).undefined_value(); } @@ -138,7 +192,7 @@ BUILTIN(WeakRefConstructor) { } Handle target_receiver = handle(JSReceiver::cast(*target_object), isolate); - isolate->heap()->AddKeepDuringJobTarget(target_receiver); + isolate->heap()->KeepDuringJob(target_receiver); // TODO(marja): Realms. @@ -158,9 +212,9 @@ BUILTIN(WeakRefDeref) { if (weak_ref->target().IsJSReceiver()) { Handle target = handle(JSReceiver::cast(weak_ref->target()), isolate); - // AddKeepDuringJobTarget might allocate and cause a GC, but it won't clear + // KeepDuringJob might allocate and cause a GC, but it won't clear // weak_ref since we hold a Handle to its target. - isolate->heap()->AddKeepDuringJobTarget(target); + isolate->heap()->KeepDuringJob(target); } else { DCHECK(weak_ref->target().IsUndefined(isolate)); } diff --git a/deps/v8/src/builtins/collections.tq b/deps/v8/src/builtins/collections.tq index eb95a7702368e3..b83906d109b2da 100644 --- a/deps/v8/src/builtins/collections.tq +++ b/deps/v8/src/builtins/collections.tq @@ -33,7 +33,7 @@ namespace collections { } } } - case (receiver: JSReceiver): { + case (JSReceiver): { goto MayHaveSideEffects; } case (o: Object): deferred { diff --git a/deps/v8/src/builtins/data-view.tq b/deps/v8/src/builtins/data-view.tq index 842e9527ee5903..62a0cc31c3c34a 100644 --- a/deps/v8/src/builtins/data-view.tq +++ b/deps/v8/src/builtins/data-view.tq @@ -74,16 +74,17 @@ namespace data_view { // ES6 section 24.2.4.1 get DataView.prototype.buffer javascript builtin DataViewPrototypeGetBuffer( - context: Context, receiver: Object, ...arguments): JSArrayBuffer { - let dataView: JSDataView = + js-implicit context: Context, + receiver: Object)(...arguments): JSArrayBuffer { + const dataView: JSDataView = ValidateDataView(context, receiver, 'get DataView.prototype.buffer'); return dataView.buffer; } // ES6 section 24.2.4.2 get DataView.prototype.byteLength javascript builtin DataViewPrototypeGetByteLength( - context: Context, receiver: Object, ...arguments): Number { - let dataView: JSDataView = ValidateDataView( + js-implicit context: Context, receiver: Object)(...arguments): Number { + const dataView: JSDataView = ValidateDataView( context, receiver, 'get DataView.prototype.byte_length'); if (WasNeutered(dataView)) { // TODO(bmeurer): According to the ES6 spec, we should throw a TypeError @@ -95,8 +96,8 @@ namespace data_view { // ES6 section 24.2.4.3 get DataView.prototype.byteOffset javascript builtin DataViewPrototypeGetByteOffset( - context: Context, receiver: Object, ...arguments): Number { - let dataView: JSDataView = ValidateDataView( + js-implicit context: Context, receiver: Object)(...arguments): Number { + const dataView: JSDataView = ValidateDataView( context, receiver, 'get DataView.prototype.byte_offset'); if (WasNeutered(dataView)) { // TODO(bmeurer): According to the ES6 spec, we should throw a TypeError @@ -128,7 +129,7 @@ namespace data_view { macro LoadDataView16( buffer: JSArrayBuffer, offset: uintptr, 
requestedLittleEndian: bool, signed: constexpr bool): Number { - let dataPointer: RawPtr = buffer.backing_store; + const dataPointer: RawPtr = buffer.backing_store; let b0: int32; let b1: int32; @@ -155,12 +156,12 @@ namespace data_view { macro LoadDataView32( buffer: JSArrayBuffer, offset: uintptr, requestedLittleEndian: bool, kind: constexpr ElementsKind): Number { - let dataPointer: RawPtr = buffer.backing_store; + const dataPointer: RawPtr = buffer.backing_store; - let b0: uint32 = LoadUint8(dataPointer, offset); - let b1: uint32 = LoadUint8(dataPointer, offset + 1); - let b2: uint32 = LoadUint8(dataPointer, offset + 2); - let b3: uint32 = LoadUint8(dataPointer, offset + 3); + const b0: uint32 = LoadUint8(dataPointer, offset); + const b1: uint32 = LoadUint8(dataPointer, offset + 1); + const b2: uint32 = LoadUint8(dataPointer, offset + 2); + const b3: uint32 = LoadUint8(dataPointer, offset + 3); let result: uint32; if (requestedLittleEndian) { @@ -174,7 +175,7 @@ namespace data_view { } else if constexpr (kind == UINT32_ELEMENTS) { return Convert(result); } else if constexpr (kind == FLOAT32_ELEMENTS) { - let floatRes: float64 = Convert(BitcastInt32ToFloat32(result)); + const floatRes: float64 = Convert(BitcastInt32ToFloat32(result)); return Convert(floatRes); } else { unreachable; @@ -184,16 +185,16 @@ namespace data_view { macro LoadDataViewFloat64( buffer: JSArrayBuffer, offset: uintptr, requestedLittleEndian: bool): Number { - let dataPointer: RawPtr = buffer.backing_store; - - let b0: uint32 = LoadUint8(dataPointer, offset); - let b1: uint32 = LoadUint8(dataPointer, offset + 1); - let b2: uint32 = LoadUint8(dataPointer, offset + 2); - let b3: uint32 = LoadUint8(dataPointer, offset + 3); - let b4: uint32 = LoadUint8(dataPointer, offset + 4); - let b5: uint32 = LoadUint8(dataPointer, offset + 5); - let b6: uint32 = LoadUint8(dataPointer, offset + 6); - let b7: uint32 = LoadUint8(dataPointer, offset + 7); + const dataPointer: RawPtr = buffer.backing_store; + + const b0: uint32 = LoadUint8(dataPointer, offset); + const b1: uint32 = LoadUint8(dataPointer, offset + 1); + const b2: uint32 = LoadUint8(dataPointer, offset + 2); + const b3: uint32 = LoadUint8(dataPointer, offset + 3); + const b4: uint32 = LoadUint8(dataPointer, offset + 4); + const b5: uint32 = LoadUint8(dataPointer, offset + 5); + const b6: uint32 = LoadUint8(dataPointer, offset + 6); + const b7: uint32 = LoadUint8(dataPointer, offset + 7); let lowWord: uint32; let highWord: uint32; @@ -212,74 +213,49 @@ namespace data_view { return Convert(result); } - extern macro AllocateBigInt(intptr): BigInt; - extern macro StoreBigIntBitfield(BigInt, uint32): void; - extern macro StoreBigIntDigit(BigInt, constexpr int31, uintptr): void; - extern macro DataViewBuiltinsAssembler::DataViewEncodeBigIntBits( - constexpr bool, constexpr int31): uint32; - - const kPositiveBigInt: constexpr bool = false; - const kNegativeBigInt: constexpr bool = true; const kZeroDigitBigInt: constexpr int31 = 0; const kOneDigitBigInt: constexpr int31 = 1; const kTwoDigitBigInt: constexpr int31 = 2; - macro CreateEmptyBigInt(isPositive: bool, length: constexpr int31): BigInt { - // Allocate a BigInt with the desired length (number of digits). - let result: BigInt = AllocateBigInt(length); - - // Write the desired sign and length to the BigInt bitfield. 
- if (isPositive) { - StoreBigIntBitfield( - result, DataViewEncodeBigIntBits(kPositiveBigInt, length)); - } else { - StoreBigIntBitfield( - result, DataViewEncodeBigIntBits(kNegativeBigInt, length)); - } - - return result; - } - // Create a BigInt on a 64-bit architecture from two 32-bit values. - macro MakeBigIntOn64Bit( + macro MakeBigIntOn64Bit(implicit context: Context)( lowWord: uint32, highWord: uint32, signed: constexpr bool): BigInt { // 0n is represented by a zero-length BigInt. if (lowWord == 0 && highWord == 0) { - return AllocateBigInt(kZeroDigitBigInt); + return Convert(bigint::AllocateBigInt(kZeroDigitBigInt)); } - let isPositive: bool = true; - let highPart: intptr = Signed(Convert(highWord)); - let lowPart: intptr = Signed(Convert(lowWord)); + let sign: uint32 = bigint::kPositiveSign; + const highPart: intptr = Signed(Convert(highWord)); + const lowPart: intptr = Signed(Convert(lowWord)); let rawValue: intptr = (highPart << 32) + lowPart; if constexpr (signed) { if (rawValue < 0) { - isPositive = false; + sign = bigint::kNegativeSign; // We have to store the absolute value of rawValue in the digit. rawValue = 0 - rawValue; } } // Allocate the BigInt and store the absolute value. - let result: BigInt = CreateEmptyBigInt(isPositive, kOneDigitBigInt); - - StoreBigIntDigit(result, 0, Unsigned(rawValue)); - - return result; + const result: MutableBigInt = + bigint::AllocateEmptyBigInt(sign, kOneDigitBigInt); + bigint::StoreBigIntDigit(result, 0, Unsigned(rawValue)); + return Convert(result); } // Create a BigInt on a 32-bit architecture from two 32-bit values. - macro MakeBigIntOn32Bit( + macro MakeBigIntOn32Bit(implicit context: Context)( lowWord: uint32, highWord: uint32, signed: constexpr bool): BigInt { // 0n is represented by a zero-length BigInt. if (lowWord == 0 && highWord == 0) { - return AllocateBigInt(kZeroDigitBigInt); + return Convert(bigint::AllocateBigInt(kZeroDigitBigInt)); } // On a 32-bit platform, we might need 1 or 2 digits to store the number. let needTwoDigits: bool = false; - let isPositive: bool = true; + let sign: uint32 = bigint::kPositiveSign; // We need to do some math on lowWord and highWord, // so Convert them to int32. @@ -293,7 +269,7 @@ namespace data_view { if constexpr (signed) { // If highPart < 0, the number is always negative. if (highPart < 0) { - isPositive = false; + sign = bigint::kNegativeSign; // We have to compute the absolute value by hand. // There will be a negative carry from the low word @@ -322,25 +298,23 @@ namespace data_view { } // Allocate the BigInt with the right sign and length. - let result: BigInt; + let result: MutableBigInt; if (needTwoDigits) { - result = CreateEmptyBigInt(isPositive, kTwoDigitBigInt); + result = bigint::AllocateEmptyBigInt(sign, kTwoDigitBigInt); } else { - result = CreateEmptyBigInt(isPositive, kOneDigitBigInt); + result = bigint::AllocateEmptyBigInt(sign, kOneDigitBigInt); } // Finally, write the digit(s) to the BigInt. 
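The MakeBigIntOn64Bit/MakeBigIntOn32Bit macros above turn the two 32-bit words read from the buffer into a signed BigInt, handling the negative case by storing a sign flag plus absolute-value digits. The equivalent reconstruction in JavaScript, not part of this patch; the helper name toBigInt64 is purely illustrative:

// Reassemble a signed 64-bit value from its low and high 32-bit words,
// matching what the MakeBigInt* macros compute with sign + digits.
function toBigInt64(lowWord, highWord) {  // hypothetical helper
  return BigInt.asIntN(64, (BigInt(highWord) << 32n) | BigInt(lowWord));
}
console.log(toBigInt64(0xFFFFFFFE, 0xFFFFFFFF));  // -2n
console.log(toBigInt64(5, 0));                    // 5n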
- StoreBigIntDigit(result, 0, Unsigned(Convert(lowPart))); - + bigint::StoreBigIntDigit(result, 0, Unsigned(Convert(lowPart))); if (needTwoDigits) { - StoreBigIntDigit(result, 1, Unsigned(Convert(highPart))); + bigint::StoreBigIntDigit(result, 1, Unsigned(Convert(highPart))); } - - return result; + return Convert(result); } - macro MakeBigInt(lowWord: uint32, highWord: uint32, signed: constexpr bool): - BigInt { + macro MakeBigInt(implicit context: Context)( + lowWord: uint32, highWord: uint32, signed: constexpr bool): BigInt { // A BigInt digit has the platform word size, so we only need one digit // on 64-bit platforms but may need two on 32-bit. if constexpr (Is64()) { @@ -350,19 +324,19 @@ namespace data_view { } } - macro LoadDataViewBigInt( + macro LoadDataViewBigInt(implicit context: Context)( buffer: JSArrayBuffer, offset: uintptr, requestedLittleEndian: bool, signed: constexpr bool): BigInt { - let dataPointer: RawPtr = buffer.backing_store; - - let b0: uint32 = LoadUint8(dataPointer, offset); - let b1: uint32 = LoadUint8(dataPointer, offset + 1); - let b2: uint32 = LoadUint8(dataPointer, offset + 2); - let b3: uint32 = LoadUint8(dataPointer, offset + 3); - let b4: uint32 = LoadUint8(dataPointer, offset + 4); - let b5: uint32 = LoadUint8(dataPointer, offset + 5); - let b6: uint32 = LoadUint8(dataPointer, offset + 6); - let b7: uint32 = LoadUint8(dataPointer, offset + 7); + const dataPointer: RawPtr = buffer.backing_store; + + const b0: uint32 = LoadUint8(dataPointer, offset); + const b1: uint32 = LoadUint8(dataPointer, offset + 1); + const b2: uint32 = LoadUint8(dataPointer, offset + 2); + const b3: uint32 = LoadUint8(dataPointer, offset + 3); + const b4: uint32 = LoadUint8(dataPointer, offset + 4); + const b5: uint32 = LoadUint8(dataPointer, offset + 5); + const b6: uint32 = LoadUint8(dataPointer, offset + 6); + const b7: uint32 = LoadUint8(dataPointer, offset + 7); let lowWord: uint32; let highWord: uint32; @@ -385,7 +359,7 @@ namespace data_view { transitioning macro DataViewGet( context: Context, receiver: Object, offset: Object, requestedLittleEndian: Object, kind: constexpr ElementsKind): Numeric { - let dataView: JSDataView = + const dataView: JSDataView = ValidateDataView(context, receiver, MakeDataViewGetterNameString(kind)); let getIndex: Number; @@ -396,25 +370,25 @@ namespace data_view { ThrowRangeError(kInvalidDataViewAccessorOffset); } - let littleEndian: bool = ToBoolean(requestedLittleEndian); - let buffer: JSArrayBuffer = dataView.buffer; + const littleEndian: bool = ToBoolean(requestedLittleEndian); + const buffer: JSArrayBuffer = dataView.buffer; if (IsDetachedBuffer(buffer)) { ThrowTypeError(kDetachedOperation, MakeDataViewGetterNameString(kind)); } - let getIndexFloat: float64 = Convert(getIndex); - let getIndexWord: uintptr = Convert(getIndexFloat); + const getIndexFloat: float64 = Convert(getIndex); + const getIndexWord: uintptr = Convert(getIndexFloat); - let viewOffsetWord: uintptr = dataView.byte_offset; - let viewSizeFloat: float64 = Convert(dataView.byte_length); - let elementSizeFloat: float64 = DataViewElementSize(kind); + const viewOffsetWord: uintptr = dataView.byte_offset; + const viewSizeFloat: float64 = Convert(dataView.byte_length); + const elementSizeFloat: float64 = DataViewElementSize(kind); if (getIndexFloat + elementSizeFloat > viewSizeFloat) { ThrowRangeError(kInvalidDataViewAccessorOffset); } - let bufferIndex: uintptr = getIndexWord + viewOffsetWord; + const bufferIndex: uintptr = getIndexWord + viewOffsetWord; if constexpr (kind == 
UINT8_ELEMENTS) { return LoadDataView8(buffer, bufferIndex, false); @@ -442,84 +416,84 @@ namespace data_view { } transitioning javascript builtin DataViewPrototypeGetUint8( - context: Context, receiver: Object, ...arguments): Object { - let offset: Object = arguments.length > 0 ? arguments[0] : Undefined; + js-implicit context: Context, receiver: Object)(...arguments): Object { + const offset: Object = arguments.length > 0 ? arguments[0] : Undefined; return DataViewGet(context, receiver, offset, Undefined, UINT8_ELEMENTS); } transitioning javascript builtin DataViewPrototypeGetInt8( - context: Context, receiver: Object, ...arguments): Object { - let offset: Object = arguments.length > 0 ? arguments[0] : Undefined; + js-implicit context: Context, receiver: Object)(...arguments): Object { + const offset: Object = arguments.length > 0 ? arguments[0] : Undefined; return DataViewGet(context, receiver, offset, Undefined, INT8_ELEMENTS); } transitioning javascript builtin DataViewPrototypeGetUint16( - context: Context, receiver: Object, ...arguments): Object { - let offset: Object = arguments.length > 0 ? arguments[0] : Undefined; - let isLittleEndian: Object = + js-implicit context: Context, receiver: Object)(...arguments): Object { + const offset: Object = arguments.length > 0 ? arguments[0] : Undefined; + const isLittleEndian: Object = arguments.length > 1 ? arguments[1] : Undefined; return DataViewGet( context, receiver, offset, isLittleEndian, UINT16_ELEMENTS); } transitioning javascript builtin DataViewPrototypeGetInt16( - context: Context, receiver: Object, ...arguments): Object { - let offset: Object = arguments.length > 0 ? arguments[0] : Undefined; - let isLittleEndian: Object = + js-implicit context: Context, receiver: Object)(...arguments): Object { + const offset: Object = arguments.length > 0 ? arguments[0] : Undefined; + const isLittleEndian: Object = arguments.length > 1 ? arguments[1] : Undefined; return DataViewGet( context, receiver, offset, isLittleEndian, INT16_ELEMENTS); } transitioning javascript builtin DataViewPrototypeGetUint32( - context: Context, receiver: Object, ...arguments): Object { - let offset: Object = arguments.length > 0 ? arguments[0] : Undefined; - let isLittleEndian: Object = + js-implicit context: Context, receiver: Object)(...arguments): Object { + const offset: Object = arguments.length > 0 ? arguments[0] : Undefined; + const isLittleEndian: Object = arguments.length > 1 ? arguments[1] : Undefined; return DataViewGet( context, receiver, offset, isLittleEndian, UINT32_ELEMENTS); } transitioning javascript builtin DataViewPrototypeGetInt32( - context: Context, receiver: Object, ...arguments): Object { - let offset: Object = arguments.length > 0 ? arguments[0] : Undefined; - let isLittleEndian: Object = + js-implicit context: Context, receiver: Object)(...arguments): Object { + const offset: Object = arguments.length > 0 ? arguments[0] : Undefined; + const isLittleEndian: Object = arguments.length > 1 ? arguments[1] : Undefined; return DataViewGet( context, receiver, offset, isLittleEndian, INT32_ELEMENTS); } transitioning javascript builtin DataViewPrototypeGetFloat32( - context: Context, receiver: Object, ...arguments): Object { - let offset: Object = arguments.length > 0 ? arguments[0] : Undefined; - let isLittleEndian: Object = + js-implicit context: Context, receiver: Object)(...arguments): Object { + const offset: Object = arguments.length > 0 ? arguments[0] : Undefined; + const isLittleEndian: Object = arguments.length > 1 ? 
arguments[1] : Undefined; return DataViewGet( context, receiver, offset, isLittleEndian, FLOAT32_ELEMENTS); } transitioning javascript builtin DataViewPrototypeGetFloat64( - context: Context, receiver: Object, ...arguments): Object { - let offset: Object = arguments.length > 0 ? arguments[0] : Undefined; - let isLittleEndian: Object = + js-implicit context: Context, receiver: Object)(...arguments): Object { + const offset: Object = arguments.length > 0 ? arguments[0] : Undefined; + const isLittleEndian: Object = arguments.length > 1 ? arguments[1] : Undefined; return DataViewGet( context, receiver, offset, isLittleEndian, FLOAT64_ELEMENTS); } transitioning javascript builtin DataViewPrototypeGetBigUint64( - context: Context, receiver: Object, ...arguments): Object { - let offset: Object = arguments.length > 0 ? arguments[0] : Undefined; - let isLittleEndian: Object = + js-implicit context: Context, receiver: Object)(...arguments): Object { + const offset: Object = arguments.length > 0 ? arguments[0] : Undefined; + const isLittleEndian: Object = arguments.length > 1 ? arguments[1] : Undefined; return DataViewGet( context, receiver, offset, isLittleEndian, BIGUINT64_ELEMENTS); } transitioning javascript builtin DataViewPrototypeGetBigInt64( - context: Context, receiver: Object, ...arguments): Object { - let offset: Object = arguments.length > 0 ? arguments[0] : Undefined; - let isLittleEndian: Object = + js-implicit context: Context, receiver: Object)(...arguments): Object { + const offset: Object = arguments.length > 0 ? arguments[0] : Undefined; + const isLittleEndian: Object = arguments.length > 1 ? arguments[1] : Undefined; return DataViewGet( context, receiver, offset, isLittleEndian, BIGINT64_ELEMENTS); @@ -539,10 +513,10 @@ namespace data_view { macro StoreDataView16( buffer: JSArrayBuffer, offset: uintptr, value: uint32, requestedLittleEndian: bool) { - let dataPointer: RawPtr = buffer.backing_store; + const dataPointer: RawPtr = buffer.backing_store; - let b0: uint32 = value & 0xFF; - let b1: uint32 = (value >>> 8) & 0xFF; + const b0: uint32 = value & 0xFF; + const b1: uint32 = (value >>> 8) & 0xFF; if (requestedLittleEndian) { StoreWord8(dataPointer, offset, b0); @@ -556,12 +530,12 @@ namespace data_view { macro StoreDataView32( buffer: JSArrayBuffer, offset: uintptr, value: uint32, requestedLittleEndian: bool) { - let dataPointer: RawPtr = buffer.backing_store; + const dataPointer: RawPtr = buffer.backing_store; - let b0: uint32 = value & 0xFF; - let b1: uint32 = (value >>> 8) & 0xFF; - let b2: uint32 = (value >>> 16) & 0xFF; - let b3: uint32 = value >>> 24; // We don't need to mask here. + const b0: uint32 = value & 0xFF; + const b1: uint32 = (value >>> 8) & 0xFF; + const b2: uint32 = (value >>> 16) & 0xFF; + const b3: uint32 = value >>> 24; // We don't need to mask here. 
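For reference, the byte decomposition that `StoreDataView32` performs in the hunk above can be sketched as standalone C++ (illustrative only, not V8 code; Torque's `>>>` corresponds to an unsigned right shift on `uint32_t` here):

```cpp
#include <cstddef>
#include <cstdint>

// Mirrors StoreDataView32: split the 32-bit value into four bytes with
// shifts and masks, then store them lowest-byte-first for little-endian
// or highest-byte-first for big-endian.
void StoreUint32(uint8_t* data, size_t offset, uint32_t value,
                 bool little_endian) {
  const uint8_t b0 = value & 0xFF;
  const uint8_t b1 = (value >> 8) & 0xFF;
  const uint8_t b2 = (value >> 16) & 0xFF;
  const uint8_t b3 = value >> 24;  // No mask needed: the shift discards the rest.
  if (little_endian) {
    data[offset + 0] = b0;
    data[offset + 1] = b1;
    data[offset + 2] = b2;
    data[offset + 3] = b3;
  } else {
    data[offset + 0] = b3;
    data[offset + 1] = b2;
    data[offset + 2] = b1;
    data[offset + 3] = b0;
  }
}
```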
if (requestedLittleEndian) { StoreWord8(dataPointer, offset, b0); @@ -579,17 +553,17 @@ namespace data_view { macro StoreDataView64( buffer: JSArrayBuffer, offset: uintptr, lowWord: uint32, highWord: uint32, requestedLittleEndian: bool) { - let dataPointer: RawPtr = buffer.backing_store; + const dataPointer: RawPtr = buffer.backing_store; - let b0: uint32 = lowWord & 0xFF; - let b1: uint32 = (lowWord >>> 8) & 0xFF; - let b2: uint32 = (lowWord >>> 16) & 0xFF; - let b3: uint32 = lowWord >>> 24; + const b0: uint32 = lowWord & 0xFF; + const b1: uint32 = (lowWord >>> 8) & 0xFF; + const b2: uint32 = (lowWord >>> 16) & 0xFF; + const b3: uint32 = lowWord >>> 24; - let b4: uint32 = highWord & 0xFF; - let b5: uint32 = (highWord >>> 8) & 0xFF; - let b6: uint32 = (highWord >>> 16) & 0xFF; - let b7: uint32 = highWord >>> 24; + const b4: uint32 = highWord & 0xFF; + const b5: uint32 = (highWord >>> 8) & 0xFF; + const b6: uint32 = (highWord >>> 16) & 0xFF; + const b7: uint32 = highWord >>> 24; if (requestedLittleEndian) { StoreWord8(dataPointer, offset, b0); @@ -612,11 +586,10 @@ namespace data_view { } } - extern macro DataViewBuiltinsAssembler::DataViewDecodeBigIntLength(BigInt): - uint32; - extern macro DataViewBuiltinsAssembler::DataViewDecodeBigIntSign(BigInt): + extern macro DataViewBuiltinsAssembler::DataViewDecodeBigIntLength( + BigIntBase): uint32; + extern macro DataViewBuiltinsAssembler::DataViewDecodeBigIntSign(BigIntBase): uint32; - extern macro LoadBigIntDigit(BigInt, constexpr int31): uintptr; // We might get here a BigInt that is bigger than 64 bits, but we're only // interested in the 64 lowest ones. This means the lowest BigInt digit @@ -624,8 +597,8 @@ namespace data_view { macro StoreDataViewBigInt( buffer: JSArrayBuffer, offset: uintptr, bigIntValue: BigInt, requestedLittleEndian: bool) { - let length: uint32 = DataViewDecodeBigIntLength(bigIntValue); - let sign: uint32 = DataViewDecodeBigIntSign(bigIntValue); + const length: uint32 = DataViewDecodeBigIntLength(bigIntValue); + const sign: uint32 = DataViewDecodeBigIntSign(bigIntValue); // The 32-bit words that will hold the BigInt's value in // two's complement representation. @@ -636,13 +609,13 @@ namespace data_view { if (length != 0) { if constexpr (Is64()) { // There is always exactly 1 BigInt digit to load in this case. - let value: uintptr = LoadBigIntDigit(bigIntValue, 0); + const value: uintptr = bigint::LoadBigIntDigit(bigIntValue, 0); lowWord = Convert(value); // Truncates value to 32 bits. highWord = Convert(value >>> 32); } else { // There might be either 1 or 2 BigInt digits we need to load. - lowWord = Convert(LoadBigIntDigit(bigIntValue, 0)); + lowWord = Convert(bigint::LoadBigIntDigit(bigIntValue, 0)); if (length >= 2) { // Only load the second digit if there is one. 
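The surrounding `StoreDataViewBigInt` logic only ever needs the lowest 64 bits of the BigInt's magnitude: one digit on 64-bit platforms, up to two on 32-bit. A hedged C++ sketch of that truncation, under an assumed digit layout (digit 0 least significant; this is a model, not V8's actual object layout):

```cpp
#include <cstdint>
#include <vector>

// Model of "take the low 64 bits of a multi-digit magnitude". A digit has
// the platform word size, so sizeof(uintptr_t) selects the branch.
uint64_t Low64(const std::vector<uintptr_t>& digits) {
  if (digits.empty()) return 0;  // A zero-length BigInt encodes zero.
  if (sizeof(uintptr_t) == 8) {
    return static_cast<uint64_t>(digits[0]);  // One digit already covers 64 bits.
  }
  const uint64_t low = digits[0];                            // Bits 0..31.
  const uint64_t high = digits.size() >= 2 ? digits[1] : 0;  // Bits 32..63.
  return (high << 32) | low;
}
```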
- highWord = Convert(LoadBigIntDigit(bigIntValue, 1)); + highWord = Convert(bigint::LoadBigIntDigit(bigIntValue, 1)); } } } @@ -661,7 +634,7 @@ namespace data_view { transitioning macro DataViewSet( context: Context, receiver: Object, offset: Object, value: Object, requestedLittleEndian: Object, kind: constexpr ElementsKind): Object { - let dataView: JSDataView = + const dataView: JSDataView = ValidateDataView(context, receiver, MakeDataViewSetterNameString(kind)); let getIndex: Number; @@ -672,52 +645,52 @@ namespace data_view { ThrowRangeError(kInvalidDataViewAccessorOffset); } - let littleEndian: bool = ToBoolean(requestedLittleEndian); - let buffer: JSArrayBuffer = dataView.buffer; + const littleEndian: bool = ToBoolean(requestedLittleEndian); + const buffer: JSArrayBuffer = dataView.buffer; // According to ES6 section 24.2.1.2 SetViewValue, we must perform // the conversion before doing the bounds check. if constexpr (kind == BIGUINT64_ELEMENTS || kind == BIGINT64_ELEMENTS) { - let bigIntValue: BigInt = ToBigInt(context, value); + const bigIntValue: BigInt = ToBigInt(context, value); if (IsDetachedBuffer(buffer)) { ThrowTypeError(kDetachedOperation, MakeDataViewSetterNameString(kind)); } - let getIndexFloat: float64 = Convert(getIndex); - let getIndexWord: uintptr = Convert(getIndexFloat); + const getIndexFloat: float64 = Convert(getIndex); + const getIndexWord: uintptr = Convert(getIndexFloat); - let viewOffsetWord: uintptr = dataView.byte_offset; - let viewSizeFloat: float64 = Convert(dataView.byte_length); - let elementSizeFloat: float64 = DataViewElementSize(kind); + const viewOffsetWord: uintptr = dataView.byte_offset; + const viewSizeFloat: float64 = Convert(dataView.byte_length); + const elementSizeFloat: float64 = DataViewElementSize(kind); if (getIndexFloat + elementSizeFloat > viewSizeFloat) { ThrowRangeError(kInvalidDataViewAccessorOffset); } - let bufferIndex: uintptr = getIndexWord + viewOffsetWord; + const bufferIndex: uintptr = getIndexWord + viewOffsetWord; StoreDataViewBigInt(buffer, bufferIndex, bigIntValue, littleEndian); } else { - let numValue: Number = ToNumber(context, value); + const numValue: Number = ToNumber(context, value); if (IsDetachedBuffer(buffer)) { ThrowTypeError(kDetachedOperation, MakeDataViewSetterNameString(kind)); } - let getIndexFloat: float64 = Convert(getIndex); - let getIndexWord: uintptr = Convert(getIndexFloat); + const getIndexFloat: float64 = Convert(getIndex); + const getIndexWord: uintptr = Convert(getIndexFloat); - let viewOffsetWord: uintptr = dataView.byte_offset; - let viewSizeFloat: float64 = Convert(dataView.byte_length); - let elementSizeFloat: float64 = DataViewElementSize(kind); + const viewOffsetWord: uintptr = dataView.byte_offset; + const viewSizeFloat: float64 = Convert(dataView.byte_length); + const elementSizeFloat: float64 = DataViewElementSize(kind); if (getIndexFloat + elementSizeFloat > viewSizeFloat) { ThrowRangeError(kInvalidDataViewAccessorOffset); } - let bufferIndex: uintptr = getIndexWord + viewOffsetWord; + const bufferIndex: uintptr = getIndexWord + viewOffsetWord; - let doubleValue: float64 = ChangeNumberToFloat64(numValue); + const doubleValue: float64 = ChangeNumberToFloat64(numValue); if constexpr (kind == UINT8_ELEMENTS || kind == INT8_ELEMENTS) { StoreDataView8( @@ -731,13 +704,13 @@ namespace data_view { buffer, bufferIndex, TruncateFloat64ToWord32(doubleValue), littleEndian); } else if constexpr (kind == FLOAT32_ELEMENTS) { - let floatValue: float32 = TruncateFloat64ToFloat32(doubleValue); + const 
floatValue: float32 = TruncateFloat64ToFloat32(doubleValue); StoreDataView32( buffer, bufferIndex, BitcastFloat32ToInt32(floatValue), littleEndian); } else if constexpr (kind == FLOAT64_ELEMENTS) { - let lowWord: uint32 = Float64ExtractLowWord32(doubleValue); - let highWord: uint32 = Float64ExtractHighWord32(doubleValue); + const lowWord: uint32 = Float64ExtractLowWord32(doubleValue); + const highWord: uint32 = Float64ExtractHighWord32(doubleValue); StoreDataView64(buffer, bufferIndex, lowWord, highWord, littleEndian); } } @@ -745,96 +718,96 @@ namespace data_view { } transitioning javascript builtin DataViewPrototypeSetUint8( - context: Context, receiver: Object, ...arguments): Object { - let offset: Object = arguments.length > 0 ? arguments[0] : Undefined; - let value: Object = arguments.length > 1 ? arguments[1] : Undefined; + js-implicit context: Context, receiver: Object)(...arguments): Object { + const offset: Object = arguments.length > 0 ? arguments[0] : Undefined; + const value: Object = arguments.length > 1 ? arguments[1] : Undefined; return DataViewSet( context, receiver, offset, value, Undefined, UINT8_ELEMENTS); } transitioning javascript builtin DataViewPrototypeSetInt8( - context: Context, receiver: Object, ...arguments): Object { - let offset: Object = arguments.length > 0 ? arguments[0] : Undefined; - let value: Object = arguments.length > 1 ? arguments[1] : Undefined; + js-implicit context: Context, receiver: Object)(...arguments): Object { + const offset: Object = arguments.length > 0 ? arguments[0] : Undefined; + const value: Object = arguments.length > 1 ? arguments[1] : Undefined; return DataViewSet( context, receiver, offset, value, Undefined, INT8_ELEMENTS); } transitioning javascript builtin DataViewPrototypeSetUint16( - context: Context, receiver: Object, ...arguments): Object { - let offset: Object = arguments.length > 0 ? arguments[0] : Undefined; - let value: Object = arguments.length > 1 ? arguments[1] : Undefined; - let isLittleEndian: Object = + js-implicit context: Context, receiver: Object)(...arguments): Object { + const offset: Object = arguments.length > 0 ? arguments[0] : Undefined; + const value: Object = arguments.length > 1 ? arguments[1] : Undefined; + const isLittleEndian: Object = arguments.length > 2 ? arguments[2] : Undefined; return DataViewSet( context, receiver, offset, value, isLittleEndian, UINT16_ELEMENTS); } transitioning javascript builtin DataViewPrototypeSetInt16( - context: Context, receiver: Object, ...arguments): Object { - let offset: Object = arguments.length > 0 ? arguments[0] : Undefined; - let value: Object = arguments.length > 1 ? arguments[1] : Undefined; - let isLittleEndian: Object = + js-implicit context: Context, receiver: Object)(...arguments): Object { + const offset: Object = arguments.length > 0 ? arguments[0] : Undefined; + const value: Object = arguments.length > 1 ? arguments[1] : Undefined; + const isLittleEndian: Object = arguments.length > 2 ? arguments[2] : Undefined; return DataViewSet( context, receiver, offset, value, isLittleEndian, INT16_ELEMENTS); } transitioning javascript builtin DataViewPrototypeSetUint32( - context: Context, receiver: Object, ...arguments): Object { - let offset: Object = arguments.length > 0 ? arguments[0] : Undefined; - let value: Object = arguments.length > 1 ? arguments[1] : Undefined; - let isLittleEndian: Object = + js-implicit context: Context, receiver: Object)(...arguments): Object { + const offset: Object = arguments.length > 0 ? 
arguments[0] : Undefined; + const value: Object = arguments.length > 1 ? arguments[1] : Undefined; + const isLittleEndian: Object = arguments.length > 2 ? arguments[2] : Undefined; return DataViewSet( context, receiver, offset, value, isLittleEndian, UINT32_ELEMENTS); } transitioning javascript builtin DataViewPrototypeSetInt32( - context: Context, receiver: Object, ...arguments): Object { - let offset: Object = arguments.length > 0 ? arguments[0] : Undefined; - let value: Object = arguments.length > 1 ? arguments[1] : Undefined; - let isLittleEndian: Object = + js-implicit context: Context, receiver: Object)(...arguments): Object { + const offset: Object = arguments.length > 0 ? arguments[0] : Undefined; + const value: Object = arguments.length > 1 ? arguments[1] : Undefined; + const isLittleEndian: Object = arguments.length > 2 ? arguments[2] : Undefined; return DataViewSet( context, receiver, offset, value, isLittleEndian, INT32_ELEMENTS); } transitioning javascript builtin DataViewPrototypeSetFloat32( - context: Context, receiver: Object, ...arguments): Object { - let offset: Object = arguments.length > 0 ? arguments[0] : Undefined; - let value: Object = arguments.length > 1 ? arguments[1] : Undefined; - let isLittleEndian: Object = + js-implicit context: Context, receiver: Object)(...arguments): Object { + const offset: Object = arguments.length > 0 ? arguments[0] : Undefined; + const value: Object = arguments.length > 1 ? arguments[1] : Undefined; + const isLittleEndian: Object = arguments.length > 2 ? arguments[2] : Undefined; return DataViewSet( context, receiver, offset, value, isLittleEndian, FLOAT32_ELEMENTS); } transitioning javascript builtin DataViewPrototypeSetFloat64( - context: Context, receiver: Object, ...arguments): Object { - let offset: Object = arguments.length > 0 ? arguments[0] : Undefined; - let value: Object = arguments.length > 1 ? arguments[1] : Undefined; - let isLittleEndian: Object = + js-implicit context: Context, receiver: Object)(...arguments): Object { + const offset: Object = arguments.length > 0 ? arguments[0] : Undefined; + const value: Object = arguments.length > 1 ? arguments[1] : Undefined; + const isLittleEndian: Object = arguments.length > 2 ? arguments[2] : Undefined; return DataViewSet( context, receiver, offset, value, isLittleEndian, FLOAT64_ELEMENTS); } transitioning javascript builtin DataViewPrototypeSetBigUint64( - context: Context, receiver: Object, ...arguments): Object { - let offset: Object = arguments.length > 0 ? arguments[0] : Undefined; - let value: Object = arguments.length > 1 ? arguments[1] : Undefined; - let isLittleEndian: Object = + js-implicit context: Context, receiver: Object)(...arguments): Object { + const offset: Object = arguments.length > 0 ? arguments[0] : Undefined; + const value: Object = arguments.length > 1 ? arguments[1] : Undefined; + const isLittleEndian: Object = arguments.length > 2 ? arguments[2] : Undefined; return DataViewSet( context, receiver, offset, value, isLittleEndian, BIGUINT64_ELEMENTS); } transitioning javascript builtin DataViewPrototypeSetBigInt64( - context: Context, receiver: Object, ...arguments): Object { - let offset: Object = arguments.length > 0 ? arguments[0] : Undefined; - let value: Object = arguments.length > 1 ? arguments[1] : Undefined; - let isLittleEndian: Object = + js-implicit context: Context, receiver: Object)(...arguments): Object { + const offset: Object = arguments.length > 0 ? arguments[0] : Undefined; + const value: Object = arguments.length > 1 ? 
arguments[1] : Undefined; + const isLittleEndian: Object = arguments.length > 2 ? arguments[2] : Undefined; return DataViewSet( context, receiver, offset, value, isLittleEndian, BIGINT64_ELEMENTS); diff --git a/deps/v8/src/builtins/extras-utils.tq b/deps/v8/src/builtins/extras-utils.tq index 2b9b79739e0f3d..3675fda19165f4 100644 --- a/deps/v8/src/builtins/extras-utils.tq +++ b/deps/v8/src/builtins/extras-utils.tq @@ -8,17 +8,18 @@ namespace extras_utils { extern runtime PromiseStatus(Context, Object): Smi; javascript builtin ExtrasUtilsCreatePrivateSymbol( - context: Context, receiver: Object, ...arguments): HeapObject { + js-implicit context: Context, + receiver: Object)(...arguments): HeapObject { return CreatePrivateSymbol(context, arguments[0]); } javascript builtin ExtrasUtilsMarkPromiseAsHandled( - context: Context, receiver: Object, ...arguments): Undefined { + js-implicit context: Context, receiver: Object)(...arguments): Undefined { return PromiseMarkAsHandled(context, arguments[0]); } javascript builtin ExtrasUtilsPromiseState( - context: Context, receiver: Object, ...arguments): Smi { + js-implicit context: Context, receiver: Object)(...arguments): Smi { return PromiseStatus(context, arguments[0]); } } diff --git a/deps/v8/src/builtins/ia32/builtins-ia32.cc b/deps/v8/src/builtins/ia32/builtins-ia32.cc index 0d80c681fbf1c1..995be77f754ee0 100644 --- a/deps/v8/src/builtins/ia32/builtins-ia32.cc +++ b/deps/v8/src/builtins/ia32/builtins-ia32.cc @@ -1023,10 +1023,10 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) { // 8-bit fields next to each other, so we could just optimize by writing a // 16-bit. These static asserts guard our assumption is valid. STATIC_ASSERT(BytecodeArray::kBytecodeAgeOffset == - BytecodeArray::kOSRNestingLevelOffset + kCharSize); + BytecodeArray::kOsrNestingLevelOffset + kCharSize); STATIC_ASSERT(BytecodeArray::kNoAgeBytecodeAge == 0); __ mov_w(FieldOperand(kInterpreterBytecodeArrayRegister, - BytecodeArray::kOSRNestingLevelOffset), + BytecodeArray::kOsrNestingLevelOffset), Immediate(0)); // Push bytecode array. @@ -1534,6 +1534,15 @@ void Generate_ContinueToBuiltinHelper(MacroAssembler* masm, BuiltinContinuationFrameConstants::kFixedFrameSize), eax); } + + // Replace the builtin index Smi on the stack with the start address of the + // builtin loaded from the builtins table. The ret below will return to this + // address. 
+ int offset_to_builtin_index = allocatable_register_count * kSystemPointerSize; + __ mov(eax, Operand(esp, offset_to_builtin_index)); + __ LoadEntryFromBuiltinIndex(eax); + __ mov(Operand(esp, offset_to_builtin_index), eax); + for (int i = allocatable_register_count - 1; i >= 0; --i) { int code = config->GetAllocatableGeneralCode(i); __ pop(Register::from_code(code)); @@ -1549,7 +1558,6 @@ void Generate_ContinueToBuiltinHelper(MacroAssembler* masm, kSystemPointerSize; __ pop(Operand(esp, offsetToPC)); __ Drop(offsetToPC / kSystemPointerSize); - __ add(Operand(esp, 0), Immediate(Code::kHeaderSize - kHeapObjectTag)); __ ret(0); } } // namespace @@ -3012,23 +3020,28 @@ void CallApiFunctionAndReturn(MacroAssembler* masm, Register function_address, __ mov(esi, __ ExternalReferenceAsOperand(next_address, esi)); __ mov(edi, __ ExternalReferenceAsOperand(limit_address, edi)); - Label profiler_disabled; - Label end_profiler_check; + Label profiler_enabled, end_profiler_check; __ Move(eax, Immediate(ExternalReference::is_profiling_address(isolate))); __ cmpb(Operand(eax, 0), Immediate(0)); - __ j(zero, &profiler_disabled); + __ j(not_zero, &profiler_enabled); + __ Move(eax, Immediate(ExternalReference::address_of_runtime_stats_flag())); + __ cmp(Operand(eax, 0), Immediate(0)); + __ j(not_zero, &profiler_enabled); + { + // Call the api function directly. + __ mov(eax, function_address); + __ jmp(&end_profiler_check); + } + __ bind(&profiler_enabled); + { + // Additional parameter is the address of the actual getter function. + __ mov(thunk_last_arg, function_address); + __ Move(eax, Immediate(thunk_ref)); + } + __ bind(&end_profiler_check); - // Additional parameter is the address of the actual getter function. - __ mov(thunk_last_arg, function_address); // Call the api function. - __ Move(eax, Immediate(thunk_ref)); __ call(eax); - __ jmp(&end_profiler_check); - - __ bind(&profiler_disabled); - // Call the api function. - __ call(function_address); - __ bind(&end_profiler_check); Label prologue; // Load the value from ReturnValue @@ -3080,6 +3093,9 @@ void CallApiFunctionAndReturn(MacroAssembler* masm, Register function_address, __ CompareRoot(map, RootIndex::kHeapNumberMap); __ j(equal, &ok, Label::kNear); + __ CompareRoot(map, RootIndex::kBigIntMap); + __ j(equal, &ok, Label::kNear); + __ CompareRoot(return_value, RootIndex::kUndefinedValue); __ j(equal, &ok, Label::kNear); diff --git a/deps/v8/src/builtins/internal-coverage.tq b/deps/v8/src/builtins/internal-coverage.tq index 4e75c6d837eced..d96fa924ab0418 100644 --- a/deps/v8/src/builtins/internal-coverage.tq +++ b/deps/v8/src/builtins/internal-coverage.tq @@ -28,6 +28,8 @@ namespace internal_coverage { return UnsafeCast(debugInfo.coverage_info); } + @export // Silence unused warning on release builds. SlotCount is only used + // in an assert. TODO(szuend): Remove once macros and asserts work. macro SlotCount(coverageInfo: CoverageInfo): Smi { assert(kFirstSlotIndex == 0); // Otherwise we'd have to consider it below. 
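The ia32 continuation hunk above replaces the old `Code::kHeaderSize - kHeapObjectTag` address arithmetic with `LoadEntryFromBuiltinIndex`. A rough C++ model of the concept (names and table size are hypothetical; the real lookup goes through the isolate's builtin entry table):

```cpp
// Hypothetical model: rather than computing "code object + header size -
// heap object tag", the continuation indexes a flat table of raw entry
// addresses by builtin index and returns to that address.
using BuiltinEntry = const void*;

struct IsolateDataModel {
  static constexpr int kBuiltinCount = 1536;  // Illustrative only.
  BuiltinEntry builtin_entry_table[kBuiltinCount];
};

BuiltinEntry LoadEntryFromBuiltinIndex(const IsolateDataModel& data,
                                       int builtin_index) {
  return data.builtin_entry_table[builtin_index];
}
```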
assert(kFirstSlotIndex == (coverageInfo.length & kSlotIndexCountMask)); diff --git a/deps/v8/src/builtins/iterator.tq b/deps/v8/src/builtins/iterator.tq index 5c9439dfc7ffce..b770f1b6528378 100644 --- a/deps/v8/src/builtins/iterator.tq +++ b/deps/v8/src/builtins/iterator.tq @@ -20,16 +20,16 @@ namespace iterator { implicit context: Context)(Object): IteratorRecord; extern macro IteratorBuiltinsAssembler::IteratorStep( - implicit context: Context)(IteratorRecord): Object + implicit context: Context)(IteratorRecord): JSReceiver labels Done; extern macro IteratorBuiltinsAssembler::IteratorStep( - implicit context: Context)(IteratorRecord, Map): Object + implicit context: Context)(IteratorRecord, Map): JSReceiver labels Done; extern macro IteratorBuiltinsAssembler::IteratorValue( - implicit context: Context)(Object): Object; + implicit context: Context)(JSReceiver): Object; extern macro IteratorBuiltinsAssembler::IteratorValue( - implicit context: Context)(Object, Map): Object; + implicit context: Context)(JSReceiver, Map): Object; extern macro IteratorBuiltinsAssembler::IteratorCloseOnException( implicit context: Context)(IteratorRecord, Object): never; diff --git a/deps/v8/src/builtins/math.tq b/deps/v8/src/builtins/math.tq index 84dd1261fa07b8..df43b30efc4e0b 100644 --- a/deps/v8/src/builtins/math.tq +++ b/deps/v8/src/builtins/math.tq @@ -7,7 +7,7 @@ namespace math { extern macro Float64Acos(float64): float64; transitioning javascript builtin - MathAcos(context: Context, receiver: Object, x: Object): Number { + MathAcos(context: Context, _receiver: Object, x: Object): Number { const value = Convert(ToNumber_Inline(context, x)); return Convert(Float64Acos(value)); } @@ -16,7 +16,7 @@ namespace math { extern macro Float64Acosh(float64): float64; transitioning javascript builtin - MathAcosh(context: Context, receiver: Object, x: Object): Number { + MathAcosh(context: Context, _receiver: Object, x: Object): Number { const value = Convert(ToNumber_Inline(context, x)); return Convert(Float64Acosh(value)); } @@ -25,7 +25,7 @@ namespace math { extern macro Float64Asin(float64): float64; transitioning javascript builtin - MathAsin(context: Context, receiver: Object, x: Object): Number { + MathAsin(context: Context, _receiver: Object, x: Object): Number { const value = Convert(ToNumber_Inline(context, x)); return Convert(Float64Asin(value)); } @@ -34,7 +34,7 @@ namespace math { extern macro Float64Asinh(float64): float64; transitioning javascript builtin - MathAsinh(context: Context, receiver: Object, x: Object): Number { + MathAsinh(context: Context, _receiver: Object, x: Object): Number { const value = Convert(ToNumber_Inline(context, x)); return Convert(Float64Asinh(value)); } @@ -43,7 +43,7 @@ namespace math { extern macro Float64Atan(float64): float64; transitioning javascript builtin - MathAtan(context: Context, receiver: Object, x: Object): Number { + MathAtan(context: Context, _receiver: Object, x: Object): Number { const value = Convert(ToNumber_Inline(context, x)); return Convert(Float64Atan(value)); } @@ -52,7 +52,7 @@ namespace math { extern macro Float64Atan2(float64, float64): float64; transitioning javascript builtin - MathAtan2(context: Context, receiver: Object, y: Object, x: Object): Number { + MathAtan2(context: Context, _receiver: Object, y: Object, x: Object): Number { const yValue = Convert(ToNumber_Inline(context, y)); const xValue = Convert(ToNumber_Inline(context, x)); return Convert(Float64Atan2(yValue, xValue)); @@ -62,7 +62,7 @@ namespace math { extern macro 
Float64Atanh(float64): float64; transitioning javascript builtin - MathAtanh(context: Context, receiver: Object, x: Object): Number { + MathAtanh(context: Context, _receiver: Object, x: Object): Number { const value = Convert(ToNumber_Inline(context, x)); return Convert(Float64Atanh(value)); } @@ -71,7 +71,7 @@ namespace math { extern macro Float64Cbrt(float64): float64; transitioning javascript builtin - MathCbrt(context: Context, receiver: Object, x: Object): Number { + MathCbrt(context: Context, _receiver: Object, x: Object): Number { const value = Convert(ToNumber_Inline(context, x)); return Convert(Float64Cbrt(value)); } @@ -80,7 +80,7 @@ namespace math { extern macro Word32Clz(int32): int32; transitioning javascript builtin - MathClz32(context: Context, receiver: Object, x: Object): Number { + MathClz32(context: Context, _receiver: Object, x: Object): Number { const num = ToNumber_Inline(context, x); let value: int32; @@ -100,7 +100,7 @@ namespace math { extern macro Float64Cos(float64): float64; transitioning javascript builtin - MathCos(context: Context, receiver: Object, x: Object): Number { + MathCos(context: Context, _receiver: Object, x: Object): Number { const value = Convert(ToNumber_Inline(context, x)); return Convert(Float64Cos(value)); } @@ -109,7 +109,7 @@ namespace math { extern macro Float64Cosh(float64): float64; transitioning javascript builtin - MathCosh(context: Context, receiver: Object, x: Object): Number { + MathCosh(context: Context, _receiver: Object, x: Object): Number { const value = Convert(ToNumber_Inline(context, x)); return Convert(Float64Cosh(value)); } @@ -118,7 +118,7 @@ namespace math { extern macro Float64Exp(float64): float64; transitioning javascript builtin - MathExp(context: Context, receiver: Object, x: Object): Number { + MathExp(context: Context, _receiver: Object, x: Object): Number { const value = Convert(ToNumber_Inline(context, x)); return Convert(Float64Exp(value)); } @@ -127,14 +127,14 @@ namespace math { extern macro Float64Expm1(float64): float64; transitioning javascript builtin - MathExpm1(context: Context, receiver: Object, x: Object): Number { + MathExpm1(context: Context, _receiver: Object, x: Object): Number { const value = Convert(ToNumber_Inline(context, x)); return Convert(Float64Expm1(value)); } // ES6 #sec-math.fround transitioning javascript builtin - MathFround(context: Context, receiver: Object, x: Object): Number { + MathFround(context: Context, _receiver: Object, x: Object): Number { const x32 = Convert(ToNumber_Inline(context, x)); const x64 = Convert(x32); return Convert(x64); @@ -144,7 +144,7 @@ namespace math { extern macro Float64Log(float64): float64; transitioning javascript builtin - MathLog(context: Context, receiver: Object, x: Object): Number { + MathLog(context: Context, _receiver: Object, x: Object): Number { const value = Convert(ToNumber_Inline(context, x)); return Convert(Float64Log(value)); } @@ -153,7 +153,7 @@ namespace math { extern macro Float64Log1p(float64): float64; transitioning javascript builtin - MathLog1p(context: Context, receiver: Object, x: Object): Number { + MathLog1p(context: Context, _receiver: Object, x: Object): Number { const value = Convert(ToNumber_Inline(context, x)); return Convert(Float64Log1p(value)); } @@ -162,7 +162,7 @@ namespace math { extern macro Float64Log10(float64): float64; transitioning javascript builtin - MathLog10(context: Context, receiver: Object, x: Object): Number { + MathLog10(context: Context, _receiver: Object, x: Object): Number { const value = 
Convert(ToNumber_Inline(context, x)); return Convert(Float64Log10(value)); } @@ -171,7 +171,7 @@ namespace math { extern macro Float64Log2(float64): float64; transitioning javascript builtin - MathLog2(context: Context, receiver: Object, x: Object): Number { + MathLog2(context: Context, _receiver: Object, x: Object): Number { const value = Convert(ToNumber_Inline(context, x)); return Convert(Float64Log2(value)); } @@ -180,14 +180,14 @@ namespace math { extern macro Float64Sin(float64): float64; transitioning javascript builtin - MathSin(context: Context, receiver: Object, x: Object): Number { + MathSin(context: Context, _receiver: Object, x: Object): Number { const value = Convert(ToNumber_Inline(context, x)); return Convert(Float64Sin(value)); } // ES6 #sec-math.sign transitioning javascript builtin - MathSign(context: Context, receiver: Object, x: Object): Number { + MathSign(context: Context, _receiver: Object, x: Object): Number { const num = ToNumber_Inline(context, x); const value = Convert(num); @@ -204,7 +204,7 @@ namespace math { extern macro Float64Sinh(float64): float64; transitioning javascript builtin - MathSinh(context: Context, receiver: Object, x: Object): Number { + MathSinh(context: Context, _receiver: Object, x: Object): Number { const value = Convert(ToNumber_Inline(context, x)); return Convert(Float64Sinh(value)); } @@ -213,7 +213,7 @@ namespace math { extern macro Float64Sqrt(float64): float64; transitioning javascript builtin - MathSqrt(context: Context, receiver: Object, x: Object): Number { + MathSqrt(context: Context, _receiver: Object, x: Object): Number { const value = Convert(ToNumber_Inline(context, x)); return Convert(Float64Sqrt(value)); } @@ -222,7 +222,7 @@ namespace math { extern macro Float64Tan(float64): float64; transitioning javascript builtin - MathTan(context: Context, receiver: Object, x: Object): Number { + MathTan(context: Context, _receiver: Object, x: Object): Number { const value = Convert(ToNumber_Inline(context, x)); return Convert(Float64Tan(value)); } @@ -231,7 +231,7 @@ namespace math { extern macro Float64Tanh(float64): float64; transitioning javascript builtin - MathTanh(context: Context, receiver: Object, x: Object): Number { + MathTanh(context: Context, _receiver: Object, x: Object): Number { const value = Convert(ToNumber_Inline(context, x)); return Convert(Float64Tanh(value)); } diff --git a/deps/v8/src/builtins/mips/builtins-mips.cc b/deps/v8/src/builtins/mips/builtins-mips.cc index ec65c78ee9d65d..a359b2436f1818 100644 --- a/deps/v8/src/builtins/mips/builtins-mips.cc +++ b/deps/v8/src/builtins/mips/builtins-mips.cc @@ -62,7 +62,6 @@ void Builtins::Generate_InternalArrayConstructor(MacroAssembler* masm) { static void GenerateTailCallToReturnedCode(MacroAssembler* masm, Runtime::FunctionId function_id) { // ----------- S t a t e ------------- - // -- a0 : argument count (preserved for callee) // -- a1 : target function (preserved for callee) // -- a3 : new target (preserved for callee) // ----------------------------------- @@ -70,14 +69,12 @@ static void GenerateTailCallToReturnedCode(MacroAssembler* masm, FrameScope scope(masm, StackFrame::INTERNAL); // Push a copy of the target function and the new target. // Push function as parameter to the runtime call. - __ SmiTag(a0); - __ Push(a0, a1, a3, a1); + __ Push(a1, a3, a1); __ CallRuntime(function_id, 1); // Restore target function and new target. 
- __ Pop(a0, a1, a3); - __ SmiUntag(a0); + __ Pop(a1, a3); } static_assert(kJavaScriptCallCodeStartRegister == a2, "ABI mismatch"); @@ -853,13 +850,11 @@ static void MaybeTailCallOptimizedCodeSlot(MacroAssembler* masm, Register scratch1, Register scratch2, Register scratch3) { // ----------- S t a t e ------------- - // -- a0 : argument count (preserved for callee if needed, and caller) // -- a3 : new target (preserved for callee if needed, and caller) // -- a1 : target function (preserved for callee if needed, and caller) // -- feedback vector (preserved for caller if needed) // ----------------------------------- - DCHECK( - !AreAliased(feedback_vector, a0, a1, a3, scratch1, scratch2, scratch3)); + DCHECK(!AreAliased(feedback_vector, a1, a3, scratch1, scratch2, scratch3)); Label optimized_code_slot_is_weak_ref, fallthrough; @@ -1035,17 +1030,18 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) { __ lw(feedback_vector, FieldMemOperand(closure, JSFunction::kFeedbackCellOffset)); __ lw(feedback_vector, FieldMemOperand(feedback_vector, Cell::kValueOffset)); + + Label push_stack_frame; + // Check if feedback vector is valid. If valid, check for optimized code + // and update invocation count. Otherwise, setup the stack frame. + __ lw(t0, FieldMemOperand(feedback_vector, HeapObject::kMapOffset)); + __ lhu(t0, FieldMemOperand(t0, Map::kInstanceTypeOffset)); + __ Branch(&push_stack_frame, ne, t0, Operand(FEEDBACK_VECTOR_TYPE)); + // Read off the optimized code slot in the feedback vector, and if there // is optimized code or an optimization marker, call that instead. MaybeTailCallOptimizedCodeSlot(masm, feedback_vector, t0, t3, t1); - // Open a frame scope to indicate that there is a frame on the stack. The - // MANUAL indicates that the scope shouldn't actually generate code to set up - // the frame (that is done below). - FrameScope frame_scope(masm, StackFrame::MANUAL); - __ PushStandardFrame(closure); - - // Increment invocation count for the function. __ lw(t0, FieldMemOperand(feedback_vector, FeedbackVector::kInvocationCountOffset)); @@ -1053,10 +1049,21 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) { __ sw(t0, FieldMemOperand(feedback_vector, FeedbackVector::kInvocationCountOffset)); - // Reset code age. - DCHECK_EQ(0, BytecodeArray::kNoAgeBytecodeAge); - __ sb(zero_reg, FieldMemOperand(kInterpreterBytecodeArrayRegister, - BytecodeArray::kBytecodeAgeOffset)); + // Open a frame scope to indicate that there is a frame on the stack. The + // MANUAL indicates that the scope shouldn't actually generate code to set up + // the frame (that is done below). + __ bind(&push_stack_frame); + FrameScope frame_scope(masm, StackFrame::MANUAL); + __ PushStandardFrame(closure); + + // Reset code age and the OSR arming. The OSR field and BytecodeAgeOffset are + // 8-bit fields next to each other, so we could just optimize by writing a + // 16-bit. These static asserts guard our assumption is valid. + STATIC_ASSERT(BytecodeArray::kBytecodeAgeOffset == + BytecodeArray::kOsrNestingLevelOffset + kCharSize); + STATIC_ASSERT(BytecodeArray::kNoAgeBytecodeAge == 0); + __ sh(zero_reg, FieldMemOperand(kInterpreterBytecodeArrayRegister, + BytecodeArray::kOsrNestingLevelOffset)); // Load initial bytecode offset. 
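The static asserts in the hunk above pin down the layout assumption behind the single 16-bit store: the OSR nesting level and the bytecode age are adjacent 8-bit fields, and the no-age value is zero. The same idea in standalone C++ (field layout illustrative):

```cpp
#include <cstddef>
#include <cstdint>
#include <cstring>

// The OSR nesting level and bytecode age sit next to each other, so one
// 16-bit store of zero resets both (kNoAgeBytecodeAge is asserted to be 0).
struct BytecodeArrayFields {
  uint8_t osr_nesting_level;
  uint8_t bytecode_age;
};
static_assert(offsetof(BytecodeArrayFields, bytecode_age) ==
                  offsetof(BytecodeArrayFields, osr_nesting_level) + 1,
              "fields must be adjacent for the combined store");

void ResetAgeAndOsr(BytecodeArrayFields* fields) {
  const uint16_t zero = 0;
  std::memcpy(&fields->osr_nesting_level, &zero, sizeof(zero));
}
```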
__ li(kInterpreterBytecodeOffsetRegister, @@ -1464,11 +1471,13 @@ void Generate_ContinueToBuiltinHelper(MacroAssembler* masm, } __ lw(fp, MemOperand( sp, BuiltinContinuationFrameConstants::kFixedFrameSizeFromFp)); + // Load builtin index (stored as a Smi) and use it to get the builtin start + // address from the builtins table. __ Pop(t0); __ Addu(sp, sp, Operand(BuiltinContinuationFrameConstants::kFixedFrameSizeFromFp)); __ Pop(ra); - __ Addu(t0, t0, Operand(Code::kHeaderSize - kHeapObjectTag)); + __ LoadEntryFromBuiltinIndex(t0); __ Jump(t0); } } // namespace @@ -2559,7 +2568,7 @@ void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size, __ LoadRoot(t0, RootIndex::kTheHoleValue); // Cannot use check here as it attempts to generate call into runtime. __ Branch(&okay, eq, t0, Operand(a2)); - __ stop("Unexpected pending exception"); + __ stop(); __ bind(&okay); } @@ -2825,18 +2834,23 @@ void CallApiFunctionAndReturn(MacroAssembler* masm, Register function_address, DCHECK(function_address == a1 || function_address == a2); - Label profiler_disabled; - Label end_profiler_check; + Label profiler_enabled, end_profiler_check; __ li(t9, ExternalReference::is_profiling_address(isolate)); __ lb(t9, MemOperand(t9, 0)); - __ Branch(&profiler_disabled, eq, t9, Operand(zero_reg)); - - // Additional parameter is the address of the actual callback. - __ li(t9, thunk_ref); - __ jmp(&end_profiler_check); - - __ bind(&profiler_disabled); - __ mov(t9, function_address); + __ Branch(&profiler_enabled, ne, t9, Operand(zero_reg)); + __ li(t9, ExternalReference::address_of_runtime_stats_flag()); + __ lw(t9, MemOperand(t9, 0)); + __ Branch(&profiler_enabled, ne, t9, Operand(zero_reg)); + { + // Call the api function directly. + __ mov(t9, function_address); + __ Branch(&end_profiler_check); + } + __ bind(&profiler_enabled); + { + // Additional parameter is the address of the actual callback. + __ li(t9, thunk_ref); + } __ bind(&end_profiler_check); // Allocate HandleScope in callee-save registers. diff --git a/deps/v8/src/builtins/mips64/builtins-mips64.cc b/deps/v8/src/builtins/mips64/builtins-mips64.cc index 34a5774d656901..c5565b90de7a9d 100644 --- a/deps/v8/src/builtins/mips64/builtins-mips64.cc +++ b/deps/v8/src/builtins/mips64/builtins-mips64.cc @@ -62,7 +62,6 @@ void Builtins::Generate_InternalArrayConstructor(MacroAssembler* masm) { static void GenerateTailCallToReturnedCode(MacroAssembler* masm, Runtime::FunctionId function_id) { // ----------- S t a t e ------------- - // -- a0 : argument count (preserved for callee) // -- a1 : target function (preserved for callee) // -- a3 : new target (preserved for callee) // ----------------------------------- @@ -70,13 +69,11 @@ static void GenerateTailCallToReturnedCode(MacroAssembler* masm, FrameScope scope(masm, StackFrame::INTERNAL); // Push a copy of the function onto the stack. // Push a copy of the target function and the new target. - __ SmiTag(a0); - __ Push(a0, a1, a3, a1); + __ Push(a1, a3, a1); __ CallRuntime(function_id, 1); // Restore target function and new target. 
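Each port's `CallApiFunctionAndReturn` now takes the thunk not only when the profiler is active but also when runtime call stats are enabled. The control flow, reduced to plain C++ (function and thunk signatures are simplified stand-ins):

```cpp
// Simplified stand-in for the stub's dispatch: call the API function
// directly on the fast path; when profiling or runtime call stats are on,
// go through a thunk that receives the real callback as an extra argument.
using ApiFunction = void (*)(void* args);
using ApiThunk = void (*)(void* args, ApiFunction real_callback);

void CallApiFunction(bool is_profiling, int runtime_stats_flag,
                     ApiFunction fn, ApiThunk thunk, void* args) {
  if (!is_profiling && runtime_stats_flag == 0) {
    fn(args);         // Fast path: direct call.
  } else {
    thunk(args, fn);  // Instrumented path: the thunk wraps the real callback.
  }
}
```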
- __ Pop(a0, a1, a3); - __ SmiUntag(a0); + __ Pop(a1, a3); } static_assert(kJavaScriptCallCodeStartRegister == a2, "ABI mismatch"); @@ -870,13 +867,11 @@ static void MaybeTailCallOptimizedCodeSlot(MacroAssembler* masm, Register scratch1, Register scratch2, Register scratch3) { // ----------- S t a t e ------------- - // -- a0 : argument count (preserved for callee if needed, and caller) // -- a3 : new target (preserved for callee if needed, and caller) // -- a1 : target function (preserved for callee if needed, and caller) // -- feedback vector (preserved for caller if needed) // ----------------------------------- - DCHECK( - !AreAliased(feedback_vector, a0, a1, a3, scratch1, scratch2, scratch3)); + DCHECK(!AreAliased(feedback_vector, a1, a3, scratch1, scratch2, scratch3)); Label optimized_code_slot_is_weak_ref, fallthrough; @@ -1052,16 +1047,18 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) { __ Ld(feedback_vector, FieldMemOperand(closure, JSFunction::kFeedbackCellOffset)); __ Ld(feedback_vector, FieldMemOperand(feedback_vector, Cell::kValueOffset)); + + Label push_stack_frame; + // Check if feedback vector is valid. If valid, check for optimized code + // and update invocation count. Otherwise, setup the stack frame. + __ Ld(a4, FieldMemOperand(feedback_vector, HeapObject::kMapOffset)); + __ Lhu(a4, FieldMemOperand(a4, Map::kInstanceTypeOffset)); + __ Branch(&push_stack_frame, ne, a4, Operand(FEEDBACK_VECTOR_TYPE)); + // Read off the optimized code slot in the feedback vector, and if there // is optimized code or an optimization marker, call that instead. MaybeTailCallOptimizedCodeSlot(masm, feedback_vector, a4, t3, a5); - // Open a frame scope to indicate that there is a frame on the stack. The - // MANUAL indicates that the scope shouldn't actually generate code to set up - // the frame (that is done below). - FrameScope frame_scope(masm, StackFrame::MANUAL); - __ PushStandardFrame(closure); - // Increment invocation count for the function. __ Lw(a4, FieldMemOperand(feedback_vector, FeedbackVector::kInvocationCountOffset)); @@ -1069,10 +1066,21 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) { __ Sw(a4, FieldMemOperand(feedback_vector, FeedbackVector::kInvocationCountOffset)); - // Reset code age. - DCHECK_EQ(0, BytecodeArray::kNoAgeBytecodeAge); - __ sb(zero_reg, FieldMemOperand(kInterpreterBytecodeArrayRegister, - BytecodeArray::kBytecodeAgeOffset)); + // Open a frame scope to indicate that there is a frame on the stack. The + // MANUAL indicates that the scope shouldn't actually generate code to set up + // the frame (that is done below). + __ bind(&push_stack_frame); + FrameScope frame_scope(masm, StackFrame::MANUAL); + __ PushStandardFrame(closure); + + // Reset code age and the OSR arming. The OSR field and BytecodeAgeOffset are + // 8-bit fields next to each other, so we could just optimize by writing a + // 16-bit. These static asserts guard our assumption is valid. + STATIC_ASSERT(BytecodeArray::kBytecodeAgeOffset == + BytecodeArray::kOsrNestingLevelOffset + kCharSize); + STATIC_ASSERT(BytecodeArray::kNoAgeBytecodeAge == 0); + __ sh(zero_reg, FieldMemOperand(kInterpreterBytecodeArrayRegister, + BytecodeArray::kOsrNestingLevelOffset)); // Load initial bytecode offset. 
__ li(kInterpreterBytecodeOffsetRegister, @@ -1479,11 +1487,13 @@ void Generate_ContinueToBuiltinHelper(MacroAssembler* masm, } __ Ld(fp, MemOperand( sp, BuiltinContinuationFrameConstants::kFixedFrameSizeFromFp)); + // Load builtin index (stored as a Smi) and use it to get the builtin start + // address from the builtins table. __ Pop(t0); __ Daddu(sp, sp, Operand(BuiltinContinuationFrameConstants::kFixedFrameSizeFromFp)); __ Pop(ra); - __ Daddu(t0, t0, Operand(Code::kHeaderSize - kHeapObjectTag)); + __ LoadEntryFromBuiltinIndex(t0); __ Jump(t0); } } // namespace @@ -2595,7 +2605,7 @@ void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size, __ LoadRoot(a4, RootIndex::kTheHoleValue); // Cannot use check here as it attempts to generate call into runtime. __ Branch(&okay, eq, a4, Operand(a2)); - __ stop("Unexpected pending exception"); + __ stop(); __ bind(&okay); } @@ -2864,18 +2874,24 @@ void CallApiFunctionAndReturn(MacroAssembler* masm, Register function_address, DCHECK(function_address == a1 || function_address == a2); - Label profiler_disabled; - Label end_profiler_check; + Label profiler_enabled, end_profiler_check; __ li(t9, ExternalReference::is_profiling_address(isolate)); __ Lb(t9, MemOperand(t9, 0)); - __ Branch(&profiler_disabled, eq, t9, Operand(zero_reg)); - - // Additional parameter is the address of the actual callback. - __ li(t9, thunk_ref); - __ jmp(&end_profiler_check); + __ Branch(&profiler_enabled, ne, t9, Operand(zero_reg)); + __ li(t9, ExternalReference::address_of_runtime_stats_flag()); + __ Lw(t9, MemOperand(t9, 0)); + __ Branch(&profiler_enabled, ne, t9, Operand(zero_reg)); + { + // Call the api function directly. + __ mov(t9, function_address); + __ Branch(&end_profiler_check); + } - __ bind(&profiler_disabled); - __ mov(t9, function_address); + __ bind(&profiler_enabled); + { + // Additional parameter is the address of the actual callback. + __ li(t9, thunk_ref); + } __ bind(&end_profiler_check); // Allocate HandleScope in callee-save registers. diff --git a/deps/v8/src/builtins/object-fromentries.tq b/deps/v8/src/builtins/object-fromentries.tq index 93851d4e11314d..32115e78eab250 100644 --- a/deps/v8/src/builtins/object-fromentries.tq +++ b/deps/v8/src/builtins/object-fromentries.tq @@ -33,8 +33,8 @@ namespace object { } transitioning javascript builtin - ObjectFromEntries(implicit context: Context)(receiver: Object, ...arguments): - Object { + ObjectFromEntries(js-implicit context: Context, receiver: Object)( + ...arguments): Object { const iterable: Object = arguments[0]; try { if (IsNullOrUndefined(iterable)) goto Throw; @@ -47,7 +47,8 @@ namespace object { try { assert(!IsNullOrUndefined(i.object)); while (true) { - const step: Object = iterator::IteratorStep(i, fastIteratorResultMap) + const step: JSReceiver = + iterator::IteratorStep(i, fastIteratorResultMap) otherwise return result; const iteratorValue: Object = iterator::IteratorValue(step, fastIteratorResultMap); diff --git a/deps/v8/src/builtins/object.tq b/deps/v8/src/builtins/object.tq new file mode 100644 index 00000000000000..6706a8f943399f --- /dev/null +++ b/deps/v8/src/builtins/object.tq @@ -0,0 +1,138 @@ +// Copyright 2019 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
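With `IteratorStep` now typed as returning `JSReceiver`, the `ObjectFromEntries` loop above reads: step until the iterator is done, extract each step's value, and define a data property per entry. Modeled in C++ with `std::optional` standing in for the Torque `otherwise` label (a sketch of the loop shape, not the spec algorithm's full error handling):

```cpp
#include <map>
#include <optional>
#include <string>
#include <utility>

using Entry = std::pair<std::string, int>;

// Each call to step() models IteratorStep: a value while the iterator has
// entries, std::nullopt once it is done.
std::map<std::string, int> FromEntries(std::optional<Entry> (*step)()) {
  std::map<std::string, int> result;
  while (auto entry = step()) {
    result[entry->first] = entry->second;  // CreateDataProperty equivalent.
  }
  return result;
}
```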
+ +namespace runtime { + extern transitioning runtime + ObjectIsExtensible(implicit context: Context)(Object): Object; + + extern transitioning runtime + JSReceiverPreventExtensionsThrow(implicit context: Context)(JSReceiver): + Object; + + extern transitioning runtime + JSReceiverPreventExtensionsDontThrow(implicit context: Context)(JSReceiver): + Object; + + extern transitioning runtime + JSReceiverGetPrototypeOf(implicit context: Context)(JSReceiver): Object; + + extern transitioning runtime + JSReceiverSetPrototypeOfThrow(implicit context: Context)(JSReceiver, Object): + Object; + + extern transitioning runtime + JSReceiverSetPrototypeOfDontThrow(implicit context: + Context)(JSReceiver, Object): Object; +} // namespace runtime + +namespace object { + transitioning macro + ObjectIsExtensible(implicit context: Context)(object: Object): Object { + const objectJSReceiver = Cast(object) otherwise return False; + const objectJSProxy = Cast(objectJSReceiver) + otherwise return runtime::ObjectIsExtensible(objectJSReceiver); + return proxy::ProxyIsExtensible(objectJSProxy); + } + + transitioning macro + ObjectPreventExtensionsThrow(implicit context: Context)(object: Object): + Object { + const objectJSReceiver = Cast(object) otherwise return object; + const objectJSProxy = Cast(objectJSReceiver) + otherwise return runtime::JSReceiverPreventExtensionsThrow( + objectJSReceiver); + proxy::ProxyPreventExtensions(objectJSProxy, True); + return objectJSReceiver; + } + + transitioning macro + ObjectPreventExtensionsDontThrow(implicit context: Context)(object: Object): + Object { + const objectJSReceiver = Cast(object) otherwise return False; + const objectJSProxy = Cast(objectJSReceiver) + otherwise return runtime::JSReceiverPreventExtensionsDontThrow( + objectJSReceiver); + return proxy::ProxyPreventExtensions(objectJSProxy, False); + } + + transitioning macro + ObjectGetPrototypeOf(implicit context: Context)(object: Object): Object { + const objectJSReceiver: JSReceiver = ToObject_Inline(context, object); + return object::JSReceiverGetPrototypeOf(objectJSReceiver); + } + + transitioning macro + JSReceiverGetPrototypeOf(implicit context: Context)(object: JSReceiver): + Object { + const objectJSProxy = Cast(object) + otherwise return runtime::JSReceiverGetPrototypeOf(object); + return proxy::ProxyGetPrototypeOf(objectJSProxy); + } + + transitioning macro + ObjectSetPrototypeOfThrow(implicit context: Context)( + object: Object, proto: Object): Object { + const objectJSReceiver = Cast(object) otherwise return object; + const objectJSProxy = Cast(objectJSReceiver) + otherwise return runtime::JSReceiverSetPrototypeOfThrow( + objectJSReceiver, proto); + proxy::ProxySetPrototypeOf(objectJSProxy, proto, True); + return objectJSReceiver; + } + + transitioning macro + ObjectSetPrototypeOfDontThrow(implicit context: Context)( + object: Object, proto: Object): Object { + const objectJSReceiver = Cast(object) otherwise return False; + const objectJSProxy = Cast(objectJSReceiver) + otherwise return runtime::JSReceiverSetPrototypeOfDontThrow( + objectJSReceiver, proto); + return proxy::ProxySetPrototypeOf(objectJSProxy, proto, False); + } +} // namespace object + +namespace object_isextensible { + // ES6 section 19.1.2.11 Object.isExtensible ( O ) + transitioning javascript builtin ObjectIsExtensible( + js-implicit context: Context)(_receiver: Object, object: Object): Object { + return object::ObjectIsExtensible(object); + } +} // namespace object_isextensible + +namespace object_preventextensions { + // ES6 
section 19.1.2.11 Object.isExtensible ( O ) + transitioning javascript builtin ObjectPreventExtensions( + js-implicit context: Context)(_receiver: Object, object: Object): Object { + return object::ObjectPreventExtensionsThrow(object); + } +} // namespace object_preventextensions + +namespace object_getprototypeof { + // ES6 section 19.1.2.9 Object.getPrototypeOf ( O ) + transitioning javascript builtin ObjectGetPrototypeOf( + js-implicit context: Context)(_receiver: Object, object: Object): Object { + return object::ObjectGetPrototypeOf(object); + } +} // namespace object_getprototypeof + +namespace object_setprototypeof { + // ES6 section 19.1.2.21 Object.setPrototypeOf ( O, proto ) + transitioning javascript builtin ObjectSetPrototypeOf( + js-implicit context: + Context)(_receiver: Object, object: Object, proto: Object): Object { + // 1. Set O to ? RequireObjectCoercible(O). + RequireObjectCoercible(object, 'Object.setPrototypeOf'); + + // 2. If Type(proto) is neither Object nor Null, throw a TypeError + // exception. + // 3. If Type(O) is not Object, return O. + // 4. Let status be ? O.[[SetPrototypeOf]](proto). + // 5. If status is false, throw a TypeError exception. + // 6. Return O. + if (proto == Null || Is(proto)) { + return object::ObjectSetPrototypeOfThrow(object, proto); + } + ThrowTypeError(kProtoObjectOrNull, proto); + } +} // namespace object_setprototypeof diff --git a/deps/v8/src/builtins/ppc/builtins-ppc.cc b/deps/v8/src/builtins/ppc/builtins-ppc.cc index e3c6ce6407f275..a42cb9bebd2824 100644 --- a/deps/v8/src/builtins/ppc/builtins-ppc.cc +++ b/deps/v8/src/builtins/ppc/builtins-ppc.cc @@ -60,24 +60,20 @@ void Builtins::Generate_InternalArrayConstructor(MacroAssembler* masm) { static void GenerateTailCallToReturnedCode(MacroAssembler* masm, Runtime::FunctionId function_id) { // ----------- S t a t e ------------- - // -- r3 : argument count (preserved for callee) // -- r4 : target function (preserved for callee) // -- r6 : new target (preserved for callee) // ----------------------------------- { FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL); - // Push the number of arguments to the callee. // Push a copy of the target function and the new target. // Push function as parameter to the runtime call. - __ SmiTag(r3); - __ Push(r3, r4, r6, r4); + __ Push(r4, r6, r4); __ CallRuntime(function_id, 1); __ mr(r5, r3); // Restore target function and new target. 
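The new object.tq macros above all follow one dispatch shape: try to cast the receiver to a JSProxy and handle it in Torque, otherwise fall back to the runtime. A C++ analogue of that shape (types and stubs are invented for illustration; Torque's `Cast ... otherwise` is modeled with `dynamic_cast`):

```cpp
// Invented types: a proxy gets the in-generated-code path, everything else
// falls back to a runtime call, mirroring ObjectIsExtensible above.
struct JSReceiverModel {
  virtual ~JSReceiverModel() = default;
};
struct JSProxyModel : JSReceiverModel {
  bool IsExtensible() const { return true; }  // Stub for the proxy trap path.
};

bool RuntimeObjectIsExtensible(const JSReceiverModel&) { return true; }  // Stub.

bool ObjectIsExtensibleModel(const JSReceiverModel& receiver) {
  if (const auto* proxy = dynamic_cast<const JSProxyModel*>(&receiver)) {
    return proxy->IsExtensible();
  }
  return RuntimeObjectIsExtensible(receiver);
}
```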
- __ Pop(r3, r4, r6); - __ SmiUntag(r3); + __ Pop(r4, r6); } static_assert(kJavaScriptCallCodeStartRegister == r5, "ABI mismatch"); __ JumpCodeObject(r5); @@ -110,6 +106,8 @@ void Generate_JSBuiltinsConstructStubHelper(MacroAssembler* masm) { // -- sp[...]: constructor arguments // ----------------------------------- + Register scratch = r5; + Label stack_overflow; Generate_StackOverflowCheck(masm, r3, r8, &stack_overflow); @@ -141,13 +139,13 @@ void Generate_JSBuiltinsConstructStubHelper(MacroAssembler* masm) { // -- sp[2*kPointerSize]: context // ----------------------------------- __ beq(&no_args, cr0); - __ ShiftLeftImm(ip, r3, Operand(kPointerSizeLog2)); - __ sub(sp, sp, ip); + __ ShiftLeftImm(scratch, r3, Operand(kPointerSizeLog2)); + __ sub(sp, sp, scratch); __ mtctr(r3); __ bind(&loop); - __ subi(ip, ip, Operand(kPointerSize)); - __ LoadPX(r0, MemOperand(r7, ip)); - __ StorePX(r0, MemOperand(sp, ip)); + __ subi(scratch, scratch, Operand(kPointerSize)); + __ LoadPX(r0, MemOperand(r7, scratch)); + __ StorePX(r0, MemOperand(sp, scratch)); __ bdnz(&loop); __ bind(&no_args); @@ -300,13 +298,13 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) { // ----------------------------------- __ cmpi(r3, Operand::Zero()); __ beq(&no_args); - __ ShiftLeftImm(ip, r3, Operand(kPointerSizeLog2)); - __ sub(sp, sp, ip); + __ ShiftLeftImm(r9, r3, Operand(kPointerSizeLog2)); + __ sub(sp, sp, r9); __ mtctr(r3); __ bind(&loop); - __ subi(ip, ip, Operand(kPointerSize)); - __ LoadPX(r0, MemOperand(r7, ip)); - __ StorePX(r0, MemOperand(sp, ip)); + __ subi(r9, r9, Operand(kPointerSize)); + __ LoadPX(r0, MemOperand(r7, r9)); + __ StorePX(r0, MemOperand(sp, r9)); __ bdnz(&loop); __ bind(&no_args); @@ -416,12 +414,13 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) { // Flood function if we are stepping. Label prepare_step_in_if_stepping, prepare_step_in_suspended_generator; Label stepping_prepared; + Register scratch = r8; ExternalReference debug_hook = ExternalReference::debug_hook_on_function_call_address(masm->isolate()); - __ Move(ip, debug_hook); - __ LoadByte(ip, MemOperand(ip), r0); - __ extsb(ip, ip); - __ CmpSmiLiteral(ip, Smi::zero(), r0); + __ Move(scratch, debug_hook); + __ LoadByte(scratch, MemOperand(scratch), r0); + __ extsb(scratch, scratch); + __ CmpSmiLiteral(scratch, Smi::zero(), r0); __ bne(&prepare_step_in_if_stepping); // Flood function if we need to continue stepping in the suspended generator. @@ -429,9 +428,9 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) { ExternalReference debug_suspended_generator = ExternalReference::debug_suspended_generator_address(masm->isolate()); - __ Move(ip, debug_suspended_generator); - __ LoadP(ip, MemOperand(ip)); - __ cmp(ip, r4); + __ Move(scratch, debug_suspended_generator); + __ LoadP(scratch, MemOperand(scratch)); + __ cmp(scratch, r4); __ beq(&prepare_step_in_suspended_generator); __ bind(&stepping_prepared); @@ -442,8 +441,8 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) { __ blt(&stack_overflow); // Push receiver. 
- __ LoadP(ip, FieldMemOperand(r4, JSGeneratorObject::kReceiverOffset)); - __ Push(ip); + __ LoadP(scratch, FieldMemOperand(r4, JSGeneratorObject::kReceiverOffset)); + __ Push(scratch); // ----------- S t a t e ------------- // -- r4 : the JSGeneratorObject to resume @@ -470,8 +469,8 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) { __ mtctr(r6); __ bind(&loop); - __ LoadPU(ip, MemOperand(r9, kPointerSize)); - __ push(ip); + __ LoadPU(scratch, MemOperand(r9, kPointerSize)); + __ push(scratch); __ bdnz(&loop); __ bind(&done_loop); @@ -602,6 +601,7 @@ void Generate_JSEntryVariant(MacroAssembler* masm, StackFrame::Type type, __ LoadP(r0, MemOperand(r3)); __ push(r0); + Register scratch = r9; // Set up frame pointer for the frame to be pushed. __ addi(fp, sp, Operand(-EntryFrameConstants::kCallerFPOffset)); @@ -611,17 +611,17 @@ void Generate_JSEntryVariant(MacroAssembler* masm, StackFrame::Type type, ExternalReference::Create(IsolateAddressId::kJSEntrySPAddress, masm->isolate()); __ Move(r3, js_entry_sp); - __ LoadP(r9, MemOperand(r3)); - __ cmpi(r9, Operand::Zero()); + __ LoadP(scratch, MemOperand(r3)); + __ cmpi(scratch, Operand::Zero()); __ bne(&non_outermost_js); __ StoreP(fp, MemOperand(r3)); - __ mov(ip, Operand(StackFrame::OUTERMOST_JSENTRY_FRAME)); + __ mov(scratch, Operand(StackFrame::OUTERMOST_JSENTRY_FRAME)); Label cont; __ b(&cont); __ bind(&non_outermost_js); - __ mov(ip, Operand(StackFrame::INNER_JSENTRY_FRAME)); + __ mov(scratch, Operand(StackFrame::INNER_JSENTRY_FRAME)); __ bind(&cont); - __ push(ip); // frame-type + __ push(scratch); // frame-type // Jump to a faked try block that does the invoke, with a faked catch // block that sets the pending exception. @@ -642,12 +642,12 @@ void Generate_JSEntryVariant(MacroAssembler* masm, StackFrame::Type type, // field in the JSEnv and return a failure sentinel. Coming in here the // fp will be invalid because the PushStackHandler below sets it to 0 to // signal the existence of the JSEntry frame. - __ Move(ip, - ExternalReference::Create(IsolateAddressId::kPendingExceptionAddress, - masm->isolate())); + __ Move(scratch, + ExternalReference::Create( + IsolateAddressId::kPendingExceptionAddress, masm->isolate())); } - __ StoreP(r3, MemOperand(ip)); + __ StoreP(r3, MemOperand(scratch)); __ LoadRoot(r3, RootIndex::kException); __ b(&exit); @@ -679,16 +679,16 @@ void Generate_JSEntryVariant(MacroAssembler* masm, StackFrame::Type type, __ pop(r8); __ cmpi(r8, Operand(StackFrame::OUTERMOST_JSENTRY_FRAME)); __ bne(&non_outermost_js_2); - __ mov(r9, Operand::Zero()); + __ mov(scratch, Operand::Zero()); __ Move(r8, js_entry_sp); - __ StoreP(r9, MemOperand(r8)); + __ StoreP(scratch, MemOperand(r8)); __ bind(&non_outermost_js_2); // Restore the top frame descriptors from the stack. __ pop(r6); - __ Move(ip, ExternalReference::Create( - IsolateAddressId::kCEntryFPAddress, masm->isolate())); - __ StoreP(r6, MemOperand(ip)); + __ Move(scratch, ExternalReference::Create(IsolateAddressId::kCEntryFPAddress, + masm->isolate())); + __ StoreP(r6, MemOperand(scratch)); // Reset the stack to the callee saved registers. 
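The JSEntry bookkeeping above hinges on `js_entry_sp`: a zero slot means no JS frame is live yet, so this entry is the outermost one and records its frame pointer; otherwise an inner marker is pushed. Reduced to C++ (a model of the protocol, not the stub itself):

```cpp
#include <cstdint>

enum FrameMarker { kInnerJSEntryFrame, kOutermostJSEntryFrame };

// Zero in *js_entry_sp means "no JS frames active": record our frame
// pointer and mark this entry as outermost; on exit the outermost entry
// clears the slot again so the next entry sees zero.
FrameMarker EnterJS(uintptr_t* js_entry_sp, uintptr_t current_fp) {
  if (*js_entry_sp == 0) {
    *js_entry_sp = current_fp;
    return kOutermostJSEntryFrame;
  }
  return kInnerJSEntryFrame;
}
```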
__ addi(sp, sp, Operand(-EntryFrameConstants::kCallerFPOffset)); @@ -894,13 +894,11 @@ static void MaybeTailCallOptimizedCodeSlot(MacroAssembler* masm, Register scratch1, Register scratch2, Register scratch3) { // ----------- S t a t e ------------- - // -- r0 : argument count (preserved for callee if needed, and caller) - // -- r3 : new target (preserved for callee if needed, and caller) - // -- r1 : target function (preserved for callee if needed, and caller) + // -- r6 : new target (preserved for callee if needed, and caller) + // -- r4 : target function (preserved for callee if needed, and caller) // -- feedback vector (preserved for caller if needed) // ----------------------------------- - DCHECK( - !AreAliased(feedback_vector, r3, r4, r6, scratch1, scratch2, scratch3)); + DCHECK(!AreAliased(feedback_vector, r4, r6, scratch1, scratch2, scratch3)); Label optimized_code_slot_is_weak_ref, fallthrough; @@ -1084,6 +1082,15 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) { FieldMemOperand(closure, JSFunction::kFeedbackCellOffset)); __ LoadP(feedback_vector, FieldMemOperand(feedback_vector, Cell::kValueOffset)); + + Label push_stack_frame; + // Check if feedback vector is valid. If valid, check for optimized code + // and update invocation count. Otherwise, setup the stack frame. + __ LoadP(r7, FieldMemOperand(feedback_vector, HeapObject::kMapOffset)); + __ LoadHalfWord(r7, FieldMemOperand(r7, Map::kInstanceTypeOffset)); + __ cmpi(r7, Operand(FEEDBACK_VECTOR_TYPE)); + __ bne(&push_stack_frame); + // Read off the optimized code slot in the feedback vector, and if there // is optimized code or an optimization marker, call that instead. MaybeTailCallOptimizedCodeSlot(masm, feedback_vector, r7, r9, r8); @@ -1102,6 +1109,9 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) { // Open a frame scope to indicate that there is a frame on the stack. The // MANUAL indicates that the scope shouldn't actually generate code to set up // the frame (that is done below). + + __ bind(&push_stack_frame); + FrameScope frame_scope(masm, StackFrame::MANUAL); __ PushStandardFrame(closure); @@ -1109,12 +1119,12 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) { // 8-bit fields next to each other, so we could just optimize by writing a // 16-bit. These static asserts guard our assumption is valid. STATIC_ASSERT(BytecodeArray::kBytecodeAgeOffset == - BytecodeArray::kOSRNestingLevelOffset + kCharSize); + BytecodeArray::kOsrNestingLevelOffset + kCharSize); STATIC_ASSERT(BytecodeArray::kNoAgeBytecodeAge == 0); __ li(r8, Operand(0)); __ StoreHalfWord(r8, FieldMemOperand(kInterpreterBytecodeArrayRegister, - BytecodeArray::kOSRNestingLevelOffset), + BytecodeArray::kOsrNestingLevelOffset), r0); // Load initial bytecode offset. @@ -1395,11 +1405,13 @@ static void Generate_InterpreterEnterBytecode(MacroAssembler* masm) { __ SmiUntag(kInterpreterBytecodeOffsetRegister); // Dispatch to the target bytecode. 
+  UseScratchRegisterScope temps(masm);
+  Register scratch = temps.Acquire();
-  __ lbzx(ip, MemOperand(kInterpreterBytecodeArrayRegister,
-          kInterpreterBytecodeOffsetRegister));
-  __ ShiftLeftImm(ip, ip, Operand(kPointerSizeLog2));
+  __ lbzx(scratch, MemOperand(kInterpreterBytecodeArrayRegister,
+          kInterpreterBytecodeOffsetRegister));
+  __ ShiftLeftImm(scratch, scratch, Operand(kPointerSizeLog2));
   __ LoadPX(kJavaScriptCallCodeStartRegister,
-            MemOperand(kInterpreterDispatchTableRegister, ip));
+            MemOperand(kInterpreterDispatchTableRegister, scratch));
   __ Jump(kJavaScriptCallCodeStartRegister);
 }
@@ -1526,13 +1538,17 @@ void Generate_ContinueToBuiltinHelper(MacroAssembler* masm,
   __ LoadP(
       fp,
       MemOperand(sp, BuiltinContinuationFrameConstants::kFixedFrameSizeFromFp));
-  __ Pop(ip);
+  // Load builtin index (stored as a Smi) and use it to get the builtin start
+  // address from the builtins table.
+  UseScratchRegisterScope temps(masm);
+  Register builtin = temps.Acquire();
+  __ Pop(builtin);
   __ addi(sp, sp,
           Operand(BuiltinContinuationFrameConstants::kFixedFrameSizeFromFp));
   __ Pop(r0);
   __ mtlr(r0);
-  __ addi(ip, ip, Operand(Code::kHeaderSize - kHeapObjectTag));
-  __ Jump(ip);
+  __ LoadEntryFromBuiltinIndex(builtin);
+  __ Jump(builtin);
 }
 }  // namespace
@@ -1702,14 +1718,15 @@ void Builtins::Generate_FunctionPrototypeCall(MacroAssembler* masm) {
   // r3: actual number of arguments
   // r4: callable
   {
+    Register scratch = r6;
     Label loop;
     // Calculate the copy start address (destination). Copy end address is sp.
     __ add(r5, sp, r5);
     __ mtctr(r3);
     __ bind(&loop);
-    __ LoadP(ip, MemOperand(r5, -kPointerSize));
-    __ StoreP(ip, MemOperand(r5));
+    __ LoadP(scratch, MemOperand(r5, -kPointerSize));
+    __ StoreP(scratch, MemOperand(r5));
     __ subi(r5, r5, Operand(kPointerSize));
     __ bdnz(&loop);
     // Adjust the actual number of arguments and remove the top element
@@ -1891,7 +1908,7 @@ void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm,
   // Check for stack overflow.
   Label stack_overflow;
-  Generate_StackOverflowCheck(masm, r7, ip, &stack_overflow);
+  Generate_StackOverflowCheck(masm, r7, scratch, &stack_overflow);
   // Push arguments onto the stack (thisArgument is already on the stack).
   {
@@ -1902,12 +1919,12 @@ void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm,
             Operand(FixedArray::kHeaderSize - kHeapObjectTag - kPointerSize));
     __ mtctr(r7);
     __ bind(&loop);
-    __ LoadPU(ip, MemOperand(r5, kPointerSize));
-    __ CompareRoot(ip, RootIndex::kTheHoleValue);
+    __ LoadPU(scratch, MemOperand(r5, kPointerSize));
+    __ CompareRoot(scratch, RootIndex::kTheHoleValue);
     __ bne(&skip);
-    __ LoadRoot(ip, RootIndex::kUndefinedValue);
+    __ LoadRoot(scratch, RootIndex::kUndefinedValue);
     __ bind(&skip);
-    __ push(ip);
+    __ push(scratch);
     __ bdnz(&loop);
     __ bind(&no_args);
     __ add(r3, r3, r7);
@@ -1953,8 +1970,10 @@ void Builtins::Generate_CallOrConstructForwardVarargs(MacroAssembler* masm,
   // Check if we have an arguments adaptor frame below the function frame.
Label arguments_adaptor, arguments_done; __ LoadP(r7, MemOperand(fp, StandardFrameConstants::kCallerFPOffset)); - __ LoadP(ip, MemOperand(r7, CommonFrameConstants::kContextOrFrameTypeOffset)); - __ cmpi(ip, Operand(StackFrame::TypeToMarker(StackFrame::ARGUMENTS_ADAPTOR))); + __ LoadP(scratch, + MemOperand(r7, CommonFrameConstants::kContextOrFrameTypeOffset)); + __ cmpi(scratch, + Operand(StackFrame::TypeToMarker(StackFrame::ARGUMENTS_ADAPTOR))); __ beq(&arguments_adaptor); { __ LoadP(r8, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset)); @@ -1988,9 +2007,9 @@ void Builtins::Generate_CallOrConstructForwardVarargs(MacroAssembler* masm, __ add(r3, r3, r8); __ bind(&loop); { - __ ShiftLeftImm(ip, r8, Operand(kPointerSizeLog2)); - __ LoadPX(ip, MemOperand(r7, ip)); - __ push(ip); + __ ShiftLeftImm(scratch, r8, Operand(kPointerSizeLog2)); + __ LoadPX(scratch, MemOperand(r7, scratch)); + __ push(scratch); __ subi(r8, r8, Operand(1)); __ cmpi(r8, Operand::Zero()); __ bne(&loop); @@ -2134,10 +2153,11 @@ void Generate_PushBoundArguments(MacroAssembler* masm) { // -- r7 : the number of [[BoundArguments]] // ----------------------------------- + Register scratch = r9; // Reserve stack space for the [[BoundArguments]]. { Label done; - __ mr(r9, sp); // preserve previous stack pointer + __ mr(scratch, sp); // preserve previous stack pointer __ ShiftLeftImm(r10, r7, Operand(kPointerSizeLog2)); __ sub(sp, sp, r10); // Check the stack for overflow. We are not trying to catch interruptions @@ -2146,7 +2166,7 @@ void Generate_PushBoundArguments(MacroAssembler* masm) { __ CompareRoot(sp, RootIndex::kRealStackLimit); __ bgt(&done); // Signed comparison. // Restore the stack pointer. - __ mr(sp, r9); + __ mr(sp, scratch); { FrameScope scope(masm, StackFrame::MANUAL); __ EnterFrame(StackFrame::INTERNAL); @@ -2166,7 +2186,7 @@ void Generate_PushBoundArguments(MacroAssembler* masm) { __ beq(&skip); __ mtctr(r3); __ bind(&loop); - __ LoadPX(r0, MemOperand(r9, r8)); + __ LoadPX(r0, MemOperand(scratch, r8)); __ StorePX(r0, MemOperand(sp, r8)); __ addi(r8, r8, Operand(kPointerSize)); __ bdnz(&loop); @@ -2201,9 +2221,9 @@ void Builtins::Generate_CallBoundFunctionImpl(MacroAssembler* masm) { __ AssertBoundFunction(r4); // Patch the receiver to [[BoundThis]]. - __ LoadP(ip, FieldMemOperand(r4, JSBoundFunction::kBoundThisOffset)); + __ LoadP(r6, FieldMemOperand(r4, JSBoundFunction::kBoundThisOffset)); __ ShiftLeftImm(r0, r3, Operand(kPointerSizeLog2)); - __ StorePX(ip, MemOperand(sp, r0)); + __ StorePX(r6, MemOperand(sp, r0)); // Push the [[BoundArguments]] onto the stack. Generate_PushBoundArguments(masm); @@ -2388,7 +2408,7 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) { __ cmpli(r5, Operand(SharedFunctionInfo::kDontAdaptArgumentsSentinel)); __ beq(&dont_adapt_arguments); __ LoadP(r7, FieldMemOperand(r4, JSFunction::kSharedFunctionInfoOffset)); - __ LoadP(r7, FieldMemOperand(r7, SharedFunctionInfo::kFlagsOffset)); + __ lwz(r7, FieldMemOperand(r7, SharedFunctionInfo::kFlagsOffset)); __ TestBitMask(r7, SharedFunctionInfo::IsSafeToSkipArgumentsAdaptorBit::kMask, r0); __ bne(&skip_adapt_arguments, cr0); @@ -2686,7 +2706,7 @@ void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size, __ CompareRoot(r6, RootIndex::kTheHoleValue); // Cannot use check here as it attempts to generate call into runtime. 
__ beq(&okay); - __ stop("Unexpected pending exception"); + __ stop(); __ bind(&okay); } @@ -2961,13 +2981,22 @@ static void CallApiFunctionAndReturn(MacroAssembler* masm, __ Move(scratch, thunk_ref); __ isel(eq, scratch, function_address, scratch); } else { - Label profiler_disabled; - Label end_profiler_check; - __ beq(&profiler_disabled); - __ Move(scratch, thunk_ref); - __ b(&end_profiler_check); - __ bind(&profiler_disabled); - __ mr(scratch, function_address); + Label profiler_enabled, end_profiler_check; + __ bne(&profiler_enabled); + __ Move(scratch, ExternalReference::address_of_runtime_stats_flag()); + __ lwz(scratch, MemOperand(scratch, 0)); + __ cmpi(scratch, Operand::Zero()); + __ bne(&profiler_enabled); + { + // Call the api function directly. + __ mr(scratch, function_address); + __ b(&end_profiler_check); + } + __ bind(&profiler_enabled); + { + // Additional parameter is the address of the actual callback. + __ Move(scratch, thunk_ref); + } __ bind(&end_profiler_check); } @@ -3264,6 +3293,8 @@ void Builtins::Generate_CallApiGetter(MacroAssembler* masm) { } void Builtins::Generate_DirectCEntry(MacroAssembler* masm) { + UseScratchRegisterScope temps(masm); + Register temp2 = temps.Acquire(); // Place the return address on the stack, making the call // GC safe. The RegExp backend also relies on this. __ mflr(r0); @@ -3271,11 +3302,11 @@ void Builtins::Generate_DirectCEntry(MacroAssembler* masm) { if (ABI_USES_FUNCTION_DESCRIPTORS && FLAG_embedded_builtins) { // AIX/PPC64BE Linux use a function descriptor; - __ LoadP(ToRegister(ABI_TOC_REGISTER), MemOperand(ip, kPointerSize)); - __ LoadP(ip, MemOperand(ip, 0)); // Instruction address + __ LoadP(ToRegister(ABI_TOC_REGISTER), MemOperand(temp2, kPointerSize)); + __ LoadP(temp2, MemOperand(temp2, 0)); // Instruction address } - __ Call(ip); // Call the C++ function. + __ Call(temp2); // Call the C++ function. __ LoadP(r0, MemOperand(sp, kStackFrameExtraParamSlot * kPointerSize)); __ mtlr(r0); __ blr(); diff --git a/deps/v8/src/builtins/proxy-constructor.tq b/deps/v8/src/builtins/proxy-constructor.tq index 178759b595cea8..ad60c20e2c2ea3 100644 --- a/deps/v8/src/builtins/proxy-constructor.tq +++ b/deps/v8/src/builtins/proxy-constructor.tq @@ -6,17 +6,14 @@ namespace proxy { - extern macro ProxiesCodeStubAssembler::GetProxyConstructorJSNewTarget(): - Object; - // ES #sec-proxy-constructor // https://tc39.github.io/ecma262/#sec-proxy-constructor transitioning javascript builtin - ProxyConstructor(implicit context: Context)( - receiver: Object, target: Object, handler: Object): JSProxy { + ProxyConstructor( + js-implicit context: Context, receiver: Object, + newTarget: Object)(target: Object, handler: Object): JSProxy { try { // 1. If NewTarget is undefined, throw a TypeError exception. - const newTarget: Object = GetProxyConstructorJSNewTarget(); if (newTarget == Undefined) { ThrowTypeError(kConstructorNotFunction, 'Proxy'); } diff --git a/deps/v8/src/builtins/proxy-delete-property.tq b/deps/v8/src/builtins/proxy-delete-property.tq new file mode 100644 index 00000000000000..759de766efbed1 --- /dev/null +++ b/deps/v8/src/builtins/proxy-delete-property.tq @@ -0,0 +1,67 @@ +// Copyright 2019 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
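The ProxyConstructor hunk above switches the builtin to the js-implicit calling convention, so new.target arrives as the newTarget parameter instead of being fetched by a separate CSA macro. A minimal JS sketch of the observable behavior it implements (illustrative only, not part of the patch):

  // Proxy is only usable as a constructor; without `new`, NewTarget is
  // undefined and step 1 of the spec algorithm throws.
  const p = new Proxy({}, {});             // ok
  try {
    Proxy({}, {});                         // TypeError
  } catch (e) {
    console.log(e instanceof TypeError);   // true
  }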
+ +#include 'src/builtins/builtins-proxy-gen.h' + +namespace proxy { + + // ES #sec-proxy-object-internal-methods-and-internal-slots-delete-p + // https://tc39.es/ecma262/#sec-proxy-object-internal-methods-and-internal-slots-delete-p + transitioning builtin + ProxyDeleteProperty(implicit context: Context)( + proxy: JSProxy, name: Name, languageMode: LanguageMode): Object { + const kTrapName: constexpr string = 'deleteProperty'; + // 1. Assert: IsPropertyKey(P) is true. + assert(TaggedIsNotSmi(name)); + assert(IsName(name)); + assert(!IsPrivateSymbol(name)); + + try { + // 2. Let handler be O.[[ProxyHandler]]. + // 3. If handler is null, throw a TypeError exception. + // 4. Assert: Type(handler) is Object. + assert(proxy.handler == Null || Is(proxy.handler)); + const handler = + Cast(proxy.handler) otherwise ThrowProxyHandlerRevoked; + + // 5. Let target be O.[[ProxyTarget]]. + const target = UnsafeCast(proxy.target); + + // 6. Let trap be ? GetMethod(handler, "deleteProperty"). + // 7. If trap is undefined, then (see 7.a below). + const trap: Callable = GetMethod(handler, kTrapName) + otherwise goto TrapUndefined(target); + + // 8. Let booleanTrapResult be ToBoolean(? Call(trap, handler, + // « target, P »)). + const trapResult = Call(context, trap, handler, target, name); + + // 9. If booleanTrapResult is false, return false. + if (BranchIfToBooleanIsFalse(trapResult)) { + if (languageMode == SmiConstant(kStrict)) { + ThrowTypeError(kProxyTrapReturnedFalsishFor, kTrapName, name); + } + return False; + } + + // 10. Let targetDesc be ? target.[[GetOwnProperty]](P). + // 11. If targetDesc is undefined, return true. + // 12. If targetDesc.[[Configurable]] is false, throw a TypeError + // exception. + // 13. Let extensibleTarget be ? IsExtensible(target). + // 14. If extensibleTarget is false, throw a TypeError exception. + CheckDeleteTrapResult(target, proxy, name); + + // 15. Return true. + return True; + } + label TrapUndefined(target: Object) { + // 7.a. Return ? target.[[Delete]](P). + return DeleteProperty(target, name, languageMode); + } + label ThrowProxyHandlerRevoked deferred { + ThrowTypeError(kProxyRevoked, kTrapName); + } + } +} diff --git a/deps/v8/src/builtins/proxy-get-property.tq b/deps/v8/src/builtins/proxy-get-property.tq index 0915a66d5f79ae..bac07f550c3eb9 100644 --- a/deps/v8/src/builtins/proxy-get-property.tq +++ b/deps/v8/src/builtins/proxy-get-property.tq @@ -6,9 +6,8 @@ namespace proxy { - extern transitioning runtime - GetPropertyWithReceiver(implicit context: Context)(Object, Name, Object, Smi): - Object; + extern transitioning builtin GetPropertyWithReceiver( + implicit context: Context)(Object, Name, Object, Smi): Object; // ES #sec-proxy-object-internal-methods-and-internal-slots-get-p-receiver // https://tc39.github.io/ecma262/#sec-proxy-object-internal-methods-and-internal-slots-get-p-receiver @@ -16,36 +15,38 @@ namespace proxy { ProxyGetProperty(implicit context: Context)( proxy: JSProxy, name: Name, receiverValue: Object, onNonExistent: Smi): Object { + PerformStackCheck(); // 1. Assert: IsPropertyKey(P) is true. assert(TaggedIsNotSmi(name)); assert(IsName(name)); assert(!IsPrivateSymbol(name)); // 2. Let handler be O.[[ProxyHandler]]. - const handler: Object = proxy.handler; - // 3. If handler is null, throw a TypeError exception. - if (handler == Null) { - ThrowTypeError(kProxyRevoked, 'get'); - } - // 4. Assert: Type(handler) is Object. 
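The new ProxyDeleteProperty builtin above mirrors the spec algorithm, with CheckDeleteTrapResult enforcing the invariant of steps 10-12. A JS sketch of that invariant (illustrative only, not part of the patch):

  // A deleteProperty trap that claims success for a non-configurable own
  // property of the target is rejected by CheckDeleteTrapResult.
  const target = {};
  Object.defineProperty(target, 'x', {value: 1, configurable: false});
  const p = new Proxy(target, {deleteProperty() { return true; }});
  try {
    delete p.x;                            // TypeError
  } catch (e) {
    console.log(e instanceof TypeError);   // true
  }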
-    const handlerJSReceiver = UnsafeCast<JSReceiver>(handler);
+    let handler: JSReceiver;
+    typeswitch (proxy.handler) {
+      case (Null): {
+        ThrowTypeError(kProxyRevoked, 'get');
+      }
+      case (h: JSReceiver): {
+        handler = h;
+      }
+    }

     // 5. Let target be O.[[ProxyTarget]].
-    const target = proxy.target;
+    const target = Cast<JSReceiver>(proxy.target) otherwise unreachable;

     // 6. Let trap be ? GetMethod(handler, "get").
     // 7. If trap is undefined, then (see 7.a below).
     // 7.a. Return ? target.[[Get]](P, Receiver).
-    // TODO(mslekova): Introduce GetPropertyWithReceiver stub
-    const trap: Callable = GetMethod(handlerJSReceiver, 'get')
+    const trap: Callable = GetMethod(handler, 'get')
         otherwise return GetPropertyWithReceiver(
         target, name, receiverValue, onNonExistent);

     // 8. Let trapResult be ? Call(trap, handler, « target, P, Receiver »).
     const trapResult =
-        Call(context, trap, handlerJSReceiver, target, name, receiverValue);
+        Call(context, trap, handler, target, name, receiverValue);

     // 9. Let targetDesc be ? target.[[GetOwnProperty]](P).
     // 10. If targetDesc is not undefined and targetDesc.[[Configurable]] is
@@ -58,6 +59,7 @@ namespace proxy {
     // is undefined, then
     // i. If trapResult is not undefined, throw a TypeError exception.
     // 11. Return trapResult.
-    return CheckGetSetTrapResult(target, proxy, name, trapResult, kProxyGet);
+    CheckGetSetTrapResult(target, proxy, name, trapResult, kProxyGet);
+    return trapResult;
   }
 }
diff --git a/deps/v8/src/builtins/proxy-get-prototype-of.tq b/deps/v8/src/builtins/proxy-get-prototype-of.tq
new file mode 100644
index 00000000000000..2418eaf4230cb3
--- /dev/null
+++ b/deps/v8/src/builtins/proxy-get-prototype-of.tq
@@ -0,0 +1,70 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include 'src/builtins/builtins-proxy-gen.h'
+
+namespace proxy {
+
+  // ES #sec-proxy-object-internal-methods-and-internal-slots-getprototypeof
+  // https://tc39.github.io/ecma262/#sec-proxy-object-internal-methods-and-internal-slots-getprototypeof
+  transitioning builtin
+  ProxyGetPrototypeOf(implicit context: Context)(proxy: JSProxy): Object {
+    PerformStackCheck();
+    const kTrapName: constexpr string = 'getPrototypeOf';
+    try {
+      // 1. Let handler be O.[[ProxyHandler]].
+      // 2. If handler is null, throw a TypeError exception.
+      // 3. Assert: Type(handler) is Object.
+      assert(proxy.handler == Null || Is<JSReceiver>(proxy.handler));
+      const handler =
+          Cast<JSReceiver>(proxy.handler) otherwise ThrowProxyHandlerRevoked;
+
+      // 4. Let target be O.[[ProxyTarget]].
+      const target = proxy.target;
+
+      // 5. Let trap be ? GetMethod(handler, "getPrototypeOf").
+      // 6. If trap is undefined, then (see 6.a below).
+      const trap: Callable = GetMethod(handler, kTrapName)
+          otherwise goto TrapUndefined(target);
+
+      // 7. Let handlerProto be ? Call(trap, handler, « target »).
+      const handlerProto = Call(context, trap, handler, target);
+
+      // 8. If Type(handlerProto) is neither Object nor Null, throw a TypeError
+      // exception.
+      if (!Is<JSReceiver>(handlerProto)) {
+        goto ThrowProxyGetPrototypeOfInvalid;
+      }
+
+      // 9. Let extensibleTarget be ? IsExtensible(target).
+      // 10. If extensibleTarget is true, return handlerProto.
+      const extensibleTarget: Object = object::ObjectIsExtensible(target);
+      assert(extensibleTarget == True || extensibleTarget == False);
+      if (extensibleTarget == True) {
+        return handlerProto;
+      }
+
+      // 11. Let targetProto be ? target.[[GetPrototypeOf]]().
+ const targetProto = object::ObjectGetPrototypeOf(target); + + // 12. If SameValue(handlerProto, targetProto) is false, throw a TypeError + // exception. + // 13. Return handlerProto. + if (BranchIfSameValue(targetProto, handlerProto)) { + return handlerProto; + } + ThrowTypeError(kProxyGetPrototypeOfNonExtensible); + } + label TrapUndefined(target: Object) { + // 6.a. Return ? target.[[GetPrototypeOf]](). + return object::ObjectGetPrototypeOf(target); + } + label ThrowProxyHandlerRevoked deferred { + ThrowTypeError(kProxyRevoked, kTrapName); + } + label ThrowProxyGetPrototypeOfInvalid deferred { + ThrowTypeError(kProxyGetPrototypeOfInvalid); + } + } +} diff --git a/deps/v8/src/builtins/proxy-has-property.tq b/deps/v8/src/builtins/proxy-has-property.tq index ab3898a9c765ee..ee394c5d847d89 100644 --- a/deps/v8/src/builtins/proxy-has-property.tq +++ b/deps/v8/src/builtins/proxy-has-property.tq @@ -22,11 +22,12 @@ namespace proxy { // 2. Let handler be O.[[ProxyHandler]]. // 3. If handler is null, throw a TypeError exception. // 4. Assert: Type(handler) is Object. + assert(proxy.handler == Null || Is(proxy.handler)); const handler = Cast(proxy.handler) otherwise ThrowProxyHandlerRevoked; // 5. Let target be O.[[ProxyTarget]]. - const target = proxy.target; + const target = Cast(proxy.target) otherwise unreachable; // 6. Let trap be ? GetMethod(handler, "has"). // 7. If trap is undefined, then (see 7.a below). @@ -42,7 +43,8 @@ namespace proxy { if (BranchIfToBooleanIsTrue(trapResult)) { return True; } - return CheckHasTrapResult(target, proxy, name); + CheckHasTrapResult(target, proxy, name); + return False; } label TrapUndefined(target: Object) { // 7.a. Return ? target.[[HasProperty]](P). diff --git a/deps/v8/src/builtins/proxy-is-extensible.tq b/deps/v8/src/builtins/proxy-is-extensible.tq new file mode 100644 index 00000000000000..82f4a5b955c297 --- /dev/null +++ b/deps/v8/src/builtins/proxy-is-extensible.tq @@ -0,0 +1,56 @@ +// Copyright 2019 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#include 'src/builtins/builtins-proxy-gen.h' + +namespace proxy { + + // ES #sec-proxy-object-internal-methods-and-internal-slots-isextensible + // https://tc39.github.io/ecma262/#sec-proxy-object-internal-methods-and-internal-slots-isextensible + transitioning builtin ProxyIsExtensible(implicit context: + Context)(proxy: JSProxy): Object { + PerformStackCheck(); + const kTrapName: constexpr string = 'isExtensible'; + try { + // 1. Let handler be O.[[ProxyHandler]]. + // 2. If handler is null, throw a TypeError exception. + // 3. Assert: Type(handler) is Object. + assert(proxy.handler == Null || Is(proxy.handler)); + const handler = + Cast(proxy.handler) otherwise ThrowProxyHandlerRevoked; + + // 4. Let target be O.[[ProxyTarget]]. + const target = proxy.target; + + // 5. Let trap be ? GetMethod(handler, "isExtensible"). + // 6. If trap is undefined, then (see 6.a below). + const trap: Callable = GetMethod(handler, kTrapName) + otherwise goto TrapUndefined(target); + + // 7. Let booleanTrapResult be ToBoolean(? Call(trap, handler, « + // target»)). + const trapResult = ToBoolean(Call(context, trap, handler, target)); + + // 8. Let targetResult be ? IsExtensible(target). + const targetResult: bool = ToBoolean(object::ObjectIsExtensible(target)); + + // 9. If SameValue(booleanTrapResult, targetResult) is false, throw a + // TypeError exception. 
+ if (trapResult != targetResult) { + ThrowTypeError( + kProxyIsExtensibleInconsistent, + SelectBooleanConstant(targetResult)); + } + // 10. Return booleanTrapResult. + return SelectBooleanConstant(trapResult); + } + label TrapUndefined(target: Object) { + // 6.a. Return ? IsExtensible(target). + return object::ObjectIsExtensible(target); + } + label ThrowProxyHandlerRevoked deferred { + ThrowTypeError(kProxyRevoked, kTrapName); + } + } +} diff --git a/deps/v8/src/builtins/proxy-prevent-extensions.tq b/deps/v8/src/builtins/proxy-prevent-extensions.tq new file mode 100644 index 00000000000000..6d5d2569fb8645 --- /dev/null +++ b/deps/v8/src/builtins/proxy-prevent-extensions.tq @@ -0,0 +1,66 @@ +// Copyright 2019 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#include 'src/builtins/builtins-proxy-gen.h' + +namespace proxy { + + // ES #sec-proxy-object-internal-methods-and-internal-slots-preventextensions + // https://tc39.es/ecma262/#sec-proxy-object-internal-methods-and-internal-slots-preventextensions + transitioning builtin + ProxyPreventExtensions(implicit context: Context)( + proxy: JSProxy, doThrow: Boolean): Object { + PerformStackCheck(); + const kTrapName: constexpr string = 'preventExtensions'; + try { + // 1. Let handler be O.[[ProxyHandler]]. + // 2. If handler is null, throw a TypeError exception. + // 3. Assert: Type(handler) is Object. + assert(proxy.handler == Null || Is(proxy.handler)); + const handler = + Cast(proxy.handler) otherwise ThrowProxyHandlerRevoked; + + // 4. Let target be O.[[ProxyTarget]]. + const target = proxy.target; + + // 5. Let trap be ? GetMethod(handler, "preventExtensions"). + // 6. If trap is undefined, then (see 6.a below). + const trap: Callable = GetMethod(handler, kTrapName) + otherwise goto TrapUndefined(target); + + // 7. Let booleanTrapResult be ToBoolean(? Call(trap, handler, « + // target»)). + const trapResult = Call(context, trap, handler, target); + + // 8. If booleanTrapResult is true, then + // 8.a. Let extensibleTarget be ? IsExtensible(target). + // 8.b If extensibleTarget is true, throw a TypeError exception. + if (BranchIfToBooleanIsTrue(trapResult)) { + const extensibleTarget: Object = object::ObjectIsExtensible(target); + assert(extensibleTarget == True || extensibleTarget == False); + if (extensibleTarget == True) { + ThrowTypeError(kProxyPreventExtensionsExtensible); + } + } else { + if (doThrow == True) { + ThrowTypeError(kProxyTrapReturnedFalsish, kTrapName); + } + return False; + } + + // 9. Return booleanTrapResult. + return True; + } + label TrapUndefined(target: Object) { + // 6.a. Return ? target.[[PreventExtensions]](). 
+ if (doThrow == True) { + return object::ObjectPreventExtensionsThrow(target); + } + return object::ObjectPreventExtensionsDontThrow(target); + } + label ThrowProxyHandlerRevoked deferred { + ThrowTypeError(kProxyRevoked, kTrapName); + } + } +} // namespace proxy diff --git a/deps/v8/src/builtins/proxy-revocable.tq b/deps/v8/src/builtins/proxy-revocable.tq index 695f005c9bc76e..b09baab9cf1913 100644 --- a/deps/v8/src/builtins/proxy-revocable.tq +++ b/deps/v8/src/builtins/proxy-revocable.tq @@ -7,17 +7,13 @@ namespace proxy { extern macro ProxiesCodeStubAssembler::AllocateProxyRevokeFunction( - Object, Object): JSFunction; - macro AllocateProxyRevokeFunction(implicit context: Context)(proxy: JSProxy): - JSFunction { - return AllocateProxyRevokeFunction(proxy, context); - } + implicit context: Context)(JSProxy): JSFunction; // Proxy.revocable(target, handler) // https://tc39.github.io/ecma262/#sec-proxy.revocable transitioning javascript builtin ProxyRevocable( - context: Context, receiver: Object, target: Object, + context: Context, _receiver: Object, target: Object, handler: Object): JSProxyRevocableResult { try { const targetJSReceiver = diff --git a/deps/v8/src/builtins/proxy-revoke.tq b/deps/v8/src/builtins/proxy-revoke.tq index 400f586b2159c1..d89b54077ae109 100644 --- a/deps/v8/src/builtins/proxy-revoke.tq +++ b/deps/v8/src/builtins/proxy-revoke.tq @@ -9,7 +9,7 @@ namespace proxy { // Proxy Revocation Functions // https://tc39.github.io/ecma262/#sec-proxy-revocation-functions transitioning javascript builtin - ProxyRevoke(implicit context: Context)(): Undefined { + ProxyRevoke(js-implicit context: Context)(): Undefined { // 1. Let p be F.[[RevocableProxy]]. const proxyObject: Object = context[PROXY_SLOT]; diff --git a/deps/v8/src/builtins/proxy-set-property.tq b/deps/v8/src/builtins/proxy-set-property.tq index 72181e08a824fc..d0411a8e894e9a 100644 --- a/deps/v8/src/builtins/proxy-set-property.tq +++ b/deps/v8/src/builtins/proxy-set-property.tq @@ -30,21 +30,20 @@ namespace proxy { return Undefined; } - // 2. Let handler be O.[[ProxyHandler]]. - const handler: Object = proxy.handler; - try { + // 2. Let handler be O.[[ProxyHandler]]. // 3. If handler is null, throw a TypeError exception. // 4. Assert: Type(handler) is Object. - const handlerJSReceiver = - Cast(handler) otherwise ThrowProxyHandlerRevoked; + assert(proxy.handler == Null || Is(proxy.handler)); + const handler = + Cast(proxy.handler) otherwise ThrowProxyHandlerRevoked; // 5. Let target be O.[[ProxyTarget]]. - const target = proxy.target; + const target = UnsafeCast(proxy.target); // 6. Let trap be ? GetMethod(handler, "set"). // 7. If trap is undefined, then (see 7.a below). - const trap: Callable = GetMethod(handlerJSReceiver, 'set') + const trap: Callable = GetMethod(handler, 'set') otherwise goto TrapUndefined(target); // 8. Let booleanTrapResult be ToBoolean(? Call(trap, handler, @@ -61,11 +60,11 @@ namespace proxy { // i. If targetDesc.[[Set]] is undefined, throw a TypeError // exception. // 12. Return true. 
- const trapResult = Call( - context, trap, handlerJSReceiver, target, name, value, receiverValue); + const trapResult = + Call(context, trap, handler, target, name, value, receiverValue); if (BranchIfToBooleanIsTrue(trapResult)) { - return CheckGetSetTrapResult( - target, proxy, name, trapResult, kProxySet); + CheckGetSetTrapResult(target, proxy, name, value, kProxySet); + return value; } ThrowTypeErrorIfStrict( SmiConstant(kProxyTrapReturnedFalsishFor), 'set', name); @@ -77,7 +76,6 @@ namespace proxy { return value; } label ThrowProxyHandlerRevoked deferred { - assert(handler == Null); ThrowTypeError(kProxyRevoked, 'set'); } } diff --git a/deps/v8/src/builtins/proxy-set-prototype-of.tq b/deps/v8/src/builtins/proxy-set-prototype-of.tq new file mode 100644 index 00000000000000..bbd99be4117eaa --- /dev/null +++ b/deps/v8/src/builtins/proxy-set-prototype-of.tq @@ -0,0 +1,77 @@ +// Copyright 2019 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#include 'src/builtins/builtins-proxy-gen.h' + +namespace proxy { + + // ES #sec-proxy-object-internal-methods-and-internal-slots-setprototypeof-v + // https://tc39.es/ecma262/#sec-proxy-object-internal-methods-and-internal-slots-setprototypeof-v + transitioning builtin + ProxySetPrototypeOf(implicit context: Context)( + proxy: JSProxy, proto: Object, doThrow: Boolean): Object { + PerformStackCheck(); + const kTrapName: constexpr string = 'setPrototypeOf'; + try { + // 1. Assert: Either Type(V) is Object or Type(V) is Null. + assert(proto == Null || Is(proto)); + + // 2. Let handler be O.[[ProxyHandler]]. + // 3. If handler is null, throw a TypeError exception. + // 4. Assert: Type(handler) is Object. + assert(proxy.handler == Null || Is(proxy.handler)); + const handler = + Cast(proxy.handler) otherwise ThrowProxyHandlerRevoked; + + // 5. Let target be O.[[ProxyTarget]]. + const target = proxy.target; + + // 6. Let trap be ? GetMethod(handler, "setPrototypeOf"). + // 7. If trap is undefined, then (see 7.a below). + const trap: Callable = GetMethod(handler, kTrapName) + otherwise goto TrapUndefined(target, proto); + + // 8. Let booleanTrapResult be ToBoolean(? Call(trap, handler, « target, V + // »)). + const trapResult = Call(context, trap, handler, target, proto); + + // 9. If booleanTrapResult is false, return false. + if (BranchIfToBooleanIsFalse(trapResult)) { + if (doThrow == True) { + ThrowTypeError(kProxyTrapReturnedFalsishFor, kTrapName); + } + return False; + } + + // 10. Let extensibleTarget be ? IsExtensible(target). + // 11. If extensibleTarget is true, return true. + const extensibleTarget: Object = object::ObjectIsExtensible(target); + assert(extensibleTarget == True || extensibleTarget == False); + if (extensibleTarget == True) { + return True; + } + + // 12. Let targetProto be ? target.[[GetPrototypeOf]](). + const targetProto = object::ObjectGetPrototypeOf(target); + + // 13. If SameValue(V, targetProto) is false, throw a TypeError + // exception. + // 14. Return true. + if (BranchIfSameValue(proto, targetProto)) { + return True; + } + ThrowTypeError(kProxySetPrototypeOfNonExtensible); + } + label TrapUndefined(target: Object, proto: Object) { + // 7.a. Return ? target.[[SetPrototypeOf]](). 
+ if (doThrow == True) { + return object::ObjectSetPrototypeOfThrow(target, proto); + } + return object::ObjectSetPrototypeOfDontThrow(target, proto); + } + label ThrowProxyHandlerRevoked deferred { + ThrowTypeError(kProxyRevoked, kTrapName); + } + } +} diff --git a/deps/v8/src/builtins/proxy.tq b/deps/v8/src/builtins/proxy.tq index 16bba85292f898..d95def5d0e3874 100644 --- a/deps/v8/src/builtins/proxy.tq +++ b/deps/v8/src/builtins/proxy.tq @@ -7,25 +7,23 @@ namespace proxy { extern macro ProxiesCodeStubAssembler::AllocateProxy( - JSReceiver, JSReceiver, Context): JSProxy; - macro AllocateProxy(implicit context: Context)( - target: JSReceiver, handler: JSReceiver): JSProxy { - return AllocateProxy(target, handler, context); - } + implicit context: Context)(JSReceiver, JSReceiver): JSProxy; macro IsRevokedProxy(implicit context: Context)(o: JSReceiver): bool { const proxy: JSProxy = Cast(o) otherwise return false; - const handler: JSReceiver = - Cast(proxy.handler) otherwise return true; + Cast(proxy.handler) otherwise return true; return false; } extern transitioning macro ProxiesCodeStubAssembler::CheckGetSetTrapResult( implicit context: - Context)(Object, JSProxy, Name, Object, constexpr int31): Object; + Context)(JSReceiver, JSProxy, Name, Object, constexpr int31); + + extern transitioning macro ProxiesCodeStubAssembler::CheckDeleteTrapResult( + implicit context: Context)(JSReceiver, JSProxy, Name); extern transitioning macro ProxiesCodeStubAssembler::CheckHasTrapResult( - implicit context: Context)(Object, JSProxy, Name): Object; + implicit context: Context)(JSReceiver, JSProxy, Name); const kProxyNonObject: constexpr MessageTemplate generates 'MessageTemplate::kProxyNonObject'; @@ -37,6 +35,20 @@ namespace proxy { generates 'MessageTemplate::kProxyTrapReturnedFalsishFor'; const kProxyPrivate: constexpr MessageTemplate generates 'MessageTemplate::kProxyPrivate'; + const kProxyIsExtensibleInconsistent: constexpr MessageTemplate + generates 'MessageTemplate::kProxyIsExtensibleInconsistent'; + const kProxyPreventExtensionsExtensible: constexpr MessageTemplate + generates 'MessageTemplate::kProxyPreventExtensionsExtensible'; + const kProxyTrapReturnedFalsish: constexpr MessageTemplate + generates 'MessageTemplate::kProxyTrapReturnedFalsish'; + const kProxyGetPrototypeOfInvalid: constexpr MessageTemplate + generates 'MessageTemplate::kProxyGetPrototypeOfInvalid'; + const kProxyGetPrototypeOfNonExtensible: constexpr MessageTemplate + generates 'MessageTemplate::kProxyGetPrototypeOfNonExtensible'; + const kProxySetPrototypeOfNonExtensible: constexpr MessageTemplate + generates 'MessageTemplate::kProxySetPrototypeOfNonExtensible'; + const kProxyDeletePropertyNonExtensible: constexpr MessageTemplate + generates 'MessageTemplate::kProxyDeletePropertyNonExtensible'; const kProxyGet: constexpr int31 generates 'JSProxy::AccessKind::kGet'; diff --git a/deps/v8/src/builtins/reflect.tq b/deps/v8/src/builtins/reflect.tq new file mode 100644 index 00000000000000..4c25e8338f8883 --- /dev/null +++ b/deps/v8/src/builtins/reflect.tq @@ -0,0 +1,82 @@ +// Copyright 2019 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
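The ProxySetPrototypeOf builtin above only trusts the trap's answer while the target is extensible; once the target is non-extensible, the handler's result must match the target's actual prototype (steps 10-13). In JS terms (illustrative only, not part of the patch):

  const target = {};
  Object.preventExtensions(target);
  const p = new Proxy(target, {setPrototypeOf() { return true; }});
  try {
    Object.setPrototypeOf(p, {});          // prototypes differ: TypeError
  } catch (e) {
    console.log(e instanceof TypeError);   // true
  }
  // Reporting the target's real prototype is consistent and allowed:
  Object.setPrototypeOf(p, Object.getPrototypeOf(target));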
+ +namespace reflect { + + const kCalledOnNonObject: constexpr MessageTemplate + generates 'MessageTemplate::kCalledOnNonObject'; + + // ES6 section 26.1.10 Reflect.isExtensible + transitioning javascript builtin ReflectIsExtensible( + js-implicit context: Context)(_receiver: Object, object: Object): Object { + const objectJSReceiver = Cast(object) + otherwise ThrowTypeError(kCalledOnNonObject, 'Reflect.isExtensible'); + return object::ObjectIsExtensible(objectJSReceiver); + } + + // ES6 section 26.1.12 Reflect.preventExtensions + transitioning javascript builtin ReflectPreventExtensions( + js-implicit context: Context)(_receiver: Object, object: Object): Object { + const objectJSReceiver = Cast(object) + otherwise ThrowTypeError(kCalledOnNonObject, 'Reflect.preventExtensions'); + return object::ObjectPreventExtensionsDontThrow(objectJSReceiver); + } + + // ES6 section 26.1.8 Reflect.getPrototypeOf + transitioning javascript builtin ReflectGetPrototypeOf( + js-implicit context: Context)(_receiver: Object, object: Object): Object { + const objectJSReceiver = Cast(object) + otherwise ThrowTypeError(kCalledOnNonObject, 'Reflect.getPrototypeOf'); + return object::JSReceiverGetPrototypeOf(objectJSReceiver); + } + + // ES6 section 26.1.14 Reflect.setPrototypeOf + transitioning javascript builtin ReflectSetPrototypeOf( + js-implicit context: + Context)(_receiver: Object, object: Object, proto: Object): Object { + const objectJSReceiver = Cast(object) + otherwise ThrowTypeError(kCalledOnNonObject, 'Reflect.setPrototypeOf'); + if (proto == Null || Is(proto)) { + return object::ObjectSetPrototypeOfDontThrow(objectJSReceiver, proto); + } + ThrowTypeError(kProtoObjectOrNull, proto); + } + + extern transitioning builtin ToName(implicit context: Context)(Object): Name; + type OnNonExistent constexpr 'OnNonExistent'; + const kReturnUndefined: constexpr OnNonExistent + generates 'OnNonExistent::kReturnUndefined'; + extern macro SmiConstant(constexpr OnNonExistent): Smi; + extern transitioning builtin GetPropertyWithReceiver( + implicit context: Context)(Object, Name, Object, Smi): Object; + + // ES6 section 26.1.6 Reflect.get + transitioning javascript builtin + ReflectGet(js-implicit context: Context)(...arguments): Object { + const length = arguments.length; + const object: Object = length > 0 ? arguments[0] : Undefined; + const objectJSReceiver = Cast(object) + otherwise ThrowTypeError(kCalledOnNonObject, 'Reflect.get'); + const propertyKey: Object = length > 1 ? arguments[1] : Undefined; + const name: Name = ToName(propertyKey); + const receiver: Object = length > 2 ? 
arguments[2] : objectJSReceiver; + return GetPropertyWithReceiver( + objectJSReceiver, name, receiver, SmiConstant(kReturnUndefined)); + } + + // ES6 section 26.1.4 Reflect.deleteProperty + transitioning javascript builtin ReflectDeleteProperty( + js-implicit context: + Context)(_receiver: Object, object: Object, key: Object): Object { + const objectJSReceiver = Cast(object) + otherwise ThrowTypeError(kCalledOnNonObject, 'Reflect.deleteProperty'); + const name: Name = ToName(key); + if (IsPrivateSymbol(name)) { + return DeleteProperty(objectJSReceiver, name, kSloppy); + } + const proxy = Cast(objectJSReceiver) + otherwise return DeleteProperty(objectJSReceiver, name, kSloppy); + return proxy::ProxyDeleteProperty(proxy, name, kSloppy); + } +} // namespace reflect diff --git a/deps/v8/src/builtins/regexp-replace.tq b/deps/v8/src/builtins/regexp-replace.tq index 9b95f99f416de2..cb0038c6b61722 100644 --- a/deps/v8/src/builtins/regexp-replace.tq +++ b/deps/v8/src/builtins/regexp-replace.tq @@ -22,7 +22,7 @@ namespace regexp_replace { String, JSRegExp, Callable): String; extern macro - RegExpBuiltinsAssembler::AdvanceStringIndexFast(String, Number, bool): Smi; + RegExpBuiltinsAssembler::AdvanceStringIndexFast(String, Smi, bool): Smi; extern macro RegExpBuiltinsAssembler::RegExpPrototypeExecBodyWithoutResultFast( implicit context: Context)(JSReceiver, String): @@ -72,8 +72,7 @@ namespace regexp_replace { transitioning macro RegExpReplaceCallableWithExplicitCaptures(implicit context: Context)( - matchesElements: FixedArray, matchesLength: intptr, string: String, - replaceFn: Callable) { + matchesElements: FixedArray, matchesLength: intptr, replaceFn: Callable) { for (let i: intptr = 0; i < matchesLength; i++) { const elArray = Cast(matchesElements.objects[i]) otherwise continue; @@ -124,7 +123,7 @@ namespace regexp_replace { matchesElements, matchesLengthInt, string, replaceFn); } else { RegExpReplaceCallableWithExplicitCaptures( - matchesElements, matchesLengthInt, string, replaceFn); + matchesElements, matchesLengthInt, replaceFn); } return StringBuilderConcat(matches, matchesLength, string); @@ -138,7 +137,7 @@ namespace regexp_replace { let result: String = kEmptyString; let lastMatchEnd: Smi = 0; let unicode: bool = false; - let replaceLength: Smi = replaceString.length_smi; + const replaceLength: Smi = replaceString.length_smi; const global: bool = regexp.global; if (global) { @@ -209,7 +208,7 @@ namespace regexp_replace { } transitioning javascript builtin RegExpPrototypeReplace( - context: Context, receiver: Object, ...arguments): Object { + js-implicit context: Context, receiver: Object)(...arguments): Object { const methodName: constexpr string = 'RegExp.prototype.@@replace'; // RegExpPrototypeReplace is a bit of a beast - a summary of dispatch logic: diff --git a/deps/v8/src/builtins/s390/builtins-s390.cc b/deps/v8/src/builtins/s390/builtins-s390.cc index bf8c0cb68a8bf0..854f31cece3cd8 100644 --- a/deps/v8/src/builtins/s390/builtins-s390.cc +++ b/deps/v8/src/builtins/s390/builtins-s390.cc @@ -60,24 +60,20 @@ void Builtins::Generate_InternalArrayConstructor(MacroAssembler* masm) { static void GenerateTailCallToReturnedCode(MacroAssembler* masm, Runtime::FunctionId function_id) { // ----------- S t a t e ------------- - // -- r2 : argument count (preserved for callee) // -- r3 : target function (preserved for callee) // -- r5 : new target (preserved for callee) // ----------------------------------- { FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL); - // Push the number of 
arguments to the callee. // Push a copy of the target function and the new target. // Push function as parameter to the runtime call. - __ SmiTag(r2); - __ Push(r2, r3, r5, r3); + __ Push(r3, r5, r3); __ CallRuntime(function_id, 1); __ LoadRR(r4, r2); // Restore target function and new target. - __ Pop(r2, r3, r5); - __ SmiUntag(r2); + __ Pop(r3, r5); } static_assert(kJavaScriptCallCodeStartRegister == r4, "ABI mismatch"); __ JumpCodeObject(r4); @@ -110,6 +106,7 @@ void Generate_JSBuiltinsConstructStubHelper(MacroAssembler* masm) { // -- sp[...]: constructor arguments // ----------------------------------- + Register scratch = r4; Label stack_overflow; Generate_StackOverflowCheck(masm, r2, r7, &stack_overflow); @@ -138,13 +135,13 @@ void Generate_JSBuiltinsConstructStubHelper(MacroAssembler* masm) { // sp[2]: number of arguments (smi-tagged) Label loop, no_args; __ beq(&no_args); - __ ShiftLeftP(ip, r2, Operand(kPointerSizeLog2)); - __ SubP(sp, sp, ip); + __ ShiftLeftP(scratch, r2, Operand(kPointerSizeLog2)); + __ SubP(sp, sp, scratch); __ LoadRR(r1, r2); __ bind(&loop); - __ lay(ip, MemOperand(ip, -kPointerSize)); - __ LoadP(r0, MemOperand(ip, r6)); - __ StoreP(r0, MemOperand(ip, sp)); + __ lay(scratch, MemOperand(scratch, -kPointerSize)); + __ LoadP(r0, MemOperand(scratch, r6)); + __ StoreP(r0, MemOperand(scratch, sp)); __ BranchOnCount(r1, &loop); __ bind(&no_args); @@ -159,15 +156,15 @@ void Generate_JSBuiltinsConstructStubHelper(MacroAssembler* masm) { // Restore context from the frame. __ LoadP(cp, MemOperand(fp, ConstructFrameConstants::kContextOffset)); // Restore smi-tagged arguments count from the frame. - __ LoadP(r3, MemOperand(fp, ConstructFrameConstants::kLengthOffset)); + __ LoadP(scratch, MemOperand(fp, ConstructFrameConstants::kLengthOffset)); // Leave construct frame. } // Remove caller arguments from the stack and return. STATIC_ASSERT(kSmiTagSize == 1 && kSmiTag == 0); - __ SmiToPtrArrayOffset(r3, r3); - __ AddP(sp, sp, r3); + __ SmiToPtrArrayOffset(scratch, scratch); + __ AddP(sp, sp, scratch); __ AddP(sp, sp, Operand(kPointerSize)); __ Ret(); @@ -296,13 +293,13 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) { __ ltgr(r2, r2); __ beq(&no_args); - __ ShiftLeftP(ip, r2, Operand(kPointerSizeLog2)); - __ SubP(sp, sp, ip); + __ ShiftLeftP(r8, r2, Operand(kPointerSizeLog2)); + __ SubP(sp, sp, r8); __ LoadRR(r1, r2); __ bind(&loop); - __ lay(ip, MemOperand(ip, -kPointerSize)); - __ LoadP(r0, MemOperand(ip, r6)); - __ StoreP(r0, MemOperand(ip, sp)); + __ lay(r8, MemOperand(r8, -kPointerSize)); + __ LoadP(r0, MemOperand(r8, r6)); + __ StoreP(r0, MemOperand(r8, sp)); __ BranchOnCount(r1, &loop); __ bind(&no_args); @@ -409,11 +406,13 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) { // Flood function if we are stepping. Label prepare_step_in_if_stepping, prepare_step_in_suspended_generator; Label stepping_prepared; + Register scratch = r7; + ExternalReference debug_hook = ExternalReference::debug_hook_on_function_call_address(masm->isolate()); - __ Move(ip, debug_hook); - __ LoadB(ip, MemOperand(ip)); - __ CmpSmiLiteral(ip, Smi::zero(), r0); + __ Move(scratch, debug_hook); + __ LoadB(scratch, MemOperand(scratch)); + __ CmpSmiLiteral(scratch, Smi::zero(), r0); __ bne(&prepare_step_in_if_stepping); // Flood function if we need to continue stepping in the suspended generator. 
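For orientation, Generate_ResumeGeneratorTrampoline is the stub that services every generator resume from JS; the debug-hook load above gives a stepping debugger a chance to intercept the resume before the receiver and parked register values are pushed. Illustrative JS only, not part of the patch:

  function* gen() {
    yield 1;
    yield 2;
  }
  const it = gen();   // a suspended JSGeneratorObject
  it.next();          // each resume enters the trampoline, which checks the
  it.next();          // debug hook and then re-enters the generator body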
@@ -421,9 +420,9 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
   ExternalReference debug_suspended_generator =
       ExternalReference::debug_suspended_generator_address(masm->isolate());
-  __ Move(ip, debug_suspended_generator);
-  __ LoadP(ip, MemOperand(ip));
-  __ CmpP(ip, r3);
+  __ Move(scratch, debug_suspended_generator);
+  __ LoadP(scratch, MemOperand(scratch));
+  __ CmpP(scratch, r3);
   __ beq(&prepare_step_in_suspended_generator);
   __ bind(&stepping_prepared);
@@ -434,8 +433,8 @@
   __ blt(&stack_overflow);
   // Push receiver.
-  __ LoadP(ip, FieldMemOperand(r3, JSGeneratorObject::kReceiverOffset));
-  __ Push(ip);
+  __ LoadP(scratch, FieldMemOperand(r3, JSGeneratorObject::kReceiverOffset));
+  __ Push(scratch);
   // ----------- S t a t e -------------
   // -- r3 : the JSGeneratorObject to resume
@@ -626,6 +625,9 @@ void Generate_JSEntryVariant(MacroAssembler* masm, StackFrame::Type type,
       IsolateAddressId::kCEntryFPAddress, masm->isolate()));
   __ LoadP(r6, MemOperand(r6));
   __ StoreMultipleP(r6, r9, MemOperand(sp, kPointerSize));
+
+  Register scratch = r8;
+
   // Set up frame pointer for the frame to be pushed.
   // Need to add kPointerSize, because sp has one extra
   // frame already for the frame type being pushed later.
@@ -642,17 +644,17 @@
       ExternalReference::Create(IsolateAddressId::kJSEntrySPAddress,
                                 masm->isolate());
   __ Move(r7, js_entry_sp);
-  __ LoadAndTestP(r8, MemOperand(r7));
+  __ LoadAndTestP(scratch, MemOperand(r7));
   __ bne(&non_outermost_js, Label::kNear);
   __ StoreP(fp, MemOperand(r7));
-  __ Load(ip, Operand(StackFrame::OUTERMOST_JSENTRY_FRAME));
+  __ Load(scratch, Operand(StackFrame::OUTERMOST_JSENTRY_FRAME));
   Label cont;
   __ b(&cont, Label::kNear);
   __ bind(&non_outermost_js);
-  __ Load(ip, Operand(StackFrame::INNER_JSENTRY_FRAME));
+  __ Load(scratch, Operand(StackFrame::INNER_JSENTRY_FRAME));
   __ bind(&cont);
-  __ StoreP(ip, MemOperand(sp));  // frame-type
+  __ StoreP(scratch, MemOperand(sp));  // frame-type
   // Jump to a faked try block that does the invoke, with a faked catch
   // block that sets the pending exception.
@@ -668,10 +670,11 @@
   // field in the JSEnv and return a failure sentinel. Coming in here the
   // fp will be invalid because the PushStackHandler below sets it to 0 to
   // signal the existence of the JSEntry frame.
-  __ Move(ip, ExternalReference::Create(
-      IsolateAddressId::kPendingExceptionAddress, masm->isolate()));
+  __ Move(scratch,
+          ExternalReference::Create(IsolateAddressId::kPendingExceptionAddress,
+                                    masm->isolate()));
-  __ StoreP(r2, MemOperand(ip));
+  __ StoreP(r2, MemOperand(scratch));
   __ LoadRoot(r2, RootIndex::kException);
   __ b(&exit, Label::kNear);
@@ -704,16 +707,16 @@
   __ pop(r7);
   __ CmpP(r7, Operand(StackFrame::OUTERMOST_JSENTRY_FRAME));
   __ bne(&non_outermost_js_2, Label::kNear);
-  __ mov(r8, Operand::Zero());
+  __ mov(scratch, Operand::Zero());
   __ Move(r7, js_entry_sp);
-  __ StoreP(r8, MemOperand(r7));
+  __ StoreP(scratch, MemOperand(r7));
   __ bind(&non_outermost_js_2);
   // Restore the top frame descriptors from the stack.
   __ pop(r5);
-  __ Move(ip, ExternalReference::Create(
-      IsolateAddressId::kCEntryFPAddress, masm->isolate()));
-  __ StoreP(r5, MemOperand(ip));
+  __ Move(scratch, ExternalReference::Create(IsolateAddressId::kCEntryFPAddress,
+                                             masm->isolate()));
+  __ StoreP(r5, MemOperand(scratch));
   // Reset the stack to the callee saved registers.
   __ lay(sp, MemOperand(sp, -EntryFrameConstants::kCallerFPOffset));
@@ -949,13 +952,11 @@ static void MaybeTailCallOptimizedCodeSlot(MacroAssembler* masm,
                                            Register scratch1, Register scratch2,
                                            Register scratch3) {
   // ----------- S t a t e -------------
-  //  -- r0 : argument count (preserved for callee if needed, and caller)
-  //  -- r3 : new target (preserved for callee if needed, and caller)
-  //  -- r1 : target function (preserved for callee if needed, and caller)
+  //  -- r5 : new target (preserved for callee if needed, and caller)
+  //  -- r3 : target function (preserved for callee if needed, and caller)
   //  -- feedback vector (preserved for caller if needed)
   // -----------------------------------
-  DCHECK(
-      !AreAliased(feedback_vector, r2, r3, r5, scratch1, scratch2, scratch3));
+  DCHECK(!AreAliased(feedback_vector, r3, r5, scratch1, scratch2, scratch3));
   Label optimized_code_slot_is_weak_ref, fallthrough;
@@ -1140,6 +1141,15 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
            FieldMemOperand(closure, JSFunction::kFeedbackCellOffset));
   __ LoadP(feedback_vector,
            FieldMemOperand(feedback_vector, Cell::kValueOffset));
+
+  Label push_stack_frame;
+  // Check if feedback vector is valid. If valid, check for optimized code
+  // and update invocation count. Otherwise, setup the stack frame.
+  __ LoadP(r6, FieldMemOperand(feedback_vector, HeapObject::kMapOffset));
+  __ LoadLogicalHalfWordP(r6, FieldMemOperand(r6, Map::kInstanceTypeOffset));
+  __ CmpP(r6, Operand(FEEDBACK_VECTOR_TYPE));
+  __ bne(&push_stack_frame);
+
   // Read off the optimized code slot in the feedback vector, and if there
   // is optimized code or an optimization marker, call that instead.
   MaybeTailCallOptimizedCodeSlot(masm, feedback_vector, r6, r8, r7);
@@ -1154,6 +1164,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
   // Open a frame scope to indicate that there is a frame on the stack. The
   // MANUAL indicates that the scope shouldn't actually generate code to set up
   // the frame (that is done below).
+  __ bind(&push_stack_frame);
   FrameScope frame_scope(masm, StackFrame::MANUAL);
   __ PushStandardFrame(closure);
@@ -1161,12 +1172,12 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
   // 8-bit fields next to each other, so we could just optimize by writing a
   // 16-bit. These static asserts guard our assumption is valid.
   STATIC_ASSERT(BytecodeArray::kBytecodeAgeOffset ==
-                BytecodeArray::kOSRNestingLevelOffset + kCharSize);
+                BytecodeArray::kOsrNestingLevelOffset + kCharSize);
   STATIC_ASSERT(BytecodeArray::kNoAgeBytecodeAge == 0);
   __ lghi(r1, Operand(0));
   __ StoreHalfWord(r1,
                    FieldMemOperand(kInterpreterBytecodeArrayRegister,
-                                   BytecodeArray::kOSRNestingLevelOffset),
+                                   BytecodeArray::kOsrNestingLevelOffset),
                    r0);
   // Load the initial bytecode offset.
@@ -1447,11 +1458,13 @@ static void Generate_InterpreterEnterBytecode(MacroAssembler* masm) {
   __ SmiUntag(kInterpreterBytecodeOffsetRegister);
   // Dispatch to the target bytecode.
- __ LoadlB(ip, MemOperand(kInterpreterBytecodeArrayRegister, - kInterpreterBytecodeOffsetRegister)); - __ ShiftLeftP(ip, ip, Operand(kPointerSizeLog2)); + UseScratchRegisterScope temps(masm); + Register scratch = temps.Acquire(); + __ LoadlB(scratch, MemOperand(kInterpreterBytecodeArrayRegister, + kInterpreterBytecodeOffsetRegister)); + __ ShiftLeftP(scratch, scratch, Operand(kPointerSizeLog2)); __ LoadP(kJavaScriptCallCodeStartRegister, - MemOperand(kInterpreterDispatchTableRegister, ip)); + MemOperand(kInterpreterDispatchTableRegister, scratch)); __ Jump(kJavaScriptCallCodeStartRegister); } @@ -1578,13 +1591,17 @@ void Generate_ContinueToBuiltinHelper(MacroAssembler* masm, __ LoadP( fp, MemOperand(sp, BuiltinContinuationFrameConstants::kFixedFrameSizeFromFp)); - __ Pop(ip); + // Load builtin index (stored as a Smi) and use it to get the builtin start + // address from the builtins table. + UseScratchRegisterScope temps(masm); + Register builtin = temps.Acquire(); + __ Pop(builtin); __ AddP(sp, sp, Operand(BuiltinContinuationFrameConstants::kFixedFrameSizeFromFp)); __ Pop(r0); __ LoadRR(r14, r0); - __ AddP(ip, ip, Operand(Code::kHeaderSize - kHeapObjectTag)); - __ Jump(ip); + __ LoadEntryFromBuiltinIndex(builtin); + __ Jump(builtin); } } // namespace @@ -1745,13 +1762,14 @@ void Builtins::Generate_FunctionPrototypeCall(MacroAssembler* masm) { // r2: actual number of arguments // r3: callable { + Register scratch = r5; Label loop; // Calculate the copy start address (destination). Copy end address is sp. __ AddP(r4, sp, r4); __ bind(&loop); - __ LoadP(ip, MemOperand(r4, -kPointerSize)); - __ StoreP(ip, MemOperand(r4)); + __ LoadP(scratch, MemOperand(r4, -kPointerSize)); + __ StoreP(scratch, MemOperand(r4)); __ SubP(r4, Operand(kPointerSize)); __ CmpP(r4, sp); __ bne(&loop); @@ -1944,7 +1962,7 @@ void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm, // Check for stack overflow. Label stack_overflow; - Generate_StackOverflowCheck(masm, r6, ip, &stack_overflow); + Generate_StackOverflowCheck(masm, r6, scratch, &stack_overflow); // Push arguments onto the stack (thisArgument is already on the stack). { @@ -1955,13 +1973,13 @@ void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm, Operand(FixedArray::kHeaderSize - kHeapObjectTag - kPointerSize)); __ LoadRR(r1, r6); __ bind(&loop); - __ LoadP(ip, MemOperand(r4, kPointerSize)); + __ LoadP(scratch, MemOperand(r4, kPointerSize)); __ la(r4, MemOperand(r4, kPointerSize)); - __ CompareRoot(ip, RootIndex::kTheHoleValue); + __ CompareRoot(scratch, RootIndex::kTheHoleValue); __ bne(&skip, Label::kNear); - __ LoadRoot(ip, RootIndex::kUndefinedValue); + __ LoadRoot(scratch, RootIndex::kUndefinedValue); __ bind(&skip); - __ push(ip); + __ push(scratch); __ BranchOnCount(r1, &loop); __ bind(&no_args); __ AddP(r2, r2, r6); @@ -2007,8 +2025,10 @@ void Builtins::Generate_CallOrConstructForwardVarargs(MacroAssembler* masm, // Check if we have an arguments adaptor frame below the function frame. 
Label arguments_adaptor, arguments_done; __ LoadP(r6, MemOperand(fp, StandardFrameConstants::kCallerFPOffset)); - __ LoadP(ip, MemOperand(r6, CommonFrameConstants::kContextOrFrameTypeOffset)); - __ CmpP(ip, Operand(StackFrame::TypeToMarker(StackFrame::ARGUMENTS_ADAPTOR))); + __ LoadP(scratch, + MemOperand(r6, CommonFrameConstants::kContextOrFrameTypeOffset)); + __ CmpP(scratch, + Operand(StackFrame::TypeToMarker(StackFrame::ARGUMENTS_ADAPTOR))); __ beq(&arguments_adaptor); { __ LoadP(r7, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset)); @@ -2042,9 +2062,9 @@ void Builtins::Generate_CallOrConstructForwardVarargs(MacroAssembler* masm, __ AddP(r2, r2, r7); __ bind(&loop); { - __ ShiftLeftP(ip, r7, Operand(kPointerSizeLog2)); - __ LoadP(ip, MemOperand(r6, ip)); - __ push(ip); + __ ShiftLeftP(scratch, r7, Operand(kPointerSizeLog2)); + __ LoadP(scratch, MemOperand(r6, scratch)); + __ push(scratch); __ SubP(r7, r7, Operand(1)); __ CmpP(r7, Operand::Zero()); __ bne(&loop); @@ -2189,10 +2209,11 @@ void Generate_PushBoundArguments(MacroAssembler* masm) { // -- r6 : the number of [[BoundArguments]] // ----------------------------------- + Register scratch = r8; // Reserve stack space for the [[BoundArguments]]. { Label done; - __ LoadRR(r8, sp); // preserve previous stack pointer + __ LoadRR(scratch, sp); // preserve previous stack pointer __ ShiftLeftP(r9, r6, Operand(kPointerSizeLog2)); __ SubP(sp, sp, r9); // Check the stack for overflow. We are not trying to catch interruptions @@ -2201,7 +2222,7 @@ void Generate_PushBoundArguments(MacroAssembler* masm) { __ CompareRoot(sp, RootIndex::kRealStackLimit); __ bgt(&done); // Signed comparison. // Restore the stack pointer. - __ LoadRR(sp, r8); + __ LoadRR(sp, scratch); { FrameScope scope(masm, StackFrame::MANUAL); __ EnterFrame(StackFrame::INTERNAL); @@ -2221,7 +2242,7 @@ void Generate_PushBoundArguments(MacroAssembler* masm) { __ beq(&skip); __ LoadRR(r1, r2); __ bind(&loop); - __ LoadP(r0, MemOperand(r8, r7)); + __ LoadP(r0, MemOperand(scratch, r7)); __ StoreP(r0, MemOperand(sp, r7)); __ AddP(r7, r7, Operand(kPointerSize)); __ BranchOnCount(r1, &loop); @@ -2257,9 +2278,9 @@ void Builtins::Generate_CallBoundFunctionImpl(MacroAssembler* masm) { __ AssertBoundFunction(r3); // Patch the receiver to [[BoundThis]]. - __ LoadP(ip, FieldMemOperand(r3, JSBoundFunction::kBoundThisOffset)); + __ LoadP(r5, FieldMemOperand(r3, JSBoundFunction::kBoundThisOffset)); __ ShiftLeftP(r1, r2, Operand(kPointerSizeLog2)); - __ StoreP(ip, MemOperand(sp, r1)); + __ StoreP(r5, MemOperand(sp, r1)); // Push the [[BoundArguments]] onto the stack. Generate_PushBoundArguments(masm); @@ -2749,7 +2770,7 @@ void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size, __ CompareRoot(r1, RootIndex::kTheHoleValue); // Cannot use check here as it attempts to generate call into runtime. 
__ beq(&okay, Label::kNear); - __ stop("Unexpected pending exception"); + __ stop(); __ bind(&okay); } @@ -3000,13 +3021,22 @@ static void CallApiFunctionAndReturn(MacroAssembler* masm, __ LoadlB(scratch, MemOperand(scratch, 0)); __ CmpP(scratch, Operand::Zero()); - Label profiler_disabled; - Label end_profiler_check; - __ beq(&profiler_disabled, Label::kNear); - __ Move(scratch, thunk_ref); - __ b(&end_profiler_check, Label::kNear); - __ bind(&profiler_disabled); - __ LoadRR(scratch, function_address); + Label profiler_enabled, end_profiler_check; + __ bne(&profiler_enabled, Label::kNear); + __ Move(scratch, ExternalReference::address_of_runtime_stats_flag()); + __ LoadlW(scratch, MemOperand(scratch, 0)); + __ CmpP(scratch, Operand::Zero()); + __ bne(&profiler_enabled, Label::kNear); + { + // Call the api function directly. + __ LoadRR(scratch, function_address); + __ b(&end_profiler_check, Label::kNear); + } + __ bind(&profiler_enabled); + { + // Additional parameter is the address of the actual callback. + __ Move(scratch, thunk_ref); + } __ bind(&end_profiler_check); // Allocate HandleScope in callee-save registers. @@ -3304,7 +3334,7 @@ void Builtins::Generate_CallApiGetter(MacroAssembler* masm) { void Builtins::Generate_DirectCEntry(MacroAssembler* masm) { // Unused. - __ stop(0); + __ stop(); } #undef __ diff --git a/deps/v8/src/builtins/setup-builtins-internal.cc b/deps/v8/src/builtins/setup-builtins-internal.cc index e3403c601d6c0c..3c637db63683d6 100644 --- a/deps/v8/src/builtins/setup-builtins-internal.cc +++ b/deps/v8/src/builtins/setup-builtins-internal.cc @@ -157,10 +157,7 @@ Code BuildWithCodeStubAssemblerJS(Isolate* isolate, int32_t builtin_index, // to code targets without dereferencing their handles. CanonicalHandleScope canonical(isolate); - SegmentSize segment_size = isolate->serializer_enabled() - ? SegmentSize::kLarge - : SegmentSize::kDefault; - Zone zone(isolate->allocator(), ZONE_NAME, segment_size); + Zone zone(isolate->allocator(), ZONE_NAME); const int argc_with_recv = (argc == SharedFunctionInfo::kDontAdaptArgumentsSentinel) ? 0 : argc + 1; compiler::CodeAssemblerState state( @@ -181,10 +178,7 @@ Code BuildWithCodeStubAssemblerCS(Isolate* isolate, int32_t builtin_index, // Canonicalize handles, so that we can share constant pool entries pointing // to code targets without dereferencing their handles. CanonicalHandleScope canonical(isolate); - SegmentSize segment_size = isolate->serializer_enabled() - ? SegmentSize::kLarge - : SegmentSize::kDefault; - Zone zone(isolate->allocator(), ZONE_NAME, segment_size); + Zone zone(isolate->allocator(), ZONE_NAME); // The interface descriptor with given key must be initialized at this point // and this construction just queries the details from the descriptors table. 
CallInterfaceDescriptor descriptor(interface_descriptor); @@ -232,9 +226,9 @@ void SetupIsolateDelegate::ReplacePlaceholders(Isolate* isolate) { RelocInfo::ModeMask(RelocInfo::FULL_EMBEDDED_OBJECT) | RelocInfo::ModeMask(RelocInfo::COMPRESSED_EMBEDDED_OBJECT) | RelocInfo::ModeMask(RelocInfo::RELATIVE_CODE_TARGET); - HeapIterator iterator(isolate->heap()); - for (HeapObject obj = iterator.next(); !obj.is_null(); - obj = iterator.next()) { + HeapObjectIterator iterator(isolate->heap()); + for (HeapObject obj = iterator.Next(); !obj.is_null(); + obj = iterator.Next()) { if (!obj.IsCode()) continue; Code code = Code::cast(obj); bool flush_icache = false; diff --git a/deps/v8/src/builtins/string-endswith.tq b/deps/v8/src/builtins/string-endswith.tq index 16405d4c1267cb..8b9fe84dfb759b 100644 --- a/deps/v8/src/builtins/string-endswith.tq +++ b/deps/v8/src/builtins/string-endswith.tq @@ -28,12 +28,13 @@ namespace string { // https://tc39.github.io/ecma262/#sec-string.prototype.endswith transitioning javascript builtin StringPrototypeEndsWith( - context: Context, receiver: Object, ...arguments): Boolean { + js-implicit context: Context, receiver: Object)(...arguments): Boolean { const searchString: Object = arguments[0]; const endPosition: Object = arguments[1]; + const kBuiltinName: constexpr string = 'String.prototype.endsWith'; // 1. Let O be ? RequireObjectCoercible(this value). - const object: Object = RequireObjectCoercible(receiver); + const object: Object = RequireObjectCoercible(receiver, kBuiltinName); // 2. Let S be ? ToString(O). const string: String = ToString_Inline(context, object); @@ -41,7 +42,7 @@ namespace string { // 3. Let isRegExp be ? IsRegExp(searchString). // 4. If isRegExp is true, throw a TypeError exception. if (IsRegExp(searchString)) { - ThrowTypeError(kFirstArgumentNotRegExp, 'String.prototype.endsWith'); + ThrowTypeError(kFirstArgumentNotRegExp, kBuiltinName); } // 5. Let searchStr be ? ToString(searchString). @@ -63,7 +64,7 @@ namespace string { const searchLength: Smi = searchStr.length_smi; // 10. Let start be end - searchLength. - let start = end - searchLength; + const start = end - searchLength; // 11. If start is less than 0, return false. 
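The numbered spec steps in the endsWith hunk above are plain clamped-index arithmetic. A rough C++ equivalent of steps 6 through 11, assuming byte strings instead of V8's string machinery:

```cpp
#include <algorithm>
#include <string>

// Clamp `end`, derive `start`, and reject when the search string cannot
// fit (the "start is less than 0" case in the spec's terms).
bool EndsWith(const std::string& s, const std::string& search,
              size_t end_pos) {
  const size_t end = std::min(end_pos, s.size());
  if (search.size() > end) return false;  // start would be negative
  const size_t start = end - search.size();
  return s.compare(start, search.size(), search) == 0;
}
```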
if (start < 0) return False; diff --git a/deps/v8/src/builtins/string-html.tq b/deps/v8/src/builtins/string-html.tq index a2b162520666ec..80b5f778877bd4 100644 --- a/deps/v8/src/builtins/string-html.tq +++ b/deps/v8/src/builtins/string-html.tq @@ -22,22 +22,23 @@ namespace string_html { // https://tc39.github.io/ecma262/#sec-string.prototype.anchor transitioning javascript builtin StringPrototypeAnchor( - context: Context, receiver: Object, ...arguments): String { + js-implicit context: Context, receiver: Object)(...arguments): String { return CreateHTML( receiver, 'String.prototype.anchor', 'a', 'name', arguments[0]); } // https://tc39.github.io/ecma262/#sec-string.prototype.big transitioning javascript builtin - StringPrototypeBig(context: Context, receiver: Object, ...arguments): String { + StringPrototypeBig(js-implicit context: Context, receiver: Object)( + ...arguments): String { return CreateHTML( receiver, 'String.prototype.big', 'big', kEmptyString, kEmptyString); } // https://tc39.github.io/ecma262/#sec-string.prototype.blink transitioning javascript builtin - StringPrototypeBlink(context: Context, receiver: Object, ...arguments): - String { + StringPrototypeBlink(js-implicit context: Context, receiver: Object)( + ...arguments): String { return CreateHTML( receiver, 'String.prototype.blink', 'blink', kEmptyString, kEmptyString); @@ -45,56 +46,56 @@ namespace string_html { // https://tc39.github.io/ecma262/#sec-string.prototype.bold transitioning javascript builtin - StringPrototypeBold(context: Context, receiver: Object, ...arguments): - String { + StringPrototypeBold(js-implicit context: Context, receiver: Object)( + ...arguments): String { return CreateHTML( receiver, 'String.prototype.bold', 'b', kEmptyString, kEmptyString); } // https://tc39.github.io/ecma262/#sec-string.prototype.fontcolor transitioning javascript builtin - StringPrototypeFontcolor(context: Context, receiver: Object, ...arguments): - String { + StringPrototypeFontcolor(js-implicit context: Context, receiver: Object)( + ...arguments): String { return CreateHTML( receiver, 'String.prototype.fontcolor', 'font', 'color', arguments[0]); } // https://tc39.github.io/ecma262/#sec-string.prototype.fontsize transitioning javascript builtin - StringPrototypeFontsize(context: Context, receiver: Object, ...arguments): - String { + StringPrototypeFontsize(js-implicit context: Context, receiver: Object)( + ...arguments): String { return CreateHTML( receiver, 'String.prototype.fontsize', 'font', 'size', arguments[0]); } // https://tc39.github.io/ecma262/#sec-string.prototype.fixed transitioning javascript builtin - StringPrototypeFixed(context: Context, receiver: Object, ...arguments): - String { + StringPrototypeFixed(js-implicit context: Context, receiver: Object)( + ...arguments): String { return CreateHTML( receiver, 'String.prototype.fixed', 'tt', kEmptyString, kEmptyString); } // https://tc39.github.io/ecma262/#sec-string.prototype.italics transitioning javascript builtin - StringPrototypeItalics(context: Context, receiver: Object, ...arguments): - String { + StringPrototypeItalics(js-implicit context: Context, receiver: Object)( + ...arguments): String { return CreateHTML( receiver, 'String.prototype.italics', 'i', kEmptyString, kEmptyString); } // https://tc39.github.io/ecma262/#sec-string.prototype.link transitioning javascript builtin - StringPrototypeLink(context: Context, receiver: Object, ...arguments): - String { + StringPrototypeLink(js-implicit context: Context, receiver: Object)( + ...arguments): String { 
return CreateHTML( receiver, 'String.prototype.link', 'a', 'href', arguments[0]); } // https://tc39.github.io/ecma262/#sec-string.prototype.small transitioning javascript builtin - StringPrototypeSmall(context: Context, receiver: Object, ...arguments): - String { + StringPrototypeSmall(js-implicit context: Context, receiver: Object)( + ...arguments): String { return CreateHTML( receiver, 'String.prototype.small', 'small', kEmptyString, kEmptyString); @@ -102,8 +103,8 @@ namespace string_html { // https://tc39.github.io/ecma262/#sec-string.prototype.strike transitioning javascript builtin - StringPrototypeStrike(context: Context, receiver: Object, ...arguments): - String { + StringPrototypeStrike(js-implicit context: Context, receiver: Object)( + ...arguments): String { return CreateHTML( receiver, 'String.prototype.strike', 'strike', kEmptyString, kEmptyString); @@ -111,14 +112,16 @@ namespace string_html { // https://tc39.github.io/ecma262/#sec-string.prototype.sub transitioning javascript builtin - StringPrototypeSub(context: Context, receiver: Object, ...arguments): String { + StringPrototypeSub(js-implicit context: Context, receiver: Object)( + ...arguments): String { return CreateHTML( receiver, 'String.prototype.sub', 'sub', kEmptyString, kEmptyString); } // https://tc39.github.io/ecma262/#sec-string.prototype.sup transitioning javascript builtin - StringPrototypeSup(context: Context, receiver: Object, ...arguments): String { + StringPrototypeSup(js-implicit context: Context, receiver: Object)( + ...arguments): String { return CreateHTML( receiver, 'String.prototype.sup', 'sup', kEmptyString, kEmptyString); } diff --git a/deps/v8/src/builtins/string-iterator.tq b/deps/v8/src/builtins/string-iterator.tq index f5c6099c255f4d..5b8f864661295a 100644 --- a/deps/v8/src/builtins/string-iterator.tq +++ b/deps/v8/src/builtins/string-iterator.tq @@ -17,7 +17,7 @@ namespace string_iterator { // ES6 #sec-string.prototype-@@iterator transitioning javascript builtin StringPrototypeIterator( - implicit context: Context)(receiver: Object): JSStringIterator { + js-implicit context: Context)(receiver: Object): JSStringIterator { const name: String = ToThisString(receiver, 'String.prototype[Symbol.iterator]'); const index: Smi = 0; @@ -26,7 +26,7 @@ namespace string_iterator { // ES6 #sec-%stringiteratorprototype%.next transitioning javascript builtin StringIteratorPrototypeNext( - implicit context: Context)(receiver: Object): JSIteratorResult { + js-implicit context: Context)(receiver: Object): JSObject { const iterator = Cast(receiver) otherwise ThrowTypeError( kIncompatibleMethodReceiver, 'String Iterator.prototype.next', receiver); @@ -34,13 +34,13 @@ namespace string_iterator { const position: intptr = SmiUntag(iterator.next_index); const length: intptr = string.length_intptr; if (position >= length) { - return NewJSIteratorResult(Undefined, True); + return AllocateJSIteratorResult(Undefined, True); } // Move to next codepoint. 
const encoding = UTF16; const ch = string::LoadSurrogatePairAt(string, length, position, encoding); - const value: String = string::StringFromSingleCodePoint(ch, encoding); + const value: String = string::StringFromSingleUTF16EncodedCodePoint(ch); iterator.next_index = SmiTag(position + value.length_intptr); - return NewJSIteratorResult(value, False); + return AllocateJSIteratorResult(value, False); } } diff --git a/deps/v8/src/builtins/string-repeat.tq b/deps/v8/src/builtins/string-repeat.tq index f2590011ea2b07..0d9d4ee4982d50 100644 --- a/deps/v8/src/builtins/string-repeat.tq +++ b/deps/v8/src/builtins/string-repeat.tq @@ -28,7 +28,7 @@ namespace string_repeat { // https://tc39.github.io/ecma262/#sec-string.prototype.repeat transitioning javascript builtin StringPrototypeRepeat( - context: Context, receiver: Object, count: Object): String { + js-implicit context: Context, receiver: Object)(count: Object): String { // 1. Let O be ? RequireObjectCoercible(this value). // 2. Let S be ? ToString(O). const s: String = ToThisString(receiver, kBuiltinName); diff --git a/deps/v8/src/builtins/string-slice.tq b/deps/v8/src/builtins/string-slice.tq index 41eb38b0ad4aa0..b066fb76692ce7 100644 --- a/deps/v8/src/builtins/string-slice.tq +++ b/deps/v8/src/builtins/string-slice.tq @@ -9,7 +9,7 @@ namespace string_slice { // ES6 #sec-string.prototype.slice ( start, end ) // https://tc39.github.io/ecma262/#sec-string.prototype.slice transitioning javascript builtin StringPrototypeSlice( - implicit context: Context)(receiver: Object, ...arguments): String { + js-implicit context: Context, receiver: Object)(...arguments): String { // 1. Let O be ? RequireObjectCoercible(this value). // 2. Let S be ? ToString(O). const string: String = ToThisString(receiver, 'String.prototype.slice'); diff --git a/deps/v8/src/builtins/string-startswith.tq b/deps/v8/src/builtins/string-startswith.tq index 1f885a2afdd178..b03e67ecf5446c 100644 --- a/deps/v8/src/builtins/string-startswith.tq +++ b/deps/v8/src/builtins/string-startswith.tq @@ -8,23 +8,15 @@ namespace string { extern macro RegExpBuiltinsAssembler::IsRegExp(implicit context: Context)(Object): bool; - // TODO(ryzokuken): Add RequireObjectCoercible to base.tq and update callsites - macro RequireObjectCoercible(implicit context: Context)(argument: Object): - Object { - if (IsNullOrUndefined(argument)) { - ThrowTypeError(kCalledOnNullOrUndefined, 'String.prototype.startsWith'); - } - return argument; - } - // https://tc39.github.io/ecma262/#sec-string.prototype.startswith transitioning javascript builtin StringPrototypeStartsWith( - context: Context, receiver: Object, ...arguments): Boolean { + js-implicit context: Context, receiver: Object)(...arguments): Boolean { const searchString: Object = arguments[0]; const position: Object = arguments[1]; + const kBuiltinName: constexpr string = 'String.prototype.startsWith'; // 1. Let O be ? RequireObjectCoercible(this value). - const object: Object = RequireObjectCoercible(receiver); + const object: Object = RequireObjectCoercible(receiver, kBuiltinName); // 2. Let S be ? ToString(O). const string: String = ToString_Inline(context, object); @@ -32,7 +24,7 @@ namespace string { // 3. Let isRegExp be ? IsRegExp(searchString). // 4. If isRegExp is true, throw a TypeError exception. if (IsRegExp(searchString)) { - ThrowTypeError(kFirstArgumentNotRegExp, 'String.prototype.startsWith'); + ThrowTypeError(kFirstArgumentNotRegExp, kBuiltinName); } // 5. Let searchStr be ? ToString(searchString). 
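For reference, the LoadSurrogatePairAt call in the string-iterator hunk above combines a UTF-16 lead/trail pair into one code point, and the iterator then advances by the length of the resulting string (one or two units). A self-contained C++ sketch of that decoding logic (a hypothetical helper, not V8's implementation):

```cpp
#include <cstdint>
#include <string>

// Combine a lead/trail surrogate pair at position i into one code point;
// an unpaired surrogate or a BMP unit is returned as-is.
uint32_t LoadSurrogatePairAt(const std::u16string& s, size_t i) {
  const uint32_t lead = s[i];
  if (lead >= 0xD800 && lead <= 0xDBFF && i + 1 < s.size()) {
    const uint32_t trail = s[i + 1];
    if (trail >= 0xDC00 && trail <= 0xDFFF) {
      return 0x10000 + ((lead - 0xD800) << 10) + (trail - 0xDC00);
    }
  }
  return lead;
}
```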
diff --git a/deps/v8/src/builtins/string-substring.tq b/deps/v8/src/builtins/string-substring.tq index f322eeed06dc7a..1fafb8af4367ca 100644 --- a/deps/v8/src/builtins/string-substring.tq +++ b/deps/v8/src/builtins/string-substring.tq @@ -28,7 +28,7 @@ namespace string_substring { // ES6 #sec-string.prototype.substring transitioning javascript builtin StringPrototypeSubstring( - implicit context: Context)(receiver: Object, ...arguments): String { + js-implicit context: Context, receiver: Object)(...arguments): String { // Check that {receiver} is coercible to Object and convert it to a String. const string: String = ToThisString(receiver, 'String.prototype.substring'); const length = string.length_smi; diff --git a/deps/v8/src/builtins/string.tq b/deps/v8/src/builtins/string.tq index 1e5a74eb49d02b..dbcc5799e1063d 100644 --- a/deps/v8/src/builtins/string.tq +++ b/deps/v8/src/builtins/string.tq @@ -7,20 +7,21 @@ namespace string { // ES6 #sec-string.prototype.tostring transitioning javascript builtin - StringPrototypeToString(implicit context: Context)(receiver: Object): Object { + StringPrototypeToString(js-implicit context: Context)(receiver: Object): + Object { return ToThisValue(receiver, kString, 'String.prototype.toString'); } // ES6 #sec-string.prototype.valueof transitioning javascript builtin - StringPrototypeValueOf(implicit context: Context)(receiver: Object): Object { + StringPrototypeValueOf(js-implicit context: Context)(receiver: Object): + Object { return ToThisValue(receiver, kString, 'String.prototype.valueOf'); } extern macro StringBuiltinsAssembler::LoadSurrogatePairAt( String, intptr, intptr, constexpr UnicodeEncoding): int32; - extern macro StringFromSingleCodePoint(int32, constexpr UnicodeEncoding): - String; + extern macro StringFromSingleUTF16EncodedCodePoint(int32): String; // This function assumes StringPrimitiveWithNoCustomIteration is true. transitioning builtin StringToList(implicit context: Context)(string: String): @@ -38,7 +39,7 @@ namespace string { let i: intptr = 0; while (i < stringLength) { const ch: int32 = LoadSurrogatePairAt(string, stringLength, i, encoding); - const value: String = StringFromSingleCodePoint(ch, encoding); + const value: String = StringFromSingleUTF16EncodedCodePoint(ch); elements[arrayLength] = value; // Increment and continue the loop. i = i + value.length_intptr; @@ -52,9 +53,9 @@ namespace string { } transitioning macro GenerateStringAt(implicit context: Context)( - receiver: Object, position: Object, methodName: constexpr string): - never labels IfInBounds(String, intptr, intptr), - IfOutOfBounds { + receiver: Object, position: Object, + methodName: constexpr string): never labels + IfInBounds(String, intptr, intptr), IfOutOfBounds { // Check that {receiver} is coercible to Object and convert it to a String. 
const string: String = ToThisString(receiver, methodName); // Convert the {position} to a Smi and check that it's in bounds of @@ -70,12 +71,13 @@ namespace string { // ES6 #sec-string.prototype.charat transitioning javascript builtin StringPrototypeCharAt( - implicit context: Context)(receiver: Object, position: Object): Object { + js-implicit context: Context, + receiver: Object)(position: Object): Object { try { GenerateStringAt(receiver, position, 'String.prototype.charAt') otherwise IfInBounds, IfOutOfBounds; } - label IfInBounds(string: String, index: intptr, length: intptr) { + label IfInBounds(string: String, index: intptr, _length: intptr) { const code: int32 = StringCharCodeAt(string, index); return StringFromSingleCharCode(code); } @@ -86,12 +88,13 @@ namespace string { // ES6 #sec-string.prototype.charcodeat transitioning javascript builtin StringPrototypeCharCodeAt( - implicit context: Context)(receiver: Object, position: Object): Object { + js-implicit context: Context, + receiver: Object)(position: Object): Object { try { GenerateStringAt(receiver, position, 'String.prototype.charCodeAt') otherwise IfInBounds, IfOutOfBounds; } - label IfInBounds(string: String, index: intptr, length: intptr) { + label IfInBounds(string: String, index: intptr, _length: intptr) { const code: int32 = StringCharCodeAt(string, index); return Convert(code); } @@ -102,7 +105,8 @@ namespace string { // ES6 #sec-string.prototype.codepointat transitioning javascript builtin StringPrototypeCodePointAt( - implicit context: Context)(receiver: Object, position: Object): Object { + js-implicit context: Context, + receiver: Object)(position: Object): Object { try { GenerateStringAt(receiver, position, 'String.prototype.codePointAt') otherwise IfInBounds, IfOutOfBounds; @@ -121,7 +125,7 @@ namespace string { // ES6 String.prototype.concat(...args) // ES6 #sec-string.prototype.concat transitioning javascript builtin StringPrototypeConcat( - implicit context: Context)(receiver: Object, ...arguments): Object { + js-implicit context: Context, receiver: Object)(...arguments): Object { // Check that {receiver} is coercible to Object and convert it to a String. 
let string: String = ToThisString(receiver, 'String.prototype.concat'); diff --git a/deps/v8/src/builtins/typed-array-createtypedarray.tq b/deps/v8/src/builtins/typed-array-createtypedarray.tq index a0d745b2f4deb8..f6ab289e12c166 100644 --- a/deps/v8/src/builtins/typed-array-createtypedarray.tq +++ b/deps/v8/src/builtins/typed-array-createtypedarray.tq @@ -8,30 +8,77 @@ namespace typed_array_createtypedarray { extern builtin IterableToListMayPreserveHoles(Context, Object, Callable): JSArray; - extern macro ConstructorBuiltinsAssembler::EmitFastNewObject( - implicit context: Context)(JSFunction, JSReceiver): JSTypedArray; extern macro TypedArrayBuiltinsAssembler::AllocateEmptyOnHeapBuffer( - implicit context: Context)(JSTypedArray, uintptr): JSArrayBuffer; + implicit context: Context)(uintptr): JSArrayBuffer; + extern macro CodeStubAssembler::AllocateByteArray(uintptr): ByteArray; extern macro TypedArrayBuiltinsAssembler::GetDefaultConstructor( implicit context: Context)(JSTypedArray): JSFunction; extern macro TypedArrayBuiltinsAssembler::IsSharedArrayBuffer(JSArrayBuffer): bool; - extern macro TypedArrayBuiltinsAssembler::SetupTypedArray( - JSTypedArray, uintptr, uintptr, uintptr): void; + extern macro TypedArrayBuiltinsAssembler::SetupTypedArrayEmbedderFields( + JSTypedArray): void; extern runtime ThrowInvalidTypedArrayAlignment(implicit context: Context)( Map, String): never; extern runtime TypedArrayCopyElements(Context, JSTypedArray, Object, Number): void; + transitioning macro AllocateTypedArray(implicit context: Context)( + isOnHeap: constexpr bool, map: Map, buffer: JSArrayBuffer, + byteOffset: uintptr, byteLength: uintptr, length: uintptr): JSTypedArray { + let elements: ByteArray; + let externalPointer: RawPtr; + let basePointer: ByteArray | Smi; + if constexpr (isOnHeap) { + elements = AllocateByteArray(byteLength); + basePointer = elements; + externalPointer = PointerConstant(kExternalPointerForOnHeapArray); + } else { + basePointer = Convert(0); + + // The max byteOffset is 8 * MaxSmi on the particular platform. 32 bit + // platforms are self-limiting, because we can't allocate an array bigger + // than our 32-bit arithmetic range anyway. 64 bit platforms could + // theoretically have an offset up to 2^35 - 1. + const backingStore: RawPtr = buffer.backing_store; + externalPointer = backingStore + Convert(byteOffset); + + // Assert no overflow has occurred. Only assert if the mock array buffer + // allocator is NOT used. When the mock array buffer is used, impossibly + // large allocations are allowed that would erroneously cause an overflow + // and this assertion to fail. + assert( + IsMockArrayBufferAllocatorFlag() || + Convert(externalPointer) >= Convert(backingStore)); + + elements = kEmptyByteArray; + } + + // We can't just build the new object with "new JSTypedArray" here because + // Torque doesn't know its full size including embedder fields, so use CSA + // for the allocation step. 
+ const typedArray = + UnsafeCast(AllocateFastOrSlowJSObjectFromMap(map)); + typedArray.elements = elements; + typedArray.buffer = buffer; + typedArray.byte_offset = byteOffset; + typedArray.byte_length = byteLength; + typedArray.length = length; + typedArray.external_pointer = externalPointer; + typedArray.base_pointer = basePointer; + SetupTypedArrayEmbedderFields(typedArray); + return typedArray; + } + transitioning macro TypedArrayInitialize(implicit context: Context)( - initialize: constexpr bool, typedArray: JSTypedArray, length: PositiveSmi, + initialize: constexpr bool, map: Map, length: PositiveSmi, elementsInfo: typed_array::TypedArrayElementsInfo, - bufferConstructor: JSReceiver): uintptr { + bufferConstructor: JSReceiver): JSTypedArray { const byteLength = elementsInfo.CalculateByteLength(length) otherwise ThrowRangeError(kInvalidArrayBufferLength); const byteLengthNum = Convert(byteLength); const defaultConstructor = GetArrayBufferFunction(); + const byteOffset: uintptr = 0; try { if (bufferConstructor != defaultConstructor) { @@ -39,14 +86,21 @@ namespace typed_array_createtypedarray { defaultConstructor, bufferConstructor, byteLengthNum)); } - if (byteLength > V8_TYPED_ARRAY_MAX_SIZE_IN_HEAP) goto AllocateOffHeap; + if (byteLength > kMaxTypedArrayInHeap) goto AllocateOffHeap; + + const buffer = AllocateEmptyOnHeapBuffer(byteLength); - AllocateEmptyOnHeapBuffer(typedArray, byteLength); + const isOnHeap: constexpr bool = true; + const typedArray = AllocateTypedArray( + isOnHeap, map, buffer, byteOffset, byteLength, + Convert(length)); if constexpr (initialize) { const backingStore = typedArray.data_ptr; typed_array::CallCMemset(backingStore, 0, byteLength); } + + return typedArray; } label AllocateOffHeap { if constexpr (initialize) { @@ -58,22 +112,18 @@ namespace typed_array_createtypedarray { } label AttachOffHeapBuffer(bufferObj: Object) { const buffer = Cast(bufferObj) otherwise unreachable; - const byteOffset: uintptr = 0; - typedArray.AttachOffHeapBuffer(buffer, byteOffset); + const isOnHeap: constexpr bool = false; + return AllocateTypedArray( + isOnHeap, map, buffer, byteOffset, byteLength, + Convert(length)); } - - const byteOffset: uintptr = 0; - SetupTypedArray( - typedArray, Convert(length), byteOffset, byteLength); - - return byteLength; } // 22.2.4.2 TypedArray ( length ) // ES #sec-typedarray-length transitioning macro ConstructByLength(implicit context: Context)( - typedArray: JSTypedArray, length: Object, - elementsInfo: typed_array::TypedArrayElementsInfo): void { + map: Map, length: Object, + elementsInfo: typed_array::TypedArrayElementsInfo): JSTypedArray { const convertedLength: Number = ToInteger_Inline(context, length, kTruncateMinusZero); // The maximum length of a TypedArray is MaxSmi(). 
@@ -84,23 +134,22 @@ namespace typed_array_createtypedarray { otherwise ThrowRangeError(kInvalidTypedArrayLength, length); const defaultConstructor: Constructor = GetArrayBufferFunction(); const initialize: constexpr bool = true; - TypedArrayInitialize( - initialize, typedArray, positiveLength, elementsInfo, - defaultConstructor); + return TypedArrayInitialize( + initialize, map, positiveLength, elementsInfo, defaultConstructor); } // 22.2.4.4 TypedArray ( object ) // ES #sec-typedarray-object transitioning macro ConstructByArrayLike(implicit context: Context)( - typedArray: JSTypedArray, arrayLike: HeapObject, initialLength: Object, + map: Map, arrayLike: HeapObject, initialLength: Object, elementsInfo: typed_array::TypedArrayElementsInfo, - bufferConstructor: JSReceiver): void { + bufferConstructor: JSReceiver): JSTypedArray { // The caller has looked up length on arrayLike, which is observable. const length: PositiveSmi = ToSmiLength(initialLength) otherwise ThrowRangeError(kInvalidTypedArrayLength, initialLength); const initialize: constexpr bool = false; - const byteLength = TypedArrayInitialize( - initialize, typedArray, length, elementsInfo, bufferConstructor); + const typedArray = TypedArrayInitialize( + initialize, map, length, elementsInfo, bufferConstructor); try { const src: JSTypedArray = Cast(arrayLike) otherwise IfSlow; @@ -112,6 +161,7 @@ namespace typed_array_createtypedarray { goto IfSlow; } else if (length > 0) { + const byteLength = typedArray.byte_length; assert(byteLength <= kArrayBufferMaxByteLength); typed_array::CallCMemcpy(typedArray.data_ptr, src.data_ptr, byteLength); } @@ -121,13 +171,13 @@ namespace typed_array_createtypedarray { TypedArrayCopyElements(context, typedArray, arrayLike, length); } } + return typedArray; } // 22.2.4.4 TypedArray ( object ) // ES #sec-typedarray-object transitioning macro ConstructByIterable(implicit context: Context)( - typedArray: JSTypedArray, iterable: JSReceiver, iteratorFn: Callable, - elementsInfo: typed_array::TypedArrayElementsInfo): never + iterable: JSReceiver, iteratorFn: Callable): never labels IfConstructByArrayLike(HeapObject, Object, JSReceiver) { const array: JSArray = IterableToListMayPreserveHoles(context, iterable, iteratorFn); @@ -137,8 +187,7 @@ namespace typed_array_createtypedarray { // 22.2.4.3 TypedArray ( typedArray ) // ES #sec-typedarray-typedarray transitioning macro ConstructByTypedArray(implicit context: Context)( - typedArray: JSTypedArray, srcTypedArray: JSTypedArray, - elementsInfo: typed_array::TypedArrayElementsInfo): never + srcTypedArray: JSTypedArray): never labels IfConstructByArrayLike(HeapObject, Object, JSReceiver) { let bufferConstructor: JSReceiver = GetArrayBufferFunction(); const srcBuffer: JSArrayBuffer = srcTypedArray.buffer; @@ -161,8 +210,8 @@ namespace typed_array_createtypedarray { // 22.2.4.5 TypedArray ( buffer, byteOffset, length ) // ES #sec-typedarray-buffer-byteoffset-length transitioning macro ConstructByArrayBuffer(implicit context: Context)( - typedArray: JSTypedArray, buffer: JSArrayBuffer, byteOffset: Object, - length: Object, elementsInfo: typed_array::TypedArrayElementsInfo): void { + map: Map, buffer: JSArrayBuffer, byteOffset: Object, length: Object, + elementsInfo: typed_array::TypedArrayElementsInfo): JSTypedArray { try { let offset: uintptr = 0; if (byteOffset != Undefined) { @@ -224,12 +273,13 @@ namespace typed_array_createtypedarray { goto IfInvalidLength; } - SetupTypedArray( - typedArray, Convert(newLength), offset, newByteLength); - 
typedArray.AttachOffHeapBuffer(buffer, offset); + const isOnHeap: constexpr bool = false; + return AllocateTypedArray( + isOnHeap, map, buffer, offset, newByteLength, + Convert(newLength)); } label IfInvalidAlignment(problemString: String) deferred { - ThrowInvalidTypedArrayAlignment(typedArray.map, problemString); + ThrowInvalidTypedArrayAlignment(map, problemString); } label IfInvalidByteLength deferred { ThrowRangeError(kInvalidArrayBufferLength); @@ -242,16 +292,15 @@ namespace typed_array_createtypedarray { } } - transitioning macro ConstructByJSReceiver(implicit context: Context)( - array: JSTypedArray, obj: JSReceiver, - elementsInfo: typed_array::TypedArrayElementsInfo): never + transitioning macro ConstructByJSReceiver(implicit context: + Context)(obj: JSReceiver): never labels IfConstructByArrayLike(HeapObject, Object, JSReceiver) { try { const iteratorMethod: Object = GetIteratorMethod(obj) otherwise IfIteratorUndefined; const iteratorFn: Callable = Cast(iteratorMethod) otherwise ThrowTypeError(kIteratorSymbolNonCallable); - ConstructByIterable(array, obj, iteratorFn, elementsInfo) + ConstructByIterable(obj, iteratorFn) otherwise IfConstructByArrayLike; } label IfIteratorUndefined { @@ -273,22 +322,12 @@ namespace typed_array_createtypedarray { assert(IsConstructor(target)); // 4. Let O be ? AllocateTypedArray(constructorName, NewTarget, // "%TypedArrayPrototype%"). - const array: JSTypedArray = EmitFastNewObject(target, newTarget); - // We need to set the byte_offset / byte_length to some sane values - // to keep the heap verifier happy. - // TODO(bmeurer, v8:4153): Fix this initialization to not use - // EmitFastNewObject, which causes the problem, since it puts - // Undefined into all slots of the object even though that - // doesn't make any sense for these fields. - array.byte_offset = 0; - array.byte_length = 0; - array.length = 0; - array.base_pointer = Convert(0); + const map = GetDerivedMap(target, newTarget); // 5. Let elementSize be the Number value of the Element Size value in Table // 56 for constructorName. const elementsInfo: typed_array::TypedArrayElementsInfo = - typed_array::GetTypedArrayElementsInfo(array); + typed_array::GetTypedArrayElementsInfo(map); try { typeswitch (arg1) { @@ -296,15 +335,13 @@ namespace typed_array_createtypedarray { goto IfConstructByLength(length); } case (buffer: JSArrayBuffer): { - ConstructByArrayBuffer(array, buffer, arg2, arg3, elementsInfo); + return ConstructByArrayBuffer(map, buffer, arg2, arg3, elementsInfo); } case (typedArray: JSTypedArray): { - ConstructByTypedArray(array, typedArray, elementsInfo) - otherwise IfConstructByArrayLike; + ConstructByTypedArray(typedArray) otherwise IfConstructByArrayLike; } case (obj: JSReceiver): { - ConstructByJSReceiver(array, obj, elementsInfo) - otherwise IfConstructByArrayLike; + ConstructByJSReceiver(obj) otherwise IfConstructByArrayLike; } // The first argument was a number or fell through and is treated as // a number. 
https://tc39.github.io/ecma262/#sec-typedarray-length @@ -314,14 +351,13 @@ namespace typed_array_createtypedarray { } } label IfConstructByLength(length: Object) { - ConstructByLength(array, length, elementsInfo); + return ConstructByLength(map, length, elementsInfo); } label IfConstructByArrayLike( arrayLike: HeapObject, length: Object, bufferConstructor: JSReceiver) { - ConstructByArrayLike( - array, arrayLike, length, elementsInfo, bufferConstructor); + return ConstructByArrayLike( + map, arrayLike, length, elementsInfo, bufferConstructor); } - return array; } transitioning macro TypedArraySpeciesCreate(implicit context: Context)( diff --git a/deps/v8/src/builtins/typed-array-every.tq b/deps/v8/src/builtins/typed-array-every.tq index 4f8804880e3649..221814cb79d706 100644 --- a/deps/v8/src/builtins/typed-array-every.tq +++ b/deps/v8/src/builtins/typed-array-every.tq @@ -29,8 +29,8 @@ namespace typed_array_every { // https://tc39.github.io/ecma262/#sec-%typedarray%.prototype.every transitioning javascript builtin - TypedArrayPrototypeEvery(implicit context: Context)( - receiver: Object, ...arguments): Object { + TypedArrayPrototypeEvery(js-implicit context: Context, receiver: Object)( + ...arguments): Object { // arguments[0] = callback // arguments[1] = thisArg try { diff --git a/deps/v8/src/builtins/typed-array-filter.tq b/deps/v8/src/builtins/typed-array-filter.tq index 9407c3a7afa52f..3937699c731ad3 100644 --- a/deps/v8/src/builtins/typed-array-filter.tq +++ b/deps/v8/src/builtins/typed-array-filter.tq @@ -10,7 +10,7 @@ namespace typed_array_filter { // https://tc39.github.io/ecma262/#sec-%typedarray%.prototype.filter transitioning javascript builtin TypedArrayPrototypeFilter( - context: Context, receiver: Object, ...arguments): Object { + js-implicit context: Context, receiver: Object)(...arguments): Object { // arguments[0] = callback // arguments[1] = thisArg try { diff --git a/deps/v8/src/builtins/typed-array-find.tq b/deps/v8/src/builtins/typed-array-find.tq index 3c331eb3bb5423..be1943ccf48ce9 100644 --- a/deps/v8/src/builtins/typed-array-find.tq +++ b/deps/v8/src/builtins/typed-array-find.tq @@ -29,8 +29,8 @@ namespace typed_array_find { // https://tc39.github.io/ecma262/#sec-%typedarray%.prototype.find transitioning javascript builtin - TypedArrayPrototypeFind(implicit context: - Context)(receiver: Object, ...arguments): Object { + TypedArrayPrototypeFind(js-implicit context: Context, receiver: Object)( + ...arguments): Object { // arguments[0] = callback // arguments[1] = thisArg try { diff --git a/deps/v8/src/builtins/typed-array-findindex.tq b/deps/v8/src/builtins/typed-array-findindex.tq index 05f112d0d59df0..a5ee7897d3c62d 100644 --- a/deps/v8/src/builtins/typed-array-findindex.tq +++ b/deps/v8/src/builtins/typed-array-findindex.tq @@ -29,8 +29,8 @@ namespace typed_array_findindex { // https://tc39.github.io/ecma262/#sec-%typedarray%.prototype.findIndex transitioning javascript builtin - TypedArrayPrototypeFindIndex(implicit context: Context)( - receiver: Object, ...arguments): Object { + TypedArrayPrototypeFindIndex(js-implicit context: Context, receiver: Object)( + ...arguments): Object { // arguments[0] = callback // arguments[1] = thisArg. 
try { diff --git a/deps/v8/src/builtins/typed-array-foreach.tq b/deps/v8/src/builtins/typed-array-foreach.tq index dbf1a121da2e5d..656a22e07d362a 100644 --- a/deps/v8/src/builtins/typed-array-foreach.tq +++ b/deps/v8/src/builtins/typed-array-foreach.tq @@ -25,8 +25,8 @@ namespace typed_array_foreach { // https://tc39.github.io/ecma262/#sec-%typedarray%.prototype.every transitioning javascript builtin - TypedArrayPrototypeForEach(implicit context: Context)( - receiver: Object, ...arguments): Object { + TypedArrayPrototypeForEach(js-implicit context: Context, receiver: Object)( + ...arguments): Object { // arguments[0] = callback // arguments[1] = this_arg. diff --git a/deps/v8/src/builtins/typed-array-reduce.tq b/deps/v8/src/builtins/typed-array-reduce.tq index 7af918a07b1edb..d69dc9a98d840a 100644 --- a/deps/v8/src/builtins/typed-array-reduce.tq +++ b/deps/v8/src/builtins/typed-array-reduce.tq @@ -19,7 +19,7 @@ namespace typed_array_reduce { // BUG(4895): We should throw on detached buffers rather than simply exit. witness.Recheck() otherwise break; const value: Object = witness.Load(k); - if (accumulator == Hole) { + if (accumulator == TheHole) { accumulator = value; } else { accumulator = Call( @@ -27,7 +27,7 @@ namespace typed_array_reduce { witness.GetStable()); } } - if (accumulator == Hole) { + if (accumulator == TheHole) { ThrowTypeError(kReduceNoInitial, kBuiltinName); } return accumulator; @@ -35,8 +35,8 @@ namespace typed_array_reduce { // https://tc39.github.io/ecma262/#sec-%typedarray%.prototype.reduce transitioning javascript builtin - TypedArrayPrototypeReduce(implicit context: Context)( - receiver: Object, ...arguments): Object { + TypedArrayPrototypeReduce(js-implicit context: Context, receiver: Object)( + ...arguments): Object { // arguments[0] = callback // arguments[1] = initialValue. try { @@ -45,7 +45,7 @@ namespace typed_array_reduce { const uarray = typed_array::EnsureAttached(array) otherwise IsDetached; const callbackfn = Cast(arguments[0]) otherwise NotCallable; - const initialValue = arguments.length >= 2 ? arguments[1] : Hole; + const initialValue = arguments.length >= 2 ? arguments[1] : TheHole; return ReduceAllElements(uarray, callbackfn, initialValue); } label NotCallable deferred { diff --git a/deps/v8/src/builtins/typed-array-reduceright.tq b/deps/v8/src/builtins/typed-array-reduceright.tq index 59ce7ff55b03a5..99a84401ed1c90 100644 --- a/deps/v8/src/builtins/typed-array-reduceright.tq +++ b/deps/v8/src/builtins/typed-array-reduceright.tq @@ -19,7 +19,7 @@ namespace typed_array_reduceright { // BUG(4895): We should throw on detached buffers rather than simply exit. witness.Recheck() otherwise break; const value: Object = witness.Load(k); - if (accumulator == Hole) { + if (accumulator == TheHole) { accumulator = value; } else { accumulator = Call( @@ -27,7 +27,7 @@ namespace typed_array_reduceright { witness.GetStable()); } } - if (accumulator == Hole) { + if (accumulator == TheHole) { ThrowTypeError(kReduceNoInitial, kBuiltinName); } return accumulator; @@ -35,8 +35,8 @@ namespace typed_array_reduceright { // https://tc39.github.io/ecma262/#sec-%typedarray%.prototype.reduceright transitioning javascript builtin - TypedArrayPrototypeReduceRight(implicit context: Context)( - receiver: Object, ...arguments): Object { + TypedArrayPrototypeReduceRight( + js-implicit context: Context, receiver: Object)(...arguments): Object { // arguments[0] = callback // arguments[1] = initialValue. 
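Aside on the Hole to TheHole rename in the two reduce hunks above: the sentinel pattern is that the accumulator starts as the hole, the first visited element seeds it, and reducing an empty array with no initial value throws. The same shape in C++, with std::optional standing in for TheHole (sketch only):

```cpp
#include <functional>
#include <optional>
#include <stdexcept>
#include <vector>

// An empty optional plays the role of TheHole: "no accumulator yet".
double Reduce(const std::vector<double>& xs,
              const std::function<double(double, double)>& f,
              std::optional<double> acc = std::nullopt) {
  for (double v : xs) {
    acc = acc.has_value() ? f(*acc, v) : v;  // first element seeds acc
  }
  if (!acc.has_value()) {
    throw std::invalid_argument(
        "Reduce of empty array with no initial value");
  }
  return *acc;
}
```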
try { @@ -45,7 +45,7 @@ namespace typed_array_reduceright { const uarray = typed_array::EnsureAttached(array) otherwise IsDetached; const callbackfn = Cast(arguments[0]) otherwise NotCallable; - const initialValue = arguments.length >= 2 ? arguments[1] : Hole; + const initialValue = arguments.length >= 2 ? arguments[1] : TheHole; return ReduceRightAllElements(uarray, callbackfn, initialValue); } diff --git a/deps/v8/src/builtins/typed-array-slice.tq b/deps/v8/src/builtins/typed-array-slice.tq index f45654b71ec76d..c0087ae1be0276 100644 --- a/deps/v8/src/builtins/typed-array-slice.tq +++ b/deps/v8/src/builtins/typed-array-slice.tq @@ -53,7 +53,7 @@ namespace typed_array_slice { // https://tc39.github.io/ecma262/#sec-%typedarray%.prototype.slice transitioning javascript builtin TypedArrayPrototypeSlice( - context: Context, receiver: Object, ...arguments): Object { + js-implicit context: Context, receiver: Object)(...arguments): Object { // arguments[0] = start // arguments[1] = end diff --git a/deps/v8/src/builtins/typed-array-some.tq b/deps/v8/src/builtins/typed-array-some.tq index 991cad6b1b15b6..7056650fba824e 100644 --- a/deps/v8/src/builtins/typed-array-some.tq +++ b/deps/v8/src/builtins/typed-array-some.tq @@ -29,8 +29,8 @@ namespace typed_array_some { // https://tc39.github.io/ecma262/#sec-%typedarray%.prototype.some transitioning javascript builtin - TypedArrayPrototypeSome(implicit context: - Context)(receiver: Object, ...arguments): Object { + TypedArrayPrototypeSome(js-implicit context: Context, receiver: Object)( + ...arguments): Object { // arguments[0] = callback // arguments[1] = thisArg. try { diff --git a/deps/v8/src/builtins/typed-array-subarray.tq b/deps/v8/src/builtins/typed-array-subarray.tq index 54b945f44ea8f1..4f98123f823a19 100644 --- a/deps/v8/src/builtins/typed-array-subarray.tq +++ b/deps/v8/src/builtins/typed-array-subarray.tq @@ -5,7 +5,8 @@ namespace typed_array_subarray { // ES %TypedArray%.prototype.subarray transitioning javascript builtin TypedArrayPrototypeSubArray( - context: Context, receiver: Object, ...arguments): JSTypedArray { + js-implicit context: Context, + receiver: Object)(...arguments): JSTypedArray { const methodName: constexpr string = '%TypedArray%.prototype.subarray'; // 1. Let O be the this value. 
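Before the typed-array.tq hunk below, a note on the createtypedarray changes earlier in this diff: AllocateTypedArray now initializes the array in one place, choosing between on-heap storage (a ByteArray held in base_pointer) and off-heap storage (external_pointer = backing_store + byte_offset, the addition the overflow assert guards). A simplified C++ picture of the two layouts; the field names are abbreviations, not V8's real types:

```cpp
#include <cstddef>
#include <cstdint>

struct TypedArraySketch {
  void* base_pointer;         // on-heap: the elements ByteArray; else null
  uint8_t* external_pointer;  // off-heap: backing_store + byte_offset
};

TypedArraySketch SetUpDataPointer(bool on_heap, void* elements,
                                  uint8_t* backing_store,
                                  size_t byte_offset) {
  TypedArraySketch t{};
  if (on_heap) {
    t.base_pointer = elements;     // storage moves with the GC
    t.external_pointer = nullptr;  // real code uses a compensation constant
  } else {
    t.base_pointer = nullptr;
    // This addition is what the "assert no overflow" check above guards.
    t.external_pointer = backing_store + byte_offset;
  }
  return t;
}
```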
diff --git a/deps/v8/src/builtins/typed-array.tq b/deps/v8/src/builtins/typed-array.tq index 8f923947f1dd0b..d03c1a0be977e3 100644 --- a/deps/v8/src/builtins/typed-array.tq +++ b/deps/v8/src/builtins/typed-array.tq @@ -65,29 +65,18 @@ namespace typed_array { implicit context: Context)(JSTypedArray): JSArrayBuffer; extern macro TypedArrayBuiltinsAssembler::GetTypedArrayElementsInfo( JSTypedArray): TypedArrayElementsInfo; + extern macro TypedArrayBuiltinsAssembler::GetTypedArrayElementsInfo(Map): + TypedArrayElementsInfo; extern macro TypedArrayBuiltinsAssembler::IsBigInt64ElementsKind( ElementsKind): bool; extern macro LoadFixedTypedArrayElementAsTagged( - RawPtr, Smi, constexpr ElementsKind, constexpr ParameterMode): Object; + RawPtr, Smi, constexpr ElementsKind): Numeric; extern macro StoreJSTypedArrayElementFromTagged( - Context, JSTypedArray, Smi, Object, constexpr ElementsKind, - constexpr ParameterMode); + Context, JSTypedArray, Smi, Object, constexpr ElementsKind); type LoadFn = builtin(Context, JSTypedArray, Smi) => Object; type StoreFn = builtin(Context, JSTypedArray, Smi, Object) => Object; - // These UnsafeCast specializations are necessary becuase there is no - // way to definitively test whether an Object is a Torque function - // with a specific signature, and the default UnsafeCast implementation - // would try to check this through an assert(Is<>), so the test - // is bypassed in this specialization. - UnsafeCast(implicit context: Context)(o: Object): LoadFn { - return %RawDownCast(o); - } - UnsafeCast(implicit context: Context)(o: Object): StoreFn { - return %RawDownCast(o); - } - // AttachedJSTypedArray guards that the array's buffer is not detached. transient type AttachedJSTypedArray extends JSTypedArray; @@ -201,17 +190,16 @@ namespace typed_array { } builtin LoadFixedElement( - context: Context, array: JSTypedArray, index: Smi): Object { + _context: Context, array: JSTypedArray, index: Smi): Object { return LoadFixedTypedArrayElementAsTagged( - array.data_ptr, index, KindForArrayType(), SMI_PARAMETERS); + array.data_ptr, index, KindForArrayType()); } builtin StoreFixedElement( context: Context, typedArray: JSTypedArray, index: Smi, value: Object): Object { StoreJSTypedArrayElementFromTagged( - context, typedArray, index, value, KindForArrayType(), - SMI_PARAMETERS); + context, typedArray, index, value, KindForArrayType()); return Undefined; } @@ -288,7 +276,8 @@ namespace typed_array { // https://tc39.github.io/ecma262/#sec-%typedarray%.prototype.sort transitioning javascript builtin TypedArrayPrototypeSort( - context: Context, receiver: Object, ...arguments): JSTypedArray { + js-implicit context: Context, + receiver: Object)(...arguments): JSTypedArray { // 1. If comparefn is not undefined and IsCallable(comparefn) is false, // throw a TypeError exception. 
const comparefnObj: Object = @@ -322,7 +311,7 @@ namespace typed_array { let loadfn: LoadFn; let storefn: StoreFn; - let elementsKind: ElementsKind = array.elements_kind; + const elementsKind: ElementsKind = array.elements_kind; if (IsElementsKindGreaterThan(elementsKind, UINT32_ELEMENTS)) { if (elementsKind == INT32_ELEMENTS) { diff --git a/deps/v8/src/builtins/x64/builtins-x64.cc b/deps/v8/src/builtins/x64/builtins-x64.cc index 5c09b3a8dedef4..f15c8ba29f251a 100644 --- a/deps/v8/src/builtins/x64/builtins-x64.cc +++ b/deps/v8/src/builtins/x64/builtins-x64.cc @@ -1109,10 +1109,10 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) { // 8-bit fields next to each other, so we could just optimize by writing a // 16-bit. These static asserts guard our assumption is valid. STATIC_ASSERT(BytecodeArray::kBytecodeAgeOffset == - BytecodeArray::kOSRNestingLevelOffset + kCharSize); + BytecodeArray::kOsrNestingLevelOffset + kCharSize); STATIC_ASSERT(BytecodeArray::kNoAgeBytecodeAge == 0); __ movw(FieldOperand(kInterpreterBytecodeArrayRegister, - BytecodeArray::kOSRNestingLevelOffset), + BytecodeArray::kOsrNestingLevelOffset), Immediate(0)); // Load initial bytecode offset. @@ -1562,7 +1562,15 @@ void Generate_ContinueToBuiltinHelper(MacroAssembler* masm, kSystemPointerSize; __ popq(Operand(rsp, offsetToPC)); __ Drop(offsetToPC / kSystemPointerSize); - __ addq(Operand(rsp, 0), Immediate(Code::kHeaderSize - kHeapObjectTag)); + + // Replace the builtin index Smi on the stack with the instruction start + // address of the builtin from the builtins table, and then Ret to this + // address + __ movq(kScratchRegister, Operand(rsp, 0)); + __ movq(kScratchRegister, + __ EntryFromBuiltinIndexAsOperand(kScratchRegister)); + __ movq(Operand(rsp, 0), kScratchRegister); + __ Ret(); } } // namespace @@ -3002,21 +3010,24 @@ void CallApiFunctionAndReturn(MacroAssembler* masm, Register function_address, __ movq(prev_limit_reg, Operand(base_reg, kLimitOffset)); __ addl(Operand(base_reg, kLevelOffset), Immediate(1)); - Label profiler_disabled; - Label end_profiler_check; + Label profiler_enabled, end_profiler_check; __ Move(rax, ExternalReference::is_profiling_address(isolate)); __ cmpb(Operand(rax, 0), Immediate(0)); - __ j(zero, &profiler_disabled); - - // Third parameter is the address of the actual getter function. - __ Move(thunk_last_arg, function_address); - __ Move(rax, thunk_ref); - __ jmp(&end_profiler_check); - - __ bind(&profiler_disabled); - // Call the api function! - __ Move(rax, function_address); - + __ j(not_zero, &profiler_enabled); + __ Move(rax, ExternalReference::address_of_runtime_stats_flag()); + __ cmpl(Operand(rax, 0), Immediate(0)); + __ j(not_zero, &profiler_enabled); + { + // Call the api function directly. + __ Move(rax, function_address); + __ jmp(&end_profiler_check); + } + __ bind(&profiler_enabled); + { + // Third parameter is the address of the actual getter function. + __ Move(thunk_last_arg, function_address); + __ Move(rax, thunk_ref); + } __ bind(&end_profiler_check); // Call the api function! 
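Both the s390 and x64 CallApiFunctionAndReturn hunks above encode the same decision, now with an added runtime-stats check: only when profiling or runtime call stats are active does the stub route through the thunk, which takes the real callback address as an extra parameter; otherwise it calls the API function directly. As straight-line C++ (a sketch of the branch structure, not the generated assembly):

```cpp
using Address = const void*;

// Returns the call target the stub should use. `thunk_ref` wraps the
// callback for profiler/runtime-stats bookkeeping.
Address SelectApiCallTarget(bool is_profiling, int runtime_stats_flag,
                            Address function_address, Address thunk_ref) {
  if (is_profiling || runtime_stats_flag != 0) {
    return thunk_ref;  // thunk receives function_address as a parameter
  }
  return function_address;  // call the API function directly
}
```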
@@ -3065,6 +3076,9 @@ void CallApiFunctionAndReturn(MacroAssembler* masm, Register function_address, __ CompareRoot(map, RootIndex::kHeapNumberMap); __ j(equal, &ok, Label::kNear); + __ CompareRoot(map, RootIndex::kBigIntMap); + __ j(equal, &ok, Label::kNear); + __ CompareRoot(return_value, RootIndex::kUndefinedValue); __ j(equal, &ok, Label::kNear); diff --git a/deps/v8/src/codegen/DEPS b/deps/v8/src/codegen/DEPS new file mode 100644 index 00000000000000..f3715e6ad01f2a --- /dev/null +++ b/deps/v8/src/codegen/DEPS @@ -0,0 +1,9 @@ +# Copyright 2019 the V8 project authors. All rights reserved. +# Use of this source code is governed by a BSD-style license that can be +# found in the LICENSE file. + +specific_include_rules = { + "external-reference.cc": [ + "+src/regexp/regexp-macro-assembler-arch.h", + ], +} diff --git a/deps/v8/src/codegen/OWNERS b/deps/v8/src/codegen/OWNERS index 345e80a16e29a9..feb2f62f7878ec 100644 --- a/deps/v8/src/codegen/OWNERS +++ b/deps/v8/src/codegen/OWNERS @@ -1,9 +1,12 @@ -ahaas@chromium.org +bbudge@chromium.org bmeurer@chromium.org clemensh@chromium.org +gdeepti@chromium.org +ishell@chromium.org jarin@chromium.org jgruber@chromium.org jkummerow@chromium.org +leszeks@chromium.org mslekova@chromium.org mstarzinger@chromium.org mvstanton@chromium.org @@ -11,3 +14,6 @@ neis@chromium.org rmcilroy@chromium.org sigurds@chromium.org tebbi@chromium.org +titzer@chromium.org + +# COMPONENT: Blink>JavaScript>Compiler diff --git a/deps/v8/src/codegen/arm/assembler-arm.cc b/deps/v8/src/codegen/arm/assembler-arm.cc index c8ef586fc15bb8..7ca49a3f9fc0ed 100644 --- a/deps/v8/src/codegen/arm/assembler-arm.cc +++ b/deps/v8/src/codegen/arm/assembler-arm.cc @@ -2210,7 +2210,7 @@ void Assembler::stm(BlockAddrMode am, Register base, RegList src, // Exception-generating instructions and debugging support. // Stops with a non-negative code less than kNumOfWatchedStops support // enabling/disabling and a counter feature. See simulator-arm.h . -void Assembler::stop(const char* msg, Condition cond, int32_t code) { +void Assembler::stop(Condition cond, int32_t code) { #ifndef __arm__ DCHECK_GE(code, kDefaultStopCode); { @@ -4827,12 +4827,13 @@ void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) { void Assembler::ConstantPoolAddEntry(int position, RelocInfo::Mode rmode, intptr_t value) { DCHECK(rmode != RelocInfo::CONST_POOL); - // We can share CODE_TARGETs because we don't patch the code objects anymore, - // and we make sure we emit only one reloc info for them (thus delta patching) - // will apply the delta only once. At the moment, we do not dedup code targets - // if they are wrapped in a heap object request (value == 0). + // We can share CODE_TARGETs and embedded objects, but we must make sure we + // only emit one reloc info for them (thus delta patching will apply the delta + // only once). At the moment, we do not deduplicate heap object request which + // are indicated by value == 0. 
bool sharing_ok = RelocInfo::IsShareableRelocMode(rmode) || - (rmode == RelocInfo::CODE_TARGET && value != 0); + (rmode == RelocInfo::CODE_TARGET && value != 0) || + (RelocInfo::IsEmbeddedObjectMode(rmode) && value != 0); DCHECK_LT(pending_32_bit_constants_.size(), kMaxNumPending32Constants); if (pending_32_bit_constants_.empty()) { first_const_pool_32_use_ = position; diff --git a/deps/v8/src/codegen/arm/assembler-arm.h b/deps/v8/src/codegen/arm/assembler-arm.h index 4db825fa971b3c..f383632f73aee8 100644 --- a/deps/v8/src/codegen/arm/assembler-arm.h +++ b/deps/v8/src/codegen/arm/assembler-arm.h @@ -625,8 +625,7 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase { void stm(BlockAddrMode am, Register base, RegList src, Condition cond = al); // Exception-generating instructions and debugging support - void stop(const char* msg, Condition cond = al, - int32_t code = kDefaultStopCode); + void stop(Condition cond = al, int32_t code = kDefaultStopCode); void bkpt(uint32_t imm16); // v5 and above void svc(uint32_t imm24, Condition cond = al); diff --git a/deps/v8/src/codegen/arm/macro-assembler-arm.cc b/deps/v8/src/codegen/arm/macro-assembler-arm.cc index bcda320f8be7de..ba334cd0b65af2 100644 --- a/deps/v8/src/codegen/arm/macro-assembler-arm.cc +++ b/deps/v8/src/codegen/arm/macro-assembler-arm.cc @@ -303,20 +303,24 @@ void TurboAssembler::Call(Handle code, RelocInfo::Mode rmode, Call(code.address(), rmode, cond, mode); } -void TurboAssembler::CallBuiltinPointer(Register builtin_pointer) { +void TurboAssembler::LoadEntryFromBuiltinIndex(Register builtin_index) { STATIC_ASSERT(kSystemPointerSize == 4); STATIC_ASSERT(kSmiShiftSize == 0); STATIC_ASSERT(kSmiTagSize == 1); STATIC_ASSERT(kSmiTag == 0); - // The builtin_pointer register contains the builtin index as a Smi. + // The builtin_index register contains the builtin index as a Smi. // Untagging is folded into the indexing operand below. - mov(builtin_pointer, - Operand(builtin_pointer, LSL, kSystemPointerSizeLog2 - kSmiTagSize)); - add(builtin_pointer, builtin_pointer, + mov(builtin_index, + Operand(builtin_index, LSL, kSystemPointerSizeLog2 - kSmiTagSize)); + add(builtin_index, builtin_index, Operand(IsolateData::builtin_entry_table_offset())); - ldr(builtin_pointer, MemOperand(kRootRegister, builtin_pointer)); - Call(builtin_pointer); + ldr(builtin_index, MemOperand(kRootRegister, builtin_index)); +} + +void TurboAssembler::CallBuiltinByIndex(Register builtin_index) { + LoadEntryFromBuiltinIndex(builtin_index); + Call(builtin_index); } void TurboAssembler::LoadCodeObjectEntry(Register destination, @@ -632,7 +636,7 @@ void MacroAssembler::RecordWriteField(Register object, int offset, add(scratch, object, Operand(offset - kHeapObjectTag)); tst(scratch, Operand(kPointerSize - 1)); b(eq, &ok); - stop("Unaligned cell in write barrier"); + stop(); bind(&ok); } @@ -1951,15 +1955,15 @@ void TurboAssembler::Check(Condition cond, AbortReason reason) { void TurboAssembler::Abort(AbortReason reason) { Label abort_start; bind(&abort_start); - const char* msg = GetAbortReason(reason); #ifdef DEBUG + const char* msg = GetAbortReason(reason); RecordComment("Abort message: "); RecordComment(msg); #endif // Avoid emitting call to builtin if requested. if (trap_on_abort()) { - stop(msg); + stop(); return; } @@ -2402,7 +2406,7 @@ void TurboAssembler::CallCFunctionHelper(Register function, b(eq, &alignment_as_expected); // Don't use Check here, as it will call Runtime_Abort possibly // re-entering here. 
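The LoadEntryFromBuiltinIndex hunk above folds Smi untagging into the scaling shift: with kSmiTagSize == 1 and a zero shift size, index i is encoded as i << 1, so shifting by kSystemPointerSizeLog2 - kSmiTagSize both untags and scales to a slot offset in one operation. The arithmetic, as a hedged C++ sketch with 32-bit constants (the table offset parameter stands in for IsolateData::builtin_entry_table_offset()):

```cpp
#include <cstdint>

constexpr int kSystemPointerSizeLog2 = 2;  // 32-bit example
constexpr int kSmiTagSize = 1;

// smi_index is the raw register contents: builtin index i encoded as i << 1.
uintptr_t BuiltinEntrySlotOffset(uintptr_t smi_index,
                                 uintptr_t builtin_entry_table_offset) {
  // One shift both untags the Smi and scales it to a pointer-sized slot.
  return (smi_index << (kSystemPointerSizeLog2 - kSmiTagSize)) +
         builtin_entry_table_offset;
}
```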
- stop("Unexpected alignment"); + stop(); bind(&alignment_as_expected); } } diff --git a/deps/v8/src/codegen/arm/macro-assembler-arm.h b/deps/v8/src/codegen/arm/macro-assembler-arm.h index 4f497dcea473bb..e4ce734f52a37f 100644 --- a/deps/v8/src/codegen/arm/macro-assembler-arm.h +++ b/deps/v8/src/codegen/arm/macro-assembler-arm.h @@ -300,7 +300,10 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase { bool check_constant_pool = true); void Call(Label* target); - void CallBuiltinPointer(Register builtin_pointer) override; + // Load the builtin given by the Smi in |builtin_index| into the same + // register. + void LoadEntryFromBuiltinIndex(Register builtin_index); + void CallBuiltinByIndex(Register builtin_index) override; void LoadCodeObjectEntry(Register destination, Register code_object) override; void CallCodeObject(Register code_object) override; diff --git a/deps/v8/src/codegen/arm64/assembler-arm64-inl.h b/deps/v8/src/codegen/arm64/assembler-arm64-inl.h index 5680d8b054f2e8..baae106c1c6ad8 100644 --- a/deps/v8/src/codegen/arm64/assembler-arm64-inl.h +++ b/deps/v8/src/codegen/arm64/assembler-arm64-inl.h @@ -5,6 +5,9 @@ #ifndef V8_CODEGEN_ARM64_ASSEMBLER_ARM64_INL_H_ #define V8_CODEGEN_ARM64_ASSEMBLER_ARM64_INL_H_ +#include + +#include "src/base/memory.h" #include "src/codegen/arm64/assembler-arm64.h" #include "src/codegen/assembler.h" #include "src/debug/debug.h" @@ -22,8 +25,9 @@ void RelocInfo::apply(intptr_t delta) { // On arm64 only internal references and immediate branches need extra work. if (RelocInfo::IsInternalReference(rmode_)) { // Absolute code pointer inside code object moves with the code object. - intptr_t* p = reinterpret_cast(pc_); - *p += delta; // Relocate entry. + intptr_t internal_ref = ReadUnalignedValue(pc_); + internal_ref += delta; // Relocate entry. + WriteUnalignedValue(pc_, internal_ref); } else { Instruction* instr = reinterpret_cast(pc_); if (instr->IsBranchAndLink() || instr->IsUnconditionalBranch()) { @@ -193,17 +197,16 @@ inline VRegister CPURegister::Q() const { // Default initializer is for int types template struct ImmediateInitializer { - static const bool kIsIntType = true; static inline RelocInfo::Mode rmode_for(T) { return RelocInfo::NONE; } static inline int64_t immediate_for(T t) { STATIC_ASSERT(sizeof(T) <= 8); + STATIC_ASSERT(std::is_integral::value || std::is_enum::value); return t; } }; template <> struct ImmediateInitializer { - static const bool kIsIntType = false; static inline RelocInfo::Mode rmode_for(Smi t) { return RelocInfo::NONE; } static inline int64_t immediate_for(Smi t) { return static_cast(t.ptr()); @@ -212,7 +215,6 @@ struct ImmediateInitializer { template <> struct ImmediateInitializer { - static const bool kIsIntType = false; static inline RelocInfo::Mode rmode_for(ExternalReference t) { return RelocInfo::EXTERNAL_REFERENCE; } @@ -222,8 +224,9 @@ struct ImmediateInitializer { }; template -Immediate::Immediate(Handle value) { - InitializeHandle(value); +Immediate::Immediate(Handle handle, RelocInfo::Mode mode) + : value_(static_cast(handle.address())), rmode_(mode) { + DCHECK(RelocInfo::IsEmbeddedObjectMode(mode)); } template @@ -234,13 +237,9 @@ Immediate::Immediate(T t) template Immediate::Immediate(T t, RelocInfo::Mode rmode) : value_(ImmediateInitializer::immediate_for(t)), rmode_(rmode) { - STATIC_ASSERT(ImmediateInitializer::kIsIntType); + STATIC_ASSERT(std::is_integral::value); } -// Operand. 
-template -Operand::Operand(Handle value) : immediate_(value), reg_(NoReg) {} - template Operand::Operand(T t) : immediate_(t), reg_(NoReg) {} @@ -479,7 +478,7 @@ void Assembler::Unreachable() { Address Assembler::target_pointer_address_at(Address pc) { Instruction* instr = reinterpret_cast(pc); - DCHECK(instr->IsLdrLiteralX()); + DCHECK(instr->IsLdrLiteralX() || instr->IsLdrLiteralW()); return reinterpret_cast
<Address>(instr->ImmPCOffsetTarget()); } @@ -494,6 +493,13 @@ Address Assembler::target_address_at(Address pc, Address constant_pool) { } } +Tagged_t Assembler::target_compressed_address_at(Address pc, + Address constant_pool) { + Instruction* instr = reinterpret_cast<Instruction*>(pc); + CHECK(instr->IsLdrLiteralW()); + return Memory<Tagged_t>(target_pointer_address_at(pc)); +} + Handle<Code> Assembler::code_target_object_handle_at(Address pc) { Instruction* instr = reinterpret_cast<Instruction*>(pc); if (instr->IsLdrLiteralX()) { @@ -502,14 +508,39 @@ Handle<Code> Assembler::code_target_object_handle_at(Address pc) { } else { DCHECK(instr->IsBranchAndLink() || instr->IsUnconditionalBranch()); DCHECK_EQ(instr->ImmPCOffset() % kInstrSize, 0); - return GetCodeTarget(instr->ImmPCOffset() >> kInstrSizeLog2); + return Handle<Code>::cast( + GetEmbeddedObject(instr->ImmPCOffset() >> kInstrSizeLog2)); } } -Handle<HeapObject> Assembler::compressed_embedded_object_handle_at(Address pc) { +AssemblerBase::EmbeddedObjectIndex +Assembler::embedded_object_index_referenced_from(Address pc) { Instruction* instr = reinterpret_cast<Instruction*>(pc); - CHECK(!instr->IsLdrLiteralX()); - return GetCompressedEmbeddedObject(ReadUnalignedValue<int32_t>(pc)); + if (instr->IsLdrLiteralX()) { + STATIC_ASSERT(sizeof(EmbeddedObjectIndex) == sizeof(intptr_t)); + return Memory<EmbeddedObjectIndex>(target_pointer_address_at(pc)); + } else { + DCHECK(instr->IsLdrLiteralW()); + return Memory<uint32_t>(target_pointer_address_at(pc)); + } +} + +void Assembler::set_embedded_object_index_referenced_from( + Address pc, EmbeddedObjectIndex data) { + Instruction* instr = reinterpret_cast<Instruction*>(pc); + if (instr->IsLdrLiteralX()) { + Memory<EmbeddedObjectIndex>(target_pointer_address_at(pc)) = data; + } else { + DCHECK(instr->IsLdrLiteralW()); + DCHECK(is_uint32(data)); + WriteUnalignedValue(target_pointer_address_at(pc), + static_cast<uint32_t>(data)); + } +} + +Handle<HeapObject> Assembler::target_object_handle_at(Address pc) { + return GetEmbeddedObject( + Assembler::embedded_object_index_referenced_from(pc)); } Address Assembler::runtime_entry_at(Address pc) { @@ -557,7 +588,7 @@ void Assembler::deserialization_set_special_target_at(Address location, void Assembler::deserialization_set_target_internal_reference_at( Address pc, Address target, RelocInfo::Mode mode) { - Memory<Address>
(pc) = target; + WriteUnalignedValue<Address>
    (pc, target); } void Assembler::set_target_address_at(Address pc, Address constant_pool, @@ -585,12 +616,21 @@ void Assembler::set_target_address_at(Address pc, Address constant_pool, } } +void Assembler::set_target_compressed_address_at( + Address pc, Address constant_pool, Tagged_t target, + ICacheFlushMode icache_flush_mode) { + Instruction* instr = reinterpret_cast(pc); + CHECK(instr->IsLdrLiteralW()); + Memory(target_pointer_address_at(pc)) = target; +} + int RelocInfo::target_address_size() { if (IsCodedSpecially()) { return Assembler::kSpecialTargetSize; } else { - DCHECK(reinterpret_cast(pc_)->IsLdrLiteralX()); - return kSystemPointerSize; + Instruction* instr = reinterpret_cast(pc_); + DCHECK(instr->IsLdrLiteralX() || instr->IsLdrLiteralW()); + return instr->IsLdrLiteralW() ? kTaggedSize : kSystemPointerSize; } } @@ -629,19 +669,30 @@ Address RelocInfo::constant_pool_entry_address() { } HeapObject RelocInfo::target_object() { - DCHECK(IsCodeTarget(rmode_) || IsFullEmbeddedObject(rmode_)); - return HeapObject::cast( - Object(Assembler::target_address_at(pc_, constant_pool_))); + DCHECK(IsCodeTarget(rmode_) || IsEmbeddedObjectMode(rmode_)); + if (IsCompressedEmbeddedObject(rmode_)) { + return HeapObject::cast(Object(DecompressTaggedAny( + host_.address(), + Assembler::target_compressed_address_at(pc_, constant_pool_)))); + } else { + return HeapObject::cast( + Object(Assembler::target_address_at(pc_, constant_pool_))); + } } HeapObject RelocInfo::target_object_no_host(Isolate* isolate) { - return target_object(); + if (IsCompressedEmbeddedObject(rmode_)) { + return HeapObject::cast(Object(DecompressTaggedAny( + isolate, + Assembler::target_compressed_address_at(pc_, constant_pool_)))); + } else { + return target_object(); + } } Handle RelocInfo::target_object_handle(Assembler* origin) { - if (IsFullEmbeddedObject(rmode_)) { - return Handle(reinterpret_cast( - Assembler::target_address_at(pc_, constant_pool_))); + if (IsEmbeddedObjectMode(rmode_)) { + return origin->target_object_handle_at(pc_); } else { DCHECK(IsCodeTarget(rmode_)); return origin->code_target_object_handle_at(pc_); @@ -651,9 +702,15 @@ Handle RelocInfo::target_object_handle(Assembler* origin) { void RelocInfo::set_target_object(Heap* heap, HeapObject target, WriteBarrierMode write_barrier_mode, ICacheFlushMode icache_flush_mode) { - DCHECK(IsCodeTarget(rmode_) || IsFullEmbeddedObject(rmode_)); - Assembler::set_target_address_at(pc_, constant_pool_, target.ptr(), - icache_flush_mode); + DCHECK(IsCodeTarget(rmode_) || IsEmbeddedObjectMode(rmode_)); + if (IsCompressedEmbeddedObject(rmode_)) { + Assembler::set_target_compressed_address_at( + pc_, constant_pool_, CompressTagged(target.ptr()), icache_flush_mode); + } else { + DCHECK(IsFullEmbeddedObject(rmode_)); + Assembler::set_target_address_at(pc_, constant_pool_, target.ptr(), + icache_flush_mode); + } if (write_barrier_mode == UPDATE_WRITE_BARRIER && !host().is_null()) { WriteBarrierForCode(host(), this, target); } @@ -673,7 +730,7 @@ void RelocInfo::set_target_external_reference( Address RelocInfo::target_internal_reference() { DCHECK(rmode_ == INTERNAL_REFERENCE); - return Memory
<Address>(pc_); + return ReadUnalignedValue<Address>
    (pc_); } Address RelocInfo::target_internal_reference_address() { @@ -701,11 +758,14 @@ Address RelocInfo::target_off_heap_target() { } void RelocInfo::WipeOut() { - DCHECK(IsFullEmbeddedObject(rmode_) || IsCodeTarget(rmode_) || + DCHECK(IsEmbeddedObjectMode(rmode_) || IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_) || IsExternalReference(rmode_) || IsInternalReference(rmode_) || IsOffHeapTarget(rmode_)); if (IsInternalReference(rmode_)) { - Memory
<Address>(pc_) = kNullAddress; + WriteUnalignedValue<Address>
    (pc_, kNullAddress); + } else if (IsCompressedEmbeddedObject(rmode_)) { + Assembler::set_target_compressed_address_at(pc_, constant_pool_, + kNullAddress); } else { Assembler::set_target_address_at(pc_, constant_pool_, kNullAddress); } @@ -1025,9 +1085,7 @@ inline void Assembler::CheckBuffer() { if (pc_offset() >= next_veneer_pool_check_) { CheckVeneerPool(false, true); } - if (pc_offset() >= next_constant_pool_check_) { - CheckConstPool(false, true); - } + constpool_.MaybeCheck(); } } // namespace internal diff --git a/deps/v8/src/codegen/arm64/assembler-arm64.cc b/deps/v8/src/codegen/arm64/assembler-arm64.cc index 1806f82b461a5f..159e763ba26026 100644 --- a/deps/v8/src/codegen/arm64/assembler-arm64.cc +++ b/deps/v8/src/codegen/arm64/assembler-arm64.cc @@ -34,6 +34,7 @@ #include "src/base/cpu.h" #include "src/codegen/arm64/assembler-arm64-inl.h" #include "src/codegen/register-configuration.h" +#include "src/codegen/safepoint-table.h" #include "src/codegen/string-constants.h" #include "src/execution/frame-constants.h" @@ -283,11 +284,6 @@ bool AreConsecutive(const VRegister& reg1, const VRegister& reg2, return true; } -void Immediate::InitializeHandle(Handle handle) { - value_ = static_cast(handle.address()); - rmode_ = RelocInfo::FULL_EMBEDDED_OBJECT; -} - bool Operand::NeedsRelocation(const Assembler* assembler) const { RelocInfo::Mode rmode = immediate_.rmode(); @@ -298,167 +294,6 @@ bool Operand::NeedsRelocation(const Assembler* assembler) const { return !RelocInfo::IsNone(rmode); } -bool ConstPool::AddSharedEntry(SharedEntryMap& entry_map, uint64_t data, - int offset) { - auto existing = entry_map.find(data); - if (existing == entry_map.end()) { - entry_map[data] = static_cast(entries_.size()); - entries_.push_back(std::make_pair(data, std::vector(1, offset))); - return true; - } - int index = existing->second; - entries_[index].second.push_back(offset); - return false; -} - -// Constant Pool. -bool ConstPool::RecordEntry(intptr_t data, RelocInfo::Mode mode) { - DCHECK(mode != RelocInfo::CONST_POOL && mode != RelocInfo::VENEER_POOL && - mode != RelocInfo::DEOPT_SCRIPT_OFFSET && - mode != RelocInfo::DEOPT_INLINING_ID && - mode != RelocInfo::DEOPT_REASON && mode != RelocInfo::DEOPT_ID); - - bool write_reloc_info = true; - - uint64_t raw_data = static_cast(data); - int offset = assm_->pc_offset(); - if (IsEmpty()) { - first_use_ = offset; - } - - if (RelocInfo::IsShareableRelocMode(mode)) { - write_reloc_info = AddSharedEntry(shared_entries_, raw_data, offset); - } else if (mode == RelocInfo::CODE_TARGET && raw_data != 0) { - // A zero data value is a placeholder and must not be shared. - write_reloc_info = AddSharedEntry(handle_to_index_map_, raw_data, offset); - } else { - entries_.push_back(std::make_pair(raw_data, std::vector(1, offset))); - } - - if (EntryCount() > Assembler::kApproxMaxPoolEntryCount) { - // Request constant pool emission after the next instruction. - assm_->SetNextConstPoolCheckIn(1); - } - - return write_reloc_info; -} - -int ConstPool::DistanceToFirstUse() { - DCHECK_GE(first_use_, 0); - return assm_->pc_offset() - first_use_; -} - -int ConstPool::MaxPcOffset() { - // There are no pending entries in the pool so we can never get out of - // range. - if (IsEmpty()) return kMaxInt; - - // Entries are not necessarily emitted in the order they are added so in the - // worst case the first constant pool use will be accessing the last entry. 
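As a worked illustration of the bound computed by the deleted return statement just below: the earliest pc-relative user must still reach the entry that lands deepest in the pool, so the pool has to be emitted before this offset. A sketch, with parameters standing in for first_use_, kMaxLoadLiteralRange and WorstCaseSize():

static int MaxPcOffsetSketch(int first_use, int max_load_literal_range,
                             int worst_case_pool_size) {
  // Worst case: the first user addresses the last entry, which sits
  // worst_case_pool_size bytes past the point where emission starts.
  return first_use + max_load_literal_range - worst_case_pool_size;
}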
- return first_use_ + kMaxLoadLiteralRange - WorstCaseSize(); -} - -int ConstPool::WorstCaseSize() { - if (IsEmpty()) return 0; - - // Max size prologue: - // b over - // ldr xzr, #pool_size - // blr xzr - // nop - // All entries are 64-bit for now. - return 4 * kInstrSize + EntryCount() * kSystemPointerSize; -} - -int ConstPool::SizeIfEmittedAtCurrentPc(bool require_jump) { - if (IsEmpty()) return 0; - - // Prologue is: - // b over ;; if require_jump - // ldr xzr, #pool_size - // blr xzr - // nop ;; if not 64-bit aligned - int prologue_size = require_jump ? kInstrSize : 0; - prologue_size += 2 * kInstrSize; - prologue_size += - IsAligned(assm_->pc_offset() + prologue_size, 8) ? 0 : kInstrSize; - - // All entries are 64-bit for now. - return prologue_size + EntryCount() * kSystemPointerSize; -} - -void ConstPool::Emit(bool require_jump) { - DCHECK(!assm_->is_const_pool_blocked()); - // Prevent recursive pool emission and protect from veneer pools. - Assembler::BlockPoolsScope block_pools(assm_); - - int size = SizeIfEmittedAtCurrentPc(require_jump); - Label size_check; - assm_->bind(&size_check); - - assm_->RecordConstPool(size); - // Emit the constant pool. It is preceded by an optional branch if - // require_jump and a header which will: - // 1) Encode the size of the constant pool, for use by the disassembler. - // 2) Terminate the program, to try to prevent execution from accidentally - // flowing into the constant pool. - // 3) align the pool entries to 64-bit. - // The header is therefore made of up to three arm64 instructions: - // ldr xzr, # - // blr xzr - // nop - // - // If executed, the header will likely segfault and lr will point to the - // instruction following the offending blr. - // TODO(all): Make the alignment part less fragile. Currently code is - // allocated as a byte array so there are no guarantees the alignment will - // be preserved on compaction. Currently it works as allocation seems to be - // 64-bit aligned. - - // Emit branch if required - Label after_pool; - if (require_jump) { - assm_->b(&after_pool); - } - - // Emit the header. - assm_->RecordComment("[ Constant Pool"); - EmitMarker(); - EmitGuard(); - assm_->Align(8); - - // Emit constant pool entries. - // TODO(all): currently each relocated constant is 64 bits, consider adding - // support for 32-bit entries. - EmitEntries(); - assm_->RecordComment("]"); - - if (after_pool.is_linked()) { - assm_->bind(&after_pool); - } - - DCHECK(assm_->SizeOfCodeGeneratedSince(&size_check) == - static_cast(size)); -} - -void ConstPool::Clear() { - shared_entries_.clear(); - handle_to_index_map_.clear(); - entries_.clear(); - first_use_ = -1; -} - -void ConstPool::EmitMarker() { - // A constant pool size is expressed in number of 32-bits words. - // Currently all entries are 64-bit. - // + 1 is for the crash guard. - // + 0/1 for alignment. - int word_count = - EntryCount() * 2 + 1 + (IsAligned(assm_->pc_offset(), 8) ? 
0 : 1); - assm_->Emit(LDR_x_lit | Assembler::ImmLLiteral(word_count) | - Assembler::Rt(xzr)); -} - MemOperand::PairResult MemOperand::AreConsistentForPair( const MemOperand& operandA, const MemOperand& operandB, int access_size_log2) { @@ -484,47 +319,18 @@ MemOperand::PairResult MemOperand::AreConsistentForPair( return kNotPair; } -void ConstPool::EmitGuard() { -#ifdef DEBUG - Instruction* instr = reinterpret_cast(assm_->pc()); - DCHECK(instr->preceding()->IsLdrLiteralX() && - instr->preceding()->Rt() == xzr.code()); -#endif - assm_->EmitPoolGuard(); -} - -void ConstPool::EmitEntries() { - DCHECK(IsAligned(assm_->pc_offset(), 8)); - - // Emit entries. - for (const auto& entry : entries_) { - for (const auto& pc : entry.second) { - Instruction* instr = assm_->InstructionAt(pc); - - // Instruction to patch must be 'ldr rd, [pc, #offset]' with offset == 0. - DCHECK(instr->IsLdrLiteral() && instr->ImmLLiteral() == 0); - instr->SetImmPCOffsetTarget(assm_->options(), assm_->pc()); - } - - assm_->dc64(entry.first); - } - Clear(); -} - // Assembler Assembler::Assembler(const AssemblerOptions& options, std::unique_ptr buffer) : AssemblerBase(options, std::move(buffer)), - constpool_(this), - unresolved_branches_() { - const_pool_blocked_nesting_ = 0; + unresolved_branches_(), + constpool_(this) { veneer_pool_blocked_nesting_ = 0; Reset(); } Assembler::~Assembler() { DCHECK(constpool_.IsEmpty()); - DCHECK_EQ(const_pool_blocked_nesting_, 0); DCHECK_EQ(veneer_pool_blocked_nesting_, 0); } @@ -533,7 +339,6 @@ void Assembler::AbortedCodeGeneration() { constpool_.Clear(); } void Assembler::Reset() { #ifdef DEBUG DCHECK((pc_ >= buffer_start_) && (pc_ < buffer_start_ + buffer_->size())); - DCHECK_EQ(const_pool_blocked_nesting_, 0); DCHECK_EQ(veneer_pool_blocked_nesting_, 0); DCHECK(unresolved_branches_.empty()); memset(buffer_start_, 0, pc_ - buffer_start_); @@ -541,9 +346,7 @@ void Assembler::Reset() { pc_ = buffer_start_; reloc_info_writer.Reposition(buffer_start_ + buffer_->size(), pc_); constpool_.Clear(); - next_constant_pool_check_ = 0; next_veneer_pool_check_ = kMaxInt; - no_const_pool_before_ = 0; } void Assembler::AllocateAndInstallRequestedHeapObjects(Isolate* isolate) { @@ -554,14 +357,16 @@ void Assembler::AllocateAndInstallRequestedHeapObjects(Isolate* isolate) { case HeapObjectRequest::kHeapNumber: { Handle object = isolate->factory()->NewHeapNumber( request.heap_number(), AllocationType::kOld); - set_target_address_at(pc, 0 /* unused */, object.address()); + EmbeddedObjectIndex index = AddEmbeddedObject(object); + set_embedded_object_index_referenced_from(pc, index); break; } case HeapObjectRequest::kStringConstant: { const StringConstantBase* str = request.string(); CHECK_NOT_NULL(str); - set_target_address_at(pc, 0 /* unused */, - str->AllocateStringConstant(isolate).address()); + EmbeddedObjectIndex index = + AddEmbeddedObject(str->AllocateStringConstant(isolate)); + set_embedded_object_index_referenced_from(pc, index); break; } } @@ -572,7 +377,7 @@ void Assembler::GetCode(Isolate* isolate, CodeDesc* desc, SafepointTableBuilder* safepoint_table_builder, int handler_table_offset) { // Emit constant pool if necessary. 
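The replacement below maps the old bool pair of CheckConstPool(force_emit, require_jump) onto the new ConstantPool enums, which later hunks in this patch define. The correspondence, as a sketch:

enum class Jump { kOmitted, kRequired };
enum class Emission { kIfNeeded, kForced };

// CheckConstPool(true, false) becomes
// constpool_.Check(Emission::kForced, Jump::kOmitted): at the end of code
// generation nothing executes past the pool, so the branch over it is
// omitted. This is exactly what ForceConstantPoolEmissionWithoutJump() wraps.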
- CheckConstPool(true, false); + ForceConstantPoolEmissionWithoutJump(); DCHECK(constpool_.IsEmpty()); int code_comments_size = WriteCodeComments(); @@ -870,32 +675,6 @@ void Assembler::DeleteUnresolvedBranchInfoForLabel(Label* label) { } } -void Assembler::StartBlockConstPool() { - if (const_pool_blocked_nesting_++ == 0) { - // Prevent constant pool checks happening by setting the next check to - // the biggest possible offset. - next_constant_pool_check_ = kMaxInt; - } -} - -void Assembler::EndBlockConstPool() { - if (--const_pool_blocked_nesting_ == 0) { - // Check the constant pool hasn't been blocked for too long. - DCHECK(pc_offset() < constpool_.MaxPcOffset()); - // Two cases: - // * no_const_pool_before_ >= next_constant_pool_check_ and the emission is - // still blocked - // * no_const_pool_before_ < next_constant_pool_check_ and the next emit - // will trigger a check. - next_constant_pool_check_ = no_const_pool_before_; - } -} - -bool Assembler::is_const_pool_blocked() const { - return (const_pool_blocked_nesting_ > 0) || - (pc_offset() < no_const_pool_before_); -} - bool Assembler::IsConstantPoolAt(Instruction* instr) { // The constant pool marker is made of two instructions. These instructions // will never be emitted by the JIT, so checking for the first one is enough: @@ -1497,6 +1276,7 @@ Operand Operand::EmbeddedStringConstant(const StringConstantBase* str) { void Assembler::ldr(const CPURegister& rt, const Operand& operand) { if (operand.IsHeapObjectRequest()) { + BlockPoolsScope no_pool_before_ldr_of_heap_object_request(this); RequestHeapObject(operand.heap_object_request()); ldr(rt, operand.immediate_for_heap_object_request()); } else { @@ -1505,11 +1285,8 @@ void Assembler::ldr(const CPURegister& rt, const Operand& operand) { } void Assembler::ldr(const CPURegister& rt, const Immediate& imm) { - // Currently we only support 64-bit literals. - DCHECK(rt.Is64Bits()); - + BlockPoolsScope no_pool_before_ldr_pcrel_instr(this); RecordRelocInfo(imm.rmode(), imm.value()); - BlockConstPoolFor(1); // The load will be patched when the constpool is emitted, patching code // expect a load literal with offset 0. ldr_pcrel(rt, 0); @@ -3679,6 +3456,7 @@ void Assembler::dup(const VRegister& vd, const VRegister& vn, int vn_index) { } void Assembler::dcptr(Label* label) { + BlockPoolsScope no_pool_inbetween(this); RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE); if (label->is_bound()) { // The label is bound, so it does not need to be updated and the internal @@ -4471,8 +4249,10 @@ void Assembler::GrowBuffer() { // Relocate internal references. for (auto pos : internal_reference_positions_) { - intptr_t* p = reinterpret_cast(buffer_start_ + pos); - *p += pc_delta; + Address address = reinterpret_cast(buffer_start_) + pos; + intptr_t internal_ref = ReadUnalignedValue(address); + internal_ref += pc_delta; + WriteUnalignedValue(address, internal_ref); } // Pending relocation entries are also relative, no need to relocate. @@ -4492,17 +4272,31 @@ void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data, RelocInfo::IsConstPool(rmode) || RelocInfo::IsVeneerPool(rmode)); // These modes do not need an entry in the constant pool. } else if (constant_pool_mode == NEEDS_POOL_ENTRY) { - bool new_constpool_entry = constpool_.RecordEntry(data, rmode); - // Make sure the constant pool is not emitted in place of the next - // instruction for which we just recorded relocation info. 
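A pattern recurring in this patch (see the RelocInfo::apply and GrowBuffer hunks above): absolute constants embedded in the instruction stream are no longer dereferenced through a pointer cast, because they are not guaranteed to be naturally aligned. The relocation step, as a sketch that assumes V8's base::ReadUnalignedValue/WriteUnalignedValue accessors:

static void RelocateInternalRefSketch(Address address, intptr_t pc_delta) {
  intptr_t internal_ref = ReadUnalignedValue<intptr_t>(address);
  internal_ref += pc_delta;  // relocate the absolute entry
  WriteUnalignedValue<intptr_t>(address, internal_ref);
}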
- BlockConstPoolFor(1); - if (!new_constpool_entry) return; + if (RelocInfo::IsEmbeddedObjectMode(rmode)) { + Handle handle(reinterpret_cast(data)); + data = AddEmbeddedObject(handle); + } + if (rmode == RelocInfo::COMPRESSED_EMBEDDED_OBJECT) { + if (constpool_.RecordEntry(static_cast(data), rmode) == + RelocInfoStatus::kMustOmitForDuplicate) { + return; + } + } else { + if (constpool_.RecordEntry(static_cast(data), rmode) == + RelocInfoStatus::kMustOmitForDuplicate) { + return; + } + } } // For modes that cannot use the constant pool, a different sequence of // instructions will be emitted by this function's caller. if (!ShouldRecordRelocInfo(rmode)) return; + // Callers should ensure that constant pool emission is blocked until the + // instruction the reloc info is associated with has been emitted. + DCHECK(constpool_.IsBlocked()); + // We do not try to reuse pool constants. RelocInfo rinfo(reinterpret_cast
    (pc_), rmode, data, Code()); @@ -4511,103 +4305,127 @@ void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data, } void Assembler::near_jump(int offset, RelocInfo::Mode rmode) { + BlockPoolsScope no_pool_before_b_instr(this); if (!RelocInfo::IsNone(rmode)) RecordRelocInfo(rmode, offset, NO_POOL_ENTRY); b(offset); } void Assembler::near_call(int offset, RelocInfo::Mode rmode) { + BlockPoolsScope no_pool_before_bl_instr(this); if (!RelocInfo::IsNone(rmode)) RecordRelocInfo(rmode, offset, NO_POOL_ENTRY); bl(offset); } void Assembler::near_call(HeapObjectRequest request) { + BlockPoolsScope no_pool_before_bl_instr(this); RequestHeapObject(request); - int index = AddCodeTarget(Handle()); + EmbeddedObjectIndex index = AddEmbeddedObject(Handle()); RecordRelocInfo(RelocInfo::CODE_TARGET, index, NO_POOL_ENTRY); - bl(index); + DCHECK(is_int32(index)); + bl(static_cast(index)); } -void Assembler::BlockConstPoolFor(int instructions) { - int pc_limit = pc_offset() + instructions * kInstrSize; - if (no_const_pool_before_ < pc_limit) { - no_const_pool_before_ = pc_limit; - // Make sure the pool won't be blocked for too long. - DCHECK(pc_limit < constpool_.MaxPcOffset()); - } +// Constant Pool - if (next_constant_pool_check_ < no_const_pool_before_) { - next_constant_pool_check_ = no_const_pool_before_; - } +void ConstantPool::EmitPrologue(Alignment require_alignment) { + // Recorded constant pool size is expressed in number of 32-bits words, + // and includes prologue and alignment, but not the jump around the pool + // and the size of the marker itself. + const int marker_size = 1; + int word_count = + ComputeSize(Jump::kOmitted, require_alignment) / kInt32Size - marker_size; + assm_->Emit(LDR_x_lit | Assembler::ImmLLiteral(word_count) | + Assembler::Rt(xzr)); + assm_->EmitPoolGuard(); } -void Assembler::CheckConstPool(bool force_emit, bool require_jump) { - // Some short sequence of instruction mustn't be broken up by constant pool - // emission, such sequences are protected by calls to BlockConstPoolFor and - // BlockConstPoolScope. - if (is_const_pool_blocked()) { - // Something is wrong if emission is forced and blocked at the same time. - DCHECK(!force_emit); - return; - } +int ConstantPool::PrologueSize(Jump require_jump) const { + // Prologue is: + // b over ;; if require_jump + // ldr xzr, #pool_size + // blr xzr + int prologue_size = require_jump == Jump::kRequired ? kInstrSize : 0; + prologue_size += 2 * kInstrSize; + return prologue_size; +} - // There is nothing to do if there are no pending constant pool entries. - if (constpool_.IsEmpty()) { - // Calculate the offset of the next check. - SetNextConstPoolCheckIn(kCheckConstPoolInterval); - return; - } +void ConstantPool::SetLoadOffsetToConstPoolEntry(int load_offset, + Instruction* entry_offset, + const ConstantPoolKey& key) { + Instruction* instr = assm_->InstructionAt(load_offset); + // Instruction to patch must be 'ldr rd, [pc, #offset]' with offset == 0. + DCHECK(instr->IsLdrLiteral() && instr->ImmLLiteral() == 0); + instr->SetImmPCOffsetTarget(assm_->options(), entry_offset); +} - // We emit a constant pool when: - // * requested to do so by parameter force_emit (e.g. after each function). - // * the distance to the first instruction accessing the constant pool is - // kApproxMaxDistToConstPool or more. - // * the number of entries in the pool is kApproxMaxPoolEntryCount or more. 
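Condensing the emission policy described by the deleted comments above into one predicate (a sketch; the constants mirror the old kApproxMaxDistToConstPool and kApproxMaxPoolEntryCount):

static bool ShouldEmitPoolSketch(bool force_emit, int dist_to_first_use,
                                 int entry_count) {
  const int kApproxMaxDist = 64 * 1024;  // 64 KB
  const int kApproxMaxEntries = 512;
  if (force_emit) return true;  // e.g. at the end of a function
  return dist_to_first_use >= kApproxMaxDist ||
         entry_count >= kApproxMaxEntries;
}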
- int dist = constpool_.DistanceToFirstUse(); - int count = constpool_.EntryCount(); - if (!force_emit && (dist < kApproxMaxDistToConstPool) && - (count < kApproxMaxPoolEntryCount)) { +void ConstantPool::Check(Emission force_emit, Jump require_jump, + size_t margin) { + // Some short sequence of instruction must not be broken up by constant pool + // emission, such sequences are protected by a ConstPool::BlockScope. + if (IsBlocked()) { + // Something is wrong if emission is forced and blocked at the same time. + DCHECK_EQ(force_emit, Emission::kIfNeeded); return; } - // Emit veneers for branches that would go out of range during emission of the - // constant pool. - int worst_case_size = constpool_.WorstCaseSize(); - CheckVeneerPool(false, require_jump, kVeneerDistanceMargin + worst_case_size); + // We emit a constant pool only if : + // * it is not empty + // * emission is forced by parameter force_emit (e.g. at function end). + // * emission is mandatory or opportune according to {ShouldEmitNow}. + if (!IsEmpty() && (force_emit == Emission::kForced || + ShouldEmitNow(require_jump, margin))) { + // Emit veneers for branches that would go out of range during emission of + // the constant pool. + int worst_case_size = ComputeSize(Jump::kRequired, Alignment::kRequired); + assm_->CheckVeneerPool(false, require_jump == Jump::kRequired, + assm_->kVeneerDistanceMargin + worst_case_size + + static_cast(margin)); + + // Check that the code buffer is large enough before emitting the constant + // pool (this includes the gap to the relocation information). + int needed_space = worst_case_size + assm_->kGap; + while (assm_->buffer_space() <= needed_space) { + assm_->GrowBuffer(); + } - // Check that the code buffer is large enough before emitting the constant - // pool (this includes the gap to the relocation information). - int needed_space = worst_case_size + kGap + 1 * kInstrSize; - while (buffer_space() <= needed_space) { - GrowBuffer(); + EmitAndClear(require_jump); } - - Label size_check; - bind(&size_check); - constpool_.Emit(require_jump); - DCHECK(SizeOfCodeGeneratedSince(&size_check) <= - static_cast(worst_case_size)); - - // Since a constant pool was just emitted, move the check offset forward by + // Since a constant pool is (now) empty, move the check offset forward by // the standard interval. - SetNextConstPoolCheckIn(kCheckConstPoolInterval); + SetNextCheckIn(ConstantPool::kCheckInterval); } -bool Assembler::ShouldEmitVeneer(int max_reachable_pc, int margin) { +// Pool entries are accessed with pc relative load therefore this cannot be more +// than 1 * MB. Since constant pool emission checks are interval based, and we +// want to keep entries close to the code, we try to emit every 64KB. +const size_t ConstantPool::kMaxDistToPool32 = 1 * MB; +const size_t ConstantPool::kMaxDistToPool64 = 1 * MB; +const size_t ConstantPool::kCheckInterval = 128 * kInstrSize; +const size_t ConstantPool::kApproxDistToPool32 = 64 * KB; +const size_t ConstantPool::kApproxDistToPool64 = kApproxDistToPool32; + +const size_t ConstantPool::kOpportunityDistToPool32 = 64 * KB; +const size_t ConstantPool::kOpportunityDistToPool64 = 64 * KB; +const size_t ConstantPool::kApproxMaxEntryCount = 512; + +bool Assembler::ShouldEmitVeneer(int max_reachable_pc, size_t margin) { // Account for the branch around the veneers and the guard. 
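The rewrite below also flips the comparison so that every term is added on the left-hand side instead of being subtracted from max_reachable_pc; with margin now an unsigned size_t, the old subtractions could wrap. The equivalent predicate, as a standalone sketch:

#include <cstddef>

static bool ShouldEmitVeneerSketch(size_t pc_offset, size_t margin,
                                   size_t pending_veneer_bytes,
                                   size_t max_reachable_pc) {
  const size_t kProtectionOffset = 2 * 4;  // branch + guard, kInstrSize each
  return pc_offset + margin + kProtectionOffset + pending_veneer_bytes >=
         max_reachable_pc;
}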
int protection_offset = 2 * kInstrSize; - return pc_offset() > - max_reachable_pc - margin - protection_offset - - static_cast(unresolved_branches_.size() * kMaxVeneerCodeSize); + return static_cast(pc_offset() + margin + protection_offset + + unresolved_branches_.size() * + kMaxVeneerCodeSize) >= max_reachable_pc; } void Assembler::RecordVeneerPool(int location_offset, int size) { + Assembler::BlockPoolsScope block_pools(this, PoolEmissionCheck::kSkip); RelocInfo rinfo(reinterpret_cast
    (buffer_start_) + location_offset, RelocInfo::VENEER_POOL, static_cast(size), Code()); reloc_info_writer.Write(&rinfo); } -void Assembler::EmitVeneers(bool force_emit, bool need_protection, int margin) { - BlockPoolsScope scope(this); +void Assembler::EmitVeneers(bool force_emit, bool need_protection, + size_t margin) { + BlockPoolsScope scope(this, PoolEmissionCheck::kSkip); RecordComment("[ Veneers"); // The exact size of the veneer pool must be recorded (see the comment at the @@ -4677,7 +4495,7 @@ void Assembler::EmitVeneers(bool force_emit, bool need_protection, int margin) { } void Assembler::CheckVeneerPool(bool force_emit, bool require_jump, - int margin) { + size_t margin) { // There is nothing to do if there are no pending veneer pool entries. if (unresolved_branches_.empty()) { DCHECK_EQ(next_veneer_pool_check_, kMaxInt); @@ -4713,6 +4531,7 @@ int Assembler::buffer_space() const { void Assembler::RecordConstPool(int size) { // We only need this for debugger support, to correctly compute offsets in the // code. + Assembler::BlockPoolsScope block_pools(this); RecordRelocInfo(RelocInfo::CONST_POOL, static_cast(size)); } diff --git a/deps/v8/src/codegen/arm64/assembler-arm64.h b/deps/v8/src/codegen/arm64/assembler-arm64.h index 04cd4222417f5a..6a6bf633c13ec6 100644 --- a/deps/v8/src/codegen/arm64/assembler-arm64.h +++ b/deps/v8/src/codegen/arm64/assembler-arm64.h @@ -35,7 +35,8 @@ class SafepointTableBuilder; class Immediate { public: template - inline explicit Immediate(Handle handle); + inline explicit Immediate( + Handle handle, RelocInfo::Mode mode = RelocInfo::FULL_EMBEDDED_OBJECT); // This is allowed to be an implicit constructor because Immediate is // a wrapper class that doesn't normally perform any type conversion. @@ -49,8 +50,6 @@ class Immediate { RelocInfo::Mode rmode() const { return rmode_; } private: - V8_EXPORT_PRIVATE void InitializeHandle(Handle value); - int64_t value_; RelocInfo::Mode rmode_; }; @@ -85,9 +84,6 @@ class Operand { inline HeapObjectRequest heap_object_request() const; inline Immediate immediate_for_heap_object_request() const; - template - inline explicit Operand(Handle handle); - // Implicit constructor for all int types, ExternalReference, and Smi. template inline Operand(T t); // NOLINT(runtime/explicit) @@ -174,60 +170,6 @@ class MemOperand { unsigned shift_amount_; }; -class ConstPool { - public: - explicit ConstPool(Assembler* assm) : assm_(assm), first_use_(-1) {} - // Returns true when we need to write RelocInfo and false when we do not. - bool RecordEntry(intptr_t data, RelocInfo::Mode mode); - int EntryCount() const { return static_cast(entries_.size()); } - bool IsEmpty() const { return entries_.empty(); } - // Distance in bytes between the current pc and the first instruction - // using the pool. If there are no pending entries return kMaxInt. - int DistanceToFirstUse(); - // Offset after which instructions using the pool will be out of range. - int MaxPcOffset(); - // Maximum size the constant pool can be with current entries. It always - // includes alignment padding and branch over. - int WorstCaseSize(); - // Size in bytes of the literal pool *if* it is emitted at the current - // pc. The size will include the branch over the pool if it was requested. - int SizeIfEmittedAtCurrentPc(bool require_jump); - // Emit the literal pool at the current pc with a branch over the pool if - // requested. - void Emit(bool require_jump); - // Discard any pending pool entries. 
- void Clear(); - - private: - void EmitMarker(); - void EmitGuard(); - void EmitEntries(); - - using SharedEntryMap = std::map; - // Adds a shared entry to entries_, using 'entry_map' to determine whether we - // already track this entry. Returns true if this is the first time we add - // this entry, false otherwise. - bool AddSharedEntry(SharedEntryMap& entry_map, uint64_t data, int offset); - - Assembler* assm_; - // Keep track of the first instruction requiring a constant pool entry - // since the previous constant pool was emitted. - int first_use_; - - // Map of data to index in entries_ for shared entries. - SharedEntryMap shared_entries_; - - // Map of address of handle to index in entries_. We need to keep track of - // code targets separately from other shared entries, as they can be - // relocated. - SharedEntryMap handle_to_index_map_; - - // Values, pc offset(s) of entries. Use a vector to preserve the order of - // insertion, as the serializer expects code target RelocInfo to point to - // constant pool addresses in an ascending order. - std::vector > > entries_; -}; - // ----------------------------------------------------------------------------- // Assembler. @@ -312,15 +254,26 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase { // Read/Modify the code target address in the branch/call instruction at pc. // The isolate argument is unused (and may be nullptr) when skipping flushing. inline static Address target_address_at(Address pc, Address constant_pool); + + // Read/Modify the code target address in the branch/call instruction at pc. + inline static Tagged_t target_compressed_address_at(Address pc, + Address constant_pool); inline static void set_target_address_at( Address pc, Address constant_pool, Address target, ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED); + inline static void set_target_compressed_address_at( + Address pc, Address constant_pool, Tagged_t target, + ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED); + // Returns the handle for the code object called at 'pc'. // This might need to be temporarily encoded as an offset into code_targets_. inline Handle code_target_object_handle_at(Address pc); - - inline Handle compressed_embedded_object_handle_at(Address pc); + inline EmbeddedObjectIndex embedded_object_index_referenced_from(Address pc); + inline void set_embedded_object_index_referenced_from( + Address p, EmbeddedObjectIndex index); + // Returns the handle for the heap object referenced at 'pc'. + inline Handle target_object_handle_at(Address pc); // Returns the target address for a runtime function for the call encoded // at 'pc'. @@ -371,16 +324,6 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase { return SizeOfCodeGeneratedSince(label) / kInstrSize; } - // Prevent contant pool emission until EndBlockConstPool is called. - // Call to this function can be nested but must be followed by an equal - // number of calls to EndBlockConstpool. - void StartBlockConstPool(); - - // Resume constant pool emission. Need to be called as many time as - // StartBlockConstPool to have an effect. - void EndBlockConstPool(); - - bool is_const_pool_blocked() const; static bool IsConstantPoolAt(Instruction* instr); static int ConstantPoolSizeAt(Instruction* instr); // See Assembler::CheckConstPool for more info. @@ -399,16 +342,6 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase { return veneer_pool_blocked_nesting_ > 0; } - // Block/resume emission of constant pools and veneer pools. 
- void StartBlockPools() { - StartBlockConstPool(); - StartBlockVeneerPool(); - } - void EndBlockPools() { - EndBlockConstPool(); - EndBlockVeneerPool(); - } - // Record a deoptimization reason that can be used by a log or cpu profiler. // Use --trace-deopt to enable. void RecordDeoptReason(DeoptimizeReason reason, SourcePosition position, @@ -2120,8 +2053,6 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase { // Code generation helpers -------------------------------------------------- - bool IsConstPoolEmpty() const { return constpool_.IsEmpty(); } - Instruction* pc() const { return Instruction::Cast(pc_); } Instruction* InstructionAt(ptrdiff_t offset) const { @@ -2405,31 +2336,26 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase { // FP register type. inline static Instr FPType(VRegister fd); - // Class for scoping postponing the constant pool generation. - class BlockConstPoolScope { - public: - explicit BlockConstPoolScope(Assembler* assem) : assem_(assem) { - assem_->StartBlockConstPool(); - } - ~BlockConstPoolScope() { assem_->EndBlockConstPool(); } - - private: - Assembler* assem_; - - DISALLOW_IMPLICIT_CONSTRUCTORS(BlockConstPoolScope); - }; - // Unused on this architecture. void MaybeEmitOutOfLineConstantPool() {} - // Check if is time to emit a constant pool. - void CheckConstPool(bool force_emit, bool require_jump); + void ForceConstantPoolEmissionWithoutJump() { + constpool_.Check(Emission::kForced, Jump::kOmitted); + } + void ForceConstantPoolEmissionWithJump() { + constpool_.Check(Emission::kForced, Jump::kRequired); + } + // Check if the const pool needs to be emitted while pretending that {margin} + // more bytes of instructions have already been emitted. + void EmitConstPoolWithJumpIfNeeded(size_t margin = 0) { + constpool_.Check(Emission::kIfNeeded, Jump::kRequired, margin); + } // Returns true if we should emit a veneer as soon as possible for a branch // which can at most reach to specified pc. bool ShouldEmitVeneer(int max_reachable_pc, - int margin = kVeneerDistanceMargin); - bool ShouldEmitVeneers(int margin = kVeneerDistanceMargin) { + size_t margin = kVeneerDistanceMargin); + bool ShouldEmitVeneers(size_t margin = kVeneerDistanceMargin) { return ShouldEmitVeneer(unresolved_branches_first_limit(), margin); } @@ -2443,23 +2369,34 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase { // If need_protection is true, the veneers are protected by a branch jumping // over the code. void EmitVeneers(bool force_emit, bool need_protection, - int margin = kVeneerDistanceMargin); + size_t margin = kVeneerDistanceMargin); void EmitVeneersGuard() { EmitPoolGuard(); } // Checks whether veneers need to be emitted at this point. // If force_emit is set, a veneer is generated for *all* unresolved branches. void CheckVeneerPool(bool force_emit, bool require_jump, - int margin = kVeneerDistanceMargin); + size_t margin = kVeneerDistanceMargin); + + using BlockConstPoolScope = ConstantPool::BlockScope; class BlockPoolsScope { public: - explicit BlockPoolsScope(Assembler* assem) : assem_(assem) { - assem_->StartBlockPools(); + // Block veneer and constant pool. Emits pools if necessary to ensure that + // {margin} more bytes can be emitted without triggering pool emission. 
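A usage sketch for the scope declared below (the call site is hypothetical): pools stay blocked for the scope's dynamic extent, and the constructor emits them up front when the requested margin could not otherwise be honored, so the protected sequence is never split.

void EmitUninterruptedPairSketch(Assembler* assm) {
  Assembler::BlockPoolsScope scope(assm, 2 * kInstrSize);
  // Neither a constant pool nor a veneer pool can now be emitted between
  // the two instructions of the protected sequence:
  // assm->ldr(...);  // first instruction
  // assm->br(...);   // second instruction
}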
+ explicit BlockPoolsScope(Assembler* assem, size_t margin = 0) + : assem_(assem), block_const_pool_(assem, margin) { + assem_->CheckVeneerPool(false, true, margin); + assem_->StartBlockVeneerPool(); + } + + BlockPoolsScope(Assembler* assem, PoolEmissionCheck check) + : assem_(assem), block_const_pool_(assem, check) { + assem_->StartBlockVeneerPool(); } - ~BlockPoolsScope() { assem_->EndBlockPools(); } + ~BlockPoolsScope() { assem_->EndBlockVeneerPool(); } private: Assembler* assem_; - + BlockConstPoolScope block_const_pool_; DISALLOW_IMPLICIT_CONSTRUCTORS(BlockPoolsScope); }; @@ -2622,15 +2559,6 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase { // Verify that a label's link chain is intact. void CheckLabelLinkChain(Label const* label); - // Postpone the generation of the constant pool for the specified number of - // instructions. - void BlockConstPoolFor(int instructions); - - // Set how far from current pc the next constant pool check will be. - void SetNextConstPoolCheckIn(int instructions) { - next_constant_pool_check_ = pc_offset() + instructions * kInstrSize; - } - // Emit the instruction at pc_. void Emit(Instr instruction) { STATIC_ASSERT(sizeof(*pc_) == 1); @@ -2658,40 +2586,6 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase { void CheckBufferSpace(); void CheckBuffer(); - // Pc offset of the next constant pool check. - int next_constant_pool_check_; - - // Constant pool generation - // Pools are emitted in the instruction stream. They are emitted when: - // * the distance to the first use is above a pre-defined distance or - // * the numbers of entries in the pool is above a pre-defined size or - // * code generation is finished - // If a pool needs to be emitted before code generation is finished a branch - // over the emitted pool will be inserted. - - // Constants in the pool may be addresses of functions that gets relocated; - // if so, a relocation info entry is associated to the constant pool entry. - - // Repeated checking whether the constant pool should be emitted is rather - // expensive. By default we only check again once a number of instructions - // has been generated. That also means that the sizing of the buffers is not - // an exact science, and that we rely on some slop to not overrun buffers. - static constexpr int kCheckConstPoolInterval = 128; - - // Distance to first use after a which a pool will be emitted. Pool entries - // are accessed with pc relative load therefore this cannot be more than - // 1 * MB. Since constant pool emission checks are interval based this value - // is an approximation. - static constexpr int kApproxMaxDistToConstPool = 64 * KB; - - // Number of pool entries after which a pool will be emitted. Since constant - // pool emission checks are interval based this value is an approximation. - static constexpr int kApproxMaxPoolEntryCount = 512; - - // Emission of the constant pool may be blocked in some code sequences. - int const_pool_blocked_nesting_; // Block emission if this is not zero. - int no_const_pool_before_; // Block emission before this pc offset. - // Emission of the veneer pools may be blocked in some code sequences. int veneer_pool_blocked_nesting_; // Block emission if this is not zero. @@ -2705,16 +2599,6 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase { // are already bound. 
std::deque internal_reference_positions_; - // Relocation info records are also used during code generation as temporary - // containers for constants and code target addresses until they are emitted - // to the constant pool. These pending relocation info records are temporarily - // stored in a separate buffer until a constant pool is emitted. - // If every instruction in a long sequence is accessing the pool, we need one - // pending relocation entry per instruction. - - // The pending constant pool. - ConstPool constpool_; - protected: // Code generation // The relocation writer's position is at least kGap bytes below the end of @@ -2727,17 +2611,18 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase { public: #ifdef DEBUG // Functions used for testing. - int GetConstantPoolEntriesSizeForTesting() const { + size_t GetConstantPoolEntriesSizeForTesting() const { // Do not include branch over the pool. - return constpool_.EntryCount() * kSystemPointerSize; + return constpool_.Entry32Count() * kInt32Size + + constpool_.Entry64Count() * kInt64Size; } - static constexpr int GetCheckConstPoolIntervalForTesting() { - return kCheckConstPoolInterval; + static size_t GetCheckConstPoolIntervalForTesting() { + return ConstantPool::kCheckInterval; } - static constexpr int GetApproxMaxDistToConstPoolForTesting() { - return kApproxMaxDistToConstPool; + static size_t GetApproxMaxDistToConstPoolForTesting() { + return ConstantPool::kApproxDistToPool64; } #endif @@ -2779,7 +2664,7 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase { DCHECK(!unresolved_branches_.empty()); return unresolved_branches_.begin()->first; } - // This is similar to next_constant_pool_check_ and helps reduce the overhead + // This PC-offset of the next veneer pool check helps reduce the overhead // of checking for veneer pools. // It is maintained to the closest unresolved branch limit minus the maximum // veneer margin (or kMaxInt if there are no unresolved branches). @@ -2804,8 +2689,11 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase { int WriteCodeComments(); + // The pending constant pool. + ConstantPool constpool_; + friend class EnsureSpace; - friend class ConstPool; + friend class ConstantPool; }; class PatchingAssembler : public Assembler { @@ -2822,19 +2710,12 @@ class PatchingAssembler : public Assembler { PatchingAssembler(const AssemblerOptions& options, byte* start, unsigned count) : Assembler(options, - ExternalAssemblerBuffer(start, count * kInstrSize + kGap)) { - // Block constant pool emission. - StartBlockPools(); - } + ExternalAssemblerBuffer(start, count * kInstrSize + kGap)), + block_constant_pool_emission_scope(this) {} ~PatchingAssembler() { - // Const pool should still be blocked. - DCHECK(is_const_pool_blocked()); - EndBlockPools(); // Verify we have generated the number of instruction we expected. DCHECK_EQ(pc_offset() + kGap, buffer_->size()); - // Verify no relocation information has been emitted. - DCHECK(IsConstPoolEmpty()); } // See definition of PatchAdrFar() for details. 
@@ -2842,11 +2723,19 @@ class PatchingAssembler : public Assembler { static constexpr int kAdrFarPatchableNInstrs = kAdrFarPatchableNNops + 2; void PatchAdrFar(int64_t target_offset); void PatchSubSp(uint32_t immediate); + + private: + BlockPoolsScope block_constant_pool_emission_scope; }; class EnsureSpace { public: - explicit EnsureSpace(Assembler* assembler) { assembler->CheckBufferSpace(); } + explicit EnsureSpace(Assembler* assembler) : block_pools_scope_(assembler) { + assembler->CheckBufferSpace(); + } + + private: + Assembler::BlockPoolsScope block_pools_scope_; }; } // namespace internal diff --git a/deps/v8/src/codegen/arm64/constants-arm64.h b/deps/v8/src/codegen/arm64/constants-arm64.h index eb3fb3a6be36ee..a1e962452b7cb2 100644 --- a/deps/v8/src/codegen/arm64/constants-arm64.h +++ b/deps/v8/src/codegen/arm64/constants-arm64.h @@ -32,8 +32,8 @@ constexpr size_t kMaxPCRelativeCodeRangeInMB = 128; constexpr uint8_t kInstrSize = 4; constexpr uint8_t kInstrSizeLog2 = 2; -constexpr size_t kLoadLiteralScaleLog2 = 2; -constexpr size_t kMaxLoadLiteralRange = 1 * MB; +constexpr uint8_t kLoadLiteralScaleLog2 = 2; +constexpr int kMaxLoadLiteralRange = 1 * MB; const int kNumberOfRegisters = 32; const int kNumberOfVRegisters = 32; diff --git a/deps/v8/src/codegen/arm64/cpu-arm64.cc b/deps/v8/src/codegen/arm64/cpu-arm64.cc index e0ab5899141072..32bcc6f268ea10 100644 --- a/deps/v8/src/codegen/arm64/cpu-arm64.cc +++ b/deps/v8/src/codegen/arm64/cpu-arm64.cc @@ -15,7 +15,7 @@ namespace internal { class CacheLineSizes { public: CacheLineSizes() { -#if !defined(V8_HOST_ARCH_ARM64) || defined(V8_OS_WIN) +#if !defined(V8_HOST_ARCH_ARM64) || defined(V8_OS_WIN) || defined(__APPLE__) cache_type_register_ = 0; #else // Copy the content of the cache type register to a core register. diff --git a/deps/v8/src/codegen/arm64/decoder-arm64.h b/deps/v8/src/codegen/arm64/decoder-arm64.h index 3d113eb8366b5e..7621c516ce79bd 100644 --- a/deps/v8/src/codegen/arm64/decoder-arm64.h +++ b/deps/v8/src/codegen/arm64/decoder-arm64.h @@ -95,7 +95,7 @@ class V8_EXPORT_PRIVATE DecoderVisitor { }; // A visitor that dispatches to a list of visitors. 
-class DispatchingDecoderVisitor : public DecoderVisitor { +class V8_EXPORT_PRIVATE DispatchingDecoderVisitor : public DecoderVisitor { public: DispatchingDecoderVisitor() {} virtual ~DispatchingDecoderVisitor() {} diff --git a/deps/v8/src/codegen/arm64/instructions-arm64.h b/deps/v8/src/codegen/arm64/instructions-arm64.h index 5c3cf687e75ab3..a73c3feed74396 100644 --- a/deps/v8/src/codegen/arm64/instructions-arm64.h +++ b/deps/v8/src/codegen/arm64/instructions-arm64.h @@ -203,6 +203,7 @@ class Instruction { } bool IsLdrLiteralX() const { return Mask(LoadLiteralMask) == LDR_x_lit; } + bool IsLdrLiteralW() const { return Mask(LoadLiteralMask) == LDR_w_lit; } bool IsPCRelAddressing() const { return Mask(PCRelAddressingFMask) == PCRelAddressingFixed; diff --git a/deps/v8/src/codegen/arm64/macro-assembler-arm64.cc b/deps/v8/src/codegen/arm64/macro-assembler-arm64.cc index aab9fc79a2c2c2..792a8637f698d3 100644 --- a/deps/v8/src/codegen/arm64/macro-assembler-arm64.cc +++ b/deps/v8/src/codegen/arm64/macro-assembler-arm64.cc @@ -291,8 +291,7 @@ void TurboAssembler::Mov(const Register& rd, const Operand& operand, ExternalReference reference = bit_cast(addr); IndirectLoadExternalReference(rd, reference); return; - } else if (operand.ImmediateRMode() == - RelocInfo::FULL_EMBEDDED_OBJECT) { + } else if (RelocInfo::IsEmbeddedObjectMode(operand.ImmediateRMode())) { Handle x( reinterpret_cast(operand.ImmediateValue())); IndirectLoadConstant(rd, x); @@ -1866,7 +1865,9 @@ void TurboAssembler::Jump(Handle code, RelocInfo::Mode rmode, } if (CanUseNearCallOrJump(rmode)) { - JumpHelper(static_cast(AddCodeTarget(code)), rmode, cond); + EmbeddedObjectIndex index = AddEmbeddedObject(code); + DCHECK(is_int32(index)); + JumpHelper(static_cast(index), rmode, cond); } else { Jump(code.address(), rmode, cond); } @@ -1912,7 +1913,9 @@ void TurboAssembler::Call(Handle code, RelocInfo::Mode rmode) { } if (CanUseNearCallOrJump(rmode)) { - near_call(AddCodeTarget(code), rmode); + EmbeddedObjectIndex index = AddEmbeddedObject(code); + DCHECK(is_int32(index)); + near_call(static_cast(index), rmode); } else { IndirectCall(code.address(), rmode); } @@ -1925,24 +1928,27 @@ void TurboAssembler::Call(ExternalReference target) { Call(temp); } -void TurboAssembler::CallBuiltinPointer(Register builtin_pointer) { +void TurboAssembler::LoadEntryFromBuiltinIndex(Register builtin_index) { STATIC_ASSERT(kSystemPointerSize == 8); STATIC_ASSERT(kSmiTagSize == 1); STATIC_ASSERT(kSmiTag == 0); - // The builtin_pointer register contains the builtin index as a Smi. + // The builtin_index register contains the builtin index as a Smi. // Untagging is folded into the indexing operand below. 
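A worked example of the folded untagging for the 31-bit-Smi configuration handled first below, where kSmiShift == 1 and kSystemPointerSizeLog2 == 3: a builtin index i is encoded as the Smi value i << 1, and the single left shift by 3 - 1 == 2 turns it straight into the byte offset i * 8 into the builtin entry table.

#include <cstdint>

static uint64_t EntryTableByteOffsetSketch(uint64_t smi_encoded_index) {
  // (i << 1) << 2 == i * 8 == i * kSystemPointerSize on arm64.
  return smi_encoded_index << 2;
}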
#if defined(V8_COMPRESS_POINTERS) || defined(V8_31BIT_SMIS_ON_64BIT_ARCH) STATIC_ASSERT(kSmiShiftSize == 0); - Lsl(builtin_pointer, builtin_pointer, kSystemPointerSizeLog2 - kSmiShift); + Lsl(builtin_index, builtin_index, kSystemPointerSizeLog2 - kSmiShift); #else STATIC_ASSERT(kSmiShiftSize == 31); - Asr(builtin_pointer, builtin_pointer, kSmiShift - kSystemPointerSizeLog2); + Asr(builtin_index, builtin_index, kSmiShift - kSystemPointerSizeLog2); #endif - Add(builtin_pointer, builtin_pointer, - IsolateData::builtin_entry_table_offset()); - Ldr(builtin_pointer, MemOperand(kRootRegister, builtin_pointer)); - Call(builtin_pointer); + Add(builtin_index, builtin_index, IsolateData::builtin_entry_table_offset()); + Ldr(builtin_index, MemOperand(kRootRegister, builtin_index)); +} + +void TurboAssembler::CallBuiltinByIndex(Register builtin_index) { + LoadEntryFromBuiltinIndex(builtin_index); + Call(builtin_index); } void TurboAssembler::LoadCodeObjectEntry(Register destination, @@ -2723,7 +2729,7 @@ void TurboAssembler::DecompressAnyTagged(const Register& destination, const MemOperand& field_operand) { RecordComment("[ DecompressAnyTagged"); Ldrsw(destination, field_operand); - if (kUseBranchlessPtrDecompression) { + if (kUseBranchlessPtrDecompressionInGeneratedCode) { UseScratchRegisterScope temps(this); // Branchlessly compute |masked_root|: // masked_root = HAS_SMI_TAG(destination) ? 0 : kRootRegister; @@ -2747,7 +2753,7 @@ void TurboAssembler::DecompressAnyTagged(const Register& destination, void TurboAssembler::DecompressAnyTagged(const Register& destination, const Register& source) { RecordComment("[ DecompressAnyTagged"); - if (kUseBranchlessPtrDecompression) { + if (kUseBranchlessPtrDecompressionInGeneratedCode) { UseScratchRegisterScope temps(this); // Branchlessly compute |masked_root|: // masked_root = HAS_SMI_TAG(destination) ? 0 : kRootRegister; diff --git a/deps/v8/src/codegen/arm64/macro-assembler-arm64.h b/deps/v8/src/codegen/arm64/macro-assembler-arm64.h index f217c3c586afa7..d4e9c3055b0989 100644 --- a/deps/v8/src/codegen/arm64/macro-assembler-arm64.h +++ b/deps/v8/src/codegen/arm64/macro-assembler-arm64.h @@ -852,7 +852,10 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase { // Generate an indirect call (for when a direct call's range is not adequate). void IndirectCall(Address target, RelocInfo::Mode rmode); - void CallBuiltinPointer(Register builtin_pointer) override; + // Load the builtin given by the Smi in |builtin_index| into the same + // register. + void LoadEntryFromBuiltinIndex(Register builtin_index); + void CallBuiltinByIndex(Register builtin_index) override; void LoadCodeObjectEntry(Register destination, Register code_object) override; void CallCodeObject(Register code_object) override; @@ -1920,17 +1923,15 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler { class InstructionAccurateScope { public: explicit InstructionAccurateScope(TurboAssembler* tasm, size_t count = 0) - : tasm_(tasm) + : tasm_(tasm), + block_pool_(tasm, count * kInstrSize) #ifdef DEBUG , size_(count * kInstrSize) #endif { - // Before blocking the const pool, see if it needs to be emitted. 
- tasm_->CheckConstPool(false, true); - tasm_->CheckVeneerPool(false, true); - - tasm_->StartBlockPools(); + tasm_->CheckVeneerPool(false, true, count * kInstrSize); + tasm_->StartBlockVeneerPool(); #ifdef DEBUG if (count != 0) { tasm_->bind(&start_); @@ -1941,7 +1942,7 @@ class InstructionAccurateScope { } ~InstructionAccurateScope() { - tasm_->EndBlockPools(); + tasm_->EndBlockVeneerPool(); #ifdef DEBUG if (start_.is_bound()) { DCHECK(tasm_->SizeOfCodeGeneratedSince(&start_) == size_); @@ -1952,6 +1953,7 @@ class InstructionAccurateScope { private: TurboAssembler* tasm_; + TurboAssembler::BlockConstPoolScope block_pool_; #ifdef DEBUG size_t size_; Label start_; @@ -1979,7 +1981,7 @@ class UseScratchRegisterScope { DCHECK_EQ(availablefp_->type(), CPURegister::kVRegister); } - ~UseScratchRegisterScope(); + V8_EXPORT_PRIVATE ~UseScratchRegisterScope(); // Take a register from the appropriate temps list. It will be returned // automatically when the scope ends. @@ -1993,10 +1995,11 @@ class UseScratchRegisterScope { } Register AcquireSameSizeAs(const Register& reg); - VRegister AcquireSameSizeAs(const VRegister& reg); + V8_EXPORT_PRIVATE VRegister AcquireSameSizeAs(const VRegister& reg); private: - static CPURegister AcquireNextAvailable(CPURegList* available); + V8_EXPORT_PRIVATE static CPURegister AcquireNextAvailable( + CPURegList* available); // Available scratch registers. CPURegList* available_; // kRegister diff --git a/deps/v8/src/codegen/assembler.cc b/deps/v8/src/codegen/assembler.cc index 687ae98bfe92f8..498afb03206432 100644 --- a/deps/v8/src/codegen/assembler.cc +++ b/deps/v8/src/codegen/assembler.cc @@ -64,8 +64,8 @@ AssemblerOptions AssemblerOptions::Default( // might be run on real hardware. options.enable_simulator_code = !serializer; #endif - options.inline_offheap_trampolines = - FLAG_embedded_builtins && !serializer && !generating_embedded_builtin; + options.inline_offheap_trampolines &= + !serializer && !generating_embedded_builtin; #if V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_ARM64 const base::AddressRegion& code_range = isolate->heap()->memory_allocator()->code_range(); @@ -226,23 +226,33 @@ int AssemblerBase::AddCodeTarget(Handle target) { } } -int AssemblerBase::AddCompressedEmbeddedObject(Handle object) { - int current = static_cast(compressed_embedded_objects_.size()); - compressed_embedded_objects_.push_back(object); - return current; +Handle AssemblerBase::GetCodeTarget(intptr_t code_target_index) const { + DCHECK_LT(static_cast(code_target_index), code_targets_.size()); + return code_targets_[code_target_index]; } -Handle AssemblerBase::GetCompressedEmbeddedObject( - intptr_t index) const { - DCHECK_LT(static_cast(index), compressed_embedded_objects_.size()); - return compressed_embedded_objects_[index]; +AssemblerBase::EmbeddedObjectIndex AssemblerBase::AddEmbeddedObject( + Handle object) { + EmbeddedObjectIndex current = embedded_objects_.size(); + // Do not deduplicate invalid handles, they are to heap object requests. 
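The guarded lookup that follows is a plain find-or-insert over an index table. Its shape in isolation (a sketch; a default-hashable key stands in for Handle<HeapObject>, which supplies its own hash and equality in the real code):

#include <cstddef>
#include <unordered_map>
#include <vector>

template <typename Key>
size_t AddOrGetIndex(std::unordered_map<Key, size_t>* map,
                     std::vector<Key>* table, const Key& key) {
  auto it = map->find(key);
  if (it != map->end()) return it->second;  // duplicate: reuse the slot
  size_t index = table->size();
  (*map)[key] = index;
  table->push_back(key);
  return index;
}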
+ if (!object.is_null()) { + auto entry = embedded_objects_map_.find(object); + if (entry != embedded_objects_map_.end()) { + return entry->second; + } + embedded_objects_map_[object] = current; + } + embedded_objects_.push_back(object); + return current; } -Handle AssemblerBase::GetCodeTarget(intptr_t code_target_index) const { - DCHECK_LT(static_cast(code_target_index), code_targets_.size()); - return code_targets_[code_target_index]; +Handle AssemblerBase::GetEmbeddedObject( + EmbeddedObjectIndex index) const { + DCHECK_LT(index, embedded_objects_.size()); + return embedded_objects_[index]; } + int Assembler::WriteCodeComments() { if (!FLAG_code_comments || code_comments_writer_.entry_count() == 0) return 0; int offset = pc_offset(); diff --git a/deps/v8/src/codegen/assembler.h b/deps/v8/src/codegen/assembler.h index eae5d53a4fca5a..98639583d8119f 100644 --- a/deps/v8/src/codegen/assembler.h +++ b/deps/v8/src/codegen/assembler.h @@ -36,7 +36,9 @@ #define V8_CODEGEN_ASSEMBLER_H_ #include +#include +#include "src/base/memory.h" #include "src/codegen/code-comments.h" #include "src/codegen/cpu-features.h" #include "src/codegen/external-reference.h" @@ -55,6 +57,10 @@ class ApiFunction; namespace internal { +using base::Memory; +using base::ReadUnalignedValue; +using base::WriteUnalignedValue; + // Forward declarations. class EmbeddedData; class InstructionStream; @@ -155,7 +161,7 @@ struct V8_EXPORT_PRIVATE AssemblerOptions { bool isolate_independent_code = false; // Enables the use of isolate-independent builtins through an off-heap // trampoline. (macro assembler feature). - bool inline_offheap_trampolines = false; + bool inline_offheap_trampolines = FLAG_embedded_builtins; // On some platforms, all code is within a given range in the process, // and the start of this range is configured here. Address code_range_start = 0; @@ -272,8 +278,11 @@ class V8_EXPORT_PRIVATE AssemblerBase : public Malloced { int AddCodeTarget(Handle target); Handle GetCodeTarget(intptr_t code_target_index) const; - int AddCompressedEmbeddedObject(Handle object); - Handle GetCompressedEmbeddedObject(intptr_t index) const; + // Add 'object' to the {embedded_objects_} vector and return the index at + // which it is stored. + using EmbeddedObjectIndex = size_t; + EmbeddedObjectIndex AddEmbeddedObject(Handle object); + Handle GetEmbeddedObject(EmbeddedObjectIndex index) const; // The buffer into which code and relocation info are generated. std::unique_ptr buffer_; @@ -321,12 +330,18 @@ class V8_EXPORT_PRIVATE AssemblerBase : public Malloced { // the code handle in the vector instead. std::vector> code_targets_; - // When pointer compression is enabled, we need to store indexes to this - // table in the code until we are ready to copy the code and embed the real - // object pointers. We don't need to do the same thing for non-compressed - // embedded objects, because we've got enough space (kPointerSize) in the - // code stream to just embed the address of the object handle. - std::vector> compressed_embedded_objects_; + // If an assembler needs a small number to refer to a heap object handle + // (for example, because there are only 32bit available on a 64bit arch), the + // assembler adds the object into this vector using AddEmbeddedObject, and + // may then refer to the heap object using the handle's index in this vector. + std::vector> embedded_objects_; + + // Embedded objects are deduplicated based on handle location. 
This is a + // compromise that is almost as effective as deduplication based on actual + // heap object addresses maintains GC safety. + std::unordered_map, EmbeddedObjectIndex, + Handle::hash, Handle::equal_to> + embedded_objects_map_; const AssemblerOptions options_; uint64_t enabled_cpu_features_; diff --git a/deps/v8/src/codegen/code-stub-assembler.cc b/deps/v8/src/codegen/code-stub-assembler.cc index d967d84874d848..e4f35ddcc88472 100644 --- a/deps/v8/src/codegen/code-stub-assembler.cc +++ b/deps/v8/src/codegen/code-stub-assembler.cc @@ -63,57 +63,27 @@ void CodeStubAssembler::HandleBreakOnNode() { void CodeStubAssembler::Assert(const BranchGenerator& branch, const char* message, const char* file, int line, - Node* extra_node1, const char* extra_node1_name, - Node* extra_node2, const char* extra_node2_name, - Node* extra_node3, const char* extra_node3_name, - Node* extra_node4, const char* extra_node4_name, - Node* extra_node5, - const char* extra_node5_name) { + std::initializer_list extra_nodes) { #if defined(DEBUG) if (FLAG_debug_code) { - Check(branch, message, file, line, extra_node1, extra_node1_name, - extra_node2, extra_node2_name, extra_node3, extra_node3_name, - extra_node4, extra_node4_name, extra_node5, extra_node5_name); + Check(branch, message, file, line, extra_nodes); } #endif } void CodeStubAssembler::Assert(const NodeGenerator& condition_body, const char* message, const char* file, int line, - Node* extra_node1, const char* extra_node1_name, - Node* extra_node2, const char* extra_node2_name, - Node* extra_node3, const char* extra_node3_name, - Node* extra_node4, const char* extra_node4_name, - Node* extra_node5, - const char* extra_node5_name) { + std::initializer_list extra_nodes) { #if defined(DEBUG) if (FLAG_debug_code) { - Check(condition_body, message, file, line, extra_node1, extra_node1_name, - extra_node2, extra_node2_name, extra_node3, extra_node3_name, - extra_node4, extra_node4_name, extra_node5, extra_node5_name); + Check(condition_body, message, file, line, extra_nodes); } #endif } -#ifdef DEBUG -namespace { -void MaybePrintNodeWithName(CodeStubAssembler* csa, Node* node, - const char* node_name) { - if (node != nullptr) { - csa->CallRuntime(Runtime::kPrintWithNameForAssert, csa->SmiConstant(0), - csa->StringConstant(node_name), node); - } -} -} // namespace -#endif - void CodeStubAssembler::Check(const BranchGenerator& branch, const char* message, const char* file, int line, - Node* extra_node1, const char* extra_node1_name, - Node* extra_node2, const char* extra_node2_name, - Node* extra_node3, const char* extra_node3_name, - Node* extra_node4, const char* extra_node4_name, - Node* extra_node5, const char* extra_node5_name) { + std::initializer_list extra_nodes) { Label ok(this); Label not_ok(this, Label::kDeferred); if (message != nullptr && FLAG_code_comments) { @@ -124,9 +94,7 @@ void CodeStubAssembler::Check(const BranchGenerator& branch, branch(&ok, ¬_ok); BIND(¬_ok); - FailAssert(message, file, line, extra_node1, extra_node1_name, extra_node2, - extra_node2_name, extra_node3, extra_node3_name, extra_node4, - extra_node4_name, extra_node5, extra_node5_name); + FailAssert(message, file, line, extra_nodes); BIND(&ok); Comment("] Assert"); @@ -134,20 +102,14 @@ void CodeStubAssembler::Check(const BranchGenerator& branch, void CodeStubAssembler::Check(const NodeGenerator& condition_body, const char* message, const char* file, int line, - Node* extra_node1, const char* extra_node1_name, - Node* extra_node2, const char* extra_node2_name, - Node* 
extra_node3, const char* extra_node3_name, - Node* extra_node4, const char* extra_node4_name, - Node* extra_node5, const char* extra_node5_name) { + std::initializer_list extra_nodes) { BranchGenerator branch = [=](Label* ok, Label* not_ok) { Node* condition = condition_body(); DCHECK_NOT_NULL(condition); Branch(condition, ok, not_ok); }; - Check(branch, message, file, line, extra_node1, extra_node1_name, extra_node2, - extra_node2_name, extra_node3, extra_node3_name, extra_node4, - extra_node4_name, extra_node5, extra_node5_name); + Check(branch, message, file, line, extra_nodes); } void CodeStubAssembler::FastCheck(TNode condition) { @@ -162,31 +124,25 @@ void CodeStubAssembler::FastCheck(TNode condition) { } void CodeStubAssembler::FailAssert( - const char* message, const char* file, int line, Node* extra_node1, - const char* extra_node1_name, Node* extra_node2, - const char* extra_node2_name, Node* extra_node3, - const char* extra_node3_name, Node* extra_node4, - const char* extra_node4_name, Node* extra_node5, - const char* extra_node5_name) { + const char* message, const char* file, int line, + std::initializer_list extra_nodes) { DCHECK_NOT_NULL(message); EmbeddedVector chars; if (file != nullptr) { - SNPrintF(chars, "CSA_ASSERT failed: %s [%s:%d]\n", message, file, line); - } else { - SNPrintF(chars, "CSA_ASSERT failed: %s\n", message); + SNPrintF(chars, "%s [%s:%d]", message, file, line); + message = chars.begin(); } - Node* message_node = StringConstant(chars.begin()); + Node* message_node = StringConstant(message); #ifdef DEBUG // Only print the extra nodes in debug builds. - MaybePrintNodeWithName(this, extra_node1, extra_node1_name); - MaybePrintNodeWithName(this, extra_node2, extra_node2_name); - MaybePrintNodeWithName(this, extra_node3, extra_node3_name); - MaybePrintNodeWithName(this, extra_node4, extra_node4_name); - MaybePrintNodeWithName(this, extra_node5, extra_node5_name); + for (auto& node : extra_nodes) { + CallRuntime(Runtime::kPrintWithNameForAssert, SmiConstant(0), + StringConstant(node.second), node.first); + } #endif - DebugAbort(message_node); + AbortCSAAssert(message_node); Unreachable(); } @@ -567,7 +523,7 @@ TNode CodeStubAssembler::Float64Trunc(SloppyTNode x) { TNode CodeStubAssembler::IsValidSmi(TNode smi) { if (SmiValuesAre31Bits() && kSystemPointerSize == kInt64Size) { // Check that the Smi value is properly sign-extended. 
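+    // For example, with 31-bit Smis on a 64-bit target the upper 32 bits
+    // must replicate bit 31: 0x0000'0000'1234'0000 is a valid Smi word,
+    // while 0x0000'0001'1234'0000 is not.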
- TNode value = Signed(BitcastTaggedToWord(smi)); + TNode value = Signed(BitcastTaggedSignedToWord(smi)); return WordEqual(value, ChangeInt32ToIntPtr(TruncateIntPtrToInt32(value))); } return Int32TrueConstant(); @@ -611,7 +567,8 @@ TNode CodeStubAssembler::SmiUntag(SloppyTNode value) { if (ToIntPtrConstant(value, constant_value)) { return IntPtrConstant(constant_value >> (kSmiShiftSize + kSmiTagSize)); } - return Signed(WordSar(BitcastTaggedToWord(value), SmiShiftBitsConstant())); + return Signed( + WordSar(BitcastTaggedSignedToWord(value), SmiShiftBitsConstant())); } TNode CodeStubAssembler::SmiToInt32(SloppyTNode value) { @@ -660,13 +617,14 @@ TNode CodeStubAssembler::TryInt32Mul(TNode a, TNode b, TNode CodeStubAssembler::TrySmiAdd(TNode lhs, TNode rhs, Label* if_overflow) { if (SmiValuesAre32Bits()) { - return BitcastWordToTaggedSigned(TryIntPtrAdd( - BitcastTaggedToWord(lhs), BitcastTaggedToWord(rhs), if_overflow)); + return BitcastWordToTaggedSigned( + TryIntPtrAdd(BitcastTaggedSignedToWord(lhs), + BitcastTaggedSignedToWord(rhs), if_overflow)); } else { DCHECK(SmiValuesAre31Bits()); - TNode> pair = - Int32AddWithOverflow(TruncateIntPtrToInt32(BitcastTaggedToWord(lhs)), - TruncateIntPtrToInt32(BitcastTaggedToWord(rhs))); + TNode> pair = Int32AddWithOverflow( + TruncateIntPtrToInt32(BitcastTaggedSignedToWord(lhs)), + TruncateIntPtrToInt32(BitcastTaggedSignedToWord(rhs))); TNode overflow = Projection<1>(pair); GotoIf(overflow, if_overflow); TNode result = Projection<0>(pair); @@ -678,16 +636,16 @@ TNode CodeStubAssembler::TrySmiSub(TNode lhs, TNode rhs, Label* if_overflow) { if (SmiValuesAre32Bits()) { TNode> pair = IntPtrSubWithOverflow( - BitcastTaggedToWord(lhs), BitcastTaggedToWord(rhs)); + BitcastTaggedSignedToWord(lhs), BitcastTaggedSignedToWord(rhs)); TNode overflow = Projection<1>(pair); GotoIf(overflow, if_overflow); TNode result = Projection<0>(pair); return BitcastWordToTaggedSigned(result); } else { DCHECK(SmiValuesAre31Bits()); - TNode> pair = - Int32SubWithOverflow(TruncateIntPtrToInt32(BitcastTaggedToWord(lhs)), - TruncateIntPtrToInt32(BitcastTaggedToWord(rhs))); + TNode> pair = Int32SubWithOverflow( + TruncateIntPtrToInt32(BitcastTaggedSignedToWord(lhs)), + TruncateIntPtrToInt32(BitcastTaggedSignedToWord(rhs))); TNode overflow = Projection<1>(pair); GotoIf(overflow, if_overflow); TNode result = Projection<0>(pair); @@ -933,7 +891,7 @@ TNode CodeStubAssembler::TrySmiDiv(TNode dividend, TNode divisor, BIND(&divisor_is_not_minus_one); TNode untagged_result = Int32Div(untagged_dividend, untagged_divisor); - TNode truncated = Signed(Int32Mul(untagged_result, untagged_divisor)); + TNode truncated = Int32Mul(untagged_result, untagged_divisor); // Do floating point division if the remainder is not 0. GotoIf(Word32NotEqual(untagged_dividend, truncated), bailout); @@ -973,9 +931,12 @@ TNode CodeStubAssembler::TaggedIsSmi(TNode a) { } TNode CodeStubAssembler::TaggedIsNotSmi(SloppyTNode a) { - return WordNotEqual( - WordAnd(BitcastTaggedToWord(a), IntPtrConstant(kSmiTagMask)), - IntPtrConstant(0)); + // Although BitcastTaggedSignedToWord is generally unsafe on HeapObjects, we + // can nonetheless use it to inspect the Smi tag. The assumption here is that + // the GC will not exchange Smis for HeapObjects or vice-versa. 
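+  // Only the low tag bits of the word feed the mask below, and object
+  // relocation preserves those bits, so the answer is stable even when {a}
+  // is actually a HeapObject.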
+ TNode a_bitcast = BitcastTaggedSignedToWord(UncheckedCast(a)); + return WordNotEqual(WordAnd(a_bitcast, IntPtrConstant(kSmiTagMask)), + IntPtrConstant(0)); } TNode CodeStubAssembler::TaggedIsPositiveSmi(SloppyTNode a) { @@ -1031,7 +992,7 @@ void CodeStubAssembler::BranchIfPrototypesHaveNoElements( TNode prototype_instance_type = LoadMapInstanceType(prototype_map); // Pessimistically assume elements if a Proxy, Special API Object, - // or JSValue wrapper is found on the prototype chain. After this + // or JSPrimitiveWrapper wrapper is found on the prototype chain. After this // instance type check, it's not necessary to check for interceptors or // access checks. Label if_custom(this, Label::kDeferred), if_notcustom(this); @@ -1040,11 +1001,12 @@ void CodeStubAssembler::BranchIfPrototypesHaveNoElements( BIND(&if_custom); { - // For string JSValue wrappers we still support the checks as long - // as they wrap the empty string. - GotoIfNot(InstanceTypeEqual(prototype_instance_type, JS_VALUE_TYPE), - possibly_elements); - Node* prototype_value = LoadJSValueValue(prototype); + // For string JSPrimitiveWrapper wrappers we still support the checks as + // long as they wrap the empty string. + GotoIfNot( + InstanceTypeEqual(prototype_instance_type, JS_PRIMITIVE_WRAPPER_TYPE), + possibly_elements); + Node* prototype_value = LoadJSPrimitiveWrapperValue(prototype); Branch(IsEmptyString(prototype_value), &if_notcustom, possibly_elements); } @@ -1121,20 +1083,23 @@ TNode CodeStubAssembler::AllocateRaw(TNode size_in_bytes, Label runtime_call(this, Label::kDeferred), no_runtime_call(this), out(this); bool needs_double_alignment = flags & kDoubleAlignment; + bool allow_large_object_allocation = flags & kAllowLargeObjectAllocation; - if (flags & kAllowLargeObjectAllocation) { + if (allow_large_object_allocation) { Label next(this); GotoIf(IsRegularHeapObjectSize(size_in_bytes), &next); + TNode runtime_flags = SmiConstant(Smi::FromInt( + AllocateDoubleAlignFlag::encode(needs_double_alignment) | + AllowLargeObjectAllocationFlag::encode(allow_large_object_allocation))); if (FLAG_young_generation_large_objects) { - result = CallRuntime(Runtime::kAllocateInYoungGeneration, - NoContextConstant(), SmiTag(size_in_bytes)); + result = + CallRuntime(Runtime::kAllocateInYoungGeneration, NoContextConstant(), + SmiTag(size_in_bytes), runtime_flags); } else { - TNode alignment_flag = SmiConstant(Smi::FromInt( - AllocateDoubleAlignFlag::encode(needs_double_alignment))); result = CallRuntime(Runtime::kAllocateInOldGeneration, NoContextConstant(), - SmiTag(size_in_bytes), alignment_flag); + SmiTag(size_in_bytes), runtime_flags); } Goto(&out); @@ -1161,15 +1126,17 @@ TNode CodeStubAssembler::AllocateRaw(TNode size_in_bytes, BIND(&runtime_call); { + TNode runtime_flags = SmiConstant(Smi::FromInt( + AllocateDoubleAlignFlag::encode(needs_double_alignment) | + AllowLargeObjectAllocationFlag::encode(allow_large_object_allocation))); if (flags & kPretenured) { - TNode runtime_flags = SmiConstant(Smi::FromInt( - AllocateDoubleAlignFlag::encode(needs_double_alignment))); result = CallRuntime(Runtime::kAllocateInOldGeneration, NoContextConstant(), SmiTag(size_in_bytes), runtime_flags); } else { - result = CallRuntime(Runtime::kAllocateInYoungGeneration, - NoContextConstant(), SmiTag(size_in_bytes)); + result = + CallRuntime(Runtime::kAllocateInYoungGeneration, NoContextConstant(), + SmiTag(size_in_bytes), runtime_flags); } Goto(&out); } @@ -1394,14 +1361,15 @@ Node* CodeStubAssembler::LoadBufferObject(Node* buffer, int offset, 
Node* CodeStubAssembler::LoadObjectField(SloppyTNode object, int offset, MachineType type) { CSA_ASSERT(this, IsStrong(object)); - return Load(type, object, IntPtrConstant(offset - kHeapObjectTag)); + return LoadFromObject(type, object, IntPtrConstant(offset - kHeapObjectTag)); } Node* CodeStubAssembler::LoadObjectField(SloppyTNode object, SloppyTNode offset, MachineType type) { CSA_ASSERT(this, IsStrong(object)); - return Load(type, object, IntPtrSub(offset, IntPtrConstant(kHeapObjectTag))); + return LoadFromObject(type, object, + IntPtrSub(offset, IntPtrConstant(kHeapObjectTag))); } TNode CodeStubAssembler::LoadAndUntagObjectField( @@ -1469,12 +1437,18 @@ TNode CodeStubAssembler::LoadHeapNumberValue( object, HeapNumber::kValueOffset, MachineType::Float64())); } +TNode CodeStubAssembler::GetStructMap(InstanceType instance_type) { + Handle map_handle(Map::GetStructMap(isolate(), instance_type), + isolate()); + return HeapConstant(map_handle); +} + TNode CodeStubAssembler::LoadMap(SloppyTNode object) { return UncheckedCast(LoadObjectField(object, HeapObject::kMapOffset, MachineType::TaggedPointer())); } -TNode CodeStubAssembler::LoadInstanceType( +TNode CodeStubAssembler::LoadInstanceType( SloppyTNode object) { return LoadMapInstanceType(LoadMap(object)); } @@ -1591,8 +1565,8 @@ TNode CodeStubAssembler::LoadMapBitField3(SloppyTNode map) { LoadObjectField(map, Map::kBitField3Offset, MachineType::Uint32())); } -TNode CodeStubAssembler::LoadMapInstanceType(SloppyTNode map) { - return UncheckedCast( +TNode CodeStubAssembler::LoadMapInstanceType(SloppyTNode map) { + return UncheckedCast( LoadObjectField(map, Map::kInstanceTypeOffset, MachineType::Uint16())); } @@ -1700,12 +1674,10 @@ TNode CodeStubAssembler::LoadMapBackPointer(SloppyTNode map) { TNode CodeStubAssembler::EnsureOnlyHasSimpleProperties( TNode map, TNode instance_type, Label* bailout) { - // This check can have false positives, since it applies to any JSValueType. + // This check can have false positives, since it applies to any + // JSPrimitiveWrapper type. 
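+  // (A false positive is harmless here: it only sends us down the {bailout}
+  // slow path.)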
GotoIf(IsCustomElementsReceiverInstanceType(instance_type), bailout); - GotoIf(IsSetWord32(LoadMapBitField2(map), Map::HasHiddenPrototypeBit::kMask), - bailout); - TNode bit_field3 = LoadMapBitField3(map); GotoIf(IsSetWord32(bit_field3, Map::IsDictionaryMapBit::kMask), bailout); @@ -1810,9 +1782,9 @@ Node* CodeStubAssembler::PointerToSeqStringData(Node* seq_string) { IntPtrConstant(SeqOneByteString::kHeaderSize - kHeapObjectTag)); } -Node* CodeStubAssembler::LoadJSValueValue(Node* object) { - CSA_ASSERT(this, IsJSValue(object)); - return LoadObjectField(object, JSValue::kValueOffset); +Node* CodeStubAssembler::LoadJSPrimitiveWrapperValue(Node* object) { + CSA_ASSERT(this, IsJSPrimitiveWrapper(object)); + return LoadObjectField(object, JSPrimitiveWrapper::kValueOffset); } void CodeStubAssembler::DispatchMaybeObject(TNode maybe_object, @@ -1941,11 +1913,13 @@ TNode CodeStubAssembler::LoadArrayLength( return LoadAndUntagWeakFixedArrayLength(array); } -template -TNode CodeStubAssembler::LoadArrayElement( - TNode array, int array_header_size, Node* index_node, - int additional_offset, ParameterMode parameter_mode, - LoadSensitivity needs_poisoning) { +template +TNode CodeStubAssembler::LoadArrayElement(TNode array, + int array_header_size, + Node* index_node, + int additional_offset, + ParameterMode parameter_mode, + LoadSensitivity needs_poisoning) { CSA_ASSERT(this, IntPtrGreaterThanOrEqual( ParameterToIntPtr(index_node, parameter_mode), IntPtrConstant(0))); @@ -1955,8 +1929,13 @@ TNode CodeStubAssembler::LoadArrayElement( parameter_mode, header_size); CSA_ASSERT(this, IsOffsetInBounds(offset, LoadArrayLength(array), array_header_size)); - return UncheckedCast( - Load(MachineType::AnyTagged(), array, offset, needs_poisoning)); + constexpr MachineType machine_type = MachineTypeOf::value; + // TODO(gsps): Remove the Load case once LoadFromObject supports poisoning + if (needs_poisoning == LoadSensitivity::kSafe) { + return UncheckedCast(LoadFromObject(machine_type, array, offset)); + } else { + return UncheckedCast(Load(machine_type, array, offset, needs_poisoning)); + } } template TNode @@ -2046,8 +2025,8 @@ TNode CodeStubAssembler::LoadJSTypedArrayBackingStore( IntPtrAdd(external_pointer, BitcastTaggedToWord(base_pointer))); } -Node* CodeStubAssembler::LoadFixedBigInt64ArrayElementAsTagged( - Node* data_pointer, Node* offset) { +TNode CodeStubAssembler::LoadFixedBigInt64ArrayElementAsTagged( + SloppyTNode data_pointer, SloppyTNode offset) { if (Is64()) { TNode value = UncheckedCast( Load(MachineType::IntPtr(), data_pointer, offset)); @@ -2059,13 +2038,15 @@ Node* CodeStubAssembler::LoadFixedBigInt64ArrayElementAsTagged( Load(MachineType::UintPtr(), data_pointer, offset)); TNode low = UncheckedCast( Load(MachineType::UintPtr(), data_pointer, - Int32Add(offset, Int32Constant(kSystemPointerSize)))); + Int32Add(TruncateIntPtrToInt32(offset), + Int32Constant(kSystemPointerSize)))); #else TNode low = UncheckedCast( Load(MachineType::UintPtr(), data_pointer, offset)); TNode high = UncheckedCast( Load(MachineType::UintPtr(), data_pointer, - Int32Add(offset, Int32Constant(kSystemPointerSize)))); + Int32Add(TruncateIntPtrToInt32(offset), + Int32Constant(kSystemPointerSize)))); #endif return BigIntFromInt32Pair(low, high); } @@ -2176,8 +2157,9 @@ TNode CodeStubAssembler::BigIntFromInt64(TNode value) { return var_result.value(); } -Node* CodeStubAssembler::LoadFixedBigUint64ArrayElementAsTagged( - Node* data_pointer, Node* offset) { +compiler::TNode 
+CodeStubAssembler::LoadFixedBigUint64ArrayElementAsTagged( + SloppyTNode data_pointer, SloppyTNode offset) { Label if_zero(this), done(this); if (Is64()) { TNode value = UncheckedCast( @@ -2190,13 +2172,15 @@ Node* CodeStubAssembler::LoadFixedBigUint64ArrayElementAsTagged( Load(MachineType::UintPtr(), data_pointer, offset)); TNode low = UncheckedCast( Load(MachineType::UintPtr(), data_pointer, - Int32Add(offset, Int32Constant(kSystemPointerSize)))); + Int32Add(TruncateIntPtrToInt32(offset), + Int32Constant(kSystemPointerSize)))); #else TNode low = UncheckedCast( Load(MachineType::UintPtr(), data_pointer, offset)); TNode high = UncheckedCast( Load(MachineType::UintPtr(), data_pointer, - Int32Add(offset, Int32Constant(kSystemPointerSize)))); + Int32Add(TruncateIntPtrToInt32(offset), + Int32Constant(kSystemPointerSize)))); #endif return BigIntFromUint32Pair(low, high); } @@ -2244,10 +2228,10 @@ TNode CodeStubAssembler::BigIntFromUint64(TNode value) { return var_result.value(); } -Node* CodeStubAssembler::LoadFixedTypedArrayElementAsTagged( - Node* data_pointer, Node* index_node, ElementsKind elements_kind, +TNode CodeStubAssembler::LoadFixedTypedArrayElementAsTagged( + TNode data_pointer, Node* index_node, ElementsKind elements_kind, ParameterMode parameter_mode) { - Node* offset = + TNode offset = ElementOffsetFromIndex(index_node, elements_kind, parameter_mode, 0); switch (elements_kind) { case UINT8_ELEMENTS: /* fall through */ @@ -2281,7 +2265,8 @@ Node* CodeStubAssembler::LoadFixedTypedArrayElementAsTagged( } TNode CodeStubAssembler::LoadFixedTypedArrayElementAsTagged( - TNode data_pointer, TNode index, TNode elements_kind) { + TNode data_pointer, TNode index, + TNode elements_kind) { TVARIABLE(Numeric, var_result); Label done(this), if_unknown_type(this, Label::kDeferred); int32_t elements_kinds[] = { @@ -2307,12 +2292,12 @@ TNode CodeStubAssembler::LoadFixedTypedArrayElementAsTagged( BIND(&if_unknown_type); Unreachable(); -#define TYPED_ARRAY_CASE(Type, type, TYPE, ctype) \ - BIND(&if_##type##array); \ - { \ - var_result = CAST(LoadFixedTypedArrayElementAsTagged( \ - data_pointer, index, TYPE##_ELEMENTS, SMI_PARAMETERS)); \ - Goto(&done); \ +#define TYPED_ARRAY_CASE(Type, type, TYPE, ctype) \ + BIND(&if_##type##array); \ + { \ + var_result = LoadFixedTypedArrayElementAsTagged( \ + data_pointer, index, TYPE##_ELEMENTS, SMI_PARAMETERS); \ + Goto(&done); \ } TYPED_ARRAYS(TYPED_ARRAY_CASE) #undef TYPED_ARRAY_CASE @@ -2323,8 +2308,7 @@ TNode CodeStubAssembler::LoadFixedTypedArrayElementAsTagged( void CodeStubAssembler::StoreJSTypedArrayElementFromTagged( TNode context, TNode typed_array, - TNode index_node, TNode value, ElementsKind elements_kind, - ParameterMode parameter_mode) { + TNode index_node, TNode value, ElementsKind elements_kind) { TNode data_pointer = LoadJSTypedArrayBackingStore(typed_array); switch (elements_kind) { case UINT8_ELEMENTS: @@ -2333,26 +2317,26 @@ void CodeStubAssembler::StoreJSTypedArrayElementFromTagged( case UINT16_ELEMENTS: case INT16_ELEMENTS: StoreElement(data_pointer, elements_kind, index_node, - SmiToInt32(CAST(value)), parameter_mode); + SmiToInt32(CAST(value)), SMI_PARAMETERS); break; case UINT32_ELEMENTS: case INT32_ELEMENTS: StoreElement(data_pointer, elements_kind, index_node, - TruncateTaggedToWord32(context, value), parameter_mode); + TruncateTaggedToWord32(context, value), SMI_PARAMETERS); break; case FLOAT32_ELEMENTS: StoreElement(data_pointer, elements_kind, index_node, TruncateFloat64ToFloat32(LoadHeapNumberValue(CAST(value))), - 
parameter_mode); + SMI_PARAMETERS); break; case FLOAT64_ELEMENTS: StoreElement(data_pointer, elements_kind, index_node, - LoadHeapNumberValue(CAST(value)), parameter_mode); + LoadHeapNumberValue(CAST(value)), SMI_PARAMETERS); break; case BIGUINT64_ELEMENTS: case BIGINT64_ELEMENTS: StoreElement(data_pointer, elements_kind, index_node, - UncheckedCast(value), parameter_mode); + UncheckedCast(value), SMI_PARAMETERS); break; default: UNREACHABLE(); @@ -2638,6 +2622,11 @@ TNode CodeStubAssembler::IsGeneratorFunction( Int32Constant(FunctionKind::kConciseGeneratorMethod)))); } +TNode CodeStubAssembler::HasPrototypeSlot(TNode function) { + return TNode::UncheckedCast(IsSetWord32( + LoadMapBitField(LoadMap(function)))); +} + TNode CodeStubAssembler::HasPrototypeProperty(TNode function, TNode map) { // (has_prototype_slot() && IsConstructor()) || @@ -2925,15 +2914,12 @@ TNode CodeStubAssembler::EnsureArrayPushable(TNode map, // Disallow pushing onto prototypes. It might be the JSArray prototype. // Disallow pushing onto non-extensible objects. Comment("Disallow pushing onto prototypes"); - Node* bit_field2 = LoadMapBitField2(map); - int mask = Map::IsPrototypeMapBit::kMask | Map::IsExtensibleBit::kMask; - Node* test = Word32And(bit_field2, Int32Constant(mask)); - GotoIf(Word32NotEqual(test, Int32Constant(Map::IsExtensibleBit::kMask)), - bailout); + GotoIfNot(IsExtensibleNonPrototypeMap(map), bailout); EnsureArrayLengthWritable(map, bailout); - TNode kind = DecodeWord32(bit_field2); + TNode kind = + DecodeWord32(LoadMapBitField2(map)); return Signed(kind); } @@ -3022,7 +3008,7 @@ void CodeStubAssembler::TryStoreArrayElement(ElementsKind kind, GotoIfNotNumber(value, bailout); } if (IsDoubleElementsKind(kind)) { - value = ChangeNumberToFloat64(value); + value = ChangeNumberToFloat64(CAST(value)); } StoreElement(elements, kind, index, value, mode); } @@ -3131,14 +3117,10 @@ TNode CodeStubAssembler::AllocateBigInt(TNode length) { } TNode CodeStubAssembler::AllocateRawBigInt(TNode length) { - // This is currently used only for 64-bit wide BigInts. If more general - // applicability is required, a large-object check must be added. 
- CSA_ASSERT(this, UintPtrLessThan(length, IntPtrConstant(3))); - TNode size = IntPtrAdd(IntPtrConstant(BigInt::kHeaderSize), Signed(WordShl(length, kSystemPointerSizeLog2))); - Node* raw_result = Allocate(size, kNone); + Node* raw_result = Allocate(size, kAllowLargeObjectAllocation); StoreMapNoWriteBarrier(raw_result, RootIndex::kBigIntMap); if (FIELD_SIZE(BigInt::kOptionalPaddingOffset) != 0) { DCHECK_EQ(4, FIELD_SIZE(BigInt::kOptionalPaddingOffset)); @@ -3155,11 +3137,26 @@ void CodeStubAssembler::StoreBigIntBitfield(TNode bigint, MachineRepresentation::kWord32); } -void CodeStubAssembler::StoreBigIntDigit(TNode bigint, int digit_index, +void CodeStubAssembler::StoreBigIntDigit(TNode bigint, + intptr_t digit_index, TNode digit) { + CHECK_LE(0, digit_index); + CHECK_LT(digit_index, BigInt::kMaxLength); StoreObjectFieldNoWriteBarrier( - bigint, BigInt::kDigitsOffset + digit_index * kSystemPointerSize, digit, - UintPtrT::kMachineRepresentation); + bigint, + BigInt::kDigitsOffset + + static_cast(digit_index) * kSystemPointerSize, + digit, UintPtrT::kMachineRepresentation); +} + +void CodeStubAssembler::StoreBigIntDigit(TNode bigint, + TNode digit_index, + TNode digit) { + TNode offset = + IntPtrAdd(IntPtrConstant(BigInt::kDigitsOffset), + IntPtrMul(digit_index, IntPtrConstant(kSystemPointerSize))); + StoreObjectFieldNoWriteBarrier(bigint, offset, digit, + UintPtrT::kMachineRepresentation); } TNode CodeStubAssembler::LoadBigIntBitfield(TNode bigint) { @@ -3168,10 +3165,23 @@ TNode CodeStubAssembler::LoadBigIntBitfield(TNode bigint) { } TNode CodeStubAssembler::LoadBigIntDigit(TNode bigint, - int digit_index) { - return UncheckedCast(LoadObjectField( - bigint, BigInt::kDigitsOffset + digit_index * kSystemPointerSize, - MachineType::UintPtr())); + intptr_t digit_index) { + CHECK_LE(0, digit_index); + CHECK_LT(digit_index, BigInt::kMaxLength); + return UncheckedCast( + LoadObjectField(bigint, + BigInt::kDigitsOffset + + static_cast(digit_index) * kSystemPointerSize, + MachineType::UintPtr())); +} + +TNode CodeStubAssembler::LoadBigIntDigit(TNode bigint, + TNode digit_index) { + TNode offset = + IntPtrAdd(IntPtrConstant(BigInt::kDigitsOffset), + IntPtrMul(digit_index, IntPtrConstant(kSystemPointerSize))); + return UncheckedCast( + LoadObjectField(bigint, offset, MachineType::UintPtr())); } TNode CodeStubAssembler::AllocateByteArray(TNode length, @@ -3440,16 +3450,16 @@ TNode CodeStubAssembler::AllocateNameDictionary( } TNode CodeStubAssembler::AllocateNameDictionary( - TNode at_least_space_for) { + TNode at_least_space_for, AllocationFlags flags) { CSA_ASSERT(this, UintPtrLessThanOrEqual( at_least_space_for, IntPtrConstant(NameDictionary::kMaxCapacity))); TNode capacity = HashTableComputeCapacity(at_least_space_for); - return AllocateNameDictionaryWithCapacity(capacity); + return AllocateNameDictionaryWithCapacity(capacity, flags); } TNode CodeStubAssembler::AllocateNameDictionaryWithCapacity( - TNode capacity) { + TNode capacity, AllocationFlags flags) { CSA_ASSERT(this, WordIsPowerOfTwo(capacity)); CSA_ASSERT(this, IntPtrGreaterThan(capacity, IntPtrConstant(0))); TNode length = EntryToIndex(capacity); @@ -3457,39 +3467,51 @@ TNode CodeStubAssembler::AllocateNameDictionaryWithCapacity( TimesTaggedSize(length), IntPtrConstant(NameDictionary::kHeaderSize)); TNode result = - UncheckedCast(AllocateInNewSpace(store_size)); - Comment("Initialize NameDictionary"); + UncheckedCast(Allocate(store_size, flags)); + // Initialize FixedArray fields. 
- DCHECK(RootsTable::IsImmortalImmovable(RootIndex::kNameDictionaryMap)); - StoreMapNoWriteBarrier(result, RootIndex::kNameDictionaryMap); - StoreObjectFieldNoWriteBarrier(result, FixedArray::kLengthOffset, - SmiFromIntPtr(length)); + { + DCHECK(RootsTable::IsImmortalImmovable(RootIndex::kNameDictionaryMap)); + StoreMapNoWriteBarrier(result, RootIndex::kNameDictionaryMap); + StoreObjectFieldNoWriteBarrier(result, FixedArray::kLengthOffset, + SmiFromIntPtr(length)); + } + // Initialized HashTable fields. - TNode zero = SmiConstant(0); - StoreFixedArrayElement(result, NameDictionary::kNumberOfElementsIndex, zero, - SKIP_WRITE_BARRIER); - StoreFixedArrayElement(result, NameDictionary::kNumberOfDeletedElementsIndex, - zero, SKIP_WRITE_BARRIER); - StoreFixedArrayElement(result, NameDictionary::kCapacityIndex, - SmiTag(capacity), SKIP_WRITE_BARRIER); - // Initialize Dictionary fields. - TNode filler = UndefinedConstant(); - StoreFixedArrayElement(result, NameDictionary::kNextEnumerationIndexIndex, - SmiConstant(PropertyDetails::kInitialIndex), - SKIP_WRITE_BARRIER); - StoreFixedArrayElement(result, NameDictionary::kObjectHashIndex, - SmiConstant(PropertyArray::kNoHashSentinel), - SKIP_WRITE_BARRIER); + { + TNode zero = SmiConstant(0); + StoreFixedArrayElement(result, NameDictionary::kNumberOfElementsIndex, zero, + SKIP_WRITE_BARRIER); + StoreFixedArrayElement(result, + NameDictionary::kNumberOfDeletedElementsIndex, zero, + SKIP_WRITE_BARRIER); + StoreFixedArrayElement(result, NameDictionary::kCapacityIndex, + SmiTag(capacity), SKIP_WRITE_BARRIER); + // Initialize Dictionary fields. + StoreFixedArrayElement(result, NameDictionary::kNextEnumerationIndexIndex, + SmiConstant(PropertyDetails::kInitialIndex), + SKIP_WRITE_BARRIER); + StoreFixedArrayElement(result, NameDictionary::kObjectHashIndex, + SmiConstant(PropertyArray::kNoHashSentinel), + SKIP_WRITE_BARRIER); + } // Initialize NameDictionary elements. - TNode result_word = BitcastTaggedToWord(result); - TNode start_address = IntPtrAdd( - result_word, IntPtrConstant(NameDictionary::OffsetOfElementAt( - NameDictionary::kElementsStartIndex) - - kHeapObjectTag)); - TNode end_address = IntPtrAdd( - result_word, IntPtrSub(store_size, IntPtrConstant(kHeapObjectTag))); - StoreFieldsNoWriteBarrier(start_address, end_address, filler); + { + TNode result_word = BitcastTaggedToWord(result); + TNode start_address = IntPtrAdd( + result_word, IntPtrConstant(NameDictionary::OffsetOfElementAt( + NameDictionary::kElementsStartIndex) - + kHeapObjectTag)); + TNode end_address = IntPtrAdd( + result_word, IntPtrSub(store_size, IntPtrConstant(kHeapObjectTag))); + + TNode filler = UndefinedConstant(); + DCHECK(RootsTable::IsImmortalImmovable(RootIndex::kUndefinedValue)); + + StoreFieldsNoWriteBarrier(start_address, end_address, filler); + } + return result; } @@ -3605,6 +3627,17 @@ TNode CodeStubAssembler::AllocateSmallOrderedHashTable( StoreMapNoWriteBarrier(table_obj, small_ordered_hash_map); TNode table = UncheckedCast(table_obj); + { + // This store overlaps with the header fields stored below. + // Since it happens first, it effectively still just zero-initializes the + // padding. + constexpr int offset = + RoundDown(CollectionType::PaddingOffset()); + STATIC_ASSERT(offset + kTaggedSize == CollectionType::PaddingOffset() + + CollectionType::PaddingSize()); + StoreObjectFieldNoWriteBarrier(table, offset, SmiConstant(0)); + } + // Initialize the SmallOrderedHashTable fields. 
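+  // (NumberOfBuckets, NumberOfElements and NumberOfDeletedElements are
+  // byte-sized fields, hence the byte-wide stores below.)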
StoreObjectByteNoWriteBarrier( table, CollectionType::NumberOfBucketsOffset(), @@ -3748,8 +3781,9 @@ void CodeStubAssembler::InitializeStructBody(Node* object, Node* map, StoreFieldsNoWriteBarrier(start_address, end_address, filler); } -Node* CodeStubAssembler::AllocateJSObjectFromMap( - Node* map, Node* properties, Node* elements, AllocationFlags flags, +TNode CodeStubAssembler::AllocateJSObjectFromMap( + SloppyTNode map, SloppyTNode properties, + SloppyTNode elements, AllocationFlags flags, SlackTrackingMode slack_tracking_mode) { CSA_ASSERT(this, IsMap(map)); CSA_ASSERT(this, Word32BinaryNot(IsJSFunctionMap(map))); @@ -3761,7 +3795,7 @@ Node* CodeStubAssembler::AllocateJSObjectFromMap( StoreMapNoWriteBarrier(object, map); InitializeJSObjectFromMap(object, map, instance_size, properties, elements, slack_tracking_mode); - return object; + return CAST(object); } void CodeStubAssembler::InitializeJSObjectFromMap( @@ -5508,7 +5542,7 @@ void CodeStubAssembler::TaggedToWord32OrBigIntImpl( } BIND(&is_heap_number); - var_word32->Bind(TruncateHeapNumberValueToWord32(value)); + var_word32->Bind(TruncateHeapNumberValueToWord32(CAST(value))); CombineFeedback(var_feedback, BinaryOperationFeedback::kNumber); Goto(if_number); @@ -5521,9 +5555,10 @@ void CodeStubAssembler::TaggedToWord32OrBigIntImpl( } } -Node* CodeStubAssembler::TruncateHeapNumberValueToWord32(Node* object) { +TNode CodeStubAssembler::TruncateHeapNumberValueToWord32( + TNode object) { Node* value = LoadHeapNumberValue(object); - return TruncateFloat64ToWord32(value); + return Signed(TruncateFloat64ToWord32(value)); } void CodeStubAssembler::TryHeapNumberToSmi(TNode number, @@ -5731,10 +5766,7 @@ TNode CodeStubAssembler::ChangeNumberToUint32(TNode value) { return var_result.value(); } -TNode CodeStubAssembler::ChangeNumberToFloat64( - SloppyTNode value) { - // TODO(tebbi): Remove assert once argument is TNode instead of SloppyTNode. - CSA_SLOW_ASSERT(this, IsNumber(value)); +TNode CodeStubAssembler::ChangeNumberToFloat64(TNode value) { TVARIABLE(Float64T, result); Label smi(this); Label done(this, &result); @@ -5795,43 +5827,43 @@ TNode CodeStubAssembler::TimesDoubleSize(SloppyTNode value) { return WordShl(value, kDoubleSizeLog2); } -Node* CodeStubAssembler::ToThisValue(Node* context, Node* value, - PrimitiveType primitive_type, - char const* method_name) { - // We might need to loop once due to JSValue unboxing. - VARIABLE(var_value, MachineRepresentation::kTagged, value); +TNode CodeStubAssembler::ToThisValue(TNode context, + TNode value, + PrimitiveType primitive_type, + char const* method_name) { + // We might need to loop once due to JSPrimitiveWrapper unboxing. + TVARIABLE(Object, var_value, value); Label loop(this, &var_value), done_loop(this), done_throw(this, Label::kDeferred); Goto(&loop); BIND(&loop); { - // Load the current {value}. - value = var_value.value(); - // Check if the {value} is a Smi or a HeapObject. - GotoIf(TaggedIsSmi(value), (primitive_type == PrimitiveType::kNumber) - ? &done_loop - : &done_throw); + GotoIf( + TaggedIsSmi(var_value.value()), + (primitive_type == PrimitiveType::kNumber) ? &done_loop : &done_throw); + + TNode value = CAST(var_value.value()); // Load the map of the {value}. - Node* value_map = LoadMap(value); + TNode value_map = LoadMap(value); // Load the instance type of the {value}. - Node* value_instance_type = LoadMapInstanceType(value_map); + TNode value_instance_type = LoadMapInstanceType(value_map); - // Check if {value} is a JSValue. 
- Label if_valueisvalue(this, Label::kDeferred), if_valueisnotvalue(this); - Branch(InstanceTypeEqual(value_instance_type, JS_VALUE_TYPE), - &if_valueisvalue, &if_valueisnotvalue); + // Check if {value} is a JSPrimitiveWrapper. + Label if_valueiswrapper(this, Label::kDeferred), if_valueisnotwrapper(this); + Branch(InstanceTypeEqual(value_instance_type, JS_PRIMITIVE_WRAPPER_TYPE), + &if_valueiswrapper, &if_valueisnotwrapper); - BIND(&if_valueisvalue); + BIND(&if_valueiswrapper); { // Load the actual value from the {value}. - var_value.Bind(LoadObjectField(value, JSValue::kValueOffset)); + var_value = LoadObjectField(value, JSPrimitiveWrapper::kValueOffset); Goto(&loop); } - BIND(&if_valueisnotvalue); + BIND(&if_valueisnotwrapper); { switch (primitive_type) { case PrimitiveType::kBoolean: @@ -5988,13 +6020,12 @@ TNode CodeStubAssembler::InstanceTypeEqual( TNode CodeStubAssembler::IsDictionaryMap(SloppyTNode map) { CSA_SLOW_ASSERT(this, IsMap(map)); - Node* bit_field3 = LoadMapBitField3(map); - return IsSetWord32(bit_field3); + return IsSetWord32(LoadMapBitField3(map)); } TNode CodeStubAssembler::IsExtensibleMap(SloppyTNode map) { CSA_ASSERT(this, IsMap(map)); - return IsSetWord32(LoadMapBitField2(map)); + return IsSetWord32(LoadMapBitField3(map)); } TNode CodeStubAssembler::IsFrozenOrSealedElementsKindMap( @@ -6007,7 +6038,7 @@ TNode CodeStubAssembler::IsFrozenOrSealedElementsKindMap( TNode CodeStubAssembler::IsExtensibleNonPrototypeMap(TNode map) { int kMask = Map::IsExtensibleBit::kMask | Map::IsPrototypeMapBit::kMask; int kExpected = Map::IsExtensibleBit::kMask; - return Word32Equal(Word32And(LoadMapBitField2(map), Int32Constant(kMask)), + return Word32Equal(Word32And(LoadMapBitField3(map), Int32Constant(kMask)), Int32Constant(kExpected)); } @@ -6072,10 +6103,13 @@ TNode CodeStubAssembler::IsTypedArraySpeciesProtectorCellInvalid() { return WordEqual(cell_value, invalid); } -TNode CodeStubAssembler::IsRegExpSpeciesProtectorCellInvalid() { - Node* invalid = SmiConstant(Isolate::kProtectorInvalid); - Node* cell = LoadRoot(RootIndex::kRegExpSpeciesProtector); - Node* cell_value = LoadObjectField(cell, PropertyCell::kValueOffset); +TNode CodeStubAssembler::IsRegExpSpeciesProtectorCellInvalid( + TNode native_context) { + CSA_ASSERT(this, IsNativeContext(native_context)); + TNode cell = CAST(LoadContextElement( + native_context, Context::REGEXP_SPECIES_PROTECTOR_INDEX)); + TNode cell_value = LoadObjectField(cell, PropertyCell::kValueOffset); + TNode invalid = SmiConstant(Isolate::kProtectorInvalid); return WordEqual(cell_value, invalid); } @@ -6270,6 +6304,15 @@ TNode CodeStubAssembler::IsJSGlobalProxyInstanceType( return InstanceTypeEqual(instance_type, JS_GLOBAL_PROXY_TYPE); } +TNode CodeStubAssembler::IsJSGlobalProxyMap(SloppyTNode map) { + return IsJSGlobalProxyInstanceType(LoadMapInstanceType(map)); +} + +TNode CodeStubAssembler::IsJSGlobalProxy( + SloppyTNode object) { + return IsJSGlobalProxyMap(LoadMap(object)); +} + TNode CodeStubAssembler::IsJSObjectInstanceType( SloppyTNode instance_type) { STATIC_ASSERT(LAST_JS_OBJECT_TYPE == LAST_TYPE); @@ -6304,26 +6347,22 @@ TNode CodeStubAssembler::IsJSStringIterator( return HasInstanceType(object, JS_STRING_ITERATOR_TYPE); } -TNode CodeStubAssembler::IsJSGlobalProxy( - SloppyTNode object) { - return HasInstanceType(object, JS_GLOBAL_PROXY_TYPE); -} - TNode CodeStubAssembler::IsMap(SloppyTNode map) { return IsMetaMap(LoadMap(map)); } -TNode CodeStubAssembler::IsJSValueInstanceType( +TNode CodeStubAssembler::IsJSPrimitiveWrapperInstanceType( 
SloppyTNode instance_type) { - return InstanceTypeEqual(instance_type, JS_VALUE_TYPE); + return InstanceTypeEqual(instance_type, JS_PRIMITIVE_WRAPPER_TYPE); } -TNode CodeStubAssembler::IsJSValue(SloppyTNode object) { - return IsJSValueMap(LoadMap(object)); +TNode CodeStubAssembler::IsJSPrimitiveWrapper( + SloppyTNode object) { + return IsJSPrimitiveWrapperMap(LoadMap(object)); } -TNode CodeStubAssembler::IsJSValueMap(SloppyTNode map) { - return IsJSValueInstanceType(LoadMapInstanceType(map)); +TNode CodeStubAssembler::IsJSPrimitiveWrapperMap(SloppyTNode map) { + return IsJSPrimitiveWrapperInstanceType(LoadMapInstanceType(map)); } TNode CodeStubAssembler::IsJSArrayInstanceType( @@ -6420,7 +6459,7 @@ TNode CodeStubAssembler::IsFixedArrayWithKind( if (IsDoubleElementsKind(kind)) { return IsFixedDoubleArray(object); } else { - DCHECK(IsSmiOrObjectElementsKind(kind)); + DCHECK(IsSmiOrObjectElementsKind(kind) || IsSealedElementsKind(kind)); return IsFixedArraySubclass(object); } } @@ -6562,6 +6601,11 @@ TNode CodeStubAssembler::IsPrivateSymbol( [=] { return Int32FalseConstant(); }); } +TNode CodeStubAssembler::IsPrivateName(SloppyTNode symbol) { + TNode flags = LoadObjectField(symbol, Symbol::kFlagsOffset); + return IsSetWord32(flags); +} + TNode CodeStubAssembler::IsNativeContext( SloppyTNode object) { return WordEqual(LoadMap(object), LoadRoot(RootIndex::kNativeContextMap)); @@ -6769,7 +6813,7 @@ TNode CodeStubAssembler::IsHeapNumberUint32(TNode number) { IsHeapNumberPositive(number), [=] { TNode value = LoadHeapNumberValue(number); - TNode int_value = Unsigned(TruncateFloat64ToWord32(value)); + TNode int_value = TruncateFloat64ToWord32(value); return Float64Equal(value, ChangeUint32ToFloat64(int_value)); }, [=] { return Int32FalseConstant(); }); @@ -7423,8 +7467,8 @@ TNode CodeStubAssembler::StringAdd(Node* context, TNode left, return result.value(); } -TNode CodeStubAssembler::StringFromSingleCodePoint( - TNode codepoint, UnicodeEncoding encoding) { +TNode CodeStubAssembler::StringFromSingleUTF16EncodedCodePoint( + TNode codepoint) { VARIABLE(var_result, MachineRepresentation::kTagged, EmptyStringConstant()); Label if_isword16(this), if_isword32(this), return_result(this); @@ -7440,27 +7484,6 @@ TNode CodeStubAssembler::StringFromSingleCodePoint( BIND(&if_isword32); { - switch (encoding) { - case UnicodeEncoding::UTF16: - break; - case UnicodeEncoding::UTF32: { - // Convert UTF32 to UTF16 code units, and store as a 32 bit word. - Node* lead_offset = Int32Constant(0xD800 - (0x10000 >> 10)); - - // lead = (codepoint >> 10) + LEAD_OFFSET - Node* lead = - Int32Add(Word32Shr(codepoint, Int32Constant(10)), lead_offset); - - // trail = (codepoint & 0x3FF) + 0xDC00; - Node* trail = Int32Add(Word32And(codepoint, Int32Constant(0x3FF)), - Int32Constant(0xDC00)); - - // codpoint = (trail << 16) | lead; - codepoint = Signed(Word32Or(Word32Shl(trail, Int32Constant(16)), lead)); - break; - } - } - Node* value = AllocateSeqTwoByteString(2); StoreNoWriteBarrier( MachineRepresentation::kWord32, value, @@ -7513,7 +7536,7 @@ TNode CodeStubAssembler::NumberToString(TNode input) { // contains two elements (number and string) for each cache entry. // TODO(ishell): cleanup mask handling. 
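+  // The cache length is a power of two, so (length - 1) below acts as the
+  // hash mask; each entry spans two slots, the number key then the string.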
Node* mask = - BitcastTaggedToWord(LoadFixedArrayBaseLength(number_string_cache)); + BitcastTaggedSignedToWord(LoadFixedArrayBaseLength(number_string_cache)); TNode one = IntPtrConstant(1); mask = IntPtrSub(mask, one); @@ -7560,8 +7583,8 @@ TNode CodeStubAssembler::NumberToString(TNode input) { BIND(&if_smi); { // Load the smi key, make sure it matches the smi we're looking for. - Node* smi_index = BitcastWordToTagged( - WordAnd(WordShl(BitcastTaggedToWord(smi_input.value()), one), mask)); + Node* smi_index = BitcastWordToTagged(WordAnd( + WordShl(BitcastTaggedSignedToWord(smi_input.value()), one), mask)); Node* smi_key = UnsafeLoadFixedArrayElement(CAST(number_string_cache), smi_index, 0, SMI_PARAMETERS); GotoIf(WordNotEqual(smi_key, smi_input.value()), &runtime); @@ -8333,40 +8356,41 @@ TNode CodeStubAssembler::EntryToIndex(TNode entry, field_index)); } -TNode CodeStubAssembler::LoadDescriptorArrayElement( - TNode object, Node* index, int additional_offset) { - return LoadArrayElement(object, DescriptorArray::kHeaderSize, index, - additional_offset); +template +TNode CodeStubAssembler::LoadDescriptorArrayElement( + TNode object, TNode index, + int additional_offset) { + return LoadArrayElement( + object, DescriptorArray::kHeaderSize, index, additional_offset); } TNode CodeStubAssembler::LoadKeyByKeyIndex( TNode container, TNode key_index) { - return CAST(LoadDescriptorArrayElement(container, key_index, 0)); + return CAST(LoadDescriptorArrayElement(container, key_index, 0)); } TNode CodeStubAssembler::LoadDetailsByKeyIndex( TNode container, TNode key_index) { - const int kKeyToDetails = - DescriptorArray::ToDetailsIndex(0) - DescriptorArray::ToKeyIndex(0); - return Unsigned( - LoadAndUntagToWord32ArrayElement(container, DescriptorArray::kHeaderSize, - key_index, kKeyToDetails * kTaggedSize)); + const int kKeyToDetailsOffset = + DescriptorArray::kEntryDetailsOffset - DescriptorArray::kEntryKeyOffset; + return Unsigned(LoadAndUntagToWord32ArrayElement( + container, DescriptorArray::kHeaderSize, key_index, kKeyToDetailsOffset)); } TNode CodeStubAssembler::LoadValueByKeyIndex( TNode container, TNode key_index) { - const int kKeyToValue = - DescriptorArray::ToValueIndex(0) - DescriptorArray::ToKeyIndex(0); - return CAST(LoadDescriptorArrayElement(container, key_index, - kKeyToValue * kTaggedSize)); + const int kKeyToValueOffset = + DescriptorArray::kEntryValueOffset - DescriptorArray::kEntryKeyOffset; + return LoadDescriptorArrayElement(container, key_index, + kKeyToValueOffset); } TNode CodeStubAssembler::LoadFieldTypeByKeyIndex( TNode container, TNode key_index) { - const int kKeyToValue = - DescriptorArray::ToValueIndex(0) - DescriptorArray::ToKeyIndex(0); - return LoadDescriptorArrayElement(container, key_index, - kKeyToValue * kTaggedSize); + const int kKeyToValueOffset = + DescriptorArray::kEntryValueOffset - DescriptorArray::kEntryKeyOffset; + return LoadDescriptorArrayElement(container, key_index, + kKeyToValueOffset); } TNode CodeStubAssembler::DescriptorEntryToIndex( @@ -8377,14 +8401,14 @@ TNode CodeStubAssembler::DescriptorEntryToIndex( TNode CodeStubAssembler::LoadKeyByDescriptorEntry( TNode container, TNode descriptor_entry) { - return CAST(LoadDescriptorArrayElement( + return CAST(LoadDescriptorArrayElement( container, DescriptorEntryToIndex(descriptor_entry), DescriptorArray::ToKeyIndex(0) * kTaggedSize)); } TNode CodeStubAssembler::LoadKeyByDescriptorEntry( TNode container, int descriptor_entry) { - return CAST(LoadDescriptorArrayElement( + return 
CAST(LoadDescriptorArrayElement( container, IntPtrConstant(0), DescriptorArray::ToKeyIndex(descriptor_entry) * kTaggedSize)); } @@ -8406,14 +8430,14 @@ TNode CodeStubAssembler::LoadDetailsByDescriptorEntry( TNode CodeStubAssembler::LoadValueByDescriptorEntry( TNode container, int descriptor_entry) { - return CAST(LoadDescriptorArrayElement( + return LoadDescriptorArrayElement( container, IntPtrConstant(0), - DescriptorArray::ToValueIndex(descriptor_entry) * kTaggedSize)); + DescriptorArray::ToValueIndex(descriptor_entry) * kTaggedSize); } TNode CodeStubAssembler::LoadFieldTypeByDescriptorEntry( TNode container, TNode descriptor_entry) { - return LoadDescriptorArrayElement( + return LoadDescriptorArrayElement( container, DescriptorEntryToIndex(descriptor_entry), DescriptorArray::ToValueIndex(0) * kTaggedSize); } @@ -9503,15 +9527,15 @@ TNode CodeStubAssembler::CallGetterIfAccessor( Node* accessor_info = value; CSA_ASSERT(this, IsAccessorInfo(value)); CSA_ASSERT(this, TaggedIsNotSmi(receiver)); - Label if_array(this), if_function(this), if_value(this); + Label if_array(this), if_function(this), if_wrapper(this); // Dispatch based on {receiver} instance type. Node* receiver_map = LoadMap(receiver); Node* receiver_instance_type = LoadMapInstanceType(receiver_map); GotoIf(IsJSArrayInstanceType(receiver_instance_type), &if_array); GotoIf(IsJSFunctionInstanceType(receiver_instance_type), &if_function); - Branch(IsJSValueInstanceType(receiver_instance_type), &if_value, - if_bailout); + Branch(IsJSPrimitiveWrapperInstanceType(receiver_instance_type), + &if_wrapper, if_bailout); // JSArray AccessorInfo case. BIND(&if_array); @@ -9538,14 +9562,15 @@ TNode CodeStubAssembler::CallGetterIfAccessor( Goto(&done); } - // JSValue AccessorInfo case. - BIND(&if_value); + // JSPrimitiveWrapper AccessorInfo case. + BIND(&if_wrapper); { - // We only deal with the "length" accessor on JSValue string wrappers. + // We only deal with the "length" accessor on JSPrimitiveWrapper string + // wrappers. GotoIfNot(IsLengthString( LoadObjectField(accessor_info, AccessorInfo::kNameOffset)), if_bailout); - Node* receiver_value = LoadJSValueValue(receiver); + Node* receiver_value = LoadJSPrimitiveWrapperValue(receiver); GotoIfNot(TaggedIsNotSmi(receiver_value), if_bailout); GotoIfNot(IsString(receiver_value), if_bailout); var_value.Bind(LoadStringLengthAsSmi(receiver_value)); @@ -9646,8 +9671,9 @@ void CodeStubAssembler::TryLookupElement(Node* object, Node* map, // clang-format off int32_t values[] = { // Handled by {if_isobjectorsmi}. - PACKED_SMI_ELEMENTS, HOLEY_SMI_ELEMENTS, PACKED_ELEMENTS, - HOLEY_ELEMENTS, + PACKED_SMI_ELEMENTS, HOLEY_SMI_ELEMENTS, PACKED_ELEMENTS, HOLEY_ELEMENTS, + PACKED_SEALED_ELEMENTS, HOLEY_SEALED_ELEMENTS, PACKED_FROZEN_ELEMENTS, + HOLEY_FROZEN_ELEMENTS, // Handled by {if_isdouble}. PACKED_DOUBLE_ELEMENTS, HOLEY_DOUBLE_ELEMENTS, // Handled by {if_isdictionary}. 
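The hunks on either side of this point extend the hand-maintained `values[]` and `labels[]` tables of `TryLookupElement` with the new sealed/frozen elements kinds; both tables must change in lockstep. A minimal standalone sketch of that parallel-table dispatch pattern (plain C++, not V8 code; the kind names and handlers here are invented stand-ins):

```cpp
#include <cstddef>
#include <cstdint>
#include <cstdio>
#include <iterator>

enum ElementsKind : int32_t { PACKED_SMI, HOLEY_SMI, PACKED_DOUBLE, DICTIONARY };

void OnObjectOrSmi() { std::puts("object/smi path"); }
void OnDouble() { std::puts("double path"); }
void OnDictionary() { std::puts("dictionary path"); }

using Handler = void (*)();

void Dispatch(int32_t kind) {
  // Parallel tables: entry i of values[] selects entry i of labels[].
  static constexpr int32_t values[] = {PACKED_SMI, HOLEY_SMI, PACKED_DOUBLE,
                                       DICTIONARY};
  static constexpr Handler labels[] = {OnObjectOrSmi, OnObjectOrSmi, OnDouble,
                                       OnDictionary};
  static_assert(std::size(values) == std::size(labels),
                "values[] and labels[] must stay in sync");
  for (std::size_t i = 0; i < std::size(values); ++i) {
    if (values[i] == kind) return labels[i]();
  }
  std::puts("unsupported kind");  // the CSA version jumps to a bailout label
}

int main() { Dispatch(PACKED_DOUBLE); }
```

A mismatch between the two tables is the characteristic bug of this pattern, which is why the sealed and frozen kinds are added to both arrays in the same change.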
@@ -9673,7 +9699,8 @@ void CodeStubAssembler::TryLookupElement(Node* object, Node* map, }; Label* labels[] = { &if_isobjectorsmi, &if_isobjectorsmi, &if_isobjectorsmi, - &if_isobjectorsmi, + &if_isobjectorsmi, &if_isobjectorsmi, &if_isobjectorsmi, + &if_isobjectorsmi, &if_isobjectorsmi, &if_isdouble, &if_isdouble, &if_isdictionary, &if_isfaststringwrapper, @@ -9731,8 +9758,8 @@ void CodeStubAssembler::TryLookupElement(Node* object, Node* map, } BIND(&if_isfaststringwrapper); { - CSA_ASSERT(this, HasInstanceType(object, JS_VALUE_TYPE)); - Node* string = LoadJSValueValue(object); + CSA_ASSERT(this, HasInstanceType(object, JS_PRIMITIVE_WRAPPER_TYPE)); + Node* string = LoadJSPrimitiveWrapperValue(object); CSA_ASSERT(this, IsString(string)); Node* length = LoadStringLengthAsWord(string); GotoIf(UintPtrLessThan(intptr_index, length), if_found); @@ -9740,8 +9767,8 @@ void CodeStubAssembler::TryLookupElement(Node* object, Node* map, } BIND(&if_isslowstringwrapper); { - CSA_ASSERT(this, HasInstanceType(object, JS_VALUE_TYPE)); - Node* string = LoadJSValueValue(object); + CSA_ASSERT(this, HasInstanceType(object, JS_PRIMITIVE_WRAPPER_TYPE)); + Node* string = LoadJSPrimitiveWrapperValue(object); CSA_ASSERT(this, IsString(string)); Node* length = LoadStringLengthAsWord(string); GotoIf(UintPtrLessThan(intptr_index, length), if_found); @@ -9749,7 +9776,7 @@ void CodeStubAssembler::TryLookupElement(Node* object, Node* map, } BIND(&if_typedarray); { - Node* buffer = LoadObjectField(object, JSArrayBufferView::kBufferOffset); + TNode buffer = LoadJSArrayBufferViewBuffer(CAST(object)); GotoIf(IsDetachedBuffer(buffer), if_absent); TNode length = LoadJSTypedArrayLength(CAST(object)); @@ -9794,15 +9821,15 @@ void CodeStubAssembler::BranchIfMaybeSpecialIndex(TNode name_string, } void CodeStubAssembler::TryPrototypeChainLookup( - Node* receiver, Node* key, const LookupInHolder& lookup_property_in_holder, + Node* receiver, Node* object, Node* key, + const LookupInHolder& lookup_property_in_holder, const LookupInHolder& lookup_element_in_holder, Label* if_end, Label* if_bailout, Label* if_proxy) { // Ensure receiver is JSReceiver, otherwise bailout. 
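+  // ({object} is where the prototype walk starts; it may differ from the
+  // original {receiver}.)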
- Label if_objectisnotsmi(this); - Branch(TaggedIsSmi(receiver), if_bailout, &if_objectisnotsmi); - BIND(&if_objectisnotsmi); + GotoIf(TaggedIsSmi(receiver), if_bailout); + CSA_ASSERT(this, TaggedIsNotSmi(object)); - Node* map = LoadMap(receiver); + Node* map = LoadMap(object); Node* instance_type = LoadMapInstanceType(map); { Label if_objectisreceiver(this); @@ -9812,9 +9839,7 @@ void CodeStubAssembler::TryPrototypeChainLookup( if_bailout); BIND(&if_objectisreceiver); - if (if_proxy) { - GotoIf(InstanceTypeEqual(instance_type, JS_PROXY_TYPE), if_proxy); - } + GotoIf(InstanceTypeEqual(instance_type, JS_PROXY_TYPE), if_proxy); } VARIABLE(var_index, MachineType::PointerRepresentation()); @@ -9826,7 +9851,7 @@ void CodeStubAssembler::TryPrototypeChainLookup( BIND(&if_iskeyunique); { - VARIABLE(var_holder, MachineRepresentation::kTagged, receiver); + VARIABLE(var_holder, MachineRepresentation::kTagged, object); VARIABLE(var_holder_map, MachineRepresentation::kTagged, map); VARIABLE(var_holder_instance_type, MachineRepresentation::kWord32, instance_type); @@ -9872,7 +9897,7 @@ void CodeStubAssembler::TryPrototypeChainLookup( } BIND(&if_keyisindex); { - VARIABLE(var_holder, MachineRepresentation::kTagged, receiver); + VARIABLE(var_holder, MachineRepresentation::kTagged, object); VARIABLE(var_holder_map, MachineRepresentation::kTagged, map); VARIABLE(var_holder_instance_type, MachineRepresentation::kWord32, instance_type); @@ -10049,7 +10074,7 @@ TNode CodeStubAssembler::ElementOffsetFromIndex(Node* index_node, Smi smi_index; constant_index = ToSmiConstant(index_node, &smi_index); if (constant_index) index = smi_index.value(); - index_node = BitcastTaggedToWord(index_node); + index_node = BitcastTaggedSignedToWord(index_node); } else { DCHECK(mode == INTPTR_PARAMETERS); constant_index = ToIntPtrConstant(index_node, index); @@ -10594,7 +10619,8 @@ void CodeStubAssembler::BigIntToRawBytes(TNode bigint, void CodeStubAssembler::EmitElementStore(Node* object, Node* key, Node* value, ElementsKind elements_kind, KeyedAccessStoreMode store_mode, - Label* bailout, Node* context) { + Label* bailout, Node* context, + Variable* maybe_converted_value) { CSA_ASSERT(this, Word32BinaryNot(IsJSProxy(object))); Node* elements = LoadElements(object); @@ -10610,12 +10636,12 @@ void CodeStubAssembler::EmitElementStore(Node* object, Node* key, Node* value, TNode intptr_key = TryToIntptr(key, bailout); if (IsTypedArrayElementsKind(elements_kind)) { - Label done(this); + Label done(this), update_value_and_bailout(this, Label::kDeferred); // IntegerIndexedElementSet converts value to a Number/BigInt prior to the // bounds check. - value = PrepareValueForWriteToTypedArray(CAST(value), elements_kind, - CAST(context)); + Node* converted_value = PrepareValueForWriteToTypedArray( + CAST(value), elements_kind, CAST(context)); // There must be no allocations between the buffer load and // and the actual store to backing store, because GC may decide that @@ -10623,8 +10649,12 @@ void CodeStubAssembler::EmitElementStore(Node* object, Node* key, Node* value, // TODO(ishell): introduce DisallowHeapAllocationCode scope here. // Check if buffer has been detached. - Node* buffer = LoadObjectField(object, JSArrayBufferView::kBufferOffset); - GotoIf(IsDetachedBuffer(buffer), bailout); + TNode buffer = LoadJSArrayBufferViewBuffer(CAST(object)); + if (maybe_converted_value) { + GotoIf(IsDetachedBuffer(buffer), &update_value_and_bailout); + } else { + GotoIf(IsDetachedBuffer(buffer), bailout); + } // Bounds check. 
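+  // (For a standard store, an out-of-bounds key now routes through
+  // update_value_and_bailout so the runtime receives the already-converted
+  // value.)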
TNode length = LoadJSTypedArrayLength(CAST(object)); @@ -10633,27 +10663,88 @@ void CodeStubAssembler::EmitElementStore(Node* object, Node* key, Node* value, // Skip the store if we write beyond the length or // to a property with a negative integer index. GotoIfNot(UintPtrLessThan(intptr_key, length), &done); - } else if (store_mode == STANDARD_STORE) { - GotoIfNot(UintPtrLessThan(intptr_key, length), bailout); } else { - // This case is produced due to the dispatched call in - // ElementsTransitionAndStore and StoreFastElement. - // TODO(jgruber): Avoid generating unsupported combinations to save code - // size. - DebugBreak(); + DCHECK_EQ(store_mode, STANDARD_STORE); + GotoIfNot(UintPtrLessThan(intptr_key, length), &update_value_and_bailout); } TNode backing_store = LoadJSTypedArrayBackingStore(CAST(object)); - StoreElement(backing_store, elements_kind, intptr_key, value, + StoreElement(backing_store, elements_kind, intptr_key, converted_value, parameter_mode); Goto(&done); + BIND(&update_value_and_bailout); + // We already prepared the incoming value for storing into a typed array. + // This might involve calling ToNumber in some cases. We shouldn't call + // ToNumber again in the runtime so pass the converted value to the runtime. + // The prepared value is an untagged value. Convert it to a tagged value + // to pass it to runtime. It is not possible to do the detached buffer check + // before we prepare the value, since ToNumber can detach the ArrayBuffer. + // The spec specifies the order of these operations. + if (maybe_converted_value != nullptr) { + switch (elements_kind) { + case UINT8_ELEMENTS: + case INT8_ELEMENTS: + case UINT16_ELEMENTS: + case INT16_ELEMENTS: + case UINT8_CLAMPED_ELEMENTS: + maybe_converted_value->Bind(SmiFromInt32(converted_value)); + break; + case UINT32_ELEMENTS: + maybe_converted_value->Bind(ChangeUint32ToTagged(converted_value)); + break; + case INT32_ELEMENTS: + maybe_converted_value->Bind(ChangeInt32ToTagged(converted_value)); + break; + case FLOAT32_ELEMENTS: { + Label dont_allocate_heap_number(this), end(this); + GotoIf(TaggedIsSmi(value), &dont_allocate_heap_number); + GotoIf(IsHeapNumber(value), &dont_allocate_heap_number); + { + maybe_converted_value->Bind(AllocateHeapNumberWithValue( + ChangeFloat32ToFloat64(converted_value))); + Goto(&end); + } + BIND(&dont_allocate_heap_number); + { + maybe_converted_value->Bind(value); + Goto(&end); + } + BIND(&end); + break; + } + case FLOAT64_ELEMENTS: { + Label dont_allocate_heap_number(this), end(this); + GotoIf(TaggedIsSmi(value), &dont_allocate_heap_number); + GotoIf(IsHeapNumber(value), &dont_allocate_heap_number); + { + maybe_converted_value->Bind( + AllocateHeapNumberWithValue(converted_value)); + Goto(&end); + } + BIND(&dont_allocate_heap_number); + { + maybe_converted_value->Bind(value); + Goto(&end); + } + BIND(&end); + break; + } + case BIGINT64_ELEMENTS: + case BIGUINT64_ELEMENTS: + maybe_converted_value->Bind(converted_value); + break; + default: + UNREACHABLE(); + } + } + Goto(bailout); + BIND(&done); return; } - DCHECK( - IsFastElementsKind(elements_kind) || - IsInRange(elements_kind, PACKED_SEALED_ELEMENTS, HOLEY_SEALED_ELEMENTS)); + DCHECK(IsFastElementsKind(elements_kind) || + IsSealedElementsKind(elements_kind)); Node* length = SelectImpl( IsJSArray(object), [=]() { return LoadJSArrayLength(object); }, @@ -10670,18 +10761,24 @@ void CodeStubAssembler::EmitElementStore(Node* object, Node* key, Node* value, value = TryTaggedToFloat64(value, bailout); } - if (IsGrowStoreMode(store_mode) && - 
!(IsInRange(elements_kind, PACKED_SEALED_ELEMENTS, - HOLEY_SEALED_ELEMENTS))) { + if (IsGrowStoreMode(store_mode) && !IsSealedElementsKind(elements_kind)) { elements = CheckForCapacityGrow(object, elements, elements_kind, length, intptr_key, parameter_mode, bailout); } else { GotoIfNot(UintPtrLessThan(intptr_key, length), bailout); } + // Cannot store to a hole in holey sealed elements so bailout. + if (elements_kind == HOLEY_SEALED_ELEMENTS) { + TNode target_value = + LoadFixedArrayElement(CAST(elements), intptr_key); + GotoIf(IsTheHole(target_value), bailout); + } + // If we didn't grow {elements}, it might still be COW, in which case we // copy it now. - if (!IsSmiOrObjectElementsKind(elements_kind)) { + if (!(IsSmiOrObjectElementsKind(elements_kind) || + IsSealedElementsKind(elements_kind))) { CSA_ASSERT(this, Word32BinaryNot(IsFixedCOWArrayMap(LoadMap(elements)))); } else if (IsCOWHandlingStoreMode(store_mode)) { elements = CopyElementsOnWrite(object, elements, elements_kind, length, @@ -10925,7 +11022,8 @@ TNode CodeStubAssembler::CreateAllocationSiteInFeedbackVector( // Link the object to the allocation site list TNode site_list = ExternalConstant( ExternalReference::allocation_sites_list_address(isolate())); - TNode next_site = CAST(LoadBufferObject(site_list, 0)); + TNode next_site = + LoadBufferObject(ReinterpretCast(site_list), 0); // TODO(mvstanton): This is a store to a weak pointer, which we may want to // mark as such in order to skip the write barrier, once we have a unified @@ -12155,8 +12253,9 @@ Node* CodeStubAssembler::Equal(Node* left, Node* right, Node* context, return result.value(); } -Node* CodeStubAssembler::StrictEqual(Node* lhs, Node* rhs, - Variable* var_type_feedback) { +TNode CodeStubAssembler::StrictEqual(SloppyTNode lhs, + SloppyTNode rhs, + Variable* var_type_feedback) { // Pseudo-code for the algorithm below: // // if (lhs == rhs) { @@ -12208,7 +12307,7 @@ Node* CodeStubAssembler::StrictEqual(Node* lhs, Node* rhs, Label if_equal(this), if_notequal(this), if_not_equivalent_types(this), end(this); - VARIABLE(result, MachineRepresentation::kTagged); + TVARIABLE(Oddball, result); OverwriteFeedback(var_type_feedback, CompareOperationFeedback::kNone); @@ -12235,7 +12334,7 @@ Node* CodeStubAssembler::StrictEqual(Node* lhs, Node* rhs, BIND(&if_lhsisnotsmi); { // Load the map of {lhs}. - Node* lhs_map = LoadMap(lhs); + TNode lhs_map = LoadMap(CAST(lhs)); // Check if {lhs} is a HeapNumber. Label if_lhsisnumber(this), if_lhsisnotnumber(this); @@ -12250,8 +12349,8 @@ Node* CodeStubAssembler::StrictEqual(Node* lhs, Node* rhs, BIND(&if_rhsissmi); { // Convert {lhs} and {rhs} to floating point values. - Node* lhs_value = LoadHeapNumberValue(lhs); - Node* rhs_value = SmiToFloat64(rhs); + Node* lhs_value = LoadHeapNumberValue(CAST(lhs)); + Node* rhs_value = SmiToFloat64(CAST(rhs)); CombineFeedback(var_type_feedback, CompareOperationFeedback::kNumber); @@ -12261,8 +12360,9 @@ Node* CodeStubAssembler::StrictEqual(Node* lhs, Node* rhs, BIND(&if_rhsisnotsmi); { + TNode rhs_ho = CAST(rhs); // Load the map of {rhs}. - Node* rhs_map = LoadMap(rhs); + TNode rhs_map = LoadMap(rhs_ho); // Check if {rhs} is also a HeapNumber. Label if_rhsisnumber(this), if_rhsisnotnumber(this); @@ -12271,8 +12371,8 @@ Node* CodeStubAssembler::StrictEqual(Node* lhs, Node* rhs, BIND(&if_rhsisnumber); { // Convert {lhs} and {rhs} to floating point values. 
- Node* lhs_value = LoadHeapNumberValue(lhs); - Node* rhs_value = LoadHeapNumberValue(rhs); + Node* lhs_value = LoadHeapNumberValue(CAST(lhs)); + Node* rhs_value = LoadHeapNumberValue(CAST(rhs)); CombineFeedback(var_type_feedback, CompareOperationFeedback::kNumber); @@ -12308,7 +12408,7 @@ Node* CodeStubAssembler::StrictEqual(Node* lhs, Node* rhs, BIND(&if_lhsisstring); { // Load the instance type of {rhs}. - Node* rhs_instance_type = LoadInstanceType(rhs); + Node* rhs_instance_type = LoadInstanceType(CAST(rhs)); // Check if {rhs} is also a String. Label if_rhsisstring(this, Label::kDeferred), @@ -12325,8 +12425,8 @@ Node* CodeStubAssembler::StrictEqual(Node* lhs, Node* rhs, CollectFeedbackForString(rhs_instance_type); var_type_feedback->Bind(SmiOr(lhs_feedback, rhs_feedback)); } - result.Bind(CallBuiltin(Builtins::kStringEqual, - NoContextConstant(), lhs, rhs)); + result = CAST(CallBuiltin(Builtins::kStringEqual, + NoContextConstant(), lhs, rhs)); Goto(&end); } @@ -12344,7 +12444,7 @@ Node* CodeStubAssembler::StrictEqual(Node* lhs, Node* rhs, BIND(&if_lhsisbigint); { // Load the instance type of {rhs}. - Node* rhs_instance_type = LoadInstanceType(rhs); + TNode rhs_instance_type = LoadInstanceType(CAST(rhs)); // Check if {rhs} is also a BigInt. Label if_rhsisbigint(this, Label::kDeferred), @@ -12356,8 +12456,8 @@ Node* CodeStubAssembler::StrictEqual(Node* lhs, Node* rhs, { CombineFeedback(var_type_feedback, CompareOperationFeedback::kBigInt); - result.Bind(CallRuntime(Runtime::kBigIntEqualToBigInt, - NoContextConstant(), lhs, rhs)); + result = CAST(CallRuntime(Runtime::kBigIntEqualToBigInt, + NoContextConstant(), lhs, rhs)); Goto(&end); } @@ -12368,8 +12468,8 @@ Node* CodeStubAssembler::StrictEqual(Node* lhs, Node* rhs, BIND(&if_lhsisnotbigint); if (var_type_feedback != nullptr) { // Load the instance type of {rhs}. - Node* rhs_map = LoadMap(rhs); - Node* rhs_instance_type = LoadMapInstanceType(rhs_map); + TNode rhs_map = LoadMap(CAST(rhs)); + TNode rhs_instance_type = LoadMapInstanceType(rhs_map); Label if_lhsissymbol(this), if_lhsisreceiver(this), if_lhsisoddball(this); @@ -12442,7 +12542,7 @@ Node* CodeStubAssembler::StrictEqual(Node* lhs, Node* rhs, BIND(&if_rhsisnotsmi); { // Load the map of the {rhs}. - Node* rhs_map = LoadMap(rhs); + TNode rhs_map = LoadMap(CAST(rhs)); // The {rhs} could be a HeapNumber with the same value as {lhs}. Label if_rhsisnumber(this), if_rhsisnotnumber(this); @@ -12451,8 +12551,8 @@ Node* CodeStubAssembler::StrictEqual(Node* lhs, Node* rhs, BIND(&if_rhsisnumber); { // Convert {lhs} and {rhs} to floating point values. 
- Node* lhs_value = SmiToFloat64(lhs); - Node* rhs_value = LoadHeapNumberValue(rhs); + TNode lhs_value = SmiToFloat64(CAST(lhs)); + TNode rhs_value = LoadHeapNumberValue(CAST(rhs)); CombineFeedback(var_type_feedback, CompareOperationFeedback::kNumber); @@ -12468,7 +12568,7 @@ Node* CodeStubAssembler::StrictEqual(Node* lhs, Node* rhs, BIND(&if_equal); { - result.Bind(TrueConstant()); + result = TrueConstant(); Goto(&end); } @@ -12480,7 +12580,7 @@ Node* CodeStubAssembler::StrictEqual(Node* lhs, Node* rhs, BIND(&if_notequal); { - result.Bind(FalseConstant()); + result = FalseConstant(); Goto(&end); } @@ -12636,7 +12736,7 @@ TNode CodeStubAssembler::HasProperty(SloppyTNode context, &return_true, &return_false, next_holder, if_bailout); }; - TryPrototypeChainLookup(object, key, lookup_property_in_holder, + TryPrototypeChainLookup(object, object, key, lookup_property_in_holder, lookup_element_in_holder, &return_false, &call_runtime, &if_proxy); @@ -13114,8 +13214,9 @@ TNode CodeStubAssembler::CreateArrayIterator( return CAST(iterator); } -Node* CodeStubAssembler::AllocateJSIteratorResult(Node* context, Node* value, - Node* done) { +TNode CodeStubAssembler::AllocateJSIteratorResult( + SloppyTNode context, SloppyTNode value, + SloppyTNode done) { CSA_ASSERT(this, IsBoolean(done)); Node* native_context = LoadNativeContext(context); Node* map = @@ -13128,7 +13229,7 @@ Node* CodeStubAssembler::AllocateJSIteratorResult(Node* context, Node* value, RootIndex::kEmptyFixedArray); StoreObjectFieldNoWriteBarrier(result, JSIteratorResult::kValueOffset, value); StoreObjectFieldNoWriteBarrier(result, JSIteratorResult::kDoneOffset, done); - return result; + return CAST(result); } Node* CodeStubAssembler::AllocateJSIteratorResultForEntry(Node* context, @@ -13174,9 +13275,8 @@ TNode CodeStubAssembler::ArraySpeciesCreate(TNode context, return Construct(context, constructor, len); } -Node* CodeStubAssembler::IsDetachedBuffer(Node* buffer) { - CSA_ASSERT(this, HasInstanceType(buffer, JS_ARRAY_BUFFER_TYPE)); - TNode buffer_bit_field = LoadJSArrayBufferBitField(CAST(buffer)); +TNode CodeStubAssembler::IsDetachedBuffer(TNode buffer) { + TNode buffer_bit_field = LoadJSArrayBufferBitField(buffer); return IsSetWord32(buffer_bit_field); } @@ -13367,7 +13467,8 @@ void CodeStubArguments::PopAndReturn(Node* value) { value); } -Node* CodeStubAssembler::IsFastElementsKind(Node* elements_kind) { +TNode CodeStubAssembler::IsFastElementsKind( + TNode elements_kind) { STATIC_ASSERT(FIRST_ELEMENTS_KIND == FIRST_FAST_ELEMENTS_KIND); return Uint32LessThanOrEqual(elements_kind, Int32Constant(LAST_FAST_ELEMENTS_KIND)); @@ -13382,7 +13483,8 @@ TNode CodeStubAssembler::IsDoubleElementsKind( Int32Constant(PACKED_DOUBLE_ELEMENTS / 2)); } -Node* CodeStubAssembler::IsFastSmiOrTaggedElementsKind(Node* elements_kind) { +TNode CodeStubAssembler::IsFastSmiOrTaggedElementsKind( + TNode elements_kind) { STATIC_ASSERT(FIRST_ELEMENTS_KIND == FIRST_FAST_ELEMENTS_KIND); STATIC_ASSERT(PACKED_DOUBLE_ELEMENTS > TERMINAL_FAST_ELEMENTS_KIND); STATIC_ASSERT(HOLEY_DOUBLE_ELEMENTS > TERMINAL_FAST_ELEMENTS_KIND); @@ -13390,12 +13492,14 @@ Node* CodeStubAssembler::IsFastSmiOrTaggedElementsKind(Node* elements_kind) { Int32Constant(TERMINAL_FAST_ELEMENTS_KIND)); } -Node* CodeStubAssembler::IsFastSmiElementsKind(Node* elements_kind) { +TNode CodeStubAssembler::IsFastSmiElementsKind( + SloppyTNode elements_kind) { return Uint32LessThanOrEqual(elements_kind, Int32Constant(HOLEY_SMI_ELEMENTS)); } -Node* CodeStubAssembler::IsHoleyFastElementsKind(Node* elements_kind) 
{ +TNode CodeStubAssembler::IsHoleyFastElementsKind( + TNode elements_kind) { CSA_ASSERT(this, IsFastElementsKind(elements_kind)); STATIC_ASSERT(HOLEY_SMI_ELEMENTS == (PACKED_SMI_ELEMENTS | 1)); @@ -13404,7 +13508,8 @@ Node* CodeStubAssembler::IsHoleyFastElementsKind(Node* elements_kind) { return IsSetWord32(elements_kind, 1); } -Node* CodeStubAssembler::IsHoleyFastElementsKindForRead(Node* elements_kind) { +TNode CodeStubAssembler::IsHoleyFastElementsKindForRead( + TNode elements_kind) { CSA_ASSERT(this, Uint32LessThanOrEqual(elements_kind, Int32Constant(LAST_FROZEN_ELEMENTS_KIND))); @@ -13417,8 +13522,8 @@ Node* CodeStubAssembler::IsHoleyFastElementsKindForRead(Node* elements_kind) { return IsSetWord32(elements_kind, 1); } -Node* CodeStubAssembler::IsElementsKindGreaterThan( - Node* target_kind, ElementsKind reference_kind) { +TNode CodeStubAssembler::IsElementsKindGreaterThan( + TNode target_kind, ElementsKind reference_kind) { return Int32GreaterThan(target_kind, Int32Constant(reference_kind)); } @@ -13442,14 +13547,6 @@ Node* CodeStubAssembler::IsDebugActive() { return Word32NotEqual(is_debug_active, Int32Constant(0)); } -TNode CodeStubAssembler::IsRuntimeCallStatsEnabled() { - STATIC_ASSERT(sizeof(TracingFlags::runtime_stats) == kInt32Size); - TNode flag_value = UncheckedCast(Load( - MachineType::Int32(), - ExternalConstant(ExternalReference::address_of_runtime_stats_flag()))); - return Word32NotEqual(flag_value, Int32Constant(0)); -} - Node* CodeStubAssembler::IsPromiseHookEnabled() { Node* const promise_hook = Load( MachineType::Pointer(), @@ -13494,8 +13591,9 @@ TNode CodeStubAssembler::LoadBuiltin(TNode builtin_id) { int const kSmiShiftBits = kSmiShiftSize + kSmiTagSize; int index_shift = kSystemPointerSizeLog2 - kSmiShiftBits; TNode table_index = - index_shift >= 0 ? WordShl(BitcastTaggedToWord(builtin_id), index_shift) - : WordSar(BitcastTaggedToWord(builtin_id), -index_shift); + index_shift >= 0 + ? 
WordShl(BitcastTaggedSignedToWord(builtin_id), index_shift) + : WordSar(BitcastTaggedSignedToWord(builtin_id), -index_shift); return CAST( Load(MachineType::TaggedPointer(), @@ -13637,18 +13735,6 @@ Node* CodeStubAssembler::AllocateFunctionWithMapAndContext(Node* map, return fun; } -Node* CodeStubAssembler::MarkerIsFrameType(Node* marker_or_function, - StackFrame::Type frame_type) { - return WordEqual(marker_or_function, - IntPtrConstant(StackFrame::TypeToMarker(frame_type))); -} - -Node* CodeStubAssembler::MarkerIsNotFrameType(Node* marker_or_function, - StackFrame::Type frame_type) { - return WordNotEqual(marker_or_function, - IntPtrConstant(StackFrame::TypeToMarker(frame_type))); -} - void CodeStubAssembler::CheckPrototypeEnumCache(Node* receiver, Node* receiver_map, Label* if_fast, @@ -13923,7 +14009,7 @@ void CodeStubAssembler::GotoIfInitialPrototypePropertiesModified( if (i == 0) { combined_details = details; } else { - combined_details = Unsigned(Word32And(combined_details, details)); + combined_details = Word32And(combined_details, details); } } diff --git a/deps/v8/src/codegen/code-stub-assembler.h b/deps/v8/src/codegen/code-stub-assembler.h index 207eb509e11cb4..47abd027490679 100644 --- a/deps/v8/src/codegen/code-stub-assembler.h +++ b/deps/v8/src/codegen/code-stub-assembler.h @@ -10,9 +10,8 @@ #include "src/base/macros.h" #include "src/codegen/bailout-reason.h" #include "src/common/globals.h" +#include "src/common/message-template.h" #include "src/compiler/code-assembler.h" -#include "src/execution/frames.h" -#include "src/execution/message-template.h" #include "src/objects/arguments.h" #include "src/objects/bigint.h" #include "src/objects/objects.h" @@ -39,7 +38,6 @@ enum class PrimitiveType { kBoolean, kNumber, kString, kSymbol }; PromiseSpeciesProtector) \ V(TypedArraySpeciesProtector, typed_array_species_protector, \ TypedArraySpeciesProtector) \ - V(RegExpSpeciesProtector, regexp_species_protector, RegExpSpeciesProtector) #define HEAP_IMMUTABLE_IMMOVABLE_OBJECT_LIST(V) \ V(AccessorInfoMap, accessor_info_map, AccessorInfoMap) \ @@ -111,59 +109,45 @@ enum class PrimitiveType { kBoolean, kNumber, kString, kSymbol }; #endif #ifdef DEBUG -// Add stringified versions to the given values, except the first. That is, -// transform -// x, a, b, c, d, e, f -// to -// a, "a", b, "b", c, "c", d, "d", e, "e", f, "f" -// -// __VA_ARGS__ is ignored to allow the caller to pass through too many -// parameters, and the first element is ignored to support having no extra -// values without empty __VA_ARGS__ (which cause all sorts of problems with -// extra commas). -#define CSA_ASSERT_STRINGIFY_EXTRA_VALUES_5(_, v1, v2, v3, v4, v5, ...) \ - v1, #v1, v2, #v2, v3, #v3, v4, #v4, v5, #v5 - -// Stringify the given variable number of arguments. The arguments are trimmed -// to 5 if there are too many, and padded with nullptr if there are not enough. -#define CSA_ASSERT_STRINGIFY_EXTRA_VALUES(...) \ - CSA_ASSERT_STRINGIFY_EXTRA_VALUES_5(__VA_ARGS__, nullptr, nullptr, nullptr, \ - nullptr, nullptr) - -#define CSA_ASSERT_GET_FIRST(x, ...) (x) -#define CSA_ASSERT_GET_FIRST_STR(x, ...) #x +// CSA_ASSERT_ARGS generates an +// std::initializer_list from __VA_ARGS__. It +// currently supports between 0 and 2 arguments. + +// clang-format off +#define CSA_ASSERT_0_ARGS(...) {} +#define CSA_ASSERT_1_ARG(a, ...) {{a, #a}} +#define CSA_ASSERT_2_ARGS(a, b, ...) {{a, #a}, {b, #b}} +// clang-format on +#define SWITCH_CSA_ASSERT_ARGS(dummy, a, b, FUNC, ...) FUNC(a, b) +#define CSA_ASSERT_ARGS(...) 
\ + SWITCH_CSA_ASSERT_ARGS(dummy, ##__VA_ARGS__, CSA_ASSERT_2_ARGS, \ + CSA_ASSERT_1_ARG, CSA_ASSERT_0_ARGS) // CSA_ASSERT(csa, , ) -// We have to jump through some hoops to allow to be -// empty. -#define CSA_ASSERT(csa, ...) \ - (csa)->Assert( \ - [&]() -> compiler::Node* { \ - return implicit_cast>( \ - EXPAND(CSA_ASSERT_GET_FIRST(__VA_ARGS__))); \ - }, \ - EXPAND(CSA_ASSERT_GET_FIRST_STR(__VA_ARGS__)), __FILE__, __LINE__, \ - CSA_ASSERT_STRINGIFY_EXTRA_VALUES(__VA_ARGS__)) +#define CSA_ASSERT(csa, condition_node, ...) \ + (csa)->Assert( \ + [&]() -> compiler::Node* { \ + return implicit_cast>(condition_node); \ + }, \ + #condition_node, __FILE__, __LINE__, CSA_ASSERT_ARGS(__VA_ARGS__)) // CSA_ASSERT_BRANCH(csa, [](Label* ok, Label* not_ok) {...}, // ) -#define CSA_ASSERT_BRANCH(csa, ...) \ - (csa)->Assert(EXPAND(CSA_ASSERT_GET_FIRST(__VA_ARGS__)), \ - EXPAND(CSA_ASSERT_GET_FIRST_STR(__VA_ARGS__)), __FILE__, \ - __LINE__, CSA_ASSERT_STRINGIFY_EXTRA_VALUES(__VA_ARGS__)) - -#define CSA_ASSERT_JS_ARGC_OP(csa, Op, op, expected) \ - (csa)->Assert( \ - [&]() -> compiler::Node* { \ - compiler::Node* const argc = \ - (csa)->Parameter(Descriptor::kJSActualArgumentsCount); \ - return (csa)->Op(argc, (csa)->Int32Constant(expected)); \ - }, \ - "argc " #op " " #expected, __FILE__, __LINE__, \ - SmiFromInt32((csa)->Parameter(Descriptor::kJSActualArgumentsCount)), \ - "argc") +#define CSA_ASSERT_BRANCH(csa, gen, ...) \ + (csa)->Assert(gen, #gen, __FILE__, __LINE__, CSA_ASSERT_ARGS(__VA_ARGS__)) + +#define CSA_ASSERT_JS_ARGC_OP(csa, Op, op, expected) \ + (csa)->Assert( \ + [&]() -> compiler::Node* { \ + compiler::Node* const argc = \ + (csa)->Parameter(Descriptor::kJSActualArgumentsCount); \ + return (csa)->Op(argc, (csa)->Int32Constant(expected)); \ + }, \ + "argc " #op " " #expected, __FILE__, __LINE__, \ + {{SmiFromInt32((csa)->Parameter(Descriptor::kJSActualArgumentsCount)), \ + "argc"}}) #define CSA_ASSERT_JS_ARGC_EQ(csa, expected) \ CSA_ASSERT_JS_ARGC_OP(csa, Word32Equal, ==, expected) @@ -490,21 +474,21 @@ class V8_EXPORT_PRIVATE CodeStubAssembler TNode SmiToInt32(SloppyTNode value); // Smi operations. 
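// [Editorial sketch, not part of the patch] Why SMI_ARITHMETIC_BINOP below can
// use plain word arithmetic: with 32-bit Smis on a 64-bit target, a Smi with
// value v is encoded as the machine word (v << 32), so word addition of two
// encodings is already the encoding of the sum. A minimal standalone model,
// with hypothetical names (assumes <cstdint>):
inline intptr_t EncodeSmiModel(int32_t v) {
  return static_cast<intptr_t>(v) << 32;  // low half holds only tag bits
}
inline intptr_t SmiAddModel(intptr_t a, intptr_t b) {
  return a + b;  // (x << 32) + (y << 32) == (x + y) << 32, barring overflow
}
// E.g. SmiAddModel(EncodeSmiModel(2), EncodeSmiModel(3)) == EncodeSmiModel(5),
// which is why the macro merely bitcasts the Smis to words and adds them.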
-#define SMI_ARITHMETIC_BINOP(SmiOpName, IntPtrOpName, Int32OpName) \ - TNode SmiOpName(TNode a, TNode b) { \ - if (SmiValuesAre32Bits()) { \ - return BitcastWordToTaggedSigned( \ - IntPtrOpName(BitcastTaggedToWord(a), BitcastTaggedToWord(b))); \ - } else { \ - DCHECK(SmiValuesAre31Bits()); \ - if (kSystemPointerSize == kInt64Size) { \ - CSA_ASSERT(this, IsValidSmi(a)); \ - CSA_ASSERT(this, IsValidSmi(b)); \ - } \ - return BitcastWordToTaggedSigned(ChangeInt32ToIntPtr( \ - Int32OpName(TruncateIntPtrToInt32(BitcastTaggedToWord(a)), \ - TruncateIntPtrToInt32(BitcastTaggedToWord(b))))); \ - } \ +#define SMI_ARITHMETIC_BINOP(SmiOpName, IntPtrOpName, Int32OpName) \ + TNode SmiOpName(TNode a, TNode b) { \ + if (SmiValuesAre32Bits()) { \ + return BitcastWordToTaggedSigned(IntPtrOpName( \ + BitcastTaggedSignedToWord(a), BitcastTaggedSignedToWord(b))); \ + } else { \ + DCHECK(SmiValuesAre31Bits()); \ + if (kSystemPointerSize == kInt64Size) { \ + CSA_ASSERT(this, IsValidSmi(a)); \ + CSA_ASSERT(this, IsValidSmi(b)); \ + } \ + return BitcastWordToTaggedSigned(ChangeInt32ToIntPtr( \ + Int32OpName(TruncateIntPtrToInt32(BitcastTaggedSignedToWord(a)), \ + TruncateIntPtrToInt32(BitcastTaggedSignedToWord(b))))); \ + } \ } SMI_ARITHMETIC_BINOP(SmiAdd, IntPtrAdd, Int32Add) SMI_ARITHMETIC_BINOP(SmiSub, IntPtrSub, Int32Sub) @@ -523,19 +507,20 @@ class V8_EXPORT_PRIVATE CodeStubAssembler TNode TrySmiSub(TNode a, TNode b, Label* if_overflow); TNode SmiShl(TNode a, int shift) { - return BitcastWordToTaggedSigned(WordShl(BitcastTaggedToWord(a), shift)); + return BitcastWordToTaggedSigned( + WordShl(BitcastTaggedSignedToWord(a), shift)); } TNode SmiShr(TNode a, int shift) { return BitcastWordToTaggedSigned( - WordAnd(WordShr(BitcastTaggedToWord(a), shift), - BitcastTaggedToWord(SmiConstant(-1)))); + WordAnd(WordShr(BitcastTaggedSignedToWord(a), shift), + BitcastTaggedSignedToWord(SmiConstant(-1)))); } TNode SmiSar(TNode a, int shift) { return BitcastWordToTaggedSigned( - WordAnd(WordSar(BitcastTaggedToWord(a), shift), - BitcastTaggedToWord(SmiConstant(-1)))); + WordAnd(WordSar(BitcastTaggedSignedToWord(a), shift), + BitcastTaggedSignedToWord(SmiConstant(-1)))); } Node* WordOrSmiShl(Node* a, int shift, ParameterMode mode) { @@ -556,19 +541,20 @@ class V8_EXPORT_PRIVATE CodeStubAssembler } } -#define SMI_COMPARISON_OP(SmiOpName, IntPtrOpName, Int32OpName) \ - TNode SmiOpName(TNode a, TNode b) { \ - if (SmiValuesAre32Bits()) { \ - return IntPtrOpName(BitcastTaggedToWord(a), BitcastTaggedToWord(b)); \ - } else { \ - DCHECK(SmiValuesAre31Bits()); \ - if (kSystemPointerSize == kInt64Size) { \ - CSA_ASSERT(this, IsValidSmi(a)); \ - CSA_ASSERT(this, IsValidSmi(b)); \ - } \ - return Int32OpName(TruncateIntPtrToInt32(BitcastTaggedToWord(a)), \ - TruncateIntPtrToInt32(BitcastTaggedToWord(b))); \ - } \ +#define SMI_COMPARISON_OP(SmiOpName, IntPtrOpName, Int32OpName) \ + TNode SmiOpName(TNode a, TNode b) { \ + if (SmiValuesAre32Bits()) { \ + return IntPtrOpName(BitcastTaggedSignedToWord(a), \ + BitcastTaggedSignedToWord(b)); \ + } else { \ + DCHECK(SmiValuesAre31Bits()); \ + if (kSystemPointerSize == kInt64Size) { \ + CSA_ASSERT(this, IsValidSmi(a)); \ + CSA_ASSERT(this, IsValidSmi(b)); \ + } \ + return Int32OpName(TruncateIntPtrToInt32(BitcastTaggedSignedToWord(a)), \ + TruncateIntPtrToInt32(BitcastTaggedSignedToWord(b))); \ + } \ } SMI_COMPARISON_OP(SmiEqual, WordEqual, Word32Equal) SMI_COMPARISON_OP(SmiNotEqual, WordNotEqual, Word32NotEqual) @@ -626,43 +612,22 @@ class V8_EXPORT_PRIVATE CodeStubAssembler using BranchGenerator 
= std::function; using NodeGenerator = std::function; - - void Assert(const BranchGenerator& branch, const char* message = nullptr, - const char* file = nullptr, int line = 0, - Node* extra_node1 = nullptr, const char* extra_node1_name = "", - Node* extra_node2 = nullptr, const char* extra_node2_name = "", - Node* extra_node3 = nullptr, const char* extra_node3_name = "", - Node* extra_node4 = nullptr, const char* extra_node4_name = "", - Node* extra_node5 = nullptr, const char* extra_node5_name = ""); - void Assert(const NodeGenerator& condition_body, - const char* message = nullptr, const char* file = nullptr, - int line = 0, Node* extra_node1 = nullptr, - const char* extra_node1_name = "", Node* extra_node2 = nullptr, - const char* extra_node2_name = "", Node* extra_node3 = nullptr, - const char* extra_node3_name = "", Node* extra_node4 = nullptr, - const char* extra_node4_name = "", Node* extra_node5 = nullptr, - const char* extra_node5_name = ""); - void Check(const BranchGenerator& branch, const char* message = nullptr, - const char* file = nullptr, int line = 0, - Node* extra_node1 = nullptr, const char* extra_node1_name = "", - Node* extra_node2 = nullptr, const char* extra_node2_name = "", - Node* extra_node3 = nullptr, const char* extra_node3_name = "", - Node* extra_node4 = nullptr, const char* extra_node4_name = "", - Node* extra_node5 = nullptr, const char* extra_node5_name = ""); - void Check(const NodeGenerator& condition_body, const char* message = nullptr, - const char* file = nullptr, int line = 0, - Node* extra_node1 = nullptr, const char* extra_node1_name = "", - Node* extra_node2 = nullptr, const char* extra_node2_name = "", - Node* extra_node3 = nullptr, const char* extra_node3_name = "", - Node* extra_node4 = nullptr, const char* extra_node4_name = "", - Node* extra_node5 = nullptr, const char* extra_node5_name = ""); - void FailAssert( - const char* message = nullptr, const char* file = nullptr, int line = 0, - Node* extra_node1 = nullptr, const char* extra_node1_name = "", - Node* extra_node2 = nullptr, const char* extra_node2_name = "", - Node* extra_node3 = nullptr, const char* extra_node3_name = "", - Node* extra_node4 = nullptr, const char* extra_node4_name = "", - Node* extra_node5 = nullptr, const char* extra_node5_name = ""); + using ExtraNode = std::pair; + + void Assert(const BranchGenerator& branch, const char* message, + const char* file, int line, + std::initializer_list extra_nodes = {}); + void Assert(const NodeGenerator& condition_body, const char* message, + const char* file, int line, + std::initializer_list extra_nodes = {}); + void Check(const BranchGenerator& branch, const char* message, + const char* file, int line, + std::initializer_list extra_nodes = {}); + void Check(const NodeGenerator& condition_body, const char* message, + const char* file, int line, + std::initializer_list extra_nodes = {}); + void FailAssert(const char* message, const char* file, int line, + std::initializer_list extra_nodes = {}); void FastCheck(TNode condition); @@ -794,6 +759,12 @@ class V8_EXPORT_PRIVATE CodeStubAssembler // otherwise goes to {if_false}. void BranchIfToBooleanIsTrue(Node* value, Label* if_true, Label* if_false); + // Branches to {if_false} if ToBoolean applied to {value} yields false, + // otherwise goes to {if_true}. 
+ void BranchIfToBooleanIsFalse(Node* value, Label* if_false, Label* if_true) { + BranchIfToBooleanIsTrue(value, if_true, if_false); + } + void BranchIfJSReceiver(Node* object, Label* if_true, Label* if_false); // Branches to {if_true} when --force-slow-path flag has been passed. @@ -811,8 +782,10 @@ class V8_EXPORT_PRIVATE CodeStubAssembler MachineType type = MachineType::AnyTagged()); // Load an object pointer from a buffer that isn't in the heap. - Node* LoadBufferObject(Node* buffer, int offset, - MachineType type = MachineType::AnyTagged()); + Node* LoadBufferObject(Node* buffer, int offset, MachineType type); + TNode LoadBufferObject(TNode buffer, int offset) { + return CAST(LoadBufferObject(buffer, offset, MachineType::AnyTagged())); + } TNode LoadBufferPointer(TNode buffer, int offset) { return UncheckedCast( LoadBufferObject(buffer, offset, MachineType::Pointer())); @@ -887,15 +860,19 @@ class V8_EXPORT_PRIVATE CodeStubAssembler std::is_convertible, TNode>::value, int>::type = 0> TNode LoadReference(Reference reference) { - return CAST(LoadFromObject(MachineTypeOf::value, reference.object, - reference.offset)); + TNode offset = + IntPtrSub(reference.offset, IntPtrConstant(kHeapObjectTag)); + return CAST( + LoadFromObject(MachineTypeOf::value, reference.object, offset)); } template , TNode>::value, int>::type = 0> TNode LoadReference(Reference reference) { - return UncheckedCast(LoadFromObject(MachineTypeOf::value, - reference.object, reference.offset)); + TNode offset = + IntPtrSub(reference.offset, IntPtrConstant(kHeapObjectTag)); + return UncheckedCast( + LoadFromObject(MachineTypeOf::value, reference.object, offset)); } template , TNode>::value, int>::type = 0> if (std::is_same::value) { write_barrier = StoreToObjectWriteBarrier::kMap; } - StoreToObject(rep, reference.object, reference.offset, value, - write_barrier); + TNode offset = + IntPtrSub(reference.offset, IntPtrConstant(kHeapObjectTag)); + StoreToObject(rep, reference.object, offset, value, write_barrier); } template , TNode>::value, int>::type = 0> void StoreReference(Reference reference, TNode value) { - StoreToObject(MachineRepresentationOf::value, reference.object, - reference.offset, value, StoreToObjectWriteBarrier::kNone); + TNode offset = + IntPtrSub(reference.offset, IntPtrConstant(kHeapObjectTag)); + StoreToObject(MachineRepresentationOf::value, reference.object, offset, + value, StoreToObjectWriteBarrier::kNone); } // Tag a smi and store it. @@ -927,7 +907,7 @@ class V8_EXPORT_PRIVATE CodeStubAssembler // Load the Map of a HeapObject. TNode LoadMap(SloppyTNode object); // Load the instance type of a HeapObject. - TNode LoadInstanceType(SloppyTNode object); + TNode LoadInstanceType(SloppyTNode object); // Compare the instance type of the object against the provided one. TNode HasInstanceType(SloppyTNode object, InstanceType type); @@ -967,7 +947,7 @@ class V8_EXPORT_PRIVATE CodeStubAssembler // Load bit field 3 of a map. TNode LoadMapBitField3(SloppyTNode map); // Load the instance type of a map. - TNode LoadMapInstanceType(SloppyTNode map); + TNode LoadMapInstanceType(SloppyTNode map); // Load the ElementsKind of a map. TNode LoadMapElementsKind(SloppyTNode map); TNode LoadElementsKind(SloppyTNode object); @@ -1023,8 +1003,8 @@ class V8_EXPORT_PRIVATE CodeStubAssembler TNode LoadStringLengthAsWord32(SloppyTNode string); // Loads a pointer to the sequential String char array.
Node* PointerToSeqStringData(Node* seq_string); - // Load value field of a JSValue object. - Node* LoadJSValueValue(Node* object); + // Load value field of a JSPrimitiveWrapper object. + Node* LoadJSPrimitiveWrapperValue(Node* object); // Figures out whether the value of maybe_object is: // - a SMI (jump to "if_smi", "extracted" will be the SMI value) @@ -1076,8 +1056,8 @@ class V8_EXPORT_PRIVATE CodeStubAssembler // Array is any array-like type that has a fixed header followed by // tagged elements. - template - TNode LoadArrayElement( + template + TNode LoadArrayElement( TNode array, int array_header_size, Node* index, int additional_offset = 0, ParameterMode parameter_mode = INTPTR_PARAMETERS, @@ -1232,15 +1212,23 @@ class V8_EXPORT_PRIVATE CodeStubAssembler TNode LoadDoubleWithHoleCheck( SloppyTNode base, SloppyTNode offset, Label* if_hole, MachineType machine_type = MachineType::Float64()); - Node* LoadFixedTypedArrayElementAsTagged( - Node* data_pointer, Node* index_node, ElementsKind elements_kind, + TNode LoadFixedTypedArrayElementAsTagged( + TNode data_pointer, Node* index_node, ElementsKind elements_kind, ParameterMode parameter_mode = INTPTR_PARAMETERS); TNode LoadFixedTypedArrayElementAsTagged( - TNode data_pointer, TNode index, TNode elements_kind); + TNode data_pointer, TNode index_node, + ElementsKind elements_kind) { + return LoadFixedTypedArrayElementAsTagged(data_pointer, index_node, + elements_kind, SMI_PARAMETERS); + } + TNode LoadFixedTypedArrayElementAsTagged( + TNode data_pointer, TNode index, + TNode elements_kind); // Parts of the above, factored out for readability: - Node* LoadFixedBigInt64ArrayElementAsTagged(Node* data_pointer, Node* offset); - Node* LoadFixedBigUint64ArrayElementAsTagged(Node* data_pointer, - Node* offset); + TNode LoadFixedBigInt64ArrayElementAsTagged( + SloppyTNode data_pointer, SloppyTNode offset); + TNode LoadFixedBigUint64ArrayElementAsTagged( + SloppyTNode data_pointer, SloppyTNode offset); // 64-bit platforms only: TNode BigIntFromInt64(TNode value); TNode BigIntFromUint64(TNode value); @@ -1250,10 +1238,9 @@ class V8_EXPORT_PRIVATE CodeStubAssembler void StoreJSTypedArrayElementFromTagged(TNode context, TNode typed_array, - TNode index_node, + TNode index_node, TNode value, - ElementsKind elements_kind, - ParameterMode parameter_mode); + ElementsKind elements_kind); // Context manipulation TNode LoadContextElement(SloppyTNode context, @@ -1285,6 +1272,7 @@ class V8_EXPORT_PRIVATE CodeStubAssembler TNode LoadJSArrayElementsMap(SloppyTNode kind, SloppyTNode native_context); + TNode HasPrototypeSlot(TNode function); TNode IsGeneratorFunction(TNode function); TNode HasPrototypeProperty(TNode function, TNode map); void GotoIfPrototypeRequiresRuntimeLookup(TNode function, @@ -1534,10 +1522,15 @@ class V8_EXPORT_PRIVATE CodeStubAssembler // Like above, but allowing custom bitfield initialization. TNode AllocateRawBigInt(TNode length); void StoreBigIntBitfield(TNode bigint, TNode bitfield); - void StoreBigIntDigit(TNode bigint, int digit_index, + void StoreBigIntDigit(TNode bigint, intptr_t digit_index, + TNode digit); + void StoreBigIntDigit(TNode bigint, TNode digit_index, TNode digit); + TNode LoadBigIntBitfield(TNode bigint); - TNode LoadBigIntDigit(TNode bigint, int digit_index); + TNode LoadBigIntDigit(TNode bigint, intptr_t digit_index); + TNode LoadBigIntDigit(TNode bigint, + TNode digit_index); // Allocate a ByteArray with the given length. 
TNode AllocateByteArray(TNode length, @@ -1573,9 +1566,9 @@ class V8_EXPORT_PRIVATE CodeStubAssembler TNode AllocateNameDictionary(int at_least_space_for); TNode AllocateNameDictionary( - TNode at_least_space_for); + TNode at_least_space_for, AllocationFlags = kNone); TNode AllocateNameDictionaryWithCapacity( - TNode capacity); + TNode capacity, AllocationFlags = kNone); TNode CopyNameDictionary(TNode dictionary, Label* large_object_fallback); @@ -1604,9 +1597,9 @@ class V8_EXPORT_PRIVATE CodeStubAssembler void InitializeStructBody(Node* object, Node* map, Node* size, int start_offset = Struct::kHeaderSize); - Node* AllocateJSObjectFromMap( - Node* map, Node* properties = nullptr, Node* elements = nullptr, - AllocationFlags flags = kNone, + TNode AllocateJSObjectFromMap( + SloppyTNode map, SloppyTNode properties = nullptr, + SloppyTNode elements = nullptr, AllocationFlags flags = kNone, SlackTrackingMode slack_tracking_mode = kNoSlackTracking); void InitializeJSObjectFromMap( @@ -1696,6 +1689,8 @@ class V8_EXPORT_PRIVATE CodeStubAssembler fixed_array_map); } + TNode GetStructMap(InstanceType instance_type); + TNode AllocateUninitializedFixedArray(intptr_t capacity) { return UncheckedCast(AllocateFixedArray( PACKED_ELEMENTS, IntPtrConstant(capacity), AllocationFlag::kNone)); @@ -1745,7 +1740,9 @@ class V8_EXPORT_PRIVATE CodeStubAssembler TNode object, IterationKind mode); - Node* AllocateJSIteratorResult(Node* context, Node* value, Node* done); + TNode AllocateJSIteratorResult(SloppyTNode context, + SloppyTNode value, + SloppyTNode done); Node* AllocateJSIteratorResultForEntry(Node* context, Node* key, Node* value); TNode ArraySpeciesCreate(TNode context, @@ -1934,6 +1931,15 @@ class V8_EXPORT_PRIVATE CodeStubAssembler SMI_PARAMETERS); } + TNode ExtractFixedArray( + TNode source, TNode first, TNode count, + TNode capacity, + ExtractFixedArrayFlags extract_flags = + ExtractFixedArrayFlag::kAllFixedArrays) { + return CAST(ExtractFixedArray(source, first, count, capacity, extract_flags, + INTPTR_PARAMETERS)); + } + // Copy a portion of an existing FixedArray or FixedDoubleArray into a new // FixedArray, including special appropriate handling for COW arrays. // * |source| is either a FixedArray or FixedDoubleArray from which to copy @@ -2043,6 +2049,10 @@ class V8_EXPORT_PRIVATE CodeStubAssembler TNode CalculateNewElementsCapacity(TNode old_capacity) { return CAST(CalculateNewElementsCapacity(old_capacity, SMI_PARAMETERS)); } + TNode CalculateNewElementsCapacity(TNode old_capacity) { + return UncheckedCast( + CalculateNewElementsCapacity(old_capacity, INTPTR_PARAMETERS)); + } // Tries to grow the |elements| array of given |object| to store the |key| // or bails out if the growing gap is too big. Returns new elements. @@ -2086,19 +2096,21 @@ class V8_EXPORT_PRIVATE CodeStubAssembler Label* if_bigint, Variable* var_bigint, Variable* var_feedback); // Truncate the floating point value of a HeapNumber to an Int32. - Node* TruncateHeapNumberValueToWord32(Node* object); + TNode TruncateHeapNumberValueToWord32(TNode object); // Conversions. 
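// [Editorial sketch, not part of the patch] The Smi conversions below hinge on
// one predicate: a double may become a Smi only if truncating to int32 round-
// trips losslessly and the value is not -0.0, which must keep its sign bit in
// a HeapNumber. A standalone model with a hypothetical name (assumes
// <cstdint> and <cmath>; the 31-bit Smi configuration narrows the range):
inline bool FitsSmiModel(double v) {
  if (!(v >= -2147483648.0 && v <= 2147483647.0)) return false;  // int32 range
  int32_t t = static_cast<int32_t>(v);            // truncate toward zero
  if (static_cast<double>(t) != v) return false;  // fractional part present
  return !(t == 0 && std::signbit(v));            // -0.0 must stay boxed
}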
- void TryHeapNumberToSmi(TNode number, TVariable& output, + void TryHeapNumberToSmi(TNode number, + TVariable& output, // NOLINT(runtime/references) Label* if_smi); - void TryFloat64ToSmi(TNode number, TVariable& output, + void TryFloat64ToSmi(TNode number, + TVariable& output, // NOLINT(runtime/references) Label* if_smi); TNode ChangeFloat64ToTagged(SloppyTNode value); TNode ChangeInt32ToTagged(SloppyTNode value); TNode ChangeUint32ToTagged(SloppyTNode value); TNode ChangeUintPtrToTagged(TNode value); TNode ChangeNumberToUint32(TNode value); - TNode ChangeNumberToFloat64(SloppyTNode value); + TNode ChangeNumberToFloat64(TNode value); TNode TryNumberToUintPtr(TNode value, Label* if_negative); TNode ChangeNonnegativeNumberToUintPtr(TNode value) { return TryNumberToUintPtr(value, nullptr); @@ -2145,10 +2157,11 @@ class V8_EXPORT_PRIVATE CodeStubAssembler } // Throws a TypeError for {method_name} if {value} is neither of the given - // {primitive_type} nor a JSValue wrapping a value of {primitive_type}, or - // returns the {value} (or wrapped value) otherwise. - Node* ToThisValue(Node* context, Node* value, PrimitiveType primitive_type, - char const* method_name); + // {primitive_type} nor a JSPrimitiveWrapper wrapping a value of + // {primitive_type}, or returns the {value} (or wrapped value) otherwise. + TNode ToThisValue(TNode context, TNode value, + PrimitiveType primitive_type, + char const* method_name); // Throws a TypeError for {method_name} if {value} is not of the given // instance type. Returns {value}'s map. @@ -2231,6 +2244,7 @@ class V8_EXPORT_PRIVATE CodeStubAssembler TNode IsJSFunction(SloppyTNode object); TNode IsJSGeneratorObject(SloppyTNode object); TNode IsJSGlobalProxyInstanceType(SloppyTNode instance_type); + TNode IsJSGlobalProxyMap(SloppyTNode map); TNode IsJSGlobalProxy(SloppyTNode object); TNode IsJSObjectInstanceType(SloppyTNode instance_type); TNode IsJSObjectMap(SloppyTNode map); @@ -2246,9 +2260,10 @@ class V8_EXPORT_PRIVATE CodeStubAssembler TNode IsJSTypedArrayInstanceType(SloppyTNode instance_type); TNode IsJSTypedArrayMap(SloppyTNode map); TNode IsJSTypedArray(SloppyTNode object); - TNode IsJSValueInstanceType(SloppyTNode instance_type); - TNode IsJSValueMap(SloppyTNode map); - TNode IsJSValue(SloppyTNode object); + TNode IsJSPrimitiveWrapperInstanceType( + SloppyTNode instance_type); + TNode IsJSPrimitiveWrapperMap(SloppyTNode map); + TNode IsJSPrimitiveWrapper(SloppyTNode object); TNode IsMap(SloppyTNode object); TNode IsMutableHeapNumber(SloppyTNode object); TNode IsName(SloppyTNode object); @@ -2260,6 +2275,7 @@ class V8_EXPORT_PRIVATE CodeStubAssembler TNode IsOneByteStringInstanceType(SloppyTNode instance_type); TNode IsPrimitiveInstanceType(SloppyTNode instance_type); TNode IsPrivateSymbol(SloppyTNode object); + TNode IsPrivateName(SloppyTNode symbol); TNode IsPromiseCapability(SloppyTNode object); TNode IsPropertyArray(SloppyTNode object); TNode IsPropertyCell(SloppyTNode object); @@ -2305,7 +2321,8 @@ class V8_EXPORT_PRIVATE CodeStubAssembler TNode IsPromiseThenProtectorCellInvalid(); TNode IsArraySpeciesProtectorCellInvalid(); TNode IsTypedArraySpeciesProtectorCellInvalid(); - TNode IsRegExpSpeciesProtectorCellInvalid(); + TNode IsRegExpSpeciesProtectorCellInvalid( + TNode native_context); TNode IsPromiseSpeciesProtectorCellInvalid(); TNode IsMockArrayBufferAllocatorFlag() { @@ -2355,7 +2372,7 @@ class V8_EXPORT_PRIVATE CodeStubAssembler return Word32Equal(a, b); } bool ElementsKindEqual(ElementsKind a, ElementsKind b) { return a == b; } - Node* 
IsFastElementsKind(Node* elements_kind); + TNode IsFastElementsKind(TNode elements_kind); bool IsFastElementsKind(ElementsKind kind) { return v8::internal::IsFastElementsKind(kind); } @@ -2366,12 +2383,12 @@ class V8_EXPORT_PRIVATE CodeStubAssembler bool IsDoubleElementsKind(ElementsKind kind) { return v8::internal::IsDoubleElementsKind(kind); } - Node* IsFastSmiOrTaggedElementsKind(Node* elements_kind); - Node* IsFastSmiElementsKind(Node* elements_kind); - Node* IsHoleyFastElementsKind(Node* elements_kind); - Node* IsHoleyFastElementsKindForRead(Node* elements_kind); - Node* IsElementsKindGreaterThan(Node* target_kind, - ElementsKind reference_kind); + TNode IsFastSmiOrTaggedElementsKind(TNode elements_kind); + TNode IsFastSmiElementsKind(SloppyTNode elements_kind); + TNode IsHoleyFastElementsKind(TNode elements_kind); + TNode IsHoleyFastElementsKindForRead(TNode elements_kind); + TNode IsElementsKindGreaterThan(TNode target_kind, + ElementsKind reference_kind); TNode IsElementsKindLessThanOrEqual(TNode target_kind, ElementsKind reference_kind); // Check if reference_kind_a <= target_kind <= reference_kind_b @@ -2413,8 +2430,7 @@ class V8_EXPORT_PRIVATE CodeStubAssembler Node* DerefIndirectString(TNode string, TNode instance_type, Label* cannot_deref); - TNode StringFromSingleCodePoint(TNode codepoint, - UnicodeEncoding encoding); + TNode StringFromSingleUTF16EncodedCodePoint(TNode codepoint); // Type conversion helpers. enum class BigIntHandling { kConvertToNumber, kThrow }; @@ -2578,7 +2594,7 @@ class V8_EXPORT_PRIVATE CodeStubAssembler TNode IsSetSmi(SloppyTNode smi, int untagged_mask) { intptr_t mask_word = bit_cast(Smi::FromInt(untagged_mask)); return WordNotEqual( - WordAnd(BitcastTaggedToWord(smi), IntPtrConstant(mask_word)), + WordAnd(BitcastTaggedSignedToWord(smi), IntPtrConstant(mask_word)), IntPtrConstant(0)); } @@ -2950,11 +2966,11 @@ class V8_EXPORT_PRIVATE CodeStubAssembler // If it can't handle the case {receiver}/{key} case then the control goes // to {if_bailout}. // If {if_proxy} is nullptr, proxies go to if_bailout. - void TryPrototypeChainLookup(Node* receiver, Node* key, + void TryPrototypeChainLookup(Node* receiver, Node* object, Node* key, const LookupInHolder& lookup_property_in_holder, const LookupInHolder& lookup_element_in_holder, Label* if_end, Label* if_bailout, - Label* if_proxy = nullptr); + Label* if_proxy); // Instanceof helpers. 
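// [Editorial sketch, not part of the patch] The helper documented below is
// conceptually a loop over [[GetPrototypeOf]]. A self-contained model over a
// hypothetical node type:
struct ProtoNodeModel { const ProtoNodeModel* prototype; };  // hypothetical
inline bool HasInPrototypeChainModel(const ProtoNodeModel* object,
                                     const ProtoNodeModel* target) {
  // Walk the chain until a null prototype terminates it.
  for (const ProtoNodeModel* p = object->prototype; p != nullptr;
       p = p->prototype) {
    if (p == target) return true;
  }
  return false;
}
// The real CSA code must additionally bail out for proxies and access-checked
// objects, whose [[GetPrototypeOf]] can run arbitrary side effects.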
// Returns true if {object} has {prototype} somewhere in its prototype @@ -3055,7 +3071,8 @@ class V8_EXPORT_PRIVATE CodeStubAssembler void EmitElementStore(Node* object, Node* key, Node* value, ElementsKind elements_kind, KeyedAccessStoreMode store_mode, Label* bailout, - Node* context); + Node* context, + Variable* maybe_converted_value = nullptr); Node* CheckForCapacityGrow(Node* object, Node* elements, ElementsKind kind, Node* length, Node* key, ParameterMode mode, @@ -3204,8 +3221,8 @@ class V8_EXPORT_PRIVATE CodeStubAssembler Node* Equal(Node* lhs, Node* rhs, Node* context, Variable* var_type_feedback = nullptr); - Node* StrictEqual(Node* lhs, Node* rhs, - Variable* var_type_feedback = nullptr); + TNode StrictEqual(SloppyTNode lhs, SloppyTNode rhs, + Variable* var_type_feedback = nullptr); // ECMA#sec-samevalue // Similar to StrictEqual except that NaNs are treated as equal and minus zero @@ -3248,13 +3265,11 @@ class V8_EXPORT_PRIVATE CodeStubAssembler // Debug helpers Node* IsDebugActive(); - TNode IsRuntimeCallStatsEnabled(); - // JSArrayBuffer helpers TNode LoadJSArrayBufferBitField(TNode array_buffer); TNode LoadJSArrayBufferBackingStore( TNode array_buffer); - Node* IsDetachedBuffer(Node* buffer); + TNode IsDetachedBuffer(TNode buffer); void ThrowIfArrayBufferIsDetached(SloppyTNode context, TNode array_buffer, const char* method_name); @@ -3301,12 +3316,6 @@ class V8_EXPORT_PRIVATE CodeStubAssembler Node* IsPromiseHookEnabledOrHasAsyncEventDelegate(); Node* IsPromiseHookEnabledOrDebugIsActiveOrHasAsyncEventDelegate(); - // Helpers for StackFrame markers. - Node* MarkerIsFrameType(Node* marker_or_function, - StackFrame::Type frame_type); - Node* MarkerIsNotFrameType(Node* marker_or_function, - StackFrame::Type frame_type); - // for..in helpers void CheckPrototypeEnumCache(Node* receiver, Node* receiver_map, Label* if_fast, Label* if_slow); @@ -3589,9 +3598,10 @@ class V8_EXPORT_PRIVATE CodeStubAssembler private: // Low-level accessors for Descriptor arrays.
- TNode LoadDescriptorArrayElement(TNode object, - Node* index, - int additional_offset = 0); + template + TNode LoadDescriptorArrayElement(TNode object, + TNode index, + int additional_offset); }; class V8_EXPORT_PRIVATE CodeStubArguments { diff --git a/deps/v8/src/codegen/compiler.cc b/deps/v8/src/codegen/compiler.cc index 5197dd3a2fc752..906eb0f0ca2d5e 100644 --- a/deps/v8/src/codegen/compiler.cc +++ b/deps/v8/src/codegen/compiler.cc @@ -15,8 +15,10 @@ #include "src/codegen/assembler-inl.h" #include "src/codegen/compilation-cache.h" #include "src/codegen/optimized-compilation-info.h" +#include "src/codegen/pending-optimization-table.h" #include "src/codegen/unoptimized-compilation-info.h" #include "src/common/globals.h" +#include "src/common/message-template.h" #include "src/compiler-dispatcher/compiler-dispatcher.h" #include "src/compiler-dispatcher/optimizing-compile-dispatcher.h" #include "src/compiler/pipeline.h" @@ -24,7 +26,6 @@ #include "src/debug/liveedit.h" #include "src/execution/frames-inl.h" #include "src/execution/isolate-inl.h" -#include "src/execution/message-template.h" #include "src/execution/runtime-profiler.h" #include "src/execution/vm-state-inl.h" #include "src/heap/heap-inl.h" @@ -319,6 +320,8 @@ void OptimizedCompilationJob::RecordCompilationStats(CompilationMode mode, counters->turbofan_optimize_total_foreground()->AddSample( static_cast(time_foreground.InMicroseconds())); } + counters->turbofan_ticks()->AddSample(static_cast( + compilation_info()->tick_counter().CurrentTicks() / 1000)); } } @@ -593,6 +596,12 @@ MaybeHandle GenerateUnoptimizedCodeForToplevel( return MaybeHandle(); } + if (FLAG_stress_lazy_source_positions) { + // Collect source positions immediately to try and flush out bytecode + // mismatches. + SharedFunctionInfo::EnsureSourcePositionsAvailable(isolate, shared_info); + } + if (shared_info.is_identical_to(top_level)) { // Ensure that the top level function is retained. *is_compiled_scope = shared_info->is_compiled_scope(); @@ -797,18 +806,10 @@ MaybeHandle GetOptimizedCode(Handle function, return MaybeHandle(); } - // If code was pending optimization for testing, delete remove the strong root - // that was preventing the bytecode from being flushed between marking and - // optimization. - if (!isolate->heap()->pending_optimize_for_test_bytecode().IsUndefined()) { - Handle table = - handle(ObjectHashTable::cast( - isolate->heap()->pending_optimize_for_test_bytecode()), - isolate); - bool was_present; - table = table->Remove(isolate, table, handle(function->shared(), isolate), - &was_present); - isolate->heap()->SetPendingOptimizeForTestBytecode(*table); + // If code was pending optimization for testing, delete the entry + // from the table that was preventing the bytecode from being flushed. + if (V8_UNLIKELY(FLAG_testing_d8_test_runner)) { + PendingOptimizationTable::FunctionWasOptimized(isolate, function); } Handle cached_code; @@ -1346,6 +1347,13 @@ bool Compiler::Compile(Handle shared_info, DCHECK(!isolate->has_pending_exception()); *is_compiled_scope = shared_info->is_compiled_scope(); DCHECK(is_compiled_scope->is_compiled()); + + if (FLAG_stress_lazy_source_positions) { + // Collect source positions immediately to try and flush out bytecode + // mismatches.
+ SharedFunctionInfo::EnsureSourcePositionsAvailable(isolate, shared_info); + } + return true; } @@ -1599,33 +1607,103 @@ MaybeHandle Compiler::GetFunctionFromEval( return result; } -bool Compiler::CodeGenerationFromStringsAllowed(Isolate* isolate, - Handle context, - Handle source) { +// Check whether the embedder allows code generation in this context. +// (via v8::Isolate::SetAllowCodeGenerationFromStringsCallback) +bool CodeGenerationFromStringsAllowed(Isolate* isolate, Handle context, + Handle source) { DCHECK(context->allow_code_gen_from_strings().IsFalse(isolate)); - // Check with callback if set. + DCHECK(isolate->allow_code_gen_callback()); + + // Callback set. Let it decide if code generation is allowed. + VMState state(isolate); + RuntimeCallTimerScope timer( + isolate, RuntimeCallCounterId::kCodeGenerationFromStringsCallbacks); AllowCodeGenerationFromStringsCallback callback = isolate->allow_code_gen_callback(); - if (callback == nullptr) { - // No callback set and code generation disallowed. - return false; - } else { - // Callback set. Let it decide if code generation is allowed. - VMState state(isolate); - return callback(v8::Utils::ToLocal(context), v8::Utils::ToLocal(source)); + return callback(v8::Utils::ToLocal(context), v8::Utils::ToLocal(source)); +} + +// Check whether the embedder allows code generation in this context. +// (via v8::Isolate::SetModifyCodeGenerationFromStringsCallback) +bool ModifyCodeGenerationFromStrings(Isolate* isolate, Handle context, + Handle* source) { + DCHECK(context->allow_code_gen_from_strings().IsFalse(isolate)); + DCHECK(isolate->modify_code_gen_callback()); + DCHECK(source); + + // Callback set. Run it, and use the return value as source, or block + // execution if no source string is returned. + VMState state(isolate); + ModifyCodeGenerationFromStringsCallback modify_callback = + isolate->modify_code_gen_callback(); + RuntimeCallTimerScope timer( + isolate, RuntimeCallCounterId::kCodeGenerationFromStringsCallbacks); + MaybeLocal modified_source = + modify_callback(v8::Utils::ToLocal(context), v8::Utils::ToLocal(*source)); + if (modified_source.IsEmpty()) return false; + + // Use the new source (which might be the same as the old source) and return. + *source = Utils::OpenHandle(*modified_source.ToLocalChecked(), false); + return true; +} + +// Run embedder-mandated checks before generating code from a string. +// +// Returns a string to be used for compilation, or a flag that an object type +// was encountered that is neither a string, nor something the embedder knows +// how to handle. +// +// Returns: (assuming: std::tie(source, unknown_object)) +// - !source.is_null(): compilation allowed, source contains the source string. +// - unknown_object is true: compilation allowed, but we don't know how to +// deal with source_object. +// - source.is_null() && !unknown_object: compilation should be blocked. +// +// - !source.is_null() and unknown_object can't be true at the same time. +std::pair, bool> Compiler::ValidateDynamicCompilationSource( + Isolate* isolate, Handle context, + Handle source_object) { + Handle source; + if (source_object->IsString()) source = Handle::cast(source_object); + + // Check if the context unconditionally allows code gen from strings. + // allow_code_gen_from_strings can be many things, so we'll always check + // against the 'false' literal, so that e.g. undefined and 'true' are treated + // the same.
+ if (!context->allow_code_gen_from_strings().IsFalse(isolate)) { + return {source, !source_object->IsString()}; + } + + // Check if the context allows code generation for this string. + // allow_code_gen_callback only allows proper strings. + // (I.e., let allow_code_gen_callback decide, if it has been set.) + if (isolate->allow_code_gen_callback()) { + if (source_object->IsString() && + CodeGenerationFromStringsAllowed(isolate, context, source)) { + return {source, !source_object->IsString()}; + } + } + + // Check if the context wants to block or modify this source object. + // Double-check that we really have a string now. + // (Let modify_code_gen_callback decide, if it's been set.) + if (isolate->modify_code_gen_callback()) { + if (ModifyCodeGenerationFromStrings(isolate, context, &source_object) && + source_object->IsString()) + return {Handle::cast(source_object), false}; } + + return {MaybeHandle(), !source_object->IsString()}; } -MaybeHandle Compiler::GetFunctionFromString( - Handle context, Handle source, +MaybeHandle Compiler::GetFunctionFromValidatedString( + Handle context, MaybeHandle source, ParseRestriction restriction, int parameters_end_pos) { Isolate* const isolate = context->GetIsolate(); Handle native_context(context->native_context(), isolate); - // Check if native context allows code generation from - // strings. Throw an exception if it doesn't. - if (native_context->allow_code_gen_from_strings().IsFalse(isolate) && - !CodeGenerationFromStringsAllowed(isolate, native_context, source)) { + // Raise an EvalError if we did not receive a string. + if (source.is_null()) { Handle error_message = native_context->ErrorMessageForCodeGenerationFromStrings(); THROW_NEW_ERROR( @@ -1639,9 +1717,20 @@ MaybeHandle Compiler::GetFunctionFromString( int eval_position = kNoSourcePosition; Handle outer_info( native_context->empty_function().shared(), isolate); - return Compiler::GetFunctionFromEval( - source, outer_info, native_context, LanguageMode::kSloppy, restriction, - parameters_end_pos, eval_scope_position, eval_position); + return Compiler::GetFunctionFromEval(source.ToHandleChecked(), outer_info, + native_context, LanguageMode::kSloppy, + restriction, parameters_end_pos, + eval_scope_position, eval_position); +} + +MaybeHandle Compiler::GetFunctionFromString( + Handle context, Handle source, + ParseRestriction restriction, int parameters_end_pos) { + Isolate* const isolate = context->GetIsolate(); + Handle native_context(context->native_context(), isolate); + return GetFunctionFromValidatedString( + context, ValidateDynamicCompilationSource(isolate, context, source).first, + restriction, parameters_end_pos); } namespace { diff --git a/deps/v8/src/codegen/compiler.h b/deps/v8/src/codegen/compiler.h index a5987063737c9e..836f7381233b3a 100644 --- a/deps/v8/src/codegen/compiler.h +++ b/deps/v8/src/codegen/compiler.h @@ -132,17 +132,22 @@ class V8_EXPORT_PRIVATE Compiler : public AllStatic { v8::ScriptCompiler::CompileOptions compile_options, v8::ScriptCompiler::NoCacheReason no_cache_reason); - // Returns true if the embedder permits compiling the given source string in - // the given context. - static bool CodeGenerationFromStringsAllowed(Isolate* isolate, - Handle context, - Handle source); - // Create a (bound) function for a String source within a context for eval. 
V8_WARN_UNUSED_RESULT static MaybeHandle GetFunctionFromString( - Handle context, Handle source, + Handle context, Handle source, ParseRestriction restriction, int parameters_end_pos); + // Decompose GetFunctionFromString into two functions, to allow callers to + // deal separately with the case of an object not handled by the embedder. + V8_WARN_UNUSED_RESULT static std::pair, bool> + ValidateDynamicCompilationSource(Isolate* isolate, Handle context, + Handle source_object); + V8_WARN_UNUSED_RESULT static MaybeHandle + GetFunctionFromValidatedString(Handle context, + MaybeHandle source, + ParseRestriction restriction, + int parameters_end_pos); + // Create a shared function info object for a String source. static MaybeHandle GetSharedFunctionInfoForScript( Isolate* isolate, Handle source, diff --git a/deps/v8/src/codegen/constant-pool.cc b/deps/v8/src/codegen/constant-pool.cc index 613a142f243618..6816c5b7ad580b 100644 --- a/deps/v8/src/codegen/constant-pool.cc +++ b/deps/v8/src/codegen/constant-pool.cc @@ -3,6 +3,7 @@ // found in the LICENSE file. #include "src/codegen/constant-pool.h" +#include "src/codegen/assembler-arch.h" #include "src/codegen/assembler-inl.h" namespace v8 { @@ -210,5 +211,253 @@ int ConstantPoolBuilder::Emit(Assembler* assm) { #endif // defined(V8_TARGET_ARCH_PPC) +#if defined(V8_TARGET_ARCH_ARM64) + +// Constant Pool. + +ConstantPool::ConstantPool(Assembler* assm) : assm_(assm) {} +ConstantPool::~ConstantPool() { DCHECK_EQ(blocked_nesting_, 0); } + +RelocInfoStatus ConstantPool::RecordEntry(uint32_t data, + RelocInfo::Mode rmode) { + ConstantPoolKey key(data, rmode); + CHECK(key.is_value32()); + return RecordKey(std::move(key), assm_->pc_offset()); +} + +RelocInfoStatus ConstantPool::RecordEntry(uint64_t data, + RelocInfo::Mode rmode) { + ConstantPoolKey key(data, rmode); + CHECK(!key.is_value32()); + return RecordKey(std::move(key), assm_->pc_offset()); +} + +RelocInfoStatus ConstantPool::RecordKey(ConstantPoolKey key, int offset) { + RelocInfoStatus write_reloc_info = GetRelocInfoStatusFor(key); + if (write_reloc_info == RelocInfoStatus::kMustRecord) { + if (key.is_value32()) { + if (entry32_count_ == 0) first_use_32_ = offset; + ++entry32_count_; + } else { + if (entry64_count_ == 0) first_use_64_ = offset; + ++entry64_count_; + } + } + entries_.insert(std::make_pair(key, offset)); + + if (Entry32Count() + Entry64Count() > ConstantPool::kApproxMaxEntryCount) { + // Request constant pool emission after the next instruction. + SetNextCheckIn(1); + } + + return write_reloc_info; +} + +RelocInfoStatus ConstantPool::GetRelocInfoStatusFor( + const ConstantPoolKey& key) { + if (key.AllowsDeduplication()) { + auto existing = entries_.find(key); + if (existing != entries_.end()) { + return RelocInfoStatus::kMustOmitForDuplicate; + } + } + return RelocInfoStatus::kMustRecord; +} + +void ConstantPool::EmitAndClear(Jump require_jump) { + DCHECK(!IsBlocked()); + // Prevent recursive pool emission. + Assembler::BlockPoolsScope block_pools(assm_, PoolEmissionCheck::kSkip); + Alignment require_alignment = + IsAlignmentRequiredIfEmittedAt(require_jump, assm_->pc_offset()); + int size = ComputeSize(require_jump, require_alignment); + Label size_check; + assm_->bind(&size_check); + assm_->RecordConstPool(size); + + // Emit the constant pool. It is preceded by an optional branch if + // {require_jump} and a header which will: + // 1) Encode the size of the constant pool, for use by the disassembler.
+ // 2) Terminate the program, to try to prevent execution from accidentally + // flowing into the constant pool. + // 3) align the 64bit pool entries to 64-bit. + // TODO(all): Make the alignment part less fragile. Currently code is + // allocated as a byte array so there are no guarantees the alignment will + // be preserved on compaction. Currently it works as allocation seems to be + // 64-bit aligned. + + Label after_pool; + if (require_jump == Jump::kRequired) assm_->b(&after_pool); + + assm_->RecordComment("[ Constant Pool"); + EmitPrologue(require_alignment); + if (require_alignment == Alignment::kRequired) assm_->Align(kInt64Size); + EmitEntries(); + assm_->RecordComment("]"); + + if (after_pool.is_linked()) assm_->bind(&after_pool); + + DCHECK_EQ(assm_->SizeOfCodeGeneratedSince(&size_check), size); + Clear(); +} + +void ConstantPool::Clear() { + entries_.clear(); + first_use_32_ = -1; + first_use_64_ = -1; + entry32_count_ = 0; + entry64_count_ = 0; + next_check_ = 0; +} + +void ConstantPool::StartBlock() { + if (blocked_nesting_ == 0) { + // Prevent constant pool checks from happening by setting the next check to + // the biggest possible offset. + next_check_ = kMaxInt; + } + ++blocked_nesting_; +} + +void ConstantPool::EndBlock() { + --blocked_nesting_; + if (blocked_nesting_ == 0) { + DCHECK(IsInImmRangeIfEmittedAt(assm_->pc_offset())); + // Make sure a check happens quickly after getting unblocked. + next_check_ = 0; + } +} + +bool ConstantPool::IsBlocked() const { return blocked_nesting_ > 0; } + +void ConstantPool::SetNextCheckIn(size_t instructions) { + next_check_ = + assm_->pc_offset() + static_cast(instructions * kInstrSize); +} + +void ConstantPool::EmitEntries() { + for (auto iter = entries_.begin(); iter != entries_.end();) { + DCHECK(iter->first.is_value32() || IsAligned(assm_->pc_offset(), 8)); + auto range = entries_.equal_range(iter->first); + bool shared = iter->first.AllowsDeduplication(); + for (auto it = range.first; it != range.second; ++it) { + SetLoadOffsetToConstPoolEntry(it->second, assm_->pc(), it->first); + if (!shared) Emit(it->first); + } + if (shared) Emit(iter->first); + iter = range.second; + } +} + +void ConstantPool::Emit(const ConstantPoolKey& key) { + if (key.is_value32()) { + assm_->dd(key.value32()); + } else { + assm_->dq(key.value64()); + } +} + +bool ConstantPool::ShouldEmitNow(Jump require_jump, size_t margin) const { + if (IsEmpty()) return false; + if (Entry32Count() + Entry64Count() > ConstantPool::kApproxMaxEntryCount) { + return true; + } + // We compute {dist32/64}, i.e. the distance from the first instruction + // accessing a 32bit/64bit entry in the constant pool to any of the + // 32bit/64bit constant pool entries, respectively. This is required because + // we do not guarantee that entries are emitted in order of reference, i.e. it + // is possible that the entry with the earliest reference is emitted last. + // The constant pool should be emitted if either of the following is true: + // (A) {dist32/64} will be out of range at the next check in. + // (B) Emission can be done behind an unconditional branch and {dist32/64} + // exceeds {kOpportunityDist*}. + // (C) {dist32/64} exceeds the desired approximate distance to the pool. 
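// [Editorial worked example, illustrative numbers only] Suppose pc_offset() ==
// 1000, margin == 0, four 32-bit entries, one 64-bit entry, and
// ComputeSize(Jump::kRequired, Alignment::kRequired) == 40. Then, in the code
// below:
//   pool_end_32 == 1000 + 0 + 40 == 1040
//   pool_end_64 == 1040 - 4 * kInt32Size == 1024
// With first_use_64_ == 200, dist64 == 1024 - 200 == 824; the pool is emitted
// once dist64 + 2 * kCheckInterval >= kMaxDistToPool64 (case A), earlier if a
// jump can be omitted and dist64 >= kOpportunityDistToPool64 (case B), or once
// dist64 >= kApproxDistToPool64 (case C).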
+ int worst_case_size = ComputeSize(Jump::kRequired, Alignment::kRequired); + size_t pool_end_32 = assm_->pc_offset() + margin + worst_case_size; + size_t pool_end_64 = pool_end_32 - Entry32Count() * kInt32Size; + if (Entry64Count() != 0) { + // The 64-bit constants are always emitted before the 32-bit constants, so + // we subtract the size of the 32-bit constants from {size}. + size_t dist64 = pool_end_64 - first_use_64_; + bool next_check_too_late = dist64 + 2 * kCheckInterval >= kMaxDistToPool64; + bool opportune_emission_without_jump = + require_jump == Jump::kOmitted && (dist64 >= kOpportunityDistToPool64); + bool approximate_distance_exceeded = dist64 >= kApproxDistToPool64; + if (next_check_too_late || opportune_emission_without_jump || + approximate_distance_exceeded) { + return true; + } + } + if (Entry32Count() != 0) { + size_t dist32 = pool_end_32 - first_use_32_; + bool next_check_too_late = dist32 + 2 * kCheckInterval >= kMaxDistToPool32; + bool opportune_emission_without_jump = + require_jump == Jump::kOmitted && (dist32 >= kOpportunityDistToPool32); + bool approximate_distance_exceeded = dist32 >= kApproxDistToPool32; + if (next_check_too_late || opportune_emission_without_jump || + approximate_distance_exceeded) { + return true; + } + } + return false; +} + +int ConstantPool::ComputeSize(Jump require_jump, + Alignment require_alignment) const { + int size_up_to_marker = PrologueSize(require_jump); + int alignment = require_alignment == Alignment::kRequired ? kInstrSize : 0; + size_t size_after_marker = + Entry32Count() * kInt32Size + alignment + Entry64Count() * kInt64Size; + return size_up_to_marker + static_cast(size_after_marker); +} + +Alignment ConstantPool::IsAlignmentRequiredIfEmittedAt(Jump require_jump, + int pc_offset) const { + int size_up_to_marker = PrologueSize(require_jump); + if (Entry64Count() != 0 && + !IsAligned(pc_offset + size_up_to_marker, kInt64Size)) { + return Alignment::kRequired; + } + return Alignment::kOmitted; +} + +bool ConstantPool::IsInImmRangeIfEmittedAt(int pc_offset) { + // Check that all entries are in range if the pool is emitted at {pc_offset}. + // This ignores kPcLoadDelta (conservatively, since all offsets are positive), + // and over-estimates the last entry's address with the pool's end. 
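// Put differently: it suffices that (pool_end - first_use) stays below
// kMaxDistToPool for each entry width, since the true distance from any
// recorded load to its pool entry can only be smaller than that bound.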
+ Alignment require_alignment = + IsAlignmentRequiredIfEmittedAt(Jump::kRequired, pc_offset); + size_t pool_end_32 = + pc_offset + ComputeSize(Jump::kRequired, require_alignment); + size_t pool_end_64 = pool_end_32 - Entry32Count() * kInt32Size; + bool entries_in_range_32 = + Entry32Count() == 0 || (pool_end_32 < first_use_32_ + kMaxDistToPool32); + bool entries_in_range_64 = + Entry64Count() == 0 || (pool_end_64 < first_use_64_ + kMaxDistToPool64); + return entries_in_range_32 && entries_in_range_64; +} + +ConstantPool::BlockScope::BlockScope(Assembler* assm, size_t margin) + : pool_(&assm->constpool_) { + pool_->assm_->EmitConstPoolWithJumpIfNeeded(margin); + pool_->StartBlock(); +} + +ConstantPool::BlockScope::BlockScope(Assembler* assm, PoolEmissionCheck check) + : pool_(&assm->constpool_) { + DCHECK_EQ(check, PoolEmissionCheck::kSkip); + pool_->StartBlock(); +} + +ConstantPool::BlockScope::~BlockScope() { pool_->EndBlock(); } + +void ConstantPool::MaybeCheck() { + if (assm_->pc_offset() >= next_check_) { + Check(Emission::kIfNeeded, Jump::kRequired); + } +} + +#endif // defined(V8_TARGET_ARCH_ARM64) + } // namespace internal } // namespace v8 diff --git a/deps/v8/src/codegen/constant-pool.h b/deps/v8/src/codegen/constant-pool.h index 4399f6fc1fe53d..d07452336b4e40 100644 --- a/deps/v8/src/codegen/constant-pool.h +++ b/deps/v8/src/codegen/constant-pool.h @@ -15,6 +15,8 @@ namespace v8 { namespace internal { +class Instruction; + // ----------------------------------------------------------------------------- // Constant pool support @@ -136,8 +138,9 @@ class ConstantPoolBuilder { inline Label* EmittedPosition() { return &emitted_label_; } private: - ConstantPoolEntry::Access AddEntry(ConstantPoolEntry& entry, - ConstantPoolEntry::Type type); + ConstantPoolEntry::Access AddEntry( + ConstantPoolEntry& entry, // NOLINT(runtime/references) + ConstantPoolEntry::Type type); void EmitSharedEntries(Assembler* assm, ConstantPoolEntry::Type type); void EmitGroup(Assembler* assm, ConstantPoolEntry::Access access, ConstantPoolEntry::Type type); @@ -161,6 +164,189 @@ class ConstantPoolBuilder { #endif // defined(V8_TARGET_ARCH_PPC) +#if defined(V8_TARGET_ARCH_ARM64) + +class ConstantPoolKey { + public: + explicit ConstantPoolKey(uint64_t value, + RelocInfo::Mode rmode = RelocInfo::NONE) + : is_value32_(false), value64_(value), rmode_(rmode) {} + + explicit ConstantPoolKey(uint32_t value, + RelocInfo::Mode rmode = RelocInfo::NONE) + : is_value32_(true), value32_(value), rmode_(rmode) {} + + uint64_t value64() const { + CHECK(!is_value32_); + return value64_; + } + uint32_t value32() const { + CHECK(is_value32_); + return value32_; + } + + bool is_value32() const { return is_value32_; } + RelocInfo::Mode rmode() const { return rmode_; } + + bool AllowsDeduplication() const { + DCHECK(rmode_ != RelocInfo::CONST_POOL && + rmode_ != RelocInfo::VENEER_POOL && + rmode_ != RelocInfo::DEOPT_SCRIPT_OFFSET && + rmode_ != RelocInfo::DEOPT_INLINING_ID && + rmode_ != RelocInfo::DEOPT_REASON && rmode_ != RelocInfo::DEOPT_ID); + // CODE_TARGETs can be shared because they aren't patched anymore, + // and we make sure we emit only one reloc info for them (thus delta + // patching) will apply the delta only once. At the moment, we do not dedup + // code targets if they are wrapped in a heap object request (value == 0). + bool is_sharable_code_target = + rmode_ == RelocInfo::CODE_TARGET && + (is_value32() ? 
(value32() != 0) : (value64() != 0));
+    bool is_sharable_embedded_object = RelocInfo::IsEmbeddedObjectMode(rmode_);
+    return RelocInfo::IsShareableRelocMode(rmode_) || is_sharable_code_target ||
+           is_sharable_embedded_object;
+  }
+
+ private:
+  bool is_value32_;
+  union {
+    uint64_t value64_;
+    uint32_t value32_;
+  };
+  RelocInfo::Mode rmode_;
+};
+
+// Order for pool entries. 64bit entries go first.
+inline bool operator<(const ConstantPoolKey& a, const ConstantPoolKey& b) {
+  if (a.is_value32() < b.is_value32()) return true;
+  if (a.is_value32() > b.is_value32()) return false;
+  if (a.rmode() < b.rmode()) return true;
+  if (a.rmode() > b.rmode()) return false;
+  if (a.is_value32()) return a.value32() < b.value32();
+  return a.value64() < b.value64();
+}
+
+inline bool operator==(const ConstantPoolKey& a, const ConstantPoolKey& b) {
+  if (a.rmode() != b.rmode() || a.is_value32() != b.is_value32()) {
+    return false;
+  }
+  if (a.is_value32()) return a.value32() == b.value32();
+  return a.value64() == b.value64();
+}
+
+// Constant pool generation
+enum class Jump { kOmitted, kRequired };
+enum class Emission { kIfNeeded, kForced };
+enum class Alignment { kOmitted, kRequired };
+enum class RelocInfoStatus { kMustRecord, kMustOmitForDuplicate };
+enum class PoolEmissionCheck { kSkip };
+
+// Pools are emitted in the instruction stream, preferably after unconditional
+// jumps or after returns from functions (in dead code locations).
+// If a long code sequence does not contain unconditional jumps, it is
+// necessary to emit the constant pool before the pool gets too far from the
+// location it is accessed from. In this case, we emit a jump over the emitted
+// constant pool.
+// Constants in the pool may be addresses of functions that get relocated;
+// if so, a relocation info entry is associated to the constant pool entry.
+class ConstantPool {
+ public:
+  explicit ConstantPool(Assembler* assm);
+  ~ConstantPool();
+
+  // The returned status indicates whether RelocInfo must be recorded for the
+  // new entry (kMustRecord) or may be omitted as a duplicate.
+  RelocInfoStatus RecordEntry(uint32_t data, RelocInfo::Mode rmode);
+  RelocInfoStatus RecordEntry(uint64_t data, RelocInfo::Mode rmode);
+
+  size_t Entry32Count() const { return entry32_count_; }
+  size_t Entry64Count() const { return entry64_count_; }
+  bool IsEmpty() const { return entries_.empty(); }
+  // Check whether all entries are still in range if the pool is emitted at
+  // {pc_offset}.
+  bool IsInImmRangeIfEmittedAt(int pc_offset);
+  // Size in bytes of the constant pool. Depending on parameters, the size will
+  // include the branch over the pool and alignment padding.
+  int ComputeSize(Jump require_jump, Alignment require_alignment) const;
+
+  // Emit the pool at the current pc with a branch over the pool if requested.
+  void EmitAndClear(Jump require);
+  bool ShouldEmitNow(Jump require_jump, size_t margin = 0) const;
+  V8_EXPORT_PRIVATE void Check(Emission force_emission, Jump require_jump,
+                               size_t margin = 0);
+
+  V8_EXPORT_PRIVATE void MaybeCheck();
+  void Clear();
+
+  // Constant pool emission can be blocked temporarily.
+  bool IsBlocked() const;
+
+  // Repeated checking whether the constant pool should be emitted is
+  // expensive; only check once a number of instructions have been generated.
+  void SetNextCheckIn(size_t instructions);
+
+  // Scope class that postpones constant pool generation.
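// A hypothetical use, sketched for illustration (the real call sites live in
// the arm64 assembler):
//   {
//     ConstantPool::BlockScope no_pool(assm, /*margin=*/64);
//     // ... emit a sequence that must not be split by a pool ...
//   }  // Checks resume; a pending pool may be emitted shortly after.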
+  class V8_EXPORT_PRIVATE BlockScope {
+   public:
+    // BlockScope immediately emits the pool if necessary to ensure that
+    // during the block scope at least {margin} bytes can be emitted without
+    // pool emission becoming necessary.
+    explicit BlockScope(Assembler* pool, size_t margin = 0);
+    BlockScope(Assembler* pool, PoolEmissionCheck);
+    ~BlockScope();
+
+   private:
+    ConstantPool* pool_;
+    DISALLOW_IMPLICIT_CONSTRUCTORS(BlockScope);
+  };
+
+  // Hard limit to the const pool which must not be exceeded.
+  static const size_t kMaxDistToPool32;
+  static const size_t kMaxDistToPool64;
+  // Approximate distance where the pool should be emitted.
+  static const size_t kApproxDistToPool32;
+  V8_EXPORT_PRIVATE static const size_t kApproxDistToPool64;
+  // Approximate distance where the pool may be emitted if
+  // no jump is required (due to a recent unconditional jump).
+  static const size_t kOpportunityDistToPool32;
+  static const size_t kOpportunityDistToPool64;
+  // PC distance between constant pool checks.
+  V8_EXPORT_PRIVATE static const size_t kCheckInterval;
+  // Number of entries in the pool that triggers a check.
+  static const size_t kApproxMaxEntryCount;
+
+ private:
+  void StartBlock();
+  void EndBlock();
+
+  void EmitEntries();
+  void EmitPrologue(Alignment require_alignment);
+  int PrologueSize(Jump require_jump) const;
+  RelocInfoStatus RecordKey(ConstantPoolKey key, int offset);
+  RelocInfoStatus GetRelocInfoStatusFor(const ConstantPoolKey& key);
+  void Emit(const ConstantPoolKey& key);
+  void SetLoadOffsetToConstPoolEntry(int load_offset, Instruction* entry_offset,
+                                     const ConstantPoolKey& key);
+  Alignment IsAlignmentRequiredIfEmittedAt(Jump require_jump,
+                                           int pc_offset) const;
+
+  Assembler* assm_;
+  // Keep track of the first instruction requiring a constant pool entry
+  // since the previous constant pool was emitted.
+  int first_use_32_ = -1;
+  int first_use_64_ = -1;
+  // We sort not according to insertion order, but since we do not insert
+  // addresses (for heap objects we insert an index which is created in
+  // increasing order), the order is deterministic. We map each entry to the
+  // pc offset of the load. We use a multimap because we need to record the
+  // pc offset of each load of the same constant so that the immediate of the
+  // loads can be back-patched when the pool is emitted.
+  std::multimap<ConstantPoolKey, int> entries_;
+  size_t entry32_count_ = 0;
+  size_t entry64_count_ = 0;
+  int next_check_ = 0;
+  int blocked_nesting_ = 0;
+};
+
+#endif  // defined(V8_TARGET_ARCH_ARM64)
+
 }  // namespace internal
 }  // namespace v8
diff --git a/deps/v8/src/codegen/cpu-features.h b/deps/v8/src/codegen/cpu-features.h
index b2f792e339c6f6..dae9992c57f6c1 100644
--- a/deps/v8/src/codegen/cpu-features.h
+++ b/deps/v8/src/codegen/cpu-features.h
@@ -14,6 +14,7 @@ namespace internal {
 // CPU feature flags.
 enum CpuFeature {
   // x86
+  SSE4_2,
   SSE4_1,
   SSSE3,
   SSE3,
diff --git a/deps/v8/src/codegen/external-reference.cc b/deps/v8/src/codegen/external-reference.cc
index 5538f361f076c1..c0774079311122 100644
--- a/deps/v8/src/codegen/external-reference.cc
+++ b/deps/v8/src/codegen/external-reference.cc
@@ -26,31 +26,11 @@
 #include "src/logging/log.h"
 #include "src/numbers/math-random.h"
 #include "src/objects/objects-inl.h"
+#include "src/regexp/regexp-macro-assembler-arch.h"
 #include "src/regexp/regexp-stack.h"
 #include "src/strings/string-search.h"
 #include "src/wasm/wasm-external-refs.h"

-// Include native regexp-macro-assembler.
-#if V8_TARGET_ARCH_IA32 -#include "src/regexp/ia32/regexp-macro-assembler-ia32.h" // NOLINT -#elif V8_TARGET_ARCH_X64 -#include "src/regexp/x64/regexp-macro-assembler-x64.h" // NOLINT -#elif V8_TARGET_ARCH_ARM64 -#include "src/regexp/arm64/regexp-macro-assembler-arm64.h" // NOLINT -#elif V8_TARGET_ARCH_ARM -#include "src/regexp/arm/regexp-macro-assembler-arm.h" // NOLINT -#elif V8_TARGET_ARCH_PPC -#include "src/regexp/ppc/regexp-macro-assembler-ppc.h" // NOLINT -#elif V8_TARGET_ARCH_MIPS -#include "src/regexp/mips/regexp-macro-assembler-mips.h" // NOLINT -#elif V8_TARGET_ARCH_MIPS64 -#include "src/regexp/mips64/regexp-macro-assembler-mips64.h" // NOLINT -#elif V8_TARGET_ARCH_S390 -#include "src/regexp/s390/regexp-macro-assembler-s390.h" // NOLINT -#else // Unknown architecture. -#error "Unknown architecture." -#endif // Target architecture. - #ifdef V8_INTL_SUPPORT #include "src/objects/intl-objects.h" #endif // V8_INTL_SUPPORT @@ -671,6 +651,15 @@ static Address LexicographicCompareWrapper(Isolate* isolate, Address smi_x, FUNCTION_REFERENCE(smi_lexicographic_compare_function, LexicographicCompareWrapper) +FUNCTION_REFERENCE(mutable_big_int_absolute_add_and_canonicalize_function, + MutableBigInt_AbsoluteAddAndCanonicalize) + +FUNCTION_REFERENCE(mutable_big_int_absolute_compare_function, + MutableBigInt_AbsoluteCompare) + +FUNCTION_REFERENCE(mutable_big_int_absolute_sub_and_canonicalize_function, + MutableBigInt_AbsoluteSubAndCanonicalize) + FUNCTION_REFERENCE(check_object_type, CheckObjectType) #ifdef V8_INTL_SUPPORT @@ -786,6 +775,12 @@ ExternalReference ExternalReference::fast_c_call_caller_pc_address( isolate->isolate_data()->fast_c_call_caller_pc_address()); } +ExternalReference ExternalReference::stack_is_iterable_address( + Isolate* isolate) { + return ExternalReference( + isolate->isolate_data()->stack_is_iterable_address()); +} + FUNCTION_REFERENCE(call_enqueue_microtask_function, MicrotaskQueue::CallEnqueueMicrotask) diff --git a/deps/v8/src/codegen/external-reference.h b/deps/v8/src/codegen/external-reference.h index 4c83a9b33af35d..b663ae1621e953 100644 --- a/deps/v8/src/codegen/external-reference.h +++ b/deps/v8/src/codegen/external-reference.h @@ -72,6 +72,7 @@ class StatsCounter; "IsolateData::fast_c_call_caller_fp_address") \ V(fast_c_call_caller_pc_address, \ "IsolateData::fast_c_call_caller_pc_address") \ + V(stack_is_iterable_address, "IsolateData::stack_is_iterable_address") \ V(address_of_regexp_stack_limit, "RegExpStack::limit_address()") \ V(address_of_regexp_stack_memory_address, "RegExpStack::memory_address()") \ V(address_of_regexp_stack_memory_size, "RegExpStack::memory_size()") \ @@ -149,6 +150,12 @@ class StatsCounter; V(libc_memmove_function, "libc_memmove") \ V(libc_memset_function, "libc_memset") \ V(mod_two_doubles_operation, "mod_two_doubles") \ + V(mutable_big_int_absolute_add_and_canonicalize_function, \ + "MutableBigInt_AbsoluteAddAndCanonicalize") \ + V(mutable_big_int_absolute_compare_function, \ + "MutableBigInt_AbsoluteCompare") \ + V(mutable_big_int_absolute_sub_and_canonicalize_function, \ + "MutableBigInt_AbsoluteSubAndCanonicalize") \ V(new_deoptimizer_function, "Deoptimizer::New()") \ V(orderedhashmap_gethash_raw, "orderedhashmap_gethash_raw") \ V(printf_function, "printf") \ diff --git a/deps/v8/src/codegen/handler-table.cc b/deps/v8/src/codegen/handler-table.cc index 12a05e1fbacd8e..4f94746ea58f45 100644 --- a/deps/v8/src/codegen/handler-table.cc +++ b/deps/v8/src/codegen/handler-table.cc @@ -15,31 +15,41 @@ namespace internal { 
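// Two encodings now coexist explicitly in this file: Code objects carry
// return-address-based tables (kReturnEntrySize-sized entries), while
// bytecode arrays carry range-based tables (kRangeEntrySize-sized entries);
// EntrySizeFromMode() below selects between them.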
HandlerTable::HandlerTable(Code code)
     : HandlerTable(code.InstructionStart() + code.handler_table_offset(),
-                   code.handler_table_size()) {}
+                   code.handler_table_size(), kReturnAddressBasedEncoding) {}

 HandlerTable::HandlerTable(BytecodeArray bytecode_array)
     : HandlerTable(bytecode_array.handler_table()) {}

 HandlerTable::HandlerTable(ByteArray byte_array)
-    : number_of_entries_(byte_array.length() / kRangeEntrySize /
-                         sizeof(int32_t)),
-#ifdef DEBUG
-      mode_(kRangeBasedEncoding),
-#endif
-      raw_encoded_data_(
-          reinterpret_cast<Address>(byte_array.GetDataStartAddress())) {
-  DCHECK_EQ(0, byte_array.length() % (kRangeEntrySize * sizeof(int32_t)));
-}
+    : HandlerTable(reinterpret_cast<Address>(byte_array.GetDataStartAddress()),
+                   byte_array.length(), kRangeBasedEncoding) {}

-HandlerTable::HandlerTable(Address handler_table, int handler_table_size)
-    : number_of_entries_(handler_table_size / kReturnEntrySize /
+HandlerTable::HandlerTable(Address handler_table, int handler_table_size,
+                           EncodingMode encoding_mode)
+    : number_of_entries_(handler_table_size / EntrySizeFromMode(encoding_mode) /
                          sizeof(int32_t)),
 #ifdef DEBUG
-      mode_(kReturnAddressBasedEncoding),
+      mode_(encoding_mode),
 #endif
       raw_encoded_data_(handler_table) {
+  // Check padding.
   static_assert(4 < kReturnEntrySize * sizeof(int32_t), "allowed padding");
-  DCHECK_GE(4, handler_table_size % (kReturnEntrySize * sizeof(int32_t)));
+  // For return address encoding, maximum padding is 4; otherwise, there should
+  // be no padding.
+  DCHECK_GE(kReturnAddressBasedEncoding == encoding_mode ? 4 : 0,
+            handler_table_size %
+                (EntrySizeFromMode(encoding_mode) * sizeof(int32_t)));
+}
+
+// static
+int HandlerTable::EntrySizeFromMode(EncodingMode mode) {
+  switch (mode) {
+    case kReturnAddressBasedEncoding:
+      return kReturnEntrySize;
+    case kRangeBasedEncoding:
+      return kRangeEntrySize;
+  }
+  UNREACHABLE();
 }

 int HandlerTable::GetRangeStart(int index) const {
diff --git a/deps/v8/src/codegen/handler-table.h b/deps/v8/src/codegen/handler-table.h
index eaa062873b40b0..362412525d8a24 100644
--- a/deps/v8/src/codegen/handler-table.h
+++ b/deps/v8/src/codegen/handler-table.h
@@ -45,11 +45,14 @@ class V8_EXPORT_PRIVATE HandlerTable {
     // async/await handling in the debugger can take place.
   };

+  enum EncodingMode { kRangeBasedEncoding, kReturnAddressBasedEncoding };
+
   // Constructors for the various encodings.
   explicit HandlerTable(Code code);
   explicit HandlerTable(ByteArray byte_array);
   explicit HandlerTable(BytecodeArray bytecode_array);
-  explicit HandlerTable(Address handler_table, int handler_table_size);
+  HandlerTable(Address handler_table, int handler_table_size,
+               EncodingMode encoding_mode);

   // Getters for handler table based on ranges.
   int GetRangeStart(int index) const;
@@ -88,11 +91,12 @@ class V8_EXPORT_PRIVATE HandlerTable {
 #endif

  private:
-  enum EncodingMode { kRangeBasedEncoding, kReturnAddressBasedEncoding };
-
   // Getters for handler table based on ranges.
   CatchPrediction GetRangePrediction(int index) const;

+  // Gets entry size based on mode.
+  static int EntrySizeFromMode(EncodingMode mode);
+
   // Getters for handler table based on return addresses.
int GetReturnOffset(int index) const; int GetReturnHandler(int index) const; diff --git a/deps/v8/src/codegen/ia32/assembler-ia32.cc b/deps/v8/src/codegen/ia32/assembler-ia32.cc index 99d38890e351f0..aefcab7299c7c8 100644 --- a/deps/v8/src/codegen/ia32/assembler-ia32.cc +++ b/deps/v8/src/codegen/ia32/assembler-ia32.cc @@ -756,6 +756,13 @@ void Assembler::cmpxchg8b(Operand dst) { emit_operand(ecx, dst); } +void Assembler::mfence() { + EnsureSpace ensure_space(this); + EMIT(0x0F); + EMIT(0xAE); + EMIT(0xF0); +} + void Assembler::lfence() { EnsureSpace ensure_space(this); EMIT(0x0F); diff --git a/deps/v8/src/codegen/ia32/assembler-ia32.h b/deps/v8/src/codegen/ia32/assembler-ia32.h index d2dcb0f34848b0..2423f73bdbe9b8 100644 --- a/deps/v8/src/codegen/ia32/assembler-ia32.h +++ b/deps/v8/src/codegen/ia32/assembler-ia32.h @@ -542,6 +542,7 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase { void cmpxchg8b(Operand dst); // Memory Fence + void mfence(); void lfence(); void pause(); diff --git a/deps/v8/src/codegen/ia32/macro-assembler-ia32.cc b/deps/v8/src/codegen/ia32/macro-assembler-ia32.cc index 6a0be9386e1702..f6f0153e54c02c 100644 --- a/deps/v8/src/codegen/ia32/macro-assembler-ia32.cc +++ b/deps/v8/src/codegen/ia32/macro-assembler-ia32.cc @@ -1887,20 +1887,24 @@ void TurboAssembler::Call(Handle code_object, RelocInfo::Mode rmode) { call(code_object, rmode); } -void TurboAssembler::CallBuiltinPointer(Register builtin_pointer) { +void TurboAssembler::LoadEntryFromBuiltinIndex(Register builtin_index) { STATIC_ASSERT(kSystemPointerSize == 4); STATIC_ASSERT(kSmiShiftSize == 0); STATIC_ASSERT(kSmiTagSize == 1); STATIC_ASSERT(kSmiTag == 0); - // The builtin_pointer register contains the builtin index as a Smi. + // The builtin_index register contains the builtin index as a Smi. // Untagging is folded into the indexing operand below (we use // times_half_system_pointer_size instead of times_system_pointer_size since // smis are already shifted by one). - mov(builtin_pointer, - Operand(kRootRegister, builtin_pointer, times_half_system_pointer_size, + mov(builtin_index, + Operand(kRootRegister, builtin_index, times_half_system_pointer_size, IsolateData::builtin_entry_table_offset())); - call(builtin_pointer); +} + +void TurboAssembler::CallBuiltinByIndex(Register builtin_index) { + LoadEntryFromBuiltinIndex(builtin_index); + call(builtin_index); } void TurboAssembler::LoadCodeObjectEntry(Register destination, diff --git a/deps/v8/src/codegen/ia32/macro-assembler-ia32.h b/deps/v8/src/codegen/ia32/macro-assembler-ia32.h index 345ae815af66ff..9b13e87447920f 100644 --- a/deps/v8/src/codegen/ia32/macro-assembler-ia32.h +++ b/deps/v8/src/codegen/ia32/macro-assembler-ia32.h @@ -87,7 +87,10 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase { void Call(Label* target) { call(target); } void Call(Handle code_object, RelocInfo::Mode rmode); - void CallBuiltinPointer(Register builtin_pointer) override; + // Load the builtin given by the Smi in |builtin_index| into the same + // register. 
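// (Worked example, illustrative only: on ia32 builtin 5 arrives as Smi 10,
// and scaling by times_half_system_pointer_size gives 10 * 2 == 20 ==
// 5 * kSystemPointerSize, so the untag and the table index fold into a
// single operand.)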
+ void LoadEntryFromBuiltinIndex(Register builtin_index); + void CallBuiltinByIndex(Register builtin_index) override; void LoadCodeObjectEntry(Register destination, Register code_object) override; void CallCodeObject(Register code_object) override; diff --git a/deps/v8/src/codegen/interface-descriptors.cc b/deps/v8/src/codegen/interface-descriptors.cc index f8f874359b6d12..5934c80a7d218b 100644 --- a/deps/v8/src/codegen/interface-descriptors.cc +++ b/deps/v8/src/codegen/interface-descriptors.cc @@ -252,6 +252,11 @@ void StringAtDescriptor::InitializePlatformSpecific( DefaultInitializePlatformSpecific(data, kParameterCount); } +void StringAtAsStringDescriptor::InitializePlatformSpecific( + CallInterfaceDescriptorData* data) { + DefaultInitializePlatformSpecific(data, kParameterCount); +} + void StringSubstringDescriptor::InitializePlatformSpecific( CallInterfaceDescriptorData* data) { DefaultInitializePlatformSpecific(data, kParameterCount); diff --git a/deps/v8/src/codegen/interface-descriptors.h b/deps/v8/src/codegen/interface-descriptors.h index d166b477d80868..f6c1adfe47fe5e 100644 --- a/deps/v8/src/codegen/interface-descriptors.h +++ b/deps/v8/src/codegen/interface-descriptors.h @@ -74,6 +74,7 @@ namespace internal { V(StoreTransition) \ V(StoreWithVector) \ V(StringAt) \ + V(StringAtAsString) \ V(StringSubstring) \ V(TypeConversion) \ V(TypeConversionStackParameter) \ @@ -969,6 +970,17 @@ class StringAtDescriptor final : public CallInterfaceDescriptor { DECLARE_DESCRIPTOR(StringAtDescriptor, CallInterfaceDescriptor) }; +class StringAtAsStringDescriptor final : public CallInterfaceDescriptor { + public: + DEFINE_PARAMETERS(kReceiver, kPosition) + // TODO(turbofan): Return untagged value here. + DEFINE_RESULT_AND_PARAMETER_TYPES( + MachineType::TaggedPointer(), // result string + MachineType::AnyTagged(), // kReceiver + MachineType::IntPtr()) // kPosition + DECLARE_DESCRIPTOR(StringAtAsStringDescriptor, CallInterfaceDescriptor) +}; + class StringSubstringDescriptor final : public CallInterfaceDescriptor { public: DEFINE_PARAMETERS(kString, kFrom, kTo) diff --git a/deps/v8/src/codegen/label.h b/deps/v8/src/codegen/label.h index 430958d1906495..f45f1e62d74a2d 100644 --- a/deps/v8/src/codegen/label.h +++ b/deps/v8/src/codegen/label.h @@ -99,7 +99,7 @@ class Label { friend class Assembler; friend class Displacement; - friend class RegExpMacroAssemblerIrregexp; + friend class RegExpBytecodeGenerator; // Disallow copy construction and assignment, but allow move construction and // move assignment on selected platforms (see above). 
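Each port below repeats the same split: resolve the Smi-tagged builtin index to an entry address, then call through it. A minimal sketch of the pattern (illustrative only; assumes a TurboAssembler* tasm and a register already holding the Smi-tagged index):

  tasm->LoadEntryFromBuiltinIndex(builtin_index);  // index Smi -> entry address
  tasm->Call(builtin_index);                       // what CallBuiltinByIndex does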
diff --git a/deps/v8/src/codegen/mips/assembler-mips.cc b/deps/v8/src/codegen/mips/assembler-mips.cc index d6337aefb61385..423da2fb65f778 100644 --- a/deps/v8/src/codegen/mips/assembler-mips.cc +++ b/deps/v8/src/codegen/mips/assembler-mips.cc @@ -39,6 +39,7 @@ #include "src/base/bits.h" #include "src/base/cpu.h" #include "src/codegen/mips/assembler-mips-inl.h" +#include "src/codegen/safepoint-table.h" #include "src/codegen/string-constants.h" #include "src/deoptimizer/deoptimizer.h" #include "src/objects/heap-number-inl.h" @@ -2211,7 +2212,7 @@ void Assembler::break_(uint32_t code, bool break_as_stop) { emit(break_instr); } -void Assembler::stop(const char* msg, uint32_t code) { +void Assembler::stop(uint32_t code) { DCHECK_GT(code, kMaxWatchpointCode); DCHECK_LE(code, kMaxStopCode); #if V8_HOST_ARCH_MIPS diff --git a/deps/v8/src/codegen/mips/assembler-mips.h b/deps/v8/src/codegen/mips/assembler-mips.h index 640e11cf1aee86..86a07ab06e9ac2 100644 --- a/deps/v8/src/codegen/mips/assembler-mips.h +++ b/deps/v8/src/codegen/mips/assembler-mips.h @@ -558,7 +558,7 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase { // Break / Trap instructions. void break_(uint32_t code, bool break_as_stop = false); - void stop(const char* msg, uint32_t code = kMaxStopCode); + void stop(uint32_t code = kMaxStopCode); void tge(Register rs, Register rt, uint16_t code); void tgeu(Register rs, Register rt, uint16_t code); void tlt(Register rs, Register rt, uint16_t code); @@ -1478,11 +1478,13 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase { static bool IsAddImmediate(Instr instr); static Instr SetAddImmediateOffset(Instr instr, int16_t offset); static uint32_t CreateTargetAddress(Instr instr_lui, Instr instr_jic); - static void UnpackTargetAddress(uint32_t address, int16_t& lui_offset, - int16_t& jic_offset); - static void UnpackTargetAddressUnsigned(uint32_t address, - uint32_t& lui_offset, - uint32_t& jic_offset); + static void UnpackTargetAddress( + uint32_t address, int16_t& lui_offset, // NOLINT(runtime/references) + int16_t& jic_offset); // NOLINT(runtime/references) + static void UnpackTargetAddressUnsigned( + uint32_t address, + uint32_t& lui_offset, // NOLINT(runtime/references) + uint32_t& jic_offset); // NOLINT(runtime/references) static bool IsAndImmediate(Instr instr); static bool IsEmittedConstant(Instr instr); @@ -1513,7 +1515,7 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase { // Helper function for memory load/store using base register and offset. 
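// (For instance, with an assumed 16-bit signed offset field: an offset such
// as 0x12340 is not encodable directly, so the helper folds the high bits
// into the base register and leaves a small encodable remainder in the
// MemOperand.)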
void AdjustBaseAndOffset( - MemOperand& src, + MemOperand& src, // NOLINT(runtime/references) OffsetAccessType access_type = OffsetAccessType::SINGLE_ACCESS, int second_access_add_to_offset = 4); diff --git a/deps/v8/src/codegen/mips/macro-assembler-mips.cc b/deps/v8/src/codegen/mips/macro-assembler-mips.cc index 483b7e895bb247..79373c1b5be197 100644 --- a/deps/v8/src/codegen/mips/macro-assembler-mips.cc +++ b/deps/v8/src/codegen/mips/macro-assembler-mips.cc @@ -189,7 +189,7 @@ void MacroAssembler::RecordWriteField(Register object, int offset, Label ok; And(t8, dst, Operand(kPointerSize - 1)); Branch(&ok, eq, t8, Operand(zero_reg)); - stop("Unaligned cell in write barrier"); + stop(); bind(&ok); } @@ -3974,18 +3974,22 @@ void TurboAssembler::Call(Handle code, RelocInfo::Mode rmode, Call(code.address(), rmode, cond, rs, rt, bd); } -void TurboAssembler::CallBuiltinPointer(Register builtin_pointer) { +void TurboAssembler::LoadEntryFromBuiltinIndex(Register builtin_index) { STATIC_ASSERT(kSystemPointerSize == 4); STATIC_ASSERT(kSmiShiftSize == 0); STATIC_ASSERT(kSmiTagSize == 1); STATIC_ASSERT(kSmiTag == 0); - // The builtin_pointer register contains the builtin index as a Smi. - SmiUntag(builtin_pointer, builtin_pointer); - Lsa(builtin_pointer, kRootRegister, builtin_pointer, kSystemPointerSizeLog2); - lw(builtin_pointer, - MemOperand(builtin_pointer, IsolateData::builtin_entry_table_offset())); - Call(builtin_pointer); + // The builtin_index register contains the builtin index as a Smi. + SmiUntag(builtin_index, builtin_index); + Lsa(builtin_index, kRootRegister, builtin_index, kSystemPointerSizeLog2); + lw(builtin_index, + MemOperand(builtin_index, IsolateData::builtin_entry_table_offset())); +} + +void TurboAssembler::CallBuiltinByIndex(Register builtin_index) { + LoadEntryFromBuiltinIndex(builtin_index); + Call(builtin_index); } void TurboAssembler::StoreReturnAddressAndCall(Register target) { @@ -4111,6 +4115,11 @@ void MacroAssembler::Swap(Register reg1, Register reg2, Register scratch) { void TurboAssembler::Call(Label* target) { BranchAndLink(target); } +void TurboAssembler::LoadAddress(Register dst, Label* target) { + uint32_t address = jump_address(target); + li(dst, address); +} + void TurboAssembler::Push(Handle handle) { UseScratchRegisterScope temps(this); Register scratch = temps.Acquire(); @@ -4694,15 +4703,15 @@ void TurboAssembler::Check(Condition cc, AbortReason reason, Register rs, void TurboAssembler::Abort(AbortReason reason) { Label abort_start; bind(&abort_start); - const char* msg = GetAbortReason(reason); #ifdef DEBUG + const char* msg = GetAbortReason(reason); RecordComment("Abort message: "); RecordComment(msg); #endif // Avoid emitting call to builtin if requested. if (trap_on_abort()) { - stop(msg); + stop(); return; } @@ -4938,7 +4947,7 @@ void MacroAssembler::AssertStackIsAligned() { andi(scratch, sp, frame_alignment_mask); Branch(&alignment_as_expected, eq, scratch, Operand(zero_reg)); // Don't use Check here, as it will call Runtime_Abort re-entering here. - stop("Unexpected stack alignment"); + stop(); bind(&alignment_as_expected); } } @@ -5352,7 +5361,7 @@ void TurboAssembler::CallCFunctionHelper(Register function_base, Branch(&alignment_as_expected, eq, scratch, Operand(zero_reg)); // Don't use Check here, as it will call Runtime_Abort possibly // re-entering here. 
- stop("Unexpected alignment in CallCFunction"); + stop(); bind(&alignment_as_expected); } } diff --git a/deps/v8/src/codegen/mips/macro-assembler-mips.h b/deps/v8/src/codegen/mips/macro-assembler-mips.h index f394e01769e7f8..3dfc7bfbad1987 100644 --- a/deps/v8/src/codegen/mips/macro-assembler-mips.h +++ b/deps/v8/src/codegen/mips/macro-assembler-mips.h @@ -212,8 +212,12 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase { void Call(Handle code, RelocInfo::Mode rmode = RelocInfo::CODE_TARGET, COND_ARGS); void Call(Label* target); + void LoadAddress(Register dst, Label* target); - void CallBuiltinPointer(Register builtin_pointer) override; + // Load the builtin given by the Smi in |builtin_index| into the same + // register. + void LoadEntryFromBuiltinIndex(Register builtin_index); + void CallBuiltinByIndex(Register builtin_index) override; void LoadCodeObjectEntry(Register destination, Register code_object) override { @@ -841,9 +845,12 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase { void BranchShortMSA(MSABranchDF df, Label* target, MSABranchCondition cond, MSARegister wt, BranchDelaySlot bd = PROTECT); - bool CalculateOffset(Label* L, int32_t& offset, OffsetSize bits); - bool CalculateOffset(Label* L, int32_t& offset, OffsetSize bits, - Register& scratch, const Operand& rt); + bool CalculateOffset(Label* L, int32_t& offset, // NOLINT(runtime/references) + OffsetSize bits); + bool CalculateOffset(Label* L, int32_t& offset, // NOLINT(runtime/references) + OffsetSize bits, + Register& scratch, // NOLINT(runtime/references) + const Operand& rt); void BranchShortHelperR6(int32_t offset, Label* L); void BranchShortHelper(int16_t offset, Label* L, BranchDelaySlot bdslot); diff --git a/deps/v8/src/codegen/mips64/assembler-mips64.cc b/deps/v8/src/codegen/mips64/assembler-mips64.cc index cb8e3dd7d1ef14..801faf6306d861 100644 --- a/deps/v8/src/codegen/mips64/assembler-mips64.cc +++ b/deps/v8/src/codegen/mips64/assembler-mips64.cc @@ -38,6 +38,7 @@ #include "src/base/cpu.h" #include "src/codegen/mips64/assembler-mips64-inl.h" +#include "src/codegen/safepoint-table.h" #include "src/codegen/string-constants.h" #include "src/deoptimizer/deoptimizer.h" #include "src/objects/heap-number-inl.h" @@ -2344,7 +2345,7 @@ void Assembler::break_(uint32_t code, bool break_as_stop) { emit(break_instr); } -void Assembler::stop(const char* msg, uint32_t code) { +void Assembler::stop(uint32_t code) { DCHECK_GT(code, kMaxWatchpointCode); DCHECK_LE(code, kMaxStopCode); #if defined(V8_HOST_ARCH_MIPS) || defined(V8_HOST_ARCH_MIPS64) diff --git a/deps/v8/src/codegen/mips64/assembler-mips64.h b/deps/v8/src/codegen/mips64/assembler-mips64.h index c7c027eef713e9..a22ddf0e7d2cc1 100644 --- a/deps/v8/src/codegen/mips64/assembler-mips64.h +++ b/deps/v8/src/codegen/mips64/assembler-mips64.h @@ -601,7 +601,7 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase { // Break / Trap instructions. void break_(uint32_t code, bool break_as_stop = false); - void stop(const char* msg, uint32_t code = kMaxStopCode); + void stop(uint32_t code = kMaxStopCode); void tge(Register rs, Register rt, uint16_t code); void tgeu(Register rs, Register rt, uint16_t code); void tlt(Register rs, Register rt, uint16_t code); @@ -1560,7 +1560,7 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase { // Helper function for memory load/store using base register and offset. 
void AdjustBaseAndOffset( - MemOperand& src, + MemOperand& src, // NOLINT(runtime/references) OffsetAccessType access_type = OffsetAccessType::SINGLE_ACCESS, int second_access_add_to_offset = 4); diff --git a/deps/v8/src/codegen/mips64/macro-assembler-mips64.cc b/deps/v8/src/codegen/mips64/macro-assembler-mips64.cc index 65c0b592ebad85..97e5af1fa8e5bf 100644 --- a/deps/v8/src/codegen/mips64/macro-assembler-mips64.cc +++ b/deps/v8/src/codegen/mips64/macro-assembler-mips64.cc @@ -187,7 +187,7 @@ void MacroAssembler::RecordWriteField(Register object, int offset, Label ok; And(t8, dst, Operand(kPointerSize - 1)); Branch(&ok, eq, t8, Operand(zero_reg)); - stop("Unaligned cell in write barrier"); + stop(); bind(&ok); } @@ -4274,18 +4274,22 @@ void TurboAssembler::Call(Handle code, RelocInfo::Mode rmode, Call(code.address(), rmode, cond, rs, rt, bd); } -void TurboAssembler::CallBuiltinPointer(Register builtin_pointer) { +void TurboAssembler::LoadEntryFromBuiltinIndex(Register builtin_index) { STATIC_ASSERT(kSystemPointerSize == 8); STATIC_ASSERT(kSmiShiftSize == 31); STATIC_ASSERT(kSmiTagSize == 1); STATIC_ASSERT(kSmiTag == 0); - // The builtin_pointer register contains the builtin index as a Smi. - SmiUntag(builtin_pointer, builtin_pointer); - Dlsa(builtin_pointer, kRootRegister, builtin_pointer, kSystemPointerSizeLog2); - Ld(builtin_pointer, - MemOperand(builtin_pointer, IsolateData::builtin_entry_table_offset())); - Call(builtin_pointer); + // The builtin_index register contains the builtin index as a Smi. + SmiUntag(builtin_index, builtin_index); + Dlsa(builtin_index, kRootRegister, builtin_index, kSystemPointerSizeLog2); + Ld(builtin_index, + MemOperand(builtin_index, IsolateData::builtin_entry_table_offset())); +} + +void TurboAssembler::CallBuiltinByIndex(Register builtin_index) { + LoadEntryFromBuiltinIndex(builtin_index); + Call(builtin_index); } void TurboAssembler::StoreReturnAddressAndCall(Register target) { @@ -4433,6 +4437,11 @@ void MacroAssembler::Swap(Register reg1, Register reg2, Register scratch) { void TurboAssembler::Call(Label* target) { BranchAndLink(target); } +void TurboAssembler::LoadAddress(Register dst, Label* target) { + uint64_t address = jump_address(target); + li(dst, address); +} + void TurboAssembler::Push(Smi smi) { UseScratchRegisterScope temps(this); Register scratch = temps.Acquire(); @@ -5026,15 +5035,15 @@ void TurboAssembler::Check(Condition cc, AbortReason reason, Register rs, void TurboAssembler::Abort(AbortReason reason) { Label abort_start; bind(&abort_start); - const char* msg = GetAbortReason(reason); #ifdef DEBUG + const char* msg = GetAbortReason(reason); RecordComment("Abort message: "); RecordComment(msg); #endif // Avoid emitting call to builtin if requested. if (trap_on_abort()) { - stop(msg); + stop(); return; } @@ -5273,7 +5282,7 @@ void MacroAssembler::AssertStackIsAligned() { Branch(&alignment_as_expected, eq, scratch, Operand(zero_reg)); } // Don't use Check here, as it will call Runtime_Abort re-entering here. - stop("Unexpected stack alignment"); + stop(); bind(&alignment_as_expected); } } @@ -5698,7 +5707,7 @@ void TurboAssembler::CallCFunctionHelper(Register function, } // Don't use Check here, as it will call Runtime_Abort possibly // re-entering here. 
- stop("Unexpected alignment in CallCFunction"); + stop(); bind(&alignment_as_expected); } } diff --git a/deps/v8/src/codegen/mips64/macro-assembler-mips64.h b/deps/v8/src/codegen/mips64/macro-assembler-mips64.h index d0f9b7f5bc5007..eb62bec0e82395 100644 --- a/deps/v8/src/codegen/mips64/macro-assembler-mips64.h +++ b/deps/v8/src/codegen/mips64/macro-assembler-mips64.h @@ -234,8 +234,12 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase { void Call(Handle code, RelocInfo::Mode rmode = RelocInfo::CODE_TARGET, COND_ARGS); void Call(Label* target); + void LoadAddress(Register dst, Label* target); - void CallBuiltinPointer(Register builtin_pointer) override; + // Load the builtin given by the Smi in |builtin_index| into the same + // register. + void LoadEntryFromBuiltinIndex(Register builtin_index); + void CallBuiltinByIndex(Register builtin_index) override; void LoadCodeObjectEntry(Register destination, Register code_object) override { @@ -845,9 +849,12 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase { void CallCFunctionHelper(Register function, int num_reg_arguments, int num_double_arguments); - bool CalculateOffset(Label* L, int32_t& offset, OffsetSize bits); - bool CalculateOffset(Label* L, int32_t& offset, OffsetSize bits, - Register& scratch, const Operand& rt); + bool CalculateOffset(Label* L, int32_t& offset, // NOLINT(runtime/references) + OffsetSize bits); + bool CalculateOffset(Label* L, int32_t& offset, // NOLINT(runtime/references) + OffsetSize bits, + Register& scratch, // NOLINT(runtime/references) + const Operand& rt); void BranchShortHelperR6(int32_t offset, Label* L); void BranchShortHelper(int16_t offset, Label* L, BranchDelaySlot bdslot); diff --git a/deps/v8/src/codegen/optimized-compilation-info.cc b/deps/v8/src/codegen/optimized-compilation-info.cc index 596d5c261ee197..f3582d868af0e1 100644 --- a/deps/v8/src/codegen/optimized-compilation-info.cc +++ b/deps/v8/src/codegen/optimized-compilation-info.cc @@ -75,9 +75,15 @@ void OptimizedCompilationInfo::ConfigureFlags() { break; case Code::BYTECODE_HANDLER: SetFlag(kCalledWithCodeStartRegister); + if (FLAG_turbo_splitting) { + MarkAsSplittingEnabled(); + } break; case Code::BUILTIN: case Code::STUB: + if (FLAG_turbo_splitting) { + MarkAsSplittingEnabled(); + } #if ENABLE_GDB_JIT_INTERFACE && DEBUG MarkAsSourcePositionsEnabled(); #endif // ENABLE_GDB_JIT_INTERFACE && DEBUG @@ -177,6 +183,8 @@ StackFrame::Type OptimizedCompilationInfo::GetOutputStackFrameType() const { return StackFrame::WASM_TO_JS; case Code::WASM_INTERPRETER_ENTRY: return StackFrame::WASM_INTERPRETER_ENTRY; + case Code::C_WASM_ENTRY: + return StackFrame::C_WASM_ENTRY; default: UNIMPLEMENTED(); return StackFrame::NONE; @@ -206,7 +214,7 @@ bool OptimizedCompilationInfo::has_native_context() const { return !closure().is_null() && !closure()->native_context().is_null(); } -Context OptimizedCompilationInfo::native_context() const { +NativeContext OptimizedCompilationInfo::native_context() const { DCHECK(has_native_context()); return closure()->native_context(); } @@ -234,6 +242,8 @@ void OptimizedCompilationInfo::SetTracingFlags(bool passes_filter) { if (FLAG_trace_turbo) SetFlag(kTraceTurboJson); if (FLAG_trace_turbo_graph) SetFlag(kTraceTurboGraph); if (FLAG_trace_turbo_scheduled) SetFlag(kTraceTurboScheduled); + if (FLAG_trace_turbo_alloc) SetFlag(kTraceTurboAllocation); + if (FLAG_trace_heap_broker) SetFlag(kTraceHeapBroker); } OptimizedCompilationInfo::InlinedFunctionHolder::InlinedFunctionHolder( diff --git 
a/deps/v8/src/codegen/optimized-compilation-info.h b/deps/v8/src/codegen/optimized-compilation-info.h index eca3a8fa3236d5..624517283e3e2c 100644 --- a/deps/v8/src/codegen/optimized-compilation-info.h +++ b/deps/v8/src/codegen/optimized-compilation-info.h @@ -9,6 +9,7 @@ #include "src/codegen/bailout-reason.h" #include "src/codegen/source-position-table.h" +#include "src/codegen/tick-counter.h" #include "src/common/globals.h" #include "src/execution/frames.h" #include "src/handles/handles.h" @@ -60,9 +61,11 @@ class V8_EXPORT_PRIVATE OptimizedCompilationInfo final { kTraceTurboJson = 1 << 14, kTraceTurboGraph = 1 << 15, kTraceTurboScheduled = 1 << 16, - kWasmRuntimeExceptionSupport = 1 << 17, - kTurboControlFlowAwareAllocation = 1 << 18, - kTurboPreprocessRanges = 1 << 19 + kTraceTurboAllocation = 1 << 17, + kTraceHeapBroker = 1 << 18, + kWasmRuntimeExceptionSupport = 1 << 19, + kTurboControlFlowAwareAllocation = 1 << 20, + kTurboPreprocessRanges = 1 << 21 }; // Construct a compilation info for optimized compilation. @@ -189,10 +192,16 @@ class V8_EXPORT_PRIVATE OptimizedCompilationInfo final { bool trace_turbo_graph_enabled() const { return GetFlag(kTraceTurboGraph); } + bool trace_turbo_allocation_enabled() const { + return GetFlag(kTraceTurboAllocation); + } + bool trace_turbo_scheduled_enabled() const { return GetFlag(kTraceTurboScheduled); } + bool trace_heap_broker_enabled() const { return GetFlag(kTraceHeapBroker); } + // Code getters and setters. void SetCode(Handle code) { code_ = code; } @@ -204,7 +213,7 @@ class V8_EXPORT_PRIVATE OptimizedCompilationInfo final { Context context() const; bool has_native_context() const; - Context native_context() const; + NativeContext native_context() const; bool has_global_object() const; JSGlobalObject global_object() const; @@ -281,6 +290,8 @@ class V8_EXPORT_PRIVATE OptimizedCompilationInfo final { std::unique_ptr ToTracedValue(); + TickCounter& tick_counter() { return tick_counter_; } + private: OptimizedCompilationInfo(Code::Kind code_kind, Zone* zone); void ConfigureFlags(); @@ -333,6 +344,8 @@ class V8_EXPORT_PRIVATE OptimizedCompilationInfo final { Vector debug_name_; std::unique_ptr trace_turbo_filename_; + TickCounter tick_counter_; + DISALLOW_COPY_AND_ASSIGN(OptimizedCompilationInfo); }; diff --git a/deps/v8/src/codegen/pending-optimization-table.cc b/deps/v8/src/codegen/pending-optimization-table.cc new file mode 100644 index 00000000000000..9e33de7918cda2 --- /dev/null +++ b/deps/v8/src/codegen/pending-optimization-table.cc @@ -0,0 +1,97 @@ +// Copyright 2019 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#include "src/codegen/pending-optimization-table.h" + +#include "src/execution/isolate-inl.h" +#include "src/heap/heap-inl.h" +#include "src/objects/hash-table.h" +#include "src/objects/js-objects.h" + +namespace v8 { +namespace internal { + +enum class FunctionStatus { kPrepareForOptimize, kMarkForOptimize }; + +void PendingOptimizationTable::PreparedForOptimization( + Isolate* isolate, Handle function) { + DCHECK(FLAG_testing_d8_test_runner); + + Handle table = + isolate->heap()->pending_optimize_for_test_bytecode().IsUndefined() + ? 
ObjectHashTable::New(isolate, 1) + : handle(ObjectHashTable::cast( + isolate->heap()->pending_optimize_for_test_bytecode()), + isolate); + Handle tuple = isolate->factory()->NewTuple2( + handle(function->shared().GetBytecodeArray(), isolate), + handle( + Smi::FromInt(static_cast(FunctionStatus::kPrepareForOptimize)), + isolate), + AllocationType::kYoung); + table = + ObjectHashTable::Put(table, handle(function->shared(), isolate), tuple); + isolate->heap()->SetPendingOptimizeForTestBytecode(*table); +} + +void PendingOptimizationTable::MarkedForOptimization( + Isolate* isolate, Handle function) { + DCHECK(FLAG_testing_d8_test_runner); + + Handle table = + handle(isolate->heap()->pending_optimize_for_test_bytecode(), isolate); + Handle entry = + table->IsUndefined() + ? handle(ReadOnlyRoots(isolate).the_hole_value(), isolate) + : handle(Handle::cast(table)->Lookup( + handle(function->shared(), isolate)), + isolate); + if (entry->IsTheHole()) { + PrintF("Error: Function "); + function->ShortPrint(); + PrintF( + " should be prepared for optimization with " + "%%PrepareFunctionForOptimize before " + "%%OptimizeFunctionOnNextCall / %%OptimizeOSR "); + UNREACHABLE(); + } + + DCHECK(entry->IsTuple2()); + Handle::cast(entry)->set_value2( + Smi::FromInt(static_cast(FunctionStatus::kMarkForOptimize))); + table = ObjectHashTable::Put(Handle::cast(table), + handle(function->shared(), isolate), entry); + isolate->heap()->SetPendingOptimizeForTestBytecode(*table); +} + +void PendingOptimizationTable::FunctionWasOptimized( + Isolate* isolate, Handle function) { + DCHECK(FLAG_testing_d8_test_runner); + + if (isolate->heap()->pending_optimize_for_test_bytecode().IsUndefined()) { + return; + } + + Handle table = + handle(ObjectHashTable::cast( + isolate->heap()->pending_optimize_for_test_bytecode()), + isolate); + Handle value(table->Lookup(handle(function->shared(), isolate)), + isolate); + // Remove only if we have already seen %OptimizeFunctionOnNextCall. If it is + // optimized for other reasons, still keep holding the bytecode since we may + // optimize it later. + if (!value->IsTheHole() && + Smi::cast(Handle::cast(value)->value2()).value() == + static_cast(FunctionStatus::kMarkForOptimize)) { + bool was_present; + table = table->Remove(isolate, table, handle(function->shared(), isolate), + &was_present); + DCHECK(was_present); + isolate->heap()->SetPendingOptimizeForTestBytecode(*table); + } +} + +} // namespace internal +} // namespace v8 diff --git a/deps/v8/src/codegen/pending-optimization-table.h b/deps/v8/src/codegen/pending-optimization-table.h new file mode 100644 index 00000000000000..2a2782d17a67cd --- /dev/null +++ b/deps/v8/src/codegen/pending-optimization-table.h @@ -0,0 +1,44 @@ +// Copyright 2019 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#ifndef V8_CODEGEN_PENDING_OPTIMIZATION_TABLE_H_ +#define V8_CODEGEN_PENDING_OPTIMIZATION_TABLE_H_ + +#include "src/common/globals.h" + +namespace v8 { +namespace internal { + +// This class adds the functionality to properly test the optimized code. This +// is only for use in tests. All these functions should only be called when +// testing_d8_flag_for_tests is set. +class PendingOptimizationTable { + public: + // This function should be called before we mark the function for + // optimization. Calling this function ensures that |function| is compiled and + // has a feedback vector allocated. 
This also holds on to the bytecode
+  // strongly in the pending optimization table, preventing the bytecode from
+  // being flushed.
+  static void PreparedForOptimization(Isolate* isolate,
+                                      Handle<JSFunction> function);
+
+  // This function should be called when the function is marked for
+  // optimization via the intrinsics. This will update the state of the
+  // bytecode array in the pending optimization table, so that the entry can
+  // be removed once the function is optimized. If the function is already
+  // optimized, it removes the entry from the table.
+  static void MarkedForOptimization(Isolate* isolate,
+                                    Handle<JSFunction> function);
+
+  // This function should be called once the function is optimized. If there is
+  // an entry in the pending optimization table and it is marked for removal,
+  // this function removes the entry from the pending optimization table.
+  static void FunctionWasOptimized(Isolate* isolate,
+                                   Handle<JSFunction> function);
+};
+
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_CODEGEN_PENDING_OPTIMIZATION_TABLE_H_
diff --git a/deps/v8/src/codegen/ppc/assembler-ppc.cc b/deps/v8/src/codegen/ppc/assembler-ppc.cc
index 3241f821f9e4b2..2a638af0705055 100644
--- a/deps/v8/src/codegen/ppc/assembler-ppc.cc
+++ b/deps/v8/src/codegen/ppc/assembler-ppc.cc
@@ -224,6 +224,7 @@ void Assembler::AllocateAndInstallRequestedHeapObjects(Isolate* isolate) {
 Assembler::Assembler(const AssemblerOptions& options,
                      std::unique_ptr<AssemblerBuffer> buffer)
     : AssemblerBase(options, std::move(buffer)),
+      scratch_register_list_(ip.bit()),
       constant_pool_builder_(kLoadPtrMaxReachBits, kLoadDoubleMaxReachBits) {
   reloc_info_writer.Reposition(buffer_start_ + buffer_->size(), pc_);

@@ -1490,8 +1491,7 @@ void Assembler::mtfprwa(DoubleRegister dst, Register src) {
 // Exception-generating instructions and debugging support.
 // Stops with a non-negative code less than kNumOfWatchedStops support
 // enabling/disabling and a counter feature. See simulator-ppc.h.
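// A conditional stop is realized by branching around the trap with the
// negated condition (see the NegateCondition branch below), so the trap
// only fires when |cond| actually holds.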
-void Assembler::stop(const char* msg, Condition cond, int32_t code, - CRegister cr) { +void Assembler::stop(Condition cond, int32_t code, CRegister cr) { if (cond != al) { Label skip; b(NegateCondition(cond), &skip, cr); @@ -1948,6 +1948,24 @@ PatchingAssembler::~PatchingAssembler() { DCHECK_EQ(reloc_info_writer.pos(), buffer_start_ + buffer_->size()); } +UseScratchRegisterScope::UseScratchRegisterScope(Assembler* assembler) + : assembler_(assembler), + old_available_(*assembler->GetScratchRegisterList()) {} + +UseScratchRegisterScope::~UseScratchRegisterScope() { + *assembler_->GetScratchRegisterList() = old_available_; +} + +Register UseScratchRegisterScope::Acquire() { + RegList* available = assembler_->GetScratchRegisterList(); + DCHECK_NOT_NULL(available); + DCHECK_NE(*available, 0); + int index = static_cast(base::bits::CountTrailingZeros32(*available)); + Register reg = Register::from_code(index); + *available &= ~reg.bit(); + return reg; +} + } // namespace internal } // namespace v8 diff --git a/deps/v8/src/codegen/ppc/assembler-ppc.h b/deps/v8/src/codegen/ppc/assembler-ppc.h index 2c4225849f5712..dee264a75c06bb 100644 --- a/deps/v8/src/codegen/ppc/assembler-ppc.h +++ b/deps/v8/src/codegen/ppc/assembler-ppc.h @@ -437,6 +437,7 @@ class Assembler : public AssemblerBase { PPC_XX3_OPCODE_LIST(DECLARE_PPC_XX3_INSTRUCTIONS) #undef DECLARE_PPC_XX3_INSTRUCTIONS + RegList* GetScratchRegisterList() { return &scratch_register_list_; } // --------------------------------------------------------------------------- // Code generation @@ -841,8 +842,8 @@ class Assembler : public AssemblerBase { void function_descriptor(); // Exception-generating instructions and debugging support - void stop(const char* msg, Condition cond = al, - int32_t code = kDefaultStopCode, CRegister cr = cr7); + void stop(Condition cond = al, int32_t code = kDefaultStopCode, + CRegister cr = cr7); void bkpt(uint32_t imm16); // v5 and above @@ -1182,6 +1183,9 @@ class Assembler : public AssemblerBase { static constexpr int kMaxRelocSize = RelocInfoWriter::kMaxSize; std::vector relocations_; + // Scratch registers available for use by the Assembler. + RegList scratch_register_list_; + // The bound position, before this we cannot do instruction elimination. int last_bound_pos_; // Optimizable cmpi information. @@ -1297,6 +1301,7 @@ class Assembler : public AssemblerBase { friend class RelocInfo; friend class BlockTrampolinePoolScope; friend class EnsureSpace; + friend class UseScratchRegisterScope; }; class EnsureSpace { @@ -1311,6 +1316,24 @@ class PatchingAssembler : public Assembler { ~PatchingAssembler(); }; +class V8_EXPORT_PRIVATE UseScratchRegisterScope { + public: + explicit UseScratchRegisterScope(Assembler* assembler); + ~UseScratchRegisterScope(); + + Register Acquire(); + + // Check if we have registers available to acquire. + bool CanAcquire() const { return *assembler_->GetScratchRegisterList() != 0; } + + private: + friend class Assembler; + friend class TurboAssembler; + + Assembler* assembler_; + RegList old_available_; +}; + } // namespace internal } // namespace v8 diff --git a/deps/v8/src/codegen/ppc/code-stubs-ppc.cc b/deps/v8/src/codegen/ppc/code-stubs-ppc.cc deleted file mode 100644 index 937c7456623282..00000000000000 --- a/deps/v8/src/codegen/ppc/code-stubs-ppc.cc +++ /dev/null @@ -1,28 +0,0 @@ -// Copyright 2014 the V8 project authors. All rights reserved. -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. 
- -#if V8_TARGET_ARCH_PPC - -#include "src/api/api-arguments-inl.h" -#include "src/base/bits.h" -#include "src/code-stubs.h" -#include "src/codegen/assembler-inl.h" -#include "src/codegen/macro-assembler.h" -#include "src/execution/frame-constants.h" -#include "src/execution/frames.h" -#include "src/execution/isolate.h" -#include "src/ic/ic.h" -#include "src/ic/stub-cache.h" -#include "src/init/bootstrapper.h" -#include "src/numbers/double.h" -#include "src/objects/api-callbacks.h" -#include "src/regexp/jsregexp.h" -#include "src/regexp/regexp-macro-assembler.h" -#include "src/runtime/runtime.h" - -namespace v8 { -namespace internal {} // namespace internal -} // namespace v8 - -#endif // V8_TARGET_ARCH_PPC diff --git a/deps/v8/src/codegen/ppc/macro-assembler-ppc.cc b/deps/v8/src/codegen/ppc/macro-assembler-ppc.cc index 62f0fde3b8e2ee..8ab3e5b83b1866 100644 --- a/deps/v8/src/codegen/ppc/macro-assembler-ppc.cc +++ b/deps/v8/src/codegen/ppc/macro-assembler-ppc.cc @@ -419,7 +419,7 @@ void MacroAssembler::RecordWriteField(Register object, int offset, Label ok; andi(r0, dst, Operand(kPointerSize - 1)); beq(&ok, cr0); - stop("Unaligned cell in write barrier"); + stop(); bind(&ok); } @@ -1721,15 +1721,15 @@ void TurboAssembler::Check(Condition cond, AbortReason reason, CRegister cr) { void TurboAssembler::Abort(AbortReason reason) { Label abort_start; bind(&abort_start); - const char* msg = GetAbortReason(reason); #ifdef DEBUG + const char* msg = GetAbortReason(reason); RecordComment("Abort message: "); RecordComment(msg); #endif // Avoid emitting call to builtin if requested. if (trap_on_abort()) { - stop(msg); + stop(); return; } @@ -2454,27 +2454,24 @@ void TurboAssembler::LoadP(Register dst, const MemOperand& mem, Register scratch) { DCHECK_EQ(mem.rb(), no_reg); int offset = mem.offset(); + int misaligned = (offset & 3); + int adj = (offset & 3) - 4; + int alignedOffset = (offset & ~3) + 4; - if (!is_int16(offset)) { + if (!is_int16(offset) || (misaligned && !is_int16(alignedOffset))) { /* cannot use d-form */ - DCHECK_NE(scratch, no_reg); mov(scratch, Operand(offset)); LoadPX(dst, MemOperand(mem.ra(), scratch)); } else { -#if V8_TARGET_ARCH_PPC64 - int misaligned = (offset & 3); if (misaligned) { // adjust base to conform to offset alignment requirements // Todo: enhance to use scratch if dst is unsuitable - DCHECK(dst != r0); - addi(dst, mem.ra(), Operand((offset & 3) - 4)); - ld(dst, MemOperand(dst, (offset & ~3) + 4)); + DCHECK_NE(dst, r0); + addi(dst, mem.ra(), Operand(adj)); + ld(dst, MemOperand(dst, alignedOffset)); } else { ld(dst, mem); } -#else - lwz(dst, mem); -#endif } } @@ -2934,20 +2931,24 @@ void TurboAssembler::JumpIfLessThan(Register x, int32_t y, Label* dest) { blt(dest); } -void TurboAssembler::CallBuiltinPointer(Register builtin_pointer) { +void TurboAssembler::LoadEntryFromBuiltinIndex(Register builtin_index) { STATIC_ASSERT(kSystemPointerSize == 8); STATIC_ASSERT(kSmiShiftSize == 31); STATIC_ASSERT(kSmiTagSize == 1); STATIC_ASSERT(kSmiTag == 0); - // The builtin_pointer register contains the builtin index as a Smi. + // The builtin_index register contains the builtin index as a Smi. // Untagging is folded into the indexing operand below. 
- ShiftRightArithImm(builtin_pointer, builtin_pointer, + ShiftRightArithImm(builtin_index, builtin_index, kSmiShift - kSystemPointerSizeLog2); - addi(builtin_pointer, builtin_pointer, + addi(builtin_index, builtin_index, Operand(IsolateData::builtin_entry_table_offset())); - LoadPX(builtin_pointer, MemOperand(kRootRegister, builtin_pointer)); - Call(builtin_pointer); + LoadPX(builtin_index, MemOperand(kRootRegister, builtin_index)); +} + +void TurboAssembler::CallBuiltinByIndex(Register builtin_index) { + LoadEntryFromBuiltinIndex(builtin_index); + Call(builtin_index); } void TurboAssembler::LoadCodeObjectEntry(Register destination, diff --git a/deps/v8/src/codegen/ppc/macro-assembler-ppc.h b/deps/v8/src/codegen/ppc/macro-assembler-ppc.h index ae24ef9a55bc07..6249c405e3aa11 100644 --- a/deps/v8/src/codegen/ppc/macro-assembler-ppc.h +++ b/deps/v8/src/codegen/ppc/macro-assembler-ppc.h @@ -408,11 +408,14 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase { Condition cond = al); void Call(Label* target); + // Load the builtin given by the Smi in |builtin_index| into the same + // register. + void LoadEntryFromBuiltinIndex(Register builtin_index); void LoadCodeObjectEntry(Register destination, Register code_object) override; void CallCodeObject(Register code_object) override; void JumpCodeObject(Register code_object) override; - void CallBuiltinPointer(Register builtin_pointer) override; + void CallBuiltinByIndex(Register builtin_index) override; void CallForDeoptimization(Address target, int deopt_id); // Emit code to discard a non-negative number of pointer-sized elements diff --git a/deps/v8/src/codegen/s390/assembler-s390.cc b/deps/v8/src/codegen/s390/assembler-s390.cc index dbfdc9a32a0289..6776626a23a3d4 100644 --- a/deps/v8/src/codegen/s390/assembler-s390.cc +++ b/deps/v8/src/codegen/s390/assembler-s390.cc @@ -351,7 +351,8 @@ void Assembler::AllocateAndInstallRequestedHeapObjects(Isolate* isolate) { Assembler::Assembler(const AssemblerOptions& options, std::unique_ptr buffer) - : AssemblerBase(options, std::move(buffer)) { + : AssemblerBase(options, std::move(buffer)), + scratch_register_list_(ip.bit()) { reloc_info_writer.Reposition(buffer_start_ + buffer_->size(), pc_); last_bound_pos_ = 0; relocations_.reserve(128); @@ -636,8 +637,7 @@ void Assembler::branchOnCond(Condition c, int branch_offset, bool is_bound) { // Exception-generating instructions and debugging support. // Stops with a non-negative code less than kNumOfWatchedStops support // enabling/disabling and a counter feature. See simulator-s390.h . 
-void Assembler::stop(const char* msg, Condition cond, int32_t code, - CRegister cr) { +void Assembler::stop(Condition cond, int32_t code, CRegister cr) { if (cond != al) { Label skip; b(NegateCondition(cond), &skip, Label::kNear); @@ -831,6 +831,23 @@ void Assembler::EmitRelocations() { } } +UseScratchRegisterScope::UseScratchRegisterScope(Assembler* assembler) + : assembler_(assembler), + old_available_(*assembler->GetScratchRegisterList()) {} + +UseScratchRegisterScope::~UseScratchRegisterScope() { + *assembler_->GetScratchRegisterList() = old_available_; +} + +Register UseScratchRegisterScope::Acquire() { + RegList* available = assembler_->GetScratchRegisterList(); + DCHECK_NOT_NULL(available); + DCHECK_NE(*available, 0); + int index = static_cast(base::bits::CountTrailingZeros32(*available)); + Register reg = Register::from_code(index); + *available &= ~reg.bit(); + return reg; +} } // namespace internal } // namespace v8 #endif // V8_TARGET_ARCH_S390 diff --git a/deps/v8/src/codegen/s390/assembler-s390.h b/deps/v8/src/codegen/s390/assembler-s390.h index e22c037a312855..0653e79b67cf20 100644 --- a/deps/v8/src/codegen/s390/assembler-s390.h +++ b/deps/v8/src/codegen/s390/assembler-s390.h @@ -307,7 +307,6 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase { // in the code, so the serializer should not step forwards in memory after // a target is resolved and written. static constexpr int kSpecialTargetSize = 0; - // Number of bytes for instructions used to store pointer sized constant. #if V8_TARGET_ARCH_S390X static constexpr int kBytesForPtrConstant = 12; // IIHF + IILF @@ -315,6 +314,8 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase { static constexpr int kBytesForPtrConstant = 6; // IILF #endif + RegList* GetScratchRegisterList() { return &scratch_register_list_; } + // --------------------------------------------------------------------------- // Code generation @@ -1261,8 +1262,8 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase { void larl(Register r, Label* l); // Exception-generating instructions and debugging support - void stop(const char* msg, Condition cond = al, - int32_t code = kDefaultStopCode, CRegister cr = cr7); + void stop(Condition cond = al, int32_t code = kDefaultStopCode, + CRegister cr = cr7); void bkpt(uint32_t imm16); // v5 and above @@ -1376,6 +1377,9 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase { RelocInfoWriter reloc_info_writer; std::vector relocations_; + // Scratch registers available for use by the Assembler. + RegList scratch_register_list_; + // The bound position, before this we cannot do instruction elimination. int last_bound_pos_; @@ -1455,6 +1459,7 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase { friend class RegExpMacroAssemblerS390; friend class RelocInfo; friend class EnsureSpace; + friend class UseScratchRegisterScope; }; class EnsureSpace { @@ -1462,6 +1467,24 @@ class EnsureSpace { explicit EnsureSpace(Assembler* assembler) { assembler->CheckBuffer(); } }; +class V8_EXPORT_PRIVATE UseScratchRegisterScope { + public: + explicit UseScratchRegisterScope(Assembler* assembler); + ~UseScratchRegisterScope(); + + Register Acquire(); + + // Check if we have registers available to acquire. 
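For reference, the Acquire()/old_available_ machinery above is plain bitmask bookkeeping over a RegList; a self-contained model, with GCC/Clang's __builtin_ctz standing in for base::bits::CountTrailingZeros32:

```cpp
#include <cassert>
#include <cstdint>

using RegList = uint32_t;  // one bit per register code

int Acquire(RegList* available) {
  assert(*available != 0);               // mirrors DCHECK_NE(*available, 0)
  int code = __builtin_ctz(*available);  // lowest set bit = lowest free reg
  *available &= *available - 1;          // clear it: register is now taken
  return code;
}

bool CanAcquire(RegList available) { return available != 0; }
```

The scope's destructor simply restores the previous mask, so nested scopes hand registers back automatically.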
+ bool CanAcquire() const { return *assembler_->GetScratchRegisterList() != 0; } + + private: + friend class Assembler; + friend class TurboAssembler; + + Assembler* assembler_; + RegList old_available_; +}; + } // namespace internal } // namespace v8 diff --git a/deps/v8/src/codegen/s390/code-stubs-s390.cc b/deps/v8/src/codegen/s390/code-stubs-s390.cc deleted file mode 100644 index f85c3099439024..00000000000000 --- a/deps/v8/src/codegen/s390/code-stubs-s390.cc +++ /dev/null @@ -1,27 +0,0 @@ -// Copyright 2014 the V8 project authors. All rights reserved. -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -#if V8_TARGET_ARCH_S390 - -#include "src/api/api-arguments-inl.h" -#include "src/base/bits.h" -#include "src/code-stubs.h" -#include "src/codegen/assembler-inl.h" -#include "src/codegen/macro-assembler.h" -#include "src/execution/frame-constants.h" -#include "src/execution/frames.h" -#include "src/execution/isolate.h" -#include "src/ic/ic.h" -#include "src/ic/stub-cache.h" -#include "src/init/bootstrapper.h" -#include "src/objects/api-callbacks.h" -#include "src/regexp/jsregexp.h" -#include "src/regexp/regexp-macro-assembler.h" -#include "src/runtime/runtime.h" - -namespace v8 { -namespace internal {} // namespace internal -} // namespace v8 - -#endif // V8_TARGET_ARCH_S390 diff --git a/deps/v8/src/codegen/s390/macro-assembler-s390.cc b/deps/v8/src/codegen/s390/macro-assembler-s390.cc index ff94fa839e1d0f..f6c2314a84b8b8 100644 --- a/deps/v8/src/codegen/s390/macro-assembler-s390.cc +++ b/deps/v8/src/codegen/s390/macro-assembler-s390.cc @@ -440,7 +440,7 @@ void MacroAssembler::RecordWriteField(Register object, int offset, Label ok; AndP(r0, dst, Operand(kPointerSize - 1)); beq(&ok, Label::kNear); - stop("Unaligned cell in write barrier"); + stop(); bind(&ok); } @@ -1670,15 +1670,15 @@ void TurboAssembler::Check(Condition cond, AbortReason reason, CRegister cr) { void TurboAssembler::Abort(AbortReason reason) { Label abort_start; bind(&abort_start); - const char* msg = GetAbortReason(reason); #ifdef DEBUG + const char* msg = GetAbortReason(reason); RecordComment("Abort message: "); RecordComment(msg); #endif // Avoid emitting call to builtin if requested. if (trap_on_abort()) { - stop(msg); + stop(); return; } @@ -4332,20 +4332,24 @@ void TurboAssembler::JumpIfLessThan(Register x, int32_t y, Label* dest) { blt(dest); } -void TurboAssembler::CallBuiltinPointer(Register builtin_pointer) { +void TurboAssembler::LoadEntryFromBuiltinIndex(Register builtin_index) { STATIC_ASSERT(kSystemPointerSize == 8); STATIC_ASSERT(kSmiShiftSize == 31); STATIC_ASSERT(kSmiTagSize == 1); STATIC_ASSERT(kSmiTag == 0); - // The builtin_pointer register contains the builtin index as a Smi. + // The builtin_index register contains the builtin index as a Smi. // Untagging is folded into the indexing operand below. 
- ShiftRightArithP(builtin_pointer, builtin_pointer, + ShiftRightArithP(builtin_index, builtin_index, Operand(kSmiShift - kSystemPointerSizeLog2)); - AddP(builtin_pointer, builtin_pointer, + AddP(builtin_index, builtin_index, Operand(IsolateData::builtin_entry_table_offset())); - LoadP(builtin_pointer, MemOperand(kRootRegister, builtin_pointer)); - Call(builtin_pointer); + LoadP(builtin_index, MemOperand(kRootRegister, builtin_index)); +} + +void TurboAssembler::CallBuiltinByIndex(Register builtin_index) { + LoadEntryFromBuiltinIndex(builtin_index); + Call(builtin_index); } void TurboAssembler::LoadCodeObjectEntry(Register destination, diff --git a/deps/v8/src/codegen/s390/macro-assembler-s390.h b/deps/v8/src/codegen/s390/macro-assembler-s390.h index ba870874c88db0..52f668d1755a2f 100644 --- a/deps/v8/src/codegen/s390/macro-assembler-s390.h +++ b/deps/v8/src/codegen/s390/macro-assembler-s390.h @@ -166,11 +166,14 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase { void Call(Label* target); + // Load the builtin given by the Smi in |builtin_index| into the same + // register. + void LoadEntryFromBuiltinIndex(Register builtin_index); void LoadCodeObjectEntry(Register destination, Register code_object) override; void CallCodeObject(Register code_object) override; void JumpCodeObject(Register code_object) override; - void CallBuiltinPointer(Register builtin_pointer) override; + void CallBuiltinByIndex(Register builtin_index) override; // Register move. May do nothing if the registers are identical. void Move(Register dst, Smi smi) { LoadSmiLiteral(dst, smi); } diff --git a/deps/v8/src/codegen/safepoint-table.h b/deps/v8/src/codegen/safepoint-table.h index 066f0123fc7ccb..fccce1a7a69122 100644 --- a/deps/v8/src/codegen/safepoint-table.h +++ b/deps/v8/src/codegen/safepoint-table.h @@ -5,8 +5,8 @@ #ifndef V8_CODEGEN_SAFEPOINT_TABLE_H_ #define V8_CODEGEN_SAFEPOINT_TABLE_H_ +#include "src/base/memory.h" #include "src/common/assert-scope.h" -#include "src/common/v8memory.h" #include "src/utils/allocation.h" #include "src/utils/utils.h" #include "src/zone/zone-chunk-list.h" @@ -76,22 +76,23 @@ class SafepointTable { unsigned GetPcOffset(unsigned index) const { DCHECK(index < length_); - return Memory(GetPcOffsetLocation(index)); + return base::Memory(GetPcOffsetLocation(index)); } int GetTrampolinePcOffset(unsigned index) const { DCHECK(index < length_); - return Memory(GetTrampolineLocation(index)); + return base::Memory(GetTrampolineLocation(index)); } unsigned find_return_pc(unsigned pc_offset); SafepointEntry GetEntry(unsigned index) const { DCHECK(index < length_); - unsigned deopt_index = Memory(GetEncodedInfoLocation(index)); - uint8_t* bits = &Memory(entries_ + (index * entry_size_)); + unsigned deopt_index = + base::Memory(GetEncodedInfoLocation(index)); + uint8_t* bits = &base::Memory(entries_ + (index * entry_size_)); int trampoline_pc = - has_deopt_ ? Memory(GetTrampolineLocation(index)) : -1; + has_deopt_ ? base::Memory(GetTrampolineLocation(index)) : -1; return SafepointEntry(deopt_index, bits, trampoline_pc); } diff --git a/deps/v8/src/codegen/source-position-table.cc b/deps/v8/src/codegen/source-position-table.cc index 6c0aa36b27673a..e10cc075714e62 100644 --- a/deps/v8/src/codegen/source-position-table.cc +++ b/deps/v8/src/codegen/source-position-table.cc @@ -31,7 +31,7 @@ class MoreBit : public BitField8 {}; class ValueBits : public BitField8 {}; // Helper: Add the offsets from 'other' to 'value'. Also set is_statement. 
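The base::Memory<T> calls that the safepoint-table hunk above switches to are, modulo alignment checking, a typed reinterpretation of a raw address; roughly:

```cpp
#include <cstdint>

using Address = uintptr_t;

// Approximation of base::Memory<T> (the src/base/memory.h replacement for
// the old v8memory.h helpers): a mutable, typed view over an address the
// caller guarantees is valid and suitably aligned.
template <typename T>
T& Memory(Address addr) {
  return *reinterpret_cast<T*>(addr);
}
```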
-void AddAndSetEntry(PositionTableEntry& value, +void AddAndSetEntry(PositionTableEntry& value, // NOLINT(runtime/references) const PositionTableEntry& other) { value.code_offset += other.code_offset; value.source_position += other.source_position; @@ -39,7 +39,7 @@ void AddAndSetEntry(PositionTableEntry& value, } // Helper: Subtract the offsets from 'other' from 'value'. -void SubtractFromEntry(PositionTableEntry& value, +void SubtractFromEntry(PositionTableEntry& value, // NOLINT(runtime/references) const PositionTableEntry& other) { value.code_offset -= other.code_offset; value.source_position -= other.source_position; @@ -47,7 +47,8 @@ void SubtractFromEntry(PositionTableEntry& value, // Helper: Encode an integer. template <typename T> -void EncodeInt(std::vector<byte>& bytes, T value) { +void EncodeInt(std::vector<byte>& bytes, // NOLINT(runtime/references) + T value) { using unsigned_type = typename std::make_unsigned<T>::type; // Zig-zag encoding. static const int kShift = sizeof(T) * kBitsPerByte - 1; @@ -65,7 +66,8 @@ void EncodeInt(std::vector<byte>& bytes, T value) { } // Encode a PositionTableEntry. -void EncodeEntry(std::vector<byte>& bytes, const PositionTableEntry& entry) { +void EncodeEntry(std::vector<byte>& bytes, // NOLINT(runtime/references) + const PositionTableEntry& entry) { // We only accept ascending code offsets. DCHECK_GE(entry.code_offset, 0); // Since code_offset is not negative, we use sign to encode is_statement. @@ -113,8 +115,9 @@ Vector VectorFromByteArray(ByteArray byte_array) { } #ifdef ENABLE_SLOW_DCHECKS -void CheckTableEquals(std::vector<PositionTableEntry>& raw_entries, - SourcePositionTableIterator& encoded) { +void CheckTableEquals( + std::vector<PositionTableEntry>& raw_entries, // NOLINT(runtime/references) + SourcePositionTableIterator& encoded) { // NOLINT(runtime/references) // Brute force testing: Record all positions and decode // the entire table to verify they are identical. auto raw = raw_entries.begin(); diff --git a/deps/v8/src/codegen/tick-counter.cc b/deps/v8/src/codegen/tick-counter.cc new file mode 100644 index 00000000000000..2e72ae0e864ddb --- /dev/null +++ b/deps/v8/src/codegen/tick-counter.cc @@ -0,0 +1,23 @@ +// Copyright 2019 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#include "src/codegen/tick-counter.h" + +#include "src/base/logging.h" +#include "src/base/macros.h" + +namespace v8 { +namespace internal { + +void TickCounter::DoTick() { + ++ticks_; + // Magical number to detect performance bugs or compiler divergence. + // Selected as being roughly 10x of what's needed frequently. + constexpr size_t kMaxTicks = 100000000; + USE(kMaxTicks); + DCHECK_LT(ticks_, kMaxTicks); +} + +} // namespace internal +} // namespace v8 diff --git a/deps/v8/src/codegen/tick-counter.h b/deps/v8/src/codegen/tick-counter.h new file mode 100644 index 00000000000000..8d6c966bb05075 --- /dev/null +++ b/deps/v8/src/codegen/tick-counter.h @@ -0,0 +1,28 @@ +// Copyright 2019 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#ifndef V8_CODEGEN_TICK_COUNTER_H_ +#define V8_CODEGEN_TICK_COUNTER_H_ + +#include <cstddef> + +namespace v8 { +namespace internal { + +// A deterministic correlate of time, used to detect performance or +// divergence bugs in Turbofan. DoTick() should be called frequently +// throughout the compilation.
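The intended call pattern for the class below: each compiler phase ticks once per unit of work, giving a deterministic, platform-independent progress count that the DCHECK in DoTick() can bound. A compilable mirror with a hypothetical phase driver (RunPhase and work_items are illustrative, not V8 API):

```cpp
#include <cstddef>

class TickCounter {  // mirror of the class defined in the hunk below
 public:
  void DoTick() { ++ticks_; }
  size_t CurrentTicks() const { return ticks_; }

 private:
  size_t ticks_ = 0;
};

// Hypothetical phase loop: one tick per processed item, so a runaway
// phase is caught by the tick bound rather than by wall-clock time.
void RunPhase(TickCounter& ticks, int work_items) {
  for (int i = 0; i < work_items; ++i) ticks.DoTick();
}
```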
+class TickCounter { + public: + void DoTick(); + size_t CurrentTicks() const { return ticks_; } + + private: + size_t ticks_ = 0; +}; + +} // namespace internal +} // namespace v8 + +#endif // V8_CODEGEN_TICK_COUNTER_H_ diff --git a/deps/v8/src/codegen/turbo-assembler.h b/deps/v8/src/codegen/turbo-assembler.h index afdef22fe7fbd2..2f058eda19aa7a 100644 --- a/deps/v8/src/codegen/turbo-assembler.h +++ b/deps/v8/src/codegen/turbo-assembler.h @@ -50,9 +50,9 @@ class V8_EXPORT_PRIVATE TurboAssemblerBase : public Assembler { void set_has_frame(bool v) { has_frame_ = v; } bool has_frame() const { return has_frame_; } - // Calls the given builtin. If builtins are embedded, the trampoline Code - // object on the heap is not used. - virtual void CallBuiltinPointer(Register builtin_pointer) = 0; + // Calls the builtin given by the Smi in |builtin|. If builtins are embedded, + // the trampoline Code object on the heap is not used. + virtual void CallBuiltinByIndex(Register builtin_index) = 0; // Calls/jumps to the given Code object. If builtins are embedded, the // trampoline Code object on the heap is not used. diff --git a/deps/v8/src/codegen/x64/assembler-x64-inl.h b/deps/v8/src/codegen/x64/assembler-x64-inl.h index 67cf648c04f03d..f5d0c0ffcf528c 100644 --- a/deps/v8/src/codegen/x64/assembler-x64-inl.h +++ b/deps/v8/src/codegen/x64/assembler-x64-inl.h @@ -8,7 +8,7 @@ #include "src/codegen/x64/assembler-x64.h" #include "src/base/cpu.h" -#include "src/common/v8memory.h" +#include "src/base/memory.h" #include "src/debug/debug.h" #include "src/objects/objects-inl.h" @@ -246,7 +246,7 @@ Handle Assembler::code_target_object_handle_at(Address pc) { } Handle Assembler::compressed_embedded_object_handle_at(Address pc) { - return GetCompressedEmbeddedObject(ReadUnalignedValue(pc)); + return GetEmbeddedObject(ReadUnalignedValue(pc)); } Address Assembler::runtime_entry_at(Address pc) { diff --git a/deps/v8/src/codegen/x64/assembler-x64.cc b/deps/v8/src/codegen/x64/assembler-x64.cc index 3236b0f52c7398..1d28f1d45dd304 100644 --- a/deps/v8/src/codegen/x64/assembler-x64.cc +++ b/deps/v8/src/codegen/x64/assembler-x64.cc @@ -78,6 +78,7 @@ void CpuFeatures::ProbeImpl(bool cross_compile) { // Only use statically determined features for cross compile (snapshot). if (cross_compile) return; + if (cpu.has_sse42() && FLAG_enable_sse4_2) supported_ |= 1u << SSE4_2; if (cpu.has_sse41() && FLAG_enable_sse4_1) { supported_ |= 1u << SSE4_1; supported_ |= 1u << SSSE3; @@ -1257,6 +1258,13 @@ void Assembler::emit_cmpxchg(Operand dst, Register src, int size) { emit_operand(src, dst); } +void Assembler::mfence() { + EnsureSpace ensure_space(this); + emit(0x0F); + emit(0xAE); + emit(0xF0); +} + void Assembler::lfence() { EnsureSpace ensure_space(this); emit(0x0F); @@ -1512,19 +1520,20 @@ void Assembler::j(Condition cc, Handle target, RelocInfo::Mode rmode) { emitl(code_target_index); } -void Assembler::jmp_rel(int offset) { +void Assembler::jmp_rel(int32_t offset) { EnsureSpace ensure_space(this); - const int short_size = sizeof(int8_t); - const int long_size = sizeof(int32_t); - --offset; // This is how jumps are specified on x64. - if (is_int8(offset - short_size) && !predictable_code_size()) { - // 1110 1011 #8-bit disp. + // The offset is encoded relative to the next instruction. 
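Concretely: |offset| is measured from the start of the jmp, but the CPU adds the encoded displacement to the address of the *next* instruction, so the encoder subtracts the instruction's own length first (2 bytes for EB disp8, 5 bytes for E9 disp32). A sketch of the short-form feasibility check:

```cpp
#include <cstdint>

bool FitsShortJmp(int32_t offset) {
  constexpr int32_t kShortJmpLen = 2;  // EB opcode + disp8
  int32_t disp = offset - kShortJmpLen;
  return disp >= INT8_MIN && disp <= INT8_MAX;  // what is_int8() checks
}
```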
+ constexpr int32_t kShortJmpDisplacement = 1 + sizeof(int8_t); + constexpr int32_t kNearJmpDisplacement = 1 + sizeof(int32_t); + DCHECK_LE(std::numeric_limits::min() + kNearJmpDisplacement, offset); + if (is_int8(offset - kShortJmpDisplacement) && !predictable_code_size()) { + // 0xEB #8-bit disp. emit(0xEB); - emit((offset - short_size) & 0xFF); + emit(offset - kShortJmpDisplacement); } else { - // 1110 1001 #32-bit disp. + // 0xE9 #32-bit disp. emit(0xE9); - emitl(offset - long_size); + emitl(offset - kNearJmpDisplacement); } } @@ -2005,84 +2014,37 @@ void Assembler::emit_not(Operand dst, int size) { } void Assembler::Nop(int n) { + DCHECK_LE(0, n); // The recommended muti-byte sequences of NOP instructions from the Intel 64 // and IA-32 Architectures Software Developer's Manual. // - // Length Assembly Byte Sequence - // 2 bytes 66 NOP 66 90H - // 3 bytes NOP DWORD ptr [EAX] 0F 1F 00H - // 4 bytes NOP DWORD ptr [EAX + 00H] 0F 1F 40 00H - // 5 bytes NOP DWORD ptr [EAX + EAX*1 + 00H] 0F 1F 44 00 00H - // 6 bytes 66 NOP DWORD ptr [EAX + EAX*1 + 00H] 66 0F 1F 44 00 00H - // 7 bytes NOP DWORD ptr [EAX + 00000000H] 0F 1F 80 00 00 00 00H - // 8 bytes NOP DWORD ptr [EAX + EAX*1 + 00000000H] 0F 1F 84 00 00 00 00 00H - // 9 bytes 66 NOP DWORD ptr [EAX + EAX*1 + 66 0F 1F 84 00 00 00 00 - // 00000000H] 00H - - EnsureSpace ensure_space(this); - while (n > 0) { - switch (n) { - case 2: - emit(0x66); - V8_FALLTHROUGH; - case 1: - emit(0x90); - return; - case 3: - emit(0x0F); - emit(0x1F); - emit(0x00); - return; - case 4: - emit(0x0F); - emit(0x1F); - emit(0x40); - emit(0x00); - return; - case 6: - emit(0x66); - V8_FALLTHROUGH; - case 5: - emit(0x0F); - emit(0x1F); - emit(0x44); - emit(0x00); - emit(0x00); - return; - case 7: - emit(0x0F); - emit(0x1F); - emit(0x80); - emit(0x00); - emit(0x00); - emit(0x00); - emit(0x00); - return; - default: - case 11: - emit(0x66); - n--; - V8_FALLTHROUGH; - case 10: - emit(0x66); - n--; - V8_FALLTHROUGH; - case 9: - emit(0x66); - n--; - V8_FALLTHROUGH; - case 8: - emit(0x0F); - emit(0x1F); - emit(0x84); - emit(0x00); - emit(0x00); - emit(0x00); - emit(0x00); - emit(0x00); - n -= 8; - } - } + // Len Assembly Byte Sequence + // 2 66 NOP 66 90H + // 3 NOP DWORD ptr [EAX] 0F 1F 00H + // 4 NOP DWORD ptr [EAX + 00H] 0F 1F 40 00H + // 5 NOP DWORD ptr [EAX + EAX*1 + 00H] 0F 1F 44 00 00H + // 6 66 NOP DWORD ptr [EAX + EAX*1 + 00H] 66 0F 1F 44 00 00H + // 7 NOP DWORD ptr [EAX + 00000000H] 0F 1F 80 00 00 00 00H + // 8 NOP DWORD ptr [EAX + EAX*1 + 00000000H] 0F 1F 84 00 00 00 00 00H + // 9 66 NOP DWORD ptr [EAX + EAX*1 + 00000000H] 66 0F 1F 84 00 00 00 00 00H + + constexpr const char* kNopSequences = + "\x66\x90" // length 1 (@1) / 2 (@0) + "\x0F\x1F\x00" // length 3 (@2) + "\x0F\x1F\x40\x00" // length 4 (@5) + "\x66\x0F\x1F\x44\x00\x00" // length 5 (@10) / 6 (@9) + "\x0F\x1F\x80\x00\x00\x00\x00" // length 7 (@15) + "\x66\x0F\x1F\x84\x00\x00\x00\x00\x00"; // length 8 (@23) / 9 (@22) + constexpr int8_t kNopOffsets[10] = {0, 1, 0, 2, 5, 10, 9, 15, 23, 22}; + + do { + EnsureSpace ensure_space(this); + int nop_bytes = std::min(n, 9); + const char* sequence = kNopSequences + kNopOffsets[nop_bytes]; + memcpy(pc_, sequence, nop_bytes); + pc_ += nop_bytes; + n -= nop_bytes; + } while (n); } void Assembler::popq(Register dst) { @@ -2883,6 +2845,18 @@ void Assembler::movd(Register dst, XMMRegister src) { } void Assembler::movq(XMMRegister dst, Register src) { + // Mixing AVX and non-AVX is expensive, catch those cases + DCHECK(!IsEnabled(AVX)); + EnsureSpace ensure_space(this); + 
emit(0x66); + emit_rex_64(dst, src); + emit(0x0F); + emit(0x6E); + emit_sse_operand(dst, src); +} + +void Assembler::movq(XMMRegister dst, Operand src) { + // Mixing AVX and non-AVX is expensive, catch those cases DCHECK(!IsEnabled(AVX)); EnsureSpace ensure_space(this); emit(0x66); @@ -2893,6 +2867,7 @@ void Assembler::movq(XMMRegister dst, Register src) { } void Assembler::movq(Register dst, XMMRegister src) { + // Mixing AVX and non-AVX is expensive, catch those cases DCHECK(!IsEnabled(AVX)); EnsureSpace ensure_space(this); emit(0x66); @@ -2903,6 +2878,7 @@ void Assembler::movq(Register dst, XMMRegister src) { } void Assembler::movq(XMMRegister dst, XMMRegister src) { + // Mixing AVX and non-AVX is expensive, catch those cases DCHECK(!IsEnabled(AVX)); EnsureSpace ensure_space(this); if (dst.low_bits() == 4) { @@ -3068,6 +3044,42 @@ void Assembler::pextrd(Operand dst, XMMRegister src, int8_t imm8) { emit(imm8); } +void Assembler::pextrq(Register dst, XMMRegister src, int8_t imm8) { + DCHECK(IsEnabled(SSE4_1)); + EnsureSpace ensure_space(this); + emit(0x66); + emit_rex_64(src, dst); + emit(0x0F); + emit(0x3A); + emit(0x16); + emit_sse_operand(src, dst); + emit(imm8); +} + +void Assembler::pinsrq(XMMRegister dst, Register src, int8_t imm8) { + DCHECK(IsEnabled(SSE4_1)); + EnsureSpace ensure_space(this); + emit(0x66); + emit_rex_64(dst, src); + emit(0x0F); + emit(0x3A); + emit(0x22); + emit_sse_operand(dst, src); + emit(imm8); +} + +void Assembler::pinsrq(XMMRegister dst, Operand src, int8_t imm8) { + DCHECK(IsEnabled(SSE4_1)); + EnsureSpace ensure_space(this); + emit(0x66); + emit_rex_64(dst, src); + emit(0x0F); + emit(0x3A); + emit(0x22); + emit_sse_operand(dst, src); + emit(imm8); +} + void Assembler::pinsrd(XMMRegister dst, Register src, int8_t imm8) { DCHECK(IsEnabled(SSE4_1)); EnsureSpace ensure_space(this); @@ -4135,6 +4147,22 @@ void Assembler::vmovq(Register dst, XMMRegister src) { emit_sse_operand(src, dst); } +void Assembler::vmovdqu(XMMRegister dst, Operand src) { + DCHECK(IsEnabled(AVX)); + EnsureSpace ensure_space(this); + emit_vex_prefix(dst, xmm0, src, kL128, kF3, k0F, kWIG); + emit(0x6F); + emit_sse_operand(dst, src); +} + +void Assembler::vmovdqu(Operand src, XMMRegister dst) { + DCHECK(IsEnabled(AVX)); + EnsureSpace ensure_space(this); + emit_vex_prefix(dst, xmm0, src, kL128, kF3, k0F, kWIG); + emit(0x7F); + emit_sse_operand(dst, src); +} + void Assembler::vinstr(byte op, XMMRegister dst, XMMRegister src1, XMMRegister src2, SIMDPrefix pp, LeadingOpcode m, VexW w) { @@ -4654,6 +4682,30 @@ void Assembler::sse4_instr(XMMRegister dst, Operand src, byte prefix, emit_sse_operand(dst, src); } +void Assembler::sse4_2_instr(XMMRegister dst, XMMRegister src, byte prefix, + byte escape1, byte escape2, byte opcode) { + DCHECK(IsEnabled(SSE4_2)); + EnsureSpace ensure_space(this); + emit(prefix); + emit_optional_rex_32(dst, src); + emit(escape1); + emit(escape2); + emit(opcode); + emit_sse_operand(dst, src); +} + +void Assembler::sse4_2_instr(XMMRegister dst, Operand src, byte prefix, + byte escape1, byte escape2, byte opcode) { + DCHECK(IsEnabled(SSE4_2)); + EnsureSpace ensure_space(this); + emit(prefix); + emit_optional_rex_32(dst, src); + emit(escape1); + emit(escape2); + emit(opcode); + emit_sse_operand(dst, src); +} + void Assembler::lddqu(XMMRegister dst, Operand src) { DCHECK(IsEnabled(SSE3)); EnsureSpace ensure_space(this); diff --git a/deps/v8/src/codegen/x64/assembler-x64.h b/deps/v8/src/codegen/x64/assembler-x64.h index dc6acb67f4fcfa..acb4fce82c1ab3 100644 --- 
a/deps/v8/src/codegen/x64/assembler-x64.h +++ b/deps/v8/src/codegen/x64/assembler-x64.h @@ -952,6 +952,23 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase { SSE4_INSTRUCTION_LIST(DECLARE_SSE4_INSTRUCTION) #undef DECLARE_SSE4_INSTRUCTION + // SSE4.2 + void sse4_2_instr(XMMRegister dst, XMMRegister src, byte prefix, byte escape1, + byte escape2, byte opcode); + void sse4_2_instr(XMMRegister dst, Operand src, byte prefix, byte escape1, + byte escape2, byte opcode); +#define DECLARE_SSE4_2_INSTRUCTION(instruction, prefix, escape1, escape2, \ + opcode) \ + void instruction(XMMRegister dst, XMMRegister src) { \ + sse4_2_instr(dst, src, 0x##prefix, 0x##escape1, 0x##escape2, 0x##opcode); \ + } \ + void instruction(XMMRegister dst, Operand src) { \ + sse4_2_instr(dst, src, 0x##prefix, 0x##escape1, 0x##escape2, 0x##opcode); \ + } + + SSE4_2_INSTRUCTION_LIST(DECLARE_SSE4_2_INSTRUCTION) +#undef DECLARE_SSE4_2_INSTRUCTION + #define DECLARE_SSE34_AVX_INSTRUCTION(instruction, prefix, escape1, escape2, \ opcode) \ void v##instruction(XMMRegister dst, XMMRegister src1, XMMRegister src2) { \ @@ -969,6 +986,7 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase { void movd(XMMRegister dst, Operand src); void movd(Register dst, XMMRegister src); void movq(XMMRegister dst, Register src); + void movq(XMMRegister dst, Operand src); void movq(Register dst, XMMRegister src); void movq(XMMRegister dst, XMMRegister src); @@ -1068,12 +1086,15 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase { void pextrw(Operand dst, XMMRegister src, int8_t imm8); void pextrd(Register dst, XMMRegister src, int8_t imm8); void pextrd(Operand dst, XMMRegister src, int8_t imm8); + void pextrq(Register dst, XMMRegister src, int8_t imm8); void pinsrb(XMMRegister dst, Register src, int8_t imm8); void pinsrb(XMMRegister dst, Operand src, int8_t imm8); void pinsrw(XMMRegister dst, Register src, int8_t imm8); void pinsrw(XMMRegister dst, Operand src, int8_t imm8); void pinsrd(XMMRegister dst, Register src, int8_t imm8); void pinsrd(XMMRegister dst, Operand src, int8_t imm8); + void pinsrq(XMMRegister dst, Register src, int8_t imm8); + void pinsrq(XMMRegister dst, Operand src, int8_t imm8); void roundss(XMMRegister dst, XMMRegister src, RoundingMode mode); void roundsd(XMMRegister dst, XMMRegister src, RoundingMode mode); @@ -1284,6 +1305,8 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase { } void vmovsd(XMMRegister dst, Operand src) { vsd(0x10, dst, xmm0, src); } void vmovsd(Operand dst, XMMRegister src) { vsd(0x11, src, xmm0, dst); } + void vmovdqu(XMMRegister dst, Operand src); + void vmovdqu(Operand dst, XMMRegister src); #define AVX_SP_3(instr, opcode) \ AVX_S_3(instr, opcode) \ @@ -1723,6 +1746,7 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase { void rorxl(Register dst, Register src, byte imm8); void rorxl(Register dst, Operand src, byte imm8); + void mfence(); void lfence(); void pause(); diff --git a/deps/v8/src/codegen/x64/constants-x64.h b/deps/v8/src/codegen/x64/constants-x64.h index 0e43b05034899a..775abecd9fd355 100644 --- a/deps/v8/src/codegen/x64/constants-x64.h +++ b/deps/v8/src/codegen/x64/constants-x64.h @@ -12,7 +12,8 @@ namespace internal { // Actual value of root register is offset from the root array's start // to take advantage of negative displacement values. // TODO(sigurds): Choose best value. -constexpr int kRootRegisterBias = 128; +// TODO(ishell): Choose best value for ptr-compr. +constexpr int kRootRegisterBias = kSystemPointerSize == kTaggedSize ? 
128 : 0; constexpr size_t kMaxPCRelativeCodeRangeInMB = 2048; } // namespace internal diff --git a/deps/v8/src/codegen/x64/macro-assembler-x64.cc b/deps/v8/src/codegen/x64/macro-assembler-x64.cc index 493c7110098f66..f13811b1aec361 100644 --- a/deps/v8/src/codegen/x64/macro-assembler-x64.cc +++ b/deps/v8/src/codegen/x64/macro-assembler-x64.cc @@ -317,15 +317,14 @@ void TurboAssembler::DecompressTaggedPointer(Register destination, void TurboAssembler::DecompressRegisterAnyTagged(Register destination, Register scratch) { - if (kUseBranchlessPtrDecompression) { + if (kUseBranchlessPtrDecompressionInGeneratedCode) { // Branchlessly compute |masked_root|: // masked_root = HAS_SMI_TAG(destination) ? 0 : kRootRegister; STATIC_ASSERT((kSmiTagSize == 1) && (kSmiTag < 32)); Register masked_root = scratch; - movl(masked_root, destination); - andl(masked_root, Immediate(kSmiTagMask)); - negq(masked_root); - andq(masked_root, kRootRegister); + xorq(masked_root, masked_root); + Condition smi = CheckSmi(destination); + cmovq(NegateCondition(smi), masked_root, kRootRegister); // Now this add operation will either leave the value unchanged if it is // a smi or add the isolate root if it is a heap object. addq(destination, masked_root); @@ -917,7 +916,7 @@ void TurboAssembler::Cvtqui2ss(XMMRegister dst, Register src) { orq(kScratchRegister, Immediate(1)); bind(&msb_not_set); Cvtqsi2ss(dst, kScratchRegister); - addss(dst, dst); + Addss(dst, dst); bind(&done); } @@ -941,7 +940,7 @@ void TurboAssembler::Cvtqui2sd(XMMRegister dst, Register src) { orq(kScratchRegister, Immediate(1)); bind(&msb_not_set); Cvtqsi2sd(dst, kScratchRegister); - addsd(dst, dst); + Addsd(dst, dst); bind(&done); } @@ -1042,11 +1041,11 @@ void ConvertFloatToUint64(TurboAssembler* tasm, Register dst, // and convert it again to see if it is within the uint64 range. if (is_double) { tasm->Move(kScratchDoubleReg, -9223372036854775808.0); - tasm->addsd(kScratchDoubleReg, src); + tasm->Addsd(kScratchDoubleReg, src); tasm->Cvttsd2siq(dst, kScratchDoubleReg); } else { tasm->Move(kScratchDoubleReg, -9223372036854775808.0f); - tasm->addss(kScratchDoubleReg, src); + tasm->Addss(kScratchDoubleReg, src); tasm->Cvttss2siq(dst, kScratchDoubleReg); } tasm->testq(dst, dst); @@ -1468,8 +1467,9 @@ void TurboAssembler::Move(Register result, Handle object, } } if (RelocInfo::IsCompressedEmbeddedObject(rmode)) { - int compressed_embedded_object_index = AddCompressedEmbeddedObject(object); - movl(result, Immediate(compressed_embedded_object_index, rmode)); + EmbeddedObjectIndex index = AddEmbeddedObject(object); + DCHECK(is_uint32(index)); + movl(result, Immediate(static_cast(index), rmode)); } else { DCHECK(RelocInfo::IsFullEmbeddedObject(rmode)); movq(result, Immediate64(object.address(), rmode)); @@ -1607,29 +1607,33 @@ void TurboAssembler::Call(Handle code_object, RelocInfo::Mode rmode) { call(code_object, rmode); } -void TurboAssembler::CallBuiltinPointer(Register builtin_pointer) { +Operand TurboAssembler::EntryFromBuiltinIndexAsOperand(Register builtin_index) { #if defined(V8_COMPRESS_POINTERS) || defined(V8_31BIT_SMIS_ON_64BIT_ARCH) STATIC_ASSERT(kSmiShiftSize == 0); STATIC_ASSERT(kSmiTagSize == 1); STATIC_ASSERT(kSmiTag == 0); - // The builtin_pointer register contains the builtin index as a Smi. + // The builtin_index register contains the builtin index as a Smi. // Untagging is folded into the indexing operand below (we use times_4 instead // of times_8 since smis are already shifted by one). 
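The parenthetical above is the whole trick: with 31-bit Smis on a 64-bit target the tagged value is already index << 1, so a times_4 address-mode scale yields index * 8, a pointer-sized table offset with no separate untag instruction. In scalar form (illustrative names):

```cpp
#include <cstdint>

// Tagged Smi with 31-bit payload: tagged_smi == index << 1.
uint64_t EntrySlotOffset(uint32_t tagged_smi, uint64_t table_offset) {
  return static_cast<uint64_t>(tagged_smi) * 4 + table_offset;  // index * 8
}
```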
- Call(Operand(kRootRegister, builtin_pointer, times_4, - IsolateData::builtin_entry_table_offset())); + return Operand(kRootRegister, builtin_index, times_4, + IsolateData::builtin_entry_table_offset()); #else // defined(V8_COMPRESS_POINTERS) || defined(V8_31BIT_SMIS_ON_64BIT_ARCH) STATIC_ASSERT(kSmiShiftSize == 31); STATIC_ASSERT(kSmiTagSize == 1); STATIC_ASSERT(kSmiTag == 0); - // The builtin_pointer register contains the builtin index as a Smi. - SmiUntag(builtin_pointer, builtin_pointer); - Call(Operand(kRootRegister, builtin_pointer, times_8, - IsolateData::builtin_entry_table_offset())); + // The builtin_index register contains the builtin index as a Smi. + SmiUntag(builtin_index, builtin_index); + return Operand(kRootRegister, builtin_index, times_8, + IsolateData::builtin_entry_table_offset()); #endif // defined(V8_COMPRESS_POINTERS) || defined(V8_31BIT_SMIS_ON_64BIT_ARCH) } +void TurboAssembler::CallBuiltinByIndex(Register builtin_index) { + Call(EntryFromBuiltinIndexAsOperand(builtin_index)); +} + void TurboAssembler::LoadCodeObjectEntry(Register destination, Register code_object) { // Code objects are called differently depending on whether we are generating @@ -1767,6 +1771,46 @@ void TurboAssembler::Pinsrd(XMMRegister dst, Operand src, int8_t imm8) { } } +void TurboAssembler::Psllq(XMMRegister dst, byte imm8) { + if (CpuFeatures::IsSupported(AVX)) { + CpuFeatureScope scope(this, AVX); + vpsllq(dst, dst, imm8); + } else { + DCHECK(!IsEnabled(AVX)); + psllq(dst, imm8); + } +} + +void TurboAssembler::Psrlq(XMMRegister dst, byte imm8) { + if (CpuFeatures::IsSupported(AVX)) { + CpuFeatureScope scope(this, AVX); + vpsrlq(dst, dst, imm8); + } else { + DCHECK(!IsEnabled(AVX)); + psrlq(dst, imm8); + } +} + +void TurboAssembler::Pslld(XMMRegister dst, byte imm8) { + if (CpuFeatures::IsSupported(AVX)) { + CpuFeatureScope scope(this, AVX); + vpslld(dst, dst, imm8); + } else { + DCHECK(!IsEnabled(AVX)); + pslld(dst, imm8); + } +} + +void TurboAssembler::Psrld(XMMRegister dst, byte imm8) { + if (CpuFeatures::IsSupported(AVX)) { + CpuFeatureScope scope(this, AVX); + vpsrld(dst, dst, imm8); + } else { + DCHECK(!IsEnabled(AVX)); + psrld(dst, imm8); + } +} + void TurboAssembler::Lzcntl(Register dst, Register src) { if (CpuFeatures::IsSupported(LZCNT)) { CpuFeatureScope scope(this, LZCNT); diff --git a/deps/v8/src/codegen/x64/macro-assembler-x64.h b/deps/v8/src/codegen/x64/macro-assembler-x64.h index a5b8e60ec53a2b..139690bb8df9b0 100644 --- a/deps/v8/src/codegen/x64/macro-assembler-x64.h +++ b/deps/v8/src/codegen/x64/macro-assembler-x64.h @@ -80,7 +80,7 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase { template struct AvxHelper { Assembler* assm; - // Call an method where the AVX version expects the dst argument to be + // Call a method where the AVX version expects the dst argument to be // duplicated. template @@ -93,7 +93,7 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase { } } - // Call an method where the AVX version expects no duplicated dst argument. + // Call a method where the AVX version expects no duplicated dst argument. template void emit(Dst dst, Args... 
args) { @@ -127,11 +127,9 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase { AVX_OP(Movmskpd, movmskpd) AVX_OP(Movss, movss) AVX_OP(Movsd, movsd) + AVX_OP(Movdqu, movdqu) AVX_OP(Pcmpeqd, pcmpeqd) - AVX_OP(Pslld, pslld) - AVX_OP(Psllq, psllq) - AVX_OP(Psrld, psrld) - AVX_OP(Psrlq, psrlq) + AVX_OP(Addss, addss) AVX_OP(Addsd, addsd) AVX_OP(Mulsd, mulsd) AVX_OP(Andps, andps) @@ -344,7 +342,8 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase { void Call(ExternalReference ext); void Call(Label* target) { call(target); } - void CallBuiltinPointer(Register builtin_pointer) override; + Operand EntryFromBuiltinIndexAsOperand(Register builtin_index); + void CallBuiltinByIndex(Register builtin_index) override; void LoadCodeObjectEntry(Register destination, Register code_object) override; void CallCodeObject(Register code_object) override; @@ -368,6 +367,11 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase { void Pinsrd(XMMRegister dst, Register src, int8_t imm8); void Pinsrd(XMMRegister dst, Operand src, int8_t imm8); + void Psllq(XMMRegister dst, byte imm8); + void Psrlq(XMMRegister dst, byte imm8); + void Pslld(XMMRegister dst, byte imm8); + void Psrld(XMMRegister dst, byte imm8); + void CompareRoot(Register with, RootIndex index); void CompareRoot(Operand with, RootIndex index); diff --git a/deps/v8/src/codegen/x64/sse-instr.h b/deps/v8/src/codegen/x64/sse-instr.h index ee20483cfeff2c..56618d20e0efbf 100644 --- a/deps/v8/src/codegen/x64/sse-instr.h +++ b/deps/v8/src/codegen/x64/sse-instr.h @@ -21,6 +21,7 @@ V(paddb, 66, 0F, FC) \ V(paddw, 66, 0F, FD) \ V(paddd, 66, 0F, FE) \ + V(paddq, 66, 0F, D4) \ V(paddsb, 66, 0F, EC) \ V(paddsw, 66, 0F, ED) \ V(paddusb, 66, 0F, DC) \ @@ -46,6 +47,7 @@ V(psubb, 66, 0F, F8) \ V(psubw, 66, 0F, F9) \ V(psubd, 66, 0F, FA) \ + V(psubq, 66, 0F, FB) \ V(psubsb, 66, 0F, E8) \ V(psubsw, 66, 0F, E9) \ V(psubusb, 66, 0F, D8) \ @@ -66,6 +68,7 @@ V(psignd, 66, 0F, 38, 0A) #define SSE4_INSTRUCTION_LIST(V) \ + V(pcmpeqq, 66, 0F, 38, 29) \ V(ptest, 66, 0F, 38, 17) \ V(pmovsxbw, 66, 0F, 38, 20) \ V(pmovsxwd, 66, 0F, 38, 23) \ @@ -82,4 +85,6 @@ V(pmaxud, 66, 0F, 38, 3F) \ V(pmulld, 66, 0F, 38, 40) +#define SSE4_2_INSTRUCTION_LIST(V) V(pcmpgtq, 66, 0F, 38, 37) + #endif // V8_CODEGEN_X64_SSE_INSTR_H_ diff --git a/deps/v8/src/common/OWNERS b/deps/v8/src/common/OWNERS new file mode 100644 index 00000000000000..3f9de7e204c675 --- /dev/null +++ b/deps/v8/src/common/OWNERS @@ -0,0 +1,3 @@ +file://COMMON_OWNERS + +# COMPONENT: Blink>JavaScript diff --git a/deps/v8/src/common/globals.h b/deps/v8/src/common/globals.h index 5d4b957e84fc01..8d1bf5dfcc1e72 100644 --- a/deps/v8/src/common/globals.h +++ b/deps/v8/src/common/globals.h @@ -212,15 +212,6 @@ constexpr size_t kReservedCodeRangePages = 0; STATIC_ASSERT(kSystemPointerSize == (1 << kSystemPointerSizeLog2)); -// This macro is used for declaring and defining HeapObject getter methods that -// are a bit more efficient for the pointer compression case than the default -// parameterless getters because isolate root doesn't have to be computed from -// arbitrary field address but it comes "for free" instead. -// These alternatives are always defined (in order to avoid #ifdef mess but -// are not supposed to be used when pointer compression is not enabled. 
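The SSE4_2_INSTRUCTION_LIST/DECLARE_SSE4_2_INSTRUCTION pairing added in sse-instr.h and assembler-x64.h above is the usual X-macro idiom: one list, expanded once per consumer, keeps names and encodings in lockstep. A minimal standalone illustration (demo names and a trimmed list, not the real tables):

```cpp
#include <cstdint>

#define DEMO_INSTRUCTION_LIST(V) \
  V(pcmpeqq, 0x29)               \
  V(pcmpgtq, 0x37)

enum DemoInstr {
#define DEMO_ENUM(name, opcode) k_##name,
  DEMO_INSTRUCTION_LIST(DEMO_ENUM)
#undef DEMO_ENUM
};

constexpr uint8_t kDemoOpcodes[] = {
#define DEMO_OPCODE(name, opcode) opcode,
    DEMO_INSTRUCTION_LIST(DEMO_OPCODE)
#undef DEMO_OPCODE
};
```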
-#define ROOT_VALUE isolate_for_root -#define ROOT_PARAM Isolate* const ROOT_VALUE - #ifdef V8_COMPRESS_POINTERS static_assert( kSystemPointerSize == kInt64Size, @@ -234,11 +225,6 @@ constexpr int kTaggedSizeLog2 = 2; using Tagged_t = int32_t; using AtomicTagged_t = base::Atomic32; -#define DEFINE_ROOT_VALUE(isolate) ROOT_PARAM = isolate -#define WITH_ROOT_PARAM(...) ROOT_PARAM, ##__VA_ARGS__ -#define WITH_ROOT_VALUE(...) ROOT_VALUE, ##__VA_ARGS__ -#define WITH_ROOT(isolate_for_root, ...) isolate_for_root, ##__VA_ARGS__ - #else constexpr int kTaggedSize = kSystemPointerSize; @@ -249,16 +235,12 @@ constexpr int kTaggedSizeLog2 = kSystemPointerSizeLog2; using Tagged_t = Address; using AtomicTagged_t = base::AtomicWord; -#define DEFINE_ROOT_VALUE(isolate) -#define WITH_ROOT_PARAM(...) __VA_ARGS__ -#define WITH_ROOT_VALUE(...) __VA_ARGS__ -#define WITH_ROOT(isolate_for_root, ...) __VA_ARGS__ - #endif // V8_COMPRESS_POINTERS // Defines whether the branchless or branchful implementation of pointer // decompression should be used. -constexpr bool kUseBranchlessPtrDecompression = true; +constexpr bool kUseBranchlessPtrDecompressionInRuntime = false; +constexpr bool kUseBranchlessPtrDecompressionInGeneratedCode = false; STATIC_ASSERT(kTaggedSize == (1 << kTaggedSizeLog2)); STATIC_ASSERT((kTaggedSize == 8) == TAGGED_SIZE_8_BYTES); @@ -667,7 +649,6 @@ struct SlotTraits; template <> struct SlotTraits { using TObjectSlot = FullObjectSlot; - using TMapWordSlot = FullObjectSlot; using TMaybeObjectSlot = FullMaybeObjectSlot; using THeapObjectSlot = FullHeapObjectSlot; }; @@ -678,12 +659,10 @@ template <> struct SlotTraits { #ifdef V8_COMPRESS_POINTERS using TObjectSlot = CompressedObjectSlot; - using TMapWordSlot = CompressedMapWordSlot; using TMaybeObjectSlot = CompressedMaybeObjectSlot; using THeapObjectSlot = CompressedHeapObjectSlot; #else using TObjectSlot = FullObjectSlot; - using TMapWordSlot = FullObjectSlot; using TMaybeObjectSlot = FullMaybeObjectSlot; using THeapObjectSlot = FullHeapObjectSlot; #endif @@ -693,10 +672,6 @@ struct SlotTraits { // holding Object value (smi or strong heap object). using ObjectSlot = SlotTraits::TObjectSlot; -// An MapWordSlot instance describes a kTaggedSize-sized on-heap field ("slot") -// holding HeapObject (strong heap object) value or a forwarding pointer. -using MapWordSlot = SlotTraits::TMapWordSlot; - // A MaybeObjectSlot instance describes a kTaggedSize-sized on-heap field // ("slot") holding MaybeObject (smi or weak heap object or strong heap object). using MaybeObjectSlot = SlotTraits::TMaybeObjectSlot; @@ -1193,7 +1168,7 @@ enum InitializationFlag : uint8_t { kNeedsInitialization, kCreatedInitialized }; enum MaybeAssignedFlag : uint8_t { kNotAssigned, kMaybeAssigned }; -enum ParseErrorType { kSyntaxError = 0, kReferenceError = 1 }; +enum RequiresBrandCheckFlag : uint8_t { kNoBrandCheck, kRequiresBrandCheck }; enum class InterpreterPushArgsMode : unsigned { kArrayFunction, @@ -1554,6 +1529,12 @@ constexpr int kFunctionLiteralIdTopLevel = 0; constexpr int kSmallOrderedHashSetMinCapacity = 4; constexpr int kSmallOrderedHashMapMinCapacity = 4; +// Opaque data type for identifying stack frames. Used extensively +// by the debugger. +// ID_MIN_VALUE and ID_MAX_VALUE are specified to ensure that enumeration type +// has correct value range (see Issue 830 for more details). 
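Pinning kMinInt/kMaxInt into the enum (next hunk) is the classic way to force an enum without a fixed underlying type to cover the full int range, so arbitrary frame ids can be cast in and back out safely. With INT32_MIN/INT32_MAX standing in for V8's kMinInt/kMaxInt:

```cpp
#include <cstdint>

// Without a fixed underlying type, an enum's value range is only as wide
// as its enumerators; the MIN/MAX sentinels widen it to all of int.
enum DemoFrameId { DEMO_ID_MIN = INT32_MIN, DEMO_ID_MAX = INT32_MAX, DEMO_NO_ID = 0 };

DemoFrameId FrameIdFromInt(int id) { return static_cast<DemoFrameId>(id); }
```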
+enum StackFrameId { ID_MIN_VALUE = kMinInt, ID_MAX_VALUE = kMaxInt, NO_ID = 0 }; + } // namespace internal } // namespace v8 diff --git a/deps/v8/src/execution/message-template.h b/deps/v8/src/common/message-template.h similarity index 98% rename from deps/v8/src/execution/message-template.h rename to deps/v8/src/common/message-template.h index ae88aa4411c814..fedbfa5a100497 100644 --- a/deps/v8/src/execution/message-template.h +++ b/deps/v8/src/common/message-template.h @@ -2,8 +2,8 @@ // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. -#ifndef V8_EXECUTION_MESSAGE_TEMPLATE_H_ -#define V8_EXECUTION_MESSAGE_TEMPLATE_H_ +#ifndef V8_COMMON_MESSAGE_TEMPLATE_H_ +#define V8_COMMON_MESSAGE_TEMPLATE_H_ #include "src/base/logging.h" @@ -90,6 +90,7 @@ namespace internal { T(ImmutablePrototypeSet, \ "Immutable prototype object '%' cannot have their prototype set") \ T(ImportCallNotNewExpression, "Cannot use new with import") \ + T(ImportOutsideModule, "Cannot use import statement outside a module") \ T(ImportMetaOutsideModule, "Cannot use 'import.meta' outside a module") \ T(ImportMissingSpecifier, "import() requires a specifier") \ T(IncompatibleMethodReceiver, "Method % called on incompatible receiver %") \ @@ -415,6 +416,7 @@ namespace internal { "Read of private field % from an object which did not contain the field") \ T(InvalidPrivateFieldWrite, \ "Write of private field % to an object which did not contain the field") \ + T(InvalidPrivateMethodWrite, "Private method '%' is not writable") \ T(JsonParseUnexpectedEOS, "Unexpected end of JSON input") \ T(JsonParseUnexpectedToken, "Unexpected token % in JSON at position %") \ T(JsonParseUnexpectedTokenNumber, "Unexpected number in JSON at position %") \ @@ -495,7 +497,7 @@ namespace internal { T(UnexpectedSuper, "'super' keyword unexpected here") \ T(UnexpectedNewTarget, "new.target expression is not allowed here") \ T(UnexpectedTemplateString, "Unexpected template string") \ - T(UnexpectedToken, "Unexpected token %") \ + T(UnexpectedToken, "Unexpected token '%'") \ T(UnexpectedTokenUnaryExponentiation, \ "Unary operator used immediately before exponentiation expression. 
" \ "Parenthesis must be used to disambiguate operator precedence") \ @@ -562,6 +564,8 @@ namespace internal { T(TraceEventPhaseError, "Trace event phase must be a number.") \ T(TraceEventIDError, "Trace event id must be a number.") \ /* Weak refs */ \ + T(WeakRefsUnregisterTokenMustBeObject, \ + "unregisterToken ('%') must be an object") \ T(WeakRefsCleanupMustBeCallable, \ "FinalizationGroup: cleanup must be callable") \ T(WeakRefsRegisterTargetMustBeObject, \ @@ -576,16 +580,16 @@ enum class MessageTemplate { #define TEMPLATE(NAME, STRING) k##NAME, MESSAGE_TEMPLATES(TEMPLATE) #undef TEMPLATE - kLastMessage + kMessageCount }; inline MessageTemplate MessageTemplateFromInt(int message_id) { - DCHECK_LE(0, message_id); - DCHECK_LT(message_id, static_cast(MessageTemplate::kLastMessage)); + DCHECK_LT(static_cast(message_id), + static_cast(MessageTemplate::kMessageCount)); return static_cast(message_id); } } // namespace internal } // namespace v8 -#endif // V8_EXECUTION_MESSAGE_TEMPLATE_H_ +#endif // V8_COMMON_MESSAGE_TEMPLATE_H_ diff --git a/deps/v8/src/common/ptr-compr-inl.h b/deps/v8/src/common/ptr-compr-inl.h index fd0f97e904a2c0..00a79bb29107ff 100644 --- a/deps/v8/src/common/ptr-compr-inl.h +++ b/deps/v8/src/common/ptr-compr-inl.h @@ -25,8 +25,12 @@ V8_INLINE Address GetIsolateRoot(TOnHeapAddress on_heap_addr); template <> V8_INLINE Address GetIsolateRoot
<Address>(Address on_heap_addr) { + // We subtract 1 here in order to let the compiler generate addition of a + // 32-bit signed constant instead of a 64-bit constant (the problem is that + // 2Gb looks like a negative 32-bit value). It's correct because we will + // never use the leftmost address of the V8 heap as |on_heap_addr|. return RoundDown<kPtrComprIsolateRootAlignment>(on_heap_addr + - kPtrComprIsolateRootBias); + kPtrComprIsolateRootBias - 1); } template <> @@ -34,17 +38,10 @@ V8_INLINE Address GetIsolateRoot<Isolate*>(Isolate* isolate) { return isolate->isolate_root(); } -template <> -V8_INLINE Address GetIsolateRoot<const Isolate*>(const Isolate* isolate) { - return isolate->isolate_root(); -} - // Decompresses smi value. V8_INLINE Address DecompressTaggedSigned(Tagged_t raw_value) { - // Current compression scheme requires |raw_value| to be sign-extended - // from int32_t to intptr_t. - intptr_t value = static_cast<intptr_t>(static_cast<int32_t>(raw_value)); - return static_cast<Address>(value); + // For runtime code the upper 32 bits of the Smi value do not matter. + return static_cast<Address>(raw_value); } // Decompresses weak or strong heap object pointer or forwarding pointer, @@ -63,18 +60,18 @@ V8_INLINE Address DecompressTaggedAny(TOnHeapAddress on_heap_addr, Tagged_t raw_value) { - // Current compression scheme requires |raw_value| to be sign-extended - // from int32_t to intptr_t. - intptr_t value = static_cast<intptr_t>(static_cast<int32_t>(raw_value)); - if (kUseBranchlessPtrDecompression) { + if (kUseBranchlessPtrDecompressionInRuntime) { + // Current compression scheme requires |raw_value| to be sign-extended + // from int32_t to intptr_t. + intptr_t value = static_cast<intptr_t>(static_cast<int32_t>(raw_value)); // |root_mask| is 0 if the |value| was a smi or -1 otherwise. Address root_mask = static_cast<Address>(-(value & kSmiTagMask)); Address root_or_zero = root_mask & GetIsolateRoot(on_heap_addr); return root_or_zero + static_cast<Address>(value); } else { - return HAS_SMI_TAG(value) - ? static_cast<Address>(value) - : (GetIsolateRoot(on_heap_addr) + static_cast<Address>
    (value)); + return HAS_SMI_TAG(raw_value) + ? DecompressTaggedSigned(raw_value) + : DecompressTaggedPointer(on_heap_addr, raw_value); } } diff --git a/deps/v8/src/compiler-dispatcher/OWNERS b/deps/v8/src/compiler-dispatcher/OWNERS new file mode 100644 index 00000000000000..9664a4857ca4e8 --- /dev/null +++ b/deps/v8/src/compiler-dispatcher/OWNERS @@ -0,0 +1,7 @@ +ahaas@chromium.org +jkummerow@chromium.org +leszeks@chromium.org +mstarzinger@chromium.org +rmcilroy@chromium.org + +# COMPONENT: Blink>JavaScript>Compiler diff --git a/deps/v8/src/compiler/OWNERS b/deps/v8/src/compiler/OWNERS index 39beced3f3c21b..50e2af71290003 100644 --- a/deps/v8/src/compiler/OWNERS +++ b/deps/v8/src/compiler/OWNERS @@ -1,5 +1,3 @@ -set noparent - bmeurer@chromium.org jarin@chromium.org mstarzinger@chromium.org @@ -19,6 +17,7 @@ per-file wasm-*=gdeepti@chromium.org per-file int64-lowering.*=ahaas@chromium.org -per-file simd-scalar-lowering.*=aseemgarg@chromium.org +per-file simd-scalar-lowering.*=bbudge@chromium.org +per-file simd-scalar-lowering.*=gdeepti@chromium.org # COMPONENT: Blink>JavaScript>Compiler diff --git a/deps/v8/src/compiler/STYLE b/deps/v8/src/compiler/STYLE deleted file mode 100644 index ae41e3f989feaf..00000000000000 --- a/deps/v8/src/compiler/STYLE +++ /dev/null @@ -1,29 +0,0 @@ -Compiler Coding Style -===================== - -Coding style for the TurboFan compiler generally follows the Google C++ Style -Guide and the Chromium Coding Style. The notes below are usually just extensions -beyond what the Google style guide already says. If this document doesn't -mention a rule, follow the Google C++ style. - - -TODOs ------ -We use the following convention for putting TODOs into the code: - - * A TODO(turbofan) implies a performance improvement opportunity. - * A TODO(name) implies an incomplete implementation. - - -Use of C++11 auto keyword -------------------------- -Use auto to avoid type names that are just clutter. 
Continue to use manifest -type declarations when it helps readability, and never use auto for anything -but local variables, in particular auto should only be used where it is obvious -from context what the type is: - - for (auto block : x->blocks()) // clearly a Block of some kind - for (auto instr : x->instructions()) // clearly an Instruction of some kind - - for (auto b : x->predecessors()) // less clear, better to make it explicit - for (BasicBlock* b : x->predecessors()) // now clear diff --git a/deps/v8/src/compiler/access-builder.cc b/deps/v8/src/compiler/access-builder.cc index 726a81a465c34a..a369de48859ef9 100644 --- a/deps/v8/src/compiler/access-builder.cc +++ b/deps/v8/src/compiler/access-builder.cc @@ -14,9 +14,9 @@ #include "src/objects/heap-number.h" #include "src/objects/js-collection.h" #include "src/objects/js-generator.h" -#include "src/objects/module.h" #include "src/objects/objects-inl.h" #include "src/objects/ordered-hash-table.h" +#include "src/objects/source-text-module.h" namespace v8 { namespace internal { @@ -71,6 +71,26 @@ FieldAccess AccessBuilder::ForBigIntBitfield() { return access; } +// static +FieldAccess AccessBuilder::ForBigIntOptionalPadding() { + DCHECK_EQ(FIELD_SIZE(BigInt::kOptionalPaddingOffset), 4); + FieldAccess access = { + kTaggedBase, BigInt::kOptionalPaddingOffset, MaybeHandle(), + MaybeHandle(), TypeCache::Get()->kInt32, MachineType::Uint32(), + kNoWriteBarrier}; + return access; +} + +// static +FieldAccess AccessBuilder::ForBigIntLeastSignificantDigit64() { + DCHECK_EQ(BigInt::SizeFor(1) - BigInt::SizeFor(0), 8); + FieldAccess access = { + kTaggedBase, BigInt::kDigitsOffset, MaybeHandle(), + MaybeHandle(), TypeCache::Get()->kBigUint64, MachineType::Uint64(), + kNoWriteBarrier}; + return access; +} + // static FieldAccess AccessBuilder::ForJSObjectPropertiesOrHash() { FieldAccess access = { @@ -626,7 +646,7 @@ FieldAccess AccessBuilder::ForMapPrototype() { // static FieldAccess AccessBuilder::ForModuleRegularExports() { FieldAccess access = { - kTaggedBase, Module::kRegularExportsOffset, + kTaggedBase, SourceTextModule::kRegularExportsOffset, Handle(), MaybeHandle(), Type::OtherInternal(), MachineType::TypeCompressedTaggedPointer(), kPointerWriteBarrier}; @@ -636,7 +656,7 @@ FieldAccess AccessBuilder::ForModuleRegularExports() { // static FieldAccess AccessBuilder::ForModuleRegularImports() { FieldAccess access = { - kTaggedBase, Module::kRegularImportsOffset, + kTaggedBase, SourceTextModule::kRegularImportsOffset, Handle(), MaybeHandle(), Type::OtherInternal(), MachineType::TypeCompressedTaggedPointer(), kPointerWriteBarrier}; @@ -847,7 +867,7 @@ FieldAccess AccessBuilder::ForJSStringIteratorIndex() { // static FieldAccess AccessBuilder::ForValue() { FieldAccess access = { - kTaggedBase, JSValue::kValueOffset, + kTaggedBase, JSPrimitiveWrapper::kValueOffset, Handle(), MaybeHandle(), Type::NonInternal(), MachineType::TypeCompressedTagged(), kFullWriteBarrier}; diff --git a/deps/v8/src/compiler/access-builder.h b/deps/v8/src/compiler/access-builder.h index e38c487b1a7dde..e3a17fe257d405 100644 --- a/deps/v8/src/compiler/access-builder.h +++ b/deps/v8/src/compiler/access-builder.h @@ -42,6 +42,15 @@ class V8_EXPORT_PRIVATE AccessBuilder final // Provides access to BigInt's bit field. static FieldAccess ForBigIntBitfield(); + // Provides access to BigInt's 32 bit padding that is placed after the + // bitfield on 64 bit architectures without pointer compression. Do not use + // this on 32 bit architectures. 
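The two accessors documented above assume this 64-bit, non-pointer-compressed layout for the BigInt payload that follows the map header; a sketch with illustrative field names, matching the DCHECKs in the access-builder.cc hunk:

```cpp
#include <cstdint>

struct BigIntPayload64 {
  uint32_t bitfield;   // sign and length; ForBigIntBitfield()
  uint32_t padding;    // ForBigIntOptionalPadding(); FIELD_SIZE == 4
  uint64_t digits[1];  // ForBigIntLeastSignificantDigit64() reads digits[0]
};
// Matches DCHECK_EQ(BigInt::SizeFor(1) - BigInt::SizeFor(0), 8): one digit
// grows the object by 8 bytes, starting right after the 4-byte padding.
static_assert(sizeof(BigIntPayload64) == 16, "least significant digit at +8");
```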
+ static FieldAccess ForBigIntOptionalPadding(); + + // Provides access to BigInt's least significant digit on 64 bit + // architectures. Do not use this on 32 bit architectures. + static FieldAccess ForBigIntLeastSignificantDigit64(); + // Provides access to JSObject::properties() field. static FieldAccess ForJSObjectPropertiesOrHash(); @@ -263,7 +272,7 @@ class V8_EXPORT_PRIVATE AccessBuilder final // Provides access to JSStringIterator::index() field. static FieldAccess ForJSStringIteratorIndex(); - // Provides access to JSValue::value() field. + // Provides access to JSPrimitiveWrapper::value() field. static FieldAccess ForValue(); // Provides access to Cell::value() field. diff --git a/deps/v8/src/compiler/access-info.cc b/deps/v8/src/compiler/access-info.cc index 713484f7348c85..6fc9e8214e5b49 100644 --- a/deps/v8/src/compiler/access-info.cc +++ b/deps/v8/src/compiler/access-info.cc @@ -8,6 +8,7 @@ #include "src/builtins/accessors.h" #include "src/compiler/compilation-dependencies.h" +#include "src/compiler/compilation-dependency.h" #include "src/compiler/type-cache.h" #include "src/ic/call-optimization.h" #include "src/logging/counters.h" @@ -78,7 +79,7 @@ PropertyAccessInfo PropertyAccessInfo::NotFound(Zone* zone, // static PropertyAccessInfo PropertyAccessInfo::DataField( Zone* zone, Handle receiver_map, - ZoneVector&& dependencies, + ZoneVector&& dependencies, FieldIndex field_index, Representation field_representation, Type field_type, MaybeHandle field_map, MaybeHandle holder, MaybeHandle transition_map) { @@ -90,7 +91,7 @@ PropertyAccessInfo PropertyAccessInfo::DataField( // static PropertyAccessInfo PropertyAccessInfo::DataConstant( Zone* zone, Handle receiver_map, - ZoneVector&& dependencies, + ZoneVector&& dependencies, FieldIndex field_index, Representation field_representation, Type field_type, MaybeHandle field_map, MaybeHandle holder, MaybeHandle transition_map) { @@ -156,8 +157,7 @@ PropertyAccessInfo::PropertyAccessInfo( FieldIndex field_index, Representation field_representation, Type field_type, MaybeHandle field_map, ZoneVector>&& receiver_maps, - ZoneVector&& - unrecorded_dependencies) + ZoneVector&& unrecorded_dependencies) : kind_(kind), receiver_maps_(receiver_maps), unrecorded_dependencies_(std::move(unrecorded_dependencies)), @@ -258,11 +258,6 @@ bool PropertyAccessInfo::Merge(PropertyAccessInfo const* that, } } -Handle PropertyAccessInfo::export_cell() const { - DCHECK_EQ(kModuleExport, kind_); - return Handle::cast(constant_); -} - AccessInfoFactory::AccessInfoFactory(JSHeapBroker* broker, CompilationDependencies* dependencies, Zone* zone) @@ -336,11 +331,10 @@ PropertyAccessInfo AccessInfoFactory::ComputeDataFieldAccessInfo( Type field_type = Type::NonInternal(); MaybeHandle field_map; MapRef map_ref(broker(), map); - ZoneVector - unrecorded_dependencies(zone()); + ZoneVector unrecorded_dependencies(zone()); + map_ref.SerializeOwnDescriptor(descriptor); if (details_representation.IsSmi()) { field_type = Type::SignedSmall(); - map_ref.SerializeOwnDescriptor(descriptor); unrecorded_dependencies.push_back( dependencies()->FieldRepresentationDependencyOffTheRecord(map_ref, descriptor)); @@ -360,19 +354,23 @@ PropertyAccessInfo AccessInfoFactory::ComputeDataFieldAccessInfo( // The field type was cleared by the GC, so we don't know anything // about the contents now. 
} - map_ref.SerializeOwnDescriptor(descriptor); unrecorded_dependencies.push_back( dependencies()->FieldRepresentationDependencyOffTheRecord(map_ref, descriptor)); if (descriptors_field_type->IsClass()) { - unrecorded_dependencies.push_back( - dependencies()->FieldTypeDependencyOffTheRecord(map_ref, descriptor)); // Remember the field map, and try to infer a useful type. Handle map(descriptors_field_type->AsClass(), isolate()); field_type = Type::For(MapRef(broker(), map)); field_map = MaybeHandle(map); } + } else { + CHECK(details_representation.IsTagged()); } + // TODO(turbofan): We may want to do this only depending on the use + // of the access info. + unrecorded_dependencies.push_back( + dependencies()->FieldTypeDependencyOffTheRecord(map_ref, descriptor)); + PropertyConstness constness; if (details.IsReadOnly() && !details.IsConfigurable()) { constness = PropertyConstness::kConst; @@ -445,9 +443,6 @@ PropertyAccessInfo AccessInfoFactory::ComputeAccessorDescriptorAccessInfo( DCHECK_IMPLIES(lookup == CallOptimization::kHolderIsReceiver, holder.is_null()); DCHECK_IMPLIES(lookup == CallOptimization::kHolderFound, !holder.is_null()); - if (V8_UNLIKELY(TracingFlags::is_runtime_stats_enabled())) { - return PropertyAccessInfo::Invalid(zone()); - } } if (access_mode == AccessMode::kLoad) { Handle cached_property_name; @@ -569,7 +564,7 @@ PropertyAccessInfo AccessInfoFactory::ComputePropertyAccessInfo( if (map_prototype->map().is_deprecated()) { // Try to migrate the prototype object so we don't embed the deprecated // map into the optimized code. - JSObject::TryMigrateInstance(map_prototype); + JSObject::TryMigrateInstance(isolate(), map_prototype); } map = handle(map_prototype->map(), isolate()); holder = map_prototype; @@ -611,8 +606,7 @@ void AccessInfoFactory::ComputePropertyAccessInfos( void PropertyAccessInfo::RecordDependencies( CompilationDependencies* dependencies) { - for (CompilationDependencies::Dependency const* d : - unrecorded_dependencies_) { + for (CompilationDependency const* d : unrecorded_dependencies_) { dependencies->RecordDependency(d); } unrecorded_dependencies_.clear(); @@ -648,6 +642,8 @@ void AccessInfoFactory::MergePropertyAccessInfos( CHECK(!result->empty()); } +Isolate* AccessInfoFactory::isolate() const { return broker()->isolate(); } + namespace { Maybe GeneralizeElementsKind(ElementsKind this_kind, @@ -760,8 +756,7 @@ PropertyAccessInfo AccessInfoFactory::LookupTransition( Type field_type = Type::NonInternal(); MaybeHandle field_map; MapRef transition_map_ref(broker(), transition_map); - ZoneVector - unrecorded_dependencies(zone()); + ZoneVector unrecorded_dependencies(zone()); if (details_representation.IsSmi()) { field_type = Type::SignedSmall(); transition_map_ref.SerializeOwnDescriptor(number); @@ -796,6 +791,7 @@ PropertyAccessInfo AccessInfoFactory::LookupTransition( unrecorded_dependencies.push_back( dependencies()->TransitionDependencyOffTheRecord( MapRef(broker(), transition_map))); + transition_map_ref.SerializeBackPointer(); // For BuildPropertyStore. // Transitioning stores *may* store to const fields. The resulting // DataConstant access infos can be distinguished from later, i.e. redundant, // stores to the same constant field by the presence of a transition map. 
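The ...OffTheRecord() calls collected into unrecorded_dependencies_ above follow a two-phase pattern: speculative dependencies are gathered while an access info is computed and only committed via RecordDependencies() if the info is actually used. Schematically (a model, not the real CompilationDependencies API):

```cpp
#include <vector>

struct Dependency;  // opaque here

struct AccessInfoSketch {
  std::vector<const Dependency*> unrecorded;

  // Commit the collected dependencies to the compilation and clear the
  // local list, mirroring PropertyAccessInfo::RecordDependencies().
  void RecordDependencies(std::vector<const Dependency*>* committed) {
    for (const Dependency* d : unrecorded) committed->push_back(d);
    unrecorded.clear();
  }
};
```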
diff --git a/deps/v8/src/compiler/access-info.h b/deps/v8/src/compiler/access-info.h
index 3499069fc44fc6..4c7c3611df685c 100644
--- a/deps/v8/src/compiler/access-info.h
+++ b/deps/v8/src/compiler/access-info.h
@@ -8,7 +8,6 @@
 #include 
 
 #include "src/codegen/machine-type.h"
-#include "src/compiler/compilation-dependencies.h"
 #include "src/compiler/types.h"
 #include "src/objects/feedback-vector.h"
 #include "src/objects/field-index.h"
@@ -25,8 +24,10 @@ class Factory;
 namespace compiler {
 
 // Forward declarations.
+class CompilationDependencies;
+class CompilationDependency;
 class ElementAccessFeedback;
-class Type;
+class JSHeapBroker;
 class TypeCache;
 
 std::ostream& operator<<(std::ostream&, AccessMode);
@@ -74,16 +75,14 @@ class PropertyAccessInfo final {
                                      MaybeHandle<JSObject> holder);
   static PropertyAccessInfo DataField(
       Zone* zone, Handle<Map> receiver_map,
-      ZoneVector<CompilationDependencies::Dependency const*>&&
-          unrecorded_dependencies,
+      ZoneVector<CompilationDependency const*>&& unrecorded_dependencies,
       FieldIndex field_index, Representation field_representation,
       Type field_type, MaybeHandle<Map> field_map = MaybeHandle<Map>(),
       MaybeHandle<JSObject> holder = MaybeHandle<JSObject>(),
       MaybeHandle<Map> transition_map = MaybeHandle<Map>());
   static PropertyAccessInfo DataConstant(
       Zone* zone, Handle<Map> receiver_map,
-      ZoneVector<CompilationDependencies::Dependency const*>&&
-          unrecorded_dependencies,
+      ZoneVector<CompilationDependency const*>&& unrecorded_dependencies,
       FieldIndex field_index, Representation field_representation,
       Type field_type, MaybeHandle<Map> field_map, MaybeHandle<JSObject> holder,
       MaybeHandle<Map> transition_map = MaybeHandle<Map>());
@@ -113,9 +112,9 @@ class PropertyAccessInfo final {
   Kind kind() const { return kind_; }
 
   MaybeHandle<JSObject> holder() const {
-    // This CHECK tries to protect against using the access info without
-    // recording its dependencies first.
-    CHECK(unrecorded_dependencies_.empty());
+    // TODO(neis): There was a CHECK here that tries to protect against
+    // using the access info without recording its dependencies first.
+    // Find a more suitable place for it.
     return holder_;
   }
   MaybeHandle<Map> transition_map() const { return transition_map_; }
@@ -127,7 +126,6 @@ class PropertyAccessInfo final {
   ZoneVector<Handle<Map>> const& receiver_maps() const {
     return receiver_maps_;
   }
-  Handle<Cell> export_cell() const;
 
  private:
   explicit PropertyAccessInfo(Zone* zone);
@@ -136,17 +134,16 @@ class PropertyAccessInfo final {
   PropertyAccessInfo(Zone* zone, Kind kind, MaybeHandle<JSObject> holder,
                      Handle<Object> constant,
                      ZoneVector<Handle<Map>>&& receiver_maps);
-  PropertyAccessInfo(
-      Kind kind, MaybeHandle<JSObject> holder, MaybeHandle<Map> transition_map,
-      FieldIndex field_index, Representation field_representation,
-      Type field_type, MaybeHandle<Map> field_map,
-      ZoneVector<Handle<Map>>&& receiver_maps,
-      ZoneVector<CompilationDependencies::Dependency const*>&& dependencies);
+  PropertyAccessInfo(Kind kind, MaybeHandle<JSObject> holder,
+                     MaybeHandle<Map> transition_map, FieldIndex field_index,
+                     Representation field_representation, Type field_type,
+                     MaybeHandle<Map> field_map,
+                     ZoneVector<Handle<Map>>&& receiver_maps,
+                     ZoneVector<CompilationDependency const*>&& dependencies);
 
   Kind kind_;
   ZoneVector<Handle<Map>> receiver_maps_;
-  ZoneVector<CompilationDependencies::Dependency const*>
-      unrecorded_dependencies_;
+  ZoneVector<CompilationDependency const*> unrecorded_dependencies_;
   Handle<Object> constant_;
   MaybeHandle<Map> transition_map_;
   MaybeHandle<JSObject> holder_;
@@ -215,7 +212,7 @@ class AccessInfoFactory final {
   CompilationDependencies* dependencies() const { return dependencies_; }
   JSHeapBroker* broker() const { return broker_; }
-  Isolate* isolate() const { return broker()->isolate(); }
+  Isolate* isolate() const;
   Zone* zone() const { return zone_; }
 
   JSHeapBroker* const broker_;
diff --git a/deps/v8/src/compiler/add-type-assertions-reducer.cc b/deps/v8/src/compiler/add-type-assertions-reducer.cc
new file mode 100644
index 00000000000000..59d2fe68203ed7
--- /dev/null
+++ b/deps/v8/src/compiler/add-type-assertions-reducer.cc
@@ -0,0 +1,51 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/add-type-assertions-reducer.h"
+
+#include "src/compiler/node-properties.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+AddTypeAssertionsReducer::AddTypeAssertionsReducer(Editor* editor,
+                                                   JSGraph* jsgraph, Zone* zone)
+    : AdvancedReducer(editor),
+      jsgraph_(jsgraph),
+      visited_(jsgraph->graph()->NodeCount(), zone) {}
+
+AddTypeAssertionsReducer::~AddTypeAssertionsReducer() = default;
+
+Reduction AddTypeAssertionsReducer::Reduce(Node* node) {
+  if (node->opcode() == IrOpcode::kAssertType ||
+      node->opcode() == IrOpcode::kPhi || !NodeProperties::IsTyped(node) ||
+      visited_.Get(node)) {
+    return NoChange();
+  }
+  visited_.Set(node, true);
+
+  Type type = NodeProperties::GetType(node);
+  if (!type.IsRange()) {
+    return NoChange();
+  }
+
+  Node* assertion = graph()->NewNode(simplified()->AssertType(type), node);
+  NodeProperties::SetType(assertion, type);
+
+  for (Edge edge : node->use_edges()) {
+    Node* const user = edge.from();
+    DCHECK(!user->IsDead());
+    if (NodeProperties::IsValueEdge(edge) && user != assertion) {
+      edge.UpdateTo(assertion);
+      Revisit(user);
+    }
+  }
+
+  return NoChange();
+}
+
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
diff --git a/deps/v8/src/compiler/add-type-assertions-reducer.h b/deps/v8/src/compiler/add-type-assertions-reducer.h
new file mode 100644
index 00000000000000..36add040e1ce7c
--- /dev/null
+++ b/deps/v8/src/compiler/add-type-assertions-reducer.h
@@ -0,0 +1,45 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_ADD_TYPE_ASSERTIONS_REDUCER_H_
+#define V8_COMPILER_ADD_TYPE_ASSERTIONS_REDUCER_H_
+
+#include "src/common/globals.h"
+#include "src/compiler/graph-reducer.h"
+#include "src/compiler/js-graph.h"
+#include "src/compiler/node-aux-data.h"
+#include "src/compiler/simplified-operator.h"
+
+namespace v8 {
+namespace internal {
+
+namespace compiler {
+
+class V8_EXPORT_PRIVATE AddTypeAssertionsReducer final
+    : public NON_EXPORTED_BASE(AdvancedReducer) {
+ public:
+  AddTypeAssertionsReducer(Editor* editor, JSGraph* jsgraph, Zone* zone);
+  ~AddTypeAssertionsReducer() final;
+
+  const char* reducer_name() const override {
+    return "AddTypeAssertionsReducer";
+  }
+
+  Reduction Reduce(Node* node) final;
+
+ private:
+  JSGraph* const jsgraph_;
+  NodeAuxData<bool, false> visited_;
+
+  Graph* graph() { return jsgraph_->graph(); }
+  SimplifiedOperatorBuilder* simplified() { return jsgraph_->simplified(); }
+
+  DISALLOW_COPY_AND_ASSIGN(AddTypeAssertionsReducer);
+};
+
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_COMPILER_ADD_TYPE_ASSERTIONS_REDUCER_H_
diff --git a/deps/v8/src/compiler/backend/arm/code-generator-arm.cc b/deps/v8/src/compiler/backend/arm/code-generator-arm.cc
index d93053c64b8fb9..88a9c52a3339f5 100644
--- a/deps/v8/src/compiler/backend/arm/code-generator-arm.cc
+++ b/deps/v8/src/compiler/backend/arm/code-generator-arm.cc
@@ -130,6 +130,7 @@ class ArmOperandConverter final : public InstructionOperandConverter {
         return Operand::EmbeddedStringConstant(
             constant.ToDelayedStringConstant());
       case Constant::kInt64:
+      case Constant::kCompressedHeapObject:
       case Constant::kHeapObject:
       // TODO(dcarney): loading RPO constants on arm.
       case Constant::kRpoNumber:
@@ -308,9 +309,9 @@ Condition FlagsConditionToCondition(FlagsCondition condition) {
   UNREACHABLE();
 }
 
-void EmitWordLoadPoisoningIfNeeded(CodeGenerator* codegen,
-                                   InstructionCode opcode,
-                                   ArmOperandConverter& i) {
+void EmitWordLoadPoisoningIfNeeded(
+    CodeGenerator* codegen, InstructionCode opcode,
+    ArmOperandConverter& i) {  // NOLINT(runtime/references)
   const MemoryAccessMode access_mode =
       static_cast<MemoryAccessMode>(MiscField::decode(opcode));
   if (access_mode == kMemoryAccessPoisoned) {
@@ -319,9 +320,10 @@ void EmitWordLoadPoisoningIfNeeded(CodeGenerator* codegen,
   }
 }
 
-void ComputePoisonedAddressForLoad(CodeGenerator* codegen,
-                                   InstructionCode opcode,
-                                   ArmOperandConverter& i, Register address) {
+void ComputePoisonedAddressForLoad(
+    CodeGenerator* codegen, InstructionCode opcode,
+    ArmOperandConverter& i,  // NOLINT(runtime/references)
+    Register address) {
   DCHECK_EQ(kMemoryAccessPoisoned,
             static_cast<MemoryAccessMode>(MiscField::decode(opcode)));
   switch (AddressingModeField::decode(opcode)) {
@@ -711,8 +713,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
     }
     case kArchCallBuiltinPointer: {
       DCHECK(!instr->InputAt(0)->IsImmediate());
-      Register builtin_pointer = i.InputRegister(0);
-      __ CallBuiltinPointer(builtin_pointer);
+      Register builtin_index = i.InputRegister(0);
+      __ CallBuiltinByIndex(builtin_index);
       RecordCallPosition(instr);
       frame_access_state()->ClearSPDelta();
       break;
@@ -879,23 +881,21 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
       AssembleArchTableSwitch(instr);
       DCHECK_EQ(LeaveCC, i.OutputSBit());
       break;
-    case kArchDebugAbort:
+    case kArchAbortCSAAssert:
       DCHECK(i.InputRegister(0) == r1);
-      if (!frame_access_state()->has_frame()) {
+      {
        // We don't actually want to generate a pile of code for this, so just
        // claim there is a stack frame, without generating one.
        FrameScope scope(tasm(), StackFrame::NONE);
-        __ Call(isolate()->builtins()->builtin_handle(Builtins::kAbortJS),
-                RelocInfo::CODE_TARGET);
-      } else {
-        __ Call(isolate()->builtins()->builtin_handle(Builtins::kAbortJS),
-                RelocInfo::CODE_TARGET);
+        __ Call(
+            isolate()->builtins()->builtin_handle(Builtins::kAbortCSAAssert),
+            RelocInfo::CODE_TARGET);
       }
-      __ stop("kArchDebugAbort");
+      __ stop();
       unwinding_info_writer_.MarkBlockWillExit();
       break;
     case kArchDebugBreak:
-      __ stop("kArchDebugBreak");
+      __ stop();
       break;
     case kArchComment:
       __ RecordComment(reinterpret_cast<const char*>(i.InputInt32(0)));
@@ -1752,6 +1752,10 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
       }
       break;
     }
+    case kArmDmbIsh: {
+      __ dmb(ISH);
+      break;
+    }
    case kArmDsbIsb: {
      __ dsb(SY);
      __ isb(SY);
@@ -2588,6 +2592,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
       __ vpmax(NeonU32, scratch, src.low(), src.high());
       __ vpmax(NeonU32, scratch, scratch, scratch);
       __ ExtractLane(i.OutputRegister(), scratch, NeonS32, 0);
+      __ cmp(i.OutputRegister(), Operand(0));
+      __ mov(i.OutputRegister(), Operand(1), LeaveCC, ne);
       break;
     }
     case kArmS1x4AllTrue: {
@@ -2597,6 +2603,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
       __ vpmin(NeonU32, scratch, src.low(), src.high());
       __ vpmin(NeonU32, scratch, scratch, scratch);
       __ ExtractLane(i.OutputRegister(), scratch, NeonS32, 0);
+      __ cmp(i.OutputRegister(), Operand(0));
+      __ mov(i.OutputRegister(), Operand(1), LeaveCC, ne);
       break;
     }
     case kArmS1x8AnyTrue: {
@@ -2607,6 +2615,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
       __ vpmax(NeonU16, scratch, scratch, scratch);
       __ vpmax(NeonU16, scratch, scratch, scratch);
       __ ExtractLane(i.OutputRegister(), scratch, NeonS16, 0);
+      __ cmp(i.OutputRegister(), Operand(0));
+      __ mov(i.OutputRegister(), Operand(1), LeaveCC, ne);
       break;
     }
     case kArmS1x8AllTrue: {
@@ -2617,6 +2627,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
       __ vpmin(NeonU16, scratch, scratch, scratch);
       __ vpmin(NeonU16, scratch, scratch, scratch);
       __ ExtractLane(i.OutputRegister(), scratch, NeonS16, 0);
+      __ cmp(i.OutputRegister(), Operand(0));
+      __ mov(i.OutputRegister(), Operand(1), LeaveCC, ne);
       break;
     }
     case kArmS1x16AnyTrue: {
@@ -2631,6 +2643,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
       // kDoubleRegZero is not changed, since it is 0.
       __ vtst(Neon32, q_scratch, q_scratch, q_scratch);
       __ ExtractLane(i.OutputRegister(), d_scratch, NeonS32, 0);
+      __ cmp(i.OutputRegister(), Operand(0));
+      __ mov(i.OutputRegister(), Operand(1), LeaveCC, ne);
       break;
     }
     case kArmS1x16AllTrue: {
@@ -2642,6 +2656,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
       __ vpmin(NeonU8, scratch, scratch, scratch);
       __ vpmin(NeonU8, scratch, scratch, scratch);
       __ ExtractLane(i.OutputRegister(), scratch, NeonS8, 0);
+      __ cmp(i.OutputRegister(), Operand(0));
+      __ mov(i.OutputRegister(), Operand(1), LeaveCC, ne);
       break;
     }
     case kWord32AtomicLoadInt8:
@@ -2901,7 +2917,7 @@ void CodeGenerator::AssembleArchTrap(Instruction* instr,
           new (gen_->zone()) ReferenceMap(gen_->zone());
       gen_->RecordSafepoint(reference_map, Safepoint::kNoLazyDeopt);
       if (FLAG_debug_code) {
-        __ stop(GetAbortReason(AbortReason::kUnexpectedReturnFromWasmTrap));
+        __ stop();
       }
     }
   }
@@ -2993,8 +3009,14 @@ void CodeGenerator::AssembleConstructFrame() {
   auto call_descriptor = linkage()->GetIncomingDescriptor();
   if (frame_access_state()->has_frame()) {
     if (call_descriptor->IsCFunctionCall()) {
-      __ Push(lr, fp);
-      __ mov(fp, sp);
+      if (info()->GetOutputStackFrameType() == StackFrame::C_WASM_ENTRY) {
+        __ StubPrologue(StackFrame::C_WASM_ENTRY);
+        // Reserve stack space for saving the c_entry_fp later.
+        __ AllocateStackSpace(kSystemPointerSize);
+      } else {
+        __ Push(lr, fp);
+        __ mov(fp, sp);
+      }
     } else if (call_descriptor->IsJSFunctionCall()) {
       __ Prologue();
       if (call_descriptor->PushArgumentCount()) {
@@ -3025,8 +3047,8 @@ void CodeGenerator::AssembleConstructFrame() {
     unwinding_info_writer_.MarkFrameConstructed(__ pc_offset());
   }
 
-  int required_slots = frame()->GetTotalFrameSlotCount() -
-                       call_descriptor->CalculateFixedFrameSize();
+  int required_slots =
+      frame()->GetTotalFrameSlotCount() - frame()->GetFixedSlotCount();
 
   if (info()->is_osr()) {
     // TurboFan OSR-compiled functions cannot be entered directly.
@@ -3074,7 +3096,7 @@ void CodeGenerator::AssembleConstructFrame() {
     ReferenceMap* reference_map = new (zone()) ReferenceMap(zone());
     RecordSafepoint(reference_map, Safepoint::kNoLazyDeopt);
     if (FLAG_debug_code) {
-      __ stop(GetAbortReason(AbortReason::kUnexpectedReturnFromThrow));
+      __ stop();
     }
 
     __ bind(&done);
diff --git a/deps/v8/src/compiler/backend/arm/instruction-codes-arm.h b/deps/v8/src/compiler/backend/arm/instruction-codes-arm.h
index 722502edc7802b..165ca39f9d3620 100644
--- a/deps/v8/src/compiler/backend/arm/instruction-codes-arm.h
+++ b/deps/v8/src/compiler/backend/arm/instruction-codes-arm.h
@@ -126,6 +126,7 @@ namespace compiler {
   V(ArmPush)                       \
   V(ArmPoke)                       \
   V(ArmPeek)                       \
+  V(ArmDmbIsh)                     \
   V(ArmDsbIsb)                     \
   V(ArmF32x4Splat)                 \
   V(ArmF32x4ExtractLane)           \
diff --git a/deps/v8/src/compiler/backend/arm/instruction-scheduler-arm.cc b/deps/v8/src/compiler/backend/arm/instruction-scheduler-arm.cc
index 211abd85b8cd66..41d7b4055fce5a 100644
--- a/deps/v8/src/compiler/backend/arm/instruction-scheduler-arm.cc
+++ b/deps/v8/src/compiler/backend/arm/instruction-scheduler-arm.cc
@@ -275,6 +275,7 @@ int InstructionScheduler::GetTargetInstructionFlags(
     case kArmStr:
     case kArmPush:
     case kArmPoke:
+    case kArmDmbIsh:
     case kArmDsbIsb:
     case kArmWord32AtomicPairStore:
     case kArmWord32AtomicPairAdd:
diff --git a/deps/v8/src/compiler/backend/arm/instruction-selector-arm.cc b/deps/v8/src/compiler/backend/arm/instruction-selector-arm.cc
index 678d75ae5eaa73..06aba4491ac737 100644
--- a/deps/v8/src/compiler/backend/arm/instruction-selector-arm.cc
+++ b/deps/v8/src/compiler/backend/arm/instruction-selector-arm.cc
@@ -441,9 +441,9 @@ void InstructionSelector::VisitStackSlot(Node* node) {
                        sequence()->AddImmediate(Constant(slot)), 0, nullptr);
 }
 
-void InstructionSelector::VisitDebugAbort(Node* node) {
+void InstructionSelector::VisitAbortCSAAssert(Node* node) {
   ArmOperandGenerator g(this);
-  Emit(kArchDebugAbort, g.NoOutput(), g.UseFixed(node->InputAt(0), r1));
+  Emit(kArchAbortCSAAssert, g.NoOutput(), g.UseFixed(node->InputAt(0), r1));
 }
 
 void InstructionSelector::VisitLoad(Node* node) {
@@ -2020,6 +2020,11 @@ void InstructionSelector::VisitFloat64InsertHighWord32(Node* node) {
        g.UseRegister(right));
 }
 
+void InstructionSelector::VisitMemoryBarrier(Node* node) {
+  ArmOperandGenerator g(this);
+  Emit(kArmDmbIsh, g.NoOutput());
+}
+
 void InstructionSelector::VisitWord32AtomicLoad(Node* node) {
   LoadRepresentation load_rep = LoadRepresentationOf(node->op());
   ArmOperandGenerator g(this);
diff --git a/deps/v8/src/compiler/backend/arm64/code-generator-arm64.cc b/deps/v8/src/compiler/backend/arm64/code-generator-arm64.cc
index 53864ad2e95f47..c71a63cc3d96e5 100644
--- a/deps/v8/src/compiler/backend/arm64/code-generator-arm64.cc
+++ b/deps/v8/src/compiler/backend/arm64/code-generator-arm64.cc
@@ -224,6 +224,7 @@ class Arm64OperandConverter final : public InstructionOperandConverter {
         return Operand(Operand::EmbeddedNumber(constant.ToFloat64().value()));
       case Constant::kExternalReference:
         return Operand(constant.ToExternalReference());
+      case Constant::kCompressedHeapObject:  // Fall through.
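+      // Editorial note, not part of the upstream patch: the fall-through is
+      // deliberate. Constant::ToHeapObject() now accepts both kHeapObject and
+      // kCompressedHeapObject (see the instruction.cc hunk further down), so
+      // this converter materializes either kind the same way; the compressed
+      // flavour only behaves differently once AssembleMove emits it with
+      // RelocInfo::COMPRESSED_EMBEDDED_OBJECT.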
      case Constant::kHeapObject:
        return Operand(constant.ToHeapObject());
      case Constant::kDelayedStringConstant:
@@ -375,9 +376,9 @@ Condition FlagsConditionToCondition(FlagsCondition condition) {
   UNREACHABLE();
 }
 
-void EmitWordLoadPoisoningIfNeeded(CodeGenerator* codegen,
-                                   InstructionCode opcode, Instruction* instr,
-                                   Arm64OperandConverter& i) {
+void EmitWordLoadPoisoningIfNeeded(
+    CodeGenerator* codegen, InstructionCode opcode, Instruction* instr,
+    Arm64OperandConverter& i) {  // NOLINT(runtime/references)
   const MemoryAccessMode access_mode =
       static_cast<MemoryAccessMode>(MiscField::decode(opcode));
   if (access_mode == kMemoryAccessPoisoned) {
@@ -621,8 +622,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
     }
     case kArchCallBuiltinPointer: {
       DCHECK(!instr->InputAt(0)->IsImmediate());
-      Register builtin_pointer = i.InputRegister(0);
-      __ CallBuiltinPointer(builtin_pointer);
+      Register builtin_index = i.InputRegister(0);
+      __ CallBuiltinByIndex(builtin_index);
       RecordCallPosition(instr);
       frame_access_state()->ClearSPDelta();
       break;
@@ -793,19 +794,17 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
     case kArchLookupSwitch:
       AssembleArchLookupSwitch(instr);
       break;
-    case kArchDebugAbort:
+    case kArchAbortCSAAssert:
       DCHECK(i.InputRegister(0).is(x1));
-      if (!frame_access_state()->has_frame()) {
+      {
         // We don't actually want to generate a pile of code for this, so just
         // claim there is a stack frame, without generating one.
         FrameScope scope(tasm(), StackFrame::NONE);
-        __ Call(isolate()->builtins()->builtin_handle(Builtins::kAbortJS),
-                RelocInfo::CODE_TARGET);
-      } else {
-        __ Call(isolate()->builtins()->builtin_handle(Builtins::kAbortJS),
-                RelocInfo::CODE_TARGET);
+        __ Call(
+            isolate()->builtins()->builtin_handle(Builtins::kAbortCSAAssert),
+            RelocInfo::CODE_TARGET);
       }
-      __ Debug("kArchDebugAbort", 0, BREAK);
+      __ Debug("kArchAbortCSAAssert", 0, BREAK);
       unwinding_info_writer_.MarkBlockWillExit();
       break;
     case kArchDebugBreak:
@@ -867,9 +866,6 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
           this, object, offset, value, mode, DetermineStubCallMode(),
           &unwinding_info_writer_);
       __ StoreTaggedField(value, MemOperand(object, offset));
-      if (COMPRESS_POINTERS_BOOL) {
-        __ DecompressTaggedPointer(object, object);
-      }
       __ CheckPageFlag(object, MemoryChunk::kPointersFromHereAreInterestingMask,
                        eq, ool->entry());
       __ Bind(ool->exit());
@@ -1629,6 +1625,9 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
     case kArm64StrCompressTagged:
       __ StoreTaggedField(i.InputOrZeroRegister64(0), i.MemoryOperand(1));
       break;
+    case kArm64DmbIsh:
+      __ Dmb(InnerShareable, BarrierAll);
+      break;
     case kArm64DsbIsb:
       __ Dsb(FullSystem, BarrierAll);
       __ Isb();
@@ -2200,6 +2199,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
     VRegister temp = scope.AcquireV(format);               \
     __ Instr(temp, i.InputSimd128Register(0).V##FORMAT()); \
     __ Umov(i.OutputRegister32(), temp, 0);                \
+    __ Cmp(i.OutputRegister32(), 0);                       \
+    __ Cset(i.OutputRegister32(), ne);                     \
     break;                                                 \
   }
       SIMD_REDUCE_OP_CASE(kArm64S1x4AnyTrue, Umaxv, kFormatS, 4S);
@@ -2399,12 +2400,14 @@ void CodeGenerator::AssembleArchTableSwitch(Instruction* instr) {
   __ Adr(temp, &table);
   __ Add(temp, temp, Operand(input, UXTW, 2));
   __ Br(temp);
-  __ StartBlockPools();
-  __ Bind(&table);
-  for (size_t index = 0; index < case_count; ++index) {
-    __ B(GetLabel(i.InputRpo(index + 2)));
+  {
+    TurboAssembler::BlockPoolsScope block_pools(tasm(),
+                                                case_count * kInstrSize);
+    __ Bind(&table);
+    for (size_t index = 0; index < case_count; ++index) {
+      __ B(GetLabel(i.InputRpo(index + 2)));
+    }
   }
-  __ EndBlockPools();
 }
 
 void CodeGenerator::FinishFrame(Frame* frame) {
@@ -2437,8 +2440,8 @@ void CodeGenerator::AssembleConstructFrame() {
   // The frame has been previously padded in CodeGenerator::FinishFrame().
   DCHECK_EQ(frame()->GetTotalFrameSlotCount() % 2, 0);
-  int required_slots = frame()->GetTotalFrameSlotCount() -
-                       call_descriptor->CalculateFixedFrameSize();
+  int required_slots =
+      frame()->GetTotalFrameSlotCount() - frame()->GetFixedSlotCount();
 
   CPURegList saves = CPURegList(CPURegister::kRegister, kXRegSizeInBits,
                                 call_descriptor->CalleeSavedRegisters());
@@ -2577,7 +2580,17 @@ void CodeGenerator::AssembleConstructFrame() {
                MemOperand(fp, WasmCompiledFrameConstants::kWasmInstanceOffset));
       } break;
       case CallDescriptor::kCallAddress:
+        if (info()->GetOutputStackFrameType() == StackFrame::C_WASM_ENTRY) {
+          required_slots += 2;  // marker + saved c_entry_fp.
+        }
         __ Claim(required_slots);
+        if (info()->GetOutputStackFrameType() == StackFrame::C_WASM_ENTRY) {
+          UseScratchRegisterScope temps(tasm());
+          Register scratch = temps.AcquireX();
+          __ Mov(scratch, StackFrame::TypeToMarker(StackFrame::C_WASM_ENTRY));
+          __ Str(scratch,
+                 MemOperand(fp, TypedFrameConstants::kFrameTypeOffset));
+        }
         break;
       default:
         UNREACHABLE();
@@ -2654,7 +2667,7 @@ void CodeGenerator::AssembleReturn(InstructionOperand* pop) {
   __ Ret();
 }
 
-void CodeGenerator::FinishCode() { __ CheckConstPool(true, false); }
+void CodeGenerator::FinishCode() { __ ForceConstantPoolEmissionWithoutJump(); }
 
 void CodeGenerator::AssembleMove(InstructionOperand* source,
                                  InstructionOperand* destination) {
@@ -2669,6 +2682,18 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
       } else {
         __ Mov(dst, src_object);
       }
+    } else if (src.type() == Constant::kCompressedHeapObject) {
+      Handle<HeapObject> src_object = src.ToHeapObject();
+      RootIndex index;
+      if (IsMaterializableFromRoot(src_object, &index)) {
+        __ LoadRoot(dst, index);
+      } else {
+        // TODO(v8:8977): Even though this mov happens on 32 bits (Note the
+        // .W()) and we are passing along the RelocInfo, we still haven't made
+        // the address embedded in the code-stream actually be compressed.
+        __ Mov(dst.W(),
+               Immediate(src_object, RelocInfo::COMPRESSED_EMBEDDED_OBJECT));
+      }
     } else {
       __ Mov(dst, g.ToImmediate(source));
     }
diff --git a/deps/v8/src/compiler/backend/arm64/instruction-codes-arm64.h b/deps/v8/src/compiler/backend/arm64/instruction-codes-arm64.h
index 4b7b01711172fc..1c4c0e333542c5 100644
--- a/deps/v8/src/compiler/backend/arm64/instruction-codes-arm64.h
+++ b/deps/v8/src/compiler/backend/arm64/instruction-codes-arm64.h
@@ -171,6 +171,7 @@ namespace compiler {
   V(Arm64CompressSigned)           \
   V(Arm64CompressPointer)          \
   V(Arm64CompressAny)              \
+  V(Arm64DmbIsh)                   \
   V(Arm64DsbIsb)                   \
   V(Arm64F32x4Splat)               \
   V(Arm64F32x4ExtractLane)         \
diff --git a/deps/v8/src/compiler/backend/arm64/instruction-scheduler-arm64.cc b/deps/v8/src/compiler/backend/arm64/instruction-scheduler-arm64.cc
index 502b9d7d82601d..8344887ec2feda 100644
--- a/deps/v8/src/compiler/backend/arm64/instruction-scheduler-arm64.cc
+++ b/deps/v8/src/compiler/backend/arm64/instruction-scheduler-arm64.cc
@@ -319,6 +319,7 @@ int InstructionScheduler::GetTargetInstructionFlags(
     case kArm64StrW:
     case kArm64Str:
     case kArm64StrCompressTagged:
+    case kArm64DmbIsh:
     case kArm64DsbIsb:
       return kHasSideEffect;
 
diff --git a/deps/v8/src/compiler/backend/arm64/instruction-selector-arm64.cc b/deps/v8/src/compiler/backend/arm64/instruction-selector-arm64.cc
index 69d82b49933ab0..a953e35a669ffb 100644
--- a/deps/v8/src/compiler/backend/arm64/instruction-selector-arm64.cc
+++ b/deps/v8/src/compiler/backend/arm64/instruction-selector-arm64.cc
@@ -535,9 +535,9 @@ void InstructionSelector::VisitStackSlot(Node* node) {
                        sequence()->AddImmediate(Constant(slot)), 0, nullptr);
 }
 
-void InstructionSelector::VisitDebugAbort(Node* node) {
+void InstructionSelector::VisitAbortCSAAssert(Node* node) {
   Arm64OperandGenerator g(this);
-  Emit(kArchDebugAbort, g.NoOutput(), g.UseFixed(node->InputAt(0), x1));
+  Emit(kArchAbortCSAAssert, g.NoOutput(), g.UseFixed(node->InputAt(0), x1));
 }
 
 void EmitLoad(InstructionSelector* selector, Node* node, InstructionCode opcode,
@@ -676,10 +676,11 @@ void InstructionSelector::VisitStore(Node* node) {
     InstructionOperand inputs[3];
     size_t input_count = 0;
     inputs[input_count++] = g.UseUniqueRegister(base);
-    // OutOfLineRecordWrite uses the index in an arithmetic instruction, so we
-    // must check kArithmeticImm as well as kLoadStoreImm64.
-    if (g.CanBeImmediate(index, kArithmeticImm) &&
-        g.CanBeImmediate(index, kLoadStoreImm64)) {
+    // OutOfLineRecordWrite uses the index in an add or sub instruction, but we
+    // can trust the assembler to generate extra instructions if the index does
+    // not fit into add or sub. So here only check the immediate for a store.
+    if (g.CanBeImmediate(index, COMPRESS_POINTERS_BOOL ? kLoadStoreImm32
+                                                       : kLoadStoreImm64)) {
       inputs[input_count++] = g.UseImmediate(index);
       addressing_mode = kMode_MRI;
     } else {
@@ -1599,7 +1600,7 @@ void InstructionSelector::VisitChangeUint32ToUint64(Node* node) {
       // 32-bit operations will write their result in a W register (implicitly
      // clearing the top 32-bit of the corresponding X register) so the
      // zero-extension is a no-op.
-      Emit(kArchNop, g.DefineSameAsFirst(node), g.Use(value));
+      EmitIdentity(node);
       return;
     }
     case IrOpcode::kLoad: {
@@ -1610,7 +1611,7 @@ void InstructionSelector::VisitChangeUint32ToUint64(Node* node) {
         case MachineRepresentation::kWord8:
         case MachineRepresentation::kWord16:
         case MachineRepresentation::kWord32:
-          Emit(kArchNop, g.DefineSameAsFirst(node), g.Use(value));
+          EmitIdentity(node);
           return;
         default:
           break;
@@ -1646,29 +1647,75 @@ void InstructionSelector::VisitChangeTaggedSignedToCompressedSigned(
 void InstructionSelector::VisitChangeCompressedToTagged(Node* node) {
   Arm64OperandGenerator g(this);
   Node* const value = node->InputAt(0);
-  Emit(kArm64DecompressAny, g.DefineAsRegister(node), g.UseRegister(value));
+  if ((value->opcode() == IrOpcode::kLoad ||
+       value->opcode() == IrOpcode::kPoisonedLoad) &&
+      CanCover(node, value)) {
+    DCHECK_EQ(LoadRepresentationOf(value->op()).representation(),
+              MachineRepresentation::kCompressed);
+    InstructionCode opcode = kArm64LdrDecompressAnyTagged;
+    if (value->opcode() == IrOpcode::kPoisonedLoad) {
+      CHECK_NE(poisoning_level_, PoisoningMitigationLevel::kDontPoison);
+      opcode |= MiscField::encode(kMemoryAccessPoisoned);
+    }
+    ImmediateMode immediate_mode = kLoadStoreImm32;
+    MachineRepresentation rep = MachineRepresentation::kCompressed;
+    EmitLoad(this, value, opcode, immediate_mode, rep, node);
+  } else {
+    Emit(kArm64DecompressAny, g.DefineAsRegister(node), g.UseRegister(value));
+  }
 }
 
 void InstructionSelector::VisitChangeCompressedPointerToTaggedPointer(
     Node* node) {
   Arm64OperandGenerator g(this);
   Node* const value = node->InputAt(0);
-  Emit(kArm64DecompressPointer, g.DefineAsRegister(node), g.UseRegister(value));
+  if ((value->opcode() == IrOpcode::kLoad ||
+       value->opcode() == IrOpcode::kPoisonedLoad) &&
+      CanCover(node, value)) {
+    DCHECK_EQ(LoadRepresentationOf(value->op()).representation(),
+              MachineRepresentation::kCompressedPointer);
+    InstructionCode opcode = kArm64LdrDecompressTaggedPointer;
+    if (value->opcode() == IrOpcode::kPoisonedLoad) {
+      CHECK_NE(poisoning_level_, PoisoningMitigationLevel::kDontPoison);
+      opcode |= MiscField::encode(kMemoryAccessPoisoned);
+    }
+    ImmediateMode immediate_mode = kLoadStoreImm32;
+    MachineRepresentation rep = MachineRepresentation::kCompressedPointer;
+    EmitLoad(this, value, opcode, immediate_mode, rep, node);
+  } else {
+    Emit(kArm64DecompressPointer, g.DefineAsRegister(node),
+         g.UseRegister(value));
+  }
 }
 
 void InstructionSelector::VisitChangeCompressedSignedToTaggedSigned(
     Node* node) {
   Arm64OperandGenerator g(this);
   Node* const value = node->InputAt(0);
-  Emit(kArm64DecompressSigned, g.DefineAsRegister(node), g.UseRegister(value));
+  if ((value->opcode() == IrOpcode::kLoad ||
+       value->opcode() == IrOpcode::kPoisonedLoad) &&
+      CanCover(node, value)) {
+    DCHECK_EQ(LoadRepresentationOf(value->op()).representation(),
+              MachineRepresentation::kCompressedSigned);
+    InstructionCode opcode = kArm64LdrDecompressTaggedSigned;
+    if (value->opcode() == IrOpcode::kPoisonedLoad) {
+      CHECK_NE(poisoning_level_, PoisoningMitigationLevel::kDontPoison);
+      opcode |= MiscField::encode(kMemoryAccessPoisoned);
+    }
+    ImmediateMode immediate_mode = kLoadStoreImm32;
+    MachineRepresentation rep = MachineRepresentation::kCompressedSigned;
+    EmitLoad(this, value, opcode, immediate_mode, rep, node);
+  } else {
+    Emit(kArm64DecompressSigned, g.DefineAsRegister(node),
+         g.UseRegister(value));
+  }
 }
 
 void InstructionSelector::VisitTruncateInt64ToInt32(Node* node) {
   Arm64OperandGenerator g(this);
-  Node* value = node->InputAt(0);
   // The top 32 bits in the 64-bit register will be undefined, and
   // must not be used by a dependent node.
-  Emit(kArchNop, g.DefineSameAsFirst(node), g.UseRegister(value));
+  EmitIdentity(node);
 }
 
 void InstructionSelector::VisitFloat64Mod(Node* node) {
@@ -2451,7 +2498,7 @@ void InstructionSelector::VisitSwitch(Node* node, const SwitchInfo& sw) {
     size_t table_time_cost = 3;
     size_t lookup_space_cost = 3 + 2 * sw.case_count();
     size_t lookup_time_cost = sw.case_count();
-    if (sw.case_count() > 0 &&
+    if (sw.case_count() > 4 &&
         table_space_cost + 3 * table_time_cost <=
             lookup_space_cost + 3 * lookup_time_cost &&
        sw.min_value() > std::numeric_limits<int32_t>::min() &&
@@ -2755,6 +2802,11 @@ void InstructionSelector::VisitFloat64Mul(Node* node) {
   return VisitRRR(this, kArm64Float64Mul, node);
 }
 
+void InstructionSelector::VisitMemoryBarrier(Node* node) {
+  Arm64OperandGenerator g(this);
+  Emit(kArm64DmbIsh, g.NoOutput());
+}
+
 void InstructionSelector::VisitWord32AtomicLoad(Node* node) {
   LoadRepresentation load_rep = LoadRepresentationOf(node->op());
   ArchOpcode opcode = kArchNop;
diff --git a/deps/v8/src/compiler/backend/code-generator.cc b/deps/v8/src/compiler/backend/code-generator.cc
index bb83a8497bbe3e..9ce92dadaa9469 100644
--- a/deps/v8/src/compiler/backend/code-generator.cc
+++ b/deps/v8/src/compiler/backend/code-generator.cc
@@ -1210,6 +1210,10 @@ void CodeGenerator::AddTranslationForOperand(Translation* translation,
           DCHECK_EQ(MachineRepresentation::kTagged, type.representation());
           literal = DeoptimizationLiteral(constant.ToHeapObject());
           break;
+        case Constant::kCompressedHeapObject:
+          DCHECK_EQ(MachineRepresentation::kCompressed, type.representation());
+          literal = DeoptimizationLiteral(constant.ToHeapObject());
+          break;
         case Constant::kDelayedStringConstant:
           DCHECK_EQ(MachineRepresentation::kTagged, type.representation());
           literal = DeoptimizationLiteral(constant.ToDelayedStringConstant());
diff --git a/deps/v8/src/compiler/backend/ia32/code-generator-ia32.cc b/deps/v8/src/compiler/backend/ia32/code-generator-ia32.cc
index 0e61c22cbbcd51..ed4be7a47cb296 100644
--- a/deps/v8/src/compiler/backend/ia32/code-generator-ia32.cc
+++ b/deps/v8/src/compiler/backend/ia32/code-generator-ia32.cc
@@ -81,6 +81,8 @@ class IA32OperandConverter : public InstructionOperandConverter {
         return Immediate(constant.ToExternalReference());
       case Constant::kHeapObject:
         return Immediate(constant.ToHeapObject());
+      case Constant::kCompressedHeapObject:
+        break;
       case Constant::kDelayedStringConstant:
         return Immediate::EmbeddedStringConstant(
             constant.ToDelayedStringConstant());
@@ -462,6 +464,19 @@ class OutOfLineRecordWrite final : public OutOfLineCode {
     __ opcode(i.OutputSimd128Register(), i.InputOperand(1), imm); \
   }
 
+#define ASSEMBLE_SIMD_ALL_TRUE(opcode)              \
+  do {                                              \
+    Register dst = i.OutputRegister();              \
+    Operand src = i.InputOperand(0);                \
+    Register tmp = i.TempRegister(0);               \
+    __ mov(tmp, Immediate(1));                      \
+    __ xor_(dst, dst);                              \
+    __ Pxor(kScratchDoubleReg, kScratchDoubleReg);  \
+    __ opcode(kScratchDoubleReg, src);              \
+    __ Ptest(kScratchDoubleReg, kScratchDoubleReg); \
+    __ cmov(zero, dst, tmp);                        \
+  } while (false)
+
 void CodeGenerator::AssembleDeconstructFrame() {
   __ mov(esp, ebp);
   __ pop(ebp);
@@ -674,8 +689,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
     }
     case kArchCallBuiltinPointer: {
       DCHECK(!HasImmediateInput(instr, 0));
-      Register builtin_pointer = i.InputRegister(0);
-      __ CallBuiltinPointer(builtin_pointer);
+      Register builtin_index = i.InputRegister(0);
+      __ CallBuiltinByIndex(builtin_index);
       RecordCallPosition(instr);
       frame_access_state()->ClearSPDelta();
       break;
@@ -870,17 +885,15 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
     case kArchComment:
       __ RecordComment(reinterpret_cast<const char*>(i.InputInt32(0)));
       break;
-    case kArchDebugAbort:
+    case kArchAbortCSAAssert:
       DCHECK(i.InputRegister(0) == edx);
-      if (!frame_access_state()->has_frame()) {
+      {
         // We don't actually want to generate a pile of code for this, so just
         // claim there is a stack frame, without generating one.
         FrameScope scope(tasm(), StackFrame::NONE);
-        __ Call(isolate()->builtins()->builtin_handle(Builtins::kAbortJS),
-                RelocInfo::CODE_TARGET);
-      } else {
-        __ Call(isolate()->builtins()->builtin_handle(Builtins::kAbortJS),
-                RelocInfo::CODE_TARGET);
+        __ Call(
+            isolate()->builtins()->builtin_handle(Builtins::kAbortCSAAssert),
+            RelocInfo::CODE_TARGET);
       }
       __ int3();
       break;
@@ -1204,7 +1217,10 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
     case kArchWordPoisonOnSpeculation:
       // TODO(860429): Remove remaining poisoning infrastructure on ia32.
       UNREACHABLE();
-    case kLFence:
+    case kIA32MFence:
+      __ mfence();
+      break;
+    case kIA32LFence:
       __ lfence();
       break;
     case kSSEFloat32Cmp:
@@ -3663,18 +3679,18 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
       __ cmov(zero, dst, tmp);
       break;
     }
+    // Need to split up all the different lane structures because the
+    // comparison instruction used matters, e.g. given 0xff00, pcmpeqb returns
+    // 0x0011, pcmpeqw returns 0x0000, ptest will set ZF to 0 and 1
+    // respectively.
     case kIA32S1x4AllTrue:
+      ASSEMBLE_SIMD_ALL_TRUE(Pcmpeqd);
+      break;
     case kIA32S1x8AllTrue:
+      ASSEMBLE_SIMD_ALL_TRUE(pcmpeqw);
+      break;
     case kIA32S1x16AllTrue: {
-      Register dst = i.OutputRegister();
-      Operand src = i.InputOperand(0);
-      Register tmp = i.TempRegister(0);
-      __ mov(tmp, Immediate(1));
-      __ xor_(dst, dst);
-      __ Pcmpeqd(kScratchDoubleReg, kScratchDoubleReg);
-      __ Pxor(kScratchDoubleReg, src);
-      __ Ptest(kScratchDoubleReg, kScratchDoubleReg);
-      __ cmov(zero, dst, tmp);
+      ASSEMBLE_SIMD_ALL_TRUE(pcmpeqb);
       break;
     }
     case kIA32StackCheck: {
@@ -4224,6 +4240,11 @@ void CodeGenerator::AssembleConstructFrame() {
     if (call_descriptor->IsCFunctionCall()) {
       __ push(ebp);
       __ mov(ebp, esp);
+      if (info()->GetOutputStackFrameType() == StackFrame::C_WASM_ENTRY) {
+        __ Push(Immediate(StackFrame::TypeToMarker(StackFrame::C_WASM_ENTRY)));
+        // Reserve stack space for saving the c_entry_fp later.
+        __ AllocateStackSpace(kSystemPointerSize);
+      }
     } else if (call_descriptor->IsJSFunctionCall()) {
       __ Prologue();
       if (call_descriptor->PushArgumentCount()) {
@@ -4254,8 +4275,8 @@ void CodeGenerator::AssembleConstructFrame() {
     }
   }
 
-  int required_slots = frame()->GetTotalFrameSlotCount() -
-                       call_descriptor->CalculateFixedFrameSize();
+  int required_slots =
+      frame()->GetTotalFrameSlotCount() - frame()->GetFixedSlotCount();
 
   if (info()->is_osr()) {
    // TurboFan OSR-compiled functions cannot be entered directly.
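Editor's sketch: the arm cmp+mov, arm64 Cmp+Cset, and ia32 ASSEMBLE_SIMD_ALL_TRUE changes above all canonicalize a SIMD lane reduction into exactly 0 or 1. The scalar model below is editorial, not V8 code; it just shows the intended semantics and why the comparison width matters, per the pcmpeqb/pcmpeqw comment in the ia32 hunk.

    // Scalar model of the SIMD AnyTrue/AllTrue reductions above.
    #include <cstdint>
    #include <iostream>

    template <typename Lane, int kLanes>
    bool AnyTrue(const Lane (&v)[kLanes]) {
      for (Lane lane : v)
        if (lane != 0) return true;  // result is normalized to exactly 0 or 1
      return false;
    }

    template <typename Lane, int kLanes>
    bool AllTrue(const Lane (&v)[kLanes]) {
      for (Lane lane : v)
        if (lane == 0) return false;  // a single zero lane fails the reduction
      return true;
    }

    int main() {
      // The 0xff00 case from the ia32 comment: every 16-bit lane is nonzero,
      // yet the same 128 bits viewed as 8-bit lanes contain zero bytes, so
      // AllTrue over bytes must use a byte-wide compare (pcmpeqb, not pcmpeqw).
      uint16_t w[8] = {0xff00, 0xff00, 0xff00, 0xff00,
                       0xff00, 0xff00, 0xff00, 0xff00};
      uint8_t b[16] = {0x00, 0xff, 0x00, 0xff, 0x00, 0xff, 0x00, 0xff,
                       0x00, 0xff, 0x00, 0xff, 0x00, 0xff, 0x00, 0xff};
      std::cout << AllTrue(w) << " " << AllTrue(b) << " " << AnyTrue(b) << "\n";
      // prints "1 0 1"
    }

The normalization step (compare against zero, then set 0/1) exists because the vector reductions leave an arbitrary nonzero lane value in the output register, while the IR contract for S1x*AnyTrue/AllTrue is a boolean word.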
@@ -4629,6 +4650,7 @@ void CodeGenerator::AssembleJumpTable(Label** targets, size_t target_count) {
 #undef ASSEMBLE_MOVX
 #undef ASSEMBLE_SIMD_PUNPCK_SHUFFLE
 #undef ASSEMBLE_SIMD_IMM_SHUFFLE
+#undef ASSEMBLE_SIMD_ALL_TRUE
 
 }  // namespace compiler
 }  // namespace internal
diff --git a/deps/v8/src/compiler/backend/ia32/instruction-codes-ia32.h b/deps/v8/src/compiler/backend/ia32/instruction-codes-ia32.h
index 60ed1cc29cdd33..56dea82fe2c29a 100644
--- a/deps/v8/src/compiler/backend/ia32/instruction-codes-ia32.h
+++ b/deps/v8/src/compiler/backend/ia32/instruction-codes-ia32.h
@@ -44,7 +44,8 @@ namespace compiler {
   V(IA32Tzcnt)                     \
   V(IA32Popcnt)                    \
   V(IA32Bswap)                     \
-  V(LFence)                        \
+  V(IA32MFence)                    \
+  V(IA32LFence)                    \
   V(SSEFloat32Cmp)                 \
   V(SSEFloat32Add)                 \
   V(SSEFloat32Sub)                 \
diff --git a/deps/v8/src/compiler/backend/ia32/instruction-scheduler-ia32.cc b/deps/v8/src/compiler/backend/ia32/instruction-scheduler-ia32.cc
index f2d5cc0d179d8f..15f69b991c7288 100644
--- a/deps/v8/src/compiler/backend/ia32/instruction-scheduler-ia32.cc
+++ b/deps/v8/src/compiler/backend/ia32/instruction-scheduler-ia32.cc
@@ -365,7 +365,8 @@ int InstructionScheduler::GetTargetInstructionFlags(
     case kIA32PushFloat64:
     case kIA32PushSimd128:
     case kIA32Poke:
-    case kLFence:
+    case kIA32MFence:
+    case kIA32LFence:
       return kHasSideEffect;
 
     case kIA32Word32AtomicPairLoad:
diff --git a/deps/v8/src/compiler/backend/ia32/instruction-selector-ia32.cc b/deps/v8/src/compiler/backend/ia32/instruction-selector-ia32.cc
index f81b88823e594c..e1fc66b4ba4843 100644
--- a/deps/v8/src/compiler/backend/ia32/instruction-selector-ia32.cc
+++ b/deps/v8/src/compiler/backend/ia32/instruction-selector-ia32.cc
@@ -272,9 +272,9 @@ void InstructionSelector::VisitStackSlot(Node* node) {
                        sequence()->AddImmediate(Constant(slot)), 0, nullptr);
 }
 
-void InstructionSelector::VisitDebugAbort(Node* node) {
+void InstructionSelector::VisitAbortCSAAssert(Node* node) {
   IA32OperandGenerator g(this);
-  Emit(kArchDebugAbort, g.NoOutput(), g.UseFixed(node->InputAt(0), edx));
+  Emit(kArchAbortCSAAssert, g.NoOutput(), g.UseFixed(node->InputAt(0), edx));
 }
 
 void InstructionSelector::VisitLoad(Node* node) {
@@ -1593,6 +1593,11 @@ void InstructionSelector::VisitFloat64SilenceNaN(Node* node) {
        g.UseRegister(node->InputAt(0)));
 }
 
+void InstructionSelector::VisitMemoryBarrier(Node* node) {
+  IA32OperandGenerator g(this);
+  Emit(kIA32MFence, g.NoOutput());
+}
+
 void InstructionSelector::VisitWord32AtomicLoad(Node* node) {
   LoadRepresentation load_rep = LoadRepresentationOf(node->op());
   DCHECK(load_rep.representation() == MachineRepresentation::kWord8 ||
diff --git a/deps/v8/src/compiler/backend/instruction-codes.h b/deps/v8/src/compiler/backend/instruction-codes.h
index 068164b57e4eba..1085de2196f8cf 100644
--- a/deps/v8/src/compiler/backend/instruction-codes.h
+++ b/deps/v8/src/compiler/backend/instruction-codes.h
@@ -82,7 +82,7 @@ inline RecordWriteMode WriteBarrierKindToRecordWriteMode(
   V(ArchLookupSwitch)        \
   V(ArchTableSwitch)         \
   V(ArchNop)                 \
-  V(ArchDebugAbort)          \
+  V(ArchAbortCSAAssert)      \
   V(ArchDebugBreak)          \
   V(ArchComment)             \
   V(ArchThrowTerminator)     \
diff --git a/deps/v8/src/compiler/backend/instruction-scheduler.cc b/deps/v8/src/compiler/backend/instruction-scheduler.cc
index b0637c175df927..538af71bb469f3 100644
--- a/deps/v8/src/compiler/backend/instruction-scheduler.cc
+++ b/deps/v8/src/compiler/backend/instruction-scheduler.cc
@@ -298,7 +298,7 @@ int InstructionScheduler::GetInstructionFlags(const Instruction* instr) const {
     case kArchTailCallCodeObject:
     case kArchTailCallAddress:
     case kArchTailCallWasm:
-    case kArchDebugAbort:
+    case kArchAbortCSAAssert:
     case kArchDebugBreak:
       return kHasSideEffect;
 
diff --git a/deps/v8/src/compiler/backend/instruction-selector-impl.h b/deps/v8/src/compiler/backend/instruction-selector-impl.h
index 21edc2f503853d..a3f62e7ba40c45 100644
--- a/deps/v8/src/compiler/backend/instruction-selector-impl.h
+++ b/deps/v8/src/compiler/backend/instruction-selector-impl.h
@@ -29,8 +29,8 @@ inline bool operator<(const CaseInfo& l, const CaseInfo& r) {
 // Helper struct containing data about a table or lookup switch.
 class SwitchInfo {
  public:
-  SwitchInfo(ZoneVector<CaseInfo>& cases, int32_t min_value, int32_t max_value,
-             BasicBlock* default_branch)
+  SwitchInfo(ZoneVector<CaseInfo>& cases,  // NOLINT(runtime/references)
+             int32_t min_value, int32_t max_value, BasicBlock* default_branch)
       : cases_(cases),
         min_value_(min_value),
         max_value_(max_value),
@@ -109,13 +109,9 @@ class OperandGenerator {
   }
 
   InstructionOperand DefineAsConstant(Node* node) {
-    return DefineAsConstant(node, ToConstant(node));
-  }
-
-  InstructionOperand DefineAsConstant(Node* node, Constant constant) {
     selector()->MarkAsDefined(node);
     int virtual_register = GetVReg(node);
-    sequence()->AddConstant(virtual_register, constant);
+    sequence()->AddConstant(virtual_register, ToConstant(node));
     return ConstantOperand(virtual_register);
   }
 
@@ -326,6 +322,8 @@ class OperandGenerator {
       }
       case IrOpcode::kHeapConstant:
         return Constant(HeapConstantOf(node->op()));
+      case IrOpcode::kCompressedHeapConstant:
+        return Constant(HeapConstantOf(node->op()), true);
       case IrOpcode::kDelayedStringConstant:
        return Constant(StringConstantBaseOf(node->op()));
      case IrOpcode::kDeadValue: {
diff --git a/deps/v8/src/compiler/backend/instruction-selector.cc b/deps/v8/src/compiler/backend/instruction-selector.cc
index 2b748a188b9d34..11ba9104059453 100644
--- a/deps/v8/src/compiler/backend/instruction-selector.cc
+++ b/deps/v8/src/compiler/backend/instruction-selector.cc
@@ -8,6 +8,7 @@
 
 #include "src/base/adapters.h"
 #include "src/codegen/assembler-inl.h"
+#include "src/codegen/tick-counter.h"
 #include "src/compiler/backend/instruction-selector-impl.h"
 #include "src/compiler/compiler-source-position-table.h"
 #include "src/compiler/node-matchers.h"
@@ -24,7 +25,7 @@ InstructionSelector::InstructionSelector(
     Zone* zone, size_t node_count, Linkage* linkage,
     InstructionSequence* sequence, Schedule* schedule,
     SourcePositionTable* source_positions, Frame* frame,
-    EnableSwitchJumpTable enable_switch_jump_table,
+    EnableSwitchJumpTable enable_switch_jump_table, TickCounter* tick_counter,
     SourcePositionMode source_position_mode, Features features,
     EnableScheduling enable_scheduling,
     EnableRootsRelativeAddressing enable_roots_relative_addressing,
@@ -54,7 +55,8 @@ InstructionSelector::InstructionSelector(
       frame_(frame),
       instruction_selection_failed_(false),
       instr_origins_(sequence->zone()),
-      trace_turbo_(trace_turbo) {
+      trace_turbo_(trace_turbo),
+      tick_counter_(tick_counter) {
   instructions_.reserve(node_count);
   continuation_inputs_.reserve(5);
   continuation_outputs_.reserve(2);
@@ -1078,7 +1080,8 @@ void InstructionSelector::VisitBlock(BasicBlock* block) {
         node->opcode() == IrOpcode::kCall ||
         node->opcode() == IrOpcode::kCallWithCallerSavedRegisters ||
         node->opcode() == IrOpcode::kProtectedLoad ||
-        node->opcode() == IrOpcode::kProtectedStore) {
+        node->opcode() == IrOpcode::kProtectedStore ||
+        node->opcode() == IrOpcode::kMemoryBarrier) {
       ++effect_level;
     }
   }
@@ -1251,6 +1254,7 @@ void InstructionSelector::MarkPairProjectionsAsWord32(Node* node) {
 }
 
 void InstructionSelector::VisitNode(Node* node) {
+  tick_counter_->DoTick();
   DCHECK_NOT_NULL(schedule()->block(node));  // should only use scheduled nodes.
   switch (node->opcode()) {
     case IrOpcode::kStart:
@@ -1301,6 +1305,8 @@ void InstructionSelector::VisitNode(Node* node) {
       return MarkAsFloat64(node), VisitConstant(node);
     case IrOpcode::kHeapConstant:
       return MarkAsReference(node), VisitConstant(node);
+    case IrOpcode::kCompressedHeapConstant:
+      return MarkAsCompressed(node), VisitConstant(node);
     case IrOpcode::kNumberConstant: {
       double value = OpParameter<double>(node->op());
       if (!IsSmiDouble(value)) MarkAsReference(node);
@@ -1324,8 +1330,8 @@ void InstructionSelector::VisitNode(Node* node) {
     case IrOpcode::kStateValues:
     case IrOpcode::kObjectState:
       return;
-    case IrOpcode::kDebugAbort:
-      VisitDebugAbort(node);
+    case IrOpcode::kAbortCSAAssert:
+      VisitAbortCSAAssert(node);
       return;
     case IrOpcode::kDebugBreak:
       VisitDebugBreak(node);
@@ -1474,6 +1480,7 @@ void InstructionSelector::VisitNode(Node* node) {
     case IrOpcode::kUint64Mod:
       return MarkAsWord64(node), VisitUint64Mod(node);
     case IrOpcode::kBitcastTaggedToWord:
+    case IrOpcode::kBitcastTaggedSignedToWord:
       return MarkAsRepresentation(MachineType::PointerRepresentation(), node),
              VisitBitcastTaggedToWord(node);
     case IrOpcode::kBitcastWordToTagged:
@@ -1734,6 +1741,8 @@ void InstructionSelector::VisitNode(Node* node) {
       MarkAsWord32(node);
       MarkPairProjectionsAsWord32(node);
       return VisitWord32PairSar(node);
+    case IrOpcode::kMemoryBarrier:
+      return VisitMemoryBarrier(node);
     case IrOpcode::kWord32AtomicLoad: {
       LoadRepresentation type = LoadRepresentationOf(node->op());
       MarkAsRepresentation(type.representation(), node);
@@ -1808,6 +1817,24 @@ void InstructionSelector::VisitNode(Node* node) {
     case IrOpcode::kUnsafePointerAdd:
       MarkAsRepresentation(MachineType::PointerRepresentation(), node);
      return VisitUnsafePointerAdd(node);
+    case IrOpcode::kF64x2Splat:
+      return MarkAsSimd128(node), VisitF64x2Splat(node);
+    case IrOpcode::kF64x2ExtractLane:
+      return MarkAsFloat64(node), VisitF64x2ExtractLane(node);
+    case IrOpcode::kF64x2ReplaceLane:
+      return MarkAsSimd128(node), VisitF64x2ReplaceLane(node);
+    case IrOpcode::kF64x2Abs:
+      return MarkAsSimd128(node), VisitF64x2Abs(node);
+    case IrOpcode::kF64x2Neg:
+      return MarkAsSimd128(node), VisitF64x2Neg(node);
+    case IrOpcode::kF64x2Eq:
+      return MarkAsSimd128(node), VisitF64x2Eq(node);
+    case IrOpcode::kF64x2Ne:
+      return MarkAsSimd128(node), VisitF64x2Ne(node);
+    case IrOpcode::kF64x2Lt:
+      return MarkAsSimd128(node), VisitF64x2Lt(node);
+    case IrOpcode::kF64x2Le:
+      return MarkAsSimd128(node), VisitF64x2Le(node);
     case IrOpcode::kF32x4Splat:
       return MarkAsSimd128(node), VisitF32x4Splat(node);
     case IrOpcode::kF32x4ExtractLane:
@@ -1846,6 +1873,38 @@ void InstructionSelector::VisitNode(Node* node) {
       return MarkAsSimd128(node), VisitF32x4Lt(node);
     case IrOpcode::kF32x4Le:
       return MarkAsSimd128(node), VisitF32x4Le(node);
+    case IrOpcode::kI64x2Splat:
+      return MarkAsSimd128(node), VisitI64x2Splat(node);
+    case IrOpcode::kI64x2ExtractLane:
+      return MarkAsWord64(node), VisitI64x2ExtractLane(node);
+    case IrOpcode::kI64x2ReplaceLane:
+      return MarkAsSimd128(node), VisitI64x2ReplaceLane(node);
+    case IrOpcode::kI64x2Neg:
+      return MarkAsSimd128(node), VisitI64x2Neg(node);
+    case IrOpcode::kI64x2Shl:
+      return MarkAsSimd128(node), VisitI64x2Shl(node);
+    case IrOpcode::kI64x2ShrS:
+      return MarkAsSimd128(node), VisitI64x2ShrS(node);
+    case IrOpcode::kI64x2Add:
+      return MarkAsSimd128(node), VisitI64x2Add(node);
+    case IrOpcode::kI64x2Sub:
+      return MarkAsSimd128(node), VisitI64x2Sub(node);
+    case IrOpcode::kI64x2Mul:
+      return MarkAsSimd128(node), VisitI64x2Mul(node);
+    case IrOpcode::kI64x2Eq:
+      return MarkAsSimd128(node), VisitI64x2Eq(node);
+    case IrOpcode::kI64x2Ne:
+      return MarkAsSimd128(node), VisitI64x2Ne(node);
+    case IrOpcode::kI64x2GtS:
+      return MarkAsSimd128(node), VisitI64x2GtS(node);
+    case IrOpcode::kI64x2GeS:
+      return MarkAsSimd128(node), VisitI64x2GeS(node);
+    case IrOpcode::kI64x2ShrU:
+      return MarkAsSimd128(node), VisitI64x2ShrU(node);
+    case IrOpcode::kI64x2GtU:
+      return MarkAsSimd128(node), VisitI64x2GtU(node);
+    case IrOpcode::kI64x2GeU:
+      return MarkAsSimd128(node), VisitI64x2GeU(node);
     case IrOpcode::kI32x4Splat:
       return MarkAsSimd128(node), VisitI32x4Splat(node);
     case IrOpcode::kI32x4ExtractLane:
@@ -2028,6 +2087,10 @@ void InstructionSelector::VisitNode(Node* node) {
       return MarkAsSimd128(node), VisitS128Select(node);
     case IrOpcode::kS8x16Shuffle:
       return MarkAsSimd128(node), VisitS8x16Shuffle(node);
+    case IrOpcode::kS1x2AnyTrue:
+      return MarkAsWord32(node), VisitS1x2AnyTrue(node);
+    case IrOpcode::kS1x2AllTrue:
+      return MarkAsWord32(node), VisitS1x2AllTrue(node);
     case IrOpcode::kS1x4AnyTrue:
       return MarkAsWord32(node), VisitS1x4AnyTrue(node);
     case IrOpcode::kS1x4AllTrue:
@@ -2489,6 +2552,36 @@ void InstructionSelector::VisitWord64AtomicCompareExchange(Node* node) {
 #endif  // !V8_TARGET_ARCH_X64 && !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_PPC
         // !V8_TARGET_ARCH_MIPS64 && !V8_TARGET_ARCH_S390
 
+#if !V8_TARGET_ARCH_X64
+void InstructionSelector::VisitF64x2Splat(Node* node) { UNIMPLEMENTED(); }
+void InstructionSelector::VisitF64x2ExtractLane(Node* node) { UNIMPLEMENTED(); }
+void InstructionSelector::VisitF64x2ReplaceLane(Node* node) { UNIMPLEMENTED(); }
+void InstructionSelector::VisitF64x2Abs(Node* node) { UNIMPLEMENTED(); }
+void InstructionSelector::VisitF64x2Neg(Node* node) { UNIMPLEMENTED(); }
+void InstructionSelector::VisitF64x2Eq(Node* node) { UNIMPLEMENTED(); }
+void InstructionSelector::VisitF64x2Ne(Node* node) { UNIMPLEMENTED(); }
+void InstructionSelector::VisitF64x2Lt(Node* node) { UNIMPLEMENTED(); }
+void InstructionSelector::VisitF64x2Le(Node* node) { UNIMPLEMENTED(); }
+void InstructionSelector::VisitI64x2Splat(Node* node) { UNIMPLEMENTED(); }
+void InstructionSelector::VisitI64x2ExtractLane(Node* node) { UNIMPLEMENTED(); }
+void InstructionSelector::VisitI64x2ReplaceLane(Node* node) { UNIMPLEMENTED(); }
+void InstructionSelector::VisitI64x2Neg(Node* node) { UNIMPLEMENTED(); }
+void InstructionSelector::VisitS1x2AnyTrue(Node* node) { UNIMPLEMENTED(); }
+void InstructionSelector::VisitS1x2AllTrue(Node* node) { UNIMPLEMENTED(); }
+void InstructionSelector::VisitI64x2Shl(Node* node) { UNIMPLEMENTED(); }
+void InstructionSelector::VisitI64x2ShrS(Node* node) { UNIMPLEMENTED(); }
+void InstructionSelector::VisitI64x2Add(Node* node) { UNIMPLEMENTED(); }
+void InstructionSelector::VisitI64x2Sub(Node* node) { UNIMPLEMENTED(); }
+void InstructionSelector::VisitI64x2Mul(Node* node) { UNIMPLEMENTED(); }
+void InstructionSelector::VisitI64x2Eq(Node* node) { UNIMPLEMENTED(); }
+void InstructionSelector::VisitI64x2Ne(Node* node) { UNIMPLEMENTED(); }
+void InstructionSelector::VisitI64x2GtS(Node* node) { UNIMPLEMENTED(); }
+void InstructionSelector::VisitI64x2GeS(Node* node) { UNIMPLEMENTED(); }
+void InstructionSelector::VisitI64x2ShrU(Node* node) { UNIMPLEMENTED(); }
+void InstructionSelector::VisitI64x2GtU(Node* node) { UNIMPLEMENTED(); }
+void InstructionSelector::VisitI64x2GeU(Node* node) { UNIMPLEMENTED(); }
+#endif  // !V8_TARGET_ARCH_X64
+
 void InstructionSelector::VisitFinishRegion(Node* node) { EmitIdentity(node); }
 
 void InstructionSelector::VisitParameter(Node* node) {
@@ -2962,7 +3055,7 @@ void InstructionSelector::CanonicalizeShuffle(bool inputs_equal,
 void InstructionSelector::CanonicalizeShuffle(Node* node, uint8_t* shuffle,
                                               bool* is_swizzle) {
   // Get raw shuffle indices.
-  memcpy(shuffle, OpParameter(node->op()), kSimd128Size);
+  memcpy(shuffle, S8x16ShuffleOf(node->op()), kSimd128Size);
   bool needs_swap;
   bool inputs_equal = GetVirtualRegister(node->InputAt(0)) ==
                       GetVirtualRegister(node->InputAt(1));
diff --git a/deps/v8/src/compiler/backend/instruction-selector.h b/deps/v8/src/compiler/backend/instruction-selector.h
index 4f6b1c5971d8fe..16f88bb5167462 100644
--- a/deps/v8/src/compiler/backend/instruction-selector.h
+++ b/deps/v8/src/compiler/backend/instruction-selector.h
@@ -19,6 +19,9 @@
 
 namespace v8 {
 namespace internal {
+
+class TickCounter;
+
 namespace compiler {
 
 // Forward declarations.
@@ -266,7 +269,7 @@ class V8_EXPORT_PRIVATE InstructionSelector final {
       Zone* zone, size_t node_count, Linkage* linkage,
       InstructionSequence* sequence, Schedule* schedule,
       SourcePositionTable* source_positions, Frame* frame,
-      EnableSwitchJumpTable enable_switch_jump_table,
+      EnableSwitchJumpTable enable_switch_jump_table, TickCounter* tick_counter,
       SourcePositionMode source_position_mode = kCallSourcePositions,
       Features features = SupportedFeatures(),
       EnableScheduling enable_scheduling = FLAG_turbo_instruction_scheduling
@@ -496,11 +499,15 @@ class V8_EXPORT_PRIVATE InstructionSelector final {
                             VectorSlotPair const& feedback,
                             Node* frame_state);
 
-  void EmitTableSwitch(const SwitchInfo& sw, InstructionOperand& index_operand);
-  void EmitLookupSwitch(const SwitchInfo& sw,
-                        InstructionOperand& value_operand);
-  void EmitBinarySearchSwitch(const SwitchInfo& sw,
-                              InstructionOperand& value_operand);
+  void EmitTableSwitch(
+      const SwitchInfo& sw,
+      InstructionOperand& index_operand);  // NOLINT(runtime/references)
+  void EmitLookupSwitch(
+      const SwitchInfo& sw,
+      InstructionOperand& value_operand);  // NOLINT(runtime/references)
+  void EmitBinarySearchSwitch(
+      const SwitchInfo& sw,
+      InstructionOperand& value_operand);  // NOLINT(runtime/references)
 
   void TryRename(InstructionOperand* op);
   int GetRename(int virtual_register);
@@ -604,6 +611,8 @@ class V8_EXPORT_PRIVATE InstructionSelector final {
   MACHINE_SIMD_OP_LIST(DECLARE_GENERATOR)
 #undef DECLARE_GENERATOR
 
+  // Visit the load node with a value and opcode to replace with.
+  void VisitLoad(Node* node, Node* value, InstructionCode opcode);
   void VisitFinishRegion(Node* node);
   void VisitParameter(Node* node);
   void VisitIfException(Node* node);
@@ -772,6 +781,7 @@ class V8_EXPORT_PRIVATE InstructionSelector final {
   bool instruction_selection_failed_;
   ZoneVector<std::pair<int, int>> instr_origins_;
   EnableTraceTurboJson trace_turbo_;
+  TickCounter* const tick_counter_;
 };
 
 }  // namespace compiler
diff --git a/deps/v8/src/compiler/backend/instruction.cc b/deps/v8/src/compiler/backend/instruction.cc
index c52dca61a1a237..09c7fe22c5f03e 100644
--- a/deps/v8/src/compiler/backend/instruction.cc
+++ b/deps/v8/src/compiler/backend/instruction.cc
@@ -530,7 +530,7 @@ Constant::Constant(RelocatablePtrConstantInfo info) {
 }
 
 Handle<HeapObject> Constant::ToHeapObject() const {
-  DCHECK_EQ(kHeapObject, type());
+  DCHECK(kHeapObject == type() || kCompressedHeapObject == type());
   Handle<HeapObject> value(
       reinterpret_cast<Address*>(static_cast<intptr_t>(value_)));
   return value;
@@ -561,7 +561,8 @@ std::ostream& operator<<(std::ostream& os, const Constant& constant) {
       return os << constant.ToFloat64().value();
     case Constant::kExternalReference:
       return os << constant.ToExternalReference().address();
-    case Constant::kHeapObject:
+    case Constant::kHeapObject:  // Fall through.
+    case Constant::kCompressedHeapObject:
       return os << Brief(*constant.ToHeapObject());
     case Constant::kRpoNumber:
       return os << "RPO" << constant.ToRpoNumber().ToInt();
diff --git a/deps/v8/src/compiler/backend/instruction.h b/deps/v8/src/compiler/backend/instruction.h
index 61875a1a17a1dc..9b322040551df4 100644
--- a/deps/v8/src/compiler/backend/instruction.h
+++ b/deps/v8/src/compiler/backend/instruction.h
@@ -1007,6 +1007,7 @@ class V8_EXPORT_PRIVATE Constant final {
     kFloat32,
     kFloat64,
     kExternalReference,
+    kCompressedHeapObject,
    kHeapObject,
    kRpoNumber,
    kDelayedStringConstant
@@ -1018,8 +1019,9 @@ class V8_EXPORT_PRIVATE Constant final {
   explicit Constant(double v) : type_(kFloat64), value_(bit_cast<int64_t>(v)) {}
   explicit Constant(ExternalReference ref)
       : type_(kExternalReference), value_(bit_cast<intptr_t>(ref.address())) {}
-  explicit Constant(Handle<HeapObject> obj)
-      : type_(kHeapObject), value_(bit_cast<intptr_t>(obj)) {}
+  explicit Constant(Handle<HeapObject> obj, bool is_compressed = false)
+      : type_(is_compressed ? kCompressedHeapObject : kHeapObject),
+        value_(bit_cast<intptr_t>(obj)) {}
   explicit Constant(RpoNumber rpo) : type_(kRpoNumber), value_(rpo.ToInt()) {}
   explicit Constant(const StringConstantBase* str)
       : type_(kDelayedStringConstant), value_(bit_cast<intptr_t>(str)) {}
diff --git a/deps/v8/src/compiler/backend/jump-threading.h b/deps/v8/src/compiler/backend/jump-threading.h
index e23dd453598e5e..ce60ebcb2e3423 100644
--- a/deps/v8/src/compiler/backend/jump-threading.h
+++ b/deps/v8/src/compiler/backend/jump-threading.h
@@ -17,14 +17,17 @@ class V8_EXPORT_PRIVATE JumpThreading {
  public:
   // Compute the forwarding map of basic blocks to their ultimate destination.
   // Returns {true} if there is at least one block that is forwarded.
-  static bool ComputeForwarding(Zone* local_zone, ZoneVector<RpoNumber>& result,
-                                InstructionSequence* code, bool frame_at_start);
+  static bool ComputeForwarding(
+      Zone* local_zone,
+      ZoneVector<RpoNumber>& result,  // NOLINT(runtime/references)
+      InstructionSequence* code, bool frame_at_start);
 
  // Rewrite the instructions to forward jumps and branches.
 // May also negate some branches.
-  static void ApplyForwarding(Zone* local_zone,
-                              ZoneVector<RpoNumber>& forwarding,
-                              InstructionSequence* code);
+  static void ApplyForwarding(
+      Zone* local_zone,
+      ZoneVector<RpoNumber>& forwarding,  // NOLINT(runtime/references)
+      InstructionSequence* code);
 };
 
 }  // namespace compiler
diff --git a/deps/v8/src/compiler/backend/live-range-separator.cc b/deps/v8/src/compiler/backend/live-range-separator.cc
index 6ed04160450789..0a0aadfad143e0 100644
--- a/deps/v8/src/compiler/backend/live-range-separator.cc
+++ b/deps/v8/src/compiler/backend/live-range-separator.cc
@@ -9,15 +9,16 @@ namespace v8 {
 namespace internal {
 namespace compiler {
 
-#define TRACE(...)                             \
-  do {                                         \
-    if (FLAG_trace_alloc) PrintF(__VA_ARGS__); \
+#define TRACE_COND(cond, ...)    \
+  do {                           \
+    if (cond) PrintF(__VA_ARGS__); \
   } while (false)
 
 namespace {
 
 void CreateSplinter(TopLevelLiveRange* range, RegisterAllocationData* data,
-                    LifetimePosition first_cut, LifetimePosition last_cut) {
+                    LifetimePosition first_cut, LifetimePosition last_cut,
+                    bool trace_alloc) {
   DCHECK(!range->IsSplinter());
   // We can ignore ranges that live solely in deferred blocks.
   // If a range ends right at the end of a deferred block, it is marked by
@@ -49,9 +50,10 @@ void CreateSplinter(TopLevelLiveRange* range, RegisterAllocationData* data,
       range->SetSplinter(splinter);
     }
     Zone* zone = data->allocation_zone();
-    TRACE("creating splinter %d for range %d between %d and %d\n",
-          range->splinter()->vreg(), range->vreg(), start.ToInstructionIndex(),
-          end.ToInstructionIndex());
+    TRACE_COND(trace_alloc,
+               "creating splinter %d for range %d between %d and %d\n",
+               range->splinter()->vreg(), range->vreg(),
+               start.ToInstructionIndex(), end.ToInstructionIndex());
     range->Splinter(start, end, zone);
   }
 }
@@ -102,7 +104,8 @@ void SplinterLiveRange(TopLevelLiveRange* range, RegisterAllocationData* data) {
                             current_block->last_instruction_index());
     } else {
       if (first_cut.IsValid()) {
-        CreateSplinter(range, data, first_cut, last_cut);
+        CreateSplinter(range, data, first_cut, last_cut,
+                       data->is_trace_alloc());
         first_cut = LifetimePosition::Invalid();
         last_cut = LifetimePosition::Invalid();
       }
@@ -116,7 +119,8 @@ void SplinterLiveRange(TopLevelLiveRange* range, RegisterAllocationData* data) {
   // have to connect blocks anyway, so we can also splinter to the end of the
   // block, too.
   if (first_cut.IsValid()) {
-    CreateSplinter(range, data, first_cut, interval_end);
+    CreateSplinter(range, data, first_cut, interval_end,
+                   data->is_trace_alloc());
     first_cut = LifetimePosition::Invalid();
     last_cut = LifetimePosition::Invalid();
   }
@@ -186,7 +190,7 @@ void LiveRangeMerger::Merge() {
   }
 }
 
-#undef TRACE
+#undef TRACE_COND
 
 }  // namespace compiler
 }  // namespace internal
diff --git a/deps/v8/src/compiler/backend/mips/code-generator-mips.cc b/deps/v8/src/compiler/backend/mips/code-generator-mips.cc
index 1f793868211010..5cec4a8a16beff 100644
--- a/deps/v8/src/compiler/backend/mips/code-generator-mips.cc
+++ b/deps/v8/src/compiler/backend/mips/code-generator-mips.cc
@@ -80,6 +80,7 @@ class MipsOperandConverter final : public InstructionOperandConverter {
         return Operand::EmbeddedNumber(constant.ToFloat64().value());
       case Constant::kInt64:
       case Constant::kExternalReference:
+      case Constant::kCompressedHeapObject:
      case Constant::kHeapObject:
        // TODO(plind): Maybe we should handle ExtRef & HeapObj here?
        //    maybe not done on arm due to const pool ??
@@ -264,8 +265,9 @@ Condition FlagsConditionToConditionTst(FlagsCondition condition) { UNREACHABLE(); } -FPUCondition FlagsConditionToConditionCmpFPU(bool& predicate, - FlagsCondition condition) { +FPUCondition FlagsConditionToConditionCmpFPU( + bool& predicate, // NOLINT(runtime/references) + FlagsCondition condition) { switch (condition) { case kEqual: predicate = true; @@ -301,9 +303,9 @@ FPUCondition FlagsConditionToConditionCmpFPU(bool& predicate, << "\""; \ UNIMPLEMENTED(); -void EmitWordLoadPoisoningIfNeeded(CodeGenerator* codegen, - InstructionCode opcode, Instruction* instr, - MipsOperandConverter& i) { +void EmitWordLoadPoisoningIfNeeded( + CodeGenerator* codegen, InstructionCode opcode, Instruction* instr, + MipsOperandConverter& i) { // NOLINT(runtime/references) const MemoryAccessMode access_mode = static_cast(MiscField::decode(opcode)); if (access_mode == kMemoryAccessPoisoned) { @@ -662,8 +664,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( } case kArchCallBuiltinPointer: { DCHECK(!instr->InputAt(0)->IsImmediate()); - Register builtin_pointer = i.InputRegister(0); - __ CallBuiltinPointer(builtin_pointer); + Register builtin_index = i.InputRegister(0); + __ CallBuiltinByIndex(builtin_index); RecordCallPosition(instr); frame_access_state()->ClearSPDelta(); break; @@ -778,6 +780,13 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( break; case kArchCallCFunction: { int const num_parameters = MiscField::decode(instr->opcode()); + Label return_location; + if (linkage()->GetIncomingDescriptor()->IsWasmCapiFunction()) { + // Put the return address in a stack slot. + __ LoadAddress(kScratchReg, &return_location); + __ sw(kScratchReg, + MemOperand(fp, WasmExitFrameConstants::kCallingPCOffset)); + } if (instr->InputAt(0)->IsImmediate()) { ExternalReference ref = i.InputExternalReference(0); __ CallCFunction(ref, num_parameters); @@ -785,6 +794,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( Register func = i.InputRegister(0); __ CallCFunction(func, num_parameters); } + __ bind(&return_location); + RecordSafepoint(instr->reference_map(), Safepoint::kNoLazyDeopt); frame_access_state()->SetFrameAccessToDefault(); // Ideally, we should decrement SP delta to match the change of stack // pointer in CallCFunction. However, for certain architectures (e.g. @@ -816,22 +827,20 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( case kArchTableSwitch: AssembleArchTableSwitch(instr); break; - case kArchDebugAbort: + case kArchAbortCSAAssert: DCHECK(i.InputRegister(0) == a0); - if (!frame_access_state()->has_frame()) { + { // We don't actually want to generate a pile of code for this, so just // claim there is a stack frame, without generating one. 
FrameScope scope(tasm(), StackFrame::NONE); - __ Call(isolate()->builtins()->builtin_handle(Builtins::kAbortJS), - RelocInfo::CODE_TARGET); - } else { - __ Call(isolate()->builtins()->builtin_handle(Builtins::kAbortJS), - RelocInfo::CODE_TARGET); + __ Call( + isolate()->builtins()->builtin_handle(Builtins::kAbortCSAAssert), + RelocInfo::CODE_TARGET); } - __ stop("kArchDebugAbort"); + __ stop(); break; case kArchDebugBreak: - __ stop("kArchDebugBreak"); + __ stop(); break; case kArchComment: __ RecordComment(reinterpret_cast(i.InputInt32(0))); @@ -1611,6 +1620,10 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( __ Usdc1(ft, i.MemoryOperand(), kScratchReg); break; } + case kMipsSync: { + __ sync(); + break; + } case kMipsPush: if (instr->InputAt(0)->IsFPRegister()) { LocationOperand* op = LocationOperand::cast(instr->InputAt(0)); @@ -3157,7 +3170,7 @@ void CodeGenerator::AssembleArchTrap(Instruction* instr, new (gen_->zone()) ReferenceMap(gen_->zone()); gen_->RecordSafepoint(reference_map, Safepoint::kNoLazyDeopt); if (FLAG_debug_code) { - __ stop(GetAbortReason(AbortReason::kUnexpectedReturnFromWasmTrap)); + __ stop(); } } } @@ -3376,8 +3389,14 @@ void CodeGenerator::AssembleConstructFrame() { auto call_descriptor = linkage()->GetIncomingDescriptor(); if (frame_access_state()->has_frame()) { if (call_descriptor->IsCFunctionCall()) { - __ Push(ra, fp); - __ mov(fp, sp); + if (info()->GetOutputStackFrameType() == StackFrame::C_WASM_ENTRY) { + __ StubPrologue(StackFrame::C_WASM_ENTRY); + // Reserve stack space for saving the c_entry_fp later. + __ Subu(sp, sp, Operand(kSystemPointerSize)); + } else { + __ Push(ra, fp); + __ mov(fp, sp); + } } else if (call_descriptor->IsJSFunctionCall()) { __ Prologue(); if (call_descriptor->PushArgumentCount()) { @@ -3387,7 +3406,8 @@ void CodeGenerator::AssembleConstructFrame() { __ StubPrologue(info()->GetOutputStackFrameType()); if (call_descriptor->IsWasmFunctionCall()) { __ Push(kWasmInstanceRegister); - } else if (call_descriptor->IsWasmImportWrapper()) { + } else if (call_descriptor->IsWasmImportWrapper() || + call_descriptor->IsWasmCapiFunction()) { // WASM import wrappers are passed a tuple in the place of the instance. // Unpack the tuple into the instance and the target callable. // This must be done here in the codegen because it cannot be expressed @@ -3397,12 +3417,16 @@ void CodeGenerator::AssembleConstructFrame() { __ lw(kWasmInstanceRegister, FieldMemOperand(kWasmInstanceRegister, Tuple2::kValue1Offset)); __ Push(kWasmInstanceRegister); + if (call_descriptor->IsWasmCapiFunction()) { + // Reserve space for saving the PC later. + __ Subu(sp, sp, Operand(kSystemPointerSize)); + } } } } - int required_slots = frame()->GetTotalFrameSlotCount() - - call_descriptor->CalculateFixedFrameSize(); + int required_slots = + frame()->GetTotalFrameSlotCount() - frame()->GetFixedSlotCount(); if (info()->is_osr()) { // TurboFan OSR-compiled functions cannot be entered directly. @@ -3564,6 +3588,8 @@ void CodeGenerator::AssembleMove(InstructionOperand* source, } break; } + case Constant::kCompressedHeapObject: + UNREACHABLE(); case Constant::kRpoNumber: UNREACHABLE(); // TODO(titzer): loading RPO numbers on mips. 
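The kArchCallCFunction hunk above (mirrored for mips64, ppc, and s390 below) stores the return address into a fixed frame slot before calling out to C and records a safepoint at the return site, so a stack walker can find the calling PC of a Wasm C-API exit frame. A loose, runnable model of that bookkeeping, with plain data in place of generated code (ExitFrame and the stand-in PC value are hypothetical):

    #include <cstdint>
    #include <cstdio>

    struct ExitFrame {
      uintptr_t calling_pc = 0;  // plays the role of the kCallingPCOffset slot
    };

    int HostAdd(int a, int b) { return a + b; }

    int CallCFunctionModel(ExitFrame* fp, int (*func)(int, int), int a, int b) {
      // Before the call: publish where execution will resume, so the frame
      // can be attributed while the C function runs.
      fp->calling_pc = reinterpret_cast<uintptr_t>(func);  // stand-in for the real PC
      int result = func(a, b);
      // After the call ("__ bind(&return_location)"), the real codegen
      // records a safepoint; the model just clears the slot again.
      fp->calling_pc = 0;
      return result;
    }

    int main() {
      ExitFrame frame;
      printf("%d\n", CallCFunctionModel(&frame, HostAdd, 2, 3));
    }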
break; diff --git a/deps/v8/src/compiler/backend/mips/instruction-codes-mips.h b/deps/v8/src/compiler/backend/mips/instruction-codes-mips.h index ba64e594293cf3..44e53ac044e13d 100644 --- a/deps/v8/src/compiler/backend/mips/instruction-codes-mips.h +++ b/deps/v8/src/compiler/backend/mips/instruction-codes-mips.h @@ -134,6 +134,7 @@ namespace compiler { V(MipsStackClaim) \ V(MipsSeb) \ V(MipsSeh) \ + V(MipsSync) \ V(MipsS128Zero) \ V(MipsI32x4Splat) \ V(MipsI32x4ExtractLane) \ diff --git a/deps/v8/src/compiler/backend/mips/instruction-scheduler-mips.cc b/deps/v8/src/compiler/backend/mips/instruction-scheduler-mips.cc index 26a3e808cc6e3d..92ab3f93443c65 100644 --- a/deps/v8/src/compiler/backend/mips/instruction-scheduler-mips.cc +++ b/deps/v8/src/compiler/backend/mips/instruction-scheduler-mips.cc @@ -284,6 +284,7 @@ int InstructionScheduler::GetTargetInstructionFlags( case kMipsUsh: case kMipsUsw: case kMipsUswc1: + case kMipsSync: case kMipsWord32AtomicPairStore: case kMipsWord32AtomicPairAdd: case kMipsWord32AtomicPairSub: @@ -1352,7 +1353,7 @@ int InstructionScheduler::GetInstructionLatency(const Instruction* instr) { return AssembleArchLookupSwitchLatency((instr->InputCount() - 2) / 2); case kArchTableSwitch: return AssembleArchTableSwitchLatency(); - case kArchDebugAbort: + case kArchAbortCSAAssert: return CallLatency() + 1; case kArchComment: case kArchDeoptimize: diff --git a/deps/v8/src/compiler/backend/mips/instruction-selector-mips.cc b/deps/v8/src/compiler/backend/mips/instruction-selector-mips.cc index 0c7299d4514dc2..452e92a174989e 100644 --- a/deps/v8/src/compiler/backend/mips/instruction-selector-mips.cc +++ b/deps/v8/src/compiler/backend/mips/instruction-selector-mips.cc @@ -274,9 +274,9 @@ void InstructionSelector::VisitStackSlot(Node* node) { sequence()->AddImmediate(Constant(alignment)), 0, nullptr); } -void InstructionSelector::VisitDebugAbort(Node* node) { +void InstructionSelector::VisitAbortCSAAssert(Node* node) { MipsOperandGenerator g(this); - Emit(kArchDebugAbort, g.NoOutput(), g.UseFixed(node->InputAt(0), a0)); + Emit(kArchAbortCSAAssert, g.NoOutput(), g.UseFixed(node->InputAt(0), a0)); } void InstructionSelector::VisitLoad(Node* node) { @@ -1775,6 +1775,11 @@ void InstructionSelector::VisitFloat64SilenceNaN(Node* node) { arraysize(temps), temps); } +void InstructionSelector::VisitMemoryBarrier(Node* node) { + MipsOperandGenerator g(this); + Emit(kMipsSync, g.NoOutput()); +} + void InstructionSelector::VisitWord32AtomicLoad(Node* node) { LoadRepresentation load_rep = LoadRepresentationOf(node->op()); MipsOperandGenerator g(this); diff --git a/deps/v8/src/compiler/backend/mips64/code-generator-mips64.cc b/deps/v8/src/compiler/backend/mips64/code-generator-mips64.cc index 5cd9bc54eb4ff8..f746b52df67bf6 100644 --- a/deps/v8/src/compiler/backend/mips64/code-generator-mips64.cc +++ b/deps/v8/src/compiler/backend/mips64/code-generator-mips64.cc @@ -82,6 +82,7 @@ class MipsOperandConverter final : public InstructionOperandConverter { case Constant::kFloat64: return Operand::EmbeddedNumber(constant.ToFloat64().value()); case Constant::kExternalReference: + case Constant::kCompressedHeapObject: case Constant::kHeapObject: // TODO(plind): Maybe we should handle ExtRef & HeapObj here? // maybe not done on arm due to const pool ?? 
@@ -277,8 +278,9 @@ Condition FlagsConditionToConditionOvf(FlagsCondition condition) { UNREACHABLE(); } -FPUCondition FlagsConditionToConditionCmpFPU(bool& predicate, - FlagsCondition condition) { +FPUCondition FlagsConditionToConditionCmpFPU( + bool& predicate, // NOLINT(runtime/references) + FlagsCondition condition) { switch (condition) { case kEqual: predicate = true; @@ -309,9 +311,9 @@ FPUCondition FlagsConditionToConditionCmpFPU(bool& predicate, UNREACHABLE(); } -void EmitWordLoadPoisoningIfNeeded(CodeGenerator* codegen, - InstructionCode opcode, Instruction* instr, - MipsOperandConverter& i) { +void EmitWordLoadPoisoningIfNeeded( + CodeGenerator* codegen, InstructionCode opcode, Instruction* instr, + MipsOperandConverter& i) { // NOLINT(runtime/references) const MemoryAccessMode access_mode = static_cast(MiscField::decode(opcode)); if (access_mode == kMemoryAccessPoisoned) { @@ -634,8 +636,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( } case kArchCallBuiltinPointer: { DCHECK(!instr->InputAt(0)->IsImmediate()); - Register builtin_pointer = i.InputRegister(0); - __ CallBuiltinPointer(builtin_pointer); + Register builtin_index = i.InputRegister(0); + __ CallBuiltinByIndex(builtin_index); RecordCallPosition(instr); frame_access_state()->ClearSPDelta(); break; @@ -756,6 +758,13 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( break; case kArchCallCFunction: { int const num_parameters = MiscField::decode(instr->opcode()); + Label return_location; + if (linkage()->GetIncomingDescriptor()->IsWasmCapiFunction()) { + // Put the return address in a stack slot. + __ LoadAddress(kScratchReg, &return_location); + __ sd(kScratchReg, + MemOperand(fp, WasmExitFrameConstants::kCallingPCOffset)); + } if (instr->InputAt(0)->IsImmediate()) { ExternalReference ref = i.InputExternalReference(0); __ CallCFunction(ref, num_parameters); @@ -763,6 +772,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( Register func = i.InputRegister(0); __ CallCFunction(func, num_parameters); } + __ bind(&return_location); + RecordSafepoint(instr->reference_map(), Safepoint::kNoLazyDeopt); frame_access_state()->SetFrameAccessToDefault(); // Ideally, we should decrement SP delta to match the change of stack // pointer in CallCFunction. However, for certain architectures (e.g. @@ -794,22 +805,20 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( case kArchTableSwitch: AssembleArchTableSwitch(instr); break; - case kArchDebugAbort: + case kArchAbortCSAAssert: DCHECK(i.InputRegister(0) == a0); - if (!frame_access_state()->has_frame()) { + { // We don't actually want to generate a pile of code for this, so just // claim there is a stack frame, without generating one. 
FrameScope scope(tasm(), StackFrame::NONE); - __ Call(isolate()->builtins()->builtin_handle(Builtins::kAbortJS), - RelocInfo::CODE_TARGET); - } else { - __ Call(isolate()->builtins()->builtin_handle(Builtins::kAbortJS), - RelocInfo::CODE_TARGET); + __ Call( + isolate()->builtins()->builtin_handle(Builtins::kAbortCSAAssert), + RelocInfo::CODE_TARGET); } - __ stop("kArchDebugAbort"); + __ stop(); break; case kArchDebugBreak: - __ stop("kArchDebugBreak"); + __ stop(); break; case kArchComment: __ RecordComment(reinterpret_cast(i.InputInt64(0))); @@ -1786,6 +1795,10 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( __ Usdc1(ft, i.MemoryOperand(), kScratchReg); break; } + case kMips64Sync: { + __ sync(); + break; + } case kMips64Push: if (instr->InputAt(0)->IsFPRegister()) { __ Sdc1(i.InputDoubleRegister(0), MemOperand(sp, -kDoubleSize)); @@ -3304,7 +3317,7 @@ void CodeGenerator::AssembleArchTrap(Instruction* instr, new (gen_->zone()) ReferenceMap(gen_->zone()); gen_->RecordSafepoint(reference_map, Safepoint::kNoLazyDeopt); if (FLAG_debug_code) { - __ stop(GetAbortReason(AbortReason::kUnexpectedReturnFromWasmTrap)); + __ stop(); } } } @@ -3535,8 +3548,14 @@ void CodeGenerator::AssembleConstructFrame() { if (frame_access_state()->has_frame()) { if (call_descriptor->IsCFunctionCall()) { - __ Push(ra, fp); - __ mov(fp, sp); + if (info()->GetOutputStackFrameType() == StackFrame::C_WASM_ENTRY) { + __ StubPrologue(StackFrame::C_WASM_ENTRY); + // Reserve stack space for saving the c_entry_fp later. + __ Dsubu(sp, sp, Operand(kSystemPointerSize)); + } else { + __ Push(ra, fp); + __ mov(fp, sp); + } } else if (call_descriptor->IsJSFunctionCall()) { __ Prologue(); if (call_descriptor->PushArgumentCount()) { @@ -3546,7 +3565,8 @@ void CodeGenerator::AssembleConstructFrame() { __ StubPrologue(info()->GetOutputStackFrameType()); if (call_descriptor->IsWasmFunctionCall()) { __ Push(kWasmInstanceRegister); - } else if (call_descriptor->IsWasmImportWrapper()) { + } else if (call_descriptor->IsWasmImportWrapper() || + call_descriptor->IsWasmCapiFunction()) { // WASM import wrappers are passed a tuple in the place of the instance. // Unpack the tuple into the instance and the target callable. // This must be done here in the codegen because it cannot be expressed @@ -3556,12 +3576,16 @@ void CodeGenerator::AssembleConstructFrame() { __ ld(kWasmInstanceRegister, FieldMemOperand(kWasmInstanceRegister, Tuple2::kValue1Offset)); __ Push(kWasmInstanceRegister); + if (call_descriptor->IsWasmCapiFunction()) { + // Reserve space for saving the PC later. + __ Dsubu(sp, sp, Operand(kSystemPointerSize)); + } } } } - int required_slots = frame()->GetTotalFrameSlotCount() - - call_descriptor->CalculateFixedFrameSize(); + int required_slots = + frame()->GetTotalFrameSlotCount() - frame()->GetFixedSlotCount(); if (info()->is_osr()) { // TurboFan OSR-compiled functions cannot be entered directly. @@ -3723,6 +3747,8 @@ void CodeGenerator::AssembleMove(InstructionOperand* source, } break; } + case Constant::kCompressedHeapObject: + UNREACHABLE(); case Constant::kRpoNumber: UNREACHABLE(); // TODO(titzer): loading RPO numbers on mips64. 
break; diff --git a/deps/v8/src/compiler/backend/mips64/instruction-codes-mips64.h b/deps/v8/src/compiler/backend/mips64/instruction-codes-mips64.h index 24f01b1af1f730..e375ee8d07dfd5 100644 --- a/deps/v8/src/compiler/backend/mips64/instruction-codes-mips64.h +++ b/deps/v8/src/compiler/backend/mips64/instruction-codes-mips64.h @@ -163,6 +163,7 @@ namespace compiler { V(Mips64StackClaim) \ V(Mips64Seb) \ V(Mips64Seh) \ + V(Mips64Sync) \ V(Mips64AssertEqual) \ V(Mips64S128Zero) \ V(Mips64I32x4Splat) \ diff --git a/deps/v8/src/compiler/backend/mips64/instruction-scheduler-mips64.cc b/deps/v8/src/compiler/backend/mips64/instruction-scheduler-mips64.cc index 499a3da05ae9dc..4dcafe41977a15 100644 --- a/deps/v8/src/compiler/backend/mips64/instruction-scheduler-mips64.cc +++ b/deps/v8/src/compiler/backend/mips64/instruction-scheduler-mips64.cc @@ -318,6 +318,7 @@ int InstructionScheduler::GetTargetInstructionFlags( case kMips64Ush: case kMips64Usw: case kMips64Uswc1: + case kMips64Sync: case kMips64Word64AtomicStoreWord8: case kMips64Word64AtomicStoreWord16: case kMips64Word64AtomicStoreWord32: @@ -1263,7 +1264,7 @@ int InstructionScheduler::GetInstructionLatency(const Instruction* instr) { return AssembleArchLookupSwitchLatency(instr); case kArchTableSwitch: return AssembleArchTableSwitchLatency(); - case kArchDebugAbort: + case kArchAbortCSAAssert: return CallLatency() + 1; case kArchDebugBreak: return 1; diff --git a/deps/v8/src/compiler/backend/mips64/instruction-selector-mips64.cc b/deps/v8/src/compiler/backend/mips64/instruction-selector-mips64.cc index 9768a7da9b90b0..95f11ebed1cd00 100644 --- a/deps/v8/src/compiler/backend/mips64/instruction-selector-mips64.cc +++ b/deps/v8/src/compiler/backend/mips64/instruction-selector-mips64.cc @@ -334,9 +334,9 @@ void InstructionSelector::VisitStackSlot(Node* node) { sequence()->AddImmediate(Constant(alignment)), 0, nullptr); } -void InstructionSelector::VisitDebugAbort(Node* node) { +void InstructionSelector::VisitAbortCSAAssert(Node* node) { Mips64OperandGenerator g(this); - Emit(kArchDebugAbort, g.NoOutput(), g.UseFixed(node->InputAt(0), a0)); + Emit(kArchAbortCSAAssert, g.NoOutput(), g.UseFixed(node->InputAt(0), a0)); } void EmitLoad(InstructionSelector* selector, Node* node, InstructionCode opcode, @@ -1946,7 +1946,17 @@ void VisitWord32Compare(InstructionSelector* selector, Node* node, // in those cases. Unfortunately, the solution is not complete because // it might skip cases where Word32 full compare is needed, so // basically it is a hack. + // When calling a host function in the simulator, if the function returns + // an int32 value, the simulator does not sign-extend it to int64, because + // the simulator cannot know whether the function returns an int32 or an + // int64. So we need to do a full word32 compare in this case.
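The effect this comment describes is easy to reproduce in plain C++ (illustrative values; the guarded compare itself follows in the next hunk):

    #include <cstdint>
    #include <cstdio>

    int main() {
      int32_t x = -1;
      int64_t extended = static_cast<int64_t>(x);       // properly sign-extended
      int64_t not_extended = static_cast<uint32_t>(x);  // upper 32 bits left zero
      // A 64-bit compare sees different values even though both encode -1:
      printf("%d\n", extended == not_extended);  // prints 0
      // Comparing only the low 32 bits gives the intended answer:
      printf("%d\n", static_cast<int32_t>(extended) ==
                         static_cast<int32_t>(not_extended));  // prints 1
    }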
+#ifndef USE_SIMULATOR if (IsNodeUnsigned(node->InputAt(0)) != IsNodeUnsigned(node->InputAt(1))) { +#else + if (IsNodeUnsigned(node->InputAt(0)) != IsNodeUnsigned(node->InputAt(1)) || + node->InputAt(0)->opcode() == IrOpcode::kCall || + node->InputAt(1)->opcode() == IrOpcode::kCall) { +#endif VisitFullWord32Compare(selector, node, kMips64Cmp, cont); } else { VisitOptimizedWord32Compare(selector, node, kMips64Cmp, cont); @@ -2398,6 +2408,11 @@ void InstructionSelector::VisitFloat64InsertHighWord32(Node* node) { g.UseRegister(left), g.UseRegister(right)); } +void InstructionSelector::VisitMemoryBarrier(Node* node) { + Mips64OperandGenerator g(this); + Emit(kMips64Sync, g.NoOutput()); +} + void InstructionSelector::VisitWord32AtomicLoad(Node* node) { LoadRepresentation load_rep = LoadRepresentationOf(node->op()); ArchOpcode opcode = kArchNop; diff --git a/deps/v8/src/compiler/backend/ppc/code-generator-ppc.cc b/deps/v8/src/compiler/backend/ppc/code-generator-ppc.cc index 30605df270d458..5289812cb5f280 100644 --- a/deps/v8/src/compiler/backend/ppc/code-generator-ppc.cc +++ b/deps/v8/src/compiler/backend/ppc/code-generator-ppc.cc @@ -79,6 +79,7 @@ class PPCOperandConverter final : public InstructionOperandConverter { case Constant::kDelayedStringConstant: return Operand::EmbeddedStringConstant( constant.ToDelayedStringConstant()); + case Constant::kCompressedHeapObject: case Constant::kHeapObject: case Constant::kRpoNumber: break; @@ -262,8 +263,9 @@ Condition FlagsConditionToCondition(FlagsCondition condition, ArchOpcode op) { UNREACHABLE(); } -void EmitWordLoadPoisoningIfNeeded(CodeGenerator* codegen, Instruction* instr, - PPCOperandConverter& i) { +void EmitWordLoadPoisoningIfNeeded( + CodeGenerator* codegen, Instruction* instr, + PPCOperandConverter& i) { // NOLINT(runtime/references) const MemoryAccessMode access_mode = static_cast<MemoryAccessMode>(MiscField::decode(instr->opcode())); if (access_mode == kMemoryAccessPoisoned) { @@ -877,8 +879,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( } case kArchCallBuiltinPointer: { DCHECK(!instr->InputAt(0)->IsImmediate()); - Register builtin_pointer = i.InputRegister(0); - __ CallBuiltinPointer(builtin_pointer); + Register builtin_index = i.InputRegister(0); + __ CallBuiltinByIndex(builtin_index); RecordCallPosition(instr); frame_access_state()->ClearSPDelta(); break; @@ -1019,6 +1021,18 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( break; case kArchCallCFunction: { int const num_parameters = MiscField::decode(instr->opcode()); + Label start_call; + bool isWasmCapiFunction = + linkage()->GetIncomingDescriptor()->IsWasmCapiFunction(); + constexpr int offset = 12; + if (isWasmCapiFunction) { + __ mflr(kScratchReg); + __ bind(&start_call); + __ LoadPC(r0); + __ addi(r0, r0, Operand(offset)); + __ StoreP(r0, MemOperand(fp, WasmExitFrameConstants::kCallingPCOffset)); + __ mtlr(r0); + } if (instr->InputAt(0)->IsImmediate()) { ExternalReference ref = i.InputExternalReference(0); __ CallCFunction(ref, num_parameters); @@ -1026,6 +1040,15 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( Register func = i.InputRegister(0); __ CallCFunction(func, num_parameters); } + // TODO(miladfar): In the above block, r0 must be populated with the + // strictly-correct PC, which is the return address at this spot. The + // offset is set to 12 right now, which is counted from where we are + // binding to the label and ends at this spot. If failed, replace it + with the correct offset suggested.
More info on f5ab7d3. + if (isWasmCapiFunction) + CHECK_EQ(offset, __ SizeOfCodeGeneratedSince(&start_call)); + + RecordSafepoint(instr->reference_map(), Safepoint::kNoLazyDeopt); frame_access_state()->SetFrameAccessToDefault(); // Ideally, we should decrement SP delta to match the change of stack // pointer in CallCFunction. However, for certain architectures (e.g. @@ -1060,22 +1083,20 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( AssembleArchTableSwitch(instr); DCHECK_EQ(LeaveRC, i.OutputRCBit()); break; - case kArchDebugAbort: + case kArchAbortCSAAssert: DCHECK(i.InputRegister(0) == r4); - if (!frame_access_state()->has_frame()) { + { // We don't actually want to generate a pile of code for this, so just // claim there is a stack frame, without generating one. FrameScope scope(tasm(), StackFrame::NONE); - __ Call(isolate()->builtins()->builtin_handle(Builtins::kAbortJS), - RelocInfo::CODE_TARGET); - } else { - __ Call(isolate()->builtins()->builtin_handle(Builtins::kAbortJS), - RelocInfo::CODE_TARGET); + __ Call( + isolate()->builtins()->builtin_handle(Builtins::kAbortCSAAssert), + RelocInfo::CODE_TARGET); } - __ stop("kArchDebugAbort"); + __ stop(); break; case kArchDebugBreak: - __ stop("kArchDebugBreak"); + __ stop(); break; case kArchNop: case kArchThrowTerminator: @@ -1174,6 +1195,10 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( } break; } + case kPPC_Sync: { + __ sync(); + break; + } case kPPC_And: if (HasRegisterInput(instr, 1)) { __ and_(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1), @@ -2150,7 +2175,7 @@ void CodeGenerator::AssembleArchTrap(Instruction* instr, new (gen_->zone()) ReferenceMap(gen_->zone()); gen_->RecordSafepoint(reference_map, Safepoint::kNoLazyDeopt); if (FLAG_debug_code) { - __ stop(GetAbortReason(AbortReason::kUnexpectedReturnFromWasmTrap)); + __ stop(); } } } @@ -2304,14 +2329,20 @@ void CodeGenerator::AssembleConstructFrame() { auto call_descriptor = linkage()->GetIncomingDescriptor(); if (frame_access_state()->has_frame()) { if (call_descriptor->IsCFunctionCall()) { - __ mflr(r0); - if (FLAG_enable_embedded_constant_pool) { - __ Push(r0, fp, kConstantPoolRegister); - // Adjust FP to point to saved FP. - __ subi(fp, sp, Operand(StandardFrameConstants::kConstantPoolOffset)); + if (info()->GetOutputStackFrameType() == StackFrame::C_WASM_ENTRY) { + __ StubPrologue(StackFrame::C_WASM_ENTRY); + // Reserve stack space for saving the c_entry_fp later. + __ addi(sp, sp, Operand(-kSystemPointerSize)); } else { - __ Push(r0, fp); - __ mr(fp, sp); + __ mflr(r0); + if (FLAG_enable_embedded_constant_pool) { + __ Push(r0, fp, kConstantPoolRegister); + // Adjust FP to point to saved FP. + __ subi(fp, sp, Operand(StandardFrameConstants::kConstantPoolOffset)); + } else { + __ Push(r0, fp); + __ mr(fp, sp); + } } } else if (call_descriptor->IsJSFunctionCall()) { __ Prologue(); @@ -2325,7 +2356,8 @@ void CodeGenerator::AssembleConstructFrame() { __ StubPrologue(type); if (call_descriptor->IsWasmFunctionCall()) { __ Push(kWasmInstanceRegister); - } else if (call_descriptor->IsWasmImportWrapper()) { + } else if (call_descriptor->IsWasmImportWrapper() || + call_descriptor->IsWasmCapiFunction()) { // WASM import wrappers are passed a tuple in the place of the instance. // Unpack the tuple into the instance and the target callable. 
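The CHECK_EQ against SizeOfCodeGeneratedSince at the start of this hunk guards the hard-coded 12-byte distance between the bound label and the return point. A trivial model of that defensive pattern (the instruction sizes here are made up):

    #include <cassert>
    #include <cstdio>

    int main() {
      constexpr int kAssumedOffset = 12;  // bytes, as in the PPC hunk above
      int emitted = 4 + 4 + 4;  // e.g. three fixed-width 4-byte instructions
      // If instruction selection ever changes the emitted sequence, this
      // fails loudly instead of silently storing a wrong PC.
      assert(kAssumedOffset == emitted);
      printf("offset check passed\n");
    }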
// This must be done here in the codegen because it cannot be expressed @@ -2335,12 +2367,16 @@ void CodeGenerator::AssembleConstructFrame() { __ LoadP(kWasmInstanceRegister, FieldMemOperand(kWasmInstanceRegister, Tuple2::kValue1Offset)); __ Push(kWasmInstanceRegister); + if (call_descriptor->IsWasmCapiFunction()) { + // Reserve space for saving the PC later. + __ addi(sp, sp, Operand(-kSystemPointerSize)); + } } } } - int required_slots = frame()->GetTotalFrameSlotCount() - - call_descriptor->CalculateFixedFrameSize(); + int required_slots = + frame()->GetTotalFrameSlotCount() - frame()->GetFixedSlotCount(); if (info()->is_osr()) { // TurboFan OSR-compiled functions cannot be entered directly. __ Abort(AbortReason::kShouldNotDirectlyEnterOsrFunction); @@ -2389,7 +2425,7 @@ void CodeGenerator::AssembleConstructFrame() { ReferenceMap* reference_map = new (zone()) ReferenceMap(zone()); RecordSafepoint(reference_map, Safepoint::kNoLazyDeopt); if (FLAG_debug_code) { - __ stop(GetAbortReason(AbortReason::kUnexpectedReturnFromThrow)); + __ stop(); } __ bind(&done); @@ -2554,6 +2590,8 @@ void CodeGenerator::AssembleMove(InstructionOperand* source, } break; } + case Constant::kCompressedHeapObject: + UNREACHABLE(); case Constant::kRpoNumber: UNREACHABLE(); // TODO(dcarney): loading RPO constants on PPC. break; diff --git a/deps/v8/src/compiler/backend/ppc/instruction-codes-ppc.h b/deps/v8/src/compiler/backend/ppc/instruction-codes-ppc.h index a34a09b7969606..f37529bd884eaf 100644 --- a/deps/v8/src/compiler/backend/ppc/instruction-codes-ppc.h +++ b/deps/v8/src/compiler/backend/ppc/instruction-codes-ppc.h @@ -13,6 +13,7 @@ namespace compiler { // Most opcodes specify a single instruction. #define TARGET_ARCH_OPCODE_LIST(V) \ V(PPC_Peek) \ + V(PPC_Sync) \ V(PPC_And) \ V(PPC_AndComplement) \ V(PPC_Or) \ diff --git a/deps/v8/src/compiler/backend/ppc/instruction-scheduler-ppc.cc b/deps/v8/src/compiler/backend/ppc/instruction-scheduler-ppc.cc index e5f7d7e45a405c..61c2d2be3bd5a7 100644 --- a/deps/v8/src/compiler/backend/ppc/instruction-scheduler-ppc.cc +++ b/deps/v8/src/compiler/backend/ppc/instruction-scheduler-ppc.cc @@ -143,6 +143,7 @@ int InstructionScheduler::GetTargetInstructionFlags( case kPPC_Push: case kPPC_PushFrame: case kPPC_StoreToStackSlot: + case kPPC_Sync: return kHasSideEffect; case kPPC_AtomicStoreUint8: diff --git a/deps/v8/src/compiler/backend/ppc/instruction-selector-ppc.cc b/deps/v8/src/compiler/backend/ppc/instruction-selector-ppc.cc index bb503763c216fa..bfc77b9412a890 100644 --- a/deps/v8/src/compiler/backend/ppc/instruction-selector-ppc.cc +++ b/deps/v8/src/compiler/backend/ppc/instruction-selector-ppc.cc @@ -173,9 +173,9 @@ void InstructionSelector::VisitStackSlot(Node* node) { sequence()->AddImmediate(Constant(slot)), 0, nullptr); } -void InstructionSelector::VisitDebugAbort(Node* node) { +void InstructionSelector::VisitAbortCSAAssert(Node* node) { PPCOperandGenerator g(this); - Emit(kArchDebugAbort, g.NoOutput(), g.UseFixed(node->InputAt(0), r4)); + Emit(kArchAbortCSAAssert, g.NoOutput(), g.UseFixed(node->InputAt(0), r4)); } void InstructionSelector::VisitLoad(Node* node) { @@ -1853,6 +1853,11 @@ void InstructionSelector::VisitFloat64InsertHighWord32(Node* node) { g.UseRegister(left), g.UseRegister(right)); } +void InstructionSelector::VisitMemoryBarrier(Node* node) { + PPCOperandGenerator g(this); + Emit(kPPC_Sync, g.NoOutput()); +} + void InstructionSelector::VisitWord32AtomicLoad(Node* node) { VisitLoad(node); } void InstructionSelector::VisitWord64AtomicLoad(Node* node) { 
VisitLoad(node); } diff --git a/deps/v8/src/compiler/backend/register-allocator.cc b/deps/v8/src/compiler/backend/register-allocator.cc index 57ea2c1a26636e..44701f8159385c 100644 --- a/deps/v8/src/compiler/backend/register-allocator.cc +++ b/deps/v8/src/compiler/backend/register-allocator.cc @@ -9,6 +9,7 @@ #include "src/base/adapters.h" #include "src/base/small-vector.h" #include "src/codegen/assembler-inl.h" +#include "src/codegen/tick-counter.h" #include "src/compiler/linkage.h" #include "src/strings/string-stream.h" #include "src/utils/vector.h" @@ -17,11 +18,13 @@ namespace v8 { namespace internal { namespace compiler { -#define TRACE(...) \ - do { \ - if (FLAG_trace_alloc) PrintF(__VA_ARGS__); \ +#define TRACE_COND(cond, ...) \ + do { \ + if (cond) PrintF(__VA_ARGS__); \ } while (false) +#define TRACE(...) TRACE_COND(data()->is_trace_alloc(), __VA_ARGS__) + namespace { static constexpr int kFloat32Bit = @@ -1119,8 +1122,9 @@ void TopLevelLiveRange::Verify() const { } } -void TopLevelLiveRange::ShortenTo(LifetimePosition start) { - TRACE("Shorten live range %d to [%d\n", vreg(), start.value()); +void TopLevelLiveRange::ShortenTo(LifetimePosition start, bool trace_alloc) { + TRACE_COND(trace_alloc, "Shorten live range %d to [%d\n", vreg(), + start.value()); DCHECK_NOT_NULL(first_interval_); DCHECK(first_interval_->start() <= start); DCHECK(start < first_interval_->end()); @@ -1128,9 +1132,10 @@ void TopLevelLiveRange::ShortenTo(LifetimePosition start) { } void TopLevelLiveRange::EnsureInterval(LifetimePosition start, - LifetimePosition end, Zone* zone) { - TRACE("Ensure live range %d in interval [%d %d[\n", vreg(), start.value(), - end.value()); + LifetimePosition end, Zone* zone, + bool trace_alloc) { + TRACE_COND(trace_alloc, "Ensure live range %d in interval [%d %d[\n", vreg(), + start.value(), end.value()); LifetimePosition new_end = end; while (first_interval_ != nullptr && first_interval_->start() <= end) { if (first_interval_->end() > end) { @@ -1148,9 +1153,10 @@ void TopLevelLiveRange::EnsureInterval(LifetimePosition start, } void TopLevelLiveRange::AddUseInterval(LifetimePosition start, - LifetimePosition end, Zone* zone) { - TRACE("Add to live range %d interval [%d %d[\n", vreg(), start.value(), - end.value()); + LifetimePosition end, Zone* zone, + bool trace_alloc) { + TRACE_COND(trace_alloc, "Add to live range %d interval [%d %d[\n", vreg(), + start.value(), end.value()); if (first_interval_ == nullptr) { UseInterval* interval = new (zone) UseInterval(start, end); first_interval_ = interval; @@ -1173,9 +1179,10 @@ void TopLevelLiveRange::AddUseInterval(LifetimePosition start, } } -void TopLevelLiveRange::AddUsePosition(UsePosition* use_pos) { +void TopLevelLiveRange::AddUsePosition(UsePosition* use_pos, bool trace_alloc) { LifetimePosition pos = use_pos->pos(); - TRACE("Add to live range %d use position %d\n", vreg(), pos.value()); + TRACE_COND(trace_alloc, "Add to live range %d use position %d\n", vreg(), + pos.value()); UsePosition* prev_hint = nullptr; UsePosition* prev = nullptr; UsePosition* current = first_pos_; @@ -1309,13 +1316,8 @@ void LinearScanAllocator::PrintRangeRow(std::ostream& os, if (range->spilled()) { prefix = snprintf(buffer, max_prefix_length, "|%s", kind_string); } else { - const char* reg_name; - if (range->assigned_register() == kUnassignedRegister) { - reg_name = "???"; - } else { - reg_name = RegisterName(range->assigned_register()); - } - prefix = snprintf(buffer, max_prefix_length, "|%s", reg_name); + prefix = snprintf(buffer, 
max_prefix_length, "|%s", + RegisterName(range->assigned_register())); } os << buffer; position += std::min(prefix, max_prefix_length - 1); @@ -1469,7 +1471,7 @@ void RegisterAllocationData::PhiMapValue::CommitAssignment( RegisterAllocationData::RegisterAllocationData( const RegisterConfiguration* config, Zone* zone, Frame* frame, InstructionSequence* code, RegisterAllocationFlags flags, - const char* debug_name) + TickCounter* tick_counter, const char* debug_name) : allocation_zone_(zone), frame_(frame), code_(code), @@ -1496,7 +1498,8 @@ RegisterAllocationData::RegisterAllocationData( preassigned_slot_ranges_(zone), spill_state_(code->InstructionBlockCount(), ZoneVector(zone), zone), - flags_(flags) { + flags_(flags), + tick_counter_(tick_counter) { if (!kSimpleFPAliasing) { fixed_float_live_ranges_.resize( kNumberOfFixedRangesPerRegister * this->config()->num_float_registers(), @@ -1815,6 +1818,7 @@ InstructionOperand* ConstraintBuilder::AllocateFixed( void ConstraintBuilder::MeetRegisterConstraints() { for (InstructionBlock* block : code()->instruction_blocks()) { + data_->tick_counter()->DoTick(); MeetRegisterConstraints(block); } } @@ -1973,14 +1977,6 @@ void ConstraintBuilder::MeetConstraintsBefore(int instr_index) { second->reference_map(), &gap_move->source()}; data()->delayed_references().push_back(delayed_reference); } - } else if (!code()->IsReference(input_vreg) && - code()->IsReference(output_vreg)) { - // The input is assumed to immediately have a tagged representation, - // before the pointer map can be used. I.e. the pointer map at the - // instruction will include the output operand (whose value at the - // beginning of the instruction is equal to the input operand). If - // this is not desired, then the pointer map at this instruction needs - // to be adjusted manually. } } } @@ -1988,6 +1984,7 @@ void ConstraintBuilder::MeetConstraintsBefore(int instr_index) { void ConstraintBuilder::ResolvePhis() { // Process the blocks in reverse order. for (InstructionBlock* block : base::Reversed(code()->instruction_blocks())) { + data_->tick_counter()->DoTick(); ResolvePhis(block); } } @@ -2071,7 +2068,8 @@ void LiveRangeBuilder::AddInitialIntervals(const InstructionBlock* block, while (!iterator.Done()) { int operand_index = iterator.Current(); TopLevelLiveRange* range = data()->GetOrCreateLiveRangeFor(operand_index); - range->AddUseInterval(start, end, allocation_zone()); + range->AddUseInterval(start, end, allocation_zone(), + data()->is_trace_alloc()); iterator.Advance(); } } @@ -2192,16 +2190,18 @@ UsePosition* LiveRangeBuilder::Define(LifetimePosition position, if (range->IsEmpty() || range->Start() > position) { // Can happen if there is a definition without use. 
- range->AddUseInterval(position, position.NextStart(), allocation_zone()); - range->AddUsePosition(NewUsePosition(position.NextStart())); + range->AddUseInterval(position, position.NextStart(), allocation_zone(), + data()->is_trace_alloc()); + range->AddUsePosition(NewUsePosition(position.NextStart()), + data()->is_trace_alloc()); } else { - range->ShortenTo(position); + range->ShortenTo(position, data()->is_trace_alloc()); } if (!operand->IsUnallocated()) return nullptr; UnallocatedOperand* unalloc_operand = UnallocatedOperand::cast(operand); UsePosition* use_pos = NewUsePosition(position, unalloc_operand, hint, hint_type); - range->AddUsePosition(use_pos); + range->AddUsePosition(use_pos, data()->is_trace_alloc()); return use_pos; } @@ -2216,9 +2216,10 @@ UsePosition* LiveRangeBuilder::Use(LifetimePosition block_start, if (operand->IsUnallocated()) { UnallocatedOperand* unalloc_operand = UnallocatedOperand::cast(operand); use_pos = NewUsePosition(position, unalloc_operand, hint, hint_type); - range->AddUsePosition(use_pos); + range->AddUsePosition(use_pos, data()->is_trace_alloc()); } - range->AddUseInterval(block_start, position, allocation_zone()); + range->AddUseInterval(block_start, position, allocation_zone(), + data()->is_trace_alloc()); return use_pos; } @@ -2279,7 +2280,7 @@ void LiveRangeBuilder::ProcessInstructions(const InstructionBlock* block, int code = config()->GetAllocatableGeneralCode(i); TopLevelLiveRange* range = FixedLiveRangeFor(code, spill_mode); range->AddUseInterval(curr_position, curr_position.End(), - allocation_zone()); + allocation_zone(), data()->is_trace_alloc()); } } @@ -2291,7 +2292,7 @@ void LiveRangeBuilder::ProcessInstructions(const InstructionBlock* block, TopLevelLiveRange* range = FixedFPLiveRangeFor( code, MachineRepresentation::kFloat64, spill_mode); range->AddUseInterval(curr_position, curr_position.End(), - allocation_zone()); + allocation_zone(), data()->is_trace_alloc()); } // Clobber fixed float registers on archs with non-simple aliasing. if (!kSimpleFPAliasing) { @@ -2304,7 +2305,7 @@ void LiveRangeBuilder::ProcessInstructions(const InstructionBlock* block, TopLevelLiveRange* range = FixedFPLiveRangeFor( code, MachineRepresentation::kFloat32, spill_mode); range->AddUseInterval(curr_position, curr_position.End(), - allocation_zone()); + allocation_zone(), data()->is_trace_alloc()); } } if (fixed_simd128_live_ranges) { @@ -2314,7 +2315,7 @@ void LiveRangeBuilder::ProcessInstructions(const InstructionBlock* block, TopLevelLiveRange* range = FixedFPLiveRangeFor( code, MachineRepresentation::kSimd128, spill_mode); range->AddUseInterval(curr_position, curr_position.End(), - allocation_zone()); + allocation_zone(), data()->is_trace_alloc()); } } } @@ -2574,7 +2575,8 @@ void LiveRangeBuilder::ProcessLoopHeader(const InstructionBlock* block, while (!iterator.Done()) { int operand_index = iterator.Current(); TopLevelLiveRange* range = data()->GetOrCreateLiveRangeFor(operand_index); - range->EnsureInterval(start, end, allocation_zone()); + range->EnsureInterval(start, end, allocation_zone(), + data()->is_trace_alloc()); iterator.Advance(); } // Insert all values into the live in sets of all blocks in the loop. @@ -2588,6 +2590,7 @@ void LiveRangeBuilder::BuildLiveRanges() { // Process the blocks in reverse order. 
for (int block_id = code()->InstructionBlockCount() - 1; block_id >= 0; --block_id) { + data_->tick_counter()->DoTick(); InstructionBlock* block = code()->InstructionBlockAt(RpoNumber::FromInt(block_id)); BitVector* live = ComputeLiveOut(block, data()); @@ -2607,6 +2610,7 @@ void LiveRangeBuilder::BuildLiveRanges() { // Postprocess the ranges. const size_t live_ranges_size = data()->live_ranges().size(); for (TopLevelLiveRange* range : data()->live_ranges()) { + data_->tick_counter()->DoTick(); CHECK_EQ(live_ranges_size, data()->live_ranges().size()); // TODO(neis): crbug.com/831822 if (range == nullptr) continue; @@ -2773,7 +2777,7 @@ void BundleBuilder::BuildBundles() { LiveRangeBundle* input_bundle = input_range->get_bundle(); if (input_bundle != nullptr) { TRACE("Merge\n"); - if (out->TryMerge(input_bundle)) + if (out->TryMerge(input_bundle, data()->is_trace_alloc())) TRACE("Merged %d and %d to %d\n", phi->virtual_register(), input, out->id()); } else { @@ -2798,7 +2802,7 @@ bool LiveRangeBundle::TryAddRange(LiveRange* range) { InsertUses(range->first_interval()); return true; } -bool LiveRangeBundle::TryMerge(LiveRangeBundle* other) { +bool LiveRangeBundle::TryMerge(LiveRangeBundle* other, bool trace_alloc) { if (other == this) return true; auto iter1 = uses_.begin(); @@ -2810,8 +2814,8 @@ bool LiveRangeBundle::TryMerge(LiveRangeBundle* other) { } else if (iter2->start > iter1->end) { ++iter1; } else { - TRACE("No merge %d:%d %d:%d\n", iter1->start, iter1->end, iter2->start, - iter2->end); + TRACE_COND(trace_alloc, "No merge %d:%d %d:%d\n", iter1->start, + iter1->end, iter2->start, iter2->end); return false; } } @@ -3042,6 +3046,7 @@ void RegisterAllocator::Spill(LiveRange* range, SpillMode spill_mode) { } const char* RegisterAllocator::RegisterName(int register_code) const { + if (register_code == kUnassignedRegister) return "unassigned"; return mode() == GENERAL_REGISTERS ? i::RegisterName(Register::from_code(register_code)) : i::RegisterName(DoubleRegister::from_code(register_code)); @@ -3408,7 +3413,7 @@ void LinearScanAllocator::ComputeStateFromManyPredecessors( to_be_live->emplace(val.first, reg); TRACE("Reset %d as live due vote %zu in %s\n", val.first->TopLevel()->vreg(), val.second.count, - reg == kUnassignedRegister ? "unassigned" : RegisterName(reg)); + RegisterName(reg)); } } }; @@ -3477,6 +3482,8 @@ void LinearScanAllocator::UpdateDeferredFixedRanges(SpillMode spill_mode, RegisterName(other->assigned_register())); LiveRange* split_off = other->SplitAt(next_start, data()->allocation_zone()); + // Try to get the same register after the deferred block. + split_off->set_controlflow_hint(other->assigned_register()); DCHECK_NE(split_off, other); AddToUnhandled(split_off); update_caches(other); @@ -3574,7 +3581,7 @@ void LinearScanAllocator::AllocateRegisters() { SplitAndSpillRangesDefinedByMemoryOperand(); data()->ResetSpillState(); - if (FLAG_trace_alloc) { + if (data()->is_trace_alloc()) { PrintRangeOverview(std::cout); } @@ -3642,6 +3649,7 @@ void LinearScanAllocator::AllocateRegisters() { while (!unhandled_live_ranges().empty() || (data()->is_turbo_control_flow_aware_allocation() && last_block < max_blocks)) { + data()->tick_counter()->DoTick(); LiveRange* current = unhandled_live_ranges().empty() ? 
nullptr : *unhandled_live_ranges().begin(); @@ -3824,7 +3832,7 @@ void LinearScanAllocator::AllocateRegisters() { ProcessCurrentRange(current, spill_mode); } - if (FLAG_trace_alloc) { + if (data()->is_trace_alloc()) { PrintRangeOverview(std::cout); } } @@ -4557,6 +4565,14 @@ void LinearScanAllocator::SpillBetweenUntil(LiveRange* range, LiveRange* third_part = SplitBetween(second_part, split_start, third_part_end); + if (GetInstructionBlock(data()->code(), second_part->Start()) + ->IsDeferred()) { + // Try to use the same register as before. + TRACE("Setting control flow hint for %d:%d to %s\n", + third_part->TopLevel()->vreg(), third_part->relative_id(), + RegisterName(range->controlflow_hint())); + third_part->set_controlflow_hint(range->controlflow_hint()); + } AddToUnhandled(third_part); // This can happen, even if we checked for start < end above, as we fiddle @@ -4601,6 +4617,7 @@ OperandAssigner::OperandAssigner(RegisterAllocationData* data) : data_(data) {} void OperandAssigner::DecideSpillingMode() { if (data()->is_turbo_control_flow_aware_allocation()) { for (auto range : data()->live_ranges()) { + data()->tick_counter()->DoTick(); int max_blocks = data()->code()->InstructionBlockCount(); if (range != nullptr && range->IsSpilledOnlyInDeferredBlocks(data())) { // If the range is spilled only in deferred blocks and starts in @@ -4629,6 +4646,7 @@ void OperandAssigner::DecideSpillingMode() { void OperandAssigner::AssignSpillSlots() { for (auto range : data()->live_ranges()) { + data()->tick_counter()->DoTick(); if (range != nullptr && range->get_bundle() != nullptr) { range->get_bundle()->MergeSpillRanges(); } @@ -4636,6 +4654,7 @@ void OperandAssigner::AssignSpillSlots() { ZoneVector& spill_ranges = data()->spill_ranges(); // Merge disjoint spill ranges for (size_t i = 0; i < spill_ranges.size(); ++i) { + data()->tick_counter()->DoTick(); SpillRange* range = spill_ranges[i]; if (range == nullptr) continue; if (range->IsEmpty()) continue; @@ -4648,6 +4667,7 @@ void OperandAssigner::AssignSpillSlots() { } // Allocate slots for the merged spill ranges. for (SpillRange* range : spill_ranges) { + data()->tick_counter()->DoTick(); if (range == nullptr || range->IsEmpty()) continue; // Allocate a new operand referring to the spill slot. 
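The DoTick() calls added throughout the allocator above all go through the TickCounter now threaded into RegisterAllocationData. A sketch of the assumed interface (V8's real class lives in src/codegen/tick-counter.h; this simplified version only counts):

    #include <cstddef>
    #include <cstdio>

    class TickCounter {
     public:
      void DoTick() { ++ticks_; }
      size_t ticks() const { return ticks_; }
     private:
      size_t ticks_ = 0;
    };

    // Usage mirrors BuildLiveRanges(): tick once per processed block so a
    // long-running compilation stays observable from the outside.
    void ProcessBlocks(TickCounter* tick_counter, int block_count) {
      for (int block_id = block_count - 1; block_id >= 0; --block_id) {
        tick_counter->DoTick();
        // ... per-block liveness work ...
      }
    }

    int main() {
      TickCounter tc;
      ProcessBlocks(&tc, 5);
      printf("%zu\n", tc.ticks());  // prints 5
    }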
if (!range->HasSlot()) { @@ -4660,6 +4680,7 @@ void OperandAssigner::AssignSpillSlots() { void OperandAssigner::CommitAssignment() { const size_t live_ranges_size = data()->live_ranges().size(); for (TopLevelLiveRange* top_range : data()->live_ranges()) { + data()->tick_counter()->DoTick(); CHECK_EQ(live_ranges_size, data()->live_ranges().size()); // TODO(neis): crbug.com/831822 if (top_range == nullptr || top_range->IsEmpty()) continue; @@ -4859,6 +4880,7 @@ void LiveRangeConnector::ResolveControlFlow(Zone* local_zone) { BitVector* live = live_in_sets[block->rpo_number().ToInt()]; BitVector::Iterator iterator(live); while (!iterator.Done()) { + data()->tick_counter()->DoTick(); int vreg = iterator.Current(); LiveRangeBoundArray* array = finder.ArrayFor(vreg); for (const RpoNumber& pred : block->predecessors()) { @@ -5130,6 +5152,7 @@ void LiveRangeConnector::CommitSpillsInDeferredBlocks( } #undef TRACE +#undef TRACE_COND } // namespace compiler } // namespace internal diff --git a/deps/v8/src/compiler/backend/register-allocator.h b/deps/v8/src/compiler/backend/register-allocator.h index 8929fb2ee6870f..55f8a8dd1f608a 100644 --- a/deps/v8/src/compiler/backend/register-allocator.h +++ b/deps/v8/src/compiler/backend/register-allocator.h @@ -16,6 +16,9 @@ namespace v8 { namespace internal { + +class TickCounter; + namespace compiler { static const int32_t kUnassignedRegister = RegisterConfiguration::kMaxRegisters; @@ -175,7 +178,8 @@ std::ostream& operator<<(std::ostream& os, const LifetimePosition pos); enum class RegisterAllocationFlag : unsigned { kTurboControlFlowAwareAllocation = 1 << 0, - kTurboPreprocessRanges = 1 << 1 + kTurboPreprocessRanges = 1 << 1, + kTraceAllocation = 1 << 2 }; using RegisterAllocationFlags = base::Flags; @@ -198,6 +202,10 @@ class RegisterAllocationData final : public ZoneObject { return flags_ & RegisterAllocationFlag::kTurboPreprocessRanges; } + bool is_trace_alloc() { + return flags_ & RegisterAllocationFlag::kTraceAllocation; + } + static constexpr int kNumberOfFixedRangesPerRegister = 2; class PhiMapValue : public ZoneObject { @@ -238,6 +246,7 @@ class RegisterAllocationData final : public ZoneObject { Zone* allocation_zone, Frame* frame, InstructionSequence* code, RegisterAllocationFlags flags, + TickCounter* tick_counter, const char* debug_name = nullptr); const ZoneVector& live_ranges() const { @@ -328,6 +337,8 @@ class RegisterAllocationData final : public ZoneObject { void ResetSpillState() { spill_state_.clear(); } + TickCounter* tick_counter() { return tick_counter_; } + private: int GetNextLiveRangeId(); @@ -354,6 +365,7 @@ class RegisterAllocationData final : public ZoneObject { RangesWithPreassignedSlots preassigned_slot_ranges_; ZoneVector> spill_state_; RegisterAllocationFlags flags_; + TickCounter* const tick_counter_; DISALLOW_COPY_AND_ASSIGN(RegisterAllocationData); }; @@ -741,7 +753,7 @@ class LiveRangeBundle : public ZoneObject { : ranges_(zone), uses_(zone), id_(id) {} bool TryAddRange(LiveRange* range); - bool TryMerge(LiveRangeBundle* other); + bool TryMerge(LiveRangeBundle* other, bool trace_alloc); ZoneSet ranges_; ZoneSet uses_; @@ -785,12 +797,14 @@ class V8_EXPORT_PRIVATE TopLevelLiveRange final : public LiveRange { SlotUseKind slot_use_kind() const { return HasSlotUseField::decode(bits_); } // Add a new interval or a new use position to this live range. 
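The new kTraceAllocation bit above extends the existing power-of-two flag scheme, and is_trace_alloc() simply masks it out. A compact sketch of that scheme (a plain unsigned mask instead of V8's base::Flags helper):

    #include <cstdio>

    enum RegisterAllocationFlagSketch : unsigned {
      kTurboControlFlowAwareAllocation = 1 << 0,
      kTurboPreprocessRanges = 1 << 1,
      kTraceAllocation = 1 << 2,
    };

    struct AllocationDataSketch {
      unsigned flags_;
      bool is_trace_alloc() const { return (flags_ & kTraceAllocation) != 0; }
    };

    int main() {
      AllocationDataSketch data{kTurboPreprocessRanges | kTraceAllocation};
      printf("%d\n", data.is_trace_alloc());  // prints 1
    }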
- void EnsureInterval(LifetimePosition start, LifetimePosition end, Zone* zone); - void AddUseInterval(LifetimePosition start, LifetimePosition end, Zone* zone); - void AddUsePosition(UsePosition* pos); + void EnsureInterval(LifetimePosition start, LifetimePosition end, Zone* zone, + bool trace_alloc); + void AddUseInterval(LifetimePosition start, LifetimePosition end, Zone* zone, + bool trace_alloc); + void AddUsePosition(UsePosition* pos, bool trace_alloc); // Shorten the most recently added interval by setting a new start. - void ShortenTo(LifetimePosition start); + void ShortenTo(LifetimePosition start, bool trace_alloc); // Detaches between start and end, and attributes the resulting range to // result. @@ -1279,11 +1293,13 @@ class LinearScanAllocator final : public RegisterAllocator { RangeWithRegister::Equals>; void MaybeUndoPreviousSplit(LiveRange* range); - void SpillNotLiveRanges(RangeWithRegisterSet& to_be_live, - LifetimePosition position, SpillMode spill_mode); + void SpillNotLiveRanges( + RangeWithRegisterSet& to_be_live, // NOLINT(runtime/references) + LifetimePosition position, SpillMode spill_mode); LiveRange* AssignRegisterOnReload(LiveRange* range, int reg); - void ReloadLiveRanges(RangeWithRegisterSet& to_be_live, - LifetimePosition position); + void ReloadLiveRanges( + RangeWithRegisterSet& to_be_live, // NOLINT(runtime/references) + LifetimePosition position); void UpdateDeferredFixedRanges(SpillMode spill_mode, InstructionBlock* block); bool BlockIsDeferredOrImmediatePredecessorIsNotDeferred( diff --git a/deps/v8/src/compiler/backend/s390/code-generator-s390.cc b/deps/v8/src/compiler/backend/s390/code-generator-s390.cc index 595800268d8fcc..6457b7c8b44493 100644 --- a/deps/v8/src/compiler/backend/s390/code-generator-s390.cc +++ b/deps/v8/src/compiler/backend/s390/code-generator-s390.cc @@ -73,6 +73,7 @@ class S390OperandConverter final : public InstructionOperandConverter { case Constant::kDelayedStringConstant: return Operand::EmbeddedStringConstant( constant.ToDelayedStringConstant()); + case Constant::kCompressedHeapObject: case Constant::kHeapObject: case Constant::kRpoNumber: break; @@ -1245,8 +1246,9 @@ void AdjustStackPointerForTailCall( } } -void EmitWordLoadPoisoningIfNeeded(CodeGenerator* codegen, Instruction* instr, - S390OperandConverter& i) { +void EmitWordLoadPoisoningIfNeeded( + CodeGenerator* codegen, Instruction* instr, + S390OperandConverter& i) { // NOLINT(runtime/references) const MemoryAccessMode access_mode = static_cast<MemoryAccessMode>(MiscField::decode(instr->opcode())); if (access_mode == kMemoryAccessPoisoned) { @@ -1380,8 +1382,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( } case kArchCallBuiltinPointer: { DCHECK(!instr->InputAt(0)->IsImmediate()); - Register builtin_pointer = i.InputRegister(0); - __ CallBuiltinPointer(builtin_pointer); + Register builtin_index = i.InputRegister(0); + __ CallBuiltinByIndex(builtin_index); RecordCallPosition(instr); frame_access_state()->ClearSPDelta(); break; @@ -1509,6 +1511,12 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( break; case kArchCallCFunction: { int const num_parameters = MiscField::decode(instr->opcode()); + Label return_location; + if (linkage()->GetIncomingDescriptor()->IsWasmCapiFunction()) { + // Put the return address in a stack slot.
+      __ larl(r0, &return_location);
+      __ StoreP(r0, MemOperand(fp, WasmExitFrameConstants::kCallingPCOffset));
+    }
     if (instr->InputAt(0)->IsImmediate()) {
       ExternalReference ref = i.InputExternalReference(0);
       __ CallCFunction(ref, num_parameters);
@@ -1516,6 +1525,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
       Register func = i.InputRegister(0);
       __ CallCFunction(func, num_parameters);
     }
+      __ bind(&return_location);
+      RecordSafepoint(instr->reference_map(), Safepoint::kNoLazyDeopt);
       frame_access_state()->SetFrameAccessToDefault();
       // Ideally, we should decrement SP delta to match the change of stack
       // pointer in CallCFunction. However, for certain architectures (e.g.
@@ -1547,22 +1558,20 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
     case kArchTableSwitch:
       AssembleArchTableSwitch(instr);
       break;
-    case kArchDebugAbort:
+    case kArchAbortCSAAssert:
       DCHECK(i.InputRegister(0) == r3);
-      if (!frame_access_state()->has_frame()) {
+      {
         // We don't actually want to generate a pile of code for this, so just
         // claim there is a stack frame, without generating one.
         FrameScope scope(tasm(), StackFrame::NONE);
-        __ Call(isolate()->builtins()->builtin_handle(Builtins::kAbortJS),
-                RelocInfo::CODE_TARGET);
-      } else {
-        __ Call(isolate()->builtins()->builtin_handle(Builtins::kAbortJS),
-                RelocInfo::CODE_TARGET);
+        __ Call(
+            isolate()->builtins()->builtin_handle(Builtins::kAbortCSAAssert),
+            RelocInfo::CODE_TARGET);
       }
-      __ stop("kArchDebugAbort");
+      __ stop();
       break;
     case kArchDebugBreak:
-      __ stop("kArchDebugBreak");
+      __ stop();
       break;
     case kArchNop:
     case kArchThrowTerminator:
@@ -2891,7 +2900,7 @@ void CodeGenerator::AssembleArchTrap(Instruction* instr,
           new (gen_->zone()) ReferenceMap(gen_->zone());
       gen_->RecordSafepoint(reference_map, Safepoint::kNoLazyDeopt);
       if (FLAG_debug_code) {
-        __ stop(GetAbortReason(AbortReason::kUnexpectedReturnFromWasmTrap));
+        __ stop();
       }
     }
   }
@@ -3014,8 +3023,14 @@ void CodeGenerator::AssembleConstructFrame() {
   if (frame_access_state()->has_frame()) {
     if (call_descriptor->IsCFunctionCall()) {
-      __ Push(r14, fp);
-      __ LoadRR(fp, sp);
+      if (info()->GetOutputStackFrameType() == StackFrame::C_WASM_ENTRY) {
+        __ StubPrologue(StackFrame::C_WASM_ENTRY);
+        // Reserve stack space for saving the c_entry_fp later.
+        __ lay(sp, MemOperand(sp, -kSystemPointerSize));
+      } else {
+        __ Push(r14, fp);
+        __ LoadRR(fp, sp);
+      }
     } else if (call_descriptor->IsJSFunctionCall()) {
       __ Prologue(ip);
       if (call_descriptor->PushArgumentCount()) {
@@ -3028,7 +3043,8 @@ void CodeGenerator::AssembleConstructFrame() {
       __ StubPrologue(type);
       if (call_descriptor->IsWasmFunctionCall()) {
         __ Push(kWasmInstanceRegister);
-      } else if (call_descriptor->IsWasmImportWrapper()) {
+      } else if (call_descriptor->IsWasmImportWrapper() ||
+                 call_descriptor->IsWasmCapiFunction()) {
         // WASM import wrappers are passed a tuple in the place of the instance.
         // Unpack the tuple into the instance and the target callable.
         // This must be done here in the codegen because it cannot be expressed
@@ -3038,12 +3054,16 @@ void CodeGenerator::AssembleConstructFrame() {
         __ LoadP(kWasmInstanceRegister,
                  FieldMemOperand(kWasmInstanceRegister, Tuple2::kValue1Offset));
         __ Push(kWasmInstanceRegister);
+        if (call_descriptor->IsWasmCapiFunction()) {
+          // Reserve space for saving the PC later.
+          __ lay(sp, MemOperand(sp, -kSystemPointerSize));
+        }
       }
     }
   }

-  int required_slots = frame()->GetTotalFrameSlotCount() -
-                       call_descriptor->CalculateFixedFrameSize();
+  int required_slots =
+      frame()->GetTotalFrameSlotCount() - frame()->GetFixedSlotCount();
   if (info()->is_osr()) {
     // TurboFan OSR-compiled functions cannot be entered directly.
     __ Abort(AbortReason::kShouldNotDirectlyEnterOsrFunction);
@@ -3089,7 +3109,7 @@ void CodeGenerator::AssembleConstructFrame() {
     ReferenceMap* reference_map = new (zone()) ReferenceMap(zone());
     RecordSafepoint(reference_map, Safepoint::kNoLazyDeopt);
     if (FLAG_debug_code) {
-      __ stop(GetAbortReason(AbortReason::kUnexpectedReturnFromThrow));
+      __ stop();
     }

     __ bind(&done);
@@ -3247,6 +3267,9 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
       }
       break;
     }
+    case Constant::kCompressedHeapObject:
+      UNREACHABLE();
+      break;
     case Constant::kRpoNumber:
       UNREACHABLE();  // TODO(dcarney): loading RPO constants on S390.
       break;
diff --git a/deps/v8/src/compiler/backend/s390/instruction-selector-s390.cc b/deps/v8/src/compiler/backend/s390/instruction-selector-s390.cc
index d982605efc30d9..99d3b0fa0f0acf 100644
--- a/deps/v8/src/compiler/backend/s390/instruction-selector-s390.cc
+++ b/deps/v8/src/compiler/backend/s390/instruction-selector-s390.cc
@@ -447,11 +447,13 @@ void VisitTryTruncateDouble(InstructionSelector* selector, ArchOpcode opcode,
 #endif

 template <class CanCombineWithLoad>
-void GenerateRightOperands(InstructionSelector* selector, Node* node,
-                           Node* right, InstructionCode& opcode,
-                           OperandModes& operand_mode,
-                           InstructionOperand* inputs, size_t& input_count,
-                           CanCombineWithLoad canCombineWithLoad) {
+void GenerateRightOperands(
+    InstructionSelector* selector, Node* node, Node* right,
+    InstructionCode& opcode,     // NOLINT(runtime/references)
+    OperandModes& operand_mode,  // NOLINT(runtime/references)
+    InstructionOperand* inputs,
+    size_t& input_count,  // NOLINT(runtime/references)
+    CanCombineWithLoad canCombineWithLoad) {
   S390OperandGenerator g(selector);

   if ((operand_mode & OperandMode::kAllowImmediate) &&
@@ -491,11 +493,13 @@ void GenerateRightOperands(InstructionSelector* selector, Node* node,
 }

 template <class CanCombineWithLoad>
-void GenerateBinOpOperands(InstructionSelector* selector, Node* node,
-                           Node* left, Node* right, InstructionCode& opcode,
-                           OperandModes& operand_mode,
-                           InstructionOperand* inputs, size_t& input_count,
-                           CanCombineWithLoad canCombineWithLoad) {
+void GenerateBinOpOperands(
+    InstructionSelector* selector, Node* node, Node* left, Node* right,
+    InstructionCode& opcode,     // NOLINT(runtime/references)
+    OperandModes& operand_mode,  // NOLINT(runtime/references)
+    InstructionOperand* inputs,
+    size_t& input_count,  // NOLINT(runtime/references)
+    CanCombineWithLoad canCombineWithLoad) {
   S390OperandGenerator g(selector);
   // left is always register
   InstructionOperand const left_input = g.UseRegister(left);
@@ -686,9 +690,9 @@ void InstructionSelector::VisitStackSlot(Node* node) {
        sequence()->AddImmediate(Constant(slot)), 0, nullptr);
 }

-void InstructionSelector::VisitDebugAbort(Node* node) {
+void InstructionSelector::VisitAbortCSAAssert(Node* node) {
   S390OperandGenerator g(this);
-  Emit(kArchDebugAbort, g.NoOutput(), g.UseFixed(node->InputAt(0), r3));
+  Emit(kArchAbortCSAAssert, g.NoOutput(), g.UseFixed(node->InputAt(0), r3));
 }

 void InstructionSelector::VisitLoad(Node* node) {
@@ -2194,6 +2198,11 @@ void InstructionSelector::EmitPrepareArguments(
   }
 }

+void InstructionSelector::VisitMemoryBarrier(Node* node) {
+  S390OperandGenerator g(this);
+  Emit(kArchNop, g.NoOutput());
+}
+
 bool InstructionSelector::IsTailCallAddressImmediate() { return false; }

 int InstructionSelector::GetTempsCountForTailCallFromJSFunction() { return 3; }
diff --git a/deps/v8/src/compiler/backend/unwinding-info-writer.h b/deps/v8/src/compiler/backend/unwinding-info-writer.h
index 590a839a06f27a..d3a52b34b7712a 100644
--- a/deps/v8/src/compiler/backend/unwinding-info-writer.h
+++ b/deps/v8/src/compiler/backend/unwinding-info-writer.h
@@ -23,6 +23,7 @@ namespace v8 {
 namespace internal {

 class EhFrameWriter;
+class Zone;

 namespace compiler {
diff --git a/deps/v8/src/compiler/backend/x64/code-generator-x64.cc b/deps/v8/src/compiler/backend/x64/code-generator-x64.cc
index c6667292fc3a04..a108edeff0e592 100644
--- a/deps/v8/src/compiler/backend/x64/code-generator-x64.cc
+++ b/deps/v8/src/compiler/backend/x64/code-generator-x64.cc
@@ -349,7 +349,8 @@ class WasmProtectedInstructionTrap final : public WasmOutOfLineTrap {

 void EmitOOLTrapIfNeeded(Zone* zone, CodeGenerator* codegen,
                          InstructionCode opcode, Instruction* instr,
-                         X64OperandConverter& i, int pc) {
+                         X64OperandConverter& i,  // NOLINT(runtime/references)
+                         int pc) {
   const MemoryAccessMode access_mode =
       static_cast<MemoryAccessMode>(MiscField::decode(opcode));
   if (access_mode == kMemoryAccessProtected) {
@@ -357,9 +358,9 @@ void EmitOOLTrapIfNeeded(Zone* zone, CodeGenerator* codegen,
   }
 }

-void EmitWordLoadPoisoningIfNeeded(CodeGenerator* codegen,
-                                   InstructionCode opcode, Instruction* instr,
-                                   X64OperandConverter& i) {
+void EmitWordLoadPoisoningIfNeeded(
+    CodeGenerator* codegen, InstructionCode opcode, Instruction* instr,
+    X64OperandConverter& i) {  // NOLINT(runtime/references)
   const MemoryAccessMode access_mode =
       static_cast<MemoryAccessMode>(MiscField::decode(opcode));
   if (access_mode == kMemoryAccessPoisoned) {
@@ -575,6 +576,19 @@ void EmitWordLoadPoisoningIfNeeded(CodeGenerator* codegen,
     __ opcode(i.OutputSimd128Register(), i.InputSimd128Register(1), imm); \
   } while (false)

+#define ASSEMBLE_SIMD_ALL_TRUE(opcode)                       \
+  do {                                                       \
+    CpuFeatureScope sse_scope(tasm(), SSE4_1);               \
+    Register dst = i.OutputRegister();                       \
+    Register tmp = i.TempRegister(0);                        \
+    __ movq(tmp, Immediate(1));                              \
+    __ xorq(dst, dst);                                       \
+    __ pxor(kScratchDoubleReg, kScratchDoubleReg);           \
+    __ opcode(kScratchDoubleReg, i.InputSimd128Register(0)); \
+    __ ptest(kScratchDoubleReg, kScratchDoubleReg);          \
+    __ cmovq(zero, dst, tmp);                                \
+  } while (false)
+
 void CodeGenerator::AssembleDeconstructFrame() {
   unwinding_info_writer_.MarkFrameDeconstructed(__ pc_offset());
   __ movq(rsp, rbp);
@@ -752,8 +766,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
     }
     case kArchCallBuiltinPointer: {
       DCHECK(!HasImmediateInput(instr, 0));
-      Register builtin_pointer = i.InputRegister(0);
-      __ CallBuiltinPointer(builtin_pointer);
+      Register builtin_index = i.InputRegister(0);
+      __ CallBuiltinByIndex(builtin_index);
       RecordCallPosition(instr);
       frame_access_state()->ClearSPDelta();
       break;
@@ -952,17 +966,15 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
     case kArchComment:
       __ RecordComment(reinterpret_cast<const char*>(i.InputInt64(0)));
       break;
-    case kArchDebugAbort:
+    case kArchAbortCSAAssert:
       DCHECK(i.InputRegister(0) == rdx);
-      if (!frame_access_state()->has_frame()) {
+      {
         // We don't actually want to generate a pile of code for this, so just
         // claim there is a stack frame, without generating one.
         FrameScope scope(tasm(), StackFrame::NONE);
-        __ Call(isolate()->builtins()->builtin_handle(Builtins::kAbortJS),
-                RelocInfo::CODE_TARGET);
-      } else {
-        __ Call(isolate()->builtins()->builtin_handle(Builtins::kAbortJS),
-                RelocInfo::CODE_TARGET);
+        __ Call(
+            isolate()->builtins()->builtin_handle(Builtins::kAbortCSAAssert),
+            RelocInfo::CODE_TARGET);
       }
       __ int3();
       unwinding_info_writer_.MarkBlockWillExit();
@@ -1029,9 +1041,6 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
           OutOfLineRecordWrite(this, object, operand, value, scratch0,
                                scratch1, mode, DetermineStubCallMode());
       __ StoreTaggedField(operand, value);
-      if (COMPRESS_POINTERS_BOOL) {
-        __ DecompressTaggedPointer(object, object);
-      }
       __ CheckPageFlag(object, scratch0,
                        MemoryChunk::kPointersFromHereAreInterestingMask,
                        not_zero, ool->entry());
@@ -1042,7 +1051,10 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
       DCHECK_EQ(i.OutputRegister(), i.InputRegister(0));
       __ andq(i.InputRegister(0), kSpeculationPoisonRegister);
       break;
-    case kLFence:
+    case kX64MFence:
+      __ mfence();
+      break;
+    case kX64LFence:
       __ lfence();
       break;
     case kArchStackSlot: {
@@ -1309,16 +1321,16 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
       break;
     case kSSEFloat32Abs: {
       // TODO(bmeurer): Use RIP relative 128-bit constants.
-      __ pcmpeqd(kScratchDoubleReg, kScratchDoubleReg);
-      __ psrlq(kScratchDoubleReg, 33);
-      __ andps(i.OutputDoubleRegister(), kScratchDoubleReg);
+      __ Pcmpeqd(kScratchDoubleReg, kScratchDoubleReg);
+      __ Psrlq(kScratchDoubleReg, 33);
+      __ Andps(i.OutputDoubleRegister(), kScratchDoubleReg);
       break;
     }
     case kSSEFloat32Neg: {
       // TODO(bmeurer): Use RIP relative 128-bit constants.
-      __ pcmpeqd(kScratchDoubleReg, kScratchDoubleReg);
-      __ psllq(kScratchDoubleReg, 31);
-      __ xorps(i.OutputDoubleRegister(), kScratchDoubleReg);
+      __ Pcmpeqd(kScratchDoubleReg, kScratchDoubleReg);
+      __ Psllq(kScratchDoubleReg, 31);
+      __ Xorps(i.OutputDoubleRegister(), kScratchDoubleReg);
       break;
     }
     case kSSEFloat32Sqrt:
@@ -1517,18 +1529,20 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
       __ bind(ool->exit());
       break;
     }
+    case kX64F64x2Abs:
     case kSSEFloat64Abs: {
       // TODO(bmeurer): Use RIP relative 128-bit constants.
-      __ pcmpeqd(kScratchDoubleReg, kScratchDoubleReg);
-      __ psrlq(kScratchDoubleReg, 1);
-      __ andpd(i.OutputDoubleRegister(), kScratchDoubleReg);
+      __ Pcmpeqd(kScratchDoubleReg, kScratchDoubleReg);
+      __ Psrlq(kScratchDoubleReg, 1);
+      __ Andpd(i.OutputDoubleRegister(), kScratchDoubleReg);
       break;
     }
+    case kX64F64x2Neg:
     case kSSEFloat64Neg: {
       // TODO(bmeurer): Use RIP relative 128-bit constants.
-      __ pcmpeqd(kScratchDoubleReg, kScratchDoubleReg);
-      __ psllq(kScratchDoubleReg, 63);
-      __ xorpd(i.OutputDoubleRegister(), kScratchDoubleReg);
+      __ Pcmpeqd(kScratchDoubleReg, kScratchDoubleReg);
+      __ Psllq(kScratchDoubleReg, 63);
+      __ Xorpd(i.OutputDoubleRegister(), kScratchDoubleReg);
       break;
     }
     case kSSEFloat64Sqrt:
@@ -1944,16 +1958,19 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
     case kX64MovqDecompressTaggedSigned: {
       CHECK(instr->HasOutput());
       __ DecompressTaggedSigned(i.OutputRegister(), i.MemoryOperand());
+      EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
       break;
     }
     case kX64MovqDecompressTaggedPointer: {
       CHECK(instr->HasOutput());
       __ DecompressTaggedPointer(i.OutputRegister(), i.MemoryOperand());
+      EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
       break;
     }
     case kX64MovqDecompressAnyTagged: {
       CHECK(instr->HasOutput());
       __ DecompressAnyTagged(i.OutputRegister(), i.MemoryOperand());
+      EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
       break;
     }
     case kX64MovqCompressTagged: {
@@ -1970,16 +1987,19 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
     case kX64DecompressSigned: {
       CHECK(instr->HasOutput());
       ASSEMBLE_MOVX(DecompressTaggedSigned);
+      EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
       break;
     }
     case kX64DecompressPointer: {
       CHECK(instr->HasOutput());
       ASSEMBLE_MOVX(DecompressTaggedPointer);
+      EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
       break;
     }
     case kX64DecompressAny: {
       CHECK(instr->HasOutput());
       ASSEMBLE_MOVX(DecompressAnyTagged);
+      EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
       break;
     }
     case kX64CompressSigned:  // Fall through.
@@ -2006,11 +2026,11 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
     case kX64Movss:
       EmitOOLTrapIfNeeded(zone(), this, opcode, instr, i, __ pc_offset());
       if (instr->HasOutput()) {
-        __ movss(i.OutputDoubleRegister(), i.MemoryOperand());
+        __ Movss(i.OutputDoubleRegister(), i.MemoryOperand());
       } else {
         size_t index = 0;
         Operand operand = i.MemoryOperand(&index);
-        __ movss(operand, i.InputDoubleRegister(index));
+        __ Movss(operand, i.InputDoubleRegister(index));
       }
       break;
     case kX64Movsd: {
@@ -2039,11 +2059,11 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
       CpuFeatureScope sse_scope(tasm(), SSSE3);
       EmitOOLTrapIfNeeded(zone(), this, opcode, instr, i, __ pc_offset());
       if (instr->HasOutput()) {
-        __ movdqu(i.OutputSimd128Register(), i.MemoryOperand());
+        __ Movdqu(i.OutputSimd128Register(), i.MemoryOperand());
       } else {
         size_t index = 0;
         Operand operand = i.MemoryOperand(&index);
-        __ movdqu(operand, i.InputSimd128Register(index));
+        __ Movdqu(operand, i.InputSimd128Register(index));
       }
       break;
     }
@@ -2065,7 +2085,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
       if (instr->InputAt(0)->IsRegister()) {
         __ Movd(i.OutputDoubleRegister(), i.InputRegister(0));
       } else {
-        __ movss(i.OutputDoubleRegister(), i.InputOperand(0));
+        __ Movss(i.OutputDoubleRegister(), i.InputOperand(0));
       }
       break;
     case kX64BitcastLD:
@@ -2235,6 +2255,51 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
       }
       break;
     }
+    case kX64F64x2Splat: {
+      XMMRegister dst = i.OutputSimd128Register();
+      if (instr->InputAt(0)->IsFPRegister()) {
+        __ pshufd(dst, i.InputDoubleRegister(0), 0x44);
+      } else {
+        __ pshufd(dst, i.InputOperand(0), 0x44);
+      }
+      break;
+    }
+    case kX64F64x2ReplaceLane: {
+      CpuFeatureScope sse_scope(tasm(), SSE4_1);
+      if (instr->InputAt(2)->IsFPRegister()) {
+        __ movq(kScratchRegister, i.InputDoubleRegister(2));
+        __ pinsrq(i.OutputSimd128Register(), kScratchRegister, i.InputInt8(1));
+      } else {
+        __ pinsrq(i.OutputSimd128Register(), i.InputOperand(2), i.InputInt8(1));
+      }
+      break;
+    }
+    case kX64F64x2ExtractLane: {
+      CpuFeatureScope sse_scope(tasm(), SSE4_1);
+      __ pextrq(kScratchRegister, i.InputSimd128Register(0), i.InputInt8(1));
+      __ movq(i.OutputDoubleRegister(), kScratchRegister);
+      break;
+    }
+    case kX64F64x2Eq: {
+      DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
+      __ cmpeqpd(i.OutputSimd128Register(), i.InputSimd128Register(1));
+      break;
+    }
+    case kX64F64x2Ne: {
+      DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
+      __ cmpneqpd(i.OutputSimd128Register(), i.InputSimd128Register(1));
+      break;
+    }
+    case kX64F64x2Lt: {
+      DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
+      __ cmpltpd(i.OutputSimd128Register(), i.InputSimd128Register(1));
+      break;
+    }
+    case kX64F64x2Le: {
+      DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
+      __ cmplepd(i.OutputSimd128Register(), i.InputSimd128Register(1));
+      break;
+    }
     // TODO(gdeepti): Get rid of redundant moves for F32x4Splat/Extract below
     case kX64F32x4Splat: {
       XMMRegister dst = i.OutputSimd128Register();
@@ -2400,6 +2465,171 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
       __ cmpleps(i.OutputSimd128Register(), i.InputSimd128Register(1));
       break;
     }
+    case kX64I64x2Splat: {
+      XMMRegister dst = i.OutputSimd128Register();
+      if (instr->InputAt(0)->IsRegister()) {
+        __ movq(dst, i.InputRegister(0));
+      } else {
+        __ movq(dst, i.InputOperand(0));
+      }
+      __ pshufd(dst, dst, 0x44);
+      break;
+    }
+    case kX64I64x2ExtractLane: {
+      CpuFeatureScope sse_scope(tasm(), SSE4_1);
+      __ pextrq(i.OutputRegister(), i.InputSimd128Register(0), i.InputInt8(1));
+      break;
+    }
+    case kX64I64x2ReplaceLane: {
+      CpuFeatureScope sse_scope(tasm(), SSE4_1);
+      if (instr->InputAt(2)->IsRegister()) {
+        __ pinsrq(i.OutputSimd128Register(), i.InputRegister(2),
+                  i.InputInt8(1));
+      } else {
+        __ pinsrq(i.OutputSimd128Register(), i.InputOperand(2), i.InputInt8(1));
+      }
+      break;
+    }
+    case kX64I64x2Neg: {
+      XMMRegister dst = i.OutputSimd128Register();
+      XMMRegister src = i.InputSimd128Register(0);
+      if (dst == src) {
+        __ movapd(kScratchDoubleReg, src);
+        src = kScratchDoubleReg;
+      }
+      __ pxor(dst, dst);
+      __ psubq(dst, src);
+      break;
+    }
+    case kX64I64x2Shl: {
+      __ psllq(i.OutputSimd128Register(), i.InputInt8(1));
+      break;
+    }
+    case kX64I64x2ShrS: {
+      // TODO(zhin): there is vpsraq but requires AVX512
+      CpuFeatureScope sse_scope(tasm(), SSE4_1);
+      // ShrS on each quadword one at a time
+      XMMRegister dst = i.OutputSimd128Register();
+      XMMRegister src = i.InputSimd128Register(0);
+
+      // lower quadword
+      __ pextrq(kScratchRegister, src, 0x0);
+      __ sarq(kScratchRegister, Immediate(i.InputInt8(1)));
+      __ pinsrq(dst, kScratchRegister, 0x0);
+
+      // upper quadword
+      __ pextrq(kScratchRegister, src, 0x1);
+      __ sarq(kScratchRegister, Immediate(i.InputInt8(1)));
+      __ pinsrq(dst, kScratchRegister, 0x1);
+      break;
+    }
+    case kX64I64x2Add: {
+      DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
+      __ paddq(i.OutputSimd128Register(), i.InputSimd128Register(1));
+      break;
+    }
+    case kX64I64x2Sub: {
+      DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
+      __ psubq(i.OutputSimd128Register(), i.InputSimd128Register(1));
+      break;
+    }
+    case kX64I64x2Mul: {
+      DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
+      CpuFeatureScope sse_scope(tasm(), SSE4_1);
+      XMMRegister left = i.InputSimd128Register(0);
+      XMMRegister right = i.InputSimd128Register(1);
+      XMMRegister tmp1 = i.ToSimd128Register(instr->TempAt(0));
+      XMMRegister tmp2 = i.ToSimd128Register(instr->TempAt(1));
+
+      __ movaps(tmp1, left);
+      __ movaps(tmp2, right);
+
+      // Multiply high dword of each qword of left with right.
+      __ psrlq(tmp1, 32);
+      __ pmuludq(tmp1, right);
+
+      // Multiply high dword of each qword of right with left.
+      __ psrlq(tmp2, 32);
+      __ pmuludq(tmp2, left);
+
+      __ paddq(tmp2, tmp1);
+      __ psllq(tmp2, 32);
+
+      __ pmuludq(left, right);
+      __ paddq(left, tmp2);  // left == dst
+      break;
+    }
+    case kX64I64x2Eq: {
+      DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
+      CpuFeatureScope sse_scope(tasm(), SSE4_1);
+      __ pcmpeqq(i.OutputSimd128Register(), i.InputSimd128Register(1));
+      break;
+    }
+    case kX64I64x2Ne: {
+      DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
+      CpuFeatureScope sse_scope(tasm(), SSE4_1);
+      __ pcmpeqq(i.OutputSimd128Register(), i.InputSimd128Register(1));
+      __ pcmpeqq(kScratchDoubleReg, kScratchDoubleReg);
+      __ pxor(i.OutputSimd128Register(), kScratchDoubleReg);
+      break;
+    }
+    case kX64I64x2GtS: {
+      DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
+      CpuFeatureScope sse_scope(tasm(), SSE4_2);
+      __ pcmpgtq(i.OutputSimd128Register(), i.InputSimd128Register(1));
+      break;
+    }
+    case kX64I64x2GeS: {
+      DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
+      CpuFeatureScope sse_scope(tasm(), SSE4_2);
+      XMMRegister dst = i.OutputSimd128Register();
+      XMMRegister src = i.InputSimd128Register(1);
+      XMMRegister tmp = i.ToSimd128Register(instr->TempAt(0));
+
+      __ movaps(tmp, src);
+      __ pcmpgtq(tmp, dst);
+      __ pcmpeqd(dst, dst);
+      __ pxor(dst, tmp);
+      break;
+    }
+    case kX64I64x2ShrU: {
+      __ psrlq(i.OutputSimd128Register(), i.InputInt8(1));
+      break;
+    }
+    case kX64I64x2GtU: {
+      DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
+      CpuFeatureScope sse_scope(tasm(), SSE4_2);
+      XMMRegister dst = i.OutputSimd128Register();
+      XMMRegister src = i.InputSimd128Register(1);
+      XMMRegister tmp = i.ToSimd128Register(instr->TempAt(0));
+
+      __ pcmpeqd(kScratchDoubleReg, kScratchDoubleReg);
+      __ psllq(kScratchDoubleReg, 63);
+
+      __ movaps(tmp, src);
+      __ pxor(tmp, kScratchDoubleReg);
+      __ pxor(dst, kScratchDoubleReg);
+      __ pcmpgtq(dst, tmp);
+      break;
+    }
+    case kX64I64x2GeU: {
+      DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
+      CpuFeatureScope sse_scope(tasm(), SSE4_2);
+      XMMRegister dst = i.OutputSimd128Register();
+      XMMRegister src = i.InputSimd128Register(1);
+      XMMRegister tmp = i.ToSimd128Register(instr->TempAt(0));
+
+      __ pcmpeqd(kScratchDoubleReg, kScratchDoubleReg);
+      __ psllq(kScratchDoubleReg, 63);
+
+      __ movaps(tmp, src);
+      __ pxor(dst, kScratchDoubleReg);
+      __ pxor(tmp, kScratchDoubleReg);
+      __ pcmpgtq(tmp, dst);
+      __ pcmpeqd(dst, dst);
+      __ pxor(dst, tmp);
+      break;
+    }
     case kX64I32x4Splat: {
       XMMRegister dst = i.OutputSimd128Register();
       if (instr->InputAt(0)->IsRegister()) {
@@ -3297,6 +3527,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
       __ por(dst, kScratchDoubleReg);
       break;
     }
+    case kX64S1x2AnyTrue:
    case kX64S1x4AnyTrue:
    case kX64S1x8AnyTrue:
    case kX64S1x16AnyTrue: {
@@ -3310,19 +3541,24 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
       __ cmovq(zero, dst, tmp);
       break;
     }
-    case kX64S1x4AllTrue:
-    case kX64S1x8AllTrue:
+    // Need to split up all the different lane structures because the
+    // comparison instruction used matters, e.g. given 0xff00, pcmpeqb returns
+    // 0x0011, pcmpeqw returns 0x0000, ptest will set ZF to 0 and 1
+    // respectively.
+    case kX64S1x2AllTrue: {
+      ASSEMBLE_SIMD_ALL_TRUE(pcmpeqq);
+      break;
+    }
+    case kX64S1x4AllTrue: {
+      ASSEMBLE_SIMD_ALL_TRUE(pcmpeqd);
+      break;
+    }
+    case kX64S1x8AllTrue: {
+      ASSEMBLE_SIMD_ALL_TRUE(pcmpeqw);
+      break;
+    }
     case kX64S1x16AllTrue: {
-      CpuFeatureScope sse_scope(tasm(), SSE4_1);
-      Register dst = i.OutputRegister();
-      XMMRegister src = i.InputSimd128Register(0);
-      Register tmp = i.TempRegister(0);
-      __ movq(tmp, Immediate(1));
-      __ xorq(dst, dst);
-      __ pcmpeqd(kScratchDoubleReg, kScratchDoubleReg);
-      __ pxor(kScratchDoubleReg, src);
-      __ ptest(kScratchDoubleReg, kScratchDoubleReg);
-      __ cmovq(zero, dst, tmp);
+      ASSEMBLE_SIMD_ALL_TRUE(pcmpeqb);
       break;
     }
     case kX64StackCheck:
@@ -3507,6 +3743,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
 #undef ASSEMBLE_SIMD_IMM_INSTR
 #undef ASSEMBLE_SIMD_PUNPCK_SHUFFLE
 #undef ASSEMBLE_SIMD_IMM_SHUFFLE
+#undef ASSEMBLE_SIMD_ALL_TRUE

 namespace {
@@ -3734,6 +3971,11 @@ void CodeGenerator::AssembleConstructFrame() {
     if (call_descriptor->IsCFunctionCall()) {
       __ pushq(rbp);
       __ movq(rbp, rsp);
+      if (info()->GetOutputStackFrameType() == StackFrame::C_WASM_ENTRY) {
+        __ Push(Immediate(StackFrame::TypeToMarker(StackFrame::C_WASM_ENTRY)));
+        // Reserve stack space for saving the c_entry_fp later.
+        __ AllocateStackSpace(kSystemPointerSize);
+      }
     } else if (call_descriptor->IsJSFunctionCall()) {
       __ Prologue();
       if (call_descriptor->PushArgumentCount()) {
@@ -3765,8 +4007,8 @@ void CodeGenerator::AssembleConstructFrame() {
     unwinding_info_writer_.MarkFrameConstructed(pc_base);
   }

-  int required_slots = frame()->GetTotalFrameSlotCount() -
-                       call_descriptor->CalculateFixedFrameSize();
+  int required_slots =
+      frame()->GetTotalFrameSlotCount() - frame()->GetFixedSlotCount();

   if (info()->is_osr()) {
     // TurboFan OSR-compiled functions cannot be entered directly.
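Editor's note: two of the SIMD sequences above are easiest to follow through their scalar equivalents. The ASSEMBLE_SIMD_ALL_TRUE macro computes "every lane non-zero" by comparing the input against an all-zero vector at the matching lane width (pxor + pcmpeq), then using ptest to set ZF iff no lane compared equal, and cmovq to materialize the boolean. The kX64I64x2Mul sequence builds a 64x64->64 multiply per lane out of the 32x32->64 pmuludq. A minimal stand-alone C++ sketch of both (the function names here are made up for illustration and are not V8 code):

    #include <cassert>
    #include <cstdint>

    // Mirrors ASSEMBLE_SIMD_ALL_TRUE for one 64-bit lane width: AllTrue means
    // "no lane equals zero". pcmpeq against a zeroed register marks the zero
    // lanes; ptest then sets ZF iff nothing was marked, and cmovq turns that
    // flag into 0 or 1.
    bool AllTrue64x2(const uint64_t lanes[2]) {
      bool any_lane_zero = (lanes[0] == 0) || (lanes[1] == 0);  // pxor + pcmpeqq
      return !any_lane_zero;                                    // ptest + cmovq
    }

    // Mirrors kX64I64x2Mul per lane: with only a 32x32->64 multiply available,
    // a * b mod 2^64 == lo(a)*lo(b) + ((hi(a)*lo(b) + lo(a)*hi(b)) << 32);
    // the hi(a)*hi(b) partial product is shifted out entirely mod 2^64.
    uint64_t Mul64ViaPmuludq(uint64_t a, uint64_t b) {
      uint64_t a_hi = a >> 32, a_lo = a & 0xFFFFFFFFu;  // psrlq tmp, 32
      uint64_t b_hi = b >> 32, b_lo = b & 0xFFFFFFFFu;
      uint64_t cross = a_hi * b_lo + a_lo * b_hi;       // pmuludq x2 + paddq
      return a_lo * b_lo + (cross << 32);               // pmuludq, psllq, paddq
    }

    int main() {
      const uint64_t v[2] = {1, 0x8000000000000000u};
      assert(AllTrue64x2(v));
      assert(Mul64ViaPmuludq(0x123456789ABCDEFull, 42) ==
             0x123456789ABCDEFull * 42);
      return 0;
    }

The same sign-bit trick seen in kX64I64x2GtU/GeU applies per lane: flipping the top bit of both operands (pxor with 0x8000...0000) turns an unsigned comparison into the signed pcmpgtq that SSE4.2 actually provides.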
@@ -3835,7 +4077,7 @@ void CodeGenerator::AssembleConstructFrame() {
       int slot_idx = 0;
       for (int i = 0; i < XMMRegister::kNumRegisters; i++) {
         if (!((1 << i) & saves_fp)) continue;
-        __ movdqu(Operand(rsp, kQuadWordSize * slot_idx),
+        __ Movdqu(Operand(rsp, kQuadWordSize * slot_idx),
                   XMMRegister::from_code(i));
         slot_idx++;
       }
@@ -3877,7 +4119,7 @@ void CodeGenerator::AssembleReturn(InstructionOperand* pop) {
       int slot_idx = 0;
       for (int i = 0; i < XMMRegister::kNumRegisters; i++) {
         if (!((1 << i) & saves_fp)) continue;
-        __ movdqu(XMMRegister::from_code(i),
+        __ Movdqu(XMMRegister::from_code(i),
                   Operand(rsp, kQuadWordSize * slot_idx));
         slot_idx++;
       }
@@ -3970,6 +4212,16 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
       }
       break;
     }
+    case Constant::kCompressedHeapObject: {
+      Handle<HeapObject> src_object = src.ToHeapObject();
+      RootIndex index;
+      if (IsMaterializableFromRoot(src_object, &index)) {
+        __ LoadRoot(dst, index);
+      } else {
+        __ Move(dst, src_object, RelocInfo::COMPRESSED_EMBEDDED_OBJECT);
+      }
+      break;
+    }
     case Constant::kDelayedStringConstant: {
       const StringConstantBase* src_constant = src.ToDelayedStringConstant();
       __ MoveStringConstant(dst, src_constant);
diff --git a/deps/v8/src/compiler/backend/x64/instruction-codes-x64.h b/deps/v8/src/compiler/backend/x64/instruction-codes-x64.h
index 57ef26dbd70d75..d6ac3f43dfaa88 100644
--- a/deps/v8/src/compiler/backend/x64/instruction-codes-x64.h
+++ b/deps/v8/src/compiler/backend/x64/instruction-codes-x64.h
@@ -58,7 +58,8 @@ namespace compiler {
   V(X64Popcnt32)                   \
   V(X64Bswap)                      \
   V(X64Bswap32)                    \
-  V(LFence)                        \
+  V(X64MFence)                     \
+  V(X64LFence)                     \
   V(SSEFloat32Cmp)                 \
   V(SSEFloat32Add)                 \
   V(SSEFloat32Sub)                 \
@@ -158,6 +159,15 @@ namespace compiler {
   V(X64Poke)                       \
   V(X64Peek)                       \
   V(X64StackCheck)                 \
+  V(X64F64x2Splat)                 \
+  V(X64F64x2ExtractLane)           \
+  V(X64F64x2ReplaceLane)           \
+  V(X64F64x2Abs)                   \
+  V(X64F64x2Neg)                   \
+  V(X64F64x2Eq)                    \
+  V(X64F64x2Ne)                    \
+  V(X64F64x2Lt)                    \
+  V(X64F64x2Le)                    \
   V(X64F32x4Splat)                 \
   V(X64F32x4ExtractLane)           \
   V(X64F32x4ReplaceLane)           \
@@ -177,6 +187,22 @@ namespace compiler {
   V(X64F32x4Ne)                    \
   V(X64F32x4Lt)                    \
   V(X64F32x4Le)                    \
+  V(X64I64x2Splat)                 \
+  V(X64I64x2ExtractLane)           \
+  V(X64I64x2ReplaceLane)           \
+  V(X64I64x2Neg)                   \
+  V(X64I64x2Shl)                   \
+  V(X64I64x2ShrS)                  \
+  V(X64I64x2Add)                   \
+  V(X64I64x2Sub)                   \
+  V(X64I64x2Mul)                   \
+  V(X64I64x2Eq)                    \
+  V(X64I64x2Ne)                    \
+  V(X64I64x2GtS)                   \
+  V(X64I64x2GeS)                   \
+  V(X64I64x2ShrU)                  \
+  V(X64I64x2GtU)                   \
+  V(X64I64x2GeU)                   \
   V(X64I32x4Splat)                 \
   V(X64I32x4ExtractLane)           \
   V(X64I32x4ReplaceLane)           \
@@ -293,6 +319,8 @@ namespace compiler {
   V(X64S8x8Reverse)                \
   V(X64S8x4Reverse)                \
   V(X64S8x2Reverse)                \
+  V(X64S1x2AnyTrue)                \
+  V(X64S1x2AllTrue)                \
   V(X64S1x4AnyTrue)                \
   V(X64S1x4AllTrue)                \
   V(X64S1x8AnyTrue)                \
diff --git a/deps/v8/src/compiler/backend/x64/instruction-scheduler-x64.cc b/deps/v8/src/compiler/backend/x64/instruction-scheduler-x64.cc
index 9d48e9175a6c36..6389ef2e503f73 100644
--- a/deps/v8/src/compiler/backend/x64/instruction-scheduler-x64.cc
+++ b/deps/v8/src/compiler/backend/x64/instruction-scheduler-x64.cc
@@ -124,6 +124,15 @@ int InstructionScheduler::GetTargetInstructionFlags(
     case kX64Lea:
     case kX64Dec32:
     case kX64Inc32:
+    case kX64F64x2Splat:
+    case kX64F64x2ExtractLane:
+    case kX64F64x2ReplaceLane:
+    case kX64F64x2Abs:
+    case kX64F64x2Neg:
+    case kX64F64x2Eq:
+    case kX64F64x2Ne:
+    case kX64F64x2Lt:
+    case kX64F64x2Le:
     case kX64F32x4Splat:
     case kX64F32x4ExtractLane:
     case kX64F32x4ReplaceLane:
@@ -143,6 +152,22 @@ int InstructionScheduler::GetTargetInstructionFlags(
     case kX64F32x4Ne:
     case kX64F32x4Lt:
     case kX64F32x4Le:
+    case kX64I64x2Splat:
+    case kX64I64x2ExtractLane:
+    case kX64I64x2ReplaceLane:
+    case kX64I64x2Neg:
+    case kX64I64x2Shl:
+    case kX64I64x2ShrS:
+    case kX64I64x2Add:
+    case kX64I64x2Sub:
+    case kX64I64x2Mul:
+    case kX64I64x2Eq:
+    case kX64I64x2Ne:
+    case kX64I64x2GtS:
+    case kX64I64x2GeS:
+    case kX64I64x2ShrU:
+    case kX64I64x2GtU:
+    case kX64I64x2GeU:
     case kX64I32x4Splat:
     case kX64I32x4ExtractLane:
     case kX64I32x4ReplaceLane:
@@ -233,6 +258,8 @@ int InstructionScheduler::GetTargetInstructionFlags(
     case kX64S128Not:
     case kX64S128Select:
     case kX64S128Zero:
+    case kX64S1x2AnyTrue:
+    case kX64S1x2AllTrue:
     case kX64S1x4AnyTrue:
     case kX64S1x4AllTrue:
     case kX64S1x8AnyTrue:
@@ -327,7 +354,8 @@ int InstructionScheduler::GetTargetInstructionFlags(
     case kX64Poke:
       return kHasSideEffect;

-    case kLFence:
+    case kX64MFence:
+    case kX64LFence:
       return kHasSideEffect;

     case kX64Word64AtomicLoadUint8:
diff --git a/deps/v8/src/compiler/backend/x64/instruction-selector-x64.cc b/deps/v8/src/compiler/backend/x64/instruction-selector-x64.cc
index a20590b8d3ceb4..a4908fb846167b 100644
--- a/deps/v8/src/compiler/backend/x64/instruction-selector-x64.cc
+++ b/deps/v8/src/compiler/backend/x64/instruction-selector-x64.cc
@@ -309,21 +309,19 @@ void InstructionSelector::VisitStackSlot(Node* node) {
        sequence()->AddImmediate(Constant(slot)), 0, nullptr);
 }

-void InstructionSelector::VisitDebugAbort(Node* node) {
+void InstructionSelector::VisitAbortCSAAssert(Node* node) {
   X64OperandGenerator g(this);
-  Emit(kArchDebugAbort, g.NoOutput(), g.UseFixed(node->InputAt(0), rdx));
+  Emit(kArchAbortCSAAssert, g.NoOutput(), g.UseFixed(node->InputAt(0), rdx));
 }

-void InstructionSelector::VisitLoad(Node* node) {
-  LoadRepresentation load_rep = LoadRepresentationOf(node->op());
+void InstructionSelector::VisitLoad(Node* node, Node* value,
+                                    InstructionCode opcode) {
   X64OperandGenerator g(this);
-
-  ArchOpcode opcode = GetLoadOpcode(load_rep);
   InstructionOperand outputs[] = {g.DefineAsRegister(node)};
   InstructionOperand inputs[3];
   size_t input_count = 0;
   AddressingMode mode =
-      g.GetEffectiveAddressMemoryOperand(node, inputs, &input_count);
+      g.GetEffectiveAddressMemoryOperand(value, inputs, &input_count);
   InstructionCode code = opcode | AddressingModeField::encode(mode);
   if (node->opcode() == IrOpcode::kProtectedLoad) {
     code |= MiscField::encode(kMemoryAccessProtected);
@@ -334,6 +332,11 @@ void InstructionSelector::VisitLoad(Node* node) {
   Emit(code, 1, outputs, input_count, inputs);
 }

+void InstructionSelector::VisitLoad(Node* node) {
+  LoadRepresentation load_rep = LoadRepresentationOf(node->op());
+  VisitLoad(node, node, GetLoadOpcode(load_rep));
+}
+
 void InstructionSelector::VisitPoisonedLoad(Node* node) { VisitLoad(node); }

 void InstructionSelector::VisitProtectedLoad(Node* node) { VisitLoad(node); }
@@ -898,7 +901,8 @@ void InstructionSelector::VisitInt32Sub(Node* node) {
       // Omit truncation and turn subtractions of constant values into immediate
       // "leal" instructions by negating the value.
       Emit(kX64Lea32 | AddressingModeField::encode(kMode_MRI),
-           g.DefineAsRegister(node), int64_input, g.TempImmediate(-imm));
+           g.DefineAsRegister(node), int64_input,
+           g.TempImmediate(base::NegateWithWraparound(imm)));
     }
     return;
   }
@@ -907,9 +911,9 @@ void InstructionSelector::VisitInt32Sub(Node* node) {
   if (m.left().Is(0)) {
     Emit(kX64Neg32, g.DefineSameAsFirst(node), g.UseRegister(m.right().node()));
   } else if (m.right().Is(0)) {
-    // TODO(jarin): We should be able to use {EmitIdentity} here
-    // (https://crbug.com/v8/7947).
-    Emit(kArchNop, g.DefineSameAsFirst(node), g.Use(m.left().node()));
+    // {EmitIdentity} reuses the virtual register of the first input
+    // for the output. This is exactly what we want here.
+    EmitIdentity(node);
   } else if (m.right().HasValue() && g.CanBeImmediate(m.right().node())) {
     // Turn subtractions of constant values into immediate "leal" instructions
     // by negating the value.
@@ -1254,23 +1258,47 @@ void InstructionSelector::VisitChangeTaggedSignedToCompressedSigned(
 }

 void InstructionSelector::VisitChangeCompressedToTagged(Node* node) {
-  X64OperandGenerator g(this);
   Node* const value = node->InputAt(0);
-  Emit(kX64DecompressAny, g.DefineAsRegister(node), g.Use(value));
+  if ((value->opcode() == IrOpcode::kLoad ||
+       value->opcode() == IrOpcode::kPoisonedLoad) &&
+      CanCover(node, value)) {
+    DCHECK_EQ(LoadRepresentationOf(value->op()).representation(),
+              MachineRepresentation::kCompressed);
+    VisitLoad(node, value, kX64MovqDecompressAnyTagged);
+  } else {
+    X64OperandGenerator g(this);
+    Emit(kX64DecompressAny, g.DefineAsRegister(node), g.Use(value));
+  }
 }

 void InstructionSelector::VisitChangeCompressedPointerToTaggedPointer(
     Node* node) {
-  X64OperandGenerator g(this);
   Node* const value = node->InputAt(0);
-  Emit(kX64DecompressPointer, g.DefineAsRegister(node), g.Use(value));
+  if ((value->opcode() == IrOpcode::kLoad ||
+       value->opcode() == IrOpcode::kPoisonedLoad) &&
+      CanCover(node, value)) {
+    DCHECK_EQ(LoadRepresentationOf(value->op()).representation(),
+              MachineRepresentation::kCompressedPointer);
+    VisitLoad(node, value, kX64MovqDecompressTaggedPointer);
+  } else {
+    X64OperandGenerator g(this);
+    Emit(kX64DecompressPointer, g.DefineAsRegister(node), g.Use(value));
+  }
 }

 void InstructionSelector::VisitChangeCompressedSignedToTaggedSigned(
     Node* node) {
-  X64OperandGenerator g(this);
   Node* const value = node->InputAt(0);
-  Emit(kX64DecompressSigned, g.DefineAsRegister(node), g.Use(value));
+  if ((value->opcode() == IrOpcode::kLoad ||
+       value->opcode() == IrOpcode::kPoisonedLoad) &&
+      CanCover(node, value)) {
+    DCHECK_EQ(LoadRepresentationOf(value->op()).representation(),
+              MachineRepresentation::kCompressedSigned);
+    VisitLoad(node, value, kX64MovqDecompressTaggedSigned);
+  } else {
+    X64OperandGenerator g(this);
+    Emit(kX64DecompressSigned, g.DefineAsRegister(node), g.Use(value));
+  }
 }

 namespace {
@@ -2343,6 +2371,11 @@ void InstructionSelector::VisitFloat64SilenceNaN(Node* node) {
        g.UseRegister(node->InputAt(0)));
 }

+void InstructionSelector::VisitMemoryBarrier(Node* node) {
+  X64OperandGenerator g(this);
+  Emit(kX64MFence, g.NoOutput());
+}
+
 void InstructionSelector::VisitWord32AtomicLoad(Node* node) {
   LoadRepresentation load_rep = LoadRepresentationOf(node->op());
   DCHECK(load_rep.representation() == MachineRepresentation::kWord8 ||
@@ -2545,12 +2578,18 @@ VISIT_ATOMIC_BINOP(Xor)
 #undef VISIT_ATOMIC_BINOP

 #define SIMD_TYPES(V) \
+  V(F64x2)            \
   V(F32x4)            \
+  V(I64x2)            \
   V(I32x4)            \
   V(I16x8)            \
   V(I8x16)

 #define SIMD_BINOP_LIST(V) \
+  V(F64x2Eq)               \
+  V(F64x2Ne)               \
+  V(F64x2Lt)               \
+  V(F64x2Le)               \
   V(F32x4Add)              \
   V(F32x4AddHoriz)         \
   V(F32x4Sub)              \
@@ -2561,6 +2600,11 @@ VISIT_ATOMIC_BINOP(Xor)
   V(F32x4Ne)               \
   V(F32x4Lt)               \
   V(F32x4Le)               \
+  V(I64x2Add)              \
+  V(I64x2Sub)              \
+  V(I64x2Eq)               \
+  V(I64x2Ne)               \
+  V(I64x2GtS)              \
   V(I32x4Add)              \
   V(I32x4AddHoriz)         \
   V(I32x4Sub)              \
@@ -2615,12 +2659,18 @@ VISIT_ATOMIC_BINOP(Xor)
   V(S128Or)                \
   V(S128Xor)

+#define SIMD_BINOP_ONE_TEMP_LIST(V) \
+  V(I64x2GeS)                       \
+  V(I64x2GtU)                       \
+  V(I64x2GeU)
+
 #define SIMD_UNOP_LIST(V)   \
   V(F32x4SConvertI32x4)     \
   V(F32x4Abs)               \
   V(F32x4Neg)               \
   V(F32x4RecipApprox)       \
   V(F32x4RecipSqrtApprox)   \
+  V(I64x2Neg)               \
   V(I32x4SConvertI16x8Low)  \
   V(I32x4SConvertI16x8High) \
   V(I32x4Neg)               \
@@ -2635,6 +2685,9 @@ VISIT_ATOMIC_BINOP(Xor)
   V(S128Not)

 #define SIMD_SHIFT_OPCODES(V) \
+  V(I64x2Shl)                 \
+  V(I64x2ShrS)                \
+  V(I64x2ShrU)                \
   V(I32x4Shl)                 \
   V(I32x4ShrS)                \
   V(I32x4ShrU)                \
@@ -2646,11 +2699,13 @@ VISIT_ATOMIC_BINOP(Xor)
   V(I8x16ShrU)

 #define SIMD_ANYTRUE_LIST(V) \
+  V(S1x2AnyTrue)             \
   V(S1x4AnyTrue)             \
   V(S1x8AnyTrue)             \
   V(S1x16AnyTrue)

 #define SIMD_ALLTRUE_LIST(V) \
+  V(S1x2AllTrue)             \
   V(S1x4AllTrue)             \
   V(S1x8AllTrue)             \
   V(S1x16AllTrue)
@@ -2721,6 +2776,18 @@ SIMD_BINOP_LIST(VISIT_SIMD_BINOP)
 #undef VISIT_SIMD_BINOP
 #undef SIMD_BINOP_LIST

+#define VISIT_SIMD_BINOP_ONE_TEMP(Opcode)                                  \
+  void InstructionSelector::Visit##Opcode(Node* node) {                    \
+    X64OperandGenerator g(this);                                           \
+    InstructionOperand temps[] = {g.TempSimd128Register()};                \
+    Emit(kX64##Opcode, g.DefineSameAsFirst(node),                          \
+         g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1)), \
+         arraysize(temps), temps);                                         \
+  }
+SIMD_BINOP_ONE_TEMP_LIST(VISIT_SIMD_BINOP_ONE_TEMP)
+#undef VISIT_SIMD_BINOP_ONE_TEMP
+#undef SIMD_BINOP_ONE_TEMP_LIST
+
 #define VISIT_SIMD_ANYTRUE(Opcode)                      \
   void InstructionSelector::Visit##Opcode(Node* node) { \
     X64OperandGenerator g(this);                        \
@@ -2751,12 +2818,33 @@ void InstructionSelector::VisitS128Select(Node* node) {
        g.UseRegister(node->InputAt(2)));
 }

+void InstructionSelector::VisitF64x2Abs(Node* node) {
+  X64OperandGenerator g(this);
+  Emit(kX64F64x2Abs, g.DefineSameAsFirst(node),
+       g.UseRegister(node->InputAt(0)));
+}
+
+void InstructionSelector::VisitF64x2Neg(Node* node) {
+  X64OperandGenerator g(this);
+  Emit(kX64F64x2Neg, g.DefineSameAsFirst(node),
+       g.UseRegister(node->InputAt(0)));
+}
+
 void InstructionSelector::VisitF32x4UConvertI32x4(Node* node) {
   X64OperandGenerator g(this);
   Emit(kX64F32x4UConvertI32x4, g.DefineSameAsFirst(node),
        g.UseRegister(node->InputAt(0)));
 }

+void InstructionSelector::VisitI64x2Mul(Node* node) {
+  X64OperandGenerator g(this);
+  InstructionOperand temps[] = {g.TempSimd128Register(),
+                                g.TempSimd128Register()};
+  Emit(kX64I64x2Mul, g.DefineSameAsFirst(node),
+       g.UseUniqueRegister(node->InputAt(0)),
+       g.UseUniqueRegister(node->InputAt(1)), arraysize(temps), temps);
+}
+
 void InstructionSelector::VisitI32x4SConvertF32x4(Node* node) {
   X64OperandGenerator g(this);
   Emit(kX64I32x4SConvertF32x4, g.DefineSameAsFirst(node),
diff --git a/deps/v8/src/compiler/bytecode-analysis.cc b/deps/v8/src/compiler/bytecode-analysis.cc
index 9c23cd460ab742..b44bec5fc88737 100644
--- a/deps/v8/src/compiler/bytecode-analysis.cc
+++ b/deps/v8/src/compiler/bytecode-analysis.cc
@@ -79,22 +79,28 @@ ResumeJumpTarget ResumeJumpTarget::AtLoopHeader(int loop_header_offset,
 }

 BytecodeAnalysis::BytecodeAnalysis(Handle<BytecodeArray> bytecode_array,
-                                   Zone* zone, bool do_liveness_analysis)
+                                   Zone* zone, BailoutId osr_bailout_id,
+                                   bool analyze_liveness)
     : bytecode_array_(bytecode_array),
-      do_liveness_analysis_(do_liveness_analysis),
       zone_(zone),
+      osr_bailout_id_(osr_bailout_id),
+      analyze_liveness_(analyze_liveness),
       loop_stack_(zone),
       loop_end_index_queue_(zone),
       resume_jump_targets_(zone),
       end_to_header_(zone),
       header_to_info_(zone),
       osr_entry_point_(-1),
-      liveness_map_(bytecode_array->length(), zone) {}
+      liveness_map_(bytecode_array->length(), zone) {
+  Analyze();
+}

 namespace {

-void UpdateInLiveness(Bytecode bytecode, BytecodeLivenessState& in_liveness,
-                      const interpreter::BytecodeArrayAccessor& accessor) {
+void UpdateInLiveness(
+    Bytecode bytecode,
+    BytecodeLivenessState& in_liveness,  // NOLINT(runtime/references)
+    const interpreter::BytecodeArrayAccessor& accessor) {
   int num_operands = Bytecodes::NumberOfOperands(bytecode);
   const OperandType* operand_types = Bytecodes::GetOperandTypes(bytecode);
@@ -201,12 +207,14 @@ void UpdateInLiveness(Bytecode bytecode, BytecodeLivenessState& in_liveness,
   }
 }

-void UpdateOutLiveness(Bytecode bytecode, BytecodeLivenessState& out_liveness,
-                       BytecodeLivenessState* next_bytecode_in_liveness,
-                       const interpreter::BytecodeArrayAccessor& accessor,
-                       const BytecodeLivenessMap& liveness_map) {
+void UpdateOutLiveness(
+    Bytecode bytecode,
+    BytecodeLivenessState& out_liveness,  // NOLINT(runtime/references)
+    BytecodeLivenessState* next_bytecode_in_liveness,
+    const interpreter::BytecodeArrayAccessor& accessor,
+    Handle<BytecodeArray> bytecode_array,
+    const BytecodeLivenessMap& liveness_map) {
   int current_offset = accessor.current_offset();
-  const Handle<BytecodeArray>& bytecode_array = accessor.bytecode_array();

   // Special case Suspend and Resume to just pass through liveness.
   if (bytecode == Bytecode::kSuspendGenerator ||
@@ -261,20 +269,24 @@ void UpdateOutLiveness(Bytecode bytecode, BytecodeLivenessState& out_liveness,
   }
 }

-void UpdateLiveness(Bytecode bytecode, BytecodeLiveness& liveness,
+void UpdateLiveness(Bytecode bytecode,
+                    BytecodeLiveness& liveness,  // NOLINT(runtime/references)
                     BytecodeLivenessState** next_bytecode_in_liveness,
                     const interpreter::BytecodeArrayAccessor& accessor,
+                    Handle<BytecodeArray> bytecode_array,
                     const BytecodeLivenessMap& liveness_map) {
   UpdateOutLiveness(bytecode, *liveness.out, *next_bytecode_in_liveness,
-                    accessor, liveness_map);
+                    accessor, bytecode_array, liveness_map);
   liveness.in->CopyFrom(*liveness.out);
   UpdateInLiveness(bytecode, *liveness.in, accessor);

   *next_bytecode_in_liveness = liveness.in;
 }

-void UpdateAssignments(Bytecode bytecode, BytecodeLoopAssignments& assignments,
-                       const interpreter::BytecodeArrayAccessor& accessor) {
+void UpdateAssignments(
+    Bytecode bytecode,
+    BytecodeLoopAssignments& assignments,  // NOLINT(runtime/references)
+    const interpreter::BytecodeArrayAccessor& accessor) {
   int num_operands = Bytecodes::NumberOfOperands(bytecode);
   const OperandType* operand_types = Bytecodes::GetOperandTypes(bytecode);
@@ -307,15 +319,13 @@ void UpdateAssignments(Bytecode bytecode, BytecodeLoopAssignments& assignments,

 }  // namespace

-void BytecodeAnalysis::Analyze(BailoutId osr_bailout_id) {
+void BytecodeAnalysis::Analyze() {
   loop_stack_.push({-1, nullptr});

   BytecodeLivenessState* next_bytecode_in_liveness = nullptr;
-
-  bool is_osr = !osr_bailout_id.IsNone();
-  int osr_loop_end_offset = is_osr ? osr_bailout_id.ToInt() : -1;
-
   int generator_switch_index = -1;
+  int osr_loop_end_offset = osr_bailout_id_.ToInt();
+  DCHECK_EQ(osr_loop_end_offset < 0, osr_bailout_id_.IsNone());

   interpreter::BytecodeArrayRandomIterator iterator(bytecode_array(), zone());
   for (iterator.GoToEnd(); iterator.IsValid(); --iterator) {
@@ -337,14 +347,14 @@ void BytecodeAnalysis::Analyze(BailoutId osr_bailout_id) {
       if (current_offset == osr_loop_end_offset) {
         osr_entry_point_ = loop_header;
       } else if (current_offset < osr_loop_end_offset) {
-        // Check we've found the osr_entry_point if we've gone past the
+        // Assert that we've found the osr_entry_point if we've gone past the
         // osr_loop_end_offset. Note, we are iterating the bytecode in reverse,
-        // so the less than in the check is correct.
-        DCHECK_NE(-1, osr_entry_point_);
+        // so the less-than in the above condition is correct.
+        DCHECK_LE(0, osr_entry_point_);
       }

       // Save the index so that we can do another pass later.
-      if (do_liveness_analysis_) {
+      if (analyze_liveness_) {
         loop_end_index_queue_.push_back(iterator.current_index());
       }
     } else if (loop_stack_.size() > 1) {
@@ -357,8 +367,8 @@ void BytecodeAnalysis::Analyze(BailoutId osr_bailout_id) {
       // information we currently have.
       UpdateAssignments(bytecode, current_loop_info->assignments(), iterator);

-      // Update suspend counts for this loop, though only if not OSR.
-      if (!is_osr && bytecode == Bytecode::kSuspendGenerator) {
+      // Update suspend counts for this loop.
+      if (bytecode == Bytecode::kSuspendGenerator) {
         int suspend_id = iterator.GetUnsignedImmediateOperand(3);
         int resume_offset = current_offset + iterator.current_bytecode_size();
         current_loop_info->AddResumeTarget(
@@ -412,7 +422,7 @@ void BytecodeAnalysis::Analyze(BailoutId osr_bailout_id) {
           }
         }
       }
-    } else if (!is_osr && bytecode == Bytecode::kSuspendGenerator) {
+    } else if (bytecode == Bytecode::kSuspendGenerator) {
       // If we're not in a loop, we still need to look for suspends.
       // TODO(leszeks): It would be nice to de-duplicate this with the in-loop
      // case
@@ -422,11 +432,11 @@ void BytecodeAnalysis::Analyze(BailoutId osr_bailout_id) {
           ResumeJumpTarget::Leaf(suspend_id, resume_offset));
     }

-    if (do_liveness_analysis_) {
+    if (analyze_liveness_) {
       BytecodeLiveness& liveness = liveness_map_.InitializeLiveness(
           current_offset, bytecode_array()->register_count(), zone());
       UpdateLiveness(bytecode, liveness, &next_bytecode_in_liveness, iterator,
-                     liveness_map_);
+                     bytecode_array(), liveness_map_);
     }
   }

@@ -435,7 +445,7 @@ void BytecodeAnalysis::Analyze(BailoutId osr_bailout_id) {

   DCHECK(ResumeJumpTargetsAreValid());

-  if (!do_liveness_analysis_) return;
+  if (!analyze_liveness_) return;

   // At this point, every bytecode has a valid in and out liveness, except for
   // propagating liveness across back edges (i.e. JumpLoop). Subsequent liveness
@@ -489,12 +499,13 @@ void BytecodeAnalysis::Analyze(BailoutId osr_bailout_id) {
       BytecodeLiveness& liveness = liveness_map_.GetLiveness(current_offset);

       UpdateLiveness(bytecode, liveness, &next_bytecode_in_liveness, iterator,
-                     liveness_map_);
+                     bytecode_array(), liveness_map_);
     }
     // Now we are at the loop header. Since the in-liveness of the header
     // can't change, we need only to update the out-liveness.
     UpdateOutLiveness(iterator.current_bytecode(), *header_liveness.out,
-                      next_bytecode_in_liveness, iterator, liveness_map_);
+                      next_bytecode_in_liveness, iterator, bytecode_array(),
+                      liveness_map_);
   }

   // Process the generator switch statement separately, once the loops are done.
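Editor's note: the liveness pass being reshuffled above is a standard backward dataflow analysis: a bytecode's out-liveness is the union of the in-liveness of its successors, its in-liveness is (out-liveness minus registers written) plus registers read, and JumpLoop back edges are re-processed (the loop_end_index_queue_ pass) until a fixpoint is reached. A compact stand-alone sketch of the per-instruction step, using a hypothetical Instruction type rather than V8's actual data structures:

    #include <bitset>
    #include <vector>

    constexpr int kMaxRegs = 128;
    using Liveness = std::bitset<kMaxRegs>;  // one bit per interpreter register

    // Hypothetical stand-in for a bytecode: which registers it reads/writes.
    struct Instruction {
      Liveness reads;
      Liveness writes;
    };

    // Mirrors UpdateInLiveness/UpdateOutLiveness: a register is live-in if it
    // is read here, or live-out of this instruction and not overwritten here.
    Liveness LiveIn(const Instruction& insn, const Liveness& live_out) {
      return (live_out & ~insn.writes) | insn.reads;
    }

    // One backward sweep over straight-line code, as in the main reverse loop
    // of BytecodeAnalysis::Analyze(). Loop back edges would be handled by
    // re-running the affected range until nothing changes, which is what the
    // queued loop-end pass above does.
    std::vector<Liveness> AnalyzeStraightLine(
        const std::vector<Instruction>& code) {
      std::vector<Liveness> live_in(code.size());
      Liveness live_out;  // empty at the very end of the bytecode
      for (size_t i = code.size(); i-- > 0;) {
        live_in[i] = LiveIn(code[i], live_out);
        live_out = live_in[i];  // predecessor's out is this bytecode's in
      }
      return live_in;
    }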
@@ -533,12 +544,12 @@ void BytecodeAnalysis::Analyze(BailoutId osr_bailout_id) { DCHECK_NE(bytecode, Bytecode::kJumpLoop); UpdateLiveness(bytecode, liveness, &next_bytecode_in_liveness, iterator, - liveness_map_); + bytecode_array(), liveness_map_); } } } - DCHECK(do_liveness_analysis_); + DCHECK(analyze_liveness_); if (FLAG_trace_environment_liveness) { StdoutStream of; PrintLivenessTo(of); @@ -610,14 +621,14 @@ const LoopInfo& BytecodeAnalysis::GetLoopInfoFor(int header_offset) const { const BytecodeLivenessState* BytecodeAnalysis::GetInLivenessFor( int offset) const { - if (!do_liveness_analysis_) return nullptr; + if (!analyze_liveness_) return nullptr; return liveness_map_.GetInLiveness(offset); } const BytecodeLivenessState* BytecodeAnalysis::GetOutLivenessFor( int offset) const { - if (!do_liveness_analysis_) return nullptr; + if (!analyze_liveness_) return nullptr; return liveness_map_.GetOutLiveness(offset); } @@ -662,9 +673,8 @@ bool BytecodeAnalysis::ResumeJumpTargetsAreValid() { } // If the iterator is invalid, we've reached the end without finding the - // generator switch. Similarly, if we are OSR-ing, we're not resuming, so we - // need no jump targets. So, ensure there are no jump targets and exit. - if (!iterator.IsValid() || HasOsrEntryPoint()) { + // generator switch. So, ensure there are no jump targets and exit. + if (!iterator.IsValid()) { // Check top-level. if (!resume_jump_targets().empty()) { PrintF(stderr, @@ -758,14 +768,14 @@ bool BytecodeAnalysis::ResumeJumpTargetLeavesResolveSuspendIds( valid = false; } else { // Make sure we're resuming to a Resume bytecode - interpreter::BytecodeArrayAccessor assessor(bytecode_array(), + interpreter::BytecodeArrayAccessor accessor(bytecode_array(), target.target_offset()); - if (assessor.current_bytecode() != Bytecode::kResumeGenerator) { + if (accessor.current_bytecode() != Bytecode::kResumeGenerator) { PrintF(stderr, "Expected resume target for id %d, offset %d, to be " "ResumeGenerator, but found %s\n", target.suspend_id(), target.target_offset(), - Bytecodes::ToString(assessor.current_bytecode())); + Bytecodes::ToString(accessor.current_bytecode())); valid = false; } @@ -820,7 +830,7 @@ bool BytecodeAnalysis::LivenessIsValid() { previous_liveness.CopyFrom(*liveness.out); UpdateOutLiveness(bytecode, *liveness.out, next_bytecode_in_liveness, - iterator, liveness_map_); + iterator, bytecode_array(), liveness_map_); // UpdateOutLiveness skips kJumpLoop, so we update it manually. if (bytecode == Bytecode::kJumpLoop) { int target_offset = iterator.GetJumpTargetOffset(); diff --git a/deps/v8/src/compiler/bytecode-analysis.h b/deps/v8/src/compiler/bytecode-analysis.h index 53f86ca3063c27..32c5168466e7da 100644 --- a/deps/v8/src/compiler/bytecode-analysis.h +++ b/deps/v8/src/compiler/bytecode-analysis.h @@ -92,18 +92,14 @@ struct V8_EXPORT_PRIVATE LoopInfo { ZoneVector resume_jump_targets_; }; -class V8_EXPORT_PRIVATE BytecodeAnalysis { +// Analyze the bytecodes to find the loop ranges, loop nesting, loop assignments +// and liveness. NOTE: The broker/serializer relies on the fact that an +// analysis for OSR (osr_bailout_id is not None) subsumes an analysis for +// non-OSR (osr_bailout_id is None). +class V8_EXPORT_PRIVATE BytecodeAnalysis : public ZoneObject { public: BytecodeAnalysis(Handle bytecode_array, Zone* zone, - bool do_liveness_analysis); - - // Analyze the bytecodes to find the loop ranges, loop nesting, loop - // assignments and liveness, under the assumption that there is an OSR bailout - // at {osr_bailout_id}. 
- // - // No other methods in this class return valid information until this has been - // called. - void Analyze(BailoutId osr_bailout_id); + BailoutId osr_bailout_id, bool analyze_liveness); // Return true if the given offset is a loop header bool IsLoopHeader(int offset) const; @@ -118,23 +114,30 @@ class V8_EXPORT_PRIVATE BytecodeAnalysis { return resume_jump_targets_; } - // True if the current analysis has an OSR entry point. - bool HasOsrEntryPoint() const { return osr_entry_point_ != -1; } - - int osr_entry_point() const { return osr_entry_point_; } - - // Gets the in-liveness for the bytecode at {offset}. + // Gets the in-/out-liveness for the bytecode at {offset}. const BytecodeLivenessState* GetInLivenessFor(int offset) const; - - // Gets the out-liveness for the bytecode at {offset}. const BytecodeLivenessState* GetOutLivenessFor(int offset) const; + // In the case of OSR, the analysis also computes the (bytecode offset of the) + // OSR entry point from the {osr_bailout_id} that was given to the + // constructor. + int osr_entry_point() const { + CHECK_LE(0, osr_entry_point_); + return osr_entry_point_; + } + // Return the osr_bailout_id (for verification purposes). + BailoutId osr_bailout_id() const { return osr_bailout_id_; } + + // Return whether liveness analysis was performed (for verification purposes). + bool liveness_analyzed() const { return analyze_liveness_; } + private: struct LoopStackEntry { int header_offset; LoopInfo* loop_info; }; + void Analyze(); void PushLoop(int loop_header, int loop_end); #if DEBUG @@ -153,17 +156,15 @@ class V8_EXPORT_PRIVATE BytecodeAnalysis { std::ostream& PrintLivenessTo(std::ostream& os) const; Handle const bytecode_array_; - bool const do_liveness_analysis_; Zone* const zone_; - + BailoutId const osr_bailout_id_; + bool const analyze_liveness_; ZoneStack loop_stack_; ZoneVector loop_end_index_queue_; ZoneVector resume_jump_targets_; - ZoneMap end_to_header_; ZoneMap header_to_info_; int osr_entry_point_; - BytecodeLivenessMap liveness_map_; DISALLOW_COPY_AND_ASSIGN(BytecodeAnalysis); diff --git a/deps/v8/src/compiler/bytecode-graph-builder.cc b/deps/v8/src/compiler/bytecode-graph-builder.cc index 0ab8f85670c349..7c7144632074dd 100644 --- a/deps/v8/src/compiler/bytecode-graph-builder.cc +++ b/deps/v8/src/compiler/bytecode-graph-builder.cc @@ -6,9 +6,11 @@ #include "src/ast/ast.h" #include "src/codegen/source-position-table.h" +#include "src/codegen/tick-counter.h" #include "src/compiler/access-builder.h" #include "src/compiler/bytecode-analysis.h" #include "src/compiler/compiler-source-position-table.h" +#include "src/compiler/js-heap-broker.h" #include "src/compiler/linkage.h" #include "src/compiler/node-matchers.h" #include "src/compiler/operator-properties.h" @@ -32,14 +34,15 @@ namespace compiler { class BytecodeGraphBuilder { public: BytecodeGraphBuilder(JSHeapBroker* broker, Zone* local_zone, - Handle bytecode_array, - Handle shared, - Handle feedback_vector, - BailoutId osr_offset, JSGraph* jsgraph, + BytecodeArrayRef bytecode_array, + SharedFunctionInfoRef shared, + FeedbackVectorRef feedback_vector, BailoutId osr_offset, + JSGraph* jsgraph, CallFrequency const& invocation_frequency, SourcePositionTable* source_positions, - Handle native_context, int inlining_id, - BytecodeGraphBuilderFlags flags); + NativeContextRef native_context, int inlining_id, + BytecodeGraphBuilderFlags flags, + TickCounter* tick_counter); // Creates a graph by visiting bytecodes. 
void CreateGraph(); @@ -318,12 +321,8 @@ class BytecodeGraphBuilder { return jsgraph_->simplified(); } Zone* local_zone() const { return local_zone_; } - const Handle& bytecode_array() const { - return bytecode_array_; - } - const Handle& feedback_vector() const { - return feedback_vector_; - } + const BytecodeArrayRef bytecode_array() const { return bytecode_array_; } + FeedbackVectorRef feedback_vector() const { return feedback_vector_; } const JSTypeHintLowering& type_hint_lowering() const { return type_hint_lowering_; } @@ -332,7 +331,7 @@ class BytecodeGraphBuilder { } SourcePositionTableIterator& source_position_iterator() { - return source_position_iterator_; + return *source_position_iterator_.get(); } interpreter::BytecodeArrayIterator& bytecode_iterator() { @@ -343,8 +342,6 @@ class BytecodeGraphBuilder { return bytecode_analysis_; } - void RunBytecodeAnalysis() { bytecode_analysis_.Analyze(osr_offset_); } - int currently_peeled_loop_offset() const { return currently_peeled_loop_offset_; } @@ -368,9 +365,9 @@ class BytecodeGraphBuilder { needs_eager_checkpoint_ = value; } - Handle shared_info() const { return shared_info_; } + SharedFunctionInfoRef shared_info() const { return shared_info_; } - Handle native_context() const { return native_context_; } + NativeContextRef native_context() const { return native_context_; } JSHeapBroker* broker() const { return broker_; } @@ -382,15 +379,15 @@ class BytecodeGraphBuilder { Zone* const local_zone_; JSGraph* const jsgraph_; CallFrequency const invocation_frequency_; - Handle const bytecode_array_; - Handle const feedback_vector_; + BytecodeArrayRef const bytecode_array_; + FeedbackVectorRef feedback_vector_; JSTypeHintLowering const type_hint_lowering_; const FrameStateFunctionInfo* const frame_state_function_info_; - SourcePositionTableIterator source_position_iterator_; + std::unique_ptr source_position_iterator_; interpreter::BytecodeArrayIterator bytecode_iterator_; - BytecodeAnalysis bytecode_analysis_; + BytecodeAnalysis const& bytecode_analysis_; Environment* environment_; - BailoutId const osr_offset_; + bool const osr_; int currently_peeled_loop_offset_; bool skip_next_stack_check_; @@ -434,10 +431,12 @@ class BytecodeGraphBuilder { SourcePosition const start_position_; - Handle const shared_info_; + SharedFunctionInfoRef const shared_info_; // The native context for which we optimize. 
- Handle const native_context_; + NativeContextRef const native_context_; + + TickCounter* const tick_counter_; static int const kBinaryOperationHintIndex = 1; static int const kCountOperationHintIndex = 0; @@ -938,13 +937,12 @@ Node* BytecodeGraphBuilder::Environment::Checkpoint( } BytecodeGraphBuilder::BytecodeGraphBuilder( - JSHeapBroker* broker, Zone* local_zone, - Handle bytecode_array, - Handle shared_info, - Handle feedback_vector, BailoutId osr_offset, - JSGraph* jsgraph, CallFrequency const& invocation_frequency, - SourcePositionTable* source_positions, Handle native_context, - int inlining_id, BytecodeGraphBuilderFlags flags) + JSHeapBroker* broker, Zone* local_zone, BytecodeArrayRef bytecode_array, + SharedFunctionInfoRef shared_info, FeedbackVectorRef feedback_vector, + BailoutId osr_offset, JSGraph* jsgraph, + CallFrequency const& invocation_frequency, + SourcePositionTable* source_positions, NativeContextRef native_context, + int inlining_id, BytecodeGraphBuilderFlags flags, TickCounter* tick_counter) : broker_(broker), local_zone_(local_zone), jsgraph_(jsgraph), @@ -952,22 +950,22 @@ BytecodeGraphBuilder::BytecodeGraphBuilder( bytecode_array_(bytecode_array), feedback_vector_(feedback_vector), type_hint_lowering_( - jsgraph, feedback_vector, + jsgraph, feedback_vector.object(), (flags & BytecodeGraphBuilderFlag::kBailoutOnUninitialized) ? JSTypeHintLowering::kBailoutOnUninitialized : JSTypeHintLowering::kNoFlags), frame_state_function_info_(common()->CreateFrameStateFunctionInfo( FrameStateType::kInterpretedFunction, - bytecode_array->parameter_count(), bytecode_array->register_count(), - shared_info)), - source_position_iterator_( - handle(bytecode_array->SourcePositionTableIfCollected(), isolate())), - bytecode_iterator_(bytecode_array), - bytecode_analysis_( - bytecode_array, local_zone, - flags & BytecodeGraphBuilderFlag::kAnalyzeEnvironmentLiveness), + bytecode_array.parameter_count(), bytecode_array.register_count(), + shared_info.object())), + bytecode_iterator_( + base::make_unique(bytecode_array)), + bytecode_analysis_(broker_->GetBytecodeAnalysis( + bytecode_array.object(), osr_offset, + flags & BytecodeGraphBuilderFlag::kAnalyzeEnvironmentLiveness, + !FLAG_concurrent_inlining)), environment_(nullptr), - osr_offset_(osr_offset), + osr_(!osr_offset.IsNone()), currently_peeled_loop_offset_(-1), skip_next_stack_check_(flags & BytecodeGraphBuilderFlag::kSkipFirstStackCheck), @@ -981,9 +979,23 @@ BytecodeGraphBuilder::BytecodeGraphBuilder( exit_controls_(local_zone), state_values_cache_(jsgraph), source_positions_(source_positions), - start_position_(shared_info->StartPosition(), inlining_id), + start_position_(shared_info.StartPosition(), inlining_id), shared_info_(shared_info), - native_context_(native_context) {} + native_context_(native_context), + tick_counter_(tick_counter) { + if (FLAG_concurrent_inlining) { + // With concurrent inlining on, the source position address doesn't change + // because it's been copied from the heap. + source_position_iterator_ = base::make_unique( + Vector(bytecode_array.source_positions_address(), + bytecode_array.source_positions_size())); + } else { + // Otherwise, we need to access the table through a handle. 
+ source_position_iterator_ = base::make_unique( + handle(bytecode_array.object()->SourcePositionTableIfCollected(), + isolate())); + } +} Node* BytecodeGraphBuilder::GetFunctionClosure() { if (!function_closure_.is_set()) { @@ -997,33 +1009,30 @@ Node* BytecodeGraphBuilder::GetFunctionClosure() { Node* BytecodeGraphBuilder::BuildLoadNativeContextField(int index) { Node* result = NewNode(javascript()->LoadContext(0, index, true)); - NodeProperties::ReplaceContextInput( - result, jsgraph()->HeapConstant(native_context())); + NodeProperties::ReplaceContextInput(result, + jsgraph()->Constant(native_context())); return result; } VectorSlotPair BytecodeGraphBuilder::CreateVectorSlotPair(int slot_id) { FeedbackSlot slot = FeedbackVector::ToSlot(slot_id); - FeedbackNexus nexus(feedback_vector(), slot); - return VectorSlotPair(feedback_vector(), slot, nexus.ic_state()); + FeedbackNexus nexus(feedback_vector().object(), slot); + return VectorSlotPair(feedback_vector().object(), slot, nexus.ic_state()); } void BytecodeGraphBuilder::CreateGraph() { - BytecodeArrayRef bytecode_array_ref(broker(), bytecode_array()); - SourcePositionTable::Scope pos_scope(source_positions_, start_position_); // Set up the basic structure of the graph. Outputs for {Start} are the formal // parameters (including the receiver) plus new target, number of arguments, // context and closure. - int actual_parameter_count = bytecode_array_ref.parameter_count() + 4; + int actual_parameter_count = bytecode_array().parameter_count() + 4; graph()->SetStart(graph()->NewNode(common()->Start(actual_parameter_count))); - Environment env( - this, bytecode_array_ref.register_count(), - bytecode_array_ref.parameter_count(), - bytecode_array_ref.incoming_new_target_or_generator_register(), - graph()->start()); + Environment env(this, bytecode_array().register_count(), + bytecode_array().parameter_count(), + bytecode_array().incoming_new_target_or_generator_register(), + graph()->start()); set_environment(&env); VisitBytecodes(); @@ -1112,19 +1121,17 @@ class BytecodeGraphBuilder::OsrIteratorState { void ProcessOsrPrelude() { ZoneVector outer_loop_offsets(graph_builder_->local_zone()); - BytecodeAnalysis const& bytecode_analysis = - graph_builder_->bytecode_analysis(); - int osr_offset = bytecode_analysis.osr_entry_point(); + int osr_entry = graph_builder_->bytecode_analysis().osr_entry_point(); // We find here the outermost loop which contains the OSR loop. - int outermost_loop_offset = osr_offset; - while ((outermost_loop_offset = - bytecode_analysis.GetLoopInfoFor(outermost_loop_offset) - .parent_offset()) != -1) { + int outermost_loop_offset = osr_entry; + while ((outermost_loop_offset = graph_builder_->bytecode_analysis() + .GetLoopInfoFor(outermost_loop_offset) + .parent_offset()) != -1) { outer_loop_offsets.push_back(outermost_loop_offset); } outermost_loop_offset = - outer_loop_offsets.empty() ? osr_offset : outer_loop_offsets.back(); + outer_loop_offsets.empty() ? osr_entry : outer_loop_offsets.back(); graph_builder_->AdvanceIteratorsTo(outermost_loop_offset); // We save some iterators states at the offsets of the loop headers of the @@ -1142,14 +1149,16 @@ class BytecodeGraphBuilder::OsrIteratorState { } // Finishing by advancing to the OSR entry - graph_builder_->AdvanceIteratorsTo(osr_offset); + graph_builder_->AdvanceIteratorsTo(osr_entry); // Enters all remaining exception handler which end before the OSR loop // so that on next call of VisitSingleBytecode they will get popped from // the exception handlers stack. 
- graph_builder_->ExitThenEnterExceptionHandlers(osr_offset); + graph_builder_->ExitThenEnterExceptionHandlers(osr_entry); graph_builder_->set_currently_peeled_loop_offset( - bytecode_analysis.GetLoopInfoFor(osr_offset).parent_offset()); + graph_builder_->bytecode_analysis() + .GetLoopInfoFor(osr_entry) + .parent_offset()); } void RestoreState(int target_offset, int new_parent_offset) { @@ -1198,8 +1207,8 @@ void BytecodeGraphBuilder::RemoveMergeEnvironmentsBeforeOffset( void BytecodeGraphBuilder::AdvanceToOsrEntryAndPeelLoops() { OsrIteratorState iterator_states(this); iterator_states.ProcessOsrPrelude(); - int osr_offset = bytecode_analysis().osr_entry_point(); - DCHECK_EQ(bytecode_iterator().current_offset(), osr_offset); + int osr_entry = bytecode_analysis().osr_entry_point(); + DCHECK_EQ(bytecode_iterator().current_offset(), osr_entry); environment()->FillWithOsrValues(); @@ -1217,7 +1226,7 @@ void BytecodeGraphBuilder::AdvanceToOsrEntryAndPeelLoops() { // parent loop entirely, and so on. int current_parent_offset = - bytecode_analysis().GetLoopInfoFor(osr_offset).parent_offset(); + bytecode_analysis().GetLoopInfoFor(osr_entry).parent_offset(); while (current_parent_offset != -1) { const LoopInfo& current_parent_loop = bytecode_analysis().GetLoopInfoFor(current_parent_offset); @@ -1261,6 +1270,7 @@ void BytecodeGraphBuilder::AdvanceToOsrEntryAndPeelLoops() { } void BytecodeGraphBuilder::VisitSingleBytecode() { + tick_counter_->DoTick(); int current_offset = bytecode_iterator().current_offset(); UpdateSourcePosition(current_offset); ExitThenEnterExceptionHandlers(current_offset); @@ -1289,14 +1299,12 @@ void BytecodeGraphBuilder::VisitSingleBytecode() { } void BytecodeGraphBuilder::VisitBytecodes() { - RunBytecodeAnalysis(); - if (!bytecode_analysis().resume_jump_targets().empty()) { environment()->BindGeneratorState( jsgraph()->SmiConstant(JSGeneratorObject::kGeneratorExecuting)); } - if (bytecode_analysis().HasOsrEntryPoint()) { + if (osr_) { // We peel the OSR loop and any outer loop containing it except that we // leave the nodes corresponding to the whole outermost loop (including // the last copies of the loops it contains) to be generated by the normal @@ -1333,7 +1341,7 @@ void BytecodeGraphBuilder::VisitLdaSmi() { void BytecodeGraphBuilder::VisitLdaConstant() { Node* node = jsgraph()->Constant( - handle(bytecode_iterator().GetConstantForIndexOperand(0), isolate())); + bytecode_iterator().GetConstantForIndexOperand(0, isolate())); environment()->BindAccumulator(node); } @@ -1383,15 +1391,16 @@ Node* BytecodeGraphBuilder::BuildLoadGlobal(Handle name, uint32_t feedback_slot_index, TypeofMode typeof_mode) { VectorSlotPair feedback = CreateVectorSlotPair(feedback_slot_index); - DCHECK(IsLoadGlobalICKind(feedback_vector()->GetKind(feedback.slot()))); + DCHECK( + IsLoadGlobalICKind(feedback_vector().object()->GetKind(feedback.slot()))); const Operator* op = javascript()->LoadGlobal(name, feedback, typeof_mode); return NewNode(op); } void BytecodeGraphBuilder::VisitLdaGlobal() { PrepareEagerCheckpoint(); - Handle name( - Name::cast(bytecode_iterator().GetConstantForIndexOperand(0)), isolate()); + Handle name = Handle::cast( + bytecode_iterator().GetConstantForIndexOperand(0, isolate())); uint32_t feedback_slot_index = bytecode_iterator().GetIndexOperand(1); Node* node = BuildLoadGlobal(name, feedback_slot_index, TypeofMode::NOT_INSIDE_TYPEOF); @@ -1400,8 +1409,8 @@ void BytecodeGraphBuilder::VisitLdaGlobal() { void BytecodeGraphBuilder::VisitLdaGlobalInsideTypeof() { 
PrepareEagerCheckpoint(); - Handle name( - Name::cast(bytecode_iterator().GetConstantForIndexOperand(0)), isolate()); + Handle name = Handle::cast( + bytecode_iterator().GetConstantForIndexOperand(0, isolate())); uint32_t feedback_slot_index = bytecode_iterator().GetIndexOperand(1); Node* node = BuildLoadGlobal(name, feedback_slot_index, TypeofMode::INSIDE_TYPEOF); @@ -1410,8 +1419,8 @@ void BytecodeGraphBuilder::VisitLdaGlobalInsideTypeof() { void BytecodeGraphBuilder::VisitStaGlobal() { PrepareEagerCheckpoint(); - Handle name( - Name::cast(bytecode_iterator().GetConstantForIndexOperand(0)), isolate()); + Handle name = Handle::cast( + bytecode_iterator().GetConstantForIndexOperand(0, isolate())); VectorSlotPair feedback = CreateVectorSlotPair(bytecode_iterator().GetIndexOperand(1)); Node* value = environment()->LookupAccumulator(); @@ -1537,7 +1546,7 @@ void BytecodeGraphBuilder::VisitStaCurrentContextSlot() { void BytecodeGraphBuilder::BuildLdaLookupSlot(TypeofMode typeof_mode) { PrepareEagerCheckpoint(); Node* name = jsgraph()->Constant( - handle(bytecode_iterator().GetConstantForIndexOperand(0), isolate())); + bytecode_iterator().GetConstantForIndexOperand(0, isolate())); const Operator* op = javascript()->CallRuntime(typeof_mode == TypeofMode::NOT_INSIDE_TYPEOF ? Runtime::kLoadLookupSlot @@ -1622,7 +1631,7 @@ void BytecodeGraphBuilder::BuildLdaLookupContextSlot(TypeofMode typeof_mode) { set_environment(slow_environment); { Node* name = jsgraph()->Constant( - handle(bytecode_iterator().GetConstantForIndexOperand(0), isolate())); + bytecode_iterator().GetConstantForIndexOperand(0, isolate())); const Operator* op = javascript()->CallRuntime(typeof_mode == TypeofMode::NOT_INSIDE_TYPEOF @@ -1657,9 +1666,8 @@ void BytecodeGraphBuilder::BuildLdaLookupGlobalSlot(TypeofMode typeof_mode) { // Fast path, do a global load. 
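The "fast path, do a global load" split exists because a dynamic lookup can be intercepted by a context extension introduced by sloppy eval; only when no enclosing scope can have grown such an extension is the plain global load safe. A deliberately simplified sketch of that safety check, with hand-rolled context records (all names here are illustrative assumptions of the sketch):

// Walk 'depth' scopes outward; any non-null extension means an eval() may
// have introduced a shadowing binding, so the fast global load is unsafe.
struct Context { const void* extension; const Context* parent; };

bool FastGlobalLoadIsSafe(const Context* ctx, int depth) {
  for (int i = 0; i < depth && ctx != nullptr; ++i, ctx = ctx->parent) {
    if (ctx->extension != nullptr) return false;  // take the slow path
  }
  return true;
}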
{ PrepareEagerCheckpoint(); - Handle name( - Name::cast(bytecode_iterator().GetConstantForIndexOperand(0)), - isolate()); + Handle name = Handle::cast( + bytecode_iterator().GetConstantForIndexOperand(0, isolate())); uint32_t feedback_slot_index = bytecode_iterator().GetIndexOperand(1); Node* node = BuildLoadGlobal(name, feedback_slot_index, typeof_mode); environment()->BindAccumulator(node, Environment::kAttachFrameState); @@ -1675,7 +1683,7 @@ void BytecodeGraphBuilder::BuildLdaLookupGlobalSlot(TypeofMode typeof_mode) { set_environment(slow_environment); { Node* name = jsgraph()->Constant( - handle(bytecode_iterator().GetConstantForIndexOperand(0), isolate())); + bytecode_iterator().GetConstantForIndexOperand(0, isolate())); const Operator* op = javascript()->CallRuntime(typeof_mode == TypeofMode::NOT_INSIDE_TYPEOF @@ -1705,7 +1713,7 @@ void BytecodeGraphBuilder::VisitStaLookupSlot() { PrepareEagerCheckpoint(); Node* value = environment()->LookupAccumulator(); Node* name = jsgraph()->Constant( - handle(bytecode_iterator().GetConstantForIndexOperand(0), isolate())); + bytecode_iterator().GetConstantForIndexOperand(0, isolate())); int bytecode_flags = bytecode_iterator().GetFlagOperand(1); LanguageMode language_mode = static_cast( interpreter::StoreLookupSlotFlags::LanguageModeBit::decode( @@ -1729,8 +1737,8 @@ void BytecodeGraphBuilder::VisitLdaNamedProperty() { PrepareEagerCheckpoint(); Node* object = environment()->LookupRegister(bytecode_iterator().GetRegisterOperand(0)); - Handle name( - Name::cast(bytecode_iterator().GetConstantForIndexOperand(1)), isolate()); + Handle name = Handle::cast( + bytecode_iterator().GetConstantForIndexOperand(1, isolate())); VectorSlotPair feedback = CreateVectorSlotPair(bytecode_iterator().GetIndexOperand(2)); const Operator* op = javascript()->LoadNamed(name, feedback); @@ -1753,8 +1761,8 @@ void BytecodeGraphBuilder::VisitLdaNamedPropertyNoFeedback() { PrepareEagerCheckpoint(); Node* object = environment()->LookupRegister(bytecode_iterator().GetRegisterOperand(0)); - Handle name( - Name::cast(bytecode_iterator().GetConstantForIndexOperand(1)), isolate()); + Handle name = Handle::cast( + bytecode_iterator().GetConstantForIndexOperand(1, isolate())); const Operator* op = javascript()->LoadNamed(name, VectorSlotPair()); Node* node = NewNode(op, object); environment()->BindAccumulator(node, Environment::kAttachFrameState); @@ -1788,8 +1796,8 @@ void BytecodeGraphBuilder::BuildNamedStore(StoreMode store_mode) { Node* value = environment()->LookupAccumulator(); Node* object = environment()->LookupRegister(bytecode_iterator().GetRegisterOperand(0)); - Handle name( - Name::cast(bytecode_iterator().GetConstantForIndexOperand(1)), isolate()); + Handle name = Handle::cast( + bytecode_iterator().GetConstantForIndexOperand(1, isolate())); VectorSlotPair feedback = CreateVectorSlotPair(bytecode_iterator().GetIndexOperand(2)); @@ -1828,8 +1836,8 @@ void BytecodeGraphBuilder::VisitStaNamedPropertyNoFeedback() { Node* value = environment()->LookupAccumulator(); Node* object = environment()->LookupRegister(bytecode_iterator().GetRegisterOperand(0)); - Handle name( - Name::cast(bytecode_iterator().GetConstantForIndexOperand(1)), isolate()); + Handle name = Handle::cast( + bytecode_iterator().GetConstantForIndexOperand(1, isolate())); LanguageMode language_mode = static_cast(bytecode_iterator().GetFlagOperand(2)); const Operator* op = @@ -1902,10 +1910,8 @@ void BytecodeGraphBuilder::VisitPopContext() { } void BytecodeGraphBuilder::VisitCreateClosure() { - Handle 
shared_info( - SharedFunctionInfo::cast( - bytecode_iterator().GetConstantForIndexOperand(0)), - isolate()); + Handle shared_info = Handle::cast( + bytecode_iterator().GetConstantForIndexOperand(0, isolate())); AllocationType allocation = interpreter::CreateClosureFlags::PretenuredBit::decode( bytecode_iterator().GetFlagOperand(2)) @@ -1913,7 +1919,7 @@ void BytecodeGraphBuilder::VisitCreateClosure() { : AllocationType::kYoung; const Operator* op = javascript()->CreateClosure( shared_info, - feedback_vector()->GetClosureFeedbackCell( + feedback_vector().object()->GetClosureFeedbackCell( bytecode_iterator().GetIndexOperand(1)), handle(jsgraph()->isolate()->builtins()->builtin(Builtins::kCompileLazy), isolate()), @@ -1923,9 +1929,8 @@ void BytecodeGraphBuilder::VisitCreateClosure() { } void BytecodeGraphBuilder::VisitCreateBlockContext() { - Handle scope_info( - ScopeInfo::cast(bytecode_iterator().GetConstantForIndexOperand(0)), - isolate()); + Handle scope_info = Handle::cast( + bytecode_iterator().GetConstantForIndexOperand(0, isolate())); const Operator* op = javascript()->CreateBlockContext(scope_info); Node* context = NewNode(op); @@ -1933,9 +1938,8 @@ void BytecodeGraphBuilder::VisitCreateBlockContext() { } void BytecodeGraphBuilder::VisitCreateFunctionContext() { - Handle scope_info( - ScopeInfo::cast(bytecode_iterator().GetConstantForIndexOperand(0)), - isolate()); + Handle scope_info = Handle::cast( + bytecode_iterator().GetConstantForIndexOperand(0, isolate())); uint32_t slots = bytecode_iterator().GetUnsignedImmediateOperand(1); const Operator* op = javascript()->CreateFunctionContext(scope_info, slots, FUNCTION_SCOPE); @@ -1944,9 +1948,8 @@ void BytecodeGraphBuilder::VisitCreateFunctionContext() { } void BytecodeGraphBuilder::VisitCreateEvalContext() { - Handle scope_info( - ScopeInfo::cast(bytecode_iterator().GetConstantForIndexOperand(0)), - isolate()); + Handle scope_info = Handle::cast( + bytecode_iterator().GetConstantForIndexOperand(0, isolate())); uint32_t slots = bytecode_iterator().GetUnsignedImmediateOperand(1); const Operator* op = javascript()->CreateFunctionContext(scope_info, slots, EVAL_SCOPE); @@ -1957,9 +1960,8 @@ void BytecodeGraphBuilder::VisitCreateEvalContext() { void BytecodeGraphBuilder::VisitCreateCatchContext() { interpreter::Register reg = bytecode_iterator().GetRegisterOperand(0); Node* exception = environment()->LookupRegister(reg); - Handle scope_info( - ScopeInfo::cast(bytecode_iterator().GetConstantForIndexOperand(1)), - isolate()); + Handle scope_info = Handle::cast( + bytecode_iterator().GetConstantForIndexOperand(1, isolate())); const Operator* op = javascript()->CreateCatchContext(scope_info); Node* context = NewNode(op, exception); @@ -1969,9 +1971,8 @@ void BytecodeGraphBuilder::VisitCreateCatchContext() { void BytecodeGraphBuilder::VisitCreateWithContext() { Node* object = environment()->LookupRegister(bytecode_iterator().GetRegisterOperand(0)); - Handle scope_info( - ScopeInfo::cast(bytecode_iterator().GetConstantForIndexOperand(1)), - isolate()); + Handle scope_info = Handle::cast( + bytecode_iterator().GetConstantForIndexOperand(1, isolate())); const Operator* op = javascript()->CreateWithContext(scope_info); Node* context = NewNode(op, object); @@ -1997,9 +1998,8 @@ void BytecodeGraphBuilder::VisitCreateRestParameter() { } void BytecodeGraphBuilder::VisitCreateRegExpLiteral() { - Handle constant_pattern( - String::cast(bytecode_iterator().GetConstantForIndexOperand(0)), - isolate()); + Handle constant_pattern = Handle::cast( + 
bytecode_iterator().GetConstantForIndexOperand(0, isolate())); int const slot_id = bytecode_iterator().GetIndexOperand(1); VectorSlotPair pair = CreateVectorSlotPair(slot_id); int literal_flags = bytecode_iterator().GetFlagOperand(2); @@ -2009,10 +2009,9 @@ void BytecodeGraphBuilder::VisitCreateRegExpLiteral() { } void BytecodeGraphBuilder::VisitCreateArrayLiteral() { - Handle array_boilerplate_description( - ArrayBoilerplateDescription::cast( - bytecode_iterator().GetConstantForIndexOperand(0)), - isolate()); + Handle array_boilerplate_description = + Handle::cast( + bytecode_iterator().GetConstantForIndexOperand(0, isolate())); int const slot_id = bytecode_iterator().GetIndexOperand(1); VectorSlotPair pair = CreateVectorSlotPair(slot_id); int bytecode_flags = bytecode_iterator().GetFlagOperand(2); @@ -2046,10 +2045,9 @@ void BytecodeGraphBuilder::VisitCreateArrayFromIterable() { } void BytecodeGraphBuilder::VisitCreateObjectLiteral() { - Handle constant_properties( - ObjectBoilerplateDescription::cast( - bytecode_iterator().GetConstantForIndexOperand(0)), - isolate()); + Handle constant_properties = + Handle::cast( + bytecode_iterator().GetConstantForIndexOperand(0, isolate())); int const slot_id = bytecode_iterator().GetIndexOperand(1); VectorSlotPair pair = CreateVectorSlotPair(slot_id); int bytecode_flags = bytecode_iterator().GetFlagOperand(2); @@ -2082,29 +2080,13 @@ void BytecodeGraphBuilder::VisitCloneObject() { } void BytecodeGraphBuilder::VisitGetTemplateObject() { - Handle description( - TemplateObjectDescription::cast( - bytecode_iterator().GetConstantForIndexOperand(0)), - isolate()); + DisallowHeapAccessIf no_heap_access(FLAG_concurrent_inlining); FeedbackSlot slot = bytecode_iterator().GetSlotOperand(1); - FeedbackNexus nexus(feedback_vector(), slot); - - Handle cached_value; - if (nexus.GetFeedback() == MaybeObject::FromSmi(Smi::zero())) { - // It's not observable when the template object is created, so we - // can just create it eagerly during graph building and bake in - // the JSArray constant here. 
- cached_value = TemplateObjectDescription::GetTemplateObject( - isolate(), native_context(), description, shared_info(), slot.ToInt()); - nexus.vector().Set(slot, *cached_value); - } else { - cached_value = - handle(JSArray::cast(nexus.GetFeedback()->GetHeapObjectAssumeStrong()), - isolate()); - } - - Node* template_object = jsgraph()->HeapConstant(cached_value); - environment()->BindAccumulator(template_object); + ObjectRef description( + broker(), bytecode_iterator().GetConstantForIndexOperand(0, isolate())); + JSArrayRef template_object = + shared_info().GetTemplateObject(description, feedback_vector(), slot); + environment()->BindAccumulator(jsgraph()->Constant(template_object)); } Node* const* BytecodeGraphBuilder::GetCallArgumentsFromRegisters( @@ -2587,7 +2569,7 @@ void BytecodeGraphBuilder::VisitThrowReferenceErrorIfHole() { Node* check_for_hole = NewNode(simplified()->ReferenceEqual(), accumulator, jsgraph()->TheHoleConstant()); Node* name = jsgraph()->Constant( - handle(bytecode_iterator().GetConstantForIndexOperand(0), isolate())); + bytecode_iterator().GetConstantForIndexOperand(0, isolate())); BuildHoleCheckAndThrow(check_for_hole, Runtime::kThrowAccessedUninitializedVariable, name); } @@ -2658,7 +2640,7 @@ void BytecodeGraphBuilder::BuildBinaryOp(const Operator* op) { BinaryOperationHint BytecodeGraphBuilder::GetBinaryOperationHint( int operand_index) { FeedbackSlot slot = bytecode_iterator().GetSlotOperand(operand_index); - FeedbackNexus nexus(feedback_vector(), slot); + FeedbackNexus nexus(feedback_vector().object(), slot); return nexus.GetBinaryOperationFeedback(); } @@ -2666,14 +2648,14 @@ BinaryOperationHint BytecodeGraphBuilder::GetBinaryOperationHint( // feedback. CompareOperationHint BytecodeGraphBuilder::GetCompareOperationHint() { FeedbackSlot slot = bytecode_iterator().GetSlotOperand(1); - FeedbackNexus nexus(feedback_vector(), slot); + FeedbackNexus nexus(feedback_vector().object(), slot); return nexus.GetCompareOperationFeedback(); } // Helper function to create for-in mode from the recorded type feedback. ForInMode BytecodeGraphBuilder::GetForInMode(int operand_index) { FeedbackSlot slot = bytecode_iterator().GetSlotOperand(operand_index); - FeedbackNexus nexus(feedback_vector(), slot); + FeedbackNexus nexus(feedback_vector().object(), slot); switch (nexus.GetForInFeedback()) { case ForInHint::kNone: case ForInHint::kEnumCacheKeysAndIndices: @@ -2688,7 +2670,8 @@ ForInMode BytecodeGraphBuilder::GetForInMode(int operand_index) { CallFrequency BytecodeGraphBuilder::ComputeCallFrequency(int slot_id) const { if (invocation_frequency_.IsUnknown()) return CallFrequency(); - FeedbackNexus nexus(feedback_vector(), FeedbackVector::ToSlot(slot_id)); + FeedbackNexus nexus(feedback_vector().object(), + FeedbackVector::ToSlot(slot_id)); float feedback_frequency = nexus.ComputeCallFrequency(); if (feedback_frequency == 0.0f) { // This is to prevent multiplying zero and infinity. 
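The "prevent multiplying zero and infinity" comment is about IEEE-754 arithmetic: an unknown invocation frequency can behave like +infinity, and 0.0f * infinity is NaN, so zero feedback has to short-circuit before the multiply. A minimal sketch of the guard; modeling the unknown frequency as +infinity is this sketch's assumption, not something the hunk states:

// CombineCallFrequency(std::numeric_limits<float>::infinity(), 0.0f)
// must be 0.0f, not NaN, hence the explicit zero check.
float CombineCallFrequency(float invocation_frequency,
                           float feedback_frequency) {
  if (feedback_frequency == 0.0f) return 0.0f;  // avoid 0 * inf == NaN
  return feedback_frequency * invocation_frequency;
}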
@@ -2699,7 +2682,8 @@ CallFrequency BytecodeGraphBuilder::ComputeCallFrequency(int slot_id) const { } SpeculationMode BytecodeGraphBuilder::GetSpeculationMode(int slot_id) const { - FeedbackNexus nexus(feedback_vector(), FeedbackVector::ToSlot(slot_id)); + FeedbackNexus nexus(feedback_vector().object(), + FeedbackVector::ToSlot(slot_id)); return nexus.GetSpeculationMode(); } @@ -3301,8 +3285,7 @@ void BytecodeGraphBuilder::VisitSuspendGenerator() { CHECK_EQ(0, first_reg.index()); int register_count = static_cast(bytecode_iterator().GetRegisterCountOperand(2)); - int parameter_count_without_receiver = - bytecode_array()->parameter_count() - 1; + int parameter_count_without_receiver = bytecode_array().parameter_count() - 1; Node* suspend_id = jsgraph()->SmiConstant( bytecode_iterator().GetUnsignedImmediateOperand(3)); @@ -3442,8 +3425,7 @@ void BytecodeGraphBuilder::VisitResumeGenerator() { const BytecodeLivenessState* liveness = bytecode_analysis().GetOutLivenessFor( bytecode_iterator().current_offset()); - int parameter_count_without_receiver = - bytecode_array()->parameter_count() - 1; + int parameter_count_without_receiver = bytecode_array().parameter_count() - 1; // Mapping between registers and array indices must match that used in // InterpreterAssembler::ExportParametersAndRegisterFile. @@ -3836,7 +3818,10 @@ Node** BytecodeGraphBuilder::EnsureInputBufferSize(int size) { } void BytecodeGraphBuilder::ExitThenEnterExceptionHandlers(int current_offset) { - HandlerTable table(*bytecode_array()); + DisallowHeapAllocation no_allocation; + HandlerTable table(bytecode_array().handler_table_address(), + bytecode_array().handler_table_size(), + HandlerTable::kRangeBasedEncoding); // Potentially exit exception handlers. while (!exception_handlers_.empty()) { @@ -3890,7 +3875,7 @@ Node* BytecodeGraphBuilder::MakeNode(const Operator* op, int value_input_count, if (has_context) { *current_input++ = OperatorProperties::NeedsExactContext(op) ? environment()->Context() - : jsgraph()->HeapConstant(native_context()); + : jsgraph()->Constant(native_context()); } if (has_frame_state) { // The frame state will be inserted later. 
Here we misuse the {Dead} node @@ -4037,12 +4022,19 @@ void BuildGraphFromBytecode(JSHeapBroker* broker, Zone* local_zone, BailoutId osr_offset, JSGraph* jsgraph, CallFrequency const& invocation_frequency, SourcePositionTable* source_positions, - Handle native_context, int inlining_id, - BytecodeGraphBuilderFlags flags) { - BytecodeGraphBuilder builder(broker, local_zone, bytecode_array, shared, - feedback_vector, osr_offset, jsgraph, - invocation_frequency, source_positions, - native_context, inlining_id, flags); + Handle native_context, + int inlining_id, BytecodeGraphBuilderFlags flags, + TickCounter* tick_counter) { + BytecodeArrayRef bytecode_array_ref(broker, bytecode_array); + DCHECK(bytecode_array_ref.IsSerializedForCompilation()); + FeedbackVectorRef feedback_vector_ref(broker, feedback_vector); + SharedFunctionInfoRef shared_ref(broker, shared); + DCHECK(shared_ref.IsSerializedForCompilation(feedback_vector_ref)); + NativeContextRef native_context_ref(broker, native_context); + BytecodeGraphBuilder builder( + broker, local_zone, bytecode_array_ref, shared_ref, feedback_vector_ref, + osr_offset, jsgraph, invocation_frequency, source_positions, + native_context_ref, inlining_id, flags, tick_counter); builder.CreateGraph(); } diff --git a/deps/v8/src/compiler/bytecode-graph-builder.h b/deps/v8/src/compiler/bytecode-graph-builder.h index b9504a60868920..682569778f6990 100644 --- a/deps/v8/src/compiler/bytecode-graph-builder.h +++ b/deps/v8/src/compiler/bytecode-graph-builder.h @@ -11,6 +11,9 @@ #include "src/handles/handles.h" namespace v8 { + +class TickCounter; + namespace internal { class BytecodeArray; @@ -25,6 +28,9 @@ class SourcePositionTable; enum class BytecodeGraphBuilderFlag : uint8_t { kSkipFirstStackCheck = 1 << 0, + // TODO(neis): Remove liveness flag here when concurrent inlining is always + // on, because then the serializer will be the only place where we perform + // bytecode analysis. kAnalyzeEnvironmentLiveness = 1 << 1, kBailoutOnUninitialized = 1 << 2, }; @@ -39,8 +45,9 @@ void BuildGraphFromBytecode(JSHeapBroker* broker, Zone* local_zone, BailoutId osr_offset, JSGraph* jsgraph, CallFrequency const& invocation_frequency, SourcePositionTable* source_positions, - Handle native_context, int inlining_id, - BytecodeGraphBuilderFlags flags); + Handle native_context, + int inlining_id, BytecodeGraphBuilderFlags flags, + TickCounter* tick_counter); } // namespace compiler } // namespace internal diff --git a/deps/v8/src/compiler/code-assembler.cc b/deps/v8/src/compiler/code-assembler.cc index d8a01d6308f2d2..af0ba98ffd159b 100644 --- a/deps/v8/src/compiler/code-assembler.cc +++ b/deps/v8/src/compiler/code-assembler.cc @@ -226,8 +226,12 @@ void CodeAssembler::GenerateCheckMaybeObjectIsObject(Node* node, IntPtrConstant(kHeapObjectTagMask)), IntPtrConstant(kWeakHeapObjectTag)), &ok); - Node* message_node = StringConstant(location); - DebugAbort(message_node); + EmbeddedVector message; + SNPrintF(message, "no Object: %s", location); + Node* message_node = StringConstant(message.begin()); + // This somewhat misuses the AbortCSAAssert runtime function. This will print + // "abort: CSA_ASSERT failed: ", which is good enough. 
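The new GenerateCheckMaybeObjectIsObject body formats the location into a fixed-size EmbeddedVector before handing the string to AbortCSAAssert. A standalone equivalent using snprintf, which truncates instead of overflowing and always NUL-terminates; ReportNoObject and the stderr sink are stand-ins for the real abort machinery:

#include <cstdio>

void ReportNoObject(const char* location) {
  char message[128];  // stand-in for EmbeddedVector<char, N>
  std::snprintf(message, sizeof(message), "no Object: %s", location);
  std::fputs(message, stderr);  // stand-in for AbortCSAAssert(message)
}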
+ AbortCSAAssert(message_node); Unreachable(); Bind(&ok); } @@ -409,8 +413,8 @@ void CodeAssembler::ReturnRaw(Node* value) { return raw_assembler()->Return(value); } -void CodeAssembler::DebugAbort(Node* message) { - raw_assembler()->DebugAbort(message); +void CodeAssembler::AbortCSAAssert(Node* message) { + raw_assembler()->AbortCSAAssert(message); } void CodeAssembler::DebugBreak() { raw_assembler()->DebugBreak(); } @@ -441,16 +445,16 @@ void CodeAssembler::Bind(Label* label, AssemblerDebugInfo debug_info) { } #endif // DEBUG -Node* CodeAssembler::LoadFramePointer() { - return raw_assembler()->LoadFramePointer(); +TNode CodeAssembler::LoadFramePointer() { + return UncheckedCast(raw_assembler()->LoadFramePointer()); } -Node* CodeAssembler::LoadParentFramePointer() { - return raw_assembler()->LoadParentFramePointer(); +TNode CodeAssembler::LoadParentFramePointer() { + return UncheckedCast(raw_assembler()->LoadParentFramePointer()); } -Node* CodeAssembler::LoadStackPointer() { - return raw_assembler()->LoadStackPointer(); +TNode CodeAssembler::LoadStackPointer() { + return UncheckedCast(raw_assembler()->LoadStackPointer()); } TNode CodeAssembler::TaggedPoisonOnSpeculation( @@ -1140,14 +1144,6 @@ Node* CodeAssembler::Retain(Node* value) { return raw_assembler()->Retain(value); } -Node* CodeAssembler::ChangeTaggedToCompressed(Node* tagged) { - return raw_assembler()->ChangeTaggedToCompressed(tagged); -} - -Node* CodeAssembler::ChangeCompressedToTagged(Node* compressed) { - return raw_assembler()->ChangeCompressedToTagged(compressed); -} - Node* CodeAssembler::Projection(int index, Node* value) { DCHECK_LT(index, value->op()->ValueOutputCount()); return raw_assembler()->Projection(index, value); diff --git a/deps/v8/src/compiler/code-assembler.h b/deps/v8/src/compiler/code-assembler.h index 0f7ae640828ab4..cc432214aa1063 100644 --- a/deps/v8/src/compiler/code-assembler.h +++ b/deps/v8/src/compiler/code-assembler.h @@ -73,6 +73,9 @@ class PromiseReactionJobTask; class PromiseRejectReactionJobTask; class WasmDebugInfo; class Zone; +#define MAKE_FORWARD_DECLARATION(V, NAME, Name, name) class Name; +TORQUE_STRUCT_LIST_GENERATOR(MAKE_FORWARD_DECLARATION, UNUSED) +#undef MAKE_FORWARD_DECLARATION template class Signature; @@ -107,13 +110,13 @@ struct Uint32T : Word32T { struct Int16T : Int32T { static constexpr MachineType kMachineType = MachineType::Int16(); }; -struct Uint16T : Uint32T { +struct Uint16T : Uint32T, Int32T { static constexpr MachineType kMachineType = MachineType::Uint16(); }; struct Int8T : Int16T { static constexpr MachineType kMachineType = MachineType::Int8(); }; -struct Uint8T : Uint16T { +struct Uint8T : Uint16T, Int16T { static constexpr MachineType kMachineType = MachineType::Uint8(); }; @@ -147,6 +150,12 @@ struct Float64T : UntaggedT { static constexpr MachineType kMachineType = MachineType::Float64(); }; +#ifdef V8_COMPRESS_POINTERS +using TaggedT = Int32T; +#else +using TaggedT = IntPtrT; +#endif + // Result of a comparison operation. 
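The new TaggedT alias above encodes that a tagged slot is 32 bits wide under pointer compression and a full machine word otherwise. The same build-time switch in isolation (TaggedSlot is an illustrative name):

#include <cstdint>

#ifdef V8_COMPRESS_POINTERS
using TaggedSlot = int32_t;    // compressed: 32-bit offset from the heap base
#else
using TaggedSlot = intptr_t;   // uncompressed: a full machine word
#endif
static_assert(sizeof(TaggedSlot) <= sizeof(intptr_t),
              "a tagged slot never exceeds one machine word");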
struct BoolT : Word32T {}; @@ -329,6 +338,7 @@ class WasmExceptionObject; class WasmExceptionTag; class WasmExportedFunctionData; class WasmGlobalObject; +class WasmIndirectFunctionTable; class WasmJSFunctionData; class WasmMemoryObject; class WasmModuleObject; @@ -413,6 +423,10 @@ struct types_have_common_values { static const bool value = is_subtype::value || is_subtype::value; }; template +struct types_have_common_values { + static const bool value = types_have_common_values::value; +}; +template struct types_have_common_values { static const bool value = types_have_common_values::value; }; @@ -611,14 +625,15 @@ TNode Float64Add(TNode a, TNode b); V(Float64Sqrt, Float64T, Float64T) \ V(Float64Tan, Float64T, Float64T) \ V(Float64Tanh, Float64T, Float64T) \ - V(Float64ExtractLowWord32, Word32T, Float64T) \ - V(Float64ExtractHighWord32, Word32T, Float64T) \ + V(Float64ExtractLowWord32, Uint32T, Float64T) \ + V(Float64ExtractHighWord32, Uint32T, Float64T) \ V(BitcastTaggedToWord, IntPtrT, Object) \ + V(BitcastTaggedSignedToWord, IntPtrT, Smi) \ V(BitcastMaybeObjectToWord, IntPtrT, MaybeObject) \ V(BitcastWordToTagged, Object, WordT) \ V(BitcastWordToTaggedSigned, Smi, WordT) \ V(TruncateFloat64ToFloat32, Float32T, Float64T) \ - V(TruncateFloat64ToWord32, Word32T, Float64T) \ + V(TruncateFloat64ToWord32, Uint32T, Float64T) \ V(TruncateInt64ToInt32, Int32T, Int64T) \ V(ChangeFloat32ToFloat64, Float64T, Float32T) \ V(ChangeFloat64ToUint32, Uint32T, Float64T) \ @@ -628,7 +643,7 @@ TNode Float64Add(TNode a, TNode b); V(ChangeUint32ToFloat64, Float64T, Word32T) \ V(ChangeUint32ToUint64, Uint64T, Word32T) \ V(BitcastInt32ToFloat32, Float32T, Word32T) \ - V(BitcastFloat32ToInt32, Word32T, Float32T) \ + V(BitcastFloat32ToInt32, Uint32T, Float32T) \ V(RoundFloat64ToInt32, Int32T, Float64T) \ V(RoundInt32ToFloat32, Int32T, Float32T) \ V(Float64SilenceNaN, Float64T, Float64T) \ @@ -840,10 +855,13 @@ class V8_EXPORT_PRIVATE CodeAssembler { // TODO(jkummerow): The style guide wants pointers for output parameters. // https://google.github.io/styleguide/cppguide.html#Output_Parameters - bool ToInt32Constant(Node* node, int32_t& out_value); - bool ToInt64Constant(Node* node, int64_t& out_value); + bool ToInt32Constant(Node* node, + int32_t& out_value); // NOLINT(runtime/references) + bool ToInt64Constant(Node* node, + int64_t& out_value); // NOLINT(runtime/references) bool ToSmiConstant(Node* node, Smi* out_value); - bool ToIntPtrConstant(Node* node, intptr_t& out_value); + bool ToIntPtrConstant(Node* node, + intptr_t& out_value); // NOLINT(runtime/references) bool IsUndefinedConstant(TNode node); bool IsNullConstant(TNode node); @@ -872,7 +890,7 @@ class V8_EXPORT_PRIVATE CodeAssembler { void ReturnRaw(Node* value); - void DebugAbort(Node* message); + void AbortCSAAssert(Node* message); void DebugBreak(); void Unreachable(); void Comment(const char* msg) { @@ -938,11 +956,11 @@ class V8_EXPORT_PRIVATE CodeAssembler { Label** case_labels, size_t case_count); // Access to the frame pointer - Node* LoadFramePointer(); - Node* LoadParentFramePointer(); + TNode LoadFramePointer(); + TNode LoadParentFramePointer(); // Access to the stack pointer - Node* LoadStackPointer(); + TNode LoadStackPointer(); // Poison |value| on speculative paths. 
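The types_have_common_values trait reduces to a two-way subtype test, and the dual inheritance of Uint16T from both Uint32T and Int32T just above is what makes a 16-bit unsigned value "common" with both the signed and unsigned 32-bit types. A standalone rendering of the idea with std::is_base_of (struct names mirror the header, the trait name is illustrative):

#include <type_traits>

struct Word32T {};
struct Int32T : Word32T {};
struct Uint32T : Word32T {};
// A 16-bit unsigned value is representable as both int32 and uint32,
// which is exactly what the two base classes encode.
struct Uint16T : Uint32T, Int32T {};

template <class A, class B>
constexpr bool HaveCommonValues =
    std::is_base_of<A, B>::value || std::is_base_of<B, A>::value;

static_assert(HaveCommonValues<Uint16T, Int32T>, "uint16 fits in int32");
static_assert(HaveCommonValues<Uint16T, Uint32T>, "uint16 fits in uint32");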
TNode TaggedPoisonOnSpeculation(SloppyTNode value); @@ -1047,20 +1065,60 @@ class V8_EXPORT_PRIVATE CodeAssembler { CODE_ASSEMBLER_BINARY_OP_LIST(DECLARE_CODE_ASSEMBLER_BINARY_OP) #undef DECLARE_CODE_ASSEMBLER_BINARY_OP - TNode WordShr(TNode left, TNode right) { - return UncheckedCast( + TNode WordShr(TNode left, TNode right) { + return Unsigned( WordShr(static_cast(left), static_cast(right))); } TNode WordSar(TNode left, TNode right) { - return UncheckedCast( - WordSar(static_cast(left), static_cast(right))); + return Signed(WordSar(static_cast(left), static_cast(right))); + } + TNode WordShl(TNode left, TNode right) { + return Signed(WordShl(static_cast(left), static_cast(right))); + } + TNode WordShl(TNode left, TNode right) { + return Unsigned( + WordShl(static_cast(left), static_cast(right))); + } + + TNode Word32Shl(TNode left, TNode right) { + return Signed( + Word32Shl(static_cast(left), static_cast(right))); + } + TNode Word32Shl(TNode left, TNode right) { + return Unsigned( + Word32Shl(static_cast(left), static_cast(right))); + } + TNode Word32Shr(TNode left, TNode right) { + return Unsigned( + Word32Shr(static_cast(left), static_cast(right))); } TNode WordAnd(TNode left, TNode right) { - return UncheckedCast( + return Signed(WordAnd(static_cast(left), static_cast(right))); + } + TNode WordAnd(TNode left, TNode right) { + return Unsigned( WordAnd(static_cast(left), static_cast(right))); } + TNode Word32And(TNode left, TNode right) { + return Signed( + Word32And(static_cast(left), static_cast(right))); + } + TNode Word32And(TNode left, TNode right) { + return Unsigned( + Word32And(static_cast(left), static_cast(right))); + } + + TNode Word32Or(TNode left, TNode right) { + return Signed( + Word32Or(static_cast(left), static_cast(right))); + } + TNode Word32Or(TNode left, TNode right) { + return Unsigned( + Word32Or(static_cast(left), static_cast(right))); + } + template ::value && @@ -1106,6 +1164,15 @@ class V8_EXPORT_PRIVATE CodeAssembler { TNode Word64NotEqual(SloppyTNode left, SloppyTNode right); + TNode Word32Or(TNode left, TNode right) { + return UncheckedCast( + Word32Or(static_cast(left), static_cast(right))); + } + TNode Word32And(TNode left, TNode right) { + return UncheckedCast( + Word32And(static_cast(left), static_cast(right))); + } + TNode Int32Add(TNode left, TNode right) { return Signed( Int32Add(static_cast(left), static_cast(right))); @@ -1116,6 +1183,16 @@ class V8_EXPORT_PRIVATE CodeAssembler { Int32Add(static_cast(left), static_cast(right))); } + TNode Int32Sub(TNode left, TNode right) { + return Signed( + Int32Sub(static_cast(left), static_cast(right))); + } + + TNode Int32Mul(TNode left, TNode right) { + return Signed( + Int32Mul(static_cast(left), static_cast(right))); + } + TNode IntPtrAdd(SloppyTNode left, SloppyTNode right); TNode IntPtrDiv(TNode left, TNode right); TNode IntPtrSub(SloppyTNode left, SloppyTNode right); @@ -1195,6 +1272,12 @@ class V8_EXPORT_PRIVATE CodeAssembler { CODE_ASSEMBLER_UNARY_OP_LIST(DECLARE_CODE_ASSEMBLER_UNARY_OP) #undef DECLARE_CODE_ASSEMBLER_UNARY_OP + template + TNode BitcastTaggedToWord(TNode node) { + static_assert(sizeof(Dummy) < 0, + "Should use BitcastTaggedSignedToWord instead."); + } + // Changes a double to an inptr_t for pointer arithmetic outside of Smi range. // Assumes that the double can be exactly represented as an int. 
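The BitcastTaggedToWord template above is deliberately uncallable for Smi inputs: because the static_assert condition depends on the template parameter, it is only evaluated on instantiation, so the overload can exist yet fail to compile with a pointed message directing callers to BitcastTaggedSignedToWord. The same trick in isolation (all names here are illustrative):

struct Smi {};  // illustrative tagged-integer stand-in
long BitcastSignedToWordImpl(Smi) { return 0; }  // the intended entry point

// sizeof(Dummy) < 0 is always false but dependent, so the assert only
// fires if someone actually calls this overload with a Smi.
template <typename Dummy = Smi>
long BitcastTaggedToWordImpl(Smi) {
  static_assert(sizeof(Dummy) < 0, "use BitcastSignedToWordImpl instead");
  return 0;
}
// BitcastTaggedToWordImpl(Smi{});  // would fail to compile with the message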
TNode ChangeFloat64ToUintPtr(SloppyTNode value); @@ -1217,10 +1300,6 @@ class V8_EXPORT_PRIVATE CodeAssembler { // Projections Node* Projection(int index, Node* value); - // Pointer compression and decompression. - Node* ChangeTaggedToCompressed(Node* tagged); - Node* ChangeCompressedToTagged(Node* compressed); - template TNode>::type> Projection(TNode> value) { diff --git a/deps/v8/src/compiler/common-operator-reducer.cc b/deps/v8/src/compiler/common-operator-reducer.cc index fa727748f6cfd7..5dd765527fdf42 100644 --- a/deps/v8/src/compiler/common-operator-reducer.cc +++ b/deps/v8/src/compiler/common-operator-reducer.cc @@ -337,9 +337,9 @@ Reduction CommonOperatorReducer::ReduceReturn(Node* node) { // End // Now the effect input to the {Return} node can be either an {EffectPhi} - // hanging off the same {Merge}, or the {Merge} node is only connected to - // the {Return} and the {Phi}, in which case we know that the effect input - // must somehow dominate all merged branches. + // hanging off the same {Merge}, or the effect chain doesn't depend on the + // {Phi} or the {Merge}, in which case we know that the effect input must + // somehow dominate all merged branches. Node::Inputs control_inputs = control->inputs(); Node::Inputs value_inputs = value->inputs(); @@ -347,7 +347,7 @@ Reduction CommonOperatorReducer::ReduceReturn(Node* node) { DCHECK_EQ(control_inputs.count(), value_inputs.count() - 1); DCHECK_EQ(IrOpcode::kEnd, graph()->end()->opcode()); DCHECK_NE(0, graph()->end()->InputCount()); - if (control->OwnedBy(node, value)) { + if (control->OwnedBy(node, value) && value->OwnedBy(node)) { for (int i = 0; i < control_inputs.count(); ++i) { // Create a new {Return} and connect it to {end}. We don't need to mark // {end} as revisit, because we mark {node} as {Dead} below, which was diff --git a/deps/v8/src/compiler/common-operator.cc b/deps/v8/src/compiler/common-operator.cc index 45e558f6096102..0ef6402264bfac 100644 --- a/deps/v8/src/compiler/common-operator.cc +++ b/deps/v8/src/compiler/common-operator.cc @@ -1216,8 +1216,18 @@ const Operator* CommonOperatorBuilder::HeapConstant( value); // parameter } +const Operator* CommonOperatorBuilder::CompressedHeapConstant( + const Handle& value) { + return new (zone()) Operator1>( // -- + IrOpcode::kCompressedHeapConstant, Operator::kPure, // opcode + "CompressedHeapConstant", // name + 0, 0, 0, 1, 0, 0, // counts + value); // parameter +} + Handle HeapConstantOf(const Operator* op) { - DCHECK_EQ(IrOpcode::kHeapConstant, op->opcode()); + DCHECK(IrOpcode::kHeapConstant == op->opcode() || + IrOpcode::kCompressedHeapConstant == op->opcode()); return OpParameter>(op); } diff --git a/deps/v8/src/compiler/common-operator.h b/deps/v8/src/compiler/common-operator.h index 43a689b5c27004..9f634e72ec27a9 100644 --- a/deps/v8/src/compiler/common-operator.h +++ b/deps/v8/src/compiler/common-operator.h @@ -499,6 +499,7 @@ class V8_EXPORT_PRIVATE CommonOperatorBuilder final const Operator* NumberConstant(volatile double); const Operator* PointerConstant(intptr_t); const Operator* HeapConstant(const Handle&); + const Operator* CompressedHeapConstant(const Handle&); const Operator* ObjectId(uint32_t); const Operator* RelocatableInt32Constant(int32_t value, diff --git a/deps/v8/src/compiler/compilation-dependencies.cc b/deps/v8/src/compiler/compilation-dependencies.cc index f0bb797b68fe18..673f4a341be8ce 100644 --- a/deps/v8/src/compiler/compilation-dependencies.cc +++ b/deps/v8/src/compiler/compilation-dependencies.cc @@ -4,6 +4,7 @@ #include 
"src/compiler/compilation-dependencies.h" +#include "src/compiler/compilation-dependency.h" #include "src/handles/handles-inl.h" #include "src/objects/allocation-site-inl.h" #include "src/objects/objects-inl.h" @@ -17,18 +18,7 @@ CompilationDependencies::CompilationDependencies(JSHeapBroker* broker, Zone* zone) : zone_(zone), broker_(broker), dependencies_(zone) {} -class CompilationDependencies::Dependency : public ZoneObject { - public: - virtual bool IsValid() const = 0; - virtual void PrepareInstall() const {} - virtual void Install(const MaybeObjectHandle& code) const = 0; - -#ifdef DEBUG - virtual bool IsPretenureModeDependency() const { return false; } -#endif -}; - -class InitialMapDependency final : public CompilationDependencies::Dependency { +class InitialMapDependency final : public CompilationDependency { public: // TODO(neis): Once the concurrent compiler frontend is always-on, we no // longer need to explicitly store the initial map. @@ -56,8 +46,7 @@ class InitialMapDependency final : public CompilationDependencies::Dependency { MapRef initial_map_; }; -class PrototypePropertyDependency final - : public CompilationDependencies::Dependency { +class PrototypePropertyDependency final : public CompilationDependency { public: // TODO(neis): Once the concurrent compiler frontend is always-on, we no // longer need to explicitly store the prototype. @@ -96,7 +85,7 @@ class PrototypePropertyDependency final ObjectRef prototype_; }; -class StableMapDependency final : public CompilationDependencies::Dependency { +class StableMapDependency final : public CompilationDependency { public: explicit StableMapDependency(const MapRef& map) : map_(map) { DCHECK(map_.is_stable()); @@ -114,7 +103,7 @@ class StableMapDependency final : public CompilationDependencies::Dependency { MapRef map_; }; -class TransitionDependency final : public CompilationDependencies::Dependency { +class TransitionDependency final : public CompilationDependency { public: explicit TransitionDependency(const MapRef& map) : map_(map) { DCHECK(!map_.is_deprecated()); @@ -132,8 +121,7 @@ class TransitionDependency final : public CompilationDependencies::Dependency { MapRef map_; }; -class PretenureModeDependency final - : public CompilationDependencies::Dependency { +class PretenureModeDependency final : public CompilationDependency { public: // TODO(neis): Once the concurrent compiler frontend is always-on, we no // longer need to explicitly store the mode. @@ -163,8 +151,7 @@ class PretenureModeDependency final AllocationType allocation_; }; -class FieldRepresentationDependency final - : public CompilationDependencies::Dependency { +class FieldRepresentationDependency final : public CompilationDependency { public: // TODO(neis): Once the concurrent compiler frontend is always-on, we no // longer need to explicitly store the representation. @@ -197,7 +184,7 @@ class FieldRepresentationDependency final Representation representation_; }; -class FieldTypeDependency final : public CompilationDependencies::Dependency { +class FieldTypeDependency final : public CompilationDependency { public: // TODO(neis): Once the concurrent compiler frontend is always-on, we no // longer need to explicitly store the type. 
@@ -227,8 +214,7 @@ class FieldTypeDependency final : public CompilationDependencies::Dependency { ObjectRef type_; }; -class FieldConstnessDependency final - : public CompilationDependencies::Dependency { +class FieldConstnessDependency final : public CompilationDependency { public: FieldConstnessDependency(const MapRef& owner, int descriptor) : owner_(owner), descriptor_(descriptor) { @@ -255,8 +241,7 @@ class FieldConstnessDependency final int descriptor_; }; -class GlobalPropertyDependency final - : public CompilationDependencies::Dependency { +class GlobalPropertyDependency final : public CompilationDependency { public: // TODO(neis): Once the concurrent compiler frontend is always-on, we no // longer need to explicitly store the type and the read_only flag. @@ -294,7 +279,7 @@ class GlobalPropertyDependency final bool read_only_; }; -class ProtectorDependency final : public CompilationDependencies::Dependency { +class ProtectorDependency final : public CompilationDependency { public: explicit ProtectorDependency(const PropertyCellRef& cell) : cell_(cell) { DCHECK_EQ(cell_.value().AsSmi(), Isolate::kProtectorValid); @@ -315,8 +300,7 @@ class ProtectorDependency final : public CompilationDependencies::Dependency { PropertyCellRef cell_; }; -class ElementsKindDependency final - : public CompilationDependencies::Dependency { +class ElementsKindDependency final : public CompilationDependency { public: // TODO(neis): Once the concurrent compiler frontend is always-on, we no // longer need to explicitly store the elements kind. @@ -349,7 +333,7 @@ class ElementsKindDependency final }; class InitialMapInstanceSizePredictionDependency final - : public CompilationDependencies::Dependency { + : public CompilationDependency { public: InitialMapInstanceSizePredictionDependency(const JSFunctionRef& function, int instance_size) @@ -380,7 +364,8 @@ class InitialMapInstanceSizePredictionDependency final int instance_size_; }; -void CompilationDependencies::RecordDependency(Dependency const* dependency) { +void CompilationDependencies::RecordDependency( + CompilationDependency const* dependency) { if (dependency != nullptr) dependencies_.push_front(dependency); } @@ -565,6 +550,11 @@ namespace { // This function expects to never see a JSProxy. void DependOnStablePrototypeChain(CompilationDependencies* deps, MapRef map, base::Optional last_prototype) { + // TODO(neis): Remove heap access (SerializePrototype call). 
+ AllowCodeDependencyChange dependency_change_; + AllowHandleAllocation handle_allocation_; + AllowHandleDereference handle_dereference_; + AllowHeapAllocation heap_allocation_; while (true) { map.SerializePrototype(); HeapObjectRef proto = map.prototype(); @@ -635,7 +625,7 @@ CompilationDependencies::DependOnInitialMapInstanceSizePrediction( return SlackTrackingPrediction(initial_map, instance_size); } -CompilationDependencies::Dependency const* +CompilationDependency const* CompilationDependencies::TransitionDependencyOffTheRecord( const MapRef& target_map) const { if (target_map.CanBeDeprecated()) { @@ -646,7 +636,7 @@ CompilationDependencies::TransitionDependencyOffTheRecord( } } -CompilationDependencies::Dependency const* +CompilationDependency const* CompilationDependencies::FieldRepresentationDependencyOffTheRecord( const MapRef& map, int descriptor) const { MapRef owner = map.FindFieldOwner(descriptor); @@ -657,7 +647,7 @@ CompilationDependencies::FieldRepresentationDependencyOffTheRecord( details.representation()); } -CompilationDependencies::Dependency const* +CompilationDependency const* CompilationDependencies::FieldTypeDependencyOffTheRecord(const MapRef& map, int descriptor) const { MapRef owner = map.FindFieldOwner(descriptor); diff --git a/deps/v8/src/compiler/compilation-dependencies.h b/deps/v8/src/compiler/compilation-dependencies.h index 37a2bc3a28c66b..cb6cea0685f29e 100644 --- a/deps/v8/src/compiler/compilation-dependencies.h +++ b/deps/v8/src/compiler/compilation-dependencies.h @@ -25,6 +25,8 @@ class SlackTrackingPrediction { int inobject_property_count_; }; +class CompilationDependency; + // Collects and installs dependencies of the code that is being generated. class V8_EXPORT_PRIVATE CompilationDependencies : public ZoneObject { public: @@ -113,14 +115,13 @@ class V8_EXPORT_PRIVATE CompilationDependencies : public ZoneObject { // DependOnTransition(map); // is equivalent to: // RecordDependency(TransitionDependencyOffTheRecord(map)); - class Dependency; - void RecordDependency(Dependency const* dependency); - Dependency const* TransitionDependencyOffTheRecord( + void RecordDependency(CompilationDependency const* dependency); + CompilationDependency const* TransitionDependencyOffTheRecord( const MapRef& target_map) const; - Dependency const* FieldRepresentationDependencyOffTheRecord( + CompilationDependency const* FieldRepresentationDependencyOffTheRecord( + const MapRef& map, int descriptor) const; + CompilationDependency const* FieldTypeDependencyOffTheRecord( const MapRef& map, int descriptor) const; - Dependency const* FieldTypeDependencyOffTheRecord(const MapRef& map, - int descriptor) const; // Exposed only for testing purposes. bool AreValid() const; @@ -128,7 +129,7 @@ class V8_EXPORT_PRIVATE CompilationDependencies : public ZoneObject { private: Zone* const zone_; JSHeapBroker* const broker_; - ZoneForwardList dependencies_; + ZoneForwardList dependencies_; }; } // namespace compiler diff --git a/deps/v8/src/compiler/compilation-dependency.h b/deps/v8/src/compiler/compilation-dependency.h new file mode 100644 index 00000000000000..e5726a0ddb8dc4 --- /dev/null +++ b/deps/v8/src/compiler/compilation-dependency.h @@ -0,0 +1,32 @@ +// Copyright 2019 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
+ +#ifndef V8_COMPILER_COMPILATION_DEPENDENCY_H_ +#define V8_COMPILER_COMPILATION_DEPENDENCY_H_ + +#include "src/zone/zone.h" + +namespace v8 { +namespace internal { + +class MaybeObjectHandle; + +namespace compiler { + +class CompilationDependency : public ZoneObject { + public: + virtual bool IsValid() const = 0; + virtual void PrepareInstall() const {} + virtual void Install(const MaybeObjectHandle& code) const = 0; + +#ifdef DEBUG + virtual bool IsPretenureModeDependency() const { return false; } +#endif +}; + +} // namespace compiler +} // namespace internal +} // namespace v8 + +#endif // V8_COMPILER_COMPILATION_DEPENDENCY_H_ diff --git a/deps/v8/src/compiler/control-flow-optimizer.cc b/deps/v8/src/compiler/control-flow-optimizer.cc index 7177a6069da26c..600db1d160847a 100644 --- a/deps/v8/src/compiler/control-flow-optimizer.cc +++ b/deps/v8/src/compiler/control-flow-optimizer.cc @@ -4,6 +4,7 @@ #include "src/compiler/control-flow-optimizer.h" +#include "src/codegen/tick-counter.h" #include "src/compiler/common-operator.h" #include "src/compiler/graph.h" #include "src/compiler/node-matchers.h" @@ -16,18 +17,20 @@ namespace compiler { ControlFlowOptimizer::ControlFlowOptimizer(Graph* graph, CommonOperatorBuilder* common, MachineOperatorBuilder* machine, + TickCounter* tick_counter, Zone* zone) : graph_(graph), common_(common), machine_(machine), queue_(zone), queued_(graph, 2), - zone_(zone) {} - + zone_(zone), + tick_counter_(tick_counter) {} void ControlFlowOptimizer::Optimize() { Enqueue(graph()->start()); while (!queue_.empty()) { + tick_counter_->DoTick(); Node* node = queue_.front(); queue_.pop(); if (node->IsDead()) continue; diff --git a/deps/v8/src/compiler/control-flow-optimizer.h b/deps/v8/src/compiler/control-flow-optimizer.h index 0a688a7c3926d5..07fc9e6fc2c3cd 100644 --- a/deps/v8/src/compiler/control-flow-optimizer.h +++ b/deps/v8/src/compiler/control-flow-optimizer.h @@ -11,6 +11,9 @@ namespace v8 { namespace internal { + +class TickCounter; + namespace compiler { // Forward declarations. @@ -22,7 +25,8 @@ class Node; class V8_EXPORT_PRIVATE ControlFlowOptimizer final { public: ControlFlowOptimizer(Graph* graph, CommonOperatorBuilder* common, - MachineOperatorBuilder* machine, Zone* zone); + MachineOperatorBuilder* machine, + TickCounter* tick_counter, Zone* zone); void Optimize(); @@ -45,6 +49,7 @@ class V8_EXPORT_PRIVATE ControlFlowOptimizer final { ZoneQueue queue_; NodeMarker queued_; Zone* const zone_; + TickCounter* const tick_counter_; DISALLOW_COPY_AND_ASSIGN(ControlFlowOptimizer); }; diff --git a/deps/v8/src/compiler/csa-load-elimination.cc b/deps/v8/src/compiler/csa-load-elimination.cc new file mode 100644 index 00000000000000..620d98019fd18d --- /dev/null +++ b/deps/v8/src/compiler/csa-load-elimination.cc @@ -0,0 +1,336 @@ +// Copyright 2019 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
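Several passes in this commit, ControlFlowOptimizer among them, now take a TickCounter and tick it once per worklist iteration so a long-running (possibly concurrent) compilation job stays observable and interruptible. A minimal sketch of the pattern; TickCounterSketch is a stand-in, not the V8 class:

#include <queue>

struct TickCounterSketch { long ticks = 0; void DoTick() { ++ticks; } };

void DrainWorklist(std::queue<int>* queue, TickCounterSketch* tick_counter) {
  while (!queue->empty()) {
    tick_counter->DoTick();  // one tick per processed node
    int node = queue->front();
    queue->pop();
    (void)node;              // ... process node, maybe enqueue more ...
  }
}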
+ +#include "src/compiler/csa-load-elimination.h" + +#include "src/compiler/common-operator.h" +#include "src/compiler/node-matchers.h" +#include "src/compiler/node-properties.h" +#include "src/compiler/simplified-operator.h" + +namespace v8 { +namespace internal { +namespace compiler { + +Reduction CsaLoadElimination::Reduce(Node* node) { + if (FLAG_trace_turbo_load_elimination) { + if (node->op()->EffectInputCount() > 0) { + PrintF(" visit #%d:%s", node->id(), node->op()->mnemonic()); + if (node->op()->ValueInputCount() > 0) { + PrintF("("); + for (int i = 0; i < node->op()->ValueInputCount(); ++i) { + if (i > 0) PrintF(", "); + Node* const value = NodeProperties::GetValueInput(node, i); + PrintF("#%d:%s", value->id(), value->op()->mnemonic()); + } + PrintF(")"); + } + PrintF("\n"); + for (int i = 0; i < node->op()->EffectInputCount(); ++i) { + Node* const effect = NodeProperties::GetEffectInput(node, i); + if (AbstractState const* const state = node_states_.Get(effect)) { + PrintF(" state[%i]: #%d:%s\n", i, effect->id(), + effect->op()->mnemonic()); + state->Print(); + } else { + PrintF(" no state[%i]: #%d:%s\n", i, effect->id(), + effect->op()->mnemonic()); + } + } + } + } + switch (node->opcode()) { + case IrOpcode::kLoadFromObject: + return ReduceLoadFromObject(node, ObjectAccessOf(node->op())); + case IrOpcode::kStoreToObject: + return ReduceStoreToObject(node, ObjectAccessOf(node->op())); + case IrOpcode::kDebugBreak: + case IrOpcode::kAbortCSAAssert: + // Avoid changing optimizations in the presence of debug instructions. + return PropagateInputState(node); + case IrOpcode::kCall: + return ReduceCall(node); + case IrOpcode::kEffectPhi: + return ReduceEffectPhi(node); + case IrOpcode::kDead: + break; + case IrOpcode::kStart: + return ReduceStart(node); + default: + return ReduceOtherNode(node); + } + return NoChange(); +} + +namespace CsaLoadEliminationHelpers { + +bool IsCompatible(MachineRepresentation r1, MachineRepresentation r2) { + if (r1 == r2) return true; + return IsAnyCompressedTagged(r1) && IsAnyCompressedTagged(r2); +} + +bool ObjectMayAlias(Node* a, Node* b) { + if (a != b) { + if (b->opcode() == IrOpcode::kAllocate) { + std::swap(a, b); + } + if (a->opcode() == IrOpcode::kAllocate) { + switch (b->opcode()) { + case IrOpcode::kAllocate: + case IrOpcode::kHeapConstant: + case IrOpcode::kParameter: + return false; + default: + break; + } + } + } + return true; +} + +bool OffsetMayAlias(Node* offset1, MachineRepresentation repr1, Node* offset2, + MachineRepresentation repr2) { + IntPtrMatcher matcher1(offset1); + IntPtrMatcher matcher2(offset2); + // If either of the offsets is variable, accesses may alias + if (!matcher1.HasValue() || !matcher2.HasValue()) { + return true; + } + // Otherwise, we return whether accesses overlap + intptr_t start1 = matcher1.Value(); + intptr_t end1 = start1 + ElementSizeInBytes(repr1); + intptr_t start2 = matcher2.Value(); + intptr_t end2 = start2 + ElementSizeInBytes(repr2); + return !(end1 <= start2 || end2 <= start1); +} + +} // namespace CsaLoadEliminationHelpers + +namespace Helpers = CsaLoadEliminationHelpers; + +void CsaLoadElimination::AbstractState::Merge(AbstractState const* that, + Zone* zone) { + FieldInfo empty_info; + for (std::pair entry : field_infos_) { + if (that->field_infos_.Get(entry.first) != entry.second) { + field_infos_.Set(entry.first, empty_info); + } + } +} + +CsaLoadElimination::AbstractState const* +CsaLoadElimination::AbstractState::KillField(Node* kill_object, + Node* kill_offset, + MachineRepresentation 
kill_repr, + Zone* zone) const { + FieldInfo empty_info; + AbstractState* that = new (zone) AbstractState(*this); + for (std::pair entry : that->field_infos_) { + Field field = entry.first; + MachineRepresentation field_repr = entry.second.representation; + if (Helpers::OffsetMayAlias(kill_offset, kill_repr, field.second, + field_repr) && + Helpers::ObjectMayAlias(kill_object, field.first)) { + that->field_infos_.Set(field, empty_info); + } + } + return that; +} + +CsaLoadElimination::AbstractState const* +CsaLoadElimination::AbstractState::AddField(Node* object, Node* offset, + CsaLoadElimination::FieldInfo info, + Zone* zone) const { + AbstractState* that = new (zone) AbstractState(*this); + that->field_infos_.Set({object, offset}, info); + return that; +} + +CsaLoadElimination::FieldInfo CsaLoadElimination::AbstractState::Lookup( + Node* object, Node* offset) const { + if (object->IsDead()) { + return {}; + } + return field_infos_.Get({object, offset}); +} + +void CsaLoadElimination::AbstractState::Print() const { + for (std::pair entry : field_infos_) { + Field field = entry.first; + Node* object = field.first; + Node* offset = field.second; + FieldInfo info = entry.second; + PrintF(" #%d+#%d:%s -> #%d:%s [repr=%s]\n", object->id(), offset->id(), + object->op()->mnemonic(), info.value->id(), + info.value->op()->mnemonic(), + MachineReprToString(info.representation)); + } +} + +Reduction CsaLoadElimination::ReduceLoadFromObject(Node* node, + ObjectAccess const& access) { + Node* object = NodeProperties::GetValueInput(node, 0); + Node* offset = NodeProperties::GetValueInput(node, 1); + Node* effect = NodeProperties::GetEffectInput(node); + AbstractState const* state = node_states_.Get(effect); + if (state == nullptr) return NoChange(); + + MachineRepresentation representation = access.machine_type.representation(); + FieldInfo lookup_result = state->Lookup(object, offset); + if (!lookup_result.IsEmpty()) { + // Make sure we don't reuse values that were recorded with a different + // representation or resurrect dead {replacement} nodes. 
+ Node* replacement = lookup_result.value; + if (Helpers::IsCompatible(representation, lookup_result.representation) && + !replacement->IsDead()) { + ReplaceWithValue(node, replacement, effect); + return Replace(replacement); + } + } + FieldInfo info(node, representation); + state = state->AddField(object, offset, info, zone()); + + return UpdateState(node, state); +} + +Reduction CsaLoadElimination::ReduceStoreToObject(Node* node, + ObjectAccess const& access) { + Node* object = NodeProperties::GetValueInput(node, 0); + Node* offset = NodeProperties::GetValueInput(node, 1); + Node* value = NodeProperties::GetValueInput(node, 2); + Node* effect = NodeProperties::GetEffectInput(node); + AbstractState const* state = node_states_.Get(effect); + if (state == nullptr) return NoChange(); + + FieldInfo info(value, access.machine_type.representation()); + state = state->KillField(object, offset, info.representation, zone()); + state = state->AddField(object, offset, info, zone()); + + return UpdateState(node, state); +} + +Reduction CsaLoadElimination::ReduceEffectPhi(Node* node) { + Node* const effect0 = NodeProperties::GetEffectInput(node, 0); + Node* const control = NodeProperties::GetControlInput(node); + AbstractState const* state0 = node_states_.Get(effect0); + if (state0 == nullptr) return NoChange(); + if (control->opcode() == IrOpcode::kLoop) { + // Here we rely on having only reducible loops: + // The loop entry edge always dominates the header, so we can just take + // the state from the first input, and compute the loop state based on it. + AbstractState const* state = ComputeLoopState(node, state0); + return UpdateState(node, state); + } + DCHECK_EQ(IrOpcode::kMerge, control->opcode()); + + // Shortcut for the case when we do not know anything about some input. + int const input_count = node->op()->EffectInputCount(); + for (int i = 1; i < input_count; ++i) { + Node* const effect = NodeProperties::GetEffectInput(node, i); + if (node_states_.Get(effect) == nullptr) return NoChange(); + } + + // Make a copy of the first input's state and merge with the state + // from other inputs. + AbstractState* state = new (zone()) AbstractState(*state0); + for (int i = 1; i < input_count; ++i) { + Node* const input = NodeProperties::GetEffectInput(node, i); + state->Merge(node_states_.Get(input), zone()); + } + return UpdateState(node, state); +} + +Reduction CsaLoadElimination::ReduceStart(Node* node) { + return UpdateState(node, empty_state()); +} + +Reduction CsaLoadElimination::ReduceCall(Node* node) { + Node* value = NodeProperties::GetValueInput(node, 0); + ExternalReferenceMatcher m(value); + if (m.Is(ExternalReference::check_object_type())) { + return PropagateInputState(node); + } + return ReduceOtherNode(node); +} + +Reduction CsaLoadElimination::ReduceOtherNode(Node* node) { + if (node->op()->EffectInputCount() == 1) { + if (node->op()->EffectOutputCount() == 1) { + Node* const effect = NodeProperties::GetEffectInput(node); + AbstractState const* state = node_states_.Get(effect); + // If we do not know anything about the predecessor, do not propagate + // just yet because we will have to recompute anyway once we compute + // the predecessor. + if (state == nullptr) return NoChange(); + // Check if this {node} has some uncontrolled side effects. 
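Reduced to its core, the abstract state threaded through CsaLoadElimination is a map from (object, offset) to the last value stored there, which a later compatible load can reuse; a store first kills every entry it may alias. A deliberately simplified sketch that, unlike the real ObjectMayAlias, pretends distinct object ids never alias and that all offsets and widths are compile-time constants:

#include <iterator>
#include <map>
#include <utility>

using Key = std::pair<int, int>;             // (object id, byte offset)
struct Entry { int value_node; int size; };  // last stored value + width

void Store(std::map<Key, Entry>* state, Key key, Entry entry) {
  // Kill every entry on the same object whose byte range may overlap,
  // mirroring OffsetMayAlias's !(end1 <= start2 || end2 <= start1).
  for (auto it = state->begin(); it != state->end();) {
    bool same_object = it->first.first == key.first;
    int a = it->first.second, b = key.second;
    bool overlap = a < b + entry.size && b < a + it->second.size;
    it = (same_object && overlap) ? state->erase(it) : std::next(it);
  }
  (*state)[key] = entry;  // then record this store
}

const Entry* Load(const std::map<Key, Entry>& state, Key key) {
  auto it = state.find(key);
  return it == state.end() ? nullptr : &it->second;  // reuse if present
}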
+ if (!node->op()->HasProperty(Operator::kNoWrite)) { + state = empty_state(); + } + return UpdateState(node, state); + } else { + return NoChange(); + } + } + DCHECK_EQ(0, node->op()->EffectInputCount()); + DCHECK_EQ(0, node->op()->EffectOutputCount()); + return NoChange(); +} + +Reduction CsaLoadElimination::UpdateState(Node* node, + AbstractState const* state) { + AbstractState const* original = node_states_.Get(node); + // Only signal that the {node} has Changed, if the information about {state} + // has changed wrt. the {original}. + if (state != original) { + if (original == nullptr || !state->Equals(original)) { + node_states_.Set(node, state); + return Changed(node); + } + } + return NoChange(); +} + +Reduction CsaLoadElimination::PropagateInputState(Node* node) { + Node* const effect = NodeProperties::GetEffectInput(node); + AbstractState const* state = node_states_.Get(effect); + if (state == nullptr) return NoChange(); + return UpdateState(node, state); +} + +CsaLoadElimination::AbstractState const* CsaLoadElimination::ComputeLoopState( + Node* node, AbstractState const* state) const { + DCHECK_EQ(node->opcode(), IrOpcode::kEffectPhi); + Node* const control = NodeProperties::GetControlInput(node); + ZoneQueue queue(zone()); + ZoneSet visited(zone()); + visited.insert(node); + for (int i = 1; i < control->InputCount(); ++i) { + queue.push(node->InputAt(i)); + } + while (!queue.empty()) { + Node* const current = queue.front(); + queue.pop(); + if (visited.insert(current).second) { + if (!current->op()->HasProperty(Operator::kNoWrite)) { + return empty_state(); + } + for (int i = 0; i < current->op()->EffectInputCount(); ++i) { + queue.push(NodeProperties::GetEffectInput(current, i)); + } + } + } + return state; +} + +CommonOperatorBuilder* CsaLoadElimination::common() const { + return jsgraph()->common(); +} + +Graph* CsaLoadElimination::graph() const { return jsgraph()->graph(); } + +Isolate* CsaLoadElimination::isolate() const { return jsgraph()->isolate(); } + +} // namespace compiler +} // namespace internal +} // namespace v8 diff --git a/deps/v8/src/compiler/csa-load-elimination.h b/deps/v8/src/compiler/csa-load-elimination.h new file mode 100644 index 00000000000000..9460858d046a7b --- /dev/null +++ b/deps/v8/src/compiler/csa-load-elimination.h @@ -0,0 +1,118 @@ +// Copyright 2019 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#ifndef V8_COMPILER_CSA_LOAD_ELIMINATION_H_ +#define V8_COMPILER_CSA_LOAD_ELIMINATION_H_ + +#include "src/base/compiler-specific.h" +#include "src/codegen/machine-type.h" +#include "src/common/globals.h" +#include "src/compiler/graph-reducer.h" +#include "src/compiler/js-graph.h" +#include "src/compiler/node-aux-data.h" +#include "src/compiler/persistent-map.h" +#include "src/handles/maybe-handles.h" +#include "src/zone/zone-handle-set.h" + +namespace v8 { +namespace internal { + +namespace compiler { + +// Forward declarations. 
+class CommonOperatorBuilder; +struct ObjectAccess; +class Graph; +class JSGraph; + +class V8_EXPORT_PRIVATE CsaLoadElimination final + : public NON_EXPORTED_BASE(AdvancedReducer) { + public: + CsaLoadElimination(Editor* editor, JSGraph* jsgraph, Zone* zone) + : AdvancedReducer(editor), + empty_state_(zone), + node_states_(jsgraph->graph()->NodeCount(), zone), + jsgraph_(jsgraph), + zone_(zone) {} + ~CsaLoadElimination() final = default; + + const char* reducer_name() const override { return "CsaLoadElimination"; } + + Reduction Reduce(Node* node) final; + + private: + struct FieldInfo { + FieldInfo() = default; + FieldInfo(Node* value, MachineRepresentation representation) + : value(value), representation(representation) {} + + bool operator==(const FieldInfo& other) const { + return value == other.value && representation == other.representation; + } + + bool operator!=(const FieldInfo& other) const { return !(*this == other); } + + bool IsEmpty() const { return value == nullptr; } + + Node* value = nullptr; + MachineRepresentation representation = MachineRepresentation::kNone; + }; + + class AbstractState final : public ZoneObject { + public: + explicit AbstractState(Zone* zone) : field_infos_(zone) {} + + bool Equals(AbstractState const* that) const { + return field_infos_ == that->field_infos_; + } + void Merge(AbstractState const* that, Zone* zone); + + AbstractState const* KillField(Node* object, Node* offset, + MachineRepresentation repr, + Zone* zone) const; + AbstractState const* AddField(Node* object, Node* offset, FieldInfo info, + Zone* zone) const; + FieldInfo Lookup(Node* object, Node* offset) const; + + void Print() const; + + private: + using Field = std::pair; + using FieldInfos = PersistentMap; + FieldInfos field_infos_; + }; + + Reduction ReduceLoadFromObject(Node* node, ObjectAccess const& access); + Reduction ReduceStoreToObject(Node* node, ObjectAccess const& access); + Reduction ReduceEffectPhi(Node* node); + Reduction ReduceStart(Node* node); + Reduction ReduceCall(Node* node); + Reduction ReduceOtherNode(Node* node); + + Reduction UpdateState(Node* node, AbstractState const* state); + Reduction PropagateInputState(Node* node); + + AbstractState const* ComputeLoopState(Node* node, + AbstractState const* state) const; + + CommonOperatorBuilder* common() const; + Isolate* isolate() const; + Graph* graph() const; + JSGraph* jsgraph() const { return jsgraph_; } + Zone* zone() const { return zone_; } + AbstractState const* empty_state() const { return &empty_state_; } + + AbstractState const empty_state_; + NodeAuxData node_states_; + JSGraph* const jsgraph_; + Zone* zone_; + + DISALLOW_COPY_AND_ASSIGN(CsaLoadElimination); +}; + +} // namespace compiler +} // namespace internal +} // namespace v8 + +#endif // V8_COMPILER_CSA_LOAD_ELIMINATION_H_ diff --git a/deps/v8/src/compiler/decompression-elimination.cc b/deps/v8/src/compiler/decompression-elimination.cc index e69e61fac5e61b..537744652b9686 100644 --- a/deps/v8/src/compiler/decompression-elimination.cc +++ b/deps/v8/src/compiler/decompression-elimination.cc @@ -21,10 +21,8 @@ bool DecompressionElimination::IsReducibleConstantOpcode( IrOpcode::Value opcode) { switch (opcode) { case IrOpcode::kInt64Constant: - return true; - // TODO(v8:8977): Disabling HeapConstant until CompressedHeapConstant - // exists, since it breaks with verify CSA on. 
diff --git a/deps/v8/src/compiler/decompression-elimination.cc b/deps/v8/src/compiler/decompression-elimination.cc
index e69e61fac5e61b..537744652b9686 100644
--- a/deps/v8/src/compiler/decompression-elimination.cc
+++ b/deps/v8/src/compiler/decompression-elimination.cc
@@ -21,10 +21,8 @@ bool DecompressionElimination::IsReducibleConstantOpcode(
     IrOpcode::Value opcode) {
   switch (opcode) {
     case IrOpcode::kInt64Constant:
-      return true;
-    // TODO(v8:8977): Disabling HeapConstant until CompressedHeapConstant
-    // exists, since it breaks with verify CSA on.
     case IrOpcode::kHeapConstant:
+      return true;
     default:
       return false;
   }
@@ -55,13 +53,8 @@ Node* DecompressionElimination::GetCompressedConstant(Node* constant) {
           static_cast<int32_t>(OpParameter<int64_t>(constant->op()))));
       break;
     case IrOpcode::kHeapConstant:
-      // TODO(v8:8977): The HeapConstant remains as 64 bits. This does not
-      // affect the comparison and it will still work correctly. However, we are
-      // introducing a 64 bit value in the stream where a 32 bit one will
-      // suffice. Currently there is no "CompressedHeapConstant", and
-      // introducing a new opcode and handling it correctly throught the
-      // pipeline seems that it will involve quite a bit of work.
-      return constant;
+      return graph()->NewNode(
+          common()->CompressedHeapConstant(HeapConstantOf(constant->op())));
     default:
       UNREACHABLE();
   }
@@ -84,6 +77,21 @@ Reduction DecompressionElimination::ReduceCompress(Node* node) {
   }
 }
 
+Reduction DecompressionElimination::ReduceDecompress(Node* node) {
+  DCHECK(IrOpcode::IsDecompressOpcode(node->opcode()));
+
+  DCHECK_EQ(node->InputCount(), 1);
+  Node* input_node = node->InputAt(0);
+  IrOpcode::Value input_opcode = input_node->opcode();
+  if (IrOpcode::IsCompressOpcode(input_opcode)) {
+    DCHECK(IsValidDecompress(input_opcode, node->opcode()));
+    DCHECK_EQ(input_node->InputCount(), 1);
+    return Replace(input_node->InputAt(0));
+  } else {
+    return NoChange();
+  }
+}
+
 Reduction DecompressionElimination::ReducePhi(Node* node) {
   DCHECK_EQ(node->opcode(), IrOpcode::kPhi);
 
@@ -138,7 +146,10 @@ Reduction DecompressionElimination::ReducePhi(Node* node) {
 
   // Add a decompress after the Phi. To do this, we need to replace the Phi with
   // "Phi <- Decompress".
-  return Replace(graph()->NewNode(op, node));
+  Node* decompress = graph()->NewNode(op, node);
+  ReplaceWithValue(node, decompress);
+  decompress->ReplaceInput(0, node);
+  return Changed(node);
 }
 
 Reduction DecompressionElimination::ReduceTypedStateValues(Node* node) {
@@ -201,6 +212,10 @@ Reduction DecompressionElimination::Reduce(Node* node) {
     case IrOpcode::kChangeTaggedSignedToCompressedSigned:
     case IrOpcode::kChangeTaggedPointerToCompressedPointer:
       return ReduceCompress(node);
+    case IrOpcode::kChangeCompressedToTagged:
+    case IrOpcode::kChangeCompressedSignedToTaggedSigned:
+    case IrOpcode::kChangeCompressedPointerToTaggedPointer:
+      return ReduceDecompress(node);
     case IrOpcode::kPhi:
       return ReducePhi(node);
     case IrOpcode::kTypedStateValues:
diff --git a/deps/v8/src/compiler/decompression-elimination.h b/deps/v8/src/compiler/decompression-elimination.h
index c850f064a96639..85a6c98aa0bbb5 100644
--- a/deps/v8/src/compiler/decompression-elimination.h
+++ b/deps/v8/src/compiler/decompression-elimination.h
@@ -38,7 +38,7 @@ class V8_EXPORT_PRIVATE DecompressionElimination final
   // elimination.
   bool IsReducibleConstantOpcode(IrOpcode::Value opcode);
 
-  // Get the new 32 bit node constant given the 64 bit one
+  // Get the new 32 bit node constant given the 64 bit one.
   Node* GetCompressedConstant(Node* constant);
 
   // Removes direct Decompressions & Compressions, going from
@@ -48,6 +48,9 @@ class V8_EXPORT_PRIVATE DecompressionElimination final
   // Can be used for Any, Signed, and Pointer compressions.
   Reduction ReduceCompress(Node* node);
 
+  // Removes direct Compressions & Decompressions, analogously to ReduceCompress.
+  Reduction ReduceDecompress(Node* node);
+
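//
// [Editor's sketch, not part of the patch] ReduceDecompress above cancels a
// decompression whose input is a matching compression, so the chain
// Decompress(Compress(x)) collapses back to x. The peephole in isolation,
// using a toy node type (hypothetical, not V8's IR classes):
#include <cassert>

enum class ToyOp { kValue, kCompress, kDecompress };
struct ToyIrNode {
  ToyOp op;
  ToyIrNode* input = nullptr;
};

// Decompress(Compress(x)) is the identity on x, so a matched pair can be
// elided and all uses rewired to the original value.
ToyIrNode* ReduceToyDecompress(ToyIrNode* node) {
  assert(node->op == ToyOp::kDecompress);
  if (node->input != nullptr && node->input->op == ToyOp::kCompress) {
    return node->input->input;  // analogue of Replace(input_node->InputAt(0))
  }
  return node;  // analogue of NoChange()
}
//

   // Replaces Phi's input decompressions with their input node, if and only if
   // all of the Phi's inputs are Decompress nodes.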
Reduction ReducePhi(Node* node); diff --git a/deps/v8/src/compiler/diamond.h b/deps/v8/src/compiler/diamond.h index cc6ca954f3d92e..cac1b1726b4ad9 100644 --- a/deps/v8/src/compiler/diamond.h +++ b/deps/v8/src/compiler/diamond.h @@ -33,13 +33,13 @@ struct Diamond { } // Place {this} after {that} in control flow order. - void Chain(Diamond& that) { branch->ReplaceInput(1, that.merge); } + void Chain(Diamond const& that) { branch->ReplaceInput(1, that.merge); } // Place {this} after {that} in control flow order. void Chain(Node* that) { branch->ReplaceInput(1, that); } // Nest {this} into either the if_true or if_false branch of {that}. - void Nest(Diamond& that, bool if_true) { + void Nest(Diamond const& that, bool if_true) { if (if_true) { branch->ReplaceInput(1, that.if_true); that.merge->ReplaceInput(0, merge); diff --git a/deps/v8/src/compiler/effect-control-linearizer.cc b/deps/v8/src/compiler/effect-control-linearizer.cc index ced078a17899a6..788638fe68b8f4 100644 --- a/deps/v8/src/compiler/effect-control-linearizer.cc +++ b/deps/v8/src/compiler/effect-control-linearizer.cc @@ -17,6 +17,7 @@ #include "src/compiler/node-properties.h" #include "src/compiler/node.h" #include "src/compiler/schedule.h" +#include "src/execution/frames.h" #include "src/heap/factory-inl.h" #include "src/objects/heap-number.h" #include "src/objects/oddball.h" @@ -51,6 +52,7 @@ class EffectControlLinearizer { bool TryWireInStateEffect(Node* node, Node* frame_state, Node** effect, Node** control); Node* LowerChangeBitToTagged(Node* node); + Node* LowerChangeInt31ToCompressedSigned(Node* node); Node* LowerChangeInt31ToTaggedSigned(Node* node); Node* LowerChangeInt32ToTagged(Node* node); Node* LowerChangeInt64ToTagged(Node* node); @@ -58,6 +60,7 @@ class EffectControlLinearizer { Node* LowerChangeUint64ToTagged(Node* node); Node* LowerChangeFloat64ToTagged(Node* node); Node* LowerChangeFloat64ToTaggedPointer(Node* node); + Node* LowerChangeCompressedSignedToInt32(Node* node); Node* LowerChangeTaggedSignedToInt32(Node* node); Node* LowerChangeTaggedSignedToInt64(Node* node); Node* LowerChangeTaggedToBit(Node* node); @@ -75,6 +78,7 @@ class EffectControlLinearizer { Node* LowerCheckReceiver(Node* node, Node* frame_state); Node* LowerCheckReceiverOrNullOrUndefined(Node* node, Node* frame_state); Node* LowerCheckString(Node* node, Node* frame_state); + Node* LowerCheckBigInt(Node* node, Node* frame_state); Node* LowerCheckSymbol(Node* node, Node* frame_state); void LowerCheckIf(Node* node, Node* frame_state); Node* LowerCheckedInt32Add(Node* node, Node* frame_state); @@ -84,6 +88,7 @@ class EffectControlLinearizer { Node* LowerCheckedUint32Div(Node* node, Node* frame_state); Node* LowerCheckedUint32Mod(Node* node, Node* frame_state); Node* LowerCheckedInt32Mul(Node* node, Node* frame_state); + Node* LowerCheckedInt32ToCompressedSigned(Node* node, Node* frame_state); Node* LowerCheckedInt32ToTaggedSigned(Node* node, Node* frame_state); Node* LowerCheckedInt64ToInt32(Node* node, Node* frame_state); Node* LowerCheckedInt64ToTaggedSigned(Node* node, Node* frame_state); @@ -101,6 +106,9 @@ class EffectControlLinearizer { Node* LowerCheckedTaggedToFloat64(Node* node, Node* frame_state); Node* LowerCheckedTaggedToTaggedSigned(Node* node, Node* frame_state); Node* LowerCheckedTaggedToTaggedPointer(Node* node, Node* frame_state); + Node* LowerBigIntAsUintN(Node* node, Node* frame_state); + Node* LowerChangeUint64ToBigInt(Node* node); + Node* LowerTruncateBigIntToUint64(Node* node); Node* 
LowerCheckedCompressedToTaggedSigned(Node* node, Node* frame_state); Node* LowerCheckedCompressedToTaggedPointer(Node* node, Node* frame_state); Node* LowerCheckedTaggedToCompressedSigned(Node* node, Node* frame_state); @@ -150,17 +158,20 @@ class EffectControlLinearizer { Node* LowerStringConcat(Node* node); Node* LowerStringToNumber(Node* node); Node* LowerStringCharCodeAt(Node* node); - Node* LowerStringCodePointAt(Node* node, UnicodeEncoding encoding); + Node* LowerStringCodePointAt(Node* node); Node* LowerStringToLowerCaseIntl(Node* node); Node* LowerStringToUpperCaseIntl(Node* node); Node* LowerStringFromSingleCharCode(Node* node); Node* LowerStringFromSingleCodePoint(Node* node); Node* LowerStringIndexOf(Node* node); Node* LowerStringSubstring(Node* node); + Node* LowerStringFromCodePointAt(Node* node); Node* LowerStringLength(Node* node); Node* LowerStringEqual(Node* node); Node* LowerStringLessThan(Node* node); Node* LowerStringLessThanOrEqual(Node* node); + Node* LowerBigIntAdd(Node* node, Node* frame_state); + Node* LowerBigIntNegate(Node* node); Node* LowerCheckFloat64Hole(Node* node, Node* frame_state); Node* LowerCheckNotTaggedHole(Node* node, Node* frame_state); Node* LowerConvertTaggedHoleToUndefined(Node* node); @@ -186,6 +197,7 @@ class EffectControlLinearizer { void LowerTransitionAndStoreNumberElement(Node* node); void LowerTransitionAndStoreNonNumberElement(Node* node); void LowerRuntimeAbort(Node* node); + Node* LowerAssertType(Node* node); Node* LowerConvertReceiver(Node* node); Node* LowerDateNow(Node* node); @@ -214,6 +226,7 @@ class EffectControlLinearizer { Node* LowerStringComparison(Callable const& callable, Node* node); Node* IsElementsKindGreaterThan(Node* kind, ElementsKind reference_kind); + Node* ChangeInt32ToCompressedSmi(Node* value); Node* ChangeInt32ToSmi(Node* value); Node* ChangeInt32ToIntPtr(Node* value); Node* ChangeInt64ToSmi(Node* value); @@ -222,6 +235,7 @@ class EffectControlLinearizer { Node* ChangeUint32ToUintPtr(Node* value); Node* ChangeUint32ToSmi(Node* value); Node* ChangeSmiToIntPtr(Node* value); + Node* ChangeCompressedSmiToInt32(Node* value); Node* ChangeSmiToInt32(Node* value); Node* ChangeSmiToInt64(Node* value); Node* ObjectIsSmi(Node* value); @@ -827,6 +841,9 @@ bool EffectControlLinearizer::TryWireInStateEffect(Node* node, case IrOpcode::kChangeBitToTagged: result = LowerChangeBitToTagged(node); break; + case IrOpcode::kChangeInt31ToCompressedSigned: + result = LowerChangeInt31ToCompressedSigned(node); + break; case IrOpcode::kChangeInt31ToTaggedSigned: result = LowerChangeInt31ToTaggedSigned(node); break; @@ -848,6 +865,9 @@ bool EffectControlLinearizer::TryWireInStateEffect(Node* node, case IrOpcode::kChangeFloat64ToTaggedPointer: result = LowerChangeFloat64ToTaggedPointer(node); break; + case IrOpcode::kChangeCompressedSignedToInt32: + result = LowerChangeCompressedSignedToInt32(node); + break; case IrOpcode::kChangeTaggedSignedToInt32: result = LowerChangeTaggedSignedToInt32(node); break; @@ -911,6 +931,9 @@ bool EffectControlLinearizer::TryWireInStateEffect(Node* node, case IrOpcode::kCheckString: result = LowerCheckString(node, frame_state); break; + case IrOpcode::kCheckBigInt: + result = LowerCheckBigInt(node, frame_state); + break; case IrOpcode::kCheckInternalizedString: result = LowerCheckInternalizedString(node, frame_state); break; @@ -938,6 +961,9 @@ bool EffectControlLinearizer::TryWireInStateEffect(Node* node, case IrOpcode::kCheckedInt32Mul: result = LowerCheckedInt32Mul(node, frame_state); break; + case 
IrOpcode::kCheckedInt32ToCompressedSigned: + result = LowerCheckedInt32ToCompressedSigned(node, frame_state); + break; case IrOpcode::kCheckedInt32ToTaggedSigned: result = LowerCheckedInt32ToTaggedSigned(node, frame_state); break; @@ -993,6 +1019,15 @@ bool EffectControlLinearizer::TryWireInStateEffect(Node* node, case IrOpcode::kCheckedTaggedToTaggedPointer: result = LowerCheckedTaggedToTaggedPointer(node, frame_state); break; + case IrOpcode::kBigIntAsUintN: + result = LowerBigIntAsUintN(node, frame_state); + break; + case IrOpcode::kChangeUint64ToBigInt: + result = LowerChangeUint64ToBigInt(node); + break; + case IrOpcode::kTruncateBigIntToUint64: + result = LowerTruncateBigIntToUint64(node); + break; case IrOpcode::kCheckedCompressedToTaggedSigned: result = LowerCheckedCompressedToTaggedSigned(node, frame_state); break; @@ -1110,6 +1145,9 @@ bool EffectControlLinearizer::TryWireInStateEffect(Node* node, case IrOpcode::kStringIndexOf: result = LowerStringIndexOf(node); break; + case IrOpcode::kStringFromCodePointAt: + result = LowerStringFromCodePointAt(node); + break; case IrOpcode::kStringLength: result = LowerStringLength(node); break; @@ -1120,7 +1158,7 @@ bool EffectControlLinearizer::TryWireInStateEffect(Node* node, result = LowerStringCharCodeAt(node); break; case IrOpcode::kStringCodePointAt: - result = LowerStringCodePointAt(node, UnicodeEncodingOf(node->op())); + result = LowerStringCodePointAt(node); break; case IrOpcode::kStringToLowerCaseIntl: result = LowerStringToLowerCaseIntl(node); @@ -1140,6 +1178,12 @@ bool EffectControlLinearizer::TryWireInStateEffect(Node* node, case IrOpcode::kStringLessThanOrEqual: result = LowerStringLessThanOrEqual(node); break; + case IrOpcode::kBigIntAdd: + result = LowerBigIntAdd(node, frame_state); + break; + case IrOpcode::kBigIntNegate: + result = LowerBigIntNegate(node); + break; case IrOpcode::kNumberIsFloat64Hole: result = LowerNumberIsFloat64Hole(node); break; @@ -1233,6 +1277,9 @@ bool EffectControlLinearizer::TryWireInStateEffect(Node* node, case IrOpcode::kRuntimeAbort: LowerRuntimeAbort(node); break; + case IrOpcode::kAssertType: + result = LowerAssertType(node); + break; case IrOpcode::kConvertReceiver: result = LowerConvertReceiver(node); break; @@ -1357,6 +1404,11 @@ Node* EffectControlLinearizer::LowerChangeBitToTagged(Node* node) { return done.PhiAt(0); } +Node* EffectControlLinearizer::LowerChangeInt31ToCompressedSigned(Node* node) { + Node* value = node->InputAt(0); + return ChangeInt32ToCompressedSmi(value); +} + Node* EffectControlLinearizer::LowerChangeInt31ToTaggedSigned(Node* node) { Node* value = node->InputAt(0); return ChangeInt32ToSmi(value); @@ -1461,6 +1513,11 @@ Node* EffectControlLinearizer::LowerChangeTaggedSignedToInt32(Node* node) { return ChangeSmiToInt32(value); } +Node* EffectControlLinearizer::LowerChangeCompressedSignedToInt32(Node* node) { + Node* value = node->InputAt(0); + return ChangeCompressedSmiToInt32(value); +} + Node* EffectControlLinearizer::LowerChangeTaggedSignedToInt64(Node* node) { Node* value = node->InputAt(0); return ChangeSmiToInt64(value); @@ -1684,8 +1741,7 @@ Node* EffectControlLinearizer::LowerChangeTaggedToCompressedSigned(Node* node) { STATIC_ASSERT(HeapNumber::kValueOffset == Oddball::kToNumberRawOffset); Node* vfalse = __ LoadField(AccessBuilder::ForHeapNumberValue(), value); vfalse = __ ChangeFloat64ToInt32(vfalse); - vfalse = ChangeInt32ToSmi(vfalse); - vfalse = __ ChangeTaggedSignedToCompressedSigned(vfalse); + vfalse = ChangeInt32ToCompressedSmi(vfalse); __ Goto(&done, 
vfalse); __ Bind(&done); @@ -2283,6 +2339,19 @@ Node* EffectControlLinearizer::LowerCheckedInt32Mul(Node* node, return value; } +Node* EffectControlLinearizer::LowerCheckedInt32ToCompressedSigned( + Node* node, Node* frame_state) { + DCHECK(SmiValuesAre31Bits()); + Node* value = node->InputAt(0); + const CheckParameters& params = CheckParametersOf(node->op()); + + Node* add = __ Int32AddWithOverflow(value, value); + Node* check = __ Projection(1, add); + __ DeoptimizeIf(DeoptimizeReason::kLostPrecision, params.feedback(), check, + frame_state); + return __ Projection(0, add); +} + Node* EffectControlLinearizer::LowerCheckedInt32ToTaggedSigned( Node* node, Node* frame_state) { DCHECK(SmiValuesAre31Bits()); @@ -2651,6 +2720,121 @@ Node* EffectControlLinearizer::LowerCheckedTaggedToTaggedPointer( return value; } +Node* EffectControlLinearizer::LowerCheckBigInt(Node* node, Node* frame_state) { + Node* value = node->InputAt(0); + const CheckParameters& params = CheckParametersOf(node->op()); + + // Check for Smi. + Node* smi_check = ObjectIsSmi(value); + __ DeoptimizeIf(DeoptimizeReason::kSmi, params.feedback(), smi_check, + frame_state); + + // Check for BigInt. + Node* value_map = __ LoadField(AccessBuilder::ForMap(), value); + Node* bi_check = __ WordEqual(value_map, __ BigIntMapConstant()); + __ DeoptimizeIfNot(DeoptimizeReason::kWrongInstanceType, params.feedback(), + bi_check, frame_state); + + return value; +} + +Node* EffectControlLinearizer::LowerBigIntAsUintN(Node* node, + Node* frame_state) { + DCHECK(machine()->Is64()); + + const int bits = OpParameter(node->op()); + DCHECK(0 <= bits && bits <= 64); + + if (bits == 64) { + // Reduce to nop. + return node->InputAt(0); + } else { + const uint64_t msk = (1ULL << bits) - 1ULL; + return __ Word64And(node->InputAt(0), __ Int64Constant(msk)); + } +} + +Node* EffectControlLinearizer::LowerChangeUint64ToBigInt(Node* node) { + DCHECK(machine()->Is64()); + + Node* value = node->InputAt(0); + Node* map = jsgraph()->HeapConstant(factory()->bigint_map()); + // BigInts with value 0 must be of size 0 (canonical form). + auto if_zerodigits = __ MakeLabel(); + auto if_onedigit = __ MakeLabel(); + auto done = __ MakeLabel(MachineRepresentation::kTagged); + + __ GotoIf(__ Word64Equal(value, __ IntPtrConstant(0)), &if_zerodigits); + __ Goto(&if_onedigit); + + __ Bind(&if_onedigit); + { + Node* result = __ Allocate(AllocationType::kYoung, + __ IntPtrConstant(BigInt::SizeFor(1))); + const auto bitfield = BigInt::LengthBits::update(0, 1); + __ StoreField(AccessBuilder::ForMap(), result, map); + __ StoreField(AccessBuilder::ForBigIntBitfield(), result, + __ IntPtrConstant(bitfield)); + // BigInts have no padding on 64 bit architectures with pointer compression. + if (BigInt::HasOptionalPadding()) { + __ StoreField(AccessBuilder::ForBigIntOptionalPadding(), result, + __ IntPtrConstant(0)); + } + __ StoreField(AccessBuilder::ForBigIntLeastSignificantDigit64(), result, + value); + __ Goto(&done, result); + } + + __ Bind(&if_zerodigits); + { + Node* result = __ Allocate(AllocationType::kYoung, + __ IntPtrConstant(BigInt::SizeFor(0))); + const auto bitfield = BigInt::LengthBits::update(0, 0); + __ StoreField(AccessBuilder::ForMap(), result, map); + __ StoreField(AccessBuilder::ForBigIntBitfield(), result, + __ IntPtrConstant(bitfield)); + // BigInts have no padding on 64 bit architectures with pointer compression. 
+ if (BigInt::HasOptionalPadding()) { + __ StoreField(AccessBuilder::ForBigIntOptionalPadding(), result, + __ IntPtrConstant(0)); + } + __ Goto(&done, result); + } + + __ Bind(&done); + return done.PhiAt(0); +} + +Node* EffectControlLinearizer::LowerTruncateBigIntToUint64(Node* node) { + DCHECK(machine()->Is64()); + + auto done = __ MakeLabel(MachineRepresentation::kWord64); + auto if_neg = __ MakeLabel(); + auto if_not_zero = __ MakeLabel(); + + Node* value = node->InputAt(0); + + Node* bitfield = __ LoadField(AccessBuilder::ForBigIntBitfield(), value); + __ GotoIfNot(__ Word32Equal(bitfield, __ Int32Constant(0)), &if_not_zero); + __ Goto(&done, __ Int64Constant(0)); + + __ Bind(&if_not_zero); + { + Node* lsd = + __ LoadField(AccessBuilder::ForBigIntLeastSignificantDigit64(), value); + Node* sign = + __ Word32And(bitfield, __ Int32Constant(BigInt::SignBits::kMask)); + __ GotoIf(__ Word32Equal(sign, __ Int32Constant(1)), &if_neg); + __ Goto(&done, lsd); + + __ Bind(&if_neg); + __ Goto(&done, __ Int64Sub(__ Int64Constant(0), lsd)); + } + + __ Bind(&done); + return done.PhiAt(0); +} + Node* EffectControlLinearizer::LowerCheckedCompressedToTaggedSigned( Node* node, Node* frame_state) { Node* value = node->InputAt(0); @@ -3726,16 +3910,12 @@ Node* EffectControlLinearizer::LowerStringCharCodeAt(Node* node) { return loop_done.PhiAt(0); } -Node* EffectControlLinearizer::LowerStringCodePointAt( - Node* node, UnicodeEncoding encoding) { +Node* EffectControlLinearizer::LowerStringCodePointAt(Node* node) { Node* receiver = node->InputAt(0); Node* position = node->InputAt(1); - Builtins::Name builtin = encoding == UnicodeEncoding::UTF16 - ? Builtins::kStringCodePointAtUTF16 - : Builtins::kStringCodePointAtUTF32; - - Callable const callable = Builtins::CallableFor(isolate(), builtin); + Callable const callable = + Builtins::CallableFor(isolate(), Builtins::kStringCodePointAt); Operator::Properties properties = Operator::kNoThrow | Operator::kNoWrite; CallDescriptor::Flags flags = CallDescriptor::kNoFlags; auto call_descriptor = Linkage::GetStubCallDescriptor( @@ -3968,31 +4148,23 @@ Node* EffectControlLinearizer::LowerStringFromSingleCodePoint(Node* node) { __ Bind(&if_not_single_code); // Generate surrogate pair string { - switch (UnicodeEncodingOf(node->op())) { - case UnicodeEncoding::UTF16: - break; + // Convert UTF32 to UTF16 code units, and store as a 32 bit word. + Node* lead_offset = __ Int32Constant(0xD800 - (0x10000 >> 10)); - case UnicodeEncoding::UTF32: { - // Convert UTF32 to UTF16 code units, and store as a 32 bit word. 
- Node* lead_offset = __ Int32Constant(0xD800 - (0x10000 >> 10)); + // lead = (codepoint >> 10) + LEAD_OFFSET + Node* lead = + __ Int32Add(__ Word32Shr(code, __ Int32Constant(10)), lead_offset); - // lead = (codepoint >> 10) + LEAD_OFFSET - Node* lead = - __ Int32Add(__ Word32Shr(code, __ Int32Constant(10)), lead_offset); + // trail = (codepoint & 0x3FF) + 0xDC00; + Node* trail = __ Int32Add(__ Word32And(code, __ Int32Constant(0x3FF)), + __ Int32Constant(0xDC00)); - // trail = (codepoint & 0x3FF) + 0xDC00; - Node* trail = __ Int32Add(__ Word32And(code, __ Int32Constant(0x3FF)), - __ Int32Constant(0xDC00)); - - // codpoint = (trail << 16) | lead; + // codpoint = (trail << 16) | lead; #if V8_TARGET_BIG_ENDIAN - code = __ Word32Or(__ Word32Shl(lead, __ Int32Constant(16)), trail); + code = __ Word32Or(__ Word32Shl(lead, __ Int32Constant(16)), trail); #else - code = __ Word32Or(__ Word32Shl(trail, __ Int32Constant(16)), lead); + code = __ Word32Or(__ Word32Shl(trail, __ Int32Constant(16)), lead); #endif - break; - } - } // Allocate a new SeqTwoByteString for {code}. Node* vfalse0 = @@ -4032,6 +4204,21 @@ Node* EffectControlLinearizer::LowerStringIndexOf(Node* node) { search_string, position, __ NoContextConstant()); } +Node* EffectControlLinearizer::LowerStringFromCodePointAt(Node* node) { + Node* string = node->InputAt(0); + Node* index = node->InputAt(1); + + Callable callable = + Builtins::CallableFor(isolate(), Builtins::kStringFromCodePointAt); + Operator::Properties properties = Operator::kEliminatable; + CallDescriptor::Flags flags = CallDescriptor::kNoFlags; + auto call_descriptor = Linkage::GetStubCallDescriptor( + graph()->zone(), callable.descriptor(), + callable.descriptor().GetStackParameterCount(), flags, properties); + return __ Call(call_descriptor, __ HeapConstant(callable.code()), string, + index, __ NoContextConstant()); +} + Node* EffectControlLinearizer::LowerStringLength(Node* node) { Node* subject = node->InputAt(0); @@ -4083,6 +4270,41 @@ Node* EffectControlLinearizer::LowerStringLessThanOrEqual(Node* node) { Builtins::CallableFor(isolate(), Builtins::kStringLessThanOrEqual), node); } +Node* EffectControlLinearizer::LowerBigIntAdd(Node* node, Node* frame_state) { + Node* lhs = node->InputAt(0); + Node* rhs = node->InputAt(1); + + Callable const callable = + Builtins::CallableFor(isolate(), Builtins::kBigIntAddNoThrow); + auto call_descriptor = Linkage::GetStubCallDescriptor( + graph()->zone(), callable.descriptor(), + callable.descriptor().GetStackParameterCount(), CallDescriptor::kNoFlags, + Operator::kFoldable | Operator::kNoThrow); + Node* value = + __ Call(call_descriptor, jsgraph()->HeapConstant(callable.code()), lhs, + rhs, __ NoContextConstant()); + + // Check for exception sentinel: Smi is returned to signal BigIntTooBig. 
+  __ DeoptimizeIf(DeoptimizeReason::kBigIntTooBig, VectorSlotPair{},
+                  ObjectIsSmi(value), frame_state);
+
+  return value;
+}
+
+Node* EffectControlLinearizer::LowerBigIntNegate(Node* node) {
+  Callable const callable =
+      Builtins::CallableFor(isolate(), Builtins::kBigIntUnaryMinus);
+  auto call_descriptor = Linkage::GetStubCallDescriptor(
+      graph()->zone(), callable.descriptor(),
+      callable.descriptor().GetStackParameterCount(), CallDescriptor::kNoFlags,
+      Operator::kFoldable | Operator::kNoThrow);
+  Node* value =
+      __ Call(call_descriptor, jsgraph()->HeapConstant(callable.code()),
+              node->InputAt(0), __ NoContextConstant());
+
+  return value;
+}
+
 Node* EffectControlLinearizer::LowerCheckFloat64Hole(Node* node,
                                                      Node* frame_state) {
   // If we reach this point w/o eliminating the {node} that's marked
@@ -4256,6 +4478,11 @@ Node* EffectControlLinearizer::ChangeIntPtrToInt32(Node* value) {
   return value;
 }
 
+Node* EffectControlLinearizer::ChangeInt32ToCompressedSmi(Node* value) {
+  CHECK(machine()->Is64() && SmiValuesAre31Bits());
+  return __ Word32Shl(value, SmiShiftBitsConstant());
+}
+
 Node* EffectControlLinearizer::ChangeInt32ToSmi(Node* value) {
   // Do shift on 32bit values if Smis are stored in the lower word.
   if (machine()->Is64() && SmiValuesAre31Bits()) {
@@ -4305,6 +4532,11 @@ Node* EffectControlLinearizer::ChangeSmiToInt32(Node* value) {
   return ChangeSmiToIntPtr(value);
 }
 
+Node* EffectControlLinearizer::ChangeCompressedSmiToInt32(Node* value) {
+  CHECK(machine()->Is64() && SmiValuesAre31Bits());
+  return __ Word32Sar(value, SmiShiftBitsConstant());
+}
+
 Node* EffectControlLinearizer::ChangeSmiToInt64(Node* value) {
   CHECK(machine()->Is64());
   return ChangeSmiToIntPtr(value);
@@ -5163,6 +5395,30 @@ void EffectControlLinearizer::LowerRuntimeAbort(Node* node) {
                 __ Int32Constant(1), __ NoContextConstant());
 }
 
+Node* EffectControlLinearizer::LowerAssertType(Node* node) {
+  DCHECK_EQ(node->opcode(), IrOpcode::kAssertType);
+  Type type = OpParameter<Type>(node->op());
+  DCHECK(type.IsRange());
+  auto range = type.AsRange();
+
+  Node* const input = node->InputAt(0);
+  Node* const min = __ NumberConstant(range->Min());
+  Node* const max = __ NumberConstant(range->Max());
+
+  {
+    Callable const callable =
+        Builtins::CallableFor(isolate(), Builtins::kCheckNumberInRange);
+    Operator::Properties const properties = node->op()->properties();
+    CallDescriptor::Flags const flags = CallDescriptor::kNoFlags;
+    auto call_descriptor = Linkage::GetStubCallDescriptor(
+        graph()->zone(), callable.descriptor(),
+        callable.descriptor().GetStackParameterCount(), flags, properties);
+    __ Call(call_descriptor, __ HeapConstant(callable.code()), input, min, max,
+            __ NoContextConstant());
+    return input;
+  }
+}
+
 Node* EffectControlLinearizer::LowerConvertReceiver(Node* node) {
   ConvertReceiverMode const mode = ConvertReceiverModeOf(node->op());
   Node* value = node->InputAt(0);
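//
// [Editor's sketch, not part of the patch] LowerAssertType above materializes
// a typer invariant as a runtime check: it calls CheckNumberInRange(input,
// min, max) purely for its side effect and threads the original input
// through unchanged. A freestanding analogue, with plain doubles standing in
// for graph nodes:
#include <cassert>

double AssertTypeRange(double input, double min, double max) {
  // Analogue of the Builtins::kCheckNumberInRange call: verify that the value
  // lies inside the range the type system predicted, then pass it through.
  assert(min <= input && input <= max);
  return input;  // the node's value is unchanged; only the check is added
}
//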
@@ -5187,7 +5443,7 @@ Node* EffectControlLinearizer::LowerConvertReceiver(Node* node) {
       __ GotoIf(check, &convert_to_object);
       __ Goto(&done_convert, value);
 
-      // Wrap the primitive {value} into a JSValue.
+      // Wrap the primitive {value} into a JSPrimitiveWrapper.
       __ Bind(&convert_to_object);
       Operator::Properties properties = Operator::kEliminatable;
       Callable callable = Builtins::CallableFor(isolate(), Builtins::kToObject);
@@ -5220,7 +5476,7 @@ Node* EffectControlLinearizer::LowerConvertReceiver(Node* node) {
       __ GotoIf(check, &convert_to_object);
       __ Goto(&done_convert, value);
 
-      // Wrap the primitive {value} into a JSValue.
+      // Wrap the primitive {value} into a JSPrimitiveWrapper.
       __ Bind(&convert_to_object);
       __ GotoIf(__ WordEqual(value, __ UndefinedConstant()),
                 &convert_global_proxy);
diff --git a/deps/v8/src/compiler/escape-analysis.cc b/deps/v8/src/compiler/escape-analysis.cc
index dc0db4d780eb91..aee0121384ac61 100644
--- a/deps/v8/src/compiler/escape-analysis.cc
+++ b/deps/v8/src/compiler/escape-analysis.cc
@@ -4,6 +4,7 @@
 
 #include "src/compiler/escape-analysis.h"
 
+#include "src/codegen/tick-counter.h"
 #include "src/compiler/linkage.h"
 #include "src/compiler/node-matchers.h"
 #include "src/compiler/operator-properties.h"
@@ -153,6 +154,7 @@ class VariableTracker {
   ZoneVector<Node*> buffer_;
   EffectGraphReducer* reducer_;
   int next_variable_ = 0;
+  TickCounter* const tick_counter_;
 
   DISALLOW_COPY_AND_ASSIGN(VariableTracker);
 };
@@ -279,12 +281,14 @@ class EscapeAnalysisTracker : public ZoneObject {
 };
 
 EffectGraphReducer::EffectGraphReducer(
-    Graph* graph, std::function<void(Node*, Reduction*)> reduce, Zone* zone)
+    Graph* graph, std::function<void(Node*, Reduction*)> reduce,
+    TickCounter* tick_counter, Zone* zone)
     : graph_(graph),
       state_(graph, kNumStates),
       revisit_(zone),
       stack_(zone),
-      reduce_(std::move(reduce)) {}
+      reduce_(std::move(reduce)),
+      tick_counter_(tick_counter) {}
 
 void EffectGraphReducer::ReduceFrom(Node* node) {
   // Perform DFS and eagerly trigger revisitation as soon as possible.
@@ -293,6 +297,7 @@ void EffectGraphReducer::ReduceFrom(Node* node) {
   DCHECK(stack_.empty());
   stack_.push({node, 0});
   while (!stack_.empty()) {
+    tick_counter_->DoTick();
     Node* current = stack_.top().node;
     int& input_index = stack_.top().input_index;
     if (input_index < current->InputCount()) {
@@ -357,7 +362,8 @@ VariableTracker::VariableTracker(JSGraph* graph, EffectGraphReducer* reducer,
       graph_(graph),
       table_(zone, State(zone)),
       buffer_(zone),
-      reducer_(reducer) {}
+      reducer_(reducer),
+      tick_counter_(reducer->tick_counter()) {}
 
 VariableTracker::Scope::Scope(VariableTracker* states, Node* node,
                               Reduction* reduction)
@@ -406,6 +412,7 @@ VariableTracker::State VariableTracker::MergeInputs(Node* effect_phi) {
   State first_input = table_.Get(NodeProperties::GetEffectInput(effect_phi, 0));
   State result = first_input;
   for (std::pair<Variable, Node*> var_value : first_input) {
+    tick_counter_->DoTick();
     if (Node* value = var_value.second) {
       Variable var = var_value.first;
       TRACE("var %i:\n", var.id_);
@@ -441,10 +448,12 @@ VariableTracker::State VariableTracker::MergeInputs(Node* effect_phi) {
       // [old_value] cannot originate from the inputs. Thus [old_value]
       // must have been created by a previous reduction of this [effect_phi].
       for (int i = 0; i < arity; ++i) {
-        NodeProperties::ReplaceValueInput(
-            old_value, buffer_[i] ? buffer_[i] : graph_->Dead(), i);
-        // This change cannot affect the rest of the reducer, so there is no
-        // need to trigger additional revisitations.
+        Node* old_input = NodeProperties::GetValueInput(old_value, i);
+        Node* new_input = buffer_[i] ? buffer_[i] : graph_->Dead();
+        if (old_input != new_input) {
+          NodeProperties::ReplaceValueInput(old_value, new_input, i);
+          reducer_->Revisit(old_value);
+        }
       }
       result.Set(var, old_value);
     } else {
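//
// [Editor's sketch, not part of the patch] The MergeInputs change above swaps
// an unconditional in-place rewrite for a compare-then-replace that also
// re-enqueues the rewritten node, so the analysis converges to a fixpoint
// without redundant work. The idiom, reduced to a toy worklist over
// hypothetical types (not V8's):
#include <cstddef>
#include <queue>
#include <vector>

struct ToyPhi {
  std::vector<int> inputs;
};

// Replace phi inputs; revisit only if something actually changed.
void UpdatePhiInputs(ToyPhi* phi, const std::vector<int>& wanted,
                     std::queue<ToyPhi*>* worklist) {
  bool changed = false;
  for (std::size_t i = 0; i < phi->inputs.size(); ++i) {
    if (phi->inputs[i] != wanted[i]) {
      phi->inputs[i] = wanted[i];
      changed = true;
    }
  }
  if (changed) worklist->push(phi);  // analogue of reducer_->Revisit()
}
//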
@@ -701,21 +710,19 @@ void ReduceNode(const Operator* op, EscapeAnalysisTracker::Scope* current,
         } else if (right_object && !right_object->HasEscaped()) {
           replacement = jsgraph->FalseConstant();
         }
-        if (replacement) {
-          // TODO(tebbi) This is a workaround for uninhabited types. If we
-          // replaced a value of uninhabited type with a constant, we would
-          // widen the type of the node. This could produce inconsistent
-          // types (which might confuse representation selection). We get
-          // around this by refusing to constant-fold and escape-analyze
-          // if the type is not inhabited.
-          if (!NodeProperties::GetType(left).IsNone() &&
-              !NodeProperties::GetType(right).IsNone()) {
-            current->SetReplacement(replacement);
-          } else {
-            current->SetEscaped(left);
-            current->SetEscaped(right);
-          }
+        // TODO(tebbi) This is a workaround for uninhabited types. If we
+        // replaced a value of uninhabited type with a constant, we would
+        // widen the type of the node. This could produce inconsistent
+        // types (which might confuse representation selection). We get
+        // around this by refusing to constant-fold and escape-analyze
+        // if the type is not inhabited.
+        if (replacement && !NodeProperties::GetType(left).IsNone() &&
+            !NodeProperties::GetType(right).IsNone()) {
+          current->SetReplacement(replacement);
+          break;
         }
+        current->SetEscaped(left);
+        current->SetEscaped(right);
         break;
       }
       case IrOpcode::kCheckMaps: {
@@ -817,11 +824,12 @@ void EscapeAnalysis::Reduce(Node* node, Reduction* reduction) {
   ReduceNode(op, &current, jsgraph());
 }
 
-EscapeAnalysis::EscapeAnalysis(JSGraph* jsgraph, Zone* zone)
+EscapeAnalysis::EscapeAnalysis(JSGraph* jsgraph, TickCounter* tick_counter,
+                               Zone* zone)
     : EffectGraphReducer(
           jsgraph->graph(),
           [this](Node* node, Reduction* reduction) { Reduce(node, reduction); },
-          zone),
+          tick_counter, zone),
       tracker_(new (zone) EscapeAnalysisTracker(jsgraph, this, zone)),
       jsgraph_(jsgraph) {}
 
diff --git a/deps/v8/src/compiler/escape-analysis.h b/deps/v8/src/compiler/escape-analysis.h
index c3dcd2f74d6e72..0fbc7d0bdd2e99 100644
--- a/deps/v8/src/compiler/escape-analysis.h
+++ b/deps/v8/src/compiler/escape-analysis.h
@@ -14,6 +14,9 @@
 
 namespace v8 {
 namespace internal {
+
+class TickCounter;
+
 namespace compiler {
 
 class CommonOperatorBuilder;
@@ -38,7 +41,8 @@ class EffectGraphReducer {
   };
 
   EffectGraphReducer(Graph* graph,
-                     std::function<void(Node*, Reduction*)> reduce, Zone* zone);
+                     std::function<void(Node*, Reduction*)> reduce,
+                     TickCounter* tick_counter, Zone* zone);
 
   void ReduceGraph() { ReduceFrom(graph_->end()); }
 
@@ -56,6 +60,8 @@ class EffectGraphReducer {
 
   bool Complete() { return stack_.empty() && revisit_.empty(); }
 
+  TickCounter* tick_counter() const { return tick_counter_; }
+
  private:
   struct NodeState {
     Node* node;
@@ -69,6 +75,7 @@ class EffectGraphReducer {
   ZoneStack<Node*> revisit_;
   ZoneStack<NodeState> stack_;
   std::function<void(Node*, Reduction*)> reduce_;
+  TickCounter* const tick_counter_;
 };
 
 // A variable is an abstract storage location, which is lowered to SSA values
@@ -164,7 +171,7 @@ class EscapeAnalysisResult {
 class V8_EXPORT_PRIVATE EscapeAnalysis final
     : public NON_EXPORTED_BASE(EffectGraphReducer) {
  public:
-  EscapeAnalysis(JSGraph* jsgraph, Zone* zone);
+  EscapeAnalysis(JSGraph* jsgraph, TickCounter* tick_counter, Zone* zone);
 
   EscapeAnalysisResult analysis_result() {
     DCHECK(Complete());
diff --git a/deps/v8/src/compiler/graph-assembler.cc b/deps/v8/src/compiler/graph-assembler.cc
index cc9dbd9dfdb9da..50f29d968bfdf6 100644
--- a/deps/v8/src/compiler/graph-assembler.cc
+++ b/deps/v8/src/compiler/graph-assembler.cc
@@ -52,6 +52,9 @@ Node* GraphAssembler::HeapConstant(Handle<HeapObject> object) {
   return jsgraph()->HeapConstant(object);
 }
 
+Node* GraphAssembler::NumberConstant(double value) {
+  return jsgraph()->Constant(value);
+}
 
 Node* GraphAssembler::ExternalConstant(ExternalReference ref) {
   return jsgraph()->ExternalConstant(ref);
@@ -221,6 +224,12 @@ Node* GraphAssembler::BitcastTaggedToWord(Node* value) {
                           current_effect_, current_control_);
 }
 
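//
// [Editor's sketch, not part of the patch] Each GraphAssembler helper such as
// BitcastTaggedToWord above threads one implicit effect/control pair through
// every node it emits, which is what serializes memory operations into a
// single chain. A toy version of that pattern, assuming hypothetical types
// rather than V8's API:
#include <memory>
#include <vector>

struct ChainNode {
  const char* op;
  std::vector<ChainNode*> inputs;
};

class ToyAssembler {
 public:
  ToyAssembler(ChainNode* effect, ChainNode* control)
      : effect_(effect), control_(control) {}

  // Emitting a node consumes the current effect and then becomes the new
  // effect, so successive emissions form an ordered chain.
  ChainNode* Emit(const char* op, ChainNode* value) {
    owned_.push_back(
        std::make_unique<ChainNode>(ChainNode{op, {value, effect_, control_}}));
    effect_ = owned_.back().get();
    return effect_;
  }

 private:
  ChainNode* effect_;
  ChainNode* control_;
  std::vector<std::unique_ptr<ChainNode>> owned_;  // keeps the toy leak-free
};
//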
+Node* GraphAssembler::BitcastTaggedSignedToWord(Node* value) { + return current_effect_ = + graph()->NewNode(machine()->BitcastTaggedSignedToWord(), value, + current_effect_, current_control_); +} + Node* GraphAssembler::Word32PoisonOnSpeculation(Node* value) { return current_effect_ = graph()->NewNode(machine()->Word32PoisonOnSpeculation(), value, diff --git a/deps/v8/src/compiler/graph-assembler.h b/deps/v8/src/compiler/graph-assembler.h index 74b885b7887f56..e2c0005d15741f 100644 --- a/deps/v8/src/compiler/graph-assembler.h +++ b/deps/v8/src/compiler/graph-assembler.h @@ -57,6 +57,7 @@ namespace compiler { V(Word32Shr) \ V(Word32Shl) \ V(Word32Sar) \ + V(Word64And) \ V(IntAdd) \ V(IntSub) \ V(IntMul) \ @@ -71,6 +72,7 @@ namespace compiler { V(Uint64LessThan) \ V(Uint64LessThanOrEqual) \ V(Int32LessThan) \ + V(Int64Sub) \ V(Float64Add) \ V(Float64Sub) \ V(Float64Div) \ @@ -93,22 +95,24 @@ namespace compiler { V(Uint32Mod) \ V(Uint32Div) -#define JSGRAPH_SINGLETON_CONSTANT_LIST(V) \ - V(TrueConstant) \ - V(FalseConstant) \ - V(NullConstant) \ - V(BigIntMapConstant) \ - V(BooleanMapConstant) \ - V(HeapNumberMapConstant) \ - V(NoContextConstant) \ - V(EmptyStringConstant) \ - V(UndefinedConstant) \ - V(TheHoleConstant) \ - V(FixedArrayMapConstant) \ - V(FixedDoubleArrayMapConstant) \ - V(ToNumberBuiltinConstant) \ - V(AllocateInYoungGenerationStubConstant) \ - V(AllocateInOldGenerationStubConstant) +#define JSGRAPH_SINGLETON_CONSTANT_LIST(V) \ + V(TrueConstant) \ + V(FalseConstant) \ + V(NullConstant) \ + V(BigIntMapConstant) \ + V(BooleanMapConstant) \ + V(HeapNumberMapConstant) \ + V(NoContextConstant) \ + V(EmptyStringConstant) \ + V(UndefinedConstant) \ + V(TheHoleConstant) \ + V(FixedArrayMapConstant) \ + V(FixedDoubleArrayMapConstant) \ + V(ToNumberBuiltinConstant) \ + V(AllocateInYoungGenerationStubConstant) \ + V(AllocateRegularInYoungGenerationStubConstant) \ + V(AllocateInOldGenerationStubConstant) \ + V(AllocateRegularInOldGenerationStubConstant) class GraphAssembler; @@ -196,6 +200,7 @@ class GraphAssembler { Node* Float64Constant(double value); Node* Projection(int index, Node* value); Node* HeapConstant(Handle object); + Node* NumberConstant(double value); Node* CEntryStubConstant(int result_size); Node* ExternalConstant(ExternalReference ref); @@ -225,6 +230,7 @@ class GraphAssembler { Node* ToNumber(Node* value); Node* BitcastWordToTagged(Node* value); Node* BitcastTaggedToWord(Node* value); + Node* BitcastTaggedSignedToWord(Node* value); Node* Allocate(AllocationType allocation, Node* size); Node* LoadField(FieldAccess const&, Node* object); Node* LoadElement(ElementAccess const&, Node* object, Node* index); diff --git a/deps/v8/src/compiler/graph-reducer.cc b/deps/v8/src/compiler/graph-reducer.cc index fafa322d87cb81..9a0dea6b260fe1 100644 --- a/deps/v8/src/compiler/graph-reducer.cc +++ b/deps/v8/src/compiler/graph-reducer.cc @@ -5,10 +5,11 @@ #include #include -#include "src/compiler/graph.h" +#include "src/codegen/tick-counter.h" #include "src/compiler/graph-reducer.h" -#include "src/compiler/node.h" +#include "src/compiler/graph.h" #include "src/compiler/node-properties.h" +#include "src/compiler/node.h" #include "src/compiler/verifier.h" namespace v8 { @@ -25,13 +26,15 @@ enum class GraphReducer::State : uint8_t { void Reducer::Finalize() {} -GraphReducer::GraphReducer(Zone* zone, Graph* graph, Node* dead) +GraphReducer::GraphReducer(Zone* zone, Graph* graph, TickCounter* tick_counter, + Node* dead) : graph_(graph), dead_(dead), state_(graph, 4), reducers_(zone), 
revisit_(zone), - stack_(zone) { + stack_(zone), + tick_counter_(tick_counter) { if (dead != nullptr) { NodeProperties::SetType(dead_, Type::None()); } @@ -82,6 +85,7 @@ Reduction GraphReducer::Reduce(Node* const node) { auto skip = reducers_.end(); for (auto i = reducers_.begin(); i != reducers_.end();) { if (i != skip) { + tick_counter_->DoTick(); Reduction reduction = (*i)->Reduce(node); if (!reduction.Changed()) { // No change from this reducer. diff --git a/deps/v8/src/compiler/graph-reducer.h b/deps/v8/src/compiler/graph-reducer.h index 3bb20a462514ea..bbcc67b0748cd3 100644 --- a/deps/v8/src/compiler/graph-reducer.h +++ b/deps/v8/src/compiler/graph-reducer.h @@ -12,13 +12,15 @@ namespace v8 { namespace internal { + +class TickCounter; + namespace compiler { // Forward declarations. class Graph; class Node; - // NodeIds are identifying numbers for nodes that can be used to index auxiliary // out-of-line data associated with each node. using NodeId = uint32_t; @@ -129,7 +131,8 @@ class AdvancedReducer : public Reducer { class V8_EXPORT_PRIVATE GraphReducer : public NON_EXPORTED_BASE(AdvancedReducer::Editor) { public: - GraphReducer(Zone* zone, Graph* graph, Node* dead = nullptr); + GraphReducer(Zone* zone, Graph* graph, TickCounter* tick_counter, + Node* dead = nullptr); ~GraphReducer() override; Graph* graph() const { return graph_; } @@ -181,6 +184,7 @@ class V8_EXPORT_PRIVATE GraphReducer ZoneVector reducers_; ZoneQueue revisit_; ZoneStack stack_; + TickCounter* const tick_counter_; DISALLOW_COPY_AND_ASSIGN(GraphReducer); }; diff --git a/deps/v8/src/compiler/heap-refs.h b/deps/v8/src/compiler/heap-refs.h new file mode 100644 index 00000000000000..5547039fa63c5f --- /dev/null +++ b/deps/v8/src/compiler/heap-refs.h @@ -0,0 +1,906 @@ +// Copyright 2019 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#ifndef V8_COMPILER_HEAP_REFS_H_ +#define V8_COMPILER_HEAP_REFS_H_ + +#include "src/base/optional.h" +#include "src/ic/call-optimization.h" +#include "src/objects/elements-kind.h" +#include "src/objects/feedback-vector.h" +#include "src/objects/instance-type.h" + +namespace v8 { +namespace internal { + +class BytecodeArray; +class CallHandlerInfo; +class FixedDoubleArray; +class FunctionTemplateInfo; +class HeapNumber; +class InternalizedString; +class JSBoundFunction; +class JSDataView; +class JSGlobalProxy; +class JSRegExp; +class JSTypedArray; +class NativeContext; +class ScriptContextTable; +class VectorSlotPair; + +namespace compiler { + +// Whether we are loading a property or storing to a property. +// For a store during literal creation, do not walk up the prototype chain. +enum class AccessMode { kLoad, kStore, kStoreInLiteral, kHas }; + +enum class OddballType : uint8_t { + kNone, // Not an Oddball. + kBoolean, // True or False. + kUndefined, + kNull, + kHole, + kUninitialized, + kOther // Oddball, but none of the above. +}; + +// This list is sorted such that subtypes appear before their supertypes. +// DO NOT VIOLATE THIS PROPERTY! 
+#define HEAP_BROKER_OBJECT_LIST(V) \ + /* Subtypes of JSObject */ \ + V(JSArray) \ + V(JSBoundFunction) \ + V(JSDataView) \ + V(JSFunction) \ + V(JSGlobalProxy) \ + V(JSRegExp) \ + V(JSTypedArray) \ + /* Subtypes of Context */ \ + V(NativeContext) \ + /* Subtypes of FixedArray */ \ + V(Context) \ + V(ScopeInfo) \ + V(ScriptContextTable) \ + /* Subtypes of FixedArrayBase */ \ + V(BytecodeArray) \ + V(FixedArray) \ + V(FixedDoubleArray) \ + /* Subtypes of Name */ \ + V(InternalizedString) \ + V(String) \ + V(Symbol) \ + /* Subtypes of HeapObject */ \ + V(AllocationSite) \ + V(BigInt) \ + V(CallHandlerInfo) \ + V(Cell) \ + V(Code) \ + V(DescriptorArray) \ + V(FeedbackCell) \ + V(FeedbackVector) \ + V(FixedArrayBase) \ + V(FunctionTemplateInfo) \ + V(HeapNumber) \ + V(JSObject) \ + V(Map) \ + V(MutableHeapNumber) \ + V(Name) \ + V(PropertyCell) \ + V(SharedFunctionInfo) \ + V(SourceTextModule) \ + /* Subtypes of Object */ \ + V(HeapObject) + +class CompilationDependencies; +class JSHeapBroker; +class ObjectData; +class PerIsolateCompilerCache; +class PropertyAccessInfo; +#define FORWARD_DECL(Name) class Name##Ref; +HEAP_BROKER_OBJECT_LIST(FORWARD_DECL) +#undef FORWARD_DECL + +class V8_EXPORT_PRIVATE ObjectRef { + public: + ObjectRef(JSHeapBroker* broker, Handle object); + ObjectRef(JSHeapBroker* broker, ObjectData* data) + : data_(data), broker_(broker) { + CHECK_NOT_NULL(data_); + } + + Handle object() const; + + bool equals(const ObjectRef& other) const; + + bool IsSmi() const; + int AsSmi() const; + +#define HEAP_IS_METHOD_DECL(Name) bool Is##Name() const; + HEAP_BROKER_OBJECT_LIST(HEAP_IS_METHOD_DECL) +#undef HEAP_IS_METHOD_DECL + +#define HEAP_AS_METHOD_DECL(Name) Name##Ref As##Name() const; + HEAP_BROKER_OBJECT_LIST(HEAP_AS_METHOD_DECL) +#undef HEAP_AS_METHOD_DECL + + bool IsNullOrUndefined() const; + + bool BooleanValue() const; + Maybe OddballToNumber() const; + + // Return the element at key {index} if {index} is known to be an own data + // property of the object that is non-writable and non-configurable. + base::Optional GetOwnConstantElement(uint32_t index, + bool serialize = false) const; + + Isolate* isolate() const; + + struct Hash { + size_t operator()(const ObjectRef& ref) const { + return base::hash_combine(ref.object().address()); + } + }; + struct Equal { + bool operator()(const ObjectRef& lhs, const ObjectRef& rhs) const { + return lhs.equals(rhs); + } + }; + + protected: + JSHeapBroker* broker() const; + ObjectData* data() const; + ObjectData* data_; // Should be used only by object() getters. + + private: + friend class FunctionTemplateInfoRef; + friend class JSArrayData; + friend class JSGlobalProxyRef; + friend class JSGlobalProxyData; + friend class JSObjectData; + friend class StringData; + + friend std::ostream& operator<<(std::ostream& os, const ObjectRef& ref); + + JSHeapBroker* broker_; +}; + +// Temporary class that carries information from a Map. We'd like to remove +// this class and use MapRef instead, but we can't as long as we support the +// kDisabled broker mode. That's because obtaining the MapRef via +// HeapObjectRef::map() requires a HandleScope when the broker is disabled. +// During OptimizeGraph we generally don't have a HandleScope, however. There +// are two places where we therefore use GetHeapObjectType() instead. Both that +// function and this class should eventually be removed. 
+class HeapObjectType { + public: + enum Flag : uint8_t { kUndetectable = 1 << 0, kCallable = 1 << 1 }; + + using Flags = base::Flags; + + HeapObjectType(InstanceType instance_type, Flags flags, + OddballType oddball_type) + : instance_type_(instance_type), + oddball_type_(oddball_type), + flags_(flags) { + DCHECK_EQ(instance_type == ODDBALL_TYPE, + oddball_type != OddballType::kNone); + } + + OddballType oddball_type() const { return oddball_type_; } + InstanceType instance_type() const { return instance_type_; } + Flags flags() const { return flags_; } + + bool is_callable() const { return flags_ & kCallable; } + bool is_undetectable() const { return flags_ & kUndetectable; } + + private: + InstanceType const instance_type_; + OddballType const oddball_type_; + Flags const flags_; +}; + +class HeapObjectRef : public ObjectRef { + public: + using ObjectRef::ObjectRef; + Handle object() const; + + MapRef map() const; + + // See the comment on the HeapObjectType class. + HeapObjectType GetHeapObjectType() const; +}; + +class PropertyCellRef : public HeapObjectRef { + public: + using HeapObjectRef::HeapObjectRef; + Handle object() const; + + PropertyDetails property_details() const; + + void Serialize(); + ObjectRef value() const; +}; + +class JSObjectRef : public HeapObjectRef { + public: + using HeapObjectRef::HeapObjectRef; + Handle object() const; + + uint64_t RawFastDoublePropertyAsBitsAt(FieldIndex index) const; + double RawFastDoublePropertyAt(FieldIndex index) const; + ObjectRef RawFastPropertyAt(FieldIndex index) const; + + // Return the value of the property identified by the field {index} + // if {index} is known to be an own data property of the object. + base::Optional GetOwnProperty(Representation field_representation, + FieldIndex index, + bool serialize = false) const; + + FixedArrayBaseRef elements() const; + void SerializeElements(); + void EnsureElementsTenured(); + ElementsKind GetElementsKind() const; + + void SerializeObjectCreateMap(); + base::Optional GetObjectCreateMap() const; +}; + +class JSDataViewRef : public JSObjectRef { + public: + using JSObjectRef::JSObjectRef; + Handle object() const; + + size_t byte_length() const; + size_t byte_offset() const; +}; + +class JSBoundFunctionRef : public JSObjectRef { + public: + using JSObjectRef::JSObjectRef; + Handle object() const; + + void Serialize(); + + // The following are available only after calling Serialize(). + ObjectRef bound_target_function() const; + ObjectRef bound_this() const; + FixedArrayRef bound_arguments() const; +}; + +class V8_EXPORT_PRIVATE JSFunctionRef : public JSObjectRef { + public: + using JSObjectRef::JSObjectRef; + Handle object() const; + + bool has_feedback_vector() const; + bool has_initial_map() const; + bool has_prototype() const; + bool PrototypeRequiresRuntimeLookup() const; + + void Serialize(); + bool serialized() const; + + // The following are available only after calling Serialize(). 
+ ObjectRef prototype() const; + MapRef initial_map() const; + ContextRef context() const; + NativeContextRef native_context() const; + SharedFunctionInfoRef shared() const; + FeedbackVectorRef feedback_vector() const; + int InitialMapInstanceSizeWithMinSlack() const; + + bool IsSerializedForCompilation() const; +}; + +class JSRegExpRef : public JSObjectRef { + public: + using JSObjectRef::JSObjectRef; + Handle object() const; + + ObjectRef raw_properties_or_hash() const; + ObjectRef data() const; + ObjectRef source() const; + ObjectRef flags() const; + ObjectRef last_index() const; +}; + +class HeapNumberRef : public HeapObjectRef { + public: + using HeapObjectRef::HeapObjectRef; + Handle object() const; + + double value() const; +}; + +class MutableHeapNumberRef : public HeapObjectRef { + public: + using HeapObjectRef::HeapObjectRef; + Handle object() const; + + double value() const; +}; + +class ContextRef : public HeapObjectRef { + public: + using HeapObjectRef::HeapObjectRef; + Handle object() const; + + // {previous} decrements {depth} by 1 for each previous link successfully + // followed. If {depth} != 0 on function return, then it only got + // partway to the desired depth. If {serialize} is true, then + // {previous} will cache its findings. + ContextRef previous(size_t* depth, bool serialize = false) const; + + // Only returns a value if the index is valid for this ContextRef. + base::Optional get(int index, bool serialize = false) const; + + // We only serialize the ScopeInfo if certain Promise + // builtins are called. + void SerializeScopeInfo(); + base::Optional scope_info() const; +}; + +#define BROKER_COMPULSORY_NATIVE_CONTEXT_FIELDS(V) \ + V(JSFunction, array_function) \ + V(JSFunction, boolean_function) \ + V(JSFunction, bigint_function) \ + V(JSFunction, number_function) \ + V(JSFunction, object_function) \ + V(JSFunction, promise_function) \ + V(JSFunction, promise_then) \ + V(JSFunction, string_function) \ + V(JSFunction, symbol_function) \ + V(JSGlobalProxy, global_proxy_object) \ + V(JSObject, promise_prototype) \ + V(Map, bound_function_with_constructor_map) \ + V(Map, bound_function_without_constructor_map) \ + V(Map, fast_aliased_arguments_map) \ + V(Map, initial_array_iterator_map) \ + V(Map, initial_string_iterator_map) \ + V(Map, iterator_result_map) \ + V(Map, js_array_holey_double_elements_map) \ + V(Map, js_array_holey_elements_map) \ + V(Map, js_array_holey_smi_elements_map) \ + V(Map, js_array_packed_double_elements_map) \ + V(Map, js_array_packed_elements_map) \ + V(Map, js_array_packed_smi_elements_map) \ + V(Map, sloppy_arguments_map) \ + V(Map, slow_object_with_null_prototype_map) \ + V(Map, strict_arguments_map) \ + V(ScriptContextTable, script_context_table) \ + V(SharedFunctionInfo, promise_capability_default_reject_shared_fun) \ + V(SharedFunctionInfo, promise_catch_finally_shared_fun) \ + V(SharedFunctionInfo, promise_then_finally_shared_fun) \ + V(SharedFunctionInfo, promise_capability_default_resolve_shared_fun) + +// Those are set by Bootstrapper::ExportFromRuntime, which may not yet have +// happened when Turbofan is invoked via --always-opt. 
+#define BROKER_OPTIONAL_NATIVE_CONTEXT_FIELDS(V) \ + V(Map, async_function_object_map) \ + V(Map, map_key_iterator_map) \ + V(Map, map_key_value_iterator_map) \ + V(Map, map_value_iterator_map) \ + V(JSFunction, regexp_exec_function) \ + V(Map, set_key_value_iterator_map) \ + V(Map, set_value_iterator_map) + +#define BROKER_NATIVE_CONTEXT_FIELDS(V) \ + BROKER_COMPULSORY_NATIVE_CONTEXT_FIELDS(V) \ + BROKER_OPTIONAL_NATIVE_CONTEXT_FIELDS(V) + +class NativeContextRef : public ContextRef { + public: + using ContextRef::ContextRef; + Handle object() const; + + void Serialize(); + +#define DECL_ACCESSOR(type, name) type##Ref name() const; + BROKER_NATIVE_CONTEXT_FIELDS(DECL_ACCESSOR) +#undef DECL_ACCESSOR + + ScopeInfoRef scope_info() const; + MapRef GetFunctionMapFromIndex(int index) const; + MapRef GetInitialJSArrayMap(ElementsKind kind) const; + base::Optional GetConstructorFunction(const MapRef& map) const; +}; + +class NameRef : public HeapObjectRef { + public: + using HeapObjectRef::HeapObjectRef; + Handle object() const; + + bool IsUniqueName() const; +}; + +class ScriptContextTableRef : public HeapObjectRef { + public: + using HeapObjectRef::HeapObjectRef; + Handle object() const; + + struct LookupResult { + ContextRef context; + bool immutable; + int index; + }; + + base::Optional lookup(const NameRef& name) const; +}; + +class DescriptorArrayRef : public HeapObjectRef { + public: + using HeapObjectRef::HeapObjectRef; + Handle object() const; +}; + +class FeedbackCellRef : public HeapObjectRef { + public: + using HeapObjectRef::HeapObjectRef; + Handle object() const; + + HeapObjectRef value() const; +}; + +class FeedbackVectorRef : public HeapObjectRef { + public: + using HeapObjectRef::HeapObjectRef; + Handle object() const; + + ObjectRef get(FeedbackSlot slot) const; + + void SerializeSlots(); +}; + +class CallHandlerInfoRef : public HeapObjectRef { + public: + using HeapObjectRef::HeapObjectRef; + Handle object() const; + + Address callback() const; + + void Serialize(); + ObjectRef data() const; +}; + +class AllocationSiteRef : public HeapObjectRef { + public: + using HeapObjectRef::HeapObjectRef; + Handle object() const; + + bool PointsToLiteral() const; + AllocationType GetAllocationType() const; + ObjectRef nested_site() const; + + // {IsFastLiteral} determines whether the given array or object literal + // boilerplate satisfies all limits to be considered for fast deep-copying + // and computes the total size of all objects that are part of the graph. + // + // If PointsToLiteral() is false, then IsFastLiteral() is also false. + bool IsFastLiteral() const; + // We only serialize boilerplate if IsFastLiteral is true. 
+ base::Optional boilerplate() const; + + ElementsKind GetElementsKind() const; + bool CanInlineCall() const; +}; + +class BigIntRef : public HeapObjectRef { + public: + using HeapObjectRef::HeapObjectRef; + Handle object() const; + + uint64_t AsUint64() const; +}; + +class V8_EXPORT_PRIVATE MapRef : public HeapObjectRef { + public: + using HeapObjectRef::HeapObjectRef; + Handle object() const; + + int instance_size() const; + InstanceType instance_type() const; + int GetInObjectProperties() const; + int GetInObjectPropertiesStartInWords() const; + int NumberOfOwnDescriptors() const; + int GetInObjectPropertyOffset(int index) const; + int constructor_function_index() const; + int NextFreePropertyIndex() const; + int UnusedPropertyFields() const; + ElementsKind elements_kind() const; + bool is_stable() const; + bool is_extensible() const; + bool is_constructor() const; + bool has_prototype_slot() const; + bool is_access_check_needed() const; + bool is_deprecated() const; + bool CanBeDeprecated() const; + bool CanTransition() const; + bool IsInobjectSlackTrackingInProgress() const; + bool is_dictionary_map() const; + bool IsFixedCowArrayMap() const; + bool IsPrimitiveMap() const; + bool is_undetectable() const; + bool is_callable() const; + bool has_indexed_interceptor() const; + bool is_migration_target() const; + bool supports_fast_array_iteration() const; + bool supports_fast_array_resize() const; + bool IsMapOfCurrentGlobalProxy() const; + + OddballType oddball_type() const; + +#define DEF_TESTER(Type, ...) bool Is##Type##Map() const; + INSTANCE_TYPE_CHECKERS(DEF_TESTER) +#undef DEF_TESTER + + void SerializeBackPointer(); + HeapObjectRef GetBackPointer() const; + + void SerializePrototype(); + bool serialized_prototype() const; + HeapObjectRef prototype() const; + + void SerializeForElementLoad(); + + void SerializeForElementStore(); + bool HasOnlyStablePrototypesWithFastElements( + ZoneVector* prototype_maps); + + // Concerning the underlying instance_descriptors: + void SerializeOwnDescriptors(); + void SerializeOwnDescriptor(int descriptor_index); + MapRef FindFieldOwner(int descriptor_index) const; + PropertyDetails GetPropertyDetails(int descriptor_index) const; + NameRef GetPropertyKey(int descriptor_index) const; + FieldIndex GetFieldIndexFor(int descriptor_index) const; + ObjectRef GetFieldType(int descriptor_index) const; + bool IsUnboxedDoubleField(int descriptor_index) const; + + // Available after calling JSFunctionRef::Serialize on a function that has + // this map as initial map. + ObjectRef GetConstructor() const; + base::Optional AsElementsKind(ElementsKind kind) const; +}; + +struct HolderLookupResult { + HolderLookupResult(CallOptimization::HolderLookup lookup_ = + CallOptimization::kHolderNotFound, + base::Optional holder_ = base::nullopt) + : lookup(lookup_), holder(holder_) {} + CallOptimization::HolderLookup lookup; + base::Optional holder; +}; + +class FunctionTemplateInfoRef : public HeapObjectRef { + public: + using HeapObjectRef::HeapObjectRef; + Handle object() const; + + bool is_signature_undefined() const; + bool accept_any_receiver() const; + // The following returns true if the CallHandlerInfo is present. 
+ bool has_call_code() const; + + void SerializeCallCode(); + base::Optional call_code() const; + + HolderLookupResult LookupHolderOfExpectedType(MapRef receiver_map, + bool serialize); +}; + +class FixedArrayBaseRef : public HeapObjectRef { + public: + using HeapObjectRef::HeapObjectRef; + Handle object() const; + + int length() const; +}; + +class FixedArrayRef : public FixedArrayBaseRef { + public: + using FixedArrayBaseRef::FixedArrayBaseRef; + Handle object() const; + + ObjectRef get(int i) const; +}; + +class FixedDoubleArrayRef : public FixedArrayBaseRef { + public: + using FixedArrayBaseRef::FixedArrayBaseRef; + Handle object() const; + + double get_scalar(int i) const; + bool is_the_hole(int i) const; +}; + +class BytecodeArrayRef : public FixedArrayBaseRef { + public: + using FixedArrayBaseRef::FixedArrayBaseRef; + Handle object() const; + + int register_count() const; + int parameter_count() const; + interpreter::Register incoming_new_target_or_generator_register() const; + + // Bytecode access methods. + uint8_t get(int index) const; + Address GetFirstBytecodeAddress() const; + + // Source position table. + const byte* source_positions_address() const; + int source_positions_size() const; + + // Constant pool access. + Handle GetConstantAtIndex(int index) const; + bool IsConstantAtIndexSmi(int index) const; + Smi GetConstantAtIndexAsSmi(int index) const; + + // Exception handler table. + Address handler_table_address() const; + int handler_table_size() const; + + bool IsSerializedForCompilation() const; + void SerializeForCompilation(); +}; + +class JSArrayRef : public JSObjectRef { + public: + using JSObjectRef::JSObjectRef; + Handle object() const; + + ObjectRef length() const; + + // Return the element at key {index} if the array has a copy-on-write elements + // storage and {index} is known to be an own data property. + base::Optional GetOwnCowElement(uint32_t index, + bool serialize = false) const; +}; + +class ScopeInfoRef : public HeapObjectRef { + public: + using HeapObjectRef::HeapObjectRef; + Handle object() const; + + int ContextLength() const; +}; + +#define BROKER_SFI_FIELDS(V) \ + V(int, internal_formal_parameter_count) \ + V(bool, has_duplicate_parameters) \ + V(int, function_map_index) \ + V(FunctionKind, kind) \ + V(LanguageMode, language_mode) \ + V(bool, native) \ + V(bool, HasBreakInfo) \ + V(bool, HasBuiltinId) \ + V(bool, construct_as_builtin) \ + V(bool, HasBytecodeArray) \ + V(bool, is_safe_to_skip_arguments_adaptor) \ + V(bool, IsInlineable) \ + V(int, StartPosition) \ + V(bool, is_compiled) + +class V8_EXPORT_PRIVATE SharedFunctionInfoRef : public HeapObjectRef { + public: + using HeapObjectRef::HeapObjectRef; + Handle object() const; + + int builtin_id() const; + BytecodeArrayRef GetBytecodeArray() const; + +#define DECL_ACCESSOR(type, name) type name() const; + BROKER_SFI_FIELDS(DECL_ACCESSOR) +#undef DECL_ACCESSOR + + bool IsSerializedForCompilation(FeedbackVectorRef feedback) const; + void SetSerializedForCompilation(FeedbackVectorRef feedback); + + // Template objects may not be created at compilation time. This method + // wraps the retrieval of the template object and creates it if + // necessary. 
+ JSArrayRef GetTemplateObject(ObjectRef description, FeedbackVectorRef vector, + FeedbackSlot slot, bool serialize = false); + + void SerializeFunctionTemplateInfo(); + base::Optional<FunctionTemplateInfoRef> function_template_info() const; +}; + +class StringRef : public NameRef { + public: + using NameRef::NameRef; + Handle<String> object() const; + + int length() const; + uint16_t GetFirstChar(); + base::Optional<double> ToNumber(); + bool IsSeqString() const; + bool IsExternalString() const; +}; + +class SymbolRef : public NameRef { + public: + using NameRef::NameRef; + Handle<Symbol> object() const; +}; + +class JSTypedArrayRef : public JSObjectRef { + public: + using JSObjectRef::JSObjectRef; + Handle<JSTypedArray> object() const; + + bool is_on_heap() const; + size_t length() const; + void* external_pointer() const; + + void Serialize(); + bool serialized() const; + + HeapObjectRef buffer() const; +}; + +class SourceTextModuleRef : public HeapObjectRef { + public: + using HeapObjectRef::HeapObjectRef; + Handle<SourceTextModule> object() const; + + void Serialize(); + + CellRef GetCell(int cell_index) const; +}; + +class CellRef : public HeapObjectRef { + public: + using HeapObjectRef::HeapObjectRef; + Handle<Cell> object() const; + + ObjectRef value() const; +}; + +class JSGlobalProxyRef : public JSObjectRef { + public: + using JSObjectRef::JSObjectRef; + Handle<JSGlobalProxy> object() const; + + // If {serialize} is false: + // If the property is known to exist as a property cell (on the global + // object), return that property cell. Otherwise (not known to exist as a + // property cell or known not to exist as a property cell) return nothing. + // If {serialize} is true: + // Like above but potentially access the heap and serialize the necessary + // information. + base::Optional<PropertyCellRef> GetPropertyCell(NameRef const& name, + bool serialize = false) const; +}; + +class CodeRef : public HeapObjectRef { + public: + using HeapObjectRef::HeapObjectRef; + Handle<Code> object() const; +}; + +class InternalizedStringRef : public StringRef { + public: + using StringRef::StringRef; + Handle<String> object() const; +}; + +class ElementAccessFeedback; +class NamedAccessFeedback; + +class ProcessedFeedback : public ZoneObject { + public: + enum Kind { kInsufficient, kGlobalAccess, kNamedAccess, kElementAccess }; + Kind kind() const { return kind_; } + + ElementAccessFeedback const* AsElementAccess() const; + NamedAccessFeedback const* AsNamedAccess() const; + + protected: + explicit ProcessedFeedback(Kind kind) : kind_(kind) {} + + private: + Kind const kind_; +}; + +class InsufficientFeedback final : public ProcessedFeedback { + public: + InsufficientFeedback(); +}; + +class GlobalAccessFeedback : public ProcessedFeedback { + public: + explicit GlobalAccessFeedback(PropertyCellRef cell); + GlobalAccessFeedback(ContextRef script_context, int slot_index, + bool immutable); + + bool IsPropertyCell() const; + PropertyCellRef property_cell() const; + + bool IsScriptContextSlot() const { return !IsPropertyCell(); } + ContextRef script_context() const; + int slot_index() const; + bool immutable() const; + + base::Optional<ObjectRef> GetConstantHint() const; + + private: + ObjectRef const cell_or_context_; + int const index_and_immutable_; +}; + +class KeyedAccessMode { + public: + static KeyedAccessMode FromNexus(FeedbackNexus const& nexus); + + AccessMode access_mode() const; + bool IsLoad() const; + bool IsStore() const; + KeyedAccessLoadMode load_mode() const; + KeyedAccessStoreMode store_mode() const; + + private: + AccessMode const access_mode_; + union LoadStoreMode { + LoadStoreMode(KeyedAccessLoadMode load_mode); + 
LoadStoreMode(KeyedAccessStoreMode store_mode); + KeyedAccessLoadMode load_mode; + KeyedAccessStoreMode store_mode; + } const load_store_mode_; + + KeyedAccessMode(AccessMode access_mode, KeyedAccessLoadMode load_mode); + KeyedAccessMode(AccessMode access_mode, KeyedAccessStoreMode store_mode); +}; + +class ElementAccessFeedback : public ProcessedFeedback { + public: + ElementAccessFeedback(Zone* zone, KeyedAccessMode const& keyed_mode); + + // No transition sources appear in {receiver_maps}. + // All transition targets appear in {receiver_maps}. + ZoneVector<Handle<Map>> receiver_maps; + ZoneVector<std::pair<Handle<Map>, Handle<Map>>> transitions; + + KeyedAccessMode const keyed_mode; + + class MapIterator { + public: + bool done() const; + void advance(); + MapRef current() const; + + private: + friend class ElementAccessFeedback; + + explicit MapIterator(ElementAccessFeedback const& processed, + JSHeapBroker* broker); + + ElementAccessFeedback const& processed_; + JSHeapBroker* const broker_; + size_t index_ = 0; + }; + + // Iterator over all maps: first {receiver_maps}, then transition sources. + MapIterator all_maps(JSHeapBroker* broker) const; +}; + +class NamedAccessFeedback : public ProcessedFeedback { + public: + NamedAccessFeedback(NameRef const& name, + ZoneVector<PropertyAccessInfo> const& access_infos); + + NameRef const& name() const { return name_; } + ZoneVector<PropertyAccessInfo> const& access_infos() const { + return access_infos_; + } + + private: + NameRef const name_; + ZoneVector<PropertyAccessInfo> const access_infos_; +}; + +} // namespace compiler +} // namespace internal +} // namespace v8 + +#endif // V8_COMPILER_HEAP_REFS_H_ diff --git a/deps/v8/src/compiler/int64-lowering.cc b/deps/v8/src/compiler/int64-lowering.cc index 3430b6b3393bf8..eda866e5f2ad04 100644 --- a/deps/v8/src/compiler/int64-lowering.cc +++ b/deps/v8/src/compiler/int64-lowering.cc @@ -97,7 +97,10 @@ int GetReturnCountAfterLowering(CallDescriptor* call_descriptor) { int GetParameterIndexAfterLowering( Signature<MachineRepresentation>* signature, int old_index) { int result = old_index; - for (int i = 0; i < old_index; i++) { + // Be robust towards special indexes (>= param count). 
+ int max_to_check = + std::min(old_index, static_cast<int>(signature->parameter_count())); + for (int i = 0; i < max_to_check; i++) { if (signature->GetParam(i) == MachineRepresentation::kWord64) { result++; } @@ -142,16 +145,16 @@ int Int64Lowering::GetParameterCountAfterLowering( signature, static_cast<int>(signature->parameter_count())); } -void Int64Lowering::GetIndexNodes(Node* index, Node*& index_low, - Node*& index_high) { +void Int64Lowering::GetIndexNodes(Node* index, Node** index_low, + Node** index_high) { #if defined(V8_TARGET_LITTLE_ENDIAN) - index_low = index; - index_high = graph()->NewNode(machine()->Int32Add(), index, - graph()->NewNode(common()->Int32Constant(4))); + *index_low = index; + *index_high = graph()->NewNode(machine()->Int32Add(), index, + graph()->NewNode(common()->Int32Constant(4))); #elif defined(V8_TARGET_BIG_ENDIAN) - index_low = graph()->NewNode(machine()->Int32Add(), index, - graph()->NewNode(common()->Int32Constant(4))); - index_high = index; + *index_low = graph()->NewNode(machine()->Int32Add(), index, + graph()->NewNode(common()->Int32Constant(4))); + *index_high = index; #endif } @@ -182,7 +185,7 @@ void Int64Lowering::LowerNode(Node* node) { Node* index = node->InputAt(1); Node* index_low; Node* index_high; - GetIndexNodes(index, index_low, index_high); + GetIndexNodes(index, &index_low, &index_high); const Operator* load_op; if (node->opcode() == IrOpcode::kLoad) { @@ -232,7 +235,7 @@ void Int64Lowering::LowerNode(Node* node) { Node* index = node->InputAt(1); Node* index_low; Node* index_high; - GetIndexNodes(index, index_low, index_high); + GetIndexNodes(index, &index_low, &index_high); Node* value = node->InputAt(2); DCHECK(HasReplacementLow(value)); DCHECK(HasReplacementHigh(value)); @@ -291,12 +294,6 @@ void Int64Lowering::LowerNode(Node* node) { // changes. if (GetParameterCountAfterLowering(signature()) != param_count) { int old_index = ParameterIndexOf(node->op()); - // Prevent special lowering of wasm's instance or JS - // context/closure parameters. - if (old_index <= 0 || old_index > param_count) { - DefaultLowering(node); - break; - } // Adjust old_index to be compliant with the signature. --old_index; int new_index = GetParameterIndexAfterLowering(signature(), old_index); @@ -304,6 +301,12 @@ void Int64Lowering::LowerNode(Node* node) { ++new_index; NodeProperties::ChangeOp(node, common()->Parameter(new_index)); + if (old_index < 0 || old_index >= param_count) { + // Special parameters (JS closure/context) don't have kWord64 + // representation anyway. 
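The clamp introduced above is the heart of this fix: special Parameter indexes (the wasm instance, or the JS context and closure) lie outside the signature and must not be used to index it, while every kWord64 parameter before a given index shifts the lowered index by one. A standalone sketch of that remapping, with an assumed `Rep` enum standing in for `MachineRepresentation`:

```cpp
// Sketch of the index remapping done by GetParameterIndexAfterLowering.
#include <algorithm>
#include <cstdio>
#include <vector>

enum class Rep { kWord32, kWord64 };

int LoweredIndex(const std::vector<Rep>& params, int old_index) {
  // Special indexes (< 0 or >= param count) must not read the signature.
  int max_to_check = std::min(old_index, static_cast<int>(params.size()));
  int result = old_index;
  for (int i = 0; i < max_to_check; i++) {
    // Each 64-bit parameter is split into a low and a high 32-bit half,
    // shifting all later indices by one.
    if (params[i] == Rep::kWord64) result++;
  }
  return result;
}

int main() {
  std::vector<Rep> sig = {Rep::kWord64, Rep::kWord32};
  std::printf("%d\n", LoweredIndex(sig, 1));  // prints 2
  return 0;
}
```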
+ break; + } + if (signature()->GetParam(old_index) == MachineRepresentation::kWord64) { Node* high_node = graph()->NewNode(common()->Parameter(new_index + 1), diff --git a/deps/v8/src/compiler/int64-lowering.h b/deps/v8/src/compiler/int64-lowering.h index b0838057719d64..9c77cf41a33137 100644 --- a/deps/v8/src/compiler/int64-lowering.h +++ b/deps/v8/src/compiler/int64-lowering.h @@ -59,7 +59,7 @@ class V8_EXPORT_PRIVATE Int64Lowering { bool HasReplacementHigh(Node* node); Node* GetReplacementHigh(Node* node); void PreparePhiReplacement(Node* phi); - void GetIndexNodes(Node* index, Node*& index_low, Node*& index_high); + void GetIndexNodes(Node* index, Node** index_low, Node** index_high); void ReplaceNodeWithProjections(Node* node); void LowerMemoryBaseAndIndex(Node* node); diff --git a/deps/v8/src/compiler/js-call-reducer.cc b/deps/v8/src/compiler/js-call-reducer.cc index d58331c85e3fb5..8128f899497192 100644 --- a/deps/v8/src/compiler/js-call-reducer.cc +++ b/deps/v8/src/compiler/js-call-reducer.cc @@ -179,6 +179,100 @@ Reduction JSCallReducer::ReduceMathMinMax(Node* node, const Operator* op, return Replace(value); } +// ES section #sec-math.hypot Math.hypot ( value1, value2, ...values ) +Reduction JSCallReducer::ReduceMathHypot(Node* node) { + CallParameters const& p = CallParametersOf(node->op()); + if (p.speculation_mode() == SpeculationMode::kDisallowSpeculation) { + return NoChange(); + } + if (node->op()->ValueInputCount() < 3) { + Node* value = jsgraph()->ZeroConstant(); + ReplaceWithValue(node, value); + return Replace(value); + } + Node* effect = NodeProperties::GetEffectInput(node); + Node* control = NodeProperties::GetControlInput(node); + NodeVector values(graph()->zone()); + + Node* max = effect = + graph()->NewNode(simplified()->SpeculativeToNumber( + NumberOperationHint::kNumberOrOddball, p.feedback()), + NodeProperties::GetValueInput(node, 2), effect, control); + max = graph()->NewNode(simplified()->NumberAbs(), max); + values.push_back(max); + for (int i = 3; i < node->op()->ValueInputCount(); ++i) { + Node* input = effect = graph()->NewNode( + simplified()->SpeculativeToNumber(NumberOperationHint::kNumberOrOddball, + p.feedback()), + NodeProperties::GetValueInput(node, i), effect, control); + input = graph()->NewNode(simplified()->NumberAbs(), input); + values.push_back(input); + + // Make sure {max} is NaN in the end in case any argument was NaN. + max = graph()->NewNode( + common()->Select(MachineRepresentation::kTagged), + graph()->NewNode(simplified()->NumberLessThanOrEqual(), input, max), + max, input); + } + + Node* check0 = graph()->NewNode(simplified()->NumberEqual(), max, + jsgraph()->ZeroConstant()); + Node* branch0 = + graph()->NewNode(common()->Branch(BranchHint::kFalse), check0, control); + + Node* if_true0 = graph()->NewNode(common()->IfTrue(), branch0); + Node* vtrue0 = jsgraph()->ZeroConstant(); + + Node* if_false0 = graph()->NewNode(common()->IfFalse(), branch0); + Node* vfalse0; + { + Node* check1 = graph()->NewNode(simplified()->NumberEqual(), max, + jsgraph()->Constant(V8_INFINITY)); + Node* branch1 = graph()->NewNode(common()->Branch(BranchHint::kFalse), + check1, if_false0); + + Node* if_true1 = graph()->NewNode(common()->IfTrue(), branch1); + Node* vtrue1 = jsgraph()->Constant(V8_INFINITY); + + Node* if_false1 = graph()->NewNode(common()->IfFalse(), branch1); + Node* vfalse1; + { + // Kahan summation to avoid rounding errors. + // Normalize the numbers to the largest one to avoid overflow. 
+ Node* sum = jsgraph()->ZeroConstant(); + Node* compensation = jsgraph()->ZeroConstant(); + for (Node* value : values) { + Node* n = graph()->NewNode(simplified()->NumberDivide(), value, max); + Node* summand = graph()->NewNode( + simplified()->NumberSubtract(), + graph()->NewNode(simplified()->NumberMultiply(), n, n), + compensation); + Node* preliminary = + graph()->NewNode(simplified()->NumberAdd(), sum, summand); + compensation = graph()->NewNode( + simplified()->NumberSubtract(), + graph()->NewNode(simplified()->NumberSubtract(), preliminary, sum), + summand); + sum = preliminary; + } + vfalse1 = graph()->NewNode( + simplified()->NumberMultiply(), + graph()->NewNode(simplified()->NumberSqrt(), sum), max); + } + + if_false0 = graph()->NewNode(common()->Merge(2), if_true1, if_false1); + vfalse0 = graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, 2), + vtrue1, vfalse1, if_false0); + } + + control = graph()->NewNode(common()->Merge(2), if_true0, if_false0); + Node* value = + graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, 2), vtrue0, + vfalse0, control); + ReplaceWithValue(node, value, effect, control); + return Replace(value); +} + Reduction JSCallReducer::Reduce(Node* node) { switch (node->opcode()) { case IrOpcode::kJSConstruct: @@ -274,6 +368,8 @@ Reduction JSCallReducer::ReduceObjectConstructor(Node* node) { // ES6 section 19.2.3.1 Function.prototype.apply ( thisArg, argArray ) Reduction JSCallReducer::ReduceFunctionPrototypeApply(Node* node) { + DisallowHeapAccessIf no_heap_access(FLAG_concurrent_inlining); + DCHECK_EQ(IrOpcode::kJSCall, node->opcode()); CallParameters const& p = CallParametersOf(node->op()); size_t arity = p.arity(); @@ -381,9 +477,17 @@ Reduction JSCallReducer::ReduceFunctionPrototypeApply(Node* node) { } } // Change {node} to the new {JSCall} operator. + // TODO(mslekova): Since this introduces a Call that will get optimized by + // the JSCallReducer, we basically might have to do all the serialization + // that we do for that here as well. The only difference is that here we + // disable speculation (cf. the empty VectorSlotPair above), causing the + // JSCallReducer to do much less work. We should revisit this later. NodeProperties::ChangeOp( node, javascript()->Call(arity, p.frequency(), VectorSlotPair(), convert_mode)); + // TODO(mslekova): Remove once ReduceJSCall is brokerized. + AllowHandleDereference allow_handle_dereference; + AllowHandleAllocation allow_handle_allocation; // Try to further reduce the JSCall {node}. Reduction const reduction = ReduceJSCall(node); return reduction.Changed() ? 
reduction : Changed(node); @@ -496,6 +600,8 @@ Reduction JSCallReducer::ReduceFunctionPrototypeBind(Node* node) { // ES6 section 19.2.3.3 Function.prototype.call (thisArg, ...args) Reduction JSCallReducer::ReduceFunctionPrototypeCall(Node* node) { + DisallowHeapAccessIf no_heap_access(FLAG_concurrent_inlining); + DCHECK_EQ(IrOpcode::kJSCall, node->opcode()); CallParameters const& p = CallParametersOf(node->op()); Node* target = NodeProperties::GetValueInput(node, 0); @@ -508,6 +614,10 @@ Reduction JSCallReducer::ReduceFunctionPrototypeCall(Node* node) { HeapObjectMatcher m(target); if (m.HasValue()) { JSFunctionRef function = m.Ref(broker()).AsJSFunction(); + if (FLAG_concurrent_inlining && !function.serialized()) { + TRACE_BROKER_MISSING(broker(), "Serialize call on function " << function); + return NoChange(); + } context = jsgraph()->Constant(function.context()); } else { context = effect = graph()->NewNode( @@ -537,6 +647,9 @@ Reduction JSCallReducer::ReduceFunctionPrototypeCall(Node* node) { NodeProperties::ChangeOp( node, javascript()->Call(arity, p.frequency(), VectorSlotPair(), convert_mode)); + // TODO(mslekova): Remove once ReduceJSCall is brokerized. + AllowHandleDereference allow_handle_dereference; + AllowHandleAllocation allow_handle_allocation; // Try to further reduce the JSCall {node}. Reduction const reduction = ReduceJSCall(node); return reduction.Changed() ? reduction : Changed(node); @@ -588,7 +701,6 @@ Reduction JSCallReducer::ReduceObjectGetPrototype(Node* node, Node* object) { MapRef object_map(broker(), object_maps[i]); object_map.SerializePrototype(); if (IsSpecialReceiverInstanceType(object_map.instance_type()) || - object_map.has_hidden_prototype() || !object_map.prototype().equals(candidate_prototype)) { // We exclude special receivers, like JSProxy or API objects that // might require access checks here; we also don't want to deal @@ -1002,27 +1114,28 @@ bool CanInlineArrayIteratingBuiltin(JSHeapBroker* broker, return true; } -bool CanInlineArrayResizingBuiltin(JSHeapBroker* broker, - MapHandles const& receiver_maps, - ElementsKind* kind_return, - bool builtin_is_push = false) { +bool CanInlineArrayResizingBuiltin( + JSHeapBroker* broker, MapHandles const& receiver_maps, + std::vector<ElementsKind>& kinds, // NOLINT(runtime/references) + bool builtin_is_push = false) { DCHECK_NE(0, receiver_maps.size()); - *kind_return = MapRef(broker, receiver_maps[0]).elements_kind(); for (auto receiver_map : receiver_maps) { MapRef map(broker, receiver_map); if (!map.supports_fast_array_resize()) return false; - if (builtin_is_push) { - if (!UnionElementsKindUptoPackedness(kind_return, map.elements_kind())) { - return false; - } - } else { - // TODO(turbofan): We should also handle fast holey double elements once - // we got the hole NaN mess sorted out in TurboFan/V8. - if (map.elements_kind() == HOLEY_DOUBLE_ELEMENTS || - !UnionElementsKindUptoSize(kind_return, map.elements_kind())) { - return false; + // TODO(turbofan): We should also handle fast holey double elements once + // we got the hole NaN mess sorted out in TurboFan/V8. 
+ if (map.elements_kind() == HOLEY_DOUBLE_ELEMENTS && !builtin_is_push) { + return false; + } + ElementsKind current_kind = map.elements_kind(); + auto kind_ptr = kinds.data(); + size_t i; + for (i = 0; i < kinds.size(); i++, kind_ptr++) { + if (UnionElementsKindUptoPackedness(kind_ptr, current_kind)) { + break; } } + if (i == kinds.size()) kinds.push_back(current_kind); } return true; } @@ -2735,6 +2848,8 @@ Reduction JSCallReducer::ReduceArraySome(Node* node, Reduction JSCallReducer::ReduceCallApiFunction( Node* node, const SharedFunctionInfoRef& shared) { + DisallowHeapAccessIf no_heap_access(FLAG_concurrent_inlining); + DCHECK_EQ(IrOpcode::kJSCall, node->opcode()); CallParameters const& p = CallParametersOf(node->op()); int const argc = static_cast<int>(p.arity()) - 2; @@ -2750,78 +2865,21 @@ Reduction JSCallReducer::ReduceCallApiFunction( Node* context = NodeProperties::GetContextInput(node); Node* frame_state = NodeProperties::GetFrameStateInput(node); - // See if we can optimize this API call to {shared}. - Handle<FunctionTemplateInfo> function_template_info( - FunctionTemplateInfo::cast(shared.object()->function_data()), isolate()); - CallOptimization call_optimization(isolate(), function_template_info); - if (!call_optimization.is_simple_api_call()) return NoChange(); - - // Try to infer the {receiver} maps from the graph. - MapInference inference(broker(), receiver, effect); - if (inference.HaveMaps()) { - MapHandles const& receiver_maps = inference.GetMaps(); - - // Check that all {receiver_maps} are actually JSReceiver maps and - // that the {function_template_info} accepts them without access - // checks (even if "access check needed" is set for {receiver}). - // - // Note that we don't need to know the concrete {receiver} maps here, - // meaning it's fine if the {receiver_maps} are unreliable, and we also - // don't need to install any stability dependencies, since the only - // relevant information regarding the {receiver} is the Map::constructor - // field on the root map (which is different from the JavaScript exposed - // "constructor" property) and that field cannot change. - // - // So if we know that {receiver} had a certain constructor at some point - // in the past (i.e. it had a certain map), then this constructor is going - // to be the same later, since this information cannot change with map - // transitions. - // - // The same is true for the instance type, e.g. we still know that the - // instance type is JSObject even if that information is unreliable, and - // the "access check needed" bit, which also cannot change later. - for (Handle<Map> map : receiver_maps) { - MapRef receiver_map(broker(), map); - if (!receiver_map.IsJSReceiverMap() || - (receiver_map.is_access_check_needed() && - !function_template_info->accept_any_receiver())) { - return inference.NoChange(); - } - } - - // See if we can constant-fold the compatible receiver checks. 
- CallOptimization::HolderLookup lookup; - Handle<JSObject> api_holder = - call_optimization.LookupHolderOfExpectedType(receiver_maps[0], &lookup); - if (lookup == CallOptimization::kHolderNotFound) - return inference.NoChange(); - for (size_t i = 1; i < receiver_maps.size(); ++i) { - CallOptimization::HolderLookup lookupi; - Handle<JSObject> holderi = call_optimization.LookupHolderOfExpectedType( - receiver_maps[i], &lookupi); - if (lookup != lookupi) return inference.NoChange(); - if (!api_holder.is_identical_to(holderi)) return inference.NoChange(); - } + if (!shared.function_template_info().has_value()) { + TRACE_BROKER_MISSING( + broker(), "FunctionTemplateInfo for function with SFI " << shared); + return NoChange(); + } - if (p.speculation_mode() == SpeculationMode::kDisallowSpeculation && - !inference.RelyOnMapsViaStability(dependencies())) { - // We were not able to make the receiver maps reliable without map checks - // but doing map checks would lead to deopt loops, so give up. - return inference.NoChange(); - } + // See if we can optimize this API call to {shared}. + FunctionTemplateInfoRef function_template_info( + shared.function_template_info().value()); - // TODO(neis): The maps were used in a way that does not actually require - // map checks or stability dependencies. - inference.RelyOnMapsPreferStability(dependencies(), jsgraph(), &effect, - control, p.feedback()); + if (!function_template_info.has_call_code()) return NoChange(); - // Determine the appropriate holder for the {lookup}. - holder = lookup == CallOptimization::kHolderFound - ? jsgraph()->HeapConstant(api_holder) - : receiver; - } else if (function_template_info->accept_any_receiver() && - function_template_info->signature().IsUndefined(isolate())) { - // We haven't found any {receiver_maps}, but we might still be able to + if (function_template_info.accept_any_receiver() && + function_template_info.is_signature_undefined()) { + // We might be able to // optimize the API call depending on the {function_template_info}. // If the API function accepts any kind of {receiver}, we only need to // ensure that the {receiver} is actually a JSReceiver at this point, @@ -2840,51 +2898,127 @@ Reduction JSCallReducer::ReduceCallApiFunction( graph()->NewNode(simplified()->ConvertReceiver(p.convert_mode()), receiver, global_proxy, effect, control); } else { - // We don't have enough information to eliminate the access check - // and/or the compatible receiver check, so use the generic builtin - // that does those checks dynamically. This is still significantly - // faster than the generic call sequence. - Builtins::Name builtin_name = - !function_template_info->accept_any_receiver() - ? (function_template_info->signature().IsUndefined(isolate()) - ? Builtins::kCallFunctionTemplate_CheckAccess - : Builtins:: - kCallFunctionTemplate_CheckAccessAndCompatibleReceiver) - : Builtins::kCallFunctionTemplate_CheckCompatibleReceiver; - - // The CallFunctionTemplate builtin requires the {receiver} to be - // an actual JSReceiver, so make sure we do the proper conversion - // first if necessary. - receiver = holder = effect = - graph()->NewNode(simplified()->ConvertReceiver(p.convert_mode()), - receiver, global_proxy, effect, control); + // Try to infer the {receiver} maps from the graph. + MapInference inference(broker(), receiver, effect); + if (inference.HaveMaps()) { + MapHandles const& receiver_maps = inference.GetMaps(); + MapRef first_receiver_map(broker(), receiver_maps[0]); + + // See if we can constant-fold the compatible receiver checks. 
+ HolderLookupResult api_holder = + function_template_info.LookupHolderOfExpectedType(first_receiver_map, + false); + if (api_holder.lookup == CallOptimization::kHolderNotFound) + return inference.NoChange(); - Callable callable = Builtins::CallableFor(isolate(), builtin_name); - auto call_descriptor = Linkage::GetStubCallDescriptor( - graph()->zone(), callable.descriptor(), - argc + 1 /* implicit receiver */, CallDescriptor::kNeedsFrameState); - node->InsertInput(graph()->zone(), 0, - jsgraph()->HeapConstant(callable.code())); - node->ReplaceInput(1, jsgraph()->HeapConstant(function_template_info)); - node->InsertInput(graph()->zone(), 2, jsgraph()->Constant(argc)); - node->ReplaceInput(3, receiver); // Update receiver input. - node->ReplaceInput(6 + argc, effect); // Update effect input. - NodeProperties::ChangeOp(node, common()->Call(call_descriptor)); - return Changed(node); + // Check that all {receiver_maps} are actually JSReceiver maps and + // that the {function_template_info} accepts them without access + // checks (even if "access check needed" is set for {receiver}). + // + // Note that we don't need to know the concrete {receiver} maps here, + // meaning it's fine if the {receiver_maps} are unreliable, and we also + // don't need to install any stability dependencies, since the only + // relevant information regarding the {receiver} is the Map::constructor + // field on the root map (which is different from the JavaScript exposed + // "constructor" property) and that field cannot change. + // + // So if we know that {receiver} had a certain constructor at some point + // in the past (i.e. it had a certain map), then this constructor is going + // to be the same later, since this information cannot change with map + // transitions. + // + // The same is true for the instance type, e.g. we still know that the + // instance type is JSObject even if that information is unreliable, and + // the "access check needed" bit, which also cannot change later. + CHECK(first_receiver_map.IsJSReceiverMap()); + CHECK(!first_receiver_map.is_access_check_needed() || + function_template_info.accept_any_receiver()); + + for (size_t i = 1; i < receiver_maps.size(); ++i) { + MapRef receiver_map(broker(), receiver_maps[i]); + HolderLookupResult holder_i = + function_template_info.LookupHolderOfExpectedType(receiver_map, + false); + + if (api_holder.lookup != holder_i.lookup) return inference.NoChange(); + if (!(api_holder.holder.has_value() && holder_i.holder.has_value())) + return inference.NoChange(); + if (!api_holder.holder->equals(*holder_i.holder)) + return inference.NoChange(); + + CHECK(receiver_map.IsJSReceiverMap()); + CHECK(!receiver_map.is_access_check_needed() || + function_template_info.accept_any_receiver()); + } + + if (p.speculation_mode() == SpeculationMode::kDisallowSpeculation && + !inference.RelyOnMapsViaStability(dependencies())) { + // We were not able to make the receiver maps reliable without map + // checks but doing map checks would lead to deopt loops, so give up. + return inference.NoChange(); + } + + // TODO(neis): The maps were used in a way that does not actually require + // map checks or stability dependencies. + inference.RelyOnMapsPreferStability(dependencies(), jsgraph(), &effect, + control, p.feedback()); + + // Determine the appropriate holder for the {lookup}. + holder = api_holder.lookup == CallOptimization::kHolderFound + ? 
jsgraph()->Constant(*api_holder.holder) + : receiver; + } else { + // We don't have enough information to eliminate the access check + // and/or the compatible receiver check, so use the generic builtin + // that does those checks dynamically. This is still significantly + // faster than the generic call sequence. + Builtins::Name builtin_name; + if (function_template_info.accept_any_receiver()) { + builtin_name = Builtins::kCallFunctionTemplate_CheckCompatibleReceiver; + } else if (function_template_info.is_signature_undefined()) { + builtin_name = Builtins::kCallFunctionTemplate_CheckAccess; + } else { + builtin_name = + Builtins::kCallFunctionTemplate_CheckAccessAndCompatibleReceiver; + } + + // The CallFunctionTemplate builtin requires the {receiver} to be + // an actual JSReceiver, so make sure we do the proper conversion + // first if necessary. + receiver = holder = effect = + graph()->NewNode(simplified()->ConvertReceiver(p.convert_mode()), + receiver, global_proxy, effect, control); + + Callable callable = Builtins::CallableFor(isolate(), builtin_name); + auto call_descriptor = Linkage::GetStubCallDescriptor( + graph()->zone(), callable.descriptor(), + argc + 1 /* implicit receiver */, CallDescriptor::kNeedsFrameState); + node->InsertInput(graph()->zone(), 0, + jsgraph()->HeapConstant(callable.code())); + node->ReplaceInput(1, jsgraph()->Constant(function_template_info)); + node->InsertInput(graph()->zone(), 2, jsgraph()->Constant(argc)); + node->ReplaceInput(3, receiver); // Update receiver input. + node->ReplaceInput(6 + argc, effect); // Update effect input. + NodeProperties::ChangeOp(node, common()->Call(call_descriptor)); + return Changed(node); + } } // TODO(turbofan): Consider introducing a JSCallApiCallback operator for // this and lower it during JSGenericLowering, and unify this with the // JSNativeContextSpecialization::InlineApiCall method a bit. - Handle<CallHandlerInfo> call_handler_info( - CallHandlerInfo::cast(function_template_info->call_code()), isolate()); - Handle<Object> data(call_handler_info->data(), isolate()); + if (!function_template_info.call_code().has_value()) { + TRACE_BROKER_MISSING(broker(), "call code for function template info " + << function_template_info); + return NoChange(); + } + CallHandlerInfoRef call_handler_info = *function_template_info.call_code(); Callable call_api_callback = CodeFactory::CallApiCallback(isolate()); CallInterfaceDescriptor cid = call_api_callback.descriptor(); auto call_descriptor = Linkage::GetStubCallDescriptor( graph()->zone(), cid, argc + 1 /* implicit receiver */, CallDescriptor::kNeedsFrameState); - ApiFunction api_function(v8::ToCData<Address>
    (call_handler_info->callback())); + ApiFunction api_function(call_handler_info.callback()); ExternalReference function_reference = ExternalReference::Create( &api_function, ExternalReference::DIRECT_API_CALL); @@ -2895,7 +3029,8 @@ Reduction JSCallReducer::ReduceCallApiFunction( jsgraph()->HeapConstant(call_api_callback.code())); node->ReplaceInput(1, jsgraph()->ExternalConstant(function_reference)); node->InsertInput(graph()->zone(), 2, jsgraph()->Constant(argc)); - node->InsertInput(graph()->zone(), 3, jsgraph()->Constant(data)); + node->InsertInput(graph()->zone(), 3, + jsgraph()->Constant(call_handler_info.data())); node->InsertInput(graph()->zone(), 4, holder); node->ReplaceInput(5, receiver); // Update receiver input. node->ReplaceInput(7 + argc, continuation_frame_state); @@ -3495,6 +3630,8 @@ Reduction JSCallReducer::ReduceJSCall(Node* node, return ReduceMathUnary(node, simplified()->NumberFloor()); case Builtins::kMathFround: return ReduceMathUnary(node, simplified()->NumberFround()); + case Builtins::kMathHypot: + return ReduceMathHypot(node); case Builtins::kMathLog: return ReduceMathUnary(node, simplified()->NumberLog()); case Builtins::kMathLog1p: @@ -3563,8 +3700,8 @@ Reduction JSCallReducer::ReduceJSCall(Node* node, return ReduceStringPrototypeStringAt(simplified()->StringCharCodeAt(), node); case Builtins::kStringPrototypeCodePointAt: - return ReduceStringPrototypeStringAt( - simplified()->StringCodePointAt(UnicodeEncoding::UTF32), node); + return ReduceStringPrototypeStringAt(simplified()->StringCodePointAt(), + node); case Builtins::kStringPrototypeSubstring: return ReduceStringPrototypeSubstring(node); case Builtins::kStringPrototypeSlice: @@ -3642,18 +3779,23 @@ Reduction JSCallReducer::ReduceJSCall(Node* node, return ReduceDateNow(node); case Builtins::kNumberConstructor: return ReduceNumberConstructor(node); + case Builtins::kBigIntAsUintN: + return ReduceBigIntAsUintN(node); default: break; } - if (!TracingFlags::is_runtime_stats_enabled() && - shared.object()->IsApiFunction()) { + if (shared.object()->IsApiFunction()) { return ReduceCallApiFunction(node, shared); } return NoChange(); } Reduction JSCallReducer::ReduceJSCallWithArrayLike(Node* node) { + // TODO(mslekova): Remove once ReduceJSCallWithArrayLike is brokerized. 
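The rewrite of ReduceCallApiFunction above shows the broker discipline used throughout this patch: under FLAG_concurrent_inlining the reducer consults only pre-serialized data (FunctionTemplateInfoRef, CallHandlerInfoRef) and bails out with TRACE_BROKER_MISSING when the serializer did not visit an object, instead of dereferencing heap handles. A toy sketch of that contract, with assumed names that are not V8's actual classes:

```cpp
// Toy illustration of the "serialize ahead, read later" contract.
#include <iostream>
#include <map>
#include <optional>
#include <string>

struct FunctionData {
  bool has_call_code;  // cf. FunctionTemplateInfoRef::has_call_code()
};

class ToyBroker {
 public:
  // Runs on the main thread, before compilation starts.
  void Serialize(const std::string& fn, FunctionData data) {
    serialized_[fn] = data;
  }
  // Runs on the compiler thread; never touches the heap.
  std::optional<FunctionData> TryGet(const std::string& fn) const {
    auto it = serialized_.find(fn);
    if (it == serialized_.end()) {
      // Analogous to TRACE_BROKER_MISSING followed by NoChange().
      std::cerr << "broker missing: " << fn << '\n';
      return std::nullopt;
    }
    return it->second;
  }

 private:
  std::map<std::string, FunctionData> serialized_;
};
```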
+ AllowHandleDereference allow_handle_dereference; + AllowHandleAllocation allow_handle_allocation; + DCHECK_EQ(IrOpcode::kJSCallWithArrayLike, node->opcode()); CallFrequency frequency = CallFrequencyOf(node->op()); VectorSlotPair feedback; @@ -4250,6 +4392,52 @@ Reduction JSCallReducer::ReduceSoftDeoptimize(Node* node, return Changed(node); } +Node* JSCallReducer::LoadReceiverElementsKind(Node* receiver, Node** effect, + Node** control) { + Node* receiver_map = *effect = + graph()->NewNode(simplified()->LoadField(AccessBuilder::ForMap()), + receiver, *effect, *control); + Node* receiver_bit_field2 = *effect = graph()->NewNode( + simplified()->LoadField(AccessBuilder::ForMapBitField2()), receiver_map, + *effect, *control); + Node* receiver_elements_kind = graph()->NewNode( + simplified()->NumberShiftRightLogical(), + graph()->NewNode(simplified()->NumberBitwiseAnd(), receiver_bit_field2, + jsgraph()->Constant(Map::ElementsKindBits::kMask)), + jsgraph()->Constant(Map::ElementsKindBits::kShift)); + return receiver_elements_kind; +} + +void JSCallReducer::CheckIfElementsKind(Node* receiver_elements_kind, + ElementsKind kind, Node* control, + Node** if_true, Node** if_false) { + Node* is_packed_kind = + graph()->NewNode(simplified()->NumberEqual(), receiver_elements_kind, + jsgraph()->Constant(GetPackedElementsKind(kind))); + Node* packed_branch = + graph()->NewNode(common()->Branch(), is_packed_kind, control); + Node* if_packed = graph()->NewNode(common()->IfTrue(), packed_branch); + + if (IsHoleyElementsKind(kind)) { + Node* if_not_packed = graph()->NewNode(common()->IfFalse(), packed_branch); + Node* is_holey_kind = + graph()->NewNode(simplified()->NumberEqual(), receiver_elements_kind, + jsgraph()->Constant(GetHoleyElementsKind(kind))); + Node* holey_branch = + graph()->NewNode(common()->Branch(), is_holey_kind, if_not_packed); + Node* if_holey = graph()->NewNode(common()->IfTrue(), holey_branch); + + Node* if_not_packed_not_holey = + graph()->NewNode(common()->IfFalse(), holey_branch); + + *if_true = graph()->NewNode(common()->Merge(2), if_packed, if_holey); + *if_false = if_not_packed_not_holey; + } else { + *if_true = if_packed; + *if_false = graph()->NewNode(common()->IfFalse(), packed_branch); + } +} + // ES6 section 22.1.3.18 Array.prototype.push ( ) Reduction JSCallReducer::ReduceArrayPrototypePush(Node* node) { DCHECK_EQ(IrOpcode::kJSCall, node->opcode()); @@ -4267,81 +4455,121 @@ Reduction JSCallReducer::ReduceArrayPrototypePush(Node* node) { if (!inference.HaveMaps()) return NoChange(); MapHandles const& receiver_maps = inference.GetMaps(); - ElementsKind kind; - if (!CanInlineArrayResizingBuiltin(broker(), receiver_maps, &kind, true)) { + std::vector<ElementsKind> kinds; + if (!CanInlineArrayResizingBuiltin(broker(), receiver_maps, kinds, true)) { return inference.NoChange(); } if (!dependencies()->DependOnNoElementsProtector()) UNREACHABLE(); inference.RelyOnMapsPreferStability(dependencies(), jsgraph(), &effect, control, p.feedback()); - // Collect the value inputs to push. - std::vector<Node*> values(num_values); - for (int i = 0; i < num_values; ++i) { - values[i] = NodeProperties::GetValueInput(node, 2 + i); - } - - for (auto& value : values) { - if (IsSmiElementsKind(kind)) { - value = effect = graph()->NewNode(simplified()->CheckSmi(p.feedback()), - value, effect, control); - } else if (IsDoubleElementsKind(kind)) { - value = effect = graph()->NewNode(simplified()->CheckNumber(p.feedback()), - value, effect, control); - // Make sure we do not store signaling NaNs into double arrays. 
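The new LoadReceiverElementsKind helper materializes a map load plus a mask-and-shift of bit_field2 as graph nodes. For reference, the same decode in ordinary C++; the mask and shift constants below are illustrative stand-ins for `Map::ElementsKindBits`, only the mask-then-shift pattern matches the graph code:

```cpp
// Sketch of the bit_field2 decode performed by LoadReceiverElementsKind.
#include <cstdint>
#include <cstdio>

constexpr uint32_t kElementsKindShift = 3;                  // assumed
constexpr uint32_t kElementsKindMask = 0x1fu << kElementsKindShift;

uint32_t DecodeElementsKind(uint32_t bit_field2) {
  return (bit_field2 & kElementsKindMask) >> kElementsKindShift;
}

int main() {
  // A map whose kind field holds 5 decodes back to 5.
  std::printf("%u\n", DecodeElementsKind(0x5u << kElementsKindShift));
  return 0;
}
```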
- value = graph()->NewNode(simplified()->NumberSilenceNaN(), value); + std::vector<Node*> controls_to_merge; + std::vector<Node*> effects_to_merge; + std::vector<Node*> values_to_merge; + Node* return_value = jsgraph()->UndefinedConstant(); + + Node* receiver_elements_kind = + LoadReceiverElementsKind(receiver, &effect, &control); + Node* next_control = control; + Node* next_effect = effect; + for (size_t i = 0; i < kinds.size(); i++) { + ElementsKind kind = kinds[i]; + control = next_control; + effect = next_effect; + // We do not need a branch for the last elements kind. + if (i != kinds.size() - 1) { + CheckIfElementsKind(receiver_elements_kind, kind, control, &control, + &next_control); } - } - // Load the "length" property of the {receiver}. - Node* length = effect = graph()->NewNode( - simplified()->LoadField(AccessBuilder::ForJSArrayLength(kind)), receiver, - effect, control); - Node* value = length; + // Collect the value inputs to push. + std::vector<Node*> values(num_values); + for (int i = 0; i < num_values; ++i) { + values[i] = NodeProperties::GetValueInput(node, 2 + i); + } - // Check if we have any {values} to push. - if (num_values > 0) { - // Compute the resulting "length" of the {receiver}. - Node* new_length = value = graph()->NewNode( - simplified()->NumberAdd(), length, jsgraph()->Constant(num_values)); + for (auto& value : values) { + if (IsSmiElementsKind(kind)) { + value = effect = graph()->NewNode(simplified()->CheckSmi(p.feedback()), + value, effect, control); + } else if (IsDoubleElementsKind(kind)) { + value = effect = graph()->NewNode( + simplified()->CheckNumber(p.feedback()), value, effect, control); + // Make sure we do not store signaling NaNs into double arrays. + value = graph()->NewNode(simplified()->NumberSilenceNaN(), value); + } + } - // Load the elements backing store of the {receiver}. - Node* elements = effect = graph()->NewNode( - simplified()->LoadField(AccessBuilder::ForJSObjectElements()), receiver, - effect, control); - Node* elements_length = effect = graph()->NewNode( - simplified()->LoadField(AccessBuilder::ForFixedArrayLength()), elements, - effect, control); + // Load the "length" property of the {receiver}. + Node* length = effect = graph()->NewNode( + simplified()->LoadField(AccessBuilder::ForJSArrayLength(kind)), + receiver, effect, control); + return_value = length; - GrowFastElementsMode mode = - IsDoubleElementsKind(kind) ? GrowFastElementsMode::kDoubleElements - : GrowFastElementsMode::kSmiOrObjectElements; - elements = effect = graph()->NewNode( - simplified()->MaybeGrowFastElements(mode, p.feedback()), receiver, - elements, - graph()->NewNode(simplified()->NumberAdd(), length, - jsgraph()->Constant(num_values - 1)), - elements_length, effect, control); - - // Update the JSArray::length field. Since this is observable, - // there must be no other check after this. - effect = graph()->NewNode( - simplified()->StoreField(AccessBuilder::ForJSArrayLength(kind)), - receiver, new_length, effect, control); + // Check if we have any {values} to push. + if (num_values > 0) { + // Compute the resulting "length" of the {receiver}. + Node* new_length = return_value = graph()->NewNode( + simplified()->NumberAdd(), length, jsgraph()->Constant(num_values)); - // Append the {values} to the {elements}. - for (int i = 0; i < num_values; ++i) { - Node* value = values[i]; - Node* index = graph()->NewNode(simplified()->NumberAdd(), length, - jsgraph()->Constant(i)); + // Load the elements backing store of the {receiver}. 
+ Node* elements = effect = graph()->NewNode( + simplified()->LoadField(AccessBuilder::ForJSObjectElements()), + receiver, effect, control); + Node* elements_length = effect = graph()->NewNode( + simplified()->LoadField(AccessBuilder::ForFixedArrayLength()), + elements, effect, control); + + GrowFastElementsMode mode = + IsDoubleElementsKind(kind) + ? GrowFastElementsMode::kDoubleElements + : GrowFastElementsMode::kSmiOrObjectElements; + elements = effect = graph()->NewNode( + simplified()->MaybeGrowFastElements(mode, p.feedback()), receiver, + elements, + graph()->NewNode(simplified()->NumberAdd(), length, + jsgraph()->Constant(num_values - 1)), + elements_length, effect, control); + + // Update the JSArray::length field. Since this is observable, + // there must be no other check after this. effect = graph()->NewNode( - simplified()->StoreElement(AccessBuilder::ForFixedArrayElement(kind)), - elements, index, value, effect, control); + simplified()->StoreField(AccessBuilder::ForJSArrayLength(kind)), + receiver, new_length, effect, control); + + // Append the {values} to the {elements}. + for (int i = 0; i < num_values; ++i) { + Node* value = values[i]; + Node* index = graph()->NewNode(simplified()->NumberAdd(), length, + jsgraph()->Constant(i)); + effect = + graph()->NewNode(simplified()->StoreElement( + AccessBuilder::ForFixedArrayElement(kind)), + elements, index, value, effect, control); + } + } + + controls_to_merge.push_back(control); + effects_to_merge.push_back(effect); + values_to_merge.push_back(return_value); } - ReplaceWithValue(node, value, effect, control); - return Replace(value); + if (controls_to_merge.size() > 1) { + int const count = static_cast<int>(controls_to_merge.size()); + + control = graph()->NewNode(common()->Merge(count), count, + &controls_to_merge.front()); + effects_to_merge.push_back(control); + effect = graph()->NewNode(common()->EffectPhi(count), count + 1, + &effects_to_merge.front()); + values_to_merge.push_back(control); + return_value = + graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, count), + count + 1, &values_to_merge.front()); + } + + ReplaceWithValue(node, return_value, effect, control); + return Replace(return_value); } // ES6 section 22.1.3.17 Array.prototype.pop ( ) @@ -4360,79 +4588,117 @@ Reduction JSCallReducer::ReduceArrayPrototypePop(Node* node) { if (!inference.HaveMaps()) return NoChange(); MapHandles const& receiver_maps = inference.GetMaps(); - ElementsKind kind; - if (!CanInlineArrayResizingBuiltin(broker(), receiver_maps, &kind)) { + std::vector<ElementsKind> kinds; + if (!CanInlineArrayResizingBuiltin(broker(), receiver_maps, kinds)) { return inference.NoChange(); } if (!dependencies()->DependOnNoElementsProtector()) UNREACHABLE(); inference.RelyOnMapsPreferStability(dependencies(), jsgraph(), &effect, control, p.feedback()); + std::vector<Node*> controls_to_merge; + std::vector<Node*> effects_to_merge; + std::vector<Node*> values_to_merge; + Node* value = jsgraph()->UndefinedConstant(); + + Node* receiver_elements_kind = + LoadReceiverElementsKind(receiver, &effect, &control); + Node* next_control = control; + Node* next_effect = effect; + for (size_t i = 0; i < kinds.size(); i++) { + ElementsKind kind = kinds[i]; + control = next_control; + effect = next_effect; + // We do not need a branch for the last elements kind. 
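Summarizing the push fast path completed above (pop, which begins here, follows the same per-kind dispatch): a behavioral sketch only, ignoring the graph plumbing and the per-kind Smi/double checks that the real lowering emits:

```cpp
// Behavioral sketch of the inlined Array.prototype.push fast path.
#include <cstddef>
#include <vector>

size_t FastPush(std::vector<int>& elements, size_t& length,
                const std::vector<int>& values) {
  size_t new_length = length + values.size();
  if (new_length > elements.size()) {
    elements.resize(new_length);  // cf. MaybeGrowFastElements
  }
  for (size_t i = 0; i < values.size(); ++i) {
    elements[length + i] = values[i];  // StoreElement at length + i
  }
  length = new_length;  // observable JSArray::length update
  return new_length;    // push() returns the new length
}
```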
+ if (i != kinds.size() - 1) { + CheckIfElementsKind(receiver_elements_kind, kind, control, &control, + &next_control); + } - // Check if the {receiver} has any elements. - Node* check = graph()->NewNode(simplified()->NumberEqual(), length, - jsgraph()->ZeroConstant()); - Node* branch = - graph()->NewNode(common()->Branch(BranchHint::kFalse), check, control); + // Load the "length" property of the {receiver}. + Node* length = effect = graph()->NewNode( + simplified()->LoadField(AccessBuilder::ForJSArrayLength(kind)), + receiver, effect, control); - Node* if_true = graph()->NewNode(common()->IfTrue(), branch); - Node* etrue = effect; - Node* vtrue = jsgraph()->UndefinedConstant(); + // Check if the {receiver} has any elements. + Node* check = graph()->NewNode(simplified()->NumberEqual(), length, + jsgraph()->ZeroConstant()); + Node* branch = + graph()->NewNode(common()->Branch(BranchHint::kFalse), check, control); - Node* if_false = graph()->NewNode(common()->IfFalse(), branch); - Node* efalse = effect; - Node* vfalse; - { - // TODO(tebbi): We should trim the backing store if the capacity is too - // big, as implemented in elements.cc:ElementsAccessorBase::SetLengthImpl. - - // Load the elements backing store from the {receiver}. - Node* elements = efalse = graph()->NewNode( - simplified()->LoadField(AccessBuilder::ForJSObjectElements()), receiver, - efalse, if_false); - - // Ensure that we aren't popping from a copy-on-write backing store. - if (IsSmiOrObjectElementsKind(kind)) { - elements = efalse = - graph()->NewNode(simplified()->EnsureWritableFastElements(), receiver, - elements, efalse, if_false); - } + Node* if_true = graph()->NewNode(common()->IfTrue(), branch); + Node* etrue = effect; + Node* vtrue = jsgraph()->UndefinedConstant(); - // Compute the new {length}. - length = graph()->NewNode(simplified()->NumberSubtract(), length, - jsgraph()->OneConstant()); + Node* if_false = graph()->NewNode(common()->IfFalse(), branch); + Node* efalse = effect; + Node* vfalse; + { + // TODO(tebbi): We should trim the backing store if the capacity is too + // big, as implemented in elements.cc:ElementsAccessorBase::SetLengthImpl. + + // Load the elements backing store from the {receiver}. + Node* elements = efalse = graph()->NewNode( + simplified()->LoadField(AccessBuilder::ForJSObjectElements()), + receiver, efalse, if_false); + + // Ensure that we aren't popping from a copy-on-write backing store. + if (IsSmiOrObjectElementsKind(kind)) { + elements = efalse = + graph()->NewNode(simplified()->EnsureWritableFastElements(), + receiver, elements, efalse, if_false); + } + + // Compute the new {length}. + length = graph()->NewNode(simplified()->NumberSubtract(), length, + jsgraph()->OneConstant()); - // Store the new {length} to the {receiver}. - efalse = graph()->NewNode( - simplified()->StoreField(AccessBuilder::ForJSArrayLength(kind)), - receiver, length, efalse, if_false); + // Store the new {length} to the {receiver}. + efalse = graph()->NewNode( + simplified()->StoreField(AccessBuilder::ForJSArrayLength(kind)), + receiver, length, efalse, if_false); + + // Load the last entry from the {elements}. + vfalse = efalse = graph()->NewNode( + simplified()->LoadElement(AccessBuilder::ForFixedArrayElement(kind)), + elements, length, efalse, if_false); + + // Store a hole to the element we just removed from the {receiver}. 
+ efalse = graph()->NewNode( + simplified()->StoreElement( + AccessBuilder::ForFixedArrayElement(GetHoleyElementsKind(kind))), + elements, length, jsgraph()->TheHoleConstant(), efalse, if_false); + } + + control = graph()->NewNode(common()->Merge(2), if_true, if_false); + effect = graph()->NewNode(common()->EffectPhi(2), etrue, efalse, control); + value = graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, 2), + vtrue, vfalse, control); - // Load the last entry from the {elements}. - vfalse = efalse = graph()->NewNode( - simplified()->LoadElement(AccessBuilder::ForFixedArrayElement(kind)), - elements, length, efalse, if_false); + // Convert the hole to undefined. Do this last, so that we can optimize + // conversion operator via some smart strength reduction in many cases. + if (IsHoleyElementsKind(kind)) { + value = + graph()->NewNode(simplified()->ConvertTaggedHoleToUndefined(), value); + } - // Store a hole to the element we just removed from the {receiver}. - efalse = graph()->NewNode( - simplified()->StoreElement( - AccessBuilder::ForFixedArrayElement(GetHoleyElementsKind(kind))), - elements, length, jsgraph()->TheHoleConstant(), efalse, if_false); + controls_to_merge.push_back(control); + effects_to_merge.push_back(effect); + values_to_merge.push_back(value); } - control = graph()->NewNode(common()->Merge(2), if_true, if_false); - effect = graph()->NewNode(common()->EffectPhi(2), etrue, efalse, control); - Node* value = graph()->NewNode( - common()->Phi(MachineRepresentation::kTagged, 2), vtrue, vfalse, control); + if (controls_to_merge.size() > 1) { + int const count = static_cast<int>(controls_to_merge.size()); - // Convert the hole to undefined. Do this last, so that we can optimize - // conversion operator via some smart strength reduction in many cases. - if (IsHoleyElementsKind(kind)) { + control = graph()->NewNode(common()->Merge(count), count, + &controls_to_merge.front()); + effects_to_merge.push_back(control); + effect = graph()->NewNode(common()->EffectPhi(count), count + 1, + &effects_to_merge.front()); + values_to_merge.push_back(control); value = - graph()->NewNode(simplified()->ConvertTaggedHoleToUndefined(), value); + graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, count), + count + 1, &values_to_merge.front()); } ReplaceWithValue(node, value, effect, control); @@ -4458,151 +4724,172 @@ Reduction JSCallReducer::ReduceArrayPrototypeShift(Node* node) { if (!inference.HaveMaps()) return NoChange(); MapHandles const& receiver_maps = inference.GetMaps(); - ElementsKind kind; - if (!CanInlineArrayResizingBuiltin(broker(), receiver_maps, &kind)) { + std::vector<ElementsKind> kinds; + if (!CanInlineArrayResizingBuiltin(broker(), receiver_maps, kinds)) { return inference.NoChange(); } if (!dependencies()->DependOnNoElementsProtector()) UNREACHABLE(); inference.RelyOnMapsPreferStability(dependencies(), jsgraph(), &effect, control, p.feedback()); - // Load length of the {receiver}. 
- Node* length = effect = graph()->NewNode( - simplified()->LoadField(AccessBuilder::ForJSArrayLength(kind)), receiver, - effect, control); + std::vector<Node*> controls_to_merge; + std::vector<Node*> effects_to_merge; + std::vector<Node*> values_to_merge; + Node* value = jsgraph()->UndefinedConstant(); + + Node* receiver_elements_kind = + LoadReceiverElementsKind(receiver, &effect, &control); + Node* next_control = control; + Node* next_effect = effect; + for (size_t i = 0; i < kinds.size(); i++) { + ElementsKind kind = kinds[i]; + control = next_control; + effect = next_effect; + // We do not need a branch for the last elements kind. + if (i != kinds.size() - 1) { + CheckIfElementsKind(receiver_elements_kind, kind, control, &control, + &next_control); + } - // Return undefined if {receiver} has no elements. - Node* check0 = graph()->NewNode(simplified()->NumberEqual(), length, - jsgraph()->ZeroConstant()); - Node* branch0 = - graph()->NewNode(common()->Branch(BranchHint::kFalse), check0, control); + // Load length of the {receiver}. + Node* length = effect = graph()->NewNode( + simplified()->LoadField(AccessBuilder::ForJSArrayLength(kind)), + receiver, effect, control); - Node* if_true0 = graph()->NewNode(common()->IfTrue(), branch0); - Node* etrue0 = effect; - Node* vtrue0 = jsgraph()->UndefinedConstant(); + // Return undefined if {receiver} has no elements. + Node* check0 = graph()->NewNode(simplified()->NumberEqual(), length, + jsgraph()->ZeroConstant()); + Node* branch0 = + graph()->NewNode(common()->Branch(BranchHint::kFalse), check0, control); - Node* if_false0 = graph()->NewNode(common()->IfFalse(), branch0); - Node* efalse0 = effect; - Node* vfalse0; - { - // Check if we should take the fast-path. - Node* check1 = - graph()->NewNode(simplified()->NumberLessThanOrEqual(), length, - jsgraph()->Constant(JSArray::kMaxCopyElements)); - Node* branch1 = graph()->NewNode(common()->Branch(BranchHint::kTrue), - check1, if_false0); + Node* if_true0 = graph()->NewNode(common()->IfTrue(), branch0); + Node* etrue0 = effect; + Node* vtrue0 = jsgraph()->UndefinedConstant(); - Node* if_true1 = graph()->NewNode(common()->IfTrue(), branch1); - Node* etrue1 = efalse0; - Node* vtrue1; + Node* if_false0 = graph()->NewNode(common()->IfFalse(), branch0); + Node* efalse0 = effect; + Node* vfalse0; { - Node* elements = etrue1 = graph()->NewNode( - simplified()->LoadField(AccessBuilder::ForJSObjectElements()), - receiver, etrue1, if_true1); - - // Load the first element here, which we return below. - vtrue1 = etrue1 = graph()->NewNode( - simplified()->LoadElement(AccessBuilder::ForFixedArrayElement(kind)), - elements, jsgraph()->ZeroConstant(), etrue1, if_true1); + // Check if we should take the fast-path. + Node* check1 = + graph()->NewNode(simplified()->NumberLessThanOrEqual(), length, + jsgraph()->Constant(JSArray::kMaxCopyElements)); + Node* branch1 = graph()->NewNode(common()->Branch(BranchHint::kTrue), + check1, if_false0); + + Node* if_true1 = graph()->NewNode(common()->IfTrue(), branch1); + Node* etrue1 = efalse0; + Node* vtrue1; + { + Node* elements = etrue1 = graph()->NewNode( + simplified()->LoadField(AccessBuilder::ForJSObjectElements()), + receiver, etrue1, if_true1); - // Ensure that we aren't shifting a copy-on-write backing store. - if (IsSmiOrObjectElementsKind(kind)) { - elements = etrue1 = - graph()->NewNode(simplified()->EnsureWritableFastElements(), - receiver, elements, etrue1, if_true1); - } + // Load the first element here, which we return below. 
+ vtrue1 = etrue1 = graph()->NewNode( + simplified()->LoadElement( + AccessBuilder::ForFixedArrayElement(kind)), + elements, jsgraph()->ZeroConstant(), etrue1, if_true1); + + // Ensure that we aren't shifting a copy-on-write backing store. + if (IsSmiOrObjectElementsKind(kind)) { + elements = etrue1 = + graph()->NewNode(simplified()->EnsureWritableFastElements(), + receiver, elements, etrue1, if_true1); + } - // Shift the remaining {elements} by one towards the start. - Node* loop = graph()->NewNode(common()->Loop(2), if_true1, if_true1); - Node* eloop = - graph()->NewNode(common()->EffectPhi(2), etrue1, etrue1, loop); - Node* terminate = graph()->NewNode(common()->Terminate(), eloop, loop); - NodeProperties::MergeControlToEnd(graph(), common(), terminate); - Node* index = graph()->NewNode( - common()->Phi(MachineRepresentation::kTagged, 2), - jsgraph()->OneConstant(), - jsgraph()->Constant(JSArray::kMaxCopyElements - 1), loop); + // Shift the remaining {elements} by one towards the start. + Node* loop = graph()->NewNode(common()->Loop(2), if_true1, if_true1); + Node* eloop = + graph()->NewNode(common()->EffectPhi(2), etrue1, etrue1, loop); + Node* terminate = graph()->NewNode(common()->Terminate(), eloop, loop); + NodeProperties::MergeControlToEnd(graph(), common(), terminate); + Node* index = graph()->NewNode( + common()->Phi(MachineRepresentation::kTagged, 2), + jsgraph()->OneConstant(), + jsgraph()->Constant(JSArray::kMaxCopyElements - 1), loop); - { - Node* check2 = - graph()->NewNode(simplified()->NumberLessThan(), index, length); - Node* branch2 = graph()->NewNode(common()->Branch(), check2, loop); + { + Node* check2 = + graph()->NewNode(simplified()->NumberLessThan(), index, length); + Node* branch2 = graph()->NewNode(common()->Branch(), check2, loop); - if_true1 = graph()->NewNode(common()->IfFalse(), branch2); - etrue1 = eloop; + if_true1 = graph()->NewNode(common()->IfFalse(), branch2); + etrue1 = eloop; - Node* control = graph()->NewNode(common()->IfTrue(), branch2); - Node* effect = etrue1; + Node* control = graph()->NewNode(common()->IfTrue(), branch2); + Node* effect = etrue1; - ElementAccess const access = AccessBuilder::ForFixedArrayElement(kind); - Node* value = effect = - graph()->NewNode(simplified()->LoadElement(access), elements, index, - effect, control); - effect = - graph()->NewNode(simplified()->StoreElement(access), elements, - graph()->NewNode(simplified()->NumberSubtract(), - index, jsgraph()->OneConstant()), - value, effect, control); - - loop->ReplaceInput(1, control); - eloop->ReplaceInput(1, effect); - index->ReplaceInput(1, - graph()->NewNode(simplified()->NumberAdd(), index, - jsgraph()->OneConstant())); - } + ElementAccess const access = + AccessBuilder::ForFixedArrayElement(kind); + Node* value = effect = + graph()->NewNode(simplified()->LoadElement(access), elements, + index, effect, control); + effect = graph()->NewNode( + simplified()->StoreElement(access), elements, + graph()->NewNode(simplified()->NumberSubtract(), index, + jsgraph()->OneConstant()), + value, effect, control); + + loop->ReplaceInput(1, control); + eloop->ReplaceInput(1, effect); + index->ReplaceInput(1, + graph()->NewNode(simplified()->NumberAdd(), index, + jsgraph()->OneConstant())); + } - // Compute the new {length}. - length = graph()->NewNode(simplified()->NumberSubtract(), length, - jsgraph()->OneConstant()); + // Compute the new {length}. 
+ length = graph()->NewNode(simplified()->NumberSubtract(), length, + jsgraph()->OneConstant()); - // Store the new {length} to the {receiver}. - etrue1 = graph()->NewNode( - simplified()->StoreField(AccessBuilder::ForJSArrayLength(kind)), - receiver, length, etrue1, if_true1); + // Store the new {length} to the {receiver}. + etrue1 = graph()->NewNode( + simplified()->StoreField(AccessBuilder::ForJSArrayLength(kind)), + receiver, length, etrue1, if_true1); - // Store a hole to the element we just removed from the {receiver}. - etrue1 = graph()->NewNode( - simplified()->StoreElement( - AccessBuilder::ForFixedArrayElement(GetHoleyElementsKind(kind))), - elements, length, jsgraph()->TheHoleConstant(), etrue1, if_true1); - } + // Store a hole to the element we just removed from the {receiver}. + etrue1 = graph()->NewNode( + simplified()->StoreElement(AccessBuilder::ForFixedArrayElement( + GetHoleyElementsKind(kind))), + elements, length, jsgraph()->TheHoleConstant(), etrue1, if_true1); + } - Node* if_false1 = graph()->NewNode(common()->IfFalse(), branch1); - Node* efalse1 = efalse0; - Node* vfalse1; - { - // Call the generic C++ implementation. - const int builtin_index = Builtins::kArrayShift; - auto call_descriptor = Linkage::GetCEntryStubCallDescriptor( - graph()->zone(), 1, BuiltinArguments::kNumExtraArgsWithReceiver, - Builtins::name(builtin_index), node->op()->properties(), - CallDescriptor::kNeedsFrameState); - Node* stub_code = - jsgraph()->CEntryStubConstant(1, kDontSaveFPRegs, kArgvOnStack, true); - Address builtin_entry = Builtins::CppEntryOf(builtin_index); - Node* entry = - jsgraph()->ExternalConstant(ExternalReference::Create(builtin_entry)); - Node* argc = - jsgraph()->Constant(BuiltinArguments::kNumExtraArgsWithReceiver); - if_false1 = efalse1 = vfalse1 = - graph()->NewNode(common()->Call(call_descriptor), stub_code, receiver, - jsgraph()->PaddingConstant(), argc, target, - jsgraph()->UndefinedConstant(), entry, argc, context, - frame_state, efalse1, if_false1); - } + Node* if_false1 = graph()->NewNode(common()->IfFalse(), branch1); + Node* efalse1 = efalse0; + Node* vfalse1; + { + // Call the generic C++ implementation. 
+ const int builtin_index = Builtins::kArrayShift; + auto call_descriptor = Linkage::GetCEntryStubCallDescriptor( + graph()->zone(), 1, BuiltinArguments::kNumExtraArgsWithReceiver, + Builtins::name(builtin_index), node->op()->properties(), + CallDescriptor::kNeedsFrameState); + Node* stub_code = jsgraph()->CEntryStubConstant(1, kDontSaveFPRegs, + kArgvOnStack, true); + Address builtin_entry = Builtins::CppEntryOf(builtin_index); + Node* entry = jsgraph()->ExternalConstant( + ExternalReference::Create(builtin_entry)); + Node* argc = + jsgraph()->Constant(BuiltinArguments::kNumExtraArgsWithReceiver); + if_false1 = efalse1 = vfalse1 = + graph()->NewNode(common()->Call(call_descriptor), stub_code, + receiver, jsgraph()->PaddingConstant(), argc, + target, jsgraph()->UndefinedConstant(), entry, + argc, context, frame_state, efalse1, if_false1); + } - if_false0 = graph()->NewNode(common()->Merge(2), if_true1, if_false1); - efalse0 = - graph()->NewNode(common()->EffectPhi(2), etrue1, efalse1, if_false0); - vfalse0 = graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, 2), - vtrue1, vfalse1, if_false0); + if_false0 = graph()->NewNode(common()->Merge(2), if_true1, if_false1); + efalse0 = + graph()->NewNode(common()->EffectPhi(2), etrue1, efalse1, if_false0); + vfalse0 = + graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, 2), + vtrue1, vfalse1, if_false0); } control = graph()->NewNode(common()->Merge(2), if_true0, if_false0); effect = graph()->NewNode(common()->EffectPhi(2), etrue0, efalse0, control); - Node* value = - graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, 2), - vtrue0, vfalse0, control); + value = graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, 2), + vtrue0, vfalse0, control); // Convert the hole to undefined. Do this last, so that we can optimize // conversion operator via some smart strength reduction in many cases. 
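// A note on the loop construction above, stated as a minimal sketch (the
// names {entry}, {body_control}, etc. are illustrative, not from the patch):
// TurboFan loops are built by allocating the Loop, EffectPhi, and value Phi
// nodes with the entry edge duplicated as a placeholder second input, then
// patching in the back edge once the loop body has been constructed:
//
//   Node* loop = graph()->NewNode(common()->Loop(2), entry, entry);
//   Node* eloop = graph()->NewNode(common()->EffectPhi(2), effect, effect, loop);
//   Node* index = graph()->NewNode(
//       common()->Phi(MachineRepresentation::kTagged, 2), initial, initial, loop);
//   // ... emit the loop body, yielding {body_control} and {body_effect} ...
//   loop->ReplaceInput(1, body_control);   // install the back edge
//   eloop->ReplaceInput(1, body_effect);
//   index->ReplaceInput(1, next_index);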
@@ -4611,8 +4898,27 @@ Reduction JSCallReducer::ReduceArrayPrototypeShift(Node* node) {
          graph()->NewNode(simplified()->ConvertTaggedHoleToUndefined(), value);
    }

-    ReplaceWithValue(node, value, effect, control);
-    return Replace(value);
+    controls_to_merge.push_back(control);
+    effects_to_merge.push_back(effect);
+    values_to_merge.push_back(value);
+  }
+
+  if (controls_to_merge.size() > 1) {
+    int const count = static_cast<int>(controls_to_merge.size());
+
+    control = graph()->NewNode(common()->Merge(count), count,
+                               &controls_to_merge.front());
+    effects_to_merge.push_back(control);
+    effect = graph()->NewNode(common()->EffectPhi(count), count + 1,
+                              &effects_to_merge.front());
+    values_to_merge.push_back(control);
+    value =
+        graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, count),
+                         count + 1, &values_to_merge.front());
+  }
+
+  ReplaceWithValue(node, value, effect, control);
+  return Replace(value);
 }

 // ES6 section 22.1.3.23 Array.prototype.slice ( )
@@ -5230,8 +5536,8 @@ Reduction JSCallReducer::ReduceStringFromCodePoint(Node* node) {
       graph()->NewNode(simplified()->CheckBounds(p.feedback()), input,
                        jsgraph()->Constant(0x10FFFF + 1), effect, control);

-  Node* value = graph()->NewNode(
-      simplified()->StringFromSingleCodePoint(UnicodeEncoding::UTF32), input);
+  Node* value =
+      graph()->NewNode(simplified()->StringFromSingleCodePoint(), input);
   ReplaceWithValue(node, value, effect);
   return Replace(value);
 }
@@ -5287,12 +5593,8 @@ Reduction JSCallReducer::ReduceStringIteratorPrototypeNext(Node* node) {
   Node* vtrue0;
   {
     done_true = jsgraph()->FalseConstant();
-    Node* codepoint = etrue0 = graph()->NewNode(
-        simplified()->StringCodePointAt(UnicodeEncoding::UTF16), string, index,
-        etrue0, if_true0);
-    vtrue0 = graph()->NewNode(
-        simplified()->StringFromSingleCodePoint(UnicodeEncoding::UTF16),
-        codepoint);
+    vtrue0 = etrue0 = graph()->NewNode(simplified()->StringFromCodePointAt(),
+                                       string, index, etrue0, if_true0);

     // Update iterator.[[NextIndex]]
     Node* char_length = graph()->NewNode(simplified()->StringLength(), vtrue0);
@@ -5396,6 +5698,8 @@ Node* JSCallReducer::CreateArtificialFrameState(
 }

 Reduction JSCallReducer::ReducePromiseConstructor(Node* node) {
+  DisallowHeapAccessIf no_heap_access(FLAG_concurrent_inlining);
+
   DCHECK_EQ(IrOpcode::kJSConstruct, node->opcode());
   ConstructParameters const& p = ConstructParametersOf(node->op());
   int arity = static_cast<int>(p.arity() - 2);
@@ -5404,7 +5708,6 @@ Reduction JSCallReducer::ReducePromiseConstructor(Node* node) {
   Node* target = NodeProperties::GetValueInput(node, 0);
   Node* executor = NodeProperties::GetValueInput(node, 1);
   Node* new_target = NodeProperties::GetValueInput(node, arity + 1);
-  Node* context = NodeProperties::GetContextInput(node);

   Node* outer_frame_state = NodeProperties::GetFrameStateInput(node);
   Node* effect = NodeProperties::GetEffectInput(node);
@@ -5459,7 +5762,7 @@ Reduction JSCallReducer::ReducePromiseConstructor(Node* node) {
   // Allocate a promise context for the closures below.
   Node* promise_context = effect = graph()->NewNode(
       javascript()->CreateFunctionContext(
-          handle(native_context().object()->scope_info(), isolate()),
+          native_context().scope_info().object(),
           PromiseBuiltins::kPromiseContextLength - Context::MIN_CONTEXT_SLOTS,
           FUNCTION_SCOPE),
       context, effect, control);
@@ -5477,21 +5780,13 @@ Reduction JSCallReducer::ReducePromiseConstructor(Node* node) {
       promise_context, jsgraph()->TrueConstant(), effect, control);

   // Allocate the closure for the resolve case.
-  SharedFunctionInfoRef resolve_shared =
-      native_context().promise_capability_default_resolve_shared_fun();
-  Node* resolve = effect = graph()->NewNode(
-      javascript()->CreateClosure(
-          resolve_shared.object(), factory()->many_closures_cell(),
-          handle(resolve_shared.object()->GetCode(), isolate())),
+  Node* resolve = effect = CreateClosureFromBuiltinSharedFunctionInfo(
+      native_context().promise_capability_default_resolve_shared_fun(),
       promise_context, effect, control);

   // Allocate the closure for the reject case.
-  SharedFunctionInfoRef reject_shared =
-      native_context().promise_capability_default_reject_shared_fun();
-  Node* reject = effect = graph()->NewNode(
-      javascript()->CreateClosure(
-          reject_shared.object(), factory()->many_closures_cell(),
-          handle(reject_shared.object()->GetCode(), isolate())),
+  Node* reject = effect = CreateClosureFromBuiltinSharedFunctionInfo(
+      native_context().promise_capability_default_reject_shared_fun(),
       promise_context, effect, control);

   const std::vector<Node*> checkpoint_parameters_continuation(
@@ -5624,6 +5919,30 @@ Reduction JSCallReducer::ReducePromiseInternalResolve(Node* node) {
   return Replace(value);
 }

+bool JSCallReducer::DoPromiseChecks(MapInference* inference) {
+  if (!inference->HaveMaps()) return false;
+  MapHandles const& receiver_maps = inference->GetMaps();
+
+  // Check whether all {receiver_maps} are JSPromise maps and
+  // have the initial Promise.prototype as their [[Prototype]].
+  for (Handle<Map> map : receiver_maps) {
+    MapRef receiver_map(broker(), map);
+    if (!receiver_map.IsJSPromiseMap()) return false;
+    if (!FLAG_concurrent_inlining) {
+      receiver_map.SerializePrototype();
+    } else if (!receiver_map.serialized_prototype()) {
+      TRACE_BROKER_MISSING(broker(), "prototype for map " << receiver_map);
+      return false;
+    }
+    if (!receiver_map.prototype().equals(
+            native_context().promise_prototype())) {
+      return false;
+    }
+  }
+
+  return true;
+}
+
 // ES section #sec-promise.prototype.catch
 Reduction JSCallReducer::ReducePromisePrototypeCatch(Node* node) {
   DCHECK_EQ(IrOpcode::kJSCall, node->opcode());
@@ -5637,20 +5956,7 @@ Reduction JSCallReducer::ReducePromisePrototypeCatch(Node* node) {
   Node* control = NodeProperties::GetControlInput(node);

   MapInference inference(broker(), receiver, effect);
-  if (!inference.HaveMaps()) return NoChange();
-  MapHandles const& receiver_maps = inference.GetMaps();
-
-  // Check whether all {receiver_maps} are JSPromise maps and
-  // have the initial Promise.prototype as their [[Prototype]].
-  for (Handle<Map> map : receiver_maps) {
-    MapRef receiver_map(broker(), map);
-    if (!receiver_map.IsJSPromiseMap()) return inference.NoChange();
-    receiver_map.SerializePrototype();
-    if (!receiver_map.prototype().equals(
-            native_context().promise_prototype())) {
-      return inference.NoChange();
-    }
-  }
+  if (!DoPromiseChecks(&inference)) return inference.NoChange();

   if (!dependencies()->DependOnPromiseThenProtector())
     return inference.NoChange();
@@ -5675,8 +5981,21 @@ Reduction JSCallReducer::ReducePromisePrototypeCatch(Node* node) {
   return reduction.Changed() ? reduction : Changed(node);
 }

+Node* JSCallReducer::CreateClosureFromBuiltinSharedFunctionInfo(
+    SharedFunctionInfoRef shared, Node* context, Node* effect, Node* control) {
+  DCHECK(shared.HasBuiltinId());
+  Callable const callable = Builtins::CallableFor(
+      isolate(), static_cast<Builtins::Name>(shared.builtin_id()));
+  return graph()->NewNode(
+      javascript()->CreateClosure(
+          shared.object(), factory()->many_closures_cell(), callable.code()),
+      context, effect, control);
+}
+
 // ES section #sec-promise.prototype.finally
 Reduction JSCallReducer::ReducePromisePrototypeFinally(Node* node) {
+  DisallowHeapAccessIf no_heap_access(FLAG_concurrent_inlining);
+
   DCHECK_EQ(IrOpcode::kJSCall, node->opcode());
   CallParameters const& p = CallParametersOf(node->op());
   int arity = static_cast<int>(p.arity() - 2);
@@ -5690,21 +6009,9 @@ Reduction JSCallReducer::ReducePromisePrototypeFinally(Node* node) {
   }

   MapInference inference(broker(), receiver, effect);
-  if (!inference.HaveMaps()) return NoChange();
+  if (!DoPromiseChecks(&inference)) return inference.NoChange();
   MapHandles const& receiver_maps = inference.GetMaps();

-  // Check whether all {receiver_maps} are JSPromise maps and
-  // have the initial Promise.prototype as their [[Prototype]].
-  for (Handle<Map> map : receiver_maps) {
-    MapRef receiver_map(broker(), map);
-    if (!receiver_map.IsJSPromiseMap()) return inference.NoChange();
-    receiver_map.SerializePrototype();
-    if (!receiver_map.prototype().equals(
-            native_context().promise_prototype())) {
-      return inference.NoChange();
-    }
-  }
-
   if (!dependencies()->DependOnPromiseHookProtector())
     return inference.NoChange();
   if (!dependencies()->DependOnPromiseThenProtector())
@@ -5730,13 +6037,13 @@ Reduction JSCallReducer::ReducePromisePrototypeFinally(Node* node) {
         jsgraph()->Constant(native_context().promise_function());

     // Allocate shared context for the closures below.
-    context = etrue = graph()->NewNode(
-        javascript()->CreateFunctionContext(
-            handle(native_context().object()->scope_info(), isolate()),
-            PromiseBuiltins::kPromiseFinallyContextLength -
-                Context::MIN_CONTEXT_SLOTS,
-            FUNCTION_SCOPE),
-        context, etrue, if_true);
+    context = etrue =
+        graph()->NewNode(javascript()->CreateFunctionContext(
+                             native_context().scope_info().object(),
+                             PromiseBuiltins::kPromiseFinallyContextLength -
+                                 Context::MIN_CONTEXT_SLOTS,
+                             FUNCTION_SCOPE),
+                         context, etrue, if_true);
     etrue = graph()->NewNode(
         simplified()->StoreField(
             AccessBuilder::ForContextSlot(PromiseBuiltins::kOnFinallySlot)),
@@ -5747,22 +6054,14 @@ Reduction JSCallReducer::ReducePromisePrototypeFinally(Node* node) {
         context, constructor, etrue, if_true);

     // Allocate the closure for the reject case.
-    SharedFunctionInfoRef catch_finally =
-        native_context().promise_catch_finally_shared_fun();
-    catch_true = etrue = graph()->NewNode(
-        javascript()->CreateClosure(
-            catch_finally.object(), factory()->many_closures_cell(),
-            handle(catch_finally.object()->GetCode(), isolate())),
-        context, etrue, if_true);
+    catch_true = etrue = CreateClosureFromBuiltinSharedFunctionInfo(
+        native_context().promise_catch_finally_shared_fun(), context, etrue,
+        if_true);

     // Allocate the closure for the fulfill case.
-    SharedFunctionInfoRef then_finally =
-        native_context().promise_then_finally_shared_fun();
-    then_true = etrue = graph()->NewNode(
-        javascript()->CreateClosure(
-            then_finally.object(), factory()->many_closures_cell(),
-            handle(then_finally.object()->GetCode(), isolate())),
-        context, etrue, if_true);
+    then_true = etrue = CreateClosureFromBuiltinSharedFunctionInfo(
+        native_context().promise_then_finally_shared_fun(), context, etrue,
+        if_true);
   }

   Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
@@ -5810,6 +6109,8 @@ Reduction JSCallReducer::ReducePromisePrototypeFinally(Node* node) {
 }

 Reduction JSCallReducer::ReducePromisePrototypeThen(Node* node) {
+  DisallowHeapAccessIf no_heap_access(FLAG_concurrent_inlining);
+
   DCHECK_EQ(IrOpcode::kJSCall, node->opcode());
   CallParameters const& p = CallParametersOf(node->op());
   if (p.speculation_mode() == SpeculationMode::kDisallowSpeculation) {
@@ -5829,20 +6130,7 @@ Reduction JSCallReducer::ReducePromisePrototypeThen(Node* node) {
   Node* frame_state = NodeProperties::GetFrameStateInput(node);

   MapInference inference(broker(), receiver, effect);
-  if (!inference.HaveMaps()) return NoChange();
-  MapHandles const& receiver_maps = inference.GetMaps();
-
-  // Check whether all {receiver_maps} are JSPromise maps and
-  // have the initial Promise.prototype as their [[Prototype]].
-  for (Handle<Map> map : receiver_maps) {
-    MapRef receiver_map(broker(), map);
-    if (!receiver_map.IsJSPromiseMap()) return inference.NoChange();
-    receiver_map.SerializePrototype();
-    if (!receiver_map.prototype().equals(
-            native_context().promise_prototype())) {
-      return inference.NoChange();
-    }
-  }
+  if (!DoPromiseChecks(&inference)) return inference.NoChange();

   if (!dependencies()->DependOnPromiseHookProtector())
     return inference.NoChange();
@@ -5889,6 +6177,8 @@ Reduction JSCallReducer::ReducePromisePrototypeThen(Node* node) {

 // ES section #sec-promise.resolve
 Reduction JSCallReducer::ReducePromiseResolveTrampoline(Node* node) {
+  DisallowHeapAccessIf no_heap_access(FLAG_concurrent_inlining);
+
   DCHECK_EQ(IrOpcode::kJSCall, node->opcode());
   Node* receiver = NodeProperties::GetValueInput(node, 1);
   Node* value = node->op()->ValueInputCount() > 2
@@ -6828,8 +7118,11 @@ Reduction JSCallReducer::ReduceNumberParseInt(Node* node) {
 }

 Reduction JSCallReducer::ReduceRegExpPrototypeTest(Node* node) {
+  DisallowHeapAccessIf disallow_heap_access(FLAG_concurrent_inlining);
+
   if (FLAG_force_slow_path) return NoChange();
   if (node->op()->ValueInputCount() < 3) return NoChange();
+
   CallParameters const& p = CallParametersOf(node->op());
   if (p.speculation_mode() == SpeculationMode::kDisallowSpeculation) {
     return NoChange();
@@ -6846,13 +7139,24 @@ Reduction JSCallReducer::ReduceRegExpPrototypeTest(Node* node) {
   }
   MapHandles const& regexp_maps = inference.GetMaps();

-  // Compute property access info for "exec" on {resolution}.
   ZoneVector<PropertyAccessInfo> access_infos(graph()->zone());
   AccessInfoFactory access_info_factory(broker(), dependencies(),
                                         graph()->zone());
-  access_info_factory.ComputePropertyAccessInfos(
-      MapHandles(regexp_maps.begin(), regexp_maps.end()),
-      factory()->exec_string(), AccessMode::kLoad, &access_infos);
+  if (!FLAG_concurrent_inlining) {
+    // Compute property access info for "exec" on {resolution}.
+    access_info_factory.ComputePropertyAccessInfos(
+        MapHandles(regexp_maps.begin(), regexp_maps.end()),
+        factory()->exec_string(), AccessMode::kLoad, &access_infos);
+  } else {
+    // Obtain precomputed access infos from the broker.
+    for (auto map : regexp_maps) {
+      MapRef map_ref(broker(), map);
+      PropertyAccessInfo access_info =
+          broker()->GetAccessInfoForLoadingExec(map_ref);
+      access_infos.push_back(access_info);
+    }
+  }
+
   PropertyAccessInfo ai_exec =
       access_info_factory.FinalizePropertyAccessInfosAsOne(access_infos,
                                                            AccessMode::kLoad);
@@ -6864,34 +7168,24 @@ Reduction JSCallReducer::ReduceRegExpPrototypeTest(Node* node) {
     // Do not reduce if the exec method is not on the prototype chain.
     if (!ai_exec.holder().ToHandle(&holder)) return inference.NoChange();

+    JSObjectRef holder_ref(broker(), holder);
+
     // Bail out if the exec method is not the original one.
-    Handle<Object> constant = JSObject::FastPropertyAt(
-        holder, ai_exec.field_representation(), ai_exec.field_index());
-    if (!constant.is_identical_to(isolate()->regexp_exec_function())) {
+    base::Optional<ObjectRef> constant = holder_ref.GetOwnProperty(
+        ai_exec.field_representation(), ai_exec.field_index());
+    if (!constant.has_value() ||
+        !constant->equals(native_context().regexp_exec_function())) {
       return inference.NoChange();
     }

-    // Protect the exec method change in the holder.
-    Handle<Object> exec_on_proto;
-    MapRef holder_map(broker(), handle(holder->map(), isolate()));
-    Handle<DescriptorArray> descriptors(
-        holder_map.object()->instance_descriptors(), isolate());
-    int descriptor_index =
-        descriptors->Search(*(factory()->exec_string()), *holder_map.object());
-    CHECK_NE(descriptor_index, DescriptorArray::kNotFound);
-    holder_map.SerializeOwnDescriptors();
-    dependencies()->DependOnFieldType(holder_map, descriptor_index);
-  } else {
-    return inference.NoChange();
-  }
-
-  // Add proper dependencies on the {regexp}s [[Prototype]]s.
-  Handle<JSObject> holder;
-  if (ai_exec.holder().ToHandle(&holder)) {
+    // Add proper dependencies on the {regexp}s [[Prototype]]s.
    dependencies()->DependOnStablePrototypeChains(
        ai_exec.receiver_maps(), kStartAtPrototype,
        JSObjectRef(broker(), holder));
+  } else {
+    return inference.NoChange();
   }
+
   inference.RelyOnMapsPreferStability(dependencies(), jsgraph(), &effect,
                                       control, p.feedback());

@@ -6955,12 +7249,47 @@ Reduction JSCallReducer::ReduceNumberConstructor(Node* node) {
   return Changed(node);
 }

+Reduction JSCallReducer::ReduceBigIntAsUintN(Node* node) {
+  if (!jsgraph()->machine()->Is64()) {
+    return NoChange();
+  }
+
+  CallParameters const& p = CallParametersOf(node->op());
+  if (p.speculation_mode() == SpeculationMode::kDisallowSpeculation) {
+    return NoChange();
+  }
+  if (node->op()->ValueInputCount() < 3) {
+    return NoChange();
+  }
+
+  Node* effect = NodeProperties::GetEffectInput(node);
+  Node* control = NodeProperties::GetControlInput(node);
+  Node* bits = NodeProperties::GetValueInput(node, 2);
+  Node* value = NodeProperties::GetValueInput(node, 3);
+
+  NumberMatcher matcher(bits);
+  if (matcher.IsInteger() && matcher.IsInRange(0, 64)) {
+    const int bits_value = static_cast<int>(matcher.Value());
+    value = effect = graph()->NewNode(simplified()->CheckBigInt(p.feedback()),
+                                      value, effect, control);
+    value = graph()->NewNode(simplified()->BigIntAsUintN(bits_value), value);
+    ReplaceWithValue(node, value, effect);
+    return Replace(value);
+  }
+
+  return NoChange();
+}
+
 Graph* JSCallReducer::graph() const { return jsgraph()->graph(); }

 Isolate* JSCallReducer::isolate() const { return jsgraph()->isolate(); }

 Factory* JSCallReducer::factory() const { return isolate()->factory(); }

+NativeContextRef JSCallReducer::native_context() const {
+  return broker()->native_context();
+}
+
 CommonOperatorBuilder* JSCallReducer::common() const {
   return jsgraph()->common();
 }
diff --git a/deps/v8/src/compiler/js-call-reducer.h b/deps/v8/src/compiler/js-call-reducer.h
index 02821ebb0dc4cb..bf3676c5b2202f 100644
--- a/deps/v8/src/compiler/js-call-reducer.h
+++ b/deps/v8/src/compiler/js-call-reducer.h
@@ -29,6 +29,7 @@ struct FieldAccess;
 class JSGraph;
 class JSHeapBroker;
 class JSOperatorBuilder;
+class MapInference;
 class NodeProperties;
 class SimplifiedOperatorBuilder;

@@ -155,6 +156,7 @@ class V8_EXPORT_PRIVATE JSCallReducer final : public AdvancedReducer {
   Reduction ReduceMathImul(Node* node);
   Reduction ReduceMathClz32(Node* node);
   Reduction ReduceMathMinMax(Node* node, const Operator* op, Node* empty_value);
+  Reduction ReduceMathHypot(Node* node);

   Reduction ReduceNumberIsFinite(Node* node);
   Reduction ReduceNumberIsInteger(Node* node);
@@ -190,6 +192,15 @@ class V8_EXPORT_PRIVATE JSCallReducer final : public AdvancedReducer {
   Reduction ReduceNumberParseInt(Node* node);

   Reduction ReduceNumberConstructor(Node* node);
+  Reduction ReduceBigIntAsUintN(Node* node);
+
+  // Helper to verify promise receiver maps are as expected.
+  // On bailout from a reduction, be sure to return inference.NoChange().
+  bool DoPromiseChecks(MapInference* inference);
+
+  Node* CreateClosureFromBuiltinSharedFunctionInfo(SharedFunctionInfoRef shared,
+                                                   Node* context, Node* effect,
+                                                   Node* control);

   // Returns the updated {to} node, and updates control and effect along the
   // way.
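// For reference, the call pattern the DoPromiseChecks helper declared above
// is meant to be used with — this mirrors the Promise reductions in
// js-call-reducer.cc, where bailouts must go through inference.NoChange()
// per the comment on the declaration:
//
//   MapInference inference(broker(), receiver, effect);
//   if (!DoPromiseChecks(&inference)) return inference.NoChange();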
@@ -231,12 +242,16 @@ class V8_EXPORT_PRIVATE JSCallReducer final : public AdvancedReducer {
                               const SharedFunctionInfoRef& shared,
                               Node* context = nullptr);

+  void CheckIfElementsKind(Node* receiver_elements_kind, ElementsKind kind,
+                           Node* control, Node** if_true, Node** if_false);
+  Node* LoadReceiverElementsKind(Node* receiver, Node** effect, Node** control);
+
   Graph* graph() const;
   JSGraph* jsgraph() const { return jsgraph_; }
   JSHeapBroker* broker() const { return broker_; }
   Isolate* isolate() const;
   Factory* factory() const;
-  NativeContextRef native_context() const { return broker()->native_context(); }
+  NativeContextRef native_context() const;
   CommonOperatorBuilder* common() const;
   JSOperatorBuilder* javascript() const;
   SimplifiedOperatorBuilder* simplified() const;
diff --git a/deps/v8/src/compiler/js-context-specialization.cc b/deps/v8/src/compiler/js-context-specialization.cc
index dea6d7fc2b62a6..035e8b7ceb9392 100644
--- a/deps/v8/src/compiler/js-context-specialization.cc
+++ b/deps/v8/src/compiler/js-context-specialization.cc
@@ -6,6 +6,7 @@

 #include "src/compiler/common-operator.h"
 #include "src/compiler/js-graph.h"
+#include "src/compiler/js-heap-broker.h"
 #include "src/compiler/js-operator.h"
 #include "src/compiler/linkage.h"
 #include "src/compiler/node-matchers.h"
@@ -144,9 +145,10 @@ Reduction JSContextSpecialization::ReduceJSLoadContext(Node* node) {

   // Now walk up the concrete context chain for the remaining depth.
   ContextRef concrete = maybe_concrete.value();
-  concrete.SerializeContextChain();  // TODO(neis): Remove later.
-  for (; depth > 0; --depth) {
-    concrete = concrete.previous();
+  concrete = concrete.previous(&depth);
+  if (depth > 0) {
+    TRACE_BROKER_MISSING(broker(), "previous value for context " << concrete);
+    return SimplifyJSLoadContext(node, jsgraph()->Constant(concrete), depth);
   }

   if (!access.immutable()) {
@@ -157,8 +159,6 @@ Reduction JSContextSpecialization::ReduceJSLoadContext(Node* node) {

   // This will hold the final value, if we can figure it out.
   base::Optional<ObjectRef> maybe_value;
-
-  concrete.SerializeSlot(static_cast<int>(access.index()));
   maybe_value = concrete.get(static_cast<int>(access.index()));
   if (maybe_value.has_value() && !maybe_value->IsSmi()) {
     // Even though the context slot is immutable, the context might have escaped
@@ -174,6 +174,9 @@ Reduction JSContextSpecialization::ReduceJSLoadContext(Node* node) {
   }

   if (!maybe_value.has_value()) {
+    TRACE_BROKER_MISSING(broker(), "slot value " << access.index()
+                                                 << " for context "
+                                                 << concrete);
     return SimplifyJSLoadContext(node, jsgraph()->Constant(concrete), depth);
   }

@@ -207,9 +210,10 @@ Reduction JSContextSpecialization::ReduceJSStoreContext(Node* node) {

   // Now walk up the concrete context chain for the remaining depth.
   ContextRef concrete = maybe_concrete.value();
-  concrete.SerializeContextChain();  // TODO(neis): Remove later.
- for (; depth > 0; --depth) { - concrete = concrete.previous(); + concrete = concrete.previous(&depth); + if (depth > 0) { + TRACE_BROKER_MISSING(broker(), "previous value for context " << concrete); + return SimplifyJSStoreContext(node, jsgraph()->Constant(concrete), depth); } return SimplifyJSStoreContext(node, jsgraph()->Constant(concrete), depth); diff --git a/deps/v8/src/compiler/js-create-lowering.cc b/deps/v8/src/compiler/js-create-lowering.cc index 8fc8dd1308cfb7..4e69db6b9bca6c 100644 --- a/deps/v8/src/compiler/js-create-lowering.cc +++ b/deps/v8/src/compiler/js-create-lowering.cc @@ -837,7 +837,7 @@ Reduction JSCreateLowering::ReduceJSCreateCollectionIterator(Node* node) { simplified()->LoadField(AccessBuilder::ForJSCollectionTable()), iterated_object, effect, control); - // Create the JSArrayIterator result. + // Create the JSCollectionIterator result. AllocationBuilder a(jsgraph(), effect, control); a.Allocate(JSCollectionIterator::kSize, AllocationType::kYoung, Type::OtherObject()); diff --git a/deps/v8/src/compiler/js-graph.cc b/deps/v8/src/compiler/js-graph.cc index a3805ec125620d..43a4beadeeb754 100644 --- a/deps/v8/src/compiler/js-graph.cc +++ b/deps/v8/src/compiler/js-graph.cc @@ -128,9 +128,17 @@ void JSGraph::GetCachedNodes(NodeVector* nodes) { DEFINE_GETTER(AllocateInYoungGenerationStubConstant, HeapConstant(BUILTIN_CODE(isolate(), AllocateInYoungGeneration))) +DEFINE_GETTER(AllocateRegularInYoungGenerationStubConstant, + HeapConstant(BUILTIN_CODE(isolate(), + AllocateRegularInYoungGeneration))) + DEFINE_GETTER(AllocateInOldGenerationStubConstant, HeapConstant(BUILTIN_CODE(isolate(), AllocateInOldGeneration))) +DEFINE_GETTER(AllocateRegularInOldGenerationStubConstant, + HeapConstant(BUILTIN_CODE(isolate(), + AllocateRegularInOldGeneration))) + DEFINE_GETTER(ArrayConstructorStubConstant, HeapConstant(BUILTIN_CODE(isolate(), ArrayConstructorImpl))) diff --git a/deps/v8/src/compiler/js-graph.h b/deps/v8/src/compiler/js-graph.h index b5c80515ad4513..ec36c26034b1ba 100644 --- a/deps/v8/src/compiler/js-graph.h +++ b/deps/v8/src/compiler/js-graph.h @@ -80,31 +80,33 @@ class V8_EXPORT_PRIVATE JSGraph : public MachineGraph { void GetCachedNodes(NodeVector* nodes); // Cached global nodes. 
-#define CACHED_GLOBAL_LIST(V)              \
-  V(AllocateInYoungGenerationStubConstant) \
-  V(AllocateInOldGenerationStubConstant)   \
-  V(ArrayConstructorStubConstant)          \
-  V(BigIntMapConstant)                     \
-  V(BooleanMapConstant)                    \
-  V(ToNumberBuiltinConstant)               \
-  V(EmptyFixedArrayConstant)               \
-  V(EmptyStringConstant)                   \
-  V(FixedArrayMapConstant)                 \
-  V(PropertyArrayMapConstant)              \
-  V(FixedDoubleArrayMapConstant)           \
-  V(HeapNumberMapConstant)                 \
-  V(OptimizedOutConstant)                  \
-  V(StaleRegisterConstant)                 \
-  V(UndefinedConstant)                     \
-  V(TheHoleConstant)                       \
-  V(TrueConstant)                          \
-  V(FalseConstant)                         \
-  V(NullConstant)                          \
-  V(ZeroConstant)                          \
-  V(OneConstant)                           \
-  V(NaNConstant)                           \
-  V(MinusOneConstant)                      \
-  V(EmptyStateValues)                      \
+#define CACHED_GLOBAL_LIST(V)                      \
+  V(AllocateInYoungGenerationStubConstant)         \
+  V(AllocateRegularInYoungGenerationStubConstant)  \
+  V(AllocateInOldGenerationStubConstant)           \
+  V(AllocateRegularInOldGenerationStubConstant)    \
+  V(ArrayConstructorStubConstant)                  \
+  V(BigIntMapConstant)                             \
+  V(BooleanMapConstant)                            \
+  V(ToNumberBuiltinConstant)                       \
+  V(EmptyFixedArrayConstant)                       \
+  V(EmptyStringConstant)                           \
+  V(FixedArrayMapConstant)                         \
+  V(PropertyArrayMapConstant)                      \
+  V(FixedDoubleArrayMapConstant)                   \
+  V(HeapNumberMapConstant)                         \
+  V(OptimizedOutConstant)                          \
+  V(StaleRegisterConstant)                         \
+  V(UndefinedConstant)                             \
+  V(TheHoleConstant)                               \
+  V(TrueConstant)                                  \
+  V(FalseConstant)                                 \
+  V(NullConstant)                                  \
+  V(ZeroConstant)                                  \
+  V(OneConstant)                                   \
+  V(NaNConstant)                                   \
+  V(MinusOneConstant)                              \
+  V(EmptyStateValues)                              \
   V(SingleDeadTypedStateValues)

   // Cached global node accessor methods.
diff --git a/deps/v8/src/compiler/js-heap-broker.cc b/deps/v8/src/compiler/js-heap-broker.cc
index 86250e9d1fec1a..c79c793ae69a63 100644
--- a/deps/v8/src/compiler/js-heap-broker.cc
+++ b/deps/v8/src/compiler/js-heap-broker.cc
@@ -3,6 +3,7 @@
 // found in the LICENSE file.

 #include "src/compiler/js-heap-broker.h"
+#include "src/compiler/heap-refs.h"

 #ifdef ENABLE_SLOW_DCHECKS
 #include <algorithm>
@@ -12,6 +13,7 @@
 #include "src/ast/modules.h"
 #include "src/codegen/code-factory.h"
 #include "src/compiler/access-info.h"
+#include "src/compiler/bytecode-analysis.h"
 #include "src/compiler/graph-reducer.h"
 #include "src/compiler/per-isolate-compiler-cache.h"
 #include "src/compiler/vector-slot-pair.h"
@@ -26,6 +28,7 @@
 #include "src/objects/js-regexp-inl.h"
 #include "src/objects/module-inl.h"
 #include "src/objects/objects-inl.h"
+#include "src/objects/template-objects-inl.h"
 #include "src/objects/templates.h"
 #include "src/utils/boxed-float.h"
 #include "src/utils/utils.h"
@@ -121,17 +124,31 @@ class PropertyCellData : public HeapObjectData {
   ObjectData* value_ = nullptr;
 };

+// TODO(mslekova): Once we have real-world usage data, we might want to
+// reimplement this as sorted vector instead, to reduce the memory overhead.
+typedef ZoneMap<MapData*, HolderLookupResult> KnownReceiversMap;
+
 class FunctionTemplateInfoData : public HeapObjectData {
  public:
   FunctionTemplateInfoData(JSHeapBroker* broker, ObjectData** storage,
                            Handle<FunctionTemplateInfo> object);

-  void Serialize(JSHeapBroker* broker);
-  ObjectData* call_code() const { return call_code_; }
+  bool is_signature_undefined() const { return is_signature_undefined_; }
+  bool accept_any_receiver() const { return accept_any_receiver_; }
+  bool has_call_code() const { return has_call_code_; }
+
+  void SerializeCallCode(JSHeapBroker* broker);
+  CallHandlerInfoData* call_code() const { return call_code_; }
+  KnownReceiversMap& known_receivers() { return known_receivers_; }

  private:
-  bool serialized_ = false;
-  ObjectData* call_code_ = nullptr;
+  bool serialized_call_code_ = false;
+  CallHandlerInfoData* call_code_ = nullptr;
+  bool is_signature_undefined_ = false;
+  bool accept_any_receiver_ = false;
+  bool has_call_code_ = false;
+
+  KnownReceiversMap known_receivers_;
 };

 class CallHandlerInfoData : public HeapObjectData {
@@ -154,7 +171,16 @@ class CallHandlerInfoData : public HeapObjectData {

 FunctionTemplateInfoData::FunctionTemplateInfoData(
     JSHeapBroker* broker, ObjectData** storage,
     Handle<FunctionTemplateInfo> object)
-    : HeapObjectData(broker, storage, object) {}
+    : HeapObjectData(broker, storage, object),
+      known_receivers_(broker->zone()) {
+  auto function_template_info = Handle<FunctionTemplateInfo>::cast(object);
+  is_signature_undefined_ =
+      function_template_info->signature().IsUndefined(broker->isolate());
+  accept_any_receiver_ = function_template_info->accept_any_receiver();
+
+  CallOptimization call_optimization(broker->isolate(), object);
+  has_call_code_ = call_optimization.is_simple_api_call();
+}

 CallHandlerInfoData::CallHandlerInfoData(JSHeapBroker* broker,
                                          ObjectData** storage,
@@ -181,18 +207,17 @@ void PropertyCellData::Serialize(JSHeapBroker* broker) {
   value_ = broker->GetOrCreateData(cell->value());
 }

-void FunctionTemplateInfoData::Serialize(JSHeapBroker* broker) {
-  if (serialized_) return;
-  serialized_ = true;
+void FunctionTemplateInfoData::SerializeCallCode(JSHeapBroker* broker) {
+  if (serialized_call_code_) return;
+  serialized_call_code_ = true;

-  TraceScope tracer(broker, this, "FunctionTemplateInfoData::Serialize");
+  TraceScope tracer(broker, this,
+                    "FunctionTemplateInfoData::SerializeCallCode");
   auto function_template_info = Handle<FunctionTemplateInfo>::cast(object());
   DCHECK_NULL(call_code_);
-  call_code_ = broker->GetOrCreateData(function_template_info->call_code());
-
-  if (call_code_->IsCallHandlerInfo()) {
-    call_code_->AsCallHandlerInfo()->Serialize(broker);
-  }
+  call_code_ = broker->GetOrCreateData(function_template_info->call_code())
+                   ->AsCallHandlerInfo();
+  call_code_->Serialize(broker);
 }

 void CallHandlerInfoData::Serialize(JSHeapBroker* broker) {
@@ -231,6 +256,12 @@ class JSObjectField {
   uint64_t number_bits_ = 0;
 };

+struct FieldIndexHasher {
+  size_t operator()(FieldIndex field_index) const {
+    return field_index.index();
+  }
+};
+
 class JSObjectData : public HeapObjectData {
  public:
   JSObjectData(JSHeapBroker* broker, ObjectData** storage,
@@ -253,12 +284,15 @@ class JSObjectData : public HeapObjectData {

   ObjectData* GetOwnConstantElement(JSHeapBroker* broker, uint32_t index,
                                     bool serialize);
+  ObjectData* GetOwnProperty(JSHeapBroker* broker,
+                             Representation representation,
+                             FieldIndex field_index, bool serialize);

   // This method is only used to assert our invariants.
  bool cow_or_empty_elements_tenured() const;

  private:
-  void SerializeRecursive(JSHeapBroker* broker, int max_depths);
+  void SerializeRecursiveAsBoilerplate(JSHeapBroker* broker, int max_depths);

   FixedArrayBaseData* elements_ = nullptr;
   bool cow_or_empty_elements_tenured_ = false;
@@ -277,6 +311,12 @@ class JSObjectData : public HeapObjectData {
   // non-configurable, or (2) are known not to (possibly they don't exist at
   // all). In case (2), the second pair component is nullptr.
   ZoneVector<std::pair<uint32_t, ObjectData*>> own_constant_elements_;
+  // Properties that either:
+  // (1) are known to exist directly on the object, or
+  // (2) are known not to (possibly they don't exist at all).
+  // In case (2), the second pair component is nullptr.
+  // For simplicity, this may in theory overlap with inobject_fields_.
+  ZoneUnorderedMap<FieldIndex, ObjectData*, FieldIndexHasher> own_properties_;
 };

 void JSObjectData::SerializeObjectCreateMap(JSHeapBroker* broker) {
@@ -312,6 +352,15 @@ base::Optional<ObjectRef> GetOwnElementFromHeap(JSHeapBroker* broker,
   }
   return base::nullopt;
 }
+
+ObjectRef GetOwnPropertyFromHeap(JSHeapBroker* broker,
+                                 Handle<JSObject> receiver,
+                                 Representation representation,
+                                 FieldIndex field_index) {
+  Handle<Object> constant =
+      JSObject::FastPropertyAt(receiver, representation, field_index);
+  return ObjectRef(broker, constant);
+}
 }  // namespace

 ObjectData* JSObjectData::GetOwnConstantElement(JSHeapBroker* broker,
@@ -333,6 +382,27 @@ ObjectData* JSObjectData::GetOwnConstantElement(JSHeapBroker* broker,
   return result;
 }

+ObjectData* JSObjectData::GetOwnProperty(JSHeapBroker* broker,
+                                         Representation representation,
+                                         FieldIndex field_index,
+                                         bool serialize) {
+  auto p = own_properties_.find(field_index);
+  if (p != own_properties_.end()) return p->second;
+
+  if (!serialize) {
+    TRACE_MISSING(broker, "knowledge about property with index "
+                              << field_index.property_index() << " on "
+                              << this);
+    return nullptr;
+  }
+
+  ObjectRef property = GetOwnPropertyFromHeap(
+      broker, Handle<JSObject>::cast(object()), representation, field_index);
+  ObjectData* result(property.data());
+  own_properties_.insert(std::make_pair(field_index, result));
+  return result;
+}
+
 class JSTypedArrayData : public JSObjectData {
  public:
   JSTypedArrayData(JSHeapBroker* broker, ObjectData** storage,
@@ -503,24 +573,18 @@ class ContextData : public HeapObjectData {
  public:
   ContextData(JSHeapBroker* broker, ObjectData** storage,
               Handle<Context> object);
-  void SerializeContextChain(JSHeapBroker* broker);

-  ContextData* previous() const {
-    CHECK(serialized_context_chain_);
-    return previous_;
-  }
+  // {previous} will return the closest valid context possible to desired
+  // {depth}, decrementing {depth} for each previous link successfully followed.
+  // If {serialize} is true, it will serialize contexts along the way.
+  ContextData* previous(JSHeapBroker* broker, size_t* depth, bool serialize);

-  void SerializeSlot(JSHeapBroker* broker, int index);
-
-  ObjectData* GetSlot(int index) {
-    auto search = slots_.find(index);
-    CHECK(search != slots_.end());
-    return search->second;
-  }
+  // Returns nullptr if the slot index isn't valid or wasn't serialized
+  // (unless {serialize} is true).
+  ObjectData* GetSlot(JSHeapBroker* broker, int index, bool serialize);

  private:
   ZoneMap<int, ObjectData*> slots_;
-  bool serialized_context_chain_ = false;
   ContextData* previous_ = nullptr;
 };

@@ -528,28 +592,46 @@ ContextData::ContextData(JSHeapBroker* broker, ObjectData** storage,
                          Handle<Context> object)
     : HeapObjectData(broker, storage, object), slots_(broker->zone()) {}

-void ContextData::SerializeContextChain(JSHeapBroker* broker) {
-  if (serialized_context_chain_) return;
-  serialized_context_chain_ = true;
+ContextData* ContextData::previous(JSHeapBroker* broker, size_t* depth,
+                                   bool serialize) {
+  if (*depth == 0) return this;

-  TraceScope tracer(broker, this, "ContextData::SerializeContextChain");
-  Handle<Context> context = Handle<Context>::cast(object());
+  if (serialize && previous_ == nullptr) {
+    TraceScope tracer(broker, this, "ContextData::previous");
+    Handle<Context> context = Handle<Context>::cast(object());
+    Object prev = context->unchecked_previous();
+    if (prev.IsContext()) {
+      previous_ = broker->GetOrCreateData(prev)->AsContext();
+    }
+  }

-  DCHECK_NULL(previous_);
-  // Context::previous DCHECK-fails when called on the native context.
-  if (!context->IsNativeContext()) {
-    previous_ = broker->GetOrCreateData(context->previous())->AsContext();
-    previous_->SerializeContextChain(broker);
+  if (previous_ != nullptr) {
+    *depth = *depth - 1;
+    return previous_->previous(broker, depth, serialize);
   }
+  return this;
 }

-void ContextData::SerializeSlot(JSHeapBroker* broker, int index) {
-  TraceScope tracer(broker, this, "ContextData::SerializeSlot");
-  TRACE(broker, "Serializing script context slot " << index);
-  Handle<Context> context = Handle<Context>::cast(object());
-  CHECK(index >= 0 && index < context->length());
-  ObjectData* odata = broker->GetOrCreateData(context->get(index));
-  slots_.insert(std::make_pair(index, odata));
+ObjectData* ContextData::GetSlot(JSHeapBroker* broker, int index,
+                                 bool serialize) {
+  CHECK_GE(index, 0);
+  auto search = slots_.find(index);
+  if (search != slots_.end()) {
+    return search->second;
+  }
+
+  if (serialize) {
+    Handle<Context> context = Handle<Context>::cast(object());
+    if (index < context->length()) {
+      TraceScope tracer(broker, this, "ContextData::GetSlot");
+      TRACE(broker, "Serializing context slot " << index);
+      ObjectData* odata = broker->GetOrCreateData(context->get(index));
+      slots_.insert(std::make_pair(index, odata));
+      return odata;
+    }
+  }
+
+  return nullptr;
 }

 class NativeContextData : public ContextData {
@@ -564,6 +646,11 @@ class NativeContextData : public ContextData {
     return function_maps_;
   }

+  ScopeInfoData* scope_info() const {
+    CHECK(serialized_);
+    return scope_info_;
+  }
+
   NativeContextData(JSHeapBroker* broker, ObjectData** storage,
                     Handle<NativeContext> object);
   void Serialize(JSHeapBroker* broker);
@@ -574,6 +661,7 @@ class NativeContextData : public ContextData {
   BROKER_NATIVE_CONTEXT_FIELDS(DECL_MEMBER)
 #undef DECL_MEMBER
   ZoneVector<MapData*> function_maps_;
+  ScopeInfoData* scope_info_ = nullptr;
 };

 class NameData : public HeapObjectData {
@@ -674,14 +762,15 @@ bool IsFastLiteralHelper(Handle<JSObject> boilerplate, int max_depth,
   DCHECK_GE(max_depth, 0);
   DCHECK_GE(*max_properties, 0);

+  Isolate* const isolate = boilerplate->GetIsolate();
+
   // Make sure the boilerplate map is not deprecated.
-  if (!JSObject::TryMigrateInstance(boilerplate)) return false;
+  if (!JSObject::TryMigrateInstance(isolate, boilerplate)) return false;

   // Check for too deep nesting.
   if (max_depth == 0) return false;

   // Check the elements.
-  Isolate* const isolate = boilerplate->GetIsolate();
   Handle<FixedArrayBase> elements(boilerplate->elements(), isolate);
   if (elements->length() > 0 &&
       elements->map() != ReadOnlyRoots(isolate).fixed_cow_array_map()) {
@@ -780,6 +869,18 @@ class AllocationSiteData : public HeapObjectData {
   bool serialized_boilerplate_ = false;
 };

+class BigIntData : public HeapObjectData {
+ public:
+  BigIntData(JSHeapBroker* broker, ObjectData** storage, Handle<BigInt> object)
+      : HeapObjectData(broker, storage, object),
+        as_uint64_(object->AsUint64(nullptr)) {}
+
+  uint64_t AsUint64() const { return as_uint64_; }
+
+ private:
+  const uint64_t as_uint64_;
+};
+
 // Only used in JSNativeContextSpecialization.
 class ScriptContextTableData : public HeapObjectData {
  public:
@@ -1215,7 +1316,8 @@ JSObjectData::JSObjectData(JSHeapBroker* broker, ObjectData** storage,
                            Handle<JSObject> object)
     : HeapObjectData(broker, storage, object),
       inobject_fields_(broker->zone()),
-      own_constant_elements_(broker->zone()) {}
+      own_constant_elements_(broker->zone()),
+      own_properties_(broker->zone()) {}

 FixedArrayData::FixedArrayData(JSHeapBroker* broker, ObjectData** storage,
                                Handle<FixedArray> object)
@@ -1282,18 +1384,106 @@ class BytecodeArrayData : public FixedArrayBaseData {
     return incoming_new_target_or_generator_register_;
   }

+  uint8_t get(int index) const {
+    DCHECK(is_serialized_for_compilation_);
+    return bytecodes_[index];
+  }
+
+  Address GetFirstBytecodeAddress() const {
+    return reinterpret_cast<Address>(bytecodes_.data());
+  }
+
+  Handle<Object> GetConstantAtIndex(int index, Isolate* isolate) const {
+    return constant_pool_[index]->object();
+  }
+
+  bool IsConstantAtIndexSmi(int index) const {
+    return constant_pool_[index]->is_smi();
+  }
+
+  Smi GetConstantAtIndexAsSmi(int index) const {
+    return *(Handle<Smi>::cast(constant_pool_[index]->object()));
+  }
+
+  bool IsSerializedForCompilation() const {
+    return is_serialized_for_compilation_;
+  }
+
+  void SerializeForCompilation(JSHeapBroker* broker) {
+    if (is_serialized_for_compilation_) return;
+
+    Handle<BytecodeArray> bytecode_array =
+        Handle<BytecodeArray>::cast(object());
+
+    DCHECK(bytecodes_.empty());
+    bytecodes_.reserve(bytecode_array->length());
+    for (int i = 0; i < bytecode_array->length(); i++) {
+      bytecodes_.push_back(bytecode_array->get(i));
+    }
+
+    DCHECK(constant_pool_.empty());
+    Handle<FixedArray> constant_pool(bytecode_array->constant_pool(),
+                                     broker->isolate());
+    constant_pool_.reserve(constant_pool->length());
+    for (int i = 0; i < constant_pool->length(); i++) {
+      constant_pool_.push_back(broker->GetOrCreateData(constant_pool->get(i)));
+    }
+
+    Handle<ByteArray> source_position_table(
+        bytecode_array->SourcePositionTableIfCollected(), broker->isolate());
+    source_positions_.reserve(source_position_table->length());
+    for (int i = 0; i < source_position_table->length(); i++) {
+      source_positions_.push_back(source_position_table->get(i));
+    }
+
+    Handle<ByteArray> handlers(bytecode_array->handler_table(),
+                               broker->isolate());
+    handler_table_.reserve(handlers->length());
+    for (int i = 0; i < handlers->length(); i++) {
+      handler_table_.push_back(handlers->get(i));
+    }
+
+    is_serialized_for_compilation_ = true;
+  }
+
+  const byte* source_positions_address() const {
+    return source_positions_.data();
+  }
+
+  size_t source_positions_size() const { return source_positions_.size(); }
+
+  Address handler_table_address() const {
+    CHECK(is_serialized_for_compilation_);
+    return reinterpret_cast<Address>(handler_table_.data());
+  }
+
+  int handler_table_size() const {
+    CHECK(is_serialized_for_compilation_);
+    return static_cast<int>(handler_table_.size());
+  }
+
   BytecodeArrayData(JSHeapBroker* broker, ObjectData** storage,
                     Handle<BytecodeArray> object)
       : FixedArrayBaseData(broker, storage, object),
         register_count_(object->register_count()),
         parameter_count_(object->parameter_count()),
         incoming_new_target_or_generator_register_(
-            object->incoming_new_target_or_generator_register()) {}
+            object->incoming_new_target_or_generator_register()),
+        bytecodes_(broker->zone()),
+        source_positions_(broker->zone()),
+        handler_table_(broker->zone()),
+        constant_pool_(broker->zone()) {}

  private:
   int const register_count_;
   int const parameter_count_;
   interpreter::Register const incoming_new_target_or_generator_register_;
+
+  bool is_serialized_for_compilation_ = false;
+  ZoneVector<uint8_t> bytecodes_;
+  ZoneVector<uint8_t> source_positions_;
+  ZoneVector<uint8_t> handler_table_;
+  ZoneVector<ObjectData*> constant_pool_;
 };

 class JSArrayData : public JSObjectData {
@@ -1377,6 +1567,22 @@ class SharedFunctionInfoData : public HeapObjectData {
   void SetSerializedForCompilation(JSHeapBroker* broker,
                                    FeedbackVectorRef feedback);
   bool IsSerializedForCompilation(FeedbackVectorRef feedback) const;
+  void SerializeFunctionTemplateInfo(JSHeapBroker* broker);
+  FunctionTemplateInfoData* function_template_info() const {
+    return function_template_info_;
+  }
+  JSArrayData* GetTemplateObject(FeedbackSlot slot) const {
+    auto lookup_it = template_objects_.find(slot.ToInt());
+    if (lookup_it != template_objects_.cend()) {
+      return lookup_it->second;
+    }
+    return nullptr;
+  }
+  void SetTemplateObject(FeedbackSlot slot, JSArrayData* object) {
+    CHECK(
+        template_objects_.insert(std::make_pair(slot.ToInt(), object)).second);
+  }
+
 #define DECL_ACCESSOR(type, name) \
   type name() const { return name##_; }
   BROKER_SFI_FIELDS(DECL_ACCESSOR)
@@ -1391,6 +1597,8 @@ class SharedFunctionInfoData : public HeapObjectData {
 #define DECL_MEMBER(type, name) type const name##_;
   BROKER_SFI_FIELDS(DECL_MEMBER)
 #undef DECL_MEMBER
+  FunctionTemplateInfoData* function_template_info_;
+  ZoneMap<int, JSArrayData*> template_objects_;
 };

 SharedFunctionInfoData::SharedFunctionInfoData(
@@ -1408,7 +1616,9 @@ SharedFunctionInfoData::SharedFunctionInfoData(
 #define INIT_MEMBER(type, name) , name##_(object->name())
       BROKER_SFI_FIELDS(INIT_MEMBER)
 #undef INIT_MEMBER
-{
+      ,
+      function_template_info_(nullptr),
+      template_objects_(broker->zone()) {
   DCHECK_EQ(HasBuiltinId_, builtin_id_ != Builtins::kNoBuiltinId);
   DCHECK_EQ(HasBytecodeArray_, GetBytecodeArray_ != nullptr);
 }
@@ -1420,15 +1630,28 @@ void SharedFunctionInfoData::SetSerializedForCompilation(
                 << " as serialized for compilation");
 }

+void SharedFunctionInfoData::SerializeFunctionTemplateInfo(
+    JSHeapBroker* broker) {
+  if (function_template_info_) return;
+
+  function_template_info_ =
+      broker
+          ->GetOrCreateData(handle(
+              Handle<SharedFunctionInfo>::cast(object())->function_data(),
+              broker->isolate()))
+          ->AsFunctionTemplateInfo();
+}
+
 bool SharedFunctionInfoData::IsSerializedForCompilation(
     FeedbackVectorRef feedback) const {
   return serialized_for_compilation_.find(feedback.object()) !=
          serialized_for_compilation_.end();
 }

-class ModuleData : public HeapObjectData {
+class SourceTextModuleData : public HeapObjectData {
  public:
-  ModuleData(JSHeapBroker* broker, ObjectData** storage, Handle<Module> object);
+  SourceTextModuleData(JSHeapBroker* broker, ObjectData** storage,
+                       Handle<SourceTextModule> object);
   void Serialize(JSHeapBroker* broker);

   CellData* GetCell(int cell_index) const;
@@ -1439,35 +1662,36 @@
 class ModuleData : public HeapObjectData {
   ZoneVector<CellData*> exports_;
 };

-ModuleData::ModuleData(JSHeapBroker* broker, ObjectData** storage,
-                       Handle<Module> object)
+SourceTextModuleData::SourceTextModuleData(JSHeapBroker* broker,
+                                           ObjectData** storage,
+                                           Handle<SourceTextModule> object)
     : HeapObjectData(broker, storage, object),
       imports_(broker->zone()),
       exports_(broker->zone()) {}

-CellData* ModuleData::GetCell(int cell_index) const {
+CellData* SourceTextModuleData::GetCell(int cell_index) const {
   CHECK(serialized_);
   CellData* cell;
-  switch (ModuleDescriptor::GetCellIndexKind(cell_index)) {
-    case ModuleDescriptor::kImport:
-      cell = imports_.at(Module::ImportIndex(cell_index));
+  switch (SourceTextModuleDescriptor::GetCellIndexKind(cell_index)) {
+    case SourceTextModuleDescriptor::kImport:
+      cell = imports_.at(SourceTextModule::ImportIndex(cell_index));
       break;
-    case ModuleDescriptor::kExport:
-      cell = exports_.at(Module::ExportIndex(cell_index));
+    case SourceTextModuleDescriptor::kExport:
+      cell = exports_.at(SourceTextModule::ExportIndex(cell_index));
       break;
-    case ModuleDescriptor::kInvalid:
+    case SourceTextModuleDescriptor::kInvalid:
       UNREACHABLE();
   }
   CHECK_NOT_NULL(cell);
   return cell;
 }

-void ModuleData::Serialize(JSHeapBroker* broker) {
+void SourceTextModuleData::Serialize(JSHeapBroker* broker) {
   if (serialized_) return;
   serialized_ = true;

-  TraceScope tracer(broker, this, "ModuleData::Serialize");
-  Handle<Module> module = Handle<Module>::cast(object());
+  TraceScope tracer(broker, this, "SourceTextModuleData::Serialize");
+  Handle<SourceTextModule> module = Handle<SourceTextModule>::cast(object());

   // TODO(neis): We could be smarter and only serialize the cells we care about.
   // TODO(neis): Define a helper for serializing a FixedArray into a ZoneVector.
@@ -1614,7 +1838,7 @@ bool JSObjectData::cow_or_empty_elements_tenured() const {

 FixedArrayBaseData* JSObjectData::elements() const { return elements_; }

 void JSObjectData::SerializeAsBoilerplate(JSHeapBroker* broker) {
-  SerializeRecursive(broker, kMaxFastLiteralDepth);
+  SerializeRecursiveAsBoilerplate(broker, kMaxFastLiteralDepth);
 }

 void JSObjectData::SerializeElements(JSHeapBroker* broker) {
@@ -1717,11 +1941,13 @@ void MapData::SerializeOwnDescriptor(JSHeapBroker* broker,
                                << contents.size() << " total)");
 }

-void JSObjectData::SerializeRecursive(JSHeapBroker* broker, int depth) {
+void JSObjectData::SerializeRecursiveAsBoilerplate(JSHeapBroker* broker,
+                                                   int depth) {
   if (serialized_as_boilerplate_) return;
   serialized_as_boilerplate_ = true;

-  TraceScope tracer(broker, this, "JSObjectData::SerializeRecursive");
+  TraceScope tracer(broker, this,
+                    "JSObjectData::SerializeRecursiveAsBoilerplate");
   Handle<JSObject> boilerplate = Handle<JSObject>::cast(object());

   // We only serialize boilerplates that pass the IsInlinableFastLiteral
@@ -1767,7 +1993,8 @@ void JSObjectData::SerializeRecursive(JSHeapBroker* broker, int depth) {
       Handle<Object> value(fast_elements->get(i), isolate);
       if (value->IsJSObject()) {
         ObjectData* value_data = broker->GetOrCreateData(value);
-        value_data->AsJSObject()->SerializeRecursive(broker, depth - 1);
+        value_data->AsJSObject()->SerializeRecursiveAsBoilerplate(broker,
+                                                                  depth - 1);
       }
     }
   } else {
@@ -1802,9 +2029,22 @@ void JSObjectData::SerializeRecursive(JSHeapBroker* broker, int depth) {
     } else {
       Handle<Object> value(boilerplate->RawFastPropertyAt(field_index),
                            isolate);
+      // In case of unboxed double fields we use a sentinel NaN value to mark
+      // uninitialized fields. A boilerplate value with such a field may migrate
+      // from its unboxed double to a tagged representation. In the process the
+      // raw double is converted to a heap number. The sentinel value carries no
+      // special meaning when it occurs in a heap number, so we would like to
+      // recover the uninitialized value.
+      // We check for the sentinel here, specifically, since migrations might
+      // have been triggered as part of boilerplate serialization.
+      if (value->IsHeapNumber() &&
+          HeapNumber::cast(*value).value_as_bits() == kHoleNanInt64) {
+        value = isolate->factory()->uninitialized_value();
+      }
       ObjectData* value_data = broker->GetOrCreateData(value);
       if (value->IsJSObject()) {
-        value_data->AsJSObject()->SerializeRecursive(broker, depth - 1);
+        value_data->AsJSObject()->SerializeRecursiveAsBoilerplate(broker,
+                                                                  depth - 1);
       }
       inobject_fields_.push_back(JSObjectField{value_data});
     }
@@ -1839,35 +2079,50 @@ bool ObjectRef::equals(const ObjectRef& other) const {

 Isolate* ObjectRef::isolate() const { return broker()->isolate(); }

-ContextRef ContextRef::previous() const {
+ContextRef ContextRef::previous(size_t* depth, bool serialize) const {
+  DCHECK_NOT_NULL(depth);
   if (broker()->mode() == JSHeapBroker::kDisabled) {
     AllowHandleAllocation handle_allocation;
     AllowHandleDereference handle_dereference;
-    return ContextRef(broker(),
-                      handle(object()->previous(), broker()->isolate()));
+    Context current = *object();
+    while (*depth != 0 && current.unchecked_previous().IsContext()) {
+      current = Context::cast(current.unchecked_previous());
+      (*depth)--;
+    }
+    return ContextRef(broker(), handle(current, broker()->isolate()));
   }
-  return ContextRef(broker(), data()->AsContext()->previous());
+  ContextData* current = this->data()->AsContext();
+  return ContextRef(broker(), current->previous(broker(), depth, serialize));
 }

-// Not needed for TypedLowering.
-ObjectRef ContextRef::get(int index) const {
+base::Optional<ObjectRef> ContextRef::get(int index, bool serialize) const {
   if (broker()->mode() == JSHeapBroker::kDisabled) {
     AllowHandleAllocation handle_allocation;
     AllowHandleDereference handle_dereference;
     Handle<Object> value(object()->get(index), broker()->isolate());
     return ObjectRef(broker(), value);
   }
-  return ObjectRef(broker(), data()->AsContext()->GetSlot(index));
+  ObjectData* optional_slot =
+      data()->AsContext()->GetSlot(broker(), index, serialize);
+  if (optional_slot != nullptr) {
+    return ObjectRef(broker(), optional_slot);
+  }
+  return base::nullopt;
 }

-JSHeapBroker::JSHeapBroker(Isolate* isolate, Zone* broker_zone)
+JSHeapBroker::JSHeapBroker(Isolate* isolate, Zone* broker_zone,
+                           bool tracing_enabled)
     : isolate_(isolate),
       broker_zone_(broker_zone),
       current_zone_(broker_zone),
       refs_(new (zone()) RefsMap(kMinimalRefsBucketCount, AddressMatcher(),
                                  zone())),
       array_and_object_prototypes_(zone()),
-      feedback_(zone()) {
+      tracing_enabled_(tracing_enabled),
+      feedback_(zone()),
+      bytecode_analyses_(zone()),
+      ais_for_loading_then_(zone()),
+      ais_for_loading_exec_(zone()) {
   // Note that this initialization of the refs_ pointer with the minimal
   // initial capacity is redundant in the normal use case (concurrent
   // compilation enabled, standard objects to be serialized), as the map
@@ -1939,7 +2194,9 @@ void JSHeapBroker::SerializeShareableObjects() {
   {
     Builtins::Name builtins[] = {
         Builtins::kAllocateInYoungGeneration,
+        Builtins::kAllocateRegularInYoungGeneration,
         Builtins::kAllocateInOldGeneration,
+        Builtins::kAllocateRegularInOldGeneration,
         Builtins::kArgumentsAdaptorTrampoline,
         Builtins::kArrayConstructorImpl,
         Builtins::kCallFunctionForwardVarargs,
@@ -2400,6 +2657,11 @@ bool AllocationSiteRef::IsFastLiteral() const {
   return data()->AsAllocationSite()->IsFastLiteral();
 }

+void JSObjectRef::SerializeElements() {
+  CHECK_EQ(broker()->mode(), JSHeapBroker::kSerializing);
+  data()->AsJSObject()->SerializeElements(broker());
+}
+
 void JSObjectRef::EnsureElementsTenured() {
   if (broker()->mode() == JSHeapBroker::kDisabled) {
     AllowHandleAllocation allow_handle_allocation;
@@ -2553,6 +2815,95 @@ double FixedDoubleArrayRef::get_scalar(int i) const {
   return data()->AsFixedDoubleArray()->Get(i).get_scalar();
 }

+uint8_t BytecodeArrayRef::get(int index) const {
+  if (broker()->mode() == JSHeapBroker::kDisabled) {
+    AllowHandleAllocation handle_allocation;
+    AllowHandleDereference allow_handle_dereference;
+    return object()->get(index);
+  }
+  return data()->AsBytecodeArray()->get(index);
+}
+
+Address BytecodeArrayRef::GetFirstBytecodeAddress() const {
+  if (broker()->mode() == JSHeapBroker::kDisabled) {
+    AllowHandleAllocation handle_allocation;
+    AllowHandleDereference allow_handle_dereference;
+    return object()->GetFirstBytecodeAddress();
+  }
+  return data()->AsBytecodeArray()->GetFirstBytecodeAddress();
+}
+
+Handle<Object> BytecodeArrayRef::GetConstantAtIndex(int index) const {
+  if (broker()->mode() == JSHeapBroker::kDisabled) {
+    AllowHandleAllocation handle_allocation;
+    AllowHandleDereference allow_handle_dereference;
+    return handle(object()->constant_pool().get(index), broker()->isolate());
+  }
+  return data()->AsBytecodeArray()->GetConstantAtIndex(index,
+                                                       broker()->isolate());
+}
+
+bool BytecodeArrayRef::IsConstantAtIndexSmi(int index) const {
+  if (broker()->mode() == JSHeapBroker::kDisabled) {
+    AllowHandleAllocation handle_allocation;
+    AllowHandleDereference allow_handle_dereference;
+    return object()->constant_pool().get(index).IsSmi();
+  }
+  return data()->AsBytecodeArray()->IsConstantAtIndexSmi(index);
+}
+
+Smi BytecodeArrayRef::GetConstantAtIndexAsSmi(int index) const {
+  if (broker()->mode() == JSHeapBroker::kDisabled) {
+    AllowHandleAllocation handle_allocation;
+    AllowHandleDereference allow_handle_dereference;
+    return Smi::cast(object()->constant_pool().get(index));
+  }
+  return data()->AsBytecodeArray()->GetConstantAtIndexAsSmi(index);
+}
+
+bool BytecodeArrayRef::IsSerializedForCompilation() const {
+  if (broker()->mode() == JSHeapBroker::kDisabled) return true;
+  return data()->AsBytecodeArray()->IsSerializedForCompilation();
+}
+
+void BytecodeArrayRef::SerializeForCompilation() {
+  if (broker()->mode() == JSHeapBroker::kDisabled) return;
+  data()->AsBytecodeArray()->SerializeForCompilation(broker());
+}
+
+const byte* BytecodeArrayRef::source_positions_address() const {
+  if (broker()->mode() == JSHeapBroker::kDisabled) {
+    AllowHandleDereference allow_handle_dereference;
+    return object()->SourcePositionTableIfCollected().GetDataStartAddress();
+  }
+  return data()->AsBytecodeArray()->source_positions_address();
+}
+
+int BytecodeArrayRef::source_positions_size() const {
+  if (broker()->mode() == JSHeapBroker::kDisabled) {
+    AllowHandleDereference allow_handle_dereference;
+    return object()->SourcePositionTableIfCollected().length();
+  }
+  return static_cast<int>(data()->AsBytecodeArray()->source_positions_size());
+}
+
+Address BytecodeArrayRef::handler_table_address() const {
+  if (broker()->mode() == JSHeapBroker::kDisabled) {
+    AllowHandleDereference allow_handle_dereference;
+    return reinterpret_cast<Address>(
        object()->handler_table().GetDataStartAddress());
+  }
+  return data()->AsBytecodeArray()->handler_table_address();
+}
+
+int BytecodeArrayRef::handler_table_size() const {
+  if (broker()->mode() == JSHeapBroker::kDisabled) {
+    AllowHandleDereference allow_handle_dereference;
+    return object()->handler_table().length();
+  }
+  return data()->AsBytecodeArray()->handler_table_size();
+}
+
 #define IF_BROKER_DISABLED_ACCESS_HANDLE_C(holder, name) \
   if (broker()->mode() == JSHeapBroker::kDisabled) {     \
     AllowHandleAllocation handle_allocation;             \
@@ -2630,15 +2981,13 @@ BIMODAL_ACCESSOR_C(JSTypedArray, size_t, length)
 BIMODAL_ACCESSOR(JSTypedArray, HeapObject, buffer)

 BIMODAL_ACCESSOR_B(Map, bit_field2, elements_kind, Map::ElementsKindBits)
-BIMODAL_ACCESSOR_B(Map, bit_field2, is_extensible, Map::IsExtensibleBit)
-BIMODAL_ACCESSOR_B(Map, bit_field2, has_hidden_prototype,
-                   Map::HasHiddenPrototypeBit)
-BIMODAL_ACCESSOR_B(Map, bit_field3, is_deprecated, Map::IsDeprecatedBit)
 BIMODAL_ACCESSOR_B(Map, bit_field3, is_dictionary_map, Map::IsDictionaryMapBit)
+BIMODAL_ACCESSOR_B(Map, bit_field3, is_deprecated, Map::IsDeprecatedBit)
 BIMODAL_ACCESSOR_B(Map, bit_field3, NumberOfOwnDescriptors,
                    Map::NumberOfOwnDescriptorsBits)
 BIMODAL_ACCESSOR_B(Map, bit_field3, is_migration_target,
                    Map::IsMigrationTargetBit)
+BIMODAL_ACCESSOR_B(Map, bit_field3, is_extensible, Map::IsExtensibleBit)
 BIMODAL_ACCESSOR_B(Map, bit_field, has_prototype_slot, Map::HasPrototypeSlotBit)
 BIMODAL_ACCESSOR_B(Map, bit_field, is_access_check_needed,
                    Map::IsAccessCheckNeededBit)
@@ -2663,7 +3012,109 @@ BROKER_NATIVE_CONTEXT_FIELDS(DEF_NATIVE_CONTEXT_ACCESSOR)
 BIMODAL_ACCESSOR(PropertyCell, Object, value)
 BIMODAL_ACCESSOR_C(PropertyCell, PropertyDetails, property_details)

-BIMODAL_ACCESSOR(FunctionTemplateInfo, Object, call_code)
+base::Optional<CallHandlerInfoRef> FunctionTemplateInfoRef::call_code() const {
+  if (broker()->mode() == JSHeapBroker::kDisabled) {
+    return CallHandlerInfoRef(
+        broker(), handle(object()->call_code(), broker()->isolate()));
+  }
+  CallHandlerInfoData* call_code =
+      data()->AsFunctionTemplateInfo()->call_code();
+  if (!call_code) return base::nullopt;
+  return CallHandlerInfoRef(broker(), call_code);
+}
+
+bool FunctionTemplateInfoRef::is_signature_undefined() const {
+  if (broker()->mode() == JSHeapBroker::kDisabled) {
+    AllowHandleDereference allow_handle_dereference;
+    AllowHandleAllocation allow_handle_allocation;
+
+    return object()->signature().IsUndefined(broker()->isolate());
+  }
+  return data()->AsFunctionTemplateInfo()->is_signature_undefined();
+}
+
+bool FunctionTemplateInfoRef::has_call_code() const {
+  if (broker()->mode() == JSHeapBroker::kDisabled) {
+    AllowHandleDereference allow_handle_dereference;
+    AllowHandleAllocation allow_handle_allocation;
+
+    CallOptimization call_optimization(broker()->isolate(), object());
+    return call_optimization.is_simple_api_call();
+  }
+  return data()->AsFunctionTemplateInfo()->has_call_code();
+}
+
+BIMODAL_ACCESSOR_C(FunctionTemplateInfo, bool, accept_any_receiver)
+
+HolderLookupResult FunctionTemplateInfoRef::LookupHolderOfExpectedType(
+    MapRef receiver_map, bool serialize) {
+  const HolderLookupResult not_found;
+
+  if (broker()->mode() == JSHeapBroker::kDisabled) {
+    AllowHandleDereference allow_handle_dereference;
+    AllowHandleAllocation allow_handle_allocation;
+
+    CallOptimization call_optimization(broker()->isolate(), object());
+    Handle<Map> receiver_map_ref(receiver_map.object());
+    if (!receiver_map_ref->IsJSReceiverMap() ||
(receiver_map_ref->is_access_check_needed() && + !object()->accept_any_receiver())) { + return not_found; + } + + HolderLookupResult result; + Handle holder = call_optimization.LookupHolderOfExpectedType( + receiver_map_ref, &result.lookup); + + switch (result.lookup) { + case CallOptimization::kHolderFound: + result.holder = JSObjectRef(broker(), holder); + break; + default: + DCHECK_EQ(result.holder, base::nullopt); + break; + } + return result; + } + + FunctionTemplateInfoData* fti_data = data()->AsFunctionTemplateInfo(); + KnownReceiversMap::iterator lookup_it = + fti_data->known_receivers().find(receiver_map.data()->AsMap()); + if (lookup_it != fti_data->known_receivers().cend()) { + return lookup_it->second; + } + if (!serialize) { + TRACE_BROKER_MISSING(broker(), + "holder for receiver with map " << receiver_map); + return not_found; + } + if (!receiver_map.IsJSReceiverMap() || + (receiver_map.is_access_check_needed() && !accept_any_receiver())) { + fti_data->known_receivers().insert( + {receiver_map.data()->AsMap(), not_found}); + return not_found; + } + + HolderLookupResult result; + CallOptimization call_optimization(broker()->isolate(), object()); + Handle holder = call_optimization.LookupHolderOfExpectedType( + receiver_map.object(), &result.lookup); + + switch (result.lookup) { + case CallOptimization::kHolderFound: { + result.holder = JSObjectRef(broker(), holder); + fti_data->known_receivers().insert( + {receiver_map.data()->AsMap(), result}); + break; + } + default: { + DCHECK_EQ(result.holder, base::nullopt); + fti_data->known_receivers().insert( + {receiver_map.data()->AsMap(), result}); + } + } + return result; +} BIMODAL_ACCESSOR(CallHandlerInfo, Object, data) @@ -2746,11 +3197,21 @@ bool StringRef::IsSeqString() const { return data()->AsString()->is_seq_string(); } +ScopeInfoRef NativeContextRef::scope_info() const { + if (broker()->mode() == JSHeapBroker::kDisabled) { + AllowHandleAllocation handle_allocation; + AllowHandleDereference handle_dereference; + return ScopeInfoRef(broker(), + handle(object()->scope_info(), broker()->isolate())); + } + return ScopeInfoRef(broker(), data()->AsNativeContext()->scope_info()); +} + MapRef NativeContextRef::GetFunctionMapFromIndex(int index) const { DCHECK_GE(index, Context::FIRST_FUNCTION_MAP_INDEX); DCHECK_LE(index, Context::LAST_FUNCTION_MAP_INDEX); if (broker()->mode() == JSHeapBroker::kDisabled) { - return get(index).AsMap(); + return get(index).value().AsMap(); } return MapRef(broker(), data()->AsNativeContext()->function_maps().at( index - Context::FIRST_FUNCTION_MAP_INDEX)); @@ -2853,6 +3314,19 @@ base::Optional ObjectRef::GetOwnConstantElement( return ObjectRef(broker(), element); } +base::Optional JSObjectRef::GetOwnProperty( + Representation field_representation, FieldIndex index, + bool serialize) const { + if (broker()->mode() == JSHeapBroker::kDisabled) { + return GetOwnPropertyFromHeap(broker(), Handle::cast(object()), + field_representation, index); + } + ObjectData* property = data()->AsJSObject()->GetOwnProperty( + broker(), field_representation, index, serialize); + if (property == nullptr) return base::nullopt; + return ObjectRef(broker(), property); +} + base::Optional JSArrayRef::GetOwnCowElement(uint32_t index, bool serialize) const { if (broker()->mode() == JSHeapBroker::kDisabled) { @@ -2884,14 +3358,19 @@ double MutableHeapNumberRef::value() const { return data()->AsMutableHeapNumber()->value(); } -CellRef ModuleRef::GetCell(int cell_index) const { +uint64_t BigIntRef::AsUint64() const { + 
IF_BROKER_DISABLED_ACCESS_HANDLE_C(BigInt, AsUint64);
+  return data()->AsBigInt()->AsUint64();
+}
+
+CellRef SourceTextModuleRef::GetCell(int cell_index) const {
   if (broker()->mode() == JSHeapBroker::kDisabled) {
     AllowHandleAllocation handle_allocation;
     AllowHandleDereference allow_handle_dereference;
     return CellRef(broker(),
                    handle(object()->GetCell(cell_index), broker()->isolate()));
   }
-  return CellRef(broker(), data()->AsModule()->GetCell(cell_index));
+  return CellRef(broker(), data()->AsSourceTextModule()->GetCell(cell_index));
 }
 
 ObjectRef::ObjectRef(JSHeapBroker* broker, Handle<Object> object)
@@ -3108,6 +3587,8 @@ void NativeContextData::Serialize(JSHeapBroker* broker) {
   for (int i = first; i <= last; ++i) {
     function_maps_.push_back(broker->GetOrCreateData(context->get(i))->AsMap());
   }
+
+  scope_info_ = broker->GetOrCreateData(context->scope_info())->AsScopeInfo();
 }
 
 void JSFunctionRef::Serialize() {
@@ -3133,6 +3614,46 @@ bool JSFunctionRef::IsSerializedForCompilation() const {
          shared().IsSerializedForCompilation(feedback_vector());
 }
 
+JSArrayRef SharedFunctionInfoRef::GetTemplateObject(ObjectRef description,
+                                                    FeedbackVectorRef vector,
+                                                    FeedbackSlot slot,
+                                                    bool serialize) {
+  // Look in the feedback vector for the array. A Smi indicates that it's
+  // not yet cached here.
+  ObjectRef candidate = vector.get(slot);
+  if (!candidate.IsSmi()) {
+    return candidate.AsJSArray();
+  }
+
+  if (broker()->mode() == JSHeapBroker::kDisabled) {
+    AllowHandleAllocation handle_allocation;
+    AllowHandleDereference allow_handle_dereference;
+    Handle<TemplateObjectDescription> tod =
+        Handle<TemplateObjectDescription>::cast(description.object());
+    Handle<JSArray> template_object =
+        TemplateObjectDescription::GetTemplateObject(
+            broker()->isolate(), broker()->native_context().object(), tod,
+            object(), slot.ToInt());
+    return JSArrayRef(broker(), template_object);
+  }
+
+  JSArrayData* array = data()->AsSharedFunctionInfo()->GetTemplateObject(slot);
+  if (array != nullptr) return JSArrayRef(broker(), array);
+
+  CHECK(serialize);
+  CHECK(broker()->SerializingAllowed());
+
+  Handle<TemplateObjectDescription> tod =
+      Handle<TemplateObjectDescription>::cast(description.object());
+  Handle<JSArray> template_object =
+      TemplateObjectDescription::GetTemplateObject(
+          broker()->isolate(), broker()->native_context().object(), tod,
+          object(), slot.ToInt());
+  array = broker()->GetOrCreateData(template_object)->AsJSArray();
+  data()->AsSharedFunctionInfo()->SetTemplateObject(slot, array);
+  return JSArrayRef(broker(), array);
+}
+
 void SharedFunctionInfoRef::SetSerializedForCompilation(
     FeedbackVectorRef feedback) {
   CHECK_EQ(broker()->mode(), JSHeapBroker::kSerializing);
@@ -3140,9 +3661,27 @@ void SharedFunctionInfoRef::SetSerializedForCompilation(
                                                                 feedback);
 }
 
+void SharedFunctionInfoRef::SerializeFunctionTemplateInfo() {
+  CHECK_EQ(broker()->mode(), JSHeapBroker::kSerializing);
+
+  data()->AsSharedFunctionInfo()->SerializeFunctionTemplateInfo(broker());
+}
+
+base::Optional<FunctionTemplateInfoRef>
+SharedFunctionInfoRef::function_template_info() const {
+  if (broker()->mode() == JSHeapBroker::kDisabled) {
+    return FunctionTemplateInfoRef(
+        broker(), handle(object()->function_data(), broker()->isolate()));
+  }
+  FunctionTemplateInfoData* function_template_info =
+      data()->AsSharedFunctionInfo()->function_template_info();
+  if (!function_template_info) return base::nullopt;
+  return FunctionTemplateInfoRef(broker(), function_template_info);
+}
+
 bool SharedFunctionInfoRef::IsSerializedForCompilation(
     FeedbackVectorRef feedback) const {
-  CHECK_NE(broker()->mode(), JSHeapBroker::kDisabled);
+  if (broker()->mode() == JSHeapBroker::kDisabled) return true;
  return
data()->AsSharedFunctionInfo()->IsSerializedForCompilation(feedback); } @@ -3181,22 +3720,10 @@ bool MapRef::serialized_prototype() const { return data()->AsMap()->serialized_prototype(); } -void ModuleRef::Serialize() { +void SourceTextModuleRef::Serialize() { if (broker()->mode() == JSHeapBroker::kDisabled) return; CHECK_EQ(broker()->mode(), JSHeapBroker::kSerializing); - data()->AsModule()->Serialize(broker()); -} - -void ContextRef::SerializeContextChain() { - if (broker()->mode() == JSHeapBroker::kDisabled) return; - CHECK_EQ(broker()->mode(), JSHeapBroker::kSerializing); - data()->AsContext()->SerializeContextChain(broker()); -} - -void ContextRef::SerializeSlot(int index) { - if (broker()->mode() == JSHeapBroker::kDisabled) return; - CHECK_EQ(broker()->mode(), JSHeapBroker::kSerializing); - data()->AsContext()->SerializeSlot(broker(), index); + data()->AsSourceTextModule()->Serialize(broker()); } void NativeContextRef::Serialize() { @@ -3228,10 +3755,10 @@ void PropertyCellRef::Serialize() { data()->AsPropertyCell()->Serialize(broker()); } -void FunctionTemplateInfoRef::Serialize() { +void FunctionTemplateInfoRef::SerializeCallCode() { if (broker()->mode() == JSHeapBroker::kDisabled) return; CHECK_EQ(broker()->mode(), JSHeapBroker::kSerializing); - data()->AsFunctionTemplateInfo()->Serialize(broker()); + data()->AsFunctionTemplateInfo()->SerializeCallCode(broker()); } base::Optional JSGlobalProxyRef::GetPropertyCell( @@ -3307,10 +3834,67 @@ base::Optional GlobalAccessFeedback::GetConstantHint() const { return {}; } -ElementAccessFeedback::ElementAccessFeedback(Zone* zone) +KeyedAccessMode KeyedAccessMode::FromNexus(FeedbackNexus const& nexus) { + if (IsKeyedLoadICKind(nexus.kind())) { + return KeyedAccessMode(AccessMode::kLoad, nexus.GetKeyedAccessLoadMode()); + } + if (IsKeyedHasICKind(nexus.kind())) { + return KeyedAccessMode(AccessMode::kHas, nexus.GetKeyedAccessLoadMode()); + } + if (IsKeyedStoreICKind(nexus.kind())) { + return KeyedAccessMode(AccessMode::kStore, nexus.GetKeyedAccessStoreMode()); + } + if (IsStoreInArrayLiteralICKind(nexus.kind())) { + return KeyedAccessMode(AccessMode::kStoreInLiteral, + nexus.GetKeyedAccessStoreMode()); + } + UNREACHABLE(); +} + +AccessMode KeyedAccessMode::access_mode() const { return access_mode_; } + +bool KeyedAccessMode::IsLoad() const { + return access_mode_ == AccessMode::kLoad || access_mode_ == AccessMode::kHas; +} +bool KeyedAccessMode::IsStore() const { + return access_mode_ == AccessMode::kStore || + access_mode_ == AccessMode::kStoreInLiteral; +} + +KeyedAccessLoadMode KeyedAccessMode::load_mode() const { + CHECK(IsLoad()); + return load_store_mode_.load_mode; +} + +KeyedAccessStoreMode KeyedAccessMode::store_mode() const { + CHECK(IsStore()); + return load_store_mode_.store_mode; +} + +KeyedAccessMode::LoadStoreMode::LoadStoreMode(KeyedAccessLoadMode load_mode) + : load_mode(load_mode) {} +KeyedAccessMode::LoadStoreMode::LoadStoreMode(KeyedAccessStoreMode store_mode) + : store_mode(store_mode) {} + +KeyedAccessMode::KeyedAccessMode(AccessMode access_mode, + KeyedAccessLoadMode load_mode) + : access_mode_(access_mode), load_store_mode_(load_mode) { + CHECK(!IsStore()); + CHECK(IsLoad()); +} +KeyedAccessMode::KeyedAccessMode(AccessMode access_mode, + KeyedAccessStoreMode store_mode) + : access_mode_(access_mode), load_store_mode_(store_mode) { + CHECK(!IsLoad()); + CHECK(IsStore()); +} + +ElementAccessFeedback::ElementAccessFeedback(Zone* zone, + KeyedAccessMode const& keyed_mode) : ProcessedFeedback(kElementAccess), 
      receiver_maps(zone),
-      transitions(zone) {}
+      transitions(zone),
+      keyed_mode(keyed_mode) {}
 
 ElementAccessFeedback::MapIterator::MapIterator(
     ElementAccessFeedback const& processed, JSHeapBroker* broker)
@@ -3383,7 +3967,7 @@ GlobalAccessFeedback const* JSHeapBroker::GetGlobalAccessFeedback(
 }
 
 ElementAccessFeedback const* JSHeapBroker::ProcessFeedbackMapsForElementAccess(
-    MapHandles const& maps) {
+    MapHandles const& maps, KeyedAccessMode const& keyed_mode) {
   DCHECK(!maps.empty());
 
   // Collect possible transition targets.
@@ -3397,7 +3981,8 @@ ElementAccessFeedback const* JSHeapBroker::ProcessFeedbackMapsForElementAccess(
     }
   }
 
-  ElementAccessFeedback* result = new (zone()) ElementAccessFeedback(zone());
+  ElementAccessFeedback* result =
+      new (zone()) ElementAccessFeedback(zone(), keyed_mode);
 
   // Separate the actual receiver maps and the possible transition sources.
   for (Handle<Map> map : maps) {
@@ -3464,7 +4049,7 @@ GlobalAccessFeedback const* JSHeapBroker::ProcessFeedbackForGlobalAccess(
   }
   ContextRef context_ref(this, context);
   if (immutable) {
-    context_ref.SerializeSlot(context_slot_index);
+    context_ref.get(context_slot_index, true);
   }
   return new (zone())
       GlobalAccessFeedback(context_ref, context_slot_index, immutable);
@@ -3489,6 +4074,54 @@ base::Optional<NameRef> JSHeapBroker::GetNameFeedback(
   return NameRef(this, handle(raw_name, isolate()));
 }
 
+PropertyAccessInfo JSHeapBroker::GetAccessInfoForLoadingThen(MapRef map) {
+  auto access_info = ais_for_loading_then_.find(map);
+  if (access_info == ais_for_loading_then_.end()) {
+    TRACE_BROKER_MISSING(
+        this, "access info for reducing JSResolvePromise with map " << map);
+    return PropertyAccessInfo::Invalid(zone());
+  }
+  return access_info->second;
+}
+
+void JSHeapBroker::CreateAccessInfoForLoadingThen(
+    MapRef map, CompilationDependencies* dependencies) {
+  auto access_info = ais_for_loading_then_.find(map);
+  if (access_info == ais_for_loading_then_.end()) {
+    AccessInfoFactory access_info_factory(this, dependencies, zone());
+    Handle<Name> then_string = isolate()->factory()->then_string();
+    ais_for_loading_then_.insert(
+        std::make_pair(map, access_info_factory.ComputePropertyAccessInfo(
+                                map.object(), then_string, AccessMode::kLoad)));
+  }
+}
+
+PropertyAccessInfo JSHeapBroker::GetAccessInfoForLoadingExec(MapRef map) {
+  auto access_info = ais_for_loading_exec_.find(map);
+  if (access_info == ais_for_loading_exec_.end()) {
+    TRACE_BROKER_MISSING(this,
+                         "access info for property 'exec' on map " << map);
+    return PropertyAccessInfo::Invalid(zone());
+  }
+  return access_info->second;
+}
+
+PropertyAccessInfo const& JSHeapBroker::CreateAccessInfoForLoadingExec(
+    MapRef map, CompilationDependencies* dependencies) {
+  auto access_info = ais_for_loading_exec_.find(map);
+  if (access_info != ais_for_loading_exec_.end()) {
+    return access_info->second;
+  }
+
+  ZoneVector<PropertyAccessInfo> access_infos(zone());
+  AccessInfoFactory access_info_factory(this, dependencies, zone());
+  PropertyAccessInfo ai_exec = access_info_factory.ComputePropertyAccessInfo(
+      map.object(), isolate()->factory()->exec_string(), AccessMode::kLoad);
+
+  auto inserted_ai = ais_for_loading_exec_.insert(std::make_pair(map, ai_exec));
+  return inserted_ai.first->second;
+}
+
 ElementAccessFeedback const* ProcessedFeedback::AsElementAccess() const {
   CHECK_EQ(kElementAccess, kind());
   return static_cast<ElementAccessFeedback const*>(this);
@@ -3499,6 +4132,66 @@ NamedAccessFeedback const* ProcessedFeedback::AsNamedAccess() const {
   return static_cast<NamedAccessFeedback const*>(this);
 }
 
+BytecodeAnalysis const& JSHeapBroker::GetBytecodeAnalysis(
+    Handle<BytecodeArray>
bytecode_array, BailoutId osr_bailout_id, + bool analyze_liveness, bool serialize) { + ObjectData* bytecode_array_data = GetData(bytecode_array); + CHECK_NOT_NULL(bytecode_array_data); + + auto it = bytecode_analyses_.find(bytecode_array_data); + if (it != bytecode_analyses_.end()) { + // Bytecode analysis can be run for OSR or for non-OSR. In the rare case + // where we optimize for OSR and consider the top-level function itself for + // inlining (because of recursion), we need both the OSR and the non-OSR + // analysis. Fortunately, the only difference between the two lies in + // whether the OSR entry offset gets computed (from the OSR bailout id). + // Hence it's okay to reuse the OSR-version when asked for the non-OSR + // version, such that we need to store at most one analysis result per + // bytecode array. + CHECK_IMPLIES(osr_bailout_id != it->second->osr_bailout_id(), + osr_bailout_id.IsNone()); + CHECK_EQ(analyze_liveness, it->second->liveness_analyzed()); + return *it->second; + } + + CHECK(serialize); + BytecodeAnalysis* analysis = new (zone()) BytecodeAnalysis( + bytecode_array, zone(), osr_bailout_id, analyze_liveness); + DCHECK_EQ(analysis->osr_bailout_id(), osr_bailout_id); + bytecode_analyses_[bytecode_array_data] = analysis; + return *analysis; +} + +OffHeapBytecodeArray::OffHeapBytecodeArray(BytecodeArrayRef bytecode_array) + : array_(bytecode_array) {} + +int OffHeapBytecodeArray::length() const { return array_.length(); } + +int OffHeapBytecodeArray::parameter_count() const { + return array_.parameter_count(); +} + +uint8_t OffHeapBytecodeArray::get(int index) const { return array_.get(index); } + +void OffHeapBytecodeArray::set(int index, uint8_t value) { UNREACHABLE(); } + +Address OffHeapBytecodeArray::GetFirstBytecodeAddress() const { + return array_.GetFirstBytecodeAddress(); +} + +Handle OffHeapBytecodeArray::GetConstantAtIndex( + int index, Isolate* isolate) const { + return array_.GetConstantAtIndex(index); +} + +bool OffHeapBytecodeArray::IsConstantAtIndexSmi(int index) const { + return array_.IsConstantAtIndexSmi(index); +} + +Smi OffHeapBytecodeArray::GetConstantAtIndexAsSmi(int index) const { + return array_.GetConstantAtIndexAsSmi(index); +} + #undef BIMODAL_ACCESSOR #undef BIMODAL_ACCESSOR_B #undef BIMODAL_ACCESSOR_C diff --git a/deps/v8/src/compiler/js-heap-broker.h b/deps/v8/src/compiler/js-heap-broker.h index 2c4cc766bced00..ffc10d2b93a023 100644 --- a/deps/v8/src/compiler/js-heap-broker.h +++ b/deps/v8/src/compiler/js-heap-broker.h @@ -8,796 +8,24 @@ #include "src/base/compiler-specific.h" #include "src/base/optional.h" #include "src/common/globals.h" +#include "src/compiler/access-info.h" #include "src/compiler/refs-map.h" #include "src/handles/handles.h" +#include "src/interpreter/bytecode-array-accessor.h" #include "src/objects/feedback-vector.h" #include "src/objects/function-kind.h" -#include "src/objects/instance-type.h" #include "src/objects/objects.h" #include "src/utils/ostreams.h" #include "src/zone/zone-containers.h" namespace v8 { namespace internal { - -class BytecodeArray; -class CallHandlerInfo; -class FixedDoubleArray; -class FunctionTemplateInfo; -class HeapNumber; -class InternalizedString; -class JSBoundFunction; -class JSDataView; -class JSGlobalProxy; -class JSRegExp; -class JSTypedArray; -class NativeContext; -class ScriptContextTable; -class VectorSlotPair; - namespace compiler { -// Whether we are loading a property or storing to a property. -// For a store during literal creation, do not walk up the prototype chain. 
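// KeyedAccessMode, introduced in js-heap-broker.cc above, pairs this
// AccessMode with either a load mode or a store mode in a small
// discriminated union. A minimal standalone sketch of that shape; the
// Toy* names are illustrative and plain ints replace the real mode
// enums (none of this is V8 API):
#include <cassert>

enum class ToyAccessMode { kLoad, kStore };

class ToyKeyedMode {
 public:
  static ToyKeyedMode Load(int load_mode) {
    return ToyKeyedMode(ToyAccessMode::kLoad, load_mode);
  }
  static ToyKeyedMode Store(int store_mode) {
    return ToyKeyedMode(ToyAccessMode::kStore, store_mode);
  }

  bool IsLoad() const { return mode_ == ToyAccessMode::kLoad; }
  int load_mode() const {
    assert(IsLoad());  // plays the role of CHECK(IsLoad()) above
    return payload_;
  }
  int store_mode() const {
    assert(!IsLoad());
    return payload_;
  }

 private:
  ToyKeyedMode(ToyAccessMode mode, int payload)
      : mode_(mode), payload_(payload) {}
  ToyAccessMode mode_;
  int payload_;  // stands in for the load/store-mode union
};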
-enum class AccessMode { kLoad, kStore, kStoreInLiteral, kHas }; - -enum class OddballType : uint8_t { - kNone, // Not an Oddball. - kBoolean, // True or False. - kUndefined, - kNull, - kHole, - kUninitialized, - kOther // Oddball, but none of the above. -}; - -// This list is sorted such that subtypes appear before their supertypes. -// DO NOT VIOLATE THIS PROPERTY! -#define HEAP_BROKER_OBJECT_LIST(V) \ - /* Subtypes of JSObject */ \ - V(JSArray) \ - V(JSBoundFunction) \ - V(JSDataView) \ - V(JSFunction) \ - V(JSGlobalProxy) \ - V(JSRegExp) \ - V(JSTypedArray) \ - /* Subtypes of Context */ \ - V(NativeContext) \ - /* Subtypes of FixedArray */ \ - V(Context) \ - V(ScopeInfo) \ - V(ScriptContextTable) \ - /* Subtypes of FixedArrayBase */ \ - V(BytecodeArray) \ - V(FixedArray) \ - V(FixedDoubleArray) \ - /* Subtypes of Name */ \ - V(InternalizedString) \ - V(String) \ - V(Symbol) \ - /* Subtypes of HeapObject */ \ - V(AllocationSite) \ - V(CallHandlerInfo) \ - V(Cell) \ - V(Code) \ - V(DescriptorArray) \ - V(FeedbackCell) \ - V(FeedbackVector) \ - V(FixedArrayBase) \ - V(FunctionTemplateInfo) \ - V(HeapNumber) \ - V(JSObject) \ - V(Map) \ - V(Module) \ - V(MutableHeapNumber) \ - V(Name) \ - V(PropertyCell) \ - V(SharedFunctionInfo) \ - /* Subtypes of Object */ \ - V(HeapObject) - -class CompilationDependencies; -class JSHeapBroker; -class ObjectData; -class PerIsolateCompilerCache; -class PropertyAccessInfo; -#define FORWARD_DECL(Name) class Name##Ref; -HEAP_BROKER_OBJECT_LIST(FORWARD_DECL) -#undef FORWARD_DECL - -class V8_EXPORT_PRIVATE ObjectRef { - public: - ObjectRef(JSHeapBroker* broker, Handle object); - ObjectRef(JSHeapBroker* broker, ObjectData* data) - : data_(data), broker_(broker) { - CHECK_NOT_NULL(data_); - } - - Handle object() const; - - bool equals(const ObjectRef& other) const; - - bool IsSmi() const; - int AsSmi() const; - -#define HEAP_IS_METHOD_DECL(Name) bool Is##Name() const; - HEAP_BROKER_OBJECT_LIST(HEAP_IS_METHOD_DECL) -#undef HEAP_IS_METHOD_DECL - -#define HEAP_AS_METHOD_DECL(Name) Name##Ref As##Name() const; - HEAP_BROKER_OBJECT_LIST(HEAP_AS_METHOD_DECL) -#undef HEAP_AS_METHOD_DECL - - bool IsNullOrUndefined() const; - - bool BooleanValue() const; - Maybe OddballToNumber() const; - - // Return the element at key {index} if {index} is known to be an own data - // property of the object that is non-writable and non-configurable. - base::Optional GetOwnConstantElement(uint32_t index, - bool serialize = false) const; - - Isolate* isolate() const; - - protected: - JSHeapBroker* broker() const; - ObjectData* data() const; - ObjectData* data_; // Should be used only by object() getters. - - private: - friend class JSArrayData; - friend class JSGlobalProxyRef; - friend class JSGlobalProxyData; - friend class JSObjectData; - friend class StringData; - - friend std::ostream& operator<<(std::ostream& os, const ObjectRef& ref); - - JSHeapBroker* broker_; -}; - +class BytecodeAnalysis; +class ObjectRef; std::ostream& operator<<(std::ostream& os, const ObjectRef& ref); -// Temporary class that carries information from a Map. We'd like to remove -// this class and use MapRef instead, but we can't as long as we support the -// kDisabled broker mode. That's because obtaining the MapRef via -// HeapObjectRef::map() requires a HandleScope when the broker is disabled. -// During OptimizeGraph we generally don't have a HandleScope, however. There -// are two places where we therefore use GetHeapObjectType() instead. Both that -// function and this class should eventually be removed. 
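// Nearly every Ref accessor defined in js-heap-broker.cc above follows one
// bimodal shape: with the broker disabled it reads straight from the heap
// (inside Allow* handle scopes), otherwise it reads previously serialized
// data, which is safe without heap access. A toy version of that dispatch,
// with stand-in types (not V8 API):
#include <cassert>

enum class ToyBrokerMode { kDisabled, kSerialized };

class ToyRef {
 public:
  ToyRef(ToyBrokerMode mode, int heap_value, const int* serialized)
      : mode_(mode), heap_value_(heap_value), serialized_(serialized) {}

  int value() const {
    if (mode_ == ToyBrokerMode::kDisabled) {
      return heap_value_;  // direct heap read; needs handle scopes in V8
    }
    assert(serialized_ != nullptr);  // must have been serialized earlier
    return *serialized_;             // heap-free read, usable off-thread
  }

 private:
  ToyBrokerMode mode_;
  int heap_value_;
  const int* serialized_;
};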
-class HeapObjectType { - public: - enum Flag : uint8_t { kUndetectable = 1 << 0, kCallable = 1 << 1 }; - - using Flags = base::Flags; - - HeapObjectType(InstanceType instance_type, Flags flags, - OddballType oddball_type) - : instance_type_(instance_type), - oddball_type_(oddball_type), - flags_(flags) { - DCHECK_EQ(instance_type == ODDBALL_TYPE, - oddball_type != OddballType::kNone); - } - - OddballType oddball_type() const { return oddball_type_; } - InstanceType instance_type() const { return instance_type_; } - Flags flags() const { return flags_; } - - bool is_callable() const { return flags_ & kCallable; } - bool is_undetectable() const { return flags_ & kUndetectable; } - - private: - InstanceType const instance_type_; - OddballType const oddball_type_; - Flags const flags_; -}; - -class HeapObjectRef : public ObjectRef { - public: - using ObjectRef::ObjectRef; - Handle object() const; - - MapRef map() const; - - // See the comment on the HeapObjectType class. - HeapObjectType GetHeapObjectType() const; -}; - -class PropertyCellRef : public HeapObjectRef { - public: - using HeapObjectRef::HeapObjectRef; - Handle object() const; - - PropertyDetails property_details() const; - - void Serialize(); - ObjectRef value() const; -}; - -class JSObjectRef : public HeapObjectRef { - public: - using HeapObjectRef::HeapObjectRef; - Handle object() const; - - uint64_t RawFastDoublePropertyAsBitsAt(FieldIndex index) const; - double RawFastDoublePropertyAt(FieldIndex index) const; - ObjectRef RawFastPropertyAt(FieldIndex index) const; - - FixedArrayBaseRef elements() const; - void EnsureElementsTenured(); - ElementsKind GetElementsKind() const; - - void SerializeObjectCreateMap(); - base::Optional GetObjectCreateMap() const; -}; - -class JSDataViewRef : public JSObjectRef { - public: - using JSObjectRef::JSObjectRef; - Handle object() const; - - size_t byte_length() const; - size_t byte_offset() const; -}; - -class JSBoundFunctionRef : public JSObjectRef { - public: - using JSObjectRef::JSObjectRef; - Handle object() const; - - void Serialize(); - - // The following are available only after calling Serialize(). - ObjectRef bound_target_function() const; - ObjectRef bound_this() const; - FixedArrayRef bound_arguments() const; -}; - -class V8_EXPORT_PRIVATE JSFunctionRef : public JSObjectRef { - public: - using JSObjectRef::JSObjectRef; - Handle object() const; - - bool has_feedback_vector() const; - bool has_initial_map() const; - bool has_prototype() const; - bool PrototypeRequiresRuntimeLookup() const; - - void Serialize(); - bool serialized() const; - - // The following are available only after calling Serialize(). 
- ObjectRef prototype() const; - MapRef initial_map() const; - ContextRef context() const; - NativeContextRef native_context() const; - SharedFunctionInfoRef shared() const; - FeedbackVectorRef feedback_vector() const; - int InitialMapInstanceSizeWithMinSlack() const; - - bool IsSerializedForCompilation() const; -}; - -class JSRegExpRef : public JSObjectRef { - public: - using JSObjectRef::JSObjectRef; - Handle object() const; - - ObjectRef raw_properties_or_hash() const; - ObjectRef data() const; - ObjectRef source() const; - ObjectRef flags() const; - ObjectRef last_index() const; -}; - -class HeapNumberRef : public HeapObjectRef { - public: - using HeapObjectRef::HeapObjectRef; - Handle object() const; - - double value() const; -}; - -class MutableHeapNumberRef : public HeapObjectRef { - public: - using HeapObjectRef::HeapObjectRef; - Handle object() const; - - double value() const; -}; - -class ContextRef : public HeapObjectRef { - public: - using HeapObjectRef::HeapObjectRef; - Handle object() const; - - void SerializeContextChain(); - ContextRef previous() const; - - void SerializeSlot(int index); - ObjectRef get(int index) const; -}; - -#define BROKER_COMPULSORY_NATIVE_CONTEXT_FIELDS(V) \ - V(JSFunction, array_function) \ - V(JSFunction, boolean_function) \ - V(JSFunction, bigint_function) \ - V(JSFunction, number_function) \ - V(JSFunction, object_function) \ - V(JSFunction, promise_function) \ - V(JSFunction, promise_then) \ - V(JSFunction, string_function) \ - V(JSFunction, symbol_function) \ - V(JSGlobalProxy, global_proxy_object) \ - V(JSObject, promise_prototype) \ - V(Map, bound_function_with_constructor_map) \ - V(Map, bound_function_without_constructor_map) \ - V(Map, fast_aliased_arguments_map) \ - V(Map, initial_array_iterator_map) \ - V(Map, initial_string_iterator_map) \ - V(Map, iterator_result_map) \ - V(Map, js_array_holey_double_elements_map) \ - V(Map, js_array_holey_elements_map) \ - V(Map, js_array_holey_smi_elements_map) \ - V(Map, js_array_packed_double_elements_map) \ - V(Map, js_array_packed_elements_map) \ - V(Map, js_array_packed_smi_elements_map) \ - V(Map, sloppy_arguments_map) \ - V(Map, slow_object_with_null_prototype_map) \ - V(Map, strict_arguments_map) \ - V(ScriptContextTable, script_context_table) \ - V(SharedFunctionInfo, promise_capability_default_reject_shared_fun) \ - V(SharedFunctionInfo, promise_catch_finally_shared_fun) \ - V(SharedFunctionInfo, promise_then_finally_shared_fun) \ - V(SharedFunctionInfo, promise_capability_default_resolve_shared_fun) - -// Those are set by Bootstrapper::ExportFromRuntime, which may not yet have -// happened when Turbofan is invoked via --always-opt. 
-#define BROKER_OPTIONAL_NATIVE_CONTEXT_FIELDS(V) \ - V(Map, async_function_object_map) \ - V(Map, map_key_iterator_map) \ - V(Map, map_key_value_iterator_map) \ - V(Map, map_value_iterator_map) \ - V(Map, set_key_value_iterator_map) \ - V(Map, set_value_iterator_map) - -#define BROKER_NATIVE_CONTEXT_FIELDS(V) \ - BROKER_COMPULSORY_NATIVE_CONTEXT_FIELDS(V) \ - BROKER_OPTIONAL_NATIVE_CONTEXT_FIELDS(V) - -class NativeContextRef : public ContextRef { - public: - using ContextRef::ContextRef; - Handle object() const; - - void Serialize(); - -#define DECL_ACCESSOR(type, name) type##Ref name() const; - BROKER_NATIVE_CONTEXT_FIELDS(DECL_ACCESSOR) -#undef DECL_ACCESSOR - - MapRef GetFunctionMapFromIndex(int index) const; - MapRef GetInitialJSArrayMap(ElementsKind kind) const; - base::Optional GetConstructorFunction(const MapRef& map) const; -}; - -class NameRef : public HeapObjectRef { - public: - using HeapObjectRef::HeapObjectRef; - Handle object() const; - - bool IsUniqueName() const; -}; - -class ScriptContextTableRef : public HeapObjectRef { - public: - using HeapObjectRef::HeapObjectRef; - Handle object() const; - - struct LookupResult { - ContextRef context; - bool immutable; - int index; - }; - - base::Optional lookup(const NameRef& name) const; -}; - -class DescriptorArrayRef : public HeapObjectRef { - public: - using HeapObjectRef::HeapObjectRef; - Handle object() const; -}; - -class FeedbackCellRef : public HeapObjectRef { - public: - using HeapObjectRef::HeapObjectRef; - Handle object() const; - - HeapObjectRef value() const; -}; - -class FeedbackVectorRef : public HeapObjectRef { - public: - using HeapObjectRef::HeapObjectRef; - Handle object() const; - - ObjectRef get(FeedbackSlot slot) const; - - void SerializeSlots(); -}; - -class FunctionTemplateInfoRef : public HeapObjectRef { - public: - using HeapObjectRef::HeapObjectRef; - Handle object() const; - - void Serialize(); - ObjectRef call_code() const; -}; - -class CallHandlerInfoRef : public HeapObjectRef { - public: - using HeapObjectRef::HeapObjectRef; - Handle object() const; - - Address callback() const; - - void Serialize(); - ObjectRef data() const; -}; - -class AllocationSiteRef : public HeapObjectRef { - public: - using HeapObjectRef::HeapObjectRef; - Handle object() const; - - bool PointsToLiteral() const; - AllocationType GetAllocationType() const; - ObjectRef nested_site() const; - - // {IsFastLiteral} determines whether the given array or object literal - // boilerplate satisfies all limits to be considered for fast deep-copying - // and computes the total size of all objects that are part of the graph. - // - // If PointsToLiteral() is false, then IsFastLiteral() is also false. - bool IsFastLiteral() const; - // We only serialize boilerplate if IsFastLiteral is true. 
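// The BROKER_NATIVE_CONTEXT_FIELDS/DECL_ACCESSOR expansion earlier in this
// hunk is a conventional X-macro: one field list drives many generated
// accessors. A self-contained example of the pattern, with toy names that
// are not part of V8:
#include <string>

#define TOY_NATIVE_CONTEXT_FIELDS(V) \
  V(int, array_function)             \
  V(std::string, promise_then)

class ToyNativeContext {
 public:
#define TOY_DECL_ACCESSOR(type, name) \
  type name() const { return name##_; }
  TOY_NATIVE_CONTEXT_FIELDS(TOY_DECL_ACCESSOR)
#undef TOY_DECL_ACCESSOR

 private:
  int array_function_ = 0;
  std::string promise_then_;
};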
- base::Optional boilerplate() const; - - ElementsKind GetElementsKind() const; - bool CanInlineCall() const; -}; - -class V8_EXPORT_PRIVATE MapRef : public HeapObjectRef { - public: - using HeapObjectRef::HeapObjectRef; - Handle object() const; - - int instance_size() const; - InstanceType instance_type() const; - int GetInObjectProperties() const; - int GetInObjectPropertiesStartInWords() const; - int NumberOfOwnDescriptors() const; - int GetInObjectPropertyOffset(int index) const; - int constructor_function_index() const; - int NextFreePropertyIndex() const; - int UnusedPropertyFields() const; - ElementsKind elements_kind() const; - bool is_stable() const; - bool is_extensible() const; - bool is_constructor() const; - bool has_prototype_slot() const; - bool is_access_check_needed() const; - bool is_deprecated() const; - bool CanBeDeprecated() const; - bool CanTransition() const; - bool IsInobjectSlackTrackingInProgress() const; - bool is_dictionary_map() const; - bool IsFixedCowArrayMap() const; - bool IsPrimitiveMap() const; - bool is_undetectable() const; - bool is_callable() const; - bool has_indexed_interceptor() const; - bool has_hidden_prototype() const; - bool is_migration_target() const; - bool supports_fast_array_iteration() const; - bool supports_fast_array_resize() const; - bool IsMapOfCurrentGlobalProxy() const; - - OddballType oddball_type() const; - -#define DEF_TESTER(Type, ...) bool Is##Type##Map() const; - INSTANCE_TYPE_CHECKERS(DEF_TESTER) -#undef DEF_TESTER - - void SerializeBackPointer(); - HeapObjectRef GetBackPointer() const; - - void SerializePrototype(); - bool serialized_prototype() const; - HeapObjectRef prototype() const; - - void SerializeForElementLoad(); - - void SerializeForElementStore(); - bool HasOnlyStablePrototypesWithFastElements( - ZoneVector* prototype_maps); - - // Concerning the underlying instance_descriptors: - void SerializeOwnDescriptors(); - void SerializeOwnDescriptor(int descriptor_index); - MapRef FindFieldOwner(int descriptor_index) const; - PropertyDetails GetPropertyDetails(int descriptor_index) const; - NameRef GetPropertyKey(int descriptor_index) const; - FieldIndex GetFieldIndexFor(int descriptor_index) const; - ObjectRef GetFieldType(int descriptor_index) const; - bool IsUnboxedDoubleField(int descriptor_index) const; - - // Available after calling JSFunctionRef::Serialize on a function that has - // this map as initial map. 
- ObjectRef GetConstructor() const; - base::Optional AsElementsKind(ElementsKind kind) const; -}; - -class FixedArrayBaseRef : public HeapObjectRef { - public: - using HeapObjectRef::HeapObjectRef; - Handle object() const; - - int length() const; -}; - -class FixedArrayRef : public FixedArrayBaseRef { - public: - using FixedArrayBaseRef::FixedArrayBaseRef; - Handle object() const; - - ObjectRef get(int i) const; -}; - -class FixedDoubleArrayRef : public FixedArrayBaseRef { - public: - using FixedArrayBaseRef::FixedArrayBaseRef; - Handle object() const; - - double get_scalar(int i) const; - bool is_the_hole(int i) const; -}; - -class BytecodeArrayRef : public FixedArrayBaseRef { - public: - using FixedArrayBaseRef::FixedArrayBaseRef; - Handle object() const; - - int register_count() const; - int parameter_count() const; - interpreter::Register incoming_new_target_or_generator_register() const; -}; - -class JSArrayRef : public JSObjectRef { - public: - using JSObjectRef::JSObjectRef; - Handle object() const; - - ObjectRef length() const; - - // Return the element at key {index} if the array has a copy-on-write elements - // storage and {index} is known to be an own data property. - base::Optional GetOwnCowElement(uint32_t index, - bool serialize = false) const; -}; - -class ScopeInfoRef : public HeapObjectRef { - public: - using HeapObjectRef::HeapObjectRef; - Handle object() const; - - int ContextLength() const; -}; - -#define BROKER_SFI_FIELDS(V) \ - V(int, internal_formal_parameter_count) \ - V(bool, has_duplicate_parameters) \ - V(int, function_map_index) \ - V(FunctionKind, kind) \ - V(LanguageMode, language_mode) \ - V(bool, native) \ - V(bool, HasBreakInfo) \ - V(bool, HasBuiltinId) \ - V(bool, construct_as_builtin) \ - V(bool, HasBytecodeArray) \ - V(bool, is_safe_to_skip_arguments_adaptor) \ - V(bool, IsInlineable) \ - V(bool, is_compiled) - -class V8_EXPORT_PRIVATE SharedFunctionInfoRef : public HeapObjectRef { - public: - using HeapObjectRef::HeapObjectRef; - Handle object() const; - - int builtin_id() const; - BytecodeArrayRef GetBytecodeArray() const; - -#define DECL_ACCESSOR(type, name) type name() const; - BROKER_SFI_FIELDS(DECL_ACCESSOR) -#undef DECL_ACCESSOR - - bool IsSerializedForCompilation(FeedbackVectorRef feedback) const; - void SetSerializedForCompilation(FeedbackVectorRef feedback); -}; - -class StringRef : public NameRef { - public: - using NameRef::NameRef; - Handle object() const; - - int length() const; - uint16_t GetFirstChar(); - base::Optional ToNumber(); - bool IsSeqString() const; - bool IsExternalString() const; -}; - -class SymbolRef : public NameRef { - public: - using NameRef::NameRef; - Handle object() const; -}; - -class JSTypedArrayRef : public JSObjectRef { - public: - using JSObjectRef::JSObjectRef; - Handle object() const; - - bool is_on_heap() const; - size_t length() const; - void* external_pointer() const; - - void Serialize(); - bool serialized() const; - - HeapObjectRef buffer() const; -}; - -class ModuleRef : public HeapObjectRef { - public: - using HeapObjectRef::HeapObjectRef; - Handle object() const; - - void Serialize(); - - CellRef GetCell(int cell_index) const; -}; - -class CellRef : public HeapObjectRef { - public: - using HeapObjectRef::HeapObjectRef; - Handle object() const; - - ObjectRef value() const; -}; - -class JSGlobalProxyRef : public JSObjectRef { - public: - using JSObjectRef::JSObjectRef; - Handle object() const; - - // If {serialize} is false: - // If the property is known to exist as a property cell (on the global - // 
object), return that property cell. Otherwise (not known to exist as a - // property cell or known not to exist as a property cell) return nothing. - // If {serialize} is true: - // Like above but potentially access the heap and serialize the necessary - // information. - base::Optional GetPropertyCell(NameRef const& name, - bool serialize = false) const; -}; - -class CodeRef : public HeapObjectRef { - public: - using HeapObjectRef::HeapObjectRef; - Handle object() const; -}; - -class InternalizedStringRef : public StringRef { - public: - using StringRef::StringRef; - Handle object() const; -}; - -class ElementAccessFeedback; -class NamedAccessFeedback; - -class ProcessedFeedback : public ZoneObject { - public: - enum Kind { kInsufficient, kGlobalAccess, kNamedAccess, kElementAccess }; - Kind kind() const { return kind_; } - - ElementAccessFeedback const* AsElementAccess() const; - NamedAccessFeedback const* AsNamedAccess() const; - - protected: - explicit ProcessedFeedback(Kind kind) : kind_(kind) {} - - private: - Kind const kind_; -}; - -class InsufficientFeedback final : public ProcessedFeedback { - public: - InsufficientFeedback(); -}; - -class GlobalAccessFeedback : public ProcessedFeedback { - public: - explicit GlobalAccessFeedback(PropertyCellRef cell); - GlobalAccessFeedback(ContextRef script_context, int slot_index, - bool immutable); - - bool IsPropertyCell() const; - PropertyCellRef property_cell() const; - - bool IsScriptContextSlot() const { return !IsPropertyCell(); } - ContextRef script_context() const; - int slot_index() const; - bool immutable() const; - - base::Optional GetConstantHint() const; - - private: - ObjectRef const cell_or_context_; - int const index_and_immutable_; -}; - -class ElementAccessFeedback : public ProcessedFeedback { - public: - explicit ElementAccessFeedback(Zone* zone); - - // No transition sources appear in {receiver_maps}. - // All transition targets appear in {receiver_maps}. - ZoneVector> receiver_maps; - ZoneVector, Handle>> transitions; - - class MapIterator { - public: - bool done() const; - void advance(); - MapRef current() const; - - private: - friend class ElementAccessFeedback; - - explicit MapIterator(ElementAccessFeedback const& processed, - JSHeapBroker* broker); - - ElementAccessFeedback const& processed_; - JSHeapBroker* const broker_; - size_t index_ = 0; - }; - - // Iterator over all maps: first {receiver_maps}, then transition sources. 
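// GlobalAccessFeedback above stores either a property cell or a script
// context plus an index_and_immutable_ field. One plausible encoding of
// {slot_index, immutable} in a single int, sketched with stand-in names
// (the exact bit layout in V8 may differ):
#include <cassert>

class ToySlotFeedback {
 public:
  ToySlotFeedback(int slot_index, bool immutable)
      : index_and_immutable_(slot_index * 2 + (immutable ? 1 : 0)) {
    assert(slot_index >= 0);
  }
  int slot_index() const { return index_and_immutable_ / 2; }
  bool immutable() const { return (index_and_immutable_ % 2) != 0; }

 private:
  int const index_and_immutable_;
};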
- MapIterator all_maps(JSHeapBroker* broker) const; -}; - -class NamedAccessFeedback : public ProcessedFeedback { - public: - NamedAccessFeedback(NameRef const& name, - ZoneVector const& access_infos); - - NameRef const& name() const { return name_; } - ZoneVector const& access_infos() const { - return access_infos_; - } - - private: - NameRef const name_; - ZoneVector const access_infos_; -}; - struct FeedbackSource { FeedbackSource(Handle vector_, FeedbackSlot slot_) : vector(vector_), slot(slot_) {} @@ -821,26 +49,28 @@ struct FeedbackSource { }; }; -#define TRACE_BROKER(broker, x) \ - do { \ - if (FLAG_trace_heap_broker_verbose) broker->Trace() << x << '\n'; \ +#define TRACE_BROKER(broker, x) \ + do { \ + if (broker->tracing_enabled() && FLAG_trace_heap_broker_verbose) \ + broker->Trace() << x << '\n'; \ } while (false) #define TRACE_BROKER_MISSING(broker, x) \ do { \ - if (FLAG_trace_heap_broker) \ + if (broker->tracing_enabled()) \ broker->Trace() << __FUNCTION__ << ": missing " << x << '\n'; \ } while (false) class V8_EXPORT_PRIVATE JSHeapBroker { public: - JSHeapBroker(Isolate* isolate, Zone* broker_zone); + JSHeapBroker(Isolate* isolate, Zone* broker_zone, bool tracing_enabled); void SetNativeContextRef(); void SerializeStandardObjects(); Isolate* isolate() const { return isolate_; } Zone* zone() const { return current_zone_; } + bool tracing_enabled() const { return tracing_enabled_; } NativeContextRef native_context() const { return native_context_.value(); } PerIsolateCompilerCache* compiler_cache() const { return compiler_cache_; } @@ -875,12 +105,25 @@ class V8_EXPORT_PRIVATE JSHeapBroker { // TODO(neis): Move these into serializer when we're always in the background. ElementAccessFeedback const* ProcessFeedbackMapsForElementAccess( - MapHandles const& maps); + MapHandles const& maps, KeyedAccessMode const& keyed_mode); GlobalAccessFeedback const* ProcessFeedbackForGlobalAccess( FeedbackSource const& source); + BytecodeAnalysis const& GetBytecodeAnalysis( + Handle bytecode_array, BailoutId osr_offset, + bool analyze_liveness, bool serialize); + base::Optional GetNameFeedback(FeedbackNexus const& nexus); + // If there is no result stored for {map}, we return an Invalid + // PropertyAccessInfo. + PropertyAccessInfo GetAccessInfoForLoadingThen(MapRef map); + void CreateAccessInfoForLoadingThen(MapRef map, + CompilationDependencies* dependencies); + PropertyAccessInfo GetAccessInfoForLoadingExec(MapRef map); + PropertyAccessInfo const& CreateAccessInfoForLoadingExec( + MapRef map, CompilationDependencies* dependencies); + std::ostream& Trace(); void IncrementTracingIndentation(); void DecrementTracingIndentation(); @@ -902,12 +145,19 @@ class V8_EXPORT_PRIVATE JSHeapBroker { Handle::equal_to> array_and_object_prototypes_; BrokerMode mode_ = kDisabled; + bool const tracing_enabled_; StdoutStream trace_out_; unsigned trace_indentation_ = 0; PerIsolateCompilerCache* compiler_cache_; ZoneUnorderedMap feedback_; + ZoneUnorderedMap bytecode_analyses_; + typedef ZoneUnorderedMap + MapToAccessInfos; + MapToAccessInfos ais_for_loading_then_; + MapToAccessInfos ais_for_loading_exec_; static const size_t kMinimalRefsBucketCount = 8; // must be power of 2 static const size_t kInitialRefsBucketCount = 1024; // must be power of 2 @@ -948,6 +198,23 @@ Reduction NoChangeBecauseOfMissingData(JSHeapBroker* broker, // compilation is finished. 
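// JSHeapBroker::GetBytecodeAnalysis, declared above and defined in
// js-heap-broker.cc earlier in this diff, stores at most one analysis per
// bytecode array and may reuse an OSR analysis for a non-OSR request, but
// never the reverse. A toy model of that caching rule (stand-in types,
// -1 playing the role of BailoutId::None()):
#include <cassert>
#include <map>

struct ToyAnalysis {
  int osr_bailout_id;  // -1 means "no OSR entry requested"
};

class ToyAnalysisCache {
 public:
  const ToyAnalysis& Get(const void* bytecode, int osr_bailout_id) {
    auto it = entries_.find(bytecode);
    if (it != entries_.end()) {
      // A cached OSR analysis may serve non-OSR requests; a cached
      // non-OSR analysis must not serve an OSR request.
      assert(osr_bailout_id == it->second.osr_bailout_id ||
             osr_bailout_id == -1);
      return it->second;
    }
    return entries_.emplace(bytecode, ToyAnalysis{osr_bailout_id})
        .first->second;
  }

 private:
  std::map<const void*, ToyAnalysis> entries_;
};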
bool CanInlineElementAccess(MapRef const& map); +class OffHeapBytecodeArray final : public interpreter::AbstractBytecodeArray { + public: + explicit OffHeapBytecodeArray(BytecodeArrayRef bytecode_array); + + int length() const override; + int parameter_count() const override; + uint8_t get(int index) const override; + void set(int index, uint8_t value) override; + Address GetFirstBytecodeAddress() const override; + Handle GetConstantAtIndex(int index, Isolate* isolate) const override; + bool IsConstantAtIndexSmi(int index) const override; + Smi GetConstantAtIndexAsSmi(int index) const override; + + private: + BytecodeArrayRef array_; +}; + } // namespace compiler } // namespace internal } // namespace v8 diff --git a/deps/v8/src/compiler/js-heap-copy-reducer.cc b/deps/v8/src/compiler/js-heap-copy-reducer.cc index cc48ae80cbc026..7e7c9e3a0e1e52 100644 --- a/deps/v8/src/compiler/js-heap-copy-reducer.cc +++ b/deps/v8/src/compiler/js-heap-copy-reducer.cc @@ -30,8 +30,7 @@ Reduction JSHeapCopyReducer::Reduce(Node* node) { ObjectRef object(broker(), HeapConstantOf(node->op())); if (object.IsJSFunction()) object.AsJSFunction().Serialize(); if (object.IsJSObject()) object.AsJSObject().SerializeObjectCreateMap(); - if (object.IsModule()) object.AsModule().Serialize(); - if (object.IsContext()) object.AsContext().SerializeContextChain(); + if (object.IsSourceTextModule()) object.AsSourceTextModule().Serialize(); break; } case IrOpcode::kJSCreateArray: { diff --git a/deps/v8/src/compiler/js-inlining-heuristic.cc b/deps/v8/src/compiler/js-inlining-heuristic.cc index f78635b1397560..e11d6b59a30349 100644 --- a/deps/v8/src/compiler/js-inlining-heuristic.cc +++ b/deps/v8/src/compiler/js-inlining-heuristic.cc @@ -7,6 +7,7 @@ #include "src/codegen/optimized-compilation-info.h" #include "src/compiler/common-operator.h" #include "src/compiler/compiler-source-position-table.h" +#include "src/compiler/js-heap-broker.h" #include "src/compiler/node-matchers.h" #include "src/compiler/simplified-operator.h" #include "src/objects/objects-inl.h" @@ -21,15 +22,9 @@ namespace compiler { } while (false) namespace { - -bool IsSmallInlineFunction(BytecodeArrayRef bytecode) { - // Forcibly inline small functions. - if (bytecode.length() <= FLAG_max_inlined_bytecode_size_small) { - return true; - } - return false; +bool IsSmall(BytecodeArrayRef bytecode) { + return bytecode.length() <= FLAG_max_inlined_bytecode_size_small; } - } // namespace JSInliningHeuristic::Candidate JSInliningHeuristic::CollectFunctions( @@ -65,7 +60,7 @@ JSInliningHeuristic::Candidate JSInliningHeuristic::CollectFunctions( out.functions[n] = m.Ref(broker()).AsJSFunction(); JSFunctionRef function = out.functions[n].value(); if (function.IsSerializedForCompilation()) { - out.bytecode[n] = function.shared().GetBytecodeArray(), isolate(); + out.bytecode[n] = function.shared().GetBytecodeArray(); } } out.num_functions = value_input_count; @@ -91,6 +86,11 @@ Reduction JSInliningHeuristic::Reduce(Node* node) { if (!IrOpcode::IsInlineeOpcode(node->opcode())) return NoChange(); + if (total_inlined_bytecode_size_ >= FLAG_max_inlined_bytecode_size_absolute && + mode_ != kStressInlining) { + return NoChange(); + } + // Check if we already saw that {node} before, and if so, just skip it. 
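// Together with the absolute-budget early-out added above, the seen_ set
// used here makes Reduce both idempotent and bounded. Loosely modeled
// below with toy types and a made-up limit (the real code checks the
// budget and the set at separate points):
#include <set>

class ToyInliningGate {
 public:
  explicit ToyInliningGate(int absolute_budget) : budget_(absolute_budget) {}

  // True only on the first visit of a node that still fits the budget.
  bool Admit(int node_id, int bytecode_size) {
    if (spent_ >= budget_) return false;               // budget exhausted
    if (!seen_.insert(node_id).second) return false;   // already considered
    spent_ += bytecode_size;
    return true;
  }

 private:
  int budget_;
  int spent_ = 0;
  std::set<int> seen_;
};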
if (seen_.find(node->id()) != seen_.end()) return NoChange(); seen_.insert(node->id()); @@ -107,7 +107,7 @@ Reduction JSInliningHeuristic::Reduce(Node* node) { return NoChange(); } - bool can_inline = false, force_inline_small = true; + bool can_inline_candidate = false, candidate_is_small = true; candidate.total_size = 0; Node* frame_state = NodeProperties::GetFrameStateInput(node); FrameStateInfo const& frame_info = FrameStateInfoOf(frame_state->op()); @@ -155,15 +155,12 @@ Reduction JSInliningHeuristic::Reduce(Node* node) { // serialized. BytecodeArrayRef bytecode = candidate.bytecode[i].value(); if (candidate.can_inline_function[i]) { - can_inline = true; + can_inline_candidate = true; candidate.total_size += bytecode.length(); } - // We don't force inline small functions if any of them is not inlineable. - if (!IsSmallInlineFunction(bytecode)) { - force_inline_small = false; - } + candidate_is_small = candidate_is_small && IsSmall(bytecode); } - if (!can_inline) return NoChange(); + if (!can_inline_candidate) return NoChange(); // Gather feedback on how often this call site has been hit before. if (node->opcode() == IrOpcode::kJSCall) { @@ -195,9 +192,8 @@ Reduction JSInliningHeuristic::Reduce(Node* node) { } // Forcibly inline small functions here. In the case of polymorphic inlining - // force_inline_small is set only when all functions are small. - if (force_inline_small && - cumulative_count_ < FLAG_max_inlined_bytecode_size_absolute) { + // candidate_is_small is set only when all functions are small. + if (candidate_is_small) { TRACE("Inlining small function(s) at call site #%d:%s\n", node->id(), node->op()->mnemonic()); return InlineCandidate(candidate, true); @@ -221,21 +217,24 @@ void JSInliningHeuristic::Finalize() { Candidate candidate = *i; candidates_.erase(i); + // Make sure we don't try to inline dead candidate nodes. + if (candidate.node->IsDead()) { + continue; + } + // Make sure we have some extra budget left, so that any small functions // exposed by this function would be given a chance to inline. double size_of_candidate = candidate.total_size * FLAG_reserve_inline_budget_scale_factor; - int total_size = cumulative_count_ + static_cast(size_of_candidate); + int total_size = + total_inlined_bytecode_size_ + static_cast(size_of_candidate); if (total_size > FLAG_max_inlined_bytecode_size_cumulative) { // Try if any smaller functions are available to inline. continue; } - // Make sure we don't try to inline dead candidate nodes. - if (!candidate.node->IsDead()) { - Reduction const reduction = InlineCandidate(candidate, false); - if (reduction.Changed()) return; - } + Reduction const reduction = InlineCandidate(candidate, false); + if (reduction.Changed()) return; } } @@ -630,7 +629,7 @@ Reduction JSInliningHeuristic::InlineCandidate(Candidate const& candidate, if (num_calls == 1) { Reduction const reduction = inliner_.ReduceJSCall(node); if (reduction.Changed()) { - cumulative_count_ += candidate.bytecode[0].value().length(); + total_inlined_bytecode_size_ += candidate.bytecode[0].value().length(); } return reduction; } @@ -688,20 +687,19 @@ Reduction JSInliningHeuristic::InlineCandidate(Candidate const& candidate, ReplaceWithValue(node, value, effect, control); // Inline the individual, cloned call sites. 
- for (int i = 0; i < num_calls; ++i) { - Node* node = calls[i]; + for (int i = 0; i < num_calls && total_inlined_bytecode_size_ < + FLAG_max_inlined_bytecode_size_absolute; + ++i) { if (candidate.can_inline_function[i] && - (small_function || - cumulative_count_ < FLAG_max_inlined_bytecode_size_cumulative)) { + (small_function || total_inlined_bytecode_size_ < + FLAG_max_inlined_bytecode_size_cumulative)) { + Node* node = calls[i]; Reduction const reduction = inliner_.ReduceJSCall(node); if (reduction.Changed()) { + total_inlined_bytecode_size_ += candidate.bytecode[i]->length(); // Killing the call node is not strictly necessary, but it is safer to // make sure we do not resurrect the node. node->Kill(); - // Small functions don't count towards the budget. - if (!small_function) { - cumulative_count_ += candidate.bytecode[i]->length(); - } } } } diff --git a/deps/v8/src/compiler/js-inlining-heuristic.h b/deps/v8/src/compiler/js-inlining-heuristic.h index 99ad258c31e0dc..b143e9b67fd846 100644 --- a/deps/v8/src/compiler/js-inlining-heuristic.h +++ b/deps/v8/src/compiler/js-inlining-heuristic.h @@ -97,7 +97,7 @@ class JSInliningHeuristic final : public AdvancedReducer { SourcePositionTable* source_positions_; JSGraph* const jsgraph_; JSHeapBroker* const broker_; - int cumulative_count_ = 0; + int total_inlined_bytecode_size_ = 0; }; } // namespace compiler diff --git a/deps/v8/src/compiler/js-inlining.cc b/deps/v8/src/compiler/js-inlining.cc index e43e710da779e5..91cbea2346a37b 100644 --- a/deps/v8/src/compiler/js-inlining.cc +++ b/deps/v8/src/compiler/js-inlining.cc @@ -7,11 +7,13 @@ #include "src/ast/ast.h" #include "src/codegen/compiler.h" #include "src/codegen/optimized-compilation-info.h" +#include "src/codegen/tick-counter.h" #include "src/compiler/all-nodes.h" #include "src/compiler/bytecode-graph-builder.h" #include "src/compiler/common-operator.h" #include "src/compiler/compiler-source-position-table.h" #include "src/compiler/graph-reducer.h" +#include "src/compiler/js-heap-broker.h" #include "src/compiler/js-operator.h" #include "src/compiler/node-matchers.h" #include "src/compiler/node-properties.h" @@ -466,14 +468,13 @@ Reduction JSInliner::ReduceJSCall(Node* node) { AllowHandleAllocation allow_handle_alloc; AllowHeapAllocation allow_heap_alloc; AllowCodeDependencyChange allow_code_dep_change; - Handle native_context = - handle(info_->native_context(), isolate()); - - BuildGraphFromBytecode(broker(), zone(), bytecode_array.object(), - shared_info.value().object(), - feedback_vector.object(), BailoutId::None(), - jsgraph(), call.frequency(), source_positions_, - native_context, inlining_id, flags); + CallFrequency frequency = call.frequency(); + Handle native_context(info_->native_context(), isolate()); + BuildGraphFromBytecode( + broker(), zone(), bytecode_array.object(), + shared_info.value().object(), feedback_vector.object(), + BailoutId::None(), jsgraph(), frequency, source_positions_, + native_context, inlining_id, flags, &info_->tick_counter()); } // Extract the inlinee start/end nodes. 
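// The rewritten loop in js-inlining-heuristic.cc above folds the global
// budget into the loop condition, so cloned call sites stop being inlined
// the moment the running total crosses the limit. The effect in isolation;
// kAbsoluteBudget is a made-up stand-in for
// FLAG_max_inlined_bytecode_size_absolute:
#include <iostream>
#include <vector>

int main() {
  const int kAbsoluteBudget = 100;
  const std::vector<int> bytecode_sizes = {40, 30, 50, 10};
  int total_inlined = 0;
  for (std::size_t i = 0;
       i < bytecode_sizes.size() && total_inlined < kAbsoluteBudget; ++i) {
    total_inlined += bytecode_sizes[i];
    std::cout << "inlined call " << i << ", total " << total_inlined << '\n';
  }
  // Prints calls 0..2; call 3 is skipped because the total reached 120.
  return 0;
}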
diff --git a/deps/v8/src/compiler/js-inlining.h b/deps/v8/src/compiler/js-inlining.h
index 94a9e71b2e27d0..f50f7b591d2559 100644
--- a/deps/v8/src/compiler/js-inlining.h
+++ b/deps/v8/src/compiler/js-inlining.h
@@ -59,7 +59,8 @@ class JSInliner final : public AdvancedReducer {
   SourcePositionTable* const source_positions_;
 
   base::Optional<SharedFunctionInfoRef> DetermineCallTarget(Node* node);
-  FeedbackVectorRef DetermineCallContext(Node* node, Node*& context_out);
+  FeedbackVectorRef DetermineCallContext(
+      Node* node, Node*& context_out);  // NOLINT(runtime/references)
 
   Node* CreateArtificialFrameState(Node* node, Node* outer_frame_state,
                                    int parameter_count, BailoutId bailout_id,
diff --git a/deps/v8/src/compiler/js-native-context-specialization.cc b/deps/v8/src/compiler/js-native-context-specialization.cc
index 312ab38f517380..8f7552baa18458 100644
--- a/deps/v8/src/compiler/js-native-context-specialization.cc
+++ b/deps/v8/src/compiler/js-native-context-specialization.cc
@@ -33,12 +33,6 @@ namespace v8 {
 namespace internal {
 namespace compiler {
 
-// This is needed for gc_mole which will compile this file without the full set
-// of GN defined macros.
-#ifndef V8_TYPED_ARRAY_MAX_SIZE_IN_HEAP
-#define V8_TYPED_ARRAY_MAX_SIZE_IN_HEAP 64
-#endif
-
 namespace {
 
 bool HasNumberMaps(JSHeapBroker* broker, ZoneVector<Handle<Map>> const& maps) {
@@ -513,8 +507,8 @@ JSNativeContextSpecialization::InferHasInPrototypeChain(
     Node* receiver, Node* effect, Handle<HeapObject> prototype) {
   ZoneHandleSet<Map> receiver_maps;
   NodeProperties::InferReceiverMapsResult result =
-      NodeProperties::InferReceiverMaps(broker(), receiver, effect,
-                                        &receiver_maps);
+      NodeProperties::InferReceiverMapsUnsafe(broker(), receiver, effect,
+                                              &receiver_maps);
   if (result == NodeProperties::kNoReceiverMaps) return kMayBeInPrototypeChain;
 
   // Try to determine either that all of the {receiver_maps} have the given
@@ -686,6 +680,7 @@ Reduction JSNativeContextSpecialization::ReduceJSPromiseResolve(Node* node) {
 
 // ES section #sec-promise-resolve-functions
 Reduction JSNativeContextSpecialization::ReduceJSResolvePromise(Node* node) {
+  DisallowHeapAccessIf no_heap_access(FLAG_concurrent_inlining);
   DCHECK_EQ(IrOpcode::kJSResolvePromise, node->opcode());
 
   Node* promise = NodeProperties::GetValueInput(node, 0);
   Node* resolution = NodeProperties::GetValueInput(node, 1);
@@ -702,9 +697,17 @@ Reduction JSNativeContextSpecialization::ReduceJSResolvePromise(Node* node) {
   ZoneVector<PropertyAccessInfo> access_infos(graph()->zone());
   AccessInfoFactory access_info_factory(broker(), dependencies(),
                                         graph()->zone());
-  access_info_factory.ComputePropertyAccessInfos(
-      resolution_maps, factory()->then_string(), AccessMode::kLoad,
-      &access_infos);
+  if (!FLAG_concurrent_inlining) {
+    access_info_factory.ComputePropertyAccessInfos(
+        resolution_maps, factory()->then_string(), AccessMode::kLoad,
+        &access_infos);
+  } else {
+    // Obtain pre-computed access infos from the broker.
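// The broker-side cache consulted here is filled during serialization
// (CreateAccessInfoForLoadingThen) and read during reduction
// (GetAccessInfoForLoadingThen), when heap access is forbidden. Toy shape
// of that two-phase cache, with stand-in types (not V8 API):
#include <map>
#include <optional>

struct ToyAccessInfo {
  int field_offset = -1;
};

class ToyBrokerCache {
 public:
  // Serialization phase: heap access is still allowed.
  void CreateForMap(int map_id, ToyAccessInfo info) { infos_[map_id] = info; }

  // Reduction phase: no heap access; a miss yields "invalid" feedback.
  std::optional<ToyAccessInfo> GetForMap(int map_id) const {
    auto it = infos_.find(map_id);
    if (it == infos_.end()) return std::nullopt;
    return it->second;
  }

 private:
  std::map<int, ToyAccessInfo> infos_;
};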
+ for (auto map : resolution_maps) { + MapRef map_ref(broker(), map); + access_infos.push_back(broker()->GetAccessInfoForLoadingThen(map_ref)); + } + } PropertyAccessInfo access_info = access_info_factory.FinalizePropertyAccessInfosAsOne(access_infos, AccessMode::kLoad); @@ -975,9 +978,8 @@ Reduction JSNativeContextSpecialization::ReduceGlobalAccess( } Reduction JSNativeContextSpecialization::ReduceJSLoadGlobal(Node* node) { - DCHECK_EQ(IrOpcode::kJSLoadGlobal, node->opcode()); DisallowHeapAccessIf no_heap_acess(FLAG_concurrent_inlining); - + DCHECK_EQ(IrOpcode::kJSLoadGlobal, node->opcode()); LoadGlobalParameters const& p = LoadGlobalParametersOf(node->op()); if (!p.feedback().IsValid()) return NoChange(); FeedbackSource source(p.feedback()); @@ -1007,9 +1009,8 @@ Reduction JSNativeContextSpecialization::ReduceJSLoadGlobal(Node* node) { } Reduction JSNativeContextSpecialization::ReduceJSStoreGlobal(Node* node) { - DCHECK_EQ(IrOpcode::kJSStoreGlobal, node->opcode()); DisallowHeapAccessIf no_heap_acess(FLAG_concurrent_inlining); - + DCHECK_EQ(IrOpcode::kJSStoreGlobal, node->opcode()); Node* value = NodeProperties::GetValueInput(node, 0); StoreGlobalParameters const& p = StoreGlobalParametersOf(node->op()); @@ -1059,7 +1060,8 @@ Reduction JSNativeContextSpecialization::ReduceNamedAccess( Node* control = NodeProperties::GetControlInput(node); ZoneVector access_infos(zone()); - AccessInfoFactory access_info_factory(broker(), dependencies(), zone()); + AccessInfoFactory access_info_factory(broker(), dependencies(), + graph()->zone()); if (!access_info_factory.FinalizePropertyAccessInfos( feedback.access_infos(), access_mode, &access_infos)) { return NoChange(); @@ -1298,7 +1300,7 @@ Reduction JSNativeContextSpecialization::ReduceNamedAccess( } Reduction JSNativeContextSpecialization::ReduceNamedAccessFromNexus( - Node* node, Node* value, FeedbackNexus const& nexus, NameRef const& name, + Node* node, Node* value, FeedbackSource const& source, NameRef const& name, AccessMode access_mode) { DCHECK(node->opcode() == IrOpcode::kJSLoadNamed || node->opcode() == IrOpcode::kJSStoreNamed || @@ -1312,11 +1314,11 @@ Reduction JSNativeContextSpecialization::ReduceNamedAccessFromNexus( return ReduceGlobalAccess(node, nullptr, value, name, access_mode); } - return ReducePropertyAccessUsingProcessedFeedback(node, nullptr, name, value, - nexus, access_mode); + return ReducePropertyAccess(node, nullptr, name, value, source, access_mode); } Reduction JSNativeContextSpecialization::ReduceJSLoadNamed(Node* node) { + DisallowHeapAccessIf no_heap_access(FLAG_concurrent_inlining); DCHECK_EQ(IrOpcode::kJSLoadNamed, node->opcode()); NamedAccess const& p = NamedAccessOf(node->op()); Node* const receiver = NodeProperties::GetValueInput(node, 0); @@ -1355,56 +1357,47 @@ Reduction JSNativeContextSpecialization::ReduceJSLoadNamed(Node* node) { } } - // Extract receiver maps from the load IC using the FeedbackNexus. if (!p.feedback().IsValid()) return NoChange(); - FeedbackNexus nexus(p.feedback().vector(), p.feedback().slot()); - - // Try to lower the named access based on the {receiver_maps}. 
- return ReduceNamedAccessFromNexus(node, jsgraph()->Dead(), nexus, name, + return ReduceNamedAccessFromNexus(node, jsgraph()->Dead(), + FeedbackSource(p.feedback()), name, AccessMode::kLoad); } Reduction JSNativeContextSpecialization::ReduceJSStoreNamed(Node* node) { + DisallowHeapAccessIf no_heap_access(FLAG_concurrent_inlining); DCHECK_EQ(IrOpcode::kJSStoreNamed, node->opcode()); NamedAccess const& p = NamedAccessOf(node->op()); Node* const value = NodeProperties::GetValueInput(node, 1); - // Extract receiver maps from the store IC using the FeedbackNexus. if (!p.feedback().IsValid()) return NoChange(); - FeedbackNexus nexus(p.feedback().vector(), p.feedback().slot()); - - // Try to lower the named access based on the {receiver_maps}. - return ReduceNamedAccessFromNexus( - node, value, nexus, NameRef(broker(), p.name()), AccessMode::kStore); + return ReduceNamedAccessFromNexus(node, value, FeedbackSource(p.feedback()), + NameRef(broker(), p.name()), + AccessMode::kStore); } Reduction JSNativeContextSpecialization::ReduceJSStoreNamedOwn(Node* node) { + DisallowHeapAccessIf no_heap_access(FLAG_concurrent_inlining); DCHECK_EQ(IrOpcode::kJSStoreNamedOwn, node->opcode()); StoreNamedOwnParameters const& p = StoreNamedOwnParametersOf(node->op()); Node* const value = NodeProperties::GetValueInput(node, 1); - // Extract receiver maps from the IC using the FeedbackNexus. if (!p.feedback().IsValid()) return NoChange(); - FeedbackNexus nexus(p.feedback().vector(), p.feedback().slot()); - - // Try to lower the creation of a named property based on the {receiver_maps}. - return ReduceNamedAccessFromNexus(node, value, nexus, + return ReduceNamedAccessFromNexus(node, value, FeedbackSource(p.feedback()), NameRef(broker(), p.name()), AccessMode::kStoreInLiteral); } Reduction JSNativeContextSpecialization::ReduceElementAccessOnString( - Node* node, Node* index, Node* value, AccessMode access_mode, - KeyedAccessLoadMode load_mode) { + Node* node, Node* index, Node* value, KeyedAccessMode const& keyed_mode) { Node* receiver = NodeProperties::GetValueInput(node, 0); Node* effect = NodeProperties::GetEffectInput(node); Node* control = NodeProperties::GetControlInput(node); // Strings are immutable in JavaScript. - if (access_mode == AccessMode::kStore) return NoChange(); + if (keyed_mode.access_mode() == AccessMode::kStore) return NoChange(); // `in` cannot be used on strings. - if (access_mode == AccessMode::kHas) return NoChange(); + if (keyed_mode.access_mode() == AccessMode::kHas) return NoChange(); // Ensure that the {receiver} is actually a String. receiver = effect = graph()->NewNode( @@ -1416,7 +1409,7 @@ Reduction JSNativeContextSpecialization::ReduceElementAccessOnString( // Load the single character string from {receiver} or yield undefined // if the {index} is out of bounds (depending on the {load_mode}). 
value = BuildIndexedStringLoad(receiver, index, length, &effect, &control, - load_mode); + keyed_mode.load_mode()); ReplaceWithValue(node, value, effect, control); return Replace(value); @@ -1437,24 +1430,31 @@ base::Optional GetTypedArrayConstant(JSHeapBroker* broker, Reduction JSNativeContextSpecialization::ReduceElementAccess( Node* node, Node* index, Node* value, - ElementAccessFeedback const& processed, AccessMode access_mode, - KeyedAccessLoadMode load_mode, KeyedAccessStoreMode store_mode) { + ElementAccessFeedback const& processed) { DisallowHeapAccessIf no_heap_access(FLAG_concurrent_inlining); - DCHECK(node->opcode() == IrOpcode::kJSLoadProperty || node->opcode() == IrOpcode::kJSStoreProperty || node->opcode() == IrOpcode::kJSStoreInArrayLiteral || node->opcode() == IrOpcode::kJSHasProperty); + Node* receiver = NodeProperties::GetValueInput(node, 0); Node* effect = NodeProperties::GetEffectInput(node); Node* control = NodeProperties::GetControlInput(node); Node* frame_state = NodeProperties::FindFrameStateBefore(node, jsgraph()->Dead()); + AccessMode access_mode = processed.keyed_mode.access_mode(); + if ((access_mode == AccessMode::kLoad || access_mode == AccessMode::kHas) && + receiver->opcode() == IrOpcode::kHeapConstant) { + Reduction reduction = ReduceKeyedLoadFromHeapConstant( + node, index, access_mode, processed.keyed_mode.load_mode()); + if (reduction.Changed()) return reduction; + } + if (HasOnlyStringMaps(broker(), processed.receiver_maps)) { DCHECK(processed.transitions.empty()); - return ReduceElementAccessOnString(node, index, value, access_mode, - load_mode); + return ReduceElementAccessOnString(node, index, value, + processed.keyed_mode); } // Compute element access infos for the receiver maps. @@ -1485,7 +1485,7 @@ Reduction JSNativeContextSpecialization::ReduceElementAccess( // then we need to check that all prototypes have stable maps with // fast elements (and we need to guard against changes to that below). if ((IsHoleyOrDictionaryElementsKind(receiver_map.elements_kind()) || - IsGrowStoreMode(store_mode)) && + IsGrowStoreMode(processed.keyed_mode.store_mode())) && !receiver_map.HasOnlyStablePrototypesWithFastElements( &prototype_maps)) { return NoChange(); @@ -1558,7 +1558,7 @@ Reduction JSNativeContextSpecialization::ReduceElementAccess( // Access the actual element. ValueEffectControl continuation = BuildElementAccess(receiver, index, value, effect, control, access_info, - access_mode, load_mode, store_mode); + processed.keyed_mode); value = continuation.value(); effect = continuation.effect(); control = continuation.control(); @@ -1591,7 +1591,7 @@ Reduction JSNativeContextSpecialization::ReduceElementAccess( ? ElementsTransition::kFastTransition : ElementsTransition::kSlowTransition, transition_source.object(), transition_target.object())), - receiver, effect, control); + receiver, this_effect, this_control); } // Perform map check(s) on {receiver}. @@ -1623,9 +1623,9 @@ Reduction JSNativeContextSpecialization::ReduceElementAccess( } // Access the actual element. 
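// Editor's sketch (illustrative, not part of the patch): the signature changes
// above collapse three loosely coupled parameters (access mode, load mode,
// store mode) into one KeyedAccessMode-style value, so a load mode can never
// be paired with a store and vice versa. A simplified hypothetical analogue:
#include <cassert>

enum class Access { kLoad, kStore, kHas };
enum class LoadKind { kStandard, kIgnoreOutOfBounds };
enum class StoreKind { kStandard, kGrow, kHandleCOW };

class KeyedMode {
 public:
  static KeyedMode Load(LoadKind k) { return KeyedMode(Access::kLoad, k, {}); }
  static KeyedMode Store(StoreKind k) {
    return KeyedMode(Access::kStore, {}, k);
  }
  Access access() const { return access_; }
  bool IsLoad() const { return access_ == Access::kLoad; }
  bool IsStore() const { return access_ == Access::kStore; }
  LoadKind load_mode() const { assert(IsLoad()); return load_; }      // guarded
  StoreKind store_mode() const { assert(IsStore()); return store_; }  // guarded
 private:
  KeyedMode(Access a, LoadKind l, StoreKind s)
      : access_(a), load_(l), store_(s) {}
  Access access_;
  LoadKind load_;
  StoreKind store_;
};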
- ValueEffectControl continuation = BuildElementAccess( - this_receiver, this_index, this_value, this_effect, this_control, - access_info, access_mode, load_mode, store_mode); + ValueEffectControl continuation = + BuildElementAccess(this_receiver, this_index, this_value, this_effect, + this_control, access_info, processed.keyed_mode); values.push_back(continuation.value()); effects.push_back(continuation.effect()); controls.push_back(continuation.control()); @@ -1659,7 +1659,7 @@ Reduction JSNativeContextSpecialization::ReduceElementAccess( } Reduction JSNativeContextSpecialization::ReduceKeyedLoadFromHeapConstant( - Node* node, Node* key, FeedbackNexus const& nexus, AccessMode access_mode, + Node* node, Node* key, AccessMode access_mode, KeyedAccessLoadMode load_mode) { DCHECK(node->opcode() == IrOpcode::kJSLoadProperty || node->opcode() == IrOpcode::kJSHasProperty); @@ -1715,54 +1715,24 @@ Reduction JSNativeContextSpecialization::ReduceKeyedLoadFromHeapConstant( // accesses using the known length, which doesn't change. if (receiver_ref.IsString()) { DCHECK_NE(access_mode, AccessMode::kHas); - // We can only assume that the {index} is a valid array index if the - // IC is in element access mode and not MEGAMORPHIC, otherwise there's - // no guard for the bounds check below. - if (nexus.ic_state() != MEGAMORPHIC && nexus.GetKeyType() == ELEMENT) { - // Ensure that {key} is less than {receiver} length. - Node* length = jsgraph()->Constant(receiver_ref.AsString().length()); - - // Load the single character string from {receiver} or yield - // undefined if the {key} is out of bounds (depending on the - // {load_mode}). - Node* value = BuildIndexedStringLoad(receiver, key, length, &effect, - &control, load_mode); - ReplaceWithValue(node, value, effect, control); - return Replace(value); - } - } - - return NoChange(); -} + // Ensure that {key} is less than {receiver} length. + Node* length = jsgraph()->Constant(receiver_ref.AsString().length()); -Reduction JSNativeContextSpecialization::ReduceKeyedAccess( - Node* node, Node* key, Node* value, FeedbackNexus const& nexus, - AccessMode access_mode, KeyedAccessLoadMode load_mode, - KeyedAccessStoreMode store_mode) { - DCHECK(node->opcode() == IrOpcode::kJSLoadProperty || - node->opcode() == IrOpcode::kJSStoreProperty || - node->opcode() == IrOpcode::kJSStoreInArrayLiteral || - node->opcode() == IrOpcode::kJSHasProperty); - - Node* receiver = NodeProperties::GetValueInput(node, 0); - - if ((access_mode == AccessMode::kLoad || access_mode == AccessMode::kHas) && - receiver->opcode() == IrOpcode::kHeapConstant) { - Reduction reduction = ReduceKeyedLoadFromHeapConstant( - node, key, nexus, access_mode, load_mode); - if (reduction.Changed()) return reduction; + // Load the single character string from {receiver} or yield + // undefined if the {key} is out of bounds (depending on the + // {load_mode}). 
+ Node* value = BuildIndexedStringLoad(receiver, key, length, &effect, + &control, load_mode); + ReplaceWithValue(node, value, effect, control); + return Replace(value); } - return ReducePropertyAccessUsingProcessedFeedback(node, key, base::nullopt, - value, nexus, access_mode, - load_mode, store_mode); + return NoChange(); } -Reduction -JSNativeContextSpecialization::ReducePropertyAccessUsingProcessedFeedback( +Reduction JSNativeContextSpecialization::ReducePropertyAccess( Node* node, Node* key, base::Optional static_name, Node* value, - FeedbackNexus const& nexus, AccessMode access_mode, - KeyedAccessLoadMode load_mode, KeyedAccessStoreMode store_mode) { + FeedbackSource const& source, AccessMode access_mode) { DCHECK_EQ(key == nullptr, static_name.has_value()); DCHECK(node->opcode() == IrOpcode::kJSLoadProperty || node->opcode() == IrOpcode::kJSStoreProperty || @@ -1777,11 +1747,12 @@ JSNativeContextSpecialization::ReducePropertyAccessUsingProcessedFeedback( ProcessedFeedback const* processed = nullptr; if (FLAG_concurrent_inlining) { - processed = broker()->GetFeedback(FeedbackSource(nexus)); + processed = broker()->GetFeedback(source); // TODO(neis): Infer maps from the graph and consolidate with feedback/hints // and filter impossible candidates based on inferred root map. } else { // TODO(neis): Try to unify this with the similar code in the serializer. + FeedbackNexus nexus(source.vector, source.slot); if (nexus.ic_state() == UNINITIALIZED) { processed = new (zone()) InsufficientFeedback(); } else { @@ -1795,14 +1766,14 @@ JSNativeContextSpecialization::ReducePropertyAccessUsingProcessedFeedback( if (name.has_value()) { ZoneVector access_infos(zone()); AccessInfoFactory access_info_factory(broker(), dependencies(), - zone()); + graph()->zone()); access_info_factory.ComputePropertyAccessInfos( receiver_maps, name->object(), access_mode, &access_infos); processed = new (zone()) NamedAccessFeedback(*name, access_infos); } else if (nexus.GetKeyType() == ELEMENT && MEGAMORPHIC != nexus.ic_state()) { - processed = - broker()->ProcessFeedbackMapsForElementAccess(receiver_maps); + processed = broker()->ProcessFeedbackMapsForElementAccess( + receiver_maps, KeyedAccessMode::FromNexus(nexus)); } } } @@ -1818,9 +1789,10 @@ JSNativeContextSpecialization::ReducePropertyAccessUsingProcessedFeedback( return ReduceNamedAccess(node, value, *processed->AsNamedAccess(), access_mode, key); case ProcessedFeedback::kElementAccess: + CHECK_EQ(processed->AsElementAccess()->keyed_mode.access_mode(), + access_mode); return ReduceElementAccess(node, key, value, - *processed->AsElementAccess(), access_mode, - load_mode, store_mode); + *processed->AsElementAccess()); case ProcessedFeedback::kGlobalAccess: UNREACHABLE(); } @@ -1846,21 +1818,15 @@ Reduction JSNativeContextSpecialization::ReduceSoftDeoptimize( } Reduction JSNativeContextSpecialization::ReduceJSHasProperty(Node* node) { + DisallowHeapAccessIf no_heap_access(FLAG_concurrent_inlining); DCHECK_EQ(IrOpcode::kJSHasProperty, node->opcode()); PropertyAccess const& p = PropertyAccessOf(node->op()); Node* key = NodeProperties::GetValueInput(node, 1); Node* value = jsgraph()->Dead(); - // Extract receiver maps from the has property IC using the FeedbackNexus. if (!p.feedback().IsValid()) return NoChange(); - FeedbackNexus nexus(p.feedback().vector(), p.feedback().slot()); - - // Extract the keyed access load mode from the keyed load IC. 
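// Editor's sketch (illustrative, not part of the patch): ReducePropertyAccess
// above has two paths — under concurrent inlining it consults feedback the
// broker serialized up front; otherwise it decodes the nexus on the main
// thread. A simplified cache-or-compute analogue, names hypothetical:
#include <map>

struct FeedbackStub { int kind; };

struct BrokerStub {
  std::map<int, FeedbackStub> cache;   // filled during serialization
  FeedbackStub const* GetFeedback(int source) const {
    auto it = cache.find(source);
    return it == cache.end() ? nullptr : &it->second;
  }
};

FeedbackStub DecodeNexus(int source) {   // main-thread-only in the real code
  return FeedbackStub{source % 3};
}

FeedbackStub GetOrDecode(BrokerStub const& broker, bool concurrent,
                         int source) {
  if (concurrent) {
    FeedbackStub const* cached = broker.GetFeedback(source);
    if (cached != nullptr) return *cached;  // heap access stays forbidden
  }
  return DecodeNexus(source);
}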
- KeyedAccessLoadMode load_mode = nexus.GetKeyedAccessLoadMode(); - - // Try to lower the keyed access based on the {nexus}. - return ReduceKeyedAccess(node, key, value, nexus, AccessMode::kHas, load_mode, - STANDARD_STORE); + return ReducePropertyAccess(node, key, base::nullopt, value, + FeedbackSource(p.feedback()), AccessMode::kHas); } Reduction JSNativeContextSpecialization::ReduceJSLoadPropertyWithEnumeratedKey( @@ -1970,6 +1936,7 @@ Reduction JSNativeContextSpecialization::ReduceJSLoadPropertyWithEnumeratedKey( } Reduction JSNativeContextSpecialization::ReduceJSLoadProperty(Node* node) { + DisallowHeapAccessIf no_heap_acess(FLAG_concurrent_inlining); DCHECK_EQ(IrOpcode::kJSLoadProperty, node->opcode()); PropertyAccess const& p = PropertyAccessOf(node->op()); Node* name = NodeProperties::GetValueInput(node, 1); @@ -1979,62 +1946,49 @@ Reduction JSNativeContextSpecialization::ReduceJSLoadProperty(Node* node) { if (reduction.Changed()) return reduction; } - // Extract receiver maps from the keyed load IC using the FeedbackNexus. if (!p.feedback().IsValid()) return NoChange(); - FeedbackNexus nexus(p.feedback().vector(), p.feedback().slot()); - - // Extract the keyed access load mode from the keyed load IC. - KeyedAccessLoadMode load_mode = nexus.GetKeyedAccessLoadMode(); - - // Try to lower the keyed access based on the {nexus}. Node* value = jsgraph()->Dead(); - return ReduceKeyedAccess(node, name, value, nexus, AccessMode::kLoad, - load_mode, STANDARD_STORE); + return ReducePropertyAccess(node, name, base::nullopt, value, + FeedbackSource(p.feedback()), AccessMode::kLoad); } Reduction JSNativeContextSpecialization::ReduceJSStoreProperty(Node* node) { + DisallowHeapAccessIf no_heap_access(FLAG_concurrent_inlining); DCHECK_EQ(IrOpcode::kJSStoreProperty, node->opcode()); PropertyAccess const& p = PropertyAccessOf(node->op()); Node* const key = NodeProperties::GetValueInput(node, 1); Node* const value = NodeProperties::GetValueInput(node, 2); - // Extract receiver maps from the keyed store IC using the FeedbackNexus. if (!p.feedback().IsValid()) return NoChange(); - FeedbackNexus nexus(p.feedback().vector(), p.feedback().slot()); - - // Extract the keyed access store mode from the keyed store IC. - KeyedAccessStoreMode store_mode = nexus.GetKeyedAccessStoreMode(); - - // Try to lower the keyed access based on the {nexus}. - return ReduceKeyedAccess(node, key, value, nexus, AccessMode::kStore, - STANDARD_LOAD, store_mode); + return ReducePropertyAccess(node, key, base::nullopt, value, + FeedbackSource(p.feedback()), AccessMode::kStore); } Node* JSNativeContextSpecialization::InlinePropertyGetterCall( Node* receiver, Node* context, Node* frame_state, Node** effect, Node** control, ZoneVector* if_exceptions, PropertyAccessInfo const& access_info) { - Node* target = jsgraph()->Constant(access_info.constant()); + ObjectRef constant(broker(), access_info.constant()); + Node* target = jsgraph()->Constant(constant); FrameStateInfo const& frame_info = FrameStateInfoOf(frame_state->op()); // Introduce the call to the getter function. 
Node* value; - ObjectRef constant(broker(), access_info.constant()); if (constant.IsJSFunction()) { value = *effect = *control = graph()->NewNode( jsgraph()->javascript()->Call(2, CallFrequency(), VectorSlotPair(), ConvertReceiverMode::kNotNullOrUndefined), target, receiver, context, frame_state, *effect, *control); } else { - auto function_template_info = constant.AsFunctionTemplateInfo(); - function_template_info.Serialize(); - Node* holder = - access_info.holder().is_null() - ? receiver - : jsgraph()->Constant(access_info.holder().ToHandleChecked()); + Node* holder = access_info.holder().is_null() + ? receiver + : jsgraph()->Constant(ObjectRef( + broker(), access_info.holder().ToHandleChecked())); SharedFunctionInfoRef shared_info( broker(), frame_info.shared_info().ToHandleChecked()); - value = InlineApiCall(receiver, holder, frame_state, nullptr, effect, - control, shared_info, function_template_info); + + value = + InlineApiCall(receiver, holder, frame_state, nullptr, effect, control, + shared_info, constant.AsFunctionTemplateInfo()); } // Remember to rewire the IfException edge if this is inside a try-block. if (if_exceptions != nullptr) { @@ -2052,26 +2006,24 @@ void JSNativeContextSpecialization::InlinePropertySetterCall( Node* receiver, Node* value, Node* context, Node* frame_state, Node** effect, Node** control, ZoneVector* if_exceptions, PropertyAccessInfo const& access_info) { - Node* target = jsgraph()->Constant(access_info.constant()); + ObjectRef constant(broker(), access_info.constant()); + Node* target = jsgraph()->Constant(constant); FrameStateInfo const& frame_info = FrameStateInfoOf(frame_state->op()); // Introduce the call to the setter function. - ObjectRef constant(broker(), access_info.constant()); if (constant.IsJSFunction()) { *effect = *control = graph()->NewNode( jsgraph()->javascript()->Call(3, CallFrequency(), VectorSlotPair(), ConvertReceiverMode::kNotNullOrUndefined), target, receiver, value, context, frame_state, *effect, *control); } else { - auto function_template_info = constant.AsFunctionTemplateInfo(); - function_template_info.Serialize(); - Node* holder = - access_info.holder().is_null() - ? receiver - : jsgraph()->Constant(access_info.holder().ToHandleChecked()); + Node* holder = access_info.holder().is_null() + ? receiver + : jsgraph()->Constant(ObjectRef( + broker(), access_info.holder().ToHandleChecked())); SharedFunctionInfoRef shared_info( broker(), frame_info.shared_info().ToHandleChecked()); InlineApiCall(receiver, holder, frame_state, value, effect, control, - shared_info, function_template_info); + shared_info, constant.AsFunctionTemplateInfo()); } // Remember to rewire the IfException edge if this is inside a try-block. if (if_exceptions != nullptr) { @@ -2088,8 +2040,16 @@ Node* JSNativeContextSpecialization::InlineApiCall( Node* receiver, Node* holder, Node* frame_state, Node* value, Node** effect, Node** control, SharedFunctionInfoRef const& shared_info, FunctionTemplateInfoRef const& function_template_info) { - auto call_handler_info = - function_template_info.call_code().AsCallHandlerInfo(); + if (!function_template_info.has_call_code()) { + return nullptr; + } + + if (!function_template_info.call_code().has_value()) { + TRACE_BROKER_MISSING(broker(), "call code for function template info " + << function_template_info); + return nullptr; + } + CallHandlerInfoRef call_handler_info = *function_template_info.call_code(); // Only setters have a value. int const argc = value == nullptr ? 
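// Editor's sketch (illustrative, not part of the patch): InlineApiCall above
// now bails out when the broker never serialized the template's call code,
// instead of dereferencing a missing value. The same guard pattern with
// std::optional and hypothetical names:
#include <iostream>
#include <optional>

struct CallCodeStub { int id; };

std::optional<CallCodeStub> SerializedCallCode(bool was_serialized) {
  if (!was_serialized) return std::nullopt;  // broker never saw this object
  return CallCodeStub{1};
}

bool TryInlineApiCall(bool was_serialized) {
  std::optional<CallCodeStub> code = SerializedCallCode(was_serialized);
  if (!code.has_value()) {
    // cf. TRACE_BROKER_MISSING: log and fall back to the generic call path.
    std::cerr << "missing call code for function template info\n";
    return false;
  }
  return code->id != 0;  // safe to proceed with inlining
}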
0 : 1; @@ -2151,7 +2111,8 @@ JSNativeContextSpecialization::BuildPropertyLoad( value = InlinePropertyGetterCall(receiver, context, frame_state, &effect, &control, if_exceptions, access_info); } else if (access_info.IsModuleExport()) { - Node* cell = jsgraph()->Constant(access_info.export_cell()); + Node* cell = jsgraph()->Constant( + ObjectRef(broker(), access_info.constant()).AsCell()); value = effect = graph()->NewNode(simplified()->LoadField(AccessBuilder::ForCellValue()), cell, effect, control); @@ -2382,7 +2343,6 @@ JSNativeContextSpecialization::BuildPropertyStore( // Check if we need to grow the properties backing store // with this transitioning store. MapRef transition_map_ref(broker(), transition_map); - transition_map_ref.SerializeBackPointer(); MapRef original_map = transition_map_ref.GetBackPointer().AsMap(); if (original_map.UnusedPropertyFields() == 0) { DCHECK(!field_index.is_inobject()); @@ -2404,7 +2364,7 @@ JSNativeContextSpecialization::BuildPropertyStore( common()->BeginRegion(RegionObservability::kObservable), effect); effect = graph()->NewNode( simplified()->StoreField(AccessBuilder::ForMap()), receiver, - jsgraph()->Constant(transition_map), effect, control); + jsgraph()->Constant(transition_map_ref), effect, control); effect = graph()->NewNode(simplified()->StoreField(field_access), storage, value, effect, control); effect = graph()->NewNode(common()->FinishRegion(), @@ -2495,21 +2455,16 @@ Reduction JSNativeContextSpecialization::ReduceJSStoreDataPropertyInLiteral( Reduction JSNativeContextSpecialization::ReduceJSStoreInArrayLiteral( Node* node) { + DisallowHeapAccessIf no_heap_access(FLAG_concurrent_inlining); DCHECK_EQ(IrOpcode::kJSStoreInArrayLiteral, node->opcode()); FeedbackParameter const& p = FeedbackParameterOf(node->op()); Node* const index = NodeProperties::GetValueInput(node, 1); Node* const value = NodeProperties::GetValueInput(node, 2); - // Extract receiver maps from the keyed store IC using the FeedbackNexus. if (!p.feedback().IsValid()) return NoChange(); - FeedbackNexus nexus(p.feedback().vector(), p.feedback().slot()); - - // Extract the keyed access store mode from the keyed store IC. - KeyedAccessStoreMode store_mode = nexus.GetKeyedAccessStoreMode(); - - return ReduceKeyedAccess(node, index, value, nexus, - AccessMode::kStoreInLiteral, STANDARD_LOAD, - store_mode); + return ReducePropertyAccess(node, index, base::nullopt, value, + FeedbackSource(p.feedback()), + AccessMode::kStoreInLiteral); } Reduction JSNativeContextSpecialization::ReduceJSToObject(Node* node) { @@ -2546,8 +2501,7 @@ ExternalArrayType GetArrayTypeFromElementsKind(ElementsKind kind) { JSNativeContextSpecialization::ValueEffectControl JSNativeContextSpecialization::BuildElementAccess( Node* receiver, Node* index, Node* value, Node* effect, Node* control, - ElementAccessInfo const& access_info, AccessMode access_mode, - KeyedAccessLoadMode load_mode, KeyedAccessStoreMode store_mode) { + ElementAccessInfo const& access_info, KeyedAccessMode const& keyed_mode) { // TODO(bmeurer): We currently specialize based on elements kind. We should // also be able to properly support strings and other JSObjects here. ElementsKind elements_kind = access_info.elements_kind(); @@ -2583,7 +2537,7 @@ JSNativeContextSpecialization::BuildElementAccess( // for Chrome. Node and Electron both set this limit to 0. Setting // the base to Smi zero here allows the EffectControlLinearizer to // optimize away the tricky part of the access later. 
- if (V8_TYPED_ARRAY_MAX_SIZE_IN_HEAP == 0) { + if (JSTypedArray::kMaxSizeInHeap == 0) { base_pointer = jsgraph()->ZeroConstant(); } else { base_pointer = effect = @@ -2629,8 +2583,10 @@ JSNativeContextSpecialization::BuildElementAccess( buffer_or_receiver = buffer; } - if (load_mode == LOAD_IGNORE_OUT_OF_BOUNDS || - store_mode == STORE_IGNORE_OUT_OF_BOUNDS) { + if ((keyed_mode.IsLoad() && + keyed_mode.load_mode() == LOAD_IGNORE_OUT_OF_BOUNDS) || + (keyed_mode.IsStore() && + keyed_mode.store_mode() == STORE_IGNORE_OUT_OF_BOUNDS)) { // Only check that the {index} is in SignedSmall range. We do the actual // bounds check below and just skip the property access if it's out of // bounds for the {receiver}. @@ -2651,10 +2607,10 @@ JSNativeContextSpecialization::BuildElementAccess( // Access the actual element. ExternalArrayType external_array_type = GetArrayTypeFromElementsKind(elements_kind); - switch (access_mode) { + switch (keyed_mode.access_mode()) { case AccessMode::kLoad: { // Check if we can return undefined for out-of-bounds loads. - if (load_mode == LOAD_IGNORE_OUT_OF_BOUNDS) { + if (keyed_mode.load_mode() == LOAD_IGNORE_OUT_OF_BOUNDS) { Node* check = graph()->NewNode(simplified()->NumberLessThan(), index, length); Node* branch = graph()->NewNode( @@ -2716,7 +2672,7 @@ JSNativeContextSpecialization::BuildElementAccess( } // Check if we can skip the out-of-bounds store. - if (store_mode == STORE_IGNORE_OUT_OF_BOUNDS) { + if (keyed_mode.store_mode() == STORE_IGNORE_OUT_OF_BOUNDS) { Node* check = graph()->NewNode(simplified()->NumberLessThan(), index, length); Node* branch = graph()->NewNode(common()->Branch(BranchHint::kTrue), @@ -2766,9 +2722,9 @@ JSNativeContextSpecialization::BuildElementAccess( // Don't try to store to a copy-on-write backing store (unless supported by // the store mode). - if (access_mode == AccessMode::kStore && + if (keyed_mode.access_mode() == AccessMode::kStore && IsSmiOrObjectElementsKind(elements_kind) && - !IsCOWHandlingStoreMode(store_mode)) { + !IsCOWHandlingStoreMode(keyed_mode.store_mode())) { effect = graph()->NewNode( simplified()->CheckMaps( CheckMapsFlag::kNone, @@ -2791,11 +2747,10 @@ JSNativeContextSpecialization::BuildElementAccess( elements, effect, control); // Check if we might need to grow the {elements} backing store. - if (IsGrowStoreMode(store_mode)) { + if (keyed_mode.IsStore() && IsGrowStoreMode(keyed_mode.store_mode())) { // For growing stores we validate the {index} below. - DCHECK(access_mode == AccessMode::kStore || - access_mode == AccessMode::kStoreInLiteral); - } else if (load_mode == LOAD_IGNORE_OUT_OF_BOUNDS && + } else if (keyed_mode.IsLoad() && + keyed_mode.load_mode() == LOAD_IGNORE_OUT_OF_BOUNDS && CanTreatHoleAsUndefined(receiver_maps)) { // Check that the {index} is a valid array index, we do the actual // bounds check below and just skip the store below if it's out of @@ -2826,7 +2781,7 @@ JSNativeContextSpecialization::BuildElementAccess( kFullWriteBarrier, LoadSensitivity::kCritical}; // Access the actual element. - if (access_mode == AccessMode::kLoad) { + if (keyed_mode.access_mode() == AccessMode::kLoad) { // Compute the real element access type, which includes the hole in case // of holey backing stores. if (IsHoleyElementsKind(elements_kind)) { @@ -2839,7 +2794,7 @@ JSNativeContextSpecialization::BuildElementAccess( } // Check if we can return undefined for out-of-bounds loads. 
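// Editor's sketch (illustrative, not part of the patch): the checks above gate
// LOAD_IGNORE_OUT_OF_BOUNDS behind keyed_mode.IsLoad(), and that load mode
// means an out-of-bounds element read yields undefined rather than
// deoptimizing. Modelling "undefined" as std::nullopt, names hypothetical:
#include <cstddef>
#include <optional>
#include <vector>

std::optional<double> LoadIgnoreOutOfBounds(
    std::vector<double> const& elements, size_t index) {
  if (index < elements.size()) {
    return elements[index];  // in bounds: the real element value
  }
  return std::nullopt;  // out of bounds: "undefined" — no trap, no deopt
}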
- if (load_mode == LOAD_IGNORE_OUT_OF_BOUNDS && + if (keyed_mode.load_mode() == LOAD_IGNORE_OUT_OF_BOUNDS && CanTreatHoleAsUndefined(receiver_maps)) { Node* check = graph()->NewNode(simplified()->NumberLessThan(), index, length); @@ -2923,7 +2878,7 @@ JSNativeContextSpecialization::BuildElementAccess( effect, control); } } - } else if (access_mode == AccessMode::kHas) { + } else if (keyed_mode.access_mode() == AccessMode::kHas) { // For packed arrays with NoElementsProctector valid, a bound check // is equivalent to HasProperty. value = effect = graph()->NewNode(simplified()->SpeculativeNumberLessThan( @@ -2996,8 +2951,9 @@ JSNativeContextSpecialization::BuildElementAccess( vtrue, vfalse, control); } } else { - DCHECK(access_mode == AccessMode::kStore || - access_mode == AccessMode::kStoreInLiteral); + DCHECK(keyed_mode.access_mode() == AccessMode::kStore || + keyed_mode.access_mode() == AccessMode::kStoreInLiteral); + if (IsSmiElementsKind(elements_kind)) { value = effect = graph()->NewNode( simplified()->CheckSmi(VectorSlotPair()), value, effect, control); @@ -3011,11 +2967,11 @@ JSNativeContextSpecialization::BuildElementAccess( // Ensure that copy-on-write backing store is writable. if (IsSmiOrObjectElementsKind(elements_kind) && - store_mode == STORE_HANDLE_COW) { + keyed_mode.store_mode() == STORE_HANDLE_COW) { elements = effect = graph()->NewNode(simplified()->EnsureWritableFastElements(), receiver, elements, effect, control); - } else if (IsGrowStoreMode(store_mode)) { + } else if (IsGrowStoreMode(keyed_mode.store_mode())) { // Determine the length of the {elements} backing store. Node* elements_length = effect = graph()->NewNode( simplified()->LoadField(AccessBuilder::ForFixedArrayLength()), @@ -3053,7 +3009,7 @@ JSNativeContextSpecialization::BuildElementAccess( // If we didn't grow {elements}, it might still be COW, in which case we // copy it now. 
if (IsSmiOrObjectElementsKind(elements_kind) && - store_mode == STORE_AND_GROW_HANDLE_COW) { + keyed_mode.store_mode() == STORE_AND_GROW_HANDLE_COW) { elements = effect = graph()->NewNode(simplified()->EnsureWritableFastElements(), receiver, elements, effect, control); @@ -3295,7 +3251,8 @@ bool JSNativeContextSpecialization::InferReceiverMaps( Node* receiver, Node* effect, MapHandles* receiver_maps) { ZoneHandleSet maps; NodeProperties::InferReceiverMapsResult result = - NodeProperties::InferReceiverMaps(broker(), receiver, effect, &maps); + NodeProperties::InferReceiverMapsUnsafe(broker(), receiver, effect, + &maps); if (result == NodeProperties::kReliableReceiverMaps) { for (size_t i = 0; i < maps.size(); ++i) { receiver_maps->push_back(maps[i]); @@ -3357,8 +3314,6 @@ SimplifiedOperatorBuilder* JSNativeContextSpecialization::simplified() const { return jsgraph()->simplified(); } -#undef V8_TYPED_ARRAY_MAX_SIZE_IN_HEAP - } // namespace compiler } // namespace internal } // namespace v8 diff --git a/deps/v8/src/compiler/js-native-context-specialization.h b/deps/v8/src/compiler/js-native-context-specialization.h index 7de2639966ee49..8510c76bfc3d59 100644 --- a/deps/v8/src/compiler/js-native-context-specialization.h +++ b/deps/v8/src/compiler/js-native-context-specialization.h @@ -7,6 +7,7 @@ #include "src/base/flags.h" #include "src/compiler/graph-reducer.h" +#include "src/compiler/js-heap-broker.h" #include "src/deoptimizer/deoptimize-reason.h" #include "src/objects/map.h" @@ -93,24 +94,15 @@ class V8_EXPORT_PRIVATE JSNativeContextSpecialization final Reduction ReduceJSToObject(Node* node); Reduction ReduceElementAccess(Node* node, Node* index, Node* value, - ElementAccessFeedback const& processed, - AccessMode access_mode, - KeyedAccessLoadMode load_mode, - KeyedAccessStoreMode store_mode); + ElementAccessFeedback const& processed); // In the case of non-keyed (named) accesses, pass the name as {static_name} // and use {nullptr} for {key} (load/store modes are irrelevant). 
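// Editor's sketch (illustrative, not part of the patch): the hunks above call
// EnsureWritableFastElements so a store never writes through a copy-on-write
// backing store that other objects still share. The classic COW move in
// standalone form, names hypothetical:
#include <cstddef>
#include <memory>
#include <vector>

using ElementsStore = std::shared_ptr<std::vector<int>>;

void EnsureWritable(ElementsStore* elements) {
  if (elements->use_count() > 1) {  // backing store is shared: copy it first
    *elements = std::make_shared<std::vector<int>>(**elements);
  }
}

void StoreElement(ElementsStore* elements, size_t index, int value) {
  EnsureWritable(elements);   // never mutate a shared (COW) store in place
  (**elements)[index] = value;
}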
- Reduction ReducePropertyAccessUsingProcessedFeedback( - Node* node, Node* key, base::Optional static_name, Node* value, - FeedbackNexus const& nexus, AccessMode access_mode, - KeyedAccessLoadMode load_mode = STANDARD_LOAD, - KeyedAccessStoreMode store_mode = STANDARD_STORE); - Reduction ReduceKeyedAccess(Node* node, Node* key, Node* value, - FeedbackNexus const& nexus, - AccessMode access_mode, - KeyedAccessLoadMode load_mode, - KeyedAccessStoreMode store_mode); + Reduction ReducePropertyAccess(Node* node, Node* key, + base::Optional static_name, + Node* value, FeedbackSource const& source, + AccessMode access_mode); Reduction ReduceNamedAccessFromNexus(Node* node, Node* value, - FeedbackNexus const& nexus, + FeedbackSource const& source, NameRef const& name, AccessMode access_mode); Reduction ReduceNamedAccess(Node* node, Node* value, @@ -123,12 +115,10 @@ class V8_EXPORT_PRIVATE JSNativeContextSpecialization final NameRef const& name, AccessMode access_mode, Node* key, PropertyCellRef const& property_cell); Reduction ReduceKeyedLoadFromHeapConstant(Node* node, Node* key, - FeedbackNexus const& nexus, AccessMode access_mode, KeyedAccessLoadMode load_mode); Reduction ReduceElementAccessOnString(Node* node, Node* index, Node* value, - AccessMode access_mode, - KeyedAccessLoadMode load_mode); + KeyedAccessMode const& keyed_mode); Reduction ReduceSoftDeoptimize(Node* node, DeoptimizeReason reason); Reduction ReduceJSToString(Node* node); @@ -197,10 +187,11 @@ class V8_EXPORT_PRIVATE JSNativeContextSpecialization final FunctionTemplateInfoRef const& function_template_info); // Construct the appropriate subgraph for element access. - ValueEffectControl BuildElementAccess( - Node* receiver, Node* index, Node* value, Node* effect, Node* control, - ElementAccessInfo const& access_info, AccessMode access_mode, - KeyedAccessLoadMode load_mode, KeyedAccessStoreMode store_mode); + ValueEffectControl BuildElementAccess(Node* receiver, Node* index, + Node* value, Node* effect, + Node* control, + ElementAccessInfo const& access_info, + KeyedAccessMode const& keyed_mode); // Construct appropriate subgraph to load from a String. 
Node* BuildIndexedStringLoad(Node* receiver, Node* index, Node* length, diff --git a/deps/v8/src/compiler/js-operator.cc b/deps/v8/src/compiler/js-operator.cc index a779790b8df31a..e0f97922b2ced0 100644 --- a/deps/v8/src/compiler/js-operator.cc +++ b/deps/v8/src/compiler/js-operator.cc @@ -17,7 +17,7 @@ namespace v8 { namespace internal { namespace compiler { -std::ostream& operator<<(std::ostream& os, CallFrequency f) { +std::ostream& operator<<(std::ostream& os, CallFrequency const& f) { if (f.IsUnknown()) return os << "unknown"; return os << f.value(); } @@ -28,7 +28,6 @@ CallFrequency CallFrequencyOf(Operator const* op) { return OpParameter(op); } - std::ostream& operator<<(std::ostream& os, ConstructForwardVarargsParameters const& p) { return os << p.arity() << ", " << p.start_index(); @@ -843,7 +842,8 @@ const Operator* JSOperatorBuilder::Call(size_t arity, parameters); // parameter } -const Operator* JSOperatorBuilder::CallWithArrayLike(CallFrequency frequency) { +const Operator* JSOperatorBuilder::CallWithArrayLike( + CallFrequency const& frequency) { return new (zone()) Operator1( // -- IrOpcode::kJSCallWithArrayLike, Operator::kNoProperties, // opcode "JSCallWithArrayLike", // name @@ -899,8 +899,10 @@ const Operator* JSOperatorBuilder::ConstructForwardVarargs( parameters); // parameter } +// Note: frequency is taken by reference to work around a GCC bug +// on AIX (v8:8193). const Operator* JSOperatorBuilder::Construct(uint32_t arity, - CallFrequency frequency, + CallFrequency const& frequency, VectorSlotPair const& feedback) { ConstructParameters parameters(arity, frequency, feedback); return new (zone()) Operator1( // -- @@ -911,7 +913,7 @@ const Operator* JSOperatorBuilder::Construct(uint32_t arity, } const Operator* JSOperatorBuilder::ConstructWithArrayLike( - CallFrequency frequency) { + CallFrequency const& frequency) { return new (zone()) Operator1( // -- IrOpcode::kJSConstructWithArrayLike, // opcode Operator::kNoProperties, // properties @@ -921,7 +923,8 @@ const Operator* JSOperatorBuilder::ConstructWithArrayLike( } const Operator* JSOperatorBuilder::ConstructWithSpread( - uint32_t arity, CallFrequency frequency, VectorSlotPair const& feedback) { + uint32_t arity, CallFrequency const& frequency, + VectorSlotPair const& feedback) { ConstructParameters parameters(arity, frequency, feedback); return new (zone()) Operator1( // -- IrOpcode::kJSConstructWithSpread, Operator::kNoProperties, // opcode diff --git a/deps/v8/src/compiler/js-operator.h b/deps/v8/src/compiler/js-operator.h index 0f315b1cb56dac..e7d9acb152acfe 100644 --- a/deps/v8/src/compiler/js-operator.h +++ b/deps/v8/src/compiler/js-operator.h @@ -48,7 +48,7 @@ class CallFrequency final { } bool operator!=(CallFrequency const& that) const { return !(*this == that); } - friend size_t hash_value(CallFrequency f) { + friend size_t hash_value(CallFrequency const& f) { return bit_cast(f.value_); } @@ -58,7 +58,7 @@ class CallFrequency final { float value_; }; -std::ostream& operator<<(std::ostream&, CallFrequency); +std::ostream& operator<<(std::ostream&, CallFrequency const&); CallFrequency CallFrequencyOf(Operator const* op) V8_WARN_UNUSED_RESULT; @@ -101,7 +101,7 @@ ConstructForwardVarargsParameters const& ConstructForwardVarargsParametersOf( // used as a parameter by JSConstruct and JSConstructWithSpread operators. 
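// Editor's sketch (illustrative, not part of the patch): CallFrequency wraps a
// single float, and hash_value above hashes its bit pattern via bit_cast; the
// const& signatures exist, per the comment above, to dodge a GCC bug on AIX
// (v8:8193). A portable stand-in for the bit-pattern hash, names hypothetical:
#include <cstddef>
#include <cstdint>
#include <cstring>

inline uint32_t BitCastFloatToU32(float f) {  // memcpy-based pre-C++20 bit_cast
  static_assert(sizeof(uint32_t) == sizeof(float), "size mismatch");
  uint32_t u;
  std::memcpy(&u, &f, sizeof(u));
  return u;
}

// Hashing the bits (not the value) keeps +0.0f and -0.0f distinct and gives
// every NaN payload a stable hash.
inline size_t HashFrequency(float value) { return BitCastFloatToU32(value); }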
class ConstructParameters final { public: - ConstructParameters(uint32_t arity, CallFrequency frequency, + ConstructParameters(uint32_t arity, CallFrequency const& frequency, VectorSlotPair const& feedback) : arity_(arity), frequency_(frequency), feedback_(feedback) {} @@ -757,7 +757,7 @@ class V8_EXPORT_PRIVATE JSOperatorBuilder final VectorSlotPair const& feedback = VectorSlotPair(), ConvertReceiverMode convert_mode = ConvertReceiverMode::kAny, SpeculationMode speculation_mode = SpeculationMode::kDisallowSpeculation); - const Operator* CallWithArrayLike(CallFrequency frequency); + const Operator* CallWithArrayLike(CallFrequency const& frequency); const Operator* CallWithSpread( uint32_t arity, CallFrequency const& frequency = CallFrequency(), VectorSlotPair const& feedback = VectorSlotPair(), @@ -768,11 +768,11 @@ class V8_EXPORT_PRIVATE JSOperatorBuilder final const Operator* ConstructForwardVarargs(size_t arity, uint32_t start_index); const Operator* Construct(uint32_t arity, - CallFrequency frequency = CallFrequency(), + CallFrequency const& frequency = CallFrequency(), VectorSlotPair const& feedback = VectorSlotPair()); - const Operator* ConstructWithArrayLike(CallFrequency frequency); + const Operator* ConstructWithArrayLike(CallFrequency const& frequency); const Operator* ConstructWithSpread( - uint32_t arity, CallFrequency frequency = CallFrequency(), + uint32_t arity, CallFrequency const& frequency = CallFrequency(), VectorSlotPair const& feedback = VectorSlotPair()); const Operator* LoadProperty(VectorSlotPair const& feedback); diff --git a/deps/v8/src/compiler/js-type-hint-lowering.cc b/deps/v8/src/compiler/js-type-hint-lowering.cc index 9d882e823835d2..f3696bcc4887f4 100644 --- a/deps/v8/src/compiler/js-type-hint-lowering.cc +++ b/deps/v8/src/compiler/js-type-hint-lowering.cc @@ -44,6 +44,25 @@ bool BinaryOperationHintToNumberOperationHint( return false; } +bool BinaryOperationHintToBigIntOperationHint( + BinaryOperationHint binop_hint, BigIntOperationHint* bigint_hint) { + switch (binop_hint) { + case BinaryOperationHint::kSignedSmall: + case BinaryOperationHint::kSignedSmallInputs: + case BinaryOperationHint::kSigned32: + case BinaryOperationHint::kNumber: + case BinaryOperationHint::kNumberOrOddball: + case BinaryOperationHint::kAny: + case BinaryOperationHint::kNone: + case BinaryOperationHint::kString: + return false; + case BinaryOperationHint::kBigInt: + *bigint_hint = BigIntOperationHint::kBigInt; + return true; + } + UNREACHABLE(); +} + } // namespace class JSSpeculativeBinopBuilder final { @@ -74,6 +93,11 @@ class JSSpeculativeBinopBuilder final { hint); } + bool GetBinaryBigIntOperationHint(BigIntOperationHint* hint) { + return BinaryOperationHintToBigIntOperationHint(GetBinaryOperationHint(), + hint); + } + bool GetCompareNumberOperationHint(NumberOperationHint* hint) { switch (GetCompareOperationHint()) { case CompareOperationHint::kSignedSmall: @@ -138,6 +162,16 @@ class JSSpeculativeBinopBuilder final { UNREACHABLE(); } + const Operator* SpeculativeBigIntOp(BigIntOperationHint hint) { + switch (op_->opcode()) { + case IrOpcode::kJSAdd: + return simplified()->SpeculativeBigIntAdd(hint); + default: + break; + } + UNREACHABLE(); + } + const Operator* SpeculativeCompareOp(NumberOperationHint hint) { switch (op_->opcode()) { case IrOpcode::kJSEqual: @@ -179,6 +213,16 @@ class JSSpeculativeBinopBuilder final { return nullptr; } + Node* TryBuildBigIntBinop() { + BigIntOperationHint hint; + if (GetBinaryBigIntOperationHint(&hint)) { + const Operator* op = 
SpeculativeBigIntOp(hint); + Node* node = BuildSpeculativeOperation(op); + return node; + } + return nullptr; + } + Node* TryBuildNumberCompare() { NumberOperationHint hint; if (GetCompareNumberOperationHint(&hint)) { @@ -264,6 +308,15 @@ JSTypeHintLowering::LoweringResult JSTypeHintLowering::ReduceUnaryOperation( operand, jsgraph()->SmiConstant(-1), effect, control, slot); node = b.TryBuildNumberBinop(); + if (!node) { + FeedbackNexus nexus(feedback_vector(), slot); + if (nexus.GetBinaryOperationFeedback() == + BinaryOperationHint::kBigInt) { + const Operator* op = jsgraph()->simplified()->SpeculativeBigIntNegate( + BigIntOperationHint::kBigInt); + node = jsgraph()->graph()->NewNode(op, operand, effect, control); + } + } break; } default: @@ -345,6 +398,11 @@ JSTypeHintLowering::LoweringResult JSTypeHintLowering::ReduceBinaryOperation( if (Node* node = b.TryBuildNumberBinop()) { return LoweringResult::SideEffectFree(node, node, control); } + if (op->opcode() == IrOpcode::kJSAdd) { + if (Node* node = b.TryBuildBigIntBinop()) { + return LoweringResult::SideEffectFree(node, node, control); + } + } break; } case IrOpcode::kJSExponentiate: { diff --git a/deps/v8/src/compiler/js-type-hint-lowering.h b/deps/v8/src/compiler/js-type-hint-lowering.h index 7164a0b708a23d..a74c0193558734 100644 --- a/deps/v8/src/compiler/js-type-hint-lowering.h +++ b/deps/v8/src/compiler/js-type-hint-lowering.h @@ -153,7 +153,8 @@ class JSTypeHintLowering { private: friend class JSSpeculativeBinopBuilder; - Node* TryBuildSoftDeopt(FeedbackNexus& nexus, Node* effect, Node* control, + Node* TryBuildSoftDeopt(FeedbackNexus& nexus, // NOLINT(runtime/references) + Node* effect, Node* control, DeoptimizeReason reson) const; JSGraph* jsgraph() const { return jsgraph_; } diff --git a/deps/v8/src/compiler/js-typed-lowering.cc b/deps/v8/src/compiler/js-typed-lowering.cc index ba50b7579234c3..3190fc993056c1 100644 --- a/deps/v8/src/compiler/js-typed-lowering.cc +++ b/deps/v8/src/compiler/js-typed-lowering.cc @@ -10,6 +10,7 @@ #include "src/compiler/access-builder.h" #include "src/compiler/allocation-builder.h" #include "src/compiler/js-graph.h" +#include "src/compiler/js-heap-broker.h" #include "src/compiler/linkage.h" #include "src/compiler/node-matchers.h" #include "src/compiler/node-properties.h" @@ -1364,20 +1365,21 @@ Node* JSTypedLowering::BuildGetModuleCell(Node* node) { Type module_type = NodeProperties::GetType(module); if (module_type.IsHeapConstant()) { - ModuleRef module_constant = module_type.AsHeapConstant()->Ref().AsModule(); + SourceTextModuleRef module_constant = + module_type.AsHeapConstant()->Ref().AsSourceTextModule(); CellRef cell_constant = module_constant.GetCell(cell_index); return jsgraph()->Constant(cell_constant); } FieldAccess field_access; int index; - if (ModuleDescriptor::GetCellIndexKind(cell_index) == - ModuleDescriptor::kExport) { + if (SourceTextModuleDescriptor::GetCellIndexKind(cell_index) == + SourceTextModuleDescriptor::kExport) { field_access = AccessBuilder::ForModuleRegularExports(); index = cell_index - 1; } else { - DCHECK_EQ(ModuleDescriptor::GetCellIndexKind(cell_index), - ModuleDescriptor::kImport); + DCHECK_EQ(SourceTextModuleDescriptor::GetCellIndexKind(cell_index), + SourceTextModuleDescriptor::kImport); field_access = AccessBuilder::ForModuleRegularImports(); index = -cell_index - 1; } @@ -1408,9 +1410,9 @@ Reduction JSTypedLowering::ReduceJSStoreModule(Node* node) { Node* effect = NodeProperties::GetEffectInput(node); Node* control = NodeProperties::GetControlInput(node); 
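// Editor's sketch (illustrative, not part of the patch): the new BigInt path
// above only specializes JSAdd when the recorded binary-operation feedback is
// kBigInt; every other hint keeps the generic lowering, and the speculative op
// deoptimizes if an input turns out not to be a BigInt. Hypothetical names:
enum class BinopHintStub { kSignedSmall, kNumber, kBigInt, kAny };

enum class AddLowering { kGeneric, kSpeculativeBigIntAdd };

AddLowering SelectAddLowering(BinopHintStub hint) {
  switch (hint) {
    case BinopHintStub::kBigInt:
      return AddLowering::kSpeculativeBigIntAdd;  // guarded by a deopt check
    case BinopHintStub::kSignedSmall:
    case BinopHintStub::kNumber:
    case BinopHintStub::kAny:
      return AddLowering::kGeneric;
  }
  return AddLowering::kGeneric;  // unreachable; silences -Wreturn-type
}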
Node* value = NodeProperties::GetValueInput(node, 1); - DCHECK_EQ( - ModuleDescriptor::GetCellIndexKind(OpParameter(node->op())), - ModuleDescriptor::kExport); + DCHECK_EQ(SourceTextModuleDescriptor::GetCellIndexKind( + OpParameter(node->op())), + SourceTextModuleDescriptor::kExport); Node* cell = BuildGetModuleCell(node); if (cell->op()->EffectOutputCount() > 0) effect = cell; diff --git a/deps/v8/src/compiler/linkage.cc b/deps/v8/src/compiler/linkage.cc index 8bb47b43e9f26c..1d88a27a5f758e 100644 --- a/deps/v8/src/compiler/linkage.cc +++ b/deps/v8/src/compiler/linkage.cc @@ -137,13 +137,19 @@ bool CallDescriptor::CanTailCall(const Node* node) const { return HasSameReturnLocationsAs(CallDescriptorOf(node->op())); } -int CallDescriptor::CalculateFixedFrameSize() const { +// TODO(jkummerow, sigurds): Arguably frame size calculation should be +// keyed on code/frame type, not on CallDescriptor kind. Think about a +// good way to organize this logic. +int CallDescriptor::CalculateFixedFrameSize(Code::Kind code_kind) const { switch (kind_) { case kCallJSFunction: return PushArgumentCount() ? OptimizedBuiltinFrameConstants::kFixedSlotCount : StandardFrameConstants::kFixedSlotCount; case kCallAddress: + if (code_kind == Code::C_WASM_ENTRY) { + return CWasmEntryFrameConstants::kFixedSlotCount; + } return CommonFrameConstants::kFixedSlotCountAboveFp + CommonFrameConstants::kCPSlotCount; case kCallCodeObject: diff --git a/deps/v8/src/compiler/linkage.h b/deps/v8/src/compiler/linkage.h index e4fa6f9f207b46..05eb0e7d11732f 100644 --- a/deps/v8/src/compiler/linkage.h +++ b/deps/v8/src/compiler/linkage.h @@ -325,7 +325,7 @@ class V8_EXPORT_PRIVATE CallDescriptor final bool CanTailCall(const Node* call) const; - int CalculateFixedFrameSize() const; + int CalculateFixedFrameSize(Code::Kind code_kind) const; RegList AllocatableRegisters() const { return allocatable_registers_; } diff --git a/deps/v8/src/compiler/load-elimination.cc b/deps/v8/src/compiler/load-elimination.cc index c42bfd839a4ac5..f9998723f387da 100644 --- a/deps/v8/src/compiler/load-elimination.cc +++ b/deps/v8/src/compiler/load-elimination.cc @@ -419,14 +419,15 @@ bool LoadElimination::AbstractState::Equals(AbstractState const* that) const { } void LoadElimination::AbstractState::FieldsMerge( - AbstractFields& this_fields, AbstractFields const& that_fields, + AbstractFields* this_fields, AbstractFields const& that_fields, Zone* zone) { - for (size_t i = 0; i < this_fields.size(); ++i) { - if (this_fields[i]) { + for (size_t i = 0; i < this_fields->size(); ++i) { + AbstractField const*& this_field = (*this_fields)[i]; + if (this_field) { if (that_fields[i]) { - this_fields[i] = this_fields[i]->Merge(that_fields[i], zone); + this_field = this_field->Merge(that_fields[i], zone); } else { - this_fields[i] = nullptr; + this_field = nullptr; } } } @@ -442,8 +443,8 @@ void LoadElimination::AbstractState::Merge(AbstractState const* that, } // Merge the information we have about the fields. - FieldsMerge(this->fields_, that->fields_, zone); - FieldsMerge(this->const_fields_, that->const_fields_, zone); + FieldsMerge(&this->fields_, that->fields_, zone); + FieldsMerge(&this->const_fields_, that->const_fields_, zone); // Merge the information we have about the maps. 
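// Editor's sketch (illustrative, not part of the patch): FieldsMerge above now
// takes its output as a pointer rather than a mutable reference (the style
// cpplint's runtime/references check enforces, cf. the NOLINT added earlier),
// making the mutation visible at every call site. Simplified analogue:
#include <array>
#include <cstddef>

using FieldSet = std::array<int const*, 4>;

void MergeFields(FieldSet* this_fields, FieldSet const& that_fields) {
  for (size_t i = 0; i < this_fields->size(); ++i) {
    int const*& this_field = (*this_fields)[i];
    if (this_field == nullptr) continue;        // nothing known on this side
    if (that_fields[i] == nullptr) this_field = nullptr;  // keep common info
    // (the real Merge also reconciles two non-null entries)
  }
}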
if (this->maps_) { @@ -923,20 +924,23 @@ Reduction LoadElimination::ReduceStoreField(Node* node, FieldInfo const* lookup_result = state->LookupField(object, field_index, constness); - if (lookup_result && constness == PropertyConstness::kMutable) { + if (lookup_result && (constness == PropertyConstness::kMutable || + V8_ENABLE_DOUBLE_CONST_STORE_CHECK_BOOL)) { // At runtime, we should never encounter // - any store replacing existing info with a different, incompatible // representation, nor // - two consecutive const stores. // However, we may see such code statically, so we guard against // executing it by emitting Unreachable. - // TODO(gsps): Re-enable the double const store check once we have - // identified other FieldAccesses that should be marked mutable - // instead of const (cf. JSCreateLowering::AllocateFastLiteral). + // TODO(gsps): Re-enable the double const store check even for + // non-debug builds once we have identified other FieldAccesses + // that should be marked mutable instead of const + // (cf. JSCreateLowering::AllocateFastLiteral). bool incompatible_representation = !lookup_result->name.is_null() && !IsCompatible(representation, lookup_result->representation); - if (incompatible_representation) { + if (incompatible_representation || + constness == PropertyConstness::kConst) { Node* control = NodeProperties::GetControlInput(node); Node* unreachable = graph()->NewNode(common()->Unreachable(), effect, control); diff --git a/deps/v8/src/compiler/load-elimination.h b/deps/v8/src/compiler/load-elimination.h index 7658d013652979..4ad1fa64a201ef 100644 --- a/deps/v8/src/compiler/load-elimination.h +++ b/deps/v8/src/compiler/load-elimination.h @@ -233,7 +233,7 @@ class V8_EXPORT_PRIVATE LoadElimination final bool FieldsEquals(AbstractFields const& this_fields, AbstractFields const& that_fields) const; - void FieldsMerge(AbstractFields& this_fields, + void FieldsMerge(AbstractFields* this_fields, AbstractFields const& that_fields, Zone* zone); AbstractElements const* elements_ = nullptr; diff --git a/deps/v8/src/compiler/loop-analysis.cc b/deps/v8/src/compiler/loop-analysis.cc index d6b88b13f5a73b..41d50549b3154b 100644 --- a/deps/v8/src/compiler/loop-analysis.cc +++ b/deps/v8/src/compiler/loop-analysis.cc @@ -4,6 +4,7 @@ #include "src/compiler/loop-analysis.h" +#include "src/codegen/tick-counter.h" #include "src/compiler/graph.h" #include "src/compiler/node-marker.h" #include "src/compiler/node-properties.h" @@ -12,6 +13,9 @@ namespace v8 { namespace internal { + +class TickCounter; + namespace compiler { #define OFFSET(x) ((x)&0x1F) @@ -51,7 +55,8 @@ struct TempLoopInfo { // marks on edges into/out-of the loop header nodes. 
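// Editor's sketch (illustrative, not part of the patch): the load-elimination
// change above does not "repair" statically visible but impossible code (a
// second store to a const field); it plants Unreachable so the path traps
// instead of executing. A simplified analogue, names hypothetical:
#include <cstdlib>

enum class Constness { kMutable, kConst };

void StoreFieldChecked(bool field_already_known, Constness constness) {
  if (field_already_known && constness == Constness::kConst) {
    // Two consecutive const stores cannot happen at runtime; trapping here
    // mirrors emitting common()->Unreachable() into the graph.
    std::abort();
  }
  // ... perform the store ...
}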
class LoopFinderImpl { public: - LoopFinderImpl(Graph* graph, LoopTree* loop_tree, Zone* zone) + LoopFinderImpl(Graph* graph, LoopTree* loop_tree, TickCounter* tick_counter, + Zone* zone) : zone_(zone), end_(graph->end()), queue_(zone), @@ -63,7 +68,8 @@ class LoopFinderImpl { loops_found_(0), width_(0), backward_(nullptr), - forward_(nullptr) {} + forward_(nullptr), + tick_counter_(tick_counter) {} void Run() { PropagateBackward(); @@ -116,6 +122,7 @@ class LoopFinderImpl { int width_; uint32_t* backward_; uint32_t* forward_; + TickCounter* const tick_counter_; int num_nodes() { return static_cast(loop_tree_->node_to_loop_num_.size()); @@ -183,6 +190,7 @@ class LoopFinderImpl { Queue(end_); while (!queue_.empty()) { + tick_counter_->DoTick(); Node* node = queue_.front(); info(node); queue_.pop_front(); @@ -301,6 +309,7 @@ class LoopFinderImpl { } // Propagate forward on paths that were backward reachable from backedges. while (!queue_.empty()) { + tick_counter_->DoTick(); Node* node = queue_.front(); queue_.pop_front(); queued_.Set(node, false); @@ -512,11 +521,11 @@ class LoopFinderImpl { } }; - -LoopTree* LoopFinder::BuildLoopTree(Graph* graph, Zone* zone) { +LoopTree* LoopFinder::BuildLoopTree(Graph* graph, TickCounter* tick_counter, + Zone* zone) { LoopTree* loop_tree = new (graph->zone()) LoopTree(graph->NodeCount(), graph->zone()); - LoopFinderImpl finder(graph, loop_tree, zone); + LoopFinderImpl finder(graph, loop_tree, tick_counter, zone); finder.Run(); if (FLAG_trace_turbo_loop) { finder.Print(); @@ -524,7 +533,6 @@ LoopTree* LoopFinder::BuildLoopTree(Graph* graph, Zone* zone) { return loop_tree; } - Node* LoopTree::HeaderNode(Loop* loop) { Node* first = *HeaderNodes(loop).begin(); if (first->opcode() == IrOpcode::kLoop) return first; diff --git a/deps/v8/src/compiler/loop-analysis.h b/deps/v8/src/compiler/loop-analysis.h index 620a9554e08edd..043833a54ca099 100644 --- a/deps/v8/src/compiler/loop-analysis.h +++ b/deps/v8/src/compiler/loop-analysis.h @@ -13,6 +13,9 @@ namespace v8 { namespace internal { + +class TickCounter; + namespace compiler { // TODO(titzer): don't assume entry edges have a particular index. @@ -156,7 +159,8 @@ class LoopTree : public ZoneObject { class V8_EXPORT_PRIVATE LoopFinder { public: // Build a loop tree for the entire graph. 
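// Editor's sketch (illustrative, not part of the patch): the loop finder above
// now calls tick_counter_->DoTick() once per worklist item, letting a
// long-running analysis report forward progress to the embedder. The shape of
// that instrumentation, with hypothetical names:
#include <cstddef>
#include <deque>

struct TickCounterStub {
  void DoTick() { ++ticks; }  // a real counter might also check a deadline
  size_t ticks = 0;
};

void DrainWorklist(std::deque<int>* queue, TickCounterStub* tick_counter) {
  while (!queue->empty()) {
    tick_counter->DoTick();   // one tick per dequeued node
    int node = queue->front();
    queue->pop_front();
    (void)node;               // ... visit node, possibly enqueueing more ...
  }
}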
- static LoopTree* BuildLoopTree(Graph* graph, Zone* temp_zone); + static LoopTree* BuildLoopTree(Graph* graph, TickCounter* tick_counter, + Zone* temp_zone); }; diff --git a/deps/v8/src/compiler/machine-graph-verifier.cc b/deps/v8/src/compiler/machine-graph-verifier.cc index f8e78b216953bb..80205f80b64685 100644 --- a/deps/v8/src/compiler/machine-graph-verifier.cc +++ b/deps/v8/src/compiler/machine-graph-verifier.cc @@ -240,6 +240,7 @@ class MachineRepresentationInferrer { MachineType::PointerRepresentation(); break; case IrOpcode::kBitcastTaggedToWord: + case IrOpcode::kBitcastTaggedSignedToWord: representation_vector_[node->id()] = MachineType::PointerRepresentation(); break; @@ -428,6 +429,7 @@ class MachineRepresentationChecker { MachineRepresentation::kWord64); break; case IrOpcode::kBitcastTaggedToWord: + case IrOpcode::kBitcastTaggedSignedToWord: case IrOpcode::kTaggedPoisonOnSpeculation: CheckValueInputIsTagged(node, 0); break; @@ -556,7 +558,7 @@ class MachineRepresentationChecker { case IrOpcode::kParameter: case IrOpcode::kProjection: break; - case IrOpcode::kDebugAbort: + case IrOpcode::kAbortCSAAssert: CheckValueInputIsTagged(node, 0); break; case IrOpcode::kLoad: @@ -700,6 +702,7 @@ class MachineRepresentationChecker { case IrOpcode::kThrow: case IrOpcode::kTypedStateValues: case IrOpcode::kFrameState: + case IrOpcode::kStaticAssert: break; default: if (node->op()->ValueInputCount() != 0) { @@ -748,6 +751,11 @@ class MachineRepresentationChecker { case MachineRepresentation::kCompressedPointer: case MachineRepresentation::kCompressedSigned: return; + case MachineRepresentation::kNone: + if (input->opcode() == IrOpcode::kCompressedHeapConstant) { + return; + } + break; default: break; } @@ -851,6 +859,9 @@ class MachineRepresentationChecker { case MachineRepresentation::kCompressedPointer: return; case MachineRepresentation::kNone: { + if (input->opcode() == IrOpcode::kCompressedHeapConstant) { + return; + } std::ostringstream str; str << "TypeError: node #" << input->id() << ":" << *input->op() << " is untyped."; diff --git a/deps/v8/src/compiler/machine-operator-reducer.cc b/deps/v8/src/compiler/machine-operator-reducer.cc index a6a8e87cf462ca..f720c2908461ba 100644 --- a/deps/v8/src/compiler/machine-operator-reducer.cc +++ b/deps/v8/src/compiler/machine-operator-reducer.cc @@ -710,7 +710,8 @@ Reduction MachineOperatorReducer::Reduce(Node* node) { return ReduceFloat64Compare(node); case IrOpcode::kFloat64RoundDown: return ReduceFloat64RoundDown(node); - case IrOpcode::kBitcastTaggedToWord: { + case IrOpcode::kBitcastTaggedToWord: + case IrOpcode::kBitcastTaggedSignedToWord: { NodeMatcher m(node->InputAt(0)); if (m.IsBitcastWordToTaggedSigned()) { RelaxEffectsAndControls(node); diff --git a/deps/v8/src/compiler/machine-operator.cc b/deps/v8/src/compiler/machine-operator.cc index d2ddedc8fa1708..f447861aca758b 100644 --- a/deps/v8/src/compiler/machine-operator.cc +++ b/deps/v8/src/compiler/machine-operator.cc @@ -140,6 +140,7 @@ MachineType AtomicOpType(Operator const* op) { V(Word64Clz, Operator::kNoProperties, 1, 0, 1) \ V(Word32ReverseBytes, Operator::kNoProperties, 1, 0, 1) \ V(Word64ReverseBytes, Operator::kNoProperties, 1, 0, 1) \ + V(BitcastTaggedSignedToWord, Operator::kNoProperties, 1, 0, 1) \ V(BitcastWordToTaggedSigned, Operator::kNoProperties, 1, 0, 1) \ V(TruncateFloat64ToWord32, Operator::kNoProperties, 1, 0, 1) \ V(ChangeFloat32ToFloat64, Operator::kNoProperties, 1, 0, 1) \ @@ -244,6 +245,13 @@ MachineType AtomicOpType(Operator const* op) { V(Word32PairShl, 
Operator::kNoProperties, 3, 0, 2) \ V(Word32PairShr, Operator::kNoProperties, 3, 0, 2) \ V(Word32PairSar, Operator::kNoProperties, 3, 0, 2) \ + V(F64x2Splat, Operator::kNoProperties, 1, 0, 1) \ + V(F64x2Abs, Operator::kNoProperties, 1, 0, 1) \ + V(F64x2Neg, Operator::kNoProperties, 1, 0, 1) \ + V(F64x2Eq, Operator::kCommutative, 2, 0, 1) \ + V(F64x2Ne, Operator::kCommutative, 2, 0, 1) \ + V(F64x2Lt, Operator::kNoProperties, 2, 0, 1) \ + V(F64x2Le, Operator::kNoProperties, 2, 0, 1) \ V(F32x4Splat, Operator::kNoProperties, 1, 0, 1) \ V(F32x4SConvertI32x4, Operator::kNoProperties, 1, 0, 1) \ V(F32x4UConvertI32x4, Operator::kNoProperties, 1, 0, 1) \ @@ -261,6 +269,17 @@ MachineType AtomicOpType(Operator const* op) { V(F32x4Ne, Operator::kCommutative, 2, 0, 1) \ V(F32x4Lt, Operator::kNoProperties, 2, 0, 1) \ V(F32x4Le, Operator::kNoProperties, 2, 0, 1) \ + V(I64x2Splat, Operator::kNoProperties, 1, 0, 1) \ + V(I64x2Neg, Operator::kNoProperties, 1, 0, 1) \ + V(I64x2Add, Operator::kCommutative, 2, 0, 1) \ + V(I64x2Sub, Operator::kNoProperties, 2, 0, 1) \ + V(I64x2Mul, Operator::kCommutative, 2, 0, 1) \ + V(I64x2Eq, Operator::kCommutative, 2, 0, 1) \ + V(I64x2Ne, Operator::kCommutative, 2, 0, 1) \ + V(I64x2GtS, Operator::kNoProperties, 2, 0, 1) \ + V(I64x2GeS, Operator::kNoProperties, 2, 0, 1) \ + V(I64x2GtU, Operator::kNoProperties, 2, 0, 1) \ + V(I64x2GeU, Operator::kNoProperties, 2, 0, 1) \ V(I32x4Splat, Operator::kNoProperties, 1, 0, 1) \ V(I32x4SConvertF32x4, Operator::kNoProperties, 1, 0, 1) \ V(I32x4SConvertI16x8Low, Operator::kNoProperties, 1, 0, 1) \ @@ -338,6 +357,8 @@ MachineType AtomicOpType(Operator const* op) { V(S128Xor, Operator::kAssociative | Operator::kCommutative, 2, 0, 1) \ V(S128Not, Operator::kNoProperties, 1, 0, 1) \ V(S128Select, Operator::kNoProperties, 3, 0, 1) \ + V(S1x2AnyTrue, Operator::kNoProperties, 1, 0, 1) \ + V(S1x2AllTrue, Operator::kNoProperties, 1, 0, 1) \ V(S1x4AnyTrue, Operator::kNoProperties, 1, 0, 1) \ V(S1x4AllTrue, Operator::kNoProperties, 1, 0, 1) \ V(S1x8AnyTrue, Operator::kNoProperties, 1, 0, 1) \ @@ -439,12 +460,15 @@ MachineType AtomicOpType(Operator const* op) { V(Exchange) #define SIMD_LANE_OP_LIST(V) \ + V(F64x2, 2) \ V(F32x4, 4) \ + V(I64x2, 2) \ V(I32x4, 4) \ V(I16x8, 8) \ V(I8x16, 16) #define SIMD_FORMAT_LIST(V) \ + V(64x2, 64) \ V(32x4, 32) \ V(16x8, 16) \ V(8x16, 8) @@ -754,6 +778,14 @@ struct MachineOperatorGlobalCache { }; Word32AtomicPairCompareExchangeOperator kWord32AtomicPairCompareExchange; + struct MemoryBarrierOperator : public Operator { + MemoryBarrierOperator() + : Operator(IrOpcode::kMemoryBarrier, + Operator::kNoDeopt | Operator::kNoThrow, "MemoryBarrier", 0, + 1, 1, 0, 1, 0) {} + }; + MemoryBarrierOperator kMemoryBarrier; + // The {BitcastWordToTagged} operator must not be marked as pure (especially // not idempotent), because otherwise the splitting logic in the Scheduler // might decide to split these operators, thus potentially creating live @@ -807,12 +839,12 @@ struct MachineOperatorGlobalCache { }; Word64PoisonOnSpeculation kWord64PoisonOnSpeculation; - struct DebugAbortOperator : public Operator { - DebugAbortOperator() - : Operator(IrOpcode::kDebugAbort, Operator::kNoThrow, "DebugAbort", 1, - 1, 1, 0, 1, 0) {} + struct AbortCSAAssertOperator : public Operator { + AbortCSAAssertOperator() + : Operator(IrOpcode::kAbortCSAAssert, Operator::kNoThrow, + "AbortCSAAssert", 1, 1, 1, 0, 1, 0) {} }; - DebugAbortOperator kDebugAbort; + AbortCSAAssertOperator kAbortCSAAssert; struct DebugBreakOperator : public Operator { 
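// Editor's sketch (illustrative, not part of the patch): the new MemoryBarrier
// operator above models a full fence at the machine level. The closest
// portable C++ analogue to what such a node lowers to:
#include <atomic>

inline void FullMemoryBarrier() {
  std::atomic_thread_fence(std::memory_order_seq_cst);  // e.g. mfence on x64
}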
DebugBreakOperator() @@ -1005,8 +1037,8 @@ const Operator* MachineOperatorBuilder::BitcastMaybeObjectToWord() { return &cache_.kBitcastMaybeObjectToWord; } -const Operator* MachineOperatorBuilder::DebugAbort() { - return &cache_.kDebugAbort; +const Operator* MachineOperatorBuilder::AbortCSAAssert() { + return &cache_.kAbortCSAAssert; } const Operator* MachineOperatorBuilder::DebugBreak() { @@ -1017,6 +1049,10 @@ const Operator* MachineOperatorBuilder::Comment(const char* msg) { return new (zone_) CommentOperator(msg); } +const Operator* MachineOperatorBuilder::MemBarrier() { + return &cache_.kMemoryBarrier; +} + const Operator* MachineOperatorBuilder::Word32AtomicLoad( LoadRepresentation rep) { #define LOAD(Type) \ @@ -1300,6 +1336,11 @@ const Operator* MachineOperatorBuilder::S8x16Shuffle( 2, 0, 0, 1, 0, 0, array); } +const uint8_t* S8x16ShuffleOf(Operator const* op) { + DCHECK_EQ(IrOpcode::kS8x16Shuffle, op->opcode()); + return OpParameter(op); +} + #undef PURE_BINARY_OP_LIST_32 #undef PURE_BINARY_OP_LIST_64 #undef MACHINE_PURE_OP_LIST diff --git a/deps/v8/src/compiler/machine-operator.h b/deps/v8/src/compiler/machine-operator.h index 8b1250dd30553a..0f8130120693f9 100644 --- a/deps/v8/src/compiler/machine-operator.h +++ b/deps/v8/src/compiler/machine-operator.h @@ -112,6 +112,9 @@ MachineRepresentation AtomicStoreRepresentationOf(Operator const* op) MachineType AtomicOpType(Operator const* op) V8_WARN_UNUSED_RESULT; +V8_EXPORT_PRIVATE const uint8_t* S8x16ShuffleOf(Operator const* op) + V8_WARN_UNUSED_RESULT; + // Interface for building machine-level operators. These operators are // machine-level but machine-independent and thus define a language suitable // for generating code to run on architectures such as ia32, x64, arm, etc. @@ -216,7 +219,7 @@ class V8_EXPORT_PRIVATE MachineOperatorBuilder final AlignmentRequirements::FullUnalignedAccessSupport()); const Operator* Comment(const char* msg); - const Operator* DebugAbort(); + const Operator* AbortCSAAssert(); const Operator* DebugBreak(); const Operator* UnsafePointerAdd(); @@ -295,9 +298,12 @@ class V8_EXPORT_PRIVATE MachineOperatorBuilder final const Operator* Uint64LessThanOrEqual(); const Operator* Uint64Mod(); - // This operator reinterprets the bits of a tagged pointer as word. + // This operator reinterprets the bits of a tagged pointer as a word. const Operator* BitcastTaggedToWord(); + // This operator reinterprets the bits of a Smi as a word. + const Operator* BitcastTaggedSignedToWord(); + // This operator reinterprets the bits of a tagged MaybeObject pointer as // word. const Operator* BitcastMaybeObjectToWord(); @@ -462,6 +468,16 @@ class V8_EXPORT_PRIVATE MachineOperatorBuilder final const Operator* Float64SilenceNaN(); // SIMD operators. 
+ const Operator* F64x2Splat(); + const Operator* F64x2Abs(); + const Operator* F64x2Neg(); + const Operator* F64x2ExtractLane(int32_t); + const Operator* F64x2ReplaceLane(int32_t); + const Operator* F64x2Eq(); + const Operator* F64x2Ne(); + const Operator* F64x2Lt(); + const Operator* F64x2Le(); + const Operator* F32x4Splat(); const Operator* F32x4ExtractLane(int32_t); const Operator* F32x4ReplaceLane(int32_t); @@ -483,6 +499,23 @@ class V8_EXPORT_PRIVATE MachineOperatorBuilder final const Operator* F32x4Lt(); const Operator* F32x4Le(); + const Operator* I64x2Splat(); + const Operator* I64x2ExtractLane(int32_t); + const Operator* I64x2ReplaceLane(int32_t); + const Operator* I64x2Neg(); + const Operator* I64x2Shl(int32_t); + const Operator* I64x2ShrS(int32_t); + const Operator* I64x2Add(); + const Operator* I64x2Sub(); + const Operator* I64x2Mul(); + const Operator* I64x2Eq(); + const Operator* I64x2Ne(); + const Operator* I64x2GtS(); + const Operator* I64x2GeS(); + const Operator* I64x2ShrU(int32_t); + const Operator* I64x2GtU(); + const Operator* I64x2GeU(); + const Operator* I32x4Splat(); const Operator* I32x4ExtractLane(int32_t); const Operator* I32x4ReplaceLane(int32_t); @@ -585,6 +618,8 @@ class V8_EXPORT_PRIVATE MachineOperatorBuilder final const Operator* S8x16Shuffle(const uint8_t shuffle[16]); + const Operator* S1x2AnyTrue(); + const Operator* S1x2AllTrue(); const Operator* S1x4AnyTrue(); const Operator* S1x4AllTrue(); const Operator* S1x8AnyTrue(); @@ -620,6 +655,9 @@ class V8_EXPORT_PRIVATE MachineOperatorBuilder final const Operator* LoadFramePointer(); const Operator* LoadParentFramePointer(); + // Memory barrier. + const Operator* MemBarrier(); + // atomic-load [base + index] const Operator* Word32AtomicLoad(LoadRepresentation rep); // atomic-load [base + index] diff --git a/deps/v8/src/compiler/map-inference.cc b/deps/v8/src/compiler/map-inference.cc index f43ba0d15536f8..07ac95b4f7a539 100644 --- a/deps/v8/src/compiler/map-inference.cc +++ b/deps/v8/src/compiler/map-inference.cc @@ -19,7 +19,7 @@ MapInference::MapInference(JSHeapBroker* broker, Node* object, Node* effect) : broker_(broker), object_(object) { ZoneHandleSet maps; auto result = - NodeProperties::InferReceiverMaps(broker_, object_, effect, &maps); + NodeProperties::InferReceiverMapsUnsafe(broker_, object_, effect, &maps); maps_.insert(maps_.end(), maps.begin(), maps.end()); maps_state_ = (result == NodeProperties::kUnreliableReceiverMaps) ? kUnreliableDontNeedGuard @@ -65,21 +65,25 @@ bool MapInference::AllOfInstanceTypes(std::function f) { bool MapInference::AllOfInstanceTypesUnsafe( std::function f) const { - // TODO(neis): Brokerize the MapInference. 
- AllowHandleDereference allow_handle_deref; CHECK(HaveMaps()); - return std::all_of(maps_.begin(), maps_.end(), - [f](Handle map) { return f(map->instance_type()); }); + auto instance_type = [this, f](Handle map) { + MapRef map_ref(broker_, map); + return f(map_ref.instance_type()); + }; + return std::all_of(maps_.begin(), maps_.end(), instance_type); } bool MapInference::AnyOfInstanceTypesUnsafe( std::function f) const { - AllowHandleDereference allow_handle_deref; CHECK(HaveMaps()); - return std::any_of(maps_.begin(), maps_.end(), - [f](Handle map) { return f(map->instance_type()); }); + auto instance_type = [this, f](Handle map) { + MapRef map_ref(broker_, map); + return f(map_ref.instance_type()); + }; + + return std::any_of(maps_.begin(), maps_.end(), instance_type); } MapHandles const& MapInference::GetMaps() { @@ -122,7 +126,10 @@ bool MapInference::RelyOnMapsHelper(CompilationDependencies* dependencies, const VectorSlotPair& feedback) { if (Safe()) return true; - auto is_stable = [](Handle map) { return map->is_stable(); }; + auto is_stable = [this](Handle map) { + MapRef map_ref(broker_, map); + return map_ref.is_stable(); + }; if (dependencies != nullptr && std::all_of(maps_.cbegin(), maps_.cend(), is_stable)) { for (Handle map : maps_) { diff --git a/deps/v8/src/compiler/memory-optimizer.cc b/deps/v8/src/compiler/memory-optimizer.cc index 29cbb4d26c2e6d..368c060c1d90e8 100644 --- a/deps/v8/src/compiler/memory-optimizer.cc +++ b/deps/v8/src/compiler/memory-optimizer.cc @@ -5,6 +5,7 @@ #include "src/compiler/memory-optimizer.h" #include "src/codegen/interface-descriptors.h" +#include "src/codegen/tick-counter.h" #include "src/compiler/js-graph.h" #include "src/compiler/linkage.h" #include "src/compiler/node-matchers.h" @@ -20,7 +21,8 @@ namespace compiler { MemoryOptimizer::MemoryOptimizer(JSGraph* jsgraph, Zone* zone, PoisoningMitigationLevel poisoning_level, AllocationFolding allocation_folding, - const char* function_debug_name) + const char* function_debug_name, + TickCounter* tick_counter) : jsgraph_(jsgraph), empty_state_(AllocationState::Empty(zone)), pending_(zone), @@ -29,7 +31,8 @@ MemoryOptimizer::MemoryOptimizer(JSGraph* jsgraph, Zone* zone, graph_assembler_(jsgraph, nullptr, nullptr, zone), poisoning_level_(poisoning_level), allocation_folding_(allocation_folding), - function_debug_name_(function_debug_name) {} + function_debug_name_(function_debug_name), + tick_counter_(tick_counter) {} void MemoryOptimizer::Optimize() { EnqueueUses(graph()->start(), empty_state()); @@ -99,7 +102,7 @@ bool CanAllocate(const Node* node) { case IrOpcode::kBitcastTaggedToWord: case IrOpcode::kBitcastWordToTagged: case IrOpcode::kComment: - case IrOpcode::kDebugAbort: + case IrOpcode::kAbortCSAAssert: case IrOpcode::kDebugBreak: case IrOpcode::kDeoptimizeIf: case IrOpcode::kDeoptimizeUnless: @@ -108,6 +111,7 @@ bool CanAllocate(const Node* node) { case IrOpcode::kLoad: case IrOpcode::kLoadElement: case IrOpcode::kLoadField: + case IrOpcode::kLoadFromObject: case IrOpcode::kPoisonedLoad: case IrOpcode::kProtectedLoad: case IrOpcode::kProtectedStore: @@ -118,6 +122,7 @@ bool CanAllocate(const Node* node) { case IrOpcode::kStore: case IrOpcode::kStoreElement: case IrOpcode::kStoreField: + case IrOpcode::kStoreToObject: case IrOpcode::kTaggedPoisonOnSpeculation: case IrOpcode::kUnalignedLoad: case IrOpcode::kUnalignedStore: @@ -214,6 +219,7 @@ Node* EffectPhiForPhi(Node* phi) { } // namespace void MemoryOptimizer::VisitNode(Node* node, AllocationState const* state) { + 
tick_counter_->DoTick(); DCHECK(!node->IsDead()); DCHECK_LT(0, node->op()->EffectInputCount()); switch (node->opcode()) { @@ -296,6 +302,21 @@ void MemoryOptimizer::VisitAllocateRaw(Node* node, } } + Node* allocate_builtin; + if (allocation_type == AllocationType::kYoung) { + if (allocation.allow_large_objects() == AllowLargeObjects::kTrue) { + allocate_builtin = __ AllocateInYoungGenerationStubConstant(); + } else { + allocate_builtin = __ AllocateRegularInYoungGenerationStubConstant(); + } + } else { + if (allocation.allow_large_objects() == AllowLargeObjects::kTrue) { + allocate_builtin = __ AllocateInOldGenerationStubConstant(); + } else { + allocate_builtin = __ AllocateRegularInOldGenerationStubConstant(); + } + } + // Determine the top/limit addresses. Node* top_address = __ ExternalConstant( allocation_type == AllocationType::kYoung @@ -371,11 +392,6 @@ void MemoryOptimizer::VisitAllocateRaw(Node* node, __ Bind(&call_runtime); { - Node* target = allocation_type == AllocationType::kYoung - ? __ - AllocateInYoungGenerationStubConstant() - : __ - AllocateInOldGenerationStubConstant(); if (!allocate_operator_.is_set()) { auto descriptor = AllocateDescriptor{}; auto call_descriptor = Linkage::GetStubCallDescriptor( @@ -384,7 +400,7 @@ void MemoryOptimizer::VisitAllocateRaw(Node* node, allocate_operator_.set(common()->Call(call_descriptor)); } Node* vfalse = __ BitcastTaggedToWord( - __ Call(allocate_operator_.get(), target, size)); + __ Call(allocate_operator_.get(), allocate_builtin, size)); vfalse = __ IntSub(vfalse, __ IntPtrConstant(kHeapObjectTag)); __ Goto(&done, vfalse); } @@ -434,11 +450,6 @@ void MemoryOptimizer::VisitAllocateRaw(Node* node, __ IntAdd(top, __ IntPtrConstant(kHeapObjectTag)))); __ Bind(&call_runtime); - Node* target = allocation_type == AllocationType::kYoung - ? 
__ - AllocateInYoungGenerationStubConstant() - : __ - AllocateInOldGenerationStubConstant(); if (!allocate_operator_.is_set()) { auto descriptor = AllocateDescriptor{}; auto call_descriptor = Linkage::GetStubCallDescriptor( @@ -446,7 +457,7 @@ void MemoryOptimizer::VisitAllocateRaw(Node* node, CallDescriptor::kCanUseRoots, Operator::kNoThrow); allocate_operator_.set(common()->Call(call_descriptor)); } - __ Goto(&done, __ Call(allocate_operator_.get(), target, size)); + __ Goto(&done, __ Call(allocate_operator_.get(), allocate_builtin, size)); __ Bind(&done); value = done.PhiAt(0); @@ -483,8 +494,6 @@ void MemoryOptimizer::VisitLoadFromObject(Node* node, AllocationState const* state) { DCHECK_EQ(IrOpcode::kLoadFromObject, node->opcode()); ObjectAccess const& access = ObjectAccessOf(node->op()); - Node* offset = node->InputAt(1); - node->ReplaceInput(1, __ IntSub(offset, __ IntPtrConstant(kHeapObjectTag))); NodeProperties::ChangeOp(node, machine()->Load(access.machine_type)); EnqueueUses(node, state); } @@ -494,9 +503,7 @@ void MemoryOptimizer::VisitStoreToObject(Node* node, DCHECK_EQ(IrOpcode::kStoreToObject, node->opcode()); ObjectAccess const& access = ObjectAccessOf(node->op()); Node* object = node->InputAt(0); - Node* offset = node->InputAt(1); Node* value = node->InputAt(2); - node->ReplaceInput(1, __ IntSub(offset, __ IntPtrConstant(kHeapObjectTag))); WriteBarrierKind write_barrier_kind = ComputeWriteBarrierKind( node, object, value, state, access.write_barrier_kind); NodeProperties::ChangeOp( diff --git a/deps/v8/src/compiler/memory-optimizer.h b/deps/v8/src/compiler/memory-optimizer.h index cbefcb67de4fb7..71f33fa3d7d7df 100644 --- a/deps/v8/src/compiler/memory-optimizer.h +++ b/deps/v8/src/compiler/memory-optimizer.h @@ -10,6 +10,9 @@ namespace v8 { namespace internal { + +class TickCounter; + namespace compiler { // Forward declarations. 
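The new TickCounter* tick_counter parameter is part of a pattern applied throughout this patch: long-running phases receive a counter and tick it in their hot loops (see the tick_counter_->DoTick() added at the top of MemoryOptimizer::VisitNode above) so the compiler can observe, and eventually bound, compilation progress. A rough sketch of the shape with simplified types; the real class lives in src/codegen/tick-counter.h and this stand-in only mirrors its role:

```cpp
#include <cstddef>

// Simplified stand-in for v8::internal::TickCounter.
class TickCounter {
 public:
  void DoTick() { ++ticks_; }  // the real class can also report progress
  size_t ticks() const { return ticks_; }

 private:
  size_t ticks_ = 0;
};

// Phases store the counter by pointer and tick once per unit of work,
// mirroring the MemoryOptimizer constructor change above.
class PhaseLikeMemoryOptimizer {
 public:
  explicit PhaseLikeMemoryOptimizer(TickCounter* tick_counter)
      : tick_counter_(tick_counter) {}

  void VisitNode(/* Node* node, AllocationState const* state */) {
    tick_counter_->DoTick();
    // ... per-node optimization work ...
  }

 private:
  TickCounter* const tick_counter_;
};
```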
@@ -36,7 +39,7 @@ class MemoryOptimizer final { MemoryOptimizer(JSGraph* jsgraph, Zone* zone, PoisoningMitigationLevel poisoning_level, AllocationFolding allocation_folding, - const char* function_debug_name); + const char* function_debug_name, TickCounter* tick_counter); ~MemoryOptimizer() = default; void Optimize(); @@ -158,6 +161,7 @@ class MemoryOptimizer final { PoisoningMitigationLevel poisoning_level_; AllocationFolding allocation_folding_; const char* function_debug_name_; + TickCounter* const tick_counter_; DISALLOW_IMPLICIT_CONSTRUCTORS(MemoryOptimizer); }; diff --git a/deps/v8/src/compiler/node-properties.cc b/deps/v8/src/compiler/node-properties.cc index d6528c553a149c..1e00ec00f48a29 100644 --- a/deps/v8/src/compiler/node-properties.cc +++ b/deps/v8/src/compiler/node-properties.cc @@ -5,6 +5,7 @@ #include "src/compiler/node-properties.h" #include "src/compiler/common-operator.h" #include "src/compiler/graph.h" +#include "src/compiler/js-heap-broker.h" #include "src/compiler/js-operator.h" #include "src/compiler/linkage.h" #include "src/compiler/map-inference.h" @@ -392,7 +393,7 @@ base::Optional NodeProperties::GetJSCreateMap(JSHeapBroker* broker, } // static -NodeProperties::InferReceiverMapsResult NodeProperties::InferReceiverMaps( +NodeProperties::InferReceiverMapsResult NodeProperties::InferReceiverMapsUnsafe( JSHeapBroker* broker, Node* receiver, Node* effect, ZoneHandleSet* maps_return) { HeapObjectMatcher m(receiver); diff --git a/deps/v8/src/compiler/node-properties.h b/deps/v8/src/compiler/node-properties.h index 4a23b6781d9b8c..a660fe70220670 100644 --- a/deps/v8/src/compiler/node-properties.h +++ b/deps/v8/src/compiler/node-properties.h @@ -151,7 +151,8 @@ class V8_EXPORT_PRIVATE NodeProperties final { kReliableReceiverMaps, // Receiver maps can be trusted. kUnreliableReceiverMaps // Receiver maps might have changed (side-effect). }; - static InferReceiverMapsResult InferReceiverMaps( + // DO NOT USE InferReceiverMapsUnsafe IN NEW CODE. Use MapInference instead. + static InferReceiverMapsResult InferReceiverMapsUnsafe( JSHeapBroker* broker, Node* receiver, Node* effect, ZoneHandleSet* maps_return); diff --git a/deps/v8/src/compiler/node.cc b/deps/v8/src/compiler/node.cc index 50cfdf62486bf4..7688379e9f317d 100644 --- a/deps/v8/src/compiler/node.cc +++ b/deps/v8/src/compiler/node.cc @@ -303,7 +303,13 @@ void Node::Print() const { void Node::Print(std::ostream& os) const { os << *this << std::endl; for (Node* input : this->inputs()) { - os << " " << *input << std::endl; + os << " "; + if (input) { + os << *input; + } else { + os << "(NULL)"; + } + os << std::endl; } } diff --git a/deps/v8/src/compiler/opcodes.h b/deps/v8/src/compiler/opcodes.h index 9ac8ec581f31b3..d621e23e3a3c42 100644 --- a/deps/v8/src/compiler/opcodes.h +++ b/deps/v8/src/compiler/opcodes.h @@ -45,6 +45,7 @@ V(NumberConstant) \ V(PointerConstant) \ V(HeapConstant) \ + V(CompressedHeapConstant) \ V(RelocatableInt32Constant) \ V(RelocatableInt64Constant) @@ -231,6 +232,7 @@ // Opcodes for VirtualMachine-level operators.
#define SIMPLIFIED_CHANGE_OP_LIST(V) \ + V(ChangeCompressedSignedToInt32) \ V(ChangeTaggedSignedToInt32) \ V(ChangeTaggedSignedToInt64) \ V(ChangeTaggedToInt32) \ @@ -240,6 +242,7 @@ V(ChangeTaggedToTaggedSigned) \ V(ChangeCompressedToTaggedSigned) \ V(ChangeTaggedToCompressedSigned) \ + V(ChangeInt31ToCompressedSigned) \ V(ChangeInt31ToTaggedSigned) \ V(ChangeInt32ToTagged) \ V(ChangeInt64ToTagged) \ @@ -249,6 +252,8 @@ V(ChangeFloat64ToTaggedPointer) \ V(ChangeTaggedToBit) \ V(ChangeBitToTagged) \ + V(ChangeUint64ToBigInt) \ + V(TruncateBigIntToUint64) \ V(TruncateTaggedToWord32) \ V(TruncateTaggedToFloat64) \ V(TruncateTaggedToBit) \ @@ -262,6 +267,7 @@ V(CheckedUint32Div) \ V(CheckedUint32Mod) \ V(CheckedInt32Mul) \ + V(CheckedInt32ToCompressedSigned) \ V(CheckedInt32ToTaggedSigned) \ V(CheckedInt64ToInt32) \ V(CheckedInt64ToTaggedSigned) \ @@ -318,6 +324,8 @@ V(NumberMin) \ V(NumberPow) +#define SIMPLIFIED_BIGINT_BINOP_LIST(V) V(BigIntAdd) + #define SIMPLIFIED_SPECULATIVE_NUMBER_BINOP_LIST(V) \ V(SpeculativeNumberAdd) \ V(SpeculativeNumberSubtract) \ @@ -369,6 +377,11 @@ V(NumberToUint8Clamped) \ V(NumberSilenceNaN) +#define SIMPLIFIED_BIGINT_UNOP_LIST(V) \ + V(BigIntAsUintN) \ + V(BigIntNegate) \ + V(CheckBigInt) + #define SIMPLIFIED_SPECULATIVE_NUMBER_UNOP_LIST(V) V(SpeculativeToNumber) #define SIMPLIFIED_OTHER_OP_LIST(V) \ @@ -382,6 +395,7 @@ V(StringCodePointAt) \ V(StringFromSingleCharCode) \ V(StringFromSingleCodePoint) \ + V(StringFromCodePointAt) \ V(StringIndexOf) \ V(StringLength) \ V(StringToLowerCaseIntl) \ @@ -461,16 +475,24 @@ V(FindOrderedHashMapEntryForInt32Key) \ V(PoisonIndex) \ V(RuntimeAbort) \ + V(AssertType) \ V(DateNow) +#define SIMPLIFIED_SPECULATIVE_BIGINT_BINOP_LIST(V) V(SpeculativeBigIntAdd) +#define SIMPLIFIED_SPECULATIVE_BIGINT_UNOP_LIST(V) V(SpeculativeBigIntNegate) + #define SIMPLIFIED_OP_LIST(V) \ SIMPLIFIED_CHANGE_OP_LIST(V) \ SIMPLIFIED_CHECKED_OP_LIST(V) \ SIMPLIFIED_COMPARE_BINOP_LIST(V) \ SIMPLIFIED_NUMBER_BINOP_LIST(V) \ + SIMPLIFIED_BIGINT_BINOP_LIST(V) \ SIMPLIFIED_SPECULATIVE_NUMBER_BINOP_LIST(V) \ SIMPLIFIED_NUMBER_UNOP_LIST(V) \ + SIMPLIFIED_BIGINT_UNOP_LIST(V) \ SIMPLIFIED_SPECULATIVE_NUMBER_UNOP_LIST(V) \ + SIMPLIFIED_SPECULATIVE_BIGINT_UNOP_LIST(V) \ + SIMPLIFIED_SPECULATIVE_BIGINT_BINOP_LIST(V) \ SIMPLIFIED_OTHER_OP_LIST(V) // Opcodes for Machine-level operators. 
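All of these *_OP_LIST(V) definitions are X-macro lists: each list applies a caller-supplied macro V to every entry, so a single list definition can stamp out the opcode enum, mnemonic strings, and builder declarations without repeating the entries by hand — which is why adding the BigInt and SIMD opcodes above only touches list entries. A self-contained sketch of the idiom; DEMO_OP_LIST and its entries are made up for illustration:

```cpp
#include <cstdio>

#define DEMO_OP_LIST(V) \
  V(Add)                \
  V(Sub)                \
  V(Mul)

enum class DemoOpcode {
#define DECLARE_OPCODE(Name) k##Name,
  DEMO_OP_LIST(DECLARE_OPCODE)
#undef DECLARE_OPCODE
};

// The same list generates the mnemonic table, so the two can never drift.
const char* DemoOpcodeName(DemoOpcode opcode) {
  switch (opcode) {
#define OPCODE_CASE(Name)   \
  case DemoOpcode::k##Name: \
    return #Name;
    DEMO_OP_LIST(OPCODE_CASE)
#undef OPCODE_CASE
  }
  return "Unknown";
}

int main() { std::printf("%s\n", DemoOpcodeName(DemoOpcode::kSub)); }
```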
@@ -616,7 +638,7 @@ MACHINE_FLOAT64_BINOP_LIST(V) \ MACHINE_FLOAT64_UNOP_LIST(V) \ MACHINE_WORD64_ATOMIC_OP_LIST(V) \ - V(DebugAbort) \ + V(AbortCSAAssert) \ V(DebugBreak) \ V(Comment) \ V(Load) \ @@ -631,6 +653,7 @@ V(Word64ReverseBytes) \ V(Int64AbsWithOverflow) \ V(BitcastTaggedToWord) \ + V(BitcastTaggedSignedToWord) \ V(BitcastWordToTagged) \ V(BitcastWordToTaggedSigned) \ V(TruncateFloat64ToWord32) \ @@ -692,6 +715,7 @@ V(Word32PairSar) \ V(ProtectedLoad) \ V(ProtectedStore) \ + V(MemoryBarrier) \ V(Word32AtomicLoad) \ V(Word32AtomicStore) \ V(Word32AtomicExchange) \ @@ -718,6 +742,15 @@ V(UnsafePointerAdd) #define MACHINE_SIMD_OP_LIST(V) \ + V(F64x2Splat) \ + V(F64x2ExtractLane) \ + V(F64x2ReplaceLane) \ + V(F64x2Abs) \ + V(F64x2Neg) \ + V(F64x2Eq) \ + V(F64x2Ne) \ + V(F64x2Lt) \ + V(F64x2Le) \ V(F32x4Splat) \ V(F32x4ExtractLane) \ V(F32x4ReplaceLane) \ @@ -739,6 +772,22 @@ V(F32x4Le) \ V(F32x4Gt) \ V(F32x4Ge) \ + V(I64x2Splat) \ + V(I64x2ExtractLane) \ + V(I64x2ReplaceLane) \ + V(I64x2Neg) \ + V(I64x2Shl) \ + V(I64x2ShrS) \ + V(I64x2Add) \ + V(I64x2Sub) \ + V(I64x2Mul) \ + V(I64x2Eq) \ + V(I64x2Ne) \ + V(I64x2GtS) \ + V(I64x2GeS) \ + V(I64x2ShrU) \ + V(I64x2GtU) \ + V(I64x2GeU) \ V(I32x4Splat) \ V(I32x4ExtractLane) \ V(I32x4ReplaceLane) \ @@ -844,6 +893,8 @@ V(S128Xor) \ V(S128Select) \ V(S8x16Shuffle) \ + V(S1x2AnyTrue) \ + V(S1x2AllTrue) \ V(S1x4AnyTrue) \ V(S1x4AllTrue) \ V(S1x8AnyTrue) \ diff --git a/deps/v8/src/compiler/operation-typer.cc b/deps/v8/src/compiler/operation-typer.cc index 475623f76b3781..8cb991ceb73c5c 100644 --- a/deps/v8/src/compiler/operation-typer.cc +++ b/deps/v8/src/compiler/operation-typer.cc @@ -5,6 +5,7 @@ #include "src/compiler/operation-typer.h" #include "src/compiler/common-operator.h" +#include "src/compiler/js-heap-broker.h" #include "src/compiler/type-cache.h" #include "src/compiler/types.h" #include "src/execution/isolate.h" @@ -259,7 +260,8 @@ Type OperationTyper::ConvertReceiver(Type type) { type = Type::Intersect(type, Type::Receiver(), zone()); if (maybe_primitive) { // ConvertReceiver maps null and undefined to the JSGlobalProxy of the - // target function, and all other primitives are wrapped into a JSValue. + // target function, and all other primitives are wrapped into a + // JSPrimitiveWrapper. 
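The F64x2* and I64x2* entries added to MACHINE_SIMD_OP_LIST above extend the SIMD opcode set from 32-bit to 64-bit lanes: a 128-bit vector now also comes in a two-lane shape. Rough lane-wise semantics of a few F64x2 operations, modeled in plain C++ for illustration only — the real operators lower to hardware SIMD instructions:

```cpp
#include <array>
#include <cmath>
#include <cstdio>

using F64x2 = std::array<double, 2>;  // one 128-bit vector, two f64 lanes

F64x2 F64x2Splat(double x) { return {x, x}; }
double F64x2ExtractLane(const F64x2& v, int lane) { return v[lane]; }
F64x2 F64x2Abs(const F64x2& v) { return {std::fabs(v[0]), std::fabs(v[1])}; }
F64x2 F64x2Neg(const F64x2& v) { return {-v[0], -v[1]}; }

int main() {
  F64x2 v = F64x2Splat(-1.5);
  std::printf("%g %g\n", F64x2Abs(v)[0], F64x2ExtractLane(F64x2Neg(v), 1));
  // prints: 1.5 1.5
}
```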
type = Type::Union(type, Type::OtherObject(), zone()); } return type; @@ -577,6 +579,13 @@ Type OperationTyper::NumberSilenceNaN(Type type) { return type; } +Type OperationTyper::BigIntAsUintN(Type type) { + DCHECK(type.Is(Type::BigInt())); + return Type::BigInt(); +} + +Type OperationTyper::CheckBigInt(Type type) { return Type::BigInt(); } + Type OperationTyper::NumberAdd(Type lhs, Type rhs) { DCHECK(lhs.Is(Type::Number())); DCHECK(rhs.Is(Type::Number())); @@ -1111,6 +1120,26 @@ SPECULATIVE_NUMBER_BINOP(NumberShiftRight) SPECULATIVE_NUMBER_BINOP(NumberShiftRightLogical) #undef SPECULATIVE_NUMBER_BINOP +Type OperationTyper::BigIntAdd(Type lhs, Type rhs) { + if (lhs.IsNone() || rhs.IsNone()) return Type::None(); + return Type::BigInt(); +} + +Type OperationTyper::BigIntNegate(Type type) { + if (type.IsNone()) return type; + return Type::BigInt(); +} + +Type OperationTyper::SpeculativeBigIntAdd(Type lhs, Type rhs) { + if (lhs.IsNone() || rhs.IsNone()) return Type::None(); + return Type::BigInt(); +} + +Type OperationTyper::SpeculativeBigIntNegate(Type type) { + if (type.IsNone()) return type; + return Type::BigInt(); +} + Type OperationTyper::SpeculativeToNumber(Type type) { return ToNumber(Type::Intersect(type, Type::NumberOrOddball(), zone())); } diff --git a/deps/v8/src/compiler/operation-typer.h b/deps/v8/src/compiler/operation-typer.h index a905662ad17bdb..728e297a1b218b 100644 --- a/deps/v8/src/compiler/operation-typer.h +++ b/deps/v8/src/compiler/operation-typer.h @@ -43,14 +43,18 @@ class V8_EXPORT_PRIVATE OperationTyper { // Unary operators. #define DECLARE_METHOD(Name) Type Name(Type type); SIMPLIFIED_NUMBER_UNOP_LIST(DECLARE_METHOD) + SIMPLIFIED_BIGINT_UNOP_LIST(DECLARE_METHOD) SIMPLIFIED_SPECULATIVE_NUMBER_UNOP_LIST(DECLARE_METHOD) + SIMPLIFIED_SPECULATIVE_BIGINT_UNOP_LIST(DECLARE_METHOD) DECLARE_METHOD(ConvertReceiver) #undef DECLARE_METHOD -// Number binary operators. +// Numeric binary operators. #define DECLARE_METHOD(Name) Type Name(Type lhs, Type rhs); SIMPLIFIED_NUMBER_BINOP_LIST(DECLARE_METHOD) + SIMPLIFIED_BIGINT_BINOP_LIST(DECLARE_METHOD) SIMPLIFIED_SPECULATIVE_NUMBER_BINOP_LIST(DECLARE_METHOD) + SIMPLIFIED_SPECULATIVE_BIGINT_BINOP_LIST(DECLARE_METHOD) #undef DECLARE_METHOD // Comparison operators. 
diff --git a/deps/v8/src/compiler/pipeline.cc b/deps/v8/src/compiler/pipeline.cc index e771cef1230836..eb060b71e1fcd3 100644 --- a/deps/v8/src/compiler/pipeline.cc +++ b/deps/v8/src/compiler/pipeline.cc @@ -16,6 +16,7 @@ #include "src/codegen/compiler.h" #include "src/codegen/optimized-compilation-info.h" #include "src/codegen/register-configuration.h" +#include "src/compiler/add-type-assertions-reducer.h" #include "src/compiler/backend/code-generator.h" #include "src/compiler/backend/frame-elider.h" #include "src/compiler/backend/instruction-selector.h" @@ -34,6 +35,7 @@ #include "src/compiler/compiler-source-position-table.h" #include "src/compiler/constant-folding-reducer.h" #include "src/compiler/control-flow-optimizer.h" +#include "src/compiler/csa-load-elimination.h" #include "src/compiler/dead-code-elimination.h" #include "src/compiler/decompression-elimination.h" #include "src/compiler/effect-control-linearizer.h" @@ -114,7 +116,8 @@ class PipelineData { instruction_zone_(instruction_zone_scope_.zone()), codegen_zone_scope_(zone_stats_, ZONE_NAME), codegen_zone_(codegen_zone_scope_.zone()), - broker_(new JSHeapBroker(isolate_, info_->zone())), + broker_(new JSHeapBroker(isolate_, info_->zone(), + info_->trace_heap_broker_enabled())), register_allocation_zone_scope_(zone_stats_, ZONE_NAME), register_allocation_zone_(register_allocation_zone_scope_.zone()), assembler_options_(AssemblerOptions::Default(isolate)) { @@ -266,7 +269,7 @@ class PipelineData { JSOperatorBuilder* javascript() const { return javascript_; } JSGraph* jsgraph() const { return jsgraph_; } MachineGraph* mcgraph() const { return mcgraph_; } - Handle native_context() const { + Handle native_context() const { return handle(info()->native_context(), isolate()); } Handle global_object() const { @@ -324,7 +327,8 @@ class PipelineData { Typer* CreateTyper() { DCHECK_NULL(typer_); - typer_ = new Typer(broker(), typer_flags_, graph()); + typer_ = + new Typer(broker(), typer_flags_, graph(), &info()->tick_counter()); return typer_; } @@ -397,7 +401,8 @@ class PipelineData { DCHECK_NULL(frame_); int fixed_frame_size = 0; if (call_descriptor != nullptr) { - fixed_frame_size = call_descriptor->CalculateFixedFrameSize(); + fixed_frame_size = + call_descriptor->CalculateFixedFrameSize(info()->code_kind()); } frame_ = new (codegen_zone()) Frame(fixed_frame_size); } @@ -408,7 +413,8 @@ class PipelineData { DCHECK_NULL(register_allocation_data_); register_allocation_data_ = new (register_allocation_zone()) RegisterAllocationData(config, register_allocation_zone(), frame(), - sequence(), flags, debug_name()); + sequence(), flags, &info()->tick_counter(), + debug_name()); } void InitializeOsrHelper() { @@ -1040,6 +1046,119 @@ void PipelineCompilationJob::RegisterWeakObjectsInOptimizedCode( code->set_can_have_weak_objects(true); } +class WasmHeapStubCompilationJob final : public OptimizedCompilationJob { + public: + WasmHeapStubCompilationJob(Isolate* isolate, CallDescriptor* call_descriptor, + std::unique_ptr zone, Graph* graph, + Code::Kind kind, + std::unique_ptr debug_name, + const AssemblerOptions& options, + SourcePositionTable* source_positions) + // Note that the OptimizedCompilationInfo is not initialized at the time + // we pass it to the CompilationJob constructor, but it is not + // dereferenced there. 
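WasmHeapStubCompilationJob plugs into the three-phase protocol of OptimizedCompilationJob: PrepareJobImpl runs on the main thread (tracing, statistics), ExecuteJobImpl performs scheduling and instruction selection and may run off the main thread, and FinalizeJobImpl materializes the Code object back on the main thread. A stripped-down sketch of that contract, with simplified types rather than V8's actual declarations:

```cpp
enum class Status { kSucceeded, kFailed };

class CompilationJobSketch {
 public:
  virtual ~CompilationJobSketch() = default;

  Status Run() {
    // Phase order is the whole contract: prepare, execute, finalize.
    if (PrepareJobImpl() == Status::kFailed) return Status::kFailed;
    if (ExecuteJobImpl() == Status::kFailed) return Status::kFailed;
    return FinalizeJobImpl();
  }

 protected:
  virtual Status PrepareJobImpl() = 0;   // main thread: set up tracing
  virtual Status ExecuteJobImpl() = 0;   // may run on a background thread
  virtual Status FinalizeJobImpl() = 0;  // main thread: emit the Code object
};
```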
+ : OptimizedCompilationJob(isolate->stack_guard()->real_climit(), &info_, + "TurboFan"), + debug_name_(std::move(debug_name)), + info_(CStrVector(debug_name_.get()), graph->zone(), kind), + call_descriptor_(call_descriptor), + zone_stats_(isolate->allocator()), + zone_(std::move(zone)), + graph_(graph), + data_(&zone_stats_, &info_, isolate, graph_, nullptr, source_positions, + new (zone_.get()) NodeOriginTable(graph_), nullptr, options), + pipeline_(&data_) {} + + ~WasmHeapStubCompilationJob() = default; + + protected: + Status PrepareJobImpl(Isolate* isolate) final; + Status ExecuteJobImpl() final; + Status FinalizeJobImpl(Isolate* isolate) final; + + private: + std::unique_ptr debug_name_; + OptimizedCompilationInfo info_; + CallDescriptor* call_descriptor_; + ZoneStats zone_stats_; + std::unique_ptr zone_; + Graph* graph_; + PipelineData data_; + PipelineImpl pipeline_; + + DISALLOW_COPY_AND_ASSIGN(WasmHeapStubCompilationJob); +}; + +// static +std::unique_ptr +Pipeline::NewWasmHeapStubCompilationJob(Isolate* isolate, + CallDescriptor* call_descriptor, + std::unique_ptr zone, + Graph* graph, Code::Kind kind, + std::unique_ptr debug_name, + const AssemblerOptions& options, + SourcePositionTable* source_positions) { + return base::make_unique( + isolate, call_descriptor, std::move(zone), graph, kind, + std::move(debug_name), options, source_positions); +} + +CompilationJob::Status WasmHeapStubCompilationJob::PrepareJobImpl( + Isolate* isolate) { + std::unique_ptr pipeline_statistics; + if (FLAG_turbo_stats || FLAG_turbo_stats_nvp) { + pipeline_statistics.reset(new PipelineStatistics( + &info_, isolate->GetTurboStatistics(), &zone_stats_)); + pipeline_statistics->BeginPhaseKind("V8.WasmStubCodegen"); + } + if (info_.trace_turbo_json_enabled() || info_.trace_turbo_graph_enabled()) { + CodeTracer::Scope tracing_scope(data_.GetCodeTracer()); + OFStream os(tracing_scope.file()); + os << "---------------------------------------------------\n" + << "Begin compiling method " << info_.GetDebugName().get() + << " using TurboFan" << std::endl; + } + if (info_.trace_turbo_graph_enabled()) { // Simple textual RPO. + StdoutStream{} << "-- wasm stub " << Code::Kind2String(info_.code_kind()) + << " graph -- " << std::endl + << AsRPO(*data_.graph()); + } + + if (info_.trace_turbo_json_enabled()) { + TurboJsonFile json_of(&info_, std::ios_base::trunc); + json_of << "{\"function\":\"" << info_.GetDebugName().get() + << "\", \"source\":\"\",\n\"phases\":["; + } + pipeline_.RunPrintAndVerify("V8.WasmMachineCode", true); + return CompilationJob::SUCCEEDED; +} + +CompilationJob::Status WasmHeapStubCompilationJob::ExecuteJobImpl() { + pipeline_.ComputeScheduledGraph(); + if (pipeline_.SelectInstructionsAndAssemble(call_descriptor_)) { + return CompilationJob::SUCCEEDED; + } + return CompilationJob::FAILED; +} + +CompilationJob::Status WasmHeapStubCompilationJob::FinalizeJobImpl( + Isolate* isolate) { + Handle code; + if (pipeline_.FinalizeCode(call_descriptor_).ToHandle(&code) && + pipeline_.CommitDependencies(code)) { + info_.SetCode(code); +#ifdef ENABLE_DISASSEMBLER + if (FLAG_print_opt_code) { + CodeTracer::Scope tracing_scope(isolate->GetCodeTracer()); + OFStream os(tracing_scope.file()); + code->Disassemble(compilation_info()->GetDebugName().get(), os); + } +#endif + return SUCCEEDED; + } + return FAILED; +} + template void PipelineImpl::Run(Args&&... 
args) { PipelineRunScope scope(this->data_, Phase::phase_name()); @@ -1065,7 +1184,7 @@ struct GraphBuilderPhase { handle(data->info()->closure()->feedback_vector(), data->isolate()), data->info()->osr_offset(), data->jsgraph(), frequency, data->source_positions(), data->native_context(), - SourcePosition::kNotInlined, flags); + SourcePosition::kNotInlined, flags, &data->info()->tick_counter()); } }; @@ -1102,7 +1221,7 @@ struct InliningPhase { void Run(PipelineData* data, Zone* temp_zone) { Isolate* isolate = data->isolate(); OptimizedCompilationInfo* info = data->info(); - GraphReducer graph_reducer(temp_zone, data->graph(), + GraphReducer graph_reducer(temp_zone, data->graph(), &info->tick_counter(), data->jsgraph()->Dead()); DeadCodeElimination dead_code_elimination(&graph_reducer, data->graph(), data->common(), temp_zone); @@ -1196,6 +1315,7 @@ struct UntyperPhase { } GraphReducer graph_reducer(temp_zone, data->graph(), + &data->info()->tick_counter(), data->jsgraph()->Dead()); RemoveTypeReducer remove_type_reducer; AddReducer(data, &graph_reducer, &remove_type_reducer); @@ -1216,6 +1336,7 @@ struct CopyMetadataForConcurrentCompilePhase { void Run(PipelineData* data, Zone* temp_zone) { GraphReducer graph_reducer(temp_zone, data->graph(), + &data->info()->tick_counter(), data->jsgraph()->Dead()); JSHeapCopyReducer heap_copy_reducer(data->broker()); AddReducer(data, &graph_reducer, &heap_copy_reducer); @@ -1242,13 +1363,13 @@ struct SerializationPhase { if (data->info()->is_source_positions_enabled()) { flags |= SerializerForBackgroundCompilationFlag::kCollectSourcePositions; } - if (data->info()->is_osr()) { - flags |= SerializerForBackgroundCompilationFlag::kOsr; + if (data->info()->is_analyze_environment_liveness()) { + flags |= + SerializerForBackgroundCompilationFlag::kAnalyzeEnvironmentLiveness; } - SerializerForBackgroundCompilation serializer( - data->broker(), data->dependencies(), temp_zone, - data->info()->closure(), flags); - serializer.Run(); + RunSerializerForBackgroundCompilation(data->broker(), data->dependencies(), + temp_zone, data->info()->closure(), + flags, data->info()->osr_offset()); } }; @@ -1257,6 +1378,7 @@ struct TypedLoweringPhase { void Run(PipelineData* data, Zone* temp_zone) { GraphReducer graph_reducer(temp_zone, data->graph(), + &data->info()->tick_counter(), data->jsgraph()->Dead()); DeadCodeElimination dead_code_elimination(&graph_reducer, data->graph(), data->common(), temp_zone); @@ -1292,9 +1414,12 @@ struct EscapeAnalysisPhase { static const char* phase_name() { return "V8.TFEscapeAnalysis"; } void Run(PipelineData* data, Zone* temp_zone) { - EscapeAnalysis escape_analysis(data->jsgraph(), temp_zone); + EscapeAnalysis escape_analysis(data->jsgraph(), + &data->info()->tick_counter(), temp_zone); escape_analysis.ReduceGraph(); - GraphReducer reducer(temp_zone, data->graph(), data->jsgraph()->Dead()); + GraphReducer reducer(temp_zone, data->graph(), + &data->info()->tick_counter(), + data->jsgraph()->Dead()); EscapeAnalysisReducer escape_reducer(&reducer, data->jsgraph(), escape_analysis.analysis_result(), temp_zone); @@ -1305,13 +1430,28 @@ struct EscapeAnalysisPhase { } }; +struct TypeAssertionsPhase { + static const char* phase_name() { return "V8.TFTypeAssertions"; } + + void Run(PipelineData* data, Zone* temp_zone) { + GraphReducer graph_reducer(temp_zone, data->graph(), + &data->info()->tick_counter(), + data->jsgraph()->Dead()); + AddTypeAssertionsReducer type_assertions(&graph_reducer, data->jsgraph(), + temp_zone); + AddReducer(data, 
&graph_reducer, &type_assertions); + graph_reducer.ReduceGraph(); + } +}; + struct SimplifiedLoweringPhase { static const char* phase_name() { return "V8.TFSimplifiedLowering"; } void Run(PipelineData* data, Zone* temp_zone) { SimplifiedLowering lowering(data->jsgraph(), data->broker(), temp_zone, data->source_positions(), data->node_origins(), - data->info()->GetPoisoningMitigationLevel()); + data->info()->GetPoisoningMitigationLevel(), + &data->info()->tick_counter()); lowering.LowerAllNodes(); } }; @@ -1325,8 +1465,8 @@ struct LoopPeelingPhase { data->jsgraph()->GetCachedNodes(&roots); trimmer.TrimGraph(roots.begin(), roots.end()); - LoopTree* loop_tree = - LoopFinder::BuildLoopTree(data->jsgraph()->graph(), temp_zone); + LoopTree* loop_tree = LoopFinder::BuildLoopTree( + data->jsgraph()->graph(), &data->info()->tick_counter(), temp_zone); LoopPeeler(data->graph(), data->common(), loop_tree, temp_zone, data->source_positions(), data->node_origins()) .PeelInnerLoopsOfTree(); @@ -1346,6 +1486,7 @@ struct GenericLoweringPhase { void Run(PipelineData* data, Zone* temp_zone) { GraphReducer graph_reducer(temp_zone, data->graph(), + &data->info()->tick_counter(), data->jsgraph()->Dead()); JSGenericLowering generic_lowering(data->jsgraph(), &graph_reducer); AddReducer(data, &graph_reducer, &generic_lowering); @@ -1358,6 +1499,7 @@ struct EarlyOptimizationPhase { void Run(PipelineData* data, Zone* temp_zone) { GraphReducer graph_reducer(temp_zone, data->graph(), + &data->info()->tick_counter(), data->jsgraph()->Dead()); DeadCodeElimination dead_code_elimination(&graph_reducer, data->graph(), data->common(), temp_zone); @@ -1384,7 +1526,8 @@ struct ControlFlowOptimizationPhase { void Run(PipelineData* data, Zone* temp_zone) { ControlFlowOptimizer optimizer(data->graph(), data->common(), - data->machine(), temp_zone); + data->machine(), + &data->info()->tick_counter(), temp_zone); optimizer.Optimize(); } }; @@ -1406,8 +1549,9 @@ struct EffectControlLinearizationPhase { // fix the effect and control flow for nodes with low-level side // effects (such as changing representation to tagged or // 'floating' allocation regions.) - Schedule* schedule = Scheduler::ComputeSchedule(temp_zone, data->graph(), - Scheduler::kTempSchedule); + Schedule* schedule = Scheduler::ComputeSchedule( + temp_zone, data->graph(), Scheduler::kTempSchedule, + &data->info()->tick_counter()); if (FLAG_turbo_verify) ScheduleVerifier::Run(schedule); TraceSchedule(data->info(), data, schedule, "effect linearization schedule"); @@ -1433,6 +1577,7 @@ struct EffectControlLinearizationPhase { // doing a common operator reducer and dead code elimination just before // it, to eliminate conditional deopts with a constant condition. 
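Nearly every phase in this file builds the same scaffold, as in the GraphReducer construction that follows: create a GraphReducer (now also fed the tick counter), register a handful of independent reducers with AddReducer, and call ReduceGraph(), which revisits nodes until no reducer makes further progress. A compact sketch of that composition pattern with illustrative types; the real GraphReducer also walks the whole graph via a worklist and revisits a changed node's uses:

```cpp
#include <vector>

struct Node {};  // placeholder for compiler::Node

class Reducer {
 public:
  virtual ~Reducer() = default;
  // Returns true if the node changed and should be reconsidered.
  virtual bool Reduce(Node* node) = 0;
};

class GraphReducerSketch {
 public:
  void AddReducer(Reducer* reducer) { reducers_.push_back(reducer); }

  // Run all registered reducers over one node to a local fixed point.
  void ReduceNode(Node* node) {
    bool changed;
    do {
      changed = false;
      for (Reducer* reducer : reducers_) changed |= reducer->Reduce(node);
    } while (changed);
  }

 private:
  std::vector<Reducer*> reducers_;
};
```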
GraphReducer graph_reducer(temp_zone, data->graph(), + &data->info()->tick_counter(), data->jsgraph()->Dead()); DeadCodeElimination dead_code_elimination(&graph_reducer, data->graph(), data->common(), temp_zone); @@ -1455,7 +1600,8 @@ struct StoreStoreEliminationPhase { data->jsgraph()->GetCachedNodes(&roots); trimmer.TrimGraph(roots.begin(), roots.end()); - StoreStoreElimination::Run(data->jsgraph(), temp_zone); + StoreStoreElimination::Run(data->jsgraph(), &data->info()->tick_counter(), + temp_zone); } }; @@ -1464,6 +1610,7 @@ struct LoadEliminationPhase { void Run(PipelineData* data, Zone* temp_zone) { GraphReducer graph_reducer(temp_zone, data->graph(), + &data->info()->tick_counter(), data->jsgraph()->Dead()); BranchElimination branch_condition_elimination(&graph_reducer, data->jsgraph(), temp_zone); @@ -1513,7 +1660,7 @@ struct MemoryOptimizationPhase { data->info()->is_allocation_folding_enabled() ? MemoryOptimizer::AllocationFolding::kDoAllocationFolding : MemoryOptimizer::AllocationFolding::kDontAllocationFolding, - data->debug_name()); + data->debug_name(), &data->info()->tick_counter()); optimizer.Optimize(); } }; @@ -1523,6 +1670,7 @@ struct LateOptimizationPhase { void Run(PipelineData* data, Zone* temp_zone) { GraphReducer graph_reducer(temp_zone, data->graph(), + &data->info()->tick_counter(), data->jsgraph()->Dead()); BranchElimination branch_condition_elimination(&graph_reducer, data->jsgraph(), temp_zone); @@ -1555,6 +1703,7 @@ struct MachineOperatorOptimizationPhase { void Run(PipelineData* data, Zone* temp_zone) { GraphReducer graph_reducer(temp_zone, data->graph(), + &data->info()->tick_counter(), data->jsgraph()->Dead()); ValueNumberingReducer value_numbering(temp_zone, data->graph()->zone()); MachineOperatorReducer machine_reducer(&graph_reducer, data->jsgraph()); @@ -1565,11 +1714,38 @@ struct MachineOperatorOptimizationPhase { } }; +struct CsaEarlyOptimizationPhase { + static const char* phase_name() { return "V8.CSAEarlyOptimization"; } + + void Run(PipelineData* data, Zone* temp_zone) { + GraphReducer graph_reducer(temp_zone, data->graph(), + &data->info()->tick_counter(), + data->jsgraph()->Dead()); + BranchElimination branch_condition_elimination(&graph_reducer, + data->jsgraph(), temp_zone); + DeadCodeElimination dead_code_elimination(&graph_reducer, data->graph(), + data->common(), temp_zone); + CommonOperatorReducer common_reducer(&graph_reducer, data->graph(), + data->broker(), data->common(), + data->machine(), temp_zone); + ValueNumberingReducer value_numbering(temp_zone, data->graph()->zone()); + CsaLoadElimination load_elimination(&graph_reducer, data->jsgraph(), + temp_zone); + AddReducer(data, &graph_reducer, &branch_condition_elimination); + AddReducer(data, &graph_reducer, &dead_code_elimination); + AddReducer(data, &graph_reducer, &common_reducer); + AddReducer(data, &graph_reducer, &value_numbering); + AddReducer(data, &graph_reducer, &load_elimination); + graph_reducer.ReduceGraph(); + } +}; + struct CsaOptimizationPhase { static const char* phase_name() { return "V8.CSAOptimization"; } void Run(PipelineData* data, Zone* temp_zone) { GraphReducer graph_reducer(temp_zone, data->graph(), + &data->info()->tick_counter(), data->jsgraph()->Dead()); BranchElimination branch_condition_elimination(&graph_reducer, data->jsgraph(), temp_zone); @@ -1621,9 +1797,10 @@ struct ComputeSchedulePhase { void Run(PipelineData* data, Zone* temp_zone) { Schedule* schedule = Scheduler::ComputeSchedule( - temp_zone, data->graph(), data->info()->is_splitting_enabled() - 
? Scheduler::kSplitNodes - : Scheduler::kNoFlags); + temp_zone, data->graph(), + data->info()->is_splitting_enabled() ? Scheduler::kSplitNodes + : Scheduler::kNoFlags, + &data->info()->tick_counter()); if (FLAG_turbo_verify) ScheduleVerifier::Run(schedule); data->set_schedule(schedule); } @@ -1671,6 +1848,7 @@ struct InstructionSelectionPhase { data->info()->switch_jump_table_enabled() ? InstructionSelector::kEnableSwitchJumpTable : InstructionSelector::kDisableSwitchJumpTable, + &data->info()->tick_counter(), data->info()->is_source_positions_enabled() ? InstructionSelector::kAllSourcePositions : InstructionSelector::kCallSourcePositions, @@ -1920,7 +2098,8 @@ struct PrintGraphPhase { Schedule* schedule = data->schedule(); if (schedule == nullptr) { schedule = Scheduler::ComputeSchedule(temp_zone, data->graph(), - Scheduler::kNoFlags); + Scheduler::kNoFlags, + &info->tick_counter()); } AllowHandleDereference allow_deref; @@ -2089,6 +2268,11 @@ bool PipelineImpl::OptimizeGraph(Linkage* linkage) { RunPrintAndVerify(EscapeAnalysisPhase::phase_name()); } + if (FLAG_assert_types) { + Run(); + RunPrintAndVerify(TypeAssertionsPhase::phase_name()); + } + // Perform simplified lowering. This has to run w/o the Typer decorator, // because we cannot compute meaningful types anyways, and the computed types // might even conflict with the representation/truncation logic. @@ -2201,6 +2385,9 @@ MaybeHandle Pipeline::GenerateCodeForCodeStub( pipeline.Run("V8.TFMachineCode"); } + pipeline.Run(); + pipeline.RunPrintAndVerify(CsaEarlyOptimizationPhase::phase_name(), true); + // Optimize memory access and allocation operations. pipeline.Run(); pipeline.RunPrintAndVerify(MemoryOptimizationPhase::phase_name(), true); @@ -2330,58 +2517,6 @@ wasm::WasmCompilationResult Pipeline::GenerateCodeForWasmNativeStub( return result; } -// static -MaybeHandle Pipeline::GenerateCodeForWasmHeapStub( - Isolate* isolate, CallDescriptor* call_descriptor, Graph* graph, - Code::Kind kind, const char* debug_name, const AssemblerOptions& options, - SourcePositionTable* source_positions) { - OptimizedCompilationInfo info(CStrVector(debug_name), graph->zone(), kind); - // Construct a pipeline for scheduling and code generation. - ZoneStats zone_stats(isolate->allocator()); - NodeOriginTable* node_positions = new (graph->zone()) NodeOriginTable(graph); - PipelineData data(&zone_stats, &info, isolate, graph, nullptr, - source_positions, node_positions, nullptr, options); - std::unique_ptr pipeline_statistics; - if (FLAG_turbo_stats || FLAG_turbo_stats_nvp) { - pipeline_statistics.reset(new PipelineStatistics( - &info, isolate->GetTurboStatistics(), &zone_stats)); - pipeline_statistics->BeginPhaseKind("V8.WasmStubCodegen"); - } - - PipelineImpl pipeline(&data); - - if (info.trace_turbo_json_enabled() || - info.trace_turbo_graph_enabled()) { - CodeTracer::Scope tracing_scope(data.GetCodeTracer()); - OFStream os(tracing_scope.file()); - os << "---------------------------------------------------\n" - << "Begin compiling method " << info.GetDebugName().get() - << " using TurboFan" << std::endl; - } - - if (info.trace_turbo_graph_enabled()) { // Simple textual RPO. 
- StdoutStream{} << "-- wasm stub " << Code::Kind2String(kind) << " graph -- " - << std::endl - << AsRPO(*graph); - } - - if (info.trace_turbo_json_enabled()) { - TurboJsonFile json_of(&info, std::ios_base::trunc); - json_of << "{\"function\":\"" << info.GetDebugName().get() - << "\", \"source\":\"\",\n\"phases\":["; - } - - pipeline.RunPrintAndVerify("V8.WasmMachineCode", true); - pipeline.ComputeScheduledGraph(); - - Handle code; - if (pipeline.GenerateCode(call_descriptor).ToHandle(&code) && - pipeline.CommitDependencies(code)) { - return code; - } - return MaybeHandle(); -} - // static MaybeHandle Pipeline::GenerateCodeForTesting( OptimizedCompilationInfo* info, Isolate* isolate, @@ -2449,11 +2584,11 @@ MaybeHandle Pipeline::GenerateCodeForTesting( } // static -OptimizedCompilationJob* Pipeline::NewCompilationJob( +std::unique_ptr Pipeline::NewCompilationJob( Isolate* isolate, Handle function, bool has_script) { Handle shared = handle(function->shared(), function->GetIsolate()); - return new PipelineCompilationJob(isolate, shared, function); + return base::make_unique(isolate, shared, function); } // static @@ -2490,13 +2625,14 @@ void Pipeline::GenerateCodeForWasmFunction( pipeline.RunPrintAndVerify("V8.WasmMachineCode", true); data.BeginPhaseKind("V8.WasmOptimization"); - const bool is_asm_js = module->origin == wasm::kAsmJsOrigin; + const bool is_asm_js = is_asmjs_module(module); if (FLAG_turbo_splitting && !is_asm_js) { data.info()->MarkAsSplittingEnabled(); } if (FLAG_wasm_opt || is_asm_js) { PipelineRunScope scope(&data, "V8.WasmFullOptimization"); GraphReducer graph_reducer(scope.zone(), data.graph(), + &data.info()->tick_counter(), data.mcgraph()->Dead()); DeadCodeElimination dead_code_elimination(&graph_reducer, data.graph(), data.common(), scope.zone()); @@ -2515,6 +2651,7 @@ void Pipeline::GenerateCodeForWasmFunction( } else { PipelineRunScope scope(&data, "V8.WasmBaseOptimization"); GraphReducer graph_reducer(scope.zone(), data.graph(), + &data.info()->tick_counter(), data.mcgraph()->Dead()); ValueNumberingReducer value_numbering(scope.zone(), data.graph()->zone()); AddReducer(&data, &graph_reducer, &value_numbering); @@ -2870,8 +3007,9 @@ bool PipelineImpl::SelectInstructionsAndAssemble( } MaybeHandle PipelineImpl::GenerateCode(CallDescriptor* call_descriptor) { - if (!SelectInstructionsAndAssemble(call_descriptor)) + if (!SelectInstructionsAndAssemble(call_descriptor)) { return MaybeHandle(); + } return FinalizeCode(); } @@ -2928,6 +3066,9 @@ void PipelineImpl::AllocateRegisters(const RegisterConfiguration* config, if (data->info()->is_turbo_preprocess_ranges()) { flags |= RegisterAllocationFlag::kTurboPreprocessRanges; } + if (data->info()->trace_turbo_allocation_enabled()) { + flags |= RegisterAllocationFlag::kTraceAllocation; + } data->InitializeRegisterAllocationData(config, call_descriptor, flags); if (info()->is_osr()) data->osr_helper()->SetupFrame(data->frame()); diff --git a/deps/v8/src/compiler/pipeline.h b/deps/v8/src/compiler/pipeline.h index 7f9a242d9872f4..6898faaad0d432 100644 --- a/deps/v8/src/compiler/pipeline.h +++ b/deps/v8/src/compiler/pipeline.h @@ -41,9 +41,8 @@ class SourcePositionTable; class Pipeline : public AllStatic { public: // Returns a new compilation job for the given JavaScript function. 
- static OptimizedCompilationJob* NewCompilationJob(Isolate* isolate, - Handle function, - bool has_script); + static std::unique_ptr NewCompilationJob( + Isolate* isolate, Handle function, bool has_script); // Run the pipeline for the WebAssembly compilation info. static void GenerateCodeForWasmFunction( @@ -60,11 +59,11 @@ const char* debug_name, const AssemblerOptions& assembler_options, SourcePositionTable* source_positions = nullptr); - // Run the pipeline on a machine graph and generate code. - static MaybeHandle GenerateCodeForWasmHeapStub( - Isolate* isolate, CallDescriptor* call_descriptor, Graph* graph, - Code::Kind kind, const char* debug_name, - const AssemblerOptions& assembler_options, + // Returns a new compilation job for a wasm heap stub. + static std::unique_ptr NewWasmHeapStubCompilationJob( + Isolate* isolate, CallDescriptor* call_descriptor, + std::unique_ptr zone, Graph* graph, Code::Kind kind, + std::unique_ptr debug_name, const AssemblerOptions& options, SourcePositionTable* source_positions = nullptr); // Run the pipeline on a machine graph and generate code. diff --git a/deps/v8/src/compiler/property-access-builder.cc b/deps/v8/src/compiler/property-access-builder.cc index dafd481797a671..99a06ef874a289 100644 --- a/deps/v8/src/compiler/property-access-builder.cc +++ b/deps/v8/src/compiler/property-access-builder.cc @@ -127,7 +127,7 @@ Node* PropertyAccessBuilder::ResolveHolder( PropertyAccessInfo const& access_info, Node* receiver) { Handle holder; if (access_info.holder().ToHandle(&holder)) { - return jsgraph()->Constant(holder); + return jsgraph()->Constant(ObjectRef(broker(), holder)); } return receiver; } @@ -151,7 +151,16 @@ MachineRepresentation PropertyAccessBuilder::ConvertRepresentation( Node* PropertyAccessBuilder::TryBuildLoadConstantDataField( NameRef const& name, PropertyAccessInfo const& access_info, Node* receiver) { + // TODO(neis): Eliminate FastPropertyAt call below by doing the lookup during + // access info computation. Requires extra care in the case where the + // receiver is the holder. + AllowCodeDependencyChange dependency_change_; + AllowHandleAllocation handle_allocation_; + AllowHandleDereference handle_dereference_; + AllowHeapAllocation heap_allocation_; + if (!access_info.IsDataConstant()) return nullptr; + // First, determine if we have a constant holder to load from. Handle holder; // If {access_info} has a holder, just use it. @@ -165,7 +174,7 @@ Node* PropertyAccessBuilder::TryBuildLoadConstantDataField( MapRef receiver_map = m.Ref(broker()).map(); if (std::find_if(access_info.receiver_maps().begin(), access_info.receiver_maps().end(), [&](Handle map) { - return map.address() == receiver_map.object().address(); + return map.equals(receiver_map.object()); }) == access_info.receiver_maps().end()) { // The map of the receiver is not in the feedback, let us bail out.
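The switch just above from map.address() == receiver_map.object().address() to map.equals(receiver_map.object()) matters because a Handle is an indirection: it points at a slot in a handle scope, and two handles to the same map can live in different slots. Comparing handle addresses therefore asks "same slot?" rather than "same object?". A minimal sketch of that distinction with a toy handle, not V8's:

```cpp
#include <cstdio>

struct Map {};  // placeholder heap object

// Toy handle: a pointer to a slot that in turn points at the object.
template <typename T>
struct ToyHandle {
  T** location;
  T* object() const { return *location; }
  bool equals(ToyHandle other) const { return object() == other.object(); }
};

int main() {
  Map map;
  Map* slot_a = &map;
  Map* slot_b = &map;  // second slot, same object
  ToyHandle<Map> a{&slot_a}, b{&slot_b};
  std::printf("slots equal: %d, objects equal: %d\n",
              a.location == b.location,  // 0: different slots
              a.equals(b));              // 1: same Map
}
```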
return nullptr; diff --git a/deps/v8/src/compiler/raw-machine-assembler.cc b/deps/v8/src/compiler/raw-machine-assembler.cc index dc1edc710d1f2f..277c89c932e92f 100644 --- a/deps/v8/src/compiler/raw-machine-assembler.cc +++ b/deps/v8/src/compiler/raw-machine-assembler.cc @@ -556,8 +556,8 @@ void RawMachineAssembler::PopAndReturn(Node* pop, Node* v1, Node* v2, Node* v3, current_block_ = nullptr; } -void RawMachineAssembler::DebugAbort(Node* message) { - AddNode(machine()->DebugAbort(), message); +void RawMachineAssembler::AbortCSAAssert(Node* message) { + AddNode(machine()->AbortCSAAssert(), message); } void RawMachineAssembler::DebugBreak() { AddNode(machine()->DebugBreak()); } diff --git a/deps/v8/src/compiler/raw-machine-assembler.h b/deps/v8/src/compiler/raw-machine-assembler.h index 67326ac7307b5f..890c38c5515af7 100644 --- a/deps/v8/src/compiler/raw-machine-assembler.h +++ b/deps/v8/src/compiler/raw-machine-assembler.h @@ -732,6 +732,9 @@ class V8_EXPORT_PRIVATE RawMachineAssembler { Node* BitcastTaggedToWord(Node* a) { return AddNode(machine()->BitcastTaggedToWord(), a); } + Node* BitcastTaggedSignedToWord(Node* a) { + return AddNode(machine()->BitcastTaggedSignedToWord(), a); + } Node* BitcastMaybeObjectToWord(Node* a) { return AddNode(machine()->BitcastMaybeObjectToWord(), a); } @@ -1016,7 +1019,7 @@ class V8_EXPORT_PRIVATE RawMachineAssembler { void PopAndReturn(Node* pop, Node* v1, Node* v2, Node* v3, Node* v4); void Bind(RawMachineLabel* label); void Deoptimize(Node* state); - void DebugAbort(Node* message); + void AbortCSAAssert(Node* message); void DebugBreak(); void Unreachable(); void Comment(const std::string& msg); diff --git a/deps/v8/src/compiler/redundancy-elimination.cc b/deps/v8/src/compiler/redundancy-elimination.cc index 0822e47bbab046..9b401bcf43f786 100644 --- a/deps/v8/src/compiler/redundancy-elimination.cc +++ b/deps/v8/src/compiler/redundancy-elimination.cc @@ -19,6 +19,7 @@ RedundancyElimination::~RedundancyElimination() = default; Reduction RedundancyElimination::Reduce(Node* node) { if (node_checks_.Get(node)) return NoChange(); switch (node->opcode()) { + case IrOpcode::kCheckBigInt: case IrOpcode::kCheckBounds: case IrOpcode::kCheckEqualsInternalizedString: case IrOpcode::kCheckEqualsSymbol: @@ -147,7 +148,9 @@ bool CheckSubsumes(Node const* a, Node const* b) { case IrOpcode::kCheckSmi: case IrOpcode::kCheckString: case IrOpcode::kCheckNumber: + case IrOpcode::kCheckBigInt: break; + case IrOpcode::kCheckedInt32ToCompressedSigned: case IrOpcode::kCheckedInt32ToTaggedSigned: case IrOpcode::kCheckedInt64ToInt32: case IrOpcode::kCheckedInt64ToTaggedSigned: diff --git a/deps/v8/src/compiler/representation-change.cc b/deps/v8/src/compiler/representation-change.cc index cebd87e73d6473..7a4577b799a0e7 100644 --- a/deps/v8/src/compiler/representation-change.cc +++ b/deps/v8/src/compiler/representation-change.cc @@ -8,6 +8,7 @@ #include "src/base/bits.h" #include "src/codegen/code-factory.h" +#include "src/compiler/js-heap-broker.h" #include "src/compiler/machine-operator.h" #include "src/compiler/node-matchers.h" #include "src/compiler/type-cache.h" @@ -25,12 +26,14 @@ const char* Truncation::description() const { return "truncate-to-bool"; case TruncationKind::kWord32: return "truncate-to-word32"; - case TruncationKind::kFloat64: + case TruncationKind::kWord64: + return "truncate-to-word64"; + case TruncationKind::kOddballAndBigIntToNumber: switch (identify_zeros()) { case kIdentifyZeros: - return "truncate-to-float64 (identify zeros)"; + return 
"truncate-oddball&bigint-to-number (identify zeros)"; case kDistinguishZeros: - return "truncate-to-float64 (distinguish zeros)"; + return "truncate-oddball&bigint-to-number (distinguish zeros)"; } case TruncationKind::kAny: switch (identify_zeros()) { @@ -45,22 +48,25 @@ const char* Truncation::description() const { // Partial order for truncations: // -// kAny <-------+ -// ^ | -// | | -// kFloat64 | -// ^ | -// / | -// kWord32 kBool -// ^ ^ -// \ / -// \ / -// \ / -// \ / -// \ / -// kNone +// kAny <-------+ +// ^ | +// | | +// kOddballAndBigIntToNumber | +// ^ | +// / | +// kWord64 | +// ^ | +// | | +// kWord32 kBool +// ^ ^ +// \ / +// \ / +// \ / +// \ / +// \ / +// kNone // -// TODO(jarin) We might consider making kBool < kFloat64. +// TODO(jarin) We might consider making kBool < kOddballAndBigIntToNumber. // static Truncation::TruncationKind Truncation::Generalize(TruncationKind rep1, @@ -68,9 +74,9 @@ Truncation::TruncationKind Truncation::Generalize(TruncationKind rep1, if (LessGeneral(rep1, rep2)) return rep2; if (LessGeneral(rep2, rep1)) return rep1; // Handle the generalization of float64-representable values. - if (LessGeneral(rep1, TruncationKind::kFloat64) && - LessGeneral(rep2, TruncationKind::kFloat64)) { - return TruncationKind::kFloat64; + if (LessGeneral(rep1, TruncationKind::kOddballAndBigIntToNumber) && + LessGeneral(rep2, TruncationKind::kOddballAndBigIntToNumber)) { + return TruncationKind::kOddballAndBigIntToNumber; } // Handle the generalization of any-representable values. if (LessGeneral(rep1, TruncationKind::kAny) && @@ -101,9 +107,16 @@ bool Truncation::LessGeneral(TruncationKind rep1, TruncationKind rep2) { return rep2 == TruncationKind::kBool || rep2 == TruncationKind::kAny; case TruncationKind::kWord32: return rep2 == TruncationKind::kWord32 || - rep2 == TruncationKind::kFloat64 || rep2 == TruncationKind::kAny; - case TruncationKind::kFloat64: - return rep2 == TruncationKind::kFloat64 || rep2 == TruncationKind::kAny; + rep2 == TruncationKind::kWord64 || + rep2 == TruncationKind::kOddballAndBigIntToNumber || + rep2 == TruncationKind::kAny; + case TruncationKind::kWord64: + return rep2 == TruncationKind::kWord64 || + rep2 == TruncationKind::kOddballAndBigIntToNumber || + rep2 == TruncationKind::kAny; + case TruncationKind::kOddballAndBigIntToNumber: + return rep2 == TruncationKind::kOddballAndBigIntToNumber || + rep2 == TruncationKind::kAny; case TruncationKind::kAny: return rep2 == TruncationKind::kAny; } @@ -125,10 +138,11 @@ bool IsWord(MachineRepresentation rep) { } // namespace -RepresentationChanger::RepresentationChanger(JSGraph* jsgraph, Isolate* isolate) +RepresentationChanger::RepresentationChanger(JSGraph* jsgraph, + JSHeapBroker* broker) : cache_(TypeCache::Get()), jsgraph_(jsgraph), - isolate_(isolate), + broker_(broker), testing_type_errors_(false), type_error_(false) {} @@ -169,7 +183,8 @@ Node* RepresentationChanger::GetRepresentationFor( use_node, use_info); case MachineRepresentation::kTaggedPointer: DCHECK(use_info.type_check() == TypeCheckKind::kNone || - use_info.type_check() == TypeCheckKind::kHeapObject); + use_info.type_check() == TypeCheckKind::kHeapObject || + use_info.type_check() == TypeCheckKind::kBigInt); return GetTaggedPointerRepresentationFor(node, output_rep, output_type, use_node, use_info); case MachineRepresentation::kTagged: @@ -207,7 +222,8 @@ Node* RepresentationChanger::GetRepresentationFor( use_info); case MachineRepresentation::kWord64: DCHECK(use_info.type_check() == TypeCheckKind::kNone || - use_info.type_check() 
== TypeCheckKind::kSigned64); + use_info.type_check() == TypeCheckKind::kSigned64 || + use_info.type_check() == TypeCheckKind::kBigInt); return GetWord64RepresentationFor(node, output_rep, output_type, use_node, use_info); case MachineRepresentation::kSimd128: @@ -418,6 +434,8 @@ Node* RepresentationChanger::GetTaggedPointerRepresentationFor( op = machine()->ChangeInt64ToFloat64(); node = jsgraph()->graph()->NewNode(op, node); op = simplified()->ChangeFloat64ToTaggedPointer(); + } else if (output_type.Is(Type::BigInt())) { + op = simplified()->ChangeUint64ToBigInt(); } else { return TypeError(node, output_rep, output_type, MachineRepresentation::kTaggedPointer); @@ -447,16 +465,37 @@ Node* RepresentationChanger::GetTaggedPointerRepresentationFor( // TODO(turbofan): Consider adding a Bailout operator that just deopts // for TaggedSigned output representation. op = simplified()->CheckedTaggedToTaggedPointer(use_info.feedback()); + } else if (IsAnyTagged(output_rep) && + (use_info.type_check() == TypeCheckKind::kBigInt || + output_type.Is(Type::BigInt()))) { + if (output_type.Is(Type::BigInt())) { + return node; + } + op = simplified()->CheckBigInt(use_info.feedback()); } else if (output_rep == MachineRepresentation::kCompressedPointer) { + if (use_info.type_check() == TypeCheckKind::kBigInt && + !output_type.Is(Type::BigInt())) { + node = InsertChangeCompressedToTagged(node); + op = simplified()->CheckBigInt(use_info.feedback()); + } else { + op = machine()->ChangeCompressedPointerToTaggedPointer(); + } + } else if (output_rep == MachineRepresentation::kCompressed && + output_type.Is(Type::BigInt())) { op = machine()->ChangeCompressedPointerToTaggedPointer(); + } else if (output_rep == MachineRepresentation::kCompressed && + use_info.type_check() == TypeCheckKind::kBigInt) { + node = InsertChangeCompressedToTagged(node); + op = simplified()->CheckBigInt(use_info.feedback()); } else if (CanBeCompressedSigned(output_rep) && use_info.type_check() == TypeCheckKind::kHeapObject) { if (!output_type.Maybe(Type::SignedSmall())) { op = machine()->ChangeCompressedPointerToTaggedPointer(); + } else { + // TODO(turbofan): Consider adding a Bailout operator that just deopts + // for CompressedSigned output representation. + op = simplified()->CheckedCompressedToTaggedPointer(use_info.feedback()); } - // TODO(turbofan): Consider adding a Bailout operator that just deopts - // for CompressedSigned output representation. - op = simplified()->CheckedCompressedToTaggedPointer(use_info.feedback()); } else { return TypeError(node, output_rep, output_type, MachineRepresentation::kTaggedPointer); @@ -535,6 +574,9 @@ Node* RepresentationChanger::GetTaggedRepresentationFor( } else if (output_type.Is(cache_->kSafeInteger)) { // int64 -> tagged op = simplified()->ChangeInt64ToTagged(); + } else if (output_type.Is(Type::BigInt())) { + // uint64 -> BigInt + op = simplified()->ChangeUint64ToBigInt(); } else { return TypeError(node, output_rep, output_type, MachineRepresentation::kTagged); @@ -560,7 +602,7 @@ Node* RepresentationChanger::GetTaggedRepresentationFor( op = simplified()->ChangeUint32ToTagged(); } else if (output_type.Is(Type::Number()) || (output_type.Is(Type::NumberOrOddball()) && - truncation.IsUsedAsFloat64())) { + truncation.TruncatesOddballAndBigIntToNumber())) { op = simplified()->ChangeFloat64ToTagged( output_type.Maybe(Type::MinusZero()) ? 
CheckForMinusZeroMode::kCheckForMinusZero @@ -569,7 +611,11 @@ Node* RepresentationChanger::GetTaggedRepresentationFor( return TypeError(node, output_rep, output_type, MachineRepresentation::kTagged); } - } else if (IsAnyCompressed(output_rep)) { + } else if (output_rep == MachineRepresentation::kCompressedSigned) { + op = machine()->ChangeCompressedSignedToTaggedSigned(); + } else if (output_rep == MachineRepresentation::kCompressedPointer) { + op = machine()->ChangeCompressedPointerToTaggedPointer(); + } else if (output_rep == MachineRepresentation::kCompressed) { op = machine()->ChangeCompressedToTagged(); } else { return TypeError(node, output_rep, output_type, @@ -606,9 +652,20 @@ Node* RepresentationChanger::GetCompressedSignedRepresentationFor( use_node, use_info); op = machine()->ChangeTaggedSignedToCompressedSigned(); } else if (IsWord(output_rep)) { - node = GetTaggedSignedRepresentationFor(node, output_rep, output_type, - use_node, use_info); - op = machine()->ChangeTaggedSignedToCompressedSigned(); + if (output_type.Is(Type::Signed31())) { + op = simplified()->ChangeInt31ToCompressedSigned(); + } else if (output_type.Is(Type::Signed32())) { + if (use_info.type_check() == TypeCheckKind::kSignedSmall) { + op = simplified()->CheckedInt32ToCompressedSigned(use_info.feedback()); + } else { + return TypeError(node, output_rep, output_type, + MachineRepresentation::kCompressedSigned); + } + } else { + node = GetTaggedSignedRepresentationFor(node, output_rep, output_type, + use_node, use_info); + op = machine()->ChangeTaggedSignedToCompressedSigned(); + } } else if (output_rep == MachineRepresentation::kWord64) { node = GetTaggedSignedRepresentationFor(node, output_rep, output_type, use_node, use_info); @@ -645,10 +702,11 @@ Node* RepresentationChanger::GetCompressedPointerRepresentationFor( use_info.type_check() == TypeCheckKind::kHeapObject) { if (!output_type.Maybe(Type::SignedSmall())) { op = machine()->ChangeTaggedPointerToCompressedPointer(); + } else { + // TODO(turbofan): Consider adding a Bailout operator that just deopts + // for TaggedSigned output representation. + op = simplified()->CheckedTaggedToCompressedPointer(use_info.feedback()); } - // TODO(turbofan): Consider adding a Bailout operator that just deopts - // for TaggedSigned output representation. - op = simplified()->CheckedTaggedToCompressedPointer(use_info.feedback()); } else if (output_rep == MachineRepresentation::kBit) { // TODO(v8:8977): specialize here and below node = GetTaggedPointerRepresentationFor(node, output_rep, output_type, @@ -810,11 +868,14 @@ Node* RepresentationChanger::GetFloat64RepresentationFor( Node* use_node, UseInfo use_info) { NumberMatcher m(node); if (m.HasValue()) { + // BigInts are not used as number constants. + DCHECK(use_info.type_check() != TypeCheckKind::kBigInt); switch (use_info.type_check()) { case TypeCheckKind::kNone: case TypeCheckKind::kNumber: case TypeCheckKind::kNumberOrOddball: return jsgraph()->Float64Constant(m.Value()); + case TypeCheckKind::kBigInt: case TypeCheckKind::kHeapObject: case TypeCheckKind::kSigned32: case TypeCheckKind::kSigned64: @@ -843,9 +904,7 @@ Node* RepresentationChanger::GetFloat64RepresentationFor( } } else if (output_rep == MachineRepresentation::kBit) { CHECK(output_type.Is(Type::Boolean())); - // TODO(tebbi): TypeCheckKind::kNumberOrOddball should imply Float64 - // truncation, since this exactly means that we treat Oddballs as Numbers. 
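With kWord64 and kOddballAndBigIntToNumber in place, Truncation::Generalize is a join on the partial order pictured earlier: if neither side subsumes the other, the result is the least common upper bound, falling back to kAny at the top. A small worked sketch of that join, with kNumberish abbreviating kOddballAndBigIntToNumber and the ordering transcribed from the lattice diagram:

```cpp
#include <cstdio>

enum class Kind { kNone, kBool, kWord32, kWord64, kNumberish, kAny };

bool LessGeneral(Kind a, Kind b) {
  if (a == b || a == Kind::kNone || b == Kind::kAny) return true;
  if (a == Kind::kWord32) return b == Kind::kWord64 || b == Kind::kNumberish;
  if (a == Kind::kWord64) return b == Kind::kNumberish;
  return false;  // kBool, kNumberish, kAny relate only as handled above
}

Kind Generalize(Kind a, Kind b) {
  if (LessGeneral(a, b)) return b;
  if (LessGeneral(b, a)) return a;
  if (LessGeneral(a, Kind::kNumberish) && LessGeneral(b, Kind::kNumberish))
    return Kind::kNumberish;  // join of number-representable truncations
  return Kind::kAny;          // incomparable otherwise, e.g. kBool vs kWord32
}

int main() {
  std::printf("%d %d\n",
              static_cast<int>(Generalize(Kind::kWord32, Kind::kWord64)),
              static_cast<int>(Generalize(Kind::kBool, Kind::kWord32)));
  // prints: 3 5  (kWord64, kAny)
}
```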
- if (use_info.truncation().IsUsedAsFloat64() || + if (use_info.truncation().TruncatesOddballAndBigIntToNumber() || use_info.type_check() == TypeCheckKind::kNumberOrOddball) { op = machine()->ChangeUint32ToFloat64(); } else { @@ -867,7 +926,7 @@ Node* RepresentationChanger::GetFloat64RepresentationFor( } else if (output_type.Is(Type::Number())) { op = simplified()->ChangeTaggedToFloat64(); } else if ((output_type.Is(Type::NumberOrOddball()) && - use_info.truncation().IsUsedAsFloat64()) || + use_info.truncation().TruncatesOddballAndBigIntToNumber()) || output_type.Is(Type::NumberOrHole())) { // JavaScript 'null' is an Oddball that results in +0 when truncated to // Number. In a context like -0 == null, which must evaluate to false, @@ -1063,11 +1122,15 @@ Node* RepresentationChanger::GetWord32RepresentationFor( output_type, use_node, use_info); } else if (output_rep == MachineRepresentation::kCompressedSigned) { // TODO(v8:8977): Specialise here - op = machine()->ChangeCompressedSignedToTaggedSigned(); - node = jsgraph()->graph()->NewNode(op, node); - return GetWord32RepresentationFor(node, - MachineRepresentation::kTaggedSigned, - output_type, use_node, use_info); + if (output_type.Is(Type::SignedSmall())) { + op = simplified()->ChangeCompressedSignedToInt32(); + } else { + op = machine()->ChangeCompressedSignedToTaggedSigned(); + node = jsgraph()->graph()->NewNode(op, node); + return GetWord32RepresentationFor(node, + MachineRepresentation::kTaggedSigned, + output_type, use_node, use_info); + } } else if (output_rep == MachineRepresentation::kCompressedPointer) { // TODO(v8:8977): Specialise here op = machine()->ChangeCompressedPointerToTaggedPointer(); @@ -1252,6 +1315,15 @@ Node* RepresentationChanger::GetWord64RepresentationFor( } break; } + case IrOpcode::kHeapConstant: { + HeapObjectMatcher m(node); + if (m.HasValue() && m.Ref(broker_).IsBigInt()) { + auto bigint = m.Ref(broker_).AsBigInt(); + return jsgraph()->Int64Constant( + static_cast(bigint.AsUint64())); + } + break; + } default: break; } @@ -1272,9 +1344,15 @@ Node* RepresentationChanger::GetWord64RepresentationFor( jsgraph()->common()->DeadValue(MachineRepresentation::kWord64), unreachable); } else if (IsWord(output_rep)) { - if (output_type.Is(Type::Unsigned32())) { + if (output_type.Is(Type::Unsigned32OrMinusZero())) { + // uint32 -> uint64 + CHECK_IMPLIES(output_type.Maybe(Type::MinusZero()), + use_info.truncation().IdentifiesZeroAndMinusZero()); op = machine()->ChangeUint32ToUint64(); - } else if (output_type.Is(Type::Signed32())) { + } else if (output_type.Is(Type::Signed32OrMinusZero())) { + // int32 -> int64 + CHECK_IMPLIES(output_type.Maybe(Type::MinusZero()), + use_info.truncation().IdentifiesZeroAndMinusZero()); op = machine()->ChangeInt32ToInt64(); } else { return TypeError(node, output_rep, output_type, @@ -1323,6 +1401,13 @@ Node* RepresentationChanger::GetWord64RepresentationFor( return TypeError(node, output_rep, output_type, MachineRepresentation::kWord64); } + } else if (IsAnyTagged(output_rep) && + use_info.truncation().IsUsedAsWord64() && + (use_info.type_check() == TypeCheckKind::kBigInt || + output_type.Is(Type::BigInt()))) { + node = GetTaggedPointerRepresentationFor(node, output_rep, output_type, + use_node, use_info); + op = simplified()->TruncateBigIntToUint64(); } else if (CanBeTaggedPointer(output_rep)) { if (output_type.Is(cache_->kInt64)) { op = simplified()->ChangeTaggedToInt64(); @@ -1656,6 +1741,13 @@ Node* RepresentationChanger::InsertTruncateInt64ToInt32(Node* node) { return 
jsgraph()->graph()->NewNode(machine()->TruncateInt64ToInt32(), node); } +Node* RepresentationChanger::InsertChangeCompressedToTagged(Node* node) { + return jsgraph()->graph()->NewNode(machine()->ChangeCompressedToTagged(), + node); +} + +Isolate* RepresentationChanger::isolate() const { return broker_->isolate(); } + } // namespace compiler } // namespace internal } // namespace v8 diff --git a/deps/v8/src/compiler/representation-change.h b/deps/v8/src/compiler/representation-change.h index e8bb3f12ac577f..d3386676032b7a 100644 --- a/deps/v8/src/compiler/representation-change.h +++ b/deps/v8/src/compiler/representation-change.h @@ -29,8 +29,13 @@ class Truncation final { static Truncation Word32() { return Truncation(TruncationKind::kWord32, kIdentifyZeros); } - static Truncation Float64(IdentifyZeros identify_zeros = kDistinguishZeros) { - return Truncation(TruncationKind::kFloat64, identify_zeros); + static Truncation Word64() { + return Truncation(TruncationKind::kWord64, kIdentifyZeros); + } + static Truncation OddballAndBigIntToNumber( + IdentifyZeros identify_zeros = kDistinguishZeros) { + return Truncation(TruncationKind::kOddballAndBigIntToNumber, + identify_zeros); } static Truncation Any(IdentifyZeros identify_zeros = kDistinguishZeros) { return Truncation(TruncationKind::kAny, identify_zeros); @@ -50,8 +55,11 @@ class Truncation final { bool IsUsedAsWord32() const { return LessGeneral(kind_, TruncationKind::kWord32); } - bool IsUsedAsFloat64() const { - return LessGeneral(kind_, TruncationKind::kFloat64); + bool IsUsedAsWord64() const { + return LessGeneral(kind_, TruncationKind::kWord64); + } + bool TruncatesOddballAndBigIntToNumber() const { + return LessGeneral(kind_, TruncationKind::kOddballAndBigIntToNumber); } bool IdentifiesUndefinedAndZero() { return LessGeneral(kind_, TruncationKind::kWord32) || @@ -81,13 +89,15 @@ class Truncation final { kNone, kBool, kWord32, - kFloat64, + kWord64, + kOddballAndBigIntToNumber, kAny }; explicit Truncation(TruncationKind kind, IdentifyZeros identify_zeros) : kind_(kind), identify_zeros_(identify_zeros) { - DCHECK(kind == TruncationKind::kAny || kind == TruncationKind::kFloat64 || + DCHECK(kind == TruncationKind::kAny || + kind == TruncationKind::kOddballAndBigIntToNumber || identify_zeros == kIdentifyZeros); } TruncationKind kind() const { return kind_; } @@ -109,7 +119,8 @@ enum class TypeCheckKind : uint8_t { kSigned64, kNumber, kNumberOrOddball, - kHeapObject + kHeapObject, + kBigInt, }; inline std::ostream& operator<<(std::ostream& os, TypeCheckKind type_check) { @@ -128,6 +139,8 @@ inline std::ostream& operator<<(std::ostream& os, TypeCheckKind type_check) { return os << "NumberOrOddball"; case TypeCheckKind::kHeapObject: return os << "HeapObject"; + case TypeCheckKind::kBigInt: + return os << "BigInt"; } UNREACHABLE(); } @@ -160,6 +173,13 @@ class UseInfo { static UseInfo TruncatingWord32() { return UseInfo(MachineRepresentation::kWord32, Truncation::Word32()); } + static UseInfo TruncatingWord64() { + return UseInfo(MachineRepresentation::kWord64, Truncation::Word64()); + } + static UseInfo CheckedBigIntTruncatingWord64(const VectorSlotPair& feedback) { + return UseInfo(MachineRepresentation::kWord64, Truncation::Word64(), + TypeCheckKind::kBigInt, feedback); + } static UseInfo Word64() { return UseInfo(MachineRepresentation::kWord64, Truncation::Any()); } @@ -175,7 +195,7 @@ class UseInfo { static UseInfo TruncatingFloat64( IdentifyZeros identify_zeros = kDistinguishZeros) { return UseInfo(MachineRepresentation::kFloat64, - 
Truncation::Float64(identify_zeros)); + Truncation::OddballAndBigIntToNumber(identify_zeros)); } static UseInfo AnyTagged() { return UseInfo(MachineRepresentation::kTagged, Truncation::Any()); @@ -203,6 +223,12 @@ class UseInfo { return UseInfo(MachineRepresentation::kTaggedPointer, Truncation::Any(), TypeCheckKind::kHeapObject, feedback); } + + static UseInfo CheckedBigIntAsTaggedPointer(const VectorSlotPair& feedback) { + return UseInfo(MachineRepresentation::kTaggedPointer, Truncation::Any(), + TypeCheckKind::kBigInt, feedback); + } + static UseInfo CheckedSignedSmallAsTaggedSigned( const VectorSlotPair& feedback, IdentifyZeros identify_zeros = kDistinguishZeros) { @@ -240,8 +266,6 @@ class UseInfo { } static UseInfo CheckedNumberOrOddballAsFloat64( IdentifyZeros identify_zeros, const VectorSlotPair& feedback) { - // TODO(tebbi): We should use Float64 truncation here, since this exactly - // means that we treat Oddballs as Numbers. return UseInfo(MachineRepresentation::kFloat64, Truncation::Any(identify_zeros), TypeCheckKind::kNumberOrOddball, feedback); @@ -287,7 +311,7 @@ class UseInfo { // Eagerly folds any representation changes for constants. class V8_EXPORT_PRIVATE RepresentationChanger final { public: - RepresentationChanger(JSGraph* jsgraph, Isolate* isolate); + RepresentationChanger(JSGraph* jsgraph, JSHeapBroker* broker); // Changes representation from {output_type} to {use_rep}. The {truncation} // parameter is only used for sanity checking - if the changer cannot figure @@ -317,7 +341,7 @@ class V8_EXPORT_PRIVATE RepresentationChanger final { private: TypeCache const* cache_; JSGraph* jsgraph_; - Isolate* isolate_; + JSHeapBroker* broker_; friend class RepresentationChangerTester; // accesses the below fields. @@ -371,12 +395,13 @@ class V8_EXPORT_PRIVATE RepresentationChanger final { Node* InsertChangeTaggedSignedToInt32(Node* node); Node* InsertChangeTaggedToFloat64(Node* node); Node* InsertChangeUint32ToFloat64(Node* node); + Node* InsertChangeCompressedToTagged(Node* node); Node* InsertConversion(Node* node, const Operator* op, Node* use_node); Node* InsertTruncateInt64ToInt32(Node* node); Node* InsertUnconditionalDeopt(Node* node, DeoptimizeReason reason); JSGraph* jsgraph() const { return jsgraph_; } - Isolate* isolate() const { return isolate_; } + Isolate* isolate() const; Factory* factory() const { return isolate()->factory(); } SimplifiedOperatorBuilder* simplified() { return jsgraph()->simplified(); } MachineOperatorBuilder* machine() { return jsgraph()->machine(); } diff --git a/deps/v8/src/compiler/scheduler.cc b/deps/v8/src/compiler/scheduler.cc index b57162f7f5a193..25919bb3b3a35f 100644 --- a/deps/v8/src/compiler/scheduler.cc +++ b/deps/v8/src/compiler/scheduler.cc @@ -7,6 +7,7 @@ #include #include "src/base/adapters.h" +#include "src/codegen/tick-counter.h" #include "src/compiler/common-operator.h" #include "src/compiler/control-equivalence.h" #include "src/compiler/graph.h" @@ -26,7 +27,7 @@ namespace compiler { } while (false) Scheduler::Scheduler(Zone* zone, Graph* graph, Schedule* schedule, Flags flags, - size_t node_count_hint) + size_t node_count_hint, TickCounter* tick_counter) : zone_(zone), graph_(graph), schedule_(schedule), @@ -34,12 +35,14 @@ Scheduler::Scheduler(Zone* zone, Graph* graph, Schedule* schedule, Flags flags, scheduled_nodes_(zone), schedule_root_nodes_(zone), schedule_queue_(zone), - node_data_(zone) { + node_data_(zone), + tick_counter_(tick_counter) { node_data_.reserve(node_count_hint); node_data_.resize(graph->NodeCount(), 
DefaultSchedulerData()); } -Schedule* Scheduler::ComputeSchedule(Zone* zone, Graph* graph, Flags flags) { +Schedule* Scheduler::ComputeSchedule(Zone* zone, Graph* graph, Flags flags, + TickCounter* tick_counter) { Zone* schedule_zone = (flags & Scheduler::kTempSchedule) ? zone : graph->zone(); @@ -50,7 +53,8 @@ Schedule* Scheduler::ComputeSchedule(Zone* zone, Graph* graph, Flags flags) { Schedule* schedule = new (schedule_zone) Schedule(schedule_zone, node_count_hint); - Scheduler scheduler(zone, graph, schedule, flags, node_count_hint); + Scheduler scheduler(zone, graph, schedule, flags, node_count_hint, + tick_counter); scheduler.BuildCFG(); scheduler.ComputeSpecialRPONumbering(); @@ -65,7 +69,6 @@ Schedule* Scheduler::ComputeSchedule(Zone* zone, Graph* graph, Flags flags) { return schedule; } - Scheduler::SchedulerData Scheduler::DefaultSchedulerData() { SchedulerData def = {schedule_->start(), 0, kUnknown}; return def; @@ -258,6 +261,7 @@ class CFGBuilder : public ZoneObject { Queue(scheduler_->graph_->end()); while (!queue_.empty()) { // Breadth-first backwards traversal. + scheduler_->tick_counter_->DoTick(); Node* node = queue_.front(); queue_.pop(); int max = NodeProperties::PastControlIndex(node); @@ -283,6 +287,7 @@ class CFGBuilder : public ZoneObject { component_end_ = schedule_->block(exit); scheduler_->equivalence_->Run(exit); while (!queue_.empty()) { // Breadth-first backwards traversal. + scheduler_->tick_counter_->DoTick(); Node* node = queue_.front(); queue_.pop(); @@ -728,11 +733,10 @@ class SpecialRPONumberer : public ZoneObject { } }; - int Push(ZoneVector& stack, int depth, - BasicBlock* child, int unvisited) { + int Push(int depth, BasicBlock* child, int unvisited) { if (child->rpo_number() == unvisited) { - stack[depth].block = child; - stack[depth].index = 0; + stack_[depth].block = child; + stack_[depth].index = 0; child->set_rpo_number(kBlockOnStack); return depth + 1; } @@ -780,7 +784,7 @@ class SpecialRPONumberer : public ZoneObject { DCHECK_LT(previous_block_count_, schedule_->BasicBlockCount()); stack_.resize(schedule_->BasicBlockCount() - previous_block_count_); previous_block_count_ = schedule_->BasicBlockCount(); - int stack_depth = Push(stack_, 0, entry, kBlockUnvisited1); + int stack_depth = Push(0, entry, kBlockUnvisited1); int num_loops = static_cast(loops_.size()); while (stack_depth > 0) { @@ -802,7 +806,7 @@ class SpecialRPONumberer : public ZoneObject { } else { // Push the successor onto the stack. DCHECK_EQ(kBlockUnvisited1, succ->rpo_number()); - stack_depth = Push(stack_, stack_depth, succ, kBlockUnvisited1); + stack_depth = Push(stack_depth, succ, kBlockUnvisited1); } } else { // Finished with all successors; pop the stack and add the block. @@ -827,7 +831,7 @@ class SpecialRPONumberer : public ZoneObject { // edges that lead out of loops. Visits each block once, but linking loop // sections together is linear in the loop size, so overall is // O(|B| + max(loop_depth) * max(|loop|)) - stack_depth = Push(stack_, 0, entry, kBlockUnvisited2); + stack_depth = Push(0, entry, kBlockUnvisited2); while (stack_depth > 0) { SpecialRPOStackFrame* frame = &stack_[stack_depth - 1]; BasicBlock* block = frame->block; @@ -874,7 +878,7 @@ class SpecialRPONumberer : public ZoneObject { loop->AddOutgoing(zone_, succ); } else { // Push the successor onto the stack. - stack_depth = Push(stack_, stack_depth, succ, kBlockUnvisited2); + stack_depth = Push(stack_depth, succ, kBlockUnvisited2); if (HasLoopNumber(succ)) { // Push the inner loop onto the loop stack. 
DCHECK(GetLoopNumber(succ) < num_loops); @@ -958,8 +962,9 @@ class SpecialRPONumberer : public ZoneObject { } // Computes loop membership from the backedges of the control flow graph. - void ComputeLoopInfo(ZoneVector& queue, - size_t num_loops, ZoneVector* backedges) { + void ComputeLoopInfo( + ZoneVector& queue, // NOLINT(runtime/references) + size_t num_loops, ZoneVector* backedges) { // Extend existing loop membership vectors. for (LoopInfo& loop : loops_) { loop.members->Resize(static_cast(schedule_->BasicBlockCount()), @@ -1234,6 +1239,7 @@ void Scheduler::PrepareUses() { visited[node->id()] = true; stack.push(node->input_edges().begin()); while (!stack.empty()) { + tick_counter_->DoTick(); Edge edge = *stack.top(); Node* node = edge.to(); if (visited[node->id()]) { @@ -1262,6 +1268,7 @@ class ScheduleEarlyNodeVisitor { for (Node* const root : *roots) { queue_.push(root); while (!queue_.empty()) { + scheduler_->tick_counter_->DoTick(); VisitNode(queue_.front()); queue_.pop(); } @@ -1388,6 +1395,7 @@ class ScheduleLateNodeVisitor { queue->push(node); do { + scheduler_->tick_counter_->DoTick(); Node* const node = queue->front(); queue->pop(); VisitNode(node); diff --git a/deps/v8/src/compiler/scheduler.h b/deps/v8/src/compiler/scheduler.h index bd2f2780ddec9f..3d1fa40025b10a 100644 --- a/deps/v8/src/compiler/scheduler.h +++ b/deps/v8/src/compiler/scheduler.h @@ -15,6 +15,9 @@ namespace v8 { namespace internal { + +class TickCounter; + namespace compiler { // Forward declarations. @@ -23,7 +26,6 @@ class ControlEquivalence; class Graph; class SpecialRPONumberer; - // Computes a schedule from a graph, placing nodes into basic blocks and // ordering the basic blocks in the special RPO order. class V8_EXPORT_PRIVATE Scheduler { @@ -34,7 +36,8 @@ class V8_EXPORT_PRIVATE Scheduler { // The complete scheduling algorithm. Creates a new schedule and places all // nodes from the graph into it. - static Schedule* ComputeSchedule(Zone* temp_zone, Graph* graph, Flags flags); + static Schedule* ComputeSchedule(Zone* temp_zone, Graph* graph, Flags flags, + TickCounter* tick_counter); // Compute the RPO of blocks in an existing schedule. static BasicBlockVector* ComputeSpecialRPO(Zone* zone, Schedule* schedule); @@ -78,9 +81,10 @@ class V8_EXPORT_PRIVATE Scheduler { CFGBuilder* control_flow_builder_; // Builds basic blocks for controls. SpecialRPONumberer* special_rpo_; // Special RPO numbering of blocks. ControlEquivalence* equivalence_; // Control dependence equivalence. 
+ TickCounter* const tick_counter_; Scheduler(Zone* zone, Graph* graph, Schedule* schedule, Flags flags, - size_t node_count_hint_); + size_t node_count_hint_, TickCounter* tick_counter); inline SchedulerData DefaultSchedulerData(); inline SchedulerData* GetData(Node* node); diff --git a/deps/v8/src/compiler/serializer-for-background-compilation.cc b/deps/v8/src/compiler/serializer-for-background-compilation.cc index ecbd9cc0309947..5597850b0612c4 100644 --- a/deps/v8/src/compiler/serializer-for-background-compilation.cc +++ b/deps/v8/src/compiler/serializer-for-background-compilation.cc @@ -6,30 +6,495 @@ #include +#include "src/base/optional.h" +#include "src/compiler/access-info.h" +#include "src/compiler/bytecode-analysis.h" +#include "src/compiler/compilation-dependencies.h" #include "src/compiler/js-heap-broker.h" #include "src/compiler/vector-slot-pair.h" #include "src/handles/handles-inl.h" +#include "src/ic/call-optimization.h" #include "src/interpreter/bytecode-array-iterator.h" #include "src/objects/code.h" +#include "src/objects/js-array-inl.h" +#include "src/objects/js-regexp-inl.h" #include "src/objects/shared-function-info-inl.h" +#include "src/zone/zone-containers.h" #include "src/zone/zone.h" namespace v8 { namespace internal { namespace compiler { +#define CLEAR_ENVIRONMENT_LIST(V) \ + V(CallRuntimeForPair) \ + V(Debugger) \ + V(ResumeGenerator) \ + V(SuspendGenerator) + +#define KILL_ENVIRONMENT_LIST(V) \ + V(Abort) \ + V(ReThrow) \ + V(Throw) + +#define CLEAR_ACCUMULATOR_LIST(V) \ + V(Add) \ + V(AddSmi) \ + V(BitwiseAnd) \ + V(BitwiseAndSmi) \ + V(BitwiseNot) \ + V(BitwiseOr) \ + V(BitwiseOrSmi) \ + V(BitwiseXor) \ + V(BitwiseXorSmi) \ + V(CallRuntime) \ + V(CloneObject) \ + V(CreateArrayFromIterable) \ + V(CreateArrayLiteral) \ + V(CreateEmptyArrayLiteral) \ + V(CreateEmptyObjectLiteral) \ + V(CreateMappedArguments) \ + V(CreateObjectLiteral) \ + V(CreateRegExpLiteral) \ + V(CreateRestParameter) \ + V(CreateUnmappedArguments) \ + V(Dec) \ + V(DeletePropertySloppy) \ + V(DeletePropertyStrict) \ + V(Div) \ + V(DivSmi) \ + V(Exp) \ + V(ExpSmi) \ + V(ForInContinue) \ + V(ForInEnumerate) \ + V(ForInNext) \ + V(ForInStep) \ + V(Inc) \ + V(LdaLookupSlot) \ + V(LdaLookupSlotInsideTypeof) \ + V(LogicalNot) \ + V(Mod) \ + V(ModSmi) \ + V(Mul) \ + V(MulSmi) \ + V(Negate) \ + V(SetPendingMessage) \ + V(ShiftLeft) \ + V(ShiftLeftSmi) \ + V(ShiftRight) \ + V(ShiftRightLogical) \ + V(ShiftRightLogicalSmi) \ + V(ShiftRightSmi) \ + V(StaLookupSlot) \ + V(Sub) \ + V(SubSmi) \ + V(TestEqual) \ + V(TestEqualStrict) \ + V(TestGreaterThan) \ + V(TestGreaterThanOrEqual) \ + V(TestInstanceOf) \ + V(TestLessThan) \ + V(TestLessThanOrEqual) \ + V(TestNull) \ + V(TestReferenceEqual) \ + V(TestTypeOf) \ + V(TestUndefined) \ + V(TestUndetectable) \ + V(ToBooleanLogicalNot) \ + V(ToName) \ + V(ToNumber) \ + V(ToNumeric) \ + V(ToString) \ + V(TypeOf) + +#define UNCONDITIONAL_JUMPS_LIST(V) \ + V(Jump) \ + V(JumpConstant) \ + V(JumpLoop) + +#define CONDITIONAL_JUMPS_LIST(V) \ + V(JumpIfFalse) \ + V(JumpIfFalseConstant) \ + V(JumpIfJSReceiver) \ + V(JumpIfJSReceiverConstant) \ + V(JumpIfNotNull) \ + V(JumpIfNotNullConstant) \ + V(JumpIfNotUndefined) \ + V(JumpIfNotUndefinedConstant) \ + V(JumpIfNull) \ + V(JumpIfNullConstant) \ + V(JumpIfToBooleanFalse) \ + V(JumpIfToBooleanFalseConstant) \ + V(JumpIfToBooleanTrue) \ + V(JumpIfToBooleanTrueConstant) \ + V(JumpIfTrue) \ + V(JumpIfTrueConstant) \ + V(JumpIfUndefined) \ + V(JumpIfUndefinedConstant) + +#define IGNORED_BYTECODE_LIST(V) \ + V(CallNoFeedback) 
\
+  V(IncBlockCounter)                \
+  V(LdaNamedPropertyNoFeedback)     \
+  V(StackCheck)                     \
+  V(StaNamedPropertyNoFeedback)     \
+  V(ThrowReferenceErrorIfHole)      \
+  V(ThrowSuperAlreadyCalledIfNotHole) \
+  V(ThrowSuperNotCalledIfHole)
+
+#define UNREACHABLE_BYTECODE_LIST(V) \
+  V(ExtraWide)                       \
+  V(Illegal)                         \
+  V(Wide)
+
+#define SUPPORTED_BYTECODE_LIST(V)    \
+  V(CallAnyReceiver)                  \
+  V(CallJSRuntime)                    \
+  V(CallProperty)                     \
+  V(CallProperty0)                    \
+  V(CallProperty1)                    \
+  V(CallProperty2)                    \
+  V(CallUndefinedReceiver)            \
+  V(CallUndefinedReceiver0)           \
+  V(CallUndefinedReceiver1)           \
+  V(CallUndefinedReceiver2)           \
+  V(CallWithSpread)                   \
+  V(Construct)                        \
+  V(ConstructWithSpread)              \
+  V(CreateBlockContext)               \
+  V(CreateCatchContext)               \
+  V(CreateClosure)                    \
+  V(CreateEvalContext)                \
+  V(CreateFunctionContext)            \
+  V(CreateWithContext)                \
+  V(GetSuperConstructor)              \
+  V(GetTemplateObject)                \
+  V(InvokeIntrinsic)                  \
+  V(LdaConstant)                      \
+  V(LdaContextSlot)                   \
+  V(LdaCurrentContextSlot)            \
+  V(LdaImmutableContextSlot)          \
+  V(LdaImmutableCurrentContextSlot)   \
+  V(LdaModuleVariable)                \
+  V(LdaFalse)                         \
+  V(LdaGlobal)                        \
+  V(LdaGlobalInsideTypeof)            \
+  V(LdaKeyedProperty)                 \
+  V(LdaLookupContextSlot)             \
+  V(LdaLookupContextSlotInsideTypeof) \
+  V(LdaLookupGlobalSlot)              \
+  V(LdaLookupGlobalSlotInsideTypeof)  \
+  V(LdaNamedProperty)                 \
+  V(LdaNull)                          \
+  V(Ldar)                             \
+  V(LdaSmi)                           \
+  V(LdaTheHole)                       \
+  V(LdaTrue)                          \
+  V(LdaUndefined)                     \
+  V(LdaZero)                          \
+  V(Mov)                              \
+  V(PopContext)                       \
+  V(PushContext)                      \
+  V(Return)                           \
+  V(StaContextSlot)                   \
+  V(StaCurrentContextSlot)            \
+  V(StaGlobal)                        \
+  V(StaInArrayLiteral)                \
+  V(StaKeyedProperty)                 \
+  V(StaModuleVariable)                \
+  V(StaNamedOwnProperty)              \
+  V(StaNamedProperty)                 \
+  V(Star)                             \
+  V(SwitchOnGeneratorState)           \
+  V(SwitchOnSmiNoFeedback)            \
+  V(TestIn)                           \
+  CLEAR_ACCUMULATOR_LIST(V)           \
+  CLEAR_ENVIRONMENT_LIST(V)           \
+  CONDITIONAL_JUMPS_LIST(V)           \
+  IGNORED_BYTECODE_LIST(V)            \
+  KILL_ENVIRONMENT_LIST(V)            \
+  UNCONDITIONAL_JUMPS_LIST(V)         \
+  UNREACHABLE_BYTECODE_LIST(V)
+
+template <typename T>
+struct HandleComparator {
+  bool operator()(const Handle<T>& lhs, const Handle<T>& rhs) const {
+    return lhs.address() < rhs.address();
+  }
+};
+
+struct VirtualContext {
+  unsigned int distance;
+  Handle<Context> context;
+
+  VirtualContext(unsigned int distance_in, Handle<Context> context_in)
+      : distance(distance_in), context(context_in) {
+    CHECK_GT(distance, 0);
+  }
+  bool operator<(const VirtualContext& other) const {
+    return HandleComparator<Context>()(context, other.context) &&
+           distance < other.distance;
+  }
+};
+
+class FunctionBlueprint;
+using ConstantsSet = ZoneSet<Handle<Object>, HandleComparator<Object>>;
+using VirtualContextsSet = ZoneSet<VirtualContext>;
+using MapsSet = ZoneSet<Handle<Map>, HandleComparator<Map>>;
+using BlueprintsSet = ZoneSet<FunctionBlueprint>;
+
+class Hints {
+ public:
+  explicit Hints(Zone* zone);
+
+  const ConstantsSet& constants() const;
+  const MapsSet& maps() const;
+  const BlueprintsSet& function_blueprints() const;
+  const VirtualContextsSet& virtual_contexts() const;
+
+  void AddConstant(Handle<Object> constant);
+  void AddMap(Handle<Map> map);
+  void AddFunctionBlueprint(FunctionBlueprint function_blueprint);
+  void AddVirtualContext(VirtualContext virtual_context);
+
+  void Add(const Hints& other);
+
+  void Clear();
+  bool IsEmpty() const;
+
+#ifdef ENABLE_SLOW_DCHECKS
+  bool Includes(Hints const& other) const;
+  bool Equals(Hints const& other) const;
+#endif
+
+ private:
+  VirtualContextsSet virtual_contexts_;
+  ConstantsSet constants_;
+  MapsSet maps_;
+  BlueprintsSet function_blueprints_;
+};
+
+using HintsVector = ZoneVector<Hints>;
+
+class FunctionBlueprint {
+ public:
+  FunctionBlueprint(Handle<JSFunction> function, Isolate* isolate, Zone* zone);
+
+  FunctionBlueprint(Handle<SharedFunctionInfo> shared,
+                    Handle<FeedbackVector> feedback_vector,
+                    const Hints& context_hints);
+
+  Handle<SharedFunctionInfo> shared() const { return shared_; }
+  Handle<FeedbackVector> feedback_vector() const { return feedback_vector_; }
+  const Hints& context_hints() const { return context_hints_; }
+
+  bool operator<(const FunctionBlueprint& other) const {
+    // A feedback vector is never used for more than one SFI, so it can
+    // be used for strict ordering of blueprints.
+    DCHECK_IMPLIES(feedback_vector_.equals(other.feedback_vector_),
+                   shared_.equals(other.shared_));
+    return HandleComparator<FeedbackVector>()(feedback_vector_,
+                                              other.feedback_vector_);
+  }
+
+ private:
+  Handle<SharedFunctionInfo> shared_;
+  Handle<FeedbackVector> feedback_vector_;
+  Hints context_hints_;
+};
+
+class CompilationSubject {
+ public:
+  explicit CompilationSubject(FunctionBlueprint blueprint)
+      : blueprint_(blueprint) {}
+
+  // The zone parameter is to correctly initialize the blueprint,
+  // which contains zone-allocated context information.
+  CompilationSubject(Handle<JSFunction> closure, Isolate* isolate, Zone* zone);
+
+  const FunctionBlueprint& blueprint() const { return blueprint_; }
+  MaybeHandle<JSFunction> closure() const { return closure_; }
+
+ private:
+  FunctionBlueprint blueprint_;
+  MaybeHandle<JSFunction> closure_;
+};
+
+// The SerializerForBackgroundCompilation makes sure that the relevant function
+// data such as bytecode, SharedFunctionInfo and FeedbackVector, used by later
+// optimizations in the compiler, is copied to the heap broker.
+class SerializerForBackgroundCompilation {
+ public:
+  SerializerForBackgroundCompilation(
+      JSHeapBroker* broker, CompilationDependencies* dependencies, Zone* zone,
+      Handle<JSFunction> closure, SerializerForBackgroundCompilationFlags flags,
+      BailoutId osr_offset);
+  Hints Run();  // NOTE: Returns empty for an already-serialized function.
+
+  class Environment;
+
+ private:
+  SerializerForBackgroundCompilation(
+      JSHeapBroker* broker, CompilationDependencies* dependencies, Zone* zone,
+      CompilationSubject function, base::Optional<Hints> new_target,
+      const HintsVector& arguments,
+      SerializerForBackgroundCompilationFlags flags);
+
+  bool BailoutOnUninitialized(FeedbackSlot slot);
+
+  void TraverseBytecode();
+
+#define DECLARE_VISIT_BYTECODE(name, ...)
\ + void Visit##name(interpreter::BytecodeArrayIterator* iterator); + SUPPORTED_BYTECODE_LIST(DECLARE_VISIT_BYTECODE) +#undef DECLARE_VISIT_BYTECODE + + void ProcessCallOrConstruct(Hints callee, base::Optional new_target, + const HintsVector& arguments, FeedbackSlot slot, + bool with_spread = false); + void ProcessCallVarArgs(interpreter::BytecodeArrayIterator* iterator, + ConvertReceiverMode receiver_mode, + bool with_spread = false); + void ProcessApiCall(Handle target, + const HintsVector& arguments); + void ProcessReceiverMapForApiCall( + FunctionTemplateInfoRef& target, // NOLINT(runtime/references) + Handle receiver); + void ProcessBuiltinCall(Handle target, + const HintsVector& arguments); + + void ProcessJump(interpreter::BytecodeArrayIterator* iterator); + + void ProcessKeyedPropertyAccess(Hints const& receiver, Hints const& key, + FeedbackSlot slot, AccessMode mode); + void ProcessNamedPropertyAccess(interpreter::BytecodeArrayIterator* iterator, + AccessMode mode); + void ProcessNamedPropertyAccess(Hints const& receiver, NameRef const& name, + FeedbackSlot slot, AccessMode mode); + void ProcessMapHintsForPromises(Hints const& receiver_hints); + void ProcessHintsForPromiseResolve(Hints const& resolution_hints); + void ProcessHintsForRegExpTest(Hints const& regexp_hints); + PropertyAccessInfo ProcessMapForRegExpTest(MapRef map); + void ProcessHintsForFunctionCall(Hints const& target_hints); + + GlobalAccessFeedback const* ProcessFeedbackForGlobalAccess(FeedbackSlot slot); + NamedAccessFeedback const* ProcessFeedbackMapsForNamedAccess( + const MapHandles& maps, AccessMode mode, NameRef const& name); + ElementAccessFeedback const* ProcessFeedbackMapsForElementAccess( + const MapHandles& maps, AccessMode mode, + KeyedAccessMode const& keyed_mode); + void ProcessFeedbackForPropertyAccess(FeedbackSlot slot, AccessMode mode, + base::Optional static_name); + void ProcessMapForNamedPropertyAccess(MapRef const& map, NameRef const& name); + + void ProcessCreateContext(); + enum ContextProcessingMode { + kIgnoreSlot, + kSerializeSlot, + kSerializeSlotAndAddToAccumulator + }; + + void ProcessContextAccess(const Hints& context_hints, int slot, int depth, + ContextProcessingMode mode); + void ProcessImmutableLoad(ContextRef& context, // NOLINT(runtime/references) + int slot, ContextProcessingMode mode); + void ProcessLdaLookupGlobalSlot(interpreter::BytecodeArrayIterator* iterator); + void ProcessLdaLookupContextSlot( + interpreter::BytecodeArrayIterator* iterator); + + // Performs extension lookups for [0, depth) like + // BytecodeGraphBuilder::CheckContextExtensions(). + void ProcessCheckContextExtensions(int depth); + + Hints RunChildSerializer(CompilationSubject function, + base::Optional new_target, + const HintsVector& arguments, bool with_spread); + + // When (forward-)branching bytecodes are encountered, e.g. a conditional + // jump, we call ContributeToJumpTargetEnvironment to "remember" the current + // environment, associated with the jump target offset. When serialization + // eventually reaches that offset, we call IncorporateJumpTargetEnvironment to + // merge that environment back into whatever is the current environment then. + // Note: Since there may be multiple jumps to the same target, + // ContributeToJumpTargetEnvironment may actually do a merge as well. 
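// A minimal standalone sketch of the contribute/incorporate pattern that the
// two declarations below implement. All names here (AbstractEnv,
// JumpTargetMap) are illustrative stand-ins, not V8 types: a forward branch
// deposits (or merges into) a pending copy of the current environment keyed
// by the target offset, and reaching that offset folds the copy back in.
#include <map>
#include <set>
#include <vector>

struct AbstractEnv {
  std::vector<std::set<int>> slots;  // one hint set per parameter/register
  void Merge(const AbstractEnv& other) {
    if (slots.size() < other.slots.size()) slots.resize(other.slots.size());
    for (size_t i = 0; i < other.slots.size(); ++i)  // pointwise set union
      slots[i].insert(other.slots[i].begin(), other.slots[i].end());
  }
};

class JumpTargetMap {
 public:
  // Called at a (forward-)branching bytecode: remember the environment that
  // will flow into {target_offset}, merging if one was already deposited.
  void Contribute(int target_offset, const AbstractEnv& current) {
    auto it = pending_.find(target_offset);
    if (it == pending_.end()) {
      pending_.emplace(target_offset, current);
    } else {
      it->second.Merge(current);
    }
  }
  // Called when serialization reaches {offset}: fold any deposited
  // environment back into the live one and drop the entry.
  void Incorporate(int offset, AbstractEnv* live) {
    auto it = pending_.find(offset);
    if (it == pending_.end()) return;
    live->Merge(it->second);
    pending_.erase(it);
  }

 private:
  std::map<int, AbstractEnv> pending_;
};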
+  void ContributeToJumpTargetEnvironment(int target_offset);
+  void IncorporateJumpTargetEnvironment(int target_offset);
+
+  Handle<BytecodeArray> bytecode_array() const;
+  BytecodeAnalysis const& GetBytecodeAnalysis(bool serialize);
+
+  JSHeapBroker* broker() const { return broker_; }
+  CompilationDependencies* dependencies() const { return dependencies_; }
+  Zone* zone() const { return zone_; }
+  Environment* environment() const { return environment_; }
+  SerializerForBackgroundCompilationFlags flags() const { return flags_; }
+  BailoutId osr_offset() const { return osr_offset_; }
+
+  JSHeapBroker* const broker_;
+  CompilationDependencies* const dependencies_;
+  Zone* const zone_;
+  Environment* const environment_;
+  ZoneUnorderedMap<int, Environment*> jump_target_environments_;
+  SerializerForBackgroundCompilationFlags const flags_;
+  BailoutId const osr_offset_;
+};
+
+void RunSerializerForBackgroundCompilation(
+    JSHeapBroker* broker, CompilationDependencies* dependencies, Zone* zone,
+    Handle<JSFunction> closure, SerializerForBackgroundCompilationFlags flags,
+    BailoutId osr_offset) {
+  SerializerForBackgroundCompilation serializer(broker, dependencies, zone,
+                                                closure, flags, osr_offset);
+  serializer.Run();
+}
+
 using BytecodeArrayIterator = interpreter::BytecodeArrayIterator;
 
+FunctionBlueprint::FunctionBlueprint(Handle<SharedFunctionInfo> shared,
+                                     Handle<FeedbackVector> feedback_vector,
+                                     const Hints& context_hints)
+    : shared_(shared),
+      feedback_vector_(feedback_vector),
+      context_hints_(context_hints) {}
+
+FunctionBlueprint::FunctionBlueprint(Handle<JSFunction> function,
+                                     Isolate* isolate, Zone* zone)
+    : shared_(handle(function->shared(), isolate)),
+      feedback_vector_(handle(function->feedback_vector(), isolate)),
+      context_hints_(zone) {
+  context_hints_.AddConstant(handle(function->context(), isolate));
+}
+
 CompilationSubject::CompilationSubject(Handle<JSFunction> closure,
-                                       Isolate* isolate)
-    : blueprint_{handle(closure->shared(), isolate),
-                 handle(closure->feedback_vector(), isolate)},
-      closure_(closure) {
+                                       Isolate* isolate, Zone* zone)
+    : blueprint_(closure, isolate, zone), closure_(closure) {
   CHECK(closure->has_feedback_vector());
 }
 
 Hints::Hints(Zone* zone)
-    : constants_(zone), maps_(zone), function_blueprints_(zone) {}
+    : virtual_contexts_(zone),
+      constants_(zone),
+      maps_(zone),
+      function_blueprints_(zone) {}
+
+#ifdef ENABLE_SLOW_DCHECKS
+namespace {
+template <typename K, typename Compare>
+bool SetIncludes(ZoneSet<K, Compare> const& lhs,
+                 ZoneSet<K, Compare> const& rhs) {
+  return std::all_of(rhs.cbegin(), rhs.cend(),
+                     [&](K const& x) { return lhs.find(x) != lhs.cend(); });
+}
+}  // namespace
+bool Hints::Includes(Hints const& other) const {
+  return SetIncludes(constants(), other.constants()) &&
+         SetIncludes(function_blueprints(), other.function_blueprints()) &&
+         SetIncludes(maps(), other.maps());
+}
+bool Hints::Equals(Hints const& other) const {
+  return this->Includes(other) && other.Includes(*this);
+}
+#endif
 
 const ConstantsSet& Hints::constants() const { return constants_; }
 
@@ -39,6 +504,14 @@ const BlueprintsSet& Hints::function_blueprints() const {
   return function_blueprints_;
 }
 
+const VirtualContextsSet& Hints::virtual_contexts() const {
+  return virtual_contexts_;
+}
+
+void Hints::AddVirtualContext(VirtualContext virtual_context) {
+  virtual_contexts_.insert(virtual_context);
+}
+
 void Hints::AddConstant(Handle<Object> constant) {
   constants_.insert(constant);
 }
@@ -53,16 +526,29 @@ void Hints::Add(const Hints& other) {
   for (auto x : other.constants()) AddConstant(x);
   for (auto x : other.maps()) AddMap(x);
   for (auto x : other.function_blueprints()) AddFunctionBlueprint(x);
+  for (auto x :
other.virtual_contexts()) AddVirtualContext(x); } bool Hints::IsEmpty() const { - return constants().empty() && maps().empty() && function_blueprints().empty(); + return constants().empty() && maps().empty() && + function_blueprints().empty() && virtual_contexts().empty(); } +std::ostream& operator<<(std::ostream& out, + const VirtualContext& virtual_context) { + out << "Distance " << virtual_context.distance << " from " + << Brief(*virtual_context.context) << std::endl; + return out; +} + +std::ostream& operator<<(std::ostream& out, const Hints& hints); + std::ostream& operator<<(std::ostream& out, const FunctionBlueprint& blueprint) { - out << Brief(*blueprint.shared) << std::endl; - out << Brief(*blueprint.feedback_vector) << std::endl; + out << Brief(*blueprint.shared()) << std::endl; + out << Brief(*blueprint.feedback_vector()) << std::endl; + !blueprint.context_hints().IsEmpty() && out << blueprint.context_hints() + << "):" << std::endl; return out; } @@ -76,10 +562,14 @@ std::ostream& operator<<(std::ostream& out, const Hints& hints) { for (FunctionBlueprint const& blueprint : hints.function_blueprints()) { out << " blueprint " << blueprint << std::endl; } + for (VirtualContext const& virtual_context : hints.virtual_contexts()) { + out << " virtual context " << virtual_context << std::endl; + } return out; } void Hints::Clear() { + virtual_contexts_.clear(); constants_.clear(); maps_.clear(); function_blueprints_.clear(); @@ -92,50 +582,53 @@ class SerializerForBackgroundCompilation::Environment : public ZoneObject { Environment(Zone* zone, Isolate* isolate, CompilationSubject function, base::Optional new_target, const HintsVector& arguments); - bool IsDead() const { return environment_hints_.empty(); } + bool IsDead() const { return ephemeral_hints_.empty(); } void Kill() { DCHECK(!IsDead()); - environment_hints_.clear(); + ephemeral_hints_.clear(); DCHECK(IsDead()); } void Revive() { DCHECK(IsDead()); - environment_hints_.resize(environment_hints_size(), Hints(zone())); + ephemeral_hints_.resize(ephemeral_hints_size(), Hints(zone())); DCHECK(!IsDead()); } - // When control flow bytecodes are encountered, e.g. a conditional jump, - // the current environment needs to be stashed together with the target jump - // address. Later, when this target bytecode is handled, the stashed - // environment will be merged into the current one. + // Merge {other} into {this} environment (leaving {other} unmodified). 
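// A short sketch of the dead/alive discipline around the Kill/Revive methods
// above and the Merge declared next (illustrative names, not V8 code): "dead"
// is encoded as having no ephemeral slots at all, Revive re-creates empty
// slots, and merging into a dead environment adopts the other side's slots
// wholesale. The sketch assumes both sides share the same full_size.
#include <set>
#include <vector>

struct LivenessEnv {
  std::vector<std::set<int>> ephemeral;  // empty <=> dead
  size_t full_size = 0;  // parameters + registers + accumulator

  bool IsDead() const { return ephemeral.empty(); }
  void Kill() { ephemeral.clear(); }
  void Revive() { ephemeral.assign(full_size, std::set<int>{}); }

  void Merge(const LivenessEnv& other) {
    if (other.IsDead()) return;     // nothing flows in from a dead path
    if (IsDead()) {
      ephemeral = other.ephemeral;  // adopt the live path wholesale
      return;
    }
    for (size_t i = 0; i < ephemeral.size() && i < other.ephemeral.size(); ++i)
      ephemeral[i].insert(other.ephemeral[i].begin(),
                          other.ephemeral[i].end());
  }
};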
void Merge(Environment* other); FunctionBlueprint function() const { return function_; } + Hints const& closure_hints() const { return closure_hints_; } + Hints const& current_context_hints() const { return current_context_hints_; } + Hints& current_context_hints() { return current_context_hints_; } + Hints const& return_value_hints() const { return return_value_hints_; } + Hints& return_value_hints() { return return_value_hints_; } + Hints& accumulator_hints() { - CHECK_LT(accumulator_index(), environment_hints_.size()); - return environment_hints_[accumulator_index()]; + CHECK_LT(accumulator_index(), ephemeral_hints_.size()); + return ephemeral_hints_[accumulator_index()]; } + Hints& register_hints(interpreter::Register reg) { + if (reg.is_function_closure()) return closure_hints_; + if (reg.is_current_context()) return current_context_hints_; int local_index = RegisterToLocalIndex(reg); - CHECK_LT(local_index, environment_hints_.size()); - return environment_hints_[local_index]; + CHECK_LT(local_index, ephemeral_hints_.size()); + return ephemeral_hints_[local_index]; } - Hints& return_value_hints() { return return_value_hints_; } - // Clears all hints except those for the return value and the closure. + // Clears all hints except those for the context, return value, and the + // closure. void ClearEphemeralHints() { - DCHECK_EQ(environment_hints_.size(), function_closure_index() + 1); - for (int i = 0; i < function_closure_index(); ++i) { - environment_hints_[i].Clear(); - } + for (auto& hints : ephemeral_hints_) hints.Clear(); } // Appends the hints for the given register range to {dst} (in order). void ExportRegisterHints(interpreter::Register first, size_t count, - HintsVector& dst); + HintsVector& dst); // NOLINT(runtime/references) private: friend std::ostream& operator<<(std::ostream& out, const Environment& env); @@ -153,34 +646,39 @@ class SerializerForBackgroundCompilation::Environment : public ZoneObject { int const parameter_count_; int const register_count_; - // environment_hints_ contains hints for the contents of the registers, + Hints closure_hints_; + Hints current_context_hints_; + Hints return_value_hints_; + + // ephemeral_hints_ contains hints for the contents of the registers, // the accumulator and the parameters. The layout is as follows: - // [ parameters | registers | accumulator | context | closure ] + // [ parameters | registers | accumulator ] // The first parameter is the receiver. 
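// A minimal sketch of the slot arithmetic implied by the layout comment
// above (function names are illustrative): register i lives at
// parameter_count + i, the accumulator occupies the single slot past the
// registers, and the receiver is parameter 0.
constexpr int RegisterSlot(int parameter_count, int reg_index) {
  return parameter_count + reg_index;
}
constexpr int AccumulatorSlot(int parameter_count, int register_count) {
  return parameter_count + register_count;
}
constexpr int EphemeralSlotCount(int parameter_count, int register_count) {
  return AccumulatorSlot(parameter_count, register_count) + 1;
}
static_assert(AccumulatorSlot(3, 8) == 11, "accumulator follows registers");
static_assert(EphemeralSlotCount(3, 8) == 12, "one slot past the accumulator");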
- HintsVector environment_hints_; + HintsVector ephemeral_hints_; int accumulator_index() const { return parameter_count() + register_count(); } - int current_context_index() const { return accumulator_index() + 1; } - int function_closure_index() const { return current_context_index() + 1; } - int environment_hints_size() const { return function_closure_index() + 1; } - - Hints return_value_hints_; + int ephemeral_hints_size() const { return accumulator_index() + 1; } }; SerializerForBackgroundCompilation::Environment::Environment( Zone* zone, CompilationSubject function) : zone_(zone), function_(function.blueprint()), - parameter_count_(function_.shared->GetBytecodeArray().parameter_count()), - register_count_(function_.shared->GetBytecodeArray().register_count()), - environment_hints_(environment_hints_size(), Hints(zone), zone), - return_value_hints_(zone) { + parameter_count_( + function_.shared()->GetBytecodeArray().parameter_count()), + register_count_(function_.shared()->GetBytecodeArray().register_count()), + closure_hints_(zone), + current_context_hints_(zone), + return_value_hints_(zone), + ephemeral_hints_(ephemeral_hints_size(), Hints(zone), zone) { Handle closure; if (function.closure().ToHandle(&closure)) { - environment_hints_[function_closure_index()].AddConstant(closure); + closure_hints_.AddConstant(closure); } else { - environment_hints_[function_closure_index()].AddFunctionBlueprint( - function.blueprint()); + closure_hints_.AddFunctionBlueprint(function.blueprint()); } + + // Consume blueprint context hint information. + current_context_hints().Add(function.blueprint().context_hints()); } SerializerForBackgroundCompilation::Environment::Environment( @@ -191,18 +689,19 @@ SerializerForBackgroundCompilation::Environment::Environment( // the parameter_count. size_t param_count = static_cast(parameter_count()); for (size_t i = 0; i < std::min(arguments.size(), param_count); ++i) { - environment_hints_[i] = arguments[i]; + ephemeral_hints_[i] = arguments[i]; } // Pad the rest with "undefined". 
Hints undefined_hint(zone); undefined_hint.AddConstant(isolate->factory()->undefined_value()); for (size_t i = arguments.size(); i < param_count; ++i) { - environment_hints_[i] = undefined_hint; + ephemeral_hints_[i] = undefined_hint; } interpreter::Register new_target_reg = - function_.shared->GetBytecodeArray() + function_.shared() + ->GetBytecodeArray() .incoming_new_target_or_generator_register(); if (new_target_reg.is_valid()) { DCHECK(register_hints(new_target_reg).IsEmpty()); @@ -219,16 +718,20 @@ void SerializerForBackgroundCompilation::Environment::Merge( CHECK_EQ(parameter_count(), other->parameter_count()); CHECK_EQ(register_count(), other->register_count()); + SLOW_DCHECK(closure_hints_.Equals(other->closure_hints_)); + if (IsDead()) { - environment_hints_ = other->environment_hints_; + ephemeral_hints_ = other->ephemeral_hints_; + SLOW_DCHECK(return_value_hints_.Includes(other->return_value_hints_)); CHECK(!IsDead()); return; } - CHECK_EQ(environment_hints_.size(), other->environment_hints_.size()); - for (size_t i = 0; i < environment_hints_.size(); ++i) { - environment_hints_[i].Add(other->environment_hints_[i]); + CHECK_EQ(ephemeral_hints_.size(), other->ephemeral_hints_.size()); + for (size_t i = 0; i < ephemeral_hints_.size(); ++i) { + ephemeral_hints_[i].Add(other->ephemeral_hints_[i]); } + return_value_hints_.Add(other->return_value_hints_); } @@ -236,42 +739,39 @@ std::ostream& operator<<( std::ostream& out, const SerializerForBackgroundCompilation::Environment& env) { std::ostringstream output_stream; + output_stream << "Function "; + env.function_.shared()->Name().Print(output_stream); - for (size_t i = 0; i << env.parameter_count(); ++i) { - Hints const& hints = env.environment_hints_[i]; - if (!hints.IsEmpty()) { - output_stream << "Hints for a" << i << ":\n" << hints; - } - } - for (size_t i = 0; i << env.register_count(); ++i) { - Hints const& hints = env.environment_hints_[env.parameter_count() + i]; - if (!hints.IsEmpty()) { - output_stream << "Hints for r" << i << ":\n" << hints; - } - } - { - Hints const& hints = env.environment_hints_[env.accumulator_index()]; - if (!hints.IsEmpty()) { - output_stream << "Hints for :\n" << hints; + if (env.IsDead()) { + output_stream << "dead\n"; + } else { + output_stream << "alive\n"; + for (int i = 0; i < static_cast(env.ephemeral_hints_.size()); ++i) { + Hints const& hints = env.ephemeral_hints_[i]; + if (!hints.IsEmpty()) { + if (i < env.parameter_count()) { + output_stream << "Hints for a" << i << ":\n"; + } else if (i < env.parameter_count() + env.register_count()) { + int local_register = i - env.parameter_count(); + output_stream << "Hints for r" << local_register << ":\n"; + } else if (i == env.accumulator_index()) { + output_stream << "Hints for :\n"; + } else { + UNREACHABLE(); + } + output_stream << hints; + } } } - { - Hints const& hints = env.environment_hints_[env.function_closure_index()]; - if (!hints.IsEmpty()) { - output_stream << "Hints for :\n" << hints; - } + + if (!env.closure_hints().IsEmpty()) { + output_stream << "Hints for :\n" << env.closure_hints(); } - { - Hints const& hints = env.environment_hints_[env.current_context_index()]; - if (!hints.IsEmpty()) { - output_stream << "Hints for :\n" << hints; - } + if (!env.current_context_hints().IsEmpty()) { + output_stream << "Hints for :\n" << env.current_context_hints(); } - { - Hints const& hints = env.return_value_hints_; - if (!hints.IsEmpty()) { - output_stream << "Hints for {return value}:\n" << hints; - } + if 
(!env.return_value_hints().IsEmpty()) { + output_stream << "Hints for {return value}:\n" << env.return_value_hints(); } out << output_stream.str(); @@ -280,25 +780,26 @@ std::ostream& operator<<( int SerializerForBackgroundCompilation::Environment::RegisterToLocalIndex( interpreter::Register reg) const { - // TODO(mslekova): We also want to gather hints for the context. - if (reg.is_current_context()) return current_context_index(); - if (reg.is_function_closure()) return function_closure_index(); if (reg.is_parameter()) { return reg.ToParameterIndex(parameter_count()); } else { + DCHECK(!reg.is_function_closure()); return parameter_count() + reg.index(); } } SerializerForBackgroundCompilation::SerializerForBackgroundCompilation( JSHeapBroker* broker, CompilationDependencies* dependencies, Zone* zone, - Handle closure, SerializerForBackgroundCompilationFlags flags) + Handle closure, SerializerForBackgroundCompilationFlags flags, + BailoutId osr_offset) : broker_(broker), dependencies_(dependencies), zone_(zone), - environment_(new (zone) Environment(zone, {closure, broker_->isolate()})), - stashed_environments_(zone), - flags_(flags) { + environment_(new (zone) Environment( + zone, CompilationSubject(closure, broker_->isolate(), zone))), + jump_target_environments_(zone), + flags_(flags), + osr_offset_(osr_offset) { JSFunctionRef(broker, closure).Serialize(); } @@ -311,9 +812,9 @@ SerializerForBackgroundCompilation::SerializerForBackgroundCompilation( zone_(zone), environment_(new (zone) Environment(zone, broker_->isolate(), function, new_target, arguments)), - stashed_environments_(zone), - flags_(flags) { - DCHECK(!(flags_ & SerializerForBackgroundCompilationFlag::kOsr)); + jump_target_environments_(zone), + flags_(flags), + osr_offset_(BailoutId::None()) { TraceScope tracer( broker_, this, "SerializerForBackgroundCompilation::SerializerForBackgroundCompilation"); @@ -331,12 +832,12 @@ bool SerializerForBackgroundCompilation::BailoutOnUninitialized( SerializerForBackgroundCompilationFlag::kBailoutOnUninitialized)) { return false; } - if (flags() & SerializerForBackgroundCompilationFlag::kOsr) { + if (!osr_offset().IsNone()) { // Exclude OSR from this optimization because we might end up skipping the // OSR entry point. TODO(neis): Support OSR? 
return false; } - FeedbackNexus nexus(environment()->function().feedback_vector, slot); + FeedbackNexus nexus(environment()->function().feedback_vector(), slot); if (!slot.IsInvalid() && nexus.IsUninitialized()) { FeedbackSource source(nexus); if (broker()->HasFeedback(source)) { @@ -354,9 +855,9 @@ bool SerializerForBackgroundCompilation::BailoutOnUninitialized( Hints SerializerForBackgroundCompilation::Run() { TraceScope tracer(broker(), this, "SerializerForBackgroundCompilation::Run"); - SharedFunctionInfoRef shared(broker(), environment()->function().shared); - FeedbackVectorRef feedback_vector(broker(), - environment()->function().feedback_vector); + SharedFunctionInfoRef shared(broker(), environment()->function().shared()); + FeedbackVectorRef feedback_vector( + broker(), environment()->function().feedback_vector()); if (shared.IsSerializedForCompilation(feedback_vector)) { TRACE_BROKER(broker(), "Already ran serializer for SharedFunctionInfo " << Brief(*shared.object()) @@ -382,9 +883,10 @@ Hints SerializerForBackgroundCompilation::Run() { class ExceptionHandlerMatcher { public: explicit ExceptionHandlerMatcher( - BytecodeArrayIterator const& bytecode_iterator) + BytecodeArrayIterator const& bytecode_iterator, + Handle bytecode_array) : bytecode_iterator_(bytecode_iterator) { - HandlerTable table(*bytecode_iterator_.bytecode_array()); + HandlerTable table(*bytecode_array); for (int i = 0, n = table.NumberOfRangeEntries(); i < n; ++i) { handlers_.insert(table.GetRangeHandler(i)); } @@ -407,30 +909,53 @@ class ExceptionHandlerMatcher { std::set::const_iterator handlers_iterator_; }; +Handle SerializerForBackgroundCompilation::bytecode_array() + const { + return handle(environment()->function().shared()->GetBytecodeArray(), + broker()->isolate()); +} + +BytecodeAnalysis const& SerializerForBackgroundCompilation::GetBytecodeAnalysis( + bool serialize) { + return broker()->GetBytecodeAnalysis( + bytecode_array(), osr_offset(), + flags() & + SerializerForBackgroundCompilationFlag::kAnalyzeEnvironmentLiveness, + serialize); +} + void SerializerForBackgroundCompilation::TraverseBytecode() { - BytecodeArrayRef bytecode_array( - broker(), handle(environment()->function().shared->GetBytecodeArray(), - broker()->isolate())); - BytecodeArrayIterator iterator(bytecode_array.object()); - ExceptionHandlerMatcher handler_matcher(iterator); + BytecodeAnalysis const& bytecode_analysis = GetBytecodeAnalysis(true); + BytecodeArrayRef(broker(), bytecode_array()).SerializeForCompilation(); + + BytecodeArrayIterator iterator(bytecode_array()); + ExceptionHandlerMatcher handler_matcher(iterator, bytecode_array()); for (; !iterator.done(); iterator.Advance()) { - MergeAfterJump(&iterator); + int const current_offset = iterator.current_offset(); + IncorporateJumpTargetEnvironment(current_offset); + + TRACE_BROKER(broker(), + "Handling bytecode: " << current_offset << " " + << iterator.current_bytecode()); + TRACE_BROKER(broker(), "Current environment: " << *environment()); if (environment()->IsDead()) { - if (iterator.current_bytecode() == - interpreter::Bytecode::kResumeGenerator || - handler_matcher.CurrentBytecodeIsExceptionHandlerStart()) { + if (handler_matcher.CurrentBytecodeIsExceptionHandlerStart()) { environment()->Revive(); } else { continue; // Skip this bytecode since TF won't generate code for it. 
} } - TRACE_BROKER(broker(), - "Handling bytecode: " << iterator.current_offset() << " " - << iterator.current_bytecode()); - TRACE_BROKER(broker(), "Current environment:\n" << *environment()); + if (bytecode_analysis.IsLoopHeader(current_offset)) { + // Graph builder might insert jumps to resume targets in the loop body. + LoopInfo const& loop_info = + bytecode_analysis.GetLoopInfoFor(current_offset); + for (const auto& target : loop_info.resume_jump_targets()) { + ContributeToJumpTargetEnvironment(target.target_offset()); + } + } switch (iterator.current_bytecode()) { #define DEFINE_BYTECODE_CASE(name) \ @@ -447,21 +972,6 @@ void SerializerForBackgroundCompilation::TraverseBytecode() { } } -void SerializerForBackgroundCompilation::VisitIllegal( - BytecodeArrayIterator* iterator) { - UNREACHABLE(); -} - -void SerializerForBackgroundCompilation::VisitWide( - BytecodeArrayIterator* iterator) { - UNREACHABLE(); -} - -void SerializerForBackgroundCompilation::VisitExtraWide( - BytecodeArrayIterator* iterator) { - UNREACHABLE(); -} - void SerializerForBackgroundCompilation::VisitGetSuperConstructor( BytecodeArrayIterator* iterator) { interpreter::Register dst = iterator->GetRegisterOperand(0); @@ -480,6 +990,20 @@ void SerializerForBackgroundCompilation::VisitGetSuperConstructor( } } +void SerializerForBackgroundCompilation::VisitGetTemplateObject( + BytecodeArrayIterator* iterator) { + ObjectRef description( + broker(), iterator->GetConstantForIndexOperand(0, broker()->isolate())); + FeedbackSlot slot = iterator->GetSlotOperand(1); + FeedbackVectorRef feedback_vector( + broker(), environment()->function().feedback_vector()); + SharedFunctionInfoRef shared(broker(), environment()->function().shared()); + JSArrayRef template_object = + shared.GetTemplateObject(description, feedback_vector, slot, true); + environment()->accumulator_hints().Clear(); + environment()->accumulator_hints().AddConstant(template_object.object()); +} + void SerializerForBackgroundCompilation::VisitLdaTrue( BytecodeArrayIterator* iterator) { environment()->accumulator_hints().Clear(); @@ -529,11 +1053,171 @@ void SerializerForBackgroundCompilation::VisitLdaSmi( Smi::FromInt(iterator->GetImmediateOperand(0)), broker()->isolate())); } +void SerializerForBackgroundCompilation::VisitInvokeIntrinsic( + BytecodeArrayIterator* iterator) { + Runtime::FunctionId functionId = iterator->GetIntrinsicIdOperand(0); + // For JSNativeContextSpecialization::ReduceJSAsyncFunctionResolve and + // JSNativeContextSpecialization::ReduceJSResolvePromise. + if (functionId == Runtime::kInlineAsyncFunctionResolve) { + interpreter::Register first_reg = iterator->GetRegisterOperand(1); + size_t reg_count = iterator->GetRegisterCountOperand(2); + CHECK_EQ(reg_count, 3); + HintsVector arguments(zone()); + environment()->ExportRegisterHints(first_reg, reg_count, arguments); + Hints const& resolution_hints = arguments[1]; // The resolution object. 
+ ProcessHintsForPromiseResolve(resolution_hints); + environment()->accumulator_hints().Clear(); + return; + } + environment()->ClearEphemeralHints(); +} + void SerializerForBackgroundCompilation::VisitLdaConstant( BytecodeArrayIterator* iterator) { environment()->accumulator_hints().Clear(); environment()->accumulator_hints().AddConstant( - handle(iterator->GetConstantForIndexOperand(0), broker()->isolate())); + iterator->GetConstantForIndexOperand(0, broker()->isolate())); +} + +void SerializerForBackgroundCompilation::VisitPushContext( + BytecodeArrayIterator* iterator) { + // Transfer current context hints to the destination register hints. + Hints& current_context_hints = environment()->current_context_hints(); + Hints& saved_context_hints = + environment()->register_hints(iterator->GetRegisterOperand(0)); + saved_context_hints.Clear(); + saved_context_hints.Add(current_context_hints); + + // New Context is in the accumulator. Put those hints into the current context + // register hints. + current_context_hints.Clear(); + current_context_hints.Add(environment()->accumulator_hints()); +} + +void SerializerForBackgroundCompilation::VisitPopContext( + BytecodeArrayIterator* iterator) { + // Replace current context hints with hints given in the argument register. + Hints& new_context_hints = + environment()->register_hints(iterator->GetRegisterOperand(0)); + environment()->current_context_hints().Clear(); + environment()->current_context_hints().Add(new_context_hints); +} + +void SerializerForBackgroundCompilation::ProcessImmutableLoad( + ContextRef& context_ref, int slot, ContextProcessingMode mode) { + DCHECK(mode == kSerializeSlot || mode == kSerializeSlotAndAddToAccumulator); + base::Optional slot_value = context_ref.get(slot, true); + + // Also, put the object into the constant hints for the accumulator. + if (mode == kSerializeSlotAndAddToAccumulator && slot_value.has_value()) { + environment()->accumulator_hints().AddConstant(slot_value.value().object()); + } +} + +void SerializerForBackgroundCompilation::ProcessContextAccess( + const Hints& context_hints, int slot, int depth, + ContextProcessingMode mode) { + // This function is for JSContextSpecialization::ReduceJSLoadContext and + // ReduceJSStoreContext. Those reductions attempt to eliminate as many + // loads as possible by making use of constant Context objects. In the + // case of an immutable load, ReduceJSLoadContext even attempts to load + // the value at {slot}, replacing the load with a constant. + for (auto x : context_hints.constants()) { + if (x->IsContext()) { + // Walk this context to the given depth and serialize the slot found. 
+ ContextRef context_ref(broker(), x); + size_t remaining_depth = depth; + context_ref = context_ref.previous(&remaining_depth, true); + if (remaining_depth == 0 && mode != kIgnoreSlot) { + ProcessImmutableLoad(context_ref, slot, mode); + } + } + } + for (auto x : context_hints.virtual_contexts()) { + if (x.distance <= static_cast(depth)) { + ContextRef context_ref(broker(), x.context); + size_t remaining_depth = depth - x.distance; + context_ref = context_ref.previous(&remaining_depth, true); + if (remaining_depth == 0 && mode != kIgnoreSlot) { + ProcessImmutableLoad(context_ref, slot, mode); + } + } + } +} + +void SerializerForBackgroundCompilation::VisitLdaContextSlot( + BytecodeArrayIterator* iterator) { + Hints& context_hints = + environment()->register_hints(iterator->GetRegisterOperand(0)); + const int slot = iterator->GetIndexOperand(1); + const int depth = iterator->GetUnsignedImmediateOperand(2); + environment()->accumulator_hints().Clear(); + ProcessContextAccess(context_hints, slot, depth, kIgnoreSlot); +} + +void SerializerForBackgroundCompilation::VisitLdaCurrentContextSlot( + BytecodeArrayIterator* iterator) { + const int slot = iterator->GetIndexOperand(0); + const int depth = 0; + Hints& context_hints = environment()->current_context_hints(); + environment()->accumulator_hints().Clear(); + ProcessContextAccess(context_hints, slot, depth, kIgnoreSlot); +} + +void SerializerForBackgroundCompilation::VisitLdaImmutableContextSlot( + BytecodeArrayIterator* iterator) { + const int slot = iterator->GetIndexOperand(1); + const int depth = iterator->GetUnsignedImmediateOperand(2); + Hints& context_hints = + environment()->register_hints(iterator->GetRegisterOperand(0)); + environment()->accumulator_hints().Clear(); + ProcessContextAccess(context_hints, slot, depth, + kSerializeSlotAndAddToAccumulator); +} + +void SerializerForBackgroundCompilation::VisitLdaImmutableCurrentContextSlot( + BytecodeArrayIterator* iterator) { + const int slot = iterator->GetIndexOperand(0); + const int depth = 0; + Hints& context_hints = environment()->current_context_hints(); + environment()->accumulator_hints().Clear(); + ProcessContextAccess(context_hints, slot, depth, + kSerializeSlotAndAddToAccumulator); +} + +void SerializerForBackgroundCompilation::VisitLdaModuleVariable( + BytecodeArrayIterator* iterator) { + const int depth = iterator->GetUnsignedImmediateOperand(1); + + // TODO(mvstanton): If we have a constant module, should we serialize the + // cell as well? Then we could put the value in the accumulator. 
+ environment()->accumulator_hints().Clear(); + ProcessContextAccess(environment()->current_context_hints(), + Context::EXTENSION_INDEX, depth, kSerializeSlot); +} + +void SerializerForBackgroundCompilation::VisitStaModuleVariable( + BytecodeArrayIterator* iterator) { + const int depth = iterator->GetUnsignedImmediateOperand(1); + ProcessContextAccess(environment()->current_context_hints(), + Context::EXTENSION_INDEX, depth, kSerializeSlot); +} + +void SerializerForBackgroundCompilation::VisitStaContextSlot( + BytecodeArrayIterator* iterator) { + const int slot = iterator->GetIndexOperand(1); + const int depth = iterator->GetUnsignedImmediateOperand(2); + Hints& register_hints = + environment()->register_hints(iterator->GetRegisterOperand(0)); + ProcessContextAccess(register_hints, slot, depth, kIgnoreSlot); +} + +void SerializerForBackgroundCompilation::VisitStaCurrentContextSlot( + BytecodeArrayIterator* iterator) { + const int slot = iterator->GetIndexOperand(0); + const int depth = 0; + Hints& context_hints = environment()->current_context_hints(); + ProcessContextAccess(context_hints, slot, depth, kIgnoreSlot); } void SerializerForBackgroundCompilation::VisitLdar( @@ -558,14 +1242,60 @@ void SerializerForBackgroundCompilation::VisitMov( environment()->register_hints(dst).Add(environment()->register_hints(src)); } +void SerializerForBackgroundCompilation::VisitCreateFunctionContext( + BytecodeArrayIterator* iterator) { + ProcessCreateContext(); +} + +void SerializerForBackgroundCompilation::VisitCreateBlockContext( + BytecodeArrayIterator* iterator) { + ProcessCreateContext(); +} + +void SerializerForBackgroundCompilation::VisitCreateEvalContext( + BytecodeArrayIterator* iterator) { + ProcessCreateContext(); +} + +void SerializerForBackgroundCompilation::VisitCreateWithContext( + BytecodeArrayIterator* iterator) { + ProcessCreateContext(); +} + +void SerializerForBackgroundCompilation::VisitCreateCatchContext( + BytecodeArrayIterator* iterator) { + ProcessCreateContext(); +} + +void SerializerForBackgroundCompilation::ProcessCreateContext() { + Hints& accumulator_hints = environment()->accumulator_hints(); + accumulator_hints.Clear(); + Hints& current_context_hints = environment()->current_context_hints(); + + // For each constant context, we must create a virtual context from + // it of distance one. + for (auto x : current_context_hints.constants()) { + if (x->IsContext()) { + Handle as_context(Handle::cast(x)); + accumulator_hints.AddVirtualContext(VirtualContext(1, as_context)); + } + } + + // For each virtual context, we must create a virtual context from + // it of distance {existing distance} + 1. 
+  for (auto x : current_context_hints.virtual_contexts()) {
+    accumulator_hints.AddVirtualContext(
+        VirtualContext(x.distance + 1, x.context));
+  }
+}
+
 void SerializerForBackgroundCompilation::VisitCreateClosure(
     BytecodeArrayIterator* iterator) {
-  Handle<SharedFunctionInfo> shared(
-      SharedFunctionInfo::cast(iterator->GetConstantForIndexOperand(0)),
-      broker()->isolate());
+  Handle<SharedFunctionInfo> shared = Handle<SharedFunctionInfo>::cast(
+      iterator->GetConstantForIndexOperand(0, broker()->isolate()));
   Handle<FeedbackCell> feedback_cell =
-      environment()->function().feedback_vector->GetClosureFeedbackCell(
+      environment()->function().feedback_vector()->GetClosureFeedbackCell(
           iterator->GetIndexOperand(1));
   FeedbackCellRef feedback_cell_ref(broker(), feedback_cell);
   Handle<Object> cell_value(feedback_cell->value(), broker()->isolate());
@@ -573,8 +1303,13 @@ void SerializerForBackgroundCompilation::VisitCreateClosure(
   environment()->accumulator_hints().Clear();
   if (cell_value->IsFeedbackVector()) {
-    environment()->accumulator_hints().AddFunctionBlueprint(
-        {shared, Handle<FeedbackVector>::cast(cell_value)});
+    // Gather the context hints from the current context register hint
+    // structure.
+    FunctionBlueprint blueprint(shared,
+                                Handle<FeedbackVector>::cast(cell_value),
+                                environment()->current_context_hints());
+
+    environment()->accumulator_hints().AddFunctionBlueprint(blueprint);
   }
 }
@@ -685,6 +1420,16 @@ void SerializerForBackgroundCompilation::VisitCallWithSpread(
   ProcessCallVarArgs(iterator, ConvertReceiverMode::kAny, true);
 }
+
+void SerializerForBackgroundCompilation::VisitCallJSRuntime(
+    BytecodeArrayIterator* iterator) {
+  environment()->accumulator_hints().Clear();
+
+  // BytecodeGraphBuilder::VisitCallJSRuntime needs the {runtime_index}
+  // slot in the native context to be serialized.
+  const int runtime_index = iterator->GetNativeContextIndexOperand(0);
+  broker()->native_context().get(runtime_index, true);
+}
+
 Hints SerializerForBackgroundCompilation::RunChildSerializer(
     CompilationSubject function, base::Optional<Hints> new_target,
     const HintsVector& arguments, bool with_spread) {
@@ -700,14 +1445,14 @@ Hints SerializerForBackgroundCompilation::RunChildSerializer(
     padded.pop_back();  // Remove the spread element.
     // Fill the rest with empty hints.
     padded.resize(
-        function.blueprint().shared->GetBytecodeArray().parameter_count(),
+        function.blueprint().shared()->GetBytecodeArray().parameter_count(),
         Hints(zone()));
     return RunChildSerializer(function, new_target, padded, false);
   }
 
   SerializerForBackgroundCompilation child_serializer(
       broker(), dependencies(), zone(), function, new_target, arguments,
-      flags().without(SerializerForBackgroundCompilationFlag::kOsr));
+      flags());
   return child_serializer.Run();
 }
@@ -734,7 +1479,7 @@ void SerializerForBackgroundCompilation::ProcessCallOrConstruct(
   // Incorporate feedback into hints.
   base::Optional<HeapObjectRef> feedback = GetHeapObjectFeedback(
-      broker(), environment()->function().feedback_vector, slot);
+      broker(), environment()->function().feedback_vector(), slot);
   if (feedback.has_value() && feedback->map().is_callable()) {
     if (new_target.has_value()) {
       // Construct; feedback is new_target, which often is also the callee.
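One detail worth calling out from this hunk: for spread calls, `RunChildSerializer` drops the spread element and pads the argument hints out to the callee's declared parameter count before recursing. A sketch of just that padding step, with simplified types (the real code works on `HintsVector` and a `FunctionBlueprint`):

```cpp
#include <cstddef>
#include <vector>

struct Hints {};  // stand-in for the serializer's per-value hints

// Drop the unanalyzable spread element, then pad with empty hints so
// the child serializer sees one entry per declared parameter.
std::vector<Hints> PadArgumentsForChild(std::vector<Hints> args,
                                        std::size_t parameter_count) {
  if (!args.empty()) args.pop_back();  // remove the spread element
  if (args.size() < parameter_count) {
    args.resize(parameter_count, Hints{});
  }
  return args;
}
```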
@@ -752,15 +1497,37 @@ void SerializerForBackgroundCompilation::ProcessCallOrConstruct( if (!hint->IsJSFunction()) continue; Handle function = Handle::cast(hint); - if (!function->shared().IsInlineable() || !function->has_feedback_vector()) - continue; + JSFunctionRef(broker(), function).Serialize(); + + Handle shared(function->shared(), broker()->isolate()); + + if (shared->IsApiFunction()) { + ProcessApiCall(shared, arguments); + DCHECK(!shared->IsInlineable()); + } else if (shared->HasBuiltinId()) { + ProcessBuiltinCall(shared, arguments); + DCHECK(!shared->IsInlineable()); + } + + if (!shared->IsInlineable() || !function->has_feedback_vector()) continue; environment()->accumulator_hints().Add(RunChildSerializer( - {function, broker()->isolate()}, new_target, arguments, with_spread)); + CompilationSubject(function, broker()->isolate(), zone()), new_target, + arguments, with_spread)); } for (auto hint : callee.function_blueprints()) { - if (!hint.shared->IsInlineable()) continue; + Handle shared = hint.shared(); + + if (shared->IsApiFunction()) { + ProcessApiCall(shared, arguments); + DCHECK(!shared->IsInlineable()); + } else if (shared->HasBuiltinId()) { + ProcessBuiltinCall(shared, arguments); + DCHECK(!shared->IsInlineable()); + } + + if (!shared->IsInlineable()) continue; environment()->accumulator_hints().Add(RunChildSerializer( CompilationSubject(hint), new_target, arguments, with_spread)); } @@ -788,22 +1555,222 @@ void SerializerForBackgroundCompilation::ProcessCallVarArgs( ProcessCallOrConstruct(callee, base::nullopt, arguments, slot); } -void SerializerForBackgroundCompilation::ProcessJump( - interpreter::BytecodeArrayIterator* iterator) { - int jump_target = iterator->GetJumpTargetOffset(); - int current_offset = iterator->current_offset(); - if (current_offset >= jump_target) return; +void SerializerForBackgroundCompilation::ProcessApiCall( + Handle target, const HintsVector& arguments) { + FunctionTemplateInfoRef target_template_info( + broker(), handle(target->function_data(), broker()->isolate())); + if (!target_template_info.has_call_code()) return; + + target_template_info.SerializeCallCode(); + + SharedFunctionInfoRef target_ref(broker(), target); + target_ref.SerializeFunctionTemplateInfo(); + + if (target_template_info.accept_any_receiver() && + target_template_info.is_signature_undefined()) + return; - stashed_environments_[jump_target] = new (zone()) Environment(*environment()); + CHECK_GE(arguments.size(), 1); + Hints const& receiver_hints = arguments[0]; + for (auto hint : receiver_hints.constants()) { + if (hint->IsUndefined()) { + // The receiver is the global proxy. 
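The hunk above gives `ProcessCallOrConstruct` a three-way split per callee. Condensed into a standalone sketch (the `SharedLike` type and its booleans are hypothetical simplifications of `SharedFunctionInfo`):

```cpp
#include <iostream>

struct SharedLike {
  bool is_api_function;
  bool has_builtin_id;
  bool is_inlineable;
};

void ProcessApiCall(const SharedLike&) { std::cout << "api call\n"; }
void ProcessBuiltinCall(const SharedLike&) { std::cout << "builtin call\n"; }
void RunChildSerializer(const SharedLike&) { std::cout << "recurse\n"; }

// Mirrors the dispatch order above: API functions and builtins get
// dedicated serialization and are never inlineable; only inlineable
// callees are recursed into by a child serializer.
void ProcessCallee(const SharedLike& shared) {
  if (shared.is_api_function) {
    ProcessApiCall(shared);
  } else if (shared.has_builtin_id) {
    ProcessBuiltinCall(shared);
  }
  if (!shared.is_inlineable) return;
  RunChildSerializer(shared);
}

int main() { ProcessCallee({false, true, false}); }  // prints "builtin call"
```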
+      Handle<JSGlobalProxy> global_proxy =
+          broker()->native_context().global_proxy_object().object();
+      ProcessReceiverMapForApiCall(
+          target_template_info,
+          handle(global_proxy->map(), broker()->isolate()));
+      continue;
+    }
+
+    if (!hint->IsJSReceiver()) continue;
+    Handle<JSReceiver> receiver(Handle<JSReceiver>::cast(hint));
+
+    ProcessReceiverMapForApiCall(target_template_info,
+                                 handle(receiver->map(), broker()->isolate()));
+  }
+
+  for (auto receiver_map : receiver_hints.maps()) {
+    ProcessReceiverMapForApiCall(target_template_info, receiver_map);
+  }
 }
 
-void SerializerForBackgroundCompilation::MergeAfterJump(
+void SerializerForBackgroundCompilation::ProcessReceiverMapForApiCall(
+    FunctionTemplateInfoRef& target, Handle<Map> receiver) {
+  if (receiver->is_access_check_needed()) {
+    return;
+  }
+
+  MapRef receiver_map(broker(), receiver);
+  TRACE_BROKER(broker(), "Serializing holder for target: " << target);
+
+  target.LookupHolderOfExpectedType(receiver_map, true);
+}
+
+void SerializerForBackgroundCompilation::ProcessBuiltinCall(
+    Handle<SharedFunctionInfo> target, const HintsVector& arguments) {
+  DCHECK(target->HasBuiltinId());
+  const int builtin_id = target->builtin_id();
+  const char* name = Builtins::name(builtin_id);
+  TRACE_BROKER(broker(), "Serializing for call to builtin " << name);
+  switch (builtin_id) {
+    case Builtins::kPromisePrototypeCatch: {
+      // For JSCallReducer::ReducePromisePrototypeCatch.
+      CHECK_GE(arguments.size(), 1);
+      ProcessMapHintsForPromises(arguments[0]);
+      break;
+    }
+    case Builtins::kPromisePrototypeFinally: {
+      // For JSCallReducer::ReducePromisePrototypeFinally.
+      CHECK_GE(arguments.size(), 1);
+      ProcessMapHintsForPromises(arguments[0]);
+      break;
+    }
+    case Builtins::kPromisePrototypeThen: {
+      // For JSCallReducer::ReducePromisePrototypeThen.
+      CHECK_GE(arguments.size(), 1);
+      ProcessMapHintsForPromises(arguments[0]);
+      break;
+    }
+    case Builtins::kPromiseResolveTrampoline:
+      // For JSCallReducer::ReducePromiseInternalResolve and
+      // JSNativeContextSpecialization::ReduceJSResolvePromise.
+      if (arguments.size() >= 2) {
+        Hints const& resolution_hints = arguments[1];
+        ProcessHintsForPromiseResolve(resolution_hints);
+      }
+      break;
+    case Builtins::kPromiseInternalResolve:
+      // For JSCallReducer::ReducePromiseInternalResolve and
+      // JSNativeContextSpecialization::ReduceJSResolvePromise.
+      if (arguments.size() >= 3) {
+        Hints const& resolution_hints = arguments[2];
+        ProcessHintsForPromiseResolve(resolution_hints);
+      }
+      break;
+    case Builtins::kRegExpPrototypeTest: {
+      // For JSCallReducer::ReduceRegExpPrototypeTest.
+ if (arguments.size() >= 1) { + Hints const& regexp_hints = arguments[0]; + ProcessHintsForRegExpTest(regexp_hints); + } + break; + } + case Builtins::kFunctionPrototypeCall: + if (arguments.size() >= 1) { + Hints const& target_hints = arguments[0]; + ProcessHintsForFunctionCall(target_hints); + } + break; + default: + break; + } +} + +void SerializerForBackgroundCompilation::ProcessHintsForPromiseResolve( + Hints const& resolution_hints) { + auto processMap = [&](Handle map) { + broker()->CreateAccessInfoForLoadingThen(MapRef(broker(), map), + dependencies()); + }; + + for (auto hint : resolution_hints.constants()) { + if (!hint->IsJSReceiver()) continue; + Handle receiver(Handle::cast(hint)); + processMap(handle(receiver->map(), broker()->isolate())); + } + for (auto map_hint : resolution_hints.maps()) { + processMap(map_hint); + } +} + +void SerializerForBackgroundCompilation::ProcessMapHintsForPromises( + Hints const& receiver_hints) { + // We need to serialize the prototypes on each receiver map. + for (auto constant : receiver_hints.constants()) { + if (!constant->IsJSPromise()) continue; + Handle map(Handle::cast(constant)->map(), + broker()->isolate()); + MapRef(broker(), map).SerializePrototype(); + } + for (auto map : receiver_hints.maps()) { + if (!map->IsJSPromiseMap()) continue; + MapRef(broker(), map).SerializePrototype(); + } +} + +PropertyAccessInfo SerializerForBackgroundCompilation::ProcessMapForRegExpTest( + MapRef map) { + PropertyAccessInfo ai_exec = + broker()->CreateAccessInfoForLoadingExec(map, dependencies()); + + Handle holder; + if (ai_exec.IsDataConstant() && ai_exec.holder().ToHandle(&holder)) { + // The property is on the prototype chain. + JSObjectRef holder_ref(broker(), holder); + holder_ref.GetOwnProperty(ai_exec.field_representation(), + ai_exec.field_index(), true); + } + return ai_exec; +} + +void SerializerForBackgroundCompilation::ProcessHintsForRegExpTest( + Hints const& regexp_hints) { + for (auto hint : regexp_hints.constants()) { + if (!hint->IsJSRegExp()) continue; + Handle regexp(Handle::cast(hint)); + Handle regexp_map(regexp->map(), broker()->isolate()); + PropertyAccessInfo ai_exec = + ProcessMapForRegExpTest(MapRef(broker(), regexp_map)); + Handle holder; + if (ai_exec.IsDataConstant() && !ai_exec.holder().ToHandle(&holder)) { + // The property is on the object itself. 
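The promise and regexp helpers in this hunk all iterate hints the same way: a per-map action is applied to the map of every constant hint and to every explicit map hint. A generic sketch of that shared pattern, with hypothetical simplified types:

```cpp
#include <functional>
#include <vector>

struct Map {};
struct HeapObject { Map map; };

struct Hints {
  std::vector<HeapObject> constants;
  std::vector<Map> maps;
};

// Shared shape of ProcessMapHintsForPromises, ProcessHintsForRegExpTest
// and friends: visit the map of each constant, then each map hint.
void ForEachHintedMap(const Hints& hints,
                      const std::function<void(const Map&)>& process) {
  for (const auto& constant : hints.constants) process(constant.map);
  for (const auto& map : hints.maps) process(map);
}
```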
+ JSObjectRef holder_ref(broker(), regexp); + holder_ref.GetOwnProperty(ai_exec.field_representation(), + ai_exec.field_index(), true); + } + } + + for (auto map : regexp_hints.maps()) { + if (!map->IsJSRegExpMap()) continue; + ProcessMapForRegExpTest(MapRef(broker(), map)); + } +} + +void SerializerForBackgroundCompilation::ProcessHintsForFunctionCall( + Hints const& target_hints) { + for (auto constant : target_hints.constants()) { + if (!constant->IsJSFunction()) continue; + JSFunctionRef func(broker(), constant); + func.Serialize(); + } +} + +void SerializerForBackgroundCompilation::ContributeToJumpTargetEnvironment( + int target_offset) { + auto it = jump_target_environments_.find(target_offset); + if (it == jump_target_environments_.end()) { + jump_target_environments_[target_offset] = + new (zone()) Environment(*environment()); + } else { + it->second->Merge(environment()); + } +} + +void SerializerForBackgroundCompilation::IncorporateJumpTargetEnvironment( + int target_offset) { + auto it = jump_target_environments_.find(target_offset); + if (it != jump_target_environments_.end()) { + environment()->Merge(it->second); + jump_target_environments_.erase(it); + } +} + +void SerializerForBackgroundCompilation::ProcessJump( interpreter::BytecodeArrayIterator* iterator) { - int current_offset = iterator->current_offset(); - auto stash = stashed_environments_.find(current_offset); - if (stash != stashed_environments_.end()) { - environment()->Merge(stash->second); - stashed_environments_.erase(stash); + int jump_target = iterator->GetJumpTargetOffset(); + if (iterator->current_offset() < jump_target) { + ContributeToJumpTargetEnvironment(jump_target); } } @@ -813,10 +1780,25 @@ void SerializerForBackgroundCompilation::VisitReturn( environment()->ClearEphemeralHints(); } +void SerializerForBackgroundCompilation::VisitSwitchOnSmiNoFeedback( + interpreter::BytecodeArrayIterator* iterator) { + interpreter::JumpTableTargetOffsets targets = + iterator->GetJumpTableTargetOffsets(); + for (const auto& target : targets) { + ContributeToJumpTargetEnvironment(target.target_offset); + } +} + +void SerializerForBackgroundCompilation::VisitSwitchOnGeneratorState( + interpreter::BytecodeArrayIterator* iterator) { + for (const auto& target : GetBytecodeAnalysis(false).resume_jump_targets()) { + ContributeToJumpTargetEnvironment(target.target_offset()); + } +} + void SerializerForBackgroundCompilation::Environment::ExportRegisterHints( interpreter::Register first, size_t count, HintsVector& dst) { - dst.resize(dst.size() + count, Hints(zone())); - int reg_base = first.index(); + const int reg_base = first.index(); for (int i = 0; i < static_cast(count); ++i) { dst.push_back(register_hints(interpreter::Register(reg_base + i))); } @@ -856,8 +1838,8 @@ GlobalAccessFeedback const* SerializerForBackgroundCompilation::ProcessFeedbackForGlobalAccess( FeedbackSlot slot) { if (slot.IsInvalid()) return nullptr; - if (environment()->function().feedback_vector.is_null()) return nullptr; - FeedbackSource source(environment()->function().feedback_vector, slot); + if (environment()->function().feedback_vector().is_null()) return nullptr; + FeedbackSource source(environment()->function().feedback_vector(), slot); if (broker()->HasFeedback(source)) { return broker()->GetGlobalAccessFeedback(source); @@ -889,14 +1871,31 @@ void SerializerForBackgroundCompilation::VisitLdaGlobalInsideTypeof( VisitLdaGlobal(iterator); } -void SerializerForBackgroundCompilation::VisitLdaLookupGlobalSlot( +void 
SerializerForBackgroundCompilation::ProcessCheckContextExtensions(
+    int depth) {
+  // For BytecodeGraphBuilder::CheckContextExtensions.
+  Hints& context_hints = environment()->current_context_hints();
+  for (int i = 0; i < depth; i++) {
+    ProcessContextAccess(context_hints, Context::EXTENSION_INDEX, i,
+                         kSerializeSlot);
+  }
+}
+
+void SerializerForBackgroundCompilation::ProcessLdaLookupGlobalSlot(
     BytecodeArrayIterator* iterator) {
+  ProcessCheckContextExtensions(iterator->GetUnsignedImmediateOperand(2));
+  // TODO(neis): BytecodeGraphBuilder may insert a JSLoadGlobal.
   VisitLdaGlobal(iterator);
 }
 
+void SerializerForBackgroundCompilation::VisitLdaLookupGlobalSlot(
+    BytecodeArrayIterator* iterator) {
+  ProcessLdaLookupGlobalSlot(iterator);
+}
+
 void SerializerForBackgroundCompilation::VisitLdaLookupGlobalSlotInsideTypeof(
     BytecodeArrayIterator* iterator) {
-  VisitLdaGlobal(iterator);
+  ProcessLdaLookupGlobalSlot(iterator);
 }
 
 void SerializerForBackgroundCompilation::VisitStaGlobal(
@@ -905,6 +1904,26 @@ void SerializerForBackgroundCompilation::VisitStaGlobal(
   ProcessFeedbackForGlobalAccess(slot);
 }
 
+void SerializerForBackgroundCompilation::ProcessLdaLookupContextSlot(
+    BytecodeArrayIterator* iterator) {
+  const int slot_index = iterator->GetIndexOperand(1);
+  const int depth = iterator->GetUnsignedImmediateOperand(2);
+  ProcessCheckContextExtensions(depth);
+  Hints& context_hints = environment()->current_context_hints();
+  environment()->accumulator_hints().Clear();
+  ProcessContextAccess(context_hints, slot_index, depth, kIgnoreSlot);
+}
+
+void SerializerForBackgroundCompilation::VisitLdaLookupContextSlot(
+    BytecodeArrayIterator* iterator) {
+  ProcessLdaLookupContextSlot(iterator);
+}
+
+void SerializerForBackgroundCompilation::VisitLdaLookupContextSlotInsideTypeof(
+    BytecodeArrayIterator* iterator) {
+  ProcessLdaLookupContextSlot(iterator);
+}
+
 namespace {
 template <class MapContainer>
 MapHandles GetRelevantReceiverMaps(Isolate* isolate, MapContainer const& maps) {
@@ -922,9 +1941,10 @@ MapHandles GetRelevantReceiverMaps(Isolate* isolate, MapContainer const& maps) {
 
 ElementAccessFeedback const*
 SerializerForBackgroundCompilation::ProcessFeedbackMapsForElementAccess(
-    const MapHandles& maps, AccessMode mode) {
+    const MapHandles& maps, AccessMode mode,
+    KeyedAccessMode const& keyed_mode) {
   ElementAccessFeedback const* result =
-      broker()->ProcessFeedbackMapsForElementAccess(maps);
+      broker()->ProcessFeedbackMapsForElementAccess(maps, keyed_mode);
   for (ElementAccessFeedback::MapIterator it = result->all_maps(broker());
        !it.done(); it.advance()) {
     switch (mode) {
@@ -952,9 +1972,34 @@ SerializerForBackgroundCompilation::ProcessFeedbackMapsForNamedAccess(
       ProcessMapForNamedPropertyAccess(map_ref, name);
       AccessInfoFactory access_info_factory(broker(), dependencies(),
                                             broker()->zone());
-      access_infos.push_back(access_info_factory.ComputePropertyAccessInfo(
+      PropertyAccessInfo info(access_info_factory.ComputePropertyAccessInfo(
           map, name.object(), mode));
+      access_infos.push_back(info);
+
+      // TODO(turbofan): We want to take receiver hints into account as well,
+      // not only the feedback maps.
+      // For JSNativeContextSpecialization::InlinePropertySetterCall
+      // and InlinePropertyGetterCall.
+      if (info.IsAccessorConstant() && !info.constant().is_null()) {
+        if (info.constant()->IsJSFunction()) {
+          // For JSCallReducer::ReduceCallApiFunction.
+ Handle sfi( + handle(Handle::cast(info.constant())->shared(), + broker()->isolate())); + if (sfi->IsApiFunction()) { + FunctionTemplateInfoRef fti_ref( + broker(), handle(sfi->get_api_func_data(), broker()->isolate())); + if (fti_ref.has_call_code()) fti_ref.SerializeCallCode(); + ProcessReceiverMapForApiCall(fti_ref, map); + } + } else { + FunctionTemplateInfoRef fti_ref( + broker(), Handle::cast(info.constant())); + if (fti_ref.has_call_code()) fti_ref.SerializeCallCode(); + } + } } + DCHECK(!access_infos.empty()); return new (broker()->zone()) NamedAccessFeedback(name, access_infos); } @@ -962,9 +2007,9 @@ SerializerForBackgroundCompilation::ProcessFeedbackMapsForNamedAccess( void SerializerForBackgroundCompilation::ProcessFeedbackForPropertyAccess( FeedbackSlot slot, AccessMode mode, base::Optional static_name) { if (slot.IsInvalid()) return; - if (environment()->function().feedback_vector.is_null()) return; + if (environment()->function().feedback_vector().is_null()) return; - FeedbackNexus nexus(environment()->function().feedback_vector, slot); + FeedbackNexus nexus(environment()->function().feedback_vector(), slot); FeedbackSource source(nexus); if (broker()->HasFeedback(source)) return; @@ -992,8 +2037,10 @@ void SerializerForBackgroundCompilation::ProcessFeedbackForPropertyAccess( static_name.has_value() ? static_name : broker()->GetNameFeedback(nexus); if (name.has_value()) { processed = ProcessFeedbackMapsForNamedAccess(maps, mode, *name); - } else if (nexus.GetKeyType() == ELEMENT && nexus.ic_state() != MEGAMORPHIC) { - processed = ProcessFeedbackMapsForElementAccess(maps, mode); + } else if (nexus.GetKeyType() == ELEMENT) { + DCHECK_NE(nexus.ic_state(), MEGAMORPHIC); + processed = ProcessFeedbackMapsForElementAccess( + maps, mode, KeyedAccessMode::FromNexus(nexus)); } broker()->SetFeedback(source, processed); } @@ -1087,8 +2134,8 @@ void SerializerForBackgroundCompilation::ProcessNamedPropertyAccess( BytecodeArrayIterator* iterator, AccessMode mode) { Hints const& receiver = environment()->register_hints(iterator->GetRegisterOperand(0)); - Handle name(Name::cast(iterator->GetConstantForIndexOperand(1)), - broker()->isolate()); + Handle name = Handle::cast( + iterator->GetConstantForIndexOperand(1, broker()->isolate())); FeedbackSlot slot = iterator->GetSlotOperand(2); ProcessNamedPropertyAccess(receiver, NameRef(broker(), name), slot, mode); } @@ -1176,6 +2223,31 @@ UNCONDITIONAL_JUMPS_LIST(DEFINE_UNCONDITIONAL_JUMP) IGNORED_BYTECODE_LIST(DEFINE_IGNORE) #undef DEFINE_IGNORE +#define DEFINE_UNREACHABLE(name, ...) \ + void SerializerForBackgroundCompilation::Visit##name( \ + BytecodeArrayIterator* iterator) { \ + UNREACHABLE(); \ + } +UNREACHABLE_BYTECODE_LIST(DEFINE_UNREACHABLE) +#undef DEFINE_UNREACHABLE + +#define DEFINE_KILL(name, ...) 
\ + void SerializerForBackgroundCompilation::Visit##name( \ + BytecodeArrayIterator* iterator) { \ + environment()->Kill(); \ + } +KILL_ENVIRONMENT_LIST(DEFINE_KILL) +#undef DEFINE_KILL + +#undef CLEAR_ENVIRONMENT_LIST +#undef KILL_ENVIRONMENT_LIST +#undef CLEAR_ACCUMULATOR_LIST +#undef UNCONDITIONAL_JUMPS_LIST +#undef CONDITIONAL_JUMPS_LIST +#undef IGNORED_BYTECODE_LIST +#undef UNREACHABLE_BYTECODE_LIST +#undef SUPPORTED_BYTECODE_LIST + } // namespace compiler } // namespace internal } // namespace v8 diff --git a/deps/v8/src/compiler/serializer-for-background-compilation.h b/deps/v8/src/compiler/serializer-for-background-compilation.h index 0ee37ef280074e..881ed61a555231 100644 --- a/deps/v8/src/compiler/serializer-for-background-compilation.h +++ b/deps/v8/src/compiler/serializer-for-background-compilation.h @@ -5,346 +5,31 @@ #ifndef V8_COMPILER_SERIALIZER_FOR_BACKGROUND_COMPILATION_H_ #define V8_COMPILER_SERIALIZER_FOR_BACKGROUND_COMPILATION_H_ -#include "src/base/optional.h" -#include "src/compiler/access-info.h" -#include "src/utils/utils.h" #include "src/handles/handles.h" -#include "src/handles/maybe-handles.h" -#include "src/zone/zone-containers.h" namespace v8 { namespace internal { -namespace interpreter { -class BytecodeArrayIterator; -} // namespace interpreter - -class BytecodeArray; -class FeedbackVector; -class LookupIterator; -class NativeContext; -class ScriptContextTable; -class SharedFunctionInfo; -class SourcePositionTableIterator; +class BailoutId; class Zone; namespace compiler { -#define CLEAR_ENVIRONMENT_LIST(V) \ - V(Abort) \ - V(CallRuntime) \ - V(CallRuntimeForPair) \ - V(CreateBlockContext) \ - V(CreateEvalContext) \ - V(CreateFunctionContext) \ - V(Debugger) \ - V(PopContext) \ - V(PushContext) \ - V(ResumeGenerator) \ - V(ReThrow) \ - V(StaContextSlot) \ - V(StaCurrentContextSlot) \ - V(SuspendGenerator) \ - V(SwitchOnGeneratorState) \ - V(Throw) - -#define CLEAR_ACCUMULATOR_LIST(V) \ - V(Add) \ - V(AddSmi) \ - V(BitwiseAnd) \ - V(BitwiseAndSmi) \ - V(BitwiseNot) \ - V(BitwiseOr) \ - V(BitwiseOrSmi) \ - V(BitwiseXor) \ - V(BitwiseXorSmi) \ - V(CloneObject) \ - V(CreateArrayFromIterable) \ - V(CreateArrayLiteral) \ - V(CreateEmptyArrayLiteral) \ - V(CreateEmptyObjectLiteral) \ - V(CreateMappedArguments) \ - V(CreateObjectLiteral) \ - V(CreateRestParameter) \ - V(CreateUnmappedArguments) \ - V(Dec) \ - V(DeletePropertySloppy) \ - V(DeletePropertyStrict) \ - V(Div) \ - V(DivSmi) \ - V(Exp) \ - V(ExpSmi) \ - V(ForInContinue) \ - V(ForInEnumerate) \ - V(ForInNext) \ - V(ForInStep) \ - V(GetTemplateObject) \ - V(Inc) \ - V(LdaContextSlot) \ - V(LdaCurrentContextSlot) \ - V(LdaImmutableContextSlot) \ - V(LdaImmutableCurrentContextSlot) \ - V(LogicalNot) \ - V(Mod) \ - V(ModSmi) \ - V(Mul) \ - V(MulSmi) \ - V(Negate) \ - V(SetPendingMessage) \ - V(ShiftLeft) \ - V(ShiftLeftSmi) \ - V(ShiftRight) \ - V(ShiftRightLogical) \ - V(ShiftRightLogicalSmi) \ - V(ShiftRightSmi) \ - V(Sub) \ - V(SubSmi) \ - V(TestEqual) \ - V(TestEqualStrict) \ - V(TestGreaterThan) \ - V(TestGreaterThanOrEqual) \ - V(TestInstanceOf) \ - V(TestLessThan) \ - V(TestLessThanOrEqual) \ - V(TestNull) \ - V(TestReferenceEqual) \ - V(TestTypeOf) \ - V(TestUndefined) \ - V(TestUndetectable) \ - V(ToBooleanLogicalNot) \ - V(ToName) \ - V(ToNumber) \ - V(ToNumeric) \ - V(ToString) \ - V(TypeOf) - -#define UNCONDITIONAL_JUMPS_LIST(V) \ - V(Jump) \ - V(JumpConstant) \ - V(JumpLoop) - -#define CONDITIONAL_JUMPS_LIST(V) \ - V(JumpIfFalse) \ - V(JumpIfFalseConstant) \ - V(JumpIfJSReceiver) \ - 
V(JumpIfJSReceiverConstant) \ - V(JumpIfNotNull) \ - V(JumpIfNotNullConstant) \ - V(JumpIfNotUndefined) \ - V(JumpIfNotUndefinedConstant) \ - V(JumpIfNull) \ - V(JumpIfNullConstant) \ - V(JumpIfToBooleanFalse) \ - V(JumpIfToBooleanFalseConstant) \ - V(JumpIfToBooleanTrue) \ - V(JumpIfToBooleanTrueConstant) \ - V(JumpIfTrue) \ - V(JumpIfTrueConstant) \ - V(JumpIfUndefined) \ - V(JumpIfUndefinedConstant) - -#define IGNORED_BYTECODE_LIST(V) \ - V(CallNoFeedback) \ - V(LdaNamedPropertyNoFeedback) \ - V(StackCheck) \ - V(StaNamedPropertyNoFeedback) \ - V(ThrowReferenceErrorIfHole) \ - V(ThrowSuperAlreadyCalledIfNotHole) \ - V(ThrowSuperNotCalledIfHole) - -#define SUPPORTED_BYTECODE_LIST(V) \ - V(CallAnyReceiver) \ - V(CallProperty) \ - V(CallProperty0) \ - V(CallProperty1) \ - V(CallProperty2) \ - V(CallUndefinedReceiver) \ - V(CallUndefinedReceiver0) \ - V(CallUndefinedReceiver1) \ - V(CallUndefinedReceiver2) \ - V(CallWithSpread) \ - V(Construct) \ - V(ConstructWithSpread) \ - V(CreateClosure) \ - V(ExtraWide) \ - V(GetSuperConstructor) \ - V(Illegal) \ - V(LdaConstant) \ - V(LdaFalse) \ - V(LdaGlobal) \ - V(LdaGlobalInsideTypeof) \ - V(LdaKeyedProperty) \ - V(LdaLookupGlobalSlot) \ - V(LdaLookupGlobalSlotInsideTypeof) \ - V(LdaNamedProperty) \ - V(LdaNull) \ - V(Ldar) \ - V(LdaSmi) \ - V(LdaTheHole) \ - V(LdaTrue) \ - V(LdaUndefined) \ - V(LdaZero) \ - V(Mov) \ - V(Return) \ - V(StaGlobal) \ - V(StaInArrayLiteral) \ - V(StaKeyedProperty) \ - V(StaNamedOwnProperty) \ - V(StaNamedProperty) \ - V(Star) \ - V(TestIn) \ - V(Wide) \ - CLEAR_ENVIRONMENT_LIST(V) \ - CLEAR_ACCUMULATOR_LIST(V) \ - CONDITIONAL_JUMPS_LIST(V) \ - UNCONDITIONAL_JUMPS_LIST(V) \ - IGNORED_BYTECODE_LIST(V) - +class CompilationDependencies; class JSHeapBroker; -template -struct HandleComparator { - bool operator()(const Handle& lhs, const Handle& rhs) const { - return lhs.address() < rhs.address(); - } -}; - -struct FunctionBlueprint { - Handle shared; - Handle feedback_vector; - - bool operator<(const FunctionBlueprint& other) const { - // A feedback vector is never used for more than one SFI, so it can - // be used for strict ordering of blueprints. 
- DCHECK_IMPLIES(feedback_vector.equals(other.feedback_vector), - shared.equals(other.shared)); - return HandleComparator()(feedback_vector, - other.feedback_vector); - } -}; - -class CompilationSubject { - public: - explicit CompilationSubject(FunctionBlueprint blueprint) - : blueprint_(blueprint) {} - CompilationSubject(Handle closure, Isolate* isolate); - - FunctionBlueprint blueprint() const { return blueprint_; } - MaybeHandle closure() const { return closure_; } - - private: - FunctionBlueprint blueprint_; - MaybeHandle closure_; -}; - -using ConstantsSet = ZoneSet, HandleComparator>; -using MapsSet = ZoneSet, HandleComparator>; -using BlueprintsSet = ZoneSet; - -class Hints { - public: - explicit Hints(Zone* zone); - - const ConstantsSet& constants() const; - const MapsSet& maps() const; - const BlueprintsSet& function_blueprints() const; - - void AddConstant(Handle constant); - void AddMap(Handle map); - void AddFunctionBlueprint(FunctionBlueprint function_blueprint); - - void Add(const Hints& other); - - void Clear(); - bool IsEmpty() const; - - private: - ConstantsSet constants_; - MapsSet maps_; - BlueprintsSet function_blueprints_; -}; -using HintsVector = ZoneVector; - enum class SerializerForBackgroundCompilationFlag : uint8_t { kBailoutOnUninitialized = 1 << 0, kCollectSourcePositions = 1 << 1, - kOsr = 1 << 2, + kAnalyzeEnvironmentLiveness = 1 << 2, }; using SerializerForBackgroundCompilationFlags = base::Flags; -// The SerializerForBackgroundCompilation makes sure that the relevant function -// data such as bytecode, SharedFunctionInfo and FeedbackVector, used by later -// optimizations in the compiler, is copied to the heap broker. -class SerializerForBackgroundCompilation { - public: - SerializerForBackgroundCompilation( - JSHeapBroker* broker, CompilationDependencies* dependencies, Zone* zone, - Handle closure, - SerializerForBackgroundCompilationFlags flags); - Hints Run(); // NOTE: Returns empty for an already-serialized function. - - class Environment; - - private: - SerializerForBackgroundCompilation( - JSHeapBroker* broker, CompilationDependencies* dependencies, Zone* zone, - CompilationSubject function, base::Optional new_target, - const HintsVector& arguments, - SerializerForBackgroundCompilationFlags flags); - - bool BailoutOnUninitialized(FeedbackSlot slot); - - void TraverseBytecode(); - -#define DECLARE_VISIT_BYTECODE(name, ...) 
\ - void Visit##name(interpreter::BytecodeArrayIterator* iterator); - SUPPORTED_BYTECODE_LIST(DECLARE_VISIT_BYTECODE) -#undef DECLARE_VISIT_BYTECODE - - void ProcessCallOrConstruct(Hints callee, base::Optional new_target, - const HintsVector& arguments, FeedbackSlot slot, - bool with_spread = false); - void ProcessCallVarArgs(interpreter::BytecodeArrayIterator* iterator, - ConvertReceiverMode receiver_mode, - bool with_spread = false); - - void ProcessJump(interpreter::BytecodeArrayIterator* iterator); - void MergeAfterJump(interpreter::BytecodeArrayIterator* iterator); - - void ProcessKeyedPropertyAccess(Hints const& receiver, Hints const& key, - FeedbackSlot slot, AccessMode mode); - void ProcessNamedPropertyAccess(interpreter::BytecodeArrayIterator* iterator, - AccessMode mode); - void ProcessNamedPropertyAccess(Hints const& receiver, NameRef const& name, - FeedbackSlot slot, AccessMode mode); - - GlobalAccessFeedback const* ProcessFeedbackForGlobalAccess(FeedbackSlot slot); - NamedAccessFeedback const* ProcessFeedbackMapsForNamedAccess( - const MapHandles& maps, AccessMode mode, NameRef const& name); - ElementAccessFeedback const* ProcessFeedbackMapsForElementAccess( - const MapHandles& maps, AccessMode mode); - void ProcessFeedbackForPropertyAccess(FeedbackSlot slot, AccessMode mode, - base::Optional static_name); - void ProcessMapForNamedPropertyAccess(MapRef const& map, NameRef const& name); - - Hints RunChildSerializer(CompilationSubject function, - base::Optional new_target, - const HintsVector& arguments, bool with_spread); - - JSHeapBroker* broker() const { return broker_; } - CompilationDependencies* dependencies() const { return dependencies_; } - Zone* zone() const { return zone_; } - Environment* environment() const { return environment_; } - SerializerForBackgroundCompilationFlags flags() const { return flags_; } - - JSHeapBroker* const broker_; - CompilationDependencies* const dependencies_; - Zone* const zone_; - Environment* const environment_; - ZoneUnorderedMap stashed_environments_; - SerializerForBackgroundCompilationFlags const flags_; -}; +void RunSerializerForBackgroundCompilation( + JSHeapBroker* broker, CompilationDependencies* dependencies, Zone* zone, + Handle closure, SerializerForBackgroundCompilationFlags flags, + BailoutId osr_offset); } // namespace compiler } // namespace internal diff --git a/deps/v8/src/compiler/simd-scalar-lowering.cc b/deps/v8/src/compiler/simd-scalar-lowering.cc index cab398c1606df3..6deba2b00291c1 100644 --- a/deps/v8/src/compiler/simd-scalar-lowering.cc +++ b/deps/v8/src/compiler/simd-scalar-lowering.cc @@ -16,6 +16,7 @@ namespace internal { namespace compiler { namespace { +static const int kNumLanes64 = 2; static const int kNumLanes32 = 4; static const int kNumLanes16 = 8; static const int kNumLanes8 = 16; @@ -76,6 +77,8 @@ void SimdScalarLowering::LowerGraph() { } } +#define FOREACH_INT64X2_OPCODE(V) V(I64x2Splat) + #define FOREACH_INT32X4_OPCODE(V) \ V(I32x4Splat) \ V(I32x4ExtractLane) \ @@ -119,6 +122,8 @@ void SimdScalarLowering::LowerGraph() { V(S1x16AnyTrue) \ V(S1x16AllTrue) +#define FOREACH_FLOAT64X2_OPCODE(V) V(F64x2Splat) + #define FOREACH_FLOAT32X4_OPCODE(V) \ V(F32x4Splat) \ V(F32x4ExtractLane) \ @@ -208,8 +213,12 @@ void SimdScalarLowering::LowerGraph() { MachineType SimdScalarLowering::MachineTypeFrom(SimdType simdType) { switch (simdType) { + case SimdType::kFloat64x2: + return MachineType::Float64(); case SimdType::kFloat32x4: return MachineType::Float32(); + case SimdType::kInt64x2: + return 
MachineType::Int64(); case SimdType::kInt32x4: return MachineType::Int32(); case SimdType::kInt16x8: @@ -223,6 +232,14 @@ MachineType SimdScalarLowering::MachineTypeFrom(SimdType simdType) { void SimdScalarLowering::SetLoweredType(Node* node, Node* output) { switch (node->opcode()) { #define CASE_STMT(name) case IrOpcode::k##name: + FOREACH_FLOAT64X2_OPCODE(CASE_STMT) { + replacements_[node->id()].type = SimdType::kFloat64x2; + break; + } + FOREACH_INT64X2_OPCODE(CASE_STMT) { + replacements_[node->id()].type = SimdType::kInt64x2; + break; + } FOREACH_INT32X4_OPCODE(CASE_STMT) case IrOpcode::kReturn: case IrOpcode::kParameter: @@ -326,7 +343,9 @@ static int GetReturnCountAfterLoweringSimd128( int SimdScalarLowering::NumLanes(SimdType type) { int num_lanes = 0; - if (type == SimdType::kFloat32x4 || type == SimdType::kInt32x4) { + if (type == SimdType::kFloat64x2 || type == SimdType::kInt64x2) { + num_lanes = kNumLanes64; + } else if (type == SimdType::kFloat32x4 || type == SimdType::kInt32x4) { num_lanes = kNumLanes32; } else if (type == SimdType::kInt16x8) { num_lanes = kNumLanes16; @@ -1198,7 +1217,7 @@ void SimdScalarLowering::LowerNode(Node* node) { } F32X4_UNOP_CASE(Abs) F32X4_UNOP_CASE(Neg) -#undef F32x4_UNOP_CASE +#undef F32X4_UNOP_CASE case IrOpcode::kF32x4RecipApprox: case IrOpcode::kF32x4RecipSqrtApprox: { DCHECK_EQ(1, node->InputCount()); @@ -1223,8 +1242,10 @@ void SimdScalarLowering::LowerNode(Node* node) { LowerUnaryOp(node, SimdType::kInt32x4, machine()->RoundUint32ToFloat32()); break; } - case IrOpcode::kI32x4Splat: + case IrOpcode::kF64x2Splat: case IrOpcode::kF32x4Splat: + case IrOpcode::kI64x2Splat: + case IrOpcode::kI32x4Splat: case IrOpcode::kI16x8Splat: case IrOpcode::kI8x16Splat: { Node** rep_node = zone()->NewArray(num_lanes); @@ -1347,7 +1368,7 @@ void SimdScalarLowering::LowerNode(Node* node) { } case IrOpcode::kS8x16Shuffle: { DCHECK_EQ(2, node->InputCount()); - const uint8_t* shuffle = OpParameter(node->op()); + const uint8_t* shuffle = S8x16ShuffleOf(node->op()); Node** rep_left = GetReplacementsWithType(node->InputAt(0), rep_type); Node** rep_right = GetReplacementsWithType(node->InputAt(1), rep_type); Node** rep_node = zone()->NewArray(16); diff --git a/deps/v8/src/compiler/simd-scalar-lowering.h b/deps/v8/src/compiler/simd-scalar-lowering.h index 01ea195bdc8bf7..76723fcc7726bc 100644 --- a/deps/v8/src/compiler/simd-scalar-lowering.h +++ b/deps/v8/src/compiler/simd-scalar-lowering.h @@ -32,7 +32,14 @@ class SimdScalarLowering { private: enum class State : uint8_t { kUnvisited, kOnStack, kVisited }; - enum class SimdType : uint8_t { kFloat32x4, kInt32x4, kInt16x8, kInt8x16 }; + enum class SimdType : uint8_t { + kFloat64x2, + kFloat32x4, + kInt64x2, + kInt32x4, + kInt16x8, + kInt8x16 + }; #if defined(V8_TARGET_BIG_ENDIAN) static constexpr int kLaneOffsets[16] = {15, 14, 13, 12, 11, 10, 9, 8, diff --git a/deps/v8/src/compiler/simplified-lowering.cc b/deps/v8/src/compiler/simplified-lowering.cc index 8bc0e7af7b16b8..b028a76bb0d8ea 100644 --- a/deps/v8/src/compiler/simplified-lowering.cc +++ b/deps/v8/src/compiler/simplified-lowering.cc @@ -8,6 +8,7 @@ #include "src/base/bits.h" #include "src/codegen/code-factory.h" +#include "src/codegen/tick-counter.h" #include "src/compiler/access-builder.h" #include "src/compiler/common-operator.h" #include "src/compiler/compiler-source-position-table.h" @@ -22,8 +23,8 @@ #include "src/compiler/simplified-operator.h" #include "src/compiler/type-cache.h" #include "src/numbers/conversions-inl.h" -#include "src/utils/address-map.h" 
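The new `kFloat64x2`/`kInt64x2` cases slot into the existing lane arithmetic: a 128-bit SIMD value is decomposed into 2, 4, 8, or 16 scalar replacement nodes depending on lane width. Restating the patch's `NumLanes` mapping as a self-contained function:

```cpp
enum class SimdType { kFloat64x2, kFloat32x4, kInt64x2, kInt32x4, kInt16x8, kInt8x16 };

// 128 bits divided by the lane width gives the replacement-node count:
// kNumLanes64 = 2, kNumLanes32 = 4, kNumLanes16 = 8, kNumLanes8 = 16.
int NumLanes(SimdType type) {
  switch (type) {
    case SimdType::kFloat64x2:
    case SimdType::kInt64x2:
      return 2;
    case SimdType::kFloat32x4:
    case SimdType::kInt32x4:
      return 4;
    case SimdType::kInt16x8:
      return 8;
    case SimdType::kInt8x16:
      return 16;
  }
  return 0;  // unreachable: all enumerators handled above
}
```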
#include "src/objects/objects.h" +#include "src/utils/address-map.h" namespace v8 { namespace internal { @@ -279,7 +280,8 @@ class RepresentationSelector { RepresentationSelector(JSGraph* jsgraph, JSHeapBroker* broker, Zone* zone, RepresentationChanger* changer, SourcePositionTable* source_positions, - NodeOriginTable* node_origins) + NodeOriginTable* node_origins, + TickCounter* tick_counter) : jsgraph_(jsgraph), zone_(zone), count_(jsgraph->graph()->NodeCount()), @@ -296,7 +298,8 @@ class RepresentationSelector { source_positions_(source_positions), node_origins_(node_origins), type_cache_(TypeCache::Get()), - op_typer_(broker, graph_zone()) { + op_typer_(broker, graph_zone()), + tick_counter_(tick_counter) { } // Forward propagation of types from type feedback. @@ -444,6 +447,7 @@ class RepresentationSelector { break; \ } SIMPLIFIED_SPECULATIVE_NUMBER_BINOP_LIST(DECLARE_CASE) + SIMPLIFIED_SPECULATIVE_BIGINT_BINOP_LIST(DECLARE_CASE) #undef DECLARE_CASE #define DECLARE_CASE(Name) \ @@ -747,21 +751,32 @@ class RepresentationSelector { !GetUpperBound(node->InputAt(1)).Maybe(type); } + void ChangeToDeadValue(Node* node, Node* effect, Node* control) { + DCHECK(TypeOf(node).IsNone()); + // If the node is unreachable, insert an Unreachable node and mark the + // value dead. + // TODO(jarin,tebbi) Find a way to unify/merge this insertion with + // InsertUnreachableIfNecessary. + Node* unreachable = effect = + graph()->NewNode(jsgraph_->common()->Unreachable(), effect, control); + const Operator* dead_value = + jsgraph_->common()->DeadValue(GetInfo(node)->representation()); + node->ReplaceInput(0, unreachable); + node->TrimInputCount(dead_value->ValueInputCount()); + ReplaceEffectControlUses(node, effect, control); + NodeProperties::ChangeOp(node, dead_value); + } + void ChangeToPureOp(Node* node, const Operator* new_op) { DCHECK(new_op->HasProperty(Operator::kPure)); + DCHECK_EQ(new_op->ValueInputCount(), node->op()->ValueInputCount()); if (node->op()->EffectInputCount() > 0) { DCHECK_LT(0, node->op()->ControlInputCount()); Node* control = NodeProperties::GetControlInput(node); Node* effect = NodeProperties::GetEffectInput(node); if (TypeOf(node).IsNone()) { - // If the node is unreachable, insert an Unreachable node and mark the - // value dead. - // TODO(jarin,tebbi) Find a way to unify/merge this insertion with - // InsertUnreachableIfNecessary. - Node* unreachable = effect = graph()->NewNode( - jsgraph_->common()->Unreachable(), effect, control); - new_op = jsgraph_->common()->DeadValue(GetInfo(node)->representation()); - node->ReplaceInput(0, unreachable); + ChangeToDeadValue(node, effect, control); + return; } // Rewire the effect and control chains. 
node->TrimInputCount(new_op->ValueInputCount()); @@ -772,6 +787,30 @@ class RepresentationSelector { NodeProperties::ChangeOp(node, new_op); } + void ChangeUnaryToPureBinaryOp(Node* node, const Operator* new_op, + int new_input_index, Node* new_input) { + DCHECK(new_op->HasProperty(Operator::kPure)); + DCHECK_EQ(new_op->ValueInputCount(), 2); + DCHECK_EQ(node->op()->ValueInputCount(), 1); + DCHECK_LE(0, new_input_index); + DCHECK_LE(new_input_index, 1); + if (node->op()->EffectInputCount() > 0) { + DCHECK_LT(0, node->op()->ControlInputCount()); + Node* control = NodeProperties::GetControlInput(node); + Node* effect = NodeProperties::GetEffectInput(node); + if (TypeOf(node).IsNone()) { + ChangeToDeadValue(node, effect, control); + return; + } + node->TrimInputCount(node->op()->ValueInputCount()); + ReplaceEffectControlUses(node, effect, control); + } else { + DCHECK_EQ(0, node->op()->ControlInputCount()); + } + node->InsertInput(jsgraph_->zone(), new_input_index, new_input); + NodeProperties::ChangeOp(node, new_op); + } + // Converts input {index} of {node} according to given UseInfo {use}, // assuming the type of the input is {input_type}. If {input_type} is null, // it takes the input from the input node {TypeOf(node->InputAt(index))}. @@ -804,6 +843,10 @@ class RepresentationSelector { } void ProcessInput(Node* node, int index, UseInfo use) { + DCHECK_IMPLIES(use.type_check() != TypeCheckKind::kNone, + !node->op()->HasProperty(Operator::kNoDeopt) && + node->op()->EffectInputCount() > 0); + switch (phase_) { case PROPAGATE: EnqueueInput(node, index, use); @@ -958,7 +1001,8 @@ class RepresentationSelector { return MachineRepresentation::kWord32; } else if (type.Is(Type::Boolean())) { return MachineRepresentation::kBit; - } else if (type.Is(Type::NumberOrOddball()) && use.IsUsedAsFloat64()) { + } else if (type.Is(Type::NumberOrOddball()) && + use.TruncatesOddballAndBigIntToNumber()) { return MachineRepresentation::kFloat64; } else if (type.Is(Type::Union(Type::SignedSmall(), Type::NaN(), zone()))) { // TODO(turbofan): For Phis that return either NaN or some Smi, it's @@ -968,6 +1012,8 @@ class RepresentationSelector { return MachineRepresentation::kTagged; } else if (type.Is(Type::Number())) { return MachineRepresentation::kFloat64; + } else if (type.Is(Type::BigInt()) && use.IsUsedAsWord64()) { + return MachineRepresentation::kWord64; } else if (type.Is(Type::ExternalPointer())) { return MachineType::PointerRepresentation(); } @@ -1109,8 +1155,11 @@ class RepresentationSelector { if (IsAnyCompressed(rep)) { return MachineType::AnyCompressed(); } - // Word64 representation is only valid for safe integer values. if (rep == MachineRepresentation::kWord64) { + if (type.Is(Type::BigInt())) { + return MachineType::AnyTagged(); + } + DCHECK(type.Is(TypeCache::Get()->kSafeInteger)); return MachineType(rep, MachineSemantic::kInt64); } @@ -1126,7 +1175,17 @@ class RepresentationSelector { void VisitStateValues(Node* node) { if (propagate()) { for (int i = 0; i < node->InputCount(); i++) { - EnqueueInput(node, i, UseInfo::Any()); + // When lowering 64 bit BigInts to Word64 representation, we have to + // make sure they are rematerialized before deoptimization. By + // propagating a AnyTagged use, the RepresentationChanger is going to + // insert the necessary conversions. + // TODO(nicohartmann): Remove, once the deoptimizer can rematerialize + // truncated BigInts. 
+ if (TypeOf(node->InputAt(i)).Is(Type::BigInt())) { + EnqueueInput(node, i, UseInfo::AnyTagged()); + } else { + EnqueueInput(node, i, UseInfo::Any()); + } } } else if (lower()) { Zone* zone = jsgraph_->zone(); @@ -1135,6 +1194,12 @@ class RepresentationSelector { ZoneVector(node->InputCount(), zone); for (int i = 0; i < node->InputCount(); i++) { Node* input = node->InputAt(i); + // TODO(nicohartmann): Remove, once the deoptimizer can rematerialize + // truncated BigInts. + if (TypeOf(input).Is(Type::BigInt())) { + ProcessInput(node, i, UseInfo::AnyTagged()); + } + (*types)[i] = DeoptMachineTypeOf(GetInfo(input)->representation(), TypeOf(input)); } @@ -1621,6 +1686,8 @@ class RepresentationSelector { // Depending on the operator, propagate new usage info to the inputs. void VisitNode(Node* node, Truncation truncation, SimplifiedLowering* lowering) { + tick_counter_->DoTick(); + // Unconditionally eliminate unused pure nodes (only relevant if there's // a pure operation in between two effectful ones, where the last one // is unused). @@ -1715,13 +1782,15 @@ class RepresentationSelector { case IrOpcode::kJSToNumber: case IrOpcode::kJSToNumberConvertBigInt: case IrOpcode::kJSToNumeric: { + DCHECK(NodeProperties::GetType(node).Is(Type::Union( + Type::BigInt(), Type::NumberOrOddball(), graph()->zone()))); VisitInputs(node); // TODO(bmeurer): Optimize somewhat based on input type? if (truncation.IsUsedAsWord32()) { SetOutput(node, MachineRepresentation::kWord32); if (lower()) lowering->DoJSToNumberOrNumericTruncatesToWord32(node, this); - } else if (truncation.IsUsedAsFloat64()) { + } else if (truncation.TruncatesOddballAndBigIntToNumber()) { SetOutput(node, MachineRepresentation::kFloat64); if (lower()) lowering->DoJSToNumberOrNumericTruncatesToFloat64(node, this); @@ -2461,6 +2530,20 @@ class RepresentationSelector { } return; } + case IrOpcode::kCheckBigInt: { + if (InputIs(node, Type::BigInt())) { + VisitNoop(node, truncation); + } else { + VisitUnop(node, UseInfo::AnyTagged(), + MachineRepresentation::kTaggedPointer); + } + return; + } + case IrOpcode::kBigIntAsUintN: { + ProcessInput(node, 0, UseInfo::TruncatingWord64()); + SetOutput(node, MachineRepresentation::kWord64, Type::BigInt()); + return; + } case IrOpcode::kNumberAcos: case IrOpcode::kNumberAcosh: case IrOpcode::kNumberAsin: @@ -2621,6 +2704,43 @@ class RepresentationSelector { SetOutput(node, MachineRepresentation::kTaggedPointer); return; } + case IrOpcode::kSpeculativeBigIntAdd: { + if (truncation.IsUsedAsWord64()) { + VisitBinop(node, + UseInfo::CheckedBigIntTruncatingWord64(VectorSlotPair{}), + MachineRepresentation::kWord64); + if (lower()) { + ChangeToPureOp(node, lowering->machine()->Int64Add()); + } + } else { + VisitBinop(node, + UseInfo::CheckedBigIntAsTaggedPointer(VectorSlotPair{}), + MachineRepresentation::kTaggedPointer); + if (lower()) { + NodeProperties::ChangeOp(node, lowering->simplified()->BigIntAdd()); + } + } + return; + } + case IrOpcode::kSpeculativeBigIntNegate: { + if (truncation.IsUsedAsWord64()) { + VisitUnop(node, + UseInfo::CheckedBigIntTruncatingWord64(VectorSlotPair{}), + MachineRepresentation::kWord64); + if (lower()) { + ChangeUnaryToPureBinaryOp(node, lowering->machine()->Int64Sub(), 0, + jsgraph_->Int64Constant(0)); + } + } else { + VisitUnop(node, + UseInfo::CheckedBigIntAsTaggedPointer(VectorSlotPair{}), + MachineRepresentation::kTaggedPointer); + if (lower()) { + ChangeToPureOp(node, lowering->simplified()->BigIntNegate()); + } + } + return; + } case IrOpcode::kStringConcat: { // 
TODO(turbofan): We currently depend on having this first length input // to make sure that the overflow check is properly scheduled before the @@ -2657,6 +2777,10 @@ class RepresentationSelector { MachineRepresentation::kTaggedPointer); return; } + case IrOpcode::kStringFromCodePointAt: { + return VisitBinop(node, UseInfo::AnyTagged(), UseInfo::Word(), + MachineRepresentation::kTaggedPointer); + } case IrOpcode::kStringIndexOf: { ProcessInput(node, 0, UseInfo::AnyTagged()); ProcessInput(node, 1, UseInfo::AnyTagged()); @@ -2983,7 +3107,7 @@ class RepresentationSelector { simplified()->PlainPrimitiveToWord32()); } } - } else if (truncation.IsUsedAsFloat64()) { + } else if (truncation.TruncatesOddballAndBigIntToNumber()) { if (InputIs(node, Type::NumberOrOddball())) { VisitUnop(node, UseInfo::TruncatingFloat64(), MachineRepresentation::kFloat64); @@ -3236,7 +3360,7 @@ class RepresentationSelector { // identifies NaN and undefined, we can just pass along // the {truncation} and completely wipe the {node}. if (truncation.IsUnused()) return VisitUnused(node); - if (truncation.IsUsedAsFloat64()) { + if (truncation.TruncatesOddballAndBigIntToNumber()) { VisitUnop(node, UseInfo::TruncatingFloat64(), MachineRepresentation::kFloat64); if (lower()) DeferReplacement(node, node->InputAt(0)); @@ -3263,7 +3387,7 @@ class RepresentationSelector { MachineRepresentation::kWord32); if (lower()) DeferReplacement(node, node->InputAt(0)); } else if (InputIs(node, Type::NumberOrOddball()) && - truncation.IsUsedAsFloat64()) { + truncation.TruncatesOddballAndBigIntToNumber()) { // Propagate the Float64 truncation. VisitUnop(node, UseInfo::TruncatingFloat64(), MachineRepresentation::kFloat64); @@ -3431,6 +3555,9 @@ class RepresentationSelector { return SetOutput(node, MachineRepresentation::kNone); case IrOpcode::kStaticAssert: return VisitUnop(node, UseInfo::Any(), MachineRepresentation::kTagged); + case IrOpcode::kAssertType: + return VisitUnop(node, UseInfo::AnyTagged(), + MachineRepresentation::kTagged); default: FATAL( "Representation inference: unsupported opcode %i (%s), node #%i\n.", @@ -3534,6 +3661,7 @@ class RepresentationSelector { NodeOriginTable* node_origins_; TypeCache const* type_cache_; OperationTyper op_typer_; // helper for the feedback typer + TickCounter* const tick_counter_; NodeInfo* GetInfo(Node* node) { DCHECK(node->id() < count_); @@ -3547,19 +3675,22 @@ SimplifiedLowering::SimplifiedLowering(JSGraph* jsgraph, JSHeapBroker* broker, Zone* zone, SourcePositionTable* source_positions, NodeOriginTable* node_origins, - PoisoningMitigationLevel poisoning_level) + PoisoningMitigationLevel poisoning_level, + TickCounter* tick_counter) : jsgraph_(jsgraph), broker_(broker), zone_(zone), type_cache_(TypeCache::Get()), source_positions_(source_positions), node_origins_(node_origins), - poisoning_level_(poisoning_level) {} + poisoning_level_(poisoning_level), + tick_counter_(tick_counter) {} void SimplifiedLowering::LowerAllNodes() { - RepresentationChanger changer(jsgraph(), jsgraph()->isolate()); + RepresentationChanger changer(jsgraph(), broker_); RepresentationSelector selector(jsgraph(), broker_, zone_, &changer, - source_positions_, node_origins_); + source_positions_, node_origins_, + tick_counter_); selector.Run(this); } diff --git a/deps/v8/src/compiler/simplified-lowering.h b/deps/v8/src/compiler/simplified-lowering.h index e434af9d4f11b2..414e3588d72c59 100644 --- a/deps/v8/src/compiler/simplified-lowering.h +++ b/deps/v8/src/compiler/simplified-lowering.h @@ -12,6 +12,9 @@ namespace v8 { 
namespace internal { + +class TickCounter; + namespace compiler { // Forward declarations. @@ -26,7 +29,8 @@ class V8_EXPORT_PRIVATE SimplifiedLowering final { SimplifiedLowering(JSGraph* jsgraph, JSHeapBroker* broker, Zone* zone, SourcePositionTable* source_position, NodeOriginTable* node_origins, - PoisoningMitigationLevel poisoning_level); + PoisoningMitigationLevel poisoning_level, + TickCounter* tick_counter); ~SimplifiedLowering() = default; void LowerAllNodes(); @@ -67,6 +71,8 @@ class V8_EXPORT_PRIVATE SimplifiedLowering final { PoisoningMitigationLevel poisoning_level_; + TickCounter* const tick_counter_; + Node* Float64Round(Node* const node); Node* Float64Sign(Node* const node); Node* Int32Abs(Node* const node); diff --git a/deps/v8/src/compiler/simplified-operator.cc b/deps/v8/src/compiler/simplified-operator.cc index ed3cfa861751cc..4f83635422eb9b 100644 --- a/deps/v8/src/compiler/simplified-operator.cc +++ b/deps/v8/src/compiler/simplified-operator.cc @@ -492,6 +492,18 @@ Handle FastMapParameterOf(const Operator* op) { return Handle::null(); } +std::ostream& operator<<(std::ostream& os, BigIntOperationHint hint) { + switch (hint) { + case BigIntOperationHint::kBigInt: + return os << "BigInt"; + } + UNREACHABLE(); +} + +size_t hash_value(BigIntOperationHint hint) { + return static_cast(hint); +} + std::ostream& operator<<(std::ostream& os, NumberOperationHint hint) { switch (hint) { case NumberOperationHint::kSignedSmall: @@ -585,12 +597,6 @@ Type AllocateTypeOf(const Operator* op) { return AllocateParametersOf(op).type(); } -UnicodeEncoding UnicodeEncodingOf(const Operator* op) { - DCHECK(op->opcode() == IrOpcode::kStringFromSingleCodePoint || - op->opcode() == IrOpcode::kStringCodePointAt); - return OpParameter(op); -} - AbortReason AbortReasonOf(const Operator* op) { DCHECK_EQ(IrOpcode::kRuntimeAbort, op->opcode()); return static_cast(OpParameter(op)); @@ -702,9 +708,11 @@ bool operator==(CheckMinusZeroParameters const& lhs, V(NumberToUint32, Operator::kNoProperties, 1, 0) \ V(NumberToUint8Clamped, Operator::kNoProperties, 1, 0) \ V(NumberSilenceNaN, Operator::kNoProperties, 1, 0) \ + V(BigIntNegate, Operator::kNoProperties, 1, 0) \ V(StringConcat, Operator::kNoProperties, 3, 0) \ V(StringToNumber, Operator::kNoProperties, 1, 0) \ V(StringFromSingleCharCode, Operator::kNoProperties, 1, 0) \ + V(StringFromSingleCodePoint, Operator::kNoProperties, 1, 0) \ V(StringIndexOf, Operator::kNoProperties, 3, 0) \ V(StringLength, Operator::kNoProperties, 1, 0) \ V(StringToLowerCaseIntl, Operator::kNoProperties, 1, 0) \ @@ -713,6 +721,7 @@ bool operator==(CheckMinusZeroParameters const& lhs, V(PlainPrimitiveToNumber, Operator::kNoProperties, 1, 0) \ V(PlainPrimitiveToWord32, Operator::kNoProperties, 1, 0) \ V(PlainPrimitiveToFloat64, Operator::kNoProperties, 1, 0) \ + V(ChangeCompressedSignedToInt32, Operator::kNoProperties, 1, 0) \ V(ChangeTaggedSignedToInt32, Operator::kNoProperties, 1, 0) \ V(ChangeTaggedSignedToInt64, Operator::kNoProperties, 1, 0) \ V(ChangeTaggedToInt32, Operator::kNoProperties, 1, 0) \ @@ -723,6 +732,7 @@ bool operator==(CheckMinusZeroParameters const& lhs, V(ChangeCompressedToTaggedSigned, Operator::kNoProperties, 1, 0) \ V(ChangeTaggedToCompressedSigned, Operator::kNoProperties, 1, 0) \ V(ChangeFloat64ToTaggedPointer, Operator::kNoProperties, 1, 0) \ + V(ChangeInt31ToCompressedSigned, Operator::kNoProperties, 1, 0) \ V(ChangeInt31ToTaggedSigned, Operator::kNoProperties, 1, 0) \ V(ChangeInt32ToTagged, Operator::kNoProperties, 1, 0) \ V(ChangeInt64ToTagged, 
Operator::kNoProperties, 1, 0) \ @@ -730,6 +740,8 @@ bool operator==(CheckMinusZeroParameters const& lhs, V(ChangeUint64ToTagged, Operator::kNoProperties, 1, 0) \ V(ChangeTaggedToBit, Operator::kNoProperties, 1, 0) \ V(ChangeBitToTagged, Operator::kNoProperties, 1, 0) \ + V(TruncateBigIntToUint64, Operator::kNoProperties, 1, 0) \ + V(ChangeUint64ToBigInt, Operator::kNoProperties, 1, 0) \ V(TruncateTaggedToBit, Operator::kNoProperties, 1, 0) \ V(TruncateTaggedPointerToBit, Operator::kNoProperties, 1, 0) \ V(TruncateTaggedToWord32, Operator::kNoProperties, 1, 0) \ @@ -769,9 +781,12 @@ bool operator==(CheckMinusZeroParameters const& lhs, V(NewConsString, Operator::kNoProperties, 3, 0) \ V(PoisonIndex, Operator::kNoProperties, 1, 0) -#define EFFECT_DEPENDENT_OP_LIST(V) \ - V(StringCharCodeAt, Operator::kNoProperties, 2, 1) \ - V(StringSubstring, Operator::kNoProperties, 3, 1) \ +#define EFFECT_DEPENDENT_OP_LIST(V) \ + V(BigIntAdd, Operator::kNoProperties, 2, 1) \ + V(StringCharCodeAt, Operator::kNoProperties, 2, 1) \ + V(StringCodePointAt, Operator::kNoProperties, 2, 1) \ + V(StringFromCodePointAt, Operator::kNoProperties, 2, 1) \ + V(StringSubstring, Operator::kNoProperties, 3, 1) \ V(DateNow, Operator::kNoProperties, 0, 1) #define SPECULATIVE_NUMBER_BINOP_LIST(V) \ @@ -801,6 +816,8 @@ bool operator==(CheckMinusZeroParameters const& lhs, V(CheckNumber, 1, 1) \ V(CheckSmi, 1, 1) \ V(CheckString, 1, 1) \ + V(CheckBigInt, 1, 1) \ + V(CheckedInt32ToCompressedSigned, 1, 1) \ V(CheckedInt32ToTaggedSigned, 1, 1) \ V(CheckedInt64ToInt32, 1, 1) \ V(CheckedInt64ToTaggedSigned, 1, 1) \ @@ -895,32 +912,6 @@ struct SimplifiedOperatorGlobalCache final { DEOPTIMIZE_REASON_LIST(CHECK_IF) #undef CHECK_IF - template - struct StringCodePointAtOperator final : public Operator1 { - StringCodePointAtOperator() - : Operator1(IrOpcode::kStringCodePointAt, - Operator::kFoldable | Operator::kNoThrow, - "StringCodePointAt", 2, 1, 1, 1, 1, 0, - kEncoding) {} - }; - StringCodePointAtOperator - kStringCodePointAtOperatorUTF16; - StringCodePointAtOperator - kStringCodePointAtOperatorUTF32; - - template - struct StringFromSingleCodePointOperator final - : public Operator1 { - StringFromSingleCodePointOperator() - : Operator1( - IrOpcode::kStringFromSingleCodePoint, Operator::kPure, - "StringFromSingleCodePoint", 1, 0, 0, 1, 0, 0, kEncoding) {} - }; - StringFromSingleCodePointOperator - kStringFromSingleCodePointOperatorUTF16; - StringFromSingleCodePointOperator - kStringFromSingleCodePointOperatorUTF32; - struct FindOrderedHashMapEntryOperator final : public Operator { FindOrderedHashMapEntryOperator() : Operator(IrOpcode::kFindOrderedHashMapEntry, Operator::kEliminatable, @@ -1236,6 +1227,20 @@ const Operator* SimplifiedOperatorBuilder::RuntimeAbort(AbortReason reason) { static_cast(reason)); // parameter } +const Operator* SimplifiedOperatorBuilder::BigIntAsUintN(int bits) { + CHECK(0 <= bits && bits <= 64); + + return new (zone()) Operator1(IrOpcode::kBigIntAsUintN, Operator::kPure, + "BigIntAsUintN", 1, 0, 0, 1, 0, 0, bits); +} + +const Operator* SimplifiedOperatorBuilder::AssertType(Type type) { + DCHECK(type.IsRange()); + return new (zone()) Operator1(IrOpcode::kAssertType, + Operator::kNoThrow | Operator::kNoDeopt, + "AssertType", 1, 0, 0, 1, 0, 0, type); +} + const Operator* SimplifiedOperatorBuilder::CheckIf( DeoptimizeReason reason, const VectorSlotPair& feedback) { if (!feedback.IsValid()) { @@ -1433,6 +1438,21 @@ const Operator* SimplifiedOperatorBuilder::CheckFloat64Hole( CheckFloat64HoleParameters(mode, 
feedback)); } +const Operator* SimplifiedOperatorBuilder::SpeculativeBigIntAdd( + BigIntOperationHint hint) { + return new (zone()) Operator1( + IrOpcode::kSpeculativeBigIntAdd, Operator::kFoldable | Operator::kNoThrow, + "SpeculativeBigIntAdd", 2, 1, 1, 1, 1, 0, hint); +} + +const Operator* SimplifiedOperatorBuilder::SpeculativeBigIntNegate( + BigIntOperationHint hint) { + return new (zone()) Operator1( + IrOpcode::kSpeculativeBigIntNegate, + Operator::kFoldable | Operator::kNoThrow, "SpeculativeBigIntNegate", 1, 1, + 1, 1, 1, 0, hint); +} + const Operator* SimplifiedOperatorBuilder::SpeculativeToNumber( NumberOperationHint hint, const VectorSlotPair& feedback) { if (!feedback.IsValid()) { @@ -1655,28 +1675,6 @@ const Operator* SimplifiedOperatorBuilder::AllocateRaw( AllocateParameters(type, allocation, allow_large_objects)); } -const Operator* SimplifiedOperatorBuilder::StringCodePointAt( - UnicodeEncoding encoding) { - switch (encoding) { - case UnicodeEncoding::UTF16: - return &cache_.kStringCodePointAtOperatorUTF16; - case UnicodeEncoding::UTF32: - return &cache_.kStringCodePointAtOperatorUTF32; - } - UNREACHABLE(); -} - -const Operator* SimplifiedOperatorBuilder::StringFromSingleCodePoint( - UnicodeEncoding encoding) { - switch (encoding) { - case UnicodeEncoding::UTF16: - return &cache_.kStringFromSingleCodePointOperatorUTF16; - case UnicodeEncoding::UTF32: - return &cache_.kStringFromSingleCodePointOperatorUTF32; - } - UNREACHABLE(); -} - #define SPECULATIVE_NUMBER_BINOP(Name) \ const Operator* SimplifiedOperatorBuilder::Name(NumberOperationHint hint) { \ switch (hint) { \ diff --git a/deps/v8/src/compiler/simplified-operator.h b/deps/v8/src/compiler/simplified-operator.h index d93544c5cd5869..bdac796adfff69 100644 --- a/deps/v8/src/compiler/simplified-operator.h +++ b/deps/v8/src/compiler/simplified-operator.h @@ -475,10 +475,15 @@ enum class NumberOperationHint : uint8_t { kNumberOrOddball, // Inputs were Number or Oddball, output was Number. 
}; +enum class BigIntOperationHint : uint8_t { + kBigInt, +}; + size_t hash_value(NumberOperationHint); +size_t hash_value(BigIntOperationHint); V8_EXPORT_PRIVATE std::ostream& operator<<(std::ostream&, NumberOperationHint); - +V8_EXPORT_PRIVATE std::ostream& operator<<(std::ostream&, BigIntOperationHint); V8_EXPORT_PRIVATE NumberOperationHint NumberOperationHintOf(const Operator* op) V8_WARN_UNUSED_RESULT; @@ -634,6 +639,9 @@ class V8_EXPORT_PRIVATE SimplifiedOperatorBuilder final const Operator* NumberSilenceNaN(); + const Operator* BigIntAdd(); + const Operator* BigIntNegate(); + const Operator* SpeculativeSafeIntegerAdd(NumberOperationHint hint); const Operator* SpeculativeSafeIntegerSubtract(NumberOperationHint hint); @@ -653,6 +661,10 @@ class V8_EXPORT_PRIVATE SimplifiedOperatorBuilder final const Operator* SpeculativeNumberLessThanOrEqual(NumberOperationHint hint); const Operator* SpeculativeNumberEqual(NumberOperationHint hint); + const Operator* SpeculativeBigIntAdd(BigIntOperationHint hint); + const Operator* SpeculativeBigIntNegate(BigIntOperationHint hint); + const Operator* BigIntAsUintN(int bits); + const Operator* ReferenceEqual(); const Operator* SameValue(); const Operator* SameValueNumbersOnly(); @@ -666,9 +678,10 @@ class V8_EXPORT_PRIVATE SimplifiedOperatorBuilder final const Operator* StringLessThan(); const Operator* StringLessThanOrEqual(); const Operator* StringCharCodeAt(); - const Operator* StringCodePointAt(UnicodeEncoding encoding); + const Operator* StringCodePointAt(); const Operator* StringFromSingleCharCode(); - const Operator* StringFromSingleCodePoint(UnicodeEncoding encoding); + const Operator* StringFromSingleCodePoint(); + const Operator* StringFromCodePointAt(); const Operator* StringIndexOf(); const Operator* StringLength(); const Operator* StringToLowerCaseIntl(); @@ -686,6 +699,7 @@ class V8_EXPORT_PRIVATE SimplifiedOperatorBuilder final const Operator* PlainPrimitiveToWord32(); const Operator* PlainPrimitiveToFloat64(); + const Operator* ChangeCompressedSignedToInt32(); const Operator* ChangeTaggedSignedToInt32(); const Operator* ChangeTaggedSignedToInt64(); const Operator* ChangeTaggedToInt32(); @@ -695,6 +709,7 @@ class V8_EXPORT_PRIVATE SimplifiedOperatorBuilder final const Operator* ChangeTaggedToTaggedSigned(); const Operator* ChangeCompressedToTaggedSigned(); const Operator* ChangeTaggedToCompressedSigned(); + const Operator* ChangeInt31ToCompressedSigned(); const Operator* ChangeInt31ToTaggedSigned(); const Operator* ChangeInt32ToTagged(); const Operator* ChangeInt64ToTagged(); @@ -704,6 +719,8 @@ class V8_EXPORT_PRIVATE SimplifiedOperatorBuilder final const Operator* ChangeFloat64ToTaggedPointer(); const Operator* ChangeTaggedToBit(); const Operator* ChangeBitToTagged(); + const Operator* TruncateBigIntToUint64(); + const Operator* ChangeUint64ToBigInt(); const Operator* TruncateTaggedToWord32(); const Operator* TruncateTaggedToFloat64(); const Operator* TruncateTaggedToBit(); @@ -740,6 +757,8 @@ class V8_EXPORT_PRIVATE SimplifiedOperatorBuilder final const Operator* CheckedInt32Mod(); const Operator* CheckedInt32Mul(CheckForMinusZeroMode); const Operator* CheckedInt32Sub(); + const Operator* CheckedInt32ToCompressedSigned( + const VectorSlotPair& feedback); const Operator* CheckedInt32ToTaggedSigned(const VectorSlotPair& feedback); const Operator* CheckedInt64ToInt32(const VectorSlotPair& feedback); const Operator* CheckedInt64ToTaggedSigned(const VectorSlotPair& feedback); @@ -752,6 +771,7 @@ class V8_EXPORT_PRIVATE 
SimplifiedOperatorBuilder final const VectorSlotPair& feedback); const Operator* CheckedTaggedToTaggedPointer(const VectorSlotPair& feedback); const Operator* CheckedTaggedToTaggedSigned(const VectorSlotPair& feedback); + const Operator* CheckBigInt(const VectorSlotPair& feedback); const Operator* CheckedCompressedToTaggedPointer( const VectorSlotPair& feedback); const Operator* CheckedCompressedToTaggedSigned( @@ -874,6 +894,9 @@ class V8_EXPORT_PRIVATE SimplifiedOperatorBuilder final // Abort (for terminating execution on internal error). const Operator* RuntimeAbort(AbortReason reason); + // Abort if the value input does not inhabit the given type + const Operator* AssertType(Type type); + const Operator* DateNow(); private: diff --git a/deps/v8/src/compiler/state-values-utils.cc b/deps/v8/src/compiler/state-values-utils.cc index c00613c232902b..2bb5a0a4b532ca 100644 --- a/deps/v8/src/compiler/state-values-utils.cc +++ b/deps/v8/src/compiler/state-values-utils.cc @@ -329,9 +329,7 @@ void StateValuesAccess::iterator::Pop() { current_depth_--; } - -bool StateValuesAccess::iterator::done() { return current_depth_ < 0; } - +bool StateValuesAccess::iterator::done() const { return current_depth_ < 0; } void StateValuesAccess::iterator::Advance() { Top()->Advance(); @@ -392,14 +390,12 @@ MachineType StateValuesAccess::iterator::type() { } } - -bool StateValuesAccess::iterator::operator!=(iterator& other) { +bool StateValuesAccess::iterator::operator!=(iterator const& other) { // We only allow comparison with end(). CHECK(other.done()); return !done(); } - StateValuesAccess::iterator& StateValuesAccess::iterator::operator++() { Advance(); return *this; diff --git a/deps/v8/src/compiler/state-values-utils.h b/deps/v8/src/compiler/state-values-utils.h index 00ec3bb351282c..0ff5d218f1815e 100644 --- a/deps/v8/src/compiler/state-values-utils.h +++ b/deps/v8/src/compiler/state-values-utils.h @@ -92,7 +92,7 @@ class V8_EXPORT_PRIVATE StateValuesAccess { class V8_EXPORT_PRIVATE iterator { public: // Bare minimum of operators needed for range iteration. 
- bool operator!=(iterator& other); + bool operator!=(iterator const& other); iterator& operator++(); TypedNode operator*(); @@ -104,7 +104,7 @@ class V8_EXPORT_PRIVATE StateValuesAccess { Node* node(); MachineType type(); - bool done(); + bool done() const; void Advance(); void EnsureValid(); diff --git a/deps/v8/src/compiler/store-store-elimination.cc b/deps/v8/src/compiler/store-store-elimination.cc index 13d8199745af49..b71bcd7e669fb8 100644 --- a/deps/v8/src/compiler/store-store-elimination.cc +++ b/deps/v8/src/compiler/store-store-elimination.cc @@ -6,6 +6,7 @@ #include "src/compiler/store-store-elimination.h" +#include "src/codegen/tick-counter.h" #include "src/compiler/all-nodes.h" #include "src/compiler/js-graph.h" #include "src/compiler/node-properties.h" @@ -129,7 +130,8 @@ namespace { class RedundantStoreFinder final { public: - RedundantStoreFinder(JSGraph* js_graph, Zone* temp_zone); + RedundantStoreFinder(JSGraph* js_graph, TickCounter* tick_counter, + Zone* temp_zone); void Find(); @@ -157,6 +159,7 @@ class RedundantStoreFinder final { ZoneSet& to_remove() { return to_remove_; } JSGraph* const jsgraph_; + TickCounter* const tick_counter_; Zone* const temp_zone_; ZoneStack revisit_; @@ -199,6 +202,7 @@ void RedundantStoreFinder::Find() { Visit(jsgraph()->graph()->end()); while (!revisit_.empty()) { + tick_counter_->DoTick(); Node* next = revisit_.top(); revisit_.pop(); DCHECK_LT(next->id(), in_revisit_.size()); @@ -230,9 +234,10 @@ bool RedundantStoreFinder::HasBeenVisited(Node* node) { return !unobservable_for_id(node->id()).IsUnvisited(); } -void StoreStoreElimination::Run(JSGraph* js_graph, Zone* temp_zone) { +void StoreStoreElimination::Run(JSGraph* js_graph, TickCounter* tick_counter, + Zone* temp_zone) { // Find superfluous nodes - RedundantStoreFinder finder(js_graph, temp_zone); + RedundantStoreFinder finder(js_graph, tick_counter, temp_zone); finder.Find(); // Remove superfluous nodes @@ -336,8 +341,11 @@ bool RedundantStoreFinder::CannotObserveStoreField(Node* node) { } // Initialize unobservable_ with js_graph->graph->NodeCount() empty sets. 
-RedundantStoreFinder::RedundantStoreFinder(JSGraph* js_graph, Zone* temp_zone) +RedundantStoreFinder::RedundantStoreFinder(JSGraph* js_graph, + TickCounter* tick_counter, + Zone* temp_zone) : jsgraph_(js_graph), + tick_counter_(tick_counter), temp_zone_(temp_zone), revisit_(temp_zone), in_revisit_(js_graph->graph()->NodeCount(), temp_zone), diff --git a/deps/v8/src/compiler/store-store-elimination.h b/deps/v8/src/compiler/store-store-elimination.h index cda7591fccf10f..646640a3104fa7 100644 --- a/deps/v8/src/compiler/store-store-elimination.h +++ b/deps/v8/src/compiler/store-store-elimination.h @@ -11,11 +11,15 @@ namespace v8 { namespace internal { + +class TickCounter; + namespace compiler { class StoreStoreElimination final { public: - static void Run(JSGraph* js_graph, Zone* temp_zone); + static void Run(JSGraph* js_graph, TickCounter* tick_counter, + Zone* temp_zone); }; } // namespace compiler diff --git a/deps/v8/src/compiler/typer.cc b/deps/v8/src/compiler/typer.cc index 4cf2c38bdb24c9..5dbbad3dcd64bd 100644 --- a/deps/v8/src/compiler/typer.cc +++ b/deps/v8/src/compiler/typer.cc @@ -7,6 +7,7 @@ #include #include "src/base/flags.h" +#include "src/codegen/tick-counter.h" #include "src/compiler/common-operator.h" #include "src/compiler/graph-reducer.h" #include "src/compiler/js-operator.h" @@ -33,13 +34,15 @@ class Typer::Decorator final : public GraphDecorator { Typer* const typer_; }; -Typer::Typer(JSHeapBroker* broker, Flags flags, Graph* graph) +Typer::Typer(JSHeapBroker* broker, Flags flags, Graph* graph, + TickCounter* tick_counter) : flags_(flags), graph_(graph), decorator_(nullptr), cache_(TypeCache::Get()), broker_(broker), - operation_typer_(broker, zone()) { + operation_typer_(broker, zone()), + tick_counter_(tick_counter) { singleton_false_ = operation_typer_.singleton_false(); singleton_true_ = operation_typer_.singleton_true(); @@ -47,7 +50,6 @@ Typer::Typer(JSHeapBroker* broker, Flags flags, Graph* graph) graph_->AddDecorator(decorator_); } - Typer::~Typer() { graph_->RemoveDecorator(decorator_); } @@ -91,14 +93,18 @@ class Typer::Visitor : public Reducer { case IrOpcode::k##x: \ return UpdateType(node, TypeBinaryOp(node, x)); SIMPLIFIED_NUMBER_BINOP_LIST(DECLARE_CASE) + SIMPLIFIED_BIGINT_BINOP_LIST(DECLARE_CASE) SIMPLIFIED_SPECULATIVE_NUMBER_BINOP_LIST(DECLARE_CASE) + SIMPLIFIED_SPECULATIVE_BIGINT_BINOP_LIST(DECLARE_CASE) #undef DECLARE_CASE #define DECLARE_CASE(x) \ case IrOpcode::k##x: \ return UpdateType(node, TypeUnaryOp(node, x)); SIMPLIFIED_NUMBER_UNOP_LIST(DECLARE_CASE) + SIMPLIFIED_BIGINT_UNOP_LIST(DECLARE_CASE) SIMPLIFIED_SPECULATIVE_NUMBER_UNOP_LIST(DECLARE_CASE) + SIMPLIFIED_SPECULATIVE_BIGINT_UNOP_LIST(DECLARE_CASE) #undef DECLARE_CASE #define DECLARE_CASE(x) case IrOpcode::k##x: @@ -157,14 +163,18 @@ class Typer::Visitor : public Reducer { case IrOpcode::k##x: \ return TypeBinaryOp(node, x); SIMPLIFIED_NUMBER_BINOP_LIST(DECLARE_CASE) + SIMPLIFIED_BIGINT_BINOP_LIST(DECLARE_CASE) SIMPLIFIED_SPECULATIVE_NUMBER_BINOP_LIST(DECLARE_CASE) + SIMPLIFIED_SPECULATIVE_BIGINT_BINOP_LIST(DECLARE_CASE) #undef DECLARE_CASE #define DECLARE_CASE(x) \ case IrOpcode::k##x: \ return TypeUnaryOp(node, x); SIMPLIFIED_NUMBER_UNOP_LIST(DECLARE_CASE) + SIMPLIFIED_BIGINT_UNOP_LIST(DECLARE_CASE) SIMPLIFIED_SPECULATIVE_NUMBER_UNOP_LIST(DECLARE_CASE) + SIMPLIFIED_SPECULATIVE_BIGINT_UNOP_LIST(DECLARE_CASE) #undef DECLARE_CASE #define DECLARE_CASE(x) case IrOpcode::k##x: @@ -276,14 +286,18 @@ class Typer::Visitor : public Reducer { return t->operation_typer_.Name(type); \ } 
SIMPLIFIED_NUMBER_UNOP_LIST(DECLARE_METHOD) + SIMPLIFIED_BIGINT_UNOP_LIST(DECLARE_METHOD) SIMPLIFIED_SPECULATIVE_NUMBER_UNOP_LIST(DECLARE_METHOD) + SIMPLIFIED_SPECULATIVE_BIGINT_UNOP_LIST(DECLARE_METHOD) #undef DECLARE_METHOD #define DECLARE_METHOD(Name) \ static Type Name(Type lhs, Type rhs, Typer* t) { \ return t->operation_typer_.Name(lhs, rhs); \ } SIMPLIFIED_NUMBER_BINOP_LIST(DECLARE_METHOD) + SIMPLIFIED_BIGINT_BINOP_LIST(DECLARE_METHOD) SIMPLIFIED_SPECULATIVE_NUMBER_BINOP_LIST(DECLARE_METHOD) + SIMPLIFIED_SPECULATIVE_BIGINT_BINOP_LIST(DECLARE_METHOD) #undef DECLARE_METHOD static Type ObjectIsArrayBufferView(Type, Typer*); @@ -410,7 +424,7 @@ void Typer::Run(const NodeVector& roots, induction_vars->ChangeToInductionVariablePhis(); } Visitor visitor(this, induction_vars); - GraphReducer graph_reducer(zone(), graph()); + GraphReducer graph_reducer(zone(), graph(), tick_counter_); graph_reducer.AddReducer(&visitor); for (Node* const root : roots) graph_reducer.ReduceNode(root); graph_reducer.ReduceGraph(); @@ -798,6 +812,8 @@ Type Typer::Visitor::TypeHeapConstant(Node* node) { return TypeConstant(HeapConstantOf(node->op())); } +Type Typer::Visitor::TypeCompressedHeapConstant(Node* node) { UNREACHABLE(); } + Type Typer::Visitor::TypeExternalConstant(Node* node) { return Type::ExternalPointer(); } @@ -2060,6 +2076,10 @@ Type Typer::Visitor::TypeStringFromSingleCodePoint(Node* node) { return TypeUnaryOp(node, StringFromSingleCodePointTyper); } +Type Typer::Visitor::TypeStringFromCodePointAt(Node* node) { + return Type::String(); +} + Type Typer::Visitor::TypeStringIndexOf(Node* node) { return Type::Range(-1.0, String::kMaxLength, zone()); } @@ -2336,6 +2356,8 @@ Type Typer::Visitor::TypeFindOrderedHashMapEntryForInt32Key(Node* node) { Type Typer::Visitor::TypeRuntimeAbort(Node* node) { UNREACHABLE(); } +Type Typer::Visitor::TypeAssertType(Node* node) { UNREACHABLE(); } + // Heap constants. Type Typer::Visitor::TypeConstant(Handle value) { diff --git a/deps/v8/src/compiler/typer.h b/deps/v8/src/compiler/typer.h index fa87d81f1e05d6..305470d72421e8 100644 --- a/deps/v8/src/compiler/typer.h +++ b/deps/v8/src/compiler/typer.h @@ -11,6 +11,9 @@ namespace v8 { namespace internal { + +class TickCounter; + namespace compiler { // Forward declarations. 
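The hunks around this point all thread the same dependency: a pass receives a TickCounter* from the pipeline and calls DoTick() once per unit of work so long-running phases remain observable. A minimal sketch of that pattern, assuming only the TickCounter::DoTick() interface used in this diff; ExamplePhase and its worklist are hypothetical, modeled on the RedundantStoreFinder changes above:

class ExamplePhase final {
 public:
  ExamplePhase(Graph* graph, TickCounter* tick_counter, Zone* temp_zone)
      : graph_(graph), tick_counter_(tick_counter), worklist_(temp_zone) {}

  void Run() {
    worklist_.push(graph_->end());
    while (!worklist_.empty()) {
      tick_counter_->DoTick();  // One tick per iteration, as in Find() above.
      Node* node = worklist_.top();
      worklist_.pop();
      Visit(node);  // Pass-specific; may push further nodes.
    }
  }

 private:
  void Visit(Node* node);
  Graph* const graph_;
  TickCounter* const tick_counter_;  // Not owned; threaded in by the pipeline.
  ZoneStack<Node*> worklist_;
};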
@@ -25,7 +28,8 @@ class V8_EXPORT_PRIVATE Typer { }; using Flags = base::Flags; - Typer(JSHeapBroker* broker, Flags flags, Graph* graph); + Typer(JSHeapBroker* broker, Flags flags, Graph* graph, + TickCounter* tick_counter); ~Typer(); void Run(); @@ -49,6 +53,7 @@ class V8_EXPORT_PRIVATE Typer { TypeCache const* cache_; JSHeapBroker* broker_; OperationTyper operation_typer_; + TickCounter* const tick_counter_; Type singleton_false_; Type singleton_true_; diff --git a/deps/v8/src/compiler/types.cc b/deps/v8/src/compiler/types.cc index edf07a4ffd99a5..d4267a75fe0f58 100644 --- a/deps/v8/src/compiler/types.cc +++ b/deps/v8/src/compiler/types.cc @@ -6,9 +6,10 @@ #include "src/compiler/types.h" -#include "src/utils/ostreams.h" #include "src/handles/handles-inl.h" +#include "src/objects/instance-type.h" #include "src/objects/objects-inl.h" +#include "src/utils/ostreams.h" namespace v8 { namespace internal { @@ -202,7 +203,7 @@ Type::bitset BitsetType::Lub(const MapRefLike& map) { return kOtherObject; case JS_ARRAY_TYPE: return kArray; - case JS_VALUE_TYPE: + case JS_PRIMITIVE_WRAPPER_TYPE: case JS_MESSAGE_OBJECT_TYPE: case JS_DATE_TYPE: #ifdef V8_INTL_SUPPORT @@ -312,8 +313,9 @@ Type::bitset BitsetType::Lub(const MapRefLike& map) { case SCRIPT_TYPE: case CODE_TYPE: case PROPERTY_CELL_TYPE: - case MODULE_TYPE: - case MODULE_INFO_ENTRY_TYPE: + case SOURCE_TEXT_MODULE_TYPE: + case SOURCE_TEXT_MODULE_INFO_ENTRY_TYPE: + case SYNTHETIC_MODULE_TYPE: case CELL_TYPE: case PREPARSE_DATA_TYPE: case UNCOMPILED_DATA_WITHOUT_PREPARSE_DATA_TYPE: @@ -349,6 +351,7 @@ Type::bitset BitsetType::Lub(const MapRefLike& map) { case ENUM_CACHE_TYPE: case SOURCE_POSITION_TABLE_WITH_FRAME_CACHE_TYPE: case WASM_CAPI_FUNCTION_DATA_TYPE: + case WASM_INDIRECT_FUNCTION_TABLE_TYPE: case WASM_DEBUG_INFO_TYPE: case WASM_EXCEPTION_TAG_TYPE: case WASM_EXPORTED_FUNCTION_DATA_TYPE: @@ -363,6 +366,9 @@ Type::bitset BitsetType::Lub(const MapRefLike& map) { case PROMISE_REJECT_REACTION_JOB_TASK_TYPE: case PROMISE_RESOLVE_THENABLE_JOB_TASK_TYPE: case FINALIZATION_GROUP_CLEANUP_JOB_TASK_TYPE: +#define MAKE_TORQUE_CLASS_TYPE(V) case V: + TORQUE_DEFINED_INSTANCE_TYPES(MAKE_TORQUE_CLASS_TYPE) +#undef MAKE_TORQUE_CLASS_TYPE UNREACHABLE(); } UNREACHABLE(); diff --git a/deps/v8/src/compiler/types.h b/deps/v8/src/compiler/types.h index 21aaab50362a0d..0dc1aa77b06d13 100644 --- a/deps/v8/src/compiler/types.h +++ b/deps/v8/src/compiler/types.h @@ -7,7 +7,7 @@ #include "src/base/compiler-specific.h" #include "src/common/globals.h" -#include "src/compiler/js-heap-broker.h" +#include "src/compiler/heap-refs.h" #include "src/handles/handles.h" #include "src/numbers/conversions.h" #include "src/objects/objects.h" @@ -220,6 +220,7 @@ namespace compiler { INTERNAL_BITSET_TYPE_LIST(V) \ PROPER_BITSET_TYPE_LIST(V) +class JSHeapBroker; class HeapConstantType; class OtherNumberConstantType; class TupleType; diff --git a/deps/v8/src/compiler/verifier.cc b/deps/v8/src/compiler/verifier.cc index 3f1b2e9f13f11e..d3d4d54ea25485 100644 --- a/deps/v8/src/compiler/verifier.cc +++ b/deps/v8/src/compiler/verifier.cc @@ -431,6 +431,7 @@ void Verifier::Visitor::Check(Node* node, const AllNodes& all) { CheckTypeIs(node, Type::Number()); break; case IrOpcode::kHeapConstant: + case IrOpcode::kCompressedHeapConstant: // Constants have no inputs. CHECK_EQ(0, input_count); // Type is anything. 
@@ -933,7 +934,7 @@ void Verifier::Visitor::Check(Node* node, const AllNodes& all) { break; case IrOpcode::kComment: - case IrOpcode::kDebugAbort: + case IrOpcode::kAbortCSAAssert: case IrOpcode::kDebugBreak: case IrOpcode::kRetain: case IrOpcode::kUnsafePointerAdd: @@ -975,6 +976,25 @@ void Verifier::Visitor::Check(Node* node, const AllNodes& all) { case IrOpcode::kSpeculativeNumberLessThanOrEqual: CheckTypeIs(node, Type::Boolean()); break; + case IrOpcode::kSpeculativeBigIntAdd: + CheckTypeIs(node, Type::BigInt()); + break; + case IrOpcode::kSpeculativeBigIntNegate: + CheckTypeIs(node, Type::BigInt()); + break; + case IrOpcode::kBigIntAsUintN: + CheckValueInputIs(node, 0, Type::BigInt()); + CheckTypeIs(node, Type::BigInt()); + break; + case IrOpcode::kBigIntAdd: + CheckValueInputIs(node, 0, Type::BigInt()); + CheckValueInputIs(node, 1, Type::BigInt()); + CheckTypeIs(node, Type::BigInt()); + break; + case IrOpcode::kBigIntNegate: + CheckValueInputIs(node, 0, Type::BigInt()); + CheckTypeIs(node, Type::BigInt()); + break; case IrOpcode::kNumberAdd: case IrOpcode::kNumberSubtract: case IrOpcode::kNumberMultiply: @@ -1156,6 +1176,12 @@ void Verifier::Visitor::Check(Node* node, const AllNodes& all) { CheckValueInputIs(node, 0, Type::Number()); CheckTypeIs(node, Type::String()); break; + case IrOpcode::kStringFromCodePointAt: + // (String, Unsigned32) -> String + CheckValueInputIs(node, 0, Type::String()); + CheckValueInputIs(node, 1, Type::Unsigned32()); + CheckTypeIs(node, Type::String()); + break; case IrOpcode::kStringIndexOf: // (String, String, SignedSmall) -> SignedSmall CheckValueInputIs(node, 0, Type::String()); @@ -1306,6 +1332,7 @@ void Verifier::Visitor::Check(Node* node, const AllNodes& all) { CheckNotTyped(node); break; + case IrOpcode::kChangeCompressedSignedToInt32: case IrOpcode::kChangeTaggedSignedToInt32: { // Signed32 /\ Tagged -> Signed32 /\ UntaggedInt32 // TODO(neis): Activate once ChangeRepresentation works in typer. @@ -1360,6 +1387,7 @@ void Verifier::Visitor::Check(Node* node, const AllNodes& all) { // CheckTypeIs(node, to)); break; } + case IrOpcode::kChangeInt31ToCompressedSigned: case IrOpcode::kChangeInt31ToTaggedSigned: { // Signed31 /\ UntaggedInt32 -> Signed31 /\ Tagged // TODO(neis): Activate once ChangeRepresentation works in typer. 
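The next hunk teaches the verifier about kAssertType, the operator introduced in simplified-operator.cc above. A hedged sketch of how a pass might attach one; the reducer-style context (graph(), simplified(), node) is assumed, and per the DCHECK in AssertType() only Range types are legal:

// Sketch only: wrap a typed node in an AssertType node so the static
// type can be re-checked at runtime. AssertType takes one value input
// and produces one value output, matching the Operator1 definition above.
Type type = NodeProperties::GetType(node);
if (type.IsRange()) {
  Node* assertion = graph()->NewNode(simplified()->AssertType(type), node);
  NodeProperties::SetType(assertion, type);
}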
@@ -1429,6 +1457,14 @@ void Verifier::Visitor::Check(Node* node, const AllNodes& all) { // CheckTypeIs(node, to)); break; } + case IrOpcode::kTruncateBigIntToUint64: + CheckValueInputIs(node, 0, Type::BigInt()); + CheckTypeIs(node, Type::BigInt()); + break; + case IrOpcode::kChangeUint64ToBigInt: + CheckValueInputIs(node, 0, Type::BigInt()); + CheckTypeIs(node, Type::BigInt()); + break; case IrOpcode::kTruncateTaggedToBit: case IrOpcode::kTruncateTaggedPointerToBit: break; @@ -1498,6 +1534,7 @@ void Verifier::Visitor::Check(Node* node, const AllNodes& all) { case IrOpcode::kCheckedUint32Div: case IrOpcode::kCheckedUint32Mod: case IrOpcode::kCheckedInt32Mul: + case IrOpcode::kCheckedInt32ToCompressedSigned: case IrOpcode::kCheckedInt32ToTaggedSigned: case IrOpcode::kCheckedInt64ToInt32: case IrOpcode::kCheckedInt64ToTaggedSigned: @@ -1520,6 +1557,7 @@ void Verifier::Visitor::Check(Node* node, const AllNodes& all) { case IrOpcode::kCheckedTaggedToCompressedSigned: case IrOpcode::kCheckedTaggedToCompressedPointer: case IrOpcode::kCheckedTruncateTaggedToWord32: + case IrOpcode::kAssertType: break; case IrOpcode::kCheckFloat64Hole: @@ -1619,6 +1657,10 @@ void Verifier::Visitor::Check(Node* node, const AllNodes& all) { CHECK_EQ(0, value_count); CheckTypeIs(node, Type::Number()); break; + case IrOpcode::kCheckBigInt: + CheckValueInputIs(node, 0, Type::Any()); + CheckTypeIs(node, Type::BigInt()); + break; // Machine operators // ----------------------- @@ -1755,6 +1797,7 @@ void Verifier::Visitor::Check(Node* node, const AllNodes& all) { case IrOpcode::kBitcastInt32ToFloat32: case IrOpcode::kBitcastInt64ToFloat64: case IrOpcode::kBitcastTaggedToWord: + case IrOpcode::kBitcastTaggedSignedToWord: case IrOpcode::kBitcastWordToTagged: case IrOpcode::kBitcastWordToTaggedSigned: case IrOpcode::kChangeInt32ToInt64: @@ -1800,6 +1843,7 @@ void Verifier::Visitor::Check(Node* node, const AllNodes& all) { case IrOpcode::kLoadParentFramePointer: case IrOpcode::kUnalignedLoad: case IrOpcode::kUnalignedStore: + case IrOpcode::kMemoryBarrier: case IrOpcode::kWord32AtomicLoad: case IrOpcode::kWord32AtomicStore: case IrOpcode::kWord32AtomicExchange: diff --git a/deps/v8/src/compiler/wasm-compiler.cc b/deps/v8/src/compiler/wasm-compiler.cc index 3396214e5894b3..2da7177ece2d54 100644 --- a/deps/v8/src/compiler/wasm-compiler.cc +++ b/deps/v8/src/compiler/wasm-compiler.cc @@ -14,6 +14,7 @@ #include "src/codegen/assembler-inl.h" #include "src/codegen/assembler.h" #include "src/codegen/code-factory.h" +#include "src/codegen/compiler.h" #include "src/codegen/interface-descriptors.h" #include "src/codegen/optimized-compilation-info.h" #include "src/compiler/backend/code-generator.h" @@ -276,8 +277,9 @@ Node* WasmGraphBuilder::EffectPhi(unsigned count, Node** effects, } Node* WasmGraphBuilder::RefNull() { - return LOAD_INSTANCE_FIELD(NullValue, - MachineType::TypeCompressedTaggedPointer()); + Node* isolate_root = LOAD_INSTANCE_FIELD(IsolateRoot, MachineType::Pointer()); + return LOAD_TAGGED_POINTER( + isolate_root, IsolateData::root_slot_offset(RootIndex::kNullValue)); } Node* WasmGraphBuilder::RefFunc(uint32_t function_index) { @@ -2195,8 +2197,8 @@ Node* WasmGraphBuilder::Throw(uint32_t exception_index, graph()->NewNode(m->I32x4ExtractLane(3), value)); break; case wasm::kWasmAnyRef: - case wasm::kWasmAnyFunc: - case wasm::kWasmExceptRef: + case wasm::kWasmFuncRef: + case wasm::kWasmExnRef: STORE_FIXED_ARRAY_SLOT_ANY(values_array, index, value); ++index; break; @@ -2334,8 +2336,8 @@ Node** 
WasmGraphBuilder::GetExceptionValues( BuildDecodeException32BitValue(values_array, &index)); break; case wasm::kWasmAnyRef: - case wasm::kWasmAnyFunc: - case wasm::kWasmExceptRef: + case wasm::kWasmFuncRef: + case wasm::kWasmExnRef: value = LOAD_FIXED_ARRAY_SLOT_ANY(values_array, index); ++index; break; @@ -2853,25 +2855,69 @@ Node* WasmGraphBuilder::CallDirect(uint32_t index, Node** args, Node*** rets, Node* WasmGraphBuilder::CallIndirect(uint32_t table_index, uint32_t sig_index, Node** args, Node*** rets, wasm::WasmCodePosition position) { - if (table_index == 0) { - return BuildIndirectCall(sig_index, args, rets, position, kCallContinues); - } return BuildIndirectCall(table_index, sig_index, args, rets, position, kCallContinues); } -Node* WasmGraphBuilder::BuildIndirectCall(uint32_t sig_index, Node** args, +void WasmGraphBuilder::LoadIndirectFunctionTable(uint32_t table_index, + Node** ift_size, + Node** ift_sig_ids, + Node** ift_targets, + Node** ift_instances) { + if (table_index == 0) { + *ift_size = + LOAD_INSTANCE_FIELD(IndirectFunctionTableSize, MachineType::Uint32()); + *ift_sig_ids = LOAD_INSTANCE_FIELD(IndirectFunctionTableSigIds, + MachineType::Pointer()); + *ift_targets = LOAD_INSTANCE_FIELD(IndirectFunctionTableTargets, + MachineType::Pointer()); + *ift_instances = LOAD_INSTANCE_FIELD( + IndirectFunctionTableRefs, MachineType::TypeCompressedTaggedPointer()); + return; + } + + Node* ift_tables = LOAD_INSTANCE_FIELD( + IndirectFunctionTables, MachineType::TypeCompressedTaggedPointer()); + Node* ift_table = LOAD_FIXED_ARRAY_SLOT_ANY(ift_tables, table_index); + + *ift_size = LOAD_RAW( + ift_table, + wasm::ObjectAccess::ToTagged(WasmIndirectFunctionTable::kSizeOffset), + MachineType::Int32()); + + *ift_sig_ids = LOAD_RAW( + ift_table, + wasm::ObjectAccess::ToTagged(WasmIndirectFunctionTable::kSigIdsOffset), + MachineType::Pointer()); + + *ift_targets = LOAD_RAW( + ift_table, + wasm::ObjectAccess::ToTagged(WasmIndirectFunctionTable::kTargetsOffset), + MachineType::Pointer()); + + *ift_instances = LOAD_RAW( + ift_table, + wasm::ObjectAccess::ToTagged(WasmIndirectFunctionTable::kRefsOffset), + MachineType::TypeCompressedTaggedPointer()); +} + +Node* WasmGraphBuilder::BuildIndirectCall(uint32_t table_index, + uint32_t sig_index, Node** args, Node*** rets, wasm::WasmCodePosition position, IsReturnCall continuation) { DCHECK_NOT_NULL(args[0]); DCHECK_NOT_NULL(env_); - // Assume only one table for now. - wasm::FunctionSig* sig = env_->module->signatures[sig_index]; + // First we have to load the table. + Node* ift_size; + Node* ift_sig_ids; + Node* ift_targets; + Node* ift_instances; + LoadIndirectFunctionTable(table_index, &ift_size, &ift_sig_ids, &ift_targets, + &ift_instances); - Node* ift_size = - LOAD_INSTANCE_FIELD(IndirectFunctionTableSize, MachineType::Uint32()); + wasm::FunctionSig* sig = env_->module->signatures[sig_index]; MachineOperatorBuilder* machine = mcgraph()->machine(); Node* key = args[0]; @@ -2894,9 +2940,6 @@ Node* WasmGraphBuilder::BuildIndirectCall(uint32_t sig_index, Node** args, } // Load signature from the table and check. 
- Node* ift_sig_ids = - LOAD_INSTANCE_FIELD(IndirectFunctionTableSigIds, MachineType::Pointer()); - int32_t expected_sig_id = env_->module->signature_ids[sig_index]; Node* int32_scaled_key = Uint32ToUintptr( graph()->NewNode(machine->Word32Shl(), key, Int32Constant(2))); @@ -2909,11 +2952,6 @@ Node* WasmGraphBuilder::BuildIndirectCall(uint32_t sig_index, Node** args, TrapIfFalse(wasm::kTrapFuncSigMismatch, sig_match, position); - Node* ift_targets = - LOAD_INSTANCE_FIELD(IndirectFunctionTableTargets, MachineType::Pointer()); - Node* ift_instances = LOAD_INSTANCE_FIELD( - IndirectFunctionTableRefs, MachineType::TypeCompressedTaggedPointer()); - Node* tagged_scaled_key; if (kTaggedSize == kInt32Size) { tagged_scaled_key = int32_scaled_key; @@ -2955,48 +2993,6 @@ Node* WasmGraphBuilder::BuildIndirectCall(uint32_t sig_index, Node** args, } } -Node* WasmGraphBuilder::BuildIndirectCall(uint32_t table_index, - uint32_t sig_index, Node** args, - Node*** rets, - wasm::WasmCodePosition position, - IsReturnCall continuation) { - DCHECK_NOT_NULL(args[0]); - Node* entry_index = args[0]; - DCHECK_NOT_NULL(env_); - BoundsCheckTable(table_index, entry_index, position, wasm::kTrapFuncInvalid, - nullptr); - - DCHECK(Smi::IsValid(table_index)); - DCHECK(Smi::IsValid(sig_index)); - Node* runtime_args[]{ - graph()->NewNode(mcgraph()->common()->NumberConstant(table_index)), - BuildChangeUint31ToSmi(entry_index), - graph()->NewNode(mcgraph()->common()->NumberConstant(sig_index))}; - - Node* target_instance = BuildCallToRuntime( - Runtime::kWasmIndirectCallCheckSignatureAndGetTargetInstance, - runtime_args, arraysize(runtime_args)); - - // We reuse the runtime_args array here, even though we only need the first - // two arguments. - Node* call_target = BuildCallToRuntime( - Runtime::kWasmIndirectCallGetTargetAddress, runtime_args, 2); - - wasm::FunctionSig* sig = env_->module->signatures[sig_index]; - args[0] = call_target; - const UseRetpoline use_retpoline = - untrusted_code_mitigations_ ? kRetpoline : kNoRetpoline; - - switch (continuation) { - case kCallContinues: - return BuildWasmCall(sig, args, rets, position, target_instance, - use_retpoline); - case kReturnCall: - return BuildWasmReturnCall(sig, args, position, target_instance, - use_retpoline); - } -} - Node* WasmGraphBuilder::ReturnCall(uint32_t index, Node** args, wasm::WasmCodePosition position) { DCHECK_NULL(args[0]); @@ -3019,9 +3015,6 @@ Node* WasmGraphBuilder::ReturnCall(uint32_t index, Node** args, Node* WasmGraphBuilder::ReturnCallIndirect(uint32_t table_index, uint32_t sig_index, Node** args, wasm::WasmCodePosition position) { - if (table_index == 0) { - return BuildIndirectCall(sig_index, args, nullptr, position, kReturnCall); - } return BuildIndirectCall(table_index, sig_index, args, nullptr, position, kReturnCall); } @@ -3324,13 +3317,6 @@ Node* WasmGraphBuilder::CurrentMemoryPages() { return result; } -Node* WasmGraphBuilder::BuildLoadBuiltinFromInstance(int builtin_index) { - DCHECK(Builtins::IsBuiltinId(builtin_index)); - Node* isolate_root = LOAD_INSTANCE_FIELD(IsolateRoot, MachineType::Pointer()); - return LOAD_TAGGED_POINTER(isolate_root, - IsolateData::builtin_slot_offset(builtin_index)); -} - // Only call this function for code which is not reused across instantiations, // as we do not patch the embedded js_context. 
Node* WasmGraphBuilder::BuildCallToRuntimeWithContext( @@ -3492,7 +3478,7 @@ void WasmGraphBuilder::GetTableBaseAndOffset(uint32_t table_index, wasm::ObjectAccess::ElementOffsetInTaggedFixedArray(0))); } -Node* WasmGraphBuilder::GetTable(uint32_t table_index, Node* index, +Node* WasmGraphBuilder::TableGet(uint32_t table_index, Node* index, wasm::WasmCodePosition position) { if (env_->module->tables[table_index].type == wasm::kWasmAnyRef) { Node* base = nullptr; @@ -3501,7 +3487,7 @@ Node* WasmGraphBuilder::GetTable(uint32_t table_index, Node* index, return LOAD_RAW_NODE_OFFSET(base, offset, MachineType::TypeCompressedTagged()); } - // We access anyfunc tables through runtime calls. + // We access funcref tables through runtime calls. WasmTableGetDescriptor interface_descriptor; auto call_descriptor = Linkage::GetStubCallDescriptor( mcgraph()->zone(), // zone @@ -3521,7 +3507,7 @@ Node* WasmGraphBuilder::GetTable(uint32_t table_index, Node* index, Effect(), Control()))); } -Node* WasmGraphBuilder::SetTable(uint32_t table_index, Node* index, Node* val, +Node* WasmGraphBuilder::TableSet(uint32_t table_index, Node* index, Node* val, wasm::WasmCodePosition position) { if (env_->module->tables[table_index].type == wasm::kWasmAnyRef) { Node* base = nullptr; @@ -3530,7 +3516,7 @@ Node* WasmGraphBuilder::SetTable(uint32_t table_index, Node* index, Node* val, return STORE_RAW_NODE_OFFSET( base, offset, val, MachineRepresentation::kTagged, kFullWriteBarrier); } else { - // We access anyfunc tables through runtime calls. + // We access funcref tables through runtime calls. WasmTableSetDescriptor interface_descriptor; auto call_descriptor = Linkage::GetStubCallDescriptor( mcgraph()->zone(), // zone @@ -4000,6 +3986,30 @@ Node* WasmGraphBuilder::S128Zero() { Node* WasmGraphBuilder::SimdOp(wasm::WasmOpcode opcode, Node* const* inputs) { has_simd_ = true; switch (opcode) { + case wasm::kExprF64x2Splat: + return graph()->NewNode(mcgraph()->machine()->F64x2Splat(), inputs[0]); + case wasm::kExprF64x2Abs: + return graph()->NewNode(mcgraph()->machine()->F64x2Abs(), inputs[0]); + case wasm::kExprF64x2Neg: + return graph()->NewNode(mcgraph()->machine()->F64x2Neg(), inputs[0]); + case wasm::kExprF64x2Eq: + return graph()->NewNode(mcgraph()->machine()->F64x2Eq(), inputs[0], + inputs[1]); + case wasm::kExprF64x2Ne: + return graph()->NewNode(mcgraph()->machine()->F64x2Ne(), inputs[0], + inputs[1]); + case wasm::kExprF64x2Lt: + return graph()->NewNode(mcgraph()->machine()->F64x2Lt(), inputs[0], + inputs[1]); + case wasm::kExprF64x2Le: + return graph()->NewNode(mcgraph()->machine()->F64x2Le(), inputs[0], + inputs[1]); + case wasm::kExprF64x2Gt: + return graph()->NewNode(mcgraph()->machine()->F64x2Lt(), inputs[1], + inputs[0]); + case wasm::kExprF64x2Ge: + return graph()->NewNode(mcgraph()->machine()->F64x2Le(), inputs[1], + inputs[0]); case wasm::kExprF32x4Splat: return graph()->NewNode(mcgraph()->machine()->F32x4Splat(), inputs[0]); case wasm::kExprF32x4SConvertI32x4: @@ -4054,6 +4064,49 @@ Node* WasmGraphBuilder::SimdOp(wasm::WasmOpcode opcode, Node* const* inputs) { case wasm::kExprF32x4Ge: return graph()->NewNode(mcgraph()->machine()->F32x4Le(), inputs[1], inputs[0]); + case wasm::kExprI64x2Splat: + return graph()->NewNode(mcgraph()->machine()->I64x2Splat(), inputs[0]); + case wasm::kExprI64x2Neg: + return graph()->NewNode(mcgraph()->machine()->I64x2Neg(), inputs[0]); + case wasm::kExprI64x2Add: + return graph()->NewNode(mcgraph()->machine()->I64x2Add(), inputs[0], + inputs[1]); + case wasm::kExprI64x2Sub: + 
return graph()->NewNode(mcgraph()->machine()->I64x2Sub(), inputs[0], + inputs[1]); + case wasm::kExprI64x2Mul: + return graph()->NewNode(mcgraph()->machine()->I64x2Mul(), inputs[0], + inputs[1]); + case wasm::kExprI64x2Eq: + return graph()->NewNode(mcgraph()->machine()->I64x2Eq(), inputs[0], + inputs[1]); + case wasm::kExprI64x2Ne: + return graph()->NewNode(mcgraph()->machine()->I64x2Ne(), inputs[0], + inputs[1]); + case wasm::kExprI64x2LtS: + return graph()->NewNode(mcgraph()->machine()->I64x2GtS(), inputs[1], + inputs[0]); + case wasm::kExprI64x2LeS: + return graph()->NewNode(mcgraph()->machine()->I64x2GeS(), inputs[1], + inputs[0]); + case wasm::kExprI64x2GtS: + return graph()->NewNode(mcgraph()->machine()->I64x2GtS(), inputs[0], + inputs[1]); + case wasm::kExprI64x2GeS: + return graph()->NewNode(mcgraph()->machine()->I64x2GeS(), inputs[0], + inputs[1]); + case wasm::kExprI64x2LtU: + return graph()->NewNode(mcgraph()->machine()->I64x2GtU(), inputs[1], + inputs[0]); + case wasm::kExprI64x2LeU: + return graph()->NewNode(mcgraph()->machine()->I64x2GeU(), inputs[1], + inputs[0]); + case wasm::kExprI64x2GtU: + return graph()->NewNode(mcgraph()->machine()->I64x2GtU(), inputs[0], + inputs[1]); + case wasm::kExprI64x2GeU: + return graph()->NewNode(mcgraph()->machine()->I64x2GeU(), inputs[0], + inputs[1]); case wasm::kExprI32x4Splat: return graph()->NewNode(mcgraph()->machine()->I32x4Splat(), inputs[0]); case wasm::kExprI32x4SConvertF32x4: @@ -4305,6 +4358,10 @@ Node* WasmGraphBuilder::SimdOp(wasm::WasmOpcode opcode, Node* const* inputs) { case wasm::kExprS128Select: return graph()->NewNode(mcgraph()->machine()->S128Select(), inputs[2], inputs[0], inputs[1]); + case wasm::kExprS1x2AnyTrue: + return graph()->NewNode(mcgraph()->machine()->S1x2AnyTrue(), inputs[0]); + case wasm::kExprS1x2AllTrue: + return graph()->NewNode(mcgraph()->machine()->S1x2AllTrue(), inputs[0]); case wasm::kExprS1x4AnyTrue: return graph()->NewNode(mcgraph()->machine()->S1x4AnyTrue(), inputs[0]); case wasm::kExprS1x4AllTrue: @@ -4326,12 +4383,24 @@ Node* WasmGraphBuilder::SimdLaneOp(wasm::WasmOpcode opcode, uint8_t lane, Node* const* inputs) { has_simd_ = true; switch (opcode) { + case wasm::kExprF64x2ExtractLane: + return graph()->NewNode(mcgraph()->machine()->F64x2ExtractLane(lane), + inputs[0]); + case wasm::kExprF64x2ReplaceLane: + return graph()->NewNode(mcgraph()->machine()->F64x2ReplaceLane(lane), + inputs[0], inputs[1]); case wasm::kExprF32x4ExtractLane: return graph()->NewNode(mcgraph()->machine()->F32x4ExtractLane(lane), inputs[0]); case wasm::kExprF32x4ReplaceLane: return graph()->NewNode(mcgraph()->machine()->F32x4ReplaceLane(lane), inputs[0], inputs[1]); + case wasm::kExprI64x2ExtractLane: + return graph()->NewNode(mcgraph()->machine()->I64x2ExtractLane(lane), + inputs[0]); + case wasm::kExprI64x2ReplaceLane: + return graph()->NewNode(mcgraph()->machine()->I64x2ReplaceLane(lane), + inputs[0], inputs[1]); case wasm::kExprI32x4ExtractLane: return graph()->NewNode(mcgraph()->machine()->I32x4ExtractLane(lane), inputs[0]); @@ -4359,6 +4428,14 @@ Node* WasmGraphBuilder::SimdShiftOp(wasm::WasmOpcode opcode, uint8_t shift, Node* const* inputs) { has_simd_ = true; switch (opcode) { + case wasm::kExprI64x2Shl: + return graph()->NewNode(mcgraph()->machine()->I64x2Shl(shift), inputs[0]); + case wasm::kExprI64x2ShrS: + return graph()->NewNode(mcgraph()->machine()->I64x2ShrS(shift), + inputs[0]); + case wasm::kExprI64x2ShrU: + return graph()->NewNode(mcgraph()->machine()->I64x2ShrU(shift), + inputs[0]); case 
wasm::kExprI32x4Shl: return graph()->NewNode(mcgraph()->machine()->I32x4Shl(shift), inputs[0]); case wasm::kExprI32x4ShrS: @@ -4612,6 +4689,11 @@ Node* WasmGraphBuilder::AtomicOp(wasm::WasmOpcode opcode, Node* const* inputs, return SetEffect(node); } +Node* WasmGraphBuilder::AtomicFence() { + return SetEffect(graph()->NewNode(mcgraph()->machine()->MemBarrier(), + Effect(), Control())); +} + #undef ATOMIC_BINOP_LIST #undef ATOMIC_CMP_EXCHG_LIST #undef ATOMIC_LOAD_LIST @@ -4636,8 +4718,19 @@ Node* WasmGraphBuilder::MemoryInit(uint32_t data_segment_index, Node* dst, Node* src, Node* size, wasm::WasmCodePosition position) { CheckDataSegmentIsPassiveAndNotDropped(data_segment_index, position); - Node* dst_fail = BoundsCheckMemRange(&dst, &size, position); auto m = mcgraph()->machine(); + auto common = mcgraph()->common(); + Node* size_null_check = + graph()->NewNode(m->Word32Equal(), size, mcgraph()->Int32Constant(0)); + Node* size_null_branch = graph()->NewNode(common->Branch(BranchHint::kFalse), + size_null_check, Control()); + + Node* size_null_etrue = Effect(); + Node* size_null_if_false = + graph()->NewNode(common->IfFalse(), size_null_branch); + SetControl(size_null_if_false); + + Node* dst_fail = BoundsCheckMemRange(&dst, &size, position); Node* seg_index = Uint32Constant(data_segment_index); Node* src_fail; @@ -4679,9 +4772,16 @@ Node* WasmGraphBuilder::MemoryInit(uint32_t data_segment_index, Node* dst, MachineType::Uint32()}; MachineSignature sig(0, 3, sig_types); BuildCCall(&sig, function, dst, src, size); - return TrapIfTrue(wasm::kTrapMemOutOfBounds, - graph()->NewNode(m->Word32Or(), dst_fail, src_fail), - position); + TrapIfTrue(wasm::kTrapMemOutOfBounds, + graph()->NewNode(m->Word32Or(), dst_fail, src_fail), position); + Node* size_null_if_true = + graph()->NewNode(common->IfTrue(), size_null_branch); + + Node* merge = SetControl( + graph()->NewNode(common->Merge(2), size_null_if_true, Control())); + SetEffect( + graph()->NewNode(common->EffectPhi(2), size_null_etrue, Effect(), merge)); + return merge; } Node* WasmGraphBuilder::DataDrop(uint32_t data_segment_index, @@ -4699,16 +4799,19 @@ Node* WasmGraphBuilder::DataDrop(uint32_t data_segment_index, Node* WasmGraphBuilder::MemoryCopy(Node* dst, Node* src, Node* size, wasm::WasmCodePosition position) { auto m = mcgraph()->machine(); - // The data must be copied backward if the regions overlap and src < dst. The - // regions overlap if {src + size > dst && dst + size > src}. Since we already - // test that {src < dst}, we know that {dst + size > src}, so this simplifies - // to just {src + size > dst}. That sum can overflow, but if we subtract - // {size} from both sides of the inequality we get the equivalent test - // {size > dst - src}. - Node* copy_backward = graph()->NewNode( - m->Word32And(), graph()->NewNode(m->Uint32LessThan(), src, dst), - graph()->NewNode(m->Uint32LessThan(), - graph()->NewNode(m->Int32Sub(), dst, src), size)); + auto common = mcgraph()->common(); + // If size == 0, then memory.copy is a no-op. + Node* size_null_check = + graph()->NewNode(m->Word32Equal(), size, mcgraph()->Int32Constant(0)); + Node* size_null_branch = graph()->NewNode(common->Branch(BranchHint::kFalse), + size_null_check, Control()); + + Node* size_null_etrue = Effect(); + Node* size_null_if_false = + graph()->NewNode(common->IfFalse(), size_null_branch); + SetControl(size_null_if_false); + // The data must be copied backward if src < dst. 
+ Node* copy_backward = graph()->NewNode(m->Uint32LessThan(), src, dst); Node* dst_fail = BoundsCheckMemRange(&dst, &size, position); @@ -4728,13 +4831,32 @@ Node* WasmGraphBuilder::MemoryCopy(Node* dst, Node* src, Node* size, MachineType::Uint32()}; MachineSignature sig(0, 3, sig_types); BuildCCall(&sig, function, dst, src, size); - return TrapIfTrue(wasm::kTrapMemOutOfBounds, - graph()->NewNode(m->Word32Or(), dst_fail, src_fail), - position); + TrapIfTrue(wasm::kTrapMemOutOfBounds, + graph()->NewNode(m->Word32Or(), dst_fail, src_fail), position); + Node* size_null_if_true = + graph()->NewNode(common->IfTrue(), size_null_branch); + + Node* merge = SetControl( + graph()->NewNode(common->Merge(2), size_null_if_true, Control())); + SetEffect( + graph()->NewNode(common->EffectPhi(2), size_null_etrue, Effect(), merge)); + return merge; } Node* WasmGraphBuilder::MemoryFill(Node* dst, Node* value, Node* size, wasm::WasmCodePosition position) { + auto machine = mcgraph()->machine(); + auto common = mcgraph()->common(); + // If size == 0, then memory.fill is a no-op. + Node* size_null_check = graph()->NewNode(machine->Word32Equal(), size, + mcgraph()->Int32Constant(0)); + Node* size_null_branch = graph()->NewNode(common->Branch(BranchHint::kFalse), + size_null_check, Control()); + + Node* size_null_etrue = Effect(); + Node* size_null_if_false = + graph()->NewNode(common->IfFalse(), size_null_branch); + SetControl(size_null_if_false); Node* fail = BoundsCheckMemRange(&dst, &size, position); Node* function = graph()->NewNode(mcgraph()->common()->ExternalConstant( ExternalReference::wasm_memory_fill())); @@ -4742,7 +4864,15 @@ Node* WasmGraphBuilder::MemoryFill(Node* dst, Node* value, Node* size, MachineType::Uint32()}; MachineSignature sig(0, 3, sig_types); BuildCCall(&sig, function, dst, value, size); - return TrapIfTrue(wasm::kTrapMemOutOfBounds, fail, position); + TrapIfTrue(wasm::kTrapMemOutOfBounds, fail, position); + Node* size_null_if_true = + graph()->NewNode(common->IfTrue(), size_null_branch); + + Node* merge = SetControl( + graph()->NewNode(common->Merge(2), size_null_if_true, Control())); + SetEffect( + graph()->NewNode(common->EffectPhi(2), size_null_etrue, Effect(), merge)); + return merge; } Node* WasmGraphBuilder::CheckElemSegmentIsPassiveAndNotDropped( @@ -4789,13 +4919,13 @@ Node* WasmGraphBuilder::ElemDrop(uint32_t elem_segment_index, mcgraph()->Int32Constant(1), Effect(), Control())); } -Node* WasmGraphBuilder::TableCopy(uint32_t table_src_index, - uint32_t table_dst_index, Node* dst, +Node* WasmGraphBuilder::TableCopy(uint32_t table_dst_index, + uint32_t table_src_index, Node* dst, Node* src, Node* size, wasm::WasmCodePosition position) { Node* args[] = { - graph()->NewNode(mcgraph()->common()->NumberConstant(table_src_index)), graph()->NewNode(mcgraph()->common()->NumberConstant(table_dst_index)), + graph()->NewNode(mcgraph()->common()->NumberConstant(table_src_index)), BuildConvertUint32ToSmiWithSaturation(dst, FLAG_wasm_max_table_size), BuildConvertUint32ToSmiWithSaturation(src, FLAG_wasm_max_table_size), BuildConvertUint32ToSmiWithSaturation(size, FLAG_wasm_max_table_size)}; @@ -4878,28 +5008,6 @@ void WasmGraphBuilder::RemoveBytecodePositionDecorator() { } namespace { -bool must_record_function_compilation(Isolate* isolate) { - return isolate->logger()->is_listening_to_code_events() || - isolate->is_profiling(); -} - -PRINTF_FORMAT(4, 5) -void RecordFunctionCompilation(CodeEventListener::LogEventsAndTags tag, - Isolate* isolate, Handle<Code> code, - const char* format, ...) 
{ - DCHECK(must_record_function_compilation(isolate)); - - ScopedVector buffer(128); - va_list arguments; - va_start(arguments, format); - int len = VSNPrintF(buffer, format, arguments); - CHECK_LT(0, len); - va_end(arguments); - Handle name_str = - isolate->factory()->NewStringFromAsciiChecked(buffer.begin()); - PROFILE(isolate, CodeCreateEvent(tag, AbstractCode::cast(*code), *name_str)); -} - class WasmWrapperGraphBuilder : public WasmGraphBuilder { public: WasmWrapperGraphBuilder(Zone* zone, JSGraph* jsgraph, wasm::FunctionSig* sig, @@ -4914,12 +5022,12 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder { Node* BuildAllocateHeapNumberWithValue(Node* value, Node* control) { MachineOperatorBuilder* machine = mcgraph()->machine(); CommonOperatorBuilder* common = mcgraph()->common(); - Node* target = (stub_mode_ == StubCallMode::kCallWasmRuntimeStub) - ? mcgraph()->RelocatableIntPtrConstant( - wasm::WasmCode::kWasmAllocateHeapNumber, - RelocInfo::WASM_STUB_CALL) - : jsgraph()->HeapConstant( - BUILTIN_CODE(isolate_, AllocateHeapNumber)); + Node* target = + (stub_mode_ == StubCallMode::kCallWasmRuntimeStub) + ? mcgraph()->RelocatableIntPtrConstant( + wasm::WasmCode::kWasmAllocateHeapNumber, + RelocInfo::WASM_STUB_CALL) + : BuildLoadBuiltinFromInstance(Builtins::kAllocateHeapNumber); if (!allocate_heap_number_operator_.is_set()) { auto call_descriptor = Linkage::GetStubCallDescriptor( mcgraph()->zone(), AllocateHeapNumberDescriptor(), 0, @@ -4956,6 +5064,34 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder { return mcgraph()->IntPtrConstant(HeapNumber::kValueOffset - kHeapObjectTag); } + Node* BuildLoadUndefinedValueFromInstance() { + if (undefined_value_node_ == nullptr) { + Node* isolate_root = graph()->NewNode( + mcgraph()->machine()->Load(MachineType::Pointer()), + instance_node_.get(), + mcgraph()->Int32Constant(WASM_INSTANCE_OBJECT_OFFSET(IsolateRoot)), + graph()->start(), graph()->start()); + undefined_value_node_ = InsertDecompressionIfNeeded( + MachineType::TypeCompressedTaggedPointer(), + graph()->NewNode( + mcgraph()->machine()->Load( + MachineType::TypeCompressedTaggedPointer()), + isolate_root, + mcgraph()->Int32Constant( + IsolateData::root_slot_offset(RootIndex::kUndefinedValue)), + isolate_root, graph()->start())); + } + return undefined_value_node_.get(); + } + + Node* BuildLoadBuiltinFromInstance(int builtin_index) { + DCHECK(Builtins::IsBuiltinId(builtin_index)); + Node* isolate_root = + LOAD_INSTANCE_FIELD(IsolateRoot, MachineType::Pointer()); + return LOAD_TAGGED_POINTER(isolate_root, + IsolateData::builtin_slot_offset(builtin_index)); + } + Node* BuildChangeInt32ToTagged(Node* value) { MachineOperatorBuilder* machine = mcgraph()->machine(); CommonOperatorBuilder* common = mcgraph()->common(); @@ -5096,7 +5232,7 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder { (stub_mode_ == StubCallMode::kCallWasmRuntimeStub) ? 
mcgraph()->RelocatableIntPtrConstant( wasm::WasmCode::kWasmToNumber, RelocInfo::WASM_STUB_CALL) - : jsgraph()->HeapConstant(BUILTIN_CODE(isolate_, ToNumber)); + : BuildLoadBuiltinFromInstance(Builtins::kToNumber); Node* result = SetEffect( graph()->NewNode(mcgraph()->common()->Call(call_descriptor), stub_code, @@ -5126,8 +5262,7 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder { SetControl(is_heap_object.if_true); Node* orig_effect = Effect(); - Node* undefined_node = LOAD_INSTANCE_FIELD( - UndefinedValue, MachineType::TypeCompressedTaggedPointer()); + Node* undefined_node = BuildLoadUndefinedValueFromInstance(); Node* check_undefined = graph()->NewNode(machine->WordEqual(), value, undefined_node); Node* effect_tagged = Effect(); @@ -5173,8 +5308,8 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder { case wasm::kWasmF64: return BuildChangeFloat64ToTagged(node); case wasm::kWasmAnyRef: - case wasm::kWasmAnyFunc: - case wasm::kWasmExceptRef: + case wasm::kWasmFuncRef: + case wasm::kWasmExnRef: return node; default: UNREACHABLE(); @@ -5196,7 +5331,7 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder { (stub_mode_ == StubCallMode::kCallWasmRuntimeStub) ? mcgraph()->RelocatableIntPtrConstant( wasm::WasmCode::kWasmI64ToBigInt, RelocInfo::WASM_STUB_CALL) - : jsgraph()->HeapConstant(BUILTIN_CODE(isolate_, I64ToBigInt)); + : BuildLoadBuiltinFromInstance(Builtins::kI64ToBigInt); return SetEffect( SetControl(graph()->NewNode(mcgraph()->common()->Call(call_descriptor), @@ -5218,7 +5353,7 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder { (stub_mode_ == StubCallMode::kCallWasmRuntimeStub) ? mcgraph()->RelocatableIntPtrConstant( wasm::WasmCode::kWasmBigIntToI64, RelocInfo::WASM_STUB_CALL) - : jsgraph()->HeapConstant(BUILTIN_CODE(isolate_, BigIntToI64)); + : BuildLoadBuiltinFromInstance(Builtins::kBigIntToI64); return SetEffect(SetControl( graph()->NewNode(mcgraph()->common()->Call(call_descriptor), target, @@ -5228,15 +5363,15 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder { Node* FromJS(Node* node, Node* js_context, wasm::ValueType type) { DCHECK_NE(wasm::kWasmStmt, type); - // The parameter is of type anyref or except_ref, we take it as is. - if (type == wasm::kWasmAnyRef || type == wasm::kWasmExceptRef) { + // The parameter is of type anyref or exnref, we take it as is. + if (type == wasm::kWasmAnyRef || type == wasm::kWasmExnRef) { return node; } - if (type == wasm::kWasmAnyFunc) { + if (type == wasm::kWasmFuncRef) { Node* check = BuildChangeSmiToInt32(SetEffect(BuildCallToRuntimeWithContext( - Runtime::kWasmIsValidAnyFuncValue, js_context, &node, 1, effect_, + Runtime::kWasmIsValidFuncRefValue, js_context, &node, 1, effect_, Control()))); Diamond type_check(graph(), mcgraph()->common(), check, @@ -5471,8 +5606,7 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder { // The callable is passed as the last parameter, after WASM arguments. Node* callable_node = Param(wasm_count + 1); - Node* undefined_node = LOAD_INSTANCE_FIELD( - UndefinedValue, MachineType::TypeCompressedTaggedPointer()); + Node* undefined_node = BuildLoadUndefinedValueFromInstance(); Node* call = nullptr; bool sloppy_receiver = true; @@ -5811,22 +5945,26 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder { } void BuildCWasmEntry() { - // Build the start and the JS parameter nodes. - SetEffect(SetControl(Start(CWasmEntryParameters::kNumParameters + 5))); + // +1 offset for first parameter index being -1. 
+ SetEffect(SetControl(Start(CWasmEntryParameters::kNumParameters + 1))); - // Create parameter nodes (offset by 1 for the receiver parameter). - Node* code_entry = Param(CWasmEntryParameters::kCodeEntry + 1); - Node* object_ref_node = Param(CWasmEntryParameters::kObjectRef + 1); - Node* arg_buffer = Param(CWasmEntryParameters::kArgumentsBuffer + 1); + Node* code_entry = Param(CWasmEntryParameters::kCodeEntry); + Node* object_ref = Param(CWasmEntryParameters::kObjectRef); + Node* arg_buffer = Param(CWasmEntryParameters::kArgumentsBuffer); + Node* c_entry_fp = Param(CWasmEntryParameters::kCEntryFp); + + Node* fp_value = graph()->NewNode(mcgraph()->machine()->LoadFramePointer()); + STORE_RAW(fp_value, TypedFrameConstants::kFirstPushedFrameValueOffset, + c_entry_fp, MachineType::PointerRepresentation(), + kNoWriteBarrier); int wasm_arg_count = static_cast(sig_->parameter_count()); - int arg_count = - wasm_arg_count + 4; // code, object_ref_node, control, effect + int arg_count = wasm_arg_count + 4; // code, object_ref, control, effect Node** args = Buffer(arg_count); int pos = 0; args[pos++] = code_entry; - args[pos++] = object_ref_node; + args[pos++] = object_ref; int offset = 0; for (wasm::ValueType type : sig_->parameters()) { @@ -5847,26 +5985,43 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder { Node* call = SetEffect(graph()->NewNode( mcgraph()->common()->Call(call_descriptor), arg_count, args)); - // Store the return value. - DCHECK_GE(1, sig_->return_count()); - if (sig_->return_count() == 1) { + Node* if_success = graph()->NewNode(mcgraph()->common()->IfSuccess(), call); + Node* if_exception = + graph()->NewNode(mcgraph()->common()->IfException(), call, call); + + // Handle exception: return it. + SetControl(if_exception); + Return(if_exception); + + // Handle success: store the return value(s). + SetControl(if_success); + pos = 0; + offset = 0; + for (wasm::ValueType type : sig_->returns()) { StoreRepresentation store_rep( - wasm::ValueTypes::MachineRepresentationFor(sig_->GetReturn()), - kNoWriteBarrier); + wasm::ValueTypes::MachineRepresentationFor(type), kNoWriteBarrier); + Node* value = sig_->return_count() == 1 + ? 
call + : graph()->NewNode(mcgraph()->common()->Projection(pos), + call, Control()); SetEffect(graph()->NewNode(mcgraph()->machine()->Store(store_rep), - arg_buffer, Int32Constant(0), call, Effect(), - Control())); + arg_buffer, Int32Constant(offset), value, + Effect(), Control())); + offset += wasm::ValueTypes::ElementSizeInBytes(type); + pos++; } + Return(jsgraph()->SmiConstant(0)); if (mcgraph()->machine()->Is32() && ContainsInt64(sig_)) { MachineRepresentation sig_reps[] = { - MachineRepresentation::kWord32, // return value - MachineRepresentation::kTagged, // receiver - MachineRepresentation::kTagged, // arg0 (code) - MachineRepresentation::kTagged // arg1 (buffer) + MachineType::PointerRepresentation(), // return value + MachineType::PointerRepresentation(), // target + MachineRepresentation::kTagged, // object_ref + MachineType::PointerRepresentation(), // argv + MachineType::PointerRepresentation() // c_entry_fp }; - Signature c_entry_sig(1, 2, sig_reps); + Signature c_entry_sig(1, 4, sig_reps); Int64Lowering r(mcgraph()->graph(), mcgraph()->machine(), mcgraph()->common(), mcgraph()->zone(), &c_entry_sig); r.LowerGraph(); @@ -5879,6 +6034,7 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder { Isolate* const isolate_; JSGraph* jsgraph_; StubCallMode stub_mode_; + SetOncePointer undefined_value_node_; SetOncePointer allocate_heap_number_operator_; wasm::WasmFeatures enabled_features_; }; @@ -5901,27 +6057,25 @@ void AppendSignature(char* buffer, size_t max_name_len, } // namespace -MaybeHandle CompileJSToWasmWrapper(Isolate* isolate, - wasm::FunctionSig* sig, - bool is_import) { - TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.wasm"), - "CompileJSToWasmWrapper"); +std::unique_ptr NewJSToWasmCompilationJob( + Isolate* isolate, wasm::FunctionSig* sig, bool is_import) { //---------------------------------------------------------------------------- // Create the Graph. //---------------------------------------------------------------------------- - Zone zone(isolate->allocator(), ZONE_NAME); - Graph graph(&zone); - CommonOperatorBuilder common(&zone); + std::unique_ptr zone = + base::make_unique(isolate->allocator(), ZONE_NAME); + Graph* graph = new (zone.get()) Graph(zone.get()); + CommonOperatorBuilder common(zone.get()); MachineOperatorBuilder machine( - &zone, MachineType::PointerRepresentation(), + zone.get(), MachineType::PointerRepresentation(), InstructionSelector::SupportedMachineOperatorFlags(), InstructionSelector::AlignmentRequirements()); - JSGraph jsgraph(isolate, &graph, &common, nullptr, nullptr, &machine); + JSGraph jsgraph(isolate, graph, &common, nullptr, nullptr, &machine); Node* control = nullptr; Node* effect = nullptr; - WasmWrapperGraphBuilder builder(&zone, &jsgraph, sig, nullptr, + WasmWrapperGraphBuilder builder(zone.get(), &jsgraph, sig, nullptr, StubCallMode::kCallCodeObject, wasm::WasmFeaturesFromIsolate(isolate)); builder.set_control_ptr(&control); @@ -5929,73 +6083,66 @@ MaybeHandle CompileJSToWasmWrapper(Isolate* isolate, builder.BuildJSToWasmWrapper(is_import); //---------------------------------------------------------------------------- - // Run the compilation pipeline. + // Create the compilation job. 
   //----------------------------------------------------------------------------
   static constexpr size_t kMaxNameLen = 128;
-  char debug_name[kMaxNameLen] = "js_to_wasm:";
-  AppendSignature(debug_name, kMaxNameLen, sig);
+  auto debug_name = std::unique_ptr<char[]>(new char[kMaxNameLen]);
+  memcpy(debug_name.get(), "js_to_wasm:", 12);
+  AppendSignature(debug_name.get(), kMaxNameLen, sig);

-  // Schedule and compile to machine code.
   int params = static_cast<int>(sig->parameter_count());
   CallDescriptor* incoming = Linkage::GetJSCallDescriptor(
-      &zone, false, params + 1, CallDescriptor::kNoFlags);
+      zone.get(), false, params + 1, CallDescriptor::kNoFlags);

-  MaybeHandle<Code> maybe_code = Pipeline::GenerateCodeForWasmHeapStub(
-      isolate, incoming, &graph, Code::JS_TO_WASM_FUNCTION, debug_name,
-      WasmAssemblerOptions());
-  Handle<Code> code;
-  if (!maybe_code.ToHandle(&code)) {
-    return maybe_code;
-  }
-#ifdef ENABLE_DISASSEMBLER
-  if (FLAG_print_opt_code) {
-    CodeTracer::Scope tracing_scope(isolate->GetCodeTracer());
-    OFStream os(tracing_scope.file());
-    code->Disassemble(debug_name, os);
-  }
-#endif
-
-  if (must_record_function_compilation(isolate)) {
-    RecordFunctionCompilation(CodeEventListener::STUB_TAG, isolate, code, "%s",
-                              debug_name);
-  }
-
-  return code;
+  return Pipeline::NewWasmHeapStubCompilationJob(
+      isolate, incoming, std::move(zone), graph, Code::JS_TO_WASM_FUNCTION,
+      std::move(debug_name), WasmAssemblerOptions());
 }

-WasmImportCallKind GetWasmImportCallKind(Handle<JSReceiver> target,
-                                         wasm::FunctionSig* expected_sig,
-                                         bool has_bigint_feature) {
-  if (WasmExportedFunction::IsWasmExportedFunction(*target)) {
-    auto imported_function = WasmExportedFunction::cast(*target);
-    auto func_index = imported_function.function_index();
-    auto module = imported_function.instance().module();
+std::pair<WasmImportCallKind, Handle<JSReceiver>> ResolveWasmImportCall(
+    Handle<JSReceiver> callable, wasm::FunctionSig* expected_sig,
+    bool has_bigint_feature) {
+  if (WasmExportedFunction::IsWasmExportedFunction(*callable)) {
+    auto imported_function = Handle<WasmExportedFunction>::cast(callable);
+    auto func_index = imported_function->function_index();
+    auto module = imported_function->instance().module();
     wasm::FunctionSig* imported_sig = module->functions[func_index].sig;
     if (*imported_sig != *expected_sig) {
-      return WasmImportCallKind::kLinkError;
+      return std::make_pair(WasmImportCallKind::kLinkError, callable);
     }
-    if (static_cast<uint32_t>(func_index) < module->num_imported_functions) {
-      // TODO(wasm): this redirects all imported-reexported functions
-      // through the call builtin. Fall through to JS function cases below?
-      return WasmImportCallKind::kUseCallBuiltin;
+    if (static_cast<uint32_t>(func_index) >= module->num_imported_functions) {
+      return std::make_pair(WasmImportCallKind::kWasmToWasm, callable);
     }
-    return WasmImportCallKind::kWasmToWasm;
-  }
-  if (WasmCapiFunction::IsWasmCapiFunction(*target)) {
-    WasmCapiFunction capi_function = WasmCapiFunction::cast(*target);
-    if (!capi_function.IsSignatureEqual(expected_sig)) {
-      return WasmImportCallKind::kLinkError;
+    Isolate* isolate = callable->GetIsolate();
+    // Resolve the short-cut to the underlying callable and continue.
+    Handle<WasmInstanceObject> instance(imported_function->instance(), isolate);
+    ImportedFunctionEntry entry(instance, func_index);
+    callable = handle(entry.callable(), isolate);
+  }
+  if (WasmJSFunction::IsWasmJSFunction(*callable)) {
+    auto js_function = Handle<WasmJSFunction>::cast(callable);
+    if (!js_function->MatchesSignature(expected_sig)) {
+      return std::make_pair(WasmImportCallKind::kLinkError, callable);
+    }
+    Isolate* isolate = callable->GetIsolate();
+    // Resolve the short-cut to the underlying callable and continue.
+    callable = handle(js_function->GetCallable(), isolate);
+  }
+  if (WasmCapiFunction::IsWasmCapiFunction(*callable)) {
+    auto capi_function = Handle<WasmCapiFunction>::cast(callable);
+    if (!capi_function->IsSignatureEqual(expected_sig)) {
+      return std::make_pair(WasmImportCallKind::kLinkError, callable);
     }
-    return WasmImportCallKind::kWasmToCapi;
+    return std::make_pair(WasmImportCallKind::kWasmToCapi, callable);
   }
   // Assuming we are calling to JS, check whether this would be a runtime error.
   if (!wasm::IsJSCompatibleSignature(expected_sig, has_bigint_feature)) {
-    return WasmImportCallKind::kRuntimeTypeError;
+    return std::make_pair(WasmImportCallKind::kRuntimeTypeError, callable);
   }
   // For JavaScript calls, determine whether the target has an arity match
   // and whether it has a sloppy receiver.
-  if (target->IsJSFunction()) {
-    Handle<JSFunction> function = Handle<JSFunction>::cast(target);
+  if (callable->IsJSFunction()) {
+    Handle<JSFunction> function = Handle<JSFunction>::cast(callable);
     SharedFunctionInfo shared = function->shared();

     // Check for math intrinsics.
@@ -6004,7 +6151,9 @@ WasmImportCallKind GetWasmImportCallKind(Handle<JSReceiver> target,
     wasm::FunctionSig* sig = wasm::WasmOpcodes::Signature(wasm::kExpr##name); \
     if (!sig) sig = wasm::WasmOpcodes::AsmjsSignature(wasm::kExpr##name);     \
     DCHECK_NOT_NULL(sig);                                                     \
-    if (*expected_sig == *sig) return WasmImportCallKind::k##name;            \
+    if (*expected_sig == *sig) {                                              \
+      return std::make_pair(WasmImportCallKind::k##name, callable);           \
+    }                                                                         \
   }
 #define COMPARE_SIG_FOR_BUILTIN_F64(name) \
   case Builtins::kMath##name:             \
@@ -6051,19 +6200,23 @@ WasmImportCallKind GetWasmImportCallKind(Handle<JSReceiver> target,

     if (IsClassConstructor(shared.kind())) {
       // Class constructor will throw anyway.
-      return WasmImportCallKind::kUseCallBuiltin;
+      return std::make_pair(WasmImportCallKind::kUseCallBuiltin, callable);
     }
     bool sloppy = is_sloppy(shared.language_mode()) && !shared.native();
     if (shared.internal_formal_parameter_count() ==
         expected_sig->parameter_count()) {
-      return sloppy ? WasmImportCallKind::kJSFunctionArityMatchSloppy
-                    : WasmImportCallKind::kJSFunctionArityMatch;
+      return std::make_pair(
+          sloppy ? WasmImportCallKind::kJSFunctionArityMatchSloppy
+                 : WasmImportCallKind::kJSFunctionArityMatch,
+          callable);
     }
-    return sloppy ? WasmImportCallKind::kJSFunctionArityMismatchSloppy
-                  : WasmImportCallKind::kJSFunctionArityMismatch;
+    return std::make_pair(
+        sloppy ? WasmImportCallKind::kJSFunctionArityMismatchSloppy
+               : WasmImportCallKind::kJSFunctionArityMismatch,
+        callable);
   }
   // Unknown case. Use the call builtin.
-  return WasmImportCallKind::kUseCallBuiltin;
+  return std::make_pair(WasmImportCallKind::kUseCallBuiltin, callable);
 }

 wasm::WasmOpcode GetMathIntrinsicOpcode(WasmImportCallKind kind,
@@ -6103,10 +6256,9 @@ wasm::WasmOpcode GetMathIntrinsicOpcode(WasmImportCallKind kind,
 #undef CASE
 }

-wasm::WasmCode* CompileWasmMathIntrinsic(wasm::WasmEngine* wasm_engine,
-                                         wasm::NativeModule* native_module,
-                                         WasmImportCallKind kind,
-                                         wasm::FunctionSig* sig) {
+wasm::WasmCompilationResult CompileWasmMathIntrinsic(
+    wasm::WasmEngine* wasm_engine, WasmImportCallKind kind,
+    wasm::FunctionSig* sig) {
   DCHECK_EQ(1, sig->return_count());

   TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.wasm"),
@@ -6125,7 +6277,7 @@ wasm::WasmCode* CompileWasmMathIntrinsic(wasm::WasmEngine* wasm_engine,
       InstructionSelector::AlignmentRequirements()));

   wasm::CompilationEnv env(
-      native_module->module(), wasm::UseTrapHandler::kNoTrapHandler,
+      nullptr, wasm::UseTrapHandler::kNoTrapHandler,
       wasm::RuntimeExceptionSupport::kNoRuntimeExceptionSupport,
       wasm::kAllWasmFeatures, wasm::LowerSimd::kNoLowerSimd);
@@ -6167,21 +6319,12 @@ wasm::WasmCode* CompileWasmMathIntrinsic(wasm::WasmEngine* wasm_engine,
       wasm_engine, call_descriptor, mcgraph, Code::WASM_FUNCTION,
       wasm::WasmCode::kFunction, debug_name, WasmStubAssemblerOptions(),
       source_positions);
-  std::unique_ptr<wasm::WasmCode> wasm_code = native_module->AddCode(
-      wasm::WasmCode::kAnonymousFuncIndex, result.code_desc,
-      result.frame_slot_count, result.tagged_parameter_slots,
-      std::move(result.protected_instructions),
-      std::move(result.source_positions), wasm::WasmCode::kFunction,
-      wasm::ExecutionTier::kNone);
-  // TODO(titzer): add counters for math intrinsic code size / allocation
-  return native_module->PublishCode(std::move(wasm_code));
+  return result;
 }

-wasm::WasmCode* CompileWasmImportCallWrapper(wasm::WasmEngine* wasm_engine,
-                                             wasm::NativeModule* native_module,
-                                             WasmImportCallKind kind,
-                                             wasm::FunctionSig* sig,
-                                             bool source_positions) {
+wasm::WasmCompilationResult CompileWasmImportCallWrapper(
+    wasm::WasmEngine* wasm_engine, wasm::CompilationEnv* env,
+    WasmImportCallKind kind, wasm::FunctionSig* sig, bool source_positions) {
   DCHECK_NE(WasmImportCallKind::kLinkError, kind);
   DCHECK_NE(WasmImportCallKind::kWasmToWasm, kind);
@@ -6189,7 +6332,7 @@ wasm::WasmCode* CompileWasmImportCallWrapper(wasm::WasmEngine* wasm_engine,
   if (FLAG_wasm_math_intrinsics &&
       kind >= WasmImportCallKind::kFirstMathIntrinsic &&
       kind <= WasmImportCallKind::kLastMathIntrinsic) {
-    return CompileWasmMathIntrinsic(wasm_engine, native_module, kind, sig);
+    return CompileWasmMathIntrinsic(wasm_engine, kind, sig);
   }

   TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.wasm"),
@@ -6214,7 +6357,7 @@ wasm::WasmCode* CompileWasmImportCallWrapper(wasm::WasmEngine* wasm_engine,
   WasmWrapperGraphBuilder builder(&zone, &jsgraph, sig, source_position_table,
                                   StubCallMode::kCallWasmRuntimeStub,
-                                  native_module->enabled_features());
+                                  env->enabled_features);
   builder.set_control_ptr(&control);
   builder.set_effect_ptr(&effect);
   builder.BuildWasmImportCallWrapper(kind);
@@ -6232,13 +6375,8 @@ wasm::WasmCode* CompileWasmImportCallWrapper(wasm::WasmEngine* wasm_engine,
       wasm_engine, incoming, &jsgraph, Code::WASM_TO_JS_FUNCTION,
       wasm::WasmCode::kWasmToJsWrapper, func_name, WasmStubAssemblerOptions(),
       source_position_table);
-  std::unique_ptr<wasm::WasmCode> wasm_code = native_module->AddCode(
-      wasm::WasmCode::kAnonymousFuncIndex, result.code_desc,
-      result.frame_slot_count, result.tagged_parameter_slots,
-      std::move(result.protected_instructions),
-      std::move(result.source_positions), wasm::WasmCode::kWasmToJsWrapper,
-      wasm::ExecutionTier::kNone);
-  return native_module->PublishCode(std::move(wasm_code));
+  result.kind = wasm::WasmCompilationResult::kWasmToJsWrapper;
+  return result;
 }

 wasm::WasmCode* CompileWasmCapiCallWrapper(wasm::WasmEngine* wasm_engine,
@@ -6290,9 +6428,8 @@ wasm::WasmCode* CompileWasmCapiCallWrapper(wasm::WasmEngine* wasm_engine,
       wasm::WasmCode::kWasmToCapiWrapper, debug_name,
       WasmStubAssemblerOptions(), source_positions);
   std::unique_ptr<wasm::WasmCode> wasm_code = native_module->AddCode(
-      wasm::WasmCode::kAnonymousFuncIndex, result.code_desc,
-      result.frame_slot_count, result.tagged_parameter_slots,
-      std::move(result.protected_instructions),
+      wasm::kAnonymousFuncIndex, result.code_desc, result.frame_slot_count,
+      result.tagged_parameter_slots, std::move(result.protected_instructions),
       std::move(result.source_positions), wasm::WasmCode::kWasmToCapiWrapper,
       wasm::ExecutionTier::kNone);
   return native_module->PublishCode(std::move(wasm_code));
@@ -6338,24 +6475,26 @@ wasm::WasmCompilationResult CompileWasmInterpreterEntry(
       wasm::WasmCode::kInterpreterEntry, func_name.begin(),
       WasmStubAssemblerOptions());
   result.result_tier = wasm::ExecutionTier::kInterpreter;
+  result.kind = wasm::WasmCompilationResult::kInterpreterEntry;

   return result;
 }

 MaybeHandle<Code> CompileCWasmEntry(Isolate* isolate, wasm::FunctionSig* sig) {
-  Zone zone(isolate->allocator(), ZONE_NAME);
-  Graph graph(&zone);
-  CommonOperatorBuilder common(&zone);
+  std::unique_ptr<Zone> zone =
+      base::make_unique<Zone>(isolate->allocator(), ZONE_NAME);
+  Graph* graph = new (zone.get()) Graph(zone.get());
+  CommonOperatorBuilder common(zone.get());
   MachineOperatorBuilder machine(
-      &zone, MachineType::PointerRepresentation(),
+      zone.get(), MachineType::PointerRepresentation(),
       InstructionSelector::SupportedMachineOperatorFlags(),
       InstructionSelector::AlignmentRequirements());
-  JSGraph jsgraph(isolate, &graph, &common, nullptr, nullptr, &machine);
+  JSGraph jsgraph(isolate, graph, &common, nullptr, nullptr, &machine);

   Node* control = nullptr;
   Node* effect = nullptr;

-  WasmWrapperGraphBuilder builder(&zone, &jsgraph, sig, nullptr,
+  WasmWrapperGraphBuilder builder(zone.get(), &jsgraph, sig, nullptr,
                                   StubCallMode::kCallCodeObject,
                                   wasm::WasmFeaturesFromIsolate(isolate));
   builder.set_control_ptr(&control);
@@ -6363,29 +6502,36 @@ MaybeHandle<Code> CompileCWasmEntry(Isolate* isolate, wasm::FunctionSig* sig) {
   builder.BuildCWasmEntry();

   // Schedule and compile to machine code.
-  CallDescriptor* incoming = Linkage::GetJSCallDescriptor(
-      &zone, false, CWasmEntryParameters::kNumParameters + 1,
-      CallDescriptor::kNoFlags);
+  MachineType sig_types[] = {MachineType::Pointer(),    // return
+                             MachineType::Pointer(),    // target
+                             MachineType::AnyTagged(),  // object_ref
+                             MachineType::Pointer(),    // argv
+                             MachineType::Pointer()};   // c_entry_fp
+  MachineSignature incoming_sig(1, 4, sig_types);
+  // Traps need the root register, for TailCallRuntimeWithCEntry to call
+  // Runtime::kThrowWasmError.
+  bool initialize_root_flag = true;
+  CallDescriptor* incoming = Linkage::GetSimplifiedCDescriptor(
+      zone.get(), &incoming_sig, initialize_root_flag);

   // Build a name in the form "c-wasm-entry:<params>:<returns>".
   static constexpr size_t kMaxNameLen = 128;
-  char debug_name[kMaxNameLen] = "c-wasm-entry:";
-  AppendSignature(debug_name, kMaxNameLen, sig);
-
-  MaybeHandle<Code> maybe_code = Pipeline::GenerateCodeForWasmHeapStub(
-      isolate, incoming, &graph, Code::C_WASM_ENTRY, debug_name,
-      AssemblerOptions::Default(isolate));
-  Handle<Code> code;
-  if (!maybe_code.ToHandle(&code)) {
-    return maybe_code;
-  }
-#ifdef ENABLE_DISASSEMBLER
-  if (FLAG_print_opt_code) {
-    CodeTracer::Scope tracing_scope(isolate->GetCodeTracer());
-    OFStream os(tracing_scope.file());
-    code->Disassemble(debug_name, os);
-  }
-#endif
+  auto debug_name = std::unique_ptr<char[]>(new char[kMaxNameLen]);
+  memcpy(debug_name.get(), "c-wasm-entry:", 14);
+  AppendSignature(debug_name.get(), kMaxNameLen, sig);
+
+  // Run the compilation job synchronously.
+  std::unique_ptr<OptimizedCompilationJob> job(
+      Pipeline::NewWasmHeapStubCompilationJob(
+          isolate, incoming, std::move(zone), graph, Code::C_WASM_ENTRY,
+          std::move(debug_name), AssemblerOptions::Default(isolate)));
+
+  if (job->PrepareJob(isolate) == CompilationJob::FAILED ||
+      job->ExecuteJob() == CompilationJob::FAILED ||
+      job->FinalizeJob(isolate) == CompilationJob::FAILED) {
+    return {};
+  }
+  Handle<Code> code = job->compilation_info()->code();

   return code;
 }
diff --git a/deps/v8/src/compiler/wasm-compiler.h b/deps/v8/src/compiler/wasm-compiler.h
index 460d0d2f1b1bbe..315733c396d70c 100644
--- a/deps/v8/src/compiler/wasm-compiler.h
+++ b/deps/v8/src/compiler/wasm-compiler.h
@@ -6,6 +6,7 @@
 #define V8_COMPILER_WASM_COMPILER_H_

 #include <memory>
+#include <utility>

 // Clients of this interface shouldn't depend on lots of compiler internals.
 // Do not include anything from src/compiler here!
@@ -20,6 +21,7 @@ namespace v8 {
 namespace internal {

 struct AssemblerOptions;
+class OptimizedCompilationJob;

 namespace compiler {

 // Forward declarations for some compiler data structures.
@@ -103,13 +105,23 @@ enum class WasmImportCallKind : uint8_t {
   kUseCallBuiltin
 };

-V8_EXPORT_PRIVATE WasmImportCallKind
-GetWasmImportCallKind(Handle<JSReceiver> callable, wasm::FunctionSig* sig,
+// TODO(wasm): There should be only one import kind for sloppy and strict in
+// order to reduce wrapper cache misses. The mode can be checked at runtime
+// instead.
+constexpr WasmImportCallKind kDefaultImportCallKind =
+    WasmImportCallKind::kJSFunctionArityMatchSloppy;
+
+// Resolves which import call wrapper is required for the given JS callable.
+// Returns the kind of wrapper needed and the ultimate target callable. Note
+// that some callables (e.g. a {WasmExportedFunction} or {WasmJSFunction}) just
+// wrap another target, which is why the ultimate target is returned as well.
+V8_EXPORT_PRIVATE std::pair<WasmImportCallKind, Handle<JSReceiver>>
+ResolveWasmImportCall(Handle<JSReceiver> callable, wasm::FunctionSig* sig,
                       bool has_bigint_feature);

 // Compiles an import call wrapper, which allows WASM to call imports.
-V8_EXPORT_PRIVATE wasm::WasmCode* CompileWasmImportCallWrapper(
-    wasm::WasmEngine*, wasm::NativeModule*, WasmImportCallKind,
+V8_EXPORT_PRIVATE wasm::WasmCompilationResult CompileWasmImportCallWrapper(
+    wasm::WasmEngine*, wasm::CompilationEnv* env, WasmImportCallKind,
     wasm::FunctionSig*, bool source_positions);

 // Compiles a host call wrapper, which allows WASM to call host functions.
@@ -117,11 +129,9 @@ wasm::WasmCode* CompileWasmCapiCallWrapper(wasm::WasmEngine*,
                                            wasm::NativeModule*,
                                            wasm::FunctionSig*,
                                            Address address);

-// Creates a code object calling a wasm function with the given signature,
-// callable from JS.
-V8_EXPORT_PRIVATE MaybeHandle<Code> CompileJSToWasmWrapper(Isolate*,
-                                                           wasm::FunctionSig*,
-                                                           bool is_import);
+// Returns an OptimizedCompilationJob object for a JS to Wasm wrapper.
+std::unique_ptr<OptimizedCompilationJob> NewJSToWasmCompilationJob(
+    Isolate* isolate, wasm::FunctionSig* sig, bool is_import);

 // Compiles a stub that redirects a call to a wasm function to the wasm
 // interpreter. It's ABI compatible with the compiled wasm function.
@@ -133,13 +143,13 @@ enum CWasmEntryParameters {
   kCodeEntry,
   kObjectRef,
   kArgumentsBuffer,
+  kCEntryFp,
   // marker:
   kNumParameters
 };

-// Compiles a stub with JS linkage, taking parameters as described by
-// {CWasmEntryParameters}. It loads the wasm parameters from the argument
-// buffer and calls the wasm function given as first parameter.
+// Compiles a stub with C++ linkage, to be called from Execution::CallWasm,
+// which knows how to feed it its parameters.
 MaybeHandle<Code> CompileCWasmEntry(Isolate* isolate, wasm::FunctionSig* sig);

 // Values from the instance object are cached between WASM-level function calls.
@@ -280,9 +290,9 @@ class WasmGraphBuilder {
   Node* GetGlobal(uint32_t index);
   Node* SetGlobal(uint32_t index, Node* val);
-  Node* GetTable(uint32_t table_index, Node* index,
+  Node* TableGet(uint32_t table_index, Node* index,
                  wasm::WasmCodePosition position);
-  Node* SetTable(uint32_t table_index, Node* index, Node* val,
+  Node* TableSet(uint32_t table_index, Node* index, Node* val,
                  wasm::WasmCodePosition position);
   //-----------------------------------------------------------------------
   // Operations that concern the linear memory.
@@ -377,6 +387,7 @@ class WasmGraphBuilder {
   Node* AtomicOp(wasm::WasmOpcode opcode, Node* const* inputs,
                  uint32_t alignment, uint32_t offset,
                  wasm::WasmCodePosition position);
+  Node* AtomicFence();

   // Returns a pointer to the dropped_data_segments array. Traps if the data
   // segment is active or has been dropped.
@@ -395,7 +406,7 @@ class WasmGraphBuilder {
   Node* TableInit(uint32_t table_index, uint32_t elem_segment_index, Node* dst,
                   Node* src, Node* size, wasm::WasmCodePosition position);
   Node* ElemDrop(uint32_t elem_segment_index, wasm::WasmCodePosition position);
-  Node* TableCopy(uint32_t table_src_index, uint32_t table_dst_index, Node* dst,
+  Node* TableCopy(uint32_t table_dst_index, uint32_t table_src_index, Node* dst,
                   Node* src, Node* size, wasm::WasmCodePosition position);
   Node* TableGrow(uint32_t table_index, Node* value, Node* delta);
   Node* TableSize(uint32_t table_index);
@@ -485,10 +496,10 @@ class WasmGraphBuilder {
   Node* BuildCallNode(wasm::FunctionSig* sig, Node** args,
                       wasm::WasmCodePosition position, Node* instance_node,
                       const Operator* op);
-  // Special implementation for CallIndirect for table 0.
-  Node* BuildIndirectCall(uint32_t sig_index, Node** args, Node*** rets,
-                          wasm::WasmCodePosition position,
-                          IsReturnCall continuation);
+  // Helper function for {BuildIndirectCall}.
+  void LoadIndirectFunctionTable(uint32_t table_index, Node** ift_size,
+                                 Node** ift_sig_ids, Node** ift_targets,
+                                 Node** ift_instances);
   Node* BuildIndirectCall(uint32_t table_index, uint32_t sig_index, Node** args,
                           Node*** rets, wasm::WasmCodePosition position,
                           IsReturnCall continuation);
@@ -591,8 +602,6 @@ class WasmGraphBuilder {
     return buf;
   }

-  Node* BuildLoadBuiltinFromInstance(int builtin_index);
-
   //-----------------------------------------------------------------------
   // Operations involving the CEntry, a dependency we want to remove
   // to get off the GC heap.
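The wasm-compiler.h hunks above replace GetWasmImportCallKind, which returned
only a kind, with ResolveWasmImportCall, which returns the wrapper kind
together with the resolved target, so callers no longer unwrap
{WasmExportedFunction} or {WasmJSFunction} short-cuts themselves. Below is a
minimal, self-contained C++ sketch of that pair-returning resolution pattern;
Kind, Callable, and Resolve are illustrative stand-ins, not V8's actual types.

#include <iostream>
#include <memory>
#include <utility>

// Illustrative stand-ins for V8's WasmImportCallKind and Handle<JSReceiver>.
enum class Kind { kLinkError, kUseCallBuiltin };

struct Callable {
  const char* name;
  std::shared_ptr<Callable> inner;  // set when this callable wraps another
};

// Mirrors the shape of ResolveWasmImportCall: classify the import and unwrap
// wrapper short-cuts, returning the kind together with the ultimate target.
std::pair<Kind, std::shared_ptr<Callable>> Resolve(
    std::shared_ptr<Callable> callable, bool signature_matches) {
  if (!signature_matches) return {Kind::kLinkError, callable};
  while (callable->inner) callable = callable->inner;
  return {Kind::kUseCallBuiltin, callable};
}

int main() {
  auto target = std::make_shared<Callable>(Callable{"plain_js_function", nullptr});
  auto wrapper = std::make_shared<Callable>(Callable{"exported_wrapper", target});
  auto [kind, resolved] = Resolve(wrapper, /*signature_matches=*/true);
  const char* prefix = (kind == Kind::kLinkError) ? "link error: " : "calls: ";
  std::cout << prefix << resolved->name << "\n";  // "calls: plain_js_function"
}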
diff --git a/deps/v8/src/d8/d8.cc b/deps/v8/src/d8/d8.cc
index a29c596909e0a6..6656ab608dc56a 100644
--- a/deps/v8/src/d8/d8.cc
+++ b/deps/v8/src/d8/d8.cc
@@ -48,6 +48,10 @@
 #include "src/utils/utils.h"
 #include "src/wasm/wasm-engine.h"

+#ifdef V8_USE_PERFETTO
+#include "perfetto/tracing.h"
+#endif  // V8_USE_PERFETTO
+
 #ifdef V8_INTL_SUPPORT
 #include "unicode/locid.h"
 #endif  // V8_INTL_SUPPORT
@@ -247,15 +251,7 @@ namespace tracing {

 namespace {

-// String options that can be used to initialize TraceOptions.
-const char kRecordUntilFull[] = "record-until-full";
-const char kRecordContinuously[] = "record-continuously";
-const char kRecordAsMuchAsPossible[] = "record-as-much-as-possible";
-
-const char kRecordModeParam[] = "record_mode";
-const char kEnableSystraceParam[] = "enable_systrace";
-const char kEnableArgumentFilterParam[] = "enable_argument_filter";
-const char kIncludedCategoriesParam[] = "included_categories";
+static constexpr char kIncludedCategoriesParam[] = "included_categories";

 class TraceConfigParser {
  public:
@@ -273,30 +269,11 @@ class TraceConfigParser {
     Local<Value> result = JSON::Parse(context, source).ToLocalChecked();
     Local<v8::Object> trace_config_object = Local<v8::Object>::Cast(result);

-    trace_config->SetTraceRecordMode(
-        GetTraceRecordMode(isolate, context, trace_config_object));
-    if (GetBoolean(isolate, context, trace_config_object,
-                   kEnableSystraceParam)) {
-      trace_config->EnableSystrace();
-    }
-    if (GetBoolean(isolate, context, trace_config_object,
-                   kEnableArgumentFilterParam)) {
-      trace_config->EnableArgumentFilter();
-    }
     UpdateIncludedCategoriesList(isolate, context, trace_config_object,
                                  trace_config);
   }

  private:
-  static bool GetBoolean(v8::Isolate* isolate, Local<Context> context,
-                         Local<v8::Object> object, const char* property) {
-    Local<Value> value = GetValue(isolate, context, object, property);
-    if (value->IsNumber()) {
-      return value->BooleanValue(isolate);
-    }
-    return false;
-  }
-
   static int UpdateIncludedCategoriesList(
       v8::Isolate* isolate, Local<Context> context, Local<v8::Object> object,
       platform::tracing::TraceConfig* trace_config) {
@@ -316,23 +293,6 @@ class TraceConfigParser {
     }
     return 0;
   }
-
-  static platform::tracing::TraceRecordMode GetTraceRecordMode(
-      v8::Isolate* isolate, Local<Context> context, Local<v8::Object> object) {
-    Local<Value> value = GetValue(isolate, context, object, kRecordModeParam);
-    if (value->IsString()) {
-      Local<String> v8_string = value->ToString(context).ToLocalChecked();
-      String::Utf8Value str(isolate, v8_string);
-      if (strcmp(kRecordUntilFull, *str) == 0) {
-        return platform::tracing::TraceRecordMode::RECORD_UNTIL_FULL;
-      } else if (strcmp(kRecordContinuously, *str) == 0) {
-        return platform::tracing::TraceRecordMode::RECORD_CONTINUOUSLY;
-      } else if (strcmp(kRecordAsMuchAsPossible, *str) == 0) {
-        return platform::tracing::TraceRecordMode::RECORD_AS_MUCH_AS_POSSIBLE;
-      }
-    }
-    return platform::tracing::TraceRecordMode::RECORD_UNTIL_FULL;
-  }
 };

 }  // namespace
@@ -1927,7 +1887,7 @@ static void PrintNonErrorsMessageCallback(Local<Message> message,
   auto ToCString = [](const v8::String::Utf8Value& value) {
     return *value ? *value : "<string conversion failed>";
   };
-  Isolate* isolate = Isolate::GetCurrent();
+  Isolate* isolate = message->GetIsolate();
   v8::String::Utf8Value msg(isolate, message->Get());
   const char* msg_string = ToCString(msg);
   // Print (filename):(line number): (message).
@@ -2001,20 +1961,20 @@ int LineFromOffset(Local<debug::Script> script, int offset) {
   return location.GetLineNumber();
 }

-void WriteLcovDataForRange(std::vector<uint32_t>& lines, int start_line,
+void WriteLcovDataForRange(std::vector<uint32_t>* lines, int start_line,
                            int end_line, uint32_t count) {
   // Ensure space in the array.
-  lines.resize(std::max(static_cast<size_t>(end_line + 1), lines.size()), 0);
+  lines->resize(std::max(static_cast<size_t>(end_line + 1), lines->size()), 0);
   // Boundary lines could be shared between two functions with different
   // invocation counts. Take the maximum.
-  lines[start_line] = std::max(lines[start_line], count);
-  lines[end_line] = std::max(lines[end_line], count);
+  (*lines)[start_line] = std::max((*lines)[start_line], count);
+  (*lines)[end_line] = std::max((*lines)[end_line], count);
   // Invocation counts for non-boundary lines are overwritten.
-  for (int k = start_line + 1; k < end_line; k++) lines[k] = count;
+  for (int k = start_line + 1; k < end_line; k++) (*lines)[k] = count;
 }

 void WriteLcovDataForNamedRange(std::ostream& sink,
-                                std::vector<uint32_t>& lines,
+                                std::vector<uint32_t>* lines,
                                 const std::string& name, int start_line,
                                 int end_line, uint32_t count) {
   WriteLcovDataForRange(lines, start_line, end_line, count);
@@ -2064,7 +2024,7 @@ void Shell::WriteLcovData(v8::Isolate* isolate, const char* file) {
         name_stream << start.GetColumnNumber() << ">";
       }

-      WriteLcovDataForNamedRange(sink, lines, name_stream.str(), start_line,
+      WriteLcovDataForNamedRange(sink, &lines, name_stream.str(), start_line,
                                  end_line, count);
     }

@@ -2074,7 +2034,7 @@ void Shell::WriteLcovData(v8::Isolate* isolate, const char* file) {
       int start_line = LineFromOffset(script, block_data.StartOffset());
       int end_line = LineFromOffset(script, block_data.EndOffset() - 1);
       uint32_t count = block_data.Count();
-      WriteLcovDataForRange(lines, start_line, end_line, count);
+      WriteLcovDataForRange(&lines, start_line, end_line, count);
     }
   }
   // Write per-line coverage. LCOV uses 1-based line numbers.
@@ -3350,24 +3310,25 @@ int Shell::Main(int argc, char* argv[]) {
   std::unique_ptr<platform::tracing::TracingController> tracing;
   std::ofstream trace_file;
-#ifdef V8_USE_PERFETTO
-  std::ofstream perfetto_trace_file;
-#endif  // V8_USE_PERFETTO
   if (options.trace_enabled && !i::FLAG_verify_predictable) {
     tracing = base::make_unique<platform::tracing::TracingController>();
-
     trace_file.open(options.trace_path ? options.trace_path : "v8_trace.json");
     DCHECK(trace_file.good());
+
+#ifdef V8_USE_PERFETTO
+    // Set up the in-process backend that the tracing controller will connect
+    // to.
+    perfetto::TracingInitArgs init_args;
+    init_args.backends = perfetto::BackendType::kInProcessBackend;
+    perfetto::Tracing::Initialize(init_args);
+
+    tracing->InitializeForPerfetto(&trace_file);
+#else
     platform::tracing::TraceBuffer* trace_buffer =
         platform::tracing::TraceBuffer::CreateTraceBufferRingBuffer(
             platform::tracing::TraceBuffer::kRingBufferChunks,
             platform::tracing::TraceWriter::CreateJSONTraceWriter(trace_file));
     tracing->Initialize(trace_buffer);
-
-#ifdef V8_USE_PERFETTO
-    perfetto_trace_file.open("v8_perfetto_trace.json");
-    DCHECK(trace_file.good());
-    tracing->InitializeForPerfetto(&perfetto_trace_file);
 #endif  // V8_USE_PERFETTO
   }
diff --git a/deps/v8/src/date/OWNERS b/deps/v8/src/date/OWNERS
index fc4aa8d5acf2f4..6edeeae0ea188a 100644
--- a/deps/v8/src/date/OWNERS
+++ b/deps/v8/src/date/OWNERS
@@ -1,3 +1,6 @@
 ishell@chromium.org
 jshin@chromium.org
 ulan@chromium.org
+verwaest@chromium.org
+
+# COMPONENT: Blink>JavaScript>Runtime
diff --git a/deps/v8/src/debug/OWNERS b/deps/v8/src/debug/OWNERS
index 46b472480d1a79..220aa1ce26b153 100644
--- a/deps/v8/src/debug/OWNERS
+++ b/deps/v8/src/debug/OWNERS
@@ -1,5 +1,3 @@
-set noparent
-
 bmeurer@chromium.org
 jgruber@chromium.org
 mvstanton@chromium.org
diff --git a/deps/v8/src/debug/debug-coverage.cc b/deps/v8/src/debug/debug-coverage.cc
index 24aa617524d011..15aad1fcc25556 100644
--- a/deps/v8/src/debug/debug-coverage.cc
+++ b/deps/v8/src/debug/debug-coverage.cc
@@ -61,7 +61,8 @@ bool CompareCoverageBlock(const CoverageBlock& a, const CoverageBlock& b) {
   return a.start < b.start;
 }

-void SortBlockData(std::vector<CoverageBlock>& v) {
+void SortBlockData(
+    std::vector<CoverageBlock>& v) {  // NOLINT(runtime/references)
   // Sort according to the block nesting structure.
   std::sort(v.begin(), v.end(), CompareCoverageBlock);
 }
@@ -507,9 +508,9 @@ void CollectAndMaybeResetCounts(Isolate* isolate,
                  ->feedback_vectors_for_profiling_tools()
                  ->IsArrayList());
       DCHECK_EQ(v8::debug::CoverageMode::kBestEffort, coverage_mode);
-      HeapIterator heap_iterator(isolate->heap());
-      for (HeapObject current_obj = heap_iterator.next();
-           !current_obj.is_null(); current_obj = heap_iterator.next()) {
+      HeapObjectIterator heap_iterator(isolate->heap());
+      for (HeapObject current_obj = heap_iterator.Next();
+           !current_obj.is_null(); current_obj = heap_iterator.Next()) {
         if (!current_obj.IsJSFunction()) continue;
         JSFunction func = JSFunction::cast(current_obj);
         SharedFunctionInfo shared = func.shared();
@@ -714,9 +715,9 @@ void Coverage::SelectMode(Isolate* isolate, debug::CoverageMode mode) {
       std::vector<Handle<JSFunction>> funcs_needing_feedback_vector;
       {
-        HeapIterator heap_iterator(isolate->heap());
-        for (HeapObject o = heap_iterator.next(); !o.is_null();
-             o = heap_iterator.next()) {
+        HeapObjectIterator heap_iterator(isolate->heap());
+        for (HeapObject o = heap_iterator.Next(); !o.is_null();
+             o = heap_iterator.Next()) {
           if (o.IsJSFunction()) {
             JSFunction func = JSFunction::cast(o);
             if (func.has_closure_feedback_cell_array()) {
diff --git a/deps/v8/src/debug/debug-evaluate.cc b/deps/v8/src/debug/debug-evaluate.cc
index 65e62f2aac8c75..0d8a7b2c7e67d4 100644
--- a/deps/v8/src/debug/debug-evaluate.cc
+++ b/deps/v8/src/debug/debug-evaluate.cc
@@ -51,7 +51,7 @@ MaybeHandle<Object> DebugEvaluate::Global(Isolate* isolate,
 }

 MaybeHandle<Object> DebugEvaluate::Local(Isolate* isolate,
-                                         StackFrame::Id frame_id,
+                                         StackFrameId frame_id,
                                          int inlined_jsframe_index,
                                          Handle<String> source,
                                          bool throw_on_side_effect) {
@@ -312,6 +312,7 @@ bool IntrinsicHasNoSideEffect(Runtime::FunctionId id) {
     V(ObjectValuesSkipFastPath)           \
     V(ObjectGetOwnPropertyNames)          \
     V(ObjectGetOwnPropertyNamesTryFast)   \
+    V(ObjectIsExtensible)                 \
     V(RegExpInitializeAndCompile)         \
     V(StackGuard)                         \
     V(StringAdd)                          \
@@ -771,6 +772,8 @@ DebugInfo::SideEffectState BuiltinGetSideEffectState(Builtins::Name id) {
     case Builtins::kStrictPoisonPillThrower:
     case Builtins::kAllocateInYoungGeneration:
     case Builtins::kAllocateInOldGeneration:
+    case Builtins::kAllocateRegularInYoungGeneration:
+    case Builtins::kAllocateRegularInOldGeneration:
       return DebugInfo::kHasNoSideEffect;

     // Set builtins.
@@ -904,7 +907,7 @@ static bool TransitivelyCalledBuiltinHasNoSideEffect(Builtins::Name caller,
   switch (callee) {
     // Transitively called Builtins:
     case Builtins::kAbort:
-    case Builtins::kAbortJS:
+    case Builtins::kAbortCSAAssert:
     case Builtins::kAdaptorWithBuiltinExitFrame:
     case Builtins::kArrayConstructorImpl:
     case Builtins::kArrayEveryLoopContinuation:
@@ -959,6 +962,8 @@ static bool TransitivelyCalledBuiltinHasNoSideEffect(Builtins::Name caller,
     case Builtins::kOrdinaryToPrimitive_String:
     case Builtins::kParseInt:
    case Builtins::kProxyHasProperty:
+    case Builtins::kProxyIsExtensible:
+    case Builtins::kProxyGetPrototypeOf:
     case Builtins::kRecordWrite:
     case Builtins::kStringAdd_CheckNone:
     case Builtins::kStringEqual:
diff --git a/deps/v8/src/debug/debug-evaluate.h b/deps/v8/src/debug/debug-evaluate.h
index 50817691d72b96..7819892050261e 100644
--- a/deps/v8/src/debug/debug-evaluate.h
+++ b/deps/v8/src/debug/debug-evaluate.h
@@ -7,8 +7,11 @@

 #include <vector>

+#include "src/common/globals.h"
 #include "src/debug/debug-frames.h"
 #include "src/debug/debug-scopes.h"
+#include "src/debug/debug.h"
+#include "src/execution/frames.h"
 #include "src/objects/objects.h"
 #include "src/objects/shared-function-info.h"
 #include "src/objects/string-table.h"
@@ -28,7 +31,7 @@ class DebugEvaluate : public AllStatic {
   //  - Parameters and stack-allocated locals need to be materialized. Altered
   //    values need to be written back to the stack afterwards.
   //  - The arguments object needs to materialized.
-  static MaybeHandle<Object> Local(Isolate* isolate, StackFrame::Id frame_id,
+  static MaybeHandle<Object> Local(Isolate* isolate, StackFrameId frame_id,
                                    int inlined_jsframe_index,
                                    Handle<String> source,
                                    bool throw_on_side_effect);
diff --git a/deps/v8/src/debug/debug-frames.cc b/deps/v8/src/debug/debug-frames.cc
index a6ee31738dc3fc..4fe062b277a382 100644
--- a/deps/v8/src/debug/debug-frames.cc
+++ b/deps/v8/src/debug/debug-frames.cc
@@ -52,10 +52,13 @@ FrameInspector::FrameInspector(StandardFrame* frame, int inlined_frame_index,
   }
 }

-// NOLINTNEXTLINE
-FrameInspector::~FrameInspector() {
-  // Destructor needs to be defined in the .cc file, because it instantiates
-  // std::unique_ptr destructors but the types are not known in the header.
+// Destructor needs to be defined in the .cc file, because it instantiates
+// std::unique_ptr destructors but the types are not known in the header.
+FrameInspector::~FrameInspector() = default;
+
+JavaScriptFrame* FrameInspector::javascript_frame() {
+  return frame_->is_arguments_adaptor()
+             ? ArgumentsAdaptorFrame::cast(frame_)
+             : JavaScriptFrame::cast(frame_);
+}

 int FrameInspector::GetParametersCount() {
@@ -90,8 +93,10 @@ bool FrameInspector::ParameterIsShadowedByContextLocal(
   VariableMode mode;
   InitializationFlag init_flag;
   MaybeAssignedFlag maybe_assigned_flag;
+  RequiresBrandCheckFlag requires_brand_check;
   return ScopeInfo::ContextSlotIndex(*info, *parameter_name, &mode, &init_flag,
-                                     &maybe_assigned_flag) != -1;
+                                     &maybe_assigned_flag,
+                                     &requires_brand_check) != -1;
 }

 RedirectActiveFunctions::RedirectActiveFunctions(SharedFunctionInfo shared,
diff --git a/deps/v8/src/debug/debug-frames.h b/deps/v8/src/debug/debug-frames.h
index 5ee4f8b61f472c..274d10030af516 100644
--- a/deps/v8/src/debug/debug-frames.h
+++ b/deps/v8/src/debug/debug-frames.h
@@ -6,7 +6,6 @@
 #define V8_DEBUG_DEBUG_FRAMES_H_

 #include "src/deoptimizer/deoptimizer.h"
-#include "src/execution/frames.h"
 #include "src/execution/isolate.h"
 #include "src/execution/v8threads.h"
 #include "src/objects/objects.h"
@@ -15,12 +14,15 @@
 namespace v8 {
 namespace internal {

+class JavaScriptFrame;
+class StandardFrame;
+
 class FrameInspector {
  public:
   FrameInspector(StandardFrame* frame, int inlined_frame_index,
                  Isolate* isolate);

-  ~FrameInspector();  // NOLINT (modernize-use-equals-default)
+  ~FrameInspector();

   int GetParametersCount();
   Handle<JSFunction> GetFunction() const { return function_; }
@@ -37,10 +39,7 @@ class FrameInspector {
   bool IsWasm();
   bool IsJavaScript();

-  inline JavaScriptFrame* javascript_frame() {
-    return frame_->is_arguments_adaptor() ? ArgumentsAdaptorFrame::cast(frame_)
-                                          : JavaScriptFrame::cast(frame_);
-  }
+  JavaScriptFrame* javascript_frame();

   int inlined_frame_index() const { return inlined_frame_index_; }
diff --git a/deps/v8/src/debug/debug-interface.h b/deps/v8/src/debug/debug-interface.h
index 79222371f966cb..59bc6d08632e75 100644
--- a/deps/v8/src/debug/debug-interface.h
+++ b/deps/v8/src/debug/debug-interface.h
@@ -164,8 +164,9 @@ class WasmScript : public Script {
   uint32_t GetFunctionHash(int function_index);
 };

-V8_EXPORT_PRIVATE void GetLoadedScripts(Isolate* isolate,
-                                        PersistentValueVector