diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml index 5018ed4c..61cfa187 100644 --- a/.github/workflows/test.yml +++ b/.github/workflows/test.yml @@ -12,11 +12,8 @@ jobs: name: Tests on ${{ matrix.go-version }} ${{ matrix.platform }} strategy: matrix: - go-version: [1.16.8, 1.17.1] - # We use macos-11 over macos-latest because macos-latest defaults to Catalina(10.15) and not Big Sur(11.0) - # We can switch to macos-latest whenever Big Sur becomes the default - # See https://github.com/actions/virtual-environments#available-environments - platform: [ubuntu-latest, macos-11] + go-version: [1.18.10, 1.19.5] + platform: [ubuntu-latest, macos-latest] runs-on: ${{ matrix.platform }} steps: diff --git a/.github/workflows/v8build.yml b/.github/workflows/v8build.yml index b9cd7ea4..560a8f5c 100644 --- a/.github/workflows/v8build.yml +++ b/.github/workflows/v8build.yml @@ -14,7 +14,7 @@ jobs: # # We need xcode 12.4 or newer to cross compile between arm64/amd64 # https://github.com/actions/virtual-environments/blob/main/images/macos/macos-11-Readme.md#xcode - platform: [ubuntu-18.04, macos-11] + platform: [ubuntu-22.04, macos-11] arch: [x86_64, arm64] runs-on: ${{ matrix.platform }} steps: @@ -27,10 +27,10 @@ jobs: run: cd deps/depot_tools && git config --unset-all remote.origin.fetch; git config --add remote.origin.fetch +refs/heads/*:refs/remotes/origin/* shell: bash - name: Install g++-aarch64-linux-gnu - if: matrix.platform == 'ubuntu-18.04' && matrix.arch == 'arm64' + if: matrix.platform == 'ubuntu-22.04' && matrix.arch == 'arm64' run: sudo apt update && sudo apt install g++-aarch64-linux-gnu -y - name: Build V8 linux - if: matrix.platform == 'ubuntu-18.04' + if: matrix.platform == 'ubuntu-22.04' run: cd deps && ./build.py --no-clang --arch ${{ matrix.arch }} - name: Build V8 macOS if: matrix.platform == 'macos-11' diff --git a/.gitignore b/.gitignore index f676dd07..530cd503 100644 --- a/.gitignore +++ b/.gitignore @@ -6,7 +6,7 @@ .gclient_entries deps/darwin-x86_64/libv8_debug.a - +deps/.gclient_previous* c.out - +.idea/* /v8go.test diff --git a/CHANGELOG.md b/CHANGELOG.md index 8cd8d1cc..458e3a8e 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -12,6 +12,8 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 ### Fixed - Use string length to ensure null character-containing strings in Go/JS are not terminated early. 
- Object.Set with an empty key string is now supported +- Upgrade to V8 10.9.194.9 +- Upgrade V8 build OS to Ubuntu 22.04 ## [v0.7.0] - 2021-12-09 diff --git a/cgo.go b/cgo.go index a8d2a878..4346cbcb 100644 --- a/cgo.go +++ b/cgo.go @@ -6,7 +6,7 @@ package v8go //go:generate clang-format -i --verbose -style=Chromium v8go.h v8go.cc -// #cgo CXXFLAGS: -fno-rtti -fPIC -std=c++14 -DV8_COMPRESS_POINTERS -DV8_31BIT_SMIS_ON_64BIT_ARCH -I${SRCDIR}/deps/include -Wall +// #cgo CXXFLAGS: -fno-rtti -fPIC -std=c++17 -DV8_COMPRESS_POINTERS -DV8_31BIT_SMIS_ON_64BIT_ARCH -I${SRCDIR}/deps/include -Wall -DV8_ENABLE_SANDBOX // #cgo LDFLAGS: -pthread -lv8 // #cgo darwin,amd64 LDFLAGS: -L${SRCDIR}/deps/darwin_x86_64 // #cgo darwin,arm64 LDFLAGS: -L${SRCDIR}/deps/darwin_arm64 diff --git a/context_test.go b/context_test.go index 503a463a..84f844bb 100644 --- a/context_test.go +++ b/context_test.go @@ -48,7 +48,7 @@ func TestJSExceptions(t *testing.T) { origin string err string }{ - {"SyntaxError", "bad js syntax", "syntax.js", "SyntaxError: Unexpected identifier"}, + {"SyntaxError", "bad js syntax", "syntax.js", "SyntaxError: Unexpected identifier 'js'"}, {"ReferenceError", "add()", "add.js", "ReferenceError: add is not defined"}, } diff --git a/deps/build.py b/deps/build.py index 740218f5..805585bd 100755 --- a/deps/build.py +++ b/deps/build.py @@ -67,7 +67,6 @@ v8_enable_i18n_support=true icu_use_data_file=false v8_enable_test_features=false -v8_untrusted_code_mitigations=false exclude_unwind_tables=true """ diff --git a/deps/darwin_arm64/libv8.a b/deps/darwin_arm64/libv8.a index e5d3d472..2b761650 100644 Binary files a/deps/darwin_arm64/libv8.a and b/deps/darwin_arm64/libv8.a differ diff --git a/deps/darwin_x86_64/libv8.a b/deps/darwin_x86_64/libv8.a index 9c12f4a2..3a75d220 100644 Binary files a/deps/darwin_x86_64/libv8.a and b/deps/darwin_x86_64/libv8.a differ diff --git a/deps/depot_tools b/deps/depot_tools index 1b4881c9..5e5802d7 160000 --- a/deps/depot_tools +++ b/deps/depot_tools @@ -1 +1 @@ -Subproject commit 1b4881c9300a81bac80eace84caa2c10c2e41fa5 +Subproject commit 5e5802d7a244d01039076113b3d91b953105c58b diff --git a/deps/include/OWNERS b/deps/include/OWNERS index 0222513d..535040c5 100644 --- a/deps/include/OWNERS +++ b/deps/include/OWNERS @@ -7,9 +7,12 @@ yangguo@chromium.org per-file *DEPS=file:../COMMON_OWNERS per-file v8-internal.h=file:../COMMON_OWNERS -per-file v8-inspector.h=file:../src/inspector/OWNERS -per-file v8-inspector-protocol.h=file:../src/inspector/OWNERS + +per-file v8-debug.h=file:../src/debug/OWNERS + per-file js_protocol.pdl=file:../src/inspector/OWNERS +per-file v8-inspector*=file:../src/inspector/OWNERS # Needed by the auto_tag builder per-file v8-version.h=v8-ci-autoroll-builder@chops-service-accounts.iam.gserviceaccount.com diff --git a/deps/include/cppgc/README.md b/deps/include/cppgc/README.md index e4543998..d825ea5b 100644 --- a/deps/include/cppgc/README.md +++ b/deps/include/cppgc/README.md @@ -1,16 +1,135 @@ # Oilpan: C++ Garbage Collection Oilpan is an open-source garbage collection library for C++ that can be used stand-alone or in collaboration with V8's JavaScript garbage collector. +Oilpan implements mark-and-sweep garbage collection (GC) with limited compaction (for a subset of objects).
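To make the vendored Oilpan docs concrete, here is a minimal sketch of Oilpan-managed C++. The `Parent`/`Child` types are invented for illustration; the `cppgc` calls are the public API from the headers in this diff:

```cpp
#include "cppgc/allocation.h"
#include "cppgc/garbage-collected.h"
#include "cppgc/member.h"
#include "cppgc/visitor.h"

class Child final : public cppgc::GarbageCollected<Child> {
 public:
  void Trace(cppgc::Visitor*) const {}  // No outgoing references to trace.
};

class Parent final : public cppgc::GarbageCollected<Parent> {
 public:
  // Heap-to-heap references are wrapped in Member<> so the marker can
  // discover them via Trace().
  void Trace(cppgc::Visitor* visitor) const { visitor->Trace(child_); }

  cppgc::Member<Child> child_;
};

// Allocation goes through MakeGarbageCollected; there is no explicit free.
// The AllocationHandle is obtained from a cppgc::Heap (or v8::CppHeap).
Parent* MakeParent(cppgc::AllocationHandle& handle) {
  Parent* parent = cppgc::MakeGarbageCollected<Parent>(handle);
  parent->child_ = cppgc::MakeGarbageCollected<Child>(handle);
  return parent;
}
```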
**Key properties** + - Trace-based garbage collection; +- Incremental and concurrent marking; +- Incremental and concurrent sweeping; - Precise on-heap memory layout; - Conservative on-stack memory layout; - Allows for collection with and without considering stack; -- Incremental and concurrent marking; -- Incremental and concurrent sweeping; - Non-incremental and non-concurrent compaction for selected spaces; See the [Hello World](https://chromium.googlesource.com/v8/v8/+/main/samples/cppgc/hello-world.cc) example on how to get started using Oilpan to manage C++ code. Oilpan follows V8's project organization, see e.g. on how we accept [contributions](https://v8.dev/docs/contribute) and [provide a stable API](https://v8.dev/docs/api). + +## Threading model + +Oilpan features thread-local garbage collection and assumes heaps are not shared among threads. +In other words, objects are accessed and ultimately reclaimed by the garbage collector on the same thread that allocates them. +This allows Oilpan to run garbage collection in parallel with mutators running in other threads. + +References to objects belonging to another thread's heap are modeled using cross-thread roots. +This is even true for on-heap to on-heap references. + +Oilpan heaps may generally not be accessed from different threads unless otherwise noted. + +## Heap partitioning + +Oilpan's heaps are partitioned into spaces. +The space for an object is chosen depending on a number of criteria, e.g.: + +- Objects over 64KiB are allocated in a large object space +- Objects can be assigned to a dedicated custom space. + Custom spaces can also be marked as compactable. +- Other objects are allocated in one of the normal page spaces bucketed depending on their size. + +## Precise and conservative garbage collection + +Oilpan supports two kinds of GCs: + +1. **Conservative GC.** +A GC is called conservative when it is executed while the regular native stack is not empty. +In this case, the native stack might contain references to objects in Oilpan's heap, which should be kept alive. +The GC scans the native stack and treats the pointers discovered via the native stack as part of the root set. +This kind of GC is considered imprecise because values on the stack other than references may accidentally appear as references to on-heap objects, which means these objects will be kept alive despite being in practice unreachable from the application through an actual reference. + +2. **Precise GC.** +A precise GC is triggered at the end of an event loop, which is controlled by an embedder via a platform. +At this point, it is guaranteed that there are no on-stack references pointing to Oilpan's heap. +This means there is no risk of confusing other value types with references. +Oilpan has precise knowledge of on-heap object layouts, and so it knows exactly where pointers lie in memory. +Oilpan can just start marking from the regular root set and collect all garbage precisely. + +## Atomic, incremental and concurrent garbage collection + +Oilpan has three modes of operation: + +1. **Atomic GC.** +The entire GC cycle, including all its phases (e.g. see [Marking](#Marking-phase) and [Sweeping](#Sweeping-phase)), is executed back to back in a single pause. +This mode of operation is also known as Stop-The-World (STW) garbage collection. +It results in the most jank (due to a single long pause), but is overall the most efficient (e.g. no need for write barriers). + +2.
**Incremental GC.** +Garbage collection work is split up into multiple steps which are interleaved with the mutator, i.e. user code chunked into tasks. +Each step is a small chunk of work that is executed either as dedicated tasks between mutator tasks or, as needed, during mutator tasks. +Using incremental GC introduces the need for write barriers that record changes to the object graph so that a consistent state is observed and no objects are accidentally considered dead and reclaimed. +The incremental steps are followed by a smaller atomic pause to finalize garbage collection. +The smaller pause times, due to smaller chunks of work, help with reducing jank. + +3. **Concurrent GC.** +This is the most common type of GC. +It builds on top of incremental GC and offloads much of the garbage collection work away from the mutator thread and on to background threads. +Using concurrent GC allows the mutator thread to spend less time on GC and more on the actual mutator. + +## Marking phase + +The marking phase consists of the following steps: + +1. Mark all objects in the root set. + +2. Mark all objects transitively reachable from the root set by calling `Trace()` methods defined on each object. + +3. Clear out all weak handles to unreachable objects and run weak callbacks. + +The marking phase can be executed atomically in a stop-the-world manner, in which all 3 steps are executed one after the other. + +Alternatively, it can also be executed incrementally/concurrently. +With incremental/concurrent marking, step 1 is executed in a short pause after which the mutator regains control. +Step 2 is repeatedly executed in an interleaved manner with the mutator. +When the GC is ready to finalize, i.e. step 2 is (almost) finished, another short pause is triggered in which step 2 is finished and step 3 is performed. + +To prevent use-after-free (UAF) issues, Oilpan is required to know about all edges in the object graph. +This means that all pointers except on-stack pointers must be wrapped with Oilpan's handles (i.e., Persistent<>, Member<>, WeakMember<>). +Raw pointers to on-heap objects create an edge that Oilpan cannot observe and cause UAF issues. +Thus, raw pointers shall not be used to reference on-heap objects (except for raw pointers on native stacks). + +## Sweeping phase + +The sweeping phase consists of the following steps: + +1. Invoke pre-finalizers. +At this point, no destructors have been invoked and no memory has been reclaimed. +Pre-finalizers are allowed to access any other on-heap objects, even those that may get destructed. + +2. Sweeping invokes destructors of the dead (unreachable) objects and reclaims memory to be reused by future allocations. + +Assumptions should not be made about the order and the timing of destructor execution. +There is no guarantee on the order in which the destructors are invoked. +That's why destructors must not access any other on-heap objects (which might have already been destructed). +If some destructor unavoidably needs to access other on-heap objects, it will have to be converted to a pre-finalizer. +The pre-finalizer is allowed to access other on-heap objects. + +The mutator is resumed before all destructors have run. +For example, imagine a case where X is a client of Y, and Y holds a list of clients. +If the code relies on X's destructor removing X from the list, there is a risk that Y iterates the list and calls some method of X which may touch other on-heap objects. +This causes a use-after-free; a sketch of the pre-finalizer remedy follows below.
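Hedged as illustration (the class bodies are invented; `CPPGC_USING_PRE_FINALIZER` is the macro from `cppgc/prefinalizer.h`):

```cpp
#include "cppgc/garbage-collected.h"
#include "cppgc/member.h"
#include "cppgc/prefinalizer.h"
#include "cppgc/visitor.h"

class X;

class Y final : public cppgc::GarbageCollected<Y> {
 public:
  void RemoveClient(X*);  // Unlinks the client from the list; body elided.
  void Trace(cppgc::Visitor* visitor) const;  // Traces the client list.
};

class X final : public cppgc::GarbageCollected<X> {
  CPPGC_USING_PRE_FINALIZER(X, Dispose);

 public:
  explicit X(Y* y) : y_(y) {}

  // Pre-finalizer: runs before any destructor of this sweep cycle, so y_
  // and its client list are still fully intact here.
  void Dispose() { y_->RemoveClient(this); }

  void Trace(cppgc::Visitor* visitor) const { visitor->Trace(y_); }

 private:
  cppgc::Member<Y> y_;
};
```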
+Care must be taken to make sure that X is explicitly removed from the list, in a way that doesn't rely on X's destructor (e.g. via a pre-finalizer), before the mutator resumes its execution. + +Similar to marking, sweeping can be executed in either an atomic stop-the-world manner or incrementally/concurrently. +With incremental/concurrent sweeping, step 2 is interleaved with the mutator. +Incremental/concurrent sweeping can be atomically finalized in case it is needed to trigger another GC cycle. +Even with concurrent sweeping, destructors are guaranteed to run on the thread the object has been allocated on to preserve C++ semantics. + +Notes: + +* Weak processing runs only when the holder object of the WeakMember outlives the pointed-to object. +If the holder object and the pointed-to object die at the same time, weak processing doesn't run. +It is wrong to write code assuming that the weak processing always runs. + +* Pre-finalizers are heavy because the thread needs to scan all pre-finalizers at each sweeping phase to determine which pre-finalizers should be invoked (the thread needs to invoke pre-finalizers of dead objects). +Adding pre-finalizers to frequently created objects should be avoided. diff --git a/deps/include/cppgc/common.h b/deps/include/cppgc/common.h index b6dbff3d..96103836 100644 --- a/deps/include/cppgc/common.h +++ b/deps/include/cppgc/common.h @@ -5,7 +5,6 @@ #ifndef INCLUDE_CPPGC_COMMON_H_ #define INCLUDE_CPPGC_COMMON_H_ -// TODO(chromium:1056170): Remove dependency on v8. #include "v8config.h" // NOLINT(build/include_directory) namespace cppgc { diff --git a/deps/include/cppgc/cross-thread-persistent.h b/deps/include/cppgc/cross-thread-persistent.h index c8751e1d..1fa28afa 100644 --- a/deps/include/cppgc/cross-thread-persistent.h +++ b/deps/include/cppgc/cross-thread-persistent.h @@ -120,7 +120,7 @@ class BasicCrossThreadPersistent final : public CrossThreadPersistentBase, if (!IsValid(raw)) return; PersistentRegionLock guard; CrossThreadPersistentRegion& region = this->GetPersistentRegion(raw); - SetNode(region.AllocateNode(this, &Trace)); + SetNode(region.AllocateNode(this, &TraceAsRoot)); this->CheckPointer(raw); } @@ -138,7 +138,7 @@ class BasicCrossThreadPersistent final : public CrossThreadPersistentBase, : CrossThreadPersistentBase(raw), LocationPolicy(loc) { if (!IsValid(raw)) return; CrossThreadPersistentRegion& region = this->GetPersistentRegion(raw); - SetNode(region.AllocateNode(this, &Trace)); + SetNode(region.AllocateNode(this, &TraceAsRoot)); this->CheckPointer(raw); } @@ -349,9 +349,8 @@ class BasicCrossThreadPersistent final : public CrossThreadPersistentBase, return ptr && ptr != kSentinelPointer; } - static void Trace(Visitor* v, const void* ptr) { - const auto* handle = static_cast<const BasicCrossThreadPersistent*>(ptr); - v->TraceRoot(*handle, handle->Location()); + static void TraceAsRoot(RootVisitor& root_visitor, const void* ptr) { + root_visitor.Trace(*static_cast<const BasicCrossThreadPersistent*>(ptr)); } void AssignUnsafe(T* ptr) { @@ -378,7 +377,7 @@ class BasicCrossThreadPersistent final : public CrossThreadPersistentBase, SetValue(ptr); if (!IsValid(ptr)) return; PersistentRegionLock guard; - SetNode(this->GetPersistentRegion(ptr).AllocateNode(this, &Trace)); + SetNode(this->GetPersistentRegion(ptr).AllocateNode(this, &TraceAsRoot)); this->CheckPointer(ptr); } @@ -398,7 +397,7 @@ class BasicCrossThreadPersistent final : public CrossThreadPersistentBase, } SetValue(ptr); if (!IsValid(ptr)) return; - SetNode(this->GetPersistentRegion(ptr).AllocateNode(this, &Trace)); +
SetNode(this->GetPersistentRegion(ptr).AllocateNode(this, &TraceAsRoot)); this->CheckPointer(ptr); } @@ -416,7 +415,7 @@ class BasicCrossThreadPersistent final : public CrossThreadPersistentBase, return static_cast(const_cast(GetValueFromGC())); } - friend class cppgc::Visitor; + friend class internal::RootVisitor; }; template diff --git a/deps/include/cppgc/default-platform.h b/deps/include/cppgc/default-platform.h index 2ccdeddd..a27871cc 100644 --- a/deps/include/cppgc/default-platform.h +++ b/deps/include/cppgc/default-platform.h @@ -6,7 +6,6 @@ #define INCLUDE_CPPGC_DEFAULT_PLATFORM_H_ #include -#include #include "cppgc/platform.h" #include "libplatform/libplatform.h" @@ -20,15 +19,6 @@ namespace cppgc { */ class V8_EXPORT DefaultPlatform : public Platform { public: - /** - * Use this method instead of 'cppgc::InitializeProcess' when using - * 'cppgc::DefaultPlatform'. 'cppgc::DefaultPlatform::InitializeProcess' - * will initialize cppgc and v8 if needed (for non-standalone builds). - * - * \param platform DefaultPlatform instance used to initialize cppgc/v8. - */ - static void InitializeProcess(DefaultPlatform* platform); - using IdleTaskSupport = v8::platform::IdleTaskSupport; explicit DefaultPlatform( int thread_pool_size = 0, @@ -64,6 +54,8 @@ class V8_EXPORT DefaultPlatform : public Platform { return v8_platform_->GetTracingController(); } + v8::Platform* GetV8Platform() const { return v8_platform_.get(); } + protected: static constexpr v8::Isolate* kNoIsolate = nullptr; diff --git a/deps/include/cppgc/explicit-management.h b/deps/include/cppgc/explicit-management.h index cdb6af48..0290328d 100644 --- a/deps/include/cppgc/explicit-management.h +++ b/deps/include/cppgc/explicit-management.h @@ -15,11 +15,27 @@ namespace cppgc { class HeapHandle; +namespace subtle { + +template +void FreeUnreferencedObject(HeapHandle& heap_handle, T& object); +template +bool Resize(T& object, AdditionalBytes additional_bytes); + +} // namespace subtle + namespace internal { -V8_EXPORT void FreeUnreferencedObject(HeapHandle&, void*); -V8_EXPORT bool Resize(void*, size_t); +class ExplicitManagementImpl final { + private: + V8_EXPORT static void FreeUnreferencedObject(HeapHandle&, void*); + V8_EXPORT static bool Resize(void*, size_t); + template + friend void subtle::FreeUnreferencedObject(HeapHandle&, T&); + template + friend bool subtle::Resize(T&, AdditionalBytes); +}; } // namespace internal namespace subtle { @@ -45,7 +61,8 @@ template void FreeUnreferencedObject(HeapHandle& heap_handle, T& object) { static_assert(IsGarbageCollectedTypeV, "Object must be of type GarbageCollected."); - internal::FreeUnreferencedObject(heap_handle, &object); + internal::ExplicitManagementImpl::FreeUnreferencedObject(heap_handle, + &object); } /** @@ -73,7 +90,8 @@ template bool Resize(T& object, AdditionalBytes additional_bytes) { static_assert(IsGarbageCollectedTypeV, "Object must be of type GarbageCollected."); - return internal::Resize(&object, sizeof(T) + additional_bytes.value); + return internal::ExplicitManagementImpl::Resize( + &object, sizeof(T) + additional_bytes.value); } } // namespace subtle diff --git a/deps/include/cppgc/garbage-collected.h b/deps/include/cppgc/garbage-collected.h index a3839e1b..6737c8be 100644 --- a/deps/include/cppgc/garbage-collected.h +++ b/deps/include/cppgc/garbage-collected.h @@ -5,8 +5,6 @@ #ifndef INCLUDE_CPPGC_GARBAGE_COLLECTED_H_ #define INCLUDE_CPPGC_GARBAGE_COLLECTED_H_ -#include - #include "cppgc/internal/api-constants.h" #include "cppgc/platform.h" #include 
"cppgc/trace-trait.h" @@ -16,28 +14,6 @@ namespace cppgc { class Visitor; -namespace internal { - -class GarbageCollectedBase { - public: - // Must use MakeGarbageCollected. - void* operator new(size_t) = delete; - void* operator new[](size_t) = delete; - // The garbage collector is taking care of reclaiming the object. Also, - // virtual destructor requires an unambiguous, accessible 'operator delete'. - void operator delete(void*) { -#ifdef V8_ENABLE_CHECKS - internal::Abort(); -#endif // V8_ENABLE_CHECKS - } - void operator delete[](void*) = delete; - - protected: - GarbageCollectedBase() = default; -}; - -} // namespace internal - /** * Base class for managed objects. Only descendent types of `GarbageCollected` * can be constructed using `MakeGarbageCollected()`. Must be inherited from as @@ -74,11 +50,24 @@ class GarbageCollectedBase { * \endcode */ template -class GarbageCollected : public internal::GarbageCollectedBase { +class GarbageCollected { public: using IsGarbageCollectedTypeMarker = void; using ParentMostGarbageCollectedType = T; + // Must use MakeGarbageCollected. + void* operator new(size_t) = delete; + void* operator new[](size_t) = delete; + // The garbage collector is taking care of reclaiming the object. Also, + // virtual destructor requires an unambiguous, accessible 'operator delete'. + void operator delete(void*) { +#ifdef V8_ENABLE_CHECKS + internal::Fatal( + "Manually deleting a garbage collected object is not allowed"); +#endif // V8_ENABLE_CHECKS + } + void operator delete[](void*) = delete; + protected: GarbageCollected() = default; }; @@ -101,7 +90,7 @@ class GarbageCollected : public internal::GarbageCollectedBase { * }; * \endcode */ -class GarbageCollectedMixin : public internal::GarbageCollectedBase { +class GarbageCollectedMixin { public: using IsGarbageCollectedMixinTypeMarker = void; diff --git a/deps/include/cppgc/heap-consistency.h b/deps/include/cppgc/heap-consistency.h index 8e603d5d..35c59ed1 100644 --- a/deps/include/cppgc/heap-consistency.h +++ b/deps/include/cppgc/heap-consistency.h @@ -9,6 +9,7 @@ #include "cppgc/internal/write-barrier.h" #include "cppgc/macros.h" +#include "cppgc/member.h" #include "cppgc/trace-trait.h" #include "v8config.h" // NOLINT(build/include_directory) @@ -47,6 +48,29 @@ class HeapConsistency final { return internal::WriteBarrier::GetWriteBarrierType(slot, value, params); } + /** + * Gets the required write barrier type for a specific write. This override is + * only used for all the BasicMember types. + * + * \param slot Slot containing the pointer to the object. The slot itself + * must reside in an object that has been allocated using + * `MakeGarbageCollected()`. + * \param value The pointer to the object held via `BasicMember`. + * \param params Parameters that may be used for actual write barrier calls. + * Only filled if return value indicates that a write barrier is needed. The + * contents of the `params` are an implementation detail. + * \returns whether a write barrier is needed and which barrier to invoke. + */ + template + static V8_INLINE WriteBarrierType GetWriteBarrierType( + const internal::BasicMember& value, + WriteBarrierParams& params) { + return internal::WriteBarrier::GetWriteBarrierType( + value.GetRawSlot(), value.GetRawStorage(), params); + } + /** * Gets the required write barrier type for a specific write. 
* @@ -146,7 +170,39 @@ class HeapConsistency final { */ static V8_INLINE void GenerationalBarrier(const WriteBarrierParams& params, const void* slot) { - internal::WriteBarrier::GenerationalBarrier(params, slot); + internal::WriteBarrier::GenerationalBarrier< + internal::WriteBarrier::GenerationalBarrierType::kPreciseSlot>(params, + slot); + } + + /** + * Generational barrier for maintaining consistency when running with multiple + * generations. This version is used when slot contains uncompressed pointer. + * + * \param params The parameters retrieved from `GetWriteBarrierType()`. + * \param slot Uncompressed slot containing the direct pointer to the object. + * The slot itself must reside in an object that has been allocated using + * `MakeGarbageCollected()`. + */ + static V8_INLINE void GenerationalBarrierForUncompressedSlot( + const WriteBarrierParams& params, const void* uncompressed_slot) { + internal::WriteBarrier::GenerationalBarrier< + internal::WriteBarrier::GenerationalBarrierType:: + kPreciseUncompressedSlot>(params, uncompressed_slot); + } + + /** + * Generational barrier for source object that may contain outgoing pointers + * to objects in young generation. + * + * \param params The parameters retrieved from `GetWriteBarrierType()`. + * \param inner_pointer Pointer to the source object. + */ + static V8_INLINE void GenerationalBarrierForSourceObject( + const WriteBarrierParams& params, const void* inner_pointer) { + internal::WriteBarrier::GenerationalBarrier< + internal::WriteBarrier::GenerationalBarrierType::kImpreciseSlot>( + params, inner_pointer); } private: diff --git a/deps/include/cppgc/heap-handle.h b/deps/include/cppgc/heap-handle.h new file mode 100644 index 00000000..0d1d21e6 --- /dev/null +++ b/deps/include/cppgc/heap-handle.h @@ -0,0 +1,48 @@ +// Copyright 2022 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#ifndef INCLUDE_CPPGC_HEAP_HANDLE_H_ +#define INCLUDE_CPPGC_HEAP_HANDLE_H_ + +#include "v8config.h" // NOLINT(build/include_directory) + +namespace cppgc { + +namespace internal { +class HeapBase; +class WriteBarrierTypeForCagedHeapPolicy; +class WriteBarrierTypeForNonCagedHeapPolicy; +} // namespace internal + +/** + * Opaque handle used for additional heap APIs. + */ +class HeapHandle { + public: + // Deleted copy ctor to avoid treating the type by value. + HeapHandle(const HeapHandle&) = delete; + HeapHandle& operator=(const HeapHandle&) = delete; + + private: + HeapHandle() = default; + + V8_INLINE bool is_incremental_marking_in_progress() const { + return is_incremental_marking_in_progress_; + } + + V8_INLINE bool is_young_generation_enabled() const { + return is_young_generation_enabled_; + } + + bool is_incremental_marking_in_progress_ = false; + bool is_young_generation_enabled_ = false; + + friend class internal::HeapBase; + friend class internal::WriteBarrierTypeForCagedHeapPolicy; + friend class internal::WriteBarrierTypeForNonCagedHeapPolicy; +}; + +} // namespace cppgc + +#endif // INCLUDE_CPPGC_HEAP_HANDLE_H_ diff --git a/deps/include/cppgc/heap-state.h b/deps/include/cppgc/heap-state.h index 3fd6b54a..28212589 100644 --- a/deps/include/cppgc/heap-state.h +++ b/deps/include/cppgc/heap-state.h @@ -38,6 +38,18 @@ class V8_EXPORT HeapState final { */ static bool IsSweeping(const HeapHandle& heap_handle); + /* + * Returns whether the garbage collector is currently sweeping on the thread + * owning this heap. 
This API allows the caller to determine whether it has + * been called from a destructor of a managed object. This API is experimental + * and may be removed in future. + * + * \param heap_handle The corresponding heap. + * \returns true if the garbage collector is currently sweeping on this + * thread, and false otherwise. + */ + static bool IsSweepingOnOwningThread(const HeapHandle& heap_handle); + /** * Returns whether the garbage collector is in the atomic pause, i.e., the * mutator is stopped from running. This API is experimental and is expected diff --git a/deps/include/cppgc/heap-statistics.h b/deps/include/cppgc/heap-statistics.h index 8e626596..5e389874 100644 --- a/deps/include/cppgc/heap-statistics.h +++ b/deps/include/cppgc/heap-statistics.h @@ -56,7 +56,7 @@ struct HeapStatistics final { /** Amount of memory actually used on the page. */ size_t used_size_bytes = 0; /** Statistics for object allocated on the page. Filled only when - * NameProvider::HideInternalNames() is false. */ + * NameProvider::SupportsCppClassNamesAsObjectNames() is true. */ std::vector object_statistics; }; @@ -98,7 +98,7 @@ struct HeapStatistics final { /** Overall committed amount of memory for the heap. */ size_t committed_size_bytes = 0; - /** Resident amount of memory help by the heap. */ + /** Resident amount of memory held by the heap. */ size_t resident_size_bytes = 0; /** Amount of memory actually used on the heap. */ size_t used_size_bytes = 0; diff --git a/deps/include/cppgc/heap.h b/deps/include/cppgc/heap.h index 136c4fb4..02ee12ea 100644 --- a/deps/include/cppgc/heap.h +++ b/deps/include/cppgc/heap.h @@ -21,6 +21,7 @@ namespace cppgc { class AllocationHandle; +class HeapHandle; /** * Implementation details of cppgc. Those details are considered internal and @@ -31,11 +32,6 @@ namespace internal { class Heap; } // namespace internal -/** - * Used for additional heap APIs. - */ -class HeapHandle; - class V8_EXPORT Heap { public: /** @@ -59,7 +55,7 @@ class V8_EXPORT Heap { }; /** - * Specifies supported marking types + * Specifies supported marking types. */ enum class MarkingType : uint8_t { /** @@ -68,8 +64,8 @@ class V8_EXPORT Heap { */ kAtomic, /** - * Incremental marking, i.e. interleave marking is the rest of the - * application on the same thread. + * Incremental marking interleaves marking with the rest of the application + * workload on the same thread. */ kIncremental, /** @@ -79,13 +75,18 @@ class V8_EXPORT Heap { }; /** - * Specifies supported sweeping types + * Specifies supported sweeping types. */ enum class SweepingType : uint8_t { /** * Atomic stop-the-world sweeping. All of sweeping is performed at once. */ kAtomic, + /** + * Incremental sweeping interleaves sweeping with the rest of the + * application workload on the same thread. + */ + kIncremental, /** * Incremental and concurrent sweeping. Sweeping is split and interleaved * with the rest of the application. 
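As an aside on how these modes are selected: below is a sketch of creating a standalone heap that opts into the newly added `SweepingType::kIncremental`. It assumes `cppgc::Heap::HeapOptions` exposes `marking_support`/`sweeping_support` fields, which this excerpt does not show:

```cpp
#include <memory>
#include <utility>

#include "cppgc/default-platform.h"
#include "cppgc/heap.h"
#include "cppgc/platform.h"

std::unique_ptr<cppgc::Heap> MakeIncrementalHeap() {
  auto platform = std::make_shared<cppgc::DefaultPlatform>();
  // DefaultPlatform::InitializeProcess() is removed in this upgrade (see the
  // default-platform.h hunk above); use the generic process initialization.
  cppgc::InitializeProcess(platform->GetPageAllocator());

  cppgc::Heap::HeapOptions options = cppgc::Heap::HeapOptions::Default();
  options.marking_support = cppgc::Heap::MarkingType::kIncremental;
  // Newly added in this version: single-threaded incremental sweeping.
  options.sweeping_support = cppgc::Heap::SweepingType::kIncremental;
  return cppgc::Heap::Create(std::move(platform), std::move(options));
}
```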
diff --git a/deps/include/cppgc/internal/api-constants.h b/deps/include/cppgc/internal/api-constants.h index 791039f1..023426e9 100644 --- a/deps/include/cppgc/internal/api-constants.h +++ b/deps/include/cppgc/internal/api-constants.h @@ -32,18 +32,31 @@ static constexpr uint16_t kFullyConstructedBitMask = uint16_t{1}; static constexpr size_t kPageSize = size_t{1} << 17; +#if defined(V8_TARGET_ARCH_ARM64) && defined(V8_OS_MACOS) +constexpr size_t kGuardPageSize = 0; +#else +constexpr size_t kGuardPageSize = 4096; +#endif + static constexpr size_t kLargeObjectSizeThreshold = kPageSize / 2; #if defined(CPPGC_CAGED_HEAP) +#if defined(CPPGC_2GB_CAGE) +constexpr size_t kCagedHeapReservationSize = static_cast(2) * kGB; +#else // !defined(CPPGC_2GB_CAGE) constexpr size_t kCagedHeapReservationSize = static_cast(4) * kGB; +#endif // !defined(CPPGC_2GB_CAGE) constexpr size_t kCagedHeapReservationAlignment = kCagedHeapReservationSize; -#endif +#endif // defined(CPPGC_CAGED_HEAP) static constexpr size_t kDefaultAlignment = sizeof(void*); // Maximum support alignment for a type as in `alignof(T)`. static constexpr size_t kMaxSupportedAlignment = 2 * kDefaultAlignment; +// Granularity of heap allocations. +constexpr size_t kAllocationGranularity = sizeof(void*); + } // namespace api_constants } // namespace internal diff --git a/deps/include/cppgc/internal/base-page-handle.h b/deps/include/cppgc/internal/base-page-handle.h new file mode 100644 index 00000000..9c690755 --- /dev/null +++ b/deps/include/cppgc/internal/base-page-handle.h @@ -0,0 +1,45 @@ +// Copyright 2022 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#ifndef INCLUDE_CPPGC_INTERNAL_BASE_PAGE_HANDLE_H_ +#define INCLUDE_CPPGC_INTERNAL_BASE_PAGE_HANDLE_H_ + +#include "cppgc/heap-handle.h" +#include "cppgc/internal/api-constants.h" +#include "cppgc/internal/logging.h" +#include "v8config.h" // NOLINT(build/include_directory) + +namespace cppgc { +namespace internal { + +// The class is needed in the header to allow for fast access to HeapHandle in +// the write barrier. 
+class BasePageHandle { + public: + static V8_INLINE BasePageHandle* FromPayload(void* payload) { + return reinterpret_cast( + (reinterpret_cast(payload) & + ~(api_constants::kPageSize - 1)) + + api_constants::kGuardPageSize); + } + static V8_INLINE const BasePageHandle* FromPayload(const void* payload) { + return FromPayload(const_cast(payload)); + } + + HeapHandle& heap_handle() { return heap_handle_; } + const HeapHandle& heap_handle() const { return heap_handle_; } + + protected: + explicit BasePageHandle(HeapHandle& heap_handle) : heap_handle_(heap_handle) { + CPPGC_DCHECK(reinterpret_cast(this) % api_constants::kPageSize == + api_constants::kGuardPageSize); + } + + HeapHandle& heap_handle_; +}; + +} // namespace internal +} // namespace cppgc + +#endif // INCLUDE_CPPGC_INTERNAL_BASE_PAGE_HANDLE_H_ diff --git a/deps/include/cppgc/internal/caged-heap-local-data.h b/deps/include/cppgc/internal/caged-heap-local-data.h index 5b30d670..7d689f87 100644 --- a/deps/include/cppgc/internal/caged-heap-local-data.h +++ b/deps/include/cppgc/internal/caged-heap-local-data.h @@ -6,45 +6,86 @@ #define INCLUDE_CPPGC_INTERNAL_CAGED_HEAP_LOCAL_DATA_H_ #include +#include +#include #include "cppgc/internal/api-constants.h" +#include "cppgc/internal/caged-heap.h" #include "cppgc/internal/logging.h" #include "cppgc/platform.h" #include "v8config.h" // NOLINT(build/include_directory) +#if __cpp_lib_bitopts +#include +#endif // __cpp_lib_bitopts + +#if defined(CPPGC_CAGED_HEAP) + namespace cppgc { namespace internal { class HeapBase; +class HeapBaseHandle; #if defined(CPPGC_YOUNG_GENERATION) -// AgeTable contains entries that correspond to 4KB memory regions. Each entry -// can be in one of three states: kOld, kYoung or kUnknown. -class AgeTable final { - static constexpr size_t kGranularityBits = 12; // 4KiB per byte. +// AgeTable is the bytemap needed for the fast generation check in the write +// barrier. AgeTable contains entries that correspond to 4096 bytes memory +// regions (cards). Each entry in the table represents generation of the objects +// that reside on the corresponding card (young, old or mixed). +class V8_EXPORT AgeTable final { + static constexpr size_t kRequiredSize = 1 * api_constants::kMB; + static constexpr size_t kAllocationGranularity = + api_constants::kAllocationGranularity; public: - enum class Age : uint8_t { kOld, kYoung, kUnknown }; + // Represents age of the objects living on a single card. + enum class Age : uint8_t { kOld, kYoung, kMixed }; + // When setting age for a range, consider or ignore ages of the adjacent + // cards. 
+ enum class AdjacentCardsPolicy : uint8_t { kConsider, kIgnore }; - static constexpr size_t kEntrySizeInBytes = 1 << kGranularityBits; + static constexpr size_t kCardSizeInBytes = + api_constants::kCagedHeapReservationSize / kRequiredSize; - Age& operator[](uintptr_t offset) { return table_[entry(offset)]; } - Age operator[](uintptr_t offset) const { return table_[entry(offset)]; } + void SetAge(uintptr_t cage_offset, Age age) { + table_[card(cage_offset)] = age; + } - void Reset(PageAllocator* allocator); + V8_INLINE Age GetAge(uintptr_t cage_offset) const { + return table_[card(cage_offset)]; + } - private: - static constexpr size_t kAgeTableSize = - api_constants::kCagedHeapReservationSize >> kGranularityBits; + void SetAgeForRange(uintptr_t cage_offset_begin, uintptr_t cage_offset_end, + Age age, AdjacentCardsPolicy adjacent_cards_policy); - size_t entry(uintptr_t offset) const { + Age GetAgeForRange(uintptr_t cage_offset_begin, + uintptr_t cage_offset_end) const; + + void ResetForTesting(); + + private: + V8_INLINE size_t card(uintptr_t offset) const { + constexpr size_t kGranularityBits = +#if __cpp_lib_bitopts + std::countr_zero(static_cast(kCardSizeInBytes)); +#elif V8_HAS_BUILTIN_CTZ + __builtin_ctz(static_cast(kCardSizeInBytes)); +#else //! V8_HAS_BUILTIN_CTZ + // Hardcode and check with assert. +#if defined(CPPGC_2GB_CAGE) + 11; +#else // !defined(CPPGC_2GB_CAGE) + 12; +#endif // !defined(CPPGC_2GB_CAGE) +#endif // !V8_HAS_BUILTIN_CTZ + static_assert((1 << kGranularityBits) == kCardSizeInBytes); const size_t entry = offset >> kGranularityBits; CPPGC_DCHECK(table_.size() > entry); return entry; } - std::array table_; + std::array table_; }; static_assert(sizeof(AgeTable) == 1 * api_constants::kMB, @@ -53,10 +94,10 @@ static_assert(sizeof(AgeTable) == 1 * api_constants::kMB, #endif // CPPGC_YOUNG_GENERATION struct CagedHeapLocalData final { - CagedHeapLocalData(HeapBase&, PageAllocator&); + V8_INLINE static CagedHeapLocalData& Get() { + return *reinterpret_cast(CagedHeapBase::GetBase()); + } - bool is_incremental_marking_in_progress = false; - HeapBase& heap_base; #if defined(CPPGC_YOUNG_GENERATION) AgeTable age_table; #endif @@ -65,4 +106,6 @@ struct CagedHeapLocalData final { } // namespace internal } // namespace cppgc +#endif // defined(CPPGC_CAGED_HEAP) + #endif // INCLUDE_CPPGC_INTERNAL_CAGED_HEAP_LOCAL_DATA_H_ diff --git a/deps/include/cppgc/internal/caged-heap.h b/deps/include/cppgc/internal/caged-heap.h new file mode 100644 index 00000000..4db42aee --- /dev/null +++ b/deps/include/cppgc/internal/caged-heap.h @@ -0,0 +1,61 @@ +// Copyright 2022 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
+ +#ifndef INCLUDE_CPPGC_INTERNAL_CAGED_HEAP_H_ +#define INCLUDE_CPPGC_INTERNAL_CAGED_HEAP_H_ + +#include +#include + +#include "cppgc/internal/api-constants.h" +#include "cppgc/internal/base-page-handle.h" +#include "v8config.h" // NOLINT(build/include_directory) + +#if defined(CPPGC_CAGED_HEAP) + +namespace cppgc { +namespace internal { + +class V8_EXPORT CagedHeapBase { + public: + V8_INLINE static uintptr_t OffsetFromAddress(const void* address) { + return reinterpret_cast(address) & + (api_constants::kCagedHeapReservationAlignment - 1); + } + + V8_INLINE static bool IsWithinCage(const void* address) { + CPPGC_DCHECK(g_heap_base_); + return (reinterpret_cast(address) & + ~(api_constants::kCagedHeapReservationAlignment - 1)) == + g_heap_base_; + } + + V8_INLINE static bool AreWithinCage(const void* addr1, const void* addr2) { +#if defined(CPPGC_2GB_CAGE) + static constexpr size_t kHalfWordShift = sizeof(uint32_t) * CHAR_BIT - 1; +#else //! defined(CPPGC_2GB_CAGE) + static constexpr size_t kHalfWordShift = sizeof(uint32_t) * CHAR_BIT; +#endif //! defined(CPPGC_2GB_CAGE) + static_assert((static_cast(1) << kHalfWordShift) == + api_constants::kCagedHeapReservationSize); + CPPGC_DCHECK(g_heap_base_); + return !(((reinterpret_cast(addr1) ^ g_heap_base_) | + (reinterpret_cast(addr2) ^ g_heap_base_)) >> + kHalfWordShift); + } + + V8_INLINE static uintptr_t GetBase() { return g_heap_base_; } + + private: + friend class CagedHeap; + + static uintptr_t g_heap_base_; +}; + +} // namespace internal +} // namespace cppgc + +#endif // defined(CPPGC_CAGED_HEAP) + +#endif // INCLUDE_CPPGC_INTERNAL_CAGED_HEAP_H_ diff --git a/deps/include/cppgc/internal/finalizer-trait.h b/deps/include/cppgc/internal/finalizer-trait.h index 7bd6f83b..ab49af87 100644 --- a/deps/include/cppgc/internal/finalizer-trait.h +++ b/deps/include/cppgc/internal/finalizer-trait.h @@ -19,7 +19,8 @@ struct HasFinalizeGarbageCollectedObject : std::false_type {}; template struct HasFinalizeGarbageCollectedObject< - T, void_t().FinalizeGarbageCollectedObject())>> + T, + std::void_t().FinalizeGarbageCollectedObject())>> : std::true_type {}; // The FinalizerTraitImpl specifies how to finalize objects. 
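The branch-free cage check in `AreWithinCage()` from the new `caged-heap.h` above is compact enough to be easy to misread; this standalone sketch (illustrative name, default 4GB cage assumed, base aligned to the reservation) restates the bit trick with plain integers:

```cpp
#include <cstdint>

// Mirrors AreWithinCage() for the 4GB cage, where kHalfWordShift == 32.
// If addr1 or addr2 lies outside the cage, XOR with the cage base leaves
// bits at or above bit 32 set; those bits survive the shift and make the
// expression non-zero, so both pointers are tested without any branch.
bool AreWithinCage4GB(uintptr_t cage_base, uintptr_t addr1, uintptr_t addr2) {
  constexpr unsigned kHalfWordShift = 32;  // log2(4 GB)
  return (((addr1 ^ cage_base) | (addr2 ^ cage_base)) >> kHalfWordShift) == 0;
}
```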
diff --git a/deps/include/cppgc/internal/gc-info.h b/deps/include/cppgc/internal/gc-info.h index 82a0d053..e8f90fed 100644 --- a/deps/include/cppgc/internal/gc-info.h +++ b/deps/include/cppgc/internal/gc-info.h @@ -48,7 +48,6 @@ struct V8_EXPORT EnsureGCInfoIndexTrait final { static GCInfoIndex EnsureGCInfoIndexNonPolymorphic(std::atomic&, TraceCallback, FinalizationCallback, - NameCallback); static GCInfoIndex EnsureGCInfoIndexNonPolymorphic(std::atomic&, TraceCallback, diff --git a/deps/include/cppgc/internal/logging.h b/deps/include/cppgc/internal/logging.h index 79beaef7..3a279fe0 100644 --- a/deps/include/cppgc/internal/logging.h +++ b/deps/include/cppgc/internal/logging.h @@ -20,18 +20,18 @@ FatalImpl(const char*, const SourceLocation& = SourceLocation::Current()); template struct EatParams {}; -#if DEBUG +#if defined(DEBUG) #define CPPGC_DCHECK_MSG(condition, message) \ do { \ if (V8_UNLIKELY(!(condition))) { \ ::cppgc::internal::DCheckImpl(message); \ } \ } while (false) -#else +#else // !defined(DEBUG) #define CPPGC_DCHECK_MSG(condition, message) \ (static_cast(::cppgc::internal::EatParams(condition), message)>{})) -#endif +#endif // !defined(DEBUG) #define CPPGC_DCHECK(condition) CPPGC_DCHECK_MSG(condition, #condition) diff --git a/deps/include/cppgc/internal/member-storage.h b/deps/include/cppgc/internal/member-storage.h new file mode 100644 index 00000000..0eb63820 --- /dev/null +++ b/deps/include/cppgc/internal/member-storage.h @@ -0,0 +1,236 @@ +// Copyright 2022 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#ifndef INCLUDE_CPPGC_INTERNAL_MEMBER_STORAGE_H_ +#define INCLUDE_CPPGC_INTERNAL_MEMBER_STORAGE_H_ + +#include +#include +#include + +#include "cppgc/internal/api-constants.h" +#include "cppgc/internal/logging.h" +#include "cppgc/sentinel-pointer.h" +#include "v8config.h" // NOLINT(build/include_directory) + +namespace cppgc { +namespace internal { + +#if defined(CPPGC_POINTER_COMPRESSION) + +#if defined(__clang__) +// Attribute const allows the compiler to assume that CageBaseGlobal::g_base_ +// doesn't change (e.g. across calls) and thereby avoid redundant loads. +#define CPPGC_CONST __attribute__((const)) +#define CPPGC_REQUIRE_CONSTANT_INIT \ + __attribute__((require_constant_initialization)) +#else // defined(__clang__) +#define CPPGC_CONST +#define CPPGC_REQUIRE_CONSTANT_INIT +#endif // defined(__clang__) + +class CageBaseGlobal final { + public: + V8_INLINE CPPGC_CONST static uintptr_t Get() { + CPPGC_DCHECK(IsBaseConsistent()); + return g_base_; + } + + V8_INLINE CPPGC_CONST static bool IsSet() { + CPPGC_DCHECK(IsBaseConsistent()); + return (g_base_ & ~kLowerHalfWordMask) != 0; + } + + private: + // We keep the lower halfword as ones to speed up decompression. 
+ static constexpr uintptr_t kLowerHalfWordMask = + (api_constants::kCagedHeapReservationAlignment - 1); + + static V8_EXPORT uintptr_t g_base_ CPPGC_REQUIRE_CONSTANT_INIT; + + CageBaseGlobal() = delete; + + V8_INLINE static bool IsBaseConsistent() { + return kLowerHalfWordMask == (g_base_ & kLowerHalfWordMask); + } + + friend class CageBaseGlobalUpdater; +}; + +#undef CPPGC_REQUIRE_CONSTANT_INIT +#undef CPPGC_CONST + +class V8_TRIVIAL_ABI CompressedPointer final { + public: + using IntegralType = uint32_t; + + V8_INLINE CompressedPointer() : value_(0u) {} + V8_INLINE explicit CompressedPointer(const void* ptr) + : value_(Compress(ptr)) {} + V8_INLINE explicit CompressedPointer(std::nullptr_t) : value_(0u) {} + V8_INLINE explicit CompressedPointer(SentinelPointer) + : value_(kCompressedSentinel) {} + + V8_INLINE const void* Load() const { return Decompress(value_); } + V8_INLINE const void* LoadAtomic() const { + return Decompress( + reinterpret_cast&>(value_).load( + std::memory_order_relaxed)); + } + + V8_INLINE void Store(const void* ptr) { value_ = Compress(ptr); } + V8_INLINE void StoreAtomic(const void* value) { + reinterpret_cast&>(value_).store( + Compress(value), std::memory_order_relaxed); + } + + V8_INLINE void Clear() { value_ = 0u; } + V8_INLINE bool IsCleared() const { return !value_; } + + V8_INLINE bool IsSentinel() const { return value_ == kCompressedSentinel; } + + V8_INLINE uint32_t GetAsInteger() const { return value_; } + + V8_INLINE friend bool operator==(CompressedPointer a, CompressedPointer b) { + return a.value_ == b.value_; + } + V8_INLINE friend bool operator!=(CompressedPointer a, CompressedPointer b) { + return a.value_ != b.value_; + } + V8_INLINE friend bool operator<(CompressedPointer a, CompressedPointer b) { + return a.value_ < b.value_; + } + V8_INLINE friend bool operator<=(CompressedPointer a, CompressedPointer b) { + return a.value_ <= b.value_; + } + V8_INLINE friend bool operator>(CompressedPointer a, CompressedPointer b) { + return a.value_ > b.value_; + } + V8_INLINE friend bool operator>=(CompressedPointer a, CompressedPointer b) { + return a.value_ >= b.value_; + } + + static V8_INLINE IntegralType Compress(const void* ptr) { + static_assert( + SentinelPointer::kSentinelValue == 0b10, + "The compression scheme relies on the sentinel encoded as 0b10"); + static constexpr size_t kGigaCageMask = + ~(api_constants::kCagedHeapReservationAlignment - 1); + + CPPGC_DCHECK(CageBaseGlobal::IsSet()); + const uintptr_t base = CageBaseGlobal::Get(); + CPPGC_DCHECK(!ptr || ptr == kSentinelPointer || + (base & kGigaCageMask) == + (reinterpret_cast(ptr) & kGigaCageMask)); + +#if defined(CPPGC_2GB_CAGE) + // Truncate the pointer. + auto compressed = + static_cast(reinterpret_cast(ptr)); +#else // !defined(CPPGC_2GB_CAGE) + const auto uptr = reinterpret_cast(ptr); + // Shift the pointer by one and truncate. + auto compressed = static_cast(uptr >> 1); +#endif // !defined(CPPGC_2GB_CAGE) + // Normal compressed pointers must have the MSB set. + CPPGC_DCHECK((!compressed || compressed == kCompressedSentinel) || + (compressed & (1 << 31))); + return compressed; + } + + static V8_INLINE void* Decompress(IntegralType ptr) { + CPPGC_DCHECK(CageBaseGlobal::IsSet()); + const uintptr_t base = CageBaseGlobal::Get(); + // Treat compressed pointer as signed and cast it to uint64_t, which will + // sign-extend it. +#if defined(CPPGC_2GB_CAGE) + const uint64_t mask = static_cast(static_cast(ptr)); +#else // !defined(CPPGC_2GB_CAGE) + // Then, shift the result by one. 
It's important to shift the unsigned + // value, as otherwise it would result in undefined behavior. + const uint64_t mask = static_cast(static_cast(ptr)) << 1; +#endif // !defined(CPPGC_2GB_CAGE) + return reinterpret_cast(mask & base); + } + + private: +#if defined(CPPGC_2GB_CAGE) + static constexpr IntegralType kCompressedSentinel = + SentinelPointer::kSentinelValue; +#else // !defined(CPPGC_2GB_CAGE) + static constexpr IntegralType kCompressedSentinel = + SentinelPointer::kSentinelValue >> 1; +#endif // !defined(CPPGC_2GB_CAGE) + // All constructors initialize `value_`. Do not add a default value here as it + // results in a non-atomic write on some builds, even when the atomic version + // of the constructor is used. + IntegralType value_; +}; + +#endif // defined(CPPGC_POINTER_COMPRESSION) + +class V8_TRIVIAL_ABI RawPointer final { + public: + using IntegralType = uintptr_t; + + V8_INLINE RawPointer() : ptr_(nullptr) {} + V8_INLINE explicit RawPointer(const void* ptr) : ptr_(ptr) {} + + V8_INLINE const void* Load() const { return ptr_; } + V8_INLINE const void* LoadAtomic() const { + return reinterpret_cast&>(ptr_).load( + std::memory_order_relaxed); + } + + V8_INLINE void Store(const void* ptr) { ptr_ = ptr; } + V8_INLINE void StoreAtomic(const void* ptr) { + reinterpret_cast&>(ptr_).store( + ptr, std::memory_order_relaxed); + } + + V8_INLINE void Clear() { ptr_ = nullptr; } + V8_INLINE bool IsCleared() const { return !ptr_; } + + V8_INLINE bool IsSentinel() const { return ptr_ == kSentinelPointer; } + + V8_INLINE uintptr_t GetAsInteger() const { + return reinterpret_cast(ptr_); + } + + V8_INLINE friend bool operator==(RawPointer a, RawPointer b) { + return a.ptr_ == b.ptr_; + } + V8_INLINE friend bool operator!=(RawPointer a, RawPointer b) { + return a.ptr_ != b.ptr_; + } + V8_INLINE friend bool operator<(RawPointer a, RawPointer b) { + return a.ptr_ < b.ptr_; + } + V8_INLINE friend bool operator<=(RawPointer a, RawPointer b) { + return a.ptr_ <= b.ptr_; + } + V8_INLINE friend bool operator>(RawPointer a, RawPointer b) { + return a.ptr_ > b.ptr_; + } + V8_INLINE friend bool operator>=(RawPointer a, RawPointer b) { + return a.ptr_ >= b.ptr_; + } + + private: + // All constructors initialize `ptr_`. Do not add a default value here as it + // results in a non-atomic write on some builds, even when the atomic version + // of the constructor is used. 
+ const void* ptr_; +}; + +#if defined(CPPGC_POINTER_COMPRESSION) +using MemberStorage = CompressedPointer; +#else // !defined(CPPGC_POINTER_COMPRESSION) +using MemberStorage = RawPointer; +#endif // !defined(CPPGC_POINTER_COMPRESSION) + +} // namespace internal +} // namespace cppgc + +#endif // INCLUDE_CPPGC_INTERNAL_MEMBER_STORAGE_H_ diff --git a/deps/include/cppgc/internal/name-trait.h b/deps/include/cppgc/internal/name-trait.h index 32a33478..1d927a9d 100644 --- a/deps/include/cppgc/internal/name-trait.h +++ b/deps/include/cppgc/internal/name-trait.h @@ -6,6 +6,7 @@ #define INCLUDE_CPPGC_INTERNAL_NAME_TRAIT_H_ #include +#include #include #include "cppgc/name-provider.h" @@ -58,6 +59,11 @@ struct HeapObjectName { bool name_was_hidden; }; +enum class HeapObjectNameForUnnamedObject : uint8_t { + kUseClassNameIfSupported, + kUseHiddenName, +}; + class V8_EXPORT NameTraitBase { protected: static HeapObjectName GetNameFromTypeSignature(const char*); @@ -78,16 +84,24 @@ class NameTrait final : public NameTraitBase { #endif // !CPPGC_SUPPORTS_OBJECT_NAMES } - static HeapObjectName GetName(const void* obj) { - return GetNameFor(static_cast(obj)); + static HeapObjectName GetName( + const void* obj, HeapObjectNameForUnnamedObject name_retrieval_mode) { + return GetNameFor(static_cast(obj), name_retrieval_mode); } private: - static HeapObjectName GetNameFor(const NameProvider* name_provider) { + static HeapObjectName GetNameFor(const NameProvider* name_provider, + HeapObjectNameForUnnamedObject) { + // Objects inheriting from `NameProvider` are not considered unnamed as + // users already provided a name for them. return {name_provider->GetHumanReadableName(), false}; } - static HeapObjectName GetNameFor(...) { + static HeapObjectName GetNameFor( + const void*, HeapObjectNameForUnnamedObject name_retrieval_mode) { + if (name_retrieval_mode == HeapObjectNameForUnnamedObject::kUseHiddenName) + return {NameProvider::kHiddenName, true}; + #if CPPGC_SUPPORTS_COMPILE_TIME_TYPENAME return {GetTypename(), false}; #elif CPPGC_SUPPORTS_OBJECT_NAMES @@ -102,7 +116,7 @@ class NameTrait final : public NameTraitBase { static const HeapObjectName leaky_name = GetNameFromTypeSignature(PRETTY_FUNCTION_VALUE); - return {leaky_name, false}; + return leaky_name; #undef PRETTY_FUNCTION_VALUE @@ -112,7 +126,8 @@ class NameTrait final : public NameTraitBase { } }; -using NameCallback = HeapObjectName (*)(const void*); +using NameCallback = HeapObjectName (*)(const void*, + HeapObjectNameForUnnamedObject); } // namespace internal } // namespace cppgc diff --git a/deps/include/cppgc/internal/persistent-node.h b/deps/include/cppgc/internal/persistent-node.h index 68a8096c..d22692a7 100644 --- a/deps/include/cppgc/internal/persistent-node.h +++ b/deps/include/cppgc/internal/persistent-node.h @@ -14,13 +14,11 @@ #include "v8config.h" // NOLINT(build/include_directory) namespace cppgc { - -class Visitor; - namespace internal { class CrossThreadPersistentRegion; class FatalOutOfMemoryHandler; +class RootVisitor; // PersistentNode represents a variant of two states: // 1) traceable node with a back pointer to the Persistent object; @@ -32,7 +30,7 @@ class PersistentNode final { PersistentNode(const PersistentNode&) = delete; PersistentNode& operator=(const PersistentNode&) = delete; - void InitializeAsUsedNode(void* owner, TraceCallback trace) { + void InitializeAsUsedNode(void* owner, TraceRootCallback trace) { CPPGC_DCHECK(trace); owner_ = owner; trace_ = trace; @@ -53,9 +51,9 @@ class PersistentNode final { return next_; } 
- void Trace(Visitor* visitor) const { + void Trace(RootVisitor& root_visitor) const { CPPGC_DCHECK(IsUsed()); - trace_(visitor, owner_); + trace_(root_visitor, owner_); } bool IsUsed() const { return trace_; } @@ -73,30 +71,38 @@ class PersistentNode final { void* owner_ = nullptr; PersistentNode* next_; }; - TraceCallback trace_ = nullptr; + TraceRootCallback trace_ = nullptr; }; class V8_EXPORT PersistentRegionBase { using PersistentNodeSlots = std::array; public: - explicit PersistentRegionBase(const FatalOutOfMemoryHandler& oom_handler); // Clears Persistent fields to avoid stale pointers after heap teardown. ~PersistentRegionBase(); PersistentRegionBase(const PersistentRegionBase&) = delete; PersistentRegionBase& operator=(const PersistentRegionBase&) = delete; - PersistentNode* AllocateNode(void* owner, TraceCallback trace) { - if (!free_list_head_) { - EnsureNodeSlots(); - CPPGC_DCHECK(free_list_head_); + void Iterate(RootVisitor&); + + size_t NodesInUse() const; + + void ClearAllUsedNodes(); + + protected: + explicit PersistentRegionBase(const FatalOutOfMemoryHandler& oom_handler); + + PersistentNode* TryAllocateNodeFromFreeList(void* owner, + TraceRootCallback trace) { + PersistentNode* node = nullptr; + if (V8_LIKELY(free_list_head_)) { + node = free_list_head_; + free_list_head_ = free_list_head_->FreeListNext(); + CPPGC_DCHECK(!node->IsUsed()); + node->InitializeAsUsedNode(owner, trace); + nodes_in_use_++; } - PersistentNode* node = free_list_head_; - free_list_head_ = free_list_head_->FreeListNext(); - CPPGC_DCHECK(!node->IsUsed()); - node->InitializeAsUsedNode(owner, trace); - nodes_in_use_++; return node; } @@ -109,18 +115,15 @@ class V8_EXPORT PersistentRegionBase { nodes_in_use_--; } - void Trace(Visitor*); - - size_t NodesInUse() const; - - void ClearAllUsedNodes(); + PersistentNode* RefillFreeListAndAllocateNode(void* owner, + TraceRootCallback trace); private: - void EnsureNodeSlots(); - template void ClearAllUsedNodes(); + void RefillFreeList(); + std::vector> nodes_; PersistentNode* free_list_head_ = nullptr; size_t nodes_in_use_ = 0; @@ -140,9 +143,14 @@ class V8_EXPORT PersistentRegion final : public PersistentRegionBase { PersistentRegion(const PersistentRegion&) = delete; PersistentRegion& operator=(const PersistentRegion&) = delete; - V8_INLINE PersistentNode* AllocateNode(void* owner, TraceCallback trace) { + V8_INLINE PersistentNode* AllocateNode(void* owner, TraceRootCallback trace) { CPPGC_DCHECK(IsCreationThread()); - return PersistentRegionBase::AllocateNode(owner, trace); + auto* node = TryAllocateNodeFromFreeList(owner, trace); + if (V8_LIKELY(node)) return node; + + // Slow path allocation allows for checking thread correspondence. 
+ CPPGC_CHECK(IsCreationThread()); + return RefillFreeListAndAllocateNode(owner, trace); } V8_INLINE void FreeNode(PersistentNode* node) { @@ -179,9 +187,12 @@ class V8_EXPORT CrossThreadPersistentRegion final CrossThreadPersistentRegion& operator=(const CrossThreadPersistentRegion&) = delete; - V8_INLINE PersistentNode* AllocateNode(void* owner, TraceCallback trace) { + V8_INLINE PersistentNode* AllocateNode(void* owner, TraceRootCallback trace) { PersistentRegionLock::AssertLocked(); - return PersistentRegionBase::AllocateNode(owner, trace); + auto* node = TryAllocateNodeFromFreeList(owner, trace); + if (V8_LIKELY(node)) return node; + + return RefillFreeListAndAllocateNode(owner, trace); } V8_INLINE void FreeNode(PersistentNode* node) { @@ -189,7 +200,7 @@ class V8_EXPORT CrossThreadPersistentRegion final PersistentRegionBase::FreeNode(node); } - void Trace(Visitor*); + void Iterate(RootVisitor&); size_t NodesInUse() const; diff --git a/deps/include/cppgc/internal/pointer-policies.h b/deps/include/cppgc/internal/pointer-policies.h index 853d7031..8455b3df 100644 --- a/deps/include/cppgc/internal/pointer-policies.h +++ b/deps/include/cppgc/internal/pointer-policies.h @@ -8,6 +8,7 @@ #include #include +#include "cppgc/internal/member-storage.h" #include "cppgc/internal/write-barrier.h" #include "cppgc/sentinel-pointer.h" #include "cppgc/source-location.h" @@ -27,15 +28,34 @@ class WeakMemberTag; class UntracedMemberTag; struct DijkstraWriteBarrierPolicy { - static void InitializingBarrier(const void*, const void*) { + V8_INLINE static void InitializingBarrier(const void*, const void*) { // Since in initializing writes the source object is always white, having no // barrier doesn't break the tri-color invariant. } - static void AssigningBarrier(const void* slot, const void* value) { + + V8_INLINE static void AssigningBarrier(const void* slot, const void* value) { + WriteBarrier::Params params; + const WriteBarrier::Type type = + WriteBarrier::GetWriteBarrierType(slot, value, params); + WriteBarrier(type, params, slot, value); + } + + V8_INLINE static void AssigningBarrier(const void* slot, + MemberStorage storage) { WriteBarrier::Params params; - switch (WriteBarrier::GetWriteBarrierType(slot, value, params)) { + const WriteBarrier::Type type = + WriteBarrier::GetWriteBarrierType(slot, storage, params); + WriteBarrier(type, params, slot, storage.Load()); + } + + private: + V8_INLINE static void WriteBarrier(WriteBarrier::Type type, + const WriteBarrier::Params& params, + const void* slot, const void* value) { + switch (type) { case WriteBarrier::Type::kGenerational: - WriteBarrier::GenerationalBarrier(params, slot); + WriteBarrier::GenerationalBarrier< + WriteBarrier::GenerationalBarrierType::kPreciseSlot>(params, slot); break; case WriteBarrier::Type::kMarking: WriteBarrier::DijkstraMarkingBarrier(params, value); @@ -47,8 +67,9 @@ struct DijkstraWriteBarrierPolicy { }; struct NoWriteBarrierPolicy { - static void InitializingBarrier(const void*, const void*) {} - static void AssigningBarrier(const void*, const void*) {} + V8_INLINE static void InitializingBarrier(const void*, const void*) {} + V8_INLINE static void AssigningBarrier(const void*, const void*) {} + V8_INLINE static void AssigningBarrier(const void*, MemberStorage) {} }; class V8_EXPORT SameThreadEnabledCheckingPolicyBase { @@ -89,7 +110,7 @@ class V8_EXPORT SameThreadEnabledCheckingPolicy class DisabledCheckingPolicy { protected: - void CheckPointer(const void*) {} + V8_INLINE void CheckPointer(const void*) {} }; #ifdef DEBUG 
diff --git a/deps/include/cppgc/internal/prefinalizer-handler.h b/deps/include/cppgc/internal/prefinalizer-handler.h
deleted file mode 100644
index 64b07ec9..00000000
--- a/deps/include/cppgc/internal/prefinalizer-handler.h
+++ /dev/null
@@ -1,30 +0,0 @@
-// Copyright 2020 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef INCLUDE_CPPGC_INTERNAL_PREFINALIZER_HANDLER_H_
-#define INCLUDE_CPPGC_INTERNAL_PREFINALIZER_HANDLER_H_
-
-#include "cppgc/heap.h"
-#include "cppgc/liveness-broker.h"
-
-namespace cppgc {
-namespace internal {
-
-class V8_EXPORT PreFinalizerRegistrationDispatcher final {
- public:
-  using PreFinalizerCallback = bool (*)(const LivenessBroker&, void*);
-  struct PreFinalizer {
-    void* object;
-    PreFinalizerCallback callback;
-
-    bool operator==(const PreFinalizer& other) const;
-  };
-
-  static void RegisterPrefinalizer(PreFinalizer pre_finalizer);
-};
-
-}  // namespace internal
-}  // namespace cppgc
-
-#endif  // INCLUDE_CPPGC_INTERNAL_PREFINALIZER_HANDLER_H_
diff --git a/deps/include/cppgc/internal/write-barrier.h b/deps/include/cppgc/internal/write-barrier.h
index 67f039c6..37bc5c97 100644
--- a/deps/include/cppgc/internal/write-barrier.h
+++ b/deps/include/cppgc/internal/write-barrier.h
@@ -8,9 +8,12 @@
 #include <cstddef>
 #include <cstdint>
 
+#include "cppgc/heap-handle.h"
 #include "cppgc/heap-state.h"
 #include "cppgc/internal/api-constants.h"
 #include "cppgc/internal/atomic-entry-flag.h"
+#include "cppgc/internal/base-page-handle.h"
+#include "cppgc/internal/member-storage.h"
 #include "cppgc/platform.h"
 #include "cppgc/sentinel-pointer.h"
 #include "cppgc/trace-trait.h"
@@ -18,6 +21,7 @@
 
 #if defined(CPPGC_CAGED_HEAP)
 #include "cppgc/internal/caged-heap-local-data.h"
+#include "cppgc/internal/caged-heap.h"
 #endif
 
 namespace cppgc {
@@ -40,16 +44,18 @@ class V8_EXPORT WriteBarrier final {
     kGenerational,
   };
 
+  enum class GenerationalBarrierType : uint8_t {
+    kPreciseSlot,
+    kPreciseUncompressedSlot,
+    kImpreciseSlot,
+  };
+
   struct Params {
     HeapHandle* heap = nullptr;
 #if V8_ENABLE_CHECKS
     Type type = Type::kNone;
 #endif  // !V8_ENABLE_CHECKS
 #if defined(CPPGC_CAGED_HEAP)
-    uintptr_t start = 0;
-    CagedHeapLocalData& caged_heap() const {
-      return *reinterpret_cast<CagedHeapLocalData*>(start);
-    }
     uintptr_t slot_offset = 0;
     uintptr_t value_offset = 0;
 #endif  // CPPGC_CAGED_HEAP
@@ -63,6 +69,9 @@ class V8_EXPORT WriteBarrier final {
   // Returns the required write barrier for a given `slot` and `value`.
   static V8_INLINE Type GetWriteBarrierType(const void* slot, const void* value,
                                             Params& params);
+  // Returns the required write barrier for a given `slot` and `value`.
+  static V8_INLINE Type GetWriteBarrierType(const void* slot, MemberStorage,
+                                            Params& params);
   // Returns the required write barrier for a given `slot`.
   template <typename HeapHandleCallback>
   static V8_INLINE Type GetWriteBarrierType(const void* slot, Params& params,
@@ -70,10 +79,6 @@
   // Returns the required write barrier for a given `value`.
static V8_INLINE Type GetWriteBarrierType(const void* value, Params& params); - template - static V8_INLINE Type GetWriteBarrierTypeForExternallyReferencedObject( - const void* value, Params& params, HeapHandleCallback callback); - static V8_INLINE void DijkstraMarkingBarrier(const Params& params, const void* object); static V8_INLINE void DijkstraMarkingBarrierRange( @@ -82,11 +87,13 @@ class V8_EXPORT WriteBarrier final { static V8_INLINE void SteeleMarkingBarrier(const Params& params, const void* object); #if defined(CPPGC_YOUNG_GENERATION) + template static V8_INLINE void GenerationalBarrier(const Params& params, const void* slot); -#else // !CPPGC_YOUNG_GENERATION +#else // !CPPGC_YOUNG_GENERATION + template static V8_INLINE void GenerationalBarrier(const Params& params, - const void* slot) {} + const void* slot){} #endif // CPPGC_YOUNG_GENERATION #if V8_ENABLE_CHECKS @@ -95,12 +102,10 @@ class V8_EXPORT WriteBarrier final { static void CheckParams(Type expected_type, const Params& params) {} #endif // !V8_ENABLE_CHECKS - // The IncrementalOrConcurrentUpdater class allows cppgc internal to update - // |incremental_or_concurrent_marking_flag_|. - class IncrementalOrConcurrentMarkingFlagUpdater; - static bool IsAnyIncrementalOrConcurrentMarking() { - return incremental_or_concurrent_marking_flag_.MightBeEntered(); - } + // The FlagUpdater class allows cppgc internal to update + // |write_barrier_enabled_|. + class FlagUpdater; + static bool IsEnabled() { return write_barrier_enabled_.MightBeEntered(); } private: WriteBarrier() = delete; @@ -124,16 +129,24 @@ class V8_EXPORT WriteBarrier final { #if defined(CPPGC_YOUNG_GENERATION) static CagedHeapLocalData& GetLocalData(HeapHandle&); static void GenerationalBarrierSlow(const CagedHeapLocalData& local_data, - const AgeTable& ageTable, - const void* slot, uintptr_t value_offset); + const AgeTable& age_table, + const void* slot, uintptr_t value_offset, + HeapHandle* heap_handle); + static void GenerationalBarrierForUncompressedSlotSlow( + const CagedHeapLocalData& local_data, const AgeTable& age_table, + const void* slot, uintptr_t value_offset, HeapHandle* heap_handle); + static void GenerationalBarrierForSourceObjectSlow( + const CagedHeapLocalData& local_data, const void* object, + HeapHandle* heap_handle); #endif // CPPGC_YOUNG_GENERATION - static AtomicEntryFlag incremental_or_concurrent_marking_flag_; + static AtomicEntryFlag write_barrier_enabled_; }; template V8_INLINE WriteBarrier::Type SetAndReturnType(WriteBarrier::Params& params) { - if (type == WriteBarrier::Type::kNone) return WriteBarrier::Type::kNone; + if constexpr (type == WriteBarrier::Type::kNone) + return WriteBarrier::Type::kNone; #if V8_ENABLE_CHECKS params.type = type; #endif // !V8_ENABLE_CHECKS @@ -151,16 +164,16 @@ class V8_EXPORT WriteBarrierTypeForCagedHeapPolicy final { } template - static V8_INLINE WriteBarrier::Type Get(const void* value, + static V8_INLINE WriteBarrier::Type Get(const void* slot, MemberStorage value, WriteBarrier::Params& params, HeapHandleCallback callback) { - return GetNoSlot(value, params, callback); + return ValueModeDispatch::Get(slot, value, params, callback); } - template - static V8_INLINE WriteBarrier::Type GetForExternallyReferenced( - const void* value, WriteBarrier::Params& params, - HeapHandleCallback callback) { + template + static V8_INLINE WriteBarrier::Type Get(const void* value, + WriteBarrier::Params& params, + HeapHandleCallback callback) { return GetNoSlot(value, params, callback); } @@ -171,69 +184,77 @@ class 
V8_EXPORT WriteBarrierTypeForCagedHeapPolicy final { static V8_INLINE WriteBarrier::Type GetNoSlot(const void* value, WriteBarrier::Params& params, HeapHandleCallback) { - if (!TryGetCagedHeap(value, value, params)) { - return WriteBarrier::Type::kNone; - } - if (V8_UNLIKELY(params.caged_heap().is_incremental_marking_in_progress)) { + const bool within_cage = CagedHeapBase::IsWithinCage(value); + if (!within_cage) return WriteBarrier::Type::kNone; + + // We know that |value| points either within the normal page or to the + // beginning of large-page, so extract the page header by bitmasking. + BasePageHandle* page = + BasePageHandle::FromPayload(const_cast(value)); + + HeapHandle& heap_handle = page->heap_handle(); + if (V8_UNLIKELY(heap_handle.is_incremental_marking_in_progress())) { return SetAndReturnType(params); } + return SetAndReturnType(params); } template struct ValueModeDispatch; - - static V8_INLINE bool TryGetCagedHeap(const void* slot, const void* value, - WriteBarrier::Params& params) { - // TODO(chromium:1056170): Check if the null check can be folded in with - // the rest of the write barrier. - if (!value) return false; - params.start = reinterpret_cast(value) & - ~(api_constants::kCagedHeapReservationAlignment - 1); - const uintptr_t slot_offset = - reinterpret_cast(slot) - params.start; - if (slot_offset > api_constants::kCagedHeapReservationSize) { - // Check if slot is on stack or value is sentinel or nullptr. This relies - // on the fact that kSentinelPointer is encoded as 0x1. - return false; - } - return true; - } - - // Returns whether marking is in progress. If marking is not in progress - // sets the start of the cage accordingly. - // - // TODO(chromium:1056170): Create fast path on API. - static bool IsMarking(const HeapHandle&, WriteBarrier::Params&); }; template <> struct WriteBarrierTypeForCagedHeapPolicy::ValueModeDispatch< WriteBarrier::ValueMode::kValuePresent> { + template + static V8_INLINE WriteBarrier::Type Get(const void* slot, + MemberStorage storage, + WriteBarrier::Params& params, + HeapHandleCallback) { + if (V8_LIKELY(!WriteBarrier::IsEnabled())) + return SetAndReturnType(params); + + return BarrierEnabledGet(slot, storage.Load(), params); + } + template static V8_INLINE WriteBarrier::Type Get(const void* slot, const void* value, WriteBarrier::Params& params, HeapHandleCallback) { -#if !defined(CPPGC_YOUNG_GENERATION) - if (V8_LIKELY(!WriteBarrier::IsAnyIncrementalOrConcurrentMarking())) { + if (V8_LIKELY(!WriteBarrier::IsEnabled())) return SetAndReturnType(params); - } -#endif // !CPPGC_YOUNG_GENERATION - bool within_cage = TryGetCagedHeap(slot, value, params); - if (!within_cage) { - return WriteBarrier::Type::kNone; - } - if (V8_LIKELY(!params.caged_heap().is_incremental_marking_in_progress)) { + + return BarrierEnabledGet(slot, value, params); + } + + private: + static V8_INLINE WriteBarrier::Type BarrierEnabledGet( + const void* slot, const void* value, WriteBarrier::Params& params) { + const bool within_cage = CagedHeapBase::AreWithinCage(slot, value); + if (!within_cage) return WriteBarrier::Type::kNone; + + // We know that |value| points either within the normal page or to the + // beginning of large-page, so extract the page header by bitmasking. 
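
// ---------------------------------------------------------------------------
// Aside: BasePageHandle::FromPayload above recovers a page's metadata from any
// interior payload pointer purely by address arithmetic. A standalone sketch
// of the idea, assuming power-of-two page alignment with the header at the
// page start; kPageSize and PageHeader are illustrative, not cppgc's real
// constants or types.
// ---------------------------------------------------------------------------
#include <cassert>
#include <cstdint>
#include <cstdlib>
#include <new>

constexpr uintptr_t kPageSize = uintptr_t{1} << 17;  // assumed alignment

struct PageHeader {
  int heap_id;  // stand-in for BasePageHandle::heap_handle()
};

// Clearing the low address bits maps any payload pointer to its page header.
PageHeader* PageFromPayload(const void* payload) {
  return reinterpret_cast<PageHeader*>(reinterpret_cast<uintptr_t>(payload) &
                                       ~(kPageSize - 1));
}

int main() {
  // One page-aligned "page" with the header placed at its start.
  void* raw = std::aligned_alloc(kPageSize, kPageSize);
  assert(raw);
  PageHeader* header = new (raw) PageHeader{7};
  // Any interior pointer into the page resolves back to the same header.
  const void* payload = static_cast<char*>(raw) + 4096;
  assert(PageFromPayload(payload) == header);
  assert(PageFromPayload(payload)->heap_id == 7);
  std::free(raw);
}
// ---------------------------------------------------------------------------
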
+ BasePageHandle* page = + BasePageHandle::FromPayload(const_cast(value)); + + HeapHandle& heap_handle = page->heap_handle(); + if (V8_LIKELY(!heap_handle.is_incremental_marking_in_progress())) { #if defined(CPPGC_YOUNG_GENERATION) - params.heap = reinterpret_cast(params.start); - params.slot_offset = reinterpret_cast(slot) - params.start; - params.value_offset = reinterpret_cast(value) - params.start; + if (!heap_handle.is_young_generation_enabled()) + return WriteBarrier::Type::kNone; + params.heap = &heap_handle; + params.slot_offset = CagedHeapBase::OffsetFromAddress(slot); + params.value_offset = CagedHeapBase::OffsetFromAddress(value); return SetAndReturnType(params); #else // !CPPGC_YOUNG_GENERATION return SetAndReturnType(params); #endif // !CPPGC_YOUNG_GENERATION } - params.heap = reinterpret_cast(params.start); + + // Use marking barrier. + params.heap = &heap_handle; return SetAndReturnType(params); } }; @@ -245,28 +266,28 @@ struct WriteBarrierTypeForCagedHeapPolicy::ValueModeDispatch< static V8_INLINE WriteBarrier::Type Get(const void* slot, const void*, WriteBarrier::Params& params, HeapHandleCallback callback) { -#if defined(CPPGC_YOUNG_GENERATION) + if (V8_LIKELY(!WriteBarrier::IsEnabled())) + return SetAndReturnType(params); + HeapHandle& handle = callback(); - if (V8_LIKELY(!IsMarking(handle, params))) { - // params.start is populated by IsMarking(). +#if defined(CPPGC_YOUNG_GENERATION) + if (V8_LIKELY(!handle.is_incremental_marking_in_progress())) { + if (!handle.is_young_generation_enabled()) { + return WriteBarrier::Type::kNone; + } params.heap = &handle; - params.slot_offset = reinterpret_cast(slot) - params.start; - // params.value_offset stays 0. - if (params.slot_offset > api_constants::kCagedHeapReservationSize) { - // Check if slot is on stack. + // Check if slot is on stack. + if (V8_UNLIKELY(!CagedHeapBase::IsWithinCage(slot))) { return SetAndReturnType(params); } + params.slot_offset = CagedHeapBase::OffsetFromAddress(slot); return SetAndReturnType(params); } -#else // !CPPGC_YOUNG_GENERATION - if (V8_LIKELY(!WriteBarrier::IsAnyIncrementalOrConcurrentMarking())) { +#else // !defined(CPPGC_YOUNG_GENERATION) + if (V8_UNLIKELY(!handle.is_incremental_marking_in_progress())) { return SetAndReturnType(params); } - HeapHandle& handle = callback(); - if (V8_UNLIKELY(!subtle::HeapState::IsMarking(handle))) { - return SetAndReturnType(params); - } -#endif // !CPPGC_YOUNG_GENERATION +#endif // !defined(CPPGC_YOUNG_GENERATION) params.heap = &handle; return SetAndReturnType(params); } @@ -284,18 +305,19 @@ class V8_EXPORT WriteBarrierTypeForNonCagedHeapPolicy final { } template - static V8_INLINE WriteBarrier::Type Get(const void* value, + static V8_INLINE WriteBarrier::Type Get(const void* slot, MemberStorage value, WriteBarrier::Params& params, HeapHandleCallback callback) { - // The slot will never be used in `Get()` below. - return Get(nullptr, value, params, - callback); + // `MemberStorage` will always be `RawPointer` for non-caged heap builds. + // Just convert to `void*` in this case. + return ValueModeDispatch::Get(slot, value.Load(), params, + callback); } - template - static V8_INLINE WriteBarrier::Type GetForExternallyReferenced( - const void* value, WriteBarrier::Params& params, - HeapHandleCallback callback) { + template + static V8_INLINE WriteBarrier::Type Get(const void* value, + WriteBarrier::Params& params, + HeapHandleCallback callback) { // The slot will never be used in `Get()` below. 
return Get(nullptr, value, params, callback); @@ -305,11 +327,6 @@ class V8_EXPORT WriteBarrierTypeForNonCagedHeapPolicy final { template struct ValueModeDispatch; - // TODO(chromium:1056170): Create fast path on API. - static bool IsMarking(const void*, HeapHandle**); - // TODO(chromium:1056170): Create fast path on API. - static bool IsMarking(HeapHandle&); - WriteBarrierTypeForNonCagedHeapPolicy() = delete; }; @@ -324,10 +341,16 @@ struct WriteBarrierTypeForNonCagedHeapPolicy::ValueModeDispatch< if (object <= static_cast(kSentinelPointer)) { return SetAndReturnType(params); } - if (V8_LIKELY(!WriteBarrier::IsAnyIncrementalOrConcurrentMarking())) { + if (V8_LIKELY(!WriteBarrier::IsEnabled())) { return SetAndReturnType(params); } - if (IsMarking(object, ¶ms.heap)) { + // We know that |object| is within the normal page or in the beginning of a + // large page, so extract the page header by bitmasking. + BasePageHandle* page = + BasePageHandle::FromPayload(const_cast(object)); + + HeapHandle& heap_handle = page->heap_handle(); + if (V8_LIKELY(heap_handle.is_incremental_marking_in_progress())) { return SetAndReturnType(params); } return SetAndReturnType(params); @@ -341,9 +364,9 @@ struct WriteBarrierTypeForNonCagedHeapPolicy::ValueModeDispatch< static V8_INLINE WriteBarrier::Type Get(const void*, const void*, WriteBarrier::Params& params, HeapHandleCallback callback) { - if (V8_UNLIKELY(WriteBarrier::IsAnyIncrementalOrConcurrentMarking())) { + if (V8_UNLIKELY(WriteBarrier::IsEnabled())) { HeapHandle& handle = callback(); - if (IsMarking(handle)) { + if (V8_LIKELY(handle.is_incremental_marking_in_progress())) { params.heap = &handle; return SetAndReturnType(params); } @@ -359,6 +382,13 @@ WriteBarrier::Type WriteBarrier::GetWriteBarrierType( params, []() {}); } +// static +WriteBarrier::Type WriteBarrier::GetWriteBarrierType( + const void* slot, MemberStorage value, WriteBarrier::Params& params) { + return WriteBarrierTypePolicy::Get(slot, value, + params, []() {}); +} + // static template WriteBarrier::Type WriteBarrier::GetWriteBarrierType( @@ -375,15 +405,6 @@ WriteBarrier::Type WriteBarrier::GetWriteBarrierType( []() {}); } -// static -template -WriteBarrier::Type -WriteBarrier::GetWriteBarrierTypeForExternallyReferencedObject( - const void* value, Params& params, HeapHandleCallback callback) { - return WriteBarrierTypePolicy::GetForExternallyReferenced(value, params, - callback); -} - // static void WriteBarrier::DijkstraMarkingBarrier(const Params& params, const void* object) { @@ -420,17 +441,32 @@ void WriteBarrier::SteeleMarkingBarrier(const Params& params, } #if defined(CPPGC_YOUNG_GENERATION) + // static +template void WriteBarrier::GenerationalBarrier(const Params& params, const void* slot) { CheckParams(Type::kGenerational, params); - const CagedHeapLocalData& local_data = params.caged_heap(); + const CagedHeapLocalData& local_data = CagedHeapLocalData::Get(); const AgeTable& age_table = local_data.age_table; - // Bail out if the slot is in young generation. - if (V8_LIKELY(age_table[params.slot_offset] == AgeTable::Age::kYoung)) return; - - GenerationalBarrierSlow(local_data, age_table, slot, params.value_offset); + // Bail out if the slot (precise or imprecise) is in young generation. + if (V8_LIKELY(age_table.GetAge(params.slot_offset) == AgeTable::Age::kYoung)) + return; + + // Dispatch between different types of barriers. + // TODO(chromium:1029379): Consider reload local_data in the slow path to + // reduce register pressure. 
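
// ---------------------------------------------------------------------------
// Aside: the generational dispatch just below (and SetAndReturnType earlier)
// selects a branch with `if constexpr` on a non-type template parameter, so
// each instantiation compiles only its own branch. A standalone sketch of the
// pattern; SlotKind and RememberSlot are illustrative names.
// ---------------------------------------------------------------------------
#include <cstdio>

enum class SlotKind { kPrecise, kPreciseUncompressed, kImprecise };

template <SlotKind kind>
void RememberSlot(const void* slot) {
  if constexpr (kind == SlotKind::kPrecise) {
    std::printf("record precise slot %p\n", slot);
  } else if constexpr (kind == SlotKind::kPreciseUncompressed) {
    std::printf("record precise uncompressed slot %p\n", slot);
  } else {
    // Imprecise: remember the whole source object rather than a single slot.
    std::printf("record imprecise slot %p\n", slot);
  }
}

int main() {
  int x = 0;
  RememberSlot<SlotKind::kPrecise>(&x);    // emits only the first branch
  RememberSlot<SlotKind::kImprecise>(&x);  // emits only the last branch
}
// ---------------------------------------------------------------------------
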
+ if constexpr (type == GenerationalBarrierType::kPreciseSlot) { + GenerationalBarrierSlow(local_data, age_table, slot, params.value_offset, + params.heap); + } else if constexpr (type == + GenerationalBarrierType::kPreciseUncompressedSlot) { + GenerationalBarrierForUncompressedSlotSlow( + local_data, age_table, slot, params.value_offset, params.heap); + } else { + GenerationalBarrierForSourceObjectSlow(local_data, slot, params.heap); + } } #endif // !CPPGC_YOUNG_GENERATION diff --git a/deps/include/cppgc/liveness-broker.h b/deps/include/cppgc/liveness-broker.h index c94eef0d..2c94f1c0 100644 --- a/deps/include/cppgc/liveness-broker.h +++ b/deps/include/cppgc/liveness-broker.h @@ -7,6 +7,7 @@ #include "cppgc/heap.h" #include "cppgc/member.h" +#include "cppgc/sentinel-pointer.h" #include "cppgc/trace-trait.h" #include "v8config.h" // NOLINT(build/include_directory) @@ -44,24 +45,24 @@ class V8_EXPORT LivenessBroker final { public: template bool IsHeapObjectAlive(const T* object) const { - // nullptr objects are considered alive to allow weakness to be used from + // - nullptr objects are considered alive to allow weakness to be used from // stack while running into a conservative GC. Treating nullptr as dead - // would mean that e.g. custom collectins could not be strongified on stack. - return !object || + // would mean that e.g. custom collections could not be strongified on + // stack. + // - Sentinel pointers are also preserved in weakness and not cleared. + return !object || object == kSentinelPointer || IsHeapObjectAliveImpl( TraceTrait::GetTraceDescriptor(object).base_object_payload); } template bool IsHeapObjectAlive(const WeakMember& weak_member) const { - return (weak_member != kSentinelPointer) && - IsHeapObjectAlive(weak_member.Get()); + return IsHeapObjectAlive(weak_member.Get()); } template bool IsHeapObjectAlive(const UntracedMember& untraced_member) const { - return (untraced_member != kSentinelPointer) && - IsHeapObjectAlive(untraced_member.Get()); + return IsHeapObjectAlive(untraced_member.Get()); } private: diff --git a/deps/include/cppgc/member.h b/deps/include/cppgc/member.h index 38105b8e..9bc38363 100644 --- a/deps/include/cppgc/member.h +++ b/deps/include/cppgc/member.h @@ -9,6 +9,8 @@ #include #include +#include "cppgc/internal/api-constants.h" +#include "cppgc/internal/member-storage.h" #include "cppgc/internal/pointer-policies.h" #include "cppgc/sentinel-pointer.h" #include "cppgc/type-traits.h" @@ -16,174 +18,247 @@ namespace cppgc { +namespace subtle { +class HeapConsistency; +} // namespace subtle + class Visitor; namespace internal { // MemberBase always refers to the object as const object and defers to // BasicMember on casting to the right type as needed. 
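
// ---------------------------------------------------------------------------
// Aside: the LivenessBroker change above folds the nullptr and sentinel
// special cases into IsHeapObjectAlive() itself, so the WeakMember and
// UntracedMember overloads can simply delegate. A simplified standalone
// model; FakeBroker and its mark lookup are stand-ins for the real marking
// state.
// ---------------------------------------------------------------------------
#include <cassert>
#include <cstdint>

const void* const kSentinel = reinterpret_cast<const void*>(uintptr_t{0b10});

struct FakeBroker {
  // Stand-in for the real mark-bit query; pretend nothing is marked.
  bool IsMarked(const void*) const { return false; }

  bool IsHeapObjectAlive(const void* object) const {
    // nullptr and the sentinel are treated as alive so weak callbacks leave
    // them untouched, mirroring the comment in the hunk above.
    return !object || object == kSentinel || IsMarked(object);
  }
};

int main() {
  FakeBroker broker;
  int dead_object = 0;
  assert(broker.IsHeapObjectAlive(nullptr));
  assert(broker.IsHeapObjectAlive(kSentinel));
  assert(!broker.IsHeapObjectAlive(&dead_object));
}
// ---------------------------------------------------------------------------
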
-class MemberBase { +class V8_TRIVIAL_ABI MemberBase { + public: +#if defined(CPPGC_POINTER_COMPRESSION) + using RawStorage = CompressedPointer; +#else // !defined(CPPGC_POINTER_COMPRESSION) + using RawStorage = RawPointer; +#endif // !defined(CPPGC_POINTER_COMPRESSION) protected: struct AtomicInitializerTag {}; - MemberBase() = default; - explicit MemberBase(const void* value) : raw_(value) {} - MemberBase(const void* value, AtomicInitializerTag) { SetRawAtomic(value); } + V8_INLINE MemberBase() = default; + V8_INLINE explicit MemberBase(const void* value) : raw_(value) {} + V8_INLINE MemberBase(const void* value, AtomicInitializerTag) { + SetRawAtomic(value); + } - const void** GetRawSlot() const { return &raw_; } - const void* GetRaw() const { return raw_; } - void SetRaw(void* value) { raw_ = value; } + V8_INLINE explicit MemberBase(RawStorage raw) : raw_(raw) {} + V8_INLINE explicit MemberBase(std::nullptr_t) : raw_(nullptr) {} + V8_INLINE explicit MemberBase(SentinelPointer s) : raw_(s) {} - const void* GetRawAtomic() const { - return reinterpret_cast*>(&raw_)->load( - std::memory_order_relaxed); + V8_INLINE const void** GetRawSlot() const { + return reinterpret_cast(const_cast(this)); } - void SetRawAtomic(const void* value) { - reinterpret_cast*>(&raw_)->store( - value, std::memory_order_relaxed); + V8_INLINE const void* GetRaw() const { return raw_.Load(); } + V8_INLINE void SetRaw(void* value) { raw_.Store(value); } + + V8_INLINE const void* GetRawAtomic() const { return raw_.LoadAtomic(); } + V8_INLINE void SetRawAtomic(const void* value) { raw_.StoreAtomic(value); } + + V8_INLINE RawStorage GetRawStorage() const { return raw_; } + V8_INLINE void SetRawStorageAtomic(RawStorage other) { + reinterpret_cast&>(raw_).store( + other, std::memory_order_relaxed); } - void ClearFromGC() const { raw_ = nullptr; } + V8_INLINE bool IsCleared() const { return raw_.IsCleared(); } + + V8_INLINE void ClearFromGC() const { raw_.Clear(); } private: - mutable const void* raw_ = nullptr; + friend class MemberDebugHelper; + + mutable RawStorage raw_; }; // The basic class from which all Member classes are 'generated'. template -class BasicMember final : private MemberBase, private CheckingPolicy { +class V8_TRIVIAL_ABI BasicMember final : private MemberBase, + private CheckingPolicy { public: using PointeeType = T; - constexpr BasicMember() = default; - constexpr BasicMember(std::nullptr_t) {} // NOLINT - BasicMember(SentinelPointer s) : MemberBase(s) {} // NOLINT - BasicMember(T* raw) : MemberBase(raw) { // NOLINT - InitializingWriteBarrier(); + V8_INLINE constexpr BasicMember() = default; + V8_INLINE constexpr BasicMember(std::nullptr_t) {} // NOLINT + V8_INLINE BasicMember(SentinelPointer s) : MemberBase(s) {} // NOLINT + V8_INLINE BasicMember(T* raw) : MemberBase(raw) { // NOLINT + InitializingWriteBarrier(raw); this->CheckPointer(Get()); } - BasicMember(T& raw) : BasicMember(&raw) {} // NOLINT + V8_INLINE BasicMember(T& raw) // NOLINT + : BasicMember(&raw) {} + // Atomic ctor. Using the AtomicInitializerTag forces BasicMember to // initialize using atomic assignments. This is required for preventing // data races with concurrent marking. 
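
// ---------------------------------------------------------------------------
// Aside: with CPPGC_POINTER_COMPRESSION, MemberBase's RawStorage above is a
// CompressedPointer instead of a RawPointer. A much-simplified standalone
// model of such a storage type: keep a 32-bit offset from a cage base and
// rebuild the full pointer on load. The real cppgc encoding differs (shifted
// halfwords, atomic accessors); g_cage_base and CompressedPtr are
// illustrative only.
// ---------------------------------------------------------------------------
#include <cassert>
#include <cstdint>

uintptr_t g_cage_base = 0;  // toy cage base; cppgc derives it from the heap

class CompressedPtr {
 public:
  // Offset 0 is reserved for null in this toy encoding.
  void Store(const void* p) {
    value_ = p ? static_cast<uint32_t>(reinterpret_cast<uintptr_t>(p) -
                                       g_cage_base)
               : 0u;
  }
  const void* Load() const {
    return value_ ? reinterpret_cast<const void*>(g_cage_base + value_)
                  : nullptr;
  }

 private:
  uint32_t value_ = 0;  // half the footprint of a raw 64-bit pointer
};

int main() {
  static char cage[1024];
  g_cage_base = reinterpret_cast<uintptr_t>(cage);
  CompressedPtr p;
  p.Store(cage + 128);
  assert(p.Load() == cage + 128);
  p.Store(nullptr);
  assert(p.Load() == nullptr);
}
// ---------------------------------------------------------------------------
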
using AtomicInitializerTag = MemberBase::AtomicInitializerTag; - BasicMember(std::nullptr_t, AtomicInitializerTag atomic) + V8_INLINE BasicMember(std::nullptr_t, AtomicInitializerTag atomic) : MemberBase(nullptr, atomic) {} - BasicMember(SentinelPointer s, AtomicInitializerTag atomic) + V8_INLINE BasicMember(SentinelPointer s, AtomicInitializerTag atomic) : MemberBase(s, atomic) {} - BasicMember(T* raw, AtomicInitializerTag atomic) : MemberBase(raw, atomic) { - InitializingWriteBarrier(); + V8_INLINE BasicMember(T* raw, AtomicInitializerTag atomic) + : MemberBase(raw, atomic) { + InitializingWriteBarrier(raw); this->CheckPointer(Get()); } - BasicMember(T& raw, AtomicInitializerTag atomic) + V8_INLINE BasicMember(T& raw, AtomicInitializerTag atomic) : BasicMember(&raw, atomic) {} + // Copy ctor. - BasicMember(const BasicMember& other) : BasicMember(other.Get()) {} - // Allow heterogeneous construction. + V8_INLINE BasicMember(const BasicMember& other) + : BasicMember(other.GetRawStorage()) {} + + // Heterogeneous copy constructors. When the source pointer have a different + // type, perform a compress-decompress round, because the source pointer may + // need to be adjusted. template ::value>> - BasicMember( // NOLINT + std::enable_if_t>* = nullptr> + V8_INLINE BasicMember( // NOLINT + const BasicMember& other) + : BasicMember(other.GetRawStorage()) {} + + template >* = nullptr> + V8_INLINE BasicMember( // NOLINT const BasicMember& other) : BasicMember(other.Get()) {} + // Move ctor. - BasicMember(BasicMember&& other) noexcept : BasicMember(other.Get()) { + V8_INLINE BasicMember(BasicMember&& other) noexcept + : BasicMember(other.GetRawStorage()) { other.Clear(); } - // Allow heterogeneous move construction. + + // Heterogeneous move constructors. When the source pointer have a different + // type, perform a compress-decompress round, because the source pointer may + // need to be adjusted. template ::value>> - BasicMember(BasicMember&& other) noexcept + std::enable_if_t>* = nullptr> + V8_INLINE BasicMember(BasicMember&& other) noexcept + : BasicMember(other.GetRawStorage()) { + other.Clear(); + } + + template >* = nullptr> + V8_INLINE BasicMember(BasicMember&& other) noexcept : BasicMember(other.Get()) { other.Clear(); } + // Construction from Persistent. template ::value>> - BasicMember(const BasicPersistent& p) + V8_INLINE BasicMember(const BasicPersistent& p) : BasicMember(p.Get()) {} // Copy assignment. - BasicMember& operator=(const BasicMember& other) { - return operator=(other.Get()); + V8_INLINE BasicMember& operator=(const BasicMember& other) { + return operator=(other.GetRawStorage()); } - // Allow heterogeneous copy assignment. + + // Heterogeneous copy assignment. When the source pointer have a different + // type, perform a compress-decompress round, because the source pointer may + // need to be adjusted. template ::value>> - BasicMember& operator=( + typename OtherCheckingPolicy> + V8_INLINE BasicMember& operator=( const BasicMember& other) { - return operator=(other.Get()); + if constexpr (internal::IsDecayedSameV) { + return operator=(other.GetRawStorage()); + } else { + static_assert(internal::IsStrictlyBaseOfV); + return operator=(other.Get()); + } } + // Move assignment. - BasicMember& operator=(BasicMember&& other) noexcept { - operator=(other.Get()); + V8_INLINE BasicMember& operator=(BasicMember&& other) noexcept { + operator=(other.GetRawStorage()); other.Clear(); return *this; } - // Heterogeneous move assignment. + + // Heterogeneous move assignment. 
When the source pointer have a different + // type, perform a compress-decompress round, because the source pointer may + // need to be adjusted. template ::value>> - BasicMember& operator=(BasicMember&& other) noexcept { - operator=(other.Get()); + typename OtherCheckingPolicy> + V8_INLINE BasicMember& operator=( + BasicMember&& other) noexcept { + if constexpr (internal::IsDecayedSameV) { + operator=(other.GetRawStorage()); + } else { + static_assert(internal::IsStrictlyBaseOfV); + operator=(other.Get()); + } other.Clear(); return *this; } + // Assignment from Persistent. template ::value>> - BasicMember& operator=( + V8_INLINE BasicMember& operator=( const BasicPersistent& other) { return operator=(other.Get()); } - BasicMember& operator=(T* other) { + + V8_INLINE BasicMember& operator=(T* other) { SetRawAtomic(other); - AssigningWriteBarrier(); + AssigningWriteBarrier(other); this->CheckPointer(Get()); return *this; } - BasicMember& operator=(std::nullptr_t) { + + V8_INLINE BasicMember& operator=(std::nullptr_t) { Clear(); return *this; } - BasicMember& operator=(SentinelPointer s) { + V8_INLINE BasicMember& operator=(SentinelPointer s) { SetRawAtomic(s); return *this; } template - void Swap(BasicMember& other) { - T* tmp = Get(); + V8_INLINE void Swap(BasicMember& other) { + auto tmp = GetRawStorage(); *this = other; other = tmp; } - explicit operator bool() const { return Get(); } - operator T*() const { return Get(); } - T* operator->() const { return Get(); } - T& operator*() const { return *Get(); } + V8_INLINE explicit operator bool() const { return !IsCleared(); } + V8_INLINE operator T*() const { return Get(); } + V8_INLINE T* operator->() const { return Get(); } + V8_INLINE T& operator*() const { return *Get(); } // CFI cast exemption to allow passing SentinelPointer through T* and support // heterogeneous assignments between different Member and Persistent handles // based on their actual types. - V8_CLANG_NO_SANITIZE("cfi-unrelated-cast") T* Get() const { + V8_INLINE V8_CLANG_NO_SANITIZE("cfi-unrelated-cast") T* Get() const { // Executed by the mutator, hence non atomic load. // // The const_cast below removes the constness from MemberBase storage. 
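
// ---------------------------------------------------------------------------
// Aside: the heterogeneous constructors and assignments above copy raw
// storage only when the pointee types are the same; otherwise they go through
// Get(), because a pointer conversion may change the address. A standalone
// demonstration of why, using multiple inheritance (A, B, C are illustrative
// types):
// ---------------------------------------------------------------------------
#include <cstdio>

struct A { int a = 1; };
struct B { int b = 2; };
struct C : A, B {};

int main() {
  C object;
  C* as_c = &object;
  B* as_b = as_c;  // upcast: the B subobject sits at a non-zero offset
  std::printf("C* = %p, B* = %p, adjusted: %s\n", static_cast<void*>(as_c),
              static_cast<void*>(as_b),
              static_cast<void*>(as_c) == static_cast<void*>(as_b) ? "no"
                                                                   : "yes");
}
// ---------------------------------------------------------------------------
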
The @@ -192,59 +267,262 @@ class BasicMember final : private MemberBase, private CheckingPolicy { return static_cast(const_cast(MemberBase::GetRaw())); } - void Clear() { SetRawAtomic(nullptr); } + V8_INLINE void Clear() { SetRawStorageAtomic(RawStorage{}); } - T* Release() { + V8_INLINE T* Release() { T* result = Get(); Clear(); return result; } - const T** GetSlotForTesting() const { + V8_INLINE const T** GetSlotForTesting() const { return reinterpret_cast(GetRawSlot()); } + V8_INLINE RawStorage GetRawStorage() const { + return MemberBase::GetRawStorage(); + } + private: - const T* GetRawAtomic() const { + V8_INLINE explicit BasicMember(RawStorage raw) : MemberBase(raw) { + InitializingWriteBarrier(Get()); + this->CheckPointer(Get()); + } + + V8_INLINE BasicMember& operator=(RawStorage other) { + SetRawStorageAtomic(other); + AssigningWriteBarrier(); + this->CheckPointer(Get()); + return *this; + } + + V8_INLINE const T* GetRawAtomic() const { return static_cast(MemberBase::GetRawAtomic()); } - void InitializingWriteBarrier() const { - WriteBarrierPolicy::InitializingBarrier(GetRawSlot(), GetRaw()); + V8_INLINE void InitializingWriteBarrier(T* value) const { + WriteBarrierPolicy::InitializingBarrier(GetRawSlot(), value); } - void AssigningWriteBarrier() const { - WriteBarrierPolicy::AssigningBarrier(GetRawSlot(), GetRaw()); + V8_INLINE void AssigningWriteBarrier(T* value) const { + WriteBarrierPolicy::AssigningBarrier(GetRawSlot(), value); + } + V8_INLINE void AssigningWriteBarrier() const { + WriteBarrierPolicy::AssigningBarrier(GetRawSlot(), GetRawStorage()); } - void ClearFromGC() const { MemberBase::ClearFromGC(); } + V8_INLINE void ClearFromGC() const { MemberBase::ClearFromGC(); } - T* GetFromGC() const { return Get(); } + V8_INLINE T* GetFromGC() const { return Get(); } + friend class cppgc::subtle::HeapConsistency; friend class cppgc::Visitor; template friend struct cppgc::TraceTrait; + template + friend class BasicMember; }; +// Member equality operators. template -bool operator==(const BasicMember& member1, - const BasicMember& member2) { - return member1.Get() == member2.Get(); +V8_INLINE bool operator==( + const BasicMember& + member1, + const BasicMember& + member2) { + if constexpr (internal::IsDecayedSameV) { + // Check compressed pointers if types are the same. + return member1.GetRawStorage() == member2.GetRawStorage(); + } else { + static_assert(internal::IsStrictlyBaseOfV || + internal::IsStrictlyBaseOfV); + // Otherwise, check decompressed pointers. + return member1.Get() == member2.Get(); + } } template -bool operator!=(const BasicMember& member1, - const BasicMember& member2) { +V8_INLINE bool operator!=( + const BasicMember& + member1, + const BasicMember& + member2) { return !(member1 == member2); } +// Equality with raw pointers. +template +V8_INLINE bool operator==(const BasicMember& member, + U* raw) { + // Never allow comparison with erased pointers. + static_assert(!internal::IsDecayedSameV); + + if constexpr (internal::IsDecayedSameV) { + // Check compressed pointers if types are the same. + return member.GetRawStorage() == MemberBase::RawStorage(raw); + } else if constexpr (internal::IsStrictlyBaseOfV) { + // Cast the raw pointer to T, which may adjust the pointer. + return member.GetRawStorage() == + MemberBase::RawStorage(static_cast(raw)); + } else { + // Otherwise, decompressed the member. 
+ return member.Get() == raw; + } +} + +template +V8_INLINE bool operator!=(const BasicMember& member, + U* raw) { + return !(member == raw); +} + +template +V8_INLINE bool operator==(T* raw, + const BasicMember& member) { + return member == raw; +} + +template +V8_INLINE bool operator!=(T* raw, + const BasicMember& member) { + return !(raw == member); +} + +// Equality with sentinel. +template +V8_INLINE bool operator==(const BasicMember& member, + SentinelPointer) { + return member.GetRawStorage().IsSentinel(); +} + +template +V8_INLINE bool operator!=(const BasicMember& member, + SentinelPointer s) { + return !(member == s); +} + +template +V8_INLINE bool operator==(SentinelPointer s, + const BasicMember& member) { + return member == s; +} + +template +V8_INLINE bool operator!=(SentinelPointer s, + const BasicMember& member) { + return !(s == member); +} + +// Equality with nullptr. +template +V8_INLINE bool operator==(const BasicMember& member, + std::nullptr_t) { + return !static_cast(member); +} + +template +V8_INLINE bool operator!=(const BasicMember& member, + std::nullptr_t n) { + return !(member == n); +} + +template +V8_INLINE bool operator==(std::nullptr_t n, + const BasicMember& member) { + return member == n; +} + +template +V8_INLINE bool operator!=(std::nullptr_t n, + const BasicMember& member) { + return !(n == member); +} + +// Relational operators. +template +V8_INLINE bool operator<( + const BasicMember& + member1, + const BasicMember& + member2) { + static_assert( + internal::IsDecayedSameV, + "Comparison works only for same pointer type modulo cv-qualifiers"); + return member1.GetRawStorage() < member2.GetRawStorage(); +} + +template +V8_INLINE bool operator<=( + const BasicMember& + member1, + const BasicMember& + member2) { + static_assert( + internal::IsDecayedSameV, + "Comparison works only for same pointer type modulo cv-qualifiers"); + return member1.GetRawStorage() <= member2.GetRawStorage(); +} + +template +V8_INLINE bool operator>( + const BasicMember& + member1, + const BasicMember& + member2) { + static_assert( + internal::IsDecayedSameV, + "Comparison works only for same pointer type modulo cv-qualifiers"); + return member1.GetRawStorage() > member2.GetRawStorage(); +} + +template +V8_INLINE bool operator>=( + const BasicMember& + member1, + const BasicMember& + member2) { + static_assert( + internal::IsDecayedSameV, + "Comparison works only for same pointer type modulo cv-qualifiers"); + return member1.GetRawStorage() >= member2.GetRawStorage(); +} + template struct IsWeak< internal::BasicMember> diff --git a/deps/include/cppgc/name-provider.h b/deps/include/cppgc/name-provider.h index 224dd4b5..216f6098 100644 --- a/deps/include/cppgc/name-provider.h +++ b/deps/include/cppgc/name-provider.h @@ -37,15 +37,15 @@ class V8_EXPORT NameProvider { static constexpr const char kNoNameDeducible[] = ""; /** - * Indicating whether internal names are hidden or not. + * Indicating whether the build supports extracting C++ names as object names. * * @returns true if C++ names should be hidden and represented by kHiddenName. 
*/ - static constexpr bool HideInternalNames() { + static constexpr bool SupportsCppClassNamesAsObjectNames() { #if CPPGC_SUPPORTS_OBJECT_NAMES - return false; -#else // !CPPGC_SUPPORTS_OBJECT_NAMES return true; +#else // !CPPGC_SUPPORTS_OBJECT_NAMES + return false; #endif // !CPPGC_SUPPORTS_OBJECT_NAMES } diff --git a/deps/include/cppgc/persistent.h b/deps/include/cppgc/persistent.h index 182fb085..3a66ccc0 100644 --- a/deps/include/cppgc/persistent.h +++ b/deps/include/cppgc/persistent.h @@ -16,9 +16,6 @@ #include "v8config.h" // NOLINT(build/include_directory) namespace cppgc { - -class Visitor; - namespace internal { // PersistentBase always refers to the object as const object and defers to @@ -78,7 +75,7 @@ class BasicPersistent final : public PersistentBase, : PersistentBase(raw), LocationPolicy(loc) { if (!IsValid()) return; SetNode(WeaknessPolicy::GetPersistentRegion(GetValue()) - .AllocateNode(this, &BasicPersistent::Trace)); + .AllocateNode(this, &TraceAsRoot)); this->CheckPointer(Get()); } @@ -118,10 +115,10 @@ class BasicPersistent final : public PersistentBase, template ::value>> - BasicPersistent(internal::BasicMember - member, - const SourceLocation& loc = SourceLocation::Current()) + BasicPersistent( + const internal::BasicMember& member, + const SourceLocation& loc = SourceLocation::Current()) : BasicPersistent(member.Get(), loc) {} ~BasicPersistent() { Clear(); } @@ -159,9 +156,8 @@ class BasicPersistent final : public PersistentBase, typename MemberWeaknessTag, typename MemberCheckingPolicy, typename = std::enable_if_t::value>> BasicPersistent& operator=( - internal::BasicMember - member) { + const internal::BasicMember& member) { return operator=(member.Get()); } @@ -222,9 +218,8 @@ class BasicPersistent final : public PersistentBase, } private: - static void Trace(Visitor* v, const void* ptr) { - const auto* persistent = static_cast(ptr); - v->TraceRoot(*persistent, persistent->Location()); + static void TraceAsRoot(RootVisitor& root_visitor, const void* ptr) { + root_visitor.Trace(*static_cast(ptr)); } bool IsValid() const { @@ -248,7 +243,7 @@ class BasicPersistent final : public PersistentBase, SetValue(ptr); if (!IsValid()) return; SetNode(WeaknessPolicy::GetPersistentRegion(GetValue()) - .AllocateNode(this, &BasicPersistent::Trace)); + .AllocateNode(this, &TraceAsRoot)); this->CheckPointer(Get()); } @@ -265,7 +260,7 @@ class BasicPersistent final : public PersistentBase, return static_cast(const_cast(GetValue())); } - friend class cppgc::Visitor; + friend class internal::RootVisitor; }; template -bool operator==(const BasicPersistent& p, - BasicMember - m) { +bool operator==( + const BasicPersistent& + p, + const BasicMember& m) { return p.Get() == m.Get(); } @@ -305,12 +300,12 @@ template -bool operator!=(const BasicPersistent& p, - BasicMember - m) { +bool operator!=( + const BasicPersistent& + p, + const BasicMember& m) { return !(p == m); } @@ -318,12 +313,12 @@ template -bool operator==(BasicMember - m, - const BasicPersistent& p) { +bool operator==( + const BasicMember& m, + const BasicPersistent& + p) { return m.Get() == p.Get(); } @@ -331,12 +326,12 @@ template -bool operator!=(BasicMember - m, - const BasicPersistent& p) { +bool operator!=( + const BasicMember& m, + const BasicPersistent& + p) { return !(m == p); } diff --git a/deps/include/cppgc/platform.h b/deps/include/cppgc/platform.h index 3276a26b..5a0a40ec 100644 --- a/deps/include/cppgc/platform.h +++ b/deps/include/cppgc/platform.h @@ -7,6 +7,7 @@ #include +#include "cppgc/source-location.h" 
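
// ---------------------------------------------------------------------------
// Aside: with the persistent.h change above, a Persistent handle registers
// the plain function pointer &TraceAsRoot (a TraceRootCallback) instead of a
// Visitor-based member function. A standalone sketch of that type-erased root
// registration pattern; RootRegistry and Handle are illustrative stand-ins.
// ---------------------------------------------------------------------------
#include <cstdio>
#include <vector>

class RootVisitor {
 public:
  void VisitRoot(const void* object) { std::printf("root %p\n", object); }
};

using TraceRootCallback = void (*)(RootVisitor&, const void* root);

struct RootRegistry {
  struct Node {
    const void* root;
    TraceRootCallback trace;
  };
  std::vector<Node> nodes;

  void Register(const void* root, TraceRootCallback trace) {
    nodes.push_back({root, trace});
  }
  // What an Iterate(RootVisitor&) pass would do: type-erased dispatch per root.
  void IterateAll(RootVisitor& v) {
    for (const Node& n : nodes) n.trace(v, n.root);
  }
};

// A Persistent<T>-like handle: casts itself back and visits its pointee.
struct Handle {
  const void* object;
  static void TraceAsRoot(RootVisitor& v, const void* self) {
    v.VisitRoot(static_cast<const Handle*>(self)->object);
  }
};

int main() {
  int heap_object = 0;
  Handle h{&heap_object};
  RootRegistry registry;
  registry.Register(&h, &Handle::TraceAsRoot);
  RootVisitor v;
  registry.IterateAll(v);
}
// ---------------------------------------------------------------------------
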
#include "v8-platform.h" // NOLINT(build/include_directory) #include "v8config.h" // NOLINT(build/include_directory) @@ -32,8 +33,9 @@ class V8_EXPORT Platform { virtual ~Platform() = default; /** - * Returns the allocator used by cppgc to allocate its heap and various - * support structures. + * \returns the allocator used by cppgc to allocate its heap and various + * support structures. Returning nullptr results in using the `PageAllocator` + * provided by `cppgc::InitializeProcess()` instead. */ virtual PageAllocator* GetPageAllocator() = 0; @@ -131,10 +133,11 @@ class V8_EXPORT Platform { * * Can be called multiple times when paired with `ShutdownProcess()`. * - * \param page_allocator The allocator used for maintaining meta data. Must not - * change between multiple calls to InitializeProcess. + * \param page_allocator The allocator used for maintaining meta data. Must stay + * always alive and not change between multiple calls to InitializeProcess. If + * no allocator is provided, a default internal version will be used. */ -V8_EXPORT void InitializeProcess(PageAllocator* page_allocator); +V8_EXPORT void InitializeProcess(PageAllocator* page_allocator = nullptr); /** * Must be called after destroying the last used heap. Some process-global @@ -145,7 +148,8 @@ V8_EXPORT void ShutdownProcess(); namespace internal { -V8_EXPORT void Abort(); +V8_EXPORT void Fatal(const std::string& reason = std::string(), + const SourceLocation& = SourceLocation::Current()); } // namespace internal diff --git a/deps/include/cppgc/prefinalizer.h b/deps/include/cppgc/prefinalizer.h index 6153b37f..51f2eac8 100644 --- a/deps/include/cppgc/prefinalizer.h +++ b/deps/include/cppgc/prefinalizer.h @@ -6,23 +6,17 @@ #define INCLUDE_CPPGC_PREFINALIZER_H_ #include "cppgc/internal/compiler-specific.h" -#include "cppgc/internal/prefinalizer-handler.h" #include "cppgc/liveness-broker.h" namespace cppgc { namespace internal { -template -class PrefinalizerRegistration final { +class V8_EXPORT PrefinalizerRegistration final { public: - explicit PrefinalizerRegistration(T* self) { - static_assert(sizeof(&T::InvokePreFinalizer) > 0, - "USING_PRE_FINALIZER(T) must be defined."); + using Callback = bool (*)(const cppgc::LivenessBroker&, void*); - cppgc::internal::PreFinalizerRegistrationDispatcher::RegisterPrefinalizer( - {self, T::InvokePreFinalizer}); - } + PrefinalizerRegistration(void*, Callback); void* operator new(size_t, void* location) = delete; void* operator new(size_t) = delete; @@ -30,6 +24,35 @@ class PrefinalizerRegistration final { } // namespace internal +/** + * Macro must be used in the private section of `Class` and registers a + * prefinalization callback `void Class::PreFinalizer()`. The callback is + * invoked on garbage collection after the collector has found an object to be + * dead. + * + * Callback properties: + * - The callback is invoked before a possible destructor for the corresponding + * object. + * - The callback may access the whole object graph, irrespective of whether + * objects are considered dead or alive. + * - The callback is invoked on the same thread as the object was created on. 
+ * + * Example: + * \code + * class WithPrefinalizer : public GarbageCollected { + * CPPGC_USING_PRE_FINALIZER(WithPrefinalizer, Dispose); + * + * public: + * void Trace(Visitor*) const {} + * void Dispose() { prefinalizer_called = true; } + * ~WithPrefinalizer() { + * // prefinalizer_called == true + * } + * private: + * bool prefinalizer_called = false; + * }; + * \endcode + */ #define CPPGC_USING_PRE_FINALIZER(Class, PreFinalizer) \ public: \ static bool InvokePreFinalizer(const cppgc::LivenessBroker& liveness_broker, \ @@ -43,8 +66,8 @@ class PrefinalizerRegistration final { } \ \ private: \ - CPPGC_NO_UNIQUE_ADDRESS cppgc::internal::PrefinalizerRegistration \ - prefinalizer_dummy_{this}; \ + CPPGC_NO_UNIQUE_ADDRESS cppgc::internal::PrefinalizerRegistration \ + prefinalizer_dummy_{this, Class::InvokePreFinalizer}; \ static_assert(true, "Force semicolon.") } // namespace cppgc diff --git a/deps/include/cppgc/sentinel-pointer.h b/deps/include/cppgc/sentinel-pointer.h index b049d1a2..8dbbab0e 100644 --- a/deps/include/cppgc/sentinel-pointer.h +++ b/deps/include/cppgc/sentinel-pointer.h @@ -13,9 +13,9 @@ namespace internal { // Special tag type used to denote some sentinel member. The semantics of the // sentinel is defined by the embedder. struct SentinelPointer { + static constexpr intptr_t kSentinelValue = 0b10; template operator T*() const { - static constexpr intptr_t kSentinelValue = 1; return reinterpret_cast(kSentinelValue); } // Hidden friends. diff --git a/deps/include/cppgc/testing.h b/deps/include/cppgc/testing.h index 229ce140..bddd1fc1 100644 --- a/deps/include/cppgc/testing.h +++ b/deps/include/cppgc/testing.h @@ -19,8 +19,13 @@ class HeapHandle; namespace testing { /** - * Overrides the state of the stack with the provided value. Takes precedence - * over other parameters that set the stack state. Must no be nested. + * Overrides the state of the stack with the provided value. Parameters passed + * to explicit garbage collection calls still take precedence. Must not be + * nested. + * + * This scope is useful to make the garbage collector consider the stack when + * tasks that invoke garbage collection (through the provided platform) contain + * interesting pointers on its stack. */ class V8_EXPORT V8_NODISCARD OverrideEmbedderStackStateScope final { CPPGC_STACK_ALLOCATED(); @@ -93,6 +98,8 @@ class V8_EXPORT StandaloneTestingHeap final { HeapHandle& heap_handle_; }; +V8_EXPORT bool IsHeapObjectOld(void*); + } // namespace testing } // namespace cppgc diff --git a/deps/include/cppgc/trace-trait.h b/deps/include/cppgc/trace-trait.h index 83619b1d..694fbfdc 100644 --- a/deps/include/cppgc/trace-trait.h +++ b/deps/include/cppgc/trace-trait.h @@ -16,6 +16,10 @@ class Visitor; namespace internal { +class RootVisitor; + +using TraceRootCallback = void (*)(RootVisitor&, const void* object); + // Implementation of the default TraceTrait handling GarbageCollected and // GarbageCollectedMixin. template -struct make_void { - typedef void type; -}; -template -using void_t = typename make_void::type; - // Not supposed to be specialized by the user. 
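
// ---------------------------------------------------------------------------
// Aside: SentinelPointer above encodes a dedicated non-null tag value (now
// 0b10) that implicitly converts to any T*, letting embedders distinguish a
// deliberate marker from a cleared pointer. A standalone copy of the idea
// with an illustrative Sentinel type:
// ---------------------------------------------------------------------------
#include <cassert>
#include <cstdint>

struct Sentinel {
  static constexpr intptr_t kSentinelValue = 0b10;
  template <typename T>
  operator T*() const {
    return reinterpret_cast<T*>(kSentinelValue);
  }
};
constexpr Sentinel kSentinelPtr{};

int main() {
  int* p = kSentinelPtr;  // converts to any pointer type
  assert(p != nullptr);   // distinct from null...
  // ...and recognizable by its tag value.
  assert(reinterpret_cast<intptr_t>(p) == Sentinel::kSentinelValue);
}
// ---------------------------------------------------------------------------
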
template struct IsWeak : std::false_type {}; @@ -42,7 +34,7 @@ template struct IsTraceMethodConst : std::false_type {}; template -struct IsTraceMethodConst().Trace( +struct IsTraceMethodConst().Trace( std::declval()))>> : std::true_type { }; @@ -53,7 +45,7 @@ struct IsTraceable : std::false_type { template struct IsTraceable< - T, void_t().Trace(std::declval()))>> + T, std::void_t().Trace(std::declval()))>> : std::true_type { // All Trace methods should be marked as const. If an object of type // 'T' is traceable then any object of type 'const T' should also @@ -72,8 +64,8 @@ struct HasGarbageCollectedMixinTypeMarker : std::false_type { template struct HasGarbageCollectedMixinTypeMarker< - T, - void_t::IsGarbageCollectedMixinTypeMarker>> + T, std::void_t< + typename std::remove_const_t::IsGarbageCollectedMixinTypeMarker>> : std::true_type { static_assert(sizeof(T), "T must be fully defined"); }; @@ -85,7 +77,8 @@ struct HasGarbageCollectedTypeMarker : std::false_type { template struct HasGarbageCollectedTypeMarker< - T, void_t::IsGarbageCollectedTypeMarker>> + T, + std::void_t::IsGarbageCollectedTypeMarker>> : std::true_type { static_assert(sizeof(T), "T must be fully defined"); }; @@ -177,6 +170,15 @@ struct IsComplete { decltype(IsSizeOfKnown(std::declval()))::value; }; +template +constexpr bool IsDecayedSameV = + std::is_same_v, std::decay_t>; + +template +constexpr bool IsStrictlyBaseOfV = + std::is_base_of_v, std::decay_t> && + !IsDecayedSameV; + } // namespace internal /** diff --git a/deps/include/cppgc/visitor.h b/deps/include/cppgc/visitor.h index 57e2ce39..f7ebc1d0 100644 --- a/deps/include/cppgc/visitor.h +++ b/deps/include/cppgc/visitor.h @@ -62,22 +62,6 @@ class V8_EXPORT Visitor { virtual ~Visitor() = default; - /** - * Trace method for raw pointers. Prefer the versions for managed pointers. - * - * \param member Reference retaining an object. - */ - template - void Trace(const T* t) { - static_assert(sizeof(T), "Pointee type must be fully defined."); - static_assert(internal::IsGarbageCollectedOrMixinType::value, - "T must be GarbageCollected or GarbageCollectedMixin type"); - if (!t) { - return; - } - Visit(t, TraceTrait::GetTraceDescriptor(t)); - } - /** * Trace method for Member. * @@ -87,7 +71,7 @@ class V8_EXPORT Visitor { void Trace(const Member& member) { const T* value = member.GetRawAtomic(); CPPGC_DCHECK(value != kSentinelPointer); - Trace(value); + TraceImpl(value); } /** @@ -231,23 +215,33 @@ class V8_EXPORT Visitor { void TraceStrongly(const WeakMember& weak_member) { const T* value = weak_member.GetRawAtomic(); CPPGC_DCHECK(value != kSentinelPointer); - Trace(value); + TraceImpl(value); + } + + /** + * Trace method for retaining containers strongly. + * + * \param object reference to the container. + */ + template + void TraceStrongContainer(const T* object) { + TraceImpl(object); } /** - * Trace method for weak containers. + * Trace method for retaining containers weakly. * - * \param object reference of the weak container. + * \param object reference to the container. * \param callback to be invoked. - * \param data custom data that is passed to the callback. + * \param callback_data custom data that is passed to the callback. 
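
// ---------------------------------------------------------------------------
// Aside: the type-traits changes above drop the hand-rolled void_t for
// std::void_t and add IsDecayedSameV / IsStrictlyBaseOfV, which the Member
// operators use to choose compressed vs. decompressed comparisons. A
// standalone sketch of both utilities (HasTrace is an illustrative detector,
// not the real IsTraceable):
// ---------------------------------------------------------------------------
#include <type_traits>
#include <utility>

// Detection idiom via std::void_t: does const T have Trace(int)?
template <typename T, typename = void>
struct HasTrace : std::false_type {};
template <typename T>
struct HasTrace<T, std::void_t<decltype(std::declval<const T&>().Trace(0))>>
    : std::true_type {};

template <typename T1, typename T2>
constexpr bool IsDecayedSameV =
    std::is_same_v<std::decay_t<T1>, std::decay_t<T2>>;

template <typename Base, typename Derived>
constexpr bool IsStrictlyBaseOfV =
    std::is_base_of_v<std::decay_t<Base>, std::decay_t<Derived>> &&
    !IsDecayedSameV<Base, Derived>;

struct Traced {
  void Trace(int) const {}
};
struct Plain {};
struct SubTraced : Traced {};

static_assert(HasTrace<Traced>::value);
static_assert(!HasTrace<Plain>::value);
static_assert(IsDecayedSameV<const Traced&, Traced>);
static_assert(IsStrictlyBaseOfV<Traced, SubTraced>);
static_assert(!IsStrictlyBaseOfV<Traced, Traced>);

int main() {}
// ---------------------------------------------------------------------------
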
*/ template void TraceWeakContainer(const T* object, WeakCallback callback, - const void* data) { + const void* callback_data) { if (!object) return; VisitWeakContainer(object, TraceTrait::GetTraceDescriptor(object), TraceTrait::GetWeakTraceDescriptor(object), callback, - data); + callback_data); } /** @@ -255,6 +249,7 @@ class V8_EXPORT Visitor { * compactable space. Such references maybe be arbitrarily moved by the GC. * * \param slot location of reference to object that might be moved by the GC. + * The slot must contain an uncompressed pointer. */ template void RegisterMovableReference(const T** slot) { @@ -297,9 +292,6 @@ class V8_EXPORT Visitor { virtual void Visit(const void* self, TraceDescriptor) {} virtual void VisitWeak(const void* self, TraceDescriptor, WeakCallback, const void* weak_member) {} - virtual void VisitRoot(const void*, TraceDescriptor, const SourceLocation&) {} - virtual void VisitWeakRoot(const void* self, TraceDescriptor, WeakCallback, - const void* weak_root, const SourceLocation&) {} virtual void VisitEphemeron(const void* key, const void* value, TraceDescriptor value_desc) {} virtual void VisitWeakContainer(const void* self, TraceDescriptor strong_desc, @@ -320,44 +312,20 @@ class V8_EXPORT Visitor { static void HandleWeak(const LivenessBroker& info, const void* object) { const PointerType* weak = static_cast(object); auto* raw_ptr = weak->GetFromGC(); - // Sentinel values are preserved for weak pointers. - if (raw_ptr == kSentinelPointer) return; if (!info.IsHeapObjectAlive(raw_ptr)) { weak->ClearFromGC(); } } - template * = nullptr> - void TraceRoot(const Persistent& p, const SourceLocation& loc) { - using PointeeType = typename Persistent::PointeeType; - static_assert(sizeof(PointeeType), - "Persistent's pointee type must be fully defined"); - static_assert(internal::IsGarbageCollectedOrMixinType::value, - "Persistent's pointee type must be GarbageCollected or " - "GarbageCollectedMixin"); - auto* ptr = p.GetFromGC(); - if (!ptr) { + template + void TraceImpl(const T* t) { + static_assert(sizeof(T), "Pointee type must be fully defined."); + static_assert(internal::IsGarbageCollectedOrMixinType::value, + "T must be GarbageCollected or GarbageCollectedMixin type"); + if (!t) { return; } - VisitRoot(ptr, TraceTrait::GetTraceDescriptor(ptr), loc); - } - - template < - typename WeakPersistent, - std::enable_if_t* = nullptr> - void TraceRoot(const WeakPersistent& p, const SourceLocation& loc) { - using PointeeType = typename WeakPersistent::PointeeType; - static_assert(sizeof(PointeeType), - "Persistent's pointee type must be fully defined"); - static_assert(internal::IsGarbageCollectedOrMixinType::value, - "Persistent's pointee type must be GarbageCollected or " - "GarbageCollectedMixin"); - static_assert(!internal::IsAllocatedOnCompactableSpace::value, - "Weak references to compactable objects are not allowed"); - auto* ptr = p.GetFromGC(); - VisitWeakRoot(ptr, TraceTrait::GetTraceDescriptor(ptr), - &HandleWeak, &p, loc); + Visit(t, TraceTrait::GetTraceDescriptor(t)); } #if V8_ENABLE_CHECKS @@ -374,6 +342,70 @@ class V8_EXPORT Visitor { friend class internal::VisitorBase; }; +namespace internal { + +class V8_EXPORT RootVisitor { + public: + explicit RootVisitor(Visitor::Key) {} + + virtual ~RootVisitor() = default; + + template * = nullptr> + void Trace(const AnyStrongPersistentType& p) { + using PointeeType = typename AnyStrongPersistentType::PointeeType; + const void* object = Extract(p); + if (!object) { + return; + } + VisitRoot(object, 
TraceTrait::GetTraceDescriptor(object), + p.Location()); + } + + template * = nullptr> + void Trace(const AnyWeakPersistentType& p) { + using PointeeType = typename AnyWeakPersistentType::PointeeType; + static_assert(!internal::IsAllocatedOnCompactableSpace::value, + "Weak references to compactable objects are not allowed"); + const void* object = Extract(p); + if (!object) { + return; + } + VisitWeakRoot(object, TraceTrait::GetTraceDescriptor(object), + &HandleWeak, &p, p.Location()); + } + + protected: + virtual void VisitRoot(const void*, TraceDescriptor, const SourceLocation&) {} + virtual void VisitWeakRoot(const void* self, TraceDescriptor, WeakCallback, + const void* weak_root, const SourceLocation&) {} + + private: + template + static const void* Extract(AnyPersistentType& p) { + using PointeeType = typename AnyPersistentType::PointeeType; + static_assert(sizeof(PointeeType), + "Persistent's pointee type must be fully defined"); + static_assert(internal::IsGarbageCollectedOrMixinType::value, + "Persistent's pointee type must be GarbageCollected or " + "GarbageCollectedMixin"); + return p.GetFromGC(); + } + + template + static void HandleWeak(const LivenessBroker& info, const void* object) { + const PointerType* weak = static_cast(object); + auto* raw_ptr = weak->GetFromGC(); + if (!info.IsHeapObjectAlive(raw_ptr)) { + weak->ClearFromGC(); + } + } +}; + +} // namespace internal } // namespace cppgc #endif // INCLUDE_CPPGC_VISITOR_H_ diff --git a/deps/include/js_protocol-1.3.json b/deps/include/js_protocol-1.3.json index ea573d11..a998d461 100644 --- a/deps/include/js_protocol-1.3.json +++ b/deps/include/js_protocol-1.3.json @@ -946,34 +946,6 @@ { "name": "url", "type": "string", "description": "JavaScript script name or url." }, { "name": "functions", "type": "array", "items": { "$ref": "FunctionCoverage" }, "description": "Functions contained in the script that has coverage data." } ] - }, - { "id": "TypeObject", - "type": "object", - "description": "Describes a type collected during runtime.", - "properties": [ - { "name": "name", "type": "string", "description": "Name of a type collected with type profiling." } - ], - "experimental": true - }, - { "id": "TypeProfileEntry", - "type": "object", - "description": "Source offset and types for a parameter or return value.", - "properties": [ - { "name": "offset", "type": "integer", "description": "Source offset of the parameter or end of function for return values." }, - { "name": "types", "type": "array", "items": {"$ref": "TypeObject"}, "description": "The types for this parameter or return value."} - ], - "experimental": true - }, - { - "id": "ScriptTypeProfile", - "type": "object", - "description": "Type profile data collected during runtime for a JavaScript script.", - "properties": [ - { "name": "scriptId", "$ref": "Runtime.ScriptId", "description": "JavaScript script id." }, - { "name": "url", "type": "string", "description": "JavaScript script name or url." }, - { "name": "entries", "type": "array", "items": { "$ref": "TypeProfileEntry" }, "description": "Type profile entries for parameters and return values of the functions in the script." } - ], - "experimental": true } ], "commands": [ @@ -1024,24 +996,6 @@ { "name": "result", "type": "array", "items": { "$ref": "ScriptCoverage" }, "description": "Coverage data for the current isolate." } ], "description": "Collect coverage data for the current isolate. The coverage data may be incomplete due to garbage collection." 
- }, - { - "name": "startTypeProfile", - "description": "Enable type profile.", - "experimental": true - }, - { - "name": "stopTypeProfile", - "description": "Disable type profile. Disabling releases type profile data collected so far.", - "experimental": true - }, - { - "name": "takeTypeProfile", - "returns": [ - { "name": "result", "type": "array", "items": { "$ref": "ScriptTypeProfile" }, "description": "Type profile for all scripts since startTypeProfile() was turned on." } - ], - "description": "Collect type profile.", - "experimental": true } ], "events": [ diff --git a/deps/include/js_protocol.pdl b/deps/include/js_protocol.pdl index b34c8551..6efcf787 100644 --- a/deps/include/js_protocol.pdl +++ b/deps/include/js_protocol.pdl @@ -104,13 +104,20 @@ domain Debugger # Location in the source code. Location location # JavaScript script name or url. - string url + # Deprecated in favor of using the `location.scriptId` to resolve the URL via a previously + # sent `Debugger.scriptParsed` event. + deprecated string url # Scope chain for this call frame. array of Scope scopeChain # `this` object for this call frame. Runtime.RemoteObject this # The value being returned, if the function is at return point. optional Runtime.RemoteObject returnValue + # Valid only while the VM is paused and indicates whether this frame + # can be restarted or not. Note that a `true` value here does not + # guarantee that Debugger#restartFrame with this CallFrameId will be + # successful, but it is very likely. + experimental optional boolean canBeRestarted # Scope description. type Scope extends object @@ -237,6 +244,40 @@ domain Debugger # Wasm bytecode. optional binary bytecode + experimental type WasmDisassemblyChunk extends object + properties + # The next chunk of disassembled lines. + array of string lines + # The bytecode offsets describing the start of each line. + array of integer bytecodeOffsets + + experimental command disassembleWasmModule + parameters + # Id of the script to disassemble + Runtime.ScriptId scriptId + returns + # For large modules, return a stream from which additional chunks of + # disassembly can be read successively. + optional string streamId + # The total number of lines in the disassembly text. + integer totalNumberOfLines + # The offsets of all function bodies, in the format [start1, end1, + # start2, end2, ...] where all ends are exclusive. + array of integer functionBodyOffsets + # The first chunk of disassembly. + WasmDisassemblyChunk chunk + + # Disassemble the next chunk of lines for the module corresponding to the + # stream. If disassembly is complete, this API will invalidate the streamId + # and return an empty chunk. Any subsequent calls for the now invalid stream + # will return errors. + experimental command nextWasmDisassemblyChunk + parameters + string streamId + returns + # The next chunk of disassembly. + WasmDisassemblyChunk chunk + # This command is deprecated. Use getScriptSource instead. deprecated command getWasmBytecode parameters @@ -266,18 +307,35 @@ domain Debugger parameters BreakpointId breakpointId - # Restarts particular call frame from the beginning. - deprecated command restartFrame + # Restarts particular call frame from the beginning. The old, deprecated + # behavior of `restartFrame` is to stay paused and allow further CDP commands + # after a restart was scheduled. This can cause problems with restarting, so + # we now continue execution immediatly after it has been scheduled until we + # reach the beginning of the restarted frame. 
+ # + # To stay backwards compatible, `restartFrame` now expects a `mode` + # parameter to be present. If the `mode` parameter is missing, `restartFrame` + # errors out. + # + # The various return values are deprecated and `callFrames` is always empty. + # Use the call frames from the `Debugger#paused` events instead, which fire + # once V8 pauses at the beginning of the restarted function. + command restartFrame parameters # Call frame identifier to evaluate on. CallFrameId callFrameId + # The `mode` parameter must be present and set to 'StepInto', otherwise + # `restartFrame` will error out. + experimental optional enum mode + # Pause at the beginning of the restarted function + StepInto returns # New stack trace. - array of CallFrame callFrames + deprecated array of CallFrame callFrames # Async stack trace, if any. - optional Runtime.StackTrace asyncStackTrace + deprecated optional Runtime.StackTrace asyncStackTrace # Async stack trace, if any. - experimental optional Runtime.StackTraceId asyncStackTraceId + deprecated optional Runtime.StackTraceId asyncStackTraceId # Resumes JavaScript execution. command resume @@ -400,13 +458,14 @@ domain Debugger # New value for breakpoints active state. boolean active - # Defines pause on exceptions state. Can be set to stop on all exceptions, uncaught exceptions or - # no exceptions. Initial pause on exceptions state is `none`. + # Defines pause on exceptions state. Can be set to stop on all exceptions, uncaught exceptions, + # caught exceptions, or no exceptions. Initial pause on exceptions state is `none`. command setPauseOnExceptions parameters # Pause on exceptions mode. enum state none + caught uncaught all @@ -417,6 +476,12 @@ domain Debugger Runtime.CallArgument newValue # Edits JavaScript source live. + # + # In general, functions that are currently on the stack cannot be edited, with + # a single exception: If the edited function is the top-most stack frame and + # that is the only activation of that function on the stack. In this case + # the live edit will be successful and a `Debugger.restartFrame` for the + # top-most function is automatically triggered. command setScriptSource parameters # Id of the script to edit. @@ -426,16 +491,27 @@ domain Debugger # If true the change will not actually be applied. Dry run may be used to get result # description without actually modifying the code. optional boolean dryRun + # If true, then `scriptSource` is allowed to change the function on top of the stack + # as long as the top-most stack frame is the only activation of that function. + experimental optional boolean allowTopFrameEditing returns # New stack trace in case editing has happened while VM was stopped. - optional array of CallFrame callFrames + deprecated optional array of CallFrame callFrames # Whether current call stack was modified after applying the changes. - optional boolean stackChanged + deprecated optional boolean stackChanged # Async stack trace, if any. - optional Runtime.StackTrace asyncStackTrace + deprecated optional Runtime.StackTrace asyncStackTrace # Async stack trace, if any. - experimental optional Runtime.StackTraceId asyncStackTraceId - # Exception details if any. + deprecated optional Runtime.StackTraceId asyncStackTraceId + # Whether the operation was successful or not. Only `Ok` denotes a + # successful live edit while the other enum variants denote why + # the live edit failed. + experimental enum status + Ok + CompileError + BlockedByActiveGenerator + BlockedByActiveFunction + # Exception details if any.
Only present when `status` is `CompileError`. optional Runtime.ExceptionDetails exceptionDetails # Makes page not interrupt on any pauses (breakpoint, exception, dom exception etc). @@ -552,7 +628,7 @@ domain Debugger integer endColumn # Specifies script creation context. Runtime.ExecutionContextId executionContextId - # Content hash of the script. + # Content hash of the script, SHA-256. string hash # Embedder-specific auxiliary data. optional object executionContextAuxData @@ -591,7 +667,7 @@ domain Debugger integer endColumn # Specifies script creation context. Runtime.ExecutionContextId executionContextId - # Content hash of the script. + # Content hash of the script, SHA-256. string hash # Embedder-specific auxiliary data. optional object executionContextAuxData @@ -691,6 +767,22 @@ experimental domain HeapProfiler # Average sample interval in bytes. Poisson distribution is used for the intervals. The # default value is 32768 bytes. optional number samplingInterval + # By default, the sampling heap profiler reports only objects which are + # still alive when the profile is returned via getSamplingProfile or + # stopSampling, which is useful for determining what functions contribute + # the most to steady-state memory usage. This flag instructs the sampling + # heap profiler to also include information about objects discarded by + # major GC, which will show which functions cause large temporary memory + # usage or long GC pauses. + optional boolean includeObjectsCollectedByMajorGC + # By default, the sampling heap profiler reports only objects which are + # still alive when the profile is returned via getSamplingProfile or + # stopSampling, which is useful for determining what functions contribute + # the most to steady-state memory usage. This flag instructs the sampling + # heap profiler to also include information about objects discarded by + # minor GC, which is useful when tuning a latency-sensitive application + # for minimal GC activity. + optional boolean includeObjectsCollectedByMinorGC command startTrackingHeapObjects parameters @@ -706,18 +798,24 @@ experimental domain HeapProfiler # If true 'reportHeapSnapshotProgress' events will be generated while snapshot is being taken # when the tracking is stopped. optional boolean reportProgress - optional boolean treatGlobalObjectsAsRoots + # Deprecated in favor of `exposeInternals`. + deprecated optional boolean treatGlobalObjectsAsRoots # If true, numerical values are included in the snapshot optional boolean captureNumericValue + # If true, exposes internals of the snapshot. + experimental optional boolean exposeInternals command takeHeapSnapshot parameters # If true 'reportHeapSnapshotProgress' events will be generated while snapshot is being taken. optional boolean reportProgress - # If true, a raw snapshot without artificial roots will be generated - optional boolean treatGlobalObjectsAsRoots + # If true, a raw snapshot without artificial roots will be generated. + # Deprecated in favor of `exposeInternals`. + deprecated optional boolean treatGlobalObjectsAsRoots # If true, numerical values are included in the snapshot optional boolean captureNumericValue + # If true, exposes internals of the snapshot. + experimental optional boolean exposeInternals event addHeapSnapshotChunk parameters @@ -821,30 +919,6 @@ domain Profiler # Functions contained in the script that has coverage data. array of FunctionCoverage functions - # Describes a type collected during runtime. 
- experimental type TypeObject extends object - properties - # Name of a type collected with type profiling. - string name - - # Source offset and types for a parameter or return value. - experimental type TypeProfileEntry extends object - properties - # Source offset of the parameter or end of function for return values. - integer offset - # The types for this parameter or return value. - array of TypeObject types - - # Type profile data collected during runtime for a JavaScript script. - experimental type ScriptTypeProfile extends object - properties - # JavaScript script id. - Runtime.ScriptId scriptId - # JavaScript script name or url. - string url - # Type profile entries for parameters and return values of the functions in the script. - array of TypeProfileEntry entries - command disable command enable @@ -879,9 +953,6 @@ domain Profiler # Monotonically increasing time (in seconds) when the coverage update was taken in the backend. number timestamp - # Enable type profile. - experimental command startTypeProfile - command stop returns # Recorded profile. @@ -891,9 +962,6 @@ domain Profiler # executing optimized code. command stopPreciseCoverage - # Disable type profile. Disabling releases type profile data collected so far. - experimental command stopTypeProfile - # Collect coverage data for the current isolate, and resets execution counters. Precise code # coverage needs to have started. command takePreciseCoverage @@ -903,12 +971,6 @@ domain Profiler # Monotonically increasing time (in seconds) when the coverage update was taken in the backend. number timestamp - # Collect type profile. - experimental command takeTypeProfile - returns - # Type profile for all scripts since startTypeProfile() was turned on. - array of ScriptTypeProfile result - event consoleProfileFinished parameters string id @@ -950,6 +1012,37 @@ domain Runtime # Unique script identifier. type ScriptId extends string + # Represents the value serialized by the WebDriver BiDi specification + # https://w3c.github.io/webdriver-bidi. + type WebDriverValue extends object + properties + enum type + undefined + null + string + number + boolean + bigint + regexp + date + symbol + array + object + function + map + set + weakmap + weakset + error + proxy + promise + typedarray + arraybuffer + node + window + optional any value + optional string objectId + # Unique object identifier. type RemoteObjectId extends string @@ -1002,6 +1095,8 @@ domain Runtime optional UnserializableValue unserializableValue # String representation of the object. optional string description + # WebDriver BiDi representation of the value. + experimental optional WebDriverValue webDriverValue # Unique object identifier (for non-primitive values). optional RemoteObjectId objectId # Preview containing abbreviated property values. Specified for `object` type values only. @@ -1307,6 +1402,10 @@ domain Runtime optional string objectGroup # Whether to throw an exception if side effect cannot be ruled out during evaluation. experimental optional boolean throwOnSideEffect + # Whether the result should contain `webDriverValue`, serialized according to + # https://w3c.github.io/webdriver-bidi. This is mutually exclusive with `returnByValue`, but + # resulting `objectId` is still provided. + experimental optional boolean generateWebDriverValue returns # Call result. RemoteObject result @@ -1392,6 +1491,8 @@ domain Runtime # boundaries). # This is mutually exclusive with `contextId`.
experimental optional string uniqueContextId + # Whether the result should be serialized according to https://w3c.github.io/webdriver-bidi. + experimental optional boolean generateWebDriverValue returns # Evaluation result. RemoteObject result @@ -1550,6 +1651,18 @@ domain Runtime parameters string name + # This method tries to lookup and populate exception details for a + # JavaScript Error object. + # Note that the stackTrace portion of the resulting exceptionDetails will + # only be populated if the Runtime domain was enabled at the time when the + # Error was thrown. + experimental command getExceptionDetails + parameters + # The error object for which to resolve the exception details. + RemoteObjectId errorObjectId + returns + optional ExceptionDetails exceptionDetails + # Notification is issued every time when binding is called. experimental event bindingCalled parameters diff --git a/deps/include/libplatform/libplatform.h b/deps/include/libplatform/libplatform.h index fb79bcfe..9ec60c04 100644 --- a/deps/include/libplatform/libplatform.h +++ b/deps/include/libplatform/libplatform.h @@ -89,17 +89,6 @@ V8_PLATFORM_EXPORT void RunIdleTasks(v8::Platform* platform, v8::Isolate* isolate, double idle_time_in_seconds); -/** - * Attempts to set the tracing controller for the given platform. - * - * The |platform| has to be created using |NewDefaultPlatform|. - * - */ -V8_DEPRECATED("Access the DefaultPlatform directly") -V8_PLATFORM_EXPORT void SetTracingController( - v8::Platform* platform, - v8::platform::tracing::TracingController* tracing_controller); - /** * Notifies the given platform about the Isolate getting deleted soon. Has to be * called for all Isolates which are deleted - unless we're shutting down the diff --git a/deps/include/libplatform/v8-tracing.h b/deps/include/libplatform/v8-tracing.h index c7a5c4f9..12489327 100644 --- a/deps/include/libplatform/v8-tracing.h +++ b/deps/include/libplatform/v8-tracing.h @@ -37,7 +37,6 @@ const int kTraceMaxNumArgs = 2; class V8_PLATFORM_EXPORT TraceObject { public: union ArgValue { - V8_DEPRECATED("use as_uint ? true : false") bool as_bool; uint64_t as_uint; int64_t as_int; double as_double; diff --git a/deps/include/v8-array-buffer.h b/deps/include/v8-array-buffer.h index 0ce2b653..841bd02a 100644 --- a/deps/include/v8-array-buffer.h +++ b/deps/include/v8-array-buffer.h @@ -175,8 +175,8 @@ class V8_EXPORT ArrayBuffer : public Object { /** * Convenience allocator. * - * When the virtual memory cage is enabled, this allocator will allocate its - * backing memory inside the cage. Otherwise, it will rely on malloc/free. + * When the sandbox is enabled, this allocator will allocate its backing + * memory inside the sandbox. Otherwise, it will rely on malloc/free. * * Caller takes ownership, i.e. the returned object needs to be freed using * |delete allocator| once it is no longer in use. @@ -240,22 +240,54 @@ class V8_EXPORT ArrayBuffer : public Object { */ bool IsDetachable() const; + /** + * Returns true if this ArrayBuffer has been detached. + */ + bool WasDetached() const; + /** * Detaches this ArrayBuffer and all its views (typed arrays). * Detaching sets the byte length of the buffer and all typed arrays to zero, * preventing JavaScript from ever accessing underlying backing store. * ArrayBuffer should have been externalized and must be detachable. 
*/ + V8_DEPRECATE_SOON( + "Use the version which takes a key parameter (passing a null handle is " + "ok).") void Detach(); + /** + * Detaches this ArrayBuffer and all its views (typed arrays). + * Detaching sets the byte length of the buffer and all typed arrays to zero, + * preventing JavaScript from ever accessing underlying backing store. + * ArrayBuffer should have been externalized and must be detachable. Returns + * Nothing<bool>() if the key didn't pass the [[ArrayBufferDetachKey]] check, + * Just(true) otherwise. + */ + V8_WARN_UNUSED_RESULT Maybe<bool> Detach(v8::Local<Value> key); + + /** + * Sets the ArrayBufferDetachKey. + */ + void SetDetachKey(v8::Local<Value> key); + /** * Get a shared pointer to the backing store of this array buffer. This * pointer coordinates the lifetime management of the internal storage * with any live ArrayBuffers on the heap, even across isolates. The embedder * should not attempt to manage lifetime of the storage through other means. + * + * The returned shared pointer will not be empty, even if the ArrayBuffer has + * been detached. Use |WasDetached| to tell if it has been detached instead. */ std::shared_ptr<BackingStore> GetBackingStore(); + /** + * More efficient shortcut for GetBackingStore()->Data(). The returned pointer + * is valid as long as the ArrayBuffer is alive. + */ + void* Data() const; + V8_INLINE static ArrayBuffer* Cast(Value* value) { #ifdef V8_ENABLE_CHECKS CheckCast(value); @@ -414,6 +446,12 @@ class V8_EXPORT SharedArrayBuffer : public Object { */ std::shared_ptr<BackingStore> GetBackingStore(); + /** + * More efficient shortcut for GetBackingStore()->Data(). The returned pointer + * is valid as long as the ArrayBuffer is alive. + */ + void* Data() const; + V8_INLINE static SharedArrayBuffer* Cast(Value* value) { #ifdef V8_ENABLE_CHECKS CheckCast(value); diff --git a/deps/include/v8-callbacks.h b/deps/include/v8-callbacks.h index 870df6a8..0ffdfb66 100644 --- a/deps/include/v8-callbacks.h +++ b/deps/include/v8-callbacks.h @@ -12,6 +12,7 @@ #include "cppgc/common.h" #include "v8-data.h" // NOLINT(build/include_directory) #include "v8-local-handle.h" // NOLINT(build/include_directory) +#include "v8-promise.h" // NOLINT(build/include_directory) #include "v8config.h" // NOLINT(build/include_directory) #if defined(V8_OS_WIN) @@ -105,7 +106,7 @@ struct JitCodeEvent { size_t line_number_table_size; }; - wasm_source_info_t* wasm_source_info; + wasm_source_info_t* wasm_source_info = nullptr; union { // Only valid for CODE_ADDED.
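The key-based detach protocol above is easiest to see end to end. A minimal sketch, assuming an isolate with an entered context; the key string is arbitrary:

  v8::Local<v8::ArrayBuffer> buffer = v8::ArrayBuffer::New(isolate, 1024);
  v8::Local<v8::String> key =
      v8::String::NewFromUtf8Literal(isolate, "detach-key");
  buffer->SetDetachKey(key);                 // install [[ArrayBufferDetachKey]]
  void* contents = buffer->Data();           // shortcut for GetBackingStore()->Data()
  v8::Maybe<bool> ok = buffer->Detach(key);  // key passes the check, so Just(true)
  bool gone = buffer->WasDetached();         // true from here on

Per the comments above, a key that fails the [[ArrayBufferDetachKey]] check makes Detach return Nothing<bool>() instead, and GetBackingStore() still returns a non-empty pointer after detaching, which is why WasDetached() is the right probe.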
@@ -148,11 +149,13 @@ using JitCodeEventHandler = void (*)(const JitCodeEvent* event); */ enum GCType { kGCTypeScavenge = 1 << 0, - kGCTypeMarkSweepCompact = 1 << 1, - kGCTypeIncrementalMarking = 1 << 2, - kGCTypeProcessWeakCallbacks = 1 << 3, - kGCTypeAll = kGCTypeScavenge | kGCTypeMarkSweepCompact | - kGCTypeIncrementalMarking | kGCTypeProcessWeakCallbacks + kGCTypeMinorMarkCompact = 1 << 1, + kGCTypeMarkSweepCompact = 1 << 2, + kGCTypeIncrementalMarking = 1 << 3, + kGCTypeProcessWeakCallbacks = 1 << 4, + kGCTypeAll = kGCTypeScavenge | kGCTypeMinorMarkCompact | + kGCTypeMarkSweepCompact | kGCTypeIncrementalMarking | + kGCTypeProcessWeakCallbacks }; /** @@ -214,7 +217,13 @@ using AddHistogramSampleCallback = void (*)(void* histogram, int sample); using FatalErrorCallback = void (*)(const char* location, const char* message); -using OOMErrorCallback = void (*)(const char* location, bool is_heap_oom); +struct OOMDetails { + bool is_heap_oom = false; + const char* detail = nullptr; +}; + +using OOMErrorCallback = void (*)(const char* location, + const OOMDetails& details); using MessageCallback = void (*)(Local<Message> message, Local<Value> data); @@ -228,9 +237,13 @@ using LogEventCallback = void (*)(const char* name, enum class CrashKeyId { kIsolateAddress, kReadonlySpaceFirstPageAddress, - kMapSpaceFirstPageAddress, + kMapSpaceFirstPageAddress V8_ENUM_DEPRECATE_SOON("Map space got removed"), + kOldSpaceFirstPageAddress, + kCodeRangeBaseAddress, kCodeSpaceFirstPageAddress, kDumpType, + kSnapshotChecksumCalculated, + kSnapshotChecksumExpected, }; using AddCrashKeyCallback = void (*)(CrashKeyId id, const std::string& value); @@ -298,6 +311,13 @@ using ApiImplementationCallback = void (*)(const FunctionCallbackInfo<Value>&); // --- Callback for WebAssembly.compileStreaming --- using WasmStreamingCallback = void (*)(const FunctionCallbackInfo<Value>&); +enum class WasmAsyncSuccess { kSuccess, kFail }; + +// --- Callback called when async WebAssembly operations finish --- +using WasmAsyncResolvePromiseCallback = void (*)( + Isolate* isolate, Local<Context> context, Local<Promise::Resolver> resolver, + Local<Value> result, WasmAsyncSuccess success); + // --- Callback for loading source map file for Wasm profiling support --- using WasmLoadSourceMapCallback = Local<String> (*)(Isolate* isolate, const char* name); @@ -308,15 +328,12 @@ using WasmSimdEnabledCallback = bool (*)(Local<Context> context); // --- Callback for checking if WebAssembly exceptions are enabled --- using WasmExceptionsEnabledCallback = bool (*)(Local<Context> context); -// --- Callback for checking if WebAssembly dynamic tiering is enabled --- -using WasmDynamicTieringEnabledCallback = bool (*)(Local<Context> context); - // --- Callback for checking if the SharedArrayBuffer constructor is enabled --- using SharedArrayBufferConstructorEnabledCallback = bool (*)(Local<Context> context); /** - * HostImportModuleDynamicallyWithImportAssertionsCallback is called when we + * HostImportModuleDynamicallyCallback is called when we * require the embedder to load a module. This is used as part of the dynamic * import syntax.
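The OOMErrorCallback change above is mechanical for embedders: the old bool is_heap_oom argument moved into OOMDetails alongside an optional detail string. A sketch of an updated handler; the registration call referenced in the comment is the one added to v8-initialization.h further down in this diff:

  #include <cstdio>
  #include <cstdlib>

  void OnFatalOOM(const char* location, const v8::OOMDetails& details) {
    std::fprintf(stderr, "V8 OOM at %s (heap OOM: %s): %s\n", location,
                 details.is_heap_oom ? "yes" : "no",
                 details.detail != nullptr ? details.detail : "no detail");
    std::abort();  // V8 is about to terminate either way
  }

  // v8::V8::SetFatalMemoryErrorCallback(OnFatalOOM);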
* @@ -346,6 +363,10 @@ using HostImportModuleDynamicallyWithImportAssertionsCallback = MaybeLocal<Promise> (*)(Local<Context> context, Local<ScriptOrModule> referrer, Local<String> specifier, Local<FixedArray> import_assertions); +using HostImportModuleDynamicallyCallback = MaybeLocal<Promise> (*)( + Local<Context> context, Local<Data> host_defined_options, + Local<Value> resource_name, Local<String> specifier, + Local<FixedArray> import_assertions); /** * HostInitializeImportMetaObjectCallback is called the first time import.meta @@ -361,6 +382,20 @@ using HostInitializeImportMetaObjectCallback = void (*)(Local<Context> context, Local<Module> module, Local<Object> meta); +/** + * HostCreateShadowRealmContextCallback is called each time a ShadowRealm is + * being constructed in the initiator_context. + * + * The method combines Context creation and implementation defined abstract + * operation HostInitializeShadowRealm into one. + * + * The embedder should use v8::Context::New or v8::Context::NewFromSnapshot to + * create a new context. If the creation fails, the embedder must propagate + * that exception by returning an empty MaybeLocal. + */ +using HostCreateShadowRealmContextCallback = + MaybeLocal<Context> (*)(Local<Context> initiator_context); + /** * PrepareStackTraceCallback is called when the stack property of an error is * first accessed. The return value will be used as the stack value. If this diff --git a/deps/include/v8-context.h b/deps/include/v8-context.h index d398ac4b..3ce0eb0a 100644 --- a/deps/include/v8-context.h +++ b/deps/include/v8-context.h @@ -169,6 +169,9 @@ class V8_EXPORT Context : public Data { /** Returns the microtask queue associated with a current context. */ MicrotaskQueue* GetMicrotaskQueue(); + /** Sets the microtask queue associated with the current context. */ + void SetMicrotaskQueue(MicrotaskQueue* queue); + /** * The field at kDebugIdIndex used to be reserved for the inspector. * It now serves no purpose. @@ -244,6 +247,12 @@ */ void SetErrorMessageForCodeGenerationFromStrings(Local<String> message); + /** + * Sets the error description for the exception that is thrown when + * wasm code generation is not allowed. + */ + void SetErrorMessageForWasmCodeGeneration(Local<String> message); + /** * Return data that was previously attached to the context snapshot via * SnapshotCreator, and removes the reference to it. @@ -284,6 +293,7 @@ Local<Function> after_hook, Local<Function> resolve_hook); + bool HasTemplateLiteralObject(Local<Value> object); /** * Stack-allocated class which sets the execution context for all * operations executed within a local scope. @@ -313,17 +323,6 @@ explicit BackupIncumbentScope(Local<Context> backup_incumbent_context); ~BackupIncumbentScope(); - /** - * Returns address that is comparable with JS stack address. Note that JS - * stack may be allocated separately from the native stack. See also - * |TryCatch::JSStackComparableAddressPrivate| for details.
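To make the ShadowRealm hook concrete, here is a minimal HostCreateShadowRealmContextCallback sketch; the Isolate setter named in the trailing comment is the assumed registration point and is not part of this diff:

  v8::MaybeLocal<v8::Context> OnCreateShadowRealmContext(
      v8::Local<v8::Context> initiator_context) {
    v8::Isolate* isolate = initiator_context->GetIsolate();
    // A fresh, empty context in the same isolate. Returning an empty
    // MaybeLocal would propagate the creation failure as an exception.
    return v8::Context::New(isolate);
  }

  // Assumed registration point:
  // isolate->SetHostCreateShadowRealmContextCallback(OnCreateShadowRealmContext);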
- */ - V8_DEPRECATED( - "This is private V8 information that should not be exposed in the API.") - uintptr_t JSStackComparableAddress() const { - return JSStackComparableAddressPrivate(); - } - private: friend class internal::Isolate; @@ -379,21 +378,19 @@ Local Context::GetEmbedderData(int index) { } void* Context::GetAlignedPointerFromEmbedderData(int index) { -#ifndef V8_ENABLE_CHECKS +#if !defined(V8_ENABLE_CHECKS) using A = internal::Address; using I = internal::Internals; A ctx = *reinterpret_cast(this); A embedder_data = I::ReadTaggedPointerField(ctx, I::kNativeContextEmbedderDataOffset); - int value_offset = - I::kEmbedderDataArrayHeaderSize + (I::kEmbedderDataSlotSize * index); -#ifdef V8_HEAP_SANDBOX - value_offset += I::kEmbedderDataSlotRawPayloadOffset; -#endif - internal::Isolate* isolate = I::GetIsolateForHeapSandbox(ctx); + int value_offset = I::kEmbedderDataArrayHeaderSize + + (I::kEmbedderDataSlotSize * index) + + I::kEmbedderDataSlotExternalPointerOffset; + Isolate* isolate = I::GetIsolateForSandbox(ctx); return reinterpret_cast( - I::ReadExternalPointerField(isolate, embedder_data, value_offset, - internal::kEmbedderDataSlotPayloadTag)); + I::ReadExternalPointerField( + isolate, embedder_data, value_offset)); #else return SlowGetAlignedPointerFromEmbedderData(index); #endif diff --git a/deps/include/v8-cppgc.h b/deps/include/v8-cppgc.h index 64b42c2b..139af8fd 100644 --- a/deps/include/v8-cppgc.h +++ b/deps/include/v8-cppgc.h @@ -12,7 +12,6 @@ #include "cppgc/common.h" #include "cppgc/custom-space.h" #include "cppgc/heap-statistics.h" -#include "cppgc/internal/write-barrier.h" #include "cppgc/visitor.h" #include "v8-internal.h" // NOLINT(build/include_directory) #include "v8-platform.h" // NOLINT(build/include_directory) @@ -78,15 +77,28 @@ struct WrapperDescriptor final { }; struct V8_EXPORT CppHeapCreateParams { - CppHeapCreateParams(const CppHeapCreateParams&) = delete; - CppHeapCreateParams& operator=(const CppHeapCreateParams&) = delete; - std::vector> custom_spaces; WrapperDescriptor wrapper_descriptor; + /** + * Specifies which kind of marking are supported by the heap. The type may be + * further reduced via runtime flags when attaching the heap to an Isolate. + */ + cppgc::Heap::MarkingType marking_support = + cppgc::Heap::MarkingType::kIncrementalAndConcurrent; + /** + * Specifies which kind of sweeping is supported by the heap. The type may be + * further reduced via runtime flags when attaching the heap to an Isolate. + */ + cppgc::Heap::SweepingType sweeping_support = + cppgc::Heap::SweepingType::kIncrementalAndConcurrent; }; /** * A heap for allocating managed C++ objects. + * + * Similar to v8::Isolate, the heap may only be accessed from one thread at a + * time. The heap may be used from different threads using the + * v8::Locker/v8::Unlocker APIs which is different from generic Oilpan. */ class V8_EXPORT CppHeap { public: @@ -148,6 +160,14 @@ class V8_EXPORT CppHeap { */ void CollectGarbageForTesting(cppgc::EmbedderStackState stack_state); + /** + * Performs a stop-the-world minor garbage collection for testing purposes. + * + * \param stack_state The stack state to assume for the garbage collection. 
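Since CppHeapCreateParams is now a plain copyable struct, the heap configuration can be spelled out directly. A sketch, assuming a v8::Platform* platform is already set up and that the conventional wrapper layout (type at internal field 0, instance at field 1) applies:

  v8::CppHeapCreateParams params{
      /*custom_spaces=*/{},
      v8::WrapperDescriptor(/*wrappable_type_index=*/0,
                            /*wrappable_instance_index=*/1,
                            /*embedder_id_for_garbage_collected=*/1)};
  // Opt out of concurrency: stop-the-world marking and sweeping only.
  params.marking_support = cppgc::Heap::MarkingType::kAtomic;
  params.sweeping_support = cppgc::Heap::SweepingType::kAtomic;
  std::unique_ptr<v8::CppHeap> cpp_heap = v8::CppHeap::Create(platform, params);

As the new comments note, V8 may still reduce the requested marking/sweeping types via runtime flags when the heap is attached to an isolate.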
+ */ + void CollectGarbageInYoungGenerationForTesting( + cppgc::EmbedderStackState stack_state); + private: CppHeap() = default; @@ -157,6 +177,7 @@ class V8_EXPORT CppHeap { class JSVisitor : public cppgc::Visitor { public: explicit JSVisitor(cppgc::Visitor::Key key) : cppgc::Visitor(key) {} + ~JSVisitor() override = default; void Trace(const TracedReferenceBase& ref) { if (ref.IsEmptyThreadSafe()) return; @@ -169,133 +190,6 @@ class JSVisitor : public cppgc::Visitor { virtual void Visit(const TracedReferenceBase& ref) {} }; -/** - * **DO NOT USE: Use the appropriate managed types.** - * - * Consistency helpers that aid in maintaining a consistent internal state of - * the garbage collector. - */ -class V8_EXPORT JSHeapConsistency final { - public: - using WriteBarrierParams = cppgc::internal::WriteBarrier::Params; - using WriteBarrierType = cppgc::internal::WriteBarrier::Type; - - /** - * Gets the required write barrier type for a specific write. - * - * Note: Handling for C++ to JS references. - * - * \param ref The reference being written to. - * \param params Parameters that may be used for actual write barrier calls. - * Only filled if return value indicates that a write barrier is needed. The - * contents of the `params` are an implementation detail. - * \param callback Callback returning the corresponding heap handle. The - * callback is only invoked if the heap cannot otherwise be figured out. The - * callback must not allocate. - * \returns whether a write barrier is needed and which barrier to invoke. - */ - template - V8_DEPRECATE_SOON("Write barriers automatically emitted by TracedReference.") - static V8_INLINE WriteBarrierType - GetWriteBarrierType(const TracedReferenceBase& ref, - WriteBarrierParams& params, - HeapHandleCallback callback) { - if (ref.IsEmpty()) return WriteBarrierType::kNone; - - if (V8_LIKELY(!cppgc::internal::WriteBarrier:: - IsAnyIncrementalOrConcurrentMarking())) { - return cppgc::internal::WriteBarrier::Type::kNone; - } - cppgc::HeapHandle& handle = callback(); - if (!cppgc::subtle::HeapState::IsMarking(handle)) { - return cppgc::internal::WriteBarrier::Type::kNone; - } - params.heap = &handle; -#if V8_ENABLE_CHECKS - params.type = cppgc::internal::WriteBarrier::Type::kMarking; -#endif // !V8_ENABLE_CHECKS - return cppgc::internal::WriteBarrier::Type::kMarking; - } - - /** - * Gets the required write barrier type for a specific write. - * - * Note: Handling for JS to C++ references. - * - * \param wrapper The wrapper that has been written into. - * \param wrapper_index The wrapper index in `wrapper` that has been written - * into. - * \param wrappable The value that was written. - * \param params Parameters that may be used for actual write barrier calls. - * Only filled if return value indicates that a write barrier is needed. The - * contents of the `params` are an implementation detail. - * \param callback Callback returning the corresponding heap handle. The - * callback is only invoked if the heap cannot otherwise be figured out. The - * callback must not allocate. - * \returns whether a write barrier is needed and which barrier to invoke. 
- */ - template <typename HeapHandleCallback> - static V8_INLINE WriteBarrierType GetWriteBarrierType( - v8::Local<v8::Object>& wrapper, int wrapper_index, const void* wrappable, - WriteBarrierParams& params, HeapHandleCallback callback) { -#if V8_ENABLE_CHECKS - CheckWrapper(wrapper, wrapper_index, wrappable); -#endif // V8_ENABLE_CHECKS - return cppgc::internal::WriteBarrier:: - GetWriteBarrierTypeForExternallyReferencedObject(wrappable, params, - callback); - } - - /** - * Conservative Dijkstra-style write barrier that processes an object if it - * has not yet been processed. - * - * \param params The parameters retrieved from `GetWriteBarrierType()`. - * \param ref The reference being written to. - */ - V8_DEPRECATE_SOON("Write barriers automatically emitted by TracedReference.") - static V8_INLINE void DijkstraMarkingBarrier(const WriteBarrierParams& params, - cppgc::HeapHandle& heap_handle, - const TracedReferenceBase& ref) { - cppgc::internal::WriteBarrier::CheckParams(WriteBarrierType::kMarking, - params); - DijkstraMarkingBarrierSlow(heap_handle, ref); - } - - /** - * Conservative Dijkstra-style write barrier that processes an object if it - * has not yet been processed. - * - * \param params The parameters retrieved from `GetWriteBarrierType()`. - * \param object The pointer to the object. May be an interior pointer to a - * an interface of the actual object. - */ - static V8_INLINE void DijkstraMarkingBarrier(const WriteBarrierParams& params, - cppgc::HeapHandle& heap_handle, - const void* object) { - cppgc::internal::WriteBarrier::DijkstraMarkingBarrier(params, object); - } - - /** - * Generational barrier for maintaining consistency when running with multiple - * generations. - * - * \param params The parameters retrieved from `GetWriteBarrierType()`. - * \param ref The reference being written to. - */ - V8_DEPRECATE_SOON("Write barriers automatically emitted by TracedReference.") - static V8_INLINE void GenerationalBarrier(const WriteBarrierParams& params, - const TracedReferenceBase& ref) {} - - private: - JSHeapConsistency() = delete; - - static void CheckWrapper(v8::Local<v8::Object>&, int, const void*); - - static void DijkstraMarkingBarrierSlow(cppgc::HeapHandle&, - const TracedReferenceBase& ref); -}; - /** * Provided as input to `CppHeap::CollectCustomSpaceStatisticsAtLastGC()`. * diff --git a/deps/include/v8-data.h b/deps/include/v8-data.h index dbd36c9a..fc4dea92 100644 --- a/deps/include/v8-data.h +++ b/deps/include/v8-data.h @@ -27,6 +27,11 @@ class V8_EXPORT Data { */ bool IsModule() const; + /** + * Returns true if this data is a |v8::FixedArray|. + */ + bool IsFixedArray() const; + /** * Returns true if this data is a |v8::Private|. */ @@ -48,7 +53,7 @@ bool IsContext() const; private: - Data(); + Data() = delete; }; /** @@ -58,6 +63,16 @@ class V8_EXPORT FixedArray : public Data { public: int Length() const; Local<Data> Get(Local<Context> context, int i) const; + + V8_INLINE static FixedArray* Cast(Data* data) { +#ifdef V8_ENABLE_CHECKS + CheckCast(data); +#endif + return reinterpret_cast<FixedArray*>(data); + } + + private: + static void CheckCast(Data* obj); }; } // namespace v8 diff --git a/deps/include/v8-date.h b/deps/include/v8-date.h index e7a01f29..8d82ccc9 100644 --- a/deps/include/v8-date.h +++ b/deps/include/v8-date.h @@ -27,6 +27,11 @@ class V8_EXPORT Date : public Object { */ double ValueOf() const; + /** + * Generates ISO string representation.
+ */ + v8::Local<v8::String> ToISOString() const; + V8_INLINE static Date* Cast(Value* value) { #ifdef V8_ENABLE_CHECKS CheckCast(value); diff --git a/deps/include/v8-debug.h b/deps/include/v8-debug.h index a13ae3f6..52255f37 100644 --- a/deps/include/v8-debug.h +++ b/deps/include/v8-debug.h @@ -7,8 +7,8 @@ #include <stdint.h> -#include "v8-local-handle.h" // NOLINT(build/include_directory) -#include "v8config.h" // NOLINT(build/include_directory) +#include "v8-script.h" // NOLINT(build/include_directory) +#include "v8config.h" // NOLINT(build/include_directory) namespace v8 { @@ -20,13 +20,18 @@ class String; */ class V8_EXPORT StackFrame { public: + /** + * Returns the source location, 0-based, for the associated function call. + */ + Location GetLocation() const; + /** * Returns the number, 1-based, of the line for the associated function call. * This method will return Message::kNoLineNumberInfo if it is unable to * retrieve the line number, or if kLineNumber was not passed as an option * when capturing the StackTrace. */ - int GetLineNumber() const; + int GetLineNumber() const { return GetLocation().GetLineNumber() + 1; } /** * Returns the 1-based column offset on the line for the associated function * call. This method will return Message::kNoColumnInfo if it is unable to retrieve * the column number, or if kColumnOffset was not passed as an option when * capturing the StackTrace. */ - int GetColumn() const; + int GetColumn() const { return GetLocation().GetColumnNumber() + 1; } /** * Returns the id of the script for the function for this StackFrame. @@ -144,6 +149,18 @@ class V8_EXPORT StackTrace { */ static Local<StackTrace> CurrentStackTrace( Isolate* isolate, int frame_limit, StackTraceOptions options = kDetailed); + + /** + * Returns the first valid script name or source URL starting at the top of + * the JS stack. The returned string is either an empty handle if no script + * name/url was found or a non-zero-length string. + * + * This method is equivalent to calling StackTrace::CurrentStackTrace and + * walking the resulting frames from the beginning until a non-empty script + * name/url is found. The difference is that this method won't allocate + * a stack trace. + */ + static Local<String> CurrentScriptNameOrSourceURL(Isolate* isolate); }; } // namespace v8 diff --git a/deps/include/v8-embedder-heap.h b/deps/include/v8-embedder-heap.h index c3e5ddc1..f994cdfd 100644 --- a/deps/include/v8-embedder-heap.h +++ b/deps/include/v8-embedder-heap.h @@ -34,24 +34,21 @@ class V8_EXPORT EmbedderRootsHandler { virtual ~EmbedderRootsHandler() = default; /** - * Returns true if the TracedGlobal handle should be considered as root for - * the currently running non-tracing garbage collection and false otherwise. - * The default implementation will keep all TracedGlobal references as roots. + * Returns true if the |TracedReference| handle should be considered as root + * for the currently running non-tracing garbage collection and false + * otherwise. The default implementation will keep all |TracedReference| + * references as roots. * * If this returns false, then V8 may decide that the object referred to by - * such a handle is reclaimed. In that case: - * - No action is required if handles are used with destructors, i.e., by just - * using |TracedGlobal|. - * - When run without destructors, i.e., by using |TracedReference|, V8 calls - * |ResetRoot|. + * such a handle is reclaimed. In that case, V8 calls |ResetRoot()| for the + * |TracedReference|.
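The new CurrentScriptNameOrSourceURL is the cheap alternative to materializing a whole stack trace just to read one URL. A sketch, assuming an isolate that is currently executing script (<cstdio> assumed for the print):

  v8::Local<v8::String> name =
      v8::StackTrace::CurrentScriptNameOrSourceURL(isolate);
  if (!name.IsEmpty() && name->Length() > 0) {
    v8::String::Utf8Value utf8(isolate, name);
    std::fprintf(stderr, "top-most script: %s\n", *utf8);
  }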
 * - * Note that the |handle| is different from the handle that the embedder holds + * Note that the `handle` is different from the handle that the embedder holds * for retaining the object. The embedder may use |WrapperClassId()| to * distinguish cases where it wants handles to be treated as roots from not * being treated as roots. */ virtual bool IsRoot(const v8::TracedReference<v8::Value>& handle) = 0; - virtual bool IsRoot(const v8::TracedGlobal<v8::Value>& handle) = 0; /** * Used in combination with |IsRoot|. Called by V8 when an @@ -72,7 +69,12 @@ * trace through its heap and use reporter to report each JavaScript object * reachable from any of the given wrappers. */ -class V8_EXPORT EmbedderHeapTracer { +class V8_EXPORT +// GCC doesn't like combining __attribute__(()) with [[deprecated]]. +#ifdef __clang__ +V8_DEPRECATED("Use CppHeap when working with v8::TracedReference.") +#endif // __clang__ + EmbedderHeapTracer { public: using EmbedderStackState = cppgc::EmbedderStackState; @@ -83,12 +85,11 @@ }; /** - * Interface for iterating through TracedGlobal handles. + * Interface for iterating through |TracedReference| handles. */ class V8_EXPORT TracedGlobalHandleVisitor { public: virtual ~TracedGlobalHandleVisitor() = default; - virtual void VisitTracedGlobalHandle(const TracedGlobal<v8::Value>& handle) {} virtual void VisitTracedReference(const TracedReference<v8::Value>& handle) {} }; @@ -113,8 +114,8 @@ virtual ~EmbedderHeapTracer() = default; /** - * Iterates all TracedGlobal handles created for the v8::Isolate the tracer is - * attached to. + * Iterates all |TracedReference| handles created for the |v8::Isolate| the + * tracer is attached to. */ void IterateTracedGlobalHandles(TracedGlobalHandleVisitor* visitor); @@ -124,14 +125,6 @@ */ void SetStackStart(void* stack_start); - /** - * Called by the embedder to notify V8 of an empty execution stack. - */ - V8_DEPRECATED( - "This call only optimized internal caches which V8 is able to figure out " - "on its own now.") - void NotifyEmptyEmbedderStack(); - /** * Called by v8 to register internal fields of found wrappers. * @@ -197,7 +190,6 @@ */ virtual bool IsRootForNonTracingGC( const v8::TracedReference<v8::Value>& handle); - virtual bool IsRootForNonTracingGC(const v8::TracedGlobal<v8::Value>& handle); /** * See documentation on EmbedderRootsHandler. @@ -205,13 +197,6 @@ virtual void ResetHandleInNonTracingGC( const v8::TracedReference<v8::Value>& handle); - /* - * Called by the embedder to immediately perform a full garbage collection. - * - * Should only be used in testing code. - */ - void GarbageCollectionForTesting(EmbedderStackState stack_state); - /* * Called by the embedder to signal newly allocated or freed memory. Not bound * to tracing phases. Embedders should trade off when increments are reported * @@ -225,10 +210,10 @@ * Returns the v8::Isolate this tracer is attached to and |nullptr| if it * is not attached to any v8::Isolate.
*/ - v8::Isolate* isolate() const { return isolate_; } + v8::Isolate* isolate() const { return v8_isolate_; } protected: - v8::Isolate* isolate_ = nullptr; + v8::Isolate* v8_isolate_ = nullptr; friend class internal::LocalEmbedderHeapTracer; }; diff --git a/deps/include/v8-embedder-state-scope.h b/deps/include/v8-embedder-state-scope.h new file mode 100644 index 00000000..d8a3b08d --- /dev/null +++ b/deps/include/v8-embedder-state-scope.h @@ -0,0 +1,51 @@ +// Copyright 2021 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#ifndef INCLUDE_V8_EMBEDDER_STATE_SCOPE_H_ +#define INCLUDE_V8_EMBEDDER_STATE_SCOPE_H_ + +#include + +#include "v8-context.h" // NOLINT(build/include_directory) +#include "v8-internal.h" // NOLINT(build/include_directory) +#include "v8-local-handle.h" // NOLINT(build/include_directory) + +namespace v8 { + +namespace internal { +class EmbedderState; +} // namespace internal + +// A StateTag represents a possible state of the embedder. +enum class EmbedderStateTag : uint8_t { + // reserved + EMPTY = 0, + OTHER = 1, + // embedder can define any state after +}; + +// A stack-allocated class that manages an embedder state on the isolate. +// After an EmbedderState scope has been created, a new embedder state will be +// pushed on the isolate stack. +class V8_EXPORT EmbedderStateScope { + public: + EmbedderStateScope(Isolate* isolate, Local context, + EmbedderStateTag tag); + + ~EmbedderStateScope(); + + private: + // Declaring operator new and delete as deleted is not spec compliant. + // Therefore declare them private instead to disable dynamic alloc + void* operator new(size_t size); + void* operator new[](size_t size); + void operator delete(void*, size_t); + void operator delete[](void*, size_t); + + std::unique_ptr embedder_state_; +}; + +} // namespace v8 + +#endif // INCLUDE_V8_EMBEDDER_STATE_SCOPE_H_ diff --git a/deps/include/v8-exception.h b/deps/include/v8-exception.h index faa46487..bc058e3f 100644 --- a/deps/include/v8-exception.h +++ b/deps/include/v8-exception.h @@ -169,13 +169,6 @@ class V8_EXPORT TryCatch { */ void SetCaptureMessage(bool value); - V8_DEPRECATED( - "This is private information that should not be exposed by the API") - static void* JSStackComparableAddress(TryCatch* handler) { - if (handler == nullptr) return nullptr; - return reinterpret_cast(handler->JSStackComparableAddressPrivate()); - } - TryCatch(const TryCatch&) = delete; void operator=(const TryCatch&) = delete; @@ -204,7 +197,7 @@ class V8_EXPORT TryCatch { void ResetInternal(); - internal::Isolate* isolate_; + internal::Isolate* i_isolate_; TryCatch* next_; void* exception_; void* message_obj_; diff --git a/deps/include/v8-fast-api-calls.h b/deps/include/v8-fast-api-calls.h index 141fddd2..1826f133 100644 --- a/deps/include/v8-fast-api-calls.h +++ b/deps/include/v8-fast-api-calls.h @@ -240,6 +240,7 @@ class CTypeInfo { enum class Type : uint8_t { kVoid, kBool, + kUint8, kInt32, kUint32, kInt64, @@ -249,6 +250,15 @@ class CTypeInfo { kV8Value, kApiObject, // This will be deprecated once all users have // migrated from v8::ApiObject to v8::Local. + kAny, // This is added to enable untyped representation of fast + // call arguments for test purposes. It can represent any of + // the other types stored in the same memory as a union (see + // the AnyCType struct declared below). This allows for + // uniform passing of arguments w.r.t. 
their location + // (in a register or on the stack), independent of their + // actual type. It's currently used by the arm64 simulator + // and can be added to the other simulators as well when fast + // calls having both GP and FP params need to be supported. }; // kCallbackOptionsType is not part of the Type enum @@ -293,8 +303,9 @@ class CTypeInfo { constexpr Flags GetFlags() const { return flags_; } static constexpr bool IsIntegralType(Type type) { - return type == Type::kInt32 || type == Type::kUint32 || - type == Type::kInt64 || type == Type::kUint64; + return type == Type::kUint8 || type == Type::kInt32 || + type == Type::kUint32 || type == Type::kInt64 || + type == Type::kUint64; } static constexpr bool IsFloatingPointType(Type type) { @@ -404,6 +415,38 @@ class V8_EXPORT CFunctionInfo { const CTypeInfo* arg_info_; }; +struct FastApiCallbackOptions; + +// Provided for testing. +struct AnyCType { + AnyCType() : int64_value(0) {} + + union { + bool bool_value; + int32_t int32_value; + uint32_t uint32_value; + int64_t int64_value; + uint64_t uint64_value; + float float_value; + double double_value; + Local object_value; + Local sequence_value; + const FastApiTypedArray* uint8_ta_value; + const FastApiTypedArray* int32_ta_value; + const FastApiTypedArray* uint32_ta_value; + const FastApiTypedArray* int64_ta_value; + const FastApiTypedArray* uint64_ta_value; + const FastApiTypedArray* float_ta_value; + const FastApiTypedArray* double_ta_value; + FastApiCallbackOptions* options_value; + }; +}; + +static_assert( + sizeof(AnyCType) == 8, + "The AnyCType struct should have size == 64 bits, as this is assumed " + "by EffectControlLinearizer."); + class V8_EXPORT CFunction { public: constexpr CFunction() : address_(nullptr), type_info_(nullptr) {} @@ -460,6 +503,19 @@ class V8_EXPORT CFunction { return ArgUnwrap::Make(func); } + // Provided for testing purposes. + template + static CFunction Make(R (*func)(Args...), + R_Patch (*patching_func)(Args_Patch...)) { + CFunction c_func = ArgUnwrap::Make(func); + static_assert( + sizeof...(Args_Patch) == sizeof...(Args), + "The patching function must have the same number of arguments."); + c_func.address_ = reinterpret_cast(patching_func); + return c_func; + } + CFunction(const void* address, const CFunctionInfo* type_info); private: @@ -479,10 +535,6 @@ class V8_EXPORT CFunction { }; }; -struct V8_DEPRECATED("Use v8::Local instead.") ApiObject { - uintptr_t address; -}; - /** * A struct which may be passed to a fast call callback, like so: * \code @@ -495,7 +547,7 @@ struct FastApiCallbackOptions { * returned instance may be filled with mock data. */ static FastApiCallbackOptions CreateForTesting(Isolate* isolate) { - return {false, {0}}; + return {false, {0}, nullptr}; } /** @@ -517,8 +569,13 @@ struct FastApiCallbackOptions { */ union { uintptr_t data_ptr; - v8::Value data; + v8::Local data; }; + + /** + * When called from WebAssembly, a view of the calling module's memory. 
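The fast-call machinery these additions extend is easiest to read from the embedder side. A hypothetical sketch of the canonical pattern (names are illustrative; the slow-path callback and the function-template registration are elided):

  // Fast path: must not touch the JS heap or re-enter V8.
  int32_t FastAdd(v8::Local<v8::Object> receiver, int32_t a, int32_t b) {
    return a + b;
  }

  static const v8::CFunction kFastAdd = v8::CFunction::Make(FastAdd);
  // kFastAdd is then passed next to the slow callback, e.g. as the
  // c_function argument of v8::FunctionTemplate::New(...).

The testing-only Make(func, patching_func) overload above exists so a simulator-friendly AnyCType-based implementation can be swapped in while the type information of the original signature is kept.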
+ */ + FastApiTypedArray* const wasm_memory; }; namespace internal { @@ -555,7 +612,8 @@ class CFunctionInfoImpl : public CFunctionInfo { kReturnType == CTypeInfo::Type::kInt32 || kReturnType == CTypeInfo::Type::kUint32 || kReturnType == CTypeInfo::Type::kFloat32 || - kReturnType == CTypeInfo::Type::kFloat64, + kReturnType == CTypeInfo::Type::kFloat64 || + kReturnType == CTypeInfo::Type::kAny, "64-bit int and api object values are not currently " "supported return types."); } @@ -598,7 +656,8 @@ struct CTypeInfoTraits {}; V(int64_t, kInt64) \ V(uint64_t, kUint64) \ V(float, kFloat32) \ - V(double, kFloat64) + V(double, kFloat64) \ + V(uint8_t, kUint8) // Same as above, but includes deprecated types for compatibility. #define ALL_C_TYPES(V) \ @@ -606,7 +665,7 @@ struct CTypeInfoTraits {}; V(void, kVoid) \ V(v8::Local, kV8Value) \ V(v8::Local, kV8Value) \ - V(ApiObject, kApiObject) + V(AnyCType, kAny) // ApiObject was a temporary solution to wrap the pointer to the v8::Value. // Please use v8::Local in new code for the arguments and @@ -637,7 +696,8 @@ PRIMITIVE_C_TYPES(DEFINE_TYPE_INFO_TRAITS) V(int64_t, kInt64) \ V(uint64_t, kUint64) \ V(float, kFloat32) \ - V(double, kFloat64) + V(double, kFloat64) \ + V(uint8_t, kUint8) TYPED_ARRAY_C_TYPES(SPECIALIZE_GET_TYPE_INFO_HELPER_FOR_TA) @@ -752,6 +812,16 @@ class CFunctionBuilderWithFunction { std::make_index_sequence()); } + // Provided for testing purposes. + template + auto Patch(Ret (*patching_func)(Args...)) { + static_assert( + sizeof...(Args) == sizeof...(ArgBuilders), + "The patching function must have the same number of arguments."); + fn_ = reinterpret_cast(patching_func); + return *this; + } + auto Build() { static CFunctionInfoImpl instance; return CFunction(fn_, &instance); @@ -831,31 +901,6 @@ static constexpr CTypeInfo kTypeInfoFloat64 = * to the requested destination type, is considered unsupported. The operation * returns true on success. `type_info` will be used for conversions. */ -template -V8_DEPRECATE_SOON( - "Use TryToCopyAndConvertArrayToCppBuffer()") -bool V8_EXPORT V8_WARN_UNUSED_RESULT - TryCopyAndConvertArrayToCppBuffer(Local src, T* dst, - uint32_t max_length); - -template <> -V8_DEPRECATE_SOON( - "Use TryToCopyAndConvertArrayToCppBuffer()") -inline bool V8_WARN_UNUSED_RESULT - TryCopyAndConvertArrayToCppBuffer<&kTypeInfoInt32, int32_t>( - Local src, int32_t* dst, uint32_t max_length) { - return false; -} - -template <> -V8_DEPRECATE_SOON( - "Use TryToCopyAndConvertArrayToCppBuffer()") -inline bool V8_WARN_UNUSED_RESULT - TryCopyAndConvertArrayToCppBuffer<&kTypeInfoFloat64, double>( - Local src, double* dst, uint32_t max_length) { - return false; -} - template bool V8_EXPORT V8_WARN_UNUSED_RESULT TryToCopyAndConvertArrayToCppBuffer( Local src, T* dst, uint32_t max_length); diff --git a/deps/include/v8-function.h b/deps/include/v8-function.h index 897e6ed6..2dc7e722 100644 --- a/deps/include/v8-function.h +++ b/deps/include/v8-function.h @@ -106,6 +106,14 @@ class V8_EXPORT Function : public Object { V8_WARN_UNUSED_RESULT MaybeLocal FunctionProtoToString( Local context); + /** + * Returns true if the function does nothing. + * The function returns false on error. + * Note that this function is experimental. Embedders should not rely on + * this existing. We may remove this function in the future. 
+ */ + V8_WARN_UNUSED_RESULT bool Experimental_IsNopFunction() const; + ScriptOrigin GetScriptOrigin() const; V8_INLINE static Function* Cast(Value* value) { #ifdef V8_ENABLE_CHECKS diff --git a/deps/include/v8-initialization.h b/deps/include/v8-initialization.h index 822d1503..d3e35d6e 100644 --- a/deps/include/v8-initialization.h +++ b/deps/include/v8-initialization.h @@ -8,10 +8,11 @@ #include #include -#include "v8-internal.h" // NOLINT(build/include_directory) -#include "v8-isolate.h" // NOLINT(build/include_directory) -#include "v8-platform.h" // NOLINT(build/include_directory) -#include "v8config.h" // NOLINT(build/include_directory) +#include "v8-callbacks.h" // NOLINT(build/include_directory) +#include "v8-internal.h" // NOLINT(build/include_directory) +#include "v8-isolate.h" // NOLINT(build/include_directory) +#include "v8-platform.h" // NOLINT(build/include_directory) +#include "v8config.h" // NOLINT(build/include_directory) // We reserve the V8_* prefix for macros defined in V8 public API and // assume there are no name conflicts with the embedder's code. @@ -99,8 +100,7 @@ class V8_EXPORT V8 { const int kBuildConfiguration = (internal::PointerCompressionIsEnabled() ? kPointerCompression : 0) | (internal::SmiValuesAre31Bits() ? k31BitSmis : 0) | - (internal::HeapSandboxIsEnabled() ? kHeapSandbox : 0) | - (internal::VirtualMemoryCageIsEnabled() ? kVirtualMemoryCage : 0); + (internal::SandboxIsEnabled() ? kSandbox : 0); return Initialize(kBuildConfiguration); } @@ -180,64 +180,55 @@ class V8_EXPORT V8 { * Clears all references to the v8::Platform. This should be invoked after * V8 was disposed. */ - static void ShutdownPlatform(); - -#ifdef V8_VIRTUAL_MEMORY_CAGE - // - // Virtual Memory Cage related API. - // - // This API is not yet stable and subject to changes in the future. - // + static void DisposePlatform(); +#if defined(V8_ENABLE_SANDBOX) /** - * Initializes the virtual memory cage for V8. - * - * This must be invoked after the platform was initialized but before V8 is - * initialized. The virtual memory cage is torn down during platform shutdown. - * Returns true on success, false otherwise. + * Returns true if the sandbox is configured securely. * - * TODO(saelo) Once it is no longer optional to create the virtual memory - * cage when compiling with V8_VIRTUAL_MEMORY_CAGE, the cage initialization - * will likely happen as part of V8::Initialize, at which point this function - * should be removed. + * If V8 cannot create a regular sandbox during initialization, for example + * because not enough virtual address space can be reserved, it will instead + * create a fallback sandbox that still allows it to function normally but + * does not have the same security properties as a regular sandbox. This API + * can be used to determine if such a fallback sandbox is being used, in + * which case it will return false. */ - static bool InitializeVirtualMemoryCage(); + static bool IsSandboxConfiguredSecurely(); /** - * Provides access to the virtual memory cage page allocator. + * Provides access to the virtual address subspace backing the sandbox. * - * This allocator allocates pages inside the virtual memory cage. It can for - * example be used to obtain virtual memory for ArrayBuffer backing stores, - * which must be located inside the cage. + * This can be used to allocate pages inside the sandbox, for example to + * obtain virtual memory for ArrayBuffer backing stores, which must be + * located inside the sandbox. 
 * - * It should be assumed that an attacker can corrupt data inside the cage, - * and so in particular the contents of pages returned by this allocator, - * arbitrarily and concurrently. Due to this, it is recommended to to only - * place pure data buffers in pages obtained through this allocator. - * - * This function must only be called after initializing the virtual memory - * cage and V8. + * It should be assumed that an attacker can corrupt data inside the sandbox, + * and so in particular the contents of pages allocated in this virtual + * address space, arbitrarily and concurrently. Due to this, it is + * recommended to only place pure data buffers in them. */ - static PageAllocator* GetVirtualMemoryCagePageAllocator(); + static VirtualAddressSpace* GetSandboxAddressSpace(); /** - * Returns the size of the virtual memory cage in bytes. + * Returns the size of the sandbox in bytes. * - * If the cage has not been initialized, or if the initialization failed, - * this returns zero. + * This represents the size of the address space that V8 can directly address + * and in which it allocates its objects. */ - static size_t GetVirtualMemoryCageSizeInBytes(); + static size_t GetSandboxSizeInBytes(); /** - * Returns whether the virtual memory cage is configured securely. + * Returns the size of the address space reservation backing the sandbox. * - * If V8 cannot create a proper virtual memory cage, it will fall back to - * creating a cage that doesn't have the desired security properties but at - * least still allows V8 to function. This API can be used to determine if - * such an insecure cage is being used, in which case it will return false. + * This may be larger than the sandbox (i.e. |GetSandboxSizeInBytes()|) due + * to surrounding guard regions, or may be smaller than the sandbox in case a + * fallback sandbox is being used, which will use a smaller virtual address + * space reservation. In the latter case this will also be different from + * |GetSandboxAddressSpace()->size()| as that will cover a larger part of the + * address space than what has actually been reserved. */ - static bool IsUsingSecureVirtualMemoryCage(); -#endif + static size_t GetSandboxReservationSizeInBytes(); +#endif // V8_ENABLE_SANDBOX /** * Activate trap-based bounds checking for WebAssembly. @@ -258,9 +249,16 @@ * exceptions in V8-generated code. */ static void SetUnhandledExceptionCallback( - UnhandledExceptionCallback unhandled_exception_callback); + UnhandledExceptionCallback callback); #endif + /** + * Allows the host application to provide a callback that will be called when + * v8 has encountered a fatal failure to allocate memory and is about to + * terminate. + */ + static void SetFatalMemoryErrorCallback(OOMErrorCallback callback); + /** * Get statistics about the shared memory usage.
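Taken together, the renamed sandbox queries are meant to be called after V8::Initialize(), and only under V8_ENABLE_SANDBOX. A sketch:

  #if defined(V8_ENABLE_SANDBOX)
  if (!v8::V8::IsSandboxConfiguredSecurely()) {
    // Fallback sandbox: smaller reservation, weaker security properties.
    std::fprintf(stderr, "sandbox: %zu bytes addressable, %zu bytes reserved\n",
                 v8::V8::GetSandboxSizeInBytes(),
                 v8::V8::GetSandboxReservationSizeInBytes());
  }
  #endif  // V8_ENABLE_SANDBOX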
*/ @@ -272,8 +270,7 @@ enum BuildConfigurationFeatures { kPointerCompression = 1 << 0, k31BitSmis = 1 << 1, - kHeapSandbox = 1 << 2, - kVirtualMemoryCage = 1 << 3, + kSandbox = 1 << 2, }; /** diff --git a/deps/include/v8-inspector.h b/deps/include/v8-inspector.h index 8ba21ffd..aa5a044a 100644 --- a/deps/include/v8-inspector.h +++ b/deps/include/v8-inspector.h @@ -23,6 +23,10 @@ class Value; namespace v8_inspector { +namespace internal { +class V8DebuggerId; +} // namespace internal + namespace protocol { namespace Debugger { namespace API { @@ -106,6 +110,30 @@ class V8_EXPORT V8ContextInfo { V8ContextInfo& operator=(const V8ContextInfo&) = delete; }; +// This debugger id tries to be unique by generating two random +// numbers, which should most likely avoid collisions. +// Debugger id has a 1:1 mapping to context group. It is used to +// attribute stack traces to a particular debugger, when doing any +// cross-debugger operations (e.g. async step in). +// See also Runtime.UniqueDebuggerId in the protocol. +class V8_EXPORT V8DebuggerId { + public: + V8DebuggerId() = default; + V8DebuggerId(const V8DebuggerId&) = default; + V8DebuggerId& operator=(const V8DebuggerId&) = default; + + std::unique_ptr<StringBuffer> toString() const; + bool isValid() const; + std::pair<int64_t, int64_t> pair() const; + + private: + friend class internal::V8DebuggerId; + explicit V8DebuggerId(std::pair<int64_t, int64_t>); + + int64_t m_first = 0; + int64_t m_second = 0; +}; + class V8_EXPORT V8StackTrace { public: virtual StringView firstNonEmptySourceURL() const = 0; @@ -114,14 +142,10 @@ class V8_EXPORT V8StackTrace { virtual int topLineNumber() const = 0; virtual int topColumnNumber() const = 0; virtual int topScriptId() const = 0; - V8_DEPRECATED("Use V8::StackTrace::topScriptId() instead.") - int topScriptIdAsInteger() const { return topScriptId(); } virtual StringView topFunctionName() const = 0; virtual ~V8StackTrace() = default; virtual std::unique_ptr<protocol::Runtime::API::StackTrace> - buildInspectorObject() const = 0; - virtual std::unique_ptr<protocol::Runtime::API::StackTrace> buildInspectorObject(int maxAsyncDepth) const = 0; virtual std::unique_ptr<StringBuffer> toString() const = 0; @@ -181,11 +205,23 @@ class V8_EXPORT V8InspectorSession { virtual void triggerPreciseCoverageDeltaUpdate(StringView occasion) = 0; }; +class V8_EXPORT WebDriverValue { + public: + explicit WebDriverValue(std::unique_ptr<StringBuffer> type, + v8::MaybeLocal<v8::Value> value = {}) + : type(std::move(type)), value(value) {} + std::unique_ptr<StringBuffer> type; + v8::MaybeLocal<v8::Value> value; +}; + class V8_EXPORT V8InspectorClient { public: virtual ~V8InspectorClient() = default; virtual void runMessageLoopOnPause(int contextGroupId) {} + virtual void runMessageLoopOnInstrumentationPause(int contextGroupId) { + runMessageLoopOnPause(contextGroupId); + } virtual void quitMessageLoopOnPause() {} virtual void runIfWaitingForDebugger(int contextGroupId) {} @@ -195,6 +231,10 @@ class V8_EXPORT V8InspectorClient { virtual void beginUserGesture() {} virtual void endUserGesture() {} + virtual std::unique_ptr<WebDriverValue> serializeToWebDriverValue( + v8::Local<v8::Value> v8_value, int max_depth) { + return nullptr; + } virtual std::unique_ptr<StringBuffer> valueSubtype(v8::Local<v8::Value>) { return nullptr; } @@ -246,6 +286,9 @@ class V8_EXPORT V8InspectorClient { // The caller would defer to generating a random 64 bit integer if // this method returns 0.
+ + virtual void dispatchError(v8::Local<v8::Context>, v8::Local<v8::Message>, + v8::Local<v8::Value>) {} }; // These stack trace ids are intended to be passed between debuggers and be @@ -280,6 +323,7 @@ class V8_EXPORT V8Inspector { virtual void contextDestroyed(v8::Local<v8::Context>) = 0; virtual void resetContextGroup(int contextGroupId) = 0; virtual v8::MaybeLocal<v8::Context> contextById(int contextId) = 0; + virtual V8DebuggerId uniqueDebuggerId(int contextId) = 0; // Various instrumentation. virtual void idleStarted() = 0; @@ -320,9 +364,12 @@ class V8_EXPORT V8Inspector { virtual void sendNotification(std::unique_ptr<StringBuffer> message) = 0; virtual void flushProtocolNotifications() = 0; }; - virtual std::unique_ptr<V8InspectorSession> connect(int contextGroupId, - Channel*, - StringView state) = 0; + enum ClientTrustLevel { kUntrusted, kFullyTrusted }; + virtual std::unique_ptr<V8InspectorSession> connect( + int contextGroupId, Channel*, StringView state, + ClientTrustLevel client_trust_level) { + return nullptr; + } // API methods. virtual std::unique_ptr<V8StackTrace> createStackTrace( diff --git a/deps/include/v8-internal.h b/deps/include/v8-internal.h index f0531bcf..704e89e2 100644 --- a/deps/include/v8-internal.h +++ b/deps/include/v8-internal.h @@ -8,6 +8,8 @@ #include <stddef.h> #include <stdint.h> #include <string.h> + +#include <atomic> #include <type_traits> #include "v8-version.h" // NOLINT(build/include_directory) @@ -29,6 +31,13 @@ class Isolate; typedef uintptr_t Address; static const Address kNullAddress = 0; +constexpr int KB = 1024; +constexpr int MB = KB * 1024; +constexpr int GB = MB * 1024; +#ifdef V8_TARGET_ARCH_X64 +constexpr size_t TB = size_t{GB} * 1024; +#endif + /** * Configuration of tagging scheme. */ @@ -43,6 +52,7 @@ const int kHeapObjectTag = 1; const int kWeakHeapObjectTag = 3; const int kHeapObjectTagSize = 2; const intptr_t kHeapObjectTagMask = (1 << kHeapObjectTagSize) - 1; +const intptr_t kHeapObjectReferenceTagMask = 1 << (kHeapObjectTagSize - 1); // Tag information for forwarding pointers stored in object headers. // 0b00 at the lowest 2 bits in the header indicates that the map word is a @@ -109,6 +119,11 @@ struct SmiTagging<8> { }; #ifdef V8_COMPRESS_POINTERS +// See v8:7703 or src/common/ptr-compr-inl.h for details about pointer +// compression. +constexpr size_t kPtrComprCageReservationSize = size_t{1} << 32; +constexpr size_t kPtrComprCageBaseAlignment = size_t{1} << 32; + static_assert( kApiSystemPointerSize == kApiInt64Size, "Pointer compression can be enabled only for 64-bit architectures"); @@ -121,36 +136,6 @@ constexpr bool PointerCompressionIsEnabled() { return kApiTaggedSize != kApiSystemPointerSize; } -constexpr bool HeapSandboxIsEnabled() { -#ifdef V8_HEAP_SANDBOX - return true; -#else - return false; -#endif -} - -using ExternalPointer_t = Address; - -// If the heap sandbox is enabled, these tag values will be ORed with the -// external pointers in the external pointer table to prevent use of pointers of -// the wrong type. When a pointer is loaded, it is ANDed with the inverse of the -// expected type's tag. The tags are constructed in a way that guarantees that a -// failed type check will result in one or more of the top bits of the pointer -// to be set, rendering the pointer inacessible. This construction allows -// performing the type check and removing GC marking bits from the pointer at -// the same time.
-enum ExternalPointerTag : uint64_t { - kExternalPointerNullTag = 0x0000000000000000, - kExternalStringResourceTag = 0x00ff000000000000, // 0b000000011111111 - kExternalStringResourceDataTag = 0x017f000000000000, // 0b000000101111111 - kForeignForeignAddressTag = 0x01bf000000000000, // 0b000000110111111 - kNativeContextMicrotaskQueueTag = 0x01df000000000000, // 0b000000111011111 - kEmbedderDataSlotPayloadTag = 0x01ef000000000000, // 0b000000111101111 - kCodeEntryPointTag = 0x01f7000000000000, // 0b000000111110111 -}; - -constexpr uint64_t kExternalPointerTagMask = 0xffff000000000000; - #ifdef V8_31BIT_SMIS_ON_64BIT_ARCH using PlatformSmiTagging = SmiTagging<kApiInt32Size>; #else @@ -171,10 +156,331 @@ V8_INLINE static constexpr internal::Address IntToSmi(int value) { return (static_cast<Address>(value) << (kSmiTagSize + kSmiShiftSize)) | kSmiTag; } -// Converts encoded external pointer to address. -V8_EXPORT Address DecodeExternalPointerImpl(const Isolate* isolate, - ExternalPointer_t pointer, - ExternalPointerTag tag); +/* + * Sandbox related types, constants, and functions. + */ +constexpr bool SandboxIsEnabled() { +#ifdef V8_ENABLE_SANDBOX + return true; +#else + return false; +#endif +} + +// SandboxedPointers are guaranteed to point into the sandbox. This is achieved +// for example by storing them as offsets rather than as raw pointers. +using SandboxedPointer_t = Address; + +#ifdef V8_ENABLE_SANDBOX + +// Size of the sandbox, excluding the guard regions surrounding it. +#ifdef V8_TARGET_OS_ANDROID +// On Android, most 64-bit devices seem to be configured with only 39 bits of +// virtual address space for userspace. As such, limit the sandbox to 128GB (a +// quarter of the total available address space). +constexpr size_t kSandboxSizeLog2 = 37; // 128 GB +#else +// Everywhere else use a 1TB sandbox. +constexpr size_t kSandboxSizeLog2 = 40; // 1 TB +#endif // V8_TARGET_OS_ANDROID +constexpr size_t kSandboxSize = 1ULL << kSandboxSizeLog2; + +// Required alignment of the sandbox. For simplicity, we require the +// size of the guard regions to be a multiple of this, so that this specifies +// the alignment of the sandbox including and excluding surrounding guard +// regions. The alignment requirement is due to the pointer compression cage +// being located at the start of the sandbox. +constexpr size_t kSandboxAlignment = kPtrComprCageBaseAlignment; + +// Sandboxed pointers are stored inside the heap as offset from the sandbox +// base shifted to the left. This way, it is guaranteed that the offset is +// smaller than the sandbox size after shifting it to the right again. This +// constant specifies the shift amount. +constexpr uint64_t kSandboxedPointerShift = 64 - kSandboxSizeLog2; + +// Size of the guard regions surrounding the sandbox. This assumes a worst-case +// scenario of a 32-bit unsigned index used to access an array of 64-bit +// values.
+constexpr size_t kSandboxGuardRegionSize = 32ULL * GB; + +static_assert((kSandboxGuardRegionSize % kSandboxAlignment) == 0, + "The size of the guard regions around the sandbox must be a " + "multiple of its required alignment."); + +// On OSes where reserving virtual memory is too expensive to reserve the +// entire address space backing the sandbox, notably Windows pre 8.1, we create +// a partially reserved sandbox that doesn't actually reserve most of the +// memory, and so doesn't have the desired security properties as unrelated +// memory allocations could end up inside of it, but which still ensures that +// objects that should be located inside the sandbox are allocated within +// kSandboxSize bytes from the start of the sandbox. The minimum size of the +// region that is actually reserved for such a sandbox is specified by this +// constant and should be big enough to contain the pointer compression cage as +// well as the ArrayBuffer partition. +constexpr size_t kSandboxMinimumReservationSize = 8ULL * GB; + +static_assert(kSandboxMinimumReservationSize > kPtrComprCageReservationSize, + "The minimum reservation size for a sandbox must be larger than " + "the pointer compression cage contained within it."); + +// The maximum buffer size allowed inside the sandbox. This is mostly dependent +// on the size of the guard regions around the sandbox: an attacker must not be +// able to construct a buffer that appears larger than the guard regions and +// thereby "reach out of" the sandbox. +constexpr size_t kMaxSafeBufferSizeForSandbox = 32ULL * GB - 1; +static_assert(kMaxSafeBufferSizeForSandbox <= kSandboxGuardRegionSize, + "The maximum allowed buffer size must not be larger than the " + "sandbox's guard regions"); + +constexpr size_t kBoundedSizeShift = 29; +static_assert(1ULL << (64 - kBoundedSizeShift) == + kMaxSafeBufferSizeForSandbox + 1, + "The maximum size of a BoundedSize must be synchronized with the " + "kMaxSafeBufferSizeForSandbox"); + +#endif // V8_ENABLE_SANDBOX + +#ifdef V8_COMPRESS_POINTERS + +// The size of the virtual memory reservation for an external pointer table. +// This determines the maximum number of entries in a table. Using a maximum +// size allows omitting bounds checks on table accesses if the indices are +// guaranteed (e.g. through shifting) to be below the maximum index. This +// value must be a power of two. +static const size_t kExternalPointerTableReservationSize = 512 * MB; + +// The maximum number of entries in an external pointer table. +static const size_t kMaxExternalPointers = + kExternalPointerTableReservationSize / kApiSystemPointerSize; + +// The external pointer table indices stored in HeapObjects as external +// pointers are shifted to the left by this amount to guarantee that they are +// smaller than the maximum table size. +static const uint32_t kExternalPointerIndexShift = 6; +static_assert((1 << (32 - kExternalPointerIndexShift)) == kMaxExternalPointers, + "kExternalPointerTableReservationSize and " + "kExternalPointerIndexShift don't match"); + +#else // !V8_COMPRESS_POINTERS + +// Needed for the V8.SandboxedExternalPointersCount histogram. +static const size_t kMaxExternalPointers = 0; + +#endif // V8_COMPRESS_POINTERS + +// An ExternalPointerHandle represents an (opaque) reference to an external +// pointer that can be stored inside the sandbox.
An ExternalPointerHandle has +// meaning only in combination with an (active) Isolate as it references an +// external pointer stored in the currently active Isolate's +// ExternalPointerTable. Internally, an ExternalPointerHandle is simply an +// index into an ExternalPointerTable that is shifted to the left to guarantee +// that it is smaller than the size of the table. +using ExternalPointerHandle = uint32_t; + +// ExternalPointers point to objects located outside the sandbox. When +// sandboxed external pointers are enabled, these are stored on the heap as +// ExternalPointerHandles, otherwise they are simply raw pointers. +#ifdef V8_ENABLE_SANDBOX +using ExternalPointer_t = ExternalPointerHandle; +#else +using ExternalPointer_t = Address; +#endif + +// When the sandbox is enabled, external pointers are stored in an external +// pointer table and are referenced from HeapObjects through an index (a +// "handle"). When stored in the table, the pointers are tagged with per-type +// tags to prevent type confusion attacks between different external objects. +// Besides type information bits, these tags also contain the GC marking bit +// which indicates whether the pointer table entry is currently alive. When a +// pointer is written into the table, the tag is ORed into the top bits. When +// that pointer is later loaded from the table, it is ANDed with the inverse of +// the expected tag. If the expected and actual type differ, this will leave +// some of the top bits of the pointer set, rendering the pointer inaccessible. +// The AND operation also removes the GC marking bit from the pointer. +// +// The tags are constructed such that UNTAG(TAG(0, T1), T2) != 0 for any two +// (distinct) tags T1 and T2. In practice, this is achieved by generating tags +// that all have the same number of zeroes and ones but different bit patterns. +// With N type tag bits, this allows for (N choose N/2) possible type tags. +// Besides the type tag bits, the tags also have the GC marking bit set so that +// the marking bit is automatically set when a pointer is written into the +// external pointer table (in which case it is clearly alive) and is cleared +// when the pointer is loaded. The exception to this is the free entry tag, +// which doesn't have the mark bit set, as the entry is not alive. This +// construction allows performing the type check and removing GC marking bits +// from the pointer in one efficient operation (bitwise AND). The number of +// available bits is limited in the following way: on x64, bits [47, 64) are +// generally available for tagging (userspace has 47 address bits available). +// On Arm64, userspace typically has a 40 or 48 bit address space. However, due +// to top-byte ignore (TBI) and memory tagging (MTE), the top byte is unusable +// for type checks as type-check failures would go unnoticed or collide with +// MTE bits. Some bits of the top byte can, however, still be used for the GC +// marking bit. The bits available for the type tags are therefore limited to +// [48, 56), i.e. (8 choose 4) = 70 different types.
+// The following options exist to increase the number of possible types: +// - Using multiple ExternalPointerTables since tags can safely be reused +// across different tables +// - Using "extended" type checks, where additional type information is stored +// either in an adjacent pointer table entry or at the pointed-to location +// - Using a different tagging scheme, for example based on XOR which would +// allow for 2**8 different tags but require a separate operation to remove +// the marking bit +// +// The external pointer sandboxing mechanism ensures that every access to an +// external pointer field will result in a valid pointer of the expected type +// even in the presence of an attacker able to corrupt memory inside the +// sandbox. However, if any data related to the external object is stored +// inside the sandbox it may still be corrupted and so must be validated before +// use or moved into the external object. Further, an attacker will always be +// able to substitute different external pointers of the same type for each +// other. Therefore, code using external pointers must be written in a +// "substitution-safe" way, i.e. it must always be possible to substitute +// external pointers of the same type without causing memory corruption outside +// of the sandbox. Generally this is achieved by referencing any group of +// related external objects through a single external pointer. +// +// Currently we use bit 62 for the marking bit which should always be unused as +// it's part of the non-canonical address range. When Arm's top-byte ignore +// (TBI) is enabled, this bit will be part of the ignored byte, and we assume +// that the Embedder is not using this byte (really only this one bit) for any +// other purpose. This bit also does not collide with the memory tagging +// extension (MTE) which would use bits [56, 60). +constexpr uint64_t kExternalPointerMarkBit = 1ULL << 62; +constexpr uint64_t kExternalPointerTagMask = 0x40ff000000000000; +constexpr uint64_t kExternalPointerTagShift = 48; + +// All possible 8-bit type tags. +// These are sorted so that tags can be grouped together and it can efficiently +// be checked if a tag belongs to a given group. See for example the +// IsSharedExternalPointerType routine. +constexpr uint64_t kAllExternalPointerTypeTags[] = { + 0b00001111, 0b00010111, 0b00011011, 0b00011101, 0b00011110, 0b00100111, + 0b00101011, 0b00101101, 0b00101110, 0b00110011, 0b00110101, 0b00110110, + 0b00111001, 0b00111010, 0b00111100, 0b01000111, 0b01001011, 0b01001101, + 0b01001110, 0b01010011, 0b01010101, 0b01010110, 0b01011001, 0b01011010, + 0b01011100, 0b01100011, 0b01100101, 0b01100110, 0b01101001, 0b01101010, + 0b01101100, 0b01110001, 0b01110010, 0b01110100, 0b01111000, 0b10000111, + 0b10001011, 0b10001101, 0b10001110, 0b10010011, 0b10010101, 0b10010110, + 0b10011001, 0b10011010, 0b10011100, 0b10100011, 0b10100101, 0b10100110, + 0b10101001, 0b10101010, 0b10101100, 0b10110001, 0b10110010, 0b10110100, + 0b10111000, 0b11000011, 0b11000101, 0b11000110, 0b11001001, 0b11001010, + 0b11001100, 0b11010001, 0b11010010, 0b11010100, 0b11011000, 0b11100001, + 0b11100010, 0b11100100, 0b11101000, 0b11110000}; + +// clang-format off +// New entries should be added with state "sandboxed". +// When adding new tags, please ensure that the code using these tags is +// "substitution-safe", i.e. that it still operates safely if external pointers +// of the same type are swapped by an attacker. See comment above for more details.
+#define TAG(i) (kAllExternalPointerTypeTags[i]) + +// Shared external pointers are owned by the shared Isolate and stored in the +// shared external pointer table associated with that Isolate, where they can +// be accessed from multiple threads at the same time. The objects referenced +// in this way must therefore always be thread-safe. +#define SHARED_EXTERNAL_POINTER_TAGS(V) \ + V(kFirstSharedTag, sandboxed, TAG(0)) \ + V(kWaiterQueueNodeTag, sandboxed, TAG(0)) \ + V(kExternalStringResourceTag, sandboxed, TAG(1)) \ + V(kExternalStringResourceDataTag, sandboxed, TAG(2)) \ + V(kLastSharedTag, sandboxed, TAG(2)) + +// External pointers using these tags are kept in a per-Isolate external +// pointer table and can only be accessed when this Isolate is active. +#define PER_ISOLATE_EXTERNAL_POINTER_TAGS(V) \ + V(kForeignForeignAddressTag, sandboxed, TAG(10)) \ + V(kNativeContextMicrotaskQueueTag, sandboxed, TAG(11)) \ + V(kEmbedderDataSlotPayloadTag, sandboxed, TAG(12)) \ + V(kExternalObjectValueTag, sandboxed, TAG(13)) \ + V(kCallHandlerInfoCallbackTag, sandboxed, TAG(14)) \ + V(kAccessorInfoGetterTag, sandboxed, TAG(15)) \ + V(kAccessorInfoSetterTag, sandboxed, TAG(16)) \ + V(kWasmInternalFunctionCallTargetTag, sandboxed, TAG(17)) \ + V(kWasmTypeInfoNativeTypeTag, sandboxed, TAG(18)) \ + V(kWasmExportedFunctionDataSignatureTag, sandboxed, TAG(19)) \ + V(kWasmContinuationJmpbufTag, sandboxed, TAG(20)) + +// All external pointer tags. +#define ALL_EXTERNAL_POINTER_TAGS(V) \ + SHARED_EXTERNAL_POINTER_TAGS(V) \ + PER_ISOLATE_EXTERNAL_POINTER_TAGS(V) + +// When the sandbox is enabled, external pointers marked as "sandboxed" above +// use the external pointer table (i.e. are sandboxed). This allows a gradual +// rollout of external pointer sandboxing. If the sandbox is off, no external +// pointers are sandboxed. +// +// Sandboxed external pointer tags are available when compressing pointers even +// when the sandbox is off. Some tags (e.g. kWaiterQueueNodeTag) are used +// manually with the external pointer table even when the sandbox is off to ease +// alignment requirements. +#define sandboxed(X) (X << kExternalPointerTagShift) | kExternalPointerMarkBit +#define unsandboxed(X) kUnsandboxedExternalPointerTag +#if defined(V8_COMPRESS_POINTERS) +#define EXTERNAL_POINTER_TAG_ENUM(Name, State, Bits) Name = State(Bits), +#else +#define EXTERNAL_POINTER_TAG_ENUM(Name, State, Bits) Name = unsandboxed(Bits), +#endif + +#define MAKE_TAG(HasMarkBit, TypeTag) \ + ((static_cast<uint64_t>(TypeTag) << kExternalPointerTagShift) | \ + (HasMarkBit ? kExternalPointerMarkBit : 0)) +enum ExternalPointerTag : uint64_t { + // Empty tag value. Mostly used as placeholder. + kExternalPointerNullTag = MAKE_TAG(0, 0b00000000), + // Tag to use for unsandboxed external pointers, which are still stored as + // raw pointers on the heap. + kUnsandboxedExternalPointerTag = MAKE_TAG(0, 0b00000000), + // External pointer tag that will match any external pointer. Use with care! + kAnyExternalPointerTag = MAKE_TAG(1, 0b11111111), + // The free entry tag has all type bits set so every type check with a + // different type fails. It also doesn't have the mark bit set as free + // entries are (by definition) not alive. + kExternalPointerFreeEntryTag = MAKE_TAG(0, 0b11111111), + // Evacuation entries are used during external pointer table compaction.
+ kExternalPointerEvacuationEntryTag = MAKE_TAG(1, 0b11100111), + + ALL_EXTERNAL_POINTER_TAGS(EXTERNAL_POINTER_TAG_ENUM) +}; + +#undef MAKE_TAG +#undef unsandboxed +#undef sandboxed +#undef TAG +#undef EXTERNAL_POINTER_TAG_ENUM + +// clang-format on + +// True if the external pointer is sandboxed and so must be referenced through +// an external pointer table. +V8_INLINE static constexpr bool IsSandboxedExternalPointerType( + ExternalPointerTag tag) { + return tag != kUnsandboxedExternalPointerTag; +} + +// True if the external pointer must be accessed from the shared isolate's +// external pointer table. +V8_INLINE static constexpr bool IsSharedExternalPointerType( + ExternalPointerTag tag) { + return tag >= kFirstSharedTag && tag <= kLastSharedTag; +} + +// Sanity checks. +#define CHECK_SHARED_EXTERNAL_POINTER_TAGS(Tag, ...) \ + static_assert(!IsSandboxedExternalPointerType(Tag) || \ + IsSharedExternalPointerType(Tag)); +#define CHECK_NON_SHARED_EXTERNAL_POINTER_TAGS(Tag, ...) \ + static_assert(!IsSandboxedExternalPointerType(Tag) || \ + !IsSharedExternalPointerType(Tag)); + +SHARED_EXTERNAL_POINTER_TAGS(CHECK_SHARED_EXTERNAL_POINTER_TAGS) +PER_ISOLATE_EXTERNAL_POINTER_TAGS(CHECK_NON_SHARED_EXTERNAL_POINTER_TAGS) + +#undef CHECK_NON_SHARED_EXTERNAL_POINTER_TAGS +#undef CHECK_SHARED_EXTERNAL_POINTER_TAGS + +#undef SHARED_EXTERNAL_POINTER_TAGS +#undef EXTERNAL_POINTER_TAGS // {obj} must be the raw tagged pointer representation of a HeapObject // that's guaranteed to never be in ReadOnlySpace. @@ -184,9 +490,6 @@ V8_EXPORT internal::Isolate* IsolateFromNeverReadOnlySpaceObject(Address obj); // mode based on the current context and the closure. This returns true if the // language mode is strict. V8_EXPORT bool ShouldThrowOnError(v8::internal::Isolate* isolate); - -V8_EXPORT bool CanHaveInternalField(int instance_type); - /** * This class exports constants and functionality from within v8 that * is necessary to implement inline functions in the v8 api. Don't @@ -214,26 +517,34 @@ class Internals { static const int kFixedArrayHeaderSize = 2 * kApiTaggedSize; static const int kEmbedderDataArrayHeaderSize = 2 * kApiTaggedSize; static const int kEmbedderDataSlotSize = kApiSystemPointerSize; -#ifdef V8_HEAP_SANDBOX - static const int kEmbedderDataSlotRawPayloadOffset = kApiTaggedSize; +#ifdef V8_ENABLE_SANDBOX + static const int kEmbedderDataSlotExternalPointerOffset = kApiTaggedSize; +#else + static const int kEmbedderDataSlotExternalPointerOffset = 0; #endif static const int kNativeContextEmbedderDataOffset = 6 * kApiTaggedSize; - static const int kFullStringRepresentationMask = 0x0f; + static const int kStringRepresentationAndEncodingMask = 0x0f; static const int kStringEncodingMask = 0x8; static const int kExternalTwoByteRepresentationTag = 0x02; static const int kExternalOneByteRepresentationTag = 0x0a; static const uint32_t kNumIsolateDataSlots = 4; static const int kStackGuardSize = 7 * kApiSystemPointerSize; - static const int kBuiltinTier0EntryTableSize = 13 * kApiSystemPointerSize; - static const int kBuiltinTier0TableSize = 13 * kApiSystemPointerSize; + static const int kBuiltinTier0EntryTableSize = 7 * kApiSystemPointerSize; + static const int kBuiltinTier0TableSize = 7 * kApiSystemPointerSize; + + // ExternalPointerTable layout guarantees. + static const int kExternalPointerTableBufferOffset = 0; + static const int kExternalPointerTableSize = 4 * kApiSystemPointerSize; // IsolateData layout guarantees. 
static const int kIsolateCageBaseOffset = 0; static const int kIsolateStackGuardOffset = kIsolateCageBaseOffset + kApiSystemPointerSize; - static const int kBuiltinTier0EntryTableOffset = + static const int kVariousBooleanFlagsOffset = kIsolateStackGuardOffset + kStackGuardSize; + static const int kBuiltinTier0EntryTableOffset = + kVariousBooleanFlagsOffset + kApiSystemPointerSize; static const int kBuiltinTier0TableOffset = kBuiltinTier0EntryTableOffset + kBuiltinTier0EntryTableSize; static const int kIsolateEmbedderDataOffset = @@ -246,14 +557,17 @@ class Internals { kIsolateFastCCallCallerPcOffset + kApiSystemPointerSize; static const int kIsolateLongTaskStatsCounterOffset = kIsolateFastApiCallTargetOffset + kApiSystemPointerSize; +#ifdef V8_COMPRESS_POINTERS + static const int kIsolateExternalPointerTableOffset = + kIsolateLongTaskStatsCounterOffset + kApiSizetSize; + static const int kIsolateSharedExternalPointerTableAddressOffset = + kIsolateExternalPointerTableOffset + kExternalPointerTableSize; + static const int kIsolateRootsOffset = + kIsolateSharedExternalPointerTableAddressOffset + kApiSystemPointerSize; +#else static const int kIsolateRootsOffset = kIsolateLongTaskStatsCounterOffset + kApiSizetSize; - - static const int kExternalPointerTableBufferOffset = 0; - static const int kExternalPointerTableLengthOffset = - kExternalPointerTableBufferOffset + kApiSystemPointerSize; - static const int kExternalPointerTableCapacityOffset = - kExternalPointerTableLengthOffset + kApiInt32Size; +#endif static const int kUndefinedValueRootIndex = 4; static const int kTheHoleValueRootIndex = 5; @@ -264,13 +578,14 @@ class Internals { static const int kNodeClassIdOffset = 1 * kApiSystemPointerSize; static const int kNodeFlagsOffset = 1 * kApiSystemPointerSize + 3; - static const int kNodeStateMask = 0x7; + static const int kNodeStateMask = 0x3; static const int kNodeStateIsWeakValue = 2; - static const int kNodeStateIsPendingValue = 3; - static const int kFirstNonstringType = 0x40; - static const int kOddballType = 0x43; - static const int kForeignType = 0x46; + static const int kTracedNodeClassIdOffset = kApiSystemPointerSize; + + static const int kFirstNonstringType = 0x80; + static const int kOddballType = 0x83; + static const int kForeignType = 0xcc; static const int kJSSpecialApiObjectType = 0x410; static const int kJSObjectType = 0x421; static const int kFirstJSApiObjectType = 0x422; @@ -337,10 +652,22 @@ class Internals { } V8_INLINE static bool IsExternalTwoByteString(int instance_type) { - int representation = (instance_type & kFullStringRepresentationMask); + int representation = (instance_type & kStringRepresentationAndEncodingMask); return representation == kExternalTwoByteRepresentationTag; } + V8_INLINE static constexpr bool CanHaveInternalField(int instance_type) { + static_assert(kJSObjectType + 1 == kFirstJSApiObjectType); + static_assert(kJSObjectType < kLastJSApiObjectType); + static_assert(kFirstJSApiObjectType < kLastJSApiObjectType); + // Check for IsJSObject() || IsJSSpecialApiObject() || IsJSApiObject() + return instance_type == kJSSpecialApiObjectType || + // inlined version of base::IsInRange + (static_cast<unsigned>(static_cast<unsigned>(instance_type) - + static_cast<unsigned>(kJSObjectType)) <= + static_cast<unsigned>(kLastJSApiObjectType - kJSObjectType)); + } + V8_INLINE static uint8_t GetNodeFlag(internal::Address* obj, int shift) { uint8_t* addr = reinterpret_cast<uint8_t*>(obj) + kNodeFlagsOffset; return *addr & static_cast<uint8_t>(1U << shift); @@ -392,6 +719,25 @@ class Internals { return reinterpret_cast<internal::Address*>(addr); }
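The new CanHaveInternalField() above leans on a well-known trick: subtracting the lower bound and comparing as unsigned folds a two-sided range check into a single comparison, because values below the range wrap around to huge unsigned numbers. A standalone illustration (the constants here are made up, not V8's actual instance types):

```cpp
constexpr bool InRange(int value, int lower, int upper) {
  // Equivalent to (lower <= value && value <= upper) with one comparison.
  return static_cast<unsigned>(value) - static_cast<unsigned>(lower) <=
         static_cast<unsigned>(upper - lower);
}

static_assert(InRange(0x421, 0x421, 0x80A));   // in range
static_assert(!InRange(0x410, 0x421, 0x80A));  // below: wraps to a huge value
static_assert(!InRange(0x80B, 0x421, 0x80A));  // above
```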
+#ifdef V8_ENABLE_SANDBOX + V8_INLINE static internal::Address* GetExternalPointerTableBase( + v8::Isolate* isolate) { + internal::Address addr = reinterpret_cast<internal::Address>(isolate) + + kIsolateExternalPointerTableOffset + + kExternalPointerTableBufferOffset; + return *reinterpret_cast<internal::Address**>(addr); + } + + V8_INLINE static internal::Address* GetSharedExternalPointerTableBase( + v8::Isolate* isolate) { + internal::Address addr = reinterpret_cast<internal::Address>(isolate) + + kIsolateSharedExternalPointerTableAddressOffset; + addr = *reinterpret_cast<internal::Address*>(addr); + addr += kExternalPointerTableBufferOffset; + return *reinterpret_cast<internal::Address**>(addr); + } +#endif + template <typename T> V8_INLINE static T ReadRawField(internal::Address heap_object_ptr, int offset) { @@ -432,45 +778,41 @@ class Internals { #endif } - V8_INLINE static internal::Isolate* GetIsolateForHeapSandbox( - internal::Address obj) { -#ifdef V8_HEAP_SANDBOX - return internal::IsolateFromNeverReadOnlySpaceObject(obj); + V8_INLINE static v8::Isolate* GetIsolateForSandbox(internal::Address obj) { +#ifdef V8_ENABLE_SANDBOX + return reinterpret_cast<v8::Isolate*>( + internal::IsolateFromNeverReadOnlySpaceObject(obj)); #else // Not used in non-sandbox mode. return nullptr; #endif } - V8_INLINE static Address DecodeExternalPointer( - const Isolate* isolate, ExternalPointer_t encoded_pointer, - ExternalPointerTag tag) { -#ifdef V8_HEAP_SANDBOX - return internal::DecodeExternalPointerImpl(isolate, encoded_pointer, tag); -#else - return encoded_pointer; -#endif - } - + template <ExternalPointerTag tag> V8_INLINE static internal::Address ReadExternalPointerField( - internal::Isolate* isolate, internal::Address heap_object_ptr, int offset, - ExternalPointerTag tag) { -#ifdef V8_HEAP_SANDBOX - internal::ExternalPointer_t encoded_value = - ReadRawField<ExternalPointer_t>(heap_object_ptr, offset); - // We currently have to treat zero as nullptr in embedder slots. - return encoded_value ? DecodeExternalPointer(isolate, encoded_value, tag) - : 0; -#else - return ReadRawField<Address>(heap_object_ptr, offset);
+ v8::Isolate* isolate, internal::Address heap_object_ptr, int offset) { +#ifdef V8_ENABLE_SANDBOX + if (IsSandboxedExternalPointerType(tag)) { + // See src/sandbox/external-pointer-table-inl.h. Logic duplicated here so + // it can be inlined and doesn't require an additional call. + internal::Address* table = + IsSharedExternalPointerType(tag) + ? GetSharedExternalPointerTableBase(isolate) + : GetExternalPointerTableBase(isolate); + internal::ExternalPointerHandle handle = + ReadRawField<ExternalPointerHandle>(heap_object_ptr, offset); + uint32_t index = handle >> kExternalPointerIndexShift; + std::atomic<internal::Address>* ptr = + reinterpret_cast<std::atomic<internal::Address>*>(&table[index]); + internal::Address entry = + std::atomic_load_explicit(ptr, std::memory_order_relaxed); + return entry & ~tag; + } #endif + return ReadRawField<Address>(heap_object_ptr, offset); }
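Outside the header, the lookup that ReadExternalPointerField() inlines above can be modeled in a few lines: shift the handle down to a table index, load the entry with relaxed ordering, and clear the expected tag plus mark bit with a single AND. A standalone model (the values are illustrative; the real table base comes from the Isolate):

```cpp
#include <atomic>
#include <cstdint>

using Address = uint64_t;

constexpr uint32_t kIndexShift = 6;  // mirrors kExternalPointerIndexShift

Address DecodeEntry(const std::atomic<Address>* table, uint32_t handle,
                    uint64_t expected_tag) {
  const uint32_t index = handle >> kIndexShift;
  const Address entry = table[index].load(std::memory_order_relaxed);
  // On a tag match this strips the type bits and the mark bit; on a mismatch
  // it leaves top bits set, yielding a non-canonical, inaccessible pointer.
  return entry & ~expected_tag;
}
```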
#ifdef V8_COMPRESS_POINTERS - // See v8:7703 or src/ptr-compr.* for details about pointer compression. - static constexpr size_t kPtrComprCageReservationSize = size_t{1} << 32; - static constexpr size_t kPtrComprCageBaseAlignment = size_t{1} << 32; - V8_INLINE static internal::Address GetPtrComprCageBaseFromOnHeapAddress( internal::Address addr) { return addr & -static_cast<intptr_t>(kPtrComprCageBaseAlignment); @@ -486,101 +828,6 @@ #endif // V8_COMPRESS_POINTERS }; -constexpr bool VirtualMemoryCageIsEnabled() { -#ifdef V8_VIRTUAL_MEMORY_CAGE - return true; -#else - return false; -#endif -} - -#ifdef V8_VIRTUAL_MEMORY_CAGE_IS_AVAILABLE - -#define GB (1ULL << 30) -#define TB (1ULL << 40) - -// Size of the virtual memory cage, excluding the guard regions surrounding it. -constexpr size_t kVirtualMemoryCageSizeLog2 = 40; // 1 TB -constexpr size_t kVirtualMemoryCageSize = 1ULL << kVirtualMemoryCageSizeLog2; - -// Required alignment of the virtual memory cage. For simplicity, we require the -// size of the guard regions to be a multiple of this, so that this specifies -// the alignment of the cage including and excluding surrounding guard regions. -// The alignment requirement is due to the pointer compression cage being -// located at the start of the virtual memory cage. -constexpr size_t kVirtualMemoryCageAlignment = - Internals::kPtrComprCageBaseAlignment; - -#ifdef V8_CAGED_POINTERS -// CagedPointers are guaranteed to point into the virtual memory cage. This is -// achieved by storing them as offset from the cage base rather than as raw -// pointers. -using CagedPointer_t = Address; - -// For efficiency, the offset is stored shifted to the left, so that -// it is guaranteed that the offset is smaller than the cage size after -// shifting it to the right again. This constant specifies the shift amount. -constexpr uint64_t kCagedPointerShift = 64 - kVirtualMemoryCageSizeLog2; -#endif - -// Size of the guard regions surrounding the virtual memory cage. This assumes a -// worst-case scenario of a 32-bit unsigned index being used to access an array -// of 64-bit values. -constexpr size_t kVirtualMemoryCageGuardRegionSize = 32ULL * GB; - -static_assert((kVirtualMemoryCageGuardRegionSize % - kVirtualMemoryCageAlignment) == 0, - "The size of the virtual memory cage guard region must be a " - "multiple of its required alignment."); - -// Minimum size of the virtual memory cage, excluding the guard regions -// surrounding it. If the cage reservation fails, its size is currently halved -// until either the reservation succeeds or the minimum size is reached. A -// minimum of 32GB allows the 4GB pointer compression region as well as the -// ArrayBuffer partition and two 10GB WASM memory cages to fit into the cage. -// 32GB should also be the minimum possible size of the userspace address space -// as there are some machine configurations with only 36 virtual address bits.
-constexpr size_t kVirtualMemoryCageMinimumSize = 32ULL * GB; - -static_assert(kVirtualMemoryCageMinimumSize <= kVirtualMemoryCageSize, - "The minimal size of the virtual memory cage must be smaller or " - "equal to the regular size."); - -// On OSes where reservation virtual memory is too expensive to create a real -// cage, notably Windows pre 8.1, we create a fake cage that doesn't actually -// reserve most of the memory, and so doesn't have the desired security -// properties, but still ensures that objects that should be located inside the -// cage are allocated within kVirtualMemoryCageSize bytes from the start of the -// cage, and so appear to be inside the cage. The minimum size of the virtual -// memory range that is actually reserved for a fake cage is specified by this -// constant and should be big enough to contain the pointer compression region -// as well as the ArrayBuffer partition. -constexpr size_t kFakeVirtualMemoryCageMinReservationSize = 8ULL * GB; - -static_assert(kVirtualMemoryCageMinimumSize > - Internals::kPtrComprCageReservationSize, - "The virtual memory cage must be larger than the pointer " - "compression cage contained within it."); -static_assert(kFakeVirtualMemoryCageMinReservationSize > - Internals::kPtrComprCageReservationSize, - "The reservation for a fake virtual memory cage must be larger " - "than the pointer compression cage contained within it."); - -// For now, even if the virtual memory cage is enabled, we still allow backing -// stores to be allocated outside of it as fallback. This will simplify the -// initial rollout. However, if the heap sandbox is also enabled, we already use -// the "enforcing mode" of the virtual memory cage. This is useful for testing. -#ifdef V8_HEAP_SANDBOX -constexpr bool kAllowBackingStoresOutsideCage = false; -#else -constexpr bool kAllowBackingStoresOutsideCage = true; -#endif // V8_HEAP_SANDBOX - -#undef GB -#undef TB - -#endif // V8_VIRTUAL_MEMORY_CAGE_IS_AVAILABLE - // Only perform cast check for types derived from v8::Data since // other types do not implement the Cast method. template <bool PerformCheck> @@ -609,6 +856,10 @@ V8_INLINE void PerformCastCheck(T* data) { // how static casts work with std::shared_ptr. class BackingStoreBase {}; +// The maximum value in enum GarbageCollectionReason, defined in heap.h. +// This is needed for histograms sampling garbage collection reasons. +constexpr int kGarbageCollectionReasonMaxValue = 27; + } // namespace internal } // namespace v8 diff --git a/deps/include/v8-isolate.h b/deps/include/v8-isolate.h index 32b53f1b..e9f53197 100644 --- a/deps/include/v8-isolate.h +++ b/deps/include/v8-isolate.h @@ -10,7 +10,6 @@ #include <stddef.h> #include <stdint.h> -#include <memory> #include "cppgc/common.h" #include "v8-array-buffer.h" // NOLINT(build/include_directory) @@ -195,6 +194,11 @@ enum RAILMode : unsigned { */ enum class MemoryPressureLevel { kNone, kModerate, kCritical }; +/** + * Indicator for the stack state. + */ +using StackState = cppgc::EmbedderStackState; + /** * Isolate represents an isolated instance of the V8 engine. V8 isolates have * completely separate states. Objects from one isolate must not be used in @@ -212,6 +216,8 @@ class V8_EXPORT Isolate { CreateParams(); ~CreateParams(); + ALLOW_COPY_AND_MOVE_WITH_DEPRECATED_FIELDS(CreateParams) + /** * Allows the host application to provide the address of a function that is * notified each time code is added, moved or removed. @@ -225,6 +231,7 @@ class V8_EXPORT Isolate { /** * Explicitly specify a startup snapshot blob. The embedder owns the blob.
+ * The embedder *must* ensure that the snapshot is from a trusted source. */ StartupData* snapshot_blob = nullptr; @@ -283,10 +290,10 @@ class V8_EXPORT Isolate { int embedder_wrapper_object_index = -1; /** - * The following parameter is experimental and may change significantly. - * This is currently for internal testing. + * Callbacks to invoke in case of fatal or OOM errors. */ - Isolate* experimental_attach_to_shared_isolate = nullptr; + FatalErrorCallback fatal_error_callback = nullptr; + OOMErrorCallback oom_error_callback = nullptr; }; /** @@ -295,16 +302,18 @@ class V8_EXPORT Isolate { */ class V8_EXPORT V8_NODISCARD Scope { public: - explicit Scope(Isolate* isolate) : isolate_(isolate) { isolate->Enter(); } + explicit Scope(Isolate* isolate) : v8_isolate_(isolate) { + v8_isolate_->Enter(); + } - ~Scope() { isolate_->Exit(); } + ~Scope() { v8_isolate_->Exit(); } // Prevent copying of Scope objects. Scope(const Scope&) = delete; Scope& operator=(const Scope&) = delete; private: - Isolate* const isolate_; + Isolate* const v8_isolate_; }; /** @@ -325,7 +334,7 @@ class V8_EXPORT Isolate { private: OnFailure on_failure_; - Isolate* isolate_; + v8::Isolate* v8_isolate_; bool was_execution_allowed_assert_; bool was_execution_allowed_throws_; @@ -347,7 +356,7 @@ class V8_EXPORT Isolate { const AllowJavascriptExecutionScope&) = delete; private: - Isolate* isolate_; + Isolate* v8_isolate_; bool was_execution_allowed_assert_; bool was_execution_allowed_throws_; bool was_execution_allowed_dump_; @@ -370,7 +379,7 @@ class V8_EXPORT Isolate { const SuppressMicrotaskExecutionScope&) = delete; private: - internal::Isolate* const isolate_; + internal::Isolate* const i_isolate_; internal::MicrotaskQueue* const microtask_queue_; internal::Address previous_stack_height_; @@ -383,7 +392,7 @@ class V8_EXPORT Isolate { */ class V8_EXPORT V8_NODISCARD SafeForTerminationScope { public: - explicit SafeForTerminationScope(v8::Isolate* isolate); + explicit SafeForTerminationScope(v8::Isolate* v8_isolate); ~SafeForTerminationScope(); // Prevent copying of Scope objects. @@ -391,7 +400,7 @@ class V8_EXPORT Isolate { SafeForTerminationScope& operator=(const SafeForTerminationScope&) = delete; private: - internal::Isolate* isolate_; + internal::Isolate* i_isolate_; bool prev_value_; }; @@ -523,6 +532,11 @@ class V8_EXPORT Isolate { kWasmMultiValue = 110, kWasmExceptionHandling = 111, kInvalidatedMegaDOMProtector = 112, + kFunctionPrototypeArguments = 113, + kFunctionPrototypeCaller = 114, + kTurboFanOsrCompileStarted = 115, + kAsyncStackTaggingCreateTaskCall = 116, + kDurationFormat = 117, // If you add new values here, you'll also need to update Chromium's: // web_feature.mojom, use_counter_callback.cc, and enums.xml. V8 changes to @@ -629,7 +643,7 @@ class V8_EXPORT Isolate { * import() language feature to load modules. */ void SetHostImportModuleDynamicallyCallback( - HostImportModuleDynamicallyWithImportAssertionsCallback callback); + HostImportModuleDynamicallyCallback callback); /** * This specifies the callback called by the upcoming import.meta @@ -638,6 +652,13 @@ class V8_EXPORT Isolate { void SetHostInitializeImportMetaObjectCallback( HostInitializeImportMetaObjectCallback callback); + /** + * This specifies the callback called by the upcoming ShadowRealm + * construction language feature to retrieve host created globals. 
+ */ + void SetHostCreateShadowRealmContextCallback( + HostCreateShadowRealmContextCallback callback); + /** * This specifies the callback called when the stack property of Error * is accessed. @@ -821,12 +842,6 @@ class V8_EXPORT Isolate { */ int64_t AdjustAmountOfExternalAllocatedMemory(int64_t change_in_bytes); - /** - * Returns the number of phantom handles without callbacks that were reset - * by the garbage collector since the last call to this function. - */ - size_t NumberOfPhantomHandleResetsSinceLastCall(); - /** * Returns heap profiler for this isolate. Will return NULL until the isolate * is initialized. @@ -909,15 +924,19 @@ class V8_EXPORT Isolate { void RemoveGCPrologueCallback(GCCallbackWithData, void* data = nullptr); void RemoveGCPrologueCallback(GCCallback callback); + START_ALLOW_USE_DEPRECATED() /** * Sets the embedder heap tracer for the isolate. + * SetEmbedderHeapTracer cannot be used simultaneously with AttachCppHeap. */ void SetEmbedderHeapTracer(EmbedderHeapTracer* tracer); /* - * Gets the currently active heap tracer for the isolate. + * Gets the currently active heap tracer for the isolate that was set with + * SetEmbedderHeapTracer. */ EmbedderHeapTracer* GetEmbedderHeapTracer(); + END_ALLOW_USE_DEPRECATED() /** * Sets an embedder roots handler that V8 should consider when performing @@ -936,20 +955,19 @@ class V8_EXPORT Isolate { * embedder maintains ownership of the CppHeap. At most one C++ heap can be * attached to V8. * - * This is an experimental feature and may still change significantly. + * AttachCppHeap cannot be used simultaneously with SetEmbedderHeapTracer. + * + * Multi-threaded use requires the use of v8::Locker/v8::Unlocker, see + * CppHeap. */ void AttachCppHeap(CppHeap*); /** * Detaches a managed C++ heap if one was attached using `AttachCppHeap()`. - * - * This is an experimental feature and may still change significantly. */ void DetachCppHeap(); /** - * This is an experimental feature and may still change significantly. - * \returns the C++ heap managed by V8. Only available if such a heap has been * attached using `AttachCppHeap()`. */ @@ -1131,6 +1149,20 @@ class V8_EXPORT Isolate { */ void RequestGarbageCollectionForTesting(GarbageCollectionType type); + /** + * Request garbage collection with a specific embedder stack state in this + * Isolate. It is only valid to call this function if --expose_gc was + * specified. + * + * This should only be used for testing purposes and not to enforce a garbage + * collection schedule. It has a strong negative impact on the garbage + * collection performance. Use IdleNotificationDeadline() or + * LowMemoryNotification() instead to influence the garbage collection + * schedule. + */ + void RequestGarbageCollectionForTesting(GarbageCollectionType type, + StackState stack_state); +
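A minimal sketch of the new overload in a test helper (only meaningful when the process runs with --expose_gc):

```cpp
#include "v8-isolate.h"  // usually pulled in via v8.h

void ForceFullGCForTest(v8::Isolate* isolate) {
  // kNoHeapPointers tells V8 that the native stack holds no V8 references,
  // which allows a more precise collection; use kMayContainHeapPointers
  // when that cannot be guaranteed.
  isolate->RequestGarbageCollectionForTesting(
      v8::Isolate::kFullGarbageCollection, v8::StackState::kNoHeapPointers);
}
```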
/** * Set the callback to invoke for logging event. */ @@ -1487,15 +1519,17 @@ void SetWasmStreamingCallback(WasmStreamingCallback callback); + void SetWasmAsyncResolvePromiseCallback( + WasmAsyncResolvePromiseCallback callback); + void SetWasmLoadSourceMapCallback(WasmLoadSourceMapCallback callback); + V8_DEPRECATED("Wasm SIMD is always enabled") void SetWasmSimdEnabledCallback(WasmSimdEnabledCallback callback); + V8_DEPRECATED("Wasm exceptions are always enabled") void SetWasmExceptionsEnabledCallback(WasmExceptionsEnabledCallback callback); - void SetWasmDynamicTieringEnabledCallback( - WasmDynamicTieringEnabledCallback callback); - void SetSharedArrayBufferConstructorEnabledCallback( SharedArrayBufferConstructorEnabledCallback callback); @@ -1562,19 +1596,6 @@ class V8_EXPORT Isolate { */ void VisitExternalResources(ExternalResourceVisitor* visitor); - /** - * Iterates through all the persistent handles in the current isolate's heap - * that have class_ids. - */ - void VisitHandlesWithClassIds(PersistentHandleVisitor* visitor); - - /** - * Iterates through all the persistent handles in the current isolate's heap - * that have class_ids and are weak to be marked as inactive if there is no - * pending activity for the handle. - */ - void VisitWeakHandles(PersistentHandleVisitor* visitor); - /** * Check if this isolate is in use. * True if at least one thread Enter'ed this isolate. diff --git a/deps/include/v8-local-handle.h b/deps/include/v8-local-handle.h index 66a8e93a..cbf87f94 100644 --- a/deps/include/v8-local-handle.h +++ b/deps/include/v8-local-handle.h @@ -46,8 +46,6 @@ class String; template <class T> class Traced; template <class T> -class TracedGlobal; -template <class T> class TracedReference; class TracedReferenceBase; class Utils; @@ -88,7 +86,7 @@ class V8_EXPORT V8_NODISCARD HandleScope { static int NumberOfHandles(Isolate* isolate); V8_INLINE Isolate* GetIsolate() const { - return reinterpret_cast<Isolate*>(isolate_); + return reinterpret_cast<Isolate*>(i_isolate_); } HandleScope(const HandleScope&) = delete; @@ -99,7 +97,7 @@ void Initialize(Isolate* isolate); - static internal::Address* CreateHandle(internal::Isolate* isolate, + static internal::Address* CreateHandle(internal::Isolate* i_isolate, internal::Address value); private: @@ -110,7 +108,7 @@ void operator delete(void*, size_t); void operator delete[](void*, size_t); - internal::Isolate* isolate_; + internal::Isolate* i_isolate_; internal::Address* prev_next_; internal::Address* prev_limit_; @@ -312,8 +310,6 @@ class Local { template <class F> friend class Traced; template <class F> - friend class TracedGlobal; - template <class F> friend class BasicTracedReference; template <class F> friend class TracedReference; @@ -358,7 +354,7 @@ class MaybeLocal { /** * Converts this MaybeLocal<> to a Local<>. If this MaybeLocal<> is empty, - * |false| is returned and |out| is left untouched. + * |false| is returned and |out| is set to nullptr. */ template <class S> V8_WARN_UNUSED_RESULT V8_INLINE bool ToLocal(Local<S>* out) const {
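The behavioral change in ToLocal() above (assigning nullptr on failure instead of leaving |out| untouched) makes the common pattern below safe even when the out variable is reused. A minimal usage sketch (the helper name is hypothetical):

```cpp
#include "v8-local-handle.h"  // usually pulled in via v8.h

bool ReadProperty(v8::Local<v8::Context> context, v8::Local<v8::Object> obj,
                  v8::Local<v8::String> key, v8::Local<v8::Value>* out) {
  // If Get() throws, ToLocal() returns false and *out is now nullptr rather
  // than whatever it happened to contain before the call.
  return obj->Get(context, key).ToLocal(out);
}
```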
@@ -449,7 +445,7 @@ class V8_EXPORT V8_NODISCARD SealHandleScope { void operator delete(void*, size_t); void operator delete[](void*, size_t); - internal::Isolate* const isolate_; + internal::Isolate* const i_isolate_; internal::Address* prev_limit_; int prev_sealed_level_; }; diff --git a/deps/include/v8-locker.h b/deps/include/v8-locker.h index 360022b7..22b7a876 100644 --- a/deps/include/v8-locker.h +++ b/deps/include/v8-locker.h @@ -121,16 +121,6 @@ class V8_EXPORT Locker { */ static bool IsLocked(Isolate* isolate); - /** - * Returns whether any v8::Locker has ever been used in this process. - * TODO(cbruni, chromium:1240851): Fix locking checks on a per-thread basis. - * The current implementation is quite confusing and leads to unexpected - * results if anybody uses v8::Locker in the current process. - */ - static bool WasEverUsed(); - V8_DEPRECATE_SOON("Use WasEverUsed instead") - static bool IsActive(); - // Disallow copying and assigning. Locker(const Locker&) = delete; void operator=(const Locker&) = delete; diff --git a/deps/include/v8-maybe.h b/deps/include/v8-maybe.h index 0532a510..8d3aeabe 100644 --- a/deps/include/v8-maybe.h +++ b/deps/include/v8-maybe.h @@ -5,6 +5,9 @@ #ifndef INCLUDE_V8_MAYBE_H_ #define INCLUDE_V8_MAYBE_H_ +#include <type_traits> +#include <utility> + #include "v8-internal.h" // NOLINT(build/include_directory) #include "v8config.h" // NOLINT(build/include_directory) @@ -57,11 +60,20 @@ class Maybe { * Converts this Maybe<> to a value of type T. If this Maybe<> is * nothing (empty), V8 will crash the process. */ - V8_INLINE T FromJust() const { + V8_INLINE T FromJust() const& { if (V8_UNLIKELY(!IsJust())) api_internal::FromJustIsNothing(); return value_; } + /** + * Converts this Maybe<> to a value of type T. If this Maybe<> is + * nothing (empty), V8 will crash the process. + */ + V8_INLINE T FromJust() && { + if (V8_UNLIKELY(!IsJust())) api_internal::FromJustIsNothing(); + return std::move(value_); + } + /** * Converts this Maybe<> to a value of type T, using a default value if this * Maybe<> is nothing (empty). @@ -82,6 +94,7 @@ class Maybe { private: Maybe() : has_value_(false) {} explicit Maybe(const T& t) : has_value_(true), value_(t) {} + explicit Maybe(T&& t) : has_value_(true), value_(std::move(t)) {} bool has_value_; T value_; @@ -90,6 +103,8 @@ class Maybe { friend Maybe<U> Nothing(); template <class U> friend Maybe<U> Just(const U& u); + template <class U, std::enable_if_t<!std::is_lvalue_reference_v<U>>*> + friend Maybe<U> Just(U&& u); }; template <class T> @@ -102,6 +117,14 @@ inline Maybe<T> Just(const T& t) { return Maybe<T>(t); } +// Don't use forwarding references here but instead use two overloads. +// Forwarding references only work when type deduction takes place, which is not +// the case for callsites such as Just<Type>(t). +template <class T, std::enable_if_t<!std::is_lvalue_reference_v<T>>* = nullptr> +inline Maybe<T> Just(T&& t) { + return Maybe<T>(std::move(t)); +} +
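A minimal sketch of what the rvalue paths added here buy the embedder: movable payloads can go into and out of a Maybe without copies (std::string stands in for any movable type):

```cpp
#include <string>
#include <utility>

#include "v8-maybe.h"  // usually pulled in via v8.h

v8::Maybe<std::string> Describe(bool ok) {
  std::string text = "a fairly long description that is worth moving";
  // Binds to the new Just(T&&) overload, moving the string into the Maybe.
  return ok ? v8::Just(std::move(text)) : v8::Nothing<std::string>();
}

// FromJust() on the rvalue Maybe uses the new && overload and moves back out.
std::string description = Describe(true).FromJust();
```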
// A template specialization of Maybe for the case of T = void. template <> class Maybe<void> { diff --git a/deps/include/v8-message.h b/deps/include/v8-message.h index e9d668ca..09f9a0a9 100644 --- a/deps/include/v8-message.h +++ b/deps/include/v8-message.h @@ -11,6 +11,7 @@ #include "v8-local-handle.h" // NOLINT(build/include_directory) #include "v8-maybe.h" // NOLINT(build/include_directory) +#include "v8-primitive.h" // NOLINT(build/include_directory) #include "v8config.h" // NOLINT(build/include_directory) namespace v8 { @@ -60,35 +61,16 @@ class ScriptOriginOptions { */ class V8_EXPORT ScriptOrigin { public: - V8_DEPRECATED("Use constructor with primitive C++ types") - ScriptOrigin( - Local<Value> resource_name, Local<Integer> resource_line_offset, - Local<Integer> resource_column_offset, - Local<Boolean> resource_is_shared_cross_origin = Local<Boolean>(), - Local<Integer> script_id = Local<Integer>(), - Local<Value> source_map_url = Local<Value>(), - Local<Boolean> resource_is_opaque = Local<Boolean>(), - Local<Boolean> is_wasm = Local<Boolean>(), - Local<Boolean> is_module = Local<Boolean>(), - Local<PrimitiveArray> host_defined_options = Local<PrimitiveArray>()); - V8_DEPRECATED("Use constructor that takes an isolate") - explicit ScriptOrigin( - Local<Value> resource_name, int resource_line_offset = 0, - int resource_column_offset = 0, - bool resource_is_shared_cross_origin = false, int script_id = -1, - Local<Value> source_map_url = Local<Value>(), - bool resource_is_opaque = false, bool is_wasm = false, - bool is_module = false, - Local<PrimitiveArray> host_defined_options = Local<PrimitiveArray>()); - V8_INLINE ScriptOrigin( - Isolate* isolate, Local<Value> resource_name, - int resource_line_offset = 0, int resource_column_offset = 0, - bool resource_is_shared_cross_origin = false, int script_id = -1, - Local<Value> source_map_url = Local<Value>(), - bool resource_is_opaque = false, bool is_wasm = false, - bool is_module = false, - Local<PrimitiveArray> host_defined_options = Local<PrimitiveArray>()) - : isolate_(isolate), + V8_INLINE ScriptOrigin(Isolate* isolate, Local<Value> resource_name, + int resource_line_offset = 0, + int resource_column_offset = 0, + bool resource_is_shared_cross_origin = false, + int script_id = -1, + Local<Value> source_map_url = Local<Value>(), + bool resource_is_opaque = false, bool is_wasm = false, + bool is_module = false, + Local<Data> host_defined_options = Local<Data>()) + : v8_isolate_(isolate), resource_name_(resource_name), resource_line_offset_(resource_line_offset), resource_column_offset_(resource_column_offset), @@ -96,31 +78,28 @@ class V8_EXPORT ScriptOrigin { is_module), script_id_(script_id), source_map_url_(source_map_url), - host_defined_options_(host_defined_options) {} + host_defined_options_(host_defined_options) { + VerifyHostDefinedOptions(); + } V8_INLINE Local<Value> ResourceName() const; - V8_DEPRECATED("Use getter with primitive C++ types.") - V8_INLINE Local<Integer> ResourceLineOffset() const; - V8_DEPRECATED("Use getter with primitive C++ types.") - V8_INLINE Local<Integer> ResourceColumnOffset() const; - V8_DEPRECATED("Use getter with primitive C++ types.") - V8_INLINE Local<Integer> ScriptID() const; V8_INLINE int LineOffset() const; V8_INLINE int ColumnOffset() const; V8_INLINE int ScriptId() const; V8_INLINE Local<Value> SourceMapUrl() const; - V8_INLINE Local<PrimitiveArray> HostDefinedOptions() const; + V8_INLINE Local<Data> GetHostDefinedOptions() const; V8_INLINE ScriptOriginOptions Options() const { return options_; } private: - Isolate* isolate_; + void VerifyHostDefinedOptions() const; + Isolate* v8_isolate_; Local<Value> resource_name_; int resource_line_offset_; int resource_column_offset_; ScriptOriginOptions options_; int script_id_; Local<Value> source_map_url_; - Local<PrimitiveArray> host_defined_options_; + Local<Data> host_defined_options_; };
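A minimal usage sketch of the one surviving constructor (names are illustrative): offsets and flags are now plain C++ types, and host-defined options take a Local<Data> where the removed constructors used Local<PrimitiveArray>.

```cpp
#include "v8-message.h"    // usually pulled in via v8.h
#include "v8-primitive.h"

v8::ScriptOrigin MakeOrigin(v8::Isolate* isolate) {
  v8::Local<v8::String> name =
      v8::String::NewFromUtf8Literal(isolate, "bundle.js");
  // Remaining parameters keep their defaults (not shared-cross-origin,
  // script_id of -1, no source map, no host-defined options).
  return v8::ScriptOrigin(isolate, name, /*resource_line_offset=*/0,
                          /*resource_column_offset=*/0);
}
```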
V8_DEPRECATE_SOON("Use the version that takes a std::ostream&.") - static void PrintCurrentStackTrace(Isolate* isolate, FILE* out); static void PrintCurrentStackTrace(Isolate* isolate, std::ostream& out); static const int kNoLineNumberInfo = 0; @@ -220,7 +197,7 @@ class V8_EXPORT Message { Local ScriptOrigin::ResourceName() const { return resource_name_; } -Local ScriptOrigin::HostDefinedOptions() const { +Local ScriptOrigin::GetHostDefinedOptions() const { return host_defined_options_; } diff --git a/deps/include/v8-metrics.h b/deps/include/v8-metrics.h index 62738442..887012ac 100644 --- a/deps/include/v8-metrics.h +++ b/deps/include/v8-metrics.h @@ -21,6 +21,7 @@ class Isolate; namespace metrics { struct GarbageCollectionPhases { + int64_t total_wall_clock_duration_in_us = -1; int64_t compact_wall_clock_duration_in_us = -1; int64_t mark_wall_clock_duration_in_us = -1; int64_t sweep_wall_clock_duration_in_us = -1; @@ -34,6 +35,7 @@ struct GarbageCollectionSizes { }; struct GarbageCollectionFullCycle { + int reason = -1; GarbageCollectionPhases total; GarbageCollectionPhases total_cpp; GarbageCollectionPhases main_thread; @@ -59,25 +61,38 @@ struct GarbageCollectionFullMainThreadIncrementalMark { int64_t cpp_wall_clock_duration_in_us = -1; }; -struct GarbageCollectionFullMainThreadBatchedIncrementalMark { - std::vector events; -}; - struct GarbageCollectionFullMainThreadIncrementalSweep { int64_t wall_clock_duration_in_us = -1; int64_t cpp_wall_clock_duration_in_us = -1; }; -struct GarbageCollectionFullMainThreadBatchedIncrementalSweep { - std::vector events; +template +struct GarbageCollectionBatchedEvents { + std::vector events; }; +using GarbageCollectionFullMainThreadBatchedIncrementalMark = + GarbageCollectionBatchedEvents< + GarbageCollectionFullMainThreadIncrementalMark>; +using GarbageCollectionFullMainThreadBatchedIncrementalSweep = + GarbageCollectionBatchedEvents< + GarbageCollectionFullMainThreadIncrementalSweep>; + struct GarbageCollectionYoungCycle { + int reason = -1; int64_t total_wall_clock_duration_in_us = -1; int64_t main_thread_wall_clock_duration_in_us = -1; - double collection_rate_in_percent; - double efficiency_in_bytes_per_us; - double main_thread_efficiency_in_bytes_per_us; + double collection_rate_in_percent = -1.0; + double efficiency_in_bytes_per_us = -1.0; + double main_thread_efficiency_in_bytes_per_us = -1.0; +#if defined(CPPGC_YOUNG_GENERATION) + GarbageCollectionPhases total_cpp; + GarbageCollectionSizes objects_cpp; + GarbageCollectionSizes memory_cpp; + double collection_rate_cpp_in_percent = -1.0; + double efficiency_cpp_in_bytes_per_us = -1.0; + double main_thread_efficiency_cpp_in_bytes_per_us = -1.0; +#endif // defined(CPPGC_YOUNG_GENERATION) }; struct WasmModuleDecoded { @@ -110,31 +125,10 @@ struct WasmModuleInstantiated { int64_t wall_clock_duration_in_us = -1; }; -struct WasmModuleTieredUp { - bool lazy = false; - size_t code_size_in_bytes = 0; - int64_t wall_clock_duration_in_us = -1; - int64_t cpu_duration_in_us = -1; -}; - struct WasmModulesPerIsolate { size_t count = 0; }; -#define V8_MAIN_THREAD_METRICS_EVENTS(V) \ - V(GarbageCollectionFullCycle) \ - V(GarbageCollectionFullMainThreadIncrementalMark) \ - V(GarbageCollectionFullMainThreadBatchedIncrementalMark) \ - V(GarbageCollectionFullMainThreadIncrementalSweep) \ - V(GarbageCollectionFullMainThreadBatchedIncrementalSweep) \ - V(GarbageCollectionYoungCycle) \ - V(WasmModuleDecoded) \ - V(WasmModuleCompiled) \ - V(WasmModuleInstantiated) \ - V(WasmModuleTieredUp) - -#define 
/** * This class serves as a base class for recording event-based metrics in V8. * There are two kinds of metrics, those which are expected to be thread-safe and * the rest which must be recorded on the main thread. They are also referred * to as thread-safe and main-thread events, respectively. All metrics are * reported via the Recorder interface. If the metric is recorded on a * background thread, it will be delayed and executed by the foreground task * runner. * - * The thread-safe events are listed in the V8_THREAD_SAFE_METRICS_EVENTS - * macro above while the main thread event are listed in - * V8_MAIN_THREAD_METRICS_EVENTS above. For the former, a virtual method - * AddMainThreadEvent(const E& event, v8::Context::Token token) will be - * generated and for the latter AddThreadSafeEvent(const E& event). - * - * Thread-safe events are not allowed to access the context and therefore do - * not carry a context ID with them. These IDs can be generated using - * Recorder::GetContextId() and the ID will be valid throughout the lifetime - * of the isolate. It is not guaranteed that the ID will still resolve to - * a valid context using Recorder::GetContext() at the time the metric is - * recorded. In this case, an empty handle will be returned. - * * The embedder is expected to call v8::Isolate::SetMetricsRecorder() * providing its implementation and have the virtual methods overwritten * for the events it cares about. @@ -187,14 +168,30 @@ class V8_EXPORT Recorder { virtual ~Recorder() = default; + // Main thread events. Those are only triggered on the main thread, and hence + // can access the context. #define ADD_MAIN_THREAD_EVENT(E) \ - virtual void AddMainThreadEvent(const E& event, ContextId context_id) {} - V8_MAIN_THREAD_METRICS_EVENTS(ADD_MAIN_THREAD_EVENT) + virtual void AddMainThreadEvent(const E&, ContextId) {} + ADD_MAIN_THREAD_EVENT(GarbageCollectionFullCycle) + ADD_MAIN_THREAD_EVENT(GarbageCollectionFullMainThreadIncrementalMark) + ADD_MAIN_THREAD_EVENT(GarbageCollectionFullMainThreadBatchedIncrementalMark) + ADD_MAIN_THREAD_EVENT(GarbageCollectionFullMainThreadIncrementalSweep) + ADD_MAIN_THREAD_EVENT(GarbageCollectionFullMainThreadBatchedIncrementalSweep) + ADD_MAIN_THREAD_EVENT(GarbageCollectionYoungCycle) + ADD_MAIN_THREAD_EVENT(WasmModuleDecoded) + ADD_MAIN_THREAD_EVENT(WasmModuleCompiled) + ADD_MAIN_THREAD_EVENT(WasmModuleInstantiated) #undef ADD_MAIN_THREAD_EVENT
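A minimal sketch of a recorder built against the expanded interface above; only the events of interest need overrides, and the recorder is installed via v8::Isolate::SetMetricsRecorder() as the class comment describes (the class name is hypothetical):

```cpp
#include <cstdio>

#include "v8-metrics.h"

class ExampleRecorder : public v8::metrics::Recorder {
 public:
  void AddMainThreadEvent(const v8::metrics::WasmModuleDecoded& event,
                          ContextId) override {
    std::printf("wasm decode: %lld us\n",
                static_cast<long long>(event.wall_clock_duration_in_us));
  }
  void AddThreadSafeEvent(
      const v8::metrics::WasmModulesPerIsolate& event) override {
    std::printf("wasm modules in isolate: %zu\n", event.count);
  }
};
```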
#define ADD_THREAD_SAFE_EVENT(E) \ - virtual void AddThreadSafeEvent(const E& event) {} - V8_THREAD_SAFE_METRICS_EVENTS(ADD_THREAD_SAFE_EVENT) + virtual void AddThreadSafeEvent(const E&) {} + ADD_THREAD_SAFE_EVENT(WasmModulesPerIsolate) #undef ADD_THREAD_SAFE_EVENT virtual void NotifyIsolateDisposal() {} @@ -230,6 +227,8 @@ struct V8_EXPORT LongTaskStats { int64_t gc_full_atomic_wall_clock_duration_us = 0; int64_t gc_full_incremental_wall_clock_duration_us = 0; int64_t gc_young_wall_clock_duration_us = 0; + // Only collected with --slow-histograms + int64_t v8_execute_us = 0; }; } // namespace metrics diff --git a/deps/include/v8-microtask-queue.h b/deps/include/v8-microtask-queue.h index af9caa54..85d227fa 100644 --- a/deps/include/v8-microtask-queue.h +++ b/deps/include/v8-microtask-queue.h @@ -118,7 +118,12 @@ class V8_EXPORT V8_NODISCARD MicrotasksScope { public: enum Type { kRunMicrotasks, kDoNotRunMicrotasks }; + V8_DEPRECATE_SOON( + "May be incorrect if context was created with non-default microtask " + "queue") MicrotasksScope(Isolate* isolate, Type type); + + MicrotasksScope(Local context, Type type); MicrotasksScope(Isolate* isolate, MicrotaskQueue* microtask_queue, Type type); ~MicrotasksScope(); @@ -142,7 +147,7 @@ class V8_EXPORT V8_NODISCARD MicrotasksScope { MicrotasksScope& operator=(const MicrotasksScope&) = delete; private: - internal::Isolate* const isolate_; + internal::Isolate* const i_isolate_; internal::MicrotaskQueue* const microtask_queue_; bool run_; }; diff --git a/deps/include/v8-object.h b/deps/include/v8-object.h index 6716162d..d7332ba0 100644 --- a/deps/include/v8-object.h +++ b/deps/include/v8-object.h @@ -493,7 +493,7 @@ class V8_EXPORT Object : public Value { return object.val_->GetAlignedPointerFromInternalField(index); } - /** Same as above, but works for TracedGlobal. */ + /** Same as above, but works for TracedReference. */ V8_INLINE static void* GetAlignedPointerFromInternalField( const BasicTracedReference& object, int index) { return object->GetAlignedPointerFromInternalField(index); @@ -594,8 +594,6 @@ class V8_EXPORT Object : public Value { /** * Returns the context in which the object was created. */ - V8_DEPRECATE_SOON("Use MaybeLocal GetCreationContext()") - Local CreationContext(); MaybeLocal GetCreationContext(); /** @@ -604,10 +602,6 @@ class V8_EXPORT Object : public Value { Local GetCreationContextChecked(); /** Same as above, but works for Persistents */ - V8_DEPRECATE_SOON( - "Use MaybeLocal GetCreationContext(const " - "PersistentBase& object)") - static Local CreationContext(const PersistentBase& object); V8_INLINE static MaybeLocal GetCreationContext( const PersistentBase& object) { return object.val_->GetCreationContext(); @@ -717,7 +711,7 @@ Local Object::GetInternalField(int index) { // Fast path: If the object is a plain JSObject, which is the common case, we // know where to find the internal fields and can return the value directly. 
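The MicrotasksScope change in the v8-microtask-queue.h hunk above matters for embedders that create contexts with their own MicrotaskQueue: the isolate-only constructor cannot know which queue the current context uses, hence the deprecation. A small sketch of the replacement form; RunSomeScript stands in for arbitrary embedder code:

#include "v8-microtask-queue.h"

void RunWithMicrotasks(v8::Local<v8::Context> context) {
  // Scopes a region after which checkpoints may run queued microtasks,
  // using the queue owned by `context` rather than the isolate default.
  v8::MicrotasksScope scope(context, v8::MicrotasksScope::kRunMicrotasks);
  // RunSomeScript(context);  // hypothetical embedder call
}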
int instance_type = I::GetInstanceType(obj); - if (v8::internal::CanHaveInternalField(instance_type)) { + if (I::CanHaveInternalField(instance_type)) { int offset = I::kJSObjectHeaderSize + (I::kEmbedderDataSlotSize * index); A value = I::ReadRawField(obj, offset); #ifdef V8_COMPRESS_POINTERS @@ -735,21 +729,20 @@ Local Object::GetInternalField(int index) { } void* Object::GetAlignedPointerFromInternalField(int index) { -#ifndef V8_ENABLE_CHECKS +#if !defined(V8_ENABLE_CHECKS) using A = internal::Address; using I = internal::Internals; A obj = *reinterpret_cast(this); // Fast path: If the object is a plain JSObject, which is the common case, we // know where to find the internal fields and can return the value directly. auto instance_type = I::GetInstanceType(obj); - if (v8::internal::CanHaveInternalField(instance_type)) { - int offset = I::kJSObjectHeaderSize + (I::kEmbedderDataSlotSize * index); -#ifdef V8_HEAP_SANDBOX - offset += I::kEmbedderDataSlotRawPayloadOffset; -#endif - internal::Isolate* isolate = I::GetIsolateForHeapSandbox(obj); - A value = I::ReadExternalPointerField( - isolate, obj, offset, internal::kEmbedderDataSlotPayloadTag); + if (I::CanHaveInternalField(instance_type)) { + int offset = I::kJSObjectHeaderSize + (I::kEmbedderDataSlotSize * index) + + I::kEmbedderDataSlotExternalPointerOffset; + Isolate* isolate = I::GetIsolateForSandbox(obj); + A value = + I::ReadExternalPointerField( + isolate, obj, offset); return reinterpret_cast(value); } #endif diff --git a/deps/include/v8-persistent-handle.h b/deps/include/v8-persistent-handle.h index a6c21268..4fe79862 100644 --- a/deps/include/v8-persistent-handle.h +++ b/deps/include/v8-persistent-handle.h @@ -169,8 +169,6 @@ class PersistentBase { * Turns this handle into a weak phantom handle without finalization callback. * The handle will be reset automatically when the garbage collector detects * that the object is no longer reachable. - * A related function Isolate::NumberOfPhantomHandleResetsSinceLastCall - * returns how many phantom handles were reset by the garbage collector. */ V8_INLINE void SetWeak(); @@ -254,7 +252,7 @@ class NonCopyablePersistentTraits { * This will clone the contents of storage cell, but not any of the flags, etc. */ template -struct CopyablePersistentTraits { +struct V8_DEPRECATED("Use v8::Global instead") CopyablePersistentTraits { using CopyablePersistent = Persistent>; static const bool kResetInDestructor = true; template diff --git a/deps/include/v8-platform.h b/deps/include/v8-platform.h index 234582f0..32a82f88 100644 --- a/deps/include/v8-platform.h +++ b/deps/include/v8-platform.h @@ -158,9 +158,10 @@ class TaskRunner { class JobDelegate { public: /** - * Returns true if this thread should return from the worker task on the + * Returns true if this thread *must* return from the worker task on the * current thread ASAP. Workers should periodically invoke ShouldYield (or * YieldIfNeeded()) as often as is reasonable. + * After this method returned true, ShouldYield must not be called again. */ virtual bool ShouldYield() = 0; @@ -401,6 +402,8 @@ class PageAllocator { // this is used to set the MAP_JIT flag on Apple Silicon. // TODO(jkummerow): Remove this when Wasm has a platform-independent // w^x implementation. + // TODO(saelo): Remove this once all JIT pages are allocated through the + // VirtualAddressSpace API. 
kNoAccessWillJitLater }; @@ -427,6 +430,17 @@ class PageAllocator { virtual bool SetPermissions(void* address, size_t length, Permission permissions) = 0; + /** + * Recommits discarded pages in the given range with given permissions. + * Discarded pages must be recommitted with their original permissions + * before they are used again. + */ + virtual bool RecommitPages(void* address, size_t length, + Permission permissions) { + // TODO(v8:12797): make it pure once it's implemented on Chromium side. + return false; + } + /** * Frees memory in the given [address, address + size) range. address and size * should be operating system page-aligned. The next write to this @@ -510,6 +524,371 @@ class PageAllocator { virtual bool CanAllocateSharedPages() { return false; } }; +// Opaque type representing a handle to a shared memory region. +using PlatformSharedMemoryHandle = intptr_t; +static constexpr PlatformSharedMemoryHandle kInvalidSharedMemoryHandle = -1; + +// Conversion routines from the platform-dependent shared memory identifiers +// into the opaque PlatformSharedMemoryHandle type. These use the underlying +// types (e.g. unsigned int) instead of the typedef'd ones (e.g. mach_port_t) +// to avoid pulling in large OS header files into this header file. Instead, +// the users of these routines are expected to include the respective OS +// headers in addition to this one. +#if V8_OS_MACOS +// Convert between a shared memory handle and a mach_port_t referencing a memory +// entry object. +inline PlatformSharedMemoryHandle SharedMemoryHandleFromMachMemoryEntry( + unsigned int port) { + return static_cast<PlatformSharedMemoryHandle>(port); +} +inline unsigned int MachMemoryEntryFromSharedMemoryHandle( + PlatformSharedMemoryHandle handle) { + return static_cast<unsigned int>(handle); +} +#elif V8_OS_FUCHSIA +// Convert between a shared memory handle and a zx_handle_t to a VMO. +inline PlatformSharedMemoryHandle SharedMemoryHandleFromVMO(uint32_t handle) { + return static_cast<PlatformSharedMemoryHandle>(handle); +} +inline uint32_t VMOFromSharedMemoryHandle(PlatformSharedMemoryHandle handle) { + return static_cast<uint32_t>(handle); +} +#elif V8_OS_WIN +// Convert between a shared memory handle and a Windows HANDLE to a file mapping +// object. +inline PlatformSharedMemoryHandle SharedMemoryHandleFromFileMapping( + void* handle) { + return reinterpret_cast<PlatformSharedMemoryHandle>(handle); +} +inline void* FileMappingFromSharedMemoryHandle( + PlatformSharedMemoryHandle handle) { + return reinterpret_cast<void*>(handle); +} +#else +// Convert between a shared memory handle and a file descriptor. +inline PlatformSharedMemoryHandle SharedMemoryHandleFromFileDescriptor(int fd) { + return static_cast<PlatformSharedMemoryHandle>(fd); +} +inline int FileDescriptorFromSharedMemoryHandle( + PlatformSharedMemoryHandle handle) { + return static_cast<int>(handle); +} +#endif + +/** + * Possible permissions for memory pages. + */ +enum class PagePermissions { + kNoAccess, + kRead, + kReadWrite, + kReadWriteExecute, + kReadExecute, +}; + +/** + * Class to manage a virtual memory address space. + * + * This class represents a contiguous region of virtual address space in which + * sub-spaces and (private or shared) memory pages can be allocated, freed, and + * modified. This interface is meant to eventually replace the PageAllocator + * interface, and can be used as an alternative in the meantime. + * + * This API is not yet stable and may change without notice!
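The conversion helpers above exist so that a single opaque PlatformSharedMemoryHandle can wrap a POSIX file descriptor, a Windows file-mapping HANDLE, a Mach memory entry, or a Fuchsia VMO without dragging OS headers into v8-platform.h. A POSIX-only round trip as a sketch, assuming shm_open is available; the region name is illustrative:

#include <fcntl.h>
#include <sys/mman.h>
#include "v8-platform.h"

v8::PlatformSharedMemoryHandle OpenBackingStore() {
  int fd = shm_open("/v8_example_region", O_RDWR | O_CREAT, 0600);
  if (fd < 0) return v8::kInvalidSharedMemoryHandle;
  return v8::SharedMemoryHandleFromFileDescriptor(fd);  // wrap the fd
}

int FdFromHandle(v8::PlatformSharedMemoryHandle handle) {
  // Unwrap again when calling back into OS APIs.
  return v8::FileDescriptorFromSharedMemoryHandle(handle);
}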
+ */ +class VirtualAddressSpace { + public: + using Address = uintptr_t; + + VirtualAddressSpace(size_t page_size, size_t allocation_granularity, + Address base, size_t size, + PagePermissions max_page_permissions) + : page_size_(page_size), + allocation_granularity_(allocation_granularity), + base_(base), + size_(size), + max_page_permissions_(max_page_permissions) {} + + virtual ~VirtualAddressSpace() = default; + + /** + * The page size used inside this space. Guaranteed to be a power of two. + * Used as granularity for all page-related operations except for allocation, + * which use the allocation_granularity(), see below. + * + * \returns the page size in bytes. + */ + size_t page_size() const { return page_size_; } + + /** + * The granularity of page allocations and, by extension, of subspace + * allocations. This is guaranteed to be a power of two and a multiple of the + * page_size(). In practice, this is equal to the page size on most OSes, but + * on Windows it is usually 64KB, while the page size is 4KB. + * + * \returns the allocation granularity in bytes. + */ + size_t allocation_granularity() const { return allocation_granularity_; } + + /** + * The base address of the address space managed by this instance. + * + * \returns the base address of this address space. + */ + Address base() const { return base_; } + + /** + * The size of the address space managed by this instance. + * + * \returns the size of this address space in bytes. + */ + size_t size() const { return size_; } + + /** + * The maximum page permissions that pages allocated inside this space can + * obtain. + * + * \returns the maximum page permissions. + */ + PagePermissions max_page_permissions() const { return max_page_permissions_; } + + /** + * Sets the random seed so that GetRandomPageAddress() will generate + * repeatable sequences of random addresses. + * + * \param The seed for the PRNG. + */ + virtual void SetRandomSeed(int64_t seed) = 0; + + /** + * Returns a random address inside this address space, suitable for page + * allocations hints. + * + * \returns a random address aligned to allocation_granularity(). + */ + virtual Address RandomPageAddress() = 0; + + /** + * Allocates private memory pages with the given alignment and permissions. + * + * \param hint If nonzero, the allocation is attempted to be placed at the + * given address first. If that fails, the allocation is attempted to be + * placed elsewhere, possibly nearby, but that is not guaranteed. Specifying + * zero for the hint always causes this function to choose a random address. + * The hint, if specified, must be aligned to the specified alignment. + * + * \param size The size of the allocation in bytes. Must be a multiple of the + * allocation_granularity(). + * + * \param alignment The alignment of the allocation in bytes. Must be a + * multiple of the allocation_granularity() and should be a power of two. + * + * \param permissions The page permissions of the newly allocated pages. + * + * \returns the start address of the allocated pages on success, zero on + * failure. + */ + static constexpr Address kNoHint = 0; + virtual V8_WARN_UNUSED_RESULT Address + AllocatePages(Address hint, size_t size, size_t alignment, + PagePermissions permissions) = 0; + + /** + * Frees previously allocated pages. + * + * This function will terminate the process on failure as this implies a bug + * in the client. As such, there is no return value. + * + * \param address The start address of the pages to free. 
This address must + * have been obtained through a call to AllocatePages. + * + * \param size The size in bytes of the region to free. This must match the + * size passed to AllocatePages when the pages were allocated. + */ + virtual void FreePages(Address address, size_t size) = 0; + + /** + * Sets permissions of all allocated pages in the given range. + * + * This operation can fail due to OOM, in which case false is returned. If + * the operation fails for a reason other than OOM, this function will + * terminate the process as this implies a bug in the client. + * + * \param address The start address of the range. Must be aligned to + * page_size(). + * + * \param size The size in bytes of the range. Must be a multiple + * of page_size(). + * + * \param permissions The new permissions for the range. + * + * \returns true on success, false on OOM. + */ + virtual V8_WARN_UNUSED_RESULT bool SetPagePermissions( + Address address, size_t size, PagePermissions permissions) = 0; + + /** + * Creates a guard region at the specified address. + * + * Guard regions are guaranteed to cause a fault when accessed and generally + * do not count towards any memory consumption limits. Further, allocating + * guard regions can usually not fail in subspaces if the region does not + * overlap with another region, subspace, or page allocation. + * + * \param address The start address of the guard region. Must be aligned to + * the allocation_granularity(). + * + * \param size The size of the guard region in bytes. Must be a multiple of + * the allocation_granularity(). + * + * \returns true on success, false otherwise. + */ + virtual V8_WARN_UNUSED_RESULT bool AllocateGuardRegion(Address address, + size_t size) = 0; + + /** + * Frees an existing guard region. + * + * This function will terminate the process on failure as this implies a bug + * in the client. As such, there is no return value. + * + * \param address The start address of the guard region to free. This address + * must have previously been used as address parameter in a successful + * invocation of AllocateGuardRegion. + * + * \param size The size in bytes of the guard region to free. This must match + * the size passed to AllocateGuardRegion when the region was created. + */ + virtual void FreeGuardRegion(Address address, size_t size) = 0; + + /** + * Allocates shared memory pages with the given permissions. + * + * \param hint Placement hint. See AllocatePages. + * + * \param size The size of the allocation in bytes. Must be a multiple of the + * allocation_granularity(). + * + * \param permissions The page permissions of the newly allocated pages. + * + * \param handle A platform-specific handle to a shared memory object. See + * the SharedMemoryHandleFromX routines above for ways to obtain these. + * + * \param offset The offset in the shared memory object at which the mapping + * should start. Must be a multiple of the allocation_granularity(). + * + * \returns the start address of the allocated pages on success, zero on + * failure. + */ + virtual V8_WARN_UNUSED_RESULT Address + AllocateSharedPages(Address hint, size_t size, PagePermissions permissions, + PlatformSharedMemoryHandle handle, uint64_t offset) = 0; + + /** + * Frees previously allocated shared pages. + * + * This function will terminate the process on failure as this implies a bug + * in the client. As such, there is no return value. + * + * \param address The start address of the pages to free. This address must + * have been obtained through a call to AllocateSharedPages. 
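Taken together, the contracts above (sizes in multiples of allocation_granularity(), FreePages called with exactly the allocated size, SetPagePermissions failing only on OOM) suggest usage like the following sketch, where `space` is assumed to come from the embedder's platform layer:

#include "v8-platform.h"

void UseScratchPages(v8::VirtualAddressSpace* space) {
  const size_t size = 4 * space->allocation_granularity();
  // kNoHint lets the implementation place the pages at a random address.
  v8::VirtualAddressSpace::Address region = space->AllocatePages(
      v8::VirtualAddressSpace::kNoHint, size,
      space->allocation_granularity(), v8::PagePermissions::kReadWrite);
  if (region == 0) return;  // allocation failed
  // Drop write access once the pages are initialized; only OOM returns false.
  if (!space->SetPagePermissions(region, size, v8::PagePermissions::kRead)) {
    // Handle OOM; any other failure would have terminated the process.
  }
  space->FreePages(region, size);  // size must match the allocation exactly
}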
+ * + * \param size The size in bytes of the region to free. This must match the + * size passed to AllocateSharedPages when the pages were allocated. + */ + virtual void FreeSharedPages(Address address, size_t size) = 0; + + /** + * Whether this instance can allocate subspaces or not. + * + * \returns true if subspaces can be allocated, false if not. + */ + virtual bool CanAllocateSubspaces() = 0; + + /* + * Allocate a subspace. + * + * The address space of a subspace stays reserved in the parent space for the + * lifetime of the subspace. As such, it is guaranteed that page allocations + * on the parent space cannot end up inside a subspace. + * + * \param hint Hints where the subspace should be allocated. See + * AllocatePages() for more details. + * + * \param size The size in bytes of the subspace. Must be a multiple of the + * allocation_granularity(). + * + * \param alignment The alignment of the subspace in bytes. Must be a multiple + * of the allocation_granularity() and should be a power of two. + * + * \param max_page_permissions The maximum permissions that pages allocated in + * the subspace can obtain. + * + * \returns a new subspace or nullptr on failure. + */ + virtual std::unique_ptr<VirtualAddressSpace> AllocateSubspace( + Address hint, size_t size, size_t alignment, + PagePermissions max_page_permissions) = 0; + + // + // TODO(v8) maybe refactor the methods below before stabilizing the API. For + // example by combining them into some form of page operation method that + // takes a command enum as parameter. + // + + /** + * Recommits discarded pages in the given range with given permissions. + * Discarded pages must be recommitted with their original permissions + * before they are used again. + * + * \param address The start address of the range. Must be aligned to + * page_size(). + * + * \param size The size in bytes of the range. Must be a multiple + * of page_size(). + * + * \param permissions The permissions for the range that the pages must have. + * + * \returns true on success, false otherwise. + */ + virtual V8_WARN_UNUSED_RESULT bool RecommitPages( + Address address, size_t size, PagePermissions permissions) = 0; + + /** + * Frees memory in the given [address, address + size) range. address and + * size should be aligned to the page_size(). The next write to this memory + * area brings the memory transparently back. This should be treated as a + * hint to the OS that the pages are no longer needed. It does not guarantee + * that the pages will be discarded immediately or at all. + * + * \returns true on success, false otherwise. Since this method is only a + * hint, a successful invocation does not imply that pages have been removed. + */ + virtual V8_WARN_UNUSED_RESULT bool DiscardSystemPages(Address address, + size_t size) { + return true; + } + /** + * Decommits any wired memory pages in the given range, allowing the OS to + * reclaim them, and marks the region as inaccessible (kNoAccess). The address + * range stays reserved and can be accessed again later by changing its + * permissions. However, in that case the memory content is guaranteed to be + * zero-initialized again. The memory must have been previously allocated by a + * call to AllocatePages. + * + * \returns true on success, false otherwise.
+ */ + virtual V8_WARN_UNUSED_RESULT bool DecommitPages(Address address, + size_t size) = 0; + + private: + const size_t page_size_; + const size_t allocation_granularity_; + const Address base_; + const size_t size_; + const PagePermissions max_page_permissions_; +}; + /** * V8 Allocator used for allocating zone backings. */ @@ -522,6 +901,16 @@ class ZoneBackingAllocator { virtual FreeFn GetFreeFn() const { return ::free; } }; +/** + * Observer used by V8 to notify the embedder about entering/leaving sections + * with high throughput of malloc/free operations. + */ +class HighAllocationThroughputObserver { + public: + virtual void EnterSection() {} + virtual void LeaveSection() {} +}; + /** * V8 Platform abstraction layer. * @@ -534,11 +923,9 @@ class Platform { /** * Allows the embedder to manage memory page allocations. + * Returning nullptr will cause V8 to use the default page allocator. */ - virtual PageAllocator* GetPageAllocator() { - // TODO(bbudge) Make this abstract after all embedders implement this. - return nullptr; - } + virtual PageAllocator* GetPageAllocator() = 0; /** * Allows the embedder to specify a custom allocator used for zones. @@ -555,21 +942,7 @@ class Platform { * error. * Embedder overrides of this function must NOT call back into V8. */ - virtual void OnCriticalMemoryPressure() { - // TODO(bbudge) Remove this when embedders override the following method. - // See crbug.com/634547. - } - - /** - * Enables the embedder to respond in cases where V8 can't allocate large - * memory regions. The |length| parameter is the amount of memory needed. - * Returns true if memory is now available. Returns false if no memory could - * be made available. V8 will retry allocations until this method returns - * false. - * - * Embedder overrides of this function must NOT call back into V8. - */ - virtual bool OnCriticalMemoryPressure(size_t length) { return false; } + virtual void OnCriticalMemoryPressure() {} /** * Gets the number of worker threads used by @@ -667,16 +1040,28 @@ class Platform { * thread (A=>B/B=>A deadlock) and [2] JobTask::Run or * JobTask::GetMaxConcurrency may be invoked synchronously from JobHandle * (B=>JobHandle::foo=>B deadlock). + */ + virtual std::unique_ptr PostJob( + TaskPriority priority, std::unique_ptr job_task) { + auto handle = CreateJob(priority, std::move(job_task)); + handle->NotifyConcurrencyIncrease(); + return handle; + } + + /** + * Creates and returns a JobHandle associated with a Job. Unlike PostJob(), + * this doesn't immediately schedules |worker_task| to run; the Job is then + * scheduled by calling either NotifyConcurrencyIncrease() or Join(). * - * A sufficient PostJob() implementation that uses the default Job provided in - * libplatform looks like: - * std::unique_ptr PostJob( + * A sufficient CreateJob() implementation that uses the default Job provided + * in libplatform looks like: + * std::unique_ptr CreateJob( * TaskPriority priority, std::unique_ptr job_task) override { * return v8::platform::NewDefaultJobHandle( * this, priority, std::move(job_task), NumberOfWorkerThreads()); * } */ - virtual std::unique_ptr PostJob( + virtual std::unique_ptr CreateJob( TaskPriority priority, std::unique_ptr job_task) = 0; /** @@ -713,6 +1098,16 @@ class Platform { */ virtual void DumpWithoutCrashing() {} + /** + * Allows the embedder to observe sections with high throughput allocation + * operations. 
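Concretely, the observer described above is a pair of bracketing callbacks: V8 calls EnterSection() before a malloc-heavy phase and LeaveSection() after it. A sketch of an embedder-side implementation; the allocator-tuning hook is hypothetical:

#include "v8-platform.h"

class BatchingObserver : public v8::HighAllocationThroughputObserver {
 public:
  void EnterSection() override {
    // TuneAllocator(/*batched=*/true);  // hypothetical embedder hook
  }
  void LeaveSection() override {
    // TuneAllocator(/*batched=*/false);
  }
};

// A Platform subclass would return one long-lived instance from
// GetHighAllocationThroughputObserver() instead of the default no-op.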
+ */ + virtual HighAllocationThroughputObserver* + GetHighAllocationThroughputObserver() { + static HighAllocationThroughputObserver default_observer; + return &default_observer; + } + protected: /** * Default implementation of current wall-clock time in milliseconds diff --git a/deps/include/v8-primitive.h b/deps/include/v8-primitive.h index 8a95c151..4fef8da7 100644 --- a/deps/include/v8-primitive.h +++ b/deps/include/v8-primitive.h @@ -20,6 +20,7 @@ class String; namespace internal { class ExternalString; class ScopedExternalStringLock; +class StringForwardingTable; } // namespace internal /** @@ -54,12 +55,22 @@ class V8_EXPORT Boolean : public Primitive { * This is passed back to the embedder as part of * HostImportModuleDynamicallyCallback for module loading. */ -class V8_EXPORT PrimitiveArray { +class V8_EXPORT PrimitiveArray : public Data { public: static Local New(Isolate* isolate, int length); int Length() const; void Set(Isolate* isolate, int index, Local item); Local Get(Isolate* isolate, int index); + + V8_INLINE static PrimitiveArray* Cast(Data* data) { +#ifdef V8_ENABLE_CHECKS + CheckCast(data); +#endif + return reinterpret_cast(data); + } + + private: + static void CheckCast(Data* obj); }; /** @@ -259,6 +270,7 @@ class V8_EXPORT String : public Name { private: friend class internal::ExternalString; friend class v8::String; + friend class internal::StringForwardingTable; friend class internal::ScopedExternalStringLock; }; @@ -575,8 +587,6 @@ class V8_EXPORT Symbol : public Name { /** * Returns the description string of the symbol, or undefined if none. */ - V8_DEPRECATED("Use Symbol::Description(isolate)") - Local Description() const; Local Description(Isolate* isolate) const; /** @@ -777,10 +787,9 @@ String::ExternalStringResource* String::GetExternalStringResource() const { ExternalStringResource* result; if (I::IsExternalTwoByteString(I::GetInstanceType(obj))) { - internal::Isolate* isolate = I::GetIsolateForHeapSandbox(obj); - A value = - I::ReadExternalPointerField(isolate, obj, I::kStringResourceOffset, - internal::kExternalStringResourceTag); + Isolate* isolate = I::GetIsolateForSandbox(obj); + A value = I::ReadExternalPointerField( + isolate, obj, I::kStringResourceOffset); result = reinterpret_cast(value); } else { result = GetExternalStringResourceSlow(); @@ -796,15 +805,14 @@ String::ExternalStringResourceBase* String::GetExternalStringResourceBase( using A = internal::Address; using I = internal::Internals; A obj = *reinterpret_cast(this); - int type = I::GetInstanceType(obj) & I::kFullStringRepresentationMask; + int type = I::GetInstanceType(obj) & I::kStringRepresentationAndEncodingMask; *encoding_out = static_cast(type & I::kStringEncodingMask); ExternalStringResourceBase* resource; if (type == I::kExternalOneByteRepresentationTag || type == I::kExternalTwoByteRepresentationTag) { - internal::Isolate* isolate = I::GetIsolateForHeapSandbox(obj); - A value = - I::ReadExternalPointerField(isolate, obj, I::kStringResourceOffset, - internal::kExternalStringResourceTag); + Isolate* isolate = I::GetIsolateForSandbox(obj); + A value = I::ReadExternalPointerField( + isolate, obj, I::kStringResourceOffset); resource = reinterpret_cast(value); } else { resource = GetExternalStringResourceBaseSlow(encoding_out); diff --git a/deps/include/v8-profiler.h b/deps/include/v8-profiler.h index ccf15bab..6b73fc60 100644 --- a/deps/include/v8-profiler.h +++ b/deps/include/v8-profiler.h @@ -20,12 +20,15 @@ */ namespace v8 { +enum class EmbedderStateTag : uint8_t; class 
HeapGraphNode; struct HeapStatsUpdate; class Object; +enum StateTag : int; using NativeObject = void*; using SnapshotObjectId = uint32_t; +using ProfilerId = uint32_t; struct CpuProfileDeoptFrame { int script_id; @@ -172,6 +175,32 @@ class V8_EXPORT CpuProfileNode { static const int kNoColumnNumberInfo = Message::kNoColumnInfo; }; +/** + * An interface for exporting data from V8, using "push" model. + */ +class V8_EXPORT OutputStream { + public: + enum WriteResult { kContinue = 0, kAbort = 1 }; + virtual ~OutputStream() = default; + /** Notify about the end of stream. */ + virtual void EndOfStream() = 0; + /** Get preferred output chunk size. Called only once. */ + virtual int GetChunkSize() { return 1024; } + /** + * Writes the next chunk of snapshot data into the stream. Writing + * can be stopped by returning kAbort as function result. EndOfStream + * will not be called in case writing was aborted. + */ + virtual WriteResult WriteAsciiChunk(char* data, int size) = 0; + /** + * Writes the next chunk of heap stats data into the stream. Writing + * can be stopped by returning kAbort as function result. EndOfStream + * will not be called in case writing was aborted. + */ + virtual WriteResult WriteHeapStatsChunk(HeapStatsUpdate* data, int count) { + return kAbort; + } +}; /** * CpuProfile contains a CPU profile in a form of top-down call tree @@ -179,6 +208,9 @@ class V8_EXPORT CpuProfileNode { */ class V8_EXPORT CpuProfile { public: + enum SerializationFormat { + kJSON = 0 // See format description near 'Serialize' method. + }; /** Returns CPU profile title. */ Local GetTitle() const; @@ -210,6 +242,16 @@ class V8_EXPORT CpuProfile { */ int64_t GetStartTime() const; + /** + * Returns state of the vm when sample was captured. + */ + StateTag GetSampleState(int index) const; + + /** + * Returns state of the embedder when sample was captured. + */ + EmbedderStateTag GetSampleEmbedderState(int index) const; + /** * Returns time when the profile recording was stopped (in microseconds) * since some unspecified starting point. @@ -222,6 +264,25 @@ class V8_EXPORT CpuProfile { * All pointers to nodes previously returned become invalid. */ void Delete(); + + /** + * Prepare a serialized representation of the profile. The result + * is written into the stream provided in chunks of specified size. + * + * For the JSON format, heap contents are represented as an object + * with the following structure: + * + * { + * nodes: [nodes array], + * startTime: number, + * endTime: number + * samples: [strings array] + * timeDeltas: [numbers array] + * } + * + */ + void Serialize(OutputStream* stream, + SerializationFormat format = kJSON) const; }; enum CpuProfilingMode { @@ -261,15 +322,33 @@ enum class CpuProfilingStatus { kErrorTooManyProfilers }; +/** + * Result from StartProfiling returning the Profiling Status, and + * id of the started profiler, or 0 if profiler is not started + */ +struct CpuProfilingResult { + const ProfilerId id; + const CpuProfilingStatus status; +}; + /** * Delegate for when max samples reached and samples are discarded. 
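The relocated OutputStream interface and the new ProfilerId-based Start/Stop pair shown below combine naturally: start an anonymous profile, stop it by id, and serialize the result as JSON. A sketch with a string-backed stream; only the v8 types come from this header:

#include <string>
#include "v8-profiler.h"

class StringStream : public v8::OutputStream {
 public:
  explicit StringStream(std::string* out) : out_(out) {}
  void EndOfStream() override {}
  WriteResult WriteAsciiChunk(char* data, int size) override {
    out_->append(data, static_cast<size_t>(size));
    return kContinue;  // kAbort would stop serialization early
  }

 private:
  std::string* out_;
};

std::string ProfileOnce(v8::CpuProfiler* profiler) {
  // Anonymous profile: no title, identified only by the returned id.
  v8::CpuProfilingResult result = profiler->Start(v8::CpuProfilingOptions());
  // ... run the code under measurement ...
  v8::CpuProfile* profile = profiler->Stop(result.id);
  std::string json;
  StringStream stream(&json);
  profile->Serialize(&stream, v8::CpuProfile::kJSON);
  profile->Delete();  // invalidates all previously returned nodes
  return json;
}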
*/ class V8_EXPORT DiscardedSamplesDelegate { public: - DiscardedSamplesDelegate() {} + DiscardedSamplesDelegate() = default; virtual ~DiscardedSamplesDelegate() = default; virtual void Notify() = 0; + + ProfilerId GetId() const { return profiler_id_; } + + private: + friend internal::CpuProfile; + + void SetId(ProfilerId id) { profiler_id_ = id; } + + ProfilerId profiler_id_; }; /** @@ -300,6 +379,9 @@ class V8_EXPORT CpuProfilingOptions { unsigned max_samples = kNoSampleLimit, int sampling_interval_us = 0, MaybeLocal filter_context = MaybeLocal()); + CpuProfilingOptions(CpuProfilingOptions&&) = default; + CpuProfilingOptions& operator=(CpuProfilingOptions&&) = default; + CpuProfilingMode mode() const { return mode_; } unsigned max_samples() const { return max_samples_; } int sampling_interval_us() const { return sampling_interval_us_; } @@ -313,7 +395,7 @@ class V8_EXPORT CpuProfilingOptions { CpuProfilingMode mode_; unsigned max_samples_; int sampling_interval_us_; - CopyablePersistentTraits::CopyablePersistent filter_context_; + Global filter_context_; }; /** @@ -359,6 +441,45 @@ class V8_EXPORT CpuProfiler { */ void SetUsePreciseSampling(bool); + /** + * Starts collecting a CPU profile. Several profiles may be collected at once. + * Generates an anonymous profiler, without a String identifier. + */ + CpuProfilingResult Start( + CpuProfilingOptions options, + std::unique_ptr delegate = nullptr); + + /** + * Starts collecting a CPU profile. Title may be an empty string. Several + * profiles may be collected at once. Attempts to start collecting several + * profiles with the same title are silently ignored. + */ + CpuProfilingResult Start( + Local title, CpuProfilingOptions options, + std::unique_ptr delegate = nullptr); + + /** + * Starts profiling with the same semantics as above, except with expanded + * parameters. + * + * |record_samples| parameter controls whether individual samples should + * be recorded in addition to the aggregated tree. + * + * |max_samples| controls the maximum number of samples that should be + * recorded by the profiler. Samples obtained after this limit will be + * discarded. + */ + CpuProfilingResult Start( + Local title, CpuProfilingMode mode, bool record_samples = false, + unsigned max_samples = CpuProfilingOptions::kNoSampleLimit); + + /** + * The same as StartProfiling above, but the CpuProfilingMode defaults to + * kLeafNodeLineNumbers mode, which was the previous default behavior of the + * profiler. + */ + CpuProfilingResult Start(Local title, bool record_samples = false); + /** * Starts collecting a CPU profile. Title may be an empty string. Several * profiles may be collected at once. Attempts to start collecting several @@ -382,6 +503,7 @@ class V8_EXPORT CpuProfiler { CpuProfilingStatus StartProfiling( Local title, CpuProfilingMode mode, bool record_samples = false, unsigned max_samples = CpuProfilingOptions::kNoSampleLimit); + /** * The same as StartProfiling above, but the CpuProfilingMode defaults to * kLeafNodeLineNumbers mode, which was the previous default behavior of the @@ -390,6 +512,11 @@ class V8_EXPORT CpuProfiler { CpuProfilingStatus StartProfiling(Local title, bool record_samples = false); + /** + * Stops collecting CPU profile with a given id and returns it. + */ + CpuProfile* Stop(ProfilerId id); + /** * Stops collecting CPU profile with a given title and returns it. * If the title given is empty, finishes the last profile started. @@ -466,7 +593,9 @@ class V8_EXPORT HeapGraphNode { kConsString = 10, // Concatenated string. 
A pair of pointers to strings. kSlicedString = 11, // Sliced string. A fragment of another string. kSymbol = 12, // A Symbol (ES6). - kBigInt = 13 // BigInt. + kBigInt = 13, // BigInt. + kObjectShape = 14, // Internal data used for tracking the shapes (or + // "hidden classes") of JS objects. }; /** Returns node type (see HeapGraphNode::Type). */ @@ -495,37 +624,6 @@ class V8_EXPORT HeapGraphNode { const HeapGraphEdge* GetChild(int index) const; }; - -/** - * An interface for exporting data from V8, using "push" model. - */ -class V8_EXPORT OutputStream { - public: - enum WriteResult { - kContinue = 0, - kAbort = 1 - }; - virtual ~OutputStream() = default; - /** Notify about the end of stream. */ - virtual void EndOfStream() = 0; - /** Get preferred output chunk size. Called only once. */ - virtual int GetChunkSize() { return 1024; } - /** - * Writes the next chunk of snapshot data into the stream. Writing - * can be stopped by returning kAbort as function result. EndOfStream - * will not be called in case writing was aborted. - */ - virtual WriteResult WriteAsciiChunk(char* data, int size) = 0; - /** - * Writes the next chunk of heap stats data into the stream. Writing - * can be stopped by returning kAbort as function result. EndOfStream - * will not be called in case writing was aborted. - */ - virtual WriteResult WriteHeapStatsChunk(HeapStatsUpdate* data, int count) { - return kAbort; - } -}; - /** * HeapSnapshots record the state of the JS heap at some moment. */ @@ -822,6 +920,8 @@ class V8_EXPORT HeapProfiler { enum SamplingFlags { kSamplingNoFlags = 0, kSamplingForceGC = 1 << 0, + kSamplingIncludeObjectsCollectedByMajorGC = 1 << 1, + kSamplingIncludeObjectsCollectedByMinorGC = 1 << 2, }; /** @@ -899,14 +999,71 @@ class V8_EXPORT HeapProfiler { virtual ~ObjectNameResolver() = default; }; + enum class HeapSnapshotMode { + /** + * Heap snapshot for regular developers. + */ + kRegular, + /** + * Heap snapshot is exposing internals that may be useful for experts. + */ + kExposeInternals, + }; + + enum class NumericsMode { + /** + * Numeric values are hidden as they are values of the corresponding + * objects. + */ + kHideNumericValues, + /** + * Numeric values are exposed in artificial fields. + */ + kExposeNumericValues + }; + + struct HeapSnapshotOptions final { + // Manually define default constructor here to be able to use it in + // `TakeSnapshot()` below. + // NOLINTNEXTLINE + HeapSnapshotOptions() {} + + /** + * The control used to report intermediate progress to. + */ + ActivityControl* control = nullptr; + /** + * The resolver used by the snapshot generator to get names for V8 objects. + */ + ObjectNameResolver* global_object_name_resolver = nullptr; + /** + * Mode for taking the snapshot, see `HeapSnapshotMode`. + */ + HeapSnapshotMode snapshot_mode = HeapSnapshotMode::kRegular; + /** + * Mode for dealing with numeric values, see `NumericsMode`. + */ + NumericsMode numerics_mode = NumericsMode::kHideNumericValues; + }; + /** - * Takes a heap snapshot and returns it. + * Takes a heap snapshot. + * + * \returns the snapshot. + */ + const HeapSnapshot* TakeHeapSnapshot( + const HeapSnapshotOptions& options = HeapSnapshotOptions()); + + /** + * Takes a heap snapshot. See `HeapSnapshotOptions` for details on the + * parameters. + * + * \returns the snapshot. 
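The options struct above replaces the old boolean parameters of TakeHeapSnapshot. A sketch of an "expert" snapshot request; `profiler` is assumed to be isolate->GetHeapProfiler():

#include "v8-profiler.h"

const v8::HeapSnapshot* SnapshotForExperts(v8::HeapProfiler* profiler) {
  v8::HeapProfiler::HeapSnapshotOptions options;
  options.snapshot_mode =
      v8::HeapProfiler::HeapSnapshotMode::kExposeInternals;
  options.numerics_mode =
      v8::HeapProfiler::NumericsMode::kExposeNumericValues;
  // control / global_object_name_resolver stay nullptr: no progress
  // reporting and default object naming.
  return profiler->TakeHeapSnapshot(options);
}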
*/ const HeapSnapshot* TakeHeapSnapshot( - ActivityControl* control = nullptr, + ActivityControl* control, ObjectNameResolver* global_object_name_resolver = nullptr, - bool treat_global_objects_as_roots = true, - bool capture_numeric_value = false); + bool hide_internals = true, bool capture_numeric_value = false); /** * Starts tracking of heap objects population statistics. After calling @@ -959,10 +1116,8 @@ class V8_EXPORT HeapProfiler { * |stack_depth| parameter controls the maximum number of stack frames to be * captured on each allocation. * - * NOTE: This is a proof-of-concept at this point. Right now we only sample - * newspace allocations. Support for paged space allocation (e.g. pre-tenured - * objects, large objects, code objects, etc.) and native allocations - * doesn't exist yet, but is anticipated in the future. + * NOTE: Support for native allocations doesn't exist yet, but is anticipated + * in the future. * * Objects allocated before the sampling is started will not be included in * the profile. @@ -1025,18 +1180,18 @@ struct HeapStatsUpdate { uint32_t size; // New value of size field for the interval with this index. }; -#define CODE_EVENTS_LIST(V) \ - V(Builtin) \ - V(Callback) \ - V(Eval) \ - V(Function) \ - V(InterpretedFunction) \ - V(Handler) \ - V(BytecodeHandler) \ - V(LazyCompile) \ - V(RegExp) \ - V(Script) \ - V(Stub) \ +#define CODE_EVENTS_LIST(V) \ + V(Builtin) \ + V(Callback) \ + V(Eval) \ + V(Function) \ + V(InterpretedFunction) \ + V(Handler) \ + V(BytecodeHandler) \ + V(LazyCompile) /* Unused, use kFunction instead */ \ + V(RegExp) \ + V(Script) \ + V(Stub) \ V(Relocation) /** diff --git a/deps/include/v8-regexp.h b/deps/include/v8-regexp.h index 3791bc03..135977bf 100644 --- a/deps/include/v8-regexp.h +++ b/deps/include/v8-regexp.h @@ -37,9 +37,10 @@ class V8_EXPORT RegExp : public Object { kDotAll = 1 << 5, kLinear = 1 << 6, kHasIndices = 1 << 7, + kUnicodeSets = 1 << 8, }; - static constexpr int kFlagCount = 8; + static constexpr int kFlagCount = 9; /** * Creates a regular expression from the given pattern string and diff --git a/deps/include/v8-script.h b/deps/include/v8-script.h index bc68dd9a..e2ba8452 100644 --- a/deps/include/v8-script.h +++ b/deps/include/v8-script.h @@ -20,6 +20,7 @@ namespace v8 { class Function; +class Message; class Object; class PrimitiveArray; class Script; @@ -47,7 +48,7 @@ class V8_EXPORT ScriptOrModule { * The options that were passed by the embedder as HostDefinedOptions to * the ScriptOrigin. */ - Local GetHostDefinedOptions(); + Local HostDefinedOptions(); }; /** @@ -76,7 +77,13 @@ class V8_EXPORT UnboundScript { * Returns zero based line number of the code_pos location in the script. * -1 will be returned if no information available. */ - int GetLineNumber(int code_pos); + int GetLineNumber(int code_pos = 0); + + /** + * Returns zero based column number of the code_pos location in the script. + * -1 will be returned if no information available. + */ + int GetColumnNumber(int code_pos = 0); static const int kNoScriptId = 0; }; @@ -85,7 +92,15 @@ class V8_EXPORT UnboundScript { * A compiled JavaScript module, not yet tied to a Context. */ class V8_EXPORT UnboundModuleScript : public Data { - // Only used as a container for code caching. + public: + /** + * Data read from magic sourceURL comments. + */ + Local GetSourceURL(); + /** + * Data read from magic sourceMappingURL comments. 
+ */ + Local GetSourceMappingURL(); }; /** @@ -170,29 +185,6 @@ class V8_EXPORT Module : public Data { */ Local GetException() const; - /** - * Returns the number of modules requested by this module. - */ - V8_DEPRECATED("Use Module::GetModuleRequests() and FixedArray::Length().") - int GetModuleRequestsLength() const; - - /** - * Returns the ith module specifier in this module. - * i must be < GetModuleRequestsLength() and >= 0. - */ - V8_DEPRECATED( - "Use Module::GetModuleRequests() and ModuleRequest::GetSpecifier().") - Local GetModuleRequest(int i) const; - - /** - * Returns the source location (line number and column number) of the ith - * module specifier's first occurrence in this module. - */ - V8_DEPRECATED( - "Use Module::GetModuleRequests(), ModuleRequest::GetSourceOffset(), and " - "Module::SourceOffsetToLocation().") - Location GetModuleRequestLocation(int i) const; - /** * Returns the ModuleRequests for this module. */ @@ -209,9 +201,6 @@ class V8_EXPORT Module : public Data { */ int GetIdentityHash() const; - using ResolveCallback V8_DEPRECATED("Use ResolveModuleCallback") = - MaybeLocal (*)(Local context, Local specifier, - Local referrer); using ResolveModuleCallback = MaybeLocal (*)( Local context, Local specifier, Local import_assertions, Local referrer); @@ -223,11 +212,6 @@ class V8_EXPORT Module : public Data { * instantiation. (In the case where the callback throws an exception, that * exception is propagated.) */ - V8_DEPRECATED( - "Use the version of InstantiateModule that takes a ResolveModuleCallback " - "parameter") - V8_WARN_UNUSED_RESULT Maybe InstantiateModule(Local context, - ResolveCallback callback); V8_WARN_UNUSED_RESULT Maybe InstantiateModule( Local context, ResolveModuleCallback callback); @@ -315,6 +299,16 @@ class V8_EXPORT Module : public Data { V8_WARN_UNUSED_RESULT Maybe SetSyntheticModuleExport( Isolate* isolate, Local export_name, Local export_value); + /** + * Search the modules requested directly or indirectly by the module for + * any top-level await that has not yet resolved. If there is any, the + * returned vector contains a tuple of the unresolved module and a message + * with the pending top-level await. + * An embedder may call this before exiting to improve error messages. + */ + std::vector, Local>> + GetStalledTopLevelAwaitMessage(Isolate* isolate); + V8_INLINE static Module* Cast(Data* data); private: @@ -340,6 +334,8 @@ class V8_EXPORT Script { * UnboundScript::BindToCurrentContext()). */ V8_WARN_UNUSED_RESULT MaybeLocal Run(Local context); + V8_WARN_UNUSED_RESULT MaybeLocal Run(Local context, + Local host_defined_options); /** * Returns the corresponding context-unbound script. @@ -403,6 +399,7 @@ class V8_EXPORT ScriptCompiler { class Source { public: // Source takes ownership of both CachedData and CodeCacheConsumeTask. + // The caller *must* ensure that the cached data is from a trusted source. V8_INLINE Source(Local source_string, const ScriptOrigin& origin, CachedData* cached_data = nullptr, ConsumeCodeCacheTask* consume_cache_task = nullptr); @@ -430,7 +427,7 @@ class V8_EXPORT ScriptCompiler { int resource_column_offset; ScriptOriginOptions resource_options; Local source_map_url; - Local host_defined_options; + Local host_defined_options; // Cached data from previous compilation (if a kConsume*Cache flag is // set), or hold newly generated cache data (kProduce*Cache flags) are @@ -469,18 +466,6 @@ class V8_EXPORT ScriptCompiler { * V8 has parsed the data it received so far. 
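With the old ResolveCallback removed in the Module hunk above, InstantiateModule always receives the assertion-aware callback shape. A sketch of the wiring; the registry lookup is hypothetical embedder code and simply fails here:

#include "v8-script.h"

v8::MaybeLocal<v8::Module> ResolveModule(
    v8::Local<v8::Context> context, v8::Local<v8::String> specifier,
    v8::Local<v8::FixedArray> import_assertions,
    v8::Local<v8::Module> referrer) {
  // A real embedder would map `specifier` to a compiled module here.
  // Returning an empty MaybeLocal reports a resolution failure to V8.
  return v8::MaybeLocal<v8::Module>();
}

bool Instantiate(v8::Local<v8::Context> context,
                 v8::Local<v8::Module> module) {
  // Maybe<bool> is Nothing when resolution threw; treat that as failure.
  return module->InstantiateModule(context, ResolveModule).FromMaybe(false);
}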
*/ virtual size_t GetMoreData(const uint8_t** src) = 0; - - /** - * [DEPRECATED]: No longer used, will be removed soon. - */ - V8_DEPRECATED("Not used") - virtual bool SetBookmark() { return false; } - - /** - * [DEPRECATED]: No longer used, will be removed soon. - */ - V8_DEPRECATED("Not used") - virtual void ResetToBookmark() {} }; /** @@ -527,7 +512,7 @@ class V8_EXPORT ScriptCompiler { /** * A task which the embedder must run on a background thread to * consume a V8 code cache. Returned by - * ScriptCompiler::StarConsumingCodeCache. + * ScriptCompiler::StartConsumingCodeCache. */ class V8_EXPORT ConsumeCodeCacheTask final { public: @@ -535,6 +520,36 @@ class V8_EXPORT ScriptCompiler { void Run(); + /** + * Provides the source text string and origin information to the consumption + * task. May be called before, during, or after Run(). This step checks + * whether the script matches an existing script in the Isolate's + * compilation cache. To check whether such a script was found, call + * ShouldMergeWithExistingScript. + * + * The Isolate provided must be the same one used during + * StartConsumingCodeCache and must be currently entered on the thread that + * calls this function. The source text and origin provided in this step + * must precisely match those used later in the ScriptCompiler::Source that + * will contain this ConsumeCodeCacheTask. + */ + void SourceTextAvailable(Isolate* isolate, Local source_text, + const ScriptOrigin& origin); + + /** + * Returns whether the embedder should call MergeWithExistingScript. This + * function may be called from any thread, any number of times, but its + * return value is only meaningful after SourceTextAvailable has completed. + */ + bool ShouldMergeWithExistingScript() const; + + /** + * Merges newly deserialized data into an existing script which was found + * during SourceTextAvailable. May be called only after Run() has completed. + * Can execute on any thread, like Run(). + */ + void MergeWithExistingScript(); + private: friend class ScriptCompiler; @@ -619,7 +634,8 @@ class V8_EXPORT ScriptCompiler { */ static ScriptStreamingTask* StartStreaming( Isolate* isolate, StreamedSource* source, - ScriptType type = ScriptType::kClassic); + ScriptType type = ScriptType::kClassic, + CompileOptions options = kNoCompileOptions); static ConsumeCodeCacheTask* StartConsumingCodeCache( Isolate* isolate, std::unique_ptr source); @@ -688,7 +704,7 @@ class V8_EXPORT ScriptCompiler { * It is possible to specify multiple context extensions (obj in the above * example). 
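The three-step protocol in the ConsumeCodeCacheTask hunk above (SourceTextAvailable before or around Run, ShouldMergeWithExistingScript after Run, MergeWithExistingScript last) is easiest to see linearized. A sketch with the background-thread plumbing elided; note that some V8 versions also expect the Source to carry its own CachedData view of the same bytes, so treat this strictly as an outline:

#include <memory>
#include "v8-script.h"

v8::MaybeLocal<v8::Script> CompileWithCache(
    v8::Isolate* isolate, v8::Local<v8::Context> context,
    v8::Local<v8::String> source_text, const v8::ScriptOrigin& origin,
    std::unique_ptr<v8::ScriptCompiler::CachedData> cached_data) {
  std::unique_ptr<v8::ScriptCompiler::ConsumeCodeCacheTask> task(
      v8::ScriptCompiler::StartConsumingCodeCache(isolate,
                                                  std::move(cached_data)));
  // Requires `isolate` to be entered; probes the compilation cache.
  task->SourceTextAvailable(isolate, source_text, origin);
  task->Run();  // normally runs on a worker thread
  if (task->ShouldMergeWithExistingScript()) {
    task->MergeWithExistingScript();  // only valid after Run() completed
  }
  // Source takes ownership of the task; the cache must come from a
  // trusted producer (see the comment on Source above).
  v8::ScriptCompiler::Source source(source_text, origin, nullptr,
                                    task.release());
  return v8::ScriptCompiler::Compile(context, &source,
                                     v8::ScriptCompiler::kConsumeCodeCache);
}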
*/ - V8_DEPRECATE_SOON("Use CompileFunction") + V8_DEPRECATED("Use CompileFunction") static V8_WARN_UNUSED_RESULT MaybeLocal CompileFunctionInContext( Local context, Source* source, size_t arguments_count, Local arguments[], size_t context_extension_count, @@ -696,6 +712,7 @@ class V8_EXPORT ScriptCompiler { CompileOptions options = kNoCompileOptions, NoCacheReason no_cache_reason = kNoCacheNoReason, Local* script_or_module_out = nullptr); + static V8_WARN_UNUSED_RESULT MaybeLocal CompileFunction( Local context, Source* source, size_t arguments_count = 0, Local arguments[] = nullptr, size_t context_extension_count = 0, @@ -748,7 +765,7 @@ ScriptCompiler::Source::Source(Local string, const ScriptOrigin& origin, resource_column_offset(origin.ColumnOffset()), resource_options(origin.Options()), source_map_url(origin.SourceMapUrl()), - host_defined_options(origin.HostDefinedOptions()), + host_defined_options(origin.GetHostDefinedOptions()), cached_data(data), consume_cache_task(consume_cache_task) {} diff --git a/deps/include/v8-snapshot.h b/deps/include/v8-snapshot.h index ed02598c..2400357c 100644 --- a/deps/include/v8-snapshot.h +++ b/deps/include/v8-snapshot.h @@ -5,8 +5,6 @@ #ifndef INCLUDE_V8_SNAPSHOT_H_ #define INCLUDE_V8_SNAPSHOT_H_ -#include - #include "v8-internal.h" // NOLINT(build/include_directory) #include "v8-local-handle.h" // NOLINT(build/include_directory) #include "v8config.h" // NOLINT(build/include_directory) diff --git a/deps/include/v8-statistics.h b/deps/include/v8-statistics.h index 7f69e5d6..ca20bc9f 100644 --- a/deps/include/v8-statistics.h +++ b/deps/include/v8-statistics.h @@ -201,11 +201,13 @@ class V8_EXPORT HeapCodeStatistics { size_t code_and_metadata_size() { return code_and_metadata_size_; } size_t bytecode_and_metadata_size() { return bytecode_and_metadata_size_; } size_t external_script_source_size() { return external_script_source_size_; } + size_t cpu_profiler_metadata_size() { return cpu_profiler_metadata_size_; } private: size_t code_and_metadata_size_; size_t bytecode_and_metadata_size_; size_t external_script_source_size_; + size_t cpu_profiler_metadata_size_; friend class Isolate; }; diff --git a/deps/include/v8-template.h b/deps/include/v8-template.h index 96fcab60..669012a9 100644 --- a/deps/include/v8-template.h +++ b/deps/include/v8-template.h @@ -14,7 +14,6 @@ namespace v8 { -class AccessorSignature; class CFunction; class FunctionTemplate; class ObjectTemplate; @@ -83,17 +82,11 @@ class V8_EXPORT Template : public Data { * cross-context access. * \param attribute The attributes of the property for which an accessor * is added. - * \param signature The signature describes valid receivers for the accessor - * and is used to perform implicit instance checks against them. If the - * receiver is incompatible (i.e. is not an instance of the constructor as - * defined by FunctionTemplate::HasInstance()), an implicit TypeError is - * thrown and no callback is invoked. 
*/ void SetNativeDataProperty( Local name, AccessorGetterCallback getter, AccessorSetterCallback setter = nullptr, Local data = Local(), PropertyAttribute attribute = None, - Local signature = Local(), AccessControl settings = DEFAULT, SideEffectType getter_side_effect_type = SideEffectType::kHasSideEffect, SideEffectType setter_side_effect_type = SideEffectType::kHasSideEffect); @@ -101,7 +94,6 @@ class V8_EXPORT Template : public Data { Local name, AccessorNameGetterCallback getter, AccessorNameSetterCallback setter = nullptr, Local data = Local(), PropertyAttribute attribute = None, - Local signature = Local(), AccessControl settings = DEFAULT, SideEffectType getter_side_effect_type = SideEffectType::kHasSideEffect, SideEffectType setter_side_effect_type = SideEffectType::kHasSideEffect); @@ -137,7 +129,8 @@ class V8_EXPORT Template : public Data { * Interceptor for get requests on an object. * * Use `info.GetReturnValue().Set()` to set the return value of the - * intercepted get request. + * intercepted get request. If the property does not exist the callback should + * not set the result and must not produce side effects. * * \param property The name of the property for which the request was * intercepted. @@ -178,9 +171,9 @@ using GenericNamedPropertyGetterCallback = * Use `info.GetReturnValue()` to indicate whether the request was intercepted * or not. If the setter successfully intercepts the request, i.e., if the * request should not be further executed, call - * `info.GetReturnValue().Set(value)`. If the setter - * did not intercept the request, i.e., if the request should be handled as - * if no interceptor is present, do not not call `Set()`. + * `info.GetReturnValue().Set(value)`. If the setter did not intercept the + * request, i.e., if the request should be handled as if no interceptor is + * present, do not not call `Set()` and do not produce side effects. * * \param property The name of the property for which the request was * intercepted. @@ -203,7 +196,9 @@ using GenericNamedPropertySetterCallback = * defineProperty(). * * Use `info.GetReturnValue().Set(value)` to set the property attributes. The - * value is an integer encoding a `v8::PropertyAttribute`. + * value is an integer encoding a `v8::PropertyAttribute`. If the property does + * not exist the callback should not set the result and must not produce side + * effects. * * \param property The name of the property for which the request was * intercepted. @@ -228,7 +223,8 @@ using GenericNamedPropertyQueryCallback = * or not. If the deleter successfully intercepts the request, i.e., if the * request should not be further executed, call * `info.GetReturnValue().Set(value)` with a boolean `value`. The `value` is - * used as the return value of `delete`. + * used as the return value of `delete`. If the deleter does not intercept the + * request then it should not set the result and must not produce side effects. * * \param property The name of the property for which the request was * intercepted. @@ -260,9 +256,9 @@ using GenericNamedPropertyEnumeratorCallback = * Use `info.GetReturnValue()` to indicate whether the request was intercepted * or not. If the definer successfully intercepts the request, i.e., if the * request should not be further executed, call - * `info.GetReturnValue().Set(value)`. If the definer - * did not intercept the request, i.e., if the request should be handled as - * if no interceptor is present, do not not call `Set()`. + * `info.GetReturnValue().Set(value)`. 
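The rule repeated through these interceptor docs, that a callback which does not intercept must leave the result unset and cause no side effects, is the core contract. A sketch of a conforming named-property getter; the handled-key predicate is a placeholder:

#include "v8-primitive.h"
#include "v8-template.h"

void MyNamedGetter(v8::Local<v8::Name> property,
                   const v8::PropertyCallbackInfo<v8::Value>& info) {
  bool handled = false;  // hypothetical: does the embedder back `property`?
  if (!handled) return;  // not intercepted: leave the result unset
  info.GetReturnValue().Set(v8::Number::New(info.GetIsolate(), 42.0));
}

// Installed through ObjectTemplate::SetHandler with a
// NamedPropertyHandlerConfiguration whose getter is MyNamedGetter.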
If the definer did not intercept the + * request, i.e., if the request should be handled as if no interceptor is + * present, do not not call `Set()` and do not produce side effects. * * \param property The name of the property for which the request was * intercepted. @@ -807,18 +803,12 @@ class V8_EXPORT ObjectTemplate : public Template { * cross-context access. * \param attribute The attributes of the property for which an accessor * is added. - * \param signature The signature describes valid receivers for the accessor - * and is used to perform implicit instance checks against them. If the - * receiver is incompatible (i.e. is not an instance of the constructor as - * defined by FunctionTemplate::HasInstance()), an implicit TypeError is - * thrown and no callback is invoked. */ void SetAccessor( Local name, AccessorGetterCallback getter, AccessorSetterCallback setter = nullptr, Local data = Local(), AccessControl settings = DEFAULT, PropertyAttribute attribute = None, - Local signature = Local(), SideEffectType getter_side_effect_type = SideEffectType::kHasSideEffect, SideEffectType setter_side_effect_type = SideEffectType::kHasSideEffect); void SetAccessor( @@ -826,7 +816,6 @@ class V8_EXPORT ObjectTemplate : public Template { AccessorNameSetterCallback setter = nullptr, Local data = Local(), AccessControl settings = DEFAULT, PropertyAttribute attribute = None, - Local signature = Local(), SideEffectType getter_side_effect_type = SideEffectType::kHasSideEffect, SideEffectType setter_side_effect_type = SideEffectType::kHasSideEffect); @@ -992,24 +981,6 @@ class V8_EXPORT Signature : public Data { static void CheckCast(Data* that); }; -/** - * An AccessorSignature specifies which receivers are valid parameters - * to an accessor callback. - */ -class V8_EXPORT AccessorSignature : public Data { - public: - static Local New( - Isolate* isolate, - Local receiver = Local()); - - V8_INLINE static AccessorSignature* Cast(Data* data); - - private: - AccessorSignature(); - - static void CheckCast(Data* that); -}; - // --- Implementation --- void Template::Set(Isolate* isolate, const char* name, Local value, @@ -1040,13 +1011,6 @@ Signature* Signature::Cast(Data* data) { return reinterpret_cast(data); } -AccessorSignature* AccessorSignature::Cast(Data* data) { -#ifdef V8_ENABLE_CHECKS - CheckCast(data); -#endif - return reinterpret_cast(data); -} - } // namespace v8 #endif // INCLUDE_V8_TEMPLATE_H_ diff --git a/deps/include/v8-traced-handle.h b/deps/include/v8-traced-handle.h index 7db34a97..e0fd57c4 100644 --- a/deps/include/v8-traced-handle.h +++ b/deps/include/v8-traced-handle.h @@ -11,10 +11,8 @@ #include #include -#include #include #include -#include #include "v8-internal.h" // NOLINT(build/include_directory) #include "v8-local-handle.h" // NOLINT(build/include_directory) @@ -29,8 +27,6 @@ namespace internal { class BasicTracedReferenceExtractor; -enum class GlobalHandleDestructionMode { kWithDestructor, kWithoutDestructor }; - enum class GlobalHandleStoreMode { kInitializingStore, kAssigningStore, @@ -38,25 +34,15 @@ enum class GlobalHandleStoreMode { V8_EXPORT internal::Address* GlobalizeTracedReference( internal::Isolate* isolate, internal::Address* handle, - internal::Address* slot, GlobalHandleDestructionMode destruction_mode, - GlobalHandleStoreMode store_mode); -V8_EXPORT void MoveTracedGlobalReference(internal::Address** from, - internal::Address** to); -V8_EXPORT void CopyTracedGlobalReference(const internal::Address* const* from, - internal::Address** to); -V8_EXPORT void 
DisposeTracedGlobal(internal::Address* global_handle); -V8_EXPORT void SetFinalizationCallbackTraced( - internal::Address* location, void* parameter, - WeakCallbackInfo::Callback callback); + internal::Address* slot, GlobalHandleStoreMode store_mode); +V8_EXPORT void MoveTracedReference(internal::Address** from, + internal::Address** to); +V8_EXPORT void CopyTracedReference(const internal::Address* const* from, + internal::Address** to); +V8_EXPORT void DisposeTracedReference(internal::Address* global_handle); } // namespace internal -/** - * Deprecated. Use |TracedReference| instead. - */ -template -struct TracedGlobalTrait {}; - class TracedReferenceBase { public: /** @@ -140,9 +126,8 @@ class TracedReferenceBase { * |v8::EmbedderRootsHandler::IsRoot()| whether the handle should * be treated as root or not. * - * Note that the base class cannot be instantiated itself. Choose from - * - TracedGlobal - * - TracedReference + * Note that the base class cannot be instantiated itself, use |TracedReference| + * instead. */ template class BasicTracedReference : public TracedReferenceBase { @@ -179,7 +164,6 @@ class BasicTracedReference : public TracedReferenceBase { V8_INLINE static internal::Address* New( Isolate* isolate, T* that, void* slot, - internal::GlobalHandleDestructionMode destruction_mode, internal::GlobalHandleStoreMode store_mode); friend class EmbedderHeapTracer; @@ -187,8 +171,6 @@ class BasicTracedReference : public TracedReferenceBase { friend class Local; friend class Object; template - friend class TracedGlobal; - template friend class TracedReference; template friend class BasicTracedReference; @@ -196,129 +178,6 @@ class BasicTracedReference : public TracedReferenceBase { friend class ReturnValue; }; -/** - * A traced handle with destructor that clears the handle. For more details see - * BasicTracedReference. - */ -template -class TracedGlobal : public BasicTracedReference { - public: - using BasicTracedReference::Reset; - - /** - * Destructor resetting the handle.Is - */ - ~TracedGlobal() { this->Reset(); } - - /** - * An empty TracedGlobal without storage cell. - */ - TracedGlobal() : BasicTracedReference() {} - - /** - * Construct a TracedGlobal from a Local. - * - * When the Local is non-empty, a new storage cell is created - * pointing to the same object. - */ - template - TracedGlobal(Isolate* isolate, Local that) : BasicTracedReference() { - this->val_ = - this->New(isolate, that.val_, &this->val_, - internal::GlobalHandleDestructionMode::kWithDestructor, - internal::GlobalHandleStoreMode::kInitializingStore); - static_assert(std::is_base_of::value, "type check"); - } - - /** - * Move constructor initializing TracedGlobal from an existing one. - */ - V8_INLINE TracedGlobal(TracedGlobal&& other) noexcept { - // Forward to operator=. - *this = std::move(other); - } - - /** - * Move constructor initializing TracedGlobal from an existing one. - */ - template - V8_INLINE TracedGlobal(TracedGlobal&& other) noexcept { - // Forward to operator=. - *this = std::move(other); - } - - /** - * Copy constructor initializing TracedGlobal from an existing one. - */ - V8_INLINE TracedGlobal(const TracedGlobal& other) { - // Forward to operator=; - *this = other; - } - - /** - * Copy constructor initializing TracedGlobal from an existing one. - */ - template - V8_INLINE TracedGlobal(const TracedGlobal& other) { - // Forward to operator=; - *this = other; - } - - /** - * Move assignment operator initializing TracedGlobal from an existing one. 
- */ - V8_INLINE TracedGlobal& operator=(TracedGlobal&& rhs) noexcept; - - /** - * Move assignment operator initializing TracedGlobal from an existing one. - */ - template - V8_INLINE TracedGlobal& operator=(TracedGlobal&& rhs) noexcept; - - /** - * Copy assignment operator initializing TracedGlobal from an existing one. - * - * Note: Prohibited when |other| has a finalization callback set through - * |SetFinalizationCallback|. - */ - V8_INLINE TracedGlobal& operator=(const TracedGlobal& rhs); - - /** - * Copy assignment operator initializing TracedGlobal from an existing one. - * - * Note: Prohibited when |other| has a finalization callback set through - * |SetFinalizationCallback|. - */ - template - V8_INLINE TracedGlobal& operator=(const TracedGlobal& rhs); - - /** - * If non-empty, destroy the underlying storage cell and create a new one with - * the contents of other if other is non empty - */ - template - V8_INLINE void Reset(Isolate* isolate, const Local& other); - - template - V8_INLINE TracedGlobal& As() const { - return reinterpret_cast&>( - const_cast&>(*this)); - } - - /** - * Adds a finalization callback to the handle. The type of this callback is - * similar to WeakCallbackType::kInternalFields, i.e., it will pass the - * parameter and the first two internal fields of the object. - * - * The callback is then supposed to reset the handle in the callback. No - * further V8 API may be called in this callback. In case additional work - * involving V8 needs to be done, a second callback can be scheduled using - * WeakCallbackInfo::SetSecondPassCallback. - */ - V8_INLINE void SetFinalizationCallback( - void* parameter, WeakCallbackInfo::Callback callback); -}; - /** * A traced handle without destructor that clears the handle. The embedder needs * to ensure that the handle is not accessed once the V8 object has been @@ -348,10 +207,8 @@ class TracedReference : public BasicTracedReference { */ template TracedReference(Isolate* isolate, Local that) : BasicTracedReference() { - this->val_ = - this->New(isolate, that.val_, &this->val_, - internal::GlobalHandleDestructionMode::kWithoutDestructor, - internal::GlobalHandleStoreMode::kInitializingStore); + this->val_ = this->New(isolate, that.val_, &this->val_, + internal::GlobalHandleStoreMode::kInitializingStore); static_assert(std::is_base_of::value, "type check"); } @@ -394,23 +251,23 @@ class TracedReference : public BasicTracedReference { } /** - * Move assignment operator initializing TracedGlobal from an existing one. + * Move assignment operator initializing TracedReference from an existing one. */ V8_INLINE TracedReference& operator=(TracedReference&& rhs) noexcept; /** - * Move assignment operator initializing TracedGlobal from an existing one. + * Move assignment operator initializing TracedReference from an existing one. */ template V8_INLINE TracedReference& operator=(TracedReference&& rhs) noexcept; /** - * Copy assignment operator initializing TracedGlobal from an existing one. + * Copy assignment operator initializing TracedReference from an existing one. */ V8_INLINE TracedReference& operator=(const TracedReference& rhs); /** - * Copy assignment operator initializing TracedGlobal from an existing one. + * Copy assignment operator initializing TracedReference from an existing one. 
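+ *
+ * A hedged usage sketch (embedder-side code, not part of this header;
+ * |isolate| and |obj| are assumed to exist):
+ *
+ *   v8::TracedReference<v8::Object> src(isolate, obj);
+ *   v8::TracedReference<v8::Object> dst;
+ *   dst = src;  // copies the traced handle; both now refer to |obj|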
*/ template V8_INLINE TracedReference& operator=(const TracedReference& rhs); @@ -433,18 +290,17 @@ class TracedReference : public BasicTracedReference { template internal::Address* BasicTracedReference::New( Isolate* isolate, T* that, void* slot, - internal::GlobalHandleDestructionMode destruction_mode, internal::GlobalHandleStoreMode store_mode) { if (that == nullptr) return nullptr; internal::Address* p = reinterpret_cast(that); return internal::GlobalizeTracedReference( reinterpret_cast(isolate), p, - reinterpret_cast(slot), destruction_mode, store_mode); + reinterpret_cast(slot), store_mode); } void TracedReferenceBase::Reset() { if (IsEmpty()) return; - internal::DisposeTracedGlobal(reinterpret_cast(val_)); + internal::DisposeTracedReference(reinterpret_cast(val_)); SetSlotThreadSafe(nullptr); } @@ -490,56 +346,6 @@ V8_INLINE bool operator!=(const v8::Local& lhs, return !(rhs == lhs); } -template -template -void TracedGlobal::Reset(Isolate* isolate, const Local& other) { - static_assert(std::is_base_of::value, "type check"); - Reset(); - if (other.IsEmpty()) return; - this->val_ = this->New(isolate, other.val_, &this->val_, - internal::GlobalHandleDestructionMode::kWithDestructor, - internal::GlobalHandleStoreMode::kAssigningStore); -} - -template -template -TracedGlobal& TracedGlobal::operator=(TracedGlobal&& rhs) noexcept { - static_assert(std::is_base_of::value, "type check"); - *this = std::move(rhs.template As()); - return *this; -} - -template -template -TracedGlobal& TracedGlobal::operator=(const TracedGlobal& rhs) { - static_assert(std::is_base_of::value, "type check"); - *this = rhs.template As(); - return *this; -} - -template -TracedGlobal& TracedGlobal::operator=(TracedGlobal&& rhs) noexcept { - if (this != &rhs) { - internal::MoveTracedGlobalReference( - reinterpret_cast(&rhs.val_), - reinterpret_cast(&this->val_)); - } - return *this; -} - -template -TracedGlobal& TracedGlobal::operator=(const TracedGlobal& rhs) { - if (this != &rhs) { - this->Reset(); - if (rhs.val_ != nullptr) { - internal::CopyTracedGlobalReference( - reinterpret_cast(&rhs.val_), - reinterpret_cast(&this->val_)); - } - } - return *this; -} - template template void TracedReference::Reset(Isolate* isolate, const Local& other) { @@ -548,7 +354,6 @@ void TracedReference::Reset(Isolate* isolate, const Local& other) { if (other.IsEmpty()) return; this->SetSlotThreadSafe( this->New(isolate, other.val_, &this->val_, - internal::GlobalHandleDestructionMode::kWithoutDestructor, internal::GlobalHandleStoreMode::kAssigningStore)); } @@ -574,7 +379,7 @@ template TracedReference& TracedReference::operator=( TracedReference&& rhs) noexcept { if (this != &rhs) { - internal::MoveTracedGlobalReference( + internal::MoveTracedReference( reinterpret_cast(&rhs.val_), reinterpret_cast(&this->val_)); } @@ -586,7 +391,7 @@ TracedReference& TracedReference::operator=(const TracedReference& rhs) { if (this != &rhs) { this->Reset(); if (rhs.val_ != nullptr) { - internal::CopyTracedGlobalReference( + internal::CopyTracedReference( reinterpret_cast(&rhs.val_), reinterpret_cast(&this->val_)); } @@ -598,7 +403,7 @@ void TracedReferenceBase::SetWrapperClassId(uint16_t class_id) { using I = internal::Internals; if (IsEmpty()) return; internal::Address* obj = reinterpret_cast(val_); - uint8_t* addr = reinterpret_cast(obj) + I::kNodeClassIdOffset; + uint8_t* addr = reinterpret_cast(obj) + I::kTracedNodeClassIdOffset; *reinterpret_cast(addr) = class_id; } @@ -606,17 +411,10 @@ uint16_t TracedReferenceBase::WrapperClassId() const { using 
I = internal::Internals; if (IsEmpty()) return 0; internal::Address* obj = reinterpret_cast(val_); - uint8_t* addr = reinterpret_cast(obj) + I::kNodeClassIdOffset; + uint8_t* addr = reinterpret_cast(obj) + I::kTracedNodeClassIdOffset; return *reinterpret_cast(addr); } -template -void TracedGlobal::SetFinalizationCallback( - void* parameter, typename WeakCallbackInfo::Callback callback) { - internal::SetFinalizationCallbackTraced( - reinterpret_cast(this->val_), parameter, callback); -} - } // namespace v8 #endif // INCLUDE_V8_TRACED_HANDLE_H_ diff --git a/deps/include/v8-unwinder-state.h b/deps/include/v8-unwinder-state.h index a30f7325..18bb410d 100644 --- a/deps/include/v8-unwinder-state.h +++ b/deps/include/v8-unwinder-state.h @@ -17,10 +17,10 @@ struct CalleeSavedRegisters { void* arm_r9; void* arm_r10; }; -#elif V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_ARM64 || \ - V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64 || V8_TARGET_ARCH_PPC || \ - V8_TARGET_ARCH_PPC64 || V8_TARGET_ARCH_RISCV64 || V8_TARGET_ARCH_S390 || \ - V8_TARGET_ARCH_LOONG64 +#elif V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_ARM64 || \ + V8_TARGET_ARCH_MIPS64 || V8_TARGET_ARCH_PPC || V8_TARGET_ARCH_PPC64 || \ + V8_TARGET_ARCH_RISCV64 || V8_TARGET_ARCH_S390 || V8_TARGET_ARCH_LOONG64 || \ + V8_TARGET_ARCH_RISCV32 struct CalleeSavedRegisters {}; #else #error Target architecture was not detected as supported by v8 diff --git a/deps/include/v8-unwinder.h b/deps/include/v8-unwinder.h index 22a5cd71..8dca52f4 100644 --- a/deps/include/v8-unwinder.h +++ b/deps/include/v8-unwinder.h @@ -7,7 +7,8 @@ #include -#include "v8config.h" // NOLINT(build/include_directory) +#include "v8-embedder-state-scope.h" // NOLINT(build/include_directory) +#include "v8config.h" // NOLINT(build/include_directory) namespace v8 { // Holds the callee saved registers needed for the stack unwinder. It is the @@ -32,7 +33,7 @@ struct V8_EXPORT RegisterState { }; // A StateTag represents a possible state of the VM. -enum StateTag { +enum StateTag : int { JS, GC, PARSER, @@ -46,11 +47,13 @@ enum StateTag { // The output structure filled up by GetStackSample API function. struct SampleInfo { - size_t frames_count; // Number of frames collected. - StateTag vm_state; // Current VM state. - void* external_callback_entry; // External callback address if VM is - // executing an external callback. - void* context; // Incumbent native context address. + size_t frames_count; // Number of frames collected. + void* external_callback_entry; // External callback address if VM is + // executing an external callback. + void* context; // Incumbent native context address. + void* embedder_context; // Native context address for embedder state + StateTag vm_state; // Current VM state. + EmbedderStateTag embedder_state; // Current Embedder state }; struct MemoryRange { diff --git a/deps/include/v8-util.h b/deps/include/v8-util.h index c54418aa..159027d3 100644 --- a/deps/include/v8-util.h +++ b/deps/include/v8-util.h @@ -537,7 +537,6 @@ class StdGlobalValueMap : public GlobalValueMap { : GlobalValueMap(isolate) {} }; - class DefaultPersistentValueVectorTraits { public: typedef std::vector Impl; @@ -562,7 +561,6 @@ class DefaultPersistentValueVectorTraits { } }; - /** * A vector wrapper that safely stores Global values. 
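 * (A hedged migration sketch, not from this header: given the deprecation
 * below, new code would hold the handles directly, e.g.
 *   std::vector<v8::Global<v8::Value>> values;
 *   values.emplace_back(isolate, local);  // isolate/local assumed
 * rather than going through this wrapper.)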
* C++11 embedders don't need this class, as they can use Global @@ -573,8 +571,8 @@ class DefaultPersistentValueVectorTraits { * PersistentContainerValue, with all conversion into and out of V8 * handles being transparently handled by this class. */ -template -class PersistentValueVector { +template +class V8_DEPRECATE_SOON("Use std::vector>.") PersistentValueVector { public: explicit PersistentValueVector(Isolate* isolate) : isolate_(isolate) { } diff --git a/deps/include/v8-value-serializer-version.h b/deps/include/v8-value-serializer-version.h index c72911c6..25eb19ca 100644 --- a/deps/include/v8-value-serializer-version.h +++ b/deps/include/v8-value-serializer-version.h @@ -17,7 +17,7 @@ namespace v8 { -constexpr uint32_t CurrentValueSerializerFormatVersion() { return 13; } +constexpr uint32_t CurrentValueSerializerFormatVersion() { return 15; } } // namespace v8 diff --git a/deps/include/v8-value-serializer.h b/deps/include/v8-value-serializer.h index 574567bd..729730c6 100644 --- a/deps/include/v8-value-serializer.h +++ b/deps/include/v8-value-serializer.h @@ -8,6 +8,7 @@ #include #include +#include #include #include "v8-local-handle.h" // NOLINT(build/include_directory) @@ -26,8 +27,37 @@ class Value; namespace internal { struct ScriptStreamingData; +class SharedObjectConveyorHandles; +class ValueDeserializer; +class ValueSerializer; } // namespace internal +/** + * A move-only class for managing the lifetime of shared value conveyors used + * by V8 to keep JS shared values alive in transit when serialized. + * + * This class is not directly constructible and is always passed to a + * ValueSerializer::Delegate via ValueSerializer::SetSharedValueConveyor. + * + * The embedder must not destruct the SharedValueConveyor until the associated + * serialized data will no longer be deserialized. + */ +class V8_EXPORT SharedValueConveyor final { + public: + SharedValueConveyor(SharedValueConveyor&&) noexcept; + ~SharedValueConveyor(); + + SharedValueConveyor& operator=(SharedValueConveyor&&) noexcept; + + private: + friend class internal::ValueSerializer; + friend class internal::ValueDeserializer; + + explicit SharedValueConveyor(Isolate* isolate); + + std::unique_ptr private_; +}; + /** * Value serialization compatible with the HTML structured clone algorithm. * The format is backward-compatible (i.e. safe to store to disk). @@ -67,6 +97,23 @@ class V8_EXPORT ValueSerializer { virtual Maybe GetWasmModuleTransferId( Isolate* isolate, Local module); + + /** + * Called when the first shared value is serialized. All subsequent shared + * values will use the same conveyor. + * + * The embedder must ensure the lifetime of the conveyor matches the + * lifetime of the serialized data. + * + * If the embedder supports serializing shared values, this method should + * return true. Otherwise the embedder should throw an exception and return + * false. + * + * This method is called at most once per serializer. + */ + virtual bool AdoptSharedValueConveyor(Isolate* isolate, + SharedValueConveyor&& conveyor); + /** * Allocates memory for the buffer of at least the size provided. The actual * size (which may be greater or equal) is written to |actual_size|. 
If no @@ -166,17 +213,23 @@ class V8_EXPORT ValueDeserializer { /** * Get a WasmModuleObject given a transfer_id previously provided - * by ValueSerializer::GetWasmModuleTransferId + * by ValueSerializer::Delegate::GetWasmModuleTransferId */ virtual MaybeLocal GetWasmModuleFromId( Isolate* isolate, uint32_t transfer_id); /** * Get a SharedArrayBuffer given a clone_id previously provided - * by ValueSerializer::GetSharedArrayBufferId + * by ValueSerializer::Delegate::GetSharedArrayBufferId */ virtual MaybeLocal GetSharedArrayBufferFromId( Isolate* isolate, uint32_t clone_id); + + /** + * Get the SharedValueConveyor previously provided by + * ValueSerializer::Delegate::AdoptSharedValueConveyor. + */ + virtual const SharedValueConveyor* GetSharedValueConveyor(Isolate* isolate); }; ValueDeserializer(Isolate* isolate, const uint8_t* data, size_t size); diff --git a/deps/include/v8-value.h b/deps/include/v8-value.h index adca989e..866da201 100644 --- a/deps/include/v8-value.h +++ b/deps/include/v8-value.h @@ -244,6 +244,11 @@ class V8_EXPORT Value : public Data { */ bool IsWeakSet() const; + /** + * Returns true if this value is a WeakRef. + */ + bool IsWeakRef() const; + /** * Returns true if this value is an ArrayBuffer. */ diff --git a/deps/include/v8-version.h b/deps/include/v8-version.h index dd6cf463..a9d6f92a 100644 --- a/deps/include/v8-version.h +++ b/deps/include/v8-version.h @@ -8,10 +8,10 @@ // These macros define the version number for the current version. // NOTE these macros are used by some of the tool scripts and the build // system so their names cannot be changed without changing the scripts. -#define V8_MAJOR_VERSION 9 -#define V8_MINOR_VERSION 7 -#define V8_BUILD_NUMBER 106 -#define V8_PATCH_LEVEL 19 +#define V8_MAJOR_VERSION 10 +#define V8_MINOR_VERSION 9 +#define V8_BUILD_NUMBER 194 +#define V8_PATCH_LEVEL 9 // Use 1 for candidates and 0 otherwise. // (Boolean macro values are not supported by all preprocessors.) diff --git a/deps/include/v8-wasm.h b/deps/include/v8-wasm.h index 612ed2fa..05acd2e8 100644 --- a/deps/include/v8-wasm.h +++ b/deps/include/v8-wasm.h @@ -5,6 +5,7 @@ #ifndef INCLUDE_V8_WASM_H_ #define INCLUDE_V8_WASM_H_ +#include #include #include @@ -103,6 +104,12 @@ class V8_EXPORT WasmModuleObject : public Object { */ CompiledWasmModule GetCompiledModule(); + /** + * Compile a Wasm module from the provided uncompiled bytes. + */ + static MaybeLocal Compile( + Isolate* isolate, MemorySpan wire_bytes); + V8_INLINE static WasmModuleObject* Cast(Value* value) { #ifdef V8_ENABLE_CHECKS CheckCast(value); @@ -124,19 +131,6 @@ class V8_EXPORT WasmStreaming final { public: class WasmStreamingImpl; - /** - * Client to receive streaming event notifications. - */ - class Client { - public: - virtual ~Client() = default; - /** - * Passes the fully compiled module to the client. This can be used to - * implement code caching. - */ - virtual void OnModuleCompiled(CompiledWasmModule compiled_module) = 0; - }; - explicit WasmStreaming(std::unique_ptr impl); ~WasmStreaming(); @@ -177,10 +171,11 @@ class V8_EXPORT WasmStreaming final { bool SetCompiledModuleBytes(const uint8_t* bytes, size_t size); /** - * Sets the client object that will receive streaming event notifications. - * This must be called before {OnBytesReceived}, {Finish}, or {Abort}. + * Sets a callback which is called whenever a significant number of new + * functions are ready for serialization. 
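+ *
+ * A hedged usage sketch (embedder code, not part of this header; it
+ * assumes v8::CompiledWasmModule::Serialize() as the code-cache hook):
+ *
+ *   streaming->SetMoreFunctionsCanBeSerializedCallback(
+ *       [](v8::CompiledWasmModule compiled) {
+ *         v8::OwnedBuffer bytes = compiled.Serialize();
+ *         // Persist |bytes| to implement code caching.
+ *       });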
*/ - void SetClient(std::shared_ptr client); + void SetMoreFunctionsCanBeSerializedCallback( + std::function); /* * Sets the UTF-8 encoded source URL for the {Script} object. This must be @@ -200,52 +195,6 @@ class V8_EXPORT WasmStreaming final { std::unique_ptr impl_; }; -// TODO(mtrofin): when streaming compilation is done, we can rename this -// to simply WasmModuleObjectBuilder -class V8_EXPORT WasmModuleObjectBuilderStreaming final { - public: - explicit WasmModuleObjectBuilderStreaming(Isolate* isolate); - /** - * The buffer passed into OnBytesReceived is owned by the caller. - */ - void OnBytesReceived(const uint8_t*, size_t size); - void Finish(); - /** - * Abort streaming compilation. If {exception} has a value, then the promise - * associated with streaming compilation is rejected with that value. If - * {exception} does not have value, the promise does not get rejected. - */ - void Abort(MaybeLocal exception); - Local GetPromise(); - - ~WasmModuleObjectBuilderStreaming() = default; - - private: - WasmModuleObjectBuilderStreaming(const WasmModuleObjectBuilderStreaming&) = - delete; - WasmModuleObjectBuilderStreaming(WasmModuleObjectBuilderStreaming&&) = - default; - WasmModuleObjectBuilderStreaming& operator=( - const WasmModuleObjectBuilderStreaming&) = delete; - WasmModuleObjectBuilderStreaming& operator=( - WasmModuleObjectBuilderStreaming&&) = default; - Isolate* isolate_ = nullptr; - -#if V8_CC_MSVC - /** - * We don't need the static Copy API, so the default - * NonCopyablePersistentTraits would be sufficient, however, - * MSVC eagerly instantiates the Copy. - * We ensure we don't use Copy, however, by compiling with the - * defaults everywhere else. - */ - Persistent> promise_; -#else - Persistent promise_; -#endif - std::shared_ptr streaming_decoder_; -}; - } // namespace v8 #endif // INCLUDE_V8_WASM_H_ diff --git a/deps/include/v8-weak-callback-info.h b/deps/include/v8-weak-callback-info.h index ff3c0823..df4dcb8e 100644 --- a/deps/include/v8-weak-callback-info.h +++ b/deps/include/v8-weak-callback-info.h @@ -51,12 +51,19 @@ class WeakCallbackInfo { void* embedder_fields_[kEmbedderFieldsInWeakCallback]; }; -// kParameter will pass a void* parameter back to the callback, kInternalFields -// will pass the first two internal fields back to the callback, kFinalizer -// will pass a void* parameter back, but is invoked before the object is -// actually collected, so it can be resurrected. In the last case, it is not -// possible to request a second pass callback. -enum class WeakCallbackType { kParameter, kInternalFields, kFinalizer }; +/** + * Weakness type for weak handles. + */ +enum class WeakCallbackType { + /** + * Passes a user-defined void* parameter back to the callback. + */ + kParameter, + /** + * Passes the first two internal fields of the object back to the callback. + */ + kInternalFields, +}; template void* WeakCallbackInfo::GetInternalField(int index) const { diff --git a/deps/include/v8.h b/deps/include/v8.h index dd91f880..1067d3eb 100644 --- a/deps/include/v8.h +++ b/deps/include/v8.h @@ -19,7 +19,6 @@ #include #include -#include #include "cppgc/common.h" #include "v8-array-buffer.h" // NOLINT(build/include_directory) diff --git a/deps/include/v8config.h b/deps/include/v8config.h index ecb99282..2ac27b36 100644 --- a/deps/include/v8config.h +++ b/deps/include/v8config.h @@ -65,13 +65,14 @@ path. 
Add it with -I to the command line // Operating system detection (host) // // V8_OS_ANDROID - Android -// V8_OS_BSD - BSDish (Mac OS X, Net/Free/Open/DragonFlyBSD) +// V8_OS_BSD - BSDish (macOS, Net/Free/Open/DragonFlyBSD) // V8_OS_CYGWIN - Cygwin // V8_OS_DRAGONFLYBSD - DragonFlyBSD // V8_OS_FREEBSD - FreeBSD // V8_OS_FUCHSIA - Fuchsia -// V8_OS_LINUX - Linux -// V8_OS_MACOSX - Mac OS X +// V8_OS_LINUX - Linux (Android, ChromeOS, Linux, ...) +// V8_OS_DARWIN - Darwin (macOS, iOS) +// V8_OS_MACOS - macOS // V8_OS_IOS - iOS // V8_OS_NETBSD - NetBSD // V8_OS_OPENBSD - OpenBSD @@ -89,13 +90,14 @@ path. Add it with -I to the command line # define V8_OS_STRING "android" #elif defined(__APPLE__) -# define V8_OS_BSD 1 -# define V8_OS_MACOSX 1 # define V8_OS_POSIX 1 +# define V8_OS_BSD 1 +# define V8_OS_DARWIN 1 # if defined(TARGET_OS_IPHONE) && TARGET_OS_IPHONE # define V8_OS_IOS 1 # define V8_OS_STRING "ios" # else +# define V8_OS_MACOS 1 # define V8_OS_STRING "macos" # endif // defined(TARGET_OS_IPHONE) && TARGET_OS_IPHONE @@ -169,7 +171,7 @@ path. Add it with -I to the command line // V8_TARGET_OS_FUCHSIA // V8_TARGET_OS_IOS // V8_TARGET_OS_LINUX -// V8_TARGET_OS_MACOSX +// V8_TARGET_OS_MACOS // V8_TARGET_OS_WIN // // If not set explicitly, these fall back to corresponding V8_OS_ values. @@ -181,7 +183,7 @@ path. Add it with -I to the command line && !defined(V8_TARGET_OS_FUCHSIA) \ && !defined(V8_TARGET_OS_IOS) \ && !defined(V8_TARGET_OS_LINUX) \ - && !defined(V8_TARGET_OS_MACOSX) \ + && !defined(V8_TARGET_OS_MACOS) \ && !defined(V8_TARGET_OS_WIN) # error No known target OS defined. # endif @@ -192,7 +194,7 @@ path. Add it with -I to the command line || defined(V8_TARGET_OS_FUCHSIA) \ || defined(V8_TARGET_OS_IOS) \ || defined(V8_TARGET_OS_LINUX) \ - || defined(V8_TARGET_OS_MACOSX) \ + || defined(V8_TARGET_OS_MACOS) \ || defined(V8_TARGET_OS_WIN) # error A target OS is defined but V8_HAVE_TARGET_OS is unset. # endif @@ -214,8 +216,8 @@ path. Add it with -I to the command line # define V8_TARGET_OS_LINUX #endif -#ifdef V8_OS_MACOSX -# define V8_TARGET_OS_MACOSX +#ifdef V8_OS_MACOS +# define V8_TARGET_OS_MACOS #endif #ifdef V8_OS_WIN @@ -232,7 +234,7 @@ path. Add it with -I to the command line # define V8_TARGET_OS_STRING "ios" #elif defined(V8_TARGET_OS_LINUX) # define V8_TARGET_OS_STRING "linux" -#elif defined(V8_TARGET_OS_MACOSX) +#elif defined(V8_TARGET_OS_MACOS) # define V8_TARGET_OS_STRING "macos" #elif defined(V8_TARGET_OS_WINDOWS) # define V8_TARGET_OS_STRING "windows" @@ -286,6 +288,9 @@ path. Add it with -I to the command line // // V8_HAS_ATTRIBUTE_ALWAYS_INLINE - __attribute__((always_inline)) // supported +// V8_HAS_ATTRIBUTE_CONSTINIT - __attribute__((require_constant_ +// initialization)) +// supported // V8_HAS_ATTRIBUTE_NONNULL - __attribute__((nonnull)) supported // V8_HAS_ATTRIBUTE_NOINLINE - __attribute__((noinline)) supported // V8_HAS_ATTRIBUTE_UNUSED - __attribute__((unused)) supported @@ -293,6 +298,8 @@ path. Add it with -I to the command line // V8_HAS_ATTRIBUTE_WARN_UNUSED_RESULT - __attribute__((warn_unused_result)) // supported // V8_HAS_CPP_ATTRIBUTE_NODISCARD - [[nodiscard]] supported +// V8_HAS_CPP_ATTRIBUTE_NO_UNIQUE_ADDRESS +// - [[no_unique_address]] supported // V8_HAS_BUILTIN_BSWAP16 - __builtin_bswap16() supported // V8_HAS_BUILTIN_BSWAP32 - __builtin_bswap32() supported // V8_HAS_BUILTIN_BSWAP64 - __builtin_bswap64() supported @@ -304,6 +311,7 @@ path. 
Add it with -I to the command line // V8_HAS_BUILTIN_SADD_OVERFLOW - __builtin_sadd_overflow() supported // V8_HAS_BUILTIN_SSUB_OVERFLOW - __builtin_ssub_overflow() supported // V8_HAS_BUILTIN_UADD_OVERFLOW - __builtin_uadd_overflow() supported +// V8_HAS_BUILTIN_SMUL_OVERFLOW - __builtin_smul_overflow() supported // V8_HAS_COMPUTED_GOTO - computed goto/labels as values // supported // V8_HAS_DECLSPEC_NOINLINE - __declspec(noinline) supported @@ -329,6 +337,8 @@ path. Add it with -I to the command line #endif # define V8_HAS_ATTRIBUTE_ALWAYS_INLINE (__has_attribute(always_inline)) +# define V8_HAS_ATTRIBUTE_CONSTINIT \ + (__has_attribute(require_constant_initialization)) # define V8_HAS_ATTRIBUTE_NONNULL (__has_attribute(nonnull)) # define V8_HAS_ATTRIBUTE_NOINLINE (__has_attribute(noinline)) # define V8_HAS_ATTRIBUTE_UNUSED (__has_attribute(unused)) @@ -337,7 +347,10 @@ path. Add it with -I to the command line (__has_attribute(warn_unused_result)) # define V8_HAS_CPP_ATTRIBUTE_NODISCARD (V8_HAS_CPP_ATTRIBUTE(nodiscard)) +# define V8_HAS_CPP_ATTRIBUTE_NO_UNIQUE_ADDRESS \ + (V8_HAS_CPP_ATTRIBUTE(no_unique_address)) +# define V8_HAS_BUILTIN_ASSUME (__has_builtin(__builtin_assume)) # define V8_HAS_BUILTIN_ASSUME_ALIGNED (__has_builtin(__builtin_assume_aligned)) # define V8_HAS_BUILTIN_BSWAP16 (__has_builtin(__builtin_bswap16)) # define V8_HAS_BUILTIN_BSWAP32 (__has_builtin(__builtin_bswap32)) @@ -350,6 +363,8 @@ path. Add it with -I to the command line # define V8_HAS_BUILTIN_SADD_OVERFLOW (__has_builtin(__builtin_sadd_overflow)) # define V8_HAS_BUILTIN_SSUB_OVERFLOW (__has_builtin(__builtin_ssub_overflow)) # define V8_HAS_BUILTIN_UADD_OVERFLOW (__has_builtin(__builtin_uadd_overflow)) +# define V8_HAS_BUILTIN_SMUL_OVERFLOW (__has_builtin(__builtin_smul_overflow)) +# define V8_HAS_BUILTIN_UNREACHABLE (__has_builtin(__builtin_unreachable)) // Clang has no __has_feature for computed gotos. // GCC doc: https://gcc.gnu.org/onlinedocs/gcc/Labels-as-Values.html @@ -388,6 +403,7 @@ path. Add it with -I to the command line # define V8_HAS_BUILTIN_EXPECT 1 # define V8_HAS_BUILTIN_FRAME_ADDRESS 1 # define V8_HAS_BUILTIN_POPCOUNT 1 +# define V8_HAS_BUILTIN_UNREACHABLE 1 // GCC doc: https://gcc.gnu.org/onlinedocs/gcc/Labels-as-Values.html #define V8_HAS_COMPUTED_GOTO 1 @@ -419,6 +435,18 @@ path. Add it with -I to the command line # define V8_INLINE inline #endif +#ifdef DEBUG +// In debug mode, check assumptions instead of actually adding annotations. +# define V8_ASSUME(condition) DCHECK(condition) +#elif V8_HAS_BUILTIN_ASSUME +# define V8_ASSUME(condition) __builtin_assume(condition) +#elif V8_HAS_BUILTIN_UNREACHABLE +# define V8_ASSUME(condition) \ + do { if (!(condition)) __builtin_unreachable(); } while (false) +#else +# define V8_ASSUME(condition) +#endif + #if V8_HAS_BUILTIN_ASSUME_ALIGNED # define V8_ASSUME_ALIGNED(ptr, alignment) \ __builtin_assume_aligned((ptr), (alignment)) @@ -427,6 +455,16 @@ path. Add it with -I to the command line #endif +// A macro to mark a declaration as requiring constant initialization. +// Use like: +// int* foo V8_CONSTINIT; +#if V8_HAS_ATTRIBUTE_CONSTINIT +# define V8_CONSTINIT __attribute__((require_constant_initialization)) +#else +# define V8_CONSTINIT +#endif + + // A macro to mark specific arguments as non-null. // Use like: // int add(int* x, int y, int* z) V8_NONNULL(1, 3) { return *x + y + *z; } @@ -465,6 +503,34 @@ path. 
Add it with -I to the command line #endif +#if defined(V8_IMMINENT_DEPRECATION_WARNINGS) || \ + defined(V8_DEPRECATION_WARNINGS) +#if defined(V8_CC_MSVC) +# define START_ALLOW_USE_DEPRECATED() \ + __pragma(warning(push)) \ + __pragma(warning(disable : 4996)) +# define END_ALLOW_USE_DEPRECATED() __pragma(warning(pop)) +#else // !defined(V8_CC_MSVC) +# define START_ALLOW_USE_DEPRECATED() \ + _Pragma("GCC diagnostic push") \ + _Pragma("GCC diagnostic ignored \"-Wdeprecated-declarations\"") +#define END_ALLOW_USE_DEPRECATED() _Pragma("GCC diagnostic pop") +#endif // !defined(V8_CC_MSVC) +#else // !(defined(V8_IMMINENT_DEPRECATION_WARNINGS) || + // defined(V8_DEPRECATION_WARNINGS)) +#define START_ALLOW_USE_DEPRECATED() +#define END_ALLOW_USE_DEPRECATED() +#endif // !(defined(V8_IMMINENT_DEPRECATION_WARNINGS) || + // defined(V8_DEPRECATION_WARNINGS)) +#define ALLOW_COPY_AND_MOVE_WITH_DEPRECATED_FIELDS(ClassName) \ + START_ALLOW_USE_DEPRECATED() \ + ClassName(const ClassName&) = default; \ + ClassName(ClassName&&) = default; \ + ClassName& operator=(const ClassName&) = default; \ + ClassName& operator=(ClassName&&) = default; \ + END_ALLOW_USE_DEPRECATED() + + #if defined(__GNUC__) && !defined(__clang__) && (__GNUC__ < 6) # define V8_ENUM_DEPRECATED(message) # define V8_ENUM_DEPRECATE_SOON(message) @@ -507,6 +573,58 @@ path. Add it with -I to the command line #define V8_NODISCARD /* NOT SUPPORTED */ #endif +// The no_unique_address attribute allows tail padding in a non-static data +// member to overlap other members of the enclosing class (and in the special +// case when the type is empty, permits it to fully overlap other members). The +// field is laid out as if a base class were encountered at the corresponding +// point within the class (except that it does not share a vptr with the +// enclosing object). +// +// Apply to a data member like: +// +// class Foo { +// V8_NO_UNIQUE_ADDRESS Bar bar_; +// }; +// +// [[no_unique_address]] comes in C++20 but supported in clang with +// -std >= c++11. +#if V8_HAS_CPP_ATTRIBUTE_NO_UNIQUE_ADDRESS +#define V8_NO_UNIQUE_ADDRESS [[no_unique_address]] +#else +#define V8_NO_UNIQUE_ADDRESS /* NOT SUPPORTED */ +#endif + +// Marks a type as being eligible for the "trivial" ABI despite having a +// non-trivial destructor or copy/move constructor. Such types can be relocated +// after construction by simply copying their memory, which makes them eligible +// to be passed in registers. The canonical example is std::unique_ptr. +// +// Use with caution; this has some subtle effects on constructor/destructor +// ordering and will be very incorrect if the type relies on its address +// remaining constant. When used as a function argument (by value), the value +// may be constructed in the caller's stack frame, passed in a register, and +// then used and destructed in the callee's stack frame. A similar thing can +// occur when values are returned. +// +// TRIVIAL_ABI is not needed for types which have a trivial destructor and +// copy/move constructors, since those are automatically trivial by the ABI +// spec. +// +// It is also not likely to be effective on types too large to be passed in one +// or two registers on typical target ABIs. 
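+//
+// A hedged illustration (not from the original comment; close() from
+// <unistd.h> is assumed): a small move-only wrapper that stays
+// register-passable despite its non-trivial destructor and move constructor:
+//
+//   class V8_TRIVIAL_ABI OwnedFd {
+//    public:
+//     explicit OwnedFd(int fd) : fd_(fd) {}
+//     OwnedFd(OwnedFd&& other) noexcept : fd_(other.fd_) { other.fd_ = -1; }
+//     ~OwnedFd() { if (fd_ >= 0) close(fd_); }
+//    private:
+//     int fd_ = -1;
+//   };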
+// +// See also: +// https://clang.llvm.org/docs/AttributeReference.html#trivial-abi +// https://libcxx.llvm.org/docs/DesignDocs/UniquePtrTrivialAbi.html +#if defined(__clang__) && defined(__has_attribute) +#if __has_attribute(trivial_abi) +#define V8_TRIVIAL_ABI [[clang::trivial_abi]] +#endif // __has_attribute(trivial_abi) +#endif // defined(__clang__) && defined(__has_attribute) +#if !defined(V8_TRIVIAL_ABI) +#define V8_TRIVIAL_ABI +#endif //!defined(V8_TRIVIAL_ABI) + // Helper macro to define no_sanitize attributes only with clang. #if defined(__clang__) && defined(__has_attribute) #if __has_attribute(no_sanitize) @@ -553,20 +671,216 @@ V8 shared library set USING_V8_SHARED. #endif // V8_OS_WIN -// The virtual memory cage is available (i.e. defined) when pointer compression -// is enabled, but it is only used when V8_VIRTUAL_MEMORY_CAGE is enabled as -// well. This allows better test coverage of the cage. -#if defined(V8_COMPRESS_POINTERS) -#define V8_VIRTUAL_MEMORY_CAGE_IS_AVAILABLE +// clang-format on + +// Processor architecture detection. For more info on what's defined, see: +// http://msdn.microsoft.com/en-us/library/b0084kay.aspx +// http://www.agner.org/optimize/calling_conventions.pdf +// or with gcc, run: "echo | gcc -E -dM -" +// The V8_HOST_ARCH_* macros correspond to the architecture on which V8, as a +// virtual machine and compiler, runs. Don't confuse this with the architecture +// on which V8 is built. +#if defined(_M_X64) || defined(__x86_64__) +#define V8_HOST_ARCH_X64 1 +#if defined(__x86_64__) && __SIZEOF_POINTER__ == 4 // Check for x32. +#define V8_HOST_ARCH_32_BIT 1 +#else +#define V8_HOST_ARCH_64_BIT 1 +#endif +#elif defined(_M_IX86) || defined(__i386__) +#define V8_HOST_ARCH_IA32 1 +#define V8_HOST_ARCH_32_BIT 1 +#elif defined(__AARCH64EL__) || defined(_M_ARM64) +#define V8_HOST_ARCH_ARM64 1 +#define V8_HOST_ARCH_64_BIT 1 +#elif defined(__ARMEL__) +#define V8_HOST_ARCH_ARM 1 +#define V8_HOST_ARCH_32_BIT 1 +#elif defined(__mips64) +#define V8_HOST_ARCH_MIPS64 1 +#define V8_HOST_ARCH_64_BIT 1 +#elif defined(__loongarch64) +#define V8_HOST_ARCH_LOONG64 1 +#define V8_HOST_ARCH_64_BIT 1 +#elif defined(__PPC64__) || defined(_ARCH_PPC64) +#define V8_HOST_ARCH_PPC64 1 +#define V8_HOST_ARCH_64_BIT 1 +#elif defined(__PPC__) || defined(_ARCH_PPC) +#define V8_HOST_ARCH_PPC 1 +#define V8_HOST_ARCH_32_BIT 1 +#elif defined(__s390__) || defined(__s390x__) +#define V8_HOST_ARCH_S390 1 +#if defined(__s390x__) +#define V8_HOST_ARCH_64_BIT 1 +#else +#define V8_HOST_ARCH_32_BIT 1 +#endif +#elif defined(__riscv) || defined(__riscv__) +#if __riscv_xlen == 64 +#define V8_HOST_ARCH_RISCV64 1 +#define V8_HOST_ARCH_64_BIT 1 +#elif __riscv_xlen == 32 +#define V8_HOST_ARCH_RISCV32 1 +#define V8_HOST_ARCH_32_BIT 1 +#else +#error "Cannot detect Riscv's bitwidth" +#endif +#else +#error "Host architecture was not detected as supported by v8" +#endif + +// Target architecture detection. This corresponds to the architecture for which +// V8's JIT will generate code (the last stage of the canadian cross-compiler). +// The macros may be set externally. If not, detect in the same way as the host +// architecture, that is, target the native environment as presented by the +// compiler. 
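+//
+// A hypothetical cross-compile sketch (illustrative only): a build targeting
+// arm64 from an x64 host would predefine the target macro itself,
+//
+//   g++ -DV8_TARGET_ARCH_ARM64=1 ...
+//
+// so that the fallback detection below is skipped.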
+#if !V8_TARGET_ARCH_X64 && !V8_TARGET_ARCH_IA32 && !V8_TARGET_ARCH_ARM && \ + !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_MIPS64 && !V8_TARGET_ARCH_PPC && \ + !V8_TARGET_ARCH_PPC64 && !V8_TARGET_ARCH_S390 && \ + !V8_TARGET_ARCH_RISCV64 && !V8_TARGET_ARCH_LOONG64 && \ + !V8_TARGET_ARCH_RISCV32 +#if defined(_M_X64) || defined(__x86_64__) +#define V8_TARGET_ARCH_X64 1 +#elif defined(_M_IX86) || defined(__i386__) +#define V8_TARGET_ARCH_IA32 1 +#elif defined(__AARCH64EL__) || defined(_M_ARM64) +#define V8_TARGET_ARCH_ARM64 1 +#elif defined(__ARMEL__) +#define V8_TARGET_ARCH_ARM 1 +#elif defined(__mips64) +#define V8_TARGET_ARCH_MIPS64 1 +#elif defined(__loongarch64) +#define V8_TARGET_ARCH_LOONG64 1 +#elif defined(_ARCH_PPC64) +#define V8_TARGET_ARCH_PPC64 1 +#elif defined(_ARCH_PPC) +#define V8_TARGET_ARCH_PPC 1 +#elif defined(__s390__) +#define V8_TARGET_ARCH_S390 1 +#if defined(__s390x__) +#define V8_TARGET_ARCH_S390X 1 +#endif +#elif defined(__riscv) || defined(__riscv__) +#if __riscv_xlen == 64 +#define V8_TARGET_ARCH_RISCV64 1 +#elif __riscv_xlen == 32 +#define V8_TARGET_ARCH_RISCV32 1 +#endif +#else +#error Target architecture was not detected as supported by v8 +#endif #endif -// CagedPointers are currently only used if the heap sandbox is enabled. -// In the future, they will be enabled when the virtual memory cage is enabled. -#if defined(V8_HEAP_SANDBOX) -#define V8_CAGED_POINTERS +// Determine architecture pointer size. +#if V8_TARGET_ARCH_IA32 +#define V8_TARGET_ARCH_32_BIT 1 +#elif V8_TARGET_ARCH_X64 +#if !V8_TARGET_ARCH_32_BIT && !V8_TARGET_ARCH_64_BIT +#if defined(__x86_64__) && __SIZEOF_POINTER__ == 4 // Check for x32. +#define V8_TARGET_ARCH_32_BIT 1 +#else +#define V8_TARGET_ARCH_64_BIT 1 +#endif +#endif +#elif V8_TARGET_ARCH_ARM +#define V8_TARGET_ARCH_32_BIT 1 +#elif V8_TARGET_ARCH_ARM64 +#define V8_TARGET_ARCH_64_BIT 1 +#elif V8_TARGET_ARCH_MIPS +#define V8_TARGET_ARCH_32_BIT 1 +#elif V8_TARGET_ARCH_MIPS64 +#define V8_TARGET_ARCH_64_BIT 1 +#elif V8_TARGET_ARCH_LOONG64 +#define V8_TARGET_ARCH_64_BIT 1 +#elif V8_TARGET_ARCH_PPC +#define V8_TARGET_ARCH_32_BIT 1 +#elif V8_TARGET_ARCH_PPC64 +#define V8_TARGET_ARCH_64_BIT 1 +#elif V8_TARGET_ARCH_S390 +#if V8_TARGET_ARCH_S390X +#define V8_TARGET_ARCH_64_BIT 1 +#else +#define V8_TARGET_ARCH_32_BIT 1 +#endif +#elif V8_TARGET_ARCH_RISCV64 +#define V8_TARGET_ARCH_64_BIT 1 +#elif V8_TARGET_ARCH_RISCV32 +#define V8_TARGET_ARCH_32_BIT 1 +#else +#error Unknown target architecture pointer size #endif -// clang-format on +// Check for supported combinations of host and target architectures. 
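+// For example (reading the checks below), V8_TARGET_ARCH_ARM64 together with
+// V8_HOST_ARCH_X64 is accepted (a simulator or cross build), while
+// V8_TARGET_ARCH_ARM64 on an ia32 host trips an #error.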
+#if V8_TARGET_ARCH_IA32 && !V8_HOST_ARCH_IA32 +#error Target architecture ia32 is only supported on ia32 host +#endif +#if (V8_TARGET_ARCH_X64 && V8_TARGET_ARCH_64_BIT && \ + !((V8_HOST_ARCH_X64 || V8_HOST_ARCH_ARM64) && V8_HOST_ARCH_64_BIT)) +#error Target architecture x64 is only supported on x64 and arm64 host +#endif +#if (V8_TARGET_ARCH_X64 && V8_TARGET_ARCH_32_BIT && \ + !(V8_HOST_ARCH_X64 && V8_HOST_ARCH_32_BIT)) +#error Target architecture x32 is only supported on x64 host with x32 support +#endif +#if (V8_TARGET_ARCH_ARM && !(V8_HOST_ARCH_IA32 || V8_HOST_ARCH_ARM)) +#error Target architecture arm is only supported on arm and ia32 host +#endif +#if (V8_TARGET_ARCH_ARM64 && !(V8_HOST_ARCH_X64 || V8_HOST_ARCH_ARM64)) +#error Target architecture arm64 is only supported on arm64 and x64 host +#endif +#if (V8_TARGET_ARCH_MIPS64 && !(V8_HOST_ARCH_X64 || V8_HOST_ARCH_MIPS64)) +#error Target architecture mips64 is only supported on mips64 and x64 host +#endif +#if (V8_TARGET_ARCH_RISCV64 && !(V8_HOST_ARCH_X64 || V8_HOST_ARCH_RISCV64)) +#error Target architecture riscv64 is only supported on riscv64 and x64 host +#endif +#if (V8_TARGET_ARCH_RISCV32 && !(V8_HOST_ARCH_IA32 || V8_HOST_ARCH_RISCV32)) +#error Target architecture riscv32 is only supported on riscv32 and ia32 host +#endif +#if (V8_TARGET_ARCH_LOONG64 && !(V8_HOST_ARCH_X64 || V8_HOST_ARCH_LOONG64)) +#error Target architecture loong64 is only supported on loong64 and x64 host +#endif + +// Determine architecture endianness. +#if V8_TARGET_ARCH_IA32 +#define V8_TARGET_LITTLE_ENDIAN 1 +#elif V8_TARGET_ARCH_X64 +#define V8_TARGET_LITTLE_ENDIAN 1 +#elif V8_TARGET_ARCH_ARM +#define V8_TARGET_LITTLE_ENDIAN 1 +#elif V8_TARGET_ARCH_ARM64 +#define V8_TARGET_LITTLE_ENDIAN 1 +#elif V8_TARGET_ARCH_LOONG64 +#define V8_TARGET_LITTLE_ENDIAN 1 +#elif V8_TARGET_ARCH_MIPS64 +#if defined(__MIPSEB__) || defined(V8_TARGET_ARCH_MIPS64_BE) +#define V8_TARGET_BIG_ENDIAN 1 +#else +#define V8_TARGET_LITTLE_ENDIAN 1 +#endif +#elif defined(__BIG_ENDIAN__) // FOR PPCGR on AIX +#define V8_TARGET_BIG_ENDIAN 1 +#elif V8_TARGET_ARCH_PPC_LE +#define V8_TARGET_LITTLE_ENDIAN 1 +#elif V8_TARGET_ARCH_PPC_BE +#define V8_TARGET_BIG_ENDIAN 1 +#elif V8_TARGET_ARCH_S390 +#if V8_TARGET_ARCH_S390_LE_SIM +#define V8_TARGET_LITTLE_ENDIAN 1 +#else +#define V8_TARGET_BIG_ENDIAN 1 +#endif +#elif V8_TARGET_ARCH_RISCV32 || V8_TARGET_ARCH_RISCV64 +#define V8_TARGET_LITTLE_ENDIAN 1 +#elif defined(__BYTE_ORDER__) +#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__ +#define V8_TARGET_BIG_ENDIAN 1 +#else +#define V8_TARGET_LITTLE_ENDIAN 1 +#endif +#else +#error Unknown target architecture endianness +#endif #undef V8_HAS_CPP_ATTRIBUTE diff --git a/deps/linux_arm64/libv8.a b/deps/linux_arm64/libv8.a index e7854f4b..9193cc48 100644 Binary files a/deps/linux_arm64/libv8.a and b/deps/linux_arm64/libv8.a differ diff --git a/deps/linux_x86_64/libv8.a b/deps/linux_x86_64/libv8.a index f6bfe25e..86428c9f 100644 Binary files a/deps/linux_x86_64/libv8.a and b/deps/linux_x86_64/libv8.a differ diff --git a/deps/v8 b/deps/v8 index 41de6611..4b4e4733 160000 --- a/deps/v8 +++ b/deps/v8 @@ -1 +1 @@ -Subproject commit 41de66111ed4ab6aec3d798a2ded2c1b730dcb71 +Subproject commit 4b4e473387ed62f7fcbc95a3bf05244ea0e76a0a diff --git a/deps/v8_version b/deps/v8_version index 5d0c8d02..ff8f5c65 100644 --- a/deps/v8_version +++ b/deps/v8_version @@ -1 +1 @@ -9.7.106.19 \ No newline at end of file +10.9.194.9 \ No newline at end of file diff --git a/isolate.go b/isolate.go index 661fbec0..6e7f9de6 100644 --- a/isolate.go 
+++ b/isolate.go @@ -52,9 +52,7 @@ type HeapStatistics struct { // An *Isolate can be used as a v8go.ContextOption to create a new // Context, rather than creating a new default Isolate. func NewIsolate() *Isolate { - v8once.Do(func() { - C.Init() - }) + initializeIfNecessary() iso := &Isolate{ ptr: C.NewIsolate(), cbs: make(map[int]FunctionCallback), diff --git a/v8go.cc b/v8go.cc index b435db59..86cdc874 100644 --- a/v8go.cc +++ b/v8go.cc @@ -19,7 +19,7 @@ using namespace v8; auto default_platform = platform::NewDefaultPlatform(); -auto default_allocator = ArrayBuffer::Allocator::NewDefaultAllocator(); +ArrayBuffer::Allocator* default_allocator; const int ScriptCompilerNoCompileOptions = ScriptCompiler::kNoCompileOptions; const int ScriptCompilerConsumeCodeCache = ScriptCompiler::kConsumeCodeCache; @@ -143,6 +143,8 @@ void Init() { #endif V8::InitializePlatform(default_platform.get()); V8::Initialize(); + + default_allocator = ArrayBuffer::Allocator::NewDefaultAllocator(); return; } @@ -237,7 +239,7 @@ RtnUnboundScript IsolateCompileUnboundScript(IsolatePtr iso, opts.cachedData.length); } - ScriptOrigin script_origin(ogn); + ScriptOrigin script_origin(iso, ogn); ScriptCompiler::Source source(src, script_origin, cached_data); @@ -639,7 +641,7 @@ RtnValue RunScript(ContextPtr ctx, const char* source, const char* origin) { return rtn; } - ScriptOrigin script_origin(ogn); + ScriptOrigin script_origin(iso, ogn); Local