From 09f134fccf605476e0a0b8df01164946c5b1236a Mon Sep 17 00:00:00 2001
From: Anna Henningsen <anna@addaleax.net>
Date: Fri, 15 Mar 2019 22:58:05 +0100
Subject: [PATCH] deps: V8: cherry-pick 392316d
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Original commit message:

    [ptr-compr][x64] Define kTaggedSize as kInt32Size

    ... when pointer compression is enabled and some number of cleanups.

    Bug: v8:7703
    Change-Id: If7344abf68a1c4d54e4a79d066dc185f25055d7d
    Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/1477737
    Commit-Queue: Igor Sheludko <ishell@chromium.org>
    Auto-Submit: Igor Sheludko <ishell@chromium.org>
    Reviewed-by: Ulan Degenbaev <ulan@chromium.org>
    Reviewed-by: Toon Verwaest <verwaest@chromium.org>
    Cr-Commit-Position: refs/heads/master@{#60056}

Refs: https://github.com/v8/v8/commit/392316ddd11104ad759131732dfe0805f3972ab2
PR-URL: https://github.com/nodejs/node/pull/26685
Reviewed-By: Anna Henningsen <anna@addaleax.net>
Reviewed-By: Michaël Zasso <targos@protonmail.com>
Reviewed-By: Refael Ackermann <refack@gmail.com>
---
 common.gypi                                   |  2 +-
 deps/v8/include/v8-internal.h                 | 46 +++------
 deps/v8/include/v8.h                          | 38 +++++++-
 deps/v8/src/globals.h                         | 46 ++++++----
 deps/v8/src/memcopy.h                         | 26 ++++++
 deps/v8/src/objects-inl.h                     | 21 +----
 deps/v8/src/objects/code.h                    |  4 +-
 deps/v8/src/objects/embedder-data-slot-inl.h  | 82 +++++++++---------
 deps/v8/src/objects/embedder-data-slot.h      | 35 ++++----
 deps/v8/src/objects/slots-atomic-inl.h        |  2 +-
 deps/v8/src/objects/slots-inl.h               | 11 +--
 deps/v8/src/objects/slots.h                   | 23 +++--
 deps/v8/src/ptr-compr-inl.h                   |  4 -
 deps/v8/src/ptr-compr.h                       | 18 ++--
 deps/v8/src/snapshot/partial-serializer.cc    |  2 +-
 deps/v8/src/x64/assembler-x64.h               | 83 +++++++++----------
 deps/v8/src/x64/macro-assembler-x64.cc        |  8 +-
 deps/v8/test/cctest/compiler/codegen-tester.h | 15 +++-
 .../cctest/compiler/test-run-load-store.cc    | 13 ++-
 19 files changed, 255 insertions(+), 224 deletions(-)

diff --git a/common.gypi b/common.gypi
index 4f0143220163b1..e2dc19aeee32c6 100644
--- a/common.gypi
+++ b/common.gypi
@@ -37,7 +37,7 @@

     # Reset this number to 0 on major V8 upgrades.
     # Increment by one for each non-official patch applied to deps/v8.
-    'v8_embedder_string': '-node.1',
+    'v8_embedder_string': '-node.2',

     ##### V8 defaults for Node.js #####

diff --git a/deps/v8/include/v8-internal.h b/deps/v8/include/v8-internal.h
index bb69bb915dd5f2..5cc62f3e726f7c 100644
--- a/deps/v8/include/v8-internal.h
+++ b/deps/v8/include/v8-internal.h
@@ -29,7 +29,6 @@ static const Address kNullAddress = 0;
  * Configuration of tagging scheme.
  */
 const int kApiSystemPointerSize = sizeof(void*);
-const int kApiTaggedSize = kApiSystemPointerSize;
 const int kApiDoubleSize = sizeof(double);
 const int kApiInt32Size = sizeof(int32_t);
 const int kApiInt64Size = sizeof(int64_t);
@@ -92,6 +91,9 @@ struct SmiTagging<8> {
 static_assert(
     kApiSystemPointerSize == kApiInt64Size,
     "Pointer compression can be enabled only for 64-bit architectures");
+const int kApiTaggedSize = kApiInt32Size;
+#else
+const int kApiTaggedSize = kApiSystemPointerSize;
 #endif

 #ifdef V8_31BIT_SMIS_ON_64BIT_ARCH
@@ -131,11 +133,7 @@ class Internals {
   static const int kJSObjectHeaderSize = 3 * kApiTaggedSize;
   static const int kFixedArrayHeaderSize = 2 * kApiTaggedSize;
   static const int kEmbedderDataArrayHeaderSize = 2 * kApiTaggedSize;
-  static const int kEmbedderDataSlotSize =
-#ifdef V8_COMPRESS_POINTERS
-      2 *
-#endif
-      kApiSystemPointerSize;
+  static const int kEmbedderDataSlotSize = kApiSystemPointerSize;
   static const int kNativeContextEmbedderDataOffset = 7 * kApiTaggedSize;
   static const int kFullStringRepresentationMask = 0x0f;
   static const int kStringEncodingMask = 0x8;
@@ -301,22 +299,8 @@ class Internals {
 #endif
   }

-  V8_INLINE static internal::Address ReadTaggedAnyField(
-      internal::Address heap_object_ptr, int offset) {
-#ifdef V8_COMPRESS_POINTERS
-    int32_t value = ReadRawField<int32_t>(heap_object_ptr, offset);
-    internal::Address root_mask = static_cast<internal::Address>(
-        -static_cast<intptr_t>(value & kSmiTagMask));
-    internal::Address root_or_zero =
-        root_mask & GetRootFromOnHeapAddress(heap_object_ptr);
-    return root_or_zero +
-           static_cast<internal::Address>(static_cast<intptr_t>(value));
-#else
-    return ReadRawField<internal::Address>(heap_object_ptr, offset);
-#endif
-  }
-
 #ifdef V8_COMPRESS_POINTERS
+  // See v8:7703 or src/ptr-compr.* for details about pointer compression.
   static constexpr size_t kPtrComprHeapReservationSize = size_t{1} << 32;
   static constexpr size_t kPtrComprIsolateRootBias =
       kPtrComprHeapReservationSize / 2;
@@ -328,18 +312,14 @@ class Internals {
         -static_cast<internal::Address>(kPtrComprIsolateRootAlignment);
   }

-#else
-
-  template <typename T>
-  V8_INLINE static T ReadEmbedderData(const v8::Context* context, int index) {
-    typedef internal::Address A;
-    typedef internal::Internals I;
-    A ctx = *reinterpret_cast<const A*>(context);
-    A embedder_data =
-        I::ReadTaggedPointerField(ctx, I::kNativeContextEmbedderDataOffset);
-    int value_offset =
-        I::kEmbedderDataArrayHeaderSize + (I::kEmbedderDataSlotSize * index);
-    return I::ReadRawField<T>(embedder_data, value_offset);
+  V8_INLINE static internal::Address DecompressTaggedAnyField(
+      internal::Address heap_object_ptr, int32_t value) {
+    internal::Address root_mask = static_cast<internal::Address>(
+        -static_cast<intptr_t>(value & kSmiTagMask));
+    internal::Address root_or_zero =
+        root_mask & GetRootFromOnHeapAddress(heap_object_ptr);
+    return root_or_zero +
+           static_cast<internal::Address>(static_cast<intptr_t>(value));
   }
 #endif  // V8_COMPRESS_POINTERS
 };

diff --git a/deps/v8/include/v8.h b/deps/v8/include/v8.h
index 987748ca003fa9..5e45cc762079d3 100644
--- a/deps/v8/include/v8.h
+++ b/deps/v8/include/v8.h
@@ -10322,7 +10322,7 @@ AccessorSignature* AccessorSignature::Cast(Data* data) {
 }

 Local<Value> Object::GetInternalField(int index) {
-#if !defined(V8_ENABLE_CHECKS) && !defined(V8_COMPRESS_POINTERS)
+#ifndef V8_ENABLE_CHECKS
   typedef internal::Address A;
   typedef internal::Internals I;
   A obj = *reinterpret_cast<A*>(this);
@@ -10333,7 +10333,12 @@ Local<Value> Object::GetInternalField(int index) {
       instance_type == I::kJSApiObjectType ||
       instance_type == I::kJSSpecialApiObjectType) {
     int offset = I::kJSObjectHeaderSize + (I::kEmbedderDataSlotSize * index);
-    A value = I::ReadTaggedAnyField(obj, offset);
+    A value = I::ReadRawField<A>(obj, offset);
+#ifdef V8_COMPRESS_POINTERS
+    // We read the full pointer value and then decompress it in order to avoid
+    // dealing with potential endianness issues.
+    value = I::DecompressTaggedAnyField(obj, static_cast<int32_t>(value));
+#endif
     internal::Isolate* isolate =
         internal::IsolateFromNeverReadOnlySpaceObject(obj);
     A* result = HandleScope::CreateHandle(isolate, value);
@@ -10345,7 +10350,7 @@ Local<Value> Object::GetInternalField(int index) {
 }

 void* Object::GetAlignedPointerFromInternalField(int index) {
-#if !defined(V8_ENABLE_CHECKS) && !defined(V8_COMPRESS_POINTERS)
+#ifndef V8_ENABLE_CHECKS
   typedef internal::Address A;
   typedef internal::Internals I;
   A obj = *reinterpret_cast<A*>(this);
@@ -10956,13 +10961,24 @@ int64_t Isolate::AdjustAmountOfExternalAllocatedMemory(
 }

 Local<Value> Context::GetEmbedderData(int index) {
-#if !defined(V8_ENABLE_CHECKS) && !defined(V8_COMPRESS_POINTERS)
+#ifndef V8_ENABLE_CHECKS
   typedef internal::Address A;
   typedef internal::Internals I;
+  A ctx = *reinterpret_cast<const A*>(this);
+  A embedder_data =
+      I::ReadTaggedPointerField(ctx, I::kNativeContextEmbedderDataOffset);
+  int value_offset =
+      I::kEmbedderDataArrayHeaderSize + (I::kEmbedderDataSlotSize * index);
+  A value = I::ReadRawField<A>(embedder_data, value_offset);
+#ifdef V8_COMPRESS_POINTERS
+  // We read the full pointer value and then decompress it in order to avoid
+  // dealing with potential endianness issues.
+  value =
+      I::DecompressTaggedAnyField(embedder_data, static_cast<int32_t>(value));
+#endif
   internal::Isolate* isolate = internal::IsolateFromNeverReadOnlySpaceObject(
       *reinterpret_cast<A*>(this));
-  A* result =
-      HandleScope::CreateHandle(isolate, I::ReadEmbedderData<A>(this, index));
+  A* result = HandleScope::CreateHandle(isolate, value);
   return Local<Value>(reinterpret_cast<Value*>(result));
 #else
   return SlowGetEmbedderData(index);
@@ -10971,9 +10987,15 @@ Local<Value> Context::GetEmbedderData(int index) {
 }

 void* Context::GetAlignedPointerFromEmbedderData(int index) {
-#if !defined(V8_ENABLE_CHECKS) && !defined(V8_COMPRESS_POINTERS)
+#ifndef V8_ENABLE_CHECKS
+  typedef internal::Address A;
   typedef internal::Internals I;
-  return I::ReadEmbedderData<void*>(this, index);
+  A ctx = *reinterpret_cast<const A*>(this);
+  A embedder_data =
+      I::ReadTaggedPointerField(ctx, I::kNativeContextEmbedderDataOffset);
+  int value_offset =
+      I::kEmbedderDataArrayHeaderSize + (I::kEmbedderDataSlotSize * index);
+  return I::ReadRawField<void*>(embedder_data, value_offset);
 #else
   return SlowGetAlignedPointerFromEmbedderData(index);
 #endif

diff --git a/deps/v8/src/globals.h b/deps/v8/src/globals.h
index da6b889b484e8f..15fb183083ad69 100644
--- a/deps/v8/src/globals.h
+++ b/deps/v8/src/globals.h
@@ -200,28 +200,47 @@ constexpr size_t kReservedCodeRangePages = 0;

 STATIC_ASSERT(kSystemPointerSize == (1 << kSystemPointerSizeLog2));

+#ifdef V8_COMPRESS_POINTERS
+static_assert(
+    kSystemPointerSize == kInt64Size,
+    "Pointer compression can be enabled only for 64-bit architectures");
+
+constexpr int kTaggedSize = kInt32Size;
+constexpr int kTaggedSizeLog2 = 2;
+
+// These types define raw and atomic storage types for tagged values stored
+// on V8 heap.
+using Tagged_t = int32_t;
+using AtomicTagged_t = base::Atomic32;
+
+#else
+
 constexpr int kTaggedSize = kSystemPointerSize;
 constexpr int kTaggedSizeLog2 = kSystemPointerSizeLog2;
-STATIC_ASSERT(kTaggedSize == (1 << kTaggedSizeLog2));

 // These types define raw and atomic storage types for tagged values stored
 // on V8 heap.
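For reference, not part of the patch itself: the decompression these hunks rely on (DecompressTaggedAnyField() above, and the narrow Tagged_t below) reduces to the following self-contained sketch. The names are simplified stand-ins for the V8-internal ones, and a 64-bit build with a 4 GB-aligned heap reservation is assumed.

    #include <cstdint>

    using Address = uint64_t;
    constexpr Address kSmiTagMask = 1;  // low bit: 0 = Smi, 1 = heap object
    constexpr Address kRootAlignment = Address{1} << 32;  // 4 GB

    // Any on-heap address rounded down to the 4 GB boundary yields the
    // isolate root of the reservation that contains it.
    inline Address GetRootFromOnHeapAddress(Address addr) {
      return addr & ~(kRootAlignment - 1);
    }

    // Smis are just sign-extended; heap references additionally get the
    // isolate root added back in. The branch-free mask trick mirrors the
    // code in the v8-internal.h hunk above.
    inline Address DecompressTaggedAny(Address on_heap_addr, int32_t value) {
      Address root_mask =
          static_cast<Address>(-static_cast<intptr_t>(value & kSmiTagMask));
      Address root_or_zero = root_mask & GetRootFromOnHeapAddress(on_heap_addr);
      return root_or_zero + static_cast<Address>(static_cast<intptr_t>(value));
    }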
 using Tagged_t = Address;
 using AtomicTagged_t = base::AtomicWord;
+
+#endif  // V8_COMPRESS_POINTERS
+
+STATIC_ASSERT(kTaggedSize == (1 << kTaggedSizeLog2));
+
 using AsAtomicTagged = base::AsAtomicPointerImpl<AtomicTagged_t>;
 STATIC_ASSERT(sizeof(Tagged_t) == kTaggedSize);
 STATIC_ASSERT(sizeof(AtomicTagged_t) == kTaggedSize);

+STATIC_ASSERT(kTaggedSize == kApiTaggedSize);
+
 // TODO(ishell): use kTaggedSize or kSystemPointerSize instead.
+#ifndef V8_COMPRESS_POINTERS
 constexpr int kPointerSize = kSystemPointerSize;
 constexpr int kPointerSizeLog2 = kSystemPointerSizeLog2;
 STATIC_ASSERT(kPointerSize == (1 << kPointerSizeLog2));
-
-constexpr int kEmbedderDataSlotSize =
-#ifdef V8_COMPRESS_POINTERS
-    kTaggedSize +
 #endif
-    kTaggedSize;
+
+constexpr int kEmbedderDataSlotSize = kSystemPointerSize;

 constexpr int kEmbedderDataSlotSizeInTaggedSlots =
     kEmbedderDataSlotSize / kTaggedSize;
@@ -870,24 +889,24 @@ constexpr int kIeeeDoubleExponentWordOffset = 0;
                               ::i::kHeapObjectTag))

 // OBJECT_POINTER_ALIGN returns the value aligned as a HeapObject pointer
-#define OBJECT_POINTER_ALIGN(value) \
-  (((value) + kObjectAlignmentMask) & ~kObjectAlignmentMask)
+#define OBJECT_POINTER_ALIGN(value) \
+  (((value) + ::i::kObjectAlignmentMask) & ~::i::kObjectAlignmentMask)

 // OBJECT_POINTER_PADDING returns the padding size required to align value
 // as a HeapObject pointer
 #define OBJECT_POINTER_PADDING(value) (OBJECT_POINTER_ALIGN(value) - (value))

 // POINTER_SIZE_ALIGN returns the value aligned as a system pointer.
-#define POINTER_SIZE_ALIGN(value) \
-  (((value) + kPointerAlignmentMask) & ~kPointerAlignmentMask)
+#define POINTER_SIZE_ALIGN(value) \
+  (((value) + ::i::kPointerAlignmentMask) & ~::i::kPointerAlignmentMask)

 // POINTER_SIZE_PADDING returns the padding size required to align value
 // as a system pointer.
 #define POINTER_SIZE_PADDING(value) (POINTER_SIZE_ALIGN(value) - (value))

 // CODE_POINTER_ALIGN returns the value aligned as a generated code segment.
-#define CODE_POINTER_ALIGN(value) \
-  (((value) + kCodeAlignmentMask) & ~kCodeAlignmentMask)
+#define CODE_POINTER_ALIGN(value) \
+  (((value) + ::i::kCodeAlignmentMask) & ~::i::kCodeAlignmentMask)

 // CODE_POINTER_PADDING returns the padding size required to align value
 // as a generated code segment.
@@ -895,8 +914,7 @@ constexpr int kIeeeDoubleExponentWordOffset = 0;

 // DOUBLE_POINTER_ALIGN returns the value aligned for double pointers.
 #define DOUBLE_POINTER_ALIGN(value) \
-  (((value) + kDoubleAlignmentMask) & ~kDoubleAlignmentMask)
-
+  (((value) + ::i::kDoubleAlignmentMask) & ~::i::kDoubleAlignmentMask)

 // Defines hints about receiver values based on structural knowledge.
 enum class ConvertReceiverMode : unsigned {

diff --git a/deps/v8/src/memcopy.h b/deps/v8/src/memcopy.h
index 1229ec916da4c7..79e6e3c955f0a5 100644
--- a/deps/v8/src/memcopy.h
+++ b/deps/v8/src/memcopy.h
@@ -138,6 +138,32 @@ inline void CopyBytes(T* dst, const T* src, size_t num_bytes) {
   CopyImpl<kMinComplexMemCopy>(dst, src, num_bytes);
 }

+inline void MemsetInt32(int32_t* dest, int32_t value, size_t counter) {
+#if V8_HOST_ARCH_IA32 || V8_HOST_ARCH_X64
+#define STOS "stosl"
+#endif
+
+#if defined(MEMORY_SANITIZER)
+  // MemorySanitizer does not understand inline assembly.
+#undef STOS
+#endif
+
+#if defined(__GNUC__) && defined(STOS)
+  asm volatile(
+      "cld;"
+      "rep ; " STOS
+      : "+&c"(counter), "+&D"(dest)
+      : "a"(value)
+      : "memory", "cc");
+#else
+  for (size_t i = 0; i < counter; i++) {
+    dest[i] = value;
+  }
+#endif
+
+#undef STOS
+}
+
 inline void MemsetPointer(Address* dest, Address value, size_t counter) {
 #if V8_HOST_ARCH_IA32
 #define STOS "stosl"

diff --git a/deps/v8/src/objects-inl.h b/deps/v8/src/objects-inl.h
index 4ea613067c2c6a..6f146241b13ee2 100644
--- a/deps/v8/src/objects-inl.h
+++ b/deps/v8/src/objects-inl.h
@@ -614,33 +614,18 @@ HeapObject MapWord::ToForwardingAddress() {
 #ifdef VERIFY_HEAP
 void HeapObject::VerifyObjectField(Isolate* isolate, int offset) {
   VerifyPointer(isolate, READ_FIELD(*this, offset));
-#ifdef V8_COMPRESS_POINTERS
-  STATIC_ASSERT(kTaggedSize == kSystemPointerSize);
-  // Ensure upper 32-bits are zeros.
-  Address value = *(FullObjectSlot(FIELD_ADDR(*this, offset)).location());
-  CHECK_EQ(kNullAddress, RoundDown(value));
-#endif
+  STATIC_ASSERT(!COMPRESS_POINTERS_BOOL || kTaggedSize == kInt32Size);
 }

 void HeapObject::VerifyMaybeObjectField(Isolate* isolate, int offset) {
   MaybeObject::VerifyMaybeObjectPointer(isolate,
                                         READ_WEAK_FIELD(*this, offset));
-#ifdef V8_COMPRESS_POINTERS
-  STATIC_ASSERT(kTaggedSize == kSystemPointerSize);
-  // Ensure upper 32-bits are zeros.
-  Address value = *(FullObjectSlot(FIELD_ADDR(*this, offset)).location());
-  CHECK_EQ(kNullAddress, RoundDown(value));
-#endif
+  STATIC_ASSERT(!COMPRESS_POINTERS_BOOL || kTaggedSize == kInt32Size);
 }

 void HeapObject::VerifySmiField(int offset) {
   CHECK(READ_FIELD(*this, offset)->IsSmi());
-#ifdef V8_COMPRESS_POINTERS
-  STATIC_ASSERT(kTaggedSize == kSystemPointerSize);
-  // Ensure upper 32-bits are zeros.
-  Address value = *(FullObjectSlot(FIELD_ADDR(*this, offset)).location());
-  CHECK_EQ(kNullAddress, RoundDown(value));
-#endif
+  STATIC_ASSERT(!COMPRESS_POINTERS_BOOL || kTaggedSize == kInt32Size);
 }
 #endif

diff --git a/deps/v8/src/objects/code.h b/deps/v8/src/objects/code.h
index aa4c820b956fa4..a6a4c038131ac1 100644
--- a/deps/v8/src/objects/code.h
+++ b/deps/v8/src/objects/code.h
@@ -402,11 +402,11 @@ class Code : public HeapObject {
   // This documents the amount of free space we have in each Code object header
   // due to padding for code alignment.
 #if V8_TARGET_ARCH_ARM64
-  static constexpr int kHeaderPaddingSize = 0;
+  static constexpr int kHeaderPaddingSize = COMPRESS_POINTERS_BOOL ? 20 : 0;
 #elif V8_TARGET_ARCH_MIPS64
   static constexpr int kHeaderPaddingSize = 0;
 #elif V8_TARGET_ARCH_X64
-  static constexpr int kHeaderPaddingSize = 0;
+  static constexpr int kHeaderPaddingSize = COMPRESS_POINTERS_BOOL ? 20 : 0;
 #elif V8_TARGET_ARCH_ARM
   static constexpr int kHeaderPaddingSize = 20;
 #elif V8_TARGET_ARCH_IA32

diff --git a/deps/v8/src/objects/embedder-data-slot-inl.h b/deps/v8/src/objects/embedder-data-slot-inl.h
index 1a4d85d778ee61..7762479cf9ce44 100644
--- a/deps/v8/src/objects/embedder-data-slot-inl.h
+++ b/deps/v8/src/objects/embedder-data-slot-inl.h
@@ -33,6 +33,7 @@ Object EmbedderDataSlot::load_tagged() const {
 void EmbedderDataSlot::store_smi(Smi value) {
   ObjectSlot(address() + kTaggedPayloadOffset).Relaxed_Store(value);
 #ifdef V8_COMPRESS_POINTERS
+  // See gc_safe_store() for the reasons behind two stores.
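The "two stores" that this and the following comments reference can be pictured with a standalone sketch, not part of the patch: under pointer compression an embedder slot is only guaranteed kTaggedSize (4-byte) alignment, so one 64-bit store is not necessarily atomic; instead the tagged half is written atomically and the raw half separately. EmbedderSlotSketch and its fields are hypothetical stand-ins, assuming a little-endian 64-bit build.

    #include <atomic>
    #include <cstdint>

    // Hypothetical stand-in for an embedder data slot on a pointer-compressed
    // heap: two 32-bit halves, each written atomically on its own.
    struct EmbedderSlotSketch {
      std::atomic<uint32_t> tagged_lo;  // the concurrent marker reads this half
      std::atomic<uint32_t> raw_hi;     // opaque to the GC

      void gc_safe_store(uint64_t value) {
        tagged_lo.store(static_cast<uint32_t>(value),
                        std::memory_order_relaxed);
        raw_hi.store(static_cast<uint32_t>(value >> 32),
                     std::memory_order_relaxed);
      }

      uint64_t load_raw() const {
        uint64_t lo = tagged_lo.load(std::memory_order_relaxed);
        uint64_t hi = raw_hi.load(std::memory_order_relaxed);
        return lo | (hi << 32);
      }
    };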
   ObjectSlot(address() + kRawPayloadOffset).Relaxed_Store(Smi::kZero);
 #endif
 }
@@ -43,8 +44,9 @@ void EmbedderDataSlot::store_tagged(EmbedderDataArray array, int entry_index,
   int slot_offset = EmbedderDataArray::OffsetOfElementAt(entry_index);
   ObjectSlot(FIELD_ADDR(array, slot_offset + kTaggedPayloadOffset))
       .Relaxed_Store(value);
-  WRITE_BARRIER(array, slot_offset, value);
+  WRITE_BARRIER(array, slot_offset + kTaggedPayloadOffset, value);
 #ifdef V8_COMPRESS_POINTERS
+  // See gc_safe_store() for the reasons behind two stores.
   ObjectSlot(FIELD_ADDR(array, slot_offset + kRawPayloadOffset))
       .Relaxed_Store(Smi::kZero);
 #endif
@@ -56,68 +58,64 @@ void EmbedderDataSlot::store_tagged(JSObject object, int embedder_field_index,
   int slot_offset = object->GetEmbedderFieldOffset(embedder_field_index);
   ObjectSlot(FIELD_ADDR(object, slot_offset + kTaggedPayloadOffset))
       .Relaxed_Store(value);
-  WRITE_BARRIER(object, slot_offset, value);
+  WRITE_BARRIER(object, slot_offset + kTaggedPayloadOffset, value);
 #ifdef V8_COMPRESS_POINTERS
+  // See gc_safe_store() for the reasons behind two stores.
   ObjectSlot(FIELD_ADDR(object, slot_offset + kRawPayloadOffset))
       .Relaxed_Store(Smi::kZero);
 #endif
 }

 bool EmbedderDataSlot::ToAlignedPointer(void** out_pointer) const {
-  Object tagged_value =
-      ObjectSlot(address() + kTaggedPayloadOffset).Relaxed_Load();
-  if (!tagged_value->IsSmi()) return false;
-#ifdef V8_COMPRESS_POINTERS
-  STATIC_ASSERT(kSmiShiftSize == 0);
-  STATIC_ASSERT(SmiValuesAre31Bits());
-  Address value_lo = static_cast<uint32_t>(tagged_value->ptr());
-  STATIC_ASSERT(kTaggedSize == kSystemPointerSize);
-  Address value_hi =
-      FullObjectSlot(address() + kRawPayloadOffset).Relaxed_Load()->ptr();
-  Address value = value_lo | (value_hi << 32);
-  *out_pointer = reinterpret_cast<void*>(value);
-#else
-  *out_pointer = reinterpret_cast<void*>(tagged_value->ptr());
-#endif
-  return true;
+  // We don't care about atomicity of access here because embedder slots
+  // are accessed this way only from the main thread via API during "mutator"
+  // phase which is properly synched with GC (concurrent marker may still look
+  // at the tagged part of the embedder slot but read-only access is ok).
+  Address raw_value = *location();
+  *out_pointer = reinterpret_cast<void*>(raw_value);
+  return HAS_SMI_TAG(raw_value);
 }

 bool EmbedderDataSlot::store_aligned_pointer(void* ptr) {
   Address value = reinterpret_cast<Address>(ptr);
   if (!HAS_SMI_TAG(value)) return false;
-#ifdef V8_COMPRESS_POINTERS
-  STATIC_ASSERT(kSmiShiftSize == 0);
-  STATIC_ASSERT(SmiValuesAre31Bits());
-  // Sign-extend lower 32-bits in order to form a proper Smi value.
-  STATIC_ASSERT(kTaggedSize == kSystemPointerSize);
-  Address lo = static_cast<intptr_t>(static_cast<int32_t>(value));
-  ObjectSlot(address() + kTaggedPayloadOffset).Relaxed_Store(Smi(lo));
-  Address hi = value >> 32;
-  ObjectSlot(address() + kRawPayloadOffset).Relaxed_Store(Object(hi));
-#else
-  ObjectSlot(address() + kTaggedPayloadOffset).Relaxed_Store(Smi(value));
-#endif
+  gc_safe_store(value);
   return true;
 }

 EmbedderDataSlot::RawData EmbedderDataSlot::load_raw(
     const DisallowHeapAllocation& no_gc) const {
-  STATIC_ASSERT(kTaggedSize == kSystemPointerSize);
-  return RawData{
-      ObjectSlot(address() + kTaggedPayloadOffset).Relaxed_Load()->ptr(),
-#ifdef V8_COMPRESS_POINTERS
-      FullObjectSlot(address() + kRawPayloadOffset).Relaxed_Load()->ptr()
-#endif
-  };
+  // We don't care about atomicity of access here because embedder slots
+  // are accessed this way only by serializer from the main thread when
+  // GC is not active (concurrent marker may still look at the tagged part
+  // of the embedder slot but read-only access is ok).
+  return *location();
 }

-void EmbedderDataSlot::store_raw(const EmbedderDataSlot::RawData& data,
+void EmbedderDataSlot::store_raw(EmbedderDataSlot::RawData data,
                                  const DisallowHeapAllocation& no_gc) {
-  ObjectSlot(address() + kTaggedPayloadOffset)
-      .Relaxed_Store(Object(data.data_[0]));
+  gc_safe_store(data);
+}
+
+void EmbedderDataSlot::gc_safe_store(Address value) {
 #ifdef V8_COMPRESS_POINTERS
-  ObjectSlot(address() + kRawPayloadOffset)
-      .Relaxed_Store(Object(data.data_[1]));
+  STATIC_ASSERT(kSmiShiftSize == 0);
+  STATIC_ASSERT(SmiValuesAre31Bits());
+  STATIC_ASSERT(kTaggedSize == kInt32Size);
+  // We have to do two 32-bit stores here because
+  // 1) tagged part modifications must be atomic to be properly synchronized
+  //    with the concurrent marker.
+  // 2) atomicity of full pointer store is not guaranteed for embedder slots
+  //    since the address of the slot may not be kSystemPointerSize aligned
+  //    (only kTaggedSize alignment is guaranteed).
+  // TODO(ishell, v8:8875): revisit this once the allocation alignment
+  // inconsistency is fixed.
+  Address lo = static_cast<intptr_t>(static_cast<int32_t>(value));
+  ObjectSlot(address() + kTaggedPayloadOffset).Relaxed_Store(Smi(lo));
+  Address hi = value >> 32;
+  ObjectSlot(address() + kRawPayloadOffset).Relaxed_Store(Object(hi));
+#else
+  ObjectSlot(address() + kTaggedPayloadOffset).Relaxed_Store(Smi(value));
 #endif
 }

diff --git a/deps/v8/src/objects/embedder-data-slot.h b/deps/v8/src/objects/embedder-data-slot.h
index 371452253ceb9f..e0c95623754d06 100644
--- a/deps/v8/src/objects/embedder-data-slot.h
+++ b/deps/v8/src/objects/embedder-data-slot.h
@@ -30,41 +30,42 @@ class Object;
 // Storing heap object through this slot may require triggering write barriers
 // so this operation must be done via static store_tagged() methods.
 class EmbedderDataSlot
-    : public SlotBase<EmbedderDataSlot, Address, kEmbedderDataSlotSize> {
+    : public SlotBase<EmbedderDataSlot, Address, kTaggedSize> {
  public:
   EmbedderDataSlot() : SlotBase(kNullAddress) {}
   V8_INLINE EmbedderDataSlot(EmbedderDataArray array, int entry_index);
   V8_INLINE EmbedderDataSlot(JSObject object, int embedder_field_index);

-  // TODO(ishell): these offsets are currently little-endian specific.
-  // The less significant part contains tagged value and the other part
-  // contains the raw value.
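The replacement below drops the little-endian assumption the deleted TODO pointed out: the Smi-tagged half of the slot must be the arithmetically-low 32 bits of the full word, which live at byte offset 0 on little-endian targets but at offset kTaggedSize on big-endian ones. A quick standalone check of that byte-order fact, not part of the patch:

    #include <cassert>
    #include <cstdint>
    #include <cstring>

    int main() {
      uint64_t word = 0x1122334455667788ull;
      uint32_t at_offset_0;
      std::memcpy(&at_offset_0, &word, sizeof(at_offset_0));
      // Holds on little-endian hosts (e.g. x64); on a big-endian host the
      // arithmetically-low half would be found at byte offset 4 instead.
      assert(at_offset_0 == 0x55667788u);
      return 0;
    }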
+#ifdef V8_TARGET_LITTLE_ENDIAN
   static constexpr int kTaggedPayloadOffset = 0;
+#else
+  static constexpr int kTaggedPayloadOffset = kTaggedSize;
+#endif
+
 #ifdef V8_COMPRESS_POINTERS
-  static constexpr int kRawPayloadOffset = kTaggedSize;
+  // The raw payload is located in the other tagged part of the full pointer.
+  static constexpr int kRawPayloadOffset = kTaggedSize - kTaggedPayloadOffset;
 #endif
   static constexpr int kRequiredPtrAlignment = kSmiTagSize;

   // Opaque type used for storing raw embedder data.
-  struct RawData {
-    const Address data_[kEmbedderDataSlotSizeInTaggedSlots];
-  };
+  typedef Address RawData;

   V8_INLINE Object load_tagged() const;
   V8_INLINE void store_smi(Smi value);

   // Setting an arbitrary tagged value requires triggering a write barrier
   // which requires separate object and offset values, therefore these static
-  // functions a
+  // functions also have the target object parameter.
   static V8_INLINE void store_tagged(EmbedderDataArray array, int entry_index,
                                      Object value);
   static V8_INLINE void store_tagged(JSObject object, int embedder_field_index,
                                      Object value);

-  // Tries reinterpret the value as an aligned pointer and on success sets
-  // *out_result to the pointer-like value and returns true. Note, that some
-  // Smis could still look like an aligned pointers.
-  // Returns false otherwise.
+  // Tries to reinterpret the value as an aligned pointer and sets *out_result
+  // to the pointer-like value. Note that some Smis could still look like
+  // aligned pointers.
+  // Returns true on success.
   V8_INLINE bool ToAlignedPointer(void** out_result) const;

   // Returns true if the pointer was successfully stored or false if the
   // pointer was improperly aligned.
   V8_INLINE V8_WARN_UNUSED_RESULT bool store_aligned_pointer(void* ptr);

   V8_INLINE RawData load_raw(const DisallowHeapAllocation& no_gc) const;
-  V8_INLINE void store_raw(const RawData& data,
-                           const DisallowHeapAllocation& no_gc);
+  V8_INLINE void store_raw(RawData data, const DisallowHeapAllocation& no_gc);
+
+ private:
+  // Stores given value to the embedder data slot in a concurrent-marker
+  // friendly manner (tagged part of the slot is written atomically).
+  V8_INLINE void gc_safe_store(Address value);
 };

 }  // namespace internal

diff --git a/deps/v8/src/objects/slots-atomic-inl.h b/deps/v8/src/objects/slots-atomic-inl.h
index 3db4e387203494..c74875d2d921d3 100644
--- a/deps/v8/src/objects/slots-atomic-inl.h
+++ b/deps/v8/src/objects/slots-atomic-inl.h
@@ -25,7 +25,7 @@ namespace internal {
 // Note how the comparator operates on Address values, representing the raw
 // data found at the given heap location, so you probably want to construct
 // an Object from it.
-class AtomicSlot : public SlotBase<AtomicSlot, Tagged_t, kTaggedSize> {
+class AtomicSlot : public SlotBase<AtomicSlot, Tagged_t> {
  public:
   // This class is a stand-in for "Address&" that uses custom atomic
   // read/write operations for the actual memory accesses.

diff --git a/deps/v8/src/objects/slots-inl.h b/deps/v8/src/objects/slots-inl.h
index 0d705888e80fdd..f0baf686e35eca 100644
--- a/deps/v8/src/objects/slots-inl.h
+++ b/deps/v8/src/objects/slots-inl.h
@@ -118,13 +118,14 @@ inline void CopyTagged(Address dst, const Address src, size_t num_tagged) {

 // Sets |counter| number of kTaggedSize-sized values starting at |start| slot.
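The MemsetTagged() rewrite below fills 32-bit compressed slots via MemsetInt32() (the "rep stosl" helper added in memcopy.h above) instead of pointer-sized writes. For reference, not part of the patch, its portable fallback path amounts to no more than this:

    #include <cstddef>
    #include <cstdint>

    // Equivalent of MemsetInt32's non-x86 path: fill |count| compressed
    // tagged slots with a single 32-bit pattern.
    inline void MemsetInt32Portable(int32_t* dest, int32_t value,
                                    size_t count) {
      for (size_t i = 0; i < count; i++) dest[i] = value;
    }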
 inline void MemsetTagged(ObjectSlot start, Object value, size_t counter) {
-  // TODO(ishell): revisit this implementation, maybe use "rep stosl"
-  STATIC_ASSERT(kTaggedSize == kSystemPointerSize);
-  Address raw_value = value.ptr();
 #ifdef V8_COMPRESS_POINTERS
-  raw_value = CompressTagged(raw_value);
-#endif
+  Tagged_t raw_value = CompressTagged(value.ptr());
+  STATIC_ASSERT(kTaggedSize == kInt32Size);
+  MemsetInt32(start.location(), raw_value, counter);
+#else
+  Address raw_value = value.ptr();
   MemsetPointer(start.location(), raw_value, counter);
+#endif
 }

 // Sets |counter| number of kSystemPointerSize-sized values starting at |start|

diff --git a/deps/v8/src/objects/slots.h b/deps/v8/src/objects/slots.h
index 95acb94ff3a806..3af615a695aaf6 100644
--- a/deps/v8/src/objects/slots.h
+++ b/deps/v8/src/objects/slots.h
@@ -12,14 +12,14 @@ namespace internal {

 class Object;

-template <typename Subclass, typename Data, size_t SlotDataSize>
+template <typename Subclass, typename Data,
+          size_t SlotDataAlignment = sizeof(Data)>
 class SlotBase {
  public:
   using TData = Data;

-  // TODO(ishell): This should eventually become just sizeof(TData) once
-  // pointer compression is implemented.
-  static constexpr size_t kSlotDataSize = SlotDataSize;
+  static constexpr size_t kSlotDataSize = sizeof(Data);
+  static constexpr size_t kSlotDataAlignment = SlotDataAlignment;

   Subclass& operator++() {  // Prefix increment.
     ptr_ += kSlotDataSize;
@@ -72,9 +72,8 @@ class SlotBase {
   TData* location() const { return reinterpret_cast<TData*>(ptr_); }

  protected:
-  STATIC_ASSERT(IsAligned(kSlotDataSize, kTaggedSize));
   explicit SlotBase(Address ptr) : ptr_(ptr) {
-    DCHECK(IsAligned(ptr, kTaggedSize));
+    DCHECK(IsAligned(ptr, kSlotDataAlignment));
   }

 private:
@@ -88,8 +87,7 @@ class SlotBase {
 // ("slot") holding a tagged pointer (smi or strong heap object).
 // Its address() is the address of the slot.
 // The slot's contents can be read and written using operator* and store().
-class FullObjectSlot
-    : public SlotBase<FullObjectSlot, Address, kSystemPointerSize> {
+class FullObjectSlot : public SlotBase<FullObjectSlot, Address> {
  public:
   using TObject = Object;
   using THeapObjectSlot = FullHeapObjectSlot;
@@ -103,7 +101,7 @@ class FullObjectSlot
       : SlotBase(reinterpret_cast<Address>(ptr)) {}
   inline explicit FullObjectSlot(Object* object);
   template <typename T>
-  explicit FullObjectSlot(SlotBase<T, TData, kSlotDataSize> slot)
+  explicit FullObjectSlot(SlotBase<T, TData, kSlotDataAlignment> slot)
       : SlotBase(slot.address()) {}

   // Compares memory representation of a value stored in the slot with given
@@ -140,7 +138,7 @@ class FullMaybeObjectSlot
   explicit FullMaybeObjectSlot(MaybeObject* ptr)
       : SlotBase(reinterpret_cast<Address>(ptr)) {}
   template <typename T>
-  explicit FullMaybeObjectSlot(SlotBase<T, TData, kSlotDataSize> slot)
+  explicit FullMaybeObjectSlot(SlotBase<T, TData, kSlotDataAlignment> slot)
       : SlotBase(slot.address()) {}

   inline const MaybeObject operator*() const;
@@ -158,15 +156,14 @@ class FullMaybeObjectSlot
 // The slot's contents can be read and written using operator* and store().
 // In case it is known that the slot contains a strong heap object pointer,
 // ToHeapObject() can be used to retrieve that heap object.
-class FullHeapObjectSlot
-    : public SlotBase<FullHeapObjectSlot, Address, kSystemPointerSize> {
+class FullHeapObjectSlot : public SlotBase<FullHeapObjectSlot, Address> {
  public:
   FullHeapObjectSlot() : SlotBase(kNullAddress) {}
   explicit FullHeapObjectSlot(Address ptr) : SlotBase(ptr) {}
   explicit FullHeapObjectSlot(Object* ptr)
       : SlotBase(reinterpret_cast<Address>(ptr)) {}
   template <typename T>
-  explicit FullHeapObjectSlot(SlotBase<T, TData, kSlotDataSize> slot)
+  explicit FullHeapObjectSlot(SlotBase<T, TData, kSlotDataAlignment> slot)
       : SlotBase(slot.address()) {}

   inline const HeapObjectReference operator*() const;

diff --git a/deps/v8/src/ptr-compr-inl.h b/deps/v8/src/ptr-compr-inl.h
index ba2d15dc8aab63..ac234b02814e15 100644
--- a/deps/v8/src/ptr-compr-inl.h
+++ b/deps/v8/src/ptr-compr-inl.h
@@ -29,8 +29,6 @@ V8_INLINE Address GetRootFromOnHeapAddress(Address addr) {
 // preserving both weak- and smi- tags.
 V8_INLINE Address DecompressTaggedPointer(Address on_heap_addr,
                                           Tagged_t raw_value) {
-  static_assert(kTaggedSize == kSystemPointerSize, "has to be updated");
-  static_assert(!std::is_same<Tagged_t, int32_t>::value, "remove cast below");
   int32_t value = static_cast<int32_t>(raw_value);
   Address root = GetRootFromOnHeapAddress(on_heap_addr);
   // Current compression scheme requires value to be sign-extended to intptr_t
@@ -41,8 +39,6 @@ V8_INLINE Address DecompressTaggedPointer(Address on_heap_addr,
 // Decompresses any tagged value, preserving both weak- and smi- tags.
 V8_INLINE Address DecompressTaggedAny(Address on_heap_addr,
                                       Tagged_t raw_value) {
-  static_assert(kTaggedSize == kSystemPointerSize, "has to be updated");
-  static_assert(!std::is_same<Tagged_t, int32_t>::value, "remove cast below");
   int32_t value = static_cast<int32_t>(raw_value);
   // |root_mask| is 0 if the |value| was a smi or -1 otherwise.
   Address root_mask = -static_cast<Address>(value & kSmiTagMask);

diff --git a/deps/v8/src/ptr-compr.h b/deps/v8/src/ptr-compr.h
index 93d7834df3077b..00d410e1ce3aaa 100644
--- a/deps/v8/src/ptr-compr.h
+++ b/deps/v8/src/ptr-compr.h
@@ -13,6 +13,7 @@
 namespace v8 {
 namespace internal {

+// See v8:7703 for details about how pointer compression works.
 constexpr size_t kPtrComprHeapReservationSize = size_t{4} * GB;
 constexpr size_t kPtrComprIsolateRootBias = kPtrComprHeapReservationSize / 2;
 constexpr size_t kPtrComprIsolateRootAlignment = size_t{4} * GB;
@@ -21,8 +22,7 @@ constexpr size_t kPtrComprIsolateRootAlignment = size_t{4} * GB;
 // holding a compressed tagged pointer (smi or heap object).
 // Its address() is the address of the slot.
 // The slot's contents can be read and written using operator* and store().
-class CompressedObjectSlot
-    : public SlotBase<CompressedObjectSlot, Tagged_t, kTaggedSize> {
+class CompressedObjectSlot : public SlotBase<CompressedObjectSlot, Tagged_t> {
  public:
   using TObject = Object;
   using THeapObjectSlot = CompressedHeapObjectSlot;
@@ -37,7 +37,7 @@ class CompressedObjectSlot
   explicit CompressedObjectSlot(Object const* const* ptr)
       : SlotBase(reinterpret_cast<Address>(ptr)) {}
   template <typename T>
-  explicit CompressedObjectSlot(SlotBase<T, TData, kSlotDataSize> slot)
+  explicit CompressedObjectSlot(SlotBase<T, TData, kSlotDataAlignment> slot)
       : SlotBase(slot.address()) {}

   inline Object operator*() const;
@@ -57,8 +57,7 @@ class CompressedObjectSlot
 // forwarding pointer is different.
 // Its address() is the address of the slot.
 // The slot's contents can be read and written using operator* and store().
-class CompressedMapWordSlot
-    : public SlotBase<CompressedMapWordSlot, Tagged_t, kTaggedSize> {
+class CompressedMapWordSlot : public SlotBase<CompressedMapWordSlot, Tagged_t> {
  public:
   using TObject = Object;

@@ -88,7 +87,7 @@ class CompressedMapWordSlot
 // Its address() is the address of the slot.
 // The slot's contents can be read and written using operator* and store().
 class CompressedMaybeObjectSlot
-    : public SlotBase<CompressedMaybeObjectSlot, Tagged_t, kTaggedSize> {
+    : public SlotBase<CompressedMaybeObjectSlot, Tagged_t> {
  public:
   using TObject = MaybeObject;
   using THeapObjectSlot = CompressedHeapObjectSlot;
@@ -102,7 +101,8 @@ class CompressedMaybeObjectSlot
   explicit CompressedMaybeObjectSlot(MaybeObject* ptr)
       : SlotBase(reinterpret_cast<Address>(ptr)) {}
   template <typename T>
-  explicit CompressedMaybeObjectSlot(SlotBase<T, TData, kSlotDataSize> slot)
+  explicit CompressedMaybeObjectSlot(
+      SlotBase<T, TData, kSlotDataAlignment> slot)
       : SlotBase(slot.address()) {}

   inline MaybeObject operator*() const;
@@ -121,14 +121,14 @@ class CompressedMaybeObjectSlot
 // In case it is known that the slot contains a strong heap object pointer,
 // ToHeapObject() can be used to retrieve that heap object.
 class CompressedHeapObjectSlot
-    : public SlotBase<CompressedHeapObjectSlot, Tagged_t, kTaggedSize> {
+    : public SlotBase<CompressedHeapObjectSlot, Tagged_t> {
  public:
   CompressedHeapObjectSlot() : SlotBase(kNullAddress) {}
   explicit CompressedHeapObjectSlot(Address ptr) : SlotBase(ptr) {}
   explicit CompressedHeapObjectSlot(Object* ptr)
       : SlotBase(reinterpret_cast<Address>(ptr)) {}
   template <typename T>
-  explicit CompressedHeapObjectSlot(SlotBase<T, TData, kSlotDataSize> slot)
+  explicit CompressedHeapObjectSlot(SlotBase<T, TData, kSlotDataAlignment> slot)
       : SlotBase(slot.address()) {}

   inline HeapObjectReference operator*() const;

diff --git a/deps/v8/src/snapshot/partial-serializer.cc b/deps/v8/src/snapshot/partial-serializer.cc
index c71d05b385c5b4..638c4f702cc4a2 100644
--- a/deps/v8/src/snapshot/partial-serializer.cc
+++ b/deps/v8/src/snapshot/partial-serializer.cc
@@ -182,7 +182,7 @@ bool PartialSerializer::SerializeJSObjectWithEmbedderFields(Object obj) {
   // with embedder callbacks.
   for (int i = 0; i < embedder_fields_count; i++) {
     if (!DataIsEmpty(serialized_data[i])) {
-      EmbedderDataSlot(js_obj, i).store_raw({kNullAddress}, no_gc);
+      EmbedderDataSlot(js_obj, i).store_raw(kNullAddress, no_gc);
     }
   }

diff --git a/deps/v8/src/x64/assembler-x64.h b/deps/v8/src/x64/assembler-x64.h
index a491bbfcee2b6c..5e1d93b0a8b3e4 100644
--- a/deps/v8/src/x64/assembler-x64.h
+++ b/deps/v8/src/x64/assembler-x64.h
@@ -426,50 +426,45 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
   // - Instructions on 64-bit (quadword) operands/registers use 'q'.
   // - Instructions on operands/registers with pointer size use 'p'.

-#define DECLARE_INSTRUCTION(instruction) \
-  template <class P1> \
-  void instruction##_tagged(P1 p1) { \
-    STATIC_ASSERT(kTaggedSize == kSystemPointerSize); \
-    /* TODO(ishell): change to kTaggedSize */ \
-    emit_##instruction(p1, COMPRESS_POINTERS_BOOL ? kInt32Size : kTaggedSize); \
-  } \
-  \
-  template <class P1> \
-  void instruction##l(P1 p1) { \
-    emit_##instruction(p1, kInt32Size); \
-  } \
-  \
-  template <class P1> \
-  void instruction##q(P1 p1) { \
-    emit_##instruction(p1, kInt64Size); \
-  } \
-  \
-  template <class P1, class P2> \
-  void instruction##_tagged(P1 p1, P2 p2) { \
-    STATIC_ASSERT(kTaggedSize == kSystemPointerSize); \
-    /* TODO(ishell): change to kTaggedSize */ \
-    emit_##instruction(p1, p2, \
-                       COMPRESS_POINTERS_BOOL ? kInt32Size : kTaggedSize); \
-  } \
-  \
-  template <class P1, class P2> \
-  void instruction##l(P1 p1, P2 p2) { \
-    emit_##instruction(p1, p2, kInt32Size); \
-  } \
-  \
-  template <class P1, class P2> \
-  void instruction##q(P1 p1, P2 p2) { \
-    emit_##instruction(p1, p2, kInt64Size); \
-  } \
-  \
-  template <class P1, class P2, class P3> \
-  void instruction##l(P1 p1, P2 p2, P3 p3) { \
-    emit_##instruction(p1, p2, p3, kInt32Size); \
-  } \
-  \
-  template <class P1, class P2, class P3> \
-  void instruction##q(P1 p1, P2 p2, P3 p3) { \
-    emit_##instruction(p1, p2, p3, kInt64Size); \
+#define DECLARE_INSTRUCTION(instruction) \
+  template <class P1> \
+  void instruction##_tagged(P1 p1) { \
+    emit_##instruction(p1, kTaggedSize); \
+  } \
+  \
+  template <class P1> \
+  void instruction##l(P1 p1) { \
+    emit_##instruction(p1, kInt32Size); \
+  } \
+  \
+  template <class P1> \
+  void instruction##q(P1 p1) { \
+    emit_##instruction(p1, kInt64Size); \
+  } \
+  \
+  template <class P1, class P2> \
+  void instruction##_tagged(P1 p1, P2 p2) { \
+    emit_##instruction(p1, p2, kTaggedSize); \
+  } \
+  \
+  template <class P1, class P2> \
+  void instruction##l(P1 p1, P2 p2) { \
+    emit_##instruction(p1, p2, kInt32Size); \
+  } \
+  \
+  template <class P1, class P2> \
+  void instruction##q(P1 p1, P2 p2) { \
+    emit_##instruction(p1, p2, kInt64Size); \
+  } \
+  \
+  template <class P1, class P2, class P3> \
+  void instruction##l(P1 p1, P2 p2, P3 p3) { \
+    emit_##instruction(p1, p2, p3, kInt32Size); \
+  } \
+  \
+  template <class P1, class P2, class P3> \
+  void instruction##q(P1 p1, P2 p2, P3 p3) { \
+    emit_##instruction(p1, p2, p3, kInt64Size); \
   }
   ASSEMBLER_INSTRUCTION_LIST(DECLARE_INSTRUCTION)
 #undef DECLARE_INSTRUCTION

diff --git a/deps/v8/src/x64/macro-assembler-x64.cc b/deps/v8/src/x64/macro-assembler-x64.cc
index 0a60d5e5575f87..ecc8058d0495fb 100644
--- a/deps/v8/src/x64/macro-assembler-x64.cc
+++ b/deps/v8/src/x64/macro-assembler-x64.cc
@@ -269,7 +269,6 @@ void TurboAssembler::StoreTaggedField(Operand dst_field_operand,
 #ifdef V8_COMPRESS_POINTERS
   RecordComment("[ StoreTagged");
   movl(dst_field_operand, value);
-  movl(Operand(dst_field_operand, 4), Immediate(0));
   RecordComment("]");
 #else
   movq(dst_field_operand, value);
@@ -281,7 +280,6 @@ void TurboAssembler::StoreTaggedField(Operand dst_field_operand,
 #ifdef V8_COMPRESS_POINTERS
   RecordComment("[ StoreTagged");
   movl(dst_field_operand, value);
-  movl(Operand(dst_field_operand, 4), Immediate(0));
   RecordComment("]");
 #else
   movq(dst_field_operand, value);
@@ -1124,7 +1122,11 @@ void TurboAssembler::SmiUntag(Register dst, Operand src) {
     movsxlq(dst, dst);
   } else {
     DCHECK(SmiValuesAre31Bits());
+#ifdef V8_COMPRESS_POINTERS
+    movsxlq(dst, src);
+#else
     movq(dst, src);
+#endif
     sarq(dst, Immediate(kSmiShift));
   }
 }
@@ -1132,7 +1134,7 @@ void TurboAssembler::SmiUntag(Register dst, Operand src) {
 void MacroAssembler::SmiCompare(Register smi1, Register smi2) {
   AssertSmi(smi1);
   AssertSmi(smi2);
-  cmpq(smi1, smi2);
+  cmp_tagged(smi1, smi2);
 }

 void MacroAssembler::SmiCompare(Register dst, Smi src) {

diff --git a/deps/v8/test/cctest/compiler/codegen-tester.h b/deps/v8/test/cctest/compiler/codegen-tester.h
index 93d93e1671aec6..ff35d8b45328db 100644
--- a/deps/v8/test/cctest/compiler/codegen-tester.h
+++ b/deps/v8/test/cctest/compiler/codegen-tester.h
@@ -131,9 +131,18 @@ class BufferedRawMachineAssemblerTester
   // Store node is provided as a parameter. By storing the return value in
   // memory it is possible to return 64 bit values.
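Why the full-word store in the change below: with pointer compression a tagged store writes only the low 32 bits, while the test harness reads the off-heap return slot as a full machine word from C++, so the high half must be written deterministically. A standalone illustration of the hazard, with a hypothetical helper that is not part of the patch:

    #include <cstdint>
    #include <cstring>

    // A 32-bit ("compressed tagged") store into an 8-byte return slot
    // leaves the upper four bytes whatever they were before.
    inline void StoreCompressed(uint64_t* slot, uint64_t tagged) {
      uint32_t lo = static_cast<uint32_t>(tagged);
      std::memcpy(slot, &lo, sizeof(lo));  // only 4 of 8 bytes written
    }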
   void Return(Node* input) {
-    Store(MachineTypeForC<ReturnType>().representation(),
-          RawMachineAssembler::Parameter(return_parameter_index_), input,
-          kNoWriteBarrier);
+    if (COMPRESS_POINTERS_BOOL && MachineTypeForC<ReturnType>().IsTagged()) {
+      // Since we are returning values via storing to off-heap location
+      // generate full-word store here.
+      Store(MachineType::PointerRepresentation(),
+            RawMachineAssembler::Parameter(return_parameter_index_),
+            BitcastTaggedToWord(input), kNoWriteBarrier);
+
+    } else {
+      Store(MachineTypeForC<ReturnType>().representation(),
+            RawMachineAssembler::Parameter(return_parameter_index_), input,
+            kNoWriteBarrier);
+    }
     RawMachineAssembler::Return(Int32Constant(1234));
   }

diff --git a/deps/v8/test/cctest/compiler/test-run-load-store.cc b/deps/v8/test/cctest/compiler/test-run-load-store.cc
index 26d681299df878..d3ed7d64055159 100644
--- a/deps/v8/test/cctest/compiler/test-run-load-store.cc
+++ b/deps/v8/test/cctest/compiler/test-run-load-store.cc
@@ -199,13 +199,10 @@ void CheckEq(CType in_value, CType out_value) {
 // Specializations for checking the result of compressing store.
 template <>
 void CheckEq<Object>(Object in_value, Object out_value) {
-  Isolate* isolate = CcTest::InitIsolateOnce();
-  // |out_value| is compressed. Check that it's valid.
-  CHECK_EQ(CompressTagged(in_value->ptr()), out_value->ptr());
-  STATIC_ASSERT(kTaggedSize == kSystemPointerSize);
-  CHECK_EQ(in_value->ptr(),
-           DecompressTaggedAny(isolate->isolate_root(),
-                               static_cast<int32_t>(out_value->ptr())));
+  // Compare only lower 32 bits of the value because tagged load/stores are
+  // 32-bit operations anyway.
+  CHECK_EQ(static_cast<int32_t>(in_value.ptr()),
+           static_cast<int32_t>(out_value.ptr()));
 }

 template <>
@@ -269,7 +266,7 @@ void RunLoadImmIndex(MachineType rep, TestAlignment t) {
       // When pointer compression is enabled then we need to access only
       // the lower 32-bit of the tagged value while the buffer contains
       // full 64-bit values.
-      base_pointer = LSB(base_pointer, kSystemPointerSize / 2);
+      base_pointer = LSB(base_pointer, kTaggedSize);
     }
 #endif
     Node* base = m.PointerConstant(base_pointer);
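Taken together, the compression scheme this patch moves toward can be exercised end to end with a small self-contained test. kIsolateRoot and the helpers here are hypothetical simplifications of the V8 internals, assuming a 64-bit build with a 4 GB-aligned isolate root and ignoring the root-bias detail:

    #include <cassert>
    #include <cstdint>

    using Address = uint64_t;
    constexpr Address kIsolateRoot = Address{16} << 32;  // 4 GB-aligned base

    inline int32_t CompressTagged(Address value) {
      return static_cast<int32_t>(value);  // truncation is the whole trick
    }

    inline Address DecompressTaggedAny(Address on_heap_addr, int32_t value) {
      // Mask is 0 for Smis (tag bit clear), all-ones for heap references.
      Address root_mask =
          static_cast<Address>(-static_cast<intptr_t>(value & 1));
      Address root = on_heap_addr & ~((Address{1} << 32) - 1);
      return (root_mask & root) +
             static_cast<Address>(static_cast<intptr_t>(value));
    }

    int main() {
      Address heap_ref = kIsolateRoot + 0x12345670 + 1;  // tag bit set
      Address smi = 42 << 1;                             // tag bit clear
      assert(DecompressTaggedAny(heap_ref, CompressTagged(heap_ref)) ==
             heap_ref);
      assert(DecompressTaggedAny(heap_ref, CompressTagged(smi)) == smi);
      return 0;
    }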