A lot more classes have padding that can be reduced by reordering their fields

https://bugs.webkit.org/show_bug.cgi?id=195579

Reviewed by Mark Lam.
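
The padding exists because the compiler must start each field at an offset
that is a multiple of the field's alignment; ordering fields from most- to
least-aligned avoids most of the wasted bytes. A minimal illustrative sketch
of the idea (hypothetical Before/After structs, not code from this patch):

    #include <cstdint>

    // A fixed one-byte underlying type (as done for ExceptionCheckRequirement
    // in this patch) lets an enum pack next to bools and other small fields.
    enum class Mode : uint8_t { Fast, Slow };

    struct Before {
        bool a;     // 1 byte + 7 bytes of padding (next field is 8-aligned)
        void* p;    // 8 bytes
        Mode m;     // 1 byte
        bool b;     // 1 byte + 2 bytes of padding
        uint32_t n; // 4 bytes
    };              // sizeof(Before) == 24 on LP64

    struct After {
        void* p;    // 8 bytes
        uint32_t n; // 4 bytes
        Mode m;     // 1 byte
        bool a;     // 1 byte
        bool b;     // 1 byte + 1 byte of tail padding
    };              // sizeof(After) == 16 on LP64

    static_assert(sizeof(After) < sizeof(Before), "reordering removed padding");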

Source/bmalloc:

* bmalloc/Heap.h:
* bmalloc/Scavenger.h:

Source/JavaScriptCore:

* assembler/LinkBuffer.h:
* dfg/DFGArrayifySlowPathGenerator.h:
(JSC::DFG::ArrayifySlowPathGenerator::ArrayifySlowPathGenerator):
* dfg/DFGCallArrayAllocatorSlowPathGenerator.h:
(JSC::DFG::CallArrayAllocatorSlowPathGenerator::CallArrayAllocatorSlowPathGenerator):
(JSC::DFG::CallArrayAllocatorWithVariableSizeSlowPathGenerator::CallArrayAllocatorWithVariableSizeSlowPathGenerator):
* dfg/DFGGraph.h:
* dfg/DFGNode.h:
(JSC::DFG::SwitchData::SwitchData):
* dfg/DFGPlan.cpp:
(JSC::DFG::Plan::Plan):
* dfg/DFGPlan.h:
* dfg/DFGSlowPathGenerator.h:
(JSC::DFG::CallSlowPathGenerator::CallSlowPathGenerator):
* dfg/DFGSpeculativeJIT.cpp:
(JSC::DFG::SpeculativeJIT::SpeculativeJIT):
* dfg/DFGSpeculativeJIT.h:
* domjit/DOMJITSignature.h:
(JSC::DOMJIT::Signature::Signature):
(JSC::DOMJIT::Signature::effect):
(JSC::DOMJIT::Signature::argumentCount): Deleted.
* heap/MarkingConstraintSolver.h:
* heap/SlotVisitor.h:
* jit/CallFrameShuffleData.h:
* jit/JITDivGenerator.h:
* jit/SpillRegistersMode.h:
* parser/Nodes.h:
* profiler/ProfilerOSRExit.cpp:
(JSC::Profiler::OSRExit::OSRExit):
* profiler/ProfilerOSRExit.h:
* runtime/ArrayBufferView.h:
* runtime/SamplingProfiler.cpp:
(JSC::SamplingProfiler::SamplingProfiler):
* runtime/SamplingProfiler.h:
* runtime/TypeSet.cpp:
(JSC::StructureShape::StructureShape):
* runtime/TypeSet.h:
* runtime/Watchdog.h:

Source/WTF:

* wtf/CrossThreadQueue.h:
* wtf/Logger.h:
* wtf/MemoryPressureHandler.h:
* wtf/MetaAllocator.h:
* wtf/Threading.cpp:


Canonical link: https://commits.webkit.org/209940@main
git-svn-id: https://svn.webkit.org/repository/webkit/trunk@242812 268f45cc-cd09-0410-ab3c-d52691b4dbfc
Robin Morisset committed Mar 12, 2019
1 parent 92eb2b1 commit 17b8ed8092e2ddb28f1faf53f76c9684e6f3a4b5
Showing 35 changed files with 153 additions and 83 deletions.
Source/JavaScriptCore/ChangeLog
@@ -1,3 +1,49 @@
+2019-03-12 Robin Morisset <rmorisset@apple.com>
+
+A lot more classes have padding that can be reduced by reordering their fields
+https://bugs.webkit.org/show_bug.cgi?id=195579
+
+Reviewed by Mark Lam.
+
+* assembler/LinkBuffer.h:
+* dfg/DFGArrayifySlowPathGenerator.h:
+(JSC::DFG::ArrayifySlowPathGenerator::ArrayifySlowPathGenerator):
+* dfg/DFGCallArrayAllocatorSlowPathGenerator.h:
+(JSC::DFG::CallArrayAllocatorSlowPathGenerator::CallArrayAllocatorSlowPathGenerator):
+(JSC::DFG::CallArrayAllocatorWithVariableSizeSlowPathGenerator::CallArrayAllocatorWithVariableSizeSlowPathGenerator):
+* dfg/DFGGraph.h:
+* dfg/DFGNode.h:
+(JSC::DFG::SwitchData::SwitchData):
+* dfg/DFGPlan.cpp:
+(JSC::DFG::Plan::Plan):
+* dfg/DFGPlan.h:
+* dfg/DFGSlowPathGenerator.h:
+(JSC::DFG::CallSlowPathGenerator::CallSlowPathGenerator):
+* dfg/DFGSpeculativeJIT.cpp:
+(JSC::DFG::SpeculativeJIT::SpeculativeJIT):
+* dfg/DFGSpeculativeJIT.h:
+* domjit/DOMJITSignature.h:
+(JSC::DOMJIT::Signature::Signature):
+(JSC::DOMJIT::Signature::effect):
+(JSC::DOMJIT::Signature::argumentCount): Deleted.
+* heap/MarkingConstraintSolver.h:
+* heap/SlotVisitor.h:
+* jit/CallFrameShuffleData.h:
+* jit/JITDivGenerator.h:
+* jit/SpillRegistersMode.h:
+* parser/Nodes.h:
+* profiler/ProfilerOSRExit.cpp:
+(JSC::Profiler::OSRExit::OSRExit):
+* profiler/ProfilerOSRExit.h:
+* runtime/ArrayBufferView.h:
+* runtime/SamplingProfiler.cpp:
+(JSC::SamplingProfiler::SamplingProfiler):
+* runtime/SamplingProfiler.h:
+* runtime/TypeSet.cpp:
+(JSC::StructureShape::StructureShape):
+* runtime/TypeSet.h:
+* runtime/Watchdog.h:
+
2019-03-12 Mark Lam <mark.lam@apple.com>

The HasIndexedProperty node does GC.
Source/JavaScriptCore/assembler/LinkBuffer.h
@@ -93,10 +93,10 @@ class LinkBuffer {
LinkBuffer(MacroAssembler& macroAssembler, MacroAssemblerCodePtr<tag> code, size_t size, JITCompilationEffort effort = JITCompilationMustSucceed, bool shouldPerformBranchCompaction = true)
: m_size(size)
, m_didAllocate(false)
+, m_code(code.template retagged<LinkBufferPtrTag>())
#ifndef NDEBUG
, m_completed(false)
#endif
-, m_code(code.template retagged<LinkBufferPtrTag>())
{
#if ENABLE(BRANCH_COMPACTION)
m_shouldPerformBranchCompaction = shouldPerformBranchCompaction;
@@ -342,11 +342,11 @@ class LinkBuffer {
bool m_shouldPerformBranchCompaction { true };
#endif
bool m_didAllocate;
+MacroAssemblerCodePtr<LinkBufferPtrTag> m_code;
#ifndef NDEBUG
bool m_completed;
#endif
bool m_alreadyDisassembled { false };
-MacroAssemblerCodePtr<LinkBufferPtrTag> m_code;
Vector<RefPtr<SharedTask<void(LinkBuffer&)>>> m_linkTasks;
};

Source/JavaScriptCore/dfg/DFGArrayifySlowPathGenerator.h
@@ -43,8 +43,8 @@ class ArrayifySlowPathGenerator : public JumpingSlowPathGenerator<MacroAssembler
GPRReg propertyGPR, GPRReg tempGPR, GPRReg structureGPR)
: JumpingSlowPathGenerator<MacroAssembler::JumpList>(from, jit)
, m_op(node->op())
+, m_arrayMode(node->arrayMode())
, m_structure(node->op() == ArrayifyToStructure ? node->structure() : RegisteredStructure())
-, m_arrayMode(node->arrayMode())
, m_baseGPR(baseGPR)
, m_propertyGPR(propertyGPR)
, m_tempGPR(tempGPR)
@@ -134,8 +134,8 @@ class ArrayifySlowPathGenerator : public JumpingSlowPathGenerator<MacroAssembler

private:
NodeType m_op;
+ArrayMode m_arrayMode;
RegisteredStructure m_structure;
-ArrayMode m_arrayMode;
GPRReg m_baseGPR;
GPRReg m_propertyGPR;
GPRReg m_tempGPR;
Source/JavaScriptCore/dfg/DFGCallArrayAllocatorSlowPathGenerator.h
@@ -42,8 +42,8 @@ class CallArrayAllocatorSlowPathGenerator : public JumpingSlowPathGenerator<Macr
, m_function(function)
, m_resultGPR(resultGPR)
, m_storageGPR(storageGPR)
-, m_structure(structure)
, m_size(size)
+, m_structure(structure)
{
ASSERT(size < static_cast<size_t>(std::numeric_limits<int32_t>::max()));
jit->silentSpillAllRegistersImpl(false, m_plans, resultGPR);
@@ -67,8 +67,8 @@ class CallArrayAllocatorSlowPathGenerator : public JumpingSlowPathGenerator<Macr
P_JITOperation_EStZB m_function;
GPRReg m_resultGPR;
GPRReg m_storageGPR;
-RegisteredStructure m_structure;
int m_size;
+RegisteredStructure m_structure;
Vector<SilentRegisterSavePlan, 2> m_plans;
};

@@ -79,9 +79,9 @@ class CallArrayAllocatorWithVariableSizeSlowPathGenerator : public JumpingSlowPa
GPRReg resultGPR, RegisteredStructure contiguousStructure, RegisteredStructure arrayStorageStructure, GPRReg sizeGPR, GPRReg storageGPR)
: JumpingSlowPathGenerator<MacroAssembler::JumpList>(from, jit)
, m_function(function)
-, m_resultGPR(resultGPR)
, m_contiguousStructure(contiguousStructure)
, m_arrayStorageOrContiguousStructure(arrayStorageStructure)
+, m_resultGPR(resultGPR)
, m_sizeGPR(sizeGPR)
, m_storageGPR(storageGPR)
{
@@ -113,9 +113,9 @@ class CallArrayAllocatorWithVariableSizeSlowPathGenerator : public JumpingSlowPa

private:
P_JITOperation_EStZB m_function;
-GPRReg m_resultGPR;
RegisteredStructure m_contiguousStructure;
RegisteredStructure m_arrayStorageOrContiguousStructure;
+GPRReg m_resultGPR;
GPRReg m_sizeGPR;
GPRReg m_storageGPR;
Vector<SilentRegisterSavePlan, 2> m_plans;
Source/JavaScriptCore/dfg/DFGGraph.h
@@ -1022,16 +1022,6 @@ class Graph : public virtual Scannable {
// So argumentFormats[0] are the argument formats for the normal call entrypoint.
Vector<Vector<FlushFormat>> m_argumentFormats;

-// This maps an entrypoint index to a particular op_catch bytecode offset. By convention,
-// it'll never have zero as a key because we use zero to mean the op_enter entrypoint.
-HashMap<unsigned, unsigned> m_entrypointIndexToCatchBytecodeOffset;
-
-// This is the number of logical entrypoints that we're compiling. This is only used
-// in SSA. Each EntrySwitch node must have m_numberOfEntrypoints cases. Note, this is
-// not the same as m_roots.size(). m_roots.size() represents the number of roots in
-// the CFG. In SSA, m_roots.size() == 1 even if we're compiling more than one entrypoint.
-unsigned m_numberOfEntrypoints { UINT_MAX };
-
SegmentedVector<VariableAccessData, 16> m_variableAccessData;
SegmentedVector<ArgumentPosition, 8> m_argumentPositions;
Bag<Transition> m_transitions;
@@ -1064,7 +1054,17 @@ class Graph : public virtual Scannable {
unsigned m_localVars;
unsigned m_nextMachineLocal;
unsigned m_parameterSlots;


+// This is the number of logical entrypoints that we're compiling. This is only used
+// in SSA. Each EntrySwitch node must have m_numberOfEntrypoints cases. Note, this is
+// not the same as m_roots.size(). m_roots.size() represents the number of roots in
+// the CFG. In SSA, m_roots.size() == 1 even if we're compiling more than one entrypoint.
+unsigned m_numberOfEntrypoints { UINT_MAX };
+
+// This maps an entrypoint index to a particular op_catch bytecode offset. By convention,
+// it'll never have zero as a key because we use zero to mean the op_enter entrypoint.
+HashMap<unsigned, unsigned> m_entrypointIndexToCatchBytecodeOffset;
+
HashSet<String> m_localStrings;
HashMap<const StringImpl*, String> m_copiedStrings;

Source/JavaScriptCore/dfg/DFGNode.h
@@ -213,16 +213,16 @@ struct SwitchData {
// constructing this should make sure to initialize everything they
// care about manually.
SwitchData()
-: kind(static_cast<SwitchKind>(-1))
-, switchTableIndex(UINT_MAX)
+: switchTableIndex(UINT_MAX)
+, kind(static_cast<SwitchKind>(-1))
, didUseJumpTable(false)
{
}

Vector<SwitchCase> cases;
BranchTarget fallThrough;
-SwitchKind kind;
size_t switchTableIndex;
+SwitchKind kind;
bool didUseJumpTable;
};

Source/JavaScriptCore/dfg/DFGPlan.cpp
@@ -136,12 +136,12 @@ Profiler::CompilationKind profilerCompilationKindForMode(CompilationMode mode)
Plan::Plan(CodeBlock* passedCodeBlock, CodeBlock* profiledDFGCodeBlock,
CompilationMode mode, unsigned osrEntryBytecodeIndex,
const Operands<Optional<JSValue>>& mustHandleValues)
-: m_vm(passedCodeBlock->vm())
+: m_mode(mode)
+, m_vm(passedCodeBlock->vm())
, m_codeBlock(passedCodeBlock)
, m_profiledDFGCodeBlock(profiledDFGCodeBlock)
-, m_mode(mode)
-, m_osrEntryBytecodeIndex(osrEntryBytecodeIndex)
, m_mustHandleValues(mustHandleValues)
+, m_osrEntryBytecodeIndex(osrEntryBytecodeIndex)
, m_compilation(UNLIKELY(m_vm->m_perBytecodeProfiler) ? adoptRef(new Profiler::Compilation(m_vm->m_perBytecodeProfiler->ensureBytecodesFor(m_codeBlock), profilerCompilationKindForMode(mode))) : nullptr)
, m_inlineCallFrames(adoptRef(new InlineCallFrameSet()))
, m_identifiers(m_codeBlock)
Source/JavaScriptCore/dfg/DFGPlan.h
@@ -129,18 +129,22 @@ class Plan : public ThreadSafeRefCounted<Plan> {
// Warning: pretty much all of the pointer fields in this object get nulled by cancel(). So, if
// you're writing code that is callable on the cancel path, be sure to null check everything!

+CompilationMode m_mode;
+
VM* m_vm;

// These can be raw pointers because we visit them during every GC in checkLivenessAndVisitChildren.
CodeBlock* m_codeBlock;
CodeBlock* m_profiledDFGCodeBlock;

-CompilationMode m_mode;
-const unsigned m_osrEntryBytecodeIndex;
Operands<Optional<JSValue>> m_mustHandleValues;
bool m_mustHandleValuesMayIncludeGarbage { true };
Lock m_mustHandleValueCleaningLock;

+bool m_willTryToTierUp { false };
+
+const unsigned m_osrEntryBytecodeIndex;
+
ThreadData* m_threadData;

RefPtr<Profiler::Compilation> m_compilation;
@@ -155,8 +159,6 @@ class Plan : public ThreadSafeRefCounted<Plan> {
DesiredGlobalProperties m_globalProperties;
RecordedStatuses m_recordedStatuses;

-bool m_willTryToTierUp { false };
-
HashMap<unsigned, Vector<unsigned>> m_tierUpInLoopHierarchy;
Vector<unsigned> m_tierUpAndOSREnterBytecodes;

Source/JavaScriptCore/dfg/DFGSlowPathGenerator.h
@@ -66,8 +66,8 @@ class SlowPathGenerator {

protected:
virtual void generateInternal(SpeculativeJIT*) = 0;
-MacroAssembler::Label m_label;
Node* m_currentNode;
+MacroAssembler::Label m_label;
unsigned m_streamIndex;
NodeOrigin m_origin;
};
@@ -97,7 +97,7 @@ class JumpingSlowPathGenerator : public SlowPathGenerator {
MacroAssembler::Label m_to;
};

-enum class ExceptionCheckRequirement {
+enum class ExceptionCheckRequirement : uint8_t {
CheckNeeded,
CheckNotNeeded
};
@@ -109,10 +109,10 @@ class CallSlowPathGenerator : public JumpingSlowPathGenerator<JumpType> {
JumpType from, SpeculativeJIT* jit, FunctionType function,
SpillRegistersMode spillMode, ExceptionCheckRequirement requirement, ResultType result)
: JumpingSlowPathGenerator<JumpType>(from, jit)
-, m_function(function)
, m_spillMode(spillMode)
, m_exceptionCheckRequirement(requirement)
, m_result(result)
+, m_function(function)
{
if (m_spillMode == NeedToSpill)
jit->silentSpillAllRegistersImpl(false, m_plans, extractResult(result));
@@ -149,11 +149,11 @@ class CallSlowPathGenerator : public JumpingSlowPathGenerator<JumpType> {
this->jumpTo(jit);
}

-FunctionType m_function;
-MacroAssembler::Call m_call;
SpillRegistersMode m_spillMode;
ExceptionCheckRequirement m_exceptionCheckRequirement;
ResultType m_result;
+MacroAssembler::Call m_call;
+FunctionType m_function;
Vector<SilentRegisterSavePlan, 2> m_plans;
};

Source/JavaScriptCore/dfg/DFGSpeculativeJIT.cpp
@@ -71,13 +71,13 @@
namespace JSC { namespace DFG {

SpeculativeJIT::SpeculativeJIT(JITCompiler& jit)
-: m_compileOkay(true)
-, m_jit(jit)
+: m_jit(jit)
, m_graph(m_jit.graph())
, m_currentNode(0)
, m_lastGeneratedNode(LastNodeType)
, m_indexInBlock(0)
, m_generationInfo(m_jit.graph().frameRegisterCount())
+, m_compileOkay(true)
, m_state(m_jit.graph())
, m_interpreter(m_jit.graph(), m_state)
, m_stream(&jit.jitCode()->variableEventStream)
Source/JavaScriptCore/dfg/DFGSpeculativeJIT.h
@@ -1652,12 +1652,6 @@ class SpeculativeJIT {

void cageTypedArrayStorage(GPRReg);

-// It is possible, during speculative generation, to reach a situation in which we
-// can statically determine a speculation will fail (for example, when two nodes
-// will make conflicting speculations about the same operand). In such cases this
-// flag is cleared, indicating no further code generation should take place.
-bool m_compileOkay;
-
void recordSetLocal(
VirtualRegister bytecodeReg, VirtualRegister machineReg, DataFormat format)
{
@@ -1700,6 +1694,12 @@ class SpeculativeJIT {
RegisterBank<GPRInfo> m_gprs;
RegisterBank<FPRInfo> m_fprs;

+// It is possible, during speculative generation, to reach a situation in which we
+// can statically determine a speculation will fail (for example, when two nodes
+// will make conflicting speculations about the same operand). In such cases this
+// flag is cleared, indicating no further code generation should take place.
+bool m_compileOkay;
+
Vector<MacroAssembler::Label> m_osrEntryHeads;

struct BranchRecord {
Source/JavaScriptCore/domjit/DOMJITSignature.h
@@ -43,19 +43,19 @@ class Signature {
constexpr Signature(uintptr_t unsafeFunction, const ClassInfo* classInfo, Effect effect, SpeculatedType result, Arguments... arguments)
: unsafeFunction(unsafeFunction)
, classInfo(classInfo)
-, effect(effect)
, result(result)
, arguments {static_cast<SpeculatedType>(arguments)...}
, argumentCount(sizeof...(Arguments))
+, effect(effect)
{
}

uintptr_t unsafeFunction;
const ClassInfo* const classInfo;
-const Effect effect;
const SpeculatedType result;
const SpeculatedType arguments[JSC_DOMJIT_SIGNATURE_MAX_ARGUMENTS];
const unsigned argumentCount;
+const Effect effect;
};

} }
Source/JavaScriptCore/heap/MarkingConstraintSolver.h
@@ -97,8 +97,8 @@ class MarkingConstraintSolver {
Vector<unsigned, 32> m_toExecuteSequentially;
Lock m_lock;
Condition m_condition;
-unsigned m_numThreadsThatMayProduceWork { 0 };
bool m_pickNextIsStillActive { true };
+unsigned m_numThreadsThatMayProduceWork { 0 };
Vector<VisitCounter, 16> m_visitCounters;
};

Source/JavaScriptCore/heap/SlotVisitor.h
@@ -234,13 +234,13 @@ class SlotVisitor {

MarkStackArray m_collectorStack;
MarkStackArray m_mutatorStack;
-bool m_ignoreNewOpaqueRoots { false }; // Useful as a debugging mode.

size_t m_bytesVisited;
size_t m_visitCount;
size_t m_nonCellVisitCount { 0 }; // Used for incremental draining, ignored otherwise.
Checked<size_t, RecordOverflow> m_extraMemorySize { 0 };
bool m_isInParallelMode;
+bool m_ignoreNewOpaqueRoots { false }; // Useful as a debugging mode.

HeapVersion m_markingVersion;

Source/JavaScriptCore/jit/CallFrameShuffleData.h
@@ -35,9 +35,9 @@ namespace JSC {
struct CallFrameShuffleData {
WTF_MAKE_FAST_ALLOCATED;
public:
-unsigned numLocals { UINT_MAX };
ValueRecovery callee;
Vector<ValueRecovery> args;
+unsigned numLocals { UINT_MAX };
unsigned numPassedArgs { UINT_MAX };
#if USE(JSVALUE64)
RegisterMap<ValueRecovery> registers;
Source/JavaScriptCore/jit/JITDivGenerator.h
@@ -70,8 +70,8 @@ class JITDivGenerator {
FPRReg m_rightFPR;
GPRReg m_scratchGPR;
FPRReg m_scratchFPR;
-ArithProfile* m_arithProfile;
bool m_didEmitFastPath { false };
+ArithProfile* m_arithProfile;

CCallHelpers::JumpList m_endJumpList;
CCallHelpers::JumpList m_slowPathJumpList;
