Use std::numeric_limits<unsigned>::max() instead of (unsigned)-1.
<https://webkit.org/b/142539>

Reviewed by Benjamin Poulain.

* jit/JIT.cpp:
(JSC::JIT::JIT):
(JSC::JIT::privateCompileMainPass):
(JSC::JIT::privateCompileSlowCases):
(JSC::JIT::privateCompile):
(JSC::JIT::privateCompileExceptionHandlers):
* jit/JITInlines.h:
(JSC::JIT::emitNakedCall):
(JSC::JIT::addSlowCase):
(JSC::JIT::addJump):
(JSC::JIT::emitJumpSlowToHot):
(JSC::JIT::emitGetVirtualRegister):
* jit/SlowPathCall.h:
(JSC::JITSlowPathCall::call):
* yarr/Yarr.h:


Canonical link: https://commits.webkit.org/160584@main
git-svn-id: https://svn.webkit.org/repository/webkit/trunk@181343 268f45cc-cd09-0410-ab3c-d52691b4dbfc
Mark Lam committed Mar 10, 2015
1 parent ff4ed32 commit 9b15b21d1e4640a1a673cd7d9b53b20bb0d84851
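
For context (not part of the patch): a minimal standalone sketch showing why the two spellings are interchangeable for an unsigned type, and why the named constant is preferred. Variable names here are illustrative only.

    #include <cassert>
    #include <climits>
    #include <limits>

    int main()
    {
        // Old spelling: relies on the reader knowing that converting -1 to an
        // unsigned type wraps modulo 2^N, yielding the maximum value.
        unsigned oldStyle = (unsigned)-1;

        // New spelling: states "maximum unsigned value" directly.
        unsigned newStyle = std::numeric_limits<unsigned>::max();

        assert(oldStyle == newStyle); // guaranteed equal by integral conversion rules
        assert(newStyle == UINT_MAX); // also equal to the <climits> macro
        return 0;
    }
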
@@ -1,3 +1,26 @@
2015-03-10 Mark Lam <mark.lam@apple.com>

Use std::numeric_limits<unsigned>::max() instead of (unsigned)-1.
<https://webkit.org/b/142539>

Reviewed by Benjamin Poulain.

* jit/JIT.cpp:
(JSC::JIT::JIT):
(JSC::JIT::privateCompileMainPass):
(JSC::JIT::privateCompileSlowCases):
(JSC::JIT::privateCompile):
(JSC::JIT::privateCompileExceptionHandlers):
* jit/JITInlines.h:
(JSC::JIT::emitNakedCall):
(JSC::JIT::addSlowCase):
(JSC::JIT::addJump):
(JSC::JIT::emitJumpSlowToHot):
(JSC::JIT::emitGetVirtualRegister):
* jit/SlowPathCall.h:
(JSC::JITSlowPathCall::call):
* yarr/Yarr.h:

2015-03-10 Mark Lam <mark.lam@apple.com>

[Win] JSC Build Warnings Need to be Resolved.
@@ -75,7 +75,7 @@ JIT::JIT(VM* vm, CodeBlock* codeBlock)
: JSInterfaceJIT(vm, codeBlock)
, m_interpreter(vm->interpreter)
, m_labels(codeBlock ? codeBlock->numberOfInstructions() : 0)
-    , m_bytecodeOffset((unsigned)-1)
+    , m_bytecodeOffset(std::numeric_limits<unsigned>::max())
, m_getByIdIndex(UINT_MAX)
, m_putByIdIndex(UINT_MAX)
, m_byValInstructionIndex(UINT_MAX)
@@ -317,7 +317,7 @@ void JIT::privateCompileMainPass()

#ifndef NDEBUG
// Reset this, in order to guard its use with ASSERTs.
-    m_bytecodeOffset = (unsigned)-1;
+    m_bytecodeOffset = std::numeric_limits<unsigned>::max();
#endif
}

@@ -450,7 +450,7 @@ void JIT::privateCompileSlowCases()

#ifndef NDEBUG
// Reset this, in order to guard its use with ASSERTs.
-    m_bytecodeOffset = (unsigned)-1;
+    m_bytecodeOffset = std::numeric_limits<unsigned>::max();
#endif
}

@@ -518,7 +518,7 @@ CompilationResult JIT::privateCompile(JITCompilationEffort effort)
#endif

if (m_codeBlock->codeType() == FunctionCode) {
-        ASSERT(m_bytecodeOffset == (unsigned)-1);
+        ASSERT(m_bytecodeOffset == std::numeric_limits<unsigned>::max());
if (shouldEmitProfiling()) {
for (int argument = 0; argument < m_codeBlock->numParameters(); ++argument) {
// If this is a constructor, then we want to put in a dummy profiling site (to
@@ -587,7 +587,7 @@ CompilationResult JIT::privateCompile(JITCompilationEffort effort)
emitNakedCall(m_vm->getCTIStub(arityFixupGenerator).code());

#if !ASSERT_DISABLED
-        m_bytecodeOffset = (unsigned)-1; // Reset this, in order to guard its use with ASSERTs.
+        m_bytecodeOffset = std::numeric_limits<unsigned>::max(); // Reset this, in order to guard its use with ASSERTs.
#endif

jump(beginLabel);
@@ -719,7 +719,7 @@ void JIT::privateCompileExceptionHandlers()
poke(GPRInfo::argumentGPR0);
poke(GPRInfo::argumentGPR1, 1);
#endif
-        m_calls.append(CallRecord(call(), (unsigned)-1, FunctionPtr(lookupExceptionHandlerFromCallerFrame).value()));
+        m_calls.append(CallRecord(call(), std::numeric_limits<unsigned>::max(), FunctionPtr(lookupExceptionHandlerFromCallerFrame).value()));
jumpToExceptionHandler();
}

@@ -735,7 +735,7 @@ void JIT::privateCompileExceptionHandlers()
poke(GPRInfo::argumentGPR0);
poke(GPRInfo::argumentGPR1, 1);
#endif
-        m_calls.append(CallRecord(call(), (unsigned)-1, FunctionPtr(lookupExceptionHandler).value()));
+        m_calls.append(CallRecord(call(), std::numeric_limits<unsigned>::max(), FunctionPtr(lookupExceptionHandler).value()));
jumpToExceptionHandler();
}
}
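
The hunks above all guard the same sentinel pattern: m_bytecodeOffset holds the maximum unsigned value whenever no bytecode is being compiled, and ASSERTs trip if a helper that needs a live offset runs outside hot/cold path generation. A hedged sketch of that pattern follows; the class and member names are hypothetical, not WebKit's actual types.

    #include <cassert>
    #include <limits>

    class MiniCompiler {
    public:
        // Sentinel meaning "no bytecode is currently being compiled".
        static constexpr unsigned notCompiling = std::numeric_limits<unsigned>::max();

        void compileOne(unsigned bytecodeOffset)
        {
            m_bytecodeOffset = bytecodeOffset; // valid only for this call's duration
            emitSomething();
            m_bytecodeOffset = notCompiling;   // reset so stray later uses trip the assert
        }

    private:
        void emitSomething()
        {
            // Guard: this helper is only meaningful while an offset is set.
            assert(m_bytecodeOffset != notCompiling);
        }

        unsigned m_bytecodeOffset { notCompiling };
    };
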
@@ -119,7 +119,7 @@ ALWAYS_INLINE void JIT::emitLoadCharacterString(RegisterID src, RegisterID dst,

ALWAYS_INLINE JIT::Call JIT::emitNakedCall(CodePtr function)
{
-    ASSERT(m_bytecodeOffset != (unsigned)-1); // This method should only be called during hot/cold path generation, so that m_bytecodeOffset is set.
+    ASSERT(m_bytecodeOffset != std::numeric_limits<unsigned>::max()); // This method should only be called during hot/cold path generation, so that m_bytecodeOffset is set.
Call nakedCall = nearCall();
m_calls.append(CallRecord(nakedCall, m_bytecodeOffset, function.executableAddress()));
return nakedCall;
@@ -648,14 +648,14 @@ ALWAYS_INLINE void JIT::linkSlowCaseIfNotJSCell(Vector<SlowCaseEntry>::iterator&

ALWAYS_INLINE void JIT::addSlowCase(Jump jump)
{
-    ASSERT(m_bytecodeOffset != (unsigned)-1); // This method should only be called during hot/cold path generation, so that m_bytecodeOffset is set.
+    ASSERT(m_bytecodeOffset != std::numeric_limits<unsigned>::max()); // This method should only be called during hot/cold path generation, so that m_bytecodeOffset is set.

m_slowCases.append(SlowCaseEntry(jump, m_bytecodeOffset));
}

ALWAYS_INLINE void JIT::addSlowCase(JumpList jumpList)
{
-    ASSERT(m_bytecodeOffset != (unsigned)-1); // This method should only be called during hot/cold path generation, so that m_bytecodeOffset is set.
+    ASSERT(m_bytecodeOffset != std::numeric_limits<unsigned>::max()); // This method should only be called during hot/cold path generation, so that m_bytecodeOffset is set.

const JumpList::JumpVector& jumpVector = jumpList.jumps();
size_t size = jumpVector.size();
@@ -665,22 +665,22 @@ ALWAYS_INLINE void JIT::addSlowCase(JumpList jumpList)

ALWAYS_INLINE void JIT::addSlowCase()
{
-    ASSERT(m_bytecodeOffset != (unsigned)-1); // This method should only be called during hot/cold path generation, so that m_bytecodeOffset is set.
+    ASSERT(m_bytecodeOffset != std::numeric_limits<unsigned>::max()); // This method should only be called during hot/cold path generation, so that m_bytecodeOffset is set.

Jump emptyJump; // Doing it this way to make Windows happy.
m_slowCases.append(SlowCaseEntry(emptyJump, m_bytecodeOffset));
}

ALWAYS_INLINE void JIT::addJump(Jump jump, int relativeOffset)
{
-    ASSERT(m_bytecodeOffset != (unsigned)-1); // This method should only be called during hot/cold path generation, so that m_bytecodeOffset is set.
+    ASSERT(m_bytecodeOffset != std::numeric_limits<unsigned>::max()); // This method should only be called during hot/cold path generation, so that m_bytecodeOffset is set.

m_jmpTable.append(JumpTable(jump, m_bytecodeOffset + relativeOffset));
}

ALWAYS_INLINE void JIT::emitJumpSlowToHot(Jump jump, int relativeOffset)
{
-    ASSERT(m_bytecodeOffset != (unsigned)-1); // This method should only be called during hot/cold path generation, so that m_bytecodeOffset is set.
+    ASSERT(m_bytecodeOffset != std::numeric_limits<unsigned>::max()); // This method should only be called during hot/cold path generation, so that m_bytecodeOffset is set.

jump.linkTo(m_labels[m_bytecodeOffset + relativeOffset], this);
}
@@ -1018,7 +1018,7 @@ ALWAYS_INLINE bool JIT::getOperandConstantImmediateInt(int op1, int op2, int& op
// get arg puts an arg from the SF register array into a h/w register
ALWAYS_INLINE void JIT::emitGetVirtualRegister(int src, RegisterID dst)
{
-    ASSERT(m_bytecodeOffset != (unsigned)-1); // This method should only be called during hot/cold path generation, so that m_bytecodeOffset is set.
+    ASSERT(m_bytecodeOffset != std::numeric_limits<unsigned>::max()); // This method should only be called during hot/cold path generation, so that m_bytecodeOffset is set.

// TODO: we want to reuse values that are already in registers if we can - add a register allocator!
if (m_codeBlock->isConstantRegisterIndex(src)) {
@@ -45,7 +45,7 @@ class JITSlowPathCall {
JIT::Call call()
{
#if ENABLE(OPCODE_SAMPLING)
-        if (m_jit->m_bytecodeOffset != (unsigned)-1)
+        if (m_jit->m_bytecodeOffset != std::numeric_limits<unsigned>::max())
m_jit->sampleInstruction(m_jit->m_codeBlock->instructions().begin() + m_jit->m_bytecodeOffset, true);
#endif
m_jit->updateTopCallFrame();
@@ -73,7 +73,7 @@ class JITSlowPathCall {
#endif

#if ENABLE(OPCODE_SAMPLING)
-        if (m_jit->m_bytecodeOffset != (unsigned)-1)
+        if (m_jit->m_bytecodeOffset != std::numeric_limits<unsigned>::max())
m_jit->sampleInstruction(m_jit->m_codeBlock->instructions().begin() + m_jit->m_bytecodeOffset, false);
#endif

@@ -43,7 +43,7 @@ namespace JSC { namespace Yarr {
#define YarrStackSpaceForBackTrackInfoParentheses 2

static const unsigned quantifyInfinite = UINT_MAX;
-static const unsigned offsetNoMatch = (unsigned)-1;
+static const unsigned offsetNoMatch = std::numeric_limits<unsigned>::max();

// The below limit restricts the number of "recursive" match calls in order to
// avoid spending exponential time on complex regular expressions.
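
offsetNoMatch serves as a sentinel return value for match offsets, since any valid offset is strictly less than the maximum. A hedged sketch of how such a sentinel is typically consumed; the toy matcher below is illustrative only, not Yarr's actual API.

    #include <limits>
    #include <string>

    static const unsigned offsetNoMatch = std::numeric_limits<unsigned>::max();

    // Toy "matcher": returns the offset of the first occurrence of a
    // character, or offsetNoMatch if it does not appear.
    unsigned findOffset(const std::string& subject, char c)
    {
        for (unsigned i = 0; i < subject.size(); ++i) {
            if (subject[i] == c)
                return i;
        }
        return offsetNoMatch;
    }

    bool matched(unsigned offset)
    {
        return offset != offsetNoMatch; // sentinel comparison, as in Yarr
    }
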
