Skip to content

Commit

Permalink
Merge r181343 - Use std::numeric_limits<unsigned>::max() instead of (unsigned)-1.

Browse files Browse the repository at this point in the history

<https://webkit.org/b/142539>

Reviewed by Benjamin Poulain.

* jit/JIT.cpp:
(JSC::JIT::JIT):
(JSC::JIT::privateCompileMainPass):
(JSC::JIT::privateCompileSlowCases):
(JSC::JIT::privateCompile):
(JSC::JIT::privateCompileExceptionHandlers):
* jit/JITInlines.h:
(JSC::JIT::emitNakedCall):
(JSC::JIT::addSlowCase):
(JSC::JIT::addJump):
(JSC::JIT::emitJumpSlowToHot):
(JSC::JIT::emitGetVirtualRegister):
* jit/SlowPathCall.h:
(JSC::JITSlowPathCall::call):
* yarr/Yarr.h:
  • Loading branch information
Mark Lam authored and carlosgcampos committed Mar 11, 2015
1 parent fe7f84d commit ca2cc1c
Show file tree
Hide file tree
Showing 5 changed files with 40 additions and 17 deletions.
23 changes: 23 additions & 0 deletions Source/JavaScriptCore/ChangeLog
@@ -1,3 +1,26 @@
2015-03-10 Mark Lam <mark.lam@apple.com>

Use std::numeric_limits<unsigned>::max() instead of (unsigned)-1.
<https://webkit.org/b/142539>

Reviewed by Benjamin Poulain.

* jit/JIT.cpp:
(JSC::JIT::JIT):
(JSC::JIT::privateCompileMainPass):
(JSC::JIT::privateCompileSlowCases):
(JSC::JIT::privateCompile):
(JSC::JIT::privateCompileExceptionHandlers):
* jit/JITInlines.h:
(JSC::JIT::emitNakedCall):
(JSC::JIT::addSlowCase):
(JSC::JIT::addJump):
(JSC::JIT::emitJumpSlowToHot):
(JSC::JIT::emitGetVirtualRegister):
* jit/SlowPathCall.h:
(JSC::JITSlowPathCall::call):
* yarr/Yarr.h:

2015-03-10 Michael Catanzaro <mcatanzaro@igalia.com>

GCC: CRASH() should be annotated with NORETURN
Expand Down
14 changes: 7 additions & 7 deletions Source/JavaScriptCore/jit/JIT.cpp
Expand Up @@ -75,7 +75,7 @@ JIT::JIT(VM* vm, CodeBlock* codeBlock)
: JSInterfaceJIT(vm, codeBlock)
, m_interpreter(vm->interpreter)
, m_labels(codeBlock ? codeBlock->numberOfInstructions() : 0)
, m_bytecodeOffset((unsigned)-1)
, m_bytecodeOffset(std::numeric_limits<unsigned>::max())
, m_getByIdIndex(UINT_MAX)
, m_putByIdIndex(UINT_MAX)
, m_byValInstructionIndex(UINT_MAX)
Expand Down Expand Up @@ -317,7 +317,7 @@ void JIT::privateCompileMainPass()

#ifndef NDEBUG
// Reset this, in order to guard its use with ASSERTs.
m_bytecodeOffset = (unsigned)-1;
m_bytecodeOffset = std::numeric_limits<unsigned>::max();
#endif
}

Expand Down Expand Up @@ -451,7 +451,7 @@ void JIT::privateCompileSlowCases()

#ifndef NDEBUG
// Reset this, in order to guard its use with ASSERTs.
m_bytecodeOffset = (unsigned)-1;
m_bytecodeOffset = std::numeric_limits<unsigned>::max();
#endif
}

Expand Down Expand Up @@ -524,7 +524,7 @@ CompilationResult JIT::privateCompile(JITCompilationEffort effort)
#endif

if (m_codeBlock->codeType() == FunctionCode) {
ASSERT(m_bytecodeOffset == (unsigned)-1);
ASSERT(m_bytecodeOffset == std::numeric_limits<unsigned>::max());
if (shouldEmitProfiling()) {
for (int argument = 0; argument < m_codeBlock->numParameters(); ++argument) {
// If this is a constructor, then we want to put in a dummy profiling site (to
Expand Down Expand Up @@ -593,7 +593,7 @@ CompilationResult JIT::privateCompile(JITCompilationEffort effort)
emitNakedCall(m_vm->getCTIStub(arityFixupGenerator).code());

#if !ASSERT_DISABLED
m_bytecodeOffset = (unsigned)-1; // Reset this, in order to guard its use with ASSERTs.
m_bytecodeOffset = std::numeric_limits<unsigned>::max(); // Reset this, in order to guard its use with ASSERTs.
#endif

jump(beginLabel);
Expand Down Expand Up @@ -725,7 +725,7 @@ void JIT::privateCompileExceptionHandlers()
poke(GPRInfo::argumentGPR0);
poke(GPRInfo::argumentGPR1, 1);
#endif
m_calls.append(CallRecord(call(), (unsigned)-1, FunctionPtr(lookupExceptionHandlerFromCallerFrame).value()));
m_calls.append(CallRecord(call(), std::numeric_limits<unsigned>::max(), FunctionPtr(lookupExceptionHandlerFromCallerFrame).value()));
jumpToExceptionHandler();
}

Expand All @@ -741,7 +741,7 @@ void JIT::privateCompileExceptionHandlers()
poke(GPRInfo::argumentGPR0);
poke(GPRInfo::argumentGPR1, 1);
#endif
m_calls.append(CallRecord(call(), (unsigned)-1, FunctionPtr(lookupExceptionHandler).value()));
m_calls.append(CallRecord(call(), std::numeric_limits<unsigned>::max(), FunctionPtr(lookupExceptionHandler).value()));
jumpToExceptionHandler();
}
}
Expand Down
14 changes: 7 additions & 7 deletions Source/JavaScriptCore/jit/JITInlines.h
Expand Up @@ -119,7 +119,7 @@ ALWAYS_INLINE void JIT::emitLoadCharacterString(RegisterID src, RegisterID dst,

ALWAYS_INLINE JIT::Call JIT::emitNakedCall(CodePtr function)
{
ASSERT(m_bytecodeOffset != (unsigned)-1); // This method should only be called during hot/cold path generation, so that m_bytecodeOffset is set.
ASSERT(m_bytecodeOffset != std::numeric_limits<unsigned>::max()); // This method should only be called during hot/cold path generation, so that m_bytecodeOffset is set.
Call nakedCall = nearCall();
m_calls.append(CallRecord(nakedCall, m_bytecodeOffset, function.executableAddress()));
return nakedCall;
Expand Down Expand Up @@ -648,14 +648,14 @@ ALWAYS_INLINE void JIT::linkSlowCaseIfNotJSCell(Vector<SlowCaseEntry>::iterator&

ALWAYS_INLINE void JIT::addSlowCase(Jump jump)
{
ASSERT(m_bytecodeOffset != (unsigned)-1); // This method should only be called during hot/cold path generation, so that m_bytecodeOffset is set.
ASSERT(m_bytecodeOffset != std::numeric_limits<unsigned>::max()); // This method should only be called during hot/cold path generation, so that m_bytecodeOffset is set.

m_slowCases.append(SlowCaseEntry(jump, m_bytecodeOffset));
}

ALWAYS_INLINE void JIT::addSlowCase(JumpList jumpList)
{
ASSERT(m_bytecodeOffset != (unsigned)-1); // This method should only be called during hot/cold path generation, so that m_bytecodeOffset is set.
ASSERT(m_bytecodeOffset != std::numeric_limits<unsigned>::max()); // This method should only be called during hot/cold path generation, so that m_bytecodeOffset is set.

const JumpList::JumpVector& jumpVector = jumpList.jumps();
size_t size = jumpVector.size();
Expand All @@ -665,22 +665,22 @@ ALWAYS_INLINE void JIT::addSlowCase(JumpList jumpList)

ALWAYS_INLINE void JIT::addSlowCase()
{
ASSERT(m_bytecodeOffset != (unsigned)-1); // This method should only be called during hot/cold path generation, so that m_bytecodeOffset is set.
ASSERT(m_bytecodeOffset != std::numeric_limits<unsigned>::max()); // This method should only be called during hot/cold path generation, so that m_bytecodeOffset is set.

Jump emptyJump; // Doing it this way to make Windows happy.
m_slowCases.append(SlowCaseEntry(emptyJump, m_bytecodeOffset));
}

ALWAYS_INLINE void JIT::addJump(Jump jump, int relativeOffset)
{
ASSERT(m_bytecodeOffset != (unsigned)-1); // This method should only be called during hot/cold path generation, so that m_bytecodeOffset is set.
ASSERT(m_bytecodeOffset != std::numeric_limits<unsigned>::max()); // This method should only be called during hot/cold path generation, so that m_bytecodeOffset is set.

m_jmpTable.append(JumpTable(jump, m_bytecodeOffset + relativeOffset));
}

ALWAYS_INLINE void JIT::emitJumpSlowToHot(Jump jump, int relativeOffset)
{
ASSERT(m_bytecodeOffset != (unsigned)-1); // This method should only be called during hot/cold path generation, so that m_bytecodeOffset is set.
ASSERT(m_bytecodeOffset != std::numeric_limits<unsigned>::max()); // This method should only be called during hot/cold path generation, so that m_bytecodeOffset is set.

jump.linkTo(m_labels[m_bytecodeOffset + relativeOffset], this);
}
Expand Down Expand Up @@ -1013,7 +1013,7 @@ ALWAYS_INLINE bool JIT::getOperandConstantImmediateInt(int op1, int op2, int& op
// get arg puts an arg from the SF register array into a h/w register
ALWAYS_INLINE void JIT::emitGetVirtualRegister(int src, RegisterID dst)
{
ASSERT(m_bytecodeOffset != (unsigned)-1); // This method should only be called during hot/cold path generation, so that m_bytecodeOffset is set.
ASSERT(m_bytecodeOffset != std::numeric_limits<unsigned>::max()); // This method should only be called during hot/cold path generation, so that m_bytecodeOffset is set.

// TODO: we want to reuse values that are already in registers if we can - add a register allocator!
if (m_codeBlock->isConstantRegisterIndex(src)) {
Expand Down
4 changes: 2 additions & 2 deletions Source/JavaScriptCore/jit/SlowPathCall.h
Expand Up @@ -45,7 +45,7 @@ class JITSlowPathCall {
JIT::Call call()
{
#if ENABLE(OPCODE_SAMPLING)
if (m_jit->m_bytecodeOffset != (unsigned)-1)
if (m_jit->m_bytecodeOffset != std::numeric_limits<unsigned>::max())
m_jit->sampleInstruction(m_jit->m_codeBlock->instructions().begin() + m_jit->m_bytecodeOffset, true);
#endif
m_jit->updateTopCallFrame();
Expand Down Expand Up @@ -73,7 +73,7 @@ class JITSlowPathCall {
#endif

#if ENABLE(OPCODE_SAMPLING)
if (m_jit->m_bytecodeOffset != (unsigned)-1)
if (m_jit->m_bytecodeOffset != std::numeric_limits<unsigned>::max())
m_jit->sampleInstruction(m_jit->m_codeBlock->instructions().begin() + m_jit->m_bytecodeOffset, false);
#endif

Expand Down
2 changes: 1 addition & 1 deletion Source/JavaScriptCore/yarr/Yarr.h
Expand Up @@ -43,7 +43,7 @@ namespace JSC { namespace Yarr {
#define YarrStackSpaceForBackTrackInfoParentheses 2

static const unsigned quantifyInfinite = UINT_MAX;
static const unsigned offsetNoMatch = (unsigned)-1;
static const unsigned offsetNoMatch = std::numeric_limits<unsigned>::max();

// The below limit restricts the number of "recursive" match calls in order to
// avoid spending exponential time on complex regular expressions.
Expand Down

0 comments on commit ca2cc1c

Please sign in to comment.