@@ -81,100 +81,6 @@ void Deoptimizer::PatchCodeForDeoptimization(Isolate* isolate, Code* code) {
}


// ARM encoding of the conditional branch ("bpl" over four instructions)
// that guards the back-edge interrupt call; used both to recognize the
// unpatched state and to verify it after reverting a patch.
static const int32_t kBranchBeforeInterrupt = 0x5a000004;

// The back edge bookkeeping code matches the pattern:
//
// <decrement profiling counter>
// 2a 00 00 01 bpl ok
// e5 9f c? ?? ldr ip, [pc, <interrupt stub address>]
// e1 2f ff 3c blx ip
// ok-label
//
// We patch the code to the following form:
//
// <decrement profiling counter>
// e1 a0 00 00 mov r0, r0 (NOP)
// e5 9f c? ?? ldr ip, [pc, <on-stack replacement address>]
// e1 2f ff 3c blx ip
// ok-label

// Rewrites the back-edge check ending at |pc_after| so that it calls
// |replacement_code| instead of the interrupt stub (see the pattern
// comment above): the conditional branch guarding the call becomes a
// NOP, and the literal-pool word holding the call target is redirected.
void Deoptimizer::PatchInterruptCodeAt(Code* unoptimized_code,
                                       Address pc_after,
                                       Code* replacement_code) {
  static const int kInstrSize = Assembler::kInstrSize;
  // NOP out the "bpl ok" three instructions back so the call sequence
  // is always taken.
  CodePatcher patcher(pc_after - 3 * kInstrSize, 1);
  patcher.masm()->nop();
  // The ldr two instructions back carries a 12-bit immediate: the offset
  // of the literal-pool slot relative to the ldr's pc (+8), which is
  // exactly |pc_after|.  Point that slot at the replacement entry.
  uint32_t target_offset =
      Memory::uint16_at(pc_after - 2 * kInstrSize) & 0xfff;
  Address target_slot = pc_after + target_offset;
  Memory::uint32_at(target_slot) =
      reinterpret_cast<uint32_t>(replacement_code->entry());

  // Let incremental marking know the embedded code target changed.
  unoptimized_code->GetHeap()->incremental_marking()->RecordCodeTargetPatch(
      unoptimized_code, pc_after - 2 * kInstrSize, replacement_code);
}


// Undoes PatchInterruptCodeAt: reinstates the conditional branch that
// skips the call, and points the literal-pool slot back at the interrupt
// stub |interrupt_code|.
void Deoptimizer::RevertInterruptCodeAt(Code* unoptimized_code,
                                        Address pc_after,
                                        Code* interrupt_code) {
  static const int kInstrSize = Assembler::kInstrSize;
  // Re-emit "bpl ok"; the ok-label sits four instructions past the branch.
  CodePatcher patcher(pc_after - 3 * kInstrSize, 1);
  patcher.masm()->b(4 * kInstrSize, pl);
  ASSERT_EQ(kBranchBeforeInterrupt,
            Memory::int32_at(pc_after - 3 * kInstrSize));
  // Point the literal-pool slot used by the ldr back at the interrupt
  // stub's entry.
  uint32_t target_offset =
      Memory::uint16_at(pc_after - 2 * kInstrSize) & 0xfff;
  Address target_slot = pc_after + target_offset;
  Memory::uint32_at(target_slot) =
      reinterpret_cast<uint32_t>(interrupt_code->entry());

  // Let incremental marking know the embedded code target changed.
  interrupt_code->GetHeap()->incremental_marking()->RecordCodeTargetPatch(
      unoptimized_code, pc_after - 2 * kInstrSize, interrupt_code);
}


#ifdef DEBUG
// Debug-only: inspects the back-edge sequence ending at |pc_after| and
// reports whether it is in the original interrupt-calling state or has
// been patched for on-stack replacement.
Deoptimizer::InterruptPatchState Deoptimizer::GetInterruptPatchState(
    Isolate* isolate,
    Code* unoptimized_code,
    Address pc_after) {
  static const int kInstrSize = Assembler::kInstrSize;
  // The sequence always ends with "blx ip" right before |pc_after|.
  ASSERT(Memory::int32_at(pc_after - kInstrSize) == kBlxIp);

  uint32_t target_offset =
      Memory::uint16_at(pc_after - 2 * kInstrSize) & 0xfff;
  Address target_slot = pc_after + target_offset;

  if (!Assembler::IsNop(Assembler::instr_at(pc_after - 3 * kInstrSize))) {
    // Guard has not been NOPed out: the sequence must still be the
    // original one, branching over a call to the interrupt stub.
    Code* interrupt_builtin =
        isolate->builtins()->builtin(Builtins::kInterruptCheck);
    ASSERT(Assembler::IsLdrPcImmediateOffset(
        Assembler::instr_at(pc_after - 2 * kInstrSize)));
    ASSERT_EQ(kBranchBeforeInterrupt,
              Memory::int32_at(pc_after - 3 * kInstrSize));
    ASSERT(reinterpret_cast<uint32_t>(interrupt_builtin->entry()) ==
           Memory::uint32_at(target_slot));
    return NOT_PATCHED;
  }

  // Guard is a NOP: the literal-pool slot must hold the OSR builtin.
  ASSERT(Assembler::IsLdrPcImmediateOffset(
      Assembler::instr_at(pc_after - 2 * kInstrSize)));
  Code* osr_builtin =
      isolate->builtins()->builtin(Builtins::kOnStackReplacement);
  ASSERT(reinterpret_cast<uint32_t>(osr_builtin->entry()) ==
         Memory::uint32_at(target_slot));
  return PATCHED_FOR_OSR;
}
#endif // DEBUG


void Deoptimizer::FillInputFrame(Address tos, JavaScriptFrame* frame) {
// Set the register values. The values are not important as there are no
// callee saved registers in JavaScript frames, so all registers are
@@ -201,10 +107,7 @@ void Deoptimizer::SetPlatformCompiledStubRegisters(
ApiFunction function(descriptor->deoptimization_handler_);
ExternalReference xref(&function, ExternalReference::BUILTIN_CALL, isolate_);
intptr_t handler = reinterpret_cast<intptr_t>(xref.address());
int params = descriptor->register_param_count_;
if (descriptor->stack_parameter_count_ != NULL) {
params++;
}
int params = descriptor->environment_length();
output_frame->SetRegister(r0.code(), params);
output_frame->SetRegister(r1.code(), handler);
}
@@ -362,8 +265,8 @@ void Deoptimizer::EntryGenerator::Generate() {
__ bind(&inner_push_loop);
__ sub(r3, r3, Operand(sizeof(uint32_t)));
__ add(r6, r2, Operand(r3));
__ ldr(r7, MemOperand(r6, FrameDescription::frame_content_offset()));
__ push(r7);
__ ldr(r6, MemOperand(r6, FrameDescription::frame_content_offset()));
__ push(r6);
__ bind(&inner_loop_header);
__ cmp(r3, Operand::Zero());
__ b(ne, &inner_push_loop); // test for gt?
@@ -409,9 +312,9 @@ void Deoptimizer::EntryGenerator::Generate() {
__ InitializeRootRegister();

__ pop(ip); // remove pc
__ pop(r7); // get continuation, leave pc on stack
__ pop(ip); // get continuation, leave pc on stack
__ pop(lr);
__ Jump(r7);
__ Jump(ip);
__ stop("Unreachable.");
}

@@ -64,7 +64,7 @@ const RegList kCalleeSaved =
1 << 4 | // r4 v1
1 << 5 | // r5 v2
1 << 6 | // r6 v3
1 << 7 | // r7 v4
1 << 7 | // r7 v4 (pp in JavaScript code)
1 << 8 | // r8 v5 (cp in JavaScript code)
kR9Available << 9 | // r9 v6
1 << 10 | // r10 v7
@@ -148,13 +148,10 @@ void FullCodeGenerator::Generate() {
// receiver object). r5 is zero for method calls and non-zero for
// function calls.
if (!info->is_classic_mode() || info->is_native()) {
Label ok;
__ cmp(r5, Operand::Zero());
__ b(eq, &ok);
int receiver_offset = info->scope()->num_parameters() * kPointerSize;
__ LoadRoot(r2, Heap::kUndefinedValueRootIndex);
__ str(r2, MemOperand(sp, receiver_offset));
__ bind(&ok);
__ str(r2, MemOperand(sp, receiver_offset), ne);
}

// Open a frame scope to indicate that there is a frame on the stack. The
@@ -163,16 +160,7 @@ void FullCodeGenerator::Generate() {
FrameScope frame_scope(masm_, StackFrame::MANUAL);

info->set_prologue_offset(masm_->pc_offset());
{
PredictableCodeSizeScope predictible_code_size_scope(
masm_, kNoCodeAgeSequenceLength * Assembler::kInstrSize);
// The following three instructions must remain together and unmodified
// for code aging to work properly.
__ stm(db_w, sp, r1.bit() | cp.bit() | fp.bit() | lr.bit());
__ nop(ip.code());
// Adjust FP to point to saved FP.
__ add(fp, sp, Operand(2 * kPointerSize));
}
__ Prologue(BUILD_FUNCTION_FRAME);
info->AddNoFrameRange(0, masm_->pc_offset());

{ Comment cmnt(masm_, "[ Allocate locals");
@@ -1167,7 +1155,7 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
Handle<Object>(Smi::FromInt(TypeFeedbackCells::kForInFastCaseMarker),
isolate()));
RecordTypeFeedbackCell(stmt->ForInFeedbackId(), cell);
__ LoadHeapObject(r1, cell);
__ Move(r1, cell);
__ mov(r2, Operand(Smi::FromInt(TypeFeedbackCells::kForInSlowCaseMarker)));
__ str(r2, FieldMemOperand(r1, Cell::kValueOffset));

@@ -1651,13 +1639,11 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
__ mov(r0, Operand(Smi::FromInt(flags)));
int properties_count = constant_properties->length() / 2;
if ((FLAG_track_double_fields && expr->may_store_doubles()) ||
expr->depth() > 1) {
__ Push(r3, r2, r1, r0);
__ CallRuntime(Runtime::kCreateObjectLiteral, 4);
} else if (Serializer::enabled() || flags != ObjectLiteral::kFastElements ||
expr->depth() > 1 || Serializer::enabled() ||
flags != ObjectLiteral::kFastElements ||
properties_count > FastCloneShallowObjectStub::kMaximumClonedProperties) {
__ Push(r3, r2, r1, r0);
__ CallRuntime(Runtime::kCreateObjectLiteralShallow, 4);
__ CallRuntime(Runtime::kCreateObjectLiteral, 4);
} else {
FastCloneShallowObjectStub stub(properties_count);
__ CallStub(&stub);
@@ -3592,8 +3578,8 @@ void FullCodeGenerator::EmitSetValueOf(CallRuntime* expr) {
void FullCodeGenerator::EmitNumberToString(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
ASSERT_EQ(args->length(), 1);
// Load the argument on the stack and call the stub.
VisitForStackValue(args->at(0));
// Load the argument into r0 and call the stub.
VisitForAccumulatorValue(args->at(0));

NumberToStringStub stub;
__ CallStub(&stub);
@@ -3964,9 +3950,8 @@ void FullCodeGenerator::EmitGetCachedArrayIndex(CallRuntime* expr) {


void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) {
Label bailout, done, one_char_separator, long_separator,
non_trivial_array, not_size_one_array, loop,
empty_separator_loop, one_char_separator_loop,
Label bailout, done, one_char_separator, long_separator, non_trivial_array,
not_size_one_array, loop, empty_separator_loop, one_char_separator_loop,
one_char_separator_loop_entry, long_separator_loop;
ZoneList<Expression*>* args = expr->arguments();
ASSERT(args->length() == 2);
@@ -3984,19 +3969,18 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) {
Register string = r4;
Register element = r5;
Register elements_end = r6;
Register scratch1 = r7;
Register scratch2 = r9;
Register scratch = r9;

// Separator operand is on the stack.
__ pop(separator);

// Check that the array is a JSArray.
__ JumpIfSmi(array, &bailout);
__ CompareObjectType(array, scratch1, scratch2, JS_ARRAY_TYPE);
__ CompareObjectType(array, scratch, array_length, JS_ARRAY_TYPE);
__ b(ne, &bailout);

// Check that the array has fast elements.
__ CheckFastElements(scratch1, scratch2, &bailout);
__ CheckFastElements(scratch, array_length, &bailout);

// If the array has length zero, return the empty string.
__ ldr(array_length, FieldMemOperand(array, JSArray::kLengthOffset));
@@ -4033,11 +4017,11 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) {
__ bind(&loop);
__ ldr(string, MemOperand(element, kPointerSize, PostIndex));
__ JumpIfSmi(string, &bailout);
__ ldr(scratch1, FieldMemOperand(string, HeapObject::kMapOffset));
__ ldrb(scratch1, FieldMemOperand(scratch1, Map::kInstanceTypeOffset));
__ JumpIfInstanceTypeIsNotSequentialAscii(scratch1, scratch2, &bailout);
__ ldr(scratch1, FieldMemOperand(string, SeqOneByteString::kLengthOffset));
__ add(string_length, string_length, Operand(scratch1), SetCC);
__ ldr(scratch, FieldMemOperand(string, HeapObject::kMapOffset));
__ ldrb(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
__ JumpIfInstanceTypeIsNotSequentialAscii(scratch, scratch, &bailout);
__ ldr(scratch, FieldMemOperand(string, SeqOneByteString::kLengthOffset));
__ add(string_length, string_length, Operand(scratch), SetCC);
__ b(vs, &bailout);
__ cmp(element, elements_end);
__ b(lt, &loop);
@@ -4058,23 +4042,23 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) {

// Check that the separator is a flat ASCII string.
__ JumpIfSmi(separator, &bailout);
__ ldr(scratch1, FieldMemOperand(separator, HeapObject::kMapOffset));
__ ldrb(scratch1, FieldMemOperand(scratch1, Map::kInstanceTypeOffset));
__ JumpIfInstanceTypeIsNotSequentialAscii(scratch1, scratch2, &bailout);
__ ldr(scratch, FieldMemOperand(separator, HeapObject::kMapOffset));
__ ldrb(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
__ JumpIfInstanceTypeIsNotSequentialAscii(scratch, scratch, &bailout);

// Add (separator length times array_length) - separator length to the
// string_length to get the length of the result string. array_length is not
// smi but the other values are, so the result is a smi
__ ldr(scratch1, FieldMemOperand(separator, SeqOneByteString::kLengthOffset));
__ sub(string_length, string_length, Operand(scratch1));
__ smull(scratch2, ip, array_length, scratch1);
__ ldr(scratch, FieldMemOperand(separator, SeqOneByteString::kLengthOffset));
__ sub(string_length, string_length, Operand(scratch));
__ smull(scratch, ip, array_length, scratch);
// Check for smi overflow. No overflow if higher 33 bits of 64-bit result are
// zero.
__ cmp(ip, Operand::Zero());
__ b(ne, &bailout);
__ tst(scratch2, Operand(0x80000000));
__ tst(scratch, Operand(0x80000000));
__ b(ne, &bailout);
__ add(string_length, string_length, Operand(scratch2), SetCC);
__ add(string_length, string_length, Operand(scratch), SetCC);
__ b(vs, &bailout);
__ SmiUntag(string_length);

@@ -4091,9 +4075,9 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) {
// array_length: Length of the array.
__ AllocateAsciiString(result,
string_length,
scratch1,
scratch2,
elements_end,
scratch,
string, // used as scratch
elements_end, // used as scratch
&bailout);
// Prepare for looping. Set up elements_end to end of the array. Set
// result_pos to the position of the result where to write the first
@@ -4106,8 +4090,8 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) {
Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag));

// Check the length of the separator.
__ ldr(scratch1, FieldMemOperand(separator, SeqOneByteString::kLengthOffset));
__ cmp(scratch1, Operand(Smi::FromInt(1)));
__ ldr(scratch, FieldMemOperand(separator, SeqOneByteString::kLengthOffset));
__ cmp(scratch, Operand(Smi::FromInt(1)));
__ b(eq, &one_char_separator);
__ b(gt, &long_separator);

@@ -4125,7 +4109,7 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) {
__ add(string,
string,
Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag));
__ CopyBytes(string, result_pos, string_length, scratch1);
__ CopyBytes(string, result_pos, string_length, scratch);
__ cmp(element, elements_end);
__ b(lt, &empty_separator_loop); // End while (element < elements_end).
ASSERT(result.is(r0));
@@ -4157,7 +4141,7 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) {
__ add(string,
string,
Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag));
__ CopyBytes(string, result_pos, string_length, scratch1);
__ CopyBytes(string, result_pos, string_length, scratch);
__ cmp(element, elements_end);
__ b(lt, &one_char_separator_loop); // End while (element < elements_end).
ASSERT(result.is(r0));
@@ -4178,7 +4162,7 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) {
__ add(string,
separator,
Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag));
__ CopyBytes(string, result_pos, string_length, scratch1);
__ CopyBytes(string, result_pos, string_length, scratch);

__ bind(&long_separator);
__ ldr(string, MemOperand(element, kPointerSize, PostIndex));
@@ -4187,7 +4171,7 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) {
__ add(string,
string,
Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag));
__ CopyBytes(string, result_pos, string_length, scratch1);
__ CopyBytes(string, result_pos, string_length, scratch);
__ cmp(element, elements_end);
__ b(lt, &long_separator_loop); // End while (element < elements_end).
ASSERT(result.is(r0));
@@ -4894,6 +4878,91 @@ FullCodeGenerator::NestedStatement* FullCodeGenerator::TryFinally::Exit(

#undef __


// ARM encoding of the conditional branch ("bpl" over four instructions)
// that guards the back-edge interrupt call; used to recognize the
// INTERRUPT state and to verify it after patching back.
static const int32_t kBranchBeforeInterrupt = 0x5a000004;


// Switches the back-edge sequence ending at |pc| into |target_state|:
// either the original interrupt check, or one of the OSR variants.  The
// guard instruction (three words back) is rewritten, and the literal-pool
// word feeding the "ldr ip" is redirected to |replacement_code|'s entry.
void BackEdgeTable::PatchAt(Code* unoptimized_code,
                            Address pc,
                            BackEdgeState target_state,
                            Code* replacement_code) {
  static const int kInstrSize = Assembler::kInstrSize;
  Address guard_address = pc - 3 * kInstrSize;
  Address ldr_address = pc - 2 * kInstrSize;
  CodePatcher patcher(guard_address, 1);

  if (target_state == INTERRUPT) {
    // Restore the original sequence:
    // <decrement profiling counter>
    // 2a 00 00 01       bpl ok
    // e5 9f c? ??       ldr ip, [pc, <interrupt stub address>]
    // e1 2f ff 3c       blx ip
    // ok-label
    patcher.masm()->b(4 * kInstrSize, pl);  // Jump offset is 4 instructions.
    ASSERT_EQ(kBranchBeforeInterrupt, Memory::int32_at(guard_address));
  } else {
    // ON_STACK_REPLACEMENT or OSR_AFTER_STACK_CHECK:
    // <decrement profiling counter>
    // e1 a0 00 00       mov r0, r0 (NOP)
    // e5 9f c? ??       ldr ip, [pc, <on-stack replacement address>]
    // e1 2f ff 3c       blx ip
    // ok-label
    patcher.masm()->nop();
  }

  // Redirect the literal-pool slot addressed by the ldr's 12-bit
  // pc-relative immediate (its base resolves to |pc| here).
  uint32_t target_offset = Memory::uint16_at(ldr_address) & 0xfff;
  Address target_slot = pc + target_offset;
  Memory::uint32_at(target_slot) =
      reinterpret_cast<uint32_t>(replacement_code->entry());

  // Let incremental marking know the embedded code target changed.
  unoptimized_code->GetHeap()->incremental_marking()->RecordCodeTargetPatch(
      unoptimized_code, ldr_address, replacement_code);
}


// Decodes which state the back-edge sequence ending at |pc| is currently
// in, by looking at the guard instruction and at the call target stored
// in the literal pool.
BackEdgeTable::BackEdgeState BackEdgeTable::GetBackEdgeState(
    Isolate* isolate,
    Code* unoptimized_code,
    Address pc) {
  static const int kInstrSize = Assembler::kInstrSize;
  // The sequence always ends with "blx ip" right before |pc|.
  ASSERT(Memory::int32_at(pc - kInstrSize) == kBlxIp);

  Address guard_address = pc - 3 * kInstrSize;
  Address ldr_address = pc - 2 * kInstrSize;
  uint32_t target_offset = Memory::uint16_at(ldr_address) & 0xfff;
  Address target_slot = pc + target_offset;

  if (Memory::int32_at(guard_address) == kBranchBeforeInterrupt) {
    // Unpatched: the guard branch is intact and the slot must hold the
    // interrupt-check builtin.
    ASSERT(Memory::uint32_at(target_slot) ==
           reinterpret_cast<uint32_t>(
               isolate->builtins()->InterruptCheck()->entry()));
    ASSERT(Assembler::IsLdrPcImmediateOffset(
        Assembler::instr_at(ldr_address)));
    return INTERRUPT;
  }

  // Patched: the guard must be a NOP and the ldr still pc-relative.
  ASSERT(Assembler::IsNop(Assembler::instr_at(guard_address)));
  ASSERT(Assembler::IsLdrPcImmediateOffset(
      Assembler::instr_at(ldr_address)));

  uint32_t target = Memory::uint32_at(target_slot);
  if (target == reinterpret_cast<uint32_t>(
                    isolate->builtins()->OnStackReplacement()->entry())) {
    return ON_STACK_REPLACEMENT;
  }

  // The only remaining legal target is the OSR-after-stack-check builtin.
  ASSERT(target == reinterpret_cast<uint32_t>(
                       isolate->builtins()->OsrAfterStackCheck()->entry()));
  return OSR_AFTER_STACK_CHECK;
}


} } // namespace v8::internal

#endif // V8_TARGET_ARCH_ARM
@@ -656,7 +656,7 @@ void LoadIC::GenerateMegamorphic(MacroAssembler* masm) {

// Probe the stub cache.
Code::Flags flags = Code::ComputeFlags(
Code::STUB, MONOMORPHIC, Code::kNoExtraICState,
Code::HANDLER, MONOMORPHIC, Code::kNoExtraICState,
Code::NORMAL, Code::LOAD_IC);
masm->isolate()->stub_cache()->GenerateProbe(
masm, flags, r0, r2, r3, r4, r5, r6);
@@ -1394,7 +1394,7 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm,
Register receiver = r2;
Register receiver_map = r3;
Register elements_map = r6;
Register elements = r7; // Elements array of the receiver.
Register elements = r9; // Elements array of the receiver.
// r4 and r5 are used as general scratch registers.

// Check that the key is a smi.
@@ -1487,7 +1487,7 @@ void StoreIC::GenerateMegamorphic(MacroAssembler* masm,

// Get the receiver from the stack and probe the stub cache.
Code::Flags flags = Code::ComputeFlags(
Code::STUB, MONOMORPHIC, strict_mode,
Code::HANDLER, MONOMORPHIC, strict_mode,
Code::NORMAL, Code::STORE_IC);

masm->isolate()->stub_cache()->GenerateProbe(

Large diffs are not rendered by default.

Large diffs are not rendered by default.

Large diffs are not rendered by default.

@@ -32,6 +32,7 @@

#include "arm/lithium-gap-resolver-arm.h"
#include "deoptimizer.h"
#include "lithium-codegen.h"
#include "safepoint-table.h"
#include "scopes.h"
#include "v8utils.h"
@@ -43,43 +44,26 @@ namespace internal {
class LDeferredCode;
class SafepointGenerator;

class LCodeGen V8_FINAL BASE_EMBEDDED {
class LCodeGen: public LCodeGenBase {
public:
LCodeGen(LChunk* chunk, MacroAssembler* assembler, CompilationInfo* info)
: zone_(info->zone()),
chunk_(static_cast<LPlatformChunk*>(chunk)),
masm_(assembler),
info_(info),
current_block_(-1),
current_instruction_(-1),
instructions_(chunk->instructions()),
: LCodeGenBase(chunk, assembler, info),
deoptimizations_(4, info->zone()),
deopt_jump_table_(4, info->zone()),
deoptimization_literals_(8, info->zone()),
inlined_function_count_(0),
scope_(info->scope()),
status_(UNUSED),
translations_(info->zone()),
deferred_(8, info->zone()),
osr_pc_offset_(-1),
last_lazy_deopt_pc_(0),
frame_is_built_(false),
safepoints_(info->zone()),
resolver_(this),
expected_safepoint_kind_(Safepoint::kSimple),
old_position_(RelocInfo::kNoPosition) {
expected_safepoint_kind_(Safepoint::kSimple) {
PopulateDeoptimizationLiteralsWithInlinedFunctions();
}


// Simple accessors.
MacroAssembler* masm() const { return masm_; }
CompilationInfo* info() const { return info_; }
Isolate* isolate() const { return info_->isolate(); }
Factory* factory() const { return isolate()->factory(); }
Heap* heap() const { return isolate()->heap(); }
Zone* zone() const { return zone_; }

int LookupDestination(int block_id) const {
return chunk()->LookupDestination(block_id);
}
@@ -178,30 +162,15 @@ class LCodeGen V8_FINAL BASE_EMBEDDED {
#undef DECLARE_DO

private:
enum Status {
UNUSED,
GENERATING,
DONE,
ABORTED
};

bool is_unused() const { return status_ == UNUSED; }
bool is_generating() const { return status_ == GENERATING; }
bool is_done() const { return status_ == DONE; }
bool is_aborted() const { return status_ == ABORTED; }

StrictModeFlag strict_mode_flag() const {
return info()->is_classic_mode() ? kNonStrictMode : kStrictMode;
}

LPlatformChunk* chunk() const { return chunk_; }
Scope* scope() const { return scope_; }
HGraph* graph() const { return chunk()->graph(); }

Register scratch0() { return r9; }
LowDwVfpRegister double_scratch0() { return kScratchDoubleReg; }

int GetNextEmittedBlock() const;
LInstruction* GetNextInstruction();

void EmitClassOfTest(Label* if_true,
@@ -214,14 +183,12 @@ class LCodeGen V8_FINAL BASE_EMBEDDED {
int GetStackSlotCount() const { return chunk()->spill_slot_count(); }

void Abort(BailoutReason reason);
void FPRINTF_CHECKING Comment(const char* format, ...);

void AddDeferredCode(LDeferredCode* code) { deferred_.Add(code, zone()); }

// Code generation passes. Returns true if code generation should
// continue.
bool GeneratePrologue();
bool GenerateBody();
bool GenerateDeferredCode();
bool GenerateDeoptJumpTable();
bool GenerateSafepointTable();
@@ -249,7 +216,8 @@ class LCodeGen V8_FINAL BASE_EMBEDDED {

void CallRuntime(const Runtime::Function* function,
int num_arguments,
LInstruction* instr);
LInstruction* instr,
SaveFPRegsMode save_doubles = kDontSaveFPRegs);

void CallRuntime(Runtime::FunctionId id,
int num_arguments,
@@ -258,9 +226,11 @@ class LCodeGen V8_FINAL BASE_EMBEDDED {
CallRuntime(function, num_arguments, instr);
}

void LoadContextFromDeferred(LOperand* context);
void CallRuntimeFromDeferred(Runtime::FunctionId id,
int argc,
LInstruction* instr);
LInstruction* instr,
LOperand* context);

enum R1State {
R1_UNINITIALIZED,
@@ -276,8 +246,6 @@ class LCodeGen V8_FINAL BASE_EMBEDDED {
CallKind call_kind,
R1State r1_state);

void LoadHeapObject(Register result, Handle<HeapObject> object);

void RecordSafepointWithLazyDeopt(LInstruction* instr,
SafepointMode safepoint_mode);

@@ -320,8 +288,8 @@ class LCodeGen V8_FINAL BASE_EMBEDDED {
void RecordSafepointWithRegistersAndDoubles(LPointerMap* pointers,
int arguments,
Safepoint::DeoptMode mode);
void RecordPosition(int position);
void RecordAndUpdatePosition(int position);

void RecordAndWritePosition(int position) V8_OVERRIDE;

static Condition TokenToCondition(Token::Value op, bool is_unsigned);
void EmitGoto(int block);
@@ -383,32 +351,22 @@ class LCodeGen V8_FINAL BASE_EMBEDDED {
Register scratch,
LEnvironment* environment);

void EnsureSpaceForLazyDeopt();
void EnsureSpaceForLazyDeopt(int space_needed) V8_OVERRIDE;
void DoLoadKeyedExternalArray(LLoadKeyed* instr);
void DoLoadKeyedFixedDoubleArray(LLoadKeyed* instr);
void DoLoadKeyedFixedArray(LLoadKeyed* instr);
void DoStoreKeyedExternalArray(LStoreKeyed* instr);
void DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr);
void DoStoreKeyedFixedArray(LStoreKeyed* instr);

Zone* zone_;
LPlatformChunk* const chunk_;
MacroAssembler* const masm_;
CompilationInfo* const info_;

int current_block_;
int current_instruction_;
const ZoneList<LInstruction*>* instructions_;
ZoneList<LEnvironment*> deoptimizations_;
ZoneList<Deoptimizer::JumpTableEntry> deopt_jump_table_;
ZoneList<Handle<Object> > deoptimization_literals_;
int inlined_function_count_;
Scope* const scope_;
Status status_;
TranslationBuffer translations_;
ZoneList<LDeferredCode*> deferred_;
int osr_pc_offset_;
int last_lazy_deopt_pc_;
bool frame_is_built_;

// Builder that keeps track of safepoints in the code. The table
@@ -420,8 +378,6 @@ class LCodeGen V8_FINAL BASE_EMBEDDED {

Safepoint::Kind expected_safepoint_kind_;

int old_position_;

class PushSafepointRegistersScope V8_FINAL BASE_EMBEDDED {
public:
PushSafepointRegistersScope(LCodeGen* codegen,
@@ -252,7 +252,7 @@ void LGapResolver::EmitMove(int index) {
if (cgen_->IsInteger32(constant_source)) {
__ mov(dst, Operand(cgen_->ToRepresentation(constant_source, r)));
} else {
__ LoadObject(dst, cgen_->ToHandle(constant_source));
__ Move(dst, cgen_->ToHandle(constant_source));
}
} else if (destination->IsDoubleRegister()) {
DwVfpRegister result = cgen_->ToDoubleRegister(destination);
@@ -267,7 +267,7 @@ void LGapResolver::EmitMove(int index) {
__ mov(kSavedValueRegister,
Operand(cgen_->ToRepresentation(constant_source, r)));
} else {
__ LoadObject(kSavedValueRegister,
__ Move(kSavedValueRegister,
cgen_->ToHandle(constant_source));
}
__ str(kSavedValueRegister, cgen_->ToMemOperand(destination));

Large diffs are not rendered by default.

@@ -45,8 +45,9 @@ inline MemOperand FieldMemOperand(Register object, int offset) {


// Give alias names to registers
const Register cp = { 8 }; // JavaScript context pointer
const Register kRootRegister = { 10 }; // Roots array pointer.
const Register pp = { kRegister_r7_Code }; // Constant pool pointer.
const Register cp = { kRegister_r8_Code }; // JavaScript context pointer.
const Register kRootRegister = { kRegister_r10_Code }; // Roots array pointer.

// Flags used for AllocateHeapNumber
enum TaggingMode {
@@ -169,17 +170,6 @@ class MacroAssembler: public Assembler {
Heap::RootListIndex index,
Condition cond = al);

void LoadHeapObject(Register dst, Handle<HeapObject> object);

void LoadObject(Register result, Handle<Object> object) {
AllowDeferredHandleDereference heap_object_check;
if (object->IsHeapObject()) {
LoadHeapObject(result, Handle<HeapObject>::cast(object));
} else {
Move(result, object);
}
}

// ---------------------------------------------------------------------------
// GC Support

@@ -469,8 +459,13 @@ class MacroAssembler: public Assembler {
void VFPEnsureFPSCRState(Register scratch);

// If the value is a NaN, canonicalize the value else, do nothing.
void VFPCanonicalizeNaN(const DwVfpRegister value,
void VFPCanonicalizeNaN(const DwVfpRegister dst,
const DwVfpRegister src,
const Condition cond = al);
void VFPCanonicalizeNaN(const DwVfpRegister value,
const Condition cond = al) {
VFPCanonicalizeNaN(value, value, cond);
}

// Compare double values and move the result to the normal condition flags.
void VFPCompareAndSetFlags(const DwVfpRegister src1,
@@ -533,6 +528,8 @@ class MacroAssembler: public Assembler {
LowDwVfpRegister double_scratch1,
Label* not_int32);

// Generates function and stub prologue code.
void Prologue(PrologueFrameMode frame_mode);

// Enter exit frame.
// stack_space - extra stack space, used for alignment before call to C.
@@ -541,7 +538,9 @@ class MacroAssembler: public Assembler {
// Leave the current exit frame. Expects the return value in r0.
// Expect the number of values, pushed prior to the exit frame, to
// remove in a register (or no_reg, if there is nothing to remove).
void LeaveExitFrame(bool save_doubles, Register argument_count);
void LeaveExitFrame(bool save_doubles,
Register argument_count,
bool restore_context);

// Get the actual activation frame alignment for target environment.
static int ActivationFrameAlignment();
@@ -1037,11 +1036,18 @@ class MacroAssembler: public Assembler {
void TailCallStub(CodeStub* stub, Condition cond = al);

// Call a runtime routine.
void CallRuntime(const Runtime::Function* f, int num_arguments);
void CallRuntimeSaveDoubles(Runtime::FunctionId id);
void CallRuntime(const Runtime::Function* f,
int num_arguments,
SaveFPRegsMode save_doubles = kDontSaveFPRegs);
void CallRuntimeSaveDoubles(Runtime::FunctionId id) {
const Runtime::Function* function = Runtime::FunctionForId(id);
CallRuntime(function, function->nargs, kSaveFPRegs);
}

// Convenience function: Same as above, but takes the fid instead.
void CallRuntime(Runtime::FunctionId fid, int num_arguments);
void CallRuntime(Runtime::FunctionId id, int num_arguments) {
CallRuntime(Runtime::FunctionForId(id), num_arguments);
}

// Convenience function: call an external reference.
void CallExternalReference(const ExternalReference& ext,
@@ -1111,7 +1117,8 @@ class MacroAssembler: public Assembler {
ExternalReference thunk_ref,
Register thunk_last_arg,
int stack_space,
int return_value_offset_from_fp);
MemOperand return_value_operand,
MemOperand* context_restore_operand);

// Jump to a runtime routine.
void JumpToExternalReference(const ExternalReference& builtin);
@@ -1286,6 +1293,18 @@ class MacroAssembler: public Assembler {
// ---------------------------------------------------------------------------
// String utilities

// Generate code to do a lookup in the number string cache. If the number in
// the register object is found in the cache the generated code falls through
// with the result in the result register. The object and the result register
// can be the same. If the number is not found in the cache the code jumps to
// the label not_found with only the content of register object unchanged.
void LookupNumberStringCache(Register object,
Register result,
Register scratch1,
Register scratch2,
Register scratch3,
Label* not_found);

// Checks if both objects are sequential ASCII strings and jumps to label
// if either is not. Assumes that neither object is a smi.
void JumpIfNonSmisNotBothSequentialAsciiStrings(Register object1,
@@ -1360,9 +1379,20 @@ class MacroAssembler: public Assembler {
// to another type.
// On entry, receiver_reg should point to the array object.
// scratch_reg gets clobbered.
// If allocation info is present, condition flags are set to eq
// If allocation info is present, condition flags are set to eq.
void TestJSArrayForAllocationMemento(Register receiver_reg,
Register scratch_reg);
Register scratch_reg,
Label* no_memento_found);

// Jumps to |memento_found| if the JSArray in |receiver_reg| is directly
// followed by an AllocationMemento; falls through otherwise.
// |scratch_reg| gets clobbered, as do the condition flags (via
// TestJSArrayForAllocationMemento, which sets eq when a memento is
// present).
void JumpIfJSArrayHasAllocationMemento(Register receiver_reg,
Register scratch_reg,
Label* memento_found) {
Label no_memento_found;
TestJSArrayForAllocationMemento(receiver_reg, scratch_reg,
&no_memento_found);
b(eq, memento_found);
bind(&no_memento_found);
}

private:
void CallCFunctionHelper(Register function,
@@ -223,11 +223,6 @@ class RegExpMacroAssemblerARM: public NativeRegExpMacroAssembler {
// are always 0..num_saved_registers_-1)
int num_saved_registers_;

// Manage a small pre-allocated pool for writing label targets
// to for pushing backtrack addresses.
int backtrack_constant_pool_offset_;
int backtrack_constant_pool_capacity_;

// Labels used internally.
Label entry_label_;
Label start_label_;
@@ -912,6 +912,12 @@ double Simulator::get_double_from_register_pair(int reg) {
}


// Writes the 64-bit image of |*value| into the core-register pair
// (reg, reg+1); |reg| must be an even register index.  Used by CallFP
// to marshal double arguments when the hard-float ABI is not in use.
void Simulator::set_register_pair_from_double(int reg, double* value) {
ASSERT((reg >= 0) && (reg < num_registers) && ((reg % 2) == 0));
memcpy(registers_ + reg, value, sizeof(*value));
}


void Simulator::set_dw_register(int dreg, const int* dbl) {
ASSERT((dreg >= 0) && (dreg < num_d_registers));
registers_[dreg] = dbl[0];
@@ -1026,27 +1032,22 @@ ReturnType Simulator::GetFromVFPRegister(int reg_index) {
}


// Runtime FP routines take up to two double arguments and zero
// or one integer argument. All are constructed here
// from r0-r3 or d0 and d1.
// Runtime FP routines take:
// - two double arguments
// - one double argument and zero or one integer argument.
// All are constructed here from r0-r3 or d0, d1 and r0.
void Simulator::GetFpArgs(double* x, double* y, int32_t* z) {
if (use_eabi_hardfloat()) {
*x = vfp_registers_[0];
*y = vfp_registers_[1];
*z = registers_[1];
*x = get_double_from_d_register(0);
*y = get_double_from_d_register(1);
*z = get_register(0);
} else {
// We use a char buffer to get around the strict-aliasing rules which
// otherwise allow the compiler to optimize away the copy.
char buffer[sizeof(*x)];
// Registers 0 and 1 -> x.
OS::MemCopy(buffer, registers_, sizeof(*x));
OS::MemCopy(x, buffer, sizeof(*x));
*x = get_double_from_register_pair(0);
// Register 2 and 3 -> y.
OS::MemCopy(buffer, registers_ + 2, sizeof(*y));
OS::MemCopy(y, buffer, sizeof(*y));
*y = get_double_from_register_pair(2);
// Register 2 -> z
memcpy(buffer, registers_ + 2, sizeof(*z));
memcpy(z, buffer, sizeof(*z));
*z = get_register(2);
}
}

@@ -1718,32 +1719,6 @@ void Simulator::SoftwareInterrupt(Instruction* instr) {
(redirection->type() == ExternalReference::BUILTIN_COMPARE_CALL) ||
(redirection->type() == ExternalReference::BUILTIN_FP_CALL) ||
(redirection->type() == ExternalReference::BUILTIN_FP_INT_CALL);
if (use_eabi_hardfloat()) {
// With the hard floating point calling convention, double
// arguments are passed in VFP registers. Fetch the arguments
// from there and call the builtin using soft floating point
// convention.
switch (redirection->type()) {
case ExternalReference::BUILTIN_FP_FP_CALL:
case ExternalReference::BUILTIN_COMPARE_CALL:
arg0 = vfp_registers_[0];
arg1 = vfp_registers_[1];
arg2 = vfp_registers_[2];
arg3 = vfp_registers_[3];
break;
case ExternalReference::BUILTIN_FP_CALL:
arg0 = vfp_registers_[0];
arg1 = vfp_registers_[1];
break;
case ExternalReference::BUILTIN_FP_INT_CALL:
arg0 = vfp_registers_[0];
arg1 = vfp_registers_[1];
arg2 = get_register(0);
break;
default:
break;
}
}
// This is dodgy but it works because the C entry stubs are never moved.
// See comment in codegen-arm.cc and bug 1242173.
int32_t saved_lr = get_register(lr);
@@ -3816,19 +3791,27 @@ int32_t Simulator::Call(byte* entry, int argument_count, ...) {
}


double Simulator::CallFP(byte* entry, double d0, double d1) {
void Simulator::CallFP(byte* entry, double d0, double d1) {
if (use_eabi_hardfloat()) {
set_d_register_from_double(0, d0);
set_d_register_from_double(1, d1);
} else {
int buffer[2];
ASSERT(sizeof(buffer[0]) * 2 == sizeof(d0));
OS::MemCopy(buffer, &d0, sizeof(d0));
set_dw_register(0, buffer);
OS::MemCopy(buffer, &d1, sizeof(d1));
set_dw_register(2, buffer);
set_register_pair_from_double(0, &d0);
set_register_pair_from_double(2, &d1);
}
CallInternal(entry);
}


// Run a generated (double, double) -> int32 function and fetch the
// result the callee leaves in r0.
int32_t Simulator::CallFPReturnsInt(byte* entry, double d0, double d1) {
  CallFP(entry, d0, d1);
  return get_register(r0);
}


double Simulator::CallFPReturnsDouble(byte* entry, double d0, double d1) {
CallFP(entry, d0, d1);
if (use_eabi_hardfloat()) {
return get_double_from_d_register(0);
} else {
@@ -163,6 +163,7 @@ class Simulator {
void set_register(int reg, int32_t value);
int32_t get_register(int reg) const;
double get_double_from_register_pair(int reg);
void set_register_pair_from_double(int reg, double* value);
void set_dw_register(int dreg, const int* dbl);

// Support for VFP.
@@ -220,7 +221,9 @@ class Simulator {
// which sets up the simulator state and grabs the result on return.
int32_t Call(byte* entry, int argument_count, ...);
// Alternative: call a 2-argument double function.
double CallFP(byte* entry, double d0, double d1);
void CallFP(byte* entry, double d0, double d1);
int32_t CallFPReturnsInt(byte* entry, double d0, double d1);
double CallFPReturnsDouble(byte* entry, double d0, double d1);

// Push an address onto the JS stack.
uintptr_t PushAddress(uintptr_t address);
@@ -444,6 +447,10 @@ class Simulator {
reinterpret_cast<Object*>(Simulator::current(Isolate::Current())->Call( \
FUNCTION_ADDR(entry), 5, p0, p1, p2, p3, p4))

#define CALL_GENERATED_FP_INT(entry, p0, p1) \
Simulator::current(Isolate::Current())->CallFPReturnsInt( \
FUNCTION_ADDR(entry), p0, p1)

#define CALL_GENERATED_REGEXP_CODE(entry, p0, p1, p2, p3, p4, p5, p6, p7, p8) \
Simulator::current(Isolate::Current())->Call( \
entry, 10, p0, p1, p2, p3, NULL, p4, p5, p6, p7, p8)

Large diffs are not rendered by default.

@@ -36,9 +36,9 @@ var ARRAY_ITERATOR_KIND_VALUES = 2;
var ARRAY_ITERATOR_KIND_ENTRIES = 3;
// The spec draft also has "sparse" but it is never used.

var iteratorObjectSymbol = %CreateSymbol(void 0);
var arrayIteratorNextIndexSymbol = %CreateSymbol(void 0);
var arrayIterationKindSymbol = %CreateSymbol(void 0);
var iteratorObjectSymbol = %CreateSymbol(UNDEFINED);
var arrayIteratorNextIndexSymbol = %CreateSymbol(UNDEFINED);
var arrayIterationKindSymbol = %CreateSymbol(UNDEFINED);

function ArrayIterator() {}

@@ -74,7 +74,7 @@ function ArrayIteratorNext() {

if (index >= length) {
iterator[arrayIteratorNextIndexSymbol] = 1 / 0; // Infinity
return CreateIteratorResultObject(void 0, true);
return CreateIteratorResultObject(UNDEFINED, true);
}

iterator[arrayIteratorNextIndexSymbol] = index + 1;
@@ -399,14 +399,13 @@ function ObservedArrayPop(n) {
n--;
var value = this[n];

EnqueueSpliceRecord(this, n, [value], 0);

try {
BeginPerformSplice(this);
delete this[n];
this.length = n;
} finally {
EndPerformSplice(this);
EnqueueSpliceRecord(this, n, [value], 0);
}

return value;
@@ -431,7 +430,7 @@ function ArrayPop() {

n--;
var value = this[n];
delete this[n];
Delete(this, ToName(n), true);
this.length = n;
return value;
}
@@ -441,8 +440,6 @@ function ObservedArrayPush() {
var n = TO_UINT32(this.length);
var m = %_ArgumentsLength();

EnqueueSpliceRecord(this, n, [], m);

try {
BeginPerformSplice(this);
for (var i = 0; i < m; i++) {
@@ -451,6 +448,7 @@ function ObservedArrayPush() {
this.length = n + m;
} finally {
EndPerformSplice(this);
EnqueueSpliceRecord(this, n, [], m);
}

return this.length;
@@ -581,14 +579,13 @@ function ArrayReverse() {
function ObservedArrayShift(len) {
var first = this[0];

EnqueueSpliceRecord(this, 0, [first], 0);

try {
BeginPerformSplice(this);
SimpleMove(this, 0, 1, len, 0);
this.length = len - 1;
} finally {
EndPerformSplice(this);
EnqueueSpliceRecord(this, 0, [first], 0);
}

return first;
@@ -627,8 +624,6 @@ function ObservedArrayUnshift() {
var len = TO_UINT32(this.length);
var num_arguments = %_ArgumentsLength();

EnqueueSpliceRecord(this, 0, [], num_arguments);

try {
BeginPerformSplice(this);
SimpleMove(this, 0, 0, len, num_arguments);
@@ -638,6 +633,7 @@ function ObservedArrayUnshift() {
this.length = len + num_arguments;
} finally {
EndPerformSplice(this);
EnqueueSpliceRecord(this, 0, [], num_arguments);
}

return len + num_arguments;
@@ -681,7 +677,7 @@ function ArraySlice(start, end) {
var start_i = TO_INTEGER(start);
var end_i = len;

if (end !== void 0) end_i = TO_INTEGER(end);
if (!IS_UNDEFINED(end)) end_i = TO_INTEGER(end);

if (start_i < 0) {
start_i += len;
@@ -1020,15 +1016,15 @@ function ArraySort(comparefn) {
var proto_length = indices;
for (var i = from; i < proto_length; i++) {
if (proto.hasOwnProperty(i)) {
obj[i] = void 0;
obj[i] = UNDEFINED;
}
}
} else {
for (var i = 0; i < indices.length; i++) {
var index = indices[i];
if (!IS_UNDEFINED(index) && from <= index &&
proto.hasOwnProperty(index)) {
obj[index] = void 0;
obj[index] = UNDEFINED;
}
}
}
@@ -1065,7 +1061,7 @@ function ArraySort(comparefn) {
if (first_undefined < last_defined) {
// Fill in hole or undefined.
obj[first_undefined] = obj[last_defined];
obj[last_defined] = void 0;
obj[last_defined] = UNDEFINED;
}
}
// If there were any undefineds in the entire array, first_undefined
@@ -1077,12 +1073,12 @@ function ArraySort(comparefn) {
// an undefined should be and vice versa.
var i;
for (i = first_undefined; i < length - num_holes; i++) {
obj[i] = void 0;
obj[i] = UNDEFINED;
}
for (i = length - num_holes; i < length; i++) {
// For compatability with Webkit, do not expose elements in the prototype.
if (i in %GetPrototype(obj)) {
obj[i] = void 0;
obj[i] = UNDEFINED;
} else {
delete obj[i];
}
@@ -81,6 +81,10 @@ function ArrayBufferSlice(start, end) {
return result;
}

// ArrayBuffer.isView(obj): delegates entirely to the %ArrayBufferIsView
// runtime function.
function ArrayBufferIsView(obj) {
return %ArrayBufferIsView(obj);
}

function SetUpArrayBuffer() {
%CheckIsBootstrapping();

@@ -93,6 +97,10 @@ function SetUpArrayBuffer() {

InstallGetter($ArrayBuffer.prototype, "byteLength", ArrayBufferGetByteLength);

InstallFunctions($ArrayBuffer, DONT_ENUM, $Array(
"isView", ArrayBufferIsView
));

InstallFunctions($ArrayBuffer.prototype, DONT_ENUM, $Array(
"slice", ArrayBufferSlice
));
@@ -98,6 +98,7 @@ struct DoubleConstant BASE_EMBEDDED {
double negative_infinity;
double canonical_non_hole_nan;
double the_hole_nan;
double uint32_bias;
};

static DoubleConstant double_constants;
@@ -206,6 +207,24 @@ CpuFeatureScope::~CpuFeatureScope() {
#endif


// -----------------------------------------------------------------------------
// Implementation of PlatformFeatureScope

// Saves the current cross-compile feature mask, then enables feature f
// in it for the lifetime of this scope.
PlatformFeatureScope::PlatformFeatureScope(CpuFeature f)
    : old_cross_compile_(CpuFeatures::cross_compile_) {
  // CpuFeatures is a global singleton, therefore this is only safe in
  // single threaded code.
  ASSERT(Serializer::enabled());
  CpuFeatures::cross_compile_ |= static_cast<uint64_t>(1) << f;
}


// Restore the cross-compile feature mask saved by the constructor.
PlatformFeatureScope::~PlatformFeatureScope() {
CpuFeatures::cross_compile_ = old_cross_compile_;
}


// -----------------------------------------------------------------------------
// Implementation of Label

@@ -890,6 +909,8 @@ void ExternalReference::SetUp() {
double_constants.canonical_non_hole_nan = OS::nan_value();
double_constants.the_hole_nan = BitCast<double>(kHoleNanInt64);
double_constants.negative_infinity = -V8_INFINITY;
double_constants.uint32_bias =
static_cast<double>(static_cast<uint32_t>(0xFFFFFFFF)) + 1;

math_exp_data_mutex = new Mutex();
}
@@ -1067,6 +1088,13 @@ ExternalReference ExternalReference::get_make_code_young_function(
}


// External reference to the runtime entry Code::MarkCodeAsExecuted,
// wrapped in a simulator redirect.
ExternalReference ExternalReference::get_mark_code_as_executed_function(
    Isolate* isolate) {
  auto entry = FUNCTION_ADDR(Code::MarkCodeAsExecuted);
  return ExternalReference(Redirect(isolate, entry));
}


// External reference to the date cache's modification stamp.
ExternalReference ExternalReference::date_cache_stamp(Isolate* isolate) {
  auto* stamp = isolate->date_cache()->stamp_address();
  return ExternalReference(stamp);
}
@@ -1315,6 +1343,20 @@ ExternalReference ExternalReference::address_of_the_hole_nan() {
}


// External reference used by generated code to report object allocations
// to the heap profiler, wrapped in a simulator redirect.
ExternalReference ExternalReference::record_object_allocation_function(
    Isolate* isolate) {
  auto entry = FUNCTION_ADDR(HeapProfiler::RecordObjectAllocationFromMasm);
  return ExternalReference(Redirect(isolate, entry));
}


// Address of the double constant 2^32 (see ExternalReference::SetUp,
// which initializes double_constants.uint32_bias).
ExternalReference ExternalReference::address_of_uint32_bias() {
  return ExternalReference(
      static_cast<void*>(&double_constants.uint32_bias));
}


#ifndef V8_INTERPRETED_REGEXP

ExternalReference ExternalReference::re_check_stack_guard_state(
@@ -134,6 +134,18 @@ class CpuFeatureScope BASE_EMBEDDED {
};


// Enable an unsupported feature within a scope for cross-compiling for a
// different CPU.
class PlatformFeatureScope BASE_EMBEDDED {
public:
explicit PlatformFeatureScope(CpuFeature f);
~PlatformFeatureScope();

private:
// Saved copy of CpuFeatures::cross_compile_, restored by the destructor.
uint64_t old_cross_compile_;
};


// -----------------------------------------------------------------------------
// Labels represent pc locations; they are typically jump or call targets.
// After declaration, a label can be freely used to denote known or (yet)
@@ -389,6 +401,7 @@ class RelocInfo BASE_EMBEDDED {
INLINE(Handle<Cell> target_cell_handle());
INLINE(void set_target_cell(Cell* cell,
WriteBarrierMode mode = UPDATE_WRITE_BARRIER));
INLINE(Handle<Object> code_age_stub_handle(Assembler* origin));
INLINE(Code* code_age_stub());
INLINE(void set_code_age_stub(Code* stub));

@@ -715,6 +728,10 @@ class ExternalReference BASE_EMBEDDED {
static ExternalReference date_cache_stamp(Isolate* isolate);

static ExternalReference get_make_code_young_function(Isolate* isolate);
static ExternalReference get_mark_code_as_executed_function(Isolate* isolate);

// New heap objects tracking support.
static ExternalReference record_object_allocation_function(Isolate* isolate);

// Deoptimization support.
static ExternalReference new_deoptimizer_function(Isolate* isolate);
@@ -798,6 +815,7 @@ class ExternalReference BASE_EMBEDDED {
static ExternalReference address_of_negative_infinity();
static ExternalReference address_of_canonical_non_hole_nan();
static ExternalReference address_of_the_hole_nan();
static ExternalReference address_of_uint32_bias();

static ExternalReference math_sin_double_function(Isolate* isolate);
static ExternalReference math_cos_double_function(Isolate* isolate);
@@ -82,14 +82,13 @@ bool Expression::IsUndefinedLiteral(Isolate* isolate) {
}


VariableProxy::VariableProxy(Isolate* isolate, Variable* var)
: Expression(isolate),
VariableProxy::VariableProxy(Isolate* isolate, Variable* var, int position)
: Expression(isolate, position),
name_(var->name()),
var_(NULL), // Will be set by the call to BindTo.
is_this_(var->is_this()),
is_trivial_(false),
is_lvalue_(false),
position_(RelocInfo::kNoPosition),
interface_(var->interface()) {
BindTo(var);
}
@@ -100,13 +99,12 @@ VariableProxy::VariableProxy(Isolate* isolate,
bool is_this,
Interface* interface,
int position)
: Expression(isolate),
: Expression(isolate, position),
name_(name),
var_(NULL),
is_this_(is_this),
is_trivial_(false),
is_lvalue_(false),
position_(position),
interface_(interface) {
// Names must be canonicalized for fast equality checks.
ASSERT(name->IsInternalizedString());
@@ -133,15 +131,15 @@ Assignment::Assignment(Isolate* isolate,
Expression* target,
Expression* value,
int pos)
: Expression(isolate),
: Expression(isolate, pos),
op_(op),
target_(target),
value_(value),
pos_(pos),
binary_operation_(NULL),
assignment_id_(GetNextId(isolate)),
is_monomorphic_(false),
is_uninitialized_(false),
is_pre_monomorphic_(false),
store_mode_(STANDARD_STORE) { }


@@ -234,33 +232,6 @@ bool ObjectLiteral::Property::emit_store() {
}


bool IsEqualString(void* first, void* second) {
ASSERT((*reinterpret_cast<String**>(first))->IsString());
ASSERT((*reinterpret_cast<String**>(second))->IsString());
Handle<String> h1(reinterpret_cast<String**>(first));
Handle<String> h2(reinterpret_cast<String**>(second));
return (*h1)->Equals(*h2);
}


bool IsEqualNumber(void* first, void* second) {
ASSERT((*reinterpret_cast<Object**>(first))->IsNumber());
ASSERT((*reinterpret_cast<Object**>(second))->IsNumber());

Handle<Object> h1(reinterpret_cast<Object**>(first));
Handle<Object> h2(reinterpret_cast<Object**>(second));
if (h1->IsSmi()) {
return h2->IsSmi() && *h1 == *h2;
}
if (h2->IsSmi()) return false;
Handle<HeapNumber> n1 = Handle<HeapNumber>::cast(h1);
Handle<HeapNumber> n2 = Handle<HeapNumber>::cast(h2);
ASSERT(std::isfinite(n1->value()));
ASSERT(std::isfinite(n2->value()));
return n1->value() == n2->value();
}


void ObjectLiteral::CalculateEmitStore(Zone* zone) {
ZoneAllocationPolicy allocator(zone);

@@ -456,14 +427,13 @@ void Property::RecordTypeFeedback(TypeFeedbackOracle* oracle,
is_uninitialized_ = oracle->LoadIsUninitialized(this);
if (is_uninitialized_) return;

is_pre_monomorphic_ = oracle->LoadIsPreMonomorphic(this);
is_monomorphic_ = oracle->LoadIsMonomorphicNormal(this);
ASSERT(!is_pre_monomorphic_ || !is_monomorphic_);
receiver_types_.Clear();
if (key()->IsPropertyName()) {
FunctionPrototypeStub proto_stub(Code::LOAD_IC);
StringLengthStub string_stub(Code::LOAD_IC, false);
if (oracle->LoadIsStub(this, &string_stub)) {
is_string_length_ = true;
} else if (oracle->LoadIsStub(this, &proto_stub)) {
if (oracle->LoadIsStub(this, &proto_stub)) {
is_function_prototype_ = true;
} else {
Literal* lit_key = key()->AsLiteral();
@@ -474,8 +444,7 @@ void Property::RecordTypeFeedback(TypeFeedbackOracle* oracle,
} else if (oracle->LoadIsBuiltin(this, Builtins::kKeyedLoadIC_String)) {
is_string_access_ = true;
} else if (is_monomorphic_) {
receiver_types_.Add(oracle->LoadMonomorphicReceiverType(this),
zone);
receiver_types_.Add(oracle->LoadMonomorphicReceiverType(this), zone);
} else if (oracle->LoadIsPolymorphic(this)) {
receiver_types_.Reserve(kMaxKeyedPolymorphism, zone);
oracle->CollectKeyedReceiverTypes(PropertyFeedbackId(), &receiver_types_);
@@ -490,7 +459,10 @@ void Assignment::RecordTypeFeedback(TypeFeedbackOracle* oracle,
TypeFeedbackId id = AssignmentFeedbackId();
is_uninitialized_ = oracle->StoreIsUninitialized(id);
if (is_uninitialized_) return;

is_pre_monomorphic_ = oracle->StoreIsPreMonomorphic(id);
is_monomorphic_ = oracle->StoreIsMonomorphicNormal(id);
ASSERT(!is_pre_monomorphic_ || !is_monomorphic_);
receiver_types_.Clear();
if (prop->key()->IsPropertyName()) {
Literal* lit_key = prop->key()->AsLiteral();
@@ -655,7 +627,7 @@ void Call::RecordTypeFeedback(TypeFeedbackOracle* oracle,
holder_ = GetPrototypeForPrimitiveCheck(check_type_, oracle->isolate());
receiver_types_.Add(handle(holder_->map()), oracle->zone());
}
#ifdef DEBUG
#ifdef ENABLE_SLOW_ASSERTS
if (FLAG_enable_slow_asserts) {
int length = receiver_types_.length();
for (int i = 0; i < length; i++) {
@@ -1067,9 +1039,9 @@ CaseClause::CaseClause(Isolate* isolate,
Expression* label,
ZoneList<Statement*>* statements,
int pos)
: label_(label),
: AstNode(pos),
label_(label),
statements_(statements),
position_(pos),
compare_type_(Type::None(), isolate),
compare_id_(AstNode::GetNextId(isolate)),
entry_id_(AstNode::GetNextId(isolate)) {
@@ -1111,6 +1083,7 @@ REGULAR_NODE(ContinueStatement)
REGULAR_NODE(BreakStatement)
REGULAR_NODE(ReturnStatement)
REGULAR_NODE(SwitchStatement)
REGULAR_NODE(CaseClause)
REGULAR_NODE(Conditional)
REGULAR_NODE(Literal)
REGULAR_NODE(ArrayLiteral)
@@ -1146,7 +1119,7 @@ DONT_OPTIMIZE_NODE(WithStatement)
DONT_OPTIMIZE_NODE(TryCatchStatement)
DONT_OPTIMIZE_NODE(TryFinallyStatement)
DONT_OPTIMIZE_NODE(DebuggerStatement)
DONT_OPTIMIZE_NODE(SharedFunctionInfoLiteral)
DONT_OPTIMIZE_NODE(NativeFunctionLiteral)

DONT_SELFOPTIMIZE_NODE(DoWhileStatement)
DONT_SELFOPTIMIZE_NODE(WhileStatement)

Large diffs are not rendered by default.

@@ -824,7 +824,7 @@ void Genesis::HookUpInnerGlobal(Handle<GlobalObject> inner_global) {
// work in the snapshot case is done in HookUpInnerGlobal.
void Genesis::InitializeGlobal(Handle<GlobalObject> inner_global,
Handle<JSFunction> empty_function) {
// --- G l o b a l C o n t e x t ---
// --- N a t i v e C o n t e x t ---
// Use the empty function as closure (no scope info).
native_context()->set_closure(*empty_function);
native_context()->set_previous(NULL);
@@ -1043,7 +1043,7 @@ void Genesis::InitializeGlobal(Handle<GlobalObject> inner_global,
}

{ // -- J S O N
Handle<String> name = factory->NewStringFromAscii(CStrVector("JSON"));
Handle<String> name = factory->InternalizeUtf8String("JSON");
Handle<JSFunction> cons = factory->NewFunction(name,
factory->the_hole_value());
JSFunction::SetInstancePrototype(cons,
@@ -2067,6 +2067,11 @@ bool Genesis::InstallExperimentalNatives() {
"native harmony-array.js") == 0) {
if (!CompileExperimentalBuiltin(isolate(), i)) return false;
}
if (FLAG_harmony_maths &&
strcmp(ExperimentalNatives::GetScriptName(i).start(),
"native harmony-math.js") == 0) {
if (!CompileExperimentalBuiltin(isolate(), i)) return false;
}
}

InstallExperimentalNativeFunctions();
@@ -195,79 +195,6 @@ BUILTIN(EmptyFunction) {
}


// Shared generic Array/InternalArray construction path. args holds the
// receiver (when called as a constructor) followed by the user arguments;
// returns the initialized JSArray or a failure from any intermediate
// allocation.
static MaybeObject* ArrayCodeGenericCommon(Arguments* args,
Isolate* isolate,
JSFunction* constructor) {
ASSERT(args->length() >= 1);
Heap* heap = isolate->heap();
isolate->counters()->array_function_runtime()->Increment();

JSArray* array;
if (CalledAsConstructor(isolate)) {
// Constructor call: the receiver at slot 0 is the array being built.
array = JSArray::cast((*args)[0]);
// Initialize elements and length in case later allocations fail so that the
// array object is initialized in a valid state.
MaybeObject* maybe_array = array->Initialize(0);
if (maybe_array->IsFailure()) return maybe_array;

// Consult allocation-site feedback (if any) to pre-transition the array's
// elements kind toward what past allocations at this site settled on.
AllocationMemento* memento = AllocationMemento::FindForJSObject(array);
if (memento != NULL && memento->IsValid()) {
AllocationSite* site = memento->GetAllocationSite();
ElementsKind to_kind = site->GetElementsKind();
if (IsMoreGeneralElementsKindTransition(array->GetElementsKind(),
to_kind)) {
// We have advice that we should change the elements kind
if (FLAG_trace_track_allocation_sites) {
PrintF("AllocationSite: pre-transitioning array %p(%s->%s)\n",
reinterpret_cast<void*>(array),
ElementsKindToString(array->GetElementsKind()),
ElementsKindToString(to_kind));
}

maybe_array = array->TransitionElementsKind(to_kind);
if (maybe_array->IsFailure()) return maybe_array;
}
}

// Without smi-only arrays, install the terminal fast-elements map directly
// when one is cached on the native context.
if (!FLAG_smi_only_arrays) {
Context* native_context = isolate->context()->native_context();
if (array->GetElementsKind() == GetInitialFastElementsKind() &&
!native_context->js_array_maps()->IsUndefined()) {
FixedArray* map_array =
FixedArray::cast(native_context->js_array_maps());
array->set_map(Map::cast(map_array->
get(TERMINAL_FAST_ELEMENTS_KIND)));
}
}
} else {
// Allocate the JS Array
MaybeObject* maybe_obj = heap->AllocateJSObject(constructor);
if (!maybe_obj->To(&array)) return maybe_obj;
}

// Shift the Arguments view by one slot so index 0 is the first real
// argument (the assertion checks the shifted view lines up).
Arguments adjusted_arguments(args->length() - 1, args->arguments() - 1);
ASSERT(adjusted_arguments.length() < 1 ||
adjusted_arguments[0] == (*args)[1]);
return ArrayConstructInitializeElements(array, &adjusted_arguments);
}


// Generic (non-specialized) entry point for the InternalArray constructor.
BUILTIN(InternalArrayCodeGeneric) {
  auto* native_context = isolate->context()->native_context();
  return ArrayCodeGenericCommon(&args, isolate,
                                native_context->internal_array_function());
}


// Generic (non-specialized) entry point for the Array constructor.
BUILTIN(ArrayCodeGeneric) {
  auto* native_context = isolate->context()->native_context();
  return ArrayCodeGenericCommon(&args, isolate,
                                native_context->array_function());
}


static void MoveDoubleElements(FixedDoubleArray* dst,
int dst_index,
FixedDoubleArray* src,
@@ -346,10 +273,20 @@ static FixedArrayBase* LeftTrimFixedArray(Heap* heap,
MemoryChunk::IncrementLiveBytesFromMutator(elms->address(), -size_delta);
}

HEAP_PROFILE(heap, ObjectMoveEvent(elms->address(),
elms->address() + size_delta));
return FixedArrayBase::cast(HeapObject::FromAddress(
elms->address() + to_trim * entry_size));
FixedArrayBase* new_elms = FixedArrayBase::cast(HeapObject::FromAddress(
elms->address() + size_delta));
HeapProfiler* profiler = heap->isolate()->heap_profiler();
if (profiler->is_profiling()) {
profiler->ObjectMoveEvent(elms->address(),
new_elms->address(),
new_elms->Size());
if (profiler->is_tracking_allocations()) {
// Report filler object as a new allocation.
// Otherwise it will become an untracked object.
profiler->NewObjectEvent(elms->address(), elms->Size());
}
}
return new_elms;
}


@@ -1392,7 +1329,8 @@ static void Generate_LoadIC_Normal(MacroAssembler* masm) {


// Generates the LoadIC via-getter stub with an empty getter handle, for use
// as a deoptimization target.
// NOTE: the stripped diff left the stale one-argument call alongside the new
// three-argument call; only the current call is kept.
static void Generate_LoadIC_Getter_ForDeopt(MacroAssembler* masm) {
  LoadStubCompiler::GenerateLoadViaGetter(
      masm, LoadStubCompiler::registers()[0], Handle<JSFunction>());
}


@@ -1451,6 +1389,11 @@ static void Generate_StoreIC_Slow(MacroAssembler* masm) {
}


// Strict-mode StoreIC slow-path builtin; shares the same generator as the
// sloppy variant (strictness is carried in the builtin's flags).
static void Generate_StoreIC_Slow_Strict(MacroAssembler* masm) {
StoreIC::GenerateSlow(masm);
}


// Emits the uninitialized StoreIC stub.
static void Generate_StoreIC_Initialize(MacroAssembler* masm) {
StoreIC::GenerateInitialize(masm);
}
@@ -1546,6 +1489,11 @@ static void Generate_KeyedStoreIC_Slow(MacroAssembler* masm) {
}


// Strict-mode KeyedStoreIC slow-path builtin; shares the same generator as
// the sloppy variant (strictness is carried in the builtin's flags).
static void Generate_KeyedStoreIC_Slow_Strict(MacroAssembler* masm) {
KeyedStoreIC::GenerateSlow(masm);
}


// Emits the uninitialized KeyedStoreIC stub.
static void Generate_KeyedStoreIC_Initialize(MacroAssembler* masm) {
KeyedStoreIC::GenerateInitialize(masm);
}
@@ -1728,8 +1676,19 @@ void Builtins::InitBuiltinFunctionTable() {
functions->extra_args = NO_EXTRA_ARGUMENTS; \
++functions;

#define DEF_FUNCTION_PTR_H(aname, kind, extra) \
functions->generator = FUNCTION_ADDR(Generate_##aname); \
functions->c_code = NULL; \
functions->s_name = #aname; \
functions->name = k##aname; \
functions->flags = Code::ComputeFlags( \
Code::HANDLER, MONOMORPHIC, extra, Code::NORMAL, Code::kind); \
functions->extra_args = NO_EXTRA_ARGUMENTS; \
++functions;

BUILTIN_LIST_C(DEF_FUNCTION_PTR_C)
BUILTIN_LIST_A(DEF_FUNCTION_PTR_A)
BUILTIN_LIST_H(DEF_FUNCTION_PTR_H)
BUILTIN_LIST_DEBUG_A(DEF_FUNCTION_PTR_A)

#undef DEF_FUNCTION_PTR_C
@@ -1854,8 +1813,15 @@ Handle<Code> Builtins::name() { \
reinterpret_cast<Code**>(builtin_address(k##name)); \
return Handle<Code>(code_address); \
}
#define DEFINE_BUILTIN_ACCESSOR_H(name, kind, extra) \
Handle<Code> Builtins::name() { \
Code** code_address = \
reinterpret_cast<Code**>(builtin_address(k##name)); \
return Handle<Code>(code_address); \
}
BUILTIN_LIST_C(DEFINE_BUILTIN_ACCESSOR_C)
BUILTIN_LIST_A(DEFINE_BUILTIN_ACCESSOR_A)
BUILTIN_LIST_H(DEFINE_BUILTIN_ACCESSOR_H)
BUILTIN_LIST_DEBUG_A(DEFINE_BUILTIN_ACCESSOR_A)
#undef DEFINE_BUILTIN_ACCESSOR_C
#undef DEFINE_BUILTIN_ACCESSOR_A
@@ -50,6 +50,10 @@ enum BuiltinExtraArguments {
#define CODE_AGE_LIST(V) \
CODE_AGE_LIST_WITH_ARG(CODE_AGE_LIST_IGNORE_ARG, V)

#define CODE_AGE_LIST_WITH_NO_AGE(V) \
V(NoAge) \
CODE_AGE_LIST_WITH_ARG(CODE_AGE_LIST_IGNORE_ARG, V)

#define DECLARE_CODE_AGE_BUILTIN(C, V) \
V(Make##C##CodeYoungAgainOddMarking, BUILTIN, \
UNINITIALIZED, Code::kNoExtraICState) \
@@ -63,9 +67,6 @@ enum BuiltinExtraArguments {
\
V(EmptyFunction, NO_EXTRA_ARGUMENTS) \
\
V(InternalArrayCodeGeneric, NO_EXTRA_ARGUMENTS) \
V(ArrayCodeGeneric, NO_EXTRA_ARGUMENTS) \
\
V(ArrayPush, NO_EXTRA_ARGUMENTS) \
V(ArrayPop, NO_EXTRA_ARGUMENTS) \
V(ArrayShift, NO_EXTRA_ARGUMENTS) \
@@ -110,8 +111,6 @@ enum BuiltinExtraArguments {
V(NotifyLazyDeoptimized, BUILTIN, UNINITIALIZED, \
Code::kNoExtraICState) \
V(NotifyStubFailure, BUILTIN, UNINITIALIZED, \
Code::kNoExtraICState) \
V(NotifyOSR, BUILTIN, UNINITIALIZED, \
Code::kNoExtraICState) \
\
V(LoadIC_Miss, BUILTIN, UNINITIALIZED, \
@@ -120,29 +119,19 @@ enum BuiltinExtraArguments {
Code::kNoExtraICState) \
V(KeyedLoadIC_MissForceGeneric, BUILTIN, UNINITIALIZED, \
Code::kNoExtraICState) \
V(KeyedLoadIC_Slow, STUB, MONOMORPHIC, \
Code::kNoExtraICState) \
V(StoreIC_Miss, BUILTIN, UNINITIALIZED, \
Code::kNoExtraICState) \
V(StoreIC_Slow, BUILTIN, UNINITIALIZED, \
Code::kNoExtraICState) \
V(KeyedStoreIC_Miss, BUILTIN, UNINITIALIZED, \
Code::kNoExtraICState) \
V(KeyedStoreIC_MissForceGeneric, BUILTIN, UNINITIALIZED, \
Code::kNoExtraICState) \
V(KeyedStoreIC_Slow, BUILTIN, UNINITIALIZED, \
Code::kNoExtraICState) \
V(LoadIC_Initialize, LOAD_IC, UNINITIALIZED, \
Code::kNoExtraICState) \
V(LoadIC_PreMonomorphic, LOAD_IC, PREMONOMORPHIC, \
Code::kNoExtraICState) \
V(LoadIC_Normal, LOAD_IC, MONOMORPHIC, \
Code::kNoExtraICState) \
V(LoadIC_Megamorphic, LOAD_IC, MEGAMORPHIC, \
Code::kNoExtraICState) \
V(LoadIC_Getter_ForDeopt, LOAD_IC, MONOMORPHIC, \
Code::kNoExtraICState) \
V(LoadIC_Slow, STUB, MONOMORPHIC, \
Code::kNoExtraICState) \
\
V(KeyedLoadIC_Initialize, KEYED_LOAD_IC, UNINITIALIZED, \
@@ -162,8 +151,6 @@ enum BuiltinExtraArguments {
Code::kNoExtraICState) \
V(StoreIC_PreMonomorphic, STORE_IC, PREMONOMORPHIC, \
Code::kNoExtraICState) \
V(StoreIC_Normal, STORE_IC, MONOMORPHIC, \
Code::kNoExtraICState) \
V(StoreIC_Megamorphic, STORE_IC, MEGAMORPHIC, \
Code::kNoExtraICState) \
V(StoreIC_Generic, STORE_IC, GENERIC, \
@@ -176,8 +163,6 @@ enum BuiltinExtraArguments {
kStrictMode) \
V(StoreIC_PreMonomorphic_Strict, STORE_IC, PREMONOMORPHIC, \
kStrictMode) \
V(StoreIC_Normal_Strict, STORE_IC, MONOMORPHIC, \
kStrictMode) \
V(StoreIC_Megamorphic_Strict, STORE_IC, MEGAMORPHIC, \
kStrictMode) \
V(StoreIC_GlobalProxy_Strict, STORE_IC, GENERIC, \
@@ -219,10 +204,29 @@ enum BuiltinExtraArguments {
Code::kNoExtraICState) \
V(InterruptCheck, BUILTIN, UNINITIALIZED, \
Code::kNoExtraICState) \
V(OsrAfterStackCheck, BUILTIN, UNINITIALIZED, \
Code::kNoExtraICState) \
V(StackCheck, BUILTIN, UNINITIALIZED, \
Code::kNoExtraICState) \
\
V(MarkCodeAsExecutedOnce, BUILTIN, UNINITIALIZED, \
Code::kNoExtraICState) \
V(MarkCodeAsExecutedTwice, BUILTIN, UNINITIALIZED, \
Code::kNoExtraICState) \
CODE_AGE_LIST_WITH_ARG(DECLARE_CODE_AGE_BUILTIN, V)

// Define list of builtin handlers implemented in assembly.
#define BUILTIN_LIST_H(V) \
V(LoadIC_Slow, LOAD_IC, Code::kNoExtraICState) \
V(KeyedLoadIC_Slow, KEYED_LOAD_IC, Code::kNoExtraICState) \
V(StoreIC_Slow, STORE_IC, Code::kNoExtraICState) \
V(StoreIC_Slow_Strict, STORE_IC, kStrictMode) \
V(KeyedStoreIC_Slow, KEYED_STORE_IC, Code::kNoExtraICState)\
V(KeyedStoreIC_Slow_Strict, KEYED_STORE_IC, kStrictMode) \
V(LoadIC_Normal, LOAD_IC, Code::kNoExtraICState) \
V(StoreIC_Normal, STORE_IC, Code::kNoExtraICState) \
V(StoreIC_Normal_Strict, STORE_IC, kStrictMode)

#ifdef ENABLE_DEBUGGER_SUPPORT
// Define list of builtins used by the debugger implemented in assembly.
#define BUILTIN_LIST_DEBUG_A(V) \
@@ -310,8 +314,10 @@ class Builtins {
enum Name {
#define DEF_ENUM_C(name, ignore) k##name,
#define DEF_ENUM_A(name, kind, state, extra) k##name,
#define DEF_ENUM_H(name, kind, extra) k##name,
BUILTIN_LIST_C(DEF_ENUM_C)
BUILTIN_LIST_A(DEF_ENUM_A)
BUILTIN_LIST_H(DEF_ENUM_H)
BUILTIN_LIST_DEBUG_A(DEF_ENUM_A)
#undef DEF_ENUM_C
#undef DEF_ENUM_A
@@ -335,8 +341,10 @@ class Builtins {
#define DECLARE_BUILTIN_ACCESSOR_C(name, ignore) Handle<Code> name();
#define DECLARE_BUILTIN_ACCESSOR_A(name, kind, state, extra) \
Handle<Code> name();
#define DECLARE_BUILTIN_ACCESSOR_H(name, kind, extra) Handle<Code> name();
BUILTIN_LIST_C(DECLARE_BUILTIN_ACCESSOR_C)
BUILTIN_LIST_A(DECLARE_BUILTIN_ACCESSOR_A)
BUILTIN_LIST_H(DECLARE_BUILTIN_ACCESSOR_H)
BUILTIN_LIST_DEBUG_A(DECLARE_BUILTIN_ACCESSOR_A)
#undef DECLARE_BUILTIN_ACCESSOR_C
#undef DECLARE_BUILTIN_ACCESSOR_A
@@ -391,7 +399,6 @@ class Builtins {
static void Generate_NotifyDeoptimized(MacroAssembler* masm);
static void Generate_NotifySoftDeoptimized(MacroAssembler* masm);
static void Generate_NotifyLazyDeoptimized(MacroAssembler* masm);
static void Generate_NotifyOSR(MacroAssembler* masm);
static void Generate_NotifyStubFailure(MacroAssembler* masm);
static void Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm);

@@ -403,7 +410,7 @@ class Builtins {

static void Generate_StringConstructCode(MacroAssembler* masm);
static void Generate_OnStackReplacement(MacroAssembler* masm);

static void Generate_OsrAfterStackCheck(MacroAssembler* masm);
static void Generate_InterruptCheck(MacroAssembler* masm);
static void Generate_StackCheck(MacroAssembler* masm);

@@ -415,6 +422,9 @@ class Builtins {
CODE_AGE_LIST(DECLARE_CODE_AGE_BUILTIN_GENERATOR)
#undef DECLARE_CODE_AGE_BUILTIN_GENERATOR

static void Generate_MarkCodeAsExecutedOnce(MacroAssembler* masm);
static void Generate_MarkCodeAsExecutedTwice(MacroAssembler* masm);

static void InitBuiltinFunctionTable();

bool initialized_;
@@ -25,11 +25,48 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#include <stdarg.h>
#include "checks.h"

#include "v8.h"
#if V8_LIBC_GLIBC || V8_OS_BSD
# include <cxxabi.h>
# include <execinfo.h>
#endif // V8_LIBC_GLIBC || V8_OS_BSD
#include <stdio.h>

#include "platform.h"
#include "v8.h"


// Attempts to dump a backtrace (if supported).
// Prints to stderr via OS::PrintError; a no-op on platforms without
// glibc/BSD backtrace support.
static V8_INLINE void DumpBacktrace() {
#if V8_LIBC_GLIBC || V8_OS_BSD
void* trace[100];
int size = backtrace(trace, ARRAY_SIZE(trace));
char** symbols = backtrace_symbols(trace, size);
i::OS::PrintError("\n==== C stack trace ===============================\n\n");
if (size == 0) {
i::OS::PrintError("(empty)\n");
} else if (symbols == NULL) {
i::OS::PrintError("(no symbols)\n");
} else {
// Start at 1 to skip the DumpBacktrace frame itself.
for (int i = 1; i < size; ++i) {
i::OS::PrintError("%2d: ", i);
char mangled[201];
// Extract the mangled symbol between '(' and '+'/')' in the glibc
// "module(symbol+offset)" format; at most 200 chars to fit the buffer.
if (sscanf(symbols[i], "%*[^(]%*[(]%200[^)+]", mangled) == 1) {  // NOLINT
int status;
size_t length;
char* demangled = abi::__cxa_demangle(mangled, NULL, &length, &status);
// On demangling failure, fall back to printing the mangled name.
i::OS::PrintError("%s\n", demangled != NULL ? demangled : mangled);
free(demangled);
} else {
i::OS::PrintError("??\n");
}
}
}
// free(NULL) is safe, so no guard is needed for the no-symbols case.
free(symbols);
#endif  // V8_LIBC_GLIBC || V8_OS_BSD
}


// Contains protection against recursive calls (faults while handling faults).
extern "C" void V8_Fatal(const char* file, int line, const char* format, ...) {
@@ -43,7 +80,8 @@ extern "C" void V8_Fatal(const char* file, int line, const char* format, ...) {
i::OS::VPrintError(format, arguments);
va_end(arguments);
i::OS::PrintError("\n#\n");
i::OS::DumpBacktrace();
DumpBacktrace();
fflush(stderr);
i::OS::Abort();
}

@@ -91,8 +129,6 @@ void API_Fatal(const char* location, const char* format, ...) {

namespace v8 { namespace internal {

bool EnableSlowAsserts() { return FLAG_enable_slow_asserts; }

intptr_t HeapObjectTagMask() { return kHeapObjectTagMask; }

} } // namespace v8::internal
@@ -272,7 +272,24 @@ template <int> class StaticAssertionHelper { };
#endif


#ifdef DEBUG
#ifndef OPTIMIZED_DEBUG
#define ENABLE_SLOW_ASSERTS 1
#endif
#endif

namespace v8 {
namespace internal {
#ifdef ENABLE_SLOW_ASSERTS
#define SLOW_ASSERT(condition) \
CHECK(!v8::internal::FLAG_enable_slow_asserts || (condition))
extern bool FLAG_enable_slow_asserts;
#else
#define SLOW_ASSERT(condition) ((void) 0)
const bool FLAG_enable_slow_asserts = false;
#endif
} // namespace internal
} // namespace v8


// The ASSERT macro is equivalent to CHECK except that it only
@@ -285,7 +302,6 @@ extern bool FLAG_enable_slow_asserts;
#define ASSERT_GE(v1, v2) CHECK_GE(v1, v2)
#define ASSERT_LT(v1, v2) CHECK_LT(v1, v2)
#define ASSERT_LE(v1, v2) CHECK_LE(v1, v2)
#define SLOW_ASSERT(condition) CHECK(!FLAG_enable_slow_asserts || (condition))
#else
#define ASSERT_RESULT(expr) (expr)
#define ASSERT(condition) ((void) 0)
@@ -294,7 +310,6 @@ extern bool FLAG_enable_slow_asserts;
#define ASSERT_GE(v1, v2) ((void) 0)
#define ASSERT_LT(v1, v2) ((void) 0)
#define ASSERT_LE(v1, v2) ((void) 0)
#define SLOW_ASSERT(condition) ((void) 0)
#endif
// Static asserts has no impact on runtime performance, so they can be
// safely enabled in release mode. Moreover, the ((void) 0) expression

Large diffs are not rendered by default.

Large diffs are not rendered by default.

Large diffs are not rendered by default.

@@ -113,10 +113,12 @@ Handle<Code> CodeGenerator::MakeCodeEpilogue(MacroAssembler* masm,
masm->GetCode(&desc);
Handle<Code> code =
isolate->factory()->NewCode(desc, flags, masm->CodeObject(),
false, is_crankshafted);
false, is_crankshafted,
info->prologue_offset());
isolate->counters()->total_compiled_code_size()->Increment(
code->instruction_size());
code->set_prologue_offset(info->prologue_offset());
isolate->heap()->IncrementCodeGeneratedBytes(is_crankshafted,
code->instruction_size());
return code;
}

@@ -132,7 +134,9 @@ void CodeGenerator::PrintCode(Handle<Code> code, CompilationInfo* info) {
if (print_code) {
// Print the source code if available.
FunctionLiteral* function = info->function();
if (code->kind() == Code::OPTIMIZED_FUNCTION) {
bool print_source = code->kind() == Code::OPTIMIZED_FUNCTION ||
code->kind() == Code::FUNCTION;
if (print_source) {
Handle<Script> script = info->script();
if (!script->IsUndefined() && !script->source()->IsUndefined()) {
PrintF("--- Raw source ---\n");
@@ -160,12 +164,16 @@ void CodeGenerator::PrintCode(Handle<Code> code, CompilationInfo* info) {
} else {
PrintF("--- Code ---\n");
}
if (print_source) {
PrintF("source_position = %d\n", function->start_position());
}
if (info->IsStub()) {
CodeStub::Major major_key = info->code_stub()->MajorKey();
code->Disassemble(CodeStub::MajorName(major_key, false));
} else {
code->Disassemble(*function->debug_name()->ToCString());
}
PrintF("--- End code ---\n");
}
#endif // ENABLE_DISASSEMBLER
}
@@ -112,7 +112,7 @@ void CompilationInfo::Initialize(Isolate* isolate,
zone_ = zone;
deferred_handles_ = NULL;
code_stub_ = NULL;
prologue_offset_ = kPrologueOffsetNotSet;
prologue_offset_ = Code::kPrologueOffsetNotSet;
opt_count_ = shared_info().is_null() ? 0 : shared_info()->opt_count();
no_frame_ranges_ = isolate->cpu_profiler()->is_profiling()
? new List<OffsetRange>(2) : NULL;
@@ -123,7 +123,7 @@ void CompilationInfo::Initialize(Isolate* isolate,
mode_ = STUB;
return;
}
mode_ = isolate->use_crankshaft() ? mode : NONOPT;
mode_ = mode;
abort_due_to_dependency_ = false;
if (script_->type()->value() == Script::TYPE_NATIVE) {
MarkAsNative();
@@ -260,7 +260,7 @@ static bool AlwaysFullCompiler(Isolate* isolate) {
}


void OptimizingCompiler::RecordOptimizationStats() {
void RecompileJob::RecordOptimizationStats() {
Handle<JSFunction> function = info()->closure();
int opt_count = function->shared()->opt_count();
function->shared()->set_opt_count(opt_count + 1);
@@ -297,23 +297,60 @@ void OptimizingCompiler::RecordOptimizationStats() {
// A return value of true indicates the compilation pipeline is still
// going, not necessarily that we optimized the code.
static bool MakeCrankshaftCode(CompilationInfo* info) {
OptimizingCompiler compiler(info);
OptimizingCompiler::Status status = compiler.CreateGraph();
RecompileJob job(info);
RecompileJob::Status status = job.CreateGraph();

if (status != OptimizingCompiler::SUCCEEDED) {
return status != OptimizingCompiler::FAILED;
if (status != RecompileJob::SUCCEEDED) {
return status != RecompileJob::FAILED;
}
status = compiler.OptimizeGraph();
if (status != OptimizingCompiler::SUCCEEDED) {
status = compiler.AbortOptimization();
return status != OptimizingCompiler::FAILED;
status = job.OptimizeGraph();
if (status != RecompileJob::SUCCEEDED) {
status = job.AbortOptimization();
return status != RecompileJob::FAILED;
}
status = compiler.GenerateAndInstallCode();
return status != OptimizingCompiler::FAILED;
status = job.GenerateAndInstallCode();
return status != RecompileJob::FAILED;
}


OptimizingCompiler::Status OptimizingCompiler::CreateGraph() {
// A graph builder that records source positions: before visiting each
// expression or statement node it forwards the node's source position to
// the code generator via SetSourcePosition, then delegates to the base
// HOptimizedGraphBuilder visitor.  Module, declaration and auxiliary nodes
// carry no useful position and are delegated unchanged.
// NOTE(review): "Potisions" is a typo for "Positions"; the name is kept
// as-is here because it is referenced elsewhere in this patch.
class HOptimizedGraphBuilderWithPotisions: public HOptimizedGraphBuilder {
public:
explicit HOptimizedGraphBuilderWithPotisions(CompilationInfo* info)
: HOptimizedGraphBuilder(info) {
}

// Expression visitors: record the node's position (when it has one)
// before building the expression's graph.
#define DEF_VISIT(type) \
virtual void Visit##type(type* node) V8_OVERRIDE { \
if (node->position() != RelocInfo::kNoPosition) { \
SetSourcePosition(node->position()); \
} \
HOptimizedGraphBuilder::Visit##type(node); \
}
EXPRESSION_NODE_LIST(DEF_VISIT)
#undef DEF_VISIT

// Statement visitors: same treatment as expressions.
#define DEF_VISIT(type) \
virtual void Visit##type(type* node) V8_OVERRIDE { \
if (node->position() != RelocInfo::kNoPosition) { \
SetSourcePosition(node->position()); \
} \
HOptimizedGraphBuilder::Visit##type(node); \
}
STATEMENT_NODE_LIST(DEF_VISIT)
#undef DEF_VISIT

// Module/declaration/auxiliary visitors: plain pass-through overrides
// (no position to record).
#define DEF_VISIT(type) \
virtual void Visit##type(type* node) V8_OVERRIDE { \
HOptimizedGraphBuilder::Visit##type(node); \
}
MODULE_NODE_LIST(DEF_VISIT)
DECLARATION_NODE_LIST(DEF_VISIT)
AUXILIARY_NODE_LIST(DEF_VISIT)
#undef DEF_VISIT
};


RecompileJob::Status RecompileJob::CreateGraph() {
ASSERT(isolate()->use_crankshaft());
ASSERT(info()->IsOptimizing());
ASSERT(!info()->IsCompilingForDebugging());
@@ -419,7 +456,9 @@ OptimizingCompiler::Status OptimizingCompiler::CreateGraph() {
// Type-check the function.
AstTyper::Run(info());

graph_builder_ = new(info()->zone()) HOptimizedGraphBuilder(info());
graph_builder_ = FLAG_emit_opt_code_positions
? new(info()->zone()) HOptimizedGraphBuilderWithPotisions(info())
: new(info()->zone()) HOptimizedGraphBuilder(info());

Timer t(this, &time_taken_to_create_graph_);
graph_ = graph_builder_->CreateGraph();
@@ -452,7 +491,7 @@ OptimizingCompiler::Status OptimizingCompiler::CreateGraph() {
}


OptimizingCompiler::Status OptimizingCompiler::OptimizeGraph() {
RecompileJob::Status RecompileJob::OptimizeGraph() {
DisallowHeapAllocation no_allocation;
DisallowHandleAllocation no_handles;
DisallowHandleDereference no_deref;
@@ -475,7 +514,7 @@ OptimizingCompiler::Status OptimizingCompiler::OptimizeGraph() {
}


OptimizingCompiler::Status OptimizingCompiler::GenerateAndInstallCode() {
RecompileJob::Status RecompileJob::GenerateAndInstallCode() {
ASSERT(last_status() == SUCCEEDED);
ASSERT(!info()->HasAbortedDueToDependencyChange());
DisallowCodeDependencyChange no_dependency_change;
@@ -555,6 +594,33 @@ static bool DebuggerWantsEagerCompilation(CompilationInfo* info,
}


// Sets the expected number of properties based on estimate from compiler.
//
// Does nothing when live objects of this function may already exist (see
// the comment in SetExpectedNofProperties).  Otherwise pads the compiler's
// estimate with a heuristic slack amount and stores the result on |shared|.
void SetExpectedNofPropertiesFromEstimate(Handle<SharedFunctionInfo> shared,
                                          int estimate) {
  if (shared->live_objects_may_exist()) return;

  // A constructor that adds no properties is likely to acquire some later.
  int expected = (estimate == 0) ? 2 : estimate;

  // TODO(yangguo): check whether those heuristics are still up-to-date.
  // Objects going into a snapshot are not shrunk (yet), so adjust the
  // estimate conservatively there.  With inobject slack tracking enabled,
  // redundant inobject space is reclaimed later, so a generous adjustment
  // is affordable; otherwise use a modest one.
  if (Serializer::enabled()) {
    expected += 2;
  } else {
    expected += FLAG_clever_optimizations ? 8 : 3;
  }

  shared->set_expected_nof_properties(expected);
}


static Handle<SharedFunctionInfo> MakeFunctionInfo(CompilationInfo* info) {
Isolate* isolate = info->isolate();
PostponeInterruptsScope postpone(isolate);
@@ -599,66 +665,70 @@ static Handle<SharedFunctionInfo> MakeFunctionInfo(CompilationInfo* info) {
}
}

// Measure how long it takes to do the compilation; only take the
// rest of the function into account to avoid overlap with the
// parsing statistics.
HistogramTimer* rate = info->is_eval()
? info->isolate()->counters()->compile_eval()
: info->isolate()->counters()->compile();
HistogramTimerScope timer(rate);

// Compile the code.
FunctionLiteral* lit = info->function();
LiveEditFunctionTracker live_edit_tracker(isolate, lit);
if (!MakeCode(info)) {
if (!isolate->has_pending_exception()) isolate->StackOverflow();
return Handle<SharedFunctionInfo>::null();
}
Handle<SharedFunctionInfo> result;
{
// Measure how long it takes to do the compilation; only take the
// rest of the function into account to avoid overlap with the
// parsing statistics.
HistogramTimer* rate = info->is_eval()
? info->isolate()->counters()->compile_eval()
: info->isolate()->counters()->compile();
HistogramTimerScope timer(rate);

// Allocate function.
ASSERT(!info->code().is_null());
Handle<SharedFunctionInfo> result =
isolate->factory()->NewSharedFunctionInfo(
lit->name(),
lit->materialized_literal_count(),
lit->is_generator(),
info->code(),
ScopeInfo::Create(info->scope(), info->zone()));

ASSERT_EQ(RelocInfo::kNoPosition, lit->function_token_position());
Compiler::SetFunctionInfo(result, lit, true, script);

if (script->name()->IsString()) {
PROFILE(isolate, CodeCreateEvent(
info->is_eval()
? Logger::EVAL_TAG
: Logger::ToNativeByScript(Logger::SCRIPT_TAG, *script),
*info->code(),
*result,
info,
String::cast(script->name())));
GDBJIT(AddCode(Handle<String>(String::cast(script->name())),
script,
info->code(),
info));
} else {
PROFILE(isolate, CodeCreateEvent(
info->is_eval()
? Logger::EVAL_TAG
: Logger::ToNativeByScript(Logger::SCRIPT_TAG, *script),
*info->code(),
*result,
info,
isolate->heap()->empty_string()));
GDBJIT(AddCode(Handle<String>(), script, info->code(), info));
}
// Compile the code.
if (!MakeCode(info)) {
if (!isolate->has_pending_exception()) isolate->StackOverflow();
return Handle<SharedFunctionInfo>::null();
}

// Allocate function.
ASSERT(!info->code().is_null());
result =
isolate->factory()->NewSharedFunctionInfo(
lit->name(),
lit->materialized_literal_count(),
lit->is_generator(),
info->code(),
ScopeInfo::Create(info->scope(), info->zone()));

ASSERT_EQ(RelocInfo::kNoPosition, lit->function_token_position());
Compiler::SetFunctionInfo(result, lit, true, script);

if (script->name()->IsString()) {
PROFILE(isolate, CodeCreateEvent(
info->is_eval()
? Logger::EVAL_TAG
: Logger::ToNativeByScript(Logger::SCRIPT_TAG, *script),
*info->code(),
*result,
info,
String::cast(script->name())));
GDBJIT(AddCode(Handle<String>(String::cast(script->name())),
script,
info->code(),
info));
} else {
PROFILE(isolate, CodeCreateEvent(
info->is_eval()
? Logger::EVAL_TAG
: Logger::ToNativeByScript(Logger::SCRIPT_TAG, *script),
*info->code(),
*result,
info,
isolate->heap()->empty_string()));
GDBJIT(AddCode(Handle<String>(), script, info->code(), info));
}

// Hint to the runtime system used when allocating space for initial
// property space by setting the expected number of properties for
// the instances of the function.
SetExpectedNofPropertiesFromEstimate(result, lit->expected_property_count());
// Hint to the runtime system used when allocating space for initial
// property space by setting the expected number of properties for
// the instances of the function.
SetExpectedNofPropertiesFromEstimate(result,
lit->expected_property_count());

script->set_compilation_state(Script::COMPILATION_STATE_COMPILED);
script->set_compilation_state(Script::COMPILATION_STATE_COMPILED);
}

#ifdef ENABLE_DEBUGGER_SUPPORT
// Notify debugger
@@ -1032,16 +1102,15 @@ bool Compiler::RecompileConcurrent(Handle<JSFunction> closure,
info->SaveHandles();

if (Rewriter::Rewrite(*info) && Scope::Analyze(*info)) {
OptimizingCompiler* compiler =
new(info->zone()) OptimizingCompiler(*info);
OptimizingCompiler::Status status = compiler->CreateGraph();
if (status == OptimizingCompiler::SUCCEEDED) {
RecompileJob* job = new(info->zone()) RecompileJob(*info);
RecompileJob::Status status = job->CreateGraph();
if (status == RecompileJob::SUCCEEDED) {
info.Detach();
shared->code()->set_profiler_ticks(0);
isolate->optimizing_compiler_thread()->QueueForOptimization(compiler);
isolate->optimizing_compiler_thread()->QueueForOptimization(job);
ASSERT(!isolate->has_pending_exception());
return true;
} else if (status == OptimizingCompiler::BAILED_OUT) {
} else if (status == RecompileJob::BAILED_OUT) {
isolate->clear_pending_exception();
InstallFullCode(*info);
}
@@ -1054,9 +1123,8 @@ bool Compiler::RecompileConcurrent(Handle<JSFunction> closure,
}


Handle<Code> Compiler::InstallOptimizedCode(
OptimizingCompiler* optimizing_compiler) {
SmartPointer<CompilationInfo> info(optimizing_compiler->info());
Handle<Code> Compiler::InstallOptimizedCode(RecompileJob* job) {
SmartPointer<CompilationInfo> info(job->info());
// The function may have already been optimized by OSR. Simply continue.
// Except when OSR already disabled optimization for some reason.
if (info->shared_info()->optimization_disabled()) {
@@ -1077,24 +1145,24 @@ Handle<Code> Compiler::InstallOptimizedCode(
isolate, Logger::TimerEventScope::v8_recompile_synchronous);
// If crankshaft succeeded, install the optimized code else install
// the unoptimized code.
OptimizingCompiler::Status status = optimizing_compiler->last_status();
RecompileJob::Status status = job->last_status();
if (info->HasAbortedDueToDependencyChange()) {
info->set_bailout_reason(kBailedOutDueToDependencyChange);
status = optimizing_compiler->AbortOptimization();
} else if (status != OptimizingCompiler::SUCCEEDED) {
status = job->AbortOptimization();
} else if (status != RecompileJob::SUCCEEDED) {
info->set_bailout_reason(kFailedBailedOutLastTime);
status = optimizing_compiler->AbortOptimization();
status = job->AbortOptimization();
} else if (isolate->DebuggerHasBreakPoints()) {
info->set_bailout_reason(kDebuggerIsActive);
status = optimizing_compiler->AbortOptimization();
status = job->AbortOptimization();
} else {
status = optimizing_compiler->GenerateAndInstallCode();
ASSERT(status == OptimizingCompiler::SUCCEEDED ||
status == OptimizingCompiler::BAILED_OUT);
status = job->GenerateAndInstallCode();
ASSERT(status == RecompileJob::SUCCEEDED ||
status == RecompileJob::BAILED_OUT);
}

InstallCodeCommon(*info);
if (status == OptimizingCompiler::SUCCEEDED) {
if (status == RecompileJob::SUCCEEDED) {
Handle<Code> code = info->code();
ASSERT(info->shared_info()->scope_info() != ScopeInfo::Empty(isolate));
info->closure()->ReplaceCode(*code);
@@ -1115,8 +1183,8 @@ Handle<Code> Compiler::InstallOptimizedCode(
// profiler ticks to prevent too soon re-opt after a deopt.
info->shared_info()->code()->set_profiler_ticks(0);
ASSERT(!info->closure()->IsInRecompileQueue());
return (status == OptimizingCompiler::SUCCEEDED) ? info->code()
: Handle<Code>::null();
return (status == RecompileJob::SUCCEEDED) ? info->code()
: Handle<Code>::null();
}


@@ -35,8 +35,6 @@
namespace v8 {
namespace internal {

static const int kPrologueOffsetNotSet = -1;

class ScriptDataImpl;
class HydrogenCodeStub;

@@ -86,6 +84,7 @@ class CompilationInfo {
ScriptDataImpl* pre_parse_data() const { return pre_parse_data_; }
Handle<Context> context() const { return context_; }
BailoutId osr_ast_id() const { return osr_ast_id_; }
uint32_t osr_pc_offset() const { return osr_pc_offset_; }
int opt_count() const { return opt_count_; }
int num_parameters() const;
int num_heap_slots() const;
@@ -268,12 +267,12 @@ class CompilationInfo {
void set_bailout_reason(BailoutReason reason) { bailout_reason_ = reason; }

int prologue_offset() const {
ASSERT_NE(kPrologueOffsetNotSet, prologue_offset_);
ASSERT_NE(Code::kPrologueOffsetNotSet, prologue_offset_);
return prologue_offset_;
}

void set_prologue_offset(int prologue_offset) {
ASSERT_EQ(kPrologueOffsetNotSet, prologue_offset_);
ASSERT_EQ(Code::kPrologueOffsetNotSet, prologue_offset_);
prologue_offset_ = prologue_offset;
}

@@ -505,14 +504,15 @@ class LChunk;
// fail, bail-out to the full code generator or succeed. Apart from
// their return value, the status of the phase last run can be checked
// using last_status().
class OptimizingCompiler: public ZoneObject {
class RecompileJob: public ZoneObject {
public:
explicit OptimizingCompiler(CompilationInfo* info)
explicit RecompileJob(CompilationInfo* info)
: info_(info),
graph_builder_(NULL),
graph_(NULL),
chunk_(NULL),
last_status_(FAILED) { }
last_status_(FAILED),
awaiting_install_(false) { }

enum Status {
FAILED, BAILED_OUT, SUCCEEDED
@@ -532,6 +532,13 @@ class OptimizingCompiler: public ZoneObject {
return SetLastStatus(BAILED_OUT);
}

void WaitForInstall() {
ASSERT(info_->is_osr());
awaiting_install_ = true;
}

bool IsWaitingForInstall() { return awaiting_install_; }

private:
CompilationInfo* info_;
HOptimizedGraphBuilder* graph_builder_;
@@ -541,6 +548,7 @@ class OptimizingCompiler: public ZoneObject {
TimeDelta time_taken_to_optimize_;
TimeDelta time_taken_to_codegen_;
Status last_status_;
bool awaiting_install_;

MUST_USE_RESULT Status SetLastStatus(Status status) {
last_status_ = status;
@@ -549,9 +557,8 @@ class OptimizingCompiler: public ZoneObject {
void RecordOptimizationStats();

struct Timer {
Timer(OptimizingCompiler* compiler, TimeDelta* location)
: compiler_(compiler),
location_(location) {
Timer(RecompileJob* job, TimeDelta* location)
: job_(job), location_(location) {
ASSERT(location_ != NULL);
timer_.Start();
}
@@ -560,7 +567,7 @@ class OptimizingCompiler: public ZoneObject {
*location_ += timer_.Elapsed();
}

OptimizingCompiler* compiler_;
RecompileJob* job_;
ElapsedTimer timer_;
TimeDelta* location_;
};
@@ -625,7 +632,7 @@ class Compiler : public AllStatic {
bool is_toplevel,
Handle<Script> script);

static Handle<Code> InstallOptimizedCode(OptimizingCompiler* info);
static Handle<Code> InstallOptimizedCode(RecompileJob* job);

#ifdef ENABLE_DEBUGGER_SUPPORT
static bool MakeCodeForLiveEdit(CompilationInfo* info);
@@ -259,7 +259,7 @@ Handle<Object> Context::Lookup(Handle<String> name,

void Context::AddOptimizedFunction(JSFunction* function) {
ASSERT(IsNativeContext());
#ifdef DEBUG
#ifdef ENABLE_SLOW_ASSERTS
if (FLAG_enable_slow_asserts) {
Object* element = get(OPTIMIZED_FUNCTIONS_LIST);
while (!element->IsUndefined()) {
@@ -355,7 +355,7 @@ double InternalStringToInt(UnicodeCache* unicode_cache,
return JunkStringValue();
}

ASSERT(buffer_pos < kBufferSize);
SLOW_ASSERT(buffer_pos < kBufferSize);
buffer[buffer_pos] = '\0';
Vector<const char> buffer_vector(buffer, buffer_pos);
return negative ? -Strtod(buffer_vector, 0) : Strtod(buffer_vector, 0);
@@ -692,7 +692,7 @@ double InternalStringToDouble(UnicodeCache* unicode_cache,
exponent--;
}

ASSERT(buffer_pos < kBufferSize);
SLOW_ASSERT(buffer_pos < kBufferSize);
buffer[buffer_pos] = '\0';

double converted = Strtod(Vector<const char>(buffer, buffer_pos), exponent);
@@ -31,6 +31,7 @@

#include "conversions-inl.h"
#include "dtoa.h"
#include "list-inl.h"
#include "strtod.h"
#include "utils.h"

@@ -45,8 +46,11 @@ namespace internal {

double StringToDouble(UnicodeCache* unicode_cache,
const char* str, int flags, double empty_string_val) {
const char* end = str + StrLength(str);
return InternalStringToDouble(unicode_cache, str, end, flags,
// We cast to const uint8_t* here to avoid instantiating the
// InternalStringToDouble() template for const char* as well.
const uint8_t* start = reinterpret_cast<const uint8_t*>(str);
const uint8_t* end = start + StrLength(str);
return InternalStringToDouble(unicode_cache, start, end, flags,
empty_string_val);
}

@@ -55,11 +59,15 @@ double StringToDouble(UnicodeCache* unicode_cache,
Vector<const char> str,
int flags,
double empty_string_val) {
const char* end = str.start() + str.length();
return InternalStringToDouble(unicode_cache, str.start(), end, flags,
// We cast to const uint8_t* here to avoid instantiating the
// InternalStringToDouble() template for const char* as well.
const uint8_t* start = reinterpret_cast<const uint8_t*>(str.start());
const uint8_t* end = start + str.length();
return InternalStringToDouble(unicode_cache, start, end, flags,
empty_string_val);
}


double StringToDouble(UnicodeCache* unicode_cache,
Vector<const uc16> str,
int flags,
@@ -259,22 +259,51 @@ class HistogramTimer : public Histogram {
return Enabled() && timer_.IsStarted();
}

// TODO(bmeurer): Remove this when HistogramTimerScope is fixed.
#ifdef DEBUG
ElapsedTimer* timer() { return &timer_; }
#endif

private:
ElapsedTimer timer_;
};

// Helper class for scoping a HistogramTimer.
// TODO(bmeurer): The ifdeffery is an ugly hack around the fact that the
// Parser is currently reentrant (when it throws an error, we call back
// into JavaScript and all bets are off), but ElapsedTimer is not
// reentry-safe. Fix this properly and remove |allow_nesting|.
class HistogramTimerScope BASE_EMBEDDED {
public:
explicit HistogramTimerScope(HistogramTimer* timer) :
timer_(timer) {
explicit HistogramTimerScope(HistogramTimer* timer,
bool allow_nesting = false)
#ifdef DEBUG
: timer_(timer),
skipped_timer_start_(false) {
if (timer_->timer()->IsStarted() && allow_nesting) {
skipped_timer_start_ = true;
} else {
timer_->Start();
}
#else
: timer_(timer) {
timer_->Start();
#endif
}
~HistogramTimerScope() {
#ifdef DEBUG
if (!skipped_timer_start_) {
timer_->Stop();
}
#else
timer_->Stop();
#endif
}
private:
HistogramTimer* timer_;
#ifdef DEBUG
bool skipped_timer_start_;
#endif
};