@@ -516,6 +516,8 @@ void StubCompiler::GenerateStoreTransition(MacroAssembler* masm,

if (FLAG_track_fields && representation.IsSmi()) {
__ JumpIfNotSmi(value_reg, miss_restore_name);
} else if (FLAG_track_heap_object_fields && representation.IsHeapObject()) {
__ JumpIfSmi(value_reg, miss_restore_name);
} else if (FLAG_track_double_fields && representation.IsDouble()) {
Label do_store, heap_number;
__ LoadRoot(scratch3, Heap::kHeapNumberMapRootIndex);
@@ -685,6 +687,8 @@ void StubCompiler::GenerateStoreField(MacroAssembler* masm,
ASSERT(!representation.IsNone());
if (FLAG_track_fields && representation.IsSmi()) {
__ JumpIfNotSmi(value_reg, miss_label);
} else if (FLAG_track_heap_object_fields && representation.IsHeapObject()) {
__ JumpIfSmi(value_reg, miss_label);
} else if (FLAG_track_double_fields && representation.IsDouble()) {
// Load the double storage.
if (index < 0) {
@@ -848,7 +852,7 @@ static void CompileCallLoadPropertyWithInterceptor(
}


static const int kFastApiCallArguments = 4;
static const int kFastApiCallArguments = FunctionCallbackArguments::kArgsLength;

// Reserves space for the extra arguments to API function in the
// caller's frame.
@@ -877,10 +881,11 @@ static void GenerateFastApiDirectCall(MacroAssembler* masm,
// -- sp[4] : callee JS function
// -- sp[8] : call data
// -- sp[12] : isolate
// -- sp[16] : last JS argument
// -- sp[16] : ReturnValue
// -- sp[20] : last JS argument
// -- ...
// -- sp[(argc + 3) * 4] : first JS argument
// -- sp[(argc + 4) * 4] : receiver
// -- sp[(argc + 4) * 4] : first JS argument
// -- sp[(argc + 5) * 4] : receiver
// -----------------------------------
// Get the function and setup the context.
Handle<JSFunction> function = optimization.constant_function();
@@ -897,11 +902,13 @@ static void GenerateFastApiDirectCall(MacroAssembler* masm,
__ Move(r6, call_data);
}
__ mov(r7, Operand(ExternalReference::isolate_address(masm->isolate())));
// Store JS function, call data and isolate.
// Store JS function, call data, isolate and ReturnValue.
__ stm(ib, sp, r5.bit() | r6.bit() | r7.bit());
__ LoadRoot(r5, Heap::kUndefinedValueRootIndex);
__ str(r5, MemOperand(sp, 4 * kPointerSize));

// Prepare arguments.
__ add(r2, sp, Operand(3 * kPointerSize));
__ add(r2, sp, Operand(4 * kPointerSize));

// Allocate the v8::Arguments structure in the arguments' space since
// it's not controlled by GC.
@@ -927,13 +934,21 @@ static void GenerateFastApiDirectCall(MacroAssembler* masm,

const int kStackUnwindSpace = argc + kFastApiCallArguments + 1;
Address function_address = v8::ToCData<Address>(api_call_info->callback());
bool returns_handle =
!CallbackTable::ReturnsVoid(masm->isolate(), function_address);
ApiFunction fun(function_address);
ExternalReference::Type type =
returns_handle ?
ExternalReference::DIRECT_API_CALL :
ExternalReference::DIRECT_API_CALL_NEW;
ExternalReference ref = ExternalReference(&fun,
ExternalReference::DIRECT_API_CALL,
type,
masm->isolate());
AllowExternalCallThatCantCauseGC scope(masm);

__ CallApiFunctionAndReturn(ref, kStackUnwindSpace);
__ CallApiFunctionAndReturn(ref,
kStackUnwindSpace,
returns_handle,
kFastApiCallArguments + 1);
}


@@ -1409,7 +1424,8 @@ void BaseLoadStubCompiler::GenerateLoadCallback(
__ Push(reg, scratch3());
__ mov(scratch3(),
Operand(ExternalReference::isolate_address(isolate())));
__ Push(scratch3(), name());
__ LoadRoot(scratch4(), Heap::kUndefinedValueRootIndex);
__ Push(scratch3(), scratch4(), name());
__ mov(r0, sp); // r0 = Handle<Name>

const int kApiStackSpace = 1;
@@ -1421,12 +1437,21 @@ void BaseLoadStubCompiler::GenerateLoadCallback(
__ str(scratch2(), MemOperand(sp, 1 * kPointerSize));
__ add(r1, sp, Operand(1 * kPointerSize)); // r1 = AccessorInfo&

const int kStackUnwindSpace = 5;
const int kStackUnwindSpace = kFastApiCallArguments + 1;
Address getter_address = v8::ToCData<Address>(callback->getter());
bool returns_handle =
!CallbackTable::ReturnsVoid(isolate(), getter_address);
ApiFunction fun(getter_address);
ExternalReference ref = ExternalReference(
&fun, ExternalReference::DIRECT_GETTER_CALL, isolate());
__ CallApiFunctionAndReturn(ref, kStackUnwindSpace);
ExternalReference::Type type =
returns_handle ?
ExternalReference::DIRECT_GETTER_CALL :
ExternalReference::DIRECT_GETTER_CALL_NEW;

ExternalReference ref = ExternalReference(&fun, type, isolate());
__ CallApiFunctionAndReturn(ref,
kStackUnwindSpace,
returns_handle,
3);
}


@@ -1676,8 +1701,6 @@ Handle<Code> CallStubCompiler::CompileArrayPushCall(

// Get the array's length into r0 and calculate new length.
__ ldr(r0, FieldMemOperand(receiver, JSArray::kLengthOffset));
STATIC_ASSERT(kSmiTagSize == 1);
STATIC_ASSERT(kSmiTag == 0);
__ add(r0, r0, Operand(Smi::FromInt(argc)));

// Get the elements' length.
@@ -1697,8 +1720,7 @@ Handle<Code> CallStubCompiler::CompileArrayPushCall(
// Store the value.
// We may need a register containing the address end_elements below,
// so write back the value in end_elements.
__ add(end_elements, elements,
Operand(r0, LSL, kPointerSizeLog2 - kSmiTagSize));
__ add(end_elements, elements, Operand::PointerOffsetFromSmiKey(r0));
const int kEndElementsOffset =
FixedArray::kHeaderSize - kHeapObjectTag - argc * kPointerSize;
__ str(r4, MemOperand(end_elements, kEndElementsOffset, PreIndex));
@@ -1718,8 +1740,6 @@ Handle<Code> CallStubCompiler::CompileArrayPushCall(

// Get the array's length into r0 and calculate new length.
__ ldr(r0, FieldMemOperand(receiver, JSArray::kLengthOffset));
STATIC_ASSERT(kSmiTagSize == 1);
STATIC_ASSERT(kSmiTag == 0);
__ add(r0, r0, Operand(Smi::FromInt(argc)));

// Get the elements' length.
@@ -1793,8 +1813,7 @@ Handle<Code> CallStubCompiler::CompileArrayPushCall(
// Store the value.
// We may need a register containing the address end_elements below,
// so write back the value in end_elements.
__ add(end_elements, elements,
Operand(r0, LSL, kPointerSizeLog2 - kSmiTagSize));
__ add(end_elements, elements, Operand::PointerOffsetFromSmiKey(r0));
__ str(r4, MemOperand(end_elements, kEndElementsOffset, PreIndex));

__ RecordWrite(elements,
@@ -1831,8 +1850,7 @@ Handle<Code> CallStubCompiler::CompileArrayPushCall(

const int kAllocationDelta = 4;
// Load top and check if it is the end of elements.
__ add(end_elements, elements,
Operand(r0, LSL, kPointerSizeLog2 - kSmiTagSize));
__ add(end_elements, elements, Operand::PointerOffsetFromSmiKey(r0));
__ add(end_elements, end_elements, Operand(kEndElementsOffset));
__ mov(r7, Operand(new_space_allocation_top));
__ ldr(r3, MemOperand(r7));
@@ -1928,11 +1946,9 @@ Handle<Code> CallStubCompiler::CompileArrayPopCall(

// Get the last element.
__ LoadRoot(r6, Heap::kTheHoleValueRootIndex);
STATIC_ASSERT(kSmiTagSize == 1);
STATIC_ASSERT(kSmiTag == 0);
// We can't address the last element in one operation. Compute the more
// expensive shift first, and use an offset later on.
__ add(elements, elements, Operand(r4, LSL, kPointerSizeLog2 - kSmiTagSize));
__ add(elements, elements, Operand::PointerOffsetFromSmiKey(r4));
__ ldr(r0, FieldMemOperand(elements, FixedArray::kHeaderSize));
__ cmp(r0, r6);
__ b(eq, &call_builtin);
@@ -2154,7 +2170,6 @@ Handle<Code> CallStubCompiler::CompileStringFromCharCodeCall(
if (cell.is_null()) {
__ ldr(r1, MemOperand(sp, 1 * kPointerSize));

STATIC_ASSERT(kSmiTag == 0);
__ JumpIfSmi(r1, &miss);

CheckPrototypes(Handle<JSObject>::cast(object), r1, holder, r0, r3, r4,
@@ -2172,7 +2187,6 @@ Handle<Code> CallStubCompiler::CompileStringFromCharCodeCall(

// Check the code is a smi.
Label slow;
STATIC_ASSERT(kSmiTag == 0);
__ JumpIfNotSmi(code, &slow);

// Convert the smi code to uint16.
@@ -2226,7 +2240,6 @@ Handle<Code> CallStubCompiler::CompileMathFloorCall(

if (cell.is_null()) {
__ ldr(r1, MemOperand(sp, 1 * kPointerSize));
STATIC_ASSERT(kSmiTag == 0);
__ JumpIfSmi(r1, &miss);
CheckPrototypes(Handle<JSObject>::cast(object), r1, holder, r0, r3, r4,
name, &miss);
@@ -2241,8 +2254,7 @@ Handle<Code> CallStubCompiler::CompileMathFloorCall(
__ ldr(r0, MemOperand(sp, 0 * kPointerSize));

// If the argument is a smi, just return.
STATIC_ASSERT(kSmiTag == 0);
__ tst(r0, Operand(kSmiTagMask));
__ SmiTst(r0);
__ Drop(argc + 1, eq);
__ Ret(eq);

@@ -2288,11 +2300,9 @@ Handle<Code> CallStubCompiler::CompileMathFloorCall(
__ bind(&smi_check);
// Check if the result can fit into an smi. If we had an overflow,
// the result is either 0x80000000 or 0x7FFFFFFF and won't fit into an smi.
__ add(r1, r0, Operand(0x40000000), SetCC);
// If result doesn't fit into an smi, branch to slow.
__ b(&slow, mi);
// Tag the result.
__ mov(r0, Operand(r0, LSL, kSmiTagSize));
__ SmiTag(r0, SetCC);
__ b(vs, &slow);

__ bind(&just_return);
__ Drop(argc + 1);
@@ -2337,7 +2347,6 @@ Handle<Code> CallStubCompiler::CompileMathAbsCall(
GenerateNameCheck(name, &miss);
if (cell.is_null()) {
__ ldr(r1, MemOperand(sp, 1 * kPointerSize));
STATIC_ASSERT(kSmiTag == 0);
__ JumpIfSmi(r1, &miss);
CheckPrototypes(Handle<JSObject>::cast(object), r1, holder, r0, r3, r4,
name, &miss);
@@ -2353,7 +2362,6 @@ Handle<Code> CallStubCompiler::CompileMathAbsCall(

// Check if the argument is a smi.
Label not_smi;
STATIC_ASSERT(kSmiTag == 0);
__ JumpIfNotSmi(r0, &not_smi);

// Do bitwise not or do nothing depending on the sign of the
@@ -3233,8 +3241,7 @@ void KeyedLoadStubCompiler::GenerateLoadDictionaryElement(
Register key = r0;
Register receiver = r1;

__ JumpIfNotSmi(key, &miss_force_generic);
__ mov(r2, Operand(key, ASR, kSmiTagSize));
__ UntagAndJumpIfNotSmi(r2, key, &miss_force_generic);
__ ldr(r4, FieldMemOperand(receiver, JSObject::kElementsOffset));
__ LoadFromNumberDictionary(&slow, r4, key, r0, r2, r3, r5);
__ Ret();
@@ -3266,7 +3273,6 @@ void KeyedLoadStubCompiler::GenerateLoadDictionaryElement(
static void GenerateSmiKeyCheck(MacroAssembler* masm,
Register key,
Register scratch0,
Register scratch1,
DwVfpRegister double_scratch0,
DwVfpRegister double_scratch1,
Label* fail) {
@@ -3284,8 +3290,7 @@ static void GenerateSmiKeyCheck(MacroAssembler* masm,
__ vldr(double_scratch0, ip, HeapNumber::kValueOffset);
__ TryDoubleToInt32Exact(scratch0, double_scratch0, double_scratch1);
__ b(ne, fail);
__ TrySmiTag(scratch0, fail, scratch1);
__ mov(key, scratch0);
__ TrySmiTag(key, scratch0, fail);
__ bind(&key_ok);
}

@@ -3311,7 +3316,7 @@ void KeyedStoreStubCompiler::GenerateStoreExternalArray(
// have been verified by the caller to not be a smi.

// Check that the key is a smi or a heap number convertible to a smi.
GenerateSmiKeyCheck(masm, key, r4, r5, d1, d2, &miss_force_generic);
GenerateSmiKeyCheck(masm, key, r4, d1, d2, &miss_force_generic);

__ ldr(r3, FieldMemOperand(receiver, JSObject::kElementsOffset));

@@ -3326,11 +3331,10 @@ void KeyedStoreStubCompiler::GenerateStoreExternalArray(
// r3: external array.
if (elements_kind == EXTERNAL_PIXEL_ELEMENTS) {
// Double to pixel conversion is only implemented in the runtime for now.
__ JumpIfNotSmi(value, &slow);
__ UntagAndJumpIfNotSmi(r5, value, &slow);
} else {
__ JumpIfNotSmi(value, &check_heap_number);
__ UntagAndJumpIfNotSmi(r5, value, &check_heap_number);
}
__ SmiUntag(r5, value);
__ ldr(r3, FieldMemOperand(r3, ExternalArray::kExternalPointerOffset));

// r3: base pointer of external storage.
@@ -3501,7 +3505,7 @@ void KeyedStoreStubCompiler::GenerateStoreFastElement(
// have been verified by the caller to not be a smi.

// Check that the key is a smi or a heap number convertible to a smi.
GenerateSmiKeyCheck(masm, key_reg, r4, r5, d1, d2, &miss_force_generic);
GenerateSmiKeyCheck(masm, key_reg, r4, d1, d2, &miss_force_generic);

if (IsFastSmiElementsKind(elements_kind)) {
__ JumpIfNotSmi(value_reg, &transition_elements_kind);
@@ -3535,20 +3539,14 @@ void KeyedStoreStubCompiler::GenerateStoreFastElement(
__ add(scratch,
elements_reg,
Operand(FixedArray::kHeaderSize - kHeapObjectTag));
STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize < kPointerSizeLog2);
__ add(scratch,
scratch,
Operand(key_reg, LSL, kPointerSizeLog2 - kSmiTagSize));
__ add(scratch, scratch, Operand::PointerOffsetFromSmiKey(key_reg));
__ str(value_reg, MemOperand(scratch));
} else {
ASSERT(IsFastObjectElementsKind(elements_kind));
__ add(scratch,
elements_reg,
Operand(FixedArray::kHeaderSize - kHeapObjectTag));
STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize < kPointerSizeLog2);
__ add(scratch,
scratch,
Operand(key_reg, LSL, kPointerSizeLog2 - kSmiTagSize));
__ add(scratch, scratch, Operand::PointerOffsetFromSmiKey(key_reg));
__ str(value_reg, MemOperand(scratch));
__ mov(receiver_reg, value_reg);
__ RecordWrite(elements_reg, // Object.
@@ -3662,7 +3660,7 @@ void KeyedStoreStubCompiler::GenerateStoreFastDoubleElement(
// have been verified by the caller to not be a smi.

// Check that the key is a smi or a heap number convertible to a smi.
GenerateSmiKeyCheck(masm, key_reg, r4, r5, d1, d2, &miss_force_generic);
GenerateSmiKeyCheck(masm, key_reg, r4, d1, d2, &miss_force_generic);

__ ldr(elements_reg,
FieldMemOperand(receiver_reg, JSObject::kElementsOffset));
@@ -416,6 +416,26 @@ function ArrayPop() {
}


function ObservedArrayPush() {
var n = TO_UINT32(this.length);
var m = %_ArgumentsLength();

EnqueueSpliceRecord(this, n, [], 0, m);

try {
BeginPerformSplice(this);

for (var i = 0; i < m; i++) {
this[i+n] = %_Arguments(i);
}
this.length = n + m;
} finally {
EndPerformSplice(this);
}

return this.length;
}

// Appends the arguments to the end of the array and returns the new
// length of the array. See ECMA-262, section 15.4.4.7.
function ArrayPush() {
@@ -424,6 +444,9 @@ function ArrayPush() {
["Array.prototype.push"]);
}

if (%IsObserved(this))
return ObservedArrayPush.apply(this, arguments);

var n = TO_UINT32(this.length);
var m = %_ArgumentsLength();
for (var i = 0; i < m; i++) {
@@ -647,9 +647,17 @@ class ExternalReference BASE_EMBEDDED {
// Handle<Value> f(v8::Arguments&)
DIRECT_API_CALL,

// Direct call to API function callback.
// void f(v8::Arguments&)
DIRECT_API_CALL_NEW,

// Direct call to accessor getter callback.
// Handle<value> f(Local<String> property, AccessorInfo& info)
DIRECT_GETTER_CALL
DIRECT_GETTER_CALL,

// Direct call to accessor getter callback.
// void f(Local<String> property, AccessorInfo& info)
DIRECT_GETTER_CALL_NEW
};

static void SetUp();
@@ -278,14 +278,17 @@ class SmallMapList {
int length() const { return list_.length(); }

void AddMapIfMissing(Handle<Map> map, Zone* zone) {
map = Map::CurrentMapForDeprecated(map);
Map* updated = map->CurrentMapForDeprecated();
if (updated == NULL) return;
map = Handle<Map>(updated);
for (int i = 0; i < length(); ++i) {
if (at(i).is_identical_to(map)) return;
}
Add(map, zone);
}

void Add(Handle<Map> handle, Zone* zone) {
ASSERT(!handle->is_deprecated());
list_.Add(handle.location(), zone);
}

@@ -1992,6 +1995,18 @@ class Yield: public Expression {
Kind yield_kind() const { return yield_kind_; }
virtual int position() const { return pos_; }

// Delegating yield surrounds the "yield" in a "try/catch". This index
// locates the catch handler in the handler table, and is equivalent to
// TryCatchStatement::index().
int index() const {
ASSERT(yield_kind() == DELEGATING);
return index_;
}
void set_index(int index) {
ASSERT(yield_kind() == DELEGATING);
index_ = index;
}

protected:
Yield(Isolate* isolate,
Expression* generator_object,
@@ -2002,12 +2017,14 @@ class Yield: public Expression {
generator_object_(generator_object),
expression_(expression),
yield_kind_(yield_kind),
index_(-1),
pos_(pos) { }

private:
Expression* generator_object_;
Expression* expression_;
Kind yield_kind_;
int index_;
int pos_;
};

Large diffs are not rendered by default.

@@ -88,6 +88,8 @@ class SourceCodeCache BASE_EMBEDDED {
// context.
class Bootstrapper {
public:
static void InitializeOncePerProcess();

// Requires: Heap::SetUp has been called.
void Initialize(bool create_heap_objects);
void TearDown();
@@ -845,7 +845,7 @@ BUILTIN(ArraySlice) {
if (start < kMinInt || start > kMaxInt) {
return CallJsBuiltin(isolate, "ArraySlice", args);
}
relative_start = static_cast<int>(start);
relative_start = std::isnan(start) ? 0 : static_cast<int>(start);
} else if (!arg1->IsUndefined()) {
return CallJsBuiltin(isolate, "ArraySlice", args);
}
@@ -858,7 +858,7 @@ BUILTIN(ArraySlice) {
if (end < kMinInt || end > kMaxInt) {
return CallJsBuiltin(isolate, "ArraySlice", args);
}
relative_end = static_cast<int>(end);
relative_end = std::isnan(end) ? 0 : static_cast<int>(end);
} else if (!arg2->IsUndefined()) {
return CallJsBuiltin(isolate, "ArraySlice", args);
}
@@ -1317,23 +1317,21 @@ MUST_USE_RESULT static MaybeObject* HandleApiCallHelper(
LOG(isolate, ApiObjectAccess("call", JSObject::cast(*args.receiver())));
ASSERT(raw_holder->IsJSObject());

CustomArguments custom(isolate);
v8::ImplementationUtilities::PrepareArgumentsData(custom.end(),
isolate, data_obj, *function, raw_holder);

v8::Arguments new_args = v8::ImplementationUtilities::NewArguments(
custom.end(),
&args[0] - 1,
args.length() - 1,
is_construct);
FunctionCallbackArguments custom(isolate,
data_obj,
*function,
raw_holder,
&args[0] - 1,
args.length() - 1,
is_construct);

v8::Handle<v8::Value> value;
{
// Leaving JavaScript.
VMState<EXTERNAL> state(isolate);
ExternalCallbackScope call_scope(isolate,
v8::ToCData<Address>(callback_obj));
value = callback(new_args);
value = custom.Call(callback);
}
if (value.IsEmpty()) {
result = heap->undefined_value();
@@ -1396,21 +1394,20 @@ MUST_USE_RESULT static MaybeObject* HandleApiCallAsFunctionOrConstructor(
HandleScope scope(isolate);
LOG(isolate, ApiObjectAccess("call non-function", obj));

CustomArguments custom(isolate);
v8::ImplementationUtilities::PrepareArgumentsData(custom.end(),
isolate, call_data->data(), constructor, obj);
v8::Arguments new_args = v8::ImplementationUtilities::NewArguments(
custom.end(),
&args[0] - 1,
args.length() - 1,
is_construct_call);
FunctionCallbackArguments custom(isolate,
call_data->data(),
constructor,
obj,
&args[0] - 1,
args.length() - 1,
is_construct_call);
v8::Handle<v8::Value> value;
{
// Leaving JavaScript.
VMState<EXTERNAL> state(isolate);
ExternalCallbackScope call_scope(isolate,
v8::ToCData<Address>(callback_obj));
value = callback(new_args);
value = custom.Call(callback);
}
if (value.IsEmpty()) {
result = heap->undefined_value();
@@ -107,6 +107,8 @@ enum BuiltinExtraArguments {
Code::kNoExtraICState) \
V(NotifyDeoptimized, BUILTIN, UNINITIALIZED, \
Code::kNoExtraICState) \
V(NotifySoftDeoptimized, BUILTIN, UNINITIALIZED, \
Code::kNoExtraICState) \
V(NotifyLazyDeoptimized, BUILTIN, UNINITIALIZED, \
Code::kNoExtraICState) \
V(NotifyStubFailure, BUILTIN, UNINITIALIZED, \
@@ -380,6 +382,7 @@ class Builtins {
static void Generate_LazyCompile(MacroAssembler* masm);
static void Generate_LazyRecompile(MacroAssembler* masm);
static void Generate_NotifyDeoptimized(MacroAssembler* masm);
static void Generate_NotifySoftDeoptimized(MacroAssembler* masm);
static void Generate_NotifyLazyDeoptimized(MacroAssembler* masm);
static void Generate_NotifyOSR(MacroAssembler* masm);
static void Generate_NotifyStubFailure(MacroAssembler* masm);
@@ -53,7 +53,7 @@ extern "C" void V8_Fatal(const char* file, int line, const char* format, ...) {
if (fatal_error_handler_nesting_depth < 3) {
if (i::FLAG_stack_trace_on_abort) {
// Call this one twice on double fault
i::Isolate::Current()->PrintStack();
i::Isolate::Current()->PrintStack(stderr);
}
}
i::OS::Abort();
@@ -418,7 +418,7 @@ HValue* CodeStubGraphBuilder<KeyedLoadFastElementStub>::BuildCodeStub() {
HInstruction* load = BuildUncheckedMonomorphicElementAccess(
GetParameter(0), GetParameter(1), NULL, NULL,
casted_stub()->is_js_array(), casted_stub()->elements_kind(),
false, STANDARD_STORE, Representation::Tagged());
false, NEVER_RETURN_HOLE, STANDARD_STORE, Representation::Tagged());
return load;
}

@@ -463,7 +463,8 @@ HValue* CodeStubGraphBuilder<KeyedStoreFastElementStub>::BuildCodeStub() {
BuildUncheckedMonomorphicElementAccess(
GetParameter(0), GetParameter(1), GetParameter(2), NULL,
casted_stub()->is_js_array(), casted_stub()->elements_kind(),
true, casted_stub()->store_mode(), Representation::Tagged());
true, NEVER_RETURN_HOLE, casted_stub()->store_mode(),
Representation::Tagged());

return GetParameter(2);
}
@@ -232,37 +232,37 @@ void BinaryOpStub::Generate(MacroAssembler* masm) {
void BinaryOpStub::GenerateCallRuntime(MacroAssembler* masm) {
switch (op_) {
case Token::ADD:
__ InvokeBuiltin(Builtins::ADD, JUMP_FUNCTION);
__ InvokeBuiltin(Builtins::ADD, CALL_FUNCTION);
break;
case Token::SUB:
__ InvokeBuiltin(Builtins::SUB, JUMP_FUNCTION);
__ InvokeBuiltin(Builtins::SUB, CALL_FUNCTION);
break;
case Token::MUL:
__ InvokeBuiltin(Builtins::MUL, JUMP_FUNCTION);
__ InvokeBuiltin(Builtins::MUL, CALL_FUNCTION);
break;
case Token::DIV:
__ InvokeBuiltin(Builtins::DIV, JUMP_FUNCTION);
__ InvokeBuiltin(Builtins::DIV, CALL_FUNCTION);
break;
case Token::MOD:
__ InvokeBuiltin(Builtins::MOD, JUMP_FUNCTION);
__ InvokeBuiltin(Builtins::MOD, CALL_FUNCTION);
break;
case Token::BIT_OR:
__ InvokeBuiltin(Builtins::BIT_OR, JUMP_FUNCTION);
__ InvokeBuiltin(Builtins::BIT_OR, CALL_FUNCTION);
break;
case Token::BIT_AND:
__ InvokeBuiltin(Builtins::BIT_AND, JUMP_FUNCTION);
__ InvokeBuiltin(Builtins::BIT_AND, CALL_FUNCTION);
break;
case Token::BIT_XOR:
__ InvokeBuiltin(Builtins::BIT_XOR, JUMP_FUNCTION);
__ InvokeBuiltin(Builtins::BIT_XOR, CALL_FUNCTION);
break;
case Token::SAR:
__ InvokeBuiltin(Builtins::SAR, JUMP_FUNCTION);
__ InvokeBuiltin(Builtins::SAR, CALL_FUNCTION);
break;
case Token::SHR:
__ InvokeBuiltin(Builtins::SHR, JUMP_FUNCTION);
__ InvokeBuiltin(Builtins::SHR, CALL_FUNCTION);
break;
case Token::SHL:
__ InvokeBuiltin(Builtins::SHL, JUMP_FUNCTION);
__ InvokeBuiltin(Builtins::SHL, CALL_FUNCTION);
break;
default:
UNREACHABLE();
@@ -408,41 +408,50 @@ void ICCompareStub::Generate(MacroAssembler* masm) {
}


CompareNilICStub::Types CompareNilICStub::GetPatchedICFlags(
Code::ExtraICState extra_ic_state,
Handle<Object> object,
bool* already_monomorphic) {
Types types = TypesField::decode(extra_ic_state);
NilValue nil = NilValueField::decode(extra_ic_state);
EqualityKind kind = EqualityKindField::decode(extra_ic_state);
ASSERT(types != CompareNilICStub::kFullCompare);
*already_monomorphic =
(types & CompareNilICStub::kCompareAgainstMonomorphicMap) != 0;
if (kind == kStrictEquality) {
if (nil == kNullValue) {
return CompareNilICStub::kCompareAgainstNull;
} else {
return CompareNilICStub::kCompareAgainstUndefined;
}
void CompareNilICStub::Record(Handle<Object> object) {
ASSERT(types_ != Types::FullCompare());
if (equality_kind_ == kStrictEquality) {
// When testing for strict equality only one value will evaluate to true
types_.RemoveAll();
types_.Add((nil_value_ == kNullValue) ? NULL_TYPE:
UNDEFINED);
} else {
if (object->IsNull()) {
types = static_cast<CompareNilICStub::Types>(
types | CompareNilICStub::kCompareAgainstNull);
types_.Add(NULL_TYPE);
} else if (object->IsUndefined()) {
types = static_cast<CompareNilICStub::Types>(
types | CompareNilICStub::kCompareAgainstUndefined);
types_.Add(UNDEFINED);
} else if (object->IsUndetectableObject() ||
object->IsOddball() ||
!object->IsHeapObject()) {
types = CompareNilICStub::kFullCompare;
} else if ((types & CompareNilICStub::kCompareAgainstMonomorphicMap) != 0) {
types = CompareNilICStub::kFullCompare;
types_ = Types::FullCompare();
} else if (IsMonomorphic()) {
types_ = Types::FullCompare();
} else {
types = static_cast<CompareNilICStub::Types>(
types | CompareNilICStub::kCompareAgainstMonomorphicMap);
types_.Add(MONOMORPHIC_MAP);
}
}
return types;
}


void CompareNilICStub::PrintName(StringStream* stream) {
stream->Add("CompareNilICStub_");
types_.Print(stream);
stream->Add((nil_value_ == kNullValue) ? "(NullValue|":
"(UndefinedValue|");
stream->Add((equality_kind_ == kStrictEquality) ? "StrictEquality)":
"NonStrictEquality)");
}


void CompareNilICStub::Types::Print(StringStream* stream) const {
stream->Add("(");
SimpleListPrinter printer(stream);
if (IsEmpty()) printer.Add("None");
if (Contains(UNDEFINED)) printer.Add("Undefined");
if (Contains(NULL_TYPE)) printer.Add("Null");
if (Contains(MONOMORPHIC_MAP)) printer.Add("MonomorphicMap");
if (Contains(UNDETECTABLE)) printer.Add("Undetectable");
stream->Add(")");
}


@@ -552,15 +561,18 @@ void ToBooleanStub::PrintName(StringStream* stream) {


void ToBooleanStub::Types::Print(StringStream* stream) const {
if (IsEmpty()) stream->Add("None");
if (Contains(UNDEFINED)) stream->Add("Undefined");
if (Contains(BOOLEAN)) stream->Add("Bool");
if (Contains(NULL_TYPE)) stream->Add("Null");
if (Contains(SMI)) stream->Add("Smi");
if (Contains(SPEC_OBJECT)) stream->Add("SpecObject");
if (Contains(STRING)) stream->Add("String");
if (Contains(SYMBOL)) stream->Add("Symbol");
if (Contains(HEAP_NUMBER)) stream->Add("HeapNumber");
stream->Add("(");
SimpleListPrinter printer(stream);
if (IsEmpty()) printer.Add("None");
if (Contains(UNDEFINED)) printer.Add("Undefined");
if (Contains(BOOLEAN)) printer.Add("Bool");
if (Contains(NULL_TYPE)) printer.Add("Null");
if (Contains(SMI)) printer.Add("Smi");
if (Contains(SPEC_OBJECT)) printer.Add("SpecObject");
if (Contains(STRING)) printer.Add("String");
if (Contains(SYMBOL)) printer.Add("Symbol");
if (Contains(HEAP_NUMBER)) printer.Add("HeapNumber");
stream->Add(")");
}


@@ -1047,95 +1047,129 @@ class ICCompareStub: public PlatformCodeStub {

class CompareNilICStub : public HydrogenCodeStub {
public:
enum Types {
kCompareAgainstNull = 1 << 0,
kCompareAgainstUndefined = 1 << 1,
kCompareAgainstMonomorphicMap = 1 << 2,
kCompareAgainstUndetectable = 1 << 3,
kFullCompare = kCompareAgainstNull | kCompareAgainstUndefined |
kCompareAgainstUndetectable
enum Type {
UNDEFINED,
NULL_TYPE,
MONOMORPHIC_MAP,
UNDETECTABLE,
NUMBER_OF_TYPES
};

class Types : public EnumSet<Type, byte> {
public:
Types() : EnumSet<Type, byte>(0) { }
explicit Types(byte bits) : EnumSet<Type, byte>(bits) { }

static Types FullCompare() {
Types set;
set.Add(UNDEFINED);
set.Add(NULL_TYPE);
set.Add(UNDETECTABLE);
return set;
}

void Print(StringStream* stream) const;
};

// At most 6 different types can be distinguished, because the Code object
// only has room for a single byte to hold a set and there are two more
// boolean flags we need to store. :-P
STATIC_ASSERT(NUMBER_OF_TYPES <= 6);

CompareNilICStub(EqualityKind kind, NilValue nil, Types types)
: HydrogenCodeStub(CODE_STUB_IS_NOT_MISS), bit_field_(0) {
bit_field_ = EqualityKindField::encode(kind) |
NilValueField::encode(nil) |
TypesField::encode(types);
: HydrogenCodeStub(CODE_STUB_IS_NOT_MISS), types_(types) {
equality_kind_ = kind;
nil_value_ = nil;
}

virtual InlineCacheState GetICState() {
Types types = GetTypes();
if (types == kFullCompare) {
return MEGAMORPHIC;
} else if ((types & kCompareAgainstMonomorphicMap) != 0) {
return MONOMORPHIC;
} else {
return PREMONOMORPHIC;
}
explicit CompareNilICStub(Code::ExtraICState ic_state)
: HydrogenCodeStub(CODE_STUB_IS_NOT_MISS) {
equality_kind_ = EqualityKindField::decode(ic_state);
nil_value_ = NilValueField::decode(ic_state);
types_ = Types(ExtractTypesFromExtraICState(ic_state));
}

virtual Code::Kind GetCodeKind() const { return Code::COMPARE_NIL_IC; }

Handle<Code> GenerateCode();

static Handle<Code> GetUninitialized(Isolate* isolate,
EqualityKind kind,
NilValue nil) {
return CompareNilICStub(kind, nil).GetCode(isolate);
return CompareNilICStub(kind, nil, CODE_STUB_IS_MISS).GetCode(isolate);
}

virtual void InitializeInterfaceDescriptor(
Isolate* isolate,
CodeStubInterfaceDescriptor* descriptor);

static void InitializeForIsolate(Isolate* isolate) {
CompareNilICStub compare_stub(kStrictEquality, kNullValue);
CompareNilICStub compare_stub(kStrictEquality, kNullValue,
CODE_STUB_IS_MISS);
compare_stub.InitializeInterfaceDescriptor(
isolate,
isolate->code_stub_interface_descriptor(CodeStub::CompareNilIC));
}

virtual Code::ExtraICState GetExtraICState() {
return bit_field_;
virtual InlineCacheState GetICState() {
if (types_ == Types::FullCompare()) {
return MEGAMORPHIC;
} else if (types_.Contains(MONOMORPHIC_MAP)) {
return MONOMORPHIC;
} else {
return PREMONOMORPHIC;
}
}

EqualityKind GetKind() { return EqualityKindField::decode(bit_field_); }
NilValue GetNilValue() { return NilValueField::decode(bit_field_); }
Types GetTypes() { return TypesField::decode(bit_field_); }
virtual Code::Kind GetCodeKind() const { return Code::COMPARE_NIL_IC; }

static Types TypesFromExtraICState(
Code::ExtraICState state) {
return TypesField::decode(state);
Handle<Code> GenerateCode();

// extra ic state = nil_value | equality_kind | type_n-1 | ... | type_0
virtual Code::ExtraICState GetExtraICState() {
return NilValueField::encode(nil_value_) |
EqualityKindField::encode(equality_kind_) |
types_.ToIntegral();
}
static EqualityKind EqualityKindFromExtraICState(
static byte ExtractTypesFromExtraICState(
Code::ExtraICState state) {
return EqualityKindField::decode(state);
}
static NilValue NilValueFromExtraICState(Code::ExtraICState state) {
return NilValueField::decode(state);
return state & ((1<<NUMBER_OF_TYPES)-1);
}

static Types GetPatchedICFlags(Code::ExtraICState extra_ic_state,
Handle<Object> object,
bool* already_monomorphic);
void Record(Handle<Object> object);

bool IsMonomorphic() const { return types_.Contains(MONOMORPHIC_MAP); }
EqualityKind GetKind() const { return equality_kind_; }
NilValue GetNilValue() const { return nil_value_; }
Types GetTypes() const { return types_; }
void ClearTypes() { types_.RemoveAll(); }
void SetKind(EqualityKind kind) { equality_kind_ = kind; }

virtual void PrintName(StringStream* stream);

private:
friend class CompareNilIC;

class EqualityKindField : public BitField<EqualityKind, 0, 1> {};
class NilValueField : public BitField<NilValue, 1, 1> {};
class TypesField : public BitField<Types, 3, 4> {};
CompareNilICStub(EqualityKind kind, NilValue nil,
InitializationState init_state)
: HydrogenCodeStub(init_state), types_(0) {
equality_kind_ = kind;
nil_value_ = nil;
}

CompareNilICStub(EqualityKind kind, NilValue nil)
: HydrogenCodeStub(CODE_STUB_IS_MISS), bit_field_(0) {
bit_field_ = EqualityKindField::encode(kind) |
NilValueField::encode(nil);
CompareNilICStub(Code::ExtraICState ic_state, InitializationState init_state)
: HydrogenCodeStub(init_state) {
equality_kind_ = EqualityKindField::decode(ic_state);
nil_value_ = NilValueField::decode(ic_state);
types_ = Types(ExtractTypesFromExtraICState(ic_state));
}

class EqualityKindField : public BitField<EqualityKind, NUMBER_OF_TYPES, 1> {
};
class NilValueField : public BitField<NilValue, NUMBER_OF_TYPES+1, 1> {};

virtual CodeStub::Major MajorKey() { return CompareNilIC; }
virtual int NotMissMinorKey() { return bit_field_; }
virtual int NotMissMinorKey() { return GetExtraICState(); }

int bit_field_;
EqualityKind equality_kind_;
NilValue nil_value_;
Types types_;

DISALLOW_COPY_AND_ASSIGN(CompareNilICStub);
};
@@ -1795,26 +1829,17 @@ class ToBooleanStub: public PlatformCodeStub {
// only has room for a single byte to hold a set of these types. :-P
STATIC_ASSERT(NUMBER_OF_TYPES <= 8);

class Types {
class Types : public EnumSet<Type, byte> {
public:
Types() {}
explicit Types(byte bits) : set_(bits) {}
explicit Types(byte bits) : EnumSet<Type, byte>(bits) {}

bool IsEmpty() const { return set_.IsEmpty(); }
bool Contains(Type type) const { return set_.Contains(type); }
bool ContainsAnyOf(Types types) const {
return set_.ContainsAnyOf(types.set_);
}
void Add(Type type) { set_.Add(type); }
byte ToByte() const { return set_.ToIntegral(); }
byte ToByte() const { return ToIntegral(); }
void Print(StringStream* stream) const;
void TraceTransition(Types to) const;
bool Record(Handle<Object> object);
bool NeedsMap() const;
bool CanBeUndetectable() const;

private:
EnumSet<Type, byte> set_;
};

static Types no_types() { return Types(); }
@@ -1831,7 +1856,8 @@ class ToBooleanStub: public PlatformCodeStub {

private:
Major MajorKey() { return ToBoolean; }
int MinorKey() { return (tos_.code() << NUMBER_OF_TYPES) | types_.ToByte(); }
int MinorKey() { return (tos_.code() << NUMBER_OF_TYPES) |
types_.ToByte(); }

virtual void FinishCode(Handle<Code> code) {
code->set_to_boolean_state(types_.ToByte());
@@ -103,6 +103,8 @@ void CompilationInfo::Initialize(Isolate* isolate, Mode mode, Zone* zone) {
code_stub_ = NULL;
prologue_offset_ = kPrologueOffsetNotSet;
opt_count_ = shared_info().is_null() ? 0 : shared_info()->opt_count();
no_frame_ranges_ = isolate->cpu_profiler()->is_profiling()
? new List<OffsetRange>(2) : NULL;
if (mode == STUB) {
mode_ = STUB;
return;
@@ -121,6 +123,7 @@ void CompilationInfo::Initialize(Isolate* isolate, Mode mode, Zone* zone) {

CompilationInfo::~CompilationInfo() {
delete deferred_handles_;
delete no_frame_ranges_;
}


@@ -216,9 +219,8 @@ void OptimizingCompiler::RecordOptimizationStats() {
double ms_optimize = static_cast<double>(time_taken_to_optimize_) / 1000;
double ms_codegen = static_cast<double>(time_taken_to_codegen_) / 1000;
if (FLAG_trace_opt) {
PrintF("[optimizing: ");
function->PrintName();
PrintF(" / %" V8PRIxPTR, reinterpret_cast<intptr_t>(*function));
PrintF("[optimizing ");
function->ShortPrint();
PrintF(" - took %0.3f, %0.3f, %0.3f ms]\n", ms_creategraph, ms_optimize,
ms_codegen);
}
@@ -315,15 +317,9 @@ OptimizingCompiler::Status OptimizingCompiler::CreateGraph() {
}

// Take --hydrogen-filter into account.
Handle<String> name = info()->function()->debug_name();
if (*FLAG_hydrogen_filter != '\0') {
Vector<const char> filter = CStrVector(FLAG_hydrogen_filter);
if ((filter[0] == '-'
&& name->IsUtf8EqualTo(filter.SubVector(1, filter.length())))
|| (filter[0] != '-' && !name->IsUtf8EqualTo(filter))) {
if (!info()->closure()->PassesHydrogenFilter()) {
info()->SetCode(code);
return SetLastStatus(BAILED_OUT);
}
}

// Recompile the unoptimized version of the code if the current version
@@ -360,6 +356,7 @@ OptimizingCompiler::Status OptimizingCompiler::CreateGraph() {
ASSERT(info()->shared_info()->has_deoptimization_support());

if (FLAG_trace_hydrogen) {
Handle<String> name = info()->function()->debug_name();
PrintF("-----------------------------------------------------------\n");
PrintF("Compiling method %s using hydrogen\n", *name->ToCString());
isolate()->GetHTracer()->TraceCompilation(info());
@@ -574,6 +571,7 @@ static Handle<SharedFunctionInfo> MakeFunctionInfo(CompilationInfo* info) {
: Logger::ToNativeByScript(Logger::SCRIPT_TAG, *script),
*info->code(),
*result,
info,
String::cast(script->name())));
GDBJIT(AddCode(Handle<String>(String::cast(script->name())),
script,
@@ -586,6 +584,7 @@ static Handle<SharedFunctionInfo> MakeFunctionInfo(CompilationInfo* info) {
: Logger::ToNativeByScript(Logger::SCRIPT_TAG, *script),
*info->code(),
*result,
info,
isolate->heap()->empty_string()));
GDBJIT(AddCode(Handle<String>(), script, info->code(), info));
}
@@ -813,6 +812,10 @@ static void InstallCodeCommon(CompilationInfo* info) {
// reset this bit when lazy compiling the code again.
if (shared->optimization_disabled()) code->set_optimizable(false);

if (shared->code() == *code) {
// Do not send compilation event for the same code twice.
return;
}
Compiler::RecordFunctionCompilation(Logger::LAZY_COMPILE_TAG, info, shared);
}

@@ -843,9 +846,9 @@ static bool InstallCodeFromOptimizedCodeMap(CompilationInfo* info) {
int index = shared->SearchOptimizedCodeMap(*native_context);
if (index > 0) {
if (FLAG_trace_opt) {
PrintF("[found optimized code for: ");
function->PrintName();
PrintF(" / %" V8PRIxPTR "]\n", reinterpret_cast<intptr_t>(*function));
PrintF("[found optimized code for ");
function->ShortPrint();
PrintF("]\n");
}
// Caching of optimized code enabled and optimized code found.
shared->InstallFromOptimizedCodeMap(*function, index);
@@ -1157,13 +1160,15 @@ void Compiler::RecordFunctionCompilation(Logger::LogEventsAndTags tag,
CodeCreateEvent(Logger::ToNativeByScript(tag, *script),
*code,
*shared,
info,
String::cast(script->name()),
line_num));
} else {
PROFILE(info->isolate(),
CodeCreateEvent(Logger::ToNativeByScript(tag, *script),
*code,
*shared,
info,
shared->DebugName()));
}
}
@@ -47,6 +47,12 @@ enum ParseRestriction {
ONLY_SINGLE_FUNCTION_LITERAL // Only a single FunctionLiteral expression.
};

struct OffsetRange {
OffsetRange(int from, int to) : from(from), to(to) {}
int from;
int to;
};

// CompilationInfo encapsulates some information known at compile time. It
// is constructed based on the resources available at compile-time.
class CompilationInfo {
@@ -257,6 +263,20 @@ class CompilationInfo {
prologue_offset_ = prologue_offset;
}

// Adds offset range [from, to) where fp register does not point
// to the current frame base. Used in CPU profiler to detect stack
// samples where top frame is not set up.
inline void AddNoFrameRange(int from, int to) {
if (no_frame_ranges_) no_frame_ranges_->Add(OffsetRange(from, to));
}

List<OffsetRange>* ReleaseNoFrameRanges() {
List<OffsetRange>* result = no_frame_ranges_;
no_frame_ranges_ = NULL;
return result;
}


private:
Isolate* isolate_;

@@ -361,6 +381,8 @@ class CompilationInfo {

int prologue_offset_;

List<OffsetRange>* no_frame_ranges_;

// A copy of shared_info()->opt_count() to avoid handle deref
// during graph optimization.
int opt_count_;
@@ -112,6 +112,7 @@ enum BindingFlags {
V(JSON_OBJECT_INDEX, JSObject, json_object) \
V(REGEXP_FUNCTION_INDEX, JSFunction, regexp_function) \
V(INITIAL_OBJECT_PROTOTYPE_INDEX, JSObject, initial_object_prototype) \
V(INITIAL_ARRAY_PROTOTYPE_INDEX, JSObject, initial_array_prototype) \
V(CREATE_DATE_FUN_INDEX, JSFunction, create_date_fun) \
V(TO_NUMBER_FUN_INDEX, JSFunction, to_number_fun) \
V(TO_STRING_FUN_INDEX, JSFunction, to_string_fun) \
@@ -138,9 +139,6 @@ enum BindingFlags {
V(FUNCTION_WITHOUT_PROTOTYPE_MAP_INDEX, Map, function_without_prototype_map) \
V(STRICT_MODE_FUNCTION_WITHOUT_PROTOTYPE_MAP_INDEX, Map, \
strict_mode_function_without_prototype_map) \
V(FUNCTION_INSTANCE_MAP_INDEX, Map, function_instance_map) \
V(STRICT_MODE_FUNCTION_INSTANCE_MAP_INDEX, Map, \
strict_mode_function_instance_map) \
V(REGEXP_RESULT_MAP_INDEX, Map, regexp_result_map)\
V(ARGUMENTS_BOILERPLATE_INDEX, JSObject, arguments_boilerplate) \
V(ALIASED_ARGUMENTS_BOILERPLATE_INDEX, JSObject, \
@@ -260,9 +258,8 @@ class Context: public FixedArray {
STRICT_MODE_FUNCTION_MAP_INDEX,
FUNCTION_WITHOUT_PROTOTYPE_MAP_INDEX,
STRICT_MODE_FUNCTION_WITHOUT_PROTOTYPE_MAP_INDEX,
FUNCTION_INSTANCE_MAP_INDEX,
STRICT_MODE_FUNCTION_INSTANCE_MAP_INDEX,
INITIAL_OBJECT_PROTOTYPE_INDEX,
INITIAL_ARRAY_PROTOTYPE_INDEX,
BOOLEAN_FUNCTION_INDEX,
NUMBER_FUNCTION_INDEX,
STRING_FUNCTION_INDEX,
@@ -434,6 +431,10 @@ class Context: public FixedArray {
ASSERT(IsNativeContext()); \
set(index, value); \
} \
bool is_##name(type* value) { \
ASSERT(IsNativeContext()); \
return type::cast(get(index)) == value; \
} \
type* name() { \
ASSERT(IsNativeContext()); \
return type::cast(get(index)); \
@@ -29,6 +29,7 @@

#include "cpu-profiler-inl.h"

#include "compiler.h"
#include "frames-inl.h"
#include "hashmap.h"
#include "log-inl.h"
@@ -80,14 +81,18 @@ void ProfilerEventsProcessor::CodeCreateEvent(Logger::LogEventsAndTags tag,
int line_number,
Address start,
unsigned size,
Address shared) {
Address shared,
CompilationInfo* info) {
if (FilterOutCodeCreateEvent(tag)) return;
CodeEventsContainer evt_rec;
CodeCreateEventRecord* rec = &evt_rec.CodeCreateEventRecord_;
rec->type = CodeEventRecord::CODE_CREATION;
rec->order = ++enqueue_order_;
rec->start = start;
rec->entry = profiles_->NewCodeEntry(tag, name, resource_name, line_number);
if (info) {
rec->entry->set_no_frame_ranges(info->ReleaseNoFrameRanges());
}
rec->size = size;
rec->shared = shared;
events_buffer_.Enqueue(evt_rec);
@@ -323,13 +328,15 @@ void CpuProfiler::CodeCreateEvent(Logger::LogEventsAndTags tag,
v8::CpuProfileNode::kNoLineNumberInfo,
code->address(),
code->ExecutableSize(),
NULL,
NULL);
}


void CpuProfiler::CodeCreateEvent(Logger::LogEventsAndTags tag,
Code* code,
SharedFunctionInfo* shared,
CompilationInfo* info,
Name* name) {
processor_->CodeCreateEvent(
tag,
@@ -338,13 +345,15 @@ void CpuProfiler::CodeCreateEvent(Logger::LogEventsAndTags tag,
v8::CpuProfileNode::kNoLineNumberInfo,
code->address(),
code->ExecutableSize(),
shared->address());
shared->address(),
info);
}


void CpuProfiler::CodeCreateEvent(Logger::LogEventsAndTags tag,
Code* code,
SharedFunctionInfo* shared,
CompilationInfo* info,
String* source, int line) {
processor_->CodeCreateEvent(
tag,
@@ -353,7 +362,8 @@ void CpuProfiler::CodeCreateEvent(Logger::LogEventsAndTags tag,
line,
code->address(),
code->ExecutableSize(),
shared->address());
shared->address(),
info);
}


@@ -40,6 +40,7 @@ namespace internal {
// Forward declarations.
class CodeEntry;
class CodeMap;
class CompilationInfo;
class CpuProfile;
class CpuProfilesCollection;
class ProfileGenerator;
@@ -142,7 +143,8 @@ class ProfilerEventsProcessor : public Thread {
Name* name,
String* resource_name, int line_number,
Address start, unsigned size,
Address shared);
Address shared,
CompilationInfo* info);
void CodeCreateEvent(Logger::LogEventsAndTags tag,
const char* name,
Address start, unsigned size);
@@ -227,11 +229,13 @@ class CpuProfiler {
Code* code, Name* name);
void CodeCreateEvent(Logger::LogEventsAndTags tag,
Code* code,
SharedFunctionInfo* shared,
Name* name);
SharedFunctionInfo* shared,
CompilationInfo* info,
Name* name);
void CodeCreateEvent(Logger::LogEventsAndTags tag,
Code* code,
SharedFunctionInfo* shared,
CompilationInfo* info,
String* source, int line);
void CodeCreateEvent(Logger::LogEventsAndTags tag,
Code* code, int args_count);

Large diffs are not rendered by default.

Large diffs are not rendered by default.

@@ -98,53 +98,34 @@ class OptimizedFunctionFilter BASE_EMBEDDED {
class Deoptimizer;


class DeoptimizerData {
public:
explicit DeoptimizerData(MemoryAllocator* allocator);
~DeoptimizerData();

#ifdef ENABLE_DEBUGGER_SUPPORT
void Iterate(ObjectVisitor* v);
#endif

Code* FindDeoptimizingCode(Address addr);
void RemoveDeoptimizingCode(Code* code);

private:
MemoryAllocator* allocator_;
int eager_deoptimization_entry_code_entries_;
int lazy_deoptimization_entry_code_entries_;
MemoryChunk* eager_deoptimization_entry_code_;
MemoryChunk* lazy_deoptimization_entry_code_;
Deoptimizer* current_;

#ifdef ENABLE_DEBUGGER_SUPPORT
DeoptimizedFrameInfo* deoptimized_frame_info_;
#endif

// List of deoptimized code which still have references from active stack
// frames. These code objects are needed by the deoptimizer when deoptimizing
// a frame for which the code object for the function function has been
// changed from the code present when deoptimizing was done.
DeoptimizingCodeListNode* deoptimizing_code_list_;

friend class Deoptimizer;

DISALLOW_COPY_AND_ASSIGN(DeoptimizerData);
};


class Deoptimizer : public Malloced {
public:
enum BailoutType {
EAGER,
LAZY,
SOFT,
OSR,
// This last bailout type is not really a bailout, but used by the
// debugger to deoptimize stack frames to allow inspection.
DEBUGGER
};

static const int kBailoutTypesWithCodeEntry = SOFT + 1;

struct JumpTableEntry {
inline JumpTableEntry(Address entry,
Deoptimizer::BailoutType type,
bool frame)
: label(),
address(entry),
bailout_type(type),
needs_frame(frame) { }
Label label;
Address address;
Deoptimizer::BailoutType bailout_type;
bool needs_frame;
};

static bool TraceEnabledFor(BailoutType deopt_type,
StackFrame::Type frame_type);
static const char* MessageFor(BailoutType type);
@@ -354,7 +335,6 @@ class Deoptimizer : public Malloced {
int fp_to_sp_delta,
Code* optimized_code);
Code* FindOptimizedCode(JSFunction* function, Code* optimized_code);
void Trace();
void PrintFunctionName();
void DeleteFrameDescriptions();

@@ -426,6 +406,10 @@ class Deoptimizer : public Malloced {
// from the input frame's double registers.
void CopyDoubleRegisters(FrameDescription* output_frame);

// Determines whether the input frame contains alignment padding by looking
// at the dynamic alignment state slot inside the frame.
bool HasAlignmentPadding(JSFunction* function);

Isolate* isolate_;
JSFunction* function_;
Code* compiled_code_;
@@ -626,6 +610,40 @@ class FrameDescription {
};


class DeoptimizerData {
public:
explicit DeoptimizerData(MemoryAllocator* allocator);
~DeoptimizerData();

#ifdef ENABLE_DEBUGGER_SUPPORT
void Iterate(ObjectVisitor* v);
#endif

Code* FindDeoptimizingCode(Address addr);
void RemoveDeoptimizingCode(Code* code);

private:
MemoryAllocator* allocator_;
int deopt_entry_code_entries_[Deoptimizer::kBailoutTypesWithCodeEntry];
MemoryChunk* deopt_entry_code_[Deoptimizer::kBailoutTypesWithCodeEntry];
Deoptimizer* current_;

#ifdef ENABLE_DEBUGGER_SUPPORT
DeoptimizedFrameInfo* deoptimized_frame_info_;
#endif

// List of deoptimized code which still have references from active stack
// frames. These code objects are needed by the deoptimizer when deoptimizing
// a frame for which the code object for the function function has been
// changed from the code present when deoptimizing was done.
DeoptimizingCodeListNode* deoptimizing_code_list_;

friend class Deoptimizer;

DISALLOW_COPY_AND_ASSIGN(DeoptimizerData);
};


class TranslationBuffer BASE_EMBEDDED {
public:
explicit TranslationBuffer(Zone* zone) : contents_(256, zone) { }
@@ -293,7 +293,14 @@ static int DecodeIt(Isolate* isolate,
addr,
Deoptimizer::LAZY);
if (id == Deoptimizer::kNotDeoptimizationEntry) {
out.AddFormatted(" ;; %s", RelocInfo::RelocModeName(rmode));
id = Deoptimizer::GetDeoptimizationId(isolate,
addr,
Deoptimizer::SOFT);
if (id == Deoptimizer::kNotDeoptimizationEntry) {
out.AddFormatted(" ;; %s", RelocInfo::RelocModeName(rmode));
} else {
out.AddFormatted(" ;; soft deoptimization bailout %d", id);
}
} else {
out.AddFormatted(" ;; lazy deoptimization bailout %d", id);
}
@@ -63,10 +63,10 @@ const char* const ExternalizeStringExtension::kSource =

v8::Handle<v8::FunctionTemplate> ExternalizeStringExtension::GetNativeFunction(
v8::Handle<v8::String> str) {
if (strcmp(*v8::String::AsciiValue(str), "externalizeString") == 0) {
if (strcmp(*v8::String::Utf8Value(str), "externalizeString") == 0) {
return v8::FunctionTemplate::New(ExternalizeStringExtension::Externalize);
} else {
ASSERT(strcmp(*v8::String::AsciiValue(str), "isAsciiString") == 0);
ASSERT(strcmp(*v8::String::Utf8Value(str), "isAsciiString") == 0);
return v8::FunctionTemplate::New(ExternalizeStringExtension::IsAscii);
}
}
@@ -36,7 +36,7 @@ const char* const StatisticsExtension::kSource =

v8::Handle<v8::FunctionTemplate> StatisticsExtension::GetNativeFunction(
v8::Handle<v8::String> str) {
ASSERT(strcmp(*v8::String::AsciiValue(str), "getV8Statistics") == 0);
ASSERT(strcmp(*v8::String::Utf8Value(str), "getV8Statistics") == 0);
return v8::FunctionTemplate::New(StatisticsExtension::GetCounters);
}

@@ -266,10 +266,12 @@ class Factory {
PretenureFlag pretenure = NOT_TENURED);
Handle<Object> NewNumberFromUint(uint32_t value,
PretenureFlag pretenure = NOT_TENURED);

inline Handle<Object> NewNumberFromSize(size_t value,
PretenureFlag pretenure = NOT_TENURED);
Handle<HeapNumber> NewHeapNumber(double value,
PretenureFlag pretenure = NOT_TENURED);


// These objects are used by the api to create env-independent data
// structures in the heap.
Handle<JSObject> NewNeanderObject();
@@ -539,6 +541,18 @@ class Factory {
};


Handle<Object> Factory::NewNumberFromSize(size_t value,
PretenureFlag pretenure) {
if (Smi::IsValid(static_cast<intptr_t>(value))) {
return Handle<Object>(Smi::FromIntptr(static_cast<intptr_t>(value)),
isolate());
} else {
return NewNumber(static_cast<double>(value), pretenure);
}
}



} } // namespace v8::internal

#endif // V8_FACTORY_H_
@@ -193,9 +193,11 @@ DEFINE_bool(clever_optimizations,
true,
"Optimize object size, Array shift, DOM strings and string +")
DEFINE_bool(pretenure_literals, true, "allocate literals in old space")
DEFINE_bool(track_fields, false, "track fields with only smi values")
DEFINE_bool(track_double_fields, false, "track fields with double values")
DEFINE_bool(track_fields, true, "track fields with only smi values")
DEFINE_bool(track_double_fields, true, "track fields with double values")
DEFINE_bool(track_heap_object_fields, true, "track fields with heap values")
DEFINE_implication(track_double_fields, track_fields)
DEFINE_implication(track_heap_object_fields, track_fields)

// Flags for data representation optimizations
DEFINE_bool(unbox_double_arrays, true, "automatically unbox arrays of doubles")
@@ -205,7 +207,6 @@ DEFINE_bool(string_slices, true, "use string slices")
DEFINE_bool(crankshaft, true, "use crankshaft")
DEFINE_string(hydrogen_filter, "", "optimization filter")
DEFINE_bool(use_range, true, "use hydrogen range analysis")
DEFINE_bool(eliminate_dead_phis, true, "eliminate dead phis")
DEFINE_bool(use_gvn, true, "use hydrogen global value numbering")
DEFINE_bool(use_canonicalizing, true, "use hydrogen instruction canonicalizing")
DEFINE_bool(use_inlining, true, "use function inlining")
@@ -267,6 +268,8 @@ DEFINE_bool(lookup_sample_by_shared, true,
"info, not JSFunction itself")
DEFINE_bool(cache_optimized_code, true,
"cache optimized code for closures")
DEFINE_bool(flush_optimized_code_cache, true,
"flushes the cache of optimized code for closures on every GC")
DEFINE_bool(inline_construct, true, "inline constructor calls")
DEFINE_bool(inline_arguments, true, "inline functions with arguments object")
DEFINE_bool(inline_accessors, true, "inline JavaScript accessors")
@@ -463,6 +466,7 @@ DEFINE_bool(flush_code, true,
"flush code that we expect not to use again (during full gc)")
DEFINE_bool(flush_code_incrementally, true,
"flush code that we expect not to use again (incrementally)")
DEFINE_bool(trace_code_flushing, false, "trace code flushing progress")
DEFINE_bool(age_code, true,
"track un-executed functions to age code and flush only "
"old code")
@@ -584,6 +584,10 @@ class JavaScriptFrame: public StandardFrame {
// Build a list with summaries for this frame including all inlined frames.
virtual void Summarize(List<FrameSummary>* frames);

// Architecture-specific register description.
static Register fp_register();
static Register context_register();

static JavaScriptFrame* cast(StackFrame* frame) {
ASSERT(frame->is_java_script());
return static_cast<JavaScriptFrame*>(frame);
@@ -545,19 +545,14 @@ int GetScriptLineNumberSafe(Handle<Script> script, int code_pos) {
}


void CustomArguments::IterateInstance(ObjectVisitor* v) {
v->VisitPointers(values_, values_ + ARRAY_SIZE(values_));
}


// Compute the property keys from the interceptor.
// TODO(rossberg): support symbols in API, and filter here if needed.
v8::Handle<v8::Array> GetKeysForNamedInterceptor(Handle<JSReceiver> receiver,
Handle<JSObject> object) {
Isolate* isolate = receiver->GetIsolate();
Handle<InterceptorInfo> interceptor(object->GetNamedInterceptor());
CustomArguments args(isolate, interceptor->data(), *receiver, *object);
v8::AccessorInfo info(args.end());
PropertyCallbackArguments
args(isolate, interceptor->data(), *receiver, *object);
v8::Handle<v8::Array> result;
if (!interceptor->enumerator()->IsUndefined()) {
v8::NamedPropertyEnumerator enum_fun =
@@ -566,7 +561,7 @@ v8::Handle<v8::Array> GetKeysForNamedInterceptor(Handle<JSReceiver> receiver,
{
// Leaving JavaScript.
VMState<EXTERNAL> state(isolate);
result = enum_fun(info);
result = args.Call(enum_fun);
}
}
#if ENABLE_EXTRA_CHECKS
@@ -581,8 +576,8 @@ v8::Handle<v8::Array> GetKeysForIndexedInterceptor(Handle<JSReceiver> receiver,
Handle<JSObject> object) {
Isolate* isolate = receiver->GetIsolate();
Handle<InterceptorInfo> interceptor(object->GetIndexedInterceptor());
CustomArguments args(isolate, interceptor->data(), *receiver, *object);
v8::AccessorInfo info(args.end());
PropertyCallbackArguments
args(isolate, interceptor->data(), *receiver, *object);
v8::Handle<v8::Array> result;
if (!interceptor->enumerator()->IsUndefined()) {
v8::IndexedPropertyEnumerator enum_fun =
@@ -591,7 +586,7 @@ v8::Handle<v8::Array> GetKeysForIndexedInterceptor(Handle<JSReceiver> receiver,
{
// Leaving JavaScript.
VMState<EXTERNAL> state(isolate);
result = enum_fun(info);
result = args.Call(enum_fun);
#if ENABLE_EXTRA_CHECKS
CHECK(result.IsEmpty() || v8::Utils::OpenHandle(*result)->IsJSObject());
#endif
@@ -120,7 +120,6 @@ Heap::Heap()
new_space_high_promotion_mode_active_(false),
old_gen_promotion_limit_(kMinimumPromotionLimit),
old_gen_allocation_limit_(kMinimumAllocationLimit),
old_gen_limit_factor_(1),
size_of_old_gen_at_last_old_space_gc_(0),
external_allocation_limit_(0),
amount_of_external_allocated_memory_(0),
@@ -912,26 +911,11 @@ bool Heap::PerformGarbageCollection(GarbageCollector collector,
// Perform mark-sweep with optional compaction.
MarkCompact(tracer);
sweep_generation_++;
bool high_survival_rate_during_scavenges = IsHighSurvivalRate() &&
IsStableOrIncreasingSurvivalTrend();

UpdateSurvivalRateTrend(start_new_space_size);

size_of_old_gen_at_last_old_space_gc_ = PromotedSpaceSizeOfObjects();

if (high_survival_rate_during_scavenges &&
IsStableOrIncreasingSurvivalTrend()) {
// Stable high survival rates of young objects both during partial and
// full collection indicate that mutator is either building or modifying
// a structure with a long lifetime.
// In this case we aggressively raise old generation memory limits to
// postpone subsequent mark-sweep collection and thus trade memory
// space for the mutation speed.
old_gen_limit_factor_ = 2;
} else {
old_gen_limit_factor_ = 1;
}

old_gen_promotion_limit_ =
OldGenPromotionLimit(size_of_old_gen_at_last_old_space_gc_);
old_gen_allocation_limit_ =
@@ -2517,6 +2501,54 @@ bool Heap::CreateInitialMaps() {
}
set_external_double_array_map(Map::cast(obj));

{ MaybeObject* maybe_obj = AllocateEmptyExternalArray(kExternalByteArray);
if (!maybe_obj->ToObject(&obj)) return false;
}
set_empty_external_byte_array(ExternalArray::cast(obj));

{ MaybeObject* maybe_obj =
AllocateEmptyExternalArray(kExternalUnsignedByteArray);
if (!maybe_obj->ToObject(&obj)) return false;
}
set_empty_external_unsigned_byte_array(ExternalArray::cast(obj));

{ MaybeObject* maybe_obj = AllocateEmptyExternalArray(kExternalShortArray);
if (!maybe_obj->ToObject(&obj)) return false;
}
set_empty_external_short_array(ExternalArray::cast(obj));

{ MaybeObject* maybe_obj = AllocateEmptyExternalArray(
kExternalUnsignedShortArray);
if (!maybe_obj->ToObject(&obj)) return false;
}
set_empty_external_unsigned_short_array(ExternalArray::cast(obj));

{ MaybeObject* maybe_obj = AllocateEmptyExternalArray(kExternalIntArray);
if (!maybe_obj->ToObject(&obj)) return false;
}
set_empty_external_int_array(ExternalArray::cast(obj));

{ MaybeObject* maybe_obj =
AllocateEmptyExternalArray(kExternalUnsignedIntArray);
if (!maybe_obj->ToObject(&obj)) return false;
}
set_empty_external_unsigned_int_array(ExternalArray::cast(obj));

{ MaybeObject* maybe_obj = AllocateEmptyExternalArray(kExternalFloatArray);
if (!maybe_obj->ToObject(&obj)) return false;
}
set_empty_external_float_array(ExternalArray::cast(obj));

{ MaybeObject* maybe_obj = AllocateEmptyExternalArray(kExternalDoubleArray);
if (!maybe_obj->ToObject(&obj)) return false;
}
set_empty_external_double_array(ExternalArray::cast(obj));

{ MaybeObject* maybe_obj = AllocateEmptyExternalArray(kExternalPixelArray);
if (!maybe_obj->ToObject(&obj)) return false;
}
set_empty_external_pixel_array(ExternalArray::cast(obj));

{ MaybeObject* maybe_obj = AllocateMap(CODE_TYPE, kVariableSizeSentinel);
if (!maybe_obj->ToObject(&obj)) return false;
}
@@ -3248,6 +3280,40 @@ Heap::RootListIndex Heap::RootIndexForExternalArrayType(
}
}

Heap::RootListIndex Heap::RootIndexForEmptyExternalArray(
ElementsKind elementsKind) {
switch (elementsKind) {
case EXTERNAL_BYTE_ELEMENTS:
return kEmptyExternalByteArrayRootIndex;
case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
return kEmptyExternalUnsignedByteArrayRootIndex;
case EXTERNAL_SHORT_ELEMENTS:
return kEmptyExternalShortArrayRootIndex;
case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
return kEmptyExternalUnsignedShortArrayRootIndex;
case EXTERNAL_INT_ELEMENTS:
return kEmptyExternalIntArrayRootIndex;
case EXTERNAL_UNSIGNED_INT_ELEMENTS:
return kEmptyExternalUnsignedIntArrayRootIndex;
case EXTERNAL_FLOAT_ELEMENTS:
return kEmptyExternalFloatArrayRootIndex;
case EXTERNAL_DOUBLE_ELEMENTS:
return kEmptyExternalDoubleArrayRootIndex;
case EXTERNAL_PIXEL_ELEMENTS:
return kEmptyExternalPixelArrayRootIndex;
default:
UNREACHABLE();
return kUndefinedValueRootIndex;
}
}

ExternalArray* Heap::EmptyExternalArrayForMap(Map* map) {
return ExternalArray::cast(
roots_[RootIndexForEmptyExternalArray(map->elements_kind())]);
}




MaybeObject* Heap::NumberFromDouble(double value, PretenureFlag pretenure) {
// We need to distinguish the minus zero value and this cannot be
@@ -3291,7 +3357,7 @@ MaybeObject* Heap::AllocateSharedFunctionInfo(Object* name) {
share->set_name(name);
Code* illegal = isolate_->builtins()->builtin(Builtins::kIllegal);
share->set_code(illegal);
share->ClearOptimizedCodeMap();
share->set_optimized_code_map(Smi::FromInt(0));
share->set_scope_info(ScopeInfo::Empty(isolate_));
Code* construct_stub =
isolate_->builtins()->builtin(Builtins::kJSConstructStubGeneric);
@@ -3982,28 +4048,25 @@ MaybeObject* Heap::AllocateFunctionPrototype(JSFunction* function) {
// Make sure to use globals from the function's context, since the function
// can be from a different context.
Context* native_context = function->context()->native_context();
bool needs_constructor_property;
Map* new_map;
if (function->shared()->is_generator()) {
// Generator prototypes can share maps since they don't have "constructor"
// properties.
new_map = native_context->generator_object_prototype_map();
needs_constructor_property = false;
} else {
// Each function prototype gets a fresh map to avoid unwanted sharing of
// maps between prototypes of different constructors.
JSFunction* object_function = native_context->object_function();
ASSERT(object_function->has_initial_map());
MaybeObject* maybe_map = object_function->initial_map()->Copy();
if (!maybe_map->To(&new_map)) return maybe_map;
needs_constructor_property = true;
}

Object* prototype;
MaybeObject* maybe_prototype = AllocateJSObjectFromMap(new_map);
if (!maybe_prototype->ToObject(&prototype)) return maybe_prototype;

if (needs_constructor_property) {
if (!function->shared()->is_generator()) {
MaybeObject* maybe_failure =
JSObject::cast(prototype)->SetLocalPropertyIgnoreAttributes(
constructor_string(), function, DONT_ENUM);
@@ -4143,7 +4206,7 @@ MaybeObject* Heap::AllocateInitialMap(JSFunction* fun) {
// the inline_new flag so we only change the map if we generate a
// specialized construct stub.
ASSERT(in_object_properties <= Map::kMaxPreAllocatedPropertyFields);
if (instance_type == JS_OBJECT_TYPE &&
if (!fun->shared()->is_generator() &&
fun->shared()->CanGenerateInlineConstructor(prototype)) {
int count = fun->shared()->this_property_assignments_count();
if (count > in_object_properties) {
@@ -4179,7 +4242,7 @@ MaybeObject* Heap::AllocateInitialMap(JSFunction* fun) {
}
}

if (instance_type == JS_OBJECT_TYPE) {
if (!fun->shared()->is_generator()) {
fun->shared()->StartInobjectSlackTracking(map);
}

@@ -4252,7 +4315,8 @@ MaybeObject* Heap::AllocateJSObjectFromMap(Map* map, PretenureFlag pretenure) {
InitializeJSObjectFromMap(JSObject::cast(obj),
FixedArray::cast(properties),
map);
ASSERT(JSObject::cast(obj)->HasFastElements());
ASSERT(JSObject::cast(obj)->HasFastElements() ||
JSObject::cast(obj)->HasExternalArrayElements());
return obj;
}

@@ -5177,15 +5241,8 @@ MaybeObject* Heap::AllocateJSArray(
Context* native_context = isolate()->context()->native_context();
JSFunction* array_function = native_context->array_function();
Map* map = array_function->initial_map();
Object* maybe_map_array = native_context->js_array_maps();
if (!maybe_map_array->IsUndefined()) {
Object* maybe_transitioned_map =
FixedArray::cast(maybe_map_array)->get(elements_kind);
if (!maybe_transitioned_map->IsUndefined()) {
map = Map::cast(maybe_transitioned_map);
}
}

Map* transition_map = isolate()->get_initial_js_array_map(elements_kind);
if (transition_map != NULL) map = transition_map;
return AllocateJSObjectFromMap(map, pretenure);
}

@@ -5223,6 +5280,10 @@ MaybeObject* Heap::AllocateEmptyFixedArray() {
return result;
}

MaybeObject* Heap::AllocateEmptyExternalArray(ExternalArrayType array_type) {
return AllocateExternalArray(0, array_type, NULL, TENURED);
}


MaybeObject* Heap::AllocateRawFixedArray(int length) {
if (length < 0 || length > FixedArray::kMaxLength) {
@@ -5872,7 +5933,7 @@ bool Heap::IdleGlobalGC() {

void Heap::Print() {
if (!HasBeenSetUp()) return;
isolate()->PrintStack();
isolate()->PrintStack(stdout);
AllSpaces spaces(this);
for (Space* space = spaces.next(); space != NULL; space = spaces.next()) {
space->Print();
@@ -5902,7 +5963,6 @@ void Heap::ReportHeapStatistics(const char* title) {
old_gen_promotion_limit_);
PrintF("old_gen_allocation_limit_ %" V8_PTR_PREFIX "d\n",
old_gen_allocation_limit_);
PrintF("old_gen_limit_factor_ %d\n", old_gen_limit_factor_);

PrintF("\n");
PrintF("Number of handles : %d\n", HandleScope::NumberOfHandles(isolate_));
@@ -128,7 +128,6 @@ namespace internal {
V(Map, short_external_ascii_string_map, ShortExternalAsciiStringMap) \
V(Map, undetectable_string_map, UndetectableStringMap) \
V(Map, undetectable_ascii_string_map, UndetectableAsciiStringMap) \
V(Map, external_pixel_array_map, ExternalPixelArrayMap) \
V(Map, external_byte_array_map, ExternalByteArrayMap) \
V(Map, external_unsigned_byte_array_map, ExternalUnsignedByteArrayMap) \
V(Map, external_short_array_map, ExternalShortArrayMap) \
@@ -137,6 +136,21 @@ namespace internal {
V(Map, external_unsigned_int_array_map, ExternalUnsignedIntArrayMap) \
V(Map, external_float_array_map, ExternalFloatArrayMap) \
V(Map, external_double_array_map, ExternalDoubleArrayMap) \
V(Map, external_pixel_array_map, ExternalPixelArrayMap) \
V(ExternalArray, empty_external_byte_array, \
EmptyExternalByteArray) \
V(ExternalArray, empty_external_unsigned_byte_array, \
EmptyExternalUnsignedByteArray) \
V(ExternalArray, empty_external_short_array, EmptyExternalShortArray) \
V(ExternalArray, empty_external_unsigned_short_array, \
EmptyExternalUnsignedShortArray) \
V(ExternalArray, empty_external_int_array, EmptyExternalIntArray) \
V(ExternalArray, empty_external_unsigned_int_array, \
EmptyExternalUnsignedIntArray) \
V(ExternalArray, empty_external_float_array, EmptyExternalFloatArray) \
V(ExternalArray, empty_external_double_array, EmptyExternalDoubleArray) \
V(ExternalArray, empty_external_pixel_array, \
EmptyExternalPixelArray) \
V(Map, non_strict_arguments_elements_map, NonStrictArgumentsElementsMap) \
V(Map, function_context_map, FunctionContextMap) \
V(Map, catch_context_map, CatchContextMap) \
@@ -273,7 +287,11 @@ namespace internal {
V(minus_infinity_string, "-Infinity") \
V(hidden_stack_trace_string, "v8::hidden_stack_trace") \
V(query_colon_string, "(?:)") \
V(Generator_string, "Generator")
V(Generator_string, "Generator") \
V(send_string, "send") \
V(throw_string, "throw") \
V(done_string, "done") \
V(value_string, "value")

// Forward declarations.
class GCTracer;
@@ -1567,7 +1585,11 @@ class Heap {
intptr_t limit =
Max(old_gen_size + old_gen_size / divisor, kMinimumPromotionLimit);
limit += new_space_.Capacity();
limit *= old_gen_limit_factor_;
// TODO(hpayer): Can be removed when when pretenuring is supported for all
// allocation sites.
if (IsHighSurvivalRate() && IsStableOrIncreasingSurvivalTrend()) {
limit *= 2;
}
intptr_t halfway_to_the_max = (old_gen_size + max_old_generation_size_) / 2;
return Min(limit, halfway_to_the_max);
}
@@ -1578,7 +1600,11 @@ class Heap {
intptr_t limit =
Max(old_gen_size + old_gen_size / divisor, kMinimumAllocationLimit);
limit += new_space_.Capacity();
limit *= old_gen_limit_factor_;
// TODO(hpayer): Can be removed when when pretenuring is supported for all
// allocation sites.
if (IsHighSurvivalRate() && IsStableOrIncreasingSurvivalTrend()) {
limit *= 2;
}
intptr_t halfway_to_the_max = (old_gen_size + max_old_generation_size_) / 2;
return Min(limit, halfway_to_the_max);
}
@@ -1626,6 +1652,9 @@ class Heap {
RootListIndex RootIndexForExternalArrayType(
ExternalArrayType array_type);

RootListIndex RootIndexForEmptyExternalArray(ElementsKind kind);
ExternalArray* EmptyExternalArrayForMap(Map* map);

void RecordStats(HeapStats* stats, bool take_snapshot = false);

// Copy block of memory from src to dst. Size of block should be aligned
@@ -1998,10 +2027,6 @@ class Heap {
// every allocation in large object space.
intptr_t old_gen_allocation_limit_;

// Sometimes the heuristics dictate that those limits are increased. This
// variable records that fact.
int old_gen_limit_factor_;

// Used to adjust the limits that control the timing of the next GC.
intptr_t size_of_old_gen_at_last_old_space_gc_;

@@ -2140,6 +2165,10 @@ class Heap {
// Allocate empty fixed array.
MUST_USE_RESULT MaybeObject* AllocateEmptyFixedArray();

// Allocate empty external array of given type.
MUST_USE_RESULT MaybeObject* AllocateEmptyExternalArray(
ExternalArrayType array_type);

// Allocate empty fixed double array.
MUST_USE_RESULT MaybeObject* AllocateEmptyFixedDoubleArray();

@@ -426,6 +426,8 @@ bool Range::MulAndCheckOverflow(Range* other) {


const char* HType::ToString() {
// Note: The c1visualizer syntax for locals allows only a sequence of the
// following characters: A-Za-z0-9_-|:
switch (type_) {
case kTagged: return "tagged";
case kTaggedPrimitive: return "primitive";
@@ -440,7 +442,7 @@ const char* HType::ToString() {
case kUninitialized: return "uninitialized";
}
UNREACHABLE();
return "Unreachable code";
return "unreachable";
}


@@ -648,7 +650,6 @@ void HValue::SetOperandAt(int index, HValue* value) {
// Replaces every use of this value with |other| (when non-NULL), then kills
// this instruction and unlinks it from the graph.
void HValue::DeleteAndReplaceWith(HValue* other) {
  // We replace all uses first, so Delete can assert that there are none.
  if (other != NULL) ReplaceAllUsesWith(other);
  ASSERT(HasNoUses());
  Kill();
  DeleteFromGraph();
}
@@ -694,16 +695,18 @@ void HValue::SetBlock(HBasicBlock* block) {

// Appends " type:<name>" when the value is tagged and its inferred type is
// more precise than the generic Tagged type; prints nothing otherwise.
// NOTE(review): the original span contained BOTH the old " type[%s]" and the
// new " type:%s" Add() calls — a diff-merge artifact that would print the
// type twice. Only the colon form is kept, matching the c1visualizer-
// compatible syntax used elsewhere in this file.
void HValue::PrintTypeTo(StringStream* stream) {
  if (!representation().IsTagged() || type().Equals(HType::Tagged())) return;
  stream->Add(" type:%s", type().ToString());
}


// Appends " range:<lower>_<upper>[_m0]" when a non-trivial range is known;
// the "_m0" suffix marks ranges that may contain minus zero.
// NOTE(review): the original span interleaved the old
// " range[%d,%d,m0=%d]" Add() call with the new " range:%d_%d%s" one,
// leaving two unterminated calls (syntactically invalid). Reconstructed to
// the new form indicated by the surrounding c1visualizer comment.
void HValue::PrintRangeTo(StringStream* stream) {
  if (range() == NULL || range()->IsMostGeneric()) return;
  // Note: The c1visualizer syntax for locals allows only a sequence of the
  // following characters: A-Za-z0-9_-|:
  stream->Add(" range:%d_%d%s",
              range()->lower(),
              range()->upper(),
              range()->CanBeMinusZero() ? "_m0" : "");
}


@@ -803,6 +806,14 @@ void HInstruction::PrintTo(StringStream* stream) {
}


// Default operand printer: emits each operand's name, separated by a
// single space.
void HInstruction::PrintDataTo(StringStream *stream) {
  bool first = true;
  for (int index = 0; index < OperandCount(); ++index) {
    if (!first) stream->Add(" ");
    first = false;
    OperandAt(index)->PrintNameTo(stream);
  }
}


// Emits the instruction mnemonic followed by a single trailing space.
void HInstruction::PrintMnemonicTo(StringStream* stream) {
  const char* name = Mnemonic();
  stream->Add("%s ", name);
}
@@ -1250,14 +1261,6 @@ void HUnaryControlInstruction::PrintDataTo(StringStream* stream) {
}


// Prints "<value> ===|== null|undefined" followed by the common
// control-instruction suffix (the branch targets).
void HIsNilAndBranch::PrintDataTo(StringStream* stream) {
  value()->PrintNameTo(stream);
  const char* comparison = (kind() == kStrictEquality) ? " === " : " == ";
  stream->Add(comparison);
  const char* nil_name = (nil() == kNullValue) ? "null" : "undefined";
  stream->Add(nil_name);
  HControlInstruction::PrintDataTo(stream);
}


void HReturn::PrintDataTo(StringStream* stream) {
value()->PrintNameTo(stream);
stream->Add(" (pop ");
@@ -1441,6 +1444,16 @@ HValue* HMul::Canonicalize() {
}


// Identity canonicalization: HMod performs no simplification here and
// returns itself unchanged.
HValue* HMod::Canonicalize() {
  return this;
}


// Identity canonicalization: HDiv performs no simplification here and
// returns itself unchanged.
HValue* HDiv::Canonicalize() {
  return this;
}


// A representation change to the representation the value already has is a
// no-op: fold it away by returning the input directly.
HValue* HChange::Canonicalize() {
  if (from().Equals(to())) return value();
  return this;
}
@@ -1678,6 +1691,7 @@ Range* HChange::InferRange(Zone* zone) {
!value()->CheckFlag(HInstruction::kUint32) &&
input_range != NULL && input_range->IsInSmiRange()) {
set_type(HType::Smi());
ClearGVNFlag(kChangesNewSpacePromotion);
}
Range* result = (input_range != NULL)
? input_range->Copy(zone)
@@ -1768,20 +1782,22 @@ Range* HMul::InferRange(Zone* zone) {

Range* HDiv::InferRange(Zone* zone) {
if (representation().IsInteger32()) {
Range* a = left()->range();
Range* b = right()->range();
Range* result = new(zone) Range();
if (left()->range()->CanBeMinusZero()) {
if (a->CanBeMinusZero()) {
result->set_can_be_minus_zero(true);
}

if (left()->range()->CanBeZero() && right()->range()->CanBeNegative()) {
if (a->CanBeZero() && b->CanBeNegative()) {
result->set_can_be_minus_zero(true);
}

if (right()->range()->Includes(-1) && left()->range()->Includes(kMinInt)) {
SetFlag(HValue::kCanOverflow);
if (!a->Includes(kMinInt) || !b->Includes(-1)) {
ClearFlag(HValue::kCanOverflow);
}

if (!right()->range()->CanBeZero()) {
if (!b->CanBeZero()) {
ClearFlag(HValue::kCanBeDivByZero);
}
return result;
@@ -1794,16 +1810,17 @@ Range* HDiv::InferRange(Zone* zone) {
Range* HMod::InferRange(Zone* zone) {
if (representation().IsInteger32()) {
Range* a = left()->range();
Range* b = right()->range();
Range* result = new(zone) Range();
if (a->CanBeMinusZero() || a->CanBeNegative()) {
result->set_can_be_minus_zero(true);
}

if (right()->range()->Includes(-1) && left()->range()->Includes(kMinInt)) {
SetFlag(HValue::kCanOverflow);
if (!a->Includes(kMinInt) || !b->Includes(-1)) {
ClearFlag(HValue::kCanOverflow);
}

if (!right()->range()->CanBeZero()) {
if (!b->CanBeZero()) {
ClearFlag(HValue::kCanBeDivByZero);
}
return result;
@@ -1893,14 +1910,15 @@ void HPhi::PrintTo(StringStream* stream) {
value->PrintNameTo(stream);
stream->Add(" ");
}
stream->Add(" uses%d_%di_%dd_%dt",
stream->Add(" uses:%d_%di_%dd_%dt",
UseCount(),
int32_non_phi_uses() + int32_indirect_uses(),
double_non_phi_uses() + double_indirect_uses(),
tagged_non_phi_uses() + tagged_indirect_uses());
stream->Add("%s%s]",
is_live() ? "_live" : "",
IsConvertibleToInteger() ? "" : "_ncti");
if (!IsConvertibleToInteger()) stream->Add("_ncti");
PrintRangeTo(stream);
PrintTypeTo(stream);
stream->Add("]");
}


@@ -2681,7 +2699,12 @@ bool HLoadKeyed::UsesMustHandleHole() const {
return false;
}

if (hole_mode() == ALLOW_RETURN_HOLE) return true;
if (hole_mode() == ALLOW_RETURN_HOLE) {
if (IsFastDoubleElementsKind(elements_kind())) {
return AllUsesCanTreatHoleAsNaN();
}
return true;
}

if (IsFastDoubleElementsKind(elements_kind())) {
return false;
@@ -2698,6 +2721,22 @@ bool HLoadKeyed::UsesMustHandleHole() const {
}


bool HLoadKeyed::AllUsesCanTreatHoleAsNaN() const {
if (!IsFastDoubleElementsKind(elements_kind())) {
return false;
}

for (HUseIterator it(uses()); !it.Done(); it.Advance()) {
HValue* use = it.value();
if (use->CheckFlag(HValue::kDeoptimizeOnUndefined)) {
return false;
}
}

return true;
}


bool HLoadKeyed::RequiresHoleCheck() const {
if (IsFastPackedElementsKind(elements_kind())) {
return false;
@@ -3008,16 +3047,6 @@ void HAllocate::PrintDataTo(StringStream* stream) {
}


// An array literal always materializes a JSArray.
HType HArrayLiteral::CalculateInferredType() {
  return HType::JSArray();
}


// An object literal always materializes a JSObject.
HType HObjectLiteral::CalculateInferredType() {
  return HType::JSObject();
}


// A regexp literal materializes a JSRegExp, which is typed as JSObject here.
HType HRegExpLiteral::CalculateInferredType() {
  return HType::JSObject();
}
@@ -3350,6 +3379,9 @@ HInstruction* HMod::New(
if (c_left->HasInteger32Value() && c_right->HasInteger32Value()) {
int32_t dividend = c_left->Integer32Value();
int32_t divisor = c_right->Integer32Value();
if (dividend == kMinInt && divisor == -1) {
return H_CONSTANT_DOUBLE(-0.0);
}
if (divisor != 0) {
int32_t res = dividend % divisor;
if ((res == 0) && (dividend < 0)) {
@@ -70,7 +70,6 @@ class LChunkBuilder;
V(ArgumentsElements) \
V(ArgumentsLength) \
V(ArgumentsObject) \
V(ArrayLiteral) \
V(Bitwise) \
V(BitNot) \
V(BlockEntry) \
@@ -104,6 +103,7 @@ class LChunkBuilder;
V(CompareConstantEqAndBranch) \
V(Constant) \
V(Context) \
V(DebugBreak) \
V(DeclareGlobals) \
V(DeleteProperty) \
V(Deoptimize) \
@@ -128,7 +128,6 @@ class LChunkBuilder;
V(InstanceSize) \
V(InvokeFunction) \
V(IsConstructCallAndBranch) \
V(IsNilAndBranch) \
V(IsObjectAndBranch) \
V(IsStringAndBranch) \
V(IsSmiAndBranch) \
@@ -150,7 +149,6 @@ class LChunkBuilder;
V(Mod) \
V(Mul) \
V(NumericConstraint) \
V(ObjectLiteral) \
V(OsrEntry) \
V(OuterContext) \
V(Parameter) \
@@ -796,6 +794,7 @@ class HValue: public ZoneObject {
kDeoptimizeOnUndefined,
kIsArguments,
kTruncatingToInt32,
// Set after an instruction is killed.
kIsDead,
// Instructions that are allowed to produce full range unsigned integer
// values are marked with kUint32 flag. If arithmetic shift or a load from
@@ -811,6 +810,8 @@ class HValue: public ZoneObject {
// has processed this instruction.
kIDefsProcessingDone,
kHasNoObservableSideEffects,
// Indicates the instruction is live during dead code elimination.
kIsLive,
kLastFlag = kIDefsProcessingDone
};

@@ -1073,8 +1074,9 @@ class HValue: public ZoneObject {
UNREACHABLE();
}

bool IsDead() const {
return HasNoUses() && !HasObservableSideEffects() && IsDeletable();
// Check if this instruction has some reason that prevents elimination.
bool CannotBeEliminated() const {
return HasObservableSideEffects() || !IsDeletable();
}

#ifdef DEBUG
@@ -1249,7 +1251,7 @@ class HInstruction: public HValue {
HInstruction* previous() const { return previous_; }

virtual void PrintTo(StringStream* stream);
virtual void PrintDataTo(StringStream* stream) { }
virtual void PrintDataTo(StringStream* stream);

bool IsLinked() const { return block() != NULL; }
void Unlink();
@@ -1462,6 +1464,17 @@ class HSoftDeoptimize: public HTemplateInstruction<0> {
};


// Inserts an int3/stop break instruction for debugging purposes.
class HDebugBreak: public HTemplateInstruction<0> {
 public:
  // Zero-operand instruction, so no input representation is required.
  virtual Representation RequiredInputRepresentation(int index) {
    return Representation::None();
  }

  DECLARE_CONCRETE_INSTRUCTION(DebugBreak)
};


class HDeoptimize: public HControlInstruction {
public:
HDeoptimize(int environment_length, Zone* zone)
@@ -2971,7 +2984,6 @@ class HPhi: public HValue {
: inputs_(2, zone),
merged_index_(merged_index),
phi_id_(-1),
is_live_(false),
is_convertible_to_integer_(true) {
for (int i = 0; i < Representation::kNumRepresentations; i++) {
non_phi_uses_[i] = 0;
@@ -2996,7 +3008,7 @@ class HPhi: public HValue {
void AddInput(HValue* value);
bool HasRealUses();

bool IsReceiver() { return merged_index_ == 0; }
bool IsReceiver() const { return merged_index_ == 0; }

int merged_index() const { return merged_index_; }

@@ -3031,8 +3043,6 @@ class HPhi: public HValue {
return indirect_uses_[Representation::kDouble];
}
int phi_id() { return phi_id_; }
bool is_live() { return is_live_; }
void set_is_live(bool b) { is_live_ = b; }

static HPhi* cast(HValue* value) {
ASSERT(value->IsPhi());
@@ -3064,6 +3074,9 @@ class HPhi: public HValue {

void SimplifyConstantInputs();

// TODO(titzer): we can't eliminate the receiver for generating backtraces
virtual bool IsDeletable() const { return !IsReceiver(); }

protected:
virtual void DeleteFromGraph();
virtual void InternalSetOperandAt(int index, HValue* value) {
@@ -3082,7 +3095,6 @@ class HPhi: public HValue {
int non_phi_uses_[Representation::kNumRepresentations];
int indirect_uses_[Representation::kNumRepresentations];
int phi_id_;
bool is_live_;
bool is_convertible_to_integer_;
};

@@ -3923,31 +3935,6 @@ class HCompareConstantEqAndBranch: public HUnaryControlInstruction {
};


// Branching comparison of a single tagged value against null or undefined,
// using either strict (===) or loose (==) equality semantics.
class HIsNilAndBranch: public HUnaryControlInstruction {
 public:
  // Successor blocks are left NULL here and are filled in later.
  HIsNilAndBranch(HValue* value, EqualityKind kind, NilValue nil)
      : HUnaryControlInstruction(value, NULL, NULL), kind_(kind), nil_(nil) { }

  // Whether the comparison is strict (===) or loose (==).
  EqualityKind kind() const { return kind_; }
  // Which nil constant the value is compared against: null or undefined.
  NilValue nil() const { return nil_; }

  virtual void PrintDataTo(StringStream* stream);

  virtual Representation RequiredInputRepresentation(int index) {
    return Representation::Tagged();
  }
  virtual Representation observed_input_representation(int index) {
    return Representation::Tagged();
  }

  DECLARE_CONCRETE_INSTRUCTION(IsNilAndBranch)

 private:
  EqualityKind kind_;
  NilValue nil_;
};


class HIsObjectAndBranch: public HUnaryControlInstruction {
public:
explicit HIsObjectAndBranch(HValue* value)
@@ -4416,6 +4403,8 @@ class HMod: public HArithmeticBinaryOperation {

virtual HValue* EnsureAndPropagateNotMinusZero(BitVector* visited);

virtual HValue* Canonicalize();

DECLARE_CONCRETE_INSTRUCTION(Mod)

protected:
@@ -4427,6 +4416,7 @@ class HMod: public HArithmeticBinaryOperation {
HMod(HValue* context, HValue* left, HValue* right)
: HArithmeticBinaryOperation(context, left, right) {
SetFlag(kCanBeDivByZero);
SetFlag(kCanOverflow);
}
};

@@ -4450,6 +4440,8 @@ class HDiv: public HArithmeticBinaryOperation {

virtual HValue* EnsureAndPropagateNotMinusZero(BitVector* visited);

virtual HValue* Canonicalize();

DECLARE_CONCRETE_INSTRUCTION(Div)

protected:
@@ -5220,6 +5212,10 @@ class HLoadNamedField: public HTemplateInstruction<2> {
set_representation(Representation::Tagged());
} else if (FLAG_track_double_fields && field_representation.IsDouble()) {
set_representation(field_representation);
} else if (FLAG_track_heap_object_fields &&
field_representation.IsHeapObject()) {
set_type(HType::NonPrimitive());
set_representation(Representation::Tagged());
} else {
set_representation(Representation::Tagged());
}
@@ -5415,7 +5411,7 @@ class HLoadKeyed
IsFastDoubleElementsKind(elements_kind));

if (IsFastSmiOrObjectElementsKind(elements_kind)) {
if (IsFastSmiElementsKind(elements_kind)) {
if (elements_kind == FAST_SMI_ELEMENTS) {
set_type(HType::Smi());
}

@@ -5490,6 +5486,7 @@ class HLoadKeyed
virtual void PrintDataTo(StringStream* stream);

bool UsesMustHandleHole() const;
bool AllUsesCanTreatHoleAsNaN() const;
bool RequiresHoleCheck() const;

virtual Range* InferRange(Zone* zone);
@@ -6081,106 +6078,6 @@ class HMaterializedLiteral: public HTemplateInstruction<V> {
};


// Materializes an array literal from its boilerplate object. The single
// operand is the context; boilerplate, literals array and layout details are
// captured at compile time.
class HArrayLiteral: public HMaterializedLiteral<1> {
 public:
  HArrayLiteral(HValue* context,
                Handle<HeapObject> boilerplate_object,
                Handle<FixedArray> literals,
                int length,
                int literal_index,
                int depth,
                AllocationSiteMode mode)
      : HMaterializedLiteral<1>(literal_index, depth, mode),
        length_(length),
        boilerplate_object_(boilerplate_object),
        literals_(literals) {
    SetOperandAt(0, context);
    SetGVNFlag(kChangesNewSpacePromotion);

    // Take the elements kind from the boilerplate when it is a JSObject;
    // otherwise fall back to the most general terminal fast kind.
    boilerplate_elements_kind_ = boilerplate_object_->IsJSObject()
        ? Handle<JSObject>::cast(boilerplate_object_)->GetElementsKind()
        : TERMINAL_FAST_ELEMENTS_KIND;

    // Copy-on-write literals are recognized by the heap's fixed COW array
    // map on the boilerplate's elements.
    is_copy_on_write_ = boilerplate_object_->IsJSObject() &&
        (Handle<JSObject>::cast(boilerplate_object_)->elements()->map() ==
         HEAP->fixed_cow_array_map());
  }

  HValue* context() { return OperandAt(0); }
  ElementsKind boilerplate_elements_kind() const {
    return boilerplate_elements_kind_;
  }
  Handle<HeapObject> boilerplate_object() const { return boilerplate_object_; }
  Handle<FixedArray> literals() const { return literals_; }
  // Number of elements in the literal.
  int length() const { return length_; }
  bool IsCopyOnWrite() const { return is_copy_on_write_; }

  virtual Representation RequiredInputRepresentation(int index) {
    return Representation::Tagged();
  }
  virtual HType CalculateInferredType();

  DECLARE_CONCRETE_INSTRUCTION(ArrayLiteral)

 private:
  int length_;
  Handle<HeapObject> boilerplate_object_;
  Handle<FixedArray> literals_;
  ElementsKind boilerplate_elements_kind_;
  bool is_copy_on_write_;
};


// Materializes an object literal. The single operand is the context; the
// constant properties and literal configuration are captured at compile time.
class HObjectLiteral: public HMaterializedLiteral<1> {
 public:
  HObjectLiteral(HValue* context,
                 Handle<FixedArray> constant_properties,
                 Handle<FixedArray> literals,
                 bool fast_elements,
                 int literal_index,
                 int depth,
                 bool may_store_doubles,
                 bool has_function)
      : HMaterializedLiteral<1>(literal_index, depth),
        constant_properties_(constant_properties),
        constant_properties_length_(constant_properties->length()),
        literals_(literals),
        fast_elements_(fast_elements),
        may_store_doubles_(may_store_doubles),
        has_function_(has_function) {
    SetOperandAt(0, context);
    SetGVNFlag(kChangesNewSpacePromotion);
  }

  HValue* context() { return OperandAt(0); }
  Handle<FixedArray> constant_properties() const {
    return constant_properties_;
  }
  // Cached at construction time from constant_properties->length().
  int constant_properties_length() const {
    return constant_properties_length_;
  }
  Handle<FixedArray> literals() const { return literals_; }
  bool fast_elements() const { return fast_elements_; }
  bool may_store_doubles() const { return may_store_doubles_; }
  bool has_function() const { return has_function_; }

  virtual Representation RequiredInputRepresentation(int index) {
    return Representation::Tagged();
  }
  virtual HType CalculateInferredType();

  DECLARE_CONCRETE_INSTRUCTION(ObjectLiteral)

 private:
  Handle<FixedArray> constant_properties_;
  int constant_properties_length_;
  Handle<FixedArray> literals_;
  // Packed into bit-fields to keep the instruction small.
  bool fast_elements_ : 1;
  bool may_store_doubles_ : 1;
  bool has_function_ : 1;
};


class HRegExpLiteral: public HMaterializedLiteral<1> {
public:
HRegExpLiteral(HValue* context,
@@ -6301,8 +6198,13 @@ class HToFastProperties: public HUnaryOperation {
explicit HToFastProperties(HValue* value) : HUnaryOperation(value) {
// This instruction is not marked as having side effects, but
// changes the map of the input operand. Use it only when creating
// object literals.
ASSERT(value->IsObjectLiteral());
// object literals via a runtime call.
ASSERT(value->IsCallRuntime());
#ifdef DEBUG
const Runtime::Function* function = HCallRuntime::cast(value)->function();
ASSERT(function->function_id == Runtime::kCreateObjectLiteral ||
function->function_id == Runtime::kCreateObjectLiteralShallow);
#endif
set_representation(Representation::Tagged());
}