@@ -1041,6 +1041,7 @@ class MessageDispatchHelperThread: public Thread {
private:
void Run();

Isolate* isolate_;
Semaphore* const sem_;
Mutex* const mutex_;
bool already_signalled_;
@@ -1280,29 +1280,37 @@ void Deoptimizer::DoComputeCompiledStubFrame(TranslationIterator* iterator,
}

intptr_t caller_arg_count = 0;
if (descriptor->stack_parameter_count_ != NULL) {
caller_arg_count =
input_->GetRegister(descriptor->stack_parameter_count_->code());
}
bool arg_count_known = descriptor->stack_parameter_count_ == NULL;

// Build the Arguments object for the caller's parameters and a pointer to it.
output_frame_offset -= kPointerSize;
value = frame_ptr + StandardFrameConstants::kCallerSPOffset +
(caller_arg_count - 1) * kPointerSize;
output_frame->SetFrameSlot(output_frame_offset, value);
int args_arguments_offset = output_frame_offset;
intptr_t the_hole = reinterpret_cast<intptr_t>(
isolate_->heap()->the_hole_value());
if (arg_count_known) {
value = frame_ptr + StandardFrameConstants::kCallerSPOffset +
(caller_arg_count - 1) * kPointerSize;
} else {
value = the_hole;
}

output_frame->SetFrameSlot(args_arguments_offset, value);
if (trace_) {
PrintF(" 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08"
V8PRIxPTR " ; args.arguments\n",
top_address + output_frame_offset, output_frame_offset, value);
V8PRIxPTR " ; args.arguments %s\n",
top_address + args_arguments_offset, args_arguments_offset, value,
arg_count_known ? "" : "(the hole)");
}

output_frame_offset -= kPointerSize;
value = caller_arg_count;
output_frame->SetFrameSlot(output_frame_offset, value);
int length_frame_offset = output_frame_offset;
value = arg_count_known ? caller_arg_count : the_hole;
output_frame->SetFrameSlot(length_frame_offset, value);
if (trace_) {
PrintF(" 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08"
V8PRIxPTR " ; args.length\n",
top_address + output_frame_offset, output_frame_offset, value);
V8PRIxPTR " ; args.length %s\n",
top_address + length_frame_offset, length_frame_offset, value,
arg_count_known ? "" : "(the hole)");
}

output_frame_offset -= kPointerSize;
@@ -1321,6 +1329,20 @@ void Deoptimizer::DoComputeCompiledStubFrame(TranslationIterator* iterator,
DoTranslateCommand(iterator, 0, output_frame_offset);
}

if (!arg_count_known) {
DoTranslateCommand(iterator, 0, length_frame_offset,
TRANSLATED_VALUE_IS_NATIVE);
caller_arg_count = output_frame->GetFrameSlot(length_frame_offset);
value = frame_ptr + StandardFrameConstants::kCallerSPOffset +
(caller_arg_count - 1) * kPointerSize;
output_frame->SetFrameSlot(args_arguments_offset, value);
if (trace_) {
PrintF(" 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08"
V8PRIxPTR " ; args.arguments\n",
top_address + args_arguments_offset, args_arguments_offset, value);
}
}
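
// A minimal standalone sketch (not V8 code) of the two-pass pattern above:
// when the stub's argument count is only known dynamically, both Arguments
// slots are pre-filled with the hole, and args.arguments is backpatched once
// args.length has been translated. All names and values are illustrative.
#include <cassert>
#include <cstdint>

int main() {
  const intptr_t kTheHole = -1;              // stand-in for the hole sentinel
  intptr_t frame[2] = {kTheHole, kTheHole};  // [0] args.arguments, [1] args.length

  // A later translation command materializes the count...
  frame[1] = 3;
  // ...and args.arguments is recomputed from it, mirroring the code above.
  intptr_t caller_sp = 0x1000;  // stand-in for frame_ptr + kCallerSPOffset
  frame[0] = caller_sp + (frame[1] - 1) * static_cast<intptr_t>(sizeof(intptr_t));
  assert(frame[0] != kTheHole && frame[1] != kTheHole);
  return 0;
}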

ASSERT(0 == output_frame_offset);

// Copy the double registers from the input into the output frame.
@@ -1331,8 +1353,9 @@ void Deoptimizer::DoComputeCompiledStubFrame(TranslationIterator* iterator,

// Compute this frame's PC, state, and continuation.
Code* trampoline = NULL;
int extra = descriptor->extra_expression_stack_count_;
StubFailureTrampolineStub(extra).FindCodeInCache(&trampoline, isolate_);
StubFunctionMode function_mode = descriptor->function_mode_;
StubFailureTrampolineStub(function_mode).FindCodeInCache(&trampoline,
isolate_);
ASSERT(trampoline != NULL);
output_frame->SetPc(reinterpret_cast<intptr_t>(
trampoline->instruction_start()));
@@ -1476,12 +1499,25 @@ void Deoptimizer::MaterializeHeapNumbersForDebuggerInspectableFrame(
#endif


static const char* TraceValueType(bool is_smi, bool is_native) {
if (is_native) {
return "native";
} else if (is_smi) {
return "smi";
}

return "heap number";
}


void Deoptimizer::DoTranslateCommand(TranslationIterator* iterator,
int frame_index,
unsigned output_offset) {
int frame_index,
unsigned output_offset,
DeoptimizerTranslatedValueType value_type) {
disasm::NameConverter converter;
// A GC-safe temporary placeholder that we can put in the output frame.
const intptr_t kPlaceholder = reinterpret_cast<intptr_t>(Smi::FromInt(0));
bool is_native = value_type == TRANSLATED_VALUE_IS_NATIVE;

// Ignore commands marked as duplicate and act on the first non-duplicate.
Translation::Opcode opcode =
@@ -1524,23 +1560,28 @@ void Deoptimizer::DoTranslateCommand(TranslationIterator* iterator,
case Translation::INT32_REGISTER: {
int input_reg = iterator->Next();
intptr_t value = input_->GetRegister(input_reg);
bool is_smi = Smi::IsValid(value);
bool is_smi = (value_type == TRANSLATED_VALUE_IS_TAGGED) &&
Smi::IsValid(value);

if (trace_) {
PrintF(
" 0x%08" V8PRIxPTR ": [top + %d] <- %" V8PRIdPTR " ; %s (%s)\n",
output_[frame_index]->GetTop() + output_offset,
output_offset,
value,
converter.NameOfCPURegister(input_reg),
is_smi ? "smi" : "heap number");
TraceValueType(is_smi, is_native));
}
if (is_smi) {
intptr_t tagged_value =
reinterpret_cast<intptr_t>(Smi::FromInt(static_cast<int>(value)));
output_[frame_index]->SetFrameSlot(output_offset, tagged_value);
} else if (value_type == TRANSLATED_VALUE_IS_NATIVE) {
output_[frame_index]->SetFrameSlot(output_offset, value);
} else {
// We save the untagged value on the side and store a GC-safe
// temporary placeholder in the frame.
ASSERT(value_type == TRANSLATED_VALUE_IS_TAGGED);
AddDoubleValue(output_[frame_index]->GetTop() + output_offset,
static_cast<double>(static_cast<int32_t>(value)));
output_[frame_index]->SetFrameSlot(output_offset, kPlaceholder);
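
// A hedged sketch of the smi tagging used above: on 32-bit targets,
// Smi::FromInt shifts the payload left one bit, leaving the tag bit 0, so
// smis stay distinguishable from heap pointers (tag bit 1). Standalone
// illustration, not V8 code.
#include <cassert>
#include <cstdint>

int main() {
  int32_t value = 42;
  intptr_t tagged = static_cast<intptr_t>(value) << 1;  // low (tag) bit is 0
  assert((tagged & 1) == 0);       // looks like a smi, not a heap object
  assert((tagged >> 1) == value);  // round-trips back to the payload
  return 0;
}
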
@@ -1551,7 +1592,8 @@ void Deoptimizer::DoTranslateCommand(TranslationIterator* iterator,
case Translation::UINT32_REGISTER: {
int input_reg = iterator->Next();
uintptr_t value = static_cast<uintptr_t>(input_->GetRegister(input_reg));
bool is_smi = (value <= static_cast<uintptr_t>(Smi::kMaxValue));
bool is_smi = (value_type == TRANSLATED_VALUE_IS_TAGGED) &&
(value <= static_cast<uintptr_t>(Smi::kMaxValue));
if (trace_) {
PrintF(
" 0x%08" V8PRIxPTR ": [top + %d] <- %" V8PRIuPTR
@@ -1560,15 +1602,18 @@ void Deoptimizer::DoTranslateCommand(TranslationIterator* iterator,
output_offset,
value,
converter.NameOfCPURegister(input_reg),
is_smi ? "smi" : "heap number");
TraceValueType(is_smi, is_native));
}
if (is_smi) {
intptr_t tagged_value =
reinterpret_cast<intptr_t>(Smi::FromInt(static_cast<int>(value)));
output_[frame_index]->SetFrameSlot(output_offset, tagged_value);
} else if (value_type == TRANSLATED_VALUE_IS_NATIVE) {
output_[frame_index]->SetFrameSlot(output_offset, value);
} else {
// We save the untagged value on the side and store a GC-safe
// temporary placeholder in the frame.
ASSERT(value_type == TRANSLATED_VALUE_IS_TAGGED);
AddDoubleValue(output_[frame_index]->GetTop() + output_offset,
static_cast<double>(static_cast<uint32_t>(value)));
output_[frame_index]->SetFrameSlot(output_offset, kPlaceholder);
@@ -1617,23 +1662,27 @@ void Deoptimizer::DoTranslateCommand(TranslationIterator* iterator,
unsigned input_offset =
input_->GetOffsetFromSlotIndex(input_slot_index);
intptr_t value = input_->GetFrameSlot(input_offset);
bool is_smi = Smi::IsValid(value);
bool is_smi = (value_type == TRANSLATED_VALUE_IS_TAGGED) &&
Smi::IsValid(value);
if (trace_) {
PrintF(" 0x%08" V8PRIxPTR ": ",
output_[frame_index]->GetTop() + output_offset);
PrintF("[top + %d] <- %" V8PRIdPTR " ; [sp + %d] (%s)\n",
output_offset,
value,
input_offset,
is_smi ? "smi" : "heap number");
TraceValueType(is_smi, is_native));
}
if (is_smi) {
intptr_t tagged_value =
reinterpret_cast<intptr_t>(Smi::FromInt(static_cast<int>(value)));
output_[frame_index]->SetFrameSlot(output_offset, tagged_value);
} else if (value_type == TRANSLATED_VALUE_IS_NATIVE) {
output_[frame_index]->SetFrameSlot(output_offset, value);
} else {
// We save the untagged value on the side and store a GC-safe
// temporary placeholder in the frame.
ASSERT(value_type == TRANSLATED_VALUE_IS_TAGGED);
AddDoubleValue(output_[frame_index]->GetTop() + output_offset,
static_cast<double>(static_cast<int32_t>(value)));
output_[frame_index]->SetFrameSlot(output_offset, kPlaceholder);
@@ -1647,23 +1696,27 @@ void Deoptimizer::DoTranslateCommand(TranslationIterator* iterator,
input_->GetOffsetFromSlotIndex(input_slot_index);
uintptr_t value =
static_cast<uintptr_t>(input_->GetFrameSlot(input_offset));
bool is_smi = (value <= static_cast<uintptr_t>(Smi::kMaxValue));
bool is_smi = (value_type == TRANSLATED_VALUE_IS_TAGGED) &&
(value <= static_cast<uintptr_t>(Smi::kMaxValue));
if (trace_) {
PrintF(" 0x%08" V8PRIxPTR ": ",
output_[frame_index]->GetTop() + output_offset);
PrintF("[top + %d] <- %" V8PRIuPTR " ; [sp + %d] (uint32 %s)\n",
output_offset,
value,
input_offset,
is_smi ? "smi" : "heap number");
TraceValueType(is_smi, is_native));
}
if (is_smi) {
intptr_t tagged_value =
reinterpret_cast<intptr_t>(Smi::FromInt(static_cast<int>(value)));
output_[frame_index]->SetFrameSlot(output_offset, tagged_value);
} else if (value_type == TRANSLATED_VALUE_IS_NATIVE) {
output_[frame_index]->SetFrameSlot(output_offset, value);
} else {
// We save the untagged value on the side and store a GC-safe
// temporary placeholder in the frame.
ASSERT(value_type == TRANSLATED_VALUE_IS_TAGGED);
AddDoubleValue(output_[frame_index]->GetTop() + output_offset,
static_cast<double>(static_cast<uint32_t>(value)));
output_[frame_index]->SetFrameSlot(output_offset, kPlaceholder);
@@ -2130,7 +2183,8 @@ void Deoptimizer::EnsureCodeForDeoptimizationEntry(Isolate* isolate,
ASSERT(static_cast<int>(Deoptimizer::GetMaxDeoptTableSize()) >=
desc.instr_size);
chunk->CommitArea(desc.instr_size);
memcpy(chunk->area_start(), desc.buffer, desc.instr_size);
CopyBytes(chunk->area_start(), desc.buffer,
static_cast<size_t>(desc.instr_size));
CPU::FlushICache(chunk->area_start(), desc.instr_size);

if (type == EAGER) {
@@ -356,9 +356,17 @@ class Deoptimizer : public Malloced {
bool is_setter_stub_frame);
void DoComputeCompiledStubFrame(TranslationIterator* iterator,
int frame_index);

enum DeoptimizerTranslatedValueType {
TRANSLATED_VALUE_IS_NATIVE,
TRANSLATED_VALUE_IS_TAGGED
};

void DoTranslateCommand(TranslationIterator* iterator,
int frame_index,
unsigned output_offset);
int frame_index,
unsigned output_offset,
DeoptimizerTranslatedValueType value_type = TRANSLATED_VALUE_IS_TAGGED);

// Translate a command for OSR. Updates the input offset to be used for
// the next command. Returns false if translation of the command failed
// (e.g., a number conversion failed) and may or may not have updated the
@@ -110,10 +110,15 @@ inline bool IsFastDoubleElementsKind(ElementsKind kind) {
}


inline bool IsExternalFloatOrDoubleElementsKind(ElementsKind kind) {
return kind == EXTERNAL_DOUBLE_ELEMENTS ||
kind == EXTERNAL_FLOAT_ELEMENTS;
}


inline bool IsDoubleOrFloatElementsKind(ElementsKind kind) {
return IsFastDoubleElementsKind(kind) ||
kind == EXTERNAL_DOUBLE_ELEMENTS ||
kind == EXTERNAL_FLOAT_ELEMENTS;
IsExternalFloatOrDoubleElementsKind(kind);
}


@@ -148,21 +148,28 @@ DEFINE_bool(harmony_collections, false,
"enable harmony collections (sets, maps, and weak maps)")
DEFINE_bool(harmony_observation, false,
"enable harmony object observation (implies harmony collections")
DEFINE_bool(harmony_typed_arrays, false,
"enable harmony typed arrays")
DEFINE_bool(harmony_generators, false, "enable harmony generators")
DEFINE_bool(harmony, false, "enable all harmony features (except typeof)")
DEFINE_implication(harmony, harmony_scoping)
DEFINE_implication(harmony, harmony_modules)
DEFINE_implication(harmony, harmony_symbols)
DEFINE_implication(harmony, harmony_proxies)
DEFINE_implication(harmony, harmony_collections)
DEFINE_implication(harmony, harmony_observation)
DEFINE_implication(harmony, harmony_generators)
DEFINE_implication(harmony_modules, harmony_scoping)
DEFINE_implication(harmony_observation, harmony_collections)
DEFINE_implication(harmony, harmony_typed_arrays)
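
// A toy model (not the real flag machinery) of what the DEFINE_implication
// entries above declare: after parsing, setting the first flag forces the
// second flag on as well.
#include <cassert>

int main() {
  bool harmony = true, harmony_typed_arrays = false;
  bool harmony_observation = true, harmony_collections = false;
  if (harmony) harmony_typed_arrays = true;             // harmony => typed arrays
  if (harmony_observation) harmony_collections = true;  // observation => collections
  assert(harmony_typed_arrays && harmony_collections);
  return 0;
}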

// Flags for experimental implementation features.
DEFINE_bool(packed_arrays, true, "optimizes arrays that have no holes")
DEFINE_bool(smi_only_arrays, true, "tracks arrays with only smi values")
DEFINE_bool(compiled_transitions, false, "use optimizing compiler to "
"generate array elements transition stubs")
DEFINE_bool(compiled_keyed_stores, false, "use optimizing compiler to "
"generate keyed store stubs")
DEFINE_bool(clever_optimizations,
true,
"Optimize object size, Array shift, DOM strings and string +")
@@ -424,7 +431,7 @@ DEFINE_bool(trace_external_memory, false,
"it is adjusted.")
DEFINE_bool(collect_maps, true,
"garbage collect maps from which no objects can be reached")
DEFINE_bool(weak_embedded_maps_in_optimized_code, true,
DEFINE_bool(weak_embedded_maps_in_optimized_code, false,
"make maps embedded in optimized code weak")
DEFINE_bool(flush_code, true,
"flush code that we expect not to use again (during full gc)")
@@ -1311,18 +1311,19 @@ Address StubFailureTrampolineFrame::GetCallerStackPointer() const {


Code* StubFailureTrampolineFrame::unchecked_code() const {
int i = 0;
for (; i <= StubFailureTrampolineStub::kMaxExtraExpressionStackCount; ++i) {
Code* trampoline;
StubFailureTrampolineStub(i).FindCodeInCache(&trampoline, isolate());
ASSERT(trampoline != NULL);
Address current_pc = pc();
Address code_start = trampoline->instruction_start();
Address code_end = code_start + trampoline->instruction_size();
if (code_start <= current_pc && current_pc < code_end) {
return trampoline;
}
Code* trampoline;
StubFailureTrampolineStub(NOT_JS_FUNCTION_STUB_MODE).
FindCodeInCache(&trampoline, isolate());
if (trampoline->contains(pc())) {
return trampoline;
}

StubFailureTrampolineStub(JS_FUNCTION_STUB_MODE).
FindCodeInCache(&trampoline, isolate());
if (trampoline->contains(pc())) {
return trampoline;
}

UNREACHABLE();
return NULL;
}
@@ -232,6 +232,12 @@ void BreakableStatementChecker::VisitAssignment(Assignment* expr) {
}


void BreakableStatementChecker::VisitYield(Yield* expr) {
// Yield is breakable if the expression is.
Visit(expr->expression());
}


void BreakableStatementChecker::VisitThrow(Throw* expr) {
// Throw is breakable if the expression is.
Visit(expr->exception());
@@ -1538,6 +1544,28 @@ void FullCodeGenerator::VisitSharedFunctionInfoLiteral(
}


void FullCodeGenerator::VisitYield(Yield* expr) {
if (expr->is_delegating_yield()) UNIMPLEMENTED();

Comment cmnt(masm_, "[ Yield");
VisitForAccumulatorValue(expr->expression());
// TODO(wingo): Assert that the operand stack depth is 0, at least while
// general yield expressions are unimplemented.

// TODO(wingo): What follows is as in VisitReturnStatement. Replace it with a
// call to a builtin that will resume the generator.
NestedStatement* current = nesting_stack_;
int stack_depth = 0;
int context_length = 0;
while (current != NULL) {
current = current->Exit(&stack_depth, &context_length);
}
__ Drop(stack_depth);
EmitReturnSequence();
}


void FullCodeGenerator::VisitThrow(Throw* expr) {
Comment cmnt(masm_, "[ Throw");
VisitForStackValue(expr->exception());
@@ -187,7 +187,7 @@ class Writer BASE_EMBEDDED {
byte* buffer_;
};

class StringTable;
class ELFStringTable;

template<typename THeader>
class DebugSectionBase : public ZoneObject {
@@ -338,7 +338,7 @@ class ELFSection : public DebugSectionBase<ELFSectionHeader> {

virtual ~ELFSection() { }

void PopulateHeader(Writer::Slot<Header> header, StringTable* strtab);
void PopulateHeader(Writer::Slot<Header> header, ELFStringTable* strtab);

virtual void WriteBody(Writer::Slot<Header> header, Writer* w) {
uintptr_t start = w->position();
@@ -438,9 +438,9 @@ class FullHeaderELFSection : public ELFSection {
};


class StringTable : public ELFSection {
class ELFStringTable : public ELFSection {
public:
explicit StringTable(const char* name)
explicit ELFStringTable(const char* name)
: ELFSection(name, TYPE_STRTAB, 1), writer_(NULL), offset_(0), size_(0) {
}

@@ -488,7 +488,7 @@ class StringTable : public ELFSection {


void ELFSection::PopulateHeader(Writer::Slot<ELFSection::Header> header,
StringTable* strtab) {
ELFStringTable* strtab) {
header->name = strtab->Add(name_);
header->type = type_;
header->alignment = align_;
@@ -631,7 +631,7 @@ class ELF BASE_EMBEDDED {
public:
ELF(Zone* zone) : sections_(6, zone) {
sections_.Add(new(zone) ELFSection("", ELFSection::TYPE_NULL, 0), zone);
sections_.Add(new(zone) StringTable(".shstrtab"), zone);
sections_.Add(new(zone) ELFStringTable(".shstrtab"), zone);
}

void Write(Writer* w) {
@@ -718,7 +718,7 @@ class ELF BASE_EMBEDDED {
w->CreateSlotsHere<ELFSection::Header>(sections_.length());

// String table for section table is the first section.
StringTable* strtab = static_cast<StringTable*>(SectionAt(1));
ELFStringTable* strtab = static_cast<ELFStringTable*>(SectionAt(1));
strtab->AttachWriter(w);
for (int i = 0, length = sections_.length();
i < length;
@@ -832,7 +832,7 @@ class ELFSymbol BASE_EMBEDDED {
};
#endif

void Write(Writer::Slot<SerializedLayout> s, StringTable* t) {
void Write(Writer::Slot<SerializedLayout> s, ELFStringTable* t) {
// Convert symbol names from strings to indexes in the string table.
s->name = t->Add(name);
s->value = value;
@@ -871,8 +871,8 @@ class ELFSymbolTable : public ELFSection {
header->size = w->position() - header->offset;

// String table for this symbol table should follow it in the section table.
StringTable* strtab =
static_cast<StringTable*>(w->debug_object()->SectionAt(index() + 1));
ELFStringTable* strtab =
static_cast<ELFStringTable*>(w->debug_object()->SectionAt(index() + 1));
strtab->AttachWriter(w);
symbols.at(0).set(ELFSymbol::SerializedLayout(0,
0,
@@ -905,7 +905,7 @@ class ELFSymbolTable : public ELFSection {
private:
void WriteSymbolsList(const ZoneList<ELFSymbol>* src,
Writer::Slot<ELFSymbol::SerializedLayout> dst,
StringTable* strtab) {
ELFStringTable* strtab) {
for (int i = 0, len = src->length();
i < len;
i++) {
@@ -1023,7 +1023,7 @@ static void CreateSymbolsTable(CodeDescription* desc,
int text_section_index) {
Zone* zone = desc->info()->zone();
ELFSymbolTable* symtab = new(zone) ELFSymbolTable(".symtab", zone);
StringTable* strtab = new(zone) StringTable(".strtab");
ELFStringTable* strtab = new(zone) ELFStringTable(".strtab");

// Symbol table should be followed by the linked string table.
elf->AddSection(symtab, zone);
@@ -1996,7 +1996,7 @@ static GDBJITLineInfo* UntagLineInfo(void* ptr) {
}


void GDBJITInterface::AddCode(Handle<String> name,
void GDBJITInterface::AddCode(Handle<Name> name,
Handle<Script> script,
Handle<Code> code,
CompilationInfo* info) {
@@ -2005,8 +2005,9 @@ void GDBJITInterface::AddCode(Handle<String> name,
// Force initialization of line_ends array.
GetScriptLineNumber(script, 0);

if (!name.is_null()) {
SmartArrayPointer<char> name_cstring = name->ToCString(DISALLOW_NULLS);
if (!name.is_null() && name->IsString()) {
SmartArrayPointer<char> name_cstring =
Handle<String>::cast(name)->ToCString(DISALLOW_NULLS);
AddCode(*name_cstring, *code, GDBJITInterface::FUNCTION, *script, info);
} else {
AddCode("", *code, GDBJITInterface::FUNCTION, *script, info);
@@ -2124,10 +2125,14 @@ void GDBJITInterface::AddCode(GDBJITInterface::CodeTag tag,


void GDBJITInterface::AddCode(GDBJITInterface::CodeTag tag,
String* name,
Name* name,
Code* code) {
if (!FLAG_gdbjit) return;
AddCode(tag, name != NULL ? *name->ToCString(DISALLOW_NULLS) : NULL, code);
if (name != NULL && name->IsString()) {
AddCode(tag, *String::cast(name)->ToCString(DISALLOW_NULLS), code);
} else {
AddCode(tag, "", code);
}
}


@@ -118,12 +118,12 @@ class GDBJITInterface: public AllStatic {
Script* script,
CompilationInfo* info);

static void AddCode(Handle<String> name,
static void AddCode(Handle<Name> name,
Handle<Script> script,
Handle<Code> code,
CompilationInfo* info);

static void AddCode(CodeTag tag, String* name, Code* code);
static void AddCode(CodeTag tag, Name* name, Code* code);

static void AddCode(CodeTag tag, const char* name, Code* code);

@@ -72,7 +72,7 @@ class GlobalHandles::Node {
Internals::kNodeIsPartiallyDependentShift);
}

#ifdef DEBUG
#ifdef ENABLE_EXTRA_CHECKS
~Node() {
// TODO(1428): if it's a weak handle we should have invoked its callback.
// Zap the values for eager trapping.
@@ -111,10 +111,9 @@ class GlobalHandles::Node {
void Release(GlobalHandles* global_handles) {
ASSERT(state() != FREE);
set_state(FREE);
// TODO(mstarzinger): Put behind debug flag once embedders are stabilized.
object_ = reinterpret_cast<Object*>(kGlobalHandleZapValue);
#ifdef DEBUG
#ifdef ENABLE_EXTRA_CHECKS
// Zap the values for eager trapping.
object_ = reinterpret_cast<Object*>(kGlobalHandleZapValue);
class_id_ = v8::HeapProfiler::kPersistentHandleNoClassId;
set_independent(false);
set_partially_dependent(false);
@@ -107,7 +107,7 @@ void HandleScope::CloseScope() {
current->limit = prev_limit_;
DeleteExtensions(isolate_);
}
#ifdef DEBUG
#ifdef ENABLE_EXTRA_CHECKS
ZapRange(prev_next_, prev_limit_);
#endif
}
@@ -101,12 +101,14 @@ void HandleScope::DeleteExtensions(Isolate* isolate) {
}


#ifdef ENABLE_EXTRA_CHECKS
void HandleScope::ZapRange(Object** start, Object** end) {
ASSERT(end - start <= kHandleBlockSize);
for (Object** p = start; p != end; p++) {
*reinterpret_cast<Address*>(p) = v8::internal::kHandleZapValue;
}
}
#endif
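
// A minimal sketch of the zap-on-free idea behind ZapRange: dead handle
// slots are overwritten with a recognizable sentinel so a stale dereference
// fails fast. The sentinel below is illustrative, not V8's kHandleZapValue.
#include <cstdint>

static void ZapSlots(uintptr_t* start, uintptr_t* end) {
  const uintptr_t kZap = static_cast<uintptr_t>(0xDEADBEEFu);  // illustrative
  for (uintptr_t* p = start; p != end; p++) *p = kZap;  // half-open [start, end)
}

int main() {
  uintptr_t handles[4] = {1, 2, 3, 4};
  ZapSlots(handles, handles + 4);  // any later use is now obviously bogus
  return 0;
}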


Address HandleScope::current_level_address(Isolate* isolate) {
@@ -259,20 +261,6 @@ Handle<Object> ForceDeleteProperty(Handle<JSObject> object,
}


Handle<Object> SetPropertyWithInterceptor(Handle<JSObject> object,
Handle<Name> key,
Handle<Object> value,
PropertyAttributes attributes,
StrictModeFlag strict_mode) {
CALL_HEAP_FUNCTION(object->GetIsolate(),
object->SetPropertyWithInterceptor(*key,
*value,
attributes,
strict_mode),
Object);
}


Handle<Object> GetProperty(Handle<JSReceiver> obj,
const char* name) {
Isolate* isolate = obj->GetIsolate();
@@ -289,19 +277,6 @@ Handle<Object> GetProperty(Isolate* isolate,
}


Handle<Object> GetPropertyWithInterceptor(Handle<JSObject> receiver,
Handle<JSObject> holder,
Handle<Name> name,
PropertyAttributes* attributes) {
Isolate* isolate = receiver->GetIsolate();
CALL_HEAP_FUNCTION(isolate,
holder->GetPropertyWithInterceptor(*receiver,
*name,
attributes),
Object);
}


Handle<Object> SetPrototype(Handle<JSObject> obj, Handle<Object> value) {
const bool skip_hidden_prototypes = false;
CALL_HEAP_FUNCTION(obj->GetIsolate(),
@@ -160,13 +160,14 @@ class HandleScope {
// Extend the handle scope making room for more handles.
static internal::Object** Extend(Isolate* isolate);

#ifdef ENABLE_EXTRA_CHECKS
// Zaps the handles in the half-open interval [start, end).
static void ZapRange(internal::Object** start, internal::Object** end);
#endif

friend class v8::internal::DeferredHandles;
friend class v8::HandleScope;
friend class v8::internal::DeferredHandles;
friend class v8::internal::HandleScopeImplementer;
friend class v8::ImplementationUtilities;
friend class v8::internal::Isolate;
};

@@ -232,11 +233,6 @@ Handle<Object> GetProperty(Isolate* isolate,
Handle<Object> obj,
Handle<Object> key);

Handle<Object> GetPropertyWithInterceptor(Handle<JSObject> receiver,
Handle<JSObject> holder,
Handle<String> name,
PropertyAttributes* attributes);

Handle<Object> SetPrototype(Handle<JSObject> obj, Handle<Object> value);

Handle<Object> LookupSingleCharacterStringFromCode(Isolate* isolate,
@@ -399,7 +399,9 @@ AllocationSpace Heap::TargetSpaceId(InstanceType type) {
ASSERT(type != ODDBALL_TYPE);
ASSERT(type != JS_GLOBAL_PROPERTY_CELL_TYPE);

if (type < FIRST_NONSTRING_TYPE) {
if (type <= LAST_NAME_TYPE) {
if (type == SYMBOL_TYPE) return OLD_POINTER_SPACE;
ASSERT(type < FIRST_NONSTRING_TYPE);
// There are four string representations: sequential strings, external
// strings, cons strings, and sliced strings.
// Only the latter two contain non-map-word pointers to heap objects.
@@ -44,72 +44,13 @@ HeapProfiler::~HeapProfiler() {
}


void HeapProfiler::ResetSnapshots() {
void HeapProfiler::DeleteAllSnapshots() {
Heap* the_heap = heap();
delete snapshots_;
snapshots_ = new HeapSnapshotsCollection(the_heap);
}


void HeapProfiler::SetUp() {
Isolate* isolate = Isolate::Current();
if (isolate->heap_profiler() == NULL) {
isolate->set_heap_profiler(new HeapProfiler(isolate->heap()));
}
}


void HeapProfiler::TearDown() {
Isolate* isolate = Isolate::Current();
delete isolate->heap_profiler();
isolate->set_heap_profiler(NULL);
}


HeapSnapshot* HeapProfiler::TakeSnapshot(
const char* name,
int type,
v8::ActivityControl* control,
v8::HeapProfiler::ObjectNameResolver* resolver) {
ASSERT(Isolate::Current()->heap_profiler() != NULL);
return Isolate::Current()->heap_profiler()->TakeSnapshotImpl(name,
type,
control,
resolver);
}


HeapSnapshot* HeapProfiler::TakeSnapshot(
String* name,
int type,
v8::ActivityControl* control,
v8::HeapProfiler::ObjectNameResolver* resolver) {
ASSERT(Isolate::Current()->heap_profiler() != NULL);
return Isolate::Current()->heap_profiler()->TakeSnapshotImpl(name,
type,
control,
resolver);
}


void HeapProfiler::StartHeapObjectsTracking() {
ASSERT(Isolate::Current()->heap_profiler() != NULL);
Isolate::Current()->heap_profiler()->StartHeapObjectsTrackingImpl();
}


void HeapProfiler::StopHeapObjectsTracking() {
ASSERT(Isolate::Current()->heap_profiler() != NULL);
Isolate::Current()->heap_profiler()->StopHeapObjectsTrackingImpl();
}


SnapshotObjectId HeapProfiler::PushHeapObjectsStats(v8::OutputStream* stream) {
ASSERT(Isolate::Current()->heap_profiler() != NULL);
return Isolate::Current()->heap_profiler()->PushHeapObjectsStatsImpl(stream);
}


void HeapProfiler::DefineWrapperClass(
uint16_t class_id, v8::HeapProfiler::WrapperInfoCallback callback) {
ASSERT(class_id != v8::HeapProfiler::kPersistentHandleNoClassId);
@@ -129,99 +70,69 @@ v8::RetainedObjectInfo* HeapProfiler::ExecuteWrapperClassCallback(
}


HeapSnapshot* HeapProfiler::TakeSnapshotImpl(
HeapSnapshot* HeapProfiler::TakeSnapshot(
const char* name,
int type,
v8::ActivityControl* control,
v8::HeapProfiler::ObjectNameResolver* resolver) {
HeapSnapshot::Type s_type = static_cast<HeapSnapshot::Type>(type);
HeapSnapshot* result =
snapshots_->NewSnapshot(s_type, name, next_snapshot_uid_++);
bool generation_completed = true;
switch (s_type) {
case HeapSnapshot::kFull: {
HeapSnapshotGenerator generator(result, control, resolver, heap());
generation_completed = generator.GenerateSnapshot();
break;
HeapSnapshot* result = snapshots_->NewSnapshot(name, next_snapshot_uid_++);
{
HeapSnapshotGenerator generator(result, control, resolver, heap());
if (!generator.GenerateSnapshot()) {
delete result;
result = NULL;
}
default:
UNREACHABLE();
}
if (!generation_completed) {
delete result;
result = NULL;
}
snapshots_->SnapshotGenerationFinished(result);
return result;
}


HeapSnapshot* HeapProfiler::TakeSnapshotImpl(
HeapSnapshot* HeapProfiler::TakeSnapshot(
String* name,
int type,
v8::ActivityControl* control,
v8::HeapProfiler::ObjectNameResolver* resolver) {
return TakeSnapshotImpl(snapshots_->names()->GetName(name), type, control,
resolver);
return TakeSnapshot(snapshots_->names()->GetName(name), control, resolver);
}

void HeapProfiler::StartHeapObjectsTrackingImpl() {
void HeapProfiler::StartHeapObjectsTracking() {
snapshots_->StartHeapObjectsTracking();
}


SnapshotObjectId HeapProfiler::PushHeapObjectsStatsImpl(OutputStream* stream) {
SnapshotObjectId HeapProfiler::PushHeapObjectsStats(OutputStream* stream) {
return snapshots_->PushHeapObjectsStats(stream);
}


void HeapProfiler::StopHeapObjectsTrackingImpl() {
void HeapProfiler::StopHeapObjectsTracking() {
snapshots_->StopHeapObjectsTracking();
}


size_t HeapProfiler::GetMemorySizeUsedByProfiler() {
HeapProfiler* profiler = Isolate::Current()->heap_profiler();
ASSERT(profiler != NULL);
size_t size = profiler->snapshots_->GetUsedMemorySize();
return size;
return snapshots_->GetUsedMemorySize();
}


int HeapProfiler::GetSnapshotsCount() {
HeapProfiler* profiler = Isolate::Current()->heap_profiler();
ASSERT(profiler != NULL);
return profiler->snapshots_->snapshots()->length();
return snapshots_->snapshots()->length();
}


HeapSnapshot* HeapProfiler::GetSnapshot(int index) {
HeapProfiler* profiler = Isolate::Current()->heap_profiler();
ASSERT(profiler != NULL);
return profiler->snapshots_->snapshots()->at(index);
return snapshots_->snapshots()->at(index);
}


HeapSnapshot* HeapProfiler::FindSnapshot(unsigned uid) {
HeapProfiler* profiler = Isolate::Current()->heap_profiler();
ASSERT(profiler != NULL);
return profiler->snapshots_->GetSnapshot(uid);
return snapshots_->GetSnapshot(uid);
}


SnapshotObjectId HeapProfiler::GetSnapshotObjectId(Handle<Object> obj) {
if (!obj->IsHeapObject())
return v8::HeapProfiler::kUnknownObjectId;
HeapProfiler* profiler = Isolate::Current()->heap_profiler();
ASSERT(profiler != NULL);
return profiler->snapshots_->FindObjectId(HeapObject::cast(*obj)->address());
}


void HeapProfiler::DeleteAllSnapshots() {
HeapProfiler* profiler = Isolate::Current()->heap_profiler();
ASSERT(profiler != NULL);
profiler->ResetSnapshots();
return snapshots_->FindObjectId(HeapObject::cast(*obj)->address());
}
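
// A toy model (not V8 code) of the refactoring in this file: static entry
// points that looked up the profiler via Isolate::Current() become plain
// instance methods on the profiler object the isolate already owns.
#include <cassert>

struct Profiler {
  int snapshot_count = 0;
  int GetSnapshotsCount() { return snapshot_count; }  // formerly a static wrapper
};

int main() {
  Profiler profiler;  // owned by the isolate in the real code
  assert(profiler.GetSnapshotsCount() == 0);
  return 0;
}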


@@ -46,30 +46,28 @@ class HeapSnapshotsCollection;

class HeapProfiler {
public:
static void SetUp();
static void TearDown();
explicit HeapProfiler(Heap* heap);
~HeapProfiler();

static size_t GetMemorySizeUsedByProfiler();
size_t GetMemorySizeUsedByProfiler();

static HeapSnapshot* TakeSnapshot(
HeapSnapshot* TakeSnapshot(
const char* name,
int type,
v8::ActivityControl* control,
v8::HeapProfiler::ObjectNameResolver* resolver);
static HeapSnapshot* TakeSnapshot(
HeapSnapshot* TakeSnapshot(
String* name,
int type,
v8::ActivityControl* control,
v8::HeapProfiler::ObjectNameResolver* resolver);

static void StartHeapObjectsTracking();
static void StopHeapObjectsTracking();
static SnapshotObjectId PushHeapObjectsStats(OutputStream* stream);
static int GetSnapshotsCount();
static HeapSnapshot* GetSnapshot(int index);
static HeapSnapshot* FindSnapshot(unsigned uid);
static SnapshotObjectId GetSnapshotObjectId(Handle<Object> obj);
static void DeleteAllSnapshots();
void StartHeapObjectsTracking();
void StopHeapObjectsTracking();
SnapshotObjectId PushHeapObjectsStats(OutputStream* stream);
int GetSnapshotsCount();
HeapSnapshot* GetSnapshot(int index);
HeapSnapshot* FindSnapshot(unsigned uid);
SnapshotObjectId GetSnapshotObjectId(Handle<Object> obj);
void DeleteAllSnapshots();

void ObjectMoveEvent(Address from, Address to);

@@ -83,24 +81,6 @@ class HeapProfiler {
}

private:
explicit HeapProfiler(Heap* heap);
~HeapProfiler();
HeapSnapshot* TakeSnapshotImpl(
const char* name,
int type,
v8::ActivityControl* control,
v8::HeapProfiler::ObjectNameResolver* resolver);
HeapSnapshot* TakeSnapshotImpl(
String* name,
int type,
v8::ActivityControl* control,
v8::HeapProfiler::ObjectNameResolver* resolver);
void ResetSnapshots();

void StartHeapObjectsTrackingImpl();
void StopHeapObjectsTrackingImpl();
SnapshotObjectId PushHeapObjectsStatsImpl(OutputStream* stream);

Heap* heap() const { return snapshots_->heap(); }

HeapSnapshotsCollection* snapshots_;
@@ -85,3 +85,4 @@ int V8HeapExplorer::GetGcSubrootOrder(HeapObject* subroot) {
} } // namespace v8::internal

#endif // V8_HEAP_SNAPSHOT_GENERATOR_INL_H_

@@ -189,27 +189,25 @@ template <> struct SnapshotSizeConstants<4> {
static const int kExpectedHeapGraphEdgeSize = 12;
static const int kExpectedHeapEntrySize = 24;
static const int kExpectedHeapSnapshotsCollectionSize = 100;
static const int kExpectedHeapSnapshotSize = 136;
static const int kExpectedHeapSnapshotSize = 132;
static const size_t kMaxSerializableSnapshotRawSize = 256 * MB;
};

template <> struct SnapshotSizeConstants<8> {
static const int kExpectedHeapGraphEdgeSize = 24;
static const int kExpectedHeapEntrySize = 32;
static const int kExpectedHeapSnapshotsCollectionSize = 152;
static const int kExpectedHeapSnapshotSize = 168;
static const int kExpectedHeapSnapshotSize = 160;
static const uint64_t kMaxSerializableSnapshotRawSize =
static_cast<uint64_t>(6000) * MB;
};

} // namespace

HeapSnapshot::HeapSnapshot(HeapSnapshotsCollection* collection,
HeapSnapshot::Type type,
const char* title,
unsigned uid)
: collection_(collection),
type_(type),
title_(title),
uid_(uid),
root_index_(HeapEntry::kNoEntry),
@@ -599,11 +597,10 @@ HeapSnapshotsCollection::~HeapSnapshotsCollection() {
}


HeapSnapshot* HeapSnapshotsCollection::NewSnapshot(HeapSnapshot::Type type,
const char* name,
HeapSnapshot* HeapSnapshotsCollection::NewSnapshot(const char* name,
unsigned uid) {
is_tracking_objects_ = true; // Start watching for heap objects moves.
return new HeapSnapshot(this, type, name, uid);
return new HeapSnapshot(this, name, uid);
}


@@ -2410,7 +2407,6 @@ void HeapSnapshotJSONSerializer::Serialize(v8::OutputStream* stream) {

HeapSnapshot* HeapSnapshotJSONSerializer::CreateFakeSnapshot() {
HeapSnapshot* result = new HeapSnapshot(snapshot_->collection(),
HeapSnapshot::kFull,
snapshot_->title(),
snapshot_->uid());
result->AddRootEntry();
@@ -157,18 +157,12 @@ class HeapSnapshotsCollection;
// HeapSnapshotGenerator fills in a HeapSnapshot.
class HeapSnapshot {
public:
enum Type {
kFull = v8::HeapSnapshot::kFull
};

HeapSnapshot(HeapSnapshotsCollection* collection,
Type type,
const char* title,
unsigned uid);
void Delete();

HeapSnapshotsCollection* collection() { return collection_; }
Type type() { return type_; }
const char* title() { return title_; }
unsigned uid() { return uid_; }
size_t RawSnapshotSize() const;
@@ -203,7 +197,6 @@ class HeapSnapshot {

private:
HeapSnapshotsCollection* collection_;
Type type_;
const char* title_;
unsigned uid_;
int root_index_;
@@ -305,8 +298,7 @@ class HeapSnapshotsCollection {
void StartHeapObjectsTracking() { is_tracking_objects_ = true; }
void StopHeapObjectsTracking() { ids_.StopHeapObjectsTracking(); }

HeapSnapshot* NewSnapshot(
HeapSnapshot::Type type, const char* name, unsigned uid);
HeapSnapshot* NewSnapshot(const char* name, unsigned uid);
void SnapshotGenerationFinished(HeapSnapshot* snapshot);
List<HeapSnapshot*>* snapshots() { return &snapshots_; }
HeapSnapshot* GetSnapshot(unsigned uid);
@@ -695,3 +687,4 @@ class HeapSnapshotJSONSerializer {
} } // namespace v8::internal

#endif // V8_HEAP_SNAPSHOT_GENERATOR_H_

@@ -1779,6 +1779,10 @@ class ScavengingVisitor : public StaticVisitorBase {
&ObjectEvacuationStrategy<POINTER_OBJECT>::
template VisitSpecialized<SlicedString::kSize>);

table_.Register(kVisitSymbol,
&ObjectEvacuationStrategy<POINTER_OBJECT>::
template VisitSpecialized<Symbol::kSize>);

table_.Register(kVisitSharedFunctionInfo,
&ObjectEvacuationStrategy<POINTER_OBJECT>::
template VisitSpecialized<SharedFunctionInfo::kSize>);
@@ -3785,11 +3789,7 @@ MaybeObject* Heap::CreateCode(const CodeDesc& desc,
ASSERT(!isolate_->code_range()->exists() ||
isolate_->code_range()->contains(code->address()));
code->set_instruction_size(desc.instr_size);
// TODO(mstarzinger): Remove once we found the bug.
CHECK(reloc_info->IsByteArray());
code->set_relocation_info(reloc_info);
// TODO(mstarzinger): Remove once we found the bug.
CHECK(code->relocation_info()->IsByteArray());
code->set_flags(flags);
if (code->is_call_stub() || code->is_keyed_call_stub()) {
code->set_check_type(RECEIVER_MAP_CHECK);
@@ -3805,8 +3805,6 @@ MaybeObject* Heap::CreateCode(const CodeDesc& desc,
}
// Allow self references to created code object by patching the handle to
// point to the newly allocated Code object.
CHECK(code->IsCode());
CHECK(code->relocation_info()->IsByteArray());
if (!self_reference.is_null()) {
*(self_reference.location()) = code;
}
@@ -3815,8 +3813,6 @@ MaybeObject* Heap::CreateCode(const CodeDesc& desc,
// that are dereferenced during the copy to point directly to the actual heap
// objects. These pointers can include references to the code object itself,
// through the self_reference parameter.
CHECK(code->IsCode());
CHECK(code->relocation_info()->IsByteArray());
code->CopyFrom(desc);

#ifdef VERIFY_HEAP
@@ -3888,13 +3884,15 @@ MaybeObject* Heap::CopyCode(Code* code, Vector<byte> reloc_info) {
Address new_addr = reinterpret_cast<HeapObject*>(result)->address();

// Copy header and instructions.
memcpy(new_addr, old_addr, relocation_offset);
CopyBytes(new_addr, old_addr, relocation_offset);

Code* new_code = Code::cast(result);
new_code->set_relocation_info(ByteArray::cast(reloc_info_array));

// Copy patched rinfo.
memcpy(new_code->relocation_start(), reloc_info.start(), reloc_info.length());
CopyBytes(new_code->relocation_start(),
reloc_info.start(),
static_cast<size_t>(reloc_info.length()));

// Relocate the copy.
ASSERT(!isolate_->code_range()->exists() ||
@@ -5430,13 +5428,13 @@ MaybeObject* Heap::AllocateHashTable(int length, PretenureFlag pretenure) {
}


MaybeObject* Heap::AllocateSymbol(PretenureFlag pretenure) {
MaybeObject* Heap::AllocateSymbol() {
// Statically ensure that it is safe to allocate symbols in paged spaces.
STATIC_ASSERT(Symbol::kSize <= Page::kNonCodeObjectAreaSize);
AllocationSpace space = (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;

Object* result;
MaybeObject* maybe = AllocateRaw(Symbol::kSize, space, OLD_DATA_SPACE);
MaybeObject* maybe =
AllocateRaw(Symbol::kSize, OLD_POINTER_SPACE, OLD_POINTER_SPACE);
if (!maybe->ToObject(&result)) return maybe;

HeapObject::cast(result)->set_map_no_write_barrier(symbol_map());
@@ -5452,6 +5450,7 @@ MaybeObject* Heap::AllocateSymbol(PretenureFlag pretenure) {

Symbol::cast(result)->set_hash_field(
Name::kIsNotArrayIndexMask | (hash << Name::kHashShift));
Symbol::cast(result)->set_name(undefined_value());

ASSERT(result->IsSymbol());
return result;
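
// A toy model (not V8 code) of the space-selection rule this hunk enforces:
// symbols now carry a heap pointer (the "name" field set a few lines up), so
// they must be allocated in a pointer space the GC scans, not in old data
// space.
#include <cassert>

enum Space { OLD_DATA_SPACE, OLD_POINTER_SPACE };

static Space TargetSpaceFor(bool holds_heap_pointers) {
  return holds_heap_pointers ? OLD_POINTER_SPACE : OLD_DATA_SPACE;
}

int main() {
  const bool kSymbolHasNameField = true;  // introduced by this patch
  assert(TargetSpaceFor(kSymbolHasNameField) == OLD_POINTER_SPACE);
  return 0;
}
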
@@ -7471,6 +7470,9 @@ void KeyedLookupCache::Update(Map* map, Name* name, int field_offset) {
}
name = internalized_string;
}
// This cache is cleared only between mark compact passes, so we expect the
// cache to only contain old space names.
ASSERT(!HEAP->InNewSpace(name));

int index = (Hash(map, name) & kHashMask);
// After a GC there will be free slots, so we use them in order (this may
@@ -213,13 +213,16 @@ namespace internal {
V(prototype_string, "prototype") \
V(string_string, "string") \
V(String_string, "String") \
V(symbol_string, "symbol") \
V(Symbol_string, "Symbol") \
V(Date_string, "Date") \
V(this_string, "this") \
V(to_string_string, "toString") \
V(char_at_string, "CharAt") \
V(undefined_string, "undefined") \
V(value_of_string, "valueOf") \
V(stack_string, "stack") \
V(toJSON_string, "toJSON") \
V(InitializeVarGlobal_string, "InitializeVarGlobal") \
V(InitializeConstGlobal_string, "InitializeConstGlobal") \
V(KeyedLoadElementMonomorphic_string, \
@@ -520,6 +523,7 @@ class Heap {
int InitialSemiSpaceSize() { return initial_semispace_size_; }
intptr_t MaxOldGenerationSize() { return max_old_generation_size_; }
intptr_t MaxExecutableSize() { return max_executable_size_; }
int MaxNewSpaceAllocationSize() { return InitialSemiSpaceSize() * 3/4; }

// Returns the capacity of the heap in bytes w/o growing. Heap grows when
// more spaces are needed until it reaches the limit.
@@ -878,12 +882,11 @@ class Heap {
void* external_pointer,
PretenureFlag pretenure);

// Allocate a symbol.
// Allocate a symbol in old space.
// Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
// failed.
// Please note this does not perform a garbage collection.
MUST_USE_RESULT MaybeObject* AllocateSymbol(
PretenureFlag pretenure = NOT_TENURED);
MUST_USE_RESULT MaybeObject* AllocateSymbol();

// Allocate a tenured JS global property cell.
// Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
@@ -1277,7 +1277,8 @@ Representation HBranch::observed_input_representation(int index) {
ToBooleanStub::UNDEFINED |
ToBooleanStub::NULL_TYPE |
ToBooleanStub::SPEC_OBJECT |
ToBooleanStub::STRING);
ToBooleanStub::STRING |
ToBooleanStub::SYMBOL);
if (expected_input_types_.ContainsAnyOf(tagged_types)) {
return Representation::Tagged();
} else if (expected_input_types_.Contains(ToBooleanStub::HEAP_NUMBER)) {
@@ -1467,15 +1468,6 @@ void HChange::PrintDataTo(StringStream* stream) {
}


void HJSArrayLength::PrintDataTo(StringStream* stream) {
value()->PrintNameTo(stream);
if (HasTypeCheck()) {
stream->Add(" ");
typecheck()->PrintNameTo(stream);
}
}


HValue* HUnaryMathOperation::Canonicalize() {
if (op() == kMathFloor) {
// If the input is integer32 then we replace the floor instruction
@@ -2415,6 +2407,10 @@ void HParameter::PrintDataTo(StringStream* stream) {
void HLoadNamedField::PrintDataTo(StringStream* stream) {
object()->PrintNameTo(stream);
stream->Add(" @%d%s", offset(), is_in_object() ? "[in-object]" : "");
if (HasTypeCheck()) {
stream->Add(" ");
typecheck()->PrintNameTo(stream);
}
}


@@ -2589,6 +2585,10 @@ bool HLoadKeyed::UsesMustHandleHole() const {
return false;
}

if (IsExternalArrayElementsKind(elements_kind())) {
return false;
}

if (hole_mode() == ALLOW_RETURN_HOLE) return true;

if (IsFastDoubleElementsKind(elements_kind())) {
@@ -2611,6 +2611,10 @@ bool HLoadKeyed::RequiresHoleCheck() const {
return false;
}

if (IsExternalArrayElementsKind(elements_kind())) {
return false;
}

return !UsesMustHandleHole();
}

@@ -3036,10 +3040,19 @@ bool HStoreKeyed::NeedsCanonicalization() {
// If value is an integer or smi or comes from the result of a keyed load or
// constant, then it is either a non-hole value or, in the case of a constant,
// the hole is only being stored explicitly: no need for canonicalization.
if (value()->IsLoadKeyed() || value()->IsConstant()) {
//
// The exception to that is keyed loads from external float or double arrays:
// these can load an arbitrary representation of NaN.

if (value()->IsConstant()) {
return false;
}

if (value()->IsLoadKeyed()) {
return IsExternalFloatOrDoubleElementsKind(
HLoadKeyed::cast(value())->elements_kind());
}

if (value()->IsChange()) {
if (HChange::cast(value())->from().IsInteger32()) {
return false;
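
// A standalone sketch (not V8 code) of why the exception above matters: an
// external float/double array can hand back a NaN with an arbitrary payload,
// while fast double arrays reserve one specific NaN bit pattern as the hole
// marker, so stored NaNs must be collapsed to a single canonical form.
// Assumes typical IEEE-754 doubles.
#include <cassert>
#include <cmath>
#include <cstdint>
#include <cstring>
#include <limits>

static double Canonicalize(double d) {
  return std::isnan(d) ? std::numeric_limits<double>::quiet_NaN() : d;
}

int main() {
  uint64_t weird_bits = 0x7FF8DEADBEEF0001ULL;  // a NaN with a payload
  double weird;
  std::memcpy(&weird, &weird_bits, sizeof weird);
  assert(std::isnan(weird));

  double canon = Canonicalize(weird);
  uint64_t canon_bits;
  std::memcpy(&canon_bits, &canon, sizeof canon);
  assert(std::isnan(canon) && canon_bits != weird_bits);  // payload collapsed
  return 0;
}
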
@@ -134,7 +134,6 @@ class LChunkBuilder;
V(IsStringAndBranch) \
V(IsSmiAndBranch) \
V(IsUndetectableAndBranch) \
V(JSArrayLength) \
V(LeaveInlined) \
V(LoadContextSlot) \
V(LoadElements) \
@@ -2392,45 +2391,6 @@ class HCallRuntime: public HCall<1> {
};


class HJSArrayLength: public HTemplateInstruction<2> {
public:
HJSArrayLength(HValue* value, HValue* typecheck,
HType type = HType::Tagged()) {
set_type(type);
// The length of an array is stored as a tagged value in the array
// object. It is guaranteed to be 32 bit integer, but it can be
// represented as either a smi or heap number.
SetOperandAt(0, value);
SetOperandAt(1, typecheck != NULL ? typecheck : value);
set_representation(Representation::Tagged());
SetFlag(kUseGVN);
SetGVNFlag(kDependsOnArrayLengths);
SetGVNFlag(kDependsOnMaps);
}

virtual Representation RequiredInputRepresentation(int index) {
return Representation::Tagged();
}

virtual void PrintDataTo(StringStream* stream);

HValue* value() { return OperandAt(0); }
HValue* typecheck() {
ASSERT(HasTypeCheck());
return OperandAt(1);
}
bool HasTypeCheck() const { return OperandAt(0) != OperandAt(1); }

DECLARE_CONCRETE_INSTRUCTION(JSArrayLength)

protected:
virtual bool DataEquals(HValue* other_raw) { return true; }

private:
virtual bool IsDeletable() const { return true; }
};


class HFixedArrayBaseLength: public HUnaryOperation {
public:
explicit HFixedArrayBaseLength(HValue* value) : HUnaryOperation(value) {
@@ -4693,6 +4653,14 @@ class HParameter: public HTemplateInstruction<0> {
set_representation(Representation::Tagged());
}

explicit HParameter(unsigned index,
ParameterKind kind,
Representation r)
: index_(index),
kind_(kind) {
set_representation(r);
}

unsigned index() const { return index_; }
ParameterKind kind() const { return kind_; }

@@ -5184,12 +5152,16 @@ class HStoreContextSlot: public HTemplateInstruction<2> {
};


class HLoadNamedField: public HUnaryOperation {
class HLoadNamedField: public HTemplateInstruction<2> {
public:
HLoadNamedField(HValue* object, bool is_in_object, int offset)
: HUnaryOperation(object),
is_in_object_(is_in_object),
HLoadNamedField(HValue* object, bool is_in_object, int offset,
HValue* typecheck = NULL)
: is_in_object_(is_in_object),
offset_(offset) {
ASSERT(object != NULL);
SetOperandAt(0, object);
SetOperandAt(1, typecheck != NULL ? typecheck : object);

set_representation(Representation::Tagged());
SetFlag(kUseGVN);
SetGVNFlag(kDependsOnMaps);
@@ -5200,7 +5172,24 @@ class HLoadNamedField: public HUnaryOperation {
}
}

static HLoadNamedField* NewArrayLength(Zone* zone, HValue* object,
HValue* typecheck,
HType type = HType::Tagged()) {
HLoadNamedField* result = new(zone) HLoadNamedField(
object, true, JSArray::kLengthOffset, typecheck);
result->set_type(type);
result->SetGVNFlag(kDependsOnArrayLengths);
result->ClearGVNFlag(kDependsOnInobjectFields);
return result;
}

HValue* object() { return OperandAt(0); }
HValue* typecheck() {
ASSERT(HasTypeCheck());
return OperandAt(1);
}

bool HasTypeCheck() const { return OperandAt(0) != OperandAt(1); }
bool is_in_object() const { return is_in_object_; }
int offset() const { return offset_; }


@@ -108,7 +108,7 @@ class HBasicBlock: public ZoneObject {
bool Dominates(HBasicBlock* other) const;
int LoopNestingDepth() const;

void SetInitialEnvironment(HEnvironment* env);
void SetInitialEnvironment(HEnvironment* env, BailoutId previous_id);
void ClearEnvironment() { last_environment_ = NULL; }
bool HasEnvironment() const { return last_environment_ != NULL; }
void UpdateEnvironment(HEnvironment* env) { last_environment_ = env; }
@@ -483,6 +483,8 @@ class HEnvironment: public ZoneObject {

BailoutId ast_id() const { return ast_id_; }
void set_ast_id(BailoutId id) { ast_id_ = id; }
BailoutId previous_ast_id() const { return previous_ast_id_; }
void set_previous_ast_id(BailoutId id) { previous_ast_id_ = id; }

HEnterInlined* entry() const { return entry_; }
void set_entry(HEnterInlined* entry) { entry_ = entry; }
@@ -644,6 +646,7 @@ class HEnvironment: public ZoneObject {
int pop_count_;
int push_count_;
BailoutId ast_id_;
BailoutId previous_ast_id_;
Zone* zone_;
};

@@ -891,8 +894,9 @@ class HGraphBuilder {
protected:
virtual bool BuildGraph() = 0;

HBasicBlock* CreateBasicBlock(HEnvironment* env);
HBasicBlock* CreateLoopHeaderBlock();
HBasicBlock* CreateBasicBlock(HEnvironment* env,
BailoutId previous_ast_id);
HBasicBlock* CreateLoopHeaderBlock(BailoutId previous_ast_id);

// Building common constructs
HInstruction* BuildExternalArrayElementAccess(
@@ -909,7 +913,20 @@ class HGraphBuilder {
HValue* val,
HValue* dependency,
ElementsKind elements_kind,
bool is_store);
bool is_store,
KeyedAccessStoreMode store_mode);

HValue* BuildCheckForCapacityGrow(HValue* object,
HValue* elements,
ElementsKind kind,
HValue* length,
HValue* key,
bool is_js_array);

HValue* BuildCopyElementsOnWrite(HValue* object,
HValue* elements,
ElementsKind kind,
HValue* length);

HInstruction* BuildUncheckedMonomorphicElementAccess(
HValue* object,
@@ -919,20 +936,22 @@ class HGraphBuilder {
bool is_js_array,
ElementsKind elements_kind,
bool is_store,
KeyedAccessStoreMode store_mode,
Representation checked_index_representation = Representation::None());

HInstruction* BuildStoreMap(HValue* object, HValue* map, BailoutId id);
HInstruction* BuildStoreMap(HValue* object, Handle<Map> map, BailoutId id);

class CheckBuilder {
public:
CheckBuilder(HGraphBuilder* builder, BailoutId id);
explicit CheckBuilder(HGraphBuilder* builder);
~CheckBuilder() {
if (!finished_) End();
}

void CheckNotUndefined(HValue* value);
void CheckIntegerEq(HValue* left, HValue* right);
HValue* CheckNotUndefined(HValue* value);
HValue* CheckIntegerCompare(HValue* left, HValue* right, Token::Value op);
HValue* CheckIntegerEq(HValue* left, HValue* right);
void End();

private:
@@ -947,24 +966,27 @@ class HGraphBuilder {

class IfBuilder {
public:
IfBuilder(HGraphBuilder* builder, BailoutId id);
explicit IfBuilder(HGraphBuilder* builder);
~IfBuilder() {
if (!finished_) End();
}

HInstruction* BeginTrue(
HInstruction* BeginIf(
HValue* left,
HValue* right,
Token::Value token,
Representation input_representation = Representation::Integer32());
void BeginFalse();
HInstruction* BeginIfObjectsEqual(HValue* left, HValue* right);
HInstruction* BeginIfMapEquals(HValue* value, Handle<Map> map);
void BeginElse();
void End();

private:
Zone* zone() { return builder_->zone(); }

HGraphBuilder* builder_;
bool finished_;
bool did_else_;
HBasicBlock* first_true_block_;
HBasicBlock* last_true_block_;
HBasicBlock* first_false_block_;
@@ -983,8 +1005,7 @@ class HGraphBuilder {

LoopBuilder(HGraphBuilder* builder,
HValue* context,
Direction direction,
BailoutId id);
Direction direction);
~LoopBuilder() {
ASSERT(finished_);
}
@@ -1011,16 +1032,35 @@ class HGraphBuilder {
bool finished_;
};

HValue* BuildAllocateElements(HContext* context,
HValue* BuildNewElementsCapacity(HValue* context,
HValue* old_capacity);

void BuildNewSpaceArrayCheck(HValue* length,
ElementsKind kind);

HValue* BuildAllocateElements(HValue* context,
ElementsKind kind,
HValue* capacity);

void BuildCopyElements(HContext* context,
HValue* BuildGrowElementsCapacity(HValue* object,
HValue* elements,
ElementsKind kind,
HValue* length,
HValue* new_capacity);

void BuildFillElementsWithHole(HValue* context,
HValue* elements,
ElementsKind elements_kind,
HValue* from,
HValue* to);

void BuildCopyElements(HValue* context,
HValue* from_elements,
ElementsKind from_elements_kind,
HValue* to_elements,
ElementsKind to_elements_kind,
HValue* length);
HValue* length,
HValue* capacity);

private:
HGraphBuilder();
@@ -1311,6 +1351,10 @@ class HOptimizedGraphBuilder: public HGraphBuilder, public AstVisitor {
HValue* object,
SmallMapList* types,
Handle<String> name);
bool HandlePolymorphicArrayLengthLoad(Property* expr,
HValue* object,
SmallMapList* types,
Handle<String> name);
void HandlePolymorphicStoreNamedField(Assignment* expr,
HValue* object,
HValue* value,
@@ -1348,7 +1392,8 @@ class HOptimizedGraphBuilder: public HGraphBuilder, public AstVisitor {
HValue* val,
HValue* dependency,
Handle<Map> map,
bool is_store);
bool is_store,
KeyedAccessStoreMode store_mode);

HValue* HandlePolymorphicElementAccess(HValue* object,
HValue* key,
@@ -1357,6 +1402,7 @@ class HOptimizedGraphBuilder: public HGraphBuilder, public AstVisitor {
BailoutId ast_id,
int position,
bool is_store,
KeyedAccessStoreMode store_mode,
bool* has_side_effects);

HValue* HandleKeyedElementAccess(HValue* obj,
@@ -1383,6 +1429,8 @@ class HOptimizedGraphBuilder: public HGraphBuilder, public AstVisitor {
Property* expr,
Handle<Map> map);

void AddCheckMap(HValue* object, Handle<Map> map);

void AddCheckMapsWithTransitions(HValue* object,
Handle<Map> map);

@@ -216,8 +216,7 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
// eax: initial map
__ movzx_b(edi, FieldOperand(eax, Map::kInstanceSizeOffset));
__ shl(edi, kPointerSizeLog2);
__ AllocateInNewSpace(
edi, ebx, edi, no_reg, &rt_call, NO_ALLOCATION_FLAGS);
__ Allocate(edi, ebx, edi, no_reg, &rt_call, NO_ALLOCATION_FLAGS);
// Allocated the JSObject, now initialize the fields.
// eax: initial map
// ebx: JSObject
@@ -280,15 +279,15 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
// ebx: JSObject
// edi: start of next object (will be start of FixedArray)
// edx: number of elements in properties array
__ AllocateInNewSpace(FixedArray::kHeaderSize,
times_pointer_size,
edx,
REGISTER_VALUE_IS_INT32,
edi,
ecx,
no_reg,
&undo_allocation,
RESULT_CONTAINS_TOP);
__ Allocate(FixedArray::kHeaderSize,
times_pointer_size,
edx,
REGISTER_VALUE_IS_INT32,
edi,
ecx,
no_reg,
&undo_allocation,
RESULT_CONTAINS_TOP);

// Initialize the FixedArray.
// ebx: JSObject
@@ -409,10 +408,6 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
__ CmpObjectType(eax, FIRST_SPEC_OBJECT_TYPE, ecx);
__ j(above_equal, &exit);

// Symbols are "objects".
__ CmpInstanceType(ecx, SYMBOL_TYPE);
__ j(equal, &exit);

// Throw away the result of the constructor invocation and use the
// on-stack receiver as the result.
__ bind(&use_receiver);
@@ -1129,15 +1124,15 @@ static void AllocateJSArray(MacroAssembler* masm,
// Allocate the JSArray object together with space for a FixedArray with the
// requested elements.
STATIC_ASSERT(kSmiTagSize == 1 && kSmiTag == 0);
__ AllocateInNewSpace(JSArray::kSize + FixedArray::kHeaderSize,
times_pointer_size,
array_size,
REGISTER_VALUE_IS_SMI,
result,
elements_array_end,
scratch,
gc_required,
TAG_OBJECT);
__ Allocate(JSArray::kSize + FixedArray::kHeaderSize,
times_pointer_size,
array_size,
REGISTER_VALUE_IS_SMI,
result,
elements_array_end,
scratch,
gc_required,
TAG_OBJECT);

// Allocated the JSArray. Now initialize the fields except for the elements
// array.
@@ -67,6 +67,17 @@ void KeyedLoadFastElementStub::InitializeInterfaceDescriptor(
}


void KeyedStoreFastElementStub::InitializeInterfaceDescriptor(
Isolate* isolate,
CodeStubInterfaceDescriptor* descriptor) {
static Register registers[] = { edx, ecx, eax };
descriptor->register_param_count_ = 3;
descriptor->register_params_ = registers;
descriptor->deoptimization_handler_ =
FUNCTION_ADDR(KeyedStoreIC_MissFromStubFailure);
}


void TransitionElementsKindStub::InitializeInterfaceDescriptor(
Isolate* isolate,
CodeStubInterfaceDescriptor* descriptor) {
@@ -89,7 +100,7 @@ static void InitializeArrayConstructorDescriptor(Isolate* isolate,
// stack param count needs (constructor pointer, and single argument)
descriptor->stack_parameter_count_ = &eax;
descriptor->register_params_ = registers;
descriptor->extra_expression_stack_count_ = 1;
descriptor->function_mode_ = JS_FUNCTION_STUB_MODE;
descriptor->deoptimization_handler_ =
FUNCTION_ADDR(ArrayConstructor_StubFailure);
}
@@ -621,6 +632,14 @@ void ToBooleanStub::Generate(MacroAssembler* masm) {
__ bind(&not_string);
}

if (types_.Contains(SYMBOL)) {
// Symbol value -> true.
Label not_symbol;
__ CmpInstanceType(map, SYMBOL_TYPE);
__ j(not_equal, &not_symbol, Label::kNear);
__ bind(&not_symbol);
}

if (types_.Contains(HEAP_NUMBER)) {
// heap number -> false iff +0, -0, or NaN.
Label not_heap_number, false_result;
@@ -3285,25 +3304,6 @@ void MathPowStub::Generate(MacroAssembler* masm) {
}


void ArrayLengthStub::Generate(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- ecx : name
// -- edx : receiver
// -- esp[0] : return address
// -----------------------------------
Label miss;

if (kind() == Code::KEYED_LOAD_IC) {
__ cmp(ecx, Immediate(masm->isolate()->factory()->length_string()));
__ j(not_equal, &miss);
}

StubCompiler::GenerateLoadArrayLength(masm, edx, eax, &miss);
__ bind(&miss);
StubCompiler::TailCallBuiltin(masm, StubCompiler::MissBuiltin(kind()));
}


void FunctionPrototypeStub::Generate(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- ecx : name
@@ -3558,7 +3558,7 @@ void ArgumentsAccessStub::GenerateNewNonStrictFast(MacroAssembler* masm) {
__ add(ebx, Immediate(Heap::kArgumentsObjectSize));

// Do the allocation of all three objects in one go.
__ AllocateInNewSpace(ebx, eax, edx, edi, &runtime, TAG_OBJECT);
__ Allocate(ebx, eax, edx, edi, &runtime, TAG_OBJECT);

// eax = address of new object(s) (tagged)
// ecx = argument count (tagged)
@@ -3756,7 +3756,7 @@ void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) {
__ add(ecx, Immediate(Heap::kArgumentsObjectSizeStrict));

// Do the allocation of both objects in one go.
__ AllocateInNewSpace(ecx, eax, edx, ebx, &runtime, TAG_OBJECT);
__ Allocate(ecx, eax, edx, ebx, &runtime, TAG_OBJECT);

// Get the arguments boilerplate from the current native context.
__ mov(edi, Operand(esi, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
@@ -4280,15 +4280,15 @@ void RegExpConstructResultStub::Generate(MacroAssembler* masm) {
// Allocate RegExpResult followed by FixedArray with size in ebx.
// JSArray: [Map][empty properties][Elements][Length-smi][index][input]
// Elements: [Map][Length][..elements..]
__ AllocateInNewSpace(JSRegExpResult::kSize + FixedArray::kHeaderSize,
times_pointer_size,
ebx, // In: Number of elements as a smi
REGISTER_VALUE_IS_SMI,
eax, // Out: Start of allocation (tagged).
ecx, // Out: End of allocation.
edx, // Scratch register
&slowcase,
TAG_OBJECT);
__ Allocate(JSRegExpResult::kSize + FixedArray::kHeaderSize,
times_pointer_size,
ebx, // In: Number of elements as a smi
REGISTER_VALUE_IS_SMI,
eax, // Out: Start of allocation (tagged).
ecx, // Out: End of allocation.
edx, // Scratch register
&slowcase,
TAG_OBJECT);
// eax: Start of allocated area, object-tagged.

// Set JSArray map to global.regexp_result_map().
@@ -4525,6 +4525,7 @@ void ICCompareStub::GenerateGeneric(MacroAssembler* masm) {

// Identical objects can be compared fast, but there are some tricky cases
// for NaN and undefined.
Label generic_heap_number_comparison;
{
Label not_identical;
__ cmp(eax, edx);
@@ -4541,12 +4542,11 @@ void ICCompareStub::GenerateGeneric(MacroAssembler* masm) {
__ bind(&check_for_nan);
}

// Test for NaN. Sadly, we can't just compare to factory->nan_value(),
// so we do the second best thing - test it ourselves.
Label heap_number;
// Test for NaN. Compare heap numbers in a general way,
// to handle NaNs correctly.
__ cmp(FieldOperand(edx, HeapObject::kMapOffset),
Immediate(masm->isolate()->factory()->heap_number_map()));
__ j(equal, &heap_number, Label::kNear);
__ j(equal, &generic_heap_number_comparison, Label::kNear);
if (cc != equal) {
// Call runtime on identical JSObjects. Otherwise return equal.
__ CmpObjectType(eax, FIRST_SPEC_OBJECT_TYPE, ecx);
@@ -4555,37 +4555,6 @@ void ICCompareStub::GenerateGeneric(MacroAssembler* masm) {
__ Set(eax, Immediate(Smi::FromInt(EQUAL)));
__ ret(0);

__ bind(&heap_number);
// It is a heap number, so return non-equal if it's NaN and equal if
// it's not NaN.
// The representation of NaN values has all exponent bits (52..62) set,
// and not all mantissa bits (0..51) clear.
// We only accept QNaNs, which have bit 51 set.
// Read top bits of double representation (second word of value).

// Value is a QNaN if value & kQuietNaNMask == kQuietNaNMask, i.e.,
// all bits in the mask are set. We only need to check the word
// that contains the exponent and high bit of the mantissa.
STATIC_ASSERT(((kQuietNaNHighBitsMask << 1) & 0x80000000u) != 0);
__ mov(edx, FieldOperand(edx, HeapNumber::kExponentOffset));
__ Set(eax, Immediate(0));
// Shift value and mask so kQuietNaNHighBitsMask applies to topmost
// bits.
__ add(edx, edx);
__ cmp(edx, kQuietNaNHighBitsMask << 1);
if (cc == equal) {
STATIC_ASSERT(EQUAL != 1);
__ setcc(above_equal, eax);
__ ret(0);
} else {
Label nan;
__ j(above_equal, &nan, Label::kNear);
__ Set(eax, Immediate(Smi::FromInt(EQUAL)));
__ ret(0);
__ bind(&nan);
__ Set(eax, Immediate(Smi::FromInt(NegativeComparisonResult(cc))));
__ ret(0);
}

__ bind(&not_identical);
}
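
// For reference, the deleted fast path classified only quiet NaNs. A
// self-contained sketch of the same high-word test (the constant is
// assumed to match V8's kQuietNaNHighBitsMask from globals.h):

#include <cstdint>
#include <cstring>

// Exponent bits 52..62 plus the top mantissa bit 51, as seen in the upper
// 32 bits of a double.
static const uint32_t kQuietNaNHighBitsMask = 0xfff << (51 - 32);

bool IsQuietNaNHighWord(double d) {
  uint64_t bits;
  std::memcpy(&bits, &d, sizeof bits);
  uint32_t high = static_cast<uint32_t>(bits >> 32);
  // Shift out the sign bit (the assembly used `add edx, edx`), then do an
  // unsigned compare: above-or-equal iff bits 51..62 are all set.
  return (high << 1) >= (kQuietNaNHighBitsMask << 1);
}

// A signalling NaN (bit 51 clear) fails this test and would have compared
// equal to itself, which is presumably why the hand-rolled check gives way
// to the generic heap-number comparison bound below.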
@@ -4665,6 +4634,7 @@ void ICCompareStub::GenerateGeneric(MacroAssembler* masm) {
// Generate the number comparison code.
Label non_number_comparison;
Label unordered;
__ bind(&generic_heap_number_comparison);
if (CpuFeatures::IsSupported(SSE2)) {
CpuFeatureScope use_sse2(masm, SSE2);
CpuFeatureScope use_cmov(masm, CMOV);
@@ -7825,8 +7795,10 @@ void StubFailureTrampolineStub::Generate(MacroAssembler* masm) {
__ mov(ebx, MemOperand(ebp, parameter_count_offset));
masm->LeaveFrame(StackFrame::STUB_FAILURE_TRAMPOLINE);
__ pop(ecx);
__ lea(esp, MemOperand(esp, ebx, times_pointer_size,
extra_expression_stack_count_ * kPointerSize));
int additional_offset = function_mode_ == JS_FUNCTION_STUB_MODE
? kPointerSize
: 0;
__ lea(esp, MemOperand(esp, ebx, times_pointer_size, additional_offset));
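// Hedged reading of the new offset: ebx holds the caller's parameter count,
// and in JS_FUNCTION_STUB_MODE one extra slot is dropped beyond it,
// presumably the receiver, which that count does not include.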
__ jmp(ecx); // Return to IC Miss stub, continuation still on stack.
}

@@ -450,9 +450,8 @@ void ElementsTransitionGenerator::GenerateSmiToDouble(
// edi: length of source FixedArray (smi-tagged)
AllocationFlags flags =
static_cast<AllocationFlags>(TAG_OBJECT | DOUBLE_ALIGNMENT);
__ AllocateInNewSpace(FixedDoubleArray::kHeaderSize, times_8,
edi, REGISTER_VALUE_IS_SMI,
eax, ebx, no_reg, &gc_required, flags);
__ Allocate(FixedDoubleArray::kHeaderSize, times_8, edi,
REGISTER_VALUE_IS_SMI, eax, ebx, no_reg, &gc_required, flags);

// eax: destination FixedDoubleArray
// edi: number of elements
@@ -589,7 +588,7 @@ void ElementsTransitionGenerator::GenerateDoubleToObject(
// Allocate new FixedArray.
// ebx: length of source FixedDoubleArray (smi-tagged)
__ lea(edi, Operand(ebx, times_2, FixedArray::kHeaderSize));
__ AllocateInNewSpace(edi, eax, esi, no_reg, &gc_required, TAG_OBJECT);
__ Allocate(edi, eax, esi, no_reg, &gc_required, TAG_OBJECT);

// eax: destination FixedArray
// ebx: number of elements
@@ -952,7 +951,7 @@ void Code::PatchPlatformCodeAge(byte* sequence,
uint32_t young_length;
byte* young_sequence = GetNoCodeAgeSequence(&young_length);
if (age == kNoAge) {
memcpy(sequence, young_sequence, young_length);
CopyBytes(sequence, young_sequence, young_length);
CPU::FlushICache(sequence, young_length);
} else {
Code* stub = GetCodeAgeStub(age, parity);
@@ -2567,7 +2567,6 @@ void FullCodeGenerator::EmitIsObject(CallRuntime* expr) {


void FullCodeGenerator::EmitIsSpecObject(CallRuntime* expr) {
// TODO(rossberg): incorporate symbols.
ZoneList<Expression*>* args = expr->arguments();
ASSERT(args->length() == 1);

@@ -2703,28 +2702,6 @@ void FullCodeGenerator::EmitIsStringWrapperSafeForDefaultValueOf(
}


void FullCodeGenerator::EmitIsSymbol(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
ASSERT(args->length() == 1);

VisitForAccumulatorValue(args->at(0));

Label materialize_true, materialize_false;
Label* if_true = NULL;
Label* if_false = NULL;
Label* fall_through = NULL;
context()->PrepareTest(&materialize_true, &materialize_false,
&if_true, &if_false, &fall_through);

__ JumpIfSmi(eax, if_false);
__ CmpObjectType(eax, SYMBOL_TYPE, ebx);
PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
Split(equal, if_true, if_false, fall_through);

context()->Plug(if_true, if_false);
}


void FullCodeGenerator::EmitIsFunction(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
ASSERT(args->length() == 1);
@@ -4275,6 +4252,10 @@ void FullCodeGenerator::EmitLiteralCompareTypeof(Expression* expr,
__ test_b(FieldOperand(edx, Map::kBitFieldOffset),
1 << Map::kIsUndetectable);
Split(zero, if_true, if_false, fall_through);
} else if (check->Equals(isolate()->heap()->symbol_string())) {
__ JumpIfSmi(eax, if_false);
__ CmpObjectType(eax, SYMBOL_TYPE, edx);
Split(equal, if_true, if_false, fall_through);
} else if (check->Equals(isolate()->heap()->boolean_string())) {
__ cmp(eax, isolate()->factory()->true_value());
__ j(equal, if_true);
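// Example behavior of the new symbol branch (illustrative):
//   typeof Symbol("x") == "symbol"  // true: a single SYMBOL_TYPE check
//   typeof "x" == "symbol"          // false: non-symbol heap objects fail it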
@@ -4306,10 +4287,6 @@ void FullCodeGenerator::EmitLiteralCompareTypeof(Expression* expr,
__ cmp(eax, isolate()->factory()->null_value());
__ j(equal, if_true);
}
if (FLAG_harmony_symbols) {
__ CmpObjectType(eax, SYMBOL_TYPE, edx);
__ j(equal, if_true);
}
__ CmpObjectType(eax, FIRST_NONCALLABLE_SPEC_OBJECT_TYPE, edx);
__ j(below, if_false);
__ CmpInstanceType(edx, LAST_NONCALLABLE_SPEC_OBJECT_TYPE);
@@ -1740,13 +1740,6 @@ void LCodeGen::DoConstantT(LConstantT* instr) {
}


void LCodeGen::DoJSArrayLength(LJSArrayLength* instr) {
Register result = ToRegister(instr->result());
Register array = ToRegister(instr->value());
__ mov(result, FieldOperand(array, JSArray::kLengthOffset));
}
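
// With the dedicated instruction gone, a JS array length read reduces to the
// single tagged field load above; presumably the generic named-field load
// path now emits it from JSArray::kLengthOffset instead.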


void LCodeGen::DoFixedArrayBaseLength(
LFixedArrayBaseLength* instr) {
Register result = ToRegister(instr->result());
@@ -2116,6 +2109,12 @@ void LCodeGen::DoBranch(LBranch* instr) {
__ bind(&not_string);
}

if (expected.Contains(ToBooleanStub::SYMBOL)) {
// Symbol value -> true.
__ CmpInstanceType(map, SYMBOL_TYPE);
__ j(equal, true_label);
}

if (expected.Contains(ToBooleanStub::HEAP_NUMBER)) {
// heap number -> false iff +0, -0, or NaN.
Label not_heap_number;
@@ -5566,7 +5565,7 @@ void LCodeGen::DoAllocate(LAllocate* instr) {
__ Allocate(size, result, temp, no_reg, deferred->entry(), flags);
} else {
Register size = ToRegister(instr->size());
__ AllocateInNewSpace(size, result, temp, no_reg, deferred->entry(), flags);
__ Allocate(size, result, temp, no_reg, deferred->entry(), flags);
}

__ bind(deferred->exit());
@@ -5979,6 +5978,11 @@ Condition LCodeGen::EmitTypeofIs(Label* true_label,
1 << Map::kIsUndetectable);
final_branch_condition = zero;

} else if (type_name->Equals(heap()->symbol_string())) {
__ JumpIfSmi(input, false_label);
__ CmpObjectType(input, SYMBOL_TYPE, input);
final_branch_condition = equal;

} else if (type_name->Equals(heap()->boolean_string())) {
__ cmp(input, factory()->true_value());
__ j(equal, true_label);
@@ -6013,13 +6017,7 @@ Condition LCodeGen::EmitTypeofIs(Label* true_label,
__ cmp(input, factory()->null_value());
__ j(equal, true_label);
}
if (FLAG_harmony_symbols) {
__ CmpObjectType(input, SYMBOL_TYPE, input);
__ j(equal, true_label);
__ CmpInstanceType(input, FIRST_NONCALLABLE_SPEC_OBJECT_TYPE);
} else {
__ CmpObjectType(input, FIRST_NONCALLABLE_SPEC_OBJECT_TYPE, input);
}
__ CmpObjectType(input, FIRST_NONCALLABLE_SPEC_OBJECT_TYPE, input);
__ j(below, false_label);
__ CmpInstanceType(input, LAST_NONCALLABLE_SPEC_OBJECT_TYPE);
__ j(above, false_label);
@@ -923,6 +923,35 @@ void LChunkBuilder::VisitInstruction(HInstruction* current) {
LInstruction* instr = current->CompileToLithium(this);

if (instr != NULL) {
#ifdef DEBUG
// Make sure that the lithium instruction has either no fixed-register
// constraints (in temps or the result) or no operands that are used at
// start. If this invariant doesn't hold, the register allocator can decide
// to insert a split of a range immediately before the instruction due to an
// already allocated register needing to be used for the instruction's fixed
// register constraint. In this case, the register allocator won't see an
// interference between the split child and the use-at-start (it would if
// it were just a plain use), so it is free to move the split child into
// the same register that is used for the use-at-start.
// See https://code.google.com/p/chromium/issues/detail?id=201590
if (!(instr->ClobbersRegisters() && instr->ClobbersDoubleRegisters())) {
int fixed = 0;
int used_at_start = 0;
for (UseIterator it(instr); !it.Done(); it.Advance()) {
LUnallocated* operand = LUnallocated::cast(it.Current());
if (operand->IsUsedAtStart()) ++used_at_start;
}
if (instr->Output() != NULL) {
if (LUnallocated::cast(instr->Output())->HasFixedPolicy()) ++fixed;
}
for (TempIterator it(instr); !it.Done(); it.Advance()) {
LUnallocated* operand = LUnallocated::cast(it.Current());
if (operand->HasFixedPolicy()) ++fixed;
}
ASSERT(fixed == 0 || used_at_start == 0);
}
#endif

if (FLAG_stress_pointer_maps && !instr->HasPointerMap()) {
instr = AssignPointerMap(instr);
}
@@ -1182,16 +1211,20 @@ LInstruction* LChunkBuilder::DoUnaryMathOperation(HUnaryMathOperation* instr) {
input);
return MarkAsCall(DefineFixedDouble(result, xmm1), instr);
} else {
LOperand* input = UseRegisterAtStart(instr->value());
LOperand* context = UseAny(instr->context()); // Deferred use by MathAbs.
LOperand* input = NULL;
if (op == kMathPowHalf) {
input = UseRegisterAtStart(instr->value());
LOperand* temp = TempRegister();
LMathPowHalf* result = new(zone()) LMathPowHalf(context, input, temp);
return DefineSameAsFirst(result);
} else if (op == kMathRound) {
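// UseRegister rather than UseRegisterAtStart: LMathRound also fixes a temp
// to xmm4, and the DEBUG invariant added in VisitInstruction rules out
// mixing fixed-policy operands with use-at-start operands on one
// instruction.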
input = UseRegister(instr->value());
LOperand* temp = FixedTemp(xmm4);
LMathRound* result = new(zone()) LMathRound(context, input, temp);
return AssignEnvironment(DefineAsRegister(result));
} else {
input = UseRegisterAtStart(instr->value());
}
LUnaryMathOperation* result = new(zone()) LUnaryMathOperation(context,
input);
@@ -1716,12 +1749,6 @@ LInstruction* LChunkBuilder::DoClassOfTestAndBranch(
}


LInstruction* LChunkBuilder::DoJSArrayLength(HJSArrayLength* instr) {
LOperand* array = UseRegisterAtStart(instr->value());
return DefineAsRegister(new(zone()) LJSArrayLength(array));
}


LInstruction* LChunkBuilder::DoFixedArrayBaseLength(
HFixedArrayBaseLength* instr) {
LOperand* array = UseRegisterAtStart(instr->value());
@@ -114,7 +114,6 @@ class LCodeGen;
V(IsStringAndBranch) \
V(IsSmiAndBranch) \
V(IsUndetectableAndBranch) \
V(JSArrayLength) \
V(Label) \
V(LazyBailout) \
V(LoadContextSlot) \
@@ -1147,19 +1146,6 @@ class LCmpMapAndBranch: public LTemplateInstruction<0, 1, 0> {
};


class LJSArrayLength: public LTemplateInstruction<1, 1, 0> {
public:
explicit LJSArrayLength(LOperand* value) {
inputs_[0] = value;
}

LOperand* value() { return inputs_[0]; }

DECLARE_CONCRETE_INSTRUCTION(JSArrayLength, "js-array-length")
DECLARE_HYDROGEN_ACCESSOR(JSArrayLength)
};


class LFixedArrayBaseLength: public LTemplateInstruction<1, 1, 0> {
public:
explicit LFixedArrayBaseLength(LOperand* value) {
@@ -1332,18 +1332,16 @@ void MacroAssembler::Allocate(int object_size,
}


void MacroAssembler::AllocateInNewSpace(
int header_size,
ScaleFactor element_size,
Register element_count,
RegisterValueType element_count_type,
Register result,
Register result_end,
Register scratch,
Label* gc_required,
AllocationFlags flags) {
void MacroAssembler::Allocate(int header_size,
ScaleFactor element_size,
Register element_count,
RegisterValueType element_count_type,
Register result,
Register result_end,
Register scratch,
Label* gc_required,
AllocationFlags flags) {
ASSERT((flags & SIZE_IN_WORDS) == 0);
ASSERT((flags & PRETENURE_OLD_POINTER_SPACE) == 0);
if (!FLAG_inline_new) {
if (emit_debug_code()) {
// Trash the registers to simulate an allocation failure.
@@ -1365,6 +1363,7 @@ void MacroAssembler::AllocateInNewSpace(
// Align the next allocation. Storing the filler map without checking top is
// always safe because the limit of the heap is always aligned.
if ((flags & DOUBLE_ALIGNMENT) != 0) {
ASSERT((flags & PRETENURE_OLD_POINTER_SPACE) == 0);
ASSERT(kPointerAlignment * 2 == kDoubleAlignment);
Label aligned;
test(result, Immediate(kDoubleAlignmentMask));
@@ -1375,9 +1374,9 @@ void MacroAssembler::AllocateInNewSpace(
bind(&aligned);
}

// Calculate new top and bail out if new space is exhausted.
ExternalReference new_space_allocation_limit =
ExternalReference::new_space_allocation_limit_address(isolate());
// Calculate new top and bail out if space is exhausted.
ExternalReference allocation_limit =
AllocationUtils::GetAllocationLimitReference(isolate(), flags);

// We assume that element_count*element_size + header_size does not
// overflow.
@@ -1394,7 +1393,7 @@ void MacroAssembler::AllocateInNewSpace(
lea(result_end, Operand(element_count, element_size, header_size));
add(result_end, result);
j(carry, gc_required);
cmp(result_end, Operand::StaticVariable(new_space_allocation_limit));
cmp(result_end, Operand::StaticVariable(allocation_limit));
j(above, gc_required);

if ((flags & TAG_OBJECT) != 0) {
@@ -1407,14 +1406,13 @@ void MacroAssembler::AllocateInNewSpace(
}


void MacroAssembler::AllocateInNewSpace(Register object_size,
Register result,
Register result_end,
Register scratch,
Label* gc_required,
AllocationFlags flags) {
void MacroAssembler::Allocate(Register object_size,
Register result,
Register result_end,
Register scratch,
Label* gc_required,
AllocationFlags flags) {
ASSERT((flags & (RESULT_CONTAINS_TOP | SIZE_IN_WORDS)) == 0);
ASSERT((flags & PRETENURE_OLD_POINTER_SPACE) == 0);
if (!FLAG_inline_new) {
if (emit_debug_code()) {
// Trash the registers to simulate an allocation failure.
@@ -1436,6 +1434,7 @@ void MacroAssembler::AllocateInNewSpace(Register object_size,
// Align the next allocation. Storing the filler map without checking top is
// always safe because the limit of the heap is always aligned.
if ((flags & DOUBLE_ALIGNMENT) != 0) {
ASSERT((flags & PRETENURE_OLD_POINTER_SPACE) == 0);
ASSERT(kPointerAlignment * 2 == kDoubleAlignment);
Label aligned;
test(result, Immediate(kDoubleAlignmentMask));
@@ -1446,15 +1445,16 @@ void MacroAssembler::AllocateInNewSpace(Register object_size,
bind(&aligned);
}

// Calculate new top and bail out if new space is exhausted.
ExternalReference new_space_allocation_limit =
ExternalReference::new_space_allocation_limit_address(isolate());
// Calculate new top and bail out if space is exhausted.
ExternalReference allocation_limit =
AllocationUtils::GetAllocationLimitReference(isolate(), flags);

if (!object_size.is(result_end)) {
mov(result_end, object_size);
}
add(result_end, result);
j(carry, gc_required);
cmp(result_end, Operand::StaticVariable(new_space_allocation_limit));
cmp(result_end, Operand::StaticVariable(allocation_limit));
j(above, gc_required);

// Tag result if requested.
@@ -1511,15 +1511,15 @@ void MacroAssembler::AllocateTwoByteString(Register result,
and_(scratch1, Immediate(~kObjectAlignmentMask));

// Allocate two byte string in new space.
AllocateInNewSpace(SeqTwoByteString::kHeaderSize,
times_1,
scratch1,
REGISTER_VALUE_IS_INT32,
result,
scratch2,
scratch3,
gc_required,
TAG_OBJECT);
Allocate(SeqTwoByteString::kHeaderSize,
times_1,
scratch1,
REGISTER_VALUE_IS_INT32,
result,
scratch2,
scratch3,
gc_required,
TAG_OBJECT);

// Set the map, length and hash field.
mov(FieldOperand(result, HeapObject::kMapOffset),
@@ -1547,15 +1547,15 @@ void MacroAssembler::AllocateAsciiString(Register result,
and_(scratch1, Immediate(~kObjectAlignmentMask));

// Allocate ASCII string in new space.
AllocateInNewSpace(SeqOneByteString::kHeaderSize,
times_1,
scratch1,
REGISTER_VALUE_IS_INT32,
result,
scratch2,
scratch3,
gc_required,
TAG_OBJECT);
Allocate(SeqOneByteString::kHeaderSize,
times_1,
scratch1,
REGISTER_VALUE_IS_INT32,
result,
scratch2,
scratch3,
gc_required,
TAG_OBJECT);

// Set the map, length and hash field.
mov(FieldOperand(result, HeapObject::kMapOffset),
@@ -582,22 +582,22 @@ class MacroAssembler: public Assembler {
Label* gc_required,
AllocationFlags flags);

void AllocateInNewSpace(int header_size,
ScaleFactor element_size,
Register element_count,
RegisterValueType element_count_type,
Register result,
Register result_end,
Register scratch,
Label* gc_required,
AllocationFlags flags);

void AllocateInNewSpace(Register object_size,
Register result,
Register result_end,
Register scratch,
Label* gc_required,
AllocationFlags flags);
void Allocate(int header_size,
ScaleFactor element_size,
Register element_count,
RegisterValueType element_count_type,
Register result,
Register result_end,
Register scratch,
Label* gc_required,
AllocationFlags flags);

void Allocate(Register object_size,
Register result,
Register result_end,
Register scratch,
Label* gc_required,
AllocationFlags flags);
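
// Both overloads now share the Allocate name, so the target space is chosen
// by AllocationFlags alone. A hedged sketch of call sites (registers, size,
// and label are illustrative; per the ASSERTs in the definitions above, only
// the fixed-size overload accepts PRETENURE_OLD_POINTER_SPACE):
//
//   // New space (the default), tagged result:
//   __ Allocate(HeapNumber::kSize, eax, ebx, no_reg, &gc_required,
//               TAG_OBJECT);
//   // Pretenured into old pointer space; top and limit references are
//   // resolved via AllocationUtils::GetAllocationLimitReference().
//   __ Allocate(HeapNumber::kSize, eax, ebx, no_reg, &gc_required,
//               static_cast<AllocationFlags>(
//                   TAG_OBJECT | PRETENURE_OLD_POINTER_SPACE));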

// Undo allocation in new space. The object passed and objects allocated after
// it will no longer be allocated. Make sure that no pointers are left to the
@@ -730,7 +730,7 @@ void BaseStoreStubCompiler::GenerateRestoreName(MacroAssembler* masm,
// but may be destroyed if store is successful.
void StubCompiler::GenerateStoreField(MacroAssembler* masm,
Handle<JSObject> object,
int index,
LookupResult* lookup,
Handle<Map> transition,
Handle<Name> name,
Register receiver_reg,
@@ -740,16 +740,6 @@ void StubCompiler::GenerateStoreField(MacroAssembler* masm,
Register scratch2,
Label* miss_label,
Label* miss_restore_name) {
LookupResult lookup(masm->isolate());
object->Lookup(*name, &lookup);
if (lookup.IsFound() && (lookup.IsReadOnly() || !lookup.IsCacheable())) {
// In sloppy mode, we could just return the value and be done. However, we
// might be in strict mode, where we have to throw. Since we cannot tell,
// go into slow case unconditionally.
__ jmp(miss_label);
return;
}

// Check that the map of the object hasn't changed.
CompareMapMode mode = transition.is_null() ? ALLOW_ELEMENT_TRANSITION_MAPS
: REQUIRE_EXACT_MAP;
@@ -764,8 +754,9 @@ void StubCompiler::GenerateStoreField(MacroAssembler* masm,
// Check that we are allowed to write this.
if (!transition.is_null() && object->GetPrototype()->IsJSObject()) {
JSObject* holder;
if (lookup.IsFound()) {
holder = lookup.holder();
// holder == object indicates that no property was found.
if (lookup->holder() != *object) {
holder = lookup->holder();
} else {
// Find the top object.
holder = *object;
@@ -774,8 +765,19 @@ void StubCompiler::GenerateStoreField(MacroAssembler* masm,
} while (holder->GetPrototype()->IsJSObject());
}
// We need an extra register; CheckPrototypes returns the holder register.
CheckPrototypes(object, receiver_reg, Handle<JSObject>(holder), name_reg,
scratch1, scratch2, name, miss_restore_name);
Register holder_reg = CheckPrototypes(
object, receiver_reg, Handle<JSObject>(holder), name_reg,
scratch1, scratch2, name, miss_restore_name);
// If no property was found, and the holder (the last object in the
// prototype chain) is in slow mode, we need to do a negative lookup on the
// holder.
if (lookup->holder() == *object &&
!holder->HasFastProperties() &&
!holder->IsJSGlobalProxy() &&
!holder->IsJSGlobalObject()) {
GenerateDictionaryNegativeLookup(
masm, miss_restore_name, holder_reg, name, scratch1, scratch2);
}
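// The negative lookup protects the transition: a dictionary-mode holder has
// no stable map to check, so we verify directly that the name is absent
// from its property dictionary before committing to the new map.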
}

// Stub never generated for non-global objects that require access
@@ -799,6 +801,7 @@ void StubCompiler::GenerateStoreField(MacroAssembler* masm,
return;
}

int index;
if (!transition.is_null()) {
// Update the map of the object.
__ mov(scratch1, Immediate(transition));
@@ -813,8 +816,13 @@ void StubCompiler::GenerateStoreField(MacroAssembler* masm,
kDontSaveFPRegs,
OMIT_REMEMBERED_SET,
OMIT_SMI_CHECK);
index = transition->instance_descriptors()->GetFieldIndex(
transition->LastAdded());
} else {
index = lookup->GetFieldIndex().field_index();
}

// Adjust for the number of properties stored in the object. Even in the
// face of a transition we can use the old map here because the size of the
// object and the number of in-object properties is not going to change.
@@ -2350,6 +2358,12 @@ void CallStubCompiler::CompileHandlerFrontend(Handle<Object> object,
// Check that the object is a symbol.
__ CmpObjectType(edx, SYMBOL_TYPE, eax);
__ j(not_equal, &miss);
// Check that the maps starting from the prototype haven't changed.
GenerateDirectLoadGlobalFunctionPrototype(
masm(), Context::SYMBOL_FUNCTION_INDEX, eax, &miss);
CheckPrototypes(
Handle<JSObject>(JSObject::cast(object->GetPrototype(isolate()))),
eax, holder, ebx, edx, edi, name, &miss);
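// Mirrors the STRING_CHECK case: a method called on a symbol primitive
// lives on Symbol.prototype, so the chain is validated starting from the
// prototype loaded via Context::SYMBOL_FUNCTION_INDEX.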
break;

case NUMBER_CHECK: {
@@ -109,7 +109,7 @@ InlineCacheHolderFlag IC::GetCodeCacheForObject(Object* object,
// If the object is a value, we use the prototype map for the cache.
ASSERT(object->IsString() || object->IsSymbol() ||
object->IsNumber() || object->IsBoolean());
return DELEGATE_MAP;
return PROTOTYPE_MAP;
}


@@ -124,7 +124,7 @@ InlineCacheHolderFlag IC::GetCodeCacheForObject(JSObject* object,
!object->HasFastProperties() &&
!object->IsJSGlobalProxy() &&
!object->IsJSGlobalObject()) {
return DELEGATE_MAP;
return PROTOTYPE_MAP;
}
return OWN_MAP;
}
@@ -133,7 +133,8 @@ InlineCacheHolderFlag IC::GetCodeCacheForObject(JSObject* object,
JSObject* IC::GetCodeCacheHolder(Isolate* isolate,
Object* object,
InlineCacheHolderFlag holder) {
Object* map_owner = holder == OWN_MAP ? object : object->GetDelegate(isolate);
Object* map_owner =
holder == OWN_MAP ? object : object->GetPrototype(isolate);
ASSERT(map_owner->IsJSObject());
return JSObject::cast(map_owner);
}
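
// A hedged sketch of how the renamed pieces combine at a call site
// (hypothetical usage; variable names are illustrative):

// Decide where the IC caches, then fetch the object owning that map.
InlineCacheHolderFlag flag = IC::GetCodeCacheForObject(object, holder);
JSObject* cache_holder = IC::GetCodeCacheHolder(isolate, object, flag);
// flag == OWN_MAP       -> cache_holder is the receiver itself
// flag == PROTOTYPE_MAP -> cache_holder is object->GetPrototype(isolate)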