@@ -368,20 +368,6 @@ void Execution::RunMicrotasks(Isolate* isolate) {
}


void Execution::EnqueueMicrotask(Isolate* isolate, Handle<Object> microtask) {
bool threw = false;
Handle<Object> args[] = { microtask };
Execution::Call(
isolate,
isolate->enqueue_external_microtask(),
isolate->factory()->undefined_value(),
1,
args,
&threw);
ASSERT(!threw);
}


bool StackGuard::IsStackOverflow() {
ExecutionAccess access(isolate_);
return (thread_local_.jslimit_ != kInterruptLimit &&
@@ -516,15 +502,15 @@ void StackGuard::FullDeopt() {
}


bool StackGuard::IsDeoptMarkedAllocationSites() {
bool StackGuard::IsDeoptMarkedCode() {
ExecutionAccess access(isolate_);
return (thread_local_.interrupt_flags_ & DEOPT_MARKED_ALLOCATION_SITES) != 0;
return (thread_local_.interrupt_flags_ & DEOPT_MARKED_CODE) != 0;
}


void StackGuard::DeoptMarkedAllocationSites() {
void StackGuard::DeoptMarkedCode() {
ExecutionAccess access(isolate_);
thread_local_.interrupt_flags_ |= DEOPT_MARKED_ALLOCATION_SITES;
thread_local_.interrupt_flags_ |= DEOPT_MARKED_CODE;
set_interrupt_limits(access);
}

@@ -1040,9 +1026,9 @@ MaybeObject* Execution::HandleStackGuardInterrupt(Isolate* isolate) {
stack_guard->Continue(FULL_DEOPT);
Deoptimizer::DeoptimizeAll(isolate);
}
if (stack_guard->IsDeoptMarkedAllocationSites()) {
stack_guard->Continue(DEOPT_MARKED_ALLOCATION_SITES);
isolate->heap()->DeoptMarkedAllocationSites();
if (stack_guard->IsDeoptMarkedCode()) {
stack_guard->Continue(DEOPT_MARKED_CODE);
Deoptimizer::DeoptimizeMarkedCode(isolate);
}
if (stack_guard->IsInstallCodeRequest()) {
ASSERT(isolate->concurrent_recompilation_enabled());
@@ -45,7 +45,7 @@ enum InterruptFlag {
FULL_DEOPT = 1 << 6,
INSTALL_CODE = 1 << 7,
API_INTERRUPT = 1 << 8,
DEOPT_MARKED_ALLOCATION_SITES = 1 << 9
DEOPT_MARKED_CODE = 1 << 9
};
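
The InterruptFlag values above are single-bit masks, so several pending interrupts can coexist in one interrupt_flags_ word: requesting sets a bit, the stack check tests it, and Continue() clears it. A minimal standalone sketch of that protocol (a hypothetical type, not V8's actual StackGuard):

#include <cstdio>

enum InterruptFlagSketch {
  INTERRUPT = 1 << 0,
  DEOPT_MARKED_CODE_BIT = 1 << 9  // mirrors the flag defined above
};

struct StackGuardSketch {
  unsigned interrupt_flags_ = 0;
  void Request(InterruptFlagSketch f) { interrupt_flags_ |= f; }
  bool IsRequested(InterruptFlagSketch f) const {
    return (interrupt_flags_ & f) != 0;
  }
  void Continue(InterruptFlagSketch f) { interrupt_flags_ &= ~f; }
};

int main() {
  StackGuardSketch guard;
  guard.Request(DEOPT_MARKED_CODE_BIT);
  if (guard.IsRequested(DEOPT_MARKED_CODE_BIT)) {
    guard.Continue(DEOPT_MARKED_CODE_BIT);  // clear the bit before servicing
  }
  std::printf("pending: %u\n", guard.interrupt_flags_);  // prints 0
}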


@@ -175,7 +175,6 @@ class Execution : public AllStatic {
bool* has_pending_exception);

static void RunMicrotasks(Isolate* isolate);
static void EnqueueMicrotask(Isolate* isolate, Handle<Object> microtask);
};


@@ -223,8 +222,8 @@ class StackGuard {
void RequestInstallCode();
bool IsFullDeopt();
void FullDeopt();
bool IsDeoptMarkedAllocationSites();
void DeoptMarkedAllocationSites();
bool IsDeoptMarkedCode();
void DeoptMarkedCode();
void Continue(InterruptFlag after_what);

void RequestInterrupt(InterruptCallback callback, void* data);
@@ -282,7 +281,7 @@ class StackGuard {
void EnableInterrupts();
void DisableInterrupts();

#if V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_A64
#if V8_TARGET_ARCH_X64
static const uintptr_t kInterruptLimit = V8_UINT64_C(0xfffffffffffffffe);
static const uintptr_t kIllegalLimit = V8_UINT64_C(0xfffffffffffffff8);
#else
@@ -1300,6 +1300,12 @@ Handle<Code> Factory::CopyCode(Handle<Code> code, Vector<byte> reloc_info) {
}


Handle<String> Factory::InternalizedStringFromString(Handle<String> value) {
CALL_HEAP_FUNCTION(isolate(),
isolate()->heap()->InternalizeString(*value), String);
}


Handle<JSObject> Factory::NewJSObject(Handle<JSFunction> constructor,
PretenureFlag pretenure) {
JSFunction::EnsureHasInitialMap(constructor);
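
CALL_HEAP_FUNCTION, used by InternalizedStringFromString above, bridges the raw heap API (which can fail and request a GC) and the handle-based Factory API. A hedged standalone sketch of the retry idiom it is built around; the names here are illustrative stand-ins, not V8's real macro internals:

// Try an allocation; on failure, collect garbage and retry once.
template <typename T, typename AllocFn, typename GcFn>
T* AllocateWithRetry(AllocFn try_allocate, GcFn collect_garbage) {
  T* result = try_allocate();
  if (result == nullptr) {   // nullptr stands in for a retry-after-GC failure
    collect_garbage();       // free space, then try the allocation once more
    result = try_allocate();
  }
  return result;             // the real macro also wraps the result in a Handle<T>
}
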
@@ -1566,13 +1572,15 @@ Handle<JSMessageObject> Factory::NewJSMessageObject(
int start_position,
int end_position,
Handle<Object> script,
Handle<Object> stack_trace,
Handle<Object> stack_frames) {
CALL_HEAP_FUNCTION(isolate(),
isolate()->heap()->AllocateJSMessageObject(*type,
*arguments,
start_position,
end_position,
*script,
*stack_trace,
*stack_frames),
JSMessageObject);
}
@@ -225,6 +225,9 @@ class Factory {
Handle<Context> previous,
Handle<ScopeInfo> scope_info);

// Return the internalized version of the passed in string.
Handle<String> InternalizedStringFromString(Handle<String> value);

// Allocate a new struct. The struct is pretenured (allocated directly in
// the old generation).
Handle<Struct> NewStruct(InstanceType type);
@@ -525,6 +528,7 @@ class Factory {
int start_position,
int end_position,
Handle<Object> script,
Handle<Object> stack_trace,
Handle<Object> stack_frames);

Handle<SeededNumberDictionary> DictionaryAtNumberPut(


@@ -234,6 +234,7 @@ DEFINE_implication(track_double_fields, track_fields)
DEFINE_implication(track_heap_object_fields, track_fields)
DEFINE_implication(track_computed_fields, track_fields)
DEFINE_bool(smi_binop, true, "support smi representation in binary operations")
DEFINE_bool(smi_x64_store_opt, false, "optimized stores of smi on x64")

// Flags for optimization types.
DEFINE_bool(optimize_for_size, false,
@@ -254,9 +255,6 @@ DEFINE_bool(use_canonicalizing, true, "use hydrogen instruction canonicalizing")
DEFINE_bool(use_inlining, true, "use function inlining")
DEFINE_bool(use_escape_analysis, true, "use hydrogen escape analysis")
DEFINE_bool(use_allocation_folding, true, "use allocation folding")
DEFINE_bool(use_local_allocation_folding, false, "only fold in basic blocks")
DEFINE_bool(use_write_barrier_elimination, true,
"eliminate write barriers targeting allocations in optimized code")
DEFINE_int(max_inlining_levels, 5, "maximum number of inlining levels")
DEFINE_int(max_inlined_source_size, 600,
"maximum source size in bytes considered for a single inlining")
@@ -418,6 +416,10 @@ DEFINE_bool(disable_native_files, false, "disable builtin natives files")
// builtins-ia32.cc
DEFINE_bool(inline_new, true, "use fast inline allocation")

// checks.cc
DEFINE_bool(stack_trace_on_abort, true,
"print a stack trace if an assertion failure occurs")

// codegen-ia32.cc / codegen-arm.cc
DEFINE_bool(trace_codegen, false,
"print name of functions for which code is generated")
@@ -533,7 +535,6 @@ DEFINE_bool(parallel_sweeping, true, "enable parallel sweeping")
DEFINE_bool(concurrent_sweeping, false, "enable concurrent sweeping")
DEFINE_int(sweeper_threads, 0,
"number of parallel and concurrent sweeping threads")
DEFINE_bool(job_based_sweeping, false, "enable job based sweeping")
#ifdef VERIFY_HEAP
DEFINE_bool(verify_heap, false, "verify heap pointers before and after GC")
#endif
@@ -581,35 +582,19 @@ DEFINE_bool(use_verbose_printer, true, "allows verbose printing")
DEFINE_bool(allow_natives_syntax, false, "allow natives syntax")
DEFINE_bool(trace_parse, false, "trace parsing and preparsing")

// simulator-arm.cc, simulator-a64.cc and simulator-mips.cc
// simulator-arm.cc and simulator-mips.cc
DEFINE_bool(trace_sim, false, "Trace simulator execution")
DEFINE_bool(check_icache, false,
"Check icache flushes in ARM and MIPS simulator")
DEFINE_int(stop_sim_at, 0, "Simulator stop after x number of instructions")
#ifdef V8_TARGET_ARCH_A64
DEFINE_int(sim_stack_alignment, 16,
"Stack alignment in bytes in simulator. This must be a power of two "
"and it must be at least 16. 16 is default.")
#else
DEFINE_int(sim_stack_alignment, 8,
"Stack alingment in bytes in simulator (4 or 8, 8 is default)")
#endif
DEFINE_int(sim_stack_size, 2 * MB / KB,
"Stack size of the A64 simulator in kBytes (default is 2 MB)")
DEFINE_bool(log_regs_modified, true,
"When logging register values, only print modified registers.")
DEFINE_bool(log_colour, true,
"When logging, try to use coloured output.")
DEFINE_bool(ignore_asm_unimplemented_break, false,
"Don't break for ASM_UNIMPLEMENTED_BREAK macros.")
DEFINE_bool(trace_sim_messages, false,
"Trace simulator debug messages. Implied by --trace-sim.")

// isolate.cc
DEFINE_bool(stack_trace_on_illegal, false,
"print stack trace when an illegal exception is thrown")
DEFINE_bool(abort_on_uncaught_exception, false,
"abort program (dump core) when an uncaught exception is thrown")
DEFINE_bool(trace_exception, false,
"print stack trace when throwing exceptions")
DEFINE_bool(randomize_hashes, true,
"randomize hashes to avoid predictable hash collisions "
"(with snapshots this option cannot override the baked-in seed)")
@@ -814,21 +799,13 @@ DEFINE_bool(log_timer_events, false,
"Time events including external callbacks.")
DEFINE_implication(log_timer_events, log_internal_timer_events)
DEFINE_implication(log_internal_timer_events, prof)
DEFINE_bool(log_instruction_stats, false, "Log AArch64 instruction statistics.")
DEFINE_string(log_instruction_file, "a64_inst.csv",
"AArch64 instruction statistics log file.")
DEFINE_int(log_instruction_period, 1 << 22,
"AArch64 instruction statistics logging period.")

DEFINE_bool(redirect_code_traces, false,
"output deopt information and disassembly into file "
"code-<pid>-<isolate id>.asm")
DEFINE_string(redirect_code_traces_to, NULL,
"output deopt information and disassembly into the given file")

DEFINE_bool(hydrogen_track_positions, false,
"track source code positions when building IR")

//
// Disassembler only flags
//
@@ -861,6 +838,8 @@ DEFINE_bool(print_unopt_code, false, "print unoptimized code before "
"printing optimized code based on it")
DEFINE_bool(print_code_verbose, false, "print more information for code")
DEFINE_bool(print_builtin_code, false, "print generated code for builtins")
DEFINE_bool(emit_opt_code_positions, false,
"annotate optimize code with source code positions")

#ifdef ENABLE_DISASSEMBLER
DEFINE_bool(sodium, false, "print generated code output suitable for use with "
@@ -869,7 +848,7 @@ DEFINE_bool(sodium, false, "print generated code output suitable for use with "
DEFINE_implication(sodium, print_code_stubs)
DEFINE_implication(sodium, print_code)
DEFINE_implication(sodium, print_opt_code)
DEFINE_implication(sodium, hydrogen_track_positions)
DEFINE_implication(sodium, emit_opt_code_positions)
DEFINE_implication(sodium, code_comments)

DEFINE_bool(print_all_code, false, "enable all flags related to printing code")
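
DEFINE_implication(a, b) means that enabling flag a also turns on flag b, which is how --sodium fans out into the print flags above. A minimal standalone illustration of the idea (hypothetical flags, not the real flag machinery):

#include <cstdio>

static bool FLAG_sodium = true;       // as if --sodium was passed
static bool FLAG_print_code = false;

// DEFINE_implication(sodium, print_code) boils down to roughly this:
static void EnforceFlagImplications() {
  if (FLAG_sodium) FLAG_print_code = true;
}

int main() {
  EnforceFlagImplications();
  std::printf("print_code=%d\n", FLAG_print_code);  // prints print_code=1
}
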
@@ -36,8 +36,6 @@
#include "ia32/frames-ia32.h"
#elif V8_TARGET_ARCH_X64
#include "x64/frames-x64.h"
#elif V8_TARGET_ARCH_A64
#include "a64/frames-a64.h"
#elif V8_TARGET_ARCH_ARM
#include "arm/frames-arm.h"
#elif V8_TARGET_ARCH_MIPS
@@ -35,11 +35,7 @@
namespace v8 {
namespace internal {

#if V8_TARGET_ARCH_A64
typedef uint64_t RegList;
#else
typedef uint32_t RegList;
#endif

// Get the number of registers in a given register list.
int NumRegs(RegList list);
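
Given the comment above, NumRegs presumably counts the set bits in the register mask. A standalone sketch under that assumption:

#include <cstdint>
#include <cstdio>

typedef uint32_t RegListSketch;

int NumRegsSketch(RegListSketch list) {
  int count = 0;
  while (list != 0) {
    list &= list - 1;  // clear the lowest set bit
    ++count;
  }
  return count;
}

int main() {
  std::printf("%d\n", NumRegsSketch(0b1011));  // prints 3
}
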
@@ -345,6 +345,7 @@ bool FullCodeGenerator::MakeCode(CompilationInfo* info) {
info->function()->scope()->AllowsLazyCompilation());
cgen.PopulateDeoptimizationData(code);
cgen.PopulateTypeFeedbackInfo(code);
cgen.PopulateTypeFeedbackCells(code);
code->set_has_deoptimization_support(info->HasDeoptimizationSupport());
code->set_handler_table(*cgen.handler_table());
#ifdef ENABLE_DEBUGGER_SUPPORT
@@ -386,15 +387,6 @@ unsigned FullCodeGenerator::EmitBackEdgeTable() {
}


void FullCodeGenerator::InitializeFeedbackVector() {
int length = info_->function()->slot_count();
ASSERT_EQ(isolate()->heap()->the_hole_value(),
*TypeFeedbackInfo::UninitializedSentinel(isolate()));
feedback_vector_ = isolate()->factory()->NewFixedArrayWithHoles(length,
TENURED);
}


void FullCodeGenerator::PopulateDeoptimizationData(Handle<Code> code) {
// Fill in the deoptimization information.
ASSERT(info_->HasDeoptimizationSupport() || bailout_entries_.is_empty());
@@ -413,7 +405,6 @@ void FullCodeGenerator::PopulateDeoptimizationData(Handle<Code> code) {
void FullCodeGenerator::PopulateTypeFeedbackInfo(Handle<Code> code) {
Handle<TypeFeedbackInfo> info = isolate()->factory()->NewTypeFeedbackInfo();
info->set_ic_total_count(ic_total_count_);
info->set_feedback_vector(*FeedbackVector());
ASSERT(!isolate()->heap()->InNewSpace(*info));
code->set_type_feedback_info(*info);
}
@@ -434,6 +425,21 @@ void FullCodeGenerator::Initialize() {
}


void FullCodeGenerator::PopulateTypeFeedbackCells(Handle<Code> code) {
if (type_feedback_cells_.is_empty()) return;
int length = type_feedback_cells_.length();
int array_size = TypeFeedbackCells::LengthOfFixedArray(length);
Handle<TypeFeedbackCells> cache = Handle<TypeFeedbackCells>::cast(
isolate()->factory()->NewFixedArray(array_size, TENURED));
for (int i = 0; i < length; i++) {
cache->SetAstId(i, type_feedback_cells_[i].ast_id);
cache->SetCell(i, *type_feedback_cells_[i].cell);
}
TypeFeedbackInfo::cast(code->type_feedback_info())->set_type_feedback_cells(
*cache);
}


void FullCodeGenerator::PrepareForBailout(Expression* node, State state) {
PrepareForBailoutForId(node->id(), state);
}
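
PopulateTypeFeedbackCells above implies a paired layout: LengthOfFixedArray(length) sizes a backing array holding one AST id and one cell per logical entry. An illustrative standalone sketch under the assumption that each entry occupies two consecutive slots (these are not V8's real classes):

#include <cstdio>
#include <vector>

struct TypeFeedbackCellsSketch {
  std::vector<int> backing;  // [id0, cell0, id1, cell1, ...]
  static int LengthOfFixedArray(int n) { return n * 2; }  // assumed layout
  void SetAstId(int i, int id) { backing[2 * i] = id; }
  void SetCell(int i, int cell) { backing[2 * i + 1] = cell; }
};

int main() {
  TypeFeedbackCellsSketch cells;
  cells.backing.resize(TypeFeedbackCellsSketch::LengthOfFixedArray(2));
  cells.SetAstId(0, 17);
  cells.SetCell(0, 42);
  std::printf("%d %d\n", cells.backing[0], cells.backing[1]);  // 17 42
}
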
@@ -443,13 +449,13 @@ void FullCodeGenerator::CallLoadIC(ContextualMode contextual_mode,
TypeFeedbackId id) {
ExtraICState extra_state = LoadIC::ComputeExtraICState(contextual_mode);
Handle<Code> ic = LoadIC::initialize_stub(isolate(), extra_state);
CallIC(ic, id);
CallIC(ic, contextual_mode, id);
}


void FullCodeGenerator::CallStoreIC(TypeFeedbackId id) {
void FullCodeGenerator::CallStoreIC(ContextualMode mode, TypeFeedbackId id) {
Handle<Code> ic = StoreIC::initialize_stub(isolate(), strict_mode());
CallIC(ic, id);
CallIC(ic, mode, id);
}


@@ -484,6 +490,13 @@ void FullCodeGenerator::PrepareForBailoutForId(BailoutId id, State state) {
}


void FullCodeGenerator::RecordTypeFeedbackCell(
TypeFeedbackId id, Handle<Cell> cell) {
TypeFeedbackCellEntry entry = { id, cell };
type_feedback_cells_.Add(entry, zone());
}


void FullCodeGenerator::RecordBackEdge(BailoutId ast_id) {
// The pc offset does not need to be encoded and packed together with a state.
ASSERT(masm_->pc_offset() > 0);
@@ -96,6 +96,9 @@ class FullCodeGenerator: public AstVisitor {
? info->function()->ast_node_count() : 0,
info->zone()),
back_edges_(2, info->zone()),
type_feedback_cells_(info->HasDeoptimizationSupport()
? info->function()->ast_node_count() : 0,
info->zone()),
ic_total_count_(0) {
Initialize();
}
@@ -127,9 +130,6 @@ class FullCodeGenerator: public AstVisitor {
static const int kCodeSizeMultiplier = 162;
#elif V8_TARGET_ARCH_ARM
static const int kCodeSizeMultiplier = 142;
#elif V8_TARGET_ARCH_A64
// TODO(all): Copied ARM value. Check this is sensible for A64.
static const int kCodeSizeMultiplier = 142;
#elif V8_TARGET_ARCH_MIPS
static const int kCodeSizeMultiplier = 142;
#else
@@ -434,15 +434,9 @@ class FullCodeGenerator: public AstVisitor {
void PrepareForBailout(Expression* node, State state);
void PrepareForBailoutForId(BailoutId id, State state);

// Feedback slot support. The feedback vector will be cleared during gc and
// collected by the type-feedback oracle.
Handle<FixedArray> FeedbackVector() {
return feedback_vector_;
}
void StoreFeedbackVectorSlot(int slot, Handle<Object> object) {
feedback_vector_->set(slot, *object);
}
void InitializeFeedbackVector();
// Cache cell support. This associates AST ids with global property cells
// that will be cleared during GC and collected by the type-feedback oracle.
void RecordTypeFeedbackCell(TypeFeedbackId id, Handle<Cell> cell);

// Record a call's return site offset, used to rebuild the frame if the
// called function was inlined at the site.
@@ -558,11 +552,6 @@ class FullCodeGenerator: public AstVisitor {
void EmitVariableAssignment(Variable* var,
Token::Value op);

// Helper functions to EmitVariableAssignment
void EmitStoreToStackLocalOrContextSlot(Variable* var,
MemOperand location);
void EmitCallStoreContextSlot(Handle<String> name, LanguageMode mode);

// Complete a named property assignment. The receiver is expected on top
// of the stack and the right-hand-side value in the accumulator.
void EmitNamedPropertyAssignment(Assignment* expr);
@@ -573,11 +562,13 @@ class FullCodeGenerator: public AstVisitor {
void EmitKeyedPropertyAssignment(Assignment* expr);

void CallIC(Handle<Code> code,
ContextualMode mode = NOT_CONTEXTUAL,
TypeFeedbackId id = TypeFeedbackId::None());

void CallLoadIC(ContextualMode mode,
TypeFeedbackId id = TypeFeedbackId::None());
void CallStoreIC(TypeFeedbackId id = TypeFeedbackId::None());
void CallStoreIC(ContextualMode mode,
TypeFeedbackId id = TypeFeedbackId::None());

void SetFunctionPosition(FunctionLiteral* fun);
void SetReturnPosition(FunctionLiteral* fun);
@@ -644,6 +635,7 @@ class FullCodeGenerator: public AstVisitor {
void Generate();
void PopulateDeoptimizationData(Handle<Code> code);
void PopulateTypeFeedbackInfo(Handle<Code> code);
void PopulateTypeFeedbackCells(Handle<Code> code);

Handle<FixedArray> handler_table() { return handler_table_; }

@@ -658,6 +650,12 @@ class FullCodeGenerator: public AstVisitor {
uint32_t loop_depth;
};

struct TypeFeedbackCellEntry {
TypeFeedbackId ast_id;
Handle<Cell> cell;
};


class ExpressionContext BASE_EMBEDDED {
public:
explicit ExpressionContext(FullCodeGenerator* codegen)
@@ -847,9 +845,9 @@ class FullCodeGenerator: public AstVisitor {
ZoneList<BailoutEntry> bailout_entries_;
GrowableBitVector prepared_bailout_ids_;
ZoneList<BackEdgeEntry> back_edges_;
ZoneList<TypeFeedbackCellEntry> type_feedback_cells_;
int ic_total_count_;
Handle<FixedArray> handler_table_;
Handle<FixedArray> feedback_vector_;
Handle<Cell> profiling_counter_;
bool generate_debug_code_;

@@ -71,18 +71,14 @@ namespace internal {
#define V8_HOST_ARCH_IA32 1
#define V8_HOST_ARCH_32_BIT 1
#define V8_HOST_CAN_READ_UNALIGNED 1
#elif defined(__AARCH64EL__)
#define V8_HOST_ARCH_A64 1
#define V8_HOST_ARCH_64_BIT 1
#define V8_HOST_CAN_READ_UNALIGNED 1
#elif defined(__ARMEL__)
#define V8_HOST_ARCH_ARM 1
#define V8_HOST_ARCH_32_BIT 1
#elif defined(__MIPSEL__)
#define V8_HOST_ARCH_MIPS 1
#define V8_HOST_ARCH_32_BIT 1
#else
#error "Host architecture was not detected as supported by v8"
#error Host architecture was not detected as supported by v8
#endif

#if defined(__ARM_ARCH_7A__) || \
@@ -99,13 +95,11 @@ namespace internal {
// in the same way as the host architecture, that is, target the native
// environment as presented by the compiler.
#if !V8_TARGET_ARCH_X64 && !V8_TARGET_ARCH_IA32 && \
!V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_A64 && !V8_TARGET_ARCH_MIPS
!V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_MIPS
#if defined(_M_X64) || defined(__x86_64__)
#define V8_TARGET_ARCH_X64 1
#elif defined(_M_IX86) || defined(__i386__)
#define V8_TARGET_ARCH_IA32 1
#elif defined(__AARCH64EL__)
#define V8_TARGET_ARCH_A64 1
#elif defined(__ARMEL__)
#define V8_TARGET_ARCH_ARM 1
#elif defined(__MIPSEL__)
@@ -125,9 +119,6 @@ namespace internal {
#if (V8_TARGET_ARCH_ARM && !(V8_HOST_ARCH_IA32 || V8_HOST_ARCH_ARM))
#error Target architecture arm is only supported on arm and ia32 host
#endif
#if (V8_TARGET_ARCH_A64 && !(V8_HOST_ARCH_X64 || V8_HOST_ARCH_A64))
#error Target architecture a64 is only supported on a64 and x64 host
#endif
#if (V8_TARGET_ARCH_MIPS && !(V8_HOST_ARCH_IA32 || V8_HOST_ARCH_MIPS))
#error Target architecture mips is only supported on mips and ia32 host
#endif
@@ -136,9 +127,6 @@ namespace internal {
// Setting USE_SIMULATOR explicitly from the build script will force
// the use of a simulated environment.
#if !defined(USE_SIMULATOR)
#if (V8_TARGET_ARCH_A64 && !V8_HOST_ARCH_A64)
#define USE_SIMULATOR 1
#endif
#if (V8_TARGET_ARCH_ARM && !V8_HOST_ARCH_ARM)
#define USE_SIMULATOR 1
#endif
@@ -154,8 +142,6 @@ namespace internal {
#define V8_TARGET_LITTLE_ENDIAN 1
#elif V8_TARGET_ARCH_ARM
#define V8_TARGET_LITTLE_ENDIAN 1
#elif V8_TARGET_ARCH_A64
#define V8_TARGET_LITTLE_ENDIAN 1
#elif V8_TARGET_ARCH_MIPS
#define V8_TARGET_LITTLE_ENDIAN 1
#else
@@ -59,7 +59,8 @@ function MathSinh(x) {
// ES6 draft 09-27-13, section 20.2.2.12.
function MathCosh(x) {
if (!IS_NUMBER(x)) x = NonNumberToNumber(x);
if (!NUMBER_IS_FINITE(x)) return MathAbs(x);
// Returns the input unchanged for NaN and +/-Infinity.
if (!NUMBER_IS_FINITE(x)) return x;
return (MathExp(x) + MathExp(-x)) / 2;
}
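
The restored MathCosh computes the identity cosh(x) = (e^x + e^-x) / 2. An equivalent C++ sketch; note that returning x unchanged for non-finite inputs maps -Infinity to -Infinity, and that exp(x) overflows for large |x| before the true cosh value does, which is why production libraries special-case large inputs:

#include <cmath>
#include <cstdio>

double CoshSketch(double x) {
  if (!std::isfinite(x)) return x;  // mirrors the reverted JS branch; in IEEE
                                    // terms cosh(-inf) would be +inf
  return (std::exp(x) + std::exp(-x)) / 2;
}

int main() {
  std::printf("%f\n", CoshSketch(1.0));  // ~1.543081
}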

@@ -109,19 +110,19 @@ function MathAtanh(x) {
}


// ES6 draft 09-27-13, section 20.2.2.21.
// ES6 draft 09-27-13, section 20.2.2.21.
function MathLog10(x) {
return MathLog(x) * 0.434294481903251828; // log10(x) = log(x)/log(10).
}


// ES6 draft 09-27-13, section 20.2.2.22.
// ES6 draft 09-27-13, section 20.2.2.22.
function MathLog2(x) {
return MathLog(x) * 1.442695040888963407; // log2(x) = log(x)/log(2).
}


// ES6 draft 09-27-13, section 20.2.2.17.
// ES6 draft 09-27-13, section 20.2.2.17.
function MathHypot(x, y) { // Function length is 2.
// We may want to introduce fast paths for two arguments and when
// normalization to avoid overflow is not necessary. For now, we
@@ -154,26 +155,6 @@ function MathHypot(x, y) { // Function length is 2.
}


// ES6 draft 09-27-13, section 20.2.2.16.
function MathFround(x) {
return %Math_fround(TO_NUMBER_INLINE(x));
}


function MathClz32(x) {
x = ToUint32(TO_NUMBER_INLINE(x));
if (x == 0) return 32;
var result = 0;
// Binary search.
if ((x & 0xFFFF0000) === 0) { x <<= 16; result += 16; };
if ((x & 0xFF000000) === 0) { x <<= 8; result += 8; };
if ((x & 0xF0000000) === 0) { x <<= 4; result += 4; };
if ((x & 0xC0000000) === 0) { x <<= 2; result += 2; };
if ((x & 0x80000000) === 0) { x <<= 1; result += 1; };
return result;
}


function ExtendMath() {
%CheckIsBootstrapping();

@@ -189,11 +170,8 @@ function ExtendMath() {
"atanh", MathAtanh,
"log10", MathLog10,
"log2", MathLog2,
"hypot", MathHypot,
"fround", MathFround,
"clz32", MathClz32
"hypot", MathHypot
));
}


ExtendMath();
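
The removed MathClz32 counts leading zeros by binary search: each step tests the upper half of the remaining bits and shifts left when that half is empty. A direct C++ port of the same routine, for reference:

#include <cstdint>
#include <cstdio>

int Clz32Sketch(uint32_t x) {
  if (x == 0) return 32;
  int result = 0;
  if ((x & 0xFFFF0000u) == 0) { x <<= 16; result += 16; }
  if ((x & 0xFF000000u) == 0) { x <<= 8;  result += 8;  }
  if ((x & 0xF0000000u) == 0) { x <<= 4;  result += 4;  }
  if ((x & 0xC0000000u) == 0) { x <<= 2;  result += 2;  }
  if ((x & 0x80000000u) == 0) { x <<= 1;  result += 1;  }
  return result;
}

int main() {
  std::printf("%d %d %d\n",
              Clz32Sketch(1), Clz32Sketch(0x80000000u), Clz32Sketch(0));
  // prints: 31 0 32
}
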
@@ -809,21 +809,6 @@ NoWeakObjectVerificationScope::~NoWeakObjectVerificationScope() {
#endif


GCCallbacksScope::GCCallbacksScope(Heap* heap) : heap_(heap) {
heap_->gc_callbacks_depth_++;
}


GCCallbacksScope::~GCCallbacksScope() {
heap_->gc_callbacks_depth_--;
}


bool GCCallbacksScope::CheckReenter() {
return heap_->gc_callbacks_depth_ == 1;
}
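
GCCallbacksScope above is a plain RAII depth counter: the constructor bumps a per-heap depth, the destructor restores it, and CheckReenter lets only the outermost scope (depth 1) run user GC callbacks. A standalone sketch of the pattern with hypothetical types:

#include <cstdio>

struct HeapSketch { int gc_callbacks_depth = 0; };

class GCCallbacksScopeSketch {
 public:
  explicit GCCallbacksScopeSketch(HeapSketch* heap) : heap_(heap) {
    ++heap_->gc_callbacks_depth;
  }
  ~GCCallbacksScopeSketch() { --heap_->gc_callbacks_depth; }
  // Only the outermost scope should invoke user callbacks.
  bool CheckReenter() const { return heap_->gc_callbacks_depth == 1; }

 private:
  HeapSketch* heap_;
};

int main() {
  HeapSketch heap;
  GCCallbacksScopeSketch outer(&heap);
  std::printf("%d\n", outer.CheckReenter());  // 1: outermost
  GCCallbacksScopeSketch inner(&heap);
  std::printf("%d\n", inner.CheckReenter());  // 0: re-entered
}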


void VerifyPointersVisitor::VisitPointers(Object** start, Object** end) {
for (Object** current = start; current < end; current++) {
if ((*current)->IsHeapObject()) {
@@ -835,13 +820,6 @@ void VerifyPointersVisitor::VisitPointers(Object** start, Object** end) {
}


void VerifySmisVisitor::VisitPointers(Object** start, Object** end) {
for (Object** current = start; current < end; current++) {
CHECK((*current)->IsSmi());
}
}


double GCTracer::SizeOfHeapObjects() {
return (static_cast<double>(heap_->SizeOfObjects())) / MB;
}
@@ -34,7 +34,6 @@
#include "heap-profiler.h"
#include "debug.h"
#include "types.h"
#include "v8conversions.h"

namespace v8 {
namespace internal {
@@ -73,7 +72,7 @@ HeapEntry::HeapEntry(HeapSnapshot* snapshot,
Type type,
const char* name,
SnapshotObjectId id,
size_t self_size)
int self_size)
: type_(type),
children_count_(0),
children_index_(-1),
@@ -104,7 +103,7 @@ void HeapEntry::SetIndexedReference(HeapGraphEdge::Type type,
void HeapEntry::Print(
const char* prefix, const char* edge_name, int max_depth, int indent) {
STATIC_CHECK(sizeof(unsigned) == sizeof(id()));
OS::Print("%6" V8PRIuPTR " @%6u %*c %s%s: ",
OS::Print("%6d @%6u %*c %s%s: ",
self_size(), id(), indent, ' ', prefix, edge_name);
if (type() != kString) {
OS::Print("%s %.40s\n", TypeAsString(), name_);
@@ -194,7 +193,7 @@ template <> struct SnapshotSizeConstants<4> {

template <> struct SnapshotSizeConstants<8> {
static const int kExpectedHeapGraphEdgeSize = 24;
static const int kExpectedHeapEntrySize = 40;
static const int kExpectedHeapEntrySize = 32;
};

} // namespace
@@ -277,7 +276,7 @@ HeapEntry* HeapSnapshot::AddGcSubrootEntry(int tag) {
HeapEntry* HeapSnapshot::AddEntry(HeapEntry::Type type,
const char* name,
SnapshotObjectId id,
size_t size) {
int size) {
HeapEntry entry(this, type, name, id, size);
entries_.Add(entry);
return &entries_.last();
@@ -900,17 +899,10 @@ HeapEntry* V8HeapExplorer::AddEntry(HeapObject* object) {
HeapEntry* V8HeapExplorer::AddEntry(HeapObject* object,
HeapEntry::Type type,
const char* name) {
return AddEntry(object->address(), type, name, object->Size());
}


HeapEntry* V8HeapExplorer::AddEntry(Address address,
HeapEntry::Type type,
const char* name,
size_t size) {
SnapshotObjectId object_id = heap_object_map_->FindOrAddEntry(
address, static_cast<unsigned int>(size));
return snapshot_->AddEntry(type, name, object_id, size);
int object_size = object->Size();
SnapshotObjectId object_id =
heap_object_map_->FindOrAddEntry(object->address(), object_size);
return snapshot_->AddEntry(type, name, object_id, object_size);
}


@@ -1037,8 +1029,6 @@ void V8HeapExplorer::ExtractReferences(HeapObject* obj) {

if (obj->IsJSGlobalProxy()) {
ExtractJSGlobalProxyReferences(entry, JSGlobalProxy::cast(obj));
} else if (obj->IsJSArrayBuffer()) {
ExtractJSArrayBufferReferences(entry, JSArrayBuffer::cast(obj));
} else if (obj->IsJSObject()) {
ExtractJSObjectReferences(entry, JSObject::cast(obj));
} else if (obj->IsString()) {
@@ -1157,6 +1147,13 @@ void V8HeapExplorer::ExtractJSObjectReferences(
JSArrayBufferView::kBufferOffset);
SetWeakReference(view, entry, "weak_next", view->weak_next(),
JSArrayBufferView::kWeakNextOffset);
} else if (obj->IsJSArrayBuffer()) {
JSArrayBuffer* buffer = JSArrayBuffer::cast(obj);
SetWeakReference(buffer, entry, "weak_next", buffer->weak_next(),
JSArrayBuffer::kWeakNextOffset);
SetWeakReference(buffer, entry,
"weak_first_view", buffer->weak_first_view(),
JSArrayBuffer::kWeakFirstViewOffset);
}
TagObject(js_obj->properties(), "(object properties)");
SetInternalReference(obj, entry,
@@ -1457,42 +1454,6 @@ void V8HeapExplorer::ExtractAllocationSiteReferences(int entry,
}


class JSArrayBufferDataEntryAllocator : public HeapEntriesAllocator {
public:
JSArrayBufferDataEntryAllocator(size_t size, V8HeapExplorer* explorer)
: size_(size)
, explorer_(explorer) {
}
virtual HeapEntry* AllocateEntry(HeapThing ptr) {
return explorer_->AddEntry(
static_cast<Address>(ptr),
HeapEntry::kNative, "system / JSArrayBufferData", size_);
}
private:
size_t size_;
V8HeapExplorer* explorer_;
};


void V8HeapExplorer::ExtractJSArrayBufferReferences(
int entry, JSArrayBuffer* buffer) {
SetWeakReference(buffer, entry, "weak_next", buffer->weak_next(),
JSArrayBuffer::kWeakNextOffset);
SetWeakReference(buffer, entry,
"weak_first_view", buffer->weak_first_view(),
JSArrayBuffer::kWeakFirstViewOffset);
// Setup a reference to a native memory backing_store object.
if (!buffer->backing_store())
return;
size_t data_size = NumberToSize(heap_->isolate(), buffer->byte_length());
JSArrayBufferDataEntryAllocator allocator(data_size, this);
HeapEntry* data_entry =
filler_->FindOrAddEntry(buffer->backing_store(), &allocator);
filler_->SetNamedReference(HeapGraphEdge::kInternal,
entry, "backing_store", data_entry);
}


void V8HeapExplorer::ExtractClosureReferences(JSObject* js_obj, int entry) {
if (!js_obj->IsJSFunction()) return;

@@ -2702,49 +2663,24 @@ int HeapSnapshotJSONSerializer::GetStringId(const char* s) {
}


namespace {

template<size_t size> struct ToUnsigned;

template<> struct ToUnsigned<4> {
typedef uint32_t Type;
};

template<> struct ToUnsigned<8> {
typedef uint64_t Type;
};

} // namespace


template<typename T>
static int utoa_impl(T value, const Vector<char>& buffer, int buffer_pos) {
STATIC_CHECK(static_cast<T>(-1) > 0); // Check that T is unsigned
static int utoa(unsigned value, const Vector<char>& buffer, int buffer_pos) {
int number_of_digits = 0;
T t = value;
unsigned t = value;
do {
++number_of_digits;
} while (t /= 10);

buffer_pos += number_of_digits;
int result = buffer_pos;
do {
int last_digit = static_cast<int>(value % 10);
int last_digit = value % 10;
buffer[--buffer_pos] = '0' + last_digit;
value /= 10;
} while (value);
return result;
}


template<typename T>
static int utoa(T value, const Vector<char>& buffer, int buffer_pos) {
typename ToUnsigned<sizeof(value)>::Type unsigned_value = value;
STATIC_CHECK(sizeof(value) == sizeof(unsigned_value));
return utoa_impl(unsigned_value, buffer, buffer_pos);
}
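
utoa above writes an unsigned value in two passes: first count the digits, then fill the buffer back-to-front and return the position one past the last digit. A standalone version of the same technique:

#include <cstdio>

static int UtoaSketch(unsigned value, char* buffer, int buffer_pos) {
  int digits = 0;
  unsigned t = value;
  do { ++digits; } while (t /= 10);
  buffer_pos += digits;
  int result = buffer_pos;
  do {
    buffer[--buffer_pos] = static_cast<char>('0' + value % 10);
    value /= 10;
  } while (value);
  return result;  // index one past the last digit written
}

int main() {
  char buf[16] = {0};
  int end = UtoaSketch(42u, buf, 0);
  std::printf("%.*s (end=%d)\n", end, buf, end);  // prints "42 (end=2)"
}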


void HeapSnapshotJSONSerializer::SerializeEdge(HeapGraphEdge* edge,
bool first_edge) {
// The buffer needs space for 3 unsigned ints, 3 commas, \n and \0
@@ -2781,10 +2717,9 @@ void HeapSnapshotJSONSerializer::SerializeEdges() {


void HeapSnapshotJSONSerializer::SerializeNode(HeapEntry* entry) {
// The buffer needs space for 4 unsigned ints, 1 size_t, 5 commas, \n and \0
// The buffer needs space for 5 unsigned ints, 5 commas, \n and \0
static const int kBufferSize =
4 * MaxDecimalDigitsIn<sizeof(unsigned)>::kUnsigned // NOLINT
+ MaxDecimalDigitsIn<sizeof(size_t)>::kUnsigned // NOLINT
5 * MaxDecimalDigitsIn<sizeof(unsigned)>::kUnsigned // NOLINT
+ 5 + 1 + 1;
EmbeddedVector<char, kBufferSize> buffer;
int buffer_pos = 0;
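
Worked out for a build where sizeof(unsigned) == 4: assuming MaxDecimalDigitsIn<4>::kUnsigned is 10 (UINT32_MAX = 4294967295 has ten digits), kBufferSize is 5 * 10 + 5 + 1 + 1 = 57 bytes, covering five numbers, five commas, the newline, and the terminator.
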
@@ -114,14 +114,14 @@ class HeapEntry BASE_EMBEDDED {
Type type,
const char* name,
SnapshotObjectId id,
size_t self_size);
int self_size);

HeapSnapshot* snapshot() { return snapshot_; }
Type type() { return static_cast<Type>(type_); }
const char* name() { return name_; }
void set_name(const char* name) { name_ = name; }
inline SnapshotObjectId id() { return id_; }
size_t self_size() { return self_size_; }
int self_size() { return self_size_; }
INLINE(int index() const);
int children_count() const { return children_count_; }
INLINE(int set_children_index(int index));
@@ -146,7 +146,7 @@ class HeapEntry BASE_EMBEDDED {
unsigned type_: 4;
int children_count_: 28;
int children_index_;
size_t self_size_;
int self_size_;
SnapshotObjectId id_;
HeapSnapshot* snapshot_;
const char* name_;
@@ -186,7 +186,7 @@ class HeapSnapshot {
HeapEntry* AddEntry(HeapEntry::Type type,
const char* name,
SnapshotObjectId id,
size_t size);
int size);
HeapEntry* AddRootEntry();
HeapEntry* AddGcRootsEntry();
HeapEntry* AddGcSubrootEntry(int tag);
@@ -386,10 +386,6 @@ class V8HeapExplorer : public HeapEntriesAllocator {
void TagGlobalObjects();
void TagCodeObject(Code* code);
void TagBuiltinCodeObject(Code* code, const char* name);
HeapEntry* AddEntry(Address address,
HeapEntry::Type type,
const char* name,
size_t size);

static String* GetConstructorName(JSObject* object);

@@ -400,7 +396,6 @@ class V8HeapExplorer : public HeapEntriesAllocator {
HeapEntry* AddEntry(HeapObject* object,
HeapEntry::Type type,
const char* name);

const char* GetSystemEntryName(HeapObject* object);

void ExtractReferences(HeapObject* obj);
@@ -419,7 +414,6 @@ class V8HeapExplorer : public HeapEntriesAllocator {
void ExtractCellReferences(int entry, Cell* cell);
void ExtractPropertyCellReferences(int entry, PropertyCell* cell);
void ExtractAllocationSiteReferences(int entry, AllocationSite* site);
void ExtractJSArrayBufferReferences(int entry, JSArrayBuffer* buffer);
void ExtractClosureReferences(JSObject* js_obj, int entry);
void ExtractPropertyReferences(JSObject* js_obj, int entry);
bool ExtractAccessorPairProperty(JSObject* js_obj, int entry,
@@ -155,8 +155,7 @@ Heap::Heap()
configured_(false),
external_string_table_(this),
chunks_queued_for_free_(NULL),
relocation_mutex_(NULL),
gc_callbacks_depth_(0) {
relocation_mutex_(NULL) {
// Allow build-time customization of the max semispace size. Building
// V8 with snapshots and a non-default max semispace size is much
// easier if you can define it as part of the build environment.
@@ -546,9 +545,7 @@ void Heap::ProcessPretenuringFeedback() {
}
}

if (trigger_deoptimization) {
isolate_->stack_guard()->DeoptMarkedAllocationSites();
}
if (trigger_deoptimization) isolate_->stack_guard()->DeoptMarkedCode();

FlushAllocationSitesScratchpad();

@@ -570,25 +567,6 @@ void Heap::ProcessPretenuringFeedback() {
}


void Heap::DeoptMarkedAllocationSites() {
// TODO(hpayer): If iterating over the allocation sites list becomes a
// performance issue, use a cached heap data structure instead (similar to the
// allocation sites scratchpad).
Object* list_element = allocation_sites_list();
while (list_element->IsAllocationSite()) {
AllocationSite* site = AllocationSite::cast(list_element);
if (site->deopt_dependent_code()) {
site->dependent_code()->MarkCodeForDeoptimization(
isolate_,
DependentCode::kAllocationSiteTenuringChangedGroup);
site->set_deopt_dependent_code(false);
}
list_element = site->weak_next();
}
Deoptimizer::DeoptimizeMarkedCode(isolate_);
}


void Heap::GarbageCollectionEpilogue() {
store_buffer()->GCEpilogue();

@@ -597,9 +575,6 @@ void Heap::GarbageCollectionEpilogue() {
ZapFromSpace();
}

// Process pretenuring feedback and update allocation sites.
ProcessPretenuringFeedback();

#ifdef VERIFY_HEAP
if (FLAG_verify_heap) {
Verify();
@@ -774,21 +749,6 @@ void Heap::CollectAllAvailableGarbage(const char* gc_reason) {
}


void Heap::EnsureFillerObjectAtTop() {
// There may be an allocation memento behind every object in new space.
// If we evacuate a new space that is not full or if we are on the last page of
// the new space, then there may be uninitialized memory behind the top
// pointer of the new space page. We store a filler object there to
// identify the unused space.
Address from_top = new_space_.top();
Address from_limit = new_space_.limit();
if (from_top < from_limit) {
int remaining_in_page = static_cast<int>(from_limit - from_top);
CreateFillerObjectAt(from_top, remaining_in_page);
}
}


bool Heap::CollectGarbage(GarbageCollector collector,
const char* gc_reason,
const char* collector_reason,
@@ -805,7 +765,17 @@ bool Heap::CollectGarbage(GarbageCollector collector,
allocation_timeout_ = Max(6, FLAG_gc_interval);
#endif

EnsureFillerObjectAtTop();
// There may be an allocation memento behind every object in new space.
// If we evacuate a new space that is not full or if we are on the last page of
// the new space, then there may be uninitialized memory behind the top
// pointer of the new space page. We store a filler object there to
// identify the unused space.
Address from_top = new_space_.top();
Address from_limit = new_space_.limit();
if (from_top < from_limit) {
int remaining_in_page = static_cast<int>(from_limit - from_top);
CreateFillerObjectAt(from_top, remaining_in_page);
}

if (collector == SCAVENGER && !incremental_marking()->IsStopped()) {
if (FLAG_trace_incremental_marking) {
@@ -879,6 +849,16 @@ int Heap::NotifyContextDisposed() {
}


void Heap::PerformScavenge() {
GCTracer tracer(this, NULL, NULL);
if (incremental_marking()->IsStopped()) {
PerformGarbageCollection(SCAVENGER, &tracer);
} else {
PerformGarbageCollection(MARK_COMPACTOR, &tracer);
}
}


void Heap::MoveElements(FixedArray* array,
int dst_index,
int src_index,
@@ -1085,14 +1065,11 @@ bool Heap::PerformGarbageCollection(
GCType gc_type =
collector == MARK_COMPACTOR ? kGCTypeMarkSweepCompact : kGCTypeScavenge;

{ GCCallbacksScope scope(this);
if (scope.CheckReenter()) {
AllowHeapAllocation allow_allocation;
GCTracer::Scope scope(tracer, GCTracer::Scope::EXTERNAL);
VMState<EXTERNAL> state(isolate_);
HandleScope handle_scope(isolate_);
CallGCPrologueCallbacks(gc_type, kNoGCCallbackFlags);
}
{
GCTracer::Scope scope(tracer, GCTracer::Scope::EXTERNAL);
VMState<EXTERNAL> state(isolate_);
HandleScope handle_scope(isolate_);
CallGCPrologueCallbacks(gc_type, kNoGCCallbackFlags);
}

EnsureFromSpaceIsCommitted();
@@ -1197,14 +1174,11 @@ bool Heap::PerformGarbageCollection(
amount_of_external_allocated_memory_;
}

{ GCCallbacksScope scope(this);
if (scope.CheckReenter()) {
AllowHeapAllocation allow_allocation;
GCTracer::Scope scope(tracer, GCTracer::Scope::EXTERNAL);
VMState<EXTERNAL> state(isolate_);
HandleScope handle_scope(isolate_);
CallGCEpilogueCallbacks(gc_type, gc_callback_flags);
}
{
GCTracer::Scope scope(tracer, GCTracer::Scope::EXTERNAL);
VMState<EXTERNAL> state(isolate_);
HandleScope handle_scope(isolate_);
CallGCEpilogueCallbacks(gc_type, gc_callback_flags);
}

#ifdef VERIFY_HEAP
@@ -1644,6 +1618,8 @@ void Heap::Scavenge() {
IncrementYoungSurvivorsCounter(static_cast<int>(
(PromotedSpaceSizeOfObjects() - survived_watermark) + new_space_.Size()));

ProcessPretenuringFeedback();

LOG(isolate_, ResourceEvent("scavenge", "end"));

gc_state_ = NOT_IN_GC;
@@ -2023,12 +1999,14 @@ void Heap::ResetAllAllocationSitesDependentCode(PretenureFlag flag) {
AllocationSite* casted = AllocationSite::cast(cur);
if (casted->GetPretenureMode() == flag) {
casted->ResetPretenureDecision();
casted->set_deopt_dependent_code(true);
marked = true;
bool got_marked = casted->dependent_code()->MarkCodeForDeoptimization(
isolate_,
DependentCode::kAllocationSiteTenuringChangedGroup);
if (got_marked) marked = true;
}
cur = casted->weak_next();
}
if (marked) isolate_->stack_guard()->DeoptMarkedAllocationSites();
if (marked) isolate_->stack_guard()->DeoptMarkedCode();
}


@@ -2691,7 +2669,8 @@ MaybeObject* Heap::AllocateTypeFeedbackInfo() {
if (!maybe_info->To(&info)) return maybe_info;
}
info->initialize_storage();
info->set_feedback_vector(empty_fixed_array(), SKIP_WRITE_BARRIER);
info->set_type_feedback_cells(TypeFeedbackCells::cast(empty_fixed_array()),
SKIP_WRITE_BARRIER);
return info;
}

@@ -3073,17 +3052,6 @@ void Heap::CreateFixedStubs() {
// This eliminates the need for doing dictionary lookups in the
// stub cache for these stubs.
HandleScope scope(isolate());

// Create stubs that should be there, so we don't unexpectedly have to
// create them if we need them during the creation of another stub.
// Stub creation mixes raw pointers and handles in an unsafe manner so
// we cannot create stubs while we are creating stubs.
CodeStub::GenerateStubsAheadOfTime(isolate());

// MacroAssembler::Abort calls (usually enabled with --debug-code) depend on
// CEntryStub, so we need to call GenerateStubsAheadOfTime before JSEntryStub
// is created.

// gcc-4.4 has problems generating correct code for the following snippet:
// { JSEntryStub stub;
// js_entry_code_ = *stub.GetCode();
@@ -3094,6 +3062,12 @@ void Heap::CreateFixedStubs() {
// To workaround the problem, make separate functions without inlining.
Heap::CreateJSEntryStub();
Heap::CreateJSConstructEntryStub();

// Create stubs that should be there, so we don't unexpectedly have to
// create them if we need them during the creation of another stub.
// Stub creation mixes raw pointers and handles in an unsafe manner so
// we cannot create stubs while we are creating stubs.
CodeStub::GenerateStubsAheadOfTime(isolate());
}


@@ -3295,15 +3269,6 @@ bool Heap::CreateInitialObjects() {
}
set_observation_state(JSObject::cast(obj));

// Allocate object to hold object microtask state.
{ MaybeObject* maybe_obj = AllocateMap(JS_OBJECT_TYPE, JSObject::kHeaderSize);
if (!maybe_obj->ToObject(&obj)) return false;
}
{ MaybeObject* maybe_obj = AllocateJSObjectFromMap(Map::cast(obj));
if (!maybe_obj->ToObject(&obj)) return false;
}
set_microtask_state(JSObject::cast(obj));

{ MaybeObject* maybe_obj = AllocateSymbol();
if (!maybe_obj->ToObject(&obj)) return false;
}
@@ -3657,14 +3622,8 @@ void Heap::InitializeAllocationSitesScratchpad() {

void Heap::AddAllocationSiteToScratchpad(AllocationSite* site) {
if (allocation_sites_scratchpad_length_ < kAllocationSiteScratchpadSize) {
// We cannot use the normal write-barrier because slots need to be
// recorded with non-incremental marking as well. We have to explicitly
// record the slot to take evacuation candidates into account.
allocation_sites_scratchpad()->set(
allocation_sites_scratchpad_length_, site, SKIP_WRITE_BARRIER);
Object** slot = allocation_sites_scratchpad()->RawFieldOfElementAt(
allocation_sites_scratchpad_length_);
mark_compact_collector()->RecordSlot(slot, slot, *slot);
allocation_sites_scratchpad_length_, site);
allocation_sites_scratchpad_length_++;
}
}
@@ -3811,6 +3770,7 @@ MaybeObject* Heap::AllocateJSMessageObject(String* type,
int start_position,
int end_position,
Object* script,
Object* stack_trace,
Object* stack_frames) {
Object* result;
{ MaybeObject* maybe_result = Allocate(message_object_map(), NEW_SPACE);
@@ -3825,6 +3785,7 @@ MaybeObject* Heap::AllocateJSMessageObject(String* type,
message->set_start_position(start_position);
message->set_end_position(end_position);
message->set_script(script);
message->set_stack_trace(stack_trace);
message->set_stack_frames(stack_frames);
return result;
}
@@ -5862,9 +5823,6 @@ void Heap::Verify() {
VerifyPointersVisitor visitor;
IterateRoots(&visitor, VISIT_ONLY_STRONG);

VerifySmisVisitor smis_visitor;
IterateSmiRoots(&smis_visitor);

new_space_.Verify();

old_pointer_space_->Verify(&visitor);
@@ -6162,12 +6120,6 @@ void Heap::IterateWeakRoots(ObjectVisitor* v, VisitMode mode) {
}


void Heap::IterateSmiRoots(ObjectVisitor* v) {
v->VisitPointers(&roots_[kSmiRootsStart], &roots_[kRootListLength]);
v->Synchronize(VisitorSynchronization::kSmiRootList);
}


void Heap::IterateStrongRoots(ObjectVisitor* v, VisitMode mode) {
v->VisitPointers(&roots_[0], &roots_[kStrongRootListLength]);
v->Synchronize(VisitorSynchronization::kStrongRootList);
@@ -6390,7 +6342,7 @@ intptr_t Heap::PromotedSpaceSizeOfObjects() {


bool Heap::AdvanceSweepers(int step_size) {
ASSERT(!mark_compact_collector()->AreSweeperThreadsActivated());
ASSERT(isolate()->num_sweeper_threads() == 0);
bool sweeping_complete = old_data_space()->AdvanceSweeper(step_size);
sweeping_complete &= old_pointer_space()->AdvanceSweeper(step_size);
return sweeping_complete;
@@ -78,6 +78,7 @@ namespace internal {
V(ByteArray, empty_byte_array, EmptyByteArray) \
V(DescriptorArray, empty_descriptor_array, EmptyDescriptorArray) \
V(ConstantPoolArray, empty_constant_pool_array, EmptyConstantPoolArray) \
V(Smi, stack_limit, StackLimit) \
V(Oddball, arguments_marker, ArgumentsMarker) \
/* The roots above this line should be boring from a GC point of view. */ \
/* This means they are never in new space and never on a page that is */ \
@@ -185,8 +186,14 @@ namespace internal {
V(Code, js_entry_code, JsEntryCode) \
V(Code, js_construct_entry_code, JsConstructEntryCode) \
V(FixedArray, natives_source_cache, NativesSourceCache) \
V(Smi, last_script_id, LastScriptId) \
V(Script, empty_script, EmptyScript) \
V(Smi, real_stack_limit, RealStackLimit) \
V(NameDictionary, intrinsic_function_names, IntrinsicFunctionNames) \
V(Smi, arguments_adaptor_deopt_pc_offset, ArgumentsAdaptorDeoptPCOffset) \
V(Smi, construct_stub_deopt_pc_offset, ConstructStubDeoptPCOffset) \
V(Smi, getter_stub_deopt_pc_offset, GetterStubDeoptPCOffset) \
V(Smi, setter_stub_deopt_pc_offset, SetterStubDeoptPCOffset) \
V(Cell, undefined_cell, UndefineCell) \
V(JSObject, observation_state, ObservationState) \
V(Map, external_map, ExternalMap) \
@@ -196,22 +203,10 @@ namespace internal {
EmptySlowElementDictionary) \
V(Symbol, observed_symbol, ObservedSymbol) \
V(FixedArray, materialized_objects, MaterializedObjects) \
V(FixedArray, allocation_sites_scratchpad, AllocationSitesScratchpad) \
V(JSObject, microtask_state, MicrotaskState)

// Entries in this list are limited to Smis and are not visited during GC.
#define SMI_ROOT_LIST(V) \
V(Smi, stack_limit, StackLimit) \
V(Smi, real_stack_limit, RealStackLimit) \
V(Smi, last_script_id, LastScriptId) \
V(Smi, arguments_adaptor_deopt_pc_offset, ArgumentsAdaptorDeoptPCOffset) \
V(Smi, construct_stub_deopt_pc_offset, ConstructStubDeoptPCOffset) \
V(Smi, getter_stub_deopt_pc_offset, GetterStubDeoptPCOffset) \
V(Smi, setter_stub_deopt_pc_offset, SetterStubDeoptPCOffset)
V(FixedArray, allocation_sites_scratchpad, AllocationSitesScratchpad)

#define ROOT_LIST(V) \
STRONG_ROOT_LIST(V) \
SMI_ROOT_LIST(V) \
V(StringTable, string_table, StringTable)
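
STRONG_ROOT_LIST, SMI_ROOT_LIST, and ROOT_LIST above are X-macros: a single list definition is expanded several times with different per-entry macros (root accessors, the RootListIndex enum further down, and so on). A minimal standalone illustration of the pattern:

#include <cstdio>

#define DEMO_ROOT_LIST(V) \
  V(StackLimit)           \
  V(LastScriptId)

enum DemoRootIndex {
#define ROOT_INDEX_DECLARATION(camel) k##camel##RootIndex,
  DEMO_ROOT_LIST(ROOT_INDEX_DECLARATION)
#undef ROOT_INDEX_DECLARATION
  kDemoRootListLength
};

int main() {
  std::printf("%d roots\n", static_cast<int>(kDemoRootListLength));  // 2 roots
}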

// Heap roots that are known to be immortal immovable, for which we can safely
@@ -1139,6 +1134,7 @@ class Heap {
int start_position,
int end_position,
Object* script,
Object* stack_trace,
Object* stack_frames);

// Allocate a new external string object, which is backed by a string
@@ -1259,6 +1255,10 @@ class Heap {
// Notify the heap that a context has been disposed.
int NotifyContextDisposed();

// Utility to invoke the scavenger. This is needed in test code to
// ensure correct callback for weak global handles.
void PerformScavenge();

inline void increment_scan_on_scavenge_pages() {
scan_on_scavenge_pages_++;
if (FLAG_gc_verbose) {
@@ -1347,9 +1347,6 @@ class Heap {
void IterateRoots(ObjectVisitor* v, VisitMode mode);
// Iterates over all strong roots in the heap.
void IterateStrongRoots(ObjectVisitor* v, VisitMode mode);
// Iterates over entries in the smi roots list. Only interesting to the
// serializer/deserializer, since GC does not care about smis.
void IterateSmiRoots(ObjectVisitor* v);
// Iterates over all the other roots in the heap.
void IterateWeakRoots(ObjectVisitor* v, VisitMode mode);

@@ -1585,7 +1582,7 @@ class Heap {
// Implements the corresponding V8 API function.
bool IdleNotification(int hint);

// Declare all the root indices. This defines the root list order.
// Declare all the root indices.
enum RootListIndex {
#define ROOT_INDEX_DECLARATION(type, name, camel_name) k##camel_name##RootIndex,
STRONG_ROOT_LIST(ROOT_INDEX_DECLARATION)
@@ -1601,14 +1598,8 @@ class Heap {
#undef DECLARE_STRUCT_MAP

kStringTableRootIndex,

#define ROOT_INDEX_DECLARATION(type, name, camel_name) k##camel_name##RootIndex,
SMI_ROOT_LIST(ROOT_INDEX_DECLARATION)
#undef ROOT_INDEX_DECLARATION

kRootListLength,
kStrongRootListLength = kStringTableRootIndex,
kSmiRootsStart = kStringTableRootIndex + 1
kRootListLength
};

STATIC_CHECK(kUndefinedValueRootIndex == Internals::kUndefinedValueRootIndex);
@@ -1843,8 +1834,6 @@ class Heap {
return amount_of_external_allocated_memory_;
}

void DeoptMarkedAllocationSites();

// ObjectStats are kept in two arrays, counts and sizes. Related stats are
// stored in a contiguous linear buffer. Stats groups are stored one after
// another.
@@ -2131,11 +2120,6 @@ class Heap {
GarbageCollector SelectGarbageCollector(AllocationSpace space,
const char** reason);

// Make sure there is a filler value behind the top of the new space
// so that the GC does not confuse some uninitialized/stale memory
// with the allocation memento of the object at the top
void EnsureFillerObjectAtTop();

// Performs garbage collection operation.
// Returns whether there is a chance that another major GC could
// collect more garbage.
@@ -2510,8 +2494,6 @@ class Heap {
bool relocation_mutex_locked_by_optimizer_thread_;
#endif // DEBUG;

int gc_callbacks_depth_;

friend class Factory;
friend class GCTracer;
friend class DisallowAllocationFailure;
@@ -2524,7 +2506,6 @@ class Heap {
#ifdef VERIFY_HEAP
friend class NoWeakObjectVerificationScope;
#endif
friend class GCCallbacksScope;

DISALLOW_COPY_AND_ASSIGN(Heap);
};
@@ -2597,18 +2578,6 @@ class NoWeakObjectVerificationScope {
#endif


class GCCallbacksScope {
public:
explicit inline GCCallbacksScope(Heap* heap);
inline ~GCCallbacksScope();

inline bool CheckReenter();

private:
Heap* heap_;
};


// Visitor class to verify interior pointers in spaces that do not contain
// or care about intergenerational references. All heap object pointers have to
// point into the heap to a location that has a map pointer at its first word.
@@ -2620,13 +2589,6 @@ class VerifyPointersVisitor: public ObjectVisitor {
};


// Verify that all objects are Smis.
class VerifySmisVisitor: public ObjectVisitor {
public:
inline void VisitPointers(Object** start, Object** end);
};


// Space iterator for iterating over all spaces of the heap. Returns each space
// in turn, and null when it is done.
class AllSpaces BASE_EMBEDDED {
@@ -91,8 +91,8 @@ class BoundsCheckKey : public ZoneObject {

private:
BoundsCheckKey(HValue* index_base, HValue* length)
: index_base_(index_base),
length_(length) { }
: index_base_(index_base),
length_(length) { }

HValue* index_base_;
HValue* length_;
@@ -144,10 +144,7 @@ class BoundsCheckBbData: public ZoneObject {
// (either upper or lower; note that HasSingleCheck() becomes false).
// Otherwise one of the current checks is modified so that it also covers
// new_offset, and new_check is removed.
//
// If the check cannot be modified because the context is unknown it
// returns false, otherwise it returns true.
bool CoverCheck(HBoundsCheck* new_check,
void CoverCheck(HBoundsCheck* new_check,
int32_t new_offset) {
ASSERT(new_check->index()->representation().IsSmiOrInteger32());
bool keep_new_check = false;
@@ -158,48 +155,35 @@ class BoundsCheckBbData: public ZoneObject {
keep_new_check = true;
upper_check_ = new_check;
} else {
bool result = BuildOffsetAdd(upper_check_,
&added_upper_index_,
&added_upper_offset_,
Key()->IndexBase(),
new_check->index()->representation(),
new_offset);
if (!result) return false;
upper_check_->ReplaceAllUsesWith(upper_check_->index());
upper_check_->SetOperandAt(0, added_upper_index_);
TightenCheck(upper_check_, new_check);
}
} else if (new_offset < lower_offset_) {
lower_offset_ = new_offset;
if (HasSingleCheck()) {
keep_new_check = true;
lower_check_ = new_check;
} else {
bool result = BuildOffsetAdd(lower_check_,
&added_lower_index_,
&added_lower_offset_,
Key()->IndexBase(),
new_check->index()->representation(),
new_offset);
if (!result) return false;
lower_check_->ReplaceAllUsesWith(lower_check_->index());
lower_check_->SetOperandAt(0, added_lower_index_);
TightenCheck(lower_check_, new_check);
}
} else {
ASSERT(false);
// Should never have called CoverCheck() in this case.
UNREACHABLE();
}

if (!keep_new_check) {
new_check->block()->graph()->isolate()->counters()->
bounds_checks_eliminated()->Increment();
new_check->DeleteAndReplaceWith(new_check->ActualValue());
} else {
HBoundsCheck* first_check = new_check == lower_check_ ? upper_check_
: lower_check_;
// The length is guaranteed to be live at first_check.
ASSERT(new_check->length() == first_check->length());
HInstruction* old_position = new_check->next();
new_check->Unlink();
new_check->InsertAfter(first_check);
MoveIndexIfNecessary(new_check->index(), new_check, old_position);
}

return true;
}
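
Concretely: if a basic block checks both a[i] and a[i + 3] against the same length, CoverCheck widens the surviving upper check to offset 3, after which the separate check on a[i] is redundant, since any i for which i + 3 stays below the length trivially does too. Lower offsets are handled symmetrically through lower_check_.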

void RemoveZeroOperations() {
RemoveZeroAdd(&added_lower_index_, &added_lower_offset_);
RemoveZeroAdd(&added_upper_index_, &added_upper_offset_);
}

BoundsCheckBbData(BoundsCheckKey* key,
@@ -210,18 +194,14 @@ class BoundsCheckBbData: public ZoneObject {
HBoundsCheck* upper_check,
BoundsCheckBbData* next_in_bb,
BoundsCheckBbData* father_in_dt)
: key_(key),
lower_offset_(lower_offset),
upper_offset_(upper_offset),
basic_block_(bb),
lower_check_(lower_check),
upper_check_(upper_check),
added_lower_index_(NULL),
added_lower_offset_(NULL),
added_upper_index_(NULL),
added_upper_offset_(NULL),
next_in_bb_(next_in_bb),
father_in_dt_(father_in_dt) { }
: key_(key),
lower_offset_(lower_offset),
upper_offset_(upper_offset),
basic_block_(bb),
lower_check_(lower_check),
upper_check_(upper_check),
next_in_bb_(next_in_bb),
father_in_dt_(father_in_dt) { }

private:
BoundsCheckKey* key_;
@@ -230,57 +210,56 @@ class BoundsCheckBbData: public ZoneObject {
HBasicBlock* basic_block_;
HBoundsCheck* lower_check_;
HBoundsCheck* upper_check_;
HInstruction* added_lower_index_;
HConstant* added_lower_offset_;
HInstruction* added_upper_index_;
HConstant* added_upper_offset_;
BoundsCheckBbData* next_in_bb_;
BoundsCheckBbData* father_in_dt_;

// Given an existing add instruction and a bounds check, it tries to
// find the current context (either of the add or of the check index).
HValue* IndexContext(HInstruction* add, HBoundsCheck* check) {
if (add != NULL && add->IsAdd()) {
return HAdd::cast(add)->context();
void MoveIndexIfNecessary(HValue* index_raw,
HBoundsCheck* insert_before,
HInstruction* end_of_scan_range) {
if (!index_raw->IsAdd() && !index_raw->IsSub()) {
// index_raw can be HAdd(index_base, offset), HSub(index_base, offset),
// or index_base directly. In the latter case, no need to move anything.
return;
}
if (check->index()->IsBinaryOperation()) {
return HBinaryOperation::cast(check->index())->context();
HArithmeticBinaryOperation* index =
HArithmeticBinaryOperation::cast(index_raw);
HValue* left_input = index->left();
HValue* right_input = index->right();
bool must_move_index = false;
bool must_move_left_input = false;
bool must_move_right_input = false;
for (HInstruction* cursor = end_of_scan_range; cursor != insert_before;) {
if (cursor == left_input) must_move_left_input = true;
if (cursor == right_input) must_move_right_input = true;
if (cursor == index) must_move_index = true;
if (cursor->previous() == NULL) {
cursor = cursor->block()->dominator()->end();
} else {
cursor = cursor->previous();
}
}
return NULL;
}

// This function returns false if it cannot build the add because the
// current context cannot be determined.
bool BuildOffsetAdd(HBoundsCheck* check,
HInstruction** add,
HConstant** constant,
HValue* original_value,
Representation representation,
int32_t new_offset) {
HValue* index_context = IndexContext(*add, check);
if (index_context == NULL) return false;

Zone* zone = BasicBlock()->zone();
HConstant* new_constant = HConstant::New(zone, index_context,
new_offset, representation);
if (*add == NULL) {
new_constant->InsertBefore(check);
(*add) = HAdd::New(zone, index_context, original_value, new_constant);
(*add)->AssumeRepresentation(representation);
(*add)->InsertBefore(check);
} else {
new_constant->InsertBefore(*add);
(*constant)->DeleteAndReplaceWith(new_constant);
if (must_move_index) {
index->Unlink();
index->InsertBefore(insert_before);
}
// The BCE algorithm only selects mergeable bounds checks that share
// the same "index_base", so we'll only ever have to move constants.
if (must_move_left_input) {
HConstant::cast(left_input)->Unlink();
HConstant::cast(left_input)->InsertBefore(index);
}
if (must_move_right_input) {
HConstant::cast(right_input)->Unlink();
HConstant::cast(right_input)->InsertBefore(index);
}
*constant = new_constant;
return true;
}

void RemoveZeroAdd(HInstruction** add, HConstant** constant) {
if (*add != NULL && (*add)->IsAdd() && (*constant)->Integer32Value() == 0) {
(*add)->DeleteAndReplaceWith(HAdd::cast(*add)->left());
(*constant)->DeleteAndReplaceWith(NULL);
}
void TightenCheck(HBoundsCheck* original_check,
HBoundsCheck* tighter_check) {
ASSERT(original_check->length() == tighter_check->length());
MoveIndexIfNecessary(tighter_check->index(), original_check, tighter_check);
original_check->ReplaceAllUsesWith(original_check->index());
original_check->SetOperandAt(0, tighter_check->index());
}

DISALLOW_COPY_AND_ASSIGN(BoundsCheckBbData);
@@ -394,11 +373,10 @@ BoundsCheckBbData* HBoundsCheckEliminationPhase::PreProcessBlock(
bb->graph()->isolate()->counters()->
bounds_checks_eliminated()->Increment();
check->DeleteAndReplaceWith(check->ActualValue());
} else if (data->BasicBlock() != bb ||
!data->CoverCheck(check, offset)) {
// If the check is in the current BB we try to modify it by calling
// "CoverCheck", but if also that fails we record the current offsets
// in a new data instance because from now on they are covered.
} else if (data->BasicBlock() == bb) {
data->CoverCheck(check, offset);
} else if (graph()->use_optimistic_licm() ||
bb->IsLoopSuccessorDominator()) {
int32_t new_lower_offset = offset < data->LowerOffset()
? offset
: data->LowerOffset();
@@ -424,7 +402,6 @@ BoundsCheckBbData* HBoundsCheckEliminationPhase::PreProcessBlock(
void HBoundsCheckEliminationPhase::PostProcessBlock(
HBasicBlock* block, BoundsCheckBbData* data) {
while (data != NULL) {
data->RemoveZeroOperations();
if (data->FatherInDominatorTree()) {
table_.Insert(data->Key(), data->FatherInDominatorTree(), zone());
} else {

@@ -122,10 +122,9 @@ class HFlowEngine {

// Skip blocks not dominated by the root node.
if (SkipNonDominatedBlock(root, block)) continue;
State* state = State::Finish(StateAt(block), block, zone_);
State* state = StateAt(block);

if (block->IsReachable()) {
ASSERT(state != NULL);
if (block->IsLoopHeader()) {
// Apply loop effects before analyzing loop body.
ComputeLoopEffects(block)->Apply(state);
@@ -145,14 +144,18 @@ class HFlowEngine {
for (int i = 0; i < max; i++) {
HBasicBlock* succ = block->end()->SuccessorAt(i);
IncrementPredecessorCount(succ);

if (max == 1 && succ->predecessors()->length() == 1) {
// Optimization: successor can inherit this state.
SetStateAt(succ, state);
if (StateAt(succ) == NULL) {
// This is the first state to reach the successor.
if (max == 1 && succ->predecessors()->length() == 1) {
// Optimization: successor can inherit this state.
SetStateAt(succ, state);
} else {
// Successor needs a copy of the state.
SetStateAt(succ, state->Copy(succ, block, zone_));
}
} else {
// Merge the current state with the state already at the successor.
SetStateAt(succ,
State::Merge(StateAt(succ), succ, state, block, zone_));
SetStateAt(succ, StateAt(succ)->Merge(succ, state, block, zone_));
}
}
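
The successor-state rule above has three cases: a state reaching a successor for the first time is inherited when the successor has a single predecessor, copied otherwise, and on later visits merged with whatever state is already there. A hedged sketch with a placeholder State type (function pointers stand in for the engine's copy and merge operations):

struct State;  // placeholder for the engine's per-block lattice state

State* PropagateSketch(State* at_succ, State* state, bool single_pred,
                       State* (*copy)(State*),
                       State* (*merge)(State*, State*)) {
  if (at_succ == nullptr) {
    // First state to reach the successor: inherit when it has exactly one
    // predecessor, otherwise give it a private copy.
    return single_pred ? state : copy(state);
  }
  return merge(at_succ, state);  // join with the state already there
}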
}

Large diffs are not rendered by default.

@@ -36,96 +36,33 @@
namespace v8 {
namespace internal {

// This class extends GVNFlagSet with additional "special" dynamic side effects,
// which can be used to represent side effects that cannot be expressed using
// the GVNFlags of an HInstruction. These special side effects are tracked by a
// SideEffectsTracker (see below).
class SideEffects V8_FINAL {
public:
static const int kNumberOfSpecials = 64 - kNumberOfFlags;

SideEffects() : bits_(0) {
ASSERT(kNumberOfFlags + kNumberOfSpecials == sizeof(bits_) * CHAR_BIT);
}
explicit SideEffects(GVNFlagSet flags) : bits_(flags.ToIntegral()) {}
bool IsEmpty() const { return bits_ == 0; }
bool ContainsFlag(GVNFlag flag) const {
return (bits_ & MaskFlag(flag)) != 0;
}
bool ContainsSpecial(int special) const {
return (bits_ & MaskSpecial(special)) != 0;
}
bool ContainsAnyOf(SideEffects set) const { return (bits_ & set.bits_) != 0; }
void Add(SideEffects set) { bits_ |= set.bits_; }
void AddSpecial(int special) { bits_ |= MaskSpecial(special); }
void AddAllSpecial() { bits_ |= ~static_cast<uint64_t>(0) << kNumberOfFlags; }
void RemoveFlag(GVNFlag flag) { bits_ &= ~MaskFlag(flag); }
void RemoveAll() { bits_ = 0; }
uint64_t ToIntegral() const { return bits_; }
void PrintTo(StringStream* stream) const;

private:
uint64_t MaskFlag(GVNFlag flag) const {
return static_cast<uint64_t>(1) << static_cast<unsigned>(flag);
}
uint64_t MaskSpecial(int special) const {
ASSERT(special >= 0);
ASSERT(special < kNumberOfSpecials);
return static_cast<uint64_t>(1) << static_cast<unsigned>(
special + kNumberOfFlags);
}

uint64_t bits_;
};
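
Note: in the packing above, the low kNumberOfFlags bits of the uint64_t hold
ordinary GVN flags and the remaining bits hold the "special" dynamic side
effects. A standalone illustration of that layout; the flag count below is
an assumed value for illustration, not the real kNumberOfFlags:

  #include <cassert>
  #include <cstdint>

  static const int kNumberOfFlags = 12;  // Assumed value for illustration.
  static const int kNumberOfSpecials = 64 - kNumberOfFlags;

  static uint64_t MaskFlag(int flag) {
    assert(0 <= flag && flag < kNumberOfFlags);
    return static_cast<uint64_t>(1) << flag;
  }

  static uint64_t MaskSpecial(int special) {
    assert(0 <= special && special < kNumberOfSpecials);
    return static_cast<uint64_t>(1) << (special + kNumberOfFlags);
  }

  int main() {
    uint64_t bits = 0;
    bits |= MaskSpecial(0);                // AddSpecial(0)
    assert((bits & MaskFlag(0)) == 0);     // Flag 0 is unaffected...
    assert((bits & MaskSpecial(0)) != 0);  // ...while special 0 is set.
    return 0;
  }
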


// Tracks inobject field loads/stores in a fine grained fashion, and represents
// them using the "special" dynamic side effects of the SideEffects class (see
// above). This way unrelated inobject field stores don't prevent hoisting and
// merging of inobject field loads.
class SideEffectsTracker V8_FINAL BASE_EMBEDDED {
public:
SideEffectsTracker() : num_inobject_fields_(0) {}
SideEffects ComputeChanges(HInstruction* instr);
SideEffects ComputeDependsOn(HInstruction* instr);
void PrintSideEffectsTo(StringStream* stream, SideEffects side_effects) const;

private:
bool ComputeInobjectField(HObjectAccess access, int* index);

HObjectAccess inobject_fields_[SideEffects::kNumberOfSpecials];
int num_inobject_fields_;
};
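
Note: one plausible reading of ComputeInobjectField, sketched below under
assumption (the real implementation lives in the corresponding .cc file):
the tracker hands out one special bit per distinct inobject field until the
bits run out, after which callers must fall back to the coarse "all
specials" set.

  // Hypothetical sketch; plain offsets stand in for HObjectAccess keys.
  class FieldBitAllocator {
   public:
    FieldBitAllocator() : num_fields_(0) {}

    // Returns true and sets *index if the field got (or already had) a bit.
    bool IndexFor(int offset, int* index) {
      for (int i = 0; i < num_fields_; i++) {
        if (offsets_[i] == offset) { *index = i; return true; }
      }
      if (num_fields_ == kMaxFields) return false;  // Out of special bits.
      offsets_[num_fields_] = offset;
      *index = num_fields_++;
      return true;
    }

   private:
    static const int kMaxFields = 8;  // Illustrative cap; the real limit
                                      // is SideEffects::kNumberOfSpecials.
    int offsets_[kMaxFields];
    int num_fields_;
  };
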


// Perform common subexpression elimination and loop-invariant code motion.
class HGlobalValueNumberingPhase V8_FINAL : public HPhase {
class HGlobalValueNumberingPhase : public HPhase {
public:
explicit HGlobalValueNumberingPhase(HGraph* graph);

void Run();

private:
SideEffects CollectSideEffectsOnPathsToDominatedBlock(
GVNFlagSet CollectSideEffectsOnPathsToDominatedBlock(
HBasicBlock* dominator,
HBasicBlock* dominated);
void AnalyzeGraph();
void ComputeBlockSideEffects();
void LoopInvariantCodeMotion();
void ProcessLoopBlock(HBasicBlock* block,
HBasicBlock* before_loop,
SideEffects loop_kills);
GVNFlagSet loop_kills);
bool AllowCodeMotion();
bool ShouldMove(HInstruction* instr, HBasicBlock* loop_header);

SideEffectsTracker side_effects_tracker_;
bool removed_side_effects_;

// A map of block IDs to their side effects.
ZoneList<SideEffects> block_side_effects_;
ZoneList<GVNFlagSet> block_side_effects_;

// A map of loop header block IDs to their loop's side effects.
ZoneList<SideEffects> loop_side_effects_;
ZoneList<GVNFlagSet> loop_side_effects_;

// Used when collecting side effects on paths from dominator to
// dominated.
@@ -134,6 +71,7 @@ class HGlobalValueNumberingPhase V8_FINAL : public HPhase {
DISALLOW_COPY_AND_ASSIGN(HGlobalValueNumberingPhase);
};


} } // namespace v8::internal

#endif // V8_HYDROGEN_GVN_H_
@@ -35,8 +35,6 @@
#include "ia32/lithium-ia32.h"
#elif V8_TARGET_ARCH_X64
#include "x64/lithium-x64.h"
#elif V8_TARGET_ARCH_A64
#include "a64/lithium-a64.h"
#elif V8_TARGET_ARCH_ARM
#include "arm/lithium-arm.h"
#elif V8_TARGET_ARCH_MIPS
@@ -606,11 +604,11 @@ void HValue::PrintChangesTo(StringStream* stream) {
stream->Add("*");
} else {
bool add_comma = false;
#define PRINT_DO(Type) \
if (changes_flags.Contains(k##Type)) { \
if (add_comma) stream->Add(","); \
add_comma = true; \
stream->Add(#Type); \
#define PRINT_DO(type) \
if (changes_flags.Contains(kChanges##type)) { \
if (add_comma) stream->Add(","); \
add_comma = true; \
stream->Add(#type); \
}
GVN_TRACKED_FLAG_LIST(PRINT_DO);
GVN_UNTRACKED_FLAG_LIST(PRINT_DO);
@@ -682,19 +680,6 @@ void HValue::ComputeInitialRange(Zone* zone) {
}


void HSourcePosition::PrintTo(FILE* out) {
if (IsUnknown()) {
PrintF(out, "<?>");
} else {
if (FLAG_hydrogen_track_positions) {
PrintF(out, "<%d:%d>", inlining_id(), position());
} else {
PrintF(out, "<0:%d>", raw());
}
}
}


void HInstruction::PrintTo(StringStream* stream) {
PrintMnemonicTo(stream);
PrintDataTo(stream);
@@ -751,7 +736,8 @@ void HInstruction::InsertBefore(HInstruction* next) {
next_ = next;
previous_ = prev;
SetBlock(next->block());
if (!has_position() && next->has_position()) {
if (position() == RelocInfo::kNoPosition &&
next->position() != RelocInfo::kNoPosition) {
set_position(next->position());
}
}
@@ -788,7 +774,8 @@ void HInstruction::InsertAfter(HInstruction* previous) {
if (block->last() == previous) {
block->set_last(this);
}
if (!has_position() && previous->has_position()) {
if (position() == RelocInfo::kNoPosition &&
previous->position() != RelocInfo::kNoPosition) {
set_position(previous->position());
}
}
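
Note: both position hunks above replace the has_position() predicate with an
explicit RelocInfo::kNoPosition comparison. The shared intent, written out
as a free function with illustrative types (kNoPosition stands in for
RelocInfo::kNoPosition):

  static const int kNoPosition = -1;  // Stand-in for RelocInfo::kNoPosition.

  // A newly spliced instruction adopts its neighbor's source position if
  // it has none of its own.
  static void InheritPositionFrom(int* position, int neighbor_position) {
    if (*position == kNoPosition && neighbor_position != kNoPosition) {
      *position = neighbor_position;
    }
  }
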
@@ -1529,7 +1516,7 @@ void HCheckInstanceType::GetCheckMaskAndTag(uint8_t* mask, uint8_t* tag) {

bool HCheckMaps::HandleSideEffectDominator(GVNFlag side_effect,
HValue* dominator) {
ASSERT(side_effect == kMaps);
ASSERT(side_effect == kChangesMaps);
// TODO(mstarzinger): For now we specialize on HStoreNamedField, but once
// type information is rich enough we should generalize this to any HType
// for which the map is known.
@@ -1637,7 +1624,7 @@ Range* HChange::InferRange(Zone* zone) {
input_range != NULL &&
input_range->IsInSmiRange()))) {
set_type(HType::Smi());
ClearChangesFlag(kNewSpacePromotion);
ClearGVNFlag(kChangesNewSpacePromotion);
}
Range* result = (input_range != NULL)
? input_range->Copy(zone)
@@ -1660,7 +1647,7 @@ Range* HConstant::InferRange(Zone* zone) {
}


HSourcePosition HPhi::position() const {
int HPhi::position() const {
return block()->first()->position();
}

@@ -2562,7 +2549,11 @@ HConstant::HConstant(int32_t integer_value,
boolean_value_(integer_value != 0),
int32_value_(integer_value),
double_value_(FastI2D(integer_value)) {
set_type(has_smi_value_ ? HType::Smi() : HType::TaggedNumber());
// It's possible to create a constant with a value in Smi-range but stored
// in a (pre-existing) HeapNumber. See crbug.com/349878.
bool could_be_heapobject = r.IsTagged() && !object.handle().is_null();
bool is_smi = has_smi_value_ && !could_be_heapobject;
set_type(is_smi ? HType::Smi() : HType::TaggedNumber());
Initialize(r);
}

@@ -2582,7 +2573,11 @@ HConstant::HConstant(double double_value,
int32_value_(DoubleToInt32(double_value)),
double_value_(double_value) {
has_smi_value_ = has_int32_value_ && Smi::IsValid(int32_value_);
set_type(has_smi_value_ ? HType::Smi() : HType::TaggedNumber());
// It's possible to create a constant with a value in Smi-range but stored
// in a (pre-existing) HeapNumber. See crbug.com/349878.
bool could_be_heapobject = r.IsTagged() && !object.handle().is_null();
bool is_smi = has_smi_value_ && !could_be_heapobject;
set_type(is_smi ? HType::Smi() : HType::TaggedNumber());
Initialize(r);
}
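
Note: the guard added to both HConstant constructors above condenses to the
following predicate (an illustrative helper, not V8 code): a numeric
constant is typed HType::Smi() only when its value is in Smi range and it
cannot be backed by a pre-existing HeapNumber (crbug.com/349878).

  static bool GetsSmiType(bool has_smi_value, bool representation_is_tagged,
                          bool has_object_handle) {
    bool could_be_heapobject = representation_is_tagged && has_object_handle;
    return has_smi_value && !could_be_heapobject;
  }
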

@@ -3016,7 +3011,7 @@ void HCompareObjectEqAndBranch::PrintDataTo(StringStream* stream) {
bool HCompareObjectEqAndBranch::KnownSuccessorBlock(HBasicBlock** block) {
if (left()->IsConstant() && right()->IsConstant()) {
bool comparison_result =
HConstant::cast(left())->Equals(HConstant::cast(right()));
HConstant::cast(left())->DataEquals(HConstant::cast(right()));
*block = comparison_result
? FirstSuccessor()
: SecondSuccessor();
@@ -3118,7 +3113,7 @@ HCheckMaps* HCheckMaps::New(Zone* zone,
CompilationInfo* info,
HValue* typecheck) {
HCheckMaps* check_map = new(zone) HCheckMaps(value, zone, typecheck);
check_map->Add(map, info, zone);
check_map->Add(map, zone);
if (map->CanOmitMapChecks() &&
value->IsConstant() &&
HConstant::cast(value)->HasMap(map)) {
@@ -3425,7 +3420,7 @@ Representation HUnaryMathOperation::RepresentationFromInputs() {

bool HAllocate::HandleSideEffectDominator(GVNFlag side_effect,
HValue* dominator) {
ASSERT(side_effect == kNewSpacePromotion);
ASSERT(side_effect == kChangesNewSpacePromotion);
Zone* zone = block()->zone();
if (!FLAG_use_allocation_folding) return false;

@@ -3438,15 +3433,6 @@ bool HAllocate::HandleSideEffectDominator(GVNFlag side_effect,
return false;
}

// Check whether we are folding within the same block for local folding.
if (FLAG_use_local_allocation_folding && dominator->block() != block()) {
if (FLAG_trace_allocation_folding) {
PrintF("#%d (%s) cannot fold into #%d (%s), crosses basic blocks\n",
id(), Mnemonic(), dominator->id(), dominator->Mnemonic());
}
return false;
}

HAllocate* dominator_allocate = HAllocate::cast(dominator);
HValue* dominator_size = dominator_allocate->size();
HValue* current_size = size();
@@ -4412,80 +4398,56 @@ HObjectAccess HObjectAccess::ForCellPayload(Isolate* isolate) {
}


void HObjectAccess::SetGVNFlags(HValue *instr, PropertyAccessType access_type) {
void HObjectAccess::SetGVNFlags(HValue *instr, bool is_store) {
// set the appropriate GVN flags for a given load or store instruction
if (access_type == STORE) {
if (is_store) {
// track dominating allocations in order to eliminate write barriers
instr->SetDependsOnFlag(::v8::internal::kNewSpacePromotion);
instr->SetGVNFlag(kDependsOnNewSpacePromotion);
instr->SetFlag(HValue::kTrackSideEffectDominators);
} else {
// try to GVN loads, but don't hoist above map changes
instr->SetFlag(HValue::kUseGVN);
instr->SetDependsOnFlag(::v8::internal::kMaps);
instr->SetGVNFlag(kDependsOnMaps);
}

switch (portion()) {
case kArrayLengths:
if (access_type == STORE) {
instr->SetChangesFlag(::v8::internal::kArrayLengths);
} else {
instr->SetDependsOnFlag(::v8::internal::kArrayLengths);
}
instr->SetGVNFlag(is_store
? kChangesArrayLengths : kDependsOnArrayLengths);
break;
case kStringLengths:
if (access_type == STORE) {
instr->SetChangesFlag(::v8::internal::kStringLengths);
} else {
instr->SetDependsOnFlag(::v8::internal::kStringLengths);
}
instr->SetGVNFlag(is_store
? kChangesStringLengths : kDependsOnStringLengths);
break;
case kInobject:
if (access_type == STORE) {
instr->SetChangesFlag(::v8::internal::kInobjectFields);
} else {
instr->SetDependsOnFlag(::v8::internal::kInobjectFields);
}
instr->SetGVNFlag(is_store
? kChangesInobjectFields : kDependsOnInobjectFields);
break;
case kDouble:
if (access_type == STORE) {
instr->SetChangesFlag(::v8::internal::kDoubleFields);
} else {
instr->SetDependsOnFlag(::v8::internal::kDoubleFields);
}
instr->SetGVNFlag(is_store
? kChangesDoubleFields : kDependsOnDoubleFields);
break;
case kBackingStore:
if (access_type == STORE) {
instr->SetChangesFlag(::v8::internal::kBackingStoreFields);
} else {
instr->SetDependsOnFlag(::v8::internal::kBackingStoreFields);
}
instr->SetGVNFlag(is_store
? kChangesBackingStoreFields : kDependsOnBackingStoreFields);
break;
case kElementsPointer:
if (access_type == STORE) {
instr->SetChangesFlag(::v8::internal::kElementsPointer);
} else {
instr->SetDependsOnFlag(::v8::internal::kElementsPointer);
}
instr->SetGVNFlag(is_store
? kChangesElementsPointer : kDependsOnElementsPointer);
break;
case kMaps:
if (access_type == STORE) {
instr->SetChangesFlag(::v8::internal::kMaps);
} else {
instr->SetDependsOnFlag(::v8::internal::kMaps);
}
instr->SetGVNFlag(is_store
? kChangesMaps : kDependsOnMaps);
break;
case kExternalMemory:
if (access_type == STORE) {
instr->SetChangesFlag(::v8::internal::kExternalMemory);
} else {
instr->SetDependsOnFlag(::v8::internal::kExternalMemory);
}
instr->SetGVNFlag(is_store
? kChangesExternalMemory : kDependsOnExternalMemory);
break;
}
}


void HObjectAccess::PrintTo(StringStream* stream) const {
void HObjectAccess::PrintTo(StringStream* stream) {
stream->Add(".");

switch (portion()) {


@@ -76,7 +76,9 @@ class HLoadEliminationTable : public ZoneObject {
FieldOf(l->access()),
l->object()->ActualValue()->id()));
HValue* result = load(l);
if (result != instr) {
if (result != instr &&
result->type().Equals(instr->type()) &&
result->representation().Equals(instr->representation())) {
// The load can be replaced with a previous load or a value.
TRACE((" replace L%d -> v%d\n", instr->id(), result->id()));
instr->DeleteAndReplaceWith(result);
@@ -98,33 +100,26 @@ class HLoadEliminationTable : public ZoneObject {
}
break;
}
case HValue::kTransitionElementsKind: {
HTransitionElementsKind* t = HTransitionElementsKind::cast(instr);
HValue* object = t->object()->ActualValue();
KillFieldInternal(object, FieldOf(JSArray::kElementsOffset), NULL);
KillFieldInternal(object, FieldOf(JSObject::kMapOffset), NULL);
break;
}
default: {
if (instr->CheckChangesFlag(kInobjectFields)) {
if (instr->CheckGVNFlag(kChangesInobjectFields)) {
TRACE((" kill-all i%d\n", instr->id()));
Kill();
break;
}
if (instr->CheckChangesFlag(kMaps)) {
if (instr->CheckGVNFlag(kChangesMaps)) {
TRACE((" kill-maps i%d\n", instr->id()));
KillOffset(JSObject::kMapOffset);
}
if (instr->CheckChangesFlag(kElementsKind)) {
if (instr->CheckGVNFlag(kChangesElementsKind)) {
TRACE((" kill-elements-kind i%d\n", instr->id()));
KillOffset(JSObject::kMapOffset);
KillOffset(JSObject::kElementsOffset);
}
if (instr->CheckChangesFlag(kElementsPointer)) {
if (instr->CheckGVNFlag(kChangesElementsPointer)) {
TRACE((" kill-elements i%d\n", instr->id()));
KillOffset(JSObject::kElementsOffset);
}
if (instr->CheckChangesFlag(kOsrEntries)) {
if (instr->CheckGVNFlag(kChangesOsrEntries)) {
TRACE((" kill-osr i%d\n", instr->id()));
Kill();
}
@@ -139,32 +134,8 @@ class HLoadEliminationTable : public ZoneObject {
return this;
}

// Support for global analysis with HFlowEngine: Merge given state with
// the other incoming state.
static HLoadEliminationTable* Merge(HLoadEliminationTable* succ_state,
HBasicBlock* succ_block,
HLoadEliminationTable* pred_state,
HBasicBlock* pred_block,
Zone* zone) {
ASSERT(pred_state != NULL);
if (succ_state == NULL) {
return pred_state->Copy(succ_block, pred_block, zone);
} else {
return succ_state->Merge(succ_block, pred_state, pred_block, zone);
}
}

// Support for global analysis with HFlowEngine: Given state merged with all
// the other incoming states, prepare it for use.
static HLoadEliminationTable* Finish(HLoadEliminationTable* state,
HBasicBlock* block,
Zone* zone) {
ASSERT(state != NULL);
return state;
}

private:
// Copy state to successor block.
// Support for global analysis with HFlowEngine: Copy state to successor
// block.
HLoadEliminationTable* Copy(HBasicBlock* succ, HBasicBlock* from_block,
Zone* zone) {
HLoadEliminationTable* copy =
@@ -180,7 +151,8 @@ class HLoadEliminationTable : public ZoneObject {
return copy;
}

// Merge this state with the other incoming state.
// Support for global analysis with HFlowEngine: Merge this state with
// the other incoming state.
HLoadEliminationTable* Merge(HBasicBlock* succ, HLoadEliminationTable* that,
HBasicBlock* that_block, Zone* zone) {
if (that->fields_.length() < fields_.length()) {
@@ -460,33 +432,49 @@ class HLoadEliminationTable : public ZoneObject {
class HLoadEliminationEffects : public ZoneObject {
public:
explicit HLoadEliminationEffects(Zone* zone)
: zone_(zone), stores_(5, zone) { }
: zone_(zone),
maps_stored_(false),
fields_stored_(false),
elements_stored_(false),
stores_(5, zone) { }

inline bool Disabled() {
return false; // Effects are _not_ disabled.
}

// Process a possibly side-effecting instruction.
void Process(HInstruction* instr, Zone* zone) {
if (instr->IsStoreNamedField()) {
stores_.Add(HStoreNamedField::cast(instr), zone_);
} else {
flags_.Add(instr->ChangesFlags());
switch (instr->opcode()) {
case HValue::kStoreNamedField: {
stores_.Add(HStoreNamedField::cast(instr), zone_);
break;
}
case HValue::kOsrEntry: {
// Kill everything. Loads must not be hoisted past the OSR entry.
maps_stored_ = true;
fields_stored_ = true;
elements_stored_ = true;
}
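      // Fall through: with everything already marked stored, the flag
      // ORs in the default case below have no further effect.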
default: {
fields_stored_ |= instr->CheckGVNFlag(kChangesInobjectFields);
maps_stored_ |= instr->CheckGVNFlag(kChangesMaps);
maps_stored_ |= instr->CheckGVNFlag(kChangesElementsKind);
elements_stored_ |= instr->CheckGVNFlag(kChangesElementsKind);
elements_stored_ |= instr->CheckGVNFlag(kChangesElementsPointer);
}
}
}

// Apply these effects to the given load elimination table.
void Apply(HLoadEliminationTable* table) {
// Loads must not be hoisted past the OSR entry; therefore we kill
// everything if we see an OSR entry.
if (flags_.Contains(kInobjectFields) || flags_.Contains(kOsrEntries)) {
if (fields_stored_) {
table->Kill();
return;
}
if (flags_.Contains(kElementsKind) || flags_.Contains(kMaps)) {
if (maps_stored_) {
table->KillOffset(JSObject::kMapOffset);
}
if (flags_.Contains(kElementsKind) || flags_.Contains(kElementsPointer)) {
if (elements_stored_) {
table->KillOffset(JSObject::kElementsOffset);
}

@@ -498,15 +486,19 @@ class HLoadEliminationEffects : public ZoneObject {

// Union these effects with the other effects.
void Union(HLoadEliminationEffects* that, Zone* zone) {
flags_.Add(that->flags_);
maps_stored_ |= that->maps_stored_;
fields_stored_ |= that->fields_stored_;
elements_stored_ |= that->elements_stored_;
for (int i = 0; i < that->stores_.length(); i++) {
stores_.Add(that->stores_[i], zone);
}
}

private:
Zone* zone_;
GVNFlagSet flags_;
bool maps_stored_ : 1;
bool fields_stored_ : 1;
bool elements_stored_ : 1;
ZoneList<HStoreNamedField*> stores_;
};
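
Note: a standalone sketch of the effects lifecycle above: Process() folds
each instruction into the three coarse bits, Union() combines effects from
sibling paths, and Apply() replays them against a table. ToyTable and the
offsets below are illustrative stand-ins for HLoadEliminationTable and the
JSObject field offsets.

  struct ToyTable {
    void Kill() { /* forget every tracked field */ }
    void KillOffset(int offset) { /* forget fields at this offset */ }
  };

  struct ToyEffects {
    ToyEffects()
        : maps_stored(false), fields_stored(false), elements_stored(false) {}

    void Apply(ToyTable* table) {
      if (fields_stored) { table->Kill(); return; }  // Coarsest kill wins.
      if (maps_stored) table->KillOffset(0);         // Map offset.
      if (elements_stored) table->KillOffset(8);     // Elements offset.
    }

    void Union(const ToyEffects& that) {
      maps_stored |= that.maps_stored;
      fields_stored |= that.fields_stored;
      elements_stored |= that.elements_stored;
    }

    bool maps_stored;
    bool fields_stored;
    bool elements_stored;
  };
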

@@ -61,11 +61,10 @@ void HRepresentationChangesPhase::InsertRepresentationChangeForUse(
if (new_value == NULL) {
new_value = new(graph()->zone()) HChange(
value, to, is_truncating_to_smi, is_truncating_to_int);
if (!use_value->operand_position(use_index).IsUnknown()) {
if (use_value->operand_position(use_index) != RelocInfo::kNoPosition) {
new_value->set_position(use_value->operand_position(use_index));
} else {
ASSERT(!FLAG_hydrogen_track_positions ||
!graph()->info()->IsOptimizing());
ASSERT(!FLAG_emit_opt_code_positions || !graph()->info()->IsOptimizing());
}
}